Merge git://git.kernel.org/pub/scm/linux/kernel/git/lethal/fbdev-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/lethal/fbdev-2.6: (29 commits)
  video: move SH_MIPI_DSI/SH_LCD_MIPI_DSI to the top of menu
  fbdev: Implement simple blanking in pseudocolor modes for vt8500lcdfb
  video: imx: Update the manufacturer's name
  nuc900fb: don't treat NULL clk as an error
  s3c2410fb: don't treat NULL clk as an error
  video: tidy up modedb formatting.
  video: matroxfb: Correct video option in comments and kernel config help.
  fbdev: sh_mobile_hdmi: simplify pointer handling
  fbdev: sh_mobile_hdmi: framebuffer notifiers have to be registered
  fbdev: sh_mobile_hdmi: add command line option to use the preferred EDID mode
  OMAP: DSS2: Introduce omap_channel as an omap_dss_device parameter, add new overlay manager.
  OMAP: DSS2: Use dss_features to handle DISPC bits removed on OMAP4
  OMAP: DSS2: LCD2 Channel Changes for DISPC
  OMAP: DSS2: Change remaining DISPC functions for new omap_channel argument
  OMAP: DSS2: Introduce omap_channel argument to DISPC functions used by interface drivers
  OMAP: DSS2: Represent DISPC register defines with channel as parameter
  OMAP: DSS2: Add dss_features for omap4 and overlay manager related features
  OMAP: DSS2: Clean up DISPC color mode validation checks
  OMAP: DSS2: Add back authors of panel-generic.c based drivers
  OMAP: DSS2: remove generic DPI panel driver duplicated panel drivers
  ...
diff --git a/CREDITS b/CREDITS
index 494b6e4..1d39a6d 100644
--- a/CREDITS
+++ b/CREDITS
@@ -2811,8 +2811,8 @@
 N: Stelian Pop
 E: stelian@popies.net
 P: 1024D/EDBB6147 7B36 0E07 04BC 11DC A7A0  D3F7 7185 9E7A EDBB 6147
-D: sonypi, meye drivers, mct_u232 usb serial hacks
-S: Paris, France
+D: random kernel hacks
+S: Paimpont, France
 
 N: Pete Popov
 E: pete_popov@yahoo.com
diff --git a/Documentation/ABI/testing/sysfs-class-led b/Documentation/ABI/testing/sysfs-class-led
index 9e4541d..edff663 100644
--- a/Documentation/ABI/testing/sysfs-class-led
+++ b/Documentation/ABI/testing/sysfs-class-led
@@ -26,3 +26,12 @@
 		scheduler is chosen. Trigger specific parameters can appear in
 		/sys/class/leds/<led> once a given trigger is selected.
 
+What:		/sys/class/leds/<led>/inverted
+Date:		January 2011
+KernelVersion:	2.6.38
+Contact:	Richard Purdie <rpurdie@rpsys.net>
+Description:
+		Invert the LED on/off state. This parameter is specific to
+		gpio and backlight triggers. In case of the backlight trigger,
+		it is useful when driving an LED which is intended to indicate
+		a device in a standby-like state.
diff --git a/Documentation/DocBook/mtdnand.tmpl b/Documentation/DocBook/mtdnand.tmpl
index 020ac80..620eb3f 100644
--- a/Documentation/DocBook/mtdnand.tmpl
+++ b/Documentation/DocBook/mtdnand.tmpl
@@ -250,7 +250,7 @@
 		<title>Device ready function</title>
 		<para>
 			If the hardware interface has the ready busy pin of the NAND chip connected to a
-			GPIO or other accesible I/O pin, this function is used to read back the state of the
+			GPIO or other accessible I/O pin, this function is used to read back the state of the
 			pin. The function has no arguments and should return 0, if the device is busy (R/B pin 
 			is low) and 1, if the device is ready (R/B pin is high).
 			If the hardware interface does not give access to the ready busy pin, then
diff --git a/Documentation/cgroups/cgroup_event_listener.c b/Documentation/cgroups/cgroup_event_listener.c
index 8c2bfc4..3e082f9 100644
--- a/Documentation/cgroups/cgroup_event_listener.c
+++ b/Documentation/cgroups/cgroup_event_listener.c
@@ -91,7 +91,7 @@
 
 		if (ret == -1) {
 			perror("cgroup.event_control "
-					"is not accessable any more");
+					"is not accessible any more");
 			break;
 		}
 
diff --git a/Documentation/cgroups/cgroups.txt b/Documentation/cgroups/cgroups.txt
index 190018b..44b8b7a 100644
--- a/Documentation/cgroups/cgroups.txt
+++ b/Documentation/cgroups/cgroups.txt
@@ -355,13 +355,13 @@
 
 To change the set of subsystems bound to a mounted hierarchy, just
 remount with different options:
-# mount -o remount,cpuset,ns hier1 /dev/cgroup
+# mount -o remount,cpuset,blkio hier1 /dev/cgroup
 
-Now memory is removed from the hierarchy and ns is added.
+Now memory is removed from the hierarchy and blkio is added.
 
-Note this will add ns to the hierarchy but won't remove memory or
+Note this will add blkio to the hierarchy but won't remove memory or
 cpuset, because the new options are appended to the old ones:
-# mount -o remount,ns /dev/cgroup
+# mount -o remount,blkio /dev/cgroup
 
 To Specify a hierarchy's release_agent:
 # mount -t cgroup -o cpuset,release_agent="/sbin/cpuset_release_agent" \
diff --git a/Documentation/cgroups/memcg_test.txt b/Documentation/cgroups/memcg_test.txt
index b7eecec..fc8fa97 100644
--- a/Documentation/cgroups/memcg_test.txt
+++ b/Documentation/cgroups/memcg_test.txt
@@ -398,7 +398,7 @@
 	written to move_charge_at_immigrate.
 
  9.10 Memory thresholds
-	Memory controler implements memory thresholds using cgroups notification
+	Memory controller implements memory thresholds using cgroups notification
 	API. You can use Documentation/cgroups/cgroup_event_listener.c to test
 	it.
 
diff --git a/Documentation/email-clients.txt b/Documentation/email-clients.txt
index 945ff3f..a0b58e2 100644
--- a/Documentation/email-clients.txt
+++ b/Documentation/email-clients.txt
@@ -104,6 +104,13 @@
 As an added bonus you can customise the message creation toolbar menu
 and put the "insert file" icon there.
 
+Make the composer window wide enough so that no lines wrap. As of
+KMail 1.13.5 (KDE 4.5.4), KMail will apply word wrapping when sending
+the email if the lines wrap in the composer window. Having word wrapping
+disabled in the Options menu isn't enough. Thus, if your patch has very
+long lines, you must make the composer window very wide before sending
+the email. See: https://bugs.kde.org/show_bug.cgi?id=174034
+
 You can safely GPG sign attachments, but inlined text is preferred for
 patches so do not GPG sign them.  Signing patches that have been inserted
 as inlined text will make them tricky to extract from their 7-bit encoding.
@@ -179,26 +186,8 @@
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 Thunderbird (GUI)
 
-By default, thunderbird likes to mangle text, but there are ways to
-coerce it into being nice.
-
-- Under account settings, composition and addressing, uncheck "Compose
-  messages in HTML format".
-
-- Edit your Thunderbird config settings to tell it not to wrap lines:
-      user_pref("mailnews.wraplength", 0);
-
-- Edit your Thunderbird config settings so that it won't use format=flowed:
-      user_pref("mailnews.send_plaintext_flowed", false);
-
-- You need to get Thunderbird into preformat mode:
-. If you compose HTML messages by default, it's not too hard. Just select
-  "Preformat" from the drop-down box just under the subject line.
-. If you compose in text by default, you have to tell it to compose a new
-  message in HTML (just as a one-off), and then force it from there back to
-  text, else it will wrap lines. To do this, use shift-click on the Write
-  icon to compose to get HTML compose mode, then select "Preformat" from
-  the drop-down box just under the subject line.
+Thunderbird is an Outlook clone that likes to mangle text, but there are ways
+to coerce it into behaving.
 
 - Allows use of an external editor:
   The easiest thing to do with Thunderbird and patches is to use an
@@ -208,6 +197,27 @@
   View->Toolbars->Customize... and finally just click on it when in the
   Compose dialog.
 
+To beat some sense out of the internal editor, do this:
+
+- Under account settings, composition and addressing, uncheck "Compose
+  messages in HTML format".
+
+- Edit your Thunderbird config settings so that it won't use format=flowed.
+  Go to "edit->preferences->advanced->config editor" to bring up the
+  Thunderbird registry editor, and set "mailnews.send_plaintext_flowed" to
+  "false".
+
+- Enable "preformat" mode: Shift-click on the Write icon to bring up the HTML
+  composer, select "Preformat" from the drop-down box just under the subject
+  line, then close the message without saving.  (This setting also applies to
+  the text composer, but the only control for it is in the HTML composer.)
+
+- Install the "toggle wordwrap" extension.  Download the file from:
+    https://addons.mozilla.org/thunderbird/addon/2351/
+  Then go to "tools->add ons", select "install" at the bottom of the screen,
+  and browse to where you saved the .xul file.  This adds an "Enable
+  Wordwrap" entry under the Options menu of the message composer.
+
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 TkRat (GUI)
 
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index 22f1081..6cbbd20 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -193,6 +193,20 @@
 
 ---------------------------
 
+What:	CS5535/CS5536 obsolete GPIO driver
+When:	June 2011
+Files:	drivers/staging/cs5535_gpio/*
+Check:	drivers/staging/cs5535_gpio/cs5535_gpio.c
+Why:	A newer driver replaces this; it is drivers/gpio/cs5535-gpio.c, and
+	integrates with the Linux GPIO subsystem.  The old driver has been
+	moved to staging, and will be removed altogether around 2.6.40.
+	Please test the new driver, and ensure that the functionality you
+	need and any bugfixes from the old driver are available in the new
+	one.
+Who:	Andres Salomon <dilinger@queued.net>
+
+--------------------------
+
 What:	remove EXPORT_SYMBOL(kernel_thread)
 When:	August 2006
 Files:	arch/*/kernel/*_ksyms.c
@@ -576,3 +590,13 @@
 Who:	Tejun Heo <tj@kernel.org>
 
 ----------------------------
+
+What:	Legacy, non-standard chassis intrusion detection interface.
+When:	June 2011
+Why:	The adm9240, w83792d and w83793 hardware monitoring drivers have
+	legacy interfaces for chassis intrusion detection. A standard
+	interface has been added to each driver, so the legacy interface
+	can be removed.
+Who:	Jean Delvare <khali@linux-fr.org>
+
+----------------------------
diff --git a/Documentation/filesystems/ntfs.txt b/Documentation/filesystems/ntfs.txt
index ac2a261..6ef8cf3 100644
--- a/Documentation/filesystems/ntfs.txt
+++ b/Documentation/filesystems/ntfs.txt
@@ -457,6 +457,9 @@
 
 Note, a technical ChangeLog aimed at kernel hackers is in fs/ntfs/ChangeLog.
 
+2.1.30:
+	- Fix writev() (it kept writing the first segment over and over again
+	  instead of moving onto subsequent segments).
 2.1.29:
 	- Fix a deadlock when mounting read-write.
 2.1.28:
diff --git a/Documentation/filesystems/porting b/Documentation/filesystems/porting
index 07a32b4..266d205 100644
--- a/Documentation/filesystems/porting
+++ b/Documentation/filesystems/porting
@@ -385,3 +385,12 @@
 on many or all directory inodes on the way down a path walk (to check for
 exec permission). These must now be rcu-walk aware (flags & IPERM_RCU). See
 Documentation/filesystems/vfs.txt for more details.
+ 
+--
+[mandatory]
+	In ->fallocate() you must check the mode option passed in.  If your
+filesystem does not support hole punching (deallocating space in the middle of a
+file) you must return -EOPNOTSUPP if FALLOC_FL_PUNCH_HOLE is set in mode.
+Currently you can only have FALLOC_FL_PUNCH_HOLE with FALLOC_FL_KEEP_SIZE set,
+so the i_size should not change when hole punching, even when punching the end of
+a file off.
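
For illustration only (this sketch is not part of the patch above): a
filesystem without hole-punch support might implement the check described
in the note like this; the "myfs" names are placeholders.

	#include <linux/falloc.h>
	#include <linux/fs.h>

	/* Hypothetical ->fallocate() for a filesystem that cannot punch holes. */
	static long myfs_fallocate(struct file *file, int mode,
				   loff_t offset, loff_t len)
	{
		/* Hole punching is not supported, so reject it explicitly. */
		if (mode & FALLOC_FL_PUNCH_HOLE)
			return -EOPNOTSUPP;

		/* ... normal preallocation path would go here ... */
		return 0;
	}
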
diff --git a/Documentation/gpio.txt b/Documentation/gpio.txt
index 792faa3..a492d92 100644
--- a/Documentation/gpio.txt
+++ b/Documentation/gpio.txt
@@ -135,7 +135,7 @@
 	int gpio_direction_input(unsigned gpio);
 	int gpio_direction_output(unsigned gpio, int value);
 
-The return value is zero for success, else a negative errno.  It should
+The return value is zero for success, else a negative errno.  It must
 be checked, since the get/set calls don't have error returns and since
 misconfiguration is possible.  You should normally issue these calls from
 a task context.  However, for spinlock-safe GPIOs it's OK to use them
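
As a hedged illustration of the rule above (not part of the patch), a
sketch that checks the error returns before relying on the GPIO; the GPIO
number and label are made up.

	#include <linux/gpio.h>

	#define RESET_GPIO	42	/* hypothetical GPIO number */

	static int board_setup_reset_line(void)
	{
		int err;

		err = gpio_request(RESET_GPIO, "reset");
		if (err)
			return err;

		/* Drive the line high; the return value must be checked. */
		err = gpio_direction_output(RESET_GPIO, 1);
		if (err)
			gpio_free(RESET_GPIO);

		return err;
	}
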
diff --git a/Documentation/hwmon/adm9240 b/Documentation/hwmon/adm9240
index 2c6f1fe..36e8ec6 100644
--- a/Documentation/hwmon/adm9240
+++ b/Documentation/hwmon/adm9240
@@ -155,7 +155,7 @@
 The ADM9240 provides an internal open drain on this line, and may output
 a 20 ms active low pulse to reset an external Chassis Intrusion latch.
 
-Clear the CI latch by writing value 1 to the sysfs chassis_clear file.
+Clear the CI latch by writing value 0 to the sysfs intrusion0_alarm file.
 
 Alarm flags reported as 16-bit word
 
diff --git a/Documentation/hwmon/ads7828 b/Documentation/hwmon/ads7828
index 75bc4be..2bbebe6 100644
--- a/Documentation/hwmon/ads7828
+++ b/Documentation/hwmon/ads7828
@@ -9,7 +9,7 @@
                http://focus.ti.com/lit/ds/symlink/ads7828.pdf
 
 Authors:
-        Steve Hardy <steve@linuxrealtime.co.uk>
+        Steve Hardy <shardy@redhat.com>
 
 Module Parameters
 -----------------
diff --git a/Documentation/hwmon/dme1737 b/Documentation/hwmon/dme1737
index fc5df76..4d29351 100644
--- a/Documentation/hwmon/dme1737
+++ b/Documentation/hwmon/dme1737
@@ -42,7 +42,7 @@
 This driver implements support for the hardware monitoring capabilities of the
 SMSC DME1737 and Asus A8000 (which are the same), SMSC SCH5027, SCH311x,
 and SCH5127 Super-I/O chips. These chips feature monitoring of 3 temp sensors
-temp[1-3] (2 remote diodes and 1 internal), 7 voltages in[0-6] (6 external and
+temp[1-3] (2 remote diodes and 1 internal), 8 voltages in[0-7] (7 external and
 1 internal) and up to 6 fan speeds fan[1-6]. Additionally, the chips implement
 up to 5 PWM outputs pwm[1-3,5-6] for controlling fan speeds both manually and
 automatically.
@@ -105,6 +105,7 @@
 	in4: V1_IN				0V - 1.5V
 	in5: VTR	(+3.3V standby)		0V - 4.38V
 	in6: Vbat	(+3.0V)			0V - 4.38V
+	in7: Vtrip	(+1.5V)			0V - 1.99V
 
 Each voltage input has associated min and max limits which trigger an alarm
 when crossed.
@@ -217,10 +218,10 @@
 vrm				RW	Voltage regulator module version
 					number.
 
-in[0-6]_input			RO	Measured voltage in millivolts.
-in[0-6]_min			RW	Low limit for voltage input.
-in[0-6]_max			RW	High limit for voltage input.
-in[0-6]_alarm			RO	Voltage input alarm. Returns 1 if
+in[0-7]_input			RO	Measured voltage in millivolts.
+in[0-7]_min			RW	Low limit for voltage input.
+in[0-7]_max			RW	High limit for voltage input.
+in[0-7]_alarm			RO	Voltage input alarm. Returns 1 if
 					voltage input is or went outside the
 					associated min-max range, 0 otherwise.
 
@@ -324,3 +325,4 @@
 pwm5			opt		opt
 fan6			opt		opt
 pwm6			opt		opt
+in7						yes
diff --git a/Documentation/hwmon/w83627hf b/Documentation/hwmon/w83627hf
index fb145e5..8432e11 100644
--- a/Documentation/hwmon/w83627hf
+++ b/Documentation/hwmon/w83627hf
@@ -91,3 +91,25 @@
 
 The above sequence assumes a Super-I/O config space at 0x2e/0x2f, but
 0x4e/0x4f is also possible.
+
+Voltage pin mapping
+-------------------
+
+Here is a summary of the voltage pin mapping for the W83627THF. This
+can be useful to convert data provided by board manufacturers into
+working libsensors configuration statements.
+
+    W83627THF		|
+  Pin	| Name		| Register	| Sysfs attribute
+-----------------------------------------------------
+  100	| CPUVCORE	| 20h		| in0
+   99	| VIN0		| 21h		| in1
+   98	| VIN1		| 22h		| in2
+   97	| VIN2		| 24h		| in4
+  114	| AVCC		| 23h		| in3
+   61	| 5VSB		| 50h (bank 5)	| in7
+   74	| VBAT		| 51h (bank 5)	| in8
+
+For other supported devices, you'll have to take the hard path and
+look up the information in the datasheet yourself (and then add it
+to this document please.)
diff --git a/Documentation/hwmon/w83793 b/Documentation/hwmon/w83793
index 51171a8..6cc5f63 100644
--- a/Documentation/hwmon/w83793
+++ b/Documentation/hwmon/w83793
@@ -92,7 +92,7 @@
 
 * Chassis
   If the case open alarm triggers, it will stay in this state unless cleared
-  by any write to the sysfs file "chassis".
+  by writing 0 to the sysfs file "intrusion0_alarm".
 
 * VID and VRM
   The VRM version is detected automatically, don't modify the it unless you
diff --git a/Documentation/i2c/muxes/gpio-i2cmux b/Documentation/i2c/muxes/gpio-i2cmux
new file mode 100644
index 0000000..811cd78
--- /dev/null
+++ b/Documentation/i2c/muxes/gpio-i2cmux
@@ -0,0 +1,65 @@
+Kernel driver gpio-i2cmux
+
+Author: Peter Korsgaard <peter.korsgaard@barco.com>
+
+Description
+-----------
+
+gpio-i2cmux is an i2c mux driver providing access to I2C bus segments
+from a master I2C bus and a hardware MUX controlled through GPIO pins.
+
+E.G.:
+
+  ----------              ----------  Bus segment 1   - - - - -
+ |          | SCL/SDA    |          |-------------- |           |
+ |          |------------|          |
+ |          |            |          | Bus segment 2 |           |
+ |  Linux   | GPIO 1..N  |   MUX    |---------------   Devices
+ |          |------------|          |               |           |
+ |          |            |          | Bus segment M
+ |          |            |          |---------------|           |
+  ----------              ----------                  - - - - -
+
+SCL/SDA of the master I2C bus is multiplexed to bus segment 1..M
+according to the settings of the GPIO pins 1..N.
+
+Usage
+-----
+
+gpio-i2cmux uses the platform bus, so you need to provide a struct
+platform_device with the platform_data pointing to a struct
+gpio_i2cmux_platform_data with the I2C adapter number of the master
+bus, the number of bus segments to create and the GPIO pins used
+to control it. See include/linux/gpio-i2cmux.h for details.
+
+E.G. something like this for a MUX providing 4 bus segments
+controlled through 3 GPIO pins:
+
+#include <linux/gpio-i2cmux.h>
+#include <linux/platform_device.h>
+
+static const unsigned myboard_gpiomux_gpios[] = {
+	AT91_PIN_PC26, AT91_PIN_PC25, AT91_PIN_PC24
+};
+
+static const unsigned myboard_gpiomux_values[] = {
+	0, 1, 2, 3
+};
+
+static struct gpio_i2cmux_platform_data myboard_i2cmux_data = {
+	.parent		= 1,
+	.base_nr	= 2, /* optional */
+	.values		= myboard_gpiomux_values,
+	.n_values	= ARRAY_SIZE(myboard_gpiomux_values),
+	.gpios		= myboard_gpiomux_gpios,
+	.n_gpios	= ARRAY_SIZE(myboard_gpiomux_gpios),
+	.idle		= 4, /* optional */
+};
+
+static struct platform_device myboard_i2cmux = {
+	.name		= "gpio-i2cmux",
+	.id		= 0,
+	.dev		= {
+		.platform_data	= &myboard_i2cmux_data,
+	},
+};
diff --git a/Documentation/input/ff.txt b/Documentation/input/ff.txt
index ded4d5f..b3867bf 100644
--- a/Documentation/input/ff.txt
+++ b/Documentation/input/ff.txt
@@ -49,7 +49,9 @@
 #include <linux/input.h>
 #include <sys/ioctl.h>
 
-unsigned long features[1 + FF_MAX/sizeof(unsigned long)];
+#define BITS_TO_LONGS(x) \
+	(((x) + 8 * sizeof (unsigned long) - 1) / (8 * sizeof (unsigned long)))
+unsigned long features[BITS_TO_LONGS(FF_CNT)];
 int ioctl(int file_descriptor, int request, unsigned long *features);
 
 "request" must be EVIOCGBIT(EV_FF, size of features array in bytes )
diff --git a/Documentation/ioctl/ioctl-number.txt b/Documentation/ioctl/ioctl-number.txt
index d6a63c7..ac293e9 100644
--- a/Documentation/ioctl/ioctl-number.txt
+++ b/Documentation/ioctl/ioctl-number.txt
@@ -247,7 +247,7 @@
 'p'	40-7F	linux/nvram.h
 'p'	80-9F	linux/ppdev.h		user-space parport
 					<mailto:tim@cyberelk.net>
-'p'	A1-A4	linux/pps.h		LinuxPPS
+'p'	A1-A5	linux/pps.h		LinuxPPS
 					<mailto:giometti@linux.it>
 'q'	00-1F	linux/serio.h
 'q'	80-FF	linux/telephony.h	Internet PhoneJACK, Internet LineJACK
diff --git a/Documentation/iostats.txt b/Documentation/iostats.txt
index 59a69ec..f6dece5 100644
--- a/Documentation/iostats.txt
+++ b/Documentation/iostats.txt
@@ -81,7 +81,7 @@
     The only field that should go to zero. Incremented as requests are
     given to appropriate struct request_queue and decremented as they finish.
 Field 10 -- # of milliseconds spent doing I/Os
-    This field is increases so long as field 9 is nonzero.
+    This field increases so long as field 9 is nonzero.
 Field 11 -- weighted # of milliseconds spent doing I/Os
     This field is incremented at each I/O start, I/O completion, I/O
     merge, or read of these stats by the number of I/Os in progress
diff --git a/Documentation/kdump/kdump.txt b/Documentation/kdump/kdump.txt
index cab61d8..7a9e0b4 100644
--- a/Documentation/kdump/kdump.txt
+++ b/Documentation/kdump/kdump.txt
@@ -65,18 +65,21 @@
 
 2) Download the kexec-tools user-space package from the following URL:
 
-http://www.kernel.org/pub/linux/kernel/people/horms/kexec-tools/kexec-tools.tar.gz
+http://kernel.org/pub/linux/utils/kernel/kexec/kexec-tools.tar.gz
 
 This is a symlink to the latest version.
 
 The latest kexec-tools git tree is available at:
 
-git://git.kernel.org/pub/scm/linux/kernel/git/horms/kexec-tools.git
-or
-http://www.kernel.org/git/?p=linux/kernel/git/horms/kexec-tools.git
+git://git.kernel.org/pub/scm/utils/kernel/kexec/kexec-tools.git
+and
+http://www.kernel.org/pub/scm/utils/kernel/kexec/kexec-tools.git
+
+There is also a gitweb interface available at
+http://www.kernel.org/git/?p=utils/kernel/kexec/kexec-tools.git
 
 More information about kexec-tools can be found at
-http://www.kernel.org/pub/linux/kernel/people/horms/kexec-tools/README.html
+http://www.kernel.org/pub/linux/utils/kernel/kexec/README.html
 
 3) Unpack the tarball with the tar command, as follows:
 
@@ -439,6 +442,6 @@
 Contact
 =======
 
-Vivek Goyal (vgoyal@in.ibm.com)
+Vivek Goyal (vgoyal@redhat.com)
 Maneesh Soni (maneesh@in.ibm.com)
 
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index f3dc951..55fe759 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -403,6 +403,10 @@
 	bttv.pll=	See Documentation/video4linux/bttv/Insmod-options
 	bttv.tuner=	and Documentation/video4linux/bttv/CARDLIST
 
+	bulk_remove=off	[PPC]  This parameter disables the use of the pSeries
+			firmware feature for flushing multiple hpte entries
+			at a time.
+
 	c101=		[NET] Moxa C101 synchronous serial card
 
 	cachesize=	[BUGS=X86-32] Override level 2 CPU cache size detection.
@@ -655,11 +659,6 @@
 
 	dscc4.setup=	[NET]
 
-	dynamic_printk	Enables pr_debug()/dev_dbg() calls if
-			CONFIG_DYNAMIC_PRINTK_DEBUG has been enabled.
-			These can also be switched on/off via
-			<debugfs>/dynamic_printk/modules
-
 	earlycon=	[KNL] Output early console device and options.
 		uart[8250],io,<addr>[,options]
 		uart[8250],mmio,<addr>[,options]
@@ -884,6 +883,7 @@
 			     controller
 	i8042.nopnp	[HW] Don't use ACPIPnP / PnPBIOS to discover KBD/AUX
 			     controllers
+	i8042.notimeout	[HW] Ignore timeout condition signalled by controller
 	i8042.reset	[HW] Reset the controller during init and cleanup
 	i8042.unlock	[HW] Unlock (ignore) the keylock
 
@@ -1490,6 +1490,10 @@
 	mtdparts=	[MTD]
 			See drivers/mtd/cmdlinepart.c.
 
+	multitce=off	[PPC]  This parameter disables the use of the pSeries
+			firmware feature for updating multiple TCE entries
+			at a time.
+
 	onenand.bdry=	[HW,MTD] Flex-OneNAND Boundary Configuration
 
 			Format: [die0_boundary][,die0_lock][,die1_boundary][,die1_lock]
@@ -1701,6 +1705,9 @@
 
 	no-kvmclock	[X86,KVM] Disable paravirtualized KVM clock driver
 
+	no-kvmapf	[X86,KVM] Disable paravirtualized asynchronous page
+			fault handling.
+
 	nolapic		[X86-32,APIC] Do not enable or use the local APIC.
 
 	nolapic_timer	[X86-32,APIC] Do not use the local APIC timer.
diff --git a/Documentation/ko_KR/HOWTO b/Documentation/ko_KR/HOWTO
index e3a55b6..ab5189a 100644
--- a/Documentation/ko_KR/HOWTO
+++ b/Documentation/ko_KR/HOWTO
@@ -391,8 +391,8 @@
 bugme-janitor 메일링 리스트(bugzilla에 모든 변화들이 여기서 메일로 전해진다)
 에 등록하면 된다.
 
-      http://lists.osdl.org/mailman/listinfo/bugme-new
-      http://lists.osdl.org/mailman/listinfo/bugme-janitors
+      https://lists.linux-foundation.org/mailman/listinfo/bugme-new
+      https://lists.linux-foundation.org/mailman/listinfo/bugme-janitors
 
 
 
diff --git a/Documentation/kprobes.txt b/Documentation/kprobes.txt
index 741fe66..0cfb00f 100644
--- a/Documentation/kprobes.txt
+++ b/Documentation/kprobes.txt
@@ -598,7 +598,7 @@
 a) The instructions in DCR must be relocatable.
 b) The instructions in DCR must not include a call instruction.
 c) JTPR must not be targeted by any jump or call instruction.
-d) DCR must not straddle the border betweeen functions.
+d) DCR must not straddle the border between functions.
 
 Anyway, these limitations are checked by the in-kernel instruction
 decoder, so you don't need to worry about that.
diff --git a/Documentation/kvm/api.txt b/Documentation/kvm/api.txt
index b336266..ad85797 100644
--- a/Documentation/kvm/api.txt
+++ b/Documentation/kvm/api.txt
@@ -874,7 +874,7 @@
  - KVM_MP_STATE_HALTED:          the vcpu has executed a HLT instruction and
                                  is waiting for an interrupt
  - KVM_MP_STATE_SIPI_RECEIVED:   the vcpu has just received a SIPI (vector
-                                 accesible via KVM_GET_VCPU_EVENTS)
+                                 accessible via KVM_GET_VCPU_EVENTS)
 
 This ioctl is only useful after KVM_CREATE_IRQCHIP.  Without an in-kernel
 irqchip, the multiprocessing state must be maintained by userspace.
@@ -1085,6 +1085,184 @@
 If any additional field gets added to this structure later on, a bit for that
 additional piece of information will be set in the flags bitmap.
 
+4.47 KVM_ASSIGN_PCI_DEVICE
+
+Capability: KVM_CAP_DEVICE_ASSIGNMENT
+Architectures: x86 ia64
+Type: vm ioctl
+Parameters: struct kvm_assigned_pci_dev (in)
+Returns: 0 on success, -1 on error
+
+Assigns a host PCI device to the VM.
+
+struct kvm_assigned_pci_dev {
+	__u32 assigned_dev_id;
+	__u32 busnr;
+	__u32 devfn;
+	__u32 flags;
+	__u32 segnr;
+	union {
+		__u32 reserved[11];
+	};
+};
+
+The PCI device is specified by the triple segnr, busnr, and devfn.
+Identification in succeeding service requests is done via assigned_dev_id. The
+following flags are specified:
+
+/* Depends on KVM_CAP_IOMMU */
+#define KVM_DEV_ASSIGN_ENABLE_IOMMU	(1 << 0)
+
+4.48 KVM_DEASSIGN_PCI_DEVICE
+
+Capability: KVM_CAP_DEVICE_DEASSIGNMENT
+Architectures: x86 ia64
+Type: vm ioctl
+Parameters: struct kvm_assigned_pci_dev (in)
+Returns: 0 on success, -1 on error
+
+Ends PCI device assignment, releasing all associated resources.
+
+See KVM_CAP_DEVICE_ASSIGNMENT for the data structure. Only assigned_dev_id is
+used in kvm_assigned_pci_dev to identify the device.
+
+4.49 KVM_ASSIGN_DEV_IRQ
+
+Capability: KVM_CAP_ASSIGN_DEV_IRQ
+Architectures: x86 ia64
+Type: vm ioctl
+Parameters: struct kvm_assigned_irq (in)
+Returns: 0 on success, -1 on error
+
+Assigns an IRQ to a passed-through device.
+
+struct kvm_assigned_irq {
+	__u32 assigned_dev_id;
+	__u32 host_irq;
+	__u32 guest_irq;
+	__u32 flags;
+	union {
+		struct {
+			__u32 addr_lo;
+			__u32 addr_hi;
+			__u32 data;
+		} guest_msi;
+		__u32 reserved[12];
+	};
+};
+
+The following flags are defined:
+
+#define KVM_DEV_IRQ_HOST_INTX    (1 << 0)
+#define KVM_DEV_IRQ_HOST_MSI     (1 << 1)
+#define KVM_DEV_IRQ_HOST_MSIX    (1 << 2)
+
+#define KVM_DEV_IRQ_GUEST_INTX   (1 << 8)
+#define KVM_DEV_IRQ_GUEST_MSI    (1 << 9)
+#define KVM_DEV_IRQ_GUEST_MSIX   (1 << 10)
+
+It is not valid to specify multiple types per host or guest IRQ. However, the
+IRQ type of host and guest can differ or can even be null.
+
+4.50 KVM_DEASSIGN_DEV_IRQ
+
+Capability: KVM_CAP_ASSIGN_DEV_IRQ
+Architectures: x86 ia64
+Type: vm ioctl
+Parameters: struct kvm_assigned_irq (in)
+Returns: 0 on success, -1 on error
+
+Ends an IRQ assignment to a passed-through device.
+
+See KVM_ASSIGN_DEV_IRQ for the data structure. The target device is specified
+by assigned_dev_id, flags must correspond to the IRQ type specified on
+KVM_ASSIGN_DEV_IRQ. Partial deassignment of host or guest IRQ is allowed.
+
+4.51 KVM_SET_GSI_ROUTING
+
+Capability: KVM_CAP_IRQ_ROUTING
+Architectures: x86 ia64
+Type: vm ioctl
+Parameters: struct kvm_irq_routing (in)
+Returns: 0 on success, -1 on error
+
+Sets the GSI routing table entries, overwriting any previously set entries.
+
+struct kvm_irq_routing {
+	__u32 nr;
+	__u32 flags;
+	struct kvm_irq_routing_entry entries[0];
+};
+
+No flags are specified so far, the corresponding field must be set to zero.
+
+struct kvm_irq_routing_entry {
+	__u32 gsi;
+	__u32 type;
+	__u32 flags;
+	__u32 pad;
+	union {
+		struct kvm_irq_routing_irqchip irqchip;
+		struct kvm_irq_routing_msi msi;
+		__u32 pad[8];
+	} u;
+};
+
+/* gsi routing entry types */
+#define KVM_IRQ_ROUTING_IRQCHIP 1
+#define KVM_IRQ_ROUTING_MSI 2
+
+No flags are specified so far, the corresponding field must be set to zero.
+
+struct kvm_irq_routing_irqchip {
+	__u32 irqchip;
+	__u32 pin;
+};
+
+struct kvm_irq_routing_msi {
+	__u32 address_lo;
+	__u32 address_hi;
+	__u32 data;
+	__u32 pad;
+};
+
+4.52 KVM_ASSIGN_SET_MSIX_NR
+
+Capability: KVM_CAP_DEVICE_MSIX
+Architectures: x86 ia64
+Type: vm ioctl
+Parameters: struct kvm_assigned_msix_nr (in)
+Returns: 0 on success, -1 on error
+
+Set the number of MSI-X interrupts for an assigned device. This service can
+only be called once in the lifetime of an assigned device.
+
+struct kvm_assigned_msix_nr {
+	__u32 assigned_dev_id;
+	__u16 entry_nr;
+	__u16 padding;
+};
+
+#define KVM_MAX_MSIX_PER_DEV		256
+
+4.53 KVM_ASSIGN_SET_MSIX_ENTRY
+
+Capability: KVM_CAP_DEVICE_MSIX
+Architectures: x86 ia64
+Type: vm ioctl
+Parameters: struct kvm_assigned_msix_entry (in)
+Returns: 0 on success, -1 on error
+
+Specifies the routing of an MSI-X assigned device interrupt to a GSI. Setting
+the GSI vector to zero means disabling the interrupt.
+
+struct kvm_assigned_msix_entry {
+	__u32 assigned_dev_id;
+	__u32 gsi;
+	__u16 entry; /* The index of entry in the MSI-X table */
+	__u16 padding[3];
+};
+
 5. The kvm_run structure
 
 Application code obtains a pointer to the kvm_run structure by
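
For illustration only (not part of the patch above), a minimal userspace
sketch of the KVM_ASSIGN_PCI_DEVICE ioctl documented in the new section
4.47; the vm file descriptor and the PCI address 0000:01:10.0 are
assumptions.

	#include <linux/kvm.h>
	#include <string.h>
	#include <sys/ioctl.h>

	static int assign_host_device(int vm_fd)
	{
		struct kvm_assigned_pci_dev dev;

		memset(&dev, 0, sizeof(dev));
		dev.assigned_dev_id = 1;		/* handle reused in later requests */
		dev.segnr = 0;				/* PCI segment (domain) */
		dev.busnr = 0x01;
		dev.devfn = (0x10 << 3) | 0x0;		/* device 0x10, function 0 */
		dev.flags = KVM_DEV_ASSIGN_ENABLE_IOMMU;	/* needs KVM_CAP_IOMMU */

		return ioctl(vm_fd, KVM_ASSIGN_PCI_DEVICE, &dev);
	}
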
diff --git a/Documentation/kvm/cpuid.txt b/Documentation/kvm/cpuid.txt
index 14a12ea..8820685 100644
--- a/Documentation/kvm/cpuid.txt
+++ b/Documentation/kvm/cpuid.txt
@@ -36,6 +36,9 @@
 KVM_FEATURE_CLOCKSOURCE2           ||     3 || kvmclock available at msrs
                                    ||       || 0x4b564d00 and 0x4b564d01
 ------------------------------------------------------------------------------
+KVM_FEATURE_ASYNC_PF               ||     4 || async pf can be enabled by
+                                   ||       || writing to msr 0x4b564d02
+------------------------------------------------------------------------------
 KVM_FEATURE_CLOCKSOURCE_STABLE_BIT ||    24 || host will warn if no guest-side
                                    ||       || per-cpu warps are expected in
                                    ||       || kvmclock.
diff --git a/Documentation/kvm/msr.txt b/Documentation/kvm/msr.txt
index 8ddcfe8..d079aed 100644
--- a/Documentation/kvm/msr.txt
+++ b/Documentation/kvm/msr.txt
@@ -3,7 +3,6 @@
 =====================================================
 
 KVM makes use of some custom MSRs to service some requests.
-At present, this facility is only used by kvmclock.
 
 Custom MSRs have a range reserved for them, that goes from
 0x4b564d00 to 0x4b564dff. There are MSRs outside this area,
@@ -151,3 +150,38 @@
 			return PRESENT;
 		} else
 			return NON_PRESENT;
+
+MSR_KVM_ASYNC_PF_EN: 0x4b564d02
+	data: Bits 63-6 hold 64-byte aligned physical address of a
+	64 byte memory area which must be in guest RAM and must be
+	zeroed. Bits 5-2 are reserved and should be zero. Bit 0 is 1
+	when asynchronous page faults are enabled on the vcpu, 0 when
+	disabled. Bit 2 is 1 if asynchronous page faults can be injected
+	when vcpu is in cpl == 0.
+
+	The first 4 bytes of the 64 byte memory location will be written
+	to by the hypervisor at the time of asynchronous page fault (APF)
+	injection to indicate the type of asynchronous page fault. A value
+	of 1 means that the page referred to by the page fault is not
+	present. A value of 2 means that the page is now available. Disabling
+	interrupts inhibits APFs. The guest must not enable interrupts
+	before the reason is read, or it may be overwritten by another
+	APF. Since APF uses the same exception vector as a regular page
+	fault, the guest must reset the reason to 0 before it does
+	something that can generate a normal page fault.  If the APF
+	reason is 0 during a page fault, it means that this is a regular
+	page fault.
+
+	During delivery of a type 1 APF, cr2 contains a token that will
+	be used to notify the guest when the missing page becomes
+	available. When the page becomes available, a type 2 APF is sent
+	with cr2 set to the token associated with the page. There is a
+	special kind of token, 0xffffffff, which tells the vcpu that it
+	should wake up all processes waiting for APFs and that no
+	individual type 2 APFs will be sent.
+
+	If APF is disabled while there are outstanding APFs, they will
+	not be delivered.
+
+	Currently a type 2 APF will always be delivered on the same vcpu as
+	the type 1 was, but the guest should not rely on that.
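
A hedged sketch (not from the patch) of how a guest might enable the
feature described above; the name of the per-cpu area is made up, and the
enable bit follows the layout given in this entry.

	#include <linux/types.h>
	#include <asm/msr.h>
	#include <asm/page.h>

	#define MSR_KVM_ASYNC_PF_EN	0x4b564d02

	/* 64-byte aligned, zeroed area in guest RAM, as required above. */
	static u64 apf_reason_area[8] __attribute__((aligned(64)));

	static void enable_async_pf(void)
	{
		/* Bit 0 enables asynchronous page faults on this vcpu. */
		wrmsrl(MSR_KVM_ASYNC_PF_EN, __pa(apf_reason_area) | 1);
	}
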
diff --git a/Documentation/lguest/lguest.txt b/Documentation/lguest/lguest.txt
index efb3a6a..6ccaf8e 100644
--- a/Documentation/lguest/lguest.txt
+++ b/Documentation/lguest/lguest.txt
@@ -111,8 +111,11 @@
 
   Then use --tunnet=bridge:lg0 when launching the guest.
 
-  See http://linux-net.osdl.org/index.php/Bridge for general information
-  on how to get bridging working.
+  See:
+  
+    http://www.linuxfoundation.org/collaborate/workgroups/networking/bridge
+    
+  for general information on how to get bridging to work.
 
 There is a helpful mailing list at http://ozlabs.org/mailman/listinfo/lguest
 
diff --git a/Documentation/magic-number.txt b/Documentation/magic-number.txt
index 505f196..4b12abc 100644
--- a/Documentation/magic-number.txt
+++ b/Documentation/magic-number.txt
@@ -150,7 +150,7 @@
 STL_BOARDMAGIC        0xa2267f52  stlbrd            include/linux/stallion.h
 ENI155_MAGIC          0xa54b872d  midway_eprom	    drivers/atm/eni.h
 SCI_MAGIC             0xbabeface  gs_port           drivers/char/sh-sci.h
-CODA_MAGIC            0xC0DAC0DA  coda_file_info    include/linux/coda_fs_i.h
+CODA_MAGIC            0xC0DAC0DA  coda_file_info    fs/coda/coda_fs_i.h
 DPMEM_MAGIC           0xc0ffee11  gdt_pci_sram      drivers/scsi/gdth.h
 STLI_PORTMAGIC        0xe671c7a1  stliport          include/linux/istallion.h
 YAM_MAGIC             0xF10A7654  yam_port          drivers/net/hamradio/yam.c
diff --git a/Documentation/networking/bridge.txt b/Documentation/networking/bridge.txt
index bec69a8..a7ba5e4 100644
--- a/Documentation/networking/bridge.txt
+++ b/Documentation/networking/bridge.txt
@@ -1,8 +1,8 @@
 In order to use the Ethernet bridging functionality, you'll need the
 userspace tools. These programs and documentation are available
-at http://www.linux-foundation.org/en/Net:Bridge.  The download page is
+at http://www.linuxfoundation.org/en/Net:Bridge.  The download page is
 http://prdownloads.sourceforge.net/bridge.
 
 If you still have questions, don't hesitate to post to the mailing list 
-(more info http://lists.osdl.org/mailman/listinfo/bridge).
+(more info https://lists.linux-foundation.org/mailman/listinfo/bridge).
 
diff --git a/Documentation/networking/caif/spi_porting.txt b/Documentation/networking/caif/spi_porting.txt
index 61d7c92..0cb8cb9 100644
--- a/Documentation/networking/caif/spi_porting.txt
+++ b/Documentation/networking/caif/spi_porting.txt
@@ -32,7 +32,7 @@
 	This function is called by the CAIF SPI interface to give
 	you a chance to set up your hardware to be ready to receive
 	a stream of data from the master. The xfer structure contains
-	both physical and logical adresses, as well as the total length
+	both physical and logical addresses, as well as the total length
 	of the transfer in both directions.The dev parameter can be used
 	to map to different CAIF SPI slave devices.
 
diff --git a/Documentation/networking/dccp.txt b/Documentation/networking/dccp.txt
index b395ca6..d718bc2 100644
--- a/Documentation/networking/dccp.txt
+++ b/Documentation/networking/dccp.txt
@@ -38,11 +38,11 @@
 specified in RFCs 4340...42.
 
 The known bugs are at:
-	http://linux-net.osdl.org/index.php/TODO#DCCP
+	http://www.linuxfoundation.org/collaborate/workgroups/networking/todo#DCCP
 
 For more up-to-date versions of the DCCP implementation, please consider using
 the experimental DCCP test tree; instructions for checking this out are on:
-http://linux-net.osdl.org/index.php/DCCP_Testing#Experimental_DCCP_source_tree
+http://www.linuxfoundation.org/collaborate/workgroups/networking/dccp_testing#Experimental_DCCP_source_tree
 
 
 Socket options
@@ -167,6 +167,7 @@
 seq_window = 100
 	The initial sequence window (sec. 7.5.2) of the sender. This influences
 	the local ackno validity and the remote seqno validity windows (7.5.1).
+	Values in the range Wmin = 32 (RFC 4340, 7.5.2) up to 2^32-1 can be set.
 
 tx_qlen = 5
 	The size of the transmit buffer in packets. A value of 0 corresponds
diff --git a/Documentation/networking/generic_netlink.txt b/Documentation/networking/generic_netlink.txt
index d4f8b8b..3e07111 100644
--- a/Documentation/networking/generic_netlink.txt
+++ b/Documentation/networking/generic_netlink.txt
@@ -1,3 +1,3 @@
 A wiki document on how to use Generic Netlink can be found here:
 
- * http://linux-net.osdl.org/index.php/Generic_Netlink_HOWTO
+ * http://www.linuxfoundation.org/collaborate/workgroups/networking/generic_netlink_howto
diff --git a/Documentation/nfc/nfc-pn544.txt b/Documentation/nfc/nfc-pn544.txt
new file mode 100644
index 0000000..2fcac9f
--- /dev/null
+++ b/Documentation/nfc/nfc-pn544.txt
@@ -0,0 +1,114 @@
+Kernel driver for the NXP Semiconductors PN544 Near Field
+Communication chip
+
+Author: Jari Vanhala
+Contact: Matti Aaltonen (matti.j.aaltonen at nokia.com)
+
+General
+-------
+
+The PN544 is an integrated transmission module for contactless
+communication. The driver goes under drives/nfc/ and is compiled as a
+module named "pn544". It registers a misc device and creates a device
+file named "/dev/pn544".
+
+Host Interfaces: I2C, SPI and HSU; this driver currently supports only I2C.
+
+The Interface
+-------------
+
+The driver offers a sysfs interface for a hardware test and an IOCTL
+interface for selecting between two operating modes. There are read,
+write and poll functions for transferring messages. The two operating
+modes are the normal (HCI) mode and the firmware update mode.
+
+PN544 is controlled by sending messages from the userspace to the
+chip. The main function of the driver is just to pass those messages
+without caring about the message content.
+
+
+Protocols
+---------
+
+In the normal (HCI) mode and in the firmware update mode read and
+write functions behave a bit differently because the message formats
+or the protocols are different.
+
+In the normal (HCI) mode the protocol used is derived from the ETSI
+HCI specification. The firmware is updated using a specific protocol,
+which is different from HCI.
+
+HCI messages consist of an eight bit header and the message body. The
+header contains the message length. The maximum size for an HCI message
+is 33 bytes. In HCI mode, sent messages are tested for a correct
+checksum. Firmware update messages have the length in the second (MSB)
+and third (LSB) bytes of the message. The maximum FW message length is
+1024 bytes.
+
+For the ETSI HCI specification see
+http://www.etsi.org/WebSite/Technologies/ProtocolSpecification.aspx
+
+The Hardware Test
+-----------------
+
+The idea of the test is that it can be performed by reading from the
+corresponding sysfs file. The test is implemented in the board file
+and it should test that PN544 can be put into the firmware update
+mode. If the test is not implemented, the sysfs file does not get
+created.
+
+Example:
+> cat /sys/module/pn544/drivers/i2c\:pn544/3-002b/nfc_test
+1
+
+Normal Operation
+----------------
+
+PN544 is powered up when the device file is opened, otherwise it's
+turned off. Only one instance can use the device at a time.
+
+Userspace applications control PN544 with HCI messages. The hardware
+sends an interrupt when data is available for reading. Data is
+physically read when the read function is called by a userspace
+application. Poll() checks the read interrupt state. Configuration and
+self testing are also done from the userspace using read and write.
+
+Example platform data:
+
+static int rx71_pn544_nfc_request_resources(struct i2c_client *client)
+{
+	/* Get and setup the HW resources for the device */
+}
+
+static void rx71_pn544_nfc_free_resources(void)
+{
+	/* Release the HW resources */
+}
+
+static void rx71_pn544_nfc_enable(int fw)
+{
+	/* Turn the device on */
+}
+
+static int rx71_pn544_nfc_test(void)
+{
+	/*
+	 * Put the device into the FW update mode
+	 * and then back to the normal mode.
+	 * Check the behavior and return one on success,
+	 * zero on failure.
+	 */
+}
+
+static void rx71_pn544_nfc_disable(void)
+{
+	/* turn the power off */
+}
+
+static struct pn544_nfc_platform_data rx71_nfc_data = {
+	.request_resources = rx71_pn544_nfc_request_resources,
+	.free_resources = rx71_pn544_nfc_free_resources,
+	.enable = rx71_pn544_nfc_enable,
+	.test = rx71_pn544_nfc_test,
+	.disable = rx71_pn544_nfc_disable,
+};
diff --git a/Documentation/powerpc/booting-without-of.txt b/Documentation/powerpc/booting-without-of.txt
index 302db5d..7400d75 100644
--- a/Documentation/powerpc/booting-without-of.txt
+++ b/Documentation/powerpc/booting-without-of.txt
@@ -131,7 +131,7 @@
 point and the way a new platform should be added to the kernel. The
 legacy iSeries platform breaks those rules as it predates this scheme,
 but no new board support will be accepted in the main tree that
-doesn't follows them properly.  In addition, since the advent of the
+doesn't follow them properly.  In addition, since the advent of the
 arch/powerpc merged architecture for ppc32 and ppc64, new 32-bit
 platforms and 32-bit platforms which move into arch/powerpc will be
 required to use these rules as well.
@@ -1025,7 +1025,7 @@
 
 WARNING: This version is still in early development stage; the
 resulting device-tree "blobs" have not yet been validated with the
-kernel. The current generated bloc lacks a useful reserve map (it will
+kernel. The current generated block lacks a useful reserve map (it will
 be fixed to generate an empty one, it's up to the bootloader to fill
 it up) among others. The error handling needs work, bugs are lurking,
 etc...
@@ -1098,7 +1098,7 @@
                                  * an arbitrary array of bytes
                                  */
 
-  childnode@addresss {	/* define a child node named "childnode"
+  childnode@address {	/* define a child node named "childnode"
                                  * whose unit name is "childnode at
 				 * address"
                                  */
diff --git a/Documentation/powerpc/dts-bindings/4xx/cpm.txt b/Documentation/powerpc/dts-bindings/4xx/cpm.txt
new file mode 100644
index 0000000..ee45980
--- /dev/null
+++ b/Documentation/powerpc/dts-bindings/4xx/cpm.txt
@@ -0,0 +1,52 @@
+PPC4xx Clock Power Management (CPM) node
+
+Required properties:
+	- compatible		: compatible list, currently only "ibm,cpm"
+	- dcr-access-method	: "native"
+	- dcr-reg		: < DCR register range >
+
+Optional properties:
+	- er-offset		: All 4xx SoCs with a CPM controller have
+				  one of two different orders for the CPM
+				  registers. Some have the CPM registers
+				  in the following order (ER,FR,SR). The
+				  others have them in the following order
+				  (SR,ER,FR). For the second case set
+				  er-offset = <1>.
+	- unused-units		: specifier consists of one cell. For each
+				  bit in the cell, the corresponding bit
+				  in CPM will be set to turn off unused
+				  devices.
+	- idle-doze		: specifier consists of one cell. For each
+				  bit in the cell, the corresponding bit
+				  in CPM will be set to turn off unused
+				  devices. This is usually just CPM[CPU].
+	- standby		: specifier consists of one cell. For each
+				  bit in the cell, the corresponding bit
+				  in CPM will be set on standby and
+				  restored on resume.
+	- suspend		: specifier consists of one cell. For each
+				  bit in the cell, the corresponding bit
+				  in CPM will be set on suspend (mem) and
+				  restored on resume. Note, for standby
+				  and suspend the corresponding bits can
+				  be different or the same. Usually for
+				  standby only class 2 and 3 units are set.
+				  However, the interface does not care.
+				  If they are the same, additional power
+				  savings will be seen if support is
+				  available to put the DDR into self
+				  refresh mode, plus any additional power
+				  saving techniques for the specific SoC.
+
+Example:
+	CPM0: cpm {
+		compatible = "ibm,cpm";
+		dcr-access-method = "native";
+		dcr-reg = <0x160 0x003>;
+		er-offset = <0>;
+		unused-units = <0x00000100>;
+		idle-doze = <0x02000000>;
+		standby = <0xfeff0000>;
+		suspend = <0xfeff791d>;
+	};
diff --git a/Documentation/pps/pps.txt b/Documentation/pps/pps.txt
index 125f4ab..d35dcdd 100644
--- a/Documentation/pps/pps.txt
+++ b/Documentation/pps/pps.txt
@@ -170,3 +170,49 @@
 
 Please, note that to compile userland programs you need the file timepps.h
 (see Documentation/pps/).
+
+
+Generators
+----------
+
+Sometimes one needs to be able not only to catch PPS signals but also to
+produce them. For example, running a distributed simulation may require
+the computers' clocks to be synchronized very tightly. One way to do this
+is to invent some complicated hardware solution, but that may be neither
+necessary nor affordable. The cheap way is to load a PPS generator on one
+of the computers (master) and PPS clients on the others (slaves), and use
+very simple cables to deliver signals using parallel ports, for example.
+
+Parallel port cable pinout:
+pin	name	master      slave
+1	STROBE	  *------     *
+2	D0	  *     |     *
+3	D1	  *     |     *
+4	D2	  *     |     *
+5	D3	  *     |     *
+6	D4	  *     |     *
+7	D5	  *     |     *
+8	D6	  *     |     *
+9	D7	  *     |     *
+10	ACK	  *     ------*
+11	BUSY	  *           *
+12	PE	  *           *
+13	SEL	  *           *
+14	AUTOFD	  *           *
+15	ERROR	  *           *
+16	INIT	  *           *
+17	SELIN	  *           *
+18-25	GND	  *-----------*
+
+Please note that the parallel port interrupt occurs only on the high->low
+transition, so it is used for the PPS assert edge. The PPS clear edge can
+only be determined by polling in the interrupt handler, which can actually
+be done much more precisely because interrupt handling delays can be quite
+big and random. So the current parport PPS generator (pps_gen_parport
+module) is geared towards using the clear edge for time synchronization.
+
+Clear edge polling is done with interrupts disabled, so the delay between
+the assert and clear edges should be as small as possible to reduce system
+latencies. But if it is too small, the slave won't be able to capture the
+clear edge transition. The default of 30us should be good enough in most
+situations. The delay can be selected using the 'delay' module parameter.
diff --git a/Documentation/scheduler/00-INDEX b/Documentation/scheduler/00-INDEX
index 3c00c9c..d2651c4 100644
--- a/Documentation/scheduler/00-INDEX
+++ b/Documentation/scheduler/00-INDEX
@@ -3,7 +3,7 @@
 sched-arch.txt
 	- CPU Scheduler implementation hints for architecture specific code.
 sched-design-CFS.txt
-	- goals, design and implementation of the Complete Fair Scheduler.
+	- goals, design and implementation of the Completely Fair Scheduler.
 sched-domains.txt
 	- information on scheduling domains.
 sched-nice-design.txt
diff --git a/Documentation/scsi/ChangeLog.lpfc b/Documentation/scsi/ChangeLog.lpfc
index 337c924..5e83769 100644
--- a/Documentation/scsi/ChangeLog.lpfc
+++ b/Documentation/scsi/ChangeLog.lpfc
@@ -573,7 +573,7 @@
 	* Backround nodev_timeout processing to DPC This enables us to
 	  unblock (stop dev_loss_tmo) when appopriate.
 	* Fix array discovery with multiple luns.  The max_luns was 0 at
-	  the time the host structure was intialized.  lpfc_cfg_params
+	  the time the host structure was initialized.  lpfc_cfg_params
 	  then set the max_luns to the correct value afterwards.
 	* Remove unused define LPFC_MAX_LUN and set the default value of
 	  lpfc_max_lun parameter to 512.
diff --git a/Documentation/serial/tty.txt b/Documentation/serial/tty.txt
index 7c90050..540db41 100644
--- a/Documentation/serial/tty.txt
+++ b/Documentation/serial/tty.txt
@@ -107,7 +107,7 @@
 
 dcd_change()	-	Report to the tty line the current DCD pin status
 			changes and the relative timestamp. The timestamp
-			can be NULL.
+			cannot be NULL.
 
 
 Driver Access
diff --git a/Documentation/sound/alsa/ALSA-Configuration.txt b/Documentation/sound/alsa/ALSA-Configuration.txt
index d0eb696..3c1eddd 100644
--- a/Documentation/sound/alsa/ALSA-Configuration.txt
+++ b/Documentation/sound/alsa/ALSA-Configuration.txt
@@ -974,13 +974,6 @@
 
     See hdspm.txt for details.
 
-  Module snd-hifier
-  -----------------
-
-    Module for the MediaTek/TempoTec HiFier Fantasia sound card.
-
-    This module supports autoprobe and multiple cards.
-
   Module snd-ice1712
   ------------------
 
@@ -1531,15 +1524,20 @@
   Module snd-oxygen
   -----------------
 
-    Module for sound cards based on the C-Media CMI8788 chip:
+    Module for sound cards based on the C-Media CMI8786/8787/8788 chip:
     * Asound A-8788
+    * Asus Xonar DG
     * AuzenTech X-Meridian
+    * AuzenTech X-Meridian 2G
     * Bgears b-Enspirer
     * Club3D Theatron DTS
     * HT-Omega Claro (plus)
     * HT-Omega Claro halo (XT)
+    * Kuroutoshikou CMI8787-HG2PCI
     * Razer Barracuda AC-1
     * Sondigo Inferno
+    * TempoTec HiFier Fantasia
+    * TempoTec HiFier Serenade
 
     This module supports autoprobe and multiple cards.
 
@@ -2006,9 +2004,9 @@
   Module snd-virtuoso
   -------------------
 
-    Module for sound cards based on the Asus AV100/AV200 chips,
-    i.e., Xonar D1, DX, D2, D2X, DS, HDAV1.3 (Deluxe), Essence ST
-    (Deluxe) and Essence STX.
+    Module for sound cards based on the Asus AV66/AV100/AV200 chips,
+    i.e., Xonar D1, DX, D2, D2X, DS, Essence ST (Deluxe), Essence STX,
+    HDAV1.3 (Deluxe), and HDAV1.3 Slim.
 
     This module supports autoprobe and multiple cards.
 
diff --git a/Documentation/sound/alsa/HD-Audio-Models.txt b/Documentation/sound/alsa/HD-Audio-Models.txt
index 37c6aad..16ae430 100644
--- a/Documentation/sound/alsa/HD-Audio-Models.txt
+++ b/Documentation/sound/alsa/HD-Audio-Models.txt
@@ -149,7 +149,6 @@
   acer-aspire-7730g Acer Aspire 7730G
   acer-aspire-8930g Acer Aspire 8930G
   medion	Medion Laptops
-  medion-md2	Medion MD2
   targa-dig	Targa/MSI
   targa-2ch-dig	Targa/MSI with 2-channel
   targa-8ch-dig Targa/MSI with 8-channel (MSI GX620)
diff --git a/Documentation/sysctl/00-INDEX b/Documentation/sysctl/00-INDEX
index 1286f45..8cf5d49 100644
--- a/Documentation/sysctl/00-INDEX
+++ b/Documentation/sysctl/00-INDEX
@@ -4,8 +4,6 @@
 	- general information about /proc/sys/ sysctl files.
 abi.txt
 	- documentation for /proc/sys/abi/*.
-ctl_unnumbered.txt
-	- explanation of why one should not add new binary sysctl numbers.
 fs.txt
 	- documentation for /proc/sys/fs/*.
 kernel.txt
diff --git a/Documentation/sysctl/kernel.txt b/Documentation/sysctl/kernel.txt
index 57406719..11d5ced 100644
--- a/Documentation/sysctl/kernel.txt
+++ b/Documentation/sysctl/kernel.txt
@@ -34,6 +34,7 @@
 - hotplug
 - java-appletviewer           [ binfmt_java, obsolete ]
 - java-interpreter            [ binfmt_java, obsolete ]
+- kptr_restrict
 - kstack_depth_to_print       [ X86 only ]
 - l2cr                        [ PPC only ]
 - modprobe                    ==> Documentation/debugging-modules.txt
@@ -261,6 +262,19 @@
 
 ==============================================================
 
+kptr_restrict:
+
+This toggle indicates whether restrictions are placed on
+exposing kernel addresses via /proc and other interfaces.  When
+kptr_restrict is set to (0), there are no restrictions.  When
+kptr_restrict is set to (1), the default, kernel pointers
+printed using the %pK format specifier will be replaced with 0's
+unless the user has CAP_SYSLOG.  When kptr_restrict is set to
+(2), kernel pointers printed using %pK will be replaced with 0's
+regardless of privileges.
+
+==============================================================
+
 kstack_depth_to_print: (X86 only)
 
 Controls the number of words to print when dumping the raw
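
Referring back to the kptr_restrict entry above, for illustration only
(not part of the patch): the pointer printed by kernel code such as the
following hypothetical driver message is what gets replaced with zeroes
for readers lacking CAP_SYSLOG when kptr_restrict is 1 (or for everyone
when it is 2).

	#include <linux/kernel.h>

	static void report_buffer(void *buf)
	{
		pr_info("ring buffer at %pK\n", buf);
	}
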
diff --git a/Documentation/timers/timer_stats.txt b/Documentation/timers/timer_stats.txt
index 9bd00fc..8abd40b 100644
--- a/Documentation/timers/timer_stats.txt
+++ b/Documentation/timers/timer_stats.txt
@@ -19,7 +19,7 @@
 
 - the pid of the task(process) which initialized the timer
 - the name of the process which initialized the timer
-- the function where the timer was intialized
+- the function where the timer was initialized
 - the callback function which is associated to the timer
 - the number of events (callbacks)
 
diff --git a/Documentation/trace/events.txt b/Documentation/trace/events.txt
index 09bd8e9..b510564 100644
--- a/Documentation/trace/events.txt
+++ b/Documentation/trace/events.txt
@@ -125,7 +125,7 @@
 For example, here's the information displayed for the 'sched_wakeup'
 event:
 
-# cat /debug/tracing/events/sched/sched_wakeup/format
+# cat /sys/kernel/debug/tracing/events/sched/sched_wakeup/format
 
 name: sched_wakeup
 ID: 60
@@ -201,19 +201,19 @@
 
 For example:
 
-# cd /debug/tracing/events/sched/sched_wakeup
+# cd /sys/kernel/debug/tracing/events/sched/sched_wakeup
 # echo "common_preempt_count > 4" > filter
 
 A slightly more involved example:
 
-# cd /debug/tracing/events/sched/sched_signal_send
+# cd /sys/kernel/debug/tracing/events/signal/signal_generate
 # echo "((sig >= 10 && sig < 15) || sig == 17) && comm != bash" > filter
 
 If there is an error in the expression, you'll get an 'Invalid
 argument' error when setting it, and the erroneous string along with
 an error message can be seen by looking at the filter e.g.:
 
-# cd /debug/tracing/events/sched/sched_signal_send
+# cd /sys/kernel/debug/tracing/events/signal/signal_generate
 # echo "((sig >= 10 && sig < 15) || dsig == 17) && comm != bash" > filter
 -bash: echo: write error: Invalid argument
 # cat filter
diff --git a/Documentation/w1/slaves/00-INDEX b/Documentation/w1/slaves/00-INDEX
index f8101d6..75613c9 100644
--- a/Documentation/w1/slaves/00-INDEX
+++ b/Documentation/w1/slaves/00-INDEX
@@ -2,3 +2,5 @@
 	- This file
 w1_therm
 	- The Maxim/Dallas Semiconductor ds18*20 temperature sensor.
+w1_ds2423
+	- The Maxim/Dallas Semiconductor ds2423 counter device.
diff --git a/Documentation/w1/slaves/w1_ds2423 b/Documentation/w1/slaves/w1_ds2423
new file mode 100644
index 0000000..90a65d2
--- /dev/null
+++ b/Documentation/w1/slaves/w1_ds2423
@@ -0,0 +1,47 @@
+Kernel driver w1_ds2423
+=======================
+
+Supported chips:
+  * Maxim DS2423 based counter devices.
+
+supported family codes:
+	W1_THERM_DS2423	0x1D
+
+Author: Mika Laitio <lamikr@pilppa.org>
+
+Description
+-----------
+
+Support is provided through the sysfs w1_slave file. Each open and
+read sequence of the w1_slave file initiates a read of the counters and
+ram available in DS2423 pages 12 - 15.
+
+The result of each page is provided as ASCII output where each counter
+value and its associated ram buffer is output on its own line.
+
+Each line will contain the values of the 42 bytes read from the counter
+and memory page, along with crc=YES or NO indicating whether the read
+operation was successful and the CRC matched.
+If the operation was successful, there is also at the end of each line
+a counter value expressed as an integer after c=
+
+The meaning of the 42 bytes represented is as follows:
+ - 1 byte from ram page
+ - 4 bytes for the counter value
+ - 4 zero bytes
+ - 2 bytes for crc16 which was calculated from the data read since the previous crc bytes
+ - 31 remaining bytes from the ram page
+ - crc=YES/NO indicating whether read was ok and crc matched
+ - c=<int> current counter value
+
+Example of a successful read:
+00 02 00 00 00 00 00 00 00 6d 38 00 ff ff 00 00 fe ff 00 00 ff ff 00 00 ff ff 00 00 ff ff 00 00 ff ff 00 00 ff ff 00 00 ff ff crc=YES c=2
+00 02 00 00 00 00 00 00 00 e0 1f 00 ff ff 00 00 ff ff 00 00 ff ff 00 00 ff ff 00 00 ff ff 00 00 ff ff 00 00 ff ff 00 00 ff ff crc=YES c=2
+00 29 c6 5d 18 00 00 00 00 04 37 00 ff ff 00 00 ff ff 00 00 ff ff 00 00 ff ff 00 00 ff ff 00 00 ff ff 00 00 ff ff 00 00 ff ff crc=YES c=408798761
+00 05 00 00 00 00 00 00 00 8d 39 ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff crc=YES c=5
+
+Example of a read with crc errors:
+00 02 00 00 00 00 00 00 00 6d 38 00 ff ff 00 00 fe ff 00 00 ff ff 00 00 ff ff 00 00 ff ff 00 00 ff ff 00 00 ff ff 00 00 ff ff crc=YES c=2
+00 02 00 00 22 00 00 00 00 e0 1f 00 ff ff 00 00 ff ff 00 00 ff ff 00 00 ff ff 00 00 ff ff 00 00 ff ff 00 00 ff ff 00 00 ff ff crc=NO
+00 e1 61 5d 19 00 00 00 00 df 0b 00 ff ff 00 00 ff ff 00 00 ff ff 00 00 ff ff 00 00 ff ff 00 00 ff ff 00 00 ff ff 00 00 ff ff crc=NO
+00 05 00 00 20 00 00 00 00 8d 39 ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff ff crc=NO
diff --git a/Documentation/x86/boot.txt b/Documentation/x86/boot.txt
index bdeb81c..9b7221a 100644
--- a/Documentation/x86/boot.txt
+++ b/Documentation/x86/boot.txt
@@ -622,9 +622,9 @@
   The payload may be compressed. The format of both the compressed and
   uncompressed data should be determined using the standard magic
   numbers.  The currently supported compression formats are gzip
-  (magic numbers 1F 8B or 1F 9E), bzip2 (magic number 42 5A) and LZMA
-  (magic number 5D 00).  The uncompressed payload is currently always ELF
-  (magic number 7F 45 4C 46).
+  (magic numbers 1F 8B or 1F 9E), bzip2 (magic number 42 5A), LZMA
+  (magic number 5D 00), and XZ (magic number FD 37).  The uncompressed
+  payload is currently always ELF (magic number 7F 45 4C 46).
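+
+  For illustration only (this sketch is not part of the boot protocol),
+  a boot loader might classify the payload along these lines, where p
+  points at the start of the payload:
+
+	if (p[0] == 0x1F && (p[1] == 0x8B || p[1] == 0x9E))
+		format = "gzip";
+	else if (p[0] == 0x42 && p[1] == 0x5A)
+		format = "bzip2";
+	else if (p[0] == 0x5D && p[1] == 0x00)
+		format = "lzma";
+	else if (p[0] == 0xFD && p[1] == 0x37)
+		format = "xz";
+	else if (p[0] == 0x7F && p[1] == 'E' && p[2] == 'L' && p[3] == 'F')
+		format = "elf";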
   
 Field name:	payload_length
 Type:		read
diff --git a/Documentation/xz.txt b/Documentation/xz.txt
new file mode 100644
index 0000000..2cf3e26
--- /dev/null
+++ b/Documentation/xz.txt
@@ -0,0 +1,121 @@
+
+XZ data compression in Linux
+============================
+
+Introduction
+
+    XZ is a general purpose data compression format with high compression
+    ratio and relatively fast decompression. The primary compression
+    algorithm (filter) is LZMA2. Additional filters can be used to improve
+    compression ratio even further. E.g. Branch/Call/Jump (BCJ) filters
+    improve compression ratio of executable data.
+
+    The XZ decompressor in Linux is called XZ Embedded. It supports
+    the LZMA2 filter and optionally also BCJ filters. CRC32 is supported
+    for integrity checking. The home page of XZ Embedded is at
+    <http://tukaani.org/xz/embedded.html>, where you can find the
+    latest version and also information about using the code outside
+    the Linux kernel.
+
+    For userspace, XZ Utils provide a zlib-like compression library
+    and a gzip-like command line tool. XZ Utils can be downloaded from
+    <http://tukaani.org/xz/>.
+
+XZ related components in the kernel
+
+    The xz_dec module provides an XZ decompressor with single-call (buffer
+    to buffer) and multi-call (stateful) APIs. The usage of the xz_dec
+    module is documented in include/linux/xz.h.
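+
+    A minimal single-call decompression sketch (assuming the xz_dec API
+    declared in include/linux/xz.h; the function name and buffers below
+    are purely illustrative):
+
+        #include <linux/errno.h>
+        #include <linux/xz.h>
+
+        /* Decompress in[] into out[] in one shot using XZ_SINGLE mode. */
+        static int example_unxz(const uint8_t *in, size_t in_size,
+                                uint8_t *out, size_t out_size)
+        {
+                struct xz_buf b = {
+                        .in = in, .in_pos = 0, .in_size = in_size,
+                        .out = out, .out_pos = 0, .out_size = out_size,
+                };
+                struct xz_dec *s = xz_dec_init(XZ_SINGLE, 0);
+                enum xz_ret ret;
+
+                if (s == NULL)
+                        return -ENOMEM;
+                ret = xz_dec_run(s, &b);  /* whole stream in one call */
+                xz_dec_end(s);
+                return ret == XZ_STREAM_END ? 0 : -EINVAL;
+        }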
+
+    The xz_dec_test module is for testing xz_dec. xz_dec_test is not
+    useful unless you are hacking the XZ decompressor. xz_dec_test
+    allocates a char device major dynamically to which one can write
+    .xz files from userspace. The decompressed output is thrown away.
+    Keep an eye on dmesg to see diagnostics printed by xz_dec_test.
+    See the xz_dec_test source code for the details.
+
+    For decompressing the kernel image, initramfs, and initrd, there
+    is a wrapper function in lib/decompress_unxz.c. Its API is the
+    same as in other decompress_*.c files, which is defined in
+    include/linux/decompress/generic.h.
+
+    scripts/xz_wrap.sh is a wrapper for the xz command line tool found
+    in XZ Utils. The wrapper sets compression options to values suitable
+    for compressing the kernel image.
+
+    For kernel makefiles, two commands are provided for use with
+    $(call if_changed). The kernel image should be compressed with
+    $(call if_changed,xzkern), which will use a BCJ filter and a big LZMA2
+    dictionary. It will also append a four-byte trailer containing the
+    uncompressed size of the file, which is needed by the boot code.
+    Other things should be compressed with $(call if_changed,xzmisc),
+    which will use no BCJ filter and a 1 MiB LZMA2 dictionary.
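+
+    For example, an architecture's boot Makefile might contain a rule
+    roughly like the following (a sketch; the target names are only
+    illustrative):
+
+        $(obj)/vmlinux.bin.xz: $(obj)/vmlinux.bin FORCE
+                $(call if_changed,xzkern)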
+
+Notes on compression options
+
+    Since XZ Embedded supports only streams with no integrity check or
+    CRC32, make sure that you don't use some other integrity check type
+    when encoding files that are supposed to be decoded by the kernel. With
+    liblzma, you need to use either LZMA_CHECK_NONE or LZMA_CHECK_CRC32
+    when encoding. With the xz command line tool, use --check=none or
+    --check=crc32.
+
+    Using CRC32 is strongly recommended unless there is some other layer
+    which will verify the integrity of the uncompressed data anyway.
+    Double-checking the integrity would probably be a waste of CPU cycles.
+    Note that the headers will always have a CRC32 which will be validated
+    by the decoder; you can only change the integrity check type (or
+    disable it) for the actual uncompressed data.
+
+    In userspace, LZMA2 is typically used with dictionary sizes of several
+    megabytes. The decoder needs to have the dictionary in RAM, thus big
+    dictionaries cannot be used for files that are intended to be decoded
+    by the kernel. 1 MiB is probably the maximum reasonable dictionary
+    size for in-kernel use (maybe more is OK for initramfs). The presets
+    in XZ Utils may not be optimal when creating files for the kernel,
+    so don't hesitate to use custom settings. Example:
+
+        xz --check=crc32 --lzma2=dict=512KiB inputfile
+
+    An exception to the above dictionary size limitation is when the decoder
+    is used in single-call mode. Decompressing the kernel itself is an
+    example of this situation. In single-call mode, the memory usage
+    doesn't depend on the dictionary size, and it is perfectly fine to
+    use a big dictionary: for maximum compression, the dictionary should
+    be at least as big as the uncompressed data itself.
+
+Future plans
+
+    Creating a limited XZ encoder may be considered if people think it is
+    useful. LZMA2 is slower to compress than e.g. Deflate or LZO even at
+    the fastest settings, so it isn't clear whether an LZMA2 encoder is
+    wanted in the kernel.
+
+    Support for limited random-access reading is planned for the
+    decompression code. I don't know if it could have any use in the
+    kernel, but I know that it would be useful in some embedded projects
+    outside the Linux kernel.
+
+Conformance to the .xz file format specification
+
+    There are a couple of corner cases where things have been simplified
+    at the expense of detecting errors as early as possible. These should
+    not matter in practice at all, since they don't cause security issues.
+    But it is good to know this when testing the code e.g. with the test
+    files from XZ Utils.
+
+Reporting bugs
+
+    Before reporting a bug, please check that it's not already fixed
+    upstream. See <http://tukaani.org/xz/embedded.html> to get the
+    latest code.
+
+    Report bugs to <lasse.collin@tukaani.org> or visit #tukaani on
+    Freenode and talk to Larhzu. I don't actively read LKML or other
+    kernel-related mailing lists, so if there's something I should know,
+    you should email me personally or use IRC.
+
+    Don't bother Igor Pavlov with questions about the XZ implementation
+    in the kernel or about XZ Utils. While these two implementations
+    include essential code that is directly based on Igor Pavlov's code,
+    these implementations are neither maintained nor supported by him.
diff --git a/Documentation/zh_CN/HOWTO b/Documentation/zh_CN/HOWTO
index 6916077..faf976c 100644
--- a/Documentation/zh_CN/HOWTO
+++ b/Documentation/zh_CN/HOWTO
@@ -347,8 +347,8 @@
 最新bug的通知,可以订阅bugme-new邮件列表(只有新的bug报告会被寄到这里)
 或者订阅bugme-janitor邮件列表(所有bugzilla的变动都会被寄到这里)。
 
-	http://lists.osdl.org/mailman/listinfo/bugme-new
-	http://lists.osdl.org/mailman/listinfo/bugme-janitors
+	https://lists.linux-foundation.org/mailman/listinfo/bugme-new
+	https://lists.linux-foundation.org/mailman/listinfo/bugme-janitors
 
 
 邮件列表
diff --git a/Documentation/zh_CN/SubmittingDrivers b/Documentation/zh_CN/SubmittingDrivers
index c27b0f6..5889f8d 100644
--- a/Documentation/zh_CN/SubmittingDrivers
+++ b/Documentation/zh_CN/SubmittingDrivers
@@ -61,7 +61,7 @@
 Linux 2.6:
 	除了遵循和 2.4 版内核同样的规则外,你还需要在 linux-kernel 邮件
 	列表上跟踪最新的 API 变化。向 Linux 2.6 内核提交驱动的顶级联系人
-	是 Andrew Morton <akpm@osdl.org>。
+	是 Andrew Morton <akpm@linux-foundation.org>。
 
 决定设备驱动能否被接受的条件
 ----------------------------
diff --git a/MAINTAINERS b/MAINTAINERS
index aca102f..3dd5c6f 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -285,6 +285,41 @@
 S:	Maintained
 F:	sound/pci/ad1889.*
 
+AD525X ANALOG DEVICES DIGITAL POTENTIOMETERS DRIVER
+M:	Michael Hennerich <michael.hennerich@analog.com>
+L:	device-driver-devel@blackfin.uclinux.org
+W:	http://wiki-analog.com/AD5254
+S:	Supported
+F:	drivers/misc/ad525x_dpot.c
+
+AD5398 CURRENT REGULATOR DRIVER (AD5398/AD5821)
+M:	Michael Hennerich <michael.hennerich@analog.com>
+L:	device-driver-devel@blackfin.uclinux.org
+W:	http://wiki-analog.com/AD5398
+S:	Supported
+F:	drivers/regulator/ad5398.c
+
+AD714X CAPACITANCE TOUCH SENSOR DRIVER (AD7142/3/7/8/7A)
+M:	Michael Hennerich <michael.hennerich@analog.com>
+L:	device-driver-devel@blackfin.uclinux.org
+W:	http://wiki-analog.com/AD7142
+S:	Supported
+F:	drivers/input/misc/ad714x.c
+
+AD7877 TOUCHSCREEN DRIVER
+M:	Michael Hennerich <michael.hennerich@analog.com>
+L:	device-driver-devel@blackfin.uclinux.org
+W:	http://wiki-analog.com/AD7877
+S:	Supported
+F:	drivers/input/touchscreen/ad7877.c
+
+AD7879 TOUCHSCREEN DRIVER (AD7879/AD7889)
+M:	Michael Hennerich <michael.hennerich@analog.com>
+L:	device-driver-devel@blackfin.uclinux.org
+W:	http://wiki-analog.com/AD7879
+S:	Supported
+F:	drivers/input/touchscreen/ad7879.c
+
 ADM1025 HARDWARE MONITOR DRIVER
 M:	Jean Delvare <khali@linux-fr.org>
 L:	lm-sensors@lm-sensors.org
@@ -304,6 +339,32 @@
 S:	Orphan
 F:	drivers/net/wireless/adm8211.*
 
+ADP5520 BACKLIGHT DRIVER WITH IO EXPANDER (ADP5520/ADP5501)
+M:	Michael Hennerich <michael.hennerich@analog.com>
+L:	device-driver-devel@blackfin.uclinux.org
+W:	http://wiki-analog.com/ADP5520
+S:	Supported
+F:	drivers/mfd/adp5520.c
+F:	drivers/video/backlight/adp5520_bl.c
+F:	drivers/leds/leds-adp5520.c
+F:	drivers/gpio/adp5520-gpio.c
+F:	drivers/input/keyboard/adp5520-keys.c
+
+ADP5588 QWERTY KEYPAD AND IO EXPANDER DRIVER (ADP5588/ADP5587)
+M:	Michael Hennerich <michael.hennerich@analog.com>
+L:	device-driver-devel@blackfin.uclinux.org
+W:	http://wiki-analog.com/ADP5588
+S:	Supported
+F:	drivers/input/keyboard/adp5588-keys.c
+F:	drivers/gpio/adp5588-gpio.c
+
+ADP8860 BACKLIGHT DRIVER (ADP8860/ADP8861/ADP8863)
+M:	Michael Hennerich <michael.hennerich@analog.com>
+L:	device-driver-devel@blackfin.uclinux.org
+W:	http://wiki-analog.com/ADP8860
+S:	Supported
+F:	drivers/video/backlight/adp8860_bl.c
+
 ADT746X FAN DRIVER
 M:	Colin Leroy <colin@colino.net>
 S:	Maintained
@@ -316,6 +377,13 @@
 F:	Documentation/hwmon/adt7475
 F:	drivers/hwmon/adt7475.c
 
+ADXL34X THREE-AXIS DIGITAL ACCELEROMETER DRIVER (ADXL345/ADXL346)
+M:	Michael Hennerich <michael.hennerich@analog.com>
+L:	device-driver-devel@blackfin.uclinux.org
+W:	http://wiki-analog.com/ADXL345
+S:	Supported
+F:	drivers/input/misc/adxl34x.c
+
 ADVANSYS SCSI DRIVER
 M:	Matthew Wilcox <matthew@wil.cx>
 L:	linux-scsi@vger.kernel.org
@@ -428,7 +496,6 @@
 F:	arch/x86/kernel/microcode_amd.c
 
 AMS (Apple Motion Sensor) DRIVER
-M:	Stelian Pop <stelian@popies.net>
 M:	Michael Hanselmann <linux-kernel@hansmi.ch>
 S:	Supported
 F:	drivers/macintosh/ams/
@@ -440,16 +507,22 @@
 S:	Maintained
 F:	drivers/infiniband/hw/amso1100/
 
+ANALOG DEVICES INC ASOC CODEC DRIVERS
+L:	device-driver-devel@blackfin.uclinux.org
+L:	alsa-devel@alsa-project.org (moderated for non-subscribers)
+W:	http://wiki-analog.com/
+S:	Supported
+F:	sound/soc/codecs/ad1*
+F:	sound/soc/codecs/adau*
+F:	sound/soc/codecs/adav*
+F:	sound/soc/codecs/ssm*
+
 ANALOG DEVICES INC ASOC DRIVERS
 L:	uclinux-dist-devel@blackfin.uclinux.org
 L:	alsa-devel@alsa-project.org (moderated for non-subscribers)
 W:	http://blackfin.uclinux.org/
 S:	Supported
 F:	sound/soc/blackfin/*
-F:	sound/soc/codecs/ad1*
-F:	sound/soc/codecs/adau*
-F:	sound/soc/codecs/adav*
-F:	sound/soc/codecs/ssm*
 
 AOA (Apple Onboard Audio) ALSA DRIVER
 M:	Johannes Berg <johannes@sipsolutions.net>
@@ -1450,6 +1523,14 @@
 F:	block/bsg.c
 F:	include/linux/bsg.h
 
+BT87X AUDIO DRIVER
+M:	Clemens Ladisch <clemens@ladisch.de>
+L:	alsa-devel@alsa-project.org (moderated for non-subscribers)
+T:	git git://git.alsa-project.org/alsa-kernel.git
+S:	Maintained
+F:	Documentation/sound/alsa/Bt87x.txt
+F:	sound/pci/bt87x.c
+
 BT8XXGPIO DRIVER
 M:	Michael Buesch <mb@bu3sch.de>
 W:	http://bu3sch.de/btgpio.php
@@ -1475,6 +1556,13 @@
 F:	Documentation/video4linux/bttv/
 F:	drivers/media/video/bt8xx/bttv*
 
+C-MEDIA CMI8788 DRIVER
+M:	Clemens Ladisch <clemens@ladisch.de>
+L:	alsa-devel@alsa-project.org (moderated for non-subscribers)
+T:	git git://git.alsa-project.org/alsa-kernel.git
+S:	Maintained
+F:	sound/pci/oxygen/
+
 CACHEFILES: FS-CACHE BACKEND FOR CACHING ON MOUNTED FILESYSTEMS
 M:	David Howells <dhowells@redhat.com>
 L:	linux-cachefs@redhat.com
@@ -1711,7 +1799,8 @@
 F:	drivers/usb/atm/cxacru.c
 
 CONFIGFS
-M:	Joel Becker <joel.becker@oracle.com>
+M:	Joel Becker <jlbec@evilplan.org>
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/jlbec/configfs.git
 S:	Supported
 F:	fs/configfs/
 F:	include/linux/configfs.h
@@ -1933,7 +2022,7 @@
 DCCP PROTOCOL
 M:	Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
 L:	dccp@vger.kernel.org
-W:	http://linux-net.osdl.org/index.php/DCCP
+W:	http://www.linuxfoundation.org/collaborate/workgroups/networking/dccp
 S:	Maintained
 F:	include/linux/dccp.h
 F:	include/linux/tfrc.h
@@ -2265,6 +2354,13 @@
 S:	Maintained
 F:	drivers/edac/r82600_edac.c
 
+EDIROL UA-101/UA-1000 DRIVER
+M:	Clemens Ladisch <clemens@ladisch.de>
+L:	alsa-devel@alsa-project.org (moderated for non-subscribers)
+T:	git git://git.alsa-project.org/alsa-kernel.git
+S:	Maintained
+F:	sound/usb/misc/ua101.c
+
 EEEPC LAPTOP EXTRAS DRIVER
 M:	Corentin Chary <corentincj@iksaif.net>
 L:	acpi4asus-user@lists.sourceforge.net
@@ -2355,7 +2451,7 @@
 M:	Stephen Hemminger <shemminger@linux-foundation.org>
 L:	bridge@lists.linux-foundation.org
 L:	netdev@vger.kernel.org
-W:	http://www.linux-foundation.org/en/Net:Bridge
+W:	http://www.linuxfoundation.org/en/Net:Bridge
 S:	Maintained
 F:	include/linux/netfilter_bridge/
 F:	net/bridge/
@@ -2618,6 +2714,14 @@
 F:	drivers/i2c/busses/i2c-gpio.c
 F:	include/linux/i2c-gpio.h
 
+GENERIC GPIO I2C MULTIPLEXER DRIVER
+M:	Peter Korsgaard <peter.korsgaard@barco.com>
+L:	linux-i2c@vger.kernel.org
+S:	Supported
+F:	drivers/i2c/muxes/gpio-i2cmux.c
+F:	include/linux/gpio-i2cmux.h
+F:	Documentation/i2c/muxes/gpio-i2cmux
+
 GENERIC HDLC (WAN) DRIVERS
 M:	Krzysztof Halasa <khc@pm.waw.pl>
 W:	http://www.kernel.org/pub/linux/utils/net/hdlc/
@@ -3425,6 +3529,13 @@
 S:	Maintained
 F:	drivers/serial/jsm/
 
+K10TEMP HARDWARE MONITORING DRIVER
+M:	Clemens Ladisch <clemens@ladisch.de>
+L:	lm-sensors@lm-sensors.org
+S:	Maintained
+F:	Documentation/hwmon/k10temp
+F:	drivers/hwmon/k10temp.c
+
 K8TEMP HARDWARE MONITORING DRIVER
 M:	Rudolf Marek <r.marek@assembler.cz>
 L:	lm-sensors@lm-sensors.org
@@ -4010,9 +4121,8 @@
 F:	kernel/module.c
 
 MOTION EYE VAIO PICTUREBOOK CAMERA DRIVER
-M:	Stelian Pop <stelian@popies.net>
 W:	http://popies.net/meye/
-S:	Maintained
+S:	Orphan
 F:	Documentation/video4linux/meye.txt
 F:	drivers/media/video/meye.*
 F:	include/linux/meye.h
@@ -4300,11 +4410,11 @@
 F:	drivers/scsi/nsp32*
 
 NTFS FILESYSTEM
-M:	Anton Altaparmakov <aia21@cantab.net>
+M:	Anton Altaparmakov <anton@tuxera.com>
 L:	linux-ntfs-dev@lists.sourceforge.net
-W:	http://www.linux-ntfs.org/
+W:	http://www.tuxera.com/
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/aia21/ntfs-2.6.git
-S:	Maintained
+S:	Supported
 F:	Documentation/filesystems/ntfs.txt
 F:	fs/ntfs/
 
@@ -4456,6 +4566,13 @@
 F:	include/linux/of*.h
 K:	of_get_property
 
+OPL4 DRIVER
+M:	Clemens Ladisch <clemens@ladisch.de>
+L:	alsa-devel@alsa-project.org (moderated for non-subscribers)
+T:	git git://git.alsa-project.org/alsa-kernel.git
+S:	Maintained
+F:	sound/drivers/opl4/
+
 OPROFILE
 M:	Robert Richter <robert.richter@amd.com>
 L:	oprofile-list@lists.sf.net
@@ -4467,7 +4584,7 @@
 
 ORACLE CLUSTER FILESYSTEM 2 (OCFS2)
 M:	Mark Fasheh <mfasheh@suse.com>
-M:	Joel Becker <joel.becker@oracle.com>
+M:	Joel Becker <jlbec@evilplan.org>
 L:	ocfs2-devel@oss.oracle.com (moderated for non-subscribers)
 W:	http://oss.oracle.com/projects/ocfs2/
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/jlbec/ocfs2.git
@@ -4552,7 +4669,7 @@
 M:	Chris Wright <chrisw@sous-sol.org>
 M:	Alok Kataria <akataria@vmware.com>
 M:	Rusty Russell <rusty@rustcorp.com.au>
-L:	virtualization@lists.osdl.org
+L:	virtualization@lists.linux-foundation.org
 S:	Supported
 F:	Documentation/ia64/paravirt_ops.txt
 F:	arch/*/kernel/paravirt*
@@ -5050,11 +5167,6 @@
 F:	kernel/srcu*
 X:	kernel/rcutorture.c
 
-REAL TIME CLOCK DRIVER (LEGACY)
-M:	Paul Gortmaker <p_gortmaker@yahoo.com>
-S:	Maintained
-F:	drivers/char/rtc.c
-
 REAL TIME CLOCK (RTC) SUBSYSTEM
 M:	Alessandro Zummo <a.zummo@towertech.it>
 L:	rtc-linux@googlegroups.com
@@ -5207,7 +5319,7 @@
 M:	Jassi Brar <jassi.brar@samsung.com>
 L:	alsa-devel@alsa-project.org (moderated for non-subscribers)
 S:	Supported
-F:	sound/soc/s3c24xx
+F:	sound/soc/samsung
 
 TIMEKEEPING, NTP
 M:	John Stultz <johnstul@us.ibm.com>
@@ -6215,6 +6327,13 @@
 W:	http://www.one-eyed-alien.net/~mdharm/linux-usb/
 F:	drivers/usb/storage/
 
+USB MIDI DRIVER
+M:	Clemens Ladisch <clemens@ladisch.de>
+L:	alsa-devel@alsa-project.org (moderated for non-subscribers)
+T:	git git://git.alsa-project.org/alsa-kernel.git
+S:	Maintained
+F:	sound/usb/midi.*
+
 USB OHCI DRIVER
 M:	David Brownell <dbrownell@users.sourceforge.net>
 L:	linux-usb@vger.kernel.org
@@ -6454,7 +6573,7 @@
 VIRTIO HOST (VHOST)
 M:	"Michael S. Tsirkin" <mst@redhat.com>
 L:	kvm@vger.kernel.org
-L:	virtualization@lists.osdl.org
+L:	virtualization@lists.linux-foundation.org
 L:	netdev@vger.kernel.org
 S:	Maintained
 F:	drivers/vhost/
@@ -6504,7 +6623,7 @@
 
 VLYNQ BUS
 M:	Florian Fainelli <florian@openwrt.org>
-L:	openwrt-devel@lists.openwrt.org
+L:	openwrt-devel@lists.openwrt.org (subscribers-only)
 S:	Maintained
 F:	drivers/vlynq/vlynq.c
 F:	include/linux/vlynq.h
@@ -6724,7 +6843,7 @@
 M:	Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
 M:	Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
 L:	xen-devel@lists.xensource.com (moderated for non-subscribers)
-L:	virtualization@lists.osdl.org
+L:	virtualization@lists.linux-foundation.org
 S:	Supported
 F:	arch/x86/xen/
 F:	drivers/*/xen-*front.c
diff --git a/arch/alpha/kernel/time.c b/arch/alpha/kernel/time.c
index 0f1d849..c1f3e7c 100644
--- a/arch/alpha/kernel/time.c
+++ b/arch/alpha/kernel/time.c
@@ -506,7 +506,7 @@
 		CMOS_WRITE(real_seconds,RTC_SECONDS);
 		CMOS_WRITE(real_minutes,RTC_MINUTES);
 	} else {
-		printk(KERN_WARNING
+		printk_once(KERN_NOTICE
 		       "set_rtc_mmss: can't update from %d to %d\n",
 		       cmos_minutes, real_minutes);
  		retval = -1;
diff --git a/arch/arm/common/it8152.c b/arch/arm/common/it8152.c
index 42ff90b..665ebf7 100644
--- a/arch/arm/common/it8152.c
+++ b/arch/arm/common/it8152.c
@@ -236,7 +236,7 @@
 
 /*
  * The following functions are needed for DMA bouncing.
- * ITE8152 chip can addrees up to 64MByte, so all the devices
+ * ITE8152 chip can address up to 64MByte, so all the devices
  * connected to ITE8152 (PCI and USB) should have limited DMA window
  */
 
diff --git a/arch/arm/common/vic.c b/arch/arm/common/vic.c
index ba65f6e..cb660bc 100644
--- a/arch/arm/common/vic.c
+++ b/arch/arm/common/vic.c
@@ -70,7 +70,7 @@
  * vic_init2 - common initialisation code
  * @base: Base of the VIC.
  *
- * Common initialisation code for registeration
+ * Common initialisation code for registration
  * and resume.
 */
 static void vic_init2(void __iomem *base)
diff --git a/arch/arm/mach-at91/board-ecbat91.c b/arch/arm/mach-at91/board-ecbat91.c
index 7b58c94..de2fd04 100644
--- a/arch/arm/mach-at91/board-ecbat91.c
+++ b/arch/arm/mach-at91/board-ecbat91.c
@@ -128,17 +128,17 @@
 		.platform_data	= &my_flash0_platform,
 #endif
 	},
-	{	/* User accessable spi - cs1 (250KHz) */
+	{	/* User accessible spi - cs1 (250KHz) */
 		.modalias	= "spi-cs1",
 		.chip_select	= 1,
 		.max_speed_hz	= 250 * 1000,
 	},
-	{	/* User accessable spi - cs2 (1MHz) */
+	{	/* User accessible spi - cs2 (1MHz) */
 		.modalias	= "spi-cs2",
 		.chip_select	= 2,
 		.max_speed_hz	= 1 * 1000 * 1000,
 	},
-	{	/* User accessable spi - cs3 (10MHz) */
+	{	/* User accessible spi - cs3 (10MHz) */
 		.modalias	= "spi-cs3",
 		.chip_select	= 3,
 		.max_speed_hz	= 10 * 1000 * 1000,
diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c
index dafbacc..ea53f4d 100644
--- a/arch/arm/mach-at91/pm.c
+++ b/arch/arm/mach-at91/pm.c
@@ -301,7 +301,7 @@
 }
 
 
-static struct platform_suspend_ops at91_pm_ops ={
+static const struct platform_suspend_ops at91_pm_ops = {
 	.valid	= at91_pm_valid_state,
 	.begin	= at91_pm_begin,
 	.enter	= at91_pm_enter,
diff --git a/arch/arm/mach-bcmring/csp/chipc/chipcHw.c b/arch/arm/mach-bcmring/csp/chipc/chipcHw.c
index b3a61d8..96273ff 100644
--- a/arch/arm/mach-bcmring/csp/chipc/chipcHw.c
+++ b/arch/arm/mach-bcmring/csp/chipc/chipcHw.c
@@ -757,7 +757,7 @@
 		t = t << 1;
 	}
 
-	/* Intialize the result */
+	/* Initialize the result */
 	r = 0;
 
 	do {
diff --git a/arch/arm/mach-bcmring/csp/dmac/dmacHw.c b/arch/arm/mach-bcmring/csp/dmac/dmacHw.c
index 7b9bac2..6b9be2e 100644
--- a/arch/arm/mach-bcmring/csp/dmac/dmacHw.c
+++ b/arch/arm/mach-bcmring/csp/dmac/dmacHw.c
@@ -893,7 +893,7 @@
 */
 /****************************************************************************/
 uint32_t dmacHw_getDmaControllerAttribute(dmacHw_HANDLE_t handle,	/*  [ IN ]  DMA Channel handle */
-					  dmacHw_CONTROLLER_ATTRIB_e attr	/*  [ IN ]  DMA Controler attribute of type  dmacHw_CONTROLLER_ATTRIB_e */
+					  dmacHw_CONTROLLER_ATTRIB_e attr	/*  [ IN ]  DMA Controller attribute of type  dmacHw_CONTROLLER_ATTRIB_e */
     ) {
 	dmacHw_CBLK_t *pCblk = dmacHw_HANDLE_TO_CBLK(handle);
 
diff --git a/arch/arm/mach-bcmring/csp/dmac/dmacHw_extra.c b/arch/arm/mach-bcmring/csp/dmac/dmacHw_extra.c
index ff7b436..77f84b4 100644
--- a/arch/arm/mach-bcmring/csp/dmac/dmacHw_extra.c
+++ b/arch/arm/mach-bcmring/csp/dmac/dmacHw_extra.c
@@ -316,7 +316,7 @@
 /**
 *  @brief   Check if DMA channel is the flow controller
 *
-*  @return  1 : If DMA is a flow controler
+*  @return  1 : If DMA is a flow controller
 *           0 : Peripheral is the flow controller
 *
 *  @note
diff --git a/arch/arm/mach-bcmring/csp/tmr/tmrHw.c b/arch/arm/mach-bcmring/csp/tmr/tmrHw.c
index 5c1c9a0..16225e4 100644
--- a/arch/arm/mach-bcmring/csp/tmr/tmrHw.c
+++ b/arch/arm/mach-bcmring/csp/tmr/tmrHw.c
@@ -558,7 +558,7 @@
 		t = t << 1;
 	}
 
-	/* Intialize the result */
+	/* Initialize the result */
 	r = 0;
 
 	do {
diff --git a/arch/arm/mach-bcmring/dma.c b/arch/arm/mach-bcmring/dma.c
index 77eb35c..8d1baf3 100644
--- a/arch/arm/mach-bcmring/dma.c
+++ b/arch/arm/mach-bcmring/dma.c
@@ -671,7 +671,7 @@
 
 /****************************************************************************/
 /**
-*   Intializes all of the data structures associated with the DMA.
+*   Initializes all of the data structures associated with the DMA.
 *   @return
 *       >= 0    - Initialization was successfull.
 *
diff --git a/arch/arm/mach-bcmring/include/csp/dmacHw.h b/arch/arm/mach-bcmring/include/csp/dmacHw.h
index 5d51013..6c8da2b 100644
--- a/arch/arm/mach-bcmring/include/csp/dmacHw.h
+++ b/arch/arm/mach-bcmring/include/csp/dmacHw.h
@@ -590,7 +590,7 @@
 */
 /****************************************************************************/
 uint32_t dmacHw_getDmaControllerAttribute(dmacHw_HANDLE_t handle,	/*  [ IN ]  DMA Channel handle  */
-					  dmacHw_CONTROLLER_ATTRIB_e attr	/*  [ IN ]  DMA Controler attribute of type  dmacHw_CONTROLLER_ATTRIB_e */
+					  dmacHw_CONTROLLER_ATTRIB_e attr	/*  [ IN ]  DMA Controller attribute of type  dmacHw_CONTROLLER_ATTRIB_e */
     );
 
 #endif /* _DMACHW_H */
diff --git a/arch/arm/mach-bcmring/include/csp/tmrHw.h b/arch/arm/mach-bcmring/include/csp/tmrHw.h
index f1236d0..2cbb530 100644
--- a/arch/arm/mach-bcmring/include/csp/tmrHw.h
+++ b/arch/arm/mach-bcmring/include/csp/tmrHw.h
@@ -76,7 +76,7 @@
 *           certain time interval
 *
 *  This function initializes a periodic timer to generate timer interrupt
-*  after every time interval in milisecond
+*  after every time interval in millisecond
 *
 *  @return   On success: Effective interval set in mili-second
 *            On failure: 0
@@ -93,7 +93,7 @@
 *           after certain time interval
 *
 *  This function initializes a periodic timer to generate a single ticks after
-*  certain time interval in milisecond
+*  certain time interval in millisecond
 *
 *  @return   On success: Effective interval set in mili-second
 *            On failure: 0
diff --git a/arch/arm/mach-bcmring/include/mach/csp/dmacHw_priv.h b/arch/arm/mach-bcmring/include/mach/csp/dmacHw_priv.h
index cbf334d..d67e2f8 100644
--- a/arch/arm/mach-bcmring/include/mach/csp/dmacHw_priv.h
+++ b/arch/arm/mach-bcmring/include/mach/csp/dmacHw_priv.h
@@ -28,7 +28,7 @@
 
 /* Data type for DMA Link List Item */
 typedef struct {
-	uint32_t sar;		/* Source Adress Register.
+	uint32_t sar;		/* Source Address Register.
 				   Address must be aligned to CTLx.SRC_TR_WIDTH.             */
 	uint32_t dar;		/* Destination Address Register.
 				   Address must be aligned to CTLx.DST_TR_WIDTH.             */
diff --git a/arch/arm/mach-bcmring/include/mach/csp/dmacHw_reg.h b/arch/arm/mach-bcmring/include/mach/csp/dmacHw_reg.h
index 891cea8..f1ecf96 100644
--- a/arch/arm/mach-bcmring/include/mach/csp/dmacHw_reg.h
+++ b/arch/arm/mach-bcmring/include/mach/csp/dmacHw_reg.h
@@ -35,7 +35,7 @@
 
 /* Data type representing DMA channel registers */
 typedef struct {
-	dmacHw_REG64_t ChannelSar;	/*  Source Adress Register. 64 bits (upper 32 bits are reserved)
+	dmacHw_REG64_t ChannelSar;	/*  Source Address Register. 64 bits (upper 32 bits are reserved)
 					   Address must be aligned to CTLx.SRC_TR_WIDTH.
 					 */
 	dmacHw_REG64_t ChannelDar;	/*  Destination Address Register.64 bits (upper 32 bits are reserved)
diff --git a/arch/arm/mach-davinci/pm.c b/arch/arm/mach-davinci/pm.c
index fab953b..1bd73a04 100644
--- a/arch/arm/mach-davinci/pm.c
+++ b/arch/arm/mach-davinci/pm.c
@@ -110,7 +110,7 @@
 	return ret;
 }
 
-static struct platform_suspend_ops davinci_pm_ops = {
+static const struct platform_suspend_ops davinci_pm_ops = {
 	.enter		= davinci_pm_enter,
 	.valid		= suspend_valid_only_mem,
 };
diff --git a/arch/arm/mach-dove/common.c b/arch/arm/mach-dove/common.c
index f7a1258..fe627ab 100644
--- a/arch/arm/mach-dove/common.c
+++ b/arch/arm/mach-dove/common.c
@@ -770,7 +770,7 @@
 };
 
 static struct platform_device dove_sdio0 = {
-	.name		= "sdhci-mv",
+	.name		= "sdhci-dove",
 	.id		= 0,
 	.dev		= {
 		.dma_mask		= &sdio_dmamask,
@@ -798,7 +798,7 @@
 };
 
 static struct platform_device dove_sdio1 = {
-	.name		= "sdhci-mv",
+	.name		= "sdhci-dove",
 	.id		= 1,
 	.dev		= {
 		.dma_mask		= &sdio_dmamask,
diff --git a/arch/arm/mach-gemini/include/mach/hardware.h b/arch/arm/mach-gemini/include/mach/hardware.h
index 213a4fc..8c950e1 100644
--- a/arch/arm/mach-gemini/include/mach/hardware.h
+++ b/arch/arm/mach-gemini/include/mach/hardware.h
@@ -33,7 +33,7 @@
 #define GEMINI_LPC_HOST_BASE	0x47000000
 #define GEMINI_LPC_IO_BASE	0x47800000
 #define GEMINI_INTERRUPT_BASE	0x48000000
-/* TODO: Different interrupt controlers when SMP
+/* TODO: Different interrupt controllers when SMP
  * #define GEMINI_INTERRUPT0_BASE	0x48000000
  * #define GEMINI_INTERRUPT1_BASE	0x49000000
  */
diff --git a/arch/arm/mach-imx/mach-pcm038.c b/arch/arm/mach-imx/mach-pcm038.c
index f667a26..5056148 100644
--- a/arch/arm/mach-imx/mach-pcm038.c
+++ b/arch/arm/mach-imx/mach-pcm038.c
@@ -254,10 +254,10 @@
 
 static struct mc13783_regulator_init_data pcm038_regulators[] = {
 	{
-		.id = MC13783_REGU_VCAM,
+		.id = MC13783_REG_VCAM,
 		.init_data = &cam_data,
 	}, {
-		.id = MC13783_REGU_VMMC1,
+		.id = MC13783_REG_VMMC1,
 		.init_data = &sdhc1_data,
 	},
 };
diff --git a/arch/arm/mach-imx/pm-imx27.c b/arch/arm/mach-imx/pm-imx27.c
index 6bf81ce..acf1769 100644
--- a/arch/arm/mach-imx/pm-imx27.c
+++ b/arch/arm/mach-imx/pm-imx27.c
@@ -32,7 +32,7 @@
 	return 0;
 }
 
-static struct platform_suspend_ops mx27_suspend_ops = {
+static const struct platform_suspend_ops mx27_suspend_ops = {
 	.enter = mx27_suspend_enter,
 	.valid = suspend_valid_only_mem,
 };
diff --git a/arch/arm/mach-kirkwood/openrd-setup.c b/arch/arm/mach-kirkwood/openrd-setup.c
index c9d77fa..cfcca41 100644
--- a/arch/arm/mach-kirkwood/openrd-setup.c
+++ b/arch/arm/mach-kirkwood/openrd-setup.c
@@ -171,7 +171,7 @@
 
 	kirkwood_i2c_init();
 
-	if (machine_is_openrd_client()) {
+	if (machine_is_openrd_client() || machine_is_openrd_ultimate()) {
 		i2c_register_board_info(0, i2c_board_info,
 			ARRAY_SIZE(i2c_board_info));
 		kirkwood_audio_init();
diff --git a/arch/arm/mach-lpc32xx/pm.c b/arch/arm/mach-lpc32xx/pm.c
index a6e2aed..e76d41b 100644
--- a/arch/arm/mach-lpc32xx/pm.c
+++ b/arch/arm/mach-lpc32xx/pm.c
@@ -123,7 +123,7 @@
 	return 0;
 }
 
-static struct platform_suspend_ops lpc32xx_pm_ops = {
+static const struct platform_suspend_ops lpc32xx_pm_ops = {
 	.valid	= suspend_valid_only_mem,
 	.enter	= lpc32xx_pm_enter,
 };
diff --git a/arch/arm/mach-msm/include/mach/entry-macro-qgic.S b/arch/arm/mach-msm/include/mach/entry-macro-qgic.S
index 4dc99aa..1246715 100644
--- a/arch/arm/mach-msm/include/mach/entry-macro-qgic.S
+++ b/arch/arm/mach-msm/include/mach/entry-macro-qgic.S
@@ -26,7 +26,7 @@
 	 * The interrupt numbering scheme is defined in the
 	 * interrupt controller spec.  To wit:
 	 *
-	 * Migrated the code from ARM MP port to be more consistant
+	 * Migrated the code from ARM MP port to be more consistent
 	 * with interrupt processing , the following still holds true
 	 * however, all interrupts are treated the same regardless of
 	 * if they are local IPI or PPI
diff --git a/arch/arm/mach-msm/io.c b/arch/arm/mach-msm/io.c
index 800f327..1260007 100644
--- a/arch/arm/mach-msm/io.c
+++ b/arch/arm/mach-msm/io.c
@@ -154,7 +154,7 @@
 {
 	if (mtype == MT_DEVICE) {
 		/* The peripherals in the 88000000 - D0000000 range
-		 * are only accessable by type MT_DEVICE_NONSHARED.
+		 * are only accessible by type MT_DEVICE_NONSHARED.
 		 * Adjust mtype as necessary to make this "just work."
 		 */
 		if ((phys_addr >= 0x88000000) && (phys_addr < 0xD0000000))
diff --git a/arch/arm/mach-mx3/mach-mx31_3ds.c b/arch/arm/mach-mx3/mach-mx31_3ds.c
index 4e516b4..899a969 100644
--- a/arch/arm/mach-mx3/mach-mx31_3ds.c
+++ b/arch/arm/mach-mx3/mach-mx31_3ds.c
@@ -140,10 +140,10 @@
 
 static struct mc13783_regulator_init_data mx31_3ds_regulators[] = {
 	{
-		.id = MC13783_REGU_PWGT1SPI, /* Power Gate for ARM core. */
+		.id = MC13783_REG_PWGT1SPI, /* Power Gate for ARM core. */
 		.init_data = &pwgtx_init,
 	}, {
-		.id = MC13783_REGU_PWGT2SPI, /* Power Gate for L2 Cache. */
+		.id = MC13783_REG_PWGT2SPI, /* Power Gate for L2 Cache. */
 		.init_data = &pwgtx_init,
 	}, {
 
diff --git a/arch/arm/mach-mx3/mach-mx31moboard.c b/arch/arm/mach-mx3/mach-mx31moboard.c
index 203d21a..1aa8d65 100644
--- a/arch/arm/mach-mx3/mach-mx31moboard.c
+++ b/arch/arm/mach-mx3/mach-mx31moboard.c
@@ -216,11 +216,11 @@
 
 static struct mc13783_regulator_init_data moboard_regulators[] = {
 	{
-		.id = MC13783_REGU_VMMC1,
+		.id = MC13783_REG_VMMC1,
 		.init_data = &sdhc_vreg_data,
 	},
 	{
-		.id = MC13783_REGU_VCAM,
+		.id = MC13783_REG_VCAM,
 		.init_data = &cam_vreg_data,
 	},
 };
diff --git a/arch/arm/mach-omap1/pm.c b/arch/arm/mach-omap1/pm.c
index 0cca23a..98ba978 100644
--- a/arch/arm/mach-omap1/pm.c
+++ b/arch/arm/mach-omap1/pm.c
@@ -647,7 +647,7 @@
 
 
 
-static struct platform_suspend_ops omap_pm_ops ={
+static const struct platform_suspend_ops omap_pm_ops = {
 	.prepare	= omap_pm_prepare,
 	.enter		= omap_pm_enter,
 	.finish		= omap_pm_finish,
diff --git a/arch/arm/mach-omap2/cpuidle34xx.c b/arch/arm/mach-omap2/cpuidle34xx.c
index f3e043f..11b89e96 100644
--- a/arch/arm/mach-omap2/cpuidle34xx.c
+++ b/arch/arm/mach-omap2/cpuidle34xx.c
@@ -252,7 +252,7 @@
 	 * FIXME: we currently manage device-specific idle states
 	 *        for PER and CORE in combination with CPU-specific
 	 *        idle states.  This is wrong, and device-specific
-	 *        idle managment needs to be separated out into 
+	 *        idle management needs to be separated out into 
 	 *        its own code.
 	 */
 
diff --git a/arch/arm/mach-omap2/pm24xx.c b/arch/arm/mach-omap2/pm24xx.c
index dac2d1d..9e5dc8e 100644
--- a/arch/arm/mach-omap2/pm24xx.c
+++ b/arch/arm/mach-omap2/pm24xx.c
@@ -350,7 +350,7 @@
 	enable_hlt();
 }
 
-static struct platform_suspend_ops omap_pm_ops = {
+static const struct platform_suspend_ops omap_pm_ops = {
 	.begin		= omap2_pm_begin,
 	.enter		= omap2_pm_enter,
 	.end		= omap2_pm_end,
diff --git a/arch/arm/mach-omap2/pm34xx.c b/arch/arm/mach-omap2/pm34xx.c
index 5b323f2..8cbbead 100644
--- a/arch/arm/mach-omap2/pm34xx.c
+++ b/arch/arm/mach-omap2/pm34xx.c
@@ -605,7 +605,7 @@
 	return;
 }
 
-static struct platform_suspend_ops omap_pm_ops = {
+static const struct platform_suspend_ops omap_pm_ops = {
 	.begin		= omap3_pm_begin,
 	.end		= omap3_pm_end,
 	.enter		= omap3_pm_enter,
diff --git a/arch/arm/mach-omap2/pm44xx.c b/arch/arm/mach-omap2/pm44xx.c
index e9f4862c..76cfff2 100644
--- a/arch/arm/mach-omap2/pm44xx.c
+++ b/arch/arm/mach-omap2/pm44xx.c
@@ -65,7 +65,7 @@
 	return;
 }
 
-static struct platform_suspend_ops omap_pm_ops = {
+static const struct platform_suspend_ops omap_pm_ops = {
 	.begin		= omap4_pm_begin,
 	.end		= omap4_pm_end,
 	.enter		= omap4_pm_enter,
diff --git a/arch/arm/mach-omap2/serial.c b/arch/arm/mach-omap2/serial.c
index c645788..302da74 100644
--- a/arch/arm/mach-omap2/serial.c
+++ b/arch/arm/mach-omap2/serial.c
@@ -852,7 +852,7 @@
 }
 
 /**
- * omap_serial_init() - intialize all supported serial ports
+ * omap_serial_init() - initialize all supported serial ports
  *
  * Initializes all available UARTs as serial ports. Platforms
  * can call this function when they want to have default behaviour
diff --git a/arch/arm/mach-pnx4008/pm.c b/arch/arm/mach-pnx4008/pm.c
index ee3c29c..f3e60a0 100644
--- a/arch/arm/mach-pnx4008/pm.c
+++ b/arch/arm/mach-pnx4008/pm.c
@@ -119,7 +119,7 @@
 	       (state == PM_SUSPEND_MEM);
 }
 
-static struct platform_suspend_ops pnx4008_pm_ops = {
+static const struct platform_suspend_ops pnx4008_pm_ops = {
 	.enter = pnx4008_pm_enter,
 	.valid = pnx4008_pm_valid,
 };
diff --git a/arch/arm/mach-pxa/mxm8x10.c b/arch/arm/mach-pxa/mxm8x10.c
index 462167a..cdf7f41 100644
--- a/arch/arm/mach-pxa/mxm8x10.c
+++ b/arch/arm/mach-pxa/mxm8x10.c
@@ -337,7 +337,7 @@
 }
 #endif
 
-/* USB Open Host Controler Interface */
+/* USB Open Host Controller Interface */
 static struct pxaohci_platform_data mxm_8x10_ohci_platform_data = {
 	.port_mode = PMM_NPS_MODE,
 	.flags = ENABLE_PORT_ALL
diff --git a/arch/arm/mach-pxa/pm.c b/arch/arm/mach-pxa/pm.c
index 166c15f..978e1b2 100644
--- a/arch/arm/mach-pxa/pm.c
+++ b/arch/arm/mach-pxa/pm.c
@@ -96,7 +96,7 @@
 		pxa_cpu_pm_fns->finish();
 }
 
-static struct platform_suspend_ops pxa_pm_ops = {
+static const struct platform_suspend_ops pxa_pm_ops = {
 	.valid		= pxa_pm_valid,
 	.enter		= pxa_pm_enter,
 	.prepare	= pxa_pm_prepare,
diff --git a/arch/arm/mach-pxa/sharpsl_pm.c b/arch/arm/mach-pxa/sharpsl_pm.c
index e68d46d..785880f 100644
--- a/arch/arm/mach-pxa/sharpsl_pm.c
+++ b/arch/arm/mach-pxa/sharpsl_pm.c
@@ -869,7 +869,7 @@
 }
 
 #ifdef CONFIG_PM
-static struct platform_suspend_ops sharpsl_pm_ops = {
+static const struct platform_suspend_ops sharpsl_pm_ops = {
 	.prepare	= pxa_pm_prepare,
 	.finish		= pxa_pm_finish,
 	.enter		= corgi_pxa_pm_enter,
diff --git a/arch/arm/mach-s3c2410/mach-h1940.c b/arch/arm/mach-s3c2410/mach-h1940.c
index d7ada8c..1a81fe1 100644
--- a/arch/arm/mach-s3c2410/mach-h1940.c
+++ b/arch/arm/mach-s3c2410/mach-h1940.c
@@ -387,7 +387,7 @@
 	&s3c_device_wdt,
 	&s3c_device_i2c0,
 	&s3c_device_iis,
-	&s3c_device_pcm,
+	&samsung_asoc_dma,
 	&s3c_device_usbgadget,
 	&h1940_device_leds,
 	&h1940_device_bluetooth,
diff --git a/arch/arm/mach-s3c2440/mach-rx1950.c b/arch/arm/mach-s3c2440/mach-rx1950.c
index e0622bb..eab6ae5 100644
--- a/arch/arm/mach-s3c2440/mach-rx1950.c
+++ b/arch/arm/mach-s3c2440/mach-rx1950.c
@@ -692,7 +692,7 @@
 	&s3c_device_wdt,
 	&s3c_device_i2c0,
 	&s3c_device_iis,
-	&s3c_device_pcm,
+	&samsung_asoc_dma,
 	&s3c_device_usbgadget,
 	&s3c_device_rtc,
 	&s3c_device_nand,
diff --git a/arch/arm/mach-s3c64xx/clock.c b/arch/arm/mach-s3c64xx/clock.c
index 7e03f0a..1c98d2ff 100644
--- a/arch/arm/mach-s3c64xx/clock.c
+++ b/arch/arm/mach-s3c64xx/clock.c
@@ -695,7 +695,7 @@
 	}, {
 		.clk	= {
 			.name		= "audio-bus",
-			.id		= -1,  /* There's only one IISv4 port */
+			.id		= 2,
 			.ctrlbit        = S3C6410_CLKCON_SCLK_AUDIO2,
 			.enable		= s3c64xx_sclk_ctrl,
 		},
diff --git a/arch/arm/mach-s3c64xx/dev-audio.c b/arch/arm/mach-s3c64xx/dev-audio.c
index 76426a3..cad6702 100644
--- a/arch/arm/mach-s3c64xx/dev-audio.c
+++ b/arch/arm/mach-s3c64xx/dev-audio.c
@@ -22,7 +22,12 @@
 #include <plat/audio.h>
 #include <plat/gpio-cfg.h>
 
-static int s3c64xx_i2sv3_cfg_gpio(struct platform_device *pdev)
+static const char *rclksrc[] = {
+	[0] = "iis",
+	[1] = "audio-bus",
+};
+
+static int s3c64xx_i2s_cfg_gpio(struct platform_device *pdev)
 {
 	unsigned int base;
 
@@ -33,6 +38,12 @@
 	case 1:
 		base = S3C64XX_GPE(0);
 		break;
+	case 2:
+		s3c_gpio_cfgpin(S3C64XX_GPC(4), S3C_GPIO_SFN(5));
+		s3c_gpio_cfgpin(S3C64XX_GPC(5), S3C_GPIO_SFN(5));
+		s3c_gpio_cfgpin(S3C64XX_GPC(7), S3C_GPIO_SFN(5));
+		s3c_gpio_cfgpin_range(S3C64XX_GPH(6), 4, S3C_GPIO_SFN(5));
+		return 0;
 	default:
 		printk(KERN_DEBUG "Invalid I2S Controller number: %d\n",
 			pdev->id);
@@ -44,16 +55,6 @@
 	return 0;
 }
 
-static int s3c64xx_i2sv4_cfg_gpio(struct platform_device *pdev)
-{
-	s3c_gpio_cfgpin(S3C64XX_GPC(4), S3C_GPIO_SFN(5));
-	s3c_gpio_cfgpin(S3C64XX_GPC(5), S3C_GPIO_SFN(5));
-	s3c_gpio_cfgpin(S3C64XX_GPC(7), S3C_GPIO_SFN(5));
-	s3c_gpio_cfgpin_range(S3C64XX_GPH(6), 4, S3C_GPIO_SFN(5));
-
-	return 0;
-}
-
 static struct resource s3c64xx_iis0_resource[] = {
 	[0] = {
 		.start = S3C64XX_PA_IIS0,
@@ -72,17 +73,22 @@
 	},
 };
 
-static struct s3c_audio_pdata s3c_i2s0_pdata = {
-	.cfg_gpio = s3c64xx_i2sv3_cfg_gpio,
+static struct s3c_audio_pdata i2sv3_pdata = {
+	.cfg_gpio = s3c64xx_i2s_cfg_gpio,
+	.type = {
+		.i2s = {
+			.src_clk = rclksrc,
+		},
+	},
 };
 
 struct platform_device s3c64xx_device_iis0 = {
-	.name		  = "s3c64xx-iis",
+	.name		  = "samsung-i2s",
 	.id		  = 0,
 	.num_resources	  = ARRAY_SIZE(s3c64xx_iis0_resource),
 	.resource	  = s3c64xx_iis0_resource,
 	.dev = {
-		.platform_data = &s3c_i2s0_pdata,
+		.platform_data = &i2sv3_pdata,
 	},
 };
 EXPORT_SYMBOL(s3c64xx_device_iis0);
@@ -105,17 +111,13 @@
 	},
 };
 
-static struct s3c_audio_pdata s3c_i2s1_pdata = {
-	.cfg_gpio = s3c64xx_i2sv3_cfg_gpio,
-};
-
 struct platform_device s3c64xx_device_iis1 = {
-	.name		  = "s3c64xx-iis",
+	.name		  = "samsung-i2s",
 	.id		  = 1,
 	.num_resources	  = ARRAY_SIZE(s3c64xx_iis1_resource),
 	.resource	  = s3c64xx_iis1_resource,
 	.dev = {
-		.platform_data = &s3c_i2s1_pdata,
+		.platform_data = &i2sv3_pdata,
 	},
 };
 EXPORT_SYMBOL(s3c64xx_device_iis1);
@@ -138,17 +140,23 @@
 	},
 };
 
-static struct s3c_audio_pdata s3c_i2sv4_pdata = {
-	.cfg_gpio = s3c64xx_i2sv4_cfg_gpio,
+static struct s3c_audio_pdata i2sv4_pdata = {
+	.cfg_gpio = s3c64xx_i2s_cfg_gpio,
+	.type = {
+		.i2s = {
+			.quirks = QUIRK_PRI_6CHAN,
+			.src_clk = rclksrc,
+		},
+	},
 };
 
 struct platform_device s3c64xx_device_iisv4 = {
-	.name		  = "s3c64xx-iis-v4",
-	.id		  = -1,
+	.name = "samsung-i2s",
+	.id = 2,
 	.num_resources	  = ARRAY_SIZE(s3c64xx_iisv4_resource),
 	.resource	  = s3c64xx_iisv4_resource,
 	.dev = {
-		.platform_data = &s3c_i2sv4_pdata,
+		.platform_data = &i2sv4_pdata,
 	},
 };
 EXPORT_SYMBOL(s3c64xx_device_iisv4);
@@ -288,7 +296,7 @@
 static u64 s3c64xx_ac97_dmamask = DMA_BIT_MASK(32);
 
 struct platform_device s3c64xx_device_ac97 = {
-	.name		  = "s3c-ac97",
+	.name		  = "samsung-ac97",
 	.id		  = -1,
 	.num_resources	  = ARRAY_SIZE(s3c64xx_ac97_resource),
 	.resource	  = s3c64xx_ac97_resource,
@@ -307,16 +315,3 @@
 	else
 		s3c_ac97_pdata.cfg_gpio = s3c64xx_ac97_cfg_gpe;
 }
-
-static u64 s3c_device_audio_dmamask = 0xffffffffUL;
-
-struct platform_device s3c_device_pcm = {
-	.name		  = "s3c24xx-pcm-audio",
-	.id		  = -1,
-	.dev              = {
-		.dma_mask = &s3c_device_audio_dmamask,
-		.coherent_dma_mask = 0xffffffffUL
-	}
-};
-EXPORT_SYMBOL(s3c_device_pcm);
-
diff --git a/arch/arm/mach-s3c64xx/dma.c b/arch/arm/mach-s3c64xx/dma.c
index e7d03ab..372ea68 100644
--- a/arch/arm/mach-s3c64xx/dma.c
+++ b/arch/arm/mach-s3c64xx/dma.c
@@ -740,7 +740,7 @@
 	/* Set all DMA configuration to be DMA, not SDMA */
 	writel(0xffffff, S3C_SYSREG(0x110));
 
-	/* Register standard DMA controlers */
+	/* Register standard DMA controllers */
 	s3c64xx_dma_init1(0, DMACH_UART0, IRQ_DMA0, 0x75000000);
 	s3c64xx_dma_init1(8, DMACH_PCM1_TX, IRQ_DMA1, 0x75100000);
 
diff --git a/arch/arm/mach-s3c64xx/mach-smdk6410.c b/arch/arm/mach-s3c64xx/mach-smdk6410.c
index 77488fa..e85192a 100644
--- a/arch/arm/mach-s3c64xx/mach-smdk6410.c
+++ b/arch/arm/mach-s3c64xx/mach-smdk6410.c
@@ -283,7 +283,7 @@
 	&s3c_device_fb,
 	&s3c_device_ohci,
 	&s3c_device_usb_hsotg,
-	&s3c_device_pcm,
+	&samsung_asoc_dma,
 	&s3c64xx_device_iisv4,
 	&samsung_device_keypad,
 
diff --git a/arch/arm/mach-s5p6442/dev-audio.c b/arch/arm/mach-s5p6442/dev-audio.c
index 3462197..8719dc4 100644
--- a/arch/arm/mach-s5p6442/dev-audio.c
+++ b/arch/arm/mach-s5p6442/dev-audio.c
@@ -29,7 +29,7 @@
 		base = S5P6442_GPC1(0);
 		break;
 
-	case -1:
+	case 0:
 		base = S5P6442_GPC0(0);
 		break;
 
@@ -42,8 +42,19 @@
 	return 0;
 }
 
-static struct s3c_audio_pdata s3c_i2s_pdata = {
+static const char *rclksrc_v35[] = {
+	[0] = "busclk",
+	[1] = "i2sclk",
+};
+
+static struct s3c_audio_pdata i2sv35_pdata = {
 	.cfg_gpio = s5p6442_cfg_i2s,
+	.type = {
+		.i2s = {
+			.quirks = QUIRK_SEC_DAI | QUIRK_NEED_RSTCLR,
+			.src_clk = rclksrc_v35,
+		},
+	},
 };
 
 static struct resource s5p6442_iis0_resource[] = {
@@ -62,15 +73,34 @@
 		.end   = DMACH_I2S0_RX,
 		.flags = IORESOURCE_DMA,
 	},
+	[3] = {
+		.start = DMACH_I2S0S_TX,
+		.end = DMACH_I2S0S_TX,
+		.flags = IORESOURCE_DMA,
+	},
 };
 
 struct platform_device s5p6442_device_iis0 = {
-	.name		  = "s3c64xx-iis-v4",
-	.id		  = -1,
+	.name = "samsung-i2s",
+	.id = 0,
 	.num_resources	  = ARRAY_SIZE(s5p6442_iis0_resource),
 	.resource	  = s5p6442_iis0_resource,
 	.dev = {
-		.platform_data = &s3c_i2s_pdata,
+		.platform_data = &i2sv35_pdata,
+	},
+};
+
+static const char *rclksrc_v3[] = {
+	[0] = "iis",
+	[1] = "sclk_audio",
+};
+
+static struct s3c_audio_pdata i2sv3_pdata = {
+	.cfg_gpio = s5p6442_cfg_i2s,
+	.type = {
+		.i2s = {
+			.src_clk = rclksrc_v3,
+		},
 	},
 };
 
@@ -93,12 +123,12 @@
 };
 
 struct platform_device s5p6442_device_iis1 = {
-	.name		  = "s3c64xx-iis",
+	.name		  = "samsung-i2s",
 	.id		  = 1,
 	.num_resources	  = ARRAY_SIZE(s5p6442_iis1_resource),
 	.resource	  = s5p6442_iis1_resource,
 	.dev = {
-		.platform_data = &s3c_i2s_pdata,
+		.platform_data = &i2sv3_pdata,
 	},
 };
 
diff --git a/arch/arm/mach-s5p64x0/clock-s5p6440.c b/arch/arm/mach-s5p64x0/clock-s5p6440.c
index e4883dc..409c5fc 100644
--- a/arch/arm/mach-s5p64x0/clock-s5p6440.c
+++ b/arch/arm/mach-s5p64x0/clock-s5p6440.c
@@ -261,7 +261,7 @@
 		.enable		= s5p64x0_pclk_ctrl,
 		.ctrlbit	= (1 << 25),
 	}, {
-		.name		= "i2s_v40",
+		.name		= "iis",
 		.id		= 0,
 		.parent		= &clk_pclk_low.clk,
 		.enable		= s5p64x0_pclk_ctrl,
diff --git a/arch/arm/mach-s5p64x0/clock-s5p6450.c b/arch/arm/mach-s5p64x0/clock-s5p6450.c
index 7dbf3c9..7fc6abd 100644
--- a/arch/arm/mach-s5p64x0/clock-s5p6450.c
+++ b/arch/arm/mach-s5p64x0/clock-s5p6450.c
@@ -256,7 +256,7 @@
 		.ctrlbit	= (1 << 22),
 	}, {
 		.name		= "iis",
-		.id		= -1,
+		.id		= 0,
 		.parent		= &clk_pclk_low.clk,
 		.enable		= s5p64x0_pclk_ctrl,
 		.ctrlbit	= (1 << 26),
diff --git a/arch/arm/mach-s5p64x0/dev-audio.c b/arch/arm/mach-s5p64x0/dev-audio.c
index 396bacc..14f89e73 100644
--- a/arch/arm/mach-s5p64x0/dev-audio.c
+++ b/arch/arm/mach-s5p64x0/dev-audio.c
@@ -19,15 +19,19 @@
 #include <mach/dma.h>
 #include <mach/irqs.h>
 
-static int s5p6440_cfg_i2s(struct platform_device *pdev)
+static const char *rclksrc[] = {
+	[0] = "iis",
+	[1] = "sclk_audio2",
+};
+
+static int s5p64x0_cfg_i2s(struct platform_device *pdev)
 {
 	/* configure GPIO for i2s port */
 	switch (pdev->id) {
-	case -1:
+	case 0:
 		s3c_gpio_cfgpin_range(S5P6440_GPR(4), 5, S3C_GPIO_SFN(5));
 		s3c_gpio_cfgpin_range(S5P6440_GPR(13), 2, S3C_GPIO_SFN(5));
 		break;
-
 	default:
 		printk(KERN_ERR "Invalid Device %d\n", pdev->id);
 		return -EINVAL;
@@ -36,31 +40,14 @@
 	return 0;
 }
 
-static int s5p6450_cfg_i2s(struct platform_device *pdev)
-{
-	/* configure GPIO for i2s port */
-	switch (pdev->id) {
-	case -1:
-		s3c_gpio_cfgpin(S5P6450_GPB(4), S3C_GPIO_SFN(5));
-		s3c_gpio_cfgpin_range(S5P6450_GPR(4), 5, S3C_GPIO_SFN(5));
-		s3c_gpio_cfgpin_range(S5P6450_GPR(13), 2, S3C_GPIO_SFN(5));
-
-		break;
-
-	default:
-		printk(KERN_ERR "Invalid Device %d\n", pdev->id);
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
-static struct s3c_audio_pdata s5p6440_i2s_pdata = {
-	.cfg_gpio = s5p6440_cfg_i2s,
-};
-
-static struct s3c_audio_pdata s5p6450_i2s_pdata = {
-	.cfg_gpio = s5p6450_cfg_i2s,
+static struct s3c_audio_pdata s5p64x0_i2s_pdata = {
+	.cfg_gpio = s5p64x0_cfg_i2s,
+	.type = {
+		.i2s = {
+			.quirks = QUIRK_PRI_6CHAN,
+			.src_clk = rclksrc,
+		},
+	},
 };
 
 static struct resource s5p64x0_iis0_resource[] = {
@@ -82,22 +69,22 @@
 };
 
 struct platform_device s5p6440_device_iis = {
-	.name		= "s3c64xx-iis-v4",
-	.id		= -1,
+	.name		= "samsung-i2s",
+	.id		= 0,
 	.num_resources	= ARRAY_SIZE(s5p64x0_iis0_resource),
 	.resource	= s5p64x0_iis0_resource,
 	.dev = {
-		.platform_data = &s5p6440_i2s_pdata,
+		.platform_data = &s5p64x0_i2s_pdata,
 	},
 };
 
 struct platform_device s5p6450_device_iis0 = {
-	.name		= "s3c64xx-iis-v4",
-	.id		= -1,
+	.name		= "samsung-i2s",
+	.id		= 0,
 	.num_resources	= ARRAY_SIZE(s5p64x0_iis0_resource),
 	.resource	= s5p64x0_iis0_resource,
 	.dev = {
-		.platform_data = &s5p6450_i2s_pdata,
+		.platform_data = &s5p64x0_i2s_pdata,
 	},
 };
 
diff --git a/arch/arm/mach-s5pc100/dev-audio.c b/arch/arm/mach-s5pc100/dev-audio.c
index 564e195..ab2d271 100644
--- a/arch/arm/mach-s5pc100/dev-audio.c
+++ b/arch/arm/mach-s5pc100/dev-audio.c
@@ -23,17 +23,14 @@
 {
 	/* configure GPIO for i2s port */
 	switch (pdev->id) {
+	case 0: /* Dedicated pins */
+		break;
 	case 1:
 		s3c_gpio_cfgpin_range(S5PC100_GPC(0), 5, S3C_GPIO_SFN(2));
 		break;
-
 	case 2:
 		s3c_gpio_cfgpin_range(S5PC100_GPG3(0), 5, S3C_GPIO_SFN(4));
 		break;
-
-	case -1: /* Dedicated pins */
-		break;
-
 	default:
 		printk(KERN_ERR "Invalid Device %d\n", pdev->id);
 		return -EINVAL;
@@ -42,8 +39,20 @@
 	return 0;
 }
 
-static struct s3c_audio_pdata s3c_i2s_pdata = {
+static const char *rclksrc_v5[] = {
+	[0] = "iis",
+	[1] = "i2sclkd2",
+};
+
+static struct s3c_audio_pdata i2sv5_pdata = {
 	.cfg_gpio = s5pc100_cfg_i2s,
+	.type = {
+		.i2s = {
+			.quirks = QUIRK_PRI_6CHAN | QUIRK_SEC_DAI
+					 | QUIRK_NEED_RSTCLR,
+			.src_clk = rclksrc_v5,
+		},
+	},
 };
 
 static struct resource s5pc100_iis0_resource[] = {
@@ -62,15 +71,34 @@
 		.end   = DMACH_I2S0_RX,
 		.flags = IORESOURCE_DMA,
 	},
+	[3] = {
+		.start = DMACH_I2S0S_TX,
+		.end = DMACH_I2S0S_TX,
+		.flags = IORESOURCE_DMA,
+	},
 };
 
 struct platform_device s5pc100_device_iis0 = {
-	.name		  = "s3c64xx-iis-v4",
-	.id		  = -1,
+	.name = "samsung-i2s",
+	.id = 0,
 	.num_resources	  = ARRAY_SIZE(s5pc100_iis0_resource),
 	.resource	  = s5pc100_iis0_resource,
 	.dev = {
-		.platform_data = &s3c_i2s_pdata,
+		.platform_data = &i2sv5_pdata,
+	},
+};
+
+static const char *rclksrc_v3[] = {
+	[0] = "iis",
+	[1] = "sclk_audio",
+};
+
+static struct s3c_audio_pdata i2sv3_pdata = {
+	.cfg_gpio = s5pc100_cfg_i2s,
+	.type = {
+		.i2s = {
+			.src_clk = rclksrc_v3,
+		},
 	},
 };
 
@@ -93,12 +121,12 @@
 };
 
 struct platform_device s5pc100_device_iis1 = {
-	.name		  = "s3c64xx-iis",
+	.name		  = "samsung-i2s",
 	.id		  = 1,
 	.num_resources	  = ARRAY_SIZE(s5pc100_iis1_resource),
 	.resource	  = s5pc100_iis1_resource,
 	.dev = {
-		.platform_data = &s3c_i2s_pdata,
+		.platform_data = &i2sv3_pdata,
 	},
 };
 
@@ -121,12 +149,12 @@
 };
 
 struct platform_device s5pc100_device_iis2 = {
-	.name		  = "s3c64xx-iis",
+	.name		  = "samsung-i2s",
 	.id		  = 2,
 	.num_resources	  = ARRAY_SIZE(s5pc100_iis2_resource),
 	.resource	  = s5pc100_iis2_resource,
 	.dev = {
-		.platform_data = &s3c_i2s_pdata,
+		.platform_data = &i2sv3_pdata,
 	},
 };
 
@@ -253,7 +281,7 @@
 static u64 s5pc100_ac97_dmamask = DMA_BIT_MASK(32);
 
 struct platform_device s5pc100_device_ac97 = {
-	.name		  = "s3c-ac97",
+	.name		  = "samsung-ac97",
 	.id		  = -1,
 	.num_resources	  = ARRAY_SIZE(s5pc100_ac97_resource),
 	.resource	  = s5pc100_ac97_resource,
diff --git a/arch/arm/mach-s5pc100/mach-smdkc100.c b/arch/arm/mach-s5pc100/mach-smdkc100.c
index 18b405d..dd192a2 100644
--- a/arch/arm/mach-s5pc100/mach-smdkc100.c
+++ b/arch/arm/mach-s5pc100/mach-smdkc100.c
@@ -96,6 +96,7 @@
 
 /* I2C0 */
 static struct i2c_board_info i2c_devs0[] __initdata = {
+	{I2C_BOARD_INFO("wm8580", 0x1b),},
 };
 
 /* I2C1 */
@@ -190,6 +191,7 @@
 	&s3c_device_ts,
 	&s3c_device_wdt,
 	&smdkc100_lcd_powerdev,
+	&samsung_asoc_dma,
 	&s5pc100_device_iis0,
 	&samsung_device_keypad,
 	&s5pc100_device_ac97,
diff --git a/arch/arm/mach-s5pv210/clock.c b/arch/arm/mach-s5pv210/clock.c
index 019c3a6..b774ff1 100644
--- a/arch/arm/mach-s5pv210/clock.c
+++ b/arch/arm/mach-s5pv210/clock.c
@@ -467,20 +467,20 @@
 		.enable		= s5pv210_clk_ip3_ctrl,
 		.ctrlbit	= (1<<21),
 	}, {
-		.name		= "i2s_v50",
+		.name		= "iis",
 		.id		= 0,
 		.parent		= &clk_p,
 		.enable		= s5pv210_clk_ip3_ctrl,
 		.ctrlbit	= (1<<4),
 	}, {
-		.name		= "i2s_v32",
-		.id		= 0,
+		.name		= "iis",
+		.id		= 1,
 		.parent		= &clk_p,
 		.enable		= s5pv210_clk_ip3_ctrl,
 		.ctrlbit	= (1 << 5),
 	}, {
-		.name		= "i2s_v32",
-		.id		= 1,
+		.name		= "iis",
+		.id		= 2,
 		.parent		= &clk_p,
 		.enable		= s5pv210_clk_ip3_ctrl,
 		.ctrlbit	= (1 << 6),
diff --git a/arch/arm/mach-s5pv210/dev-audio.c b/arch/arm/mach-s5pv210/dev-audio.c
index 1303fcb..8d58f19 100644
--- a/arch/arm/mach-s5pv210/dev-audio.c
+++ b/arch/arm/mach-s5pv210/dev-audio.c
@@ -19,22 +19,24 @@
 #include <mach/dma.h>
 #include <mach/irqs.h>
 
+static const char *rclksrc[] = {
+	[0] = "busclk",
+	[1] = "i2sclk",
+};
+
 static int s5pv210_cfg_i2s(struct platform_device *pdev)
 {
 	/* configure GPIO for i2s port */
 	switch (pdev->id) {
+	case 0:
+		s3c_gpio_cfgpin_range(S5PV210_GPI(0), 7, S3C_GPIO_SFN(2));
+		break;
 	case 1:
 		s3c_gpio_cfgpin_range(S5PV210_GPC0(0), 5, S3C_GPIO_SFN(2));
 		break;
-
 	case 2:
 		s3c_gpio_cfgpin_range(S5PV210_GPC1(0), 5, S3C_GPIO_SFN(4));
 		break;
-
-	case -1:
-		s3c_gpio_cfgpin_range(S5PV210_GPI(0), 7, S3C_GPIO_SFN(2));
-		break;
-
 	default:
 		printk(KERN_ERR "Invalid Device %d\n", pdev->id);
 		return -EINVAL;
@@ -43,8 +45,15 @@
 	return 0;
 }
 
-static struct s3c_audio_pdata s3c_i2s_pdata = {
+static struct s3c_audio_pdata i2sv5_pdata = {
 	.cfg_gpio = s5pv210_cfg_i2s,
+	.type = {
+		.i2s = {
+			.quirks = QUIRK_PRI_6CHAN | QUIRK_SEC_DAI
+					 | QUIRK_NEED_RSTCLR,
+			.src_clk = rclksrc,
+		},
+	},
 };
 
 static struct resource s5pv210_iis0_resource[] = {
@@ -63,15 +72,34 @@
 		.end   = DMACH_I2S0_RX,
 		.flags = IORESOURCE_DMA,
 	},
+	[3] = {
+		.start = DMACH_I2S0S_TX,
+		.end = DMACH_I2S0S_TX,
+		.flags = IORESOURCE_DMA,
+	},
 };
 
 struct platform_device s5pv210_device_iis0 = {
-	.name		  = "s3c64xx-iis-v4",
-	.id		  = -1,
+	.name = "samsung-i2s",
+	.id = 0,
 	.num_resources	  = ARRAY_SIZE(s5pv210_iis0_resource),
 	.resource	  = s5pv210_iis0_resource,
 	.dev = {
-		.platform_data = &s3c_i2s_pdata,
+		.platform_data = &i2sv5_pdata,
+	},
+};
+
+static const char *rclksrc_v3[] = {
+	[0] = "iis",
+	[1] = "audio-bus",
+};
+
+static struct s3c_audio_pdata i2sv3_pdata = {
+	.cfg_gpio = s5pv210_cfg_i2s,
+	.type = {
+		.i2s = {
+			.src_clk = rclksrc_v3,
+		},
 	},
 };
 
@@ -94,12 +122,12 @@
 };
 
 struct platform_device s5pv210_device_iis1 = {
-	.name		  = "s3c64xx-iis",
+	.name		  = "samsung-i2s",
 	.id		  = 1,
 	.num_resources	  = ARRAY_SIZE(s5pv210_iis1_resource),
 	.resource	  = s5pv210_iis1_resource,
 	.dev = {
-		.platform_data = &s3c_i2s_pdata,
+		.platform_data = &i2sv3_pdata,
 	},
 };
 
@@ -122,12 +150,12 @@
 };
 
 struct platform_device s5pv210_device_iis2 = {
-	.name		  = "s3c64xx-iis",
+	.name		  = "samsung-i2s",
 	.id		  = 2,
 	.num_resources	  = ARRAY_SIZE(s5pv210_iis2_resource),
 	.resource	  = s5pv210_iis2_resource,
 	.dev = {
-		.platform_data = &s3c_i2s_pdata,
+		.platform_data = &i2sv3_pdata,
 	},
 };
 
@@ -283,7 +311,7 @@
 static u64 s5pv210_ac97_dmamask = DMA_BIT_MASK(32);
 
 struct platform_device s5pv210_device_ac97 = {
-	.name		  = "s3c-ac97",
+	.name		  = "samsung-ac97",
 	.id		  = -1,
 	.num_resources	  = ARRAY_SIZE(s5pv210_ac97_resource),
 	.resource	  = s5pv210_ac97_resource,
diff --git a/arch/arm/mach-s5pv310/Kconfig b/arch/arm/mach-s5pv310/Kconfig
index 1150b36..d64efe0 100644
--- a/arch/arm/mach-s5pv310/Kconfig
+++ b/arch/arm/mach-s5pv310/Kconfig
@@ -11,6 +11,7 @@
 
 config CPU_S5PV310
 	bool
+	select S3C_PL330_DMA
 	help
 	  Enable S5PV310 CPU support
 
diff --git a/arch/arm/mach-s5pv310/Makefile b/arch/arm/mach-s5pv310/Makefile
index 84afc64..61e3cb6 100644
--- a/arch/arm/mach-s5pv310/Makefile
+++ b/arch/arm/mach-s5pv310/Makefile
@@ -13,7 +13,7 @@
 # Core support for S5PV310 system
 
 obj-$(CONFIG_CPU_S5PV310)	+= cpu.o init.o clock.o irq-combiner.o
-obj-$(CONFIG_CPU_S5PV310)	+= setup-i2c0.o time.o gpiolib.o irq-eint.o
+obj-$(CONFIG_CPU_S5PV310)	+= setup-i2c0.o time.o gpiolib.o irq-eint.o dma.o
 
 obj-$(CONFIG_SMP)		+= platsmp.o headsmp.o
 obj-$(CONFIG_LOCAL_TIMERS)	+= localtimer.o
@@ -27,6 +27,7 @@
 
 # device support
 
+obj-y += dev-audio.o
 obj-$(CONFIG_S5PV310_SETUP_I2C1)	+= setup-i2c1.o
 obj-$(CONFIG_S5PV310_SETUP_I2C2)	+= setup-i2c2.o
 obj-$(CONFIG_S5PV310_SETUP_I2C3)	+= setup-i2c3.o
diff --git a/arch/arm/mach-s5pv310/dev-audio.c b/arch/arm/mach-s5pv310/dev-audio.c
new file mode 100644
index 0000000..a196424
--- /dev/null
+++ b/arch/arm/mach-s5pv310/dev-audio.c
@@ -0,0 +1,364 @@
+/* linux/arch/arm/mach-s5pv310/dev-audio.c
+ *
+ * Copyright (c) 2010 Samsung Electronics Co. Ltd
+ *	Jaswinder Singh <jassi.brar@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/gpio.h>
+
+#include <plat/gpio-cfg.h>
+#include <plat/audio.h>
+
+#include <mach/map.h>
+#include <mach/dma.h>
+#include <mach/irqs.h>
+
+static const char *rclksrc[] = {
+	[0] = "busclk",
+	[1] = "i2sclk",
+};
+
+static int s5pv310_cfg_i2s(struct platform_device *pdev)
+{
+	/* configure GPIO for i2s port */
+	switch (pdev->id) {
+	case 0:
+		s3c_gpio_cfgpin_range(S5PV310_GPZ(0), 7, S3C_GPIO_SFN(2));
+		break;
+	case 1:
+		s3c_gpio_cfgpin_range(S5PV310_GPC0(0), 5, S3C_GPIO_SFN(2));
+		break;
+	case 2:
+		s3c_gpio_cfgpin_range(S5PV310_GPC1(0), 5, S3C_GPIO_SFN(4));
+		break;
+	default:
+		printk(KERN_ERR "Invalid Device %d\n", pdev->id);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static struct s3c_audio_pdata i2sv5_pdata = {
+	.cfg_gpio = s5pv310_cfg_i2s,
+	.type = {
+		.i2s = {
+			.quirks = QUIRK_PRI_6CHAN | QUIRK_SEC_DAI
+					 | QUIRK_NEED_RSTCLR,
+			.src_clk = rclksrc,
+		},
+	},
+};
+
+static struct resource s5pv310_i2s0_resource[] = {
+	[0] = {
+		.start	= S5PV310_PA_I2S0,
+		.end	= S5PV310_PA_I2S0 + 0x100 - 1,
+		.flags	= IORESOURCE_MEM,
+	},
+	[1] = {
+		.start	= DMACH_I2S0_TX,
+		.end	= DMACH_I2S0_TX,
+		.flags	= IORESOURCE_DMA,
+	},
+	[2] = {
+		.start	= DMACH_I2S0_RX,
+		.end	= DMACH_I2S0_RX,
+		.flags	= IORESOURCE_DMA,
+	},
+	[3] = {
+		.start	= DMACH_I2S0S_TX,
+		.end	= DMACH_I2S0S_TX,
+		.flags	= IORESOURCE_DMA,
+	},
+};
+
+struct platform_device s5pv310_device_i2s0 = {
+	.name = "samsung-i2s",
+	.id = 0,
+	.num_resources = ARRAY_SIZE(s5pv310_i2s0_resource),
+	.resource = s5pv310_i2s0_resource,
+	.dev = {
+		.platform_data = &i2sv5_pdata,
+	},
+};
+
+static const char *rclksrc_v3[] = {
+	[0] = "sclk_i2s",
+	[1] = "no_such_clock",
+};
+
+static struct s3c_audio_pdata i2sv3_pdata = {
+	.cfg_gpio = s5pv310_cfg_i2s,
+	.type = {
+		.i2s = {
+			.quirks = QUIRK_NO_MUXPSR,
+			.src_clk = rclksrc_v3,
+		},
+	},
+};
+
+static struct resource s5pv310_i2s1_resource[] = {
+	[0] = {
+		.start	= S5PV310_PA_I2S1,
+		.end	= S5PV310_PA_I2S1 + 0x100 - 1,
+		.flags	= IORESOURCE_MEM,
+	},
+	[1] = {
+		.start	= DMACH_I2S1_TX,
+		.end	= DMACH_I2S1_TX,
+		.flags	= IORESOURCE_DMA,
+	},
+	[2] = {
+		.start	= DMACH_I2S1_RX,
+		.end	= DMACH_I2S1_RX,
+		.flags	= IORESOURCE_DMA,
+	},
+};
+
+struct platform_device s5pv310_device_i2s1 = {
+	.name = "samsung-i2s",
+	.id = 1,
+	.num_resources = ARRAY_SIZE(s5pv310_i2s1_resource),
+	.resource = s5pv310_i2s1_resource,
+	.dev = {
+		.platform_data = &i2sv3_pdata,
+	},
+};
+
+static struct resource s5pv310_i2s2_resource[] = {
+	[0] = {
+		.start	= S5PV310_PA_I2S2,
+		.end	= S5PV310_PA_I2S2 + 0x100 - 1,
+		.flags	= IORESOURCE_MEM,
+	},
+	[1] = {
+		.start	= DMACH_I2S2_TX,
+		.end	= DMACH_I2S2_TX,
+		.flags	= IORESOURCE_DMA,
+	},
+	[2] = {
+		.start	= DMACH_I2S2_RX,
+		.end	= DMACH_I2S2_RX,
+		.flags	= IORESOURCE_DMA,
+	},
+};
+
+struct platform_device s5pv310_device_i2s2 = {
+	.name = "samsung-i2s",
+	.id = 2,
+	.num_resources = ARRAY_SIZE(s5pv310_i2s2_resource),
+	.resource = s5pv310_i2s2_resource,
+	.dev = {
+		.platform_data = &i2sv3_pdata,
+	},
+};
+
+/* PCM Controller platform_devices */
+
+static int s5pv310_pcm_cfg_gpio(struct platform_device *pdev)
+{
+	switch (pdev->id) {
+	case 0:
+		s3c_gpio_cfgpin_range(S5PV310_GPZ(0), 5, S3C_GPIO_SFN(3));
+		break;
+	case 1:
+		s3c_gpio_cfgpin_range(S5PV310_GPC0(0), 5, S3C_GPIO_SFN(3));
+		break;
+	case 2:
+		s3c_gpio_cfgpin_range(S5PV310_GPC1(0), 5, S3C_GPIO_SFN(3));
+		break;
+	default:
+		printk(KERN_DEBUG "Invalid PCM Controller number!\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static struct s3c_audio_pdata s3c_pcm_pdata = {
+	.cfg_gpio = s5pv310_pcm_cfg_gpio,
+};
+
+static struct resource s5pv310_pcm0_resource[] = {
+	[0] = {
+		.start	= S5PV310_PA_PCM0,
+		.end	= S5PV310_PA_PCM0 + 0x100 - 1,
+		.flags	= IORESOURCE_MEM,
+	},
+	[1] = {
+		.start	= DMACH_PCM0_TX,
+		.end	= DMACH_PCM0_TX,
+		.flags	= IORESOURCE_DMA,
+	},
+	[2] = {
+		.start	= DMACH_PCM0_RX,
+		.end	= DMACH_PCM0_RX,
+		.flags	= IORESOURCE_DMA,
+	},
+};
+
+struct platform_device s5pv310_device_pcm0 = {
+	.name = "samsung-pcm",
+	.id = 0,
+	.num_resources = ARRAY_SIZE(s5pv310_pcm0_resource),
+	.resource = s5pv310_pcm0_resource,
+	.dev = {
+		.platform_data = &s3c_pcm_pdata,
+	},
+};
+
+static struct resource s5pv310_pcm1_resource[] = {
+	[0] = {
+		.start	= S5PV310_PA_PCM1,
+		.end	= S5PV310_PA_PCM1 + 0x100 - 1,
+		.flags	= IORESOURCE_MEM,
+	},
+	[1] = {
+		.start	= DMACH_PCM1_TX,
+		.end	= DMACH_PCM1_TX,
+		.flags	= IORESOURCE_DMA,
+	},
+	[2] = {
+		.start	= DMACH_PCM1_RX,
+		.end	= DMACH_PCM1_RX,
+		.flags	= IORESOURCE_DMA,
+	},
+};
+
+struct platform_device s5pv310_device_pcm1 = {
+	.name = "samsung-pcm",
+	.id = 1,
+	.num_resources = ARRAY_SIZE(s5pv310_pcm1_resource),
+	.resource = s5pv310_pcm1_resource,
+	.dev = {
+		.platform_data = &s3c_pcm_pdata,
+	},
+};
+
+static struct resource s5pv310_pcm2_resource[] = {
+	[0] = {
+		.start	= S5PV310_PA_PCM2,
+		.end	= S5PV310_PA_PCM2 + 0x100 - 1,
+		.flags	= IORESOURCE_MEM,
+	},
+	[1] = {
+		.start	= DMACH_PCM2_TX,
+		.end	= DMACH_PCM2_TX,
+		.flags	= IORESOURCE_DMA,
+	},
+	[2] = {
+		.start	= DMACH_PCM2_RX,
+		.end	= DMACH_PCM2_RX,
+		.flags	= IORESOURCE_DMA,
+	},
+};
+
+struct platform_device s5pv310_device_pcm2 = {
+	.name = "samsung-pcm",
+	.id = 2,
+	.num_resources = ARRAY_SIZE(s5pv310_pcm2_resource),
+	.resource = s5pv310_pcm2_resource,
+	.dev = {
+		.platform_data = &s3c_pcm_pdata,
+	},
+};
+
+/* AC97 Controller platform devices */
+
+static int s5pv310_ac97_cfg_gpio(struct platform_device *pdev)
+{
+	return s3c_gpio_cfgpin_range(S5PV310_GPC0(0), 5, S3C_GPIO_SFN(4));
+}
+
+static struct resource s5pv310_ac97_resource[] = {
+	[0] = {
+		.start	= S5PV310_PA_AC97,
+		.end	= S5PV310_PA_AC97 + 0x100 - 1,
+		.flags	= IORESOURCE_MEM,
+	},
+	[1] = {
+		.start	= DMACH_AC97_PCMOUT,
+		.end	= DMACH_AC97_PCMOUT,
+		.flags	= IORESOURCE_DMA,
+	},
+	[2] = {
+		.start	= DMACH_AC97_PCMIN,
+		.end	= DMACH_AC97_PCMIN,
+		.flags	= IORESOURCE_DMA,
+	},
+	[3] = {
+		.start	= DMACH_AC97_MICIN,
+		.end	= DMACH_AC97_MICIN,
+		.flags	= IORESOURCE_DMA,
+	},
+	[4] = {
+		.start	= IRQ_AC97,
+		.end	= IRQ_AC97,
+		.flags	= IORESOURCE_IRQ,
+	},
+};
+
+static struct s3c_audio_pdata s3c_ac97_pdata = {
+	.cfg_gpio = s5pv310_ac97_cfg_gpio,
+};
+
+static u64 s5pv310_ac97_dmamask = DMA_BIT_MASK(32);
+
+struct platform_device s5pv310_device_ac97 = {
+	.name = "samsung-ac97",
+	.id = -1,
+	.num_resources = ARRAY_SIZE(s5pv310_ac97_resource),
+	.resource = s5pv310_ac97_resource,
+	.dev = {
+		.platform_data = &s3c_ac97_pdata,
+		.dma_mask = &s5pv310_ac97_dmamask,
+		.coherent_dma_mask = DMA_BIT_MASK(32),
+	},
+};
+
+/* S/PDIF Controller platform_device */
+
+static int s5pv310_spdif_cfg_gpio(struct platform_device *pdev)
+{
+	s3c_gpio_cfgpin_range(S5PV310_GPC1(0), 2, S3C_GPIO_SFN(3));
+
+	return 0;
+}
+
+static struct resource s5pv310_spdif_resource[] = {
+	[0] = {
+		.start	= S5PV310_PA_SPDIF,
+		.end	= S5PV310_PA_SPDIF + 0x100 - 1,
+		.flags	= IORESOURCE_MEM,
+	},
+	[1] = {
+		.start	= DMACH_SPDIF,
+		.end	= DMACH_SPDIF,
+		.flags	= IORESOURCE_DMA,
+	},
+};
+
+static struct s3c_audio_pdata samsung_spdif_pdata = {
+	.cfg_gpio = s5pv310_spdif_cfg_gpio,
+};
+
+static u64 s5pv310_spdif_dmamask = DMA_BIT_MASK(32);
+
+struct platform_device s5pv310_device_spdif = {
+	.name = "samsung-spdif",
+	.id = -1,
+	.num_resources = ARRAY_SIZE(s5pv310_spdif_resource),
+	.resource = s5pv310_spdif_resource,
+	.dev = {
+		.platform_data = &samsung_spdif_pdata,
+		.dma_mask = &s5pv310_spdif_dmamask,
+		.coherent_dma_mask = DMA_BIT_MASK(32),
+	},
+};
diff --git a/arch/arm/mach-s5pv310/dma.c b/arch/arm/mach-s5pv310/dma.c
new file mode 100644
index 0000000..20066c7
--- /dev/null
+++ b/arch/arm/mach-s5pv310/dma.c
@@ -0,0 +1,168 @@
+/*
+ * Copyright (C) 2010 Samsung Electronics Co. Ltd.
+ *	Jaswinder Singh <jassi.brar@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+
+#include <plat/devs.h>
+#include <plat/irqs.h>
+
+#include <mach/map.h>
+#include <mach/irqs.h>
+
+#include <plat/s3c-pl330-pdata.h>
+
+static u64 dma_dmamask = DMA_BIT_MASK(32);
+
+static struct resource s5pv310_pdma0_resource[] = {
+	[0] = {
+		.start	= S5PV310_PA_PDMA0,
+		.end	= S5PV310_PA_PDMA0 + SZ_4K,
+		.flags	= IORESOURCE_MEM,
+	},
+	[1] = {
+		.start	= IRQ_PDMA0,
+		.end	= IRQ_PDMA0,
+		.flags	= IORESOURCE_IRQ,
+	},
+};
+
+static struct s3c_pl330_platdata s5pv310_pdma0_pdata = {
+	.peri = {
+		[0] = DMACH_PCM0_RX,
+		[1] = DMACH_PCM0_TX,
+		[2] = DMACH_PCM2_RX,
+		[3] = DMACH_PCM2_TX,
+		[4] = DMACH_MSM_REQ0,
+		[5] = DMACH_MSM_REQ2,
+		[6] = DMACH_SPI0_RX,
+		[7] = DMACH_SPI0_TX,
+		[8] = DMACH_SPI2_RX,
+		[9] = DMACH_SPI2_TX,
+		[10] = DMACH_I2S0S_TX,
+		[11] = DMACH_I2S0_RX,
+		[12] = DMACH_I2S0_TX,
+		[13] = DMACH_I2S2_RX,
+		[14] = DMACH_I2S2_TX,
+		[15] = DMACH_UART0_RX,
+		[16] = DMACH_UART0_TX,
+		[17] = DMACH_UART2_RX,
+		[18] = DMACH_UART2_TX,
+		[19] = DMACH_UART4_RX,
+		[20] = DMACH_UART4_TX,
+		[21] = DMACH_SLIMBUS0_RX,
+		[22] = DMACH_SLIMBUS0_TX,
+		[23] = DMACH_SLIMBUS2_RX,
+		[24] = DMACH_SLIMBUS2_TX,
+		[25] = DMACH_SLIMBUS4_RX,
+		[26] = DMACH_SLIMBUS4_TX,
+		[27] = DMACH_AC97_MICIN,
+		[28] = DMACH_AC97_PCMIN,
+		[29] = DMACH_AC97_PCMOUT,
+		[30] = DMACH_MAX,
+		[31] = DMACH_MAX,
+	},
+};
+
+static struct platform_device s5pv310_device_pdma0 = {
+	.name		= "s3c-pl330",
+	.id		= 0,
+	.num_resources	= ARRAY_SIZE(s5pv310_pdma0_resource),
+	.resource	= s5pv310_pdma0_resource,
+	.dev		= {
+		.dma_mask = &dma_dmamask,
+		.coherent_dma_mask = DMA_BIT_MASK(32),
+		.platform_data = &s5pv310_pdma0_pdata,
+	},
+};
+
+static struct resource s5pv310_pdma1_resource[] = {
+	[0] = {
+		.start	= S5PV310_PA_PDMA1,
+		.end	= S5PV310_PA_PDMA1 + SZ_4K,
+		.flags	= IORESOURCE_MEM,
+	},
+	[1] = {
+		.start	= IRQ_PDMA1,
+		.end	= IRQ_PDMA1,
+		.flags	= IORESOURCE_IRQ,
+	},
+};
+
+static struct s3c_pl330_platdata s5pv310_pdma1_pdata = {
+	.peri = {
+		[0] = DMACH_PCM0_RX,
+		[1] = DMACH_PCM0_TX,
+		[2] = DMACH_PCM1_RX,
+		[3] = DMACH_PCM1_TX,
+		[4] = DMACH_MSM_REQ1,
+		[5] = DMACH_MSM_REQ3,
+		[6] = DMACH_SPI1_RX,
+		[7] = DMACH_SPI1_TX,
+		[8] = DMACH_I2S0S_TX,
+		[9] = DMACH_I2S0_RX,
+		[10] = DMACH_I2S0_TX,
+		[11] = DMACH_I2S1_RX,
+		[12] = DMACH_I2S1_TX,
+		[13] = DMACH_UART0_RX,
+		[14] = DMACH_UART0_TX,
+		[15] = DMACH_UART1_RX,
+		[16] = DMACH_UART1_TX,
+		[17] = DMACH_UART3_RX,
+		[18] = DMACH_UART3_TX,
+		[19] = DMACH_SLIMBUS1_RX,
+		[20] = DMACH_SLIMBUS1_TX,
+		[21] = DMACH_SLIMBUS3_RX,
+		[22] = DMACH_SLIMBUS3_TX,
+		[23] = DMACH_SLIMBUS5_RX,
+		[24] = DMACH_SLIMBUS5_TX,
+		[25] = DMACH_SLIMBUS0AUX_RX,
+		[26] = DMACH_SLIMBUS0AUX_TX,
+		[27] = DMACH_SPDIF,
+		[28] = DMACH_MAX,
+		[29] = DMACH_MAX,
+		[30] = DMACH_MAX,
+		[31] = DMACH_MAX,
+	},
+};
+
+static struct platform_device s5pv310_device_pdma1 = {
+	.name		= "s3c-pl330",
+	.id		= 1,
+	.num_resources	= ARRAY_SIZE(s5pv310_pdma1_resource),
+	.resource	= s5pv310_pdma1_resource,
+	.dev		= {
+		.dma_mask = &dma_dmamask,
+		.coherent_dma_mask = DMA_BIT_MASK(32),
+		.platform_data = &s5pv310_pdma1_pdata,
+	},
+};
+
+static struct platform_device *s5pv310_dmacs[] __initdata = {
+	&s5pv310_device_pdma0,
+	&s5pv310_device_pdma1,
+};
+
+static int __init s5pv310_dma_init(void)
+{
+	platform_add_devices(s5pv310_dmacs, ARRAY_SIZE(s5pv310_dmacs));
+
+	return 0;
+}
+arch_initcall(s5pv310_dma_init);
diff --git a/arch/arm/mach-s5pv310/include/mach/dma.h b/arch/arm/mach-s5pv310/include/mach/dma.h
new file mode 100644
index 0000000..81209eb
--- /dev/null
+++ b/arch/arm/mach-s5pv310/include/mach/dma.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2010 Samsung Electronics Co. Ltd.
+ *	Jaswinder Singh <jassi.brar@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef __MACH_DMA_H
+#define __MACH_DMA_H
+
+/* This platform uses the common S3C DMA API driver for PL330 */
+#include <plat/s3c-dma-pl330.h>
+
+#endif /* __MACH_DMA_H */
diff --git a/arch/arm/mach-s5pv310/include/mach/irqs.h b/arch/arm/mach-s5pv310/include/mach/irqs.h
index 99e7dad..3c05c58 100644
--- a/arch/arm/mach-s5pv310/include/mach/irqs.h
+++ b/arch/arm/mach-s5pv310/include/mach/irqs.h
@@ -54,6 +54,9 @@
 #define COMBINER_GROUP(x)	((x) * MAX_IRQ_IN_COMBINER + IRQ_SPI(64))
 #define COMBINER_IRQ(x, y)	(COMBINER_GROUP(x) + y)
 
+#define IRQ_PDMA0		COMBINER_IRQ(21, 0)
+#define IRQ_PDMA1		COMBINER_IRQ(21, 1)
+
 #define IRQ_TIMER0_VIC		COMBINER_IRQ(22, 0)
 #define IRQ_TIMER1_VIC		COMBINER_IRQ(22, 1)
 #define IRQ_TIMER2_VIC		COMBINER_IRQ(22, 2)
diff --git a/arch/arm/mach-s5pv310/include/mach/map.h b/arch/arm/mach-s5pv310/include/mach/map.h
index 7acf4e7..5399446 100644
--- a/arch/arm/mach-s5pv310/include/mach/map.h
+++ b/arch/arm/mach-s5pv310/include/mach/map.h
@@ -52,6 +52,11 @@
 #define S5PV310_PA_GIC_DIST		(0x10501000)
 #define S5PV310_PA_L2CC			(0x10502000)
 
+/* DMA */
+#define S5PV310_PA_MDMA		0x10810000
+#define S5PV310_PA_PDMA0	0x12680000
+#define S5PV310_PA_PDMA1	0x12690000
+
 #define S5PV310_PA_GPIO1		(0x11400000)
 #define S5PV310_PA_GPIO2		(0x11000000)
 #define S5PV310_PA_GPIO3		(0x03860000)
@@ -60,6 +65,22 @@
 
 #define S5PV310_PA_SROMC		(0x12570000)
 
+/* S/PDIF */
+#define S5PV310_PA_SPDIF	0xE1100000
+
+/* I2S */
+#define S5PV310_PA_I2S0		0x03830000
+#define S5PV310_PA_I2S1		0xE3100000
+#define S5PV310_PA_I2S2		0xE2A00000
+
+/* PCM */
+#define S5PV310_PA_PCM0		0x03840000
+#define S5PV310_PA_PCM1		0x13980000
+#define S5PV310_PA_PCM2		0x13990000
+
+/* AC97 */
+#define S5PV310_PA_AC97		0x139A0000
+
 #define S5PV310_PA_UART			(0x13800000)
 
 #define S5P_PA_UART(x)			(S5PV310_PA_UART + ((x) * S3C_UART_OFFSET))
diff --git a/arch/arm/mach-sa1100/pm.c b/arch/arm/mach-sa1100/pm.c
index c83fdc8..ab9fc44 100644
--- a/arch/arm/mach-sa1100/pm.c
+++ b/arch/arm/mach-sa1100/pm.c
@@ -120,7 +120,7 @@
 	return virt_to_phys(sp);
 }
 
-static struct platform_suspend_ops sa11x0_pm_ops = {
+static const struct platform_suspend_ops sa11x0_pm_ops = {
 	.enter		= sa11x0_pm_enter,
 	.valid		= suspend_valid_only_mem,
 };
diff --git a/arch/arm/mach-shmobile/board-ap4evb.c b/arch/arm/mach-shmobile/board-ap4evb.c
index cd79d7c..5b9937c 100644
--- a/arch/arm/mach-shmobile/board-ap4evb.c
+++ b/arch/arm/mach-shmobile/board-ap4evb.c
@@ -711,6 +711,10 @@
 	},
 };
 
+static struct platform_device fsi_ak4643_device = {
+	.name		= "sh_fsi2_a_ak4643",
+};
+
 static struct sh_mobile_lcdc_info sh_mobile_lcdc1_info = {
 	.clock_source = LCDC_CLK_EXTERNAL,
 	.ch[0] = {
@@ -933,6 +937,7 @@
 	&sdhi1_device,
 	&usb1_host_device,
 	&fsi_device,
+	&fsi_ak4643_device,
 	&sh_mmcif_device,
 	&lcdc1_device,
 	&lcdc_device,
diff --git a/arch/arm/mach-spear3xx/spear300.c b/arch/arm/mach-spear3xx/spear300.c
index 3560f8c..5aa2d54 100644
--- a/arch/arm/mach-spear3xx/spear300.c
+++ b/arch/arm/mach-spear3xx/spear300.c
@@ -371,7 +371,7 @@
 };
 
 /* Add spear300 specific devices here */
-/* arm gpio1 device registeration */
+/* arm gpio1 device registration */
 static struct pl061_platform_data gpio1_plat_data = {
 	.gpio_base	= 8,
 	.irq_base	= SPEAR_GPIO1_INT_BASE,
@@ -451,7 +451,7 @@
 	/* call spear3xx family common init function */
 	spear3xx_init();
 
-	/* shared irq registeration */
+	/* shared irq registration */
 	shirq_ras1.regs.base =
 		ioremap(SPEAR300_TELECOM_BASE, SPEAR300_TELECOM_REG_SIZE);
 	if (shirq_ras1.regs.base) {
diff --git a/arch/arm/mach-spear3xx/spear310.c b/arch/arm/mach-spear3xx/spear310.c
index 96a1ab8..53b41b5 100644
--- a/arch/arm/mach-spear3xx/spear310.c
+++ b/arch/arm/mach-spear3xx/spear310.c
@@ -266,7 +266,7 @@
 	/* call spear3xx family common init function */
 	spear3xx_init();
 
-	/* shared irq registeration */
+	/* shared irq registration */
 	base = ioremap(SPEAR310_SOC_CONFIG_BASE, SPEAR310_SOC_CONFIG_SIZE);
 	if (base) {
 		/* shirq 1 */
diff --git a/arch/arm/mach-spear3xx/spear320.c b/arch/arm/mach-spear3xx/spear320.c
index 6a121954..88b4652 100644
--- a/arch/arm/mach-spear3xx/spear320.c
+++ b/arch/arm/mach-spear3xx/spear320.c
@@ -519,7 +519,7 @@
 	/* call spear3xx family common init function */
 	spear3xx_init();
 
-	/* shared irq registeration */
+	/* shared irq registration */
 	base = ioremap(SPEAR320_SOC_CONFIG_BASE, SPEAR320_SOC_CONFIG_SIZE);
 	if (base) {
 		/* shirq 1 */
diff --git a/arch/arm/mach-spear3xx/spear3xx.c b/arch/arm/mach-spear3xx/spear3xx.c
index e87313a..52f553c 100644
--- a/arch/arm/mach-spear3xx/spear3xx.c
+++ b/arch/arm/mach-spear3xx/spear3xx.c
@@ -22,7 +22,7 @@
 #include <mach/spear.h>
 
 /* Add spear3xx machines common devices here */
-/* gpio device registeration */
+/* gpio device registration */
 static struct pl061_platform_data gpio_plat_data = {
 	.gpio_base	= 0,
 	.irq_base	= SPEAR_GPIO_INT_BASE,
@@ -41,7 +41,7 @@
 	.irq = {IRQ_BASIC_GPIO, NO_IRQ},
 };
 
-/* uart device registeration */
+/* uart device registration */
 struct amba_device uart_device = {
 	.dev = {
 		.init_name = "uart",
@@ -543,6 +543,6 @@
 
 pmx_fail:
 	if (ret)
-		printk(KERN_ERR "padmux: registeration failed. err no: %d\n",
+		printk(KERN_ERR "padmux: registration failed. err no: %d\n",
 				ret);
 }
diff --git a/arch/arm/mach-spear6xx/spear6xx.c b/arch/arm/mach-spear6xx/spear6xx.c
index baf6bcc..f2fe14e 100644
--- a/arch/arm/mach-spear6xx/spear6xx.c
+++ b/arch/arm/mach-spear6xx/spear6xx.c
@@ -23,7 +23,7 @@
 #include <mach/spear.h>
 
 /* Add spear6xx machines common devices here */
-/* uart device registeration */
+/* uart device registration */
 struct amba_device uart_device[] = {
 	{
 		.dev = {
@@ -50,7 +50,7 @@
 	}
 };
 
-/* gpio device registeration */
+/* gpio device registration */
 static struct pl061_platform_data gpio_plat_data[] = {
 	{
 		.gpio_base	= 0,
diff --git a/arch/arm/mach-tegra/include/mach/sdhci.h b/arch/arm/mach-tegra/include/mach/sdhci.h
new file mode 100644
index 0000000..3ad086e
--- /dev/null
+++ b/arch/arm/mach-tegra/include/mach/sdhci.h
@@ -0,0 +1,29 @@
+/*
+ * arch/arm/mach-tegra/include/mach/sdhci.h
+ *
+ * Copyright (C) 2009 Palm, Inc.
+ * Author: Yvonne Yip <y@palm.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __ASM_ARM_ARCH_TEGRA_SDHCI_H
+#define __ASM_ARM_ARCH_TEGRA_SDHCI_H
+
+#include <linux/mmc/host.h>
+
+struct tegra_sdhci_platform_data {
+	int cd_gpio;
+	int wp_gpio;
+	int power_gpio;
+	int is_8bit;
+};
+
+#endif
diff --git a/arch/arm/mach-u300/Kconfig b/arch/arm/mach-u300/Kconfig
index 801b21e..32a7b0f 100644
--- a/arch/arm/mach-u300/Kconfig
+++ b/arch/arm/mach-u300/Kconfig
@@ -64,7 +64,7 @@
 	bool "Dual RAM"
 	help
 		Select this if you want support for Dual RAM phones.
-		This is two RAM memorys on different EMIFs.
+		This is two RAM memories on different EMIFs.
 endchoice
 
 config U300_DEBUG
diff --git a/arch/arm/mach-u300/include/mach/coh901318.h b/arch/arm/mach-u300/include/mach/coh901318.h
index 193da2d..6193aaa 100644
--- a/arch/arm/mach-u300/include/mach/coh901318.h
+++ b/arch/arm/mach-u300/include/mach/coh901318.h
@@ -24,7 +24,7 @@
  * @src_addr: transfer source address
  * @dst_addr: transfer destination address
  * @link_addr:  physical address to next lli
- * @virt_link_addr: virtual addres of next lli (only used by pool_free)
+ * @virt_link_addr: virtual address of next lli (only used by pool_free)
  * @phy_this: physical address of current lli (only used by pool_free)
  */
 struct coh901318_lli {
@@ -90,7 +90,7 @@
  * struct coh901318_platform - platform arch structure
  * @chans_slave: specifying dma slave channels
  * @chans_memcpy: specifying dma memcpy channels
- * @access_memory_state: requesting DMA memeory access (on / off)
+ * @access_memory_state: requesting DMA memory access (on / off)
  * @chan_conf: dma channel configurations
  * @max_channels: max number of dma chanenls
  */
diff --git a/arch/arm/mach-ux500/board-mop500-regulators.c b/arch/arm/mach-ux500/board-mop500-regulators.c
index 1187f1f..533967c 100644
--- a/arch/arm/mach-ux500/board-mop500-regulators.c
+++ b/arch/arm/mach-ux500/board-mop500-regulators.c
@@ -3,99 +3,94 @@
  *
  * License Terms: GNU General Public License v2
  *
- * Author: Sundar Iyer <sundar.iyer@stericsson.com>
+ * Authors: Sundar Iyer <sundar.iyer@stericsson.com>
+ *          Bengt Jonsson <bengt.g.jonsson@stericsson.com>
  *
  * MOP500 board specific initialization for regulators
  */
 #include <linux/kernel.h>
 #include <linux/regulator/machine.h>
+#include <linux/regulator/ab8500.h>
 
-/* supplies to the display/camera */
-static struct regulator_init_data ab8500_vaux1_regulator = {
-	.constraints = {
-		.name = "V-DISPLAY",
-		.min_uV = 2500000,
-		.max_uV = 2900000,
-		.valid_ops_mask = REGULATOR_CHANGE_VOLTAGE|
-					REGULATOR_CHANGE_STATUS,
+/* AB8500 regulators */
+struct regulator_init_data ab8500_regulators[AB8500_NUM_REGULATORS] = {
+	/* supplies to the display/camera */
+	[AB8500_LDO_AUX1] = {
+		.constraints = {
+			.name = "V-DISPLAY",
+			.min_uV = 2500000,
+			.max_uV = 2900000,
+			.valid_ops_mask = REGULATOR_CHANGE_VOLTAGE |
+					  REGULATOR_CHANGE_STATUS,
+		},
+	},
+	/* supplies to the on-board eMMC */
+	[AB8500_LDO_AUX2] = {
+		.constraints = {
+			.name = "V-eMMC1",
+			.min_uV = 1100000,
+			.max_uV = 3300000,
+			.valid_ops_mask = REGULATOR_CHANGE_VOLTAGE |
+					  REGULATOR_CHANGE_STATUS,
+		},
+	},
+	/* supply for VAUX3, supplies to SDcard slots */
+	[AB8500_LDO_AUX3] = {
+		.constraints = {
+			.name = "V-MMC-SD",
+			.min_uV = 1100000,
+			.max_uV = 3300000,
+			.valid_ops_mask = REGULATOR_CHANGE_VOLTAGE |
+					  REGULATOR_CHANGE_STATUS,
+		},
+	},
+	/* supply for tvout, gpadc, TVOUT LDO */
+	[AB8500_LDO_TVOUT] = {
+		.constraints = {
+			.name = "V-TVOUT",
+			.valid_ops_mask = REGULATOR_CHANGE_STATUS,
+		},
+	},
+	/* supply for ab8500-vaudio, VAUDIO LDO */
+	[AB8500_LDO_AUDIO] = {
+		.constraints = {
+			.name = "V-AUD",
+			.valid_ops_mask = REGULATOR_CHANGE_STATUS,
+		},
+	},
+	/* supply for v-anamic1 VAMic1-LDO */
+	[AB8500_LDO_ANAMIC1] = {
+		.constraints = {
+			.name = "V-AMIC1",
+			.valid_ops_mask = REGULATOR_CHANGE_STATUS,
+		},
+	},
+	/* supply for v-amic2, VAMIC2 LDO, reuse constants for AMIC1 */
+	[AB8500_LDO_ANAMIC2] = {
+		.constraints = {
+			.name = "V-AMIC2",
+			.valid_ops_mask = REGULATOR_CHANGE_STATUS,
+		},
+	},
+	/* supply for v-dmic, VDMIC LDO */
+	[AB8500_LDO_DMIC] = {
+		.constraints = {
+			.name = "V-DMIC",
+			.valid_ops_mask = REGULATOR_CHANGE_STATUS,
+		},
+	},
+	/* supply for v-intcore12, VINTCORE12 LDO */
+	[AB8500_LDO_INTCORE] = {
+		.constraints = {
+			.name = "V-INTCORE",
+			.valid_ops_mask = REGULATOR_CHANGE_STATUS,
+		},
+	},
+	/* supply for U8500 CSI/DSI, VANA LDO */
+	[AB8500_LDO_ANA] = {
+		.constraints = {
+			.name = "V-CSI/DSI",
+			.valid_ops_mask = REGULATOR_CHANGE_STATUS,
+		},
 	},
 };
-
-/* supplies to the on-board eMMC */
-static struct regulator_init_data ab8500_vaux2_regulator = {
-	.constraints = {
-		.name = "V-eMMC1",
-		.min_uV = 1100000,
-		.max_uV = 3300000,
-		.valid_ops_mask = REGULATOR_CHANGE_VOLTAGE|
-					REGULATOR_CHANGE_STATUS,
-	},
-};
-
-/* supply for VAUX3, supplies to SDcard slots */
-static struct regulator_init_data ab8500_vaux3_regulator = {
-	.constraints = {
-		.name = "V-MMC-SD",
-		.min_uV = 1100000,
-		.max_uV = 3300000,
-		.valid_ops_mask = REGULATOR_CHANGE_VOLTAGE|
-					REGULATOR_CHANGE_STATUS,
-	},
-};
-
-/* supply for tvout, gpadc, TVOUT LDO */
-static struct regulator_init_data ab8500_vtvout_init = {
-	.constraints = {
-		.name = "V-TVOUT",
-		.valid_ops_mask = REGULATOR_CHANGE_STATUS,
-	},
-};
-
-/* supply for ab8500-vaudio, VAUDIO LDO */
-static struct regulator_init_data ab8500_vaudio_init = {
-	.constraints = {
-		.name = "V-AUD",
-		.valid_ops_mask = REGULATOR_CHANGE_STATUS,
-	},
-};
-
-/* supply for v-anamic1 VAMic1-LDO */
-static struct regulator_init_data ab8500_vamic1_init = {
-	.constraints = {
-		.name = "V-AMIC1",
-		.valid_ops_mask = REGULATOR_CHANGE_STATUS,
-	},
-};
-
-/* supply for v-amic2, VAMIC2 LDO, reuse constants for AMIC1 */
-static struct regulator_init_data ab8500_vamic2_init = {
-	.constraints = {
-		.name = "V-AMIC2",
-		.valid_ops_mask = REGULATOR_CHANGE_STATUS,
-	},
-};
-
-/* supply for v-dmic, VDMIC LDO */
-static struct regulator_init_data ab8500_vdmic_init = {
-	.constraints = {
-		.name = "V-DMIC",
-		.valid_ops_mask = REGULATOR_CHANGE_STATUS,
-	},
-};
-
-/* supply for v-intcore12, VINTCORE12 LDO */
-static struct regulator_init_data ab8500_vintcore_init = {
-	.constraints = {
-		.name = "V-INTCORE",
-		.valid_ops_mask = REGULATOR_CHANGE_STATUS,
-	},
-};
-
-/* supply for U8500 CSI/DSI, VANA LDO */
-static struct regulator_init_data ab8500_vana_init = {
-	.constraints = {
-		.name = "V-CSI/DSI",
-		.valid_ops_mask = REGULATOR_CHANGE_STATUS,
-	},
-};
-
diff --git a/arch/arm/mach-ux500/board-mop500-regulators.h b/arch/arm/mach-ux500/board-mop500-regulators.h
new file mode 100644
index 0000000..2675fae
--- /dev/null
+++ b/arch/arm/mach-ux500/board-mop500-regulators.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2010
+ *
+ * License Terms: GNU General Public License v2
+ *
+ * Author: Bengt Jonsson <bengt.g.jonsson@stericsson.com> for ST-Ericsson
+ *
+ * MOP500 board specific initialization for regulators
+ */
+
+#ifndef __BOARD_MOP500_REGULATORS_H
+#define __BOARD_MOP500_REGULATORS_H
+
+#include <linux/regulator/machine.h>
+#include <linux/regulator/ab8500.h>
+
+extern struct regulator_init_data ab8500_regulators[AB8500_NUM_REGULATORS];
+
+#endif
diff --git a/arch/arm/mach-ux500/board-mop500.c b/arch/arm/mach-ux500/board-mop500.c
index a1c9ea1..a393f57 100644
--- a/arch/arm/mach-ux500/board-mop500.c
+++ b/arch/arm/mach-ux500/board-mop500.c
@@ -35,6 +35,7 @@
 #include "devices-db8500.h"
 #include "pins-db8500.h"
 #include "board-mop500.h"
+#include "board-mop500-regulators.h"
 
 static pin_cfg_t mop500_pins[] = {
 	/* SSP0 */
@@ -80,6 +81,8 @@
 
 static struct ab8500_platform_data ab8500_platdata = {
 	.irq_base	= MOP500_AB8500_IRQ_BASE,
+	.regulator	= ab8500_regulators,
+	.num_regulator	= ARRAY_SIZE(ab8500_regulators),
 };
 
 static struct resource ab8500_resources[] = {
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index c29f283..2b269c9 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -18,7 +18,6 @@
 #include <asm/smp_plat.h>
 #include <asm/system.h>
 #include <asm/tlbflush.h>
-#include <asm/smp_plat.h>
 
 #include "mm.h"
 
diff --git a/arch/arm/plat-mxc/include/mach/irqs.h b/arch/arm/plat-mxc/include/mach/irqs.h
index 58a49cc..ba65c92 100644
--- a/arch/arm/plat-mxc/include/mach/irqs.h
+++ b/arch/arm/plat-mxc/include/mach/irqs.h
@@ -70,7 +70,7 @@
 
 /* all normal IRQs can be FIQs */
 #define FIQ_START	0
-/* switch betwean IRQ and FIQ */
+/* switch between IRQ and FIQ */
 extern int mxc_set_irq_fiq(unsigned int irq, unsigned int type);
 
 #endif /* __ASM_ARCH_MXC_IRQS_H__ */
diff --git a/arch/arm/plat-omap/include/plat/omap_hwmod.h b/arch/arm/plat-omap/include/plat/omap_hwmod.h
index 6864a99..1eee85a 100644
--- a/arch/arm/plat-omap/include/plat/omap_hwmod.h
+++ b/arch/arm/plat-omap/include/plat/omap_hwmod.h
@@ -351,7 +351,7 @@
 /**
  * struct omap_hwmod_omap4_prcm - OMAP4-specific PRCM data
  * @clkctrl_reg: PRCM address of the clock control register
- * @rstctrl_reg: adress of the XXX_RSTCTRL register located in the PRM
+ * @rstctrl_reg: address of the XXX_RSTCTRL register located in the PRM
  * @submodule_wkdep_bit: bit shift of the WKDEP range
  */
 struct omap_hwmod_omap4_prcm {
diff --git a/arch/arm/plat-s3c24xx/devs.c b/arch/arm/plat-s3c24xx/devs.c
index 2f91057..8a42bc4 100644
--- a/arch/arm/plat-s3c24xx/devs.c
+++ b/arch/arm/plat-s3c24xx/devs.c
@@ -259,21 +259,6 @@
 
 EXPORT_SYMBOL(s3c_device_iis);
 
-/* ASoC PCM DMA */
-
-static u64 s3c_device_audio_dmamask = 0xffffffffUL;
-
-struct platform_device s3c_device_pcm = {
-	.name		  = "s3c24xx-pcm-audio",
-	.id		  = -1,
-	.dev              = {
-		.dma_mask = &s3c_device_audio_dmamask,
-		.coherent_dma_mask = 0xffffffffUL
-	}
-};
-
-EXPORT_SYMBOL(s3c_device_pcm);
-
 /* RTC */
 
 static struct resource s3c_rtc_resource[] = {
@@ -496,8 +481,10 @@
 	},
 };
 
+static u64 s3c_device_audio_dmamask = 0xffffffffUL;
+
 struct platform_device s3c_device_ac97 = {
-	.name		  = "s3c-ac97",
+	.name		  = "samsung-ac97",
 	.id		  = -1,
 	.num_resources	  = ARRAY_SIZE(s3c_ac97_resource),
 	.resource	  = s3c_ac97_resource,
diff --git a/arch/arm/plat-samsung/Makefile b/arch/arm/plat-samsung/Makefile
index afcce474..19d8a16 100644
--- a/arch/arm/plat-samsung/Makefile
+++ b/arch/arm/plat-samsung/Makefile
@@ -17,6 +17,7 @@
 obj-y				+= pwm-clock.o
 obj-y				+= gpio.o
 obj-y				+= gpio-config.o
+obj-y				+= dev-asocdma.o
 
 obj-$(CONFIG_SAMSUNG_GPIOLIB_4BIT)	+= gpiolib.o
 obj-$(CONFIG_SAMSUNG_CLKSRC)	+= clock-clksrc.o
diff --git a/arch/arm/plat-samsung/dev-asocdma.c b/arch/arm/plat-samsung/dev-asocdma.c
new file mode 100644
index 0000000..a068c4f
--- /dev/null
+++ b/arch/arm/plat-samsung/dev-asocdma.c
@@ -0,0 +1,25 @@
+/* linux/arch/arm/plat-samsung/dev-asocdma.c
+ *
+ * Copyright (c) 2010 Samsung Electronics Co. Ltd
+ *	Jaswinder Singh <jassi.brar@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <plat/devs.h>
+
+static u64 audio_dmamask = DMA_BIT_MASK(32);
+
+struct platform_device samsung_asoc_dma = {
+	.name		  = "samsung-audio",
+	.id		  = -1,
+	.dev              = {
+		.dma_mask = &audio_dmamask,
+		.coherent_dma_mask = DMA_BIT_MASK(32),
+	}
+};
+EXPORT_SYMBOL(samsung_asoc_dma);
diff --git a/arch/arm/plat-samsung/include/plat/audio.h b/arch/arm/plat-samsung/include/plat/audio.h
index 7712ff6..a0826ed 100644
--- a/arch/arm/plat-samsung/include/plat/audio.h
+++ b/arch/arm/plat-samsung/include/plat/audio.h
@@ -25,10 +25,34 @@
 #define S5PC100_SPDIF_GPG3 1
 extern void s5pc100_spdif_setup_gpio(int);
 
+struct samsung_i2s {
+/* If the Primary DAI has 5.1 Channels */
+#define QUIRK_PRI_6CHAN		(1 << 0)
+/* If the I2S block has a Stereo Overlay Channel */
+#define QUIRK_SEC_DAI		(1 << 1)
+/*
+ * If the I2S block has no internal prescaler or MUX (I2SMOD[10] bit),
+ * the machine driver must provide a suitably configured clock to the I2S block.
+ */
+#define QUIRK_NO_MUXPSR		(1 << 2)
+#define QUIRK_NEED_RSTCLR	(1 << 3)
+	/* Quirks of the I2S controller */
+	u32 quirks;
+
+	/*
+	 * Array of clock names that can be used to generate I2S signals.
+	 * Each index corresponds to a setting of the I2SMOD[10] bit.
+	 */
+	const char **src_clk;
+};
+
 /**
  * struct s3c_audio_pdata - common platform data for audio device drivers
  * @cfg_gpio: Callback function to setup mux'ed pins in I2S/PCM/AC97 mode
  */
 struct s3c_audio_pdata {
 	int (*cfg_gpio)(struct platform_device *);
+	union {
+		struct samsung_i2s i2s;
+	} type;
 };
diff --git a/arch/arm/plat-samsung/include/plat/devs.h b/arch/arm/plat-samsung/include/plat/devs.h
index 2d82a6c..e9e3b6e 100644
--- a/arch/arm/plat-samsung/include/plat/devs.h
+++ b/arch/arm/plat-samsung/include/plat/devs.h
@@ -32,7 +32,7 @@
 extern struct platform_device s3c64xx_device_spi0;
 extern struct platform_device s3c64xx_device_spi1;
 
-extern struct platform_device s3c_device_pcm;
+extern struct platform_device samsung_asoc_dma;
 
 extern struct platform_device s3c64xx_device_pcm0;
 extern struct platform_device s3c64xx_device_pcm1;
@@ -96,6 +96,15 @@
 extern struct platform_device s5pv210_device_iis2;
 extern struct platform_device s5pv210_device_spdif;
 
+extern struct platform_device s5pv310_device_ac97;
+extern struct platform_device s5pv310_device_pcm0;
+extern struct platform_device s5pv310_device_pcm1;
+extern struct platform_device s5pv310_device_pcm2;
+extern struct platform_device s5pv310_device_i2s0;
+extern struct platform_device s5pv310_device_i2s1;
+extern struct platform_device s5pv310_device_i2s2;
+extern struct platform_device s5pv310_device_spdif;
+
 extern struct platform_device s5p6442_device_pcm0;
 extern struct platform_device s5p6442_device_pcm1;
 extern struct platform_device s5p6442_device_iis0;
diff --git a/arch/arm/plat-samsung/pm.c b/arch/arm/plat-samsung/pm.c
index 27cfca5..5bf3f2f 100644
--- a/arch/arm/plat-samsung/pm.c
+++ b/arch/arm/plat-samsung/pm.c
@@ -355,7 +355,7 @@
 	s3c_pm_check_cleanup();
 }
 
-static struct platform_suspend_ops s3c_pm_ops = {
+static const struct platform_suspend_ops s3c_pm_ops = {
 	.enter		= s3c_pm_enter,
 	.prepare	= s3c_pm_prepare,
 	.finish		= s3c_pm_finish,
diff --git a/arch/avr32/mach-at32ap/pm.c b/arch/avr32/mach-at32ap/pm.c
index f021edf..32d680e 100644
--- a/arch/avr32/mach-at32ap/pm.c
+++ b/arch/avr32/mach-at32ap/pm.c
@@ -176,7 +176,7 @@
 	return 0;
 }
 
-static struct platform_suspend_ops avr32_pm_ops = {
+static const struct platform_suspend_ops avr32_pm_ops = {
 	.valid	= avr32_pm_valid_state,
 	.enter	= avr32_pm_enter,
 };
diff --git a/arch/blackfin/Makefile b/arch/blackfin/Makefile
index 46738d4..46f42b2 100644
--- a/arch/blackfin/Makefile
+++ b/arch/blackfin/Makefile
@@ -19,7 +19,7 @@
 endif
 KBUILD_AFLAGS           += $(call cc-option,-mno-fdpic)
 KBUILD_CFLAGS_MODULE    += -mlong-calls
-KBUILD_LDFLAGS_MODULE   += -m elf32bfin
+LDFLAGS                 += -m elf32bfin
 KALLSYMS         += --symbol-prefix=_
 
 KBUILD_DEFCONFIG := BF537-STAMP_defconfig
@@ -97,8 +97,11 @@
 rev-$(CONFIG_BF_REV_NONE) := none
 rev-$(CONFIG_BF_REV_ANY)  := any
 
-KBUILD_CFLAGS += -mcpu=$(cpu-y)-$(rev-y)
-KBUILD_AFLAGS += -mcpu=$(cpu-y)-$(rev-y)
+CPU_REV := $(cpu-y)-$(rev-y)
+export CPU_REV
+
+KBUILD_CFLAGS += -mcpu=$(CPU_REV)
+KBUILD_AFLAGS += -mcpu=$(CPU_REV)
 
 # - we utilize the silicon rev from the toolchain, so move it over to the checkflags
 CHECKFLAGS_SILICON = $(shell echo "" | $(CPP) $(KBUILD_CFLAGS) -dD - 2>/dev/null | awk '$$2 == "__SILICON_REVISION__" { print $$3 }')
diff --git a/arch/blackfin/boot/Makefile b/arch/blackfin/boot/Makefile
index 13d2dbd..0a49279 100644
--- a/arch/blackfin/boot/Makefile
+++ b/arch/blackfin/boot/Makefile
@@ -17,7 +17,7 @@
 
 quiet_cmd_uimage = UIMAGE  $@
       cmd_uimage = $(CONFIG_SHELL) $(MKIMAGE) -A $(ARCH) -O linux -T kernel \
-                   -C $(2) -n '$(MACHINE)-$(KERNELRELEASE)' \
+                   -C $(2) -n '$(CPU_REV)-$(KERNELRELEASE)' \
                    -e $(shell $(NM) vmlinux | awk '$$NF == "__start" {print $$1}') \
                    $(UIMAGE_OPTS-y) -d $< $@
 
diff --git a/arch/blackfin/configs/BF561-EZKIT-SMP_defconfig b/arch/blackfin/configs/BF561-EZKIT-SMP_defconfig
new file mode 100644
index 0000000..4cf4510
--- /dev/null
+++ b/arch/blackfin/configs/BF561-EZKIT-SMP_defconfig
@@ -0,0 +1,113 @@
+CONFIG_EXPERIMENTAL=y
+CONFIG_SYSVIPC=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_BLK_DEV_INITRD=y
+# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+CONFIG_EMBEDDED=y
+# CONFIG_SYSCTL_SYSCALL is not set
+# CONFIG_ELF_CORE is not set
+# CONFIG_FUTEX is not set
+# CONFIG_SIGNALFD is not set
+# CONFIG_TIMERFD is not set
+# CONFIG_EVENTFD is not set
+# CONFIG_AIO is not set
+CONFIG_SLAB=y
+CONFIG_MMAP_ALLOW_UNINITIALIZED=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_LBDAF is not set
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_IOSCHED_DEADLINE is not set
+# CONFIG_IOSCHED_CFQ is not set
+CONFIG_PREEMPT_VOLUNTARY=y
+CONFIG_BF561=y
+CONFIG_SMP=y
+CONFIG_IRQ_TIMER0=10
+CONFIG_CLKIN_HZ=30000000
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_NOMMU_INITIAL_TRIM_EXCESS=0
+CONFIG_BFIN_GPTIMERS=m
+CONFIG_C_CDPRIO=y
+CONFIG_BANK_3=0xAAC2
+CONFIG_BINFMT_FLAT=y
+CONFIG_BINFMT_ZFLAT=y
+CONFIG_PM=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_PNP=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_LRO is not set
+# CONFIG_INET_DIAG is not set
+# CONFIG_IPV6 is not set
+CONFIG_IRDA=m
+CONFIG_IRLAN=m
+CONFIG_IRCOMM=m
+CONFIG_IRDA_CACHE_LAST_LSAP=y
+CONFIG_IRTTY_SIR=m
+# CONFIG_WIRELESS is not set
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+# CONFIG_FW_LOADER is not set
+CONFIG_MTD=y
+CONFIG_MTD_PARTITIONS=y
+CONFIG_MTD_CMDLINE_PARTS=y
+CONFIG_MTD_CHAR=m
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_CFI=m
+CONFIG_MTD_CFI_AMDSTD=m
+CONFIG_MTD_RAM=y
+CONFIG_MTD_ROM=m
+CONFIG_MTD_PHYSMAP=m
+CONFIG_BLK_DEV_RAM=y
+CONFIG_NETDEVICES=y
+CONFIG_NET_ETHERNET=y
+CONFIG_SMC91X=y
+# CONFIG_NETDEV_1000 is not set
+# CONFIG_NETDEV_10000 is not set
+# CONFIG_WLAN is not set
+CONFIG_INPUT=m
+# CONFIG_INPUT_MOUSEDEV is not set
+CONFIG_INPUT_EVDEV=m
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_SERIO is not set
+# CONFIG_VT is not set
+# CONFIG_DEVKMEM is not set
+CONFIG_BFIN_JTAG_COMM=m
+CONFIG_SERIAL_BFIN=y
+CONFIG_SERIAL_BFIN_CONSOLE=y
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_HW_RANDOM is not set
+CONFIG_SPI=y
+CONFIG_SPI_BFIN=y
+CONFIG_GPIOLIB=y
+CONFIG_GPIO_SYSFS=y
+# CONFIG_HWMON is not set
+CONFIG_WATCHDOG=y
+CONFIG_BFIN_WDT=y
+# CONFIG_USB_SUPPORT is not set
+# CONFIG_DNOTIFY is not set
+CONFIG_JFFS2_FS=m
+CONFIG_NFS_FS=m
+CONFIG_NFS_V3=y
+CONFIG_SMB_FS=m
+CONFIG_DEBUG_KERNEL=y
+CONFIG_DEBUG_SHIRQ=y
+CONFIG_DETECT_HUNG_TASK=y
+CONFIG_DEBUG_INFO=y
+# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+# CONFIG_FTRACE is not set
+CONFIG_DEBUG_MMRS=y
+CONFIG_DEBUG_HWERR=y
+CONFIG_EXACT_HWERR=y
+CONFIG_DEBUG_DOUBLEFAULT=y
+CONFIG_DEBUG_BFIN_HWTRACE_COMPRESSION_ONE=y
+CONFIG_EARLY_PRINTK=y
+CONFIG_CPLB_INFO=y
+CONFIG_CRYPTO=y
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/blackfin/configs/DNP5370_defconfig b/arch/blackfin/configs/DNP5370_defconfig
new file mode 100644
index 0000000..0ebc7d9
--- /dev/null
+++ b/arch/blackfin/configs/DNP5370_defconfig
@@ -0,0 +1,121 @@
+CONFIG_EXPERIMENTAL=y
+CONFIG_LOCALVERSION="DNP5370"
+CONFIG_SYSVIPC=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_EMBEDDED=y
+CONFIG_SLOB=y
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_IOSCHED_CFQ is not set
+CONFIG_BF537=y
+CONFIG_BF_REV_0_3=y
+CONFIG_DNP5370=y
+CONFIG_IRQ_ERROR=7
+# CONFIG_CYCLES_CLOCKSOURCE is not set
+CONFIG_C_CDPRIO=y
+CONFIG_C_AMBEN_B0_B1_B2=y
+CONFIG_PM=y
+# CONFIG_SUSPEND is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_RARP=y
+CONFIG_SYN_COOKIES=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_LRO is not set
+# CONFIG_INET_DIAG is not set
+# CONFIG_IPV6 is not set
+CONFIG_LLC2=y
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_MTD=y
+CONFIG_MTD_DEBUG=y
+CONFIG_MTD_DEBUG_VERBOSE=1
+CONFIG_MTD_PARTITIONS=y
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLOCK=y
+CONFIG_NFTL=y
+CONFIG_NFTL_RW=y
+CONFIG_MTD_CFI=y
+CONFIG_MTD_CFI_AMDSTD=y
+CONFIG_MTD_ROM=y
+CONFIG_MTD_ABSENT=y
+CONFIG_MTD_COMPLEX_MAPPINGS=y
+CONFIG_MTD_PHYSMAP=y
+CONFIG_MTD_UCLINUX=y
+CONFIG_MTD_PLATRAM=y
+CONFIG_MTD_DATAFLASH=y
+CONFIG_MTD_BLOCK2MTD=y
+CONFIG_MTD_NAND=y
+CONFIG_MTD_NAND_PLATFORM=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_RAM=y
+# CONFIG_MISC_DEVICES is not set
+CONFIG_NETDEVICES=y
+CONFIG_DAVICOM_PHY=y
+CONFIG_NET_ETHERNET=y
+CONFIG_BFIN_MAC=y
+# CONFIG_NETDEV_1000 is not set
+# CONFIG_NETDEV_10000 is not set
+# CONFIG_WLAN is not set
+# CONFIG_INPUT is not set
+# CONFIG_SERIO is not set
+# CONFIG_BFIN_DMA_INTERFACE is not set
+# CONFIG_VT is not set
+# CONFIG_DEVKMEM is not set
+CONFIG_BFIN_JTAG_COMM=y
+CONFIG_BFIN_JTAG_COMM_CONSOLE=y
+CONFIG_SERIAL_BFIN=y
+CONFIG_SERIAL_BFIN_CONSOLE=y
+CONFIG_SERIAL_BFIN_UART0=y
+CONFIG_LEGACY_PTY_COUNT=64
+# CONFIG_HW_RANDOM is not set
+CONFIG_I2C=y
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_BLACKFIN_TWI=y
+CONFIG_SPI=y
+CONFIG_SPI_BFIN=y
+CONFIG_SPI_SPIDEV=y
+CONFIG_GPIOLIB=y
+CONFIG_GPIO_SYSFS=y
+CONFIG_SENSORS_LM75=y
+# CONFIG_USB_SUPPORT is not set
+CONFIG_MMC=y
+CONFIG_MMC_SPI=y
+CONFIG_DMADEVICES=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+# CONFIG_DNOTIFY is not set
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_FAT_DEFAULT_CODEPAGE=850
+CONFIG_JFFS2_FS=y
+CONFIG_CRAMFS=y
+CONFIG_ROMFS_FS=y
+CONFIG_ROMFS_BACKED_BY_BOTH=y
+# CONFIG_NETWORK_FILESYSTEMS is not set
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_CODEPAGE_850=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_DEBUG_KERNEL=y
+CONFIG_DEBUG_SHIRQ=y
+CONFIG_DETECT_HUNG_TASK=y
+CONFIG_DEBUG_OBJECTS=y
+CONFIG_DEBUG_LOCK_ALLOC=y
+CONFIG_DEBUG_KOBJECT=y
+CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_VM=y
+CONFIG_DEBUG_MEMORY_INIT=y
+CONFIG_DEBUG_LIST=y
+# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+CONFIG_SYSCTL_SYSCALL_CHECK=y
+CONFIG_PAGE_POISONING=y
+# CONFIG_FTRACE is not set
+CONFIG_DEBUG_DOUBLEFAULT=y
+CONFIG_CPLB_INFO=y
+CONFIG_CRC_CCITT=y
diff --git a/arch/blackfin/include/asm/bfin_dma.h b/arch/blackfin/include/asm/bfin_dma.h
new file mode 100644
index 0000000..d511207
--- /dev/null
+++ b/arch/blackfin/include/asm/bfin_dma.h
@@ -0,0 +1,91 @@
+/*
+ * bfin_dma.h - Blackfin DMA defines/structures/etc...
+ *
+ * Copyright 2004-2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#ifndef __ASM_BFIN_DMA_H__
+#define __ASM_BFIN_DMA_H__
+
+#include <linux/types.h>
+
+/* DMA_CONFIG Masks */
+#define DMAEN			0x0001	/* DMA Channel Enable */
+#define WNR				0x0002	/* Channel Direction (W/R*) */
+#define WDSIZE_8		0x0000	/* Transfer Word Size = 8 */
+#define WDSIZE_16		0x0004	/* Transfer Word Size = 16 */
+#define WDSIZE_32		0x0008	/* Transfer Word Size = 32 */
+#define DMA2D			0x0010	/* DMA Mode (2D/1D*) */
+#define RESTART			0x0020	/* DMA Buffer Clear */
+#define DI_SEL			0x0040	/* Data Interrupt Timing Select */
+#define DI_EN			0x0080	/* Data Interrupt Enable */
+#define NDSIZE_0		0x0000	/* Next Descriptor Size = 0 (Stop/Autobuffer) */
+#define NDSIZE_1		0x0100	/* Next Descriptor Size = 1 */
+#define NDSIZE_2		0x0200	/* Next Descriptor Size = 2 */
+#define NDSIZE_3		0x0300	/* Next Descriptor Size = 3 */
+#define NDSIZE_4		0x0400	/* Next Descriptor Size = 4 */
+#define NDSIZE_5		0x0500	/* Next Descriptor Size = 5 */
+#define NDSIZE_6		0x0600	/* Next Descriptor Size = 6 */
+#define NDSIZE_7		0x0700	/* Next Descriptor Size = 7 */
+#define NDSIZE_8		0x0800	/* Next Descriptor Size = 8 */
+#define NDSIZE_9		0x0900	/* Next Descriptor Size = 9 */
+#define NDSIZE			0x0f00	/* Next Descriptor Size */
+#define DMAFLOW			0x7000	/* Flow Control */
+#define DMAFLOW_STOP	0x0000	/* Stop Mode */
+#define DMAFLOW_AUTO	0x1000	/* Autobuffer Mode */
+#define DMAFLOW_ARRAY	0x4000	/* Descriptor Array Mode */
+#define DMAFLOW_SMALL	0x6000	/* Small Model Descriptor List Mode */
+#define DMAFLOW_LARGE	0x7000	/* Large Model Descriptor List Mode */
+
+/* DMA_IRQ_STATUS Masks */
+#define DMA_DONE		0x0001	/* DMA Completion Interrupt Status */
+#define DMA_ERR			0x0002	/* DMA Error Interrupt Status */
+#define DFETCH			0x0004	/* DMA Descriptor Fetch Indicator */
+#define DMA_RUN			0x0008	/* DMA Channel Running Indicator */
+
+/*
+ * All Blackfin system MMRs are padded to 32bits even if the register
+ * itself is only 16bits.  So use a helper macro to streamline this.
+ */
+#define __BFP(m) u16 m; u16 __pad_##m
+
+/*
+ * bfin dma registers layout
+ */
+struct bfin_dma_regs {
+	u32 next_desc_ptr;
+	u32 start_addr;
+	__BFP(config);
+	u32 __pad0;
+	__BFP(x_count);
+	__BFP(x_modify);
+	__BFP(y_count);
+	__BFP(y_modify);
+	u32 curr_desc_ptr;
+	u32 curr_addr;
+	__BFP(irq_status);
+	__BFP(peripheral_map);
+	__BFP(curr_x_count);
+	u32 __pad1;
+	__BFP(curr_y_count);
+	u32 __pad2;
+};
+
+/*
+ * bfin handshake mdma registers layout
+ */
+struct bfin_hmdma_regs {
+	__BFP(control);
+	__BFP(ecinit);
+	__BFP(bcinit);
+	__BFP(ecurgent);
+	__BFP(ecoverflow);
+	__BFP(ecount);
+	__BFP(bcount);
+};
+
+#undef __BFP
+
+#endif
diff --git a/arch/blackfin/include/asm/bfin_serial.h b/arch/blackfin/include/asm/bfin_serial.h
new file mode 100644
index 0000000..1ff9f14
--- /dev/null
+++ b/arch/blackfin/include/asm/bfin_serial.h
@@ -0,0 +1,275 @@
+/*
+ * bfin_serial.h - Blackfin UART/Serial definitions
+ *
+ * Copyright 2006-2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#ifndef __BFIN_ASM_SERIAL_H__
+#define __BFIN_ASM_SERIAL_H__
+
+#include <linux/serial_core.h>
+#include <mach/anomaly.h>
+#include <mach/bfin_serial.h>
+
+#if defined(CONFIG_BFIN_UART0_CTSRTS) || \
+    defined(CONFIG_BFIN_UART1_CTSRTS) || \
+    defined(CONFIG_BFIN_UART2_CTSRTS) || \
+    defined(CONFIG_BFIN_UART3_CTSRTS)
+# ifdef BFIN_UART_BF54X_STYLE
+#  define CONFIG_SERIAL_BFIN_HARD_CTSRTS
+# else
+#  define CONFIG_SERIAL_BFIN_CTSRTS
+# endif
+#endif
+
+struct circ_buf;
+struct timer_list;
+struct work_struct;
+
+struct bfin_serial_port {
+	struct uart_port port;
+	unsigned int old_status;
+	int status_irq;
+#ifndef BFIN_UART_BF54X_STYLE
+	unsigned int lsr;
+#endif
+#ifdef CONFIG_SERIAL_BFIN_DMA
+	int tx_done;
+	int tx_count;
+	struct circ_buf rx_dma_buf;
+	struct timer_list rx_dma_timer;
+	int rx_dma_nrows;
+	unsigned int tx_dma_channel;
+	unsigned int rx_dma_channel;
+	struct work_struct tx_dma_workqueue;
+#elif ANOMALY_05000363
+	unsigned int anomaly_threshold;
+#endif
+#ifdef CONFIG_SERIAL_BFIN_HARD_CTSRTS
+	int scts;
+#endif
+#if defined(CONFIG_SERIAL_BFIN_CTSRTS) || \
+	defined(CONFIG_SERIAL_BFIN_HARD_CTSRTS)
+	int cts_pin;
+	int rts_pin;
+#endif
+};
+
+/* UART_LCR Masks */
+#define WLS(x)                   (((x)-5) & 0x03)  /* Word Length Select */
+#define STB                      0x04  /* Stop Bits */
+#define PEN                      0x08  /* Parity Enable */
+#define EPS                      0x10  /* Even Parity Select */
+#define STP                      0x20  /* Stick Parity */
+#define SB                       0x40  /* Set Break */
+#define DLAB                     0x80  /* Divisor Latch Access */
+
+/* UART_LSR Masks */
+#define DR                       0x01  /* Data Ready */
+#define OE                       0x02  /* Overrun Error */
+#define PE                       0x04  /* Parity Error */
+#define FE                       0x08  /* Framing Error */
+#define BI                       0x10  /* Break Interrupt */
+#define THRE                     0x20  /* THR Empty */
+#define TEMT                     0x40  /* TSR and UART_THR Empty */
+#define TFI                      0x80  /* Transmission Finished Indicator */
+
+/* UART_IER Masks */
+#define ERBFI                    0x01  /* Enable Receive Buffer Full Interrupt */
+#define ETBEI                    0x02  /* Enable Transmit Buffer Empty Interrupt */
+#define ELSI                     0x04  /* Enable RX Status Interrupt */
+#define EDSSI                    0x08  /* Enable Modem Status Interrupt */
+#define EDTPTI                   0x10  /* Enable DMA Transmit PIRQ Interrupt */
+#define ETFI                     0x20  /* Enable Transmission Finished Interrupt */
+#define ERFCI                    0x40  /* Enable Receive FIFO Count Interrupt */
+
+/* UART_MCR Masks */
+#define XOFF                     0x01  /* Transmitter Off */
+#define MRTS                     0x02  /* Manual Request To Send */
+#define RFIT                     0x04  /* Receive FIFO IRQ Threshold */
+#define RFRT                     0x08  /* Receive FIFO RTS Threshold */
+#define LOOP_ENA                 0x10  /* Loopback Mode Enable */
+#define FCPOL                    0x20  /* Flow Control Pin Polarity */
+#define ARTS                     0x40  /* Automatic Request To Send */
+#define ACTS                     0x80  /* Automatic Clear To Send */
+
+/* UART_MSR Masks */
+#define SCTS                     0x01  /* Sticky CTS */
+#define CTS                      0x10  /* Clear To Send */
+#define RFCS                     0x20  /* Receive FIFO Count Status */
+
+/* UART_GCTL Masks */
+#define UCEN                     0x01  /* Enable UARTx Clocks */
+#define IREN                     0x02  /* Enable IrDA Mode */
+#define TPOLC                    0x04  /* IrDA TX Polarity Change */
+#define RPOLC                    0x08  /* IrDA RX Polarity Change */
+#define FPE                      0x10  /* Force Parity Error On Transmit */
+#define FFE                      0x20  /* Force Framing Error On Transmit */
+
+#ifdef BFIN_UART_BF54X_STYLE
+# define OFFSET_DLL              0x00  /* Divisor Latch (Low-Byte)        */
+# define OFFSET_DLH              0x04  /* Divisor Latch (High-Byte)       */
+# define OFFSET_GCTL             0x08  /* Global Control Register         */
+# define OFFSET_LCR              0x0C  /* Line Control Register           */
+# define OFFSET_MCR              0x10  /* Modem Control Register          */
+# define OFFSET_LSR              0x14  /* Line Status Register            */
+# define OFFSET_MSR              0x18  /* Modem Status Register           */
+# define OFFSET_SCR              0x1C  /* SCR Scratch Register            */
+# define OFFSET_IER_SET          0x20  /* Set Interrupt Enable Register   */
+# define OFFSET_IER_CLEAR        0x24  /* Clear Interrupt Enable Register */
+# define OFFSET_THR              0x28  /* Transmit Holding register       */
+# define OFFSET_RBR              0x2C  /* Receive Buffer register         */
+#else /* BF533 style */
+# define OFFSET_THR              0x00  /* Transmit Holding register         */
+# define OFFSET_RBR              0x00  /* Receive Buffer register           */
+# define OFFSET_DLL              0x00  /* Divisor Latch (Low-Byte)          */
+# define OFFSET_DLH              0x04  /* Divisor Latch (High-Byte)         */
+# define OFFSET_IER              0x04  /* Interrupt Enable Register         */
+# define OFFSET_IIR              0x08  /* Interrupt Identification Register */
+# define OFFSET_LCR              0x0C  /* Line Control Register             */
+# define OFFSET_MCR              0x10  /* Modem Control Register            */
+# define OFFSET_LSR              0x14  /* Line Status Register              */
+# define OFFSET_MSR              0x18  /* Modem Status Register             */
+# define OFFSET_SCR              0x1C  /* SCR Scratch Register              */
+# define OFFSET_GCTL             0x24  /* Global Control Register           */
+/* code should not need IIR, so force a build error if it is used */
+# undef OFFSET_IIR
+#endif
+
+/*
+ * All Blackfin system MMRs are padded to 32bits even if the register
+ * itself is only 16bits.  So use a helper macro to streamline this.
+ */
+#define __BFP(m) u16 m; u16 __pad_##m
+struct bfin_uart_regs {
+#ifdef BFIN_UART_BF54X_STYLE
+	__BFP(dll);
+	__BFP(dlh);
+	__BFP(gctl);
+	__BFP(lcr);
+	__BFP(mcr);
+	__BFP(lsr);
+	__BFP(msr);
+	__BFP(scr);
+	__BFP(ier_set);
+	__BFP(ier_clear);
+	__BFP(thr);
+	__BFP(rbr);
+#else
+	union {
+		u16 dll;
+		u16 thr;
+		const u16 rbr;
+	};
+	const u16 __pad0;
+	union {
+		u16 dlh;
+		u16 ier;
+	};
+	const u16 __pad1;
+	const __BFP(iir);
+	__BFP(lcr);
+	__BFP(mcr);
+	__BFP(lsr);
+	__BFP(msr);
+	__BFP(scr);
+	const u32 __pad2;
+	__BFP(gctl);
+#endif
+};
+#undef __BFP
+
+#ifndef port_membase
+# define port_membase(p) (((struct bfin_serial_port *)(p))->port.membase)
+#endif
+
+#define UART_GET_CHAR(p)      bfin_read16(port_membase(p) + OFFSET_RBR)
+#define UART_GET_DLL(p)       bfin_read16(port_membase(p) + OFFSET_DLL)
+#define UART_GET_DLH(p)       bfin_read16(port_membase(p) + OFFSET_DLH)
+#define UART_GET_GCTL(p)      bfin_read16(port_membase(p) + OFFSET_GCTL)
+#define UART_GET_LCR(p)       bfin_read16(port_membase(p) + OFFSET_LCR)
+#define UART_GET_MCR(p)       bfin_read16(port_membase(p) + OFFSET_MCR)
+#define UART_GET_MSR(p)       bfin_read16(port_membase(p) + OFFSET_MSR)
+
+#define UART_PUT_CHAR(p, v)   bfin_write16(port_membase(p) + OFFSET_THR, v)
+#define UART_PUT_DLL(p, v)    bfin_write16(port_membase(p) + OFFSET_DLL, v)
+#define UART_PUT_DLH(p, v)    bfin_write16(port_membase(p) + OFFSET_DLH, v)
+#define UART_PUT_GCTL(p, v)   bfin_write16(port_membase(p) + OFFSET_GCTL, v)
+#define UART_PUT_LCR(p, v)    bfin_write16(port_membase(p) + OFFSET_LCR, v)
+#define UART_PUT_MCR(p, v)    bfin_write16(port_membase(p) + OFFSET_MCR, v)
+
+#ifdef BFIN_UART_BF54X_STYLE
+
+#define UART_CLEAR_IER(p, v)  bfin_write16(port_membase(p) + OFFSET_IER_CLEAR, v)
+#define UART_GET_IER(p)       bfin_read16(port_membase(p) + OFFSET_IER_SET)
+#define UART_SET_IER(p, v)    bfin_write16(port_membase(p) + OFFSET_IER_SET, v)
+
+#define UART_CLEAR_DLAB(p)    /* MMRs not muxed on BF54x */
+#define UART_SET_DLAB(p)      /* MMRs not muxed on BF54x */
+
+#define UART_CLEAR_LSR(p)     bfin_write16(port_membase(p) + OFFSET_LSR, -1)
+#define UART_GET_LSR(p)       bfin_read16(port_membase(p) + OFFSET_LSR)
+#define UART_PUT_LSR(p, v)    bfin_write16(port_membase(p) + OFFSET_LSR, v)
+
+/* This handles hard CTS/RTS */
+#define BFIN_UART_CTSRTS_HARD
+#define UART_CLEAR_SCTS(p)      bfin_write16((port_membase(p) + OFFSET_MSR), SCTS)
+#define UART_GET_CTS(x)         (UART_GET_MSR(x) & CTS)
+#define UART_DISABLE_RTS(x)     UART_PUT_MCR(x, UART_GET_MCR(x) & ~(ARTS | MRTS))
+#define UART_ENABLE_RTS(x)      UART_PUT_MCR(x, UART_GET_MCR(x) | MRTS | ARTS)
+#define UART_ENABLE_INTS(x, v)  UART_SET_IER(x, v)
+#define UART_DISABLE_INTS(x)    UART_CLEAR_IER(x, 0xF)
+
+#else /* BF533 style */
+
+#define UART_CLEAR_IER(p, v)  UART_PUT_IER(p, UART_GET_IER(p) & ~(v))
+#define UART_GET_IER(p)       bfin_read16(port_membase(p) + OFFSET_IER)
+#define UART_PUT_IER(p, v)    bfin_write16(port_membase(p) + OFFSET_IER, v)
+#define UART_SET_IER(p, v)    UART_PUT_IER(p, UART_GET_IER(p) | (v))
+
+#define UART_CLEAR_DLAB(p)    do { UART_PUT_LCR(p, UART_GET_LCR(p) & ~DLAB); SSYNC(); } while (0)
+#define UART_SET_DLAB(p)      do { UART_PUT_LCR(p, UART_GET_LCR(p) | DLAB); SSYNC(); } while (0)
+
+#ifndef put_lsr_cache
+# define put_lsr_cache(p, v) (((struct bfin_serial_port *)(p))->lsr = (v))
+#endif
+#ifndef get_lsr_cache
+# define get_lsr_cache(p)    (((struct bfin_serial_port *)(p))->lsr)
+#endif
+
+/* The hardware clears the LSR bits upon read, so we need to cache
+ * some of the more fun bits in software so they don't get lost
+ * when checking the LSR in other code paths (TX).
+ */
+static inline void UART_CLEAR_LSR(void *p)
+{
+	put_lsr_cache(p, 0);
+	bfin_write16(port_membase(p) + OFFSET_LSR, -1);
+}
+static inline unsigned int UART_GET_LSR(void *p)
+{
+	unsigned int lsr = bfin_read16(port_membase(p) + OFFSET_LSR);
+	put_lsr_cache(p, get_lsr_cache(p) | (lsr & (BI|FE|PE|OE)));
+	return lsr | get_lsr_cache(p);
+}
+static inline void UART_PUT_LSR(void *p, uint16_t val)
+{
+	put_lsr_cache(p, get_lsr_cache(p) & ~val);
+}
+
+/* This handles soft CTS/RTS */
+#define UART_GET_CTS(x)        gpio_get_value((x)->cts_pin)
+#define UART_DISABLE_RTS(x)    gpio_set_value((x)->rts_pin, 1)
+#define UART_ENABLE_RTS(x)     gpio_set_value((x)->rts_pin, 0)
+#define UART_ENABLE_INTS(x, v) UART_PUT_IER(x, v)
+#define UART_DISABLE_INTS(x)   UART_PUT_IER(x, 0)
+
+#endif
+
+#ifndef BFIN_UART_TX_FIFO_SIZE
+# define BFIN_UART_TX_FIFO_SIZE 2
+#endif
+
+#endif /* __BFIN_ASM_SERIAL_H__ */
diff --git a/arch/blackfin/include/asm/bitops.h b/arch/blackfin/include/asm/bitops.h
index 3f7ef4d..29f4fd8 100644
--- a/arch/blackfin/include/asm/bitops.h
+++ b/arch/blackfin/include/asm/bitops.h
@@ -108,7 +108,9 @@
 #define smp_mb__before_clear_bit()	barrier()
 #define smp_mb__after_clear_bit()	barrier()
 
+#define test_bit __skip_test_bit
 #include <asm-generic/bitops/non-atomic.h>
+#undef test_bit
 
 #endif /* CONFIG_SMP */
 
diff --git a/arch/blackfin/include/asm/cache.h b/arch/blackfin/include/asm/cache.h
index bd0641a..568885a 100644
--- a/arch/blackfin/include/asm/cache.h
+++ b/arch/blackfin/include/asm/cache.h
@@ -7,6 +7,8 @@
 #ifndef __ARCH_BLACKFIN_CACHE_H
 #define __ARCH_BLACKFIN_CACHE_H
 
+#include <linux/linkage.h>	/* for asmlinkage */
+
 /*
  * Bytes per L1 cache line
  * Blackfin loads 32 bytes for cache
diff --git a/arch/blackfin/include/asm/cacheflush.h b/arch/blackfin/include/asm/cacheflush.h
index 2666ff8..77135b6 100644
--- a/arch/blackfin/include/asm/cacheflush.h
+++ b/arch/blackfin/include/asm/cacheflush.h
@@ -11,6 +11,9 @@
 
 #include <asm/blackfin.h>	/* for SSYNC() */
 #include <asm/sections.h>	/* for _ramend */
+#ifdef CONFIG_SMP
+#include <asm/smp.h>
+#endif
 
 extern void blackfin_icache_flush_range(unsigned long start_address, unsigned long end_address);
 extern void blackfin_dcache_flush_range(unsigned long start_address, unsigned long end_address);
diff --git a/arch/blackfin/include/asm/dma.h b/arch/blackfin/include/asm/dma.h
index eedf3ca6..d9dbc1a 100644
--- a/arch/blackfin/include/asm/dma.h
+++ b/arch/blackfin/include/asm/dma.h
@@ -14,40 +14,7 @@
 #include <asm/blackfin.h>
 #include <asm/page.h>
 #include <asm-generic/dma.h>
-
-/* DMA_CONFIG Masks */
-#define DMAEN			0x0001	/* DMA Channel Enable */
-#define WNR				0x0002	/* Channel Direction (W/R*) */
-#define WDSIZE_8		0x0000	/* Transfer Word Size = 8 */
-#define WDSIZE_16		0x0004	/* Transfer Word Size = 16 */
-#define WDSIZE_32		0x0008	/* Transfer Word Size = 32 */
-#define DMA2D			0x0010	/* DMA Mode (2D/1D*) */
-#define RESTART			0x0020	/* DMA Buffer Clear */
-#define DI_SEL			0x0040	/* Data Interrupt Timing Select */
-#define DI_EN			0x0080	/* Data Interrupt Enable */
-#define NDSIZE_0		0x0000	/* Next Descriptor Size = 0 (Stop/Autobuffer) */
-#define NDSIZE_1		0x0100	/* Next Descriptor Size = 1 */
-#define NDSIZE_2		0x0200	/* Next Descriptor Size = 2 */
-#define NDSIZE_3		0x0300	/* Next Descriptor Size = 3 */
-#define NDSIZE_4		0x0400	/* Next Descriptor Size = 4 */
-#define NDSIZE_5		0x0500	/* Next Descriptor Size = 5 */
-#define NDSIZE_6		0x0600	/* Next Descriptor Size = 6 */
-#define NDSIZE_7		0x0700	/* Next Descriptor Size = 7 */
-#define NDSIZE_8		0x0800	/* Next Descriptor Size = 8 */
-#define NDSIZE_9		0x0900	/* Next Descriptor Size = 9 */
-#define NDSIZE			0x0f00	/* Next Descriptor Size */
-#define DMAFLOW			0x7000	/* Flow Control */
-#define DMAFLOW_STOP	0x0000	/* Stop Mode */
-#define DMAFLOW_AUTO	0x1000	/* Autobuffer Mode */
-#define DMAFLOW_ARRAY	0x4000	/* Descriptor Array Mode */
-#define DMAFLOW_SMALL	0x6000	/* Small Model Descriptor List Mode */
-#define DMAFLOW_LARGE	0x7000	/* Large Model Descriptor List Mode */
-
-/* DMA_IRQ_STATUS Masks */
-#define DMA_DONE		0x0001	/* DMA Completion Interrupt Status */
-#define DMA_ERR			0x0002	/* DMA Error Interrupt Status */
-#define DFETCH			0x0004	/* DMA Descriptor Fetch Indicator */
-#define DMA_RUN			0x0008	/* DMA Channel Running Indicator */
+#include <asm/bfin_dma.h>
 
 /*-------------------------
  * config reg bits value
@@ -149,7 +116,7 @@
 *	DMA API's
 *******************************************************************************/
 extern struct dma_channel dma_ch[MAX_DMA_CHANNELS];
-extern struct dma_register *dma_io_base_addr[MAX_DMA_CHANNELS];
+extern struct dma_register * const dma_io_base_addr[MAX_DMA_CHANNELS];
 extern int channel2irq(unsigned int channel);
 
 static inline void set_dma_start_addr(unsigned int channel, unsigned long addr)
diff --git a/arch/blackfin/include/asm/dpmc.h b/arch/blackfin/include/asm/dpmc.h
index efcc3ae..3047120 100644
--- a/arch/blackfin/include/asm/dpmc.h
+++ b/arch/blackfin/include/asm/dpmc.h
@@ -9,6 +9,8 @@
 #ifndef _BLACKFIN_DPMC_H_
 #define _BLACKFIN_DPMC_H_
 
+#include <mach/pll.h>
+
 /* PLL_CTL Masks */
 #define DF			0x0001	/* 0: PLL = CLKIN, 1: PLL = CLKIN/2 */
 #define PLL_OFF			0x0002	/* PLL Not Powered */
diff --git a/arch/blackfin/include/asm/io.h b/arch/blackfin/include/asm/io.h
index 234fbac..dccae26 100644
--- a/arch/blackfin/include/asm/io.h
+++ b/arch/blackfin/include/asm/io.h
@@ -1,5 +1,5 @@
 /*
- * Copyright 2004-2009 Analog Devices Inc.
+ * Copyright 2004-2010 Analog Devices Inc.
  *
  * Licensed under the GPL-2 or later.
  */
@@ -7,148 +7,48 @@
 #ifndef _BFIN_IO_H
 #define _BFIN_IO_H
 
-#ifdef __KERNEL__
-
-#ifndef __ASSEMBLY__
-#include <linux/types.h>
-#endif
 #include <linux/compiler.h>
+#include <linux/types.h>
+#include <asm/byteorder.h>
 
-/*
- * These are for ISA/PCI shared memory _only_ and should never be used
- * on any other type of memory, including Zorro memory. They are meant to
- * access the bus in the bus byte order which is little-endian!.
- *
- * readX/writeX() are used to access memory mapped devices. On some
- * architectures the memory mapped IO stuff needs to be accessed
- * differently. On the bfin architecture, we just read/write the
- * memory location directly.
- */
-#ifndef __ASSEMBLY__
-
-static inline unsigned char readb(const volatile void __iomem *addr)
-{
-	unsigned int val;
-	int tmp;
-
-	__asm__ __volatile__ (
-		"cli %1;"
-		"NOP; NOP; SSYNC;"
-		"%0 = b [%2] (z);"
-		"sti %1;"
-		: "=d"(val), "=d"(tmp)
-		: "a"(addr)
-	);
-
-	return (unsigned char) val;
+#define DECLARE_BFIN_RAW_READX(size, type, asm, asm_sign) \
+static inline type __raw_read##size(const volatile void __iomem *addr) \
+{ \
+	unsigned int val; \
+	int tmp; \
+	__asm__ __volatile__ ( \
+		"cli %1;" \
+		"NOP; NOP; SSYNC;" \
+		"%0 = "#asm" [%2] "#asm_sign";" \
+		"sti %1;" \
+		: "=d"(val), "=d"(tmp) \
+		: "a"(addr) \
+	); \
+	return (type) val; \
 }
-
-static inline unsigned short readw(const volatile void __iomem *addr)
-{
-	unsigned int val;
-	int tmp;
-
-	__asm__ __volatile__ (
-		"cli %1;"
-		"NOP; NOP; SSYNC;"
-		"%0 = w [%2] (z);"
-		"sti %1;"
-		: "=d"(val), "=d"(tmp)
-		: "a"(addr)
-	);
-
-	return (unsigned short) val;
-}
-
-static inline unsigned int readl(const volatile void __iomem *addr)
-{
-	unsigned int val;
-	int tmp;
-
-	__asm__ __volatile__ (
-		"cli %1;"
-		"NOP; NOP; SSYNC;"
-		"%0 = [%2];"
-		"sti %1;"
-		: "=d"(val), "=d"(tmp)
-		: "a"(addr)
-	);
-
-	return val;
-}
-
-#endif /*  __ASSEMBLY__ */
-
-#define writeb(b, addr) (void)((*(volatile unsigned char *) (addr)) = (b))
-#define writew(b, addr) (void)((*(volatile unsigned short *) (addr)) = (b))
-#define writel(b, addr) (void)((*(volatile unsigned int *) (addr)) = (b))
-
-#define __raw_readb readb
-#define __raw_readw readw
-#define __raw_readl readl
-#define __raw_writeb writeb
-#define __raw_writew writew
-#define __raw_writel writel
-#define memset_io(a, b, c)	memset((void *)(a), (b), (c))
-#define memcpy_fromio(a, b, c)	memcpy((a), (void *)(b), (c))
-#define memcpy_toio(a, b, c)	memcpy((void *)(a), (b), (c))
-
-/* Convert "I/O port addresses" to actual addresses.  i.e. ugly casts. */
-#define __io(port) ((void *)(unsigned long)(port))
-
-#define inb(port)    readb(__io(port))
-#define inw(port)    readw(__io(port))
-#define inl(port)    readl(__io(port))
-#define outb(x, port) writeb(x, __io(port))
-#define outw(x, port) writew(x, __io(port))
-#define outl(x, port) writel(x, __io(port))
-
-#define inb_p(port)    inb(__io(port))
-#define inw_p(port)    inw(__io(port))
-#define inl_p(port)    inl(__io(port))
-#define outb_p(x, port) outb(x, __io(port))
-#define outw_p(x, port) outw(x, __io(port))
-#define outl_p(x, port) outl(x, __io(port))
-
-#define ioread8_rep(a, d, c)	readsb(a, d, c)
-#define ioread16_rep(a, d, c)	readsw(a, d, c)
-#define ioread32_rep(a, d, c)	readsl(a, d, c)
-#define iowrite8_rep(a, s, c)	writesb(a, s, c)
-#define iowrite16_rep(a, s, c)	writesw(a, s, c)
-#define iowrite32_rep(a, s, c)	writesl(a, s, c)
-
-#define ioread8(x)			readb(x)
-#define ioread16(x)			readw(x)
-#define ioread32(x)			readl(x)
-#define iowrite8(val, x)		writeb(val, x)
-#define iowrite16(val, x)		writew(val, x)
-#define iowrite32(val, x)		writel(val, x)
-
-/**
- * I/O write barrier
- *
- * Ensure ordering of I/O space writes. This will make sure that writes
- * following the barrier will arrive after all previous writes.
- */
-#define mmiowb() do { SSYNC(); wmb(); } while (0)
-
-#define IO_SPACE_LIMIT 0xffffffff
-
-/* Values for nocacheflag and cmode */
-#define IOMAP_NOCACHE_SER		1
-
-#ifndef __ASSEMBLY__
+DECLARE_BFIN_RAW_READX(b, u8, b, (z))
+#define __raw_readb __raw_readb
+DECLARE_BFIN_RAW_READX(w, u16, w, (z))
+#define __raw_readw __raw_readw
+DECLARE_BFIN_RAW_READX(l, u32, , )
+#define __raw_readl __raw_readl
 
 extern void outsb(unsigned long port, const void *addr, unsigned long count);
 extern void outsw(unsigned long port, const void *addr, unsigned long count);
 extern void outsw_8(unsigned long port, const void *addr, unsigned long count);
 extern void outsl(unsigned long port, const void *addr, unsigned long count);
+#define outsb outsb
+#define outsw outsw
+#define outsl outsl
 
 extern void insb(unsigned long port, void *addr, unsigned long count);
 extern void insw(unsigned long port, void *addr, unsigned long count);
 extern void insw_8(unsigned long port, void *addr, unsigned long count);
 extern void insl(unsigned long port, void *addr, unsigned long count);
 extern void insl_16(unsigned long port, void *addr, unsigned long count);
+#define insb insb
+#define insw insw
+#define insl insl
 
 extern void dma_outsb(unsigned long port, const void *addr, unsigned short count);
 extern void dma_outsw(unsigned long port, const void *addr, unsigned short count);
@@ -158,108 +58,14 @@
 extern void dma_insw(unsigned long port, void *addr, unsigned short count);
 extern void dma_insl(unsigned long port, void *addr, unsigned short count);
 
-static inline void readsl(const void __iomem *addr, void *buf, int len)
-{
-	insl((unsigned long)addr, buf, len);
-}
-
-static inline void readsw(const void __iomem *addr, void *buf, int len)
-{
-	insw((unsigned long)addr, buf, len);
-}
-
-static inline void readsb(const void __iomem *addr, void *buf, int len)
-{
-	insb((unsigned long)addr, buf, len);
-}
-
-static inline void writesl(const void __iomem *addr, const void *buf, int len)
-{
-	outsl((unsigned long)addr, buf, len);
-}
-
-static inline void writesw(const void __iomem *addr, const void *buf, int len)
-{
-	outsw((unsigned long)addr, buf, len);
-}
-
-static inline void writesb(const void __iomem *addr, const void *buf, int len)
-{
-	outsb((unsigned long)addr, buf, len);
-}
-
-/*
- * Map some physical address range into the kernel address space.
+/**
+ * I/O write barrier
+ *
+ * Ensure ordering of I/O space writes. This will make sure that writes
+ * following the barrier will arrive after all previous writes.
  */
-static inline void __iomem *__ioremap(unsigned long physaddr, unsigned long size,
-				int cacheflag)
-{
-	return (void __iomem *)physaddr;
-}
+#define mmiowb() do { SSYNC(); wmb(); } while (0)
 
-/*
- * Unmap a ioremap()ed region again
- */
-static inline void iounmap(void *addr)
-{
-}
-
-/*
- * __iounmap unmaps nearly everything, so be careful
- * it doesn't free currently pointer/page tables anymore but it
- * wans't used anyway and might be added later.
- */
-static inline void __iounmap(void *addr, unsigned long size)
-{
-}
-
-/*
- * Set new cache mode for some kernel address space.
- * The caller must push data for that range itself, if such data may already
- * be in the cache.
- */
-static inline void kernel_set_cachemode(void *addr, unsigned long size,
-					int cmode)
-{
-}
-
-static inline void __iomem *ioremap(unsigned long physaddr, unsigned long size)
-{
-	return __ioremap(physaddr, size, IOMAP_NOCACHE_SER);
-}
-static inline void __iomem *ioremap_nocache(unsigned long physaddr,
-					    unsigned long size)
-{
-	return __ioremap(physaddr, size, IOMAP_NOCACHE_SER);
-}
-
-extern void blkfin_inv_cache_all(void);
+#include <asm-generic/io.h>
 
 #endif
-
-#define	ioport_map(port, nr)		((void __iomem*)(port))
-#define	ioport_unmap(addr)
-
-/* Pages to physical address... */
-#define page_to_bus(page)       ((page - mem_map) << PAGE_SHIFT)
-
-#define phys_to_virt(vaddr)	((void *) (vaddr))
-#define virt_to_phys(vaddr)	((unsigned long) (vaddr))
-
-#define virt_to_bus virt_to_phys
-#define bus_to_virt phys_to_virt
-
-/*
- * Convert a physical pointer to a virtual kernel pointer for /dev/mem
- * access
- */
-#define xlate_dev_mem_ptr(p)	__va(p)
-
-/*
- * Convert a virtual cached pointer to an uncached pointer
- */
-#define xlate_dev_kmem_ptr(p)	p
-
-#endif				/* __KERNEL__ */
-
-#endif				/* _BFIN_IO_H */
diff --git a/arch/blackfin/include/asm/irqflags.h b/arch/blackfin/include/asm/irqflags.h
index 41c4d70..3365cb9 100644
--- a/arch/blackfin/include/asm/irqflags.h
+++ b/arch/blackfin/include/asm/irqflags.h
@@ -13,9 +13,6 @@
 #ifdef CONFIG_SMP
 # include <asm/pda.h>
 # include <asm/processor.h>
-/* Forward decl needed due to cdef inter dependencies */
-static inline uint32_t __pure bfin_dspid(void);
-# define blackfin_core_id() (bfin_dspid() & 0xff)
 # define bfin_irq_flags cpu_pda[blackfin_core_id()].imask
 #else
 extern unsigned long bfin_irq_flags;
diff --git a/arch/blackfin/include/asm/processor.h b/arch/blackfin/include/asm/processor.h
index aea8802..8af7772 100644
--- a/arch/blackfin/include/asm/processor.h
+++ b/arch/blackfin/include/asm/processor.h
@@ -14,7 +14,7 @@
 #define current_text_addr() ({ __label__ _l; _l: &&_l;})
 
 #include <asm/ptrace.h>
-#include <asm/blackfin.h>
+#include <mach/blackfin.h>
 
 static inline unsigned long rdusp(void)
 {
@@ -134,6 +134,8 @@
 	return bfin_read_DSPID();
 }
 
+#define blackfin_core_id() (bfin_dspid() & 0xff)
+
 static inline uint32_t __pure bfin_compiled_revid(void)
 {
 #if defined(CONFIG_BF_REV_0_0)
diff --git a/arch/blackfin/include/asm/spinlock.h b/arch/blackfin/include/asm/spinlock.h
index 1942ccf..1f286e7 100644
--- a/arch/blackfin/include/asm/spinlock.h
+++ b/arch/blackfin/include/asm/spinlock.h
@@ -17,12 +17,12 @@
 asmlinkage void __raw_spin_lock_asm(volatile int *ptr);
 asmlinkage int __raw_spin_trylock_asm(volatile int *ptr);
 asmlinkage void __raw_spin_unlock_asm(volatile int *ptr);
-asmlinkage void arch_read_lock_asm(volatile int *ptr);
-asmlinkage int arch_read_trylock_asm(volatile int *ptr);
-asmlinkage void arch_read_unlock_asm(volatile int *ptr);
-asmlinkage void arch_write_lock_asm(volatile int *ptr);
-asmlinkage int arch_write_trylock_asm(volatile int *ptr);
-asmlinkage void arch_write_unlock_asm(volatile int *ptr);
+asmlinkage void __raw_read_lock_asm(volatile int *ptr);
+asmlinkage int __raw_read_trylock_asm(volatile int *ptr);
+asmlinkage void __raw_read_unlock_asm(volatile int *ptr);
+asmlinkage void __raw_write_lock_asm(volatile int *ptr);
+asmlinkage int __raw_write_trylock_asm(volatile int *ptr);
+asmlinkage void __raw_write_unlock_asm(volatile int *ptr);
 
 static inline int arch_spin_is_locked(arch_spinlock_t *lock)
 {
@@ -64,32 +64,36 @@
 
 static inline void arch_read_lock(arch_rwlock_t *rw)
 {
-	arch_read_lock_asm(&rw->lock);
+	__raw_read_lock_asm(&rw->lock);
 }
 
+#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
+
 static inline int arch_read_trylock(arch_rwlock_t *rw)
 {
-	return arch_read_trylock_asm(&rw->lock);
+	return __raw_read_trylock_asm(&rw->lock);
 }
 
 static inline void arch_read_unlock(arch_rwlock_t *rw)
 {
-	arch_read_unlock_asm(&rw->lock);
+	__raw_read_unlock_asm(&rw->lock);
 }
 
 static inline void arch_write_lock(arch_rwlock_t *rw)
 {
-	arch_write_lock_asm(&rw->lock);
+	__raw_write_lock_asm(&rw->lock);
 }
 
+#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
+
 static inline int arch_write_trylock(arch_rwlock_t *rw)
 {
-	return arch_write_trylock_asm(&rw->lock);
+	return __raw_write_trylock_asm(&rw->lock);
 }
 
 static inline void arch_write_unlock(arch_rwlock_t *rw)
 {
-	arch_write_unlock_asm(&rw->lock);
+	__raw_write_unlock_asm(&rw->lock);
 }
 
 #define arch_spin_relax(lock)  	cpu_relax()
diff --git a/arch/blackfin/include/mach-common/pll.h b/arch/blackfin/include/mach-common/pll.h
new file mode 100644
index 0000000..382178b
--- /dev/null
+++ b/arch/blackfin/include/mach-common/pll.h
@@ -0,0 +1,86 @@
+/*
+ * Copyright 2005-2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#ifndef _MACH_COMMON_PLL_H
+#define _MACH_COMMON_PLL_H
+
+#ifndef __ASSEMBLY__
+
+#include <asm/blackfin.h>
+#include <asm/irqflags.h>
+
+#ifndef bfin_iwr_restore
+static inline void
+bfin_iwr_restore(unsigned long iwr0, unsigned long iwr1, unsigned long iwr2)
+{
+#ifdef SIC_IWR
+	bfin_write_SIC_IWR(iwr0);
+#else
+	bfin_write_SIC_IWR0(iwr0);
+# ifdef SIC_IWR1
+	bfin_write_SIC_IWR1(iwr1);
+# endif
+# ifdef SIC_IWR2
+	bfin_write_SIC_IWR2(iwr2);
+# endif
+#endif
+}
+#endif
+
+#ifndef bfin_iwr_save
+static inline void
+bfin_iwr_save(unsigned long niwr0, unsigned long niwr1, unsigned long niwr2,
+              unsigned long *iwr0, unsigned long *iwr1, unsigned long *iwr2)
+{
+#ifdef SIC_IWR
+	*iwr0 = bfin_read_SIC_IWR();
+#else
+	*iwr0 = bfin_read_SIC_IWR0();
+# ifdef SIC_IWR1
+	*iwr1 = bfin_read_SIC_IWR1();
+# endif
+# ifdef SIC_IWR2
+	*iwr2 = bfin_read_SIC_IWR2();
+# endif
+#endif
+	bfin_iwr_restore(niwr0, niwr1, niwr2);
+}
+#endif
+
+static inline void _bfin_write_pll_relock(u32 addr, unsigned int val)
+{
+	unsigned long flags, iwr0, iwr1, iwr2;
+
+	if (val == bfin_read_PLL_CTL())
+		return;
+
+	flags = hard_local_irq_save();
+	/* Enable the PLL Wakeup bit in SIC IWR */
+	bfin_iwr_save(IWR_ENABLE(0), 0, 0, &iwr0, &iwr1, &iwr2);
+
+	bfin_write16(addr, val);
+	SSYNC();
+	asm("IDLE;");
+
+	bfin_iwr_restore(iwr0, iwr1, iwr2);
+	hard_local_irq_restore(flags);
+}
+
+/* Writing to PLL_CTL initiates a PLL relock sequence */
+static inline void bfin_write_PLL_CTL(unsigned int val)
+{
+	_bfin_write_pll_relock(PLL_CTL, val);
+}
+
+/* Writing to VR_CTL initiates a PLL relock sequence */
+static inline void bfin_write_VR_CTL(unsigned int val)
+{
+	_bfin_write_pll_relock(VR_CTL, val);
+}
+
+#endif
+
+#endif
diff --git a/arch/blackfin/include/mach-common/ports-a.h b/arch/blackfin/include/mach-common/ports-a.h
new file mode 100644
index 0000000..9f78a76
--- /dev/null
+++ b/arch/blackfin/include/mach-common/ports-a.h
@@ -0,0 +1,25 @@
+/*
+ * Port A Masks
+ */
+
+#ifndef __BFIN_PERIPHERAL_PORT_A__
+#define __BFIN_PERIPHERAL_PORT_A__
+
+#define PA0		(1 << 0)
+#define PA1		(1 << 1)
+#define PA2		(1 << 2)
+#define PA3		(1 << 3)
+#define PA4		(1 << 4)
+#define PA5		(1 << 5)
+#define PA6		(1 << 6)
+#define PA7		(1 << 7)
+#define PA8		(1 << 8)
+#define PA9		(1 << 9)
+#define PA10		(1 << 10)
+#define PA11		(1 << 11)
+#define PA12		(1 << 12)
+#define PA13		(1 << 13)
+#define PA14		(1 << 14)
+#define PA15		(1 << 15)
+
+#endif
diff --git a/arch/blackfin/include/mach-common/ports-b.h b/arch/blackfin/include/mach-common/ports-b.h
new file mode 100644
index 0000000..b81702f
--- /dev/null
+++ b/arch/blackfin/include/mach-common/ports-b.h
@@ -0,0 +1,25 @@
+/*
+ * Port B Masks
+ */
+
+#ifndef __BFIN_PERIPHERAL_PORT_B__
+#define __BFIN_PERIPHERAL_PORT_B__
+
+#define PB0		(1 << 0)
+#define PB1		(1 << 1)
+#define PB2		(1 << 2)
+#define PB3		(1 << 3)
+#define PB4		(1 << 4)
+#define PB5		(1 << 5)
+#define PB6		(1 << 6)
+#define PB7		(1 << 7)
+#define PB8		(1 << 8)
+#define PB9		(1 << 9)
+#define PB10		(1 << 10)
+#define PB11		(1 << 11)
+#define PB12		(1 << 12)
+#define PB13		(1 << 13)
+#define PB14		(1 << 14)
+#define PB15		(1 << 15)
+
+#endif
diff --git a/arch/blackfin/include/mach-common/ports-c.h b/arch/blackfin/include/mach-common/ports-c.h
new file mode 100644
index 0000000..3cc665e
--- /dev/null
+++ b/arch/blackfin/include/mach-common/ports-c.h
@@ -0,0 +1,25 @@
+/*
+ * Port C Masks
+ */
+
+#ifndef __BFIN_PERIPHERAL_PORT_C__
+#define __BFIN_PERIPHERAL_PORT_C__
+
+#define PC0		(1 << 0)
+#define PC1		(1 << 1)
+#define PC2		(1 << 2)
+#define PC3		(1 << 3)
+#define PC4		(1 << 4)
+#define PC5		(1 << 5)
+#define PC6		(1 << 6)
+#define PC7		(1 << 7)
+#define PC8		(1 << 8)
+#define PC9		(1 << 9)
+#define PC10		(1 << 10)
+#define PC11		(1 << 11)
+#define PC12		(1 << 12)
+#define PC13		(1 << 13)
+#define PC14		(1 << 14)
+#define PC15		(1 << 15)
+
+#endif
diff --git a/arch/blackfin/include/mach-common/ports-d.h b/arch/blackfin/include/mach-common/ports-d.h
new file mode 100644
index 0000000..868c6a0
--- /dev/null
+++ b/arch/blackfin/include/mach-common/ports-d.h
@@ -0,0 +1,25 @@
+/*
+ * Port D Masks
+ */
+
+#ifndef __BFIN_PERIPHERAL_PORT_D__
+#define __BFIN_PERIPHERAL_PORT_D__
+
+#define PD0		(1 << 0)
+#define PD1		(1 << 1)
+#define PD2		(1 << 2)
+#define PD3		(1 << 3)
+#define PD4		(1 << 4)
+#define PD5		(1 << 5)
+#define PD6		(1 << 6)
+#define PD7		(1 << 7)
+#define PD8		(1 << 8)
+#define PD9		(1 << 9)
+#define PD10		(1 << 10)
+#define PD11		(1 << 11)
+#define PD12		(1 << 12)
+#define PD13		(1 << 13)
+#define PD14		(1 << 14)
+#define PD15		(1 << 15)
+
+#endif
diff --git a/arch/blackfin/include/mach-common/ports-e.h b/arch/blackfin/include/mach-common/ports-e.h
new file mode 100644
index 0000000..c88b0d0
--- /dev/null
+++ b/arch/blackfin/include/mach-common/ports-e.h
@@ -0,0 +1,25 @@
+/*
+ * Port E Masks
+ */
+
+#ifndef __BFIN_PERIPHERAL_PORT_E__
+#define __BFIN_PERIPHERAL_PORT_E__
+
+#define PE0		(1 << 0)
+#define PE1		(1 << 1)
+#define PE2		(1 << 2)
+#define PE3		(1 << 3)
+#define PE4		(1 << 4)
+#define PE5		(1 << 5)
+#define PE6		(1 << 6)
+#define PE7		(1 << 7)
+#define PE8		(1 << 8)
+#define PE9		(1 << 9)
+#define PE10		(1 << 10)
+#define PE11		(1 << 11)
+#define PE12		(1 << 12)
+#define PE13		(1 << 13)
+#define PE14		(1 << 14)
+#define PE15		(1 << 15)
+
+#endif
diff --git a/arch/blackfin/include/mach-common/ports-f.h b/arch/blackfin/include/mach-common/ports-f.h
new file mode 100644
index 0000000..d6af206
--- /dev/null
+++ b/arch/blackfin/include/mach-common/ports-f.h
@@ -0,0 +1,25 @@
+/*
+ * Port F Masks
+ */
+
+#ifndef __BFIN_PERIPHERAL_PORT_F__
+#define __BFIN_PERIPHERAL_PORT_F__
+
+#define PF0		(1 << 0)
+#define PF1		(1 << 1)
+#define PF2		(1 << 2)
+#define PF3		(1 << 3)
+#define PF4		(1 << 4)
+#define PF5		(1 << 5)
+#define PF6		(1 << 6)
+#define PF7		(1 << 7)
+#define PF8		(1 << 8)
+#define PF9		(1 << 9)
+#define PF10		(1 << 10)
+#define PF11		(1 << 11)
+#define PF12		(1 << 12)
+#define PF13		(1 << 13)
+#define PF14		(1 << 14)
+#define PF15		(1 << 15)
+
+#endif
diff --git a/arch/blackfin/include/mach-common/ports-g.h b/arch/blackfin/include/mach-common/ports-g.h
new file mode 100644
index 0000000..09355d3
--- /dev/null
+++ b/arch/blackfin/include/mach-common/ports-g.h
@@ -0,0 +1,25 @@
+/*
+ * Port G Masks
+ */
+
+#ifndef __BFIN_PERIPHERAL_PORT_G__
+#define __BFIN_PERIPHERAL_PORT_G__
+
+#define PG0		(1 << 0)
+#define PG1		(1 << 1)
+#define PG2		(1 << 2)
+#define PG3		(1 << 3)
+#define PG4		(1 << 4)
+#define PG5		(1 << 5)
+#define PG6		(1 << 6)
+#define PG7		(1 << 7)
+#define PG8		(1 << 8)
+#define PG9		(1 << 9)
+#define PG10		(1 << 10)
+#define PG11		(1 << 11)
+#define PG12		(1 << 12)
+#define PG13		(1 << 13)
+#define PG14		(1 << 14)
+#define PG15		(1 << 15)
+
+#endif
diff --git a/arch/blackfin/include/mach-common/ports-h.h b/arch/blackfin/include/mach-common/ports-h.h
new file mode 100644
index 0000000..fa3910c
--- /dev/null
+++ b/arch/blackfin/include/mach-common/ports-h.h
@@ -0,0 +1,25 @@
+/*
+ * Port H Masks
+ */
+
+#ifndef __BFIN_PERIPHERAL_PORT_H__
+#define __BFIN_PERIPHERAL_PORT_H__
+
+#define PH0		(1 << 0)
+#define PH1		(1 << 1)
+#define PH2		(1 << 2)
+#define PH3		(1 << 3)
+#define PH4		(1 << 4)
+#define PH5		(1 << 5)
+#define PH6		(1 << 6)
+#define PH7		(1 << 7)
+#define PH8		(1 << 8)
+#define PH9		(1 << 9)
+#define PH10		(1 << 10)
+#define PH11		(1 << 11)
+#define PH12		(1 << 12)
+#define PH13		(1 << 13)
+#define PH14		(1 << 14)
+#define PH15		(1 << 15)
+
+#endif
diff --git a/arch/blackfin/include/mach-common/ports-i.h b/arch/blackfin/include/mach-common/ports-i.h
new file mode 100644
index 0000000..f176f08
--- /dev/null
+++ b/arch/blackfin/include/mach-common/ports-i.h
@@ -0,0 +1,25 @@
+/*
+ * Port I Masks
+ */
+
+#ifndef __BFIN_PERIPHERAL_PORT_I__
+#define __BFIN_PERIPHERAL_PORT_I__
+
+#define PI0		(1 << 0)
+#define PI1		(1 << 1)
+#define PI2		(1 << 2)
+#define PI3		(1 << 3)
+#define PI4		(1 << 4)
+#define PI5		(1 << 5)
+#define PI6		(1 << 6)
+#define PI7		(1 << 7)
+#define PI8		(1 << 8)
+#define PI9		(1 << 9)
+#define PI10		(1 << 10)
+#define PI11		(1 << 11)
+#define PI12		(1 << 12)
+#define PI13		(1 << 13)
+#define PI14		(1 << 14)
+#define PI15		(1 << 15)
+
+#endif
diff --git a/arch/blackfin/include/mach-common/ports-j.h b/arch/blackfin/include/mach-common/ports-j.h
new file mode 100644
index 0000000..924123e
--- /dev/null
+++ b/arch/blackfin/include/mach-common/ports-j.h
@@ -0,0 +1,25 @@
+/*
+ * Port J Masks
+ */
+
+#ifndef __BFIN_PERIPHERAL_PORT_J__
+#define __BFIN_PERIPHERAL_PORT_J__
+
+#define PJ0		(1 << 0)
+#define PJ1		(1 << 1)
+#define PJ2		(1 << 2)
+#define PJ3		(1 << 3)
+#define PJ4		(1 << 4)
+#define PJ5		(1 << 5)
+#define PJ6		(1 << 6)
+#define PJ7		(1 << 7)
+#define PJ8		(1 << 8)
+#define PJ9		(1 << 9)
+#define PJ10		(1 << 10)
+#define PJ11		(1 << 11)
+#define PJ12		(1 << 12)
+#define PJ13		(1 << 13)
+#define PJ14		(1 << 14)
+#define PJ15		(1 << 15)
+
+#endif
diff --git a/arch/blackfin/kernel/cplb-nompu/cplbinit.c b/arch/blackfin/kernel/cplb-nompu/cplbinit.c
index bfe75af..886e000 100644
--- a/arch/blackfin/kernel/cplb-nompu/cplbinit.c
+++ b/arch/blackfin/kernel/cplb-nompu/cplbinit.c
@@ -116,7 +116,7 @@
 	    ((_ramend - uncached_end) >= 1 * 1024 * 1024))
 		dcplb_bounds[i_d].eaddr = uncached_end;
 	else
-		dcplb_bounds[i_d].eaddr = uncached_end & ~(1 * 1024 * 1024);
+		dcplb_bounds[i_d].eaddr = uncached_end & ~(1 * 1024 * 1024 - 1);
 	dcplb_bounds[i_d++].data = SDRAM_DGENERIC;
 	/* DMA uncached region.  */
 	if (DMA_UNCACHED_REGION) {
diff --git a/arch/blackfin/kernel/kgdb.c b/arch/blackfin/kernel/kgdb.c
index edae461..eb92592 100644
--- a/arch/blackfin/kernel/kgdb.c
+++ b/arch/blackfin/kernel/kgdb.c
@@ -345,6 +345,23 @@
 }
 #endif
 
+#ifdef CONFIG_IPIPE
+static unsigned long kgdb_arch_imask;
+#endif
+
+void kgdb_post_primary_code(struct pt_regs *regs, int e_vector, int err_code)
+{
+	if (kgdb_single_step)
+		preempt_enable();
+
+#ifdef CONFIG_IPIPE
+	if (kgdb_arch_imask) {
+		cpu_pda[raw_smp_processor_id()].ex_imask = kgdb_arch_imask;
+		kgdb_arch_imask = 0;
+	}
+#endif
+}
+
 int kgdb_arch_handle_exception(int vector, int signo,
 			       int err_code, char *remcom_in_buffer,
 			       char *remcom_out_buffer,
@@ -388,6 +405,12 @@
 			 * kgdb_single_step > 0 means in single step mode
 			 */
 			kgdb_single_step = i + 1;
+
+			preempt_disable();
+#ifdef CONFIG_IPIPE
+			kgdb_arch_imask = cpu_pda[raw_smp_processor_id()].ex_imask;
+			cpu_pda[raw_smp_processor_id()].ex_imask = 0;
+#endif
 		}
 
 		bfin_correct_hw_break();
@@ -448,6 +471,9 @@
 int kgdb_arch_init(void)
 {
 	kgdb_single_step = 0;
+#ifdef CONFIG_IPIPE
+	kgdb_arch_imask = 0;
+#endif
 
 	bfin_remove_all_hw_break();
 	return 0;
diff --git a/arch/blackfin/kernel/kgdb_test.c b/arch/blackfin/kernel/kgdb_test.c
index 08c0236..2a6e9db 100644
--- a/arch/blackfin/kernel/kgdb_test.c
+++ b/arch/blackfin/kernel/kgdb_test.c
@@ -95,6 +95,10 @@
 {
 	struct proc_dir_entry *entry;
 
+#if L2_LENGTH
+	num2 = 0;
+#endif
+
 	entry = proc_create("kgdbtest", 0, NULL, &kgdb_test_proc_fops);
 	if (entry == NULL)
 		return -ENOMEM;
diff --git a/arch/blackfin/mach-bf518/boards/ezbrd.c b/arch/blackfin/mach-bf518/boards/ezbrd.c
index b894c8a..c0ccadc 100644
--- a/arch/blackfin/mach-bf518/boards/ezbrd.c
+++ b/arch/blackfin/mach-bf518/boards/ezbrd.c
@@ -104,24 +104,23 @@
 
 static struct bfin_phydev_platform_data bfin_phydev_data[] = {
 	{
-		.addr = 1,
-		.irq = IRQ_MAC_PHYINT,
-	},
-	{
-		.addr = 2,
-		.irq = IRQ_MAC_PHYINT,
-	},
-	{
+#if defined(CONFIG_NET_DSA_KSZ8893M) || defined(CONFIG_NET_DSA_KSZ8893M_MODULE)
 		.addr = 3,
+#else
+		.addr = 1,
+#endif
 		.irq = IRQ_MAC_PHYINT,
 	},
 };
 
 static struct bfin_mii_bus_platform_data bfin_mii_bus_data = {
-	.phydev_number = 3,
+	.phydev_number = 1,
 	.phydev_data = bfin_phydev_data,
 	.phy_mode = PHY_INTERFACE_MODE_MII,
 	.mac_peripherals = bfin_mac_peripherals,
+#if defined(CONFIG_NET_DSA_KSZ8893M) || defined(CONFIG_NET_DSA_KSZ8893M_MODULE)
+	.phy_mask = 0xfff7, /* Only probe the port PHY connected to the on-chip MAC */
+#endif
 };
 
 static struct platform_device bfin_mii_bus = {
@@ -453,7 +452,7 @@
 	},
 };
 
-unsigned short bfin_uart0_peripherals[] = {
+static unsigned short bfin_uart0_peripherals[] = {
 	P_UART0_TX, P_UART0_RX, 0
 };
 
@@ -496,7 +495,7 @@
 	},
 };
 
-unsigned short bfin_uart1_peripherals[] = {
+static unsigned short bfin_uart1_peripherals[] = {
 	P_UART1_TX, P_UART1_RX, 0
 };
 
@@ -636,9 +635,9 @@
 	},
 };
 
-unsigned short bfin_sport0_peripherals[] = {
+static unsigned short bfin_sport0_peripherals[] = {
 	P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS,
-	P_SPORT0_DRPRI, P_SPORT0_RSCLK, P_SPORT0_DRSEC, P_SPORT0_DTSEC, 0
+	P_SPORT0_DRPRI, P_SPORT0_RSCLK, 0
 };
 
 static struct platform_device bfin_sport0_uart_device = {
@@ -670,9 +669,9 @@
 	},
 };
 
-unsigned short bfin_sport1_peripherals[] = {
+static unsigned short bfin_sport1_peripherals[] = {
 	P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS,
-	P_SPORT1_DRPRI, P_SPORT1_RSCLK, P_SPORT1_DRSEC, P_SPORT1_DTSEC, 0
+	P_SPORT1_DRPRI, P_SPORT1_RSCLK, 0
 };
 
 static struct platform_device bfin_sport1_uart_device = {
diff --git a/arch/blackfin/mach-bf518/boards/tcm-bf518.c b/arch/blackfin/mach-bf518/boards/tcm-bf518.c
index e6ce1d7..50fc5c8 100644
--- a/arch/blackfin/mach-bf518/boards/tcm-bf518.c
+++ b/arch/blackfin/mach-bf518/boards/tcm-bf518.c
@@ -377,7 +377,7 @@
 	},
 };
 
-unsigned short bfin_uart0_peripherals[] = {
+static unsigned short bfin_uart0_peripherals[] = {
 	P_UART0_TX, P_UART0_RX, 0
 };
 
@@ -420,7 +420,7 @@
 	},
 };
 
-unsigned short bfin_uart1_peripherals[] = {
+static unsigned short bfin_uart1_peripherals[] = {
 	P_UART1_TX, P_UART1_RX, 0
 };
 
@@ -547,9 +547,9 @@
 	},
 };
 
-unsigned short bfin_sport0_peripherals[] = {
+static unsigned short bfin_sport0_peripherals[] = {
 	P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS,
-	P_SPORT0_DRPRI, P_SPORT0_RSCLK, P_SPORT0_DRSEC, P_SPORT0_DTSEC, 0
+	P_SPORT0_DRPRI, P_SPORT0_RSCLK, 0
 };
 
 static struct platform_device bfin_sport0_uart_device = {
@@ -581,9 +581,9 @@
 	},
 };
 
-unsigned short bfin_sport1_peripherals[] = {
+static unsigned short bfin_sport1_peripherals[] = {
 	P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS,
-	P_SPORT1_DRPRI, P_SPORT1_RSCLK, P_SPORT1_DRSEC, P_SPORT1_DTSEC, 0
+	P_SPORT1_DRPRI, P_SPORT1_RSCLK, 0
 };
 
 static struct platform_device bfin_sport1_uart_device = {
diff --git a/arch/blackfin/mach-bf518/dma.c b/arch/blackfin/mach-bf518/dma.c
index 78b4360..bcd1fbc 100644
--- a/arch/blackfin/mach-bf518/dma.c
+++ b/arch/blackfin/mach-bf518/dma.c
@@ -11,7 +11,7 @@
 #include <asm/blackfin.h>
 #include <asm/dma.h>
 
-struct dma_register *dma_io_base_addr[MAX_DMA_CHANNELS] = {
+struct dma_register * const dma_io_base_addr[MAX_DMA_CHANNELS] = {
 	(struct dma_register *) DMA0_NEXT_DESC_PTR,
 	(struct dma_register *) DMA1_NEXT_DESC_PTR,
 	(struct dma_register *) DMA2_NEXT_DESC_PTR,
diff --git a/arch/blackfin/mach-bf518/include/mach/bfin_serial.h b/arch/blackfin/mach-bf518/include/mach/bfin_serial.h
new file mode 100644
index 0000000..00c603f
--- /dev/null
+++ b/arch/blackfin/mach-bf518/include/mach/bfin_serial.h
@@ -0,0 +1,14 @@
+/*
+ * mach/bfin_serial.h - Blackfin UART/Serial definitions
+ *
+ * Copyright 2006-2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#ifndef __BFIN_MACH_SERIAL_H__
+#define __BFIN_MACH_SERIAL_H__
+
+#define BFIN_UART_NR_PORTS	2
+
+#endif
diff --git a/arch/blackfin/mach-bf518/include/mach/bfin_serial_5xx.h b/arch/blackfin/mach-bf518/include/mach/bfin_serial_5xx.h
index 970d310..f6d924a 100644
--- a/arch/blackfin/mach-bf518/include/mach/bfin_serial_5xx.h
+++ b/arch/blackfin/mach-bf518/include/mach/bfin_serial_5xx.h
@@ -4,36 +4,9 @@
  * Licensed under the GPL-2 or later
  */
 
-#include <linux/serial.h>
 #include <asm/dma.h>
 #include <asm/portmux.h>
 
-#define UART_GET_CHAR(uart)     bfin_read16(((uart)->port.membase + OFFSET_RBR))
-#define UART_GET_DLL(uart)	bfin_read16(((uart)->port.membase + OFFSET_DLL))
-#define UART_GET_IER(uart)      bfin_read16(((uart)->port.membase + OFFSET_IER))
-#define UART_GET_DLH(uart)	bfin_read16(((uart)->port.membase + OFFSET_DLH))
-#define UART_GET_IIR(uart)      bfin_read16(((uart)->port.membase + OFFSET_IIR))
-#define UART_GET_LCR(uart)      bfin_read16(((uart)->port.membase + OFFSET_LCR))
-#define UART_GET_GCTL(uart)     bfin_read16(((uart)->port.membase + OFFSET_GCTL))
-
-#define UART_PUT_CHAR(uart, v)   bfin_write16(((uart)->port.membase + OFFSET_THR), v)
-#define UART_PUT_DLL(uart, v)    bfin_write16(((uart)->port.membase + OFFSET_DLL), v)
-#define UART_PUT_IER(uart, v)    bfin_write16(((uart)->port.membase + OFFSET_IER), v)
-#define UART_SET_IER(uart, v)    UART_PUT_IER(uart, UART_GET_IER(uart) | (v))
-#define UART_CLEAR_IER(uart, v)  UART_PUT_IER(uart, UART_GET_IER(uart) & ~(v))
-#define UART_PUT_DLH(uart, v)    bfin_write16(((uart)->port.membase + OFFSET_DLH), v)
-#define UART_PUT_LCR(uart, v)    bfin_write16(((uart)->port.membase + OFFSET_LCR), v)
-#define UART_PUT_GCTL(uart, v)   bfin_write16(((uart)->port.membase + OFFSET_GCTL), v)
-
-#define UART_SET_DLAB(uart)     do { UART_PUT_LCR(uart, UART_GET_LCR(uart) | DLAB); SSYNC(); } while (0)
-#define UART_CLEAR_DLAB(uart)   do { UART_PUT_LCR(uart, UART_GET_LCR(uart) & ~DLAB); SSYNC(); } while (0)
-
-#define UART_GET_CTS(x) gpio_get_value(x->cts_pin)
-#define UART_DISABLE_RTS(x) gpio_set_value(x->rts_pin, 1)
-#define UART_ENABLE_RTS(x) gpio_set_value(x->rts_pin, 0)
-#define UART_ENABLE_INTS(x, v) UART_PUT_IER(x, v)
-#define UART_DISABLE_INTS(x) UART_PUT_IER(x, 0)
-
 #if defined(CONFIG_BFIN_UART0_CTSRTS) || defined(CONFIG_BFIN_UART1_CTSRTS)
 # define CONFIG_SERIAL_BFIN_CTSRTS
 
@@ -54,50 +27,6 @@
 # endif
 #endif
 
-#define BFIN_UART_TX_FIFO_SIZE	2
-
-/*
- * The pin configuration is different from schematic
- */
-struct bfin_serial_port {
-	struct uart_port port;
-	unsigned int old_status;
-	int status_irq;
-	unsigned int lsr;
-#ifdef CONFIG_SERIAL_BFIN_DMA
-	int tx_done;
-	int tx_count;
-	struct circ_buf rx_dma_buf;
-	struct timer_list rx_dma_timer;
-	int rx_dma_nrows;
-	unsigned int tx_dma_channel;
-	unsigned int rx_dma_channel;
-	struct work_struct tx_dma_workqueue;
-#endif
-#ifdef CONFIG_SERIAL_BFIN_CTSRTS
-	struct timer_list cts_timer;
-	int cts_pin;
-	int rts_pin;
-#endif
-};
-
-/* The hardware clears the LSR bits upon read, so we need to cache
- * some of the more fun bits in software so they don't get lost
- * when checking the LSR in other code paths (TX).
- */
-static inline unsigned int UART_GET_LSR(struct bfin_serial_port *uart)
-{
-	unsigned int lsr = bfin_read16(uart->port.membase + OFFSET_LSR);
-	uart->lsr |= (lsr & (BI|FE|PE|OE));
-	return lsr | uart->lsr;
-}
-
-static inline void UART_CLEAR_LSR(struct bfin_serial_port *uart)
-{
-	uart->lsr = 0;
-	bfin_write16(uart->port.membase + OFFSET_LSR, -1);
-}
-
 struct bfin_serial_res {
 	unsigned long uart_base_addr;
 	int uart_irq;
@@ -146,3 +75,5 @@
 };
 
 #define DRIVER_NAME "bfin-uart"
+
+#include <asm/bfin_serial.h>
diff --git a/arch/blackfin/mach-bf518/include/mach/blackfin.h b/arch/blackfin/mach-bf518/include/mach/blackfin.h
index 9053462..a882886 100644
--- a/arch/blackfin/mach-bf518/include/mach/blackfin.h
+++ b/arch/blackfin/mach-bf518/include/mach/blackfin.h
@@ -1,61 +1,43 @@
 /*
- * Copyright 2008-2009 Analog Devices Inc.
+ * Copyright 2008-2010 Analog Devices Inc.
  *
- * Licensed under the GPL-2 or later
+ * Licensed under the GPL-2 or later.
  */
 
 #ifndef _MACH_BLACKFIN_H_
 #define _MACH_BLACKFIN_H_
 
 #include "bf518.h"
-#include "defBF512.h"
 #include "anomaly.h"
 
-#if defined(CONFIG_BF518)
-#include "defBF518.h"
+#include <asm/def_LPBlackfin.h>
+#ifdef CONFIG_BF512
+# include "defBF512.h"
+#endif
+#ifdef CONFIG_BF514
+# include "defBF514.h"
+#endif
+#ifdef CONFIG_BF516
+# include "defBF516.h"
+#endif
+#ifdef CONFIG_BF518
+# include "defBF518.h"
 #endif
 
-#if defined(CONFIG_BF516)
-#include "defBF516.h"
+#ifndef __ASSEMBLY__
+# include <asm/cdef_LPBlackfin.h>
+# ifdef CONFIG_BF512
+#  include "cdefBF512.h"
+# endif
+# ifdef CONFIG_BF514
+#  include "cdefBF514.h"
+# endif
+# ifdef CONFIG_BF516
+#  include "cdefBF516.h"
+# endif
+# ifdef CONFIG_BF518
+#  include "cdefBF518.h"
+# endif
 #endif
 
-#if defined(CONFIG_BF514)
-#include "defBF514.h"
-#endif
-
-#if defined(CONFIG_BF512)
-#include "defBF512.h"
-#endif
-
-#if !defined(__ASSEMBLY__)
-#include "cdefBF512.h"
-
-#if defined(CONFIG_BF518)
-#include "cdefBF518.h"
-#endif
-
-#if defined(CONFIG_BF516)
-#include "cdefBF516.h"
-#endif
-
-#if defined(CONFIG_BF514)
-#include "cdefBF514.h"
-#endif
-#endif
-
-#define BFIN_UART_NR_PORTS	2
-
-#define OFFSET_THR              0x00	/* Transmit Holding register            */
-#define OFFSET_RBR              0x00	/* Receive Buffer register              */
-#define OFFSET_DLL              0x00	/* Divisor Latch (Low-Byte)             */
-#define OFFSET_IER              0x04	/* Interrupt Enable Register            */
-#define OFFSET_DLH              0x04	/* Divisor Latch (High-Byte)            */
-#define OFFSET_IIR              0x08	/* Interrupt Identification Register    */
-#define OFFSET_LCR              0x0C	/* Line Control Register                */
-#define OFFSET_MCR              0x10	/* Modem Control Register               */
-#define OFFSET_LSR              0x14	/* Line Status Register                 */
-#define OFFSET_MSR              0x18	/* Modem Status Register                */
-#define OFFSET_SCR              0x1C	/* SCR Scratch Register                 */
-#define OFFSET_GCTL             0x24	/* Global Control Register              */
-
 #endif
diff --git a/arch/blackfin/mach-bf518/include/mach/cdefBF512.h b/arch/blackfin/mach-bf518/include/mach/cdefBF512.h
index 493020d..b657d37 100644
--- a/arch/blackfin/mach-bf518/include/mach/cdefBF512.h
+++ b/arch/blackfin/mach-bf518/include/mach/cdefBF512.h
@@ -1,5 +1,5 @@
 /*
- * Copyright 2008-2009 Analog Devices Inc.
+ * Copyright 2008-2010 Analog Devices Inc.
  *
  * Licensed under the ADI BSD license or the GPL-2 (or later)
  */
@@ -7,15 +7,1037 @@
 #ifndef _CDEF_BF512_H
 #define _CDEF_BF512_H
 
-/* include all Core registers and bit definitions */
-#include "defBF512.h"
+/* Clock and System Control	(0xFFC00000 - 0xFFC000FF)								*/
+#define bfin_read_PLL_CTL()			bfin_read16(PLL_CTL)
+#define bfin_read_PLL_DIV()			bfin_read16(PLL_DIV)
+#define bfin_write_PLL_DIV(val)			bfin_write16(PLL_DIV, val)
+#define bfin_read_VR_CTL()			bfin_read16(VR_CTL)
+#define bfin_read_PLL_STAT()			bfin_read16(PLL_STAT)
+#define bfin_write_PLL_STAT(val)		bfin_write16(PLL_STAT, val)
+#define bfin_read_PLL_LOCKCNT()			bfin_read16(PLL_LOCKCNT)
+#define bfin_write_PLL_LOCKCNT(val)		bfin_write16(PLL_LOCKCNT, val)
+#define bfin_read_CHIPID()			bfin_read32(CHIPID)
+#define bfin_write_CHIPID(val)			bfin_write32(CHIPID, val)
 
-/* include core specific register pointer definitions */
-#include <asm/cdef_LPBlackfin.h>
 
-/* SYSTEM & MMR ADDRESS DEFINITIONS FOR ADSP-BF512 */
+/* System Interrupt Controller (0xFFC00100 - 0xFFC001FF)							*/
+#define bfin_read_SWRST()			bfin_read16(SWRST)
+#define bfin_write_SWRST(val)			bfin_write16(SWRST, val)
+#define bfin_read_SYSCR()			bfin_read16(SYSCR)
+#define bfin_write_SYSCR(val)			bfin_write16(SYSCR, val)
 
-/* include cdefBF51x_base.h for the set of #defines that are common to all ADSP-BF51x processors */
-#include "cdefBF51x_base.h"
+#define bfin_read_SIC_RVECT()			bfin_read32(SIC_RVECT)
+#define bfin_write_SIC_RVECT(val)		bfin_write32(SIC_RVECT, val)
+#define bfin_read_SIC_IMASK0()			bfin_read32(SIC_IMASK0)
+#define bfin_write_SIC_IMASK0(val)		bfin_write32(SIC_IMASK0, val)
+#define bfin_read_SIC_IMASK(x)			bfin_read32(SIC_IMASK0 + (x << 6))
+#define bfin_write_SIC_IMASK(x, val)		bfin_write32((SIC_IMASK0 + (x << 6)), val)
+
+#define bfin_read_SIC_IAR0()			bfin_read32(SIC_IAR0)
+#define bfin_write_SIC_IAR0(val)		bfin_write32(SIC_IAR0, val)
+#define bfin_read_SIC_IAR1()			bfin_read32(SIC_IAR1)
+#define bfin_write_SIC_IAR1(val)		bfin_write32(SIC_IAR1, val)
+#define bfin_read_SIC_IAR2()			bfin_read32(SIC_IAR2)
+#define bfin_write_SIC_IAR2(val)		bfin_write32(SIC_IAR2, val)
+#define bfin_read_SIC_IAR3()			bfin_read32(SIC_IAR3)
+#define bfin_write_SIC_IAR3(val)		bfin_write32(SIC_IAR3, val)
+
+#define bfin_read_SIC_ISR0()			bfin_read32(SIC_ISR0)
+#define bfin_write_SIC_ISR0(val)		bfin_write32(SIC_ISR0, val)
+#define bfin_read_SIC_ISR(x)			bfin_read32(SIC_ISR0 + (x << 6))
+#define bfin_write_SIC_ISR(x, val)		bfin_write32((SIC_ISR0 + (x << 6)), val)
+
+#define bfin_read_SIC_IWR0()			bfin_read32(SIC_IWR0)
+#define bfin_write_SIC_IWR0(val)		bfin_write32(SIC_IWR0, val)
+#define bfin_read_SIC_IWR(x)			bfin_read32(SIC_IWR0 + (x << 6))
+#define bfin_write_SIC_IWR(x, val)		bfin_write32((SIC_IWR0 + (x << 6)), val)
+
+/* SIC Additions to ADSP-BF51x (0xFFC0014C - 0xFFC00162) */
+
+#define bfin_read_SIC_IMASK1()			bfin_read32(SIC_IMASK1)
+#define bfin_write_SIC_IMASK1(val)		bfin_write32(SIC_IMASK1, val)
+#define bfin_read_SIC_IAR4()			bfin_read32(SIC_IAR4)
+#define bfin_write_SIC_IAR4(val)		bfin_write32(SIC_IAR4, val)
+#define bfin_read_SIC_IAR5()			bfin_read32(SIC_IAR5)
+#define bfin_write_SIC_IAR5(val)		bfin_write32(SIC_IAR5, val)
+#define bfin_read_SIC_IAR6()			bfin_read32(SIC_IAR6)
+#define bfin_write_SIC_IAR6(val)		bfin_write32(SIC_IAR6, val)
+#define bfin_read_SIC_IAR7()			bfin_read32(SIC_IAR7)
+#define bfin_write_SIC_IAR7(val)		bfin_write32(SIC_IAR7, val)
+#define bfin_read_SIC_ISR1()			bfin_read32(SIC_ISR1)
+#define bfin_write_SIC_ISR1(val)		bfin_write32(SIC_ISR1, val)
+#define bfin_read_SIC_IWR1()			bfin_read32(SIC_IWR1)
+#define bfin_write_SIC_IWR1(val)		bfin_write32(SIC_IWR1, val)
+
+/* Watchdog Timer		(0xFFC00200 - 0xFFC002FF)									*/
+#define bfin_read_WDOG_CTL()			bfin_read16(WDOG_CTL)
+#define bfin_write_WDOG_CTL(val)		bfin_write16(WDOG_CTL, val)
+#define bfin_read_WDOG_CNT()			bfin_read32(WDOG_CNT)
+#define bfin_write_WDOG_CNT(val)		bfin_write32(WDOG_CNT, val)
+#define bfin_read_WDOG_STAT()			bfin_read32(WDOG_STAT)
+#define bfin_write_WDOG_STAT(val)		bfin_write32(WDOG_STAT, val)
+
+
+/* Real Time Clock		(0xFFC00300 - 0xFFC003FF)									*/
+#define bfin_read_RTC_STAT()			bfin_read32(RTC_STAT)
+#define bfin_write_RTC_STAT(val)		bfin_write32(RTC_STAT, val)
+#define bfin_read_RTC_ICTL()			bfin_read16(RTC_ICTL)
+#define bfin_write_RTC_ICTL(val)		bfin_write16(RTC_ICTL, val)
+#define bfin_read_RTC_ISTAT()			bfin_read16(RTC_ISTAT)
+#define bfin_write_RTC_ISTAT(val)		bfin_write16(RTC_ISTAT, val)
+#define bfin_read_RTC_SWCNT()			bfin_read16(RTC_SWCNT)
+#define bfin_write_RTC_SWCNT(val)		bfin_write16(RTC_SWCNT, val)
+#define bfin_read_RTC_ALARM()			bfin_read32(RTC_ALARM)
+#define bfin_write_RTC_ALARM(val)		bfin_write32(RTC_ALARM, val)
+#define bfin_read_RTC_FAST()			bfin_read16(RTC_FAST)
+#define bfin_write_RTC_FAST(val)		bfin_write16(RTC_FAST, val)
+#define bfin_read_RTC_PREN()			bfin_read16(RTC_PREN)
+#define bfin_write_RTC_PREN(val)		bfin_write16(RTC_PREN, val)
+
+
+/* UART0 Controller		(0xFFC00400 - 0xFFC004FF)									*/
+#define bfin_read_UART0_THR()			bfin_read16(UART0_THR)
+#define bfin_write_UART0_THR(val)		bfin_write16(UART0_THR, val)
+#define bfin_read_UART0_RBR()			bfin_read16(UART0_RBR)
+#define bfin_write_UART0_RBR(val)		bfin_write16(UART0_RBR, val)
+#define bfin_read_UART0_DLL()			bfin_read16(UART0_DLL)
+#define bfin_write_UART0_DLL(val)		bfin_write16(UART0_DLL, val)
+#define bfin_read_UART0_IER()			bfin_read16(UART0_IER)
+#define bfin_write_UART0_IER(val)		bfin_write16(UART0_IER, val)
+#define bfin_read_UART0_DLH()			bfin_read16(UART0_DLH)
+#define bfin_write_UART0_DLH(val)		bfin_write16(UART0_DLH, val)
+#define bfin_read_UART0_IIR()			bfin_read16(UART0_IIR)
+#define bfin_write_UART0_IIR(val)		bfin_write16(UART0_IIR, val)
+#define bfin_read_UART0_LCR()			bfin_read16(UART0_LCR)
+#define bfin_write_UART0_LCR(val)		bfin_write16(UART0_LCR, val)
+#define bfin_read_UART0_MCR()			bfin_read16(UART0_MCR)
+#define bfin_write_UART0_MCR(val)		bfin_write16(UART0_MCR, val)
+#define bfin_read_UART0_LSR()			bfin_read16(UART0_LSR)
+#define bfin_write_UART0_LSR(val)		bfin_write16(UART0_LSR, val)
+#define bfin_read_UART0_MSR()			bfin_read16(UART0_MSR)
+#define bfin_write_UART0_MSR(val)		bfin_write16(UART0_MSR, val)
+#define bfin_read_UART0_SCR()			bfin_read16(UART0_SCR)
+#define bfin_write_UART0_SCR(val)		bfin_write16(UART0_SCR, val)
+#define bfin_read_UART0_GCTL()			bfin_read16(UART0_GCTL)
+#define bfin_write_UART0_GCTL(val)		bfin_write16(UART0_GCTL, val)
+
+
+/* TIMER0-7 Registers		(0xFFC00600 - 0xFFC006FF)								*/
+#define bfin_read_TIMER0_CONFIG()		bfin_read16(TIMER0_CONFIG)
+#define bfin_write_TIMER0_CONFIG(val)		bfin_write16(TIMER0_CONFIG, val)
+#define bfin_read_TIMER0_COUNTER()		bfin_read32(TIMER0_COUNTER)
+#define bfin_write_TIMER0_COUNTER(val)		bfin_write32(TIMER0_COUNTER, val)
+#define bfin_read_TIMER0_PERIOD()		bfin_read32(TIMER0_PERIOD)
+#define bfin_write_TIMER0_PERIOD(val)		bfin_write32(TIMER0_PERIOD, val)
+#define bfin_read_TIMER0_WIDTH()		bfin_read32(TIMER0_WIDTH)
+#define bfin_write_TIMER0_WIDTH(val)		bfin_write32(TIMER0_WIDTH, val)
+
+#define bfin_read_TIMER1_CONFIG()		bfin_read16(TIMER1_CONFIG)
+#define bfin_write_TIMER1_CONFIG(val)		bfin_write16(TIMER1_CONFIG, val)
+#define bfin_read_TIMER1_COUNTER()		bfin_read32(TIMER1_COUNTER)
+#define bfin_write_TIMER1_COUNTER(val)		bfin_write32(TIMER1_COUNTER, val)
+#define bfin_read_TIMER1_PERIOD()		bfin_read32(TIMER1_PERIOD)
+#define bfin_write_TIMER1_PERIOD(val)		bfin_write32(TIMER1_PERIOD, val)
+#define bfin_read_TIMER1_WIDTH()		bfin_read32(TIMER1_WIDTH)
+#define bfin_write_TIMER1_WIDTH(val)		bfin_write32(TIMER1_WIDTH, val)
+
+#define bfin_read_TIMER2_CONFIG()		bfin_read16(TIMER2_CONFIG)
+#define bfin_write_TIMER2_CONFIG(val)		bfin_write16(TIMER2_CONFIG, val)
+#define bfin_read_TIMER2_COUNTER()		bfin_read32(TIMER2_COUNTER)
+#define bfin_write_TIMER2_COUNTER(val)		bfin_write32(TIMER2_COUNTER, val)
+#define bfin_read_TIMER2_PERIOD()		bfin_read32(TIMER2_PERIOD)
+#define bfin_write_TIMER2_PERIOD(val)		bfin_write32(TIMER2_PERIOD, val)
+#define bfin_read_TIMER2_WIDTH()		bfin_read32(TIMER2_WIDTH)
+#define bfin_write_TIMER2_WIDTH(val)		bfin_write32(TIMER2_WIDTH, val)
+
+#define bfin_read_TIMER3_CONFIG()		bfin_read16(TIMER3_CONFIG)
+#define bfin_write_TIMER3_CONFIG(val)		bfin_write16(TIMER3_CONFIG, val)
+#define bfin_read_TIMER3_COUNTER()		bfin_read32(TIMER3_COUNTER)
+#define bfin_write_TIMER3_COUNTER(val)		bfin_write32(TIMER3_COUNTER, val)
+#define bfin_read_TIMER3_PERIOD()		bfin_read32(TIMER3_PERIOD)
+#define bfin_write_TIMER3_PERIOD(val)		bfin_write32(TIMER3_PERIOD, val)
+#define bfin_read_TIMER3_WIDTH()		bfin_read32(TIMER3_WIDTH)
+#define bfin_write_TIMER3_WIDTH(val)		bfin_write32(TIMER3_WIDTH, val)
+
+#define bfin_read_TIMER4_CONFIG()		bfin_read16(TIMER4_CONFIG)
+#define bfin_write_TIMER4_CONFIG(val)		bfin_write16(TIMER4_CONFIG, val)
+#define bfin_read_TIMER4_COUNTER()		bfin_read32(TIMER4_COUNTER)
+#define bfin_write_TIMER4_COUNTER(val)		bfin_write32(TIMER4_COUNTER, val)
+#define bfin_read_TIMER4_PERIOD()		bfin_read32(TIMER4_PERIOD)
+#define bfin_write_TIMER4_PERIOD(val)		bfin_write32(TIMER4_PERIOD, val)
+#define bfin_read_TIMER4_WIDTH()		bfin_read32(TIMER4_WIDTH)
+#define bfin_write_TIMER4_WIDTH(val)		bfin_write32(TIMER4_WIDTH, val)
+
+#define bfin_read_TIMER5_CONFIG()		bfin_read16(TIMER5_CONFIG)
+#define bfin_write_TIMER5_CONFIG(val)		bfin_write16(TIMER5_CONFIG, val)
+#define bfin_read_TIMER5_COUNTER()		bfin_read32(TIMER5_COUNTER)
+#define bfin_write_TIMER5_COUNTER(val)		bfin_write32(TIMER5_COUNTER, val)
+#define bfin_read_TIMER5_PERIOD()		bfin_read32(TIMER5_PERIOD)
+#define bfin_write_TIMER5_PERIOD(val)		bfin_write32(TIMER5_PERIOD, val)
+#define bfin_read_TIMER5_WIDTH()		bfin_read32(TIMER5_WIDTH)
+#define bfin_write_TIMER5_WIDTH(val)		bfin_write32(TIMER5_WIDTH, val)
+
+#define bfin_read_TIMER6_CONFIG()		bfin_read16(TIMER6_CONFIG)
+#define bfin_write_TIMER6_CONFIG(val)		bfin_write16(TIMER6_CONFIG, val)
+#define bfin_read_TIMER6_COUNTER()		bfin_read32(TIMER6_COUNTER)
+#define bfin_write_TIMER6_COUNTER(val)		bfin_write32(TIMER6_COUNTER, val)
+#define bfin_read_TIMER6_PERIOD()		bfin_read32(TIMER6_PERIOD)
+#define bfin_write_TIMER6_PERIOD(val)		bfin_write32(TIMER6_PERIOD, val)
+#define bfin_read_TIMER6_WIDTH()		bfin_read32(TIMER6_WIDTH)
+#define bfin_write_TIMER6_WIDTH(val)		bfin_write32(TIMER6_WIDTH, val)
+
+#define bfin_read_TIMER7_CONFIG()		bfin_read16(TIMER7_CONFIG)
+#define bfin_write_TIMER7_CONFIG(val)		bfin_write16(TIMER7_CONFIG, val)
+#define bfin_read_TIMER7_COUNTER()		bfin_read32(TIMER7_COUNTER)
+#define bfin_write_TIMER7_COUNTER(val)		bfin_write32(TIMER7_COUNTER, val)
+#define bfin_read_TIMER7_PERIOD()		bfin_read32(TIMER7_PERIOD)
+#define bfin_write_TIMER7_PERIOD(val)		bfin_write32(TIMER7_PERIOD, val)
+#define bfin_read_TIMER7_WIDTH()		bfin_read32(TIMER7_WIDTH)
+#define bfin_write_TIMER7_WIDTH(val)		bfin_write32(TIMER7_WIDTH, val)
+
+#define bfin_read_TIMER_ENABLE()		bfin_read16(TIMER_ENABLE)
+#define bfin_write_TIMER_ENABLE(val)		bfin_write16(TIMER_ENABLE, val)
+#define bfin_read_TIMER_DISABLE()		bfin_read16(TIMER_DISABLE)
+#define bfin_write_TIMER_DISABLE(val)		bfin_write16(TIMER_DISABLE, val)
+#define bfin_read_TIMER_STATUS()		bfin_read32(TIMER_STATUS)
+#define bfin_write_TIMER_STATUS(val)		bfin_write32(TIMER_STATUS, val)
+
+
+/* General Purpose I/O Port F (0xFFC00700 - 0xFFC007FF)								*/
+#define bfin_read_PORTFIO()			bfin_read16(PORTFIO)
+#define bfin_write_PORTFIO(val)			bfin_write16(PORTFIO, val)
+#define bfin_read_PORTFIO_CLEAR()		bfin_read16(PORTFIO_CLEAR)
+#define bfin_write_PORTFIO_CLEAR(val)		bfin_write16(PORTFIO_CLEAR, val)
+#define bfin_read_PORTFIO_SET()			bfin_read16(PORTFIO_SET)
+#define bfin_write_PORTFIO_SET(val)		bfin_write16(PORTFIO_SET, val)
+#define bfin_read_PORTFIO_TOGGLE()		bfin_read16(PORTFIO_TOGGLE)
+#define bfin_write_PORTFIO_TOGGLE(val)		bfin_write16(PORTFIO_TOGGLE, val)
+#define bfin_read_PORTFIO_MASKA()		bfin_read16(PORTFIO_MASKA)
+#define bfin_write_PORTFIO_MASKA(val)		bfin_write16(PORTFIO_MASKA, val)
+#define bfin_read_PORTFIO_MASKA_CLEAR()		bfin_read16(PORTFIO_MASKA_CLEAR)
+#define bfin_write_PORTFIO_MASKA_CLEAR(val)	bfin_write16(PORTFIO_MASKA_CLEAR, val)
+#define bfin_read_PORTFIO_MASKA_SET()		bfin_read16(PORTFIO_MASKA_SET)
+#define bfin_write_PORTFIO_MASKA_SET(val)	bfin_write16(PORTFIO_MASKA_SET, val)
+#define bfin_read_PORTFIO_MASKA_TOGGLE()	bfin_read16(PORTFIO_MASKA_TOGGLE)
+#define bfin_write_PORTFIO_MASKA_TOGGLE(val)	bfin_write16(PORTFIO_MASKA_TOGGLE, val)
+#define bfin_read_PORTFIO_MASKB()		bfin_read16(PORTFIO_MASKB)
+#define bfin_write_PORTFIO_MASKB(val)		bfin_write16(PORTFIO_MASKB, val)
+#define bfin_read_PORTFIO_MASKB_CLEAR()		bfin_read16(PORTFIO_MASKB_CLEAR)
+#define bfin_write_PORTFIO_MASKB_CLEAR(val)	bfin_write16(PORTFIO_MASKB_CLEAR, val)
+#define bfin_read_PORTFIO_MASKB_SET()		bfin_read16(PORTFIO_MASKB_SET)
+#define bfin_write_PORTFIO_MASKB_SET(val)	bfin_write16(PORTFIO_MASKB_SET, val)
+#define bfin_read_PORTFIO_MASKB_TOGGLE()	bfin_read16(PORTFIO_MASKB_TOGGLE)
+#define bfin_write_PORTFIO_MASKB_TOGGLE(val)	bfin_write16(PORTFIO_MASKB_TOGGLE, val)
+#define bfin_read_PORTFIO_DIR()			bfin_read16(PORTFIO_DIR)
+#define bfin_write_PORTFIO_DIR(val)		bfin_write16(PORTFIO_DIR, val)
+#define bfin_read_PORTFIO_POLAR()		bfin_read16(PORTFIO_POLAR)
+#define bfin_write_PORTFIO_POLAR(val)		bfin_write16(PORTFIO_POLAR, val)
+#define bfin_read_PORTFIO_EDGE()		bfin_read16(PORTFIO_EDGE)
+#define bfin_write_PORTFIO_EDGE(val)		bfin_write16(PORTFIO_EDGE, val)
+#define bfin_read_PORTFIO_BOTH()		bfin_read16(PORTFIO_BOTH)
+#define bfin_write_PORTFIO_BOTH(val)		bfin_write16(PORTFIO_BOTH, val)
+#define bfin_read_PORTFIO_INEN()		bfin_read16(PORTFIO_INEN)
+#define bfin_write_PORTFIO_INEN(val)		bfin_write16(PORTFIO_INEN, val)
+
+
+/* SPORT0 Controller		(0xFFC00800 - 0xFFC008FF)								*/
+#define bfin_read_SPORT0_TCR1()			bfin_read16(SPORT0_TCR1)
+#define bfin_write_SPORT0_TCR1(val)		bfin_write16(SPORT0_TCR1, val)
+#define bfin_read_SPORT0_TCR2()			bfin_read16(SPORT0_TCR2)
+#define bfin_write_SPORT0_TCR2(val)		bfin_write16(SPORT0_TCR2, val)
+#define bfin_read_SPORT0_TCLKDIV()		bfin_read16(SPORT0_TCLKDIV)
+#define bfin_write_SPORT0_TCLKDIV(val)		bfin_write16(SPORT0_TCLKDIV, val)
+#define bfin_read_SPORT0_TFSDIV()		bfin_read16(SPORT0_TFSDIV)
+#define bfin_write_SPORT0_TFSDIV(val)		bfin_write16(SPORT0_TFSDIV, val)
+#define bfin_read_SPORT0_TX()			bfin_read32(SPORT0_TX)
+#define bfin_write_SPORT0_TX(val)		bfin_write32(SPORT0_TX, val)
+#define bfin_read_SPORT0_RX()			bfin_read32(SPORT0_RX)
+#define bfin_write_SPORT0_RX(val)		bfin_write32(SPORT0_RX, val)
+#define bfin_read_SPORT0_TX32()			bfin_read32(SPORT0_TX)
+#define bfin_write_SPORT0_TX32(val)		bfin_write32(SPORT0_TX, val)
+#define bfin_read_SPORT0_RX32()			bfin_read32(SPORT0_RX)
+#define bfin_write_SPORT0_RX32(val)		bfin_write32(SPORT0_RX, val)
+#define bfin_read_SPORT0_TX16()			bfin_read16(SPORT0_TX)
+#define bfin_write_SPORT0_TX16(val)		bfin_write16(SPORT0_TX, val)
+#define bfin_read_SPORT0_RX16()			bfin_read16(SPORT0_RX)
+#define bfin_write_SPORT0_RX16(val)		bfin_write16(SPORT0_RX, val)
+#define bfin_read_SPORT0_RCR1()			bfin_read16(SPORT0_RCR1)
+#define bfin_write_SPORT0_RCR1(val)		bfin_write16(SPORT0_RCR1, val)
+#define bfin_read_SPORT0_RCR2()			bfin_read16(SPORT0_RCR2)
+#define bfin_write_SPORT0_RCR2(val)		bfin_write16(SPORT0_RCR2, val)
+#define bfin_read_SPORT0_RCLKDIV()		bfin_read16(SPORT0_RCLKDIV)
+#define bfin_write_SPORT0_RCLKDIV(val)		bfin_write16(SPORT0_RCLKDIV, val)
+#define bfin_read_SPORT0_RFSDIV()		bfin_read16(SPORT0_RFSDIV)
+#define bfin_write_SPORT0_RFSDIV(val)		bfin_write16(SPORT0_RFSDIV, val)
+#define bfin_read_SPORT0_STAT()			bfin_read16(SPORT0_STAT)
+#define bfin_write_SPORT0_STAT(val)		bfin_write16(SPORT0_STAT, val)
+#define bfin_read_SPORT0_CHNL()			bfin_read16(SPORT0_CHNL)
+#define bfin_write_SPORT0_CHNL(val)		bfin_write16(SPORT0_CHNL, val)
+#define bfin_read_SPORT0_MCMC1()		bfin_read16(SPORT0_MCMC1)
+#define bfin_write_SPORT0_MCMC1(val)		bfin_write16(SPORT0_MCMC1, val)
+#define bfin_read_SPORT0_MCMC2()		bfin_read16(SPORT0_MCMC2)
+#define bfin_write_SPORT0_MCMC2(val)		bfin_write16(SPORT0_MCMC2, val)
+#define bfin_read_SPORT0_MTCS0()		bfin_read32(SPORT0_MTCS0)
+#define bfin_write_SPORT0_MTCS0(val)		bfin_write32(SPORT0_MTCS0, val)
+#define bfin_read_SPORT0_MTCS1()		bfin_read32(SPORT0_MTCS1)
+#define bfin_write_SPORT0_MTCS1(val)		bfin_write32(SPORT0_MTCS1, val)
+#define bfin_read_SPORT0_MTCS2()		bfin_read32(SPORT0_MTCS2)
+#define bfin_write_SPORT0_MTCS2(val)		bfin_write32(SPORT0_MTCS2, val)
+#define bfin_read_SPORT0_MTCS3()		bfin_read32(SPORT0_MTCS3)
+#define bfin_write_SPORT0_MTCS3(val)		bfin_write32(SPORT0_MTCS3, val)
+#define bfin_read_SPORT0_MRCS0()		bfin_read32(SPORT0_MRCS0)
+#define bfin_write_SPORT0_MRCS0(val)		bfin_write32(SPORT0_MRCS0, val)
+#define bfin_read_SPORT0_MRCS1()		bfin_read32(SPORT0_MRCS1)
+#define bfin_write_SPORT0_MRCS1(val)		bfin_write32(SPORT0_MRCS1, val)
+#define bfin_read_SPORT0_MRCS2()		bfin_read32(SPORT0_MRCS2)
+#define bfin_write_SPORT0_MRCS2(val)		bfin_write32(SPORT0_MRCS2, val)
+#define bfin_read_SPORT0_MRCS3()		bfin_read32(SPORT0_MRCS3)
+#define bfin_write_SPORT0_MRCS3(val)		bfin_write32(SPORT0_MRCS3, val)
+
+
+/* SPORT1 Controller		(0xFFC00900 - 0xFFC009FF)								*/
+#define bfin_read_SPORT1_TCR1()			bfin_read16(SPORT1_TCR1)
+#define bfin_write_SPORT1_TCR1(val)		bfin_write16(SPORT1_TCR1, val)
+#define bfin_read_SPORT1_TCR2()			bfin_read16(SPORT1_TCR2)
+#define bfin_write_SPORT1_TCR2(val)		bfin_write16(SPORT1_TCR2, val)
+#define bfin_read_SPORT1_TCLKDIV()		bfin_read16(SPORT1_TCLKDIV)
+#define bfin_write_SPORT1_TCLKDIV(val)		bfin_write16(SPORT1_TCLKDIV, val)
+#define bfin_read_SPORT1_TFSDIV()		bfin_read16(SPORT1_TFSDIV)
+#define bfin_write_SPORT1_TFSDIV(val)		bfin_write16(SPORT1_TFSDIV, val)
+#define bfin_read_SPORT1_TX()			bfin_read32(SPORT1_TX)
+#define bfin_write_SPORT1_TX(val)		bfin_write32(SPORT1_TX, val)
+#define bfin_read_SPORT1_RX()			bfin_read32(SPORT1_RX)
+#define bfin_write_SPORT1_RX(val)		bfin_write32(SPORT1_RX, val)
+#define bfin_read_SPORT1_TX32()			bfin_read32(SPORT1_TX)
+#define bfin_write_SPORT1_TX32(val)		bfin_write32(SPORT1_TX, val)
+#define bfin_read_SPORT1_RX32()			bfin_read32(SPORT1_RX)
+#define bfin_write_SPORT1_RX32(val)		bfin_write32(SPORT1_RX, val)
+#define bfin_read_SPORT1_TX16()			bfin_read16(SPORT1_TX)
+#define bfin_write_SPORT1_TX16(val)		bfin_write16(SPORT1_TX, val)
+#define bfin_read_SPORT1_RX16()			bfin_read16(SPORT1_RX)
+#define bfin_write_SPORT1_RX16(val)		bfin_write16(SPORT1_RX, val)
+#define bfin_read_SPORT1_RCR1()			bfin_read16(SPORT1_RCR1)
+#define bfin_write_SPORT1_RCR1(val)		bfin_write16(SPORT1_RCR1, val)
+#define bfin_read_SPORT1_RCR2()			bfin_read16(SPORT1_RCR2)
+#define bfin_write_SPORT1_RCR2(val)		bfin_write16(SPORT1_RCR2, val)
+#define bfin_read_SPORT1_RCLKDIV()		bfin_read16(SPORT1_RCLKDIV)
+#define bfin_write_SPORT1_RCLKDIV(val)		bfin_write16(SPORT1_RCLKDIV, val)
+#define bfin_read_SPORT1_RFSDIV()		bfin_read16(SPORT1_RFSDIV)
+#define bfin_write_SPORT1_RFSDIV(val)		bfin_write16(SPORT1_RFSDIV, val)
+#define bfin_read_SPORT1_STAT()			bfin_read16(SPORT1_STAT)
+#define bfin_write_SPORT1_STAT(val)		bfin_write16(SPORT1_STAT, val)
+#define bfin_read_SPORT1_CHNL()			bfin_read16(SPORT1_CHNL)
+#define bfin_write_SPORT1_CHNL(val)		bfin_write16(SPORT1_CHNL, val)
+#define bfin_read_SPORT1_MCMC1()		bfin_read16(SPORT1_MCMC1)
+#define bfin_write_SPORT1_MCMC1(val)		bfin_write16(SPORT1_MCMC1, val)
+#define bfin_read_SPORT1_MCMC2()		bfin_read16(SPORT1_MCMC2)
+#define bfin_write_SPORT1_MCMC2(val)		bfin_write16(SPORT1_MCMC2, val)
+#define bfin_read_SPORT1_MTCS0()		bfin_read32(SPORT1_MTCS0)
+#define bfin_write_SPORT1_MTCS0(val)		bfin_write32(SPORT1_MTCS0, val)
+#define bfin_read_SPORT1_MTCS1()		bfin_read32(SPORT1_MTCS1)
+#define bfin_write_SPORT1_MTCS1(val)		bfin_write32(SPORT1_MTCS1, val)
+#define bfin_read_SPORT1_MTCS2()		bfin_read32(SPORT1_MTCS2)
+#define bfin_write_SPORT1_MTCS2(val)		bfin_write32(SPORT1_MTCS2, val)
+#define bfin_read_SPORT1_MTCS3()		bfin_read32(SPORT1_MTCS3)
+#define bfin_write_SPORT1_MTCS3(val)		bfin_write32(SPORT1_MTCS3, val)
+#define bfin_read_SPORT1_MRCS0()		bfin_read32(SPORT1_MRCS0)
+#define bfin_write_SPORT1_MRCS0(val)		bfin_write32(SPORT1_MRCS0, val)
+#define bfin_read_SPORT1_MRCS1()		bfin_read32(SPORT1_MRCS1)
+#define bfin_write_SPORT1_MRCS1(val)		bfin_write32(SPORT1_MRCS1, val)
+#define bfin_read_SPORT1_MRCS2()		bfin_read32(SPORT1_MRCS2)
+#define bfin_write_SPORT1_MRCS2(val)		bfin_write32(SPORT1_MRCS2, val)
+#define bfin_read_SPORT1_MRCS3()		bfin_read32(SPORT1_MRCS3)
+#define bfin_write_SPORT1_MRCS3(val)		bfin_write32(SPORT1_MRCS3, val)
+
+
+/* External Bus Interface Unit (0xFFC00A00 - 0xFFC00AFF)							*/
+#define bfin_read_EBIU_AMGCTL()			bfin_read16(EBIU_AMGCTL)
+#define bfin_write_EBIU_AMGCTL(val)		bfin_write16(EBIU_AMGCTL, val)
+#define bfin_read_EBIU_AMBCTL0()		bfin_read32(EBIU_AMBCTL0)
+#define bfin_write_EBIU_AMBCTL0(val)		bfin_write32(EBIU_AMBCTL0, val)
+#define bfin_read_EBIU_AMBCTL1()		bfin_read32(EBIU_AMBCTL1)
+#define bfin_write_EBIU_AMBCTL1(val)		bfin_write32(EBIU_AMBCTL1, val)
+#define bfin_read_EBIU_SDGCTL()			bfin_read32(EBIU_SDGCTL)
+#define bfin_write_EBIU_SDGCTL(val)		bfin_write32(EBIU_SDGCTL, val)
+#define bfin_read_EBIU_SDBCTL()			bfin_read16(EBIU_SDBCTL)
+#define bfin_write_EBIU_SDBCTL(val)		bfin_write16(EBIU_SDBCTL, val)
+#define bfin_read_EBIU_SDRRC()			bfin_read16(EBIU_SDRRC)
+#define bfin_write_EBIU_SDRRC(val)		bfin_write16(EBIU_SDRRC, val)
+#define bfin_read_EBIU_SDSTAT()			bfin_read16(EBIU_SDSTAT)
+#define bfin_write_EBIU_SDSTAT(val)		bfin_write16(EBIU_SDSTAT, val)
+
+
+/* DMA Traffic Control Registers													*/
+#define bfin_read_DMAC_TC_PER()			bfin_read16(DMAC_TC_PER)
+#define bfin_write_DMAC_TC_PER(val)		bfin_write16(DMAC_TC_PER, val)
+#define bfin_read_DMAC_TC_CNT()			bfin_read16(DMAC_TC_CNT)
+#define bfin_write_DMAC_TC_CNT(val)		bfin_write16(DMAC_TC_CNT, val)
+
+/* DMA Controller																	*/
+#define bfin_read_DMA0_CONFIG()			bfin_read16(DMA0_CONFIG)
+#define bfin_write_DMA0_CONFIG(val)		bfin_write16(DMA0_CONFIG, val)
+#define bfin_read_DMA0_NEXT_DESC_PTR()		bfin_read32(DMA0_NEXT_DESC_PTR)
+#define bfin_write_DMA0_NEXT_DESC_PTR(val)	bfin_write32(DMA0_NEXT_DESC_PTR, val)
+#define bfin_read_DMA0_START_ADDR()		bfin_read32(DMA0_START_ADDR)
+#define bfin_write_DMA0_START_ADDR(val)		bfin_write32(DMA0_START_ADDR, val)
+#define bfin_read_DMA0_X_COUNT()		bfin_read16(DMA0_X_COUNT)
+#define bfin_write_DMA0_X_COUNT(val)		bfin_write16(DMA0_X_COUNT, val)
+#define bfin_read_DMA0_Y_COUNT()		bfin_read16(DMA0_Y_COUNT)
+#define bfin_write_DMA0_Y_COUNT(val)		bfin_write16(DMA0_Y_COUNT, val)
+#define bfin_read_DMA0_X_MODIFY()		bfin_read16(DMA0_X_MODIFY)
+#define bfin_write_DMA0_X_MODIFY(val)		bfin_write16(DMA0_X_MODIFY, val)
+#define bfin_read_DMA0_Y_MODIFY()		bfin_read16(DMA0_Y_MODIFY)
+#define bfin_write_DMA0_Y_MODIFY(val)		bfin_write16(DMA0_Y_MODIFY, val)
+#define bfin_read_DMA0_CURR_DESC_PTR()		bfin_read32(DMA0_CURR_DESC_PTR)
+#define bfin_write_DMA0_CURR_DESC_PTR(val)	bfin_write32(DMA0_CURR_DESC_PTR, val)
+#define bfin_read_DMA0_CURR_ADDR()		bfin_read32(DMA0_CURR_ADDR)
+#define bfin_write_DMA0_CURR_ADDR(val)		bfin_write32(DMA0_CURR_ADDR, val)
+#define bfin_read_DMA0_CURR_X_COUNT()		bfin_read16(DMA0_CURR_X_COUNT)
+#define bfin_write_DMA0_CURR_X_COUNT(val)	bfin_write16(DMA0_CURR_X_COUNT, val)
+#define bfin_read_DMA0_CURR_Y_COUNT()		bfin_read16(DMA0_CURR_Y_COUNT)
+#define bfin_write_DMA0_CURR_Y_COUNT(val)	bfin_write16(DMA0_CURR_Y_COUNT, val)
+#define bfin_read_DMA0_IRQ_STATUS()		bfin_read16(DMA0_IRQ_STATUS)
+#define bfin_write_DMA0_IRQ_STATUS(val)		bfin_write16(DMA0_IRQ_STATUS, val)
+#define bfin_read_DMA0_PERIPHERAL_MAP()		bfin_read16(DMA0_PERIPHERAL_MAP)
+#define bfin_write_DMA0_PERIPHERAL_MAP(val)	bfin_write16(DMA0_PERIPHERAL_MAP, val)
+
+#define bfin_read_DMA1_CONFIG()			bfin_read16(DMA1_CONFIG)
+#define bfin_write_DMA1_CONFIG(val)		bfin_write16(DMA1_CONFIG, val)
+#define bfin_read_DMA1_NEXT_DESC_PTR()		bfin_read32(DMA1_NEXT_DESC_PTR)
+#define bfin_write_DMA1_NEXT_DESC_PTR(val)	bfin_write32(DMA1_NEXT_DESC_PTR, val)
+#define bfin_read_DMA1_START_ADDR()		bfin_read32(DMA1_START_ADDR)
+#define bfin_write_DMA1_START_ADDR(val)		bfin_write32(DMA1_START_ADDR, val)
+#define bfin_read_DMA1_X_COUNT()		bfin_read16(DMA1_X_COUNT)
+#define bfin_write_DMA1_X_COUNT(val)		bfin_write16(DMA1_X_COUNT, val)
+#define bfin_read_DMA1_Y_COUNT()		bfin_read16(DMA1_Y_COUNT)
+#define bfin_write_DMA1_Y_COUNT(val)		bfin_write16(DMA1_Y_COUNT, val)
+#define bfin_read_DMA1_X_MODIFY()		bfin_read16(DMA1_X_MODIFY)
+#define bfin_write_DMA1_X_MODIFY(val)		bfin_write16(DMA1_X_MODIFY, val)
+#define bfin_read_DMA1_Y_MODIFY()		bfin_read16(DMA1_Y_MODIFY)
+#define bfin_write_DMA1_Y_MODIFY(val)		bfin_write16(DMA1_Y_MODIFY, val)
+#define bfin_read_DMA1_CURR_DESC_PTR()		bfin_read32(DMA1_CURR_DESC_PTR)
+#define bfin_write_DMA1_CURR_DESC_PTR(val)	bfin_write32(DMA1_CURR_DESC_PTR, val)
+#define bfin_read_DMA1_CURR_ADDR()		bfin_read32(DMA1_CURR_ADDR)
+#define bfin_write_DMA1_CURR_ADDR(val)		bfin_write32(DMA1_CURR_ADDR, val)
+#define bfin_read_DMA1_CURR_X_COUNT()		bfin_read16(DMA1_CURR_X_COUNT)
+#define bfin_write_DMA1_CURR_X_COUNT(val)	bfin_write16(DMA1_CURR_X_COUNT, val)
+#define bfin_read_DMA1_CURR_Y_COUNT()		bfin_read16(DMA1_CURR_Y_COUNT)
+#define bfin_write_DMA1_CURR_Y_COUNT(val)	bfin_write16(DMA1_CURR_Y_COUNT, val)
+#define bfin_read_DMA1_IRQ_STATUS()		bfin_read16(DMA1_IRQ_STATUS)
+#define bfin_write_DMA1_IRQ_STATUS(val)		bfin_write16(DMA1_IRQ_STATUS, val)
+#define bfin_read_DMA1_PERIPHERAL_MAP()		bfin_read16(DMA1_PERIPHERAL_MAP)
+#define bfin_write_DMA1_PERIPHERAL_MAP(val)	bfin_write16(DMA1_PERIPHERAL_MAP, val)
+
+#define bfin_read_DMA2_CONFIG()			bfin_read16(DMA2_CONFIG)
+#define bfin_write_DMA2_CONFIG(val)		bfin_write16(DMA2_CONFIG, val)
+#define bfin_read_DMA2_NEXT_DESC_PTR()		bfin_read32(DMA2_NEXT_DESC_PTR)
+#define bfin_write_DMA2_NEXT_DESC_PTR(val)	bfin_write32(DMA2_NEXT_DESC_PTR, val)
+#define bfin_read_DMA2_START_ADDR()		bfin_read32(DMA2_START_ADDR)
+#define bfin_write_DMA2_START_ADDR(val)		bfin_write32(DMA2_START_ADDR, val)
+#define bfin_read_DMA2_X_COUNT()		bfin_read16(DMA2_X_COUNT)
+#define bfin_write_DMA2_X_COUNT(val)		bfin_write16(DMA2_X_COUNT, val)
+#define bfin_read_DMA2_Y_COUNT()		bfin_read16(DMA2_Y_COUNT)
+#define bfin_write_DMA2_Y_COUNT(val)		bfin_write16(DMA2_Y_COUNT, val)
+#define bfin_read_DMA2_X_MODIFY()		bfin_read16(DMA2_X_MODIFY)
+#define bfin_write_DMA2_X_MODIFY(val)		bfin_write16(DMA2_X_MODIFY, val)
+#define bfin_read_DMA2_Y_MODIFY()		bfin_read16(DMA2_Y_MODIFY)
+#define bfin_write_DMA2_Y_MODIFY(val)		bfin_write16(DMA2_Y_MODIFY, val)
+#define bfin_read_DMA2_CURR_DESC_PTR()		bfin_read32(DMA2_CURR_DESC_PTR)
+#define bfin_write_DMA2_CURR_DESC_PTR(val)	bfin_write32(DMA2_CURR_DESC_PTR, val)
+#define bfin_read_DMA2_CURR_ADDR()		bfin_read32(DMA2_CURR_ADDR)
+#define bfin_write_DMA2_CURR_ADDR(val)		bfin_write32(DMA2_CURR_ADDR, val)
+#define bfin_read_DMA2_CURR_X_COUNT()		bfin_read16(DMA2_CURR_X_COUNT)
+#define bfin_write_DMA2_CURR_X_COUNT(val)	bfin_write16(DMA2_CURR_X_COUNT, val)
+#define bfin_read_DMA2_CURR_Y_COUNT()		bfin_read16(DMA2_CURR_Y_COUNT)
+#define bfin_write_DMA2_CURR_Y_COUNT(val)	bfin_write16(DMA2_CURR_Y_COUNT, val)
+#define bfin_read_DMA2_IRQ_STATUS()		bfin_read16(DMA2_IRQ_STATUS)
+#define bfin_write_DMA2_IRQ_STATUS(val)		bfin_write16(DMA2_IRQ_STATUS, val)
+#define bfin_read_DMA2_PERIPHERAL_MAP()		bfin_read16(DMA2_PERIPHERAL_MAP)
+#define bfin_write_DMA2_PERIPHERAL_MAP(val)	bfin_write16(DMA2_PERIPHERAL_MAP, val)
+
+#define bfin_read_DMA3_CONFIG()			bfin_read16(DMA3_CONFIG)
+#define bfin_write_DMA3_CONFIG(val)		bfin_write16(DMA3_CONFIG, val)
+#define bfin_read_DMA3_NEXT_DESC_PTR()		bfin_read32(DMA3_NEXT_DESC_PTR)
+#define bfin_write_DMA3_NEXT_DESC_PTR(val)	bfin_write32(DMA3_NEXT_DESC_PTR, val)
+#define bfin_read_DMA3_START_ADDR()		bfin_read32(DMA3_START_ADDR)
+#define bfin_write_DMA3_START_ADDR(val)		bfin_write32(DMA3_START_ADDR, val)
+#define bfin_read_DMA3_X_COUNT()		bfin_read16(DMA3_X_COUNT)
+#define bfin_write_DMA3_X_COUNT(val)		bfin_write16(DMA3_X_COUNT, val)
+#define bfin_read_DMA3_Y_COUNT()		bfin_read16(DMA3_Y_COUNT)
+#define bfin_write_DMA3_Y_COUNT(val)		bfin_write16(DMA3_Y_COUNT, val)
+#define bfin_read_DMA3_X_MODIFY()		bfin_read16(DMA3_X_MODIFY)
+#define bfin_write_DMA3_X_MODIFY(val)		bfin_write16(DMA3_X_MODIFY, val)
+#define bfin_read_DMA3_Y_MODIFY()		bfin_read16(DMA3_Y_MODIFY)
+#define bfin_write_DMA3_Y_MODIFY(val)		bfin_write16(DMA3_Y_MODIFY, val)
+#define bfin_read_DMA3_CURR_DESC_PTR()		bfin_read32(DMA3_CURR_DESC_PTR)
+#define bfin_write_DMA3_CURR_DESC_PTR(val)	bfin_write32(DMA3_CURR_DESC_PTR, val)
+#define bfin_read_DMA3_CURR_ADDR()		bfin_read32(DMA3_CURR_ADDR)
+#define bfin_write_DMA3_CURR_ADDR(val)		bfin_write32(DMA3_CURR_ADDR, val)
+#define bfin_read_DMA3_CURR_X_COUNT()		bfin_read16(DMA3_CURR_X_COUNT)
+#define bfin_write_DMA3_CURR_X_COUNT(val)	bfin_write16(DMA3_CURR_X_COUNT, val)
+#define bfin_read_DMA3_CURR_Y_COUNT()		bfin_read16(DMA3_CURR_Y_COUNT)
+#define bfin_write_DMA3_CURR_Y_COUNT(val)	bfin_write16(DMA3_CURR_Y_COUNT, val)
+#define bfin_read_DMA3_IRQ_STATUS()		bfin_read16(DMA3_IRQ_STATUS)
+#define bfin_write_DMA3_IRQ_STATUS(val)		bfin_write16(DMA3_IRQ_STATUS, val)
+#define bfin_read_DMA3_PERIPHERAL_MAP()		bfin_read16(DMA3_PERIPHERAL_MAP)
+#define bfin_write_DMA3_PERIPHERAL_MAP(val)	bfin_write16(DMA3_PERIPHERAL_MAP, val)
+
+#define bfin_read_DMA4_CONFIG()			bfin_read16(DMA4_CONFIG)
+#define bfin_write_DMA4_CONFIG(val)		bfin_write16(DMA4_CONFIG, val)
+#define bfin_read_DMA4_NEXT_DESC_PTR()		bfin_read32(DMA4_NEXT_DESC_PTR)
+#define bfin_write_DMA4_NEXT_DESC_PTR(val)	bfin_write32(DMA4_NEXT_DESC_PTR, val)
+#define bfin_read_DMA4_START_ADDR()		bfin_read32(DMA4_START_ADDR)
+#define bfin_write_DMA4_START_ADDR(val)		bfin_write32(DMA4_START_ADDR, val)
+#define bfin_read_DMA4_X_COUNT()		bfin_read16(DMA4_X_COUNT)
+#define bfin_write_DMA4_X_COUNT(val)		bfin_write16(DMA4_X_COUNT, val)
+#define bfin_read_DMA4_Y_COUNT()		bfin_read16(DMA4_Y_COUNT)
+#define bfin_write_DMA4_Y_COUNT(val)		bfin_write16(DMA4_Y_COUNT, val)
+#define bfin_read_DMA4_X_MODIFY()		bfin_read16(DMA4_X_MODIFY)
+#define bfin_write_DMA4_X_MODIFY(val)		bfin_write16(DMA4_X_MODIFY, val)
+#define bfin_read_DMA4_Y_MODIFY()		bfin_read16(DMA4_Y_MODIFY)
+#define bfin_write_DMA4_Y_MODIFY(val)		bfin_write16(DMA4_Y_MODIFY, val)
+#define bfin_read_DMA4_CURR_DESC_PTR()		bfin_read32(DMA4_CURR_DESC_PTR)
+#define bfin_write_DMA4_CURR_DESC_PTR(val)	bfin_write32(DMA4_CURR_DESC_PTR, val)
+#define bfin_read_DMA4_CURR_ADDR()		bfin_read32(DMA4_CURR_ADDR)
+#define bfin_write_DMA4_CURR_ADDR(val)		bfin_write32(DMA4_CURR_ADDR, val)
+#define bfin_read_DMA4_CURR_X_COUNT()		bfin_read16(DMA4_CURR_X_COUNT)
+#define bfin_write_DMA4_CURR_X_COUNT(val)	bfin_write16(DMA4_CURR_X_COUNT, val)
+#define bfin_read_DMA4_CURR_Y_COUNT()		bfin_read16(DMA4_CURR_Y_COUNT)
+#define bfin_write_DMA4_CURR_Y_COUNT(val)	bfin_write16(DMA4_CURR_Y_COUNT, val)
+#define bfin_read_DMA4_IRQ_STATUS()		bfin_read16(DMA4_IRQ_STATUS)
+#define bfin_write_DMA4_IRQ_STATUS(val)		bfin_write16(DMA4_IRQ_STATUS, val)
+#define bfin_read_DMA4_PERIPHERAL_MAP()		bfin_read16(DMA4_PERIPHERAL_MAP)
+#define bfin_write_DMA4_PERIPHERAL_MAP(val)	bfin_write16(DMA4_PERIPHERAL_MAP, val)
+
+#define bfin_read_DMA5_CONFIG()			bfin_read16(DMA5_CONFIG)
+#define bfin_write_DMA5_CONFIG(val)		bfin_write16(DMA5_CONFIG, val)
+#define bfin_read_DMA5_NEXT_DESC_PTR()		bfin_read32(DMA5_NEXT_DESC_PTR)
+#define bfin_write_DMA5_NEXT_DESC_PTR(val)	bfin_write32(DMA5_NEXT_DESC_PTR, val)
+#define bfin_read_DMA5_START_ADDR()		bfin_read32(DMA5_START_ADDR)
+#define bfin_write_DMA5_START_ADDR(val)		bfin_write32(DMA5_START_ADDR, val)
+#define bfin_read_DMA5_X_COUNT()		bfin_read16(DMA5_X_COUNT)
+#define bfin_write_DMA5_X_COUNT(val)		bfin_write16(DMA5_X_COUNT, val)
+#define bfin_read_DMA5_Y_COUNT()		bfin_read16(DMA5_Y_COUNT)
+#define bfin_write_DMA5_Y_COUNT(val)		bfin_write16(DMA5_Y_COUNT, val)
+#define bfin_read_DMA5_X_MODIFY()		bfin_read16(DMA5_X_MODIFY)
+#define bfin_write_DMA5_X_MODIFY(val)		bfin_write16(DMA5_X_MODIFY, val)
+#define bfin_read_DMA5_Y_MODIFY()		bfin_read16(DMA5_Y_MODIFY)
+#define bfin_write_DMA5_Y_MODIFY(val)		bfin_write16(DMA5_Y_MODIFY, val)
+#define bfin_read_DMA5_CURR_DESC_PTR()		bfin_read32(DMA5_CURR_DESC_PTR)
+#define bfin_write_DMA5_CURR_DESC_PTR(val)	bfin_write32(DMA5_CURR_DESC_PTR, val)
+#define bfin_read_DMA5_CURR_ADDR()		bfin_read32(DMA5_CURR_ADDR)
+#define bfin_write_DMA5_CURR_ADDR(val)		bfin_write32(DMA5_CURR_ADDR, val)
+#define bfin_read_DMA5_CURR_X_COUNT()		bfin_read16(DMA5_CURR_X_COUNT)
+#define bfin_write_DMA5_CURR_X_COUNT(val)	bfin_write16(DMA5_CURR_X_COUNT, val)
+#define bfin_read_DMA5_CURR_Y_COUNT()		bfin_read16(DMA5_CURR_Y_COUNT)
+#define bfin_write_DMA5_CURR_Y_COUNT(val)	bfin_write16(DMA5_CURR_Y_COUNT, val)
+#define bfin_read_DMA5_IRQ_STATUS()		bfin_read16(DMA5_IRQ_STATUS)
+#define bfin_write_DMA5_IRQ_STATUS(val)		bfin_write16(DMA5_IRQ_STATUS, val)
+#define bfin_read_DMA5_PERIPHERAL_MAP()		bfin_read16(DMA5_PERIPHERAL_MAP)
+#define bfin_write_DMA5_PERIPHERAL_MAP(val)	bfin_write16(DMA5_PERIPHERAL_MAP, val)
+
+#define bfin_read_DMA6_CONFIG()			bfin_read16(DMA6_CONFIG)
+#define bfin_write_DMA6_CONFIG(val)		bfin_write16(DMA6_CONFIG, val)
+#define bfin_read_DMA6_NEXT_DESC_PTR()		bfin_read32(DMA6_NEXT_DESC_PTR)
+#define bfin_write_DMA6_NEXT_DESC_PTR(val)	bfin_write32(DMA6_NEXT_DESC_PTR, val)
+#define bfin_read_DMA6_START_ADDR()		bfin_read32(DMA6_START_ADDR)
+#define bfin_write_DMA6_START_ADDR(val)		bfin_write32(DMA6_START_ADDR, val)
+#define bfin_read_DMA6_X_COUNT()		bfin_read16(DMA6_X_COUNT)
+#define bfin_write_DMA6_X_COUNT(val)		bfin_write16(DMA6_X_COUNT, val)
+#define bfin_read_DMA6_Y_COUNT()		bfin_read16(DMA6_Y_COUNT)
+#define bfin_write_DMA6_Y_COUNT(val)		bfin_write16(DMA6_Y_COUNT, val)
+#define bfin_read_DMA6_X_MODIFY()		bfin_read16(DMA6_X_MODIFY)
+#define bfin_write_DMA6_X_MODIFY(val)		bfin_write16(DMA6_X_MODIFY, val)
+#define bfin_read_DMA6_Y_MODIFY()		bfin_read16(DMA6_Y_MODIFY)
+#define bfin_write_DMA6_Y_MODIFY(val)		bfin_write16(DMA6_Y_MODIFY, val)
+#define bfin_read_DMA6_CURR_DESC_PTR()		bfin_read32(DMA6_CURR_DESC_PTR)
+#define bfin_write_DMA6_CURR_DESC_PTR(val)	bfin_write32(DMA6_CURR_DESC_PTR, val)
+#define bfin_read_DMA6_CURR_ADDR()		bfin_read32(DMA6_CURR_ADDR)
+#define bfin_write_DMA6_CURR_ADDR(val)		bfin_write32(DMA6_CURR_ADDR, val)
+#define bfin_read_DMA6_CURR_X_COUNT()		bfin_read16(DMA6_CURR_X_COUNT)
+#define bfin_write_DMA6_CURR_X_COUNT(val)	bfin_write16(DMA6_CURR_X_COUNT, val)
+#define bfin_read_DMA6_CURR_Y_COUNT()		bfin_read16(DMA6_CURR_Y_COUNT)
+#define bfin_write_DMA6_CURR_Y_COUNT(val)	bfin_write16(DMA6_CURR_Y_COUNT, val)
+#define bfin_read_DMA6_IRQ_STATUS()		bfin_read16(DMA6_IRQ_STATUS)
+#define bfin_write_DMA6_IRQ_STATUS(val)		bfin_write16(DMA6_IRQ_STATUS, val)
+#define bfin_read_DMA6_PERIPHERAL_MAP()		bfin_read16(DMA6_PERIPHERAL_MAP)
+#define bfin_write_DMA6_PERIPHERAL_MAP(val)	bfin_write16(DMA6_PERIPHERAL_MAP, val)
+
+#define bfin_read_DMA7_CONFIG()			bfin_read16(DMA7_CONFIG)
+#define bfin_write_DMA7_CONFIG(val)		bfin_write16(DMA7_CONFIG, val)
+#define bfin_read_DMA7_NEXT_DESC_PTR()		bfin_read32(DMA7_NEXT_DESC_PTR)
+#define bfin_write_DMA7_NEXT_DESC_PTR(val)	bfin_write32(DMA7_NEXT_DESC_PTR, val)
+#define bfin_read_DMA7_START_ADDR()		bfin_read32(DMA7_START_ADDR)
+#define bfin_write_DMA7_START_ADDR(val)		bfin_write32(DMA7_START_ADDR, val)
+#define bfin_read_DMA7_X_COUNT()		bfin_read16(DMA7_X_COUNT)
+#define bfin_write_DMA7_X_COUNT(val)		bfin_write16(DMA7_X_COUNT, val)
+#define bfin_read_DMA7_Y_COUNT()		bfin_read16(DMA7_Y_COUNT)
+#define bfin_write_DMA7_Y_COUNT(val)		bfin_write16(DMA7_Y_COUNT, val)
+#define bfin_read_DMA7_X_MODIFY()		bfin_read16(DMA7_X_MODIFY)
+#define bfin_write_DMA7_X_MODIFY(val)		bfin_write16(DMA7_X_MODIFY, val)
+#define bfin_read_DMA7_Y_MODIFY()		bfin_read16(DMA7_Y_MODIFY)
+#define bfin_write_DMA7_Y_MODIFY(val)		bfin_write16(DMA7_Y_MODIFY, val)
+#define bfin_read_DMA7_CURR_DESC_PTR()		bfin_read32(DMA7_CURR_DESC_PTR)
+#define bfin_write_DMA7_CURR_DESC_PTR(val)	bfin_write32(DMA7_CURR_DESC_PTR, val)
+#define bfin_read_DMA7_CURR_ADDR()		bfin_read32(DMA7_CURR_ADDR)
+#define bfin_write_DMA7_CURR_ADDR(val)		bfin_write32(DMA7_CURR_ADDR, val)
+#define bfin_read_DMA7_CURR_X_COUNT()		bfin_read16(DMA7_CURR_X_COUNT)
+#define bfin_write_DMA7_CURR_X_COUNT(val)	bfin_write16(DMA7_CURR_X_COUNT, val)
+#define bfin_read_DMA7_CURR_Y_COUNT()		bfin_read16(DMA7_CURR_Y_COUNT)
+#define bfin_write_DMA7_CURR_Y_COUNT(val)	bfin_write16(DMA7_CURR_Y_COUNT, val)
+#define bfin_read_DMA7_IRQ_STATUS()		bfin_read16(DMA7_IRQ_STATUS)
+#define bfin_write_DMA7_IRQ_STATUS(val)		bfin_write16(DMA7_IRQ_STATUS, val)
+#define bfin_read_DMA7_PERIPHERAL_MAP()		bfin_read16(DMA7_PERIPHERAL_MAP)
+#define bfin_write_DMA7_PERIPHERAL_MAP(val)	bfin_write16(DMA7_PERIPHERAL_MAP, val)
+
+#define bfin_read_DMA8_CONFIG()			bfin_read16(DMA8_CONFIG)
+#define bfin_write_DMA8_CONFIG(val)		bfin_write16(DMA8_CONFIG, val)
+#define bfin_read_DMA8_NEXT_DESC_PTR()		bfin_read32(DMA8_NEXT_DESC_PTR)
+#define bfin_write_DMA8_NEXT_DESC_PTR(val)	bfin_write32(DMA8_NEXT_DESC_PTR, val)
+#define bfin_read_DMA8_START_ADDR()		bfin_read32(DMA8_START_ADDR)
+#define bfin_write_DMA8_START_ADDR(val)		bfin_write32(DMA8_START_ADDR, val)
+#define bfin_read_DMA8_X_COUNT()		bfin_read16(DMA8_X_COUNT)
+#define bfin_write_DMA8_X_COUNT(val)		bfin_write16(DMA8_X_COUNT, val)
+#define bfin_read_DMA8_Y_COUNT()		bfin_read16(DMA8_Y_COUNT)
+#define bfin_write_DMA8_Y_COUNT(val)		bfin_write16(DMA8_Y_COUNT, val)
+#define bfin_read_DMA8_X_MODIFY()		bfin_read16(DMA8_X_MODIFY)
+#define bfin_write_DMA8_X_MODIFY(val)		bfin_write16(DMA8_X_MODIFY, val)
+#define bfin_read_DMA8_Y_MODIFY()		bfin_read16(DMA8_Y_MODIFY)
+#define bfin_write_DMA8_Y_MODIFY(val)		bfin_write16(DMA8_Y_MODIFY, val)
+#define bfin_read_DMA8_CURR_DESC_PTR()		bfin_read32(DMA8_CURR_DESC_PTR)
+#define bfin_write_DMA8_CURR_DESC_PTR(val)	bfin_write32(DMA8_CURR_DESC_PTR, val)
+#define bfin_read_DMA8_CURR_ADDR()		bfin_read32(DMA8_CURR_ADDR)
+#define bfin_write_DMA8_CURR_ADDR(val)		bfin_write32(DMA8_CURR_ADDR, val)
+#define bfin_read_DMA8_CURR_X_COUNT()		bfin_read16(DMA8_CURR_X_COUNT)
+#define bfin_write_DMA8_CURR_X_COUNT(val)	bfin_write16(DMA8_CURR_X_COUNT, val)
+#define bfin_read_DMA8_CURR_Y_COUNT()		bfin_read16(DMA8_CURR_Y_COUNT)
+#define bfin_write_DMA8_CURR_Y_COUNT(val)	bfin_write16(DMA8_CURR_Y_COUNT, val)
+#define bfin_read_DMA8_IRQ_STATUS()		bfin_read16(DMA8_IRQ_STATUS)
+#define bfin_write_DMA8_IRQ_STATUS(val)		bfin_write16(DMA8_IRQ_STATUS, val)
+#define bfin_read_DMA8_PERIPHERAL_MAP()		bfin_read16(DMA8_PERIPHERAL_MAP)
+#define bfin_write_DMA8_PERIPHERAL_MAP(val)	bfin_write16(DMA8_PERIPHERAL_MAP, val)
+
+#define bfin_read_DMA9_CONFIG()			bfin_read16(DMA9_CONFIG)
+#define bfin_write_DMA9_CONFIG(val)		bfin_write16(DMA9_CONFIG, val)
+#define bfin_read_DMA9_NEXT_DESC_PTR()		bfin_read32(DMA9_NEXT_DESC_PTR)
+#define bfin_write_DMA9_NEXT_DESC_PTR(val)	bfin_write32(DMA9_NEXT_DESC_PTR, val)
+#define bfin_read_DMA9_START_ADDR()		bfin_read32(DMA9_START_ADDR)
+#define bfin_write_DMA9_START_ADDR(val)		bfin_write32(DMA9_START_ADDR, val)
+#define bfin_read_DMA9_X_COUNT()		bfin_read16(DMA9_X_COUNT)
+#define bfin_write_DMA9_X_COUNT(val)		bfin_write16(DMA9_X_COUNT, val)
+#define bfin_read_DMA9_Y_COUNT()		bfin_read16(DMA9_Y_COUNT)
+#define bfin_write_DMA9_Y_COUNT(val)		bfin_write16(DMA9_Y_COUNT, val)
+#define bfin_read_DMA9_X_MODIFY()		bfin_read16(DMA9_X_MODIFY)
+#define bfin_write_DMA9_X_MODIFY(val)		bfin_write16(DMA9_X_MODIFY, val)
+#define bfin_read_DMA9_Y_MODIFY()		bfin_read16(DMA9_Y_MODIFY)
+#define bfin_write_DMA9_Y_MODIFY(val)		bfin_write16(DMA9_Y_MODIFY, val)
+#define bfin_read_DMA9_CURR_DESC_PTR()		bfin_read32(DMA9_CURR_DESC_PTR)
+#define bfin_write_DMA9_CURR_DESC_PTR(val)	bfin_write32(DMA9_CURR_DESC_PTR, val)
+#define bfin_read_DMA9_CURR_ADDR()		bfin_read32(DMA9_CURR_ADDR)
+#define bfin_write_DMA9_CURR_ADDR(val)		bfin_write32(DMA9_CURR_ADDR, val)
+#define bfin_read_DMA9_CURR_X_COUNT()		bfin_read16(DMA9_CURR_X_COUNT)
+#define bfin_write_DMA9_CURR_X_COUNT(val)	bfin_write16(DMA9_CURR_X_COUNT, val)
+#define bfin_read_DMA9_CURR_Y_COUNT()		bfin_read16(DMA9_CURR_Y_COUNT)
+#define bfin_write_DMA9_CURR_Y_COUNT(val)	bfin_write16(DMA9_CURR_Y_COUNT, val)
+#define bfin_read_DMA9_IRQ_STATUS()		bfin_read16(DMA9_IRQ_STATUS)
+#define bfin_write_DMA9_IRQ_STATUS(val)		bfin_write16(DMA9_IRQ_STATUS, val)
+#define bfin_read_DMA9_PERIPHERAL_MAP()		bfin_read16(DMA9_PERIPHERAL_MAP)
+#define bfin_write_DMA9_PERIPHERAL_MAP(val)	bfin_write16(DMA9_PERIPHERAL_MAP, val)
+
+#define bfin_read_DMA10_CONFIG()		bfin_read16(DMA10_CONFIG)
+#define bfin_write_DMA10_CONFIG(val)		bfin_write16(DMA10_CONFIG, val)
+#define bfin_read_DMA10_NEXT_DESC_PTR()		bfin_read32(DMA10_NEXT_DESC_PTR)
+#define bfin_write_DMA10_NEXT_DESC_PTR(val)	bfin_write32(DMA10_NEXT_DESC_PTR, val)
+#define bfin_read_DMA10_START_ADDR()		bfin_read32(DMA10_START_ADDR)
+#define bfin_write_DMA10_START_ADDR(val)	bfin_write32(DMA10_START_ADDR, val)
+#define bfin_read_DMA10_X_COUNT()		bfin_read16(DMA10_X_COUNT)
+#define bfin_write_DMA10_X_COUNT(val)		bfin_write16(DMA10_X_COUNT, val)
+#define bfin_read_DMA10_Y_COUNT()		bfin_read16(DMA10_Y_COUNT)
+#define bfin_write_DMA10_Y_COUNT(val)		bfin_write16(DMA10_Y_COUNT, val)
+#define bfin_read_DMA10_X_MODIFY()		bfin_read16(DMA10_X_MODIFY)
+#define bfin_write_DMA10_X_MODIFY(val)		bfin_write16(DMA10_X_MODIFY, val)
+#define bfin_read_DMA10_Y_MODIFY()		bfin_read16(DMA10_Y_MODIFY)
+#define bfin_write_DMA10_Y_MODIFY(val)		bfin_write16(DMA10_Y_MODIFY, val)
+#define bfin_read_DMA10_CURR_DESC_PTR()		bfin_read32(DMA10_CURR_DESC_PTR)
+#define bfin_write_DMA10_CURR_DESC_PTR(val)	bfin_write32(DMA10_CURR_DESC_PTR, val)
+#define bfin_read_DMA10_CURR_ADDR()		bfin_read32(DMA10_CURR_ADDR)
+#define bfin_write_DMA10_CURR_ADDR(val)		bfin_write32(DMA10_CURR_ADDR, val)
+#define bfin_read_DMA10_CURR_X_COUNT()		bfin_read16(DMA10_CURR_X_COUNT)
+#define bfin_write_DMA10_CURR_X_COUNT(val)	bfin_write16(DMA10_CURR_X_COUNT, val)
+#define bfin_read_DMA10_CURR_Y_COUNT()		bfin_read16(DMA10_CURR_Y_COUNT)
+#define bfin_write_DMA10_CURR_Y_COUNT(val)	bfin_write16(DMA10_CURR_Y_COUNT, val)
+#define bfin_read_DMA10_IRQ_STATUS()		bfin_read16(DMA10_IRQ_STATUS)
+#define bfin_write_DMA10_IRQ_STATUS(val)	bfin_write16(DMA10_IRQ_STATUS, val)
+#define bfin_read_DMA10_PERIPHERAL_MAP()	bfin_read16(DMA10_PERIPHERAL_MAP)
+#define bfin_write_DMA10_PERIPHERAL_MAP(val)	bfin_write16(DMA10_PERIPHERAL_MAP, val)
+
+#define bfin_read_DMA11_CONFIG()		bfin_read16(DMA11_CONFIG)
+#define bfin_write_DMA11_CONFIG(val)		bfin_write16(DMA11_CONFIG, val)
+#define bfin_read_DMA11_NEXT_DESC_PTR()		bfin_read32(DMA11_NEXT_DESC_PTR)
+#define bfin_write_DMA11_NEXT_DESC_PTR(val)	bfin_write32(DMA11_NEXT_DESC_PTR, val)
+#define bfin_read_DMA11_START_ADDR()		bfin_read32(DMA11_START_ADDR)
+#define bfin_write_DMA11_START_ADDR(val)	bfin_write32(DMA11_START_ADDR, val)
+#define bfin_read_DMA11_X_COUNT()		bfin_read16(DMA11_X_COUNT)
+#define bfin_write_DMA11_X_COUNT(val)		bfin_write16(DMA11_X_COUNT, val)
+#define bfin_read_DMA11_Y_COUNT()		bfin_read16(DMA11_Y_COUNT)
+#define bfin_write_DMA11_Y_COUNT(val)		bfin_write16(DMA11_Y_COUNT, val)
+#define bfin_read_DMA11_X_MODIFY()		bfin_read16(DMA11_X_MODIFY)
+#define bfin_write_DMA11_X_MODIFY(val)		bfin_write16(DMA11_X_MODIFY, val)
+#define bfin_read_DMA11_Y_MODIFY()		bfin_read16(DMA11_Y_MODIFY)
+#define bfin_write_DMA11_Y_MODIFY(val)		bfin_write16(DMA11_Y_MODIFY, val)
+#define bfin_read_DMA11_CURR_DESC_PTR()		bfin_read32(DMA11_CURR_DESC_PTR)
+#define bfin_write_DMA11_CURR_DESC_PTR(val)	bfin_write32(DMA11_CURR_DESC_PTR, val)
+#define bfin_read_DMA11_CURR_ADDR()		bfin_read32(DMA11_CURR_ADDR)
+#define bfin_write_DMA11_CURR_ADDR(val)		bfin_write32(DMA11_CURR_ADDR, val)
+#define bfin_read_DMA11_CURR_X_COUNT()		bfin_read16(DMA11_CURR_X_COUNT)
+#define bfin_write_DMA11_CURR_X_COUNT(val)	bfin_write16(DMA11_CURR_X_COUNT, val)
+#define bfin_read_DMA11_CURR_Y_COUNT()		bfin_read16(DMA11_CURR_Y_COUNT)
+#define bfin_write_DMA11_CURR_Y_COUNT(val)	bfin_write16(DMA11_CURR_Y_COUNT, val)
+#define bfin_read_DMA11_IRQ_STATUS()		bfin_read16(DMA11_IRQ_STATUS)
+#define bfin_write_DMA11_IRQ_STATUS(val)	bfin_write16(DMA11_IRQ_STATUS, val)
+#define bfin_read_DMA11_PERIPHERAL_MAP()	bfin_read16(DMA11_PERIPHERAL_MAP)
+#define bfin_write_DMA11_PERIPHERAL_MAP(val)	bfin_write16(DMA11_PERIPHERAL_MAP, val)
+
+#define bfin_read_MDMA_D0_CONFIG()		bfin_read16(MDMA_D0_CONFIG)
+#define bfin_write_MDMA_D0_CONFIG(val)		bfin_write16(MDMA_D0_CONFIG, val)
+#define bfin_read_MDMA_D0_NEXT_DESC_PTR()	bfin_read32(MDMA_D0_NEXT_DESC_PTR)
+#define bfin_write_MDMA_D0_NEXT_DESC_PTR(val)	bfin_write32(MDMA_D0_NEXT_DESC_PTR, val)
+#define bfin_read_MDMA_D0_START_ADDR()		bfin_read32(MDMA_D0_START_ADDR)
+#define bfin_write_MDMA_D0_START_ADDR(val)	bfin_write32(MDMA_D0_START_ADDR, val)
+#define bfin_read_MDMA_D0_X_COUNT()		bfin_read16(MDMA_D0_X_COUNT)
+#define bfin_write_MDMA_D0_X_COUNT(val)		bfin_write16(MDMA_D0_X_COUNT, val)
+#define bfin_read_MDMA_D0_Y_COUNT()		bfin_read16(MDMA_D0_Y_COUNT)
+#define bfin_write_MDMA_D0_Y_COUNT(val)		bfin_write16(MDMA_D0_Y_COUNT, val)
+#define bfin_read_MDMA_D0_X_MODIFY()		bfin_read16(MDMA_D0_X_MODIFY)
+#define bfin_write_MDMA_D0_X_MODIFY(val)	bfin_write16(MDMA_D0_X_MODIFY, val)
+#define bfin_read_MDMA_D0_Y_MODIFY()		bfin_read16(MDMA_D0_Y_MODIFY)
+#define bfin_write_MDMA_D0_Y_MODIFY(val)	bfin_write16(MDMA_D0_Y_MODIFY, val)
+#define bfin_read_MDMA_D0_CURR_DESC_PTR()	bfin_read32(MDMA_D0_CURR_DESC_PTR)
+#define bfin_write_MDMA_D0_CURR_DESC_PTR(val)	bfin_write32(MDMA_D0_CURR_DESC_PTR, val)
+#define bfin_read_MDMA_D0_CURR_ADDR()		bfin_read32(MDMA_D0_CURR_ADDR)
+#define bfin_write_MDMA_D0_CURR_ADDR(val)	bfin_write32(MDMA_D0_CURR_ADDR, val)
+#define bfin_read_MDMA_D0_CURR_X_COUNT()	bfin_read16(MDMA_D0_CURR_X_COUNT)
+#define bfin_write_MDMA_D0_CURR_X_COUNT(val)	bfin_write16(MDMA_D0_CURR_X_COUNT, val)
+#define bfin_read_MDMA_D0_CURR_Y_COUNT()	bfin_read16(MDMA_D0_CURR_Y_COUNT)
+#define bfin_write_MDMA_D0_CURR_Y_COUNT(val)	bfin_write16(MDMA_D0_CURR_Y_COUNT, val)
+#define bfin_read_MDMA_D0_IRQ_STATUS()		bfin_read16(MDMA_D0_IRQ_STATUS)
+#define bfin_write_MDMA_D0_IRQ_STATUS(val)	bfin_write16(MDMA_D0_IRQ_STATUS, val)
+#define bfin_read_MDMA_D0_PERIPHERAL_MAP()	bfin_read16(MDMA_D0_PERIPHERAL_MAP)
+#define bfin_write_MDMA_D0_PERIPHERAL_MAP(val)	bfin_write16(MDMA_D0_PERIPHERAL_MAP, val)
+
+#define bfin_read_MDMA_S0_CONFIG()		bfin_read16(MDMA_S0_CONFIG)
+#define bfin_write_MDMA_S0_CONFIG(val)		bfin_write16(MDMA_S0_CONFIG, val)
+#define bfin_read_MDMA_S0_NEXT_DESC_PTR()	bfin_read32(MDMA_S0_NEXT_DESC_PTR)
+#define bfin_write_MDMA_S0_NEXT_DESC_PTR(val)	bfin_write32(MDMA_S0_NEXT_DESC_PTR, val)
+#define bfin_read_MDMA_S0_START_ADDR()		bfin_read32(MDMA_S0_START_ADDR)
+#define bfin_write_MDMA_S0_START_ADDR(val)	bfin_write32(MDMA_S0_START_ADDR, val)
+#define bfin_read_MDMA_S0_X_COUNT()		bfin_read16(MDMA_S0_X_COUNT)
+#define bfin_write_MDMA_S0_X_COUNT(val)		bfin_write16(MDMA_S0_X_COUNT, val)
+#define bfin_read_MDMA_S0_Y_COUNT()		bfin_read16(MDMA_S0_Y_COUNT)
+#define bfin_write_MDMA_S0_Y_COUNT(val)		bfin_write16(MDMA_S0_Y_COUNT, val)
+#define bfin_read_MDMA_S0_X_MODIFY()		bfin_read16(MDMA_S0_X_MODIFY)
+#define bfin_write_MDMA_S0_X_MODIFY(val)	bfin_write16(MDMA_S0_X_MODIFY, val)
+#define bfin_read_MDMA_S0_Y_MODIFY()		bfin_read16(MDMA_S0_Y_MODIFY)
+#define bfin_write_MDMA_S0_Y_MODIFY(val)	bfin_write16(MDMA_S0_Y_MODIFY, val)
+#define bfin_read_MDMA_S0_CURR_DESC_PTR()	bfin_read32(MDMA_S0_CURR_DESC_PTR)
+#define bfin_write_MDMA_S0_CURR_DESC_PTR(val)	bfin_write32(MDMA_S0_CURR_DESC_PTR, val)
+#define bfin_read_MDMA_S0_CURR_ADDR()		bfin_read32(MDMA_S0_CURR_ADDR)
+#define bfin_write_MDMA_S0_CURR_ADDR(val)	bfin_write32(MDMA_S0_CURR_ADDR, val)
+#define bfin_read_MDMA_S0_CURR_X_COUNT()	bfin_read16(MDMA_S0_CURR_X_COUNT)
+#define bfin_write_MDMA_S0_CURR_X_COUNT(val)	bfin_write16(MDMA_S0_CURR_X_COUNT, val)
+#define bfin_read_MDMA_S0_CURR_Y_COUNT()	bfin_read16(MDMA_S0_CURR_Y_COUNT)
+#define bfin_write_MDMA_S0_CURR_Y_COUNT(val)	bfin_write16(MDMA_S0_CURR_Y_COUNT, val)
+#define bfin_read_MDMA_S0_IRQ_STATUS()		bfin_read16(MDMA_S0_IRQ_STATUS)
+#define bfin_write_MDMA_S0_IRQ_STATUS(val)	bfin_write16(MDMA_S0_IRQ_STATUS, val)
+#define bfin_read_MDMA_S0_PERIPHERAL_MAP()	bfin_read16(MDMA_S0_PERIPHERAL_MAP)
+#define bfin_write_MDMA_S0_PERIPHERAL_MAP(val)	bfin_write16(MDMA_S0_PERIPHERAL_MAP, val)
+
+#define bfin_read_MDMA_D1_CONFIG()		bfin_read16(MDMA_D1_CONFIG)
+#define bfin_write_MDMA_D1_CONFIG(val)		bfin_write16(MDMA_D1_CONFIG, val)
+#define bfin_read_MDMA_D1_NEXT_DESC_PTR()	bfin_read32(MDMA_D1_NEXT_DESC_PTR)
+#define bfin_write_MDMA_D1_NEXT_DESC_PTR(val)	bfin_write32(MDMA_D1_NEXT_DESC_PTR, val)
+#define bfin_read_MDMA_D1_START_ADDR()		bfin_read32(MDMA_D1_START_ADDR)
+#define bfin_write_MDMA_D1_START_ADDR(val)	bfin_write32(MDMA_D1_START_ADDR, val)
+#define bfin_read_MDMA_D1_X_COUNT()		bfin_read16(MDMA_D1_X_COUNT)
+#define bfin_write_MDMA_D1_X_COUNT(val)		bfin_write16(MDMA_D1_X_COUNT, val)
+#define bfin_read_MDMA_D1_Y_COUNT()		bfin_read16(MDMA_D1_Y_COUNT)
+#define bfin_write_MDMA_D1_Y_COUNT(val)		bfin_write16(MDMA_D1_Y_COUNT, val)
+#define bfin_read_MDMA_D1_X_MODIFY()		bfin_read16(MDMA_D1_X_MODIFY)
+#define bfin_write_MDMA_D1_X_MODIFY(val)	bfin_write16(MDMA_D1_X_MODIFY, val)
+#define bfin_read_MDMA_D1_Y_MODIFY()		bfin_read16(MDMA_D1_Y_MODIFY)
+#define bfin_write_MDMA_D1_Y_MODIFY(val)	bfin_write16(MDMA_D1_Y_MODIFY, val)
+#define bfin_read_MDMA_D1_CURR_DESC_PTR()	bfin_read32(MDMA_D1_CURR_DESC_PTR)
+#define bfin_write_MDMA_D1_CURR_DESC_PTR(val)	bfin_write32(MDMA_D1_CURR_DESC_PTR, val)
+#define bfin_read_MDMA_D1_CURR_ADDR()		bfin_read32(MDMA_D1_CURR_ADDR)
+#define bfin_write_MDMA_D1_CURR_ADDR(val)	bfin_write32(MDMA_D1_CURR_ADDR, val)
+#define bfin_read_MDMA_D1_CURR_X_COUNT()	bfin_read16(MDMA_D1_CURR_X_COUNT)
+#define bfin_write_MDMA_D1_CURR_X_COUNT(val)	bfin_write16(MDMA_D1_CURR_X_COUNT, val)
+#define bfin_read_MDMA_D1_CURR_Y_COUNT()	bfin_read16(MDMA_D1_CURR_Y_COUNT)
+#define bfin_write_MDMA_D1_CURR_Y_COUNT(val)	bfin_write16(MDMA_D1_CURR_Y_COUNT, val)
+#define bfin_read_MDMA_D1_IRQ_STATUS()		bfin_read16(MDMA_D1_IRQ_STATUS)
+#define bfin_write_MDMA_D1_IRQ_STATUS(val)	bfin_write16(MDMA_D1_IRQ_STATUS, val)
+#define bfin_read_MDMA_D1_PERIPHERAL_MAP()	bfin_read16(MDMA_D1_PERIPHERAL_MAP)
+#define bfin_write_MDMA_D1_PERIPHERAL_MAP(val)	bfin_write16(MDMA_D1_PERIPHERAL_MAP, val)
+
+#define bfin_read_MDMA_S1_CONFIG()		bfin_read16(MDMA_S1_CONFIG)
+#define bfin_write_MDMA_S1_CONFIG(val)		bfin_write16(MDMA_S1_CONFIG, val)
+#define bfin_read_MDMA_S1_NEXT_DESC_PTR()	bfin_read32(MDMA_S1_NEXT_DESC_PTR)
+#define bfin_write_MDMA_S1_NEXT_DESC_PTR(val)	bfin_write32(MDMA_S1_NEXT_DESC_PTR, val)
+#define bfin_read_MDMA_S1_START_ADDR()		bfin_read32(MDMA_S1_START_ADDR)
+#define bfin_write_MDMA_S1_START_ADDR(val)	bfin_write32(MDMA_S1_START_ADDR, val)
+#define bfin_read_MDMA_S1_X_COUNT()		bfin_read16(MDMA_S1_X_COUNT)
+#define bfin_write_MDMA_S1_X_COUNT(val)		bfin_write16(MDMA_S1_X_COUNT, val)
+#define bfin_read_MDMA_S1_Y_COUNT()		bfin_read16(MDMA_S1_Y_COUNT)
+#define bfin_write_MDMA_S1_Y_COUNT(val)		bfin_write16(MDMA_S1_Y_COUNT, val)
+#define bfin_read_MDMA_S1_X_MODIFY()		bfin_read16(MDMA_S1_X_MODIFY)
+#define bfin_write_MDMA_S1_X_MODIFY(val)	bfin_write16(MDMA_S1_X_MODIFY, val)
+#define bfin_read_MDMA_S1_Y_MODIFY()		bfin_read16(MDMA_S1_Y_MODIFY)
+#define bfin_write_MDMA_S1_Y_MODIFY(val)	bfin_write16(MDMA_S1_Y_MODIFY, val)
+#define bfin_read_MDMA_S1_CURR_DESC_PTR()	bfin_read32(MDMA_S1_CURR_DESC_PTR)
+#define bfin_write_MDMA_S1_CURR_DESC_PTR(val)	bfin_write32(MDMA_S1_CURR_DESC_PTR, val)
+#define bfin_read_MDMA_S1_CURR_ADDR()		bfin_read32(MDMA_S1_CURR_ADDR)
+#define bfin_write_MDMA_S1_CURR_ADDR(val)	bfin_write32(MDMA_S1_CURR_ADDR, val)
+#define bfin_read_MDMA_S1_CURR_X_COUNT()	bfin_read16(MDMA_S1_CURR_X_COUNT)
+#define bfin_write_MDMA_S1_CURR_X_COUNT(val)	bfin_write16(MDMA_S1_CURR_X_COUNT, val)
+#define bfin_read_MDMA_S1_CURR_Y_COUNT()	bfin_read16(MDMA_S1_CURR_Y_COUNT)
+#define bfin_write_MDMA_S1_CURR_Y_COUNT(val)	bfin_write16(MDMA_S1_CURR_Y_COUNT, val)
+#define bfin_read_MDMA_S1_IRQ_STATUS()		bfin_read16(MDMA_S1_IRQ_STATUS)
+#define bfin_write_MDMA_S1_IRQ_STATUS(val)	bfin_write16(MDMA_S1_IRQ_STATUS, val)
+#define bfin_read_MDMA_S1_PERIPHERAL_MAP()	bfin_read16(MDMA_S1_PERIPHERAL_MAP)
+#define bfin_write_MDMA_S1_PERIPHERAL_MAP(val)	bfin_write16(MDMA_S1_PERIPHERAL_MAP, val)
+
+
+/* Parallel Peripheral Interface (0xFFC01000 - 0xFFC010FF)							*/
+#define bfin_read_PPI_CONTROL()			bfin_read16(PPI_CONTROL)
+#define bfin_write_PPI_CONTROL(val)		bfin_write16(PPI_CONTROL, val)
+#define bfin_read_PPI_STATUS()			bfin_read16(PPI_STATUS)
+#define bfin_write_PPI_STATUS(val)		bfin_write16(PPI_STATUS, val)
+#define bfin_clear_PPI_STATUS()			bfin_write_PPI_STATUS(0xFFFF)
+#define bfin_read_PPI_DELAY()			bfin_read16(PPI_DELAY)
+#define bfin_write_PPI_DELAY(val)		bfin_write16(PPI_DELAY, val)
+#define bfin_read_PPI_COUNT()			bfin_read16(PPI_COUNT)
+#define bfin_write_PPI_COUNT(val)		bfin_write16(PPI_COUNT, val)
+#define bfin_read_PPI_FRAME()			bfin_read16(PPI_FRAME)
+#define bfin_write_PPI_FRAME(val)		bfin_write16(PPI_FRAME, val)
+
+
+/* Two-Wire Interface		(0xFFC01400 - 0xFFC014FF)								*/
+
+/* General Purpose I/O Port G (0xFFC01500 - 0xFFC015FF)								*/
+#define bfin_read_PORTGIO()			bfin_read16(PORTGIO)
+#define bfin_write_PORTGIO(val)			bfin_write16(PORTGIO, val)
+#define bfin_read_PORTGIO_CLEAR()		bfin_read16(PORTGIO_CLEAR)
+#define bfin_write_PORTGIO_CLEAR(val)		bfin_write16(PORTGIO_CLEAR, val)
+#define bfin_read_PORTGIO_SET()			bfin_read16(PORTGIO_SET)
+#define bfin_write_PORTGIO_SET(val)		bfin_write16(PORTGIO_SET, val)
+#define bfin_read_PORTGIO_TOGGLE()		bfin_read16(PORTGIO_TOGGLE)
+#define bfin_write_PORTGIO_TOGGLE(val)		bfin_write16(PORTGIO_TOGGLE, val)
+#define bfin_read_PORTGIO_MASKA()		bfin_read16(PORTGIO_MASKA)
+#define bfin_write_PORTGIO_MASKA(val)		bfin_write16(PORTGIO_MASKA, val)
+#define bfin_read_PORTGIO_MASKA_CLEAR()		bfin_read16(PORTGIO_MASKA_CLEAR)
+#define bfin_write_PORTGIO_MASKA_CLEAR(val)	bfin_write16(PORTGIO_MASKA_CLEAR, val)
+#define bfin_read_PORTGIO_MASKA_SET()		bfin_read16(PORTGIO_MASKA_SET)
+#define bfin_write_PORTGIO_MASKA_SET(val)	bfin_write16(PORTGIO_MASKA_SET, val)
+#define bfin_read_PORTGIO_MASKA_TOGGLE()	bfin_read16(PORTGIO_MASKA_TOGGLE)
+#define bfin_write_PORTGIO_MASKA_TOGGLE(val)	bfin_write16(PORTGIO_MASKA_TOGGLE, val)
+#define bfin_read_PORTGIO_MASKB()		bfin_read16(PORTGIO_MASKB)
+#define bfin_write_PORTGIO_MASKB(val)		bfin_write16(PORTGIO_MASKB, val)
+#define bfin_read_PORTGIO_MASKB_CLEAR()		bfin_read16(PORTGIO_MASKB_CLEAR)
+#define bfin_write_PORTGIO_MASKB_CLEAR(val)	bfin_write16(PORTGIO_MASKB_CLEAR, val)
+#define bfin_read_PORTGIO_MASKB_SET()		bfin_read16(PORTGIO_MASKB_SET)
+#define bfin_write_PORTGIO_MASKB_SET(val)	bfin_write16(PORTGIO_MASKB_SET, val)
+#define bfin_read_PORTGIO_MASKB_TOGGLE()	bfin_read16(PORTGIO_MASKB_TOGGLE)
+#define bfin_write_PORTGIO_MASKB_TOGGLE(val)	bfin_write16(PORTGIO_MASKB_TOGGLE, val)
+#define bfin_read_PORTGIO_DIR()			bfin_read16(PORTGIO_DIR)
+#define bfin_write_PORTGIO_DIR(val)		bfin_write16(PORTGIO_DIR, val)
+#define bfin_read_PORTGIO_POLAR()		bfin_read16(PORTGIO_POLAR)
+#define bfin_write_PORTGIO_POLAR(val)		bfin_write16(PORTGIO_POLAR, val)
+#define bfin_read_PORTGIO_EDGE()		bfin_read16(PORTGIO_EDGE)
+#define bfin_write_PORTGIO_EDGE(val)		bfin_write16(PORTGIO_EDGE, val)
+#define bfin_read_PORTGIO_BOTH()		bfin_read16(PORTGIO_BOTH)
+#define bfin_write_PORTGIO_BOTH(val)		bfin_write16(PORTGIO_BOTH, val)
+#define bfin_read_PORTGIO_INEN()		bfin_read16(PORTGIO_INEN)
+#define bfin_write_PORTGIO_INEN(val)		bfin_write16(PORTGIO_INEN, val)
+
+
+/* General Purpose I/O Port H (0xFFC01700 - 0xFFC017FF)								*/
+#define bfin_read_PORTHIO()			bfin_read16(PORTHIO)
+#define bfin_write_PORTHIO(val)			bfin_write16(PORTHIO, val)
+#define bfin_read_PORTHIO_CLEAR()		bfin_read16(PORTHIO_CLEAR)
+#define bfin_write_PORTHIO_CLEAR(val)		bfin_write16(PORTHIO_CLEAR, val)
+#define bfin_read_PORTHIO_SET()			bfin_read16(PORTHIO_SET)
+#define bfin_write_PORTHIO_SET(val)		bfin_write16(PORTHIO_SET, val)
+#define bfin_read_PORTHIO_TOGGLE()		bfin_read16(PORTHIO_TOGGLE)
+#define bfin_write_PORTHIO_TOGGLE(val)		bfin_write16(PORTHIO_TOGGLE, val)
+#define bfin_read_PORTHIO_MASKA()		bfin_read16(PORTHIO_MASKA)
+#define bfin_write_PORTHIO_MASKA(val)		bfin_write16(PORTHIO_MASKA, val)
+#define bfin_read_PORTHIO_MASKA_CLEAR()		bfin_read16(PORTHIO_MASKA_CLEAR)
+#define bfin_write_PORTHIO_MASKA_CLEAR(val)	bfin_write16(PORTHIO_MASKA_CLEAR, val)
+#define bfin_read_PORTHIO_MASKA_SET()		bfin_read16(PORTHIO_MASKA_SET)
+#define bfin_write_PORTHIO_MASKA_SET(val)	bfin_write16(PORTHIO_MASKA_SET, val)
+#define bfin_read_PORTHIO_MASKA_TOGGLE()	bfin_read16(PORTHIO_MASKA_TOGGLE)
+#define bfin_write_PORTHIO_MASKA_TOGGLE(val)	bfin_write16(PORTHIO_MASKA_TOGGLE, val)
+#define bfin_read_PORTHIO_MASKB()		bfin_read16(PORTHIO_MASKB)
+#define bfin_write_PORTHIO_MASKB(val)		bfin_write16(PORTHIO_MASKB, val)
+#define bfin_read_PORTHIO_MASKB_CLEAR()		bfin_read16(PORTHIO_MASKB_CLEAR)
+#define bfin_write_PORTHIO_MASKB_CLEAR(val)	bfin_write16(PORTHIO_MASKB_CLEAR, val)
+#define bfin_read_PORTHIO_MASKB_SET()		bfin_read16(PORTHIO_MASKB_SET)
+#define bfin_write_PORTHIO_MASKB_SET(val)	bfin_write16(PORTHIO_MASKB_SET, val)
+#define bfin_read_PORTHIO_MASKB_TOGGLE()	bfin_read16(PORTHIO_MASKB_TOGGLE)
+#define bfin_write_PORTHIO_MASKB_TOGGLE(val)	bfin_write16(PORTHIO_MASKB_TOGGLE, val)
+#define bfin_read_PORTHIO_DIR()			bfin_read16(PORTHIO_DIR)
+#define bfin_write_PORTHIO_DIR(val)		bfin_write16(PORTHIO_DIR, val)
+#define bfin_read_PORTHIO_POLAR()		bfin_read16(PORTHIO_POLAR)
+#define bfin_write_PORTHIO_POLAR(val)		bfin_write16(PORTHIO_POLAR, val)
+#define bfin_read_PORTHIO_EDGE()		bfin_read16(PORTHIO_EDGE)
+#define bfin_write_PORTHIO_EDGE(val)		bfin_write16(PORTHIO_EDGE, val)
+#define bfin_read_PORTHIO_BOTH()		bfin_read16(PORTHIO_BOTH)
+#define bfin_write_PORTHIO_BOTH(val)		bfin_write16(PORTHIO_BOTH, val)
+#define bfin_read_PORTHIO_INEN()		bfin_read16(PORTHIO_INEN)
+#define bfin_write_PORTHIO_INEN(val)		bfin_write16(PORTHIO_INEN, val)
+
+
+/* UART1 Controller		(0xFFC02000 - 0xFFC020FF)								*/
+#define bfin_read_UART1_THR()			bfin_read16(UART1_THR)
+#define bfin_write_UART1_THR(val)		bfin_write16(UART1_THR, val)
+#define bfin_read_UART1_RBR()			bfin_read16(UART1_RBR)
+#define bfin_write_UART1_RBR(val)		bfin_write16(UART1_RBR, val)
+#define bfin_read_UART1_DLL()			bfin_read16(UART1_DLL)
+#define bfin_write_UART1_DLL(val)		bfin_write16(UART1_DLL, val)
+#define bfin_read_UART1_IER()			bfin_read16(UART1_IER)
+#define bfin_write_UART1_IER(val)		bfin_write16(UART1_IER, val)
+#define bfin_read_UART1_DLH()			bfin_read16(UART1_DLH)
+#define bfin_write_UART1_DLH(val)		bfin_write16(UART1_DLH, val)
+#define bfin_read_UART1_IIR()			bfin_read16(UART1_IIR)
+#define bfin_write_UART1_IIR(val)		bfin_write16(UART1_IIR, val)
+#define bfin_read_UART1_LCR()			bfin_read16(UART1_LCR)
+#define bfin_write_UART1_LCR(val)		bfin_write16(UART1_LCR, val)
+#define bfin_read_UART1_MCR()			bfin_read16(UART1_MCR)
+#define bfin_write_UART1_MCR(val)		bfin_write16(UART1_MCR, val)
+#define bfin_read_UART1_LSR()			bfin_read16(UART1_LSR)
+#define bfin_write_UART1_LSR(val)		bfin_write16(UART1_LSR, val)
+#define bfin_read_UART1_MSR()			bfin_read16(UART1_MSR)
+#define bfin_write_UART1_MSR(val)		bfin_write16(UART1_MSR, val)
+#define bfin_read_UART1_SCR()			bfin_read16(UART1_SCR)
+#define bfin_write_UART1_SCR(val)		bfin_write16(UART1_SCR, val)
+#define bfin_read_UART1_GCTL()			bfin_read16(UART1_GCTL)
+#define bfin_write_UART1_GCTL(val)		bfin_write16(UART1_GCTL, val)
+
+/* Omit the CAN register sets from cdefBF534.h (CAN is not present in the ADSP-BF51x processors) */
+
+/* Pin Control Registers	(0xFFC03200 - 0xFFC032FF)								*/
+#define bfin_read_PORTF_FER()			bfin_read16(PORTF_FER)
+#define bfin_write_PORTF_FER(val)		bfin_write16(PORTF_FER, val)
+#define bfin_read_PORTG_FER()			bfin_read16(PORTG_FER)
+#define bfin_write_PORTG_FER(val)		bfin_write16(PORTG_FER, val)
+#define bfin_read_PORTH_FER()			bfin_read16(PORTH_FER)
+#define bfin_write_PORTH_FER(val)		bfin_write16(PORTH_FER, val)
+#define bfin_read_PORT_MUX()			bfin_read16(PORT_MUX)
+#define bfin_write_PORT_MUX(val)		bfin_write16(PORT_MUX, val)
+
+
+/* Handshake MDMA Registers	(0xFFC03300 - 0xFFC033FF)								*/
+#define bfin_read_HMDMA0_CONTROL()		bfin_read16(HMDMA0_CONTROL)
+#define bfin_write_HMDMA0_CONTROL(val)		bfin_write16(HMDMA0_CONTROL, val)
+#define bfin_read_HMDMA0_ECINIT()		bfin_read16(HMDMA0_ECINIT)
+#define bfin_write_HMDMA0_ECINIT(val)		bfin_write16(HMDMA0_ECINIT, val)
+#define bfin_read_HMDMA0_BCINIT()		bfin_read16(HMDMA0_BCINIT)
+#define bfin_write_HMDMA0_BCINIT(val)		bfin_write16(HMDMA0_BCINIT, val)
+#define bfin_read_HMDMA0_ECURGENT()		bfin_read16(HMDMA0_ECURGENT)
+#define bfin_write_HMDMA0_ECURGENT(val)		bfin_write16(HMDMA0_ECURGENT, val)
+#define bfin_read_HMDMA0_ECOVERFLOW()		bfin_read16(HMDMA0_ECOVERFLOW)
+#define bfin_write_HMDMA0_ECOVERFLOW(val)	bfin_write16(HMDMA0_ECOVERFLOW, val)
+#define bfin_read_HMDMA0_ECOUNT()		bfin_read16(HMDMA0_ECOUNT)
+#define bfin_write_HMDMA0_ECOUNT(val)		bfin_write16(HMDMA0_ECOUNT, val)
+#define bfin_read_HMDMA0_BCOUNT()		bfin_read16(HMDMA0_BCOUNT)
+#define bfin_write_HMDMA0_BCOUNT(val)		bfin_write16(HMDMA0_BCOUNT, val)
+
+#define bfin_read_HMDMA1_CONTROL()		bfin_read16(HMDMA1_CONTROL)
+#define bfin_write_HMDMA1_CONTROL(val)		bfin_write16(HMDMA1_CONTROL, val)
+#define bfin_read_HMDMA1_ECINIT()		bfin_read16(HMDMA1_ECINIT)
+#define bfin_write_HMDMA1_ECINIT(val)		bfin_write16(HMDMA1_ECINIT, val)
+#define bfin_read_HMDMA1_BCINIT()		bfin_read16(HMDMA1_BCINIT)
+#define bfin_write_HMDMA1_BCINIT(val)		bfin_write16(HMDMA1_BCINIT, val)
+#define bfin_read_HMDMA1_ECURGENT()		bfin_read16(HMDMA1_ECURGENT)
+#define bfin_write_HMDMA1_ECURGENT(val)		bfin_write16(HMDMA1_ECURGENT, val)
+#define bfin_read_HMDMA1_ECOVERFLOW()		bfin_read16(HMDMA1_ECOVERFLOW)
+#define bfin_write_HMDMA1_ECOVERFLOW(val)	bfin_write16(HMDMA1_ECOVERFLOW, val)
+#define bfin_read_HMDMA1_ECOUNT()		bfin_read16(HMDMA1_ECOUNT)
+#define bfin_write_HMDMA1_ECOUNT(val)		bfin_write16(HMDMA1_ECOUNT, val)
+#define bfin_read_HMDMA1_BCOUNT()		bfin_read16(HMDMA1_BCOUNT)
+#define bfin_write_HMDMA1_BCOUNT(val)		bfin_write16(HMDMA1_BCOUNT, val)
+
+/* ==== end from cdefBF534.h ==== */
+
+/* GPIO PIN mux (0xFFC03210 - 0xFFC03288) */
+
+#define bfin_read_PORTF_MUX()			bfin_read16(PORTF_MUX)
+#define bfin_write_PORTF_MUX(val)		bfin_write16(PORTF_MUX, val)
+#define bfin_read_PORTG_MUX()			bfin_read16(PORTG_MUX)
+#define bfin_write_PORTG_MUX(val)		bfin_write16(PORTG_MUX, val)
+#define bfin_read_PORTH_MUX()			bfin_read16(PORTH_MUX)
+#define bfin_write_PORTH_MUX(val)		bfin_write16(PORTH_MUX, val)
+
+#define bfin_read_PORTF_DRIVE()			bfin_read16(PORTF_DRIVE)
+#define bfin_write_PORTF_DRIVE(val)		bfin_write16(PORTF_DRIVE, val)
+#define bfin_read_PORTG_DRIVE()			bfin_read16(PORTG_DRIVE)
+#define bfin_write_PORTG_DRIVE(val)		bfin_write16(PORTG_DRIVE, val)
+#define bfin_read_PORTH_DRIVE()			bfin_read16(PORTH_DRIVE)
+#define bfin_write_PORTH_DRIVE(val)		bfin_write16(PORTH_DRIVE, val)
+#define bfin_read_PORTF_SLEW()			bfin_read16(PORTF_SLEW)
+#define bfin_write_PORTF_SLEW(val)		bfin_write16(PORTF_SLEW, val)
+#define bfin_read_PORTG_SLEW()			bfin_read16(PORTG_SLEW)
+#define bfin_write_PORTG_SLEW(val)		bfin_write16(PORTG_SLEW, val)
+#define bfin_read_PORTH_SLEW()			bfin_read16(PORTH_SLEW)
+#define bfin_write_PORTH_SLEW(val)		bfin_write16(PORTH_SLEW, val)
+#define bfin_read_PORTF_HYSTERISIS()		bfin_read16(PORTF_HYSTERISIS)
+#define bfin_write_PORTF_HYSTERISIS(val)	bfin_write16(PORTF_HYSTERISIS, val)
+#define bfin_read_PORTG_HYSTERISIS()		bfin_read16(PORTG_HYSTERISIS)
+#define bfin_write_PORTG_HYSTERISIS(val)	bfin_write16(PORTG_HYSTERISIS, val)
+#define bfin_read_PORTH_HYSTERISIS()		bfin_read16(PORTH_HYSTERISIS)
+#define bfin_write_PORTH_HYSTERISIS(val)	bfin_write16(PORTH_HYSTERISIS, val)
+#define bfin_read_MISCPORT_DRIVE()		bfin_read16(MISCPORT_DRIVE)
+#define bfin_write_MISCPORT_DRIVE(val)		bfin_write16(MISCPORT_DRIVE, val)
+#define bfin_read_MISCPORT_SLEW()		bfin_read16(MISCPORT_SLEW)
+#define bfin_write_MISCPORT_SLEW(val)		bfin_write16(MISCPORT_SLEW, val)
+#define bfin_read_MISCPORT_HYSTERISIS()		bfin_read16(MISCPORT_HYSTERISIS)
+#define bfin_write_MISCPORT_HYSTERISIS(val)	bfin_write16(MISCPORT_HYSTERISIS, val)
+
+/* HOST Port Registers */
+
+#define bfin_read_HOST_CONTROL()		bfin_read16(HOST_CONTROL)
+#define bfin_write_HOST_CONTROL(val)		bfin_write16(HOST_CONTROL, val)
+#define bfin_read_HOST_STATUS()			bfin_read16(HOST_STATUS)
+#define bfin_write_HOST_STATUS(val)		bfin_write16(HOST_STATUS, val)
+#define bfin_read_HOST_TIMEOUT()		bfin_read16(HOST_TIMEOUT)
+#define bfin_write_HOST_TIMEOUT(val)		bfin_write16(HOST_TIMEOUT, val)
+
+/* Counter Registers */
+
+#define bfin_read_CNT_CONFIG()			bfin_read16(CNT_CONFIG)
+#define bfin_write_CNT_CONFIG(val)		bfin_write16(CNT_CONFIG, val)
+#define bfin_read_CNT_IMASK()			bfin_read16(CNT_IMASK)
+#define bfin_write_CNT_IMASK(val)		bfin_write16(CNT_IMASK, val)
+#define bfin_read_CNT_STATUS()			bfin_read16(CNT_STATUS)
+#define bfin_write_CNT_STATUS(val)		bfin_write16(CNT_STATUS, val)
+#define bfin_read_CNT_COMMAND()			bfin_read16(CNT_COMMAND)
+#define bfin_write_CNT_COMMAND(val)		bfin_write16(CNT_COMMAND, val)
+#define bfin_read_CNT_DEBOUNCE()		bfin_read16(CNT_DEBOUNCE)
+#define bfin_write_CNT_DEBOUNCE(val)		bfin_write16(CNT_DEBOUNCE, val)
+#define bfin_read_CNT_COUNTER()			bfin_read32(CNT_COUNTER)
+#define bfin_write_CNT_COUNTER(val)		bfin_write32(CNT_COUNTER, val)
+#define bfin_read_CNT_MAX()			bfin_read32(CNT_MAX)
+#define bfin_write_CNT_MAX(val)			bfin_write32(CNT_MAX, val)
+#define bfin_read_CNT_MIN()			bfin_read32(CNT_MIN)
+#define bfin_write_CNT_MIN(val)			bfin_write32(CNT_MIN, val)
+
+/* Security Registers */
+
+#define bfin_read_SECURE_SYSSWT()		bfin_read32(SECURE_SYSSWT)
+#define bfin_write_SECURE_SYSSWT(val)		bfin_write32(SECURE_SYSSWT, val)
+#define bfin_read_SECURE_CONTROL()		bfin_read16(SECURE_CONTROL)
+#define bfin_write_SECURE_CONTROL(val)		bfin_write16(SECURE_CONTROL, val)
+#define bfin_read_SECURE_STATUS()		bfin_read16(SECURE_STATUS)
+#define bfin_write_SECURE_STATUS(val)		bfin_write16(SECURE_STATUS, val)
 
 #endif /* _CDEF_BF512_H */
diff --git a/arch/blackfin/mach-bf518/include/mach/cdefBF514.h b/arch/blackfin/mach-bf518/include/mach/cdefBF514.h
index 108fa4b..dc98866 100644
--- a/arch/blackfin/mach-bf518/include/mach/cdefBF514.h
+++ b/arch/blackfin/mach-bf518/include/mach/cdefBF514.h
@@ -1,5 +1,5 @@
 /*
- * Copyright 2008-2009 Analog Devices Inc.
+ * Copyright 2008-2010 Analog Devices Inc.
  *
  * Licensed under the ADI BSD license or the GPL-2 (or later)
  */
@@ -7,9 +7,6 @@
 #ifndef _CDEF_BF514_H
 #define _CDEF_BF514_H
 
-/* include all Core registers and bit definitions */
-#include "defBF514.h"
-
 /* BF514 is BF512 + RSI */
 #include "cdefBF512.h"
 
diff --git a/arch/blackfin/mach-bf518/include/mach/cdefBF516.h b/arch/blackfin/mach-bf518/include/mach/cdefBF516.h
index 2751592..142e45c 100644
--- a/arch/blackfin/mach-bf518/include/mach/cdefBF516.h
+++ b/arch/blackfin/mach-bf518/include/mach/cdefBF516.h
@@ -1,5 +1,5 @@
 /*
- * Copyright 2008-2009 Analog Devices Inc.
+ * Copyright 2008-2010 Analog Devices Inc.
  *
  * Licensed under the ADI BSD license or the GPL-2 (or later)
  */
@@ -7,9 +7,6 @@
 #ifndef _CDEF_BF516_H
 #define _CDEF_BF516_H
 
-/* include all Core registers and bit definitions */
-#include "defBF516.h"
-
 /* BF516 is BF514 + EMAC */
 #include "cdefBF514.h"
 
diff --git a/arch/blackfin/mach-bf518/include/mach/cdefBF518.h b/arch/blackfin/mach-bf518/include/mach/cdefBF518.h
index 7fb7f0e..e638197 100644
--- a/arch/blackfin/mach-bf518/include/mach/cdefBF518.h
+++ b/arch/blackfin/mach-bf518/include/mach/cdefBF518.h
@@ -1,5 +1,5 @@
 /*
- * Copyright 2008-2009 Analog Devices Inc.
+ * Copyright 2008-2010 Analog Devices Inc.
  *
  * Licensed under the ADI BSD license or the GPL-2 (or later)
  */
@@ -7,9 +7,6 @@
 #ifndef _CDEF_BF518_H
 #define _CDEF_BF518_H
 
-/* include all Core registers and bit definitions */
-#include "defBF518.h"
-
 /* BF518 is BF516 + IEEE-1588 */
 #include "cdefBF516.h"
 
diff --git a/arch/blackfin/mach-bf518/include/mach/cdefBF51x_base.h b/arch/blackfin/mach-bf518/include/mach/cdefBF51x_base.h
deleted file mode 100644
index e16969f..0000000
--- a/arch/blackfin/mach-bf518/include/mach/cdefBF51x_base.h
+++ /dev/null
@@ -1,1061 +0,0 @@
-/*
- * Copyright 2008 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later
- */
-
-#ifndef _CDEF_BF52X_H
-#define _CDEF_BF52X_H
-
-#include <asm/blackfin.h>
-
-#include "defBF51x_base.h"
-
-/* Include core specific register pointer definitions 								*/
-#include <asm/cdef_LPBlackfin.h>
-
-/* ==== begin from cdefBF534.h ==== */
-
-/* Clock and System Control	(0xFFC00000 - 0xFFC000FF)								*/
-#define bfin_read_PLL_CTL()			bfin_read16(PLL_CTL)
-#define bfin_read_PLL_DIV()			bfin_read16(PLL_DIV)
-#define bfin_write_PLL_DIV(val)			bfin_write16(PLL_DIV, val)
-#define bfin_read_VR_CTL()			bfin_read16(VR_CTL)
-#define bfin_read_PLL_STAT()			bfin_read16(PLL_STAT)
-#define bfin_write_PLL_STAT(val)		bfin_write16(PLL_STAT, val)
-#define bfin_read_PLL_LOCKCNT()			bfin_read16(PLL_LOCKCNT)
-#define bfin_write_PLL_LOCKCNT(val)		bfin_write16(PLL_LOCKCNT, val)
-#define bfin_read_CHIPID()			bfin_read32(CHIPID)
-#define bfin_write_CHIPID(val)			bfin_write32(CHIPID, val)
-
-
-/* System Interrupt Controller (0xFFC00100 - 0xFFC001FF)							*/
-#define bfin_read_SWRST()			bfin_read16(SWRST)
-#define bfin_write_SWRST(val)			bfin_write16(SWRST, val)
-#define bfin_read_SYSCR()			bfin_read16(SYSCR)
-#define bfin_write_SYSCR(val)			bfin_write16(SYSCR, val)
-
-#define bfin_read_SIC_RVECT()			bfin_read32(SIC_RVECT)
-#define bfin_write_SIC_RVECT(val)		bfin_write32(SIC_RVECT, val)
-#define bfin_read_SIC_IMASK0()			bfin_read32(SIC_IMASK0)
-#define bfin_write_SIC_IMASK0(val)		bfin_write32(SIC_IMASK0, val)
-#define bfin_read_SIC_IMASK(x)			bfin_read32(SIC_IMASK0 + (x << 6))
-#define bfin_write_SIC_IMASK(x, val)		bfin_write32((SIC_IMASK0 + (x << 6)), val)
-
-#define bfin_read_SIC_IAR0()			bfin_read32(SIC_IAR0)
-#define bfin_write_SIC_IAR0(val)		bfin_write32(SIC_IAR0, val)
-#define bfin_read_SIC_IAR1()			bfin_read32(SIC_IAR1)
-#define bfin_write_SIC_IAR1(val)		bfin_write32(SIC_IAR1, val)
-#define bfin_read_SIC_IAR2()			bfin_read32(SIC_IAR2)
-#define bfin_write_SIC_IAR2(val)		bfin_write32(SIC_IAR2, val)
-#define bfin_read_SIC_IAR3()			bfin_read32(SIC_IAR3)
-#define bfin_write_SIC_IAR3(val)		bfin_write32(SIC_IAR3, val)
-
-#define bfin_read_SIC_ISR0()			bfin_read32(SIC_ISR0)
-#define bfin_write_SIC_ISR0(val)		bfin_write32(SIC_ISR0, val)
-#define bfin_read_SIC_ISR(x)			bfin_read32(SIC_ISR0 + (x << 6))
-#define bfin_write_SIC_ISR(x, val)		bfin_write32((SIC_ISR0 + (x << 6)), val)
-
-#define bfin_read_SIC_IWR0()			bfin_read32(SIC_IWR0)
-#define bfin_write_SIC_IWR0(val)		bfin_write32(SIC_IWR0, val)
-#define bfin_read_SIC_IWR(x)			bfin_read32(SIC_IWR0 + (x << 6))
-#define bfin_write_SIC_IWR(x, val)		bfin_write32((SIC_IWR0 + (x << 6)), val)
-
-/* SIC Additions to ADSP-BF51x (0xFFC0014C - 0xFFC00162) */
-
-#define bfin_read_SIC_IMASK1()			bfin_read32(SIC_IMASK1)
-#define bfin_write_SIC_IMASK1(val)		bfin_write32(SIC_IMASK1, val)
-#define bfin_read_SIC_IAR4()			bfin_read32(SIC_IAR4)
-#define bfin_write_SIC_IAR4(val)		bfin_write32(SIC_IAR4, val)
-#define bfin_read_SIC_IAR5()			bfin_read32(SIC_IAR5)
-#define bfin_write_SIC_IAR5(val)		bfin_write32(SIC_IAR5, val)
-#define bfin_read_SIC_IAR6()			bfin_read32(SIC_IAR6)
-#define bfin_write_SIC_IAR6(val)		bfin_write32(SIC_IAR6, val)
-#define bfin_read_SIC_IAR7()			bfin_read32(SIC_IAR7)
-#define bfin_write_SIC_IAR7(val)		bfin_write32(SIC_IAR7, val)
-#define bfin_read_SIC_ISR1()			bfin_read32(SIC_ISR1)
-#define bfin_write_SIC_ISR1(val)		bfin_write32(SIC_ISR1, val)
-#define bfin_read_SIC_IWR1()			bfin_read32(SIC_IWR1)
-#define bfin_write_SIC_IWR1(val)		bfin_write32(SIC_IWR1, val)
-
-/* Watchdog Timer		(0xFFC00200 - 0xFFC002FF)									*/
-#define bfin_read_WDOG_CTL()			bfin_read16(WDOG_CTL)
-#define bfin_write_WDOG_CTL(val)		bfin_write16(WDOG_CTL, val)
-#define bfin_read_WDOG_CNT()			bfin_read32(WDOG_CNT)
-#define bfin_write_WDOG_CNT(val)		bfin_write32(WDOG_CNT, val)
-#define bfin_read_WDOG_STAT()			bfin_read32(WDOG_STAT)
-#define bfin_write_WDOG_STAT(val)		bfin_write32(WDOG_STAT, val)
-
-
-/* Real Time Clock		(0xFFC00300 - 0xFFC003FF)									*/
-#define bfin_read_RTC_STAT()			bfin_read32(RTC_STAT)
-#define bfin_write_RTC_STAT(val)		bfin_write32(RTC_STAT, val)
-#define bfin_read_RTC_ICTL()			bfin_read16(RTC_ICTL)
-#define bfin_write_RTC_ICTL(val)		bfin_write16(RTC_ICTL, val)
-#define bfin_read_RTC_ISTAT()			bfin_read16(RTC_ISTAT)
-#define bfin_write_RTC_ISTAT(val)		bfin_write16(RTC_ISTAT, val)
-#define bfin_read_RTC_SWCNT()			bfin_read16(RTC_SWCNT)
-#define bfin_write_RTC_SWCNT(val)		bfin_write16(RTC_SWCNT, val)
-#define bfin_read_RTC_ALARM()			bfin_read32(RTC_ALARM)
-#define bfin_write_RTC_ALARM(val)		bfin_write32(RTC_ALARM, val)
-#define bfin_read_RTC_FAST()			bfin_read16(RTC_FAST)
-#define bfin_write_RTC_FAST(val)		bfin_write16(RTC_FAST, val)
-#define bfin_read_RTC_PREN()			bfin_read16(RTC_PREN)
-#define bfin_write_RTC_PREN(val)		bfin_write16(RTC_PREN, val)
-
-
-/* UART0 Controller		(0xFFC00400 - 0xFFC004FF)									*/
-#define bfin_read_UART0_THR()			bfin_read16(UART0_THR)
-#define bfin_write_UART0_THR(val)		bfin_write16(UART0_THR, val)
-#define bfin_read_UART0_RBR()			bfin_read16(UART0_RBR)
-#define bfin_write_UART0_RBR(val)		bfin_write16(UART0_RBR, val)
-#define bfin_read_UART0_DLL()			bfin_read16(UART0_DLL)
-#define bfin_write_UART0_DLL(val)		bfin_write16(UART0_DLL, val)
-#define bfin_read_UART0_IER()			bfin_read16(UART0_IER)
-#define bfin_write_UART0_IER(val)		bfin_write16(UART0_IER, val)
-#define bfin_read_UART0_DLH()			bfin_read16(UART0_DLH)
-#define bfin_write_UART0_DLH(val)		bfin_write16(UART0_DLH, val)
-#define bfin_read_UART0_IIR()			bfin_read16(UART0_IIR)
-#define bfin_write_UART0_IIR(val)		bfin_write16(UART0_IIR, val)
-#define bfin_read_UART0_LCR()			bfin_read16(UART0_LCR)
-#define bfin_write_UART0_LCR(val)		bfin_write16(UART0_LCR, val)
-#define bfin_read_UART0_MCR()			bfin_read16(UART0_MCR)
-#define bfin_write_UART0_MCR(val)		bfin_write16(UART0_MCR, val)
-#define bfin_read_UART0_LSR()			bfin_read16(UART0_LSR)
-#define bfin_write_UART0_LSR(val)		bfin_write16(UART0_LSR, val)
-#define bfin_read_UART0_MSR()			bfin_read16(UART0_MSR)
-#define bfin_write_UART0_MSR(val)		bfin_write16(UART0_MSR, val)
-#define bfin_read_UART0_SCR()			bfin_read16(UART0_SCR)
-#define bfin_write_UART0_SCR(val)		bfin_write16(UART0_SCR, val)
-#define bfin_read_UART0_GCTL()			bfin_read16(UART0_GCTL)
-#define bfin_write_UART0_GCTL(val)		bfin_write16(UART0_GCTL, val)
-
-
-/* TIMER0-7 Registers		(0xFFC00600 - 0xFFC006FF)								*/
-#define bfin_read_TIMER0_CONFIG()		bfin_read16(TIMER0_CONFIG)
-#define bfin_write_TIMER0_CONFIG(val)		bfin_write16(TIMER0_CONFIG, val)
-#define bfin_read_TIMER0_COUNTER()		bfin_read32(TIMER0_COUNTER)
-#define bfin_write_TIMER0_COUNTER(val)		bfin_write32(TIMER0_COUNTER, val)
-#define bfin_read_TIMER0_PERIOD()		bfin_read32(TIMER0_PERIOD)
-#define bfin_write_TIMER0_PERIOD(val)		bfin_write32(TIMER0_PERIOD, val)
-#define bfin_read_TIMER0_WIDTH()		bfin_read32(TIMER0_WIDTH)
-#define bfin_write_TIMER0_WIDTH(val)		bfin_write32(TIMER0_WIDTH, val)
-
-#define bfin_read_TIMER1_CONFIG()		bfin_read16(TIMER1_CONFIG)
-#define bfin_write_TIMER1_CONFIG(val)		bfin_write16(TIMER1_CONFIG, val)
-#define bfin_read_TIMER1_COUNTER()		bfin_read32(TIMER1_COUNTER)
-#define bfin_write_TIMER1_COUNTER(val)		bfin_write32(TIMER1_COUNTER, val)
-#define bfin_read_TIMER1_PERIOD()		bfin_read32(TIMER1_PERIOD)
-#define bfin_write_TIMER1_PERIOD(val)		bfin_write32(TIMER1_PERIOD, val)
-#define bfin_read_TIMER1_WIDTH()		bfin_read32(TIMER1_WIDTH)
-#define bfin_write_TIMER1_WIDTH(val)		bfin_write32(TIMER1_WIDTH, val)
-
-#define bfin_read_TIMER2_CONFIG()		bfin_read16(TIMER2_CONFIG)
-#define bfin_write_TIMER2_CONFIG(val)		bfin_write16(TIMER2_CONFIG, val)
-#define bfin_read_TIMER2_COUNTER()		bfin_read32(TIMER2_COUNTER)
-#define bfin_write_TIMER2_COUNTER(val)		bfin_write32(TIMER2_COUNTER, val)
-#define bfin_read_TIMER2_PERIOD()		bfin_read32(TIMER2_PERIOD)
-#define bfin_write_TIMER2_PERIOD(val)		bfin_write32(TIMER2_PERIOD, val)
-#define bfin_read_TIMER2_WIDTH()		bfin_read32(TIMER2_WIDTH)
-#define bfin_write_TIMER2_WIDTH(val)		bfin_write32(TIMER2_WIDTH, val)
-
-#define bfin_read_TIMER3_CONFIG()		bfin_read16(TIMER3_CONFIG)
-#define bfin_write_TIMER3_CONFIG(val)		bfin_write16(TIMER3_CONFIG, val)
-#define bfin_read_TIMER3_COUNTER()		bfin_read32(TIMER3_COUNTER)
-#define bfin_write_TIMER3_COUNTER(val)		bfin_write32(TIMER3_COUNTER, val)
-#define bfin_read_TIMER3_PERIOD()		bfin_read32(TIMER3_PERIOD)
-#define bfin_write_TIMER3_PERIOD(val)		bfin_write32(TIMER3_PERIOD, val)
-#define bfin_read_TIMER3_WIDTH()		bfin_read32(TIMER3_WIDTH)
-#define bfin_write_TIMER3_WIDTH(val)		bfin_write32(TIMER3_WIDTH, val)
-
-#define bfin_read_TIMER4_CONFIG()		bfin_read16(TIMER4_CONFIG)
-#define bfin_write_TIMER4_CONFIG(val)		bfin_write16(TIMER4_CONFIG, val)
-#define bfin_read_TIMER4_COUNTER()		bfin_read32(TIMER4_COUNTER)
-#define bfin_write_TIMER4_COUNTER(val)		bfin_write32(TIMER4_COUNTER, val)
-#define bfin_read_TIMER4_PERIOD()		bfin_read32(TIMER4_PERIOD)
-#define bfin_write_TIMER4_PERIOD(val)		bfin_write32(TIMER4_PERIOD, val)
-#define bfin_read_TIMER4_WIDTH()		bfin_read32(TIMER4_WIDTH)
-#define bfin_write_TIMER4_WIDTH(val)		bfin_write32(TIMER4_WIDTH, val)
-
-#define bfin_read_TIMER5_CONFIG()		bfin_read16(TIMER5_CONFIG)
-#define bfin_write_TIMER5_CONFIG(val)		bfin_write16(TIMER5_CONFIG, val)
-#define bfin_read_TIMER5_COUNTER()		bfin_read32(TIMER5_COUNTER)
-#define bfin_write_TIMER5_COUNTER(val)		bfin_write32(TIMER5_COUNTER, val)
-#define bfin_read_TIMER5_PERIOD()		bfin_read32(TIMER5_PERIOD)
-#define bfin_write_TIMER5_PERIOD(val)		bfin_write32(TIMER5_PERIOD, val)
-#define bfin_read_TIMER5_WIDTH()		bfin_read32(TIMER5_WIDTH)
-#define bfin_write_TIMER5_WIDTH(val)		bfin_write32(TIMER5_WIDTH, val)
-
-#define bfin_read_TIMER6_CONFIG()		bfin_read16(TIMER6_CONFIG)
-#define bfin_write_TIMER6_CONFIG(val)		bfin_write16(TIMER6_CONFIG, val)
-#define bfin_read_TIMER6_COUNTER()		bfin_read32(TIMER6_COUNTER)
-#define bfin_write_TIMER6_COUNTER(val)		bfin_write32(TIMER6_COUNTER, val)
-#define bfin_read_TIMER6_PERIOD()		bfin_read32(TIMER6_PERIOD)
-#define bfin_write_TIMER6_PERIOD(val)		bfin_write32(TIMER6_PERIOD, val)
-#define bfin_read_TIMER6_WIDTH()		bfin_read32(TIMER6_WIDTH)
-#define bfin_write_TIMER6_WIDTH(val)		bfin_write32(TIMER6_WIDTH, val)
-
-#define bfin_read_TIMER7_CONFIG()		bfin_read16(TIMER7_CONFIG)
-#define bfin_write_TIMER7_CONFIG(val)		bfin_write16(TIMER7_CONFIG, val)
-#define bfin_read_TIMER7_COUNTER()		bfin_read32(TIMER7_COUNTER)
-#define bfin_write_TIMER7_COUNTER(val)		bfin_write32(TIMER7_COUNTER, val)
-#define bfin_read_TIMER7_PERIOD()		bfin_read32(TIMER7_PERIOD)
-#define bfin_write_TIMER7_PERIOD(val)		bfin_write32(TIMER7_PERIOD, val)
-#define bfin_read_TIMER7_WIDTH()		bfin_read32(TIMER7_WIDTH)
-#define bfin_write_TIMER7_WIDTH(val)		bfin_write32(TIMER7_WIDTH, val)
-
-#define bfin_read_TIMER_ENABLE()		bfin_read16(TIMER_ENABLE)
-#define bfin_write_TIMER_ENABLE(val)		bfin_write16(TIMER_ENABLE, val)
-#define bfin_read_TIMER_DISABLE()		bfin_read16(TIMER_DISABLE)
-#define bfin_write_TIMER_DISABLE(val)		bfin_write16(TIMER_DISABLE, val)
-#define bfin_read_TIMER_STATUS()		bfin_read32(TIMER_STATUS)
-#define bfin_write_TIMER_STATUS(val)		bfin_write32(TIMER_STATUS, val)
-
-
-/* General Purpose I/O Port F (0xFFC00700 - 0xFFC007FF)								*/
-#define bfin_read_PORTFIO()			bfin_read16(PORTFIO)
-#define bfin_write_PORTFIO(val)			bfin_write16(PORTFIO, val)
-#define bfin_read_PORTFIO_CLEAR()		bfin_read16(PORTFIO_CLEAR)
-#define bfin_write_PORTFIO_CLEAR(val)		bfin_write16(PORTFIO_CLEAR, val)
-#define bfin_read_PORTFIO_SET()			bfin_read16(PORTFIO_SET)
-#define bfin_write_PORTFIO_SET(val)		bfin_write16(PORTFIO_SET, val)
-#define bfin_read_PORTFIO_TOGGLE()		bfin_read16(PORTFIO_TOGGLE)
-#define bfin_write_PORTFIO_TOGGLE(val)		bfin_write16(PORTFIO_TOGGLE, val)
-#define bfin_read_PORTFIO_MASKA()		bfin_read16(PORTFIO_MASKA)
-#define bfin_write_PORTFIO_MASKA(val)		bfin_write16(PORTFIO_MASKA, val)
-#define bfin_read_PORTFIO_MASKA_CLEAR()		bfin_read16(PORTFIO_MASKA_CLEAR)
-#define bfin_write_PORTFIO_MASKA_CLEAR(val)	bfin_write16(PORTFIO_MASKA_CLEAR, val)
-#define bfin_read_PORTFIO_MASKA_SET()		bfin_read16(PORTFIO_MASKA_SET)
-#define bfin_write_PORTFIO_MASKA_SET(val)	bfin_write16(PORTFIO_MASKA_SET, val)
-#define bfin_read_PORTFIO_MASKA_TOGGLE()	bfin_read16(PORTFIO_MASKA_TOGGLE)
-#define bfin_write_PORTFIO_MASKA_TOGGLE(val)	bfin_write16(PORTFIO_MASKA_TOGGLE, val)
-#define bfin_read_PORTFIO_MASKB()		bfin_read16(PORTFIO_MASKB)
-#define bfin_write_PORTFIO_MASKB(val)		bfin_write16(PORTFIO_MASKB, val)
-#define bfin_read_PORTFIO_MASKB_CLEAR()		bfin_read16(PORTFIO_MASKB_CLEAR)
-#define bfin_write_PORTFIO_MASKB_CLEAR(val)	bfin_write16(PORTFIO_MASKB_CLEAR, val)
-#define bfin_read_PORTFIO_MASKB_SET()		bfin_read16(PORTFIO_MASKB_SET)
-#define bfin_write_PORTFIO_MASKB_SET(val)	bfin_write16(PORTFIO_MASKB_SET, val)
-#define bfin_read_PORTFIO_MASKB_TOGGLE()	bfin_read16(PORTFIO_MASKB_TOGGLE)
-#define bfin_write_PORTFIO_MASKB_TOGGLE(val)	bfin_write16(PORTFIO_MASKB_TOGGLE, val)
-#define bfin_read_PORTFIO_DIR()			bfin_read16(PORTFIO_DIR)
-#define bfin_write_PORTFIO_DIR(val)		bfin_write16(PORTFIO_DIR, val)
-#define bfin_read_PORTFIO_POLAR()		bfin_read16(PORTFIO_POLAR)
-#define bfin_write_PORTFIO_POLAR(val)		bfin_write16(PORTFIO_POLAR, val)
-#define bfin_read_PORTFIO_EDGE()		bfin_read16(PORTFIO_EDGE)
-#define bfin_write_PORTFIO_EDGE(val)		bfin_write16(PORTFIO_EDGE, val)
-#define bfin_read_PORTFIO_BOTH()		bfin_read16(PORTFIO_BOTH)
-#define bfin_write_PORTFIO_BOTH(val)		bfin_write16(PORTFIO_BOTH, val)
-#define bfin_read_PORTFIO_INEN()		bfin_read16(PORTFIO_INEN)
-#define bfin_write_PORTFIO_INEN(val)		bfin_write16(PORTFIO_INEN, val)
-
-
-/* SPORT0 Controller		(0xFFC00800 - 0xFFC008FF)								*/
-#define bfin_read_SPORT0_TCR1()			bfin_read16(SPORT0_TCR1)
-#define bfin_write_SPORT0_TCR1(val)		bfin_write16(SPORT0_TCR1, val)
-#define bfin_read_SPORT0_TCR2()			bfin_read16(SPORT0_TCR2)
-#define bfin_write_SPORT0_TCR2(val)		bfin_write16(SPORT0_TCR2, val)
-#define bfin_read_SPORT0_TCLKDIV()		bfin_read16(SPORT0_TCLKDIV)
-#define bfin_write_SPORT0_TCLKDIV(val)		bfin_write16(SPORT0_TCLKDIV, val)
-#define bfin_read_SPORT0_TFSDIV()		bfin_read16(SPORT0_TFSDIV)
-#define bfin_write_SPORT0_TFSDIV(val)		bfin_write16(SPORT0_TFSDIV, val)
-#define bfin_read_SPORT0_TX()			bfin_read32(SPORT0_TX)
-#define bfin_write_SPORT0_TX(val)		bfin_write32(SPORT0_TX, val)
-#define bfin_read_SPORT0_RX()			bfin_read32(SPORT0_RX)
-#define bfin_write_SPORT0_RX(val)		bfin_write32(SPORT0_RX, val)
-#define bfin_read_SPORT0_TX32()			bfin_read32(SPORT0_TX)
-#define bfin_write_SPORT0_TX32(val)		bfin_write32(SPORT0_TX, val)
-#define bfin_read_SPORT0_RX32()			bfin_read32(SPORT0_RX)
-#define bfin_write_SPORT0_RX32(val)		bfin_write32(SPORT0_RX, val)
-#define bfin_read_SPORT0_TX16()			bfin_read16(SPORT0_TX)
-#define bfin_write_SPORT0_TX16(val)		bfin_write16(SPORT0_TX, val)
-#define bfin_read_SPORT0_RX16()			bfin_read16(SPORT0_RX)
-#define bfin_write_SPORT0_RX16(val)		bfin_write16(SPORT0_RX, val)
-#define bfin_read_SPORT0_RCR1()			bfin_read16(SPORT0_RCR1)
-#define bfin_write_SPORT0_RCR1(val)		bfin_write16(SPORT0_RCR1, val)
-#define bfin_read_SPORT0_RCR2()			bfin_read16(SPORT0_RCR2)
-#define bfin_write_SPORT0_RCR2(val)		bfin_write16(SPORT0_RCR2, val)
-#define bfin_read_SPORT0_RCLKDIV()		bfin_read16(SPORT0_RCLKDIV)
-#define bfin_write_SPORT0_RCLKDIV(val)		bfin_write16(SPORT0_RCLKDIV, val)
-#define bfin_read_SPORT0_RFSDIV()		bfin_read16(SPORT0_RFSDIV)
-#define bfin_write_SPORT0_RFSDIV(val)		bfin_write16(SPORT0_RFSDIV, val)
-#define bfin_read_SPORT0_STAT()			bfin_read16(SPORT0_STAT)
-#define bfin_write_SPORT0_STAT(val)		bfin_write16(SPORT0_STAT, val)
-#define bfin_read_SPORT0_CHNL()			bfin_read16(SPORT0_CHNL)
-#define bfin_write_SPORT0_CHNL(val)		bfin_write16(SPORT0_CHNL, val)
-#define bfin_read_SPORT0_MCMC1()		bfin_read16(SPORT0_MCMC1)
-#define bfin_write_SPORT0_MCMC1(val)		bfin_write16(SPORT0_MCMC1, val)
-#define bfin_read_SPORT0_MCMC2()		bfin_read16(SPORT0_MCMC2)
-#define bfin_write_SPORT0_MCMC2(val)		bfin_write16(SPORT0_MCMC2, val)
-#define bfin_read_SPORT0_MTCS0()		bfin_read32(SPORT0_MTCS0)
-#define bfin_write_SPORT0_MTCS0(val)		bfin_write32(SPORT0_MTCS0, val)
-#define bfin_read_SPORT0_MTCS1()		bfin_read32(SPORT0_MTCS1)
-#define bfin_write_SPORT0_MTCS1(val)		bfin_write32(SPORT0_MTCS1, val)
-#define bfin_read_SPORT0_MTCS2()		bfin_read32(SPORT0_MTCS2)
-#define bfin_write_SPORT0_MTCS2(val)		bfin_write32(SPORT0_MTCS2, val)
-#define bfin_read_SPORT0_MTCS3()		bfin_read32(SPORT0_MTCS3)
-#define bfin_write_SPORT0_MTCS3(val)		bfin_write32(SPORT0_MTCS3, val)
-#define bfin_read_SPORT0_MRCS0()		bfin_read32(SPORT0_MRCS0)
-#define bfin_write_SPORT0_MRCS0(val)		bfin_write32(SPORT0_MRCS0, val)
-#define bfin_read_SPORT0_MRCS1()		bfin_read32(SPORT0_MRCS1)
-#define bfin_write_SPORT0_MRCS1(val)		bfin_write32(SPORT0_MRCS1, val)
-#define bfin_read_SPORT0_MRCS2()		bfin_read32(SPORT0_MRCS2)
-#define bfin_write_SPORT0_MRCS2(val)		bfin_write32(SPORT0_MRCS2, val)
-#define bfin_read_SPORT0_MRCS3()		bfin_read32(SPORT0_MRCS3)
-#define bfin_write_SPORT0_MRCS3(val)		bfin_write32(SPORT0_MRCS3, val)
-
-
-/* SPORT1 Controller		(0xFFC00900 - 0xFFC009FF)								*/
-#define bfin_read_SPORT1_TCR1()			bfin_read16(SPORT1_TCR1)
-#define bfin_write_SPORT1_TCR1(val)		bfin_write16(SPORT1_TCR1, val)
-#define bfin_read_SPORT1_TCR2()			bfin_read16(SPORT1_TCR2)
-#define bfin_write_SPORT1_TCR2(val)		bfin_write16(SPORT1_TCR2, val)
-#define bfin_read_SPORT1_TCLKDIV()		bfin_read16(SPORT1_TCLKDIV)
-#define bfin_write_SPORT1_TCLKDIV(val)		bfin_write16(SPORT1_TCLKDIV, val)
-#define bfin_read_SPORT1_TFSDIV()		bfin_read16(SPORT1_TFSDIV)
-#define bfin_write_SPORT1_TFSDIV(val)		bfin_write16(SPORT1_TFSDIV, val)
-#define bfin_read_SPORT1_TX()			bfin_read32(SPORT1_TX)
-#define bfin_write_SPORT1_TX(val)		bfin_write32(SPORT1_TX, val)
-#define bfin_read_SPORT1_RX()			bfin_read32(SPORT1_RX)
-#define bfin_write_SPORT1_RX(val)		bfin_write32(SPORT1_RX, val)
-#define bfin_read_SPORT1_TX32()			bfin_read32(SPORT1_TX)
-#define bfin_write_SPORT1_TX32(val)		bfin_write32(SPORT1_TX, val)
-#define bfin_read_SPORT1_RX32()			bfin_read32(SPORT1_RX)
-#define bfin_write_SPORT1_RX32(val)		bfin_write32(SPORT1_RX, val)
-#define bfin_read_SPORT1_TX16()			bfin_read16(SPORT1_TX)
-#define bfin_write_SPORT1_TX16(val)		bfin_write16(SPORT1_TX, val)
-#define bfin_read_SPORT1_RX16()			bfin_read16(SPORT1_RX)
-#define bfin_write_SPORT1_RX16(val)		bfin_write16(SPORT1_RX, val)
-#define bfin_read_SPORT1_RCR1()			bfin_read16(SPORT1_RCR1)
-#define bfin_write_SPORT1_RCR1(val)		bfin_write16(SPORT1_RCR1, val)
-#define bfin_read_SPORT1_RCR2()			bfin_read16(SPORT1_RCR2)
-#define bfin_write_SPORT1_RCR2(val)		bfin_write16(SPORT1_RCR2, val)
-#define bfin_read_SPORT1_RCLKDIV()		bfin_read16(SPORT1_RCLKDIV)
-#define bfin_write_SPORT1_RCLKDIV(val)		bfin_write16(SPORT1_RCLKDIV, val)
-#define bfin_read_SPORT1_RFSDIV()		bfin_read16(SPORT1_RFSDIV)
-#define bfin_write_SPORT1_RFSDIV(val)		bfin_write16(SPORT1_RFSDIV, val)
-#define bfin_read_SPORT1_STAT()			bfin_read16(SPORT1_STAT)
-#define bfin_write_SPORT1_STAT(val)		bfin_write16(SPORT1_STAT, val)
-#define bfin_read_SPORT1_CHNL()			bfin_read16(SPORT1_CHNL)
-#define bfin_write_SPORT1_CHNL(val)		bfin_write16(SPORT1_CHNL, val)
-#define bfin_read_SPORT1_MCMC1()		bfin_read16(SPORT1_MCMC1)
-#define bfin_write_SPORT1_MCMC1(val)		bfin_write16(SPORT1_MCMC1, val)
-#define bfin_read_SPORT1_MCMC2()		bfin_read16(SPORT1_MCMC2)
-#define bfin_write_SPORT1_MCMC2(val)		bfin_write16(SPORT1_MCMC2, val)
-#define bfin_read_SPORT1_MTCS0()		bfin_read32(SPORT1_MTCS0)
-#define bfin_write_SPORT1_MTCS0(val)		bfin_write32(SPORT1_MTCS0, val)
-#define bfin_read_SPORT1_MTCS1()		bfin_read32(SPORT1_MTCS1)
-#define bfin_write_SPORT1_MTCS1(val)		bfin_write32(SPORT1_MTCS1, val)
-#define bfin_read_SPORT1_MTCS2()		bfin_read32(SPORT1_MTCS2)
-#define bfin_write_SPORT1_MTCS2(val)		bfin_write32(SPORT1_MTCS2, val)
-#define bfin_read_SPORT1_MTCS3()		bfin_read32(SPORT1_MTCS3)
-#define bfin_write_SPORT1_MTCS3(val)		bfin_write32(SPORT1_MTCS3, val)
-#define bfin_read_SPORT1_MRCS0()		bfin_read32(SPORT1_MRCS0)
-#define bfin_write_SPORT1_MRCS0(val)		bfin_write32(SPORT1_MRCS0, val)
-#define bfin_read_SPORT1_MRCS1()		bfin_read32(SPORT1_MRCS1)
-#define bfin_write_SPORT1_MRCS1(val)		bfin_write32(SPORT1_MRCS1, val)
-#define bfin_read_SPORT1_MRCS2()		bfin_read32(SPORT1_MRCS2)
-#define bfin_write_SPORT1_MRCS2(val)		bfin_write32(SPORT1_MRCS2, val)
-#define bfin_read_SPORT1_MRCS3()		bfin_read32(SPORT1_MRCS3)
-#define bfin_write_SPORT1_MRCS3(val)		bfin_write32(SPORT1_MRCS3, val)
-
-
-/* External Bus Interface Unit (0xFFC00A00 - 0xFFC00AFF)							*/
-#define bfin_read_EBIU_AMGCTL()			bfin_read16(EBIU_AMGCTL)
-#define bfin_write_EBIU_AMGCTL(val)		bfin_write16(EBIU_AMGCTL, val)
-#define bfin_read_EBIU_AMBCTL0()		bfin_read32(EBIU_AMBCTL0)
-#define bfin_write_EBIU_AMBCTL0(val)		bfin_write32(EBIU_AMBCTL0, val)
-#define bfin_read_EBIU_AMBCTL1()		bfin_read32(EBIU_AMBCTL1)
-#define bfin_write_EBIU_AMBCTL1(val)		bfin_write32(EBIU_AMBCTL1, val)
-#define bfin_read_EBIU_SDGCTL()			bfin_read32(EBIU_SDGCTL)
-#define bfin_write_EBIU_SDGCTL(val)		bfin_write32(EBIU_SDGCTL, val)
-#define bfin_read_EBIU_SDBCTL()			bfin_read16(EBIU_SDBCTL)
-#define bfin_write_EBIU_SDBCTL(val)		bfin_write16(EBIU_SDBCTL, val)
-#define bfin_read_EBIU_SDRRC()			bfin_read16(EBIU_SDRRC)
-#define bfin_write_EBIU_SDRRC(val)		bfin_write16(EBIU_SDRRC, val)
-#define bfin_read_EBIU_SDSTAT()			bfin_read16(EBIU_SDSTAT)
-#define bfin_write_EBIU_SDSTAT(val)		bfin_write16(EBIU_SDSTAT, val)
-
-
-/* DMA Traffic Control Registers													*/
-#define bfin_read_DMA_TC_PER()			bfin_read16(DMA_TC_PER)
-#define bfin_write_DMA_TC_PER(val)		bfin_write16(DMA_TC_PER, val)
-#define bfin_read_DMA_TC_CNT()			bfin_read16(DMA_TC_CNT)
-#define bfin_write_DMA_TC_CNT(val)		bfin_write16(DMA_TC_CNT, val)
-
-/* Alternate deprecated register names (below) provided for backwards code compatibility */
-#define bfin_read_DMA_TCPER()			bfin_read16(DMA_TCPER)
-#define bfin_write_DMA_TCPER(val)		bfin_write16(DMA_TCPER, val)
-#define bfin_read_DMA_TCCNT()			bfin_read16(DMA_TCCNT)
-#define bfin_write_DMA_TCCNT(val)		bfin_write16(DMA_TCCNT, val)
-
-/* DMA Controller																	*/
-#define bfin_read_DMA0_CONFIG()			bfin_read16(DMA0_CONFIG)
-#define bfin_write_DMA0_CONFIG(val)		bfin_write16(DMA0_CONFIG, val)
-#define bfin_read_DMA0_NEXT_DESC_PTR()		bfin_read32(DMA0_NEXT_DESC_PTR)
-#define bfin_write_DMA0_NEXT_DESC_PTR(val)	bfin_write32(DMA0_NEXT_DESC_PTR, val)
-#define bfin_read_DMA0_START_ADDR()		bfin_read32(DMA0_START_ADDR)
-#define bfin_write_DMA0_START_ADDR(val)		bfin_write32(DMA0_START_ADDR, val)
-#define bfin_read_DMA0_X_COUNT()		bfin_read16(DMA0_X_COUNT)
-#define bfin_write_DMA0_X_COUNT(val)		bfin_write16(DMA0_X_COUNT, val)
-#define bfin_read_DMA0_Y_COUNT()		bfin_read16(DMA0_Y_COUNT)
-#define bfin_write_DMA0_Y_COUNT(val)		bfin_write16(DMA0_Y_COUNT, val)
-#define bfin_read_DMA0_X_MODIFY()		bfin_read16(DMA0_X_MODIFY)
-#define bfin_write_DMA0_X_MODIFY(val)		bfin_write16(DMA0_X_MODIFY, val)
-#define bfin_read_DMA0_Y_MODIFY()		bfin_read16(DMA0_Y_MODIFY)
-#define bfin_write_DMA0_Y_MODIFY(val)		bfin_write16(DMA0_Y_MODIFY, val)
-#define bfin_read_DMA0_CURR_DESC_PTR()		bfin_read32(DMA0_CURR_DESC_PTR)
-#define bfin_write_DMA0_CURR_DESC_PTR(val)	bfin_write32(DMA0_CURR_DESC_PTR, val)
-#define bfin_read_DMA0_CURR_ADDR()		bfin_read32(DMA0_CURR_ADDR)
-#define bfin_write_DMA0_CURR_ADDR(val)		bfin_write32(DMA0_CURR_ADDR, val)
-#define bfin_read_DMA0_CURR_X_COUNT()		bfin_read16(DMA0_CURR_X_COUNT)
-#define bfin_write_DMA0_CURR_X_COUNT(val)	bfin_write16(DMA0_CURR_X_COUNT, val)
-#define bfin_read_DMA0_CURR_Y_COUNT()		bfin_read16(DMA0_CURR_Y_COUNT)
-#define bfin_write_DMA0_CURR_Y_COUNT(val)	bfin_write16(DMA0_CURR_Y_COUNT, val)
-#define bfin_read_DMA0_IRQ_STATUS()		bfin_read16(DMA0_IRQ_STATUS)
-#define bfin_write_DMA0_IRQ_STATUS(val)		bfin_write16(DMA0_IRQ_STATUS, val)
-#define bfin_read_DMA0_PERIPHERAL_MAP()		bfin_read16(DMA0_PERIPHERAL_MAP)
-#define bfin_write_DMA0_PERIPHERAL_MAP(val)	bfin_write16(DMA0_PERIPHERAL_MAP, val)
-
-#define bfin_read_DMA1_CONFIG()			bfin_read16(DMA1_CONFIG)
-#define bfin_write_DMA1_CONFIG(val)		bfin_write16(DMA1_CONFIG, val)
-#define bfin_read_DMA1_NEXT_DESC_PTR()		bfin_read32(DMA1_NEXT_DESC_PTR)
-#define bfin_write_DMA1_NEXT_DESC_PTR(val)	bfin_write32(DMA1_NEXT_DESC_PTR, val)
-#define bfin_read_DMA1_START_ADDR()		bfin_read32(DMA1_START_ADDR)
-#define bfin_write_DMA1_START_ADDR(val)		bfin_write32(DMA1_START_ADDR, val)
-#define bfin_read_DMA1_X_COUNT()		bfin_read16(DMA1_X_COUNT)
-#define bfin_write_DMA1_X_COUNT(val)		bfin_write16(DMA1_X_COUNT, val)
-#define bfin_read_DMA1_Y_COUNT()		bfin_read16(DMA1_Y_COUNT)
-#define bfin_write_DMA1_Y_COUNT(val)		bfin_write16(DMA1_Y_COUNT, val)
-#define bfin_read_DMA1_X_MODIFY()		bfin_read16(DMA1_X_MODIFY)
-#define bfin_write_DMA1_X_MODIFY(val)		bfin_write16(DMA1_X_MODIFY, val)
-#define bfin_read_DMA1_Y_MODIFY()		bfin_read16(DMA1_Y_MODIFY)
-#define bfin_write_DMA1_Y_MODIFY(val)		bfin_write16(DMA1_Y_MODIFY, val)
-#define bfin_read_DMA1_CURR_DESC_PTR()		bfin_read32(DMA1_CURR_DESC_PTR)
-#define bfin_write_DMA1_CURR_DESC_PTR(val)	bfin_write32(DMA1_CURR_DESC_PTR, val)
-#define bfin_read_DMA1_CURR_ADDR()		bfin_read32(DMA1_CURR_ADDR)
-#define bfin_write_DMA1_CURR_ADDR(val)		bfin_write32(DMA1_CURR_ADDR, val)
-#define bfin_read_DMA1_CURR_X_COUNT()		bfin_read16(DMA1_CURR_X_COUNT)
-#define bfin_write_DMA1_CURR_X_COUNT(val)	bfin_write16(DMA1_CURR_X_COUNT, val)
-#define bfin_read_DMA1_CURR_Y_COUNT()		bfin_read16(DMA1_CURR_Y_COUNT)
-#define bfin_write_DMA1_CURR_Y_COUNT(val)	bfin_write16(DMA1_CURR_Y_COUNT, val)
-#define bfin_read_DMA1_IRQ_STATUS()		bfin_read16(DMA1_IRQ_STATUS)
-#define bfin_write_DMA1_IRQ_STATUS(val)		bfin_write16(DMA1_IRQ_STATUS, val)
-#define bfin_read_DMA1_PERIPHERAL_MAP()		bfin_read16(DMA1_PERIPHERAL_MAP)
-#define bfin_write_DMA1_PERIPHERAL_MAP(val)	bfin_write16(DMA1_PERIPHERAL_MAP, val)
-
-#define bfin_read_DMA2_CONFIG()			bfin_read16(DMA2_CONFIG)
-#define bfin_write_DMA2_CONFIG(val)		bfin_write16(DMA2_CONFIG, val)
-#define bfin_read_DMA2_NEXT_DESC_PTR()		bfin_read32(DMA2_NEXT_DESC_PTR)
-#define bfin_write_DMA2_NEXT_DESC_PTR(val)	bfin_write32(DMA2_NEXT_DESC_PTR, val)
-#define bfin_read_DMA2_START_ADDR()		bfin_read32(DMA2_START_ADDR)
-#define bfin_write_DMA2_START_ADDR(val)		bfin_write32(DMA2_START_ADDR, val)
-#define bfin_read_DMA2_X_COUNT()		bfin_read16(DMA2_X_COUNT)
-#define bfin_write_DMA2_X_COUNT(val)		bfin_write16(DMA2_X_COUNT, val)
-#define bfin_read_DMA2_Y_COUNT()		bfin_read16(DMA2_Y_COUNT)
-#define bfin_write_DMA2_Y_COUNT(val)		bfin_write16(DMA2_Y_COUNT, val)
-#define bfin_read_DMA2_X_MODIFY()		bfin_read16(DMA2_X_MODIFY)
-#define bfin_write_DMA2_X_MODIFY(val)		bfin_write16(DMA2_X_MODIFY, val)
-#define bfin_read_DMA2_Y_MODIFY()		bfin_read16(DMA2_Y_MODIFY)
-#define bfin_write_DMA2_Y_MODIFY(val)		bfin_write16(DMA2_Y_MODIFY, val)
-#define bfin_read_DMA2_CURR_DESC_PTR()		bfin_read32(DMA2_CURR_DESC_PTR)
-#define bfin_write_DMA2_CURR_DESC_PTR(val)	bfin_write32(DMA2_CURR_DESC_PTR, val)
-#define bfin_read_DMA2_CURR_ADDR()		bfin_read32(DMA2_CURR_ADDR)
-#define bfin_write_DMA2_CURR_ADDR(val)		bfin_write32(DMA2_CURR_ADDR, val)
-#define bfin_read_DMA2_CURR_X_COUNT()		bfin_read16(DMA2_CURR_X_COUNT)
-#define bfin_write_DMA2_CURR_X_COUNT(val)	bfin_write16(DMA2_CURR_X_COUNT, val)
-#define bfin_read_DMA2_CURR_Y_COUNT()		bfin_read16(DMA2_CURR_Y_COUNT)
-#define bfin_write_DMA2_CURR_Y_COUNT(val)	bfin_write16(DMA2_CURR_Y_COUNT, val)
-#define bfin_read_DMA2_IRQ_STATUS()		bfin_read16(DMA2_IRQ_STATUS)
-#define bfin_write_DMA2_IRQ_STATUS(val)		bfin_write16(DMA2_IRQ_STATUS, val)
-#define bfin_read_DMA2_PERIPHERAL_MAP()		bfin_read16(DMA2_PERIPHERAL_MAP)
-#define bfin_write_DMA2_PERIPHERAL_MAP(val)	bfin_write16(DMA2_PERIPHERAL_MAP, val)
-
-#define bfin_read_DMA3_CONFIG()			bfin_read16(DMA3_CONFIG)
-#define bfin_write_DMA3_CONFIG(val)		bfin_write16(DMA3_CONFIG, val)
-#define bfin_read_DMA3_NEXT_DESC_PTR()		bfin_read32(DMA3_NEXT_DESC_PTR)
-#define bfin_write_DMA3_NEXT_DESC_PTR(val)	bfin_write32(DMA3_NEXT_DESC_PTR, val)
-#define bfin_read_DMA3_START_ADDR()		bfin_read32(DMA3_START_ADDR)
-#define bfin_write_DMA3_START_ADDR(val)		bfin_write32(DMA3_START_ADDR, val)
-#define bfin_read_DMA3_X_COUNT()		bfin_read16(DMA3_X_COUNT)
-#define bfin_write_DMA3_X_COUNT(val)		bfin_write16(DMA3_X_COUNT, val)
-#define bfin_read_DMA3_Y_COUNT()		bfin_read16(DMA3_Y_COUNT)
-#define bfin_write_DMA3_Y_COUNT(val)		bfin_write16(DMA3_Y_COUNT, val)
-#define bfin_read_DMA3_X_MODIFY()		bfin_read16(DMA3_X_MODIFY)
-#define bfin_write_DMA3_X_MODIFY(val)		bfin_write16(DMA3_X_MODIFY, val)
-#define bfin_read_DMA3_Y_MODIFY()		bfin_read16(DMA3_Y_MODIFY)
-#define bfin_write_DMA3_Y_MODIFY(val)		bfin_write16(DMA3_Y_MODIFY, val)
-#define bfin_read_DMA3_CURR_DESC_PTR()		bfin_read32(DMA3_CURR_DESC_PTR)
-#define bfin_write_DMA3_CURR_DESC_PTR(val)	bfin_write32(DMA3_CURR_DESC_PTR, val)
-#define bfin_read_DMA3_CURR_ADDR()		bfin_read32(DMA3_CURR_ADDR)
-#define bfin_write_DMA3_CURR_ADDR(val)		bfin_write32(DMA3_CURR_ADDR, val)
-#define bfin_read_DMA3_CURR_X_COUNT()		bfin_read16(DMA3_CURR_X_COUNT)
-#define bfin_write_DMA3_CURR_X_COUNT(val)	bfin_write16(DMA3_CURR_X_COUNT, val)
-#define bfin_read_DMA3_CURR_Y_COUNT()		bfin_read16(DMA3_CURR_Y_COUNT)
-#define bfin_write_DMA3_CURR_Y_COUNT(val)	bfin_write16(DMA3_CURR_Y_COUNT, val)
-#define bfin_read_DMA3_IRQ_STATUS()		bfin_read16(DMA3_IRQ_STATUS)
-#define bfin_write_DMA3_IRQ_STATUS(val)		bfin_write16(DMA3_IRQ_STATUS, val)
-#define bfin_read_DMA3_PERIPHERAL_MAP()		bfin_read16(DMA3_PERIPHERAL_MAP)
-#define bfin_write_DMA3_PERIPHERAL_MAP(val)	bfin_write16(DMA3_PERIPHERAL_MAP, val)
-
-#define bfin_read_DMA4_CONFIG()			bfin_read16(DMA4_CONFIG)
-#define bfin_write_DMA4_CONFIG(val)		bfin_write16(DMA4_CONFIG, val)
-#define bfin_read_DMA4_NEXT_DESC_PTR()		bfin_read32(DMA4_NEXT_DESC_PTR)
-#define bfin_write_DMA4_NEXT_DESC_PTR(val)	bfin_write32(DMA4_NEXT_DESC_PTR, val)
-#define bfin_read_DMA4_START_ADDR()		bfin_read32(DMA4_START_ADDR)
-#define bfin_write_DMA4_START_ADDR(val)		bfin_write32(DMA4_START_ADDR, val)
-#define bfin_read_DMA4_X_COUNT()		bfin_read16(DMA4_X_COUNT)
-#define bfin_write_DMA4_X_COUNT(val)		bfin_write16(DMA4_X_COUNT, val)
-#define bfin_read_DMA4_Y_COUNT()		bfin_read16(DMA4_Y_COUNT)
-#define bfin_write_DMA4_Y_COUNT(val)		bfin_write16(DMA4_Y_COUNT, val)
-#define bfin_read_DMA4_X_MODIFY()		bfin_read16(DMA4_X_MODIFY)
-#define bfin_write_DMA4_X_MODIFY(val)		bfin_write16(DMA4_X_MODIFY, val)
-#define bfin_read_DMA4_Y_MODIFY()		bfin_read16(DMA4_Y_MODIFY)
-#define bfin_write_DMA4_Y_MODIFY(val)		bfin_write16(DMA4_Y_MODIFY, val)
-#define bfin_read_DMA4_CURR_DESC_PTR()		bfin_read32(DMA4_CURR_DESC_PTR)
-#define bfin_write_DMA4_CURR_DESC_PTR(val)	bfin_write32(DMA4_CURR_DESC_PTR, val)
-#define bfin_read_DMA4_CURR_ADDR()		bfin_read32(DMA4_CURR_ADDR)
-#define bfin_write_DMA4_CURR_ADDR(val)		bfin_write32(DMA4_CURR_ADDR, val)
-#define bfin_read_DMA4_CURR_X_COUNT()		bfin_read16(DMA4_CURR_X_COUNT)
-#define bfin_write_DMA4_CURR_X_COUNT(val)	bfin_write16(DMA4_CURR_X_COUNT, val)
-#define bfin_read_DMA4_CURR_Y_COUNT()		bfin_read16(DMA4_CURR_Y_COUNT)
-#define bfin_write_DMA4_CURR_Y_COUNT(val)	bfin_write16(DMA4_CURR_Y_COUNT, val)
-#define bfin_read_DMA4_IRQ_STATUS()		bfin_read16(DMA4_IRQ_STATUS)
-#define bfin_write_DMA4_IRQ_STATUS(val)		bfin_write16(DMA4_IRQ_STATUS, val)
-#define bfin_read_DMA4_PERIPHERAL_MAP()		bfin_read16(DMA4_PERIPHERAL_MAP)
-#define bfin_write_DMA4_PERIPHERAL_MAP(val)	bfin_write16(DMA4_PERIPHERAL_MAP, val)
-
-#define bfin_read_DMA5_CONFIG()			bfin_read16(DMA5_CONFIG)
-#define bfin_write_DMA5_CONFIG(val)		bfin_write16(DMA5_CONFIG, val)
-#define bfin_read_DMA5_NEXT_DESC_PTR()		bfin_read32(DMA5_NEXT_DESC_PTR)
-#define bfin_write_DMA5_NEXT_DESC_PTR(val)	bfin_write32(DMA5_NEXT_DESC_PTR, val)
-#define bfin_read_DMA5_START_ADDR()		bfin_read32(DMA5_START_ADDR)
-#define bfin_write_DMA5_START_ADDR(val)		bfin_write32(DMA5_START_ADDR, val)
-#define bfin_read_DMA5_X_COUNT()		bfin_read16(DMA5_X_COUNT)
-#define bfin_write_DMA5_X_COUNT(val)		bfin_write16(DMA5_X_COUNT, val)
-#define bfin_read_DMA5_Y_COUNT()		bfin_read16(DMA5_Y_COUNT)
-#define bfin_write_DMA5_Y_COUNT(val)		bfin_write16(DMA5_Y_COUNT, val)
-#define bfin_read_DMA5_X_MODIFY()		bfin_read16(DMA5_X_MODIFY)
-#define bfin_write_DMA5_X_MODIFY(val)		bfin_write16(DMA5_X_MODIFY, val)
-#define bfin_read_DMA5_Y_MODIFY()		bfin_read16(DMA5_Y_MODIFY)
-#define bfin_write_DMA5_Y_MODIFY(val)		bfin_write16(DMA5_Y_MODIFY, val)
-#define bfin_read_DMA5_CURR_DESC_PTR()		bfin_read32(DMA5_CURR_DESC_PTR)
-#define bfin_write_DMA5_CURR_DESC_PTR(val)	bfin_write32(DMA5_CURR_DESC_PTR, val)
-#define bfin_read_DMA5_CURR_ADDR()		bfin_read32(DMA5_CURR_ADDR)
-#define bfin_write_DMA5_CURR_ADDR(val)		bfin_write32(DMA5_CURR_ADDR, val)
-#define bfin_read_DMA5_CURR_X_COUNT()		bfin_read16(DMA5_CURR_X_COUNT)
-#define bfin_write_DMA5_CURR_X_COUNT(val)	bfin_write16(DMA5_CURR_X_COUNT, val)
-#define bfin_read_DMA5_CURR_Y_COUNT()		bfin_read16(DMA5_CURR_Y_COUNT)
-#define bfin_write_DMA5_CURR_Y_COUNT(val)	bfin_write16(DMA5_CURR_Y_COUNT, val)
-#define bfin_read_DMA5_IRQ_STATUS()		bfin_read16(DMA5_IRQ_STATUS)
-#define bfin_write_DMA5_IRQ_STATUS(val)		bfin_write16(DMA5_IRQ_STATUS, val)
-#define bfin_read_DMA5_PERIPHERAL_MAP()		bfin_read16(DMA5_PERIPHERAL_MAP)
-#define bfin_write_DMA5_PERIPHERAL_MAP(val)	bfin_write16(DMA5_PERIPHERAL_MAP, val)
-
-#define bfin_read_DMA6_CONFIG()			bfin_read16(DMA6_CONFIG)
-#define bfin_write_DMA6_CONFIG(val)		bfin_write16(DMA6_CONFIG, val)
-#define bfin_read_DMA6_NEXT_DESC_PTR()		bfin_read32(DMA6_NEXT_DESC_PTR)
-#define bfin_write_DMA6_NEXT_DESC_PTR(val)	bfin_write32(DMA6_NEXT_DESC_PTR, val)
-#define bfin_read_DMA6_START_ADDR()		bfin_read32(DMA6_START_ADDR)
-#define bfin_write_DMA6_START_ADDR(val)		bfin_write32(DMA6_START_ADDR, val)
-#define bfin_read_DMA6_X_COUNT()		bfin_read16(DMA6_X_COUNT)
-#define bfin_write_DMA6_X_COUNT(val)		bfin_write16(DMA6_X_COUNT, val)
-#define bfin_read_DMA6_Y_COUNT()		bfin_read16(DMA6_Y_COUNT)
-#define bfin_write_DMA6_Y_COUNT(val)		bfin_write16(DMA6_Y_COUNT, val)
-#define bfin_read_DMA6_X_MODIFY()		bfin_read16(DMA6_X_MODIFY)
-#define bfin_write_DMA6_X_MODIFY(val)		bfin_write16(DMA6_X_MODIFY, val)
-#define bfin_read_DMA6_Y_MODIFY()		bfin_read16(DMA6_Y_MODIFY)
-#define bfin_write_DMA6_Y_MODIFY(val)		bfin_write16(DMA6_Y_MODIFY, val)
-#define bfin_read_DMA6_CURR_DESC_PTR()		bfin_read32(DMA6_CURR_DESC_PTR)
-#define bfin_write_DMA6_CURR_DESC_PTR(val)	bfin_write32(DMA6_CURR_DESC_PTR, val)
-#define bfin_read_DMA6_CURR_ADDR()		bfin_read32(DMA6_CURR_ADDR)
-#define bfin_write_DMA6_CURR_ADDR(val)		bfin_write32(DMA6_CURR_ADDR, val)
-#define bfin_read_DMA6_CURR_X_COUNT()		bfin_read16(DMA6_CURR_X_COUNT)
-#define bfin_write_DMA6_CURR_X_COUNT(val)	bfin_write16(DMA6_CURR_X_COUNT, val)
-#define bfin_read_DMA6_CURR_Y_COUNT()		bfin_read16(DMA6_CURR_Y_COUNT)
-#define bfin_write_DMA6_CURR_Y_COUNT(val)	bfin_write16(DMA6_CURR_Y_COUNT, val)
-#define bfin_read_DMA6_IRQ_STATUS()		bfin_read16(DMA6_IRQ_STATUS)
-#define bfin_write_DMA6_IRQ_STATUS(val)		bfin_write16(DMA6_IRQ_STATUS, val)
-#define bfin_read_DMA6_PERIPHERAL_MAP()		bfin_read16(DMA6_PERIPHERAL_MAP)
-#define bfin_write_DMA6_PERIPHERAL_MAP(val)	bfin_write16(DMA6_PERIPHERAL_MAP, val)
-
-#define bfin_read_DMA7_CONFIG()			bfin_read16(DMA7_CONFIG)
-#define bfin_write_DMA7_CONFIG(val)		bfin_write16(DMA7_CONFIG, val)
-#define bfin_read_DMA7_NEXT_DESC_PTR()		bfin_read32(DMA7_NEXT_DESC_PTR)
-#define bfin_write_DMA7_NEXT_DESC_PTR(val)	bfin_write32(DMA7_NEXT_DESC_PTR, val)
-#define bfin_read_DMA7_START_ADDR()		bfin_read32(DMA7_START_ADDR)
-#define bfin_write_DMA7_START_ADDR(val)		bfin_write32(DMA7_START_ADDR, val)
-#define bfin_read_DMA7_X_COUNT()		bfin_read16(DMA7_X_COUNT)
-#define bfin_write_DMA7_X_COUNT(val)		bfin_write16(DMA7_X_COUNT, val)
-#define bfin_read_DMA7_Y_COUNT()		bfin_read16(DMA7_Y_COUNT)
-#define bfin_write_DMA7_Y_COUNT(val)		bfin_write16(DMA7_Y_COUNT, val)
-#define bfin_read_DMA7_X_MODIFY()		bfin_read16(DMA7_X_MODIFY)
-#define bfin_write_DMA7_X_MODIFY(val)		bfin_write16(DMA7_X_MODIFY, val)
-#define bfin_read_DMA7_Y_MODIFY()		bfin_read16(DMA7_Y_MODIFY)
-#define bfin_write_DMA7_Y_MODIFY(val)		bfin_write16(DMA7_Y_MODIFY, val)
-#define bfin_read_DMA7_CURR_DESC_PTR()		bfin_read32(DMA7_CURR_DESC_PTR)
-#define bfin_write_DMA7_CURR_DESC_PTR(val)	bfin_write32(DMA7_CURR_DESC_PTR, val)
-#define bfin_read_DMA7_CURR_ADDR()		bfin_read32(DMA7_CURR_ADDR)
-#define bfin_write_DMA7_CURR_ADDR(val)		bfin_write32(DMA7_CURR_ADDR, val)
-#define bfin_read_DMA7_CURR_X_COUNT()		bfin_read16(DMA7_CURR_X_COUNT)
-#define bfin_write_DMA7_CURR_X_COUNT(val)	bfin_write16(DMA7_CURR_X_COUNT, val)
-#define bfin_read_DMA7_CURR_Y_COUNT()		bfin_read16(DMA7_CURR_Y_COUNT)
-#define bfin_write_DMA7_CURR_Y_COUNT(val)	bfin_write16(DMA7_CURR_Y_COUNT, val)
-#define bfin_read_DMA7_IRQ_STATUS()		bfin_read16(DMA7_IRQ_STATUS)
-#define bfin_write_DMA7_IRQ_STATUS(val)		bfin_write16(DMA7_IRQ_STATUS, val)
-#define bfin_read_DMA7_PERIPHERAL_MAP()		bfin_read16(DMA7_PERIPHERAL_MAP)
-#define bfin_write_DMA7_PERIPHERAL_MAP(val)	bfin_write16(DMA7_PERIPHERAL_MAP, val)
-
-#define bfin_read_DMA8_CONFIG()			bfin_read16(DMA8_CONFIG)
-#define bfin_write_DMA8_CONFIG(val)		bfin_write16(DMA8_CONFIG, val)
-#define bfin_read_DMA8_NEXT_DESC_PTR()		bfin_read32(DMA8_NEXT_DESC_PTR)
-#define bfin_write_DMA8_NEXT_DESC_PTR(val)	bfin_write32(DMA8_NEXT_DESC_PTR, val)
-#define bfin_read_DMA8_START_ADDR()		bfin_read32(DMA8_START_ADDR)
-#define bfin_write_DMA8_START_ADDR(val)		bfin_write32(DMA8_START_ADDR, val)
-#define bfin_read_DMA8_X_COUNT()		bfin_read16(DMA8_X_COUNT)
-#define bfin_write_DMA8_X_COUNT(val)		bfin_write16(DMA8_X_COUNT, val)
-#define bfin_read_DMA8_Y_COUNT()		bfin_read16(DMA8_Y_COUNT)
-#define bfin_write_DMA8_Y_COUNT(val)		bfin_write16(DMA8_Y_COUNT, val)
-#define bfin_read_DMA8_X_MODIFY()		bfin_read16(DMA8_X_MODIFY)
-#define bfin_write_DMA8_X_MODIFY(val)		bfin_write16(DMA8_X_MODIFY, val)
-#define bfin_read_DMA8_Y_MODIFY()		bfin_read16(DMA8_Y_MODIFY)
-#define bfin_write_DMA8_Y_MODIFY(val)		bfin_write16(DMA8_Y_MODIFY, val)
-#define bfin_read_DMA8_CURR_DESC_PTR()		bfin_read32(DMA8_CURR_DESC_PTR)
-#define bfin_write_DMA8_CURR_DESC_PTR(val)	bfin_write32(DMA8_CURR_DESC_PTR, val)
-#define bfin_read_DMA8_CURR_ADDR()		bfin_read32(DMA8_CURR_ADDR)
-#define bfin_write_DMA8_CURR_ADDR(val)		bfin_write32(DMA8_CURR_ADDR, val)
-#define bfin_read_DMA8_CURR_X_COUNT()		bfin_read16(DMA8_CURR_X_COUNT)
-#define bfin_write_DMA8_CURR_X_COUNT(val)	bfin_write16(DMA8_CURR_X_COUNT, val)
-#define bfin_read_DMA8_CURR_Y_COUNT()		bfin_read16(DMA8_CURR_Y_COUNT)
-#define bfin_write_DMA8_CURR_Y_COUNT(val)	bfin_write16(DMA8_CURR_Y_COUNT, val)
-#define bfin_read_DMA8_IRQ_STATUS()		bfin_read16(DMA8_IRQ_STATUS)
-#define bfin_write_DMA8_IRQ_STATUS(val)		bfin_write16(DMA8_IRQ_STATUS, val)
-#define bfin_read_DMA8_PERIPHERAL_MAP()		bfin_read16(DMA8_PERIPHERAL_MAP)
-#define bfin_write_DMA8_PERIPHERAL_MAP(val)	bfin_write16(DMA8_PERIPHERAL_MAP, val)
-
-#define bfin_read_DMA9_CONFIG()			bfin_read16(DMA9_CONFIG)
-#define bfin_write_DMA9_CONFIG(val)		bfin_write16(DMA9_CONFIG, val)
-#define bfin_read_DMA9_NEXT_DESC_PTR()		bfin_read32(DMA9_NEXT_DESC_PTR)
-#define bfin_write_DMA9_NEXT_DESC_PTR(val)	bfin_write32(DMA9_NEXT_DESC_PTR, val)
-#define bfin_read_DMA9_START_ADDR()		bfin_read32(DMA9_START_ADDR)
-#define bfin_write_DMA9_START_ADDR(val)		bfin_write32(DMA9_START_ADDR, val)
-#define bfin_read_DMA9_X_COUNT()		bfin_read16(DMA9_X_COUNT)
-#define bfin_write_DMA9_X_COUNT(val)		bfin_write16(DMA9_X_COUNT, val)
-#define bfin_read_DMA9_Y_COUNT()		bfin_read16(DMA9_Y_COUNT)
-#define bfin_write_DMA9_Y_COUNT(val)		bfin_write16(DMA9_Y_COUNT, val)
-#define bfin_read_DMA9_X_MODIFY()		bfin_read16(DMA9_X_MODIFY)
-#define bfin_write_DMA9_X_MODIFY(val)		bfin_write16(DMA9_X_MODIFY, val)
-#define bfin_read_DMA9_Y_MODIFY()		bfin_read16(DMA9_Y_MODIFY)
-#define bfin_write_DMA9_Y_MODIFY(val)		bfin_write16(DMA9_Y_MODIFY, val)
-#define bfin_read_DMA9_CURR_DESC_PTR()		bfin_read32(DMA9_CURR_DESC_PTR)
-#define bfin_write_DMA9_CURR_DESC_PTR(val)	bfin_write32(DMA9_CURR_DESC_PTR, val)
-#define bfin_read_DMA9_CURR_ADDR()		bfin_read32(DMA9_CURR_ADDR)
-#define bfin_write_DMA9_CURR_ADDR(val)		bfin_write32(DMA9_CURR_ADDR, val)
-#define bfin_read_DMA9_CURR_X_COUNT()		bfin_read16(DMA9_CURR_X_COUNT)
-#define bfin_write_DMA9_CURR_X_COUNT(val)	bfin_write16(DMA9_CURR_X_COUNT, val)
-#define bfin_read_DMA9_CURR_Y_COUNT()		bfin_read16(DMA9_CURR_Y_COUNT)
-#define bfin_write_DMA9_CURR_Y_COUNT(val)	bfin_write16(DMA9_CURR_Y_COUNT, val)
-#define bfin_read_DMA9_IRQ_STATUS()		bfin_read16(DMA9_IRQ_STATUS)
-#define bfin_write_DMA9_IRQ_STATUS(val)		bfin_write16(DMA9_IRQ_STATUS, val)
-#define bfin_read_DMA9_PERIPHERAL_MAP()		bfin_read16(DMA9_PERIPHERAL_MAP)
-#define bfin_write_DMA9_PERIPHERAL_MAP(val)	bfin_write16(DMA9_PERIPHERAL_MAP, val)
-
-#define bfin_read_DMA10_CONFIG()		bfin_read16(DMA10_CONFIG)
-#define bfin_write_DMA10_CONFIG(val)		bfin_write16(DMA10_CONFIG, val)
-#define bfin_read_DMA10_NEXT_DESC_PTR()		bfin_read32(DMA10_NEXT_DESC_PTR)
-#define bfin_write_DMA10_NEXT_DESC_PTR(val)	bfin_write32(DMA10_NEXT_DESC_PTR, val)
-#define bfin_read_DMA10_START_ADDR()		bfin_read32(DMA10_START_ADDR)
-#define bfin_write_DMA10_START_ADDR(val)	bfin_write32(DMA10_START_ADDR, val)
-#define bfin_read_DMA10_X_COUNT()		bfin_read16(DMA10_X_COUNT)
-#define bfin_write_DMA10_X_COUNT(val)		bfin_write16(DMA10_X_COUNT, val)
-#define bfin_read_DMA10_Y_COUNT()		bfin_read16(DMA10_Y_COUNT)
-#define bfin_write_DMA10_Y_COUNT(val)		bfin_write16(DMA10_Y_COUNT, val)
-#define bfin_read_DMA10_X_MODIFY()		bfin_read16(DMA10_X_MODIFY)
-#define bfin_write_DMA10_X_MODIFY(val)		bfin_write16(DMA10_X_MODIFY, val)
-#define bfin_read_DMA10_Y_MODIFY()		bfin_read16(DMA10_Y_MODIFY)
-#define bfin_write_DMA10_Y_MODIFY(val)		bfin_write16(DMA10_Y_MODIFY, val)
-#define bfin_read_DMA10_CURR_DESC_PTR()		bfin_read32(DMA10_CURR_DESC_PTR)
-#define bfin_write_DMA10_CURR_DESC_PTR(val)	bfin_write32(DMA10_CURR_DESC_PTR, val)
-#define bfin_read_DMA10_CURR_ADDR()		bfin_read32(DMA10_CURR_ADDR)
-#define bfin_write_DMA10_CURR_ADDR(val)		bfin_write32(DMA10_CURR_ADDR, val)
-#define bfin_read_DMA10_CURR_X_COUNT()		bfin_read16(DMA10_CURR_X_COUNT)
-#define bfin_write_DMA10_CURR_X_COUNT(val)	bfin_write16(DMA10_CURR_X_COUNT, val)
-#define bfin_read_DMA10_CURR_Y_COUNT()		bfin_read16(DMA10_CURR_Y_COUNT)
-#define bfin_write_DMA10_CURR_Y_COUNT(val)	bfin_write16(DMA10_CURR_Y_COUNT, val)
-#define bfin_read_DMA10_IRQ_STATUS()		bfin_read16(DMA10_IRQ_STATUS)
-#define bfin_write_DMA10_IRQ_STATUS(val)	bfin_write16(DMA10_IRQ_STATUS, val)
-#define bfin_read_DMA10_PERIPHERAL_MAP()	bfin_read16(DMA10_PERIPHERAL_MAP)
-#define bfin_write_DMA10_PERIPHERAL_MAP(val)	bfin_write16(DMA10_PERIPHERAL_MAP, val)
-
-#define bfin_read_DMA11_CONFIG()		bfin_read16(DMA11_CONFIG)
-#define bfin_write_DMA11_CONFIG(val)		bfin_write16(DMA11_CONFIG, val)
-#define bfin_read_DMA11_NEXT_DESC_PTR()		bfin_read32(DMA11_NEXT_DESC_PTR)
-#define bfin_write_DMA11_NEXT_DESC_PTR(val)	bfin_write32(DMA11_NEXT_DESC_PTR, val)
-#define bfin_read_DMA11_START_ADDR()		bfin_read32(DMA11_START_ADDR)
-#define bfin_write_DMA11_START_ADDR(val)	bfin_write32(DMA11_START_ADDR, val)
-#define bfin_read_DMA11_X_COUNT()		bfin_read16(DMA11_X_COUNT)
-#define bfin_write_DMA11_X_COUNT(val)		bfin_write16(DMA11_X_COUNT, val)
-#define bfin_read_DMA11_Y_COUNT()		bfin_read16(DMA11_Y_COUNT)
-#define bfin_write_DMA11_Y_COUNT(val)		bfin_write16(DMA11_Y_COUNT, val)
-#define bfin_read_DMA11_X_MODIFY()		bfin_read16(DMA11_X_MODIFY)
-#define bfin_write_DMA11_X_MODIFY(val)		bfin_write16(DMA11_X_MODIFY, val)
-#define bfin_read_DMA11_Y_MODIFY()		bfin_read16(DMA11_Y_MODIFY)
-#define bfin_write_DMA11_Y_MODIFY(val)		bfin_write16(DMA11_Y_MODIFY, val)
-#define bfin_read_DMA11_CURR_DESC_PTR()		bfin_read32(DMA11_CURR_DESC_PTR)
-#define bfin_write_DMA11_CURR_DESC_PTR(val)	bfin_write32(DMA11_CURR_DESC_PTR, val)
-#define bfin_read_DMA11_CURR_ADDR()		bfin_read32(DMA11_CURR_ADDR)
-#define bfin_write_DMA11_CURR_ADDR(val)		bfin_write32(DMA11_CURR_ADDR, val)
-#define bfin_read_DMA11_CURR_X_COUNT()		bfin_read16(DMA11_CURR_X_COUNT)
-#define bfin_write_DMA11_CURR_X_COUNT(val)	bfin_write16(DMA11_CURR_X_COUNT, val)
-#define bfin_read_DMA11_CURR_Y_COUNT()		bfin_read16(DMA11_CURR_Y_COUNT)
-#define bfin_write_DMA11_CURR_Y_COUNT(val)	bfin_write16(DMA11_CURR_Y_COUNT, val)
-#define bfin_read_DMA11_IRQ_STATUS()		bfin_read16(DMA11_IRQ_STATUS)
-#define bfin_write_DMA11_IRQ_STATUS(val)	bfin_write16(DMA11_IRQ_STATUS, val)
-#define bfin_read_DMA11_PERIPHERAL_MAP()	bfin_read16(DMA11_PERIPHERAL_MAP)
-#define bfin_write_DMA11_PERIPHERAL_MAP(val)	bfin_write16(DMA11_PERIPHERAL_MAP, val)
-
-#define bfin_read_MDMA_D0_CONFIG()		bfin_read16(MDMA_D0_CONFIG)
-#define bfin_write_MDMA_D0_CONFIG(val)		bfin_write16(MDMA_D0_CONFIG, val)
-#define bfin_read_MDMA_D0_NEXT_DESC_PTR()	bfin_read32(MDMA_D0_NEXT_DESC_PTR)
-#define bfin_write_MDMA_D0_NEXT_DESC_PTR(val)	bfin_write32(MDMA_D0_NEXT_DESC_PTR, val)
-#define bfin_read_MDMA_D0_START_ADDR()		bfin_read32(MDMA_D0_START_ADDR)
-#define bfin_write_MDMA_D0_START_ADDR(val)	bfin_write32(MDMA_D0_START_ADDR, val)
-#define bfin_read_MDMA_D0_X_COUNT()		bfin_read16(MDMA_D0_X_COUNT)
-#define bfin_write_MDMA_D0_X_COUNT(val)		bfin_write16(MDMA_D0_X_COUNT, val)
-#define bfin_read_MDMA_D0_Y_COUNT()		bfin_read16(MDMA_D0_Y_COUNT)
-#define bfin_write_MDMA_D0_Y_COUNT(val)		bfin_write16(MDMA_D0_Y_COUNT, val)
-#define bfin_read_MDMA_D0_X_MODIFY()		bfin_read16(MDMA_D0_X_MODIFY)
-#define bfin_write_MDMA_D0_X_MODIFY(val)	bfin_write16(MDMA_D0_X_MODIFY, val)
-#define bfin_read_MDMA_D0_Y_MODIFY()		bfin_read16(MDMA_D0_Y_MODIFY)
-#define bfin_write_MDMA_D0_Y_MODIFY(val)	bfin_write16(MDMA_D0_Y_MODIFY, val)
-#define bfin_read_MDMA_D0_CURR_DESC_PTR()	bfin_read32(MDMA_D0_CURR_DESC_PTR)
-#define bfin_write_MDMA_D0_CURR_DESC_PTR(val)	bfin_write32(MDMA_D0_CURR_DESC_PTR, val)
-#define bfin_read_MDMA_D0_CURR_ADDR()		bfin_read32(MDMA_D0_CURR_ADDR)
-#define bfin_write_MDMA_D0_CURR_ADDR(val)	bfin_write32(MDMA_D0_CURR_ADDR, val)
-#define bfin_read_MDMA_D0_CURR_X_COUNT()	bfin_read16(MDMA_D0_CURR_X_COUNT)
-#define bfin_write_MDMA_D0_CURR_X_COUNT(val)	bfin_write16(MDMA_D0_CURR_X_COUNT, val)
-#define bfin_read_MDMA_D0_CURR_Y_COUNT()	bfin_read16(MDMA_D0_CURR_Y_COUNT)
-#define bfin_write_MDMA_D0_CURR_Y_COUNT(val)	bfin_write16(MDMA_D0_CURR_Y_COUNT, val)
-#define bfin_read_MDMA_D0_IRQ_STATUS()		bfin_read16(MDMA_D0_IRQ_STATUS)
-#define bfin_write_MDMA_D0_IRQ_STATUS(val)	bfin_write16(MDMA_D0_IRQ_STATUS, val)
-#define bfin_read_MDMA_D0_PERIPHERAL_MAP()	bfin_read16(MDMA_D0_PERIPHERAL_MAP)
-#define bfin_write_MDMA_D0_PERIPHERAL_MAP(val)	bfin_write16(MDMA_D0_PERIPHERAL_MAP, val)
-
-#define bfin_read_MDMA_S0_CONFIG()		bfin_read16(MDMA_S0_CONFIG)
-#define bfin_write_MDMA_S0_CONFIG(val)		bfin_write16(MDMA_S0_CONFIG, val)
-#define bfin_read_MDMA_S0_NEXT_DESC_PTR()	bfin_read32(MDMA_S0_NEXT_DESC_PTR)
-#define bfin_write_MDMA_S0_NEXT_DESC_PTR(val)	bfin_write32(MDMA_S0_NEXT_DESC_PTR, val)
-#define bfin_read_MDMA_S0_START_ADDR()		bfin_read32(MDMA_S0_START_ADDR)
-#define bfin_write_MDMA_S0_START_ADDR(val)	bfin_write32(MDMA_S0_START_ADDR, val)
-#define bfin_read_MDMA_S0_X_COUNT()		bfin_read16(MDMA_S0_X_COUNT)
-#define bfin_write_MDMA_S0_X_COUNT(val)		bfin_write16(MDMA_S0_X_COUNT, val)
-#define bfin_read_MDMA_S0_Y_COUNT()		bfin_read16(MDMA_S0_Y_COUNT)
-#define bfin_write_MDMA_S0_Y_COUNT(val)		bfin_write16(MDMA_S0_Y_COUNT, val)
-#define bfin_read_MDMA_S0_X_MODIFY()		bfin_read16(MDMA_S0_X_MODIFY)
-#define bfin_write_MDMA_S0_X_MODIFY(val)	bfin_write16(MDMA_S0_X_MODIFY, val)
-#define bfin_read_MDMA_S0_Y_MODIFY()		bfin_read16(MDMA_S0_Y_MODIFY)
-#define bfin_write_MDMA_S0_Y_MODIFY(val)	bfin_write16(MDMA_S0_Y_MODIFY, val)
-#define bfin_read_MDMA_S0_CURR_DESC_PTR()	bfin_read32(MDMA_S0_CURR_DESC_PTR)
-#define bfin_write_MDMA_S0_CURR_DESC_PTR(val)	bfin_write32(MDMA_S0_CURR_DESC_PTR, val)
-#define bfin_read_MDMA_S0_CURR_ADDR()		bfin_read32(MDMA_S0_CURR_ADDR)
-#define bfin_write_MDMA_S0_CURR_ADDR(val)	bfin_write32(MDMA_S0_CURR_ADDR, val)
-#define bfin_read_MDMA_S0_CURR_X_COUNT()	bfin_read16(MDMA_S0_CURR_X_COUNT)
-#define bfin_write_MDMA_S0_CURR_X_COUNT(val)	bfin_write16(MDMA_S0_CURR_X_COUNT, val)
-#define bfin_read_MDMA_S0_CURR_Y_COUNT()	bfin_read16(MDMA_S0_CURR_Y_COUNT)
-#define bfin_write_MDMA_S0_CURR_Y_COUNT(val)	bfin_write16(MDMA_S0_CURR_Y_COUNT, val)
-#define bfin_read_MDMA_S0_IRQ_STATUS()		bfin_read16(MDMA_S0_IRQ_STATUS)
-#define bfin_write_MDMA_S0_IRQ_STATUS(val)	bfin_write16(MDMA_S0_IRQ_STATUS, val)
-#define bfin_read_MDMA_S0_PERIPHERAL_MAP()	bfin_read16(MDMA_S0_PERIPHERAL_MAP)
-#define bfin_write_MDMA_S0_PERIPHERAL_MAP(val)	bfin_write16(MDMA_S0_PERIPHERAL_MAP, val)
-
-#define bfin_read_MDMA_D1_CONFIG()		bfin_read16(MDMA_D1_CONFIG)
-#define bfin_write_MDMA_D1_CONFIG(val)		bfin_write16(MDMA_D1_CONFIG, val)
-#define bfin_read_MDMA_D1_NEXT_DESC_PTR()	bfin_read32(MDMA_D1_NEXT_DESC_PTR)
-#define bfin_write_MDMA_D1_NEXT_DESC_PTR(val)	bfin_write32(MDMA_D1_NEXT_DESC_PTR, val)
-#define bfin_read_MDMA_D1_START_ADDR()		bfin_read32(MDMA_D1_START_ADDR)
-#define bfin_write_MDMA_D1_START_ADDR(val)	bfin_write32(MDMA_D1_START_ADDR, val)
-#define bfin_read_MDMA_D1_X_COUNT()		bfin_read16(MDMA_D1_X_COUNT)
-#define bfin_write_MDMA_D1_X_COUNT(val)		bfin_write16(MDMA_D1_X_COUNT, val)
-#define bfin_read_MDMA_D1_Y_COUNT()		bfin_read16(MDMA_D1_Y_COUNT)
-#define bfin_write_MDMA_D1_Y_COUNT(val)		bfin_write16(MDMA_D1_Y_COUNT, val)
-#define bfin_read_MDMA_D1_X_MODIFY()		bfin_read16(MDMA_D1_X_MODIFY)
-#define bfin_write_MDMA_D1_X_MODIFY(val)	bfin_write16(MDMA_D1_X_MODIFY, val)
-#define bfin_read_MDMA_D1_Y_MODIFY()		bfin_read16(MDMA_D1_Y_MODIFY)
-#define bfin_write_MDMA_D1_Y_MODIFY(val)	bfin_write16(MDMA_D1_Y_MODIFY, val)
-#define bfin_read_MDMA_D1_CURR_DESC_PTR()	bfin_read32(MDMA_D1_CURR_DESC_PTR)
-#define bfin_write_MDMA_D1_CURR_DESC_PTR(val)	bfin_write32(MDMA_D1_CURR_DESC_PTR, val)
-#define bfin_read_MDMA_D1_CURR_ADDR()		bfin_read32(MDMA_D1_CURR_ADDR)
-#define bfin_write_MDMA_D1_CURR_ADDR(val)	bfin_write32(MDMA_D1_CURR_ADDR, val)
-#define bfin_read_MDMA_D1_CURR_X_COUNT()	bfin_read16(MDMA_D1_CURR_X_COUNT)
-#define bfin_write_MDMA_D1_CURR_X_COUNT(val)	bfin_write16(MDMA_D1_CURR_X_COUNT, val)
-#define bfin_read_MDMA_D1_CURR_Y_COUNT()	bfin_read16(MDMA_D1_CURR_Y_COUNT)
-#define bfin_write_MDMA_D1_CURR_Y_COUNT(val)	bfin_write16(MDMA_D1_CURR_Y_COUNT, val)
-#define bfin_read_MDMA_D1_IRQ_STATUS()		bfin_read16(MDMA_D1_IRQ_STATUS)
-#define bfin_write_MDMA_D1_IRQ_STATUS(val)	bfin_write16(MDMA_D1_IRQ_STATUS, val)
-#define bfin_read_MDMA_D1_PERIPHERAL_MAP()	bfin_read16(MDMA_D1_PERIPHERAL_MAP)
-#define bfin_write_MDMA_D1_PERIPHERAL_MAP(val)	bfin_write16(MDMA_D1_PERIPHERAL_MAP, val)
-
-#define bfin_read_MDMA_S1_CONFIG()		bfin_read16(MDMA_S1_CONFIG)
-#define bfin_write_MDMA_S1_CONFIG(val)		bfin_write16(MDMA_S1_CONFIG, val)
-#define bfin_read_MDMA_S1_NEXT_DESC_PTR()	bfin_read32(MDMA_S1_NEXT_DESC_PTR)
-#define bfin_write_MDMA_S1_NEXT_DESC_PTR(val)	bfin_write32(MDMA_S1_NEXT_DESC_PTR, val)
-#define bfin_read_MDMA_S1_START_ADDR()		bfin_read32(MDMA_S1_START_ADDR)
-#define bfin_write_MDMA_S1_START_ADDR(val)	bfin_write32(MDMA_S1_START_ADDR, val)
-#define bfin_read_MDMA_S1_X_COUNT()		bfin_read16(MDMA_S1_X_COUNT)
-#define bfin_write_MDMA_S1_X_COUNT(val)		bfin_write16(MDMA_S1_X_COUNT, val)
-#define bfin_read_MDMA_S1_Y_COUNT()		bfin_read16(MDMA_S1_Y_COUNT)
-#define bfin_write_MDMA_S1_Y_COUNT(val)		bfin_write16(MDMA_S1_Y_COUNT, val)
-#define bfin_read_MDMA_S1_X_MODIFY()		bfin_read16(MDMA_S1_X_MODIFY)
-#define bfin_write_MDMA_S1_X_MODIFY(val)	bfin_write16(MDMA_S1_X_MODIFY, val)
-#define bfin_read_MDMA_S1_Y_MODIFY()		bfin_read16(MDMA_S1_Y_MODIFY)
-#define bfin_write_MDMA_S1_Y_MODIFY(val)	bfin_write16(MDMA_S1_Y_MODIFY, val)
-#define bfin_read_MDMA_S1_CURR_DESC_PTR()	bfin_read32(MDMA_S1_CURR_DESC_PTR)
-#define bfin_write_MDMA_S1_CURR_DESC_PTR(val)	bfin_write32(MDMA_S1_CURR_DESC_PTR, val)
-#define bfin_read_MDMA_S1_CURR_ADDR()		bfin_read32(MDMA_S1_CURR_ADDR)
-#define bfin_write_MDMA_S1_CURR_ADDR(val)	bfin_write32(MDMA_S1_CURR_ADDR, val)
-#define bfin_read_MDMA_S1_CURR_X_COUNT()	bfin_read16(MDMA_S1_CURR_X_COUNT)
-#define bfin_write_MDMA_S1_CURR_X_COUNT(val)	bfin_write16(MDMA_S1_CURR_X_COUNT, val)
-#define bfin_read_MDMA_S1_CURR_Y_COUNT()	bfin_read16(MDMA_S1_CURR_Y_COUNT)
-#define bfin_write_MDMA_S1_CURR_Y_COUNT(val)	bfin_write16(MDMA_S1_CURR_Y_COUNT, val)
-#define bfin_read_MDMA_S1_IRQ_STATUS()		bfin_read16(MDMA_S1_IRQ_STATUS)
-#define bfin_write_MDMA_S1_IRQ_STATUS(val)	bfin_write16(MDMA_S1_IRQ_STATUS, val)
-#define bfin_read_MDMA_S1_PERIPHERAL_MAP()	bfin_read16(MDMA_S1_PERIPHERAL_MAP)
-#define bfin_write_MDMA_S1_PERIPHERAL_MAP(val)	bfin_write16(MDMA_S1_PERIPHERAL_MAP, val)
-
-
-/* Parallel Peripheral Interface (0xFFC01000 - 0xFFC010FF)							*/
-#define bfin_read_PPI_CONTROL()			bfin_read16(PPI_CONTROL)
-#define bfin_write_PPI_CONTROL(val)		bfin_write16(PPI_CONTROL, val)
-#define bfin_read_PPI_STATUS()			bfin_read16(PPI_STATUS)
-#define bfin_write_PPI_STATUS(val)		bfin_write16(PPI_STATUS, val)
-#define bfin_clear_PPI_STATUS()			bfin_write_PPI_STATUS(0xFFFF)
-#define bfin_read_PPI_DELAY()			bfin_read16(PPI_DELAY)
-#define bfin_write_PPI_DELAY(val)		bfin_write16(PPI_DELAY, val)
-#define bfin_read_PPI_COUNT()			bfin_read16(PPI_COUNT)
-#define bfin_write_PPI_COUNT(val)		bfin_write16(PPI_COUNT, val)
-#define bfin_read_PPI_FRAME()			bfin_read16(PPI_FRAME)
-#define bfin_write_PPI_FRAME(val)		bfin_write16(PPI_FRAME, val)
-
-
-/* Two-Wire Interface		(0xFFC01400 - 0xFFC014FF)								*/
-
-/* General Purpose I/O Port G (0xFFC01500 - 0xFFC015FF)								*/
-#define bfin_read_PORTGIO()			bfin_read16(PORTGIO)
-#define bfin_write_PORTGIO(val)			bfin_write16(PORTGIO, val)
-#define bfin_read_PORTGIO_CLEAR()		bfin_read16(PORTGIO_CLEAR)
-#define bfin_write_PORTGIO_CLEAR(val)		bfin_write16(PORTGIO_CLEAR, val)
-#define bfin_read_PORTGIO_SET()			bfin_read16(PORTGIO_SET)
-#define bfin_write_PORTGIO_SET(val)		bfin_write16(PORTGIO_SET, val)
-#define bfin_read_PORTGIO_TOGGLE()		bfin_read16(PORTGIO_TOGGLE)
-#define bfin_write_PORTGIO_TOGGLE(val)		bfin_write16(PORTGIO_TOGGLE, val)
-#define bfin_read_PORTGIO_MASKA()		bfin_read16(PORTGIO_MASKA)
-#define bfin_write_PORTGIO_MASKA(val)		bfin_write16(PORTGIO_MASKA, val)
-#define bfin_read_PORTGIO_MASKA_CLEAR()		bfin_read16(PORTGIO_MASKA_CLEAR)
-#define bfin_write_PORTGIO_MASKA_CLEAR(val)	bfin_write16(PORTGIO_MASKA_CLEAR, val)
-#define bfin_read_PORTGIO_MASKA_SET()		bfin_read16(PORTGIO_MASKA_SET)
-#define bfin_write_PORTGIO_MASKA_SET(val)	bfin_write16(PORTGIO_MASKA_SET, val)
-#define bfin_read_PORTGIO_MASKA_TOGGLE()	bfin_read16(PORTGIO_MASKA_TOGGLE)
-#define bfin_write_PORTGIO_MASKA_TOGGLE(val)	bfin_write16(PORTGIO_MASKA_TOGGLE, val)
-#define bfin_read_PORTGIO_MASKB()		bfin_read16(PORTGIO_MASKB)
-#define bfin_write_PORTGIO_MASKB(val)		bfin_write16(PORTGIO_MASKB, val)
-#define bfin_read_PORTGIO_MASKB_CLEAR()		bfin_read16(PORTGIO_MASKB_CLEAR)
-#define bfin_write_PORTGIO_MASKB_CLEAR(val)	bfin_write16(PORTGIO_MASKB_CLEAR, val)
-#define bfin_read_PORTGIO_MASKB_SET()		bfin_read16(PORTGIO_MASKB_SET)
-#define bfin_write_PORTGIO_MASKB_SET(val)	bfin_write16(PORTGIO_MASKB_SET, val)
-#define bfin_read_PORTGIO_MASKB_TOGGLE()	bfin_read16(PORTGIO_MASKB_TOGGLE)
-#define bfin_write_PORTGIO_MASKB_TOGGLE(val)	bfin_write16(PORTGIO_MASKB_TOGGLE, val)
-#define bfin_read_PORTGIO_DIR()			bfin_read16(PORTGIO_DIR)
-#define bfin_write_PORTGIO_DIR(val)		bfin_write16(PORTGIO_DIR, val)
-#define bfin_read_PORTGIO_POLAR()		bfin_read16(PORTGIO_POLAR)
-#define bfin_write_PORTGIO_POLAR(val)		bfin_write16(PORTGIO_POLAR, val)
-#define bfin_read_PORTGIO_EDGE()		bfin_read16(PORTGIO_EDGE)
-#define bfin_write_PORTGIO_EDGE(val)		bfin_write16(PORTGIO_EDGE, val)
-#define bfin_read_PORTGIO_BOTH()		bfin_read16(PORTGIO_BOTH)
-#define bfin_write_PORTGIO_BOTH(val)		bfin_write16(PORTGIO_BOTH, val)
-#define bfin_read_PORTGIO_INEN()		bfin_read16(PORTGIO_INEN)
-#define bfin_write_PORTGIO_INEN(val)		bfin_write16(PORTGIO_INEN, val)
-
-
-/* General Purpose I/O Port H (0xFFC01700 - 0xFFC017FF)								*/
-#define bfin_read_PORTHIO()			bfin_read16(PORTHIO)
-#define bfin_write_PORTHIO(val)			bfin_write16(PORTHIO, val)
-#define bfin_read_PORTHIO_CLEAR()		bfin_read16(PORTHIO_CLEAR)
-#define bfin_write_PORTHIO_CLEAR(val)		bfin_write16(PORTHIO_CLEAR, val)
-#define bfin_read_PORTHIO_SET()			bfin_read16(PORTHIO_SET)
-#define bfin_write_PORTHIO_SET(val)		bfin_write16(PORTHIO_SET, val)
-#define bfin_read_PORTHIO_TOGGLE()		bfin_read16(PORTHIO_TOGGLE)
-#define bfin_write_PORTHIO_TOGGLE(val)		bfin_write16(PORTHIO_TOGGLE, val)
-#define bfin_read_PORTHIO_MASKA()		bfin_read16(PORTHIO_MASKA)
-#define bfin_write_PORTHIO_MASKA(val)		bfin_write16(PORTHIO_MASKA, val)
-#define bfin_read_PORTHIO_MASKA_CLEAR()		bfin_read16(PORTHIO_MASKA_CLEAR)
-#define bfin_write_PORTHIO_MASKA_CLEAR(val)	bfin_write16(PORTHIO_MASKA_CLEAR, val)
-#define bfin_read_PORTHIO_MASKA_SET()		bfin_read16(PORTHIO_MASKA_SET)
-#define bfin_write_PORTHIO_MASKA_SET(val)	bfin_write16(PORTHIO_MASKA_SET, val)
-#define bfin_read_PORTHIO_MASKA_TOGGLE()	bfin_read16(PORTHIO_MASKA_TOGGLE)
-#define bfin_write_PORTHIO_MASKA_TOGGLE(val)	bfin_write16(PORTHIO_MASKA_TOGGLE, val)
-#define bfin_read_PORTHIO_MASKB()		bfin_read16(PORTHIO_MASKB)
-#define bfin_write_PORTHIO_MASKB(val)		bfin_write16(PORTHIO_MASKB, val)
-#define bfin_read_PORTHIO_MASKB_CLEAR()		bfin_read16(PORTHIO_MASKB_CLEAR)
-#define bfin_write_PORTHIO_MASKB_CLEAR(val)	bfin_write16(PORTHIO_MASKB_CLEAR, val)
-#define bfin_read_PORTHIO_MASKB_SET()		bfin_read16(PORTHIO_MASKB_SET)
-#define bfin_write_PORTHIO_MASKB_SET(val)	bfin_write16(PORTHIO_MASKB_SET, val)
-#define bfin_read_PORTHIO_MASKB_TOGGLE()	bfin_read16(PORTHIO_MASKB_TOGGLE)
-#define bfin_write_PORTHIO_MASKB_TOGGLE(val)	bfin_write16(PORTHIO_MASKB_TOGGLE, val)
-#define bfin_read_PORTHIO_DIR()			bfin_read16(PORTHIO_DIR)
-#define bfin_write_PORTHIO_DIR(val)		bfin_write16(PORTHIO_DIR, val)
-#define bfin_read_PORTHIO_POLAR()		bfin_read16(PORTHIO_POLAR)
-#define bfin_write_PORTHIO_POLAR(val)		bfin_write16(PORTHIO_POLAR, val)
-#define bfin_read_PORTHIO_EDGE()		bfin_read16(PORTHIO_EDGE)
-#define bfin_write_PORTHIO_EDGE(val)		bfin_write16(PORTHIO_EDGE, val)
-#define bfin_read_PORTHIO_BOTH()		bfin_read16(PORTHIO_BOTH)
-#define bfin_write_PORTHIO_BOTH(val)		bfin_write16(PORTHIO_BOTH, val)
-#define bfin_read_PORTHIO_INEN()		bfin_read16(PORTHIO_INEN)
-#define bfin_write_PORTHIO_INEN(val)		bfin_write16(PORTHIO_INEN, val)
-
-
-/* UART1 Controller		(0xFFC02000 - 0xFFC020FF)								*/
-#define bfin_read_UART1_THR()			bfin_read16(UART1_THR)
-#define bfin_write_UART1_THR(val)		bfin_write16(UART1_THR, val)
-#define bfin_read_UART1_RBR()			bfin_read16(UART1_RBR)
-#define bfin_write_UART1_RBR(val)		bfin_write16(UART1_RBR, val)
-#define bfin_read_UART1_DLL()			bfin_read16(UART1_DLL)
-#define bfin_write_UART1_DLL(val)		bfin_write16(UART1_DLL, val)
-#define bfin_read_UART1_IER()			bfin_read16(UART1_IER)
-#define bfin_write_UART1_IER(val)		bfin_write16(UART1_IER, val)
-#define bfin_read_UART1_DLH()			bfin_read16(UART1_DLH)
-#define bfin_write_UART1_DLH(val)		bfin_write16(UART1_DLH, val)
-#define bfin_read_UART1_IIR()			bfin_read16(UART1_IIR)
-#define bfin_write_UART1_IIR(val)		bfin_write16(UART1_IIR, val)
-#define bfin_read_UART1_LCR()			bfin_read16(UART1_LCR)
-#define bfin_write_UART1_LCR(val)		bfin_write16(UART1_LCR, val)
-#define bfin_read_UART1_MCR()			bfin_read16(UART1_MCR)
-#define bfin_write_UART1_MCR(val)		bfin_write16(UART1_MCR, val)
-#define bfin_read_UART1_LSR()			bfin_read16(UART1_LSR)
-#define bfin_write_UART1_LSR(val)		bfin_write16(UART1_LSR, val)
-#define bfin_read_UART1_MSR()			bfin_read16(UART1_MSR)
-#define bfin_write_UART1_MSR(val)		bfin_write16(UART1_MSR, val)
-#define bfin_read_UART1_SCR()			bfin_read16(UART1_SCR)
-#define bfin_write_UART1_SCR(val)		bfin_write16(UART1_SCR, val)
-#define bfin_read_UART1_GCTL()			bfin_read16(UART1_GCTL)
-#define bfin_write_UART1_GCTL(val)		bfin_write16(UART1_GCTL, val)
-
-/* Omit CAN register sets from the cdefBF534.h (CAN is not in the ADSP-BF51x processor) */
-
-/* Pin Control Registers	(0xFFC03200 - 0xFFC032FF)								*/
-#define bfin_read_PORTF_FER()			bfin_read16(PORTF_FER)
-#define bfin_write_PORTF_FER(val)		bfin_write16(PORTF_FER, val)
-#define bfin_read_PORTG_FER()			bfin_read16(PORTG_FER)
-#define bfin_write_PORTG_FER(val)		bfin_write16(PORTG_FER, val)
-#define bfin_read_PORTH_FER()			bfin_read16(PORTH_FER)
-#define bfin_write_PORTH_FER(val)		bfin_write16(PORTH_FER, val)
-#define bfin_read_PORT_MUX()			bfin_read16(PORT_MUX)
-#define bfin_write_PORT_MUX(val)		bfin_write16(PORT_MUX, val)
-
-
-/* Handshake MDMA Registers	(0xFFC03300 - 0xFFC033FF)								*/
-#define bfin_read_HMDMA0_CONTROL()		bfin_read16(HMDMA0_CONTROL)
-#define bfin_write_HMDMA0_CONTROL(val)		bfin_write16(HMDMA0_CONTROL, val)
-#define bfin_read_HMDMA0_ECINIT()		bfin_read16(HMDMA0_ECINIT)
-#define bfin_write_HMDMA0_ECINIT(val)		bfin_write16(HMDMA0_ECINIT, val)
-#define bfin_read_HMDMA0_BCINIT()		bfin_read16(HMDMA0_BCINIT)
-#define bfin_write_HMDMA0_BCINIT(val)		bfin_write16(HMDMA0_BCINIT, val)
-#define bfin_read_HMDMA0_ECURGENT()		bfin_read16(HMDMA0_ECURGENT)
-#define bfin_write_HMDMA0_ECURGENT(val)		bfin_write16(HMDMA0_ECURGENT, val)
-#define bfin_read_HMDMA0_ECOVERFLOW()		bfin_read16(HMDMA0_ECOVERFLOW)
-#define bfin_write_HMDMA0_ECOVERFLOW(val)	bfin_write16(HMDMA0_ECOVERFLOW, val)
-#define bfin_read_HMDMA0_ECOUNT()		bfin_read16(HMDMA0_ECOUNT)
-#define bfin_write_HMDMA0_ECOUNT(val)		bfin_write16(HMDMA0_ECOUNT, val)
-#define bfin_read_HMDMA0_BCOUNT()		bfin_read16(HMDMA0_BCOUNT)
-#define bfin_write_HMDMA0_BCOUNT(val)		bfin_write16(HMDMA0_BCOUNT, val)
-
-#define bfin_read_HMDMA1_CONTROL()		bfin_read16(HMDMA1_CONTROL)
-#define bfin_write_HMDMA1_CONTROL(val)		bfin_write16(HMDMA1_CONTROL, val)
-#define bfin_read_HMDMA1_ECINIT()		bfin_read16(HMDMA1_ECINIT)
-#define bfin_write_HMDMA1_ECINIT(val)		bfin_write16(HMDMA1_ECINIT, val)
-#define bfin_read_HMDMA1_BCINIT()		bfin_read16(HMDMA1_BCINIT)
-#define bfin_write_HMDMA1_BCINIT(val)		bfin_write16(HMDMA1_BCINIT, val)
-#define bfin_read_HMDMA1_ECURGENT()		bfin_read16(HMDMA1_ECURGENT)
-#define bfin_write_HMDMA1_ECURGENT(val)		bfin_write16(HMDMA1_ECURGENT, val)
-#define bfin_read_HMDMA1_ECOVERFLOW()		bfin_read16(HMDMA1_ECOVERFLOW)
-#define bfin_write_HMDMA1_ECOVERFLOW(val)	bfin_write16(HMDMA1_ECOVERFLOW, val)
-#define bfin_read_HMDMA1_ECOUNT()		bfin_read16(HMDMA1_ECOUNT)
-#define bfin_write_HMDMA1_ECOUNT(val)		bfin_write16(HMDMA1_ECOUNT, val)
-#define bfin_read_HMDMA1_BCOUNT()		bfin_read16(HMDMA1_BCOUNT)
-#define bfin_write_HMDMA1_BCOUNT(val)		bfin_write16(HMDMA1_BCOUNT, val)
-
-/* ==== end from cdefBF534.h ==== */
-
-/* GPIO PIN mux (0xFFC03210 - OxFFC03288) */
-
-#define bfin_read_PORTF_MUX()			bfin_read16(PORTF_MUX)
-#define bfin_write_PORTF_MUX(val)		bfin_write16(PORTF_MUX, val)
-#define bfin_read_PORTG_MUX()			bfin_read16(PORTG_MUX)
-#define bfin_write_PORTG_MUX(val)		bfin_write16(PORTG_MUX, val)
-#define bfin_read_PORTH_MUX()			bfin_read16(PORTH_MUX)
-#define bfin_write_PORTH_MUX(val)		bfin_write16(PORTH_MUX, val)
-
-#define bfin_read_PORTF_DRIVE()			bfin_read16(PORTF_DRIVE)
-#define bfin_write_PORTF_DRIVE(val)		bfin_write16(PORTF_DRIVE, val)
-#define bfin_read_PORTG_DRIVE()			bfin_read16(PORTG_DRIVE)
-#define bfin_write_PORTG_DRIVE(val)		bfin_write16(PORTG_DRIVE, val)
-#define bfin_read_PORTH_DRIVE()			bfin_read16(PORTH_DRIVE)
-#define bfin_write_PORTH_DRIVE(val)		bfin_write16(PORTH_DRIVE, val)
-#define bfin_read_PORTF_SLEW()			bfin_read16(PORTF_SLEW)
-#define bfin_write_PORTF_SLEW(val)		bfin_write16(PORTF_SLEW, val)
-#define bfin_read_PORTG_SLEW()			bfin_read16(PORTG_SLEW)
-#define bfin_write_PORTG_SLEW(val)		bfin_write16(PORTG_SLEW, val)
-#define bfin_read_PORTH_SLEW()			bfin_read16(PORTH_SLEW)
-#define bfin_write_PORTH_SLEW(val)		bfin_write16(PORTH_SLEW, val)
-#define bfin_read_PORTF_HYSTERISIS()		bfin_read16(PORTF_HYSTERISIS)
-#define bfin_write_PORTF_HYSTERISIS(val)	bfin_write16(PORTF_HYSTERISIS, val)
-#define bfin_read_PORTG_HYSTERISIS()		bfin_read16(PORTG_HYSTERISIS)
-#define bfin_write_PORTG_HYSTERISIS(val)	bfin_write16(PORTG_HYSTERISIS, val)
-#define bfin_read_PORTH_HYSTERISIS()		bfin_read16(PORTH_HYSTERISIS)
-#define bfin_write_PORTH_HYSTERISIS(val)	bfin_write16(PORTH_HYSTERISIS, val)
-#define bfin_read_MISCPORT_DRIVE()		bfin_read16(MISCPORT_DRIVE)
-#define bfin_write_MISCPORT_DRIVE(val)		bfin_write16(MISCPORT_DRIVE, val)
-#define bfin_read_MISCPORT_SLEW()		bfin_read16(MISCPORT_SLEW)
-#define bfin_write_MISCPORT_SLEW(val)		bfin_write16(MISCPORT_SLEW, val)
-#define bfin_read_MISCPORT_HYSTERISIS()		bfin_read16(MISCPORT_HYSTERISIS)
-#define bfin_write_MISCPORT_HYSTERISIS(val)	bfin_write16(MISCPORT_HYSTERISIS, val)
-
-/* HOST Port Registers */
-
-#define bfin_read_HOST_CONTROL()		bfin_read16(HOST_CONTROL)
-#define bfin_write_HOST_CONTROL(val)		bfin_write16(HOST_CONTROL, val)
-#define bfin_read_HOST_STATUS()			bfin_read16(HOST_STATUS)
-#define bfin_write_HOST_STATUS(val)		bfin_write16(HOST_STATUS, val)
-#define bfin_read_HOST_TIMEOUT()		bfin_read16(HOST_TIMEOUT)
-#define bfin_write_HOST_TIMEOUT(val)		bfin_write16(HOST_TIMEOUT, val)
-
-/* Counter Registers */
-
-#define bfin_read_CNT_CONFIG()			bfin_read16(CNT_CONFIG)
-#define bfin_write_CNT_CONFIG(val)		bfin_write16(CNT_CONFIG, val)
-#define bfin_read_CNT_IMASK()			bfin_read16(CNT_IMASK)
-#define bfin_write_CNT_IMASK(val)		bfin_write16(CNT_IMASK, val)
-#define bfin_read_CNT_STATUS()			bfin_read16(CNT_STATUS)
-#define bfin_write_CNT_STATUS(val)		bfin_write16(CNT_STATUS, val)
-#define bfin_read_CNT_COMMAND()			bfin_read16(CNT_COMMAND)
-#define bfin_write_CNT_COMMAND(val)		bfin_write16(CNT_COMMAND, val)
-#define bfin_read_CNT_DEBOUNCE()		bfin_read16(CNT_DEBOUNCE)
-#define bfin_write_CNT_DEBOUNCE(val)		bfin_write16(CNT_DEBOUNCE, val)
-#define bfin_read_CNT_COUNTER()			bfin_read32(CNT_COUNTER)
-#define bfin_write_CNT_COUNTER(val)		bfin_write32(CNT_COUNTER, val)
-#define bfin_read_CNT_MAX()			bfin_read32(CNT_MAX)
-#define bfin_write_CNT_MAX(val)			bfin_write32(CNT_MAX, val)
-#define bfin_read_CNT_MIN()			bfin_read32(CNT_MIN)
-#define bfin_write_CNT_MIN(val)			bfin_write32(CNT_MIN, val)
-
-/* Security Registers */
-
-#define bfin_read_SECURE_SYSSWT()		bfin_read32(SECURE_SYSSWT)
-#define bfin_write_SECURE_SYSSWT(val)		bfin_write32(SECURE_SYSSWT, val)
-#define bfin_read_SECURE_CONTROL()		bfin_read16(SECURE_CONTROL)
-#define bfin_write_SECURE_CONTROL(val)		bfin_write16(SECURE_CONTROL, val)
-#define bfin_read_SECURE_STATUS()		bfin_read16(SECURE_STATUS)
-#define bfin_write_SECURE_STATUS(val)		bfin_write16(SECURE_STATUS, val)
-
-/* These need to be last due to the cdef/linux inter-dependencies */
-#include <asm/irq.h>
-
-#endif /* _CDEF_BF52X_H */
diff --git a/arch/blackfin/mach-bf518/include/mach/defBF512.h b/arch/blackfin/mach-bf518/include/mach/defBF512.h
index 9b505bb..2728582 100644
--- a/arch/blackfin/mach-bf518/include/mach/defBF512.h
+++ b/arch/blackfin/mach-bf518/include/mach/defBF512.h
@@ -1,5 +1,5 @@
 /*
- * Copyright 2008 Analog Devices Inc.
+ * Copyright 2008-2010 Analog Devices Inc.
  *
  * Licensed under the ADI BSD license or the GPL-2 (or later)
  */
@@ -7,12 +7,1388 @@
 #ifndef _DEF_BF512_H
 #define _DEF_BF512_H
 
-/* Include all Core registers and bit definitions */
-#include <asm/def_LPBlackfin.h>
+/* ************************************************************** */
+/*   SYSTEM & MMR ADDRESS DEFINITIONS COMMON TO ALL ADSP-BF51x    */
+/* ************************************************************** */
 
-/* SYSTEM & MMR ADDRESS DEFINITIONS FOR ADSP-BF512 */
+/* Clock and System Control	(0xFFC00000 - 0xFFC000FF)								*/
+#define PLL_CTL				0xFFC00000	/* PLL Control Register						*/
+#define PLL_DIV				0xFFC00004	/* PLL Divide Register						*/
+#define VR_CTL				0xFFC00008	/* Voltage Regulator Control Register				*/
+#define PLL_STAT			0xFFC0000C	/* PLL Status Register						*/
+#define PLL_LOCKCNT			0xFFC00010	/* PLL Lock Count Register					*/
+#define CHIPID				0xFFC00014	/* Device ID Register */
 
-/* Include defBF51x_base.h for the set of #defines that are common to all ADSP-BF51x processors */
-#include "defBF51x_base.h"
+/* System Interrupt Controller (0xFFC00100 - 0xFFC001FF)								*/
+#define SWRST				0xFFC00100	/* Software Reset Register					*/
+#define SYSCR				0xFFC00104	/* System Configuration Register				*/
+#define SIC_RVECT			0xFFC00108	/* Interrupt Reset Vector Address Register			*/
+
+#define SIC_IMASK0			0xFFC0010C	/* Interrupt Mask Register					*/
+#define SIC_IAR0			0xFFC00110	/* Interrupt Assignment Register 0				*/
+#define SIC_IAR1			0xFFC00114	/* Interrupt Assignment Register 1				*/
+#define SIC_IAR2			0xFFC00118	/* Interrupt Assignment Register 2				*/
+#define SIC_IAR3			0xFFC0011C	/* Interrupt Assignment Register 3				*/
+#define SIC_ISR0			0xFFC00120	/* Interrupt Status Register					*/
+#define SIC_IWR0			0xFFC00124	/* Interrupt Wakeup Register					*/
+
+/* SIC Additions to ADSP-BF51x (0xFFC0014C - 0xFFC00162) */
+#define SIC_IMASK1                      0xFFC0014C     /* Interrupt Mask Register of SIC2 */
+#define SIC_IAR4                        0xFFC00150     /* Interrupt Assignment Register 4 */
+#define SIC_IAR5                        0xFFC00154     /* Interrupt Assignment Register 5 */
+#define SIC_IAR6                        0xFFC00158     /* Interrupt Assignment Register 6 */
+#define SIC_IAR7                        0xFFC0015C     /* Interrupt Assignment Register 7 */
+#define SIC_ISR1                        0xFFC00160     /* Interrupt Status Register 1 */
+#define SIC_IWR1                        0xFFC00164     /* Interrupt Wakeup Register 1 */
+
+
+/* Watchdog Timer			(0xFFC00200 - 0xFFC002FF)								*/
+#define WDOG_CTL			0xFFC00200	/* Watchdog Control Register				*/
+#define WDOG_CNT			0xFFC00204	/* Watchdog Count Register					*/
+#define WDOG_STAT			0xFFC00208	/* Watchdog Status Register					*/
+
+
+/* Real Time Clock		(0xFFC00300 - 0xFFC003FF)									*/
+#define RTC_STAT			0xFFC00300	/* RTC Status Register						*/
+#define RTC_ICTL			0xFFC00304	/* RTC Interrupt Control Register			*/
+#define RTC_ISTAT			0xFFC00308	/* RTC Interrupt Status Register			*/
+#define RTC_SWCNT			0xFFC0030C	/* RTC Stopwatch Count Register				*/
+#define RTC_ALARM			0xFFC00310	/* RTC Alarm Time Register					*/
+#define RTC_FAST			0xFFC00314	/* RTC Prescaler Enable Register			*/
+#define RTC_PREN			0xFFC00314	/* RTC Prescaler Enable Alternate Macro		*/
+
+
+/* UART0 Controller		(0xFFC00400 - 0xFFC004FF)									*/
+#define UART0_THR			0xFFC00400	/* Transmit Holding register				*/
+#define UART0_RBR			0xFFC00400	/* Receive Buffer register					*/
+#define UART0_DLL			0xFFC00400	/* Divisor Latch (Low-Byte)					*/
+#define UART0_IER			0xFFC00404	/* Interrupt Enable Register				*/
+#define UART0_DLH			0xFFC00404	/* Divisor Latch (High-Byte)				*/
+#define UART0_IIR			0xFFC00408	/* Interrupt Identification Register		*/
+#define UART0_LCR			0xFFC0040C	/* Line Control Register					*/
+#define UART0_MCR			0xFFC00410	/* Modem Control Register					*/
+#define UART0_LSR			0xFFC00414	/* Line Status Register						*/
+#define UART0_MSR			0xFFC00418	/* Modem Status Register					*/
+#define UART0_SCR			0xFFC0041C	/* SCR Scratch Register						*/
+#define UART0_GCTL			0xFFC00424	/* Global Control Register					*/
+
+/* SPI0 Controller			(0xFFC00500 - 0xFFC005FF)							*/
+#define SPI0_REGBASE			0xFFC00500
+#define SPI0_CTL			0xFFC00500	/* SPI Control Register						*/
+#define SPI0_FLG			0xFFC00504	/* SPI Flag register						*/
+#define SPI0_STAT			0xFFC00508	/* SPI Status register						*/
+#define SPI0_TDBR			0xFFC0050C	/* SPI Transmit Data Buffer Register				*/
+#define SPI0_RDBR			0xFFC00510	/* SPI Receive Data Buffer Register				*/
+#define SPI0_BAUD			0xFFC00514	/* SPI Baud rate Register					*/
+#define SPI0_SHADOW			0xFFC00518	/* SPI_RDBR Shadow Register					*/
+
+/* SPI1 Controller			(0xFFC03400 - 0xFFC034FF)							*/
+#define SPI1_REGBASE			0xFFC03400
+#define SPI1_CTL			0xFFC03400	/* SPI Control Register						*/
+#define SPI1_FLG			0xFFC03404	/* SPI Flag register						*/
+#define SPI1_STAT			0xFFC03408	/* SPI Status register						*/
+#define SPI1_TDBR			0xFFC0340C	/* SPI Transmit Data Buffer Register				*/
+#define SPI1_RDBR			0xFFC03410	/* SPI Receive Data Buffer Register				*/
+#define SPI1_BAUD			0xFFC03414	/* SPI Baud rate Register					*/
+#define SPI1_SHADOW			0xFFC03418	/* SPI_RDBR Shadow Register					*/
+
+/* TIMER0-7 Registers		(0xFFC00600 - 0xFFC006FF)								*/
+#define TIMER0_CONFIG		0xFFC00600	/* Timer 0 Configuration Register			*/
+#define TIMER0_COUNTER		0xFFC00604	/* Timer 0 Counter Register					*/
+#define TIMER0_PERIOD		0xFFC00608	/* Timer 0 Period Register					*/
+#define TIMER0_WIDTH		0xFFC0060C	/* Timer 0 Width Register					*/
+
+#define TIMER1_CONFIG		0xFFC00610	/* Timer 1 Configuration Register  			*/
+#define TIMER1_COUNTER		0xFFC00614	/* Timer 1 Counter Register        			*/
+#define TIMER1_PERIOD		0xFFC00618	/* Timer 1 Period Register         			*/
+#define TIMER1_WIDTH		0xFFC0061C	/* Timer 1 Width Register          			*/
+
+#define TIMER2_CONFIG		0xFFC00620	/* Timer 2 Configuration Register  			*/
+#define TIMER2_COUNTER		0xFFC00624	/* Timer 2 Counter Register        			*/
+#define TIMER2_PERIOD		0xFFC00628	/* Timer 2 Period Register         			*/
+#define TIMER2_WIDTH		0xFFC0062C	/* Timer 2 Width Register          			*/
+
+#define TIMER3_CONFIG		0xFFC00630	/* Timer 3 Configuration Register			*/
+#define TIMER3_COUNTER		0xFFC00634	/* Timer 3 Counter Register					*/
+#define TIMER3_PERIOD		0xFFC00638	/* Timer 3 Period Register					*/
+#define TIMER3_WIDTH		0xFFC0063C	/* Timer 3 Width Register					*/
+
+#define TIMER4_CONFIG		0xFFC00640	/* Timer 4 Configuration Register  			*/
+#define TIMER4_COUNTER		0xFFC00644	/* Timer 4 Counter Register        			*/
+#define TIMER4_PERIOD		0xFFC00648	/* Timer 4 Period Register         			*/
+#define TIMER4_WIDTH		0xFFC0064C	/* Timer 4 Width Register          			*/
+
+#define TIMER5_CONFIG		0xFFC00650	/* Timer 5 Configuration Register  			*/
+#define TIMER5_COUNTER		0xFFC00654	/* Timer 5 Counter Register        			*/
+#define TIMER5_PERIOD		0xFFC00658	/* Timer 5 Period Register         			*/
+#define TIMER5_WIDTH		0xFFC0065C	/* Timer 5 Width Register          			*/
+
+#define TIMER6_CONFIG		0xFFC00660	/* Timer 6 Configuration Register  			*/
+#define TIMER6_COUNTER		0xFFC00664	/* Timer 6 Counter Register        			*/
+#define TIMER6_PERIOD		0xFFC00668	/* Timer 6 Period Register         			*/
+#define TIMER6_WIDTH		0xFFC0066C	/* Timer 6 Width Register          			*/
+
+#define TIMER7_CONFIG		0xFFC00670	/* Timer 7 Configuration Register  			*/
+#define TIMER7_COUNTER		0xFFC00674	/* Timer 7 Counter Register        			*/
+#define TIMER7_PERIOD		0xFFC00678	/* Timer 7 Period Register         			*/
+#define TIMER7_WIDTH		0xFFC0067C	/* Timer 7 Width Register       			*/
+
+#define TIMER_ENABLE		0xFFC00680	/* Timer Enable Register					*/
+#define TIMER_DISABLE		0xFFC00684	/* Timer Disable Register					*/
+#define TIMER_STATUS		0xFFC00688	/* Timer Status Register					*/
+
+/* General Purpose I/O Port F (0xFFC00700 - 0xFFC007FF)												*/
+#define PORTFIO					0xFFC00700	/* Port F I/O Pin State Specify Register				*/
+#define PORTFIO_CLEAR			0xFFC00704	/* Port F I/O Peripheral Interrupt Clear Register		*/
+#define PORTFIO_SET				0xFFC00708	/* Port F I/O Peripheral Interrupt Set Register			*/
+#define PORTFIO_TOGGLE			0xFFC0070C	/* Port F I/O Pin State Toggle Register					*/
+#define PORTFIO_MASKA			0xFFC00710	/* Port F I/O Mask State Specify Interrupt A Register	*/
+#define PORTFIO_MASKA_CLEAR		0xFFC00714	/* Port F I/O Mask Disable Interrupt A Register			*/
+#define PORTFIO_MASKA_SET		0xFFC00718	/* Port F I/O Mask Enable Interrupt A Register			*/
+#define PORTFIO_MASKA_TOGGLE	0xFFC0071C	/* Port F I/O Mask Toggle Enable Interrupt A Register	*/
+#define PORTFIO_MASKB			0xFFC00720	/* Port F I/O Mask State Specify Interrupt B Register	*/
+#define PORTFIO_MASKB_CLEAR		0xFFC00724	/* Port F I/O Mask Disable Interrupt B Register			*/
+#define PORTFIO_MASKB_SET		0xFFC00728	/* Port F I/O Mask Enable Interrupt B Register			*/
+#define PORTFIO_MASKB_TOGGLE	0xFFC0072C	/* Port F I/O Mask Toggle Enable Interrupt B Register	*/
+#define PORTFIO_DIR				0xFFC00730	/* Port F I/O Direction Register						*/
+#define PORTFIO_POLAR			0xFFC00734	/* Port F I/O Source Polarity Register					*/
+#define PORTFIO_EDGE			0xFFC00738	/* Port F I/O Source Sensitivity Register				*/
+#define PORTFIO_BOTH			0xFFC0073C	/* Port F I/O Set on BOTH Edges Register				*/
+#define PORTFIO_INEN			0xFFC00740	/* Port F I/O Input Enable Register 					*/
+
+/* SPORT0 Controller		(0xFFC00800 - 0xFFC008FF)										*/
+#define SPORT0_TCR1			0xFFC00800	/* SPORT0 Transmit Configuration 1 Register			*/
+#define SPORT0_TCR2			0xFFC00804	/* SPORT0 Transmit Configuration 2 Register			*/
+#define SPORT0_TCLKDIV		0xFFC00808	/* SPORT0 Transmit Clock Divider					*/
+#define SPORT0_TFSDIV		0xFFC0080C	/* SPORT0 Transmit Frame Sync Divider				*/
+#define SPORT0_TX			0xFFC00810	/* SPORT0 TX Data Register							*/
+#define SPORT0_RX			0xFFC00818	/* SPORT0 RX Data Register							*/
+#define SPORT0_RCR1			0xFFC00820	/* SPORT0 Receive Configuration 1 Register			*/
+#define SPORT0_RCR2			0xFFC00824	/* SPORT0 Receive Configuration 2 Register			*/
+#define SPORT0_RCLKDIV		0xFFC00828	/* SPORT0 Receive Clock Divider						*/
+#define SPORT0_RFSDIV		0xFFC0082C	/* SPORT0 Receive Frame Sync Divider				*/
+#define SPORT0_STAT			0xFFC00830	/* SPORT0 Status Register							*/
+#define SPORT0_CHNL			0xFFC00834	/* SPORT0 Current Channel Register					*/
+#define SPORT0_MCMC1		0xFFC00838	/* SPORT0 Multi-Channel Configuration Register 1	*/
+#define SPORT0_MCMC2		0xFFC0083C	/* SPORT0 Multi-Channel Configuration Register 2	*/
+#define SPORT0_MTCS0		0xFFC00840	/* SPORT0 Multi-Channel Transmit Select Register 0	*/
+#define SPORT0_MTCS1		0xFFC00844	/* SPORT0 Multi-Channel Transmit Select Register 1	*/
+#define SPORT0_MTCS2		0xFFC00848	/* SPORT0 Multi-Channel Transmit Select Register 2	*/
+#define SPORT0_MTCS3		0xFFC0084C	/* SPORT0 Multi-Channel Transmit Select Register 3	*/
+#define SPORT0_MRCS0		0xFFC00850	/* SPORT0 Multi-Channel Receive Select Register 0	*/
+#define SPORT0_MRCS1		0xFFC00854	/* SPORT0 Multi-Channel Receive Select Register 1	*/
+#define SPORT0_MRCS2		0xFFC00858	/* SPORT0 Multi-Channel Receive Select Register 2	*/
+#define SPORT0_MRCS3		0xFFC0085C	/* SPORT0 Multi-Channel Receive Select Register 3	*/
+
+/* SPORT1 Controller		(0xFFC00900 - 0xFFC009FF)										*/
+#define SPORT1_TCR1			0xFFC00900	/* SPORT1 Transmit Configuration 1 Register			*/
+#define SPORT1_TCR2			0xFFC00904	/* SPORT1 Transmit Configuration 2 Register			*/
+#define SPORT1_TCLKDIV		0xFFC00908	/* SPORT1 Transmit Clock Divider					*/
+#define SPORT1_TFSDIV		0xFFC0090C	/* SPORT1 Transmit Frame Sync Divider				*/
+#define SPORT1_TX			0xFFC00910	/* SPORT1 TX Data Register							*/
+#define SPORT1_RX			0xFFC00918	/* SPORT1 RX Data Register							*/
+#define SPORT1_RCR1			0xFFC00920	/* SPORT1 Receive Configuration 1 Register			*/
+#define SPORT1_RCR2			0xFFC00924	/* SPORT1 Receive Configuration 2 Register			*/
+#define SPORT1_RCLKDIV		0xFFC00928	/* SPORT1 Receive Clock Divider						*/
+#define SPORT1_RFSDIV		0xFFC0092C	/* SPORT1 Receive Frame Sync Divider				*/
+#define SPORT1_STAT			0xFFC00930	/* SPORT1 Status Register							*/
+#define SPORT1_CHNL			0xFFC00934	/* SPORT1 Current Channel Register					*/
+#define SPORT1_MCMC1		0xFFC00938	/* SPORT1 Multi-Channel Configuration Register 1	*/
+#define SPORT1_MCMC2		0xFFC0093C	/* SPORT1 Multi-Channel Configuration Register 2	*/
+#define SPORT1_MTCS0		0xFFC00940	/* SPORT1 Multi-Channel Transmit Select Register 0	*/
+#define SPORT1_MTCS1		0xFFC00944	/* SPORT1 Multi-Channel Transmit Select Register 1	*/
+#define SPORT1_MTCS2		0xFFC00948	/* SPORT1 Multi-Channel Transmit Select Register 2	*/
+#define SPORT1_MTCS3		0xFFC0094C	/* SPORT1 Multi-Channel Transmit Select Register 3	*/
+#define SPORT1_MRCS0		0xFFC00950	/* SPORT1 Multi-Channel Receive Select Register 0	*/
+#define SPORT1_MRCS1		0xFFC00954	/* SPORT1 Multi-Channel Receive Select Register 1	*/
+#define SPORT1_MRCS2		0xFFC00958	/* SPORT1 Multi-Channel Receive Select Register 2	*/
+#define SPORT1_MRCS3		0xFFC0095C	/* SPORT1 Multi-Channel Receive Select Register 3	*/
+
+/* External Bus Interface Unit (0xFFC00A00 - 0xFFC00AFF)								*/
+#define EBIU_AMGCTL			0xFFC00A00	/* Asynchronous Memory Global Control Register	*/
+#define EBIU_AMBCTL0		0xFFC00A04	/* Asynchronous Memory Bank Control Register 0	*/
+#define EBIU_AMBCTL1		0xFFC00A08	/* Asynchronous Memory Bank Control Register 1	*/
+#define EBIU_SDGCTL			0xFFC00A10	/* SDRAM Global Control Register				*/
+#define EBIU_SDBCTL			0xFFC00A14	/* SDRAM Bank Control Register					*/
+#define EBIU_SDRRC			0xFFC00A18	/* SDRAM Refresh Rate Control Register			*/
+#define EBIU_SDSTAT			0xFFC00A1C	/* SDRAM Status Register						*/
+
+/* DMA Traffic Control Registers													*/
+#define DMAC_TC_PER			0xFFC00B0C	/* Traffic Control Periods Register			*/
+#define DMAC_TC_CNT			0xFFC00B10	/* Traffic Control Current Counts Register	*/
+
+/* DMA Controller (0xFFC00C00 - 0xFFC00FFF)															*/
+#define DMA0_NEXT_DESC_PTR		0xFFC00C00	/* DMA Channel 0 Next Descriptor Pointer Register		*/
+#define DMA0_START_ADDR			0xFFC00C04	/* DMA Channel 0 Start Address Register					*/
+#define DMA0_CONFIG				0xFFC00C08	/* DMA Channel 0 Configuration Register					*/
+#define DMA0_X_COUNT			0xFFC00C10	/* DMA Channel 0 X Count Register						*/
+#define DMA0_X_MODIFY			0xFFC00C14	/* DMA Channel 0 X Modify Register						*/
+#define DMA0_Y_COUNT			0xFFC00C18	/* DMA Channel 0 Y Count Register						*/
+#define DMA0_Y_MODIFY			0xFFC00C1C	/* DMA Channel 0 Y Modify Register						*/
+#define DMA0_CURR_DESC_PTR		0xFFC00C20	/* DMA Channel 0 Current Descriptor Pointer Register	*/
+#define DMA0_CURR_ADDR			0xFFC00C24	/* DMA Channel 0 Current Address Register				*/
+#define DMA0_IRQ_STATUS			0xFFC00C28	/* DMA Channel 0 Interrupt/Status Register				*/
+#define DMA0_PERIPHERAL_MAP		0xFFC00C2C	/* DMA Channel 0 Peripheral Map Register				*/
+#define DMA0_CURR_X_COUNT		0xFFC00C30	/* DMA Channel 0 Current X Count Register				*/
+#define DMA0_CURR_Y_COUNT		0xFFC00C38	/* DMA Channel 0 Current Y Count Register				*/
+
+#define DMA1_NEXT_DESC_PTR		0xFFC00C40	/* DMA Channel 1 Next Descriptor Pointer Register		*/
+#define DMA1_START_ADDR			0xFFC00C44	/* DMA Channel 1 Start Address Register					*/
+#define DMA1_CONFIG				0xFFC00C48	/* DMA Channel 1 Configuration Register					*/
+#define DMA1_X_COUNT			0xFFC00C50	/* DMA Channel 1 X Count Register						*/
+#define DMA1_X_MODIFY			0xFFC00C54	/* DMA Channel 1 X Modify Register						*/
+#define DMA1_Y_COUNT			0xFFC00C58	/* DMA Channel 1 Y Count Register						*/
+#define DMA1_Y_MODIFY			0xFFC00C5C	/* DMA Channel 1 Y Modify Register						*/
+#define DMA1_CURR_DESC_PTR		0xFFC00C60	/* DMA Channel 1 Current Descriptor Pointer Register	*/
+#define DMA1_CURR_ADDR			0xFFC00C64	/* DMA Channel 1 Current Address Register				*/
+#define DMA1_IRQ_STATUS			0xFFC00C68	/* DMA Channel 1 Interrupt/Status Register				*/
+#define DMA1_PERIPHERAL_MAP		0xFFC00C6C	/* DMA Channel 1 Peripheral Map Register				*/
+#define DMA1_CURR_X_COUNT		0xFFC00C70	/* DMA Channel 1 Current X Count Register				*/
+#define DMA1_CURR_Y_COUNT		0xFFC00C78	/* DMA Channel 1 Current Y Count Register				*/
+
+#define DMA2_NEXT_DESC_PTR		0xFFC00C80	/* DMA Channel 2 Next Descriptor Pointer Register		*/
+#define DMA2_START_ADDR			0xFFC00C84	/* DMA Channel 2 Start Address Register					*/
+#define DMA2_CONFIG				0xFFC00C88	/* DMA Channel 2 Configuration Register					*/
+#define DMA2_X_COUNT			0xFFC00C90	/* DMA Channel 2 X Count Register						*/
+#define DMA2_X_MODIFY			0xFFC00C94	/* DMA Channel 2 X Modify Register						*/
+#define DMA2_Y_COUNT			0xFFC00C98	/* DMA Channel 2 Y Count Register						*/
+#define DMA2_Y_MODIFY			0xFFC00C9C	/* DMA Channel 2 Y Modify Register						*/
+#define DMA2_CURR_DESC_PTR		0xFFC00CA0	/* DMA Channel 2 Current Descriptor Pointer Register	*/
+#define DMA2_CURR_ADDR			0xFFC00CA4	/* DMA Channel 2 Current Address Register				*/
+#define DMA2_IRQ_STATUS			0xFFC00CA8	/* DMA Channel 2 Interrupt/Status Register				*/
+#define DMA2_PERIPHERAL_MAP		0xFFC00CAC	/* DMA Channel 2 Peripheral Map Register				*/
+#define DMA2_CURR_X_COUNT		0xFFC00CB0	/* DMA Channel 2 Current X Count Register				*/
+#define DMA2_CURR_Y_COUNT		0xFFC00CB8	/* DMA Channel 2 Current Y Count Register				*/
+
+#define DMA3_NEXT_DESC_PTR		0xFFC00CC0	/* DMA Channel 3 Next Descriptor Pointer Register		*/
+#define DMA3_START_ADDR			0xFFC00CC4	/* DMA Channel 3 Start Address Register					*/
+#define DMA3_CONFIG				0xFFC00CC8	/* DMA Channel 3 Configuration Register					*/
+#define DMA3_X_COUNT			0xFFC00CD0	/* DMA Channel 3 X Count Register						*/
+#define DMA3_X_MODIFY			0xFFC00CD4	/* DMA Channel 3 X Modify Register						*/
+#define DMA3_Y_COUNT			0xFFC00CD8	/* DMA Channel 3 Y Count Register						*/
+#define DMA3_Y_MODIFY			0xFFC00CDC	/* DMA Channel 3 Y Modify Register						*/
+#define DMA3_CURR_DESC_PTR		0xFFC00CE0	/* DMA Channel 3 Current Descriptor Pointer Register	*/
+#define DMA3_CURR_ADDR			0xFFC00CE4	/* DMA Channel 3 Current Address Register				*/
+#define DMA3_IRQ_STATUS			0xFFC00CE8	/* DMA Channel 3 Interrupt/Status Register				*/
+#define DMA3_PERIPHERAL_MAP		0xFFC00CEC	/* DMA Channel 3 Peripheral Map Register				*/
+#define DMA3_CURR_X_COUNT		0xFFC00CF0	/* DMA Channel 3 Current X Count Register				*/
+#define DMA3_CURR_Y_COUNT		0xFFC00CF8	/* DMA Channel 3 Current Y Count Register				*/
+
+#define DMA4_NEXT_DESC_PTR		0xFFC00D00	/* DMA Channel 4 Next Descriptor Pointer Register		*/
+#define DMA4_START_ADDR			0xFFC00D04	/* DMA Channel 4 Start Address Register					*/
+#define DMA4_CONFIG				0xFFC00D08	/* DMA Channel 4 Configuration Register					*/
+#define DMA4_X_COUNT			0xFFC00D10	/* DMA Channel 4 X Count Register						*/
+#define DMA4_X_MODIFY			0xFFC00D14	/* DMA Channel 4 X Modify Register						*/
+#define DMA4_Y_COUNT			0xFFC00D18	/* DMA Channel 4 Y Count Register						*/
+#define DMA4_Y_MODIFY			0xFFC00D1C	/* DMA Channel 4 Y Modify Register						*/
+#define DMA4_CURR_DESC_PTR		0xFFC00D20	/* DMA Channel 4 Current Descriptor Pointer Register	*/
+#define DMA4_CURR_ADDR			0xFFC00D24	/* DMA Channel 4 Current Address Register				*/
+#define DMA4_IRQ_STATUS			0xFFC00D28	/* DMA Channel 4 Interrupt/Status Register				*/
+#define DMA4_PERIPHERAL_MAP		0xFFC00D2C	/* DMA Channel 4 Peripheral Map Register				*/
+#define DMA4_CURR_X_COUNT		0xFFC00D30	/* DMA Channel 4 Current X Count Register				*/
+#define DMA4_CURR_Y_COUNT		0xFFC00D38	/* DMA Channel 4 Current Y Count Register				*/
+
+#define DMA5_NEXT_DESC_PTR		0xFFC00D40	/* DMA Channel 5 Next Descriptor Pointer Register		*/
+#define DMA5_START_ADDR			0xFFC00D44	/* DMA Channel 5 Start Address Register					*/
+#define DMA5_CONFIG				0xFFC00D48	/* DMA Channel 5 Configuration Register					*/
+#define DMA5_X_COUNT			0xFFC00D50	/* DMA Channel 5 X Count Register						*/
+#define DMA5_X_MODIFY			0xFFC00D54	/* DMA Channel 5 X Modify Register						*/
+#define DMA5_Y_COUNT			0xFFC00D58	/* DMA Channel 5 Y Count Register						*/
+#define DMA5_Y_MODIFY			0xFFC00D5C	/* DMA Channel 5 Y Modify Register						*/
+#define DMA5_CURR_DESC_PTR		0xFFC00D60	/* DMA Channel 5 Current Descriptor Pointer Register	*/
+#define DMA5_CURR_ADDR			0xFFC00D64	/* DMA Channel 5 Current Address Register				*/
+#define DMA5_IRQ_STATUS			0xFFC00D68	/* DMA Channel 5 Interrupt/Status Register				*/
+#define DMA5_PERIPHERAL_MAP		0xFFC00D6C	/* DMA Channel 5 Peripheral Map Register				*/
+#define DMA5_CURR_X_COUNT		0xFFC00D70	/* DMA Channel 5 Current X Count Register				*/
+#define DMA5_CURR_Y_COUNT		0xFFC00D78	/* DMA Channel 5 Current Y Count Register				*/
+
+#define DMA6_NEXT_DESC_PTR		0xFFC00D80	/* DMA Channel 6 Next Descriptor Pointer Register		*/
+#define DMA6_START_ADDR			0xFFC00D84	/* DMA Channel 6 Start Address Register					*/
+#define DMA6_CONFIG				0xFFC00D88	/* DMA Channel 6 Configuration Register					*/
+#define DMA6_X_COUNT			0xFFC00D90	/* DMA Channel 6 X Count Register						*/
+#define DMA6_X_MODIFY			0xFFC00D94	/* DMA Channel 6 X Modify Register						*/
+#define DMA6_Y_COUNT			0xFFC00D98	/* DMA Channel 6 Y Count Register						*/
+#define DMA6_Y_MODIFY			0xFFC00D9C	/* DMA Channel 6 Y Modify Register						*/
+#define DMA6_CURR_DESC_PTR		0xFFC00DA0	/* DMA Channel 6 Current Descriptor Pointer Register	*/
+#define DMA6_CURR_ADDR			0xFFC00DA4	/* DMA Channel 6 Current Address Register				*/
+#define DMA6_IRQ_STATUS			0xFFC00DA8	/* DMA Channel 6 Interrupt/Status Register				*/
+#define DMA6_PERIPHERAL_MAP		0xFFC00DAC	/* DMA Channel 6 Peripheral Map Register				*/
+#define DMA6_CURR_X_COUNT		0xFFC00DB0	/* DMA Channel 6 Current X Count Register				*/
+#define DMA6_CURR_Y_COUNT		0xFFC00DB8	/* DMA Channel 6 Current Y Count Register				*/
+
+#define DMA7_NEXT_DESC_PTR		0xFFC00DC0	/* DMA Channel 7 Next Descriptor Pointer Register		*/
+#define DMA7_START_ADDR			0xFFC00DC4	/* DMA Channel 7 Start Address Register					*/
+#define DMA7_CONFIG				0xFFC00DC8	/* DMA Channel 7 Configuration Register					*/
+#define DMA7_X_COUNT			0xFFC00DD0	/* DMA Channel 7 X Count Register						*/
+#define DMA7_X_MODIFY			0xFFC00DD4	/* DMA Channel 7 X Modify Register						*/
+#define DMA7_Y_COUNT			0xFFC00DD8	/* DMA Channel 7 Y Count Register						*/
+#define DMA7_Y_MODIFY			0xFFC00DDC	/* DMA Channel 7 Y Modify Register						*/
+#define DMA7_CURR_DESC_PTR		0xFFC00DE0	/* DMA Channel 7 Current Descriptor Pointer Register	*/
+#define DMA7_CURR_ADDR			0xFFC00DE4	/* DMA Channel 7 Current Address Register				*/
+#define DMA7_IRQ_STATUS			0xFFC00DE8	/* DMA Channel 7 Interrupt/Status Register				*/
+#define DMA7_PERIPHERAL_MAP		0xFFC00DEC	/* DMA Channel 7 Peripheral Map Register				*/
+#define DMA7_CURR_X_COUNT		0xFFC00DF0	/* DMA Channel 7 Current X Count Register				*/
+#define DMA7_CURR_Y_COUNT		0xFFC00DF8	/* DMA Channel 7 Current Y Count Register				*/
+
+#define DMA8_NEXT_DESC_PTR		0xFFC00E00	/* DMA Channel 8 Next Descriptor Pointer Register		*/
+#define DMA8_START_ADDR			0xFFC00E04	/* DMA Channel 8 Start Address Register					*/
+#define DMA8_CONFIG				0xFFC00E08	/* DMA Channel 8 Configuration Register					*/
+#define DMA8_X_COUNT			0xFFC00E10	/* DMA Channel 8 X Count Register						*/
+#define DMA8_X_MODIFY			0xFFC00E14	/* DMA Channel 8 X Modify Register						*/
+#define DMA8_Y_COUNT			0xFFC00E18	/* DMA Channel 8 Y Count Register						*/
+#define DMA8_Y_MODIFY			0xFFC00E1C	/* DMA Channel 8 Y Modify Register						*/
+#define DMA8_CURR_DESC_PTR		0xFFC00E20	/* DMA Channel 8 Current Descriptor Pointer Register	*/
+#define DMA8_CURR_ADDR			0xFFC00E24	/* DMA Channel 8 Current Address Register				*/
+#define DMA8_IRQ_STATUS			0xFFC00E28	/* DMA Channel 8 Interrupt/Status Register				*/
+#define DMA8_PERIPHERAL_MAP		0xFFC00E2C	/* DMA Channel 8 Peripheral Map Register				*/
+#define DMA8_CURR_X_COUNT		0xFFC00E30	/* DMA Channel 8 Current X Count Register				*/
+#define DMA8_CURR_Y_COUNT		0xFFC00E38	/* DMA Channel 8 Current Y Count Register				*/
+
+#define DMA9_NEXT_DESC_PTR		0xFFC00E40	/* DMA Channel 9 Next Descriptor Pointer Register		*/
+#define DMA9_START_ADDR			0xFFC00E44	/* DMA Channel 9 Start Address Register					*/
+#define DMA9_CONFIG				0xFFC00E48	/* DMA Channel 9 Configuration Register					*/
+#define DMA9_X_COUNT			0xFFC00E50	/* DMA Channel 9 X Count Register						*/
+#define DMA9_X_MODIFY			0xFFC00E54	/* DMA Channel 9 X Modify Register						*/
+#define DMA9_Y_COUNT			0xFFC00E58	/* DMA Channel 9 Y Count Register						*/
+#define DMA9_Y_MODIFY			0xFFC00E5C	/* DMA Channel 9 Y Modify Register						*/
+#define DMA9_CURR_DESC_PTR		0xFFC00E60	/* DMA Channel 9 Current Descriptor Pointer Register	*/
+#define DMA9_CURR_ADDR			0xFFC00E64	/* DMA Channel 9 Current Address Register				*/
+#define DMA9_IRQ_STATUS			0xFFC00E68	/* DMA Channel 9 Interrupt/Status Register				*/
+#define DMA9_PERIPHERAL_MAP		0xFFC00E6C	/* DMA Channel 9 Peripheral Map Register				*/
+#define DMA9_CURR_X_COUNT		0xFFC00E70	/* DMA Channel 9 Current X Count Register				*/
+#define DMA9_CURR_Y_COUNT		0xFFC00E78	/* DMA Channel 9 Current Y Count Register				*/
+
+#define DMA10_NEXT_DESC_PTR		0xFFC00E80	/* DMA Channel 10 Next Descriptor Pointer Register		*/
+#define DMA10_START_ADDR		0xFFC00E84	/* DMA Channel 10 Start Address Register				*/
+#define DMA10_CONFIG			0xFFC00E88	/* DMA Channel 10 Configuration Register				*/
+#define DMA10_X_COUNT			0xFFC00E90	/* DMA Channel 10 X Count Register						*/
+#define DMA10_X_MODIFY			0xFFC00E94	/* DMA Channel 10 X Modify Register						*/
+#define DMA10_Y_COUNT			0xFFC00E98	/* DMA Channel 10 Y Count Register						*/
+#define DMA10_Y_MODIFY			0xFFC00E9C	/* DMA Channel 10 Y Modify Register						*/
+#define DMA10_CURR_DESC_PTR		0xFFC00EA0	/* DMA Channel 10 Current Descriptor Pointer Register	*/
+#define DMA10_CURR_ADDR			0xFFC00EA4	/* DMA Channel 10 Current Address Register				*/
+#define DMA10_IRQ_STATUS		0xFFC00EA8	/* DMA Channel 10 Interrupt/Status Register				*/
+#define DMA10_PERIPHERAL_MAP	0xFFC00EAC	/* DMA Channel 10 Peripheral Map Register				*/
+#define DMA10_CURR_X_COUNT		0xFFC00EB0	/* DMA Channel 10 Current X Count Register				*/
+#define DMA10_CURR_Y_COUNT		0xFFC00EB8	/* DMA Channel 10 Current Y Count Register				*/
+
+#define DMA11_NEXT_DESC_PTR		0xFFC00EC0	/* DMA Channel 11 Next Descriptor Pointer Register		*/
+#define DMA11_START_ADDR		0xFFC00EC4	/* DMA Channel 11 Start Address Register				*/
+#define DMA11_CONFIG			0xFFC00EC8	/* DMA Channel 11 Configuration Register				*/
+#define DMA11_X_COUNT			0xFFC00ED0	/* DMA Channel 11 X Count Register						*/
+#define DMA11_X_MODIFY			0xFFC00ED4	/* DMA Channel 11 X Modify Register						*/
+#define DMA11_Y_COUNT			0xFFC00ED8	/* DMA Channel 11 Y Count Register						*/
+#define DMA11_Y_MODIFY			0xFFC00EDC	/* DMA Channel 11 Y Modify Register						*/
+#define DMA11_CURR_DESC_PTR		0xFFC00EE0	/* DMA Channel 11 Current Descriptor Pointer Register	*/
+#define DMA11_CURR_ADDR			0xFFC00EE4	/* DMA Channel 11 Current Address Register				*/
+#define DMA11_IRQ_STATUS		0xFFC00EE8	/* DMA Channel 11 Interrupt/Status Register				*/
+#define DMA11_PERIPHERAL_MAP	0xFFC00EEC	/* DMA Channel 11 Peripheral Map Register				*/
+#define DMA11_CURR_X_COUNT		0xFFC00EF0	/* DMA Channel 11 Current X Count Register				*/
+#define DMA11_CURR_Y_COUNT		0xFFC00EF8	/* DMA Channel 11 Current Y Count Register				*/
+
+#define MDMA_D0_NEXT_DESC_PTR	0xFFC00F00	/* MemDMA Stream 0 Destination Next Descriptor Pointer Register		*/
+#define MDMA_D0_START_ADDR		0xFFC00F04	/* MemDMA Stream 0 Destination Start Address Register				*/
+#define MDMA_D0_CONFIG			0xFFC00F08	/* MemDMA Stream 0 Destination Configuration Register				*/
+#define MDMA_D0_X_COUNT			0xFFC00F10	/* MemDMA Stream 0 Destination X Count Register						*/
+#define MDMA_D0_X_MODIFY		0xFFC00F14	/* MemDMA Stream 0 Destination X Modify Register					*/
+#define MDMA_D0_Y_COUNT			0xFFC00F18	/* MemDMA Stream 0 Destination Y Count Register						*/
+#define MDMA_D0_Y_MODIFY		0xFFC00F1C	/* MemDMA Stream 0 Destination Y Modify Register					*/
+#define MDMA_D0_CURR_DESC_PTR	0xFFC00F20	/* MemDMA Stream 0 Destination Current Descriptor Pointer Register	*/
+#define MDMA_D0_CURR_ADDR		0xFFC00F24	/* MemDMA Stream 0 Destination Current Address Register				*/
+#define MDMA_D0_IRQ_STATUS		0xFFC00F28	/* MemDMA Stream 0 Destination Interrupt/Status Register			*/
+#define MDMA_D0_PERIPHERAL_MAP	0xFFC00F2C	/* MemDMA Stream 0 Destination Peripheral Map Register				*/
+#define MDMA_D0_CURR_X_COUNT	0xFFC00F30	/* MemDMA Stream 0 Destination Current X Count Register				*/
+#define MDMA_D0_CURR_Y_COUNT	0xFFC00F38	/* MemDMA Stream 0 Destination Current Y Count Register				*/
+
+#define MDMA_S0_NEXT_DESC_PTR	0xFFC00F40	/* MemDMA Stream 0 Source Next Descriptor Pointer Register			*/
+#define MDMA_S0_START_ADDR		0xFFC00F44	/* MemDMA Stream 0 Source Start Address Register					*/
+#define MDMA_S0_CONFIG			0xFFC00F48	/* MemDMA Stream 0 Source Configuration Register					*/
+#define MDMA_S0_X_COUNT			0xFFC00F50	/* MemDMA Stream 0 Source X Count Register							*/
+#define MDMA_S0_X_MODIFY		0xFFC00F54	/* MemDMA Stream 0 Source X Modify Register							*/
+#define MDMA_S0_Y_COUNT			0xFFC00F58	/* MemDMA Stream 0 Source Y Count Register							*/
+#define MDMA_S0_Y_MODIFY		0xFFC00F5C	/* MemDMA Stream 0 Source Y Modify Register							*/
+#define MDMA_S0_CURR_DESC_PTR	0xFFC00F60	/* MemDMA Stream 0 Source Current Descriptor Pointer Register		*/
+#define MDMA_S0_CURR_ADDR		0xFFC00F64	/* MemDMA Stream 0 Source Current Address Register					*/
+#define MDMA_S0_IRQ_STATUS		0xFFC00F68	/* MemDMA Stream 0 Source Interrupt/Status Register					*/
+#define MDMA_S0_PERIPHERAL_MAP	0xFFC00F6C	/* MemDMA Stream 0 Source Peripheral Map Register					*/
+#define MDMA_S0_CURR_X_COUNT	0xFFC00F70	/* MemDMA Stream 0 Source Current X Count Register					*/
+#define MDMA_S0_CURR_Y_COUNT	0xFFC00F78	/* MemDMA Stream 0 Source Current Y Count Register					*/
+
+#define MDMA_D1_NEXT_DESC_PTR	0xFFC00F80	/* MemDMA Stream 1 Destination Next Descriptor Pointer Register		*/
+#define MDMA_D1_START_ADDR		0xFFC00F84	/* MemDMA Stream 1 Destination Start Address Register				*/
+#define MDMA_D1_CONFIG			0xFFC00F88	/* MemDMA Stream 1 Destination Configuration Register				*/
+#define MDMA_D1_X_COUNT			0xFFC00F90	/* MemDMA Stream 1 Destination X Count Register						*/
+#define MDMA_D1_X_MODIFY		0xFFC00F94	/* MemDMA Stream 1 Destination X Modify Register					*/
+#define MDMA_D1_Y_COUNT			0xFFC00F98	/* MemDMA Stream 1 Destination Y Count Register						*/
+#define MDMA_D1_Y_MODIFY		0xFFC00F9C	/* MemDMA Stream 1 Destination Y Modify Register					*/
+#define MDMA_D1_CURR_DESC_PTR	0xFFC00FA0	/* MemDMA Stream 1 Destination Current Descriptor Pointer Register	*/
+#define MDMA_D1_CURR_ADDR		0xFFC00FA4	/* MemDMA Stream 1 Destination Current Address Register				*/
+#define MDMA_D1_IRQ_STATUS		0xFFC00FA8	/* MemDMA Stream 1 Destination Interrupt/Status Register			*/
+#define MDMA_D1_PERIPHERAL_MAP	0xFFC00FAC	/* MemDMA Stream 1 Destination Peripheral Map Register				*/
+#define MDMA_D1_CURR_X_COUNT	0xFFC00FB0	/* MemDMA Stream 1 Destination Current X Count Register				*/
+#define MDMA_D1_CURR_Y_COUNT	0xFFC00FB8	/* MemDMA Stream 1 Destination Current Y Count Register				*/
+
+#define MDMA_S1_NEXT_DESC_PTR	0xFFC00FC0	/* MemDMA Stream 1 Source Next Descriptor Pointer Register			*/
+#define MDMA_S1_START_ADDR		0xFFC00FC4	/* MemDMA Stream 1 Source Start Address Register					*/
+#define MDMA_S1_CONFIG			0xFFC00FC8	/* MemDMA Stream 1 Source Configuration Register					*/
+#define MDMA_S1_X_COUNT			0xFFC00FD0	/* MemDMA Stream 1 Source X Count Register							*/
+#define MDMA_S1_X_MODIFY		0xFFC00FD4	/* MemDMA Stream 1 Source X Modify Register							*/
+#define MDMA_S1_Y_COUNT			0xFFC00FD8	/* MemDMA Stream 1 Source Y Count Register							*/
+#define MDMA_S1_Y_MODIFY		0xFFC00FDC	/* MemDMA Stream 1 Source Y Modify Register							*/
+#define MDMA_S1_CURR_DESC_PTR	0xFFC00FE0	/* MemDMA Stream 1 Source Current Descriptor Pointer Register		*/
+#define MDMA_S1_CURR_ADDR		0xFFC00FE4	/* MemDMA Stream 1 Source Current Address Register					*/
+#define MDMA_S1_IRQ_STATUS		0xFFC00FE8	/* MemDMA Stream 1 Source Interrupt/Status Register					*/
+#define MDMA_S1_PERIPHERAL_MAP	0xFFC00FEC	/* MemDMA Stream 1 Source Peripheral Map Register					*/
+#define MDMA_S1_CURR_X_COUNT	0xFFC00FF0	/* MemDMA Stream 1 Source Current X Count Register					*/
+#define MDMA_S1_CURR_Y_COUNT	0xFFC00FF8	/* MemDMA Stream 1 Source Current Y Count Register					*/
+
+
+/* Parallel Peripheral Interface (0xFFC01000 - 0xFFC010FF)				*/
+#define PPI_CONTROL			0xFFC01000	/* PPI Control Register			*/
+#define PPI_STATUS			0xFFC01004	/* PPI Status Register			*/
+#define PPI_COUNT			0xFFC01008	/* PPI Transfer Count Register	*/
+#define PPI_DELAY			0xFFC0100C	/* PPI Delay Count Register		*/
+#define PPI_FRAME			0xFFC01010	/* PPI Frame Length Register	*/
+
+
+/* Two-Wire Interface		(0xFFC01400 - 0xFFC014FF)								*/
+#define TWI0_REGBASE			0xFFC01400
+#define TWI0_CLKDIV			0xFFC01400	/* Serial Clock Divider Register			*/
+#define TWI0_CONTROL			0xFFC01404	/* TWI Control Register						*/
+#define TWI0_SLAVE_CTL		0xFFC01408	/* Slave Mode Control Register				*/
+#define TWI0_SLAVE_STAT		0xFFC0140C	/* Slave Mode Status Register				*/
+#define TWI0_SLAVE_ADDR		0xFFC01410	/* Slave Mode Address Register				*/
+#define TWI0_MASTER_CTL		0xFFC01414	/* Master Mode Control Register				*/
+#define TWI0_MASTER_STAT		0xFFC01418	/* Master Mode Status Register				*/
+#define TWI0_MASTER_ADDR		0xFFC0141C	/* Master Mode Address Register				*/
+#define TWI0_INT_STAT		0xFFC01420	/* TWI Interrupt Status Register			*/
+#define TWI0_INT_MASK		0xFFC01424	/* TWI Master Interrupt Mask Register		*/
+#define TWI0_FIFO_CTL		0xFFC01428	/* FIFO Control Register					*/
+#define TWI0_FIFO_STAT		0xFFC0142C	/* FIFO Status Register						*/
+#define TWI0_XMT_DATA8		0xFFC01480	/* FIFO Transmit Data Single Byte Register	*/
+#define TWI0_XMT_DATA16		0xFFC01484	/* FIFO Transmit Data Double Byte Register	*/
+#define TWI0_RCV_DATA8		0xFFC01488	/* FIFO Receive Data Single Byte Register	*/
+#define TWI0_RCV_DATA16		0xFFC0148C	/* FIFO Receive Data Double Byte Register	*/
+
+
+/* General Purpose I/O Port G (0xFFC01500 - 0xFFC015FF)												*/
+#define PORTGIO					0xFFC01500	/* Port G I/O Pin State Specify Register				*/
+#define PORTGIO_CLEAR			0xFFC01504	/* Port G I/O Peripheral Interrupt Clear Register		*/
+#define PORTGIO_SET				0xFFC01508	/* Port G I/O Peripheral Interrupt Set Register			*/
+#define PORTGIO_TOGGLE			0xFFC0150C	/* Port G I/O Pin State Toggle Register					*/
+#define PORTGIO_MASKA			0xFFC01510	/* Port G I/O Mask State Specify Interrupt A Register	*/
+#define PORTGIO_MASKA_CLEAR		0xFFC01514	/* Port G I/O Mask Disable Interrupt A Register			*/
+#define PORTGIO_MASKA_SET		0xFFC01518	/* Port G I/O Mask Enable Interrupt A Register			*/
+#define PORTGIO_MASKA_TOGGLE	0xFFC0151C	/* Port G I/O Mask Toggle Enable Interrupt A Register	*/
+#define PORTGIO_MASKB			0xFFC01520	/* Port G I/O Mask State Specify Interrupt B Register	*/
+#define PORTGIO_MASKB_CLEAR		0xFFC01524	/* Port G I/O Mask Disable Interrupt B Register			*/
+#define PORTGIO_MASKB_SET		0xFFC01528	/* Port G I/O Mask Enable Interrupt B Register			*/
+#define PORTGIO_MASKB_TOGGLE	0xFFC0152C	/* Port G I/O Mask Toggle Enable Interrupt B Register	*/
+#define PORTGIO_DIR				0xFFC01530	/* Port G I/O Direction Register						*/
+#define PORTGIO_POLAR			0xFFC01534	/* Port G I/O Source Polarity Register					*/
+#define PORTGIO_EDGE			0xFFC01538	/* Port G I/O Source Sensitivity Register				*/
+#define PORTGIO_BOTH			0xFFC0153C	/* Port G I/O Set on BOTH Edges Register				*/
+#define PORTGIO_INEN			0xFFC01540	/* Port G I/O Input Enable Register						*/
+
+
+/* General Purpose I/O Port H (0xFFC01700 - 0xFFC017FF)												*/
+#define PORTHIO					0xFFC01700	/* Port H I/O Pin State Specify Register				*/
+#define PORTHIO_CLEAR			0xFFC01704	/* Port H I/O Peripheral Interrupt Clear Register		*/
+#define PORTHIO_SET				0xFFC01708	/* Port H I/O Peripheral Interrupt Set Register			*/
+#define PORTHIO_TOGGLE			0xFFC0170C	/* Port H I/O Pin State Toggle Register					*/
+#define PORTHIO_MASKA			0xFFC01710	/* Port H I/O Mask State Specify Interrupt A Register	*/
+#define PORTHIO_MASKA_CLEAR		0xFFC01714	/* Port H I/O Mask Disable Interrupt A Register			*/
+#define PORTHIO_MASKA_SET		0xFFC01718	/* Port H I/O Mask Enable Interrupt A Register			*/
+#define PORTHIO_MASKA_TOGGLE	0xFFC0171C	/* Port H I/O Mask Toggle Enable Interrupt A Register	*/
+#define PORTHIO_MASKB			0xFFC01720	/* Port H I/O Mask State Specify Interrupt B Register	*/
+#define PORTHIO_MASKB_CLEAR		0xFFC01724	/* Port H I/O Mask Disable Interrupt B Register			*/
+#define PORTHIO_MASKB_SET		0xFFC01728	/* Port H I/O Mask Enable Interrupt B Register			*/
+#define PORTHIO_MASKB_TOGGLE	0xFFC0172C	/* Port H I/O Mask Toggle Enable Interrupt B Register	*/
+#define PORTHIO_DIR				0xFFC01730	/* Port H I/O Direction Register						*/
+#define PORTHIO_POLAR			0xFFC01734	/* Port H I/O Source Polarity Register					*/
+#define PORTHIO_EDGE			0xFFC01738	/* Port H I/O Source Sensitivity Register				*/
+#define PORTHIO_BOTH			0xFFC0173C	/* Port H I/O Set on BOTH Edges Register				*/
+#define PORTHIO_INEN			0xFFC01740	/* Port H I/O Input Enable Register						*/
+
+
+/* UART1 Controller		(0xFFC02000 - 0xFFC020FF)								*/
+#define UART1_THR			0xFFC02000	/* Transmit Holding register			*/
+#define UART1_RBR			0xFFC02000	/* Receive Buffer register				*/
+#define UART1_DLL			0xFFC02000	/* Divisor Latch (Low-Byte)				*/
+#define UART1_IER			0xFFC02004	/* Interrupt Enable Register			*/
+#define UART1_DLH			0xFFC02004	/* Divisor Latch (High-Byte)			*/
+#define UART1_IIR			0xFFC02008	/* Interrupt Identification Register	*/
+#define UART1_LCR			0xFFC0200C	/* Line Control Register				*/
+#define UART1_MCR			0xFFC02010	/* Modem Control Register				*/
+#define UART1_LSR			0xFFC02014	/* Line Status Register					*/
+#define UART1_MSR			0xFFC02018	/* Modem Status Register				*/
+#define UART1_SCR			0xFFC0201C	/* SCR Scratch Register					*/
+#define UART1_GCTL			0xFFC02024	/* Global Control Register				*/
+
+
+/* Pin Control Registers	(0xFFC03200 - 0xFFC032FF)											*/
+#define PORTF_FER			0xFFC03200	/* Port F Function Enable Register (Alternate/Flag*)	*/
+#define PORTG_FER			0xFFC03204	/* Port G Function Enable Register (Alternate/Flag*)	*/
+#define PORTH_FER			0xFFC03208	/* Port H Function Enable Register (Alternate/Flag*)	*/
+#define BFIN_PORT_MUX			0xFFC0320C	/* Port Multiplexer Control Register					*/
+
+
+/* Handshake MDMA Registers	(0xFFC03300 - 0xFFC033FF)										*/
+#define HMDMA0_CONTROL		0xFFC03300	/* Handshake MDMA0 Control Register					*/
+#define HMDMA0_ECINIT		0xFFC03304	/* HMDMA0 Initial Edge Count Register				*/
+#define HMDMA0_BCINIT		0xFFC03308	/* HMDMA0 Initial Block Count Register				*/
+#define HMDMA0_ECURGENT		0xFFC0330C	/* HMDMA0 Urgent Edge Count Threshold Register		*/
+#define HMDMA0_ECOVERFLOW	0xFFC03310	/* HMDMA0 Edge Count Overflow Interrupt Register	*/
+#define HMDMA0_ECOUNT		0xFFC03314	/* HMDMA0 Current Edge Count Register				*/
+#define HMDMA0_BCOUNT		0xFFC03318	/* HMDMA0 Current Block Count Register				*/
+
+#define HMDMA1_CONTROL		0xFFC03340	/* Handshake MDMA1 Control Register					*/
+#define HMDMA1_ECINIT		0xFFC03344	/* HMDMA1 Initial Edge Count Register				*/
+#define HMDMA1_BCINIT		0xFFC03348	/* HMDMA1 Initial Block Count Register				*/
+#define HMDMA1_ECURGENT		0xFFC0334C	/* HMDMA1 Urgent Edge Count Threshold Register		*/
+#define HMDMA1_ECOVERFLOW	0xFFC03350	/* HMDMA1 Edge Count Overflow Interrupt Register	*/
+#define HMDMA1_ECOUNT		0xFFC03354	/* HMDMA1 Current Edge Count Register				*/
+#define HMDMA1_BCOUNT		0xFFC03358	/* HMDMA1 Current Block Count Register				*/
+
+
+/* GPIO PIN mux (0xFFC03210 - 0xFFC03288) */
+#define PORTF_MUX               0xFFC03210      /* Port F mux control */
+#define PORTG_MUX               0xFFC03214      /* Port G mux control */
+#define PORTH_MUX               0xFFC03218      /* Port H mux control */
+#define PORTF_DRIVE             0xFFC03220      /* Port F drive strength control */
+#define PORTG_DRIVE             0xFFC03224      /* Port G drive strength control */
+#define PORTH_DRIVE             0xFFC03228      /* Port H drive strength control */
+#define PORTF_SLEW              0xFFC03230      /* Port F slew control */
+#define PORTG_SLEW              0xFFC03234      /* Port G slew control */
+#define PORTH_SLEW              0xFFC03238      /* Port H slew control */
+#define PORTF_HYSTERISIS        0xFFC03240      /* Port F Schmitt trigger control */
+#define PORTG_HYSTERISIS        0xFFC03244      /* Port G Schmitt trigger control */
+#define PORTH_HYSTERISIS        0xFFC03248      /* Port H Schmitt trigger control */
+#define MISCPORT_DRIVE          0xFFC03280      /* Misc Port drive strength control */
+#define MISCPORT_SLEW           0xFFC03284      /* Misc Port slew control */
+#define MISCPORT_HYSTERISIS     0xFFC03288      /* Misc Port Schmitt trigger control */
+
+
+/***********************************************************************************
+** System MMR Register Bits And Macros
+**
+** Disclaimer:	All macros are intended to make C and Assembly code more readable.
+**				Use these macros carefully, as any that do left shifts for field
+**				depositing will result in the lower order bits being destroyed.  Any
+**				macro that shifts left to properly position the bit-field should be
+**				used as part of an OR to initialize a register and NOT as a dynamic
+**				modifier UNLESS the lower order bits are saved and ORed back in when
+**				the macro is used.
+*************************************************************************************/
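+
+/*
+** Illustrative sketch (not part of the kernel sources): a field-depositing
+** macro such as the Px_IVG() helpers defined further below is meant to be
+** ORed together with its siblings when the register is first written, e.g.
+**
+**	bfin_write32(SIC_IAR0, P0_IVG(7) | P1_IVG(7) | P2_IVG(8) | P3_IVG(8));
+**
+** rather than applied to a live register, unless the remaining bit-fields
+** are read back and ORed in again, as the disclaimer above describes.
+*/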
+
+/* CHIPID Masks */
+#define CHIPID_VERSION         0xF0000000
+#define CHIPID_FAMILY          0x0FFFF000
+#define CHIPID_MANUFACTURE     0x00000FFE
+
+/* SWRST Masks																		*/
+#define SYSTEM_RESET		0x0007	/* Initiates A System Software Reset			*/
+#define	DOUBLE_FAULT		0x0008	/* Core Double Fault Causes Reset				*/
+#define RESET_DOUBLE		0x2000	/* SW Reset Generated By Core Double-Fault		*/
+#define RESET_WDOG			0x4000	/* SW Reset Generated By Watchdog Timer			*/
+#define RESET_SOFTWARE		0x8000	/* SW Reset Occurred Since Last Read Of SWRST	*/
+
+/* SYSCR Masks																				*/
+#define BMODE				0x0007	/* Boot Mode - Latched During HW Reset From Mode Pins	*/
+#define	NOBOOT				0x0010	/* Execute From L1 or ASYNC Bank 0 When BMODE = 0		*/
+
+
+/* *************  SYSTEM INTERRUPT CONTROLLER MASKS *************************************/
+/* Peripheral Masks For SIC_ISR, SIC_IWR, SIC_IMASK										*/
+
+#if 0
+#define IRQ_PLL_WAKEUP	0x00000001	/* PLL Wakeup Interrupt			 					*/
+
+#define IRQ_ERROR1      0x00000002  /* Error Interrupt (DMA, DMARx Block, DMARx Overflow) */
+#define IRQ_ERROR2      0x00000004  /* Error Interrupt (CAN, Ethernet, SPORTx, PPI, SPI, UARTx) */
+#define IRQ_RTC			0x00000008	/* Real Time Clock Interrupt 						*/
+#define IRQ_DMA0		0x00000010	/* DMA Channel 0 (PPI) Interrupt 					*/
+#define IRQ_DMA3		0x00000020	/* DMA Channel 3 (SPORT0 RX) Interrupt 				*/
+#define IRQ_DMA4		0x00000040	/* DMA Channel 4 (SPORT0 TX) Interrupt 				*/
+#define IRQ_DMA5		0x00000080	/* DMA Channel 5 (SPORT1 RX) Interrupt 				*/
+
+#define IRQ_DMA6		0x00000100	/* DMA Channel 6 (SPORT1 TX) Interrupt 		 		*/
+#define IRQ_TWI			0x00000200	/* TWI Interrupt									*/
+#define IRQ_DMA7		0x00000400	/* DMA Channel 7 (SPI) Interrupt 					*/
+#define IRQ_DMA8		0x00000800	/* DMA Channel 8 (UART0 RX) Interrupt 				*/
+#define IRQ_DMA9		0x00001000	/* DMA Channel 9 (UART0 TX) Interrupt 				*/
+#define IRQ_DMA10		0x00002000	/* DMA Channel 10 (UART1 RX) Interrupt 				*/
+#define IRQ_DMA11		0x00004000	/* DMA Channel 11 (UART1 TX) Interrupt 				*/
+#define IRQ_CAN_RX		0x00008000	/* CAN Receive Interrupt 							*/
+
+#define IRQ_CAN_TX		0x00010000	/* CAN Transmit Interrupt  							*/
+#define IRQ_DMA1		0x00020000	/* DMA Channel 1 (Ethernet RX) Interrupt 			*/
+#define IRQ_PFA_PORTH	0x00020000	/* PF Port H (PF47:32) Interrupt A 					*/
+#define IRQ_DMA2		0x00040000	/* DMA Channel 2 (Ethernet TX) Interrupt 			*/
+#define IRQ_PFB_PORTH	0x00040000	/* PF Port H (PF47:32) Interrupt B 					*/
+#define IRQ_TIMER0		0x00080000	/* Timer 0 Interrupt								*/
+#define IRQ_TIMER1		0x00100000	/* Timer 1 Interrupt 								*/
+#define IRQ_TIMER2		0x00200000	/* Timer 2 Interrupt 								*/
+#define IRQ_TIMER3		0x00400000	/* Timer 3 Interrupt 								*/
+#define IRQ_TIMER4		0x00800000	/* Timer 4 Interrupt 								*/
+
+#define IRQ_TIMER5		0x01000000	/* Timer 5 Interrupt 								*/
+#define IRQ_TIMER6		0x02000000	/* Timer 6 Interrupt 								*/
+#define IRQ_TIMER7		0x04000000	/* Timer 7 Interrupt 								*/
+#define IRQ_PFA_PORTFG	0x08000000	/* PF Ports F&G (PF31:0) Interrupt A 				*/
+#define IRQ_PFB_PORTF	0x80000000	/* PF Port F (PF15:0) Interrupt B 					*/
+#define IRQ_DMA12		0x20000000	/* DMA Channels 12 (MDMA1 Source) RX Interrupt 		*/
+#define IRQ_DMA13		0x20000000	/* DMA Channels 13 (MDMA1 Destination) TX Interrupt */
+#define IRQ_DMA14		0x40000000	/* DMA Channels 14 (MDMA0 Source) RX Interrupt 		*/
+#define IRQ_DMA15		0x40000000	/* DMA Channels 15 (MDMA0 Destination) TX Interrupt */
+#define IRQ_WDOG		0x80000000	/* Software Watchdog Timer Interrupt 				*/
+#define IRQ_PFB_PORTG	0x10000000	/* PF Port G (PF31:16) Interrupt B 					*/
+#endif
+
+/* SIC_IAR0 Macros															*/
+#define P0_IVG(x)		(((x)&0xF)-7)			/* Peripheral #0 assigned IVG #x 	*/
+#define P1_IVG(x)		(((x)&0xF)-7) << 0x4	/* Peripheral #1 assigned IVG #x 	*/
+#define P2_IVG(x)		(((x)&0xF)-7) << 0x8	/* Peripheral #2 assigned IVG #x 	*/
+#define P3_IVG(x)		(((x)&0xF)-7) << 0xC	/* Peripheral #3 assigned IVG #x	*/
+#define P4_IVG(x)		(((x)&0xF)-7) << 0x10	/* Peripheral #4 assigned IVG #x	*/
+#define P5_IVG(x)		(((x)&0xF)-7) << 0x14	/* Peripheral #5 assigned IVG #x	*/
+#define P6_IVG(x)		(((x)&0xF)-7) << 0x18	/* Peripheral #6 assigned IVG #x	*/
+#define P7_IVG(x)		(((x)&0xF)-7) << 0x1C	/* Peripheral #7 assigned IVG #x	*/
+
+/* SIC_IAR1 Macros															*/
+#define P8_IVG(x)		(((x)&0xF)-7)			/* Peripheral #8 assigned IVG #x 	*/
+#define P9_IVG(x)		(((x)&0xF)-7) << 0x4	/* Peripheral #9 assigned IVG #x 	*/
+#define P10_IVG(x)		(((x)&0xF)-7) << 0x8	/* Peripheral #10 assigned IVG #x	*/
+#define P11_IVG(x)		(((x)&0xF)-7) << 0xC	/* Peripheral #11 assigned IVG #x 	*/
+#define P12_IVG(x)		(((x)&0xF)-7) << 0x10	/* Peripheral #12 assigned IVG #x	*/
+#define P13_IVG(x)		(((x)&0xF)-7) << 0x14	/* Peripheral #13 assigned IVG #x	*/
+#define P14_IVG(x)		(((x)&0xF)-7) << 0x18	/* Peripheral #14 assigned IVG #x	*/
+#define P15_IVG(x)		(((x)&0xF)-7) << 0x1C	/* Peripheral #15 assigned IVG #x	*/
+
+/* SIC_IAR2 Macros															*/
+#define P16_IVG(x)		(((x)&0xF)-7)			/* Peripheral #16 assigned IVG #x	*/
+#define P17_IVG(x)		(((x)&0xF)-7) << 0x4	/* Peripheral #17 assigned IVG #x	*/
+#define P18_IVG(x)		(((x)&0xF)-7) << 0x8	/* Peripheral #18 assigned IVG #x	*/
+#define P19_IVG(x)		(((x)&0xF)-7) << 0xC	/* Peripheral #19 assigned IVG #x	*/
+#define P20_IVG(x)		(((x)&0xF)-7) << 0x10	/* Peripheral #20 assigned IVG #x	*/
+#define P21_IVG(x)		(((x)&0xF)-7) << 0x14	/* Peripheral #21 assigned IVG #x	*/
+#define P22_IVG(x)		(((x)&0xF)-7) << 0x18	/* Peripheral #22 assigned IVG #x	*/
+#define P23_IVG(x)		(((x)&0xF)-7) << 0x1C	/* Peripheral #23 assigned IVG #x	*/
+
+/* SIC_IAR3 Macros															*/
+#define P24_IVG(x)		(((x)&0xF)-7)			/* Peripheral #24 assigned IVG #x	*/
+#define P25_IVG(x)		(((x)&0xF)-7) << 0x4	/* Peripheral #25 assigned IVG #x	*/
+#define P26_IVG(x)		(((x)&0xF)-7) << 0x8	/* Peripheral #26 assigned IVG #x	*/
+#define P27_IVG(x)		(((x)&0xF)-7) << 0xC	/* Peripheral #27 assigned IVG #x	*/
+#define P28_IVG(x)		(((x)&0xF)-7) << 0x10	/* Peripheral #28 assigned IVG #x	*/
+#define P29_IVG(x)		(((x)&0xF)-7) << 0x14	/* Peripheral #29 assigned IVG #x	*/
+#define P30_IVG(x)		(((x)&0xF)-7) << 0x18	/* Peripheral #30 assigned IVG #x	*/
+#define P31_IVG(x)		(((x)&0xF)-7) << 0x1C	/* Peripheral #31 assigned IVG #x	*/
+
+
+/* SIC_IMASK Masks																		*/
+#define SIC_UNMASK_ALL	0x00000000					/* Unmask all peripheral interrupts	*/
+#define SIC_MASK_ALL	0xFFFFFFFF					/* Mask all peripheral interrupts	*/
+#define SIC_MASK(x)		(1 << ((x)&0x1F))					/* Mask Peripheral #x interrupt		*/
+#define SIC_UNMASK(x)	(0xFFFFFFFF ^ (1 << ((x)&0x1F)))	/* Unmask Peripheral #x interrupt	*/
+
+/* SIC_IWR Masks																		*/
+#define IWR_DISABLE_ALL	0x00000000					/* Wakeup Disable all peripherals	*/
+#define IWR_ENABLE_ALL	0xFFFFFFFF					/* Wakeup Enable all peripherals	*/
+#define IWR_ENABLE(x)	(1 << ((x)&0x1F))					/* Wakeup Enable Peripheral #x		*/
+#define IWR_DISABLE(x)	(0xFFFFFFFF ^ (1 << ((x)&0x1F))) 	/* Wakeup Disable Peripheral #x		*/
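+
+/*
+** Illustrative sketch (not part of the kernel sources): the per-peripheral
+** helpers above compose with a read-modify-write of the mask/wakeup
+** registers, keeping the other bits intact, e.g.
+**
+**	bfin_write32(SIC_IMASK0, bfin_read32(SIC_IMASK0) | SIC_MASK(10));
+**	bfin_write32(SIC_IWR0,   bfin_read32(SIC_IWR0)   | IWR_ENABLE(10));
+*/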
+
+/*  ****************  GENERAL PURPOSE TIMER MASKS  **********************/
+/* TIMER_ENABLE Masks													*/
+#define TIMEN0			0x0001		/* Enable Timer 0					*/
+#define TIMEN1			0x0002		/* Enable Timer 1					*/
+#define TIMEN2			0x0004		/* Enable Timer 2					*/
+#define TIMEN3			0x0008		/* Enable Timer 3					*/
+#define TIMEN4			0x0010		/* Enable Timer 4					*/
+#define TIMEN5			0x0020		/* Enable Timer 5					*/
+#define TIMEN6			0x0040		/* Enable Timer 6					*/
+#define TIMEN7			0x0080		/* Enable Timer 7					*/
+
+/* TIMER_DISABLE Masks													*/
+#define TIMDIS0			TIMEN0		/* Disable Timer 0					*/
+#define TIMDIS1			TIMEN1		/* Disable Timer 1					*/
+#define TIMDIS2			TIMEN2		/* Disable Timer 2					*/
+#define TIMDIS3			TIMEN3		/* Disable Timer 3					*/
+#define TIMDIS4			TIMEN4		/* Disable Timer 4					*/
+#define TIMDIS5			TIMEN5		/* Disable Timer 5					*/
+#define TIMDIS6			TIMEN6		/* Disable Timer 6					*/
+#define TIMDIS7			TIMEN7		/* Disable Timer 7					*/
+
+/* TIMER_STATUS Masks													*/
+#define TIMIL0			0x00000001	/* Timer 0 Interrupt				*/
+#define TIMIL1			0x00000002	/* Timer 1 Interrupt				*/
+#define TIMIL2			0x00000004	/* Timer 2 Interrupt				*/
+#define TIMIL3			0x00000008	/* Timer 3 Interrupt				*/
+#define TOVF_ERR0		0x00000010	/* Timer 0 Counter Overflow			*/
+#define TOVF_ERR1		0x00000020	/* Timer 1 Counter Overflow			*/
+#define TOVF_ERR2		0x00000040	/* Timer 2 Counter Overflow			*/
+#define TOVF_ERR3		0x00000080	/* Timer 3 Counter Overflow			*/
+#define TRUN0			0x00001000	/* Timer 0 Slave Enable Status		*/
+#define TRUN1			0x00002000	/* Timer 1 Slave Enable Status		*/
+#define TRUN2			0x00004000	/* Timer 2 Slave Enable Status		*/
+#define TRUN3			0x00008000	/* Timer 3 Slave Enable Status		*/
+#define TIMIL4			0x00010000	/* Timer 4 Interrupt				*/
+#define TIMIL5			0x00020000	/* Timer 5 Interrupt				*/
+#define TIMIL6			0x00040000	/* Timer 6 Interrupt				*/
+#define TIMIL7			0x00080000	/* Timer 7 Interrupt				*/
+#define TOVF_ERR4		0x00100000	/* Timer 4 Counter Overflow			*/
+#define TOVF_ERR5		0x00200000	/* Timer 5 Counter Overflow			*/
+#define TOVF_ERR6		0x00400000	/* Timer 6 Counter Overflow			*/
+#define TOVF_ERR7		0x00800000	/* Timer 7 Counter Overflow			*/
+#define TRUN4			0x10000000	/* Timer 4 Slave Enable Status		*/
+#define TRUN5			0x20000000	/* Timer 5 Slave Enable Status		*/
+#define TRUN6			0x40000000	/* Timer 6 Slave Enable Status		*/
+#define TRUN7			0x80000000	/* Timer 7 Slave Enable Status		*/
+
+/* Alternate Deprecated Macros Provided For Backwards Code Compatibility */
+#define TOVL_ERR0 TOVF_ERR0
+#define TOVL_ERR1 TOVF_ERR1
+#define TOVL_ERR2 TOVF_ERR2
+#define TOVL_ERR3 TOVF_ERR3
+#define TOVL_ERR4 TOVF_ERR4
+#define TOVL_ERR5 TOVF_ERR5
+#define TOVL_ERR6 TOVF_ERR6
+#define TOVL_ERR7 TOVF_ERR7
+
+/* TIMERx_CONFIG Masks													*/
+#define PWM_OUT			0x0001	/* Pulse-Width Modulation Output Mode	*/
+#define WDTH_CAP		0x0002	/* Width Capture Input Mode				*/
+#define EXT_CLK			0x0003	/* External Clock Mode					*/
+#define PULSE_HI		0x0004	/* Action Pulse (Positive/Negative*)	*/
+#define PERIOD_CNT		0x0008	/* Period Count							*/
+#define IRQ_ENA			0x0010	/* Interrupt Request Enable				*/
+#define TIN_SEL			0x0020	/* Timer Input Select					*/
+#define OUT_DIS			0x0040	/* Output Pad Disable					*/
+#define CLK_SEL			0x0080	/* Timer Clock Select					*/
+#define TOGGLE_HI		0x0100	/* PWM_OUT PULSE_HI Toggle Mode			*/
+#define EMU_RUN			0x0200	/* Emulation Behavior Select			*/
+#define ERR_TYP			0xC000	/* Error Type							*/
+
+/* *********************  ASYNCHRONOUS MEMORY CONTROLLER MASKS  *************************/
+/* EBIU_AMGCTL Masks																	*/
+#define AMCKEN			0x0001		/* Enable CLKOUT									*/
+#define	AMBEN_NONE		0x0000		/* All Banks Disabled								*/
+#define AMBEN_B0		0x0002		/* Enable Async Memory Bank 0 only					*/
+#define AMBEN_B0_B1		0x0004		/* Enable Async Memory Banks 0 & 1 only				*/
+#define AMBEN_B0_B1_B2	0x0006		/* Enable Async Memory Banks 0, 1, and 2			*/
+#define AMBEN_ALL		0x0008		/* Enable Async Memory Banks (all) 0, 1, 2, and 3	*/
+
+/* EBIU_AMBCTL0 Masks																	*/
+#define B0RDYEN			0x00000001  /* Bank 0 (B0) RDY Enable							*/
+#define B0RDYPOL		0x00000002  /* B0 RDY Active High								*/
+#define B0TT_1			0x00000004  /* B0 Transition Time (Read to Write) = 1 cycle		*/
+#define B0TT_2			0x00000008  /* B0 Transition Time (Read to Write) = 2 cycles	*/
+#define B0TT_3			0x0000000C  /* B0 Transition Time (Read to Write) = 3 cycles	*/
+#define B0TT_4			0x00000000  /* B0 Transition Time (Read to Write) = 4 cycles	*/
+#define B0ST_1			0x00000010  /* B0 Setup Time (AOE to Read/Write) = 1 cycle		*/
+#define B0ST_2			0x00000020  /* B0 Setup Time (AOE to Read/Write) = 2 cycles		*/
+#define B0ST_3			0x00000030  /* B0 Setup Time (AOE to Read/Write) = 3 cycles		*/
+#define B0ST_4			0x00000000  /* B0 Setup Time (AOE to Read/Write) = 4 cycles		*/
+#define B0HT_1			0x00000040  /* B0 Hold Time (~Read/Write to ~AOE) = 1 cycle		*/
+#define B0HT_2			0x00000080  /* B0 Hold Time (~Read/Write to ~AOE) = 2 cycles	*/
+#define B0HT_3			0x000000C0  /* B0 Hold Time (~Read/Write to ~AOE) = 3 cycles	*/
+#define B0HT_0			0x00000000  /* B0 Hold Time (~Read/Write to ~AOE) = 0 cycles	*/
+#define B0RAT_1			0x00000100  /* B0 Read Access Time = 1 cycle					*/
+#define B0RAT_2			0x00000200  /* B0 Read Access Time = 2 cycles					*/
+#define B0RAT_3			0x00000300  /* B0 Read Access Time = 3 cycles					*/
+#define B0RAT_4			0x00000400  /* B0 Read Access Time = 4 cycles					*/
+#define B0RAT_5			0x00000500  /* B0 Read Access Time = 5 cycles					*/
+#define B0RAT_6			0x00000600  /* B0 Read Access Time = 6 cycles					*/
+#define B0RAT_7			0x00000700  /* B0 Read Access Time = 7 cycles					*/
+#define B0RAT_8			0x00000800  /* B0 Read Access Time = 8 cycles					*/
+#define B0RAT_9			0x00000900  /* B0 Read Access Time = 9 cycles					*/
+#define B0RAT_10		0x00000A00  /* B0 Read Access Time = 10 cycles					*/
+#define B0RAT_11		0x00000B00  /* B0 Read Access Time = 11 cycles					*/
+#define B0RAT_12		0x00000C00  /* B0 Read Access Time = 12 cycles					*/
+#define B0RAT_13		0x00000D00  /* B0 Read Access Time = 13 cycles					*/
+#define B0RAT_14		0x00000E00  /* B0 Read Access Time = 14 cycles					*/
+#define B0RAT_15		0x00000F00  /* B0 Read Access Time = 15 cycles					*/
+#define B0WAT_1			0x00001000  /* B0 Write Access Time = 1 cycle					*/
+#define B0WAT_2			0x00002000  /* B0 Write Access Time = 2 cycles					*/
+#define B0WAT_3			0x00003000  /* B0 Write Access Time = 3 cycles					*/
+#define B0WAT_4			0x00004000  /* B0 Write Access Time = 4 cycles					*/
+#define B0WAT_5			0x00005000  /* B0 Write Access Time = 5 cycles					*/
+#define B0WAT_6			0x00006000  /* B0 Write Access Time = 6 cycles					*/
+#define B0WAT_7			0x00007000  /* B0 Write Access Time = 7 cycles					*/
+#define B0WAT_8			0x00008000  /* B0 Write Access Time = 8 cycles					*/
+#define B0WAT_9			0x00009000  /* B0 Write Access Time = 9 cycles					*/
+#define B0WAT_10		0x0000A000  /* B0 Write Access Time = 10 cycles					*/
+#define B0WAT_11		0x0000B000  /* B0 Write Access Time = 11 cycles					*/
+#define B0WAT_12		0x0000C000  /* B0 Write Access Time = 12 cycles					*/
+#define B0WAT_13		0x0000D000  /* B0 Write Access Time = 13 cycles					*/
+#define B0WAT_14		0x0000E000  /* B0 Write Access Time = 14 cycles					*/
+#define B0WAT_15		0x0000F000  /* B0 Write Access Time = 15 cycles					*/
+
+#define B1RDYEN			0x00010000  /* Bank 1 (B1) RDY Enable                       	*/
+#define B1RDYPOL		0x00020000  /* B1 RDY Active High                           	*/
+#define B1TT_1			0x00040000  /* B1 Transition Time (Read to Write) = 1 cycle 	*/
+#define B1TT_2			0x00080000  /* B1 Transition Time (Read to Write) = 2 cycles	*/
+#define B1TT_3			0x000C0000  /* B1 Transition Time (Read to Write) = 3 cycles	*/
+#define B1TT_4			0x00000000  /* B1 Transition Time (Read to Write) = 4 cycles	*/
+#define B1ST_1			0x00100000  /* B1 Setup Time (AOE to Read/Write) = 1 cycle  	*/
+#define B1ST_2			0x00200000  /* B1 Setup Time (AOE to Read/Write) = 2 cycles 	*/
+#define B1ST_3			0x00300000  /* B1 Setup Time (AOE to Read/Write) = 3 cycles 	*/
+#define B1ST_4			0x00000000  /* B1 Setup Time (AOE to Read/Write) = 4 cycles 	*/
+#define B1HT_1			0x00400000  /* B1 Hold Time (~Read/Write to ~AOE) = 1 cycle 	*/
+#define B1HT_2			0x00800000  /* B1 Hold Time (~Read/Write to ~AOE) = 2 cycles	*/
+#define B1HT_3			0x00C00000  /* B1 Hold Time (~Read/Write to ~AOE) = 3 cycles	*/
+#define B1HT_0			0x00000000  /* B1 Hold Time (~Read/Write to ~AOE) = 0 cycles	*/
+#define B1RAT_1			0x01000000  /* B1 Read Access Time = 1 cycle					*/
+#define B1RAT_2			0x02000000  /* B1 Read Access Time = 2 cycles					*/
+#define B1RAT_3			0x03000000  /* B1 Read Access Time = 3 cycles					*/
+#define B1RAT_4			0x04000000  /* B1 Read Access Time = 4 cycles					*/
+#define B1RAT_5			0x05000000  /* B1 Read Access Time = 5 cycles					*/
+#define B1RAT_6			0x06000000  /* B1 Read Access Time = 6 cycles					*/
+#define B1RAT_7			0x07000000  /* B1 Read Access Time = 7 cycles					*/
+#define B1RAT_8			0x08000000  /* B1 Read Access Time = 8 cycles					*/
+#define B1RAT_9			0x09000000  /* B1 Read Access Time = 9 cycles					*/
+#define B1RAT_10		0x0A000000  /* B1 Read Access Time = 10 cycles					*/
+#define B1RAT_11		0x0B000000  /* B1 Read Access Time = 11 cycles					*/
+#define B1RAT_12		0x0C000000  /* B1 Read Access Time = 12 cycles					*/
+#define B1RAT_13		0x0D000000  /* B1 Read Access Time = 13 cycles					*/
+#define B1RAT_14		0x0E000000  /* B1 Read Access Time = 14 cycles					*/
+#define B1RAT_15		0x0F000000  /* B1 Read Access Time = 15 cycles					*/
+#define B1WAT_1			0x10000000  /* B1 Write Access Time = 1 cycle					*/
+#define B1WAT_2			0x20000000  /* B1 Write Access Time = 2 cycles					*/
+#define B1WAT_3			0x30000000  /* B1 Write Access Time = 3 cycles					*/
+#define B1WAT_4			0x40000000  /* B1 Write Access Time = 4 cycles					*/
+#define B1WAT_5			0x50000000  /* B1 Write Access Time = 5 cycles					*/
+#define B1WAT_6			0x60000000  /* B1 Write Access Time = 6 cycles					*/
+#define B1WAT_7			0x70000000  /* B1 Write Access Time = 7 cycles					*/
+#define B1WAT_8			0x80000000  /* B1 Write Access Time = 8 cycles					*/
+#define B1WAT_9			0x90000000  /* B1 Write Access Time = 9 cycles					*/
+#define B1WAT_10		0xA0000000  /* B1 Write Access Time = 10 cycles					*/
+#define B1WAT_11		0xB0000000  /* B1 Write Access Time = 11 cycles					*/
+#define B1WAT_12		0xC0000000  /* B1 Write Access Time = 12 cycles					*/
+#define B1WAT_13		0xD0000000  /* B1 Write Access Time = 13 cycles					*/
+#define B1WAT_14		0xE0000000  /* B1 Write Access Time = 14 cycles					*/
+#define B1WAT_15		0xF0000000  /* B1 Write Access Time = 15 cycles					*/
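+
+/* Illustrative sketch (not from the original header): the bank timing macros
+ * above are meant to be OR'ed together, one field per bank, to form the
+ * EBIU_AMBCTL0 value.  The cycle counts below are arbitrary example settings,
+ * assuming the usual bfin_write_EBIU_AMBCTL0() accessor from the matching
+ * cdef header:
+ *
+ *	bfin_write_EBIU_AMBCTL0(B0RDYEN | B0TT_2 | B0ST_2 | B0HT_1 | B0RAT_8 |
+ *				B0WAT_8 | B1TT_2 | B1ST_2 | B1HT_1 |
+ *				B1RAT_8 | B1WAT_8);
+ */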
+
+/* EBIU_AMBCTL1 Masks																	*/
+#define B2RDYEN			0x00000001  /* Bank 2 (B2) RDY Enable							*/
+#define B2RDYPOL		0x00000002  /* B2 RDY Active High								*/
+#define B2TT_1			0x00000004  /* B2 Transition Time (Read to Write) = 1 cycle		*/
+#define B2TT_2			0x00000008  /* B2 Transition Time (Read to Write) = 2 cycles	*/
+#define B2TT_3			0x0000000C  /* B2 Transition Time (Read to Write) = 3 cycles	*/
+#define B2TT_4			0x00000000  /* B2 Transition Time (Read to Write) = 4 cycles	*/
+#define B2ST_1			0x00000010  /* B2 Setup Time (AOE to Read/Write) = 1 cycle		*/
+#define B2ST_2			0x00000020  /* B2 Setup Time (AOE to Read/Write) = 2 cycles		*/
+#define B2ST_3			0x00000030  /* B2 Setup Time (AOE to Read/Write) = 3 cycles		*/
+#define B2ST_4			0x00000000  /* B2 Setup Time (AOE to Read/Write) = 4 cycles		*/
+#define B2HT_1			0x00000040  /* B2 Hold Time (~Read/Write to ~AOE) = 1 cycle		*/
+#define B2HT_2			0x00000080  /* B2 Hold Time (~Read/Write to ~AOE) = 2 cycles	*/
+#define B2HT_3			0x000000C0  /* B2 Hold Time (~Read/Write to ~AOE) = 3 cycles	*/
+#define B2HT_0			0x00000000  /* B2 Hold Time (~Read/Write to ~AOE) = 0 cycles	*/
+#define B2RAT_1			0x00000100  /* B2 Read Access Time = 1 cycle					*/
+#define B2RAT_2			0x00000200  /* B2 Read Access Time = 2 cycles					*/
+#define B2RAT_3			0x00000300  /* B2 Read Access Time = 3 cycles					*/
+#define B2RAT_4			0x00000400  /* B2 Read Access Time = 4 cycles					*/
+#define B2RAT_5			0x00000500  /* B2 Read Access Time = 5 cycles					*/
+#define B2RAT_6			0x00000600  /* B2 Read Access Time = 6 cycles					*/
+#define B2RAT_7			0x00000700  /* B2 Read Access Time = 7 cycles					*/
+#define B2RAT_8			0x00000800  /* B2 Read Access Time = 8 cycles					*/
+#define B2RAT_9			0x00000900  /* B2 Read Access Time = 9 cycles					*/
+#define B2RAT_10		0x00000A00  /* B2 Read Access Time = 10 cycles					*/
+#define B2RAT_11		0x00000B00  /* B2 Read Access Time = 11 cycles					*/
+#define B2RAT_12		0x00000C00  /* B2 Read Access Time = 12 cycles					*/
+#define B2RAT_13		0x00000D00  /* B2 Read Access Time = 13 cycles					*/
+#define B2RAT_14		0x00000E00  /* B2 Read Access Time = 14 cycles					*/
+#define B2RAT_15		0x00000F00  /* B2 Read Access Time = 15 cycles					*/
+#define B2WAT_1			0x00001000  /* B2 Write Access Time = 1 cycle					*/
+#define B2WAT_2			0x00002000  /* B2 Write Access Time = 2 cycles					*/
+#define B2WAT_3			0x00003000  /* B2 Write Access Time = 3 cycles					*/
+#define B2WAT_4			0x00004000  /* B2 Write Access Time = 4 cycles					*/
+#define B2WAT_5			0x00005000  /* B2 Write Access Time = 5 cycles					*/
+#define B2WAT_6			0x00006000  /* B2 Write Access Time = 6 cycles					*/
+#define B2WAT_7			0x00007000  /* B2 Write Access Time = 7 cycles					*/
+#define B2WAT_8			0x00008000  /* B2 Write Access Time = 8 cycles					*/
+#define B2WAT_9			0x00009000  /* B2 Write Access Time = 9 cycles					*/
+#define B2WAT_10		0x0000A000  /* B2 Write Access Time = 10 cycles					*/
+#define B2WAT_11		0x0000B000  /* B2 Write Access Time = 11 cycles					*/
+#define B2WAT_12		0x0000C000  /* B2 Write Access Time = 12 cycles					*/
+#define B2WAT_13		0x0000D000  /* B2 Write Access Time = 13 cycles					*/
+#define B2WAT_14		0x0000E000  /* B2 Write Access Time = 14 cycles					*/
+#define B2WAT_15		0x0000F000  /* B2 Write Access Time = 15 cycles					*/
+
+#define B3RDYEN			0x00010000  /* Bank 3 (B3) RDY Enable							*/
+#define B3RDYPOL		0x00020000  /* B3 RDY Active High								*/
+#define B3TT_1			0x00040000  /* B3 Transition Time (Read to Write) = 1 cycle		*/
+#define B3TT_2			0x00080000  /* B3 Transition Time (Read to Write) = 2 cycles	*/
+#define B3TT_3			0x000C0000  /* B3 Transition Time (Read to Write) = 3 cycles	*/
+#define B3TT_4			0x00000000  /* B3 Transition Time (Read to Write) = 4 cycles	*/
+#define B3ST_1			0x00100000  /* B3 Setup Time (AOE to Read/Write) = 1 cycle		*/
+#define B3ST_2			0x00200000  /* B3 Setup Time (AOE to Read/Write) = 2 cycles		*/
+#define B3ST_3			0x00300000  /* B3 Setup Time (AOE to Read/Write) = 3 cycles		*/
+#define B3ST_4			0x00000000  /* B3 Setup Time (AOE to Read/Write) = 4 cycles		*/
+#define B3HT_1			0x00400000  /* B3 Hold Time (~Read/Write to ~AOE) = 1 cycle		*/
+#define B3HT_2			0x00800000  /* B3 Hold Time (~Read/Write to ~AOE) = 2 cycles	*/
+#define B3HT_3			0x00C00000  /* B3 Hold Time (~Read/Write to ~AOE) = 3 cycles	*/
+#define B3HT_0			0x00000000  /* B3 Hold Time (~Read/Write to ~AOE) = 0 cycles	*/
+#define B3RAT_1			0x01000000  /* B3 Read Access Time = 1 cycle					*/
+#define B3RAT_2			0x02000000  /* B3 Read Access Time = 2 cycles					*/
+#define B3RAT_3			0x03000000  /* B3 Read Access Time = 3 cycles					*/
+#define B3RAT_4			0x04000000  /* B3 Read Access Time = 4 cycles					*/
+#define B3RAT_5			0x05000000  /* B3 Read Access Time = 5 cycles					*/
+#define B3RAT_6			0x06000000  /* B3 Read Access Time = 6 cycles					*/
+#define B3RAT_7			0x07000000  /* B3 Read Access Time = 7 cycles					*/
+#define B3RAT_8			0x08000000  /* B3 Read Access Time = 8 cycles					*/
+#define B3RAT_9			0x09000000  /* B3 Read Access Time = 9 cycles					*/
+#define B3RAT_10		0x0A000000  /* B3 Read Access Time = 10 cycles					*/
+#define B3RAT_11		0x0B000000  /* B3 Read Access Time = 11 cycles					*/
+#define B3RAT_12		0x0C000000  /* B3 Read Access Time = 12 cycles					*/
+#define B3RAT_13		0x0D000000  /* B3 Read Access Time = 13 cycles					*/
+#define B3RAT_14		0x0E000000  /* B3 Read Access Time = 14 cycles					*/
+#define B3RAT_15		0x0F000000  /* B3 Read Access Time = 15 cycles					*/
+#define B3WAT_1			0x10000000  /* B3 Write Access Time = 1 cycle					*/
+#define B3WAT_2			0x20000000  /* B3 Write Access Time = 2 cycles					*/
+#define B3WAT_3			0x30000000  /* B3 Write Access Time = 3 cycles					*/
+#define B3WAT_4			0x40000000  /* B3 Write Access Time = 4 cycles					*/
+#define B3WAT_5			0x50000000  /* B3 Write Access Time = 5 cycles					*/
+#define B3WAT_6			0x60000000  /* B3 Write Access Time = 6 cycles					*/
+#define B3WAT_7			0x70000000  /* B3 Write Access Time = 7 cycles					*/
+#define B3WAT_8			0x80000000  /* B3 Write Access Time = 8 cycles					*/
+#define B3WAT_9			0x90000000  /* B3 Write Access Time = 9 cycles					*/
+#define B3WAT_10		0xA0000000  /* B3 Write Access Time = 10 cycles					*/
+#define B3WAT_11		0xB0000000  /* B3 Write Access Time = 11 cycles					*/
+#define B3WAT_12		0xC0000000  /* B3 Write Access Time = 12 cycles					*/
+#define B3WAT_13		0xD0000000  /* B3 Write Access Time = 13 cycles					*/
+#define B3WAT_14		0xE0000000  /* B3 Write Access Time = 14 cycles					*/
+#define B3WAT_15		0xF0000000  /* B3 Write Access Time = 15 cycles					*/
+
+
+/* **********************  SDRAM CONTROLLER MASKS  **********************************************/
+/* EBIU_SDGCTL Masks																			*/
+#define SCTLE			0x00000001	/* Enable SDRAM Signals										*/
+#define CL_2			0x00000008	/* SDRAM CAS Latency = 2 cycles								*/
+#define CL_3			0x0000000C	/* SDRAM CAS Latency = 3 cycles								*/
+#define PASR_ALL		0x00000000	/* All 4 SDRAM Banks Refreshed In Self-Refresh				*/
+#define PASR_B0_B1		0x00000010	/* SDRAM Banks 0 and 1 Are Refreshed In Self-Refresh		*/
+#define PASR_B0			0x00000020	/* Only SDRAM Bank 0 Is Refreshed In Self-Refresh			*/
+#define TRAS_1			0x00000040	/* SDRAM tRAS = 1 cycle										*/
+#define TRAS_2			0x00000080	/* SDRAM tRAS = 2 cycles									*/
+#define TRAS_3			0x000000C0	/* SDRAM tRAS = 3 cycles									*/
+#define TRAS_4			0x00000100	/* SDRAM tRAS = 4 cycles									*/
+#define TRAS_5			0x00000140	/* SDRAM tRAS = 5 cycles									*/
+#define TRAS_6			0x00000180	/* SDRAM tRAS = 6 cycles									*/
+#define TRAS_7			0x000001C0	/* SDRAM tRAS = 7 cycles									*/
+#define TRAS_8			0x00000200	/* SDRAM tRAS = 8 cycles									*/
+#define TRAS_9			0x00000240	/* SDRAM tRAS = 9 cycles									*/
+#define TRAS_10			0x00000280	/* SDRAM tRAS = 10 cycles									*/
+#define TRAS_11			0x000002C0	/* SDRAM tRAS = 11 cycles									*/
+#define TRAS_12			0x00000300	/* SDRAM tRAS = 12 cycles									*/
+#define TRAS_13			0x00000340	/* SDRAM tRAS = 13 cycles									*/
+#define TRAS_14			0x00000380	/* SDRAM tRAS = 14 cycles									*/
+#define TRAS_15			0x000003C0	/* SDRAM tRAS = 15 cycles									*/
+#define TRP_1			0x00000800	/* SDRAM tRP = 1 cycle										*/
+#define TRP_2			0x00001000	/* SDRAM tRP = 2 cycles										*/
+#define TRP_3			0x00001800	/* SDRAM tRP = 3 cycles										*/
+#define TRP_4			0x00002000	/* SDRAM tRP = 4 cycles										*/
+#define TRP_5			0x00002800	/* SDRAM tRP = 5 cycles										*/
+#define TRP_6			0x00003000	/* SDRAM tRP = 6 cycles										*/
+#define TRP_7			0x00003800	/* SDRAM tRP = 7 cycles										*/
+#define TRCD_1			0x00008000	/* SDRAM tRCD = 1 cycle										*/
+#define TRCD_2			0x00010000	/* SDRAM tRCD = 2 cycles									*/
+#define TRCD_3			0x00018000	/* SDRAM tRCD = 3 cycles									*/
+#define TRCD_4			0x00020000	/* SDRAM tRCD = 4 cycles									*/
+#define TRCD_5			0x00028000	/* SDRAM tRCD = 5 cycles									*/
+#define TRCD_6			0x00030000	/* SDRAM tRCD = 6 cycles									*/
+#define TRCD_7			0x00038000	/* SDRAM tRCD = 7 cycles									*/
+#define TWR_1			0x00080000	/* SDRAM tWR = 1 cycle										*/
+#define TWR_2			0x00100000	/* SDRAM tWR = 2 cycles										*/
+#define TWR_3			0x00180000	/* SDRAM tWR = 3 cycles										*/
+#define PUPSD			0x00200000	/* Power-Up Start Delay (15 SCLK Cycles Delay)				*/
+#define PSM				0x00400000	/* Power-Up Sequence (Mode Register Before/After* Refresh)	*/
+#define PSS				0x00800000	/* Enable Power-Up Sequence on Next SDRAM Access			*/
+#define SRFS			0x01000000	/* Enable SDRAM Self-Refresh Mode							*/
+#define EBUFE			0x02000000	/* Enable External Buffering Timing							*/
+#define FBBRW			0x04000000	/* Enable Fast Back-To-Back Read To Write					*/
+#define EMREN			0x10000000	/* Extended Mode Register Enable							*/
+#define TCSR			0x20000000	/* Temp-Compensated Self-Refresh Value (85/45* Deg C)		*/
+#define CDDBG			0x40000000	/* Tristate SDRAM Controls During Bus Grant					*/
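+
+/* Illustrative sketch (not from the original header): SDRAM timing is set by
+ * OR'ing the EBIU_SDGCTL fields above into a single value.  The timings below
+ * are arbitrary example settings, assuming the usual bfin_write_EBIU_SDGCTL()
+ * accessor from the matching cdef header:
+ *
+ *	bfin_write_EBIU_SDGCTL(SCTLE | CL_3 | TRAS_6 | TRP_3 | TRCD_3 | TWR_2);
+ */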
+
+/* EBIU_SDBCTL Masks																		*/
+#define EBE				0x0001		/* Enable SDRAM External Bank							*/
+#define EBSZ_16			0x0000		/* SDRAM External Bank Size = 16MB	*/
+#define EBSZ_32			0x0002		/* SDRAM External Bank Size = 32MB	*/
+#define EBSZ_64			0x0004		/* SDRAM External Bank Size = 64MB	*/
+#define EBSZ_128		0x0006		/* SDRAM External Bank Size = 128MB		*/
+#define EBSZ_256		0x0008		/* SDRAM External Bank Size = 256MB 	*/
+#define EBSZ_512		0x000A		/* SDRAM External Bank Size = 512MB		*/
+#define EBCAW_8			0x0000		/* SDRAM External Bank Column Address Width = 8 Bits	*/
+#define EBCAW_9			0x0010		/* SDRAM External Bank Column Address Width = 9 Bits	*/
+#define EBCAW_10		0x0020		/* SDRAM External Bank Column Address Width = 10 Bits	*/
+#define EBCAW_11		0x0030		/* SDRAM External Bank Column Address Width = 11 Bits	*/
+
+/* EBIU_SDSTAT Masks														*/
+#define SDCI			0x0001		/* SDRAM Controller Idle 				*/
+#define SDSRA			0x0002		/* SDRAM Self-Refresh Active			*/
+#define SDPUA			0x0004		/* SDRAM Power-Up Active 				*/
+#define SDRS			0x0008		/* SDRAM Will Power-Up On Next Access	*/
+#define SDEASE			0x0010		/* SDRAM EAB Sticky Error Status		*/
+#define BGSTAT			0x0020		/* Bus Grant Status						*/
+
+
+/* **************************  DMA CONTROLLER MASKS  ********************************/
+
+/* DMAx_PERIPHERAL_MAP, MDMA_yy_PERIPHERAL_MAP Masks								*/
+#define CTYPE			0x0040	/* DMA Channel Type Indicator (Memory/Peripheral*)	*/
+#define PMAP			0xF000	/* Peripheral Mapped To This Channel				*/
+#define PMAP_PPI		0x0000	/* 		PPI Port DMA								*/
+#define	PMAP_EMACRX		0x1000	/* 		Ethernet Receive DMA						*/
+#define PMAP_EMACTX		0x2000	/* 		Ethernet Transmit DMA						*/
+#define PMAP_SPORT0RX	0x3000	/* 		SPORT0 Receive DMA							*/
+#define PMAP_SPORT0TX	0x4000	/* 		SPORT0 Transmit DMA							*/
+#define PMAP_SPORT1RX	0x5000	/* 		SPORT1 Receive DMA							*/
+#define PMAP_SPORT1TX	0x6000	/* 		SPORT1 Transmit DMA							*/
+#define PMAP_SPI		0x7000	/* 		SPI Port DMA								*/
+#define PMAP_UART0RX	0x8000	/* 		UART0 Port Receive DMA						*/
+#define PMAP_UART0TX	0x9000	/* 		UART0 Port Transmit DMA						*/
+#define	PMAP_UART1RX	0xA000	/* 		UART1 Port Receive DMA						*/
+#define	PMAP_UART1TX	0xB000	/* 		UART1 Port Transmit DMA						*/
+
+/*  ************  PARALLEL PERIPHERAL INTERFACE (PPI) MASKS *************/
+/*  PPI_CONTROL Masks													*/
+#define PORT_EN			0x0001		/* PPI Port Enable					*/
+#define PORT_DIR		0x0002		/* PPI Port Direction				*/
+#define XFR_TYPE		0x000C		/* PPI Transfer Type				*/
+#define PORT_CFG		0x0030		/* PPI Port Configuration			*/
+#define FLD_SEL			0x0040		/* PPI Active Field Select			*/
+#define PACK_EN			0x0080		/* PPI Packing Mode					*/
+#define DMA32			0x0100		/* PPI 32-bit DMA Enable			*/
+#define SKIP_EN			0x0200		/* PPI Skip Element Enable			*/
+#define SKIP_EO			0x0400		/* PPI Skip Even/Odd Elements		*/
+#define DLEN_8			0x0000		/* Data Length = 8 Bits				*/
+#define DLEN_10			0x0800		/* Data Length = 10 Bits			*/
+#define DLEN_11			0x1000		/* Data Length = 11 Bits			*/
+#define DLEN_12			0x1800		/* Data Length = 12 Bits			*/
+#define DLEN_13			0x2000		/* Data Length = 13 Bits			*/
+#define DLEN_14			0x2800		/* Data Length = 14 Bits			*/
+#define DLEN_15			0x3000		/* Data Length = 15 Bits			*/
+#define DLEN_16			0x3800		/* Data Length = 16 Bits			*/
+#define DLENGTH			0x3800		/* PPI Data Length  */
+#define POLC			0x4000		/* PPI Clock Polarity				*/
+#define POLS			0x8000		/* PPI Frame Sync Polarity			*/
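+
+/* Illustrative sketch (not from the original header): a PPI configuration is
+ * built by OR'ing the PPI_CONTROL masks above, e.g. an 8-bit receive setup
+ * with packing enabled (example values only, assuming the usual
+ * bfin_write_PPI_CONTROL() accessor from the matching cdef header):
+ *
+ *	bfin_write_PPI_CONTROL(PORT_EN | PACK_EN | DLEN_8);
+ */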
+
+/* PPI_STATUS Masks														*/
+#define FLD				0x0400		/* Field Indicator					*/
+#define FT_ERR			0x0800		/* Frame Track Error				*/
+#define OVR				0x1000		/* FIFO Overflow Error				*/
+#define UNDR			0x2000		/* FIFO Underrun Error				*/
+#define ERR_DET			0x4000		/* Error Detected Indicator			*/
+#define ERR_NCOR		0x8000		/* Error Not Corrected Indicator	*/
+
+
+/*  ********************  TWO-WIRE INTERFACE (TWI) MASKS  ***********************/
+/* TWI_CLKDIV Macros (Use: *pTWI_CLKDIV = CLKLOW(x)|CLKHI(y);  )				*/
+#define	CLKLOW(x)	((x) & 0xFF)		/* Periods Clock Is Held Low			*/
+#define CLKHI(y)	(((y)&0xFF)<<0x8)	/* Periods Before New Clock Low			*/
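+
+/* Worked example (assumption, based on the 10MHz internal time reference
+ * noted for PRESCALE below): one SCL period spans (CLKLOW + CLKHI) reference
+ * periods, so a ~100kHz clock with a 50% duty cycle could be requested with:
+ *
+ *	*pTWI_CLKDIV = CLKLOW(50) | CLKHI(50);
+ */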
+
+/* TWI_PRESCALE Masks															*/
+#define	PRESCALE	0x007F		/* SCLKs Per Internal Time Reference (10MHz)	*/
+#define	TWI_ENA		0x0080		/* TWI Enable									*/
+#define	SCCB		0x0200		/* SCCB Compatibility Enable					*/
+
+/* TWI_SLAVE_CTL Masks															*/
+#define	SEN			0x0001		/* Slave Enable									*/
+#define	SADD_LEN	0x0002		/* Slave Address Length							*/
+#define	STDVAL		0x0004		/* Slave Transmit Data Valid					*/
+#define	NAK			0x0008		/* NAK/ACK* Generated At Conclusion Of Transfer */
+#define	GEN			0x0010		/* General Call Address Matching Enabled		*/
+
+/* TWI_SLAVE_STAT Masks															*/
+#define	SDIR		0x0001		/* Slave Transfer Direction (Transmit/Receive*)	*/
+#define GCALL		0x0002		/* General Call Indicator						*/
+
+/* TWI_MASTER_CTL Masks													*/
+#define	MEN			0x0001		/* Master Mode Enable						*/
+#define	MADD_LEN	0x0002		/* Master Address Length					*/
+#define	MDIR		0x0004		/* Master Transmit Direction (RX/TX*)		*/
+#define	FAST		0x0008		/* Use Fast Mode Timing Specs				*/
+#define	STOP		0x0010		/* Issue Stop Condition						*/
+#define	RSTART		0x0020		/* Repeat Start or Stop* At End Of Transfer	*/
+#define	DCNT		0x3FC0		/* Data Bytes To Transfer					*/
+#define	SDAOVR		0x4000		/* Serial Data Override						*/
+#define	SCLOVR		0x8000		/* Serial Clock Override					*/
+
+/* TWI_MASTER_STAT Masks														*/
+#define	MPROG		0x0001		/* Master Transfer In Progress					*/
+#define	LOSTARB		0x0002		/* Lost Arbitration Indicator (Xfer Aborted)	*/
+#define	ANAK		0x0004		/* Address Not Acknowledged						*/
+#define	DNAK		0x0008		/* Data Not Acknowledged						*/
+#define	BUFRDERR	0x0010		/* Buffer Read Error							*/
+#define	BUFWRERR	0x0020		/* Buffer Write Error							*/
+#define	SDASEN		0x0040		/* Serial Data Sense							*/
+#define	SCLSEN		0x0080		/* Serial Clock Sense							*/
+#define	BUSBUSY		0x0100		/* Bus Busy Indicator							*/
+
+/* TWI_INT_SRC and TWI_INT_ENABLE Masks						*/
+#define	SINIT		0x0001		/* Slave Transfer Initiated	*/
+#define	SCOMP		0x0002		/* Slave Transfer Complete	*/
+#define	SERR		0x0004		/* Slave Transfer Error		*/
+#define	SOVF		0x0008		/* Slave Overflow			*/
+#define	MCOMP		0x0010		/* Master Transfer Complete	*/
+#define	MERR		0x0020		/* Master Transfer Error	*/
+#define	XMTSERV		0x0040		/* Transmit FIFO Service	*/
+#define	RCVSERV		0x0080		/* Receive FIFO Service		*/
+
+/* TWI_FIFO_CTRL Masks												*/
+#define	XMTFLUSH	0x0001		/* Transmit Buffer Flush			*/
+#define	RCVFLUSH	0x0002		/* Receive Buffer Flush				*/
+#define	XMTINTLEN	0x0004		/* Transmit Buffer Interrupt Length	*/
+#define	RCVINTLEN	0x0008		/* Receive Buffer Interrupt Length	*/
+
+/* TWI_FIFO_STAT Masks															*/
+#define	XMTSTAT		0x0003		/* Transmit FIFO Status							*/
+#define	XMT_EMPTY	0x0000		/* 		Transmit FIFO Empty						*/
+#define	XMT_HALF	0x0001		/* 		Transmit FIFO Has 1 Byte To Write		*/
+#define	XMT_FULL	0x0003		/* 		Transmit FIFO Full (2 Bytes To Write)	*/
+
+#define	RCVSTAT		0x000C		/* Receive FIFO Status							*/
+#define	RCV_EMPTY	0x0000		/* 		Receive FIFO Empty						*/
+#define	RCV_HALF	0x0004		/* 		Receive FIFO Has 1 Byte To Read			*/
+#define	RCV_FULL	0x000C		/* 		Receive FIFO Full (2 Bytes To Read)		*/
+
+
+/*  *******************  PIN CONTROL REGISTER MASKS  ************************/
+/* PORT_MUX Masks															*/
+#define	PJSE			0x0001			/* Port J SPI/SPORT Enable			*/
+#define	PJSE_SPORT		0x0000			/* 		Enable TFS0/DT0PRI			*/
+#define	PJSE_SPI		0x0001			/* 		Enable SPI_SSEL3:2			*/
+
+#define	PJCE(x)			(((x)&0x3)<<1)	/* Port J CAN/SPI/SPORT Enable		*/
+#define	PJCE_SPORT		0x0000			/* 		Enable DR0SEC/DT0SEC		*/
+#define	PJCE_CAN		0x0002			/* 		Enable CAN RX/TX			*/
+#define	PJCE_SPI		0x0004			/* 		Enable SPI_SSEL7			*/
+
+#define	PFDE			0x0008			/* Port F DMA Request Enable		*/
+#define	PFDE_UART		0x0000			/* 		Enable UART0 RX/TX			*/
+#define	PFDE_DMA		0x0008			/* 		Enable DMAR1:0				*/
+
+#define	PFTE			0x0010			/* Port F Timer Enable				*/
+#define	PFTE_UART		0x0000			/*		Enable UART1 RX/TX			*/
+#define	PFTE_TIMER		0x0010			/* 		Enable TMR7:6				*/
+
+#define	PFS6E			0x0020			/* Port F SPI SSEL 6 Enable			*/
+#define	PFS6E_TIMER		0x0000			/*		Enable TMR5					*/
+#define	PFS6E_SPI		0x0020			/* 		Enable SPI_SSEL6			*/
+
+#define	PFS5E			0x0040			/* Port F SPI SSEL 5 Enable			*/
+#define	PFS5E_TIMER		0x0000			/*		Enable TMR4					*/
+#define	PFS5E_SPI		0x0040			/* 		Enable SPI_SSEL5			*/
+
+#define	PFS4E			0x0080			/* Port F SPI SSEL 4 Enable			*/
+#define	PFS4E_TIMER		0x0000			/*		Enable TMR3					*/
+#define	PFS4E_SPI		0x0080			/* 		Enable SPI_SSEL4			*/
+
+#define	PFFE			0x0100			/* Port F PPI Frame Sync Enable		*/
+#define	PFFE_TIMER		0x0000			/* 		Enable TMR2					*/
+#define	PFFE_PPI		0x0100			/* 		Enable PPI FS3				*/
+
+#define	PGSE			0x0200			/* Port G SPORT1 Secondary Enable	*/
+#define	PGSE_PPI		0x0000			/* 		Enable PPI D9:8				*/
+#define	PGSE_SPORT		0x0200			/* 		Enable DR1SEC/DT1SEC		*/
+
+#define	PGRE			0x0400			/* Port G SPORT1 Receive Enable		*/
+#define	PGRE_PPI		0x0000			/* 		Enable PPI D12:10			*/
+#define	PGRE_SPORT		0x0400			/* 		Enable DR1PRI/RFS1/RSCLK1	*/
+
+#define	PGTE			0x0800			/* Port G SPORT1 Transmit Enable	*/
+#define	PGTE_PPI		0x0000			/* 		Enable PPI D15:13			*/
+#define	PGTE_SPORT		0x0800			/* 		Enable DT1PRI/TFS1/TSCLK1	*/
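+
+/* Illustrative sketch (not from the original header): pin multiplexing is
+ * chosen by OR'ing one option from each PORT_MUX group above; "mux" below is
+ * a hypothetical variable holding the value to be written to PORT_MUX
+ * (example selection only):
+ *
+ *	u16 mux = PJCE_CAN | PFTE_TIMER | PFFE_PPI;
+ */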
+
+
+/*  ******************  HANDSHAKE DMA (HDMA) MASKS  *********************/
+/* HDMAx_CTL Masks														*/
+#define	HMDMAEN		0x0001	/* Enable Handshake DMA 0/1					*/
+#define	REP			0x0002	/* HDMA Request Polarity					*/
+#define	UTE			0x0004	/* Urgency Threshold Enable					*/
+#define	OIE			0x0010	/* Overflow Interrupt Enable				*/
+#define	BDIE		0x0020	/* Block Done Interrupt Enable				*/
+#define	MBDI		0x0040	/* Mask Block Done IRQ If Pending ECNT		*/
+#define	DRQ			0x0300	/* HDMA Request Type						*/
+#define	DRQ_NONE	0x0000	/* 		No Request							*/
+#define	DRQ_SINGLE	0x0100	/* 		Channels Request Single				*/
+#define	DRQ_MULTI	0x0200	/* 		Channels Request Multi (Default)	*/
+#define	DRQ_URGENT	0x0300	/* 		Channels Request Multi Urgent		*/
+#define	RBC			0x1000	/* Reload BCNT With IBCNT					*/
+#define	PS			0x2000	/* HDMA Pin Status							*/
+#define	OI			0x4000	/* Overflow Interrupt Generated				*/
+#define	BDI			0x8000	/* Block Done Interrupt Generated			*/
+
+/* entry addresses of the user-callable Boot ROM functions */
+
+#define _BOOTROM_RESET 0xEF000000
+#define _BOOTROM_FINAL_INIT 0xEF000002
+#define _BOOTROM_DO_MEMORY_DMA 0xEF000006
+#define _BOOTROM_BOOT_DXE_FLASH 0xEF000008
+#define _BOOTROM_BOOT_DXE_SPI 0xEF00000A
+#define _BOOTROM_BOOT_DXE_TWI 0xEF00000C
+#define _BOOTROM_GET_DXE_ADDRESS_FLASH 0xEF000010
+#define _BOOTROM_GET_DXE_ADDRESS_SPI 0xEF000012
+#define _BOOTROM_GET_DXE_ADDRESS_TWI 0xEF000014
+
+/* Alternate Deprecated Macros Provided For Backwards Code Compatibility */
+#define	PGDE_UART   PFDE_UART
+#define	PGDE_DMA    PFDE_DMA
+#define	CKELOW		SCKELOW
+
+/* HOST Port Registers */
+
+#define                     HOST_CONTROL  0xffc03400   /* HOST Control Register */
+#define                      HOST_STATUS  0xffc03404   /* HOST Status Register */
+#define                     HOST_TIMEOUT  0xffc03408   /* HOST Acknowledge Mode Timeout Register */
+
+/* Counter Registers */
+
+#define                       CNT_CONFIG  0xffc03500   /* Configuration Register */
+#define                        CNT_IMASK  0xffc03504   /* Interrupt Mask Register */
+#define                       CNT_STATUS  0xffc03508   /* Status Register */
+#define                      CNT_COMMAND  0xffc0350c   /* Command Register */
+#define                     CNT_DEBOUNCE  0xffc03510   /* Debounce Register */
+#define                      CNT_COUNTER  0xffc03514   /* Counter Register */
+#define                          CNT_MAX  0xffc03518   /* Maximal Count Register */
+#define                          CNT_MIN  0xffc0351c   /* Minimal Count Register */
+
+/* OTP/FUSE Registers */
+
+#define                      OTP_CONTROL  0xffc03600   /* OTP/Fuse Control Register */
+#define                          OTP_BEN  0xffc03604   /* OTP/Fuse Byte Enable */
+#define                       OTP_STATUS  0xffc03608   /* OTP/Fuse Status */
+#define                       OTP_TIMING  0xffc0360c   /* OTP/Fuse Access Timing */
+
+/* Security Registers */
+
+#define                    SECURE_SYSSWT  0xffc03620   /* Secure System Switches */
+#define                   SECURE_CONTROL  0xffc03624   /* Secure Control */
+#define                    SECURE_STATUS  0xffc03628   /* Secure Status */
+
+/* OTP Read/Write Data Buffer Registers */
+
+#define                        OTP_DATA0  0xffc03680   /* OTP/Fuse Data (OTP_DATA0-3) accesses the fuse read write buffer */
+#define                        OTP_DATA1  0xffc03684   /* OTP/Fuse Data (OTP_DATA0-3) accesses the fuse read write buffer */
+#define                        OTP_DATA2  0xffc03688   /* OTP/Fuse Data (OTP_DATA0-3) accesses the fuse read write buffer */
+#define                        OTP_DATA3  0xffc0368c   /* OTP/Fuse Data (OTP_DATA0-3) accesses the fuse read write buffer */
+
+/* Motor Control PWM Registers */
+
+#define                         PWM_CTRL  0xffc03700   /* PWM Control Register */
+#define                         PWM_STAT  0xffc03704   /* PWM Status Register */
+#define                           PWM_TM  0xffc03708   /* PWM Period Register */
+#define                           PWM_DT  0xffc0370c   /* PWM Dead Time Register */
+#define                         PWM_GATE  0xffc03710   /* PWM Chopping Control */
+#define                          PWM_CHA  0xffc03714   /* PWM Channel A Duty Control */
+#define                          PWM_CHB  0xffc03718   /* PWM Channel B Duty Control */
+#define                          PWM_CHC  0xffc0371c   /* PWM Channel C Duty Control */
+#define                          PWM_SEG  0xffc03720   /* PWM Crossover and Output Enable */
+#define                       PWM_SYNCWT  0xffc03724   /* PWM Sync Pulse Width Control */
+#define                         PWM_CHAL  0xffc03728   /* PWM Channel AL Duty Control (SR mode only) */
+#define                         PWM_CHBL  0xffc0372c   /* PWM Channel BL Duty Control (SR mode only) */
+#define                         PWM_CHCL  0xffc03730   /* PWM Channel CL Duty Control (SR mode only) */
+#define                          PWM_LSI  0xffc03734   /* PWM Low Side Invert (SR mode only) */
+#define                        PWM_STAT2  0xffc03738   /* PWM Status Register 2 */
+
+
+/* ********************************************************** */
+/*     SINGLE BIT MACRO PAIRS (bit mask and negated one)      */
+/*     and MULTI BIT READ MACROS                              */
+/* ********************************************************** */
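+
+/* Illustrative sketch (not from the original header): each "n"-prefixed macro
+ * below is the 0-valued counterpart of its bit, so a register value can be
+ * written as an explicit list of set and cleared bits; "ctl" below is a
+ * hypothetical variable (example only):
+ *
+ *	u16 ctl = HOST_CNTR_HOST_EN | HOST_CNTR_nINT_MODE | HOST_CNTR_EHR;
+ */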
+
+/* Bit masks for HOST_CONTROL */
+
+#define                   HOST_CNTR_HOST_EN  0x1        /* Host Enable */
+#define                  HOST_CNTR_nHOST_EN  0x0
+#define                  HOST_CNTR_HOST_END  0x2        /* Host Endianness */
+#define                 HOST_CNTR_nHOST_END  0x0
+#define                 HOST_CNTR_DATA_SIZE  0x4        /* Data Size */
+#define                HOST_CNTR_nDATA_SIZE  0x0
+#define                  HOST_CNTR_HOST_RST  0x8        /* Host Reset */
+#define                 HOST_CNTR_nHOST_RST  0x0
+#define                  HOST_CNTR_HRDY_OVR  0x20       /* Host Ready Override */
+#define                 HOST_CNTR_nHRDY_OVR  0x0
+#define                  HOST_CNTR_INT_MODE  0x40       /* Interrupt Mode */
+#define                 HOST_CNTR_nINT_MODE  0x0
+#define                     HOST_CNTR_BT_EN  0x80       /* Bus Timeout Enable */
+#define                  HOST_CNTR_nBT_EN  0x0
+#define                       HOST_CNTR_EHW  0x100      /* Enable Host Write */
+#define                      HOST_CNTR_nEHW  0x0
+#define                       HOST_CNTR_EHR  0x200      /* Enable Host Read */
+#define                      HOST_CNTR_nEHR  0x0
+#define                       HOST_CNTR_BDR  0x400      /* Burst DMA Requests */
+#define                      HOST_CNTR_nBDR  0x0
+
+/* Bit masks for HOST_STATUS */
+
+#define                     HOST_STAT_READY  0x1        /* DMA Ready */
+#define                    HOST_STAT_nREADY  0x0
+#define                  HOST_STAT_FIFOFULL  0x2        /* FIFO Full */
+#define                 HOST_STAT_nFIFOFULL  0x0
+#define                 HOST_STAT_FIFOEMPTY  0x4        /* FIFO Empty */
+#define                HOST_STAT_nFIFOEMPTY  0x0
+#define                  HOST_STAT_COMPLETE  0x8        /* DMA Complete */
+#define                 HOST_STAT_nCOMPLETE  0x0
+#define                      HOST_STAT_HSHK  0x10       /* Host Handshake */
+#define                     HOST_STAT_nHSHK  0x0
+#define                   HOST_STAT_TIMEOUT  0x20       /* Host Timeout */
+#define                  HOST_STAT_nTIMEOUT  0x0
+#define                      HOST_STAT_HIRQ  0x40       /* Host Interrupt Request */
+#define                     HOST_STAT_nHIRQ  0x0
+#define                HOST_STAT_ALLOW_CNFG  0x80       /* Allow New Configuration */
+#define               HOST_STAT_nALLOW_CNFG  0x0
+#define                   HOST_STAT_DMA_DIR  0x100      /* DMA Direction */
+#define                  HOST_STAT_nDMA_DIR  0x0
+#define                       HOST_STAT_BTE  0x200      /* Bus Timeout Enabled */
+#define                      HOST_STAT_nBTE  0x0
+#define               HOST_STAT_HOSTRD_DONE  0x8000     /* Host Read Completion Interrupt */
+#define              HOST_STAT_nHOSTRD_DONE  0x0
+
+/* Bit masks for HOST_TIMEOUT */
+
+#define             HOST_COUNT_TIMEOUT  0x7ff      /* Host Timeout count */
+
+/* Bit masks for SECURE_SYSSWT */
+
+#define                   EMUDABL  0x1        /* Emulation Disable. */
+#define                  nEMUDABL  0x0
+#define                   RSTDABL  0x2        /* Reset Disable */
+#define                  nRSTDABL  0x0
+#define                   L1IDABL  0x1c       /* L1 Instruction Memory Disable. */
+#define                  L1DADABL  0xe0       /* L1 Data Bank A Memory Disable. */
+#define                  L1DBDABL  0x700      /* L1 Data Bank B Memory Disable. */
+#define                   DMA0OVR  0x800      /* DMA0 Memory Access Override */
+#define                  nDMA0OVR  0x0
+#define                   DMA1OVR  0x1000     /* DMA1 Memory Access Override */
+#define                  nDMA1OVR  0x0
+#define                    EMUOVR  0x4000     /* Emulation Override */
+#define                   nEMUOVR  0x0
+#define                    OTPSEN  0x8000     /* OTP Secrets Enable. */
+#define                   nOTPSEN  0x0
+#define                    L2DABL  0x70000    /* L2 Memory Disable. */
+
+/* Bit masks for SECURE_CONTROL */
+
+#define                   SECURE0  0x1        /* SECURE 0 */
+#define                  nSECURE0  0x0
+#define                   SECURE1  0x2        /* SECURE 1 */
+#define                  nSECURE1  0x0
+#define                   SECURE2  0x4        /* SECURE 2 */
+#define                  nSECURE2  0x0
+#define                   SECURE3  0x8        /* SECURE 3 */
+#define                  nSECURE3  0x0
+
+/* Bit masks for SECURE_STATUS */
+
+#define                   SECMODE  0x3        /* Secured Mode Control State */
+#define                       NMI  0x4        /* Non Maskable Interrupt */
+#define                      nNMI  0x0
+#define                   AFVALID  0x8        /* Authentication Firmware Valid */
+#define                  nAFVALID  0x0
+#define                    AFEXIT  0x10       /* Authentication Firmware Exit */
+#define                   nAFEXIT  0x0
+#define                   SECSTAT  0xe0       /* Secure Status */
 
 #endif /* _DEF_BF512_H */
diff --git a/arch/blackfin/mach-bf518/include/mach/defBF51x_base.h b/arch/blackfin/mach-bf518/include/mach/defBF51x_base.h
deleted file mode 100644
index 5f84913..0000000
--- a/arch/blackfin/mach-bf518/include/mach/defBF51x_base.h
+++ /dev/null
@@ -1,1495 +0,0 @@
-/*
- * Copyright 2008 Analog Devices Inc.
- *
- * Licensed under the ADI BSD license or the GPL-2 (or later)
- */
-
-#ifndef _DEF_BF51X_H
-#define _DEF_BF51X_H
-
-
-/* ************************************************************** */
-/*   SYSTEM & MMR ADDRESS DEFINITIONS COMMON TO ALL ADSP-BF51x    */
-/* ************************************************************** */
-
-/* Clock and System Control	(0xFFC00000 - 0xFFC000FF)								*/
-#define PLL_CTL				0xFFC00000	/* PLL Control Register						*/
-#define PLL_DIV				0xFFC00004	/* PLL Divide Register						*/
-#define VR_CTL				0xFFC00008	/* Voltage Regulator Control Register				*/
-#define PLL_STAT			0xFFC0000C	/* PLL Status Register						*/
-#define PLL_LOCKCNT			0xFFC00010	/* PLL Lock Count Register					*/
-#define CHIPID				0xFFC00014	/* Device ID Register */
-
-/* System Interrupt Controller (0xFFC00100 - 0xFFC001FF)								*/
-#define SWRST				0xFFC00100	/* Software Reset Register					*/
-#define SYSCR				0xFFC00104	/* System Configuration Register				*/
-#define SIC_RVECT			0xFFC00108	/* Interrupt Reset Vector Address Register			*/
-
-#define SIC_IMASK0			0xFFC0010C	/* Interrupt Mask Register					*/
-#define SIC_IAR0			0xFFC00110	/* Interrupt Assignment Register 0				*/
-#define SIC_IAR1			0xFFC00114	/* Interrupt Assignment Register 1				*/
-#define SIC_IAR2			0xFFC00118	/* Interrupt Assignment Register 2				*/
-#define SIC_IAR3			0xFFC0011C	/* Interrupt Assignment Register 3				*/
-#define SIC_ISR0			0xFFC00120	/* Interrupt Status Register					*/
-#define SIC_IWR0			0xFFC00124	/* Interrupt Wakeup Register					*/
-
-/* SIC Additions to ADSP-BF51x (0xFFC0014C - 0xFFC00162) */
-#define SIC_IMASK1                      0xFFC0014C     /* Interrupt Mask register of SIC2 */
-#define SIC_IAR4                        0xFFC00150     /* Interrupt Assignment register4 */
-#define SIC_IAR5                        0xFFC00154     /* Interrupt Assignment register5 */
-#define SIC_IAR6                        0xFFC00158     /* Interrupt Assignment register6 */
-#define SIC_IAR7                        0xFFC0015C     /* Interrupt Assignment register7 */
-#define SIC_ISR1                        0xFFC00160     /* Interrupt Statur register */
-#define SIC_IWR1                        0xFFC00164     /* Interrupt Wakeup register */
-
-
-/* Watchdog Timer			(0xFFC00200 - 0xFFC002FF)								*/
-#define WDOG_CTL			0xFFC00200	/* Watchdog Control Register				*/
-#define WDOG_CNT			0xFFC00204	/* Watchdog Count Register					*/
-#define WDOG_STAT			0xFFC00208	/* Watchdog Status Register					*/
-
-
-/* Real Time Clock		(0xFFC00300 - 0xFFC003FF)									*/
-#define RTC_STAT			0xFFC00300	/* RTC Status Register						*/
-#define RTC_ICTL			0xFFC00304	/* RTC Interrupt Control Register			*/
-#define RTC_ISTAT			0xFFC00308	/* RTC Interrupt Status Register			*/
-#define RTC_SWCNT			0xFFC0030C	/* RTC Stopwatch Count Register				*/
-#define RTC_ALARM			0xFFC00310	/* RTC Alarm Time Register					*/
-#define RTC_FAST			0xFFC00314	/* RTC Prescaler Enable Register			*/
-#define RTC_PREN			0xFFC00314	/* RTC Prescaler Enable Alternate Macro		*/
-
-
-/* UART0 Controller		(0xFFC00400 - 0xFFC004FF)									*/
-#define UART0_THR			0xFFC00400	/* Transmit Holding register				*/
-#define UART0_RBR			0xFFC00400	/* Receive Buffer register					*/
-#define UART0_DLL			0xFFC00400	/* Divisor Latch (Low-Byte)					*/
-#define UART0_IER			0xFFC00404	/* Interrupt Enable Register				*/
-#define UART0_DLH			0xFFC00404	/* Divisor Latch (High-Byte)				*/
-#define UART0_IIR			0xFFC00408	/* Interrupt Identification Register		*/
-#define UART0_LCR			0xFFC0040C	/* Line Control Register					*/
-#define UART0_MCR			0xFFC00410	/* Modem Control Register					*/
-#define UART0_LSR			0xFFC00414	/* Line Status Register						*/
-#define UART0_MSR			0xFFC00418	/* Modem Status Register					*/
-#define UART0_SCR			0xFFC0041C	/* SCR Scratch Register						*/
-#define UART0_GCTL			0xFFC00424	/* Global Control Register					*/
-
-/* SPI0 Controller			(0xFFC00500 - 0xFFC005FF)							*/
-#define SPI0_REGBASE			0xFFC00500
-#define SPI0_CTL			0xFFC00500	/* SPI Control Register						*/
-#define SPI0_FLG			0xFFC00504	/* SPI Flag register						*/
-#define SPI0_STAT			0xFFC00508	/* SPI Status register						*/
-#define SPI0_TDBR			0xFFC0050C	/* SPI Transmit Data Buffer Register				*/
-#define SPI0_RDBR			0xFFC00510	/* SPI Receive Data Buffer Register				*/
-#define SPI0_BAUD			0xFFC00514	/* SPI Baud rate Register					*/
-#define SPI0_SHADOW			0xFFC00518	/* SPI_RDBR Shadow Register					*/
-
-/* SPI1 Controller			(0xFFC03400 - 0xFFC034FF)							*/
-#define SPI1_REGBASE			0xFFC03400
-#define SPI1_CTL			0xFFC03400	/* SPI Control Register						*/
-#define SPI1_FLG			0xFFC03404	/* SPI Flag register						*/
-#define SPI1_STAT			0xFFC03408	/* SPI Status register						*/
-#define SPI1_TDBR			0xFFC0340C	/* SPI Transmit Data Buffer Register				*/
-#define SPI1_RDBR			0xFFC03410	/* SPI Receive Data Buffer Register				*/
-#define SPI1_BAUD			0xFFC03414	/* SPI Baud rate Register					*/
-#define SPI1_SHADOW			0xFFC03418	/* SPI_RDBR Shadow Register					*/
-
-/* TIMER0-7 Registers		(0xFFC00600 - 0xFFC006FF)								*/
-#define TIMER0_CONFIG		0xFFC00600	/* Timer 0 Configuration Register			*/
-#define TIMER0_COUNTER		0xFFC00604	/* Timer 0 Counter Register					*/
-#define TIMER0_PERIOD		0xFFC00608	/* Timer 0 Period Register					*/
-#define TIMER0_WIDTH		0xFFC0060C	/* Timer 0 Width Register					*/
-
-#define TIMER1_CONFIG		0xFFC00610	/* Timer 1 Configuration Register  			*/
-#define TIMER1_COUNTER		0xFFC00614	/* Timer 1 Counter Register        			*/
-#define TIMER1_PERIOD		0xFFC00618	/* Timer 1 Period Register         			*/
-#define TIMER1_WIDTH		0xFFC0061C	/* Timer 1 Width Register          			*/
-
-#define TIMER2_CONFIG		0xFFC00620	/* Timer 2 Configuration Register  			*/
-#define TIMER2_COUNTER		0xFFC00624	/* Timer 2 Counter Register        			*/
-#define TIMER2_PERIOD		0xFFC00628	/* Timer 2 Period Register         			*/
-#define TIMER2_WIDTH		0xFFC0062C	/* Timer 2 Width Register          			*/
-
-#define TIMER3_CONFIG		0xFFC00630	/* Timer 3 Configuration Register			*/
-#define TIMER3_COUNTER		0xFFC00634	/* Timer 3 Counter Register					*/
-#define TIMER3_PERIOD		0xFFC00638	/* Timer 3 Period Register					*/
-#define TIMER3_WIDTH		0xFFC0063C	/* Timer 3 Width Register					*/
-
-#define TIMER4_CONFIG		0xFFC00640	/* Timer 4 Configuration Register  			*/
-#define TIMER4_COUNTER		0xFFC00644	/* Timer 4 Counter Register        			*/
-#define TIMER4_PERIOD		0xFFC00648	/* Timer 4 Period Register         			*/
-#define TIMER4_WIDTH		0xFFC0064C	/* Timer 4 Width Register          			*/
-
-#define TIMER5_CONFIG		0xFFC00650	/* Timer 5 Configuration Register  			*/
-#define TIMER5_COUNTER		0xFFC00654	/* Timer 5 Counter Register        			*/
-#define TIMER5_PERIOD		0xFFC00658	/* Timer 5 Period Register         			*/
-#define TIMER5_WIDTH		0xFFC0065C	/* Timer 5 Width Register          			*/
-
-#define TIMER6_CONFIG		0xFFC00660	/* Timer 6 Configuration Register  			*/
-#define TIMER6_COUNTER		0xFFC00664	/* Timer 6 Counter Register        			*/
-#define TIMER6_PERIOD		0xFFC00668	/* Timer 6 Period Register         			*/
-#define TIMER6_WIDTH		0xFFC0066C	/* Timer 6 Width Register          			*/
-
-#define TIMER7_CONFIG		0xFFC00670	/* Timer 7 Configuration Register  			*/
-#define TIMER7_COUNTER		0xFFC00674	/* Timer 7 Counter Register        			*/
-#define TIMER7_PERIOD		0xFFC00678	/* Timer 7 Period Register         			*/
-#define TIMER7_WIDTH		0xFFC0067C	/* Timer 7 Width Register       			*/
-
-#define TIMER_ENABLE		0xFFC00680	/* Timer Enable Register					*/
-#define TIMER_DISABLE		0xFFC00684	/* Timer Disable Register					*/
-#define TIMER_STATUS		0xFFC00688	/* Timer Status Register					*/
-
-/* General Purpose I/O Port F (0xFFC00700 - 0xFFC007FF)												*/
-#define PORTFIO					0xFFC00700	/* Port F I/O Pin State Specify Register				*/
-#define PORTFIO_CLEAR			0xFFC00704	/* Port F I/O Peripheral Interrupt Clear Register		*/
-#define PORTFIO_SET				0xFFC00708	/* Port F I/O Peripheral Interrupt Set Register			*/
-#define PORTFIO_TOGGLE			0xFFC0070C	/* Port F I/O Pin State Toggle Register					*/
-#define PORTFIO_MASKA			0xFFC00710	/* Port F I/O Mask State Specify Interrupt A Register	*/
-#define PORTFIO_MASKA_CLEAR		0xFFC00714	/* Port F I/O Mask Disable Interrupt A Register			*/
-#define PORTFIO_MASKA_SET		0xFFC00718	/* Port F I/O Mask Enable Interrupt A Register			*/
-#define PORTFIO_MASKA_TOGGLE	0xFFC0071C	/* Port F I/O Mask Toggle Enable Interrupt A Register	*/
-#define PORTFIO_MASKB			0xFFC00720	/* Port F I/O Mask State Specify Interrupt B Register	*/
-#define PORTFIO_MASKB_CLEAR		0xFFC00724	/* Port F I/O Mask Disable Interrupt B Register			*/
-#define PORTFIO_MASKB_SET		0xFFC00728	/* Port F I/O Mask Enable Interrupt B Register			*/
-#define PORTFIO_MASKB_TOGGLE	0xFFC0072C	/* Port F I/O Mask Toggle Enable Interrupt B Register	*/
-#define PORTFIO_DIR				0xFFC00730	/* Port F I/O Direction Register						*/
-#define PORTFIO_POLAR			0xFFC00734	/* Port F I/O Source Polarity Register					*/
-#define PORTFIO_EDGE			0xFFC00738	/* Port F I/O Source Sensitivity Register				*/
-#define PORTFIO_BOTH			0xFFC0073C	/* Port F I/O Set on BOTH Edges Register				*/
-#define PORTFIO_INEN			0xFFC00740	/* Port F I/O Input Enable Register 					*/
-
-/* SPORT0 Controller		(0xFFC00800 - 0xFFC008FF)										*/
-#define SPORT0_TCR1			0xFFC00800	/* SPORT0 Transmit Configuration 1 Register			*/
-#define SPORT0_TCR2			0xFFC00804	/* SPORT0 Transmit Configuration 2 Register			*/
-#define SPORT0_TCLKDIV		0xFFC00808	/* SPORT0 Transmit Clock Divider					*/
-#define SPORT0_TFSDIV		0xFFC0080C	/* SPORT0 Transmit Frame Sync Divider				*/
-#define SPORT0_TX			0xFFC00810	/* SPORT0 TX Data Register							*/
-#define SPORT0_RX			0xFFC00818	/* SPORT0 RX Data Register							*/
-#define SPORT0_RCR1			0xFFC00820	/* SPORT0 Transmit Configuration 1 Register			*/
-#define SPORT0_RCR2			0xFFC00824	/* SPORT0 Transmit Configuration 2 Register			*/
-#define SPORT0_RCLKDIV		0xFFC00828	/* SPORT0 Receive Clock Divider						*/
-#define SPORT0_RFSDIV		0xFFC0082C	/* SPORT0 Receive Frame Sync Divider				*/
-#define SPORT0_STAT			0xFFC00830	/* SPORT0 Status Register							*/
-#define SPORT0_CHNL			0xFFC00834	/* SPORT0 Current Channel Register					*/
-#define SPORT0_MCMC1		0xFFC00838	/* SPORT0 Multi-Channel Configuration Register 1	*/
-#define SPORT0_MCMC2		0xFFC0083C	/* SPORT0 Multi-Channel Configuration Register 2	*/
-#define SPORT0_MTCS0		0xFFC00840	/* SPORT0 Multi-Channel Transmit Select Register 0	*/
-#define SPORT0_MTCS1		0xFFC00844	/* SPORT0 Multi-Channel Transmit Select Register 1	*/
-#define SPORT0_MTCS2		0xFFC00848	/* SPORT0 Multi-Channel Transmit Select Register 2	*/
-#define SPORT0_MTCS3		0xFFC0084C	/* SPORT0 Multi-Channel Transmit Select Register 3	*/
-#define SPORT0_MRCS0		0xFFC00850	/* SPORT0 Multi-Channel Receive Select Register 0	*/
-#define SPORT0_MRCS1		0xFFC00854	/* SPORT0 Multi-Channel Receive Select Register 1	*/
-#define SPORT0_MRCS2		0xFFC00858	/* SPORT0 Multi-Channel Receive Select Register 2	*/
-#define SPORT0_MRCS3		0xFFC0085C	/* SPORT0 Multi-Channel Receive Select Register 3	*/
-
-/* SPORT1 Controller		(0xFFC00900 - 0xFFC009FF)										*/
-#define SPORT1_TCR1			0xFFC00900	/* SPORT1 Transmit Configuration 1 Register			*/
-#define SPORT1_TCR2			0xFFC00904	/* SPORT1 Transmit Configuration 2 Register			*/
-#define SPORT1_TCLKDIV		0xFFC00908	/* SPORT1 Transmit Clock Divider					*/
-#define SPORT1_TFSDIV		0xFFC0090C	/* SPORT1 Transmit Frame Sync Divider				*/
-#define SPORT1_TX			0xFFC00910	/* SPORT1 TX Data Register							*/
-#define SPORT1_RX			0xFFC00918	/* SPORT1 RX Data Register							*/
-#define SPORT1_RCR1			0xFFC00920	/* SPORT1 Transmit Configuration 1 Register			*/
-#define SPORT1_RCR2			0xFFC00924	/* SPORT1 Transmit Configuration 2 Register			*/
-#define SPORT1_RCLKDIV		0xFFC00928	/* SPORT1 Receive Clock Divider						*/
-#define SPORT1_RFSDIV		0xFFC0092C	/* SPORT1 Receive Frame Sync Divider				*/
-#define SPORT1_STAT			0xFFC00930	/* SPORT1 Status Register							*/
-#define SPORT1_CHNL			0xFFC00934	/* SPORT1 Current Channel Register					*/
-#define SPORT1_MCMC1		0xFFC00938	/* SPORT1 Multi-Channel Configuration Register 1	*/
-#define SPORT1_MCMC2		0xFFC0093C	/* SPORT1 Multi-Channel Configuration Register 2	*/
-#define SPORT1_MTCS0		0xFFC00940	/* SPORT1 Multi-Channel Transmit Select Register 0	*/
-#define SPORT1_MTCS1		0xFFC00944	/* SPORT1 Multi-Channel Transmit Select Register 1	*/
-#define SPORT1_MTCS2		0xFFC00948	/* SPORT1 Multi-Channel Transmit Select Register 2	*/
-#define SPORT1_MTCS3		0xFFC0094C	/* SPORT1 Multi-Channel Transmit Select Register 3	*/
-#define SPORT1_MRCS0		0xFFC00950	/* SPORT1 Multi-Channel Receive Select Register 0	*/
-#define SPORT1_MRCS1		0xFFC00954	/* SPORT1 Multi-Channel Receive Select Register 1	*/
-#define SPORT1_MRCS2		0xFFC00958	/* SPORT1 Multi-Channel Receive Select Register 2	*/
-#define SPORT1_MRCS3		0xFFC0095C	/* SPORT1 Multi-Channel Receive Select Register 3	*/
-
-/* External Bus Interface Unit (0xFFC00A00 - 0xFFC00AFF)								*/
-#define EBIU_AMGCTL			0xFFC00A00	/* Asynchronous Memory Global Control Register	*/
-#define EBIU_AMBCTL0		0xFFC00A04	/* Asynchronous Memory Bank Control Register 0	*/
-#define EBIU_AMBCTL1		0xFFC00A08	/* Asynchronous Memory Bank Control Register 1	*/
-#define EBIU_SDGCTL			0xFFC00A10	/* SDRAM Global Control Register				*/
-#define EBIU_SDBCTL			0xFFC00A14	/* SDRAM Bank Control Register					*/
-#define EBIU_SDRRC			0xFFC00A18	/* SDRAM Refresh Rate Control Register			*/
-#define EBIU_SDSTAT			0xFFC00A1C	/* SDRAM Status Register						*/
-
-/* DMA Traffic Control Registers													*/
-#define DMA_TC_PER			0xFFC00B0C	/* Traffic Control Periods Register			*/
-#define DMA_TC_CNT			0xFFC00B10	/* Traffic Control Current Counts Register	*/
-
-/* Alternate deprecated register names (below) provided for backwards code compatibility */
-#define DMA_TCPER			0xFFC00B0C	/* Traffic Control Periods Register			*/
-#define DMA_TCCNT			0xFFC00B10	/* Traffic Control Current Counts Register	*/
-
-/* DMA Controller (0xFFC00C00 - 0xFFC00FFF)															*/
-#define DMA0_NEXT_DESC_PTR		0xFFC00C00	/* DMA Channel 0 Next Descriptor Pointer Register		*/
-#define DMA0_START_ADDR			0xFFC00C04	/* DMA Channel 0 Start Address Register					*/
-#define DMA0_CONFIG				0xFFC00C08	/* DMA Channel 0 Configuration Register					*/
-#define DMA0_X_COUNT			0xFFC00C10	/* DMA Channel 0 X Count Register						*/
-#define DMA0_X_MODIFY			0xFFC00C14	/* DMA Channel 0 X Modify Register						*/
-#define DMA0_Y_COUNT			0xFFC00C18	/* DMA Channel 0 Y Count Register						*/
-#define DMA0_Y_MODIFY			0xFFC00C1C	/* DMA Channel 0 Y Modify Register						*/
-#define DMA0_CURR_DESC_PTR		0xFFC00C20	/* DMA Channel 0 Current Descriptor Pointer Register	*/
-#define DMA0_CURR_ADDR			0xFFC00C24	/* DMA Channel 0 Current Address Register				*/
-#define DMA0_IRQ_STATUS			0xFFC00C28	/* DMA Channel 0 Interrupt/Status Register				*/
-#define DMA0_PERIPHERAL_MAP		0xFFC00C2C	/* DMA Channel 0 Peripheral Map Register				*/
-#define DMA0_CURR_X_COUNT		0xFFC00C30	/* DMA Channel 0 Current X Count Register				*/
-#define DMA0_CURR_Y_COUNT		0xFFC00C38	/* DMA Channel 0 Current Y Count Register				*/
-
-#define DMA1_NEXT_DESC_PTR		0xFFC00C40	/* DMA Channel 1 Next Descriptor Pointer Register		*/
-#define DMA1_START_ADDR			0xFFC00C44	/* DMA Channel 1 Start Address Register					*/
-#define DMA1_CONFIG				0xFFC00C48	/* DMA Channel 1 Configuration Register					*/
-#define DMA1_X_COUNT			0xFFC00C50	/* DMA Channel 1 X Count Register						*/
-#define DMA1_X_MODIFY			0xFFC00C54	/* DMA Channel 1 X Modify Register						*/
-#define DMA1_Y_COUNT			0xFFC00C58	/* DMA Channel 1 Y Count Register						*/
-#define DMA1_Y_MODIFY			0xFFC00C5C	/* DMA Channel 1 Y Modify Register						*/
-#define DMA1_CURR_DESC_PTR		0xFFC00C60	/* DMA Channel 1 Current Descriptor Pointer Register	*/
-#define DMA1_CURR_ADDR			0xFFC00C64	/* DMA Channel 1 Current Address Register				*/
-#define DMA1_IRQ_STATUS			0xFFC00C68	/* DMA Channel 1 Interrupt/Status Register				*/
-#define DMA1_PERIPHERAL_MAP		0xFFC00C6C	/* DMA Channel 1 Peripheral Map Register				*/
-#define DMA1_CURR_X_COUNT		0xFFC00C70	/* DMA Channel 1 Current X Count Register				*/
-#define DMA1_CURR_Y_COUNT		0xFFC00C78	/* DMA Channel 1 Current Y Count Register				*/
-
-#define DMA2_NEXT_DESC_PTR		0xFFC00C80	/* DMA Channel 2 Next Descriptor Pointer Register		*/
-#define DMA2_START_ADDR			0xFFC00C84	/* DMA Channel 2 Start Address Register					*/
-#define DMA2_CONFIG				0xFFC00C88	/* DMA Channel 2 Configuration Register					*/
-#define DMA2_X_COUNT			0xFFC00C90	/* DMA Channel 2 X Count Register						*/
-#define DMA2_X_MODIFY			0xFFC00C94	/* DMA Channel 2 X Modify Register						*/
-#define DMA2_Y_COUNT			0xFFC00C98	/* DMA Channel 2 Y Count Register						*/
-#define DMA2_Y_MODIFY			0xFFC00C9C	/* DMA Channel 2 Y Modify Register						*/
-#define DMA2_CURR_DESC_PTR		0xFFC00CA0	/* DMA Channel 2 Current Descriptor Pointer Register	*/
-#define DMA2_CURR_ADDR			0xFFC00CA4	/* DMA Channel 2 Current Address Register				*/
-#define DMA2_IRQ_STATUS			0xFFC00CA8	/* DMA Channel 2 Interrupt/Status Register				*/
-#define DMA2_PERIPHERAL_MAP		0xFFC00CAC	/* DMA Channel 2 Peripheral Map Register				*/
-#define DMA2_CURR_X_COUNT		0xFFC00CB0	/* DMA Channel 2 Current X Count Register				*/
-#define DMA2_CURR_Y_COUNT		0xFFC00CB8	/* DMA Channel 2 Current Y Count Register				*/
-
-#define DMA3_NEXT_DESC_PTR		0xFFC00CC0	/* DMA Channel 3 Next Descriptor Pointer Register		*/
-#define DMA3_START_ADDR			0xFFC00CC4	/* DMA Channel 3 Start Address Register					*/
-#define DMA3_CONFIG				0xFFC00CC8	/* DMA Channel 3 Configuration Register					*/
-#define DMA3_X_COUNT			0xFFC00CD0	/* DMA Channel 3 X Count Register						*/
-#define DMA3_X_MODIFY			0xFFC00CD4	/* DMA Channel 3 X Modify Register						*/
-#define DMA3_Y_COUNT			0xFFC00CD8	/* DMA Channel 3 Y Count Register						*/
-#define DMA3_Y_MODIFY			0xFFC00CDC	/* DMA Channel 3 Y Modify Register						*/
-#define DMA3_CURR_DESC_PTR		0xFFC00CE0	/* DMA Channel 3 Current Descriptor Pointer Register	*/
-#define DMA3_CURR_ADDR			0xFFC00CE4	/* DMA Channel 3 Current Address Register				*/
-#define DMA3_IRQ_STATUS			0xFFC00CE8	/* DMA Channel 3 Interrupt/Status Register				*/
-#define DMA3_PERIPHERAL_MAP		0xFFC00CEC	/* DMA Channel 3 Peripheral Map Register				*/
-#define DMA3_CURR_X_COUNT		0xFFC00CF0	/* DMA Channel 3 Current X Count Register				*/
-#define DMA3_CURR_Y_COUNT		0xFFC00CF8	/* DMA Channel 3 Current Y Count Register				*/
-
-#define DMA4_NEXT_DESC_PTR		0xFFC00D00	/* DMA Channel 4 Next Descriptor Pointer Register		*/
-#define DMA4_START_ADDR			0xFFC00D04	/* DMA Channel 4 Start Address Register					*/
-#define DMA4_CONFIG				0xFFC00D08	/* DMA Channel 4 Configuration Register					*/
-#define DMA4_X_COUNT			0xFFC00D10	/* DMA Channel 4 X Count Register						*/
-#define DMA4_X_MODIFY			0xFFC00D14	/* DMA Channel 4 X Modify Register						*/
-#define DMA4_Y_COUNT			0xFFC00D18	/* DMA Channel 4 Y Count Register						*/
-#define DMA4_Y_MODIFY			0xFFC00D1C	/* DMA Channel 4 Y Modify Register						*/
-#define DMA4_CURR_DESC_PTR		0xFFC00D20	/* DMA Channel 4 Current Descriptor Pointer Register	*/
-#define DMA4_CURR_ADDR			0xFFC00D24	/* DMA Channel 4 Current Address Register				*/
-#define DMA4_IRQ_STATUS			0xFFC00D28	/* DMA Channel 4 Interrupt/Status Register				*/
-#define DMA4_PERIPHERAL_MAP		0xFFC00D2C	/* DMA Channel 4 Peripheral Map Register				*/
-#define DMA4_CURR_X_COUNT		0xFFC00D30	/* DMA Channel 4 Current X Count Register				*/
-#define DMA4_CURR_Y_COUNT		0xFFC00D38	/* DMA Channel 4 Current Y Count Register				*/
-
-#define DMA5_NEXT_DESC_PTR		0xFFC00D40	/* DMA Channel 5 Next Descriptor Pointer Register		*/
-#define DMA5_START_ADDR			0xFFC00D44	/* DMA Channel 5 Start Address Register					*/
-#define DMA5_CONFIG				0xFFC00D48	/* DMA Channel 5 Configuration Register					*/
-#define DMA5_X_COUNT			0xFFC00D50	/* DMA Channel 5 X Count Register						*/
-#define DMA5_X_MODIFY			0xFFC00D54	/* DMA Channel 5 X Modify Register						*/
-#define DMA5_Y_COUNT			0xFFC00D58	/* DMA Channel 5 Y Count Register						*/
-#define DMA5_Y_MODIFY			0xFFC00D5C	/* DMA Channel 5 Y Modify Register						*/
-#define DMA5_CURR_DESC_PTR		0xFFC00D60	/* DMA Channel 5 Current Descriptor Pointer Register	*/
-#define DMA5_CURR_ADDR			0xFFC00D64	/* DMA Channel 5 Current Address Register				*/
-#define DMA5_IRQ_STATUS			0xFFC00D68	/* DMA Channel 5 Interrupt/Status Register				*/
-#define DMA5_PERIPHERAL_MAP		0xFFC00D6C	/* DMA Channel 5 Peripheral Map Register				*/
-#define DMA5_CURR_X_COUNT		0xFFC00D70	/* DMA Channel 5 Current X Count Register				*/
-#define DMA5_CURR_Y_COUNT		0xFFC00D78	/* DMA Channel 5 Current Y Count Register				*/
-
-#define DMA6_NEXT_DESC_PTR		0xFFC00D80	/* DMA Channel 6 Next Descriptor Pointer Register		*/
-#define DMA6_START_ADDR			0xFFC00D84	/* DMA Channel 6 Start Address Register					*/
-#define DMA6_CONFIG				0xFFC00D88	/* DMA Channel 6 Configuration Register					*/
-#define DMA6_X_COUNT			0xFFC00D90	/* DMA Channel 6 X Count Register						*/
-#define DMA6_X_MODIFY			0xFFC00D94	/* DMA Channel 6 X Modify Register						*/
-#define DMA6_Y_COUNT			0xFFC00D98	/* DMA Channel 6 Y Count Register						*/
-#define DMA6_Y_MODIFY			0xFFC00D9C	/* DMA Channel 6 Y Modify Register						*/
-#define DMA6_CURR_DESC_PTR		0xFFC00DA0	/* DMA Channel 6 Current Descriptor Pointer Register	*/
-#define DMA6_CURR_ADDR			0xFFC00DA4	/* DMA Channel 6 Current Address Register				*/
-#define DMA6_IRQ_STATUS			0xFFC00DA8	/* DMA Channel 6 Interrupt/Status Register				*/
-#define DMA6_PERIPHERAL_MAP		0xFFC00DAC	/* DMA Channel 6 Peripheral Map Register				*/
-#define DMA6_CURR_X_COUNT		0xFFC00DB0	/* DMA Channel 6 Current X Count Register				*/
-#define DMA6_CURR_Y_COUNT		0xFFC00DB8	/* DMA Channel 6 Current Y Count Register				*/
-
-#define DMA7_NEXT_DESC_PTR		0xFFC00DC0	/* DMA Channel 7 Next Descriptor Pointer Register		*/
-#define DMA7_START_ADDR			0xFFC00DC4	/* DMA Channel 7 Start Address Register					*/
-#define DMA7_CONFIG				0xFFC00DC8	/* DMA Channel 7 Configuration Register					*/
-#define DMA7_X_COUNT			0xFFC00DD0	/* DMA Channel 7 X Count Register						*/
-#define DMA7_X_MODIFY			0xFFC00DD4	/* DMA Channel 7 X Modify Register						*/
-#define DMA7_Y_COUNT			0xFFC00DD8	/* DMA Channel 7 Y Count Register						*/
-#define DMA7_Y_MODIFY			0xFFC00DDC	/* DMA Channel 7 Y Modify Register						*/
-#define DMA7_CURR_DESC_PTR		0xFFC00DE0	/* DMA Channel 7 Current Descriptor Pointer Register	*/
-#define DMA7_CURR_ADDR			0xFFC00DE4	/* DMA Channel 7 Current Address Register				*/
-#define DMA7_IRQ_STATUS			0xFFC00DE8	/* DMA Channel 7 Interrupt/Status Register				*/
-#define DMA7_PERIPHERAL_MAP		0xFFC00DEC	/* DMA Channel 7 Peripheral Map Register				*/
-#define DMA7_CURR_X_COUNT		0xFFC00DF0	/* DMA Channel 7 Current X Count Register				*/
-#define DMA7_CURR_Y_COUNT		0xFFC00DF8	/* DMA Channel 7 Current Y Count Register				*/
-
-#define DMA8_NEXT_DESC_PTR		0xFFC00E00	/* DMA Channel 8 Next Descriptor Pointer Register		*/
-#define DMA8_START_ADDR			0xFFC00E04	/* DMA Channel 8 Start Address Register					*/
-#define DMA8_CONFIG				0xFFC00E08	/* DMA Channel 8 Configuration Register					*/
-#define DMA8_X_COUNT			0xFFC00E10	/* DMA Channel 8 X Count Register						*/
-#define DMA8_X_MODIFY			0xFFC00E14	/* DMA Channel 8 X Modify Register						*/
-#define DMA8_Y_COUNT			0xFFC00E18	/* DMA Channel 8 Y Count Register						*/
-#define DMA8_Y_MODIFY			0xFFC00E1C	/* DMA Channel 8 Y Modify Register						*/
-#define DMA8_CURR_DESC_PTR		0xFFC00E20	/* DMA Channel 8 Current Descriptor Pointer Register	*/
-#define DMA8_CURR_ADDR			0xFFC00E24	/* DMA Channel 8 Current Address Register				*/
-#define DMA8_IRQ_STATUS			0xFFC00E28	/* DMA Channel 8 Interrupt/Status Register				*/
-#define DMA8_PERIPHERAL_MAP		0xFFC00E2C	/* DMA Channel 8 Peripheral Map Register				*/
-#define DMA8_CURR_X_COUNT		0xFFC00E30	/* DMA Channel 8 Current X Count Register				*/
-#define DMA8_CURR_Y_COUNT		0xFFC00E38	/* DMA Channel 8 Current Y Count Register				*/
-
-#define DMA9_NEXT_DESC_PTR		0xFFC00E40	/* DMA Channel 9 Next Descriptor Pointer Register		*/
-#define DMA9_START_ADDR			0xFFC00E44	/* DMA Channel 9 Start Address Register					*/
-#define DMA9_CONFIG				0xFFC00E48	/* DMA Channel 9 Configuration Register					*/
-#define DMA9_X_COUNT			0xFFC00E50	/* DMA Channel 9 X Count Register						*/
-#define DMA9_X_MODIFY			0xFFC00E54	/* DMA Channel 9 X Modify Register						*/
-#define DMA9_Y_COUNT			0xFFC00E58	/* DMA Channel 9 Y Count Register						*/
-#define DMA9_Y_MODIFY			0xFFC00E5C	/* DMA Channel 9 Y Modify Register						*/
-#define DMA9_CURR_DESC_PTR		0xFFC00E60	/* DMA Channel 9 Current Descriptor Pointer Register	*/
-#define DMA9_CURR_ADDR			0xFFC00E64	/* DMA Channel 9 Current Address Register				*/
-#define DMA9_IRQ_STATUS			0xFFC00E68	/* DMA Channel 9 Interrupt/Status Register				*/
-#define DMA9_PERIPHERAL_MAP		0xFFC00E6C	/* DMA Channel 9 Peripheral Map Register				*/
-#define DMA9_CURR_X_COUNT		0xFFC00E70	/* DMA Channel 9 Current X Count Register				*/
-#define DMA9_CURR_Y_COUNT		0xFFC00E78	/* DMA Channel 9 Current Y Count Register				*/
-
-#define DMA10_NEXT_DESC_PTR		0xFFC00E80	/* DMA Channel 10 Next Descriptor Pointer Register		*/
-#define DMA10_START_ADDR		0xFFC00E84	/* DMA Channel 10 Start Address Register				*/
-#define DMA10_CONFIG			0xFFC00E88	/* DMA Channel 10 Configuration Register				*/
-#define DMA10_X_COUNT			0xFFC00E90	/* DMA Channel 10 X Count Register						*/
-#define DMA10_X_MODIFY			0xFFC00E94	/* DMA Channel 10 X Modify Register						*/
-#define DMA10_Y_COUNT			0xFFC00E98	/* DMA Channel 10 Y Count Register						*/
-#define DMA10_Y_MODIFY			0xFFC00E9C	/* DMA Channel 10 Y Modify Register						*/
-#define DMA10_CURR_DESC_PTR		0xFFC00EA0	/* DMA Channel 10 Current Descriptor Pointer Register	*/
-#define DMA10_CURR_ADDR			0xFFC00EA4	/* DMA Channel 10 Current Address Register				*/
-#define DMA10_IRQ_STATUS		0xFFC00EA8	/* DMA Channel 10 Interrupt/Status Register				*/
-#define DMA10_PERIPHERAL_MAP	0xFFC00EAC	/* DMA Channel 10 Peripheral Map Register				*/
-#define DMA10_CURR_X_COUNT		0xFFC00EB0	/* DMA Channel 10 Current X Count Register				*/
-#define DMA10_CURR_Y_COUNT		0xFFC00EB8	/* DMA Channel 10 Current Y Count Register				*/
-
-#define DMA11_NEXT_DESC_PTR		0xFFC00EC0	/* DMA Channel 11 Next Descriptor Pointer Register		*/
-#define DMA11_START_ADDR		0xFFC00EC4	/* DMA Channel 11 Start Address Register				*/
-#define DMA11_CONFIG			0xFFC00EC8	/* DMA Channel 11 Configuration Register				*/
-#define DMA11_X_COUNT			0xFFC00ED0	/* DMA Channel 11 X Count Register						*/
-#define DMA11_X_MODIFY			0xFFC00ED4	/* DMA Channel 11 X Modify Register						*/
-#define DMA11_Y_COUNT			0xFFC00ED8	/* DMA Channel 11 Y Count Register						*/
-#define DMA11_Y_MODIFY			0xFFC00EDC	/* DMA Channel 11 Y Modify Register						*/
-#define DMA11_CURR_DESC_PTR		0xFFC00EE0	/* DMA Channel 11 Current Descriptor Pointer Register	*/
-#define DMA11_CURR_ADDR			0xFFC00EE4	/* DMA Channel 11 Current Address Register				*/
-#define DMA11_IRQ_STATUS		0xFFC00EE8	/* DMA Channel 11 Interrupt/Status Register				*/
-#define DMA11_PERIPHERAL_MAP	0xFFC00EEC	/* DMA Channel 11 Peripheral Map Register				*/
-#define DMA11_CURR_X_COUNT		0xFFC00EF0	/* DMA Channel 11 Current X Count Register				*/
-#define DMA11_CURR_Y_COUNT		0xFFC00EF8	/* DMA Channel 11 Current Y Count Register				*/
-
-#define MDMA_D0_NEXT_DESC_PTR	0xFFC00F00	/* MemDMA Stream 0 Destination Next Descriptor Pointer Register		*/
-#define MDMA_D0_START_ADDR		0xFFC00F04	/* MemDMA Stream 0 Destination Start Address Register				*/
-#define MDMA_D0_CONFIG			0xFFC00F08	/* MemDMA Stream 0 Destination Configuration Register				*/
-#define MDMA_D0_X_COUNT			0xFFC00F10	/* MemDMA Stream 0 Destination X Count Register						*/
-#define MDMA_D0_X_MODIFY		0xFFC00F14	/* MemDMA Stream 0 Destination X Modify Register					*/
-#define MDMA_D0_Y_COUNT			0xFFC00F18	/* MemDMA Stream 0 Destination Y Count Register						*/
-#define MDMA_D0_Y_MODIFY		0xFFC00F1C	/* MemDMA Stream 0 Destination Y Modify Register					*/
-#define MDMA_D0_CURR_DESC_PTR	0xFFC00F20	/* MemDMA Stream 0 Destination Current Descriptor Pointer Register	*/
-#define MDMA_D0_CURR_ADDR		0xFFC00F24	/* MemDMA Stream 0 Destination Current Address Register				*/
-#define MDMA_D0_IRQ_STATUS		0xFFC00F28	/* MemDMA Stream 0 Destination Interrupt/Status Register			*/
-#define MDMA_D0_PERIPHERAL_MAP	0xFFC00F2C	/* MemDMA Stream 0 Destination Peripheral Map Register				*/
-#define MDMA_D0_CURR_X_COUNT	0xFFC00F30	/* MemDMA Stream 0 Destination Current X Count Register				*/
-#define MDMA_D0_CURR_Y_COUNT	0xFFC00F38	/* MemDMA Stream 0 Destination Current Y Count Register				*/
-
-#define MDMA_S0_NEXT_DESC_PTR	0xFFC00F40	/* MemDMA Stream 0 Source Next Descriptor Pointer Register			*/
-#define MDMA_S0_START_ADDR		0xFFC00F44	/* MemDMA Stream 0 Source Start Address Register					*/
-#define MDMA_S0_CONFIG			0xFFC00F48	/* MemDMA Stream 0 Source Configuration Register					*/
-#define MDMA_S0_X_COUNT			0xFFC00F50	/* MemDMA Stream 0 Source X Count Register							*/
-#define MDMA_S0_X_MODIFY		0xFFC00F54	/* MemDMA Stream 0 Source X Modify Register							*/
-#define MDMA_S0_Y_COUNT			0xFFC00F58	/* MemDMA Stream 0 Source Y Count Register							*/
-#define MDMA_S0_Y_MODIFY		0xFFC00F5C	/* MemDMA Stream 0 Source Y Modify Register							*/
-#define MDMA_S0_CURR_DESC_PTR	0xFFC00F60	/* MemDMA Stream 0 Source Current Descriptor Pointer Register		*/
-#define MDMA_S0_CURR_ADDR		0xFFC00F64	/* MemDMA Stream 0 Source Current Address Register					*/
-#define MDMA_S0_IRQ_STATUS		0xFFC00F68	/* MemDMA Stream 0 Source Interrupt/Status Register					*/
-#define MDMA_S0_PERIPHERAL_MAP	0xFFC00F6C	/* MemDMA Stream 0 Source Peripheral Map Register					*/
-#define MDMA_S0_CURR_X_COUNT	0xFFC00F70	/* MemDMA Stream 0 Source Current X Count Register					*/
-#define MDMA_S0_CURR_Y_COUNT	0xFFC00F78	/* MemDMA Stream 0 Source Current Y Count Register					*/
-
-#define MDMA_D1_NEXT_DESC_PTR	0xFFC00F80	/* MemDMA Stream 1 Destination Next Descriptor Pointer Register		*/
-#define MDMA_D1_START_ADDR		0xFFC00F84	/* MemDMA Stream 1 Destination Start Address Register				*/
-#define MDMA_D1_CONFIG			0xFFC00F88	/* MemDMA Stream 1 Destination Configuration Register				*/
-#define MDMA_D1_X_COUNT			0xFFC00F90	/* MemDMA Stream 1 Destination X Count Register						*/
-#define MDMA_D1_X_MODIFY		0xFFC00F94	/* MemDMA Stream 1 Destination X Modify Register					*/
-#define MDMA_D1_Y_COUNT			0xFFC00F98	/* MemDMA Stream 1 Destination Y Count Register						*/
-#define MDMA_D1_Y_MODIFY		0xFFC00F9C	/* MemDMA Stream 1 Destination Y Modify Register					*/
-#define MDMA_D1_CURR_DESC_PTR	0xFFC00FA0	/* MemDMA Stream 1 Destination Current Descriptor Pointer Register	*/
-#define MDMA_D1_CURR_ADDR		0xFFC00FA4	/* MemDMA Stream 1 Destination Current Address Register				*/
-#define MDMA_D1_IRQ_STATUS		0xFFC00FA8	/* MemDMA Stream 1 Destination Interrupt/Status Register			*/
-#define MDMA_D1_PERIPHERAL_MAP	0xFFC00FAC	/* MemDMA Stream 1 Destination Peripheral Map Register				*/
-#define MDMA_D1_CURR_X_COUNT	0xFFC00FB0	/* MemDMA Stream 1 Destination Current X Count Register				*/
-#define MDMA_D1_CURR_Y_COUNT	0xFFC00FB8	/* MemDMA Stream 1 Destination Current Y Count Register				*/
-
-#define MDMA_S1_NEXT_DESC_PTR	0xFFC00FC0	/* MemDMA Stream 1 Source Next Descriptor Pointer Register			*/
-#define MDMA_S1_START_ADDR		0xFFC00FC4	/* MemDMA Stream 1 Source Start Address Register					*/
-#define MDMA_S1_CONFIG			0xFFC00FC8	/* MemDMA Stream 1 Source Configuration Register					*/
-#define MDMA_S1_X_COUNT			0xFFC00FD0	/* MemDMA Stream 1 Source X Count Register							*/
-#define MDMA_S1_X_MODIFY		0xFFC00FD4	/* MemDMA Stream 1 Source X Modify Register							*/
-#define MDMA_S1_Y_COUNT			0xFFC00FD8	/* MemDMA Stream 1 Source Y Count Register							*/
-#define MDMA_S1_Y_MODIFY		0xFFC00FDC	/* MemDMA Stream 1 Source Y Modify Register							*/
-#define MDMA_S1_CURR_DESC_PTR	0xFFC00FE0	/* MemDMA Stream 1 Source Current Descriptor Pointer Register		*/
-#define MDMA_S1_CURR_ADDR		0xFFC00FE4	/* MemDMA Stream 1 Source Current Address Register					*/
-#define MDMA_S1_IRQ_STATUS		0xFFC00FE8	/* MemDMA Stream 1 Source Interrupt/Status Register					*/
-#define MDMA_S1_PERIPHERAL_MAP	0xFFC00FEC	/* MemDMA Stream 1 Source Peripheral Map Register					*/
-#define MDMA_S1_CURR_X_COUNT	0xFFC00FF0	/* MemDMA Stream 1 Source Current X Count Register					*/
-#define MDMA_S1_CURR_Y_COUNT	0xFFC00FF8	/* MemDMA Stream 1 Source Current Y Count Register					*/
-
-
-/* Parallel Peripheral Interface (0xFFC01000 - 0xFFC010FF)				*/
-#define PPI_CONTROL			0xFFC01000	/* PPI Control Register			*/
-#define PPI_STATUS			0xFFC01004	/* PPI Status Register			*/
-#define PPI_COUNT			0xFFC01008	/* PPI Transfer Count Register	*/
-#define PPI_DELAY			0xFFC0100C	/* PPI Delay Count Register		*/
-#define PPI_FRAME			0xFFC01010	/* PPI Frame Length Register	*/
-
-
-/* Two-Wire Interface		(0xFFC01400 - 0xFFC014FF)								*/
-#define TWI0_REGBASE			0xFFC01400
-#define TWI0_CLKDIV			0xFFC01400	/* Serial Clock Divider Register			*/
-#define TWI0_CONTROL			0xFFC01404	/* TWI Control Register						*/
-#define TWI0_SLAVE_CTL		0xFFC01408	/* Slave Mode Control Register				*/
-#define TWI0_SLAVE_STAT		0xFFC0140C	/* Slave Mode Status Register				*/
-#define TWI0_SLAVE_ADDR		0xFFC01410	/* Slave Mode Address Register				*/
-#define TWI0_MASTER_CTL		0xFFC01414	/* Master Mode Control Register				*/
-#define TWI0_MASTER_STAT		0xFFC01418	/* Master Mode Status Register				*/
-#define TWI0_MASTER_ADDR		0xFFC0141C	/* Master Mode Address Register				*/
-#define TWI0_INT_STAT		0xFFC01420	/* TWI Interrupt Status Register			*/
-#define TWI0_INT_MASK		0xFFC01424	/* TWI Master Interrupt Mask Register		*/
-#define TWI0_FIFO_CTL		0xFFC01428	/* FIFO Control Register					*/
-#define TWI0_FIFO_STAT		0xFFC0142C	/* FIFO Status Register						*/
-#define TWI0_XMT_DATA8		0xFFC01480	/* FIFO Transmit Data Single Byte Register	*/
-#define TWI0_XMT_DATA16		0xFFC01484	/* FIFO Transmit Data Double Byte Register	*/
-#define TWI0_RCV_DATA8		0xFFC01488	/* FIFO Receive Data Single Byte Register	*/
-#define TWI0_RCV_DATA16		0xFFC0148C	/* FIFO Receive Data Double Byte Register	*/
-
-
-/* General Purpose I/O Port G (0xFFC01500 - 0xFFC015FF)												*/
-#define PORTGIO					0xFFC01500	/* Port G I/O Pin State Specify Register				*/
-#define PORTGIO_CLEAR			0xFFC01504	/* Port G I/O Peripheral Interrupt Clear Register		*/
-#define PORTGIO_SET				0xFFC01508	/* Port G I/O Peripheral Interrupt Set Register			*/
-#define PORTGIO_TOGGLE			0xFFC0150C	/* Port G I/O Pin State Toggle Register					*/
-#define PORTGIO_MASKA			0xFFC01510	/* Port G I/O Mask State Specify Interrupt A Register	*/
-#define PORTGIO_MASKA_CLEAR		0xFFC01514	/* Port G I/O Mask Disable Interrupt A Register			*/
-#define PORTGIO_MASKA_SET		0xFFC01518	/* Port G I/O Mask Enable Interrupt A Register			*/
-#define PORTGIO_MASKA_TOGGLE	0xFFC0151C	/* Port G I/O Mask Toggle Enable Interrupt A Register	*/
-#define PORTGIO_MASKB			0xFFC01520	/* Port G I/O Mask State Specify Interrupt B Register	*/
-#define PORTGIO_MASKB_CLEAR		0xFFC01524	/* Port G I/O Mask Disable Interrupt B Register			*/
-#define PORTGIO_MASKB_SET		0xFFC01528	/* Port G I/O Mask Enable Interrupt B Register			*/
-#define PORTGIO_MASKB_TOGGLE	0xFFC0152C	/* Port G I/O Mask Toggle Enable Interrupt B Register	*/
-#define PORTGIO_DIR				0xFFC01530	/* Port G I/O Direction Register						*/
-#define PORTGIO_POLAR			0xFFC01534	/* Port G I/O Source Polarity Register					*/
-#define PORTGIO_EDGE			0xFFC01538	/* Port G I/O Source Sensitivity Register				*/
-#define PORTGIO_BOTH			0xFFC0153C	/* Port G I/O Set on BOTH Edges Register				*/
-#define PORTGIO_INEN			0xFFC01540	/* Port G I/O Input Enable Register						*/
-
-
-/* General Purpose I/O Port H (0xFFC01700 - 0xFFC017FF)												*/
-#define PORTHIO					0xFFC01700	/* Port H I/O Pin State Specify Register				*/
-#define PORTHIO_CLEAR			0xFFC01704	/* Port H I/O Peripheral Interrupt Clear Register		*/
-#define PORTHIO_SET				0xFFC01708	/* Port H I/O Peripheral Interrupt Set Register			*/
-#define PORTHIO_TOGGLE			0xFFC0170C	/* Port H I/O Pin State Toggle Register					*/
-#define PORTHIO_MASKA			0xFFC01710	/* Port H I/O Mask State Specify Interrupt A Register	*/
-#define PORTHIO_MASKA_CLEAR		0xFFC01714	/* Port H I/O Mask Disable Interrupt A Register			*/
-#define PORTHIO_MASKA_SET		0xFFC01718	/* Port H I/O Mask Enable Interrupt A Register			*/
-#define PORTHIO_MASKA_TOGGLE	0xFFC0171C	/* Port H I/O Mask Toggle Enable Interrupt A Register	*/
-#define PORTHIO_MASKB			0xFFC01720	/* Port H I/O Mask State Specify Interrupt B Register	*/
-#define PORTHIO_MASKB_CLEAR		0xFFC01724	/* Port H I/O Mask Disable Interrupt B Register			*/
-#define PORTHIO_MASKB_SET		0xFFC01728	/* Port H I/O Mask Enable Interrupt B Register			*/
-#define PORTHIO_MASKB_TOGGLE	0xFFC0172C	/* Port H I/O Mask Toggle Enable Interrupt B Register	*/
-#define PORTHIO_DIR				0xFFC01730	/* Port H I/O Direction Register						*/
-#define PORTHIO_POLAR			0xFFC01734	/* Port H I/O Source Polarity Register					*/
-#define PORTHIO_EDGE			0xFFC01738	/* Port H I/O Source Sensitivity Register				*/
-#define PORTHIO_BOTH			0xFFC0173C	/* Port H I/O Set on BOTH Edges Register				*/
-#define PORTHIO_INEN			0xFFC01740	/* Port H I/O Input Enable Register						*/
-
-
-/* UART1 Controller		(0xFFC02000 - 0xFFC020FF)								*/
-#define UART1_THR			0xFFC02000	/* Transmit Holding register			*/
-#define UART1_RBR			0xFFC02000	/* Receive Buffer register				*/
-#define UART1_DLL			0xFFC02000	/* Divisor Latch (Low-Byte)				*/
-#define UART1_IER			0xFFC02004	/* Interrupt Enable Register			*/
-#define UART1_DLH			0xFFC02004	/* Divisor Latch (High-Byte)			*/
-#define UART1_IIR			0xFFC02008	/* Interrupt Identification Register	*/
-#define UART1_LCR			0xFFC0200C	/* Line Control Register				*/
-#define UART1_MCR			0xFFC02010	/* Modem Control Register				*/
-#define UART1_LSR			0xFFC02014	/* Line Status Register					*/
-#define UART1_MSR			0xFFC02018	/* Modem Status Register				*/
-#define UART1_SCR			0xFFC0201C	/* SCR Scratch Register					*/
-#define UART1_GCTL			0xFFC02024	/* Global Control Register				*/
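
Note that UART1_DLL and UART1_DLH share their addresses with UART1_THR/UART1_RBR and UART1_IER; the DLAB bit in UART1_LCR (defined further down in this header) selects which register an access reaches. A minimal sketch of programming the baud divisor — the divisor value of 163 and the raw 16-bit MMR pointer casts are purely illustrative, not taken from the kernel serial driver:

	/* Set DLAB to expose the divisor latch, write the 16-bit divisor,
	 * then clear DLAB so the addresses revert to THR/RBR and IER.
	 */
	*(volatile unsigned short *)UART1_LCR |= DLAB;
	*(volatile unsigned short *)UART1_DLL  = 163 & 0xFF;
	*(volatile unsigned short *)UART1_DLH  = 163 >> 8;
	*(volatile unsigned short *)UART1_LCR &= ~DLAB;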
-
-
-/* Pin Control Registers	(0xFFC03200 - 0xFFC032FF)											*/
-#define PORTF_FER			0xFFC03200	/* Port F Function Enable Register (Alternate/Flag*)	*/
-#define PORTG_FER			0xFFC03204	/* Port G Function Enable Register (Alternate/Flag*)	*/
-#define PORTH_FER			0xFFC03208	/* Port H Function Enable Register (Alternate/Flag*)	*/
-#define BFIN_PORT_MUX			0xFFC0320C	/* Port Multiplexer Control Register					*/
-
-
-/* Handshake MDMA Registers	(0xFFC03300 - 0xFFC033FF)										*/
-#define HMDMA0_CONTROL		0xFFC03300	/* Handshake MDMA0 Control Register					*/
-#define HMDMA0_ECINIT		0xFFC03304	/* HMDMA0 Initial Edge Count Register				*/
-#define HMDMA0_BCINIT		0xFFC03308	/* HMDMA0 Initial Block Count Register				*/
-#define HMDMA0_ECURGENT		0xFFC0330C	/* HMDMA0 Urgent Edge Count Threshold Register		*/
-#define HMDMA0_ECOVERFLOW	0xFFC03310	/* HMDMA0 Edge Count Overflow Interrupt Register	*/
-#define HMDMA0_ECOUNT		0xFFC03314	/* HMDMA0 Current Edge Count Register				*/
-#define HMDMA0_BCOUNT		0xFFC03318	/* HMDMA0 Current Block Count Register				*/
-
-#define HMDMA1_CONTROL		0xFFC03340	/* Handshake MDMA1 Control Register					*/
-#define HMDMA1_ECINIT		0xFFC03344	/* HMDMA1 Initial Edge Count Register				*/
-#define HMDMA1_BCINIT		0xFFC03348	/* HMDMA1 Initial Block Count Register				*/
-#define HMDMA1_ECURGENT		0xFFC0334C	/* HMDMA1 Urgent Edge Count Threshold Register		*/
-#define HMDMA1_ECOVERFLOW	0xFFC03350	/* HMDMA1 Edge Count Overflow Interrupt Register	*/
-#define HMDMA1_ECOUNT		0xFFC03354	/* HMDMA1 Current Edge Count Register				*/
-#define HMDMA1_BCOUNT		0xFFC03358	/* HMDMA1 Current Block Count Register				*/
-
-
-/* GPIO PIN mux (0xFFC03210 - 0xFFC03288) */
-#define PORTF_MUX               0xFFC03210      /* Port F mux control */
-#define PORTG_MUX               0xFFC03214      /* Port G mux control */
-#define PORTH_MUX               0xFFC03218      /* Port H mux control */
-#define PORTF_DRIVE             0xFFC03220      /* Port F drive strength control */
-#define PORTG_DRIVE             0xFFC03224      /* Port G drive strength control */
-#define PORTH_DRIVE             0xFFC03228      /* Port H drive strength control */
-#define PORTF_SLEW              0xFFC03230      /* Port F slew control */
-#define PORTG_SLEW              0xFFC03234      /* Port G slew control */
-#define PORTH_SLEW              0xFFC03238      /* Port H slew control */
-#define PORTF_HYSTERISIS        0xFFC03240      /* Port F Schmitt trigger control */
-#define PORTG_HYSTERISIS        0xFFC03244      /* Port G Schmitt trigger control */
-#define PORTH_HYSTERISIS        0xFFC03248      /* Port H Schmitt trigger control */
-#define MISCPORT_DRIVE          0xFFC03280      /* Misc Port drive strength control */
-#define MISCPORT_SLEW           0xFFC03284      /* Misc Port slew control */
-#define MISCPORT_HYSTERISIS     0xFFC03288      /* Misc Port Schmitt trigger control */
-
-
-/***********************************************************************************
-** System MMR Register Bits And Macros
-**
-** Disclaimer:	All macros are intended to make C and Assembly code more readable.
-**				Use these macros carefully, as any that do left shifts for field
-**				depositing will destroy the lower order bits.  Any
-**				macro that shifts left to properly position the bit-field should be
-**				used as part of an OR to initialize a register and NOT as a dynamic
-**				modifier UNLESS the lower order bits are saved and ORed back in when
-**				the macro is used.
-*************************************************************************************/
-
-/* CHIPID Masks */
-#define CHIPID_VERSION         0xF0000000
-#define CHIPID_FAMILY          0x0FFFF000
-#define CHIPID_MANUFACTURE     0x00000FFE
-
-/* SWRST Masks																		*/
-#define SYSTEM_RESET		0x0007	/* Initiates A System Software Reset			*/
-#define	DOUBLE_FAULT		0x0008	/* Core Double Fault Causes Reset				*/
-#define RESET_DOUBLE		0x2000	/* SW Reset Generated By Core Double-Fault		*/
-#define RESET_WDOG			0x4000	/* SW Reset Generated By Watchdog Timer			*/
-#define RESET_SOFTWARE		0x8000	/* SW Reset Occurred Since Last Read Of SWRST	*/
-
-/* SYSCR Masks																				*/
-#define BMODE				0x0007	/* Boot Mode - Latched During HW Reset From Mode Pins	*/
-#define	NOBOOT				0x0010	/* Execute From L1 or ASYNC Bank 0 When BMODE = 0		*/
-
-
-/* *************  SYSTEM INTERRUPT CONTROLLER MASKS *************************************/
-/* Peripheral Masks For SIC_ISR, SIC_IWR, SIC_IMASK										*/
-
-#if 0
-#define IRQ_PLL_WAKEUP	0x00000001	/* PLL Wakeup Interrupt			 					*/
-
-#define IRQ_ERROR1      0x00000002  /* Error Interrupt (DMA, DMARx Block, DMARx Overflow) */
-#define IRQ_ERROR2      0x00000004  /* Error Interrupt (CAN, Ethernet, SPORTx, PPI, SPI, UARTx) */
-#define IRQ_RTC			0x00000008	/* Real Time Clock Interrupt 						*/
-#define IRQ_DMA0		0x00000010	/* DMA Channel 0 (PPI) Interrupt 					*/
-#define IRQ_DMA3		0x00000020	/* DMA Channel 3 (SPORT0 RX) Interrupt 				*/
-#define IRQ_DMA4		0x00000040	/* DMA Channel 4 (SPORT0 TX) Interrupt 				*/
-#define IRQ_DMA5		0x00000080	/* DMA Channel 5 (SPORT1 RX) Interrupt 				*/
-
-#define IRQ_DMA6		0x00000100	/* DMA Channel 6 (SPORT1 TX) Interrupt 		 		*/
-#define IRQ_TWI			0x00000200	/* TWI Interrupt									*/
-#define IRQ_DMA7		0x00000400	/* DMA Channel 7 (SPI) Interrupt 					*/
-#define IRQ_DMA8		0x00000800	/* DMA Channel 8 (UART0 RX) Interrupt 				*/
-#define IRQ_DMA9		0x00001000	/* DMA Channel 9 (UART0 TX) Interrupt 				*/
-#define IRQ_DMA10		0x00002000	/* DMA Channel 10 (UART1 RX) Interrupt 				*/
-#define IRQ_DMA11		0x00004000	/* DMA Channel 11 (UART1 TX) Interrupt 				*/
-#define IRQ_CAN_RX		0x00008000	/* CAN Receive Interrupt 							*/
-
-#define IRQ_CAN_TX		0x00010000	/* CAN Transmit Interrupt  							*/
-#define IRQ_DMA1		0x00020000	/* DMA Channel 1 (Ethernet RX) Interrupt 			*/
-#define IRQ_PFA_PORTH	0x00020000	/* PF Port H (PF47:32) Interrupt A 					*/
-#define IRQ_DMA2		0x00040000	/* DMA Channel 2 (Ethernet TX) Interrupt 			*/
-#define IRQ_PFB_PORTH	0x00040000	/* PF Port H (PF47:32) Interrupt B 					*/
-#define IRQ_TIMER0		0x00080000	/* Timer 0 Interrupt								*/
-#define IRQ_TIMER1		0x00100000	/* Timer 1 Interrupt 								*/
-#define IRQ_TIMER2		0x00200000	/* Timer 2 Interrupt 								*/
-#define IRQ_TIMER3		0x00400000	/* Timer 3 Interrupt 								*/
-#define IRQ_TIMER4		0x00800000	/* Timer 4 Interrupt 								*/
-
-#define IRQ_TIMER5		0x01000000	/* Timer 5 Interrupt 								*/
-#define IRQ_TIMER6		0x02000000	/* Timer 6 Interrupt 								*/
-#define IRQ_TIMER7		0x04000000	/* Timer 7 Interrupt 								*/
-#define IRQ_PFA_PORTFG	0x08000000	/* PF Ports F&G (PF31:0) Interrupt A 				*/
-#define IRQ_PFB_PORTF	0x80000000	/* PF Port F (PF15:0) Interrupt B 					*/
-#define IRQ_DMA12		0x20000000	/* DMA Channels 12 (MDMA1 Source) RX Interrupt 		*/
-#define IRQ_DMA13		0x20000000	/* DMA Channels 13 (MDMA1 Destination) TX Interrupt */
-#define IRQ_DMA14		0x40000000	/* DMA Channels 14 (MDMA0 Source) RX Interrupt 		*/
-#define IRQ_DMA15		0x40000000	/* DMA Channels 15 (MDMA0 Destination) TX Interrupt */
-#define IRQ_WDOG		0x80000000	/* Software Watchdog Timer Interrupt 				*/
-#define IRQ_PFB_PORTG	0x10000000	/* PF Port G (PF31:16) Interrupt B 					*/
-#endif
-
-/* SIC_IAR0 Macros															*/
-#define P0_IVG(x)		(((x)&0xF)-7)			/* Peripheral #0 assigned IVG #x 	*/
-#define P1_IVG(x)		(((x)&0xF)-7) << 0x4	/* Peripheral #1 assigned IVG #x 	*/
-#define P2_IVG(x)		(((x)&0xF)-7) << 0x8	/* Peripheral #2 assigned IVG #x 	*/
-#define P3_IVG(x)		(((x)&0xF)-7) << 0xC	/* Peripheral #3 assigned IVG #x	*/
-#define P4_IVG(x)		(((x)&0xF)-7) << 0x10	/* Peripheral #4 assigned IVG #x	*/
-#define P5_IVG(x)		(((x)&0xF)-7) << 0x14	/* Peripheral #5 assigned IVG #x	*/
-#define P6_IVG(x)		(((x)&0xF)-7) << 0x18	/* Peripheral #6 assigned IVG #x	*/
-#define P7_IVG(x)		(((x)&0xF)-7) << 0x1C	/* Peripheral #7 assigned IVG #x	*/
-
-/* SIC_IAR1 Macros															*/
-#define P8_IVG(x)		(((x)&0xF)-7)			/* Peripheral #8 assigned IVG #x 	*/
-#define P9_IVG(x)		(((x)&0xF)-7) << 0x4	/* Peripheral #9 assigned IVG #x 	*/
-#define P10_IVG(x)		(((x)&0xF)-7) << 0x8	/* Peripheral #10 assigned IVG #x	*/
-#define P11_IVG(x)		(((x)&0xF)-7) << 0xC	/* Peripheral #11 assigned IVG #x 	*/
-#define P12_IVG(x)		(((x)&0xF)-7) << 0x10	/* Peripheral #12 assigned IVG #x	*/
-#define P13_IVG(x)		(((x)&0xF)-7) << 0x14	/* Peripheral #13 assigned IVG #x	*/
-#define P14_IVG(x)		(((x)&0xF)-7) << 0x18	/* Peripheral #14 assigned IVG #x	*/
-#define P15_IVG(x)		(((x)&0xF)-7) << 0x1C	/* Peripheral #15 assigned IVG #x	*/
-
-/* SIC_IAR2 Macros															*/
-#define P16_IVG(x)		(((x)&0xF)-7)			/* Peripheral #16 assigned IVG #x	*/
-#define P17_IVG(x)		(((x)&0xF)-7) << 0x4	/* Peripheral #17 assigned IVG #x	*/
-#define P18_IVG(x)		(((x)&0xF)-7) << 0x8	/* Peripheral #18 assigned IVG #x	*/
-#define P19_IVG(x)		(((x)&0xF)-7) << 0xC	/* Peripheral #19 assigned IVG #x	*/
-#define P20_IVG(x)		(((x)&0xF)-7) << 0x10	/* Peripheral #20 assigned IVG #x	*/
-#define P21_IVG(x)		(((x)&0xF)-7) << 0x14	/* Peripheral #21 assigned IVG #x	*/
-#define P22_IVG(x)		(((x)&0xF)-7) << 0x18	/* Peripheral #22 assigned IVG #x	*/
-#define P23_IVG(x)		(((x)&0xF)-7) << 0x1C	/* Peripheral #23 assigned IVG #x	*/
-
-/* SIC_IAR3 Macros															*/
-#define P24_IVG(x)		(((x)&0xF)-7)			/* Peripheral #24 assigned IVG #x	*/
-#define P25_IVG(x)		(((x)&0xF)-7) << 0x4	/* Peripheral #25 assigned IVG #x	*/
-#define P26_IVG(x)		(((x)&0xF)-7) << 0x8	/* Peripheral #26 assigned IVG #x	*/
-#define P27_IVG(x)		(((x)&0xF)-7) << 0xC	/* Peripheral #27 assigned IVG #x	*/
-#define P28_IVG(x)		(((x)&0xF)-7) << 0x10	/* Peripheral #28 assigned IVG #x	*/
-#define P29_IVG(x)		(((x)&0xF)-7) << 0x14	/* Peripheral #29 assigned IVG #x	*/
-#define P30_IVG(x)		(((x)&0xF)-7) << 0x18	/* Peripheral #30 assigned IVG #x	*/
-#define P31_IVG(x)		(((x)&0xF)-7) << 0x1C	/* Peripheral #31 assigned IVG #x	*/
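
As a concrete illustration of the field-depositing caveat in the disclaimer above, here is a minimal sketch (not taken from any driver; the raw pointer cast, the IVG choices, and the assumption that the SIC_IAR0 MMR named in the heading above is the 32-bit target of these macros are illustrative) that composes the whole SIC_IAR0 value with ORs and writes it once:

	/* Assigning a single shifted field directly would zero every other
	 * field in the register, so build the complete value first and
	 * write it in one go.
	 */
	*(volatile unsigned long *)SIC_IAR0 =
		P0_IVG(7) | P1_IVG(7) | P2_IVG(8)  | P3_IVG(8) |
		P4_IVG(9) | P5_IVG(9) | P6_IVG(10) | P7_IVG(10);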
-
-
-/* SIC_IMASK Masks																		*/
-#define SIC_UNMASK_ALL	0x00000000					/* Unmask all peripheral interrupts	*/
-#define SIC_MASK_ALL	0xFFFFFFFF					/* Mask all peripheral interrupts	*/
-#define SIC_MASK(x)		(1 << ((x)&0x1F))					/* Mask Peripheral #x interrupt		*/
-#define SIC_UNMASK(x)	(0xFFFFFFFF ^ (1 << ((x)&0x1F)))	/* Unmask Peripheral #x interrupt	*/
-
-/* SIC_IWR Masks																		*/
-#define IWR_DISABLE_ALL	0x00000000					/* Wakeup Disable all peripherals	*/
-#define IWR_ENABLE_ALL	0xFFFFFFFF					/* Wakeup Enable all peripherals	*/
-#define IWR_ENABLE(x)	(1 << ((x)&0x1F))					/* Wakeup Enable Peripheral #x		*/
-#define IWR_DISABLE(x)	(0xFFFFFFFF ^ (1 << ((x)&0x1F))) 	/* Wakeup Disable Peripheral #x		*/
-
-
-/* ************** UART CONTROLLER MASKS *************************/
-/* UARTx_LCR Masks												*/
-#define WLS(x)		(((x)-5) & 0x03)	/* Word Length Select */
-#define STB			0x04				/* Stop Bits			*/
-#define PEN			0x08				/* Parity Enable		*/
-#define EPS			0x10				/* Even Parity Select	*/
-#define STP			0x20				/* Stick Parity			*/
-#define SB			0x40				/* Set Break			*/
-#define DLAB		0x80				/* Divisor Latch Access	*/
-
-/* UARTx_MCR Mask										*/
-#define LOOP_ENA	0x10	/* Loopback Mode Enable */
-#define LOOP_ENA_P	0x04
-
-/* UARTx_LSR Masks										*/
-#define DR			0x01	/* Data Ready				*/
-#define OE			0x02	/* Overrun Error			*/
-#define PE			0x04	/* Parity Error				*/
-#define FE			0x08	/* Framing Error			*/
-#define BI			0x10	/* Break Interrupt			*/
-#define THRE		0x20	/* THR Empty				*/
-#define TEMT		0x40	/* TSR and UART_THR Empty	*/
-
-/* UARTx_IER Masks															*/
-#define ERBFI		0x01		/* Enable Receive Buffer Full Interrupt		*/
-#define ETBEI		0x02		/* Enable Transmit Buffer Empty Interrupt	*/
-#define ELSI		0x04		/* Enable RX Status Interrupt				*/
-
-/* UARTx_IIR Masks														*/
-#define NINT		0x01		/* Pending Interrupt					*/
-#define IIR_TX_READY    0x02		/* UART_THR empty                               */
-#define IIR_RX_READY    0x04		/* Receive data ready                           */
-#define IIR_LINE_CHANGE 0x06		/* Receive line status    			*/
-#define IIR_STATUS	0x06		/* Highest Priority Pending Interrupt	*/
-
-/* UARTx_GCTL Masks													*/
-#define UCEN		0x01		/* Enable UARTx Clocks				*/
-#define IREN		0x02		/* Enable IrDA Mode					*/
-#define TPOLC		0x04		/* IrDA TX Polarity Change			*/
-#define RPOLC		0x08		/* IrDA RX Polarity Change			*/
-#define FPE			0x10		/* Force Parity Error On Transmit	*/
-#define FFE			0x20		/* Force Framing Error On Transmit	*/
-
-
-/*  ****************  GENERAL PURPOSE TIMER MASKS  **********************/
-/* TIMER_ENABLE Masks													*/
-#define TIMEN0			0x0001		/* Enable Timer 0					*/
-#define TIMEN1			0x0002		/* Enable Timer 1					*/
-#define TIMEN2			0x0004		/* Enable Timer 2					*/
-#define TIMEN3			0x0008		/* Enable Timer 3					*/
-#define TIMEN4			0x0010		/* Enable Timer 4					*/
-#define TIMEN5			0x0020		/* Enable Timer 5					*/
-#define TIMEN6			0x0040		/* Enable Timer 6					*/
-#define TIMEN7			0x0080		/* Enable Timer 7					*/
-
-/* TIMER_DISABLE Masks													*/
-#define TIMDIS0			TIMEN0		/* Disable Timer 0					*/
-#define TIMDIS1			TIMEN1		/* Disable Timer 1					*/
-#define TIMDIS2			TIMEN2		/* Disable Timer 2					*/
-#define TIMDIS3			TIMEN3		/* Disable Timer 3					*/
-#define TIMDIS4			TIMEN4		/* Disable Timer 4					*/
-#define TIMDIS5			TIMEN5		/* Disable Timer 5					*/
-#define TIMDIS6			TIMEN6		/* Disable Timer 6					*/
-#define TIMDIS7			TIMEN7		/* Disable Timer 7					*/
-
-/* TIMER_STATUS Masks													*/
-#define TIMIL0			0x00000001	/* Timer 0 Interrupt				*/
-#define TIMIL1			0x00000002	/* Timer 1 Interrupt				*/
-#define TIMIL2			0x00000004	/* Timer 2 Interrupt				*/
-#define TIMIL3			0x00000008	/* Timer 3 Interrupt				*/
-#define TOVF_ERR0		0x00000010	/* Timer 0 Counter Overflow			*/
-#define TOVF_ERR1		0x00000020	/* Timer 1 Counter Overflow			*/
-#define TOVF_ERR2		0x00000040	/* Timer 2 Counter Overflow			*/
-#define TOVF_ERR3		0x00000080	/* Timer 3 Counter Overflow			*/
-#define TRUN0			0x00001000	/* Timer 0 Slave Enable Status		*/
-#define TRUN1			0x00002000	/* Timer 1 Slave Enable Status		*/
-#define TRUN2			0x00004000	/* Timer 2 Slave Enable Status		*/
-#define TRUN3			0x00008000	/* Timer 3 Slave Enable Status		*/
-#define TIMIL4			0x00010000	/* Timer 4 Interrupt				*/
-#define TIMIL5			0x00020000	/* Timer 5 Interrupt				*/
-#define TIMIL6			0x00040000	/* Timer 6 Interrupt				*/
-#define TIMIL7			0x00080000	/* Timer 7 Interrupt				*/
-#define TOVF_ERR4		0x00100000	/* Timer 4 Counter Overflow			*/
-#define TOVF_ERR5		0x00200000	/* Timer 5 Counter Overflow			*/
-#define TOVF_ERR6		0x00400000	/* Timer 6 Counter Overflow			*/
-#define TOVF_ERR7		0x00800000	/* Timer 7 Counter Overflow			*/
-#define TRUN4			0x10000000	/* Timer 4 Slave Enable Status		*/
-#define TRUN5			0x20000000	/* Timer 5 Slave Enable Status		*/
-#define TRUN6			0x40000000	/* Timer 6 Slave Enable Status		*/
-#define TRUN7			0x80000000	/* Timer 7 Slave Enable Status		*/
-
-/* Alternate Deprecated Macros Provided For Backwards Code Compatibility */
-#define TOVL_ERR0 TOVF_ERR0
-#define TOVL_ERR1 TOVF_ERR1
-#define TOVL_ERR2 TOVF_ERR2
-#define TOVL_ERR3 TOVF_ERR3
-#define TOVL_ERR4 TOVF_ERR4
-#define TOVL_ERR5 TOVF_ERR5
-#define TOVL_ERR6 TOVF_ERR6
-#define TOVL_ERR7 TOVF_ERR7
-
-/* TIMERx_CONFIG Masks													*/
-#define PWM_OUT			0x0001	/* Pulse-Width Modulation Output Mode	*/
-#define WDTH_CAP		0x0002	/* Width Capture Input Mode				*/
-#define EXT_CLK			0x0003	/* External Clock Mode					*/
-#define PULSE_HI		0x0004	/* Action Pulse (Positive/Negative*)	*/
-#define PERIOD_CNT		0x0008	/* Period Count							*/
-#define IRQ_ENA			0x0010	/* Interrupt Request Enable				*/
-#define TIN_SEL			0x0020	/* Timer Input Select					*/
-#define OUT_DIS			0x0040	/* Output Pad Disable					*/
-#define CLK_SEL			0x0080	/* Timer Clock Select					*/
-#define TOGGLE_HI		0x0100	/* PWM_OUT PULSE_HI Toggle Mode			*/
-#define EMU_RUN			0x0200	/* Emulation Behavior Select			*/
-#define ERR_TYP			0xC000	/* Error Type							*/
-
-
-/* ******************   GPIO PORTS F, G, H MASKS  ***********************/
-/*  General Purpose IO (0xFFC00700 - 0xFFC007FF)  Masks 				*/
-/* Port F Masks 														*/
-#define PF0		0x0001
-#define PF1		0x0002
-#define PF2		0x0004
-#define PF3		0x0008
-#define PF4		0x0010
-#define PF5		0x0020
-#define PF6		0x0040
-#define PF7		0x0080
-#define PF8		0x0100
-#define PF9		0x0200
-#define PF10	0x0400
-#define PF11	0x0800
-#define PF12	0x1000
-#define PF13	0x2000
-#define PF14	0x4000
-#define PF15	0x8000
-
-/* Port G Masks															*/
-#define PG0		0x0001
-#define PG1		0x0002
-#define PG2		0x0004
-#define PG3		0x0008
-#define PG4		0x0010
-#define PG5		0x0020
-#define PG6		0x0040
-#define PG7		0x0080
-#define PG8		0x0100
-#define PG9		0x0200
-#define PG10	0x0400
-#define PG11	0x0800
-#define PG12	0x1000
-#define PG13	0x2000
-#define PG14	0x4000
-#define PG15	0x8000
-
-/* Port H Masks															*/
-#define PH0		0x0001
-#define PH1		0x0002
-#define PH2		0x0004
-#define PH3		0x0008
-#define PH4		0x0010
-#define PH5		0x0020
-#define PH6		0x0040
-#define PH7		0x0080
-
-/* *********************  ASYNCHRONOUS MEMORY CONTROLLER MASKS  *************************/
-/* EBIU_AMGCTL Masks																	*/
-#define AMCKEN			0x0001		/* Enable CLKOUT									*/
-#define	AMBEN_NONE		0x0000		/* All Banks Disabled								*/
-#define AMBEN_B0		0x0002		/* Enable Async Memory Bank 0 only					*/
-#define AMBEN_B0_B1		0x0004		/* Enable Async Memory Banks 0 & 1 only				*/
-#define AMBEN_B0_B1_B2	0x0006		/* Enable Async Memory Banks 0, 1, and 2			*/
-#define AMBEN_ALL		0x0008		/* Enable Async Memory Banks (all) 0, 1, 2, and 3	*/
-
-/* EBIU_AMBCTL0 Masks																	*/
-#define B0RDYEN			0x00000001  /* Bank 0 (B0) RDY Enable							*/
-#define B0RDYPOL		0x00000002  /* B0 RDY Active High								*/
-#define B0TT_1			0x00000004  /* B0 Transition Time (Read to Write) = 1 cycle		*/
-#define B0TT_2			0x00000008  /* B0 Transition Time (Read to Write) = 2 cycles	*/
-#define B0TT_3			0x0000000C  /* B0 Transition Time (Read to Write) = 3 cycles	*/
-#define B0TT_4			0x00000000  /* B0 Transition Time (Read to Write) = 4 cycles	*/
-#define B0ST_1			0x00000010  /* B0 Setup Time (AOE to Read/Write) = 1 cycle		*/
-#define B0ST_2			0x00000020  /* B0 Setup Time (AOE to Read/Write) = 2 cycles		*/
-#define B0ST_3			0x00000030  /* B0 Setup Time (AOE to Read/Write) = 3 cycles		*/
-#define B0ST_4			0x00000000  /* B0 Setup Time (AOE to Read/Write) = 4 cycles		*/
-#define B0HT_1			0x00000040  /* B0 Hold Time (~Read/Write to ~AOE) = 1 cycle		*/
-#define B0HT_2			0x00000080  /* B0 Hold Time (~Read/Write to ~AOE) = 2 cycles	*/
-#define B0HT_3			0x000000C0  /* B0 Hold Time (~Read/Write to ~AOE) = 3 cycles	*/
-#define B0HT_0			0x00000000  /* B0 Hold Time (~Read/Write to ~AOE) = 0 cycles	*/
-#define B0RAT_1			0x00000100  /* B0 Read Access Time = 1 cycle					*/
-#define B0RAT_2			0x00000200  /* B0 Read Access Time = 2 cycles					*/
-#define B0RAT_3			0x00000300  /* B0 Read Access Time = 3 cycles					*/
-#define B0RAT_4			0x00000400  /* B0 Read Access Time = 4 cycles					*/
-#define B0RAT_5			0x00000500  /* B0 Read Access Time = 5 cycles					*/
-#define B0RAT_6			0x00000600  /* B0 Read Access Time = 6 cycles					*/
-#define B0RAT_7			0x00000700  /* B0 Read Access Time = 7 cycles					*/
-#define B0RAT_8			0x00000800  /* B0 Read Access Time = 8 cycles					*/
-#define B0RAT_9			0x00000900  /* B0 Read Access Time = 9 cycles					*/
-#define B0RAT_10		0x00000A00  /* B0 Read Access Time = 10 cycles					*/
-#define B0RAT_11		0x00000B00  /* B0 Read Access Time = 11 cycles					*/
-#define B0RAT_12		0x00000C00  /* B0 Read Access Time = 12 cycles					*/
-#define B0RAT_13		0x00000D00  /* B0 Read Access Time = 13 cycles					*/
-#define B0RAT_14		0x00000E00  /* B0 Read Access Time = 14 cycles					*/
-#define B0RAT_15		0x00000F00  /* B0 Read Access Time = 15 cycles					*/
-#define B0WAT_1			0x00001000  /* B0 Write Access Time = 1 cycle					*/
-#define B0WAT_2			0x00002000  /* B0 Write Access Time = 2 cycles					*/
-#define B0WAT_3			0x00003000  /* B0 Write Access Time = 3 cycles					*/
-#define B0WAT_4			0x00004000  /* B0 Write Access Time = 4 cycles					*/
-#define B0WAT_5			0x00005000  /* B0 Write Access Time = 5 cycles					*/
-#define B0WAT_6			0x00006000  /* B0 Write Access Time = 6 cycles					*/
-#define B0WAT_7			0x00007000  /* B0 Write Access Time = 7 cycles					*/
-#define B0WAT_8			0x00008000  /* B0 Write Access Time = 8 cycles					*/
-#define B0WAT_9			0x00009000  /* B0 Write Access Time = 9 cycles					*/
-#define B0WAT_10		0x0000A000  /* B0 Write Access Time = 10 cycles					*/
-#define B0WAT_11		0x0000B000  /* B0 Write Access Time = 11 cycles					*/
-#define B0WAT_12		0x0000C000  /* B0 Write Access Time = 12 cycles					*/
-#define B0WAT_13		0x0000D000  /* B0 Write Access Time = 13 cycles					*/
-#define B0WAT_14		0x0000E000  /* B0 Write Access Time = 14 cycles					*/
-#define B0WAT_15		0x0000F000  /* B0 Write Access Time = 15 cycles					*/
-
-#define B1RDYEN			0x00010000  /* Bank 1 (B1) RDY Enable                       	*/
-#define B1RDYPOL		0x00020000  /* B1 RDY Active High                           	*/
-#define B1TT_1			0x00040000  /* B1 Transition Time (Read to Write) = 1 cycle 	*/
-#define B1TT_2			0x00080000  /* B1 Transition Time (Read to Write) = 2 cycles	*/
-#define B1TT_3			0x000C0000  /* B1 Transition Time (Read to Write) = 3 cycles	*/
-#define B1TT_4			0x00000000  /* B1 Transition Time (Read to Write) = 4 cycles	*/
-#define B1ST_1			0x00100000  /* B1 Setup Time (AOE to Read/Write) = 1 cycle  	*/
-#define B1ST_2			0x00200000  /* B1 Setup Time (AOE to Read/Write) = 2 cycles 	*/
-#define B1ST_3			0x00300000  /* B1 Setup Time (AOE to Read/Write) = 3 cycles 	*/
-#define B1ST_4			0x00000000  /* B1 Setup Time (AOE to Read/Write) = 4 cycles 	*/
-#define B1HT_1			0x00400000  /* B1 Hold Time (~Read/Write to ~AOE) = 1 cycle 	*/
-#define B1HT_2			0x00800000  /* B1 Hold Time (~Read/Write to ~AOE) = 2 cycles	*/
-#define B1HT_3			0x00C00000  /* B1 Hold Time (~Read/Write to ~AOE) = 3 cycles	*/
-#define B1HT_0			0x00000000  /* B1 Hold Time (~Read/Write to ~AOE) = 0 cycles	*/
-#define B1RAT_1			0x01000000  /* B1 Read Access Time = 1 cycle					*/
-#define B1RAT_2			0x02000000  /* B1 Read Access Time = 2 cycles					*/
-#define B1RAT_3			0x03000000  /* B1 Read Access Time = 3 cycles					*/
-#define B1RAT_4			0x04000000  /* B1 Read Access Time = 4 cycles					*/
-#define B1RAT_5			0x05000000  /* B1 Read Access Time = 5 cycles					*/
-#define B1RAT_6			0x06000000  /* B1 Read Access Time = 6 cycles					*/
-#define B1RAT_7			0x07000000  /* B1 Read Access Time = 7 cycles					*/
-#define B1RAT_8			0x08000000  /* B1 Read Access Time = 8 cycles					*/
-#define B1RAT_9			0x09000000  /* B1 Read Access Time = 9 cycles					*/
-#define B1RAT_10		0x0A000000  /* B1 Read Access Time = 10 cycles					*/
-#define B1RAT_11		0x0B000000  /* B1 Read Access Time = 11 cycles					*/
-#define B1RAT_12		0x0C000000  /* B1 Read Access Time = 12 cycles					*/
-#define B1RAT_13		0x0D000000  /* B1 Read Access Time = 13 cycles					*/
-#define B1RAT_14		0x0E000000  /* B1 Read Access Time = 14 cycles					*/
-#define B1RAT_15		0x0F000000  /* B1 Read Access Time = 15 cycles					*/
-#define B1WAT_1			0x10000000  /* B1 Write Access Time = 1 cycle					*/
-#define B1WAT_2			0x20000000  /* B1 Write Access Time = 2 cycles					*/
-#define B1WAT_3			0x30000000  /* B1 Write Access Time = 3 cycles					*/
-#define B1WAT_4			0x40000000  /* B1 Write Access Time = 4 cycles					*/
-#define B1WAT_5			0x50000000  /* B1 Write Access Time = 5 cycles					*/
-#define B1WAT_6			0x60000000  /* B1 Write Access Time = 6 cycles					*/
-#define B1WAT_7			0x70000000  /* B1 Write Access Time = 7 cycles					*/
-#define B1WAT_8			0x80000000  /* B1 Write Access Time = 8 cycles					*/
-#define B1WAT_9			0x90000000  /* B1 Write Access Time = 9 cycles					*/
-#define B1WAT_10		0xA0000000  /* B1 Write Access Time = 10 cycles					*/
-#define B1WAT_11		0xB0000000  /* B1 Write Access Time = 11 cycles					*/
-#define B1WAT_12		0xC0000000  /* B1 Write Access Time = 12 cycles					*/
-#define B1WAT_13		0xD0000000  /* B1 Write Access Time = 13 cycles					*/
-#define B1WAT_14		0xE0000000  /* B1 Write Access Time = 14 cycles					*/
-#define B1WAT_15		0xF0000000  /* B1 Write Access Time = 15 cycles					*/
-
-/* EBIU_AMBCTL1 Masks																	*/
-#define B2RDYEN			0x00000001  /* Bank 2 (B2) RDY Enable							*/
-#define B2RDYPOL		0x00000002  /* B2 RDY Active High								*/
-#define B2TT_1			0x00000004  /* B2 Transition Time (Read to Write) = 1 cycle		*/
-#define B2TT_2			0x00000008  /* B2 Transition Time (Read to Write) = 2 cycles	*/
-#define B2TT_3			0x0000000C  /* B2 Transition Time (Read to Write) = 3 cycles	*/
-#define B2TT_4			0x00000000  /* B2 Transition Time (Read to Write) = 4 cycles	*/
-#define B2ST_1			0x00000010  /* B2 Setup Time (AOE to Read/Write) = 1 cycle		*/
-#define B2ST_2			0x00000020  /* B2 Setup Time (AOE to Read/Write) = 2 cycles		*/
-#define B2ST_3			0x00000030  /* B2 Setup Time (AOE to Read/Write) = 3 cycles		*/
-#define B2ST_4			0x00000000  /* B2 Setup Time (AOE to Read/Write) = 4 cycles		*/
-#define B2HT_1			0x00000040  /* B2 Hold Time (~Read/Write to ~AOE) = 1 cycle		*/
-#define B2HT_2			0x00000080  /* B2 Hold Time (~Read/Write to ~AOE) = 2 cycles	*/
-#define B2HT_3			0x000000C0  /* B2 Hold Time (~Read/Write to ~AOE) = 3 cycles	*/
-#define B2HT_0			0x00000000  /* B2 Hold Time (~Read/Write to ~AOE) = 0 cycles	*/
-#define B2RAT_1			0x00000100  /* B2 Read Access Time = 1 cycle					*/
-#define B2RAT_2			0x00000200  /* B2 Read Access Time = 2 cycles					*/
-#define B2RAT_3			0x00000300  /* B2 Read Access Time = 3 cycles					*/
-#define B2RAT_4			0x00000400  /* B2 Read Access Time = 4 cycles					*/
-#define B2RAT_5			0x00000500  /* B2 Read Access Time = 5 cycles					*/
-#define B2RAT_6			0x00000600  /* B2 Read Access Time = 6 cycles					*/
-#define B2RAT_7			0x00000700  /* B2 Read Access Time = 7 cycles					*/
-#define B2RAT_8			0x00000800  /* B2 Read Access Time = 8 cycles					*/
-#define B2RAT_9			0x00000900  /* B2 Read Access Time = 9 cycles					*/
-#define B2RAT_10		0x00000A00  /* B2 Read Access Time = 10 cycles					*/
-#define B2RAT_11		0x00000B00  /* B2 Read Access Time = 11 cycles					*/
-#define B2RAT_12		0x00000C00  /* B2 Read Access Time = 12 cycles					*/
-#define B2RAT_13		0x00000D00  /* B2 Read Access Time = 13 cycles					*/
-#define B2RAT_14		0x00000E00  /* B2 Read Access Time = 14 cycles					*/
-#define B2RAT_15		0x00000F00  /* B2 Read Access Time = 15 cycles					*/
-#define B2WAT_1			0x00001000  /* B2 Write Access Time = 1 cycle					*/
-#define B2WAT_2			0x00002000  /* B2 Write Access Time = 2 cycles					*/
-#define B2WAT_3			0x00003000  /* B2 Write Access Time = 3 cycles					*/
-#define B2WAT_4			0x00004000  /* B2 Write Access Time = 4 cycles					*/
-#define B2WAT_5			0x00005000  /* B2 Write Access Time = 5 cycles					*/
-#define B2WAT_6			0x00006000  /* B2 Write Access Time = 6 cycles					*/
-#define B2WAT_7			0x00007000  /* B2 Write Access Time = 7 cycles					*/
-#define B2WAT_8			0x00008000  /* B2 Write Access Time = 8 cycles					*/
-#define B2WAT_9			0x00009000  /* B2 Write Access Time = 9 cycles					*/
-#define B2WAT_10		0x0000A000  /* B2 Write Access Time = 10 cycles					*/
-#define B2WAT_11		0x0000B000  /* B2 Write Access Time = 11 cycles					*/
-#define B2WAT_12		0x0000C000  /* B2 Write Access Time = 12 cycles					*/
-#define B2WAT_13		0x0000D000  /* B2 Write Access Time = 13 cycles					*/
-#define B2WAT_14		0x0000E000  /* B2 Write Access Time = 14 cycles					*/
-#define B2WAT_15		0x0000F000  /* B2 Write Access Time = 15 cycles					*/
-
-#define B3RDYEN			0x00010000  /* Bank 3 (B3) RDY Enable							*/
-#define B3RDYPOL		0x00020000  /* B3 RDY Active High								*/
-#define B3TT_1			0x00040000  /* B3 Transition Time (Read to Write) = 1 cycle		*/
-#define B3TT_2			0x00080000  /* B3 Transition Time (Read to Write) = 2 cycles	*/
-#define B3TT_3			0x000C0000  /* B3 Transition Time (Read to Write) = 3 cycles	*/
-#define B3TT_4			0x00000000  /* B3 Transition Time (Read to Write) = 4 cycles	*/
-#define B3ST_1			0x00100000  /* B3 Setup Time (AOE to Read/Write) = 1 cycle		*/
-#define B3ST_2			0x00200000  /* B3 Setup Time (AOE to Read/Write) = 2 cycles		*/
-#define B3ST_3			0x00300000  /* B3 Setup Time (AOE to Read/Write) = 3 cycles		*/
-#define B3ST_4			0x00000000  /* B3 Setup Time (AOE to Read/Write) = 4 cycles		*/
-#define B3HT_1			0x00400000  /* B3 Hold Time (~Read/Write to ~AOE) = 1 cycle		*/
-#define B3HT_2			0x00800000  /* B3 Hold Time (~Read/Write to ~AOE) = 2 cycles	*/
-#define B3HT_3			0x00C00000  /* B3 Hold Time (~Read/Write to ~AOE) = 3 cycles	*/
-#define B3HT_0			0x00000000  /* B3 Hold Time (~Read/Write to ~AOE) = 0 cycles	*/
-#define B3RAT_1			0x01000000  /* B3 Read Access Time = 1 cycle					*/
-#define B3RAT_2			0x02000000  /* B3 Read Access Time = 2 cycles					*/
-#define B3RAT_3			0x03000000  /* B3 Read Access Time = 3 cycles					*/
-#define B3RAT_4			0x04000000  /* B3 Read Access Time = 4 cycles					*/
-#define B3RAT_5			0x05000000  /* B3 Read Access Time = 5 cycles					*/
-#define B3RAT_6			0x06000000  /* B3 Read Access Time = 6 cycles					*/
-#define B3RAT_7			0x07000000  /* B3 Read Access Time = 7 cycles					*/
-#define B3RAT_8			0x08000000  /* B3 Read Access Time = 8 cycles					*/
-#define B3RAT_9			0x09000000  /* B3 Read Access Time = 9 cycles					*/
-#define B3RAT_10		0x0A000000  /* B3 Read Access Time = 10 cycles					*/
-#define B3RAT_11		0x0B000000  /* B3 Read Access Time = 11 cycles					*/
-#define B3RAT_12		0x0C000000  /* B3 Read Access Time = 12 cycles					*/
-#define B3RAT_13		0x0D000000  /* B3 Read Access Time = 13 cycles					*/
-#define B3RAT_14		0x0E000000  /* B3 Read Access Time = 14 cycles					*/
-#define B3RAT_15		0x0F000000  /* B3 Read Access Time = 15 cycles					*/
-#define B3WAT_1			0x10000000  /* B3 Write Access Time = 1 cycle					*/
-#define B3WAT_2			0x20000000  /* B3 Write Access Time = 2 cycles					*/
-#define B3WAT_3			0x30000000  /* B3 Write Access Time = 3 cycles					*/
-#define B3WAT_4			0x40000000  /* B3 Write Access Time = 4 cycles					*/
-#define B3WAT_5			0x50000000  /* B3 Write Access Time = 5 cycles					*/
-#define B3WAT_6			0x60000000  /* B3 Write Access Time = 6 cycles					*/
-#define B3WAT_7			0x70000000  /* B3 Write Access Time = 7 cycles					*/
-#define B3WAT_8			0x80000000  /* B3 Write Access Time = 8 cycles					*/
-#define B3WAT_9			0x90000000  /* B3 Write Access Time = 9 cycles					*/
-#define B3WAT_10		0xA0000000  /* B3 Write Access Time = 10 cycles					*/
-#define B3WAT_11		0xB0000000  /* B3 Write Access Time = 11 cycles					*/
-#define B3WAT_12		0xC0000000  /* B3 Write Access Time = 12 cycles					*/
-#define B3WAT_13		0xD0000000  /* B3 Write Access Time = 13 cycles					*/
-#define B3WAT_14		0xE0000000  /* B3 Write Access Time = 14 cycles					*/
-#define B3WAT_15		0xF0000000  /* B3 Write Access Time = 15 cycles					*/
-
-
-/* **********************  SDRAM CONTROLLER MASKS  **********************************************/
-/* EBIU_SDGCTL Masks																			*/
-#define SCTLE			0x00000001	/* Enable SDRAM Signals										*/
-#define CL_2			0x00000008	/* SDRAM CAS Latency = 2 cycles								*/
-#define CL_3			0x0000000C	/* SDRAM CAS Latency = 3 cycles								*/
-#define PASR_ALL		0x00000000	/* All 4 SDRAM Banks Refreshed In Self-Refresh				*/
-#define PASR_B0_B1		0x00000010	/* SDRAM Banks 0 and 1 Are Refreshed In Self-Refresh		*/
-#define PASR_B0			0x00000020	/* Only SDRAM Bank 0 Is Refreshed In Self-Refresh			*/
-#define TRAS_1			0x00000040	/* SDRAM tRAS = 1 cycle										*/
-#define TRAS_2			0x00000080	/* SDRAM tRAS = 2 cycles									*/
-#define TRAS_3			0x000000C0	/* SDRAM tRAS = 3 cycles									*/
-#define TRAS_4			0x00000100	/* SDRAM tRAS = 4 cycles									*/
-#define TRAS_5			0x00000140	/* SDRAM tRAS = 5 cycles									*/
-#define TRAS_6			0x00000180	/* SDRAM tRAS = 6 cycles									*/
-#define TRAS_7			0x000001C0	/* SDRAM tRAS = 7 cycles									*/
-#define TRAS_8			0x00000200	/* SDRAM tRAS = 8 cycles									*/
-#define TRAS_9			0x00000240	/* SDRAM tRAS = 9 cycles									*/
-#define TRAS_10			0x00000280	/* SDRAM tRAS = 10 cycles									*/
-#define TRAS_11			0x000002C0	/* SDRAM tRAS = 11 cycles									*/
-#define TRAS_12			0x00000300	/* SDRAM tRAS = 12 cycles									*/
-#define TRAS_13			0x00000340	/* SDRAM tRAS = 13 cycles									*/
-#define TRAS_14			0x00000380	/* SDRAM tRAS = 14 cycles									*/
-#define TRAS_15			0x000003C0	/* SDRAM tRAS = 15 cycles									*/
-#define TRP_1			0x00000800	/* SDRAM tRP = 1 cycle										*/
-#define TRP_2			0x00001000	/* SDRAM tRP = 2 cycles										*/
-#define TRP_3			0x00001800	/* SDRAM tRP = 3 cycles										*/
-#define TRP_4			0x00002000	/* SDRAM tRP = 4 cycles										*/
-#define TRP_5			0x00002800	/* SDRAM tRP = 5 cycles										*/
-#define TRP_6			0x00003000	/* SDRAM tRP = 6 cycles										*/
-#define TRP_7			0x00003800	/* SDRAM tRP = 7 cycles										*/
-#define TRCD_1			0x00008000	/* SDRAM tRCD = 1 cycle										*/
-#define TRCD_2			0x00010000	/* SDRAM tRCD = 2 cycles									*/
-#define TRCD_3			0x00018000	/* SDRAM tRCD = 3 cycles									*/
-#define TRCD_4			0x00020000	/* SDRAM tRCD = 4 cycles									*/
-#define TRCD_5			0x00028000	/* SDRAM tRCD = 5 cycles									*/
-#define TRCD_6			0x00030000	/* SDRAM tRCD = 6 cycles									*/
-#define TRCD_7			0x00038000	/* SDRAM tRCD = 7 cycles									*/
-#define TWR_1			0x00080000	/* SDRAM tWR = 1 cycle										*/
-#define TWR_2			0x00100000	/* SDRAM tWR = 2 cycles										*/
-#define TWR_3			0x00180000	/* SDRAM tWR = 3 cycles										*/
-#define PUPSD			0x00200000	/* Power-Up Start Delay (15 SCLK Cycles Delay)				*/
-#define PSM				0x00400000	/* Power-Up Sequence (Mode Register Before/After* Refresh)	*/
-#define PSS				0x00800000	/* Enable Power-Up Sequence on Next SDRAM Access			*/
-#define SRFS			0x01000000	/* Enable SDRAM Self-Refresh Mode							*/
-#define EBUFE			0x02000000	/* Enable External Buffering Timing							*/
-#define FBBRW			0x04000000	/* Enable Fast Back-To-Back Read To Write					*/
-#define EMREN			0x10000000	/* Extended Mode Register Enable							*/
-#define TCSR			0x20000000	/* Temp-Compensated Self-Refresh Value (85/45* Deg C)		*/
-#define CDDBG			0x40000000	/* Tristate SDRAM Controls During Bus Grant					*/
-
-/* EBIU_SDBCTL Masks																		*/
-#define EBE				0x0001		/* Enable SDRAM External Bank							*/
-#define EBSZ_16			0x0000		/* SDRAM External Bank Size = 16MB	*/
-#define EBSZ_32			0x0002		/* SDRAM External Bank Size = 32MB	*/
-#define EBSZ_64			0x0004		/* SDRAM External Bank Size = 64MB	*/
-#define EBSZ_128		0x0006		/* SDRAM External Bank Size = 128MB		*/
-#define EBSZ_256		0x0008		/* SDRAM External Bank Size = 256MB 	*/
-#define EBSZ_512		0x000A		/* SDRAM External Bank Size = 512MB		*/
-#define EBCAW_8			0x0000		/* SDRAM External Bank Column Address Width = 8 Bits	*/
-#define EBCAW_9			0x0010		/* SDRAM External Bank Column Address Width = 9 Bits	*/
-#define EBCAW_10		0x0020		/* SDRAM External Bank Column Address Width = 10 Bits	*/
-#define EBCAW_11		0x0030		/* SDRAM External Bank Column Address Width = 11 Bits	*/
-
-/* EBIU_SDSTAT Masks														*/
-#define SDCI			0x0001		/* SDRAM Controller Idle 				*/
-#define SDSRA			0x0002		/* SDRAM Self-Refresh Active			*/
-#define SDPUA			0x0004		/* SDRAM Power-Up Active 				*/
-#define SDRS			0x0008		/* SDRAM Will Power-Up On Next Access	*/
-#define SDEASE			0x0010		/* SDRAM EAB Sticky Error Status		*/
-#define BGSTAT			0x0020		/* Bus Grant Status						*/
-
-
-/* **************************  DMA CONTROLLER MASKS  ********************************/
-
-/* DMAx_PERIPHERAL_MAP, MDMA_yy_PERIPHERAL_MAP Masks								*/
-#define CTYPE			0x0040	/* DMA Channel Type Indicator (Memory/Peripheral*)	*/
-#define PMAP			0xF000	/* Peripheral Mapped To This Channel				*/
-#define PMAP_PPI		0x0000	/* 		PPI Port DMA								*/
-#define	PMAP_EMACRX		0x1000	/* 		Ethernet Receive DMA						*/
-#define PMAP_EMACTX		0x2000	/* 		Ethernet Transmit DMA						*/
-#define PMAP_SPORT0RX	0x3000	/* 		SPORT0 Receive DMA							*/
-#define PMAP_SPORT0TX	0x4000	/* 		SPORT0 Transmit DMA							*/
-#define PMAP_SPORT1RX	0x5000	/* 		SPORT1 Receive DMA							*/
-#define PMAP_SPORT1TX	0x6000	/* 		SPORT1 Transmit DMA							*/
-#define PMAP_SPI		0x7000	/* 		SPI Port DMA								*/
-#define PMAP_UART0RX	0x8000	/* 		UART0 Port Receive DMA						*/
-#define PMAP_UART0TX	0x9000	/* 		UART0 Port Transmit DMA						*/
-#define	PMAP_UART1RX	0xA000	/* 		UART1 Port Receive DMA						*/
-#define	PMAP_UART1TX	0xB000	/* 		UART1 Port Transmit DMA						*/
-
-/*  ************  PARALLEL PERIPHERAL INTERFACE (PPI) MASKS *************/
-/*  PPI_CONTROL Masks													*/
-#define PORT_EN			0x0001		/* PPI Port Enable					*/
-#define PORT_DIR		0x0002		/* PPI Port Direction				*/
-#define XFR_TYPE		0x000C		/* PPI Transfer Type				*/
-#define PORT_CFG		0x0030		/* PPI Port Configuration			*/
-#define FLD_SEL			0x0040		/* PPI Active Field Select			*/
-#define PACK_EN			0x0080		/* PPI Packing Mode					*/
-#define DMA32			0x0100		/* PPI 32-bit DMA Enable			*/
-#define SKIP_EN			0x0200		/* PPI Skip Element Enable			*/
-#define SKIP_EO			0x0400		/* PPI Skip Even/Odd Elements		*/
-#define DLEN_8			0x0000		/* Data Length = 8 Bits				*/
-#define DLEN_10			0x0800		/* Data Length = 10 Bits			*/
-#define DLEN_11			0x1000		/* Data Length = 11 Bits			*/
-#define DLEN_12			0x1800		/* Data Length = 12 Bits			*/
-#define DLEN_13			0x2000		/* Data Length = 13 Bits			*/
-#define DLEN_14			0x2800		/* Data Length = 14 Bits			*/
-#define DLEN_15			0x3000		/* Data Length = 15 Bits			*/
-#define DLEN_16			0x3800		/* Data Length = 16 Bits			*/
-#define DLENGTH			0x3800		/* PPI Data Length  */
-#define POLC			0x4000		/* PPI Clock Polarity				*/
-#define POLS			0x8000		/* PPI Frame Sync Polarity			*/
-
-/* PPI_STATUS Masks														*/
-#define FLD				0x0400		/* Field Indicator					*/
-#define FT_ERR			0x0800		/* Frame Track Error				*/
-#define OVR				0x1000		/* FIFO Overflow Error				*/
-#define UNDR			0x2000		/* FIFO Underrun Error				*/
-#define ERR_DET			0x4000		/* Error Detected Indicator			*/
-#define ERR_NCOR		0x8000		/* Error Not Corrected Indicator	*/
-
-
-/*  ********************  TWO-WIRE INTERFACE (TWI) MASKS  ***********************/
-/* TWI_CLKDIV Macros (Use: *pTWI_CLKDIV = CLKLOW(x)|CLKHI(y);  )				*/
-#define	CLKLOW(x)	((x) & 0xFF)		/* Periods Clock Is Held Low			*/
-#define CLKHI(y)	(((y)&0xFF)<<0x8)	/* Periods Before New Clock Low			*/
-
-/* TWI_PRESCALE Masks															*/
-#define	PRESCALE	0x007F		/* SCLKs Per Internal Time Reference (10MHz)	*/
-#define	TWI_ENA		0x0080		/* TWI Enable									*/
-#define	SCCB		0x0200		/* SCCB Compatibility Enable					*/
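
A rough worked example of the CLKDIV usage note above — a sketch with assumed numbers only (a 120MHz SCLK, a 100kHz SCL target, and the assumption that the PRESCALE and TWI_ENA bits live in TWI0_CONTROL), not code from the kernel TWI driver:

	/* One internal time reference period is 100ns (10MHz), so:
	 *   PRESCALE = SCLK / 10MHz        = 120MHz / 10MHz = 12
	 *   SCL period in reference ticks  = 10MHz / 100kHz = 100,
	 * split evenly between the low and high phases of SCL.
	 */
	*(volatile unsigned short *)TWI0_CONTROL = TWI_ENA | 12;	/* PRESCALE = 12 */
	*(volatile unsigned short *)TWI0_CLKDIV  = CLKLOW(50) | CLKHI(50);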
-
-/* TWI_SLAVE_CTL Masks															*/
-#define	SEN			0x0001		/* Slave Enable									*/
-#define	SADD_LEN	0x0002		/* Slave Address Length							*/
-#define	STDVAL		0x0004		/* Slave Transmit Data Valid					*/
-#define	NAK			0x0008		/* NAK/ACK* Generated At Conclusion Of Transfer */
-#define	GEN			0x0010		/* General Call Address Matching Enabled		*/
-
-/* TWI_SLAVE_STAT Masks															*/
-#define	SDIR		0x0001		/* Slave Transfer Direction (Transmit/Receive*)	*/
-#define GCALL		0x0002		/* General Call Indicator						*/
-
-/* TWI_MASTER_CTL Masks													*/
-#define	MEN			0x0001		/* Master Mode Enable						*/
-#define	MADD_LEN	0x0002		/* Master Address Length					*/
-#define	MDIR		0x0004		/* Master Transmit Direction (RX/TX*)		*/
-#define	FAST		0x0008		/* Use Fast Mode Timing Specs				*/
-#define	STOP		0x0010		/* Issue Stop Condition						*/
-#define	RSTART		0x0020		/* Repeat Start or Stop* At End Of Transfer	*/
-#define	DCNT		0x3FC0		/* Data Bytes To Transfer					*/
-#define	SDAOVR		0x4000		/* Serial Data Override						*/
-#define	SCLOVR		0x8000		/* Serial Clock Override					*/
-
-/* TWI_MASTER_STAT Masks														*/
-#define	MPROG		0x0001		/* Master Transfer In Progress					*/
-#define	LOSTARB		0x0002		/* Lost Arbitration Indicator (Xfer Aborted)	*/
-#define	ANAK		0x0004		/* Address Not Acknowledged						*/
-#define	DNAK		0x0008		/* Data Not Acknowledged						*/
-#define	BUFRDERR	0x0010		/* Buffer Read Error							*/
-#define	BUFWRERR	0x0020		/* Buffer Write Error							*/
-#define	SDASEN		0x0040		/* Serial Data Sense							*/
-#define	SCLSEN		0x0080		/* Serial Clock Sense							*/
-#define	BUSBUSY		0x0100		/* Bus Busy Indicator							*/
-
-/* TWI_INT_SRC and TWI_INT_ENABLE Masks						*/
-#define	SINIT		0x0001		/* Slave Transfer Initiated	*/
-#define	SCOMP		0x0002		/* Slave Transfer Complete	*/
-#define	SERR		0x0004		/* Slave Transfer Error		*/
-#define	SOVF		0x0008		/* Slave Overflow			*/
-#define	MCOMP		0x0010		/* Master Transfer Complete	*/
-#define	MERR		0x0020		/* Master Transfer Error	*/
-#define	XMTSERV		0x0040		/* Transmit FIFO Service	*/
-#define	RCVSERV		0x0080		/* Receive FIFO Service		*/
-
-/* TWI_FIFO_CTRL Masks												*/
-#define	XMTFLUSH	0x0001		/* Transmit Buffer Flush			*/
-#define	RCVFLUSH	0x0002		/* Receive Buffer Flush				*/
-#define	XMTINTLEN	0x0004		/* Transmit Buffer Interrupt Length	*/
-#define	RCVINTLEN	0x0008		/* Receive Buffer Interrupt Length	*/
-
-/* TWI_FIFO_STAT Masks															*/
-#define	XMTSTAT		0x0003		/* Transmit FIFO Status							*/
-#define	XMT_EMPTY	0x0000		/* 		Transmit FIFO Empty						*/
-#define	XMT_HALF	0x0001		/* 		Transmit FIFO Has 1 Byte To Write		*/
-#define	XMT_FULL	0x0003		/* 		Transmit FIFO Full (2 Bytes To Write)	*/
-
-#define	RCVSTAT		0x000C		/* Receive FIFO Status							*/
-#define	RCV_EMPTY	0x0000		/* 		Receive FIFO Empty						*/
-#define	RCV_HALF	0x0004		/* 		Receive FIFO Has 1 Byte To Read			*/
-#define	RCV_FULL	0x000C		/* 		Receive FIFO Full (2 Bytes To Read)		*/
-
-
-/*  *******************  PIN CONTROL REGISTER MASKS  ************************/
-/* PORT_MUX Masks															*/
-#define	PJSE			0x0001			/* Port J SPI/SPORT Enable			*/
-#define	PJSE_SPORT		0x0000			/* 		Enable TFS0/DT0PRI			*/
-#define	PJSE_SPI		0x0001			/* 		Enable SPI_SSEL3:2			*/
-
-#define	PJCE(x)			(((x)&0x3)<<1)	/* Port J CAN/SPI/SPORT Enable		*/
-#define	PJCE_SPORT		0x0000			/* 		Enable DR0SEC/DT0SEC		*/
-#define	PJCE_CAN		0x0002			/* 		Enable CAN RX/TX			*/
-#define	PJCE_SPI		0x0004			/* 		Enable SPI_SSEL7			*/
-
-#define	PFDE			0x0008			/* Port F DMA Request Enable		*/
-#define	PFDE_UART		0x0000			/* 		Enable UART0 RX/TX			*/
-#define	PFDE_DMA		0x0008			/* 		Enable DMAR1:0				*/
-
-#define	PFTE			0x0010			/* Port F Timer Enable				*/
-#define	PFTE_UART		0x0000			/*		Enable UART1 RX/TX			*/
-#define	PFTE_TIMER		0x0010			/* 		Enable TMR7:6				*/
-
-#define	PFS6E			0x0020			/* Port F SPI SSEL 6 Enable			*/
-#define	PFS6E_TIMER		0x0000			/*		Enable TMR5					*/
-#define	PFS6E_SPI		0x0020			/* 		Enable SPI_SSEL6			*/
-
-#define	PFS5E			0x0040			/* Port F SPI SSEL 5 Enable			*/
-#define	PFS5E_TIMER		0x0000			/*		Enable TMR4					*/
-#define	PFS5E_SPI		0x0040			/* 		Enable SPI_SSEL5			*/
-
-#define	PFS4E			0x0080			/* Port F SPI SSEL 4 Enable			*/
-#define	PFS4E_TIMER		0x0000			/*		Enable TMR3					*/
-#define	PFS4E_SPI		0x0080			/* 		Enable SPI_SSEL4			*/
-
-#define	PFFE			0x0100			/* Port F PPI Frame Sync Enable		*/
-#define	PFFE_TIMER		0x0000			/* 		Enable TMR2					*/
-#define	PFFE_PPI		0x0100			/* 		Enable PPI FS3				*/
-
-#define	PGSE			0x0200			/* Port G SPORT1 Secondary Enable	*/
-#define	PGSE_PPI		0x0000			/* 		Enable PPI D9:8				*/
-#define	PGSE_SPORT		0x0200			/* 		Enable DR1SEC/DT1SEC		*/
-
-#define	PGRE			0x0400			/* Port G SPORT1 Receive Enable		*/
-#define	PGRE_PPI		0x0000			/* 		Enable PPI D12:10			*/
-#define	PGRE_SPORT		0x0400			/* 		Enable DR1PRI/RFS1/RSCLK1	*/
-
-#define	PGTE			0x0800			/* Port G SPORT1 Transmit Enable	*/
-#define	PGTE_PPI		0x0000			/* 		Enable PPI D15:13			*/
-#define	PGTE_SPORT		0x0800			/* 		Enable DT1PRI/TFS1/TSCLK1	*/
-
-
-/*  ******************  HANDSHAKE DMA (HDMA) MASKS  *********************/
-/* HDMAx_CTL Masks														*/
-#define	HMDMAEN		0x0001	/* Enable Handshake DMA 0/1					*/
-#define	REP			0x0002	/* HDMA Request Polarity					*/
-#define	UTE			0x0004	/* Urgency Threshold Enable					*/
-#define	OIE			0x0010	/* Overflow Interrupt Enable				*/
-#define	BDIE		0x0020	/* Block Done Interrupt Enable				*/
-#define	MBDI		0x0040	/* Mask Block Done IRQ If Pending ECNT		*/
-#define	DRQ			0x0300	/* HDMA Request Type						*/
-#define	DRQ_NONE	0x0000	/* 		No Request							*/
-#define	DRQ_SINGLE	0x0100	/* 		Channels Request Single				*/
-#define	DRQ_MULTI	0x0200	/* 		Channels Request Multi (Default)	*/
-#define	DRQ_URGENT	0x0300	/* 		Channels Request Multi Urgent		*/
-#define	RBC			0x1000	/* Reload BCNT With IBCNT					*/
-#define	PS			0x2000	/* HDMA Pin Status							*/
-#define	OI			0x4000	/* Overflow Interrupt Generated				*/
-#define	BDI			0x8000	/* Block Done Interrupt Generated			*/
-
-/* entry addresses of the user-callable Boot ROM functions */
-
-#define _BOOTROM_RESET 0xEF000000
-#define _BOOTROM_FINAL_INIT 0xEF000002
-#define _BOOTROM_DO_MEMORY_DMA 0xEF000006
-#define _BOOTROM_BOOT_DXE_FLASH 0xEF000008
-#define _BOOTROM_BOOT_DXE_SPI 0xEF00000A
-#define _BOOTROM_BOOT_DXE_TWI 0xEF00000C
-#define _BOOTROM_GET_DXE_ADDRESS_FLASH 0xEF000010
-#define _BOOTROM_GET_DXE_ADDRESS_SPI 0xEF000012
-#define _BOOTROM_GET_DXE_ADDRESS_TWI 0xEF000014
-
-/* Alternate Deprecated Macros Provided For Backwards Code Compatibility */
-#define	PGDE_UART   PFDE_UART
-#define	PGDE_DMA    PFDE_DMA
-#define	CKELOW		SCKELOW
-
-/* HOST Port Registers */
-
-#define                     HOST_CONTROL  0xffc03400   /* HOST Control Register */
-#define                      HOST_STATUS  0xffc03404   /* HOST Status Register */
-#define                     HOST_TIMEOUT  0xffc03408   /* HOST Acknowledge Mode Timeout Register */
-
-/* Counter Registers */
-
-#define                       CNT_CONFIG  0xffc03500   /* Configuration Register */
-#define                        CNT_IMASK  0xffc03504   /* Interrupt Mask Register */
-#define                       CNT_STATUS  0xffc03508   /* Status Register */
-#define                      CNT_COMMAND  0xffc0350c   /* Command Register */
-#define                     CNT_DEBOUNCE  0xffc03510   /* Debounce Register */
-#define                      CNT_COUNTER  0xffc03514   /* Counter Register */
-#define                          CNT_MAX  0xffc03518   /* Maximal Count Register */
-#define                          CNT_MIN  0xffc0351c   /* Minimal Count Register */
-
-/* OTP/FUSE Registers */
-
-#define                      OTP_CONTROL  0xffc03600   /* OTP/Fuse Control Register */
-#define                          OTP_BEN  0xffc03604   /* OTP/Fuse Byte Enable */
-#define                       OTP_STATUS  0xffc03608   /* OTP/Fuse Status */
-#define                       OTP_TIMING  0xffc0360c   /* OTP/Fuse Access Timing */
-
-/* Security Registers */
-
-#define                    SECURE_SYSSWT  0xffc03620   /* Secure System Switches */
-#define                   SECURE_CONTROL  0xffc03624   /* Secure Control */
-#define                    SECURE_STATUS  0xffc03628   /* Secure Status */
-
-/* OTP Read/Write Data Buffer Registers */
-
-#define                        OTP_DATA0  0xffc03680   /* OTP/Fuse Data (OTP_DATA0-3) accesses the fuse read write buffer */
-#define                        OTP_DATA1  0xffc03684   /* OTP/Fuse Data (OTP_DATA0-3) accesses the fuse read write buffer */
-#define                        OTP_DATA2  0xffc03688   /* OTP/Fuse Data (OTP_DATA0-3) accesses the fuse read write buffer */
-#define                        OTP_DATA3  0xffc0368c   /* OTP/Fuse Data (OTP_DATA0-3) accesses the fuse read write buffer */
-
-/* Motor Control PWM Registers */
-
-#define                         PWM_CTRL  0xffc03700   /* PWM Control Register */
-#define                         PWM_STAT  0xffc03704   /* PWM Status Register */
-#define                           PWM_TM  0xffc03708   /* PWM Period Register */
-#define                           PWM_DT  0xffc0370c   /* PWM Dead Time Register */
-#define                         PWM_GATE  0xffc03710   /* PWM Chopping Control */
-#define                          PWM_CHA  0xffc03714   /* PWM Channel A Duty Control */
-#define                          PWM_CHB  0xffc03718   /* PWM Channel B Duty Control */
-#define                          PWM_CHC  0xffc0371c   /* PWM Channel C Duty Control */
-#define                          PWM_SEG  0xffc03720   /* PWM Crossover and Output Enable */
-#define                       PWM_SYNCWT  0xffc03724   /* PWM Sync Pulse Width Control */
-#define                         PWM_CHAL  0xffc03728   /* PWM Channel AL Duty Control (SR mode only) */
-#define                         PWM_CHBL  0xffc0372c   /* PWM Channel BL Duty Control (SR mode only) */
-#define                         PWM_CHCL  0xffc03730   /* PWM Channel CL Duty Control (SR mode only) */
-#define                          PWM_LSI  0xffc03734   /* PWM Low Side Invert (SR mode only) */
-#define                        PWM_STAT2  0xffc03738   /* PWM Status Register 2 */
-
-
-/* ********************************************************** */
-/*     SINGLE BIT MACRO PAIRS (bit mask and negated one)      */
-/*     and MULTI BIT READ MACROS                              */
-/* ********************************************************** */
-
-/* Bit masks for HOST_CONTROL */
-
-#define                   HOST_CNTR_HOST_EN  0x1        /* Host Enable */
-#define                  HOST_CNTR_nHOST_EN  0x0
-#define                  HOST_CNTR_HOST_END  0x2        /* Host Endianness */
-#define                 HOST_CNTR_nHOST_END  0x0
-#define                 HOST_CNTR_DATA_SIZE  0x4        /* Data Size */
-#define                HOST_CNTR_nDATA_SIZE  0x0
-#define                  HOST_CNTR_HOST_RST  0x8        /* Host Reset */
-#define                 HOST_CNTR_nHOST_RST  0x0
-#define                  HOST_CNTR_HRDY_OVR  0x20       /* Host Ready Override */
-#define                 HOST_CNTR_nHRDY_OVR  0x0
-#define                  HOST_CNTR_INT_MODE  0x40       /* Interrupt Mode */
-#define                 HOST_CNTR_nINT_MODE  0x0
-#define                     HOST_CNTR_BT_EN  0x80       /* Bus Timeout Enable */
-#define                   HOST_CNTR_ nBT_EN  0x0
-#define                       HOST_CNTR_EHW  0x100      /* Enable Host Write */
-#define                      HOST_CNTR_nEHW  0x0
-#define                       HOST_CNTR_EHR  0x200      /* Enable Host Read */
-#define                      HOST_CNTR_nEHR  0x0
-#define                       HOST_CNTR_BDR  0x400      /* Burst DMA Requests */
-#define                      HOST_CNTR_nBDR  0x0
-
-/* Bit masks for HOST_STATUS */
-
-#define                     HOST_STAT_READY  0x1        /* DMA Ready */
-#define                    HOST_STAT_nREADY  0x0
-#define                  HOST_STAT_FIFOFULL  0x2        /* FIFO Full */
-#define                 HOST_STAT_nFIFOFULL  0x0
-#define                 HOST_STAT_FIFOEMPTY  0x4        /* FIFO Empty */
-#define                HOST_STAT_nFIFOEMPTY  0x0
-#define                  HOST_STAT_COMPLETE  0x8        /* DMA Complete */
-#define                 HOST_STAT_nCOMPLETE  0x0
-#define                      HOST_STAT_HSHK  0x10       /* Host Handshake */
-#define                     HOST_STAT_nHSHK  0x0
-#define                   HOST_STAT_TIMEOUT  0x20       /* Host Timeout */
-#define                  HOST_STAT_nTIMEOUT  0x0
-#define                      HOST_STAT_HIRQ  0x40       /* Host Interrupt Request */
-#define                     HOST_STAT_nHIRQ  0x0
-#define                HOST_STAT_ALLOW_CNFG  0x80       /* Allow New Configuration */
-#define               HOST_STAT_nALLOW_CNFG  0x0
-#define                   HOST_STAT_DMA_DIR  0x100      /* DMA Direction */
-#define                  HOST_STAT_nDMA_DIR  0x0
-#define                       HOST_STAT_BTE  0x200      /* Bus Timeout Enabled */
-#define                      HOST_STAT_nBTE  0x0
-#define               HOST_STAT_HOSTRD_DONE  0x8000     /* Host Read Completion Interrupt */
-#define              HOST_STAT_nHOSTRD_DONE  0x0
-
-/* Bit masks for HOST_TIMEOUT */
-
-#define             HOST_COUNT_TIMEOUT  0x7ff      /* Host Timeout count */
-
-/* Bit masks for SECURE_SYSSWT */
-
-#define                   EMUDABL  0x1        /* Emulation Disable. */
-#define                  nEMUDABL  0x0
-#define                   RSTDABL  0x2        /* Reset Disable */
-#define                  nRSTDABL  0x0
-#define                   L1IDABL  0x1c       /* L1 Instruction Memory Disable. */
-#define                  L1DADABL  0xe0       /* L1 Data Bank A Memory Disable. */
-#define                  L1DBDABL  0x700      /* L1 Data Bank B Memory Disable. */
-#define                   DMA0OVR  0x800      /* DMA0 Memory Access Override */
-#define                  nDMA0OVR  0x0
-#define                   DMA1OVR  0x1000     /* DMA1 Memory Access Override */
-#define                  nDMA1OVR  0x0
-#define                    EMUOVR  0x4000     /* Emulation Override */
-#define                   nEMUOVR  0x0
-#define                    OTPSEN  0x8000     /* OTP Secrets Enable. */
-#define                   nOTPSEN  0x0
-#define                    L2DABL  0x70000    /* L2 Memory Disable. */
-
-/* Bit masks for SECURE_CONTROL */
-
-#define                   SECURE0  0x1        /* SECURE 0 */
-#define                  nSECURE0  0x0
-#define                   SECURE1  0x2        /* SECURE 1 */
-#define                  nSECURE1  0x0
-#define                   SECURE2  0x4        /* SECURE 2 */
-#define                  nSECURE2  0x0
-#define                   SECURE3  0x8        /* SECURE 3 */
-#define                  nSECURE3  0x0
-
-/* Bit masks for SECURE_STATUS */
-
-#define                   SECMODE  0x3        /* Secured Mode Control State */
-#define                       NMI  0x4        /* Non Maskable Interrupt */
-#define                      nNMI  0x0
-#define                   AFVALID  0x8        /* Authentication Firmware Valid */
-#define                  nAFVALID  0x0
-#define                    AFEXIT  0x10       /* Authentication Firmware Exit */
-#define                   nAFEXIT  0x0
-#define                   SECSTAT  0xe0       /* Secure Status */
-
-
-
-#endif /* _DEF_BF51X_H */
diff --git a/arch/blackfin/mach-bf518/include/mach/gpio.h b/arch/blackfin/mach-bf518/include/mach/gpio.h
index 9af6ce0..b480705 100644
--- a/arch/blackfin/mach-bf518/include/mach/gpio.h
+++ b/arch/blackfin/mach-bf518/include/mach/gpio.h
@@ -55,4 +55,8 @@
 #define PORT_G GPIO_PG0
 #define PORT_H GPIO_PH0
 
+#include <mach-common/ports-f.h>
+#include <mach-common/ports-g.h>
+#include <mach-common/ports-h.h>
+
 #endif /* _MACH_GPIO_H_ */
diff --git a/arch/blackfin/mach-bf518/include/mach/pll.h b/arch/blackfin/mach-bf518/include/mach/pll.h
index d550298..94cca67 100644
--- a/arch/blackfin/mach-bf518/include/mach/pll.h
+++ b/arch/blackfin/mach-bf518/include/mach/pll.h
@@ -1,63 +1 @@
-/*
- * Copyright 2008 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later
- */
-
-#ifndef _MACH_PLL_H
-#define _MACH_PLL_H
-
-#include <asm/blackfin.h>
-#include <asm/irqflags.h>
-
-/* Writing to PLL_CTL initiates a PLL relock sequence. */
-static __inline__ void bfin_write_PLL_CTL(unsigned int val)
-{
-	unsigned long flags, iwr0, iwr1;
-
-	if (val == bfin_read_PLL_CTL())
-		return;
-
-	flags = hard_local_irq_save();
-	/* Enable the PLL Wakeup bit in SIC IWR */
-	iwr0 = bfin_read32(SIC_IWR0);
-	iwr1 = bfin_read32(SIC_IWR1);
-	/* Only allow PLL Wakeup */
-	bfin_write32(SIC_IWR0, IWR_ENABLE(0));
-	bfin_write32(SIC_IWR1, 0);
-
-	bfin_write16(PLL_CTL, val);
-	SSYNC();
-	asm("IDLE;");
-
-	bfin_write32(SIC_IWR0, iwr0);
-	bfin_write32(SIC_IWR1, iwr1);
-	hard_local_irq_restore(flags);
-}
-
-/* Writing to VR_CTL initiates a PLL relock sequence. */
-static __inline__ void bfin_write_VR_CTL(unsigned int val)
-{
-	unsigned long flags, iwr0, iwr1;
-
-	if (val == bfin_read_VR_CTL())
-		return;
-
-	flags = hard_local_irq_save();
-	/* Enable the PLL Wakeup bit in SIC IWR */
-	iwr0 = bfin_read32(SIC_IWR0);
-	iwr1 = bfin_read32(SIC_IWR1);
-	/* Only allow PLL Wakeup */
-	bfin_write32(SIC_IWR0, IWR_ENABLE(0));
-	bfin_write32(SIC_IWR1, 0);
-
-	bfin_write16(VR_CTL, val);
-	SSYNC();
-	asm("IDLE;");
-
-	bfin_write32(SIC_IWR0, iwr0);
-	bfin_write32(SIC_IWR1, iwr1);
-	hard_local_irq_restore(flags);
-}
-
-#endif /* _MACH_PLL_H */
+#include <mach-common/pll.h>
diff --git a/arch/blackfin/mach-bf527/boards/ad7160eval.c b/arch/blackfin/mach-bf527/boards/ad7160eval.c
index 52295ff..ccab4c6 100644
--- a/arch/blackfin/mach-bf527/boards/ad7160eval.c
+++ b/arch/blackfin/mach-bf527/boards/ad7160eval.c
@@ -67,6 +67,7 @@
 	 * if it is the case.
 	 */
 	.gpio_vrsel_active	= 1,
+	.clkin          = 24,           /* musb CLKIN in MHZ */
 };
 
 static struct musb_hdrc_platform_data musb_plat = {
@@ -419,7 +420,7 @@
 	},
 };
 
-unsigned short bfin_uart0_peripherals[] = {
+static unsigned short bfin_uart0_peripherals[] = {
 	P_UART0_TX, P_UART0_RX, 0
 };
 
@@ -474,7 +475,7 @@
 #endif
 };
 
-unsigned short bfin_uart1_peripherals[] = {
+static unsigned short bfin_uart1_peripherals[] = {
 	P_UART1_TX, P_UART1_RX, 0
 };
 
@@ -627,9 +628,9 @@
 	},
 };
 
-unsigned short bfin_sport0_peripherals[] = {
+static unsigned short bfin_sport0_peripherals[] = {
 	P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS,
-	P_SPORT0_DRPRI, P_SPORT0_RSCLK, P_SPORT0_DRSEC, P_SPORT0_DTSEC, 0
+	P_SPORT0_DRPRI, P_SPORT0_RSCLK, 0
 };
 
 static struct platform_device bfin_sport0_uart_device = {
@@ -661,9 +662,9 @@
 	},
 };
 
-unsigned short bfin_sport1_peripherals[] = {
+static unsigned short bfin_sport1_peripherals[] = {
 	P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS,
-	P_SPORT1_DRPRI, P_SPORT1_RSCLK, P_SPORT1_DRSEC, P_SPORT1_DTSEC, 0
+	P_SPORT1_DRPRI, P_SPORT1_RSCLK, 0
 };
 
 static struct platform_device bfin_sport1_uart_device = {
diff --git a/arch/blackfin/mach-bf527/boards/cm_bf527.c b/arch/blackfin/mach-bf527/boards/cm_bf527.c
index 50533ed..c9d6dc8 100644
--- a/arch/blackfin/mach-bf527/boards/cm_bf527.c
+++ b/arch/blackfin/mach-bf527/boards/cm_bf527.c
@@ -104,6 +104,7 @@
 	 * if it is the case.
 	 */
 	.gpio_vrsel_active	= 1,
+	.clkin          = 24,           /* musb CLKIN in MHZ */
 };
 
 static struct musb_hdrc_platform_data musb_plat = {
@@ -614,7 +615,7 @@
 	},
 };
 
-unsigned short bfin_uart0_peripherals[] = {
+static unsigned short bfin_uart0_peripherals[] = {
 	P_UART0_TX, P_UART0_RX, 0
 };
 
@@ -669,7 +670,7 @@
 #endif
 };
 
-unsigned short bfin_uart1_peripherals[] = {
+static unsigned short bfin_uart1_peripherals[] = {
 	P_UART1_TX, P_UART1_RX, 0
 };
 
@@ -801,9 +802,9 @@
 	},
 };
 
-unsigned short bfin_sport0_peripherals[] = {
+static unsigned short bfin_sport0_peripherals[] = {
 	P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS,
-	P_SPORT0_DRPRI, P_SPORT0_RSCLK, P_SPORT0_DRSEC, P_SPORT0_DTSEC, 0
+	P_SPORT0_DRPRI, P_SPORT0_RSCLK, 0
 };
 
 static struct platform_device bfin_sport0_uart_device = {
@@ -835,9 +836,9 @@
 	},
 };
 
-unsigned short bfin_sport1_peripherals[] = {
+static unsigned short bfin_sport1_peripherals[] = {
 	P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS,
-	P_SPORT1_DRPRI, P_SPORT1_RSCLK, P_SPORT1_DRSEC, P_SPORT1_DTSEC, 0
+	P_SPORT1_DRPRI, P_SPORT1_RSCLK, 0
 };
 
 static struct platform_device bfin_sport1_uart_device = {
diff --git a/arch/blackfin/mach-bf527/boards/ezbrd.c b/arch/blackfin/mach-bf527/boards/ezbrd.c
index d06177b..b7101aa 100644
--- a/arch/blackfin/mach-bf527/boards/ezbrd.c
+++ b/arch/blackfin/mach-bf527/boards/ezbrd.c
@@ -68,6 +68,7 @@
 	 * if it is the case.
 	 */
 	.gpio_vrsel_active	= 1,
+	.clkin          = 24,           /* musb CLKIN in MHZ */
 };
 
 static struct musb_hdrc_platform_data musb_plat = {
@@ -499,7 +500,7 @@
 	},
 };
 
-unsigned short bfin_uart0_peripherals[] = {
+static unsigned short bfin_uart0_peripherals[] = {
 	P_UART0_TX, P_UART0_RX, 0
 };
 
@@ -554,7 +555,7 @@
 #endif
 };
 
-unsigned short bfin_uart1_peripherals[] = {
+static unsigned short bfin_uart1_peripherals[] = {
 	P_UART1_TX, P_UART1_RX, 0
 };
 
@@ -681,9 +682,9 @@
 	},
 };
 
-unsigned short bfin_sport0_peripherals[] = {
+static unsigned short bfin_sport0_peripherals[] = {
 	P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS,
-	P_SPORT0_DRPRI, P_SPORT0_RSCLK, P_SPORT0_DRSEC, P_SPORT0_DTSEC, 0
+	P_SPORT0_DRPRI, P_SPORT0_RSCLK, 0
 };
 
 static struct platform_device bfin_sport0_uart_device = {
@@ -715,9 +716,9 @@
 	},
 };
 
-unsigned short bfin_sport1_peripherals[] = {
+static unsigned short bfin_sport1_peripherals[] = {
 	P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS,
-	P_SPORT1_DRPRI, P_SPORT1_RSCLK, P_SPORT1_DRSEC, P_SPORT1_DTSEC, 0
+	P_SPORT1_DRPRI, P_SPORT1_RSCLK, 0
 };
 
 static struct platform_device bfin_sport1_uart_device = {
diff --git a/arch/blackfin/mach-bf527/boards/ezkit.c b/arch/blackfin/mach-bf527/boards/ezkit.c
index 35a88a5..2cd2ff6 100644
--- a/arch/blackfin/mach-bf527/boards/ezkit.c
+++ b/arch/blackfin/mach-bf527/boards/ezkit.c
@@ -108,6 +108,7 @@
 	 * if it is the case.
 	 */
 	.gpio_vrsel_active	= 1,
+	.clkin          = 24,           /* musb CLKIN in MHZ */
 };
 
 static struct musb_hdrc_platform_data musb_plat = {
@@ -708,7 +709,7 @@
 	},
 };
 
-unsigned short bfin_uart0_peripherals[] = {
+static unsigned short bfin_uart0_peripherals[] = {
 	P_UART0_TX, P_UART0_RX, 0
 };
 
@@ -763,7 +764,7 @@
 #endif
 };
 
-unsigned short bfin_uart1_peripherals[] = {
+static unsigned short bfin_uart1_peripherals[] = {
 	P_UART1_TX, P_UART1_RX, 0
 };
 
@@ -962,6 +963,11 @@
 		I2C_BOARD_INFO("ad5252", 0x2f),
 	},
 #endif
+#if defined(CONFIG_SND_SOC_ADAU1373) || defined(CONFIG_SND_SOC_ADAU1373_MODULE)
+	{
+		I2C_BOARD_INFO("adau1373", 0x1A),
+	},
+#endif
 };
 
 #if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE)
@@ -984,9 +990,9 @@
 	},
 };
 
-unsigned short bfin_sport0_peripherals[] = {
+static unsigned short bfin_sport0_peripherals[] = {
 	P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS,
-	P_SPORT0_DRPRI, P_SPORT0_RSCLK, P_SPORT0_DRSEC, P_SPORT0_DTSEC, 0
+	P_SPORT0_DRPRI, P_SPORT0_RSCLK, 0
 };
 
 static struct platform_device bfin_sport0_uart_device = {
@@ -1018,9 +1024,9 @@
 	},
 };
 
-unsigned short bfin_sport1_peripherals[] = {
+static unsigned short bfin_sport1_peripherals[] = {
 	P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS,
-	P_SPORT1_DRPRI, P_SPORT1_RSCLK, P_SPORT1_DRSEC, P_SPORT1_DTSEC, 0
+	P_SPORT1_DRPRI, P_SPORT1_RSCLK, 0
 };
 
 static struct platform_device bfin_sport1_uart_device = {
diff --git a/arch/blackfin/mach-bf527/boards/tll6527m.c b/arch/blackfin/mach-bf527/boards/tll6527m.c
index 130861b..18d303d 100644
--- a/arch/blackfin/mach-bf527/boards/tll6527m.c
+++ b/arch/blackfin/mach-bf527/boards/tll6527m.c
@@ -193,7 +193,7 @@
 	GPIO_PG1, GPIO_PH9, GPIO_PH10
 };
 
-static struct gpio_decoder_platfrom_data spi_decoded_cs = {
+static struct gpio_decoder_platform_data spi_decoded_cs = {
 	.base		= EXP_GPIO_SPISEL_BASE,
 	.input_addrs	= gpio_addr_inputs,
 	.nr_input_addrs = ARRAY_SIZE(gpio_addr_inputs),
@@ -586,7 +586,7 @@
 	},
 };
 
-unsigned short bfin_uart0_peripherals[] = {
+static unsigned short bfin_uart0_peripherals[] = {
 	P_UART0_TX, P_UART0_RX, 0
 };
 
@@ -642,7 +642,7 @@
 #endif
 };
 
-unsigned short bfin_uart1_peripherals[] = {
+static unsigned short bfin_uart1_peripherals[] = {
 	P_UART1_TX, P_UART1_RX, 0
 };
 
@@ -799,9 +799,9 @@
 	},
 };
 
-unsigned short bfin_sport0_peripherals[] = {
+static unsigned short bfin_sport0_peripherals[] = {
 	P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS,
-	P_SPORT0_DRPRI, P_SPORT0_RSCLK, P_SPORT0_DRSEC, P_SPORT0_DTSEC, 0
+	P_SPORT0_DRPRI, P_SPORT0_RSCLK, 0
 };
 
 static struct platform_device bfin_sport0_uart_device = {
@@ -834,9 +834,9 @@
 	},
 };
 
-unsigned short bfin_sport1_peripherals[] = {
+static unsigned short bfin_sport1_peripherals[] = {
 	P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS,
-	P_SPORT1_DRPRI, P_SPORT1_RSCLK, P_SPORT1_DRSEC, P_SPORT1_DTSEC, 0
+	P_SPORT1_DRPRI, P_SPORT1_RSCLK, 0
 };
 
 static struct platform_device bfin_sport1_uart_device = {
diff --git a/arch/blackfin/mach-bf527/dma.c b/arch/blackfin/mach-bf527/dma.c
index 7bc7577..1fabdef 100644
--- a/arch/blackfin/mach-bf527/dma.c
+++ b/arch/blackfin/mach-bf527/dma.c
@@ -11,7 +11,7 @@
 #include <asm/blackfin.h>
 #include <asm/dma.h>
 
-struct dma_register *dma_io_base_addr[MAX_DMA_CHANNELS] = {
+struct dma_register * const dma_io_base_addr[MAX_DMA_CHANNELS] = {
 	(struct dma_register *) DMA0_NEXT_DESC_PTR,
 	(struct dma_register *) DMA1_NEXT_DESC_PTR,
 	(struct dma_register *) DMA2_NEXT_DESC_PTR,
diff --git a/arch/blackfin/mach-bf527/include/mach/bfin_serial.h b/arch/blackfin/mach-bf527/include/mach/bfin_serial.h
new file mode 100644
index 0000000..00c603f
--- /dev/null
+++ b/arch/blackfin/mach-bf527/include/mach/bfin_serial.h
@@ -0,0 +1,14 @@
+/*
+ * mach/bfin_serial.h - Blackfin UART/Serial definitions
+ *
+ * Copyright 2006-2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#ifndef __BFIN_MACH_SERIAL_H__
+#define __BFIN_MACH_SERIAL_H__
+
+#define BFIN_UART_NR_PORTS	2
+
+#endif
diff --git a/arch/blackfin/mach-bf527/include/mach/bfin_serial_5xx.h b/arch/blackfin/mach-bf527/include/mach/bfin_serial_5xx.h
index c1d55b8..960e089 100644
--- a/arch/blackfin/mach-bf527/include/mach/bfin_serial_5xx.h
+++ b/arch/blackfin/mach-bf527/include/mach/bfin_serial_5xx.h
@@ -4,36 +4,9 @@
  * Licensed under the GPL-2 or later
  */
 
-#include <linux/serial.h>
 #include <asm/dma.h>
 #include <asm/portmux.h>
 
-#define UART_GET_CHAR(uart)     bfin_read16(((uart)->port.membase + OFFSET_RBR))
-#define UART_GET_DLL(uart)	bfin_read16(((uart)->port.membase + OFFSET_DLL))
-#define UART_GET_IER(uart)      bfin_read16(((uart)->port.membase + OFFSET_IER))
-#define UART_GET_DLH(uart)	bfin_read16(((uart)->port.membase + OFFSET_DLH))
-#define UART_GET_IIR(uart)      bfin_read16(((uart)->port.membase + OFFSET_IIR))
-#define UART_GET_LCR(uart)      bfin_read16(((uart)->port.membase + OFFSET_LCR))
-#define UART_GET_GCTL(uart)     bfin_read16(((uart)->port.membase + OFFSET_GCTL))
-
-#define UART_PUT_CHAR(uart, v)   bfin_write16(((uart)->port.membase + OFFSET_THR), v)
-#define UART_PUT_DLL(uart, v)    bfin_write16(((uart)->port.membase + OFFSET_DLL), v)
-#define UART_PUT_IER(uart, v)    bfin_write16(((uart)->port.membase + OFFSET_IER), v)
-#define UART_SET_IER(uart, v)    UART_PUT_IER(uart, UART_GET_IER(uart) | (v))
-#define UART_CLEAR_IER(uart, v)  UART_PUT_IER(uart, UART_GET_IER(uart) & ~(v))
-#define UART_PUT_DLH(uart, v)    bfin_write16(((uart)->port.membase + OFFSET_DLH), v)
-#define UART_PUT_LCR(uart, v)    bfin_write16(((uart)->port.membase + OFFSET_LCR), v)
-#define UART_PUT_GCTL(uart, v)   bfin_write16(((uart)->port.membase + OFFSET_GCTL), v)
-
-#define UART_SET_DLAB(uart)     do { UART_PUT_LCR(uart, UART_GET_LCR(uart) | DLAB); SSYNC(); } while (0)
-#define UART_CLEAR_DLAB(uart)   do { UART_PUT_LCR(uart, UART_GET_LCR(uart) & ~DLAB); SSYNC(); } while (0)
-
-#define UART_GET_CTS(x) gpio_get_value(x->cts_pin)
-#define UART_DISABLE_RTS(x) gpio_set_value(x->rts_pin, 1)
-#define UART_ENABLE_RTS(x) gpio_set_value(x->rts_pin, 0)
-#define UART_ENABLE_INTS(x, v) UART_PUT_IER(x, v)
-#define UART_DISABLE_INTS(x) UART_PUT_IER(x, 0)
-
 #if defined(CONFIG_BFIN_UART0_CTSRTS) || defined(CONFIG_BFIN_UART1_CTSRTS)
 # define CONFIG_SERIAL_BFIN_CTSRTS
 
@@ -54,50 +27,6 @@
 # endif
 #endif
 
-#define BFIN_UART_TX_FIFO_SIZE	2
-
-/*
- * The pin configuration is different from schematic
- */
-struct bfin_serial_port {
-	struct uart_port port;
-	unsigned int old_status;
-	int status_irq;
-	unsigned int lsr;
-#ifdef CONFIG_SERIAL_BFIN_DMA
-	int tx_done;
-	int tx_count;
-	struct circ_buf rx_dma_buf;
-	struct timer_list rx_dma_timer;
-	int rx_dma_nrows;
-	unsigned int tx_dma_channel;
-	unsigned int rx_dma_channel;
-	struct work_struct tx_dma_workqueue;
-#endif
-#ifdef CONFIG_SERIAL_BFIN_CTSRTS
-	struct timer_list cts_timer;
-	int cts_pin;
-	int rts_pin;
-#endif
-};
-
-/* The hardware clears the LSR bits upon read, so we need to cache
- * some of the more fun bits in software so they don't get lost
- * when checking the LSR in other code paths (TX).
- */
-static inline unsigned int UART_GET_LSR(struct bfin_serial_port *uart)
-{
-	unsigned int lsr = bfin_read16(uart->port.membase + OFFSET_LSR);
-	uart->lsr |= (lsr & (BI|FE|PE|OE));
-	return lsr | uart->lsr;
-}
-
-static inline void UART_CLEAR_LSR(struct bfin_serial_port *uart)
-{
-	uart->lsr = 0;
-	bfin_write16(uart->port.membase + OFFSET_LSR, -1);
-}
-
 struct bfin_serial_res {
 	unsigned long uart_base_addr;
 	int uart_irq;
@@ -146,3 +75,5 @@
 };
 
 #define DRIVER_NAME "bfin-uart"
+
+#include <asm/bfin_serial.h>
diff --git a/arch/blackfin/mach-bf527/include/mach/blackfin.h b/arch/blackfin/mach-bf527/include/mach/blackfin.h
index f714c5d..e1d2792 100644
--- a/arch/blackfin/mach-bf527/include/mach/blackfin.h
+++ b/arch/blackfin/mach-bf527/include/mach/blackfin.h
@@ -1,49 +1,37 @@
 /*
- * Copyright 2007-2009 Analog Devices Inc.
+ * Copyright 2007-2010 Analog Devices Inc.
  *
- * Licensed under the GPL-2 or later
+ * Licensed under the GPL-2 or later.
  */
 
 #ifndef _MACH_BLACKFIN_H_
 #define _MACH_BLACKFIN_H_
 
 #include "bf527.h"
-#include "defBF522.h"
 #include "anomaly.h"
 
-#if defined(CONFIG_BF527) || defined(CONFIG_BF526)
-#include "defBF527.h"
+#include <asm/def_LPBlackfin.h>
+#if defined(CONFIG_BF523) || defined(CONFIG_BF522)
+# include "defBF522.h"
 #endif
-
 #if defined(CONFIG_BF525) || defined(CONFIG_BF524)
-#include "defBF525.h"
+# include "defBF525.h"
+#endif
+#if defined(CONFIG_BF527) || defined(CONFIG_BF526)
+# include "defBF527.h"
 #endif
 
 #if !defined(__ASSEMBLY__)
-#include "cdefBF522.h"
-
-#if defined(CONFIG_BF527) || defined(CONFIG_BF526)
-#include "cdefBF527.h"
+# include <asm/cdef_LPBlackfin.h>
+# if defined(CONFIG_BF523) || defined(CONFIG_BF522)
+#  include "cdefBF522.h"
+# endif
+# if defined(CONFIG_BF525) || defined(CONFIG_BF524)
+#  include "cdefBF525.h"
+# endif
+# if defined(CONFIG_BF527) || defined(CONFIG_BF526)
+#  include "cdefBF527.h"
+# endif
 #endif
 
-#if defined(CONFIG_BF525) || defined(CONFIG_BF524)
-#include "cdefBF525.h"
-#endif
-#endif
-
-#define BFIN_UART_NR_PORTS	2
-
-#define OFFSET_THR              0x00	/* Transmit Holding register            */
-#define OFFSET_RBR              0x00	/* Receive Buffer register              */
-#define OFFSET_DLL              0x00	/* Divisor Latch (Low-Byte)             */
-#define OFFSET_IER              0x04	/* Interrupt Enable Register            */
-#define OFFSET_DLH              0x04	/* Divisor Latch (High-Byte)            */
-#define OFFSET_IIR              0x08	/* Interrupt Identification Register    */
-#define OFFSET_LCR              0x0C	/* Line Control Register                */
-#define OFFSET_MCR              0x10	/* Modem Control Register               */
-#define OFFSET_LSR              0x14	/* Line Status Register                 */
-#define OFFSET_MSR              0x18	/* Modem Status Register                */
-#define OFFSET_SCR              0x1C	/* SCR Scratch Register                 */
-#define OFFSET_GCTL             0x24	/* Global Control Register              */
-
 #endif
diff --git a/arch/blackfin/mach-bf527/include/mach/cdefBF522.h b/arch/blackfin/mach-bf527/include/mach/cdefBF522.h
index 1079af8..618dfcd 100644
--- a/arch/blackfin/mach-bf527/include/mach/cdefBF522.h
+++ b/arch/blackfin/mach-bf527/include/mach/cdefBF522.h
@@ -1,21 +1,1095 @@
 /*
- * Copyright 2007-2008 Analog Devices Inc.
+ * Copyright 2007-2010 Analog Devices Inc.
  *
- * Licensed under the GPL-2 or later
+ * Licensed under the GPL-2 or later.
  */
 
 #ifndef _CDEF_BF522_H
 #define _CDEF_BF522_H
 
-/* include all Core registers and bit definitions */
-#include "defBF522.h"
+/* Clock and System Control	(0xFFC00000 - 0xFFC000FF)								*/
+#define bfin_read_PLL_CTL()			bfin_read16(PLL_CTL)
+#define bfin_read_PLL_DIV()			bfin_read16(PLL_DIV)
+#define bfin_write_PLL_DIV(val)			bfin_write16(PLL_DIV, val)
+#define bfin_read_VR_CTL()			bfin_read16(VR_CTL)
+#define bfin_read_PLL_STAT()			bfin_read16(PLL_STAT)
+#define bfin_write_PLL_STAT(val)		bfin_write16(PLL_STAT, val)
+#define bfin_read_PLL_LOCKCNT()			bfin_read16(PLL_LOCKCNT)
+#define bfin_write_PLL_LOCKCNT(val)		bfin_write16(PLL_LOCKCNT, val)
+#define bfin_read_CHIPID()			bfin_read32(CHIPID)
+#define bfin_write_CHIPID(val)			bfin_write32(CHIPID, val)
 
-/* include core specific register pointer definitions */
-#include <asm/cdef_LPBlackfin.h>
 
-/* SYSTEM & MMR ADDRESS DEFINITIONS FOR ADSP-BF522 */
+/* System Interrupt Controller (0xFFC00100 - 0xFFC001FF)							*/
+#define bfin_read_SWRST()			bfin_read16(SWRST)
+#define bfin_write_SWRST(val)			bfin_write16(SWRST, val)
+#define bfin_read_SYSCR()			bfin_read16(SYSCR)
+#define bfin_write_SYSCR(val)			bfin_write16(SYSCR, val)
 
-/* include cdefBF52x_base.h for the set of #defines that are common to all ADSP-BF52x processors */
-#include "cdefBF52x_base.h"
+#define bfin_read_SIC_RVECT()			bfin_read32(SIC_RVECT)
+#define bfin_write_SIC_RVECT(val)		bfin_write32(SIC_RVECT, val)
+#define bfin_read_SIC_IMASK0()			bfin_read32(SIC_IMASK0)
+#define bfin_write_SIC_IMASK0(val)		bfin_write32(SIC_IMASK0, val)
+#define bfin_read_SIC_IMASK(x)			bfin_read32(SIC_IMASK0 + (x << 6))
+#define bfin_write_SIC_IMASK(x, val)		bfin_write32((SIC_IMASK0 + (x << 6)), val)
+
+#define bfin_read_SIC_IAR0()			bfin_read32(SIC_IAR0)
+#define bfin_write_SIC_IAR0(val)		bfin_write32(SIC_IAR0, val)
+#define bfin_read_SIC_IAR1()			bfin_read32(SIC_IAR1)
+#define bfin_write_SIC_IAR1(val)		bfin_write32(SIC_IAR1, val)
+#define bfin_read_SIC_IAR2()			bfin_read32(SIC_IAR2)
+#define bfin_write_SIC_IAR2(val)		bfin_write32(SIC_IAR2, val)
+#define bfin_read_SIC_IAR3()			bfin_read32(SIC_IAR3)
+#define bfin_write_SIC_IAR3(val)		bfin_write32(SIC_IAR3, val)
+
+#define bfin_read_SIC_ISR0()			bfin_read32(SIC_ISR0)
+#define bfin_write_SIC_ISR0(val)		bfin_write32(SIC_ISR0, val)
+#define bfin_read_SIC_ISR(x)			bfin_read32(SIC_ISR0 + (x << 6))
+#define bfin_write_SIC_ISR(x, val)		bfin_write32((SIC_ISR0 + (x << 6)), val)
+
+#define bfin_read_SIC_IWR0()			bfin_read32(SIC_IWR0)
+#define bfin_write_SIC_IWR0(val)		bfin_write32(SIC_IWR0, val)
+#define bfin_read_SIC_IWR(x)			bfin_read32(SIC_IWR0 + (x << 6))
+#define bfin_write_SIC_IWR(x, val)		bfin_write32((SIC_IWR0 + (x << 6)), val)
+
+/* SIC Additions to ADSP-BF52x (0xFFC0014C - 0xFFC00162) */
+
+#define bfin_read_SIC_IMASK1()			bfin_read32(SIC_IMASK1)
+#define bfin_write_SIC_IMASK1(val)		bfin_write32(SIC_IMASK1, val)
+#define bfin_read_SIC_IAR4()			bfin_read32(SIC_IAR4)
+#define bfin_write_SIC_IAR4(val)		bfin_write32(SIC_IAR4, val)
+#define bfin_read_SIC_IAR5()			bfin_read32(SIC_IAR5)
+#define bfin_write_SIC_IAR5(val)		bfin_write32(SIC_IAR5, val)
+#define bfin_read_SIC_IAR6()			bfin_read32(SIC_IAR6)
+#define bfin_write_SIC_IAR6(val)		bfin_write32(SIC_IAR6, val)
+#define bfin_read_SIC_IAR7()			bfin_read32(SIC_IAR7)
+#define bfin_write_SIC_IAR7(val)		bfin_write32(SIC_IAR7, val)
+#define bfin_read_SIC_ISR1()			bfin_read32(SIC_ISR1)
+#define bfin_write_SIC_ISR1(val)		bfin_write32(SIC_ISR1, val)
+#define bfin_read_SIC_IWR1()			bfin_read32(SIC_IWR1)
+#define bfin_write_SIC_IWR1(val)		bfin_write32(SIC_IWR1, val)
+
+/* Watchdog Timer		(0xFFC00200 - 0xFFC002FF)									*/
+#define bfin_read_WDOG_CTL()			bfin_read16(WDOG_CTL)
+#define bfin_write_WDOG_CTL(val)		bfin_write16(WDOG_CTL, val)
+#define bfin_read_WDOG_CNT()			bfin_read32(WDOG_CNT)
+#define bfin_write_WDOG_CNT(val)		bfin_write32(WDOG_CNT, val)
+#define bfin_read_WDOG_STAT()			bfin_read32(WDOG_STAT)
+#define bfin_write_WDOG_STAT(val)		bfin_write32(WDOG_STAT, val)
+
+
+/* Real Time Clock		(0xFFC00300 - 0xFFC003FF)									*/
+#define bfin_read_RTC_STAT()			bfin_read32(RTC_STAT)
+#define bfin_write_RTC_STAT(val)		bfin_write32(RTC_STAT, val)
+#define bfin_read_RTC_ICTL()			bfin_read16(RTC_ICTL)
+#define bfin_write_RTC_ICTL(val)		bfin_write16(RTC_ICTL, val)
+#define bfin_read_RTC_ISTAT()			bfin_read16(RTC_ISTAT)
+#define bfin_write_RTC_ISTAT(val)		bfin_write16(RTC_ISTAT, val)
+#define bfin_read_RTC_SWCNT()			bfin_read16(RTC_SWCNT)
+#define bfin_write_RTC_SWCNT(val)		bfin_write16(RTC_SWCNT, val)
+#define bfin_read_RTC_ALARM()			bfin_read32(RTC_ALARM)
+#define bfin_write_RTC_ALARM(val)		bfin_write32(RTC_ALARM, val)
+#define bfin_read_RTC_FAST()			bfin_read16(RTC_FAST)
+#define bfin_write_RTC_FAST(val)		bfin_write16(RTC_FAST, val)
+#define bfin_read_RTC_PREN()			bfin_read16(RTC_PREN)
+#define bfin_write_RTC_PREN(val)		bfin_write16(RTC_PREN, val)
+
+
+/* UART0 Controller		(0xFFC00400 - 0xFFC004FF)									*/
+#define bfin_read_UART0_THR()			bfin_read16(UART0_THR)
+#define bfin_write_UART0_THR(val)		bfin_write16(UART0_THR, val)
+#define bfin_read_UART0_RBR()			bfin_read16(UART0_RBR)
+#define bfin_write_UART0_RBR(val)		bfin_write16(UART0_RBR, val)
+#define bfin_read_UART0_DLL()			bfin_read16(UART0_DLL)
+#define bfin_write_UART0_DLL(val)		bfin_write16(UART0_DLL, val)
+#define bfin_read_UART0_IER()			bfin_read16(UART0_IER)
+#define bfin_write_UART0_IER(val)		bfin_write16(UART0_IER, val)
+#define bfin_read_UART0_DLH()			bfin_read16(UART0_DLH)
+#define bfin_write_UART0_DLH(val)		bfin_write16(UART0_DLH, val)
+#define bfin_read_UART0_IIR()			bfin_read16(UART0_IIR)
+#define bfin_write_UART0_IIR(val)		bfin_write16(UART0_IIR, val)
+#define bfin_read_UART0_LCR()			bfin_read16(UART0_LCR)
+#define bfin_write_UART0_LCR(val)		bfin_write16(UART0_LCR, val)
+#define bfin_read_UART0_MCR()			bfin_read16(UART0_MCR)
+#define bfin_write_UART0_MCR(val)		bfin_write16(UART0_MCR, val)
+#define bfin_read_UART0_LSR()			bfin_read16(UART0_LSR)
+#define bfin_write_UART0_LSR(val)		bfin_write16(UART0_LSR, val)
+#define bfin_read_UART0_MSR()			bfin_read16(UART0_MSR)
+#define bfin_write_UART0_MSR(val)		bfin_write16(UART0_MSR, val)
+#define bfin_read_UART0_SCR()			bfin_read16(UART0_SCR)
+#define bfin_write_UART0_SCR(val)		bfin_write16(UART0_SCR, val)
+#define bfin_read_UART0_GCTL()			bfin_read16(UART0_GCTL)
+#define bfin_write_UART0_GCTL(val)		bfin_write16(UART0_GCTL, val)
+
+
+/* SPI Controller		(0xFFC00500 - 0xFFC005FF)									*/
+#define bfin_read_SPI_CTL()			bfin_read16(SPI_CTL)
+#define bfin_write_SPI_CTL(val)			bfin_write16(SPI_CTL, val)
+#define bfin_read_SPI_FLG()			bfin_read16(SPI_FLG)
+#define bfin_write_SPI_FLG(val)			bfin_write16(SPI_FLG, val)
+#define bfin_read_SPI_STAT()			bfin_read16(SPI_STAT)
+#define bfin_write_SPI_STAT(val)		bfin_write16(SPI_STAT, val)
+#define bfin_read_SPI_TDBR()			bfin_read16(SPI_TDBR)
+#define bfin_write_SPI_TDBR(val)		bfin_write16(SPI_TDBR, val)
+#define bfin_read_SPI_RDBR()			bfin_read16(SPI_RDBR)
+#define bfin_write_SPI_RDBR(val)		bfin_write16(SPI_RDBR, val)
+#define bfin_read_SPI_BAUD()			bfin_read16(SPI_BAUD)
+#define bfin_write_SPI_BAUD(val)		bfin_write16(SPI_BAUD, val)
+#define bfin_read_SPI_SHADOW()			bfin_read16(SPI_SHADOW)
+#define bfin_write_SPI_SHADOW(val)		bfin_write16(SPI_SHADOW, val)
+
+
+/* TIMER0-7 Registers		(0xFFC00600 - 0xFFC006FF)								*/
+#define bfin_read_TIMER0_CONFIG()		bfin_read16(TIMER0_CONFIG)
+#define bfin_write_TIMER0_CONFIG(val)		bfin_write16(TIMER0_CONFIG, val)
+#define bfin_read_TIMER0_COUNTER()		bfin_read32(TIMER0_COUNTER)
+#define bfin_write_TIMER0_COUNTER(val)		bfin_write32(TIMER0_COUNTER, val)
+#define bfin_read_TIMER0_PERIOD()		bfin_read32(TIMER0_PERIOD)
+#define bfin_write_TIMER0_PERIOD(val)		bfin_write32(TIMER0_PERIOD, val)
+#define bfin_read_TIMER0_WIDTH()		bfin_read32(TIMER0_WIDTH)
+#define bfin_write_TIMER0_WIDTH(val)		bfin_write32(TIMER0_WIDTH, val)
+
+#define bfin_read_TIMER1_CONFIG()		bfin_read16(TIMER1_CONFIG)
+#define bfin_write_TIMER1_CONFIG(val)		bfin_write16(TIMER1_CONFIG, val)
+#define bfin_read_TIMER1_COUNTER()		bfin_read32(TIMER1_COUNTER)
+#define bfin_write_TIMER1_COUNTER(val)		bfin_write32(TIMER1_COUNTER, val)
+#define bfin_read_TIMER1_PERIOD()		bfin_read32(TIMER1_PERIOD)
+#define bfin_write_TIMER1_PERIOD(val)		bfin_write32(TIMER1_PERIOD, val)
+#define bfin_read_TIMER1_WIDTH()		bfin_read32(TIMER1_WIDTH)
+#define bfin_write_TIMER1_WIDTH(val)		bfin_write32(TIMER1_WIDTH, val)
+
+#define bfin_read_TIMER2_CONFIG()		bfin_read16(TIMER2_CONFIG)
+#define bfin_write_TIMER2_CONFIG(val)		bfin_write16(TIMER2_CONFIG, val)
+#define bfin_read_TIMER2_COUNTER()		bfin_read32(TIMER2_COUNTER)
+#define bfin_write_TIMER2_COUNTER(val)		bfin_write32(TIMER2_COUNTER, val)
+#define bfin_read_TIMER2_PERIOD()		bfin_read32(TIMER2_PERIOD)
+#define bfin_write_TIMER2_PERIOD(val)		bfin_write32(TIMER2_PERIOD, val)
+#define bfin_read_TIMER2_WIDTH()		bfin_read32(TIMER2_WIDTH)
+#define bfin_write_TIMER2_WIDTH(val)		bfin_write32(TIMER2_WIDTH, val)
+
+#define bfin_read_TIMER3_CONFIG()		bfin_read16(TIMER3_CONFIG)
+#define bfin_write_TIMER3_CONFIG(val)		bfin_write16(TIMER3_CONFIG, val)
+#define bfin_read_TIMER3_COUNTER()		bfin_read32(TIMER3_COUNTER)
+#define bfin_write_TIMER3_COUNTER(val)		bfin_write32(TIMER3_COUNTER, val)
+#define bfin_read_TIMER3_PERIOD()		bfin_read32(TIMER3_PERIOD)
+#define bfin_write_TIMER3_PERIOD(val)		bfin_write32(TIMER3_PERIOD, val)
+#define bfin_read_TIMER3_WIDTH()		bfin_read32(TIMER3_WIDTH)
+#define bfin_write_TIMER3_WIDTH(val)		bfin_write32(TIMER3_WIDTH, val)
+
+#define bfin_read_TIMER4_CONFIG()		bfin_read16(TIMER4_CONFIG)
+#define bfin_write_TIMER4_CONFIG(val)		bfin_write16(TIMER4_CONFIG, val)
+#define bfin_read_TIMER4_COUNTER()		bfin_read32(TIMER4_COUNTER)
+#define bfin_write_TIMER4_COUNTER(val)		bfin_write32(TIMER4_COUNTER, val)
+#define bfin_read_TIMER4_PERIOD()		bfin_read32(TIMER4_PERIOD)
+#define bfin_write_TIMER4_PERIOD(val)		bfin_write32(TIMER4_PERIOD, val)
+#define bfin_read_TIMER4_WIDTH()		bfin_read32(TIMER4_WIDTH)
+#define bfin_write_TIMER4_WIDTH(val)		bfin_write32(TIMER4_WIDTH, val)
+
+#define bfin_read_TIMER5_CONFIG()		bfin_read16(TIMER5_CONFIG)
+#define bfin_write_TIMER5_CONFIG(val)		bfin_write16(TIMER5_CONFIG, val)
+#define bfin_read_TIMER5_COUNTER()		bfin_read32(TIMER5_COUNTER)
+#define bfin_write_TIMER5_COUNTER(val)		bfin_write32(TIMER5_COUNTER, val)
+#define bfin_read_TIMER5_PERIOD()		bfin_read32(TIMER5_PERIOD)
+#define bfin_write_TIMER5_PERIOD(val)		bfin_write32(TIMER5_PERIOD, val)
+#define bfin_read_TIMER5_WIDTH()		bfin_read32(TIMER5_WIDTH)
+#define bfin_write_TIMER5_WIDTH(val)		bfin_write32(TIMER5_WIDTH, val)
+
+#define bfin_read_TIMER6_CONFIG()		bfin_read16(TIMER6_CONFIG)
+#define bfin_write_TIMER6_CONFIG(val)		bfin_write16(TIMER6_CONFIG, val)
+#define bfin_read_TIMER6_COUNTER()		bfin_read32(TIMER6_COUNTER)
+#define bfin_write_TIMER6_COUNTER(val)		bfin_write32(TIMER6_COUNTER, val)
+#define bfin_read_TIMER6_PERIOD()		bfin_read32(TIMER6_PERIOD)
+#define bfin_write_TIMER6_PERIOD(val)		bfin_write32(TIMER6_PERIOD, val)
+#define bfin_read_TIMER6_WIDTH()		bfin_read32(TIMER6_WIDTH)
+#define bfin_write_TIMER6_WIDTH(val)		bfin_write32(TIMER6_WIDTH, val)
+
+#define bfin_read_TIMER7_CONFIG()		bfin_read16(TIMER7_CONFIG)
+#define bfin_write_TIMER7_CONFIG(val)		bfin_write16(TIMER7_CONFIG, val)
+#define bfin_read_TIMER7_COUNTER()		bfin_read32(TIMER7_COUNTER)
+#define bfin_write_TIMER7_COUNTER(val)		bfin_write32(TIMER7_COUNTER, val)
+#define bfin_read_TIMER7_PERIOD()		bfin_read32(TIMER7_PERIOD)
+#define bfin_write_TIMER7_PERIOD(val)		bfin_write32(TIMER7_PERIOD, val)
+#define bfin_read_TIMER7_WIDTH()		bfin_read32(TIMER7_WIDTH)
+#define bfin_write_TIMER7_WIDTH(val)		bfin_write32(TIMER7_WIDTH, val)
+
+#define bfin_read_TIMER_ENABLE()		bfin_read16(TIMER_ENABLE)
+#define bfin_write_TIMER_ENABLE(val)		bfin_write16(TIMER_ENABLE, val)
+#define bfin_read_TIMER_DISABLE()		bfin_read16(TIMER_DISABLE)
+#define bfin_write_TIMER_DISABLE(val)		bfin_write16(TIMER_DISABLE, val)
+#define bfin_read_TIMER_STATUS()		bfin_read32(TIMER_STATUS)
+#define bfin_write_TIMER_STATUS(val)		bfin_write32(TIMER_STATUS, val)
+
+
+/* General Purpose I/O Port F (0xFFC00700 - 0xFFC007FF)								*/
+#define bfin_read_PORTFIO()			bfin_read16(PORTFIO)
+#define bfin_write_PORTFIO(val)			bfin_write16(PORTFIO, val)
+#define bfin_read_PORTFIO_CLEAR()		bfin_read16(PORTFIO_CLEAR)
+#define bfin_write_PORTFIO_CLEAR(val)		bfin_write16(PORTFIO_CLEAR, val)
+#define bfin_read_PORTFIO_SET()			bfin_read16(PORTFIO_SET)
+#define bfin_write_PORTFIO_SET(val)		bfin_write16(PORTFIO_SET, val)
+#define bfin_read_PORTFIO_TOGGLE()		bfin_read16(PORTFIO_TOGGLE)
+#define bfin_write_PORTFIO_TOGGLE(val)		bfin_write16(PORTFIO_TOGGLE, val)
+#define bfin_read_PORTFIO_MASKA()		bfin_read16(PORTFIO_MASKA)
+#define bfin_write_PORTFIO_MASKA(val)		bfin_write16(PORTFIO_MASKA, val)
+#define bfin_read_PORTFIO_MASKA_CLEAR()		bfin_read16(PORTFIO_MASKA_CLEAR)
+#define bfin_write_PORTFIO_MASKA_CLEAR(val)	bfin_write16(PORTFIO_MASKA_CLEAR, val)
+#define bfin_read_PORTFIO_MASKA_SET()		bfin_read16(PORTFIO_MASKA_SET)
+#define bfin_write_PORTFIO_MASKA_SET(val)	bfin_write16(PORTFIO_MASKA_SET, val)
+#define bfin_read_PORTFIO_MASKA_TOGGLE()	bfin_read16(PORTFIO_MASKA_TOGGLE)
+#define bfin_write_PORTFIO_MASKA_TOGGLE(val)	bfin_write16(PORTFIO_MASKA_TOGGLE, val)
+#define bfin_read_PORTFIO_MASKB()		bfin_read16(PORTFIO_MASKB)
+#define bfin_write_PORTFIO_MASKB(val)		bfin_write16(PORTFIO_MASKB, val)
+#define bfin_read_PORTFIO_MASKB_CLEAR()		bfin_read16(PORTFIO_MASKB_CLEAR)
+#define bfin_write_PORTFIO_MASKB_CLEAR(val)	bfin_write16(PORTFIO_MASKB_CLEAR, val)
+#define bfin_read_PORTFIO_MASKB_SET()		bfin_read16(PORTFIO_MASKB_SET)
+#define bfin_write_PORTFIO_MASKB_SET(val)	bfin_write16(PORTFIO_MASKB_SET, val)
+#define bfin_read_PORTFIO_MASKB_TOGGLE()	bfin_read16(PORTFIO_MASKB_TOGGLE)
+#define bfin_write_PORTFIO_MASKB_TOGGLE(val)	bfin_write16(PORTFIO_MASKB_TOGGLE, val)
+#define bfin_read_PORTFIO_DIR()			bfin_read16(PORTFIO_DIR)
+#define bfin_write_PORTFIO_DIR(val)		bfin_write16(PORTFIO_DIR, val)
+#define bfin_read_PORTFIO_POLAR()		bfin_read16(PORTFIO_POLAR)
+#define bfin_write_PORTFIO_POLAR(val)		bfin_write16(PORTFIO_POLAR, val)
+#define bfin_read_PORTFIO_EDGE()		bfin_read16(PORTFIO_EDGE)
+#define bfin_write_PORTFIO_EDGE(val)		bfin_write16(PORTFIO_EDGE, val)
+#define bfin_read_PORTFIO_BOTH()		bfin_read16(PORTFIO_BOTH)
+#define bfin_write_PORTFIO_BOTH(val)		bfin_write16(PORTFIO_BOTH, val)
+#define bfin_read_PORTFIO_INEN()		bfin_read16(PORTFIO_INEN)
+#define bfin_write_PORTFIO_INEN(val)		bfin_write16(PORTFIO_INEN, val)
+
+
+/* SPORT0 Controller		(0xFFC00800 - 0xFFC008FF)								*/
+#define bfin_read_SPORT0_TCR1()			bfin_read16(SPORT0_TCR1)
+#define bfin_write_SPORT0_TCR1(val)		bfin_write16(SPORT0_TCR1, val)
+#define bfin_read_SPORT0_TCR2()			bfin_read16(SPORT0_TCR2)
+#define bfin_write_SPORT0_TCR2(val)		bfin_write16(SPORT0_TCR2, val)
+#define bfin_read_SPORT0_TCLKDIV()		bfin_read16(SPORT0_TCLKDIV)
+#define bfin_write_SPORT0_TCLKDIV(val)		bfin_write16(SPORT0_TCLKDIV, val)
+#define bfin_read_SPORT0_TFSDIV()		bfin_read16(SPORT0_TFSDIV)
+#define bfin_write_SPORT0_TFSDIV(val)		bfin_write16(SPORT0_TFSDIV, val)
+#define bfin_read_SPORT0_TX()			bfin_read32(SPORT0_TX)
+#define bfin_write_SPORT0_TX(val)		bfin_write32(SPORT0_TX, val)
+#define bfin_read_SPORT0_RX()			bfin_read32(SPORT0_RX)
+#define bfin_write_SPORT0_RX(val)		bfin_write32(SPORT0_RX, val)
+#define bfin_read_SPORT0_TX32()			bfin_read32(SPORT0_TX)
+#define bfin_write_SPORT0_TX32(val)		bfin_write32(SPORT0_TX, val)
+#define bfin_read_SPORT0_RX32()			bfin_read32(SPORT0_RX)
+#define bfin_write_SPORT0_RX32(val)		bfin_write32(SPORT0_RX, val)
+#define bfin_read_SPORT0_TX16()			bfin_read16(SPORT0_TX)
+#define bfin_write_SPORT0_TX16(val)		bfin_write16(SPORT0_TX, val)
+#define bfin_read_SPORT0_RX16()			bfin_read16(SPORT0_RX)
+#define bfin_write_SPORT0_RX16(val)		bfin_write16(SPORT0_RX, val)
+#define bfin_read_SPORT0_RCR1()			bfin_read16(SPORT0_RCR1)
+#define bfin_write_SPORT0_RCR1(val)		bfin_write16(SPORT0_RCR1, val)
+#define bfin_read_SPORT0_RCR2()			bfin_read16(SPORT0_RCR2)
+#define bfin_write_SPORT0_RCR2(val)		bfin_write16(SPORT0_RCR2, val)
+#define bfin_read_SPORT0_RCLKDIV()		bfin_read16(SPORT0_RCLKDIV)
+#define bfin_write_SPORT0_RCLKDIV(val)		bfin_write16(SPORT0_RCLKDIV, val)
+#define bfin_read_SPORT0_RFSDIV()		bfin_read16(SPORT0_RFSDIV)
+#define bfin_write_SPORT0_RFSDIV(val)		bfin_write16(SPORT0_RFSDIV, val)
+#define bfin_read_SPORT0_STAT()			bfin_read16(SPORT0_STAT)
+#define bfin_write_SPORT0_STAT(val)		bfin_write16(SPORT0_STAT, val)
+#define bfin_read_SPORT0_CHNL()			bfin_read16(SPORT0_CHNL)
+#define bfin_write_SPORT0_CHNL(val)		bfin_write16(SPORT0_CHNL, val)
+#define bfin_read_SPORT0_MCMC1()		bfin_read16(SPORT0_MCMC1)
+#define bfin_write_SPORT0_MCMC1(val)		bfin_write16(SPORT0_MCMC1, val)
+#define bfin_read_SPORT0_MCMC2()		bfin_read16(SPORT0_MCMC2)
+#define bfin_write_SPORT0_MCMC2(val)		bfin_write16(SPORT0_MCMC2, val)
+#define bfin_read_SPORT0_MTCS0()		bfin_read32(SPORT0_MTCS0)
+#define bfin_write_SPORT0_MTCS0(val)		bfin_write32(SPORT0_MTCS0, val)
+#define bfin_read_SPORT0_MTCS1()		bfin_read32(SPORT0_MTCS1)
+#define bfin_write_SPORT0_MTCS1(val)		bfin_write32(SPORT0_MTCS1, val)
+#define bfin_read_SPORT0_MTCS2()		bfin_read32(SPORT0_MTCS2)
+#define bfin_write_SPORT0_MTCS2(val)		bfin_write32(SPORT0_MTCS2, val)
+#define bfin_read_SPORT0_MTCS3()		bfin_read32(SPORT0_MTCS3)
+#define bfin_write_SPORT0_MTCS3(val)		bfin_write32(SPORT0_MTCS3, val)
+#define bfin_read_SPORT0_MRCS0()		bfin_read32(SPORT0_MRCS0)
+#define bfin_write_SPORT0_MRCS0(val)		bfin_write32(SPORT0_MRCS0, val)
+#define bfin_read_SPORT0_MRCS1()		bfin_read32(SPORT0_MRCS1)
+#define bfin_write_SPORT0_MRCS1(val)		bfin_write32(SPORT0_MRCS1, val)
+#define bfin_read_SPORT0_MRCS2()		bfin_read32(SPORT0_MRCS2)
+#define bfin_write_SPORT0_MRCS2(val)		bfin_write32(SPORT0_MRCS2, val)
+#define bfin_read_SPORT0_MRCS3()		bfin_read32(SPORT0_MRCS3)
+#define bfin_write_SPORT0_MRCS3(val)		bfin_write32(SPORT0_MRCS3, val)
+
+
+/* SPORT1 Controller		(0xFFC00900 - 0xFFC009FF)								*/
+#define bfin_read_SPORT1_TCR1()			bfin_read16(SPORT1_TCR1)
+#define bfin_write_SPORT1_TCR1(val)		bfin_write16(SPORT1_TCR1, val)
+#define bfin_read_SPORT1_TCR2()			bfin_read16(SPORT1_TCR2)
+#define bfin_write_SPORT1_TCR2(val)		bfin_write16(SPORT1_TCR2, val)
+#define bfin_read_SPORT1_TCLKDIV()		bfin_read16(SPORT1_TCLKDIV)
+#define bfin_write_SPORT1_TCLKDIV(val)		bfin_write16(SPORT1_TCLKDIV, val)
+#define bfin_read_SPORT1_TFSDIV()		bfin_read16(SPORT1_TFSDIV)
+#define bfin_write_SPORT1_TFSDIV(val)		bfin_write16(SPORT1_TFSDIV, val)
+#define bfin_read_SPORT1_TX()			bfin_read32(SPORT1_TX)
+#define bfin_write_SPORT1_TX(val)		bfin_write32(SPORT1_TX, val)
+#define bfin_read_SPORT1_RX()			bfin_read32(SPORT1_RX)
+#define bfin_write_SPORT1_RX(val)		bfin_write32(SPORT1_RX, val)
+#define bfin_read_SPORT1_TX32()			bfin_read32(SPORT1_TX)
+#define bfin_write_SPORT1_TX32(val)		bfin_write32(SPORT1_TX, val)
+#define bfin_read_SPORT1_RX32()			bfin_read32(SPORT1_RX)
+#define bfin_write_SPORT1_RX32(val)		bfin_write32(SPORT1_RX, val)
+#define bfin_read_SPORT1_TX16()			bfin_read16(SPORT1_TX)
+#define bfin_write_SPORT1_TX16(val)		bfin_write16(SPORT1_TX, val)
+#define bfin_read_SPORT1_RX16()			bfin_read16(SPORT1_RX)
+#define bfin_write_SPORT1_RX16(val)		bfin_write16(SPORT1_RX, val)
+#define bfin_read_SPORT1_RCR1()			bfin_read16(SPORT1_RCR1)
+#define bfin_write_SPORT1_RCR1(val)		bfin_write16(SPORT1_RCR1, val)
+#define bfin_read_SPORT1_RCR2()			bfin_read16(SPORT1_RCR2)
+#define bfin_write_SPORT1_RCR2(val)		bfin_write16(SPORT1_RCR2, val)
+#define bfin_read_SPORT1_RCLKDIV()		bfin_read16(SPORT1_RCLKDIV)
+#define bfin_write_SPORT1_RCLKDIV(val)		bfin_write16(SPORT1_RCLKDIV, val)
+#define bfin_read_SPORT1_RFSDIV()		bfin_read16(SPORT1_RFSDIV)
+#define bfin_write_SPORT1_RFSDIV(val)		bfin_write16(SPORT1_RFSDIV, val)
+#define bfin_read_SPORT1_STAT()			bfin_read16(SPORT1_STAT)
+#define bfin_write_SPORT1_STAT(val)		bfin_write16(SPORT1_STAT, val)
+#define bfin_read_SPORT1_CHNL()			bfin_read16(SPORT1_CHNL)
+#define bfin_write_SPORT1_CHNL(val)		bfin_write16(SPORT1_CHNL, val)
+#define bfin_read_SPORT1_MCMC1()		bfin_read16(SPORT1_MCMC1)
+#define bfin_write_SPORT1_MCMC1(val)		bfin_write16(SPORT1_MCMC1, val)
+#define bfin_read_SPORT1_MCMC2()		bfin_read16(SPORT1_MCMC2)
+#define bfin_write_SPORT1_MCMC2(val)		bfin_write16(SPORT1_MCMC2, val)
+#define bfin_read_SPORT1_MTCS0()		bfin_read32(SPORT1_MTCS0)
+#define bfin_write_SPORT1_MTCS0(val)		bfin_write32(SPORT1_MTCS0, val)
+#define bfin_read_SPORT1_MTCS1()		bfin_read32(SPORT1_MTCS1)
+#define bfin_write_SPORT1_MTCS1(val)		bfin_write32(SPORT1_MTCS1, val)
+#define bfin_read_SPORT1_MTCS2()		bfin_read32(SPORT1_MTCS2)
+#define bfin_write_SPORT1_MTCS2(val)		bfin_write32(SPORT1_MTCS2, val)
+#define bfin_read_SPORT1_MTCS3()		bfin_read32(SPORT1_MTCS3)
+#define bfin_write_SPORT1_MTCS3(val)		bfin_write32(SPORT1_MTCS3, val)
+#define bfin_read_SPORT1_MRCS0()		bfin_read32(SPORT1_MRCS0)
+#define bfin_write_SPORT1_MRCS0(val)		bfin_write32(SPORT1_MRCS0, val)
+#define bfin_read_SPORT1_MRCS1()		bfin_read32(SPORT1_MRCS1)
+#define bfin_write_SPORT1_MRCS1(val)		bfin_write32(SPORT1_MRCS1, val)
+#define bfin_read_SPORT1_MRCS2()		bfin_read32(SPORT1_MRCS2)
+#define bfin_write_SPORT1_MRCS2(val)		bfin_write32(SPORT1_MRCS2, val)
+#define bfin_read_SPORT1_MRCS3()		bfin_read32(SPORT1_MRCS3)
+#define bfin_write_SPORT1_MRCS3(val)		bfin_write32(SPORT1_MRCS3, val)
+
+
+/* External Bus Interface Unit (0xFFC00A00 - 0xFFC00AFF)							*/
+#define bfin_read_EBIU_AMGCTL()			bfin_read16(EBIU_AMGCTL)
+#define bfin_write_EBIU_AMGCTL(val)		bfin_write16(EBIU_AMGCTL, val)
+#define bfin_read_EBIU_AMBCTL0()		bfin_read32(EBIU_AMBCTL0)
+#define bfin_write_EBIU_AMBCTL0(val)		bfin_write32(EBIU_AMBCTL0, val)
+#define bfin_read_EBIU_AMBCTL1()		bfin_read32(EBIU_AMBCTL1)
+#define bfin_write_EBIU_AMBCTL1(val)		bfin_write32(EBIU_AMBCTL1, val)
+#define bfin_read_EBIU_SDGCTL()			bfin_read32(EBIU_SDGCTL)
+#define bfin_write_EBIU_SDGCTL(val)		bfin_write32(EBIU_SDGCTL, val)
+#define bfin_read_EBIU_SDBCTL()			bfin_read16(EBIU_SDBCTL)
+#define bfin_write_EBIU_SDBCTL(val)		bfin_write16(EBIU_SDBCTL, val)
+#define bfin_read_EBIU_SDRRC()			bfin_read16(EBIU_SDRRC)
+#define bfin_write_EBIU_SDRRC(val)		bfin_write16(EBIU_SDRRC, val)
+#define bfin_read_EBIU_SDSTAT()			bfin_read16(EBIU_SDSTAT)
+#define bfin_write_EBIU_SDSTAT(val)		bfin_write16(EBIU_SDSTAT, val)
+
+
+/* DMA Traffic Control Registers													*/
+#define bfin_read_DMAC_TC_PER()			bfin_read16(DMAC_TC_PER)
+#define bfin_write_DMAC_TC_PER(val)		bfin_write16(DMAC_TC_PER, val)
+#define bfin_read_DMAC_TC_CNT()			bfin_read16(DMAC_TC_CNT)
+#define bfin_write_DMAC_TC_CNT(val)		bfin_write16(DMAC_TC_CNT, val)
+
+/* DMA Controller																	*/
+#define bfin_read_DMA0_CONFIG()			bfin_read16(DMA0_CONFIG)
+#define bfin_write_DMA0_CONFIG(val)		bfin_write16(DMA0_CONFIG, val)
+#define bfin_read_DMA0_NEXT_DESC_PTR()		bfin_read32(DMA0_NEXT_DESC_PTR)
+#define bfin_write_DMA0_NEXT_DESC_PTR(val)	bfin_write32(DMA0_NEXT_DESC_PTR, val)
+#define bfin_read_DMA0_START_ADDR()		bfin_read32(DMA0_START_ADDR)
+#define bfin_write_DMA0_START_ADDR(val)		bfin_write32(DMA0_START_ADDR, val)
+#define bfin_read_DMA0_X_COUNT()		bfin_read16(DMA0_X_COUNT)
+#define bfin_write_DMA0_X_COUNT(val)		bfin_write16(DMA0_X_COUNT, val)
+#define bfin_read_DMA0_Y_COUNT()		bfin_read16(DMA0_Y_COUNT)
+#define bfin_write_DMA0_Y_COUNT(val)		bfin_write16(DMA0_Y_COUNT, val)
+#define bfin_read_DMA0_X_MODIFY()		bfin_read16(DMA0_X_MODIFY)
+#define bfin_write_DMA0_X_MODIFY(val)		bfin_write16(DMA0_X_MODIFY, val)
+#define bfin_read_DMA0_Y_MODIFY()		bfin_read16(DMA0_Y_MODIFY)
+#define bfin_write_DMA0_Y_MODIFY(val)		bfin_write16(DMA0_Y_MODIFY, val)
+#define bfin_read_DMA0_CURR_DESC_PTR()		bfin_read32(DMA0_CURR_DESC_PTR)
+#define bfin_write_DMA0_CURR_DESC_PTR(val)	bfin_write32(DMA0_CURR_DESC_PTR, val)
+#define bfin_read_DMA0_CURR_ADDR()		bfin_read32(DMA0_CURR_ADDR)
+#define bfin_write_DMA0_CURR_ADDR(val)		bfin_write32(DMA0_CURR_ADDR, val)
+#define bfin_read_DMA0_CURR_X_COUNT()		bfin_read16(DMA0_CURR_X_COUNT)
+#define bfin_write_DMA0_CURR_X_COUNT(val)	bfin_write16(DMA0_CURR_X_COUNT, val)
+#define bfin_read_DMA0_CURR_Y_COUNT()		bfin_read16(DMA0_CURR_Y_COUNT)
+#define bfin_write_DMA0_CURR_Y_COUNT(val)	bfin_write16(DMA0_CURR_Y_COUNT, val)
+#define bfin_read_DMA0_IRQ_STATUS()		bfin_read16(DMA0_IRQ_STATUS)
+#define bfin_write_DMA0_IRQ_STATUS(val)		bfin_write16(DMA0_IRQ_STATUS, val)
+#define bfin_read_DMA0_PERIPHERAL_MAP()		bfin_read16(DMA0_PERIPHERAL_MAP)
+#define bfin_write_DMA0_PERIPHERAL_MAP(val)	bfin_write16(DMA0_PERIPHERAL_MAP, val)
+
+#define bfin_read_DMA1_CONFIG()			bfin_read16(DMA1_CONFIG)
+#define bfin_write_DMA1_CONFIG(val)		bfin_write16(DMA1_CONFIG, val)
+#define bfin_read_DMA1_NEXT_DESC_PTR()		bfin_read32(DMA1_NEXT_DESC_PTR)
+#define bfin_write_DMA1_NEXT_DESC_PTR(val)	bfin_write32(DMA1_NEXT_DESC_PTR, val)
+#define bfin_read_DMA1_START_ADDR()		bfin_read32(DMA1_START_ADDR)
+#define bfin_write_DMA1_START_ADDR(val)		bfin_write32(DMA1_START_ADDR, val)
+#define bfin_read_DMA1_X_COUNT()		bfin_read16(DMA1_X_COUNT)
+#define bfin_write_DMA1_X_COUNT(val)		bfin_write16(DMA1_X_COUNT, val)
+#define bfin_read_DMA1_Y_COUNT()		bfin_read16(DMA1_Y_COUNT)
+#define bfin_write_DMA1_Y_COUNT(val)		bfin_write16(DMA1_Y_COUNT, val)
+#define bfin_read_DMA1_X_MODIFY()		bfin_read16(DMA1_X_MODIFY)
+#define bfin_write_DMA1_X_MODIFY(val)		bfin_write16(DMA1_X_MODIFY, val)
+#define bfin_read_DMA1_Y_MODIFY()		bfin_read16(DMA1_Y_MODIFY)
+#define bfin_write_DMA1_Y_MODIFY(val)		bfin_write16(DMA1_Y_MODIFY, val)
+#define bfin_read_DMA1_CURR_DESC_PTR()		bfin_read32(DMA1_CURR_DESC_PTR)
+#define bfin_write_DMA1_CURR_DESC_PTR(val)	bfin_write32(DMA1_CURR_DESC_PTR, val)
+#define bfin_read_DMA1_CURR_ADDR()		bfin_read32(DMA1_CURR_ADDR)
+#define bfin_write_DMA1_CURR_ADDR(val)		bfin_write32(DMA1_CURR_ADDR, val)
+#define bfin_read_DMA1_CURR_X_COUNT()		bfin_read16(DMA1_CURR_X_COUNT)
+#define bfin_write_DMA1_CURR_X_COUNT(val)	bfin_write16(DMA1_CURR_X_COUNT, val)
+#define bfin_read_DMA1_CURR_Y_COUNT()		bfin_read16(DMA1_CURR_Y_COUNT)
+#define bfin_write_DMA1_CURR_Y_COUNT(val)	bfin_write16(DMA1_CURR_Y_COUNT, val)
+#define bfin_read_DMA1_IRQ_STATUS()		bfin_read16(DMA1_IRQ_STATUS)
+#define bfin_write_DMA1_IRQ_STATUS(val)		bfin_write16(DMA1_IRQ_STATUS, val)
+#define bfin_read_DMA1_PERIPHERAL_MAP()		bfin_read16(DMA1_PERIPHERAL_MAP)
+#define bfin_write_DMA1_PERIPHERAL_MAP(val)	bfin_write16(DMA1_PERIPHERAL_MAP, val)
+
+#define bfin_read_DMA2_CONFIG()			bfin_read16(DMA2_CONFIG)
+#define bfin_write_DMA2_CONFIG(val)		bfin_write16(DMA2_CONFIG, val)
+#define bfin_read_DMA2_NEXT_DESC_PTR()		bfin_read32(DMA2_NEXT_DESC_PTR)
+#define bfin_write_DMA2_NEXT_DESC_PTR(val)	bfin_write32(DMA2_NEXT_DESC_PTR, val)
+#define bfin_read_DMA2_START_ADDR()		bfin_read32(DMA2_START_ADDR)
+#define bfin_write_DMA2_START_ADDR(val)		bfin_write32(DMA2_START_ADDR, val)
+#define bfin_read_DMA2_X_COUNT()		bfin_read16(DMA2_X_COUNT)
+#define bfin_write_DMA2_X_COUNT(val)		bfin_write16(DMA2_X_COUNT, val)
+#define bfin_read_DMA2_Y_COUNT()		bfin_read16(DMA2_Y_COUNT)
+#define bfin_write_DMA2_Y_COUNT(val)		bfin_write16(DMA2_Y_COUNT, val)
+#define bfin_read_DMA2_X_MODIFY()		bfin_read16(DMA2_X_MODIFY)
+#define bfin_write_DMA2_X_MODIFY(val)		bfin_write16(DMA2_X_MODIFY, val)
+#define bfin_read_DMA2_Y_MODIFY()		bfin_read16(DMA2_Y_MODIFY)
+#define bfin_write_DMA2_Y_MODIFY(val)		bfin_write16(DMA2_Y_MODIFY, val)
+#define bfin_read_DMA2_CURR_DESC_PTR()		bfin_read32(DMA2_CURR_DESC_PTR)
+#define bfin_write_DMA2_CURR_DESC_PTR(val)	bfin_write32(DMA2_CURR_DESC_PTR, val)
+#define bfin_read_DMA2_CURR_ADDR()		bfin_read32(DMA2_CURR_ADDR)
+#define bfin_write_DMA2_CURR_ADDR(val)		bfin_write32(DMA2_CURR_ADDR, val)
+#define bfin_read_DMA2_CURR_X_COUNT()		bfin_read16(DMA2_CURR_X_COUNT)
+#define bfin_write_DMA2_CURR_X_COUNT(val)	bfin_write16(DMA2_CURR_X_COUNT, val)
+#define bfin_read_DMA2_CURR_Y_COUNT()		bfin_read16(DMA2_CURR_Y_COUNT)
+#define bfin_write_DMA2_CURR_Y_COUNT(val)	bfin_write16(DMA2_CURR_Y_COUNT, val)
+#define bfin_read_DMA2_IRQ_STATUS()		bfin_read16(DMA2_IRQ_STATUS)
+#define bfin_write_DMA2_IRQ_STATUS(val)		bfin_write16(DMA2_IRQ_STATUS, val)
+#define bfin_read_DMA2_PERIPHERAL_MAP()		bfin_read16(DMA2_PERIPHERAL_MAP)
+#define bfin_write_DMA2_PERIPHERAL_MAP(val)	bfin_write16(DMA2_PERIPHERAL_MAP, val)
+
+#define bfin_read_DMA3_CONFIG()			bfin_read16(DMA3_CONFIG)
+#define bfin_write_DMA3_CONFIG(val)		bfin_write16(DMA3_CONFIG, val)
+#define bfin_read_DMA3_NEXT_DESC_PTR()		bfin_read32(DMA3_NEXT_DESC_PTR)
+#define bfin_write_DMA3_NEXT_DESC_PTR(val)	bfin_write32(DMA3_NEXT_DESC_PTR, val)
+#define bfin_read_DMA3_START_ADDR()		bfin_read32(DMA3_START_ADDR)
+#define bfin_write_DMA3_START_ADDR(val)		bfin_write32(DMA3_START_ADDR, val)
+#define bfin_read_DMA3_X_COUNT()		bfin_read16(DMA3_X_COUNT)
+#define bfin_write_DMA3_X_COUNT(val)		bfin_write16(DMA3_X_COUNT, val)
+#define bfin_read_DMA3_Y_COUNT()		bfin_read16(DMA3_Y_COUNT)
+#define bfin_write_DMA3_Y_COUNT(val)		bfin_write16(DMA3_Y_COUNT, val)
+#define bfin_read_DMA3_X_MODIFY()		bfin_read16(DMA3_X_MODIFY)
+#define bfin_write_DMA3_X_MODIFY(val)		bfin_write16(DMA3_X_MODIFY, val)
+#define bfin_read_DMA3_Y_MODIFY()		bfin_read16(DMA3_Y_MODIFY)
+#define bfin_write_DMA3_Y_MODIFY(val)		bfin_write16(DMA3_Y_MODIFY, val)
+#define bfin_read_DMA3_CURR_DESC_PTR()		bfin_read32(DMA3_CURR_DESC_PTR)
+#define bfin_write_DMA3_CURR_DESC_PTR(val)	bfin_write32(DMA3_CURR_DESC_PTR, val)
+#define bfin_read_DMA3_CURR_ADDR()		bfin_read32(DMA3_CURR_ADDR)
+#define bfin_write_DMA3_CURR_ADDR(val)		bfin_write32(DMA3_CURR_ADDR, val)
+#define bfin_read_DMA3_CURR_X_COUNT()		bfin_read16(DMA3_CURR_X_COUNT)
+#define bfin_write_DMA3_CURR_X_COUNT(val)	bfin_write16(DMA3_CURR_X_COUNT, val)
+#define bfin_read_DMA3_CURR_Y_COUNT()		bfin_read16(DMA3_CURR_Y_COUNT)
+#define bfin_write_DMA3_CURR_Y_COUNT(val)	bfin_write16(DMA3_CURR_Y_COUNT, val)
+#define bfin_read_DMA3_IRQ_STATUS()		bfin_read16(DMA3_IRQ_STATUS)
+#define bfin_write_DMA3_IRQ_STATUS(val)		bfin_write16(DMA3_IRQ_STATUS, val)
+#define bfin_read_DMA3_PERIPHERAL_MAP()		bfin_read16(DMA3_PERIPHERAL_MAP)
+#define bfin_write_DMA3_PERIPHERAL_MAP(val)	bfin_write16(DMA3_PERIPHERAL_MAP, val)
+
+#define bfin_read_DMA4_CONFIG()			bfin_read16(DMA4_CONFIG)
+#define bfin_write_DMA4_CONFIG(val)		bfin_write16(DMA4_CONFIG, val)
+#define bfin_read_DMA4_NEXT_DESC_PTR()		bfin_read32(DMA4_NEXT_DESC_PTR)
+#define bfin_write_DMA4_NEXT_DESC_PTR(val)	bfin_write32(DMA4_NEXT_DESC_PTR, val)
+#define bfin_read_DMA4_START_ADDR()		bfin_read32(DMA4_START_ADDR)
+#define bfin_write_DMA4_START_ADDR(val)		bfin_write32(DMA4_START_ADDR, val)
+#define bfin_read_DMA4_X_COUNT()		bfin_read16(DMA4_X_COUNT)
+#define bfin_write_DMA4_X_COUNT(val)		bfin_write16(DMA4_X_COUNT, val)
+#define bfin_read_DMA4_Y_COUNT()		bfin_read16(DMA4_Y_COUNT)
+#define bfin_write_DMA4_Y_COUNT(val)		bfin_write16(DMA4_Y_COUNT, val)
+#define bfin_read_DMA4_X_MODIFY()		bfin_read16(DMA4_X_MODIFY)
+#define bfin_write_DMA4_X_MODIFY(val)		bfin_write16(DMA4_X_MODIFY, val)
+#define bfin_read_DMA4_Y_MODIFY()		bfin_read16(DMA4_Y_MODIFY)
+#define bfin_write_DMA4_Y_MODIFY(val)		bfin_write16(DMA4_Y_MODIFY, val)
+#define bfin_read_DMA4_CURR_DESC_PTR()		bfin_read32(DMA4_CURR_DESC_PTR)
+#define bfin_write_DMA4_CURR_DESC_PTR(val)	bfin_write32(DMA4_CURR_DESC_PTR, val)
+#define bfin_read_DMA4_CURR_ADDR()		bfin_read32(DMA4_CURR_ADDR)
+#define bfin_write_DMA4_CURR_ADDR(val)		bfin_write32(DMA4_CURR_ADDR, val)
+#define bfin_read_DMA4_CURR_X_COUNT()		bfin_read16(DMA4_CURR_X_COUNT)
+#define bfin_write_DMA4_CURR_X_COUNT(val)	bfin_write16(DMA4_CURR_X_COUNT, val)
+#define bfin_read_DMA4_CURR_Y_COUNT()		bfin_read16(DMA4_CURR_Y_COUNT)
+#define bfin_write_DMA4_CURR_Y_COUNT(val)	bfin_write16(DMA4_CURR_Y_COUNT, val)
+#define bfin_read_DMA4_IRQ_STATUS()		bfin_read16(DMA4_IRQ_STATUS)
+#define bfin_write_DMA4_IRQ_STATUS(val)		bfin_write16(DMA4_IRQ_STATUS, val)
+#define bfin_read_DMA4_PERIPHERAL_MAP()		bfin_read16(DMA4_PERIPHERAL_MAP)
+#define bfin_write_DMA4_PERIPHERAL_MAP(val)	bfin_write16(DMA4_PERIPHERAL_MAP, val)
+
+#define bfin_read_DMA5_CONFIG()			bfin_read16(DMA5_CONFIG)
+#define bfin_write_DMA5_CONFIG(val)		bfin_write16(DMA5_CONFIG, val)
+#define bfin_read_DMA5_NEXT_DESC_PTR()		bfin_read32(DMA5_NEXT_DESC_PTR)
+#define bfin_write_DMA5_NEXT_DESC_PTR(val)	bfin_write32(DMA5_NEXT_DESC_PTR, val)
+#define bfin_read_DMA5_START_ADDR()		bfin_read32(DMA5_START_ADDR)
+#define bfin_write_DMA5_START_ADDR(val)		bfin_write32(DMA5_START_ADDR, val)
+#define bfin_read_DMA5_X_COUNT()		bfin_read16(DMA5_X_COUNT)
+#define bfin_write_DMA5_X_COUNT(val)		bfin_write16(DMA5_X_COUNT, val)
+#define bfin_read_DMA5_Y_COUNT()		bfin_read16(DMA5_Y_COUNT)
+#define bfin_write_DMA5_Y_COUNT(val)		bfin_write16(DMA5_Y_COUNT, val)
+#define bfin_read_DMA5_X_MODIFY()		bfin_read16(DMA5_X_MODIFY)
+#define bfin_write_DMA5_X_MODIFY(val)		bfin_write16(DMA5_X_MODIFY, val)
+#define bfin_read_DMA5_Y_MODIFY()		bfin_read16(DMA5_Y_MODIFY)
+#define bfin_write_DMA5_Y_MODIFY(val)		bfin_write16(DMA5_Y_MODIFY, val)
+#define bfin_read_DMA5_CURR_DESC_PTR()		bfin_read32(DMA5_CURR_DESC_PTR)
+#define bfin_write_DMA5_CURR_DESC_PTR(val)	bfin_write32(DMA5_CURR_DESC_PTR, val)
+#define bfin_read_DMA5_CURR_ADDR()		bfin_read32(DMA5_CURR_ADDR)
+#define bfin_write_DMA5_CURR_ADDR(val)		bfin_write32(DMA5_CURR_ADDR, val)
+#define bfin_read_DMA5_CURR_X_COUNT()		bfin_read16(DMA5_CURR_X_COUNT)
+#define bfin_write_DMA5_CURR_X_COUNT(val)	bfin_write16(DMA5_CURR_X_COUNT, val)
+#define bfin_read_DMA5_CURR_Y_COUNT()		bfin_read16(DMA5_CURR_Y_COUNT)
+#define bfin_write_DMA5_CURR_Y_COUNT(val)	bfin_write16(DMA5_CURR_Y_COUNT, val)
+#define bfin_read_DMA5_IRQ_STATUS()		bfin_read16(DMA5_IRQ_STATUS)
+#define bfin_write_DMA5_IRQ_STATUS(val)		bfin_write16(DMA5_IRQ_STATUS, val)
+#define bfin_read_DMA5_PERIPHERAL_MAP()		bfin_read16(DMA5_PERIPHERAL_MAP)
+#define bfin_write_DMA5_PERIPHERAL_MAP(val)	bfin_write16(DMA5_PERIPHERAL_MAP, val)
+
+#define bfin_read_DMA6_CONFIG()			bfin_read16(DMA6_CONFIG)
+#define bfin_write_DMA6_CONFIG(val)		bfin_write16(DMA6_CONFIG, val)
+#define bfin_read_DMA6_NEXT_DESC_PTR()		bfin_read32(DMA6_NEXT_DESC_PTR)
+#define bfin_write_DMA6_NEXT_DESC_PTR(val)	bfin_write32(DMA6_NEXT_DESC_PTR, val)
+#define bfin_read_DMA6_START_ADDR()		bfin_read32(DMA6_START_ADDR)
+#define bfin_write_DMA6_START_ADDR(val)		bfin_write32(DMA6_START_ADDR, val)
+#define bfin_read_DMA6_X_COUNT()		bfin_read16(DMA6_X_COUNT)
+#define bfin_write_DMA6_X_COUNT(val)		bfin_write16(DMA6_X_COUNT, val)
+#define bfin_read_DMA6_Y_COUNT()		bfin_read16(DMA6_Y_COUNT)
+#define bfin_write_DMA6_Y_COUNT(val)		bfin_write16(DMA6_Y_COUNT, val)
+#define bfin_read_DMA6_X_MODIFY()		bfin_read16(DMA6_X_MODIFY)
+#define bfin_write_DMA6_X_MODIFY(val)		bfin_write16(DMA6_X_MODIFY, val)
+#define bfin_read_DMA6_Y_MODIFY()		bfin_read16(DMA6_Y_MODIFY)
+#define bfin_write_DMA6_Y_MODIFY(val)		bfin_write16(DMA6_Y_MODIFY, val)
+#define bfin_read_DMA6_CURR_DESC_PTR()		bfin_read32(DMA6_CURR_DESC_PTR)
+#define bfin_write_DMA6_CURR_DESC_PTR(val)	bfin_write32(DMA6_CURR_DESC_PTR, val)
+#define bfin_read_DMA6_CURR_ADDR()		bfin_read32(DMA6_CURR_ADDR)
+#define bfin_write_DMA6_CURR_ADDR(val)		bfin_write32(DMA6_CURR_ADDR, val)
+#define bfin_read_DMA6_CURR_X_COUNT()		bfin_read16(DMA6_CURR_X_COUNT)
+#define bfin_write_DMA6_CURR_X_COUNT(val)	bfin_write16(DMA6_CURR_X_COUNT, val)
+#define bfin_read_DMA6_CURR_Y_COUNT()		bfin_read16(DMA6_CURR_Y_COUNT)
+#define bfin_write_DMA6_CURR_Y_COUNT(val)	bfin_write16(DMA6_CURR_Y_COUNT, val)
+#define bfin_read_DMA6_IRQ_STATUS()		bfin_read16(DMA6_IRQ_STATUS)
+#define bfin_write_DMA6_IRQ_STATUS(val)		bfin_write16(DMA6_IRQ_STATUS, val)
+#define bfin_read_DMA6_PERIPHERAL_MAP()		bfin_read16(DMA6_PERIPHERAL_MAP)
+#define bfin_write_DMA6_PERIPHERAL_MAP(val)	bfin_write16(DMA6_PERIPHERAL_MAP, val)
+
+#define bfin_read_DMA7_CONFIG()			bfin_read16(DMA7_CONFIG)
+#define bfin_write_DMA7_CONFIG(val)		bfin_write16(DMA7_CONFIG, val)
+#define bfin_read_DMA7_NEXT_DESC_PTR()		bfin_read32(DMA7_NEXT_DESC_PTR)
+#define bfin_write_DMA7_NEXT_DESC_PTR(val)	bfin_write32(DMA7_NEXT_DESC_PTR, val)
+#define bfin_read_DMA7_START_ADDR()		bfin_read32(DMA7_START_ADDR)
+#define bfin_write_DMA7_START_ADDR(val)		bfin_write32(DMA7_START_ADDR, val)
+#define bfin_read_DMA7_X_COUNT()		bfin_read16(DMA7_X_COUNT)
+#define bfin_write_DMA7_X_COUNT(val)		bfin_write16(DMA7_X_COUNT, val)
+#define bfin_read_DMA7_Y_COUNT()		bfin_read16(DMA7_Y_COUNT)
+#define bfin_write_DMA7_Y_COUNT(val)		bfin_write16(DMA7_Y_COUNT, val)
+#define bfin_read_DMA7_X_MODIFY()		bfin_read16(DMA7_X_MODIFY)
+#define bfin_write_DMA7_X_MODIFY(val)		bfin_write16(DMA7_X_MODIFY, val)
+#define bfin_read_DMA7_Y_MODIFY()		bfin_read16(DMA7_Y_MODIFY)
+#define bfin_write_DMA7_Y_MODIFY(val)		bfin_write16(DMA7_Y_MODIFY, val)
+#define bfin_read_DMA7_CURR_DESC_PTR()		bfin_read32(DMA7_CURR_DESC_PTR)
+#define bfin_write_DMA7_CURR_DESC_PTR(val)	bfin_write32(DMA7_CURR_DESC_PTR, val)
+#define bfin_read_DMA7_CURR_ADDR()		bfin_read32(DMA7_CURR_ADDR)
+#define bfin_write_DMA7_CURR_ADDR(val)		bfin_write32(DMA7_CURR_ADDR, val)
+#define bfin_read_DMA7_CURR_X_COUNT()		bfin_read16(DMA7_CURR_X_COUNT)
+#define bfin_write_DMA7_CURR_X_COUNT(val)	bfin_write16(DMA7_CURR_X_COUNT, val)
+#define bfin_read_DMA7_CURR_Y_COUNT()		bfin_read16(DMA7_CURR_Y_COUNT)
+#define bfin_write_DMA7_CURR_Y_COUNT(val)	bfin_write16(DMA7_CURR_Y_COUNT, val)
+#define bfin_read_DMA7_IRQ_STATUS()		bfin_read16(DMA7_IRQ_STATUS)
+#define bfin_write_DMA7_IRQ_STATUS(val)		bfin_write16(DMA7_IRQ_STATUS, val)
+#define bfin_read_DMA7_PERIPHERAL_MAP()		bfin_read16(DMA7_PERIPHERAL_MAP)
+#define bfin_write_DMA7_PERIPHERAL_MAP(val)	bfin_write16(DMA7_PERIPHERAL_MAP, val)
+
+#define bfin_read_DMA8_CONFIG()			bfin_read16(DMA8_CONFIG)
+#define bfin_write_DMA8_CONFIG(val)		bfin_write16(DMA8_CONFIG, val)
+#define bfin_read_DMA8_NEXT_DESC_PTR()		bfin_read32(DMA8_NEXT_DESC_PTR)
+#define bfin_write_DMA8_NEXT_DESC_PTR(val)	bfin_write32(DMA8_NEXT_DESC_PTR, val)
+#define bfin_read_DMA8_START_ADDR()		bfin_read32(DMA8_START_ADDR)
+#define bfin_write_DMA8_START_ADDR(val)		bfin_write32(DMA8_START_ADDR, val)
+#define bfin_read_DMA8_X_COUNT()		bfin_read16(DMA8_X_COUNT)
+#define bfin_write_DMA8_X_COUNT(val)		bfin_write16(DMA8_X_COUNT, val)
+#define bfin_read_DMA8_Y_COUNT()		bfin_read16(DMA8_Y_COUNT)
+#define bfin_write_DMA8_Y_COUNT(val)		bfin_write16(DMA8_Y_COUNT, val)
+#define bfin_read_DMA8_X_MODIFY()		bfin_read16(DMA8_X_MODIFY)
+#define bfin_write_DMA8_X_MODIFY(val)		bfin_write16(DMA8_X_MODIFY, val)
+#define bfin_read_DMA8_Y_MODIFY()		bfin_read16(DMA8_Y_MODIFY)
+#define bfin_write_DMA8_Y_MODIFY(val)		bfin_write16(DMA8_Y_MODIFY, val)
+#define bfin_read_DMA8_CURR_DESC_PTR()		bfin_read32(DMA8_CURR_DESC_PTR)
+#define bfin_write_DMA8_CURR_DESC_PTR(val)	bfin_write32(DMA8_CURR_DESC_PTR, val)
+#define bfin_read_DMA8_CURR_ADDR()		bfin_read32(DMA8_CURR_ADDR)
+#define bfin_write_DMA8_CURR_ADDR(val)		bfin_write32(DMA8_CURR_ADDR, val)
+#define bfin_read_DMA8_CURR_X_COUNT()		bfin_read16(DMA8_CURR_X_COUNT)
+#define bfin_write_DMA8_CURR_X_COUNT(val)	bfin_write16(DMA8_CURR_X_COUNT, val)
+#define bfin_read_DMA8_CURR_Y_COUNT()		bfin_read16(DMA8_CURR_Y_COUNT)
+#define bfin_write_DMA8_CURR_Y_COUNT(val)	bfin_write16(DMA8_CURR_Y_COUNT, val)
+#define bfin_read_DMA8_IRQ_STATUS()		bfin_read16(DMA8_IRQ_STATUS)
+#define bfin_write_DMA8_IRQ_STATUS(val)		bfin_write16(DMA8_IRQ_STATUS, val)
+#define bfin_read_DMA8_PERIPHERAL_MAP()		bfin_read16(DMA8_PERIPHERAL_MAP)
+#define bfin_write_DMA8_PERIPHERAL_MAP(val)	bfin_write16(DMA8_PERIPHERAL_MAP, val)
+
+#define bfin_read_DMA9_CONFIG()			bfin_read16(DMA9_CONFIG)
+#define bfin_write_DMA9_CONFIG(val)		bfin_write16(DMA9_CONFIG, val)
+#define bfin_read_DMA9_NEXT_DESC_PTR()		bfin_read32(DMA9_NEXT_DESC_PTR)
+#define bfin_write_DMA9_NEXT_DESC_PTR(val)	bfin_write32(DMA9_NEXT_DESC_PTR, val)
+#define bfin_read_DMA9_START_ADDR()		bfin_read32(DMA9_START_ADDR)
+#define bfin_write_DMA9_START_ADDR(val)		bfin_write32(DMA9_START_ADDR, val)
+#define bfin_read_DMA9_X_COUNT()		bfin_read16(DMA9_X_COUNT)
+#define bfin_write_DMA9_X_COUNT(val)		bfin_write16(DMA9_X_COUNT, val)
+#define bfin_read_DMA9_Y_COUNT()		bfin_read16(DMA9_Y_COUNT)
+#define bfin_write_DMA9_Y_COUNT(val)		bfin_write16(DMA9_Y_COUNT, val)
+#define bfin_read_DMA9_X_MODIFY()		bfin_read16(DMA9_X_MODIFY)
+#define bfin_write_DMA9_X_MODIFY(val)		bfin_write16(DMA9_X_MODIFY, val)
+#define bfin_read_DMA9_Y_MODIFY()		bfin_read16(DMA9_Y_MODIFY)
+#define bfin_write_DMA9_Y_MODIFY(val)		bfin_write16(DMA9_Y_MODIFY, val)
+#define bfin_read_DMA9_CURR_DESC_PTR()		bfin_read32(DMA9_CURR_DESC_PTR)
+#define bfin_write_DMA9_CURR_DESC_PTR(val)	bfin_write32(DMA9_CURR_DESC_PTR, val)
+#define bfin_read_DMA9_CURR_ADDR()		bfin_read32(DMA9_CURR_ADDR)
+#define bfin_write_DMA9_CURR_ADDR(val)		bfin_write32(DMA9_CURR_ADDR, val)
+#define bfin_read_DMA9_CURR_X_COUNT()		bfin_read16(DMA9_CURR_X_COUNT)
+#define bfin_write_DMA9_CURR_X_COUNT(val)	bfin_write16(DMA9_CURR_X_COUNT, val)
+#define bfin_read_DMA9_CURR_Y_COUNT()		bfin_read16(DMA9_CURR_Y_COUNT)
+#define bfin_write_DMA9_CURR_Y_COUNT(val)	bfin_write16(DMA9_CURR_Y_COUNT, val)
+#define bfin_read_DMA9_IRQ_STATUS()		bfin_read16(DMA9_IRQ_STATUS)
+#define bfin_write_DMA9_IRQ_STATUS(val)		bfin_write16(DMA9_IRQ_STATUS, val)
+#define bfin_read_DMA9_PERIPHERAL_MAP()		bfin_read16(DMA9_PERIPHERAL_MAP)
+#define bfin_write_DMA9_PERIPHERAL_MAP(val)	bfin_write16(DMA9_PERIPHERAL_MAP, val)
+
+#define bfin_read_DMA10_CONFIG()		bfin_read16(DMA10_CONFIG)
+#define bfin_write_DMA10_CONFIG(val)		bfin_write16(DMA10_CONFIG, val)
+#define bfin_read_DMA10_NEXT_DESC_PTR()		bfin_read32(DMA10_NEXT_DESC_PTR)
+#define bfin_write_DMA10_NEXT_DESC_PTR(val)	bfin_write32(DMA10_NEXT_DESC_PTR, val)
+#define bfin_read_DMA10_START_ADDR()		bfin_read32(DMA10_START_ADDR)
+#define bfin_write_DMA10_START_ADDR(val)	bfin_write32(DMA10_START_ADDR, val)
+#define bfin_read_DMA10_X_COUNT()		bfin_read16(DMA10_X_COUNT)
+#define bfin_write_DMA10_X_COUNT(val)		bfin_write16(DMA10_X_COUNT, val)
+#define bfin_read_DMA10_Y_COUNT()		bfin_read16(DMA10_Y_COUNT)
+#define bfin_write_DMA10_Y_COUNT(val)		bfin_write16(DMA10_Y_COUNT, val)
+#define bfin_read_DMA10_X_MODIFY()		bfin_read16(DMA10_X_MODIFY)
+#define bfin_write_DMA10_X_MODIFY(val)		bfin_write16(DMA10_X_MODIFY, val)
+#define bfin_read_DMA10_Y_MODIFY()		bfin_read16(DMA10_Y_MODIFY)
+#define bfin_write_DMA10_Y_MODIFY(val)		bfin_write16(DMA10_Y_MODIFY, val)
+#define bfin_read_DMA10_CURR_DESC_PTR()		bfin_read32(DMA10_CURR_DESC_PTR)
+#define bfin_write_DMA10_CURR_DESC_PTR(val)	bfin_write32(DMA10_CURR_DESC_PTR, val)
+#define bfin_read_DMA10_CURR_ADDR()		bfin_read32(DMA10_CURR_ADDR)
+#define bfin_write_DMA10_CURR_ADDR(val)		bfin_write32(DMA10_CURR_ADDR, val)
+#define bfin_read_DMA10_CURR_X_COUNT()		bfin_read16(DMA10_CURR_X_COUNT)
+#define bfin_write_DMA10_CURR_X_COUNT(val)	bfin_write16(DMA10_CURR_X_COUNT, val)
+#define bfin_read_DMA10_CURR_Y_COUNT()		bfin_read16(DMA10_CURR_Y_COUNT)
+#define bfin_write_DMA10_CURR_Y_COUNT(val)	bfin_write16(DMA10_CURR_Y_COUNT, val)
+#define bfin_read_DMA10_IRQ_STATUS()		bfin_read16(DMA10_IRQ_STATUS)
+#define bfin_write_DMA10_IRQ_STATUS(val)	bfin_write16(DMA10_IRQ_STATUS, val)
+#define bfin_read_DMA10_PERIPHERAL_MAP()	bfin_read16(DMA10_PERIPHERAL_MAP)
+#define bfin_write_DMA10_PERIPHERAL_MAP(val)	bfin_write16(DMA10_PERIPHERAL_MAP, val)
+
+#define bfin_read_DMA11_CONFIG()		bfin_read16(DMA11_CONFIG)
+#define bfin_write_DMA11_CONFIG(val)		bfin_write16(DMA11_CONFIG, val)
+#define bfin_read_DMA11_NEXT_DESC_PTR()		bfin_read32(DMA11_NEXT_DESC_PTR)
+#define bfin_write_DMA11_NEXT_DESC_PTR(val)	bfin_write32(DMA11_NEXT_DESC_PTR, val)
+#define bfin_read_DMA11_START_ADDR()		bfin_read32(DMA11_START_ADDR)
+#define bfin_write_DMA11_START_ADDR(val)	bfin_write32(DMA11_START_ADDR, val)
+#define bfin_read_DMA11_X_COUNT()		bfin_read16(DMA11_X_COUNT)
+#define bfin_write_DMA11_X_COUNT(val)		bfin_write16(DMA11_X_COUNT, val)
+#define bfin_read_DMA11_Y_COUNT()		bfin_read16(DMA11_Y_COUNT)
+#define bfin_write_DMA11_Y_COUNT(val)		bfin_write16(DMA11_Y_COUNT, val)
+#define bfin_read_DMA11_X_MODIFY()		bfin_read16(DMA11_X_MODIFY)
+#define bfin_write_DMA11_X_MODIFY(val)		bfin_write16(DMA11_X_MODIFY, val)
+#define bfin_read_DMA11_Y_MODIFY()		bfin_read16(DMA11_Y_MODIFY)
+#define bfin_write_DMA11_Y_MODIFY(val)		bfin_write16(DMA11_Y_MODIFY, val)
+#define bfin_read_DMA11_CURR_DESC_PTR()		bfin_read32(DMA11_CURR_DESC_PTR)
+#define bfin_write_DMA11_CURR_DESC_PTR(val)	bfin_write32(DMA11_CURR_DESC_PTR, val)
+#define bfin_read_DMA11_CURR_ADDR()		bfin_read32(DMA11_CURR_ADDR)
+#define bfin_write_DMA11_CURR_ADDR(val)		bfin_write32(DMA11_CURR_ADDR, val)
+#define bfin_read_DMA11_CURR_X_COUNT()		bfin_read16(DMA11_CURR_X_COUNT)
+#define bfin_write_DMA11_CURR_X_COUNT(val)	bfin_write16(DMA11_CURR_X_COUNT, val)
+#define bfin_read_DMA11_CURR_Y_COUNT()		bfin_read16(DMA11_CURR_Y_COUNT)
+#define bfin_write_DMA11_CURR_Y_COUNT(val)	bfin_write16(DMA11_CURR_Y_COUNT, val)
+#define bfin_read_DMA11_IRQ_STATUS()		bfin_read16(DMA11_IRQ_STATUS)
+#define bfin_write_DMA11_IRQ_STATUS(val)	bfin_write16(DMA11_IRQ_STATUS, val)
+#define bfin_read_DMA11_PERIPHERAL_MAP()	bfin_read16(DMA11_PERIPHERAL_MAP)
+#define bfin_write_DMA11_PERIPHERAL_MAP(val)	bfin_write16(DMA11_PERIPHERAL_MAP, val)
+
+#define bfin_read_MDMA_D0_CONFIG()		bfin_read16(MDMA_D0_CONFIG)
+#define bfin_write_MDMA_D0_CONFIG(val)		bfin_write16(MDMA_D0_CONFIG, val)
+#define bfin_read_MDMA_D0_NEXT_DESC_PTR()	bfin_read32(MDMA_D0_NEXT_DESC_PTR)
+#define bfin_write_MDMA_D0_NEXT_DESC_PTR(val)	bfin_write32(MDMA_D0_NEXT_DESC_PTR, val)
+#define bfin_read_MDMA_D0_START_ADDR()		bfin_read32(MDMA_D0_START_ADDR)
+#define bfin_write_MDMA_D0_START_ADDR(val)	bfin_write32(MDMA_D0_START_ADDR, val)
+#define bfin_read_MDMA_D0_X_COUNT()		bfin_read16(MDMA_D0_X_COUNT)
+#define bfin_write_MDMA_D0_X_COUNT(val)		bfin_write16(MDMA_D0_X_COUNT, val)
+#define bfin_read_MDMA_D0_Y_COUNT()		bfin_read16(MDMA_D0_Y_COUNT)
+#define bfin_write_MDMA_D0_Y_COUNT(val)		bfin_write16(MDMA_D0_Y_COUNT, val)
+#define bfin_read_MDMA_D0_X_MODIFY()		bfin_read16(MDMA_D0_X_MODIFY)
+#define bfin_write_MDMA_D0_X_MODIFY(val)	bfin_write16(MDMA_D0_X_MODIFY, val)
+#define bfin_read_MDMA_D0_Y_MODIFY()		bfin_read16(MDMA_D0_Y_MODIFY)
+#define bfin_write_MDMA_D0_Y_MODIFY(val)	bfin_write16(MDMA_D0_Y_MODIFY, val)
+#define bfin_read_MDMA_D0_CURR_DESC_PTR()	bfin_read32(MDMA_D0_CURR_DESC_PTR)
+#define bfin_write_MDMA_D0_CURR_DESC_PTR(val)	bfin_write32(MDMA_D0_CURR_DESC_PTR, val)
+#define bfin_read_MDMA_D0_CURR_ADDR()		bfin_read32(MDMA_D0_CURR_ADDR)
+#define bfin_write_MDMA_D0_CURR_ADDR(val)	bfin_write32(MDMA_D0_CURR_ADDR, val)
+#define bfin_read_MDMA_D0_CURR_X_COUNT()	bfin_read16(MDMA_D0_CURR_X_COUNT)
+#define bfin_write_MDMA_D0_CURR_X_COUNT(val)	bfin_write16(MDMA_D0_CURR_X_COUNT, val)
+#define bfin_read_MDMA_D0_CURR_Y_COUNT()	bfin_read16(MDMA_D0_CURR_Y_COUNT)
+#define bfin_write_MDMA_D0_CURR_Y_COUNT(val)	bfin_write16(MDMA_D0_CURR_Y_COUNT, val)
+#define bfin_read_MDMA_D0_IRQ_STATUS()		bfin_read16(MDMA_D0_IRQ_STATUS)
+#define bfin_write_MDMA_D0_IRQ_STATUS(val)	bfin_write16(MDMA_D0_IRQ_STATUS, val)
+#define bfin_read_MDMA_D0_PERIPHERAL_MAP()	bfin_read16(MDMA_D0_PERIPHERAL_MAP)
+#define bfin_write_MDMA_D0_PERIPHERAL_MAP(val)	bfin_write16(MDMA_D0_PERIPHERAL_MAP, val)
+
+#define bfin_read_MDMA_S0_CONFIG()		bfin_read16(MDMA_S0_CONFIG)
+#define bfin_write_MDMA_S0_CONFIG(val)		bfin_write16(MDMA_S0_CONFIG, val)
+#define bfin_read_MDMA_S0_NEXT_DESC_PTR()	bfin_read32(MDMA_S0_NEXT_DESC_PTR)
+#define bfin_write_MDMA_S0_NEXT_DESC_PTR(val)	bfin_write32(MDMA_S0_NEXT_DESC_PTR, val)
+#define bfin_read_MDMA_S0_START_ADDR()		bfin_read32(MDMA_S0_START_ADDR)
+#define bfin_write_MDMA_S0_START_ADDR(val)	bfin_write32(MDMA_S0_START_ADDR, val)
+#define bfin_read_MDMA_S0_X_COUNT()		bfin_read16(MDMA_S0_X_COUNT)
+#define bfin_write_MDMA_S0_X_COUNT(val)		bfin_write16(MDMA_S0_X_COUNT, val)
+#define bfin_read_MDMA_S0_Y_COUNT()		bfin_read16(MDMA_S0_Y_COUNT)
+#define bfin_write_MDMA_S0_Y_COUNT(val)		bfin_write16(MDMA_S0_Y_COUNT, val)
+#define bfin_read_MDMA_S0_X_MODIFY()		bfin_read16(MDMA_S0_X_MODIFY)
+#define bfin_write_MDMA_S0_X_MODIFY(val)	bfin_write16(MDMA_S0_X_MODIFY, val)
+#define bfin_read_MDMA_S0_Y_MODIFY()		bfin_read16(MDMA_S0_Y_MODIFY)
+#define bfin_write_MDMA_S0_Y_MODIFY(val)	bfin_write16(MDMA_S0_Y_MODIFY, val)
+#define bfin_read_MDMA_S0_CURR_DESC_PTR()	bfin_read32(MDMA_S0_CURR_DESC_PTR)
+#define bfin_write_MDMA_S0_CURR_DESC_PTR(val)	bfin_write32(MDMA_S0_CURR_DESC_PTR, val)
+#define bfin_read_MDMA_S0_CURR_ADDR()		bfin_read32(MDMA_S0_CURR_ADDR)
+#define bfin_write_MDMA_S0_CURR_ADDR(val)	bfin_write32(MDMA_S0_CURR_ADDR, val)
+#define bfin_read_MDMA_S0_CURR_X_COUNT()	bfin_read16(MDMA_S0_CURR_X_COUNT)
+#define bfin_write_MDMA_S0_CURR_X_COUNT(val)	bfin_write16(MDMA_S0_CURR_X_COUNT, val)
+#define bfin_read_MDMA_S0_CURR_Y_COUNT()	bfin_read16(MDMA_S0_CURR_Y_COUNT)
+#define bfin_write_MDMA_S0_CURR_Y_COUNT(val)	bfin_write16(MDMA_S0_CURR_Y_COUNT, val)
+#define bfin_read_MDMA_S0_IRQ_STATUS()		bfin_read16(MDMA_S0_IRQ_STATUS)
+#define bfin_write_MDMA_S0_IRQ_STATUS(val)	bfin_write16(MDMA_S0_IRQ_STATUS, val)
+#define bfin_read_MDMA_S0_PERIPHERAL_MAP()	bfin_read16(MDMA_S0_PERIPHERAL_MAP)
+#define bfin_write_MDMA_S0_PERIPHERAL_MAP(val)	bfin_write16(MDMA_S0_PERIPHERAL_MAP, val)
+
+#define bfin_read_MDMA_D1_CONFIG()		bfin_read16(MDMA_D1_CONFIG)
+#define bfin_write_MDMA_D1_CONFIG(val)		bfin_write16(MDMA_D1_CONFIG, val)
+#define bfin_read_MDMA_D1_NEXT_DESC_PTR()	bfin_read32(MDMA_D1_NEXT_DESC_PTR)
+#define bfin_write_MDMA_D1_NEXT_DESC_PTR(val)	bfin_write32(MDMA_D1_NEXT_DESC_PTR, val)
+#define bfin_read_MDMA_D1_START_ADDR()		bfin_read32(MDMA_D1_START_ADDR)
+#define bfin_write_MDMA_D1_START_ADDR(val)	bfin_write32(MDMA_D1_START_ADDR, val)
+#define bfin_read_MDMA_D1_X_COUNT()		bfin_read16(MDMA_D1_X_COUNT)
+#define bfin_write_MDMA_D1_X_COUNT(val)		bfin_write16(MDMA_D1_X_COUNT, val)
+#define bfin_read_MDMA_D1_Y_COUNT()		bfin_read16(MDMA_D1_Y_COUNT)
+#define bfin_write_MDMA_D1_Y_COUNT(val)		bfin_write16(MDMA_D1_Y_COUNT, val)
+#define bfin_read_MDMA_D1_X_MODIFY()		bfin_read16(MDMA_D1_X_MODIFY)
+#define bfin_write_MDMA_D1_X_MODIFY(val)	bfin_write16(MDMA_D1_X_MODIFY, val)
+#define bfin_read_MDMA_D1_Y_MODIFY()		bfin_read16(MDMA_D1_Y_MODIFY)
+#define bfin_write_MDMA_D1_Y_MODIFY(val)	bfin_write16(MDMA_D1_Y_MODIFY, val)
+#define bfin_read_MDMA_D1_CURR_DESC_PTR()	bfin_read32(MDMA_D1_CURR_DESC_PTR)
+#define bfin_write_MDMA_D1_CURR_DESC_PTR(val)	bfin_write32(MDMA_D1_CURR_DESC_PTR, val)
+#define bfin_read_MDMA_D1_CURR_ADDR()		bfin_read32(MDMA_D1_CURR_ADDR)
+#define bfin_write_MDMA_D1_CURR_ADDR(val)	bfin_write32(MDMA_D1_CURR_ADDR, val)
+#define bfin_read_MDMA_D1_CURR_X_COUNT()	bfin_read16(MDMA_D1_CURR_X_COUNT)
+#define bfin_write_MDMA_D1_CURR_X_COUNT(val)	bfin_write16(MDMA_D1_CURR_X_COUNT, val)
+#define bfin_read_MDMA_D1_CURR_Y_COUNT()	bfin_read16(MDMA_D1_CURR_Y_COUNT)
+#define bfin_write_MDMA_D1_CURR_Y_COUNT(val)	bfin_write16(MDMA_D1_CURR_Y_COUNT, val)
+#define bfin_read_MDMA_D1_IRQ_STATUS()		bfin_read16(MDMA_D1_IRQ_STATUS)
+#define bfin_write_MDMA_D1_IRQ_STATUS(val)	bfin_write16(MDMA_D1_IRQ_STATUS, val)
+#define bfin_read_MDMA_D1_PERIPHERAL_MAP()	bfin_read16(MDMA_D1_PERIPHERAL_MAP)
+#define bfin_write_MDMA_D1_PERIPHERAL_MAP(val)	bfin_write16(MDMA_D1_PERIPHERAL_MAP, val)
+
+#define bfin_read_MDMA_S1_CONFIG()		bfin_read16(MDMA_S1_CONFIG)
+#define bfin_write_MDMA_S1_CONFIG(val)		bfin_write16(MDMA_S1_CONFIG, val)
+#define bfin_read_MDMA_S1_NEXT_DESC_PTR()	bfin_read32(MDMA_S1_NEXT_DESC_PTR)
+#define bfin_write_MDMA_S1_NEXT_DESC_PTR(val)	bfin_write32(MDMA_S1_NEXT_DESC_PTR, val)
+#define bfin_read_MDMA_S1_START_ADDR()		bfin_read32(MDMA_S1_START_ADDR)
+#define bfin_write_MDMA_S1_START_ADDR(val)	bfin_write32(MDMA_S1_START_ADDR, val)
+#define bfin_read_MDMA_S1_X_COUNT()		bfin_read16(MDMA_S1_X_COUNT)
+#define bfin_write_MDMA_S1_X_COUNT(val)		bfin_write16(MDMA_S1_X_COUNT, val)
+#define bfin_read_MDMA_S1_Y_COUNT()		bfin_read16(MDMA_S1_Y_COUNT)
+#define bfin_write_MDMA_S1_Y_COUNT(val)		bfin_write16(MDMA_S1_Y_COUNT, val)
+#define bfin_read_MDMA_S1_X_MODIFY()		bfin_read16(MDMA_S1_X_MODIFY)
+#define bfin_write_MDMA_S1_X_MODIFY(val)	bfin_write16(MDMA_S1_X_MODIFY, val)
+#define bfin_read_MDMA_S1_Y_MODIFY()		bfin_read16(MDMA_S1_Y_MODIFY)
+#define bfin_write_MDMA_S1_Y_MODIFY(val)	bfin_write16(MDMA_S1_Y_MODIFY, val)
+#define bfin_read_MDMA_S1_CURR_DESC_PTR()	bfin_read32(MDMA_S1_CURR_DESC_PTR)
+#define bfin_write_MDMA_S1_CURR_DESC_PTR(val)	bfin_write32(MDMA_S1_CURR_DESC_PTR, val)
+#define bfin_read_MDMA_S1_CURR_ADDR()		bfin_read32(MDMA_S1_CURR_ADDR)
+#define bfin_write_MDMA_S1_CURR_ADDR(val)	bfin_write32(MDMA_S1_CURR_ADDR, val)
+#define bfin_read_MDMA_S1_CURR_X_COUNT()	bfin_read16(MDMA_S1_CURR_X_COUNT)
+#define bfin_write_MDMA_S1_CURR_X_COUNT(val)	bfin_write16(MDMA_S1_CURR_X_COUNT, val)
+#define bfin_read_MDMA_S1_CURR_Y_COUNT()	bfin_read16(MDMA_S1_CURR_Y_COUNT)
+#define bfin_write_MDMA_S1_CURR_Y_COUNT(val)	bfin_write16(MDMA_S1_CURR_Y_COUNT, val)
+#define bfin_read_MDMA_S1_IRQ_STATUS()		bfin_read16(MDMA_S1_IRQ_STATUS)
+#define bfin_write_MDMA_S1_IRQ_STATUS(val)	bfin_write16(MDMA_S1_IRQ_STATUS, val)
+#define bfin_read_MDMA_S1_PERIPHERAL_MAP()	bfin_read16(MDMA_S1_PERIPHERAL_MAP)
+#define bfin_write_MDMA_S1_PERIPHERAL_MAP(val)	bfin_write16(MDMA_S1_PERIPHERAL_MAP, val)
+
+
+/* Parallel Peripheral Interface (0xFFC01000 - 0xFFC010FF)							*/
+#define bfin_read_PPI_CONTROL()			bfin_read16(PPI_CONTROL)
+#define bfin_write_PPI_CONTROL(val)		bfin_write16(PPI_CONTROL, val)
+#define bfin_read_PPI_STATUS()			bfin_read16(PPI_STATUS)
+#define bfin_write_PPI_STATUS(val)		bfin_write16(PPI_STATUS, val)
+#define bfin_clear_PPI_STATUS()			bfin_write_PPI_STATUS(0xFFFF)
+#define bfin_read_PPI_DELAY()			bfin_read16(PPI_DELAY)
+#define bfin_write_PPI_DELAY(val)		bfin_write16(PPI_DELAY, val)
+#define bfin_read_PPI_COUNT()			bfin_read16(PPI_COUNT)
+#define bfin_write_PPI_COUNT(val)		bfin_write16(PPI_COUNT, val)
+#define bfin_read_PPI_FRAME()			bfin_read16(PPI_FRAME)
+#define bfin_write_PPI_FRAME(val)		bfin_write16(PPI_FRAME, val)
+
+
+/* Two-Wire Interface		(0xFFC01400 - 0xFFC014FF)								*/
+
+/* General Purpose I/O Port G (0xFFC01500 - 0xFFC015FF)								*/
+#define bfin_read_PORTGIO()			bfin_read16(PORTGIO)
+#define bfin_write_PORTGIO(val)			bfin_write16(PORTGIO, val)
+#define bfin_read_PORTGIO_CLEAR()		bfin_read16(PORTGIO_CLEAR)
+#define bfin_write_PORTGIO_CLEAR(val)		bfin_write16(PORTGIO_CLEAR, val)
+#define bfin_read_PORTGIO_SET()			bfin_read16(PORTGIO_SET)
+#define bfin_write_PORTGIO_SET(val)		bfin_write16(PORTGIO_SET, val)
+#define bfin_read_PORTGIO_TOGGLE()		bfin_read16(PORTGIO_TOGGLE)
+#define bfin_write_PORTGIO_TOGGLE(val)		bfin_write16(PORTGIO_TOGGLE, val)
+#define bfin_read_PORTGIO_MASKA()		bfin_read16(PORTGIO_MASKA)
+#define bfin_write_PORTGIO_MASKA(val)		bfin_write16(PORTGIO_MASKA, val)
+#define bfin_read_PORTGIO_MASKA_CLEAR()		bfin_read16(PORTGIO_MASKA_CLEAR)
+#define bfin_write_PORTGIO_MASKA_CLEAR(val)	bfin_write16(PORTGIO_MASKA_CLEAR, val)
+#define bfin_read_PORTGIO_MASKA_SET()		bfin_read16(PORTGIO_MASKA_SET)
+#define bfin_write_PORTGIO_MASKA_SET(val)	bfin_write16(PORTGIO_MASKA_SET, val)
+#define bfin_read_PORTGIO_MASKA_TOGGLE()	bfin_read16(PORTGIO_MASKA_TOGGLE)
+#define bfin_write_PORTGIO_MASKA_TOGGLE(val)	bfin_write16(PORTGIO_MASKA_TOGGLE, val)
+#define bfin_read_PORTGIO_MASKB()		bfin_read16(PORTGIO_MASKB)
+#define bfin_write_PORTGIO_MASKB(val)		bfin_write16(PORTGIO_MASKB, val)
+#define bfin_read_PORTGIO_MASKB_CLEAR()		bfin_read16(PORTGIO_MASKB_CLEAR)
+#define bfin_write_PORTGIO_MASKB_CLEAR(val)	bfin_write16(PORTGIO_MASKB_CLEAR, val)
+#define bfin_read_PORTGIO_MASKB_SET()		bfin_read16(PORTGIO_MASKB_SET)
+#define bfin_write_PORTGIO_MASKB_SET(val)	bfin_write16(PORTGIO_MASKB_SET, val)
+#define bfin_read_PORTGIO_MASKB_TOGGLE()	bfin_read16(PORTGIO_MASKB_TOGGLE)
+#define bfin_write_PORTGIO_MASKB_TOGGLE(val)	bfin_write16(PORTGIO_MASKB_TOGGLE, val)
+#define bfin_read_PORTGIO_DIR()			bfin_read16(PORTGIO_DIR)
+#define bfin_write_PORTGIO_DIR(val)		bfin_write16(PORTGIO_DIR, val)
+#define bfin_read_PORTGIO_POLAR()		bfin_read16(PORTGIO_POLAR)
+#define bfin_write_PORTGIO_POLAR(val)		bfin_write16(PORTGIO_POLAR, val)
+#define bfin_read_PORTGIO_EDGE()		bfin_read16(PORTGIO_EDGE)
+#define bfin_write_PORTGIO_EDGE(val)		bfin_write16(PORTGIO_EDGE, val)
+#define bfin_read_PORTGIO_BOTH()		bfin_read16(PORTGIO_BOTH)
+#define bfin_write_PORTGIO_BOTH(val)		bfin_write16(PORTGIO_BOTH, val)
+#define bfin_read_PORTGIO_INEN()		bfin_read16(PORTGIO_INEN)
+#define bfin_write_PORTGIO_INEN(val)		bfin_write16(PORTGIO_INEN, val)
+
+
+/* General Purpose I/O Port H (0xFFC01700 - 0xFFC017FF)								*/
+#define bfin_read_PORTHIO()			bfin_read16(PORTHIO)
+#define bfin_write_PORTHIO(val)			bfin_write16(PORTHIO, val)
+#define bfin_read_PORTHIO_CLEAR()		bfin_read16(PORTHIO_CLEAR)
+#define bfin_write_PORTHIO_CLEAR(val)		bfin_write16(PORTHIO_CLEAR, val)
+#define bfin_read_PORTHIO_SET()			bfin_read16(PORTHIO_SET)
+#define bfin_write_PORTHIO_SET(val)		bfin_write16(PORTHIO_SET, val)
+#define bfin_read_PORTHIO_TOGGLE()		bfin_read16(PORTHIO_TOGGLE)
+#define bfin_write_PORTHIO_TOGGLE(val)		bfin_write16(PORTHIO_TOGGLE, val)
+#define bfin_read_PORTHIO_MASKA()		bfin_read16(PORTHIO_MASKA)
+#define bfin_write_PORTHIO_MASKA(val)		bfin_write16(PORTHIO_MASKA, val)
+#define bfin_read_PORTHIO_MASKA_CLEAR()		bfin_read16(PORTHIO_MASKA_CLEAR)
+#define bfin_write_PORTHIO_MASKA_CLEAR(val)	bfin_write16(PORTHIO_MASKA_CLEAR, val)
+#define bfin_read_PORTHIO_MASKA_SET()		bfin_read16(PORTHIO_MASKA_SET)
+#define bfin_write_PORTHIO_MASKA_SET(val)	bfin_write16(PORTHIO_MASKA_SET, val)
+#define bfin_read_PORTHIO_MASKA_TOGGLE()	bfin_read16(PORTHIO_MASKA_TOGGLE)
+#define bfin_write_PORTHIO_MASKA_TOGGLE(val)	bfin_write16(PORTHIO_MASKA_TOGGLE, val)
+#define bfin_read_PORTHIO_MASKB()		bfin_read16(PORTHIO_MASKB)
+#define bfin_write_PORTHIO_MASKB(val)		bfin_write16(PORTHIO_MASKB, val)
+#define bfin_read_PORTHIO_MASKB_CLEAR()		bfin_read16(PORTHIO_MASKB_CLEAR)
+#define bfin_write_PORTHIO_MASKB_CLEAR(val)	bfin_write16(PORTHIO_MASKB_CLEAR, val)
+#define bfin_read_PORTHIO_MASKB_SET()		bfin_read16(PORTHIO_MASKB_SET)
+#define bfin_write_PORTHIO_MASKB_SET(val)	bfin_write16(PORTHIO_MASKB_SET, val)
+#define bfin_read_PORTHIO_MASKB_TOGGLE()	bfin_read16(PORTHIO_MASKB_TOGGLE)
+#define bfin_write_PORTHIO_MASKB_TOGGLE(val)	bfin_write16(PORTHIO_MASKB_TOGGLE, val)
+#define bfin_read_PORTHIO_DIR()			bfin_read16(PORTHIO_DIR)
+#define bfin_write_PORTHIO_DIR(val)		bfin_write16(PORTHIO_DIR, val)
+#define bfin_read_PORTHIO_POLAR()		bfin_read16(PORTHIO_POLAR)
+#define bfin_write_PORTHIO_POLAR(val)		bfin_write16(PORTHIO_POLAR, val)
+#define bfin_read_PORTHIO_EDGE()		bfin_read16(PORTHIO_EDGE)
+#define bfin_write_PORTHIO_EDGE(val)		bfin_write16(PORTHIO_EDGE, val)
+#define bfin_read_PORTHIO_BOTH()		bfin_read16(PORTHIO_BOTH)
+#define bfin_write_PORTHIO_BOTH(val)		bfin_write16(PORTHIO_BOTH, val)
+#define bfin_read_PORTHIO_INEN()		bfin_read16(PORTHIO_INEN)
+#define bfin_write_PORTHIO_INEN(val)		bfin_write16(PORTHIO_INEN, val)
+
+
+/* UART1 Controller		(0xFFC02000 - 0xFFC020FF)								*/
+#define bfin_read_UART1_THR()			bfin_read16(UART1_THR)
+#define bfin_write_UART1_THR(val)		bfin_write16(UART1_THR, val)
+#define bfin_read_UART1_RBR()			bfin_read16(UART1_RBR)
+#define bfin_write_UART1_RBR(val)		bfin_write16(UART1_RBR, val)
+#define bfin_read_UART1_DLL()			bfin_read16(UART1_DLL)
+#define bfin_write_UART1_DLL(val)		bfin_write16(UART1_DLL, val)
+#define bfin_read_UART1_IER()			bfin_read16(UART1_IER)
+#define bfin_write_UART1_IER(val)		bfin_write16(UART1_IER, val)
+#define bfin_read_UART1_DLH()			bfin_read16(UART1_DLH)
+#define bfin_write_UART1_DLH(val)		bfin_write16(UART1_DLH, val)
+#define bfin_read_UART1_IIR()			bfin_read16(UART1_IIR)
+#define bfin_write_UART1_IIR(val)		bfin_write16(UART1_IIR, val)
+#define bfin_read_UART1_LCR()			bfin_read16(UART1_LCR)
+#define bfin_write_UART1_LCR(val)		bfin_write16(UART1_LCR, val)
+#define bfin_read_UART1_MCR()			bfin_read16(UART1_MCR)
+#define bfin_write_UART1_MCR(val)		bfin_write16(UART1_MCR, val)
+#define bfin_read_UART1_LSR()			bfin_read16(UART1_LSR)
+#define bfin_write_UART1_LSR(val)		bfin_write16(UART1_LSR, val)
+#define bfin_read_UART1_MSR()			bfin_read16(UART1_MSR)
+#define bfin_write_UART1_MSR(val)		bfin_write16(UART1_MSR, val)
+#define bfin_read_UART1_SCR()			bfin_read16(UART1_SCR)
+#define bfin_write_UART1_SCR(val)		bfin_write16(UART1_SCR, val)
+#define bfin_read_UART1_GCTL()			bfin_read16(UART1_GCTL)
+#define bfin_write_UART1_GCTL(val)		bfin_write16(UART1_GCTL, val)
+
+/* Omit CAN register sets from the cdefBF534.h (CAN is not in the ADSP-BF52x processor) */
+
+/* Pin Control Registers	(0xFFC03200 - 0xFFC032FF)								*/
+#define bfin_read_PORTF_FER()			bfin_read16(PORTF_FER)
+#define bfin_write_PORTF_FER(val)		bfin_write16(PORTF_FER, val)
+#define bfin_read_PORTG_FER()			bfin_read16(PORTG_FER)
+#define bfin_write_PORTG_FER(val)		bfin_write16(PORTG_FER, val)
+#define bfin_read_PORTH_FER()			bfin_read16(PORTH_FER)
+#define bfin_write_PORTH_FER(val)		bfin_write16(PORTH_FER, val)
+#define bfin_read_PORT_MUX()			bfin_read16(PORT_MUX)
+#define bfin_write_PORT_MUX(val)		bfin_write16(PORT_MUX, val)
+
+
+/* Handshake MDMA Registers	(0xFFC03300 - 0xFFC033FF)								*/
+#define bfin_read_HMDMA0_CONTROL()		bfin_read16(HMDMA0_CONTROL)
+#define bfin_write_HMDMA0_CONTROL(val)		bfin_write16(HMDMA0_CONTROL, val)
+#define bfin_read_HMDMA0_ECINIT()		bfin_read16(HMDMA0_ECINIT)
+#define bfin_write_HMDMA0_ECINIT(val)		bfin_write16(HMDMA0_ECINIT, val)
+#define bfin_read_HMDMA0_BCINIT()		bfin_read16(HMDMA0_BCINIT)
+#define bfin_write_HMDMA0_BCINIT(val)		bfin_write16(HMDMA0_BCINIT, val)
+#define bfin_read_HMDMA0_ECURGENT()		bfin_read16(HMDMA0_ECURGENT)
+#define bfin_write_HMDMA0_ECURGENT(val)		bfin_write16(HMDMA0_ECURGENT, val)
+#define bfin_read_HMDMA0_ECOVERFLOW()		bfin_read16(HMDMA0_ECOVERFLOW)
+#define bfin_write_HMDMA0_ECOVERFLOW(val)	bfin_write16(HMDMA0_ECOVERFLOW, val)
+#define bfin_read_HMDMA0_ECOUNT()		bfin_read16(HMDMA0_ECOUNT)
+#define bfin_write_HMDMA0_ECOUNT(val)		bfin_write16(HMDMA0_ECOUNT, val)
+#define bfin_read_HMDMA0_BCOUNT()		bfin_read16(HMDMA0_BCOUNT)
+#define bfin_write_HMDMA0_BCOUNT(val)		bfin_write16(HMDMA0_BCOUNT, val)
+
+#define bfin_read_HMDMA1_CONTROL()		bfin_read16(HMDMA1_CONTROL)
+#define bfin_write_HMDMA1_CONTROL(val)		bfin_write16(HMDMA1_CONTROL, val)
+#define bfin_read_HMDMA1_ECINIT()		bfin_read16(HMDMA1_ECINIT)
+#define bfin_write_HMDMA1_ECINIT(val)		bfin_write16(HMDMA1_ECINIT, val)
+#define bfin_read_HMDMA1_BCINIT()		bfin_read16(HMDMA1_BCINIT)
+#define bfin_write_HMDMA1_BCINIT(val)		bfin_write16(HMDMA1_BCINIT, val)
+#define bfin_read_HMDMA1_ECURGENT()		bfin_read16(HMDMA1_ECURGENT)
+#define bfin_write_HMDMA1_ECURGENT(val)		bfin_write16(HMDMA1_ECURGENT, val)
+#define bfin_read_HMDMA1_ECOVERFLOW()		bfin_read16(HMDMA1_ECOVERFLOW)
+#define bfin_write_HMDMA1_ECOVERFLOW(val)	bfin_write16(HMDMA1_ECOVERFLOW, val)
+#define bfin_read_HMDMA1_ECOUNT()		bfin_read16(HMDMA1_ECOUNT)
+#define bfin_write_HMDMA1_ECOUNT(val)		bfin_write16(HMDMA1_ECOUNT, val)
+#define bfin_read_HMDMA1_BCOUNT()		bfin_read16(HMDMA1_BCOUNT)
+#define bfin_write_HMDMA1_BCOUNT(val)		bfin_write16(HMDMA1_BCOUNT, val)
+
+/* ==== end from cdefBF534.h ==== */
+
+/* GPIO PIN mux (0xFFC03210 - 0xFFC03288) */
+
+#define bfin_read_PORTF_MUX()			bfin_read16(PORTF_MUX)
+#define bfin_write_PORTF_MUX(val)		bfin_write16(PORTF_MUX, val)
+#define bfin_read_PORTG_MUX()			bfin_read16(PORTG_MUX)
+#define bfin_write_PORTG_MUX(val)		bfin_write16(PORTG_MUX, val)
+#define bfin_read_PORTH_MUX()			bfin_read16(PORTH_MUX)
+#define bfin_write_PORTH_MUX(val)		bfin_write16(PORTH_MUX, val)
+
+#define bfin_read_PORTF_DRIVE()			bfin_read16(PORTF_DRIVE)
+#define bfin_write_PORTF_DRIVE(val)		bfin_write16(PORTF_DRIVE, val)
+#define bfin_read_PORTG_DRIVE()			bfin_read16(PORTG_DRIVE)
+#define bfin_write_PORTG_DRIVE(val)		bfin_write16(PORTG_DRIVE, val)
+#define bfin_read_PORTH_DRIVE()			bfin_read16(PORTH_DRIVE)
+#define bfin_write_PORTH_DRIVE(val)		bfin_write16(PORTH_DRIVE, val)
+#define bfin_read_PORTF_SLEW()			bfin_read16(PORTF_SLEW)
+#define bfin_write_PORTF_SLEW(val)		bfin_write16(PORTF_SLEW, val)
+#define bfin_read_PORTG_SLEW()			bfin_read16(PORTG_SLEW)
+#define bfin_write_PORTG_SLEW(val)		bfin_write16(PORTG_SLEW, val)
+#define bfin_read_PORTH_SLEW()			bfin_read16(PORTH_SLEW)
+#define bfin_write_PORTH_SLEW(val)		bfin_write16(PORTH_SLEW, val)
+#define bfin_read_PORTF_HYSTERISIS()		bfin_read16(PORTF_HYSTERISIS)
+#define bfin_write_PORTF_HYSTERISIS(val)	bfin_write16(PORTF_HYSTERISIS, val)
+#define bfin_read_PORTG_HYSTERISIS()		bfin_read16(PORTG_HYSTERISIS)
+#define bfin_write_PORTG_HYSTERISIS(val)	bfin_write16(PORTG_HYSTERISIS, val)
+#define bfin_read_PORTH_HYSTERISIS()		bfin_read16(PORTH_HYSTERISIS)
+#define bfin_write_PORTH_HYSTERISIS(val)	bfin_write16(PORTH_HYSTERISIS, val)
+#define bfin_read_MISCPORT_DRIVE()		bfin_read16(MISCPORT_DRIVE)
+#define bfin_write_MISCPORT_DRIVE(val)		bfin_write16(MISCPORT_DRIVE, val)
+#define bfin_read_MISCPORT_SLEW()		bfin_read16(MISCPORT_SLEW)
+#define bfin_write_MISCPORT_SLEW(val)		bfin_write16(MISCPORT_SLEW, val)
+#define bfin_read_MISCPORT_HYSTERISIS()		bfin_read16(MISCPORT_HYSTERISIS)
+#define bfin_write_MISCPORT_HYSTERISIS(val)	bfin_write16(MISCPORT_HYSTERISIS, val)
+
+/* HOST Port Registers */
+
+#define bfin_read_HOST_CONTROL()		bfin_read16(HOST_CONTROL)
+#define bfin_write_HOST_CONTROL(val)		bfin_write16(HOST_CONTROL, val)
+#define bfin_read_HOST_STATUS()			bfin_read16(HOST_STATUS)
+#define bfin_write_HOST_STATUS(val)		bfin_write16(HOST_STATUS, val)
+#define bfin_read_HOST_TIMEOUT()		bfin_read16(HOST_TIMEOUT)
+#define bfin_write_HOST_TIMEOUT(val)		bfin_write16(HOST_TIMEOUT, val)
+
+/* Counter Registers */
+
+#define bfin_read_CNT_CONFIG()			bfin_read16(CNT_CONFIG)
+#define bfin_write_CNT_CONFIG(val)		bfin_write16(CNT_CONFIG, val)
+#define bfin_read_CNT_IMASK()			bfin_read16(CNT_IMASK)
+#define bfin_write_CNT_IMASK(val)		bfin_write16(CNT_IMASK, val)
+#define bfin_read_CNT_STATUS()			bfin_read16(CNT_STATUS)
+#define bfin_write_CNT_STATUS(val)		bfin_write16(CNT_STATUS, val)
+#define bfin_read_CNT_COMMAND()			bfin_read16(CNT_COMMAND)
+#define bfin_write_CNT_COMMAND(val)		bfin_write16(CNT_COMMAND, val)
+#define bfin_read_CNT_DEBOUNCE()		bfin_read16(CNT_DEBOUNCE)
+#define bfin_write_CNT_DEBOUNCE(val)		bfin_write16(CNT_DEBOUNCE, val)
+#define bfin_read_CNT_COUNTER()			bfin_read32(CNT_COUNTER)
+#define bfin_write_CNT_COUNTER(val)		bfin_write32(CNT_COUNTER, val)
+#define bfin_read_CNT_MAX()			bfin_read32(CNT_MAX)
+#define bfin_write_CNT_MAX(val)			bfin_write32(CNT_MAX, val)
+#define bfin_read_CNT_MIN()			bfin_read32(CNT_MIN)
+#define bfin_write_CNT_MIN(val)			bfin_write32(CNT_MIN, val)
+
+/* Security Registers */
+
+#define bfin_read_SECURE_SYSSWT()		bfin_read32(SECURE_SYSSWT)
+#define bfin_write_SECURE_SYSSWT(val)		bfin_write32(SECURE_SYSSWT, val)
+#define bfin_read_SECURE_CONTROL()		bfin_read16(SECURE_CONTROL)
+#define bfin_write_SECURE_CONTROL(val)		bfin_write16(SECURE_CONTROL, val)
+#define bfin_read_SECURE_STATUS()		bfin_read16(SECURE_STATUS)
+#define bfin_write_SECURE_STATUS(val)		bfin_write16(SECURE_STATUS, val)
+
+/* NFC Registers */
+
+#define bfin_read_NFC_CTL()			bfin_read16(NFC_CTL)
+#define bfin_write_NFC_CTL(val)			bfin_write16(NFC_CTL, val)
+#define bfin_read_NFC_STAT()			bfin_read16(NFC_STAT)
+#define bfin_write_NFC_STAT(val)		bfin_write16(NFC_STAT, val)
+#define bfin_read_NFC_IRQSTAT()			bfin_read16(NFC_IRQSTAT)
+#define bfin_write_NFC_IRQSTAT(val)		bfin_write16(NFC_IRQSTAT, val)
+#define bfin_read_NFC_IRQMASK()			bfin_read16(NFC_IRQMASK)
+#define bfin_write_NFC_IRQMASK(val)		bfin_write16(NFC_IRQMASK, val)
+#define bfin_read_NFC_ECC0()			bfin_read16(NFC_ECC0)
+#define bfin_write_NFC_ECC0(val)		bfin_write16(NFC_ECC0, val)
+#define bfin_read_NFC_ECC1()			bfin_read16(NFC_ECC1)
+#define bfin_write_NFC_ECC1(val)		bfin_write16(NFC_ECC1, val)
+#define bfin_read_NFC_ECC2()			bfin_read16(NFC_ECC2)
+#define bfin_write_NFC_ECC2(val)		bfin_write16(NFC_ECC2, val)
+#define bfin_read_NFC_ECC3()			bfin_read16(NFC_ECC3)
+#define bfin_write_NFC_ECC3(val)		bfin_write16(NFC_ECC3, val)
+#define bfin_read_NFC_COUNT()			bfin_read16(NFC_COUNT)
+#define bfin_write_NFC_COUNT(val)		bfin_write16(NFC_COUNT, val)
+#define bfin_read_NFC_RST()			bfin_read16(NFC_RST)
+#define bfin_write_NFC_RST(val)			bfin_write16(NFC_RST, val)
+#define bfin_read_NFC_PGCTL()			bfin_read16(NFC_PGCTL)
+#define bfin_write_NFC_PGCTL(val)		bfin_write16(NFC_PGCTL, val)
+#define bfin_read_NFC_READ()			bfin_read16(NFC_READ)
+#define bfin_write_NFC_READ(val)		bfin_write16(NFC_READ, val)
+#define bfin_read_NFC_ADDR()			bfin_read16(NFC_ADDR)
+#define bfin_write_NFC_ADDR(val)		bfin_write16(NFC_ADDR, val)
+#define bfin_read_NFC_CMD()			bfin_read16(NFC_CMD)
+#define bfin_write_NFC_CMD(val)			bfin_write16(NFC_CMD, val)
+#define bfin_read_NFC_DATA_WR()			bfin_read16(NFC_DATA_WR)
+#define bfin_write_NFC_DATA_WR(val)		bfin_write16(NFC_DATA_WR, val)
+#define bfin_read_NFC_DATA_RD()			bfin_read16(NFC_DATA_RD)
+#define bfin_write_NFC_DATA_RD(val)		bfin_write16(NFC_DATA_RD, val)
 
 #endif /* _CDEF_BF522_H */
diff --git a/arch/blackfin/mach-bf527/include/mach/cdefBF525.h b/arch/blackfin/mach-bf527/include/mach/cdefBF525.h
index d7e2751..d90a85b 100644
--- a/arch/blackfin/mach-bf527/include/mach/cdefBF525.h
+++ b/arch/blackfin/mach-bf527/include/mach/cdefBF525.h
@@ -1,15 +1,12 @@
 /*
- * Copyright 2007-2008 Analog Devices Inc.
+ * Copyright 2007-2010 Analog Devices Inc.
  *
- * Licensed under the GPL-2 or later
+ * Licensed under the GPL-2 or later.
  */
 
 #ifndef _CDEF_BF525_H
 #define _CDEF_BF525_H
 
-/* include all Core registers and bit definitions */
-#include "defBF525.h"
-
 /* BF525 is BF522 + USB */
 #include "cdefBF522.h"
 
diff --git a/arch/blackfin/mach-bf527/include/mach/cdefBF527.h b/arch/blackfin/mach-bf527/include/mach/cdefBF527.h
index c7ba544..eb22f586 100644
--- a/arch/blackfin/mach-bf527/include/mach/cdefBF527.h
+++ b/arch/blackfin/mach-bf527/include/mach/cdefBF527.h
@@ -1,15 +1,12 @@
 /*
- * Copyright 2007-2008 Analog Devices Inc.
+ * Copyright 2007-2010 Analog Devices Inc.
  *
- * Licensed under the GPL-2 or later
+ * Licensed under the GPL-2 or later.
  */
 
 #ifndef _CDEF_BF527_H
 #define _CDEF_BF527_H
 
-/* include all Core registers and bit definitions */
-#include "defBF527.h"
-
 /* BF527 is BF525 + EMAC */
 #include "cdefBF525.h"
 
diff --git a/arch/blackfin/mach-bf527/include/mach/cdefBF52x_base.h b/arch/blackfin/mach-bf527/include/mach/cdefBF52x_base.h
deleted file mode 100644
index 3048b52b..0000000
--- a/arch/blackfin/mach-bf527/include/mach/cdefBF52x_base.h
+++ /dev/null
@@ -1,1113 +0,0 @@
-/*
- * Copyright 2007-2008 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later
- */
-
-#ifndef _CDEF_BF52X_H
-#define _CDEF_BF52X_H
-
-#include <asm/blackfin.h>
-
-#include "defBF52x_base.h"
-
-/* Include core specific register pointer definitions 								*/
-#include <asm/cdef_LPBlackfin.h>
-
-/* ==== begin from cdefBF534.h ==== */
-
-/* Clock and System Control	(0xFFC00000 - 0xFFC000FF)								*/
-#define bfin_read_PLL_CTL()			bfin_read16(PLL_CTL)
-#define bfin_read_PLL_DIV()			bfin_read16(PLL_DIV)
-#define bfin_write_PLL_DIV(val)			bfin_write16(PLL_DIV, val)
-#define bfin_read_VR_CTL()			bfin_read16(VR_CTL)
-#define bfin_read_PLL_STAT()			bfin_read16(PLL_STAT)
-#define bfin_write_PLL_STAT(val)		bfin_write16(PLL_STAT, val)
-#define bfin_read_PLL_LOCKCNT()			bfin_read16(PLL_LOCKCNT)
-#define bfin_write_PLL_LOCKCNT(val)		bfin_write16(PLL_LOCKCNT, val)
-#define bfin_read_CHIPID()			bfin_read32(CHIPID)
-#define bfin_write_CHIPID(val)			bfin_write32(CHIPID, val)
-
-
-/* System Interrupt Controller (0xFFC00100 - 0xFFC001FF)							*/
-#define bfin_read_SWRST()			bfin_read16(SWRST)
-#define bfin_write_SWRST(val)			bfin_write16(SWRST, val)
-#define bfin_read_SYSCR()			bfin_read16(SYSCR)
-#define bfin_write_SYSCR(val)			bfin_write16(SYSCR, val)
-
-#define bfin_read_SIC_RVECT()			bfin_read32(SIC_RVECT)
-#define bfin_write_SIC_RVECT(val)		bfin_write32(SIC_RVECT, val)
-#define bfin_read_SIC_IMASK0()			bfin_read32(SIC_IMASK0)
-#define bfin_write_SIC_IMASK0(val)		bfin_write32(SIC_IMASK0, val)
-#define bfin_read_SIC_IMASK(x)			bfin_read32(SIC_IMASK0 + (x << 6))
-#define bfin_write_SIC_IMASK(x, val)		bfin_write32((SIC_IMASK0 + (x << 6)), val)
-
-#define bfin_read_SIC_IAR0()			bfin_read32(SIC_IAR0)
-#define bfin_write_SIC_IAR0(val)		bfin_write32(SIC_IAR0, val)
-#define bfin_read_SIC_IAR1()			bfin_read32(SIC_IAR1)
-#define bfin_write_SIC_IAR1(val)		bfin_write32(SIC_IAR1, val)
-#define bfin_read_SIC_IAR2()			bfin_read32(SIC_IAR2)
-#define bfin_write_SIC_IAR2(val)		bfin_write32(SIC_IAR2, val)
-#define bfin_read_SIC_IAR3()			bfin_read32(SIC_IAR3)
-#define bfin_write_SIC_IAR3(val)		bfin_write32(SIC_IAR3, val)
-
-#define bfin_read_SIC_ISR0()			bfin_read32(SIC_ISR0)
-#define bfin_write_SIC_ISR0(val)		bfin_write32(SIC_ISR0, val)
-#define bfin_read_SIC_ISR(x)			bfin_read32(SIC_ISR0 + (x << 6))
-#define bfin_write_SIC_ISR(x, val)		bfin_write32((SIC_ISR0 + (x << 6)), val)
-
-#define bfin_read_SIC_IWR0()			bfin_read32(SIC_IWR0)
-#define bfin_write_SIC_IWR0(val)		bfin_write32(SIC_IWR0, val)
-#define bfin_read_SIC_IWR(x)			bfin_read32(SIC_IWR0 + (x << 6))
-#define bfin_write_SIC_IWR(x, val)		bfin_write32((SIC_IWR0 + (x << 6)), val)
-
-/* SIC Additions to ADSP-BF52x (0xFFC0014C - 0xFFC00162) */
-
-#define bfin_read_SIC_IMASK1()			bfin_read32(SIC_IMASK1)
-#define bfin_write_SIC_IMASK1(val)		bfin_write32(SIC_IMASK1, val)
-#define bfin_read_SIC_IAR4()			bfin_read32(SIC_IAR4)
-#define bfin_write_SIC_IAR4(val)		bfin_write32(SIC_IAR4, val)
-#define bfin_read_SIC_IAR5()			bfin_read32(SIC_IAR5)
-#define bfin_write_SIC_IAR5(val)		bfin_write32(SIC_IAR5, val)
-#define bfin_read_SIC_IAR6()			bfin_read32(SIC_IAR6)
-#define bfin_write_SIC_IAR6(val)		bfin_write32(SIC_IAR6, val)
-#define bfin_read_SIC_IAR7()			bfin_read32(SIC_IAR7)
-#define bfin_write_SIC_IAR7(val)		bfin_write32(SIC_IAR7, val)
-#define bfin_read_SIC_ISR1()			bfin_read32(SIC_ISR1)
-#define bfin_write_SIC_ISR1(val)		bfin_write32(SIC_ISR1, val)
-#define bfin_read_SIC_IWR1()			bfin_read32(SIC_IWR1)
-#define bfin_write_SIC_IWR1(val)		bfin_write32(SIC_IWR1, val)
-
-/* Watchdog Timer		(0xFFC00200 - 0xFFC002FF)									*/
-#define bfin_read_WDOG_CTL()			bfin_read16(WDOG_CTL)
-#define bfin_write_WDOG_CTL(val)		bfin_write16(WDOG_CTL, val)
-#define bfin_read_WDOG_CNT()			bfin_read32(WDOG_CNT)
-#define bfin_write_WDOG_CNT(val)		bfin_write32(WDOG_CNT, val)
-#define bfin_read_WDOG_STAT()			bfin_read32(WDOG_STAT)
-#define bfin_write_WDOG_STAT(val)		bfin_write32(WDOG_STAT, val)
-
-
-/* Real Time Clock		(0xFFC00300 - 0xFFC003FF)									*/
-#define bfin_read_RTC_STAT()			bfin_read32(RTC_STAT)
-#define bfin_write_RTC_STAT(val)		bfin_write32(RTC_STAT, val)
-#define bfin_read_RTC_ICTL()			bfin_read16(RTC_ICTL)
-#define bfin_write_RTC_ICTL(val)		bfin_write16(RTC_ICTL, val)
-#define bfin_read_RTC_ISTAT()			bfin_read16(RTC_ISTAT)
-#define bfin_write_RTC_ISTAT(val)		bfin_write16(RTC_ISTAT, val)
-#define bfin_read_RTC_SWCNT()			bfin_read16(RTC_SWCNT)
-#define bfin_write_RTC_SWCNT(val)		bfin_write16(RTC_SWCNT, val)
-#define bfin_read_RTC_ALARM()			bfin_read32(RTC_ALARM)
-#define bfin_write_RTC_ALARM(val)		bfin_write32(RTC_ALARM, val)
-#define bfin_read_RTC_FAST()			bfin_read16(RTC_FAST)
-#define bfin_write_RTC_FAST(val)		bfin_write16(RTC_FAST, val)
-#define bfin_read_RTC_PREN()			bfin_read16(RTC_PREN)
-#define bfin_write_RTC_PREN(val)		bfin_write16(RTC_PREN, val)
-
-
-/* UART0 Controller		(0xFFC00400 - 0xFFC004FF)									*/
-#define bfin_read_UART0_THR()			bfin_read16(UART0_THR)
-#define bfin_write_UART0_THR(val)		bfin_write16(UART0_THR, val)
-#define bfin_read_UART0_RBR()			bfin_read16(UART0_RBR)
-#define bfin_write_UART0_RBR(val)		bfin_write16(UART0_RBR, val)
-#define bfin_read_UART0_DLL()			bfin_read16(UART0_DLL)
-#define bfin_write_UART0_DLL(val)		bfin_write16(UART0_DLL, val)
-#define bfin_read_UART0_IER()			bfin_read16(UART0_IER)
-#define bfin_write_UART0_IER(val)		bfin_write16(UART0_IER, val)
-#define bfin_read_UART0_DLH()			bfin_read16(UART0_DLH)
-#define bfin_write_UART0_DLH(val)		bfin_write16(UART0_DLH, val)
-#define bfin_read_UART0_IIR()			bfin_read16(UART0_IIR)
-#define bfin_write_UART0_IIR(val)		bfin_write16(UART0_IIR, val)
-#define bfin_read_UART0_LCR()			bfin_read16(UART0_LCR)
-#define bfin_write_UART0_LCR(val)		bfin_write16(UART0_LCR, val)
-#define bfin_read_UART0_MCR()			bfin_read16(UART0_MCR)
-#define bfin_write_UART0_MCR(val)		bfin_write16(UART0_MCR, val)
-#define bfin_read_UART0_LSR()			bfin_read16(UART0_LSR)
-#define bfin_write_UART0_LSR(val)		bfin_write16(UART0_LSR, val)
-#define bfin_read_UART0_MSR()			bfin_read16(UART0_MSR)
-#define bfin_write_UART0_MSR(val)		bfin_write16(UART0_MSR, val)
-#define bfin_read_UART0_SCR()			bfin_read16(UART0_SCR)
-#define bfin_write_UART0_SCR(val)		bfin_write16(UART0_SCR, val)
-#define bfin_read_UART0_GCTL()			bfin_read16(UART0_GCTL)
-#define bfin_write_UART0_GCTL(val)		bfin_write16(UART0_GCTL, val)
-
-
-/* SPI Controller		(0xFFC00500 - 0xFFC005FF)									*/
-#define bfin_read_SPI_CTL()			bfin_read16(SPI_CTL)
-#define bfin_write_SPI_CTL(val)			bfin_write16(SPI_CTL, val)
-#define bfin_read_SPI_FLG()			bfin_read16(SPI_FLG)
-#define bfin_write_SPI_FLG(val)			bfin_write16(SPI_FLG, val)
-#define bfin_read_SPI_STAT()			bfin_read16(SPI_STAT)
-#define bfin_write_SPI_STAT(val)		bfin_write16(SPI_STAT, val)
-#define bfin_read_SPI_TDBR()			bfin_read16(SPI_TDBR)
-#define bfin_write_SPI_TDBR(val)		bfin_write16(SPI_TDBR, val)
-#define bfin_read_SPI_RDBR()			bfin_read16(SPI_RDBR)
-#define bfin_write_SPI_RDBR(val)		bfin_write16(SPI_RDBR, val)
-#define bfin_read_SPI_BAUD()			bfin_read16(SPI_BAUD)
-#define bfin_write_SPI_BAUD(val)		bfin_write16(SPI_BAUD, val)
-#define bfin_read_SPI_SHADOW()			bfin_read16(SPI_SHADOW)
-#define bfin_write_SPI_SHADOW(val)		bfin_write16(SPI_SHADOW, val)
-
-
-/* TIMER0-7 Registers		(0xFFC00600 - 0xFFC006FF)								*/
-#define bfin_read_TIMER0_CONFIG()		bfin_read16(TIMER0_CONFIG)
-#define bfin_write_TIMER0_CONFIG(val)		bfin_write16(TIMER0_CONFIG, val)
-#define bfin_read_TIMER0_COUNTER()		bfin_read32(TIMER0_COUNTER)
-#define bfin_write_TIMER0_COUNTER(val)		bfin_write32(TIMER0_COUNTER, val)
-#define bfin_read_TIMER0_PERIOD()		bfin_read32(TIMER0_PERIOD)
-#define bfin_write_TIMER0_PERIOD(val)		bfin_write32(TIMER0_PERIOD, val)
-#define bfin_read_TIMER0_WIDTH()		bfin_read32(TIMER0_WIDTH)
-#define bfin_write_TIMER0_WIDTH(val)		bfin_write32(TIMER0_WIDTH, val)
-
-#define bfin_read_TIMER1_CONFIG()		bfin_read16(TIMER1_CONFIG)
-#define bfin_write_TIMER1_CONFIG(val)		bfin_write16(TIMER1_CONFIG, val)
-#define bfin_read_TIMER1_COUNTER()		bfin_read32(TIMER1_COUNTER)
-#define bfin_write_TIMER1_COUNTER(val)		bfin_write32(TIMER1_COUNTER, val)
-#define bfin_read_TIMER1_PERIOD()		bfin_read32(TIMER1_PERIOD)
-#define bfin_write_TIMER1_PERIOD(val)		bfin_write32(TIMER1_PERIOD, val)
-#define bfin_read_TIMER1_WIDTH()		bfin_read32(TIMER1_WIDTH)
-#define bfin_write_TIMER1_WIDTH(val)		bfin_write32(TIMER1_WIDTH, val)
-
-#define bfin_read_TIMER2_CONFIG()		bfin_read16(TIMER2_CONFIG)
-#define bfin_write_TIMER2_CONFIG(val)		bfin_write16(TIMER2_CONFIG, val)
-#define bfin_read_TIMER2_COUNTER()		bfin_read32(TIMER2_COUNTER)
-#define bfin_write_TIMER2_COUNTER(val)		bfin_write32(TIMER2_COUNTER, val)
-#define bfin_read_TIMER2_PERIOD()		bfin_read32(TIMER2_PERIOD)
-#define bfin_write_TIMER2_PERIOD(val)		bfin_write32(TIMER2_PERIOD, val)
-#define bfin_read_TIMER2_WIDTH()		bfin_read32(TIMER2_WIDTH)
-#define bfin_write_TIMER2_WIDTH(val)		bfin_write32(TIMER2_WIDTH, val)
-
-#define bfin_read_TIMER3_CONFIG()		bfin_read16(TIMER3_CONFIG)
-#define bfin_write_TIMER3_CONFIG(val)		bfin_write16(TIMER3_CONFIG, val)
-#define bfin_read_TIMER3_COUNTER()		bfin_read32(TIMER3_COUNTER)
-#define bfin_write_TIMER3_COUNTER(val)		bfin_write32(TIMER3_COUNTER, val)
-#define bfin_read_TIMER3_PERIOD()		bfin_read32(TIMER3_PERIOD)
-#define bfin_write_TIMER3_PERIOD(val)		bfin_write32(TIMER3_PERIOD, val)
-#define bfin_read_TIMER3_WIDTH()		bfin_read32(TIMER3_WIDTH)
-#define bfin_write_TIMER3_WIDTH(val)		bfin_write32(TIMER3_WIDTH, val)
-
-#define bfin_read_TIMER4_CONFIG()		bfin_read16(TIMER4_CONFIG)
-#define bfin_write_TIMER4_CONFIG(val)		bfin_write16(TIMER4_CONFIG, val)
-#define bfin_read_TIMER4_COUNTER()		bfin_read32(TIMER4_COUNTER)
-#define bfin_write_TIMER4_COUNTER(val)		bfin_write32(TIMER4_COUNTER, val)
-#define bfin_read_TIMER4_PERIOD()		bfin_read32(TIMER4_PERIOD)
-#define bfin_write_TIMER4_PERIOD(val)		bfin_write32(TIMER4_PERIOD, val)
-#define bfin_read_TIMER4_WIDTH()		bfin_read32(TIMER4_WIDTH)
-#define bfin_write_TIMER4_WIDTH(val)		bfin_write32(TIMER4_WIDTH, val)
-
-#define bfin_read_TIMER5_CONFIG()		bfin_read16(TIMER5_CONFIG)
-#define bfin_write_TIMER5_CONFIG(val)		bfin_write16(TIMER5_CONFIG, val)
-#define bfin_read_TIMER5_COUNTER()		bfin_read32(TIMER5_COUNTER)
-#define bfin_write_TIMER5_COUNTER(val)		bfin_write32(TIMER5_COUNTER, val)
-#define bfin_read_TIMER5_PERIOD()		bfin_read32(TIMER5_PERIOD)
-#define bfin_write_TIMER5_PERIOD(val)		bfin_write32(TIMER5_PERIOD, val)
-#define bfin_read_TIMER5_WIDTH()		bfin_read32(TIMER5_WIDTH)
-#define bfin_write_TIMER5_WIDTH(val)		bfin_write32(TIMER5_WIDTH, val)
-
-#define bfin_read_TIMER6_CONFIG()		bfin_read16(TIMER6_CONFIG)
-#define bfin_write_TIMER6_CONFIG(val)		bfin_write16(TIMER6_CONFIG, val)
-#define bfin_read_TIMER6_COUNTER()		bfin_read32(TIMER6_COUNTER)
-#define bfin_write_TIMER6_COUNTER(val)		bfin_write32(TIMER6_COUNTER, val)
-#define bfin_read_TIMER6_PERIOD()		bfin_read32(TIMER6_PERIOD)
-#define bfin_write_TIMER6_PERIOD(val)		bfin_write32(TIMER6_PERIOD, val)
-#define bfin_read_TIMER6_WIDTH()		bfin_read32(TIMER6_WIDTH)
-#define bfin_write_TIMER6_WIDTH(val)		bfin_write32(TIMER6_WIDTH, val)
-
-#define bfin_read_TIMER7_CONFIG()		bfin_read16(TIMER7_CONFIG)
-#define bfin_write_TIMER7_CONFIG(val)		bfin_write16(TIMER7_CONFIG, val)
-#define bfin_read_TIMER7_COUNTER()		bfin_read32(TIMER7_COUNTER)
-#define bfin_write_TIMER7_COUNTER(val)		bfin_write32(TIMER7_COUNTER, val)
-#define bfin_read_TIMER7_PERIOD()		bfin_read32(TIMER7_PERIOD)
-#define bfin_write_TIMER7_PERIOD(val)		bfin_write32(TIMER7_PERIOD, val)
-#define bfin_read_TIMER7_WIDTH()		bfin_read32(TIMER7_WIDTH)
-#define bfin_write_TIMER7_WIDTH(val)		bfin_write32(TIMER7_WIDTH, val)
-
-#define bfin_read_TIMER_ENABLE()		bfin_read16(TIMER_ENABLE)
-#define bfin_write_TIMER_ENABLE(val)		bfin_write16(TIMER_ENABLE, val)
-#define bfin_read_TIMER_DISABLE()		bfin_read16(TIMER_DISABLE)
-#define bfin_write_TIMER_DISABLE(val)		bfin_write16(TIMER_DISABLE, val)
-#define bfin_read_TIMER_STATUS()		bfin_read32(TIMER_STATUS)
-#define bfin_write_TIMER_STATUS(val)		bfin_write32(TIMER_STATUS, val)
-
-
-/* General Purpose I/O Port F (0xFFC00700 - 0xFFC007FF)								*/
-#define bfin_read_PORTFIO()			bfin_read16(PORTFIO)
-#define bfin_write_PORTFIO(val)			bfin_write16(PORTFIO, val)
-#define bfin_read_PORTFIO_CLEAR()		bfin_read16(PORTFIO_CLEAR)
-#define bfin_write_PORTFIO_CLEAR(val)		bfin_write16(PORTFIO_CLEAR, val)
-#define bfin_read_PORTFIO_SET()			bfin_read16(PORTFIO_SET)
-#define bfin_write_PORTFIO_SET(val)		bfin_write16(PORTFIO_SET, val)
-#define bfin_read_PORTFIO_TOGGLE()		bfin_read16(PORTFIO_TOGGLE)
-#define bfin_write_PORTFIO_TOGGLE(val)		bfin_write16(PORTFIO_TOGGLE, val)
-#define bfin_read_PORTFIO_MASKA()		bfin_read16(PORTFIO_MASKA)
-#define bfin_write_PORTFIO_MASKA(val)		bfin_write16(PORTFIO_MASKA, val)
-#define bfin_read_PORTFIO_MASKA_CLEAR()		bfin_read16(PORTFIO_MASKA_CLEAR)
-#define bfin_write_PORTFIO_MASKA_CLEAR(val)	bfin_write16(PORTFIO_MASKA_CLEAR, val)
-#define bfin_read_PORTFIO_MASKA_SET()		bfin_read16(PORTFIO_MASKA_SET)
-#define bfin_write_PORTFIO_MASKA_SET(val)	bfin_write16(PORTFIO_MASKA_SET, val)
-#define bfin_read_PORTFIO_MASKA_TOGGLE()	bfin_read16(PORTFIO_MASKA_TOGGLE)
-#define bfin_write_PORTFIO_MASKA_TOGGLE(val)	bfin_write16(PORTFIO_MASKA_TOGGLE, val)
-#define bfin_read_PORTFIO_MASKB()		bfin_read16(PORTFIO_MASKB)
-#define bfin_write_PORTFIO_MASKB(val)		bfin_write16(PORTFIO_MASKB, val)
-#define bfin_read_PORTFIO_MASKB_CLEAR()		bfin_read16(PORTFIO_MASKB_CLEAR)
-#define bfin_write_PORTFIO_MASKB_CLEAR(val)	bfin_write16(PORTFIO_MASKB_CLEAR, val)
-#define bfin_read_PORTFIO_MASKB_SET()		bfin_read16(PORTFIO_MASKB_SET)
-#define bfin_write_PORTFIO_MASKB_SET(val)	bfin_write16(PORTFIO_MASKB_SET, val)
-#define bfin_read_PORTFIO_MASKB_TOGGLE()	bfin_read16(PORTFIO_MASKB_TOGGLE)
-#define bfin_write_PORTFIO_MASKB_TOGGLE(val)	bfin_write16(PORTFIO_MASKB_TOGGLE, val)
-#define bfin_read_PORTFIO_DIR()			bfin_read16(PORTFIO_DIR)
-#define bfin_write_PORTFIO_DIR(val)		bfin_write16(PORTFIO_DIR, val)
-#define bfin_read_PORTFIO_POLAR()		bfin_read16(PORTFIO_POLAR)
-#define bfin_write_PORTFIO_POLAR(val)		bfin_write16(PORTFIO_POLAR, val)
-#define bfin_read_PORTFIO_EDGE()		bfin_read16(PORTFIO_EDGE)
-#define bfin_write_PORTFIO_EDGE(val)		bfin_write16(PORTFIO_EDGE, val)
-#define bfin_read_PORTFIO_BOTH()		bfin_read16(PORTFIO_BOTH)
-#define bfin_write_PORTFIO_BOTH(val)		bfin_write16(PORTFIO_BOTH, val)
-#define bfin_read_PORTFIO_INEN()		bfin_read16(PORTFIO_INEN)
-#define bfin_write_PORTFIO_INEN(val)		bfin_write16(PORTFIO_INEN, val)
-
-
-/* SPORT0 Controller		(0xFFC00800 - 0xFFC008FF)								*/
-#define bfin_read_SPORT0_TCR1()			bfin_read16(SPORT0_TCR1)
-#define bfin_write_SPORT0_TCR1(val)		bfin_write16(SPORT0_TCR1, val)
-#define bfin_read_SPORT0_TCR2()			bfin_read16(SPORT0_TCR2)
-#define bfin_write_SPORT0_TCR2(val)		bfin_write16(SPORT0_TCR2, val)
-#define bfin_read_SPORT0_TCLKDIV()		bfin_read16(SPORT0_TCLKDIV)
-#define bfin_write_SPORT0_TCLKDIV(val)		bfin_write16(SPORT0_TCLKDIV, val)
-#define bfin_read_SPORT0_TFSDIV()		bfin_read16(SPORT0_TFSDIV)
-#define bfin_write_SPORT0_TFSDIV(val)		bfin_write16(SPORT0_TFSDIV, val)
-#define bfin_read_SPORT0_TX()			bfin_read32(SPORT0_TX)
-#define bfin_write_SPORT0_TX(val)		bfin_write32(SPORT0_TX, val)
-#define bfin_read_SPORT0_RX()			bfin_read32(SPORT0_RX)
-#define bfin_write_SPORT0_RX(val)		bfin_write32(SPORT0_RX, val)
-#define bfin_read_SPORT0_TX32()			bfin_read32(SPORT0_TX)
-#define bfin_write_SPORT0_TX32(val)		bfin_write32(SPORT0_TX, val)
-#define bfin_read_SPORT0_RX32()			bfin_read32(SPORT0_RX)
-#define bfin_write_SPORT0_RX32(val)		bfin_write32(SPORT0_RX, val)
-#define bfin_read_SPORT0_TX16()			bfin_read16(SPORT0_TX)
-#define bfin_write_SPORT0_TX16(val)		bfin_write16(SPORT0_TX, val)
-#define bfin_read_SPORT0_RX16()			bfin_read16(SPORT0_RX)
-#define bfin_write_SPORT0_RX16(val)		bfin_write16(SPORT0_RX, val)
-#define bfin_read_SPORT0_RCR1()			bfin_read16(SPORT0_RCR1)
-#define bfin_write_SPORT0_RCR1(val)		bfin_write16(SPORT0_RCR1, val)
-#define bfin_read_SPORT0_RCR2()			bfin_read16(SPORT0_RCR2)
-#define bfin_write_SPORT0_RCR2(val)		bfin_write16(SPORT0_RCR2, val)
-#define bfin_read_SPORT0_RCLKDIV()		bfin_read16(SPORT0_RCLKDIV)
-#define bfin_write_SPORT0_RCLKDIV(val)		bfin_write16(SPORT0_RCLKDIV, val)
-#define bfin_read_SPORT0_RFSDIV()		bfin_read16(SPORT0_RFSDIV)
-#define bfin_write_SPORT0_RFSDIV(val)		bfin_write16(SPORT0_RFSDIV, val)
-#define bfin_read_SPORT0_STAT()			bfin_read16(SPORT0_STAT)
-#define bfin_write_SPORT0_STAT(val)		bfin_write16(SPORT0_STAT, val)
-#define bfin_read_SPORT0_CHNL()			bfin_read16(SPORT0_CHNL)
-#define bfin_write_SPORT0_CHNL(val)		bfin_write16(SPORT0_CHNL, val)
-#define bfin_read_SPORT0_MCMC1()		bfin_read16(SPORT0_MCMC1)
-#define bfin_write_SPORT0_MCMC1(val)		bfin_write16(SPORT0_MCMC1, val)
-#define bfin_read_SPORT0_MCMC2()		bfin_read16(SPORT0_MCMC2)
-#define bfin_write_SPORT0_MCMC2(val)		bfin_write16(SPORT0_MCMC2, val)
-#define bfin_read_SPORT0_MTCS0()		bfin_read32(SPORT0_MTCS0)
-#define bfin_write_SPORT0_MTCS0(val)		bfin_write32(SPORT0_MTCS0, val)
-#define bfin_read_SPORT0_MTCS1()		bfin_read32(SPORT0_MTCS1)
-#define bfin_write_SPORT0_MTCS1(val)		bfin_write32(SPORT0_MTCS1, val)
-#define bfin_read_SPORT0_MTCS2()		bfin_read32(SPORT0_MTCS2)
-#define bfin_write_SPORT0_MTCS2(val)		bfin_write32(SPORT0_MTCS2, val)
-#define bfin_read_SPORT0_MTCS3()		bfin_read32(SPORT0_MTCS3)
-#define bfin_write_SPORT0_MTCS3(val)		bfin_write32(SPORT0_MTCS3, val)
-#define bfin_read_SPORT0_MRCS0()		bfin_read32(SPORT0_MRCS0)
-#define bfin_write_SPORT0_MRCS0(val)		bfin_write32(SPORT0_MRCS0, val)
-#define bfin_read_SPORT0_MRCS1()		bfin_read32(SPORT0_MRCS1)
-#define bfin_write_SPORT0_MRCS1(val)		bfin_write32(SPORT0_MRCS1, val)
-#define bfin_read_SPORT0_MRCS2()		bfin_read32(SPORT0_MRCS2)
-#define bfin_write_SPORT0_MRCS2(val)		bfin_write32(SPORT0_MRCS2, val)
-#define bfin_read_SPORT0_MRCS3()		bfin_read32(SPORT0_MRCS3)
-#define bfin_write_SPORT0_MRCS3(val)		bfin_write32(SPORT0_MRCS3, val)
-
-
-/* SPORT1 Controller		(0xFFC00900 - 0xFFC009FF)								*/
-#define bfin_read_SPORT1_TCR1()			bfin_read16(SPORT1_TCR1)
-#define bfin_write_SPORT1_TCR1(val)		bfin_write16(SPORT1_TCR1, val)
-#define bfin_read_SPORT1_TCR2()			bfin_read16(SPORT1_TCR2)
-#define bfin_write_SPORT1_TCR2(val)		bfin_write16(SPORT1_TCR2, val)
-#define bfin_read_SPORT1_TCLKDIV()		bfin_read16(SPORT1_TCLKDIV)
-#define bfin_write_SPORT1_TCLKDIV(val)		bfin_write16(SPORT1_TCLKDIV, val)
-#define bfin_read_SPORT1_TFSDIV()		bfin_read16(SPORT1_TFSDIV)
-#define bfin_write_SPORT1_TFSDIV(val)		bfin_write16(SPORT1_TFSDIV, val)
-#define bfin_read_SPORT1_TX()			bfin_read32(SPORT1_TX)
-#define bfin_write_SPORT1_TX(val)		bfin_write32(SPORT1_TX, val)
-#define bfin_read_SPORT1_RX()			bfin_read32(SPORT1_RX)
-#define bfin_write_SPORT1_RX(val)		bfin_write32(SPORT1_RX, val)
-#define bfin_read_SPORT1_TX32()			bfin_read32(SPORT1_TX)
-#define bfin_write_SPORT1_TX32(val)		bfin_write32(SPORT1_TX, val)
-#define bfin_read_SPORT1_RX32()			bfin_read32(SPORT1_RX)
-#define bfin_write_SPORT1_RX32(val)		bfin_write32(SPORT1_RX, val)
-#define bfin_read_SPORT1_TX16()			bfin_read16(SPORT1_TX)
-#define bfin_write_SPORT1_TX16(val)		bfin_write16(SPORT1_TX, val)
-#define bfin_read_SPORT1_RX16()			bfin_read16(SPORT1_RX)
-#define bfin_write_SPORT1_RX16(val)		bfin_write16(SPORT1_RX, val)
-#define bfin_read_SPORT1_RCR1()			bfin_read16(SPORT1_RCR1)
-#define bfin_write_SPORT1_RCR1(val)		bfin_write16(SPORT1_RCR1, val)
-#define bfin_read_SPORT1_RCR2()			bfin_read16(SPORT1_RCR2)
-#define bfin_write_SPORT1_RCR2(val)		bfin_write16(SPORT1_RCR2, val)
-#define bfin_read_SPORT1_RCLKDIV()		bfin_read16(SPORT1_RCLKDIV)
-#define bfin_write_SPORT1_RCLKDIV(val)		bfin_write16(SPORT1_RCLKDIV, val)
-#define bfin_read_SPORT1_RFSDIV()		bfin_read16(SPORT1_RFSDIV)
-#define bfin_write_SPORT1_RFSDIV(val)		bfin_write16(SPORT1_RFSDIV, val)
-#define bfin_read_SPORT1_STAT()			bfin_read16(SPORT1_STAT)
-#define bfin_write_SPORT1_STAT(val)		bfin_write16(SPORT1_STAT, val)
-#define bfin_read_SPORT1_CHNL()			bfin_read16(SPORT1_CHNL)
-#define bfin_write_SPORT1_CHNL(val)		bfin_write16(SPORT1_CHNL, val)
-#define bfin_read_SPORT1_MCMC1()		bfin_read16(SPORT1_MCMC1)
-#define bfin_write_SPORT1_MCMC1(val)		bfin_write16(SPORT1_MCMC1, val)
-#define bfin_read_SPORT1_MCMC2()		bfin_read16(SPORT1_MCMC2)
-#define bfin_write_SPORT1_MCMC2(val)		bfin_write16(SPORT1_MCMC2, val)
-#define bfin_read_SPORT1_MTCS0()		bfin_read32(SPORT1_MTCS0)
-#define bfin_write_SPORT1_MTCS0(val)		bfin_write32(SPORT1_MTCS0, val)
-#define bfin_read_SPORT1_MTCS1()		bfin_read32(SPORT1_MTCS1)
-#define bfin_write_SPORT1_MTCS1(val)		bfin_write32(SPORT1_MTCS1, val)
-#define bfin_read_SPORT1_MTCS2()		bfin_read32(SPORT1_MTCS2)
-#define bfin_write_SPORT1_MTCS2(val)		bfin_write32(SPORT1_MTCS2, val)
-#define bfin_read_SPORT1_MTCS3()		bfin_read32(SPORT1_MTCS3)
-#define bfin_write_SPORT1_MTCS3(val)		bfin_write32(SPORT1_MTCS3, val)
-#define bfin_read_SPORT1_MRCS0()		bfin_read32(SPORT1_MRCS0)
-#define bfin_write_SPORT1_MRCS0(val)		bfin_write32(SPORT1_MRCS0, val)
-#define bfin_read_SPORT1_MRCS1()		bfin_read32(SPORT1_MRCS1)
-#define bfin_write_SPORT1_MRCS1(val)		bfin_write32(SPORT1_MRCS1, val)
-#define bfin_read_SPORT1_MRCS2()		bfin_read32(SPORT1_MRCS2)
-#define bfin_write_SPORT1_MRCS2(val)		bfin_write32(SPORT1_MRCS2, val)
-#define bfin_read_SPORT1_MRCS3()		bfin_read32(SPORT1_MRCS3)
-#define bfin_write_SPORT1_MRCS3(val)		bfin_write32(SPORT1_MRCS3, val)
-
-
-/* External Bus Interface Unit (0xFFC00A00 - 0xFFC00AFF)							*/
-#define bfin_read_EBIU_AMGCTL()			bfin_read16(EBIU_AMGCTL)
-#define bfin_write_EBIU_AMGCTL(val)		bfin_write16(EBIU_AMGCTL, val)
-#define bfin_read_EBIU_AMBCTL0()		bfin_read32(EBIU_AMBCTL0)
-#define bfin_write_EBIU_AMBCTL0(val)		bfin_write32(EBIU_AMBCTL0, val)
-#define bfin_read_EBIU_AMBCTL1()		bfin_read32(EBIU_AMBCTL1)
-#define bfin_write_EBIU_AMBCTL1(val)		bfin_write32(EBIU_AMBCTL1, val)
-#define bfin_read_EBIU_SDGCTL()			bfin_read32(EBIU_SDGCTL)
-#define bfin_write_EBIU_SDGCTL(val)		bfin_write32(EBIU_SDGCTL, val)
-#define bfin_read_EBIU_SDBCTL()			bfin_read16(EBIU_SDBCTL)
-#define bfin_write_EBIU_SDBCTL(val)		bfin_write16(EBIU_SDBCTL, val)
-#define bfin_read_EBIU_SDRRC()			bfin_read16(EBIU_SDRRC)
-#define bfin_write_EBIU_SDRRC(val)		bfin_write16(EBIU_SDRRC, val)
-#define bfin_read_EBIU_SDSTAT()			bfin_read16(EBIU_SDSTAT)
-#define bfin_write_EBIU_SDSTAT(val)		bfin_write16(EBIU_SDSTAT, val)
-
-
-/* DMA Traffic Control Registers													*/
-#define bfin_read_DMA_TC_PER()			bfin_read16(DMA_TC_PER)
-#define bfin_write_DMA_TC_PER(val)		bfin_write16(DMA_TC_PER, val)
-#define bfin_read_DMA_TC_CNT()			bfin_read16(DMA_TC_CNT)
-#define bfin_write_DMA_TC_CNT(val)		bfin_write16(DMA_TC_CNT, val)
-
-/* Alternate deprecated register names (below) provided for backwards code compatibility */
-#define bfin_read_DMA_TCPER()			bfin_read16(DMA_TCPER)
-#define bfin_write_DMA_TCPER(val)		bfin_write16(DMA_TCPER, val)
-#define bfin_read_DMA_TCCNT()			bfin_read16(DMA_TCCNT)
-#define bfin_write_DMA_TCCNT(val)		bfin_write16(DMA_TCCNT, val)
-
-/* DMA Controller																	*/
-#define bfin_read_DMA0_CONFIG()			bfin_read16(DMA0_CONFIG)
-#define bfin_write_DMA0_CONFIG(val)		bfin_write16(DMA0_CONFIG, val)
-#define bfin_read_DMA0_NEXT_DESC_PTR()		bfin_read32(DMA0_NEXT_DESC_PTR)
-#define bfin_write_DMA0_NEXT_DESC_PTR(val)	bfin_write32(DMA0_NEXT_DESC_PTR, val)
-#define bfin_read_DMA0_START_ADDR()		bfin_read32(DMA0_START_ADDR)
-#define bfin_write_DMA0_START_ADDR(val)		bfin_write32(DMA0_START_ADDR, val)
-#define bfin_read_DMA0_X_COUNT()		bfin_read16(DMA0_X_COUNT)
-#define bfin_write_DMA0_X_COUNT(val)		bfin_write16(DMA0_X_COUNT, val)
-#define bfin_read_DMA0_Y_COUNT()		bfin_read16(DMA0_Y_COUNT)
-#define bfin_write_DMA0_Y_COUNT(val)		bfin_write16(DMA0_Y_COUNT, val)
-#define bfin_read_DMA0_X_MODIFY()		bfin_read16(DMA0_X_MODIFY)
-#define bfin_write_DMA0_X_MODIFY(val)		bfin_write16(DMA0_X_MODIFY, val)
-#define bfin_read_DMA0_Y_MODIFY()		bfin_read16(DMA0_Y_MODIFY)
-#define bfin_write_DMA0_Y_MODIFY(val)		bfin_write16(DMA0_Y_MODIFY, val)
-#define bfin_read_DMA0_CURR_DESC_PTR()		bfin_read32(DMA0_CURR_DESC_PTR)
-#define bfin_write_DMA0_CURR_DESC_PTR(val)	bfin_write32(DMA0_CURR_DESC_PTR, val)
-#define bfin_read_DMA0_CURR_ADDR()		bfin_read32(DMA0_CURR_ADDR)
-#define bfin_write_DMA0_CURR_ADDR(val)		bfin_write32(DMA0_CURR_ADDR, val)
-#define bfin_read_DMA0_CURR_X_COUNT()		bfin_read16(DMA0_CURR_X_COUNT)
-#define bfin_write_DMA0_CURR_X_COUNT(val)	bfin_write16(DMA0_CURR_X_COUNT, val)
-#define bfin_read_DMA0_CURR_Y_COUNT()		bfin_read16(DMA0_CURR_Y_COUNT)
-#define bfin_write_DMA0_CURR_Y_COUNT(val)	bfin_write16(DMA0_CURR_Y_COUNT, val)
-#define bfin_read_DMA0_IRQ_STATUS()		bfin_read16(DMA0_IRQ_STATUS)
-#define bfin_write_DMA0_IRQ_STATUS(val)		bfin_write16(DMA0_IRQ_STATUS, val)
-#define bfin_read_DMA0_PERIPHERAL_MAP()		bfin_read16(DMA0_PERIPHERAL_MAP)
-#define bfin_write_DMA0_PERIPHERAL_MAP(val)	bfin_write16(DMA0_PERIPHERAL_MAP, val)
-
-#define bfin_read_DMA1_CONFIG()			bfin_read16(DMA1_CONFIG)
-#define bfin_write_DMA1_CONFIG(val)		bfin_write16(DMA1_CONFIG, val)
-#define bfin_read_DMA1_NEXT_DESC_PTR()		bfin_read32(DMA1_NEXT_DESC_PTR)
-#define bfin_write_DMA1_NEXT_DESC_PTR(val)	bfin_write32(DMA1_NEXT_DESC_PTR, val)
-#define bfin_read_DMA1_START_ADDR()		bfin_read32(DMA1_START_ADDR)
-#define bfin_write_DMA1_START_ADDR(val)		bfin_write32(DMA1_START_ADDR, val)
-#define bfin_read_DMA1_X_COUNT()		bfin_read16(DMA1_X_COUNT)
-#define bfin_write_DMA1_X_COUNT(val)		bfin_write16(DMA1_X_COUNT, val)
-#define bfin_read_DMA1_Y_COUNT()		bfin_read16(DMA1_Y_COUNT)
-#define bfin_write_DMA1_Y_COUNT(val)		bfin_write16(DMA1_Y_COUNT, val)
-#define bfin_read_DMA1_X_MODIFY()		bfin_read16(DMA1_X_MODIFY)
-#define bfin_write_DMA1_X_MODIFY(val)		bfin_write16(DMA1_X_MODIFY, val)
-#define bfin_read_DMA1_Y_MODIFY()		bfin_read16(DMA1_Y_MODIFY)
-#define bfin_write_DMA1_Y_MODIFY(val)		bfin_write16(DMA1_Y_MODIFY, val)
-#define bfin_read_DMA1_CURR_DESC_PTR()		bfin_read32(DMA1_CURR_DESC_PTR)
-#define bfin_write_DMA1_CURR_DESC_PTR(val)	bfin_write32(DMA1_CURR_DESC_PTR, val)
-#define bfin_read_DMA1_CURR_ADDR()		bfin_read32(DMA1_CURR_ADDR)
-#define bfin_write_DMA1_CURR_ADDR(val)		bfin_write32(DMA1_CURR_ADDR, val)
-#define bfin_read_DMA1_CURR_X_COUNT()		bfin_read16(DMA1_CURR_X_COUNT)
-#define bfin_write_DMA1_CURR_X_COUNT(val)	bfin_write16(DMA1_CURR_X_COUNT, val)
-#define bfin_read_DMA1_CURR_Y_COUNT()		bfin_read16(DMA1_CURR_Y_COUNT)
-#define bfin_write_DMA1_CURR_Y_COUNT(val)	bfin_write16(DMA1_CURR_Y_COUNT, val)
-#define bfin_read_DMA1_IRQ_STATUS()		bfin_read16(DMA1_IRQ_STATUS)
-#define bfin_write_DMA1_IRQ_STATUS(val)		bfin_write16(DMA1_IRQ_STATUS, val)
-#define bfin_read_DMA1_PERIPHERAL_MAP()		bfin_read16(DMA1_PERIPHERAL_MAP)
-#define bfin_write_DMA1_PERIPHERAL_MAP(val)	bfin_write16(DMA1_PERIPHERAL_MAP, val)
-
-#define bfin_read_DMA2_CONFIG()			bfin_read16(DMA2_CONFIG)
-#define bfin_write_DMA2_CONFIG(val)		bfin_write16(DMA2_CONFIG, val)
-#define bfin_read_DMA2_NEXT_DESC_PTR()		bfin_read32(DMA2_NEXT_DESC_PTR)
-#define bfin_write_DMA2_NEXT_DESC_PTR(val)	bfin_write32(DMA2_NEXT_DESC_PTR, val)
-#define bfin_read_DMA2_START_ADDR()		bfin_read32(DMA2_START_ADDR)
-#define bfin_write_DMA2_START_ADDR(val)		bfin_write32(DMA2_START_ADDR, val)
-#define bfin_read_DMA2_X_COUNT()		bfin_read16(DMA2_X_COUNT)
-#define bfin_write_DMA2_X_COUNT(val)		bfin_write16(DMA2_X_COUNT, val)
-#define bfin_read_DMA2_Y_COUNT()		bfin_read16(DMA2_Y_COUNT)
-#define bfin_write_DMA2_Y_COUNT(val)		bfin_write16(DMA2_Y_COUNT, val)
-#define bfin_read_DMA2_X_MODIFY()		bfin_read16(DMA2_X_MODIFY)
-#define bfin_write_DMA2_X_MODIFY(val)		bfin_write16(DMA2_X_MODIFY, val)
-#define bfin_read_DMA2_Y_MODIFY()		bfin_read16(DMA2_Y_MODIFY)
-#define bfin_write_DMA2_Y_MODIFY(val)		bfin_write16(DMA2_Y_MODIFY, val)
-#define bfin_read_DMA2_CURR_DESC_PTR()		bfin_read32(DMA2_CURR_DESC_PTR)
-#define bfin_write_DMA2_CURR_DESC_PTR(val)	bfin_write32(DMA2_CURR_DESC_PTR, val)
-#define bfin_read_DMA2_CURR_ADDR()		bfin_read32(DMA2_CURR_ADDR)
-#define bfin_write_DMA2_CURR_ADDR(val)		bfin_write32(DMA2_CURR_ADDR, val)
-#define bfin_read_DMA2_CURR_X_COUNT()		bfin_read16(DMA2_CURR_X_COUNT)
-#define bfin_write_DMA2_CURR_X_COUNT(val)	bfin_write16(DMA2_CURR_X_COUNT, val)
-#define bfin_read_DMA2_CURR_Y_COUNT()		bfin_read16(DMA2_CURR_Y_COUNT)
-#define bfin_write_DMA2_CURR_Y_COUNT(val)	bfin_write16(DMA2_CURR_Y_COUNT, val)
-#define bfin_read_DMA2_IRQ_STATUS()		bfin_read16(DMA2_IRQ_STATUS)
-#define bfin_write_DMA2_IRQ_STATUS(val)		bfin_write16(DMA2_IRQ_STATUS, val)
-#define bfin_read_DMA2_PERIPHERAL_MAP()		bfin_read16(DMA2_PERIPHERAL_MAP)
-#define bfin_write_DMA2_PERIPHERAL_MAP(val)	bfin_write16(DMA2_PERIPHERAL_MAP, val)
-
-#define bfin_read_DMA3_CONFIG()			bfin_read16(DMA3_CONFIG)
-#define bfin_write_DMA3_CONFIG(val)		bfin_write16(DMA3_CONFIG, val)
-#define bfin_read_DMA3_NEXT_DESC_PTR()		bfin_read32(DMA3_NEXT_DESC_PTR)
-#define bfin_write_DMA3_NEXT_DESC_PTR(val)	bfin_write32(DMA3_NEXT_DESC_PTR, val)
-#define bfin_read_DMA3_START_ADDR()		bfin_read32(DMA3_START_ADDR)
-#define bfin_write_DMA3_START_ADDR(val)		bfin_write32(DMA3_START_ADDR, val)
-#define bfin_read_DMA3_X_COUNT()		bfin_read16(DMA3_X_COUNT)
-#define bfin_write_DMA3_X_COUNT(val)		bfin_write16(DMA3_X_COUNT, val)
-#define bfin_read_DMA3_Y_COUNT()		bfin_read16(DMA3_Y_COUNT)
-#define bfin_write_DMA3_Y_COUNT(val)		bfin_write16(DMA3_Y_COUNT, val)
-#define bfin_read_DMA3_X_MODIFY()		bfin_read16(DMA3_X_MODIFY)
-#define bfin_write_DMA3_X_MODIFY(val)		bfin_write16(DMA3_X_MODIFY, val)
-#define bfin_read_DMA3_Y_MODIFY()		bfin_read16(DMA3_Y_MODIFY)
-#define bfin_write_DMA3_Y_MODIFY(val)		bfin_write16(DMA3_Y_MODIFY, val)
-#define bfin_read_DMA3_CURR_DESC_PTR()		bfin_read32(DMA3_CURR_DESC_PTR)
-#define bfin_write_DMA3_CURR_DESC_PTR(val)	bfin_write32(DMA3_CURR_DESC_PTR, val)
-#define bfin_read_DMA3_CURR_ADDR()		bfin_read32(DMA3_CURR_ADDR)
-#define bfin_write_DMA3_CURR_ADDR(val)		bfin_write32(DMA3_CURR_ADDR, val)
-#define bfin_read_DMA3_CURR_X_COUNT()		bfin_read16(DMA3_CURR_X_COUNT)
-#define bfin_write_DMA3_CURR_X_COUNT(val)	bfin_write16(DMA3_CURR_X_COUNT, val)
-#define bfin_read_DMA3_CURR_Y_COUNT()		bfin_read16(DMA3_CURR_Y_COUNT)
-#define bfin_write_DMA3_CURR_Y_COUNT(val)	bfin_write16(DMA3_CURR_Y_COUNT, val)
-#define bfin_read_DMA3_IRQ_STATUS()		bfin_read16(DMA3_IRQ_STATUS)
-#define bfin_write_DMA3_IRQ_STATUS(val)		bfin_write16(DMA3_IRQ_STATUS, val)
-#define bfin_read_DMA3_PERIPHERAL_MAP()		bfin_read16(DMA3_PERIPHERAL_MAP)
-#define bfin_write_DMA3_PERIPHERAL_MAP(val)	bfin_write16(DMA3_PERIPHERAL_MAP, val)
-
-#define bfin_read_DMA4_CONFIG()			bfin_read16(DMA4_CONFIG)
-#define bfin_write_DMA4_CONFIG(val)		bfin_write16(DMA4_CONFIG, val)
-#define bfin_read_DMA4_NEXT_DESC_PTR()		bfin_read32(DMA4_NEXT_DESC_PTR)
-#define bfin_write_DMA4_NEXT_DESC_PTR(val)	bfin_write32(DMA4_NEXT_DESC_PTR, val)
-#define bfin_read_DMA4_START_ADDR()		bfin_read32(DMA4_START_ADDR)
-#define bfin_write_DMA4_START_ADDR(val)		bfin_write32(DMA4_START_ADDR, val)
-#define bfin_read_DMA4_X_COUNT()		bfin_read16(DMA4_X_COUNT)
-#define bfin_write_DMA4_X_COUNT(val)		bfin_write16(DMA4_X_COUNT, val)
-#define bfin_read_DMA4_Y_COUNT()		bfin_read16(DMA4_Y_COUNT)
-#define bfin_write_DMA4_Y_COUNT(val)		bfin_write16(DMA4_Y_COUNT, val)
-#define bfin_read_DMA4_X_MODIFY()		bfin_read16(DMA4_X_MODIFY)
-#define bfin_write_DMA4_X_MODIFY(val)		bfin_write16(DMA4_X_MODIFY, val)
-#define bfin_read_DMA4_Y_MODIFY()		bfin_read16(DMA4_Y_MODIFY)
-#define bfin_write_DMA4_Y_MODIFY(val)		bfin_write16(DMA4_Y_MODIFY, val)
-#define bfin_read_DMA4_CURR_DESC_PTR()		bfin_read32(DMA4_CURR_DESC_PTR)
-#define bfin_write_DMA4_CURR_DESC_PTR(val)	bfin_write32(DMA4_CURR_DESC_PTR, val)
-#define bfin_read_DMA4_CURR_ADDR()		bfin_read32(DMA4_CURR_ADDR)
-#define bfin_write_DMA4_CURR_ADDR(val)		bfin_write32(DMA4_CURR_ADDR, val)
-#define bfin_read_DMA4_CURR_X_COUNT()		bfin_read16(DMA4_CURR_X_COUNT)
-#define bfin_write_DMA4_CURR_X_COUNT(val)	bfin_write16(DMA4_CURR_X_COUNT, val)
-#define bfin_read_DMA4_CURR_Y_COUNT()		bfin_read16(DMA4_CURR_Y_COUNT)
-#define bfin_write_DMA4_CURR_Y_COUNT(val)	bfin_write16(DMA4_CURR_Y_COUNT, val)
-#define bfin_read_DMA4_IRQ_STATUS()		bfin_read16(DMA4_IRQ_STATUS)
-#define bfin_write_DMA4_IRQ_STATUS(val)		bfin_write16(DMA4_IRQ_STATUS, val)
-#define bfin_read_DMA4_PERIPHERAL_MAP()		bfin_read16(DMA4_PERIPHERAL_MAP)
-#define bfin_write_DMA4_PERIPHERAL_MAP(val)	bfin_write16(DMA4_PERIPHERAL_MAP, val)
-
-#define bfin_read_DMA5_CONFIG()			bfin_read16(DMA5_CONFIG)
-#define bfin_write_DMA5_CONFIG(val)		bfin_write16(DMA5_CONFIG, val)
-#define bfin_read_DMA5_NEXT_DESC_PTR()		bfin_read32(DMA5_NEXT_DESC_PTR)
-#define bfin_write_DMA5_NEXT_DESC_PTR(val)	bfin_write32(DMA5_NEXT_DESC_PTR, val)
-#define bfin_read_DMA5_START_ADDR()		bfin_read32(DMA5_START_ADDR)
-#define bfin_write_DMA5_START_ADDR(val)		bfin_write32(DMA5_START_ADDR, val)
-#define bfin_read_DMA5_X_COUNT()		bfin_read16(DMA5_X_COUNT)
-#define bfin_write_DMA5_X_COUNT(val)		bfin_write16(DMA5_X_COUNT, val)
-#define bfin_read_DMA5_Y_COUNT()		bfin_read16(DMA5_Y_COUNT)
-#define bfin_write_DMA5_Y_COUNT(val)		bfin_write16(DMA5_Y_COUNT, val)
-#define bfin_read_DMA5_X_MODIFY()		bfin_read16(DMA5_X_MODIFY)
-#define bfin_write_DMA5_X_MODIFY(val)		bfin_write16(DMA5_X_MODIFY, val)
-#define bfin_read_DMA5_Y_MODIFY()		bfin_read16(DMA5_Y_MODIFY)
-#define bfin_write_DMA5_Y_MODIFY(val)		bfin_write16(DMA5_Y_MODIFY, val)
-#define bfin_read_DMA5_CURR_DESC_PTR()		bfin_read32(DMA5_CURR_DESC_PTR)
-#define bfin_write_DMA5_CURR_DESC_PTR(val)	bfin_write32(DMA5_CURR_DESC_PTR, val)
-#define bfin_read_DMA5_CURR_ADDR()		bfin_read32(DMA5_CURR_ADDR)
-#define bfin_write_DMA5_CURR_ADDR(val)		bfin_write32(DMA5_CURR_ADDR, val)
-#define bfin_read_DMA5_CURR_X_COUNT()		bfin_read16(DMA5_CURR_X_COUNT)
-#define bfin_write_DMA5_CURR_X_COUNT(val)	bfin_write16(DMA5_CURR_X_COUNT, val)
-#define bfin_read_DMA5_CURR_Y_COUNT()		bfin_read16(DMA5_CURR_Y_COUNT)
-#define bfin_write_DMA5_CURR_Y_COUNT(val)	bfin_write16(DMA5_CURR_Y_COUNT, val)
-#define bfin_read_DMA5_IRQ_STATUS()		bfin_read16(DMA5_IRQ_STATUS)
-#define bfin_write_DMA5_IRQ_STATUS(val)		bfin_write16(DMA5_IRQ_STATUS, val)
-#define bfin_read_DMA5_PERIPHERAL_MAP()		bfin_read16(DMA5_PERIPHERAL_MAP)
-#define bfin_write_DMA5_PERIPHERAL_MAP(val)	bfin_write16(DMA5_PERIPHERAL_MAP, val)
-
-#define bfin_read_DMA6_CONFIG()			bfin_read16(DMA6_CONFIG)
-#define bfin_write_DMA6_CONFIG(val)		bfin_write16(DMA6_CONFIG, val)
-#define bfin_read_DMA6_NEXT_DESC_PTR()		bfin_read32(DMA6_NEXT_DESC_PTR)
-#define bfin_write_DMA6_NEXT_DESC_PTR(val)	bfin_write32(DMA6_NEXT_DESC_PTR, val)
-#define bfin_read_DMA6_START_ADDR()		bfin_read32(DMA6_START_ADDR)
-#define bfin_write_DMA6_START_ADDR(val)		bfin_write32(DMA6_START_ADDR, val)
-#define bfin_read_DMA6_X_COUNT()		bfin_read16(DMA6_X_COUNT)
-#define bfin_write_DMA6_X_COUNT(val)		bfin_write16(DMA6_X_COUNT, val)
-#define bfin_read_DMA6_Y_COUNT()		bfin_read16(DMA6_Y_COUNT)
-#define bfin_write_DMA6_Y_COUNT(val)		bfin_write16(DMA6_Y_COUNT, val)
-#define bfin_read_DMA6_X_MODIFY()		bfin_read16(DMA6_X_MODIFY)
-#define bfin_write_DMA6_X_MODIFY(val)		bfin_write16(DMA6_X_MODIFY, val)
-#define bfin_read_DMA6_Y_MODIFY()		bfin_read16(DMA6_Y_MODIFY)
-#define bfin_write_DMA6_Y_MODIFY(val)		bfin_write16(DMA6_Y_MODIFY, val)
-#define bfin_read_DMA6_CURR_DESC_PTR()		bfin_read32(DMA6_CURR_DESC_PTR)
-#define bfin_write_DMA6_CURR_DESC_PTR(val)	bfin_write32(DMA6_CURR_DESC_PTR, val)
-#define bfin_read_DMA6_CURR_ADDR()		bfin_read32(DMA6_CURR_ADDR)
-#define bfin_write_DMA6_CURR_ADDR(val)		bfin_write32(DMA6_CURR_ADDR, val)
-#define bfin_read_DMA6_CURR_X_COUNT()		bfin_read16(DMA6_CURR_X_COUNT)
-#define bfin_write_DMA6_CURR_X_COUNT(val)	bfin_write16(DMA6_CURR_X_COUNT, val)
-#define bfin_read_DMA6_CURR_Y_COUNT()		bfin_read16(DMA6_CURR_Y_COUNT)
-#define bfin_write_DMA6_CURR_Y_COUNT(val)	bfin_write16(DMA6_CURR_Y_COUNT, val)
-#define bfin_read_DMA6_IRQ_STATUS()		bfin_read16(DMA6_IRQ_STATUS)
-#define bfin_write_DMA6_IRQ_STATUS(val)		bfin_write16(DMA6_IRQ_STATUS, val)
-#define bfin_read_DMA6_PERIPHERAL_MAP()		bfin_read16(DMA6_PERIPHERAL_MAP)
-#define bfin_write_DMA6_PERIPHERAL_MAP(val)	bfin_write16(DMA6_PERIPHERAL_MAP, val)
-
-#define bfin_read_DMA7_CONFIG()			bfin_read16(DMA7_CONFIG)
-#define bfin_write_DMA7_CONFIG(val)		bfin_write16(DMA7_CONFIG, val)
-#define bfin_read_DMA7_NEXT_DESC_PTR()		bfin_read32(DMA7_NEXT_DESC_PTR)
-#define bfin_write_DMA7_NEXT_DESC_PTR(val)	bfin_write32(DMA7_NEXT_DESC_PTR, val)
-#define bfin_read_DMA7_START_ADDR()		bfin_read32(DMA7_START_ADDR)
-#define bfin_write_DMA7_START_ADDR(val)		bfin_write32(DMA7_START_ADDR, val)
-#define bfin_read_DMA7_X_COUNT()		bfin_read16(DMA7_X_COUNT)
-#define bfin_write_DMA7_X_COUNT(val)		bfin_write16(DMA7_X_COUNT, val)
-#define bfin_read_DMA7_Y_COUNT()		bfin_read16(DMA7_Y_COUNT)
-#define bfin_write_DMA7_Y_COUNT(val)		bfin_write16(DMA7_Y_COUNT, val)
-#define bfin_read_DMA7_X_MODIFY()		bfin_read16(DMA7_X_MODIFY)
-#define bfin_write_DMA7_X_MODIFY(val)		bfin_write16(DMA7_X_MODIFY, val)
-#define bfin_read_DMA7_Y_MODIFY()		bfin_read16(DMA7_Y_MODIFY)
-#define bfin_write_DMA7_Y_MODIFY(val)		bfin_write16(DMA7_Y_MODIFY, val)
-#define bfin_read_DMA7_CURR_DESC_PTR()		bfin_read32(DMA7_CURR_DESC_PTR)
-#define bfin_write_DMA7_CURR_DESC_PTR(val)	bfin_write32(DMA7_CURR_DESC_PTR, val)
-#define bfin_read_DMA7_CURR_ADDR()		bfin_read32(DMA7_CURR_ADDR)
-#define bfin_write_DMA7_CURR_ADDR(val)		bfin_write32(DMA7_CURR_ADDR, val)
-#define bfin_read_DMA7_CURR_X_COUNT()		bfin_read16(DMA7_CURR_X_COUNT)
-#define bfin_write_DMA7_CURR_X_COUNT(val)	bfin_write16(DMA7_CURR_X_COUNT, val)
-#define bfin_read_DMA7_CURR_Y_COUNT()		bfin_read16(DMA7_CURR_Y_COUNT)
-#define bfin_write_DMA7_CURR_Y_COUNT(val)	bfin_write16(DMA7_CURR_Y_COUNT, val)
-#define bfin_read_DMA7_IRQ_STATUS()		bfin_read16(DMA7_IRQ_STATUS)
-#define bfin_write_DMA7_IRQ_STATUS(val)		bfin_write16(DMA7_IRQ_STATUS, val)
-#define bfin_read_DMA7_PERIPHERAL_MAP()		bfin_read16(DMA7_PERIPHERAL_MAP)
-#define bfin_write_DMA7_PERIPHERAL_MAP(val)	bfin_write16(DMA7_PERIPHERAL_MAP, val)
-
-#define bfin_read_DMA8_CONFIG()			bfin_read16(DMA8_CONFIG)
-#define bfin_write_DMA8_CONFIG(val)		bfin_write16(DMA8_CONFIG, val)
-#define bfin_read_DMA8_NEXT_DESC_PTR()		bfin_read32(DMA8_NEXT_DESC_PTR)
-#define bfin_write_DMA8_NEXT_DESC_PTR(val)	bfin_write32(DMA8_NEXT_DESC_PTR, val)
-#define bfin_read_DMA8_START_ADDR()		bfin_read32(DMA8_START_ADDR)
-#define bfin_write_DMA8_START_ADDR(val)		bfin_write32(DMA8_START_ADDR, val)
-#define bfin_read_DMA8_X_COUNT()		bfin_read16(DMA8_X_COUNT)
-#define bfin_write_DMA8_X_COUNT(val)		bfin_write16(DMA8_X_COUNT, val)
-#define bfin_read_DMA8_Y_COUNT()		bfin_read16(DMA8_Y_COUNT)
-#define bfin_write_DMA8_Y_COUNT(val)		bfin_write16(DMA8_Y_COUNT, val)
-#define bfin_read_DMA8_X_MODIFY()		bfin_read16(DMA8_X_MODIFY)
-#define bfin_write_DMA8_X_MODIFY(val)		bfin_write16(DMA8_X_MODIFY, val)
-#define bfin_read_DMA8_Y_MODIFY()		bfin_read16(DMA8_Y_MODIFY)
-#define bfin_write_DMA8_Y_MODIFY(val)		bfin_write16(DMA8_Y_MODIFY, val)
-#define bfin_read_DMA8_CURR_DESC_PTR()		bfin_read32(DMA8_CURR_DESC_PTR)
-#define bfin_write_DMA8_CURR_DESC_PTR(val)	bfin_write32(DMA8_CURR_DESC_PTR, val)
-#define bfin_read_DMA8_CURR_ADDR()		bfin_read32(DMA8_CURR_ADDR)
-#define bfin_write_DMA8_CURR_ADDR(val)		bfin_write32(DMA8_CURR_ADDR, val)
-#define bfin_read_DMA8_CURR_X_COUNT()		bfin_read16(DMA8_CURR_X_COUNT)
-#define bfin_write_DMA8_CURR_X_COUNT(val)	bfin_write16(DMA8_CURR_X_COUNT, val)
-#define bfin_read_DMA8_CURR_Y_COUNT()		bfin_read16(DMA8_CURR_Y_COUNT)
-#define bfin_write_DMA8_CURR_Y_COUNT(val)	bfin_write16(DMA8_CURR_Y_COUNT, val)
-#define bfin_read_DMA8_IRQ_STATUS()		bfin_read16(DMA8_IRQ_STATUS)
-#define bfin_write_DMA8_IRQ_STATUS(val)		bfin_write16(DMA8_IRQ_STATUS, val)
-#define bfin_read_DMA8_PERIPHERAL_MAP()		bfin_read16(DMA8_PERIPHERAL_MAP)
-#define bfin_write_DMA8_PERIPHERAL_MAP(val)	bfin_write16(DMA8_PERIPHERAL_MAP, val)
-
-#define bfin_read_DMA9_CONFIG()			bfin_read16(DMA9_CONFIG)
-#define bfin_write_DMA9_CONFIG(val)		bfin_write16(DMA9_CONFIG, val)
-#define bfin_read_DMA9_NEXT_DESC_PTR()		bfin_read32(DMA9_NEXT_DESC_PTR)
-#define bfin_write_DMA9_NEXT_DESC_PTR(val)	bfin_write32(DMA9_NEXT_DESC_PTR, val)
-#define bfin_read_DMA9_START_ADDR()		bfin_read32(DMA9_START_ADDR)
-#define bfin_write_DMA9_START_ADDR(val)		bfin_write32(DMA9_START_ADDR, val)
-#define bfin_read_DMA9_X_COUNT()		bfin_read16(DMA9_X_COUNT)
-#define bfin_write_DMA9_X_COUNT(val)		bfin_write16(DMA9_X_COUNT, val)
-#define bfin_read_DMA9_Y_COUNT()		bfin_read16(DMA9_Y_COUNT)
-#define bfin_write_DMA9_Y_COUNT(val)		bfin_write16(DMA9_Y_COUNT, val)
-#define bfin_read_DMA9_X_MODIFY()		bfin_read16(DMA9_X_MODIFY)
-#define bfin_write_DMA9_X_MODIFY(val)		bfin_write16(DMA9_X_MODIFY, val)
-#define bfin_read_DMA9_Y_MODIFY()		bfin_read16(DMA9_Y_MODIFY)
-#define bfin_write_DMA9_Y_MODIFY(val)		bfin_write16(DMA9_Y_MODIFY, val)
-#define bfin_read_DMA9_CURR_DESC_PTR()		bfin_read32(DMA9_CURR_DESC_PTR)
-#define bfin_write_DMA9_CURR_DESC_PTR(val)	bfin_write32(DMA9_CURR_DESC_PTR, val)
-#define bfin_read_DMA9_CURR_ADDR()		bfin_read32(DMA9_CURR_ADDR)
-#define bfin_write_DMA9_CURR_ADDR(val)		bfin_write32(DMA9_CURR_ADDR, val)
-#define bfin_read_DMA9_CURR_X_COUNT()		bfin_read16(DMA9_CURR_X_COUNT)
-#define bfin_write_DMA9_CURR_X_COUNT(val)	bfin_write16(DMA9_CURR_X_COUNT, val)
-#define bfin_read_DMA9_CURR_Y_COUNT()		bfin_read16(DMA9_CURR_Y_COUNT)
-#define bfin_write_DMA9_CURR_Y_COUNT(val)	bfin_write16(DMA9_CURR_Y_COUNT, val)
-#define bfin_read_DMA9_IRQ_STATUS()		bfin_read16(DMA9_IRQ_STATUS)
-#define bfin_write_DMA9_IRQ_STATUS(val)		bfin_write16(DMA9_IRQ_STATUS, val)
-#define bfin_read_DMA9_PERIPHERAL_MAP()		bfin_read16(DMA9_PERIPHERAL_MAP)
-#define bfin_write_DMA9_PERIPHERAL_MAP(val)	bfin_write16(DMA9_PERIPHERAL_MAP, val)
-
-#define bfin_read_DMA10_CONFIG()		bfin_read16(DMA10_CONFIG)
-#define bfin_write_DMA10_CONFIG(val)		bfin_write16(DMA10_CONFIG, val)
-#define bfin_read_DMA10_NEXT_DESC_PTR()		bfin_read32(DMA10_NEXT_DESC_PTR)
-#define bfin_write_DMA10_NEXT_DESC_PTR(val)	bfin_write32(DMA10_NEXT_DESC_PTR, val)
-#define bfin_read_DMA10_START_ADDR()		bfin_read32(DMA10_START_ADDR)
-#define bfin_write_DMA10_START_ADDR(val)	bfin_write32(DMA10_START_ADDR, val)
-#define bfin_read_DMA10_X_COUNT()		bfin_read16(DMA10_X_COUNT)
-#define bfin_write_DMA10_X_COUNT(val)		bfin_write16(DMA10_X_COUNT, val)
-#define bfin_read_DMA10_Y_COUNT()		bfin_read16(DMA10_Y_COUNT)
-#define bfin_write_DMA10_Y_COUNT(val)		bfin_write16(DMA10_Y_COUNT, val)
-#define bfin_read_DMA10_X_MODIFY()		bfin_read16(DMA10_X_MODIFY)
-#define bfin_write_DMA10_X_MODIFY(val)		bfin_write16(DMA10_X_MODIFY, val)
-#define bfin_read_DMA10_Y_MODIFY()		bfin_read16(DMA10_Y_MODIFY)
-#define bfin_write_DMA10_Y_MODIFY(val)		bfin_write16(DMA10_Y_MODIFY, val)
-#define bfin_read_DMA10_CURR_DESC_PTR()		bfin_read32(DMA10_CURR_DESC_PTR)
-#define bfin_write_DMA10_CURR_DESC_PTR(val)	bfin_write32(DMA10_CURR_DESC_PTR, val)
-#define bfin_read_DMA10_CURR_ADDR()		bfin_read32(DMA10_CURR_ADDR)
-#define bfin_write_DMA10_CURR_ADDR(val)		bfin_write32(DMA10_CURR_ADDR, val)
-#define bfin_read_DMA10_CURR_X_COUNT()		bfin_read16(DMA10_CURR_X_COUNT)
-#define bfin_write_DMA10_CURR_X_COUNT(val)	bfin_write16(DMA10_CURR_X_COUNT, val)
-#define bfin_read_DMA10_CURR_Y_COUNT()		bfin_read16(DMA10_CURR_Y_COUNT)
-#define bfin_write_DMA10_CURR_Y_COUNT(val)	bfin_write16(DMA10_CURR_Y_COUNT, val)
-#define bfin_read_DMA10_IRQ_STATUS()		bfin_read16(DMA10_IRQ_STATUS)
-#define bfin_write_DMA10_IRQ_STATUS(val)	bfin_write16(DMA10_IRQ_STATUS, val)
-#define bfin_read_DMA10_PERIPHERAL_MAP()	bfin_read16(DMA10_PERIPHERAL_MAP)
-#define bfin_write_DMA10_PERIPHERAL_MAP(val)	bfin_write16(DMA10_PERIPHERAL_MAP, val)
-
-#define bfin_read_DMA11_CONFIG()		bfin_read16(DMA11_CONFIG)
-#define bfin_write_DMA11_CONFIG(val)		bfin_write16(DMA11_CONFIG, val)
-#define bfin_read_DMA11_NEXT_DESC_PTR()		bfin_read32(DMA11_NEXT_DESC_PTR)
-#define bfin_write_DMA11_NEXT_DESC_PTR(val)	bfin_write32(DMA11_NEXT_DESC_PTR, val)
-#define bfin_read_DMA11_START_ADDR()		bfin_read32(DMA11_START_ADDR)
-#define bfin_write_DMA11_START_ADDR(val)	bfin_write32(DMA11_START_ADDR, val)
-#define bfin_read_DMA11_X_COUNT()		bfin_read16(DMA11_X_COUNT)
-#define bfin_write_DMA11_X_COUNT(val)		bfin_write16(DMA11_X_COUNT, val)
-#define bfin_read_DMA11_Y_COUNT()		bfin_read16(DMA11_Y_COUNT)
-#define bfin_write_DMA11_Y_COUNT(val)		bfin_write16(DMA11_Y_COUNT, val)
-#define bfin_read_DMA11_X_MODIFY()		bfin_read16(DMA11_X_MODIFY)
-#define bfin_write_DMA11_X_MODIFY(val)		bfin_write16(DMA11_X_MODIFY, val)
-#define bfin_read_DMA11_Y_MODIFY()		bfin_read16(DMA11_Y_MODIFY)
-#define bfin_write_DMA11_Y_MODIFY(val)		bfin_write16(DMA11_Y_MODIFY, val)
-#define bfin_read_DMA11_CURR_DESC_PTR()		bfin_read32(DMA11_CURR_DESC_PTR)
-#define bfin_write_DMA11_CURR_DESC_PTR(val)	bfin_write32(DMA11_CURR_DESC_PTR, val)
-#define bfin_read_DMA11_CURR_ADDR()		bfin_read32(DMA11_CURR_ADDR)
-#define bfin_write_DMA11_CURR_ADDR(val)		bfin_write32(DMA11_CURR_ADDR, val)
-#define bfin_read_DMA11_CURR_X_COUNT()		bfin_read16(DMA11_CURR_X_COUNT)
-#define bfin_write_DMA11_CURR_X_COUNT(val)	bfin_write16(DMA11_CURR_X_COUNT, val)
-#define bfin_read_DMA11_CURR_Y_COUNT()		bfin_read16(DMA11_CURR_Y_COUNT)
-#define bfin_write_DMA11_CURR_Y_COUNT(val)	bfin_write16(DMA11_CURR_Y_COUNT, val)
-#define bfin_read_DMA11_IRQ_STATUS()		bfin_read16(DMA11_IRQ_STATUS)
-#define bfin_write_DMA11_IRQ_STATUS(val)	bfin_write16(DMA11_IRQ_STATUS, val)
-#define bfin_read_DMA11_PERIPHERAL_MAP()	bfin_read16(DMA11_PERIPHERAL_MAP)
-#define bfin_write_DMA11_PERIPHERAL_MAP(val)	bfin_write16(DMA11_PERIPHERAL_MAP, val)
-
-#define bfin_read_MDMA_D0_CONFIG()		bfin_read16(MDMA_D0_CONFIG)
-#define bfin_write_MDMA_D0_CONFIG(val)		bfin_write16(MDMA_D0_CONFIG, val)
-#define bfin_read_MDMA_D0_NEXT_DESC_PTR()	bfin_read32(MDMA_D0_NEXT_DESC_PTR)
-#define bfin_write_MDMA_D0_NEXT_DESC_PTR(val)	bfin_write32(MDMA_D0_NEXT_DESC_PTR, val)
-#define bfin_read_MDMA_D0_START_ADDR()		bfin_read32(MDMA_D0_START_ADDR)
-#define bfin_write_MDMA_D0_START_ADDR(val)	bfin_write32(MDMA_D0_START_ADDR, val)
-#define bfin_read_MDMA_D0_X_COUNT()		bfin_read16(MDMA_D0_X_COUNT)
-#define bfin_write_MDMA_D0_X_COUNT(val)		bfin_write16(MDMA_D0_X_COUNT, val)
-#define bfin_read_MDMA_D0_Y_COUNT()		bfin_read16(MDMA_D0_Y_COUNT)
-#define bfin_write_MDMA_D0_Y_COUNT(val)		bfin_write16(MDMA_D0_Y_COUNT, val)
-#define bfin_read_MDMA_D0_X_MODIFY()		bfin_read16(MDMA_D0_X_MODIFY)
-#define bfin_write_MDMA_D0_X_MODIFY(val)	bfin_write16(MDMA_D0_X_MODIFY, val)
-#define bfin_read_MDMA_D0_Y_MODIFY()		bfin_read16(MDMA_D0_Y_MODIFY)
-#define bfin_write_MDMA_D0_Y_MODIFY(val)	bfin_write16(MDMA_D0_Y_MODIFY, val)
-#define bfin_read_MDMA_D0_CURR_DESC_PTR()	bfin_read32(MDMA_D0_CURR_DESC_PTR)
-#define bfin_write_MDMA_D0_CURR_DESC_PTR(val)	bfin_write32(MDMA_D0_CURR_DESC_PTR, val)
-#define bfin_read_MDMA_D0_CURR_ADDR()		bfin_read32(MDMA_D0_CURR_ADDR)
-#define bfin_write_MDMA_D0_CURR_ADDR(val)	bfin_write32(MDMA_D0_CURR_ADDR, val)
-#define bfin_read_MDMA_D0_CURR_X_COUNT()	bfin_read16(MDMA_D0_CURR_X_COUNT)
-#define bfin_write_MDMA_D0_CURR_X_COUNT(val)	bfin_write16(MDMA_D0_CURR_X_COUNT, val)
-#define bfin_read_MDMA_D0_CURR_Y_COUNT()	bfin_read16(MDMA_D0_CURR_Y_COUNT)
-#define bfin_write_MDMA_D0_CURR_Y_COUNT(val)	bfin_write16(MDMA_D0_CURR_Y_COUNT, val)
-#define bfin_read_MDMA_D0_IRQ_STATUS()		bfin_read16(MDMA_D0_IRQ_STATUS)
-#define bfin_write_MDMA_D0_IRQ_STATUS(val)	bfin_write16(MDMA_D0_IRQ_STATUS, val)
-#define bfin_read_MDMA_D0_PERIPHERAL_MAP()	bfin_read16(MDMA_D0_PERIPHERAL_MAP)
-#define bfin_write_MDMA_D0_PERIPHERAL_MAP(val)	bfin_write16(MDMA_D0_PERIPHERAL_MAP, val)
-
-#define bfin_read_MDMA_S0_CONFIG()		bfin_read16(MDMA_S0_CONFIG)
-#define bfin_write_MDMA_S0_CONFIG(val)		bfin_write16(MDMA_S0_CONFIG, val)
-#define bfin_read_MDMA_S0_NEXT_DESC_PTR()	bfin_read32(MDMA_S0_NEXT_DESC_PTR)
-#define bfin_write_MDMA_S0_NEXT_DESC_PTR(val)	bfin_write32(MDMA_S0_NEXT_DESC_PTR, val)
-#define bfin_read_MDMA_S0_START_ADDR()		bfin_read32(MDMA_S0_START_ADDR)
-#define bfin_write_MDMA_S0_START_ADDR(val)	bfin_write32(MDMA_S0_START_ADDR, val)
-#define bfin_read_MDMA_S0_X_COUNT()		bfin_read16(MDMA_S0_X_COUNT)
-#define bfin_write_MDMA_S0_X_COUNT(val)		bfin_write16(MDMA_S0_X_COUNT, val)
-#define bfin_read_MDMA_S0_Y_COUNT()		bfin_read16(MDMA_S0_Y_COUNT)
-#define bfin_write_MDMA_S0_Y_COUNT(val)		bfin_write16(MDMA_S0_Y_COUNT, val)
-#define bfin_read_MDMA_S0_X_MODIFY()		bfin_read16(MDMA_S0_X_MODIFY)
-#define bfin_write_MDMA_S0_X_MODIFY(val)	bfin_write16(MDMA_S0_X_MODIFY, val)
-#define bfin_read_MDMA_S0_Y_MODIFY()		bfin_read16(MDMA_S0_Y_MODIFY)
-#define bfin_write_MDMA_S0_Y_MODIFY(val)	bfin_write16(MDMA_S0_Y_MODIFY, val)
-#define bfin_read_MDMA_S0_CURR_DESC_PTR()	bfin_read32(MDMA_S0_CURR_DESC_PTR)
-#define bfin_write_MDMA_S0_CURR_DESC_PTR(val)	bfin_write32(MDMA_S0_CURR_DESC_PTR, val)
-#define bfin_read_MDMA_S0_CURR_ADDR()		bfin_read32(MDMA_S0_CURR_ADDR)
-#define bfin_write_MDMA_S0_CURR_ADDR(val)	bfin_write32(MDMA_S0_CURR_ADDR, val)
-#define bfin_read_MDMA_S0_CURR_X_COUNT()	bfin_read16(MDMA_S0_CURR_X_COUNT)
-#define bfin_write_MDMA_S0_CURR_X_COUNT(val)	bfin_write16(MDMA_S0_CURR_X_COUNT, val)
-#define bfin_read_MDMA_S0_CURR_Y_COUNT()	bfin_read16(MDMA_S0_CURR_Y_COUNT)
-#define bfin_write_MDMA_S0_CURR_Y_COUNT(val)	bfin_write16(MDMA_S0_CURR_Y_COUNT, val)
-#define bfin_read_MDMA_S0_IRQ_STATUS()		bfin_read16(MDMA_S0_IRQ_STATUS)
-#define bfin_write_MDMA_S0_IRQ_STATUS(val)	bfin_write16(MDMA_S0_IRQ_STATUS, val)
-#define bfin_read_MDMA_S0_PERIPHERAL_MAP()	bfin_read16(MDMA_S0_PERIPHERAL_MAP)
-#define bfin_write_MDMA_S0_PERIPHERAL_MAP(val)	bfin_write16(MDMA_S0_PERIPHERAL_MAP, val)
-
-#define bfin_read_MDMA_D1_CONFIG()		bfin_read16(MDMA_D1_CONFIG)
-#define bfin_write_MDMA_D1_CONFIG(val)		bfin_write16(MDMA_D1_CONFIG, val)
-#define bfin_read_MDMA_D1_NEXT_DESC_PTR()	bfin_read32(MDMA_D1_NEXT_DESC_PTR)
-#define bfin_write_MDMA_D1_NEXT_DESC_PTR(val)	bfin_write32(MDMA_D1_NEXT_DESC_PTR, val)
-#define bfin_read_MDMA_D1_START_ADDR()		bfin_read32(MDMA_D1_START_ADDR)
-#define bfin_write_MDMA_D1_START_ADDR(val)	bfin_write32(MDMA_D1_START_ADDR, val)
-#define bfin_read_MDMA_D1_X_COUNT()		bfin_read16(MDMA_D1_X_COUNT)
-#define bfin_write_MDMA_D1_X_COUNT(val)		bfin_write16(MDMA_D1_X_COUNT, val)
-#define bfin_read_MDMA_D1_Y_COUNT()		bfin_read16(MDMA_D1_Y_COUNT)
-#define bfin_write_MDMA_D1_Y_COUNT(val)		bfin_write16(MDMA_D1_Y_COUNT, val)
-#define bfin_read_MDMA_D1_X_MODIFY()		bfin_read16(MDMA_D1_X_MODIFY)
-#define bfin_write_MDMA_D1_X_MODIFY(val)	bfin_write16(MDMA_D1_X_MODIFY, val)
-#define bfin_read_MDMA_D1_Y_MODIFY()		bfin_read16(MDMA_D1_Y_MODIFY)
-#define bfin_write_MDMA_D1_Y_MODIFY(val)	bfin_write16(MDMA_D1_Y_MODIFY, val)
-#define bfin_read_MDMA_D1_CURR_DESC_PTR()	bfin_read32(MDMA_D1_CURR_DESC_PTR)
-#define bfin_write_MDMA_D1_CURR_DESC_PTR(val)	bfin_write32(MDMA_D1_CURR_DESC_PTR, val)
-#define bfin_read_MDMA_D1_CURR_ADDR()		bfin_read32(MDMA_D1_CURR_ADDR)
-#define bfin_write_MDMA_D1_CURR_ADDR(val)	bfin_write32(MDMA_D1_CURR_ADDR, val)
-#define bfin_read_MDMA_D1_CURR_X_COUNT()	bfin_read16(MDMA_D1_CURR_X_COUNT)
-#define bfin_write_MDMA_D1_CURR_X_COUNT(val)	bfin_write16(MDMA_D1_CURR_X_COUNT, val)
-#define bfin_read_MDMA_D1_CURR_Y_COUNT()	bfin_read16(MDMA_D1_CURR_Y_COUNT)
-#define bfin_write_MDMA_D1_CURR_Y_COUNT(val)	bfin_write16(MDMA_D1_CURR_Y_COUNT, val)
-#define bfin_read_MDMA_D1_IRQ_STATUS()		bfin_read16(MDMA_D1_IRQ_STATUS)
-#define bfin_write_MDMA_D1_IRQ_STATUS(val)	bfin_write16(MDMA_D1_IRQ_STATUS, val)
-#define bfin_read_MDMA_D1_PERIPHERAL_MAP()	bfin_read16(MDMA_D1_PERIPHERAL_MAP)
-#define bfin_write_MDMA_D1_PERIPHERAL_MAP(val)	bfin_write16(MDMA_D1_PERIPHERAL_MAP, val)
-
-#define bfin_read_MDMA_S1_CONFIG()		bfin_read16(MDMA_S1_CONFIG)
-#define bfin_write_MDMA_S1_CONFIG(val)		bfin_write16(MDMA_S1_CONFIG, val)
-#define bfin_read_MDMA_S1_NEXT_DESC_PTR()	bfin_read32(MDMA_S1_NEXT_DESC_PTR)
-#define bfin_write_MDMA_S1_NEXT_DESC_PTR(val)	bfin_write32(MDMA_S1_NEXT_DESC_PTR, val)
-#define bfin_read_MDMA_S1_START_ADDR()		bfin_read32(MDMA_S1_START_ADDR)
-#define bfin_write_MDMA_S1_START_ADDR(val)	bfin_write32(MDMA_S1_START_ADDR, val)
-#define bfin_read_MDMA_S1_X_COUNT()		bfin_read16(MDMA_S1_X_COUNT)
-#define bfin_write_MDMA_S1_X_COUNT(val)		bfin_write16(MDMA_S1_X_COUNT, val)
-#define bfin_read_MDMA_S1_Y_COUNT()		bfin_read16(MDMA_S1_Y_COUNT)
-#define bfin_write_MDMA_S1_Y_COUNT(val)		bfin_write16(MDMA_S1_Y_COUNT, val)
-#define bfin_read_MDMA_S1_X_MODIFY()		bfin_read16(MDMA_S1_X_MODIFY)
-#define bfin_write_MDMA_S1_X_MODIFY(val)	bfin_write16(MDMA_S1_X_MODIFY, val)
-#define bfin_read_MDMA_S1_Y_MODIFY()		bfin_read16(MDMA_S1_Y_MODIFY)
-#define bfin_write_MDMA_S1_Y_MODIFY(val)	bfin_write16(MDMA_S1_Y_MODIFY, val)
-#define bfin_read_MDMA_S1_CURR_DESC_PTR()	bfin_read32(MDMA_S1_CURR_DESC_PTR)
-#define bfin_write_MDMA_S1_CURR_DESC_PTR(val)	bfin_write32(MDMA_S1_CURR_DESC_PTR, val)
-#define bfin_read_MDMA_S1_CURR_ADDR()		bfin_read32(MDMA_S1_CURR_ADDR)
-#define bfin_write_MDMA_S1_CURR_ADDR(val)	bfin_write32(MDMA_S1_CURR_ADDR, val)
-#define bfin_read_MDMA_S1_CURR_X_COUNT()	bfin_read16(MDMA_S1_CURR_X_COUNT)
-#define bfin_write_MDMA_S1_CURR_X_COUNT(val)	bfin_write16(MDMA_S1_CURR_X_COUNT, val)
-#define bfin_read_MDMA_S1_CURR_Y_COUNT()	bfin_read16(MDMA_S1_CURR_Y_COUNT)
-#define bfin_write_MDMA_S1_CURR_Y_COUNT(val)	bfin_write16(MDMA_S1_CURR_Y_COUNT, val)
-#define bfin_read_MDMA_S1_IRQ_STATUS()		bfin_read16(MDMA_S1_IRQ_STATUS)
-#define bfin_write_MDMA_S1_IRQ_STATUS(val)	bfin_write16(MDMA_S1_IRQ_STATUS, val)
-#define bfin_read_MDMA_S1_PERIPHERAL_MAP()	bfin_read16(MDMA_S1_PERIPHERAL_MAP)
-#define bfin_write_MDMA_S1_PERIPHERAL_MAP(val)	bfin_write16(MDMA_S1_PERIPHERAL_MAP, val)
-
-
-/* Parallel Peripheral Interface (0xFFC01000 - 0xFFC010FF)							*/
-#define bfin_read_PPI_CONTROL()			bfin_read16(PPI_CONTROL)
-#define bfin_write_PPI_CONTROL(val)		bfin_write16(PPI_CONTROL, val)
-#define bfin_read_PPI_STATUS()			bfin_read16(PPI_STATUS)
-#define bfin_write_PPI_STATUS(val)		bfin_write16(PPI_STATUS, val)
-#define bfin_clear_PPI_STATUS()			bfin_write_PPI_STATUS(0xFFFF)
-#define bfin_read_PPI_DELAY()			bfin_read16(PPI_DELAY)
-#define bfin_write_PPI_DELAY(val)		bfin_write16(PPI_DELAY, val)
-#define bfin_read_PPI_COUNT()			bfin_read16(PPI_COUNT)
-#define bfin_write_PPI_COUNT(val)		bfin_write16(PPI_COUNT, val)
-#define bfin_read_PPI_FRAME()			bfin_read16(PPI_FRAME)
-#define bfin_write_PPI_FRAME(val)		bfin_write16(PPI_FRAME, val)
-
-
-/* Two-Wire Interface		(0xFFC01400 - 0xFFC014FF)								*/
-
-/* General Purpose I/O Port G (0xFFC01500 - 0xFFC015FF)								*/
-#define bfin_read_PORTGIO()			bfin_read16(PORTGIO)
-#define bfin_write_PORTGIO(val)			bfin_write16(PORTGIO, val)
-#define bfin_read_PORTGIO_CLEAR()		bfin_read16(PORTGIO_CLEAR)
-#define bfin_write_PORTGIO_CLEAR(val)		bfin_write16(PORTGIO_CLEAR, val)
-#define bfin_read_PORTGIO_SET()			bfin_read16(PORTGIO_SET)
-#define bfin_write_PORTGIO_SET(val)		bfin_write16(PORTGIO_SET, val)
-#define bfin_read_PORTGIO_TOGGLE()		bfin_read16(PORTGIO_TOGGLE)
-#define bfin_write_PORTGIO_TOGGLE(val)		bfin_write16(PORTGIO_TOGGLE, val)
-#define bfin_read_PORTGIO_MASKA()		bfin_read16(PORTGIO_MASKA)
-#define bfin_write_PORTGIO_MASKA(val)		bfin_write16(PORTGIO_MASKA, val)
-#define bfin_read_PORTGIO_MASKA_CLEAR()		bfin_read16(PORTGIO_MASKA_CLEAR)
-#define bfin_write_PORTGIO_MASKA_CLEAR(val)	bfin_write16(PORTGIO_MASKA_CLEAR, val)
-#define bfin_read_PORTGIO_MASKA_SET()		bfin_read16(PORTGIO_MASKA_SET)
-#define bfin_write_PORTGIO_MASKA_SET(val)	bfin_write16(PORTGIO_MASKA_SET, val)
-#define bfin_read_PORTGIO_MASKA_TOGGLE()	bfin_read16(PORTGIO_MASKA_TOGGLE)
-#define bfin_write_PORTGIO_MASKA_TOGGLE(val)	bfin_write16(PORTGIO_MASKA_TOGGLE, val)
-#define bfin_read_PORTGIO_MASKB()		bfin_read16(PORTGIO_MASKB)
-#define bfin_write_PORTGIO_MASKB(val)		bfin_write16(PORTGIO_MASKB, val)
-#define bfin_read_PORTGIO_MASKB_CLEAR()		bfin_read16(PORTGIO_MASKB_CLEAR)
-#define bfin_write_PORTGIO_MASKB_CLEAR(val)	bfin_write16(PORTGIO_MASKB_CLEAR, val)
-#define bfin_read_PORTGIO_MASKB_SET()		bfin_read16(PORTGIO_MASKB_SET)
-#define bfin_write_PORTGIO_MASKB_SET(val)	bfin_write16(PORTGIO_MASKB_SET, val)
-#define bfin_read_PORTGIO_MASKB_TOGGLE()	bfin_read16(PORTGIO_MASKB_TOGGLE)
-#define bfin_write_PORTGIO_MASKB_TOGGLE(val)	bfin_write16(PORTGIO_MASKB_TOGGLE, val)
-#define bfin_read_PORTGIO_DIR()			bfin_read16(PORTGIO_DIR)
-#define bfin_write_PORTGIO_DIR(val)		bfin_write16(PORTGIO_DIR, val)
-#define bfin_read_PORTGIO_POLAR()		bfin_read16(PORTGIO_POLAR)
-#define bfin_write_PORTGIO_POLAR(val)		bfin_write16(PORTGIO_POLAR, val)
-#define bfin_read_PORTGIO_EDGE()		bfin_read16(PORTGIO_EDGE)
-#define bfin_write_PORTGIO_EDGE(val)		bfin_write16(PORTGIO_EDGE, val)
-#define bfin_read_PORTGIO_BOTH()		bfin_read16(PORTGIO_BOTH)
-#define bfin_write_PORTGIO_BOTH(val)		bfin_write16(PORTGIO_BOTH, val)
-#define bfin_read_PORTGIO_INEN()		bfin_read16(PORTGIO_INEN)
-#define bfin_write_PORTGIO_INEN(val)		bfin_write16(PORTGIO_INEN, val)
-
-
-/* General Purpose I/O Port H (0xFFC01700 - 0xFFC017FF)								*/
-#define bfin_read_PORTHIO()			bfin_read16(PORTHIO)
-#define bfin_write_PORTHIO(val)			bfin_write16(PORTHIO, val)
-#define bfin_read_PORTHIO_CLEAR()		bfin_read16(PORTHIO_CLEAR)
-#define bfin_write_PORTHIO_CLEAR(val)		bfin_write16(PORTHIO_CLEAR, val)
-#define bfin_read_PORTHIO_SET()			bfin_read16(PORTHIO_SET)
-#define bfin_write_PORTHIO_SET(val)		bfin_write16(PORTHIO_SET, val)
-#define bfin_read_PORTHIO_TOGGLE()		bfin_read16(PORTHIO_TOGGLE)
-#define bfin_write_PORTHIO_TOGGLE(val)		bfin_write16(PORTHIO_TOGGLE, val)
-#define bfin_read_PORTHIO_MASKA()		bfin_read16(PORTHIO_MASKA)
-#define bfin_write_PORTHIO_MASKA(val)		bfin_write16(PORTHIO_MASKA, val)
-#define bfin_read_PORTHIO_MASKA_CLEAR()		bfin_read16(PORTHIO_MASKA_CLEAR)
-#define bfin_write_PORTHIO_MASKA_CLEAR(val)	bfin_write16(PORTHIO_MASKA_CLEAR, val)
-#define bfin_read_PORTHIO_MASKA_SET()		bfin_read16(PORTHIO_MASKA_SET)
-#define bfin_write_PORTHIO_MASKA_SET(val)	bfin_write16(PORTHIO_MASKA_SET, val)
-#define bfin_read_PORTHIO_MASKA_TOGGLE()	bfin_read16(PORTHIO_MASKA_TOGGLE)
-#define bfin_write_PORTHIO_MASKA_TOGGLE(val)	bfin_write16(PORTHIO_MASKA_TOGGLE, val)
-#define bfin_read_PORTHIO_MASKB()		bfin_read16(PORTHIO_MASKB)
-#define bfin_write_PORTHIO_MASKB(val)		bfin_write16(PORTHIO_MASKB, val)
-#define bfin_read_PORTHIO_MASKB_CLEAR()		bfin_read16(PORTHIO_MASKB_CLEAR)
-#define bfin_write_PORTHIO_MASKB_CLEAR(val)	bfin_write16(PORTHIO_MASKB_CLEAR, val)
-#define bfin_read_PORTHIO_MASKB_SET()		bfin_read16(PORTHIO_MASKB_SET)
-#define bfin_write_PORTHIO_MASKB_SET(val)	bfin_write16(PORTHIO_MASKB_SET, val)
-#define bfin_read_PORTHIO_MASKB_TOGGLE()	bfin_read16(PORTHIO_MASKB_TOGGLE)
-#define bfin_write_PORTHIO_MASKB_TOGGLE(val)	bfin_write16(PORTHIO_MASKB_TOGGLE, val)
-#define bfin_read_PORTHIO_DIR()			bfin_read16(PORTHIO_DIR)
-#define bfin_write_PORTHIO_DIR(val)		bfin_write16(PORTHIO_DIR, val)
-#define bfin_read_PORTHIO_POLAR()		bfin_read16(PORTHIO_POLAR)
-#define bfin_write_PORTHIO_POLAR(val)		bfin_write16(PORTHIO_POLAR, val)
-#define bfin_read_PORTHIO_EDGE()		bfin_read16(PORTHIO_EDGE)
-#define bfin_write_PORTHIO_EDGE(val)		bfin_write16(PORTHIO_EDGE, val)
-#define bfin_read_PORTHIO_BOTH()		bfin_read16(PORTHIO_BOTH)
-#define bfin_write_PORTHIO_BOTH(val)		bfin_write16(PORTHIO_BOTH, val)
-#define bfin_read_PORTHIO_INEN()		bfin_read16(PORTHIO_INEN)
-#define bfin_write_PORTHIO_INEN(val)		bfin_write16(PORTHIO_INEN, val)
-
-
-/* UART1 Controller		(0xFFC02000 - 0xFFC020FF)								*/
-#define bfin_read_UART1_THR()			bfin_read16(UART1_THR)
-#define bfin_write_UART1_THR(val)		bfin_write16(UART1_THR, val)
-#define bfin_read_UART1_RBR()			bfin_read16(UART1_RBR)
-#define bfin_write_UART1_RBR(val)		bfin_write16(UART1_RBR, val)
-#define bfin_read_UART1_DLL()			bfin_read16(UART1_DLL)
-#define bfin_write_UART1_DLL(val)		bfin_write16(UART1_DLL, val)
-#define bfin_read_UART1_IER()			bfin_read16(UART1_IER)
-#define bfin_write_UART1_IER(val)		bfin_write16(UART1_IER, val)
-#define bfin_read_UART1_DLH()			bfin_read16(UART1_DLH)
-#define bfin_write_UART1_DLH(val)		bfin_write16(UART1_DLH, val)
-#define bfin_read_UART1_IIR()			bfin_read16(UART1_IIR)
-#define bfin_write_UART1_IIR(val)		bfin_write16(UART1_IIR, val)
-#define bfin_read_UART1_LCR()			bfin_read16(UART1_LCR)
-#define bfin_write_UART1_LCR(val)		bfin_write16(UART1_LCR, val)
-#define bfin_read_UART1_MCR()			bfin_read16(UART1_MCR)
-#define bfin_write_UART1_MCR(val)		bfin_write16(UART1_MCR, val)
-#define bfin_read_UART1_LSR()			bfin_read16(UART1_LSR)
-#define bfin_write_UART1_LSR(val)		bfin_write16(UART1_LSR, val)
-#define bfin_read_UART1_MSR()			bfin_read16(UART1_MSR)
-#define bfin_write_UART1_MSR(val)		bfin_write16(UART1_MSR, val)
-#define bfin_read_UART1_SCR()			bfin_read16(UART1_SCR)
-#define bfin_write_UART1_SCR(val)		bfin_write16(UART1_SCR, val)
-#define bfin_read_UART1_GCTL()			bfin_read16(UART1_GCTL)
-#define bfin_write_UART1_GCTL(val)		bfin_write16(UART1_GCTL, val)
-
-/* Omit CAN register sets from the cdefBF534.h (CAN is not in the ADSP-BF52x processor) */
-
-/* Pin Control Registers	(0xFFC03200 - 0xFFC032FF)								*/
-#define bfin_read_PORTF_FER()			bfin_read16(PORTF_FER)
-#define bfin_write_PORTF_FER(val)		bfin_write16(PORTF_FER, val)
-#define bfin_read_PORTG_FER()			bfin_read16(PORTG_FER)
-#define bfin_write_PORTG_FER(val)		bfin_write16(PORTG_FER, val)
-#define bfin_read_PORTH_FER()			bfin_read16(PORTH_FER)
-#define bfin_write_PORTH_FER(val)		bfin_write16(PORTH_FER, val)
-#define bfin_read_PORT_MUX()			bfin_read16(PORT_MUX)
-#define bfin_write_PORT_MUX(val)		bfin_write16(PORT_MUX, val)
-
-
-/* Handshake MDMA Registers	(0xFFC03300 - 0xFFC033FF)								*/
-#define bfin_read_HMDMA0_CONTROL()		bfin_read16(HMDMA0_CONTROL)
-#define bfin_write_HMDMA0_CONTROL(val)		bfin_write16(HMDMA0_CONTROL, val)
-#define bfin_read_HMDMA0_ECINIT()		bfin_read16(HMDMA0_ECINIT)
-#define bfin_write_HMDMA0_ECINIT(val)		bfin_write16(HMDMA0_ECINIT, val)
-#define bfin_read_HMDMA0_BCINIT()		bfin_read16(HMDMA0_BCINIT)
-#define bfin_write_HMDMA0_BCINIT(val)		bfin_write16(HMDMA0_BCINIT, val)
-#define bfin_read_HMDMA0_ECURGENT()		bfin_read16(HMDMA0_ECURGENT)
-#define bfin_write_HMDMA0_ECURGENT(val)		bfin_write16(HMDMA0_ECURGENT, val)
-#define bfin_read_HMDMA0_ECOVERFLOW()		bfin_read16(HMDMA0_ECOVERFLOW)
-#define bfin_write_HMDMA0_ECOVERFLOW(val)	bfin_write16(HMDMA0_ECOVERFLOW, val)
-#define bfin_read_HMDMA0_ECOUNT()		bfin_read16(HMDMA0_ECOUNT)
-#define bfin_write_HMDMA0_ECOUNT(val)		bfin_write16(HMDMA0_ECOUNT, val)
-#define bfin_read_HMDMA0_BCOUNT()		bfin_read16(HMDMA0_BCOUNT)
-#define bfin_write_HMDMA0_BCOUNT(val)		bfin_write16(HMDMA0_BCOUNT, val)
-
-#define bfin_read_HMDMA1_CONTROL()		bfin_read16(HMDMA1_CONTROL)
-#define bfin_write_HMDMA1_CONTROL(val)		bfin_write16(HMDMA1_CONTROL, val)
-#define bfin_read_HMDMA1_ECINIT()		bfin_read16(HMDMA1_ECINIT)
-#define bfin_write_HMDMA1_ECINIT(val)		bfin_write16(HMDMA1_ECINIT, val)
-#define bfin_read_HMDMA1_BCINIT()		bfin_read16(HMDMA1_BCINIT)
-#define bfin_write_HMDMA1_BCINIT(val)		bfin_write16(HMDMA1_BCINIT, val)
-#define bfin_read_HMDMA1_ECURGENT()		bfin_read16(HMDMA1_ECURGENT)
-#define bfin_write_HMDMA1_ECURGENT(val)		bfin_write16(HMDMA1_ECURGENT, val)
-#define bfin_read_HMDMA1_ECOVERFLOW()		bfin_read16(HMDMA1_ECOVERFLOW)
-#define bfin_write_HMDMA1_ECOVERFLOW(val)	bfin_write16(HMDMA1_ECOVERFLOW, val)
-#define bfin_read_HMDMA1_ECOUNT()		bfin_read16(HMDMA1_ECOUNT)
-#define bfin_write_HMDMA1_ECOUNT(val)		bfin_write16(HMDMA1_ECOUNT, val)
-#define bfin_read_HMDMA1_BCOUNT()		bfin_read16(HMDMA1_BCOUNT)
-#define bfin_write_HMDMA1_BCOUNT(val)		bfin_write16(HMDMA1_BCOUNT, val)
-
-/* ==== end from cdefBF534.h ==== */
-
-/* GPIO PIN mux (0xFFC03210 - OxFFC03288) */
-
-#define bfin_read_PORTF_MUX()			bfin_read16(PORTF_MUX)
-#define bfin_write_PORTF_MUX(val)		bfin_write16(PORTF_MUX, val)
-#define bfin_read_PORTG_MUX()			bfin_read16(PORTG_MUX)
-#define bfin_write_PORTG_MUX(val)		bfin_write16(PORTG_MUX, val)
-#define bfin_read_PORTH_MUX()			bfin_read16(PORTH_MUX)
-#define bfin_write_PORTH_MUX(val)		bfin_write16(PORTH_MUX, val)
-
-#define bfin_read_PORTF_DRIVE()			bfin_read16(PORTF_DRIVE)
-#define bfin_write_PORTF_DRIVE(val)		bfin_write16(PORTF_DRIVE, val)
-#define bfin_read_PORTG_DRIVE()			bfin_read16(PORTG_DRIVE)
-#define bfin_write_PORTG_DRIVE(val)		bfin_write16(PORTG_DRIVE, val)
-#define bfin_read_PORTH_DRIVE()			bfin_read16(PORTH_DRIVE)
-#define bfin_write_PORTH_DRIVE(val)		bfin_write16(PORTH_DRIVE, val)
-#define bfin_read_PORTF_SLEW()			bfin_read16(PORTF_SLEW)
-#define bfin_write_PORTF_SLEW(val)		bfin_write16(PORTF_SLEW, val)
-#define bfin_read_PORTG_SLEW()			bfin_read16(PORTG_SLEW)
-#define bfin_write_PORTG_SLEW(val)		bfin_write16(PORTG_SLEW, val)
-#define bfin_read_PORTH_SLEW()			bfin_read16(PORTH_SLEW)
-#define bfin_write_PORTH_SLEW(val)		bfin_write16(PORTH_SLEW, val)
-#define bfin_read_PORTF_HYSTERISIS()		bfin_read16(PORTF_HYSTERISIS)
-#define bfin_write_PORTF_HYSTERISIS(val)	bfin_write16(PORTF_HYSTERISIS, val)
-#define bfin_read_PORTG_HYSTERISIS()		bfin_read16(PORTG_HYSTERISIS)
-#define bfin_write_PORTG_HYSTERISIS(val)	bfin_write16(PORTG_HYSTERISIS, val)
-#define bfin_read_PORTH_HYSTERISIS()		bfin_read16(PORTH_HYSTERISIS)
-#define bfin_write_PORTH_HYSTERISIS(val)	bfin_write16(PORTH_HYSTERISIS, val)
-#define bfin_read_MISCPORT_DRIVE()		bfin_read16(MISCPORT_DRIVE)
-#define bfin_write_MISCPORT_DRIVE(val)		bfin_write16(MISCPORT_DRIVE, val)
-#define bfin_read_MISCPORT_SLEW()		bfin_read16(MISCPORT_SLEW)
-#define bfin_write_MISCPORT_SLEW(val)		bfin_write16(MISCPORT_SLEW, val)
-#define bfin_read_MISCPORT_HYSTERISIS()		bfin_read16(MISCPORT_HYSTERISIS)
-#define bfin_write_MISCPORT_HYSTERISIS(val)	bfin_write16(MISCPORT_HYSTERISIS, val)
-
-/* HOST Port Registers */
-
-#define bfin_read_HOST_CONTROL()		bfin_read16(HOST_CONTROL)
-#define bfin_write_HOST_CONTROL(val)		bfin_write16(HOST_CONTROL, val)
-#define bfin_read_HOST_STATUS()			bfin_read16(HOST_STATUS)
-#define bfin_write_HOST_STATUS(val)		bfin_write16(HOST_STATUS, val)
-#define bfin_read_HOST_TIMEOUT()		bfin_read16(HOST_TIMEOUT)
-#define bfin_write_HOST_TIMEOUT(val)		bfin_write16(HOST_TIMEOUT, val)
-
-/* Counter Registers */
-
-#define bfin_read_CNT_CONFIG()			bfin_read16(CNT_CONFIG)
-#define bfin_write_CNT_CONFIG(val)		bfin_write16(CNT_CONFIG, val)
-#define bfin_read_CNT_IMASK()			bfin_read16(CNT_IMASK)
-#define bfin_write_CNT_IMASK(val)		bfin_write16(CNT_IMASK, val)
-#define bfin_read_CNT_STATUS()			bfin_read16(CNT_STATUS)
-#define bfin_write_CNT_STATUS(val)		bfin_write16(CNT_STATUS, val)
-#define bfin_read_CNT_COMMAND()			bfin_read16(CNT_COMMAND)
-#define bfin_write_CNT_COMMAND(val)		bfin_write16(CNT_COMMAND, val)
-#define bfin_read_CNT_DEBOUNCE()		bfin_read16(CNT_DEBOUNCE)
-#define bfin_write_CNT_DEBOUNCE(val)		bfin_write16(CNT_DEBOUNCE, val)
-#define bfin_read_CNT_COUNTER()			bfin_read32(CNT_COUNTER)
-#define bfin_write_CNT_COUNTER(val)		bfin_write32(CNT_COUNTER, val)
-#define bfin_read_CNT_MAX()			bfin_read32(CNT_MAX)
-#define bfin_write_CNT_MAX(val)			bfin_write32(CNT_MAX, val)
-#define bfin_read_CNT_MIN()			bfin_read32(CNT_MIN)
-#define bfin_write_CNT_MIN(val)			bfin_write32(CNT_MIN, val)
-
-/* Security Registers */
-
-#define bfin_read_SECURE_SYSSWT()		bfin_read32(SECURE_SYSSWT)
-#define bfin_write_SECURE_SYSSWT(val)		bfin_write32(SECURE_SYSSWT, val)
-#define bfin_read_SECURE_CONTROL()		bfin_read16(SECURE_CONTROL)
-#define bfin_write_SECURE_CONTROL(val)		bfin_write16(SECURE_CONTROL, val)
-#define bfin_read_SECURE_STATUS()		bfin_read16(SECURE_STATUS)
-#define bfin_write_SECURE_STATUS(val)		bfin_write16(SECURE_STATUS, val)
-
-/* NFC Registers */
-
-#define bfin_read_NFC_CTL()			bfin_read16(NFC_CTL)
-#define bfin_write_NFC_CTL(val)			bfin_write16(NFC_CTL, val)
-#define bfin_read_NFC_STAT()			bfin_read16(NFC_STAT)
-#define bfin_write_NFC_STAT(val)		bfin_write16(NFC_STAT, val)
-#define bfin_read_NFC_IRQSTAT()			bfin_read16(NFC_IRQSTAT)
-#define bfin_write_NFC_IRQSTAT(val)		bfin_write16(NFC_IRQSTAT, val)
-#define bfin_read_NFC_IRQMASK()			bfin_read16(NFC_IRQMASK)
-#define bfin_write_NFC_IRQMASK(val)		bfin_write16(NFC_IRQMASK, val)
-#define bfin_read_NFC_ECC0()			bfin_read16(NFC_ECC0)
-#define bfin_write_NFC_ECC0(val)		bfin_write16(NFC_ECC0, val)
-#define bfin_read_NFC_ECC1()			bfin_read16(NFC_ECC1)
-#define bfin_write_NFC_ECC1(val)		bfin_write16(NFC_ECC1, val)
-#define bfin_read_NFC_ECC2()			bfin_read16(NFC_ECC2)
-#define bfin_write_NFC_ECC2(val)		bfin_write16(NFC_ECC2, val)
-#define bfin_read_NFC_ECC3()			bfin_read16(NFC_ECC3)
-#define bfin_write_NFC_ECC3(val)		bfin_write16(NFC_ECC3, val)
-#define bfin_read_NFC_COUNT()			bfin_read16(NFC_COUNT)
-#define bfin_write_NFC_COUNT(val)		bfin_write16(NFC_COUNT, val)
-#define bfin_read_NFC_RST()			bfin_read16(NFC_RST)
-#define bfin_write_NFC_RST(val)			bfin_write16(NFC_RST, val)
-#define bfin_read_NFC_PGCTL()			bfin_read16(NFC_PGCTL)
-#define bfin_write_NFC_PGCTL(val)		bfin_write16(NFC_PGCTL, val)
-#define bfin_read_NFC_READ()			bfin_read16(NFC_READ)
-#define bfin_write_NFC_READ(val)		bfin_write16(NFC_READ, val)
-#define bfin_read_NFC_ADDR()			bfin_read16(NFC_ADDR)
-#define bfin_write_NFC_ADDR(val)		bfin_write16(NFC_ADDR, val)
-#define bfin_read_NFC_CMD()			bfin_read16(NFC_CMD)
-#define bfin_write_NFC_CMD(val)			bfin_write16(NFC_CMD, val)
-#define bfin_read_NFC_DATA_WR()			bfin_read16(NFC_DATA_WR)
-#define bfin_write_NFC_DATA_WR(val)		bfin_write16(NFC_DATA_WR, val)
-#define bfin_read_NFC_DATA_RD()			bfin_read16(NFC_DATA_RD)
-#define bfin_write_NFC_DATA_RD(val)		bfin_write16(NFC_DATA_RD, val)
-
-/* These need to be last due to the cdef/linux inter-dependencies */
-#include <asm/irq.h>
-
-#endif /* _CDEF_BF52X_H */
diff --git a/arch/blackfin/mach-bf527/include/mach/defBF522.h b/arch/blackfin/mach-bf527/include/mach/defBF522.h
index cb139a2..89f5420 100644
--- a/arch/blackfin/mach-bf527/include/mach/defBF522.h
+++ b/arch/blackfin/mach-bf527/include/mach/defBF522.h
@@ -1,5 +1,5 @@
 /*
- * Copyright 2007-2008 Analog Devices Inc.
+ * Copyright 2007-2010 Analog Devices Inc.
  *
  * Licensed under the ADI BSD license or the GPL-2 (or later)
  */
@@ -7,12 +7,1393 @@
 #ifndef _DEF_BF522_H
 #define _DEF_BF522_H
 
-/* Include all Core registers and bit definitions */
-#include <asm/def_LPBlackfin.h>
+/* ************************************************************** */
+/*   SYSTEM & MMR ADDRESS DEFINITIONS COMMON TO ALL ADSP-BF52x    */
+/* ************************************************************** */
 
-/* SYSTEM & MMR ADDRESS DEFINITIONS FOR ADSP-BF522 */
+/* ==== begin from defBF534.h ==== */
 
-/* Include defBF52x_base.h for the set of #defines that are common to all ADSP-BF52x processors */
-#include "defBF52x_base.h"
+/* Clock and System Control	(0xFFC00000 - 0xFFC000FF)								*/
+#define PLL_CTL				0xFFC00000	/* PLL Control Register						*/
+#define PLL_DIV				0xFFC00004	/* PLL Divide Register						*/
+#define VR_CTL				0xFFC00008	/* Voltage Regulator Control Register		*/
+#define PLL_STAT			0xFFC0000C	/* PLL Status Register						*/
+#define PLL_LOCKCNT			0xFFC00010	/* PLL Lock Count Register					*/
+#define CHIPID        0xFFC00014  /* Device ID Register */
+
+
+/* System Interrupt Controller (0xFFC00100 - 0xFFC001FF)							*/
+#define SWRST				0xFFC00100	/* Software Reset Register					*/
+#define SYSCR				0xFFC00104	/* System Configuration Register			*/
+#define SIC_RVECT			0xFFC00108	/* Interrupt Reset Vector Address Register	*/
+
+#define SIC_IMASK0			0xFFC0010C	/* Interrupt Mask Register					*/
+#define SIC_IAR0			0xFFC00110	/* Interrupt Assignment Register 0			*/
+#define SIC_IAR1			0xFFC00114	/* Interrupt Assignment Register 1			*/
+#define SIC_IAR2			0xFFC00118	/* Interrupt Assignment Register 2			*/
+#define SIC_IAR3			0xFFC0011C	/* Interrupt Assignment Register 3			*/
+#define SIC_ISR0				0xFFC00120	/* Interrupt Status Register				*/
+#define SIC_IWR0				0xFFC00124	/* Interrupt Wakeup Register				*/
+
+/* SIC Additions to ADSP-BF52x (0xFFC0014C - 0xFFC00164) */
+#define SIC_IMASK1                      0xFFC0014C     /* Interrupt Mask register of SIC2 */
+#define SIC_IAR4                        0xFFC00150     /* Interrupt Assignment register4 */
+#define SIC_IAR5                        0xFFC00154     /* Interrupt Assignment register5 */
+#define SIC_IAR6                        0xFFC00158     /* Interrupt Assignment register6 */
+#define SIC_IAR7                        0xFFC0015C     /* Interrupt Assignment register7 */
+#define SIC_ISR1                        0xFFC00160     /* Interrupt Status register */
+#define SIC_IWR1                        0xFFC00164     /* Interrupt Wakeup register */
+
+
+/* Watchdog Timer			(0xFFC00200 - 0xFFC002FF)								*/
+#define WDOG_CTL			0xFFC00200	/* Watchdog Control Register				*/
+#define WDOG_CNT			0xFFC00204	/* Watchdog Count Register					*/
+#define WDOG_STAT			0xFFC00208	/* Watchdog Status Register					*/
+
+
+/* Real Time Clock		(0xFFC00300 - 0xFFC003FF)									*/
+#define RTC_STAT			0xFFC00300	/* RTC Status Register						*/
+#define RTC_ICTL			0xFFC00304	/* RTC Interrupt Control Register			*/
+#define RTC_ISTAT			0xFFC00308	/* RTC Interrupt Status Register			*/
+#define RTC_SWCNT			0xFFC0030C	/* RTC Stopwatch Count Register				*/
+#define RTC_ALARM			0xFFC00310	/* RTC Alarm Time Register					*/
+#define RTC_FAST			0xFFC00314	/* RTC Prescaler Enable Register			*/
+#define RTC_PREN			0xFFC00314	/* RTC Prescaler Enable Alternate Macro		*/
+
+
+/* UART0 Controller		(0xFFC00400 - 0xFFC004FF)									*/
+#define UART0_THR			0xFFC00400	/* Transmit Holding register				*/
+#define UART0_RBR			0xFFC00400	/* Receive Buffer register					*/
+#define UART0_DLL			0xFFC00400	/* Divisor Latch (Low-Byte)					*/
+#define UART0_IER			0xFFC00404	/* Interrupt Enable Register				*/
+#define UART0_DLH			0xFFC00404	/* Divisor Latch (High-Byte)				*/
+#define UART0_IIR			0xFFC00408	/* Interrupt Identification Register		*/
+#define UART0_LCR			0xFFC0040C	/* Line Control Register					*/
+#define UART0_MCR			0xFFC00410	/* Modem Control Register					*/
+#define UART0_LSR			0xFFC00414	/* Line Status Register						*/
+#define UART0_MSR			0xFFC00418	/* Modem Status Register					*/
+#define UART0_SCR			0xFFC0041C	/* SCR Scratch Register						*/
+#define UART0_GCTL			0xFFC00424	/* Global Control Register					*/
+
+
+/* SPI Controller			(0xFFC00500 - 0xFFC005FF)								*/
+#define SPI0_REGBASE			0xFFC00500
+#define SPI_CTL				0xFFC00500	/* SPI Control Register						*/
+#define SPI_FLG				0xFFC00504	/* SPI Flag register						*/
+#define SPI_STAT			0xFFC00508	/* SPI Status register						*/
+#define SPI_TDBR			0xFFC0050C	/* SPI Transmit Data Buffer Register		*/
+#define SPI_RDBR			0xFFC00510	/* SPI Receive Data Buffer Register			*/
+#define SPI_BAUD			0xFFC00514	/* SPI Baud rate Register					*/
+#define SPI_SHADOW			0xFFC00518	/* SPI_RDBR Shadow Register					*/
+
+
+/* TIMER0-7 Registers		(0xFFC00600 - 0xFFC006FF)								*/
+#define TIMER0_CONFIG		0xFFC00600	/* Timer 0 Configuration Register			*/
+#define TIMER0_COUNTER		0xFFC00604	/* Timer 0 Counter Register					*/
+#define TIMER0_PERIOD		0xFFC00608	/* Timer 0 Period Register					*/
+#define TIMER0_WIDTH		0xFFC0060C	/* Timer 0 Width Register					*/
+
+#define TIMER1_CONFIG		0xFFC00610	/* Timer 1 Configuration Register  			*/
+#define TIMER1_COUNTER		0xFFC00614	/* Timer 1 Counter Register        			*/
+#define TIMER1_PERIOD		0xFFC00618	/* Timer 1 Period Register         			*/
+#define TIMER1_WIDTH		0xFFC0061C	/* Timer 1 Width Register          			*/
+
+#define TIMER2_CONFIG		0xFFC00620	/* Timer 2 Configuration Register  			*/
+#define TIMER2_COUNTER		0xFFC00624	/* Timer 2 Counter Register        			*/
+#define TIMER2_PERIOD		0xFFC00628	/* Timer 2 Period Register         			*/
+#define TIMER2_WIDTH		0xFFC0062C	/* Timer 2 Width Register          			*/
+
+#define TIMER3_CONFIG		0xFFC00630	/* Timer 3 Configuration Register			*/
+#define TIMER3_COUNTER		0xFFC00634	/* Timer 3 Counter Register					*/
+#define TIMER3_PERIOD		0xFFC00638	/* Timer 3 Period Register					*/
+#define TIMER3_WIDTH		0xFFC0063C	/* Timer 3 Width Register					*/
+
+#define TIMER4_CONFIG		0xFFC00640	/* Timer 4 Configuration Register  			*/
+#define TIMER4_COUNTER		0xFFC00644	/* Timer 4 Counter Register        			*/
+#define TIMER4_PERIOD		0xFFC00648	/* Timer 4 Period Register         			*/
+#define TIMER4_WIDTH		0xFFC0064C	/* Timer 4 Width Register          			*/
+
+#define TIMER5_CONFIG		0xFFC00650	/* Timer 5 Configuration Register  			*/
+#define TIMER5_COUNTER		0xFFC00654	/* Timer 5 Counter Register        			*/
+#define TIMER5_PERIOD		0xFFC00658	/* Timer 5 Period Register         			*/
+#define TIMER5_WIDTH		0xFFC0065C	/* Timer 5 Width Register          			*/
+
+#define TIMER6_CONFIG		0xFFC00660	/* Timer 6 Configuration Register  			*/
+#define TIMER6_COUNTER		0xFFC00664	/* Timer 6 Counter Register        			*/
+#define TIMER6_PERIOD		0xFFC00668	/* Timer 6 Period Register         			*/
+#define TIMER6_WIDTH		0xFFC0066C	/* Timer 6 Width Register          			*/
+
+#define TIMER7_CONFIG		0xFFC00670	/* Timer 7 Configuration Register  			*/
+#define TIMER7_COUNTER		0xFFC00674	/* Timer 7 Counter Register        			*/
+#define TIMER7_PERIOD		0xFFC00678	/* Timer 7 Period Register         			*/
+#define TIMER7_WIDTH		0xFFC0067C	/* Timer 7 Width Register       			*/
+
+#define TIMER_ENABLE		0xFFC00680	/* Timer Enable Register					*/
+#define TIMER_DISABLE		0xFFC00684	/* Timer Disable Register					*/
+#define TIMER_STATUS		0xFFC00688	/* Timer Status Register					*/
+
+
+/* General Purpose I/O Port F (0xFFC00700 - 0xFFC007FF)												*/
+#define PORTFIO					0xFFC00700	/* Port F I/O Pin State Specify Register				*/
+#define PORTFIO_CLEAR			0xFFC00704	/* Port F I/O Peripheral Interrupt Clear Register		*/
+#define PORTFIO_SET				0xFFC00708	/* Port F I/O Peripheral Interrupt Set Register			*/
+#define PORTFIO_TOGGLE			0xFFC0070C	/* Port F I/O Pin State Toggle Register					*/
+#define PORTFIO_MASKA			0xFFC00710	/* Port F I/O Mask State Specify Interrupt A Register	*/
+#define PORTFIO_MASKA_CLEAR		0xFFC00714	/* Port F I/O Mask Disable Interrupt A Register			*/
+#define PORTFIO_MASKA_SET		0xFFC00718	/* Port F I/O Mask Enable Interrupt A Register			*/
+#define PORTFIO_MASKA_TOGGLE	0xFFC0071C	/* Port F I/O Mask Toggle Enable Interrupt A Register	*/
+#define PORTFIO_MASKB			0xFFC00720	/* Port F I/O Mask State Specify Interrupt B Register	*/
+#define PORTFIO_MASKB_CLEAR		0xFFC00724	/* Port F I/O Mask Disable Interrupt B Register			*/
+#define PORTFIO_MASKB_SET		0xFFC00728	/* Port F I/O Mask Enable Interrupt B Register			*/
+#define PORTFIO_MASKB_TOGGLE	0xFFC0072C	/* Port F I/O Mask Toggle Enable Interrupt B Register	*/
+#define PORTFIO_DIR				0xFFC00730	/* Port F I/O Direction Register						*/
+#define PORTFIO_POLAR			0xFFC00734	/* Port F I/O Source Polarity Register					*/
+#define PORTFIO_EDGE			0xFFC00738	/* Port F I/O Source Sensitivity Register				*/
+#define PORTFIO_BOTH			0xFFC0073C	/* Port F I/O Set on BOTH Edges Register				*/
+#define PORTFIO_INEN			0xFFC00740	/* Port F I/O Input Enable Register 					*/
+
+
+/* SPORT0 Controller		(0xFFC00800 - 0xFFC008FF)										*/
+#define SPORT0_TCR1			0xFFC00800	/* SPORT0 Transmit Configuration 1 Register			*/
+#define SPORT0_TCR2			0xFFC00804	/* SPORT0 Transmit Configuration 2 Register			*/
+#define SPORT0_TCLKDIV		0xFFC00808	/* SPORT0 Transmit Clock Divider					*/
+#define SPORT0_TFSDIV		0xFFC0080C	/* SPORT0 Transmit Frame Sync Divider				*/
+#define SPORT0_TX			0xFFC00810	/* SPORT0 TX Data Register							*/
+#define SPORT0_RX			0xFFC00818	/* SPORT0 RX Data Register							*/
+#define SPORT0_RCR1			0xFFC00820	/* SPORT0 Receive Configuration 1 Register			*/
+#define SPORT0_RCR2			0xFFC00824	/* SPORT0 Receive Configuration 2 Register			*/
+#define SPORT0_RCLKDIV		0xFFC00828	/* SPORT0 Receive Clock Divider						*/
+#define SPORT0_RFSDIV		0xFFC0082C	/* SPORT0 Receive Frame Sync Divider				*/
+#define SPORT0_STAT			0xFFC00830	/* SPORT0 Status Register							*/
+#define SPORT0_CHNL			0xFFC00834	/* SPORT0 Current Channel Register					*/
+#define SPORT0_MCMC1		0xFFC00838	/* SPORT0 Multi-Channel Configuration Register 1	*/
+#define SPORT0_MCMC2		0xFFC0083C	/* SPORT0 Multi-Channel Configuration Register 2	*/
+#define SPORT0_MTCS0		0xFFC00840	/* SPORT0 Multi-Channel Transmit Select Register 0	*/
+#define SPORT0_MTCS1		0xFFC00844	/* SPORT0 Multi-Channel Transmit Select Register 1	*/
+#define SPORT0_MTCS2		0xFFC00848	/* SPORT0 Multi-Channel Transmit Select Register 2	*/
+#define SPORT0_MTCS3		0xFFC0084C	/* SPORT0 Multi-Channel Transmit Select Register 3	*/
+#define SPORT0_MRCS0		0xFFC00850	/* SPORT0 Multi-Channel Receive Select Register 0	*/
+#define SPORT0_MRCS1		0xFFC00854	/* SPORT0 Multi-Channel Receive Select Register 1	*/
+#define SPORT0_MRCS2		0xFFC00858	/* SPORT0 Multi-Channel Receive Select Register 2	*/
+#define SPORT0_MRCS3		0xFFC0085C	/* SPORT0 Multi-Channel Receive Select Register 3	*/
+
+
+/* SPORT1 Controller		(0xFFC00900 - 0xFFC009FF)										*/
+#define SPORT1_TCR1			0xFFC00900	/* SPORT1 Transmit Configuration 1 Register			*/
+#define SPORT1_TCR2			0xFFC00904	/* SPORT1 Transmit Configuration 2 Register			*/
+#define SPORT1_TCLKDIV		0xFFC00908	/* SPORT1 Transmit Clock Divider					*/
+#define SPORT1_TFSDIV		0xFFC0090C	/* SPORT1 Transmit Frame Sync Divider				*/
+#define SPORT1_TX			0xFFC00910	/* SPORT1 TX Data Register							*/
+#define SPORT1_RX			0xFFC00918	/* SPORT1 RX Data Register							*/
+#define SPORT1_RCR1			0xFFC00920	/* SPORT1 Receive Configuration 1 Register			*/
+#define SPORT1_RCR2			0xFFC00924	/* SPORT1 Receive Configuration 2 Register			*/
+#define SPORT1_RCLKDIV		0xFFC00928	/* SPORT1 Receive Clock Divider						*/
+#define SPORT1_RFSDIV		0xFFC0092C	/* SPORT1 Receive Frame Sync Divider				*/
+#define SPORT1_STAT			0xFFC00930	/* SPORT1 Status Register							*/
+#define SPORT1_CHNL			0xFFC00934	/* SPORT1 Current Channel Register					*/
+#define SPORT1_MCMC1		0xFFC00938	/* SPORT1 Multi-Channel Configuration Register 1	*/
+#define SPORT1_MCMC2		0xFFC0093C	/* SPORT1 Multi-Channel Configuration Register 2	*/
+#define SPORT1_MTCS0		0xFFC00940	/* SPORT1 Multi-Channel Transmit Select Register 0	*/
+#define SPORT1_MTCS1		0xFFC00944	/* SPORT1 Multi-Channel Transmit Select Register 1	*/
+#define SPORT1_MTCS2		0xFFC00948	/* SPORT1 Multi-Channel Transmit Select Register 2	*/
+#define SPORT1_MTCS3		0xFFC0094C	/* SPORT1 Multi-Channel Transmit Select Register 3	*/
+#define SPORT1_MRCS0		0xFFC00950	/* SPORT1 Multi-Channel Receive Select Register 0	*/
+#define SPORT1_MRCS1		0xFFC00954	/* SPORT1 Multi-Channel Receive Select Register 1	*/
+#define SPORT1_MRCS2		0xFFC00958	/* SPORT1 Multi-Channel Receive Select Register 2	*/
+#define SPORT1_MRCS3		0xFFC0095C	/* SPORT1 Multi-Channel Receive Select Register 3	*/
+
+
+/* External Bus Interface Unit (0xFFC00A00 - 0xFFC00AFF)								*/
+#define EBIU_AMGCTL			0xFFC00A00	/* Asynchronous Memory Global Control Register	*/
+#define EBIU_AMBCTL0		0xFFC00A04	/* Asynchronous Memory Bank Control Register 0	*/
+#define EBIU_AMBCTL1		0xFFC00A08	/* Asynchronous Memory Bank Control Register 1	*/
+#define EBIU_SDGCTL			0xFFC00A10	/* SDRAM Global Control Register				*/
+#define EBIU_SDBCTL			0xFFC00A14	/* SDRAM Bank Control Register					*/
+#define EBIU_SDRRC			0xFFC00A18	/* SDRAM Refresh Rate Control Register			*/
+#define EBIU_SDSTAT			0xFFC00A1C	/* SDRAM Status Register						*/
+
+
+/* DMA Traffic Control Registers													*/
+#define DMAC_TC_PER			0xFFC00B0C	/* Traffic Control Periods Register			*/
+#define DMAC_TC_CNT			0xFFC00B10	/* Traffic Control Current Counts Register	*/
+
+/* DMA Controller (0xFFC00C00 - 0xFFC00FFF)															*/
+#define DMA0_NEXT_DESC_PTR		0xFFC00C00	/* DMA Channel 0 Next Descriptor Pointer Register		*/
+#define DMA0_START_ADDR			0xFFC00C04	/* DMA Channel 0 Start Address Register					*/
+#define DMA0_CONFIG				0xFFC00C08	/* DMA Channel 0 Configuration Register					*/
+#define DMA0_X_COUNT			0xFFC00C10	/* DMA Channel 0 X Count Register						*/
+#define DMA0_X_MODIFY			0xFFC00C14	/* DMA Channel 0 X Modify Register						*/
+#define DMA0_Y_COUNT			0xFFC00C18	/* DMA Channel 0 Y Count Register						*/
+#define DMA0_Y_MODIFY			0xFFC00C1C	/* DMA Channel 0 Y Modify Register						*/
+#define DMA0_CURR_DESC_PTR		0xFFC00C20	/* DMA Channel 0 Current Descriptor Pointer Register	*/
+#define DMA0_CURR_ADDR			0xFFC00C24	/* DMA Channel 0 Current Address Register				*/
+#define DMA0_IRQ_STATUS			0xFFC00C28	/* DMA Channel 0 Interrupt/Status Register				*/
+#define DMA0_PERIPHERAL_MAP		0xFFC00C2C	/* DMA Channel 0 Peripheral Map Register				*/
+#define DMA0_CURR_X_COUNT		0xFFC00C30	/* DMA Channel 0 Current X Count Register				*/
+#define DMA0_CURR_Y_COUNT		0xFFC00C38	/* DMA Channel 0 Current Y Count Register				*/
+
+#define DMA1_NEXT_DESC_PTR		0xFFC00C40	/* DMA Channel 1 Next Descriptor Pointer Register		*/
+#define DMA1_START_ADDR			0xFFC00C44	/* DMA Channel 1 Start Address Register					*/
+#define DMA1_CONFIG				0xFFC00C48	/* DMA Channel 1 Configuration Register					*/
+#define DMA1_X_COUNT			0xFFC00C50	/* DMA Channel 1 X Count Register						*/
+#define DMA1_X_MODIFY			0xFFC00C54	/* DMA Channel 1 X Modify Register						*/
+#define DMA1_Y_COUNT			0xFFC00C58	/* DMA Channel 1 Y Count Register						*/
+#define DMA1_Y_MODIFY			0xFFC00C5C	/* DMA Channel 1 Y Modify Register						*/
+#define DMA1_CURR_DESC_PTR		0xFFC00C60	/* DMA Channel 1 Current Descriptor Pointer Register	*/
+#define DMA1_CURR_ADDR			0xFFC00C64	/* DMA Channel 1 Current Address Register				*/
+#define DMA1_IRQ_STATUS			0xFFC00C68	/* DMA Channel 1 Interrupt/Status Register				*/
+#define DMA1_PERIPHERAL_MAP		0xFFC00C6C	/* DMA Channel 1 Peripheral Map Register				*/
+#define DMA1_CURR_X_COUNT		0xFFC00C70	/* DMA Channel 1 Current X Count Register				*/
+#define DMA1_CURR_Y_COUNT		0xFFC00C78	/* DMA Channel 1 Current Y Count Register				*/
+
+#define DMA2_NEXT_DESC_PTR		0xFFC00C80	/* DMA Channel 2 Next Descriptor Pointer Register		*/
+#define DMA2_START_ADDR			0xFFC00C84	/* DMA Channel 2 Start Address Register					*/
+#define DMA2_CONFIG				0xFFC00C88	/* DMA Channel 2 Configuration Register					*/
+#define DMA2_X_COUNT			0xFFC00C90	/* DMA Channel 2 X Count Register						*/
+#define DMA2_X_MODIFY			0xFFC00C94	/* DMA Channel 2 X Modify Register						*/
+#define DMA2_Y_COUNT			0xFFC00C98	/* DMA Channel 2 Y Count Register						*/
+#define DMA2_Y_MODIFY			0xFFC00C9C	/* DMA Channel 2 Y Modify Register						*/
+#define DMA2_CURR_DESC_PTR		0xFFC00CA0	/* DMA Channel 2 Current Descriptor Pointer Register	*/
+#define DMA2_CURR_ADDR			0xFFC00CA4	/* DMA Channel 2 Current Address Register				*/
+#define DMA2_IRQ_STATUS			0xFFC00CA8	/* DMA Channel 2 Interrupt/Status Register				*/
+#define DMA2_PERIPHERAL_MAP		0xFFC00CAC	/* DMA Channel 2 Peripheral Map Register				*/
+#define DMA2_CURR_X_COUNT		0xFFC00CB0	/* DMA Channel 2 Current X Count Register				*/
+#define DMA2_CURR_Y_COUNT		0xFFC00CB8	/* DMA Channel 2 Current Y Count Register				*/
+
+#define DMA3_NEXT_DESC_PTR		0xFFC00CC0	/* DMA Channel 3 Next Descriptor Pointer Register		*/
+#define DMA3_START_ADDR			0xFFC00CC4	/* DMA Channel 3 Start Address Register					*/
+#define DMA3_CONFIG				0xFFC00CC8	/* DMA Channel 3 Configuration Register					*/
+#define DMA3_X_COUNT			0xFFC00CD0	/* DMA Channel 3 X Count Register						*/
+#define DMA3_X_MODIFY			0xFFC00CD4	/* DMA Channel 3 X Modify Register						*/
+#define DMA3_Y_COUNT			0xFFC00CD8	/* DMA Channel 3 Y Count Register						*/
+#define DMA3_Y_MODIFY			0xFFC00CDC	/* DMA Channel 3 Y Modify Register						*/
+#define DMA3_CURR_DESC_PTR		0xFFC00CE0	/* DMA Channel 3 Current Descriptor Pointer Register	*/
+#define DMA3_CURR_ADDR			0xFFC00CE4	/* DMA Channel 3 Current Address Register				*/
+#define DMA3_IRQ_STATUS			0xFFC00CE8	/* DMA Channel 3 Interrupt/Status Register				*/
+#define DMA3_PERIPHERAL_MAP		0xFFC00CEC	/* DMA Channel 3 Peripheral Map Register				*/
+#define DMA3_CURR_X_COUNT		0xFFC00CF0	/* DMA Channel 3 Current X Count Register				*/
+#define DMA3_CURR_Y_COUNT		0xFFC00CF8	/* DMA Channel 3 Current Y Count Register				*/
+
+#define DMA4_NEXT_DESC_PTR		0xFFC00D00	/* DMA Channel 4 Next Descriptor Pointer Register		*/
+#define DMA4_START_ADDR			0xFFC00D04	/* DMA Channel 4 Start Address Register					*/
+#define DMA4_CONFIG				0xFFC00D08	/* DMA Channel 4 Configuration Register					*/
+#define DMA4_X_COUNT			0xFFC00D10	/* DMA Channel 4 X Count Register						*/
+#define DMA4_X_MODIFY			0xFFC00D14	/* DMA Channel 4 X Modify Register						*/
+#define DMA4_Y_COUNT			0xFFC00D18	/* DMA Channel 4 Y Count Register						*/
+#define DMA4_Y_MODIFY			0xFFC00D1C	/* DMA Channel 4 Y Modify Register						*/
+#define DMA4_CURR_DESC_PTR		0xFFC00D20	/* DMA Channel 4 Current Descriptor Pointer Register	*/
+#define DMA4_CURR_ADDR			0xFFC00D24	/* DMA Channel 4 Current Address Register				*/
+#define DMA4_IRQ_STATUS			0xFFC00D28	/* DMA Channel 4 Interrupt/Status Register				*/
+#define DMA4_PERIPHERAL_MAP		0xFFC00D2C	/* DMA Channel 4 Peripheral Map Register				*/
+#define DMA4_CURR_X_COUNT		0xFFC00D30	/* DMA Channel 4 Current X Count Register				*/
+#define DMA4_CURR_Y_COUNT		0xFFC00D38	/* DMA Channel 4 Current Y Count Register				*/
+
+#define DMA5_NEXT_DESC_PTR		0xFFC00D40	/* DMA Channel 5 Next Descriptor Pointer Register		*/
+#define DMA5_START_ADDR			0xFFC00D44	/* DMA Channel 5 Start Address Register					*/
+#define DMA5_CONFIG				0xFFC00D48	/* DMA Channel 5 Configuration Register					*/
+#define DMA5_X_COUNT			0xFFC00D50	/* DMA Channel 5 X Count Register						*/
+#define DMA5_X_MODIFY			0xFFC00D54	/* DMA Channel 5 X Modify Register						*/
+#define DMA5_Y_COUNT			0xFFC00D58	/* DMA Channel 5 Y Count Register						*/
+#define DMA5_Y_MODIFY			0xFFC00D5C	/* DMA Channel 5 Y Modify Register						*/
+#define DMA5_CURR_DESC_PTR		0xFFC00D60	/* DMA Channel 5 Current Descriptor Pointer Register	*/
+#define DMA5_CURR_ADDR			0xFFC00D64	/* DMA Channel 5 Current Address Register				*/
+#define DMA5_IRQ_STATUS			0xFFC00D68	/* DMA Channel 5 Interrupt/Status Register				*/
+#define DMA5_PERIPHERAL_MAP		0xFFC00D6C	/* DMA Channel 5 Peripheral Map Register				*/
+#define DMA5_CURR_X_COUNT		0xFFC00D70	/* DMA Channel 5 Current X Count Register				*/
+#define DMA5_CURR_Y_COUNT		0xFFC00D78	/* DMA Channel 5 Current Y Count Register				*/
+
+#define DMA6_NEXT_DESC_PTR		0xFFC00D80	/* DMA Channel 6 Next Descriptor Pointer Register		*/
+#define DMA6_START_ADDR			0xFFC00D84	/* DMA Channel 6 Start Address Register					*/
+#define DMA6_CONFIG				0xFFC00D88	/* DMA Channel 6 Configuration Register					*/
+#define DMA6_X_COUNT			0xFFC00D90	/* DMA Channel 6 X Count Register						*/
+#define DMA6_X_MODIFY			0xFFC00D94	/* DMA Channel 6 X Modify Register						*/
+#define DMA6_Y_COUNT			0xFFC00D98	/* DMA Channel 6 Y Count Register						*/
+#define DMA6_Y_MODIFY			0xFFC00D9C	/* DMA Channel 6 Y Modify Register						*/
+#define DMA6_CURR_DESC_PTR		0xFFC00DA0	/* DMA Channel 6 Current Descriptor Pointer Register	*/
+#define DMA6_CURR_ADDR			0xFFC00DA4	/* DMA Channel 6 Current Address Register				*/
+#define DMA6_IRQ_STATUS			0xFFC00DA8	/* DMA Channel 6 Interrupt/Status Register				*/
+#define DMA6_PERIPHERAL_MAP		0xFFC00DAC	/* DMA Channel 6 Peripheral Map Register				*/
+#define DMA6_CURR_X_COUNT		0xFFC00DB0	/* DMA Channel 6 Current X Count Register				*/
+#define DMA6_CURR_Y_COUNT		0xFFC00DB8	/* DMA Channel 6 Current Y Count Register				*/
+
+#define DMA7_NEXT_DESC_PTR		0xFFC00DC0	/* DMA Channel 7 Next Descriptor Pointer Register		*/
+#define DMA7_START_ADDR			0xFFC00DC4	/* DMA Channel 7 Start Address Register					*/
+#define DMA7_CONFIG				0xFFC00DC8	/* DMA Channel 7 Configuration Register					*/
+#define DMA7_X_COUNT			0xFFC00DD0	/* DMA Channel 7 X Count Register						*/
+#define DMA7_X_MODIFY			0xFFC00DD4	/* DMA Channel 7 X Modify Register						*/
+#define DMA7_Y_COUNT			0xFFC00DD8	/* DMA Channel 7 Y Count Register						*/
+#define DMA7_Y_MODIFY			0xFFC00DDC	/* DMA Channel 7 Y Modify Register						*/
+#define DMA7_CURR_DESC_PTR		0xFFC00DE0	/* DMA Channel 7 Current Descriptor Pointer Register	*/
+#define DMA7_CURR_ADDR			0xFFC00DE4	/* DMA Channel 7 Current Address Register				*/
+#define DMA7_IRQ_STATUS			0xFFC00DE8	/* DMA Channel 7 Interrupt/Status Register				*/
+#define DMA7_PERIPHERAL_MAP		0xFFC00DEC	/* DMA Channel 7 Peripheral Map Register				*/
+#define DMA7_CURR_X_COUNT		0xFFC00DF0	/* DMA Channel 7 Current X Count Register				*/
+#define DMA7_CURR_Y_COUNT		0xFFC00DF8	/* DMA Channel 7 Current Y Count Register				*/
+
+#define DMA8_NEXT_DESC_PTR		0xFFC00E00	/* DMA Channel 8 Next Descriptor Pointer Register		*/
+#define DMA8_START_ADDR			0xFFC00E04	/* DMA Channel 8 Start Address Register					*/
+#define DMA8_CONFIG				0xFFC00E08	/* DMA Channel 8 Configuration Register					*/
+#define DMA8_X_COUNT			0xFFC00E10	/* DMA Channel 8 X Count Register						*/
+#define DMA8_X_MODIFY			0xFFC00E14	/* DMA Channel 8 X Modify Register						*/
+#define DMA8_Y_COUNT			0xFFC00E18	/* DMA Channel 8 Y Count Register						*/
+#define DMA8_Y_MODIFY			0xFFC00E1C	/* DMA Channel 8 Y Modify Register						*/
+#define DMA8_CURR_DESC_PTR		0xFFC00E20	/* DMA Channel 8 Current Descriptor Pointer Register	*/
+#define DMA8_CURR_ADDR			0xFFC00E24	/* DMA Channel 8 Current Address Register				*/
+#define DMA8_IRQ_STATUS			0xFFC00E28	/* DMA Channel 8 Interrupt/Status Register				*/
+#define DMA8_PERIPHERAL_MAP		0xFFC00E2C	/* DMA Channel 8 Peripheral Map Register				*/
+#define DMA8_CURR_X_COUNT		0xFFC00E30	/* DMA Channel 8 Current X Count Register				*/
+#define DMA8_CURR_Y_COUNT		0xFFC00E38	/* DMA Channel 8 Current Y Count Register				*/
+
+#define DMA9_NEXT_DESC_PTR		0xFFC00E40	/* DMA Channel 9 Next Descriptor Pointer Register		*/
+#define DMA9_START_ADDR			0xFFC00E44	/* DMA Channel 9 Start Address Register					*/
+#define DMA9_CONFIG				0xFFC00E48	/* DMA Channel 9 Configuration Register					*/
+#define DMA9_X_COUNT			0xFFC00E50	/* DMA Channel 9 X Count Register						*/
+#define DMA9_X_MODIFY			0xFFC00E54	/* DMA Channel 9 X Modify Register						*/
+#define DMA9_Y_COUNT			0xFFC00E58	/* DMA Channel 9 Y Count Register						*/
+#define DMA9_Y_MODIFY			0xFFC00E5C	/* DMA Channel 9 Y Modify Register						*/
+#define DMA9_CURR_DESC_PTR		0xFFC00E60	/* DMA Channel 9 Current Descriptor Pointer Register	*/
+#define DMA9_CURR_ADDR			0xFFC00E64	/* DMA Channel 9 Current Address Register				*/
+#define DMA9_IRQ_STATUS			0xFFC00E68	/* DMA Channel 9 Interrupt/Status Register				*/
+#define DMA9_PERIPHERAL_MAP		0xFFC00E6C	/* DMA Channel 9 Peripheral Map Register				*/
+#define DMA9_CURR_X_COUNT		0xFFC00E70	/* DMA Channel 9 Current X Count Register				*/
+#define DMA9_CURR_Y_COUNT		0xFFC00E78	/* DMA Channel 9 Current Y Count Register				*/
+
+#define DMA10_NEXT_DESC_PTR		0xFFC00E80	/* DMA Channel 10 Next Descriptor Pointer Register		*/
+#define DMA10_START_ADDR		0xFFC00E84	/* DMA Channel 10 Start Address Register				*/
+#define DMA10_CONFIG			0xFFC00E88	/* DMA Channel 10 Configuration Register				*/
+#define DMA10_X_COUNT			0xFFC00E90	/* DMA Channel 10 X Count Register						*/
+#define DMA10_X_MODIFY			0xFFC00E94	/* DMA Channel 10 X Modify Register						*/
+#define DMA10_Y_COUNT			0xFFC00E98	/* DMA Channel 10 Y Count Register						*/
+#define DMA10_Y_MODIFY			0xFFC00E9C	/* DMA Channel 10 Y Modify Register						*/
+#define DMA10_CURR_DESC_PTR		0xFFC00EA0	/* DMA Channel 10 Current Descriptor Pointer Register	*/
+#define DMA10_CURR_ADDR			0xFFC00EA4	/* DMA Channel 10 Current Address Register				*/
+#define DMA10_IRQ_STATUS		0xFFC00EA8	/* DMA Channel 10 Interrupt/Status Register				*/
+#define DMA10_PERIPHERAL_MAP	0xFFC00EAC	/* DMA Channel 10 Peripheral Map Register				*/
+#define DMA10_CURR_X_COUNT		0xFFC00EB0	/* DMA Channel 10 Current X Count Register				*/
+#define DMA10_CURR_Y_COUNT		0xFFC00EB8	/* DMA Channel 10 Current Y Count Register				*/
+
+#define DMA11_NEXT_DESC_PTR		0xFFC00EC0	/* DMA Channel 11 Next Descriptor Pointer Register		*/
+#define DMA11_START_ADDR		0xFFC00EC4	/* DMA Channel 11 Start Address Register				*/
+#define DMA11_CONFIG			0xFFC00EC8	/* DMA Channel 11 Configuration Register				*/
+#define DMA11_X_COUNT			0xFFC00ED0	/* DMA Channel 11 X Count Register						*/
+#define DMA11_X_MODIFY			0xFFC00ED4	/* DMA Channel 11 X Modify Register						*/
+#define DMA11_Y_COUNT			0xFFC00ED8	/* DMA Channel 11 Y Count Register						*/
+#define DMA11_Y_MODIFY			0xFFC00EDC	/* DMA Channel 11 Y Modify Register						*/
+#define DMA11_CURR_DESC_PTR		0xFFC00EE0	/* DMA Channel 11 Current Descriptor Pointer Register	*/
+#define DMA11_CURR_ADDR			0xFFC00EE4	/* DMA Channel 11 Current Address Register				*/
+#define DMA11_IRQ_STATUS		0xFFC00EE8	/* DMA Channel 11 Interrupt/Status Register				*/
+#define DMA11_PERIPHERAL_MAP	0xFFC00EEC	/* DMA Channel 11 Peripheral Map Register				*/
+#define DMA11_CURR_X_COUNT		0xFFC00EF0	/* DMA Channel 11 Current X Count Register				*/
+#define DMA11_CURR_Y_COUNT		0xFFC00EF8	/* DMA Channel 11 Current Y Count Register				*/
+
+#define MDMA_D0_NEXT_DESC_PTR	0xFFC00F00	/* MemDMA Stream 0 Destination Next Descriptor Pointer Register		*/
+#define MDMA_D0_START_ADDR		0xFFC00F04	/* MemDMA Stream 0 Destination Start Address Register				*/
+#define MDMA_D0_CONFIG			0xFFC00F08	/* MemDMA Stream 0 Destination Configuration Register				*/
+#define MDMA_D0_X_COUNT			0xFFC00F10	/* MemDMA Stream 0 Destination X Count Register						*/
+#define MDMA_D0_X_MODIFY		0xFFC00F14	/* MemDMA Stream 0 Destination X Modify Register					*/
+#define MDMA_D0_Y_COUNT			0xFFC00F18	/* MemDMA Stream 0 Destination Y Count Register						*/
+#define MDMA_D0_Y_MODIFY		0xFFC00F1C	/* MemDMA Stream 0 Destination Y Modify Register					*/
+#define MDMA_D0_CURR_DESC_PTR	0xFFC00F20	/* MemDMA Stream 0 Destination Current Descriptor Pointer Register	*/
+#define MDMA_D0_CURR_ADDR		0xFFC00F24	/* MemDMA Stream 0 Destination Current Address Register				*/
+#define MDMA_D0_IRQ_STATUS		0xFFC00F28	/* MemDMA Stream 0 Destination Interrupt/Status Register			*/
+#define MDMA_D0_PERIPHERAL_MAP	0xFFC00F2C	/* MemDMA Stream 0 Destination Peripheral Map Register				*/
+#define MDMA_D0_CURR_X_COUNT	0xFFC00F30	/* MemDMA Stream 0 Destination Current X Count Register				*/
+#define MDMA_D0_CURR_Y_COUNT	0xFFC00F38	/* MemDMA Stream 0 Destination Current Y Count Register				*/
+
+#define MDMA_S0_NEXT_DESC_PTR	0xFFC00F40	/* MemDMA Stream 0 Source Next Descriptor Pointer Register			*/
+#define MDMA_S0_START_ADDR		0xFFC00F44	/* MemDMA Stream 0 Source Start Address Register					*/
+#define MDMA_S0_CONFIG			0xFFC00F48	/* MemDMA Stream 0 Source Configuration Register					*/
+#define MDMA_S0_X_COUNT			0xFFC00F50	/* MemDMA Stream 0 Source X Count Register							*/
+#define MDMA_S0_X_MODIFY		0xFFC00F54	/* MemDMA Stream 0 Source X Modify Register							*/
+#define MDMA_S0_Y_COUNT			0xFFC00F58	/* MemDMA Stream 0 Source Y Count Register							*/
+#define MDMA_S0_Y_MODIFY		0xFFC00F5C	/* MemDMA Stream 0 Source Y Modify Register							*/
+#define MDMA_S0_CURR_DESC_PTR	0xFFC00F60	/* MemDMA Stream 0 Source Current Descriptor Pointer Register		*/
+#define MDMA_S0_CURR_ADDR		0xFFC00F64	/* MemDMA Stream 0 Source Current Address Register					*/
+#define MDMA_S0_IRQ_STATUS		0xFFC00F68	/* MemDMA Stream 0 Source Interrupt/Status Register					*/
+#define MDMA_S0_PERIPHERAL_MAP	0xFFC00F6C	/* MemDMA Stream 0 Source Peripheral Map Register					*/
+#define MDMA_S0_CURR_X_COUNT	0xFFC00F70	/* MemDMA Stream 0 Source Current X Count Register					*/
+#define MDMA_S0_CURR_Y_COUNT	0xFFC00F78	/* MemDMA Stream 0 Source Current Y Count Register					*/
+
+#define MDMA_D1_NEXT_DESC_PTR	0xFFC00F80	/* MemDMA Stream 1 Destination Next Descriptor Pointer Register		*/
+#define MDMA_D1_START_ADDR		0xFFC00F84	/* MemDMA Stream 1 Destination Start Address Register				*/
+#define MDMA_D1_CONFIG			0xFFC00F88	/* MemDMA Stream 1 Destination Configuration Register				*/
+#define MDMA_D1_X_COUNT			0xFFC00F90	/* MemDMA Stream 1 Destination X Count Register						*/
+#define MDMA_D1_X_MODIFY		0xFFC00F94	/* MemDMA Stream 1 Destination X Modify Register					*/
+#define MDMA_D1_Y_COUNT			0xFFC00F98	/* MemDMA Stream 1 Destination Y Count Register						*/
+#define MDMA_D1_Y_MODIFY		0xFFC00F9C	/* MemDMA Stream 1 Destination Y Modify Register					*/
+#define MDMA_D1_CURR_DESC_PTR	0xFFC00FA0	/* MemDMA Stream 1 Destination Current Descriptor Pointer Register	*/
+#define MDMA_D1_CURR_ADDR		0xFFC00FA4	/* MemDMA Stream 1 Destination Current Address Register				*/
+#define MDMA_D1_IRQ_STATUS		0xFFC00FA8	/* MemDMA Stream 1 Destination Interrupt/Status Register			*/
+#define MDMA_D1_PERIPHERAL_MAP	0xFFC00FAC	/* MemDMA Stream 1 Destination Peripheral Map Register				*/
+#define MDMA_D1_CURR_X_COUNT	0xFFC00FB0	/* MemDMA Stream 1 Destination Current X Count Register				*/
+#define MDMA_D1_CURR_Y_COUNT	0xFFC00FB8	/* MemDMA Stream 1 Destination Current Y Count Register				*/
+
+#define MDMA_S1_NEXT_DESC_PTR	0xFFC00FC0	/* MemDMA Stream 1 Source Next Descriptor Pointer Register			*/
+#define MDMA_S1_START_ADDR		0xFFC00FC4	/* MemDMA Stream 1 Source Start Address Register					*/
+#define MDMA_S1_CONFIG			0xFFC00FC8	/* MemDMA Stream 1 Source Configuration Register					*/
+#define MDMA_S1_X_COUNT			0xFFC00FD0	/* MemDMA Stream 1 Source X Count Register							*/
+#define MDMA_S1_X_MODIFY		0xFFC00FD4	/* MemDMA Stream 1 Source X Modify Register							*/
+#define MDMA_S1_Y_COUNT			0xFFC00FD8	/* MemDMA Stream 1 Source Y Count Register							*/
+#define MDMA_S1_Y_MODIFY		0xFFC00FDC	/* MemDMA Stream 1 Source Y Modify Register							*/
+#define MDMA_S1_CURR_DESC_PTR	0xFFC00FE0	/* MemDMA Stream 1 Source Current Descriptor Pointer Register		*/
+#define MDMA_S1_CURR_ADDR		0xFFC00FE4	/* MemDMA Stream 1 Source Current Address Register					*/
+#define MDMA_S1_IRQ_STATUS		0xFFC00FE8	/* MemDMA Stream 1 Source Interrupt/Status Register					*/
+#define MDMA_S1_PERIPHERAL_MAP	0xFFC00FEC	/* MemDMA Stream 1 Source Peripheral Map Register					*/
+#define MDMA_S1_CURR_X_COUNT	0xFFC00FF0	/* MemDMA Stream 1 Source Current X Count Register					*/
+#define MDMA_S1_CURR_Y_COUNT	0xFFC00FF8	/* MemDMA Stream 1 Source Current Y Count Register					*/
+
+
+/* Parallel Peripheral Interface (0xFFC01000 - 0xFFC010FF)				*/
+#define PPI_CONTROL			0xFFC01000	/* PPI Control Register			*/
+#define PPI_STATUS			0xFFC01004	/* PPI Status Register			*/
+#define PPI_COUNT			0xFFC01008	/* PPI Transfer Count Register	*/
+#define PPI_DELAY			0xFFC0100C	/* PPI Delay Count Register		*/
+#define PPI_FRAME			0xFFC01010	/* PPI Frame Length Register	*/
+
+
+/* Two-Wire Interface		(0xFFC01400 - 0xFFC014FF)								*/
+#define TWI0_REGBASE			0xFFC01400
+#define TWI0_CLKDIV			0xFFC01400	/* Serial Clock Divider Register			*/
+#define TWI0_CONTROL			0xFFC01404	/* TWI Control Register						*/
+#define TWI0_SLAVE_CTL		0xFFC01408	/* Slave Mode Control Register				*/
+#define TWI0_SLAVE_STAT		0xFFC0140C	/* Slave Mode Status Register				*/
+#define TWI0_SLAVE_ADDR		0xFFC01410	/* Slave Mode Address Register				*/
+#define TWI0_MASTER_CTL		0xFFC01414	/* Master Mode Control Register				*/
+#define TWI0_MASTER_STAT		0xFFC01418	/* Master Mode Status Register				*/
+#define TWI0_MASTER_ADDR		0xFFC0141C	/* Master Mode Address Register				*/
+#define TWI0_INT_STAT		0xFFC01420	/* TWI Interrupt Status Register			*/
+#define TWI0_INT_MASK		0xFFC01424	/* TWI Master Interrupt Mask Register		*/
+#define TWI0_FIFO_CTL		0xFFC01428	/* FIFO Control Register					*/
+#define TWI0_FIFO_STAT		0xFFC0142C	/* FIFO Status Register						*/
+#define TWI0_XMT_DATA8		0xFFC01480	/* FIFO Transmit Data Single Byte Register	*/
+#define TWI0_XMT_DATA16		0xFFC01484	/* FIFO Transmit Data Double Byte Register	*/
+#define TWI0_RCV_DATA8		0xFFC01488	/* FIFO Receive Data Single Byte Register	*/
+#define TWI0_RCV_DATA16		0xFFC0148C	/* FIFO Receive Data Double Byte Register	*/
+
+
+/* General Purpose I/O Port G (0xFFC01500 - 0xFFC015FF)												*/
+#define PORTGIO					0xFFC01500	/* Port G I/O Pin State Specify Register				*/
+#define PORTGIO_CLEAR			0xFFC01504	/* Port G I/O Peripheral Interrupt Clear Register		*/
+#define PORTGIO_SET				0xFFC01508	/* Port G I/O Peripheral Interrupt Set Register			*/
+#define PORTGIO_TOGGLE			0xFFC0150C	/* Port G I/O Pin State Toggle Register					*/
+#define PORTGIO_MASKA			0xFFC01510	/* Port G I/O Mask State Specify Interrupt A Register	*/
+#define PORTGIO_MASKA_CLEAR		0xFFC01514	/* Port G I/O Mask Disable Interrupt A Register			*/
+#define PORTGIO_MASKA_SET		0xFFC01518	/* Port G I/O Mask Enable Interrupt A Register			*/
+#define PORTGIO_MASKA_TOGGLE	0xFFC0151C	/* Port G I/O Mask Toggle Enable Interrupt A Register	*/
+#define PORTGIO_MASKB			0xFFC01520	/* Port G I/O Mask State Specify Interrupt B Register	*/
+#define PORTGIO_MASKB_CLEAR		0xFFC01524	/* Port G I/O Mask Disable Interrupt B Register			*/
+#define PORTGIO_MASKB_SET		0xFFC01528	/* Port G I/O Mask Enable Interrupt B Register			*/
+#define PORTGIO_MASKB_TOGGLE	0xFFC0152C	/* Port G I/O Mask Toggle Enable Interrupt B Register	*/
+#define PORTGIO_DIR				0xFFC01530	/* Port G I/O Direction Register						*/
+#define PORTGIO_POLAR			0xFFC01534	/* Port G I/O Source Polarity Register					*/
+#define PORTGIO_EDGE			0xFFC01538	/* Port G I/O Source Sensitivity Register				*/
+#define PORTGIO_BOTH			0xFFC0153C	/* Port G I/O Set on BOTH Edges Register				*/
+#define PORTGIO_INEN			0xFFC01540	/* Port G I/O Input Enable Register						*/
+
+
+/* General Purpose I/O Port H (0xFFC01700 - 0xFFC017FF)												*/
+#define PORTHIO					0xFFC01700	/* Port H I/O Pin State Specify Register				*/
+#define PORTHIO_CLEAR			0xFFC01704	/* Port H I/O Peripheral Interrupt Clear Register		*/
+#define PORTHIO_SET				0xFFC01708	/* Port H I/O Peripheral Interrupt Set Register			*/
+#define PORTHIO_TOGGLE			0xFFC0170C	/* Port H I/O Pin State Toggle Register					*/
+#define PORTHIO_MASKA			0xFFC01710	/* Port H I/O Mask State Specify Interrupt A Register	*/
+#define PORTHIO_MASKA_CLEAR		0xFFC01714	/* Port H I/O Mask Disable Interrupt A Register			*/
+#define PORTHIO_MASKA_SET		0xFFC01718	/* Port H I/O Mask Enable Interrupt A Register			*/
+#define PORTHIO_MASKA_TOGGLE	0xFFC0171C	/* Port H I/O Mask Toggle Enable Interrupt A Register	*/
+#define PORTHIO_MASKB			0xFFC01720	/* Port H I/O Mask State Specify Interrupt B Register	*/
+#define PORTHIO_MASKB_CLEAR		0xFFC01724	/* Port H I/O Mask Disable Interrupt B Register			*/
+#define PORTHIO_MASKB_SET		0xFFC01728	/* Port H I/O Mask Enable Interrupt B Register			*/
+#define PORTHIO_MASKB_TOGGLE	0xFFC0172C	/* Port H I/O Mask Toggle Enable Interrupt B Register	*/
+#define PORTHIO_DIR				0xFFC01730	/* Port H I/O Direction Register						*/
+#define PORTHIO_POLAR			0xFFC01734	/* Port H I/O Source Polarity Register					*/
+#define PORTHIO_EDGE			0xFFC01738	/* Port H I/O Source Sensitivity Register				*/
+#define PORTHIO_BOTH			0xFFC0173C	/* Port H I/O Set on BOTH Edges Register				*/
+#define PORTHIO_INEN			0xFFC01740	/* Port H I/O Input Enable Register						*/
+
+
+/* UART1 Controller		(0xFFC02000 - 0xFFC020FF)								*/
+#define UART1_THR			0xFFC02000	/* Transmit Holding register			*/
+#define UART1_RBR			0xFFC02000	/* Receive Buffer register				*/
+#define UART1_DLL			0xFFC02000	/* Divisor Latch (Low-Byte)				*/
+#define UART1_IER			0xFFC02004	/* Interrupt Enable Register			*/
+#define UART1_DLH			0xFFC02004	/* Divisor Latch (High-Byte)			*/
+#define UART1_IIR			0xFFC02008	/* Interrupt Identification Register	*/
+#define UART1_LCR			0xFFC0200C	/* Line Control Register				*/
+#define UART1_MCR			0xFFC02010	/* Modem Control Register				*/
+#define UART1_LSR			0xFFC02014	/* Line Status Register					*/
+#define UART1_MSR			0xFFC02018	/* Modem Status Register				*/
+#define UART1_SCR			0xFFC0201C	/* SCR Scratch Register					*/
+#define UART1_GCTL			0xFFC02024	/* Global Control Register				*/
+
+
+/* CAN register sets from defBF534.h are omitted (CAN is not present on ADSP-BF52x processors) */
+
+/* Pin Control Registers	(0xFFC03200 - 0xFFC032FF)											*/
+#define PORTF_FER			0xFFC03200	/* Port F Function Enable Register (Alternate/Flag*)	*/
+#define PORTG_FER			0xFFC03204	/* Port G Function Enable Register (Alternate/Flag*)	*/
+#define PORTH_FER			0xFFC03208	/* Port H Function Enable Register (Alternate/Flag*)	*/
+#define BFIN_PORT_MUX			0xFFC0320C	/* Port Multiplexer Control Register					*/
+
+
+/* Handshake MDMA Registers	(0xFFC03300 - 0xFFC033FF)										*/
+#define HMDMA0_CONTROL		0xFFC03300	/* Handshake MDMA0 Control Register					*/
+#define HMDMA0_ECINIT		0xFFC03304	/* HMDMA0 Initial Edge Count Register				*/
+#define HMDMA0_BCINIT		0xFFC03308	/* HMDMA0 Initial Block Count Register				*/
+#define HMDMA0_ECURGENT		0xFFC0330C	/* HMDMA0 Urgent Edge Count Threshold Register		*/
+#define HMDMA0_ECOVERFLOW	0xFFC03310	/* HMDMA0 Edge Count Overflow Interrupt Register	*/
+#define HMDMA0_ECOUNT		0xFFC03314	/* HMDMA0 Current Edge Count Register				*/
+#define HMDMA0_BCOUNT		0xFFC03318	/* HMDMA0 Current Block Count Register				*/
+
+#define HMDMA1_CONTROL		0xFFC03340	/* Handshake MDMA1 Control Register					*/
+#define HMDMA1_ECINIT		0xFFC03344	/* HMDMA1 Initial Edge Count Register				*/
+#define HMDMA1_BCINIT		0xFFC03348	/* HMDMA1 Initial Block Count Register				*/
+#define HMDMA1_ECURGENT		0xFFC0334C	/* HMDMA1 Urgent Edge Count Threshold Register		*/
+#define HMDMA1_ECOVERFLOW	0xFFC03350	/* HMDMA1 Edge Count Overflow Interrupt Register	*/
+#define HMDMA1_ECOUNT		0xFFC03354	/* HMDMA1 Current Edge Count Register				*/
+#define HMDMA1_BCOUNT		0xFFC03358	/* HMDMA1 Current Block Count Register				*/
+
+/* GPIO PIN mux (0xFFC03210 - 0xFFC03288) */
+#define PORTF_MUX               0xFFC03210      /* Port F mux control */
+#define PORTG_MUX               0xFFC03214      /* Port G mux control */
+#define PORTH_MUX               0xFFC03218      /* Port H mux control */
+#define PORTF_DRIVE             0xFFC03220      /* Port F drive strength control */
+#define PORTG_DRIVE             0xFFC03224      /* Port G drive strength control */
+#define PORTH_DRIVE             0xFFC03228      /* Port H drive strength control */
+#define PORTF_SLEW              0xFFC03230      /* Port F slew control */
+#define PORTG_SLEW              0xFFC03234      /* Port G slew control */
+#define PORTH_SLEW              0xFFC03238      /* Port H slew control */
+#define PORTF_HYSTERISIS        0xFFC03240      /* Port F Schmitt trigger control */
+#define PORTG_HYSTERISIS        0xFFC03244      /* Port G Schmitt trigger control */
+#define PORTH_HYSTERISIS        0xFFC03248      /* Port H Schmitt trigger control */
+#define MISCPORT_DRIVE          0xFFC03280      /* Misc Port drive strength control */
+#define MISCPORT_SLEW           0xFFC03284      /* Misc Port slew control */
+#define MISCPORT_HYSTERISIS     0xFFC03288      /* Misc Port Schmitt trigger control */
+
+
+/***********************************************************************************
+** System MMR Register Bits And Macros
+**
+** Disclaimer:	All macros are intended to make C and Assembly code more readable.
+**				Use these macros carefully, as any that do left shifts for field
+**				depositing will result in the lower order bits being destroyed.  Any
+**				macro that shifts left to properly position the bit-field should be
+**				used as part of an OR to initialize a register and NOT as a dynamic
+**				modifier UNLESS the lower order bits are saved and ORed back in when
+**				the macro is used.
+*************************************************************************************/
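As a rough illustration (editorial sketch, not part of the diff): the warning above applies to field-depositing macros such as the Px_IVG() interrupt-assignment helpers defined further down in this header. A typical safe pattern, assuming the usual bfin_read_/bfin_write_ accessors from the matching cdef header, is to OR the shifted fields together when first programming the register, and to save and OR back the other fields when changing a single one later:

	/* initialize SIC_IAR0 in one shot by OR-ing the shifted fields */
	bfin_write_SIC_IAR0(P0_IVG(7) | P1_IVG(7) | P2_IVG(8) | P3_IVG(9) |
			    P4_IVG(9) | P5_IVG(10) | P6_IVG(10) | P7_IVG(11));

	/* later, change only peripheral #2: clear its 4-bit field, then OR the new value back in */
	bfin_write_SIC_IAR0((bfin_read_SIC_IAR0() & ~(0xF << 0x8)) | P2_IVG(9));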
+
+/* CHIPID Masks */
+#define CHIPID_VERSION         0xF0000000
+#define CHIPID_FAMILY          0x0FFFF000
+#define CHIPID_MANUFACTURE     0x00000FFE
+
+/* SWRST Masks																		*/
+#define SYSTEM_RESET		0x0007	/* Initiates A System Software Reset			*/
+#define	DOUBLE_FAULT		0x0008	/* Core Double Fault Causes Reset				*/
+#define RESET_DOUBLE		0x2000	/* SW Reset Generated By Core Double-Fault		*/
+#define RESET_WDOG			0x4000	/* SW Reset Generated By Watchdog Timer			*/
+#define RESET_SOFTWARE		0x8000	/* SW Reset Occurred Since Last Read Of SWRST	*/
+
+/* SYSCR Masks																				*/
+#define BMODE				0x0007	/* Boot Mode - Latched During HW Reset From Mode Pins	*/
+#define	NOBOOT				0x0010	/* Execute From L1 or ASYNC Bank 0 When BMODE = 0		*/
+
+
+/* *************  SYSTEM INTERRUPT CONTROLLER MASKS *************************************/
+/* Peripheral Masks For SIC_ISR, SIC_IWR, SIC_IMASK										*/
+
+#if 0
+#define IRQ_PLL_WAKEUP	0x00000001	/* PLL Wakeup Interrupt			 					*/
+
+#define IRQ_ERROR1      0x00000002  /* Error Interrupt (DMA, DMARx Block, DMARx Overflow) */
+#define IRQ_ERROR2      0x00000004  /* Error Interrupt (CAN, Ethernet, SPORTx, PPI, SPI, UARTx) */
+#define IRQ_RTC			0x00000008	/* Real Time Clock Interrupt 						*/
+#define IRQ_DMA0		0x00000010	/* DMA Channel 0 (PPI) Interrupt 					*/
+#define IRQ_DMA3		0x00000020	/* DMA Channel 3 (SPORT0 RX) Interrupt 				*/
+#define IRQ_DMA4		0x00000040	/* DMA Channel 4 (SPORT0 TX) Interrupt 				*/
+#define IRQ_DMA5		0x00000080	/* DMA Channel 5 (SPORT1 RX) Interrupt 				*/
+
+#define IRQ_DMA6		0x00000100	/* DMA Channel 6 (SPORT1 TX) Interrupt 		 		*/
+#define IRQ_TWI			0x00000200	/* TWI Interrupt									*/
+#define IRQ_DMA7		0x00000400	/* DMA Channel 7 (SPI) Interrupt 					*/
+#define IRQ_DMA8		0x00000800	/* DMA Channel 8 (UART0 RX) Interrupt 				*/
+#define IRQ_DMA9		0x00001000	/* DMA Channel 9 (UART0 TX) Interrupt 				*/
+#define IRQ_DMA10		0x00002000	/* DMA Channel 10 (UART1 RX) Interrupt 				*/
+#define IRQ_DMA11		0x00004000	/* DMA Channel 11 (UART1 TX) Interrupt 				*/
+#define IRQ_CAN_RX		0x00008000	/* CAN Receive Interrupt 							*/
+
+#define IRQ_CAN_TX		0x00010000	/* CAN Transmit Interrupt  							*/
+#define IRQ_DMA1		0x00020000	/* DMA Channel 1 (Ethernet RX) Interrupt 			*/
+#define IRQ_PFA_PORTH	0x00020000	/* PF Port H (PF47:32) Interrupt A 					*/
+#define IRQ_DMA2		0x00040000	/* DMA Channel 2 (Ethernet TX) Interrupt 			*/
+#define IRQ_PFB_PORTH	0x00040000	/* PF Port H (PF47:32) Interrupt B 					*/
+#define IRQ_TIMER0		0x00080000	/* Timer 0 Interrupt								*/
+#define IRQ_TIMER1		0x00100000	/* Timer 1 Interrupt 								*/
+#define IRQ_TIMER2		0x00200000	/* Timer 2 Interrupt 								*/
+#define IRQ_TIMER3		0x00400000	/* Timer 3 Interrupt 								*/
+#define IRQ_TIMER4		0x00800000	/* Timer 4 Interrupt 								*/
+
+#define IRQ_TIMER5		0x01000000	/* Timer 5 Interrupt 								*/
+#define IRQ_TIMER6		0x02000000	/* Timer 6 Interrupt 								*/
+#define IRQ_TIMER7		0x04000000	/* Timer 7 Interrupt 								*/
+#define IRQ_PFA_PORTFG	0x08000000	/* PF Ports F&G (PF31:0) Interrupt A 				*/
+#define IRQ_PFB_PORTF	0x80000000	/* PF Port F (PF15:0) Interrupt B 					*/
+#define IRQ_DMA12		0x20000000	/* DMA Channels 12 (MDMA1 Source) RX Interrupt 		*/
+#define IRQ_DMA13		0x20000000	/* DMA Channels 13 (MDMA1 Destination) TX Interrupt */
+#define IRQ_DMA14		0x40000000	/* DMA Channels 14 (MDMA0 Source) RX Interrupt 		*/
+#define IRQ_DMA15		0x40000000	/* DMA Channels 15 (MDMA0 Destination) TX Interrupt */
+#define IRQ_WDOG		0x80000000	/* Software Watchdog Timer Interrupt 				*/
+#define IRQ_PFB_PORTG	0x10000000	/* PF Port G (PF31:16) Interrupt B 					*/
+#endif
+
+/* SIC_IAR0 Macros															*/
+#define P0_IVG(x)		(((x)&0xF)-7)			/* Peripheral #0 assigned IVG #x 	*/
+#define P1_IVG(x)		(((x)&0xF)-7) << 0x4	/* Peripheral #1 assigned IVG #x 	*/
+#define P2_IVG(x)		(((x)&0xF)-7) << 0x8	/* Peripheral #2 assigned IVG #x 	*/
+#define P3_IVG(x)		(((x)&0xF)-7) << 0xC	/* Peripheral #3 assigned IVG #x	*/
+#define P4_IVG(x)		(((x)&0xF)-7) << 0x10	/* Peripheral #4 assigned IVG #x	*/
+#define P5_IVG(x)		(((x)&0xF)-7) << 0x14	/* Peripheral #5 assigned IVG #x	*/
+#define P6_IVG(x)		(((x)&0xF)-7) << 0x18	/* Peripheral #6 assigned IVG #x	*/
+#define P7_IVG(x)		(((x)&0xF)-7) << 0x1C	/* Peripheral #7 assigned IVG #x	*/
+
+/* SIC_IAR1 Macros															*/
+#define P8_IVG(x)		(((x)&0xF)-7)			/* Peripheral #8 assigned IVG #x 	*/
+#define P9_IVG(x)		(((x)&0xF)-7) << 0x4	/* Peripheral #9 assigned IVG #x 	*/
+#define P10_IVG(x)		(((x)&0xF)-7) << 0x8	/* Peripheral #10 assigned IVG #x	*/
+#define P11_IVG(x)		(((x)&0xF)-7) << 0xC	/* Peripheral #11 assigned IVG #x 	*/
+#define P12_IVG(x)		(((x)&0xF)-7) << 0x10	/* Peripheral #12 assigned IVG #x	*/
+#define P13_IVG(x)		(((x)&0xF)-7) << 0x14	/* Peripheral #13 assigned IVG #x	*/
+#define P14_IVG(x)		(((x)&0xF)-7) << 0x18	/* Peripheral #14 assigned IVG #x	*/
+#define P15_IVG(x)		(((x)&0xF)-7) << 0x1C	/* Peripheral #15 assigned IVG #x	*/
+
+/* SIC_IAR2 Macros															*/
+#define P16_IVG(x)		(((x)&0xF)-7)			/* Peripheral #16 assigned IVG #x	*/
+#define P17_IVG(x)		(((x)&0xF)-7) << 0x4	/* Peripheral #17 assigned IVG #x	*/
+#define P18_IVG(x)		(((x)&0xF)-7) << 0x8	/* Peripheral #18 assigned IVG #x	*/
+#define P19_IVG(x)		(((x)&0xF)-7) << 0xC	/* Peripheral #19 assigned IVG #x	*/
+#define P20_IVG(x)		(((x)&0xF)-7) << 0x10	/* Peripheral #20 assigned IVG #x	*/
+#define P21_IVG(x)		(((x)&0xF)-7) << 0x14	/* Peripheral #21 assigned IVG #x	*/
+#define P22_IVG(x)		(((x)&0xF)-7) << 0x18	/* Peripheral #22 assigned IVG #x	*/
+#define P23_IVG(x)		(((x)&0xF)-7) << 0x1C	/* Peripheral #23 assigned IVG #x	*/
+
+/* SIC_IAR3 Macros															*/
+#define P24_IVG(x)		(((x)&0xF)-7)			/* Peripheral #24 assigned IVG #x	*/
+#define P25_IVG(x)		(((x)&0xF)-7) << 0x4	/* Peripheral #25 assigned IVG #x	*/
+#define P26_IVG(x)		(((x)&0xF)-7) << 0x8	/* Peripheral #26 assigned IVG #x	*/
+#define P27_IVG(x)		(((x)&0xF)-7) << 0xC	/* Peripheral #27 assigned IVG #x	*/
+#define P28_IVG(x)		(((x)&0xF)-7) << 0x10	/* Peripheral #28 assigned IVG #x	*/
+#define P29_IVG(x)		(((x)&0xF)-7) << 0x14	/* Peripheral #29 assigned IVG #x	*/
+#define P30_IVG(x)		(((x)&0xF)-7) << 0x18	/* Peripheral #30 assigned IVG #x	*/
+#define P31_IVG(x)		(((x)&0xF)-7) << 0x1C	/* Peripheral #31 assigned IVG #x	*/
+
+
+/* SIC_IMASK Masks																		*/
+#define SIC_UNMASK_ALL	0x00000000					/* Unmask all peripheral interrupts	*/
+#define SIC_MASK_ALL	0xFFFFFFFF					/* Mask all peripheral interrupts	*/
+#define SIC_MASK(x)		(1 << ((x)&0x1F))					/* Mask Peripheral #x interrupt		*/
+#define SIC_UNMASK(x)	(0xFFFFFFFF ^ (1 << ((x)&0x1F)))	/* Unmask Peripheral #x interrupt	*/
+
+/* SIC_IWR Masks																		*/
+#define IWR_DISABLE_ALL	0x00000000					/* Wakeup Disable all peripherals	*/
+#define IWR_ENABLE_ALL	0xFFFFFFFF					/* Wakeup Enable all peripherals	*/
+#define IWR_ENABLE(x)	(1 << ((x)&0x1F))					/* Wakeup Enable Peripheral #x		*/
+#define IWR_DISABLE(x)	(0xFFFFFFFF ^ (1 << ((x)&0x1F))) 	/* Wakeup Disable Peripheral #x		*/
+
+/*  ****************  GENERAL PURPOSE TIMER MASKS  **********************/
+/* TIMER_ENABLE Masks													*/
+#define TIMEN0			0x0001		/* Enable Timer 0					*/
+#define TIMEN1			0x0002		/* Enable Timer 1					*/
+#define TIMEN2			0x0004		/* Enable Timer 2					*/
+#define TIMEN3			0x0008		/* Enable Timer 3					*/
+#define TIMEN4			0x0010		/* Enable Timer 4					*/
+#define TIMEN5			0x0020		/* Enable Timer 5					*/
+#define TIMEN6			0x0040		/* Enable Timer 6					*/
+#define TIMEN7			0x0080		/* Enable Timer 7					*/
+
+/* TIMER_DISABLE Masks													*/
+#define TIMDIS0			TIMEN0		/* Disable Timer 0					*/
+#define TIMDIS1			TIMEN1		/* Disable Timer 1					*/
+#define TIMDIS2			TIMEN2		/* Disable Timer 2					*/
+#define TIMDIS3			TIMEN3		/* Disable Timer 3					*/
+#define TIMDIS4			TIMEN4		/* Disable Timer 4					*/
+#define TIMDIS5			TIMEN5		/* Disable Timer 5					*/
+#define TIMDIS6			TIMEN6		/* Disable Timer 6					*/
+#define TIMDIS7			TIMEN7		/* Disable Timer 7					*/
+
+/* TIMER_STATUS Masks													*/
+#define TIMIL0			0x00000001	/* Timer 0 Interrupt				*/
+#define TIMIL1			0x00000002	/* Timer 1 Interrupt				*/
+#define TIMIL2			0x00000004	/* Timer 2 Interrupt				*/
+#define TIMIL3			0x00000008	/* Timer 3 Interrupt				*/
+#define TOVF_ERR0		0x00000010	/* Timer 0 Counter Overflow			*/
+#define TOVF_ERR1		0x00000020	/* Timer 1 Counter Overflow			*/
+#define TOVF_ERR2		0x00000040	/* Timer 2 Counter Overflow			*/
+#define TOVF_ERR3		0x00000080	/* Timer 3 Counter Overflow			*/
+#define TRUN0			0x00001000	/* Timer 0 Slave Enable Status		*/
+#define TRUN1			0x00002000	/* Timer 1 Slave Enable Status		*/
+#define TRUN2			0x00004000	/* Timer 2 Slave Enable Status		*/
+#define TRUN3			0x00008000	/* Timer 3 Slave Enable Status		*/
+#define TIMIL4			0x00010000	/* Timer 4 Interrupt				*/
+#define TIMIL5			0x00020000	/* Timer 5 Interrupt				*/
+#define TIMIL6			0x00040000	/* Timer 6 Interrupt				*/
+#define TIMIL7			0x00080000	/* Timer 7 Interrupt				*/
+#define TOVF_ERR4		0x00100000	/* Timer 4 Counter Overflow			*/
+#define TOVF_ERR5		0x00200000	/* Timer 5 Counter Overflow			*/
+#define TOVF_ERR6		0x00400000	/* Timer 6 Counter Overflow			*/
+#define TOVF_ERR7		0x00800000	/* Timer 7 Counter Overflow			*/
+#define TRUN4			0x10000000	/* Timer 4 Slave Enable Status		*/
+#define TRUN5			0x20000000	/* Timer 5 Slave Enable Status		*/
+#define TRUN6			0x40000000	/* Timer 6 Slave Enable Status		*/
+#define TRUN7			0x80000000	/* Timer 7 Slave Enable Status		*/
+
+/* Alternate Deprecated Macros Provided For Backwards Code Compatibility */
+#define TOVL_ERR0 TOVF_ERR0
+#define TOVL_ERR1 TOVF_ERR1
+#define TOVL_ERR2 TOVF_ERR2
+#define TOVL_ERR3 TOVF_ERR3
+#define TOVL_ERR4 TOVF_ERR4
+#define TOVL_ERR5 TOVF_ERR5
+#define TOVL_ERR6 TOVF_ERR6
+#define TOVL_ERR7 TOVF_ERR7
+
+/* TIMERx_CONFIG Masks													*/
+#define PWM_OUT			0x0001	/* Pulse-Width Modulation Output Mode	*/
+#define WDTH_CAP		0x0002	/* Width Capture Input Mode				*/
+#define EXT_CLK			0x0003	/* External Clock Mode					*/
+#define PULSE_HI		0x0004	/* Action Pulse (Positive/Negative*)	*/
+#define PERIOD_CNT		0x0008	/* Period Count							*/
+#define IRQ_ENA			0x0010	/* Interrupt Request Enable				*/
+#define TIN_SEL			0x0020	/* Timer Input Select					*/
+#define OUT_DIS			0x0040	/* Output Pad Disable					*/
+#define CLK_SEL			0x0080	/* Timer Clock Select					*/
+#define TOGGLE_HI		0x0100	/* PWM_OUT PULSE_HI Toggle Mode			*/
+#define EMU_RUN			0x0200	/* Emulation Behavior Select			*/
+#define ERR_TYP			0xC000	/* Error Type							*/
+
+/* *********************  ASYNCHRONOUS MEMORY CONTROLLER MASKS  *************************/
+/* EBIU_AMGCTL Masks																	*/
+#define AMCKEN			0x0001		/* Enable CLKOUT									*/
+#define	AMBEN_NONE		0x0000		/* All Banks Disabled								*/
+#define AMBEN_B0		0x0002		/* Enable Async Memory Bank 0 only					*/
+#define AMBEN_B0_B1		0x0004		/* Enable Async Memory Banks 0 & 1 only				*/
+#define AMBEN_B0_B1_B2	0x0006		/* Enable Async Memory Banks 0, 1, and 2			*/
+#define AMBEN_ALL		0x0008		/* Enable Async Memory Banks (all) 0, 1, 2, and 3	*/
+
+/* EBIU_AMBCTL0 Masks																	*/
+#define B0RDYEN			0x00000001  /* Bank 0 (B0) RDY Enable							*/
+#define B0RDYPOL		0x00000002  /* B0 RDY Active High								*/
+#define B0TT_1			0x00000004  /* B0 Transition Time (Read to Write) = 1 cycle		*/
+#define B0TT_2			0x00000008  /* B0 Transition Time (Read to Write) = 2 cycles	*/
+#define B0TT_3			0x0000000C  /* B0 Transition Time (Read to Write) = 3 cycles	*/
+#define B0TT_4			0x00000000  /* B0 Transition Time (Read to Write) = 4 cycles	*/
+#define B0ST_1			0x00000010  /* B0 Setup Time (AOE to Read/Write) = 1 cycle		*/
+#define B0ST_2			0x00000020  /* B0 Setup Time (AOE to Read/Write) = 2 cycles		*/
+#define B0ST_3			0x00000030  /* B0 Setup Time (AOE to Read/Write) = 3 cycles		*/
+#define B0ST_4			0x00000000  /* B0 Setup Time (AOE to Read/Write) = 4 cycles		*/
+#define B0HT_1			0x00000040  /* B0 Hold Time (~Read/Write to ~AOE) = 1 cycle		*/
+#define B0HT_2			0x00000080  /* B0 Hold Time (~Read/Write to ~AOE) = 2 cycles	*/
+#define B0HT_3			0x000000C0  /* B0 Hold Time (~Read/Write to ~AOE) = 3 cycles	*/
+#define B0HT_0			0x00000000  /* B0 Hold Time (~Read/Write to ~AOE) = 0 cycles	*/
+#define B0RAT_1			0x00000100  /* B0 Read Access Time = 1 cycle					*/
+#define B0RAT_2			0x00000200  /* B0 Read Access Time = 2 cycles					*/
+#define B0RAT_3			0x00000300  /* B0 Read Access Time = 3 cycles					*/
+#define B0RAT_4			0x00000400  /* B0 Read Access Time = 4 cycles					*/
+#define B0RAT_5			0x00000500  /* B0 Read Access Time = 5 cycles					*/
+#define B0RAT_6			0x00000600  /* B0 Read Access Time = 6 cycles					*/
+#define B0RAT_7			0x00000700  /* B0 Read Access Time = 7 cycles					*/
+#define B0RAT_8			0x00000800  /* B0 Read Access Time = 8 cycles					*/
+#define B0RAT_9			0x00000900  /* B0 Read Access Time = 9 cycles					*/
+#define B0RAT_10		0x00000A00  /* B0 Read Access Time = 10 cycles					*/
+#define B0RAT_11		0x00000B00  /* B0 Read Access Time = 11 cycles					*/
+#define B0RAT_12		0x00000C00  /* B0 Read Access Time = 12 cycles					*/
+#define B0RAT_13		0x00000D00  /* B0 Read Access Time = 13 cycles					*/
+#define B0RAT_14		0x00000E00  /* B0 Read Access Time = 14 cycles					*/
+#define B0RAT_15		0x00000F00  /* B0 Read Access Time = 15 cycles					*/
+#define B0WAT_1			0x00001000  /* B0 Write Access Time = 1 cycle					*/
+#define B0WAT_2			0x00002000  /* B0 Write Access Time = 2 cycles					*/
+#define B0WAT_3			0x00003000  /* B0 Write Access Time = 3 cycles					*/
+#define B0WAT_4			0x00004000  /* B0 Write Access Time = 4 cycles					*/
+#define B0WAT_5			0x00005000  /* B0 Write Access Time = 5 cycles					*/
+#define B0WAT_6			0x00006000  /* B0 Write Access Time = 6 cycles					*/
+#define B0WAT_7			0x00007000  /* B0 Write Access Time = 7 cycles					*/
+#define B0WAT_8			0x00008000  /* B0 Write Access Time = 8 cycles					*/
+#define B0WAT_9			0x00009000  /* B0 Write Access Time = 9 cycles					*/
+#define B0WAT_10		0x0000A000  /* B0 Write Access Time = 10 cycles					*/
+#define B0WAT_11		0x0000B000  /* B0 Write Access Time = 11 cycles					*/
+#define B0WAT_12		0x0000C000  /* B0 Write Access Time = 12 cycles					*/
+#define B0WAT_13		0x0000D000  /* B0 Write Access Time = 13 cycles					*/
+#define B0WAT_14		0x0000E000  /* B0 Write Access Time = 14 cycles					*/
+#define B0WAT_15		0x0000F000  /* B0 Write Access Time = 15 cycles					*/
+
+#define B1RDYEN			0x00010000  /* Bank 1 (B1) RDY Enable                       	*/
+#define B1RDYPOL		0x00020000  /* B1 RDY Active High                           	*/
+#define B1TT_1			0x00040000  /* B1 Transition Time (Read to Write) = 1 cycle 	*/
+#define B1TT_2			0x00080000  /* B1 Transition Time (Read to Write) = 2 cycles	*/
+#define B1TT_3			0x000C0000  /* B1 Transition Time (Read to Write) = 3 cycles	*/
+#define B1TT_4			0x00000000  /* B1 Transition Time (Read to Write) = 4 cycles	*/
+#define B1ST_1			0x00100000  /* B1 Setup Time (AOE to Read/Write) = 1 cycle  	*/
+#define B1ST_2			0x00200000  /* B1 Setup Time (AOE to Read/Write) = 2 cycles 	*/
+#define B1ST_3			0x00300000  /* B1 Setup Time (AOE to Read/Write) = 3 cycles 	*/
+#define B1ST_4			0x00000000  /* B1 Setup Time (AOE to Read/Write) = 4 cycles 	*/
+#define B1HT_1			0x00400000  /* B1 Hold Time (~Read/Write to ~AOE) = 1 cycle 	*/
+#define B1HT_2			0x00800000  /* B1 Hold Time (~Read/Write to ~AOE) = 2 cycles	*/
+#define B1HT_3			0x00C00000  /* B1 Hold Time (~Read/Write to ~AOE) = 3 cycles	*/
+#define B1HT_0			0x00000000  /* B1 Hold Time (~Read/Write to ~AOE) = 0 cycles	*/
+#define B1RAT_1			0x01000000  /* B1 Read Access Time = 1 cycle					*/
+#define B1RAT_2			0x02000000  /* B1 Read Access Time = 2 cycles					*/
+#define B1RAT_3			0x03000000  /* B1 Read Access Time = 3 cycles					*/
+#define B1RAT_4			0x04000000  /* B1 Read Access Time = 4 cycles					*/
+#define B1RAT_5			0x05000000  /* B1 Read Access Time = 5 cycles					*/
+#define B1RAT_6			0x06000000  /* B1 Read Access Time = 6 cycles					*/
+#define B1RAT_7			0x07000000  /* B1 Read Access Time = 7 cycles					*/
+#define B1RAT_8			0x08000000  /* B1 Read Access Time = 8 cycles					*/
+#define B1RAT_9			0x09000000  /* B1 Read Access Time = 9 cycles					*/
+#define B1RAT_10		0x0A000000  /* B1 Read Access Time = 10 cycles					*/
+#define B1RAT_11		0x0B000000  /* B1 Read Access Time = 11 cycles					*/
+#define B1RAT_12		0x0C000000  /* B1 Read Access Time = 12 cycles					*/
+#define B1RAT_13		0x0D000000  /* B1 Read Access Time = 13 cycles					*/
+#define B1RAT_14		0x0E000000  /* B1 Read Access Time = 14 cycles					*/
+#define B1RAT_15		0x0F000000  /* B1 Read Access Time = 15 cycles					*/
+#define B1WAT_1			0x10000000  /* B1 Write Access Time = 1 cycle					*/
+#define B1WAT_2			0x20000000  /* B1 Write Access Time = 2 cycles					*/
+#define B1WAT_3			0x30000000  /* B1 Write Access Time = 3 cycles					*/
+#define B1WAT_4			0x40000000  /* B1 Write Access Time = 4 cycles					*/
+#define B1WAT_5			0x50000000  /* B1 Write Access Time = 5 cycles					*/
+#define B1WAT_6			0x60000000  /* B1 Write Access Time = 6 cycles					*/
+#define B1WAT_7			0x70000000  /* B1 Write Access Time = 7 cycles					*/
+#define B1WAT_8			0x80000000  /* B1 Write Access Time = 8 cycles					*/
+#define B1WAT_9			0x90000000  /* B1 Write Access Time = 9 cycles					*/
+#define B1WAT_10		0xA0000000  /* B1 Write Access Time = 10 cycles					*/
+#define B1WAT_11		0xB0000000  /* B1 Write Access Time = 11 cycles					*/
+#define B1WAT_12		0xC0000000  /* B1 Write Access Time = 12 cycles					*/
+#define B1WAT_13		0xD0000000  /* B1 Write Access Time = 13 cycles					*/
+#define B1WAT_14		0xE0000000  /* B1 Write Access Time = 14 cycles					*/
+#define B1WAT_15		0xF0000000  /* B1 Write Access Time = 15 cycles					*/
+
+/* EBIU_AMBCTL1 Masks																	*/
+#define B2RDYEN			0x00000001  /* Bank 2 (B2) RDY Enable							*/
+#define B2RDYPOL		0x00000002  /* B2 RDY Active High								*/
+#define B2TT_1			0x00000004  /* B2 Transition Time (Read to Write) = 1 cycle		*/
+#define B2TT_2			0x00000008  /* B2 Transition Time (Read to Write) = 2 cycles	*/
+#define B2TT_3			0x0000000C  /* B2 Transition Time (Read to Write) = 3 cycles	*/
+#define B2TT_4			0x00000000  /* B2 Transition Time (Read to Write) = 4 cycles	*/
+#define B2ST_1			0x00000010  /* B2 Setup Time (AOE to Read/Write) = 1 cycle		*/
+#define B2ST_2			0x00000020  /* B2 Setup Time (AOE to Read/Write) = 2 cycles		*/
+#define B2ST_3			0x00000030  /* B2 Setup Time (AOE to Read/Write) = 3 cycles		*/
+#define B2ST_4			0x00000000  /* B2 Setup Time (AOE to Read/Write) = 4 cycles		*/
+#define B2HT_1			0x00000040  /* B2 Hold Time (~Read/Write to ~AOE) = 1 cycle		*/
+#define B2HT_2			0x00000080  /* B2 Hold Time (~Read/Write to ~AOE) = 2 cycles	*/
+#define B2HT_3			0x000000C0  /* B2 Hold Time (~Read/Write to ~AOE) = 3 cycles	*/
+#define B2HT_0			0x00000000  /* B2 Hold Time (~Read/Write to ~AOE) = 0 cycles	*/
+#define B2RAT_1			0x00000100  /* B2 Read Access Time = 1 cycle					*/
+#define B2RAT_2			0x00000200  /* B2 Read Access Time = 2 cycles					*/
+#define B2RAT_3			0x00000300  /* B2 Read Access Time = 3 cycles					*/
+#define B2RAT_4			0x00000400  /* B2 Read Access Time = 4 cycles					*/
+#define B2RAT_5			0x00000500  /* B2 Read Access Time = 5 cycles					*/
+#define B2RAT_6			0x00000600  /* B2 Read Access Time = 6 cycles					*/
+#define B2RAT_7			0x00000700  /* B2 Read Access Time = 7 cycles					*/
+#define B2RAT_8			0x00000800  /* B2 Read Access Time = 8 cycles					*/
+#define B2RAT_9			0x00000900  /* B2 Read Access Time = 9 cycles					*/
+#define B2RAT_10		0x00000A00  /* B2 Read Access Time = 10 cycles					*/
+#define B2RAT_11		0x00000B00  /* B2 Read Access Time = 11 cycles					*/
+#define B2RAT_12		0x00000C00  /* B2 Read Access Time = 12 cycles					*/
+#define B2RAT_13		0x00000D00  /* B2 Read Access Time = 13 cycles					*/
+#define B2RAT_14		0x00000E00  /* B2 Read Access Time = 14 cycles					*/
+#define B2RAT_15		0x00000F00  /* B2 Read Access Time = 15 cycles					*/
+#define B2WAT_1			0x00001000  /* B2 Write Access Time = 1 cycle					*/
+#define B2WAT_2			0x00002000  /* B2 Write Access Time = 2 cycles					*/
+#define B2WAT_3			0x00003000  /* B2 Write Access Time = 3 cycles					*/
+#define B2WAT_4			0x00004000  /* B2 Write Access Time = 4 cycles					*/
+#define B2WAT_5			0x00005000  /* B2 Write Access Time = 5 cycles					*/
+#define B2WAT_6			0x00006000  /* B2 Write Access Time = 6 cycles					*/
+#define B2WAT_7			0x00007000  /* B2 Write Access Time = 7 cycles					*/
+#define B2WAT_8			0x00008000  /* B2 Write Access Time = 8 cycles					*/
+#define B2WAT_9			0x00009000  /* B2 Write Access Time = 9 cycles					*/
+#define B2WAT_10		0x0000A000  /* B2 Write Access Time = 10 cycles					*/
+#define B2WAT_11		0x0000B000  /* B2 Write Access Time = 11 cycles					*/
+#define B2WAT_12		0x0000C000  /* B2 Write Access Time = 12 cycles					*/
+#define B2WAT_13		0x0000D000  /* B2 Write Access Time = 13 cycles					*/
+#define B2WAT_14		0x0000E000  /* B2 Write Access Time = 14 cycles					*/
+#define B2WAT_15		0x0000F000  /* B2 Write Access Time = 15 cycles					*/
+
+#define B3RDYEN			0x00010000  /* Bank 3 (B3) RDY Enable							*/
+#define B3RDYPOL		0x00020000  /* B3 RDY Active High								*/
+#define B3TT_1			0x00040000  /* B3 Transition Time (Read to Write) = 1 cycle		*/
+#define B3TT_2			0x00080000  /* B3 Transition Time (Read to Write) = 2 cycles	*/
+#define B3TT_3			0x000C0000  /* B3 Transition Time (Read to Write) = 3 cycles	*/
+#define B3TT_4			0x00000000  /* B3 Transition Time (Read to Write) = 4 cycles	*/
+#define B3ST_1			0x00100000  /* B3 Setup Time (AOE to Read/Write) = 1 cycle		*/
+#define B3ST_2			0x00200000  /* B3 Setup Time (AOE to Read/Write) = 2 cycles		*/
+#define B3ST_3			0x00300000  /* B3 Setup Time (AOE to Read/Write) = 3 cycles		*/
+#define B3ST_4			0x00000000  /* B3 Setup Time (AOE to Read/Write) = 4 cycles		*/
+#define B3HT_1			0x00400000  /* B3 Hold Time (~Read/Write to ~AOE) = 1 cycle		*/
+#define B3HT_2			0x00800000  /* B3 Hold Time (~Read/Write to ~AOE) = 2 cycles	*/
+#define B3HT_3			0x00C00000  /* B3 Hold Time (~Read/Write to ~AOE) = 3 cycles	*/
+#define B3HT_0			0x00000000  /* B3 Hold Time (~Read/Write to ~AOE) = 0 cycles	*/
+#define B3RAT_1			0x01000000  /* B3 Read Access Time = 1 cycle					*/
+#define B3RAT_2			0x02000000  /* B3 Read Access Time = 2 cycles					*/
+#define B3RAT_3			0x03000000  /* B3 Read Access Time = 3 cycles					*/
+#define B3RAT_4			0x04000000  /* B3 Read Access Time = 4 cycles					*/
+#define B3RAT_5			0x05000000  /* B3 Read Access Time = 5 cycles					*/
+#define B3RAT_6			0x06000000  /* B3 Read Access Time = 6 cycles					*/
+#define B3RAT_7			0x07000000  /* B3 Read Access Time = 7 cycles					*/
+#define B3RAT_8			0x08000000  /* B3 Read Access Time = 8 cycles					*/
+#define B3RAT_9			0x09000000  /* B3 Read Access Time = 9 cycles					*/
+#define B3RAT_10		0x0A000000  /* B3 Read Access Time = 10 cycles					*/
+#define B3RAT_11		0x0B000000  /* B3 Read Access Time = 11 cycles					*/
+#define B3RAT_12		0x0C000000  /* B3 Read Access Time = 12 cycles					*/
+#define B3RAT_13		0x0D000000  /* B3 Read Access Time = 13 cycles					*/
+#define B3RAT_14		0x0E000000  /* B3 Read Access Time = 14 cycles					*/
+#define B3RAT_15		0x0F000000  /* B3 Read Access Time = 15 cycles					*/
+#define B3WAT_1			0x10000000  /* B3 Write Access Time = 1 cycle					*/
+#define B3WAT_2			0x20000000  /* B3 Write Access Time = 2 cycles					*/
+#define B3WAT_3			0x30000000  /* B3 Write Access Time = 3 cycles					*/
+#define B3WAT_4			0x40000000  /* B3 Write Access Time = 4 cycles					*/
+#define B3WAT_5			0x50000000  /* B3 Write Access Time = 5 cycles					*/
+#define B3WAT_6			0x60000000  /* B3 Write Access Time = 6 cycles					*/
+#define B3WAT_7			0x70000000  /* B3 Write Access Time = 7 cycles					*/
+#define B3WAT_8			0x80000000  /* B3 Write Access Time = 8 cycles					*/
+#define B3WAT_9			0x90000000  /* B3 Write Access Time = 9 cycles					*/
+#define B3WAT_10		0xA0000000  /* B3 Write Access Time = 10 cycles					*/
+#define B3WAT_11		0xB0000000  /* B3 Write Access Time = 11 cycles					*/
+#define B3WAT_12		0xC0000000  /* B3 Write Access Time = 12 cycles					*/
+#define B3WAT_13		0xD0000000  /* B3 Write Access Time = 13 cycles					*/
+#define B3WAT_14		0xE0000000  /* B3 Write Access Time = 14 cycles					*/
+#define B3WAT_15		0xF0000000  /* B3 Write Access Time = 15 cycles					*/
+
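+/*
+ * Illustrative usage sketch (not part of the original header): the per-bank
+ * masks above are OR'd together into a single EBIU_AMBCTL0/1 value, which is
+ * typically written through a bfin_write_*() MMR accessor (accessor name
+ * assumed here).  A hypothetical Bank 0 setup might be:
+ *
+ *	bfin_write_EBIU_AMBCTL0(B0RDYEN | B0TT_2 | B0ST_2 | B0HT_1 |
+ *				B0RAT_8 | B0WAT_8);
+ */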
+
+/* **********************  SDRAM CONTROLLER MASKS  **********************************************/
+/* EBIU_SDGCTL Masks																			*/
+#define SCTLE			0x00000001	/* Enable SDRAM Signals										*/
+#define CL_2			0x00000008	/* SDRAM CAS Latency = 2 cycles								*/
+#define CL_3			0x0000000C	/* SDRAM CAS Latency = 3 cycles								*/
+#define PASR_ALL		0x00000000	/* All 4 SDRAM Banks Refreshed In Self-Refresh				*/
+#define PASR_B0_B1		0x00000010	/* SDRAM Banks 0 and 1 Are Refreshed In Self-Refresh		*/
+#define PASR_B0			0x00000020	/* Only SDRAM Bank 0 Is Refreshed In Self-Refresh			*/
+#define TRAS_1			0x00000040	/* SDRAM tRAS = 1 cycle										*/
+#define TRAS_2			0x00000080	/* SDRAM tRAS = 2 cycles									*/
+#define TRAS_3			0x000000C0	/* SDRAM tRAS = 3 cycles									*/
+#define TRAS_4			0x00000100	/* SDRAM tRAS = 4 cycles									*/
+#define TRAS_5			0x00000140	/* SDRAM tRAS = 5 cycles									*/
+#define TRAS_6			0x00000180	/* SDRAM tRAS = 6 cycles									*/
+#define TRAS_7			0x000001C0	/* SDRAM tRAS = 7 cycles									*/
+#define TRAS_8			0x00000200	/* SDRAM tRAS = 8 cycles									*/
+#define TRAS_9			0x00000240	/* SDRAM tRAS = 9 cycles									*/
+#define TRAS_10			0x00000280	/* SDRAM tRAS = 10 cycles									*/
+#define TRAS_11			0x000002C0	/* SDRAM tRAS = 11 cycles									*/
+#define TRAS_12			0x00000300	/* SDRAM tRAS = 12 cycles									*/
+#define TRAS_13			0x00000340	/* SDRAM tRAS = 13 cycles									*/
+#define TRAS_14			0x00000380	/* SDRAM tRAS = 14 cycles									*/
+#define TRAS_15			0x000003C0	/* SDRAM tRAS = 15 cycles									*/
+#define TRP_1			0x00000800	/* SDRAM tRP = 1 cycle										*/
+#define TRP_2			0x00001000	/* SDRAM tRP = 2 cycles										*/
+#define TRP_3			0x00001800	/* SDRAM tRP = 3 cycles										*/
+#define TRP_4			0x00002000	/* SDRAM tRP = 4 cycles										*/
+#define TRP_5			0x00002800	/* SDRAM tRP = 5 cycles										*/
+#define TRP_6			0x00003000	/* SDRAM tRP = 6 cycles										*/
+#define TRP_7			0x00003800	/* SDRAM tRP = 7 cycles										*/
+#define TRCD_1			0x00008000	/* SDRAM tRCD = 1 cycle										*/
+#define TRCD_2			0x00010000	/* SDRAM tRCD = 2 cycles									*/
+#define TRCD_3			0x00018000	/* SDRAM tRCD = 3 cycles									*/
+#define TRCD_4			0x00020000	/* SDRAM tRCD = 4 cycles									*/
+#define TRCD_5			0x00028000	/* SDRAM tRCD = 5 cycles									*/
+#define TRCD_6			0x00030000	/* SDRAM tRCD = 6 cycles									*/
+#define TRCD_7			0x00038000	/* SDRAM tRCD = 7 cycles									*/
+#define TWR_1			0x00080000	/* SDRAM tWR = 1 cycle										*/
+#define TWR_2			0x00100000	/* SDRAM tWR = 2 cycles										*/
+#define TWR_3			0x00180000	/* SDRAM tWR = 3 cycles										*/
+#define PUPSD			0x00200000	/* Power-Up Start Delay (15 SCLK Cycles Delay)				*/
+#define PSM				0x00400000	/* Power-Up Sequence (Mode Register Before/After* Refresh)	*/
+#define PSS				0x00800000	/* Enable Power-Up Sequence on Next SDRAM Access			*/
+#define SRFS			0x01000000	/* Enable SDRAM Self-Refresh Mode							*/
+#define EBUFE			0x02000000	/* Enable External Buffering Timing							*/
+#define FBBRW			0x04000000	/* Enable Fast Back-To-Back Read To Write					*/
+#define EMREN			0x10000000	/* Extended Mode Register Enable							*/
+#define TCSR			0x20000000	/* Temp-Compensated Self-Refresh Value (85/45* Deg C)		*/
+#define CDDBG			0x40000000	/* Tristate SDRAM Controls During Bus Grant					*/
+
+/* EBIU_SDBCTL Masks																		*/
+#define EBE				0x0001		/* Enable SDRAM External Bank							*/
+#define EBSZ_16			0x0000		/* SDRAM External Bank Size = 16MB	*/
+#define EBSZ_32			0x0002		/* SDRAM External Bank Size = 32MB	*/
+#define EBSZ_64			0x0004		/* SDRAM External Bank Size = 64MB	*/
+#define EBSZ_128		0x0006		/* SDRAM External Bank Size = 128MB		*/
+#define EBSZ_256		0x0008		/* SDRAM External Bank Size = 256MB 	*/
+#define EBSZ_512		0x000A		/* SDRAM External Bank Size = 512MB		*/
+#define EBCAW_8			0x0000		/* SDRAM External Bank Column Address Width = 8 Bits	*/
+#define EBCAW_9			0x0010		/* SDRAM External Bank Column Address Width = 9 Bits	*/
+#define EBCAW_10		0x0020		/* SDRAM External Bank Column Address Width = 10 Bits	*/
+#define EBCAW_11		0x0030		/* SDRAM External Bank Column Address Width = 11 Bits	*/
+
+/* EBIU_SDSTAT Masks														*/
+#define SDCI			0x0001		/* SDRAM Controller Idle 				*/
+#define SDSRA			0x0002		/* SDRAM Self-Refresh Active			*/
+#define SDPUA			0x0004		/* SDRAM Power-Up Active 				*/
+#define SDRS			0x0008		/* SDRAM Will Power-Up On Next Access	*/
+#define SDEASE			0x0010		/* SDRAM EAB Sticky Error Status		*/
+#define BGSTAT			0x0020		/* Bus Grant Status						*/
+
+
+/* **************************  DMA CONTROLLER MASKS  ********************************/
+
+/* DMAx_PERIPHERAL_MAP, MDMA_yy_PERIPHERAL_MAP Masks								*/
+#define CTYPE			0x0040	/* DMA Channel Type Indicator (Memory/Peripheral*)	*/
+#define PMAP			0xF000	/* Peripheral Mapped To This Channel				*/
+#define PMAP_PPI		0x0000	/* 		PPI Port DMA								*/
+#define	PMAP_EMACRX		0x1000	/* 		Ethernet Receive DMA						*/
+#define PMAP_EMACTX		0x2000	/* 		Ethernet Transmit DMA						*/
+#define PMAP_SPORT0RX	0x3000	/* 		SPORT0 Receive DMA							*/
+#define PMAP_SPORT0TX	0x4000	/* 		SPORT0 Transmit DMA							*/
+#define PMAP_SPORT1RX	0x5000	/* 		SPORT1 Receive DMA							*/
+#define PMAP_SPORT1TX	0x6000	/* 		SPORT1 Transmit DMA							*/
+#define PMAP_SPI		0x7000	/* 		SPI Port DMA								*/
+#define PMAP_UART0RX	0x8000	/* 		UART0 Port Receive DMA						*/
+#define PMAP_UART0TX	0x9000	/* 		UART0 Port Transmit DMA						*/
+#define	PMAP_UART1RX	0xA000	/* 		UART1 Port Receive DMA						*/
+#define	PMAP_UART1TX	0xB000	/* 		UART1 Port Transmit DMA						*/
+
+/*  ************  PARALLEL PERIPHERAL INTERFACE (PPI) MASKS *************/
+/*  PPI_CONTROL Masks													*/
+#define PORT_EN			0x0001		/* PPI Port Enable					*/
+#define PORT_DIR		0x0002		/* PPI Port Direction				*/
+#define XFR_TYPE		0x000C		/* PPI Transfer Type				*/
+#define PORT_CFG		0x0030		/* PPI Port Configuration			*/
+#define FLD_SEL			0x0040		/* PPI Active Field Select			*/
+#define PACK_EN			0x0080		/* PPI Packing Mode					*/
+#define DMA32			0x0100		/* PPI 32-bit DMA Enable			*/
+#define SKIP_EN			0x0200		/* PPI Skip Element Enable			*/
+#define SKIP_EO			0x0400		/* PPI Skip Even/Odd Elements		*/
+#define DLEN_8			0x0000		/* Data Length = 8 Bits				*/
+#define DLEN_10			0x0800		/* Data Length = 10 Bits			*/
+#define DLEN_11			0x1000		/* Data Length = 11 Bits			*/
+#define DLEN_12			0x1800		/* Data Length = 12 Bits			*/
+#define DLEN_13			0x2000		/* Data Length = 13 Bits			*/
+#define DLEN_14			0x2800		/* Data Length = 14 Bits			*/
+#define DLEN_15			0x3000		/* Data Length = 15 Bits			*/
+#define DLEN_16			0x3800		/* Data Length = 16 Bits			*/
+#define DLENGTH			0x3800		/* PPI Data Length  */
+#define POLC			0x4000		/* PPI Clock Polarity				*/
+#define POLS			0x8000		/* PPI Frame Sync Polarity			*/
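+
+/*
+ * Illustrative usage sketch (not part of the original header): a PPI_CONTROL
+ * value is built by OR'ing the masks above.  A hypothetical receive-only
+ * setup, packing two 8-bit elements per 16-bit word (leaving PORT_DIR clear
+ * is assumed to select receive mode), might be:
+ *
+ *	u16 ppi_ctl = PORT_EN | PACK_EN | DLEN_8;
+ */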
+
+/* PPI_STATUS Masks														*/
+#define FLD				0x0400		/* Field Indicator					*/
+#define FT_ERR			0x0800		/* Frame Track Error				*/
+#define OVR				0x1000		/* FIFO Overflow Error				*/
+#define UNDR			0x2000		/* FIFO Underrun Error				*/
+#define ERR_DET			0x4000		/* Error Detected Indicator			*/
+#define ERR_NCOR		0x8000		/* Error Not Corrected Indicator	*/
+
+
+/*  ********************  TWO-WIRE INTERFACE (TWI) MASKS  ***********************/
+/* TWI_CLKDIV Macros (Use: *pTWI_CLKDIV = CLKLOW(x)|CLKHI(y);  )				*/
+#define	CLKLOW(x)	((x) & 0xFF)		/* Periods Clock Is Held Low			*/
+#define CLKHI(y)	(((y)&0xFF)<<0x8)	/* Periods Before New Clock Low			*/
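+
+/*
+ * Illustrative usage sketch (not part of the original header): with the
+ * internal time reference prescaled to 10MHz (see PRESCALE below), an SCL of
+ * roughly 100kHz with a 50/50 duty cycle could hypothetically be set as:
+ *
+ *	*pTWI_CLKDIV = CLKLOW(50) | CLKHI(50);
+ */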
+
+/* TWI_PRESCALE Masks															*/
+#define	PRESCALE	0x007F		/* SCLKs Per Internal Time Reference (10MHz)	*/
+#define	TWI_ENA		0x0080		/* TWI Enable									*/
+#define	SCCB		0x0200		/* SCCB Compatibility Enable					*/
+
+/* TWI_SLAVE_CTL Masks															*/
+#define	SEN			0x0001		/* Slave Enable									*/
+#define	SADD_LEN	0x0002		/* Slave Address Length							*/
+#define	STDVAL		0x0004		/* Slave Transmit Data Valid					*/
+#define	NAK			0x0008		/* NAK/ACK* Generated At Conclusion Of Transfer */
+#define	GEN			0x0010		/* General Call Address Matching Enabled		*/
+
+/* TWI_SLAVE_STAT Masks															*/
+#define	SDIR		0x0001		/* Slave Transfer Direction (Transmit/Receive*)	*/
+#define GCALL		0x0002		/* General Call Indicator						*/
+
+/* TWI_MASTER_CTL Masks													*/
+#define	MEN			0x0001		/* Master Mode Enable						*/
+#define	MADD_LEN	0x0002		/* Master Address Length					*/
+#define	MDIR		0x0004		/* Master Transmit Direction (RX/TX*)		*/
+#define	FAST		0x0008		/* Use Fast Mode Timing Specs				*/
+#define	STOP		0x0010		/* Issue Stop Condition						*/
+#define	RSTART		0x0020		/* Repeat Start or Stop* At End Of Transfer	*/
+#define	DCNT		0x3FC0		/* Data Bytes To Transfer					*/
+#define	SDAOVR		0x4000		/* Serial Data Override						*/
+#define	SCLOVR		0x8000		/* Serial Clock Override					*/
+
+/* TWI_MASTER_STAT Masks														*/
+#define	MPROG		0x0001		/* Master Transfer In Progress					*/
+#define	LOSTARB		0x0002		/* Lost Arbitration Indicator (Xfer Aborted)	*/
+#define	ANAK		0x0004		/* Address Not Acknowledged						*/
+#define	DNAK		0x0008		/* Data Not Acknowledged						*/
+#define	BUFRDERR	0x0010		/* Buffer Read Error							*/
+#define	BUFWRERR	0x0020		/* Buffer Write Error							*/
+#define	SDASEN		0x0040		/* Serial Data Sense							*/
+#define	SCLSEN		0x0080		/* Serial Clock Sense							*/
+#define	BUSBUSY		0x0100		/* Bus Busy Indicator							*/
+
+/* TWI_INT_SRC and TWI_INT_ENABLE Masks						*/
+#define	SINIT		0x0001		/* Slave Transfer Initiated	*/
+#define	SCOMP		0x0002		/* Slave Transfer Complete	*/
+#define	SERR		0x0004		/* Slave Transfer Error		*/
+#define	SOVF		0x0008		/* Slave Overflow			*/
+#define	MCOMP		0x0010		/* Master Transfer Complete	*/
+#define	MERR		0x0020		/* Master Transfer Error	*/
+#define	XMTSERV		0x0040		/* Transmit FIFO Service	*/
+#define	RCVSERV		0x0080		/* Receive FIFO Service		*/
+
+/* TWI_FIFO_CTRL Masks												*/
+#define	XMTFLUSH	0x0001		/* Transmit Buffer Flush			*/
+#define	RCVFLUSH	0x0002		/* Receive Buffer Flush				*/
+#define	XMTINTLEN	0x0004		/* Transmit Buffer Interrupt Length	*/
+#define	RCVINTLEN	0x0008		/* Receive Buffer Interrupt Length	*/
+
+/* TWI_FIFO_STAT Masks															*/
+#define	XMTSTAT		0x0003		/* Transmit FIFO Status							*/
+#define	XMT_EMPTY	0x0000		/* 		Transmit FIFO Empty						*/
+#define	XMT_HALF	0x0001		/* 		Transmit FIFO Has 1 Byte To Write		*/
+#define	XMT_FULL	0x0003		/* 		Transmit FIFO Full (2 Bytes To Write)	*/
+
+#define	RCVSTAT		0x000C		/* Receive FIFO Status							*/
+#define	RCV_EMPTY	0x0000		/* 		Receive FIFO Empty						*/
+#define	RCV_HALF	0x0004		/* 		Receive FIFO Has 1 Byte To Read			*/
+#define	RCV_FULL	0x000C		/* 		Receive FIFO Full (2 Bytes To Read)		*/
+
+
+/* Omit CAN masks from defBF534.h */
+
+/*  *******************  PIN CONTROL REGISTER MASKS  ************************/
+/* PORT_MUX Masks															*/
+#define	PJSE			0x0001			/* Port J SPI/SPORT Enable			*/
+#define	PJSE_SPORT		0x0000			/* 		Enable TFS0/DT0PRI			*/
+#define	PJSE_SPI		0x0001			/* 		Enable SPI_SSEL3:2			*/
+
+#define	PJCE(x)			(((x)&0x3)<<1)	/* Port J CAN/SPI/SPORT Enable		*/
+#define	PJCE_SPORT		0x0000			/* 		Enable DR0SEC/DT0SEC		*/
+#define	PJCE_CAN		0x0002			/* 		Enable CAN RX/TX			*/
+#define	PJCE_SPI		0x0004			/* 		Enable SPI_SSEL7			*/
+
+#define	PFDE			0x0008			/* Port F DMA Request Enable		*/
+#define	PFDE_UART		0x0000			/* 		Enable UART0 RX/TX			*/
+#define	PFDE_DMA		0x0008			/* 		Enable DMAR1:0				*/
+
+#define	PFTE			0x0010			/* Port F Timer Enable				*/
+#define	PFTE_UART		0x0000			/*		Enable UART1 RX/TX			*/
+#define	PFTE_TIMER		0x0010			/* 		Enable TMR7:6				*/
+
+#define	PFS6E			0x0020			/* Port F SPI SSEL 6 Enable			*/
+#define	PFS6E_TIMER		0x0000			/*		Enable TMR5					*/
+#define	PFS6E_SPI		0x0020			/* 		Enable SPI_SSEL6			*/
+
+#define	PFS5E			0x0040			/* Port F SPI SSEL 5 Enable			*/
+#define	PFS5E_TIMER		0x0000			/*		Enable TMR4					*/
+#define	PFS5E_SPI		0x0040			/* 		Enable SPI_SSEL5			*/
+
+#define	PFS4E			0x0080			/* Port F SPI SSEL 4 Enable			*/
+#define	PFS4E_TIMER		0x0000			/*		Enable TMR3					*/
+#define	PFS4E_SPI		0x0080			/* 		Enable SPI_SSEL4			*/
+
+#define	PFFE			0x0100			/* Port F PPI Frame Sync Enable		*/
+#define	PFFE_TIMER		0x0000			/* 		Enable TMR2					*/
+#define	PFFE_PPI		0x0100			/* 		Enable PPI FS3				*/
+
+#define	PGSE			0x0200			/* Port G SPORT1 Secondary Enable	*/
+#define	PGSE_PPI		0x0000			/* 		Enable PPI D9:8				*/
+#define	PGSE_SPORT		0x0200			/* 		Enable DR1SEC/DT1SEC		*/
+
+#define	PGRE			0x0400			/* Port G SPORT1 Receive Enable		*/
+#define	PGRE_PPI		0x0000			/* 		Enable PPI D12:10			*/
+#define	PGRE_SPORT		0x0400			/* 		Enable DR1PRI/RFS1/RSCLK1	*/
+
+#define	PGTE			0x0800			/* Port G SPORT1 Transmit Enable	*/
+#define	PGTE_PPI		0x0000			/* 		Enable PPI D15:13			*/
+#define	PGTE_SPORT		0x0800			/* 		Enable DT1PRI/TFS1/TSCLK1	*/
+
+
+/*  ******************  HANDSHAKE DMA (HDMA) MASKS  *********************/
+/* HDMAx_CTL Masks														*/
+#define	HMDMAEN		0x0001	/* Enable Handshake DMA 0/1					*/
+#define	REP			0x0002	/* HDMA Request Polarity					*/
+#define	UTE			0x0004	/* Urgency Threshold Enable					*/
+#define	OIE			0x0010	/* Overflow Interrupt Enable				*/
+#define	BDIE		0x0020	/* Block Done Interrupt Enable				*/
+#define	MBDI		0x0040	/* Mask Block Done IRQ If Pending ECNT		*/
+#define	DRQ			0x0300	/* HDMA Request Type						*/
+#define	DRQ_NONE	0x0000	/* 		No Request							*/
+#define	DRQ_SINGLE	0x0100	/* 		Channels Request Single				*/
+#define	DRQ_MULTI	0x0200	/* 		Channels Request Multi (Default)	*/
+#define	DRQ_URGENT	0x0300	/* 		Channels Request Multi Urgent		*/
+#define	RBC			0x1000	/* Reload BCNT With IBCNT					*/
+#define	PS			0x2000	/* HDMA Pin Status							*/
+#define	OI			0x4000	/* Overflow Interrupt Generated				*/
+#define	BDI			0x8000	/* Block Done Interrupt Generated			*/
+
+/* entry addresses of the user-callable Boot ROM functions */
+
+#define _BOOTROM_RESET 0xEF000000
+#define _BOOTROM_FINAL_INIT 0xEF000002
+#define _BOOTROM_DO_MEMORY_DMA 0xEF000006
+#define _BOOTROM_BOOT_DXE_FLASH 0xEF000008
+#define _BOOTROM_BOOT_DXE_SPI 0xEF00000A
+#define _BOOTROM_BOOT_DXE_TWI 0xEF00000C
+#define _BOOTROM_GET_DXE_ADDRESS_FLASH 0xEF000010
+#define _BOOTROM_GET_DXE_ADDRESS_SPI 0xEF000012
+#define _BOOTROM_GET_DXE_ADDRESS_TWI 0xEF000014
+
+/* Alternate Deprecated Macros Provided For Backwards Code Compatibility */
+#define	PGDE_UART   PFDE_UART
+#define	PGDE_DMA    PFDE_DMA
+#define	CKELOW		SCKELOW
+
+/* ==== end from defBF534.h ==== */
+
+/* HOST Port Registers */
+
+#define                     HOST_CONTROL  0xffc03400   /* HOST Control Register */
+#define                      HOST_STATUS  0xffc03404   /* HOST Status Register */
+#define                     HOST_TIMEOUT  0xffc03408   /* HOST Acknowledge Mode Timeout Register */
+
+/* Counter Registers */
+
+#define                       CNT_CONFIG  0xffc03500   /* Configuration Register */
+#define                        CNT_IMASK  0xffc03504   /* Interrupt Mask Register */
+#define                       CNT_STATUS  0xffc03508   /* Status Register */
+#define                      CNT_COMMAND  0xffc0350c   /* Command Register */
+#define                     CNT_DEBOUNCE  0xffc03510   /* Debounce Register */
+#define                      CNT_COUNTER  0xffc03514   /* Counter Register */
+#define                          CNT_MAX  0xffc03518   /* Maximal Count Register */
+#define                          CNT_MIN  0xffc0351c   /* Minimal Count Register */
+
+/* OTP/FUSE Registers */
+
+#define                      OTP_CONTROL  0xffc03600   /* OTP/Fuse Control Register */
+#define                          OTP_BEN  0xffc03604   /* OTP/Fuse Byte Enable */
+#define                       OTP_STATUS  0xffc03608   /* OTP/Fuse Status */
+#define                       OTP_TIMING  0xffc0360c   /* OTP/Fuse Access Timing */
+
+/* Security Registers */
+
+#define                    SECURE_SYSSWT  0xffc03620   /* Secure System Switches */
+#define                   SECURE_CONTROL  0xffc03624   /* Secure Control */
+#define                    SECURE_STATUS  0xffc03628   /* Secure Status */
+
+/* OTP Read/Write Data Buffer Registers */
+
+#define                        OTP_DATA0  0xffc03680   /* OTP/Fuse Data (OTP_DATA0-3) accesses the fuse read write buffer */
+#define                        OTP_DATA1  0xffc03684   /* OTP/Fuse Data (OTP_DATA0-3) accesses the fuse read write buffer */
+#define                        OTP_DATA2  0xffc03688   /* OTP/Fuse Data (OTP_DATA0-3) accesses the fuse read write buffer */
+#define                        OTP_DATA3  0xffc0368c   /* OTP/Fuse Data (OTP_DATA0-3) accesses the fuse read write buffer */
+
+/* NFC Registers */
+
+#define                          NFC_CTL  0xffc03700   /* NAND Control Register */
+#define                         NFC_STAT  0xffc03704   /* NAND Status Register */
+#define                      NFC_IRQSTAT  0xffc03708   /* NAND Interrupt Status Register */
+#define                      NFC_IRQMASK  0xffc0370c   /* NAND Interrupt Mask Register */
+#define                         NFC_ECC0  0xffc03710   /* NAND ECC Register 0 */
+#define                         NFC_ECC1  0xffc03714   /* NAND ECC Register 1 */
+#define                         NFC_ECC2  0xffc03718   /* NAND ECC Register 2 */
+#define                         NFC_ECC3  0xffc0371c   /* NAND ECC Register 3 */
+#define                        NFC_COUNT  0xffc03720   /* NAND ECC Count Register */
+#define                          NFC_RST  0xffc03724   /* NAND ECC Reset Register */
+#define                        NFC_PGCTL  0xffc03728   /* NAND Page Control Register */
+#define                         NFC_READ  0xffc0372c   /* NAND Read Data Register */
+#define                         NFC_ADDR  0xffc03740   /* NAND Address Register */
+#define                          NFC_CMD  0xffc03744   /* NAND Command Register */
+#define                      NFC_DATA_WR  0xffc03748   /* NAND Data Write Register */
+#define                      NFC_DATA_RD  0xffc0374c   /* NAND Data Read Register */
+
+/* ********************************************************** */
+/*     SINGLE BIT MACRO PAIRS (bit mask and negated one)      */
+/*     and MULTI BIT READ MACROS                              */
+/* ********************************************************** */
+
+/* Bit masks for HOST_CONTROL */
+
+#define                   HOST_CNTR_HOST_EN  0x1        /* Host Enable */
+#define                  HOST_CNTR_nHOST_EN  0x0
+#define                  HOST_CNTR_HOST_END  0x2        /* Host Endianness */
+#define                 HOST_CNTR_nHOST_END  0x0
+#define                 HOST_CNTR_DATA_SIZE  0x4        /* Data Size */
+#define                HOST_CNTR_nDATA_SIZE  0x0
+#define                  HOST_CNTR_HOST_RST  0x8        /* Host Reset */
+#define                 HOST_CNTR_nHOST_RST  0x0
+#define                  HOST_CNTR_HRDY_OVR  0x20       /* Host Ready Override */
+#define                 HOST_CNTR_nHRDY_OVR  0x0
+#define                  HOST_CNTR_INT_MODE  0x40       /* Interrupt Mode */
+#define                 HOST_CNTR_nINT_MODE  0x0
+#define                     HOST_CNTR_BT_EN  0x80       /* Bus Timeout Enable */
+#define                    HOST_CNTR_nBT_EN  0x0
+#define                       HOST_CNTR_EHW  0x100      /* Enable Host Write */
+#define                      HOST_CNTR_nEHW  0x0
+#define                       HOST_CNTR_EHR  0x200      /* Enable Host Read */
+#define                      HOST_CNTR_nEHR  0x0
+#define                       HOST_CNTR_BDR  0x400      /* Burst DMA Requests */
+#define                      HOST_CNTR_nBDR  0x0
+
+/* Bit masks for HOST_STATUS */
+
+#define                     HOST_STAT_READY  0x1        /* DMA Ready */
+#define                    HOST_STAT_nREADY  0x0
+#define                  HOST_STAT_FIFOFULL  0x2        /* FIFO Full */
+#define                 HOST_STAT_nFIFOFULL  0x0
+#define                 HOST_STAT_FIFOEMPTY  0x4        /* FIFO Empty */
+#define                HOST_STAT_nFIFOEMPTY  0x0
+#define                  HOST_STAT_COMPLETE  0x8        /* DMA Complete */
+#define                 HOST_STAT_nCOMPLETE  0x0
+#define                      HOST_STAT_HSHK  0x10       /* Host Handshake */
+#define                     HOST_STAT_nHSHK  0x0
+#define                   HOST_STAT_TIMEOUT  0x20       /* Host Timeout */
+#define                  HOST_STAT_nTIMEOUT  0x0
+#define                      HOST_STAT_HIRQ  0x40       /* Host Interrupt Request */
+#define                     HOST_STAT_nHIRQ  0x0
+#define                HOST_STAT_ALLOW_CNFG  0x80       /* Allow New Configuration */
+#define               HOST_STAT_nALLOW_CNFG  0x0
+#define                   HOST_STAT_DMA_DIR  0x100      /* DMA Direction */
+#define                  HOST_STAT_nDMA_DIR  0x0
+#define                       HOST_STAT_BTE  0x200      /* Bus Timeout Enabled */
+#define                      HOST_STAT_nBTE  0x0
+#define               HOST_STAT_HOSTRD_DONE  0x8000     /* Host Read Completion Interrupt */
+#define              HOST_STAT_nHOSTRD_DONE  0x0
+
+/* Bit masks for HOST_TIMEOUT */
+
+#define             HOST_COUNT_TIMEOUT  0x7ff      /* Host Timeout count */
+
+/* Bit masks for SECURE_SYSSWT */
+
+#define                   EMUDABL  0x1        /* Emulation Disable. */
+#define                  nEMUDABL  0x0
+#define                   RSTDABL  0x2        /* Reset Disable */
+#define                  nRSTDABL  0x0
+#define                   L1IDABL  0x1c       /* L1 Instruction Memory Disable. */
+#define                  L1DADABL  0xe0       /* L1 Data Bank A Memory Disable. */
+#define                  L1DBDABL  0x700      /* L1 Data Bank B Memory Disable. */
+#define                   DMA0OVR  0x800      /* DMA0 Memory Access Override */
+#define                  nDMA0OVR  0x0
+#define                   DMA1OVR  0x1000     /* DMA1 Memory Access Override */
+#define                  nDMA1OVR  0x0
+#define                    EMUOVR  0x4000     /* Emulation Override */
+#define                   nEMUOVR  0x0
+#define                    OTPSEN  0x8000     /* OTP Secrets Enable. */
+#define                   nOTPSEN  0x0
+#define                    L2DABL  0x70000    /* L2 Memory Disable. */
+
+/* Bit masks for SECURE_CONTROL */
+
+#define                   SECURE0  0x1        /* SECURE 0 */
+#define                  nSECURE0  0x0
+#define                   SECURE1  0x2        /* SECURE 1 */
+#define                  nSECURE1  0x0
+#define                   SECURE2  0x4        /* SECURE 2 */
+#define                  nSECURE2  0x0
+#define                   SECURE3  0x8        /* SECURE 3 */
+#define                  nSECURE3  0x0
+
+/* Bit masks for SECURE_STATUS */
+
+#define                   SECMODE  0x3        /* Secured Mode Control State */
+#define                       NMI  0x4        /* Non Maskable Interrupt */
+#define                      nNMI  0x0
+#define                   AFVALID  0x8        /* Authentication Firmware Valid */
+#define                  nAFVALID  0x0
+#define                    AFEXIT  0x10       /* Authentication Firmware Exit */
+#define                   nAFEXIT  0x0
+#define                   SECSTAT  0xe0       /* Secure Status */
 
 #endif /* _DEF_BF522_H */
diff --git a/arch/blackfin/mach-bf527/include/mach/defBF525.h b/arch/blackfin/mach-bf527/include/mach/defBF525.h
index c136f70..cc383ad 100644
--- a/arch/blackfin/mach-bf527/include/mach/defBF525.h
+++ b/arch/blackfin/mach-bf527/include/mach/defBF525.h
@@ -1,5 +1,5 @@
 /*
- * Copyright 2007-2008 Analog Devices Inc.
+ * Copyright 2007-2010 Analog Devices Inc.
  *
  * Licensed under the ADI BSD license or the GPL-2 (or later)
  */
diff --git a/arch/blackfin/mach-bf527/include/mach/defBF527.h b/arch/blackfin/mach-bf527/include/mach/defBF527.h
index 4dd58fb..05369a9 100644
--- a/arch/blackfin/mach-bf527/include/mach/defBF527.h
+++ b/arch/blackfin/mach-bf527/include/mach/defBF527.h
@@ -1,5 +1,5 @@
 /*
- * Copyright 2007-2008 Analog Devices Inc.
+ * Copyright 2007-2010 Analog Devices Inc.
  *
  * Licensed under the ADI BSD license or the GPL-2 (or later)
  */
diff --git a/arch/blackfin/mach-bf527/include/mach/defBF52x_base.h b/arch/blackfin/mach-bf527/include/mach/defBF52x_base.h
deleted file mode 100644
index 0947503..0000000
--- a/arch/blackfin/mach-bf527/include/mach/defBF52x_base.h
+++ /dev/null
@@ -1,1506 +0,0 @@
-/*
- * Copyright 2007-2008 Analog Devices Inc.
- *
- * Licensed under the ADI BSD license or the GPL-2 (or later)
- */
-
-#ifndef _DEF_BF52X_H
-#define _DEF_BF52X_H
-
-
-/* ************************************************************** */
-/*   SYSTEM & MMR ADDRESS DEFINITIONS COMMON TO ALL ADSP-BF52x    */
-/* ************************************************************** */
-
-/* ==== begin from defBF534.h ==== */
-
-/* Clock and System Control	(0xFFC00000 - 0xFFC000FF)								*/
-#define PLL_CTL				0xFFC00000	/* PLL Control Register						*/
-#define PLL_DIV				0xFFC00004	/* PLL Divide Register						*/
-#define VR_CTL				0xFFC00008	/* Voltage Regulator Control Register		*/
-#define PLL_STAT			0xFFC0000C	/* PLL Status Register						*/
-#define PLL_LOCKCNT			0xFFC00010	/* PLL Lock Count Register					*/
-#define CHIPID        0xFFC00014  /* Device ID Register */
-
-
-/* System Interrupt Controller (0xFFC00100 - 0xFFC001FF)							*/
-#define SWRST				0xFFC00100	/* Software Reset Register					*/
-#define SYSCR				0xFFC00104	/* System Configuration Register			*/
-#define SIC_RVECT			0xFFC00108	/* Interrupt Reset Vector Address Register	*/
-
-#define SIC_IMASK0			0xFFC0010C	/* Interrupt Mask Register					*/
-#define SIC_IAR0			0xFFC00110	/* Interrupt Assignment Register 0			*/
-#define SIC_IAR1			0xFFC00114	/* Interrupt Assignment Register 1			*/
-#define SIC_IAR2			0xFFC00118	/* Interrupt Assignment Register 2			*/
-#define SIC_IAR3			0xFFC0011C	/* Interrupt Assignment Register 3			*/
-#define SIC_ISR0				0xFFC00120	/* Interrupt Status Register				*/
-#define SIC_IWR0				0xFFC00124	/* Interrupt Wakeup Register				*/
-
-/* SIC Additions to ADSP-BF52x (0xFFC0014C - 0xFFC00162) */
-#define SIC_IMASK1                      0xFFC0014C     /* Interrupt Mask register of SIC2 */
-#define SIC_IAR4                        0xFFC00150     /* Interrupt Assignment register4 */
-#define SIC_IAR5                        0xFFC00154     /* Interrupt Assignment register5 */
-#define SIC_IAR6                        0xFFC00158     /* Interrupt Assignment register6 */
-#define SIC_IAR7                        0xFFC0015C     /* Interrupt Assignment register7 */
-#define SIC_ISR1                        0xFFC00160     /* Interrupt Statur register */
-#define SIC_IWR1                        0xFFC00164     /* Interrupt Wakeup register */
-
-
-/* Watchdog Timer			(0xFFC00200 - 0xFFC002FF)								*/
-#define WDOG_CTL			0xFFC00200	/* Watchdog Control Register				*/
-#define WDOG_CNT			0xFFC00204	/* Watchdog Count Register					*/
-#define WDOG_STAT			0xFFC00208	/* Watchdog Status Register					*/
-
-
-/* Real Time Clock		(0xFFC00300 - 0xFFC003FF)									*/
-#define RTC_STAT			0xFFC00300	/* RTC Status Register						*/
-#define RTC_ICTL			0xFFC00304	/* RTC Interrupt Control Register			*/
-#define RTC_ISTAT			0xFFC00308	/* RTC Interrupt Status Register			*/
-#define RTC_SWCNT			0xFFC0030C	/* RTC Stopwatch Count Register				*/
-#define RTC_ALARM			0xFFC00310	/* RTC Alarm Time Register					*/
-#define RTC_FAST			0xFFC00314	/* RTC Prescaler Enable Register			*/
-#define RTC_PREN			0xFFC00314	/* RTC Prescaler Enable Alternate Macro		*/
-
-
-/* UART0 Controller		(0xFFC00400 - 0xFFC004FF)									*/
-#define UART0_THR			0xFFC00400	/* Transmit Holding register				*/
-#define UART0_RBR			0xFFC00400	/* Receive Buffer register					*/
-#define UART0_DLL			0xFFC00400	/* Divisor Latch (Low-Byte)					*/
-#define UART0_IER			0xFFC00404	/* Interrupt Enable Register				*/
-#define UART0_DLH			0xFFC00404	/* Divisor Latch (High-Byte)				*/
-#define UART0_IIR			0xFFC00408	/* Interrupt Identification Register		*/
-#define UART0_LCR			0xFFC0040C	/* Line Control Register					*/
-#define UART0_MCR			0xFFC00410	/* Modem Control Register					*/
-#define UART0_LSR			0xFFC00414	/* Line Status Register						*/
-#define UART0_MSR			0xFFC00418	/* Modem Status Register					*/
-#define UART0_SCR			0xFFC0041C	/* SCR Scratch Register						*/
-#define UART0_GCTL			0xFFC00424	/* Global Control Register					*/
-
-
-/* SPI Controller			(0xFFC00500 - 0xFFC005FF)								*/
-#define SPI0_REGBASE			0xFFC00500
-#define SPI_CTL				0xFFC00500	/* SPI Control Register						*/
-#define SPI_FLG				0xFFC00504	/* SPI Flag register						*/
-#define SPI_STAT			0xFFC00508	/* SPI Status register						*/
-#define SPI_TDBR			0xFFC0050C	/* SPI Transmit Data Buffer Register		*/
-#define SPI_RDBR			0xFFC00510	/* SPI Receive Data Buffer Register			*/
-#define SPI_BAUD			0xFFC00514	/* SPI Baud rate Register					*/
-#define SPI_SHADOW			0xFFC00518	/* SPI_RDBR Shadow Register					*/
-
-
-/* TIMER0-7 Registers		(0xFFC00600 - 0xFFC006FF)								*/
-#define TIMER0_CONFIG		0xFFC00600	/* Timer 0 Configuration Register			*/
-#define TIMER0_COUNTER		0xFFC00604	/* Timer 0 Counter Register					*/
-#define TIMER0_PERIOD		0xFFC00608	/* Timer 0 Period Register					*/
-#define TIMER0_WIDTH		0xFFC0060C	/* Timer 0 Width Register					*/
-
-#define TIMER1_CONFIG		0xFFC00610	/* Timer 1 Configuration Register  			*/
-#define TIMER1_COUNTER		0xFFC00614	/* Timer 1 Counter Register        			*/
-#define TIMER1_PERIOD		0xFFC00618	/* Timer 1 Period Register         			*/
-#define TIMER1_WIDTH		0xFFC0061C	/* Timer 1 Width Register          			*/
-
-#define TIMER2_CONFIG		0xFFC00620	/* Timer 2 Configuration Register  			*/
-#define TIMER2_COUNTER		0xFFC00624	/* Timer 2 Counter Register        			*/
-#define TIMER2_PERIOD		0xFFC00628	/* Timer 2 Period Register         			*/
-#define TIMER2_WIDTH		0xFFC0062C	/* Timer 2 Width Register          			*/
-
-#define TIMER3_CONFIG		0xFFC00630	/* Timer 3 Configuration Register			*/
-#define TIMER3_COUNTER		0xFFC00634	/* Timer 3 Counter Register					*/
-#define TIMER3_PERIOD		0xFFC00638	/* Timer 3 Period Register					*/
-#define TIMER3_WIDTH		0xFFC0063C	/* Timer 3 Width Register					*/
-
-#define TIMER4_CONFIG		0xFFC00640	/* Timer 4 Configuration Register  			*/
-#define TIMER4_COUNTER		0xFFC00644	/* Timer 4 Counter Register        			*/
-#define TIMER4_PERIOD		0xFFC00648	/* Timer 4 Period Register         			*/
-#define TIMER4_WIDTH		0xFFC0064C	/* Timer 4 Width Register          			*/
-
-#define TIMER5_CONFIG		0xFFC00650	/* Timer 5 Configuration Register  			*/
-#define TIMER5_COUNTER		0xFFC00654	/* Timer 5 Counter Register        			*/
-#define TIMER5_PERIOD		0xFFC00658	/* Timer 5 Period Register         			*/
-#define TIMER5_WIDTH		0xFFC0065C	/* Timer 5 Width Register          			*/
-
-#define TIMER6_CONFIG		0xFFC00660	/* Timer 6 Configuration Register  			*/
-#define TIMER6_COUNTER		0xFFC00664	/* Timer 6 Counter Register        			*/
-#define TIMER6_PERIOD		0xFFC00668	/* Timer 6 Period Register         			*/
-#define TIMER6_WIDTH		0xFFC0066C	/* Timer 6 Width Register          			*/
-
-#define TIMER7_CONFIG		0xFFC00670	/* Timer 7 Configuration Register  			*/
-#define TIMER7_COUNTER		0xFFC00674	/* Timer 7 Counter Register        			*/
-#define TIMER7_PERIOD		0xFFC00678	/* Timer 7 Period Register         			*/
-#define TIMER7_WIDTH		0xFFC0067C	/* Timer 7 Width Register       			*/
-
-#define TIMER_ENABLE		0xFFC00680	/* Timer Enable Register					*/
-#define TIMER_DISABLE		0xFFC00684	/* Timer Disable Register					*/
-#define TIMER_STATUS		0xFFC00688	/* Timer Status Register					*/
-
-
-/* General Purpose I/O Port F (0xFFC00700 - 0xFFC007FF)												*/
-#define PORTFIO					0xFFC00700	/* Port F I/O Pin State Specify Register				*/
-#define PORTFIO_CLEAR			0xFFC00704	/* Port F I/O Peripheral Interrupt Clear Register		*/
-#define PORTFIO_SET				0xFFC00708	/* Port F I/O Peripheral Interrupt Set Register			*/
-#define PORTFIO_TOGGLE			0xFFC0070C	/* Port F I/O Pin State Toggle Register					*/
-#define PORTFIO_MASKA			0xFFC00710	/* Port F I/O Mask State Specify Interrupt A Register	*/
-#define PORTFIO_MASKA_CLEAR		0xFFC00714	/* Port F I/O Mask Disable Interrupt A Register			*/
-#define PORTFIO_MASKA_SET		0xFFC00718	/* Port F I/O Mask Enable Interrupt A Register			*/
-#define PORTFIO_MASKA_TOGGLE	0xFFC0071C	/* Port F I/O Mask Toggle Enable Interrupt A Register	*/
-#define PORTFIO_MASKB			0xFFC00720	/* Port F I/O Mask State Specify Interrupt B Register	*/
-#define PORTFIO_MASKB_CLEAR		0xFFC00724	/* Port F I/O Mask Disable Interrupt B Register			*/
-#define PORTFIO_MASKB_SET		0xFFC00728	/* Port F I/O Mask Enable Interrupt B Register			*/
-#define PORTFIO_MASKB_TOGGLE	0xFFC0072C	/* Port F I/O Mask Toggle Enable Interrupt B Register	*/
-#define PORTFIO_DIR				0xFFC00730	/* Port F I/O Direction Register						*/
-#define PORTFIO_POLAR			0xFFC00734	/* Port F I/O Source Polarity Register					*/
-#define PORTFIO_EDGE			0xFFC00738	/* Port F I/O Source Sensitivity Register				*/
-#define PORTFIO_BOTH			0xFFC0073C	/* Port F I/O Set on BOTH Edges Register				*/
-#define PORTFIO_INEN			0xFFC00740	/* Port F I/O Input Enable Register 					*/
-
-
-/* SPORT0 Controller		(0xFFC00800 - 0xFFC008FF)										*/
-#define SPORT0_TCR1			0xFFC00800	/* SPORT0 Transmit Configuration 1 Register			*/
-#define SPORT0_TCR2			0xFFC00804	/* SPORT0 Transmit Configuration 2 Register			*/
-#define SPORT0_TCLKDIV		0xFFC00808	/* SPORT0 Transmit Clock Divider					*/
-#define SPORT0_TFSDIV		0xFFC0080C	/* SPORT0 Transmit Frame Sync Divider				*/
-#define SPORT0_TX			0xFFC00810	/* SPORT0 TX Data Register							*/
-#define SPORT0_RX			0xFFC00818	/* SPORT0 RX Data Register							*/
-#define SPORT0_RCR1			0xFFC00820	/* SPORT0 Transmit Configuration 1 Register			*/
-#define SPORT0_RCR2			0xFFC00824	/* SPORT0 Transmit Configuration 2 Register			*/
-#define SPORT0_RCLKDIV		0xFFC00828	/* SPORT0 Receive Clock Divider						*/
-#define SPORT0_RFSDIV		0xFFC0082C	/* SPORT0 Receive Frame Sync Divider				*/
-#define SPORT0_STAT			0xFFC00830	/* SPORT0 Status Register							*/
-#define SPORT0_CHNL			0xFFC00834	/* SPORT0 Current Channel Register					*/
-#define SPORT0_MCMC1		0xFFC00838	/* SPORT0 Multi-Channel Configuration Register 1	*/
-#define SPORT0_MCMC2		0xFFC0083C	/* SPORT0 Multi-Channel Configuration Register 2	*/
-#define SPORT0_MTCS0		0xFFC00840	/* SPORT0 Multi-Channel Transmit Select Register 0	*/
-#define SPORT0_MTCS1		0xFFC00844	/* SPORT0 Multi-Channel Transmit Select Register 1	*/
-#define SPORT0_MTCS2		0xFFC00848	/* SPORT0 Multi-Channel Transmit Select Register 2	*/
-#define SPORT0_MTCS3		0xFFC0084C	/* SPORT0 Multi-Channel Transmit Select Register 3	*/
-#define SPORT0_MRCS0		0xFFC00850	/* SPORT0 Multi-Channel Receive Select Register 0	*/
-#define SPORT0_MRCS1		0xFFC00854	/* SPORT0 Multi-Channel Receive Select Register 1	*/
-#define SPORT0_MRCS2		0xFFC00858	/* SPORT0 Multi-Channel Receive Select Register 2	*/
-#define SPORT0_MRCS3		0xFFC0085C	/* SPORT0 Multi-Channel Receive Select Register 3	*/
-
-
-/* SPORT1 Controller		(0xFFC00900 - 0xFFC009FF)										*/
-#define SPORT1_TCR1			0xFFC00900	/* SPORT1 Transmit Configuration 1 Register			*/
-#define SPORT1_TCR2			0xFFC00904	/* SPORT1 Transmit Configuration 2 Register			*/
-#define SPORT1_TCLKDIV		0xFFC00908	/* SPORT1 Transmit Clock Divider					*/
-#define SPORT1_TFSDIV		0xFFC0090C	/* SPORT1 Transmit Frame Sync Divider				*/
-#define SPORT1_TX			0xFFC00910	/* SPORT1 TX Data Register							*/
-#define SPORT1_RX			0xFFC00918	/* SPORT1 RX Data Register							*/
-#define SPORT1_RCR1			0xFFC00920	/* SPORT1 Transmit Configuration 1 Register			*/
-#define SPORT1_RCR2			0xFFC00924	/* SPORT1 Transmit Configuration 2 Register			*/
-#define SPORT1_RCLKDIV		0xFFC00928	/* SPORT1 Receive Clock Divider						*/
-#define SPORT1_RFSDIV		0xFFC0092C	/* SPORT1 Receive Frame Sync Divider				*/
-#define SPORT1_STAT			0xFFC00930	/* SPORT1 Status Register							*/
-#define SPORT1_CHNL			0xFFC00934	/* SPORT1 Current Channel Register					*/
-#define SPORT1_MCMC1		0xFFC00938	/* SPORT1 Multi-Channel Configuration Register 1	*/
-#define SPORT1_MCMC2		0xFFC0093C	/* SPORT1 Multi-Channel Configuration Register 2	*/
-#define SPORT1_MTCS0		0xFFC00940	/* SPORT1 Multi-Channel Transmit Select Register 0	*/
-#define SPORT1_MTCS1		0xFFC00944	/* SPORT1 Multi-Channel Transmit Select Register 1	*/
-#define SPORT1_MTCS2		0xFFC00948	/* SPORT1 Multi-Channel Transmit Select Register 2	*/
-#define SPORT1_MTCS3		0xFFC0094C	/* SPORT1 Multi-Channel Transmit Select Register 3	*/
-#define SPORT1_MRCS0		0xFFC00950	/* SPORT1 Multi-Channel Receive Select Register 0	*/
-#define SPORT1_MRCS1		0xFFC00954	/* SPORT1 Multi-Channel Receive Select Register 1	*/
-#define SPORT1_MRCS2		0xFFC00958	/* SPORT1 Multi-Channel Receive Select Register 2	*/
-#define SPORT1_MRCS3		0xFFC0095C	/* SPORT1 Multi-Channel Receive Select Register 3	*/
-
-
-/* External Bus Interface Unit (0xFFC00A00 - 0xFFC00AFF)								*/
-#define EBIU_AMGCTL			0xFFC00A00	/* Asynchronous Memory Global Control Register	*/
-#define EBIU_AMBCTL0		0xFFC00A04	/* Asynchronous Memory Bank Control Register 0	*/
-#define EBIU_AMBCTL1		0xFFC00A08	/* Asynchronous Memory Bank Control Register 1	*/
-#define EBIU_SDGCTL			0xFFC00A10	/* SDRAM Global Control Register				*/
-#define EBIU_SDBCTL			0xFFC00A14	/* SDRAM Bank Control Register					*/
-#define EBIU_SDRRC			0xFFC00A18	/* SDRAM Refresh Rate Control Register			*/
-#define EBIU_SDSTAT			0xFFC00A1C	/* SDRAM Status Register						*/
-
-
-/* DMA Traffic Control Registers													*/
-#define DMA_TC_PER			0xFFC00B0C	/* Traffic Control Periods Register			*/
-#define DMA_TC_CNT			0xFFC00B10	/* Traffic Control Current Counts Register	*/
-
-/* Alternate deprecated register names (below) provided for backwards code compatibility */
-#define DMA_TCPER			0xFFC00B0C	/* Traffic Control Periods Register			*/
-#define DMA_TCCNT			0xFFC00B10	/* Traffic Control Current Counts Register	*/
-
-/* DMA Controller (0xFFC00C00 - 0xFFC00FFF)															*/
-#define DMA0_NEXT_DESC_PTR		0xFFC00C00	/* DMA Channel 0 Next Descriptor Pointer Register		*/
-#define DMA0_START_ADDR			0xFFC00C04	/* DMA Channel 0 Start Address Register					*/
-#define DMA0_CONFIG				0xFFC00C08	/* DMA Channel 0 Configuration Register					*/
-#define DMA0_X_COUNT			0xFFC00C10	/* DMA Channel 0 X Count Register						*/
-#define DMA0_X_MODIFY			0xFFC00C14	/* DMA Channel 0 X Modify Register						*/
-#define DMA0_Y_COUNT			0xFFC00C18	/* DMA Channel 0 Y Count Register						*/
-#define DMA0_Y_MODIFY			0xFFC00C1C	/* DMA Channel 0 Y Modify Register						*/
-#define DMA0_CURR_DESC_PTR		0xFFC00C20	/* DMA Channel 0 Current Descriptor Pointer Register	*/
-#define DMA0_CURR_ADDR			0xFFC00C24	/* DMA Channel 0 Current Address Register				*/
-#define DMA0_IRQ_STATUS			0xFFC00C28	/* DMA Channel 0 Interrupt/Status Register				*/
-#define DMA0_PERIPHERAL_MAP		0xFFC00C2C	/* DMA Channel 0 Peripheral Map Register				*/
-#define DMA0_CURR_X_COUNT		0xFFC00C30	/* DMA Channel 0 Current X Count Register				*/
-#define DMA0_CURR_Y_COUNT		0xFFC00C38	/* DMA Channel 0 Current Y Count Register				*/
-
-#define DMA1_NEXT_DESC_PTR		0xFFC00C40	/* DMA Channel 1 Next Descriptor Pointer Register		*/
-#define DMA1_START_ADDR			0xFFC00C44	/* DMA Channel 1 Start Address Register					*/
-#define DMA1_CONFIG				0xFFC00C48	/* DMA Channel 1 Configuration Register					*/
-#define DMA1_X_COUNT			0xFFC00C50	/* DMA Channel 1 X Count Register						*/
-#define DMA1_X_MODIFY			0xFFC00C54	/* DMA Channel 1 X Modify Register						*/
-#define DMA1_Y_COUNT			0xFFC00C58	/* DMA Channel 1 Y Count Register						*/
-#define DMA1_Y_MODIFY			0xFFC00C5C	/* DMA Channel 1 Y Modify Register						*/
-#define DMA1_CURR_DESC_PTR		0xFFC00C60	/* DMA Channel 1 Current Descriptor Pointer Register	*/
-#define DMA1_CURR_ADDR			0xFFC00C64	/* DMA Channel 1 Current Address Register				*/
-#define DMA1_IRQ_STATUS			0xFFC00C68	/* DMA Channel 1 Interrupt/Status Register				*/
-#define DMA1_PERIPHERAL_MAP		0xFFC00C6C	/* DMA Channel 1 Peripheral Map Register				*/
-#define DMA1_CURR_X_COUNT		0xFFC00C70	/* DMA Channel 1 Current X Count Register				*/
-#define DMA1_CURR_Y_COUNT		0xFFC00C78	/* DMA Channel 1 Current Y Count Register				*/
-
-#define DMA2_NEXT_DESC_PTR		0xFFC00C80	/* DMA Channel 2 Next Descriptor Pointer Register		*/
-#define DMA2_START_ADDR			0xFFC00C84	/* DMA Channel 2 Start Address Register					*/
-#define DMA2_CONFIG				0xFFC00C88	/* DMA Channel 2 Configuration Register					*/
-#define DMA2_X_COUNT			0xFFC00C90	/* DMA Channel 2 X Count Register						*/
-#define DMA2_X_MODIFY			0xFFC00C94	/* DMA Channel 2 X Modify Register						*/
-#define DMA2_Y_COUNT			0xFFC00C98	/* DMA Channel 2 Y Count Register						*/
-#define DMA2_Y_MODIFY			0xFFC00C9C	/* DMA Channel 2 Y Modify Register						*/
-#define DMA2_CURR_DESC_PTR		0xFFC00CA0	/* DMA Channel 2 Current Descriptor Pointer Register	*/
-#define DMA2_CURR_ADDR			0xFFC00CA4	/* DMA Channel 2 Current Address Register				*/
-#define DMA2_IRQ_STATUS			0xFFC00CA8	/* DMA Channel 2 Interrupt/Status Register				*/
-#define DMA2_PERIPHERAL_MAP		0xFFC00CAC	/* DMA Channel 2 Peripheral Map Register				*/
-#define DMA2_CURR_X_COUNT		0xFFC00CB0	/* DMA Channel 2 Current X Count Register				*/
-#define DMA2_CURR_Y_COUNT		0xFFC00CB8	/* DMA Channel 2 Current Y Count Register				*/
-
-#define DMA3_NEXT_DESC_PTR		0xFFC00CC0	/* DMA Channel 3 Next Descriptor Pointer Register		*/
-#define DMA3_START_ADDR			0xFFC00CC4	/* DMA Channel 3 Start Address Register					*/
-#define DMA3_CONFIG				0xFFC00CC8	/* DMA Channel 3 Configuration Register					*/
-#define DMA3_X_COUNT			0xFFC00CD0	/* DMA Channel 3 X Count Register						*/
-#define DMA3_X_MODIFY			0xFFC00CD4	/* DMA Channel 3 X Modify Register						*/
-#define DMA3_Y_COUNT			0xFFC00CD8	/* DMA Channel 3 Y Count Register						*/
-#define DMA3_Y_MODIFY			0xFFC00CDC	/* DMA Channel 3 Y Modify Register						*/
-#define DMA3_CURR_DESC_PTR		0xFFC00CE0	/* DMA Channel 3 Current Descriptor Pointer Register	*/
-#define DMA3_CURR_ADDR			0xFFC00CE4	/* DMA Channel 3 Current Address Register				*/
-#define DMA3_IRQ_STATUS			0xFFC00CE8	/* DMA Channel 3 Interrupt/Status Register				*/
-#define DMA3_PERIPHERAL_MAP		0xFFC00CEC	/* DMA Channel 3 Peripheral Map Register				*/
-#define DMA3_CURR_X_COUNT		0xFFC00CF0	/* DMA Channel 3 Current X Count Register				*/
-#define DMA3_CURR_Y_COUNT		0xFFC00CF8	/* DMA Channel 3 Current Y Count Register				*/
-
-#define DMA4_NEXT_DESC_PTR		0xFFC00D00	/* DMA Channel 4 Next Descriptor Pointer Register		*/
-#define DMA4_START_ADDR			0xFFC00D04	/* DMA Channel 4 Start Address Register					*/
-#define DMA4_CONFIG				0xFFC00D08	/* DMA Channel 4 Configuration Register					*/
-#define DMA4_X_COUNT			0xFFC00D10	/* DMA Channel 4 X Count Register						*/
-#define DMA4_X_MODIFY			0xFFC00D14	/* DMA Channel 4 X Modify Register						*/
-#define DMA4_Y_COUNT			0xFFC00D18	/* DMA Channel 4 Y Count Register						*/
-#define DMA4_Y_MODIFY			0xFFC00D1C	/* DMA Channel 4 Y Modify Register						*/
-#define DMA4_CURR_DESC_PTR		0xFFC00D20	/* DMA Channel 4 Current Descriptor Pointer Register	*/
-#define DMA4_CURR_ADDR			0xFFC00D24	/* DMA Channel 4 Current Address Register				*/
-#define DMA4_IRQ_STATUS			0xFFC00D28	/* DMA Channel 4 Interrupt/Status Register				*/
-#define DMA4_PERIPHERAL_MAP		0xFFC00D2C	/* DMA Channel 4 Peripheral Map Register				*/
-#define DMA4_CURR_X_COUNT		0xFFC00D30	/* DMA Channel 4 Current X Count Register				*/
-#define DMA4_CURR_Y_COUNT		0xFFC00D38	/* DMA Channel 4 Current Y Count Register				*/
-
-#define DMA5_NEXT_DESC_PTR		0xFFC00D40	/* DMA Channel 5 Next Descriptor Pointer Register		*/
-#define DMA5_START_ADDR			0xFFC00D44	/* DMA Channel 5 Start Address Register					*/
-#define DMA5_CONFIG				0xFFC00D48	/* DMA Channel 5 Configuration Register					*/
-#define DMA5_X_COUNT			0xFFC00D50	/* DMA Channel 5 X Count Register						*/
-#define DMA5_X_MODIFY			0xFFC00D54	/* DMA Channel 5 X Modify Register						*/
-#define DMA5_Y_COUNT			0xFFC00D58	/* DMA Channel 5 Y Count Register						*/
-#define DMA5_Y_MODIFY			0xFFC00D5C	/* DMA Channel 5 Y Modify Register						*/
-#define DMA5_CURR_DESC_PTR		0xFFC00D60	/* DMA Channel 5 Current Descriptor Pointer Register	*/
-#define DMA5_CURR_ADDR			0xFFC00D64	/* DMA Channel 5 Current Address Register				*/
-#define DMA5_IRQ_STATUS			0xFFC00D68	/* DMA Channel 5 Interrupt/Status Register				*/
-#define DMA5_PERIPHERAL_MAP		0xFFC00D6C	/* DMA Channel 5 Peripheral Map Register				*/
-#define DMA5_CURR_X_COUNT		0xFFC00D70	/* DMA Channel 5 Current X Count Register				*/
-#define DMA5_CURR_Y_COUNT		0xFFC00D78	/* DMA Channel 5 Current Y Count Register				*/
-
-#define DMA6_NEXT_DESC_PTR		0xFFC00D80	/* DMA Channel 6 Next Descriptor Pointer Register		*/
-#define DMA6_START_ADDR			0xFFC00D84	/* DMA Channel 6 Start Address Register					*/
-#define DMA6_CONFIG				0xFFC00D88	/* DMA Channel 6 Configuration Register					*/
-#define DMA6_X_COUNT			0xFFC00D90	/* DMA Channel 6 X Count Register						*/
-#define DMA6_X_MODIFY			0xFFC00D94	/* DMA Channel 6 X Modify Register						*/
-#define DMA6_Y_COUNT			0xFFC00D98	/* DMA Channel 6 Y Count Register						*/
-#define DMA6_Y_MODIFY			0xFFC00D9C	/* DMA Channel 6 Y Modify Register						*/
-#define DMA6_CURR_DESC_PTR		0xFFC00DA0	/* DMA Channel 6 Current Descriptor Pointer Register	*/
-#define DMA6_CURR_ADDR			0xFFC00DA4	/* DMA Channel 6 Current Address Register				*/
-#define DMA6_IRQ_STATUS			0xFFC00DA8	/* DMA Channel 6 Interrupt/Status Register				*/
-#define DMA6_PERIPHERAL_MAP		0xFFC00DAC	/* DMA Channel 6 Peripheral Map Register				*/
-#define DMA6_CURR_X_COUNT		0xFFC00DB0	/* DMA Channel 6 Current X Count Register				*/
-#define DMA6_CURR_Y_COUNT		0xFFC00DB8	/* DMA Channel 6 Current Y Count Register				*/
-
-#define DMA7_NEXT_DESC_PTR		0xFFC00DC0	/* DMA Channel 7 Next Descriptor Pointer Register		*/
-#define DMA7_START_ADDR			0xFFC00DC4	/* DMA Channel 7 Start Address Register					*/
-#define DMA7_CONFIG				0xFFC00DC8	/* DMA Channel 7 Configuration Register					*/
-#define DMA7_X_COUNT			0xFFC00DD0	/* DMA Channel 7 X Count Register						*/
-#define DMA7_X_MODIFY			0xFFC00DD4	/* DMA Channel 7 X Modify Register						*/
-#define DMA7_Y_COUNT			0xFFC00DD8	/* DMA Channel 7 Y Count Register						*/
-#define DMA7_Y_MODIFY			0xFFC00DDC	/* DMA Channel 7 Y Modify Register						*/
-#define DMA7_CURR_DESC_PTR		0xFFC00DE0	/* DMA Channel 7 Current Descriptor Pointer Register	*/
-#define DMA7_CURR_ADDR			0xFFC00DE4	/* DMA Channel 7 Current Address Register				*/
-#define DMA7_IRQ_STATUS			0xFFC00DE8	/* DMA Channel 7 Interrupt/Status Register				*/
-#define DMA7_PERIPHERAL_MAP		0xFFC00DEC	/* DMA Channel 7 Peripheral Map Register				*/
-#define DMA7_CURR_X_COUNT		0xFFC00DF0	/* DMA Channel 7 Current X Count Register				*/
-#define DMA7_CURR_Y_COUNT		0xFFC00DF8	/* DMA Channel 7 Current Y Count Register				*/
-
-#define DMA8_NEXT_DESC_PTR		0xFFC00E00	/* DMA Channel 8 Next Descriptor Pointer Register		*/
-#define DMA8_START_ADDR			0xFFC00E04	/* DMA Channel 8 Start Address Register					*/
-#define DMA8_CONFIG				0xFFC00E08	/* DMA Channel 8 Configuration Register					*/
-#define DMA8_X_COUNT			0xFFC00E10	/* DMA Channel 8 X Count Register						*/
-#define DMA8_X_MODIFY			0xFFC00E14	/* DMA Channel 8 X Modify Register						*/
-#define DMA8_Y_COUNT			0xFFC00E18	/* DMA Channel 8 Y Count Register						*/
-#define DMA8_Y_MODIFY			0xFFC00E1C	/* DMA Channel 8 Y Modify Register						*/
-#define DMA8_CURR_DESC_PTR		0xFFC00E20	/* DMA Channel 8 Current Descriptor Pointer Register	*/
-#define DMA8_CURR_ADDR			0xFFC00E24	/* DMA Channel 8 Current Address Register				*/
-#define DMA8_IRQ_STATUS			0xFFC00E28	/* DMA Channel 8 Interrupt/Status Register				*/
-#define DMA8_PERIPHERAL_MAP		0xFFC00E2C	/* DMA Channel 8 Peripheral Map Register				*/
-#define DMA8_CURR_X_COUNT		0xFFC00E30	/* DMA Channel 8 Current X Count Register				*/
-#define DMA8_CURR_Y_COUNT		0xFFC00E38	/* DMA Channel 8 Current Y Count Register				*/
-
-#define DMA9_NEXT_DESC_PTR		0xFFC00E40	/* DMA Channel 9 Next Descriptor Pointer Register		*/
-#define DMA9_START_ADDR			0xFFC00E44	/* DMA Channel 9 Start Address Register					*/
-#define DMA9_CONFIG				0xFFC00E48	/* DMA Channel 9 Configuration Register					*/
-#define DMA9_X_COUNT			0xFFC00E50	/* DMA Channel 9 X Count Register						*/
-#define DMA9_X_MODIFY			0xFFC00E54	/* DMA Channel 9 X Modify Register						*/
-#define DMA9_Y_COUNT			0xFFC00E58	/* DMA Channel 9 Y Count Register						*/
-#define DMA9_Y_MODIFY			0xFFC00E5C	/* DMA Channel 9 Y Modify Register						*/
-#define DMA9_CURR_DESC_PTR		0xFFC00E60	/* DMA Channel 9 Current Descriptor Pointer Register	*/
-#define DMA9_CURR_ADDR			0xFFC00E64	/* DMA Channel 9 Current Address Register				*/
-#define DMA9_IRQ_STATUS			0xFFC00E68	/* DMA Channel 9 Interrupt/Status Register				*/
-#define DMA9_PERIPHERAL_MAP		0xFFC00E6C	/* DMA Channel 9 Peripheral Map Register				*/
-#define DMA9_CURR_X_COUNT		0xFFC00E70	/* DMA Channel 9 Current X Count Register				*/
-#define DMA9_CURR_Y_COUNT		0xFFC00E78	/* DMA Channel 9 Current Y Count Register				*/
-
-#define DMA10_NEXT_DESC_PTR		0xFFC00E80	/* DMA Channel 10 Next Descriptor Pointer Register		*/
-#define DMA10_START_ADDR		0xFFC00E84	/* DMA Channel 10 Start Address Register				*/
-#define DMA10_CONFIG			0xFFC00E88	/* DMA Channel 10 Configuration Register				*/
-#define DMA10_X_COUNT			0xFFC00E90	/* DMA Channel 10 X Count Register						*/
-#define DMA10_X_MODIFY			0xFFC00E94	/* DMA Channel 10 X Modify Register						*/
-#define DMA10_Y_COUNT			0xFFC00E98	/* DMA Channel 10 Y Count Register						*/
-#define DMA10_Y_MODIFY			0xFFC00E9C	/* DMA Channel 10 Y Modify Register						*/
-#define DMA10_CURR_DESC_PTR		0xFFC00EA0	/* DMA Channel 10 Current Descriptor Pointer Register	*/
-#define DMA10_CURR_ADDR			0xFFC00EA4	/* DMA Channel 10 Current Address Register				*/
-#define DMA10_IRQ_STATUS		0xFFC00EA8	/* DMA Channel 10 Interrupt/Status Register				*/
-#define DMA10_PERIPHERAL_MAP	0xFFC00EAC	/* DMA Channel 10 Peripheral Map Register				*/
-#define DMA10_CURR_X_COUNT		0xFFC00EB0	/* DMA Channel 10 Current X Count Register				*/
-#define DMA10_CURR_Y_COUNT		0xFFC00EB8	/* DMA Channel 10 Current Y Count Register				*/
-
-#define DMA11_NEXT_DESC_PTR		0xFFC00EC0	/* DMA Channel 11 Next Descriptor Pointer Register		*/
-#define DMA11_START_ADDR		0xFFC00EC4	/* DMA Channel 11 Start Address Register				*/
-#define DMA11_CONFIG			0xFFC00EC8	/* DMA Channel 11 Configuration Register				*/
-#define DMA11_X_COUNT			0xFFC00ED0	/* DMA Channel 11 X Count Register						*/
-#define DMA11_X_MODIFY			0xFFC00ED4	/* DMA Channel 11 X Modify Register						*/
-#define DMA11_Y_COUNT			0xFFC00ED8	/* DMA Channel 11 Y Count Register						*/
-#define DMA11_Y_MODIFY			0xFFC00EDC	/* DMA Channel 11 Y Modify Register						*/
-#define DMA11_CURR_DESC_PTR		0xFFC00EE0	/* DMA Channel 11 Current Descriptor Pointer Register	*/
-#define DMA11_CURR_ADDR			0xFFC00EE4	/* DMA Channel 11 Current Address Register				*/
-#define DMA11_IRQ_STATUS		0xFFC00EE8	/* DMA Channel 11 Interrupt/Status Register				*/
-#define DMA11_PERIPHERAL_MAP	0xFFC00EEC	/* DMA Channel 11 Peripheral Map Register				*/
-#define DMA11_CURR_X_COUNT		0xFFC00EF0	/* DMA Channel 11 Current X Count Register				*/
-#define DMA11_CURR_Y_COUNT		0xFFC00EF8	/* DMA Channel 11 Current Y Count Register				*/
-
-#define MDMA_D0_NEXT_DESC_PTR	0xFFC00F00	/* MemDMA Stream 0 Destination Next Descriptor Pointer Register		*/
-#define MDMA_D0_START_ADDR		0xFFC00F04	/* MemDMA Stream 0 Destination Start Address Register				*/
-#define MDMA_D0_CONFIG			0xFFC00F08	/* MemDMA Stream 0 Destination Configuration Register				*/
-#define MDMA_D0_X_COUNT			0xFFC00F10	/* MemDMA Stream 0 Destination X Count Register						*/
-#define MDMA_D0_X_MODIFY		0xFFC00F14	/* MemDMA Stream 0 Destination X Modify Register					*/
-#define MDMA_D0_Y_COUNT			0xFFC00F18	/* MemDMA Stream 0 Destination Y Count Register						*/
-#define MDMA_D0_Y_MODIFY		0xFFC00F1C	/* MemDMA Stream 0 Destination Y Modify Register					*/
-#define MDMA_D0_CURR_DESC_PTR	0xFFC00F20	/* MemDMA Stream 0 Destination Current Descriptor Pointer Register	*/
-#define MDMA_D0_CURR_ADDR		0xFFC00F24	/* MemDMA Stream 0 Destination Current Address Register				*/
-#define MDMA_D0_IRQ_STATUS		0xFFC00F28	/* MemDMA Stream 0 Destination Interrupt/Status Register			*/
-#define MDMA_D0_PERIPHERAL_MAP	0xFFC00F2C	/* MemDMA Stream 0 Destination Peripheral Map Register				*/
-#define MDMA_D0_CURR_X_COUNT	0xFFC00F30	/* MemDMA Stream 0 Destination Current X Count Register				*/
-#define MDMA_D0_CURR_Y_COUNT	0xFFC00F38	/* MemDMA Stream 0 Destination Current Y Count Register				*/
-
-#define MDMA_S0_NEXT_DESC_PTR	0xFFC00F40	/* MemDMA Stream 0 Source Next Descriptor Pointer Register			*/
-#define MDMA_S0_START_ADDR		0xFFC00F44	/* MemDMA Stream 0 Source Start Address Register					*/
-#define MDMA_S0_CONFIG			0xFFC00F48	/* MemDMA Stream 0 Source Configuration Register					*/
-#define MDMA_S0_X_COUNT			0xFFC00F50	/* MemDMA Stream 0 Source X Count Register							*/
-#define MDMA_S0_X_MODIFY		0xFFC00F54	/* MemDMA Stream 0 Source X Modify Register							*/
-#define MDMA_S0_Y_COUNT			0xFFC00F58	/* MemDMA Stream 0 Source Y Count Register							*/
-#define MDMA_S0_Y_MODIFY		0xFFC00F5C	/* MemDMA Stream 0 Source Y Modify Register							*/
-#define MDMA_S0_CURR_DESC_PTR	0xFFC00F60	/* MemDMA Stream 0 Source Current Descriptor Pointer Register		*/
-#define MDMA_S0_CURR_ADDR		0xFFC00F64	/* MemDMA Stream 0 Source Current Address Register					*/
-#define MDMA_S0_IRQ_STATUS		0xFFC00F68	/* MemDMA Stream 0 Source Interrupt/Status Register					*/
-#define MDMA_S0_PERIPHERAL_MAP	0xFFC00F6C	/* MemDMA Stream 0 Source Peripheral Map Register					*/
-#define MDMA_S0_CURR_X_COUNT	0xFFC00F70	/* MemDMA Stream 0 Source Current X Count Register					*/
-#define MDMA_S0_CURR_Y_COUNT	0xFFC00F78	/* MemDMA Stream 0 Source Current Y Count Register					*/
-
-#define MDMA_D1_NEXT_DESC_PTR	0xFFC00F80	/* MemDMA Stream 1 Destination Next Descriptor Pointer Register		*/
-#define MDMA_D1_START_ADDR		0xFFC00F84	/* MemDMA Stream 1 Destination Start Address Register				*/
-#define MDMA_D1_CONFIG			0xFFC00F88	/* MemDMA Stream 1 Destination Configuration Register				*/
-#define MDMA_D1_X_COUNT			0xFFC00F90	/* MemDMA Stream 1 Destination X Count Register						*/
-#define MDMA_D1_X_MODIFY		0xFFC00F94	/* MemDMA Stream 1 Destination X Modify Register					*/
-#define MDMA_D1_Y_COUNT			0xFFC00F98	/* MemDMA Stream 1 Destination Y Count Register						*/
-#define MDMA_D1_Y_MODIFY		0xFFC00F9C	/* MemDMA Stream 1 Destination Y Modify Register					*/
-#define MDMA_D1_CURR_DESC_PTR	0xFFC00FA0	/* MemDMA Stream 1 Destination Current Descriptor Pointer Register	*/
-#define MDMA_D1_CURR_ADDR		0xFFC00FA4	/* MemDMA Stream 1 Destination Current Address Register				*/
-#define MDMA_D1_IRQ_STATUS		0xFFC00FA8	/* MemDMA Stream 1 Destination Interrupt/Status Register			*/
-#define MDMA_D1_PERIPHERAL_MAP	0xFFC00FAC	/* MemDMA Stream 1 Destination Peripheral Map Register				*/
-#define MDMA_D1_CURR_X_COUNT	0xFFC00FB0	/* MemDMA Stream 1 Destination Current X Count Register				*/
-#define MDMA_D1_CURR_Y_COUNT	0xFFC00FB8	/* MemDMA Stream 1 Destination Current Y Count Register				*/
-
-#define MDMA_S1_NEXT_DESC_PTR	0xFFC00FC0	/* MemDMA Stream 1 Source Next Descriptor Pointer Register			*/
-#define MDMA_S1_START_ADDR		0xFFC00FC4	/* MemDMA Stream 1 Source Start Address Register					*/
-#define MDMA_S1_CONFIG			0xFFC00FC8	/* MemDMA Stream 1 Source Configuration Register					*/
-#define MDMA_S1_X_COUNT			0xFFC00FD0	/* MemDMA Stream 1 Source X Count Register							*/
-#define MDMA_S1_X_MODIFY		0xFFC00FD4	/* MemDMA Stream 1 Source X Modify Register							*/
-#define MDMA_S1_Y_COUNT			0xFFC00FD8	/* MemDMA Stream 1 Source Y Count Register							*/
-#define MDMA_S1_Y_MODIFY		0xFFC00FDC	/* MemDMA Stream 1 Source Y Modify Register							*/
-#define MDMA_S1_CURR_DESC_PTR	0xFFC00FE0	/* MemDMA Stream 1 Source Current Descriptor Pointer Register		*/
-#define MDMA_S1_CURR_ADDR		0xFFC00FE4	/* MemDMA Stream 1 Source Current Address Register					*/
-#define MDMA_S1_IRQ_STATUS		0xFFC00FE8	/* MemDMA Stream 1 Source Interrupt/Status Register					*/
-#define MDMA_S1_PERIPHERAL_MAP	0xFFC00FEC	/* MemDMA Stream 1 Source Peripheral Map Register					*/
-#define MDMA_S1_CURR_X_COUNT	0xFFC00FF0	/* MemDMA Stream 1 Source Current X Count Register					*/
-#define MDMA_S1_CURR_Y_COUNT	0xFFC00FF8	/* MemDMA Stream 1 Source Current Y Count Register					*/
-
-
-/* Parallel Peripheral Interface (0xFFC01000 - 0xFFC010FF)				*/
-#define PPI_CONTROL			0xFFC01000	/* PPI Control Register			*/
-#define PPI_STATUS			0xFFC01004	/* PPI Status Register			*/
-#define PPI_COUNT			0xFFC01008	/* PPI Transfer Count Register	*/
-#define PPI_DELAY			0xFFC0100C	/* PPI Delay Count Register		*/
-#define PPI_FRAME			0xFFC01010	/* PPI Frame Length Register	*/
-
-
-/* Two-Wire Interface		(0xFFC01400 - 0xFFC014FF)								*/
-#define TWI0_REGBASE			0xFFC01400
-#define TWI0_CLKDIV			0xFFC01400	/* Serial Clock Divider Register			*/
-#define TWI0_CONTROL			0xFFC01404	/* TWI Control Register						*/
-#define TWI0_SLAVE_CTL		0xFFC01408	/* Slave Mode Control Register				*/
-#define TWI0_SLAVE_STAT		0xFFC0140C	/* Slave Mode Status Register				*/
-#define TWI0_SLAVE_ADDR		0xFFC01410	/* Slave Mode Address Register				*/
-#define TWI0_MASTER_CTL		0xFFC01414	/* Master Mode Control Register				*/
-#define TWI0_MASTER_STAT		0xFFC01418	/* Master Mode Status Register				*/
-#define TWI0_MASTER_ADDR		0xFFC0141C	/* Master Mode Address Register				*/
-#define TWI0_INT_STAT		0xFFC01420	/* TWI Interrupt Status Register			*/
-#define TWI0_INT_MASK		0xFFC01424	/* TWI Master Interrupt Mask Register		*/
-#define TWI0_FIFO_CTL		0xFFC01428	/* FIFO Control Register					*/
-#define TWI0_FIFO_STAT		0xFFC0142C	/* FIFO Status Register						*/
-#define TWI0_XMT_DATA8		0xFFC01480	/* FIFO Transmit Data Single Byte Register	*/
-#define TWI0_XMT_DATA16		0xFFC01484	/* FIFO Transmit Data Double Byte Register	*/
-#define TWI0_RCV_DATA8		0xFFC01488	/* FIFO Receive Data Single Byte Register	*/
-#define TWI0_RCV_DATA16		0xFFC0148C	/* FIFO Receive Data Double Byte Register	*/
-
-
-/* General Purpose I/O Port G (0xFFC01500 - 0xFFC015FF)												*/
-#define PORTGIO					0xFFC01500	/* Port G I/O Pin State Specify Register				*/
-#define PORTGIO_CLEAR			0xFFC01504	/* Port G I/O Peripheral Interrupt Clear Register		*/
-#define PORTGIO_SET				0xFFC01508	/* Port G I/O Peripheral Interrupt Set Register			*/
-#define PORTGIO_TOGGLE			0xFFC0150C	/* Port G I/O Pin State Toggle Register					*/
-#define PORTGIO_MASKA			0xFFC01510	/* Port G I/O Mask State Specify Interrupt A Register	*/
-#define PORTGIO_MASKA_CLEAR		0xFFC01514	/* Port G I/O Mask Disable Interrupt A Register			*/
-#define PORTGIO_MASKA_SET		0xFFC01518	/* Port G I/O Mask Enable Interrupt A Register			*/
-#define PORTGIO_MASKA_TOGGLE	0xFFC0151C	/* Port G I/O Mask Toggle Enable Interrupt A Register	*/
-#define PORTGIO_MASKB			0xFFC01520	/* Port G I/O Mask State Specify Interrupt B Register	*/
-#define PORTGIO_MASKB_CLEAR		0xFFC01524	/* Port G I/O Mask Disable Interrupt B Register			*/
-#define PORTGIO_MASKB_SET		0xFFC01528	/* Port G I/O Mask Enable Interrupt B Register			*/
-#define PORTGIO_MASKB_TOGGLE	0xFFC0152C	/* Port G I/O Mask Toggle Enable Interrupt B Register	*/
-#define PORTGIO_DIR				0xFFC01530	/* Port G I/O Direction Register						*/
-#define PORTGIO_POLAR			0xFFC01534	/* Port G I/O Source Polarity Register					*/
-#define PORTGIO_EDGE			0xFFC01538	/* Port G I/O Source Sensitivity Register				*/
-#define PORTGIO_BOTH			0xFFC0153C	/* Port G I/O Set on BOTH Edges Register				*/
-#define PORTGIO_INEN			0xFFC01540	/* Port G I/O Input Enable Register						*/
-
-
-/* General Purpose I/O Port H (0xFFC01700 - 0xFFC017FF)												*/
-#define PORTHIO					0xFFC01700	/* Port H I/O Pin State Specify Register				*/
-#define PORTHIO_CLEAR			0xFFC01704	/* Port H I/O Peripheral Interrupt Clear Register		*/
-#define PORTHIO_SET				0xFFC01708	/* Port H I/O Peripheral Interrupt Set Register			*/
-#define PORTHIO_TOGGLE			0xFFC0170C	/* Port H I/O Pin State Toggle Register					*/
-#define PORTHIO_MASKA			0xFFC01710	/* Port H I/O Mask State Specify Interrupt A Register	*/
-#define PORTHIO_MASKA_CLEAR		0xFFC01714	/* Port H I/O Mask Disable Interrupt A Register			*/
-#define PORTHIO_MASKA_SET		0xFFC01718	/* Port H I/O Mask Enable Interrupt A Register			*/
-#define PORTHIO_MASKA_TOGGLE	0xFFC0171C	/* Port H I/O Mask Toggle Enable Interrupt A Register	*/
-#define PORTHIO_MASKB			0xFFC01720	/* Port H I/O Mask State Specify Interrupt B Register	*/
-#define PORTHIO_MASKB_CLEAR		0xFFC01724	/* Port H I/O Mask Disable Interrupt B Register			*/
-#define PORTHIO_MASKB_SET		0xFFC01728	/* Port H I/O Mask Enable Interrupt B Register			*/
-#define PORTHIO_MASKB_TOGGLE	0xFFC0172C	/* Port H I/O Mask Toggle Enable Interrupt B Register	*/
-#define PORTHIO_DIR				0xFFC01730	/* Port H I/O Direction Register						*/
-#define PORTHIO_POLAR			0xFFC01734	/* Port H I/O Source Polarity Register					*/
-#define PORTHIO_EDGE			0xFFC01738	/* Port H I/O Source Sensitivity Register				*/
-#define PORTHIO_BOTH			0xFFC0173C	/* Port H I/O Set on BOTH Edges Register				*/
-#define PORTHIO_INEN			0xFFC01740	/* Port H I/O Input Enable Register						*/
-
-
-/* UART1 Controller		(0xFFC02000 - 0xFFC020FF)								*/
-#define UART1_THR			0xFFC02000	/* Transmit Holding register			*/
-#define UART1_RBR			0xFFC02000	/* Receive Buffer register				*/
-#define UART1_DLL			0xFFC02000	/* Divisor Latch (Low-Byte)				*/
-#define UART1_IER			0xFFC02004	/* Interrupt Enable Register			*/
-#define UART1_DLH			0xFFC02004	/* Divisor Latch (High-Byte)			*/
-#define UART1_IIR			0xFFC02008	/* Interrupt Identification Register	*/
-#define UART1_LCR			0xFFC0200C	/* Line Control Register				*/
-#define UART1_MCR			0xFFC02010	/* Modem Control Register				*/
-#define UART1_LSR			0xFFC02014	/* Line Status Register					*/
-#define UART1_MSR			0xFFC02018	/* Modem Status Register				*/
-#define UART1_SCR			0xFFC0201C	/* SCR Scratch Register					*/
-#define UART1_GCTL			0xFFC02024	/* Global Control Register				*/
-
-
-/* Omit CAN register sets from the defBF534.h (CAN is not in the ADSP-BF52x processor) */
-
-/* Pin Control Registers	(0xFFC03200 - 0xFFC032FF)											*/
-#define PORTF_FER			0xFFC03200	/* Port F Function Enable Register (Alternate/Flag*)	*/
-#define PORTG_FER			0xFFC03204	/* Port G Function Enable Register (Alternate/Flag*)	*/
-#define PORTH_FER			0xFFC03208	/* Port H Function Enable Register (Alternate/Flag*)	*/
-#define BFIN_PORT_MUX			0xFFC0320C	/* Port Multiplexer Control Register					*/
-
-
-/* Handshake MDMA Registers	(0xFFC03300 - 0xFFC033FF)										*/
-#define HMDMA0_CONTROL		0xFFC03300	/* Handshake MDMA0 Control Register					*/
-#define HMDMA0_ECINIT		0xFFC03304	/* HMDMA0 Initial Edge Count Register				*/
-#define HMDMA0_BCINIT		0xFFC03308	/* HMDMA0 Initial Block Count Register				*/
-#define HMDMA0_ECURGENT		0xFFC0330C	/* HMDMA0 Urgent Edge Count Threshold Register		*/
-#define HMDMA0_ECOVERFLOW	0xFFC03310	/* HMDMA0 Edge Count Overflow Interrupt Register	*/
-#define HMDMA0_ECOUNT		0xFFC03314	/* HMDMA0 Current Edge Count Register				*/
-#define HMDMA0_BCOUNT		0xFFC03318	/* HMDMA0 Current Block Count Register				*/
-
-#define HMDMA1_CONTROL		0xFFC03340	/* Handshake MDMA1 Control Register					*/
-#define HMDMA1_ECINIT		0xFFC03344	/* HMDMA1 Initial Edge Count Register				*/
-#define HMDMA1_BCINIT		0xFFC03348	/* HMDMA1 Initial Block Count Register				*/
-#define HMDMA1_ECURGENT		0xFFC0334C	/* HMDMA1 Urgent Edge Count Threshold Register		*/
-#define HMDMA1_ECOVERFLOW	0xFFC03350	/* HMDMA1 Edge Count Overflow Interrupt Register	*/
-#define HMDMA1_ECOUNT		0xFFC03354	/* HMDMA1 Current Edge Count Register				*/
-#define HMDMA1_BCOUNT		0xFFC03358	/* HMDMA1 Current Block Count Register				*/
-
-/* GPIO PIN mux (0xFFC03210 - 0xFFC03288) */
-#define PORTF_MUX               0xFFC03210      /* Port F mux control */
-#define PORTG_MUX               0xFFC03214      /* Port G mux control */
-#define PORTH_MUX               0xFFC03218      /* Port H mux control */
-#define PORTF_DRIVE             0xFFC03220      /* Port F drive strength control */
-#define PORTG_DRIVE             0xFFC03224      /* Port G drive strength control */
-#define PORTH_DRIVE             0xFFC03228      /* Port H drive strength control */
-#define PORTF_SLEW              0xFFC03230      /* Port F slew control */
-#define PORTG_SLEW              0xFFC03234      /* Port G slew control */
-#define PORTH_SLEW              0xFFC03238      /* Port H slew control */
-#define PORTF_HYSTERISIS        0xFFC03240      /* Port F Schmitt trigger control */
-#define PORTG_HYSTERISIS        0xFFC03244      /* Port G Schmitt trigger control */
-#define PORTH_HYSTERISIS        0xFFC03248      /* Port H Schmitt trigger control */
-#define MISCPORT_DRIVE          0xFFC03280      /* Misc Port drive strength control */
-#define MISCPORT_SLEW           0xFFC03284      /* Misc Port slew control */
-#define MISCPORT_HYSTERISIS     0xFFC03288      /* Misc Port Schmitt trigger control */
-
-
-/***********************************************************************************
-** System MMR Register Bits And Macros
-**
-** Disclaimer:	All macros are intended to make C and Assembly code more readable.
-**				Use these macros carefully, as any that do left shifts for field
-**				depositing will result in the lower order bits being destroyed.  Any
-**				macro that shifts left to properly position the bit-field should be
-**				used as part of an OR to initialize a register and NOT as a dynamic
-**				modifier UNLESS the lower order bits are saved and ORed back in when
-**				the macro is used.
-*************************************************************************************/
-
-/* CHIPID Masks */
-#define CHIPID_VERSION         0xF0000000
-#define CHIPID_FAMILY          0x0FFFF000
-#define CHIPID_MANUFACTURE     0x00000FFE
-
-/* SWRST Masks																		*/
-#define SYSTEM_RESET		0x0007	/* Initiates A System Software Reset			*/
-#define	DOUBLE_FAULT		0x0008	/* Core Double Fault Causes Reset				*/
-#define RESET_DOUBLE		0x2000	/* SW Reset Generated By Core Double-Fault		*/
-#define RESET_WDOG			0x4000	/* SW Reset Generated By Watchdog Timer			*/
-#define RESET_SOFTWARE		0x8000	/* SW Reset Occurred Since Last Read Of SWRST	*/
-
-/* SYSCR Masks																				*/
-#define BMODE				0x0007	/* Boot Mode - Latched During HW Reset From Mode Pins	*/
-#define	NOBOOT				0x0010	/* Execute From L1 or ASYNC Bank 0 When BMODE = 0		*/
-
-
-/* *************  SYSTEM INTERRUPT CONTROLLER MASKS *************************************/
-/* Peripheral Masks For SIC_ISR, SIC_IWR, SIC_IMASK										*/
-
-#if 0
-#define IRQ_PLL_WAKEUP	0x00000001	/* PLL Wakeup Interrupt			 					*/
-
-#define IRQ_ERROR1      0x00000002  /* Error Interrupt (DMA, DMARx Block, DMARx Overflow) */
-#define IRQ_ERROR2      0x00000004  /* Error Interrupt (CAN, Ethernet, SPORTx, PPI, SPI, UARTx) */
-#define IRQ_RTC			0x00000008	/* Real Time Clock Interrupt 						*/
-#define IRQ_DMA0		0x00000010	/* DMA Channel 0 (PPI) Interrupt 					*/
-#define IRQ_DMA3		0x00000020	/* DMA Channel 3 (SPORT0 RX) Interrupt 				*/
-#define IRQ_DMA4		0x00000040	/* DMA Channel 4 (SPORT0 TX) Interrupt 				*/
-#define IRQ_DMA5		0x00000080	/* DMA Channel 5 (SPORT1 RX) Interrupt 				*/
-
-#define IRQ_DMA6		0x00000100	/* DMA Channel 6 (SPORT1 TX) Interrupt 		 		*/
-#define IRQ_TWI			0x00000200	/* TWI Interrupt									*/
-#define IRQ_DMA7		0x00000400	/* DMA Channel 7 (SPI) Interrupt 					*/
-#define IRQ_DMA8		0x00000800	/* DMA Channel 8 (UART0 RX) Interrupt 				*/
-#define IRQ_DMA9		0x00001000	/* DMA Channel 9 (UART0 TX) Interrupt 				*/
-#define IRQ_DMA10		0x00002000	/* DMA Channel 10 (UART1 RX) Interrupt 				*/
-#define IRQ_DMA11		0x00004000	/* DMA Channel 11 (UART1 TX) Interrupt 				*/
-#define IRQ_CAN_RX		0x00008000	/* CAN Receive Interrupt 							*/
-
-#define IRQ_CAN_TX		0x00010000	/* CAN Transmit Interrupt  							*/
-#define IRQ_DMA1		0x00020000	/* DMA Channel 1 (Ethernet RX) Interrupt 			*/
-#define IRQ_PFA_PORTH	0x00020000	/* PF Port H (PF47:32) Interrupt A 					*/
-#define IRQ_DMA2		0x00040000	/* DMA Channel 2 (Ethernet TX) Interrupt 			*/
-#define IRQ_PFB_PORTH	0x00040000	/* PF Port H (PF47:32) Interrupt B 					*/
-#define IRQ_TIMER0		0x00080000	/* Timer 0 Interrupt								*/
-#define IRQ_TIMER1		0x00100000	/* Timer 1 Interrupt 								*/
-#define IRQ_TIMER2		0x00200000	/* Timer 2 Interrupt 								*/
-#define IRQ_TIMER3		0x00400000	/* Timer 3 Interrupt 								*/
-#define IRQ_TIMER4		0x00800000	/* Timer 4 Interrupt 								*/
-
-#define IRQ_TIMER5		0x01000000	/* Timer 5 Interrupt 								*/
-#define IRQ_TIMER6		0x02000000	/* Timer 6 Interrupt 								*/
-#define IRQ_TIMER7		0x04000000	/* Timer 7 Interrupt 								*/
-#define IRQ_PFA_PORTFG	0x08000000	/* PF Ports F&G (PF31:0) Interrupt A 				*/
-#define IRQ_PFB_PORTF	0x80000000	/* PF Port F (PF15:0) Interrupt B 					*/
-#define IRQ_DMA12		0x20000000	/* DMA Channels 12 (MDMA1 Source) RX Interrupt 		*/
-#define IRQ_DMA13		0x20000000	/* DMA Channels 13 (MDMA1 Destination) TX Interrupt */
-#define IRQ_DMA14		0x40000000	/* DMA Channels 14 (MDMA0 Source) RX Interrupt 		*/
-#define IRQ_DMA15		0x40000000	/* DMA Channels 15 (MDMA0 Destination) TX Interrupt */
-#define IRQ_WDOG		0x80000000	/* Software Watchdog Timer Interrupt 				*/
-#define IRQ_PFB_PORTG	0x10000000	/* PF Port G (PF31:16) Interrupt B 					*/
-#endif
-
-/* SIC_IAR0 Macros															*/
-#define P0_IVG(x)		(((x)&0xF)-7)			/* Peripheral #0 assigned IVG #x 	*/
-#define P1_IVG(x)		(((x)&0xF)-7) << 0x4	/* Peripheral #1 assigned IVG #x 	*/
-#define P2_IVG(x)		(((x)&0xF)-7) << 0x8	/* Peripheral #2 assigned IVG #x 	*/
-#define P3_IVG(x)		(((x)&0xF)-7) << 0xC	/* Peripheral #3 assigned IVG #x	*/
-#define P4_IVG(x)		(((x)&0xF)-7) << 0x10	/* Peripheral #4 assigned IVG #x	*/
-#define P5_IVG(x)		(((x)&0xF)-7) << 0x14	/* Peripheral #5 assigned IVG #x	*/
-#define P6_IVG(x)		(((x)&0xF)-7) << 0x18	/* Peripheral #6 assigned IVG #x	*/
-#define P7_IVG(x)		(((x)&0xF)-7) << 0x1C	/* Peripheral #7 assigned IVG #x	*/
-
-/* SIC_IAR1 Macros															*/
-#define P8_IVG(x)		(((x)&0xF)-7)			/* Peripheral #8 assigned IVG #x 	*/
-#define P9_IVG(x)		(((x)&0xF)-7) << 0x4	/* Peripheral #9 assigned IVG #x 	*/
-#define P10_IVG(x)		(((x)&0xF)-7) << 0x8	/* Peripheral #10 assigned IVG #x	*/
-#define P11_IVG(x)		(((x)&0xF)-7) << 0xC	/* Peripheral #11 assigned IVG #x 	*/
-#define P12_IVG(x)		(((x)&0xF)-7) << 0x10	/* Peripheral #12 assigned IVG #x	*/
-#define P13_IVG(x)		(((x)&0xF)-7) << 0x14	/* Peripheral #13 assigned IVG #x	*/
-#define P14_IVG(x)		(((x)&0xF)-7) << 0x18	/* Peripheral #14 assigned IVG #x	*/
-#define P15_IVG(x)		(((x)&0xF)-7) << 0x1C	/* Peripheral #15 assigned IVG #x	*/
-
-/* SIC_IAR2 Macros															*/
-#define P16_IVG(x)		(((x)&0xF)-7)			/* Peripheral #16 assigned IVG #x	*/
-#define P17_IVG(x)		(((x)&0xF)-7) << 0x4	/* Peripheral #17 assigned IVG #x	*/
-#define P18_IVG(x)		(((x)&0xF)-7) << 0x8	/* Peripheral #18 assigned IVG #x	*/
-#define P19_IVG(x)		(((x)&0xF)-7) << 0xC	/* Peripheral #19 assigned IVG #x	*/
-#define P20_IVG(x)		(((x)&0xF)-7) << 0x10	/* Peripheral #20 assigned IVG #x	*/
-#define P21_IVG(x)		(((x)&0xF)-7) << 0x14	/* Peripheral #21 assigned IVG #x	*/
-#define P22_IVG(x)		(((x)&0xF)-7) << 0x18	/* Peripheral #22 assigned IVG #x	*/
-#define P23_IVG(x)		(((x)&0xF)-7) << 0x1C	/* Peripheral #23 assigned IVG #x	*/
-
-/* SIC_IAR3 Macros															*/
-#define P24_IVG(x)		(((x)&0xF)-7)			/* Peripheral #24 assigned IVG #x	*/
-#define P25_IVG(x)		(((x)&0xF)-7) << 0x4	/* Peripheral #25 assigned IVG #x	*/
-#define P26_IVG(x)		(((x)&0xF)-7) << 0x8	/* Peripheral #26 assigned IVG #x	*/
-#define P27_IVG(x)		(((x)&0xF)-7) << 0xC	/* Peripheral #27 assigned IVG #x	*/
-#define P28_IVG(x)		(((x)&0xF)-7) << 0x10	/* Peripheral #28 assigned IVG #x	*/
-#define P29_IVG(x)		(((x)&0xF)-7) << 0x14	/* Peripheral #29 assigned IVG #x	*/
-#define P30_IVG(x)		(((x)&0xF)-7) << 0x18	/* Peripheral #30 assigned IVG #x	*/
-#define P31_IVG(x)		(((x)&0xF)-7) << 0x1C	/* Peripheral #31 assigned IVG #x	*/
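/*
 * Illustrative sketch only, not part of this patch: the disclaimer at the
 * top of this section recommends that shift-based macros such as the
 * Px_IVG() macros above be OR-ed together to build a complete register
 * value that is written once, rather than applied to a live register one
 * field at a time.  A hypothetical helper doing that for an SIC_IARx
 * register could look like the following; the register pointer is passed
 * in so no MMR address needs to be assumed here.
 */
static void sic_iar0_assign_example(volatile unsigned long *sic_iar0)
{
	/* Build the whole value first, then store it in a single write. */
	*sic_iar0 = P0_IVG(7)  | P1_IVG(8)  | P2_IVG(9)  | P3_IVG(9) |
		    P4_IVG(10) | P5_IVG(10) | P6_IVG(11) | P7_IVG(11);
}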
-
-
-/* SIC_IMASK Masks																		*/
-#define SIC_UNMASK_ALL	0x00000000					/* Unmask all peripheral interrupts	*/
-#define SIC_MASK_ALL	0xFFFFFFFF					/* Mask all peripheral interrupts	*/
-#define SIC_MASK(x)		(1 << ((x)&0x1F))					/* Mask Peripheral #x interrupt		*/
-#define SIC_UNMASK(x)	(0xFFFFFFFF ^ (1 << ((x)&0x1F)))	/* Unmask Peripheral #x interrupt	*/
-
-/* SIC_IWR Masks																		*/
-#define IWR_DISABLE_ALL	0x00000000					/* Wakeup Disable all peripherals	*/
-#define IWR_ENABLE_ALL	0xFFFFFFFF					/* Wakeup Enable all peripherals	*/
-#define IWR_ENABLE(x)	(1 << ((x)&0x1F))					/* Wakeup Enable Peripheral #x		*/
-#define IWR_DISABLE(x)	(0xFFFFFFFF ^ (1 << ((x)&0x1F))) 	/* Wakeup Disable Peripheral #x		*/
-
-
-/* ************** UART CONTROLLER MASKS *************************/
-/* UARTx_LCR Masks												*/
-#define WLS(x)		(((x)-5) & 0x03)	/* Word Length Select */
-#define STB			0x04				/* Stop Bits			*/
-#define PEN			0x08				/* Parity Enable		*/
-#define EPS			0x10				/* Even Parity Select	*/
-#define STP			0x20				/* Stick Parity			*/
-#define SB			0x40				/* Set Break			*/
-#define DLAB		0x80				/* Divisor Latch Access	*/
-
-/* UARTx_MCR Mask										*/
-#define LOOP_ENA	0x10	/* Loopback Mode Enable */
-#define LOOP_ENA_P	0x04
-
-/* UARTx_LSR Masks										*/
-#define DR			0x01	/* Data Ready				*/
-#define OE			0x02	/* Overrun Error			*/
-#define PE			0x04	/* Parity Error				*/
-#define FE			0x08	/* Framing Error			*/
-#define BI			0x10	/* Break Interrupt			*/
-#define THRE		0x20	/* THR Empty				*/
-#define TEMT		0x40	/* TSR and UART_THR Empty	*/
-
-/* UARTx_IER Masks															*/
-#define ERBFI		0x01		/* Enable Receive Buffer Full Interrupt		*/
-#define ETBEI		0x02		/* Enable Transmit Buffer Empty Interrupt	*/
-#define ELSI		0x04		/* Enable RX Status Interrupt				*/
-
-/* UARTx_IIR Masks														*/
-#define NINT		0x01		/* Pending Interrupt					*/
-#define IIR_TX_READY    0x02		/* UART_THR empty                               */
-#define IIR_RX_READY    0x04		/* Receive data ready                           */
-#define IIR_LINE_CHANGE 0x06		/* Receive line status    			*/
-#define IIR_STATUS	0x06		/* Highest Priority Pending Interrupt	*/
-
-/* UARTx_GCTL Masks													*/
-#define UCEN		0x01		/* Enable UARTx Clocks				*/
-#define IREN		0x02		/* Enable IrDA Mode					*/
-#define TPOLC		0x04		/* IrDA TX Polarity Change			*/
-#define RPOLC		0x08		/* IrDA RX Polarity Change			*/
-#define FPE			0x10		/* Force Parity Error On Transmit	*/
-#define FFE			0x20		/* Force Framing Error On Transmit	*/
-
-
-/*  ****************  GENERAL PURPOSE TIMER MASKS  **********************/
-/* TIMER_ENABLE Masks													*/
-#define TIMEN0			0x0001		/* Enable Timer 0					*/
-#define TIMEN1			0x0002		/* Enable Timer 1					*/
-#define TIMEN2			0x0004		/* Enable Timer 2					*/
-#define TIMEN3			0x0008		/* Enable Timer 3					*/
-#define TIMEN4			0x0010		/* Enable Timer 4					*/
-#define TIMEN5			0x0020		/* Enable Timer 5					*/
-#define TIMEN6			0x0040		/* Enable Timer 6					*/
-#define TIMEN7			0x0080		/* Enable Timer 7					*/
-
-/* TIMER_DISABLE Masks													*/
-#define TIMDIS0			TIMEN0		/* Disable Timer 0					*/
-#define TIMDIS1			TIMEN1		/* Disable Timer 1					*/
-#define TIMDIS2			TIMEN2		/* Disable Timer 2					*/
-#define TIMDIS3			TIMEN3		/* Disable Timer 3					*/
-#define TIMDIS4			TIMEN4		/* Disable Timer 4					*/
-#define TIMDIS5			TIMEN5		/* Disable Timer 5					*/
-#define TIMDIS6			TIMEN6		/* Disable Timer 6					*/
-#define TIMDIS7			TIMEN7		/* Disable Timer 7					*/
-
-/* TIMER_STATUS Masks													*/
-#define TIMIL0			0x00000001	/* Timer 0 Interrupt				*/
-#define TIMIL1			0x00000002	/* Timer 1 Interrupt				*/
-#define TIMIL2			0x00000004	/* Timer 2 Interrupt				*/
-#define TIMIL3			0x00000008	/* Timer 3 Interrupt				*/
-#define TOVF_ERR0		0x00000010	/* Timer 0 Counter Overflow			*/
-#define TOVF_ERR1		0x00000020	/* Timer 1 Counter Overflow			*/
-#define TOVF_ERR2		0x00000040	/* Timer 2 Counter Overflow			*/
-#define TOVF_ERR3		0x00000080	/* Timer 3 Counter Overflow			*/
-#define TRUN0			0x00001000	/* Timer 0 Slave Enable Status		*/
-#define TRUN1			0x00002000	/* Timer 1 Slave Enable Status		*/
-#define TRUN2			0x00004000	/* Timer 2 Slave Enable Status		*/
-#define TRUN3			0x00008000	/* Timer 3 Slave Enable Status		*/
-#define TIMIL4			0x00010000	/* Timer 4 Interrupt				*/
-#define TIMIL5			0x00020000	/* Timer 5 Interrupt				*/
-#define TIMIL6			0x00040000	/* Timer 6 Interrupt				*/
-#define TIMIL7			0x00080000	/* Timer 7 Interrupt				*/
-#define TOVF_ERR4		0x00100000	/* Timer 4 Counter Overflow			*/
-#define TOVF_ERR5		0x00200000	/* Timer 5 Counter Overflow			*/
-#define TOVF_ERR6		0x00400000	/* Timer 6 Counter Overflow			*/
-#define TOVF_ERR7		0x00800000	/* Timer 7 Counter Overflow			*/
-#define TRUN4			0x10000000	/* Timer 4 Slave Enable Status		*/
-#define TRUN5			0x20000000	/* Timer 5 Slave Enable Status		*/
-#define TRUN6			0x40000000	/* Timer 6 Slave Enable Status		*/
-#define TRUN7			0x80000000	/* Timer 7 Slave Enable Status		*/
-
-/* Alternate Deprecated Macros Provided For Backwards Code Compatibility */
-#define TOVL_ERR0 TOVF_ERR0
-#define TOVL_ERR1 TOVF_ERR1
-#define TOVL_ERR2 TOVF_ERR2
-#define TOVL_ERR3 TOVF_ERR3
-#define TOVL_ERR4 TOVF_ERR4
-#define TOVL_ERR5 TOVF_ERR5
-#define TOVL_ERR6 TOVF_ERR6
-#define TOVL_ERR7 TOVF_ERR7
-
-/* TIMERx_CONFIG Masks													*/
-#define PWM_OUT			0x0001	/* Pulse-Width Modulation Output Mode	*/
-#define WDTH_CAP		0x0002	/* Width Capture Input Mode				*/
-#define EXT_CLK			0x0003	/* External Clock Mode					*/
-#define PULSE_HI		0x0004	/* Action Pulse (Positive/Negative*)	*/
-#define PERIOD_CNT		0x0008	/* Period Count							*/
-#define IRQ_ENA			0x0010	/* Interrupt Request Enable				*/
-#define TIN_SEL			0x0020	/* Timer Input Select					*/
-#define OUT_DIS			0x0040	/* Output Pad Disable					*/
-#define CLK_SEL			0x0080	/* Timer Clock Select					*/
-#define TOGGLE_HI		0x0100	/* PWM_OUT PULSE_HI Toggle Mode			*/
-#define EMU_RUN			0x0200	/* Emulation Behavior Select			*/
-#define ERR_TYP			0xC000	/* Error Type							*/
-
-
-/* ******************   GPIO PORTS F, G, H MASKS  ***********************/
-/*  General Purpose IO (0xFFC00700 - 0xFFC007FF)  Masks 				*/
-/* Port F Masks 														*/
-#define PF0		0x0001
-#define PF1		0x0002
-#define PF2		0x0004
-#define PF3		0x0008
-#define PF4		0x0010
-#define PF5		0x0020
-#define PF6		0x0040
-#define PF7		0x0080
-#define PF8		0x0100
-#define PF9		0x0200
-#define PF10	0x0400
-#define PF11	0x0800
-#define PF12	0x1000
-#define PF13	0x2000
-#define PF14	0x4000
-#define PF15	0x8000
-
-/* Port G Masks															*/
-#define PG0		0x0001
-#define PG1		0x0002
-#define PG2		0x0004
-#define PG3		0x0008
-#define PG4		0x0010
-#define PG5		0x0020
-#define PG6		0x0040
-#define PG7		0x0080
-#define PG8		0x0100
-#define PG9		0x0200
-#define PG10	0x0400
-#define PG11	0x0800
-#define PG12	0x1000
-#define PG13	0x2000
-#define PG14	0x4000
-#define PG15	0x8000
-
-/* Port H Masks															*/
-#define PH0		0x0001
-#define PH1		0x0002
-#define PH2		0x0004
-#define PH3		0x0008
-#define PH4		0x0010
-#define PH5		0x0020
-#define PH6		0x0040
-#define PH7		0x0080
-#define PH8		0x0100
-#define PH9		0x0200
-#define PH10	0x0400
-#define PH11	0x0800
-#define PH12	0x1000
-#define PH13	0x2000
-#define PH14	0x4000
-#define PH15	0x8000
-
-/* *********************  ASYNCHRONOUS MEMORY CONTROLLER MASKS  *************************/
-/* EBIU_AMGCTL Masks																	*/
-#define AMCKEN			0x0001		/* Enable CLKOUT									*/
-#define	AMBEN_NONE		0x0000		/* All Banks Disabled								*/
-#define AMBEN_B0		0x0002		/* Enable Async Memory Bank 0 only					*/
-#define AMBEN_B0_B1		0x0004		/* Enable Async Memory Banks 0 & 1 only				*/
-#define AMBEN_B0_B1_B2	0x0006		/* Enable Async Memory Banks 0, 1, and 2			*/
-#define AMBEN_ALL		0x0008		/* Enable Async Memory Banks (all) 0, 1, 2, and 3	*/
-
-/* EBIU_AMBCTL0 Masks																	*/
-#define B0RDYEN			0x00000001  /* Bank 0 (B0) RDY Enable							*/
-#define B0RDYPOL		0x00000002  /* B0 RDY Active High								*/
-#define B0TT_1			0x00000004  /* B0 Transition Time (Read to Write) = 1 cycle		*/
-#define B0TT_2			0x00000008  /* B0 Transition Time (Read to Write) = 2 cycles	*/
-#define B0TT_3			0x0000000C  /* B0 Transition Time (Read to Write) = 3 cycles	*/
-#define B0TT_4			0x00000000  /* B0 Transition Time (Read to Write) = 4 cycles	*/
-#define B0ST_1			0x00000010  /* B0 Setup Time (AOE to Read/Write) = 1 cycle		*/
-#define B0ST_2			0x00000020  /* B0 Setup Time (AOE to Read/Write) = 2 cycles		*/
-#define B0ST_3			0x00000030  /* B0 Setup Time (AOE to Read/Write) = 3 cycles		*/
-#define B0ST_4			0x00000000  /* B0 Setup Time (AOE to Read/Write) = 4 cycles		*/
-#define B0HT_1			0x00000040  /* B0 Hold Time (~Read/Write to ~AOE) = 1 cycle		*/
-#define B0HT_2			0x00000080  /* B0 Hold Time (~Read/Write to ~AOE) = 2 cycles	*/
-#define B0HT_3			0x000000C0  /* B0 Hold Time (~Read/Write to ~AOE) = 3 cycles	*/
-#define B0HT_0			0x00000000  /* B0 Hold Time (~Read/Write to ~AOE) = 0 cycles	*/
-#define B0RAT_1			0x00000100  /* B0 Read Access Time = 1 cycle					*/
-#define B0RAT_2			0x00000200  /* B0 Read Access Time = 2 cycles					*/
-#define B0RAT_3			0x00000300  /* B0 Read Access Time = 3 cycles					*/
-#define B0RAT_4			0x00000400  /* B0 Read Access Time = 4 cycles					*/
-#define B0RAT_5			0x00000500  /* B0 Read Access Time = 5 cycles					*/
-#define B0RAT_6			0x00000600  /* B0 Read Access Time = 6 cycles					*/
-#define B0RAT_7			0x00000700  /* B0 Read Access Time = 7 cycles					*/
-#define B0RAT_8			0x00000800  /* B0 Read Access Time = 8 cycles					*/
-#define B0RAT_9			0x00000900  /* B0 Read Access Time = 9 cycles					*/
-#define B0RAT_10		0x00000A00  /* B0 Read Access Time = 10 cycles					*/
-#define B0RAT_11		0x00000B00  /* B0 Read Access Time = 11 cycles					*/
-#define B0RAT_12		0x00000C00  /* B0 Read Access Time = 12 cycles					*/
-#define B0RAT_13		0x00000D00  /* B0 Read Access Time = 13 cycles					*/
-#define B0RAT_14		0x00000E00  /* B0 Read Access Time = 14 cycles					*/
-#define B0RAT_15		0x00000F00  /* B0 Read Access Time = 15 cycles					*/
-#define B0WAT_1			0x00001000  /* B0 Write Access Time = 1 cycle					*/
-#define B0WAT_2			0x00002000  /* B0 Write Access Time = 2 cycles					*/
-#define B0WAT_3			0x00003000  /* B0 Write Access Time = 3 cycles					*/
-#define B0WAT_4			0x00004000  /* B0 Write Access Time = 4 cycles					*/
-#define B0WAT_5			0x00005000  /* B0 Write Access Time = 5 cycles					*/
-#define B0WAT_6			0x00006000  /* B0 Write Access Time = 6 cycles					*/
-#define B0WAT_7			0x00007000  /* B0 Write Access Time = 7 cycles					*/
-#define B0WAT_8			0x00008000  /* B0 Write Access Time = 8 cycles					*/
-#define B0WAT_9			0x00009000  /* B0 Write Access Time = 9 cycles					*/
-#define B0WAT_10		0x0000A000  /* B0 Write Access Time = 10 cycles					*/
-#define B0WAT_11		0x0000B000  /* B0 Write Access Time = 11 cycles					*/
-#define B0WAT_12		0x0000C000  /* B0 Write Access Time = 12 cycles					*/
-#define B0WAT_13		0x0000D000  /* B0 Write Access Time = 13 cycles					*/
-#define B0WAT_14		0x0000E000  /* B0 Write Access Time = 14 cycles					*/
-#define B0WAT_15		0x0000F000  /* B0 Write Access Time = 15 cycles					*/
-
-#define B1RDYEN			0x00010000  /* Bank 1 (B1) RDY Enable                       	*/
-#define B1RDYPOL		0x00020000  /* B1 RDY Active High                           	*/
-#define B1TT_1			0x00040000  /* B1 Transition Time (Read to Write) = 1 cycle 	*/
-#define B1TT_2			0x00080000  /* B1 Transition Time (Read to Write) = 2 cycles	*/
-#define B1TT_3			0x000C0000  /* B1 Transition Time (Read to Write) = 3 cycles	*/
-#define B1TT_4			0x00000000  /* B1 Transition Time (Read to Write) = 4 cycles	*/
-#define B1ST_1			0x00100000  /* B1 Setup Time (AOE to Read/Write) = 1 cycle  	*/
-#define B1ST_2			0x00200000  /* B1 Setup Time (AOE to Read/Write) = 2 cycles 	*/
-#define B1ST_3			0x00300000  /* B1 Setup Time (AOE to Read/Write) = 3 cycles 	*/
-#define B1ST_4			0x00000000  /* B1 Setup Time (AOE to Read/Write) = 4 cycles 	*/
-#define B1HT_1			0x00400000  /* B1 Hold Time (~Read/Write to ~AOE) = 1 cycle 	*/
-#define B1HT_2			0x00800000  /* B1 Hold Time (~Read/Write to ~AOE) = 2 cycles	*/
-#define B1HT_3			0x00C00000  /* B1 Hold Time (~Read/Write to ~AOE) = 3 cycles	*/
-#define B1HT_0			0x00000000  /* B1 Hold Time (~Read/Write to ~AOE) = 0 cycles	*/
-#define B1RAT_1			0x01000000  /* B1 Read Access Time = 1 cycle					*/
-#define B1RAT_2			0x02000000  /* B1 Read Access Time = 2 cycles					*/
-#define B1RAT_3			0x03000000  /* B1 Read Access Time = 3 cycles					*/
-#define B1RAT_4			0x04000000  /* B1 Read Access Time = 4 cycles					*/
-#define B1RAT_5			0x05000000  /* B1 Read Access Time = 5 cycles					*/
-#define B1RAT_6			0x06000000  /* B1 Read Access Time = 6 cycles					*/
-#define B1RAT_7			0x07000000  /* B1 Read Access Time = 7 cycles					*/
-#define B1RAT_8			0x08000000  /* B1 Read Access Time = 8 cycles					*/
-#define B1RAT_9			0x09000000  /* B1 Read Access Time = 9 cycles					*/
-#define B1RAT_10		0x0A000000  /* B1 Read Access Time = 10 cycles					*/
-#define B1RAT_11		0x0B000000  /* B1 Read Access Time = 11 cycles					*/
-#define B1RAT_12		0x0C000000  /* B1 Read Access Time = 12 cycles					*/
-#define B1RAT_13		0x0D000000  /* B1 Read Access Time = 13 cycles					*/
-#define B1RAT_14		0x0E000000  /* B1 Read Access Time = 14 cycles					*/
-#define B1RAT_15		0x0F000000  /* B1 Read Access Time = 15 cycles					*/
-#define B1WAT_1			0x10000000  /* B1 Write Access Time = 1 cycle					*/
-#define B1WAT_2			0x20000000  /* B1 Write Access Time = 2 cycles					*/
-#define B1WAT_3			0x30000000  /* B1 Write Access Time = 3 cycles					*/
-#define B1WAT_4			0x40000000  /* B1 Write Access Time = 4 cycles					*/
-#define B1WAT_5			0x50000000  /* B1 Write Access Time = 5 cycles					*/
-#define B1WAT_6			0x60000000  /* B1 Write Access Time = 6 cycles					*/
-#define B1WAT_7			0x70000000  /* B1 Write Access Time = 7 cycles					*/
-#define B1WAT_8			0x80000000  /* B1 Write Access Time = 8 cycles					*/
-#define B1WAT_9			0x90000000  /* B1 Write Access Time = 9 cycles					*/
-#define B1WAT_10		0xA0000000  /* B1 Write Access Time = 10 cycles					*/
-#define B1WAT_11		0xB0000000  /* B1 Write Access Time = 11 cycles					*/
-#define B1WAT_12		0xC0000000  /* B1 Write Access Time = 12 cycles					*/
-#define B1WAT_13		0xD0000000  /* B1 Write Access Time = 13 cycles					*/
-#define B1WAT_14		0xE0000000  /* B1 Write Access Time = 14 cycles					*/
-#define B1WAT_15		0xF0000000  /* B1 Write Access Time = 15 cycles					*/
-
-/* EBIU_AMBCTL1 Masks																	*/
-#define B2RDYEN			0x00000001  /* Bank 2 (B2) RDY Enable							*/
-#define B2RDYPOL		0x00000002  /* B2 RDY Active High								*/
-#define B2TT_1			0x00000004  /* B2 Transition Time (Read to Write) = 1 cycle		*/
-#define B2TT_2			0x00000008  /* B2 Transition Time (Read to Write) = 2 cycles	*/
-#define B2TT_3			0x0000000C  /* B2 Transition Time (Read to Write) = 3 cycles	*/
-#define B2TT_4			0x00000000  /* B2 Transition Time (Read to Write) = 4 cycles	*/
-#define B2ST_1			0x00000010  /* B2 Setup Time (AOE to Read/Write) = 1 cycle		*/
-#define B2ST_2			0x00000020  /* B2 Setup Time (AOE to Read/Write) = 2 cycles		*/
-#define B2ST_3			0x00000030  /* B2 Setup Time (AOE to Read/Write) = 3 cycles		*/
-#define B2ST_4			0x00000000  /* B2 Setup Time (AOE to Read/Write) = 4 cycles		*/
-#define B2HT_1			0x00000040  /* B2 Hold Time (~Read/Write to ~AOE) = 1 cycle		*/
-#define B2HT_2			0x00000080  /* B2 Hold Time (~Read/Write to ~AOE) = 2 cycles	*/
-#define B2HT_3			0x000000C0  /* B2 Hold Time (~Read/Write to ~AOE) = 3 cycles	*/
-#define B2HT_0			0x00000000  /* B2 Hold Time (~Read/Write to ~AOE) = 0 cycles	*/
-#define B2RAT_1			0x00000100  /* B2 Read Access Time = 1 cycle					*/
-#define B2RAT_2			0x00000200  /* B2 Read Access Time = 2 cycles					*/
-#define B2RAT_3			0x00000300  /* B2 Read Access Time = 3 cycles					*/
-#define B2RAT_4			0x00000400  /* B2 Read Access Time = 4 cycles					*/
-#define B2RAT_5			0x00000500  /* B2 Read Access Time = 5 cycles					*/
-#define B2RAT_6			0x00000600  /* B2 Read Access Time = 6 cycles					*/
-#define B2RAT_7			0x00000700  /* B2 Read Access Time = 7 cycles					*/
-#define B2RAT_8			0x00000800  /* B2 Read Access Time = 8 cycles					*/
-#define B2RAT_9			0x00000900  /* B2 Read Access Time = 9 cycles					*/
-#define B2RAT_10		0x00000A00  /* B2 Read Access Time = 10 cycles					*/
-#define B2RAT_11		0x00000B00  /* B2 Read Access Time = 11 cycles					*/
-#define B2RAT_12		0x00000C00  /* B2 Read Access Time = 12 cycles					*/
-#define B2RAT_13		0x00000D00  /* B2 Read Access Time = 13 cycles					*/
-#define B2RAT_14		0x00000E00  /* B2 Read Access Time = 14 cycles					*/
-#define B2RAT_15		0x00000F00  /* B2 Read Access Time = 15 cycles					*/
-#define B2WAT_1			0x00001000  /* B2 Write Access Time = 1 cycle					*/
-#define B2WAT_2			0x00002000  /* B2 Write Access Time = 2 cycles					*/
-#define B2WAT_3			0x00003000  /* B2 Write Access Time = 3 cycles					*/
-#define B2WAT_4			0x00004000  /* B2 Write Access Time = 4 cycles					*/
-#define B2WAT_5			0x00005000  /* B2 Write Access Time = 5 cycles					*/
-#define B2WAT_6			0x00006000  /* B2 Write Access Time = 6 cycles					*/
-#define B2WAT_7			0x00007000  /* B2 Write Access Time = 7 cycles					*/
-#define B2WAT_8			0x00008000  /* B2 Write Access Time = 8 cycles					*/
-#define B2WAT_9			0x00009000  /* B2 Write Access Time = 9 cycles					*/
-#define B2WAT_10		0x0000A000  /* B2 Write Access Time = 10 cycles					*/
-#define B2WAT_11		0x0000B000  /* B2 Write Access Time = 11 cycles					*/
-#define B2WAT_12		0x0000C000  /* B2 Write Access Time = 12 cycles					*/
-#define B2WAT_13		0x0000D000  /* B2 Write Access Time = 13 cycles					*/
-#define B2WAT_14		0x0000E000  /* B2 Write Access Time = 14 cycles					*/
-#define B2WAT_15		0x0000F000  /* B2 Write Access Time = 15 cycles					*/
-
-#define B3RDYEN			0x00010000  /* Bank 3 (B3) RDY Enable							*/
-#define B3RDYPOL		0x00020000  /* B3 RDY Active High								*/
-#define B3TT_1			0x00040000  /* B3 Transition Time (Read to Write) = 1 cycle		*/
-#define B3TT_2			0x00080000  /* B3 Transition Time (Read to Write) = 2 cycles	*/
-#define B3TT_3			0x000C0000  /* B3 Transition Time (Read to Write) = 3 cycles	*/
-#define B3TT_4			0x00000000  /* B3 Transition Time (Read to Write) = 4 cycles	*/
-#define B3ST_1			0x00100000  /* B3 Setup Time (AOE to Read/Write) = 1 cycle		*/
-#define B3ST_2			0x00200000  /* B3 Setup Time (AOE to Read/Write) = 2 cycles		*/
-#define B3ST_3			0x00300000  /* B3 Setup Time (AOE to Read/Write) = 3 cycles		*/
-#define B3ST_4			0x00000000  /* B3 Setup Time (AOE to Read/Write) = 4 cycles		*/
-#define B3HT_1			0x00400000  /* B3 Hold Time (~Read/Write to ~AOE) = 1 cycle		*/
-#define B3HT_2			0x00800000  /* B3 Hold Time (~Read/Write to ~AOE) = 2 cycles	*/
-#define B3HT_3			0x00C00000  /* B3 Hold Time (~Read/Write to ~AOE) = 3 cycles	*/
-#define B3HT_0			0x00000000  /* B3 Hold Time (~Read/Write to ~AOE) = 0 cycles	*/
-#define B3RAT_1			0x01000000  /* B3 Read Access Time = 1 cycle					*/
-#define B3RAT_2			0x02000000  /* B3 Read Access Time = 2 cycles					*/
-#define B3RAT_3			0x03000000  /* B3 Read Access Time = 3 cycles					*/
-#define B3RAT_4			0x04000000  /* B3 Read Access Time = 4 cycles					*/
-#define B3RAT_5			0x05000000  /* B3 Read Access Time = 5 cycles					*/
-#define B3RAT_6			0x06000000  /* B3 Read Access Time = 6 cycles					*/
-#define B3RAT_7			0x07000000  /* B3 Read Access Time = 7 cycles					*/
-#define B3RAT_8			0x08000000  /* B3 Read Access Time = 8 cycles					*/
-#define B3RAT_9			0x09000000  /* B3 Read Access Time = 9 cycles					*/
-#define B3RAT_10		0x0A000000  /* B3 Read Access Time = 10 cycles					*/
-#define B3RAT_11		0x0B000000  /* B3 Read Access Time = 11 cycles					*/
-#define B3RAT_12		0x0C000000  /* B3 Read Access Time = 12 cycles					*/
-#define B3RAT_13		0x0D000000  /* B3 Read Access Time = 13 cycles					*/
-#define B3RAT_14		0x0E000000  /* B3 Read Access Time = 14 cycles					*/
-#define B3RAT_15		0x0F000000  /* B3 Read Access Time = 15 cycles					*/
-#define B3WAT_1			0x10000000  /* B3 Write Access Time = 1 cycle					*/
-#define B3WAT_2			0x20000000  /* B3 Write Access Time = 2 cycles					*/
-#define B3WAT_3			0x30000000  /* B3 Write Access Time = 3 cycles					*/
-#define B3WAT_4			0x40000000  /* B3 Write Access Time = 4 cycles					*/
-#define B3WAT_5			0x50000000  /* B3 Write Access Time = 5 cycles					*/
-#define B3WAT_6			0x60000000  /* B3 Write Access Time = 6 cycles					*/
-#define B3WAT_7			0x70000000  /* B3 Write Access Time = 7 cycles					*/
-#define B3WAT_8			0x80000000  /* B3 Write Access Time = 8 cycles					*/
-#define B3WAT_9			0x90000000  /* B3 Write Access Time = 9 cycles					*/
-#define B3WAT_10		0xA0000000  /* B3 Write Access Time = 10 cycles					*/
-#define B3WAT_11		0xB0000000  /* B3 Write Access Time = 11 cycles					*/
-#define B3WAT_12		0xC0000000  /* B3 Write Access Time = 12 cycles					*/
-#define B3WAT_13		0xD0000000  /* B3 Write Access Time = 13 cycles					*/
-#define B3WAT_14		0xE0000000  /* B3 Write Access Time = 14 cycles					*/
-#define B3WAT_15		0xF0000000  /* B3 Write Access Time = 15 cycles					*/
-
-
-/* **********************  SDRAM CONTROLLER MASKS  **********************************************/
-/* EBIU_SDGCTL Masks																			*/
-#define SCTLE			0x00000001	/* Enable SDRAM Signals										*/
-#define CL_2			0x00000008	/* SDRAM CAS Latency = 2 cycles								*/
-#define CL_3			0x0000000C	/* SDRAM CAS Latency = 3 cycles								*/
-#define PASR_ALL		0x00000000	/* All 4 SDRAM Banks Refreshed In Self-Refresh				*/
-#define PASR_B0_B1		0x00000010	/* SDRAM Banks 0 and 1 Are Refreshed In Self-Refresh		*/
-#define PASR_B0			0x00000020	/* Only SDRAM Bank 0 Is Refreshed In Self-Refresh			*/
-#define TRAS_1			0x00000040	/* SDRAM tRAS = 1 cycle										*/
-#define TRAS_2			0x00000080	/* SDRAM tRAS = 2 cycles									*/
-#define TRAS_3			0x000000C0	/* SDRAM tRAS = 3 cycles									*/
-#define TRAS_4			0x00000100	/* SDRAM tRAS = 4 cycles									*/
-#define TRAS_5			0x00000140	/* SDRAM tRAS = 5 cycles									*/
-#define TRAS_6			0x00000180	/* SDRAM tRAS = 6 cycles									*/
-#define TRAS_7			0x000001C0	/* SDRAM tRAS = 7 cycles									*/
-#define TRAS_8			0x00000200	/* SDRAM tRAS = 8 cycles									*/
-#define TRAS_9			0x00000240	/* SDRAM tRAS = 9 cycles									*/
-#define TRAS_10			0x00000280	/* SDRAM tRAS = 10 cycles									*/
-#define TRAS_11			0x000002C0	/* SDRAM tRAS = 11 cycles									*/
-#define TRAS_12			0x00000300	/* SDRAM tRAS = 12 cycles									*/
-#define TRAS_13			0x00000340	/* SDRAM tRAS = 13 cycles									*/
-#define TRAS_14			0x00000380	/* SDRAM tRAS = 14 cycles									*/
-#define TRAS_15			0x000003C0	/* SDRAM tRAS = 15 cycles									*/
-#define TRP_1			0x00000800	/* SDRAM tRP = 1 cycle										*/
-#define TRP_2			0x00001000	/* SDRAM tRP = 2 cycles										*/
-#define TRP_3			0x00001800	/* SDRAM tRP = 3 cycles										*/
-#define TRP_4			0x00002000	/* SDRAM tRP = 4 cycles										*/
-#define TRP_5			0x00002800	/* SDRAM tRP = 5 cycles										*/
-#define TRP_6			0x00003000	/* SDRAM tRP = 6 cycles										*/
-#define TRP_7			0x00003800	/* SDRAM tRP = 7 cycles										*/
-#define TRCD_1			0x00008000	/* SDRAM tRCD = 1 cycle										*/
-#define TRCD_2			0x00010000	/* SDRAM tRCD = 2 cycles									*/
-#define TRCD_3			0x00018000	/* SDRAM tRCD = 3 cycles									*/
-#define TRCD_4			0x00020000	/* SDRAM tRCD = 4 cycles									*/
-#define TRCD_5			0x00028000	/* SDRAM tRCD = 5 cycles									*/
-#define TRCD_6			0x00030000	/* SDRAM tRCD = 6 cycles									*/
-#define TRCD_7			0x00038000	/* SDRAM tRCD = 7 cycles									*/
-#define TWR_1			0x00080000	/* SDRAM tWR = 1 cycle										*/
-#define TWR_2			0x00100000	/* SDRAM tWR = 2 cycles										*/
-#define TWR_3			0x00180000	/* SDRAM tWR = 3 cycles										*/
-#define PUPSD			0x00200000	/* Power-Up Start Delay (15 SCLK Cycles Delay)				*/
-#define PSM				0x00400000	/* Power-Up Sequence (Mode Register Before/After* Refresh)	*/
-#define PSS				0x00800000	/* Enable Power-Up Sequence on Next SDRAM Access			*/
-#define SRFS			0x01000000	/* Enable SDRAM Self-Refresh Mode							*/
-#define EBUFE			0x02000000	/* Enable External Buffering Timing							*/
-#define FBBRW			0x04000000	/* Enable Fast Back-To-Back Read To Write					*/
-#define EMREN			0x10000000	/* Extended Mode Register Enable							*/
-#define TCSR			0x20000000	/* Temp-Compensated Self-Refresh Value (85/45* Deg C)		*/
-#define CDDBG			0x40000000	/* Tristate SDRAM Controls During Bus Grant					*/
-
-/* EBIU_SDBCTL Masks																		*/
-#define EBE				0x0001		/* Enable SDRAM External Bank							*/
-#define EBSZ_16			0x0000		/* SDRAM External Bank Size = 16MB	*/
-#define EBSZ_32			0x0002		/* SDRAM External Bank Size = 32MB	*/
-#define EBSZ_64			0x0004		/* SDRAM External Bank Size = 64MB	*/
-#define EBSZ_128		0x0006		/* SDRAM External Bank Size = 128MB		*/
-#define EBSZ_256		0x0008		/* SDRAM External Bank Size = 256MB 	*/
-#define EBSZ_512		0x000A		/* SDRAM External Bank Size = 512MB		*/
-#define EBCAW_8			0x0000		/* SDRAM External Bank Column Address Width = 8 Bits	*/
-#define EBCAW_9			0x0010		/* SDRAM External Bank Column Address Width = 9 Bits	*/
-#define EBCAW_10		0x0020		/* SDRAM External Bank Column Address Width = 10 Bits	*/
-#define EBCAW_11		0x0030		/* SDRAM External Bank Column Address Width = 11 Bits	*/
-
-/* EBIU_SDSTAT Masks														*/
-#define SDCI			0x0001		/* SDRAM Controller Idle 				*/
-#define SDSRA			0x0002		/* SDRAM Self-Refresh Active			*/
-#define SDPUA			0x0004		/* SDRAM Power-Up Active 				*/
-#define SDRS			0x0008		/* SDRAM Will Power-Up On Next Access	*/
-#define SDEASE			0x0010		/* SDRAM EAB Sticky Error Status		*/
-#define BGSTAT			0x0020		/* Bus Grant Status						*/
-
-
-/* **************************  DMA CONTROLLER MASKS  ********************************/
-
-/* DMAx_PERIPHERAL_MAP, MDMA_yy_PERIPHERAL_MAP Masks								*/
-#define CTYPE			0x0040	/* DMA Channel Type Indicator (Memory/Peripheral*)	*/
-#define PMAP			0xF000	/* Peripheral Mapped To This Channel				*/
-#define PMAP_PPI		0x0000	/* 		PPI Port DMA								*/
-#define	PMAP_EMACRX		0x1000	/* 		Ethernet Receive DMA						*/
-#define PMAP_EMACTX		0x2000	/* 		Ethernet Transmit DMA						*/
-#define PMAP_SPORT0RX	0x3000	/* 		SPORT0 Receive DMA							*/
-#define PMAP_SPORT0TX	0x4000	/* 		SPORT0 Transmit DMA							*/
-#define PMAP_SPORT1RX	0x5000	/* 		SPORT1 Receive DMA							*/
-#define PMAP_SPORT1TX	0x6000	/* 		SPORT1 Transmit DMA							*/
-#define PMAP_SPI		0x7000	/* 		SPI Port DMA								*/
-#define PMAP_UART0RX	0x8000	/* 		UART0 Port Receive DMA						*/
-#define PMAP_UART0TX	0x9000	/* 		UART0 Port Transmit DMA						*/
-#define	PMAP_UART1RX	0xA000	/* 		UART1 Port Receive DMA						*/
-#define	PMAP_UART1TX	0xB000	/* 		UART1 Port Transmit DMA						*/
-
-/*  ************  PARALLEL PERIPHERAL INTERFACE (PPI) MASKS *************/
-/*  PPI_CONTROL Masks													*/
-#define PORT_EN			0x0001		/* PPI Port Enable					*/
-#define PORT_DIR		0x0002		/* PPI Port Direction				*/
-#define XFR_TYPE		0x000C		/* PPI Transfer Type				*/
-#define PORT_CFG		0x0030		/* PPI Port Configuration			*/
-#define FLD_SEL			0x0040		/* PPI Active Field Select			*/
-#define PACK_EN			0x0080		/* PPI Packing Mode					*/
-#define DMA32			0x0100		/* PPI 32-bit DMA Enable			*/
-#define SKIP_EN			0x0200		/* PPI Skip Element Enable			*/
-#define SKIP_EO			0x0400		/* PPI Skip Even/Odd Elements		*/
-#define DLEN_8			0x0000		/* Data Length = 8 Bits				*/
-#define DLEN_10			0x0800		/* Data Length = 10 Bits			*/
-#define DLEN_11			0x1000		/* Data Length = 11 Bits			*/
-#define DLEN_12			0x1800		/* Data Length = 12 Bits			*/
-#define DLEN_13			0x2000		/* Data Length = 13 Bits			*/
-#define DLEN_14			0x2800		/* Data Length = 14 Bits			*/
-#define DLEN_15			0x3000		/* Data Length = 15 Bits			*/
-#define DLEN_16			0x3800		/* Data Length = 16 Bits			*/
-#define DLENGTH			0x3800		/* PPI Data Length  */
-#define POLC			0x4000		/* PPI Clock Polarity				*/
-#define POLS			0x8000		/* PPI Frame Sync Polarity			*/
-
-/* PPI_STATUS Masks														*/
-#define FLD				0x0400		/* Field Indicator					*/
-#define FT_ERR			0x0800		/* Frame Track Error				*/
-#define OVR				0x1000		/* FIFO Overflow Error				*/
-#define UNDR			0x2000		/* FIFO Underrun Error				*/
-#define ERR_DET			0x4000		/* Error Detected Indicator			*/
-#define ERR_NCOR		0x8000		/* Error Not Corrected Indicator	*/
-
-
-/*  ********************  TWO-WIRE INTERFACE (TWI) MASKS  ***********************/
-/* TWI_CLKDIV Macros (Use: *pTWI_CLKDIV = CLKLOW(x)|CLKHI(y);  )				*/
-#define	CLKLOW(x)	((x) & 0xFF)		/* Periods Clock Is Held Low			*/
-#define CLKHI(y)	(((y)&0xFF)<<0x8)	/* Periods Before New Clock Low			*/
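/*
 * Illustrative sketch only, not part of this patch: per the usage note
 * above, a TWI_CLKDIV value is built by OR-ing CLKLOW() and CLKHI().
 * Assuming the 10 MHz internal time reference mentioned in the PRESCALE
 * comment below, a 100 kHz SCL takes 100 reference periods per cycle,
 * split here evenly between the low and high phases.
 */
static unsigned short twi_clkdiv_100khz_example(void)
{
	/* 10 MHz / 100 kHz = 100 reference periods: 50 low + 50 high. */
	return CLKLOW(50) | CLKHI(50);
}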
-
-/* TWI_PRESCALE Masks															*/
-#define	PRESCALE	0x007F		/* SCLKs Per Internal Time Reference (10MHz)	*/
-#define	TWI_ENA		0x0080		/* TWI Enable									*/
-#define	SCCB		0x0200		/* SCCB Compatibility Enable					*/
-
-/* TWI_SLAVE_CTL Masks															*/
-#define	SEN			0x0001		/* Slave Enable									*/
-#define	SADD_LEN	0x0002		/* Slave Address Length							*/
-#define	STDVAL		0x0004		/* Slave Transmit Data Valid					*/
-#define	NAK			0x0008		/* NAK/ACK* Generated At Conclusion Of Transfer */
-#define	GEN			0x0010		/* General Call Address Matching Enabled		*/
-
-/* TWI_SLAVE_STAT Masks															*/
-#define	SDIR		0x0001		/* Slave Transfer Direction (Transmit/Receive*)	*/
-#define GCALL		0x0002		/* General Call Indicator						*/
-
-/* TWI_MASTER_CTL Masks													*/
-#define	MEN			0x0001		/* Master Mode Enable						*/
-#define	MADD_LEN	0x0002		/* Master Address Length					*/
-#define	MDIR		0x0004		/* Master Transmit Direction (RX/TX*)		*/
-#define	FAST		0x0008		/* Use Fast Mode Timing Specs				*/
-#define	STOP		0x0010		/* Issue Stop Condition						*/
-#define	RSTART		0x0020		/* Repeat Start or Stop* At End Of Transfer	*/
-#define	DCNT		0x3FC0		/* Data Bytes To Transfer					*/
-#define	SDAOVR		0x4000		/* Serial Data Override						*/
-#define	SCLOVR		0x8000		/* Serial Clock Override					*/
-
-/* TWI_MASTER_STAT Masks														*/
-#define	MPROG		0x0001		/* Master Transfer In Progress					*/
-#define	LOSTARB		0x0002		/* Lost Arbitration Indicator (Xfer Aborted)	*/
-#define	ANAK		0x0004		/* Address Not Acknowledged						*/
-#define	DNAK		0x0008		/* Data Not Acknowledged						*/
-#define	BUFRDERR	0x0010		/* Buffer Read Error							*/
-#define	BUFWRERR	0x0020		/* Buffer Write Error							*/
-#define	SDASEN		0x0040		/* Serial Data Sense							*/
-#define	SCLSEN		0x0080		/* Serial Clock Sense							*/
-#define	BUSBUSY		0x0100		/* Bus Busy Indicator							*/
-
-/* TWI_INT_SRC and TWI_INT_ENABLE Masks						*/
-#define	SINIT		0x0001		/* Slave Transfer Initiated	*/
-#define	SCOMP		0x0002		/* Slave Transfer Complete	*/
-#define	SERR		0x0004		/* Slave Transfer Error		*/
-#define	SOVF		0x0008		/* Slave Overflow			*/
-#define	MCOMP		0x0010		/* Master Transfer Complete	*/
-#define	MERR		0x0020		/* Master Transfer Error	*/
-#define	XMTSERV		0x0040		/* Transmit FIFO Service	*/
-#define	RCVSERV		0x0080		/* Receive FIFO Service		*/
-
-/* TWI_FIFO_CTRL Masks												*/
-#define	XMTFLUSH	0x0001		/* Transmit Buffer Flush			*/
-#define	RCVFLUSH	0x0002		/* Receive Buffer Flush				*/
-#define	XMTINTLEN	0x0004		/* Transmit Buffer Interrupt Length	*/
-#define	RCVINTLEN	0x0008		/* Receive Buffer Interrupt Length	*/
-
-/* TWI_FIFO_STAT Masks															*/
-#define	XMTSTAT		0x0003		/* Transmit FIFO Status							*/
-#define	XMT_EMPTY	0x0000		/* 		Transmit FIFO Empty						*/
-#define	XMT_HALF	0x0001		/* 		Transmit FIFO Has 1 Byte To Write		*/
-#define	XMT_FULL	0x0003		/* 		Transmit FIFO Full (2 Bytes To Write)	*/
-
-#define	RCVSTAT		0x000C		/* Receive FIFO Status							*/
-#define	RCV_EMPTY	0x0000		/* 		Receive FIFO Empty						*/
-#define	RCV_HALF	0x0004		/* 		Receive FIFO Has 1 Byte To Read			*/
-#define	RCV_FULL	0x000C		/* 		Receive FIFO Full (2 Bytes To Read)		*/
-
-
-/* Omit CAN masks from defBF534.h */
-
-/*  *******************  PIN CONTROL REGISTER MASKS  ************************/
-/* PORT_MUX Masks															*/
-#define	PJSE			0x0001			/* Port J SPI/SPORT Enable			*/
-#define	PJSE_SPORT		0x0000			/* 		Enable TFS0/DT0PRI			*/
-#define	PJSE_SPI		0x0001			/* 		Enable SPI_SSEL3:2			*/
-
-#define	PJCE(x)			(((x)&0x3)<<1)	/* Port J CAN/SPI/SPORT Enable		*/
-#define	PJCE_SPORT		0x0000			/* 		Enable DR0SEC/DT0SEC		*/
-#define	PJCE_CAN		0x0002			/* 		Enable CAN RX/TX			*/
-#define	PJCE_SPI		0x0004			/* 		Enable SPI_SSEL7			*/
-
-#define	PFDE			0x0008			/* Port F DMA Request Enable		*/
-#define	PFDE_UART		0x0000			/* 		Enable UART0 RX/TX			*/
-#define	PFDE_DMA		0x0008			/* 		Enable DMAR1:0				*/
-
-#define	PFTE			0x0010			/* Port F Timer Enable				*/
-#define	PFTE_UART		0x0000			/*		Enable UART1 RX/TX			*/
-#define	PFTE_TIMER		0x0010			/* 		Enable TMR7:6				*/
-
-#define	PFS6E			0x0020			/* Port F SPI SSEL 6 Enable			*/
-#define	PFS6E_TIMER		0x0000			/*		Enable TMR5					*/
-#define	PFS6E_SPI		0x0020			/* 		Enable SPI_SSEL6			*/
-
-#define	PFS5E			0x0040			/* Port F SPI SSEL 5 Enable			*/
-#define	PFS5E_TIMER		0x0000			/*		Enable TMR4					*/
-#define	PFS5E_SPI		0x0040			/* 		Enable SPI_SSEL5			*/
-
-#define	PFS4E			0x0080			/* Port F SPI SSEL 4 Enable			*/
-#define	PFS4E_TIMER		0x0000			/*		Enable TMR3					*/
-#define	PFS4E_SPI		0x0080			/* 		Enable SPI_SSEL4			*/
-
-#define	PFFE			0x0100			/* Port F PPI Frame Sync Enable		*/
-#define	PFFE_TIMER		0x0000			/* 		Enable TMR2					*/
-#define	PFFE_PPI		0x0100			/* 		Enable PPI FS3				*/
-
-#define	PGSE			0x0200			/* Port G SPORT1 Secondary Enable	*/
-#define	PGSE_PPI		0x0000			/* 		Enable PPI D9:8				*/
-#define	PGSE_SPORT		0x0200			/* 		Enable DR1SEC/DT1SEC		*/
-
-#define	PGRE			0x0400			/* Port G SPORT1 Receive Enable		*/
-#define	PGRE_PPI		0x0000			/* 		Enable PPI D12:10			*/
-#define	PGRE_SPORT		0x0400			/* 		Enable DR1PRI/RFS1/RSCLK1	*/
-
-#define	PGTE			0x0800			/* Port G SPORT1 Transmit Enable	*/
-#define	PGTE_PPI		0x0000			/* 		Enable PPI D15:13			*/
-#define	PGTE_SPORT		0x0800			/* 		Enable DT1PRI/TFS1/TSCLK1	*/
-
-
-/*  ******************  HANDSHAKE DMA (HDMA) MASKS  *********************/
-/* HDMAx_CTL Masks														*/
-#define	HMDMAEN		0x0001	/* Enable Handshake DMA 0/1					*/
-#define	REP			0x0002	/* HDMA Request Polarity					*/
-#define	UTE			0x0004	/* Urgency Threshold Enable					*/
-#define	OIE			0x0010	/* Overflow Interrupt Enable				*/
-#define	BDIE		0x0020	/* Block Done Interrupt Enable				*/
-#define	MBDI		0x0040	/* Mask Block Done IRQ If Pending ECNT		*/
-#define	DRQ			0x0300	/* HDMA Request Type						*/
-#define	DRQ_NONE	0x0000	/* 		No Request							*/
-#define	DRQ_SINGLE	0x0100	/* 		Channels Request Single				*/
-#define	DRQ_MULTI	0x0200	/* 		Channels Request Multi (Default)	*/
-#define	DRQ_URGENT	0x0300	/* 		Channels Request Multi Urgent		*/
-#define	RBC			0x1000	/* Reload BCNT With IBCNT					*/
-#define	PS			0x2000	/* HDMA Pin Status							*/
-#define	OI			0x4000	/* Overflow Interrupt Generated				*/
-#define	BDI			0x8000	/* Block Done Interrupt Generated			*/
-
-/* entry addresses of the user-callable Boot ROM functions */
-
-#define _BOOTROM_RESET 0xEF000000
-#define _BOOTROM_FINAL_INIT 0xEF000002
-#define _BOOTROM_DO_MEMORY_DMA 0xEF000006
-#define _BOOTROM_BOOT_DXE_FLASH 0xEF000008
-#define _BOOTROM_BOOT_DXE_SPI 0xEF00000A
-#define _BOOTROM_BOOT_DXE_TWI 0xEF00000C
-#define _BOOTROM_GET_DXE_ADDRESS_FLASH 0xEF000010
-#define _BOOTROM_GET_DXE_ADDRESS_SPI 0xEF000012
-#define _BOOTROM_GET_DXE_ADDRESS_TWI 0xEF000014
-
-/* Alternate Deprecated Macros Provided For Backwards Code Compatibility */
-#define	PGDE_UART   PFDE_UART
-#define	PGDE_DMA    PFDE_DMA
-#define	CKELOW		SCKELOW
-
-/* ==== end from defBF534.h ==== */
-
-/* HOST Port Registers */
-
-#define                     HOST_CONTROL  0xffc03400   /* HOST Control Register */
-#define                      HOST_STATUS  0xffc03404   /* HOST Status Register */
-#define                     HOST_TIMEOUT  0xffc03408   /* HOST Acknowledge Mode Timeout Register */
-
-/* Counter Registers */
-
-#define                       CNT_CONFIG  0xffc03500   /* Configuration Register */
-#define                        CNT_IMASK  0xffc03504   /* Interrupt Mask Register */
-#define                       CNT_STATUS  0xffc03508   /* Status Register */
-#define                      CNT_COMMAND  0xffc0350c   /* Command Register */
-#define                     CNT_DEBOUNCE  0xffc03510   /* Debounce Register */
-#define                      CNT_COUNTER  0xffc03514   /* Counter Register */
-#define                          CNT_MAX  0xffc03518   /* Maximal Count Register */
-#define                          CNT_MIN  0xffc0351c   /* Minimal Count Register */
-
-/* OTP/FUSE Registers */
-
-#define                      OTP_CONTROL  0xffc03600   /* OTP/Fuse Control Register */
-#define                          OTP_BEN  0xffc03604   /* OTP/Fuse Byte Enable */
-#define                       OTP_STATUS  0xffc03608   /* OTP/Fuse Status */
-#define                       OTP_TIMING  0xffc0360c   /* OTP/Fuse Access Timing */
-
-/* Security Registers */
-
-#define                    SECURE_SYSSWT  0xffc03620   /* Secure System Switches */
-#define                   SECURE_CONTROL  0xffc03624   /* Secure Control */
-#define                    SECURE_STATUS  0xffc03628   /* Secure Status */
-
-/* OTP Read/Write Data Buffer Registers */
-
-#define                        OTP_DATA0  0xffc03680   /* OTP/Fuse Data (OTP_DATA0-3) accesses the fuse read write buffer */
-#define                        OTP_DATA1  0xffc03684   /* OTP/Fuse Data (OTP_DATA0-3) accesses the fuse read write buffer */
-#define                        OTP_DATA2  0xffc03688   /* OTP/Fuse Data (OTP_DATA0-3) accesses the fuse read write buffer */
-#define                        OTP_DATA3  0xffc0368c   /* OTP/Fuse Data (OTP_DATA0-3) accesses the fuse read write buffer */
-
-/* NFC Registers */
-
-#define                          NFC_CTL  0xffc03700   /* NAND Control Register */
-#define                         NFC_STAT  0xffc03704   /* NAND Status Register */
-#define                      NFC_IRQSTAT  0xffc03708   /* NAND Interrupt Status Register */
-#define                      NFC_IRQMASK  0xffc0370c   /* NAND Interrupt Mask Register */
-#define                         NFC_ECC0  0xffc03710   /* NAND ECC Register 0 */
-#define                         NFC_ECC1  0xffc03714   /* NAND ECC Register 1 */
-#define                         NFC_ECC2  0xffc03718   /* NAND ECC Register 2 */
-#define                         NFC_ECC3  0xffc0371c   /* NAND ECC Register 3 */
-#define                        NFC_COUNT  0xffc03720   /* NAND ECC Count Register */
-#define                          NFC_RST  0xffc03724   /* NAND ECC Reset Register */
-#define                        NFC_PGCTL  0xffc03728   /* NAND Page Control Register */
-#define                         NFC_READ  0xffc0372c   /* NAND Read Data Register */
-#define                         NFC_ADDR  0xffc03740   /* NAND Address Register */
-#define                          NFC_CMD  0xffc03744   /* NAND Command Register */
-#define                      NFC_DATA_WR  0xffc03748   /* NAND Data Write Register */
-#define                      NFC_DATA_RD  0xffc0374c   /* NAND Data Read Register */
-
-/* ********************************************************** */
-/*     SINGLE BIT MACRO PAIRS (bit mask and negated one)      */
-/*     and MULTI BIT READ MACROS                              */
-/* ********************************************************** */
-
-/* Bit masks for HOST_CONTROL */
-
-#define                   HOST_CNTR_HOST_EN  0x1        /* Host Enable */
-#define                  HOST_CNTR_nHOST_EN  0x0
-#define                  HOST_CNTR_HOST_END  0x2        /* Host Endianess */
-#define                 HOST_CNTR_nHOST_END  0x0
-#define                 HOST_CNTR_DATA_SIZE  0x4        /* Data Size */
-#define                HOST_CNTR_nDATA_SIZE  0x0
-#define                  HOST_CNTR_HOST_RST  0x8        /* Host Reset */
-#define                 HOST_CNTR_nHOST_RST  0x0
-#define                  HOST_CNTR_HRDY_OVR  0x20       /* Host Ready Override */
-#define                 HOST_CNTR_nHRDY_OVR  0x0
-#define                  HOST_CNTR_INT_MODE  0x40       /* Interrupt Mode */
-#define                 HOST_CNTR_nINT_MODE  0x0
-#define                     HOST_CNTR_BT_EN  0x80       /* Bus Timeout Enable */
-#define                   HOST_CNTR_ nBT_EN  0x0
-#define                       HOST_CNTR_EHW  0x100      /* Enable Host Write */
-#define                      HOST_CNTR_nEHW  0x0
-#define                       HOST_CNTR_EHR  0x200      /* Enable Host Read */
-#define                      HOST_CNTR_nEHR  0x0
-#define                       HOST_CNTR_BDR  0x400      /* Burst DMA Requests */
-#define                      HOST_CNTR_nBDR  0x0
-
-/* Bit masks for HOST_STATUS */
-
-#define                     HOST_STAT_READY  0x1        /* DMA Ready */
-#define                    HOST_STAT_nREADY  0x0
-#define                  HOST_STAT_FIFOFULL  0x2        /* FIFO Full */
-#define                 HOST_STAT_nFIFOFULL  0x0
-#define                 HOST_STAT_FIFOEMPTY  0x4        /* FIFO Empty */
-#define                HOST_STAT_nFIFOEMPTY  0x0
-#define                  HOST_STAT_COMPLETE  0x8        /* DMA Complete */
-#define                 HOST_STAT_nCOMPLETE  0x0
-#define                      HOST_STAT_HSHK  0x10       /* Host Handshake */
-#define                     HOST_STAT_nHSHK  0x0
-#define                   HOST_STAT_TIMEOUT  0x20       /* Host Timeout */
-#define                  HOST_STAT_nTIMEOUT  0x0
-#define                      HOST_STAT_HIRQ  0x40       /* Host Interrupt Request */
-#define                     HOST_STAT_nHIRQ  0x0
-#define                HOST_STAT_ALLOW_CNFG  0x80       /* Allow New Configuration */
-#define               HOST_STAT_nALLOW_CNFG  0x0
-#define                   HOST_STAT_DMA_DIR  0x100      /* DMA Direction */
-#define                  HOST_STAT_nDMA_DIR  0x0
-#define                       HOST_STAT_BTE  0x200      /* Bus Timeout Enabled */
-#define                      HOST_STAT_nBTE  0x0
-#define               HOST_STAT_HOSTRD_DONE  0x8000     /* Host Read Completion Interrupt */
-#define              HOST_STAT_nHOSTRD_DONE  0x0
-
-/* Bit masks for HOST_TIMEOUT */
-
-#define             HOST_COUNT_TIMEOUT  0x7ff      /* Host Timeout count */
-
-/* Bit masks for SECURE_SYSSWT */
-
-#define                   EMUDABL  0x1        /* Emulation Disable. */
-#define                  nEMUDABL  0x0
-#define                   RSTDABL  0x2        /* Reset Disable */
-#define                  nRSTDABL  0x0
-#define                   L1IDABL  0x1c       /* L1 Instruction Memory Disable. */
-#define                  L1DADABL  0xe0       /* L1 Data Bank A Memory Disable. */
-#define                  L1DBDABL  0x700      /* L1 Data Bank B Memory Disable. */
-#define                   DMA0OVR  0x800      /* DMA0 Memory Access Override */
-#define                  nDMA0OVR  0x0
-#define                   DMA1OVR  0x1000     /* DMA1 Memory Access Override */
-#define                  nDMA1OVR  0x0
-#define                    EMUOVR  0x4000     /* Emulation Override */
-#define                   nEMUOVR  0x0
-#define                    OTPSEN  0x8000     /* OTP Secrets Enable. */
-#define                   nOTPSEN  0x0
-#define                    L2DABL  0x70000    /* L2 Memory Disable. */
-
-/* Bit masks for SECURE_CONTROL */
-
-#define                   SECURE0  0x1        /* SECURE 0 */
-#define                  nSECURE0  0x0
-#define                   SECURE1  0x2        /* SECURE 1 */
-#define                  nSECURE1  0x0
-#define                   SECURE2  0x4        /* SECURE 2 */
-#define                  nSECURE2  0x0
-#define                   SECURE3  0x8        /* SECURE 3 */
-#define                  nSECURE3  0x0
-
-/* Bit masks for SECURE_STATUS */
-
-#define                   SECMODE  0x3        /* Secured Mode Control State */
-#define                       NMI  0x4        /* Non Maskable Interrupt */
-#define                      nNMI  0x0
-#define                   AFVALID  0x8        /* Authentication Firmware Valid */
-#define                  nAFVALID  0x0
-#define                    AFEXIT  0x10       /* Authentication Firmware Exit */
-#define                   nAFEXIT  0x0
-#define                   SECSTAT  0xe0       /* Secure Status */
-
-#endif /* _DEF_BF52X_H */
diff --git a/arch/blackfin/mach-bf527/include/mach/gpio.h b/arch/blackfin/mach-bf527/include/mach/gpio.h
index f80c299..fba606b 100644
--- a/arch/blackfin/mach-bf527/include/mach/gpio.h
+++ b/arch/blackfin/mach-bf527/include/mach/gpio.h
@@ -62,4 +62,8 @@
 #define PORT_G GPIO_PG0
 #define PORT_H GPIO_PH0
 
+#include <mach-common/ports-f.h>
+#include <mach-common/ports-g.h>
+#include <mach-common/ports-h.h>
+
 #endif /* _MACH_GPIO_H_ */
diff --git a/arch/blackfin/mach-bf527/include/mach/pll.h b/arch/blackfin/mach-bf527/include/mach/pll.h
index 24f1d7c..94cca67 100644
--- a/arch/blackfin/mach-bf527/include/mach/pll.h
+++ b/arch/blackfin/mach-bf527/include/mach/pll.h
@@ -1,63 +1 @@
-/*
- * Copyright 2007-2008 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later
- */
-
-#ifndef _MACH_PLL_H
-#define _MACH_PLL_H
-
-#include <asm/blackfin.h>
-#include <asm/irqflags.h>
-
-/* Writing to PLL_CTL initiates a PLL relock sequence. */
-static __inline__ void bfin_write_PLL_CTL(unsigned int val)
-{
-	unsigned long flags, iwr0, iwr1;
-
-	if (val == bfin_read_PLL_CTL())
-		return;
-
-	flags = hard_local_irq_save();
-	/* Enable the PLL Wakeup bit in SIC IWR */
-	iwr0 = bfin_read32(SIC_IWR0);
-	iwr1 = bfin_read32(SIC_IWR1);
-	/* Only allow PPL Wakeup) */
-	bfin_write32(SIC_IWR0, IWR_ENABLE(0));
-	bfin_write32(SIC_IWR1, 0);
-
-	bfin_write16(PLL_CTL, val);
-	SSYNC();
-	asm("IDLE;");
-
-	bfin_write32(SIC_IWR0, iwr0);
-	bfin_write32(SIC_IWR1, iwr1);
-	hard_local_irq_restore(flags);
-}
-
-/* Writing to VR_CTL initiates a PLL relock sequence. */
-static __inline__ void bfin_write_VR_CTL(unsigned int val)
-{
-	unsigned long flags, iwr0, iwr1;
-
-	if (val == bfin_read_VR_CTL())
-		return;
-
-	flags = hard_local_irq_save();
-	/* Enable the PLL Wakeup bit in SIC IWR */
-	iwr0 = bfin_read32(SIC_IWR0);
-	iwr1 = bfin_read32(SIC_IWR1);
-	/* Only allow PPL Wakeup) */
-	bfin_write32(SIC_IWR0, IWR_ENABLE(0));
-	bfin_write32(SIC_IWR1, 0);
-
-	bfin_write16(VR_CTL, val);
-	SSYNC();
-	asm("IDLE;");
-
-	bfin_write32(SIC_IWR0, iwr0);
-	bfin_write32(SIC_IWR1, iwr1);
-	hard_local_irq_restore(flags);
-}
-
-#endif /* _MACH_PLL_H */
+#include <mach-common/pll.h>
diff --git a/arch/blackfin/mach-bf533/boards/H8606.c b/arch/blackfin/mach-bf533/boards/H8606.c
index 2ce7b16..d4bfcea 100644
--- a/arch/blackfin/mach-bf533/boards/H8606.c
+++ b/arch/blackfin/mach-bf533/boards/H8606.c
@@ -286,7 +286,7 @@
 	},
 };
 
-unsigned short bfin_uart0_peripherals[] = {
+static unsigned short bfin_uart0_peripherals[] = {
 	P_UART0_TX, P_UART0_RX, 0
 };
 
diff --git a/arch/blackfin/mach-bf533/boards/blackstamp.c b/arch/blackfin/mach-bf533/boards/blackstamp.c
index 20c1022..87b5af3 100644
--- a/arch/blackfin/mach-bf533/boards/blackstamp.c
+++ b/arch/blackfin/mach-bf533/boards/blackstamp.c
@@ -25,7 +25,6 @@
 #include <asm/bfin5xx_spi.h>
 #include <asm/portmux.h>
 #include <asm/dpmc.h>
-#include <mach/fio_flag.h>
 
 /*
  * Name the Board for the /proc/cpuinfo
@@ -225,7 +224,7 @@
 	},
 };
 
-unsigned short bfin_uart0_peripherals[] = {
+static unsigned short bfin_uart0_peripherals[] = {
 	P_UART0_TX, P_UART0_RX, 0
 };
 
@@ -290,9 +289,9 @@
 	},
 };
 
-unsigned short bfin_sport0_peripherals[] = {
+static unsigned short bfin_sport0_peripherals[] = {
 	P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS,
-	P_SPORT0_DRPRI, P_SPORT0_RSCLK, P_SPORT0_DRSEC, P_SPORT0_DTSEC, 0
+	P_SPORT0_DRPRI, P_SPORT0_RSCLK, 0
 };
 
 static struct platform_device bfin_sport0_uart_device = {
@@ -324,9 +323,9 @@
 	},
 };
 
-unsigned short bfin_sport1_peripherals[] = {
+static unsigned short bfin_sport1_peripherals[] = {
 	P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS,
-	P_SPORT1_DRPRI, P_SPORT1_RSCLK, P_SPORT1_DRSEC, P_SPORT1_DTSEC, 0
+	P_SPORT1_DRPRI, P_SPORT1_RSCLK, 0
 };
 
 static struct platform_device bfin_sport1_uart_device = {
@@ -476,10 +475,16 @@
 		return ret;
 
 #if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE)
-	/* setup BF533_STAMP CPLD to route AMS3 to Ethernet MAC */
-	bfin_write_FIO_DIR(bfin_read_FIO_DIR() | PF0);
-	bfin_write_FIO_FLAG_S(PF0);
-	SSYNC();
+	/*
+	 * setup BF533_STAMP CPLD to route AMS3 to Ethernet MAC.
+	 * the bfin-async-map driver takes care of flipping between
+	 * flash and ethernet when necessary.
+	 */
+	ret = gpio_request(GPIO_PF0, "enet_cpld");
+	if (!ret) {
+		gpio_direction_output(GPIO_PF0, 1);
+		gpio_free(GPIO_PF0);
+	}
 #endif
 
 	spi_register_board_info(bfin_spi_board_info, ARRAY_SIZE(bfin_spi_board_info));
diff --git a/arch/blackfin/mach-bf533/boards/cm_bf533.c b/arch/blackfin/mach-bf533/boards/cm_bf533.c
index adbe62a..4d5604e 100644
--- a/arch/blackfin/mach-bf533/boards/cm_bf533.c
+++ b/arch/blackfin/mach-bf533/boards/cm_bf533.c
@@ -271,7 +271,7 @@
 	},
 };
 
-unsigned short bfin_uart0_peripherals[] = {
+static unsigned short bfin_uart0_peripherals[] = {
 	P_UART0_TX, P_UART0_RX, 0
 };
 
@@ -336,9 +336,9 @@
 	},
 };
 
-unsigned short bfin_sport0_peripherals[] = {
+static unsigned short bfin_sport0_peripherals[] = {
 	P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS,
-	P_SPORT0_DRPRI, P_SPORT0_RSCLK, P_SPORT0_DRSEC, P_SPORT0_DTSEC, 0
+	P_SPORT0_DRPRI, P_SPORT0_RSCLK, 0
 };
 
 static struct platform_device bfin_sport0_uart_device = {
@@ -370,9 +370,9 @@
 	},
 };
 
-unsigned short bfin_sport1_peripherals[] = {
+static unsigned short bfin_sport1_peripherals[] = {
 	P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS,
-	P_SPORT1_DRPRI, P_SPORT1_RSCLK, P_SPORT1_DRSEC, P_SPORT1_DTSEC, 0
+	P_SPORT1_DRPRI, P_SPORT1_RSCLK, 0
 };
 
 static struct platform_device bfin_sport1_uart_device = {
diff --git a/arch/blackfin/mach-bf533/boards/ezkit.c b/arch/blackfin/mach-bf533/boards/ezkit.c
index a1cb8e7..b67b91d 100644
--- a/arch/blackfin/mach-bf533/boards/ezkit.c
+++ b/arch/blackfin/mach-bf533/boards/ezkit.c
@@ -349,7 +349,7 @@
 	},
 };
 
-unsigned short bfin_uart0_peripherals[] = {
+static unsigned short bfin_uart0_peripherals[] = {
 	P_UART0_TX, P_UART0_RX, 0
 };
 
diff --git a/arch/blackfin/mach-bf533/boards/ip0x.c b/arch/blackfin/mach-bf533/boards/ip0x.c
index 5ba4b02..f869a37 100644
--- a/arch/blackfin/mach-bf533/boards/ip0x.c
+++ b/arch/blackfin/mach-bf533/boards/ip0x.c
@@ -22,7 +22,6 @@
 #include <asm/dma.h>
 #include <asm/bfin5xx_spi.h>
 #include <asm/portmux.h>
-#include <mach/fio_flag.h>
 
 /*
  * Name the Board for the /proc/cpuinfo
@@ -174,7 +173,7 @@
 	},
 };
 
-unsigned short bfin_uart0_peripherals[] = {
+static unsigned short bfin_uart0_peripherals[] = {
 	P_UART0_TX, P_UART0_RX, 0
 };
 
@@ -295,15 +294,7 @@
 	printk(KERN_INFO "%s(): registering device resources\n", __func__);
 	platform_add_devices(ip0x_devices, ARRAY_SIZE(ip0x_devices));
 
-#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
-	for (i = 0; i < ARRAY_SIZE(bfin_spi_board_info); ++i) {
-		int j = 1 << bfin_spi_board_info[i].chip_select;
-		/* set spi cs to 1 */
-		bfin_write_FIO_DIR(bfin_read_FIO_DIR() | j);
-		bfin_write_FIO_FLAG_S(j);
-	}
 	spi_register_board_info(bfin_spi_board_info, ARRAY_SIZE(bfin_spi_board_info));
-#endif
 
 	return 0;
 }
diff --git a/arch/blackfin/mach-bf533/boards/stamp.c b/arch/blackfin/mach-bf533/boards/stamp.c
index b3b1cde..43224ef 100644
--- a/arch/blackfin/mach-bf533/boards/stamp.c
+++ b/arch/blackfin/mach-bf533/boards/stamp.c
@@ -24,7 +24,6 @@
 #include <asm/reboot.h>
 #include <asm/portmux.h>
 #include <asm/dpmc.h>
-#include <mach/fio_flag.h>
 
 /*
  * Name the Board for the /proc/cpuinfo
@@ -354,7 +353,7 @@
 	},
 };
 
-unsigned short bfin_uart0_peripherals[] = {
+static unsigned short bfin_uart0_peripherals[] = {
 	P_UART0_TX, P_UART0_RX, 0
 };
 
@@ -419,9 +418,9 @@
 	},
 };
 
-unsigned short bfin_sport0_peripherals[] = {
+static unsigned short bfin_sport0_peripherals[] = {
 	P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS,
-	P_SPORT0_DRPRI, P_SPORT0_RSCLK, P_SPORT0_DRSEC, P_SPORT0_DTSEC, 0
+	P_SPORT0_DRPRI, P_SPORT0_RSCLK, 0
 };
 
 static struct platform_device bfin_sport0_uart_device = {
@@ -453,9 +452,9 @@
 	},
 };
 
-unsigned short bfin_sport1_peripherals[] = {
+static unsigned short bfin_sport1_peripherals[] = {
 	P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS,
-	P_SPORT1_DRPRI, P_SPORT1_RSCLK, P_SPORT1_DRSEC, P_SPORT1_DTSEC, 0
+	P_SPORT1_DRPRI, P_SPORT1_RSCLK, 0
 };
 
 static struct platform_device bfin_sport1_uart_device = {
@@ -674,10 +673,16 @@
 		return ret;
 
 #if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE)
-	/* setup BF533_STAMP CPLD to route AMS3 to Ethernet MAC */
-	bfin_write_FIO_DIR(bfin_read_FIO_DIR() | PF0);
-	bfin_write_FIO_FLAG_S(PF0);
-	SSYNC();
+	/*
+	 * setup BF533_STAMP CPLD to route AMS3 to Ethernet MAC.
+	 * the bfin-async-map driver takes care of flipping between
+	 * flash and ethernet when necessary.
+	 */
+	ret = gpio_request(GPIO_PF0, "enet_cpld");
+	if (!ret) {
+		gpio_direction_output(GPIO_PF0, 1);
+		gpio_free(GPIO_PF0);
+	}
 #endif
 
 	spi_register_board_info(bfin_spi_board_info, ARRAY_SIZE(bfin_spi_board_info));
@@ -713,7 +718,6 @@
 void native_machine_restart(char *cmd)
 {
 	/* workaround pull up on cpld / flash pin not being strong enough */
-	bfin_write_FIO_INEN(~PF0);
-	bfin_write_FIO_DIR(PF0);
-	bfin_write_FIO_FLAG_C(PF0);
+	gpio_request(GPIO_PF0, "flash_cpld");
+	gpio_direction_output(GPIO_PF0, 0);
 }
diff --git a/arch/blackfin/mach-bf533/dma.c b/arch/blackfin/mach-bf533/dma.c
index 4a14a46..1f5988d 100644
--- a/arch/blackfin/mach-bf533/dma.c
+++ b/arch/blackfin/mach-bf533/dma.c
@@ -11,7 +11,7 @@
 #include <asm/blackfin.h>
 #include <asm/dma.h>
 
-struct dma_register *dma_io_base_addr[MAX_DMA_CHANNELS] = {
+struct dma_register * const dma_io_base_addr[MAX_DMA_CHANNELS] = {
 	(struct dma_register *) DMA0_NEXT_DESC_PTR,
 	(struct dma_register *) DMA1_NEXT_DESC_PTR,
 	(struct dma_register *) DMA2_NEXT_DESC_PTR,
diff --git a/arch/blackfin/mach-bf533/include/mach/bfin_serial.h b/arch/blackfin/mach-bf533/include/mach/bfin_serial.h
new file mode 100644
index 0000000..08072c8
--- /dev/null
+++ b/arch/blackfin/mach-bf533/include/mach/bfin_serial.h
@@ -0,0 +1,14 @@
+/*
+ * mach/bfin_serial.h - Blackfin UART/Serial definitions
+ *
+ * Copyright 2006-2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#ifndef __BFIN_MACH_SERIAL_H__
+#define __BFIN_MACH_SERIAL_H__
+
+#define BFIN_UART_NR_PORTS	1
+
+#endif
diff --git a/arch/blackfin/mach-bf533/include/mach/bfin_serial_5xx.h b/arch/blackfin/mach-bf533/include/mach/bfin_serial_5xx.h
index 9e1f3de..45dcaa4 100644
--- a/arch/blackfin/mach-bf533/include/mach/bfin_serial_5xx.h
+++ b/arch/blackfin/mach-bf533/include/mach/bfin_serial_5xx.h
@@ -4,36 +4,9 @@
  * Licensed under the GPL-2 or later
  */
 
-#include <linux/serial.h>
 #include <asm/dma.h>
 #include <asm/portmux.h>
 
-#define UART_GET_CHAR(uart)     bfin_read16(((uart)->port.membase + OFFSET_RBR))
-#define UART_GET_DLL(uart)	bfin_read16(((uart)->port.membase + OFFSET_DLL))
-#define UART_GET_IER(uart)      bfin_read16(((uart)->port.membase + OFFSET_IER))
-#define UART_GET_DLH(uart)	bfin_read16(((uart)->port.membase + OFFSET_DLH))
-#define UART_GET_IIR(uart)      bfin_read16(((uart)->port.membase + OFFSET_IIR))
-#define UART_GET_LCR(uart)      bfin_read16(((uart)->port.membase + OFFSET_LCR))
-#define UART_GET_GCTL(uart)     bfin_read16(((uart)->port.membase + OFFSET_GCTL))
-
-#define UART_PUT_CHAR(uart,v)   bfin_write16(((uart)->port.membase + OFFSET_THR),v)
-#define UART_PUT_DLL(uart,v)    bfin_write16(((uart)->port.membase + OFFSET_DLL),v)
-#define UART_PUT_IER(uart,v)    bfin_write16(((uart)->port.membase + OFFSET_IER),v)
-#define UART_SET_IER(uart,v)    UART_PUT_IER(uart, UART_GET_IER(uart) | (v))
-#define UART_CLEAR_IER(uart,v)  UART_PUT_IER(uart, UART_GET_IER(uart) & ~(v))
-#define UART_PUT_DLH(uart,v)    bfin_write16(((uart)->port.membase + OFFSET_DLH),v)
-#define UART_PUT_LCR(uart,v)    bfin_write16(((uart)->port.membase + OFFSET_LCR),v)
-#define UART_PUT_GCTL(uart,v)   bfin_write16(((uart)->port.membase + OFFSET_GCTL),v)
-
-#define UART_SET_DLAB(uart)     do { UART_PUT_LCR(uart, UART_GET_LCR(uart) | DLAB); SSYNC(); } while (0)
-#define UART_CLEAR_DLAB(uart)   do { UART_PUT_LCR(uart, UART_GET_LCR(uart) & ~DLAB); SSYNC(); } while (0)
-
-#define UART_GET_CTS(x) gpio_get_value(x->cts_pin)
-#define UART_DISABLE_RTS(x) gpio_set_value(x->rts_pin, 1)
-#define UART_ENABLE_RTS(x) gpio_set_value(x->rts_pin, 0)
-#define UART_ENABLE_INTS(x, v) UART_PUT_IER(x, v)
-#define UART_DISABLE_INTS(x) UART_PUT_IER(x, 0)
-
 #ifdef CONFIG_BFIN_UART0_CTSRTS
 # define CONFIG_SERIAL_BFIN_CTSRTS
 # ifndef CONFIG_UART0_CTS_PIN
@@ -44,51 +17,6 @@
 # endif
 #endif
 
-#define BFIN_UART_TX_FIFO_SIZE	2
-
-struct bfin_serial_port {
-        struct uart_port        port;
-        unsigned int            old_status;
-	int			status_irq;
-	unsigned int lsr;
-#ifdef CONFIG_SERIAL_BFIN_DMA
-	int			tx_done;
-	int			tx_count;
-	struct circ_buf		rx_dma_buf;
-	struct timer_list       rx_dma_timer;
-	int			rx_dma_nrows;
-	unsigned int		tx_dma_channel;
-	unsigned int		rx_dma_channel;
-	struct work_struct	tx_dma_workqueue;
-#else
-# if ANOMALY_05000363
-	unsigned int anomaly_threshold;
-# endif
-#endif
-#ifdef CONFIG_SERIAL_BFIN_CTSRTS
-	struct timer_list       cts_timer;
-	int			cts_pin;
-	int			rts_pin;
-#endif
-};
-
-/* The hardware clears the LSR bits upon read, so we need to cache
- * some of the more fun bits in software so they don't get lost
- * when checking the LSR in other code paths (TX).
- */
-static inline unsigned int UART_GET_LSR(struct bfin_serial_port *uart)
-{
-	unsigned int lsr = bfin_read16(uart->port.membase + OFFSET_LSR);
-	uart->lsr |= (lsr & (BI|FE|PE|OE));
-	return lsr | uart->lsr;
-}
-
-static inline void UART_CLEAR_LSR(struct bfin_serial_port *uart)
-{
-	uart->lsr = 0;
-	bfin_write16(uart->port.membase + OFFSET_LSR, -1);
-}
-
 struct bfin_serial_res {
 	unsigned long	uart_base_addr;
 	int		uart_irq;
@@ -120,3 +48,5 @@
 };
 
 #define DRIVER_NAME "bfin-uart"
+
+#include <asm/bfin_serial.h>
diff --git a/arch/blackfin/mach-bf533/include/mach/blackfin.h b/arch/blackfin/mach-bf533/include/mach/blackfin.h
index f4bd6df..e366207 100644
--- a/arch/blackfin/mach-bf533/include/mach/blackfin.h
+++ b/arch/blackfin/mach-bf533/include/mach/blackfin.h
@@ -1,7 +1,7 @@
 /*
- * Copyright 2005-2009 Analog Devices Inc.
+ * Copyright 2005-2010 Analog Devices Inc.
  *
- * Licensed under the GPL-2 or later
+ * Licensed under the GPL-2 or later.
  */
 
 #ifndef _MACH_BLACKFIN_H_
@@ -10,26 +10,14 @@
 #define BF533_FAMILY
 
 #include "bf533.h"
-#include "defBF532.h"
 #include "anomaly.h"
 
-#if !defined(__ASSEMBLY__)
-#include "cdefBF532.h"
+#include <asm/def_LPBlackfin.h>
+#include "defBF532.h"
+
+#ifndef __ASSEMBLY__
+# include <asm/cdef_LPBlackfin.h>
+# include "cdefBF532.h"
 #endif
 
-#define BFIN_UART_NR_PORTS      1
-
-#define OFFSET_THR              0x00	/* Transmit Holding register            */
-#define OFFSET_RBR              0x00	/* Receive Buffer register              */
-#define OFFSET_DLL              0x00	/* Divisor Latch (Low-Byte)             */
-#define OFFSET_IER              0x04	/* Interrupt Enable Register            */
-#define OFFSET_DLH              0x04	/* Divisor Latch (High-Byte)            */
-#define OFFSET_IIR              0x08	/* Interrupt Identification Register    */
-#define OFFSET_LCR              0x0C	/* Line Control Register                */
-#define OFFSET_MCR              0x10	/* Modem Control Register               */
-#define OFFSET_LSR              0x14	/* Line Status Register                 */
-#define OFFSET_MSR              0x18	/* Modem Status Register                */
-#define OFFSET_SCR              0x1C	/* SCR Scratch Register                 */
-#define OFFSET_GCTL             0x24	/* Global Control Register              */
-
-#endif				/* _MACH_BLACKFIN_H_ */
+#endif
diff --git a/arch/blackfin/mach-bf533/include/mach/cdefBF532.h b/arch/blackfin/mach-bf533/include/mach/cdefBF532.h
index 401e524..fd0cbe4 100644
--- a/arch/blackfin/mach-bf533/include/mach/cdefBF532.h
+++ b/arch/blackfin/mach-bf533/include/mach/cdefBF532.h
@@ -1,5 +1,5 @@
 /*
- * Copyright 2005-2008 Analog Devices Inc.
+ * Copyright 2005-2010 Analog Devices Inc.
  *
  * Licensed under the GPL-2 or later
  */
@@ -7,9 +7,6 @@
 #ifndef _CDEF_BF532_H
 #define _CDEF_BF532_H
 
-/*include core specific register pointer definitions*/
-#include <asm/cdef_LPBlackfin.h>
-
 /* Clock and System Control (0xFFC0 0400-0xFFC0 07FF) */
 #define bfin_read_PLL_CTL()                  bfin_read16(PLL_CTL)
 #define bfin_read_PLL_STAT()                 bfin_read16(PLL_STAT)
@@ -66,16 +63,10 @@
 #define bfin_write_RTC_PREN(val)             bfin_write16(RTC_PREN,val)
 
 /* DMA Traffic controls */
-#define bfin_read_DMA_TCPER()                bfin_read16(DMA_TCPER)
-#define bfin_write_DMA_TCPER(val)            bfin_write16(DMA_TCPER,val)
-#define bfin_read_DMA_TCCNT()                bfin_read16(DMA_TCCNT)
-#define bfin_write_DMA_TCCNT(val)            bfin_write16(DMA_TCCNT,val)
-
-/* Alternate deprecated register names (below) provided for backwards code compatibility */
-#define bfin_read_DMA_TC_PER()               bfin_read16(DMA_TC_PER)
-#define bfin_write_DMA_TC_PER(val)           bfin_write16(DMA_TC_PER,val)
-#define bfin_read_DMA_TC_CNT()               bfin_read16(DMA_TC_CNT)
-#define bfin_write_DMA_TC_CNT(val)           bfin_write16(DMA_TC_CNT,val)
+#define bfin_read_DMAC_TC_PER()              bfin_read16(DMAC_TC_PER)
+#define bfin_write_DMAC_TC_PER(val)          bfin_write16(DMAC_TC_PER,val)
+#define bfin_read_DMAC_TC_CNT()              bfin_read16(DMAC_TC_CNT)
+#define bfin_write_DMAC_TC_CNT(val)          bfin_write16(DMAC_TC_CNT,val)
 
 /* General Purpose IO (0xFFC0 2400-0xFFC0 27FF) */
 #define bfin_read_FIO_DIR()                  bfin_read16(FIO_DIR)
@@ -105,6 +96,47 @@
 #define bfin_read_FIO_MASKB_T()              bfin_read16(FIO_MASKB_T)
 #define bfin_write_FIO_MASKB_T(val)          bfin_write16(FIO_MASKB_T,val)
 
+#if ANOMALY_05000311
+/* Keep at the CPP expansion to avoid circular header dependency loops */
+#define BFIN_WRITE_FIO_FLAG(name, val) \
+	do { \
+		unsigned long __flags; \
+		__flags = hard_local_irq_save(); \
+		bfin_write16(FIO_FLAG_##name, val); \
+		bfin_read_CHIPID(); \
+		hard_local_irq_restore(__flags); \
+	} while (0)
+#define bfin_write_FIO_FLAG_D(val)           BFIN_WRITE_FIO_FLAG(D, val)
+#define bfin_write_FIO_FLAG_C(val)           BFIN_WRITE_FIO_FLAG(C, val)
+#define bfin_write_FIO_FLAG_S(val)           BFIN_WRITE_FIO_FLAG(S, val)
+#define bfin_write_FIO_FLAG_T(val)           BFIN_WRITE_FIO_FLAG(T, val)
+
+#define BFIN_READ_FIO_FLAG(name) \
+	({ \
+		unsigned long __flags; \
+		u16 __ret; \
+		__flags = hard_local_irq_save(); \
+		__ret = bfin_read16(FIO_FLAG_##name); \
+		bfin_read_CHIPID(); \
+		hard_local_irq_restore(__flags); \
+		__ret; \
+	})
+#define bfin_read_FIO_FLAG_D()               BFIN_READ_FIO_FLAG(D)
+#define bfin_read_FIO_FLAG_C()               BFIN_READ_FIO_FLAG(C)
+#define bfin_read_FIO_FLAG_S()               BFIN_READ_FIO_FLAG(S)
+#define bfin_read_FIO_FLAG_T()               BFIN_READ_FIO_FLAG(T)
+
+#else
+#define bfin_write_FIO_FLAG_D(val)           bfin_write16(FIO_FLAG_D, val)
+#define bfin_write_FIO_FLAG_C(val)           bfin_write16(FIO_FLAG_C, val)
+#define bfin_write_FIO_FLAG_S(val)           bfin_write16(FIO_FLAG_S, val)
+#define bfin_write_FIO_FLAG_T(val)           bfin_write16(FIO_FLAG_T, val)
+#define bfin_read_FIO_FLAG_D()               bfin_read16(FIO_FLAG_D)
+#define bfin_read_FIO_FLAG_C()               bfin_read16(FIO_FLAG_C)
+#define bfin_read_FIO_FLAG_S()               bfin_read16(FIO_FLAG_S)
+#define bfin_read_FIO_FLAG_T()               bfin_read16(FIO_FLAG_T)
+#endif
+
 /* DMA Controller */
 #define bfin_read_DMA0_CONFIG()              bfin_read16(DMA0_CONFIG)
 #define bfin_write_DMA0_CONFIG(val)          bfin_write16(DMA0_CONFIG,val)
@@ -647,7 +679,4 @@
 #define bfin_read_PPI_FRAME()                bfin_read16(PPI_FRAME)
 #define bfin_write_PPI_FRAME(val)            bfin_write16(PPI_FRAME,val)
 
-/* These need to be last due to the cdef/linux inter-dependencies */
-#include <asm/irq.h>
-
 #endif				/* _CDEF_BF532_H */
diff --git a/arch/blackfin/mach-bf533/include/mach/defBF532.h b/arch/blackfin/mach-bf533/include/mach/defBF532.h
index 3adb0b4..2376d53 100644
--- a/arch/blackfin/mach-bf533/include/mach/defBF532.h
+++ b/arch/blackfin/mach-bf533/include/mach/defBF532.h
@@ -1,7 +1,7 @@
 /*
  * System & MMR bit and Address definitions for ADSP-BF532
  *
- * Copyright 2005-2008 Analog Devices Inc.
+ * Copyright 2005-2010 Analog Devices Inc.
  *
  * Licensed under the ADI BSD license or the GPL-2 (or later)
  */
@@ -9,9 +9,6 @@
 #ifndef _DEF_BF532_H
 #define _DEF_BF532_H
 
-/* include all Core registers and bit definitions */
-#include <asm/def_LPBlackfin.h>
-
 /*********************************************************************************** */
 /* System MMR Register Map */
 /*********************************************************************************** */
@@ -182,12 +179,8 @@
 #define EBIU_SDSTAT			0xFFC00A1C	/* SDRAM Status Register */
 
 /* DMA Traffic controls */
-#define DMA_TC_PER 0xFFC00B0C	/* Traffic Control Periods Register */
-#define DMA_TC_CNT 0xFFC00B10	/* Traffic Control Current Counts Register */
-
-/* Alternate deprecated register names (below) provided for backwards code compatibility */
-#define DMA_TCPER 0xFFC00B0C	/* Traffic Control Periods Register */
-#define DMA_TCCNT 0xFFC00B10	/* Traffic Control Current Counts Register */
+#define DMAC_TC_PER 0xFFC00B0C	/* Traffic Control Periods Register */
+#define DMAC_TC_CNT 0xFFC00B10	/* Traffic Control Current Counts Register */
 
 /* DMA Controller (0xFFC00C00 - 0xFFC00FFF) */
 #define DMA0_CONFIG		0xFFC00C08	/* DMA Channel 0 Configuration Register */
@@ -432,83 +425,6 @@
 #define IWR_ENABLE(x)	       (1 << (x))	/* Wakeup Enable Peripheral #x */
 #define IWR_DISABLE(x) (0xFFFFFFFF ^ (1 << (x)))	/* Wakeup Disable Peripheral #x */
 
-/* ***************************** UART CONTROLLER MASKS ********************** */
-
-/* UART_LCR Register */
-
-#define DLAB	0x80
-#define SB      0x40
-#define STP      0x20
-#define EPS     0x10
-#define PEN	0x08
-#define STB	0x04
-#define WLS(x)	((x-5) & 0x03)
-
-#define DLAB_P	0x07
-#define SB_P	0x06
-#define STP_P	0x05
-#define EPS_P	0x04
-#define PEN_P	0x03
-#define STB_P	0x02
-#define WLS_P1	0x01
-#define WLS_P0	0x00
-
-/* UART_MCR Register */
-#define LOOP_ENA	0x10
-#define LOOP_ENA_P	0x04
-
-/* UART_LSR Register */
-#define TEMT	0x40
-#define THRE	0x20
-#define BI	0x10
-#define FE	0x08
-#define PE	0x04
-#define OE	0x02
-#define DR	0x01
-
-#define TEMP_P	0x06
-#define THRE_P	0x05
-#define BI_P	0x04
-#define FE_P	0x03
-#define PE_P	0x02
-#define OE_P	0x01
-#define DR_P	0x00
-
-/* UART_IER Register */
-#define ELSI	0x04
-#define ETBEI	0x02
-#define ERBFI	0x01
-
-#define ELSI_P	0x02
-#define ETBEI_P	0x01
-#define ERBFI_P	0x00
-
-/* UART_IIR Register */
-#define STATUS(x)	((x << 1) & 0x06)
-#define NINT		0x01
-#define STATUS_P1	0x02
-#define STATUS_P0	0x01
-#define NINT_P		0x00
-#define IIR_TX_READY    0x02	/* UART_THR empty                               */
-#define IIR_RX_READY    0x04	/* Receive data ready                           */
-#define IIR_LINE_CHANGE 0x06	/* Receive line status                          */
-#define IIR_STATUS	0x06
-
-/* UART_GCTL Register */
-#define FFE	0x20
-#define FPE	0x10
-#define RPOLC	0x08
-#define TPOLC	0x04
-#define IREN	0x02
-#define UCEN	0x01
-
-#define FFE_P	0x05
-#define FPE_P	0x04
-#define RPOLC_P	0x03
-#define TPOLC_P	0x02
-#define IREN_P	0x01
-#define UCEN_P	0x00
-
 /*  *********  PARALLEL PERIPHERAL INTERFACE (PPI) MASKS ****************   */
 
 /*  PPI_CONTROL Masks         */
@@ -643,44 +559,6 @@
 #define ERR_TYP_P0		0x0E
 #define ERR_TYP_P1		0x0F
 
-/*/ ******************   PROGRAMMABLE FLAG MASKS  ********************* */
-
-/*  General Purpose IO (0xFFC00700 - 0xFFC007FF)  Masks */
-#define PF0         0x0001
-#define PF1         0x0002
-#define PF2         0x0004
-#define PF3         0x0008
-#define PF4         0x0010
-#define PF5         0x0020
-#define PF6         0x0040
-#define PF7         0x0080
-#define PF8         0x0100
-#define PF9         0x0200
-#define PF10        0x0400
-#define PF11        0x0800
-#define PF12        0x1000
-#define PF13        0x2000
-#define PF14        0x4000
-#define PF15        0x8000
-
-/*  General Purpose IO (0xFFC00700 - 0xFFC007FF)  BIT POSITIONS */
-#define PF0_P         0
-#define PF1_P         1
-#define PF2_P         2
-#define PF3_P         3
-#define PF4_P         4
-#define PF5_P         5
-#define PF6_P         6
-#define PF7_P         7
-#define PF8_P         8
-#define PF9_P         9
-#define PF10_P        10
-#define PF11_P        11
-#define PF12_P        12
-#define PF13_P        13
-#define PF14_P        14
-#define PF15_P        15
-
 /* *********************  ASYNCHRONOUS MEMORY CONTROLLER MASKS  ************* */
 
 /* AMGCTL Masks */
diff --git a/arch/blackfin/mach-bf533/include/mach/fio_flag.h b/arch/blackfin/mach-bf533/include/mach/fio_flag.h
deleted file mode 100644
index d0bfba0..0000000
--- a/arch/blackfin/mach-bf533/include/mach/fio_flag.h
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- * Copyright 2005-2008 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later
- */
-
-#ifndef _MACH_FIO_FLAG_H
-#define _MACH_FIO_FLAG_H
-
-#include <asm/blackfin.h>
-#include <asm/irqflags.h>
-
-#if ANOMALY_05000311
-#define BFIN_WRITE_FIO_FLAG(name) \
-static inline void bfin_write_FIO_FLAG_##name(unsigned short val) \
-{ \
-	unsigned long flags; \
-	flags = hard_local_irq_save(); \
-	bfin_write16(FIO_FLAG_##name, val); \
-	bfin_read_CHIPID(); \
-	hard_local_irq_restore(flags); \
-}
-BFIN_WRITE_FIO_FLAG(D)
-BFIN_WRITE_FIO_FLAG(C)
-BFIN_WRITE_FIO_FLAG(S)
-BFIN_WRITE_FIO_FLAG(T)
-
-#define BFIN_READ_FIO_FLAG(name) \
-static inline u16 bfin_read_FIO_FLAG_##name(void) \
-{ \
-	unsigned long flags; \
-	u16 ret; \
-	flags = hard_local_irq_save(); \
-	ret = bfin_read16(FIO_FLAG_##name); \
-	bfin_read_CHIPID(); \
-	hard_local_irq_restore(flags); \
-	return ret; \
-}
-BFIN_READ_FIO_FLAG(D)
-BFIN_READ_FIO_FLAG(C)
-BFIN_READ_FIO_FLAG(S)
-BFIN_READ_FIO_FLAG(T)
-
-#else
-#define bfin_write_FIO_FLAG_D(val)           bfin_write16(FIO_FLAG_D, val)
-#define bfin_write_FIO_FLAG_C(val)           bfin_write16(FIO_FLAG_C, val)
-#define bfin_write_FIO_FLAG_S(val)           bfin_write16(FIO_FLAG_S, val)
-#define bfin_write_FIO_FLAG_T(val)           bfin_write16(FIO_FLAG_T, val)
-#define bfin_read_FIO_FLAG_T()               bfin_read16(FIO_FLAG_T)
-#define bfin_read_FIO_FLAG_C()               bfin_read16(FIO_FLAG_C)
-#define bfin_read_FIO_FLAG_S()               bfin_read16(FIO_FLAG_S)
-#define bfin_read_FIO_FLAG_D()               bfin_read16(FIO_FLAG_D)
-#endif
-
-#endif /* _MACH_FIO_FLAG_H */
diff --git a/arch/blackfin/mach-bf533/include/mach/gpio.h b/arch/blackfin/mach-bf533/include/mach/gpio.h
index e02416d..cce4f8f 100644
--- a/arch/blackfin/mach-bf533/include/mach/gpio.h
+++ b/arch/blackfin/mach-bf533/include/mach/gpio.h
@@ -28,4 +28,6 @@
 
 #define PORT_F GPIO_PF0
 
+#include <mach-common/ports-f.h>
+
 #endif /* _MACH_GPIO_H_ */
diff --git a/arch/blackfin/mach-bf533/include/mach/pll.h b/arch/blackfin/mach-bf533/include/mach/pll.h
index 169c106..94cca67 100644
--- a/arch/blackfin/mach-bf533/include/mach/pll.h
+++ b/arch/blackfin/mach-bf533/include/mach/pll.h
@@ -1,57 +1 @@
-/*
- * Copyright 2005-2008 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later
- */
-
-#ifndef _MACH_PLL_H
-#define _MACH_PLL_H
-
-#include <asm/blackfin.h>
-#include <asm/irqflags.h>
-
-/* Writing to PLL_CTL initiates a PLL relock sequence. */
-static __inline__ void bfin_write_PLL_CTL(unsigned int val)
-{
-	unsigned long flags, iwr;
-
-	if (val == bfin_read_PLL_CTL())
-		return;
-
-	flags = hard_local_irq_save();
-	/* Enable the PLL Wakeup bit in SIC IWR */
-	iwr = bfin_read32(SIC_IWR);
-	/* Only allow PPL Wakeup) */
-	bfin_write32(SIC_IWR, IWR_ENABLE(0));
-
-	bfin_write16(PLL_CTL, val);
-	SSYNC();
-	asm("IDLE;");
-
-	bfin_write32(SIC_IWR, iwr);
-	hard_local_irq_restore(flags);
-}
-
-/* Writing to VR_CTL initiates a PLL relock sequence. */
-static __inline__ void bfin_write_VR_CTL(unsigned int val)
-{
-	unsigned long flags, iwr;
-
-	if (val == bfin_read_VR_CTL())
-		return;
-
-	flags = hard_local_irq_save();
-	/* Enable the PLL Wakeup bit in SIC IWR */
-	iwr = bfin_read32(SIC_IWR);
-	/* Only allow PPL Wakeup) */
-	bfin_write32(SIC_IWR, IWR_ENABLE(0));
-
-	bfin_write16(VR_CTL, val);
-	SSYNC();
-	asm("IDLE;");
-
-	bfin_write32(SIC_IWR, iwr);
-	hard_local_irq_restore(flags);
-}
-
-#endif /* _MACH_PLL_H */
+#include <mach-common/pll.h>
diff --git a/arch/blackfin/mach-bf537/boards/Kconfig b/arch/blackfin/mach-bf537/boards/Kconfig
index 44132fd..a44bf3a 100644
--- a/arch/blackfin/mach-bf537/boards/Kconfig
+++ b/arch/blackfin/mach-bf537/boards/Kconfig
@@ -39,4 +39,10 @@
 	help
 	  Board supply package for CSP Minotaur
 
+config DNP5370
+	bool "SSV Dil/NetPC DNP/5370"
+	depends on (BF537)
+	help
+	  Board supply package for DNP/5370 DIL64 module
+
 endchoice
diff --git a/arch/blackfin/mach-bf537/boards/Makefile b/arch/blackfin/mach-bf537/boards/Makefile
index 7e6aa4e..fe42258 100644
--- a/arch/blackfin/mach-bf537/boards/Makefile
+++ b/arch/blackfin/mach-bf537/boards/Makefile
@@ -8,3 +8,4 @@
 obj-$(CONFIG_BFIN537_BLUETECHNIX_TCM)  += tcm_bf537.o
 obj-$(CONFIG_PNAV10)                   += pnav10.o
 obj-$(CONFIG_CAMSIG_MINOTAUR)          += minotaur.o
+obj-$(CONFIG_DNP5370)                  += dnp5370.o
diff --git a/arch/blackfin/mach-bf537/boards/cm_bf537e.c b/arch/blackfin/mach-bf537/boards/cm_bf537e.c
index 836698c..2c776e1 100644
--- a/arch/blackfin/mach-bf537/boards/cm_bf537e.c
+++ b/arch/blackfin/mach-bf537/boards/cm_bf537e.c
@@ -373,7 +373,7 @@
 #endif
 };
 
-unsigned short bfin_uart0_peripherals[] = {
+static unsigned short bfin_uart0_peripherals[] = {
 	P_UART0_TX, P_UART0_RX, 0
 };
 
@@ -434,7 +434,7 @@
 #endif
 };
 
-unsigned short bfin_uart1_peripherals[] = {
+static unsigned short bfin_uart1_peripherals[] = {
 	P_UART1_TX, P_UART1_RX, 0
 };
 
@@ -545,9 +545,9 @@
 	},
 };
 
-unsigned short bfin_sport0_peripherals[] = {
+static unsigned short bfin_sport0_peripherals[] = {
 	P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS,
-	P_SPORT0_DRPRI, P_SPORT0_RSCLK, P_SPORT0_DRSEC, P_SPORT0_DTSEC, 0
+	P_SPORT0_DRPRI, P_SPORT0_RSCLK, 0
 };
 
 static struct platform_device bfin_sport0_uart_device = {
@@ -579,9 +579,9 @@
 	},
 };
 
-unsigned short bfin_sport1_peripherals[] = {
+static unsigned short bfin_sport1_peripherals[] = {
 	P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS,
-	P_SPORT1_DRPRI, P_SPORT1_RSCLK, P_SPORT1_DRSEC, P_SPORT1_DTSEC, 0
+	P_SPORT1_DRPRI, P_SPORT1_RSCLK, 0
 };
 
 static struct platform_device bfin_sport1_uart_device = {
diff --git a/arch/blackfin/mach-bf537/boards/cm_bf537u.c b/arch/blackfin/mach-bf537/boards/cm_bf537u.c
index 2a85670..0856611 100644
--- a/arch/blackfin/mach-bf537/boards/cm_bf537u.c
+++ b/arch/blackfin/mach-bf537/boards/cm_bf537u.c
@@ -356,7 +356,7 @@
 	},
 };
 
-unsigned short bfin_uart0_peripherals[] = {
+static unsigned short bfin_uart0_peripherals[] = {
 	P_UART0_TX, P_UART0_RX, 0
 };
 
@@ -399,7 +399,7 @@
 	},
 };
 
-unsigned short bfin_uart1_peripherals[] = {
+static unsigned short bfin_uart1_peripherals[] = {
 	P_UART1_TX, P_UART1_RX, 0
 };
 
@@ -510,9 +510,9 @@
 	},
 };
 
-unsigned short bfin_sport0_peripherals[] = {
+static unsigned short bfin_sport0_peripherals[] = {
 	P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS,
-	P_SPORT0_DRPRI, P_SPORT0_RSCLK, P_SPORT0_DRSEC, P_SPORT0_DTSEC, 0
+	P_SPORT0_DRPRI, P_SPORT0_RSCLK, 0
 };
 
 static struct platform_device bfin_sport0_uart_device = {
@@ -544,9 +544,9 @@
 	},
 };
 
-unsigned short bfin_sport1_peripherals[] = {
+static unsigned short bfin_sport1_peripherals[] = {
 	P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS,
-	P_SPORT1_DRPRI, P_SPORT1_RSCLK, P_SPORT1_DRSEC, P_SPORT1_DTSEC, 0
+	P_SPORT1_DRPRI, P_SPORT1_RSCLK, 0
 };
 
 static struct platform_device bfin_sport1_uart_device = {
diff --git a/arch/blackfin/mach-bf537/boards/dnp5370.c b/arch/blackfin/mach-bf537/boards/dnp5370.c
new file mode 100644
index 0000000..e1e9ea0
--- /dev/null
+++ b/arch/blackfin/mach-bf537/boards/dnp5370.c
@@ -0,0 +1,418 @@
+/*
+ * This is the configuration for SSV Dil/NetPC DNP/5370 board.
+ *
+ * DIL module:         http://www.dilnetpc.com/dnp0086.htm
+ * SK28 (starter kit): http://www.dilnetpc.com/dnp0088.htm
+ *
+ * Copyright 2010 3ality Digital Systems
+ * Copyright 2005 National ICT Australia (NICTA)
+ * Copyright 2004-2006 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/plat-ram.h>
+#include <linux/mtd/physmap.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/flash.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/i2c.h>
+#include <linux/spi/mmc_spi.h>
+#include <linux/phy.h>
+#include <asm/dma.h>
+#include <asm/bfin5xx_spi.h>
+#include <asm/reboot.h>
+#include <asm/portmux.h>
+#include <asm/dpmc.h>
+
+/*
+ * Name the Board for the /proc/cpuinfo
+ */
+const char bfin_board_name[] = "DNP/5370";
+#define FLASH_MAC               0x202f0000
+#define CONFIG_MTD_PHYSMAP_LEN  0x300000
+
+#if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE)
+static struct platform_device rtc_device = {
+	.name = "rtc-bfin",
+	.id   = -1,
+};
+#endif
+
+#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
+#include <linux/bfin_mac.h>
+static const unsigned short bfin_mac_peripherals[] = P_RMII0;
+
+static struct bfin_phydev_platform_data bfin_phydev_data[] = {
+	{
+		.addr = 1,
+		.irq = PHY_POLL, /* IRQ_MAC_PHYINT */
+	},
+};
+
+static struct bfin_mii_bus_platform_data bfin_mii_bus_data = {
+	.phydev_number   = 1,
+	.phydev_data     = bfin_phydev_data,
+	.phy_mode        = PHY_INTERFACE_MODE_RMII,
+	.mac_peripherals = bfin_mac_peripherals,
+};
+
+static struct platform_device bfin_mii_bus = {
+	.name = "bfin_mii_bus",
+	.dev = {
+		.platform_data = &bfin_mii_bus_data,
+	}
+};
+
+static struct platform_device bfin_mac_device = {
+	.name = "bfin_mac",
+	.dev = {
+		.platform_data = &bfin_mii_bus,
+	}
+};
+#endif
+
+#if defined(CONFIG_MTD_PHYSMAP) || defined(CONFIG_MTD_PHYSMAP_MODULE)
+static struct mtd_partition asmb_flash_partitions[] = {
+	{
+		.name       = "bootloader(nor)",
+		.size       = 0x30000,
+		.offset     = 0,
+	}, {
+		.name       = "linux kernel and rootfs(nor)",
+		.size       = 0x300000 - 0x30000 - 0x10000,
+		.offset     = MTDPART_OFS_APPEND,
+	}, {
+		.name       = "MAC address(nor)",
+		.size       = 0x10000,
+		.offset     = MTDPART_OFS_APPEND,
+		.mask_flags = MTD_WRITEABLE,
+	}
+};
+
+static struct physmap_flash_data asmb_flash_data = {
+	.width      = 1,
+	.parts      = asmb_flash_partitions,
+	.nr_parts   = ARRAY_SIZE(asmb_flash_partitions),
+};
+
+static struct resource asmb_flash_resource = {
+	.start = 0x20000000,
+	.end   = 0x202fffff,
+	.flags = IORESOURCE_MEM,
+};
+
+/* 4 MB NOR flash attached to async memory banks 0-2,
+ * therefore only 3 MB visible.
+ */
+static struct platform_device asmb_flash_device = {
+	.name	  = "physmap-flash",
+	.id	  = 0,
+	.dev = {
+		.platform_data = &asmb_flash_data,
+	},
+	.num_resources = 1,
+	.resource      = &asmb_flash_resource,
+};
+#endif
+
+#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+
+#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
+
+#define MMC_SPI_CARD_DETECT_INT IRQ_PF5
+
+static int bfin_mmc_spi_init(struct device *dev,
+	irqreturn_t (*detect_int)(int, void *), void *data)
+{
+	return request_irq(MMC_SPI_CARD_DETECT_INT, detect_int,
+		IRQF_TRIGGER_FALLING, "mmc-spi-detect", data);
+}
+
+static void bfin_mmc_spi_exit(struct device *dev, void *data)
+{
+	free_irq(MMC_SPI_CARD_DETECT_INT, data);
+}
+
+static struct bfin5xx_spi_chip mmc_spi_chip_info = {
+	.enable_dma    = 0,	 /* use no dma transfer with this chip*/
+	.bits_per_word = 8,
+};
+
+static struct mmc_spi_platform_data bfin_mmc_spi_pdata = {
+	.init = bfin_mmc_spi_init,
+	.exit = bfin_mmc_spi_exit,
+	.detect_delay = 100, /* msecs */
+};
+#endif
+
+#if defined(CONFIG_MTD_DATAFLASH) || defined(CONFIG_MTD_DATAFLASH_MODULE)
+/* This mapping is for at45db642 it has 1056 page size,
+ * partition size and offset should be page aligned
+ */
+static struct mtd_partition bfin_spi_dataflash_partitions[] = {
+	{
+		.name   = "JFFS2 dataflash(nor)",
+#ifdef CONFIG_MTD_PAGESIZE_1024
+		.offset = 0x40000,
+		.size   = 0x7C0000,
+#else
+		.offset = 0x0,
+		.size   = 0x840000,
+#endif
+	}
+};
+
+static struct flash_platform_data bfin_spi_dataflash_data = {
+	.name     = "mtd_dataflash",
+	.parts    = bfin_spi_dataflash_partitions,
+	.nr_parts = ARRAY_SIZE(bfin_spi_dataflash_partitions),
+	.type     = "mtd_dataflash",
+};
+
+static struct bfin5xx_spi_chip spi_dataflash_chip_info = {
+	.enable_dma    = 0,	 /* use no dma transfer with this chip*/
+	.bits_per_word = 8,
+};
+#endif
+
+static struct spi_board_info bfin_spi_board_info[] __initdata = {
+/* SD/MMC card reader at SPI bus */
+#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
+	{
+		.modalias	 = "mmc_spi",
+		.max_speed_hz    = 20000000,
+		.bus_num	 = 0,
+		.chip_select     = 1,
+		.platform_data   = &bfin_mmc_spi_pdata,
+		.controller_data = &mmc_spi_chip_info,
+		.mode	         = SPI_MODE_3,
+	},
+#endif
+
+/* 8 Megabyte Atmel NOR flash chip at SPI bus */
+#if defined(CONFIG_MTD_DATAFLASH) || defined(CONFIG_MTD_DATAFLASH_MODULE)
+	{
+	.modalias        = "mtd_dataflash",
+	.max_speed_hz    = 16700000,
+	.bus_num         = 0,
+	.chip_select     = 2,
+	.platform_data   = &bfin_spi_dataflash_data,
+	.controller_data = &spi_dataflash_chip_info,
+	.mode            = SPI_MODE_3, /* SPI_CPHA and SPI_CPOL */
+	},
+#endif
+};
+
+/* SPI controller data */
+/* SPI (0) */
+static struct resource bfin_spi0_resource[] = {
+	[0] = {
+		.start = SPI0_REGBASE,
+		.end   = SPI0_REGBASE + 0xFF,
+		.flags = IORESOURCE_MEM,
+	},
+	[1] = {
+		.start = CH_SPI,
+		.end   = CH_SPI,
+		.flags = IORESOURCE_DMA,
+	},
+	[2] = {
+		.start = IRQ_SPI,
+		.end   = IRQ_SPI,
+		.flags = IORESOURCE_IRQ,
+	},
+};
+
+static struct bfin5xx_spi_master spi_bfin_master_info = {
+	.num_chipselect = 8,
+	.enable_dma     = 1,  /* master has the ability to do dma transfer */
+	.pin_req        = {P_SPI0_SCK, P_SPI0_MISO, P_SPI0_MOSI, 0},
+};
+
+static struct platform_device spi_bfin_master_device = {
+	.name          = "bfin-spi",
+	.id            = 0, /* Bus number */
+	.num_resources = ARRAY_SIZE(bfin_spi0_resource),
+	.resource      = bfin_spi0_resource,
+	.dev           = {
+		.platform_data = &spi_bfin_master_info, /* Passed to driver */
+	},
+};
+#endif
+
+#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE)
+#ifdef CONFIG_SERIAL_BFIN_UART0
+static struct resource bfin_uart0_resources[] = {
+	{
+		.start = UART0_THR,
+		.end = UART0_GCTL+2,
+		.flags = IORESOURCE_MEM,
+	},
+	{
+		.start = IRQ_UART0_RX,
+		.end = IRQ_UART0_RX+1,
+		.flags = IORESOURCE_IRQ,
+	},
+	{
+		.start = IRQ_UART0_ERROR,
+		.end = IRQ_UART0_ERROR,
+		.flags = IORESOURCE_IRQ,
+	},
+	{
+		.start = CH_UART0_TX,
+		.end = CH_UART0_TX,
+		.flags = IORESOURCE_DMA,
+	},
+	{
+		.start = CH_UART0_RX,
+		.end = CH_UART0_RX,
+		.flags = IORESOURCE_DMA,
+	},
+};
+
+static unsigned short bfin_uart0_peripherals[] = {
+	P_UART0_TX, P_UART0_RX, 0
+};
+
+static struct platform_device bfin_uart0_device = {
+	.name = "bfin-uart",
+	.id = 0,
+	.num_resources = ARRAY_SIZE(bfin_uart0_resources),
+	.resource = bfin_uart0_resources,
+	.dev = {
+		.platform_data = &bfin_uart0_peripherals, /* Passed to driver */
+	},
+};
+#endif
+
+#ifdef CONFIG_SERIAL_BFIN_UART1
+static struct resource bfin_uart1_resources[] = {
+	{
+		.start = UART1_THR,
+		.end   = UART1_GCTL+2,
+		.flags = IORESOURCE_MEM,
+	},
+	{
+		.start = IRQ_UART1_RX,
+		.end   = IRQ_UART1_RX+1,
+		.flags = IORESOURCE_IRQ,
+	},
+	{
+		.start = IRQ_UART1_ERROR,
+		.end   = IRQ_UART1_ERROR,
+		.flags = IORESOURCE_IRQ,
+	},
+	{
+		.start = CH_UART1_TX,
+		.end   = CH_UART1_TX,
+		.flags = IORESOURCE_DMA,
+	},
+	{
+		.start = CH_UART1_RX,
+		.end   = CH_UART1_RX,
+		.flags = IORESOURCE_DMA,
+	},
+};
+
+static unsigned short bfin_uart1_peripherals[] = {
+	P_UART1_TX, P_UART1_RX, 0
+};
+
+static struct platform_device bfin_uart1_device = {
+	.name          = "bfin-uart",
+	.id            = 1,
+	.num_resources = ARRAY_SIZE(bfin_uart1_resources),
+	.resource      = bfin_uart1_resources,
+	.dev = {
+		.platform_data = &bfin_uart1_peripherals, /* Passed to driver */
+	},
+};
+#endif
+#endif
+
+#if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE)
+static struct resource bfin_twi0_resource[] = {
+	[0] = {
+		.start = TWI0_REGBASE,
+		.end   = TWI0_REGBASE + 0xff,
+		.flags = IORESOURCE_MEM,
+	},
+	[1] = {
+		.start = IRQ_TWI,
+		.end   = IRQ_TWI,
+		.flags = IORESOURCE_IRQ,
+	},
+};
+
+static struct platform_device i2c_bfin_twi_device = {
+	.name          = "i2c-bfin-twi",
+	.id            = 0,
+	.num_resources = ARRAY_SIZE(bfin_twi0_resource),
+	.resource      = bfin_twi0_resource,
+};
+#endif
+
+static struct platform_device *dnp5370_devices[] __initdata = {
+
+#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE)
+#ifdef CONFIG_SERIAL_BFIN_UART0
+	&bfin_uart0_device,
+#endif
+#ifdef CONFIG_SERIAL_BFIN_UART1
+	&bfin_uart1_device,
+#endif
+#endif
+
+#if defined(CONFIG_MTD_PHYSMAP) || defined(CONFIG_MTD_PHYSMAP_MODULE)
+	&asmb_flash_device,
+#endif
+
+#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
+	&bfin_mii_bus,
+	&bfin_mac_device,
+#endif
+
+#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
+	&spi_bfin_master_device,
+#endif
+
+#if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE)
+	&i2c_bfin_twi_device,
+#endif
+
+#if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE)
+	&rtc_device,
+#endif
+
+};
+
+static int __init dnp5370_init(void)
+{
+	printk(KERN_INFO "DNP/5370: registering device resources\n");
+	platform_add_devices(dnp5370_devices, ARRAY_SIZE(dnp5370_devices));
+	printk(KERN_INFO "DNP/5370: registering %zu SPI slave devices\n",
+	       ARRAY_SIZE(bfin_spi_board_info));
+	spi_register_board_info(bfin_spi_board_info, ARRAY_SIZE(bfin_spi_board_info));
+	printk(KERN_INFO "DNP/5370: MAC %pM\n", (void *)FLASH_MAC);
+	return 0;
+}
+arch_initcall(dnp5370_init);
+
+/*
+ * Currently the MAC address is saved in Flash by U-Boot
+ */
+void bfin_get_ether_addr(char *addr)
+{
+	*(u32 *)(&(addr[0])) = bfin_read32(FLASH_MAC);
+	*(u16 *)(&(addr[4])) = bfin_read16(FLASH_MAC + 4);
+}
+EXPORT_SYMBOL(bfin_get_ether_addr);
diff --git a/arch/blackfin/mach-bf537/boards/minotaur.c b/arch/blackfin/mach-bf537/boards/minotaur.c
index 4980051..bfb3671 100644
--- a/arch/blackfin/mach-bf537/boards/minotaur.c
+++ b/arch/blackfin/mach-bf537/boards/minotaur.c
@@ -263,7 +263,7 @@
 	},
 };
 
-unsigned short bfin_uart0_peripherals[] = {
+static unsigned short bfin_uart0_peripherals[] = {
 	P_UART0_TX, P_UART0_RX, 0
 };
 
@@ -306,7 +306,7 @@
 	},
 };
 
-unsigned short bfin_uart1_peripherals[] = {
+static unsigned short bfin_uart1_peripherals[] = {
 	P_UART1_TX, P_UART1_RX, 0
 };
 
@@ -419,9 +419,9 @@
 	},
 };
 
-unsigned short bfin_sport0_peripherals[] = {
+static unsigned short bfin_sport0_peripherals[] = {
 	P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS,
-	P_SPORT0_DRPRI, P_SPORT0_RSCLK, P_SPORT0_DRSEC, P_SPORT0_DTSEC, 0
+	P_SPORT0_DRPRI, P_SPORT0_RSCLK, 0
 };
 
 static struct platform_device bfin_sport0_uart_device = {
@@ -453,9 +453,9 @@
 	},
 };
 
-unsigned short bfin_sport1_peripherals[] = {
+static unsigned short bfin_sport1_peripherals[] = {
 	P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS,
-	P_SPORT1_DRPRI, P_SPORT1_RSCLK, P_SPORT1_DRSEC, P_SPORT1_DTSEC, 0
+	P_SPORT1_DRPRI, P_SPORT1_RSCLK, 0
 };
 
 static struct platform_device bfin_sport1_uart_device = {
diff --git a/arch/blackfin/mach-bf537/boards/pnav10.c b/arch/blackfin/mach-bf537/boards/pnav10.c
index b958078..9389f03 100644
--- a/arch/blackfin/mach-bf537/boards/pnav10.c
+++ b/arch/blackfin/mach-bf537/boards/pnav10.c
@@ -367,7 +367,7 @@
 	},
 };
 
-unsigned short bfin_uart0_peripherals[] = {
+static unsigned short bfin_uart0_peripherals[] = {
 	P_UART0_TX, P_UART0_RX, 0
 };
 
@@ -410,7 +410,7 @@
 	},
 };
 
-unsigned short bfin_uart1_peripherals[] = {
+static unsigned short bfin_uart1_peripherals[] = {
 	P_UART1_TX, P_UART1_RX, 0
 };
 
diff --git a/arch/blackfin/mach-bf537/boards/stamp.c b/arch/blackfin/mach-bf537/boards/stamp.c
index 3aa344c..2c69785 100644
--- a/arch/blackfin/mach-bf537/boards/stamp.c
+++ b/arch/blackfin/mach-bf537/boards/stamp.c
@@ -289,7 +289,7 @@
 #endif
 
 #if defined(CONFIG_CAN_BFIN) || defined(CONFIG_CAN_BFIN_MODULE)
-unsigned short bfin_can_peripherals[] = {
+static unsigned short bfin_can_peripherals[] = {
 	P_CAN0_RX, P_CAN0_TX, 0
 };
 
@@ -693,7 +693,7 @@
 #endif
 
 #if defined(CONFIG_AD2S120X) || defined(CONFIG_AD2S120X_MODULE)
-unsigned short ad2s120x_platform_data[] = {
+static unsigned short ad2s120x_platform_data[] = {
 	/* used as SAMPLE and RDVEL */
 	GPIO_PF5, GPIO_PF6, 0
 };
@@ -705,7 +705,7 @@
 #endif
 
 #if defined(CONFIG_AD2S1210) || defined(CONFIG_AD2S1210_MODULE)
-unsigned short ad2s1210_platform_data[] = {
+static unsigned short ad2s1210_platform_data[] = {
 	/* use as SAMPLE, A0, A1 */
 	GPIO_PF7, GPIO_PF8, GPIO_PF9,
 # if defined(CONFIG_AD2S1210_GPIO_INPUT) || defined(CONFIG_AD2S1210_GPIO_OUTPUT)
@@ -1717,7 +1717,7 @@
 #endif
 };
 
-unsigned short bfin_uart0_peripherals[] = {
+static unsigned short bfin_uart0_peripherals[] = {
 	P_UART0_TX, P_UART0_RX, 0
 };
 
@@ -1760,7 +1760,7 @@
 	},
 };
 
-unsigned short bfin_uart1_peripherals[] = {
+static unsigned short bfin_uart1_peripherals[] = {
 	P_UART1_TX, P_UART1_RX, 0
 };
 
@@ -2447,9 +2447,9 @@
 	},
 };
 
-unsigned short bfin_sport0_peripherals[] = {
+static unsigned short bfin_sport0_peripherals[] = {
 	P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS,
-	P_SPORT0_DRPRI, P_SPORT0_RSCLK, P_SPORT0_DRSEC, P_SPORT0_DTSEC, 0
+	P_SPORT0_DRPRI, P_SPORT0_RSCLK, 0
 };
 
 static struct platform_device bfin_sport0_uart_device = {
@@ -2481,9 +2481,9 @@
 	},
 };
 
-unsigned short bfin_sport1_peripherals[] = {
+static unsigned short bfin_sport1_peripherals[] = {
 	P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS,
-	P_SPORT1_DRPRI, P_SPORT1_RSCLK, P_SPORT1_DRSEC, P_SPORT1_DTSEC, 0
+	P_SPORT1_DRPRI, P_SPORT1_RSCLK, 0
 };
 
 static struct platform_device bfin_sport1_uart_device = {
diff --git a/arch/blackfin/mach-bf537/boards/tcm_bf537.c b/arch/blackfin/mach-bf537/boards/tcm_bf537.c
index 31498ad..0761b20 100644
--- a/arch/blackfin/mach-bf537/boards/tcm_bf537.c
+++ b/arch/blackfin/mach-bf537/boards/tcm_bf537.c
@@ -356,7 +356,7 @@
 	},
 };
 
-unsigned short bfin_uart0_peripherals[] = {
+static unsigned short bfin_uart0_peripherals[] = {
 	P_UART0_TX, P_UART0_RX, 0
 };
 
@@ -399,7 +399,7 @@
 	},
 };
 
-unsigned short bfin_uart1_peripherals[] = {
+static unsigned short bfin_uart1_peripherals[] = {
 	P_UART1_TX, P_UART1_RX, 0
 };
 
@@ -512,9 +512,9 @@
 	},
 };
 
-unsigned short bfin_sport0_peripherals[] = {
+static unsigned short bfin_sport0_peripherals[] = {
 	P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS,
-	P_SPORT0_DRPRI, P_SPORT0_RSCLK, P_SPORT0_DRSEC, P_SPORT0_DTSEC, 0
+	P_SPORT0_DRPRI, P_SPORT0_RSCLK, 0
 };
 
 static struct platform_device bfin_sport0_uart_device = {
@@ -546,9 +546,9 @@
 	},
 };
 
-unsigned short bfin_sport1_peripherals[] = {
+static unsigned short bfin_sport1_peripherals[] = {
 	P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS,
-	P_SPORT1_DRPRI, P_SPORT1_RSCLK, P_SPORT1_DRSEC, P_SPORT1_DTSEC, 0
+	P_SPORT1_DRPRI, P_SPORT1_RSCLK, 0
 };
 
 static struct platform_device bfin_sport1_uart_device = {
diff --git a/arch/blackfin/mach-bf537/dma.c b/arch/blackfin/mach-bf537/dma.c
index 5c8c4ed..5c62e99 100644
--- a/arch/blackfin/mach-bf537/dma.c
+++ b/arch/blackfin/mach-bf537/dma.c
@@ -11,7 +11,7 @@
 #include <asm/blackfin.h>
 #include <asm/dma.h>
 
-struct dma_register *dma_io_base_addr[MAX_DMA_CHANNELS] = {
+struct dma_register * const dma_io_base_addr[MAX_DMA_CHANNELS] = {
 	(struct dma_register *) DMA0_NEXT_DESC_PTR,
 	(struct dma_register *) DMA1_NEXT_DESC_PTR,
 	(struct dma_register *) DMA2_NEXT_DESC_PTR,
diff --git a/arch/blackfin/mach-bf537/include/mach/bfin_serial.h b/arch/blackfin/mach-bf537/include/mach/bfin_serial.h
new file mode 100644
index 0000000..00c603f
--- /dev/null
+++ b/arch/blackfin/mach-bf537/include/mach/bfin_serial.h
@@ -0,0 +1,14 @@
+/*
+ * mach/bfin_serial.h - Blackfin UART/Serial definitions
+ *
+ * Copyright 2006-2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#ifndef __BFIN_MACH_SERIAL_H__
+#define __BFIN_MACH_SERIAL_H__
+
+#define BFIN_UART_NR_PORTS	2
+
+#endif
diff --git a/arch/blackfin/mach-bf537/include/mach/bfin_serial_5xx.h b/arch/blackfin/mach-bf537/include/mach/bfin_serial_5xx.h
index 635c91c..3e955db 100644
--- a/arch/blackfin/mach-bf537/include/mach/bfin_serial_5xx.h
+++ b/arch/blackfin/mach-bf537/include/mach/bfin_serial_5xx.h
@@ -4,36 +4,9 @@
  * Licensed under the GPL-2 or later
  */
 
-#include <linux/serial.h>
 #include <asm/dma.h>
 #include <asm/portmux.h>
 
-#define UART_GET_CHAR(uart)     bfin_read16(((uart)->port.membase + OFFSET_RBR))
-#define UART_GET_DLL(uart)	bfin_read16(((uart)->port.membase + OFFSET_DLL))
-#define UART_GET_IER(uart)      bfin_read16(((uart)->port.membase + OFFSET_IER))
-#define UART_GET_DLH(uart)	bfin_read16(((uart)->port.membase + OFFSET_DLH))
-#define UART_GET_IIR(uart)      bfin_read16(((uart)->port.membase + OFFSET_IIR))
-#define UART_GET_LCR(uart)      bfin_read16(((uart)->port.membase + OFFSET_LCR))
-#define UART_GET_GCTL(uart)     bfin_read16(((uart)->port.membase + OFFSET_GCTL))
-
-#define UART_PUT_CHAR(uart,v)   bfin_write16(((uart)->port.membase + OFFSET_THR),v)
-#define UART_PUT_DLL(uart,v)    bfin_write16(((uart)->port.membase + OFFSET_DLL),v)
-#define UART_PUT_IER(uart,v)    bfin_write16(((uart)->port.membase + OFFSET_IER),v)
-#define UART_SET_IER(uart,v)    UART_PUT_IER(uart, UART_GET_IER(uart) | (v))
-#define UART_CLEAR_IER(uart,v)  UART_PUT_IER(uart, UART_GET_IER(uart) & ~(v))
-#define UART_PUT_DLH(uart,v)    bfin_write16(((uart)->port.membase + OFFSET_DLH),v)
-#define UART_PUT_LCR(uart,v)    bfin_write16(((uart)->port.membase + OFFSET_LCR),v)
-#define UART_PUT_GCTL(uart,v)   bfin_write16(((uart)->port.membase + OFFSET_GCTL),v)
-
-#define UART_SET_DLAB(uart)     do { UART_PUT_LCR(uart, UART_GET_LCR(uart) | DLAB); SSYNC(); } while (0)
-#define UART_CLEAR_DLAB(uart)   do { UART_PUT_LCR(uart, UART_GET_LCR(uart) & ~DLAB); SSYNC(); } while (0)
-
-#define UART_GET_CTS(x) gpio_get_value(x->cts_pin)
-#define UART_DISABLE_RTS(x) gpio_set_value(x->rts_pin, 1)
-#define UART_ENABLE_RTS(x) gpio_set_value(x->rts_pin, 0)
-#define UART_ENABLE_INTS(x, v) UART_PUT_IER(x, v)
-#define UART_DISABLE_INTS(x) UART_PUT_IER(x, 0)
-
 #if defined(CONFIG_BFIN_UART0_CTSRTS) || defined(CONFIG_BFIN_UART1_CTSRTS)
 # define CONFIG_SERIAL_BFIN_CTSRTS
 
@@ -54,49 +27,6 @@
 # endif
 #endif
 
-#define BFIN_UART_TX_FIFO_SIZE	2
-
-/*
- * The pin configuration is different from schematic
- */
-struct bfin_serial_port {
-        struct uart_port        port;
-        unsigned int            old_status;
-	int			status_irq;
-	unsigned int lsr;
-#ifdef CONFIG_SERIAL_BFIN_DMA
-	int			tx_done;
-	int			tx_count;
-	struct circ_buf		rx_dma_buf;
-	struct timer_list       rx_dma_timer;
-	int			rx_dma_nrows;
-	unsigned int		tx_dma_channel;
-	unsigned int		rx_dma_channel;
-	struct work_struct	tx_dma_workqueue;
-#endif
-#ifdef CONFIG_SERIAL_BFIN_CTSRTS
-	int		cts_pin;
-	int 		rts_pin;
-#endif
-};
-
-/* The hardware clears the LSR bits upon read, so we need to cache
- * some of the more fun bits in software so they don't get lost
- * when checking the LSR in other code paths (TX).
- */
-static inline unsigned int UART_GET_LSR(struct bfin_serial_port *uart)
-{
-	unsigned int lsr = bfin_read16(uart->port.membase + OFFSET_LSR);
-	uart->lsr |= (lsr & (BI|FE|PE|OE));
-	return lsr | uart->lsr;
-}
-
-static inline void UART_CLEAR_LSR(struct bfin_serial_port *uart)
-{
-	uart->lsr = 0;
-	bfin_write16(uart->port.membase + OFFSET_LSR, -1);
-}
-
 struct bfin_serial_res {
 	unsigned long	uart_base_addr;
 	int		uart_irq;
@@ -145,3 +75,5 @@
 };
 
 #define DRIVER_NAME "bfin-uart"
+
+#include <asm/bfin_serial.h>
diff --git a/arch/blackfin/mach-bf537/include/mach/blackfin.h b/arch/blackfin/mach-bf537/include/mach/blackfin.h
index a12d4b6..baa096f 100644
--- a/arch/blackfin/mach-bf537/include/mach/blackfin.h
+++ b/arch/blackfin/mach-bf537/include/mach/blackfin.h
@@ -1,7 +1,7 @@
 /*
- * Copyright 2005-2009 Analog Devices Inc.
+ * Copyright 2005-2010 Analog Devices Inc.
  *
- * Licensed under the GPL-2 or later
+ * Licensed under the GPL-2 or later.
  */
 
 #ifndef _MACH_BLACKFIN_H_
@@ -10,34 +10,24 @@
 #define BF537_FAMILY
 
 #include "bf537.h"
-#include "defBF534.h"
 #include "anomaly.h"
 
+#include <asm/def_LPBlackfin.h>
+#ifdef CONFIG_BF534
+# include "defBF534.h"
+#endif
 #if defined(CONFIG_BF537) || defined(CONFIG_BF536)
-#include "defBF537.h"
+# include "defBF537.h"
 #endif
 
 #if !defined(__ASSEMBLY__)
-#include "cdefBF534.h"
-
-#if defined(CONFIG_BF537) || defined(CONFIG_BF536)
-#include "cdefBF537.h"
+# include <asm/cdef_LPBlackfin.h>
+# ifdef CONFIG_BF534
+#  include "cdefBF534.h"
+# endif
+# if defined(CONFIG_BF537) || defined(CONFIG_BF536)
+#  include "cdefBF537.h"
+# endif
 #endif
-#endif
-
-#define BFIN_UART_NR_PORTS	2
-
-#define OFFSET_THR              0x00	/* Transmit Holding register            */
-#define OFFSET_RBR              0x00	/* Receive Buffer register              */
-#define OFFSET_DLL              0x00	/* Divisor Latch (Low-Byte)             */
-#define OFFSET_IER              0x04	/* Interrupt Enable Register            */
-#define OFFSET_DLH              0x04	/* Divisor Latch (High-Byte)            */
-#define OFFSET_IIR              0x08	/* Interrupt Identification Register    */
-#define OFFSET_LCR              0x0C	/* Line Control Register                */
-#define OFFSET_MCR              0x10	/* Modem Control Register               */
-#define OFFSET_LSR              0x14	/* Line Status Register                 */
-#define OFFSET_MSR              0x18	/* Modem Status Register                */
-#define OFFSET_SCR              0x1C	/* SCR Scratch Register                 */
-#define OFFSET_GCTL             0x24	/* Global Control Register              */
 
 #endif
diff --git a/arch/blackfin/mach-bf537/include/mach/cdefBF534.h b/arch/blackfin/mach-bf537/include/mach/cdefBF534.h
index fbeb35e..563ede9 100644
--- a/arch/blackfin/mach-bf537/include/mach/cdefBF534.h
+++ b/arch/blackfin/mach-bf537/include/mach/cdefBF534.h
@@ -1,5 +1,5 @@
 /*
- * Copyright 2005-2008 Analog Devices Inc.
+ * Copyright 2005-2010 Analog Devices Inc.
  *
  * Licensed under the GPL-2 or later
  */
@@ -7,14 +7,6 @@
 #ifndef _CDEF_BF534_H
 #define _CDEF_BF534_H
 
-#include <asm/blackfin.h>
-
-/* Include all Core registers and bit definitions 									*/
-#include "defBF534.h"
-
-/* Include core specific register pointer definitions 								*/
-#include <asm/cdef_LPBlackfin.h>
-
 /* Clock and System Control	(0xFFC00000 - 0xFFC000FF)								*/
 #define bfin_read_PLL_CTL()                  bfin_read16(PLL_CTL)
 #define bfin_read_PLL_DIV()                  bfin_read16(PLL_DIV)
@@ -355,16 +347,10 @@
 #define bfin_write_EBIU_SDSTAT(val)          bfin_write16(EBIU_SDSTAT,val)
 
 /* DMA Traffic Control Registers													*/
-#define bfin_read_DMA_TC_PER()                bfin_read16(DMA_TC_PER)
-#define bfin_write_DMA_TC_PER(val)            bfin_write16(DMA_TC_PER,val)
-#define bfin_read_DMA_TC_CNT()                bfin_read16(DMA_TC_CNT)
-#define bfin_write_DMA_TC_CNT(val)            bfin_write16(DMA_TC_CNT,val)
-
-/* Alternate deprecated register names (below) provided for backwards code compatibility */
-#define bfin_read_DMA_TCPER()                bfin_read16(DMA_TCPER)
-#define bfin_write_DMA_TCPER(val)            bfin_write16(DMA_TCPER,val)
-#define bfin_read_DMA_TCCNT()                bfin_read16(DMA_TCCNT)
-#define bfin_write_DMA_TCCNT(val)            bfin_write16(DMA_TCCNT,val)
+#define bfin_read_DMAC_TC_PER()              bfin_read16(DMAC_TC_PER)
+#define bfin_write_DMAC_TC_PER(val)          bfin_write16(DMAC_TC_PER,val)
+#define bfin_read_DMAC_TC_CNT()              bfin_read16(DMAC_TC_CNT)
+#define bfin_write_DMAC_TC_CNT(val)          bfin_write16(DMAC_TC_CNT,val)
 
 /* DMA Controller																	*/
 #define bfin_read_DMA0_CONFIG()              bfin_read16(DMA0_CONFIG)
@@ -1747,7 +1733,4 @@
 #define bfin_read_HMDMA1_BCOUNT()            bfin_read16(HMDMA1_BCOUNT)
 #define bfin_write_HMDMA1_BCOUNT(val)        bfin_write16(HMDMA1_BCOUNT,val)
 
-/* These need to be last due to the cdef/linux inter-dependencies */
-#include <asm/irq.h>
-
 #endif				/* _CDEF_BF534_H */
diff --git a/arch/blackfin/mach-bf537/include/mach/cdefBF537.h b/arch/blackfin/mach-bf537/include/mach/cdefBF537.h
index 9363c39..19ec21e 100644
--- a/arch/blackfin/mach-bf537/include/mach/cdefBF537.h
+++ b/arch/blackfin/mach-bf537/include/mach/cdefBF537.h
@@ -1,5 +1,5 @@
 /*
- * Copyright 2005-2008 Analog Devices Inc.
+ * Copyright 2005-2010 Analog Devices Inc.
  *
  * Licensed under the GPL-2 or later
  */
@@ -10,9 +10,6 @@
 /* Include MMRs Common to BF534 								*/
 #include "cdefBF534.h"
 
-/* Include all Core registers and bit definitions 									*/
-#include "defBF537.h"
-
 /* Include Macro "Defines" For EMAC (Unique to BF536/BF537		*/
 /* 10/100 Ethernet Controller	(0xFFC03000 - 0xFFC031FF) 						*/
 #define bfin_read_EMAC_OPMODE()              bfin_read32(EMAC_OPMODE)
diff --git a/arch/blackfin/mach-bf537/include/mach/defBF534.h b/arch/blackfin/mach-bf537/include/mach/defBF534.h
index 0323e6b..725bb35 100644
--- a/arch/blackfin/mach-bf537/include/mach/defBF534.h
+++ b/arch/blackfin/mach-bf537/include/mach/defBF534.h
@@ -1,5 +1,5 @@
 /*
- * Copyright 2005-2008 Analog Devices Inc.
+ * Copyright 2005-2010 Analog Devices Inc.
  *
  * Licensed under the ADI BSD license or the GPL-2 (or later)
  */
@@ -7,9 +7,6 @@
 #ifndef _DEF_BF534_H
 #define _DEF_BF534_H
 
-/* Include all Core registers and bit definitions */
-#include <asm/def_LPBlackfin.h>
-
 /************************************************************************************
 ** System MMR Register Map
 *************************************************************************************/
@@ -193,12 +190,8 @@
 #define EBIU_SDSTAT			0xFFC00A1C	/* SDRAM Status Register                                                */
 
 /* DMA Traffic Control Registers													*/
-#define DMA_TC_PER			0xFFC00B0C	/* Traffic Control Periods Register			*/
-#define DMA_TC_CNT			0xFFC00B10	/* Traffic Control Current Counts Register	*/
-
-/* Alternate deprecated register names (below) provided for backwards code compatibility */
-#define DMA_TCPER			0xFFC00B0C	/* Traffic Control Periods Register			*/
-#define DMA_TCCNT			0xFFC00B10	/* Traffic Control Current Counts Register	*/
+#define DMAC_TC_PER			0xFFC00B0C	/* Traffic Control Periods Register			*/
+#define DMAC_TC_CNT			0xFFC00B10	/* Traffic Control Current Counts Register	*/
 
 /* DMA Controller (0xFFC00C00 - 0xFFC00FFF)															*/
 #define DMA0_NEXT_DESC_PTR		0xFFC00C00	/* DMA Channel 0 Next Descriptor Pointer Register               */
@@ -1029,48 +1022,6 @@
 #define IWR_ENABLE(x)	(1 << ((x)&0x1F))	/* Wakeup Enable Peripheral #x          */
 #define IWR_DISABLE(x)	(0xFFFFFFFF ^ (1 << ((x)&0x1F)))	/* Wakeup Disable Peripheral #x         */
 
-/* ************** UART CONTROLLER MASKS *************************/
-/* UARTx_LCR Masks												*/
-#define WLS(x)		(((x)-5) & 0x03)	/* Word Length Select   */
-#define STB			0x04	/* Stop Bits                    */
-#define PEN			0x08	/* Parity Enable                */
-#define EPS			0x10	/* Even Parity Select   */
-#define STP			0x20	/* Stick Parity                 */
-#define SB			0x40	/* Set Break                    */
-#define DLAB		0x80	/* Divisor Latch Access */
-
-/* UARTx_MCR Mask										*/
-#define LOOP_ENA		0x10	/* Loopback Mode Enable         */
-#define LOOP_ENA_P	0x04
-/* UARTx_LSR Masks										*/
-#define DR			0x01	/* Data Ready                           */
-#define OE			0x02	/* Overrun Error                        */
-#define PE			0x04	/* Parity Error                         */
-#define FE			0x08	/* Framing Error                        */
-#define BI			0x10	/* Break Interrupt                      */
-#define THRE		0x20	/* THR Empty                            */
-#define TEMT		0x40	/* TSR and UART_THR Empty       */
-
-/* UARTx_IER Masks															*/
-#define ERBFI		0x01	/* Enable Receive Buffer Full Interrupt         */
-#define ETBEI		0x02	/* Enable Transmit Buffer Empty Interrupt       */
-#define ELSI		0x04	/* Enable RX Status Interrupt                           */
-
-/* UARTx_IIR Masks														*/
-#define NINT		0x01	/* Pending Interrupt                                    */
-#define IIR_TX_READY    0x02	/* UART_THR empty                               */
-#define IIR_RX_READY    0x04	/* Receive data ready                           */
-#define IIR_LINE_CHANGE 0x06	/* Receive line status                          */
-#define IIR_STATUS	0x06
-
-/* UARTx_GCTL Masks													*/
-#define UCEN		0x01	/* Enable UARTx Clocks                          */
-#define IREN		0x02	/* Enable IrDA Mode                                     */
-#define TPOLC		0x04	/* IrDA TX Polarity Change                      */
-#define RPOLC		0x08	/* IrDA RX Polarity Change                      */
-#define FPE			0x10	/* Force Parity Error On Transmit       */
-#define FFE			0x20	/* Force Framing Error On Transmit      */
-
 /*  ****************  GENERAL PURPOSE TIMER MASKS  **********************/
 /* TIMER_ENABLE Masks													*/
 #define TIMEN0			0x0001	/* Enable Timer 0                                       */
@@ -1141,62 +1092,6 @@
 #define EMU_RUN			0x0200	/* Emulation Behavior Select                    */
 #define ERR_TYP			0xC000	/* Error Type                                                   */
 
-/* ******************   GPIO PORTS F, G, H MASKS  ***********************/
-/*  General Purpose IO (0xFFC00700 - 0xFFC007FF)  Masks 				*/
-/* Port F Masks 														*/
-#define PF0		0x0001
-#define PF1		0x0002
-#define PF2		0x0004
-#define PF3		0x0008
-#define PF4		0x0010
-#define PF5		0x0020
-#define PF6		0x0040
-#define PF7		0x0080
-#define PF8		0x0100
-#define PF9		0x0200
-#define PF10	0x0400
-#define PF11	0x0800
-#define PF12	0x1000
-#define PF13	0x2000
-#define PF14	0x4000
-#define PF15	0x8000
-
-/* Port G Masks															*/
-#define PG0		0x0001
-#define PG1		0x0002
-#define PG2		0x0004
-#define PG3		0x0008
-#define PG4		0x0010
-#define PG5		0x0020
-#define PG6		0x0040
-#define PG7		0x0080
-#define PG8		0x0100
-#define PG9		0x0200
-#define PG10	0x0400
-#define PG11	0x0800
-#define PG12	0x1000
-#define PG13	0x2000
-#define PG14	0x4000
-#define PG15	0x8000
-
-/* Port H Masks															*/
-#define PH0		0x0001
-#define PH1		0x0002
-#define PH2		0x0004
-#define PH3		0x0008
-#define PH4		0x0010
-#define PH5		0x0020
-#define PH6		0x0040
-#define PH7		0x0080
-#define PH8		0x0100
-#define PH9		0x0200
-#define PH10	0x0400
-#define PH11	0x0800
-#define PH12	0x1000
-#define PH13	0x2000
-#define PH14	0x4000
-#define PH15	0x8000
-
 /* *********************  ASYNCHRONOUS MEMORY CONTROLLER MASKS  *************************/
 /* EBIU_AMGCTL Masks																	*/
 #define AMCKEN			0x0001	/* Enable CLKOUT                                                                        */
@@ -1523,7 +1418,7 @@
 #define	SADD_LEN	0x0002	/* Slave Address Length                                                 */
 #define	STDVAL		0x0004	/* Slave Transmit Data Valid                                    */
 #define	NAK			0x0008	/* NAK/ACK* Generated At Conclusion Of Transfer */
-#define	GEN			0x0010	/* General Call Adrress Matching Enabled                */
+#define	GEN			0x0010	/* General Call Address Matching Enabled                */
 
 /* TWI_SLAVE_STAT Masks															*/
 #define	SDIR		0x0001	/* Slave Transfer Direction (Transmit/Receive*) */
diff --git a/arch/blackfin/mach-bf537/include/mach/defBF537.h b/arch/blackfin/mach-bf537/include/mach/defBF537.h
index 8cb5d5c..3d471d7 100644
--- a/arch/blackfin/mach-bf537/include/mach/defBF537.h
+++ b/arch/blackfin/mach-bf537/include/mach/defBF537.h
@@ -1,5 +1,5 @@
 /*
- * Copyright 2005-2008 Analog Devices Inc.
+ * Copyright 2005-2010 Analog Devices Inc.
  *
  * Licensed under the ADI BSD license or the GPL-2 (or later)
  */
@@ -7,9 +7,6 @@
 #ifndef _DEF_BF537_H
 #define _DEF_BF537_H
 
-/* Include all Core registers and bit definitions*/
-#include <asm/cdef_LPBlackfin.h>
-
 /* Include all MMR and bit defines common to BF534 */
 #include "defBF534.h"
 
diff --git a/arch/blackfin/mach-bf537/include/mach/gpio.h b/arch/blackfin/mach-bf537/include/mach/gpio.h
index f80c299..fba606b 100644
--- a/arch/blackfin/mach-bf537/include/mach/gpio.h
+++ b/arch/blackfin/mach-bf537/include/mach/gpio.h
@@ -62,4 +62,8 @@
 #define PORT_G GPIO_PG0
 #define PORT_H GPIO_PH0
 
+#include <mach-common/ports-f.h>
+#include <mach-common/ports-g.h>
+#include <mach-common/ports-h.h>
+
 #endif /* _MACH_GPIO_H_ */
diff --git a/arch/blackfin/mach-bf537/include/mach/pll.h b/arch/blackfin/mach-bf537/include/mach/pll.h
index 169c106..94cca67 100644
--- a/arch/blackfin/mach-bf537/include/mach/pll.h
+++ b/arch/blackfin/mach-bf537/include/mach/pll.h
@@ -1,57 +1 @@
-/*
- * Copyright 2005-2008 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later
- */
-
-#ifndef _MACH_PLL_H
-#define _MACH_PLL_H
-
-#include <asm/blackfin.h>
-#include <asm/irqflags.h>
-
-/* Writing to PLL_CTL initiates a PLL relock sequence. */
-static __inline__ void bfin_write_PLL_CTL(unsigned int val)
-{
-	unsigned long flags, iwr;
-
-	if (val == bfin_read_PLL_CTL())
-		return;
-
-	flags = hard_local_irq_save();
-	/* Enable the PLL Wakeup bit in SIC IWR */
-	iwr = bfin_read32(SIC_IWR);
-	/* Only allow PPL Wakeup) */
-	bfin_write32(SIC_IWR, IWR_ENABLE(0));
-
-	bfin_write16(PLL_CTL, val);
-	SSYNC();
-	asm("IDLE;");
-
-	bfin_write32(SIC_IWR, iwr);
-	hard_local_irq_restore(flags);
-}
-
-/* Writing to VR_CTL initiates a PLL relock sequence. */
-static __inline__ void bfin_write_VR_CTL(unsigned int val)
-{
-	unsigned long flags, iwr;
-
-	if (val == bfin_read_VR_CTL())
-		return;
-
-	flags = hard_local_irq_save();
-	/* Enable the PLL Wakeup bit in SIC IWR */
-	iwr = bfin_read32(SIC_IWR);
-	/* Only allow PPL Wakeup) */
-	bfin_write32(SIC_IWR, IWR_ENABLE(0));
-
-	bfin_write16(VR_CTL, val);
-	SSYNC();
-	asm("IDLE;");
-
-	bfin_write32(SIC_IWR, iwr);
-	hard_local_irq_restore(flags);
-}
-
-#endif /* _MACH_PLL_H */
+#include <mach-common/pll.h>
diff --git a/arch/blackfin/mach-bf538/boards/ezkit.c b/arch/blackfin/mach-bf538/boards/ezkit.c
index c6fb0a5..e61424e 100644
--- a/arch/blackfin/mach-bf538/boards/ezkit.c
+++ b/arch/blackfin/mach-bf538/boards/ezkit.c
@@ -82,7 +82,7 @@
 #endif
 };
 
-unsigned short bfin_uart0_peripherals[] = {
+static unsigned short bfin_uart0_peripherals[] = {
 	P_UART0_TX, P_UART0_RX, 0
 };
 
@@ -125,7 +125,7 @@
 	},
 };
 
-unsigned short bfin_uart1_peripherals[] = {
+static unsigned short bfin_uart1_peripherals[] = {
 	P_UART1_TX, P_UART1_RX, 0
 };
 
@@ -168,7 +168,7 @@
 	},
 };
 
-unsigned short bfin_uart2_peripherals[] = {
+static unsigned short bfin_uart2_peripherals[] = {
 	P_UART2_TX, P_UART2_RX, 0
 };
 
@@ -282,9 +282,9 @@
 	},
 };
 
-unsigned short bfin_sport0_peripherals[] = {
+static unsigned short bfin_sport0_peripherals[] = {
 	P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS,
-	P_SPORT0_DRPRI, P_SPORT0_RSCLK, P_SPORT0_DRSEC, P_SPORT0_DTSEC, 0
+	P_SPORT0_DRPRI, P_SPORT0_RSCLK, 0
 };
 
 static struct platform_device bfin_sport0_uart_device = {
@@ -316,9 +316,9 @@
 	},
 };
 
-unsigned short bfin_sport1_peripherals[] = {
+static unsigned short bfin_sport1_peripherals[] = {
 	P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS,
-	P_SPORT1_DRPRI, P_SPORT1_RSCLK, P_SPORT1_DRSEC, P_SPORT1_DTSEC, 0
+	P_SPORT1_DRPRI, P_SPORT1_RSCLK, 0
 };
 
 static struct platform_device bfin_sport1_uart_device = {
@@ -350,7 +350,7 @@
 	},
 };
 
-unsigned short bfin_sport2_peripherals[] = {
+static unsigned short bfin_sport2_peripherals[] = {
 	P_SPORT2_TFS, P_SPORT2_DTPRI, P_SPORT2_TSCLK, P_SPORT2_RFS,
 	P_SPORT2_DRPRI, P_SPORT2_RSCLK, P_SPORT2_DRSEC, P_SPORT2_DTSEC, 0
 };
@@ -384,7 +384,7 @@
 	},
 };
 
-unsigned short bfin_sport3_peripherals[] = {
+static unsigned short bfin_sport3_peripherals[] = {
 	P_SPORT3_TFS, P_SPORT3_DTPRI, P_SPORT3_TSCLK, P_SPORT3_RFS,
 	P_SPORT3_DRPRI, P_SPORT3_RSCLK, P_SPORT3_DRSEC, P_SPORT3_DTSEC, 0
 };
@@ -402,7 +402,7 @@
 #endif
 
 #if defined(CONFIG_CAN_BFIN) || defined(CONFIG_CAN_BFIN_MODULE)
-unsigned short bfin_can_peripherals[] = {
+static unsigned short bfin_can_peripherals[] = {
 	P_CAN0_RX, P_CAN0_TX, 0
 };
 
diff --git a/arch/blackfin/mach-bf538/dma.c b/arch/blackfin/mach-bf538/dma.c
index 5dc0225..cce8ef5 100644
--- a/arch/blackfin/mach-bf538/dma.c
+++ b/arch/blackfin/mach-bf538/dma.c
@@ -11,7 +11,7 @@
 #include <asm/blackfin.h>
 #include <asm/dma.h>
 
-struct dma_register *dma_io_base_addr[MAX_DMA_CHANNELS] = {
+struct dma_register * const dma_io_base_addr[MAX_DMA_CHANNELS] = {
 	(struct dma_register *) DMA0_NEXT_DESC_PTR,
 	(struct dma_register *) DMA1_NEXT_DESC_PTR,
 	(struct dma_register *) DMA2_NEXT_DESC_PTR,
@@ -32,14 +32,14 @@
 	(struct dma_register *) DMA17_NEXT_DESC_PTR,
 	(struct dma_register *) DMA18_NEXT_DESC_PTR,
 	(struct dma_register *) DMA19_NEXT_DESC_PTR,
-	(struct dma_register *) MDMA0_D0_NEXT_DESC_PTR,
-	(struct dma_register *) MDMA0_S0_NEXT_DESC_PTR,
-	(struct dma_register *) MDMA0_D1_NEXT_DESC_PTR,
-	(struct dma_register *) MDMA0_S1_NEXT_DESC_PTR,
-	(struct dma_register *) MDMA1_D0_NEXT_DESC_PTR,
-	(struct dma_register *) MDMA1_S0_NEXT_DESC_PTR,
-	(struct dma_register *) MDMA1_D1_NEXT_DESC_PTR,
-	(struct dma_register *) MDMA1_S1_NEXT_DESC_PTR,
+	(struct dma_register *) MDMA_D0_NEXT_DESC_PTR,
+	(struct dma_register *) MDMA_S0_NEXT_DESC_PTR,
+	(struct dma_register *) MDMA_D1_NEXT_DESC_PTR,
+	(struct dma_register *) MDMA_S1_NEXT_DESC_PTR,
+	(struct dma_register *) MDMA_D2_NEXT_DESC_PTR,
+	(struct dma_register *) MDMA_S2_NEXT_DESC_PTR,
+	(struct dma_register *) MDMA_D3_NEXT_DESC_PTR,
+	(struct dma_register *) MDMA_S3_NEXT_DESC_PTR,
 };
 EXPORT_SYMBOL(dma_io_base_addr);
 
diff --git a/arch/blackfin/mach-bf538/include/mach/bfin_serial.h b/arch/blackfin/mach-bf538/include/mach/bfin_serial.h
new file mode 100644
index 0000000..c66e276
--- /dev/null
+++ b/arch/blackfin/mach-bf538/include/mach/bfin_serial.h
@@ -0,0 +1,14 @@
+/*
+ * mach/bfin_serial.h - Blackfin UART/Serial definitions
+ *
+ * Copyright 2006-2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#ifndef __BFIN_MACH_SERIAL_H__
+#define __BFIN_MACH_SERIAL_H__
+
+#define BFIN_UART_NR_PORTS	3
+
+#endif
diff --git a/arch/blackfin/mach-bf538/include/mach/bfin_serial_5xx.h b/arch/blackfin/mach-bf538/include/mach/bfin_serial_5xx.h
index 5c14814..beb502e 100644
--- a/arch/blackfin/mach-bf538/include/mach/bfin_serial_5xx.h
+++ b/arch/blackfin/mach-bf538/include/mach/bfin_serial_5xx.h
@@ -4,36 +4,9 @@
  * Licensed under the GPL-2 or later.
  */
 
-#include <linux/serial.h>
 #include <asm/dma.h>
 #include <asm/portmux.h>
 
-#define UART_GET_CHAR(uart)     bfin_read16(((uart)->port.membase + OFFSET_RBR))
-#define UART_GET_DLL(uart)	bfin_read16(((uart)->port.membase + OFFSET_DLL))
-#define UART_GET_IER(uart)      bfin_read16(((uart)->port.membase + OFFSET_IER))
-#define UART_GET_DLH(uart)	bfin_read16(((uart)->port.membase + OFFSET_DLH))
-#define UART_GET_IIR(uart)      bfin_read16(((uart)->port.membase + OFFSET_IIR))
-#define UART_GET_LCR(uart)      bfin_read16(((uart)->port.membase + OFFSET_LCR))
-#define UART_GET_GCTL(uart)     bfin_read16(((uart)->port.membase + OFFSET_GCTL))
-
-#define UART_PUT_CHAR(uart, v)   bfin_write16(((uart)->port.membase + OFFSET_THR), v)
-#define UART_PUT_DLL(uart, v)    bfin_write16(((uart)->port.membase + OFFSET_DLL), v)
-#define UART_PUT_IER(uart, v)    bfin_write16(((uart)->port.membase + OFFSET_IER), v)
-#define UART_SET_IER(uart, v)    UART_PUT_IER(uart, UART_GET_IER(uart) | (v))
-#define UART_CLEAR_IER(uart, v)  UART_PUT_IER(uart, UART_GET_IER(uart) & ~(v))
-#define UART_PUT_DLH(uart, v)    bfin_write16(((uart)->port.membase + OFFSET_DLH), v)
-#define UART_PUT_LCR(uart, v)    bfin_write16(((uart)->port.membase + OFFSET_LCR), v)
-#define UART_PUT_GCTL(uart, v)   bfin_write16(((uart)->port.membase + OFFSET_GCTL), v)
-
-#define UART_SET_DLAB(uart)     do { UART_PUT_LCR(uart, UART_GET_LCR(uart) | DLAB); SSYNC(); } while (0)
-#define UART_CLEAR_DLAB(uart)   do { UART_PUT_LCR(uart, UART_GET_LCR(uart) & ~DLAB); SSYNC(); } while (0)
-
-#define UART_GET_CTS(x) gpio_get_value(x->cts_pin)
-#define UART_DISABLE_RTS(x) gpio_set_value(x->rts_pin, 1)
-#define UART_ENABLE_RTS(x) gpio_set_value(x->rts_pin, 0)
-#define UART_ENABLE_INTS(x, v) UART_PUT_IER(x, v)
-#define UART_DISABLE_INTS(x) UART_PUT_IER(x, 0)
-
 #if defined(CONFIG_BFIN_UART0_CTSRTS) || defined(CONFIG_BFIN_UART1_CTSRTS)
 # define CONFIG_SERIAL_BFIN_CTSRTS
 
@@ -54,50 +27,6 @@
 # endif
 #endif
 
-#define BFIN_UART_TX_FIFO_SIZE	2
-
-/*
- * The pin configuration is different from schematic
- */
-struct bfin_serial_port {
-	struct uart_port	port;
-	unsigned int		old_status;
-	int			status_irq;
-	unsigned int lsr;
-#ifdef CONFIG_SERIAL_BFIN_DMA
-	int			tx_done;
-	int			tx_count;
-	struct circ_buf		rx_dma_buf;
-	struct timer_list       rx_dma_timer;
-	int			rx_dma_nrows;
-	unsigned int		tx_dma_channel;
-	unsigned int		rx_dma_channel;
-	struct work_struct	tx_dma_workqueue;
-#endif
-#ifdef CONFIG_SERIAL_BFIN_CTSRTS
-	struct timer_list	cts_timer;
-	int		cts_pin;
-	int		rts_pin;
-#endif
-};
-
-/* The hardware clears the LSR bits upon read, so we need to cache
- * some of the more fun bits in software so they don't get lost
- * when checking the LSR in other code paths (TX).
- */
-static inline unsigned int UART_GET_LSR(struct bfin_serial_port *uart)
-{
-	unsigned int lsr = bfin_read16(uart->port.membase + OFFSET_LSR);
-	uart->lsr |= (lsr & (BI|FE|PE|OE));
-	return lsr | uart->lsr;
-}
-
-static inline void UART_CLEAR_LSR(struct bfin_serial_port *uart)
-{
-	uart->lsr = 0;
-	bfin_write16(uart->port.membase + OFFSET_LSR, -1);
-}
-
 struct bfin_serial_res {
 	unsigned long	uart_base_addr;
 	int		uart_irq;
@@ -160,3 +89,5 @@
 };
 
 #define DRIVER_NAME "bfin-uart"
+
+#include <asm/bfin_serial.h>
diff --git a/arch/blackfin/mach-bf538/include/mach/blackfin.h b/arch/blackfin/mach-bf538/include/mach/blackfin.h
index 08b5eab..791d084 100644
--- a/arch/blackfin/mach-bf538/include/mach/blackfin.h
+++ b/arch/blackfin/mach-bf538/include/mach/blackfin.h
@@ -1,5 +1,5 @@
 /*
- * Copyright 2008-2009 Analog Devices Inc.
+ * Copyright 2008-2010 Analog Devices Inc.
  *
  * Licensed under the GPL-2 or later.
  */
@@ -10,31 +10,24 @@
 #define BF538_FAMILY
 
 #include "bf538.h"
-#include "defBF539.h"
 #include "anomaly.h"
 
-
-#if !defined(__ASSEMBLY__)
-#include "cdefBF538.h"
-
-#if defined(CONFIG_BF539)
-#include "cdefBF539.h"
+#include <asm/def_LPBlackfin.h>
+#ifdef CONFIG_BF538
+# include "defBF538.h"
 #endif
+#ifdef CONFIG_BF539
+# include "defBF539.h"
 #endif
 
-#define BFIN_UART_NR_PORTS	3
-
-#define OFFSET_THR              0x00	/* Transmit Holding register            */
-#define OFFSET_RBR              0x00	/* Receive Buffer register              */
-#define OFFSET_DLL              0x00	/* Divisor Latch (Low-Byte)             */
-#define OFFSET_IER              0x04	/* Interrupt Enable Register            */
-#define OFFSET_DLH              0x04	/* Divisor Latch (High-Byte)            */
-#define OFFSET_IIR              0x08	/* Interrupt Identification Register    */
-#define OFFSET_LCR              0x0C	/* Line Control Register                */
-#define OFFSET_MCR              0x10	/* Modem Control Register               */
-#define OFFSET_LSR              0x14	/* Line Status Register                 */
-#define OFFSET_MSR              0x18	/* Modem Status Register                */
-#define OFFSET_SCR              0x1C	/* SCR Scratch Register                 */
-#define OFFSET_GCTL             0x24	/* Global Control Register              */
+#ifndef __ASSEMBLY__
+# include <asm/cdef_LPBlackfin.h>
+# ifdef CONFIG_BF538
+#  include "cdefBF538.h"
+# endif
+# ifdef CONFIG_BF539
+#  include "cdefBF539.h"
+# endif
+#endif
 
 #endif
diff --git a/arch/blackfin/mach-bf538/include/mach/cdefBF538.h b/arch/blackfin/mach-bf538/include/mach/cdefBF538.h
index 085b06b..f6a5679 100644
--- a/arch/blackfin/mach-bf538/include/mach/cdefBF538.h
+++ b/arch/blackfin/mach-bf538/include/mach/cdefBF538.h
@@ -1,5 +1,5 @@
 /*
- * Copyright 2008-2009 Analog Devices Inc.
+ * Copyright 2008-2010 Analog Devices Inc.
  *
  * Licensed under the GPL-2 or later.
  */
@@ -7,14 +7,6 @@
 #ifndef _CDEF_BF538_H
 #define _CDEF_BF538_H
 
-#include <asm/blackfin.h>
-
-/*include all Core registers and bit definitions*/
-#include "defBF539.h"
-
-/*include core specific register pointer definitions*/
-#include <asm/cdef_LPBlackfin.h>
-
 #define bfin_writePTR(addr, val) bfin_write32(addr, val)
 
 #define bfin_read_PLL_CTL()            bfin_read16(PLL_CTL)
@@ -487,10 +479,10 @@
 #define bfin_write_EBIU_SDRRC(val)     bfin_write16(EBIU_SDRRC, val)
 #define bfin_read_EBIU_SDSTAT()        bfin_read16(EBIU_SDSTAT)
 #define bfin_write_EBIU_SDSTAT(val)    bfin_write16(EBIU_SDSTAT, val)
-#define bfin_read_DMA0_TC_PER()        bfin_read16(DMA0_TC_PER)
-#define bfin_write_DMA0_TC_PER(val)    bfin_write16(DMA0_TC_PER, val)
-#define bfin_read_DMA0_TC_CNT()        bfin_read16(DMA0_TC_CNT)
-#define bfin_write_DMA0_TC_CNT(val)    bfin_write16(DMA0_TC_CNT, val)
+#define bfin_read_DMAC0_TC_PER()       bfin_read16(DMAC0_TC_PER)
+#define bfin_write_DMAC0_TC_PER(val)   bfin_write16(DMAC0_TC_PER, val)
+#define bfin_read_DMAC0_TC_CNT()       bfin_read16(DMAC0_TC_CNT)
+#define bfin_write_DMAC0_TC_CNT(val)   bfin_write16(DMAC0_TC_CNT, val)
 #define bfin_read_DMA0_NEXT_DESC_PTR() bfin_readPTR(DMA0_NEXT_DESC_PTR)
 #define bfin_write_DMA0_NEXT_DESC_PTR(val) bfin_writePTR(DMA0_NEXT_DESC_PTR, val)
 #define bfin_read_DMA0_START_ADDR()    bfin_readPTR(DMA0_START_ADDR)
@@ -699,10 +691,10 @@
 #define bfin_write_DMA7_CURR_X_COUNT(val) bfin_write16(DMA7_CURR_X_COUNT, val)
 #define bfin_read_DMA7_CURR_Y_COUNT()  bfin_read16(DMA7_CURR_Y_COUNT)
 #define bfin_write_DMA7_CURR_Y_COUNT(val) bfin_write16(DMA7_CURR_Y_COUNT, val)
-#define bfin_read_DMA1_TC_PER()        bfin_read16(DMA1_TC_PER)
-#define bfin_write_DMA1_TC_PER(val)    bfin_write16(DMA1_TC_PER, val)
-#define bfin_read_DMA1_TC_CNT()        bfin_read16(DMA1_TC_CNT)
-#define bfin_write_DMA1_TC_CNT(val)    bfin_write16(DMA1_TC_CNT, val)
+#define bfin_read_DMAC1_TC_PER()       bfin_read16(DMAC1_TC_PER)
+#define bfin_write_DMAC1_TC_PER(val)   bfin_write16(DMAC1_TC_PER, val)
+#define bfin_read_DMAC1_TC_CNT()       bfin_read16(DMAC1_TC_CNT)
+#define bfin_write_DMAC1_TC_CNT(val)   bfin_write16(DMAC1_TC_CNT, val)
 #define bfin_read_DMA8_NEXT_DESC_PTR() bfin_readPTR(DMA8_NEXT_DESC_PTR)
 #define bfin_write_DMA8_NEXT_DESC_PTR(val) bfin_writePTR(DMA8_NEXT_DESC_PTR, val)
 #define bfin_read_DMA8_START_ADDR()    bfin_readPTR(DMA8_START_ADDR)
@@ -1015,273 +1007,214 @@
 #define bfin_write_DMA19_CURR_X_COUNT(val) bfin_write16(DMA19_CURR_X_COUNT, val)
 #define bfin_read_DMA19_CURR_Y_COUNT() bfin_read16(DMA19_CURR_Y_COUNT)
 #define bfin_write_DMA19_CURR_Y_COUNT(val) bfin_write16(DMA19_CURR_Y_COUNT, val)
-#define bfin_read_MDMA0_D0_NEXT_DESC_PTR() bfin_readPTR(MDMA0_D0_NEXT_DESC_PTR)
-#define bfin_write_MDMA0_D0_NEXT_DESC_PTR(val) bfin_writePTR(MDMA0_D0_NEXT_DESC_PTR, val)
-#define bfin_read_MDMA0_D0_START_ADDR() bfin_readPTR(MDMA0_D0_START_ADDR)
-#define bfin_write_MDMA0_D0_START_ADDR(val) bfin_writePTR(MDMA0_D0_START_ADDR, val)
-#define bfin_read_MDMA0_D0_CONFIG()    bfin_read16(MDMA0_D0_CONFIG)
-#define bfin_write_MDMA0_D0_CONFIG(val) bfin_write16(MDMA0_D0_CONFIG, val)
-#define bfin_read_MDMA0_D0_X_COUNT()   bfin_read16(MDMA0_D0_X_COUNT)
-#define bfin_write_MDMA0_D0_X_COUNT(val) bfin_write16(MDMA0_D0_X_COUNT, val)
-#define bfin_read_MDMA0_D0_X_MODIFY()  bfin_read16(MDMA0_D0_X_MODIFY)
-#define bfin_write_MDMA0_D0_X_MODIFY(val) bfin_write16(MDMA0_D0_X_MODIFY, val)
-#define bfin_read_MDMA0_D0_Y_COUNT()   bfin_read16(MDMA0_D0_Y_COUNT)
-#define bfin_write_MDMA0_D0_Y_COUNT(val) bfin_write16(MDMA0_D0_Y_COUNT, val)
-#define bfin_read_MDMA0_D0_Y_MODIFY()  bfin_read16(MDMA0_D0_Y_MODIFY)
-#define bfin_write_MDMA0_D0_Y_MODIFY(val) bfin_write16(MDMA0_D0_Y_MODIFY, val)
-#define bfin_read_MDMA0_D0_CURR_DESC_PTR() bfin_readPTR(MDMA0_D0_CURR_DESC_PTR)
-#define bfin_write_MDMA0_D0_CURR_DESC_PTR(val) bfin_writePTR(MDMA0_D0_CURR_DESC_PTR, val)
-#define bfin_read_MDMA0_D0_CURR_ADDR() bfin_readPTR(MDMA0_D0_CURR_ADDR)
-#define bfin_write_MDMA0_D0_CURR_ADDR(val) bfin_writePTR(MDMA0_D0_CURR_ADDR, val)
-#define bfin_read_MDMA0_D0_IRQ_STATUS() bfin_read16(MDMA0_D0_IRQ_STATUS)
-#define bfin_write_MDMA0_D0_IRQ_STATUS(val) bfin_write16(MDMA0_D0_IRQ_STATUS, val)
-#define bfin_read_MDMA0_D0_PERIPHERAL_MAP() bfin_read16(MDMA0_D0_PERIPHERAL_MAP)
-#define bfin_write_MDMA0_D0_PERIPHERAL_MAP(val) bfin_write16(MDMA0_D0_PERIPHERAL_MAP, val)
-#define bfin_read_MDMA0_D0_CURR_X_COUNT() bfin_read16(MDMA0_D0_CURR_X_COUNT)
-#define bfin_write_MDMA0_D0_CURR_X_COUNT(val) bfin_write16(MDMA0_D0_CURR_X_COUNT, val)
-#define bfin_read_MDMA0_D0_CURR_Y_COUNT() bfin_read16(MDMA0_D0_CURR_Y_COUNT)
-#define bfin_write_MDMA0_D0_CURR_Y_COUNT(val) bfin_write16(MDMA0_D0_CURR_Y_COUNT, val)
-#define bfin_read_MDMA0_S0_NEXT_DESC_PTR() bfin_readPTR(MDMA0_S0_NEXT_DESC_PTR)
-#define bfin_write_MDMA0_S0_NEXT_DESC_PTR(val) bfin_writePTR(MDMA0_S0_NEXT_DESC_PTR, val)
-#define bfin_read_MDMA0_S0_START_ADDR() bfin_readPTR(MDMA0_S0_START_ADDR)
-#define bfin_write_MDMA0_S0_START_ADDR(val) bfin_writePTR(MDMA0_S0_START_ADDR, val)
-#define bfin_read_MDMA0_S0_CONFIG()    bfin_read16(MDMA0_S0_CONFIG)
-#define bfin_write_MDMA0_S0_CONFIG(val) bfin_write16(MDMA0_S0_CONFIG, val)
-#define bfin_read_MDMA0_S0_X_COUNT()   bfin_read16(MDMA0_S0_X_COUNT)
-#define bfin_write_MDMA0_S0_X_COUNT(val) bfin_write16(MDMA0_S0_X_COUNT, val)
-#define bfin_read_MDMA0_S0_X_MODIFY()  bfin_read16(MDMA0_S0_X_MODIFY)
-#define bfin_write_MDMA0_S0_X_MODIFY(val) bfin_write16(MDMA0_S0_X_MODIFY, val)
-#define bfin_read_MDMA0_S0_Y_COUNT()   bfin_read16(MDMA0_S0_Y_COUNT)
-#define bfin_write_MDMA0_S0_Y_COUNT(val) bfin_write16(MDMA0_S0_Y_COUNT, val)
-#define bfin_read_MDMA0_S0_Y_MODIFY()  bfin_read16(MDMA0_S0_Y_MODIFY)
-#define bfin_write_MDMA0_S0_Y_MODIFY(val) bfin_write16(MDMA0_S0_Y_MODIFY, val)
-#define bfin_read_MDMA0_S0_CURR_DESC_PTR() bfin_readPTR(MDMA0_S0_CURR_DESC_PTR)
-#define bfin_write_MDMA0_S0_CURR_DESC_PTR(val) bfin_writePTR(MDMA0_S0_CURR_DESC_PTR, val)
-#define bfin_read_MDMA0_S0_CURR_ADDR() bfin_readPTR(MDMA0_S0_CURR_ADDR)
-#define bfin_write_MDMA0_S0_CURR_ADDR(val) bfin_writePTR(MDMA0_S0_CURR_ADDR, val)
-#define bfin_read_MDMA0_S0_IRQ_STATUS() bfin_read16(MDMA0_S0_IRQ_STATUS)
-#define bfin_write_MDMA0_S0_IRQ_STATUS(val) bfin_write16(MDMA0_S0_IRQ_STATUS, val)
-#define bfin_read_MDMA0_S0_PERIPHERAL_MAP() bfin_read16(MDMA0_S0_PERIPHERAL_MAP)
-#define bfin_write_MDMA0_S0_PERIPHERAL_MAP(val) bfin_write16(MDMA0_S0_PERIPHERAL_MAP, val)
-#define bfin_read_MDMA0_S0_CURR_X_COUNT() bfin_read16(MDMA0_S0_CURR_X_COUNT)
-#define bfin_write_MDMA0_S0_CURR_X_COUNT(val) bfin_write16(MDMA0_S0_CURR_X_COUNT, val)
-#define bfin_read_MDMA0_S0_CURR_Y_COUNT() bfin_read16(MDMA0_S0_CURR_Y_COUNT)
-#define bfin_write_MDMA0_S0_CURR_Y_COUNT(val) bfin_write16(MDMA0_S0_CURR_Y_COUNT, val)
-#define bfin_read_MDMA0_D1_NEXT_DESC_PTR() bfin_readPTR(MDMA0_D1_NEXT_DESC_PTR)
-#define bfin_write_MDMA0_D1_NEXT_DESC_PTR(val) bfin_writePTR(MDMA0_D1_NEXT_DESC_PTR, val)
-#define bfin_read_MDMA0_D1_START_ADDR() bfin_readPTR(MDMA0_D1_START_ADDR)
-#define bfin_write_MDMA0_D1_START_ADDR(val) bfin_writePTR(MDMA0_D1_START_ADDR, val)
-#define bfin_read_MDMA0_D1_CONFIG()    bfin_read16(MDMA0_D1_CONFIG)
-#define bfin_write_MDMA0_D1_CONFIG(val) bfin_write16(MDMA0_D1_CONFIG, val)
-#define bfin_read_MDMA0_D1_X_COUNT()   bfin_read16(MDMA0_D1_X_COUNT)
-#define bfin_write_MDMA0_D1_X_COUNT(val) bfin_write16(MDMA0_D1_X_COUNT, val)
-#define bfin_read_MDMA0_D1_X_MODIFY()  bfin_read16(MDMA0_D1_X_MODIFY)
-#define bfin_write_MDMA0_D1_X_MODIFY(val) bfin_write16(MDMA0_D1_X_MODIFY, val)
-#define bfin_read_MDMA0_D1_Y_COUNT()   bfin_read16(MDMA0_D1_Y_COUNT)
-#define bfin_write_MDMA0_D1_Y_COUNT(val) bfin_write16(MDMA0_D1_Y_COUNT, val)
-#define bfin_read_MDMA0_D1_Y_MODIFY()  bfin_read16(MDMA0_D1_Y_MODIFY)
-#define bfin_write_MDMA0_D1_Y_MODIFY(val) bfin_write16(MDMA0_D1_Y_MODIFY, val)
-#define bfin_read_MDMA0_D1_CURR_DESC_PTR() bfin_readPTR(MDMA0_D1_CURR_DESC_PTR)
-#define bfin_write_MDMA0_D1_CURR_DESC_PTR(val) bfin_writePTR(MDMA0_D1_CURR_DESC_PTR, val)
-#define bfin_read_MDMA0_D1_CURR_ADDR() bfin_readPTR(MDMA0_D1_CURR_ADDR)
-#define bfin_write_MDMA0_D1_CURR_ADDR(val) bfin_writePTR(MDMA0_D1_CURR_ADDR, val)
-#define bfin_read_MDMA0_D1_IRQ_STATUS() bfin_read16(MDMA0_D1_IRQ_STATUS)
-#define bfin_write_MDMA0_D1_IRQ_STATUS(val) bfin_write16(MDMA0_D1_IRQ_STATUS, val)
-#define bfin_read_MDMA0_D1_PERIPHERAL_MAP() bfin_read16(MDMA0_D1_PERIPHERAL_MAP)
-#define bfin_write_MDMA0_D1_PERIPHERAL_MAP(val) bfin_write16(MDMA0_D1_PERIPHERAL_MAP, val)
-#define bfin_read_MDMA0_D1_CURR_X_COUNT() bfin_read16(MDMA0_D1_CURR_X_COUNT)
-#define bfin_write_MDMA0_D1_CURR_X_COUNT(val) bfin_write16(MDMA0_D1_CURR_X_COUNT, val)
-#define bfin_read_MDMA0_D1_CURR_Y_COUNT() bfin_read16(MDMA0_D1_CURR_Y_COUNT)
-#define bfin_write_MDMA0_D1_CURR_Y_COUNT(val) bfin_write16(MDMA0_D1_CURR_Y_COUNT, val)
-#define bfin_read_MDMA0_S1_NEXT_DESC_PTR() bfin_readPTR(MDMA0_S1_NEXT_DESC_PTR)
-#define bfin_write_MDMA0_S1_NEXT_DESC_PTR(val) bfin_writePTR(MDMA0_S1_NEXT_DESC_PTR, val)
-#define bfin_read_MDMA0_S1_START_ADDR() bfin_readPTR(MDMA0_S1_START_ADDR)
-#define bfin_write_MDMA0_S1_START_ADDR(val) bfin_writePTR(MDMA0_S1_START_ADDR, val)
-#define bfin_read_MDMA0_S1_CONFIG()    bfin_read16(MDMA0_S1_CONFIG)
-#define bfin_write_MDMA0_S1_CONFIG(val) bfin_write16(MDMA0_S1_CONFIG, val)
-#define bfin_read_MDMA0_S1_X_COUNT()   bfin_read16(MDMA0_S1_X_COUNT)
-#define bfin_write_MDMA0_S1_X_COUNT(val) bfin_write16(MDMA0_S1_X_COUNT, val)
-#define bfin_read_MDMA0_S1_X_MODIFY()  bfin_read16(MDMA0_S1_X_MODIFY)
-#define bfin_write_MDMA0_S1_X_MODIFY(val) bfin_write16(MDMA0_S1_X_MODIFY, val)
-#define bfin_read_MDMA0_S1_Y_COUNT()   bfin_read16(MDMA0_S1_Y_COUNT)
-#define bfin_write_MDMA0_S1_Y_COUNT(val) bfin_write16(MDMA0_S1_Y_COUNT, val)
-#define bfin_read_MDMA0_S1_Y_MODIFY()  bfin_read16(MDMA0_S1_Y_MODIFY)
-#define bfin_write_MDMA0_S1_Y_MODIFY(val) bfin_write16(MDMA0_S1_Y_MODIFY, val)
-#define bfin_read_MDMA0_S1_CURR_DESC_PTR() bfin_readPTR(MDMA0_S1_CURR_DESC_PTR)
-#define bfin_write_MDMA0_S1_CURR_DESC_PTR(val) bfin_writePTR(MDMA0_S1_CURR_DESC_PTR, val)
-#define bfin_read_MDMA0_S1_CURR_ADDR() bfin_readPTR(MDMA0_S1_CURR_ADDR)
-#define bfin_write_MDMA0_S1_CURR_ADDR(val) bfin_writePTR(MDMA0_S1_CURR_ADDR, val)
-#define bfin_read_MDMA0_S1_IRQ_STATUS() bfin_read16(MDMA0_S1_IRQ_STATUS)
-#define bfin_write_MDMA0_S1_IRQ_STATUS(val) bfin_write16(MDMA0_S1_IRQ_STATUS, val)
-#define bfin_read_MDMA0_S1_PERIPHERAL_MAP() bfin_read16(MDMA0_S1_PERIPHERAL_MAP)
-#define bfin_write_MDMA0_S1_PERIPHERAL_MAP(val) bfin_write16(MDMA0_S1_PERIPHERAL_MAP, val)
-#define bfin_read_MDMA0_S1_CURR_X_COUNT() bfin_read16(MDMA0_S1_CURR_X_COUNT)
-#define bfin_write_MDMA0_S1_CURR_X_COUNT(val) bfin_write16(MDMA0_S1_CURR_X_COUNT, val)
-#define bfin_read_MDMA0_S1_CURR_Y_COUNT() bfin_read16(MDMA0_S1_CURR_Y_COUNT)
-#define bfin_write_MDMA0_S1_CURR_Y_COUNT(val) bfin_write16(MDMA0_S1_CURR_Y_COUNT, val)
-#define bfin_read_MDMA1_D0_NEXT_DESC_PTR() bfin_readPTR(MDMA1_D0_NEXT_DESC_PTR)
-#define bfin_write_MDMA1_D0_NEXT_DESC_PTR(val) bfin_writePTR(MDMA1_D0_NEXT_DESC_PTR, val)
-#define bfin_read_MDMA1_D0_START_ADDR() bfin_readPTR(MDMA1_D0_START_ADDR)
-#define bfin_write_MDMA1_D0_START_ADDR(val) bfin_writePTR(MDMA1_D0_START_ADDR, val)
-#define bfin_read_MDMA1_D0_CONFIG()    bfin_read16(MDMA1_D0_CONFIG)
-#define bfin_write_MDMA1_D0_CONFIG(val) bfin_write16(MDMA1_D0_CONFIG, val)
-#define bfin_read_MDMA1_D0_X_COUNT()   bfin_read16(MDMA1_D0_X_COUNT)
-#define bfin_write_MDMA1_D0_X_COUNT(val) bfin_write16(MDMA1_D0_X_COUNT, val)
-#define bfin_read_MDMA1_D0_X_MODIFY()  bfin_read16(MDMA1_D0_X_MODIFY)
-#define bfin_write_MDMA1_D0_X_MODIFY(val) bfin_write16(MDMA1_D0_X_MODIFY, val)
-#define bfin_read_MDMA1_D0_Y_COUNT()   bfin_read16(MDMA1_D0_Y_COUNT)
-#define bfin_write_MDMA1_D0_Y_COUNT(val) bfin_write16(MDMA1_D0_Y_COUNT, val)
-#define bfin_read_MDMA1_D0_Y_MODIFY()  bfin_read16(MDMA1_D0_Y_MODIFY)
-#define bfin_write_MDMA1_D0_Y_MODIFY(val) bfin_write16(MDMA1_D0_Y_MODIFY, val)
-#define bfin_read_MDMA1_D0_CURR_DESC_PTR() bfin_readPTR(MDMA1_D0_CURR_DESC_PTR)
-#define bfin_write_MDMA1_D0_CURR_DESC_PTR(val) bfin_writePTR(MDMA1_D0_CURR_DESC_PTR, val)
-#define bfin_read_MDMA1_D0_CURR_ADDR() bfin_readPTR(MDMA1_D0_CURR_ADDR)
-#define bfin_write_MDMA1_D0_CURR_ADDR(val) bfin_writePTR(MDMA1_D0_CURR_ADDR, val)
-#define bfin_read_MDMA1_D0_IRQ_STATUS() bfin_read16(MDMA1_D0_IRQ_STATUS)
-#define bfin_write_MDMA1_D0_IRQ_STATUS(val) bfin_write16(MDMA1_D0_IRQ_STATUS, val)
-#define bfin_read_MDMA1_D0_PERIPHERAL_MAP() bfin_read16(MDMA1_D0_PERIPHERAL_MAP)
-#define bfin_write_MDMA1_D0_PERIPHERAL_MAP(val) bfin_write16(MDMA1_D0_PERIPHERAL_MAP, val)
-#define bfin_read_MDMA1_D0_CURR_X_COUNT() bfin_read16(MDMA1_D0_CURR_X_COUNT)
-#define bfin_write_MDMA1_D0_CURR_X_COUNT(val) bfin_write16(MDMA1_D0_CURR_X_COUNT, val)
-#define bfin_read_MDMA1_D0_CURR_Y_COUNT() bfin_read16(MDMA1_D0_CURR_Y_COUNT)
-#define bfin_write_MDMA1_D0_CURR_Y_COUNT(val) bfin_write16(MDMA1_D0_CURR_Y_COUNT, val)
-#define bfin_read_MDMA1_S0_NEXT_DESC_PTR() bfin_readPTR(MDMA1_S0_NEXT_DESC_PTR)
-#define bfin_write_MDMA1_S0_NEXT_DESC_PTR(val) bfin_writePTR(MDMA1_S0_NEXT_DESC_PTR, val)
-#define bfin_read_MDMA1_S0_START_ADDR() bfin_readPTR(MDMA1_S0_START_ADDR)
-#define bfin_write_MDMA1_S0_START_ADDR(val) bfin_writePTR(MDMA1_S0_START_ADDR, val)
-#define bfin_read_MDMA1_S0_CONFIG()    bfin_read16(MDMA1_S0_CONFIG)
-#define bfin_write_MDMA1_S0_CONFIG(val) bfin_write16(MDMA1_S0_CONFIG, val)
-#define bfin_read_MDMA1_S0_X_COUNT()   bfin_read16(MDMA1_S0_X_COUNT)
-#define bfin_write_MDMA1_S0_X_COUNT(val) bfin_write16(MDMA1_S0_X_COUNT, val)
-#define bfin_read_MDMA1_S0_X_MODIFY()  bfin_read16(MDMA1_S0_X_MODIFY)
-#define bfin_write_MDMA1_S0_X_MODIFY(val) bfin_write16(MDMA1_S0_X_MODIFY, val)
-#define bfin_read_MDMA1_S0_Y_COUNT()   bfin_read16(MDMA1_S0_Y_COUNT)
-#define bfin_write_MDMA1_S0_Y_COUNT(val) bfin_write16(MDMA1_S0_Y_COUNT, val)
-#define bfin_read_MDMA1_S0_Y_MODIFY()  bfin_read16(MDMA1_S0_Y_MODIFY)
-#define bfin_write_MDMA1_S0_Y_MODIFY(val) bfin_write16(MDMA1_S0_Y_MODIFY, val)
-#define bfin_read_MDMA1_S0_CURR_DESC_PTR() bfin_readPTR(MDMA1_S0_CURR_DESC_PTR)
-#define bfin_write_MDMA1_S0_CURR_DESC_PTR(val) bfin_writePTR(MDMA1_S0_CURR_DESC_PTR, val)
-#define bfin_read_MDMA1_S0_CURR_ADDR() bfin_readPTR(MDMA1_S0_CURR_ADDR)
-#define bfin_write_MDMA1_S0_CURR_ADDR(val) bfin_writePTR(MDMA1_S0_CURR_ADDR, val)
-#define bfin_read_MDMA1_S0_IRQ_STATUS() bfin_read16(MDMA1_S0_IRQ_STATUS)
-#define bfin_write_MDMA1_S0_IRQ_STATUS(val) bfin_write16(MDMA1_S0_IRQ_STATUS, val)
-#define bfin_read_MDMA1_S0_PERIPHERAL_MAP() bfin_read16(MDMA1_S0_PERIPHERAL_MAP)
-#define bfin_write_MDMA1_S0_PERIPHERAL_MAP(val) bfin_write16(MDMA1_S0_PERIPHERAL_MAP, val)
-#define bfin_read_MDMA1_S0_CURR_X_COUNT() bfin_read16(MDMA1_S0_CURR_X_COUNT)
-#define bfin_write_MDMA1_S0_CURR_X_COUNT(val) bfin_write16(MDMA1_S0_CURR_X_COUNT, val)
-#define bfin_read_MDMA1_S0_CURR_Y_COUNT() bfin_read16(MDMA1_S0_CURR_Y_COUNT)
-#define bfin_write_MDMA1_S0_CURR_Y_COUNT(val) bfin_write16(MDMA1_S0_CURR_Y_COUNT, val)
-#define bfin_read_MDMA1_D1_NEXT_DESC_PTR() bfin_readPTR(MDMA1_D1_NEXT_DESC_PTR)
-#define bfin_write_MDMA1_D1_NEXT_DESC_PTR(val) bfin_writePTR(MDMA1_D1_NEXT_DESC_PTR, val)
-#define bfin_read_MDMA1_D1_START_ADDR() bfin_readPTR(MDMA1_D1_START_ADDR)
-#define bfin_write_MDMA1_D1_START_ADDR(val) bfin_writePTR(MDMA1_D1_START_ADDR, val)
-#define bfin_read_MDMA1_D1_CONFIG()    bfin_read16(MDMA1_D1_CONFIG)
-#define bfin_write_MDMA1_D1_CONFIG(val) bfin_write16(MDMA1_D1_CONFIG, val)
-#define bfin_read_MDMA1_D1_X_COUNT()   bfin_read16(MDMA1_D1_X_COUNT)
-#define bfin_write_MDMA1_D1_X_COUNT(val) bfin_write16(MDMA1_D1_X_COUNT, val)
-#define bfin_read_MDMA1_D1_X_MODIFY()  bfin_read16(MDMA1_D1_X_MODIFY)
-#define bfin_write_MDMA1_D1_X_MODIFY(val) bfin_write16(MDMA1_D1_X_MODIFY, val)
-#define bfin_read_MDMA1_D1_Y_COUNT()   bfin_read16(MDMA1_D1_Y_COUNT)
-#define bfin_write_MDMA1_D1_Y_COUNT(val) bfin_write16(MDMA1_D1_Y_COUNT, val)
-#define bfin_read_MDMA1_D1_Y_MODIFY()  bfin_read16(MDMA1_D1_Y_MODIFY)
-#define bfin_write_MDMA1_D1_Y_MODIFY(val) bfin_write16(MDMA1_D1_Y_MODIFY, val)
-#define bfin_read_MDMA1_D1_CURR_DESC_PTR() bfin_readPTR(MDMA1_D1_CURR_DESC_PTR)
-#define bfin_write_MDMA1_D1_CURR_DESC_PTR(val) bfin_writePTR(MDMA1_D1_CURR_DESC_PTR, val)
-#define bfin_read_MDMA1_D1_CURR_ADDR() bfin_readPTR(MDMA1_D1_CURR_ADDR)
-#define bfin_write_MDMA1_D1_CURR_ADDR(val) bfin_writePTR(MDMA1_D1_CURR_ADDR, val)
-#define bfin_read_MDMA1_D1_IRQ_STATUS() bfin_read16(MDMA1_D1_IRQ_STATUS)
-#define bfin_write_MDMA1_D1_IRQ_STATUS(val) bfin_write16(MDMA1_D1_IRQ_STATUS, val)
-#define bfin_read_MDMA1_D1_PERIPHERAL_MAP() bfin_read16(MDMA1_D1_PERIPHERAL_MAP)
-#define bfin_write_MDMA1_D1_PERIPHERAL_MAP(val) bfin_write16(MDMA1_D1_PERIPHERAL_MAP, val)
-#define bfin_read_MDMA1_D1_CURR_X_COUNT() bfin_read16(MDMA1_D1_CURR_X_COUNT)
-#define bfin_write_MDMA1_D1_CURR_X_COUNT(val) bfin_write16(MDMA1_D1_CURR_X_COUNT, val)
-#define bfin_read_MDMA1_D1_CURR_Y_COUNT() bfin_read16(MDMA1_D1_CURR_Y_COUNT)
-#define bfin_write_MDMA1_D1_CURR_Y_COUNT(val) bfin_write16(MDMA1_D1_CURR_Y_COUNT, val)
-#define bfin_read_MDMA1_S1_NEXT_DESC_PTR() bfin_readPTR(MDMA1_S1_NEXT_DESC_PTR)
-#define bfin_write_MDMA1_S1_NEXT_DESC_PTR(val) bfin_writePTR(MDMA1_S1_NEXT_DESC_PTR, val)
-#define bfin_read_MDMA1_S1_START_ADDR() bfin_readPTR(MDMA1_S1_START_ADDR)
-#define bfin_write_MDMA1_S1_START_ADDR(val) bfin_writePTR(MDMA1_S1_START_ADDR, val)
-#define bfin_read_MDMA1_S1_CONFIG()    bfin_read16(MDMA1_S1_CONFIG)
-#define bfin_write_MDMA1_S1_CONFIG(val) bfin_write16(MDMA1_S1_CONFIG, val)
-#define bfin_read_MDMA1_S1_X_COUNT()   bfin_read16(MDMA1_S1_X_COUNT)
-#define bfin_write_MDMA1_S1_X_COUNT(val) bfin_write16(MDMA1_S1_X_COUNT, val)
-#define bfin_read_MDMA1_S1_X_MODIFY()  bfin_read16(MDMA1_S1_X_MODIFY)
-#define bfin_write_MDMA1_S1_X_MODIFY(val) bfin_write16(MDMA1_S1_X_MODIFY, val)
-#define bfin_read_MDMA1_S1_Y_COUNT()   bfin_read16(MDMA1_S1_Y_COUNT)
-#define bfin_write_MDMA1_S1_Y_COUNT(val) bfin_write16(MDMA1_S1_Y_COUNT, val)
-#define bfin_read_MDMA1_S1_Y_MODIFY()  bfin_read16(MDMA1_S1_Y_MODIFY)
-#define bfin_write_MDMA1_S1_Y_MODIFY(val) bfin_write16(MDMA1_S1_Y_MODIFY, val)
-#define bfin_read_MDMA1_S1_CURR_DESC_PTR() bfin_readPTR(MDMA1_S1_CURR_DESC_PTR)
-#define bfin_write_MDMA1_S1_CURR_DESC_PTR(val) bfin_writePTR(MDMA1_S1_CURR_DESC_PTR, val)
-#define bfin_read_MDMA1_S1_CURR_ADDR() bfin_readPTR(MDMA1_S1_CURR_ADDR)
-#define bfin_write_MDMA1_S1_CURR_ADDR(val) bfin_writePTR(MDMA1_S1_CURR_ADDR, val)
-#define bfin_read_MDMA1_S1_IRQ_STATUS() bfin_read16(MDMA1_S1_IRQ_STATUS)
-#define bfin_write_MDMA1_S1_IRQ_STATUS(val) bfin_write16(MDMA1_S1_IRQ_STATUS, val)
-#define bfin_read_MDMA1_S1_PERIPHERAL_MAP() bfin_read16(MDMA1_S1_PERIPHERAL_MAP)
-#define bfin_write_MDMA1_S1_PERIPHERAL_MAP(val) bfin_write16(MDMA1_S1_PERIPHERAL_MAP, val)
-#define bfin_read_MDMA1_S1_CURR_X_COUNT() bfin_read16(MDMA1_S1_CURR_X_COUNT)
-#define bfin_write_MDMA1_S1_CURR_X_COUNT(val) bfin_write16(MDMA1_S1_CURR_X_COUNT, val)
-#define bfin_read_MDMA1_S1_CURR_Y_COUNT() bfin_read16(MDMA1_S1_CURR_Y_COUNT)
-#define bfin_write_MDMA1_S1_CURR_Y_COUNT(val) bfin_write16(MDMA1_S1_CURR_Y_COUNT, val)
-
-#define bfin_read_MDMA_S0_CONFIG()  bfin_read_MDMA0_S0_CONFIG()
-#define bfin_write_MDMA_S0_CONFIG(val) bfin_write_MDMA0_S0_CONFIG(val)
-#define bfin_read_MDMA_S0_IRQ_STATUS()  bfin_read_MDMA0_S0_IRQ_STATUS()
-#define bfin_write_MDMA_S0_IRQ_STATUS(val) bfin_write_MDMA0_S0_IRQ_STATUS(val)
-#define bfin_read_MDMA_S0_X_MODIFY()  bfin_read_MDMA0_S0_X_MODIFY()
-#define bfin_write_MDMA_S0_X_MODIFY(val) bfin_write_MDMA0_S0_X_MODIFY(val)
-#define bfin_read_MDMA_S0_Y_MODIFY()  bfin_read_MDMA0_S0_Y_MODIFY()
-#define bfin_write_MDMA_S0_Y_MODIFY(val) bfin_write_MDMA0_S0_Y_MODIFY(val)
-#define bfin_read_MDMA_S0_X_COUNT()  bfin_read_MDMA0_S0_X_COUNT()
-#define bfin_write_MDMA_S0_X_COUNT(val) bfin_write_MDMA0_S0_X_COUNT(val)
-#define bfin_read_MDMA_S0_Y_COUNT()  bfin_read_MDMA0_S0_Y_COUNT()
-#define bfin_write_MDMA_S0_Y_COUNT(val) bfin_write_MDMA0_S0_Y_COUNT(val)
-#define bfin_read_MDMA_S0_START_ADDR()  bfin_read_MDMA0_S0_START_ADDR()
-#define bfin_write_MDMA_S0_START_ADDR(val) bfin_write_MDMA0_S0_START_ADDR(val)
-#define bfin_read_MDMA_D0_CONFIG()  bfin_read_MDMA0_D0_CONFIG()
-#define bfin_write_MDMA_D0_CONFIG(val) bfin_write_MDMA0_D0_CONFIG(val)
-#define bfin_read_MDMA_D0_IRQ_STATUS()  bfin_read_MDMA0_D0_IRQ_STATUS()
-#define bfin_write_MDMA_D0_IRQ_STATUS(val) bfin_write_MDMA0_D0_IRQ_STATUS(val)
-#define bfin_read_MDMA_D0_X_MODIFY()  bfin_read_MDMA0_D0_X_MODIFY()
-#define bfin_write_MDMA_D0_X_MODIFY(val) bfin_write_MDMA0_D0_X_MODIFY(val)
-#define bfin_read_MDMA_D0_Y_MODIFY()  bfin_read_MDMA0_D0_Y_MODIFY()
-#define bfin_write_MDMA_D0_Y_MODIFY(val) bfin_write_MDMA0_D0_Y_MODIFY(val)
-#define bfin_read_MDMA_D0_X_COUNT()  bfin_read_MDMA0_D0_X_COUNT()
-#define bfin_write_MDMA_D0_X_COUNT(val) bfin_write_MDMA0_D0_X_COUNT(val)
-#define bfin_read_MDMA_D0_Y_COUNT()  bfin_read_MDMA0_D0_Y_COUNT()
-#define bfin_write_MDMA_D0_Y_COUNT(val) bfin_write_MDMA0_D0_Y_COUNT(val)
-#define bfin_read_MDMA_D0_START_ADDR()  bfin_read_MDMA0_D0_START_ADDR()
-#define bfin_write_MDMA_D0_START_ADDR(val) bfin_write_MDMA0_D0_START_ADDR(val)
-
-#define bfin_read_MDMA_S1_CONFIG()  bfin_read_MDMA0_S1_CONFIG()
-#define bfin_write_MDMA_S1_CONFIG(val) bfin_write_MDMA0_S1_CONFIG(val)
-#define bfin_read_MDMA_S1_IRQ_STATUS()  bfin_read_MDMA0_S1_IRQ_STATUS()
-#define bfin_write_MDMA_S1_IRQ_STATUS(val) bfin_write_MDMA0_S1_IRQ_STATUS(val)
-#define bfin_read_MDMA_S1_X_MODIFY()  bfin_read_MDMA0_S1_X_MODIFY()
-#define bfin_write_MDMA_S1_X_MODIFY(val) bfin_write_MDMA0_S1_X_MODIFY(val)
-#define bfin_read_MDMA_S1_Y_MODIFY()  bfin_read_MDMA0_S1_Y_MODIFY()
-#define bfin_write_MDMA_S1_Y_MODIFY(val) bfin_write_MDMA0_S1_Y_MODIFY(val)
-#define bfin_read_MDMA_S1_X_COUNT()  bfin_read_MDMA0_S1_X_COUNT()
-#define bfin_write_MDMA_S1_X_COUNT(val) bfin_write_MDMA0_S1_X_COUNT(val)
-#define bfin_read_MDMA_S1_Y_COUNT()  bfin_read_MDMA0_S1_Y_COUNT()
-#define bfin_write_MDMA_S1_Y_COUNT(val) bfin_write_MDMA0_S1_Y_COUNT(val)
-#define bfin_read_MDMA_S1_START_ADDR()  bfin_read_MDMA0_S1_START_ADDR()
-#define bfin_write_MDMA_S1_START_ADDR(val) bfin_write_MDMA0_S1_START_ADDR(val)
-#define bfin_read_MDMA_D1_CONFIG()  bfin_read_MDMA0_D1_CONFIG()
-#define bfin_write_MDMA_D1_CONFIG(val) bfin_write_MDMA0_D1_CONFIG(val)
-#define bfin_read_MDMA_D1_IRQ_STATUS()  bfin_read_MDMA0_D1_IRQ_STATUS()
-#define bfin_write_MDMA_D1_IRQ_STATUS(val) bfin_write_MDMA0_D1_IRQ_STATUS(val)
-#define bfin_read_MDMA_D1_X_MODIFY()  bfin_read_MDMA0_D1_X_MODIFY()
-#define bfin_write_MDMA_D1_X_MODIFY(val) bfin_write_MDMA0_D1_X_MODIFY(val)
-#define bfin_read_MDMA_D1_Y_MODIFY()  bfin_read_MDMA0_D1_Y_MODIFY()
-#define bfin_write_MDMA_D1_Y_MODIFY(val) bfin_write_MDMA0_D1_Y_MODIFY(val)
-#define bfin_read_MDMA_D1_X_COUNT()  bfin_read_MDMA0_D1_X_COUNT()
-#define bfin_write_MDMA_D1_X_COUNT(val) bfin_write_MDMA0_D1_X_COUNT(val)
-#define bfin_read_MDMA_D1_Y_COUNT()  bfin_read_MDMA0_D1_Y_COUNT()
-#define bfin_write_MDMA_D1_Y_COUNT(val) bfin_write_MDMA0_D1_Y_COUNT(val)
-#define bfin_read_MDMA_D1_START_ADDR()  bfin_read_MDMA0_D1_START_ADDR()
-#define bfin_write_MDMA_D1_START_ADDR(val) bfin_write_MDMA0_D1_START_ADDR(val)
-
+#define bfin_read_MDMA_D0_NEXT_DESC_PTR() bfin_readPTR(MDMA_D0_NEXT_DESC_PTR)
+#define bfin_write_MDMA_D0_NEXT_DESC_PTR(val) bfin_writePTR(MDMA_D0_NEXT_DESC_PTR, val)
+#define bfin_read_MDMA_D0_START_ADDR() bfin_readPTR(MDMA_D0_START_ADDR)
+#define bfin_write_MDMA_D0_START_ADDR(val) bfin_writePTR(MDMA_D0_START_ADDR, val)
+#define bfin_read_MDMA_D0_CONFIG()    bfin_read16(MDMA_D0_CONFIG)
+#define bfin_write_MDMA_D0_CONFIG(val) bfin_write16(MDMA_D0_CONFIG, val)
+#define bfin_read_MDMA_D0_X_COUNT()   bfin_read16(MDMA_D0_X_COUNT)
+#define bfin_write_MDMA_D0_X_COUNT(val) bfin_write16(MDMA_D0_X_COUNT, val)
+#define bfin_read_MDMA_D0_X_MODIFY()  bfin_read16(MDMA_D0_X_MODIFY)
+#define bfin_write_MDMA_D0_X_MODIFY(val) bfin_write16(MDMA_D0_X_MODIFY, val)
+#define bfin_read_MDMA_D0_Y_COUNT()   bfin_read16(MDMA_D0_Y_COUNT)
+#define bfin_write_MDMA_D0_Y_COUNT(val) bfin_write16(MDMA_D0_Y_COUNT, val)
+#define bfin_read_MDMA_D0_Y_MODIFY()  bfin_read16(MDMA_D0_Y_MODIFY)
+#define bfin_write_MDMA_D0_Y_MODIFY(val) bfin_write16(MDMA_D0_Y_MODIFY, val)
+#define bfin_read_MDMA_D0_CURR_DESC_PTR() bfin_readPTR(MDMA_D0_CURR_DESC_PTR)
+#define bfin_write_MDMA_D0_CURR_DESC_PTR(val) bfin_writePTR(MDMA_D0_CURR_DESC_PTR, val)
+#define bfin_read_MDMA_D0_CURR_ADDR() bfin_readPTR(MDMA_D0_CURR_ADDR)
+#define bfin_write_MDMA_D0_CURR_ADDR(val) bfin_writePTR(MDMA_D0_CURR_ADDR, val)
+#define bfin_read_MDMA_D0_IRQ_STATUS() bfin_read16(MDMA_D0_IRQ_STATUS)
+#define bfin_write_MDMA_D0_IRQ_STATUS(val) bfin_write16(MDMA_D0_IRQ_STATUS, val)
+#define bfin_read_MDMA_D0_PERIPHERAL_MAP() bfin_read16(MDMA_D0_PERIPHERAL_MAP)
+#define bfin_write_MDMA_D0_PERIPHERAL_MAP(val) bfin_write16(MDMA_D0_PERIPHERAL_MAP, val)
+#define bfin_read_MDMA_D0_CURR_X_COUNT() bfin_read16(MDMA_D0_CURR_X_COUNT)
+#define bfin_write_MDMA_D0_CURR_X_COUNT(val) bfin_write16(MDMA_D0_CURR_X_COUNT, val)
+#define bfin_read_MDMA_D0_CURR_Y_COUNT() bfin_read16(MDMA_D0_CURR_Y_COUNT)
+#define bfin_write_MDMA_D0_CURR_Y_COUNT(val) bfin_write16(MDMA_D0_CURR_Y_COUNT, val)
+#define bfin_read_MDMA_S0_NEXT_DESC_PTR() bfin_readPTR(MDMA_S0_NEXT_DESC_PTR)
+#define bfin_write_MDMA_S0_NEXT_DESC_PTR(val) bfin_writePTR(MDMA_S0_NEXT_DESC_PTR, val)
+#define bfin_read_MDMA_S0_START_ADDR() bfin_readPTR(MDMA_S0_START_ADDR)
+#define bfin_write_MDMA_S0_START_ADDR(val) bfin_writePTR(MDMA_S0_START_ADDR, val)
+#define bfin_read_MDMA_S0_CONFIG()    bfin_read16(MDMA_S0_CONFIG)
+#define bfin_write_MDMA_S0_CONFIG(val) bfin_write16(MDMA_S0_CONFIG, val)
+#define bfin_read_MDMA_S0_X_COUNT()   bfin_read16(MDMA_S0_X_COUNT)
+#define bfin_write_MDMA_S0_X_COUNT(val) bfin_write16(MDMA_S0_X_COUNT, val)
+#define bfin_read_MDMA_S0_X_MODIFY()  bfin_read16(MDMA_S0_X_MODIFY)
+#define bfin_write_MDMA_S0_X_MODIFY(val) bfin_write16(MDMA_S0_X_MODIFY, val)
+#define bfin_read_MDMA_S0_Y_COUNT()   bfin_read16(MDMA_S0_Y_COUNT)
+#define bfin_write_MDMA_S0_Y_COUNT(val) bfin_write16(MDMA_S0_Y_COUNT, val)
+#define bfin_read_MDMA_S0_Y_MODIFY()  bfin_read16(MDMA_S0_Y_MODIFY)
+#define bfin_write_MDMA_S0_Y_MODIFY(val) bfin_write16(MDMA_S0_Y_MODIFY, val)
+#define bfin_read_MDMA_S0_CURR_DESC_PTR() bfin_readPTR(MDMA_S0_CURR_DESC_PTR)
+#define bfin_write_MDMA_S0_CURR_DESC_PTR(val) bfin_writePTR(MDMA_S0_CURR_DESC_PTR, val)
+#define bfin_read_MDMA_S0_CURR_ADDR() bfin_readPTR(MDMA_S0_CURR_ADDR)
+#define bfin_write_MDMA_S0_CURR_ADDR(val) bfin_writePTR(MDMA_S0_CURR_ADDR, val)
+#define bfin_read_MDMA_S0_IRQ_STATUS() bfin_read16(MDMA_S0_IRQ_STATUS)
+#define bfin_write_MDMA_S0_IRQ_STATUS(val) bfin_write16(MDMA_S0_IRQ_STATUS, val)
+#define bfin_read_MDMA_S0_PERIPHERAL_MAP() bfin_read16(MDMA_S0_PERIPHERAL_MAP)
+#define bfin_write_MDMA_S0_PERIPHERAL_MAP(val) bfin_write16(MDMA_S0_PERIPHERAL_MAP, val)
+#define bfin_read_MDMA_S0_CURR_X_COUNT() bfin_read16(MDMA_S0_CURR_X_COUNT)
+#define bfin_write_MDMA_S0_CURR_X_COUNT(val) bfin_write16(MDMA_S0_CURR_X_COUNT, val)
+#define bfin_read_MDMA_S0_CURR_Y_COUNT() bfin_read16(MDMA_S0_CURR_Y_COUNT)
+#define bfin_write_MDMA_S0_CURR_Y_COUNT(val) bfin_write16(MDMA_S0_CURR_Y_COUNT, val)
+#define bfin_read_MDMA_D1_NEXT_DESC_PTR() bfin_readPTR(MDMA_D1_NEXT_DESC_PTR)
+#define bfin_write_MDMA_D1_NEXT_DESC_PTR(val) bfin_writePTR(MDMA_D1_NEXT_DESC_PTR, val)
+#define bfin_read_MDMA_D1_START_ADDR() bfin_readPTR(MDMA_D1_START_ADDR)
+#define bfin_write_MDMA_D1_START_ADDR(val) bfin_writePTR(MDMA_D1_START_ADDR, val)
+#define bfin_read_MDMA_D1_CONFIG()    bfin_read16(MDMA_D1_CONFIG)
+#define bfin_write_MDMA_D1_CONFIG(val) bfin_write16(MDMA_D1_CONFIG, val)
+#define bfin_read_MDMA_D1_X_COUNT()   bfin_read16(MDMA_D1_X_COUNT)
+#define bfin_write_MDMA_D1_X_COUNT(val) bfin_write16(MDMA_D1_X_COUNT, val)
+#define bfin_read_MDMA_D1_X_MODIFY()  bfin_read16(MDMA_D1_X_MODIFY)
+#define bfin_write_MDMA_D1_X_MODIFY(val) bfin_write16(MDMA_D1_X_MODIFY, val)
+#define bfin_read_MDMA_D1_Y_COUNT()   bfin_read16(MDMA_D1_Y_COUNT)
+#define bfin_write_MDMA_D1_Y_COUNT(val) bfin_write16(MDMA_D1_Y_COUNT, val)
+#define bfin_read_MDMA_D1_Y_MODIFY()  bfin_read16(MDMA_D1_Y_MODIFY)
+#define bfin_write_MDMA_D1_Y_MODIFY(val) bfin_write16(MDMA_D1_Y_MODIFY, val)
+#define bfin_read_MDMA_D1_CURR_DESC_PTR() bfin_readPTR(MDMA_D1_CURR_DESC_PTR)
+#define bfin_write_MDMA_D1_CURR_DESC_PTR(val) bfin_writePTR(MDMA_D1_CURR_DESC_PTR, val)
+#define bfin_read_MDMA_D1_CURR_ADDR() bfin_readPTR(MDMA_D1_CURR_ADDR)
+#define bfin_write_MDMA_D1_CURR_ADDR(val) bfin_writePTR(MDMA_D1_CURR_ADDR, val)
+#define bfin_read_MDMA_D1_IRQ_STATUS() bfin_read16(MDMA_D1_IRQ_STATUS)
+#define bfin_write_MDMA_D1_IRQ_STATUS(val) bfin_write16(MDMA_D1_IRQ_STATUS, val)
+#define bfin_read_MDMA_D1_PERIPHERAL_MAP() bfin_read16(MDMA_D1_PERIPHERAL_MAP)
+#define bfin_write_MDMA_D1_PERIPHERAL_MAP(val) bfin_write16(MDMA_D1_PERIPHERAL_MAP, val)
+#define bfin_read_MDMA_D1_CURR_X_COUNT() bfin_read16(MDMA_D1_CURR_X_COUNT)
+#define bfin_write_MDMA_D1_CURR_X_COUNT(val) bfin_write16(MDMA_D1_CURR_X_COUNT, val)
+#define bfin_read_MDMA_D1_CURR_Y_COUNT() bfin_read16(MDMA_D1_CURR_Y_COUNT)
+#define bfin_write_MDMA_D1_CURR_Y_COUNT(val) bfin_write16(MDMA_D1_CURR_Y_COUNT, val)
+#define bfin_read_MDMA_S1_NEXT_DESC_PTR() bfin_readPTR(MDMA_S1_NEXT_DESC_PTR)
+#define bfin_write_MDMA_S1_NEXT_DESC_PTR(val) bfin_writePTR(MDMA_S1_NEXT_DESC_PTR, val)
+#define bfin_read_MDMA_S1_START_ADDR() bfin_readPTR(MDMA_S1_START_ADDR)
+#define bfin_write_MDMA_S1_START_ADDR(val) bfin_writePTR(MDMA_S1_START_ADDR, val)
+#define bfin_read_MDMA_S1_CONFIG()    bfin_read16(MDMA_S1_CONFIG)
+#define bfin_write_MDMA_S1_CONFIG(val) bfin_write16(MDMA_S1_CONFIG, val)
+#define bfin_read_MDMA_S1_X_COUNT()   bfin_read16(MDMA_S1_X_COUNT)
+#define bfin_write_MDMA_S1_X_COUNT(val) bfin_write16(MDMA_S1_X_COUNT, val)
+#define bfin_read_MDMA_S1_X_MODIFY()  bfin_read16(MDMA_S1_X_MODIFY)
+#define bfin_write_MDMA_S1_X_MODIFY(val) bfin_write16(MDMA_S1_X_MODIFY, val)
+#define bfin_read_MDMA_S1_Y_COUNT()   bfin_read16(MDMA_S1_Y_COUNT)
+#define bfin_write_MDMA_S1_Y_COUNT(val) bfin_write16(MDMA_S1_Y_COUNT, val)
+#define bfin_read_MDMA_S1_Y_MODIFY()  bfin_read16(MDMA_S1_Y_MODIFY)
+#define bfin_write_MDMA_S1_Y_MODIFY(val) bfin_write16(MDMA_S1_Y_MODIFY, val)
+#define bfin_read_MDMA_S1_CURR_DESC_PTR() bfin_readPTR(MDMA_S1_CURR_DESC_PTR)
+#define bfin_write_MDMA_S1_CURR_DESC_PTR(val) bfin_writePTR(MDMA_S1_CURR_DESC_PTR, val)
+#define bfin_read_MDMA_S1_CURR_ADDR() bfin_readPTR(MDMA_S1_CURR_ADDR)
+#define bfin_write_MDMA_S1_CURR_ADDR(val) bfin_writePTR(MDMA_S1_CURR_ADDR, val)
+#define bfin_read_MDMA_S1_IRQ_STATUS() bfin_read16(MDMA_S1_IRQ_STATUS)
+#define bfin_write_MDMA_S1_IRQ_STATUS(val) bfin_write16(MDMA_S1_IRQ_STATUS, val)
+#define bfin_read_MDMA_S1_PERIPHERAL_MAP() bfin_read16(MDMA_S1_PERIPHERAL_MAP)
+#define bfin_write_MDMA_S1_PERIPHERAL_MAP(val) bfin_write16(MDMA_S1_PERIPHERAL_MAP, val)
+#define bfin_read_MDMA_S1_CURR_X_COUNT() bfin_read16(MDMA_S1_CURR_X_COUNT)
+#define bfin_write_MDMA_S1_CURR_X_COUNT(val) bfin_write16(MDMA_S1_CURR_X_COUNT, val)
+#define bfin_read_MDMA_S1_CURR_Y_COUNT() bfin_read16(MDMA_S1_CURR_Y_COUNT)
+#define bfin_write_MDMA_S1_CURR_Y_COUNT(val) bfin_write16(MDMA_S1_CURR_Y_COUNT, val)
+#define bfin_read_MDMA_D2_NEXT_DESC_PTR() bfin_readPTR(MDMA_D2_NEXT_DESC_PTR)
+#define bfin_write_MDMA_D2_NEXT_DESC_PTR(val) bfin_writePTR(MDMA_D2_NEXT_DESC_PTR, val)
+#define bfin_read_MDMA_D2_START_ADDR() bfin_readPTR(MDMA_D2_START_ADDR)
+#define bfin_write_MDMA_D2_START_ADDR(val) bfin_writePTR(MDMA_D2_START_ADDR, val)
+#define bfin_read_MDMA_D2_CONFIG()    bfin_read16(MDMA_D2_CONFIG)
+#define bfin_write_MDMA_D2_CONFIG(val) bfin_write16(MDMA_D2_CONFIG, val)
+#define bfin_read_MDMA_D2_X_COUNT()   bfin_read16(MDMA_D2_X_COUNT)
+#define bfin_write_MDMA_D2_X_COUNT(val) bfin_write16(MDMA_D2_X_COUNT, val)
+#define bfin_read_MDMA_D2_X_MODIFY()  bfin_read16(MDMA_D2_X_MODIFY)
+#define bfin_write_MDMA_D2_X_MODIFY(val) bfin_write16(MDMA_D2_X_MODIFY, val)
+#define bfin_read_MDMA_D2_Y_COUNT()   bfin_read16(MDMA_D2_Y_COUNT)
+#define bfin_write_MDMA_D2_Y_COUNT(val) bfin_write16(MDMA_D2_Y_COUNT, val)
+#define bfin_read_MDMA_D2_Y_MODIFY()  bfin_read16(MDMA_D2_Y_MODIFY)
+#define bfin_write_MDMA_D2_Y_MODIFY(val) bfin_write16(MDMA_D2_Y_MODIFY, val)
+#define bfin_read_MDMA_D2_CURR_DESC_PTR() bfin_readPTR(MDMA_D2_CURR_DESC_PTR)
+#define bfin_write_MDMA_D2_CURR_DESC_PTR(val) bfin_writePTR(MDMA_D2_CURR_DESC_PTR, val)
+#define bfin_read_MDMA_D2_CURR_ADDR() bfin_readPTR(MDMA_D2_CURR_ADDR)
+#define bfin_write_MDMA_D2_CURR_ADDR(val) bfin_writePTR(MDMA_D2_CURR_ADDR, val)
+#define bfin_read_MDMA_D2_IRQ_STATUS() bfin_read16(MDMA_D2_IRQ_STATUS)
+#define bfin_write_MDMA_D2_IRQ_STATUS(val) bfin_write16(MDMA_D2_IRQ_STATUS, val)
+#define bfin_read_MDMA_D2_PERIPHERAL_MAP() bfin_read16(MDMA_D2_PERIPHERAL_MAP)
+#define bfin_write_MDMA_D2_PERIPHERAL_MAP(val) bfin_write16(MDMA_D2_PERIPHERAL_MAP, val)
+#define bfin_read_MDMA_D2_CURR_X_COUNT() bfin_read16(MDMA_D2_CURR_X_COUNT)
+#define bfin_write_MDMA_D2_CURR_X_COUNT(val) bfin_write16(MDMA_D2_CURR_X_COUNT, val)
+#define bfin_read_MDMA_D2_CURR_Y_COUNT() bfin_read16(MDMA_D2_CURR_Y_COUNT)
+#define bfin_write_MDMA_D2_CURR_Y_COUNT(val) bfin_write16(MDMA_D2_CURR_Y_COUNT, val)
+#define bfin_read_MDMA_S2_NEXT_DESC_PTR() bfin_readPTR(MDMA_S2_NEXT_DESC_PTR)
+#define bfin_write_MDMA_S2_NEXT_DESC_PTR(val) bfin_writePTR(MDMA_S2_NEXT_DESC_PTR, val)
+#define bfin_read_MDMA_S2_START_ADDR() bfin_readPTR(MDMA_S2_START_ADDR)
+#define bfin_write_MDMA_S2_START_ADDR(val) bfin_writePTR(MDMA_S2_START_ADDR, val)
+#define bfin_read_MDMA_S2_CONFIG()    bfin_read16(MDMA_S2_CONFIG)
+#define bfin_write_MDMA_S2_CONFIG(val) bfin_write16(MDMA_S2_CONFIG, val)
+#define bfin_read_MDMA_S2_X_COUNT()   bfin_read16(MDMA_S2_X_COUNT)
+#define bfin_write_MDMA_S2_X_COUNT(val) bfin_write16(MDMA_S2_X_COUNT, val)
+#define bfin_read_MDMA_S2_X_MODIFY()  bfin_read16(MDMA_S2_X_MODIFY)
+#define bfin_write_MDMA_S2_X_MODIFY(val) bfin_write16(MDMA_S2_X_MODIFY, val)
+#define bfin_read_MDMA_S2_Y_COUNT()   bfin_read16(MDMA_S2_Y_COUNT)
+#define bfin_write_MDMA_S2_Y_COUNT(val) bfin_write16(MDMA_S2_Y_COUNT, val)
+#define bfin_read_MDMA_S2_Y_MODIFY()  bfin_read16(MDMA_S2_Y_MODIFY)
+#define bfin_write_MDMA_S2_Y_MODIFY(val) bfin_write16(MDMA_S2_Y_MODIFY, val)
+#define bfin_read_MDMA_S2_CURR_DESC_PTR() bfin_readPTR(MDMA_S2_CURR_DESC_PTR)
+#define bfin_write_MDMA_S2_CURR_DESC_PTR(val) bfin_writePTR(MDMA_S2_CURR_DESC_PTR, val)
+#define bfin_read_MDMA_S2_CURR_ADDR() bfin_readPTR(MDMA_S2_CURR_ADDR)
+#define bfin_write_MDMA_S2_CURR_ADDR(val) bfin_writePTR(MDMA_S2_CURR_ADDR, val)
+#define bfin_read_MDMA_S2_IRQ_STATUS() bfin_read16(MDMA_S2_IRQ_STATUS)
+#define bfin_write_MDMA_S2_IRQ_STATUS(val) bfin_write16(MDMA_S2_IRQ_STATUS, val)
+#define bfin_read_MDMA_S2_PERIPHERAL_MAP() bfin_read16(MDMA_S2_PERIPHERAL_MAP)
+#define bfin_write_MDMA_S2_PERIPHERAL_MAP(val) bfin_write16(MDMA_S2_PERIPHERAL_MAP, val)
+#define bfin_read_MDMA_S2_CURR_X_COUNT() bfin_read16(MDMA_S2_CURR_X_COUNT)
+#define bfin_write_MDMA_S2_CURR_X_COUNT(val) bfin_write16(MDMA_S2_CURR_X_COUNT, val)
+#define bfin_read_MDMA_S2_CURR_Y_COUNT() bfin_read16(MDMA_S2_CURR_Y_COUNT)
+#define bfin_write_MDMA_S2_CURR_Y_COUNT(val) bfin_write16(MDMA_S2_CURR_Y_COUNT, val)
+#define bfin_read_MDMA_D3_NEXT_DESC_PTR() bfin_readPTR(MDMA_D3_NEXT_DESC_PTR)
+#define bfin_write_MDMA_D3_NEXT_DESC_PTR(val) bfin_writePTR(MDMA_D3_NEXT_DESC_PTR, val)
+#define bfin_read_MDMA_D3_START_ADDR() bfin_readPTR(MDMA_D3_START_ADDR)
+#define bfin_write_MDMA_D3_START_ADDR(val) bfin_writePTR(MDMA_D3_START_ADDR, val)
+#define bfin_read_MDMA_D3_CONFIG()    bfin_read16(MDMA_D3_CONFIG)
+#define bfin_write_MDMA_D3_CONFIG(val) bfin_write16(MDMA_D3_CONFIG, val)
+#define bfin_read_MDMA_D3_X_COUNT()   bfin_read16(MDMA_D3_X_COUNT)
+#define bfin_write_MDMA_D3_X_COUNT(val) bfin_write16(MDMA_D3_X_COUNT, val)
+#define bfin_read_MDMA_D3_X_MODIFY()  bfin_read16(MDMA_D3_X_MODIFY)
+#define bfin_write_MDMA_D3_X_MODIFY(val) bfin_write16(MDMA_D3_X_MODIFY, val)
+#define bfin_read_MDMA_D3_Y_COUNT()   bfin_read16(MDMA_D3_Y_COUNT)
+#define bfin_write_MDMA_D3_Y_COUNT(val) bfin_write16(MDMA_D3_Y_COUNT, val)
+#define bfin_read_MDMA_D3_Y_MODIFY()  bfin_read16(MDMA_D3_Y_MODIFY)
+#define bfin_write_MDMA_D3_Y_MODIFY(val) bfin_write16(MDMA_D3_Y_MODIFY, val)
+#define bfin_read_MDMA_D3_CURR_DESC_PTR() bfin_readPTR(MDMA_D3_CURR_DESC_PTR)
+#define bfin_write_MDMA_D3_CURR_DESC_PTR(val) bfin_writePTR(MDMA_D3_CURR_DESC_PTR, val)
+#define bfin_read_MDMA_D3_CURR_ADDR() bfin_readPTR(MDMA_D3_CURR_ADDR)
+#define bfin_write_MDMA_D3_CURR_ADDR(val) bfin_writePTR(MDMA_D3_CURR_ADDR, val)
+#define bfin_read_MDMA_D3_IRQ_STATUS() bfin_read16(MDMA_D3_IRQ_STATUS)
+#define bfin_write_MDMA_D3_IRQ_STATUS(val) bfin_write16(MDMA_D3_IRQ_STATUS, val)
+#define bfin_read_MDMA_D3_PERIPHERAL_MAP() bfin_read16(MDMA_D3_PERIPHERAL_MAP)
+#define bfin_write_MDMA_D3_PERIPHERAL_MAP(val) bfin_write16(MDMA_D3_PERIPHERAL_MAP, val)
+#define bfin_read_MDMA_D3_CURR_X_COUNT() bfin_read16(MDMA_D3_CURR_X_COUNT)
+#define bfin_write_MDMA_D3_CURR_X_COUNT(val) bfin_write16(MDMA_D3_CURR_X_COUNT, val)
+#define bfin_read_MDMA_D3_CURR_Y_COUNT() bfin_read16(MDMA_D3_CURR_Y_COUNT)
+#define bfin_write_MDMA_D3_CURR_Y_COUNT(val) bfin_write16(MDMA_D3_CURR_Y_COUNT, val)
+#define bfin_read_MDMA_S3_NEXT_DESC_PTR() bfin_readPTR(MDMA_S3_NEXT_DESC_PTR)
+#define bfin_write_MDMA_S3_NEXT_DESC_PTR(val) bfin_writePTR(MDMA_S3_NEXT_DESC_PTR, val)
+#define bfin_read_MDMA_S3_START_ADDR() bfin_readPTR(MDMA_S3_START_ADDR)
+#define bfin_write_MDMA_S3_START_ADDR(val) bfin_writePTR(MDMA_S3_START_ADDR, val)
+#define bfin_read_MDMA_S3_CONFIG()    bfin_read16(MDMA_S3_CONFIG)
+#define bfin_write_MDMA_S3_CONFIG(val) bfin_write16(MDMA_S3_CONFIG, val)
+#define bfin_read_MDMA_S3_X_COUNT()   bfin_read16(MDMA_S3_X_COUNT)
+#define bfin_write_MDMA_S3_X_COUNT(val) bfin_write16(MDMA_S3_X_COUNT, val)
+#define bfin_read_MDMA_S3_X_MODIFY()  bfin_read16(MDMA_S3_X_MODIFY)
+#define bfin_write_MDMA_S3_X_MODIFY(val) bfin_write16(MDMA_S3_X_MODIFY, val)
+#define bfin_read_MDMA_S3_Y_COUNT()   bfin_read16(MDMA_S3_Y_COUNT)
+#define bfin_write_MDMA_S3_Y_COUNT(val) bfin_write16(MDMA_S3_Y_COUNT, val)
+#define bfin_read_MDMA_S3_Y_MODIFY()  bfin_read16(MDMA_S3_Y_MODIFY)
+#define bfin_write_MDMA_S3_Y_MODIFY(val) bfin_write16(MDMA_S3_Y_MODIFY, val)
+#define bfin_read_MDMA_S3_CURR_DESC_PTR() bfin_readPTR(MDMA_S3_CURR_DESC_PTR)
+#define bfin_write_MDMA_S3_CURR_DESC_PTR(val) bfin_writePTR(MDMA_S3_CURR_DESC_PTR, val)
+#define bfin_read_MDMA_S3_CURR_ADDR() bfin_readPTR(MDMA_S3_CURR_ADDR)
+#define bfin_write_MDMA_S3_CURR_ADDR(val) bfin_writePTR(MDMA_S3_CURR_ADDR, val)
+#define bfin_read_MDMA_S3_IRQ_STATUS() bfin_read16(MDMA_S3_IRQ_STATUS)
+#define bfin_write_MDMA_S3_IRQ_STATUS(val) bfin_write16(MDMA_S3_IRQ_STATUS, val)
+#define bfin_read_MDMA_S3_PERIPHERAL_MAP() bfin_read16(MDMA_S3_PERIPHERAL_MAP)
+#define bfin_write_MDMA_S3_PERIPHERAL_MAP(val) bfin_write16(MDMA_S3_PERIPHERAL_MAP, val)
+#define bfin_read_MDMA_S3_CURR_X_COUNT() bfin_read16(MDMA_S3_CURR_X_COUNT)
+#define bfin_write_MDMA_S3_CURR_X_COUNT(val) bfin_write16(MDMA_S3_CURR_X_COUNT, val)
+#define bfin_read_MDMA_S3_CURR_Y_COUNT() bfin_read16(MDMA_S3_CURR_Y_COUNT)
+#define bfin_write_MDMA_S3_CURR_Y_COUNT(val) bfin_write16(MDMA_S3_CURR_Y_COUNT, val)
 #define bfin_read_PPI_CONTROL()        bfin_read16(PPI_CONTROL)
 #define bfin_write_PPI_CONTROL(val)    bfin_write16(PPI_CONTROL, val)
 #define bfin_read_PPI_STATUS()         bfin_read16(PPI_STATUS)
@@ -2024,7 +1957,4 @@
 #define bfin_read_CAN_MB31_ID1()       bfin_read16(CAN_MB31_ID1)
 #define bfin_write_CAN_MB31_ID1(val)   bfin_write16(CAN_MB31_ID1, val)
 
-/* These need to be last due to the cdef/linux inter-dependencies */
-#include <asm/irq.h>
-
 #endif
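
For readers unfamiliar with the Blackfin MMR accessor style used in the hunk above, the sketch below shows how the per-register macros (the bfin_write16()/bfin_writePTR() wrappers such as bfin_write_MDMA_S0_START_ADDR()) are typically driven to perform a simple memory-to-memory DMA copy. This is an illustrative sketch only, not part of the patch: the DMA configuration bit names (DMAEN, WNR, WDSIZE_16, DMA_DONE) and the SSYNC() barrier are assumed to be available from the usual Blackfin DMA/def headers and are not defined in this hunk.

    /* Hedged sketch: copy `count` 16-bit words from src to dst via MemDMA stream 0.
     * Assumes DMAEN, WNR, WDSIZE_16, DMA_DONE and SSYNC() come from the standard
     * Blackfin headers; only the MDMA accessor macros above are taken from this patch.
     */
    static void mdma0_copy16(void *dst, const void *src, unsigned short count)
    {
    	/* Program the source half of the stream (read from memory). */
    	bfin_write_MDMA_S0_START_ADDR((unsigned long)src);
    	bfin_write_MDMA_S0_X_COUNT(count);
    	bfin_write_MDMA_S0_X_MODIFY(2);		/* advance 2 bytes per element */
    	bfin_write_MDMA_S0_CONFIG(DMAEN | WDSIZE_16);

    	/* Program the destination half (write to memory) and start the transfer. */
    	bfin_write_MDMA_D0_START_ADDR((unsigned long)dst);
    	bfin_write_MDMA_D0_X_COUNT(count);
    	bfin_write_MDMA_D0_X_MODIFY(2);
    	bfin_write_MDMA_D0_CONFIG(DMAEN | WNR | WDSIZE_16);
    	SSYNC();

    	/* Busy-wait for completion, then acknowledge the status bit. */
    	while (!(bfin_read_MDMA_D0_IRQ_STATUS() & DMA_DONE))
    		continue;
    	bfin_write_MDMA_D0_IRQ_STATUS(DMA_DONE);
    }
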
diff --git a/arch/blackfin/mach-bf538/include/mach/cdefBF539.h b/arch/blackfin/mach-bf538/include/mach/cdefBF539.h
index 198c4bb..acc15f3 100644
--- a/arch/blackfin/mach-bf538/include/mach/cdefBF539.h
+++ b/arch/blackfin/mach-bf538/include/mach/cdefBF539.h
@@ -1,6 +1,7 @@
-/* DO NOT EDIT THIS FILE
- * Automatically generated by generate-cdef-headers.xsl
- * DO NOT EDIT THIS FILE
+/*
+ * Copyright 2008-2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
  */
 
 #ifndef _CDEF_BF539_H
@@ -9,7 +10,6 @@
 /* Include MMRs Common to BF538 								*/
 #include "cdefBF538.h"
 
-
 #define bfin_read_MXVR_CONFIG()        bfin_read16(MXVR_CONFIG)
 #define bfin_write_MXVR_CONFIG(val)    bfin_write16(MXVR_CONFIG, val)
 #define bfin_read_MXVR_PLL_CTL_0()     bfin_read32(MXVR_PLL_CTL_0)
diff --git a/arch/blackfin/mach-bf538/include/mach/defBF538.h b/arch/blackfin/mach-bf538/include/mach/defBF538.h
new file mode 100644
index 0000000..d27f81d
--- /dev/null
+++ b/arch/blackfin/mach-bf538/include/mach/defBF538.h
@@ -0,0 +1,1825 @@
+/*
+ * Copyright 2008-2010 Analog Devices Inc.
+ *
+ * Licensed under the ADI BSD license or the GPL-2 (or later)
+ */
+
+#ifndef _DEF_BF538_H
+#define _DEF_BF538_H
+
+/* Clock/Regulator Control (0xFFC00000 - 0xFFC000FF) */
+#define	PLL_CTL			0xFFC00000	/* PLL Control register (16-bit) */
+#define	PLL_DIV			0xFFC00004	/* PLL Divide Register (16-bit) */
+#define	VR_CTL			0xFFC00008	/* Voltage Regulator Control Register (16-bit) */
+#define	PLL_STAT		0xFFC0000C	/* PLL Status register (16-bit) */
+#define	PLL_LOCKCNT		0xFFC00010	/* PLL Lock	Count register (16-bit) */
+#define	CHIPID			0xFFC00014	/* Chip	ID Register */
+
+/* CHIPID Masks */
+#define CHIPID_VERSION         0xF0000000
+#define CHIPID_FAMILY          0x0FFFF000
+#define CHIPID_MANUFACTURE     0x00000FFE
+
+/* System Interrupt Controller (0xFFC00100 - 0xFFC001FF) */
+#define	SWRST			0xFFC00100  /* Software	Reset Register (16-bit) */
+#define	SYSCR			0xFFC00104  /* System Configuration register */
+#define	SIC_RVECT		0xFFC00108
+#define	SIC_IMASK0		0xFFC0010C  /* Interrupt Mask Register */
+#define	SIC_IAR0		0xFFC00110  /* Interrupt Assignment Register 0 */
+#define	SIC_IAR1		0xFFC00114  /* Interrupt Assignment Register 1 */
+#define	SIC_IAR2		0xFFC00118  /* Interrupt Assignment Register 2 */
+#define	SIC_IAR3			0xFFC0011C	/* Interrupt Assignment	Register 3 */
+#define	SIC_ISR0			0xFFC00120  /* Interrupt Status	Register */
+#define	SIC_IWR0			0xFFC00124  /* Interrupt Wakeup	Register */
+#define	SIC_IMASK1			0xFFC00128	/* Interrupt Mask Register 1 */
+#define	SIC_ISR1			0xFFC0012C	/* Interrupt Status Register 1 */
+#define	SIC_IWR1			0xFFC00130	/* Interrupt Wakeup Register 1 */
+#define	SIC_IAR4			0xFFC00134	/* Interrupt Assignment	Register 4 */
+#define	SIC_IAR5			0xFFC00138	/* Interrupt Assignment	Register 5 */
+#define	SIC_IAR6			0xFFC0013C	/* Interrupt Assignment	Register 6 */
+
+
+/* Watchdog Timer (0xFFC00200 -	0xFFC002FF) */
+#define	WDOG_CTL	0xFFC00200  /* Watchdog	Control	Register */
+#define	WDOG_CNT	0xFFC00204  /* Watchdog	Count Register */
+#define	WDOG_STAT	0xFFC00208  /* Watchdog	Status Register */
+
+
+/* Real	Time Clock (0xFFC00300 - 0xFFC003FF) */
+#define	RTC_STAT	0xFFC00300  /* RTC Status Register */
+#define	RTC_ICTL	0xFFC00304  /* RTC Interrupt Control Register */
+#define	RTC_ISTAT	0xFFC00308  /* RTC Interrupt Status Register */
+#define	RTC_SWCNT	0xFFC0030C  /* RTC Stopwatch Count Register */
+#define	RTC_ALARM	0xFFC00310  /* RTC Alarm Time Register */
+#define	RTC_FAST	0xFFC00314  /* RTC Prescaler Enable Register */
+#define	RTC_PREN		0xFFC00314  /* RTC Prescaler Enable Register (alternate	macro) */
+
+
+/* UART0 Controller (0xFFC00400	- 0xFFC004FF) */
+#define	UART0_THR	      0xFFC00400  /* Transmit Holding register */
+#define	UART0_RBR	      0xFFC00400  /* Receive Buffer register */
+#define	UART0_DLL	      0xFFC00400  /* Divisor Latch (Low-Byte) */
+#define	UART0_IER	      0xFFC00404  /* Interrupt Enable Register */
+#define	UART0_DLH	      0xFFC00404  /* Divisor Latch (High-Byte) */
+#define	UART0_IIR	      0xFFC00408  /* Interrupt Identification Register */
+#define	UART0_LCR	      0xFFC0040C  /* Line Control Register */
+#define	UART0_MCR			 0xFFC00410  /*	Modem Control Register */
+#define	UART0_LSR	      0xFFC00414  /* Line Status Register */
+#define	UART0_SCR	      0xFFC0041C  /* SCR Scratch Register */
+#define	UART0_GCTL		     0xFFC00424	 /* Global Control Register */
+
+
+/* SPI0	Controller (0xFFC00500 - 0xFFC005FF) */
+
+#define	SPI0_CTL			0xFFC00500  /* SPI0 Control Register */
+#define	SPI0_FLG			0xFFC00504  /* SPI0 Flag register */
+#define	SPI0_STAT			0xFFC00508  /* SPI0 Status register */
+#define	SPI0_TDBR			0xFFC0050C  /* SPI0 Transmit Data Buffer Register */
+#define	SPI0_RDBR			0xFFC00510  /* SPI0 Receive Data Buffer	Register */
+#define	SPI0_BAUD			0xFFC00514  /* SPI0 Baud rate Register */
+#define	SPI0_SHADOW			0xFFC00518  /* SPI0_RDBR Shadow	Register */
+#define SPI0_REGBASE			SPI0_CTL
+
+
+/* TIMER 0, 1, 2 Registers (0xFFC00600 - 0xFFC006FF) */
+#define	TIMER0_CONFIG			0xFFC00600     /* Timer	0 Configuration	Register */
+#define	TIMER0_COUNTER				0xFFC00604     /* Timer	0 Counter Register */
+#define	TIMER0_PERIOD			0xFFC00608     /* Timer	0 Period Register */
+#define	TIMER0_WIDTH			0xFFC0060C     /* Timer	0 Width	Register */
+
+#define	TIMER1_CONFIG			0xFFC00610	/*  Timer 1 Configuration Register   */
+#define	TIMER1_COUNTER			0xFFC00614	/*  Timer 1 Counter Register	     */
+#define	TIMER1_PERIOD			0xFFC00618	/*  Timer 1 Period Register	     */
+#define	TIMER1_WIDTH			0xFFC0061C	/*  Timer 1 Width Register	     */
+
+#define	TIMER2_CONFIG			0xFFC00620	/* Timer 2 Configuration Register   */
+#define	TIMER2_COUNTER			0xFFC00624	/* Timer 2 Counter Register	    */
+#define	TIMER2_PERIOD			0xFFC00628	/* Timer 2 Period Register	    */
+#define	TIMER2_WIDTH			0xFFC0062C	/* Timer 2 Width Register	    */
+
+#define	TIMER_ENABLE				0xFFC00640	/* Timer Enable	Register */
+#define	TIMER_DISABLE				0xFFC00644	/* Timer Disable Register */
+#define	TIMER_STATUS				0xFFC00648	/* Timer Status	Register */
+
+
+/* Programmable	Flags (0xFFC00700 - 0xFFC007FF) */
+#define	FIO_FLAG_D				0xFFC00700  /* Flag Mask to directly specify state of pins */
+#define	FIO_FLAG_C			0xFFC00704  /* Peripheral Interrupt Flag Register (clear) */
+#define	FIO_FLAG_S			0xFFC00708  /* Peripheral Interrupt Flag Register (set) */
+#define	FIO_FLAG_T					0xFFC0070C  /* Flag Mask to directly toggle state of pins */
+#define	FIO_MASKA_D			0xFFC00710  /* Flag Mask Interrupt A Register (set directly) */
+#define	FIO_MASKA_C			0xFFC00714  /* Flag Mask Interrupt A Register (clear) */
+#define	FIO_MASKA_S			0xFFC00718  /* Flag Mask Interrupt A Register (set) */
+#define	FIO_MASKA_T			0xFFC0071C  /* Flag Mask Interrupt A Register (toggle) */
+#define	FIO_MASKB_D			0xFFC00720  /* Flag Mask Interrupt B Register (set directly) */
+#define	FIO_MASKB_C			0xFFC00724  /* Flag Mask Interrupt B Register (clear) */
+#define	FIO_MASKB_S			0xFFC00728  /* Flag Mask Interrupt B Register (set) */
+#define	FIO_MASKB_T			0xFFC0072C  /* Flag Mask Interrupt B Register (toggle) */
+#define	FIO_DIR				0xFFC00730  /* Peripheral Flag Direction Register */
+#define	FIO_POLAR			0xFFC00734  /* Flag Source Polarity Register */
+#define	FIO_EDGE			0xFFC00738  /* Flag Source Sensitivity Register */
+#define	FIO_BOTH			0xFFC0073C  /* Flag Set	on BOTH	Edges Register */
+#define	FIO_INEN					0xFFC00740  /* Flag Input Enable Register  */
+
+
+/* SPORT0 Controller (0xFFC00800 - 0xFFC008FF) */
+#define	SPORT0_TCR1				0xFFC00800  /* SPORT0 Transmit Configuration 1 Register */
+#define	SPORT0_TCR2				0xFFC00804  /* SPORT0 Transmit Configuration 2 Register */
+#define	SPORT0_TCLKDIV			0xFFC00808  /* SPORT0 Transmit Clock Divider */
+#define	SPORT0_TFSDIV			0xFFC0080C  /* SPORT0 Transmit Frame Sync Divider */
+#define	SPORT0_TX			0xFFC00810  /* SPORT0 TX Data Register */
+#define	SPORT0_RX			0xFFC00818  /* SPORT0 RX Data Register */
+#define	SPORT0_RCR1				0xFFC00820  /* SPORT0 Receive Configuration 1 Register */
+#define	SPORT0_RCR2				0xFFC00824  /* SPORT0 Receive Configuration 2 Register */
+#define	SPORT0_RCLKDIV			0xFFC00828  /* SPORT0 Receive Clock Divider */
+#define	SPORT0_RFSDIV			0xFFC0082C  /* SPORT0 Receive Frame Sync Divider */
+#define	SPORT0_STAT			0xFFC00830  /* SPORT0 Status Register */
+#define	SPORT0_CHNL			0xFFC00834  /* SPORT0 Current Channel Register */
+#define	SPORT0_MCMC1			0xFFC00838  /* SPORT0 Multi-Channel Configuration Register 1 */
+#define	SPORT0_MCMC2			0xFFC0083C  /* SPORT0 Multi-Channel Configuration Register 2 */
+#define	SPORT0_MTCS0			0xFFC00840  /* SPORT0 Multi-Channel Transmit Select Register 0 */
+#define	SPORT0_MTCS1			0xFFC00844  /* SPORT0 Multi-Channel Transmit Select Register 1 */
+#define	SPORT0_MTCS2			0xFFC00848  /* SPORT0 Multi-Channel Transmit Select Register 2 */
+#define	SPORT0_MTCS3			0xFFC0084C  /* SPORT0 Multi-Channel Transmit Select Register 3 */
+#define	SPORT0_MRCS0			0xFFC00850  /* SPORT0 Multi-Channel Receive Select Register 0 */
+#define	SPORT0_MRCS1			0xFFC00854  /* SPORT0 Multi-Channel Receive Select Register 1 */
+#define	SPORT0_MRCS2			0xFFC00858  /* SPORT0 Multi-Channel Receive Select Register 2 */
+#define	SPORT0_MRCS3			0xFFC0085C  /* SPORT0 Multi-Channel Receive Select Register 3 */
+
+
+/* SPORT1 Controller (0xFFC00900 - 0xFFC009FF) */
+#define	SPORT1_TCR1				0xFFC00900  /* SPORT1 Transmit Configuration 1 Register */
+#define	SPORT1_TCR2				0xFFC00904  /* SPORT1 Transmit Configuration 2 Register */
+#define	SPORT1_TCLKDIV			0xFFC00908  /* SPORT1 Transmit Clock Divider */
+#define	SPORT1_TFSDIV			0xFFC0090C  /* SPORT1 Transmit Frame Sync Divider */
+#define	SPORT1_TX			0xFFC00910  /* SPORT1 TX Data Register */
+#define	SPORT1_RX			0xFFC00918  /* SPORT1 RX Data Register */
+#define	SPORT1_RCR1				0xFFC00920  /* SPORT1 Receive Configuration 1 Register */
+#define	SPORT1_RCR2				0xFFC00924  /* SPORT1 Receive Configuration 2 Register */
+#define	SPORT1_RCLKDIV			0xFFC00928  /* SPORT1 Receive Clock Divider */
+#define	SPORT1_RFSDIV			0xFFC0092C  /* SPORT1 Receive Frame Sync Divider */
+#define	SPORT1_STAT			0xFFC00930  /* SPORT1 Status Register */
+#define	SPORT1_CHNL			0xFFC00934  /* SPORT1 Current Channel Register */
+#define	SPORT1_MCMC1			0xFFC00938  /* SPORT1 Multi-Channel Configuration Register 1 */
+#define	SPORT1_MCMC2			0xFFC0093C  /* SPORT1 Multi-Channel Configuration Register 2 */
+#define	SPORT1_MTCS0			0xFFC00940  /* SPORT1 Multi-Channel Transmit Select Register 0 */
+#define	SPORT1_MTCS1			0xFFC00944  /* SPORT1 Multi-Channel Transmit Select Register 1 */
+#define	SPORT1_MTCS2			0xFFC00948  /* SPORT1 Multi-Channel Transmit Select Register 2 */
+#define	SPORT1_MTCS3			0xFFC0094C  /* SPORT1 Multi-Channel Transmit Select Register 3 */
+#define	SPORT1_MRCS0			0xFFC00950  /* SPORT1 Multi-Channel Receive Select Register 0 */
+#define	SPORT1_MRCS1			0xFFC00954  /* SPORT1 Multi-Channel Receive Select Register 1 */
+#define	SPORT1_MRCS2			0xFFC00958  /* SPORT1 Multi-Channel Receive Select Register 2 */
+#define	SPORT1_MRCS3			0xFFC0095C  /* SPORT1 Multi-Channel Receive Select Register 3 */
+
+
+/* External Bus	Interface Unit (0xFFC00A00 - 0xFFC00AFF) */
+/* Asynchronous	Memory Controller  */
+#define	EBIU_AMGCTL			0xFFC00A00  /* Asynchronous Memory Global Control Register */
+#define	EBIU_AMBCTL0		0xFFC00A04  /* Asynchronous Memory Bank	Control	Register 0 */
+#define	EBIU_AMBCTL1		0xFFC00A08  /* Asynchronous Memory Bank	Control	Register 1 */
+
+/* SDRAM Controller */
+#define	EBIU_SDGCTL			0xFFC00A10  /* SDRAM Global Control Register */
+#define	EBIU_SDBCTL			0xFFC00A14  /* SDRAM Bank Control Register */
+#define	EBIU_SDRRC			0xFFC00A18  /* SDRAM Refresh Rate Control Register */
+#define	EBIU_SDSTAT			0xFFC00A1C  /* SDRAM Status Register */
+
+
+
+/* DMA Controller 0 Traffic Control Registers (0xFFC00B00 - 0xFFC00BFF) */
+
+#define	DMAC0_TC_PER			0xFFC00B0C	/* DMA Controller 0 Traffic Control Periods Register */
+#define	DMAC0_TC_CNT			0xFFC00B10	/* DMA Controller 0 Traffic Control Current Counts Register */
+
+
+
+/* DMA Controller 0 (0xFFC00C00	- 0xFFC00FFF)							 */
+
+#define	DMA0_NEXT_DESC_PTR		0xFFC00C00	/* DMA Channel 0 Next Descriptor Pointer Register */
+#define	DMA0_START_ADDR			0xFFC00C04	/* DMA Channel 0 Start Address Register */
+#define	DMA0_CONFIG				0xFFC00C08	/* DMA Channel 0 Configuration Register */
+#define	DMA0_X_COUNT			0xFFC00C10	/* DMA Channel 0 X Count Register */
+#define	DMA0_X_MODIFY			0xFFC00C14	/* DMA Channel 0 X Modify Register */
+#define	DMA0_Y_COUNT			0xFFC00C18	/* DMA Channel 0 Y Count Register */
+#define	DMA0_Y_MODIFY			0xFFC00C1C	/* DMA Channel 0 Y Modify Register */
+#define	DMA0_CURR_DESC_PTR		0xFFC00C20	/* DMA Channel 0 Current Descriptor Pointer Register */
+#define	DMA0_CURR_ADDR			0xFFC00C24	/* DMA Channel 0 Current Address Register */
+#define	DMA0_IRQ_STATUS			0xFFC00C28	/* DMA Channel 0 Interrupt/Status Register */
+#define	DMA0_PERIPHERAL_MAP		0xFFC00C2C	/* DMA Channel 0 Peripheral Map	Register */
+#define	DMA0_CURR_X_COUNT		0xFFC00C30	/* DMA Channel 0 Current X Count Register */
+#define	DMA0_CURR_Y_COUNT		0xFFC00C38	/* DMA Channel 0 Current Y Count Register */
+
+#define	DMA1_NEXT_DESC_PTR		0xFFC00C40	/* DMA Channel 1 Next Descriptor Pointer Register */
+#define	DMA1_START_ADDR			0xFFC00C44	/* DMA Channel 1 Start Address Register */
+#define	DMA1_CONFIG				0xFFC00C48	/* DMA Channel 1 Configuration Register */
+#define	DMA1_X_COUNT			0xFFC00C50	/* DMA Channel 1 X Count Register */
+#define	DMA1_X_MODIFY			0xFFC00C54	/* DMA Channel 1 X Modify Register */
+#define	DMA1_Y_COUNT			0xFFC00C58	/* DMA Channel 1 Y Count Register */
+#define	DMA1_Y_MODIFY			0xFFC00C5C	/* DMA Channel 1 Y Modify Register */
+#define	DMA1_CURR_DESC_PTR		0xFFC00C60	/* DMA Channel 1 Current Descriptor Pointer Register */
+#define	DMA1_CURR_ADDR			0xFFC00C64	/* DMA Channel 1 Current Address Register */
+#define	DMA1_IRQ_STATUS			0xFFC00C68	/* DMA Channel 1 Interrupt/Status Register */
+#define	DMA1_PERIPHERAL_MAP		0xFFC00C6C	/* DMA Channel 1 Peripheral Map	Register */
+#define	DMA1_CURR_X_COUNT		0xFFC00C70	/* DMA Channel 1 Current X Count Register */
+#define	DMA1_CURR_Y_COUNT		0xFFC00C78	/* DMA Channel 1 Current Y Count Register */
+
+#define	DMA2_NEXT_DESC_PTR		0xFFC00C80	/* DMA Channel 2 Next Descriptor Pointer Register */
+#define	DMA2_START_ADDR			0xFFC00C84	/* DMA Channel 2 Start Address Register */
+#define	DMA2_CONFIG				0xFFC00C88	/* DMA Channel 2 Configuration Register */
+#define	DMA2_X_COUNT			0xFFC00C90	/* DMA Channel 2 X Count Register */
+#define	DMA2_X_MODIFY			0xFFC00C94	/* DMA Channel 2 X Modify Register */
+#define	DMA2_Y_COUNT			0xFFC00C98	/* DMA Channel 2 Y Count Register */
+#define	DMA2_Y_MODIFY			0xFFC00C9C	/* DMA Channel 2 Y Modify Register */
+#define	DMA2_CURR_DESC_PTR		0xFFC00CA0	/* DMA Channel 2 Current Descriptor Pointer Register */
+#define	DMA2_CURR_ADDR			0xFFC00CA4	/* DMA Channel 2 Current Address Register */
+#define	DMA2_IRQ_STATUS			0xFFC00CA8	/* DMA Channel 2 Interrupt/Status Register */
+#define	DMA2_PERIPHERAL_MAP		0xFFC00CAC	/* DMA Channel 2 Peripheral Map	Register */
+#define	DMA2_CURR_X_COUNT		0xFFC00CB0	/* DMA Channel 2 Current X Count Register */
+#define	DMA2_CURR_Y_COUNT		0xFFC00CB8	/* DMA Channel 2 Current Y Count Register */
+
+#define	DMA3_NEXT_DESC_PTR		0xFFC00CC0	/* DMA Channel 3 Next Descriptor Pointer Register */
+#define	DMA3_START_ADDR			0xFFC00CC4	/* DMA Channel 3 Start Address Register */
+#define	DMA3_CONFIG				0xFFC00CC8	/* DMA Channel 3 Configuration Register */
+#define	DMA3_X_COUNT			0xFFC00CD0	/* DMA Channel 3 X Count Register */
+#define	DMA3_X_MODIFY			0xFFC00CD4	/* DMA Channel 3 X Modify Register */
+#define	DMA3_Y_COUNT			0xFFC00CD8	/* DMA Channel 3 Y Count Register */
+#define	DMA3_Y_MODIFY			0xFFC00CDC	/* DMA Channel 3 Y Modify Register */
+#define	DMA3_CURR_DESC_PTR		0xFFC00CE0	/* DMA Channel 3 Current Descriptor Pointer Register */
+#define	DMA3_CURR_ADDR			0xFFC00CE4	/* DMA Channel 3 Current Address Register */
+#define	DMA3_IRQ_STATUS			0xFFC00CE8	/* DMA Channel 3 Interrupt/Status Register */
+#define	DMA3_PERIPHERAL_MAP		0xFFC00CEC	/* DMA Channel 3 Peripheral Map	Register */
+#define	DMA3_CURR_X_COUNT		0xFFC00CF0	/* DMA Channel 3 Current X Count Register */
+#define	DMA3_CURR_Y_COUNT		0xFFC00CF8	/* DMA Channel 3 Current Y Count Register */
+
+#define	DMA4_NEXT_DESC_PTR		0xFFC00D00	/* DMA Channel 4 Next Descriptor Pointer Register */
+#define	DMA4_START_ADDR			0xFFC00D04	/* DMA Channel 4 Start Address Register */
+#define	DMA4_CONFIG				0xFFC00D08	/* DMA Channel 4 Configuration Register */
+#define	DMA4_X_COUNT			0xFFC00D10	/* DMA Channel 4 X Count Register */
+#define	DMA4_X_MODIFY			0xFFC00D14	/* DMA Channel 4 X Modify Register */
+#define	DMA4_Y_COUNT			0xFFC00D18	/* DMA Channel 4 Y Count Register */
+#define	DMA4_Y_MODIFY			0xFFC00D1C	/* DMA Channel 4 Y Modify Register */
+#define	DMA4_CURR_DESC_PTR		0xFFC00D20	/* DMA Channel 4 Current Descriptor Pointer Register */
+#define	DMA4_CURR_ADDR			0xFFC00D24	/* DMA Channel 4 Current Address Register */
+#define	DMA4_IRQ_STATUS			0xFFC00D28	/* DMA Channel 4 Interrupt/Status Register */
+#define	DMA4_PERIPHERAL_MAP		0xFFC00D2C	/* DMA Channel 4 Peripheral Map	Register */
+#define	DMA4_CURR_X_COUNT		0xFFC00D30	/* DMA Channel 4 Current X Count Register */
+#define	DMA4_CURR_Y_COUNT		0xFFC00D38	/* DMA Channel 4 Current Y Count Register */
+
+#define	DMA5_NEXT_DESC_PTR		0xFFC00D40	/* DMA Channel 5 Next Descriptor Pointer Register */
+#define	DMA5_START_ADDR			0xFFC00D44	/* DMA Channel 5 Start Address Register */
+#define	DMA5_CONFIG				0xFFC00D48	/* DMA Channel 5 Configuration Register */
+#define	DMA5_X_COUNT			0xFFC00D50	/* DMA Channel 5 X Count Register */
+#define	DMA5_X_MODIFY			0xFFC00D54	/* DMA Channel 5 X Modify Register */
+#define	DMA5_Y_COUNT			0xFFC00D58	/* DMA Channel 5 Y Count Register */
+#define	DMA5_Y_MODIFY			0xFFC00D5C	/* DMA Channel 5 Y Modify Register */
+#define	DMA5_CURR_DESC_PTR		0xFFC00D60	/* DMA Channel 5 Current Descriptor Pointer Register */
+#define	DMA5_CURR_ADDR			0xFFC00D64	/* DMA Channel 5 Current Address Register */
+#define	DMA5_IRQ_STATUS			0xFFC00D68	/* DMA Channel 5 Interrupt/Status Register */
+#define	DMA5_PERIPHERAL_MAP		0xFFC00D6C	/* DMA Channel 5 Peripheral Map	Register */
+#define	DMA5_CURR_X_COUNT		0xFFC00D70	/* DMA Channel 5 Current X Count Register */
+#define	DMA5_CURR_Y_COUNT		0xFFC00D78	/* DMA Channel 5 Current Y Count Register */
+
+#define	DMA6_NEXT_DESC_PTR		0xFFC00D80	/* DMA Channel 6 Next Descriptor Pointer Register */
+#define	DMA6_START_ADDR			0xFFC00D84	/* DMA Channel 6 Start Address Register */
+#define	DMA6_CONFIG				0xFFC00D88	/* DMA Channel 6 Configuration Register */
+#define	DMA6_X_COUNT			0xFFC00D90	/* DMA Channel 6 X Count Register */
+#define	DMA6_X_MODIFY			0xFFC00D94	/* DMA Channel 6 X Modify Register */
+#define	DMA6_Y_COUNT			0xFFC00D98	/* DMA Channel 6 Y Count Register */
+#define	DMA6_Y_MODIFY			0xFFC00D9C	/* DMA Channel 6 Y Modify Register */
+#define	DMA6_CURR_DESC_PTR		0xFFC00DA0	/* DMA Channel 6 Current Descriptor Pointer Register */
+#define	DMA6_CURR_ADDR			0xFFC00DA4	/* DMA Channel 6 Current Address Register */
+#define	DMA6_IRQ_STATUS			0xFFC00DA8	/* DMA Channel 6 Interrupt/Status Register */
+#define	DMA6_PERIPHERAL_MAP		0xFFC00DAC	/* DMA Channel 6 Peripheral Map	Register */
+#define	DMA6_CURR_X_COUNT		0xFFC00DB0	/* DMA Channel 6 Current X Count Register */
+#define	DMA6_CURR_Y_COUNT		0xFFC00DB8	/* DMA Channel 6 Current Y Count Register */
+
+#define	DMA7_NEXT_DESC_PTR		0xFFC00DC0	/* DMA Channel 7 Next Descriptor Pointer Register */
+#define	DMA7_START_ADDR			0xFFC00DC4	/* DMA Channel 7 Start Address Register */
+#define	DMA7_CONFIG				0xFFC00DC8	/* DMA Channel 7 Configuration Register */
+#define	DMA7_X_COUNT			0xFFC00DD0	/* DMA Channel 7 X Count Register */
+#define	DMA7_X_MODIFY			0xFFC00DD4	/* DMA Channel 7 X Modify Register */
+#define	DMA7_Y_COUNT			0xFFC00DD8	/* DMA Channel 7 Y Count Register */
+#define	DMA7_Y_MODIFY			0xFFC00DDC	/* DMA Channel 7 Y Modify Register */
+#define	DMA7_CURR_DESC_PTR		0xFFC00DE0	/* DMA Channel 7 Current Descriptor Pointer Register */
+#define	DMA7_CURR_ADDR			0xFFC00DE4	/* DMA Channel 7 Current Address Register */
+#define	DMA7_IRQ_STATUS			0xFFC00DE8	/* DMA Channel 7 Interrupt/Status Register */
+#define	DMA7_PERIPHERAL_MAP		0xFFC00DEC	/* DMA Channel 7 Peripheral Map	Register */
+#define	DMA7_CURR_X_COUNT		0xFFC00DF0	/* DMA Channel 7 Current X Count Register */
+#define	DMA7_CURR_Y_COUNT		0xFFC00DF8	/* DMA Channel 7 Current Y Count Register */
+
+#define	MDMA_D0_NEXT_DESC_PTR	0xFFC00E00	/* MemDMA0 Stream 0 Destination	Next Descriptor	Pointer	Register */
+#define	MDMA_D0_START_ADDR		0xFFC00E04	/* MemDMA0 Stream 0 Destination	Start Address Register */
+#define	MDMA_D0_CONFIG			0xFFC00E08	/* MemDMA0 Stream 0 Destination	Configuration Register */
+#define	MDMA_D0_X_COUNT		0xFFC00E10	/* MemDMA0 Stream 0 Destination	X Count	Register */
+#define	MDMA_D0_X_MODIFY		0xFFC00E14	/* MemDMA0 Stream 0 Destination	X Modify Register */
+#define	MDMA_D0_Y_COUNT		0xFFC00E18	/* MemDMA0 Stream 0 Destination	Y Count	Register */
+#define	MDMA_D0_Y_MODIFY		0xFFC00E1C	/* MemDMA0 Stream 0 Destination	Y Modify Register */
+#define	MDMA_D0_CURR_DESC_PTR	0xFFC00E20	/* MemDMA0 Stream 0 Destination	Current	Descriptor Pointer Register */
+#define	MDMA_D0_CURR_ADDR		0xFFC00E24	/* MemDMA0 Stream 0 Destination	Current	Address	Register */
+#define	MDMA_D0_IRQ_STATUS		0xFFC00E28	/* MemDMA0 Stream 0 Destination	Interrupt/Status Register */
+#define	MDMA_D0_PERIPHERAL_MAP	0xFFC00E2C	/* MemDMA0 Stream 0 Destination	Peripheral Map Register */
+#define	MDMA_D0_CURR_X_COUNT	0xFFC00E30	/* MemDMA0 Stream 0 Destination	Current	X Count	Register */
+#define	MDMA_D0_CURR_Y_COUNT	0xFFC00E38	/* MemDMA0 Stream 0 Destination	Current	Y Count	Register */
+
+#define	MDMA_S0_NEXT_DESC_PTR	0xFFC00E40	/* MemDMA0 Stream 0 Source Next	Descriptor Pointer Register */
+#define	MDMA_S0_START_ADDR		0xFFC00E44	/* MemDMA0 Stream 0 Source Start Address Register */
+#define	MDMA_S0_CONFIG			0xFFC00E48	/* MemDMA0 Stream 0 Source Configuration Register */
+#define	MDMA_S0_X_COUNT		0xFFC00E50	/* MemDMA0 Stream 0 Source X Count Register */
+#define	MDMA_S0_X_MODIFY		0xFFC00E54	/* MemDMA0 Stream 0 Source X Modify Register */
+#define	MDMA_S0_Y_COUNT		0xFFC00E58	/* MemDMA0 Stream 0 Source Y Count Register */
+#define	MDMA_S0_Y_MODIFY		0xFFC00E5C	/* MemDMA0 Stream 0 Source Y Modify Register */
+#define	MDMA_S0_CURR_DESC_PTR	0xFFC00E60	/* MemDMA0 Stream 0 Source Current Descriptor Pointer Register */
+#define	MDMA_S0_CURR_ADDR		0xFFC00E64	/* MemDMA0 Stream 0 Source Current Address Register */
+#define	MDMA_S0_IRQ_STATUS		0xFFC00E68	/* MemDMA0 Stream 0 Source Interrupt/Status Register */
+#define	MDMA_S0_PERIPHERAL_MAP	0xFFC00E6C	/* MemDMA0 Stream 0 Source Peripheral Map Register */
+#define	MDMA_S0_CURR_X_COUNT	0xFFC00E70	/* MemDMA0 Stream 0 Source Current X Count Register */
+#define	MDMA_S0_CURR_Y_COUNT	0xFFC00E78	/* MemDMA0 Stream 0 Source Current Y Count Register */
+
+#define	MDMA_D1_NEXT_DESC_PTR	0xFFC00E80	/* MemDMA0 Stream 1 Destination	Next Descriptor	Pointer	Register */
+#define	MDMA_D1_START_ADDR		0xFFC00E84	/* MemDMA0 Stream 1 Destination	Start Address Register */
+#define	MDMA_D1_CONFIG			0xFFC00E88	/* MemDMA0 Stream 1 Destination	Configuration Register */
+#define	MDMA_D1_X_COUNT		0xFFC00E90	/* MemDMA0 Stream 1 Destination	X Count	Register */
+#define	MDMA_D1_X_MODIFY		0xFFC00E94	/* MemDMA0 Stream 1 Destination	X Modify Register */
+#define	MDMA_D1_Y_COUNT		0xFFC00E98	/* MemDMA0 Stream 1 Destination	Y Count	Register */
+#define	MDMA_D1_Y_MODIFY		0xFFC00E9C	/* MemDMA0 Stream 1 Destination	Y Modify Register */
+#define	MDMA_D1_CURR_DESC_PTR	0xFFC00EA0	/* MemDMA0 Stream 1 Destination	Current	Descriptor Pointer Register */
+#define	MDMA_D1_CURR_ADDR		0xFFC00EA4	/* MemDMA0 Stream 1 Destination	Current	Address	Register */
+#define	MDMA_D1_IRQ_STATUS		0xFFC00EA8	/* MemDMA0 Stream 1 Destination	Interrupt/Status Register */
+#define	MDMA_D1_PERIPHERAL_MAP	0xFFC00EAC	/* MemDMA0 Stream 1 Destination	Peripheral Map Register */
+#define	MDMA_D1_CURR_X_COUNT	0xFFC00EB0	/* MemDMA0 Stream 1 Destination	Current	X Count	Register */
+#define	MDMA_D1_CURR_Y_COUNT	0xFFC00EB8	/* MemDMA0 Stream 1 Destination	Current	Y Count	Register */
+
+#define	MDMA_S1_NEXT_DESC_PTR	0xFFC00EC0	/* MemDMA0 Stream 1 Source Next	Descriptor Pointer Register */
+#define	MDMA_S1_START_ADDR		0xFFC00EC4	/* MemDMA0 Stream 1 Source Start Address Register */
+#define	MDMA_S1_CONFIG			0xFFC00EC8	/* MemDMA0 Stream 1 Source Configuration Register */
+#define	MDMA_S1_X_COUNT		0xFFC00ED0	/* MemDMA0 Stream 1 Source X Count Register */
+#define	MDMA_S1_X_MODIFY		0xFFC00ED4	/* MemDMA0 Stream 1 Source X Modify Register */
+#define	MDMA_S1_Y_COUNT		0xFFC00ED8	/* MemDMA0 Stream 1 Source Y Count Register */
+#define	MDMA_S1_Y_MODIFY		0xFFC00EDC	/* MemDMA0 Stream 1 Source Y Modify Register */
+#define	MDMA_S1_CURR_DESC_PTR	0xFFC00EE0	/* MemDMA0 Stream 1 Source Current Descriptor Pointer Register */
+#define	MDMA_S1_CURR_ADDR		0xFFC00EE4	/* MemDMA0 Stream 1 Source Current Address Register */
+#define	MDMA_S1_IRQ_STATUS		0xFFC00EE8	/* MemDMA0 Stream 1 Source Interrupt/Status Register */
+#define	MDMA_S1_PERIPHERAL_MAP	0xFFC00EEC	/* MemDMA0 Stream 1 Source Peripheral Map Register */
+#define	MDMA_S1_CURR_X_COUNT	0xFFC00EF0	/* MemDMA0 Stream 1 Source Current X Count Register */
+#define	MDMA_S1_CURR_Y_COUNT	0xFFC00EF8	/* MemDMA0 Stream 1 Source Current Y Count Register */
+
+
+/* Parallel Peripheral Interface (PPI) (0xFFC01000 - 0xFFC010FF) */
+#define	PPI_CONTROL			0xFFC01000	/* PPI Control Register */
+#define	PPI_STATUS			0xFFC01004	/* PPI Status Register */
+#define	PPI_COUNT			0xFFC01008	/* PPI Transfer	Count Register */
+#define	PPI_DELAY			0xFFC0100C	/* PPI Delay Count Register */
+#define	PPI_FRAME			0xFFC01010	/* PPI Frame Length Register */
+
+
+/* Two-Wire Interface 0	(0xFFC01400 - 0xFFC014FF)			 */
+#define	TWI0_CLKDIV			0xFFC01400	/* Serial Clock	Divider	Register */
+#define	TWI0_CONTROL		0xFFC01404	/* TWI0	Master Internal	Time Reference Register */
+#define	TWI0_SLAVE_CTL		0xFFC01408	/* Slave Mode Control Register */
+#define	TWI0_SLAVE_STAT		0xFFC0140C	/* Slave Mode Status Register */
+#define	TWI0_SLAVE_ADDR		0xFFC01410	/* Slave Mode Address Register */
+#define	TWI0_MASTER_CTL	0xFFC01414	/* Master Mode Control Register */
+#define	TWI0_MASTER_STAT	0xFFC01418	/* Master Mode Status Register */
+#define	TWI0_MASTER_ADDR	0xFFC0141C	/* Master Mode Address Register */
+#define	TWI0_INT_STAT		0xFFC01420	/* TWI0	Master Interrupt Register */
+#define	TWI0_INT_MASK		0xFFC01424	/* TWI0	Master Interrupt Mask Register */
+#define	TWI0_FIFO_CTL		0xFFC01428	/* FIFO	Control	Register */
+#define	TWI0_FIFO_STAT		0xFFC0142C	/* FIFO	Status Register */
+#define	TWI0_XMT_DATA8		0xFFC01480	/* FIFO	Transmit Data Single Byte Register */
+#define	TWI0_XMT_DATA16		0xFFC01484	/* FIFO	Transmit Data Double Byte Register */
+#define	TWI0_RCV_DATA8		0xFFC01488	/* FIFO	Receive	Data Single Byte Register */
+#define	TWI0_RCV_DATA16		0xFFC0148C	/* FIFO	Receive	Data Double Byte Register */
+
+#define TWI0_REGBASE		TWI0_CLKDIV
+
+/* the following are for backwards compatibility */
+#define	TWI0_PRESCALE	 TWI0_CONTROL
+#define	TWI0_INT_SRC	 TWI0_INT_STAT
+#define	TWI0_INT_ENABLE	 TWI0_INT_MASK
+
+
+/* General-Purpose Ports  (0xFFC01500 -	0xFFC015FF)	 */
+
+/* GPIO	Port C Register	Names */
+#define PORTCIO_FER			0xFFC01500	/* GPIO	Pin Port C Configuration Register */
+#define PORTCIO				0xFFC01510	/* GPIO	Pin Port C Data	Register */
+#define PORTCIO_CLEAR			0xFFC01520	/* Clear GPIO Pin Port C Register */
+#define PORTCIO_SET			0xFFC01530	/* Set GPIO Pin	Port C Register */
+#define PORTCIO_TOGGLE			0xFFC01540	/* Toggle GPIO Pin Port	C Register */
+#define PORTCIO_DIR			0xFFC01550	/* GPIO	Pin Port C Direction Register */
+#define PORTCIO_INEN			0xFFC01560	/* GPIO	Pin Port C Input Enable	Register */
+
+/* GPIO	Port D Register	Names */
+#define PORTDIO_FER			0xFFC01504	/* GPIO	Pin Port D Configuration Register */
+#define PORTDIO				0xFFC01514	/* GPIO	Pin Port D Data	Register */
+#define PORTDIO_CLEAR			0xFFC01524	/* Clear GPIO Pin Port D Register */
+#define PORTDIO_SET			0xFFC01534	/* Set GPIO Pin	Port D Register */
+#define PORTDIO_TOGGLE			0xFFC01544	/* Toggle GPIO Pin Port	D Register */
+#define PORTDIO_DIR			0xFFC01554	/* GPIO	Pin Port D Direction Register */
+#define PORTDIO_INEN			0xFFC01564	/* GPIO	Pin Port D Input Enable	Register */
+
+/* GPIO	Port E Register	Names */
+#define PORTEIO_FER			0xFFC01508	/* GPIO	Pin Port E Configuration Register */
+#define PORTEIO				0xFFC01518	/* GPIO	Pin Port E Data	Register */
+#define PORTEIO_CLEAR			0xFFC01528	/* Clear GPIO Pin Port E Register */
+#define PORTEIO_SET			0xFFC01538	/* Set GPIO Pin	Port E Register */
+#define PORTEIO_TOGGLE			0xFFC01548	/* Toggle GPIO Pin Port	E Register */
+#define PORTEIO_DIR			0xFFC01558	/* GPIO	Pin Port E Direction Register */
+#define PORTEIO_INEN			0xFFC01568	/* GPIO	Pin Port E Input Enable	Register */
+
+/* DMA Controller 1 Traffic Control Registers (0xFFC01B00 - 0xFFC01BFF) */
+
+#define	DMAC1_TC_PER			0xFFC01B0C	/* DMA Controller 1 Traffic Control Periods Register */
+#define	DMAC1_TC_CNT			0xFFC01B10	/* DMA Controller 1 Traffic Control Current Counts Register */
+
+
+
+/* DMA Controller 1 (0xFFC01C00	- 0xFFC01FFF)							 */
+#define	DMA8_NEXT_DESC_PTR		0xFFC01C00	/* DMA Channel 8 Next Descriptor Pointer Register */
+#define	DMA8_START_ADDR			0xFFC01C04	/* DMA Channel 8 Start Address Register */
+#define	DMA8_CONFIG				0xFFC01C08	/* DMA Channel 8 Configuration Register */
+#define	DMA8_X_COUNT			0xFFC01C10	/* DMA Channel 8 X Count Register */
+#define	DMA8_X_MODIFY			0xFFC01C14	/* DMA Channel 8 X Modify Register */
+#define	DMA8_Y_COUNT			0xFFC01C18	/* DMA Channel 8 Y Count Register */
+#define	DMA8_Y_MODIFY			0xFFC01C1C	/* DMA Channel 8 Y Modify Register */
+#define	DMA8_CURR_DESC_PTR		0xFFC01C20	/* DMA Channel 8 Current Descriptor Pointer Register */
+#define	DMA8_CURR_ADDR			0xFFC01C24	/* DMA Channel 8 Current Address Register */
+#define	DMA8_IRQ_STATUS			0xFFC01C28	/* DMA Channel 8 Interrupt/Status Register */
+#define	DMA8_PERIPHERAL_MAP		0xFFC01C2C	/* DMA Channel 8 Peripheral Map	Register */
+#define	DMA8_CURR_X_COUNT		0xFFC01C30	/* DMA Channel 8 Current X Count Register */
+#define	DMA8_CURR_Y_COUNT		0xFFC01C38	/* DMA Channel 8 Current Y Count Register */
+
+#define	DMA9_NEXT_DESC_PTR		0xFFC01C40	/* DMA Channel 9 Next Descriptor Pointer Register */
+#define	DMA9_START_ADDR			0xFFC01C44	/* DMA Channel 9 Start Address Register */
+#define	DMA9_CONFIG				0xFFC01C48	/* DMA Channel 9 Configuration Register */
+#define	DMA9_X_COUNT			0xFFC01C50	/* DMA Channel 9 X Count Register */
+#define	DMA9_X_MODIFY			0xFFC01C54	/* DMA Channel 9 X Modify Register */
+#define	DMA9_Y_COUNT			0xFFC01C58	/* DMA Channel 9 Y Count Register */
+#define	DMA9_Y_MODIFY			0xFFC01C5C	/* DMA Channel 9 Y Modify Register */
+#define	DMA9_CURR_DESC_PTR		0xFFC01C60	/* DMA Channel 9 Current Descriptor Pointer Register */
+#define	DMA9_CURR_ADDR			0xFFC01C64	/* DMA Channel 9 Current Address Register */
+#define	DMA9_IRQ_STATUS			0xFFC01C68	/* DMA Channel 9 Interrupt/Status Register */
+#define	DMA9_PERIPHERAL_MAP		0xFFC01C6C	/* DMA Channel 9 Peripheral Map	Register */
+#define	DMA9_CURR_X_COUNT		0xFFC01C70	/* DMA Channel 9 Current X Count Register */
+#define	DMA9_CURR_Y_COUNT		0xFFC01C78	/* DMA Channel 9 Current Y Count Register */
+
+#define	DMA10_NEXT_DESC_PTR		0xFFC01C80	/* DMA Channel 10 Next Descriptor Pointer Register */
+#define	DMA10_START_ADDR		0xFFC01C84	/* DMA Channel 10 Start	Address	Register */
+#define	DMA10_CONFIG			0xFFC01C88	/* DMA Channel 10 Configuration	Register */
+#define	DMA10_X_COUNT			0xFFC01C90	/* DMA Channel 10 X Count Register */
+#define	DMA10_X_MODIFY			0xFFC01C94	/* DMA Channel 10 X Modify Register */
+#define	DMA10_Y_COUNT			0xFFC01C98	/* DMA Channel 10 Y Count Register */
+#define	DMA10_Y_MODIFY			0xFFC01C9C	/* DMA Channel 10 Y Modify Register */
+#define	DMA10_CURR_DESC_PTR		0xFFC01CA0	/* DMA Channel 10 Current Descriptor Pointer Register */
+#define	DMA10_CURR_ADDR			0xFFC01CA4	/* DMA Channel 10 Current Address Register */
+#define	DMA10_IRQ_STATUS		0xFFC01CA8	/* DMA Channel 10 Interrupt/Status Register */
+#define	DMA10_PERIPHERAL_MAP	0xFFC01CAC	/* DMA Channel 10 Peripheral Map Register */
+#define	DMA10_CURR_X_COUNT		0xFFC01CB0	/* DMA Channel 10 Current X Count Register */
+#define	DMA10_CURR_Y_COUNT		0xFFC01CB8	/* DMA Channel 10 Current Y Count Register */
+
+#define	DMA11_NEXT_DESC_PTR		0xFFC01CC0	/* DMA Channel 11 Next Descriptor Pointer Register */
+#define	DMA11_START_ADDR		0xFFC01CC4	/* DMA Channel 11 Start	Address	Register */
+#define	DMA11_CONFIG			0xFFC01CC8	/* DMA Channel 11 Configuration	Register */
+#define	DMA11_X_COUNT			0xFFC01CD0	/* DMA Channel 11 X Count Register */
+#define	DMA11_X_MODIFY			0xFFC01CD4	/* DMA Channel 11 X Modify Register */
+#define	DMA11_Y_COUNT			0xFFC01CD8	/* DMA Channel 11 Y Count Register */
+#define	DMA11_Y_MODIFY			0xFFC01CDC	/* DMA Channel 11 Y Modify Register */
+#define	DMA11_CURR_DESC_PTR		0xFFC01CE0	/* DMA Channel 11 Current Descriptor Pointer Register */
+#define	DMA11_CURR_ADDR			0xFFC01CE4	/* DMA Channel 11 Current Address Register */
+#define	DMA11_IRQ_STATUS		0xFFC01CE8	/* DMA Channel 11 Interrupt/Status Register */
+#define	DMA11_PERIPHERAL_MAP	0xFFC01CEC	/* DMA Channel 11 Peripheral Map Register */
+#define	DMA11_CURR_X_COUNT		0xFFC01CF0	/* DMA Channel 11 Current X Count Register */
+#define	DMA11_CURR_Y_COUNT		0xFFC01CF8	/* DMA Channel 11 Current Y Count Register */
+
+#define	DMA12_NEXT_DESC_PTR		0xFFC01D00	/* DMA Channel 12 Next Descriptor Pointer Register */
+#define	DMA12_START_ADDR		0xFFC01D04	/* DMA Channel 12 Start	Address	Register */
+#define	DMA12_CONFIG			0xFFC01D08	/* DMA Channel 12 Configuration	Register */
+#define	DMA12_X_COUNT			0xFFC01D10	/* DMA Channel 12 X Count Register */
+#define	DMA12_X_MODIFY			0xFFC01D14	/* DMA Channel 12 X Modify Register */
+#define	DMA12_Y_COUNT			0xFFC01D18	/* DMA Channel 12 Y Count Register */
+#define	DMA12_Y_MODIFY			0xFFC01D1C	/* DMA Channel 12 Y Modify Register */
+#define	DMA12_CURR_DESC_PTR		0xFFC01D20	/* DMA Channel 12 Current Descriptor Pointer Register */
+#define	DMA12_CURR_ADDR			0xFFC01D24	/* DMA Channel 12 Current Address Register */
+#define	DMA12_IRQ_STATUS		0xFFC01D28	/* DMA Channel 12 Interrupt/Status Register */
+#define	DMA12_PERIPHERAL_MAP	0xFFC01D2C	/* DMA Channel 12 Peripheral Map Register */
+#define	DMA12_CURR_X_COUNT		0xFFC01D30	/* DMA Channel 12 Current X Count Register */
+#define	DMA12_CURR_Y_COUNT		0xFFC01D38	/* DMA Channel 12 Current Y Count Register */
+
+#define	DMA13_NEXT_DESC_PTR		0xFFC01D40	/* DMA Channel 13 Next Descriptor Pointer Register */
+#define	DMA13_START_ADDR		0xFFC01D44	/* DMA Channel 13 Start	Address	Register */
+#define	DMA13_CONFIG			0xFFC01D48	/* DMA Channel 13 Configuration	Register */
+#define	DMA13_X_COUNT			0xFFC01D50	/* DMA Channel 13 X Count Register */
+#define	DMA13_X_MODIFY			0xFFC01D54	/* DMA Channel 13 X Modify Register */
+#define	DMA13_Y_COUNT			0xFFC01D58	/* DMA Channel 13 Y Count Register */
+#define	DMA13_Y_MODIFY			0xFFC01D5C	/* DMA Channel 13 Y Modify Register */
+#define	DMA13_CURR_DESC_PTR		0xFFC01D60	/* DMA Channel 13 Current Descriptor Pointer Register */
+#define	DMA13_CURR_ADDR			0xFFC01D64	/* DMA Channel 13 Current Address Register */
+#define	DMA13_IRQ_STATUS		0xFFC01D68	/* DMA Channel 13 Interrupt/Status Register */
+#define	DMA13_PERIPHERAL_MAP	0xFFC01D6C	/* DMA Channel 13 Peripheral Map Register */
+#define	DMA13_CURR_X_COUNT		0xFFC01D70	/* DMA Channel 13 Current X Count Register */
+#define	DMA13_CURR_Y_COUNT		0xFFC01D78	/* DMA Channel 13 Current Y Count Register */
+
+#define	DMA14_NEXT_DESC_PTR		0xFFC01D80	/* DMA Channel 14 Next Descriptor Pointer Register */
+#define	DMA14_START_ADDR		0xFFC01D84	/* DMA Channel 14 Start	Address	Register */
+#define	DMA14_CONFIG			0xFFC01D88	/* DMA Channel 14 Configuration	Register */
+#define	DMA14_X_COUNT			0xFFC01D90	/* DMA Channel 14 X Count Register */
+#define	DMA14_X_MODIFY			0xFFC01D94	/* DMA Channel 14 X Modify Register */
+#define	DMA14_Y_COUNT			0xFFC01D98	/* DMA Channel 14 Y Count Register */
+#define	DMA14_Y_MODIFY			0xFFC01D9C	/* DMA Channel 14 Y Modify Register */
+#define	DMA14_CURR_DESC_PTR		0xFFC01DA0	/* DMA Channel 14 Current Descriptor Pointer Register */
+#define	DMA14_CURR_ADDR			0xFFC01DA4	/* DMA Channel 14 Current Address Register */
+#define	DMA14_IRQ_STATUS		0xFFC01DA8	/* DMA Channel 14 Interrupt/Status Register */
+#define	DMA14_PERIPHERAL_MAP	0xFFC01DAC	/* DMA Channel 14 Peripheral Map Register */
+#define	DMA14_CURR_X_COUNT		0xFFC01DB0	/* DMA Channel 14 Current X Count Register */
+#define	DMA14_CURR_Y_COUNT		0xFFC01DB8	/* DMA Channel 14 Current Y Count Register */
+
+#define	DMA15_NEXT_DESC_PTR		0xFFC01DC0	/* DMA Channel 15 Next Descriptor Pointer Register */
+#define	DMA15_START_ADDR		0xFFC01DC4	/* DMA Channel 15 Start	Address	Register */
+#define	DMA15_CONFIG			0xFFC01DC8	/* DMA Channel 15 Configuration	Register */
+#define	DMA15_X_COUNT			0xFFC01DD0	/* DMA Channel 15 X Count Register */
+#define	DMA15_X_MODIFY			0xFFC01DD4	/* DMA Channel 15 X Modify Register */
+#define	DMA15_Y_COUNT			0xFFC01DD8	/* DMA Channel 15 Y Count Register */
+#define	DMA15_Y_MODIFY			0xFFC01DDC	/* DMA Channel 15 Y Modify Register */
+#define	DMA15_CURR_DESC_PTR		0xFFC01DE0	/* DMA Channel 15 Current Descriptor Pointer Register */
+#define	DMA15_CURR_ADDR			0xFFC01DE4	/* DMA Channel 15 Current Address Register */
+#define	DMA15_IRQ_STATUS		0xFFC01DE8	/* DMA Channel 15 Interrupt/Status Register */
+#define	DMA15_PERIPHERAL_MAP	0xFFC01DEC	/* DMA Channel 15 Peripheral Map Register */
+#define	DMA15_CURR_X_COUNT		0xFFC01DF0	/* DMA Channel 15 Current X Count Register */
+#define	DMA15_CURR_Y_COUNT		0xFFC01DF8	/* DMA Channel 15 Current Y Count Register */
+
+#define	DMA16_NEXT_DESC_PTR		0xFFC01E00	/* DMA Channel 16 Next Descriptor Pointer Register */
+#define	DMA16_START_ADDR		0xFFC01E04	/* DMA Channel 16 Start	Address	Register */
+#define	DMA16_CONFIG			0xFFC01E08	/* DMA Channel 16 Configuration	Register */
+#define	DMA16_X_COUNT			0xFFC01E10	/* DMA Channel 16 X Count Register */
+#define	DMA16_X_MODIFY			0xFFC01E14	/* DMA Channel 16 X Modify Register */
+#define	DMA16_Y_COUNT			0xFFC01E18	/* DMA Channel 16 Y Count Register */
+#define	DMA16_Y_MODIFY			0xFFC01E1C	/* DMA Channel 16 Y Modify Register */
+#define	DMA16_CURR_DESC_PTR		0xFFC01E20	/* DMA Channel 16 Current Descriptor Pointer Register */
+#define	DMA16_CURR_ADDR			0xFFC01E24	/* DMA Channel 16 Current Address Register */
+#define	DMA16_IRQ_STATUS		0xFFC01E28	/* DMA Channel 16 Interrupt/Status Register */
+#define	DMA16_PERIPHERAL_MAP	0xFFC01E2C	/* DMA Channel 16 Peripheral Map Register */
+#define	DMA16_CURR_X_COUNT		0xFFC01E30	/* DMA Channel 16 Current X Count Register */
+#define	DMA16_CURR_Y_COUNT		0xFFC01E38	/* DMA Channel 16 Current Y Count Register */
+
+#define	DMA17_NEXT_DESC_PTR		0xFFC01E40	/* DMA Channel 17 Next Descriptor Pointer Register */
+#define	DMA17_START_ADDR		0xFFC01E44	/* DMA Channel 17 Start	Address	Register */
+#define	DMA17_CONFIG			0xFFC01E48	/* DMA Channel 17 Configuration	Register */
+#define	DMA17_X_COUNT			0xFFC01E50	/* DMA Channel 17 X Count Register */
+#define	DMA17_X_MODIFY			0xFFC01E54	/* DMA Channel 17 X Modify Register */
+#define	DMA17_Y_COUNT			0xFFC01E58	/* DMA Channel 17 Y Count Register */
+#define	DMA17_Y_MODIFY			0xFFC01E5C	/* DMA Channel 17 Y Modify Register */
+#define	DMA17_CURR_DESC_PTR		0xFFC01E60	/* DMA Channel 17 Current Descriptor Pointer Register */
+#define	DMA17_CURR_ADDR			0xFFC01E64	/* DMA Channel 17 Current Address Register */
+#define	DMA17_IRQ_STATUS		0xFFC01E68	/* DMA Channel 17 Interrupt/Status Register */
+#define	DMA17_PERIPHERAL_MAP	0xFFC01E6C	/* DMA Channel 17 Peripheral Map Register */
+#define	DMA17_CURR_X_COUNT		0xFFC01E70	/* DMA Channel 17 Current X Count Register */
+#define	DMA17_CURR_Y_COUNT		0xFFC01E78	/* DMA Channel 17 Current Y Count Register */
+
+#define	DMA18_NEXT_DESC_PTR		0xFFC01E80	/* DMA Channel 18 Next Descriptor Pointer Register */
+#define	DMA18_START_ADDR		0xFFC01E84	/* DMA Channel 18 Start	Address	Register */
+#define	DMA18_CONFIG			0xFFC01E88	/* DMA Channel 18 Configuration	Register */
+#define	DMA18_X_COUNT			0xFFC01E90	/* DMA Channel 18 X Count Register */
+#define	DMA18_X_MODIFY			0xFFC01E94	/* DMA Channel 18 X Modify Register */
+#define	DMA18_Y_COUNT			0xFFC01E98	/* DMA Channel 18 Y Count Register */
+#define	DMA18_Y_MODIFY			0xFFC01E9C	/* DMA Channel 18 Y Modify Register */
+#define	DMA18_CURR_DESC_PTR		0xFFC01EA0	/* DMA Channel 18 Current Descriptor Pointer Register */
+#define	DMA18_CURR_ADDR			0xFFC01EA4	/* DMA Channel 18 Current Address Register */
+#define	DMA18_IRQ_STATUS		0xFFC01EA8	/* DMA Channel 18 Interrupt/Status Register */
+#define	DMA18_PERIPHERAL_MAP	0xFFC01EAC	/* DMA Channel 18 Peripheral Map Register */
+#define	DMA18_CURR_X_COUNT		0xFFC01EB0	/* DMA Channel 18 Current X Count Register */
+#define	DMA18_CURR_Y_COUNT		0xFFC01EB8	/* DMA Channel 18 Current Y Count Register */
+
+#define	DMA19_NEXT_DESC_PTR		0xFFC01EC0	/* DMA Channel 19 Next Descriptor Pointer Register */
+#define	DMA19_START_ADDR		0xFFC01EC4	/* DMA Channel 19 Start	Address	Register */
+#define	DMA19_CONFIG			0xFFC01EC8	/* DMA Channel 19 Configuration	Register */
+#define	DMA19_X_COUNT			0xFFC01ED0	/* DMA Channel 19 X Count Register */
+#define	DMA19_X_MODIFY			0xFFC01ED4	/* DMA Channel 19 X Modify Register */
+#define	DMA19_Y_COUNT			0xFFC01ED8	/* DMA Channel 19 Y Count Register */
+#define	DMA19_Y_MODIFY			0xFFC01EDC	/* DMA Channel 19 Y Modify Register */
+#define	DMA19_CURR_DESC_PTR		0xFFC01EE0	/* DMA Channel 19 Current Descriptor Pointer Register */
+#define	DMA19_CURR_ADDR			0xFFC01EE4	/* DMA Channel 19 Current Address Register */
+#define	DMA19_IRQ_STATUS		0xFFC01EE8	/* DMA Channel 19 Interrupt/Status Register */
+#define	DMA19_PERIPHERAL_MAP	0xFFC01EEC	/* DMA Channel 19 Peripheral Map Register */
+#define	DMA19_CURR_X_COUNT		0xFFC01EF0	/* DMA Channel 19 Current X Count Register */
+#define	DMA19_CURR_Y_COUNT		0xFFC01EF8	/* DMA Channel 19 Current Y Count Register */
+
+#define	MDMA_D2_NEXT_DESC_PTR	0xFFC01F00	/* MemDMA1 Stream 0 Destination	Next Descriptor	Pointer	Register */
+#define	MDMA_D2_START_ADDR		0xFFC01F04	/* MemDMA1 Stream 0 Destination	Start Address Register */
+#define	MDMA_D2_CONFIG			0xFFC01F08	/* MemDMA1 Stream 0 Destination	Configuration Register */
+#define	MDMA_D2_X_COUNT		0xFFC01F10	/* MemDMA1 Stream 0 Destination	X Count	Register */
+#define	MDMA_D2_X_MODIFY		0xFFC01F14	/* MemDMA1 Stream 0 Destination	X Modify Register */
+#define	MDMA_D2_Y_COUNT		0xFFC01F18	/* MemDMA1 Stream 0 Destination	Y Count	Register */
+#define	MDMA_D2_Y_MODIFY		0xFFC01F1C	/* MemDMA1 Stream 0 Destination	Y Modify Register */
+#define	MDMA_D2_CURR_DESC_PTR	0xFFC01F20	/* MemDMA1 Stream 0 Destination	Current	Descriptor Pointer Register */
+#define	MDMA_D2_CURR_ADDR		0xFFC01F24	/* MemDMA1 Stream 0 Destination	Current	Address	Register */
+#define	MDMA_D2_IRQ_STATUS		0xFFC01F28	/* MemDMA1 Stream 0 Destination	Interrupt/Status Register */
+#define	MDMA_D2_PERIPHERAL_MAP	0xFFC01F2C	/* MemDMA1 Stream 0 Destination	Peripheral Map Register */
+#define	MDMA_D2_CURR_X_COUNT	0xFFC01F30	/* MemDMA1 Stream 0 Destination	Current	X Count	Register */
+#define	MDMA_D2_CURR_Y_COUNT	0xFFC01F38	/* MemDMA1 Stream 0 Destination	Current	Y Count	Register */
+
+#define	MDMA_S2_NEXT_DESC_PTR	0xFFC01F40	/* MemDMA1 Stream 0 Source Next	Descriptor Pointer Register */
+#define	MDMA_S2_START_ADDR		0xFFC01F44	/* MemDMA1 Stream 0 Source Start Address Register */
+#define	MDMA_S2_CONFIG			0xFFC01F48	/* MemDMA1 Stream 0 Source Configuration Register */
+#define	MDMA_S2_X_COUNT		0xFFC01F50	/* MemDMA1 Stream 0 Source X Count Register */
+#define	MDMA_S2_X_MODIFY		0xFFC01F54	/* MemDMA1 Stream 0 Source X Modify Register */
+#define	MDMA_S2_Y_COUNT		0xFFC01F58	/* MemDMA1 Stream 0 Source Y Count Register */
+#define	MDMA_S2_Y_MODIFY		0xFFC01F5C	/* MemDMA1 Stream 0 Source Y Modify Register */
+#define	MDMA_S2_CURR_DESC_PTR	0xFFC01F60	/* MemDMA1 Stream 0 Source Current Descriptor Pointer Register */
+#define	MDMA_S2_CURR_ADDR		0xFFC01F64	/* MemDMA1 Stream 0 Source Current Address Register */
+#define	MDMA_S2_IRQ_STATUS		0xFFC01F68	/* MemDMA1 Stream 0 Source Interrupt/Status Register */
+#define	MDMA_S2_PERIPHERAL_MAP	0xFFC01F6C	/* MemDMA1 Stream 0 Source Peripheral Map Register */
+#define	MDMA_S2_CURR_X_COUNT	0xFFC01F70	/* MemDMA1 Stream 0 Source Current X Count Register */
+#define	MDMA_S2_CURR_Y_COUNT	0xFFC01F78	/* MemDMA1 Stream 0 Source Current Y Count Register */
+
+#define	MDMA_D3_NEXT_DESC_PTR	0xFFC01F80	/* MemDMA1 Stream 1 Destination	Next Descriptor	Pointer	Register */
+#define	MDMA_D3_START_ADDR		0xFFC01F84	/* MemDMA1 Stream 1 Destination	Start Address Register */
+#define	MDMA_D3_CONFIG			0xFFC01F88	/* MemDMA1 Stream 1 Destination	Configuration Register */
+#define	MDMA_D3_X_COUNT		0xFFC01F90	/* MemDMA1 Stream 1 Destination	X Count	Register */
+#define	MDMA_D3_X_MODIFY		0xFFC01F94	/* MemDMA1 Stream 1 Destination	X Modify Register */
+#define	MDMA_D3_Y_COUNT		0xFFC01F98	/* MemDMA1 Stream 1 Destination	Y Count	Register */
+#define	MDMA_D3_Y_MODIFY		0xFFC01F9C	/* MemDMA1 Stream 1 Destination	Y Modify Register */
+#define	MDMA_D3_CURR_DESC_PTR	0xFFC01FA0	/* MemDMA1 Stream 1 Destination	Current	Descriptor Pointer Register */
+#define	MDMA_D3_CURR_ADDR		0xFFC01FA4	/* MemDMA1 Stream 1 Destination	Current	Address	Register */
+#define	MDMA_D3_IRQ_STATUS		0xFFC01FA8	/* MemDMA1 Stream 1 Destination	Interrupt/Status Register */
+#define	MDMA_D3_PERIPHERAL_MAP	0xFFC01FAC	/* MemDMA1 Stream 1 Destination	Peripheral Map Register */
+#define	MDMA_D3_CURR_X_COUNT	0xFFC01FB0	/* MemDMA1 Stream 1 Destination	Current	X Count	Register */
+#define	MDMA_D3_CURR_Y_COUNT	0xFFC01FB8	/* MemDMA1 Stream 1 Destination	Current	Y Count	Register */
+
+#define	MDMA_S3_NEXT_DESC_PTR	0xFFC01FC0	/* MemDMA1 Stream 1 Source Next	Descriptor Pointer Register */
+#define	MDMA_S3_START_ADDR		0xFFC01FC4	/* MemDMA1 Stream 1 Source Start Address Register */
+#define	MDMA_S3_CONFIG			0xFFC01FC8	/* MemDMA1 Stream 1 Source Configuration Register */
+#define	MDMA_S3_X_COUNT		0xFFC01FD0	/* MemDMA1 Stream 1 Source X Count Register */
+#define	MDMA_S3_X_MODIFY		0xFFC01FD4	/* MemDMA1 Stream 1 Source X Modify Register */
+#define	MDMA_S3_Y_COUNT		0xFFC01FD8	/* MemDMA1 Stream 1 Source Y Count Register */
+#define	MDMA_S3_Y_MODIFY		0xFFC01FDC	/* MemDMA1 Stream 1 Source Y Modify Register */
+#define	MDMA_S3_CURR_DESC_PTR	0xFFC01FE0	/* MemDMA1 Stream 1 Source Current Descriptor Pointer Register */
+#define	MDMA_S3_CURR_ADDR		0xFFC01FE4	/* MemDMA1 Stream 1 Source Current Address Register */
+#define	MDMA_S3_IRQ_STATUS		0xFFC01FE8	/* MemDMA1 Stream 1 Source Interrupt/Status Register */
+#define	MDMA_S3_PERIPHERAL_MAP	0xFFC01FEC	/* MemDMA1 Stream 1 Source Peripheral Map Register */
+#define	MDMA_S3_CURR_X_COUNT	0xFFC01FF0	/* MemDMA1 Stream 1 Source Current X Count Register */
+#define	MDMA_S3_CURR_Y_COUNT	0xFFC01FF8	/* MemDMA1 Stream 1 Source Current Y Count Register */
+
+
+/* UART1 Controller		(0xFFC02000 - 0xFFC020FF)	 */
+#define	UART1_THR			0xFFC02000	/* Transmit Holding register */
+#define	UART1_RBR			0xFFC02000	/* Receive Buffer register */
+#define	UART1_DLL			0xFFC02000	/* Divisor Latch (Low-Byte) */
+#define	UART1_IER			0xFFC02004	/* Interrupt Enable Register */
+#define	UART1_DLH			0xFFC02004	/* Divisor Latch (High-Byte) */
+#define	UART1_IIR			0xFFC02008	/* Interrupt Identification Register */
+#define	UART1_LCR			0xFFC0200C	/* Line	Control	Register */
+#define	UART1_MCR			0xFFC02010	/* Modem Control Register */
+#define	UART1_LSR			0xFFC02014	/* Line	Status Register */
+#define	UART1_SCR			0xFFC0201C	/* SCR Scratch Register */
+#define	UART1_GCTL			0xFFC02024	/* Global Control Register */
+
+
+/* UART2 Controller		(0xFFC02100 - 0xFFC021FF)	 */
+#define	UART2_THR			0xFFC02100	/* Transmit Holding register */
+#define	UART2_RBR			0xFFC02100	/* Receive Buffer register */
+#define	UART2_DLL			0xFFC02100	/* Divisor Latch (Low-Byte) */
+#define	UART2_IER			0xFFC02104	/* Interrupt Enable Register */
+#define	UART2_DLH			0xFFC02104	/* Divisor Latch (High-Byte) */
+#define	UART2_IIR			0xFFC02108	/* Interrupt Identification Register */
+#define	UART2_LCR			0xFFC0210C	/* Line	Control	Register */
+#define	UART2_MCR			0xFFC02110	/* Modem Control Register */
+#define	UART2_LSR			0xFFC02114	/* Line	Status Register */
+#define	UART2_SCR			0xFFC0211C	/* SCR Scratch Register */
+#define	UART2_GCTL			0xFFC02124	/* Global Control Register */
+
+
+/* Two-Wire Interface 1	(0xFFC02200 - 0xFFC022FF)			 */
+#define	TWI1_CLKDIV			0xFFC02200	/* Serial Clock	Divider	Register */
+#define	TWI1_CONTROL		0xFFC02204	/* TWI1	Master Internal	Time Reference Register */
+#define	TWI1_SLAVE_CTL		0xFFC02208	/* Slave Mode Control Register */
+#define	TWI1_SLAVE_STAT		0xFFC0220C	/* Slave Mode Status Register */
+#define	TWI1_SLAVE_ADDR		0xFFC02210	/* Slave Mode Address Register */
+#define	TWI1_MASTER_CTL	0xFFC02214	/* Master Mode Control Register */
+#define	TWI1_MASTER_STAT	0xFFC02218	/* Master Mode Status Register */
+#define	TWI1_MASTER_ADDR	0xFFC0221C	/* Master Mode Address Register */
+#define	TWI1_INT_STAT		0xFFC02220	/* TWI1	Master Interrupt Register */
+#define	TWI1_INT_MASK		0xFFC02224	/* TWI1	Master Interrupt Mask Register */
+#define	TWI1_FIFO_CTL		0xFFC02228	/* FIFO	Control	Register */
+#define	TWI1_FIFO_STAT		0xFFC0222C	/* FIFO	Status Register */
+#define	TWI1_XMT_DATA8		0xFFC02280	/* FIFO	Transmit Data Single Byte Register */
+#define	TWI1_XMT_DATA16		0xFFC02284	/* FIFO	Transmit Data Double Byte Register */
+#define	TWI1_RCV_DATA8		0xFFC02288	/* FIFO	Receive	Data Single Byte Register */
+#define	TWI1_RCV_DATA16		0xFFC0228C	/* FIFO	Receive	Data Double Byte Register */
+#define TWI1_REGBASE		TWI1_CLKDIV
+
+
+/* the following are for backwards compatibility */
+#define	TWI1_PRESCALE	  TWI1_CONTROL
+#define	TWI1_INT_SRC	  TWI1_INT_STAT
+#define	TWI1_INT_ENABLE	  TWI1_INT_MASK
+
+
+/* SPI1	Controller		(0xFFC02300 - 0xFFC023FF)	 */
+#define	SPI1_CTL			0xFFC02300  /* SPI1 Control Register */
+#define	SPI1_FLG			0xFFC02304  /* SPI1 Flag register */
+#define	SPI1_STAT			0xFFC02308  /* SPI1 Status register */
+#define	SPI1_TDBR			0xFFC0230C  /* SPI1 Transmit Data Buffer Register */
+#define	SPI1_RDBR			0xFFC02310  /* SPI1 Receive Data Buffer	Register */
+#define	SPI1_BAUD			0xFFC02314  /* SPI1 Baud rate Register */
+#define	SPI1_SHADOW			0xFFC02318  /* SPI1_RDBR Shadow	Register */
+#define SPI1_REGBASE			SPI1_CTL
+
+/* SPI2	Controller		(0xFFC02400 - 0xFFC024FF)	 */
+#define	SPI2_CTL			0xFFC02400  /* SPI2 Control Register */
+#define	SPI2_FLG			0xFFC02404  /* SPI2 Flag register */
+#define	SPI2_STAT			0xFFC02408  /* SPI2 Status register */
+#define	SPI2_TDBR			0xFFC0240C  /* SPI2 Transmit Data Buffer Register */
+#define	SPI2_RDBR			0xFFC02410  /* SPI2 Receive Data Buffer	Register */
+#define	SPI2_BAUD			0xFFC02414  /* SPI2 Baud rate Register */
+#define	SPI2_SHADOW			0xFFC02418  /* SPI2_RDBR Shadow	Register */
+#define SPI2_REGBASE			SPI2_CTL
+
+/* SPORT2 Controller		(0xFFC02500 - 0xFFC025FF)			 */
+#define	SPORT2_TCR1			0xFFC02500	/* SPORT2 Transmit Configuration 1 Register */
+#define	SPORT2_TCR2			0xFFC02504	/* SPORT2 Transmit Configuration 2 Register */
+#define	SPORT2_TCLKDIV		0xFFC02508	/* SPORT2 Transmit Clock Divider */
+#define	SPORT2_TFSDIV		0xFFC0250C	/* SPORT2 Transmit Frame Sync Divider */
+#define	SPORT2_TX			0xFFC02510	/* SPORT2 TX Data Register */
+#define	SPORT2_RX			0xFFC02518	/* SPORT2 RX Data Register */
+#define	SPORT2_RCR1			0xFFC02520	/* SPORT2 Receive Configuration 1 Register */
+#define	SPORT2_RCR2			0xFFC02524	/* SPORT2 Receive Configuration 2 Register */
+#define	SPORT2_RCLKDIV		0xFFC02528	/* SPORT2 Receive Clock	Divider */
+#define	SPORT2_RFSDIV		0xFFC0252C	/* SPORT2 Receive Frame	Sync Divider */
+#define	SPORT2_STAT			0xFFC02530	/* SPORT2 Status Register */
+#define	SPORT2_CHNL			0xFFC02534	/* SPORT2 Current Channel Register */
+#define	SPORT2_MCMC1		0xFFC02538	/* SPORT2 Multi-Channel	Configuration Register 1 */
+#define	SPORT2_MCMC2		0xFFC0253C	/* SPORT2 Multi-Channel	Configuration Register 2 */
+#define	SPORT2_MTCS0		0xFFC02540	/* SPORT2 Multi-Channel	Transmit Select	Register 0 */
+#define	SPORT2_MTCS1		0xFFC02544	/* SPORT2 Multi-Channel	Transmit Select	Register 1 */
+#define	SPORT2_MTCS2		0xFFC02548	/* SPORT2 Multi-Channel	Transmit Select	Register 2 */
+#define	SPORT2_MTCS3		0xFFC0254C	/* SPORT2 Multi-Channel	Transmit Select	Register 3 */
+#define	SPORT2_MRCS0		0xFFC02550	/* SPORT2 Multi-Channel	Receive	Select Register	0 */
+#define	SPORT2_MRCS1		0xFFC02554	/* SPORT2 Multi-Channel	Receive	Select Register	1 */
+#define	SPORT2_MRCS2		0xFFC02558	/* SPORT2 Multi-Channel	Receive	Select Register	2 */
+#define	SPORT2_MRCS3		0xFFC0255C	/* SPORT2 Multi-Channel	Receive	Select Register	3 */
+
+
+/* SPORT3 Controller		(0xFFC02600 - 0xFFC026FF)			 */
+#define	SPORT3_TCR1			0xFFC02600	/* SPORT3 Transmit Configuration 1 Register */
+#define	SPORT3_TCR2			0xFFC02604	/* SPORT3 Transmit Configuration 2 Register */
+#define	SPORT3_TCLKDIV		0xFFC02608	/* SPORT3 Transmit Clock Divider */
+#define	SPORT3_TFSDIV		0xFFC0260C	/* SPORT3 Transmit Frame Sync Divider */
+#define	SPORT3_TX			0xFFC02610	/* SPORT3 TX Data Register */
+#define	SPORT3_RX			0xFFC02618	/* SPORT3 RX Data Register */
+#define	SPORT3_RCR1			0xFFC02620	/* SPORT3 Receive Configuration 1 Register */
+#define	SPORT3_RCR2			0xFFC02624	/* SPORT3 Receive Configuration 2 Register */
+#define	SPORT3_RCLKDIV		0xFFC02628	/* SPORT3 Receive Clock	Divider */
+#define	SPORT3_RFSDIV		0xFFC0262C	/* SPORT3 Receive Frame	Sync Divider */
+#define	SPORT3_STAT			0xFFC02630	/* SPORT3 Status Register */
+#define	SPORT3_CHNL			0xFFC02634	/* SPORT3 Current Channel Register */
+#define	SPORT3_MCMC1		0xFFC02638	/* SPORT3 Multi-Channel	Configuration Register 1 */
+#define	SPORT3_MCMC2		0xFFC0263C	/* SPORT3 Multi-Channel	Configuration Register 2 */
+#define	SPORT3_MTCS0		0xFFC02640	/* SPORT3 Multi-Channel	Transmit Select	Register 0 */
+#define	SPORT3_MTCS1		0xFFC02644	/* SPORT3 Multi-Channel	Transmit Select	Register 1 */
+#define	SPORT3_MTCS2		0xFFC02648	/* SPORT3 Multi-Channel	Transmit Select	Register 2 */
+#define	SPORT3_MTCS3		0xFFC0264C	/* SPORT3 Multi-Channel	Transmit Select	Register 3 */
+#define	SPORT3_MRCS0		0xFFC02650	/* SPORT3 Multi-Channel	Receive	Select Register	0 */
+#define	SPORT3_MRCS1		0xFFC02654	/* SPORT3 Multi-Channel	Receive	Select Register	1 */
+#define	SPORT3_MRCS2		0xFFC02658	/* SPORT3 Multi-Channel	Receive	Select Register	2 */
+#define	SPORT3_MRCS3		0xFFC0265C	/* SPORT3 Multi-Channel	Receive	Select Register	3 */
+
+
+/* CAN Controller		(0xFFC02A00 - 0xFFC02FFF)				 */
+/* For Mailboxes 0-15											 */
+#define	CAN_MC1				0xFFC02A00	/* Mailbox config reg 1	 */
+#define	CAN_MD1				0xFFC02A04	/* Mailbox direction reg 1 */
+#define	CAN_TRS1			0xFFC02A08	/* Transmit Request Set	reg 1 */
+#define	CAN_TRR1			0xFFC02A0C	/* Transmit Request Reset reg 1 */
+#define	CAN_TA1				0xFFC02A10	/* Transmit Acknowledge	reg 1 */
+#define	CAN_AA1				0xFFC02A14	/* Transmit Abort Acknowledge reg 1 */
+#define	CAN_RMP1			0xFFC02A18	/* Receive Message Pending reg 1 */
+#define	CAN_RML1			0xFFC02A1C	/* Receive Message Lost	reg 1 */
+#define	CAN_MBTIF1			0xFFC02A20	/* Mailbox Transmit Interrupt Flag reg 1 */
+#define	CAN_MBRIF1			0xFFC02A24	/* Mailbox Receive  Interrupt Flag reg 1 */
+#define	CAN_MBIM1			0xFFC02A28	/* Mailbox Interrupt Mask reg 1 */
+#define	CAN_RFH1			0xFFC02A2C	/* Remote Frame	Handling reg 1 */
+#define	CAN_OPSS1			0xFFC02A30	/* Overwrite Protection	Single Shot Xmission reg 1 */
+
+/* For Mailboxes 16-31											 */
+#define	CAN_MC2				0xFFC02A40	/* Mailbox config reg 2	 */
+#define	CAN_MD2				0xFFC02A44	/* Mailbox direction reg 2 */
+#define	CAN_TRS2			0xFFC02A48	/* Transmit Request Set	reg 2 */
+#define	CAN_TRR2			0xFFC02A4C	/* Transmit Request Reset reg 2 */
+#define	CAN_TA2				0xFFC02A50	/* Transmit Acknowledge	reg 2 */
+#define	CAN_AA2				0xFFC02A54	/* Transmit Abort Acknowledge reg 2 */
+#define	CAN_RMP2			0xFFC02A58	/* Receive Message Pending reg 2 */
+#define	CAN_RML2			0xFFC02A5C	/* Receive Message Lost	reg 2 */
+#define	CAN_MBTIF2			0xFFC02A60	/* Mailbox Transmit Interrupt Flag reg 2 */
+#define	CAN_MBRIF2			0xFFC02A64	/* Mailbox Receive  Interrupt Flag reg 2 */
+#define	CAN_MBIM2			0xFFC02A68	/* Mailbox Interrupt Mask reg 2 */
+#define	CAN_RFH2			0xFFC02A6C	/* Remote Frame	Handling reg 2 */
+#define	CAN_OPSS2			0xFFC02A70	/* Overwrite Protection	Single Shot Xmission reg 2 */
+
+#define	CAN_CLOCK			0xFFC02A80	/* Bit Timing Configuration register 0 */
+#define	CAN_TIMING			0xFFC02A84	/* Bit Timing Configuration register 1 */
+
+#define	CAN_DEBUG			0xFFC02A88	/* Debug Register		 */
+/* the following is for	backwards compatibility */
+#define	CAN_CNF		 CAN_DEBUG
+
+#define	CAN_STATUS			0xFFC02A8C	/* Global Status Register */
+#define	CAN_CEC				0xFFC02A90	/* Error Counter Register */
+#define	CAN_GIS				0xFFC02A94	/* Global Interrupt Status Register */
+#define	CAN_GIM				0xFFC02A98	/* Global Interrupt Mask Register */
+#define	CAN_GIF				0xFFC02A9C	/* Global Interrupt Flag Register */
+#define	CAN_CONTROL			0xFFC02AA0	/* Master Control Register */
+#define	CAN_INTR			0xFFC02AA4	/* Interrupt Pending Register */
+#define	CAN_MBTD			0xFFC02AAC	/* Mailbox Temporary Disable Feature */
+#define	CAN_EWR				0xFFC02AB0	/* Programmable	Warning	Level */
+#define	CAN_ESR				0xFFC02AB4	/* Error Status	Register */
+#define	CAN_UCCNT			0xFFC02AC4	/* Universal Counter	 */
+#define	CAN_UCRC			0xFFC02AC8	/* Universal Counter Reload/Capture Register */
+#define	CAN_UCCNF			0xFFC02ACC	/* Universal Counter Configuration Register */
+
+/* Mailbox Acceptance Masks					 */
+#define	CAN_AM00L			0xFFC02B00	/* Mailbox 0 Low Acceptance Mask */
+#define	CAN_AM00H			0xFFC02B04	/* Mailbox 0 High Acceptance Mask */
+#define	CAN_AM01L			0xFFC02B08	/* Mailbox 1 Low Acceptance Mask */
+#define	CAN_AM01H			0xFFC02B0C	/* Mailbox 1 High Acceptance Mask */
+#define	CAN_AM02L			0xFFC02B10	/* Mailbox 2 Low Acceptance Mask */
+#define	CAN_AM02H			0xFFC02B14	/* Mailbox 2 High Acceptance Mask */
+#define	CAN_AM03L			0xFFC02B18	/* Mailbox 3 Low Acceptance Mask */
+#define	CAN_AM03H			0xFFC02B1C	/* Mailbox 3 High Acceptance Mask */
+#define	CAN_AM04L			0xFFC02B20	/* Mailbox 4 Low Acceptance Mask */
+#define	CAN_AM04H			0xFFC02B24	/* Mailbox 4 High Acceptance Mask */
+#define	CAN_AM05L			0xFFC02B28	/* Mailbox 5 Low Acceptance Mask */
+#define	CAN_AM05H			0xFFC02B2C	/* Mailbox 5 High Acceptance Mask */
+#define	CAN_AM06L			0xFFC02B30	/* Mailbox 6 Low Acceptance Mask */
+#define	CAN_AM06H			0xFFC02B34	/* Mailbox 6 High Acceptance Mask */
+#define	CAN_AM07L			0xFFC02B38	/* Mailbox 7 Low Acceptance Mask */
+#define	CAN_AM07H			0xFFC02B3C	/* Mailbox 7 High Acceptance Mask */
+#define	CAN_AM08L			0xFFC02B40	/* Mailbox 8 Low Acceptance Mask */
+#define	CAN_AM08H			0xFFC02B44	/* Mailbox 8 High Acceptance Mask */
+#define	CAN_AM09L			0xFFC02B48	/* Mailbox 9 Low Acceptance Mask */
+#define	CAN_AM09H			0xFFC02B4C	/* Mailbox 9 High Acceptance Mask */
+#define	CAN_AM10L			0xFFC02B50	/* Mailbox 10 Low Acceptance Mask */
+#define	CAN_AM10H			0xFFC02B54	/* Mailbox 10 High Acceptance Mask */
+#define	CAN_AM11L			0xFFC02B58	/* Mailbox 11 Low Acceptance Mask */
+#define	CAN_AM11H			0xFFC02B5C	/* Mailbox 11 High Acceptance Mask */
+#define	CAN_AM12L			0xFFC02B60	/* Mailbox 12 Low Acceptance Mask */
+#define	CAN_AM12H			0xFFC02B64	/* Mailbox 12 High Acceptance Mask */
+#define	CAN_AM13L			0xFFC02B68	/* Mailbox 13 Low Acceptance Mask */
+#define	CAN_AM13H			0xFFC02B6C	/* Mailbox 13 High Acceptance Mask */
+#define	CAN_AM14L			0xFFC02B70	/* Mailbox 14 Low Acceptance Mask */
+#define	CAN_AM14H			0xFFC02B74	/* Mailbox 14 High Acceptance Mask */
+#define	CAN_AM15L			0xFFC02B78	/* Mailbox 15 Low Acceptance Mask */
+#define	CAN_AM15H			0xFFC02B7C	/* Mailbox 15 High Acceptance Mask */
+
+#define	CAN_AM16L			0xFFC02B80	/* Mailbox 16 Low Acceptance Mask */
+#define	CAN_AM16H			0xFFC02B84	/* Mailbox 16 High Acceptance Mask */
+#define	CAN_AM17L			0xFFC02B88	/* Mailbox 17 Low Acceptance Mask */
+#define	CAN_AM17H			0xFFC02B8C	/* Mailbox 17 High Acceptance Mask */
+#define	CAN_AM18L			0xFFC02B90	/* Mailbox 18 Low Acceptance Mask */
+#define	CAN_AM18H			0xFFC02B94	/* Mailbox 18 High Acceptance Mask */
+#define	CAN_AM19L			0xFFC02B98	/* Mailbox 19 Low Acceptance Mask */
+#define	CAN_AM19H			0xFFC02B9C	/* Mailbox 19 High Acceptance Mask */
+#define	CAN_AM20L			0xFFC02BA0	/* Mailbox 20 Low Acceptance Mask */
+#define	CAN_AM20H			0xFFC02BA4	/* Mailbox 20 High Acceptance Mask */
+#define	CAN_AM21L			0xFFC02BA8	/* Mailbox 21 Low Acceptance Mask */
+#define	CAN_AM21H			0xFFC02BAC	/* Mailbox 21 High Acceptance Mask */
+#define	CAN_AM22L			0xFFC02BB0	/* Mailbox 22 Low Acceptance Mask */
+#define	CAN_AM22H			0xFFC02BB4	/* Mailbox 22 High Acceptance Mask */
+#define	CAN_AM23L			0xFFC02BB8	/* Mailbox 23 Low Acceptance Mask */
+#define	CAN_AM23H			0xFFC02BBC	/* Mailbox 23 High Acceptance Mask */
+#define	CAN_AM24L			0xFFC02BC0	/* Mailbox 24 Low Acceptance Mask */
+#define	CAN_AM24H			0xFFC02BC4	/* Mailbox 24 High Acceptance Mask */
+#define	CAN_AM25L			0xFFC02BC8	/* Mailbox 25 Low Acceptance Mask */
+#define	CAN_AM25H			0xFFC02BCC	/* Mailbox 25 High Acceptance Mask */
+#define	CAN_AM26L			0xFFC02BD0	/* Mailbox 26 Low Acceptance Mask */
+#define	CAN_AM26H			0xFFC02BD4	/* Mailbox 26 High Acceptance Mask */
+#define	CAN_AM27L			0xFFC02BD8	/* Mailbox 27 Low Acceptance Mask */
+#define	CAN_AM27H			0xFFC02BDC	/* Mailbox 27 High Acceptance Mask */
+#define	CAN_AM28L			0xFFC02BE0	/* Mailbox 28 Low Acceptance Mask */
+#define	CAN_AM28H			0xFFC02BE4	/* Mailbox 28 High Acceptance Mask */
+#define	CAN_AM29L			0xFFC02BE8	/* Mailbox 29 Low Acceptance Mask */
+#define	CAN_AM29H			0xFFC02BEC	/* Mailbox 29 High Acceptance Mask */
+#define	CAN_AM30L			0xFFC02BF0	/* Mailbox 30 Low Acceptance Mask */
+#define	CAN_AM30H			0xFFC02BF4	/* Mailbox 30 High Acceptance Mask */
+#define	CAN_AM31L			0xFFC02BF8	/* Mailbox 31 Low Acceptance Mask */
+#define	CAN_AM31H			0xFFC02BFC	/* Mailbox 31 High Acceptance Mask */
+
+/* CAN Acceptance Mask Macros */
+#define	CAN_AM_L(x)			(CAN_AM00L+((x)*0x8))
+#define	CAN_AM_H(x)			(CAN_AM00H+((x)*0x8))
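+
+/*
+ * Usage sketch (illustrative only): each mailbox owns an 8-byte pair of
+ * acceptance-mask words, so CAN_AM_L()/CAN_AM_H() simply step 0x8 per
+ * mailbox from CAN_AM00L/CAN_AM00H.  Assuming the generic 16-bit MMR
+ * accessor bfin_write16(), programming mailbox 5 might look like:
+ *
+ *	bfin_write16(CAN_AM_L(5), 0x07FF);
+ *	bfin_write16(CAN_AM_H(5), 0x0000);
+ */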
+
+/* Mailbox Registers									 */
+#define	CAN_MB00_DATA0		0xFFC02C00	/* Mailbox 0 Data Word 0 [15:0]	Register */
+#define	CAN_MB00_DATA1		0xFFC02C04	/* Mailbox 0 Data Word 1 [31:16] Register */
+#define	CAN_MB00_DATA2		0xFFC02C08	/* Mailbox 0 Data Word 2 [47:32] Register */
+#define	CAN_MB00_DATA3		0xFFC02C0C	/* Mailbox 0 Data Word 3 [63:48] Register */
+#define	CAN_MB00_LENGTH		0xFFC02C10	/* Mailbox 0 Data Length Code Register */
+#define	CAN_MB00_TIMESTAMP	0xFFC02C14	/* Mailbox 0 Time Stamp	Value Register */
+#define	CAN_MB00_ID0		0xFFC02C18	/* Mailbox 0 Identifier	Low Register */
+#define	CAN_MB00_ID1		0xFFC02C1C	/* Mailbox 0 Identifier	High Register */
+
+#define	CAN_MB01_DATA0		0xFFC02C20	/* Mailbox 1 Data Word 0 [15:0]	Register */
+#define	CAN_MB01_DATA1		0xFFC02C24	/* Mailbox 1 Data Word 1 [31:16] Register */
+#define	CAN_MB01_DATA2		0xFFC02C28	/* Mailbox 1 Data Word 2 [47:32] Register */
+#define	CAN_MB01_DATA3		0xFFC02C2C	/* Mailbox 1 Data Word 3 [63:48] Register */
+#define	CAN_MB01_LENGTH		0xFFC02C30	/* Mailbox 1 Data Length Code Register */
+#define	CAN_MB01_TIMESTAMP	0xFFC02C34	/* Mailbox 1 Time Stamp	Value Register */
+#define	CAN_MB01_ID0		0xFFC02C38	/* Mailbox 1 Identifier	Low Register */
+#define	CAN_MB01_ID1		0xFFC02C3C	/* Mailbox 1 Identifier	High Register */
+
+#define	CAN_MB02_DATA0		0xFFC02C40	/* Mailbox 2 Data Word 0 [15:0]	Register */
+#define	CAN_MB02_DATA1		0xFFC02C44	/* Mailbox 2 Data Word 1 [31:16] Register */
+#define	CAN_MB02_DATA2		0xFFC02C48	/* Mailbox 2 Data Word 2 [47:32] Register */
+#define	CAN_MB02_DATA3		0xFFC02C4C	/* Mailbox 2 Data Word 3 [63:48] Register */
+#define	CAN_MB02_LENGTH		0xFFC02C50	/* Mailbox 2 Data Length Code Register */
+#define	CAN_MB02_TIMESTAMP	0xFFC02C54	/* Mailbox 2 Time Stamp	Value Register */
+#define	CAN_MB02_ID0		0xFFC02C58	/* Mailbox 2 Identifier	Low Register */
+#define	CAN_MB02_ID1		0xFFC02C5C	/* Mailbox 2 Identifier	High Register */
+
+#define	CAN_MB03_DATA0		0xFFC02C60	/* Mailbox 3 Data Word 0 [15:0]	Register */
+#define	CAN_MB03_DATA1		0xFFC02C64	/* Mailbox 3 Data Word 1 [31:16] Register */
+#define	CAN_MB03_DATA2		0xFFC02C68	/* Mailbox 3 Data Word 2 [47:32] Register */
+#define	CAN_MB03_DATA3		0xFFC02C6C	/* Mailbox 3 Data Word 3 [63:48] Register */
+#define	CAN_MB03_LENGTH		0xFFC02C70	/* Mailbox 3 Data Length Code Register */
+#define	CAN_MB03_TIMESTAMP	0xFFC02C74	/* Mailbox 3 Time Stamp	Value Register */
+#define	CAN_MB03_ID0		0xFFC02C78	/* Mailbox 3 Identifier	Low Register */
+#define	CAN_MB03_ID1		0xFFC02C7C	/* Mailbox 3 Identifier	High Register */
+
+#define	CAN_MB04_DATA0		0xFFC02C80	/* Mailbox 4 Data Word 0 [15:0]	Register */
+#define	CAN_MB04_DATA1		0xFFC02C84	/* Mailbox 4 Data Word 1 [31:16] Register */
+#define	CAN_MB04_DATA2		0xFFC02C88	/* Mailbox 4 Data Word 2 [47:32] Register */
+#define	CAN_MB04_DATA3		0xFFC02C8C	/* Mailbox 4 Data Word 3 [63:48] Register */
+#define	CAN_MB04_LENGTH		0xFFC02C90	/* Mailbox 4 Data Length Code Register */
+#define	CAN_MB04_TIMESTAMP	0xFFC02C94	/* Mailbox 4 Time Stamp	Value Register */
+#define	CAN_MB04_ID0		0xFFC02C98	/* Mailbox 4 Identifier	Low Register */
+#define	CAN_MB04_ID1		0xFFC02C9C	/* Mailbox 4 Identifier	High Register */
+
+#define	CAN_MB05_DATA0		0xFFC02CA0	/* Mailbox 5 Data Word 0 [15:0]	Register */
+#define	CAN_MB05_DATA1		0xFFC02CA4	/* Mailbox 5 Data Word 1 [31:16] Register */
+#define	CAN_MB05_DATA2		0xFFC02CA8	/* Mailbox 5 Data Word 2 [47:32] Register */
+#define	CAN_MB05_DATA3		0xFFC02CAC	/* Mailbox 5 Data Word 3 [63:48] Register */
+#define	CAN_MB05_LENGTH		0xFFC02CB0	/* Mailbox 5 Data Length Code Register */
+#define	CAN_MB05_TIMESTAMP	0xFFC02CB4	/* Mailbox 5 Time Stamp	Value Register */
+#define	CAN_MB05_ID0		0xFFC02CB8	/* Mailbox 5 Identifier	Low Register */
+#define	CAN_MB05_ID1		0xFFC02CBC	/* Mailbox 5 Identifier	High Register */
+
+#define	CAN_MB06_DATA0		0xFFC02CC0	/* Mailbox 6 Data Word 0 [15:0]	Register */
+#define	CAN_MB06_DATA1		0xFFC02CC4	/* Mailbox 6 Data Word 1 [31:16] Register */
+#define	CAN_MB06_DATA2		0xFFC02CC8	/* Mailbox 6 Data Word 2 [47:32] Register */
+#define	CAN_MB06_DATA3		0xFFC02CCC	/* Mailbox 6 Data Word 3 [63:48] Register */
+#define	CAN_MB06_LENGTH		0xFFC02CD0	/* Mailbox 6 Data Length Code Register */
+#define	CAN_MB06_TIMESTAMP	0xFFC02CD4	/* Mailbox 6 Time Stamp	Value Register */
+#define	CAN_MB06_ID0		0xFFC02CD8	/* Mailbox 6 Identifier	Low Register */
+#define	CAN_MB06_ID1		0xFFC02CDC	/* Mailbox 6 Identifier	High Register */
+
+#define	CAN_MB07_DATA0		0xFFC02CE0	/* Mailbox 7 Data Word 0 [15:0]	Register */
+#define	CAN_MB07_DATA1		0xFFC02CE4	/* Mailbox 7 Data Word 1 [31:16] Register */
+#define	CAN_MB07_DATA2		0xFFC02CE8	/* Mailbox 7 Data Word 2 [47:32] Register */
+#define	CAN_MB07_DATA3		0xFFC02CEC	/* Mailbox 7 Data Word 3 [63:48] Register */
+#define	CAN_MB07_LENGTH		0xFFC02CF0	/* Mailbox 7 Data Length Code Register */
+#define	CAN_MB07_TIMESTAMP	0xFFC02CF4	/* Mailbox 7 Time Stamp	Value Register */
+#define	CAN_MB07_ID0		0xFFC02CF8	/* Mailbox 7 Identifier	Low Register */
+#define	CAN_MB07_ID1		0xFFC02CFC	/* Mailbox 7 Identifier	High Register */
+
+#define	CAN_MB08_DATA0		0xFFC02D00	/* Mailbox 8 Data Word 0 [15:0]	Register */
+#define	CAN_MB08_DATA1		0xFFC02D04	/* Mailbox 8 Data Word 1 [31:16] Register */
+#define	CAN_MB08_DATA2		0xFFC02D08	/* Mailbox 8 Data Word 2 [47:32] Register */
+#define	CAN_MB08_DATA3		0xFFC02D0C	/* Mailbox 8 Data Word 3 [63:48] Register */
+#define	CAN_MB08_LENGTH		0xFFC02D10	/* Mailbox 8 Data Length Code Register */
+#define	CAN_MB08_TIMESTAMP	0xFFC02D14	/* Mailbox 8 Time Stamp	Value Register */
+#define	CAN_MB08_ID0		0xFFC02D18	/* Mailbox 8 Identifier	Low Register */
+#define	CAN_MB08_ID1		0xFFC02D1C	/* Mailbox 8 Identifier	High Register */
+
+#define	CAN_MB09_DATA0		0xFFC02D20	/* Mailbox 9 Data Word 0 [15:0]	Register */
+#define	CAN_MB09_DATA1		0xFFC02D24	/* Mailbox 9 Data Word 1 [31:16] Register */
+#define	CAN_MB09_DATA2		0xFFC02D28	/* Mailbox 9 Data Word 2 [47:32] Register */
+#define	CAN_MB09_DATA3		0xFFC02D2C	/* Mailbox 9 Data Word 3 [63:48] Register */
+#define	CAN_MB09_LENGTH		0xFFC02D30	/* Mailbox 9 Data Length Code Register */
+#define	CAN_MB09_TIMESTAMP	0xFFC02D34	/* Mailbox 9 Time Stamp	Value Register */
+#define	CAN_MB09_ID0		0xFFC02D38	/* Mailbox 9 Identifier	Low Register */
+#define	CAN_MB09_ID1		0xFFC02D3C	/* Mailbox 9 Identifier	High Register */
+
+#define	CAN_MB10_DATA0		0xFFC02D40	/* Mailbox 10 Data Word	0 [15:0] Register */
+#define	CAN_MB10_DATA1		0xFFC02D44	/* Mailbox 10 Data Word	1 [31:16] Register */
+#define	CAN_MB10_DATA2		0xFFC02D48	/* Mailbox 10 Data Word	2 [47:32] Register */
+#define	CAN_MB10_DATA3		0xFFC02D4C	/* Mailbox 10 Data Word	3 [63:48] Register */
+#define	CAN_MB10_LENGTH		0xFFC02D50	/* Mailbox 10 Data Length Code Register */
+#define	CAN_MB10_TIMESTAMP	0xFFC02D54	/* Mailbox 10 Time Stamp Value Register */
+#define	CAN_MB10_ID0		0xFFC02D58	/* Mailbox 10 Identifier Low Register */
+#define	CAN_MB10_ID1		0xFFC02D5C	/* Mailbox 10 Identifier High Register */
+
+#define	CAN_MB11_DATA0		0xFFC02D60	/* Mailbox 11 Data Word	0 [15:0] Register */
+#define	CAN_MB11_DATA1		0xFFC02D64	/* Mailbox 11 Data Word	1 [31:16] Register */
+#define	CAN_MB11_DATA2		0xFFC02D68	/* Mailbox 11 Data Word	2 [47:32] Register */
+#define	CAN_MB11_DATA3		0xFFC02D6C	/* Mailbox 11 Data Word	3 [63:48] Register */
+#define	CAN_MB11_LENGTH		0xFFC02D70	/* Mailbox 11 Data Length Code Register */
+#define	CAN_MB11_TIMESTAMP	0xFFC02D74	/* Mailbox 11 Time Stamp Value Register */
+#define	CAN_MB11_ID0		0xFFC02D78	/* Mailbox 11 Identifier Low Register */
+#define	CAN_MB11_ID1		0xFFC02D7C	/* Mailbox 11 Identifier High Register */
+
+#define	CAN_MB12_DATA0		0xFFC02D80	/* Mailbox 12 Data Word	0 [15:0] Register */
+#define	CAN_MB12_DATA1		0xFFC02D84	/* Mailbox 12 Data Word	1 [31:16] Register */
+#define	CAN_MB12_DATA2		0xFFC02D88	/* Mailbox 12 Data Word	2 [47:32] Register */
+#define	CAN_MB12_DATA3		0xFFC02D8C	/* Mailbox 12 Data Word	3 [63:48] Register */
+#define	CAN_MB12_LENGTH		0xFFC02D90	/* Mailbox 12 Data Length Code Register */
+#define	CAN_MB12_TIMESTAMP	0xFFC02D94	/* Mailbox 12 Time Stamp Value Register */
+#define	CAN_MB12_ID0		0xFFC02D98	/* Mailbox 12 Identifier Low Register */
+#define	CAN_MB12_ID1		0xFFC02D9C	/* Mailbox 12 Identifier High Register */
+
+#define	CAN_MB13_DATA0		0xFFC02DA0	/* Mailbox 13 Data Word	0 [15:0] Register */
+#define	CAN_MB13_DATA1		0xFFC02DA4	/* Mailbox 13 Data Word	1 [31:16] Register */
+#define	CAN_MB13_DATA2		0xFFC02DA8	/* Mailbox 13 Data Word	2 [47:32] Register */
+#define	CAN_MB13_DATA3		0xFFC02DAC	/* Mailbox 13 Data Word	3 [63:48] Register */
+#define	CAN_MB13_LENGTH		0xFFC02DB0	/* Mailbox 13 Data Length Code Register */
+#define	CAN_MB13_TIMESTAMP	0xFFC02DB4	/* Mailbox 13 Time Stamp Value Register */
+#define	CAN_MB13_ID0		0xFFC02DB8	/* Mailbox 13 Identifier Low Register */
+#define	CAN_MB13_ID1		0xFFC02DBC	/* Mailbox 13 Identifier High Register */
+
+#define	CAN_MB14_DATA0		0xFFC02DC0	/* Mailbox 14 Data Word	0 [15:0] Register */
+#define	CAN_MB14_DATA1		0xFFC02DC4	/* Mailbox 14 Data Word	1 [31:16] Register */
+#define	CAN_MB14_DATA2		0xFFC02DC8	/* Mailbox 14 Data Word	2 [47:32] Register */
+#define	CAN_MB14_DATA3		0xFFC02DCC	/* Mailbox 14 Data Word	3 [63:48] Register */
+#define	CAN_MB14_LENGTH		0xFFC02DD0	/* Mailbox 14 Data Length Code Register */
+#define	CAN_MB14_TIMESTAMP	0xFFC02DD4	/* Mailbox 14 Time Stamp Value Register */
+#define	CAN_MB14_ID0		0xFFC02DD8	/* Mailbox 14 Identifier Low Register */
+#define	CAN_MB14_ID1		0xFFC02DDC	/* Mailbox 14 Identifier High Register */
+
+#define	CAN_MB15_DATA0		0xFFC02DE0	/* Mailbox 15 Data Word	0 [15:0] Register */
+#define	CAN_MB15_DATA1		0xFFC02DE4	/* Mailbox 15 Data Word	1 [31:16] Register */
+#define	CAN_MB15_DATA2		0xFFC02DE8	/* Mailbox 15 Data Word	2 [47:32] Register */
+#define	CAN_MB15_DATA3		0xFFC02DEC	/* Mailbox 15 Data Word	3 [63:48] Register */
+#define	CAN_MB15_LENGTH		0xFFC02DF0	/* Mailbox 15 Data Length Code Register */
+#define	CAN_MB15_TIMESTAMP	0xFFC02DF4	/* Mailbox 15 Time Stamp Value Register */
+#define	CAN_MB15_ID0		0xFFC02DF8	/* Mailbox 15 Identifier Low Register */
+#define	CAN_MB15_ID1		0xFFC02DFC	/* Mailbox 15 Identifier High Register */
+
+#define	CAN_MB16_DATA0		0xFFC02E00	/* Mailbox 16 Data Word	0 [15:0] Register */
+#define	CAN_MB16_DATA1		0xFFC02E04	/* Mailbox 16 Data Word	1 [31:16] Register */
+#define	CAN_MB16_DATA2		0xFFC02E08	/* Mailbox 16 Data Word	2 [47:32] Register */
+#define	CAN_MB16_DATA3		0xFFC02E0C	/* Mailbox 16 Data Word	3 [63:48] Register */
+#define	CAN_MB16_LENGTH		0xFFC02E10	/* Mailbox 16 Data Length Code Register */
+#define	CAN_MB16_TIMESTAMP	0xFFC02E14	/* Mailbox 16 Time Stamp Value Register */
+#define	CAN_MB16_ID0		0xFFC02E18	/* Mailbox 16 Identifier Low Register */
+#define	CAN_MB16_ID1		0xFFC02E1C	/* Mailbox 16 Identifier High Register */
+
+#define	CAN_MB17_DATA0		0xFFC02E20	/* Mailbox 17 Data Word	0 [15:0] Register */
+#define	CAN_MB17_DATA1		0xFFC02E24	/* Mailbox 17 Data Word	1 [31:16] Register */
+#define	CAN_MB17_DATA2		0xFFC02E28	/* Mailbox 17 Data Word	2 [47:32] Register */
+#define	CAN_MB17_DATA3		0xFFC02E2C	/* Mailbox 17 Data Word	3 [63:48] Register */
+#define	CAN_MB17_LENGTH		0xFFC02E30	/* Mailbox 17 Data Length Code Register */
+#define	CAN_MB17_TIMESTAMP	0xFFC02E34	/* Mailbox 17 Time Stamp Value Register */
+#define	CAN_MB17_ID0		0xFFC02E38	/* Mailbox 17 Identifier Low Register */
+#define	CAN_MB17_ID1		0xFFC02E3C	/* Mailbox 17 Identifier High Register */
+
+#define	CAN_MB18_DATA0		0xFFC02E40	/* Mailbox 18 Data Word	0 [15:0] Register */
+#define	CAN_MB18_DATA1		0xFFC02E44	/* Mailbox 18 Data Word	1 [31:16] Register */
+#define	CAN_MB18_DATA2		0xFFC02E48	/* Mailbox 18 Data Word	2 [47:32] Register */
+#define	CAN_MB18_DATA3		0xFFC02E4C	/* Mailbox 18 Data Word	3 [63:48] Register */
+#define	CAN_MB18_LENGTH		0xFFC02E50	/* Mailbox 18 Data Length Code Register */
+#define	CAN_MB18_TIMESTAMP	0xFFC02E54	/* Mailbox 18 Time Stamp Value Register */
+#define	CAN_MB18_ID0		0xFFC02E58	/* Mailbox 18 Identifier Low Register */
+#define	CAN_MB18_ID1		0xFFC02E5C	/* Mailbox 18 Identifier High Register */
+
+#define	CAN_MB19_DATA0		0xFFC02E60	/* Mailbox 19 Data Word	0 [15:0] Register */
+#define	CAN_MB19_DATA1		0xFFC02E64	/* Mailbox 19 Data Word	1 [31:16] Register */
+#define	CAN_MB19_DATA2		0xFFC02E68	/* Mailbox 19 Data Word	2 [47:32] Register */
+#define	CAN_MB19_DATA3		0xFFC02E6C	/* Mailbox 19 Data Word	3 [63:48] Register */
+#define	CAN_MB19_LENGTH		0xFFC02E70	/* Mailbox 19 Data Length Code Register */
+#define	CAN_MB19_TIMESTAMP	0xFFC02E74	/* Mailbox 19 Time Stamp Value Register */
+#define	CAN_MB19_ID0		0xFFC02E78	/* Mailbox 19 Identifier Low Register */
+#define	CAN_MB19_ID1		0xFFC02E7C	/* Mailbox 19 Identifier High Register */
+
+#define	CAN_MB20_DATA0		0xFFC02E80	/* Mailbox 20 Data Word	0 [15:0] Register */
+#define	CAN_MB20_DATA1		0xFFC02E84	/* Mailbox 20 Data Word	1 [31:16] Register */
+#define	CAN_MB20_DATA2		0xFFC02E88	/* Mailbox 20 Data Word	2 [47:32] Register */
+#define	CAN_MB20_DATA3		0xFFC02E8C	/* Mailbox 20 Data Word	3 [63:48] Register */
+#define	CAN_MB20_LENGTH		0xFFC02E90	/* Mailbox 20 Data Length Code Register */
+#define	CAN_MB20_TIMESTAMP	0xFFC02E94	/* Mailbox 20 Time Stamp Value Register */
+#define	CAN_MB20_ID0		0xFFC02E98	/* Mailbox 20 Identifier Low Register */
+#define	CAN_MB20_ID1		0xFFC02E9C	/* Mailbox 20 Identifier High Register */
+
+#define	CAN_MB21_DATA0		0xFFC02EA0	/* Mailbox 21 Data Word	0 [15:0] Register */
+#define	CAN_MB21_DATA1		0xFFC02EA4	/* Mailbox 21 Data Word	1 [31:16] Register */
+#define	CAN_MB21_DATA2		0xFFC02EA8	/* Mailbox 21 Data Word	2 [47:32] Register */
+#define	CAN_MB21_DATA3		0xFFC02EAC	/* Mailbox 21 Data Word	3 [63:48] Register */
+#define	CAN_MB21_LENGTH		0xFFC02EB0	/* Mailbox 21 Data Length Code Register */
+#define	CAN_MB21_TIMESTAMP	0xFFC02EB4	/* Mailbox 21 Time Stamp Value Register */
+#define	CAN_MB21_ID0		0xFFC02EB8	/* Mailbox 21 Identifier Low Register */
+#define	CAN_MB21_ID1		0xFFC02EBC	/* Mailbox 21 Identifier High Register */
+
+#define	CAN_MB22_DATA0		0xFFC02EC0	/* Mailbox 22 Data Word	0 [15:0] Register */
+#define	CAN_MB22_DATA1		0xFFC02EC4	/* Mailbox 22 Data Word	1 [31:16] Register */
+#define	CAN_MB22_DATA2		0xFFC02EC8	/* Mailbox 22 Data Word	2 [47:32] Register */
+#define	CAN_MB22_DATA3		0xFFC02ECC	/* Mailbox 22 Data Word	3 [63:48] Register */
+#define	CAN_MB22_LENGTH		0xFFC02ED0	/* Mailbox 22 Data Length Code Register */
+#define	CAN_MB22_TIMESTAMP	0xFFC02ED4	/* Mailbox 22 Time Stamp Value Register */
+#define	CAN_MB22_ID0		0xFFC02ED8	/* Mailbox 22 Identifier Low Register */
+#define	CAN_MB22_ID1		0xFFC02EDC	/* Mailbox 22 Identifier High Register */
+
+#define	CAN_MB23_DATA0		0xFFC02EE0	/* Mailbox 23 Data Word	0 [15:0] Register */
+#define	CAN_MB23_DATA1		0xFFC02EE4	/* Mailbox 23 Data Word	1 [31:16] Register */
+#define	CAN_MB23_DATA2		0xFFC02EE8	/* Mailbox 23 Data Word	2 [47:32] Register */
+#define	CAN_MB23_DATA3		0xFFC02EEC	/* Mailbox 23 Data Word	3 [63:48] Register */
+#define	CAN_MB23_LENGTH		0xFFC02EF0	/* Mailbox 23 Data Length Code Register */
+#define	CAN_MB23_TIMESTAMP	0xFFC02EF4	/* Mailbox 23 Time Stamp Value Register */
+#define	CAN_MB23_ID0		0xFFC02EF8	/* Mailbox 23 Identifier Low Register */
+#define	CAN_MB23_ID1		0xFFC02EFC	/* Mailbox 23 Identifier High Register */
+
+#define	CAN_MB24_DATA0		0xFFC02F00	/* Mailbox 24 Data Word	0 [15:0] Register */
+#define	CAN_MB24_DATA1		0xFFC02F04	/* Mailbox 24 Data Word	1 [31:16] Register */
+#define	CAN_MB24_DATA2		0xFFC02F08	/* Mailbox 24 Data Word	2 [47:32] Register */
+#define	CAN_MB24_DATA3		0xFFC02F0C	/* Mailbox 24 Data Word	3 [63:48] Register */
+#define	CAN_MB24_LENGTH		0xFFC02F10	/* Mailbox 24 Data Length Code Register */
+#define	CAN_MB24_TIMESTAMP	0xFFC02F14	/* Mailbox 24 Time Stamp Value Register */
+#define	CAN_MB24_ID0		0xFFC02F18	/* Mailbox 24 Identifier Low Register */
+#define	CAN_MB24_ID1		0xFFC02F1C	/* Mailbox 24 Identifier High Register */
+
+#define	CAN_MB25_DATA0		0xFFC02F20	/* Mailbox 25 Data Word	0 [15:0] Register */
+#define	CAN_MB25_DATA1		0xFFC02F24	/* Mailbox 25 Data Word	1 [31:16] Register */
+#define	CAN_MB25_DATA2		0xFFC02F28	/* Mailbox 25 Data Word	2 [47:32] Register */
+#define	CAN_MB25_DATA3		0xFFC02F2C	/* Mailbox 25 Data Word	3 [63:48] Register */
+#define	CAN_MB25_LENGTH		0xFFC02F30	/* Mailbox 25 Data Length Code Register */
+#define	CAN_MB25_TIMESTAMP	0xFFC02F34	/* Mailbox 25 Time Stamp Value Register */
+#define	CAN_MB25_ID0		0xFFC02F38	/* Mailbox 25 Identifier Low Register */
+#define	CAN_MB25_ID1		0xFFC02F3C	/* Mailbox 25 Identifier High Register */
+
+#define	CAN_MB26_DATA0		0xFFC02F40	/* Mailbox 26 Data Word	0 [15:0] Register */
+#define	CAN_MB26_DATA1		0xFFC02F44	/* Mailbox 26 Data Word	1 [31:16] Register */
+#define	CAN_MB26_DATA2		0xFFC02F48	/* Mailbox 26 Data Word	2 [47:32] Register */
+#define	CAN_MB26_DATA3		0xFFC02F4C	/* Mailbox 26 Data Word	3 [63:48] Register */
+#define	CAN_MB26_LENGTH		0xFFC02F50	/* Mailbox 26 Data Length Code Register */
+#define	CAN_MB26_TIMESTAMP	0xFFC02F54	/* Mailbox 26 Time Stamp Value Register */
+#define	CAN_MB26_ID0		0xFFC02F58	/* Mailbox 26 Identifier Low Register */
+#define	CAN_MB26_ID1		0xFFC02F5C	/* Mailbox 26 Identifier High Register */
+
+#define	CAN_MB27_DATA0		0xFFC02F60	/* Mailbox 27 Data Word	0 [15:0] Register */
+#define	CAN_MB27_DATA1		0xFFC02F64	/* Mailbox 27 Data Word	1 [31:16] Register */
+#define	CAN_MB27_DATA2		0xFFC02F68	/* Mailbox 27 Data Word	2 [47:32] Register */
+#define	CAN_MB27_DATA3		0xFFC02F6C	/* Mailbox 27 Data Word	3 [63:48] Register */
+#define	CAN_MB27_LENGTH		0xFFC02F70	/* Mailbox 27 Data Length Code Register */
+#define	CAN_MB27_TIMESTAMP	0xFFC02F74	/* Mailbox 27 Time Stamp Value Register */
+#define	CAN_MB27_ID0		0xFFC02F78	/* Mailbox 27 Identifier Low Register */
+#define	CAN_MB27_ID1		0xFFC02F7C	/* Mailbox 27 Identifier High Register */
+
+#define	CAN_MB28_DATA0		0xFFC02F80	/* Mailbox 28 Data Word	0 [15:0] Register */
+#define	CAN_MB28_DATA1		0xFFC02F84	/* Mailbox 28 Data Word	1 [31:16] Register */
+#define	CAN_MB28_DATA2		0xFFC02F88	/* Mailbox 28 Data Word	2 [47:32] Register */
+#define	CAN_MB28_DATA3		0xFFC02F8C	/* Mailbox 28 Data Word	3 [63:48] Register */
+#define	CAN_MB28_LENGTH		0xFFC02F90	/* Mailbox 28 Data Length Code Register */
+#define	CAN_MB28_TIMESTAMP	0xFFC02F94	/* Mailbox 28 Time Stamp Value Register */
+#define	CAN_MB28_ID0		0xFFC02F98	/* Mailbox 28 Identifier Low Register */
+#define	CAN_MB28_ID1		0xFFC02F9C	/* Mailbox 28 Identifier High Register */
+
+#define	CAN_MB29_DATA0		0xFFC02FA0	/* Mailbox 29 Data Word	0 [15:0] Register */
+#define	CAN_MB29_DATA1		0xFFC02FA4	/* Mailbox 29 Data Word	1 [31:16] Register */
+#define	CAN_MB29_DATA2		0xFFC02FA8	/* Mailbox 29 Data Word	2 [47:32] Register */
+#define	CAN_MB29_DATA3		0xFFC02FAC	/* Mailbox 29 Data Word	3 [63:48] Register */
+#define	CAN_MB29_LENGTH		0xFFC02FB0	/* Mailbox 29 Data Length Code Register */
+#define	CAN_MB29_TIMESTAMP	0xFFC02FB4	/* Mailbox 29 Time Stamp Value Register */
+#define	CAN_MB29_ID0		0xFFC02FB8	/* Mailbox 29 Identifier Low Register */
+#define	CAN_MB29_ID1		0xFFC02FBC	/* Mailbox 29 Identifier High Register */
+
+#define	CAN_MB30_DATA0		0xFFC02FC0	/* Mailbox 30 Data Word	0 [15:0] Register */
+#define	CAN_MB30_DATA1		0xFFC02FC4	/* Mailbox 30 Data Word	1 [31:16] Register */
+#define	CAN_MB30_DATA2		0xFFC02FC8	/* Mailbox 30 Data Word	2 [47:32] Register */
+#define	CAN_MB30_DATA3		0xFFC02FCC	/* Mailbox 30 Data Word	3 [63:48] Register */
+#define	CAN_MB30_LENGTH		0xFFC02FD0	/* Mailbox 30 Data Length Code Register */
+#define	CAN_MB30_TIMESTAMP	0xFFC02FD4	/* Mailbox 30 Time Stamp Value Register */
+#define	CAN_MB30_ID0		0xFFC02FD8	/* Mailbox 30 Identifier Low Register */
+#define	CAN_MB30_ID1		0xFFC02FDC	/* Mailbox 30 Identifier High Register */
+
+#define	CAN_MB31_DATA0		0xFFC02FE0	/* Mailbox 31 Data Word	0 [15:0] Register */
+#define	CAN_MB31_DATA1		0xFFC02FE4	/* Mailbox 31 Data Word	1 [31:16] Register */
+#define	CAN_MB31_DATA2		0xFFC02FE8	/* Mailbox 31 Data Word	2 [47:32] Register */
+#define	CAN_MB31_DATA3		0xFFC02FEC	/* Mailbox 31 Data Word	3 [63:48] Register */
+#define	CAN_MB31_LENGTH		0xFFC02FF0	/* Mailbox 31 Data Length Code Register */
+#define	CAN_MB31_TIMESTAMP	0xFFC02FF4	/* Mailbox 31 Time Stamp Value Register */
+#define	CAN_MB31_ID0		0xFFC02FF8	/* Mailbox 31 Identifier Low Register */
+#define	CAN_MB31_ID1		0xFFC02FFC	/* Mailbox 31 Identifier High Register */
+
+/* CAN Mailbox Area Macros */
+#define	CAN_MB_ID1(x)		(CAN_MB00_ID1+((x)*0x20))
+#define	CAN_MB_ID0(x)		(CAN_MB00_ID0+((x)*0x20))
+#define	CAN_MB_TIMESTAMP(x)	(CAN_MB00_TIMESTAMP+((x)*0x20))
+#define	CAN_MB_LENGTH(x)	(CAN_MB00_LENGTH+((x)*0x20))
+#define	CAN_MB_DATA3(x)		(CAN_MB00_DATA3+((x)*0x20))
+#define	CAN_MB_DATA2(x)		(CAN_MB00_DATA2+((x)*0x20))
+#define	CAN_MB_DATA1(x)		(CAN_MB00_DATA1+((x)*0x20))
+#define	CAN_MB_DATA0(x)		(CAN_MB00_DATA0+((x)*0x20))
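+
+/*
+ * Usage sketch (illustrative only): the CAN_MB_*() macros index the
+ * 0x20-byte-per-mailbox register area above, so a driver can address any
+ * mailbox by number.  Assuming the generic 16-bit MMR accessor
+ * bfin_read16(), reading a received frame from mailbox n might look like:
+ *
+ *	u16 dlc = bfin_read16(CAN_MB_LENGTH(n));
+ *	u16 d0  = bfin_read16(CAN_MB_DATA0(n));
+ *	u16 id1 = bfin_read16(CAN_MB_ID1(n));
+ */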
+
+
+/*********************************************************************************** */
+/* System MMR Register Bits and	Macros */
+/******************************************************************************* */
+
+/* SWRST Mask */
+#define	SYSTEM_RESET	0x0007	/* Initiates A System Software Reset */
+#define	DOUBLE_FAULT	0x0008	/* Core	Double Fault Causes Reset */
+#define	RESET_DOUBLE	0x2000	/* SW Reset Generated By Core Double-Fault */
+#define	RESET_WDOG		0x4000	/* SW Reset Generated By Watchdog Timer */
+#define	RESET_SOFTWARE	0x8000	/* SW Reset Occurred Since Last	Read Of	SWRST */
+
+/* SYSCR Masks													 */
+#define	BMODE			0x0006	/* Boot	Mode - Latched During HW Reset From Mode Pins */
+#define	NOBOOT			0x0010	/* Execute From	L1 or ASYNC Bank 0 When	BMODE =	0 */
+
+
+/* *************  SYSTEM INTERRUPT CONTROLLER MASKS ***************** */
+
+/* Peripheral Masks For	SIC0_ISR, SIC0_IWR, SIC0_IMASK */
+#define	PLL_WAKEUP_IRQ		0x00000001	/* PLL Wakeup Interrupt	Request */
+#define	DMAC0_ERR_IRQ		0x00000002	/* DMA Controller 0 Error Interrupt Request */
+#define	PPI_ERR_IRQ		0x00000004	/* PPI Error Interrupt Request */
+#define	SPORT0_ERR_IRQ		0x00000008	/* SPORT0 Error	Interrupt Request */
+#define	SPORT1_ERR_IRQ		0x00000010	/* SPORT1 Error	Interrupt Request */
+#define	SPI0_ERR_IRQ		0x00000020	/* SPI0	Error Interrupt	Request */
+#define	UART0_ERR_IRQ		0x00000040	/* UART0 Error Interrupt Request */
+#define	RTC_IRQ			0x00000080	/* Real-Time Clock Interrupt Request */
+#define	DMA0_IRQ		0x00000100	/* DMA Channel 0 (PPI) Interrupt Request */
+#define	DMA1_IRQ		0x00000200	/* DMA Channel 1 (SPORT0 RX) Interrupt Request */
+#define	DMA2_IRQ		0x00000400	/* DMA Channel 2 (SPORT0 TX) Interrupt Request */
+#define	DMA3_IRQ		0x00000800	/* DMA Channel 3 (SPORT1 RX) Interrupt Request */
+#define	DMA4_IRQ		0x00001000	/* DMA Channel 4 (SPORT1 TX) Interrupt Request */
+#define	DMA5_IRQ		0x00002000	/* DMA Channel 5 (SPI) Interrupt Request */
+#define	DMA6_IRQ		0x00004000	/* DMA Channel 6 (UART RX) Interrupt Request */
+#define	DMA7_IRQ		0x00008000	/* DMA Channel 7 (UART TX) Interrupt Request */
+#define	TIMER0_IRQ		0x00010000	/* Timer 0 Interrupt Request */
+#define	TIMER1_IRQ		0x00020000	/* Timer 1 Interrupt Request */
+#define	TIMER2_IRQ		0x00040000	/* Timer 2 Interrupt Request */
+#define	PFA_IRQ			0x00080000	/* Programmable	Flag Interrupt Request A */
+#define	PFB_IRQ			0x00100000	/* Programmable	Flag Interrupt Request B */
+#define	MDMA0_0_IRQ		0x00200000	/* MemDMA0 Stream 0 Interrupt Request */
+#define	MDMA0_1_IRQ		0x00400000	/* MemDMA0 Stream 1 Interrupt Request */
+#define	WDOG_IRQ		0x00800000	/* Software Watchdog Timer Interrupt Request */
+#define	DMAC1_ERR_IRQ		0x01000000	/* DMA Controller 1 Error Interrupt Request */
+#define	SPORT2_ERR_IRQ		0x02000000	/* SPORT2 Error	Interrupt Request */
+#define	SPORT3_ERR_IRQ		0x04000000	/* SPORT3 Error	Interrupt Request */
+#define	MXVR_SD_IRQ		0x08000000	/* MXVR	Synchronous Data Interrupt Request */
+#define	SPI1_ERR_IRQ		0x10000000	/* SPI1	Error Interrupt	Request */
+#define	SPI2_ERR_IRQ		0x20000000	/* SPI2	Error Interrupt	Request */
+#define	UART1_ERR_IRQ		0x40000000	/* UART1 Error Interrupt Request */
+#define	UART2_ERR_IRQ		0x80000000	/* UART2 Error Interrupt Request */
+
+/* the following are for backwards compatibility */
+#define	DMA0_ERR_IRQ		DMAC0_ERR_IRQ
+#define	DMA1_ERR_IRQ		DMAC1_ERR_IRQ
+
+
+/* Peripheral Masks For	SIC_ISR1, SIC_IWR1, SIC_IMASK1	 */
+#define	CAN_ERR_IRQ			0x00000001	/* CAN Error Interrupt Request */
+#define	DMA8_IRQ			0x00000002	/* DMA Channel 8 (SPORT2 RX) Interrupt Request */
+#define	DMA9_IRQ			0x00000004	/* DMA Channel 9 (SPORT2 TX) Interrupt Request */
+#define	DMA10_IRQ			0x00000008	/* DMA Channel 10 (SPORT3 RX) Interrupt	Request */
+#define	DMA11_IRQ			0x00000010	/* DMA Channel 11 (SPORT3 TX) Interrupt	Request */
+#define	DMA12_IRQ			0x00000020	/* DMA Channel 12 Interrupt Request */
+#define	DMA13_IRQ			0x00000040	/* DMA Channel 13 Interrupt Request */
+#define	DMA14_IRQ			0x00000080	/* DMA Channel 14 (SPI1) Interrupt Request */
+#define	DMA15_IRQ			0x00000100	/* DMA Channel 15 (SPI2) Interrupt Request */
+#define	DMA16_IRQ			0x00000200	/* DMA Channel 16 (UART1 RX) Interrupt Request */
+#define	DMA17_IRQ			0x00000400	/* DMA Channel 17 (UART1 TX) Interrupt Request */
+#define	DMA18_IRQ			0x00000800	/* DMA Channel 18 (UART2 RX) Interrupt Request */
+#define	DMA19_IRQ			0x00001000	/* DMA Channel 19 (UART2 TX) Interrupt Request */
+#define	TWI0_IRQ			0x00002000	/* TWI0	Interrupt Request */
+#define	TWI1_IRQ			0x00004000	/* TWI1	Interrupt Request */
+#define	CAN_RX_IRQ			0x00008000	/* CAN Receive Interrupt Request */
+#define	CAN_TX_IRQ			0x00010000	/* CAN Transmit	Interrupt Request */
+#define	MDMA1_0_IRQ			0x00020000	/* MemDMA1 Stream 0 Interrupt Request */
+#define	MDMA1_1_IRQ			0x00040000	/* MemDMA1 Stream 1 Interrupt Request */
+#define	MXVR_STAT_IRQ			0x00080000	/* MXVR	Status Interrupt Request */
+#define	MXVR_CM_IRQ			0x00100000	/* MXVR	Control	Message	Interrupt Request */
+#define	MXVR_AP_IRQ			0x00200000	/* MXVR	Asynchronous Packet Interrupt */
+
+/* the following are for backwards compatibility */
+#define	MDMA0_IRQ		MDMA1_0_IRQ
+#define	MDMA1_IRQ		MDMA1_1_IRQ
+
+#ifdef _MISRA_RULES
+#define	_MF15 0xFu
+#define	_MF7 7u
+#else
+#define	_MF15 0xF
+#define	_MF7 7
+#endif /* _MISRA_RULES */
+
+/* SIC_IMASKx Masks											 */
+#define	SIC_UNMASK_ALL	0x00000000					/* Unmask all peripheral interrupts */
+#define	SIC_MASK_ALL	0xFFFFFFFF					/* Mask	all peripheral interrupts */
+#ifdef _MISRA_RULES
+#define	SIC_MASK(x)		(1 << ((x)&0x1Fu))					/* Mask	Peripheral #x interrupt */
+#define	SIC_UNMASK(x)	(0xFFFFFFFFu ^ (1 << ((x)&0x1Fu)))	/* Unmask Peripheral #x	interrupt */
+#else
+#define	SIC_MASK(x)		(1 << ((x)&0x1F))					/* Mask	Peripheral #x interrupt */
+#define	SIC_UNMASK(x)	(0xFFFFFFFF ^ (1 << ((x)&0x1F)))	/* Unmask Peripheral #x	interrupt */
+#endif /* _MISRA_RULES */
+
+/* SIC_IWRx Masks											 */
+#define	IWR_DISABLE_ALL	0x00000000					/* Wakeup Disable all peripherals */
+#define	IWR_ENABLE_ALL	0xFFFFFFFF					/* Wakeup Enable all peripherals */
+#ifdef _MISRA_RULES
+#define	IWR_ENABLE(x)	(1 << ((x)&0x1Fu))					/* Wakeup Enable Peripheral #x */
+#define	IWR_DISABLE(x)	(0xFFFFFFFFu ^ (1 << ((x)&0x1Fu)))	/* Wakeup Disable Peripheral #x */
+#else
+#define	IWR_ENABLE(x)	(1 << ((x)&0x1F))					/* Wakeup Enable Peripheral #x */
+#define	IWR_DISABLE(x)	(0xFFFFFFFF ^ (1 << ((x)&0x1F)))	/* Wakeup Disable Peripheral #x */
+#endif /* _MISRA_RULES */
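+
+/*
+ * Usage sketch (illustrative only): SIC_MASK()/SIC_UNMASK() and
+ * IWR_ENABLE()/IWR_DISABLE() build read-modify-write values from a
+ * peripheral's bit position (taken modulo 32) within the selected
+ * register.  Assuming the first interrupt mask MMR define from the
+ * register map earlier in this header (SIC_IMASK0 here) and the generic
+ * bfin_read32()/bfin_write32() accessors, masking peripheral #7 of the
+ * first group might look like:
+ *
+ *	bfin_write32(SIC_IMASK0, bfin_read32(SIC_IMASK0) | SIC_MASK(7));
+ */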
+
+/*  *********  PARALLEL	PERIPHERAL INTERFACE (PPI) MASKS ****************   */
+/*  PPI_CONTROL	Masks	      */
+#define	PORT_EN		0x0001	/* PPI Port Enable  */
+#define	PORT_DIR	0x0002	/* PPI Port Direction	    */
+#define	XFR_TYPE	0x000C	/* PPI Transfer	Type  */
+#define	PORT_CFG	0x0030	/* PPI Port Configuration */
+#define	FLD_SEL		0x0040	/* PPI Active Field Select */
+#define	PACK_EN		0x0080	/* PPI Packing Mode */
+/* previous versions of	defBF539.h erroneously included	DMA32 (PPI 32-bit DMA Enable) */
+#define	SKIP_EN		0x0200	/* PPI Skip Element Enable */
+#define	SKIP_EO		0x0400	/* PPI Skip Even/Odd Elements */
+#define	DLENGTH		0x3800	/* PPI Data Length  */
+#define	DLEN_8		0x0		/* Data	Length = 8 Bits */
+#define	DLEN_10		0x0800		/* Data	Length = 10 Bits */
+#define	DLEN_11		0x1000		/* Data	Length = 11 Bits */
+#define	DLEN_12		0x1800		/* Data	Length = 12 Bits */
+#define	DLEN_13		0x2000		/* Data	Length = 13 Bits */
+#define	DLEN_14		0x2800		/* Data	Length = 14 Bits */
+#define	DLEN_15		0x3000		/* Data	Length = 15 Bits */
+#define	DLEN_16		0x3800		/* Data	Length = 16 Bits */
+#ifdef _MISRA_RULES
+#define	DLEN(x)		((((x)-9u) & 0x07u) << 11)  /* PPI Data	Length (only works for x=10-->x=16) */
+#else
+#define	DLEN(x)		((((x)-9) & 0x07) << 11)  /* PPI Data Length (only works for x=10-->x=16) */
+#endif /* _MISRA_RULES */
+#define	POL			0xC000	/* PPI Signal Polarities       */
+#define	POLC		0x4000		/* PPI Clock Polarity */
+#define	POLS		0x8000		/* PPI Frame Sync Polarity */
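+
+/*
+ * Usage sketch (illustrative only): a PPI_CONTROL value is assembled by
+ * OR-ing the field masks above; DLEN() only encodes lengths 10..16, while
+ * 8-bit data uses DLEN_8 (0x0).  Enabling the port with packing and
+ * 12-bit data might look like:
+ *
+ *	u16 ppi_ctl = PORT_EN | PACK_EN | DLEN(12);
+ */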
+
+
+/* PPI_STATUS Masks					     */
+#define	FLD			0x0400	/* Field Indicator   */
+#define	FT_ERR		0x0800	/* Frame Track Error */
+#define	OVR			0x1000	/* FIFO	Overflow Error */
+#define	UNDR		0x2000	/* FIFO	Underrun Error */
+#define	ERR_DET		0x4000	/* Error Detected Indicator */
+#define	ERR_NCOR	0x8000	/* Error Not Corrected Indicator */
+
+
+/* **********  DMA CONTROLLER MASKS  ***********************/
+
+/* DMAx_PERIPHERAL_MAP,	MDMA_yy_PERIPHERAL_MAP Masks */
+
+#define	CTYPE			0x0040	/* DMA Channel Type Indicator */
+#define	CTYPE_P			0x6		/* DMA Channel Type Indicator BIT POSITION */
+#define	PCAP8			0x0080	/* DMA 8-bit Operation Indicator   */
+#define	PCAP16			0x0100	/* DMA 16-bit Operation	Indicator */
+#define	PCAP32			0x0200	/* DMA 32-bit Operation	Indicator */
+#define	PCAPWR			0x0400	/* DMA Write Operation Indicator */
+#define	PCAPRD			0x0800	/* DMA Read Operation Indicator */
+#define	PMAP			0xF000	/* DMA Peripheral Map Field */
+
+/* PMAP	Encodings For DMA Controller 0 */
+#define	PMAP_PPI		0x0000	/* PMAP	PPI Port DMA */
+#define	PMAP_SPORT0RX	0x1000	/* PMAP	SPORT0 Receive DMA */
+#define	PMAP_SPORT0TX	0x2000	/* PMAP	SPORT0 Transmit	DMA */
+#define	PMAP_SPORT1RX	0x3000	/* PMAP	SPORT1 Receive DMA */
+#define	PMAP_SPORT1TX	0x4000	/* PMAP	SPORT1 Transmit	DMA */
+#define	PMAP_SPI0		0x5000	/* PMAP	SPI DMA */
+#define	PMAP_UART0RX		0x6000	/* PMAP	UART0 Receive DMA */
+#define	PMAP_UART0TX		0x7000	/* PMAP	UART0 Transmit DMA */
+
+/* PMAP	Encodings For DMA Controller 1 */
+#define	PMAP_SPORT2RX	    0x0000  /* PMAP SPORT2 Receive DMA */
+#define	PMAP_SPORT2TX	    0x1000  /* PMAP SPORT2 Transmit DMA */
+#define	PMAP_SPORT3RX	    0x2000  /* PMAP SPORT3 Receive DMA */
+#define	PMAP_SPORT3TX	    0x3000  /* PMAP SPORT3 Transmit DMA */
+#define	PMAP_SPI1	    0x6000  /* PMAP SPI1 DMA */
+#define	PMAP_SPI2	    0x7000  /* PMAP SPI2 DMA */
+#define	PMAP_UART1RX	    0x8000  /* PMAP UART1 Receive DMA */
+#define	PMAP_UART1TX	    0x9000  /* PMAP UART1 Transmit DMA */
+#define	PMAP_UART2RX	    0xA000  /* PMAP UART2 Receive DMA */
+#define	PMAP_UART2TX	    0xB000  /* PMAP UART2 Transmit DMA */
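+
+/*
+ * Usage sketch (illustrative only): the PMAP field of a channel's
+ * DMAx_PERIPHERAL_MAP register reports which peripheral the channel is
+ * currently bound to.  Assuming the generic 16-bit MMR accessor
+ * bfin_read16(), checking that channel 14 is routed to SPI1 might look
+ * like:
+ *
+ *	if ((bfin_read16(DMA14_PERIPHERAL_MAP) & PMAP) == PMAP_SPI1)
+ *		... channel 14 serves SPI1 ...
+ */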
+
+
+/*  *************  GENERAL PURPOSE TIMER MASKS	******************** */
+/* PWM Timer bit definitions */
+/* TIMER_ENABLE	Register */
+#define	TIMEN0			0x0001	/* Enable Timer	0 */
+#define	TIMEN1			0x0002	/* Enable Timer	1 */
+#define	TIMEN2			0x0004	/* Enable Timer	2 */
+
+#define	TIMEN0_P		0x00
+#define	TIMEN1_P		0x01
+#define	TIMEN2_P		0x02
+
+/* TIMER_DISABLE Register */
+#define	TIMDIS0			0x0001	/* Disable Timer 0 */
+#define	TIMDIS1			0x0002	/* Disable Timer 1 */
+#define	TIMDIS2			0x0004	/* Disable Timer 2 */
+
+#define	TIMDIS0_P		0x00
+#define	TIMDIS1_P		0x01
+#define	TIMDIS2_P		0x02
+
+/* TIMER_STATUS	Register */
+#define	TIMIL0			0x0001	/* Timer 0 Interrupt */
+#define	TIMIL1			0x0002	/* Timer 1 Interrupt */
+#define	TIMIL2			0x0004	/* Timer 2 Interrupt */
+#define	TOVF_ERR0		0x0010	/* Timer 0 Counter Overflow */
+#define	TOVF_ERR1		0x0020	/* Timer 1 Counter Overflow */
+#define	TOVF_ERR2		0x0040	/* Timer 2 Counter Overflow */
+#define	TRUN0			0x1000	/* Timer 0 Slave Enable	Status */
+#define	TRUN1			0x2000	/* Timer 1 Slave Enable	Status */
+#define	TRUN2			0x4000	/* Timer 2 Slave Enable	Status */
+
+#define	TIMIL0_P		0x00
+#define	TIMIL1_P		0x01
+#define	TIMIL2_P		0x02
+#define	TOVF_ERR0_P		0x04
+#define	TOVF_ERR1_P		0x05
+#define	TOVF_ERR2_P		0x06
+#define	TRUN0_P			0x0C
+#define	TRUN1_P			0x0D
+#define	TRUN2_P			0x0E
+
+/* Alternate Deprecated	Macros Provided	For Backwards Code Compatibility */
+#define	TOVL_ERR0		TOVF_ERR0
+#define	TOVL_ERR1		TOVF_ERR1
+#define	TOVL_ERR2		TOVF_ERR2
+#define	TOVL_ERR0_P		TOVF_ERR0_P
+#define	TOVL_ERR1_P	TOVF_ERR1_P
+#define	TOVL_ERR2_P	TOVF_ERR2_P
+
+/* TIMERx_CONFIG Registers */
+#define	PWM_OUT			0x0001
+#define	WDTH_CAP		0x0002
+#define	EXT_CLK			0x0003
+#define	PULSE_HI		0x0004
+#define	PERIOD_CNT		0x0008
+#define	IRQ_ENA			0x0010
+#define	TIN_SEL			0x0020
+#define	OUT_DIS			0x0040
+#define	CLK_SEL			0x0080
+#define	TOGGLE_HI		0x0100
+#define	EMU_RUN			0x0200
+#ifdef _MISRA_RULES
+#define	ERR_TYP(x)		(((x) &	0x03u) << 14)
+#else
+#define	ERR_TYP(x)		(((x) &	0x03) << 14)
+#endif /* _MISRA_RULES */
+
+#define	TMODE_P0		0x00
+#define	TMODE_P1		0x01
+#define	PULSE_HI_P		0x02
+#define	PERIOD_CNT_P	0x03
+#define	IRQ_ENA_P		0x04
+#define	TIN_SEL_P		0x05
+#define	OUT_DIS_P		0x06
+#define	CLK_SEL_P		0x07
+#define	TOGGLE_HI_P		0x08
+#define	EMU_RUN_P		0x09
+#define	ERR_TYP_P0		0x0E
+#define	ERR_TYP_P1		0x0F
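+
+/*
+ * Usage sketch (illustrative only): a TIMERx_CONFIG value is built by
+ * OR-ing the mode bits above, e.g. PWM output mode that counts whole
+ * periods and raises an interrupt at the end of each one:
+ *
+ *	u16 tcfg = PWM_OUT | PERIOD_CNT | IRQ_ENA;
+ */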
+
+/* *********************  ASYNCHRONOUS MEMORY CONTROLLER MASKS	************* */
+/* EBIU_AMGCTL Masks */
+#define	AMCKEN		0x0001	/* Enable CLKOUT */
+#define	AMBEN_NONE	0x0000	/* All Banks Disabled */
+#define	AMBEN_B0	0x0002	/* Enable Asynchronous Memory Bank 0 only */
+#define	AMBEN_B0_B1	0x0004	/* Enable Asynchronous Memory Banks 0 &	1 only */
+#define	AMBEN_B0_B1_B2	0x0006	/* Enable Asynchronous Memory Banks 0, 1, and 2 */
+#define	AMBEN_ALL	0x0008	/* Enable Asynchronous Memory Banks (all) 0, 1,	2, and 3 */
+#define	CDPRIO		0x0100	/* DMA has priority over core for external accesses */
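+
+/*
+ * Usage sketch (illustrative only): EBIU_AMGCTL is normally programmed
+ * once during boot.  Assuming the EBIU_AMGCTL MMR define from earlier in
+ * this header and the generic 16-bit accessor bfin_write16(), enabling
+ * CLKOUT and all four asynchronous banks might look like:
+ *
+ *	bfin_write16(EBIU_AMGCTL, AMCKEN | AMBEN_ALL);
+ */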
+
+/* EBIU_AMGCTL Bit Positions */
+#define	AMCKEN_P		0x0000	/* Enable CLKOUT */
+#define	AMBEN_P0		0x0001	/* Asynchronous	Memory Enable, 000 - banks 0-3 disabled, 001 - Bank 0 enabled */
+#define	AMBEN_P1		0x0002	/* Asynchronous	Memory Enable, 010 - banks 0&1 enabled,	 011 - banks 0-3 enabled */
+#define	AMBEN_P2		0x0003	/* Asynchronous	Memory Enable, 1xx - All banks (bank 0,	1, 2, and 3) enabled */
+
+/* EBIU_AMBCTL0	Masks */
+#define	B0RDYEN			0x00000001  /* Bank 0 RDY Enable, 0=disable, 1=enable */
+#define	B0RDYPOL		0x00000002  /* Bank 0 RDY Active high, 0=active	low, 1=active high */
+#define	B0TT_1			0x00000004  /* Bank 0 Transition Time from Read	to Write = 1 cycle */
+#define	B0TT_2			0x00000008  /* Bank 0 Transition Time from Read	to Write = 2 cycles */
+#define	B0TT_3			0x0000000C  /* Bank 0 Transition Time from Read	to Write = 3 cycles */
+#define	B0TT_4			0x00000000  /* Bank 0 Transition Time from Read	to Write = 4 cycles */
+#define	B0ST_1			0x00000010  /* Bank 0 Setup Time from AOE asserted to Read/Write asserted=1 cycle */
+#define	B0ST_2			0x00000020  /* Bank 0 Setup Time from AOE asserted to Read/Write asserted=2 cycles */
+#define	B0ST_3			0x00000030  /* Bank 0 Setup Time from AOE asserted to Read/Write asserted=3 cycles */
+#define	B0ST_4			0x00000000  /* Bank 0 Setup Time from AOE asserted to Read/Write asserted=4 cycles */
+#define	B0HT_1			0x00000040  /* Bank 0 Hold Time	from Read/Write	deasserted to AOE deasserted = 1 cycle */
+#define	B0HT_2			0x00000080  /* Bank 0 Hold Time	from Read/Write	deasserted to AOE deasserted = 2 cycles */
+#define	B0HT_3			0x000000C0  /* Bank 0 Hold Time	from Read/Write	deasserted to AOE deasserted = 3 cycles */
+#define	B0HT_0			0x00000000  /* Bank 0 Hold Time	from Read/Write	deasserted to AOE deasserted = 0 cycles */
+#define	B0RAT_1			0x00000100  /* Bank 0 Read Access Time = 1 cycle */
+#define	B0RAT_2			0x00000200  /* Bank 0 Read Access Time = 2 cycles */
+#define	B0RAT_3			0x00000300  /* Bank 0 Read Access Time = 3 cycles */
+#define	B0RAT_4			0x00000400  /* Bank 0 Read Access Time = 4 cycles */
+#define	B0RAT_5			0x00000500  /* Bank 0 Read Access Time = 5 cycles */
+#define	B0RAT_6			0x00000600  /* Bank 0 Read Access Time = 6 cycles */
+#define	B0RAT_7			0x00000700  /* Bank 0 Read Access Time = 7 cycles */
+#define	B0RAT_8			0x00000800  /* Bank 0 Read Access Time = 8 cycles */
+#define	B0RAT_9			0x00000900  /* Bank 0 Read Access Time = 9 cycles */
+#define	B0RAT_10		0x00000A00  /* Bank 0 Read Access Time = 10 cycles */
+#define	B0RAT_11		0x00000B00  /* Bank 0 Read Access Time = 11 cycles */
+#define	B0RAT_12		0x00000C00  /* Bank 0 Read Access Time = 12 cycles */
+#define	B0RAT_13		0x00000D00  /* Bank 0 Read Access Time = 13 cycles */
+#define	B0RAT_14		0x00000E00  /* Bank 0 Read Access Time = 14 cycles */
+#define	B0RAT_15		0x00000F00  /* Bank 0 Read Access Time = 15 cycles */
+#define	B0WAT_1			0x00001000  /* Bank 0 Write Access Time	= 1 cycle */
+#define	B0WAT_2			0x00002000  /* Bank 0 Write Access Time	= 2 cycles */
+#define	B0WAT_3			0x00003000  /* Bank 0 Write Access Time	= 3 cycles */
+#define	B0WAT_4			0x00004000  /* Bank 0 Write Access Time	= 4 cycles */
+#define	B0WAT_5			0x00005000  /* Bank 0 Write Access Time	= 5 cycles */
+#define	B0WAT_6			0x00006000  /* Bank 0 Write Access Time	= 6 cycles */
+#define	B0WAT_7			0x00007000  /* Bank 0 Write Access Time	= 7 cycles */
+#define	B0WAT_8			0x00008000  /* Bank 0 Write Access Time	= 8 cycles */
+#define	B0WAT_9			0x00009000  /* Bank 0 Write Access Time	= 9 cycles */
+#define	B0WAT_10		0x0000A000  /* Bank 0 Write Access Time	= 10 cycles */
+#define	B0WAT_11		0x0000B000  /* Bank 0 Write Access Time	= 11 cycles */
+#define	B0WAT_12		0x0000C000  /* Bank 0 Write Access Time	= 12 cycles */
+#define	B0WAT_13		0x0000D000  /* Bank 0 Write Access Time	= 13 cycles */
+#define	B0WAT_14		0x0000E000  /* Bank 0 Write Access Time	= 14 cycles */
+#define	B0WAT_15		0x0000F000  /* Bank 0 Write Access Time	= 15 cycles */
+#define	B1RDYEN			0x00010000  /* Bank 1 RDY enable, 0=disable, 1=enable */
+#define	B1RDYPOL		0x00020000  /* Bank 1 RDY Active high, 0=active	low, 1=active high */
+#define	B1TT_1			0x00040000  /* Bank 1 Transition Time from Read	to Write = 1 cycle */
+#define	B1TT_2			0x00080000  /* Bank 1 Transition Time from Read	to Write = 2 cycles */
+#define	B1TT_3			0x000C0000  /* Bank 1 Transition Time from Read	to Write = 3 cycles */
+#define	B1TT_4			0x00000000  /* Bank 1 Transition Time from Read	to Write = 4 cycles */
+#define	B1ST_1			0x00100000  /* Bank 1 Setup Time from AOE asserted to Read or Write asserted = 1 cycle */
+#define	B1ST_2			0x00200000  /* Bank 1 Setup Time from AOE asserted to Read or Write asserted = 2 cycles */
+#define	B1ST_3			0x00300000  /* Bank 1 Setup Time from AOE asserted to Read or Write asserted = 3 cycles */
+#define	B1ST_4			0x00000000  /* Bank 1 Setup Time from AOE asserted to Read or Write asserted = 4 cycles */
+#define	B1HT_1			0x00400000  /* Bank 1 Hold Time	from Read or Write deasserted to AOE deasserted	= 1 cycle */
+#define	B1HT_2			0x00800000  /* Bank 1 Hold Time	from Read or Write deasserted to AOE deasserted	= 2 cycles */
+#define	B1HT_3			0x00C00000  /* Bank 1 Hold Time	from Read or Write deasserted to AOE deasserted	= 3 cycles */
+#define	B1HT_0			0x00000000  /* Bank 1 Hold Time	from Read or Write deasserted to AOE deasserted	= 0 cycles */
+#define	B1RAT_1			0x01000000  /* Bank 1 Read Access Time = 1 cycle */
+#define	B1RAT_2			0x02000000  /* Bank 1 Read Access Time = 2 cycles */
+#define	B1RAT_3			0x03000000  /* Bank 1 Read Access Time = 3 cycles */
+#define	B1RAT_4			0x04000000  /* Bank 1 Read Access Time = 4 cycles */
+#define	B1RAT_5			0x05000000  /* Bank 1 Read Access Time = 5 cycles */
+#define	B1RAT_6			0x06000000  /* Bank 1 Read Access Time = 6 cycles */
+#define	B1RAT_7			0x07000000  /* Bank 1 Read Access Time = 7 cycles */
+#define	B1RAT_8			0x08000000  /* Bank 1 Read Access Time = 8 cycles */
+#define	B1RAT_9			0x09000000  /* Bank 1 Read Access Time = 9 cycles */
+#define	B1RAT_10		0x0A000000  /* Bank 1 Read Access Time = 10 cycles */
+#define	B1RAT_11		0x0B000000  /* Bank 1 Read Access Time = 11 cycles */
+#define	B1RAT_12		0x0C000000  /* Bank 1 Read Access Time = 12 cycles */
+#define	B1RAT_13		0x0D000000  /* Bank 1 Read Access Time = 13 cycles */
+#define	B1RAT_14		0x0E000000  /* Bank 1 Read Access Time = 14 cycles */
+#define	B1RAT_15		0x0F000000  /* Bank 1 Read Access Time = 15 cycles */
+#define	B1WAT_1			0x10000000 /* Bank 1 Write Access Time = 1 cycle */
+#define	B1WAT_2			0x20000000  /* Bank 1 Write Access Time	= 2 cycles */
+#define	B1WAT_3			0x30000000  /* Bank 1 Write Access Time	= 3 cycles */
+#define	B1WAT_4			0x40000000  /* Bank 1 Write Access Time	= 4 cycles */
+#define	B1WAT_5			0x50000000  /* Bank 1 Write Access Time	= 5 cycles */
+#define	B1WAT_6			0x60000000  /* Bank 1 Write Access Time	= 6 cycles */
+#define	B1WAT_7			0x70000000  /* Bank 1 Write Access Time	= 7 cycles */
+#define	B1WAT_8			0x80000000  /* Bank 1 Write Access Time	= 8 cycles */
+#define	B1WAT_9			0x90000000  /* Bank 1 Write Access Time	= 9 cycles */
+#define	B1WAT_10		0xA0000000  /* Bank 1 Write Access Time	= 10 cycles */
+#define	B1WAT_11		0xB0000000  /* Bank 1 Write Access Time	= 11 cycles */
+#define	B1WAT_12		0xC0000000  /* Bank 1 Write Access Time	= 12 cycles */
+#define	B1WAT_13		0xD0000000  /* Bank 1 Write Access Time	= 13 cycles */
+#define	B1WAT_14		0xE0000000  /* Bank 1 Write Access Time	= 14 cycles */
+#define	B1WAT_15		0xF0000000  /* Bank 1 Write Access Time	= 15 cycles */
+
+/* EBIU_AMBCTL1	Masks */
+#define	B2RDYEN			0x00000001  /* Bank 2 RDY Enable, 0=disable, 1=enable */
+#define	B2RDYPOL		0x00000002  /* Bank 2 RDY Active high, 0=active	low, 1=active high */
+#define	B2TT_1			0x00000004  /* Bank 2 Transition Time from Read	to Write = 1 cycle */
+#define	B2TT_2			0x00000008  /* Bank 2 Transition Time from Read	to Write = 2 cycles */
+#define	B2TT_3			0x0000000C  /* Bank 2 Transition Time from Read	to Write = 3 cycles */
+#define	B2TT_4			0x00000000  /* Bank 2 Transition Time from Read	to Write = 4 cycles */
+#define	B2ST_1			0x00000010  /* Bank 2 Setup Time from AOE asserted to Read or Write asserted = 1 cycle */
+#define	B2ST_2			0x00000020  /* Bank 2 Setup Time from AOE asserted to Read or Write asserted = 2 cycles */
+#define	B2ST_3			0x00000030  /* Bank 2 Setup Time from AOE asserted to Read or Write asserted = 3 cycles */
+#define	B2ST_4			0x00000000  /* Bank 2 Setup Time from AOE asserted to Read or Write asserted = 4 cycles */
+#define	B2HT_1			0x00000040  /* Bank 2 Hold Time	from Read or Write deasserted to AOE deasserted	= 1 cycle */
+#define	B2HT_2			0x00000080  /* Bank 2 Hold Time	from Read or Write deasserted to AOE deasserted	= 2 cycles */
+#define	B2HT_3			0x000000C0  /* Bank 2 Hold Time	from Read or Write deasserted to AOE deasserted	= 3 cycles */
+#define	B2HT_0			0x00000000  /* Bank 2 Hold Time	from Read or Write deasserted to AOE deasserted	= 0 cycles */
+#define	B2RAT_1			0x00000100  /* Bank 2 Read Access Time = 1 cycle */
+#define	B2RAT_2			0x00000200  /* Bank 2 Read Access Time = 2 cycles */
+#define	B2RAT_3			0x00000300  /* Bank 2 Read Access Time = 3 cycles */
+#define	B2RAT_4			0x00000400  /* Bank 2 Read Access Time = 4 cycles */
+#define	B2RAT_5			0x00000500  /* Bank 2 Read Access Time = 5 cycles */
+#define	B2RAT_6			0x00000600  /* Bank 2 Read Access Time = 6 cycles */
+#define	B2RAT_7			0x00000700  /* Bank 2 Read Access Time = 7 cycles */
+#define	B2RAT_8			0x00000800  /* Bank 2 Read Access Time = 8 cycles */
+#define	B2RAT_9			0x00000900  /* Bank 2 Read Access Time = 9 cycles */
+#define	B2RAT_10		0x00000A00  /* Bank 2 Read Access Time = 10 cycles */
+#define	B2RAT_11		0x00000B00  /* Bank 2 Read Access Time = 11 cycles */
+#define	B2RAT_12		0x00000C00  /* Bank 2 Read Access Time = 12 cycles */
+#define	B2RAT_13		0x00000D00  /* Bank 2 Read Access Time = 13 cycles */
+#define	B2RAT_14		0x00000E00  /* Bank 2 Read Access Time = 14 cycles */
+#define	B2RAT_15		0x00000F00  /* Bank 2 Read Access Time = 15 cycles */
+#define	B2WAT_1			0x00001000  /* Bank 2 Write Access Time	= 1 cycle */
+#define	B2WAT_2			0x00002000  /* Bank 2 Write Access Time	= 2 cycles */
+#define	B2WAT_3			0x00003000  /* Bank 2 Write Access Time	= 3 cycles */
+#define	B2WAT_4			0x00004000  /* Bank 2 Write Access Time	= 4 cycles */
+#define	B2WAT_5			0x00005000  /* Bank 2 Write Access Time	= 5 cycles */
+#define	B2WAT_6			0x00006000  /* Bank 2 Write Access Time	= 6 cycles */
+#define	B2WAT_7			0x00007000  /* Bank 2 Write Access Time	= 7 cycles */
+#define	B2WAT_8			0x00008000  /* Bank 2 Write Access Time	= 8 cycles */
+#define	B2WAT_9			0x00009000  /* Bank 2 Write Access Time	= 9 cycles */
+#define	B2WAT_10		0x0000A000  /* Bank 2 Write Access Time	= 10 cycles */
+#define	B2WAT_11		0x0000B000  /* Bank 2 Write Access Time	= 11 cycles */
+#define	B2WAT_12		0x0000C000  /* Bank 2 Write Access Time	= 12 cycles */
+#define	B2WAT_13		0x0000D000  /* Bank 2 Write Access Time	= 13 cycles */
+#define	B2WAT_14		0x0000E000  /* Bank 2 Write Access Time	= 14 cycles */
+#define	B2WAT_15		0x0000F000  /* Bank 2 Write Access Time	= 15 cycles */
+#define	B3RDYEN			0x00010000  /* Bank 3 RDY enable, 0=disable, 1=enable */
+#define	B3RDYPOL		0x00020000  /* Bank 3 RDY Active high, 0=active	low, 1=active high */
+#define	B3TT_1			0x00040000  /* Bank 3 Transition Time from Read	to Write = 1 cycle */
+#define	B3TT_2			0x00080000  /* Bank 3 Transition Time from Read	to Write = 2 cycles */
+#define	B3TT_3			0x000C0000  /* Bank 3 Transition Time from Read	to Write = 3 cycles */
+#define	B3TT_4			0x00000000  /* Bank 3 Transition Time from Read	to Write = 4 cycles */
+#define	B3ST_1			0x00100000  /* Bank 3 Setup Time from AOE asserted to Read or Write asserted = 1 cycle */
+#define	B3ST_2			0x00200000  /* Bank 3 Setup Time from AOE asserted to Read or Write asserted = 2 cycles */
+#define	B3ST_3			0x00300000  /* Bank 3 Setup Time from AOE asserted to Read or Write asserted = 3 cycles */
+#define	B3ST_4			0x00000000  /* Bank 3 Setup Time from AOE asserted to Read or Write asserted = 4 cycles */
+#define	B3HT_1			0x00400000  /* Bank 3 Hold Time	from Read or Write deasserted to AOE deasserted	= 1 cycle */
+#define	B3HT_2			0x00800000  /* Bank 3 Hold Time	from Read or Write deasserted to AOE deasserted	= 2 cycles */
+#define	B3HT_3			0x00C00000  /* Bank 3 Hold Time	from Read or Write deasserted to AOE deasserted	= 3 cycles */
+#define	B3HT_0			0x00000000  /* Bank 3 Hold Time	from Read or Write deasserted to AOE deasserted	= 0 cycles */
+#define	B3RAT_1			0x01000000 /* Bank 3 Read Access Time =	1 cycle */
+#define	B3RAT_2			0x02000000  /* Bank 3 Read Access Time = 2 cycles */
+#define	B3RAT_3			0x03000000  /* Bank 3 Read Access Time = 3 cycles */
+#define	B3RAT_4			0x04000000  /* Bank 3 Read Access Time = 4 cycles */
+#define	B3RAT_5			0x05000000  /* Bank 3 Read Access Time = 5 cycles */
+#define	B3RAT_6			0x06000000  /* Bank 3 Read Access Time = 6 cycles */
+#define	B3RAT_7			0x07000000  /* Bank 3 Read Access Time = 7 cycles */
+#define	B3RAT_8			0x08000000  /* Bank 3 Read Access Time = 8 cycles */
+#define	B3RAT_9			0x09000000  /* Bank 3 Read Access Time = 9 cycles */
+#define	B3RAT_10		0x0A000000  /* Bank 3 Read Access Time = 10 cycles */
+#define	B3RAT_11		0x0B000000  /* Bank 3 Read Access Time = 11 cycles */
+#define	B3RAT_12		0x0C000000  /* Bank 3 Read Access Time = 12 cycles */
+#define	B3RAT_13		0x0D000000  /* Bank 3 Read Access Time = 13 cycles */
+#define	B3RAT_14		0x0E000000  /* Bank 3 Read Access Time = 14 cycles */
+#define	B3RAT_15		0x0F000000  /* Bank 3 Read Access Time = 15 cycles */
+#define	B3WAT_1			0x10000000 /* Bank 3 Write Access Time = 1 cycle */
+#define	B3WAT_2			0x20000000  /* Bank 3 Write Access Time	= 2 cycles */
+#define	B3WAT_3			0x30000000  /* Bank 3 Write Access Time	= 3 cycles */
+#define	B3WAT_4			0x40000000  /* Bank 3 Write Access Time	= 4 cycles */
+#define	B3WAT_5			0x50000000  /* Bank 3 Write Access Time	= 5 cycles */
+#define	B3WAT_6			0x60000000  /* Bank 3 Write Access Time	= 6 cycles */
+#define	B3WAT_7			0x70000000  /* Bank 3 Write Access Time	= 7 cycles */
+#define	B3WAT_8			0x80000000  /* Bank 3 Write Access Time	= 8 cycles */
+#define	B3WAT_9			0x90000000  /* Bank 3 Write Access Time	= 9 cycles */
+#define	B3WAT_10		0xA0000000  /* Bank 3 Write Access Time	= 10 cycles */
+#define	B3WAT_11		0xB0000000  /* Bank 3 Write Access Time	= 11 cycles */
+#define	B3WAT_12		0xC0000000  /* Bank 3 Write Access Time	= 12 cycles */
+#define	B3WAT_13		0xD0000000  /* Bank 3 Write Access Time	= 13 cycles */
+#define	B3WAT_14		0xE0000000  /* Bank 3 Write Access Time	= 14 cycles */
+#define	B3WAT_15		0xF0000000  /* Bank 3 Write Access Time	= 15 cycles */
+
+/* **********************  SDRAM CONTROLLER MASKS  *************************** */
+/* EBIU_SDGCTL Masks */
+#define	SCTLE			0x00000001 /* Enable SCLK[0], /SRAS, /SCAS, /SWE, SDQM[3:0] */
+#define	CL_2			0x00000008 /* SDRAM CAS	latency	= 2 cycles */
+#define	CL_3			0x0000000C /* SDRAM CAS	latency	= 3 cycles */
+#define	PFE				0x00000010 /* Enable SDRAM prefetch */
+#define	PFP				0x00000020 /* Prefetch has priority over AMC requests */
+#define	PASR_ALL		0x00000000	/* All 4 SDRAM Banks Refreshed In Self-Refresh */
+#define	PASR_B0_B1		0x00000010	/* SDRAM Banks 0 and 1 Are Refreshed In	Self-Refresh */
+#define	PASR_B0			0x00000020	/* Only	SDRAM Bank 0 Is	Refreshed In Self-Refresh */
+#define	TRAS_1			0x00000040 /* SDRAM tRAS = 1 cycle */
+#define	TRAS_2			0x00000080 /* SDRAM tRAS = 2 cycles */
+#define	TRAS_3			0x000000C0 /* SDRAM tRAS = 3 cycles */
+#define	TRAS_4			0x00000100 /* SDRAM tRAS = 4 cycles */
+#define	TRAS_5			0x00000140 /* SDRAM tRAS = 5 cycles */
+#define	TRAS_6			0x00000180 /* SDRAM tRAS = 6 cycles */
+#define	TRAS_7			0x000001C0 /* SDRAM tRAS = 7 cycles */
+#define	TRAS_8			0x00000200 /* SDRAM tRAS = 8 cycles */
+#define	TRAS_9			0x00000240 /* SDRAM tRAS = 9 cycles */
+#define	TRAS_10			0x00000280 /* SDRAM tRAS = 10 cycles */
+#define	TRAS_11			0x000002C0 /* SDRAM tRAS = 11 cycles */
+#define	TRAS_12			0x00000300 /* SDRAM tRAS = 12 cycles */
+#define	TRAS_13			0x00000340 /* SDRAM tRAS = 13 cycles */
+#define	TRAS_14			0x00000380 /* SDRAM tRAS = 14 cycles */
+#define	TRAS_15			0x000003C0 /* SDRAM tRAS = 15 cycles */
+#define	TRP_1			0x00000800 /* SDRAM tRP	= 1 cycle */
+#define	TRP_2			0x00001000 /* SDRAM tRP	= 2 cycles */
+#define	TRP_3			0x00001800 /* SDRAM tRP	= 3 cycles */
+#define	TRP_4			0x00002000 /* SDRAM tRP	= 4 cycles */
+#define	TRP_5			0x00002800 /* SDRAM tRP	= 5 cycles */
+#define	TRP_6			0x00003000 /* SDRAM tRP	= 6 cycles */
+#define	TRP_7			0x00003800 /* SDRAM tRP	= 7 cycles */
+#define	TRCD_1			0x00008000 /* SDRAM tRCD = 1 cycle */
+#define	TRCD_2			0x00010000 /* SDRAM tRCD = 2 cycles */
+#define	TRCD_3			0x00018000 /* SDRAM tRCD = 3 cycles */
+#define	TRCD_4			0x00020000 /* SDRAM tRCD = 4 cycles */
+#define	TRCD_5			0x00028000 /* SDRAM tRCD = 5 cycles */
+#define	TRCD_6			0x00030000 /* SDRAM tRCD = 6 cycles */
+#define	TRCD_7			0x00038000 /* SDRAM tRCD = 7 cycles */
+#define	TWR_1			0x00080000 /* SDRAM tWR	= 1 cycle */
+#define	TWR_2			0x00100000 /* SDRAM tWR	= 2 cycles */
+#define	TWR_3			0x00180000 /* SDRAM tWR	= 3 cycles */
+#define	PUPSD			0x00200000 /* Power-up start delay */
+#define	PSM				0x00400000 /* SDRAM power-up sequence =	Precharge, mode	register set, 8	CBR refresh cycles */
+#define	PSS				0x00800000 /* enable SDRAM power-up sequence on	next SDRAM access */
+#define	SRFS			0x01000000 /* Start SDRAM self-refresh mode */
+#define	EBUFE			0x02000000 /* Enable external buffering	timing */
+#define	FBBRW			0x04000000 /* Fast back-to-back	read write enable */
+#define	EMREN			0x10000000 /* Extended mode register enable */
+#define	TCSR			0x20000000 /* Temp compensated self refresh value 85 deg C */
+#define	CDDBG			0x40000000 /* Tristate SDRAM controls during bus grant */
+
+/* EBIU_SDBCTL Masks */
+#define	EBE				0x00000001 /* Enable SDRAM external bank */
+#define	EBSZ_16			0x00000000 /* SDRAM external bank size = 16MB */
+#define	EBSZ_32			0x00000002 /* SDRAM external bank size = 32MB */
+#define	EBSZ_64			0x00000004 /* SDRAM external bank size = 64MB */
+#define	EBSZ_128		0x00000006 /* SDRAM external bank size = 128MB */
+#define	EBSZ_256		0x00000008 /* SDRAM External Bank Size = 256MB */
+#define	EBSZ_512		0x0000000A /* SDRAM External Bank Size = 512MB */
+#define	EBCAW_8			0x00000000 /* SDRAM external bank column address width = 8 bits */
+#define	EBCAW_9			0x00000010 /* SDRAM external bank column address width = 9 bits */
+#define	EBCAW_10		0x00000020 /* SDRAM external bank column address width = 10 bits */
+#define	EBCAW_11		0x00000030 /* SDRAM external bank column address width = 11 bits */
+
+/* EBIU_SDSTAT Masks */
+#define	SDCI			0x00000001 /* SDRAM controller is idle */
+#define	SDSRA			0x00000002 /* SDRAM self refresh is active */
+#define	SDPUA			0x00000004 /* SDRAM power up active  */
+#define	SDRS			0x00000008 /* SDRAM is in reset	state */
+#define	SDEASE			0x00000010 /* SDRAM EAB	sticky error status - W1C */
+#define	BGSTAT			0x00000020 /* Bus granted */
+
+
+/*  ********************  TWO-WIRE INTERFACE (TWIx) MASKS  ***********************/
+/* TWIx_CLKDIV Macros (Use: *pTWIx_CLKDIV = CLKLOW(x)|CLKHI(y);	 ) */
+#ifdef _MISRA_RULES
+#define	CLKLOW(x)	((x) & 0xFFu)		/* Periods Clock Is Held Low */
+#define	CLKHI(y)	(((y)&0xFFu)<<0x8)	/* Periods Before New Clock Low */
+#else
+#define	CLKLOW(x)	((x) & 0xFF)		/* Periods Clock Is Held Low */
+#define	CLKHI(y)	(((y)&0xFF)<<0x8)	/* Periods Before New Clock Low */
+#endif /* _MISRA_RULES */
+
+/* TWIx_PRESCALE Masks								 */
+#define	PRESCALE	0x007F		/* SCLKs Per Internal Time Reference (10MHz) */
+#define	TWI_ENA		0x0080		/* TWI Enable		 */
+#define	SCCB		0x0200		/* SCCB	Compatibility Enable */
+
+/* TWIx_SLAVE_CTRL Masks								 */
+#define	SEN			0x0001		/* Slave Enable		 */
+#define	SADD_LEN	0x0002		/* Slave Address Length */
+#define	STDVAL		0x0004		/* Slave Transmit Data Valid */
+#define	NAK			0x0008		/* NAK/ACK* Generated At Conclusion Of Transfer */
+#define	GEN			0x0010		/* General Call Address Matching Enabled */
+
+/* TWIx_SLAVE_STAT Masks								 */
+#define	SDIR		0x0001		/* Slave Transfer Direction (Transmit/Receive*) */
+#define	GCALL		0x0002		/* General Call	Indicator */
+
+/* TWIx_MASTER_CTRL Masks						 */
+#define	MEN			0x0001		/* Master Mode Enable */
+#define	MADD_LEN	0x0002		/* Master Address Length */
+#define	MDIR		0x0004		/* Master Transmit Direction (RX/TX*) */
+#define	FAST		0x0008		/* Use Fast Mode Timing	Specs */
+#define	STOP		0x0010		/* Issue Stop Condition */
+#define	RSTART		0x0020		/* Repeat Start	or Stop* At End	Of Transfer */
+#define	DCNT		0x3FC0		/* Data	Bytes To Transfer */
+#define	SDAOVR		0x4000		/* Serial Data Override */
+#define	SCLOVR		0x8000		/* Serial Clock	Override */
+
+/* TWIx_MASTER_STAT Masks							 */
+#define	MPROG		0x0001		/* Master Transfer In Progress */
+#define	LOSTARB		0x0002		/* Lost	Arbitration Indicator (Xfer Aborted) */
+#define	ANAK		0x0004		/* Address Not Acknowledged */
+#define	DNAK		0x0008		/* Data	Not Acknowledged */
+#define	BUFRDERR	0x0010		/* Buffer Read Error */
+#define	BUFWRERR	0x0020		/* Buffer Write	Error */
+#define	SDASEN		0x0040		/* Serial Data Sense */
+#define	SCLSEN		0x0080		/* Serial Clock	Sense */
+#define	BUSBUSY		0x0100		/* Bus Busy Indicator */
+
+/* TWIx_INT_SRC	and TWIx_INT_ENABLE Masks */
+#define	SINIT		0x0001		/* Slave Transfer Initiated */
+#define	SCOMP		0x0002		/* Slave Transfer Complete */
+#define	SERR		0x0004		/* Slave Transfer Error */
+#define	SOVF		0x0008		/* Slave Overflow */
+#define	MCOMP		0x0010		/* Master Transfer Complete */
+#define	MERR		0x0020		/* Master Transfer Error */
+#define	XMTSERV		0x0040		/* Transmit FIFO Service */
+#define	RCVSERV		0x0080		/* Receive FIFO	Service */
+
+/* TWIx_FIFO_CTL Masks					 */
+#define	XMTFLUSH	0x0001		/* Transmit Buffer Flush */
+#define	RCVFLUSH	0x0002		/* Receive Buffer Flush */
+#define	XMTINTLEN	0x0004		/* Transmit Buffer Interrupt Length */
+#define	RCVINTLEN	0x0008		/* Receive Buffer Interrupt Length */
+
+/* TWIx_FIFO_STAT Masks								 */
+#define	XMTSTAT		0x0003		/* Transmit FIFO Status */
+#define	XMT_EMPTY	0x0000		/*		Transmit FIFO Empty */
+#define	XMT_HALF	0x0001		/*		Transmit FIFO Has 1 Byte To Write */
+#define	XMT_FULL	0x0003		/*		Transmit FIFO Full (2 Bytes To Write) */
+
+#define	RCVSTAT		0x000C		/* Receive FIFO	Status */
+#define	RCV_EMPTY	0x0000		/*		Receive	FIFO Empty */
+#define	RCV_HALF	0x0004		/*		Receive	FIFO Has 1 Byte	To Read */
+#define	RCV_FULL	0x000C		/*		Receive	FIFO Full (2 Bytes To Read) */
+
+#endif
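
For illustration only (not part of the patch itself): a minimal sketch of how the CLKLOW()/CLKHI() helpers added above are meant to be combined, following the "Use: *pTWIx_CLKDIV = CLKLOW(x)|CLKHI(y);" note in the header. The TWI0_CLKDIV register address and the example period counts are assumptions for the sketch; in-tree kernel code would normally use the Blackfin MMIO accessors rather than a raw pointer.

#include <stdint.h>

/* Fallback definitions so the sketch compiles standalone; in the kernel
 * these come from the defBF538.h additions above. */
#ifndef CLKLOW
#define CLKLOW(x)	((x) & 0xFF)		/* periods SCL is held low */
#define CLKHI(y)	(((y) & 0xFF) << 0x8)	/* periods before next SCL low */
#endif

/* Assumed TWI0_CLKDIV MMR address, used here purely for illustration. */
#define TWI0_CLKDIV_ADDR	0xFFC01400u

static inline void twi0_set_clkdiv(uint8_t low_periods, uint8_t high_periods)
{
	volatile uint16_t *clkdiv = (volatile uint16_t *)TWI0_CLKDIV_ADDR;

	/* Low byte = clock-low periods, high byte = clock-high periods. */
	*clkdiv = (uint16_t)(CLKLOW(low_periods) | CLKHI(high_periods));
}

/* e.g. twi0_set_clkdiv(10, 10) gives a roughly 50/50 SCL duty cycle. */
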
diff --git a/arch/blackfin/mach-bf538/include/mach/defBF539.h b/arch/blackfin/mach-bf538/include/mach/defBF539.h
index 7a8ac5f..8100bcd 100644
--- a/arch/blackfin/mach-bf538/include/mach/defBF539.h
+++ b/arch/blackfin/mach-bf538/include/mach/defBF539.h
@@ -1,859 +1,13 @@
 /*
- * Copyright 2008-2009 Analog Devices Inc.
+ * Copyright 2008-2010 Analog Devices Inc.
  *
  * Licensed under the ADI BSD license or the GPL-2 (or later)
  */
 
-/* SYSTEM & MM REGISTER BIT & ADDRESS DEFINITIONS FOR ADSP-BF538/9 */
-
 #ifndef _DEF_BF539_H
 #define _DEF_BF539_H
 
-/* include all Core registers and bit definitions */
-#include <asm/def_LPBlackfin.h>
-
-
-/*********************************************************************************** */
-/* System MMR Register Map */
-/*********************************************************************************** */
-/* Clock/Regulator Control (0xFFC00000 - 0xFFC000FF) */
-#define	PLL_CTL			0xFFC00000	/* PLL Control register (16-bit) */
-#define	PLL_DIV			0xFFC00004	/* PLL Divide Register (16-bit) */
-#define	VR_CTL			0xFFC00008	/* Voltage Regulator Control Register (16-bit) */
-#define	PLL_STAT		0xFFC0000C	/* PLL Status register (16-bit) */
-#define	PLL_LOCKCNT		0xFFC00010	/* PLL Lock	Count register (16-bit) */
-#define	CHIPID			0xFFC00014	/* Chip	ID Register */
-
-/* CHIPID Masks */
-#define CHIPID_VERSION         0xF0000000
-#define CHIPID_FAMILY          0x0FFFF000
-#define CHIPID_MANUFACTURE     0x00000FFE
-
-/* System Interrupt Controller (0xFFC00100 - 0xFFC001FF) */
-#define	SWRST			0xFFC00100  /* Software	Reset Register (16-bit) */
-#define	SYSCR			0xFFC00104  /* System Configuration registe */
-#define	SIC_RVECT		0xFFC00108
-#define	SIC_IMASK0		0xFFC0010C  /* Interrupt Mask Register */
-#define	SIC_IAR0		0xFFC00110  /* Interrupt Assignment Register 0 */
-#define	SIC_IAR1		0xFFC00114  /* Interrupt Assignment Register 1 */
-#define	SIC_IAR2		0xFFC00118  /* Interrupt Assignment Register 2 */
-#define	SIC_IAR3			0xFFC0011C	/* Interrupt Assignment	Register 3 */
-#define	SIC_ISR0			0xFFC00120  /* Interrupt Status	Register */
-#define	SIC_IWR0			0xFFC00124  /* Interrupt Wakeup	Register */
-#define	SIC_IMASK1			0xFFC00128	/* Interrupt Mask Register 1 */
-#define	SIC_ISR1			0xFFC0012C	/* Interrupt Status Register 1 */
-#define	SIC_IWR1			0xFFC00130	/* Interrupt Wakeup Register 1 */
-#define	SIC_IAR4			0xFFC00134	/* Interrupt Assignment	Register 4 */
-#define	SIC_IAR5			0xFFC00138	/* Interrupt Assignment	Register 5 */
-#define	SIC_IAR6			0xFFC0013C	/* Interrupt Assignment	Register 6 */
-
-
-/* Watchdog Timer (0xFFC00200 -	0xFFC002FF) */
-#define	WDOG_CTL	0xFFC00200  /* Watchdog	Control	Register */
-#define	WDOG_CNT	0xFFC00204  /* Watchdog	Count Register */
-#define	WDOG_STAT	0xFFC00208  /* Watchdog	Status Register */
-
-
-/* Real	Time Clock (0xFFC00300 - 0xFFC003FF) */
-#define	RTC_STAT	0xFFC00300  /* RTC Status Register */
-#define	RTC_ICTL	0xFFC00304  /* RTC Interrupt Control Register */
-#define	RTC_ISTAT	0xFFC00308  /* RTC Interrupt Status Register */
-#define	RTC_SWCNT	0xFFC0030C  /* RTC Stopwatch Count Register */
-#define	RTC_ALARM	0xFFC00310  /* RTC Alarm Time Register */
-#define	RTC_FAST	0xFFC00314  /* RTC Prescaler Enable Register */
-#define	RTC_PREN		0xFFC00314  /* RTC Prescaler Enable Register (alternate	macro) */
-
-
-/* UART0 Controller (0xFFC00400	- 0xFFC004FF) */
-#define	UART0_THR	      0xFFC00400  /* Transmit Holding register */
-#define	UART0_RBR	      0xFFC00400  /* Receive Buffer register */
-#define	UART0_DLL	      0xFFC00400  /* Divisor Latch (Low-Byte) */
-#define	UART0_IER	      0xFFC00404  /* Interrupt Enable Register */
-#define	UART0_DLH	      0xFFC00404  /* Divisor Latch (High-Byte) */
-#define	UART0_IIR	      0xFFC00408  /* Interrupt Identification Register */
-#define	UART0_LCR	      0xFFC0040C  /* Line Control Register */
-#define	UART0_MCR			 0xFFC00410  /*	Modem Control Register */
-#define	UART0_LSR	      0xFFC00414  /* Line Status Register */
-#define	UART0_SCR	      0xFFC0041C  /* SCR Scratch Register */
-#define	UART0_GCTL		     0xFFC00424	 /* Global Control Register */
-
-
-/* SPI0	Controller (0xFFC00500 - 0xFFC005FF) */
-
-#define	SPI0_CTL			0xFFC00500  /* SPI0 Control Register */
-#define	SPI0_FLG			0xFFC00504  /* SPI0 Flag register */
-#define	SPI0_STAT			0xFFC00508  /* SPI0 Status register */
-#define	SPI0_TDBR			0xFFC0050C  /* SPI0 Transmit Data Buffer Register */
-#define	SPI0_RDBR			0xFFC00510  /* SPI0 Receive Data Buffer	Register */
-#define	SPI0_BAUD			0xFFC00514  /* SPI0 Baud rate Register */
-#define	SPI0_SHADOW			0xFFC00518  /* SPI0_RDBR Shadow	Register */
-#define SPI0_REGBASE			SPI0_CTL
-
-
-/* TIMER 0, 1, 2 Registers (0xFFC00600 - 0xFFC006FF) */
-#define	TIMER0_CONFIG			0xFFC00600     /* Timer	0 Configuration	Register */
-#define	TIMER0_COUNTER				0xFFC00604     /* Timer	0 Counter Register */
-#define	TIMER0_PERIOD			0xFFC00608     /* Timer	0 Period Register */
-#define	TIMER0_WIDTH			0xFFC0060C     /* Timer	0 Width	Register */
-
-#define	TIMER1_CONFIG			0xFFC00610	/*  Timer 1 Configuration Register   */
-#define	TIMER1_COUNTER			0xFFC00614	/*  Timer 1 Counter Register	     */
-#define	TIMER1_PERIOD			0xFFC00618	/*  Timer 1 Period Register	     */
-#define	TIMER1_WIDTH			0xFFC0061C	/*  Timer 1 Width Register	     */
-
-#define	TIMER2_CONFIG			0xFFC00620	/* Timer 2 Configuration Register   */
-#define	TIMER2_COUNTER			0xFFC00624	/* Timer 2 Counter Register	    */
-#define	TIMER2_PERIOD			0xFFC00628	/* Timer 2 Period Register	    */
-#define	TIMER2_WIDTH			0xFFC0062C	/* Timer 2 Width Register	    */
-
-#define	TIMER_ENABLE				0xFFC00640	/* Timer Enable	Register */
-#define	TIMER_DISABLE				0xFFC00644	/* Timer Disable Register */
-#define	TIMER_STATUS				0xFFC00648	/* Timer Status	Register */
-
-
-/* Programmable	Flags (0xFFC00700 - 0xFFC007FF) */
-#define	FIO_FLAG_D				0xFFC00700  /* Flag Mask to directly specify state of pins */
-#define	FIO_FLAG_C			0xFFC00704  /* Peripheral Interrupt Flag Register (clear) */
-#define	FIO_FLAG_S			0xFFC00708  /* Peripheral Interrupt Flag Register (set) */
-#define	FIO_FLAG_T					0xFFC0070C  /* Flag Mask to directly toggle state of pins */
-#define	FIO_MASKA_D			0xFFC00710  /* Flag Mask Interrupt A Register (set directly) */
-#define	FIO_MASKA_C			0xFFC00714  /* Flag Mask Interrupt A Register (clear) */
-#define	FIO_MASKA_S			0xFFC00718  /* Flag Mask Interrupt A Register (set) */
-#define	FIO_MASKA_T			0xFFC0071C  /* Flag Mask Interrupt A Register (toggle) */
-#define	FIO_MASKB_D			0xFFC00720  /* Flag Mask Interrupt B Register (set directly) */
-#define	FIO_MASKB_C			0xFFC00724  /* Flag Mask Interrupt B Register (clear) */
-#define	FIO_MASKB_S			0xFFC00728  /* Flag Mask Interrupt B Register (set) */
-#define	FIO_MASKB_T			0xFFC0072C  /* Flag Mask Interrupt B Register (toggle) */
-#define	FIO_DIR				0xFFC00730  /* Peripheral Flag Direction Register */
-#define	FIO_POLAR			0xFFC00734  /* Flag Source Polarity Register */
-#define	FIO_EDGE			0xFFC00738  /* Flag Source Sensitivity Register */
-#define	FIO_BOTH			0xFFC0073C  /* Flag Set	on BOTH	Edges Register */
-#define	FIO_INEN					0xFFC00740  /* Flag Input Enable Register  */
-
-
-/* SPORT0 Controller (0xFFC00800 - 0xFFC008FF) */
-#define	SPORT0_TCR1				0xFFC00800  /* SPORT0 Transmit Configuration 1 Register */
-#define	SPORT0_TCR2				0xFFC00804  /* SPORT0 Transmit Configuration 2 Register */
-#define	SPORT0_TCLKDIV			0xFFC00808  /* SPORT0 Transmit Clock Divider */
-#define	SPORT0_TFSDIV			0xFFC0080C  /* SPORT0 Transmit Frame Sync Divider */
-#define	SPORT0_TX			0xFFC00810  /* SPORT0 TX Data Register */
-#define	SPORT0_RX			0xFFC00818  /* SPORT0 RX Data Register */
-#define	SPORT0_RCR1				0xFFC00820  /* SPORT0 Transmit Configuration 1 Register */
-#define	SPORT0_RCR2				0xFFC00824  /* SPORT0 Transmit Configuration 2 Register */
-#define	SPORT0_RCLKDIV			0xFFC00828  /* SPORT0 Receive Clock Divider */
-#define	SPORT0_RFSDIV			0xFFC0082C  /* SPORT0 Receive Frame Sync Divider */
-#define	SPORT0_STAT			0xFFC00830  /* SPORT0 Status Register */
-#define	SPORT0_CHNL			0xFFC00834  /* SPORT0 Current Channel Register */
-#define	SPORT0_MCMC1			0xFFC00838  /* SPORT0 Multi-Channel Configuration Register 1 */
-#define	SPORT0_MCMC2			0xFFC0083C  /* SPORT0 Multi-Channel Configuration Register 2 */
-#define	SPORT0_MTCS0			0xFFC00840  /* SPORT0 Multi-Channel Transmit Select Register 0 */
-#define	SPORT0_MTCS1			0xFFC00844  /* SPORT0 Multi-Channel Transmit Select Register 1 */
-#define	SPORT0_MTCS2			0xFFC00848  /* SPORT0 Multi-Channel Transmit Select Register 2 */
-#define	SPORT0_MTCS3			0xFFC0084C  /* SPORT0 Multi-Channel Transmit Select Register 3 */
-#define	SPORT0_MRCS0			0xFFC00850  /* SPORT0 Multi-Channel Receive Select Register 0 */
-#define	SPORT0_MRCS1			0xFFC00854  /* SPORT0 Multi-Channel Receive Select Register 1 */
-#define	SPORT0_MRCS2			0xFFC00858  /* SPORT0 Multi-Channel Receive Select Register 2 */
-#define	SPORT0_MRCS3			0xFFC0085C  /* SPORT0 Multi-Channel Receive Select Register 3 */
-
-
-/* SPORT1 Controller (0xFFC00900 - 0xFFC009FF) */
-#define	SPORT1_TCR1				0xFFC00900  /* SPORT1 Transmit Configuration 1 Register */
-#define	SPORT1_TCR2				0xFFC00904  /* SPORT1 Transmit Configuration 2 Register */
-#define	SPORT1_TCLKDIV			0xFFC00908  /* SPORT1 Transmit Clock Divider */
-#define	SPORT1_TFSDIV			0xFFC0090C  /* SPORT1 Transmit Frame Sync Divider */
-#define	SPORT1_TX			0xFFC00910  /* SPORT1 TX Data Register */
-#define	SPORT1_RX			0xFFC00918  /* SPORT1 RX Data Register */
-#define	SPORT1_RCR1				0xFFC00920  /* SPORT1 Transmit Configuration 1 Register */
-#define	SPORT1_RCR2				0xFFC00924  /* SPORT1 Transmit Configuration 2 Register */
-#define	SPORT1_RCLKDIV			0xFFC00928  /* SPORT1 Receive Clock Divider */
-#define	SPORT1_RFSDIV			0xFFC0092C  /* SPORT1 Receive Frame Sync Divider */
-#define	SPORT1_STAT			0xFFC00930  /* SPORT1 Status Register */
-#define	SPORT1_CHNL			0xFFC00934  /* SPORT1 Current Channel Register */
-#define	SPORT1_MCMC1			0xFFC00938  /* SPORT1 Multi-Channel Configuration Register 1 */
-#define	SPORT1_MCMC2			0xFFC0093C  /* SPORT1 Multi-Channel Configuration Register 2 */
-#define	SPORT1_MTCS0			0xFFC00940  /* SPORT1 Multi-Channel Transmit Select Register 0 */
-#define	SPORT1_MTCS1			0xFFC00944  /* SPORT1 Multi-Channel Transmit Select Register 1 */
-#define	SPORT1_MTCS2			0xFFC00948  /* SPORT1 Multi-Channel Transmit Select Register 2 */
-#define	SPORT1_MTCS3			0xFFC0094C  /* SPORT1 Multi-Channel Transmit Select Register 3 */
-#define	SPORT1_MRCS0			0xFFC00950  /* SPORT1 Multi-Channel Receive Select Register 0 */
-#define	SPORT1_MRCS1			0xFFC00954  /* SPORT1 Multi-Channel Receive Select Register 1 */
-#define	SPORT1_MRCS2			0xFFC00958  /* SPORT1 Multi-Channel Receive Select Register 2 */
-#define	SPORT1_MRCS3			0xFFC0095C  /* SPORT1 Multi-Channel Receive Select Register 3 */
-
-
-/* External Bus	Interface Unit (0xFFC00A00 - 0xFFC00AFF) */
-/* Asynchronous	Memory Controller  */
-#define	EBIU_AMGCTL			0xFFC00A00  /* Asynchronous Memory Global Control Register */
-#define	EBIU_AMBCTL0		0xFFC00A04  /* Asynchronous Memory Bank	Control	Register 0 */
-#define	EBIU_AMBCTL1		0xFFC00A08  /* Asynchronous Memory Bank	Control	Register 1 */
-
-/* SDRAM Controller */
-#define	EBIU_SDGCTL			0xFFC00A10  /* SDRAM Global Control Register */
-#define	EBIU_SDBCTL			0xFFC00A14  /* SDRAM Bank Control Register */
-#define	EBIU_SDRRC			0xFFC00A18  /* SDRAM Refresh Rate Control Register */
-#define	EBIU_SDSTAT			0xFFC00A1C  /* SDRAM Status Register */
-
-
-
-/* DMA Controller 0 Traffic Control Registers (0xFFC00B00 - 0xFFC00BFF) */
-
-#define	DMAC0_TC_PER			0xFFC00B0C	/* DMA Controller 0 Traffic Control Periods Register */
-#define	DMAC0_TC_CNT			0xFFC00B10	/* DMA Controller 0 Traffic Control Current Counts Register */
-
-/* Alternate deprecated	register names (below) provided	for backwards code compatibility */
-#define	DMA0_TCPER			DMAC0_TC_PER
-#define	DMA0_TCCNT			DMAC0_TC_CNT
-
-
-/* DMA Controller 0 (0xFFC00C00	- 0xFFC00FFF)							 */
-
-#define	DMA0_NEXT_DESC_PTR		0xFFC00C00	/* DMA Channel 0 Next Descriptor Pointer Register */
-#define	DMA0_START_ADDR			0xFFC00C04	/* DMA Channel 0 Start Address Register */
-#define	DMA0_CONFIG				0xFFC00C08	/* DMA Channel 0 Configuration Register */
-#define	DMA0_X_COUNT			0xFFC00C10	/* DMA Channel 0 X Count Register */
-#define	DMA0_X_MODIFY			0xFFC00C14	/* DMA Channel 0 X Modify Register */
-#define	DMA0_Y_COUNT			0xFFC00C18	/* DMA Channel 0 Y Count Register */
-#define	DMA0_Y_MODIFY			0xFFC00C1C	/* DMA Channel 0 Y Modify Register */
-#define	DMA0_CURR_DESC_PTR		0xFFC00C20	/* DMA Channel 0 Current Descriptor Pointer Register */
-#define	DMA0_CURR_ADDR			0xFFC00C24	/* DMA Channel 0 Current Address Register */
-#define	DMA0_IRQ_STATUS			0xFFC00C28	/* DMA Channel 0 Interrupt/Status Register */
-#define	DMA0_PERIPHERAL_MAP		0xFFC00C2C	/* DMA Channel 0 Peripheral Map	Register */
-#define	DMA0_CURR_X_COUNT		0xFFC00C30	/* DMA Channel 0 Current X Count Register */
-#define	DMA0_CURR_Y_COUNT		0xFFC00C38	/* DMA Channel 0 Current Y Count Register */
-
-#define	DMA1_NEXT_DESC_PTR		0xFFC00C40	/* DMA Channel 1 Next Descriptor Pointer Register */
-#define	DMA1_START_ADDR			0xFFC00C44	/* DMA Channel 1 Start Address Register */
-#define	DMA1_CONFIG				0xFFC00C48	/* DMA Channel 1 Configuration Register */
-#define	DMA1_X_COUNT			0xFFC00C50	/* DMA Channel 1 X Count Register */
-#define	DMA1_X_MODIFY			0xFFC00C54	/* DMA Channel 1 X Modify Register */
-#define	DMA1_Y_COUNT			0xFFC00C58	/* DMA Channel 1 Y Count Register */
-#define	DMA1_Y_MODIFY			0xFFC00C5C	/* DMA Channel 1 Y Modify Register */
-#define	DMA1_CURR_DESC_PTR		0xFFC00C60	/* DMA Channel 1 Current Descriptor Pointer Register */
-#define	DMA1_CURR_ADDR			0xFFC00C64	/* DMA Channel 1 Current Address Register */
-#define	DMA1_IRQ_STATUS			0xFFC00C68	/* DMA Channel 1 Interrupt/Status Register */
-#define	DMA1_PERIPHERAL_MAP		0xFFC00C6C	/* DMA Channel 1 Peripheral Map	Register */
-#define	DMA1_CURR_X_COUNT		0xFFC00C70	/* DMA Channel 1 Current X Count Register */
-#define	DMA1_CURR_Y_COUNT		0xFFC00C78	/* DMA Channel 1 Current Y Count Register */
-
-#define	DMA2_NEXT_DESC_PTR		0xFFC00C80	/* DMA Channel 2 Next Descriptor Pointer Register */
-#define	DMA2_START_ADDR			0xFFC00C84	/* DMA Channel 2 Start Address Register */
-#define	DMA2_CONFIG				0xFFC00C88	/* DMA Channel 2 Configuration Register */
-#define	DMA2_X_COUNT			0xFFC00C90	/* DMA Channel 2 X Count Register */
-#define	DMA2_X_MODIFY			0xFFC00C94	/* DMA Channel 2 X Modify Register */
-#define	DMA2_Y_COUNT			0xFFC00C98	/* DMA Channel 2 Y Count Register */
-#define	DMA2_Y_MODIFY			0xFFC00C9C	/* DMA Channel 2 Y Modify Register */
-#define	DMA2_CURR_DESC_PTR		0xFFC00CA0	/* DMA Channel 2 Current Descriptor Pointer Register */
-#define	DMA2_CURR_ADDR			0xFFC00CA4	/* DMA Channel 2 Current Address Register */
-#define	DMA2_IRQ_STATUS			0xFFC00CA8	/* DMA Channel 2 Interrupt/Status Register */
-#define	DMA2_PERIPHERAL_MAP		0xFFC00CAC	/* DMA Channel 2 Peripheral Map	Register */
-#define	DMA2_CURR_X_COUNT		0xFFC00CB0	/* DMA Channel 2 Current X Count Register */
-#define	DMA2_CURR_Y_COUNT		0xFFC00CB8	/* DMA Channel 2 Current Y Count Register */
-
-#define	DMA3_NEXT_DESC_PTR		0xFFC00CC0	/* DMA Channel 3 Next Descriptor Pointer Register */
-#define	DMA3_START_ADDR			0xFFC00CC4	/* DMA Channel 3 Start Address Register */
-#define	DMA3_CONFIG				0xFFC00CC8	/* DMA Channel 3 Configuration Register */
-#define	DMA3_X_COUNT			0xFFC00CD0	/* DMA Channel 3 X Count Register */
-#define	DMA3_X_MODIFY			0xFFC00CD4	/* DMA Channel 3 X Modify Register */
-#define	DMA3_Y_COUNT			0xFFC00CD8	/* DMA Channel 3 Y Count Register */
-#define	DMA3_Y_MODIFY			0xFFC00CDC	/* DMA Channel 3 Y Modify Register */
-#define	DMA3_CURR_DESC_PTR		0xFFC00CE0	/* DMA Channel 3 Current Descriptor Pointer Register */
-#define	DMA3_CURR_ADDR			0xFFC00CE4	/* DMA Channel 3 Current Address Register */
-#define	DMA3_IRQ_STATUS			0xFFC00CE8	/* DMA Channel 3 Interrupt/Status Register */
-#define	DMA3_PERIPHERAL_MAP		0xFFC00CEC	/* DMA Channel 3 Peripheral Map	Register */
-#define	DMA3_CURR_X_COUNT		0xFFC00CF0	/* DMA Channel 3 Current X Count Register */
-#define	DMA3_CURR_Y_COUNT		0xFFC00CF8	/* DMA Channel 3 Current Y Count Register */
-
-#define	DMA4_NEXT_DESC_PTR		0xFFC00D00	/* DMA Channel 4 Next Descriptor Pointer Register */
-#define	DMA4_START_ADDR			0xFFC00D04	/* DMA Channel 4 Start Address Register */
-#define	DMA4_CONFIG				0xFFC00D08	/* DMA Channel 4 Configuration Register */
-#define	DMA4_X_COUNT			0xFFC00D10	/* DMA Channel 4 X Count Register */
-#define	DMA4_X_MODIFY			0xFFC00D14	/* DMA Channel 4 X Modify Register */
-#define	DMA4_Y_COUNT			0xFFC00D18	/* DMA Channel 4 Y Count Register */
-#define	DMA4_Y_MODIFY			0xFFC00D1C	/* DMA Channel 4 Y Modify Register */
-#define	DMA4_CURR_DESC_PTR		0xFFC00D20	/* DMA Channel 4 Current Descriptor Pointer Register */
-#define	DMA4_CURR_ADDR			0xFFC00D24	/* DMA Channel 4 Current Address Register */
-#define	DMA4_IRQ_STATUS			0xFFC00D28	/* DMA Channel 4 Interrupt/Status Register */
-#define	DMA4_PERIPHERAL_MAP		0xFFC00D2C	/* DMA Channel 4 Peripheral Map	Register */
-#define	DMA4_CURR_X_COUNT		0xFFC00D30	/* DMA Channel 4 Current X Count Register */
-#define	DMA4_CURR_Y_COUNT		0xFFC00D38	/* DMA Channel 4 Current Y Count Register */
-
-#define	DMA5_NEXT_DESC_PTR		0xFFC00D40	/* DMA Channel 5 Next Descriptor Pointer Register */
-#define	DMA5_START_ADDR			0xFFC00D44	/* DMA Channel 5 Start Address Register */
-#define	DMA5_CONFIG				0xFFC00D48	/* DMA Channel 5 Configuration Register */
-#define	DMA5_X_COUNT			0xFFC00D50	/* DMA Channel 5 X Count Register */
-#define	DMA5_X_MODIFY			0xFFC00D54	/* DMA Channel 5 X Modify Register */
-#define	DMA5_Y_COUNT			0xFFC00D58	/* DMA Channel 5 Y Count Register */
-#define	DMA5_Y_MODIFY			0xFFC00D5C	/* DMA Channel 5 Y Modify Register */
-#define	DMA5_CURR_DESC_PTR		0xFFC00D60	/* DMA Channel 5 Current Descriptor Pointer Register */
-#define	DMA5_CURR_ADDR			0xFFC00D64	/* DMA Channel 5 Current Address Register */
-#define	DMA5_IRQ_STATUS			0xFFC00D68	/* DMA Channel 5 Interrupt/Status Register */
-#define	DMA5_PERIPHERAL_MAP		0xFFC00D6C	/* DMA Channel 5 Peripheral Map	Register */
-#define	DMA5_CURR_X_COUNT		0xFFC00D70	/* DMA Channel 5 Current X Count Register */
-#define	DMA5_CURR_Y_COUNT		0xFFC00D78	/* DMA Channel 5 Current Y Count Register */
-
-#define	DMA6_NEXT_DESC_PTR		0xFFC00D80	/* DMA Channel 6 Next Descriptor Pointer Register */
-#define	DMA6_START_ADDR			0xFFC00D84	/* DMA Channel 6 Start Address Register */
-#define	DMA6_CONFIG				0xFFC00D88	/* DMA Channel 6 Configuration Register */
-#define	DMA6_X_COUNT			0xFFC00D90	/* DMA Channel 6 X Count Register */
-#define	DMA6_X_MODIFY			0xFFC00D94	/* DMA Channel 6 X Modify Register */
-#define	DMA6_Y_COUNT			0xFFC00D98	/* DMA Channel 6 Y Count Register */
-#define	DMA6_Y_MODIFY			0xFFC00D9C	/* DMA Channel 6 Y Modify Register */
-#define	DMA6_CURR_DESC_PTR		0xFFC00DA0	/* DMA Channel 6 Current Descriptor Pointer Register */
-#define	DMA6_CURR_ADDR			0xFFC00DA4	/* DMA Channel 6 Current Address Register */
-#define	DMA6_IRQ_STATUS			0xFFC00DA8	/* DMA Channel 6 Interrupt/Status Register */
-#define	DMA6_PERIPHERAL_MAP		0xFFC00DAC	/* DMA Channel 6 Peripheral Map	Register */
-#define	DMA6_CURR_X_COUNT		0xFFC00DB0	/* DMA Channel 6 Current X Count Register */
-#define	DMA6_CURR_Y_COUNT		0xFFC00DB8	/* DMA Channel 6 Current Y Count Register */
-
-#define	DMA7_NEXT_DESC_PTR		0xFFC00DC0	/* DMA Channel 7 Next Descriptor Pointer Register */
-#define	DMA7_START_ADDR			0xFFC00DC4	/* DMA Channel 7 Start Address Register */
-#define	DMA7_CONFIG				0xFFC00DC8	/* DMA Channel 7 Configuration Register */
-#define	DMA7_X_COUNT			0xFFC00DD0	/* DMA Channel 7 X Count Register */
-#define	DMA7_X_MODIFY			0xFFC00DD4	/* DMA Channel 7 X Modify Register */
-#define	DMA7_Y_COUNT			0xFFC00DD8	/* DMA Channel 7 Y Count Register */
-#define	DMA7_Y_MODIFY			0xFFC00DDC	/* DMA Channel 7 Y Modify Register */
-#define	DMA7_CURR_DESC_PTR		0xFFC00DE0	/* DMA Channel 7 Current Descriptor Pointer Register */
-#define	DMA7_CURR_ADDR			0xFFC00DE4	/* DMA Channel 7 Current Address Register */
-#define	DMA7_IRQ_STATUS			0xFFC00DE8	/* DMA Channel 7 Interrupt/Status Register */
-#define	DMA7_PERIPHERAL_MAP		0xFFC00DEC	/* DMA Channel 7 Peripheral Map	Register */
-#define	DMA7_CURR_X_COUNT		0xFFC00DF0	/* DMA Channel 7 Current X Count Register */
-#define	DMA7_CURR_Y_COUNT		0xFFC00DF8	/* DMA Channel 7 Current Y Count Register */
-
-#define	MDMA0_D0_NEXT_DESC_PTR	0xFFC00E00	/* MemDMA0 Stream 0 Destination	Next Descriptor	Pointer	Register */
-#define	MDMA0_D0_START_ADDR		0xFFC00E04	/* MemDMA0 Stream 0 Destination	Start Address Register */
-#define	MDMA0_D0_CONFIG			0xFFC00E08	/* MemDMA0 Stream 0 Destination	Configuration Register */
-#define	MDMA0_D0_X_COUNT		0xFFC00E10	/* MemDMA0 Stream 0 Destination	X Count	Register */
-#define	MDMA0_D0_X_MODIFY		0xFFC00E14	/* MemDMA0 Stream 0 Destination	X Modify Register */
-#define	MDMA0_D0_Y_COUNT		0xFFC00E18	/* MemDMA0 Stream 0 Destination	Y Count	Register */
-#define	MDMA0_D0_Y_MODIFY		0xFFC00E1C	/* MemDMA0 Stream 0 Destination	Y Modify Register */
-#define	MDMA0_D0_CURR_DESC_PTR	0xFFC00E20	/* MemDMA0 Stream 0 Destination	Current	Descriptor Pointer Register */
-#define	MDMA0_D0_CURR_ADDR		0xFFC00E24	/* MemDMA0 Stream 0 Destination	Current	Address	Register */
-#define	MDMA0_D0_IRQ_STATUS		0xFFC00E28	/* MemDMA0 Stream 0 Destination	Interrupt/Status Register */
-#define	MDMA0_D0_PERIPHERAL_MAP	0xFFC00E2C	/* MemDMA0 Stream 0 Destination	Peripheral Map Register */
-#define	MDMA0_D0_CURR_X_COUNT	0xFFC00E30	/* MemDMA0 Stream 0 Destination	Current	X Count	Register */
-#define	MDMA0_D0_CURR_Y_COUNT	0xFFC00E38	/* MemDMA0 Stream 0 Destination	Current	Y Count	Register */
-
-#define	MDMA0_S0_NEXT_DESC_PTR	0xFFC00E40	/* MemDMA0 Stream 0 Source Next	Descriptor Pointer Register */
-#define	MDMA0_S0_START_ADDR		0xFFC00E44	/* MemDMA0 Stream 0 Source Start Address Register */
-#define	MDMA0_S0_CONFIG			0xFFC00E48	/* MemDMA0 Stream 0 Source Configuration Register */
-#define	MDMA0_S0_X_COUNT		0xFFC00E50	/* MemDMA0 Stream 0 Source X Count Register */
-#define	MDMA0_S0_X_MODIFY		0xFFC00E54	/* MemDMA0 Stream 0 Source X Modify Register */
-#define	MDMA0_S0_Y_COUNT		0xFFC00E58	/* MemDMA0 Stream 0 Source Y Count Register */
-#define	MDMA0_S0_Y_MODIFY		0xFFC00E5C	/* MemDMA0 Stream 0 Source Y Modify Register */
-#define	MDMA0_S0_CURR_DESC_PTR	0xFFC00E60	/* MemDMA0 Stream 0 Source Current Descriptor Pointer Register */
-#define	MDMA0_S0_CURR_ADDR		0xFFC00E64	/* MemDMA0 Stream 0 Source Current Address Register */
-#define	MDMA0_S0_IRQ_STATUS		0xFFC00E68	/* MemDMA0 Stream 0 Source Interrupt/Status Register */
-#define	MDMA0_S0_PERIPHERAL_MAP	0xFFC00E6C	/* MemDMA0 Stream 0 Source Peripheral Map Register */
-#define	MDMA0_S0_CURR_X_COUNT	0xFFC00E70	/* MemDMA0 Stream 0 Source Current X Count Register */
-#define	MDMA0_S0_CURR_Y_COUNT	0xFFC00E78	/* MemDMA0 Stream 0 Source Current Y Count Register */
-
-#define	MDMA0_D1_NEXT_DESC_PTR	0xFFC00E80	/* MemDMA0 Stream 1 Destination	Next Descriptor	Pointer	Register */
-#define	MDMA0_D1_START_ADDR		0xFFC00E84	/* MemDMA0 Stream 1 Destination	Start Address Register */
-#define	MDMA0_D1_CONFIG			0xFFC00E88	/* MemDMA0 Stream 1 Destination	Configuration Register */
-#define	MDMA0_D1_X_COUNT		0xFFC00E90	/* MemDMA0 Stream 1 Destination	X Count	Register */
-#define	MDMA0_D1_X_MODIFY		0xFFC00E94	/* MemDMA0 Stream 1 Destination	X Modify Register */
-#define	MDMA0_D1_Y_COUNT		0xFFC00E98	/* MemDMA0 Stream 1 Destination	Y Count	Register */
-#define	MDMA0_D1_Y_MODIFY		0xFFC00E9C	/* MemDMA0 Stream 1 Destination	Y Modify Register */
-#define	MDMA0_D1_CURR_DESC_PTR	0xFFC00EA0	/* MemDMA0 Stream 1 Destination	Current	Descriptor Pointer Register */
-#define	MDMA0_D1_CURR_ADDR		0xFFC00EA4	/* MemDMA0 Stream 1 Destination	Current	Address	Register */
-#define	MDMA0_D1_IRQ_STATUS		0xFFC00EA8	/* MemDMA0 Stream 1 Destination	Interrupt/Status Register */
-#define	MDMA0_D1_PERIPHERAL_MAP	0xFFC00EAC	/* MemDMA0 Stream 1 Destination	Peripheral Map Register */
-#define	MDMA0_D1_CURR_X_COUNT	0xFFC00EB0	/* MemDMA0 Stream 1 Destination	Current	X Count	Register */
-#define	MDMA0_D1_CURR_Y_COUNT	0xFFC00EB8	/* MemDMA0 Stream 1 Destination	Current	Y Count	Register */
-
-#define	MDMA0_S1_NEXT_DESC_PTR	0xFFC00EC0	/* MemDMA0 Stream 1 Source Next	Descriptor Pointer Register */
-#define	MDMA0_S1_START_ADDR		0xFFC00EC4	/* MemDMA0 Stream 1 Source Start Address Register */
-#define	MDMA0_S1_CONFIG			0xFFC00EC8	/* MemDMA0 Stream 1 Source Configuration Register */
-#define	MDMA0_S1_X_COUNT		0xFFC00ED0	/* MemDMA0 Stream 1 Source X Count Register */
-#define	MDMA0_S1_X_MODIFY		0xFFC00ED4	/* MemDMA0 Stream 1 Source X Modify Register */
-#define	MDMA0_S1_Y_COUNT		0xFFC00ED8	/* MemDMA0 Stream 1 Source Y Count Register */
-#define	MDMA0_S1_Y_MODIFY		0xFFC00EDC	/* MemDMA0 Stream 1 Source Y Modify Register */
-#define	MDMA0_S1_CURR_DESC_PTR	0xFFC00EE0	/* MemDMA0 Stream 1 Source Current Descriptor Pointer Register */
-#define	MDMA0_S1_CURR_ADDR		0xFFC00EE4	/* MemDMA0 Stream 1 Source Current Address Register */
-#define	MDMA0_S1_IRQ_STATUS		0xFFC00EE8	/* MemDMA0 Stream 1 Source Interrupt/Status Register */
-#define	MDMA0_S1_PERIPHERAL_MAP	0xFFC00EEC	/* MemDMA0 Stream 1 Source Peripheral Map Register */
-#define	MDMA0_S1_CURR_X_COUNT	0xFFC00EF0	/* MemDMA0 Stream 1 Source Current X Count Register */
-#define	MDMA0_S1_CURR_Y_COUNT	0xFFC00EF8	/* MemDMA0 Stream 1 Source Current Y Count Register */
-
-#define MDMA_D0_NEXT_DESC_PTR MDMA0_D0_NEXT_DESC_PTR
-#define MDMA_D0_START_ADDR MDMA0_D0_START_ADDR
-#define MDMA_D0_CONFIG MDMA0_D0_CONFIG
-#define MDMA_D0_X_COUNT MDMA0_D0_X_COUNT
-#define MDMA_D0_X_MODIFY MDMA0_D0_X_MODIFY
-#define MDMA_D0_Y_COUNT MDMA0_D0_Y_COUNT
-#define MDMA_D0_Y_MODIFY MDMA0_D0_Y_MODIFY
-#define MDMA_D0_CURR_DESC_PTR MDMA0_D0_CURR_DESC_PTR
-#define MDMA_D0_CURR_ADDR MDMA0_D0_CURR_ADDR
-#define MDMA_D0_IRQ_STATUS MDMA0_D0_IRQ_STATUS
-#define MDMA_D0_PERIPHERAL_MAP MDMA0_D0_PERIPHERAL_MAP
-#define MDMA_D0_CURR_X_COUNT MDMA0_D0_CURR_X_COUNT
-#define MDMA_D0_CURR_Y_COUNT MDMA0_D0_CURR_Y_COUNT
-
-#define MDMA_S0_NEXT_DESC_PTR MDMA0_S0_NEXT_DESC_PTR
-#define MDMA_S0_START_ADDR MDMA0_S0_START_ADDR
-#define MDMA_S0_CONFIG MDMA0_S0_CONFIG
-#define MDMA_S0_X_COUNT MDMA0_S0_X_COUNT
-#define MDMA_S0_X_MODIFY MDMA0_S0_X_MODIFY
-#define MDMA_S0_Y_COUNT MDMA0_S0_Y_COUNT
-#define MDMA_S0_Y_MODIFY MDMA0_S0_Y_MODIFY
-#define MDMA_S0_CURR_DESC_PTR MDMA0_S0_CURR_DESC_PTR
-#define MDMA_S0_CURR_ADDR MDMA0_S0_CURR_ADDR
-#define MDMA_S0_IRQ_STATUS MDMA0_S0_IRQ_STATUS
-#define MDMA_S0_PERIPHERAL_MAP MDMA0_S0_PERIPHERAL_MAP
-#define MDMA_S0_CURR_X_COUNT MDMA0_S0_CURR_X_COUNT
-#define MDMA_S0_CURR_Y_COUNT MDMA0_S0_CURR_Y_COUNT
-
-#define MDMA_D1_NEXT_DESC_PTR MDMA0_D1_NEXT_DESC_PTR
-#define MDMA_D1_START_ADDR MDMA0_D1_START_ADDR
-#define MDMA_D1_CONFIG MDMA0_D1_CONFIG
-#define MDMA_D1_X_COUNT MDMA0_D1_X_COUNT
-#define MDMA_D1_X_MODIFY MDMA0_D1_X_MODIFY
-#define MDMA_D1_Y_COUNT MDMA0_D1_Y_COUNT
-#define MDMA_D1_Y_MODIFY MDMA0_D1_Y_MODIFY
-#define MDMA_D1_CURR_DESC_PTR MDMA0_D1_CURR_DESC_PTR
-#define MDMA_D1_CURR_ADDR MDMA0_D1_CURR_ADDR
-#define MDMA_D1_IRQ_STATUS MDMA0_D1_IRQ_STATUS
-#define MDMA_D1_PERIPHERAL_MAP MDMA0_D1_PERIPHERAL_MAP
-#define MDMA_D1_CURR_X_COUNT MDMA0_D1_CURR_X_COUNT
-#define MDMA_D1_CURR_Y_COUNT MDMA0_D1_CURR_Y_COUNT
-
-#define MDMA_S1_NEXT_DESC_PTR MDMA0_S1_NEXT_DESC_PTR
-#define MDMA_S1_START_ADDR MDMA0_S1_START_ADDR
-#define MDMA_S1_CONFIG MDMA0_S1_CONFIG
-#define MDMA_S1_X_COUNT MDMA0_S1_X_COUNT
-#define MDMA_S1_X_MODIFY MDMA0_S1_X_MODIFY
-#define MDMA_S1_Y_COUNT MDMA0_S1_Y_COUNT
-#define MDMA_S1_Y_MODIFY MDMA0_S1_Y_MODIFY
-#define MDMA_S1_CURR_DESC_PTR MDMA0_S1_CURR_DESC_PTR
-#define MDMA_S1_CURR_ADDR MDMA0_S1_CURR_ADDR
-#define MDMA_S1_IRQ_STATUS MDMA0_S1_IRQ_STATUS
-#define MDMA_S1_PERIPHERAL_MAP MDMA0_S1_PERIPHERAL_MAP
-#define MDMA_S1_CURR_X_COUNT MDMA0_S1_CURR_X_COUNT
-#define MDMA_S1_CURR_Y_COUNT MDMA0_S1_CURR_Y_COUNT
-
-
-/* Parallel Peripheral Interface (PPI) (0xFFC01000 - 0xFFC010FF) */
-#define	PPI_CONTROL			0xFFC01000	/* PPI Control Register */
-#define	PPI_STATUS			0xFFC01004	/* PPI Status Register */
-#define	PPI_COUNT			0xFFC01008	/* PPI Transfer	Count Register */
-#define	PPI_DELAY			0xFFC0100C	/* PPI Delay Count Register */
-#define	PPI_FRAME			0xFFC01010	/* PPI Frame Length Register */
-
-
-/* Two-Wire Interface 0	(0xFFC01400 - 0xFFC014FF)			 */
-#define	TWI0_CLKDIV			0xFFC01400	/* Serial Clock	Divider	Register */
-#define	TWI0_CONTROL		0xFFC01404	/* TWI0	Master Internal	Time Reference Register */
-#define	TWI0_SLAVE_CTL		0xFFC01408	/* Slave Mode Control Register */
-#define	TWI0_SLAVE_STAT		0xFFC0140C	/* Slave Mode Status Register */
-#define	TWI0_SLAVE_ADDR		0xFFC01410	/* Slave Mode Address Register */
-#define	TWI0_MASTER_CTL	0xFFC01414	/* Master Mode Control Register */
-#define	TWI0_MASTER_STAT	0xFFC01418	/* Master Mode Status Register */
-#define	TWI0_MASTER_ADDR	0xFFC0141C	/* Master Mode Address Register */
-#define	TWI0_INT_STAT		0xFFC01420	/* TWI0	Master Interrupt Register */
-#define	TWI0_INT_MASK		0xFFC01424	/* TWI0	Master Interrupt Mask Register */
-#define	TWI0_FIFO_CTL		0xFFC01428	/* FIFO	Control	Register */
-#define	TWI0_FIFO_STAT		0xFFC0142C	/* FIFO	Status Register */
-#define	TWI0_XMT_DATA8		0xFFC01480	/* FIFO	Transmit Data Single Byte Register */
-#define	TWI0_XMT_DATA16		0xFFC01484	/* FIFO	Transmit Data Double Byte Register */
-#define	TWI0_RCV_DATA8		0xFFC01488	/* FIFO	Receive	Data Single Byte Register */
-#define	TWI0_RCV_DATA16		0xFFC0148C	/* FIFO	Receive	Data Double Byte Register */
-
-#define TWI0_REGBASE		TWI0_CLKDIV
-
-/* the following are for backwards compatibility */
-#define	TWI0_PRESCALE	 TWI0_CONTROL
-#define	TWI0_INT_SRC	 TWI0_INT_STAT
-#define	TWI0_INT_ENABLE	 TWI0_INT_MASK
-
-
-/* General-Purpose Ports  (0xFFC01500 -	0xFFC015FF)	 */
-
-/* GPIO	Port C Register	Names */
-#define PORTCIO_FER			0xFFC01500	/* GPIO	Pin Port C Configuration Register */
-#define PORTCIO				0xFFC01510	/* GPIO	Pin Port C Data	Register */
-#define PORTCIO_CLEAR			0xFFC01520	/* Clear GPIO Pin Port C Register */
-#define PORTCIO_SET			0xFFC01530	/* Set GPIO Pin	Port C Register */
-#define PORTCIO_TOGGLE			0xFFC01540	/* Toggle GPIO Pin Port	C Register */
-#define PORTCIO_DIR			0xFFC01550	/* GPIO	Pin Port C Direction Register */
-#define PORTCIO_INEN			0xFFC01560	/* GPIO	Pin Port C Input Enable	Register */
-
-/* GPIO	Port D Register	Names */
-#define PORTDIO_FER			0xFFC01504	/* GPIO	Pin Port D Configuration Register */
-#define PORTDIO				0xFFC01514	/* GPIO	Pin Port D Data	Register */
-#define PORTDIO_CLEAR			0xFFC01524	/* Clear GPIO Pin Port D Register */
-#define PORTDIO_SET			0xFFC01534	/* Set GPIO Pin	Port D Register */
-#define PORTDIO_TOGGLE			0xFFC01544	/* Toggle GPIO Pin Port	D Register */
-#define PORTDIO_DIR			0xFFC01554	/* GPIO	Pin Port D Direction Register */
-#define PORTDIO_INEN			0xFFC01564	/* GPIO	Pin Port D Input Enable	Register */
-
-/* GPIO	Port E Register	Names */
-#define PORTEIO_FER			0xFFC01508	/* GPIO	Pin Port E Configuration Register */
-#define PORTEIO				0xFFC01518	/* GPIO	Pin Port E Data	Register */
-#define PORTEIO_CLEAR			0xFFC01528	/* Clear GPIO Pin Port E Register */
-#define PORTEIO_SET			0xFFC01538	/* Set GPIO Pin	Port E Register */
-#define PORTEIO_TOGGLE			0xFFC01548	/* Toggle GPIO Pin Port	E Register */
-#define PORTEIO_DIR			0xFFC01558	/* GPIO	Pin Port E Direction Register */
-#define PORTEIO_INEN			0xFFC01568	/* GPIO	Pin Port E Input Enable	Register */
-
-/* DMA Controller 1 Traffic Control Registers (0xFFC01B00 - 0xFFC01BFF) */
-
-#define	DMAC1_TC_PER			0xFFC01B0C	/* DMA Controller 1 Traffic Control Periods Register */
-#define	DMAC1_TC_CNT			0xFFC01B10	/* DMA Controller 1 Traffic Control Current Counts Register */
-
-/* Alternate deprecated	register names (below) provided	for backwards code compatibility */
-#define	DMA1_TCPER			DMAC1_TC_PER
-#define	DMA1_TCCNT			DMAC1_TC_CNT
-
-
-/* DMA Controller 1 (0xFFC01C00	- 0xFFC01FFF)							 */
-#define	DMA8_NEXT_DESC_PTR		0xFFC01C00	/* DMA Channel 8 Next Descriptor Pointer Register */
-#define	DMA8_START_ADDR			0xFFC01C04	/* DMA Channel 8 Start Address Register */
-#define	DMA8_CONFIG				0xFFC01C08	/* DMA Channel 8 Configuration Register */
-#define	DMA8_X_COUNT			0xFFC01C10	/* DMA Channel 8 X Count Register */
-#define	DMA8_X_MODIFY			0xFFC01C14	/* DMA Channel 8 X Modify Register */
-#define	DMA8_Y_COUNT			0xFFC01C18	/* DMA Channel 8 Y Count Register */
-#define	DMA8_Y_MODIFY			0xFFC01C1C	/* DMA Channel 8 Y Modify Register */
-#define	DMA8_CURR_DESC_PTR		0xFFC01C20	/* DMA Channel 8 Current Descriptor Pointer Register */
-#define	DMA8_CURR_ADDR			0xFFC01C24	/* DMA Channel 8 Current Address Register */
-#define	DMA8_IRQ_STATUS			0xFFC01C28	/* DMA Channel 8 Interrupt/Status Register */
-#define	DMA8_PERIPHERAL_MAP		0xFFC01C2C	/* DMA Channel 8 Peripheral Map	Register */
-#define	DMA8_CURR_X_COUNT		0xFFC01C30	/* DMA Channel 8 Current X Count Register */
-#define	DMA8_CURR_Y_COUNT		0xFFC01C38	/* DMA Channel 8 Current Y Count Register */
-
-#define	DMA9_NEXT_DESC_PTR		0xFFC01C40	/* DMA Channel 9 Next Descriptor Pointer Register */
-#define	DMA9_START_ADDR			0xFFC01C44	/* DMA Channel 9 Start Address Register */
-#define	DMA9_CONFIG				0xFFC01C48	/* DMA Channel 9 Configuration Register */
-#define	DMA9_X_COUNT			0xFFC01C50	/* DMA Channel 9 X Count Register */
-#define	DMA9_X_MODIFY			0xFFC01C54	/* DMA Channel 9 X Modify Register */
-#define	DMA9_Y_COUNT			0xFFC01C58	/* DMA Channel 9 Y Count Register */
-#define	DMA9_Y_MODIFY			0xFFC01C5C	/* DMA Channel 9 Y Modify Register */
-#define	DMA9_CURR_DESC_PTR		0xFFC01C60	/* DMA Channel 9 Current Descriptor Pointer Register */
-#define	DMA9_CURR_ADDR			0xFFC01C64	/* DMA Channel 9 Current Address Register */
-#define	DMA9_IRQ_STATUS			0xFFC01C68	/* DMA Channel 9 Interrupt/Status Register */
-#define	DMA9_PERIPHERAL_MAP		0xFFC01C6C	/* DMA Channel 9 Peripheral Map	Register */
-#define	DMA9_CURR_X_COUNT		0xFFC01C70	/* DMA Channel 9 Current X Count Register */
-#define	DMA9_CURR_Y_COUNT		0xFFC01C78	/* DMA Channel 9 Current Y Count Register */
-
-#define	DMA10_NEXT_DESC_PTR		0xFFC01C80	/* DMA Channel 10 Next Descriptor Pointer Register */
-#define	DMA10_START_ADDR		0xFFC01C84	/* DMA Channel 10 Start	Address	Register */
-#define	DMA10_CONFIG			0xFFC01C88	/* DMA Channel 10 Configuration	Register */
-#define	DMA10_X_COUNT			0xFFC01C90	/* DMA Channel 10 X Count Register */
-#define	DMA10_X_MODIFY			0xFFC01C94	/* DMA Channel 10 X Modify Register */
-#define	DMA10_Y_COUNT			0xFFC01C98	/* DMA Channel 10 Y Count Register */
-#define	DMA10_Y_MODIFY			0xFFC01C9C	/* DMA Channel 10 Y Modify Register */
-#define	DMA10_CURR_DESC_PTR		0xFFC01CA0	/* DMA Channel 10 Current Descriptor Pointer Register */
-#define	DMA10_CURR_ADDR			0xFFC01CA4	/* DMA Channel 10 Current Address Register */
-#define	DMA10_IRQ_STATUS		0xFFC01CA8	/* DMA Channel 10 Interrupt/Status Register */
-#define	DMA10_PERIPHERAL_MAP	0xFFC01CAC	/* DMA Channel 10 Peripheral Map Register */
-#define	DMA10_CURR_X_COUNT		0xFFC01CB0	/* DMA Channel 10 Current X Count Register */
-#define	DMA10_CURR_Y_COUNT		0xFFC01CB8	/* DMA Channel 10 Current Y Count Register */
-
-#define	DMA11_NEXT_DESC_PTR		0xFFC01CC0	/* DMA Channel 11 Next Descriptor Pointer Register */
-#define	DMA11_START_ADDR		0xFFC01CC4	/* DMA Channel 11 Start	Address	Register */
-#define	DMA11_CONFIG			0xFFC01CC8	/* DMA Channel 11 Configuration	Register */
-#define	DMA11_X_COUNT			0xFFC01CD0	/* DMA Channel 11 X Count Register */
-#define	DMA11_X_MODIFY			0xFFC01CD4	/* DMA Channel 11 X Modify Register */
-#define	DMA11_Y_COUNT			0xFFC01CD8	/* DMA Channel 11 Y Count Register */
-#define	DMA11_Y_MODIFY			0xFFC01CDC	/* DMA Channel 11 Y Modify Register */
-#define	DMA11_CURR_DESC_PTR		0xFFC01CE0	/* DMA Channel 11 Current Descriptor Pointer Register */
-#define	DMA11_CURR_ADDR			0xFFC01CE4	/* DMA Channel 11 Current Address Register */
-#define	DMA11_IRQ_STATUS		0xFFC01CE8	/* DMA Channel 11 Interrupt/Status Register */
-#define	DMA11_PERIPHERAL_MAP	0xFFC01CEC	/* DMA Channel 11 Peripheral Map Register */
-#define	DMA11_CURR_X_COUNT		0xFFC01CF0	/* DMA Channel 11 Current X Count Register */
-#define	DMA11_CURR_Y_COUNT		0xFFC01CF8	/* DMA Channel 11 Current Y Count Register */
-
-#define	DMA12_NEXT_DESC_PTR		0xFFC01D00	/* DMA Channel 12 Next Descriptor Pointer Register */
-#define	DMA12_START_ADDR		0xFFC01D04	/* DMA Channel 12 Start	Address	Register */
-#define	DMA12_CONFIG			0xFFC01D08	/* DMA Channel 12 Configuration	Register */
-#define	DMA12_X_COUNT			0xFFC01D10	/* DMA Channel 12 X Count Register */
-#define	DMA12_X_MODIFY			0xFFC01D14	/* DMA Channel 12 X Modify Register */
-#define	DMA12_Y_COUNT			0xFFC01D18	/* DMA Channel 12 Y Count Register */
-#define	DMA12_Y_MODIFY			0xFFC01D1C	/* DMA Channel 12 Y Modify Register */
-#define	DMA12_CURR_DESC_PTR		0xFFC01D20	/* DMA Channel 12 Current Descriptor Pointer Register */
-#define	DMA12_CURR_ADDR			0xFFC01D24	/* DMA Channel 12 Current Address Register */
-#define	DMA12_IRQ_STATUS		0xFFC01D28	/* DMA Channel 12 Interrupt/Status Register */
-#define	DMA12_PERIPHERAL_MAP	0xFFC01D2C	/* DMA Channel 12 Peripheral Map Register */
-#define	DMA12_CURR_X_COUNT		0xFFC01D30	/* DMA Channel 12 Current X Count Register */
-#define	DMA12_CURR_Y_COUNT		0xFFC01D38	/* DMA Channel 12 Current Y Count Register */
-
-#define	DMA13_NEXT_DESC_PTR		0xFFC01D40	/* DMA Channel 13 Next Descriptor Pointer Register */
-#define	DMA13_START_ADDR		0xFFC01D44	/* DMA Channel 13 Start	Address	Register */
-#define	DMA13_CONFIG			0xFFC01D48	/* DMA Channel 13 Configuration	Register */
-#define	DMA13_X_COUNT			0xFFC01D50	/* DMA Channel 13 X Count Register */
-#define	DMA13_X_MODIFY			0xFFC01D54	/* DMA Channel 13 X Modify Register */
-#define	DMA13_Y_COUNT			0xFFC01D58	/* DMA Channel 13 Y Count Register */
-#define	DMA13_Y_MODIFY			0xFFC01D5C	/* DMA Channel 13 Y Modify Register */
-#define	DMA13_CURR_DESC_PTR		0xFFC01D60	/* DMA Channel 13 Current Descriptor Pointer Register */
-#define	DMA13_CURR_ADDR			0xFFC01D64	/* DMA Channel 13 Current Address Register */
-#define	DMA13_IRQ_STATUS		0xFFC01D68	/* DMA Channel 13 Interrupt/Status Register */
-#define	DMA13_PERIPHERAL_MAP	0xFFC01D6C	/* DMA Channel 13 Peripheral Map Register */
-#define	DMA13_CURR_X_COUNT		0xFFC01D70	/* DMA Channel 13 Current X Count Register */
-#define	DMA13_CURR_Y_COUNT		0xFFC01D78	/* DMA Channel 13 Current Y Count Register */
-
-#define	DMA14_NEXT_DESC_PTR		0xFFC01D80	/* DMA Channel 14 Next Descriptor Pointer Register */
-#define	DMA14_START_ADDR		0xFFC01D84	/* DMA Channel 14 Start	Address	Register */
-#define	DMA14_CONFIG			0xFFC01D88	/* DMA Channel 14 Configuration	Register */
-#define	DMA14_X_COUNT			0xFFC01D90	/* DMA Channel 14 X Count Register */
-#define	DMA14_X_MODIFY			0xFFC01D94	/* DMA Channel 14 X Modify Register */
-#define	DMA14_Y_COUNT			0xFFC01D98	/* DMA Channel 14 Y Count Register */
-#define	DMA14_Y_MODIFY			0xFFC01D9C	/* DMA Channel 14 Y Modify Register */
-#define	DMA14_CURR_DESC_PTR		0xFFC01DA0	/* DMA Channel 14 Current Descriptor Pointer Register */
-#define	DMA14_CURR_ADDR			0xFFC01DA4	/* DMA Channel 14 Current Address Register */
-#define	DMA14_IRQ_STATUS		0xFFC01DA8	/* DMA Channel 14 Interrupt/Status Register */
-#define	DMA14_PERIPHERAL_MAP	0xFFC01DAC	/* DMA Channel 14 Peripheral Map Register */
-#define	DMA14_CURR_X_COUNT		0xFFC01DB0	/* DMA Channel 14 Current X Count Register */
-#define	DMA14_CURR_Y_COUNT		0xFFC01DB8	/* DMA Channel 14 Current Y Count Register */
-
-#define	DMA15_NEXT_DESC_PTR		0xFFC01DC0	/* DMA Channel 15 Next Descriptor Pointer Register */
-#define	DMA15_START_ADDR		0xFFC01DC4	/* DMA Channel 15 Start	Address	Register */
-#define	DMA15_CONFIG			0xFFC01DC8	/* DMA Channel 15 Configuration	Register */
-#define	DMA15_X_COUNT			0xFFC01DD0	/* DMA Channel 15 X Count Register */
-#define	DMA15_X_MODIFY			0xFFC01DD4	/* DMA Channel 15 X Modify Register */
-#define	DMA15_Y_COUNT			0xFFC01DD8	/* DMA Channel 15 Y Count Register */
-#define	DMA15_Y_MODIFY			0xFFC01DDC	/* DMA Channel 15 Y Modify Register */
-#define	DMA15_CURR_DESC_PTR		0xFFC01DE0	/* DMA Channel 15 Current Descriptor Pointer Register */
-#define	DMA15_CURR_ADDR			0xFFC01DE4	/* DMA Channel 15 Current Address Register */
-#define	DMA15_IRQ_STATUS		0xFFC01DE8	/* DMA Channel 15 Interrupt/Status Register */
-#define	DMA15_PERIPHERAL_MAP	0xFFC01DEC	/* DMA Channel 15 Peripheral Map Register */
-#define	DMA15_CURR_X_COUNT		0xFFC01DF0	/* DMA Channel 15 Current X Count Register */
-#define	DMA15_CURR_Y_COUNT		0xFFC01DF8	/* DMA Channel 15 Current Y Count Register */
-
-#define	DMA16_NEXT_DESC_PTR		0xFFC01E00	/* DMA Channel 16 Next Descriptor Pointer Register */
-#define	DMA16_START_ADDR		0xFFC01E04	/* DMA Channel 16 Start	Address	Register */
-#define	DMA16_CONFIG			0xFFC01E08	/* DMA Channel 16 Configuration	Register */
-#define	DMA16_X_COUNT			0xFFC01E10	/* DMA Channel 16 X Count Register */
-#define	DMA16_X_MODIFY			0xFFC01E14	/* DMA Channel 16 X Modify Register */
-#define	DMA16_Y_COUNT			0xFFC01E18	/* DMA Channel 16 Y Count Register */
-#define	DMA16_Y_MODIFY			0xFFC01E1C	/* DMA Channel 16 Y Modify Register */
-#define	DMA16_CURR_DESC_PTR		0xFFC01E20	/* DMA Channel 16 Current Descriptor Pointer Register */
-#define	DMA16_CURR_ADDR			0xFFC01E24	/* DMA Channel 16 Current Address Register */
-#define	DMA16_IRQ_STATUS		0xFFC01E28	/* DMA Channel 16 Interrupt/Status Register */
-#define	DMA16_PERIPHERAL_MAP	0xFFC01E2C	/* DMA Channel 16 Peripheral Map Register */
-#define	DMA16_CURR_X_COUNT		0xFFC01E30	/* DMA Channel 16 Current X Count Register */
-#define	DMA16_CURR_Y_COUNT		0xFFC01E38	/* DMA Channel 16 Current Y Count Register */
-
-#define	DMA17_NEXT_DESC_PTR		0xFFC01E40	/* DMA Channel 17 Next Descriptor Pointer Register */
-#define	DMA17_START_ADDR		0xFFC01E44	/* DMA Channel 17 Start	Address	Register */
-#define	DMA17_CONFIG			0xFFC01E48	/* DMA Channel 17 Configuration	Register */
-#define	DMA17_X_COUNT			0xFFC01E50	/* DMA Channel 17 X Count Register */
-#define	DMA17_X_MODIFY			0xFFC01E54	/* DMA Channel 17 X Modify Register */
-#define	DMA17_Y_COUNT			0xFFC01E58	/* DMA Channel 17 Y Count Register */
-#define	DMA17_Y_MODIFY			0xFFC01E5C	/* DMA Channel 17 Y Modify Register */
-#define	DMA17_CURR_DESC_PTR		0xFFC01E60	/* DMA Channel 17 Current Descriptor Pointer Register */
-#define	DMA17_CURR_ADDR			0xFFC01E64	/* DMA Channel 17 Current Address Register */
-#define	DMA17_IRQ_STATUS		0xFFC01E68	/* DMA Channel 17 Interrupt/Status Register */
-#define	DMA17_PERIPHERAL_MAP	0xFFC01E6C	/* DMA Channel 17 Peripheral Map Register */
-#define	DMA17_CURR_X_COUNT		0xFFC01E70	/* DMA Channel 17 Current X Count Register */
-#define	DMA17_CURR_Y_COUNT		0xFFC01E78	/* DMA Channel 17 Current Y Count Register */
-
-#define	DMA18_NEXT_DESC_PTR		0xFFC01E80	/* DMA Channel 18 Next Descriptor Pointer Register */
-#define	DMA18_START_ADDR		0xFFC01E84	/* DMA Channel 18 Start	Address	Register */
-#define	DMA18_CONFIG			0xFFC01E88	/* DMA Channel 18 Configuration	Register */
-#define	DMA18_X_COUNT			0xFFC01E90	/* DMA Channel 18 X Count Register */
-#define	DMA18_X_MODIFY			0xFFC01E94	/* DMA Channel 18 X Modify Register */
-#define	DMA18_Y_COUNT			0xFFC01E98	/* DMA Channel 18 Y Count Register */
-#define	DMA18_Y_MODIFY			0xFFC01E9C	/* DMA Channel 18 Y Modify Register */
-#define	DMA18_CURR_DESC_PTR		0xFFC01EA0	/* DMA Channel 18 Current Descriptor Pointer Register */
-#define	DMA18_CURR_ADDR			0xFFC01EA4	/* DMA Channel 18 Current Address Register */
-#define	DMA18_IRQ_STATUS		0xFFC01EA8	/* DMA Channel 18 Interrupt/Status Register */
-#define	DMA18_PERIPHERAL_MAP	0xFFC01EAC	/* DMA Channel 18 Peripheral Map Register */
-#define	DMA18_CURR_X_COUNT		0xFFC01EB0	/* DMA Channel 18 Current X Count Register */
-#define	DMA18_CURR_Y_COUNT		0xFFC01EB8	/* DMA Channel 18 Current Y Count Register */
-
-#define	DMA19_NEXT_DESC_PTR		0xFFC01EC0	/* DMA Channel 19 Next Descriptor Pointer Register */
-#define	DMA19_START_ADDR		0xFFC01EC4	/* DMA Channel 19 Start	Address	Register */
-#define	DMA19_CONFIG			0xFFC01EC8	/* DMA Channel 19 Configuration	Register */
-#define	DMA19_X_COUNT			0xFFC01ED0	/* DMA Channel 19 X Count Register */
-#define	DMA19_X_MODIFY			0xFFC01ED4	/* DMA Channel 19 X Modify Register */
-#define	DMA19_Y_COUNT			0xFFC01ED8	/* DMA Channel 19 Y Count Register */
-#define	DMA19_Y_MODIFY			0xFFC01EDC	/* DMA Channel 19 Y Modify Register */
-#define	DMA19_CURR_DESC_PTR		0xFFC01EE0	/* DMA Channel 19 Current Descriptor Pointer Register */
-#define	DMA19_CURR_ADDR			0xFFC01EE4	/* DMA Channel 19 Current Address Register */
-#define	DMA19_IRQ_STATUS		0xFFC01EE8	/* DMA Channel 19 Interrupt/Status Register */
-#define	DMA19_PERIPHERAL_MAP	0xFFC01EEC	/* DMA Channel 19 Peripheral Map Register */
-#define	DMA19_CURR_X_COUNT		0xFFC01EF0	/* DMA Channel 19 Current X Count Register */
-#define	DMA19_CURR_Y_COUNT		0xFFC01EF8	/* DMA Channel 19 Current Y Count Register */
-
-#define	MDMA1_D0_NEXT_DESC_PTR	0xFFC01F00	/* MemDMA1 Stream 0 Destination	Next Descriptor	Pointer	Register */
-#define	MDMA1_D0_START_ADDR		0xFFC01F04	/* MemDMA1 Stream 0 Destination	Start Address Register */
-#define	MDMA1_D0_CONFIG			0xFFC01F08	/* MemDMA1 Stream 0 Destination	Configuration Register */
-#define	MDMA1_D0_X_COUNT		0xFFC01F10	/* MemDMA1 Stream 0 Destination	X Count	Register */
-#define	MDMA1_D0_X_MODIFY		0xFFC01F14	/* MemDMA1 Stream 0 Destination	X Modify Register */
-#define	MDMA1_D0_Y_COUNT		0xFFC01F18	/* MemDMA1 Stream 0 Destination	Y Count	Register */
-#define	MDMA1_D0_Y_MODIFY		0xFFC01F1C	/* MemDMA1 Stream 0 Destination	Y Modify Register */
-#define	MDMA1_D0_CURR_DESC_PTR	0xFFC01F20	/* MemDMA1 Stream 0 Destination	Current	Descriptor Pointer Register */
-#define	MDMA1_D0_CURR_ADDR		0xFFC01F24	/* MemDMA1 Stream 0 Destination	Current	Address	Register */
-#define	MDMA1_D0_IRQ_STATUS		0xFFC01F28	/* MemDMA1 Stream 0 Destination	Interrupt/Status Register */
-#define	MDMA1_D0_PERIPHERAL_MAP	0xFFC01F2C	/* MemDMA1 Stream 0 Destination	Peripheral Map Register */
-#define	MDMA1_D0_CURR_X_COUNT	0xFFC01F30	/* MemDMA1 Stream 0 Destination	Current	X Count	Register */
-#define	MDMA1_D0_CURR_Y_COUNT	0xFFC01F38	/* MemDMA1 Stream 0 Destination	Current	Y Count	Register */
-
-#define	MDMA1_S0_NEXT_DESC_PTR	0xFFC01F40	/* MemDMA1 Stream 0 Source Next	Descriptor Pointer Register */
-#define	MDMA1_S0_START_ADDR		0xFFC01F44	/* MemDMA1 Stream 0 Source Start Address Register */
-#define	MDMA1_S0_CONFIG			0xFFC01F48	/* MemDMA1 Stream 0 Source Configuration Register */
-#define	MDMA1_S0_X_COUNT		0xFFC01F50	/* MemDMA1 Stream 0 Source X Count Register */
-#define	MDMA1_S0_X_MODIFY		0xFFC01F54	/* MemDMA1 Stream 0 Source X Modify Register */
-#define	MDMA1_S0_Y_COUNT		0xFFC01F58	/* MemDMA1 Stream 0 Source Y Count Register */
-#define	MDMA1_S0_Y_MODIFY		0xFFC01F5C	/* MemDMA1 Stream 0 Source Y Modify Register */
-#define	MDMA1_S0_CURR_DESC_PTR	0xFFC01F60	/* MemDMA1 Stream 0 Source Current Descriptor Pointer Register */
-#define	MDMA1_S0_CURR_ADDR		0xFFC01F64	/* MemDMA1 Stream 0 Source Current Address Register */
-#define	MDMA1_S0_IRQ_STATUS		0xFFC01F68	/* MemDMA1 Stream 0 Source Interrupt/Status Register */
-#define	MDMA1_S0_PERIPHERAL_MAP	0xFFC01F6C	/* MemDMA1 Stream 0 Source Peripheral Map Register */
-#define	MDMA1_S0_CURR_X_COUNT	0xFFC01F70	/* MemDMA1 Stream 0 Source Current X Count Register */
-#define	MDMA1_S0_CURR_Y_COUNT	0xFFC01F78	/* MemDMA1 Stream 0 Source Current Y Count Register */
-
-#define	MDMA1_D1_NEXT_DESC_PTR	0xFFC01F80	/* MemDMA1 Stream 1 Destination	Next Descriptor	Pointer	Register */
-#define	MDMA1_D1_START_ADDR		0xFFC01F84	/* MemDMA1 Stream 1 Destination	Start Address Register */
-#define	MDMA1_D1_CONFIG			0xFFC01F88	/* MemDMA1 Stream 1 Destination	Configuration Register */
-#define	MDMA1_D1_X_COUNT		0xFFC01F90	/* MemDMA1 Stream 1 Destination	X Count	Register */
-#define	MDMA1_D1_X_MODIFY		0xFFC01F94	/* MemDMA1 Stream 1 Destination	X Modify Register */
-#define	MDMA1_D1_Y_COUNT		0xFFC01F98	/* MemDMA1 Stream 1 Destination	Y Count	Register */
-#define	MDMA1_D1_Y_MODIFY		0xFFC01F9C	/* MemDMA1 Stream 1 Destination	Y Modify Register */
-#define	MDMA1_D1_CURR_DESC_PTR	0xFFC01FA0	/* MemDMA1 Stream 1 Destination	Current	Descriptor Pointer Register */
-#define	MDMA1_D1_CURR_ADDR		0xFFC01FA4	/* MemDMA1 Stream 1 Destination	Current	Address	Register */
-#define	MDMA1_D1_IRQ_STATUS		0xFFC01FA8	/* MemDMA1 Stream 1 Destination	Interrupt/Status Register */
-#define	MDMA1_D1_PERIPHERAL_MAP	0xFFC01FAC	/* MemDMA1 Stream 1 Destination	Peripheral Map Register */
-#define	MDMA1_D1_CURR_X_COUNT	0xFFC01FB0	/* MemDMA1 Stream 1 Destination	Current	X Count	Register */
-#define	MDMA1_D1_CURR_Y_COUNT	0xFFC01FB8	/* MemDMA1 Stream 1 Destination	Current	Y Count	Register */
-
-#define	MDMA1_S1_NEXT_DESC_PTR	0xFFC01FC0	/* MemDMA1 Stream 1 Source Next	Descriptor Pointer Register */
-#define	MDMA1_S1_START_ADDR		0xFFC01FC4	/* MemDMA1 Stream 1 Source Start Address Register */
-#define	MDMA1_S1_CONFIG			0xFFC01FC8	/* MemDMA1 Stream 1 Source Configuration Register */
-#define	MDMA1_S1_X_COUNT		0xFFC01FD0	/* MemDMA1 Stream 1 Source X Count Register */
-#define	MDMA1_S1_X_MODIFY		0xFFC01FD4	/* MemDMA1 Stream 1 Source X Modify Register */
-#define	MDMA1_S1_Y_COUNT		0xFFC01FD8	/* MemDMA1 Stream 1 Source Y Count Register */
-#define	MDMA1_S1_Y_MODIFY		0xFFC01FDC	/* MemDMA1 Stream 1 Source Y Modify Register */
-#define	MDMA1_S1_CURR_DESC_PTR	0xFFC01FE0	/* MemDMA1 Stream 1 Source Current Descriptor Pointer Register */
-#define	MDMA1_S1_CURR_ADDR		0xFFC01FE4	/* MemDMA1 Stream 1 Source Current Address Register */
-#define	MDMA1_S1_IRQ_STATUS		0xFFC01FE8	/* MemDMA1 Stream 1 Source Interrupt/Status Register */
-#define	MDMA1_S1_PERIPHERAL_MAP	0xFFC01FEC	/* MemDMA1 Stream 1 Source Peripheral Map Register */
-#define	MDMA1_S1_CURR_X_COUNT	0xFFC01FF0	/* MemDMA1 Stream 1 Source Current X Count Register */
-#define	MDMA1_S1_CURR_Y_COUNT	0xFFC01FF8	/* MemDMA1 Stream 1 Source Current Y Count Register */
-
-
-/* UART1 Controller		(0xFFC02000 - 0xFFC020FF)	 */
-#define	UART1_THR			0xFFC02000	/* Transmit Holding register */
-#define	UART1_RBR			0xFFC02000	/* Receive Buffer register */
-#define	UART1_DLL			0xFFC02000	/* Divisor Latch (Low-Byte) */
-#define	UART1_IER			0xFFC02004	/* Interrupt Enable Register */
-#define	UART1_DLH			0xFFC02004	/* Divisor Latch (High-Byte) */
-#define	UART1_IIR			0xFFC02008	/* Interrupt Identification Register */
-#define	UART1_LCR			0xFFC0200C	/* Line	Control	Register */
-#define	UART1_MCR			0xFFC02010	/* Modem Control Register */
-#define	UART1_LSR			0xFFC02014	/* Line	Status Register */
-#define	UART1_SCR			0xFFC0201C	/* SCR Scratch Register */
-#define	UART1_GCTL			0xFFC02024	/* Global Control Register */
-
-
-/* UART2 Controller		(0xFFC02100 - 0xFFC021FF)	 */
-#define	UART2_THR			0xFFC02100	/* Transmit Holding register */
-#define	UART2_RBR			0xFFC02100	/* Receive Buffer register */
-#define	UART2_DLL			0xFFC02100	/* Divisor Latch (Low-Byte) */
-#define	UART2_IER			0xFFC02104	/* Interrupt Enable Register */
-#define	UART2_DLH			0xFFC02104	/* Divisor Latch (High-Byte) */
-#define	UART2_IIR			0xFFC02108	/* Interrupt Identification Register */
-#define	UART2_LCR			0xFFC0210C	/* Line	Control	Register */
-#define	UART2_MCR			0xFFC02110	/* Modem Control Register */
-#define	UART2_LSR			0xFFC02114	/* Line	Status Register */
-#define	UART2_SCR			0xFFC0211C	/* SCR Scratch Register */
-#define	UART2_GCTL			0xFFC02124	/* Global Control Register */
-
-
-/* Two-Wire Interface 1	(0xFFC02200 - 0xFFC022FF)			 */
-#define	TWI1_CLKDIV			0xFFC02200	/* Serial Clock	Divider	Register */
-#define	TWI1_CONTROL		0xFFC02204	/* TWI1	Master Internal	Time Reference Register */
-#define	TWI1_SLAVE_CTL		0xFFC02208	/* Slave Mode Control Register */
-#define	TWI1_SLAVE_STAT		0xFFC0220C	/* Slave Mode Status Register */
-#define	TWI1_SLAVE_ADDR		0xFFC02210	/* Slave Mode Address Register */
-#define	TWI1_MASTER_CTL	0xFFC02214	/* Master Mode Control Register */
-#define	TWI1_MASTER_STAT	0xFFC02218	/* Master Mode Status Register */
-#define	TWI1_MASTER_ADDR	0xFFC0221C	/* Master Mode Address Register */
-#define	TWI1_INT_STAT		0xFFC02220	/* TWI1	Master Interrupt Register */
-#define	TWI1_INT_MASK		0xFFC02224	/* TWI1	Master Interrupt Mask Register */
-#define	TWI1_FIFO_CTL		0xFFC02228	/* FIFO	Control	Register */
-#define	TWI1_FIFO_STAT		0xFFC0222C	/* FIFO	Status Register */
-#define	TWI1_XMT_DATA8		0xFFC02280	/* FIFO	Transmit Data Single Byte Register */
-#define	TWI1_XMT_DATA16		0xFFC02284	/* FIFO	Transmit Data Double Byte Register */
-#define	TWI1_RCV_DATA8		0xFFC02288	/* FIFO	Receive	Data Single Byte Register */
-#define	TWI1_RCV_DATA16		0xFFC0228C	/* FIFO	Receive	Data Double Byte Register */
-#define TWI1_REGBASE		TWI1_CLKDIV
-
-
-/* the following are for backwards compatibility */
-#define	TWI1_PRESCALE	  TWI1_CONTROL
-#define	TWI1_INT_SRC	  TWI1_INT_STAT
-#define	TWI1_INT_ENABLE	  TWI1_INT_MASK
-
-
-/* SPI1	Controller		(0xFFC02300 - 0xFFC023FF)	 */
-#define	SPI1_CTL			0xFFC02300  /* SPI1 Control Register */
-#define	SPI1_FLG			0xFFC02304  /* SPI1 Flag register */
-#define	SPI1_STAT			0xFFC02308  /* SPI1 Status register */
-#define	SPI1_TDBR			0xFFC0230C  /* SPI1 Transmit Data Buffer Register */
-#define	SPI1_RDBR			0xFFC02310  /* SPI1 Receive Data Buffer	Register */
-#define	SPI1_BAUD			0xFFC02314  /* SPI1 Baud rate Register */
-#define	SPI1_SHADOW			0xFFC02318  /* SPI1_RDBR Shadow	Register */
-#define SPI1_REGBASE			SPI1_CTL
-
-/* SPI2	Controller		(0xFFC02400 - 0xFFC024FF)	 */
-#define	SPI2_CTL			0xFFC02400  /* SPI2 Control Register */
-#define	SPI2_FLG			0xFFC02404  /* SPI2 Flag register */
-#define	SPI2_STAT			0xFFC02408  /* SPI2 Status register */
-#define	SPI2_TDBR			0xFFC0240C  /* SPI2 Transmit Data Buffer Register */
-#define	SPI2_RDBR			0xFFC02410  /* SPI2 Receive Data Buffer	Register */
-#define	SPI2_BAUD			0xFFC02414  /* SPI2 Baud rate Register */
-#define	SPI2_SHADOW			0xFFC02418  /* SPI2_RDBR Shadow	Register */
-#define SPI2_REGBASE			SPI2_CTL
-
-/* SPORT2 Controller		(0xFFC02500 - 0xFFC025FF)			 */
-#define	SPORT2_TCR1			0xFFC02500	/* SPORT2 Transmit Configuration 1 Register */
-#define	SPORT2_TCR2			0xFFC02504	/* SPORT2 Transmit Configuration 2 Register */
-#define	SPORT2_TCLKDIV		0xFFC02508	/* SPORT2 Transmit Clock Divider */
-#define	SPORT2_TFSDIV		0xFFC0250C	/* SPORT2 Transmit Frame Sync Divider */
-#define	SPORT2_TX			0xFFC02510	/* SPORT2 TX Data Register */
-#define	SPORT2_RX			0xFFC02518	/* SPORT2 RX Data Register */
-#define	SPORT2_RCR1			0xFFC02520	/* SPORT2 Transmit Configuration 1 Register */
-#define	SPORT2_RCR2			0xFFC02524	/* SPORT2 Transmit Configuration 2 Register */
-#define	SPORT2_RCLKDIV		0xFFC02528	/* SPORT2 Receive Clock	Divider */
-#define	SPORT2_RFSDIV		0xFFC0252C	/* SPORT2 Receive Frame	Sync Divider */
-#define	SPORT2_STAT			0xFFC02530	/* SPORT2 Status Register */
-#define	SPORT2_CHNL			0xFFC02534	/* SPORT2 Current Channel Register */
-#define	SPORT2_MCMC1		0xFFC02538	/* SPORT2 Multi-Channel	Configuration Register 1 */
-#define	SPORT2_MCMC2		0xFFC0253C	/* SPORT2 Multi-Channel	Configuration Register 2 */
-#define	SPORT2_MTCS0		0xFFC02540	/* SPORT2 Multi-Channel	Transmit Select	Register 0 */
-#define	SPORT2_MTCS1		0xFFC02544	/* SPORT2 Multi-Channel	Transmit Select	Register 1 */
-#define	SPORT2_MTCS2		0xFFC02548	/* SPORT2 Multi-Channel	Transmit Select	Register 2 */
-#define	SPORT2_MTCS3		0xFFC0254C	/* SPORT2 Multi-Channel	Transmit Select	Register 3 */
-#define	SPORT2_MRCS0		0xFFC02550	/* SPORT2 Multi-Channel	Receive	Select Register	0 */
-#define	SPORT2_MRCS1		0xFFC02554	/* SPORT2 Multi-Channel	Receive	Select Register	1 */
-#define	SPORT2_MRCS2		0xFFC02558	/* SPORT2 Multi-Channel	Receive	Select Register	2 */
-#define	SPORT2_MRCS3		0xFFC0255C	/* SPORT2 Multi-Channel	Receive	Select Register	3 */
-
-
-/* SPORT3 Controller		(0xFFC02600 - 0xFFC026FF)			 */
-#define	SPORT3_TCR1			0xFFC02600	/* SPORT3 Transmit Configuration 1 Register */
-#define	SPORT3_TCR2			0xFFC02604	/* SPORT3 Transmit Configuration 2 Register */
-#define	SPORT3_TCLKDIV		0xFFC02608	/* SPORT3 Transmit Clock Divider */
-#define	SPORT3_TFSDIV		0xFFC0260C	/* SPORT3 Transmit Frame Sync Divider */
-#define	SPORT3_TX			0xFFC02610	/* SPORT3 TX Data Register */
-#define	SPORT3_RX			0xFFC02618	/* SPORT3 RX Data Register */
-#define	SPORT3_RCR1			0xFFC02620	/* SPORT3 Transmit Configuration 1 Register */
-#define	SPORT3_RCR2			0xFFC02624	/* SPORT3 Transmit Configuration 2 Register */
-#define	SPORT3_RCLKDIV		0xFFC02628	/* SPORT3 Receive Clock	Divider */
-#define	SPORT3_RFSDIV		0xFFC0262C	/* SPORT3 Receive Frame	Sync Divider */
-#define	SPORT3_STAT			0xFFC02630	/* SPORT3 Status Register */
-#define	SPORT3_CHNL			0xFFC02634	/* SPORT3 Current Channel Register */
-#define	SPORT3_MCMC1		0xFFC02638	/* SPORT3 Multi-Channel	Configuration Register 1 */
-#define	SPORT3_MCMC2		0xFFC0263C	/* SPORT3 Multi-Channel	Configuration Register 2 */
-#define	SPORT3_MTCS0		0xFFC02640	/* SPORT3 Multi-Channel	Transmit Select	Register 0 */
-#define	SPORT3_MTCS1		0xFFC02644	/* SPORT3 Multi-Channel	Transmit Select	Register 1 */
-#define	SPORT3_MTCS2		0xFFC02648	/* SPORT3 Multi-Channel	Transmit Select	Register 2 */
-#define	SPORT3_MTCS3		0xFFC0264C	/* SPORT3 Multi-Channel	Transmit Select	Register 3 */
-#define	SPORT3_MRCS0		0xFFC02650	/* SPORT3 Multi-Channel	Receive	Select Register	0 */
-#define	SPORT3_MRCS1		0xFFC02654	/* SPORT3 Multi-Channel	Receive	Select Register	1 */
-#define	SPORT3_MRCS2		0xFFC02658	/* SPORT3 Multi-Channel	Receive	Select Register	2 */
-#define	SPORT3_MRCS3		0xFFC0265C	/* SPORT3 Multi-Channel	Receive	Select Register	3 */
-
+#include "defBF538.h"
 
 /* Media Transceiver (MXVR)   (0xFFC02700 - 0xFFC028FF) */
 
@@ -995,1249 +149,4 @@
 #define	MXVR_BLOCK_CNT	      0xFFC028C0  /* MXVR Block	Counter */
 #define	MXVR_PLL_CTL_2	      0xFFC028C4  /* MXVR Phase	Lock Loop Control Register 2 */
 
-
-/* CAN Controller		(0xFFC02A00 - 0xFFC02FFF)				 */
-/* For Mailboxes 0-15											 */
-#define	CAN_MC1				0xFFC02A00	/* Mailbox config reg 1	 */
-#define	CAN_MD1				0xFFC02A04	/* Mailbox direction reg 1 */
-#define	CAN_TRS1			0xFFC02A08	/* Transmit Request Set	reg 1 */
-#define	CAN_TRR1			0xFFC02A0C	/* Transmit Request Reset reg 1 */
-#define	CAN_TA1				0xFFC02A10	/* Transmit Acknowledge	reg 1 */
-#define	CAN_AA1				0xFFC02A14	/* Transmit Abort Acknowledge reg 1 */
-#define	CAN_RMP1			0xFFC02A18	/* Receive Message Pending reg 1 */
-#define	CAN_RML1			0xFFC02A1C	/* Receive Message Lost	reg 1 */
-#define	CAN_MBTIF1			0xFFC02A20	/* Mailbox Transmit Interrupt Flag reg 1 */
-#define	CAN_MBRIF1			0xFFC02A24	/* Mailbox Receive  Interrupt Flag reg 1 */
-#define	CAN_MBIM1			0xFFC02A28	/* Mailbox Interrupt Mask reg 1 */
-#define	CAN_RFH1			0xFFC02A2C	/* Remote Frame	Handling reg 1 */
-#define	CAN_OPSS1			0xFFC02A30	/* Overwrite Protection	Single Shot Xmission reg 1 */
-
-/* For Mailboxes 16-31											 */
-#define	CAN_MC2				0xFFC02A40	/* Mailbox config reg 2	 */
-#define	CAN_MD2				0xFFC02A44	/* Mailbox direction reg 2 */
-#define	CAN_TRS2			0xFFC02A48	/* Transmit Request Set	reg 2 */
-#define	CAN_TRR2			0xFFC02A4C	/* Transmit Request Reset reg 2 */
-#define	CAN_TA2				0xFFC02A50	/* Transmit Acknowledge	reg 2 */
-#define	CAN_AA2				0xFFC02A54	/* Transmit Abort Acknowledge reg 2 */
-#define	CAN_RMP2			0xFFC02A58	/* Receive Message Pending reg 2 */
-#define	CAN_RML2			0xFFC02A5C	/* Receive Message Lost	reg 2 */
-#define	CAN_MBTIF2			0xFFC02A60	/* Mailbox Transmit Interrupt Flag reg 2 */
-#define	CAN_MBRIF2			0xFFC02A64	/* Mailbox Receive  Interrupt Flag reg 2 */
-#define	CAN_MBIM2			0xFFC02A68	/* Mailbox Interrupt Mask reg 2 */
-#define	CAN_RFH2			0xFFC02A6C	/* Remote Frame	Handling reg 2 */
-#define	CAN_OPSS2			0xFFC02A70	/* Overwrite Protection	Single Shot Xmission reg 2 */
-
-#define	CAN_CLOCK			0xFFC02A80	/* Bit Timing Configuration register 0 */
-#define	CAN_TIMING			0xFFC02A84	/* Bit Timing Configuration register 1 */
-
-#define	CAN_DEBUG			0xFFC02A88	/* Debug Register		 */
-/* the following is for	backwards compatibility */
-#define	CAN_CNF		 CAN_DEBUG
-
-#define	CAN_STATUS			0xFFC02A8C	/* Global Status Register */
-#define	CAN_CEC				0xFFC02A90	/* Error Counter Register */
-#define	CAN_GIS				0xFFC02A94	/* Global Interrupt Status Register */
-#define	CAN_GIM				0xFFC02A98	/* Global Interrupt Mask Register */
-#define	CAN_GIF				0xFFC02A9C	/* Global Interrupt Flag Register */
-#define	CAN_CONTROL			0xFFC02AA0	/* Master Control Register */
-#define	CAN_INTR			0xFFC02AA4	/* Interrupt Pending Register */
-#define	CAN_MBTD			0xFFC02AAC	/* Mailbox Temporary Disable Feature */
-#define	CAN_EWR				0xFFC02AB0	/* Programmable	Warning	Level */
-#define	CAN_ESR				0xFFC02AB4	/* Error Status	Register */
-#define	CAN_UCCNT			0xFFC02AC4	/* Universal Counter	 */
-#define	CAN_UCRC			0xFFC02AC8	/* Universal Counter Reload/Capture Register */
-#define	CAN_UCCNF			0xFFC02ACC	/* Universal Counter Configuration Register */
-
-/* Mailbox Acceptance Masks					 */
-#define	CAN_AM00L			0xFFC02B00	/* Mailbox 0 Low Acceptance Mask */
-#define	CAN_AM00H			0xFFC02B04	/* Mailbox 0 High Acceptance Mask */
-#define	CAN_AM01L			0xFFC02B08	/* Mailbox 1 Low Acceptance Mask */
-#define	CAN_AM01H			0xFFC02B0C	/* Mailbox 1 High Acceptance Mask */
-#define	CAN_AM02L			0xFFC02B10	/* Mailbox 2 Low Acceptance Mask */
-#define	CAN_AM02H			0xFFC02B14	/* Mailbox 2 High Acceptance Mask */
-#define	CAN_AM03L			0xFFC02B18	/* Mailbox 3 Low Acceptance Mask */
-#define	CAN_AM03H			0xFFC02B1C	/* Mailbox 3 High Acceptance Mask */
-#define	CAN_AM04L			0xFFC02B20	/* Mailbox 4 Low Acceptance Mask */
-#define	CAN_AM04H			0xFFC02B24	/* Mailbox 4 High Acceptance Mask */
-#define	CAN_AM05L			0xFFC02B28	/* Mailbox 5 Low Acceptance Mask */
-#define	CAN_AM05H			0xFFC02B2C	/* Mailbox 5 High Acceptance Mask */
-#define	CAN_AM06L			0xFFC02B30	/* Mailbox 6 Low Acceptance Mask */
-#define	CAN_AM06H			0xFFC02B34	/* Mailbox 6 High Acceptance Mask */
-#define	CAN_AM07L			0xFFC02B38	/* Mailbox 7 Low Acceptance Mask */
-#define	CAN_AM07H			0xFFC02B3C	/* Mailbox 7 High Acceptance Mask */
-#define	CAN_AM08L			0xFFC02B40	/* Mailbox 8 Low Acceptance Mask */
-#define	CAN_AM08H			0xFFC02B44	/* Mailbox 8 High Acceptance Mask */
-#define	CAN_AM09L			0xFFC02B48	/* Mailbox 9 Low Acceptance Mask */
-#define	CAN_AM09H			0xFFC02B4C	/* Mailbox 9 High Acceptance Mask */
-#define	CAN_AM10L			0xFFC02B50	/* Mailbox 10 Low Acceptance Mask */
-#define	CAN_AM10H			0xFFC02B54	/* Mailbox 10 High Acceptance Mask */
-#define	CAN_AM11L			0xFFC02B58	/* Mailbox 11 Low Acceptance Mask */
-#define	CAN_AM11H			0xFFC02B5C	/* Mailbox 11 High Acceptance Mask */
-#define	CAN_AM12L			0xFFC02B60	/* Mailbox 12 Low Acceptance Mask */
-#define	CAN_AM12H			0xFFC02B64	/* Mailbox 12 High Acceptance Mask */
-#define	CAN_AM13L			0xFFC02B68	/* Mailbox 13 Low Acceptance Mask */
-#define	CAN_AM13H			0xFFC02B6C	/* Mailbox 13 High Acceptance Mask */
-#define	CAN_AM14L			0xFFC02B70	/* Mailbox 14 Low Acceptance Mask */
-#define	CAN_AM14H			0xFFC02B74	/* Mailbox 14 High Acceptance Mask */
-#define	CAN_AM15L			0xFFC02B78	/* Mailbox 15 Low Acceptance Mask */
-#define	CAN_AM15H			0xFFC02B7C	/* Mailbox 15 High Acceptance Mask */
-
-#define	CAN_AM16L			0xFFC02B80	/* Mailbox 16 Low Acceptance Mask */
-#define	CAN_AM16H			0xFFC02B84	/* Mailbox 16 High Acceptance Mask */
-#define	CAN_AM17L			0xFFC02B88	/* Mailbox 17 Low Acceptance Mask */
-#define	CAN_AM17H			0xFFC02B8C	/* Mailbox 17 High Acceptance Mask */
-#define	CAN_AM18L			0xFFC02B90	/* Mailbox 18 Low Acceptance Mask */
-#define	CAN_AM18H			0xFFC02B94	/* Mailbox 18 High Acceptance Mask */
-#define	CAN_AM19L			0xFFC02B98	/* Mailbox 19 Low Acceptance Mask */
-#define	CAN_AM19H			0xFFC02B9C	/* Mailbox 19 High Acceptance Mask */
-#define	CAN_AM20L			0xFFC02BA0	/* Mailbox 20 Low Acceptance Mask */
-#define	CAN_AM20H			0xFFC02BA4	/* Mailbox 20 High Acceptance Mask */
-#define	CAN_AM21L			0xFFC02BA8	/* Mailbox 21 Low Acceptance Mask */
-#define	CAN_AM21H			0xFFC02BAC	/* Mailbox 21 High Acceptance Mask */
-#define	CAN_AM22L			0xFFC02BB0	/* Mailbox 22 Low Acceptance Mask */
-#define	CAN_AM22H			0xFFC02BB4	/* Mailbox 22 High Acceptance Mask */
-#define	CAN_AM23L			0xFFC02BB8	/* Mailbox 23 Low Acceptance Mask */
-#define	CAN_AM23H			0xFFC02BBC	/* Mailbox 23 High Acceptance Mask */
-#define	CAN_AM24L			0xFFC02BC0	/* Mailbox 24 Low Acceptance Mask */
-#define	CAN_AM24H			0xFFC02BC4	/* Mailbox 24 High Acceptance Mask */
-#define	CAN_AM25L			0xFFC02BC8	/* Mailbox 25 Low Acceptance Mask */
-#define	CAN_AM25H			0xFFC02BCC	/* Mailbox 25 High Acceptance Mask */
-#define	CAN_AM26L			0xFFC02BD0	/* Mailbox 26 Low Acceptance Mask */
-#define	CAN_AM26H			0xFFC02BD4	/* Mailbox 26 High Acceptance Mask */
-#define	CAN_AM27L			0xFFC02BD8	/* Mailbox 27 Low Acceptance Mask */
-#define	CAN_AM27H			0xFFC02BDC	/* Mailbox 27 High Acceptance Mask */
-#define	CAN_AM28L			0xFFC02BE0	/* Mailbox 28 Low Acceptance Mask */
-#define	CAN_AM28H			0xFFC02BE4	/* Mailbox 28 High Acceptance Mask */
-#define	CAN_AM29L			0xFFC02BE8	/* Mailbox 29 Low Acceptance Mask */
-#define	CAN_AM29H			0xFFC02BEC	/* Mailbox 29 High Acceptance Mask */
-#define	CAN_AM30L			0xFFC02BF0	/* Mailbox 30 Low Acceptance Mask */
-#define	CAN_AM30H			0xFFC02BF4	/* Mailbox 30 High Acceptance Mask */
-#define	CAN_AM31L			0xFFC02BF8	/* Mailbox 31 Low Acceptance Mask */
-#define	CAN_AM31H			0xFFC02BFC	/* Mailbox 31 High Acceptance Mask */
-
-/* CAN Acceptance Mask Macros */
-#define	CAN_AM_L(x)			(CAN_AM00L+((x)*0x8))
-#define	CAN_AM_H(x)			(CAN_AM00H+((x)*0x8))
-
-/* Mailbox Registers									 */
-#define	CAN_MB00_DATA0		0xFFC02C00	/* Mailbox 0 Data Word 0 [15:0]	Register */
-#define	CAN_MB00_DATA1		0xFFC02C04	/* Mailbox 0 Data Word 1 [31:16] Register */
-#define	CAN_MB00_DATA2		0xFFC02C08	/* Mailbox 0 Data Word 2 [47:32] Register */
-#define	CAN_MB00_DATA3		0xFFC02C0C	/* Mailbox 0 Data Word 3 [63:48] Register */
-#define	CAN_MB00_LENGTH		0xFFC02C10	/* Mailbox 0 Data Length Code Register */
-#define	CAN_MB00_TIMESTAMP	0xFFC02C14	/* Mailbox 0 Time Stamp	Value Register */
-#define	CAN_MB00_ID0		0xFFC02C18	/* Mailbox 0 Identifier	Low Register */
-#define	CAN_MB00_ID1		0xFFC02C1C	/* Mailbox 0 Identifier	High Register */
-
-#define	CAN_MB01_DATA0		0xFFC02C20	/* Mailbox 1 Data Word 0 [15:0]	Register */
-#define	CAN_MB01_DATA1		0xFFC02C24	/* Mailbox 1 Data Word 1 [31:16] Register */
-#define	CAN_MB01_DATA2		0xFFC02C28	/* Mailbox 1 Data Word 2 [47:32] Register */
-#define	CAN_MB01_DATA3		0xFFC02C2C	/* Mailbox 1 Data Word 3 [63:48] Register */
-#define	CAN_MB01_LENGTH		0xFFC02C30	/* Mailbox 1 Data Length Code Register */
-#define	CAN_MB01_TIMESTAMP	0xFFC02C34	/* Mailbox 1 Time Stamp	Value Register */
-#define	CAN_MB01_ID0		0xFFC02C38	/* Mailbox 1 Identifier	Low Register */
-#define	CAN_MB01_ID1		0xFFC02C3C	/* Mailbox 1 Identifier	High Register */
-
-#define	CAN_MB02_DATA0		0xFFC02C40	/* Mailbox 2 Data Word 0 [15:0]	Register */
-#define	CAN_MB02_DATA1		0xFFC02C44	/* Mailbox 2 Data Word 1 [31:16] Register */
-#define	CAN_MB02_DATA2		0xFFC02C48	/* Mailbox 2 Data Word 2 [47:32] Register */
-#define	CAN_MB02_DATA3		0xFFC02C4C	/* Mailbox 2 Data Word 3 [63:48] Register */
-#define	CAN_MB02_LENGTH		0xFFC02C50	/* Mailbox 2 Data Length Code Register */
-#define	CAN_MB02_TIMESTAMP	0xFFC02C54	/* Mailbox 2 Time Stamp	Value Register */
-#define	CAN_MB02_ID0		0xFFC02C58	/* Mailbox 2 Identifier	Low Register */
-#define	CAN_MB02_ID1		0xFFC02C5C	/* Mailbox 2 Identifier	High Register */
-
-#define	CAN_MB03_DATA0		0xFFC02C60	/* Mailbox 3 Data Word 0 [15:0]	Register */
-#define	CAN_MB03_DATA1		0xFFC02C64	/* Mailbox 3 Data Word 1 [31:16] Register */
-#define	CAN_MB03_DATA2		0xFFC02C68	/* Mailbox 3 Data Word 2 [47:32] Register */
-#define	CAN_MB03_DATA3		0xFFC02C6C	/* Mailbox 3 Data Word 3 [63:48] Register */
-#define	CAN_MB03_LENGTH		0xFFC02C70	/* Mailbox 3 Data Length Code Register */
-#define	CAN_MB03_TIMESTAMP	0xFFC02C74	/* Mailbox 3 Time Stamp	Value Register */
-#define	CAN_MB03_ID0		0xFFC02C78	/* Mailbox 3 Identifier	Low Register */
-#define	CAN_MB03_ID1		0xFFC02C7C	/* Mailbox 3 Identifier	High Register */
-
-#define	CAN_MB04_DATA0		0xFFC02C80	/* Mailbox 4 Data Word 0 [15:0]	Register */
-#define	CAN_MB04_DATA1		0xFFC02C84	/* Mailbox 4 Data Word 1 [31:16] Register */
-#define	CAN_MB04_DATA2		0xFFC02C88	/* Mailbox 4 Data Word 2 [47:32] Register */
-#define	CAN_MB04_DATA3		0xFFC02C8C	/* Mailbox 4 Data Word 3 [63:48] Register */
-#define	CAN_MB04_LENGTH		0xFFC02C90	/* Mailbox 4 Data Length Code Register */
-#define	CAN_MB04_TIMESTAMP	0xFFC02C94	/* Mailbox 4 Time Stamp	Value Register */
-#define	CAN_MB04_ID0		0xFFC02C98	/* Mailbox 4 Identifier	Low Register */
-#define	CAN_MB04_ID1		0xFFC02C9C	/* Mailbox 4 Identifier	High Register */
-
-#define	CAN_MB05_DATA0		0xFFC02CA0	/* Mailbox 5 Data Word 0 [15:0]	Register */
-#define	CAN_MB05_DATA1		0xFFC02CA4	/* Mailbox 5 Data Word 1 [31:16] Register */
-#define	CAN_MB05_DATA2		0xFFC02CA8	/* Mailbox 5 Data Word 2 [47:32] Register */
-#define	CAN_MB05_DATA3		0xFFC02CAC	/* Mailbox 5 Data Word 3 [63:48] Register */
-#define	CAN_MB05_LENGTH		0xFFC02CB0	/* Mailbox 5 Data Length Code Register */
-#define	CAN_MB05_TIMESTAMP	0xFFC02CB4	/* Mailbox 5 Time Stamp	Value Register */
-#define	CAN_MB05_ID0		0xFFC02CB8	/* Mailbox 5 Identifier	Low Register */
-#define	CAN_MB05_ID1		0xFFC02CBC	/* Mailbox 5 Identifier	High Register */
-
-#define	CAN_MB06_DATA0		0xFFC02CC0	/* Mailbox 6 Data Word 0 [15:0]	Register */
-#define	CAN_MB06_DATA1		0xFFC02CC4	/* Mailbox 6 Data Word 1 [31:16] Register */
-#define	CAN_MB06_DATA2		0xFFC02CC8	/* Mailbox 6 Data Word 2 [47:32] Register */
-#define	CAN_MB06_DATA3		0xFFC02CCC	/* Mailbox 6 Data Word 3 [63:48] Register */
-#define	CAN_MB06_LENGTH		0xFFC02CD0	/* Mailbox 6 Data Length Code Register */
-#define	CAN_MB06_TIMESTAMP	0xFFC02CD4	/* Mailbox 6 Time Stamp	Value Register */
-#define	CAN_MB06_ID0		0xFFC02CD8	/* Mailbox 6 Identifier	Low Register */
-#define	CAN_MB06_ID1		0xFFC02CDC	/* Mailbox 6 Identifier	High Register */
-
-#define	CAN_MB07_DATA0		0xFFC02CE0	/* Mailbox 7 Data Word 0 [15:0]	Register */
-#define	CAN_MB07_DATA1		0xFFC02CE4	/* Mailbox 7 Data Word 1 [31:16] Register */
-#define	CAN_MB07_DATA2		0xFFC02CE8	/* Mailbox 7 Data Word 2 [47:32] Register */
-#define	CAN_MB07_DATA3		0xFFC02CEC	/* Mailbox 7 Data Word 3 [63:48] Register */
-#define	CAN_MB07_LENGTH		0xFFC02CF0	/* Mailbox 7 Data Length Code Register */
-#define	CAN_MB07_TIMESTAMP	0xFFC02CF4	/* Mailbox 7 Time Stamp	Value Register */
-#define	CAN_MB07_ID0		0xFFC02CF8	/* Mailbox 7 Identifier	Low Register */
-#define	CAN_MB07_ID1		0xFFC02CFC	/* Mailbox 7 Identifier	High Register */
-
-#define	CAN_MB08_DATA0		0xFFC02D00	/* Mailbox 8 Data Word 0 [15:0]	Register */
-#define	CAN_MB08_DATA1		0xFFC02D04	/* Mailbox 8 Data Word 1 [31:16] Register */
-#define	CAN_MB08_DATA2		0xFFC02D08	/* Mailbox 8 Data Word 2 [47:32] Register */
-#define	CAN_MB08_DATA3		0xFFC02D0C	/* Mailbox 8 Data Word 3 [63:48] Register */
-#define	CAN_MB08_LENGTH		0xFFC02D10	/* Mailbox 8 Data Length Code Register */
-#define	CAN_MB08_TIMESTAMP	0xFFC02D14	/* Mailbox 8 Time Stamp	Value Register */
-#define	CAN_MB08_ID0		0xFFC02D18	/* Mailbox 8 Identifier	Low Register */
-#define	CAN_MB08_ID1		0xFFC02D1C	/* Mailbox 8 Identifier	High Register */
-
-#define	CAN_MB09_DATA0		0xFFC02D20	/* Mailbox 9 Data Word 0 [15:0]	Register */
-#define	CAN_MB09_DATA1		0xFFC02D24	/* Mailbox 9 Data Word 1 [31:16] Register */
-#define	CAN_MB09_DATA2		0xFFC02D28	/* Mailbox 9 Data Word 2 [47:32] Register */
-#define	CAN_MB09_DATA3		0xFFC02D2C	/* Mailbox 9 Data Word 3 [63:48] Register */
-#define	CAN_MB09_LENGTH		0xFFC02D30	/* Mailbox 9 Data Length Code Register */
-#define	CAN_MB09_TIMESTAMP	0xFFC02D34	/* Mailbox 9 Time Stamp	Value Register */
-#define	CAN_MB09_ID0		0xFFC02D38	/* Mailbox 9 Identifier	Low Register */
-#define	CAN_MB09_ID1		0xFFC02D3C	/* Mailbox 9 Identifier	High Register */
-
-#define	CAN_MB10_DATA0		0xFFC02D40	/* Mailbox 10 Data Word	0 [15:0] Register */
-#define	CAN_MB10_DATA1		0xFFC02D44	/* Mailbox 10 Data Word	1 [31:16] Register */
-#define	CAN_MB10_DATA2		0xFFC02D48	/* Mailbox 10 Data Word	2 [47:32] Register */
-#define	CAN_MB10_DATA3		0xFFC02D4C	/* Mailbox 10 Data Word	3 [63:48] Register */
-#define	CAN_MB10_LENGTH		0xFFC02D50	/* Mailbox 10 Data Length Code Register */
-#define	CAN_MB10_TIMESTAMP	0xFFC02D54	/* Mailbox 10 Time Stamp Value Register */
-#define	CAN_MB10_ID0		0xFFC02D58	/* Mailbox 10 Identifier Low Register */
-#define	CAN_MB10_ID1		0xFFC02D5C	/* Mailbox 10 Identifier High Register */
-
-#define	CAN_MB11_DATA0		0xFFC02D60	/* Mailbox 11 Data Word	0 [15:0] Register */
-#define	CAN_MB11_DATA1		0xFFC02D64	/* Mailbox 11 Data Word	1 [31:16] Register */
-#define	CAN_MB11_DATA2		0xFFC02D68	/* Mailbox 11 Data Word	2 [47:32] Register */
-#define	CAN_MB11_DATA3		0xFFC02D6C	/* Mailbox 11 Data Word	3 [63:48] Register */
-#define	CAN_MB11_LENGTH		0xFFC02D70	/* Mailbox 11 Data Length Code Register */
-#define	CAN_MB11_TIMESTAMP	0xFFC02D74	/* Mailbox 11 Time Stamp Value Register */
-#define	CAN_MB11_ID0		0xFFC02D78	/* Mailbox 11 Identifier Low Register */
-#define	CAN_MB11_ID1		0xFFC02D7C	/* Mailbox 11 Identifier High Register */
-
-#define	CAN_MB12_DATA0		0xFFC02D80	/* Mailbox 12 Data Word	0 [15:0] Register */
-#define	CAN_MB12_DATA1		0xFFC02D84	/* Mailbox 12 Data Word	1 [31:16] Register */
-#define	CAN_MB12_DATA2		0xFFC02D88	/* Mailbox 12 Data Word	2 [47:32] Register */
-#define	CAN_MB12_DATA3		0xFFC02D8C	/* Mailbox 12 Data Word	3 [63:48] Register */
-#define	CAN_MB12_LENGTH		0xFFC02D90	/* Mailbox 12 Data Length Code Register */
-#define	CAN_MB12_TIMESTAMP	0xFFC02D94	/* Mailbox 12 Time Stamp Value Register */
-#define	CAN_MB12_ID0		0xFFC02D98	/* Mailbox 12 Identifier Low Register */
-#define	CAN_MB12_ID1		0xFFC02D9C	/* Mailbox 12 Identifier High Register */
-
-#define	CAN_MB13_DATA0		0xFFC02DA0	/* Mailbox 13 Data Word	0 [15:0] Register */
-#define	CAN_MB13_DATA1		0xFFC02DA4	/* Mailbox 13 Data Word	1 [31:16] Register */
-#define	CAN_MB13_DATA2		0xFFC02DA8	/* Mailbox 13 Data Word	2 [47:32] Register */
-#define	CAN_MB13_DATA3		0xFFC02DAC	/* Mailbox 13 Data Word	3 [63:48] Register */
-#define	CAN_MB13_LENGTH		0xFFC02DB0	/* Mailbox 13 Data Length Code Register */
-#define	CAN_MB13_TIMESTAMP	0xFFC02DB4	/* Mailbox 13 Time Stamp Value Register */
-#define	CAN_MB13_ID0		0xFFC02DB8	/* Mailbox 13 Identifier Low Register */
-#define	CAN_MB13_ID1		0xFFC02DBC	/* Mailbox 13 Identifier High Register */
-
-#define	CAN_MB14_DATA0		0xFFC02DC0	/* Mailbox 14 Data Word	0 [15:0] Register */
-#define	CAN_MB14_DATA1		0xFFC02DC4	/* Mailbox 14 Data Word	1 [31:16] Register */
-#define	CAN_MB14_DATA2		0xFFC02DC8	/* Mailbox 14 Data Word	2 [47:32] Register */
-#define	CAN_MB14_DATA3		0xFFC02DCC	/* Mailbox 14 Data Word	3 [63:48] Register */
-#define	CAN_MB14_LENGTH		0xFFC02DD0	/* Mailbox 14 Data Length Code Register */
-#define	CAN_MB14_TIMESTAMP	0xFFC02DD4	/* Mailbox 14 Time Stamp Value Register */
-#define	CAN_MB14_ID0		0xFFC02DD8	/* Mailbox 14 Identifier Low Register */
-#define	CAN_MB14_ID1		0xFFC02DDC	/* Mailbox 14 Identifier High Register */
-
-#define	CAN_MB15_DATA0		0xFFC02DE0	/* Mailbox 15 Data Word	0 [15:0] Register */
-#define	CAN_MB15_DATA1		0xFFC02DE4	/* Mailbox 15 Data Word	1 [31:16] Register */
-#define	CAN_MB15_DATA2		0xFFC02DE8	/* Mailbox 15 Data Word	2 [47:32] Register */
-#define	CAN_MB15_DATA3		0xFFC02DEC	/* Mailbox 15 Data Word	3 [63:48] Register */
-#define	CAN_MB15_LENGTH		0xFFC02DF0	/* Mailbox 15 Data Length Code Register */
-#define	CAN_MB15_TIMESTAMP	0xFFC02DF4	/* Mailbox 15 Time Stamp Value Register */
-#define	CAN_MB15_ID0		0xFFC02DF8	/* Mailbox 15 Identifier Low Register */
-#define	CAN_MB15_ID1		0xFFC02DFC	/* Mailbox 15 Identifier High Register */
-
-#define	CAN_MB16_DATA0		0xFFC02E00	/* Mailbox 16 Data Word	0 [15:0] Register */
-#define	CAN_MB16_DATA1		0xFFC02E04	/* Mailbox 16 Data Word	1 [31:16] Register */
-#define	CAN_MB16_DATA2		0xFFC02E08	/* Mailbox 16 Data Word	2 [47:32] Register */
-#define	CAN_MB16_DATA3		0xFFC02E0C	/* Mailbox 16 Data Word	3 [63:48] Register */
-#define	CAN_MB16_LENGTH		0xFFC02E10	/* Mailbox 16 Data Length Code Register */
-#define	CAN_MB16_TIMESTAMP	0xFFC02E14	/* Mailbox 16 Time Stamp Value Register */
-#define	CAN_MB16_ID0		0xFFC02E18	/* Mailbox 16 Identifier Low Register */
-#define	CAN_MB16_ID1		0xFFC02E1C	/* Mailbox 16 Identifier High Register */
-
-#define	CAN_MB17_DATA0		0xFFC02E20	/* Mailbox 17 Data Word	0 [15:0] Register */
-#define	CAN_MB17_DATA1		0xFFC02E24	/* Mailbox 17 Data Word	1 [31:16] Register */
-#define	CAN_MB17_DATA2		0xFFC02E28	/* Mailbox 17 Data Word	2 [47:32] Register */
-#define	CAN_MB17_DATA3		0xFFC02E2C	/* Mailbox 17 Data Word	3 [63:48] Register */
-#define	CAN_MB17_LENGTH		0xFFC02E30	/* Mailbox 17 Data Length Code Register */
-#define	CAN_MB17_TIMESTAMP	0xFFC02E34	/* Mailbox 17 Time Stamp Value Register */
-#define	CAN_MB17_ID0		0xFFC02E38	/* Mailbox 17 Identifier Low Register */
-#define	CAN_MB17_ID1		0xFFC02E3C	/* Mailbox 17 Identifier High Register */
-
-#define	CAN_MB18_DATA0		0xFFC02E40	/* Mailbox 18 Data Word	0 [15:0] Register */
-#define	CAN_MB18_DATA1		0xFFC02E44	/* Mailbox 18 Data Word	1 [31:16] Register */
-#define	CAN_MB18_DATA2		0xFFC02E48	/* Mailbox 18 Data Word	2 [47:32] Register */
-#define	CAN_MB18_DATA3		0xFFC02E4C	/* Mailbox 18 Data Word	3 [63:48] Register */
-#define	CAN_MB18_LENGTH		0xFFC02E50	/* Mailbox 18 Data Length Code Register */
-#define	CAN_MB18_TIMESTAMP	0xFFC02E54	/* Mailbox 18 Time Stamp Value Register */
-#define	CAN_MB18_ID0		0xFFC02E58	/* Mailbox 18 Identifier Low Register */
-#define	CAN_MB18_ID1		0xFFC02E5C	/* Mailbox 18 Identifier High Register */
-
-#define	CAN_MB19_DATA0		0xFFC02E60	/* Mailbox 19 Data Word	0 [15:0] Register */
-#define	CAN_MB19_DATA1		0xFFC02E64	/* Mailbox 19 Data Word	1 [31:16] Register */
-#define	CAN_MB19_DATA2		0xFFC02E68	/* Mailbox 19 Data Word	2 [47:32] Register */
-#define	CAN_MB19_DATA3		0xFFC02E6C	/* Mailbox 19 Data Word	3 [63:48] Register */
-#define	CAN_MB19_LENGTH		0xFFC02E70	/* Mailbox 19 Data Length Code Register */
-#define	CAN_MB19_TIMESTAMP	0xFFC02E74	/* Mailbox 19 Time Stamp Value Register */
-#define	CAN_MB19_ID0		0xFFC02E78	/* Mailbox 19 Identifier Low Register */
-#define	CAN_MB19_ID1		0xFFC02E7C	/* Mailbox 19 Identifier High Register */
-
-#define	CAN_MB20_DATA0		0xFFC02E80	/* Mailbox 20 Data Word	0 [15:0] Register */
-#define	CAN_MB20_DATA1		0xFFC02E84	/* Mailbox 20 Data Word	1 [31:16] Register */
-#define	CAN_MB20_DATA2		0xFFC02E88	/* Mailbox 20 Data Word	2 [47:32] Register */
-#define	CAN_MB20_DATA3		0xFFC02E8C	/* Mailbox 20 Data Word	3 [63:48] Register */
-#define	CAN_MB20_LENGTH		0xFFC02E90	/* Mailbox 20 Data Length Code Register */
-#define	CAN_MB20_TIMESTAMP	0xFFC02E94	/* Mailbox 20 Time Stamp Value Register */
-#define	CAN_MB20_ID0		0xFFC02E98	/* Mailbox 20 Identifier Low Register */
-#define	CAN_MB20_ID1		0xFFC02E9C	/* Mailbox 20 Identifier High Register */
-
-#define	CAN_MB21_DATA0		0xFFC02EA0	/* Mailbox 21 Data Word	0 [15:0] Register */
-#define	CAN_MB21_DATA1		0xFFC02EA4	/* Mailbox 21 Data Word	1 [31:16] Register */
-#define	CAN_MB21_DATA2		0xFFC02EA8	/* Mailbox 21 Data Word	2 [47:32] Register */
-#define	CAN_MB21_DATA3		0xFFC02EAC	/* Mailbox 21 Data Word	3 [63:48] Register */
-#define	CAN_MB21_LENGTH		0xFFC02EB0	/* Mailbox 21 Data Length Code Register */
-#define	CAN_MB21_TIMESTAMP	0xFFC02EB4	/* Mailbox 21 Time Stamp Value Register */
-#define	CAN_MB21_ID0		0xFFC02EB8	/* Mailbox 21 Identifier Low Register */
-#define	CAN_MB21_ID1		0xFFC02EBC	/* Mailbox 21 Identifier High Register */
-
-#define	CAN_MB22_DATA0		0xFFC02EC0	/* Mailbox 22 Data Word	0 [15:0] Register */
-#define	CAN_MB22_DATA1		0xFFC02EC4	/* Mailbox 22 Data Word	1 [31:16] Register */
-#define	CAN_MB22_DATA2		0xFFC02EC8	/* Mailbox 22 Data Word	2 [47:32] Register */
-#define	CAN_MB22_DATA3		0xFFC02ECC	/* Mailbox 22 Data Word	3 [63:48] Register */
-#define	CAN_MB22_LENGTH		0xFFC02ED0	/* Mailbox 22 Data Length Code Register */
-#define	CAN_MB22_TIMESTAMP	0xFFC02ED4	/* Mailbox 22 Time Stamp Value Register */
-#define	CAN_MB22_ID0		0xFFC02ED8	/* Mailbox 22 Identifier Low Register */
-#define	CAN_MB22_ID1		0xFFC02EDC	/* Mailbox 22 Identifier High Register */
-
-#define	CAN_MB23_DATA0		0xFFC02EE0	/* Mailbox 23 Data Word	0 [15:0] Register */
-#define	CAN_MB23_DATA1		0xFFC02EE4	/* Mailbox 23 Data Word	1 [31:16] Register */
-#define	CAN_MB23_DATA2		0xFFC02EE8	/* Mailbox 23 Data Word	2 [47:32] Register */
-#define	CAN_MB23_DATA3		0xFFC02EEC	/* Mailbox 23 Data Word	3 [63:48] Register */
-#define	CAN_MB23_LENGTH		0xFFC02EF0	/* Mailbox 23 Data Length Code Register */
-#define	CAN_MB23_TIMESTAMP	0xFFC02EF4	/* Mailbox 23 Time Stamp Value Register */
-#define	CAN_MB23_ID0		0xFFC02EF8	/* Mailbox 23 Identifier Low Register */
-#define	CAN_MB23_ID1		0xFFC02EFC	/* Mailbox 23 Identifier High Register */
-
-#define	CAN_MB24_DATA0		0xFFC02F00	/* Mailbox 24 Data Word	0 [15:0] Register */
-#define	CAN_MB24_DATA1		0xFFC02F04	/* Mailbox 24 Data Word	1 [31:16] Register */
-#define	CAN_MB24_DATA2		0xFFC02F08	/* Mailbox 24 Data Word	2 [47:32] Register */
-#define	CAN_MB24_DATA3		0xFFC02F0C	/* Mailbox 24 Data Word	3 [63:48] Register */
-#define	CAN_MB24_LENGTH		0xFFC02F10	/* Mailbox 24 Data Length Code Register */
-#define	CAN_MB24_TIMESTAMP	0xFFC02F14	/* Mailbox 24 Time Stamp Value Register */
-#define	CAN_MB24_ID0		0xFFC02F18	/* Mailbox 24 Identifier Low Register */
-#define	CAN_MB24_ID1		0xFFC02F1C	/* Mailbox 24 Identifier High Register */
-
-#define	CAN_MB25_DATA0		0xFFC02F20	/* Mailbox 25 Data Word	0 [15:0] Register */
-#define	CAN_MB25_DATA1		0xFFC02F24	/* Mailbox 25 Data Word	1 [31:16] Register */
-#define	CAN_MB25_DATA2		0xFFC02F28	/* Mailbox 25 Data Word	2 [47:32] Register */
-#define	CAN_MB25_DATA3		0xFFC02F2C	/* Mailbox 25 Data Word	3 [63:48] Register */
-#define	CAN_MB25_LENGTH		0xFFC02F30	/* Mailbox 25 Data Length Code Register */
-#define	CAN_MB25_TIMESTAMP	0xFFC02F34	/* Mailbox 25 Time Stamp Value Register */
-#define	CAN_MB25_ID0		0xFFC02F38	/* Mailbox 25 Identifier Low Register */
-#define	CAN_MB25_ID1		0xFFC02F3C	/* Mailbox 25 Identifier High Register */
-
-#define	CAN_MB26_DATA0		0xFFC02F40	/* Mailbox 26 Data Word	0 [15:0] Register */
-#define	CAN_MB26_DATA1		0xFFC02F44	/* Mailbox 26 Data Word	1 [31:16] Register */
-#define	CAN_MB26_DATA2		0xFFC02F48	/* Mailbox 26 Data Word	2 [47:32] Register */
-#define	CAN_MB26_DATA3		0xFFC02F4C	/* Mailbox 26 Data Word	3 [63:48] Register */
-#define	CAN_MB26_LENGTH		0xFFC02F50	/* Mailbox 26 Data Length Code Register */
-#define	CAN_MB26_TIMESTAMP	0xFFC02F54	/* Mailbox 26 Time Stamp Value Register */
-#define	CAN_MB26_ID0		0xFFC02F58	/* Mailbox 26 Identifier Low Register */
-#define	CAN_MB26_ID1		0xFFC02F5C	/* Mailbox 26 Identifier High Register */
-
-#define	CAN_MB27_DATA0		0xFFC02F60	/* Mailbox 27 Data Word	0 [15:0] Register */
-#define	CAN_MB27_DATA1		0xFFC02F64	/* Mailbox 27 Data Word	1 [31:16] Register */
-#define	CAN_MB27_DATA2		0xFFC02F68	/* Mailbox 27 Data Word	2 [47:32] Register */
-#define	CAN_MB27_DATA3		0xFFC02F6C	/* Mailbox 27 Data Word	3 [63:48] Register */
-#define	CAN_MB27_LENGTH		0xFFC02F70	/* Mailbox 27 Data Length Code Register */
-#define	CAN_MB27_TIMESTAMP	0xFFC02F74	/* Mailbox 27 Time Stamp Value Register */
-#define	CAN_MB27_ID0		0xFFC02F78	/* Mailbox 27 Identifier Low Register */
-#define	CAN_MB27_ID1		0xFFC02F7C	/* Mailbox 27 Identifier High Register */
-
-#define	CAN_MB28_DATA0		0xFFC02F80	/* Mailbox 28 Data Word	0 [15:0] Register */
-#define	CAN_MB28_DATA1		0xFFC02F84	/* Mailbox 28 Data Word	1 [31:16] Register */
-#define	CAN_MB28_DATA2		0xFFC02F88	/* Mailbox 28 Data Word	2 [47:32] Register */
-#define	CAN_MB28_DATA3		0xFFC02F8C	/* Mailbox 28 Data Word	3 [63:48] Register */
-#define	CAN_MB28_LENGTH		0xFFC02F90	/* Mailbox 28 Data Length Code Register */
-#define	CAN_MB28_TIMESTAMP	0xFFC02F94	/* Mailbox 28 Time Stamp Value Register */
-#define	CAN_MB28_ID0		0xFFC02F98	/* Mailbox 28 Identifier Low Register */
-#define	CAN_MB28_ID1		0xFFC02F9C	/* Mailbox 28 Identifier High Register */
-
-#define	CAN_MB29_DATA0		0xFFC02FA0	/* Mailbox 29 Data Word	0 [15:0] Register */
-#define	CAN_MB29_DATA1		0xFFC02FA4	/* Mailbox 29 Data Word	1 [31:16] Register */
-#define	CAN_MB29_DATA2		0xFFC02FA8	/* Mailbox 29 Data Word	2 [47:32] Register */
-#define	CAN_MB29_DATA3		0xFFC02FAC	/* Mailbox 29 Data Word	3 [63:48] Register */
-#define	CAN_MB29_LENGTH		0xFFC02FB0	/* Mailbox 29 Data Length Code Register */
-#define	CAN_MB29_TIMESTAMP	0xFFC02FB4	/* Mailbox 29 Time Stamp Value Register */
-#define	CAN_MB29_ID0		0xFFC02FB8	/* Mailbox 29 Identifier Low Register */
-#define	CAN_MB29_ID1		0xFFC02FBC	/* Mailbox 29 Identifier High Register */
-
-#define	CAN_MB30_DATA0		0xFFC02FC0	/* Mailbox 30 Data Word	0 [15:0] Register */
-#define	CAN_MB30_DATA1		0xFFC02FC4	/* Mailbox 30 Data Word	1 [31:16] Register */
-#define	CAN_MB30_DATA2		0xFFC02FC8	/* Mailbox 30 Data Word	2 [47:32] Register */
-#define	CAN_MB30_DATA3		0xFFC02FCC	/* Mailbox 30 Data Word	3 [63:48] Register */
-#define	CAN_MB30_LENGTH		0xFFC02FD0	/* Mailbox 30 Data Length Code Register */
-#define	CAN_MB30_TIMESTAMP	0xFFC02FD4	/* Mailbox 30 Time Stamp Value Register */
-#define	CAN_MB30_ID0		0xFFC02FD8	/* Mailbox 30 Identifier Low Register */
-#define	CAN_MB30_ID1		0xFFC02FDC	/* Mailbox 30 Identifier High Register */
-
-#define	CAN_MB31_DATA0		0xFFC02FE0	/* Mailbox 31 Data Word	0 [15:0] Register */
-#define	CAN_MB31_DATA1		0xFFC02FE4	/* Mailbox 31 Data Word	1 [31:16] Register */
-#define	CAN_MB31_DATA2		0xFFC02FE8	/* Mailbox 31 Data Word	2 [47:32] Register */
-#define	CAN_MB31_DATA3		0xFFC02FEC	/* Mailbox 31 Data Word	3 [63:48] Register */
-#define	CAN_MB31_LENGTH		0xFFC02FF0	/* Mailbox 31 Data Length Code Register */
-#define	CAN_MB31_TIMESTAMP	0xFFC02FF4	/* Mailbox 31 Time Stamp Value Register */
-#define	CAN_MB31_ID0		0xFFC02FF8	/* Mailbox 31 Identifier Low Register */
-#define	CAN_MB31_ID1		0xFFC02FFC	/* Mailbox 31 Identifier High Register */
-
-/* CAN Mailbox Area Macros */
-#define	CAN_MB_ID1(x)		(CAN_MB00_ID1+((x)*0x20))
-#define	CAN_MB_ID0(x)		(CAN_MB00_ID0+((x)*0x20))
-#define	CAN_MB_TIMESTAMP(x)	(CAN_MB00_TIMESTAMP+((x)*0x20))
-#define	CAN_MB_LENGTH(x)	(CAN_MB00_LENGTH+((x)*0x20))
-#define	CAN_MB_DATA3(x)		(CAN_MB00_DATA3+((x)*0x20))
-#define	CAN_MB_DATA2(x)		(CAN_MB00_DATA2+((x)*0x20))
-#define	CAN_MB_DATA1(x)		(CAN_MB00_DATA1+((x)*0x20))
-#define	CAN_MB_DATA0(x)		(CAN_MB00_DATA0+((x)*0x20))
-
-
-/*********************************************************************************** */
-/* System MMR Register Bits and	Macros */
-/******************************************************************************* */
-
-/* SWRST Mask */
-#define	SYSTEM_RESET	0x0007	/* Initiates A System Software Reset */
-#define	DOUBLE_FAULT	0x0008	/* Core	Double Fault Causes Reset */
-#define	RESET_DOUBLE	0x2000	/* SW Reset Generated By Core Double-Fault */
-#define	RESET_WDOG		0x4000	/* SW Reset Generated By Watchdog Timer */
-#define	RESET_SOFTWARE	0x8000	/* SW Reset Occurred Since Last	Read Of	SWRST */
-
-/* SYSCR Masks													 */
-#define	BMODE			0x0006	/* Boot	Mode - Latched During HW Reset From Mode Pins */
-#define	NOBOOT			0x0010	/* Execute From	L1 or ASYNC Bank 0 When	BMODE =	0 */
-
-
-/* *************  SYSTEM INTERRUPT CONTROLLER MASKS ***************** */
-
-/* Peripheral Masks For	SIC0_ISR, SIC0_IWR, SIC0_IMASK */
-#define	PLL_WAKEUP_IRQ		0x00000001	/* PLL Wakeup Interrupt	Request */
-#define	DMAC0_ERR_IRQ		0x00000002	/* DMA Controller 0 Error Interrupt Request */
-#define	PPI_ERR_IRQ		0x00000004	/* PPI Error Interrupt Request */
-#define	SPORT0_ERR_IRQ		0x00000008	/* SPORT0 Error	Interrupt Request */
-#define	SPORT1_ERR_IRQ		0x00000010	/* SPORT1 Error	Interrupt Request */
-#define	SPI0_ERR_IRQ		0x00000020	/* SPI0	Error Interrupt	Request */
-#define	UART0_ERR_IRQ		0x00000040	/* UART0 Error Interrupt Request */
-#define	RTC_IRQ			0x00000080	/* Real-Time Clock Interrupt Request */
-#define	DMA0_IRQ		0x00000100	/* DMA Channel 0 (PPI) Interrupt Request */
-#define	DMA1_IRQ		0x00000200	/* DMA Channel 1 (SPORT0 RX) Interrupt Request */
-#define	DMA2_IRQ		0x00000400	/* DMA Channel 2 (SPORT0 TX) Interrupt Request */
-#define	DMA3_IRQ		0x00000800	/* DMA Channel 3 (SPORT1 RX) Interrupt Request */
-#define	DMA4_IRQ		0x00001000	/* DMA Channel 4 (SPORT1 TX) Interrupt Request */
-#define	DMA5_IRQ		0x00002000	/* DMA Channel 5 (SPI) Interrupt Request */
-#define	DMA6_IRQ		0x00004000	/* DMA Channel 6 (UART RX) Interrupt Request */
-#define	DMA7_IRQ		0x00008000	/* DMA Channel 7 (UART TX) Interrupt Request */
-#define	TIMER0_IRQ		0x00010000	/* Timer 0 Interrupt Request */
-#define	TIMER1_IRQ		0x00020000	/* Timer 1 Interrupt Request */
-#define	TIMER2_IRQ		0x00040000	/* Timer 2 Interrupt Request */
-#define	PFA_IRQ			0x00080000	/* Programmable	Flag Interrupt Request A */
-#define	PFB_IRQ			0x00100000	/* Programmable	Flag Interrupt Request B */
-#define	MDMA0_0_IRQ		0x00200000	/* MemDMA0 Stream 0 Interrupt Request */
-#define	MDMA0_1_IRQ		0x00400000	/* MemDMA0 Stream 1 Interrupt Request */
-#define	WDOG_IRQ		0x00800000	/* Software Watchdog Timer Interrupt Request */
-#define	DMAC1_ERR_IRQ		0x01000000	/* DMA Controller 1 Error Interrupt Request */
-#define	SPORT2_ERR_IRQ		0x02000000	/* SPORT2 Error	Interrupt Request */
-#define	SPORT3_ERR_IRQ		0x04000000	/* SPORT3 Error	Interrupt Request */
-#define	MXVR_SD_IRQ		0x08000000	/* MXVR	Synchronous Data Interrupt Request */
-#define	SPI1_ERR_IRQ		0x10000000	/* SPI1	Error Interrupt	Request */
-#define	SPI2_ERR_IRQ		0x20000000	/* SPI2	Error Interrupt	Request */
-#define	UART1_ERR_IRQ		0x40000000	/* UART1 Error Interrupt Request */
-#define	UART2_ERR_IRQ		0x80000000	/* UART2 Error Interrupt Request */
-
-/* the following are for backwards compatibility */
-#define	DMA0_ERR_IRQ		DMAC0_ERR_IRQ
-#define	DMA1_ERR_IRQ		DMAC1_ERR_IRQ
-
-
-/* Peripheral Masks For	SIC_ISR1, SIC_IWR1, SIC_IMASK1	 */
-#define	CAN_ERR_IRQ			0x00000001	/* CAN Error Interrupt Request */
-#define	DMA8_IRQ			0x00000002	/* DMA Channel 8 (SPORT2 RX) Interrupt Request */
-#define	DMA9_IRQ			0x00000004	/* DMA Channel 9 (SPORT2 TX) Interrupt Request */
-#define	DMA10_IRQ			0x00000008	/* DMA Channel 10 (SPORT3 RX) Interrupt	Request */
-#define	DMA11_IRQ			0x00000010	/* DMA Channel 11 (SPORT3 TX) Interrupt	Request */
-#define	DMA12_IRQ			0x00000020	/* DMA Channel 12 Interrupt Request */
-#define	DMA13_IRQ			0x00000040	/* DMA Channel 13 Interrupt Request */
-#define	DMA14_IRQ			0x00000080	/* DMA Channel 14 (SPI1) Interrupt Request */
-#define	DMA15_IRQ			0x00000100	/* DMA Channel 15 (SPI2) Interrupt Request */
-#define	DMA16_IRQ			0x00000200	/* DMA Channel 16 (UART1 RX) Interrupt Request */
-#define	DMA17_IRQ			0x00000400	/* DMA Channel 17 (UART1 TX) Interrupt Request */
-#define	DMA18_IRQ			0x00000800	/* DMA Channel 18 (UART2 RX) Interrupt Request */
-#define	DMA19_IRQ			0x00001000	/* DMA Channel 19 (UART2 TX) Interrupt Request */
-#define	TWI0_IRQ			0x00002000	/* TWI0	Interrupt Request */
-#define	TWI1_IRQ			0x00004000	/* TWI1	Interrupt Request */
-#define	CAN_RX_IRQ			0x00008000	/* CAN Receive Interrupt Request */
-#define	CAN_TX_IRQ			0x00010000	/* CAN Transmit	Interrupt Request */
-#define	MDMA1_0_IRQ			0x00020000	/* MemDMA1 Stream 0 Interrupt Request */
-#define	MDMA1_1_IRQ			0x00040000	/* MemDMA1 Stream 1 Interrupt Request */
-#define	MXVR_STAT_IRQ			0x00080000	/* MXVR	Status Interrupt Request */
-#define	MXVR_CM_IRQ			0x00100000	/* MXVR	Control	Message	Interrupt Request */
-#define	MXVR_AP_IRQ			0x00200000	/* MXVR	Asynchronous Packet Interrupt */
-
-/* the following are for backwards compatibility */
-#define	MDMA0_IRQ		MDMA1_0_IRQ
-#define	MDMA1_IRQ		MDMA1_1_IRQ
-
-#ifdef _MISRA_RULES
-#define	_MF15 0xFu
-#define	_MF7 7u
-#else
-#define	_MF15 0xF
-#define	_MF7 7
-#endif /* _MISRA_RULES */
-
-/* SIC_IMASKx Masks											 */
-#define	SIC_UNMASK_ALL	0x00000000					/* Unmask all peripheral interrupts */
-#define	SIC_MASK_ALL	0xFFFFFFFF					/* Mask	all peripheral interrupts */
-#ifdef _MISRA_RULES
-#define	SIC_MASK(x)		(1 << ((x)&0x1Fu))					/* Mask	Peripheral #x interrupt */
-#define	SIC_UNMASK(x)	(0xFFFFFFFFu ^ (1 << ((x)&0x1Fu)))	/* Unmask Peripheral #x	interrupt */
-#else
-#define	SIC_MASK(x)		(1 << ((x)&0x1F))					/* Mask	Peripheral #x interrupt */
-#define	SIC_UNMASK(x)	(0xFFFFFFFF ^ (1 << ((x)&0x1F)))	/* Unmask Peripheral #x	interrupt */
-#endif /* _MISRA_RULES */
-
-/* SIC_IWRx Masks											 */
-#define	IWR_DISABLE_ALL	0x00000000					/* Wakeup Disable all peripherals */
-#define	IWR_ENABLE_ALL	0xFFFFFFFF					/* Wakeup Enable all peripherals */
-#ifdef _MISRA_RULES
-#define	IWR_ENABLE(x)	(1 << ((x)&0x1Fu))					/* Wakeup Enable Peripheral #x */
-#define	IWR_DISABLE(x)	(0xFFFFFFFFu ^ (1 << ((x)&0x1Fu)))	/* Wakeup Disable Peripheral #x */
-#else
-#define	IWR_ENABLE(x)	(1 << ((x)&0x1F))					/* Wakeup Enable Peripheral #x */
-#define	IWR_DISABLE(x)	(0xFFFFFFFF ^ (1 << ((x)&0x1F)))	/* Wakeup Disable Peripheral #x */
-#endif /* _MISRA_RULES */
-
-
-/* ***************************** UART CONTROLLER MASKS ********************** */
-/* UARTx_LCR Register */
-#ifdef _MISRA_RULES
-#define	WLS(x)		(((x)-5u) & 0x03u)	/* Word	Length Select */
-#else
-#define	WLS(x)		(((x)-5) & 0x03)	/* Word	Length Select */
-#endif /* _MISRA_RULES */
-#define	STB			0x04				/* Stop	Bits */
-#define	PEN			0x08				/* Parity Enable */
-#define	EPS			0x10				/* Even	Parity Select */
-#define	STP			0x20				/* Stick Parity */
-#define	SB			0x40				/* Set Break */
-#define	DLAB		0x80				/* Divisor Latch Access */
-
-#define	DLAB_P		0x07
-#define	SB_P		0x06
-#define	STP_P		0x05
-#define	EPS_P		0x04
-#define	PEN_P		0x03
-#define	STB_P		0x02
-#define	WLS_P1		0x01
-#define	WLS_P0		0x00
-
-/* UARTx_MCR Register */
-#define	LOOP_ENA	0x10	/* Loopback Mode Enable */
-#define	LOOP_ENA_P	0x04
-/* Deprecated UARTx_MCR	Mask			 */
-
-/* UARTx_LSR Register */
-#define	DR			0x01	/* Data	Ready */
-#define	OE			0x02	/* Overrun Error */
-#define	PE			0x04	/* Parity Error */
-#define	FE			0x08	/* Framing Error */
-#define	BI			0x10	/* Break Interrupt */
-#define	THRE		0x20	/* THR Empty */
-#define	TEMT		0x40	/* TSR and UART_THR Empty */
-
-#define	TEMP_P		0x06
-#define	THRE_P		0x05
-#define	BI_P		0x04
-#define	FE_P		0x03
-#define	PE_P		0x02
-#define	OE_P		0x01
-#define	DR_P		0x00
-
-/* UARTx_IER Register */
-#define	ERBFI		0x01		/* Enable Receive Buffer Full Interrupt */
-#define	ETBEI		0x02		/* Enable Transmit Buffer Empty	Interrupt */
-#define	ELSI		0x04		/* Enable RX Status Interrupt */
-
-#define	ELSI_P		0x02
-#define	ETBEI_P		0x01
-#define	ERBFI_P		0x00
-
-/* UARTx_IIR Register */
-#define	NINT		0x01
-#define	STATUS_P1	0x02
-#define	STATUS_P0	0x01
-#define	NINT_P		0x00
-
-/* UARTx_GCTL Register */
-#define	UCEN		0x01		/* Enable UARTx	Clocks */
-#define	IREN		0x02		/* Enable IrDA Mode */
-#define	TPOLC		0x04		/* IrDA	TX Polarity Change */
-#define	RPOLC		0x08		/* IrDA	RX Polarity Change */
-#define	FPE			0x10		/* Force Parity	Error On Transmit */
-#define	FFE			0x20		/* Force Framing Error On Transmit */
-
-#define	FFE_P		0x05
-#define	FPE_P		0x04
-#define	RPOLC_P		0x03
-#define	TPOLC_P		0x02
-#define	IREN_P		0x01
-#define	UCEN_P		0x00
-
-
-/*  *********  PARALLEL	PERIPHERAL INTERFACE (PPI) MASKS ****************   */
-/*  PPI_CONTROL	Masks	      */
-#define	PORT_EN		0x0001	/* PPI Port Enable  */
-#define	PORT_DIR	0x0002	/* PPI Port Direction	    */
-#define	XFR_TYPE	0x000C	/* PPI Transfer	Type  */
-#define	PORT_CFG	0x0030	/* PPI Port Configuration */
-#define	FLD_SEL		0x0040	/* PPI Active Field Select */
-#define	PACK_EN		0x0080	/* PPI Packing Mode */
-/* previous versions of	defBF539.h erroneously included	DMA32 (PPI 32-bit DMA Enable) */
-#define	SKIP_EN		0x0200	/* PPI Skip Element Enable */
-#define	SKIP_EO		0x0400	/* PPI Skip Even/Odd Elements */
-#define	DLENGTH		0x3800	/* PPI Data Length  */
-#define	DLEN_8		0x0	     /*	PPI Data Length	mask for DLEN=8 */
-#define	DLEN_10		0x0800		/* Data	Length = 10 Bits */
-#define	DLEN_11		0x1000		/* Data	Length = 11 Bits */
-#define	DLEN_12		0x1800		/* Data	Length = 12 Bits */
-#define	DLEN_13		0x2000		/* Data	Length = 13 Bits */
-#define	DLEN_14		0x2800		/* Data	Length = 14 Bits */
-#define	DLEN_15		0x3000		/* Data	Length = 15 Bits */
-#define	DLEN_16		0x3800		/* Data	Length = 16 Bits */
-#ifdef _MISRA_RULES
-#define	DLEN(x)		((((x)-9u) & 0x07u) << 11)  /* PPI Data	Length (only works for x=10-->x=16) */
-#else
-#define	DLEN(x)		((((x)-9) & 0x07) << 11)  /* PPI Data Length (only works for x=10-->x=16) */
-#endif /* _MISRA_RULES */
-#define	POL			0xC000	/* PPI Signal Polarities       */
-#define	POLC		0x4000		/* PPI Clock Polarity */
-#define	POLS		0x8000		/* PPI Frame Sync Polarity */
-
-
-/* PPI_STATUS Masks					     */
-#define	FLD			0x0400	/* Field Indicator   */
-#define	FT_ERR		0x0800	/* Frame Track Error */
-#define	OVR			0x1000	/* FIFO	Overflow Error */
-#define	UNDR		0x2000	/* FIFO	Underrun Error */
-#define	ERR_DET		0x4000	/* Error Detected Indicator */
-#define	ERR_NCOR	0x8000	/* Error Not Corrected Indicator */
-
-
-/* **********  DMA CONTROLLER MASKS  ***********************/
-
-/* DMAx_PERIPHERAL_MAP,	MDMA_yy_PERIPHERAL_MAP Masks */
-
-#define	CTYPE			0x0040	/* DMA Channel Type Indicator */
-#define	CTYPE_P			0x6		/* DMA Channel Type Indicator BIT POSITION */
-#define	PCAP8			0x0080	/* DMA 8-bit Operation Indicator   */
-#define	PCAP16			0x0100	/* DMA 16-bit Operation	Indicator */
-#define	PCAP32			0x0200	/* DMA 32-bit Operation	Indicator */
-#define	PCAPWR			0x0400	/* DMA Write Operation Indicator */
-#define	PCAPRD			0x0800	/* DMA Read Operation Indicator */
-#define	PMAP			0xF000	/* DMA Peripheral Map Field */
-
-/* PMAP	Encodings For DMA Controller 0 */
-#define	PMAP_PPI		0x0000	/* PMAP	PPI Port DMA */
-#define	PMAP_SPORT0RX	0x1000	/* PMAP	SPORT0 Receive DMA */
-#define	PMAP_SPORT0TX	0x2000	/* PMAP	SPORT0 Transmit	DMA */
-#define	PMAP_SPORT1RX	0x3000	/* PMAP	SPORT1 Receive DMA */
-#define	PMAP_SPORT1TX	0x4000	/* PMAP	SPORT1 Transmit	DMA */
-#define	PMAP_SPI0		0x5000	/* PMAP	SPI DMA */
-#define	PMAP_UART0RX		0x6000	/* PMAP	UART Receive DMA */
-#define	PMAP_UART0TX		0x7000	/* PMAP	UART Transmit DMA */
-
-/* PMAP	Encodings For DMA Controller 1 */
-#define	PMAP_SPORT2RX	    0x0000  /* PMAP SPORT2 Receive DMA */
-#define	PMAP_SPORT2TX	    0x1000  /* PMAP SPORT2 Transmit DMA */
-#define	PMAP_SPORT3RX	    0x2000  /* PMAP SPORT3 Receive DMA */
-#define	PMAP_SPORT3TX	    0x3000  /* PMAP SPORT3 Transmit DMA */
-#define	PMAP_SPI1	    0x6000  /* PMAP SPI1 DMA */
-#define	PMAP_SPI2	    0x7000  /* PMAP SPI2 DMA */
-#define	PMAP_UART1RX	    0x8000  /* PMAP UART1 Receive DMA */
-#define	PMAP_UART1TX	    0x9000  /* PMAP UART1 Transmit DMA */
-#define	PMAP_UART2RX	    0xA000  /* PMAP UART2 Receive DMA */
-#define	PMAP_UART2TX	    0xB000  /* PMAP UART2 Transmit DMA */
-
-
-/*  *************  GENERAL PURPOSE TIMER MASKS	******************** */
-/* PWM Timer bit definitions */
-/* TIMER_ENABLE	Register */
-#define	TIMEN0			0x0001	/* Enable Timer	0 */
-#define	TIMEN1			0x0002	/* Enable Timer	1 */
-#define	TIMEN2			0x0004	/* Enable Timer	2 */
-
-#define	TIMEN0_P		0x00
-#define	TIMEN1_P		0x01
-#define	TIMEN2_P		0x02
-
-/* TIMER_DISABLE Register */
-#define	TIMDIS0			0x0001	/* Disable Timer 0 */
-#define	TIMDIS1			0x0002	/* Disable Timer 1 */
-#define	TIMDIS2			0x0004	/* Disable Timer 2 */
-
-#define	TIMDIS0_P		0x00
-#define	TIMDIS1_P		0x01
-#define	TIMDIS2_P		0x02
-
-/* TIMER_STATUS	Register */
-#define	TIMIL0			0x0001	/* Timer 0 Interrupt */
-#define	TIMIL1			0x0002	/* Timer 1 Interrupt */
-#define	TIMIL2			0x0004	/* Timer 2 Interrupt */
-#define	TOVF_ERR0		0x0010	/* Timer 0 Counter Overflow */
-#define	TOVF_ERR1		0x0020	/* Timer 1 Counter Overflow */
-#define	TOVF_ERR2		0x0040	/* Timer 2 Counter Overflow */
-#define	TRUN0			0x1000	/* Timer 0 Slave Enable	Status */
-#define	TRUN1			0x2000	/* Timer 1 Slave Enable	Status */
-#define	TRUN2			0x4000	/* Timer 2 Slave Enable	Status */
-
-#define	TIMIL0_P		0x00
-#define	TIMIL1_P		0x01
-#define	TIMIL2_P		0x02
-#define	TOVF_ERR0_P		0x04
-#define	TOVF_ERR1_P		0x05
-#define	TOVF_ERR2_P		0x06
-#define	TRUN0_P			0x0C
-#define	TRUN1_P			0x0D
-#define	TRUN2_P			0x0E
-
-/* Alternate Deprecated	Macros Provided	For Backwards Code Compatibility */
-#define	TOVL_ERR0		TOVF_ERR0
-#define	TOVL_ERR1		TOVF_ERR1
-#define	TOVL_ERR2		TOVF_ERR2
-#define	TOVL_ERR0_P		TOVF_ERR0_P
-#define	TOVL_ERR1_P	TOVF_ERR1_P
-#define	TOVL_ERR2_P	TOVF_ERR2_P
-
-/* TIMERx_CONFIG Registers */
-#define	PWM_OUT			0x0001
-#define	WDTH_CAP		0x0002
-#define	EXT_CLK			0x0003
-#define	PULSE_HI		0x0004
-#define	PERIOD_CNT		0x0008
-#define	IRQ_ENA			0x0010
-#define	TIN_SEL			0x0020
-#define	OUT_DIS			0x0040
-#define	CLK_SEL			0x0080
-#define	TOGGLE_HI		0x0100
-#define	EMU_RUN			0x0200
-#ifdef _MISRA_RULES
-#define	ERR_TYP(x)		(((x) &	0x03u) << 14)
-#else
-#define	ERR_TYP(x)		(((x) &	0x03) << 14)
-#endif /* _MISRA_RULES */
-
-#define	TMODE_P0		0x00
-#define	TMODE_P1		0x01
-#define	PULSE_HI_P		0x02
-#define	PERIOD_CNT_P	0x03
-#define	IRQ_ENA_P		0x04
-#define	TIN_SEL_P		0x05
-#define	OUT_DIS_P		0x06
-#define	CLK_SEL_P		0x07
-#define	TOGGLE_HI_P		0x08
-#define	EMU_RUN_P		0x09
-#define	ERR_TYP_P0		0x0E
-#define	ERR_TYP_P1		0x0F
-
-
-/*/ ******************	 GENERAL-PURPOSE I/O  ********************* */
-/*  Flag I/O (FIO_) Masks */
-#define	PF0			0x0001
-#define	PF1			0x0002
-#define	PF2			0x0004
-#define	PF3			0x0008
-#define	PF4			0x0010
-#define	PF5			0x0020
-#define	PF6			0x0040
-#define	PF7			0x0080
-#define	PF8			0x0100
-#define	PF9			0x0200
-#define	PF10		0x0400
-#define	PF11		0x0800
-#define	PF12		0x1000
-#define	PF13		0x2000
-#define	PF14		0x4000
-#define	PF15		0x8000
-
-/*  PORT F BIT POSITIONS */
-#define	PF0_P		0x0
-#define	PF1_P		0x1
-#define	PF2_P		0x2
-#define	PF3_P		0x3
-#define	PF4_P		0x4
-#define	PF5_P		0x5
-#define	PF6_P		0x6
-#define	PF7_P		0x7
-#define	PF8_P		0x8
-#define	PF9_P		0x9
-#define	PF10_P		0xA
-#define	PF11_P		0xB
-#define	PF12_P		0xC
-#define	PF13_P		0xD
-#define	PF14_P		0xE
-#define	PF15_P		0xF
-
-
-/*******************   GPIO MASKS  *********************/
-/* Port	C Masks */
-#define	PC0		0x0001
-#define	PC1		0x0002
-#define	PC4		0x0010
-#define	PC5		0x0020
-#define	PC6		0x0040
-#define	PC7		0x0080
-#define	PC8		0x0100
-#define	PC9		0x0200
-/* Port	C Bit Positions */
-#define	PC0_P	0x0
-#define	PC1_P	0x1
-#define	PC4_P	0x4
-#define	PC5_P	0x5
-#define	PC6_P	0x6
-#define	PC7_P	0x7
-#define	PC8_P	0x8
-#define	PC9_P	0x9
-
-/* Port	D */
-#define	PD0		0x0001
-#define	PD1		0x0002
-#define	PD2		0x0004
-#define	PD3		0x0008
-#define	PD4		0x0010
-#define	PD5		0x0020
-#define	PD6		0x0040
-#define	PD7		0x0080
-#define	PD8		0x0100
-#define	PD9		0x0200
-#define	PD10	0x0400
-#define	PD11	0x0800
-#define	PD12	0x1000
-#define	PD13	0x2000
-#define	PD14	0x4000
-#define	PD15	0x8000
-/* Port	D Bit Positions */
-#define	PD0_P	0x0
-#define	PD1_P	0x1
-#define	PD2_P	0x2
-#define	PD3_P	0x3
-#define	PD4_P	0x4
-#define	PD5_P	0x5
-#define	PD6_P	0x6
-#define	PD7_P	0x7
-#define	PD8_P	0x8
-#define	PD9_P	0x9
-#define	PD10_P	0xA
-#define	PD11_P	0xB
-#define	PD12_P	0xC
-#define	PD13_P	0xD
-#define	PD14_P	0xE
-#define	PD15_P	0xF
-
-/* Port	E */
-#define	PE0		0x0001
-#define	PE1		0x0002
-#define	PE2		0x0004
-#define	PE3		0x0008
-#define	PE4		0x0010
-#define	PE5		0x0020
-#define	PE6		0x0040
-#define	PE7		0x0080
-#define	PE8		0x0100
-#define	PE9		0x0200
-#define	PE10	0x0400
-#define	PE11	0x0800
-#define	PE12	0x1000
-#define	PE13	0x2000
-#define	PE14	0x4000
-#define	PE15	0x8000
-/* Port	E Bit Positions */
-#define	PE0_P	0x0
-#define	PE1_P	0x1
-#define	PE2_P	0x2
-#define	PE3_P	0x3
-#define	PE4_P	0x4
-#define	PE5_P	0x5
-#define	PE6_P	0x6
-#define	PE7_P	0x7
-#define	PE8_P	0x8
-#define	PE9_P	0x9
-#define	PE10_P	0xA
-#define	PE11_P	0xB
-#define	PE12_P	0xC
-#define	PE13_P	0xD
-#define	PE14_P	0xE
-#define	PE15_P	0xF
-
-/* *********************  ASYNCHRONOUS MEMORY CONTROLLER MASKS	************* */
-/* EBIU_AMGCTL Masks */
-#define	AMCKEN		0x0001	/* Enable CLKOUT */
-#define	AMBEN_NONE	0x0000	/* All Banks Disabled */
-#define	AMBEN_B0	0x0002	/* Enable Asynchronous Memory Bank 0 only */
-#define	AMBEN_B0_B1	0x0004	/* Enable Asynchronous Memory Banks 0 &	1 only */
-#define	AMBEN_B0_B1_B2	0x0006	/* Enable Asynchronous Memory Banks 0, 1, and 2 */
-#define	AMBEN_ALL	0x0008	/* Enable Asynchronous Memory Banks (all) 0, 1,	2, and 3 */
-#define	CDPRIO		0x0100	/* DMA has priority over core for external accesses */
-
-/* EBIU_AMGCTL Bit Positions */
-#define	AMCKEN_P		0x0000	/* Enable CLKOUT */
-#define	AMBEN_P0		0x0001	/* Asynchronous	Memory Enable, 000 - banks 0-3 disabled, 001 - Bank 0 enabled */
-#define	AMBEN_P1		0x0002	/* Asynchronous	Memory Enable, 010 - banks 0&1 enabled,	 011 - banks 0-3 enabled */
-#define	AMBEN_P2		0x0003	/* Asynchronous	Memory Enable, 1xx - All banks (bank 0,	1, 2, and 3) enabled */
-
-/* EBIU_AMBCTL0	Masks */
-#define	B0RDYEN			0x00000001  /* Bank 0 RDY Enable, 0=disable, 1=enable */
-#define	B0RDYPOL		0x00000002  /* Bank 0 RDY Active high, 0=active	low, 1=active high */
-#define	B0TT_1			0x00000004  /* Bank 0 Transition Time from Read	to Write = 1 cycle */
-#define	B0TT_2			0x00000008  /* Bank 0 Transition Time from Read	to Write = 2 cycles */
-#define	B0TT_3			0x0000000C  /* Bank 0 Transition Time from Read	to Write = 3 cycles */
-#define	B0TT_4			0x00000000  /* Bank 0 Transition Time from Read	to Write = 4 cycles */
-#define	B0ST_1			0x00000010  /* Bank 0 Setup Time from AOE asserted to Read/Write asserted=1 cycle */
-#define	B0ST_2			0x00000020  /* Bank 0 Setup Time from AOE asserted to Read/Write asserted=2 cycles */
-#define	B0ST_3			0x00000030  /* Bank 0 Setup Time from AOE asserted to Read/Write asserted=3 cycles */
-#define	B0ST_4			0x00000000  /* Bank 0 Setup Time from AOE asserted to Read/Write asserted=4 cycles */
-#define	B0HT_1			0x00000040  /* Bank 0 Hold Time	from Read/Write	deasserted to AOE deasserted = 1 cycle */
-#define	B0HT_2			0x00000080  /* Bank 0 Hold Time	from Read/Write	deasserted to AOE deasserted = 2 cycles */
-#define	B0HT_3			0x000000C0  /* Bank 0 Hold Time	from Read/Write	deasserted to AOE deasserted = 3 cycles */
-#define	B0HT_0			0x00000000  /* Bank 0 Hold Time	from Read/Write	deasserted to AOE deasserted = 0 cycles */
-#define	B0RAT_1			0x00000100  /* Bank 0 Read Access Time = 1 cycle */
-#define	B0RAT_2			0x00000200  /* Bank 0 Read Access Time = 2 cycles */
-#define	B0RAT_3			0x00000300  /* Bank 0 Read Access Time = 3 cycles */
-#define	B0RAT_4			0x00000400  /* Bank 0 Read Access Time = 4 cycles */
-#define	B0RAT_5			0x00000500  /* Bank 0 Read Access Time = 5 cycles */
-#define	B0RAT_6			0x00000600  /* Bank 0 Read Access Time = 6 cycles */
-#define	B0RAT_7			0x00000700  /* Bank 0 Read Access Time = 7 cycles */
-#define	B0RAT_8			0x00000800  /* Bank 0 Read Access Time = 8 cycles */
-#define	B0RAT_9			0x00000900  /* Bank 0 Read Access Time = 9 cycles */
-#define	B0RAT_10		0x00000A00  /* Bank 0 Read Access Time = 10 cycles */
-#define	B0RAT_11		0x00000B00  /* Bank 0 Read Access Time = 11 cycles */
-#define	B0RAT_12		0x00000C00  /* Bank 0 Read Access Time = 12 cycles */
-#define	B0RAT_13		0x00000D00  /* Bank 0 Read Access Time = 13 cycles */
-#define	B0RAT_14		0x00000E00  /* Bank 0 Read Access Time = 14 cycles */
-#define	B0RAT_15		0x00000F00  /* Bank 0 Read Access Time = 15 cycles */
-#define	B0WAT_1			0x00001000  /* Bank 0 Write Access Time	= 1 cycle */
-#define	B0WAT_2			0x00002000  /* Bank 0 Write Access Time	= 2 cycles */
-#define	B0WAT_3			0x00003000  /* Bank 0 Write Access Time	= 3 cycles */
-#define	B0WAT_4			0x00004000  /* Bank 0 Write Access Time	= 4 cycles */
-#define	B0WAT_5			0x00005000  /* Bank 0 Write Access Time	= 5 cycles */
-#define	B0WAT_6			0x00006000  /* Bank 0 Write Access Time	= 6 cycles */
-#define	B0WAT_7			0x00007000  /* Bank 0 Write Access Time	= 7 cycles */
-#define	B0WAT_8			0x00008000  /* Bank 0 Write Access Time	= 8 cycles */
-#define	B0WAT_9			0x00009000  /* Bank 0 Write Access Time	= 9 cycles */
-#define	B0WAT_10		0x0000A000  /* Bank 0 Write Access Time	= 10 cycles */
-#define	B0WAT_11		0x0000B000  /* Bank 0 Write Access Time	= 11 cycles */
-#define	B0WAT_12		0x0000C000  /* Bank 0 Write Access Time	= 12 cycles */
-#define	B0WAT_13		0x0000D000  /* Bank 0 Write Access Time	= 13 cycles */
-#define	B0WAT_14		0x0000E000  /* Bank 0 Write Access Time	= 14 cycles */
-#define	B0WAT_15		0x0000F000  /* Bank 0 Write Access Time	= 15 cycles */
-#define	B1RDYEN			0x00010000  /* Bank 1 RDY enable, 0=disable, 1=enable */
-#define	B1RDYPOL		0x00020000  /* Bank 1 RDY Active high, 0=active	low, 1=active high */
-#define	B1TT_1			0x00040000  /* Bank 1 Transition Time from Read	to Write = 1 cycle */
-#define	B1TT_2			0x00080000  /* Bank 1 Transition Time from Read	to Write = 2 cycles */
-#define	B1TT_3			0x000C0000  /* Bank 1 Transition Time from Read	to Write = 3 cycles */
-#define	B1TT_4			0x00000000  /* Bank 1 Transition Time from Read	to Write = 4 cycles */
-#define	B1ST_1			0x00100000  /* Bank 1 Setup Time from AOE asserted to Read or Write asserted = 1 cycle */
-#define	B1ST_2			0x00200000  /* Bank 1 Setup Time from AOE asserted to Read or Write asserted = 2 cycles */
-#define	B1ST_3			0x00300000  /* Bank 1 Setup Time from AOE asserted to Read or Write asserted = 3 cycles */
-#define	B1ST_4			0x00000000  /* Bank 1 Setup Time from AOE asserted to Read or Write asserted = 4 cycles */
-#define	B1HT_1			0x00400000  /* Bank 1 Hold Time	from Read or Write deasserted to AOE deasserted	= 1 cycle */
-#define	B1HT_2			0x00800000  /* Bank 1 Hold Time	from Read or Write deasserted to AOE deasserted	= 2 cycles */
-#define	B1HT_3			0x00C00000  /* Bank 1 Hold Time	from Read or Write deasserted to AOE deasserted	= 3 cycles */
-#define	B1HT_0			0x00000000  /* Bank 1 Hold Time	from Read or Write deasserted to AOE deasserted	= 0 cycles */
-#define	B1RAT_1			0x01000000  /* Bank 1 Read Access Time = 1 cycle */
-#define	B1RAT_2			0x02000000  /* Bank 1 Read Access Time = 2 cycles */
-#define	B1RAT_3			0x03000000  /* Bank 1 Read Access Time = 3 cycles */
-#define	B1RAT_4			0x04000000  /* Bank 1 Read Access Time = 4 cycles */
-#define	B1RAT_5			0x05000000  /* Bank 1 Read Access Time = 5 cycles */
-#define	B1RAT_6			0x06000000  /* Bank 1 Read Access Time = 6 cycles */
-#define	B1RAT_7			0x07000000  /* Bank 1 Read Access Time = 7 cycles */
-#define	B1RAT_8			0x08000000  /* Bank 1 Read Access Time = 8 cycles */
-#define	B1RAT_9			0x09000000  /* Bank 1 Read Access Time = 9 cycles */
-#define	B1RAT_10		0x0A000000  /* Bank 1 Read Access Time = 10 cycles */
-#define	B1RAT_11		0x0B000000  /* Bank 1 Read Access Time = 11 cycles */
-#define	B1RAT_12		0x0C000000  /* Bank 1 Read Access Time = 12 cycles */
-#define	B1RAT_13		0x0D000000  /* Bank 1 Read Access Time = 13 cycles */
-#define	B1RAT_14		0x0E000000  /* Bank 1 Read Access Time = 14 cycles */
-#define	B1RAT_15		0x0F000000  /* Bank 1 Read Access Time = 15 cycles */
-#define	B1WAT_1			0x10000000 /* Bank 1 Write Access Time = 1 cycle */
-#define	B1WAT_2			0x20000000  /* Bank 1 Write Access Time	= 2 cycles */
-#define	B1WAT_3			0x30000000  /* Bank 1 Write Access Time	= 3 cycles */
-#define	B1WAT_4			0x40000000  /* Bank 1 Write Access Time	= 4 cycles */
-#define	B1WAT_5			0x50000000  /* Bank 1 Write Access Time	= 5 cycles */
-#define	B1WAT_6			0x60000000  /* Bank 1 Write Access Time	= 6 cycles */
-#define	B1WAT_7			0x70000000  /* Bank 1 Write Access Time	= 7 cycles */
-#define	B1WAT_8			0x80000000  /* Bank 1 Write Access Time	= 8 cycles */
-#define	B1WAT_9			0x90000000  /* Bank 1 Write Access Time	= 9 cycles */
-#define	B1WAT_10		0xA0000000  /* Bank 1 Write Access Time	= 10 cycles */
-#define	B1WAT_11		0xB0000000  /* Bank 1 Write Access Time	= 11 cycles */
-#define	B1WAT_12		0xC0000000  /* Bank 1 Write Access Time	= 12 cycles */
-#define	B1WAT_13		0xD0000000  /* Bank 1 Write Access Time	= 13 cycles */
-#define	B1WAT_14		0xE0000000  /* Bank 1 Write Access Time	= 14 cycles */
-#define	B1WAT_15		0xF0000000  /* Bank 1 Write Access Time	= 15 cycles */
-
-/* EBIU_AMBCTL1	Masks */
-#define	B2RDYEN			0x00000001  /* Bank 2 RDY Enable, 0=disable, 1=enable */
-#define	B2RDYPOL		0x00000002  /* Bank 2 RDY Active high, 0=active	low, 1=active high */
-#define	B2TT_1			0x00000004  /* Bank 2 Transition Time from Read	to Write = 1 cycle */
-#define	B2TT_2			0x00000008  /* Bank 2 Transition Time from Read	to Write = 2 cycles */
-#define	B2TT_3			0x0000000C  /* Bank 2 Transition Time from Read	to Write = 3 cycles */
-#define	B2TT_4			0x00000000  /* Bank 2 Transition Time from Read	to Write = 4 cycles */
-#define	B2ST_1			0x00000010  /* Bank 2 Setup Time from AOE asserted to Read or Write asserted = 1 cycle */
-#define	B2ST_2			0x00000020  /* Bank 2 Setup Time from AOE asserted to Read or Write asserted = 2 cycles */
-#define	B2ST_3			0x00000030  /* Bank 2 Setup Time from AOE asserted to Read or Write asserted = 3 cycles */
-#define	B2ST_4			0x00000000  /* Bank 2 Setup Time from AOE asserted to Read or Write asserted = 4 cycles */
-#define	B2HT_1			0x00000040  /* Bank 2 Hold Time	from Read or Write deasserted to AOE deasserted	= 1 cycle */
-#define	B2HT_2			0x00000080  /* Bank 2 Hold Time	from Read or Write deasserted to AOE deasserted	= 2 cycles */
-#define	B2HT_3			0x000000C0  /* Bank 2 Hold Time	from Read or Write deasserted to AOE deasserted	= 3 cycles */
-#define	B2HT_0			0x00000000  /* Bank 2 Hold Time	from Read or Write deasserted to AOE deasserted	= 0 cycles */
-#define	B2RAT_1			0x00000100  /* Bank 2 Read Access Time = 1 cycle */
-#define	B2RAT_2			0x00000200  /* Bank 2 Read Access Time = 2 cycles */
-#define	B2RAT_3			0x00000300  /* Bank 2 Read Access Time = 3 cycles */
-#define	B2RAT_4			0x00000400  /* Bank 2 Read Access Time = 4 cycles */
-#define	B2RAT_5			0x00000500  /* Bank 2 Read Access Time = 5 cycles */
-#define	B2RAT_6			0x00000600  /* Bank 2 Read Access Time = 6 cycles */
-#define	B2RAT_7			0x00000700  /* Bank 2 Read Access Time = 7 cycles */
-#define	B2RAT_8			0x00000800  /* Bank 2 Read Access Time = 8 cycles */
-#define	B2RAT_9			0x00000900  /* Bank 2 Read Access Time = 9 cycles */
-#define	B2RAT_10		0x00000A00  /* Bank 2 Read Access Time = 10 cycles */
-#define	B2RAT_11		0x00000B00  /* Bank 2 Read Access Time = 11 cycles */
-#define	B2RAT_12		0x00000C00  /* Bank 2 Read Access Time = 12 cycles */
-#define	B2RAT_13		0x00000D00  /* Bank 2 Read Access Time = 13 cycles */
-#define	B2RAT_14		0x00000E00  /* Bank 2 Read Access Time = 14 cycles */
-#define	B2RAT_15		0x00000F00  /* Bank 2 Read Access Time = 15 cycles */
-#define	B2WAT_1			0x00001000  /* Bank 2 Write Access Time	= 1 cycle */
-#define	B2WAT_2			0x00002000  /* Bank 2 Write Access Time	= 2 cycles */
-#define	B2WAT_3			0x00003000  /* Bank 2 Write Access Time	= 3 cycles */
-#define	B2WAT_4			0x00004000  /* Bank 2 Write Access Time	= 4 cycles */
-#define	B2WAT_5			0x00005000  /* Bank 2 Write Access Time	= 5 cycles */
-#define	B2WAT_6			0x00006000  /* Bank 2 Write Access Time	= 6 cycles */
-#define	B2WAT_7			0x00007000  /* Bank 2 Write Access Time	= 7 cycles */
-#define	B2WAT_8			0x00008000  /* Bank 2 Write Access Time	= 8 cycles */
-#define	B2WAT_9			0x00009000  /* Bank 2 Write Access Time	= 9 cycles */
-#define	B2WAT_10		0x0000A000  /* Bank 2 Write Access Time	= 10 cycles */
-#define	B2WAT_11		0x0000B000  /* Bank 2 Write Access Time	= 11 cycles */
-#define	B2WAT_12		0x0000C000  /* Bank 2 Write Access Time	= 12 cycles */
-#define	B2WAT_13		0x0000D000  /* Bank 2 Write Access Time	= 13 cycles */
-#define	B2WAT_14		0x0000E000  /* Bank 2 Write Access Time	= 14 cycles */
-#define	B2WAT_15		0x0000F000  /* Bank 2 Write Access Time	= 15 cycles */
-#define	B3RDYEN			0x00010000  /* Bank 3 RDY enable, 0=disable, 1=enable */
-#define	B3RDYPOL		0x00020000  /* Bank 3 RDY Active high, 0=active	low, 1=active high */
-#define	B3TT_1			0x00040000  /* Bank 3 Transition Time from Read	to Write = 1 cycle */
-#define	B3TT_2			0x00080000  /* Bank 3 Transition Time from Read	to Write = 2 cycles */
-#define	B3TT_3			0x000C0000  /* Bank 3 Transition Time from Read	to Write = 3 cycles */
-#define	B3TT_4			0x00000000  /* Bank 3 Transition Time from Read	to Write = 4 cycles */
-#define	B3ST_1			0x00100000  /* Bank 3 Setup Time from AOE asserted to Read or Write asserted = 1 cycle */
-#define	B3ST_2			0x00200000  /* Bank 3 Setup Time from AOE asserted to Read or Write asserted = 2 cycles */
-#define	B3ST_3			0x00300000  /* Bank 3 Setup Time from AOE asserted to Read or Write asserted = 3 cycles */
-#define	B3ST_4			0x00000000  /* Bank 3 Setup Time from AOE asserted to Read or Write asserted = 4 cycles */
-#define	B3HT_1			0x00400000  /* Bank 3 Hold Time	from Read or Write deasserted to AOE deasserted	= 1 cycle */
-#define	B3HT_2			0x00800000  /* Bank 3 Hold Time	from Read or Write deasserted to AOE deasserted	= 2 cycles */
-#define	B3HT_3			0x00C00000  /* Bank 3 Hold Time	from Read or Write deasserted to AOE deasserted	= 3 cycles */
-#define	B3HT_0			0x00000000  /* Bank 3 Hold Time	from Read or Write deasserted to AOE deasserted	= 0 cycles */
-#define	B3RAT_1			0x01000000 /* Bank 3 Read Access Time =	1 cycle */
-#define	B3RAT_2			0x02000000  /* Bank 3 Read Access Time = 2 cycles */
-#define	B3RAT_3			0x03000000  /* Bank 3 Read Access Time = 3 cycles */
-#define	B3RAT_4			0x04000000  /* Bank 3 Read Access Time = 4 cycles */
-#define	B3RAT_5			0x05000000  /* Bank 3 Read Access Time = 5 cycles */
-#define	B3RAT_6			0x06000000  /* Bank 3 Read Access Time = 6 cycles */
-#define	B3RAT_7			0x07000000  /* Bank 3 Read Access Time = 7 cycles */
-#define	B3RAT_8			0x08000000  /* Bank 3 Read Access Time = 8 cycles */
-#define	B3RAT_9			0x09000000  /* Bank 3 Read Access Time = 9 cycles */
-#define	B3RAT_10		0x0A000000  /* Bank 3 Read Access Time = 10 cycles */
-#define	B3RAT_11		0x0B000000  /* Bank 3 Read Access Time = 11 cycles */
-#define	B3RAT_12		0x0C000000  /* Bank 3 Read Access Time = 12 cycles */
-#define	B3RAT_13		0x0D000000  /* Bank 3 Read Access Time = 13 cycles */
-#define	B3RAT_14		0x0E000000  /* Bank 3 Read Access Time = 14 cycles */
-#define	B3RAT_15		0x0F000000  /* Bank 3 Read Access Time = 15 cycles */
-#define	B3WAT_1			0x10000000 /* Bank 3 Write Access Time = 1 cycle */
-#define	B3WAT_2			0x20000000  /* Bank 3 Write Access Time	= 2 cycles */
-#define	B3WAT_3			0x30000000  /* Bank 3 Write Access Time	= 3 cycles */
-#define	B3WAT_4			0x40000000  /* Bank 3 Write Access Time	= 4 cycles */
-#define	B3WAT_5			0x50000000  /* Bank 3 Write Access Time	= 5 cycles */
-#define	B3WAT_6			0x60000000  /* Bank 3 Write Access Time	= 6 cycles */
-#define	B3WAT_7			0x70000000  /* Bank 3 Write Access Time	= 7 cycles */
-#define	B3WAT_8			0x80000000  /* Bank 3 Write Access Time	= 8 cycles */
-#define	B3WAT_9			0x90000000  /* Bank 3 Write Access Time	= 9 cycles */
-#define	B3WAT_10		0xA0000000  /* Bank 3 Write Access Time	= 10 cycles */
-#define	B3WAT_11		0xB0000000  /* Bank 3 Write Access Time	= 11 cycles */
-#define	B3WAT_12		0xC0000000  /* Bank 3 Write Access Time	= 12 cycles */
-#define	B3WAT_13		0xD0000000  /* Bank 3 Write Access Time	= 13 cycles */
-#define	B3WAT_14		0xE0000000  /* Bank 3 Write Access Time	= 14 cycles */
-#define	B3WAT_15		0xF0000000  /* Bank 3 Write Access Time	= 15 cycles */
-
-/* **********************  SDRAM CONTROLLER MASKS  *************************** */
-/* EBIU_SDGCTL Masks */
-#define	SCTLE			0x00000001 /* Enable SCLK[0], /SRAS, /SCAS, /SWE, SDQM[3:0] */
-#define	CL_2			0x00000008 /* SDRAM CAS	latency	= 2 cycles */
-#define	CL_3			0x0000000C /* SDRAM CAS	latency	= 3 cycles */
-#define	PFE				0x00000010 /* Enable SDRAM prefetch */
-#define	PFP				0x00000020 /* Prefetch has priority over AMC requests */
-#define	PASR_ALL		0x00000000	/* All 4 SDRAM Banks Refreshed In Self-Refresh */
-#define	PASR_B0_B1		0x00000010	/* SDRAM Banks 0 and 1 Are Refreshed In	Self-Refresh */
-#define	PASR_B0			0x00000020	/* Only	SDRAM Bank 0 Is	Refreshed In Self-Refresh */
-#define	TRAS_1			0x00000040 /* SDRAM tRAS = 1 cycle */
-#define	TRAS_2			0x00000080 /* SDRAM tRAS = 2 cycles */
-#define	TRAS_3			0x000000C0 /* SDRAM tRAS = 3 cycles */
-#define	TRAS_4			0x00000100 /* SDRAM tRAS = 4 cycles */
-#define	TRAS_5			0x00000140 /* SDRAM tRAS = 5 cycles */
-#define	TRAS_6			0x00000180 /* SDRAM tRAS = 6 cycles */
-#define	TRAS_7			0x000001C0 /* SDRAM tRAS = 7 cycles */
-#define	TRAS_8			0x00000200 /* SDRAM tRAS = 8 cycles */
-#define	TRAS_9			0x00000240 /* SDRAM tRAS = 9 cycles */
-#define	TRAS_10			0x00000280 /* SDRAM tRAS = 10 cycles */
-#define	TRAS_11			0x000002C0 /* SDRAM tRAS = 11 cycles */
-#define	TRAS_12			0x00000300 /* SDRAM tRAS = 12 cycles */
-#define	TRAS_13			0x00000340 /* SDRAM tRAS = 13 cycles */
-#define	TRAS_14			0x00000380 /* SDRAM tRAS = 14 cycles */
-#define	TRAS_15			0x000003C0 /* SDRAM tRAS = 15 cycles */
-#define	TRP_1			0x00000800 /* SDRAM tRP	= 1 cycle */
-#define	TRP_2			0x00001000 /* SDRAM tRP	= 2 cycles */
-#define	TRP_3			0x00001800 /* SDRAM tRP	= 3 cycles */
-#define	TRP_4			0x00002000 /* SDRAM tRP	= 4 cycles */
-#define	TRP_5			0x00002800 /* SDRAM tRP	= 5 cycles */
-#define	TRP_6			0x00003000 /* SDRAM tRP	= 6 cycles */
-#define	TRP_7			0x00003800 /* SDRAM tRP	= 7 cycles */
-#define	TRCD_1			0x00008000 /* SDRAM tRCD = 1 cycle */
-#define	TRCD_2			0x00010000 /* SDRAM tRCD = 2 cycles */
-#define	TRCD_3			0x00018000 /* SDRAM tRCD = 3 cycles */
-#define	TRCD_4			0x00020000 /* SDRAM tRCD = 4 cycles */
-#define	TRCD_5			0x00028000 /* SDRAM tRCD = 5 cycles */
-#define	TRCD_6			0x00030000 /* SDRAM tRCD = 6 cycles */
-#define	TRCD_7			0x00038000 /* SDRAM tRCD = 7 cycles */
-#define	TWR_1			0x00080000 /* SDRAM tWR	= 1 cycle */
-#define	TWR_2			0x00100000 /* SDRAM tWR	= 2 cycles */
-#define	TWR_3			0x00180000 /* SDRAM tWR	= 3 cycles */
-#define	PUPSD			0x00200000 /*Power-up start delay */
-#define	PSM				0x00400000 /* SDRAM power-up sequence =	Precharge, mode	register set, 8	CBR refresh cycles */
-#define	PSS				0x00800000 /* enable SDRAM power-up sequence on	next SDRAM access */
-#define	SRFS			0x01000000 /* Start SDRAM self-refresh mode */
-#define	EBUFE			0x02000000 /* Enable external buffering	timing */
-#define	FBBRW			0x04000000 /* Fast back-to-back	read write enable */
-#define	EMREN			0x10000000 /* Extended mode register enable */
-#define	TCSR			0x20000000 /* Temp compensated self refresh value 85 deg C */
-#define	CDDBG			0x40000000 /* Tristate SDRAM controls during bus grant */
-
-/* EBIU_SDBCTL Masks */
-#define	EBE				0x00000001 /* Enable SDRAM external bank */
-#define	EBSZ_16			0x00000000 /* SDRAM external bank size = 16MB */
-#define	EBSZ_32			0x00000002 /* SDRAM external bank size = 32MB */
-#define	EBSZ_64			0x00000004 /* SDRAM external bank size = 64MB */
-#define	EBSZ_128		0x00000006 /* SDRAM external bank size = 128MB */
-#define	EBSZ_256		0x00000008 /* SDRAM External Bank Size = 256MB */
-#define	EBSZ_512		0x0000000A /* SDRAM External Bank Size = 512MB */
-#define	EBCAW_8			0x00000000 /* SDRAM external bank column address width = 8 bits */
-#define	EBCAW_9			0x00000010 /* SDRAM external bank column address width = 9 bits */
-#define	EBCAW_10		0x00000020 /* SDRAM external bank column address width = 9 bits */
-#define	EBCAW_11		0x00000030 /* SDRAM external bank column address width = 9 bits */
-
-/* EBIU_SDSTAT Masks */
-#define	SDCI			0x00000001 /* SDRAM controller is idle */
-#define	SDSRA			0x00000002 /* SDRAM SDRAM self refresh is active */
-#define	SDPUA			0x00000004 /* SDRAM power up active  */
-#define	SDRS			0x00000008 /* SDRAM is in reset	state */
-#define	SDEASE			0x00000010 /* SDRAM EAB	sticky error status - W1C */
-#define	BGSTAT			0x00000020 /* Bus granted */
-
-
-/*  ********************  TWO-WIRE INTERFACE (TWIx) MASKS  ***********************/
-/* TWIx_CLKDIV Macros (Use: *pTWIx_CLKDIV = CLKLOW(x)|CLKHI(y);	 ) */
-#ifdef _MISRA_RULES
-#define	CLKLOW(x)	((x) & 0xFFu)		/* Periods Clock Is Held Low */
-#define	CLKHI(y)	(((y)&0xFFu)<<0x8)	/* Periods Before New Clock Low */
-#else
-#define	CLKLOW(x)	((x) & 0xFF)		/* Periods Clock Is Held Low */
-#define	CLKHI(y)	(((y)&0xFF)<<0x8)	/* Periods Before New Clock Low */
-#endif /* _MISRA_RULES */
-
-/* TWIx_PRESCALE Masks								 */
-#define	PRESCALE	0x007F		/* SCLKs Per Internal Time Reference (10MHz) */
-#define	TWI_ENA		0x0080		/* TWI Enable		 */
-#define	SCCB		0x0200		/* SCCB	Compatibility Enable */
-
-/* TWIx_SLAVE_CTRL Masks								 */
-#define	SEN			0x0001		/* Slave Enable		 */
-#define	SADD_LEN	0x0002		/* Slave Address Length */
-#define	STDVAL		0x0004		/* Slave Transmit Data Valid */
-#define	NAK			0x0008		/* NAK/ACK* Generated At Conclusion Of Transfer */
-#define	GEN			0x0010		/* General Call	Adrress	Matching Enabled */
-
-/* TWIx_SLAVE_STAT Masks								 */
-#define	SDIR		0x0001		/* Slave Transfer Direction (Transmit/Receive*) */
-#define	GCALL		0x0002		/* General Call	Indicator */
-
-/* TWIx_MASTER_CTRL Masks						 */
-#define	MEN			0x0001		/* Master Mode Enable */
-#define	MADD_LEN	0x0002		/* Master Address Length */
-#define	MDIR		0x0004		/* Master Transmit Direction (RX/TX*) */
-#define	FAST		0x0008		/* Use Fast Mode Timing	Specs */
-#define	STOP		0x0010		/* Issue Stop Condition */
-#define	RSTART		0x0020		/* Repeat Start	or Stop* At End	Of Transfer */
-#define	DCNT		0x3FC0		/* Data	Bytes To Transfer */
-#define	SDAOVR		0x4000		/* Serial Data Override */
-#define	SCLOVR		0x8000		/* Serial Clock	Override */
-
-/* TWIx_MASTER_STAT Masks							 */
-#define	MPROG		0x0001		/* Master Transfer In Progress */
-#define	LOSTARB		0x0002		/* Lost	Arbitration Indicator (Xfer Aborted) */
-#define	ANAK		0x0004		/* Address Not Acknowledged */
-#define	DNAK		0x0008		/* Data	Not Acknowledged */
-#define	BUFRDERR	0x0010		/* Buffer Read Error */
-#define	BUFWRERR	0x0020		/* Buffer Write	Error */
-#define	SDASEN		0x0040		/* Serial Data Sense */
-#define	SCLSEN		0x0080		/* Serial Clock	Sense */
-#define	BUSBUSY		0x0100		/* Bus Busy Indicator */
-
-/* TWIx_INT_SRC	and TWIx_INT_ENABLE Masks */
-#define	SINIT		0x0001		/* Slave Transfer Initiated */
-#define	SCOMP		0x0002		/* Slave Transfer Complete */
-#define	SERR		0x0004		/* Slave Transfer Error */
-#define	SOVF		0x0008		/* Slave Overflow */
-#define	MCOMP		0x0010		/* Master Transfer Complete */
-#define	MERR		0x0020		/* Master Transfer Error */
-#define	XMTSERV		0x0040		/* Transmit FIFO Service */
-#define	RCVSERV		0x0080		/* Receive FIFO	Service */
-
-/* TWIx_FIFO_CTL Masks					 */
-#define	XMTFLUSH	0x0001		/* Transmit Buffer Flush */
-#define	RCVFLUSH	0x0002		/* Receive Buffer Flush */
-#define	XMTINTLEN	0x0004		/* Transmit Buffer Interrupt Length */
-#define	RCVINTLEN	0x0008		/* Receive Buffer Interrupt Length */
-
-/* TWIx_FIFO_STAT Masks								 */
-#define	XMTSTAT		0x0003		/* Transmit FIFO Status */
-#define	XMT_EMPTY	0x0000		/*		Transmit FIFO Empty */
-#define	XMT_HALF	0x0001		/*		Transmit FIFO Has 1 Byte To Write */
-#define	XMT_FULL	0x0003		/*		Transmit FIFO Full (2 Bytes To Write) */
-
-#define	RCVSTAT		0x000C		/* Receive FIFO	Status */
-#define	RCV_EMPTY	0x0000		/*		Receive	FIFO Empty */
-#define	RCV_HALF	0x0004		/*		Receive	FIFO Has 1 Byte	To Read */
-#define	RCV_FULL	0x000C		/*		Receive	FIFO Full (2 Bytes To Read) */
-
 #endif /* _DEF_BF539_H */
diff --git a/arch/blackfin/mach-bf538/include/mach/gpio.h b/arch/blackfin/mach-bf538/include/mach/gpio.h
index bd9adb7..8a5beee 100644
--- a/arch/blackfin/mach-bf538/include/mach/gpio.h
+++ b/arch/blackfin/mach-bf538/include/mach/gpio.h
@@ -70,4 +70,9 @@
 #define PORT_D GPIO_PD0
 #define PORT_E GPIO_PE0
 
+#include <mach-common/ports-c.h>
+#include <mach-common/ports-d.h>
+#include <mach-common/ports-e.h>
+#include <mach-common/ports-f.h>
+
 #endif /* _MACH_GPIO_H_ */
diff --git a/arch/blackfin/mach-bf538/include/mach/pll.h b/arch/blackfin/mach-bf538/include/mach/pll.h
index b30bbcd..94cca67 100644
--- a/arch/blackfin/mach-bf538/include/mach/pll.h
+++ b/arch/blackfin/mach-bf538/include/mach/pll.h
@@ -1,63 +1 @@
-/*
- * Copyright 2008-2009 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later.
- */
-
-#ifndef _MACH_PLL_H
-#define _MACH_PLL_H
-
-#include <asm/blackfin.h>
-#include <asm/irqflags.h>
-
-/* Writing to PLL_CTL initiates a PLL relock sequence. */
-static __inline__ void bfin_write_PLL_CTL(unsigned int val)
-{
-	unsigned long flags, iwr0, iwr1;
-
-	if (val == bfin_read_PLL_CTL())
-		return;
-
-	flags = hard_local_irq_save();
-	/* Enable the PLL Wakeup bit in SIC IWR */
-	iwr0 = bfin_read32(SIC_IWR0);
-	iwr1 = bfin_read32(SIC_IWR1);
-	/* Only allow PPL Wakeup) */
-	bfin_write32(SIC_IWR0, IWR_ENABLE(0));
-	bfin_write32(SIC_IWR1, 0);
-
-	bfin_write16(PLL_CTL, val);
-	SSYNC();
-	asm("IDLE;");
-
-	bfin_write32(SIC_IWR0, iwr0);
-	bfin_write32(SIC_IWR1, iwr1);
-	hard_local_irq_restore(flags);
-}
-
-/* Writing to VR_CTL initiates a PLL relock sequence. */
-static __inline__ void bfin_write_VR_CTL(unsigned int val)
-{
-	unsigned long flags, iwr0, iwr1;
-
-	if (val == bfin_read_VR_CTL())
-		return;
-
-	flags = hard_local_irq_save();
-	/* Enable the PLL Wakeup bit in SIC IWR */
-	iwr0 = bfin_read32(SIC_IWR0);
-	iwr1 = bfin_read32(SIC_IWR1);
-	/* Only allow PPL Wakeup) */
-	bfin_write32(SIC_IWR0, IWR_ENABLE(0));
-	bfin_write32(SIC_IWR1, 0);
-
-	bfin_write16(VR_CTL, val);
-	SSYNC();
-	asm("IDLE;");
-
-	bfin_write32(SIC_IWR0, iwr0);
-	bfin_write32(SIC_IWR1, iwr1);
-	hard_local_irq_restore(flags);
-}
-
-#endif /* _MACH_PLL_H */
+#include <mach-common/pll.h>
diff --git a/arch/blackfin/mach-bf548/boards/cm_bf548.c b/arch/blackfin/mach-bf548/boards/cm_bf548.c
index 4c2ee67..d11502a 100644
--- a/arch/blackfin/mach-bf548/boards/cm_bf548.c
+++ b/arch/blackfin/mach-bf548/boards/cm_bf548.c
@@ -156,7 +156,7 @@
 	},
 };
 
-unsigned short bfin_uart0_peripherals[] = {
+static unsigned short bfin_uart0_peripherals[] = {
 	P_UART0_TX, P_UART0_RX, 0
 };
 
@@ -211,7 +211,7 @@
 #endif
 };
 
-unsigned short bfin_uart1_peripherals[] = {
+static unsigned short bfin_uart1_peripherals[] = {
 	P_UART1_TX, P_UART1_RX,
 #ifdef CONFIG_BFIN_UART1_CTSRTS
 	P_UART1_RTS, P_UART1_CTS,
@@ -258,7 +258,7 @@
 	},
 };
 
-unsigned short bfin_uart2_peripherals[] = {
+static unsigned short bfin_uart2_peripherals[] = {
 	P_UART2_TX, P_UART2_RX, 0
 };
 
@@ -313,7 +313,7 @@
 #endif
 };
 
-unsigned short bfin_uart3_peripherals[] = {
+static unsigned short bfin_uart3_peripherals[] = {
 	P_UART3_TX, P_UART3_RX,
 #ifdef CONFIG_BFIN_UART3_CTSRTS
 	P_UART3_RTS, P_UART3_CTS,
@@ -504,6 +504,7 @@
 	 * if it is the case.
 	 */
 	.gpio_vrsel_active	= 1,
+	.clkin          = 24,           /* musb CLKIN in MHZ */
 };
 
 static struct musb_hdrc_platform_data musb_plat = {
@@ -552,9 +553,9 @@
 	},
 };
 
-unsigned short bfin_sport0_peripherals[] = {
+static unsigned short bfin_sport0_peripherals[] = {
 	P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS,
-	P_SPORT0_DRPRI, P_SPORT0_RSCLK, P_SPORT0_DRSEC, P_SPORT0_DTSEC, 0
+	P_SPORT0_DRPRI, P_SPORT0_RSCLK, 0
 };
 
 static struct platform_device bfin_sport0_uart_device = {
@@ -586,9 +587,9 @@
 	},
 };
 
-unsigned short bfin_sport1_peripherals[] = {
+static unsigned short bfin_sport1_peripherals[] = {
 	P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS,
-	P_SPORT1_DRPRI, P_SPORT1_RSCLK, P_SPORT1_DRSEC, P_SPORT1_DTSEC, 0
+	P_SPORT1_DRPRI, P_SPORT1_RSCLK, 0
 };
 
 static struct platform_device bfin_sport1_uart_device = {
@@ -620,7 +621,7 @@
 	},
 };
 
-unsigned short bfin_sport2_peripherals[] = {
+static unsigned short bfin_sport2_peripherals[] = {
 	P_SPORT2_TFS, P_SPORT2_DTPRI, P_SPORT2_TSCLK, P_SPORT2_RFS,
 	P_SPORT2_DRPRI, P_SPORT2_RSCLK, P_SPORT2_DRSEC, P_SPORT2_DTSEC, 0
 };
@@ -654,7 +655,7 @@
 	},
 };
 
-unsigned short bfin_sport3_peripherals[] = {
+static unsigned short bfin_sport3_peripherals[] = {
 	P_SPORT3_TFS, P_SPORT3_DTPRI, P_SPORT3_TSCLK, P_SPORT3_RFS,
 	P_SPORT3_DRPRI, P_SPORT3_RSCLK, P_SPORT3_DRSEC, P_SPORT3_DTSEC, 0
 };
@@ -756,7 +757,7 @@
 #endif
 
 #if defined(CONFIG_CAN_BFIN) || defined(CONFIG_CAN_BFIN_MODULE)
-unsigned short bfin_can_peripherals[] = {
+static unsigned short bfin_can_peripherals[] = {
 	P_CAN0_RX, P_CAN0_TX, 0
 };
 
diff --git a/arch/blackfin/mach-bf548/boards/ezkit.c b/arch/blackfin/mach-bf548/boards/ezkit.c
index 4f03fbc..ce5a2bb 100644
--- a/arch/blackfin/mach-bf548/boards/ezkit.c
+++ b/arch/blackfin/mach-bf548/boards/ezkit.c
@@ -261,7 +261,7 @@
 	},
 };
 
-unsigned short bfin_uart0_peripherals[] = {
+static unsigned short bfin_uart0_peripherals[] = {
 	P_UART0_TX, P_UART0_RX, 0
 };
 
@@ -316,7 +316,7 @@
 #endif
 };
 
-unsigned short bfin_uart1_peripherals[] = {
+static unsigned short bfin_uart1_peripherals[] = {
 	P_UART1_TX, P_UART1_RX,
 #ifdef CONFIG_BFIN_UART1_CTSRTS
 	P_UART1_RTS, P_UART1_CTS,
@@ -363,7 +363,7 @@
 	},
 };
 
-unsigned short bfin_uart2_peripherals[] = {
+static unsigned short bfin_uart2_peripherals[] = {
 	P_UART2_TX, P_UART2_RX, 0
 };
 
@@ -418,7 +418,7 @@
 #endif
 };
 
-unsigned short bfin_uart3_peripherals[] = {
+static unsigned short bfin_uart3_peripherals[] = {
 	P_UART3_TX, P_UART3_RX,
 #ifdef CONFIG_BFIN_UART3_CTSRTS
 	P_UART3_RTS, P_UART3_CTS,
@@ -609,6 +609,7 @@
 	 * if it is the case.
 	 */
 	.gpio_vrsel_active	= 1,
+	.clkin          = 24,           /* musb CLKIN in MHZ */
 };
 
 static struct musb_hdrc_platform_data musb_plat = {
@@ -657,9 +658,9 @@
 	},
 };
 
-unsigned short bfin_sport0_peripherals[] = {
+static unsigned short bfin_sport0_peripherals[] = {
 	P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS,
-	P_SPORT0_DRPRI, P_SPORT0_RSCLK, P_SPORT0_DRSEC, P_SPORT0_DTSEC, 0
+	P_SPORT0_DRPRI, P_SPORT0_RSCLK, 0
 };
 
 static struct platform_device bfin_sport0_uart_device = {
@@ -691,9 +692,9 @@
 	},
 };
 
-unsigned short bfin_sport1_peripherals[] = {
+static unsigned short bfin_sport1_peripherals[] = {
 	P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS,
-	P_SPORT1_DRPRI, P_SPORT1_RSCLK, P_SPORT1_DRSEC, P_SPORT1_DTSEC, 0
+	P_SPORT1_DRPRI, P_SPORT1_RSCLK, 0
 };
 
 static struct platform_device bfin_sport1_uart_device = {
@@ -725,7 +726,7 @@
 	},
 };
 
-unsigned short bfin_sport2_peripherals[] = {
+static unsigned short bfin_sport2_peripherals[] = {
 	P_SPORT2_TFS, P_SPORT2_DTPRI, P_SPORT2_TSCLK, P_SPORT2_RFS,
 	P_SPORT2_DRPRI, P_SPORT2_RSCLK, P_SPORT2_DRSEC, P_SPORT2_DTSEC, 0
 };
@@ -759,7 +760,7 @@
 	},
 };
 
-unsigned short bfin_sport3_peripherals[] = {
+static unsigned short bfin_sport3_peripherals[] = {
 	P_SPORT3_TFS, P_SPORT3_DTPRI, P_SPORT3_TSCLK, P_SPORT3_RFS,
 	P_SPORT3_DRPRI, P_SPORT3_RSCLK, P_SPORT3_DRSEC, P_SPORT3_DTSEC, 0
 };
@@ -777,7 +778,7 @@
 #endif
 
 #if defined(CONFIG_CAN_BFIN) || defined(CONFIG_CAN_BFIN_MODULE)
-unsigned short bfin_can_peripherals[] = {
+static unsigned short bfin_can_peripherals[] = {
 	P_CAN0_RX, P_CAN0_TX, 0
 };
 
diff --git a/arch/blackfin/mach-bf548/dma.c b/arch/blackfin/mach-bf548/dma.c
index 888b9cc..69ead33 100644
--- a/arch/blackfin/mach-bf548/dma.c
+++ b/arch/blackfin/mach-bf548/dma.c
@@ -11,7 +11,7 @@
 #include <asm/blackfin.h>
 #include <asm/dma.h>
 
-struct dma_register *dma_io_base_addr[MAX_DMA_CHANNELS] = {
+struct dma_register * const dma_io_base_addr[MAX_DMA_CHANNELS] = {
 	(struct dma_register *) DMA0_NEXT_DESC_PTR,
 	(struct dma_register *) DMA1_NEXT_DESC_PTR,
 	(struct dma_register *) DMA2_NEXT_DESC_PTR,
diff --git a/arch/blackfin/mach-bf548/include/mach/bfin_serial.h b/arch/blackfin/mach-bf548/include/mach/bfin_serial.h
new file mode 100644
index 0000000..a77109f
--- /dev/null
+++ b/arch/blackfin/mach-bf548/include/mach/bfin_serial.h
@@ -0,0 +1,16 @@
+/*
+ * mach/bfin_serial.h - Blackfin UART/Serial definitions
+ *
+ * Copyright 2006-2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#ifndef __BFIN_MACH_SERIAL_H__
+#define __BFIN_MACH_SERIAL_H__
+
+#define BFIN_UART_NR_PORTS	4
+
+#define BFIN_UART_BF54X_STYLE
+
+#endif
diff --git a/arch/blackfin/mach-bf548/include/mach/bfin_serial_5xx.h b/arch/blackfin/mach-bf548/include/mach/bfin_serial_5xx.h
index dd44aa7..0d94eda 100644
--- a/arch/blackfin/mach-bf548/include/mach/bfin_serial_5xx.h
+++ b/arch/blackfin/mach-bf548/include/mach/bfin_serial_5xx.h
@@ -4,72 +4,14 @@
  * Licensed under the GPL-2 or later.
  */
 
-#include <linux/serial.h>
 #include <asm/dma.h>
 #include <asm/portmux.h>
 
-#define UART_GET_CHAR(uart)     bfin_read16(((uart)->port.membase + OFFSET_RBR))
-#define UART_GET_DLL(uart)	bfin_read16(((uart)->port.membase + OFFSET_DLL))
-#define UART_GET_DLH(uart)	bfin_read16(((uart)->port.membase + OFFSET_DLH))
-#define UART_GET_IER(uart)      bfin_read16(((uart)->port.membase + OFFSET_IER_SET))
-#define UART_GET_LCR(uart)      bfin_read16(((uart)->port.membase + OFFSET_LCR))
-#define UART_GET_LSR(uart)      bfin_read16(((uart)->port.membase + OFFSET_LSR))
-#define UART_GET_GCTL(uart)     bfin_read16(((uart)->port.membase + OFFSET_GCTL))
-#define UART_GET_MSR(uart)      bfin_read16(((uart)->port.membase + OFFSET_MSR))
-#define UART_GET_MCR(uart)      bfin_read16(((uart)->port.membase + OFFSET_MCR))
-
-#define UART_PUT_CHAR(uart,v)   bfin_write16(((uart)->port.membase + OFFSET_THR),v)
-#define UART_PUT_DLL(uart,v)    bfin_write16(((uart)->port.membase + OFFSET_DLL),v)
-#define UART_SET_IER(uart,v)    bfin_write16(((uart)->port.membase + OFFSET_IER_SET),v)
-#define UART_CLEAR_IER(uart,v)  bfin_write16(((uart)->port.membase + OFFSET_IER_CLEAR),v)
-#define UART_PUT_DLH(uart,v)    bfin_write16(((uart)->port.membase + OFFSET_DLH),v)
-#define UART_PUT_LSR(uart,v)	bfin_write16(((uart)->port.membase + OFFSET_LSR),v)
-#define UART_PUT_LCR(uart,v)    bfin_write16(((uart)->port.membase + OFFSET_LCR),v)
-#define UART_CLEAR_LSR(uart)    bfin_write16(((uart)->port.membase + OFFSET_LSR), -1)
-#define UART_PUT_GCTL(uart,v)   bfin_write16(((uart)->port.membase + OFFSET_GCTL),v)
-#define UART_PUT_MCR(uart,v)    bfin_write16(((uart)->port.membase + OFFSET_MCR),v)
-#define UART_CLEAR_SCTS(uart)   bfin_write16(((uart)->port.membase + OFFSET_MSR),SCTS)
-
-#define UART_SET_DLAB(uart)     /* MMRs not muxed on BF54x */
-#define UART_CLEAR_DLAB(uart)   /* MMRs not muxed on BF54x */
-
-#define UART_GET_CTS(x) (UART_GET_MSR(x) & CTS)
-#define UART_DISABLE_RTS(x) UART_PUT_MCR(x, UART_GET_MCR(x) & ~(ARTS|MRTS))
-#define UART_ENABLE_RTS(x) UART_PUT_MCR(x, UART_GET_MCR(x) | MRTS | ARTS)
-#define UART_ENABLE_INTS(x, v) UART_SET_IER(x, v)
-#define UART_DISABLE_INTS(x) UART_CLEAR_IER(x, 0xF)
-
 #if defined(CONFIG_BFIN_UART0_CTSRTS) || defined(CONFIG_BFIN_UART1_CTSRTS) || \
 	defined(CONFIG_BFIN_UART2_CTSRTS) || defined(CONFIG_BFIN_UART3_CTSRTS)
 # define CONFIG_SERIAL_BFIN_HARD_CTSRTS
 #endif
 
-#define BFIN_UART_TX_FIFO_SIZE	2
-
-/*
- * The pin configuration is different from schematic
- */
-struct bfin_serial_port {
-        struct uart_port        port;
-        unsigned int            old_status;
-	int			status_irq;
-#ifdef CONFIG_SERIAL_BFIN_DMA
-	int			tx_done;
-	int			tx_count;
-	struct circ_buf		rx_dma_buf;
-	struct timer_list       rx_dma_timer;
-	int			rx_dma_nrows;
-	unsigned int		tx_dma_channel;
-	unsigned int		rx_dma_channel;
-	struct work_struct	tx_dma_workqueue;
-#endif
-#ifdef CONFIG_SERIAL_BFIN_HARD_CTSRTS
-	int			scts;
-	int			cts_pin;
-	int			rts_pin;
-#endif
-};
-
 struct bfin_serial_res {
 	unsigned long	uart_base_addr;
 	int		uart_irq;
@@ -148,3 +90,5 @@
 };
 
 #define DRIVER_NAME "bfin-uart"
+
+#include <asm/bfin_serial.h>
diff --git a/arch/blackfin/mach-bf548/include/mach/blackfin.h b/arch/blackfin/mach-bf548/include/mach/blackfin.h
index 5684030..72da721 100644
--- a/arch/blackfin/mach-bf548/include/mach/blackfin.h
+++ b/arch/blackfin/mach-bf548/include/mach/blackfin.h
@@ -1,5 +1,5 @@
 /*
- * Copyright 2007-2009 Analog Devices Inc.
+ * Copyright 2007-2010 Analog Devices Inc.
  *
  * Licensed under the GPL-2 or later.
  */
@@ -10,58 +10,40 @@
 #include "bf548.h"
 #include "anomaly.h"
 
+#include <asm/def_LPBlackfin.h>
 #ifdef CONFIG_BF542
-#include "defBF542.h"
-#endif
-
-#ifdef CONFIG_BF544
-#include "defBF544.h"
-#endif
-
-#ifdef CONFIG_BF547
-#include "defBF547.h"
-#endif
-
-#ifdef CONFIG_BF548
-#include "defBF548.h"
-#endif
-
-#ifdef CONFIG_BF549
-#include "defBF549.h"
-#endif
-
-#if !defined(__ASSEMBLY__)
-#ifdef CONFIG_BF542
-#include "cdefBF542.h"
+# include "defBF542.h"
 #endif
 #ifdef CONFIG_BF544
-#include "cdefBF544.h"
+# include "defBF544.h"
 #endif
 #ifdef CONFIG_BF547
-#include "cdefBF547.h"
+# include "defBF547.h"
 #endif
 #ifdef CONFIG_BF548
-#include "cdefBF548.h"
+# include "defBF548.h"
 #endif
 #ifdef CONFIG_BF549
-#include "cdefBF549.h"
+# include "defBF549.h"
 #endif
 
+#ifndef __ASSEMBLY__
+# include <asm/cdef_LPBlackfin.h>
+# ifdef CONFIG_BF542
+#  include "cdefBF542.h"
+# endif
+# ifdef CONFIG_BF544
+#  include "cdefBF544.h"
+# endif
+# ifdef CONFIG_BF547
+#  include "cdefBF547.h"
+# endif
+# ifdef CONFIG_BF548
+#  include "cdefBF548.h"
+# endif
+# ifdef CONFIG_BF549
+#  include "cdefBF549.h"
+# endif
 #endif
 
-#define BFIN_UART_NR_PORTS	4
-
-#define OFFSET_DLL              0x00	/* Divisor Latch (Low-Byte)             */
-#define OFFSET_DLH              0x04	/* Divisor Latch (High-Byte)            */
-#define OFFSET_GCTL             0x08	/* Global Control Register              */
-#define OFFSET_LCR              0x0C	/* Line Control Register                */
-#define OFFSET_MCR              0x10	/* Modem Control Register               */
-#define OFFSET_LSR              0x14	/* Line Status Register                 */
-#define OFFSET_MSR              0x18	/* Modem Status Register                */
-#define OFFSET_SCR              0x1C	/* SCR Scratch Register                 */
-#define OFFSET_IER_SET          0x20	/* Set Interrupt Enable Register        */
-#define OFFSET_IER_CLEAR        0x24	/* Clear Interrupt Enable Register      */
-#define OFFSET_THR              0x28	/* Transmit Holding register            */
-#define OFFSET_RBR              0x2C	/* Receive Buffer register              */
-
 #endif
diff --git a/arch/blackfin/mach-bf548/include/mach/cdefBF542.h b/arch/blackfin/mach-bf548/include/mach/cdefBF542.h
index 42f4a94..d09c19c 100644
--- a/arch/blackfin/mach-bf548/include/mach/cdefBF542.h
+++ b/arch/blackfin/mach-bf548/include/mach/cdefBF542.h
@@ -1,5 +1,5 @@
 /*
- * Copyright 2007-2008 Analog Devices Inc.
+ * Copyright 2007-2010 Analog Devices Inc.
  *
  * Licensed under the GPL-2 or later.
  */
@@ -7,14 +7,6 @@
 #ifndef _CDEF_BF542_H
 #define _CDEF_BF542_H
 
-/* include all Core registers and bit definitions */
-#include "defBF542.h"
-
-/* include core sbfin_read_()ecific register pointer definitions */
-#include <asm/cdef_LPBlackfin.h>
-
-/* SYSTEM & MMR ADDRESS DEFINITIONS FOR ADSP-BF542 */
-
 /* include cdefBF54x_base.h for the set of #defines that are common to all ADSP-BF54x bfin_read_()rocessors */
 #include "cdefBF54x_base.h"
 
diff --git a/arch/blackfin/mach-bf548/include/mach/cdefBF544.h b/arch/blackfin/mach-bf548/include/mach/cdefBF544.h
index 2207799..33ec810 100644
--- a/arch/blackfin/mach-bf548/include/mach/cdefBF544.h
+++ b/arch/blackfin/mach-bf548/include/mach/cdefBF544.h
@@ -1,5 +1,5 @@
 /*
- * Copyright 2007-2008 Analog Devices Inc.
+ * Copyright 2007-2010 Analog Devices Inc.
  *
  * Licensed under the GPL-2 or later.
  */
@@ -7,14 +7,6 @@
 #ifndef _CDEF_BF544_H
 #define _CDEF_BF544_H
 
-/* include all Core registers and bit definitions */
-#include "defBF544.h"
-
-/* include core sbfin_read_()ecific register pointer definitions */
-#include <asm/cdef_LPBlackfin.h>
-
-/* SYSTEM & MMR ADDRESS DEFINITIONS FOR ADSP-BF544 */
-
 /* include cdefBF54x_base.h for the set of #defines that are common to all ADSP-BF54x bfin_read_()rocessors */
 #include "cdefBF54x_base.h"
 
diff --git a/arch/blackfin/mach-bf548/include/mach/cdefBF547.h b/arch/blackfin/mach-bf548/include/mach/cdefBF547.h
index bc650e6..bcb9726 100644
--- a/arch/blackfin/mach-bf548/include/mach/cdefBF547.h
+++ b/arch/blackfin/mach-bf548/include/mach/cdefBF547.h
@@ -1,5 +1,5 @@
 /*
- * Copyright 2008 Analog Devices Inc.
+ * Copyright 2008-2010 Analog Devices Inc.
  *
  * Licensed under the GPL-2 or later.
  */
@@ -7,14 +7,6 @@
 #ifndef _CDEF_BF547_H
 #define _CDEF_BF547_H
 
-/* include all Core registers and bit definitions */
-#include "defBF547.h"
-
-/* include core sbfin_read_()ecific register pointer definitions */
-#include <asm/cdef_LPBlackfin.h>
-
-/* SYSTEM & MMR ADDRESS DEFINITIONS FOR ADSP-BF547 */
-
 /* include cdefBF54x_base.h for the set of #defines that are common to all ADSP-BF54x bfin_read_()rocessors */
 #include "cdefBF54x_base.h"
 
diff --git a/arch/blackfin/mach-bf548/include/mach/cdefBF548.h b/arch/blackfin/mach-bf548/include/mach/cdefBF548.h
index 3523e08..bae67a6 100644
--- a/arch/blackfin/mach-bf548/include/mach/cdefBF548.h
+++ b/arch/blackfin/mach-bf548/include/mach/cdefBF548.h
@@ -1,5 +1,5 @@
 /*
- * Copyright 2007-2008 Analog Devices Inc.
+ * Copyright 2007-2010 Analog Devices Inc.
  *
  * Licensed under the GPL-2 or later.
  */
@@ -7,14 +7,6 @@
 #ifndef _CDEF_BF548_H
 #define _CDEF_BF548_H
 
-/* include all Core registers and bit definitions */
-#include "defBF548.h"
-
-/* include core sbfin_read_()ecific register pointer definitions */
-#include <asm/cdef_LPBlackfin.h>
-
-/* SYSTEM & MMR ADDRESS DEFINITIONS FOR ADSP-BF548 */
-
 /* include cdefBF54x_base.h for the set of #defines that are common to all ADSP-BF54x bfin_read_()rocessors */
 #include "cdefBF54x_base.h"
 
diff --git a/arch/blackfin/mach-bf548/include/mach/cdefBF549.h b/arch/blackfin/mach-bf548/include/mach/cdefBF549.h
index 80201ed..002136a 100644
--- a/arch/blackfin/mach-bf548/include/mach/cdefBF549.h
+++ b/arch/blackfin/mach-bf548/include/mach/cdefBF549.h
@@ -1,5 +1,5 @@
 /*
- * Copyright 2007-2008 Analog Devices Inc.
+ * Copyright 2007-2010 Analog Devices Inc.
  *
  * Licensed under the GPL-2 or later.
  */
@@ -7,14 +7,6 @@
 #ifndef _CDEF_BF549_H
 #define _CDEF_BF549_H
 
-/* include all Core registers and bit definitions */
-#include "defBF549.h"
-
-/* include core sbfin_read_()ecific register pointer definitions */
-#include <asm/cdef_LPBlackfin.h>
-
-/* SYSTEM & MMR ADDRESS DEFINITIONS FOR ADSP-BF549 */
-
 /* include cdefBF54x_base.h for the set of #defines that are common to all ADSP-BF54x bfin_read_()rocessors */
 #include "cdefBF54x_base.h"
 
diff --git a/arch/blackfin/mach-bf548/include/mach/cdefBF54x_base.h b/arch/blackfin/mach-bf548/include/mach/cdefBF54x_base.h
index deaf5d6..50c89c8 100644
--- a/arch/blackfin/mach-bf548/include/mach/cdefBF54x_base.h
+++ b/arch/blackfin/mach-bf548/include/mach/cdefBF54x_base.h
@@ -1,5 +1,5 @@
 /*
- * Copyright 2007-2008 Analog Devices Inc.
+ * Copyright 2007-2010 Analog Devices Inc.
  *
  * Licensed under the GPL-2 or later.
  */
@@ -7,10 +7,6 @@
 #ifndef _CDEF_BF54X_H
 #define _CDEF_BF54X_H
 
-#include <asm/blackfin.h>
-
-#include "defBF54x_base.h"
-
 /* ************************************************************** */
 /* SYSTEM & MMR ADDRESS DEFINITIONS COMMON TO ALL ADSP-BF54x    */
 /* ************************************************************** */
@@ -2633,22 +2629,5 @@
 
 /* Handshake MDMA is not defined in the shared file because it is not available on the ADSP-BF542 bfin_read_()rocessor */
 
-/* legacy definitions */
-#define bfin_read_EBIU_AMCBCTL0		bfin_read_EBIU_AMBCTL0
-#define bfin_write_EBIU_AMCBCTL0	bfin_write_EBIU_AMBCTL0
-#define bfin_read_EBIU_AMCBCTL1		bfin_read_EBIU_AMBCTL1
-#define bfin_write_EBIU_AMCBCTL1	bfin_write_EBIU_AMBCTL1
-#define bfin_read_PINT0_IRQ		bfin_read_PINT0_REQUEST
-#define bfin_write_PINT0_IRQ		bfin_write_PINT0_REQUEST
-#define bfin_read_PINT1_IRQ		bfin_read_PINT1_REQUEST
-#define bfin_write_PINT1_IRQ		bfin_write_PINT1_REQUEST
-#define bfin_read_PINT2_IRQ		bfin_read_PINT2_REQUEST
-#define bfin_write_PINT2_IRQ		bfin_write_PINT2_REQUEST
-#define bfin_read_PINT3_IRQ		bfin_read_PINT3_REQUEST
-#define bfin_write_PINT3_IRQ		bfin_write_PINT3_REQUEST
-
-/* These need to be last due to the cdef/linux inter-dependencies */
-#include <asm/irq.h>
-
 #endif /* _CDEF_BF54X_H */
 
diff --git a/arch/blackfin/mach-bf548/include/mach/defBF542.h b/arch/blackfin/mach-bf548/include/mach/defBF542.h
index abf5f75..629bf21 100644
--- a/arch/blackfin/mach-bf548/include/mach/defBF542.h
+++ b/arch/blackfin/mach-bf548/include/mach/defBF542.h
@@ -1,5 +1,5 @@
 /*
- * Copyright 2007-2008 Analog Devices Inc.
+ * Copyright 2007-2010 Analog Devices Inc.
  *
  * Licensed under the ADI BSD license or the GPL-2 (or later)
  */
@@ -7,11 +7,6 @@
 #ifndef _DEF_BF542_H
 #define _DEF_BF542_H
 
-/* Include all Core registers and bit definitions */
-#include <asm/def_LPBlackfin.h>
-
-/* SYSTEM & MMR ADDRESS DEFINITIONS FOR ADSP-BF542 */
-
 /* Include defBF54x_base.h for the set of #defines that are common to all ADSP-BF54x processors */
 #include "defBF54x_base.h"
 
diff --git a/arch/blackfin/mach-bf548/include/mach/defBF544.h b/arch/blackfin/mach-bf548/include/mach/defBF544.h
index e277109..642468c 100644
--- a/arch/blackfin/mach-bf548/include/mach/defBF544.h
+++ b/arch/blackfin/mach-bf548/include/mach/defBF544.h
@@ -1,5 +1,5 @@
 /*
- * Copyright 2007-2008 Analog Devices Inc.
+ * Copyright 2007-2010 Analog Devices Inc.
  *
  * Licensed under the ADI BSD license or the GPL-2 (or later)
  */
@@ -7,11 +7,6 @@
 #ifndef _DEF_BF544_H
 #define _DEF_BF544_H
 
-/* Include all Core registers and bit definitions */
-#include <asm/def_LPBlackfin.h>
-
-/* SYSTEM & MMR ADDRESS DEFINITIONS FOR ADSP-BF544 */
-
 /* Include defBF54x_base.h for the set of #defines that are common to all ADSP-BF54x processors */
 #include "defBF54x_base.h"
 
diff --git a/arch/blackfin/mach-bf548/include/mach/defBF547.h b/arch/blackfin/mach-bf548/include/mach/defBF547.h
index be21ba5..2f3337c 100644
--- a/arch/blackfin/mach-bf548/include/mach/defBF547.h
+++ b/arch/blackfin/mach-bf548/include/mach/defBF547.h
@@ -1,5 +1,5 @@
 /*
- * Copyright 2008 Analog Devices Inc.
+ * Copyright 2008-2010 Analog Devices Inc.
  *
  * Licensed under the ADI BSD license or the GPL-2 (or later)
  */
@@ -7,11 +7,6 @@
 #ifndef _DEF_BF547_H
 #define _DEF_BF547_H
 
-/* Include all Core registers and bit definitions */
-#include <asm/def_LPBlackfin.h>
-
-/* SYSTEM & MMR ADDRESS DEFINITIONS FOR ADSP-BF547 */
-
 /* Include defBF54x_base.h for the set of #defines that are common to all ADSP-BF54x processors */
 #include "defBF54x_base.h"
 
diff --git a/arch/blackfin/mach-bf548/include/mach/defBF548.h b/arch/blackfin/mach-bf548/include/mach/defBF548.h
index 3fb33b0..3c7f1b6 100644
--- a/arch/blackfin/mach-bf548/include/mach/defBF548.h
+++ b/arch/blackfin/mach-bf548/include/mach/defBF548.h
@@ -1,5 +1,5 @@
 /*
- * Copyright 2007-2008 Analog Devices Inc.
+ * Copyright 2007-2010 Analog Devices Inc.
  *
  * Licensed under the ADI BSD license or the GPL-2 (or later)
  */
@@ -7,11 +7,6 @@
 #ifndef _DEF_BF548_H
 #define _DEF_BF548_H
 
-/* Include all Core registers and bit definitions */
-#include <asm/def_LPBlackfin.h>
-
-/* SYSTEM & MMR ADDRESS DEFINITIONS FOR ADSP-BF548 */
-
 /* Include defBF54x_base.h for the set of #defines that are common to all ADSP-BF54x processors */
 #include "defBF54x_base.h"
 
diff --git a/arch/blackfin/mach-bf548/include/mach/defBF549.h b/arch/blackfin/mach-bf548/include/mach/defBF549.h
index 5a04e6d..9a45cb6 100644
--- a/arch/blackfin/mach-bf548/include/mach/defBF549.h
+++ b/arch/blackfin/mach-bf548/include/mach/defBF549.h
@@ -1,5 +1,5 @@
 /*
- * Copyright 2007-2008 Analog Devices Inc.
+ * Copyright 2007-2010 Analog Devices Inc.
  *
  * Licensed under the ADI BSD license or the GPL-2 (or later)
  */
@@ -7,11 +7,6 @@
 #ifndef _DEF_BF549_H
 #define _DEF_BF549_H
 
-/* Include all Core registers and bit definitions */
-#include <asm/def_LPBlackfin.h>
-
-/* SYSTEM & MMR ADDRESS DEFINITIONS FOR ADSP-BF549 */
-
 /* Include defBF54x_base.h for the set of #defines that are common to all ADSP-BF54x processors */
 #include "defBF54x_base.h"
 
diff --git a/arch/blackfin/mach-bf548/include/mach/defBF54x_base.h b/arch/blackfin/mach-bf548/include/mach/defBF54x_base.h
index 78f9110..0867c2b 100644
--- a/arch/blackfin/mach-bf548/include/mach/defBF54x_base.h
+++ b/arch/blackfin/mach-bf548/include/mach/defBF54x_base.h
@@ -1,5 +1,5 @@
 /*
- * Copyright 2007-2008 Analog Devices Inc.
+ * Copyright 2007-2010 Analog Devices Inc.
  *
  * Licensed under the ADI BSD license or the GPL-2 (or later)
  */
@@ -1615,14 +1615,14 @@
 #define                     CTYPE  0x40       /* DMA Channel Type */
 #define                      PMAP  0xf000     /* Peripheral Mapped To This Channel */
 
-/* Bit masks for DMACx_TCPER */
+/* Bit masks for DMACx_TC_PER */
 
 #define        DCB_TRAFFIC_PERIOD  0xf        /* DCB Traffic Control Period */
 #define        DEB_TRAFFIC_PERIOD  0xf0       /* DEB Traffic Control Period */
 #define        DAB_TRAFFIC_PERIOD  0x700      /* DAB Traffic Control Period */
 #define   MDMA_ROUND_ROBIN_PERIOD  0xf800     /* MDMA Round Robin Period */
 
-/* Bit masks for DMACx_TCCNT */
+/* Bit masks for DMACx_TC_CNT */
 
 #define         DCB_TRAFFIC_COUNT  0xf        /* DCB Traffic Control Count */
 #define         DEB_TRAFFIC_COUNT  0xf0       /* DEB Traffic Control Count */
@@ -2172,68 +2172,6 @@
 
 #define                 RCVDATA16  0xffff     /* Receive FIFO 16-Bit Data */
 
-/* Bit masks for UARTx_LCR */
-
-#if 0
-/* conflicts with legacy one in last section */
-#define                       WLS  0x3        /* Word Length Select */
-#endif
-#define                       STB  0x4        /* Stop Bits */
-#define                       PEN  0x8        /* Parity Enable */
-#define                       EPS  0x10       /* Even Parity Select */
-#define                       STP  0x20       /* Sticky Parity */
-#define                        SB  0x40       /* Set Break */
-
-/* Bit masks for UARTx_MCR */
-
-#define                      XOFF  0x1        /* Transmitter Off */
-#define                      MRTS  0x2        /* Manual Request To Send */
-#define                      RFIT  0x4        /* Receive FIFO IRQ Threshold */
-#define                      RFRT  0x8        /* Receive FIFO RTS Threshold */
-#define                  LOOP_ENA  0x10       /* Loopback Mode Enable */
-#define                     FCPOL  0x20       /* Flow Control Pin Polarity */
-#define                      ARTS  0x40       /* Automatic Request To Send */
-#define                      ACTS  0x80       /* Automatic Clear To Send */
-
-/* Bit masks for UARTx_LSR */
-
-#define                        DR  0x1        /* Data Ready */
-#define                        OE  0x2        /* Overrun Error */
-#define                        PE  0x4        /* Parity Error */
-#define                        FE  0x8        /* Framing Error */
-#define                        BI  0x10       /* Break Interrupt */
-#define                      THRE  0x20       /* THR Empty */
-#define                      TEMT  0x40       /* Transmitter Empty */
-#define                       TFI  0x80       /* Transmission Finished Indicator */
-
-/* Bit masks for UARTx_MSR */
-
-#define                      SCTS  0x1        /* Sticky CTS */
-#define                       CTS  0x10       /* Clear To Send */
-#define                      RFCS  0x20       /* Receive FIFO Count Status */
-
-/* Bit masks for UARTx_IER_SET & UARTx_IER_CLEAR */
-
-#define                   ERBFI  0x1        /* Enable Receive Buffer Full Interrupt */
-#define                   ETBEI  0x2        /* Enable Transmit Buffer Empty Interrupt */
-#define                    ELSI  0x4        /* Enable Receive Status Interrupt */
-#define                   EDSSI  0x8        /* Enable Modem Status Interrupt */
-#define                  EDTPTI  0x10       /* Enable DMA Transmit PIRQ Interrupt */
-#define                    ETFI  0x20       /* Enable Transmission Finished Interrupt */
-#define                   ERFCI  0x40       /* Enable Receive FIFO Count Interrupt */
-
-/* Bit masks for UARTx_GCTL */
-
-#define                      UCEN  0x1        /* UART Enable */
-#define                      IREN  0x2        /* IrDA Mode Enable */
-#define                     TPOLC  0x4        /* IrDA TX Polarity Change */
-#define                     RPOLC  0x8        /* IrDA RX Polarity Change */
-#define                       FPE  0x10       /* Force Parity Error */
-#define                       FFE  0x20       /* Force Framing Error */
-#define                      EDBO  0x40       /* Enable Divide-by-One */
-#define                     EGLSI  0x80       /* Enable Global LS Interrupt */
-
-
 /* ******************************************* */
 /*     MULTI BIT MACRO ENUMERATIONS            */
 /* ******************************************* */
@@ -2251,13 +2189,6 @@
 #define WDTH_CAP 0x0002
 #define EXT_CLK  0x0003
 
-/* UARTx_LCR bit field options */
-
-#define WLS_5   0x0000    /* 5 data bits */
-#define WLS_6   0x0001    /* 6 data bits */
-#define WLS_7   0x0002    /* 7 data bits */
-#define WLS_8   0x0003    /* 8 data bits */
-
 /* PINTx Register Bit Definitions */
 
 #define PIQ0 0x00000001
@@ -2300,240 +2231,6 @@
 #define PIQ30 0x40000000
 #define PIQ31 0x80000000
 
-/* PORT A Bit Definitions for the registers
-PORTA, PORTA_SET, PORTA_CLEAR,
-PORTA_DIR_SET, PORTA_DIR_CLEAR, PORTA_INEN,
-PORTA_FER registers
-*/
-
-#define PA0 0x0001
-#define PA1 0x0002
-#define PA2 0x0004
-#define PA3 0x0008
-#define PA4 0x0010
-#define PA5 0x0020
-#define PA6 0x0040
-#define PA7 0x0080
-#define PA8 0x0100
-#define PA9 0x0200
-#define PA10 0x0400
-#define PA11 0x0800
-#define PA12 0x1000
-#define PA13 0x2000
-#define PA14 0x4000
-#define PA15 0x8000
-
-/* PORT B Bit Definitions for the registers
-PORTB, PORTB_SET, PORTB_CLEAR,
-PORTB_DIR_SET, PORTB_DIR_CLEAR, PORTB_INEN,
-PORTB_FER registers
-*/
-
-#define PB0 0x0001
-#define PB1 0x0002
-#define PB2 0x0004
-#define PB3 0x0008
-#define PB4 0x0010
-#define PB5 0x0020
-#define PB6 0x0040
-#define PB7 0x0080
-#define PB8 0x0100
-#define PB9 0x0200
-#define PB10 0x0400
-#define PB11 0x0800
-#define PB12 0x1000
-#define PB13 0x2000
-#define PB14 0x4000
-
-
-/* PORT C Bit Definitions for the registers
-PORTC, PORTC_SET, PORTC_CLEAR,
-PORTC_DIR_SET, PORTC_DIR_CLEAR, PORTC_INEN,
-PORTC_FER registers
-*/
-
-
-#define PC0 0x0001
-#define PC1 0x0002
-#define PC2 0x0004
-#define PC3 0x0008
-#define PC4 0x0010
-#define PC5 0x0020
-#define PC6 0x0040
-#define PC7 0x0080
-#define PC8 0x0100
-#define PC9 0x0200
-#define PC10 0x0400
-#define PC11 0x0800
-#define PC12 0x1000
-#define PC13 0x2000
-
-
-/* PORT D Bit Definitions for the registers
-PORTD, PORTD_SET, PORTD_CLEAR,
-PORTD_DIR_SET, PORTD_DIR_CLEAR, PORTD_INEN,
-PORTD_FER registers
-*/
-
-#define PD0 0x0001
-#define PD1 0x0002
-#define PD2 0x0004
-#define PD3 0x0008
-#define PD4 0x0010
-#define PD5 0x0020
-#define PD6 0x0040
-#define PD7 0x0080
-#define PD8 0x0100
-#define PD9 0x0200
-#define PD10 0x0400
-#define PD11 0x0800
-#define PD12 0x1000
-#define PD13 0x2000
-#define PD14 0x4000
-#define PD15 0x8000
-
-/* PORT E Bit Definitions for the registers
-PORTE, PORTE_SET, PORTE_CLEAR,
-PORTE_DIR_SET, PORTE_DIR_CLEAR, PORTE_INEN,
-PORTE_FER registers
-*/
-
-
-#define PE0 0x0001
-#define PE1 0x0002
-#define PE2 0x0004
-#define PE3 0x0008
-#define PE4 0x0010
-#define PE5 0x0020
-#define PE6 0x0040
-#define PE7 0x0080
-#define PE8 0x0100
-#define PE9 0x0200
-#define PE10 0x0400
-#define PE11 0x0800
-#define PE12 0x1000
-#define PE13 0x2000
-#define PE14 0x4000
-#define PE15 0x8000
-
-/* PORT F Bit Definitions for the registers
-PORTF, PORTF_SET, PORTF_CLEAR,
-PORTF_DIR_SET, PORTF_DIR_CLEAR, PORTF_INEN,
-PORTF_FER registers
-*/
-
-
-#define PF0 0x0001
-#define PF1 0x0002
-#define PF2 0x0004
-#define PF3 0x0008
-#define PF4 0x0010
-#define PF5 0x0020
-#define PF6 0x0040
-#define PF7 0x0080
-#define PF8 0x0100
-#define PF9 0x0200
-#define PF10 0x0400
-#define PF11 0x0800
-#define PF12 0x1000
-#define PF13 0x2000
-#define PF14 0x4000
-#define PF15 0x8000
-
-/* PORT G Bit Definitions for the registers
-PORTG, PORTG_SET, PORTG_CLEAR,
-PORTG_DIR_SET, PORTG_DIR_CLEAR, PORTG_INEN,
-PORTG_FER registers
-*/
-
-
-#define PG0 0x0001
-#define PG1 0x0002
-#define PG2 0x0004
-#define PG3 0x0008
-#define PG4 0x0010
-#define PG5 0x0020
-#define PG6 0x0040
-#define PG7 0x0080
-#define PG8 0x0100
-#define PG9 0x0200
-#define PG10 0x0400
-#define PG11 0x0800
-#define PG12 0x1000
-#define PG13 0x2000
-#define PG14 0x4000
-#define PG15 0x8000
-
-/* PORT H Bit Definitions for the registers
-PORTH, PORTH_SET, PORTH_CLEAR,
-PORTH_DIR_SET, PORTH_DIR_CLEAR, PORTH_INEN,
-PORTH_FER registers
-*/
-
-
-#define PH0 0x0001
-#define PH1 0x0002
-#define PH2 0x0004
-#define PH3 0x0008
-#define PH4 0x0010
-#define PH5 0x0020
-#define PH6 0x0040
-#define PH7 0x0080
-#define PH8 0x0100
-#define PH9 0x0200
-#define PH10 0x0400
-#define PH11 0x0800
-#define PH12 0x1000
-#define PH13 0x2000
-
-
-/* PORT I Bit Definitions for the registers
-PORTI, PORTI_SET, PORTI_CLEAR,
-PORTI_DIR_SET, PORTI_DIR_CLEAR, PORTI_INEN,
-PORTI_FER registers
-*/
-
-
-#define PI0 0x0001
-#define PI1 0x0002
-#define PI2 0x0004
-#define PI3 0x0008
-#define PI4 0x0010
-#define PI5 0x0020
-#define PI6 0x0040
-#define PI7 0x0080
-#define PI8 0x0100
-#define PI9 0x0200
-#define PI10 0x0400
-#define PI11 0x0800
-#define PI12 0x1000
-#define PI13 0x2000
-#define PI14 0x4000
-#define PI15 0x8000
-
-/* PORT J Bit Definitions for the registers
-PORTJ, PORTJ_SET, PORTJ_CLEAR,
-PORTJ_DIR_SET, PORTJ_DIR_CLEAR, PORTJ_INEN,
-PORTJ_FER registers
-*/
-
-
-#define PJ0 0x0001
-#define PJ1 0x0002
-#define PJ2 0x0004
-#define PJ3 0x0008
-#define PJ4 0x0010
-#define PJ5 0x0020
-#define PJ6 0x0040
-#define PJ7 0x0080
-#define PJ8 0x0100
-#define PJ9 0x0200
-#define PJ10 0x0400
-#define PJ11 0x0800
-#define PJ12 0x1000
-#define PJ13 0x2000
-
-
 /* Port Muxing Bit Fields for PORTx_MUX Registers */
 
 #define MUX0 0x00000003
@@ -2703,16 +2400,4 @@
 #define B3MAP_PIH 0x06000000 /* Map Port I High to Byte 3 */
 #define B3MAP_PJH 0x07000000 /* Map Port J High to Byte 3 */
 
-
-/* for legacy compatibility */
-
-#define WLS(x)  (((x)-5) & 0x03) /* Word Length Select */
-#define W1LMAX_MAX W1LMAX_MIN
-#define EBIU_AMCBCTL0 EBIU_AMBCTL0
-#define EBIU_AMCBCTL1 EBIU_AMBCTL1
-#define PINT0_IRQ PINT0_REQUEST
-#define PINT1_IRQ PINT1_REQUEST
-#define PINT2_IRQ PINT2_REQUEST
-#define PINT3_IRQ PINT3_REQUEST
-
 #endif /* _DEF_BF54X_H */
diff --git a/arch/blackfin/mach-bf548/include/mach/gpio.h b/arch/blackfin/mach-bf548/include/mach/gpio.h
index 28037e3..7db4335 100644
--- a/arch/blackfin/mach-bf548/include/mach/gpio.h
+++ b/arch/blackfin/mach-bf548/include/mach/gpio.h
@@ -200,4 +200,15 @@
 
 #endif
 
+#include <mach-common/ports-a.h>
+#include <mach-common/ports-b.h>
+#include <mach-common/ports-c.h>
+#include <mach-common/ports-d.h>
+#include <mach-common/ports-e.h>
+#include <mach-common/ports-f.h>
+#include <mach-common/ports-g.h>
+#include <mach-common/ports-h.h>
+#include <mach-common/ports-i.h>
+#include <mach-common/ports-j.h>
+
 #endif /* _MACH_GPIO_H_ */
diff --git a/arch/blackfin/mach-bf548/include/mach/irq.h b/arch/blackfin/mach-bf548/include/mach/irq.h
index 1f99b51..99fd1b2 100644
--- a/arch/blackfin/mach-bf548/include/mach/irq.h
+++ b/arch/blackfin/mach-bf548/include/mach/irq.h
@@ -474,4 +474,26 @@
 #define IRQ_PINT2_POS		24
 #define IRQ_PINT3_POS		28
 
+#ifndef __ASSEMBLY__
+#include <linux/types.h>
+
+/*
+ * bfin PINT register layout
+ */
+struct bfin_pint_regs {
+	u32 mask_set;
+	u32 mask_clear;
+	u32 irq;
+	u32 assign;
+	u32 edge_set;
+	u32 edge_clear;
+	u32 invert_set;
+	u32 invert_clear;
+	u32 pinstate;
+	u32 latch;
+	u32 __pad0[2];
+};
+
+#endif
+
 #endif /* _BF548_IRQ_H_ */
diff --git a/arch/blackfin/mach-bf548/include/mach/pll.h b/arch/blackfin/mach-bf548/include/mach/pll.h
index 7865a09..94cca67 100644
--- a/arch/blackfin/mach-bf548/include/mach/pll.h
+++ b/arch/blackfin/mach-bf548/include/mach/pll.h
@@ -1,69 +1 @@
-/*
- * Copyright 2007-2008 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later.
- */
-
-#ifndef _MACH_PLL_H
-#define _MACH_PLL_H
-
-#include <asm/blackfin.h>
-#include <asm/irqflags.h>
-
-/* Writing to PLL_CTL initiates a PLL relock sequence. */
-static __inline__ void bfin_write_PLL_CTL(unsigned int val)
-{
-	unsigned long flags, iwr0, iwr1, iwr2;
-
-	if (val == bfin_read_PLL_CTL())
-		return;
-
-	flags = hard_local_irq_save();
-	/* Enable the PLL Wakeup bit in SIC IWR */
-	iwr0 = bfin_read32(SIC_IWR0);
-	iwr1 = bfin_read32(SIC_IWR1);
-	iwr2 = bfin_read32(SIC_IWR2);
-	/* Only allow PPL Wakeup) */
-	bfin_write32(SIC_IWR0, IWR_ENABLE(0));
-	bfin_write32(SIC_IWR1, 0);
-	bfin_write32(SIC_IWR2, 0);
-
-	bfin_write16(PLL_CTL, val);
-	SSYNC();
-	asm("IDLE;");
-
-	bfin_write32(SIC_IWR0, iwr0);
-	bfin_write32(SIC_IWR1, iwr1);
-	bfin_write32(SIC_IWR2, iwr2);
-	hard_local_irq_restore(flags);
-}
-
-/* Writing to VR_CTL initiates a PLL relock sequence. */
-static __inline__ void bfin_write_VR_CTL(unsigned int val)
-{
-	unsigned long flags, iwr0, iwr1, iwr2;
-
-	if (val == bfin_read_VR_CTL())
-		return;
-
-	flags = hard_local_irq_save();
-	/* Enable the PLL Wakeup bit in SIC IWR */
-	iwr0 = bfin_read32(SIC_IWR0);
-	iwr1 = bfin_read32(SIC_IWR1);
-	iwr2 = bfin_read32(SIC_IWR2);
-	/* Only allow PPL Wakeup) */
-	bfin_write32(SIC_IWR0, IWR_ENABLE(0));
-	bfin_write32(SIC_IWR1, 0);
-	bfin_write32(SIC_IWR2, 0);
-
-	bfin_write16(VR_CTL, val);
-	SSYNC();
-	asm("IDLE;");
-
-	bfin_write32(SIC_IWR0, iwr0);
-	bfin_write32(SIC_IWR1, iwr1);
-	bfin_write32(SIC_IWR2, iwr2);
-	hard_local_irq_restore(flags);
-}
-
-#endif /* _MACH_PLL_H */
+#include <mach-common/pll.h>
diff --git a/arch/blackfin/mach-bf561/atomic.S b/arch/blackfin/mach-bf561/atomic.S
index f99f174..52d6f73 100644
--- a/arch/blackfin/mach-bf561/atomic.S
+++ b/arch/blackfin/mach-bf561/atomic.S
@@ -49,6 +49,7 @@
 	jump .Lretry_corelock
 .Ldone_corelock:
 	p0 = r1;
+	/* flush core internal write buffer before invalidating the dcache */
 	CSYNC(r2);
 	flushinv[p0];
 	SSYNC(r2);
@@ -685,6 +686,8 @@
 	r1 = -L1_CACHE_BYTES;
 	r1 = r0 & r1;
 	p0 = r1;
+	/* flush core internal write buffer before invalidating the dcache */
+	CSYNC(r2);
 	flushinv[p0];
 	SSYNC(r2);
 	r0 = [p1];
@@ -907,6 +910,8 @@
 	r1 = -L1_CACHE_BYTES;
 	r1 = r0 & r1;
 	p0 = r1;
+	/* flush core internal write buffer before invalidating the dcache */
+	CSYNC(r2);
 	flushinv[p0];
 	SSYNC(r2);
 	r0 = [p1];
diff --git a/arch/blackfin/mach-bf561/boards/acvilon.c b/arch/blackfin/mach-bf561/boards/acvilon.c
index 0b1c20f..3926cd9 100644
--- a/arch/blackfin/mach-bf561/boards/acvilon.c
+++ b/arch/blackfin/mach-bf561/boards/acvilon.c
@@ -224,7 +224,7 @@
 	 },
 };
 
-unsigned short bfin_uart0_peripherals[] = {
+static unsigned short bfin_uart0_peripherals[] = {
 	P_UART0_TX, P_UART0_RX, 0
 };
 
diff --git a/arch/blackfin/mach-bf561/boards/cm_bf561.c b/arch/blackfin/mach-bf561/boards/cm_bf561.c
index 087b6b0..3b67929 100644
--- a/arch/blackfin/mach-bf561/boards/cm_bf561.c
+++ b/arch/blackfin/mach-bf561/boards/cm_bf561.c
@@ -334,7 +334,7 @@
 	},
 };
 
-unsigned short bfin_uart0_peripherals[] = {
+static unsigned short bfin_uart0_peripherals[] = {
 	P_UART0_TX, P_UART0_RX, 0
 };
 
diff --git a/arch/blackfin/mach-bf561/boards/ezkit.c b/arch/blackfin/mach-bf561/boards/ezkit.c
index ab7a487..f667e77 100644
--- a/arch/blackfin/mach-bf561/boards/ezkit.c
+++ b/arch/blackfin/mach-bf561/boards/ezkit.c
@@ -190,7 +190,7 @@
 	},
 };
 
-unsigned short bfin_uart0_peripherals[] = {
+static unsigned short bfin_uart0_peripherals[] = {
 	P_UART0_TX, P_UART0_RX, 0
 };
 
diff --git a/arch/blackfin/mach-bf561/boards/tepla.c b/arch/blackfin/mach-bf561/boards/tepla.c
index d3017e5..bb056e6 100644
--- a/arch/blackfin/mach-bf561/boards/tepla.c
+++ b/arch/blackfin/mach-bf561/boards/tepla.c
@@ -72,7 +72,7 @@
 	},
 };
 
-unsigned short bfin_uart0_peripherals[] = {
+static unsigned short bfin_uart0_peripherals[] = {
 	P_UART0_TX, P_UART0_RX, 0
 };
 
diff --git a/arch/blackfin/mach-bf561/dma.c b/arch/blackfin/mach-bf561/dma.c
index c938c3c..8ffdd6b 100644
--- a/arch/blackfin/mach-bf561/dma.c
+++ b/arch/blackfin/mach-bf561/dma.c
@@ -11,7 +11,7 @@
 #include <asm/blackfin.h>
 #include <asm/dma.h>
 
-struct dma_register *dma_io_base_addr[MAX_DMA_CHANNELS] = {
+struct dma_register * const dma_io_base_addr[MAX_DMA_CHANNELS] = {
 	(struct dma_register *) DMA1_0_NEXT_DESC_PTR,
 	(struct dma_register *) DMA1_1_NEXT_DESC_PTR,
 	(struct dma_register *) DMA1_2_NEXT_DESC_PTR,
@@ -36,14 +36,14 @@
 	(struct dma_register *) DMA2_9_NEXT_DESC_PTR,
 	(struct dma_register *) DMA2_10_NEXT_DESC_PTR,
 	(struct dma_register *) DMA2_11_NEXT_DESC_PTR,
-	(struct dma_register *) MDMA1_D0_NEXT_DESC_PTR,
-	(struct dma_register *) MDMA1_S0_NEXT_DESC_PTR,
-	(struct dma_register *) MDMA1_D1_NEXT_DESC_PTR,
-	(struct dma_register *) MDMA1_S1_NEXT_DESC_PTR,
-	(struct dma_register *) MDMA2_D0_NEXT_DESC_PTR,
-	(struct dma_register *) MDMA2_S0_NEXT_DESC_PTR,
-	(struct dma_register *) MDMA2_D1_NEXT_DESC_PTR,
-	(struct dma_register *) MDMA2_S1_NEXT_DESC_PTR,
+	(struct dma_register *) MDMA_D0_NEXT_DESC_PTR,
+	(struct dma_register *) MDMA_S0_NEXT_DESC_PTR,
+	(struct dma_register *) MDMA_D1_NEXT_DESC_PTR,
+	(struct dma_register *) MDMA_S1_NEXT_DESC_PTR,
+	(struct dma_register *) MDMA_D2_NEXT_DESC_PTR,
+	(struct dma_register *) MDMA_S2_NEXT_DESC_PTR,
+	(struct dma_register *) MDMA_D3_NEXT_DESC_PTR,
+	(struct dma_register *) MDMA_S3_NEXT_DESC_PTR,
 	(struct dma_register *) IMDMA_D0_NEXT_DESC_PTR,
 	(struct dma_register *) IMDMA_S0_NEXT_DESC_PTR,
 	(struct dma_register *) IMDMA_D1_NEXT_DESC_PTR,
diff --git a/arch/blackfin/mach-bf561/hotplug.c b/arch/blackfin/mach-bf561/hotplug.c
index c95169b..4cd3b28 100644
--- a/arch/blackfin/mach-bf561/hotplug.c
+++ b/arch/blackfin/mach-bf561/hotplug.c
@@ -6,7 +6,9 @@
  */
 
 #include <asm/blackfin.h>
+#include <asm/irq.h>
 #include <asm/smp.h>
+
 #define SIC_SYSIRQ(irq)	(irq - (IRQ_CORETMR + 1))
 
 int hotplug_coreb;
diff --git a/arch/blackfin/mach-bf561/include/mach/anomaly.h b/arch/blackfin/mach-bf561/include/mach/anomaly.h
index 4c108c9..6a3499b 100644
--- a/arch/blackfin/mach-bf561/include/mach/anomaly.h
+++ b/arch/blackfin/mach-bf561/include/mach/anomaly.h
@@ -181,7 +181,11 @@
 /* Incorrect Timer Pulse Width in Single-Shot PWM_OUT Mode with External Clock */
 #define ANOMALY_05000254 (__SILICON_REVISION__ > 3)
 /* Interrupt/Exception During Short Hardware Loop May Cause Bad Instruction Fetches */
-#define ANOMALY_05000257 (__SILICON_REVISION__ < 5)
+/* Temporary workaround for kgdb bug 6333 in the SMP kernel. It looks like coreb hangs in an
+ * exception without handling anomaly 05000257 properly on bf561 v0.5. This workaround may
+ * change after the behavior and the root cause are confirmed with the hardware team.
+ */
+#define ANOMALY_05000257 (__SILICON_REVISION__ < 5 || (__SILICON_REVISION__ == 5 && CONFIG_SMP))
 /* Instruction Cache Is Corrupted When Bits 9 and 12 of the ICPLB Data Registers Differ */
 #define ANOMALY_05000258 (__SILICON_REVISION__ < 5)
 /* ICPLB_STATUS MMR Register May Be Corrupted */
diff --git a/arch/blackfin/mach-bf561/include/mach/bfin_serial.h b/arch/blackfin/mach-bf561/include/mach/bfin_serial.h
new file mode 100644
index 0000000..08072c8
--- /dev/null
+++ b/arch/blackfin/mach-bf561/include/mach/bfin_serial.h
@@ -0,0 +1,14 @@
+/*
+ * mach/bfin_serial.h - Blackfin UART/Serial definitions
+ *
+ * Copyright 2006-2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#ifndef __BFIN_MACH_SERIAL_H__
+#define __BFIN_MACH_SERIAL_H__
+
+#define BFIN_UART_NR_PORTS	1
+
+#endif
diff --git a/arch/blackfin/mach-bf561/include/mach/bfin_serial_5xx.h b/arch/blackfin/mach-bf561/include/mach/bfin_serial_5xx.h
index e33e158..3a69474 100644
--- a/arch/blackfin/mach-bf561/include/mach/bfin_serial_5xx.h
+++ b/arch/blackfin/mach-bf561/include/mach/bfin_serial_5xx.h
@@ -4,36 +4,9 @@
  * Licensed under the GPL-2 or later.
  */
 
-#include <linux/serial.h>
 #include <asm/dma.h>
 #include <asm/portmux.h>
 
-#define UART_GET_CHAR(uart)     bfin_read16(((uart)->port.membase + OFFSET_RBR))
-#define UART_GET_DLL(uart)	bfin_read16(((uart)->port.membase + OFFSET_DLL))
-#define UART_GET_IER(uart)      bfin_read16(((uart)->port.membase + OFFSET_IER))
-#define UART_GET_DLH(uart)	bfin_read16(((uart)->port.membase + OFFSET_DLH))
-#define UART_GET_IIR(uart)      bfin_read16(((uart)->port.membase + OFFSET_IIR))
-#define UART_GET_LCR(uart)      bfin_read16(((uart)->port.membase + OFFSET_LCR))
-#define UART_GET_GCTL(uart)     bfin_read16(((uart)->port.membase + OFFSET_GCTL))
-
-#define UART_PUT_CHAR(uart,v)   bfin_write16(((uart)->port.membase + OFFSET_THR),v)
-#define UART_PUT_DLL(uart,v)    bfin_write16(((uart)->port.membase + OFFSET_DLL),v)
-#define UART_PUT_IER(uart,v)    bfin_write16(((uart)->port.membase + OFFSET_IER),v)
-#define UART_SET_IER(uart,v)    UART_PUT_IER(uart, UART_GET_IER(uart) | (v))
-#define UART_CLEAR_IER(uart,v)  UART_PUT_IER(uart, UART_GET_IER(uart) & ~(v))
-#define UART_PUT_DLH(uart,v)    bfin_write16(((uart)->port.membase + OFFSET_DLH),v)
-#define UART_PUT_LCR(uart,v)    bfin_write16(((uart)->port.membase + OFFSET_LCR),v)
-#define UART_PUT_GCTL(uart,v)   bfin_write16(((uart)->port.membase + OFFSET_GCTL),v)
-
-#define UART_SET_DLAB(uart)     do { UART_PUT_LCR(uart, UART_GET_LCR(uart) | DLAB); SSYNC(); } while (0)
-#define UART_CLEAR_DLAB(uart)   do { UART_PUT_LCR(uart, UART_GET_LCR(uart) & ~DLAB); SSYNC(); } while (0)
-
-#define UART_GET_CTS(x) gpio_get_value(x->cts_pin)
-#define UART_DISABLE_RTS(x) gpio_set_value(x->rts_pin, 1)
-#define UART_ENABLE_RTS(x) gpio_set_value(x->rts_pin, 0)
-#define UART_ENABLE_INTS(x, v) UART_PUT_IER(x, v)
-#define UART_DISABLE_INTS(x) UART_PUT_IER(x, 0)
-
 #ifdef CONFIG_BFIN_UART0_CTSRTS
 # define CONFIG_SERIAL_BFIN_CTSRTS
 # ifndef CONFIG_UART0_CTS_PIN
@@ -44,51 +17,6 @@
 # endif
 #endif
 
-#define BFIN_UART_TX_FIFO_SIZE	2
-
-struct bfin_serial_port {
-        struct uart_port        port;
-        unsigned int            old_status;
-	int			status_irq;
-	unsigned int lsr;
-#ifdef CONFIG_SERIAL_BFIN_DMA
-	int			tx_done;
-	int			tx_count;
-	struct circ_buf		rx_dma_buf;
-	struct timer_list       rx_dma_timer;
-	int			rx_dma_nrows;
-	unsigned int		tx_dma_channel;
-	unsigned int		rx_dma_channel;
-	struct work_struct	tx_dma_workqueue;
-#else
-# if ANOMALY_05000363
-	unsigned int anomaly_threshold;
-# endif
-#endif
-#ifdef CONFIG_SERIAL_BFIN_CTSRTS
-	struct timer_list       cts_timer;
-	int			cts_pin;
-	int			rts_pin;
-#endif
-};
-
-/* The hardware clears the LSR bits upon read, so we need to cache
- * some of the more fun bits in software so they don't get lost
- * when checking the LSR in other code paths (TX).
- */
-static inline unsigned int UART_GET_LSR(struct bfin_serial_port *uart)
-{
-	unsigned int lsr = bfin_read16(uart->port.membase + OFFSET_LSR);
-	uart->lsr |= (lsr & (BI|FE|PE|OE));
-	return lsr | uart->lsr;
-}
-
-static inline void UART_CLEAR_LSR(struct bfin_serial_port *uart)
-{
-	uart->lsr = 0;
-	bfin_write16(uart->port.membase + OFFSET_LSR, -1);
-}
-
 struct bfin_serial_res {
 	unsigned long	uart_base_addr;
 	int		uart_irq;
@@ -120,3 +48,5 @@
 };
 
 #define DRIVER_NAME "bfin-uart"
+
+#include <asm/bfin_serial.h>
diff --git a/arch/blackfin/mach-bf561/include/mach/blackfin.h b/arch/blackfin/mach-bf561/include/mach/blackfin.h
index 6c7dc58..dc47053 100644
--- a/arch/blackfin/mach-bf561/include/mach/blackfin.h
+++ b/arch/blackfin/mach-bf561/include/mach/blackfin.h
@@ -1,5 +1,5 @@
 /*
- * Copyright 2005-2009 Analog Devices Inc.
+ * Copyright 2005-2010 Analog Devices Inc.
  *
  * Licensed under the GPL-2 or later.
  */
@@ -10,11 +10,14 @@
 #define BF561_FAMILY
 
 #include "bf561.h"
-#include "defBF561.h"
 #include "anomaly.h"
 
-#if !defined(__ASSEMBLY__)
-#include "cdefBF561.h"
+#include <asm/def_LPBlackfin.h>
+#include "defBF561.h"
+
+#ifndef __ASSEMBLY__
+# include <asm/cdef_LPBlackfin.h>
+# include "cdefBF561.h"
 #endif
 
 #define bfin_read_FIO_FLAG_D() bfin_read_FIO0_FLAG_D()
@@ -35,19 +38,4 @@
 #define bfin_read_SICB_ISR(x)		bfin_read32(__SIC_MUX(SICB_ISR0, x))
 #define bfin_write_SICB_ISR(x, val)	bfin_write32(__SIC_MUX(SICB_ISR0, x), val)
 
-#define BFIN_UART_NR_PORTS      1
-
-#define OFFSET_THR              0x00	/* Transmit Holding register            */
-#define OFFSET_RBR              0x00	/* Receive Buffer register              */
-#define OFFSET_DLL              0x00	/* Divisor Latch (Low-Byte)             */
-#define OFFSET_IER              0x04	/* Interrupt Enable Register            */
-#define OFFSET_DLH              0x04	/* Divisor Latch (High-Byte)            */
-#define OFFSET_IIR              0x08	/* Interrupt Identification Register    */
-#define OFFSET_LCR              0x0C	/* Line Control Register                */
-#define OFFSET_MCR              0x10	/* Modem Control Register               */
-#define OFFSET_LSR              0x14	/* Line Status Register                 */
-#define OFFSET_MSR              0x18	/* Modem Status Register                */
-#define OFFSET_SCR              0x1C	/* SCR Scratch Register                 */
-#define OFFSET_GCTL             0x24	/* Global Control Register              */
-
 #endif				/* _MACH_BLACKFIN_H_ */
diff --git a/arch/blackfin/mach-bf561/include/mach/cdefBF561.h b/arch/blackfin/mach-bf561/include/mach/cdefBF561.h
index 2bab991..7533315 100644
--- a/arch/blackfin/mach-bf561/include/mach/cdefBF561.h
+++ b/arch/blackfin/mach-bf561/include/mach/cdefBF561.h
@@ -1,5 +1,5 @@
 /*
- * Copyright 2005-2009 Analog Devices Inc.
+ * Copyright 2005-2010 Analog Devices Inc.
  *
  * Licensed under the GPL-2 or later.
  */
@@ -7,14 +7,6 @@
 #ifndef _CDEF_BF561_H
 #define _CDEF_BF561_H
 
-#include <asm/blackfin.h>
-
-/* include all Core registers and bit definitions */
-#include "defBF561.h"
-
-/*include core specific register pointer definitions*/
-#include <asm/cdef_LPBlackfin.h>
-
 /*********************************************************************************** */
 /* System MMR Register Map */
 /*********************************************************************************** */
@@ -523,14 +515,14 @@
 #define bfin_read_PPI1_FRAME()               bfin_read16(PPI1_FRAME)
 #define bfin_write_PPI1_FRAME(val)           bfin_write16(PPI1_FRAME,val)
 /*DMA traffic control registers */
-#define bfin_read_DMA1_TC_PER()              bfin_read16(DMA1_TC_PER)
-#define bfin_write_DMA1_TC_PER(val)          bfin_write16(DMA1_TC_PER,val)
-#define bfin_read_DMA1_TC_CNT()              bfin_read16(DMA1_TC_CNT)
-#define bfin_write_DMA1_TC_CNT(val)          bfin_write16(DMA1_TC_CNT,val)
-#define bfin_read_DMA2_TC_PER()              bfin_read16(DMA2_TC_PER)
-#define bfin_write_DMA2_TC_PER(val)          bfin_write16(DMA2_TC_PER,val)
-#define bfin_read_DMA2_TC_CNT()              bfin_read16(DMA2_TC_CNT)
-#define bfin_write_DMA2_TC_CNT(val)          bfin_write16(DMA2_TC_CNT,val)
+#define bfin_read_DMAC0_TC_PER()             bfin_read16(DMAC0_TC_PER)
+#define bfin_write_DMAC0_TC_PER(val)         bfin_write16(DMAC0_TC_PER,val)
+#define bfin_read_DMAC0_TC_CNT()             bfin_read16(DMAC0_TC_CNT)
+#define bfin_write_DMAC0_TC_CNT(val)         bfin_write16(DMAC0_TC_CNT,val)
+#define bfin_read_DMAC1_TC_PER()             bfin_read16(DMAC1_TC_PER)
+#define bfin_write_DMAC1_TC_PER(val)         bfin_write16(DMAC1_TC_PER,val)
+#define bfin_read_DMAC1_TC_CNT()             bfin_read16(DMAC1_TC_CNT)
+#define bfin_write_DMAC1_TC_CNT(val)         bfin_write16(DMAC1_TC_CNT,val)
 /* DMA1 Controller registers (0xFFC0 1C00-0xFFC0 1FFF) */
 #define bfin_read_DMA1_0_CONFIG()            bfin_read16(DMA1_0_CONFIG)
 #define bfin_write_DMA1_0_CONFIG(val)        bfin_write16(DMA1_0_CONFIG,val)
@@ -845,110 +837,110 @@
 #define bfin_read_DMA1_11_PERIPHERAL_MAP()   bfin_read16(DMA1_11_PERIPHERAL_MAP)
 #define bfin_write_DMA1_11_PERIPHERAL_MAP(val) bfin_write16(DMA1_11_PERIPHERAL_MAP,val)
 /* Memory DMA1 Controller registers (0xFFC0 1E80-0xFFC0 1FFF) */
-#define bfin_read_MDMA1_D0_CONFIG()          bfin_read16(MDMA1_D0_CONFIG)
-#define bfin_write_MDMA1_D0_CONFIG(val)      bfin_write16(MDMA1_D0_CONFIG,val)
-#define bfin_read_MDMA1_D0_NEXT_DESC_PTR()   bfin_read32(MDMA1_D0_NEXT_DESC_PTR)
-#define bfin_write_MDMA1_D0_NEXT_DESC_PTR(val) bfin_write32(MDMA1_D0_NEXT_DESC_PTR,val)
-#define bfin_read_MDMA1_D0_START_ADDR()      bfin_read32(MDMA1_D0_START_ADDR)
-#define bfin_write_MDMA1_D0_START_ADDR(val)  bfin_write32(MDMA1_D0_START_ADDR,val)
-#define bfin_read_MDMA1_D0_X_COUNT()         bfin_read16(MDMA1_D0_X_COUNT)
-#define bfin_write_MDMA1_D0_X_COUNT(val)     bfin_write16(MDMA1_D0_X_COUNT,val)
-#define bfin_read_MDMA1_D0_Y_COUNT()         bfin_read16(MDMA1_D0_Y_COUNT)
-#define bfin_write_MDMA1_D0_Y_COUNT(val)     bfin_write16(MDMA1_D0_Y_COUNT,val)
-#define bfin_read_MDMA1_D0_X_MODIFY()        bfin_read16(MDMA1_D0_X_MODIFY)
-#define bfin_write_MDMA1_D0_X_MODIFY(val)    bfin_write16(MDMA1_D0_X_MODIFY,val)
-#define bfin_read_MDMA1_D0_Y_MODIFY()        bfin_read16(MDMA1_D0_Y_MODIFY)
-#define bfin_write_MDMA1_D0_Y_MODIFY(val)    bfin_write16(MDMA1_D0_Y_MODIFY,val)
-#define bfin_read_MDMA1_D0_CURR_DESC_PTR()   bfin_read32(MDMA1_D0_CURR_DESC_PTR)
-#define bfin_write_MDMA1_D0_CURR_DESC_PTR(val) bfin_write32(MDMA1_D0_CURR_DESC_PTR,val)
-#define bfin_read_MDMA1_D0_CURR_ADDR()       bfin_read32(MDMA1_D0_CURR_ADDR)
-#define bfin_write_MDMA1_D0_CURR_ADDR(val)   bfin_write32(MDMA1_D0_CURR_ADDR,val)
-#define bfin_read_MDMA1_D0_CURR_X_COUNT()    bfin_read16(MDMA1_D0_CURR_X_COUNT)
-#define bfin_write_MDMA1_D0_CURR_X_COUNT(val) bfin_write16(MDMA1_D0_CURR_X_COUNT,val)
-#define bfin_read_MDMA1_D0_CURR_Y_COUNT()    bfin_read16(MDMA1_D0_CURR_Y_COUNT)
-#define bfin_write_MDMA1_D0_CURR_Y_COUNT(val) bfin_write16(MDMA1_D0_CURR_Y_COUNT,val)
-#define bfin_read_MDMA1_D0_IRQ_STATUS()      bfin_read16(MDMA1_D0_IRQ_STATUS)
-#define bfin_write_MDMA1_D0_IRQ_STATUS(val)  bfin_write16(MDMA1_D0_IRQ_STATUS,val)
-#define bfin_read_MDMA1_D0_PERIPHERAL_MAP()  bfin_read16(MDMA1_D0_PERIPHERAL_MAP)
-#define bfin_write_MDMA1_D0_PERIPHERAL_MAP(val) bfin_write16(MDMA1_D0_PERIPHERAL_MAP,val)
-#define bfin_read_MDMA1_S0_CONFIG()          bfin_read16(MDMA1_S0_CONFIG)
-#define bfin_write_MDMA1_S0_CONFIG(val)      bfin_write16(MDMA1_S0_CONFIG,val)
-#define bfin_read_MDMA1_S0_NEXT_DESC_PTR()   bfin_read32(MDMA1_S0_NEXT_DESC_PTR)
-#define bfin_write_MDMA1_S0_NEXT_DESC_PTR(val) bfin_write32(MDMA1_S0_NEXT_DESC_PTR,val)
-#define bfin_read_MDMA1_S0_START_ADDR()      bfin_read32(MDMA1_S0_START_ADDR)
-#define bfin_write_MDMA1_S0_START_ADDR(val)  bfin_write32(MDMA1_S0_START_ADDR,val)
-#define bfin_read_MDMA1_S0_X_COUNT()         bfin_read16(MDMA1_S0_X_COUNT)
-#define bfin_write_MDMA1_S0_X_COUNT(val)     bfin_write16(MDMA1_S0_X_COUNT,val)
-#define bfin_read_MDMA1_S0_Y_COUNT()         bfin_read16(MDMA1_S0_Y_COUNT)
-#define bfin_write_MDMA1_S0_Y_COUNT(val)     bfin_write16(MDMA1_S0_Y_COUNT,val)
-#define bfin_read_MDMA1_S0_X_MODIFY()        bfin_read16(MDMA1_S0_X_MODIFY)
-#define bfin_write_MDMA1_S0_X_MODIFY(val)    bfin_write16(MDMA1_S0_X_MODIFY,val)
-#define bfin_read_MDMA1_S0_Y_MODIFY()        bfin_read16(MDMA1_S0_Y_MODIFY)
-#define bfin_write_MDMA1_S0_Y_MODIFY(val)    bfin_write16(MDMA1_S0_Y_MODIFY,val)
-#define bfin_read_MDMA1_S0_CURR_DESC_PTR()   bfin_read32(MDMA1_S0_CURR_DESC_PTR)
-#define bfin_write_MDMA1_S0_CURR_DESC_PTR(val) bfin_write32(MDMA1_S0_CURR_DESC_PTR,val)
-#define bfin_read_MDMA1_S0_CURR_ADDR()       bfin_read32(MDMA1_S0_CURR_ADDR)
-#define bfin_write_MDMA1_S0_CURR_ADDR(val)   bfin_write32(MDMA1_S0_CURR_ADDR,val)
-#define bfin_read_MDMA1_S0_CURR_X_COUNT()    bfin_read16(MDMA1_S0_CURR_X_COUNT)
-#define bfin_write_MDMA1_S0_CURR_X_COUNT(val) bfin_write16(MDMA1_S0_CURR_X_COUNT,val)
-#define bfin_read_MDMA1_S0_CURR_Y_COUNT()    bfin_read16(MDMA1_S0_CURR_Y_COUNT)
-#define bfin_write_MDMA1_S0_CURR_Y_COUNT(val) bfin_write16(MDMA1_S0_CURR_Y_COUNT,val)
-#define bfin_read_MDMA1_S0_IRQ_STATUS()      bfin_read16(MDMA1_S0_IRQ_STATUS)
-#define bfin_write_MDMA1_S0_IRQ_STATUS(val)  bfin_write16(MDMA1_S0_IRQ_STATUS,val)
-#define bfin_read_MDMA1_S0_PERIPHERAL_MAP()  bfin_read16(MDMA1_S0_PERIPHERAL_MAP)
-#define bfin_write_MDMA1_S0_PERIPHERAL_MAP(val) bfin_write16(MDMA1_S0_PERIPHERAL_MAP,val)
-#define bfin_read_MDMA1_D1_CONFIG()          bfin_read16(MDMA1_D1_CONFIG)
-#define bfin_write_MDMA1_D1_CONFIG(val)      bfin_write16(MDMA1_D1_CONFIG,val)
-#define bfin_read_MDMA1_D1_NEXT_DESC_PTR()   bfin_read32(MDMA1_D1_NEXT_DESC_PTR)
-#define bfin_write_MDMA1_D1_NEXT_DESC_PTR(val) bfin_write32(MDMA1_D1_NEXT_DESC_PTR,val)
-#define bfin_read_MDMA1_D1_START_ADDR()      bfin_read32(MDMA1_D1_START_ADDR)
-#define bfin_write_MDMA1_D1_START_ADDR(val)  bfin_write32(MDMA1_D1_START_ADDR,val)
-#define bfin_read_MDMA1_D1_X_COUNT()         bfin_read16(MDMA1_D1_X_COUNT)
-#define bfin_write_MDMA1_D1_X_COUNT(val)     bfin_write16(MDMA1_D1_X_COUNT,val)
-#define bfin_read_MDMA1_D1_Y_COUNT()         bfin_read16(MDMA1_D1_Y_COUNT)
-#define bfin_write_MDMA1_D1_Y_COUNT(val)     bfin_write16(MDMA1_D1_Y_COUNT,val)
-#define bfin_read_MDMA1_D1_X_MODIFY()        bfin_read16(MDMA1_D1_X_MODIFY)
-#define bfin_write_MDMA1_D1_X_MODIFY(val)    bfin_write16(MDMA1_D1_X_MODIFY,val)
-#define bfin_read_MDMA1_D1_Y_MODIFY()        bfin_read16(MDMA1_D1_Y_MODIFY)
-#define bfin_write_MDMA1_D1_Y_MODIFY(val)    bfin_write16(MDMA1_D1_Y_MODIFY,val)
-#define bfin_read_MDMA1_D1_CURR_DESC_PTR()   bfin_read32(MDMA1_D1_CURR_DESC_PTR)
-#define bfin_write_MDMA1_D1_CURR_DESC_PTR(val) bfin_write32(MDMA1_D1_CURR_DESC_PTR,val)
-#define bfin_read_MDMA1_D1_CURR_ADDR()       bfin_read32(MDMA1_D1_CURR_ADDR)
-#define bfin_write_MDMA1_D1_CURR_ADDR(val)   bfin_write32(MDMA1_D1_CURR_ADDR,val)
-#define bfin_read_MDMA1_D1_CURR_X_COUNT()    bfin_read16(MDMA1_D1_CURR_X_COUNT)
-#define bfin_write_MDMA1_D1_CURR_X_COUNT(val) bfin_write16(MDMA1_D1_CURR_X_COUNT,val)
-#define bfin_read_MDMA1_D1_CURR_Y_COUNT()    bfin_read16(MDMA1_D1_CURR_Y_COUNT)
-#define bfin_write_MDMA1_D1_CURR_Y_COUNT(val) bfin_write16(MDMA1_D1_CURR_Y_COUNT,val)
-#define bfin_read_MDMA1_D1_IRQ_STATUS()      bfin_read16(MDMA1_D1_IRQ_STATUS)
-#define bfin_write_MDMA1_D1_IRQ_STATUS(val)  bfin_write16(MDMA1_D1_IRQ_STATUS,val)
-#define bfin_read_MDMA1_D1_PERIPHERAL_MAP()  bfin_read16(MDMA1_D1_PERIPHERAL_MAP)
-#define bfin_write_MDMA1_D1_PERIPHERAL_MAP(val) bfin_write16(MDMA1_D1_PERIPHERAL_MAP,val)
-#define bfin_read_MDMA1_S1_CONFIG()          bfin_read16(MDMA1_S1_CONFIG)
-#define bfin_write_MDMA1_S1_CONFIG(val)      bfin_write16(MDMA1_S1_CONFIG,val)
-#define bfin_read_MDMA1_S1_NEXT_DESC_PTR()   bfin_read32(MDMA1_S1_NEXT_DESC_PTR)
-#define bfin_write_MDMA1_S1_NEXT_DESC_PTR(val) bfin_write32(MDMA1_S1_NEXT_DESC_PTR,val)
-#define bfin_read_MDMA1_S1_START_ADDR()      bfin_read32(MDMA1_S1_START_ADDR)
-#define bfin_write_MDMA1_S1_START_ADDR(val)  bfin_write32(MDMA1_S1_START_ADDR,val)
-#define bfin_read_MDMA1_S1_X_COUNT()         bfin_read16(MDMA1_S1_X_COUNT)
-#define bfin_write_MDMA1_S1_X_COUNT(val)     bfin_write16(MDMA1_S1_X_COUNT,val)
-#define bfin_read_MDMA1_S1_Y_COUNT()         bfin_read16(MDMA1_S1_Y_COUNT)
-#define bfin_write_MDMA1_S1_Y_COUNT(val)     bfin_write16(MDMA1_S1_Y_COUNT,val)
-#define bfin_read_MDMA1_S1_X_MODIFY()        bfin_read16(MDMA1_S1_X_MODIFY)
-#define bfin_write_MDMA1_S1_X_MODIFY(val)    bfin_write16(MDMA1_S1_X_MODIFY,val)
-#define bfin_read_MDMA1_S1_Y_MODIFY()        bfin_read16(MDMA1_S1_Y_MODIFY)
-#define bfin_write_MDMA1_S1_Y_MODIFY(val)    bfin_write16(MDMA1_S1_Y_MODIFY,val)
-#define bfin_read_MDMA1_S1_CURR_DESC_PTR()   bfin_read32(MDMA1_S1_CURR_DESC_PTR)
-#define bfin_write_MDMA1_S1_CURR_DESC_PTR(val) bfin_write32(MDMA1_S1_CURR_DESC_PTR,val)
-#define bfin_read_MDMA1_S1_CURR_ADDR()       bfin_read32(MDMA1_S1_CURR_ADDR)
-#define bfin_write_MDMA1_S1_CURR_ADDR(val)   bfin_write32(MDMA1_S1_CURR_ADDR,val)
-#define bfin_read_MDMA1_S1_CURR_X_COUNT()    bfin_read16(MDMA1_S1_CURR_X_COUNT)
-#define bfin_write_MDMA1_S1_CURR_X_COUNT(val) bfin_write16(MDMA1_S1_CURR_X_COUNT,val)
-#define bfin_read_MDMA1_S1_CURR_Y_COUNT()    bfin_read16(MDMA1_S1_CURR_Y_COUNT)
-#define bfin_write_MDMA1_S1_CURR_Y_COUNT(val) bfin_write16(MDMA1_S1_CURR_Y_COUNT,val)
-#define bfin_read_MDMA1_S1_IRQ_STATUS()      bfin_read16(MDMA1_S1_IRQ_STATUS)
-#define bfin_write_MDMA1_S1_IRQ_STATUS(val)  bfin_write16(MDMA1_S1_IRQ_STATUS,val)
-#define bfin_read_MDMA1_S1_PERIPHERAL_MAP()  bfin_read16(MDMA1_S1_PERIPHERAL_MAP)
-#define bfin_write_MDMA1_S1_PERIPHERAL_MAP(val) bfin_write16(MDMA1_S1_PERIPHERAL_MAP,val)
+#define bfin_read_MDMA_D2_CONFIG()          bfin_read16(MDMA_D2_CONFIG)
+#define bfin_write_MDMA_D2_CONFIG(val)      bfin_write16(MDMA_D2_CONFIG,val)
+#define bfin_read_MDMA_D2_NEXT_DESC_PTR()   bfin_read32(MDMA_D2_NEXT_DESC_PTR)
+#define bfin_write_MDMA_D2_NEXT_DESC_PTR(val) bfin_write32(MDMA_D2_NEXT_DESC_PTR,val)
+#define bfin_read_MDMA_D2_START_ADDR()      bfin_read32(MDMA_D2_START_ADDR)
+#define bfin_write_MDMA_D2_START_ADDR(val)  bfin_write32(MDMA_D2_START_ADDR,val)
+#define bfin_read_MDMA_D2_X_COUNT()         bfin_read16(MDMA_D2_X_COUNT)
+#define bfin_write_MDMA_D2_X_COUNT(val)     bfin_write16(MDMA_D2_X_COUNT,val)
+#define bfin_read_MDMA_D2_Y_COUNT()         bfin_read16(MDMA_D2_Y_COUNT)
+#define bfin_write_MDMA_D2_Y_COUNT(val)     bfin_write16(MDMA_D2_Y_COUNT,val)
+#define bfin_read_MDMA_D2_X_MODIFY()        bfin_read16(MDMA_D2_X_MODIFY)
+#define bfin_write_MDMA_D2_X_MODIFY(val)    bfin_write16(MDMA_D2_X_MODIFY,val)
+#define bfin_read_MDMA_D2_Y_MODIFY()        bfin_read16(MDMA_D2_Y_MODIFY)
+#define bfin_write_MDMA_D2_Y_MODIFY(val)    bfin_write16(MDMA_D2_Y_MODIFY,val)
+#define bfin_read_MDMA_D2_CURR_DESC_PTR()   bfin_read32(MDMA_D2_CURR_DESC_PTR)
+#define bfin_write_MDMA_D2_CURR_DESC_PTR(val) bfin_write32(MDMA_D2_CURR_DESC_PTR,val)
+#define bfin_read_MDMA_D2_CURR_ADDR()       bfin_read32(MDMA_D2_CURR_ADDR)
+#define bfin_write_MDMA_D2_CURR_ADDR(val)   bfin_write32(MDMA_D2_CURR_ADDR,val)
+#define bfin_read_MDMA_D2_CURR_X_COUNT()    bfin_read16(MDMA_D2_CURR_X_COUNT)
+#define bfin_write_MDMA_D2_CURR_X_COUNT(val) bfin_write16(MDMA_D2_CURR_X_COUNT,val)
+#define bfin_read_MDMA_D2_CURR_Y_COUNT()    bfin_read16(MDMA_D2_CURR_Y_COUNT)
+#define bfin_write_MDMA_D2_CURR_Y_COUNT(val) bfin_write16(MDMA_D2_CURR_Y_COUNT,val)
+#define bfin_read_MDMA_D2_IRQ_STATUS()      bfin_read16(MDMA_D2_IRQ_STATUS)
+#define bfin_write_MDMA_D2_IRQ_STATUS(val)  bfin_write16(MDMA_D2_IRQ_STATUS,val)
+#define bfin_read_MDMA_D2_PERIPHERAL_MAP()  bfin_read16(MDMA_D2_PERIPHERAL_MAP)
+#define bfin_write_MDMA_D2_PERIPHERAL_MAP(val) bfin_write16(MDMA_D2_PERIPHERAL_MAP,val)
+#define bfin_read_MDMA_S2_CONFIG()          bfin_read16(MDMA_S2_CONFIG)
+#define bfin_write_MDMA_S2_CONFIG(val)      bfin_write16(MDMA_S2_CONFIG,val)
+#define bfin_read_MDMA_S2_NEXT_DESC_PTR()   bfin_read32(MDMA_S2_NEXT_DESC_PTR)
+#define bfin_write_MDMA_S2_NEXT_DESC_PTR(val) bfin_write32(MDMA_S2_NEXT_DESC_PTR,val)
+#define bfin_read_MDMA_S2_START_ADDR()      bfin_read32(MDMA_S2_START_ADDR)
+#define bfin_write_MDMA_S2_START_ADDR(val)  bfin_write32(MDMA_S2_START_ADDR,val)
+#define bfin_read_MDMA_S2_X_COUNT()         bfin_read16(MDMA_S2_X_COUNT)
+#define bfin_write_MDMA_S2_X_COUNT(val)     bfin_write16(MDMA_S2_X_COUNT,val)
+#define bfin_read_MDMA_S2_Y_COUNT()         bfin_read16(MDMA_S2_Y_COUNT)
+#define bfin_write_MDMA_S2_Y_COUNT(val)     bfin_write16(MDMA_S2_Y_COUNT,val)
+#define bfin_read_MDMA_S2_X_MODIFY()        bfin_read16(MDMA_S2_X_MODIFY)
+#define bfin_write_MDMA_S2_X_MODIFY(val)    bfin_write16(MDMA_S2_X_MODIFY,val)
+#define bfin_read_MDMA_S2_Y_MODIFY()        bfin_read16(MDMA_S2_Y_MODIFY)
+#define bfin_write_MDMA_S2_Y_MODIFY(val)    bfin_write16(MDMA_S2_Y_MODIFY,val)
+#define bfin_read_MDMA_S2_CURR_DESC_PTR()   bfin_read32(MDMA_S2_CURR_DESC_PTR)
+#define bfin_write_MDMA_S2_CURR_DESC_PTR(val) bfin_write32(MDMA_S2_CURR_DESC_PTR,val)
+#define bfin_read_MDMA_S2_CURR_ADDR()       bfin_read32(MDMA_S2_CURR_ADDR)
+#define bfin_write_MDMA_S2_CURR_ADDR(val)   bfin_write32(MDMA_S2_CURR_ADDR,val)
+#define bfin_read_MDMA_S2_CURR_X_COUNT()    bfin_read16(MDMA_S2_CURR_X_COUNT)
+#define bfin_write_MDMA_S2_CURR_X_COUNT(val) bfin_write16(MDMA_S2_CURR_X_COUNT,val)
+#define bfin_read_MDMA_S2_CURR_Y_COUNT()    bfin_read16(MDMA_S2_CURR_Y_COUNT)
+#define bfin_write_MDMA_S2_CURR_Y_COUNT(val) bfin_write16(MDMA_S2_CURR_Y_COUNT,val)
+#define bfin_read_MDMA_S2_IRQ_STATUS()      bfin_read16(MDMA_S2_IRQ_STATUS)
+#define bfin_write_MDMA_S2_IRQ_STATUS(val)  bfin_write16(MDMA_S2_IRQ_STATUS,val)
+#define bfin_read_MDMA_S2_PERIPHERAL_MAP()  bfin_read16(MDMA_S2_PERIPHERAL_MAP)
+#define bfin_write_MDMA_S2_PERIPHERAL_MAP(val) bfin_write16(MDMA_S2_PERIPHERAL_MAP,val)
+#define bfin_read_MDMA_D3_CONFIG()          bfin_read16(MDMA_D3_CONFIG)
+#define bfin_write_MDMA_D3_CONFIG(val)      bfin_write16(MDMA_D3_CONFIG,val)
+#define bfin_read_MDMA_D3_NEXT_DESC_PTR()   bfin_read32(MDMA_D3_NEXT_DESC_PTR)
+#define bfin_write_MDMA_D3_NEXT_DESC_PTR(val) bfin_write32(MDMA_D3_NEXT_DESC_PTR,val)
+#define bfin_read_MDMA_D3_START_ADDR()      bfin_read32(MDMA_D3_START_ADDR)
+#define bfin_write_MDMA_D3_START_ADDR(val)  bfin_write32(MDMA_D3_START_ADDR,val)
+#define bfin_read_MDMA_D3_X_COUNT()         bfin_read16(MDMA_D3_X_COUNT)
+#define bfin_write_MDMA_D3_X_COUNT(val)     bfin_write16(MDMA_D3_X_COUNT,val)
+#define bfin_read_MDMA_D3_Y_COUNT()         bfin_read16(MDMA_D3_Y_COUNT)
+#define bfin_write_MDMA_D3_Y_COUNT(val)     bfin_write16(MDMA_D3_Y_COUNT,val)
+#define bfin_read_MDMA_D3_X_MODIFY()        bfin_read16(MDMA_D3_X_MODIFY)
+#define bfin_write_MDMA_D3_X_MODIFY(val)    bfin_write16(MDMA_D3_X_MODIFY,val)
+#define bfin_read_MDMA_D3_Y_MODIFY()        bfin_read16(MDMA_D3_Y_MODIFY)
+#define bfin_write_MDMA_D3_Y_MODIFY(val)    bfin_write16(MDMA_D3_Y_MODIFY,val)
+#define bfin_read_MDMA_D3_CURR_DESC_PTR()   bfin_read32(MDMA_D3_CURR_DESC_PTR)
+#define bfin_write_MDMA_D3_CURR_DESC_PTR(val) bfin_write32(MDMA_D3_CURR_DESC_PTR,val)
+#define bfin_read_MDMA_D3_CURR_ADDR()       bfin_read32(MDMA_D3_CURR_ADDR)
+#define bfin_write_MDMA_D3_CURR_ADDR(val)   bfin_write32(MDMA_D3_CURR_ADDR,val)
+#define bfin_read_MDMA_D3_CURR_X_COUNT()    bfin_read16(MDMA_D3_CURR_X_COUNT)
+#define bfin_write_MDMA_D3_CURR_X_COUNT(val) bfin_write16(MDMA_D3_CURR_X_COUNT,val)
+#define bfin_read_MDMA_D3_CURR_Y_COUNT()    bfin_read16(MDMA_D3_CURR_Y_COUNT)
+#define bfin_write_MDMA_D3_CURR_Y_COUNT(val) bfin_write16(MDMA_D3_CURR_Y_COUNT,val)
+#define bfin_read_MDMA_D3_IRQ_STATUS()      bfin_read16(MDMA_D3_IRQ_STATUS)
+#define bfin_write_MDMA_D3_IRQ_STATUS(val)  bfin_write16(MDMA_D3_IRQ_STATUS,val)
+#define bfin_read_MDMA_D3_PERIPHERAL_MAP()  bfin_read16(MDMA_D3_PERIPHERAL_MAP)
+#define bfin_write_MDMA_D3_PERIPHERAL_MAP(val) bfin_write16(MDMA_D3_PERIPHERAL_MAP,val)
+#define bfin_read_MDMA_S3_CONFIG()          bfin_read16(MDMA_S3_CONFIG)
+#define bfin_write_MDMA_S3_CONFIG(val)      bfin_write16(MDMA_S3_CONFIG,val)
+#define bfin_read_MDMA_S3_NEXT_DESC_PTR()   bfin_read32(MDMA_S3_NEXT_DESC_PTR)
+#define bfin_write_MDMA_S3_NEXT_DESC_PTR(val) bfin_write32(MDMA_S3_NEXT_DESC_PTR,val)
+#define bfin_read_MDMA_S3_START_ADDR()      bfin_read32(MDMA_S3_START_ADDR)
+#define bfin_write_MDMA_S3_START_ADDR(val)  bfin_write32(MDMA_S3_START_ADDR,val)
+#define bfin_read_MDMA_S3_X_COUNT()         bfin_read16(MDMA_S3_X_COUNT)
+#define bfin_write_MDMA_S3_X_COUNT(val)     bfin_write16(MDMA_S3_X_COUNT,val)
+#define bfin_read_MDMA_S3_Y_COUNT()         bfin_read16(MDMA_S3_Y_COUNT)
+#define bfin_write_MDMA_S3_Y_COUNT(val)     bfin_write16(MDMA_S3_Y_COUNT,val)
+#define bfin_read_MDMA_S3_X_MODIFY()        bfin_read16(MDMA_S3_X_MODIFY)
+#define bfin_write_MDMA_S3_X_MODIFY(val)    bfin_write16(MDMA_S3_X_MODIFY,val)
+#define bfin_read_MDMA_S3_Y_MODIFY()        bfin_read16(MDMA_S3_Y_MODIFY)
+#define bfin_write_MDMA_S3_Y_MODIFY(val)    bfin_write16(MDMA_S3_Y_MODIFY,val)
+#define bfin_read_MDMA_S3_CURR_DESC_PTR()   bfin_read32(MDMA_S3_CURR_DESC_PTR)
+#define bfin_write_MDMA_S3_CURR_DESC_PTR(val) bfin_write32(MDMA_S3_CURR_DESC_PTR,val)
+#define bfin_read_MDMA_S3_CURR_ADDR()       bfin_read32(MDMA_S3_CURR_ADDR)
+#define bfin_write_MDMA_S3_CURR_ADDR(val)   bfin_write32(MDMA_S3_CURR_ADDR,val)
+#define bfin_read_MDMA_S3_CURR_X_COUNT()    bfin_read16(MDMA_S3_CURR_X_COUNT)
+#define bfin_write_MDMA_S3_CURR_X_COUNT(val) bfin_write16(MDMA_S3_CURR_X_COUNT,val)
+#define bfin_read_MDMA_S3_CURR_Y_COUNT()    bfin_read16(MDMA_S3_CURR_Y_COUNT)
+#define bfin_write_MDMA_S3_CURR_Y_COUNT(val) bfin_write16(MDMA_S3_CURR_Y_COUNT,val)
+#define bfin_read_MDMA_S3_IRQ_STATUS()      bfin_read16(MDMA_S3_IRQ_STATUS)
+#define bfin_write_MDMA_S3_IRQ_STATUS(val)  bfin_write16(MDMA_S3_IRQ_STATUS,val)
+#define bfin_read_MDMA_S3_PERIPHERAL_MAP()  bfin_read16(MDMA_S3_PERIPHERAL_MAP)
+#define bfin_write_MDMA_S3_PERIPHERAL_MAP(val) bfin_write16(MDMA_S3_PERIPHERAL_MAP,val)
 /* DMA2 Controller registers (0xFFC0 0C00-0xFFC0 0DFF) */
 #define bfin_read_DMA2_0_CONFIG()            bfin_read16(DMA2_0_CONFIG)
 #define bfin_write_DMA2_0_CONFIG(val)        bfin_write16(DMA2_0_CONFIG,val)
@@ -1263,110 +1255,110 @@
 #define bfin_read_DMA2_11_PERIPHERAL_MAP()   bfin_read16(DMA2_11_PERIPHERAL_MAP)
 #define bfin_write_DMA2_11_PERIPHERAL_MAP(val) bfin_write16(DMA2_11_PERIPHERAL_MAP,val)
 /* Memory DMA2 Controller registers (0xFFC0 0E80-0xFFC0 0FFF) */
-#define bfin_read_MDMA2_D0_CONFIG()          bfin_read16(MDMA2_D0_CONFIG)
-#define bfin_write_MDMA2_D0_CONFIG(val)      bfin_write16(MDMA2_D0_CONFIG,val)
-#define bfin_read_MDMA2_D0_NEXT_DESC_PTR()   bfin_read32(MDMA2_D0_NEXT_DESC_PTR)
-#define bfin_write_MDMA2_D0_NEXT_DESC_PTR(val) bfin_write32(MDMA2_D0_NEXT_DESC_PTR,val)
-#define bfin_read_MDMA2_D0_START_ADDR()      bfin_read32(MDMA2_D0_START_ADDR)
-#define bfin_write_MDMA2_D0_START_ADDR(val)  bfin_write32(MDMA2_D0_START_ADDR,val)
-#define bfin_read_MDMA2_D0_X_COUNT()         bfin_read16(MDMA2_D0_X_COUNT)
-#define bfin_write_MDMA2_D0_X_COUNT(val)     bfin_write16(MDMA2_D0_X_COUNT,val)
-#define bfin_read_MDMA2_D0_Y_COUNT()         bfin_read16(MDMA2_D0_Y_COUNT)
-#define bfin_write_MDMA2_D0_Y_COUNT(val)     bfin_write16(MDMA2_D0_Y_COUNT,val)
-#define bfin_read_MDMA2_D0_X_MODIFY()        bfin_read16(MDMA2_D0_X_MODIFY)
-#define bfin_write_MDMA2_D0_X_MODIFY(val)    bfin_write16(MDMA2_D0_X_MODIFY,val)
-#define bfin_read_MDMA2_D0_Y_MODIFY()        bfin_read16(MDMA2_D0_Y_MODIFY)
-#define bfin_write_MDMA2_D0_Y_MODIFY(val)    bfin_write16(MDMA2_D0_Y_MODIFY,val)
-#define bfin_read_MDMA2_D0_CURR_DESC_PTR()   bfin_read32(MDMA2_D0_CURR_DESC_PTR)
-#define bfin_write_MDMA2_D0_CURR_DESC_PTR(val) bfin_write32(MDMA2_D0_CURR_DESC_PTR,val)
-#define bfin_read_MDMA2_D0_CURR_ADDR()       bfin_read32(MDMA2_D0_CURR_ADDR)
-#define bfin_write_MDMA2_D0_CURR_ADDR(val)   bfin_write32(MDMA2_D0_CURR_ADDR,val)
-#define bfin_read_MDMA2_D0_CURR_X_COUNT()    bfin_read16(MDMA2_D0_CURR_X_COUNT)
-#define bfin_write_MDMA2_D0_CURR_X_COUNT(val) bfin_write16(MDMA2_D0_CURR_X_COUNT,val)
-#define bfin_read_MDMA2_D0_CURR_Y_COUNT()    bfin_read16(MDMA2_D0_CURR_Y_COUNT)
-#define bfin_write_MDMA2_D0_CURR_Y_COUNT(val) bfin_write16(MDMA2_D0_CURR_Y_COUNT,val)
-#define bfin_read_MDMA2_D0_IRQ_STATUS()      bfin_read16(MDMA2_D0_IRQ_STATUS)
-#define bfin_write_MDMA2_D0_IRQ_STATUS(val)  bfin_write16(MDMA2_D0_IRQ_STATUS,val)
-#define bfin_read_MDMA2_D0_PERIPHERAL_MAP()  bfin_read16(MDMA2_D0_PERIPHERAL_MAP)
-#define bfin_write_MDMA2_D0_PERIPHERAL_MAP(val) bfin_write16(MDMA2_D0_PERIPHERAL_MAP,val)
-#define bfin_read_MDMA2_S0_CONFIG()          bfin_read16(MDMA2_S0_CONFIG)
-#define bfin_write_MDMA2_S0_CONFIG(val)      bfin_write16(MDMA2_S0_CONFIG,val)
-#define bfin_read_MDMA2_S0_NEXT_DESC_PTR()   bfin_read32(MDMA2_S0_NEXT_DESC_PTR)
-#define bfin_write_MDMA2_S0_NEXT_DESC_PTR(val) bfin_write32(MDMA2_S0_NEXT_DESC_PTR,val)
-#define bfin_read_MDMA2_S0_START_ADDR()      bfin_read32(MDMA2_S0_START_ADDR)
-#define bfin_write_MDMA2_S0_START_ADDR(val)  bfin_write32(MDMA2_S0_START_ADDR,val)
-#define bfin_read_MDMA2_S0_X_COUNT()         bfin_read16(MDMA2_S0_X_COUNT)
-#define bfin_write_MDMA2_S0_X_COUNT(val)     bfin_write16(MDMA2_S0_X_COUNT,val)
-#define bfin_read_MDMA2_S0_Y_COUNT()         bfin_read16(MDMA2_S0_Y_COUNT)
-#define bfin_write_MDMA2_S0_Y_COUNT(val)     bfin_write16(MDMA2_S0_Y_COUNT,val)
-#define bfin_read_MDMA2_S0_X_MODIFY()        bfin_read16(MDMA2_S0_X_MODIFY)
-#define bfin_write_MDMA2_S0_X_MODIFY(val)    bfin_write16(MDMA2_S0_X_MODIFY,val)
-#define bfin_read_MDMA2_S0_Y_MODIFY()        bfin_read16(MDMA2_S0_Y_MODIFY)
-#define bfin_write_MDMA2_S0_Y_MODIFY(val)    bfin_write16(MDMA2_S0_Y_MODIFY,val)
-#define bfin_read_MDMA2_S0_CURR_DESC_PTR()   bfin_read32(MDMA2_S0_CURR_DESC_PTR)
-#define bfin_write_MDMA2_S0_CURR_DESC_PTR(val) bfin_write32(MDMA2_S0_CURR_DESC_PTR,val)
-#define bfin_read_MDMA2_S0_CURR_ADDR()       bfin_read32(MDMA2_S0_CURR_ADDR)
-#define bfin_write_MDMA2_S0_CURR_ADDR(val)   bfin_write32(MDMA2_S0_CURR_ADDR,val)
-#define bfin_read_MDMA2_S0_CURR_X_COUNT()    bfin_read16(MDMA2_S0_CURR_X_COUNT)
-#define bfin_write_MDMA2_S0_CURR_X_COUNT(val) bfin_write16(MDMA2_S0_CURR_X_COUNT,val)
-#define bfin_read_MDMA2_S0_CURR_Y_COUNT()    bfin_read16(MDMA2_S0_CURR_Y_COUNT)
-#define bfin_write_MDMA2_S0_CURR_Y_COUNT(val) bfin_write16(MDMA2_S0_CURR_Y_COUNT,val)
-#define bfin_read_MDMA2_S0_IRQ_STATUS()      bfin_read16(MDMA2_S0_IRQ_STATUS)
-#define bfin_write_MDMA2_S0_IRQ_STATUS(val)  bfin_write16(MDMA2_S0_IRQ_STATUS,val)
-#define bfin_read_MDMA2_S0_PERIPHERAL_MAP()  bfin_read16(MDMA2_S0_PERIPHERAL_MAP)
-#define bfin_write_MDMA2_S0_PERIPHERAL_MAP(val) bfin_write16(MDMA2_S0_PERIPHERAL_MAP,val)
-#define bfin_read_MDMA2_D1_CONFIG()          bfin_read16(MDMA2_D1_CONFIG)
-#define bfin_write_MDMA2_D1_CONFIG(val)      bfin_write16(MDMA2_D1_CONFIG,val)
-#define bfin_read_MDMA2_D1_NEXT_DESC_PTR()   bfin_read32(MDMA2_D1_NEXT_DESC_PTR)
-#define bfin_write_MDMA2_D1_NEXT_DESC_PTR(val) bfin_write32(MDMA2_D1_NEXT_DESC_PTR,val)
-#define bfin_read_MDMA2_D1_START_ADDR()      bfin_read32(MDMA2_D1_START_ADDR)
-#define bfin_write_MDMA2_D1_START_ADDR(val)  bfin_write32(MDMA2_D1_START_ADDR,val)
-#define bfin_read_MDMA2_D1_X_COUNT()         bfin_read16(MDMA2_D1_X_COUNT)
-#define bfin_write_MDMA2_D1_X_COUNT(val)     bfin_write16(MDMA2_D1_X_COUNT,val)
-#define bfin_read_MDMA2_D1_Y_COUNT()         bfin_read16(MDMA2_D1_Y_COUNT)
-#define bfin_write_MDMA2_D1_Y_COUNT(val)     bfin_write16(MDMA2_D1_Y_COUNT,val)
-#define bfin_read_MDMA2_D1_X_MODIFY()        bfin_read16(MDMA2_D1_X_MODIFY)
-#define bfin_write_MDMA2_D1_X_MODIFY(val)    bfin_write16(MDMA2_D1_X_MODIFY,val)
-#define bfin_read_MDMA2_D1_Y_MODIFY()        bfin_read16(MDMA2_D1_Y_MODIFY)
-#define bfin_write_MDMA2_D1_Y_MODIFY(val)    bfin_write16(MDMA2_D1_Y_MODIFY,val)
-#define bfin_read_MDMA2_D1_CURR_DESC_PTR()   bfin_read32(MDMA2_D1_CURR_DESC_PTR)
-#define bfin_write_MDMA2_D1_CURR_DESC_PTR(val) bfin_write32(MDMA2_D1_CURR_DESC_PTR,val)
-#define bfin_read_MDMA2_D1_CURR_ADDR()       bfin_read32(MDMA2_D1_CURR_ADDR)
-#define bfin_write_MDMA2_D1_CURR_ADDR(val)   bfin_write32(MDMA2_D1_CURR_ADDR,val)
-#define bfin_read_MDMA2_D1_CURR_X_COUNT()    bfin_read16(MDMA2_D1_CURR_X_COUNT)
-#define bfin_write_MDMA2_D1_CURR_X_COUNT(val) bfin_write16(MDMA2_D1_CURR_X_COUNT,val)
-#define bfin_read_MDMA2_D1_CURR_Y_COUNT()    bfin_read16(MDMA2_D1_CURR_Y_COUNT)
-#define bfin_write_MDMA2_D1_CURR_Y_COUNT(val) bfin_write16(MDMA2_D1_CURR_Y_COUNT,val)
-#define bfin_read_MDMA2_D1_IRQ_STATUS()      bfin_read16(MDMA2_D1_IRQ_STATUS)
-#define bfin_write_MDMA2_D1_IRQ_STATUS(val)  bfin_write16(MDMA2_D1_IRQ_STATUS,val)
-#define bfin_read_MDMA2_D1_PERIPHERAL_MAP()  bfin_read16(MDMA2_D1_PERIPHERAL_MAP)
-#define bfin_write_MDMA2_D1_PERIPHERAL_MAP(val) bfin_write16(MDMA2_D1_PERIPHERAL_MAP,val)
-#define bfin_read_MDMA2_S1_CONFIG()          bfin_read16(MDMA2_S1_CONFIG)
-#define bfin_write_MDMA2_S1_CONFIG(val)      bfin_write16(MDMA2_S1_CONFIG,val)
-#define bfin_read_MDMA2_S1_NEXT_DESC_PTR()   bfin_read32(MDMA2_S1_NEXT_DESC_PTR)
-#define bfin_write_MDMA2_S1_NEXT_DESC_PTR(val) bfin_write32(MDMA2_S1_NEXT_DESC_PTR,val)
-#define bfin_read_MDMA2_S1_START_ADDR()      bfin_read32(MDMA2_S1_START_ADDR)
-#define bfin_write_MDMA2_S1_START_ADDR(val)  bfin_write32(MDMA2_S1_START_ADDR,val)
-#define bfin_read_MDMA2_S1_X_COUNT()         bfin_read16(MDMA2_S1_X_COUNT)
-#define bfin_write_MDMA2_S1_X_COUNT(val)     bfin_write16(MDMA2_S1_X_COUNT,val)
-#define bfin_read_MDMA2_S1_Y_COUNT()         bfin_read16(MDMA2_S1_Y_COUNT)
-#define bfin_write_MDMA2_S1_Y_COUNT(val)     bfin_write16(MDMA2_S1_Y_COUNT,val)
-#define bfin_read_MDMA2_S1_X_MODIFY()        bfin_read16(MDMA2_S1_X_MODIFY)
-#define bfin_write_MDMA2_S1_X_MODIFY(val)    bfin_write16(MDMA2_S1_X_MODIFY,val)
-#define bfin_read_MDMA2_S1_Y_MODIFY()        bfin_read16(MDMA2_S1_Y_MODIFY)
-#define bfin_write_MDMA2_S1_Y_MODIFY(val)    bfin_write16(MDMA2_S1_Y_MODIFY,val)
-#define bfin_read_MDMA2_S1_CURR_DESC_PTR()   bfin_read32(MDMA2_S1_CURR_DESC_PTR)
-#define bfin_write_MDMA2_S1_CURR_DESC_PTR(val) bfin_write32(MDMA2_S1_CURR_DESC_PTR,val)
-#define bfin_read_MDMA2_S1_CURR_ADDR()       bfin_read32(MDMA2_S1_CURR_ADDR)
-#define bfin_write_MDMA2_S1_CURR_ADDR(val)   bfin_write32(MDMA2_S1_CURR_ADDR,val)
-#define bfin_read_MDMA2_S1_CURR_X_COUNT()    bfin_read16(MDMA2_S1_CURR_X_COUNT)
-#define bfin_write_MDMA2_S1_CURR_X_COUNT(val) bfin_write16(MDMA2_S1_CURR_X_COUNT,val)
-#define bfin_read_MDMA2_S1_CURR_Y_COUNT()    bfin_read16(MDMA2_S1_CURR_Y_COUNT)
-#define bfin_write_MDMA2_S1_CURR_Y_COUNT(val) bfin_write16(MDMA2_S1_CURR_Y_COUNT,val)
-#define bfin_read_MDMA2_S1_IRQ_STATUS()      bfin_read16(MDMA2_S1_IRQ_STATUS)
-#define bfin_write_MDMA2_S1_IRQ_STATUS(val)  bfin_write16(MDMA2_S1_IRQ_STATUS,val)
-#define bfin_read_MDMA2_S1_PERIPHERAL_MAP()  bfin_read16(MDMA2_S1_PERIPHERAL_MAP)
-#define bfin_write_MDMA2_S1_PERIPHERAL_MAP(val) bfin_write16(MDMA2_S1_PERIPHERAL_MAP,val)
+#define bfin_read_MDMA_D0_CONFIG()          bfin_read16(MDMA_D0_CONFIG)
+#define bfin_write_MDMA_D0_CONFIG(val)      bfin_write16(MDMA_D0_CONFIG,val)
+#define bfin_read_MDMA_D0_NEXT_DESC_PTR()   bfin_read32(MDMA_D0_NEXT_DESC_PTR)
+#define bfin_write_MDMA_D0_NEXT_DESC_PTR(val) bfin_write32(MDMA_D0_NEXT_DESC_PTR,val)
+#define bfin_read_MDMA_D0_START_ADDR()      bfin_read32(MDMA_D0_START_ADDR)
+#define bfin_write_MDMA_D0_START_ADDR(val)  bfin_write32(MDMA_D0_START_ADDR,val)
+#define bfin_read_MDMA_D0_X_COUNT()         bfin_read16(MDMA_D0_X_COUNT)
+#define bfin_write_MDMA_D0_X_COUNT(val)     bfin_write16(MDMA_D0_X_COUNT,val)
+#define bfin_read_MDMA_D0_Y_COUNT()         bfin_read16(MDMA_D0_Y_COUNT)
+#define bfin_write_MDMA_D0_Y_COUNT(val)     bfin_write16(MDMA_D0_Y_COUNT,val)
+#define bfin_read_MDMA_D0_X_MODIFY()        bfin_read16(MDMA_D0_X_MODIFY)
+#define bfin_write_MDMA_D0_X_MODIFY(val)    bfin_write16(MDMA_D0_X_MODIFY,val)
+#define bfin_read_MDMA_D0_Y_MODIFY()        bfin_read16(MDMA_D0_Y_MODIFY)
+#define bfin_write_MDMA_D0_Y_MODIFY(val)    bfin_write16(MDMA_D0_Y_MODIFY,val)
+#define bfin_read_MDMA_D0_CURR_DESC_PTR()   bfin_read32(MDMA_D0_CURR_DESC_PTR)
+#define bfin_write_MDMA_D0_CURR_DESC_PTR(val) bfin_write32(MDMA_D0_CURR_DESC_PTR,val)
+#define bfin_read_MDMA_D0_CURR_ADDR()       bfin_read32(MDMA_D0_CURR_ADDR)
+#define bfin_write_MDMA_D0_CURR_ADDR(val)   bfin_write32(MDMA_D0_CURR_ADDR,val)
+#define bfin_read_MDMA_D0_CURR_X_COUNT()    bfin_read16(MDMA_D0_CURR_X_COUNT)
+#define bfin_write_MDMA_D0_CURR_X_COUNT(val) bfin_write16(MDMA_D0_CURR_X_COUNT,val)
+#define bfin_read_MDMA_D0_CURR_Y_COUNT()    bfin_read16(MDMA_D0_CURR_Y_COUNT)
+#define bfin_write_MDMA_D0_CURR_Y_COUNT(val) bfin_write16(MDMA_D0_CURR_Y_COUNT,val)
+#define bfin_read_MDMA_D0_IRQ_STATUS()      bfin_read16(MDMA_D0_IRQ_STATUS)
+#define bfin_write_MDMA_D0_IRQ_STATUS(val)  bfin_write16(MDMA_D0_IRQ_STATUS,val)
+#define bfin_read_MDMA_D0_PERIPHERAL_MAP()  bfin_read16(MDMA_D0_PERIPHERAL_MAP)
+#define bfin_write_MDMA_D0_PERIPHERAL_MAP(val) bfin_write16(MDMA_D0_PERIPHERAL_MAP,val)
+#define bfin_read_MDMA_S0_CONFIG()          bfin_read16(MDMA_S0_CONFIG)
+#define bfin_write_MDMA_S0_CONFIG(val)      bfin_write16(MDMA_S0_CONFIG,val)
+#define bfin_read_MDMA_S0_NEXT_DESC_PTR()   bfin_read32(MDMA_S0_NEXT_DESC_PTR)
+#define bfin_write_MDMA_S0_NEXT_DESC_PTR(val) bfin_write32(MDMA_S0_NEXT_DESC_PTR,val)
+#define bfin_read_MDMA_S0_START_ADDR()      bfin_read32(MDMA_S0_START_ADDR)
+#define bfin_write_MDMA_S0_START_ADDR(val)  bfin_write32(MDMA_S0_START_ADDR,val)
+#define bfin_read_MDMA_S0_X_COUNT()         bfin_read16(MDMA_S0_X_COUNT)
+#define bfin_write_MDMA_S0_X_COUNT(val)     bfin_write16(MDMA_S0_X_COUNT,val)
+#define bfin_read_MDMA_S0_Y_COUNT()         bfin_read16(MDMA_S0_Y_COUNT)
+#define bfin_write_MDMA_S0_Y_COUNT(val)     bfin_write16(MDMA_S0_Y_COUNT,val)
+#define bfin_read_MDMA_S0_X_MODIFY()        bfin_read16(MDMA_S0_X_MODIFY)
+#define bfin_write_MDMA_S0_X_MODIFY(val)    bfin_write16(MDMA_S0_X_MODIFY,val)
+#define bfin_read_MDMA_S0_Y_MODIFY()        bfin_read16(MDMA_S0_Y_MODIFY)
+#define bfin_write_MDMA_S0_Y_MODIFY(val)    bfin_write16(MDMA_S0_Y_MODIFY,val)
+#define bfin_read_MDMA_S0_CURR_DESC_PTR()   bfin_read32(MDMA_S0_CURR_DESC_PTR)
+#define bfin_write_MDMA_S0_CURR_DESC_PTR(val) bfin_write32(MDMA_S0_CURR_DESC_PTR,val)
+#define bfin_read_MDMA_S0_CURR_ADDR()       bfin_read32(MDMA_S0_CURR_ADDR)
+#define bfin_write_MDMA_S0_CURR_ADDR(val)   bfin_write32(MDMA_S0_CURR_ADDR,val)
+#define bfin_read_MDMA_S0_CURR_X_COUNT()    bfin_read16(MDMA_S0_CURR_X_COUNT)
+#define bfin_write_MDMA_S0_CURR_X_COUNT(val) bfin_write16(MDMA_S0_CURR_X_COUNT,val)
+#define bfin_read_MDMA_S0_CURR_Y_COUNT()    bfin_read16(MDMA_S0_CURR_Y_COUNT)
+#define bfin_write_MDMA_S0_CURR_Y_COUNT(val) bfin_write16(MDMA_S0_CURR_Y_COUNT,val)
+#define bfin_read_MDMA_S0_IRQ_STATUS()      bfin_read16(MDMA_S0_IRQ_STATUS)
+#define bfin_write_MDMA_S0_IRQ_STATUS(val)  bfin_write16(MDMA_S0_IRQ_STATUS,val)
+#define bfin_read_MDMA_S0_PERIPHERAL_MAP()  bfin_read16(MDMA_S0_PERIPHERAL_MAP)
+#define bfin_write_MDMA_S0_PERIPHERAL_MAP(val) bfin_write16(MDMA_S0_PERIPHERAL_MAP,val)
+#define bfin_read_MDMA_D1_CONFIG()          bfin_read16(MDMA_D1_CONFIG)
+#define bfin_write_MDMA_D1_CONFIG(val)      bfin_write16(MDMA_D1_CONFIG,val)
+#define bfin_read_MDMA_D1_NEXT_DESC_PTR()   bfin_read32(MDMA_D1_NEXT_DESC_PTR)
+#define bfin_write_MDMA_D1_NEXT_DESC_PTR(val) bfin_write32(MDMA_D1_NEXT_DESC_PTR,val)
+#define bfin_read_MDMA_D1_START_ADDR()      bfin_read32(MDMA_D1_START_ADDR)
+#define bfin_write_MDMA_D1_START_ADDR(val)  bfin_write32(MDMA_D1_START_ADDR,val)
+#define bfin_read_MDMA_D1_X_COUNT()         bfin_read16(MDMA_D1_X_COUNT)
+#define bfin_write_MDMA_D1_X_COUNT(val)     bfin_write16(MDMA_D1_X_COUNT,val)
+#define bfin_read_MDMA_D1_Y_COUNT()         bfin_read16(MDMA_D1_Y_COUNT)
+#define bfin_write_MDMA_D1_Y_COUNT(val)     bfin_write16(MDMA_D1_Y_COUNT,val)
+#define bfin_read_MDMA_D1_X_MODIFY()        bfin_read16(MDMA_D1_X_MODIFY)
+#define bfin_write_MDMA_D1_X_MODIFY(val)    bfin_write16(MDMA_D1_X_MODIFY,val)
+#define bfin_read_MDMA_D1_Y_MODIFY()        bfin_read16(MDMA_D1_Y_MODIFY)
+#define bfin_write_MDMA_D1_Y_MODIFY(val)    bfin_write16(MDMA_D1_Y_MODIFY,val)
+#define bfin_read_MDMA_D1_CURR_DESC_PTR()   bfin_read32(MDMA_D1_CURR_DESC_PTR)
+#define bfin_write_MDMA_D1_CURR_DESC_PTR(val) bfin_write32(MDMA_D1_CURR_DESC_PTR,val)
+#define bfin_read_MDMA_D1_CURR_ADDR()       bfin_read32(MDMA_D1_CURR_ADDR)
+#define bfin_write_MDMA_D1_CURR_ADDR(val)   bfin_write32(MDMA_D1_CURR_ADDR,val)
+#define bfin_read_MDMA_D1_CURR_X_COUNT()    bfin_read16(MDMA_D1_CURR_X_COUNT)
+#define bfin_write_MDMA_D1_CURR_X_COUNT(val) bfin_write16(MDMA_D1_CURR_X_COUNT,val)
+#define bfin_read_MDMA_D1_CURR_Y_COUNT()    bfin_read16(MDMA_D1_CURR_Y_COUNT)
+#define bfin_write_MDMA_D1_CURR_Y_COUNT(val) bfin_write16(MDMA_D1_CURR_Y_COUNT,val)
+#define bfin_read_MDMA_D1_IRQ_STATUS()      bfin_read16(MDMA_D1_IRQ_STATUS)
+#define bfin_write_MDMA_D1_IRQ_STATUS(val)  bfin_write16(MDMA_D1_IRQ_STATUS,val)
+#define bfin_read_MDMA_D1_PERIPHERAL_MAP()  bfin_read16(MDMA_D1_PERIPHERAL_MAP)
+#define bfin_write_MDMA_D1_PERIPHERAL_MAP(val) bfin_write16(MDMA_D1_PERIPHERAL_MAP,val)
+#define bfin_read_MDMA_S1_CONFIG()          bfin_read16(MDMA_S1_CONFIG)
+#define bfin_write_MDMA_S1_CONFIG(val)      bfin_write16(MDMA_S1_CONFIG,val)
+#define bfin_read_MDMA_S1_NEXT_DESC_PTR()   bfin_read32(MDMA_S1_NEXT_DESC_PTR)
+#define bfin_write_MDMA_S1_NEXT_DESC_PTR(val) bfin_write32(MDMA_S1_NEXT_DESC_PTR,val)
+#define bfin_read_MDMA_S1_START_ADDR()      bfin_read32(MDMA_S1_START_ADDR)
+#define bfin_write_MDMA_S1_START_ADDR(val)  bfin_write32(MDMA_S1_START_ADDR,val)
+#define bfin_read_MDMA_S1_X_COUNT()         bfin_read16(MDMA_S1_X_COUNT)
+#define bfin_write_MDMA_S1_X_COUNT(val)     bfin_write16(MDMA_S1_X_COUNT,val)
+#define bfin_read_MDMA_S1_Y_COUNT()         bfin_read16(MDMA_S1_Y_COUNT)
+#define bfin_write_MDMA_S1_Y_COUNT(val)     bfin_write16(MDMA_S1_Y_COUNT,val)
+#define bfin_read_MDMA_S1_X_MODIFY()        bfin_read16(MDMA_S1_X_MODIFY)
+#define bfin_write_MDMA_S1_X_MODIFY(val)    bfin_write16(MDMA_S1_X_MODIFY,val)
+#define bfin_read_MDMA_S1_Y_MODIFY()        bfin_read16(MDMA_S1_Y_MODIFY)
+#define bfin_write_MDMA_S1_Y_MODIFY(val)    bfin_write16(MDMA_S1_Y_MODIFY,val)
+#define bfin_read_MDMA_S1_CURR_DESC_PTR()   bfin_read32(MDMA_S1_CURR_DESC_PTR)
+#define bfin_write_MDMA_S1_CURR_DESC_PTR(val) bfin_write32(MDMA_S1_CURR_DESC_PTR,val)
+#define bfin_read_MDMA_S1_CURR_ADDR()       bfin_read32(MDMA_S1_CURR_ADDR)
+#define bfin_write_MDMA_S1_CURR_ADDR(val)   bfin_write32(MDMA_S1_CURR_ADDR,val)
+#define bfin_read_MDMA_S1_CURR_X_COUNT()    bfin_read16(MDMA_S1_CURR_X_COUNT)
+#define bfin_write_MDMA_S1_CURR_X_COUNT(val) bfin_write16(MDMA_S1_CURR_X_COUNT,val)
+#define bfin_read_MDMA_S1_CURR_Y_COUNT()    bfin_read16(MDMA_S1_CURR_Y_COUNT)
+#define bfin_write_MDMA_S1_CURR_Y_COUNT(val) bfin_write16(MDMA_S1_CURR_Y_COUNT,val)
+#define bfin_read_MDMA_S1_IRQ_STATUS()      bfin_read16(MDMA_S1_IRQ_STATUS)
+#define bfin_write_MDMA_S1_IRQ_STATUS(val)  bfin_write16(MDMA_S1_IRQ_STATUS,val)
+#define bfin_read_MDMA_S1_PERIPHERAL_MAP()  bfin_read16(MDMA_S1_PERIPHERAL_MAP)
+#define bfin_write_MDMA_S1_PERIPHERAL_MAP(val) bfin_write16(MDMA_S1_PERIPHERAL_MAP,val)
 /* Internal Memory DMA Registers (0xFFC0_1800 - 0xFFC0_19FF) */
 #define bfin_read_IMDMA_D0_CONFIG()          bfin_read16(IMDMA_D0_CONFIG)
 #define bfin_write_IMDMA_D0_CONFIG(val)      bfin_write16(IMDMA_D0_CONFIG,val)
@@ -1465,65 +1457,4 @@
 #define bfin_read_IMDMA_S1_IRQ_STATUS()      bfin_read16(IMDMA_S1_IRQ_STATUS)
 #define bfin_write_IMDMA_S1_IRQ_STATUS(val)  bfin_write16(IMDMA_S1_IRQ_STATUS,val)
 
-#define bfin_read_MDMA_S0_CONFIG()  bfin_read_MDMA1_S0_CONFIG()
-#define bfin_write_MDMA_S0_CONFIG(val) bfin_write_MDMA1_S0_CONFIG(val)
-#define bfin_read_MDMA_S0_IRQ_STATUS()  bfin_read_MDMA1_S0_IRQ_STATUS()
-#define bfin_write_MDMA_S0_IRQ_STATUS(val) bfin_write_MDMA1_S0_IRQ_STATUS(val)
-#define bfin_read_MDMA_S0_X_MODIFY()  bfin_read_MDMA1_S0_X_MODIFY()
-#define bfin_write_MDMA_S0_X_MODIFY(val) bfin_write_MDMA1_S0_X_MODIFY(val)
-#define bfin_read_MDMA_S0_Y_MODIFY()  bfin_read_MDMA1_S0_Y_MODIFY()
-#define bfin_write_MDMA_S0_Y_MODIFY(val) bfin_write_MDMA1_S0_Y_MODIFY(val)
-#define bfin_read_MDMA_S0_X_COUNT()  bfin_read_MDMA1_S0_X_COUNT()
-#define bfin_write_MDMA_S0_X_COUNT(val) bfin_write_MDMA1_S0_X_COUNT(val)
-#define bfin_read_MDMA_S0_Y_COUNT()  bfin_read_MDMA1_S0_Y_COUNT()
-#define bfin_write_MDMA_S0_Y_COUNT(val) bfin_write_MDMA1_S0_Y_COUNT(val)
-#define bfin_read_MDMA_S0_START_ADDR()  bfin_read_MDMA1_S0_START_ADDR()
-#define bfin_write_MDMA_S0_START_ADDR(val) bfin_write_MDMA1_S0_START_ADDR(val)
-#define bfin_read_MDMA_D0_CONFIG()  bfin_read_MDMA1_D0_CONFIG()
-#define bfin_write_MDMA_D0_CONFIG(val) bfin_write_MDMA1_D0_CONFIG(val)
-#define bfin_read_MDMA_D0_IRQ_STATUS()  bfin_read_MDMA1_D0_IRQ_STATUS()
-#define bfin_write_MDMA_D0_IRQ_STATUS(val) bfin_write_MDMA1_D0_IRQ_STATUS(val)
-#define bfin_read_MDMA_D0_X_MODIFY()  bfin_read_MDMA1_D0_X_MODIFY()
-#define bfin_write_MDMA_D0_X_MODIFY(val) bfin_write_MDMA1_D0_X_MODIFY(val)
-#define bfin_read_MDMA_D0_Y_MODIFY()  bfin_read_MDMA1_D0_Y_MODIFY()
-#define bfin_write_MDMA_D0_Y_MODIFY(val) bfin_write_MDMA1_D0_Y_MODIFY(val)
-#define bfin_read_MDMA_D0_X_COUNT()  bfin_read_MDMA1_D0_X_COUNT()
-#define bfin_write_MDMA_D0_X_COUNT(val) bfin_write_MDMA1_D0_X_COUNT(val)
-#define bfin_read_MDMA_D0_Y_COUNT()  bfin_read_MDMA1_D0_Y_COUNT()
-#define bfin_write_MDMA_D0_Y_COUNT(val) bfin_write_MDMA1_D0_Y_COUNT(val)
-#define bfin_read_MDMA_D0_START_ADDR()  bfin_read_MDMA1_D0_START_ADDR()
-#define bfin_write_MDMA_D0_START_ADDR(val) bfin_write_MDMA1_D0_START_ADDR(val)
-
-#define bfin_read_MDMA_S1_CONFIG()  bfin_read_MDMA1_S1_CONFIG()
-#define bfin_write_MDMA_S1_CONFIG(val) bfin_write_MDMA1_S1_CONFIG(val)
-#define bfin_read_MDMA_S1_IRQ_STATUS()  bfin_read_MDMA1_S1_IRQ_STATUS()
-#define bfin_write_MDMA_S1_IRQ_STATUS(val) bfin_write_MDMA1_S1_IRQ_STATUS(val)
-#define bfin_read_MDMA_S1_X_MODIFY()  bfin_read_MDMA1_S1_X_MODIFY()
-#define bfin_write_MDMA_S1_X_MODIFY(val) bfin_write_MDMA1_S1_X_MODIFY(val)
-#define bfin_read_MDMA_S1_Y_MODIFY()  bfin_read_MDMA1_S1_Y_MODIFY()
-#define bfin_write_MDMA_S1_Y_MODIFY(val) bfin_write_MDMA1_S1_Y_MODIFY(val)
-#define bfin_read_MDMA_S1_X_COUNT()  bfin_read_MDMA1_S1_X_COUNT()
-#define bfin_write_MDMA_S1_X_COUNT(val) bfin_write_MDMA1_S1_X_COUNT(val)
-#define bfin_read_MDMA_S1_Y_COUNT()  bfin_read_MDMA1_S1_Y_COUNT()
-#define bfin_write_MDMA_S1_Y_COUNT(val) bfin_write_MDMA1_S1_Y_COUNT(val)
-#define bfin_read_MDMA_S1_START_ADDR()  bfin_read_MDMA1_S1_START_ADDR()
-#define bfin_write_MDMA_S1_START_ADDR(val) bfin_write_MDMA1_S1_START_ADDR(val)
-#define bfin_read_MDMA_D1_CONFIG()  bfin_read_MDMA1_D1_CONFIG()
-#define bfin_write_MDMA_D1_CONFIG(val) bfin_write_MDMA1_D1_CONFIG(val)
-#define bfin_read_MDMA_D1_IRQ_STATUS()  bfin_read_MDMA1_D1_IRQ_STATUS()
-#define bfin_write_MDMA_D1_IRQ_STATUS(val) bfin_write_MDMA1_D1_IRQ_STATUS(val)
-#define bfin_read_MDMA_D1_X_MODIFY()  bfin_read_MDMA1_D1_X_MODIFY()
-#define bfin_write_MDMA_D1_X_MODIFY(val) bfin_write_MDMA1_D1_X_MODIFY(val)
-#define bfin_read_MDMA_D1_Y_MODIFY()  bfin_read_MDMA1_D1_Y_MODIFY()
-#define bfin_write_MDMA_D1_Y_MODIFY(val) bfin_write_MDMA1_D1_Y_MODIFY(val)
-#define bfin_read_MDMA_D1_X_COUNT()  bfin_read_MDMA1_D1_X_COUNT()
-#define bfin_write_MDMA_D1_X_COUNT(val) bfin_write_MDMA1_D1_X_COUNT(val)
-#define bfin_read_MDMA_D1_Y_COUNT()  bfin_read_MDMA1_D1_Y_COUNT()
-#define bfin_write_MDMA_D1_Y_COUNT(val) bfin_write_MDMA1_D1_Y_COUNT(val)
-#define bfin_read_MDMA_D1_START_ADDR()  bfin_read_MDMA1_D1_START_ADDR()
-#define bfin_write_MDMA_D1_START_ADDR(val) bfin_write_MDMA1_D1_START_ADDR(val)
-
-/* These need to be last due to the cdef/linux inter-dependencies */
-#include <asm/irq.h>
-
 #endif				/* _CDEF_BF561_H */
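
For context on the accessor macros being reshuffled above: the bfin_read16/bfin_write16 and bfin_read32/bfin_write32 helpers they wrap are fixed-width accesses to memory-mapped registers. The standalone sketch below (illustrative only; the register name and address are made up, not taken from this patch) shows the general shape of that pattern.

#include <stdint.h>

/* Placeholder MMR address -- illustrative only, not a real BF561 register.
 * Dereferencing it is only meaningful on the target hardware. */
#define EXAMPLE_MMR 0xFFC00F08u

static inline uint16_t example_read16(uint32_t addr)
{
	/* Volatile access keeps the compiler from caching or reordering the read. */
	return *(volatile uint16_t *)(uintptr_t)addr;
}

static inline void example_write16(uint32_t addr, uint16_t val)
{
	*(volatile uint16_t *)(uintptr_t)addr = val;
}

/* Per-register wrappers, mirroring the shape of the macros above. */
#define example_read_EXAMPLE_MMR()     example_read16(EXAMPLE_MMR)
#define example_write_EXAMPLE_MMR(val) example_write16(EXAMPLE_MMR, (val))
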
diff --git a/arch/blackfin/mach-bf561/include/mach/defBF561.h b/arch/blackfin/mach-bf561/include/mach/defBF561.h
index 79e048d..71e805e 100644
--- a/arch/blackfin/mach-bf561/include/mach/defBF561.h
+++ b/arch/blackfin/mach-bf561/include/mach/defBF561.h
@@ -1,18 +1,11 @@
 /*
- * Copyright 2005-2009 Analog Devices Inc.
+ * Copyright 2005-2010 Analog Devices Inc.
  *
  * Licensed under the ADI BSD license or the GPL-2 (or later)
  */
 
 #ifndef _DEF_BF561_H
 #define _DEF_BF561_H
-/*
-#if !defined(__ADSPBF561__)
-#warning defBF561.h should only be included for BF561 chip.
-#endif
-*/
-/* include all Core registers and bit definitions */
-#include <asm/def_LPBlackfin.h>
 
 /*********************************************************************************** */
 /* System MMR Register Map */
@@ -311,10 +304,10 @@
 #define PPI1_FRAME 					0xFFC01310	/* PPI1 Frame Length register */
 
 /*DMA traffic control registers */
-#define	DMA1_TC_PER  0xFFC01B0C	/* Traffic control periods */
-#define	DMA1_TC_CNT  0xFFC01B10	/* Traffic control current counts */
-#define	DMA2_TC_PER  0xFFC00B0C	/* Traffic control periods */
-#define	DMA2_TC_CNT  0xFFC00B10	/* Traffic control current counts        */
+#define	DMAC0_TC_PER  0xFFC00B0C	/* Traffic control periods */
+#define	DMAC0_TC_CNT  0xFFC00B10	/* Traffic control current counts        */
+#define	DMAC1_TC_PER  0xFFC01B0C	/* Traffic control periods */
+#define	DMAC1_TC_CNT  0xFFC01B10	/* Traffic control current counts */
 
 /* DMA1 Controller registers (0xFFC0 1C00-0xFFC0 1FFF) */
 #define DMA1_0_CONFIG 0xFFC01C08	/* DMA1 Channel 0 Configuration register */
@@ -486,61 +479,61 @@
 #define DMA1_11_PERIPHERAL_MAP 0xFFC01EEC	/* DMA1 Channel 11 Peripheral Map Register */
 
 /* Memory DMA1 Controller registers (0xFFC0 1E80-0xFFC0 1FFF) */
-#define MDMA1_D0_CONFIG 0xFFC01F08	/*MemDMA1 Stream 0 Destination Configuration */
-#define MDMA1_D0_NEXT_DESC_PTR 0xFFC01F00	/*MemDMA1 Stream 0 Destination Next Descriptor Ptr Reg */
-#define MDMA1_D0_START_ADDR 0xFFC01F04	/*MemDMA1 Stream 0 Destination Start Address */
-#define MDMA1_D0_X_COUNT 0xFFC01F10	/*MemDMA1 Stream 0 Destination Inner-Loop Count */
-#define MDMA1_D0_Y_COUNT 0xFFC01F18	/*MemDMA1 Stream 0 Destination Outer-Loop Count */
-#define MDMA1_D0_X_MODIFY 0xFFC01F14	/*MemDMA1 Stream 0 Dest Inner-Loop Address-Increment */
-#define MDMA1_D0_Y_MODIFY 0xFFC01F1C	/*MemDMA1 Stream 0 Dest Outer-Loop Address-Increment */
-#define MDMA1_D0_CURR_DESC_PTR 0xFFC01F20	/*MemDMA1 Stream 0 Dest Current Descriptor Ptr reg */
-#define MDMA1_D0_CURR_ADDR 0xFFC01F24	/*MemDMA1 Stream 0 Destination Current Address */
-#define MDMA1_D0_CURR_X_COUNT 0xFFC01F30	/*MemDMA1 Stream 0 Dest Current Inner-Loop Count */
-#define MDMA1_D0_CURR_Y_COUNT 0xFFC01F38	/*MemDMA1 Stream 0 Dest Current Outer-Loop Count */
-#define MDMA1_D0_IRQ_STATUS 0xFFC01F28	/*MemDMA1 Stream 0 Destination Interrupt/Status */
-#define MDMA1_D0_PERIPHERAL_MAP 0xFFC01F2C	/*MemDMA1 Stream 0 Destination Peripheral Map */
+#define MDMA_D2_CONFIG 0xFFC01F08	/*MemDMA1 Stream 0 Destination Configuration */
+#define MDMA_D2_NEXT_DESC_PTR 0xFFC01F00	/*MemDMA1 Stream 0 Destination Next Descriptor Ptr Reg */
+#define MDMA_D2_START_ADDR 0xFFC01F04	/*MemDMA1 Stream 0 Destination Start Address */
+#define MDMA_D2_X_COUNT 0xFFC01F10	/*MemDMA1 Stream 0 Destination Inner-Loop Count */
+#define MDMA_D2_Y_COUNT 0xFFC01F18	/*MemDMA1 Stream 0 Destination Outer-Loop Count */
+#define MDMA_D2_X_MODIFY 0xFFC01F14	/*MemDMA1 Stream 0 Dest Inner-Loop Address-Increment */
+#define MDMA_D2_Y_MODIFY 0xFFC01F1C	/*MemDMA1 Stream 0 Dest Outer-Loop Address-Increment */
+#define MDMA_D2_CURR_DESC_PTR 0xFFC01F20	/*MemDMA1 Stream 0 Dest Current Descriptor Ptr reg */
+#define MDMA_D2_CURR_ADDR 0xFFC01F24	/*MemDMA1 Stream 0 Destination Current Address */
+#define MDMA_D2_CURR_X_COUNT 0xFFC01F30	/*MemDMA1 Stream 0 Dest Current Inner-Loop Count */
+#define MDMA_D2_CURR_Y_COUNT 0xFFC01F38	/*MemDMA1 Stream 0 Dest Current Outer-Loop Count */
+#define MDMA_D2_IRQ_STATUS 0xFFC01F28	/*MemDMA1 Stream 0 Destination Interrupt/Status */
+#define MDMA_D2_PERIPHERAL_MAP 0xFFC01F2C	/*MemDMA1 Stream 0 Destination Peripheral Map */
 
-#define MDMA1_S0_CONFIG 0xFFC01F48	/*MemDMA1 Stream 0 Source Configuration */
-#define MDMA1_S0_NEXT_DESC_PTR 0xFFC01F40	/*MemDMA1 Stream 0 Source Next Descriptor Ptr Reg */
-#define MDMA1_S0_START_ADDR 0xFFC01F44	/*MemDMA1 Stream 0 Source Start Address */
-#define MDMA1_S0_X_COUNT 0xFFC01F50	/*MemDMA1 Stream 0 Source Inner-Loop Count */
-#define MDMA1_S0_Y_COUNT 0xFFC01F58	/*MemDMA1 Stream 0 Source Outer-Loop Count */
-#define MDMA1_S0_X_MODIFY 0xFFC01F54	/*MemDMA1 Stream 0 Source Inner-Loop Address-Increment */
-#define MDMA1_S0_Y_MODIFY 0xFFC01F5C	/*MemDMA1 Stream 0 Source Outer-Loop Address-Increment */
-#define MDMA1_S0_CURR_DESC_PTR 0xFFC01F60	/*MemDMA1 Stream 0 Source Current Descriptor Ptr reg */
-#define MDMA1_S0_CURR_ADDR 0xFFC01F64	/*MemDMA1 Stream 0 Source Current Address */
-#define MDMA1_S0_CURR_X_COUNT 0xFFC01F70	/*MemDMA1 Stream 0 Source Current Inner-Loop Count */
-#define MDMA1_S0_CURR_Y_COUNT 0xFFC01F78	/*MemDMA1 Stream 0 Source Current Outer-Loop Count */
-#define MDMA1_S0_IRQ_STATUS 0xFFC01F68	/*MemDMA1 Stream 0 Source Interrupt/Status */
-#define MDMA1_S0_PERIPHERAL_MAP 0xFFC01F6C	/*MemDMA1 Stream 0 Source Peripheral Map */
+#define MDMA_S2_CONFIG 0xFFC01F48	/*MemDMA1 Stream 0 Source Configuration */
+#define MDMA_S2_NEXT_DESC_PTR 0xFFC01F40	/*MemDMA1 Stream 0 Source Next Descriptor Ptr Reg */
+#define MDMA_S2_START_ADDR 0xFFC01F44	/*MemDMA1 Stream 0 Source Start Address */
+#define MDMA_S2_X_COUNT 0xFFC01F50	/*MemDMA1 Stream 0 Source Inner-Loop Count */
+#define MDMA_S2_Y_COUNT 0xFFC01F58	/*MemDMA1 Stream 0 Source Outer-Loop Count */
+#define MDMA_S2_X_MODIFY 0xFFC01F54	/*MemDMA1 Stream 0 Source Inner-Loop Address-Increment */
+#define MDMA_S2_Y_MODIFY 0xFFC01F5C	/*MemDMA1 Stream 0 Source Outer-Loop Address-Increment */
+#define MDMA_S2_CURR_DESC_PTR 0xFFC01F60	/*MemDMA1 Stream 0 Source Current Descriptor Ptr reg */
+#define MDMA_S2_CURR_ADDR 0xFFC01F64	/*MemDMA1 Stream 0 Source Current Address */
+#define MDMA_S2_CURR_X_COUNT 0xFFC01F70	/*MemDMA1 Stream 0 Source Current Inner-Loop Count */
+#define MDMA_S2_CURR_Y_COUNT 0xFFC01F78	/*MemDMA1 Stream 0 Source Current Outer-Loop Count */
+#define MDMA_S2_IRQ_STATUS 0xFFC01F68	/*MemDMA1 Stream 0 Source Interrupt/Status */
+#define MDMA_S2_PERIPHERAL_MAP 0xFFC01F6C	/*MemDMA1 Stream 0 Source Peripheral Map */
 
-#define MDMA1_D1_CONFIG 0xFFC01F88	/*MemDMA1 Stream 1 Destination Configuration */
-#define MDMA1_D1_NEXT_DESC_PTR 0xFFC01F80	/*MemDMA1 Stream 1 Destination Next Descriptor Ptr Reg */
-#define MDMA1_D1_START_ADDR 0xFFC01F84	/*MemDMA1 Stream 1 Destination Start Address */
-#define MDMA1_D1_X_COUNT 0xFFC01F90	/*MemDMA1 Stream 1 Destination Inner-Loop Count */
-#define MDMA1_D1_Y_COUNT 0xFFC01F98	/*MemDMA1 Stream 1 Destination Outer-Loop Count */
-#define MDMA1_D1_X_MODIFY 0xFFC01F94	/*MemDMA1 Stream 1 Dest Inner-Loop Address-Increment */
-#define MDMA1_D1_Y_MODIFY 0xFFC01F9C	/*MemDMA1 Stream 1 Dest Outer-Loop Address-Increment */
-#define MDMA1_D1_CURR_DESC_PTR 0xFFC01FA0	/*MemDMA1 Stream 1 Dest Current Descriptor Ptr reg */
-#define MDMA1_D1_CURR_ADDR 0xFFC01FA4	/*MemDMA1 Stream 1 Dest Current Address */
-#define MDMA1_D1_CURR_X_COUNT 0xFFC01FB0	/*MemDMA1 Stream 1 Dest Current Inner-Loop Count */
-#define MDMA1_D1_CURR_Y_COUNT 0xFFC01FB8	/*MemDMA1 Stream 1 Dest Current Outer-Loop Count */
-#define MDMA1_D1_IRQ_STATUS 0xFFC01FA8	/*MemDMA1 Stream 1 Dest Interrupt/Status */
-#define MDMA1_D1_PERIPHERAL_MAP 0xFFC01FAC	/*MemDMA1 Stream 1 Dest Peripheral Map */
+#define MDMA_D3_CONFIG 0xFFC01F88	/*MemDMA1 Stream 1 Destination Configuration */
+#define MDMA_D3_NEXT_DESC_PTR 0xFFC01F80	/*MemDMA1 Stream 1 Destination Next Descriptor Ptr Reg */
+#define MDMA_D3_START_ADDR 0xFFC01F84	/*MemDMA1 Stream 1 Destination Start Address */
+#define MDMA_D3_X_COUNT 0xFFC01F90	/*MemDMA1 Stream 1 Destination Inner-Loop Count */
+#define MDMA_D3_Y_COUNT 0xFFC01F98	/*MemDMA1 Stream 1 Destination Outer-Loop Count */
+#define MDMA_D3_X_MODIFY 0xFFC01F94	/*MemDMA1 Stream 1 Dest Inner-Loop Address-Increment */
+#define MDMA_D3_Y_MODIFY 0xFFC01F9C	/*MemDMA1 Stream 1 Dest Outer-Loop Address-Increment */
+#define MDMA_D3_CURR_DESC_PTR 0xFFC01FA0	/*MemDMA1 Stream 1 Dest Current Descriptor Ptr reg */
+#define MDMA_D3_CURR_ADDR 0xFFC01FA4	/*MemDMA1 Stream 1 Dest Current Address */
+#define MDMA_D3_CURR_X_COUNT 0xFFC01FB0	/*MemDMA1 Stream 1 Dest Current Inner-Loop Count */
+#define MDMA_D3_CURR_Y_COUNT 0xFFC01FB8	/*MemDMA1 Stream 1 Dest Current Outer-Loop Count */
+#define MDMA_D3_IRQ_STATUS 0xFFC01FA8	/*MemDMA1 Stream 1 Dest Interrupt/Status */
+#define MDMA_D3_PERIPHERAL_MAP 0xFFC01FAC	/*MemDMA1 Stream 1 Dest Peripheral Map */
 
-#define MDMA1_S1_CONFIG 0xFFC01FC8	/*MemDMA1 Stream 1 Source Configuration */
-#define MDMA1_S1_NEXT_DESC_PTR 0xFFC01FC0	/*MemDMA1 Stream 1 Source Next Descriptor Ptr Reg */
-#define MDMA1_S1_START_ADDR 0xFFC01FC4	/*MemDMA1 Stream 1 Source Start Address */
-#define MDMA1_S1_X_COUNT 0xFFC01FD0	/*MemDMA1 Stream 1 Source Inner-Loop Count */
-#define MDMA1_S1_Y_COUNT 0xFFC01FD8	/*MemDMA1 Stream 1 Source Outer-Loop Count */
-#define MDMA1_S1_X_MODIFY 0xFFC01FD4	/*MemDMA1 Stream 1 Source Inner-Loop Address-Increment */
-#define MDMA1_S1_Y_MODIFY 0xFFC01FDC	/*MemDMA1 Stream 1 Source Outer-Loop Address-Increment */
-#define MDMA1_S1_CURR_DESC_PTR 0xFFC01FE0	/*MemDMA1 Stream 1 Source Current Descriptor Ptr reg */
-#define MDMA1_S1_CURR_ADDR 0xFFC01FE4	/*MemDMA1 Stream 1 Source Current Address */
-#define MDMA1_S1_CURR_X_COUNT 0xFFC01FF0	/*MemDMA1 Stream 1 Source Current Inner-Loop Count */
-#define MDMA1_S1_CURR_Y_COUNT 0xFFC01FF8	/*MemDMA1 Stream 1 Source Current Outer-Loop Count */
-#define MDMA1_S1_IRQ_STATUS 0xFFC01FE8	/*MemDMA1 Stream 1 Source Interrupt/Status */
-#define MDMA1_S1_PERIPHERAL_MAP 0xFFC01FEC	/*MemDMA1 Stream 1 Source Peripheral Map */
+#define MDMA_S3_CONFIG 0xFFC01FC8	/*MemDMA1 Stream 1 Source Configuration */
+#define MDMA_S3_NEXT_DESC_PTR 0xFFC01FC0	/*MemDMA1 Stream 1 Source Next Descriptor Ptr Reg */
+#define MDMA_S3_START_ADDR 0xFFC01FC4	/*MemDMA1 Stream 1 Source Start Address */
+#define MDMA_S3_X_COUNT 0xFFC01FD0	/*MemDMA1 Stream 1 Source Inner-Loop Count */
+#define MDMA_S3_Y_COUNT 0xFFC01FD8	/*MemDMA1 Stream 1 Source Outer-Loop Count */
+#define MDMA_S3_X_MODIFY 0xFFC01FD4	/*MemDMA1 Stream 1 Source Inner-Loop Address-Increment */
+#define MDMA_S3_Y_MODIFY 0xFFC01FDC	/*MemDMA1 Stream 1 Source Outer-Loop Address-Increment */
+#define MDMA_S3_CURR_DESC_PTR 0xFFC01FE0	/*MemDMA1 Stream 1 Source Current Descriptor Ptr reg */
+#define MDMA_S3_CURR_ADDR 0xFFC01FE4	/*MemDMA1 Stream 1 Source Current Address */
+#define MDMA_S3_CURR_X_COUNT 0xFFC01FF0	/*MemDMA1 Stream 1 Source Current Inner-Loop Count */
+#define MDMA_S3_CURR_Y_COUNT 0xFFC01FF8	/*MemDMA1 Stream 1 Source Current Outer-Loop Count */
+#define MDMA_S3_IRQ_STATUS 0xFFC01FE8	/*MemDMA1 Stream 1 Source Interrupt/Status */
+#define MDMA_S3_PERIPHERAL_MAP 0xFFC01FEC	/*MemDMA1 Stream 1 Source Peripheral Map */
 
 /* DMA2 Controller registers (0xFFC0 0C00-0xFFC0 0DFF) */
 #define DMA2_0_CONFIG 0xFFC00C08	/* DMA2 Channel 0 Configuration register */
@@ -712,117 +705,61 @@
 #define DMA2_11_PERIPHERAL_MAP 0xFFC00EEC	/* DMA2 Channel 11 Peripheral Map Register */
 
 /* Memory DMA2 Controller registers (0xFFC0 0E80-0xFFC0 0FFF) */
-#define MDMA2_D0_CONFIG 0xFFC00F08	/*MemDMA2 Stream 0 Destination Configuration register */
-#define MDMA2_D0_NEXT_DESC_PTR 0xFFC00F00	/*MemDMA2 Stream 0 Destination Next Descriptor Ptr Reg */
-#define MDMA2_D0_START_ADDR 0xFFC00F04	/*MemDMA2 Stream 0 Destination Start Address */
-#define MDMA2_D0_X_COUNT 0xFFC00F10	/*MemDMA2 Stream 0 Dest Inner-Loop Count register */
-#define MDMA2_D0_Y_COUNT 0xFFC00F18	/*MemDMA2 Stream 0 Dest Outer-Loop Count register */
-#define MDMA2_D0_X_MODIFY 0xFFC00F14	/*MemDMA2 Stream 0 Dest Inner-Loop Address-Increment */
-#define MDMA2_D0_Y_MODIFY 0xFFC00F1C	/*MemDMA2 Stream 0 Dest Outer-Loop Address-Increment */
-#define MDMA2_D0_CURR_DESC_PTR 0xFFC00F20	/*MemDMA2 Stream 0 Dest Current Descriptor Ptr reg */
-#define MDMA2_D0_CURR_ADDR 0xFFC00F24	/*MemDMA2 Stream 0 Destination Current Address */
-#define MDMA2_D0_CURR_X_COUNT 0xFFC00F30	/*MemDMA2 Stream 0 Dest Current Inner-Loop Count reg */
-#define MDMA2_D0_CURR_Y_COUNT 0xFFC00F38	/*MemDMA2 Stream 0 Dest Current Outer-Loop Count reg */
-#define MDMA2_D0_IRQ_STATUS 0xFFC00F28	/*MemDMA2 Stream 0 Dest Interrupt/Status Register */
-#define MDMA2_D0_PERIPHERAL_MAP 0xFFC00F2C	/*MemDMA2 Stream 0 Destination Peripheral Map register */
+#define MDMA_D0_CONFIG 0xFFC00F08	/*MemDMA2 Stream 0 Destination Configuration register */
+#define MDMA_D0_NEXT_DESC_PTR 0xFFC00F00	/*MemDMA2 Stream 0 Destination Next Descriptor Ptr Reg */
+#define MDMA_D0_START_ADDR 0xFFC00F04	/*MemDMA2 Stream 0 Destination Start Address */
+#define MDMA_D0_X_COUNT 0xFFC00F10	/*MemDMA2 Stream 0 Dest Inner-Loop Count register */
+#define MDMA_D0_Y_COUNT 0xFFC00F18	/*MemDMA2 Stream 0 Dest Outer-Loop Count register */
+#define MDMA_D0_X_MODIFY 0xFFC00F14	/*MemDMA2 Stream 0 Dest Inner-Loop Address-Increment */
+#define MDMA_D0_Y_MODIFY 0xFFC00F1C	/*MemDMA2 Stream 0 Dest Outer-Loop Address-Increment */
+#define MDMA_D0_CURR_DESC_PTR 0xFFC00F20	/*MemDMA2 Stream 0 Dest Current Descriptor Ptr reg */
+#define MDMA_D0_CURR_ADDR 0xFFC00F24	/*MemDMA2 Stream 0 Destination Current Address */
+#define MDMA_D0_CURR_X_COUNT 0xFFC00F30	/*MemDMA2 Stream 0 Dest Current Inner-Loop Count reg */
+#define MDMA_D0_CURR_Y_COUNT 0xFFC00F38	/*MemDMA2 Stream 0 Dest Current Outer-Loop Count reg */
+#define MDMA_D0_IRQ_STATUS 0xFFC00F28	/*MemDMA2 Stream 0 Dest Interrupt/Status Register */
+#define MDMA_D0_PERIPHERAL_MAP 0xFFC00F2C	/*MemDMA2 Stream 0 Destination Peripheral Map register */
 
-#define MDMA2_S0_CONFIG 0xFFC00F48	/*MemDMA2 Stream 0 Source Configuration register */
-#define MDMA2_S0_NEXT_DESC_PTR 0xFFC00F40	/*MemDMA2 Stream 0 Source Next Descriptor Ptr Reg */
-#define MDMA2_S0_START_ADDR 0xFFC00F44	/*MemDMA2 Stream 0 Source Start Address */
-#define MDMA2_S0_X_COUNT 0xFFC00F50	/*MemDMA2 Stream 0 Source Inner-Loop Count register */
-#define MDMA2_S0_Y_COUNT 0xFFC00F58	/*MemDMA2 Stream 0 Source Outer-Loop Count register */
-#define MDMA2_S0_X_MODIFY 0xFFC00F54	/*MemDMA2 Stream 0 Src Inner-Loop Addr-Increment reg */
-#define MDMA2_S0_Y_MODIFY 0xFFC00F5C	/*MemDMA2 Stream 0 Src Outer-Loop Addr-Increment reg */
-#define MDMA2_S0_CURR_DESC_PTR 0xFFC00F60	/*MemDMA2 Stream 0 Source Current Descriptor Ptr reg */
-#define MDMA2_S0_CURR_ADDR 0xFFC00F64	/*MemDMA2 Stream 0 Source Current Address */
-#define MDMA2_S0_CURR_X_COUNT 0xFFC00F70	/*MemDMA2 Stream 0 Src Current Inner-Loop Count reg */
-#define MDMA2_S0_CURR_Y_COUNT 0xFFC00F78	/*MemDMA2 Stream 0 Src Current Outer-Loop Count reg */
-#define MDMA2_S0_IRQ_STATUS 0xFFC00F68	/*MemDMA2 Stream 0 Source Interrupt/Status Register */
-#define MDMA2_S0_PERIPHERAL_MAP 0xFFC00F6C	/*MemDMA2 Stream 0 Source Peripheral Map register */
+#define MDMA_S0_CONFIG 0xFFC00F48	/*MemDMA2 Stream 0 Source Configuration register */
+#define MDMA_S0_NEXT_DESC_PTR 0xFFC00F40	/*MemDMA2 Stream 0 Source Next Descriptor Ptr Reg */
+#define MDMA_S0_START_ADDR 0xFFC00F44	/*MemDMA2 Stream 0 Source Start Address */
+#define MDMA_S0_X_COUNT 0xFFC00F50	/*MemDMA2 Stream 0 Source Inner-Loop Count register */
+#define MDMA_S0_Y_COUNT 0xFFC00F58	/*MemDMA2 Stream 0 Source Outer-Loop Count register */
+#define MDMA_S0_X_MODIFY 0xFFC00F54	/*MemDMA2 Stream 0 Src Inner-Loop Addr-Increment reg */
+#define MDMA_S0_Y_MODIFY 0xFFC00F5C	/*MemDMA2 Stream 0 Src Outer-Loop Addr-Increment reg */
+#define MDMA_S0_CURR_DESC_PTR 0xFFC00F60	/*MemDMA2 Stream 0 Source Current Descriptor Ptr reg */
+#define MDMA_S0_CURR_ADDR 0xFFC00F64	/*MemDMA2 Stream 0 Source Current Address */
+#define MDMA_S0_CURR_X_COUNT 0xFFC00F70	/*MemDMA2 Stream 0 Src Current Inner-Loop Count reg */
+#define MDMA_S0_CURR_Y_COUNT 0xFFC00F78	/*MemDMA2 Stream 0 Src Current Outer-Loop Count reg */
+#define MDMA_S0_IRQ_STATUS 0xFFC00F68	/*MemDMA2 Stream 0 Source Interrupt/Status Register */
+#define MDMA_S0_PERIPHERAL_MAP 0xFFC00F6C	/*MemDMA2 Stream 0 Source Peripheral Map register */
 
-#define MDMA2_D1_CONFIG 0xFFC00F88	/*MemDMA2 Stream 1 Destination Configuration register */
-#define MDMA2_D1_NEXT_DESC_PTR 0xFFC00F80	/*MemDMA2 Stream 1 Destination Next Descriptor Ptr Reg */
-#define MDMA2_D1_START_ADDR 0xFFC00F84	/*MemDMA2 Stream 1 Destination Start Address */
-#define MDMA2_D1_X_COUNT 0xFFC00F90	/*MemDMA2 Stream 1 Dest Inner-Loop Count register */
-#define MDMA2_D1_Y_COUNT 0xFFC00F98	/*MemDMA2 Stream 1 Dest Outer-Loop Count register */
-#define MDMA2_D1_X_MODIFY 0xFFC00F94	/*MemDMA2 Stream 1 Dest Inner-Loop Address-Increment */
-#define MDMA2_D1_Y_MODIFY 0xFFC00F9C	/*MemDMA2 Stream 1 Dest Outer-Loop Address-Increment */
-#define MDMA2_D1_CURR_DESC_PTR 0xFFC00FA0	/*MemDMA2 Stream 1 Destination Current Descriptor Ptr */
-#define MDMA2_D1_CURR_ADDR 0xFFC00FA4	/*MemDMA2 Stream 1 Destination Current Address reg */
-#define MDMA2_D1_CURR_X_COUNT 0xFFC00FB0	/*MemDMA2 Stream 1 Dest Current Inner-Loop Count reg */
-#define MDMA2_D1_CURR_Y_COUNT 0xFFC00FB8	/*MemDMA2 Stream 1 Dest Current Outer-Loop Count reg */
-#define MDMA2_D1_IRQ_STATUS 0xFFC00FA8	/*MemDMA2 Stream 1 Destination Interrupt/Status Reg */
-#define MDMA2_D1_PERIPHERAL_MAP 0xFFC00FAC	/*MemDMA2 Stream 1 Destination Peripheral Map register */
+#define MDMA_D1_CONFIG 0xFFC00F88	/*MemDMA2 Stream 1 Destination Configuration register */
+#define MDMA_D1_NEXT_DESC_PTR 0xFFC00F80	/*MemDMA2 Stream 1 Destination Next Descriptor Ptr Reg */
+#define MDMA_D1_START_ADDR 0xFFC00F84	/*MemDMA2 Stream 1 Destination Start Address */
+#define MDMA_D1_X_COUNT 0xFFC00F90	/*MemDMA2 Stream 1 Dest Inner-Loop Count register */
+#define MDMA_D1_Y_COUNT 0xFFC00F98	/*MemDMA2 Stream 1 Dest Outer-Loop Count register */
+#define MDMA_D1_X_MODIFY 0xFFC00F94	/*MemDMA2 Stream 1 Dest Inner-Loop Address-Increment */
+#define MDMA_D1_Y_MODIFY 0xFFC00F9C	/*MemDMA2 Stream 1 Dest Outer-Loop Address-Increment */
+#define MDMA_D1_CURR_DESC_PTR 0xFFC00FA0	/*MemDMA2 Stream 1 Destination Current Descriptor Ptr */
+#define MDMA_D1_CURR_ADDR 0xFFC00FA4	/*MemDMA2 Stream 1 Destination Current Address reg */
+#define MDMA_D1_CURR_X_COUNT 0xFFC00FB0	/*MemDMA2 Stream 1 Dest Current Inner-Loop Count reg */
+#define MDMA_D1_CURR_Y_COUNT 0xFFC00FB8	/*MemDMA2 Stream 1 Dest Current Outer-Loop Count reg */
+#define MDMA_D1_IRQ_STATUS 0xFFC00FA8	/*MemDMA2 Stream 1 Destination Interrupt/Status Reg */
+#define MDMA_D1_PERIPHERAL_MAP 0xFFC00FAC	/*MemDMA2 Stream 1 Destination Peripheral Map register */
 
-#define MDMA2_S1_CONFIG 0xFFC00FC8	/*MemDMA2 Stream 1 Source Configuration register */
-#define MDMA2_S1_NEXT_DESC_PTR 0xFFC00FC0	/*MemDMA2 Stream 1 Source Next Descriptor Ptr Reg */
-#define MDMA2_S1_START_ADDR 0xFFC00FC4	/*MemDMA2 Stream 1 Source Start Address */
-#define MDMA2_S1_X_COUNT 0xFFC00FD0	/*MemDMA2 Stream 1 Source Inner-Loop Count register */
-#define MDMA2_S1_Y_COUNT 0xFFC00FD8	/*MemDMA2 Stream 1 Source Outer-Loop Count register */
-#define MDMA2_S1_X_MODIFY 0xFFC00FD4	/*MemDMA2 Stream 1 Src Inner-Loop Address-Increment */
-#define MDMA2_S1_Y_MODIFY 0xFFC00FDC	/*MemDMA2 Stream 1 Source Outer-Loop Address-Increment */
-#define MDMA2_S1_CURR_DESC_PTR 0xFFC00FE0	/*MemDMA2 Stream 1 Source Current Descriptor Ptr reg */
-#define MDMA2_S1_CURR_ADDR 0xFFC00FE4	/*MemDMA2 Stream 1 Source Current Address */
-#define MDMA2_S1_CURR_X_COUNT 0xFFC00FF0	/*MemDMA2 Stream 1 Source Current Inner-Loop Count */
-#define MDMA2_S1_CURR_Y_COUNT 0xFFC00FF8	/*MemDMA2 Stream 1 Source Current Outer-Loop Count */
-#define MDMA2_S1_IRQ_STATUS 0xFFC00FE8	/*MemDMA2 Stream 1 Source Interrupt/Status Register */
-#define MDMA2_S1_PERIPHERAL_MAP 0xFFC00FEC	/*MemDMA2 Stream 1 Source Peripheral Map register */
-
-#define MDMA_D0_NEXT_DESC_PTR MDMA1_D0_NEXT_DESC_PTR
-#define MDMA_D0_START_ADDR MDMA1_D0_START_ADDR
-#define MDMA_D0_CONFIG MDMA1_D0_CONFIG
-#define MDMA_D0_X_COUNT MDMA1_D0_X_COUNT
-#define MDMA_D0_X_MODIFY MDMA1_D0_X_MODIFY
-#define MDMA_D0_Y_COUNT MDMA1_D0_Y_COUNT
-#define MDMA_D0_Y_MODIFY MDMA1_D0_Y_MODIFY
-#define MDMA_D0_CURR_DESC_PTR MDMA1_D0_CURR_DESC_PTR
-#define MDMA_D0_CURR_ADDR MDMA1_D0_CURR_ADDR
-#define MDMA_D0_IRQ_STATUS MDMA1_D0_IRQ_STATUS
-#define MDMA_D0_PERIPHERAL_MAP MDMA1_D0_PERIPHERAL_MAP
-#define MDMA_D0_CURR_X_COUNT MDMA1_D0_CURR_X_COUNT
-#define MDMA_D0_CURR_Y_COUNT MDMA1_D0_CURR_Y_COUNT
-
-#define MDMA_S0_NEXT_DESC_PTR MDMA1_S0_NEXT_DESC_PTR
-#define MDMA_S0_START_ADDR MDMA1_S0_START_ADDR
-#define MDMA_S0_CONFIG MDMA1_S0_CONFIG
-#define MDMA_S0_X_COUNT MDMA1_S0_X_COUNT
-#define MDMA_S0_X_MODIFY MDMA1_S0_X_MODIFY
-#define MDMA_S0_Y_COUNT MDMA1_S0_Y_COUNT
-#define MDMA_S0_Y_MODIFY MDMA1_S0_Y_MODIFY
-#define MDMA_S0_CURR_DESC_PTR MDMA1_S0_CURR_DESC_PTR
-#define MDMA_S0_CURR_ADDR MDMA1_S0_CURR_ADDR
-#define MDMA_S0_IRQ_STATUS MDMA1_S0_IRQ_STATUS
-#define MDMA_S0_PERIPHERAL_MAP MDMA1_S0_PERIPHERAL_MAP
-#define MDMA_S0_CURR_X_COUNT MDMA1_S0_CURR_X_COUNT
-#define MDMA_S0_CURR_Y_COUNT MDMA1_S0_CURR_Y_COUNT
-
-#define MDMA_D1_NEXT_DESC_PTR MDMA1_D1_NEXT_DESC_PTR
-#define MDMA_D1_START_ADDR MDMA1_D1_START_ADDR
-#define MDMA_D1_CONFIG MDMA1_D1_CONFIG
-#define MDMA_D1_X_COUNT MDMA1_D1_X_COUNT
-#define MDMA_D1_X_MODIFY MDMA1_D1_X_MODIFY
-#define MDMA_D1_Y_COUNT MDMA1_D1_Y_COUNT
-#define MDMA_D1_Y_MODIFY MDMA1_D1_Y_MODIFY
-#define MDMA_D1_CURR_DESC_PTR MDMA1_D1_CURR_DESC_PTR
-#define MDMA_D1_CURR_ADDR MDMA1_D1_CURR_ADDR
-#define MDMA_D1_IRQ_STATUS MDMA1_D1_IRQ_STATUS
-#define MDMA_D1_PERIPHERAL_MAP MDMA1_D1_PERIPHERAL_MAP
-#define MDMA_D1_CURR_X_COUNT MDMA1_D1_CURR_X_COUNT
-#define MDMA_D1_CURR_Y_COUNT MDMA1_D1_CURR_Y_COUNT
-
-#define MDMA_S1_NEXT_DESC_PTR MDMA1_S1_NEXT_DESC_PTR
-#define MDMA_S1_START_ADDR MDMA1_S1_START_ADDR
-#define MDMA_S1_CONFIG MDMA1_S1_CONFIG
-#define MDMA_S1_X_COUNT MDMA1_S1_X_COUNT
-#define MDMA_S1_X_MODIFY MDMA1_S1_X_MODIFY
-#define MDMA_S1_Y_COUNT MDMA1_S1_Y_COUNT
-#define MDMA_S1_Y_MODIFY MDMA1_S1_Y_MODIFY
-#define MDMA_S1_CURR_DESC_PTR MDMA1_S1_CURR_DESC_PTR
-#define MDMA_S1_CURR_ADDR MDMA1_S1_CURR_ADDR
-#define MDMA_S1_IRQ_STATUS MDMA1_S1_IRQ_STATUS
-#define MDMA_S1_PERIPHERAL_MAP MDMA1_S1_PERIPHERAL_MAP
-#define MDMA_S1_CURR_X_COUNT MDMA1_S1_CURR_X_COUNT
-#define MDMA_S1_CURR_Y_COUNT MDMA1_S1_CURR_Y_COUNT
+#define MDMA_S1_CONFIG 0xFFC00FC8	/*MemDMA2 Stream 1 Source Configuration register */
+#define MDMA_S1_NEXT_DESC_PTR 0xFFC00FC0	/*MemDMA2 Stream 1 Source Next Descriptor Ptr Reg */
+#define MDMA_S1_START_ADDR 0xFFC00FC4	/*MemDMA2 Stream 1 Source Start Address */
+#define MDMA_S1_X_COUNT 0xFFC00FD0	/*MemDMA2 Stream 1 Source Inner-Loop Count register */
+#define MDMA_S1_Y_COUNT 0xFFC00FD8	/*MemDMA2 Stream 1 Source Outer-Loop Count register */
+#define MDMA_S1_X_MODIFY 0xFFC00FD4	/*MemDMA2 Stream 1 Src Inner-Loop Address-Increment */
+#define MDMA_S1_Y_MODIFY 0xFFC00FDC	/*MemDMA2 Stream 1 Source Outer-Loop Address-Increment */
+#define MDMA_S1_CURR_DESC_PTR 0xFFC00FE0	/*MemDMA2 Stream 1 Source Current Descriptor Ptr reg */
+#define MDMA_S1_CURR_ADDR 0xFFC00FE4	/*MemDMA2 Stream 1 Source Current Address */
+#define MDMA_S1_CURR_X_COUNT 0xFFC00FF0	/*MemDMA2 Stream 1 Source Current Inner-Loop Count */
+#define MDMA_S1_CURR_Y_COUNT 0xFFC00FF8	/*MemDMA2 Stream 1 Source Current Outer-Loop Count */
+#define MDMA_S1_IRQ_STATUS 0xFFC00FE8	/*MemDMA2 Stream 1 Source Interrupt/Status Register */
+#define MDMA_S1_PERIPHERAL_MAP 0xFFC00FEC	/*MemDMA2 Stream 1 Source Peripheral Map register */
 
 /* Internal Memory DMA Registers (0xFFC0_1800 - 0xFFC0_19FF) */
 #define IMDMA_D0_CONFIG 0xFFC01808	/*IMDMA Stream 0 Destination Configuration */
@@ -927,83 +864,6 @@
 #define IWR_ENABLE(x)	       (1 << (x))	/* Wakeup Enable Peripheral #x */
 #define IWR_DISABLE(x) (0xFFFFFFFF ^ (1 << (x)))	/* Wakeup Disable Peripheral #x */
 
-/* ***************************** UART CONTROLLER MASKS ********************** */
-
-/* UART_LCR Register */
-
-#define DLAB	0x80
-#define SB      0x40
-#define STP      0x20
-#define EPS     0x10
-#define PEN	0x08
-#define STB	0x04
-#define WLS(x)	((x-5) & 0x03)
-
-#define DLAB_P	0x07
-#define SB_P	0x06
-#define STP_P	0x05
-#define EPS_P	0x04
-#define PEN_P	0x03
-#define STB_P	0x02
-#define WLS_P1	0x01
-#define WLS_P0	0x00
-
-/* UART_MCR Register */
-#define LOOP_ENA	0x10
-#define LOOP_ENA_P	0x04
-
-/* UART_LSR Register */
-#define TEMT	0x40
-#define THRE	0x20
-#define BI	0x10
-#define FE	0x08
-#define PE	0x04
-#define OE	0x02
-#define DR	0x01
-
-#define TEMP_P	0x06
-#define THRE_P	0x05
-#define BI_P	0x04
-#define FE_P	0x03
-#define PE_P	0x02
-#define OE_P	0x01
-#define DR_P	0x00
-
-/* UART_IER Register */
-#define ELSI	0x04
-#define ETBEI	0x02
-#define ERBFI	0x01
-
-#define ELSI_P	0x02
-#define ETBEI_P	0x01
-#define ERBFI_P	0x00
-
-/* UART_IIR Register */
-#define STATUS(x)	((x << 1) & 0x06)
-#define NINT		0x01
-#define STATUS_P1	0x02
-#define STATUS_P0	0x01
-#define NINT_P		0x00
-#define IIR_TX_READY    0x02	/* UART_THR empty                               */
-#define IIR_RX_READY    0x04	/* Receive data ready                           */
-#define IIR_LINE_CHANGE 0x06	/* Receive line status                          */
-#define IIR_STATUS	0x06
-
-/* UART_GCTL Register */
-#define FFE	0x20
-#define FPE	0x10
-#define RPOLC	0x08
-#define TPOLC	0x04
-#define IREN	0x02
-#define UCEN	0x01
-
-#define FFE_P	0x05
-#define FPE_P	0x04
-#define RPOLC_P	0x03
-#define TPOLC_P	0x02
-#define IREN_P	0x01
-#define UCEN_P	0x00
-
 /*  *********  PARALLEL PERIPHERAL INTERFACE (PPI) MASKS ****************   */
 
 /*  PPI_CONTROL Masks         */
@@ -1230,44 +1090,6 @@
 #define ERR_TYP_P0		0x0E
 #define ERR_TYP_P1		0x0F
 
-/*/ ******************   PROGRAMMABLE FLAG MASKS  ********************* */
-
-/*  General Purpose IO (0xFFC00700 - 0xFFC007FF)  Masks */
-#define PF0         0x0001
-#define PF1         0x0002
-#define PF2         0x0004
-#define PF3         0x0008
-#define PF4         0x0010
-#define PF5         0x0020
-#define PF6         0x0040
-#define PF7         0x0080
-#define PF8         0x0100
-#define PF9         0x0200
-#define PF10        0x0400
-#define PF11        0x0800
-#define PF12        0x1000
-#define PF13        0x2000
-#define PF14        0x4000
-#define PF15        0x8000
-
-/*  General Purpose IO (0xFFC00700 - 0xFFC007FF)  BIT POSITIONS */
-#define PF0_P         0
-#define PF1_P         1
-#define PF2_P         2
-#define PF3_P         3
-#define PF4_P         4
-#define PF5_P         5
-#define PF6_P         6
-#define PF7_P         7
-#define PF8_P         8
-#define PF9_P         9
-#define PF10_P        10
-#define PF11_P        11
-#define PF12_P        12
-#define PF13_P        13
-#define PF14_P        14
-#define PF15_P        15
-
 /* *********************  ASYNCHRONOUS MEMORY CONTROLLER MASKS  ************* */
 
 /* AMGCTL Masks */
diff --git a/arch/blackfin/mach-bf561/include/mach/gpio.h b/arch/blackfin/mach-bf561/include/mach/gpio.h
index 4f8aa5d..57d5eab 100644
--- a/arch/blackfin/mach-bf561/include/mach/gpio.h
+++ b/arch/blackfin/mach-bf561/include/mach/gpio.h
@@ -62,4 +62,6 @@
 #define PORT_FIO1 GPIO_16
 #define PORT_FIO2 GPIO_32
 
+#include <mach-common/ports-f.h>
+
 #endif /* _MACH_GPIO_H_ */
diff --git a/arch/blackfin/mach-bf561/include/mach/mem_map.h b/arch/blackfin/mach-bf561/include/mach/mem_map.h
index 5b96ea5..4cc9199 100644
--- a/arch/blackfin/mach-bf561/include/mach/mem_map.h
+++ b/arch/blackfin/mach-bf561/include/mach/mem_map.h
@@ -106,7 +106,7 @@
 #define COREA_L1_SCRATCH_START	0xFFB00000
 #define COREB_L1_SCRATCH_START	0xFF700000
 
-#ifdef __ASSEMBLY__
+#ifdef CONFIG_SMP
 
 /*
  * The following macros both return the address of the PDA for the
@@ -121,8 +121,7 @@
  * is allowed to use the specified Dreg for determining the PDA
  * address to be returned into Preg.
  */
-#ifdef CONFIG_SMP
-#define GET_PDA_SAFE(preg)		\
+# define GET_PDA_SAFE(preg)		\
 	preg.l = lo(DSPID);		\
 	preg.h = hi(DSPID);		\
 	preg = [preg];			\
@@ -158,7 +157,7 @@
 	preg = [preg];			\
 4:
 
-#define GET_PDA(preg, dreg)		\
+# define GET_PDA(preg, dreg)		\
 	preg.l = lo(DSPID);		\
 	preg.h = hi(DSPID);		\
 	dreg = [preg];			\
@@ -169,13 +168,17 @@
 	preg = [preg];			\
 1:					\
 
-#define GET_CPUID(preg, dreg)		\
+# define GET_CPUID(preg, dreg)		\
 	preg.l = lo(DSPID);		\
 	preg.h = hi(DSPID);		\
 	dreg = [preg];			\
 	dreg = ROT dreg BY -1;		\
 	dreg = CC;
 
+# ifndef __ASSEMBLY__
+
+#  include <asm/processor.h>
+
 static inline unsigned long get_l1_scratch_start_cpu(int cpu)
 {
 	return cpu ? COREB_L1_SCRATCH_START : COREA_L1_SCRATCH_START;
@@ -210,8 +213,7 @@
 	return get_l1_data_b_start_cpu(blackfin_core_id());
 }
 
+# endif /* __ASSEMBLY__ */
 #endif /* CONFIG_SMP */
 
-#endif /* __ASSEMBLY__ */
-
 #endif
diff --git a/arch/blackfin/mach-bf561/include/mach/pll.h b/arch/blackfin/mach-bf561/include/mach/pll.h
index f2b1fbd..7977db2 100644
--- a/arch/blackfin/mach-bf561/include/mach/pll.h
+++ b/arch/blackfin/mach-bf561/include/mach/pll.h
@@ -1,5 +1,5 @@
 /*
- * Copyright 2005-2009 Analog Devices Inc.
+ * Copyright 2005-2010 Analog Devices Inc.
  *
  * Licensed under the GPL-2 or later.
  */
@@ -7,57 +7,48 @@
 #ifndef _MACH_PLL_H
 #define _MACH_PLL_H
 
+#ifndef __ASSEMBLY__
+
+#ifdef CONFIG_SMP
+
 #include <asm/blackfin.h>
 #include <asm/irqflags.h>
+#include <mach/irq.h>
 
-/* Writing to PLL_CTL initiates a PLL relock sequence. */
-static __inline__ void bfin_write_PLL_CTL(unsigned int val)
+#define SUPPLE_0_WAKEUP ((IRQ_SUPPLE_0 - (IRQ_CORETMR + 1)) % 32)
+
+static inline void
+bfin_iwr_restore(unsigned long iwr0, unsigned long iwr1, unsigned long iwr2)
 {
-	unsigned long flags, iwr0, iwr1;
+	unsigned long SICA_SICB_OFF = ((bfin_read_DSPID() & 0xff) ? 0x1000 : 0);
 
-	if (val == bfin_read_PLL_CTL())
-		return;
+	bfin_write32(SIC_IWR0 + SICA_SICB_OFF, iwr0);
+	bfin_write32(SIC_IWR1 + SICA_SICB_OFF, iwr1);
+}
+#define bfin_iwr_restore bfin_iwr_restore
 
-	flags = hard_local_irq_save();
-	/* Enable the PLL Wakeup bit in SIC IWR */
-	iwr0 = bfin_read32(SICA_IWR0);
-	iwr1 = bfin_read32(SICA_IWR1);
-	/* Only allow PPL Wakeup) */
-	bfin_write32(SICA_IWR0, IWR_ENABLE(0));
-	bfin_write32(SICA_IWR1, 0);
+static inline void
+bfin_iwr_save(unsigned long niwr0, unsigned long niwr1, unsigned long niwr2,
+              unsigned long *iwr0, unsigned long *iwr1, unsigned long *iwr2)
+{
+	unsigned long SICA_SICB_OFF = ((bfin_read_DSPID() & 0xff) ? 0x1000 : 0);
 
-	bfin_write16(PLL_CTL, val);
-	SSYNC();
-	asm("IDLE;");
+	*iwr0 = bfin_read32(SIC_IWR0 + SICA_SICB_OFF);
+	*iwr1 = bfin_read32(SIC_IWR1 + SICA_SICB_OFF);
+	bfin_iwr_restore(niwr0, niwr1, niwr2);
+}
+#define bfin_iwr_save bfin_iwr_save
 
-	bfin_write32(SICA_IWR0, iwr0);
-	bfin_write32(SICA_IWR1, iwr1);
-	hard_local_irq_restore(flags);
+static inline void
+bfin_iwr_set_sup0(unsigned long *iwr0, unsigned long *iwr1, unsigned long *iwr2)
+{
+	bfin_iwr_save(0, IWR_ENABLE(SUPPLE_0_WAKEUP), 0, iwr0, iwr1, iwr2);
 }
 
-/* Writing to VR_CTL initiates a PLL relock sequence. */
-static __inline__ void bfin_write_VR_CTL(unsigned int val)
-{
-	unsigned long flags, iwr0, iwr1;
+#endif
 
-	if (val == bfin_read_VR_CTL())
-		return;
+#endif
 
-	flags = hard_local_irq_save();
-	/* Enable the PLL Wakeup bit in SIC IWR */
-	iwr0 = bfin_read32(SICA_IWR0);
-	iwr1 = bfin_read32(SICA_IWR1);
-	/* Only allow PPL Wakeup) */
-	bfin_write32(SICA_IWR0, IWR_ENABLE(0));
-	bfin_write32(SICA_IWR1, 0);
+#include <mach-common/pll.h>
 
-	bfin_write16(VR_CTL, val);
-	SSYNC();
-	asm("IDLE;");
-
-	bfin_write32(SICA_IWR0, iwr0);
-	bfin_write32(SICA_IWR1, iwr1);
-	hard_local_irq_restore(flags);
-}
-
-#endif /* _MACH_PLL_H */
+#endif
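
As a rough illustration of the per-core register selection in the new bfin_iwr_save/bfin_iwr_restore helpers above: the low byte of DSPID distinguishes the two cores, and the second core's copy of each SIC wakeup register sits 0x1000 above the first core's. The sketch below models that as plain arithmetic; the base address and DSPID values are placeholders, not taken from the patch.

#include <stdint.h>
#include <stdio.h>

/* Stand-in base address; the real SIC_IWR0 MMR address differs. */
#define TOY_SIC_IWR0 0xFFC00104u

/* Pick the per-core SIC block from the low byte of a DSPID-like value. */
static uint32_t toy_iwr0_addr(uint32_t dspid)
{
	uint32_t off = (dspid & 0xff) ? 0x1000 : 0;
	return TOY_SIC_IWR0 + off;
}

int main(void)
{
	printf("first core IWR0 at 0x%08x\n", toy_iwr0_addr(0x00));
	printf("second core IWR0 at 0x%08x\n", toy_iwr0_addr(0x01));
	return 0;
}
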
diff --git a/arch/blackfin/mach-bf561/include/mach/smp.h b/arch/blackfin/mach-bf561/include/mach/smp.h
index 2c8c514..346c605 100644
--- a/arch/blackfin/mach-bf561/include/mach/smp.h
+++ b/arch/blackfin/mach-bf561/include/mach/smp.h
@@ -7,6 +7,8 @@
 #ifndef _MACH_BF561_SMP
 #define _MACH_BF561_SMP
 
+/* This header has to stand alone to avoid circular deps */
+
 struct task_struct;
 
 void platform_init_cpus(void);
@@ -17,13 +19,13 @@
 
 void platform_secondary_init(unsigned int cpu);
 
-void platform_request_ipi(int (*handler)(int, void *));
+void platform_request_ipi(int irq, /*irq_handler_t*/ void *handler);
 
-void platform_send_ipi(cpumask_t callmap);
+void platform_send_ipi(cpumask_t callmap, int irq);
 
-void platform_send_ipi_cpu(unsigned int cpu);
+void platform_send_ipi_cpu(unsigned int cpu, int irq);
 
-void platform_clear_ipi(unsigned int cpu);
+void platform_clear_ipi(unsigned int cpu, int irq);
 
 void bfin_local_timer_setup(void);
 
diff --git a/arch/blackfin/mach-bf561/smp.c b/arch/blackfin/mach-bf561/smp.c
index f540ed1..1074a7e 100644
--- a/arch/blackfin/mach-bf561/smp.c
+++ b/arch/blackfin/mach-bf561/smp.c
@@ -86,12 +86,12 @@
 
 	spin_lock(&boot_lock);
 
-	if ((bfin_read_SIC_SYSCR() & COREB_SRAM_INIT) == 0) {
+	if ((bfin_read_SYSCR() & COREB_SRAM_INIT) == 0) {
 		/* CoreB already running, sending ipi to wakeup it */
 		platform_send_ipi_cpu(cpu, IRQ_SUPPLE_0);
 	} else {
 		/* Kick CoreB, which should start execution from CORE_SRAM_BASE. */
-		bfin_write_SIC_SYSCR(bfin_read_SIC_SYSCR() & ~COREB_SRAM_INIT);
+		bfin_write_SYSCR(bfin_read_SYSCR() & ~COREB_SRAM_INIT);
 		SSYNC();
 	}
 
@@ -111,41 +111,46 @@
 		panic("CPU%u: processor failed to boot\n", cpu);
 }
 
-void __init platform_request_ipi(irq_handler_t handler)
+static const char supple0[] = "IRQ_SUPPLE_0";
+static const char supple1[] = "IRQ_SUPPLE_1";
+void __init platform_request_ipi(int irq, void *handler)
 {
 	int ret;
+	const char *name = (irq == IRQ_SUPPLE_0) ? supple0 : supple1;
 
-	ret = request_irq(IRQ_SUPPLE_0, handler, IRQF_DISABLED,
-			  "Supplemental Interrupt0", handler);
+	ret = request_irq(irq, handler, IRQF_DISABLED | IRQF_PERCPU, name, handler);
 	if (ret)
-		panic("Cannot request supplemental interrupt 0 for IPI service");
+		panic("Cannot request %s for IPI service", name);
 }
 
-void platform_send_ipi(cpumask_t callmap)
+void platform_send_ipi(cpumask_t callmap, int irq)
 {
 	unsigned int cpu;
+	int offset = (irq == IRQ_SUPPLE_0) ? 6 : 8;
 
 	for_each_cpu_mask(cpu, callmap) {
 		BUG_ON(cpu >= 2);
 		SSYNC();
-		bfin_write_SICB_SYSCR(bfin_read_SICB_SYSCR() | (1 << (6 + cpu)));
+		bfin_write_SICB_SYSCR(bfin_read_SICB_SYSCR() | (1 << (offset + cpu)));
 		SSYNC();
 	}
 }
 
-void platform_send_ipi_cpu(unsigned int cpu)
+void platform_send_ipi_cpu(unsigned int cpu, int irq)
 {
+	int offset = (irq == IRQ_SUPPLE_0) ? 6 : 8;
 	BUG_ON(cpu >= 2);
 	SSYNC();
-	bfin_write_SICB_SYSCR(bfin_read_SICB_SYSCR() | (1 << (6 + cpu)));
+	bfin_write_SICB_SYSCR(bfin_read_SICB_SYSCR() | (1 << (offset + cpu)));
 	SSYNC();
 }
 
-void platform_clear_ipi(unsigned int cpu)
+void platform_clear_ipi(unsigned int cpu, int irq)
 {
+	int offset = (irq == IRQ_SUPPLE_0) ? 10 : 12;
 	BUG_ON(cpu >= 2);
 	SSYNC();
-	bfin_write_SICB_SYSCR(bfin_read_SICB_SYSCR() | (1 << (10 + cpu)));
+	bfin_write_SICB_SYSCR(bfin_read_SICB_SYSCR() | (1 << (offset + cpu)));
 	SSYNC();
 }
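
The send and clear paths above differ only in which SICB_SYSCR bit they set for CPU 0 or CPU 1: offsets 6/8 request the two supplemental interrupts, offsets 10/12 clear them. A standalone sketch of that bit arithmetic follows; the IRQ identifiers are placeholders, only the offsets come from the patch.

#include <stdio.h>

/* Placeholder IRQ identifiers -- the real IRQ_SUPPLE_0/1 numbers differ. */
#define TOY_SUPPLE_0 0
#define TOY_SUPPLE_1 1

/* Bit that requests a supplemental interrupt for the given CPU
 * (offsets 6 and 8, as in platform_send_ipi_cpu() above). */
static unsigned int toy_ipi_send_bit(int irq, unsigned int cpu)
{
	int offset = (irq == TOY_SUPPLE_0) ? 6 : 8;
	return 1u << (offset + cpu);
}

/* Bit that clears it (offsets 10 and 12, as in platform_clear_ipi()). */
static unsigned int toy_ipi_clear_bit(int irq, unsigned int cpu)
{
	int offset = (irq == TOY_SUPPLE_0) ? 10 : 12;
	return 1u << (offset + cpu);
}

int main(void)
{
	printf("send SUPPLE_1 to cpu 1: 0x%04x\n", toy_ipi_send_bit(TOY_SUPPLE_1, 1));
	printf("clear SUPPLE_0 on cpu 0: 0x%04x\n", toy_ipi_clear_bit(TOY_SUPPLE_0, 0));
	return 0;
}
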
 
diff --git a/arch/blackfin/mach-common/entry.S b/arch/blackfin/mach-common/entry.S
index 2ca915e..bc08c98 100644
--- a/arch/blackfin/mach-common/entry.S
+++ b/arch/blackfin/mach-common/entry.S
@@ -615,7 +615,7 @@
 #ifdef CONFIG_IPIPE
 	r0 = sp;
 	SP += -12;
-	call ___ipipe_syscall_root;
+	pseudo_long_call ___ipipe_syscall_root, p0;
 	SP += 12;
 	cc = r0 == 1;
 	if cc jump .Lsyscall_really_exit;
@@ -692,7 +692,7 @@
 	[--sp] = reti;
 	SP += 4; /* don't merge with next insn to keep the pattern obvious */
 	SP += -12;
-	call ___ipipe_sync_root;
+	pseudo_long_call ___ipipe_sync_root, p4;
 	SP += 12;
 	jump .Lresume_userspace_1;
 .Lsyscall_no_irqsync:
diff --git a/arch/blackfin/mach-common/ints-priority.c b/arch/blackfin/mach-common/ints-priority.c
index da7e3c6..a604f19 100644
--- a/arch/blackfin/mach-common/ints-priority.c
+++ b/arch/blackfin/mach-common/ints-priority.c
@@ -866,7 +866,6 @@
 	u32 pintbit = PINT_BIT(pint_val);
 	u32 bank = PINT_2_BANK(pint_val);
 
-	pint[bank]->request = pintbit;
 	pint[bank]->mask_set = pintbit;
 }
 
diff --git a/arch/blackfin/mach-common/pm.c b/arch/blackfin/mach-common/pm.c
index 80884b1..3c648a0 100644
--- a/arch/blackfin/mach-common/pm.c
+++ b/arch/blackfin/mach-common/pm.c
@@ -23,9 +23,6 @@
 
 void bfin_pm_suspend_standby_enter(void)
 {
-	unsigned long flags;
-
-	flags = hard_local_irq_save();
 	bfin_pm_standby_setup();
 
 #ifdef CONFIG_PM_BFIN_SLEEP_DEEPER
@@ -55,8 +52,6 @@
 #else
 	bfin_write_SIC_IWR(IWR_DISABLE_ALL);
 #endif
-
-	hard_local_irq_restore(flags);
 }
 
 int bf53x_suspend_l1_mem(unsigned char *memptr)
@@ -127,7 +122,6 @@
 
 int bfin_pm_suspend_mem_enter(void)
 {
-	unsigned long flags;
 	int wakeup, ret;
 
 	unsigned char *memptr = kmalloc(L1_CODE_LENGTH + L1_DATA_A_LENGTH
@@ -149,12 +143,9 @@
 	wakeup |= GPWE;
 #endif
 
-	flags = hard_local_irq_save();
-
 	ret = blackfin_dma_suspend();
 
 	if (ret) {
-		hard_local_irq_restore(flags);
 		kfree(memptr);
 		return ret;
 	}
@@ -178,7 +169,6 @@
 	bfin_gpio_pm_hibernate_restore();
 	blackfin_dma_resume();
 
-	hard_local_irq_restore(flags);
 	kfree(memptr);
 
 	return 0;
@@ -233,7 +223,7 @@
 	return 0;
 }
 
-struct platform_suspend_ops bfin_pm_ops = {
+static const struct platform_suspend_ops bfin_pm_ops = {
 	.enter = bfin_pm_enter,
 	.valid	= bfin_pm_valid,
 };
diff --git a/arch/blackfin/mach-common/smp.c b/arch/blackfin/mach-common/smp.c
index a17107a..9f25140 100644
--- a/arch/blackfin/mach-common/smp.c
+++ b/arch/blackfin/mach-common/smp.c
@@ -19,6 +19,7 @@
 #include <linux/mm.h>
 #include <linux/cpu.h>
 #include <linux/smp.h>
+#include <linux/cpumask.h>
 #include <linux/seq_file.h>
 #include <linux/irq.h>
 #include <linux/slab.h>
@@ -43,12 +44,6 @@
 	*init_saved_seqstat_coreb, *init_saved_icplb_fault_addr_coreb,
 	*init_saved_dcplb_fault_addr_coreb;
 
-cpumask_t cpu_possible_map;
-EXPORT_SYMBOL(cpu_possible_map);
-
-cpumask_t cpu_online_map;
-EXPORT_SYMBOL(cpu_online_map);
-
 #define BFIN_IPI_RESCHEDULE   0
 #define BFIN_IPI_CALL_FUNC    1
 #define BFIN_IPI_CPU_STOP     2
@@ -65,8 +60,7 @@
 	void (*func)(void *info);
 	void *info;
 	int wait;
-	cpumask_t pending;
-	cpumask_t waitmask;
+	cpumask_t *waitmask;
 };
 
 static struct blackfin_flush_data smp_flush_data;
@@ -74,15 +68,19 @@
 static DEFINE_SPINLOCK(stop_lock);
 
 struct ipi_message {
-	struct list_head list;
 	unsigned long type;
 	struct smp_call_struct call_struct;
 };
 
+/* A magic number - stress test shows this is safe for common cases */
+#define BFIN_IPI_MSGQ_LEN 5
+
+/* Simple FIFO buffer, overflow leads to panic */
 struct ipi_message_queue {
-	struct list_head head;
 	spinlock_t lock;
 	unsigned long count;
+	unsigned long head; /* head of the queue */
+	struct ipi_message ipi_message[BFIN_IPI_MSGQ_LEN];
 };
 
 static DEFINE_PER_CPU(struct ipi_message_queue, ipi_msg_queue);
@@ -121,7 +119,6 @@
 	func = msg->call_struct.func;
 	info = msg->call_struct.info;
 	wait = msg->call_struct.wait;
-	cpu_clear(cpu, msg->call_struct.pending);
 	func(info);
 	if (wait) {
 #ifdef __ARCH_SYNC_CORE_DCACHE
@@ -132,51 +129,57 @@
 		 */
 		resync_core_dcache();
 #endif
-		cpu_clear(cpu, msg->call_struct.waitmask);
-	} else
-		kfree(msg);
+		cpu_clear(cpu, *msg->call_struct.waitmask);
+	}
 }
 
-static irqreturn_t ipi_handler(int irq, void *dev_instance)
+/* Use IRQ_SUPPLE_0 to request reschedule.
+ * When returning from interrupt to user space,
+ * there is a chance to reschedule */
+static irqreturn_t ipi_handler_int0(int irq, void *dev_instance)
+{
+	unsigned int cpu = smp_processor_id();
+
+	platform_clear_ipi(cpu, IRQ_SUPPLE_0);
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t ipi_handler_int1(int irq, void *dev_instance)
 {
 	struct ipi_message *msg;
 	struct ipi_message_queue *msg_queue;
 	unsigned int cpu = smp_processor_id();
+	unsigned long flags;
 
-	platform_clear_ipi(cpu);
+	platform_clear_ipi(cpu, IRQ_SUPPLE_1);
 
 	msg_queue = &__get_cpu_var(ipi_msg_queue);
-	msg_queue->count++;
 
-	spin_lock(&msg_queue->lock);
-	while (!list_empty(&msg_queue->head)) {
-		msg = list_entry(msg_queue->head.next, typeof(*msg), list);
-		list_del(&msg->list);
+	spin_lock_irqsave(&msg_queue->lock, flags);
+
+	while (msg_queue->count) {
+		msg = &msg_queue->ipi_message[msg_queue->head];
 		switch (msg->type) {
-		case BFIN_IPI_RESCHEDULE:
-			/* That's the easiest one; leave it to
-			 * return_from_int. */
-			kfree(msg);
-			break;
 		case BFIN_IPI_CALL_FUNC:
-			spin_unlock(&msg_queue->lock);
+			spin_unlock_irqrestore(&msg_queue->lock, flags);
 			ipi_call_function(cpu, msg);
-			spin_lock(&msg_queue->lock);
+			spin_lock_irqsave(&msg_queue->lock, flags);
 			break;
 		case BFIN_IPI_CPU_STOP:
-			spin_unlock(&msg_queue->lock);
+			spin_unlock_irqrestore(&msg_queue->lock, flags);
 			ipi_cpu_stop(cpu);
-			spin_lock(&msg_queue->lock);
-			kfree(msg);
+			spin_lock_irqsave(&msg_queue->lock, flags);
 			break;
 		default:
 			printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%lx\n",
 			       cpu, msg->type);
-			kfree(msg);
 			break;
 		}
+		msg_queue->head++;
+		msg_queue->head %= BFIN_IPI_MSGQ_LEN;
+		msg_queue->count--;
 	}
-	spin_unlock(&msg_queue->lock);
+	spin_unlock_irqrestore(&msg_queue->lock, flags);
 	return IRQ_HANDLED;
 }
 
@@ -186,48 +189,47 @@
 	struct ipi_message_queue *msg_queue;
 	for_each_possible_cpu(cpu) {
 		msg_queue = &per_cpu(ipi_msg_queue, cpu);
-		INIT_LIST_HEAD(&msg_queue->head);
 		spin_lock_init(&msg_queue->lock);
 		msg_queue->count = 0;
+		msg_queue->head = 0;
 	}
 }
 
-int smp_call_function(void (*func)(void *info), void *info, int wait)
+static inline void smp_send_message(cpumask_t callmap, unsigned long type,
+					void (*func) (void *info), void *info, int wait)
 {
 	unsigned int cpu;
-	cpumask_t callmap;
-	unsigned long flags;
 	struct ipi_message_queue *msg_queue;
 	struct ipi_message *msg;
-
-	callmap = cpu_online_map;
-	cpu_clear(smp_processor_id(), callmap);
-	if (cpus_empty(callmap))
-		return 0;
-
-	msg = kmalloc(sizeof(*msg), GFP_ATOMIC);
-	if (!msg)
-		return -ENOMEM;
-	INIT_LIST_HEAD(&msg->list);
-	msg->call_struct.func = func;
-	msg->call_struct.info = info;
-	msg->call_struct.wait = wait;
-	msg->call_struct.pending = callmap;
-	msg->call_struct.waitmask = callmap;
-	msg->type = BFIN_IPI_CALL_FUNC;
+	unsigned long flags, next_msg;
+	cpumask_t waitmask = callmap; /* waitmask is shared by all cpus */
 
 	for_each_cpu_mask(cpu, callmap) {
 		msg_queue = &per_cpu(ipi_msg_queue, cpu);
 		spin_lock_irqsave(&msg_queue->lock, flags);
-		list_add_tail(&msg->list, &msg_queue->head);
+		if (msg_queue->count < BFIN_IPI_MSGQ_LEN) {
+			next_msg = (msg_queue->head + msg_queue->count)
+					% BFIN_IPI_MSGQ_LEN;
+			msg = &msg_queue->ipi_message[next_msg];
+			msg->type = type;
+			if (type == BFIN_IPI_CALL_FUNC) {
+				msg->call_struct.func = func;
+				msg->call_struct.info = info;
+				msg->call_struct.wait = wait;
+				msg->call_struct.waitmask = &waitmask;
+			}
+			msg_queue->count++;
+		} else
+			panic("IPI message queue overflow\n");
 		spin_unlock_irqrestore(&msg_queue->lock, flags);
-		platform_send_ipi_cpu(cpu);
+		platform_send_ipi_cpu(cpu, IRQ_SUPPLE_1);
 	}
+
 	if (wait) {
-		while (!cpus_empty(msg->call_struct.waitmask))
+		while (!cpus_empty(waitmask))
 			blackfin_dcache_invalidate_range(
-				(unsigned long)(&msg->call_struct.waitmask),
-				(unsigned long)(&msg->call_struct.waitmask));
+				(unsigned long)(&waitmask),
+				(unsigned long)(&waitmask));
 #ifdef __ARCH_SYNC_CORE_DCACHE
 		/*
 		 * Invalidate D cache in case shared data was changed by
@@ -235,8 +237,20 @@
 		 */
 		resync_core_dcache();
 #endif
-		kfree(msg);
 	}
+}
+
+int smp_call_function(void (*func)(void *info), void *info, int wait)
+{
+	cpumask_t callmap;
+
+	callmap = cpu_online_map;
+	cpu_clear(smp_processor_id(), callmap);
+	if (cpus_empty(callmap))
+		return 0;
+
+	smp_send_message(callmap, BFIN_IPI_CALL_FUNC, func, info, wait);
+
 	return 0;
 }
 EXPORT_SYMBOL_GPL(smp_call_function);
@@ -246,100 +260,39 @@
 {
 	unsigned int cpu = cpuid;
 	cpumask_t callmap;
-	unsigned long flags;
-	struct ipi_message_queue *msg_queue;
-	struct ipi_message *msg;
 
 	if (cpu_is_offline(cpu))
 		return 0;
 	cpus_clear(callmap);
 	cpu_set(cpu, callmap);
 
-	msg = kmalloc(sizeof(*msg), GFP_ATOMIC);
-	if (!msg)
-		return -ENOMEM;
-	INIT_LIST_HEAD(&msg->list);
-	msg->call_struct.func = func;
-	msg->call_struct.info = info;
-	msg->call_struct.wait = wait;
-	msg->call_struct.pending = callmap;
-	msg->call_struct.waitmask = callmap;
-	msg->type = BFIN_IPI_CALL_FUNC;
+	smp_send_message(callmap, BFIN_IPI_CALL_FUNC, func, info, wait);
 
-	msg_queue = &per_cpu(ipi_msg_queue, cpu);
-	spin_lock_irqsave(&msg_queue->lock, flags);
-	list_add_tail(&msg->list, &msg_queue->head);
-	spin_unlock_irqrestore(&msg_queue->lock, flags);
-	platform_send_ipi_cpu(cpu);
-
-	if (wait) {
-		while (!cpus_empty(msg->call_struct.waitmask))
-			blackfin_dcache_invalidate_range(
-				(unsigned long)(&msg->call_struct.waitmask),
-				(unsigned long)(&msg->call_struct.waitmask));
-#ifdef __ARCH_SYNC_CORE_DCACHE
-		/*
-		 * Invalidate D cache in case shared data was changed by
-		 * other processors to ensure cache coherence.
-		 */
-		resync_core_dcache();
-#endif
-		kfree(msg);
-	}
 	return 0;
 }
 EXPORT_SYMBOL_GPL(smp_call_function_single);
 
 void smp_send_reschedule(int cpu)
 {
-	unsigned long flags;
-	struct ipi_message_queue *msg_queue;
-	struct ipi_message *msg;
-
+	/* simply trigger an ipi */
 	if (cpu_is_offline(cpu))
 		return;
-
-	msg = kzalloc(sizeof(*msg), GFP_ATOMIC);
-	if (!msg)
-		return;
-	INIT_LIST_HEAD(&msg->list);
-	msg->type = BFIN_IPI_RESCHEDULE;
-
-	msg_queue = &per_cpu(ipi_msg_queue, cpu);
-	spin_lock_irqsave(&msg_queue->lock, flags);
-	list_add_tail(&msg->list, &msg_queue->head);
-	spin_unlock_irqrestore(&msg_queue->lock, flags);
-	platform_send_ipi_cpu(cpu);
+	platform_send_ipi_cpu(cpu, IRQ_SUPPLE_0);
 
 	return;
 }
 
 void smp_send_stop(void)
 {
-	unsigned int cpu;
 	cpumask_t callmap;
-	unsigned long flags;
-	struct ipi_message_queue *msg_queue;
-	struct ipi_message *msg;
 
 	callmap = cpu_online_map;
 	cpu_clear(smp_processor_id(), callmap);
 	if (cpus_empty(callmap))
 		return;
 
-	msg = kzalloc(sizeof(*msg), GFP_ATOMIC);
-	if (!msg)
-		return;
-	INIT_LIST_HEAD(&msg->list);
-	msg->type = BFIN_IPI_CPU_STOP;
+	smp_send_message(callmap, BFIN_IPI_CPU_STOP, NULL, NULL, 0);
 
-	for_each_cpu_mask(cpu, callmap) {
-		msg_queue = &per_cpu(ipi_msg_queue, cpu);
-		spin_lock_irqsave(&msg_queue->lock, flags);
-		list_add_tail(&msg->list, &msg_queue->head);
-		spin_unlock_irqrestore(&msg_queue->lock, flags);
-		platform_send_ipi_cpu(cpu);
-	}
 	return;
 }
 
@@ -446,7 +399,8 @@
 {
 	platform_prepare_cpus(max_cpus);
 	ipi_queue_init();
-	platform_request_ipi(&ipi_handler);
+	platform_request_ipi(IRQ_SUPPLE_0, ipi_handler_int0);
+	platform_request_ipi(IRQ_SUPPLE_1, ipi_handler_int1);
 }
 
 void __init smp_cpus_done(unsigned int max_cpus)
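
The hunks above replace Blackfin's kmalloc'd, list-based IPI messages with a fixed per-CPU circular buffer indexed by head and count under an irqsave spinlock. The following is a minimal user-space sketch of that ring-queue pattern; the names, the queue length, and the single-threaded (lock-free) setting are illustrative, not the kernel code.

/*
 * Sketch of a head+count circular message queue, as used by the
 * Blackfin IPI rework above.  MSGQ_LEN and struct names are made up;
 * locking is omitted because this demo is single-threaded.
 */
#include <stdio.h>

#define MSGQ_LEN 5

struct msg { unsigned long type; };

struct msg_queue {
	struct msg ring[MSGQ_LEN];
	unsigned int head;   /* index of the oldest pending message */
	unsigned int count;  /* number of pending messages */
};

static int enqueue(struct msg_queue *q, unsigned long type)
{
	unsigned int next;

	if (q->count >= MSGQ_LEN)
		return -1;              /* the kernel version panics here */
	next = (q->head + q->count) % MSGQ_LEN;
	q->ring[next].type = type;
	q->count++;
	return 0;
}

static int dequeue(struct msg_queue *q, unsigned long *type)
{
	if (!q->count)
		return -1;
	*type = q->ring[q->head].type;
	q->head = (q->head + 1) % MSGQ_LEN;
	q->count--;
	return 0;
}

int main(void)
{
	struct msg_queue q = { .head = 0, .count = 0 };
	unsigned long t;

	enqueue(&q, 1);
	enqueue(&q, 2);
	while (!dequeue(&q, &t))
		printf("got message type %lu\n", t);
	return 0;
}
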
diff --git a/arch/blackfin/mm/sram-alloc.c b/arch/blackfin/mm/sram-alloc.c
index 627e04b..dfd304a 100644
--- a/arch/blackfin/mm/sram-alloc.c
+++ b/arch/blackfin/mm/sram-alloc.c
@@ -704,18 +704,18 @@
 {
 	struct sram_list_struct *lsl, **tmp;
 	struct mm_struct *mm = current->mm;
+	int ret = -1;
 
 	for (tmp = &mm->context.sram_list; *tmp; tmp = &(*tmp)->next)
-		if ((*tmp)->addr == addr)
-			goto found;
-	return -1;
-found:
-	lsl = *tmp;
-	sram_free(addr);
-	*tmp = lsl->next;
-	kfree(lsl);
+		if ((*tmp)->addr == addr) {
+			lsl = *tmp;
+			ret = sram_free(addr);
+			*tmp = lsl->next;
+			kfree(lsl);
+			break;
+		}
 
-	return 0;
+	return ret;
 }
 EXPORT_SYMBOL(sram_free_with_lsl);
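
The sram_free_with_lsl() rewrite above keeps the classic pointer-to-pointer list walk (tmp always refers to the link that reaches the current node, so unlinking needs no head special case) while propagating sram_free()'s status instead of returning 0 unconditionally. A stand-alone sketch of that idiom, with made-up types and a plain free() in place of sram_free():

#include <stdio.h>
#include <stdlib.h>

struct node {
	unsigned long addr;
	struct node *next;
};

static int remove_addr(struct node **head, unsigned long addr)
{
	struct node **tmp;

	for (tmp = head; *tmp; tmp = &(*tmp)->next)
		if ((*tmp)->addr == addr) {
			struct node *victim = *tmp;

			*tmp = victim->next;   /* unlink via the link itself */
			free(victim);
			return 0;
		}
	return -1;                             /* not found */
}

int main(void)
{
	struct node *head = NULL;
	unsigned long a;

	for (a = 1; a <= 3; a++) {
		struct node *n = malloc(sizeof(*n));
		n->addr = a;
		n->next = head;
		head = n;
	}
	printf("remove 2: %d\n", remove_addr(&head, 2));  /* 0 */
	printf("remove 9: %d\n", remove_addr(&head, 9));  /* -1 */
	return 0;
}
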
 
diff --git a/arch/cris/arch-v32/lib/nand_init.S b/arch/cris/arch-v32/lib/nand_init.S
index e705f5c..d671fed 100644
--- a/arch/cris/arch-v32/lib/nand_init.S
+++ b/arch/cris/arch-v32/lib/nand_init.S
@@ -139,7 +139,7 @@
 	lsrq	8, $r4
 	move.b	$r4, [$r1]	; Row address
 	lsrq	8, $r4
-	move.b	$r4, [$r1]	; Row adddress
+	move.b	$r4, [$r1]	; Row address
 	moveq	20, $r4
 2:	bne	2b
 	subq	1, $r4
diff --git a/arch/cris/include/asm/etraxgpio.h b/arch/cris/include/asm/etraxgpio.h
index d474818..461c089 100644
--- a/arch/cris/include/asm/etraxgpio.h
+++ b/arch/cris/include/asm/etraxgpio.h
@@ -1,5 +1,5 @@
 /*
- * The following devices are accessable using this driver using
+ * The following devices are accessible using this driver using
  * GPIO_MAJOR (120) and a couple of minor numbers.
  *
  * For ETRAX 100LX (CONFIG_ETRAX_ARCH_V10):
diff --git a/arch/cris/kernel/time.c b/arch/cris/kernel/time.c
index b509643..4e73092 100644
--- a/arch/cris/kernel/time.c
+++ b/arch/cris/kernel/time.c
@@ -86,7 +86,7 @@
 		CMOS_WRITE(real_seconds,RTC_SECONDS);
 		CMOS_WRITE(real_minutes,RTC_MINUTES);
 	} else {
-		printk(KERN_WARNING
+		printk_once(KERN_NOTICE
 		       "set_rtc_mmss: can't update from %d to %d\n",
 		       cmos_minutes, real_minutes);
 		retval = -1;
diff --git a/arch/h8300/Kconfig.debug b/arch/h8300/Kconfig.debug
index ee671c3..e8d1b23 100644
--- a/arch/h8300/Kconfig.debug
+++ b/arch/h8300/Kconfig.debug
@@ -48,7 +48,7 @@
 	  builtin kernel commandline enabled.
 
 config KERNEL_COMMAND
-	string "Buildin commmand string"
+	string "Buildin command string"
 	depends on DEFAULT_CMDLINE
 	help
 	  builtin kernel commandline strings.
diff --git a/arch/ia64/include/asm/kvm_host.h b/arch/ia64/include/asm/kvm_host.h
index 2f229e5..2689ee5 100644
--- a/arch/ia64/include/asm/kvm_host.h
+++ b/arch/ia64/include/asm/kvm_host.h
@@ -590,6 +590,10 @@
 int kvm_pal_emul(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
 void kvm_sal_emul(struct kvm_vcpu *vcpu);
 
+#define __KVM_HAVE_ARCH_VM_ALLOC 1
+struct kvm *kvm_arch_alloc_vm(void);
+void kvm_arch_free_vm(struct kvm *kvm);
+
 #endif /* __ASSEMBLY__*/
 
 #endif
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index d92d5b5..ac76da0 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -617,11 +617,14 @@
 	return get_unmapped_area(file, addr, len, pgoff, flags);
 }
 
+/* forward declaration */
+static const struct dentry_operations pfmfs_dentry_operations;
 
 static struct dentry *
 pfmfs_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data)
 {
-	return mount_pseudo(fs_type, "pfm:", NULL, PFMFS_MAGIC);
+	return mount_pseudo(fs_type, "pfm:", NULL, &pfmfs_dentry_operations,
+			PFMFS_MAGIC);
 }
 
 static struct file_system_type pfm_fs_type = {
@@ -2232,7 +2235,6 @@
 	}
 	path.mnt = mntget(pfmfs_mnt);
 
-	d_set_d_op(path.dentry, &pfmfs_dentry_operations);
 	d_add(path.dentry, inode);
 
 	file = alloc_file(&path, FMODE_READ, &pfm_file_ops);
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index f56a631..70d224d 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -749,7 +749,7 @@
 	return r;
 }
 
-static struct kvm *kvm_alloc_kvm(void)
+struct kvm *kvm_arch_alloc_vm(void)
 {
 
 	struct kvm *kvm;
@@ -760,7 +760,7 @@
 	vm_base = __get_free_pages(GFP_KERNEL, get_order(KVM_VM_DATA_SIZE));
 
 	if (!vm_base)
-		return ERR_PTR(-ENOMEM);
+		return NULL;
 
 	memset((void *)vm_base, 0, KVM_VM_DATA_SIZE);
 	kvm = (struct kvm *)(vm_base +
@@ -806,10 +806,12 @@
 #define GUEST_PHYSICAL_RR4	0x2739
 #define VMM_INIT_RR		0x1660
 
-static void kvm_init_vm(struct kvm *kvm)
+int kvm_arch_init_vm(struct kvm *kvm)
 {
 	BUG_ON(!kvm);
 
+	kvm->arch.is_sn2 = ia64_platform_is("sn2");
+
 	kvm->arch.metaphysical_rr0 = GUEST_PHYSICAL_RR0;
 	kvm->arch.metaphysical_rr4 = GUEST_PHYSICAL_RR4;
 	kvm->arch.vmm_init_rr = VMM_INIT_RR;
@@ -823,21 +825,8 @@
 
 	/* Reserve bit 0 of irq_sources_bitmap for userspace irq source */
 	set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap);
-}
 
-struct  kvm *kvm_arch_create_vm(void)
-{
-	struct kvm *kvm = kvm_alloc_kvm();
-
-	if (IS_ERR(kvm))
-		return ERR_PTR(-ENOMEM);
-
-	kvm->arch.is_sn2 = ia64_platform_is("sn2");
-
-	kvm_init_vm(kvm);
-
-	return kvm;
-
+	return 0;
 }
 
 static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm,
@@ -962,7 +951,9 @@
 			goto out;
 		r = kvm_setup_default_irq_routing(kvm);
 		if (r) {
+			mutex_lock(&kvm->slots_lock);
 			kvm_ioapic_destroy(kvm);
+			mutex_unlock(&kvm->slots_lock);
 			goto out;
 		}
 		break;
@@ -1357,7 +1348,7 @@
 	return -EINVAL;
 }
 
-static void free_kvm(struct kvm *kvm)
+void kvm_arch_free_vm(struct kvm *kvm)
 {
 	unsigned long vm_base = kvm->arch.vm_base;
 
@@ -1399,9 +1390,6 @@
 #endif
 	kfree(kvm->arch.vioapic);
 	kvm_release_vm_pages(kvm);
-	kvm_free_physmem(kvm);
-	cleanup_srcu_struct(&kvm->srcu);
-	free_kvm(kvm);
 }
 
 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
diff --git a/arch/ia64/kvm/mmio.c b/arch/ia64/kvm/mmio.c
index fb8f9f5..f1e17d3 100644
--- a/arch/ia64/kvm/mmio.c
+++ b/arch/ia64/kvm/mmio.c
@@ -130,7 +130,7 @@
 
 	local_irq_save(psr);
 
-	/*Intercept the acces for PIB range*/
+	/*Intercept the access for PIB range*/
 	if (iot == GPFN_PIB) {
 		if (!dir)
 			lsapic_write(vcpu, src_pa, s, *dest);
diff --git a/arch/m68k/ifpsp060/src/fpsp.S b/arch/m68k/ifpsp060/src/fpsp.S
index 73613b5..26e85e2 100644
--- a/arch/m68k/ifpsp060/src/fpsp.S
+++ b/arch/m68k/ifpsp060/src/fpsp.S
@@ -3881,7 +3881,7 @@
 # FP Unimplemented Instruction stack frame and jump to that entry
 # point.
 #
-# but, if the FPU is disabled, then we need to jump to the FPU diabled
+# but, if the FPU is disabled, then we need to jump to the FPU disabled
 # entry point.
 	movc		%pcr,%d0
 	btst		&0x1,%d0
diff --git a/arch/m68k/include/asm/m548xgpt.h b/arch/m68k/include/asm/m548xgpt.h
index c8ef158..33b2eef 100644
--- a/arch/m68k/include/asm/m548xgpt.h
+++ b/arch/m68k/include/asm/m548xgpt.h
@@ -59,11 +59,13 @@
 #define MCF_GPT_GMS_GPIO_INPUT     (0x00000000)
 #define MCF_GPT_GMS_GPIO_OUTLO     (0x00000020)
 #define MCF_GPT_GMS_GPIO_OUTHI     (0x00000030)
+#define MCF_GPT_GMS_GPIO_MASK      (0x00000030)
 #define MCF_GPT_GMS_TMS_DISABLE    (0x00000000)
 #define MCF_GPT_GMS_TMS_INCAPT     (0x00000001)
 #define MCF_GPT_GMS_TMS_OUTCAPT    (0x00000002)
 #define MCF_GPT_GMS_TMS_PWM        (0x00000003)
 #define MCF_GPT_GMS_TMS_GPIO       (0x00000004)
+#define MCF_GPT_GMS_TMS_MASK       (0x00000007)
 
 /* Bit definitions and macros for MCF_GPT_GCIR */
 #define MCF_GPT_GCIR_CNT(x)        (((x)&0x0000FFFF)<<0)
diff --git a/arch/m68k/mac/psc.c b/arch/m68k/mac/psc.c
index ba6ccab..a4c3eb6 100644
--- a/arch/m68k/mac/psc.c
+++ b/arch/m68k/mac/psc.c
@@ -88,7 +88,7 @@
 
 	/*
 	 * The PSC is always at the same spot, but using psc
-	 * keeps things consisant with the psc_xxxx functions.
+	 * keeps things consistent with the psc_xxxx functions.
 	 */
 
 	psc = (void *) PSC_BASE;
diff --git a/arch/microblaze/Kconfig.debug b/arch/microblaze/Kconfig.debug
index e66e25c..012e377 100644
--- a/arch/microblaze/Kconfig.debug
+++ b/arch/microblaze/Kconfig.debug
@@ -23,8 +23,4 @@
 	  This option turns on/off heart beat kernel functionality.
 	  First GPIO node is taken.
 
-config DEBUG_BOOTMEM
-	depends on DEBUG_KERNEL
-	bool "Debug BOOTMEM initialization"
-
 endmenu
diff --git a/arch/microblaze/Makefile b/arch/microblaze/Makefile
index 15f1f1d..6f432e6 100644
--- a/arch/microblaze/Makefile
+++ b/arch/microblaze/Makefile
@@ -17,7 +17,7 @@
 # The various CONFIG_XILINX cpu features options are integers 0/1/2...
 # rather than bools y/n
 
-# Work out HW multipler support.  This is icky.
+# Work out HW multipler support. This is tricky.
 # 1. Spartan2 has no HW multiplers.
 # 2. MicroBlaze v3.x always uses them, except in Spartan 2
 # 3. All other FPGa/CPU ver combos, we can trust the CONFIG_ settings
diff --git a/arch/microblaze/configs/mmu_defconfig b/arch/microblaze/configs/mmu_defconfig
index 8b422b1..ab8fbe7 100644
--- a/arch/microblaze/configs/mmu_defconfig
+++ b/arch/microblaze/configs/mmu_defconfig
@@ -66,5 +66,4 @@
 CONFIG_DEBUG_INFO=y
 # CONFIG_RCU_CPU_STALL_DETECTOR is not set
 CONFIG_EARLY_PRINTK=y
-CONFIG_DEBUG_BOOTMEM=y
 # CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/microblaze/include/asm/pvr.h b/arch/microblaze/include/asm/pvr.h
index 37db96a..a10bec6 100644
--- a/arch/microblaze/include/asm/pvr.h
+++ b/arch/microblaze/include/asm/pvr.h
@@ -1,9 +1,9 @@
 /*
  * Support for the MicroBlaze PVR (Processor Version Register)
  *
- * Copyright (C) 2009 Michal Simek <monstr@monstr.eu>
+ * Copyright (C) 2009 - 2011 Michal Simek <monstr@monstr.eu>
  * Copyright (C) 2007 John Williams <john.williams@petalogix.com>
- * Copyright (C) 2007 - 2009 PetaLogix
+ * Copyright (C) 2007 - 2011 PetaLogix
  *
  * This file is subject to the terms and conditions of the GNU General
  * Public License. See the file COPYING in the main directory of this
@@ -46,11 +46,11 @@
 #define PVR2_I_LMB_MASK			0x10000000
 #define PVR2_INTERRUPT_IS_EDGE_MASK	0x08000000
 #define PVR2_EDGE_IS_POSITIVE_MASK	0x04000000
-#define PVR2_D_PLB_MASK			0x02000000	/* new */
-#define PVR2_I_PLB_MASK			0x01000000	/* new */
-#define PVR2_INTERCONNECT		0x00800000	/* new */
-#define PVR2_USE_EXTEND_FSL		0x00080000	/* new */
-#define PVR2_USE_FSL_EXC		0x00040000	/* new */
+#define PVR2_D_PLB_MASK			0x02000000 /* new */
+#define PVR2_I_PLB_MASK			0x01000000 /* new */
+#define PVR2_INTERCONNECT		0x00800000 /* new */
+#define PVR2_USE_EXTEND_FSL		0x00080000 /* new */
+#define PVR2_USE_FSL_EXC		0x00040000 /* new */
 #define PVR2_USE_MSR_INSTR		0x00020000
 #define PVR2_USE_PCMP_INSTR		0x00010000
 #define PVR2_AREA_OPTIMISED		0x00008000
@@ -59,7 +59,7 @@
 #define PVR2_USE_HW_MUL_MASK		0x00001000
 #define PVR2_USE_FPU_MASK		0x00000800
 #define PVR2_USE_MUL64_MASK		0x00000400
-#define PVR2_USE_FPU2_MASK		0x00000200	/* new */
+#define PVR2_USE_FPU2_MASK		0x00000200 /* new */
 #define PVR2_USE_IPLBEXC 		0x00000100
 #define PVR2_USE_DPLBEXC		0x00000080
 #define PVR2_OPCODE_0x0_ILL_MASK	0x00000040
@@ -122,96 +122,103 @@
 
 
 /* PVR access macros */
-#define PVR_IS_FULL(pvr)		(pvr.pvr[0] & PVR0_PVR_FULL_MASK)
-#define PVR_USE_BARREL(pvr)		(pvr.pvr[0] & PVR0_USE_BARREL_MASK)
-#define PVR_USE_DIV(pvr)		(pvr.pvr[0] & PVR0_USE_DIV_MASK)
-#define PVR_USE_HW_MUL(pvr)		(pvr.pvr[0] & PVR0_USE_HW_MUL_MASK)
-#define PVR_USE_FPU(pvr)		(pvr.pvr[0] & PVR0_USE_FPU_MASK)
-#define PVR_USE_FPU2(pvr)		(pvr.pvr[2] & PVR2_USE_FPU2_MASK)
-#define PVR_USE_ICACHE(pvr)		(pvr.pvr[0] & PVR0_USE_ICACHE_MASK)
-#define PVR_USE_DCACHE(pvr)		(pvr.pvr[0] & PVR0_USE_DCACHE_MASK)
-#define PVR_VERSION(pvr)	((pvr.pvr[0] & PVR0_VERSION_MASK) >> 8)
-#define PVR_USER1(pvr)			(pvr.pvr[0] & PVR0_USER1_MASK)
-#define PVR_USER2(pvr)			(pvr.pvr[1] & PVR1_USER2_MASK)
+#define PVR_IS_FULL(_pvr)	(_pvr.pvr[0] & PVR0_PVR_FULL_MASK)
+#define PVR_USE_BARREL(_pvr)	(_pvr.pvr[0] & PVR0_USE_BARREL_MASK)
+#define PVR_USE_DIV(_pvr)	(_pvr.pvr[0] & PVR0_USE_DIV_MASK)
+#define PVR_USE_HW_MUL(_pvr)	(_pvr.pvr[0] & PVR0_USE_HW_MUL_MASK)
+#define PVR_USE_FPU(_pvr)	(_pvr.pvr[0] & PVR0_USE_FPU_MASK)
+#define PVR_USE_FPU2(_pvr)	(_pvr.pvr[2] & PVR2_USE_FPU2_MASK)
+#define PVR_USE_ICACHE(_pvr)	(_pvr.pvr[0] & PVR0_USE_ICACHE_MASK)
+#define PVR_USE_DCACHE(_pvr)	(_pvr.pvr[0] & PVR0_USE_DCACHE_MASK)
+#define PVR_VERSION(_pvr)	((_pvr.pvr[0] & PVR0_VERSION_MASK) >> 8)
+#define PVR_USER1(_pvr)		(_pvr.pvr[0] & PVR0_USER1_MASK)
+#define PVR_USER2(_pvr)		(_pvr.pvr[1] & PVR1_USER2_MASK)
 
-#define PVR_D_OPB(pvr)			(pvr.pvr[2] & PVR2_D_OPB_MASK)
-#define PVR_D_LMB(pvr)			(pvr.pvr[2] & PVR2_D_LMB_MASK)
-#define PVR_I_OPB(pvr)			(pvr.pvr[2] & PVR2_I_OPB_MASK)
-#define PVR_I_LMB(pvr)			(pvr.pvr[2] & PVR2_I_LMB_MASK)
-#define PVR_INTERRUPT_IS_EDGE(pvr) \
-			(pvr.pvr[2] & PVR2_INTERRUPT_IS_EDGE_MASK)
-#define PVR_EDGE_IS_POSITIVE(pvr) \
-			(pvr.pvr[2] & PVR2_EDGE_IS_POSITIVE_MASK)
-#define PVR_USE_MSR_INSTR(pvr)		(pvr.pvr[2] & PVR2_USE_MSR_INSTR)
-#define PVR_USE_PCMP_INSTR(pvr)		(pvr.pvr[2] & PVR2_USE_PCMP_INSTR)
-#define PVR_AREA_OPTIMISED(pvr)		(pvr.pvr[2] & PVR2_AREA_OPTIMISED)
-#define PVR_USE_MUL64(pvr)		(pvr.pvr[2] & PVR2_USE_MUL64_MASK)
-#define PVR_OPCODE_0x0_ILLEGAL(pvr) \
-			(pvr.pvr[2] & PVR2_OPCODE_0x0_ILL_MASK)
-#define PVR_UNALIGNED_EXCEPTION(pvr) \
-			(pvr.pvr[2] & PVR2_UNALIGNED_EXC_MASK)
-#define PVR_ILL_OPCODE_EXCEPTION(pvr) \
-			(pvr.pvr[2] & PVR2_ILL_OPCODE_EXC_MASK)
-#define PVR_IOPB_BUS_EXCEPTION(pvr) \
-			(pvr.pvr[2] & PVR2_IOPB_BUS_EXC_MASK)
-#define PVR_DOPB_BUS_EXCEPTION(pvr) \
-			(pvr.pvr[2] & PVR2_DOPB_BUS_EXC_MASK)
-#define PVR_DIV_ZERO_EXCEPTION(pvr) \
-			(pvr.pvr[2] & PVR2_DIV_ZERO_EXC_MASK)
-#define PVR_FPU_EXCEPTION(pvr)		(pvr.pvr[2] & PVR2_FPU_EXC_MASK)
-#define PVR_FSL_EXCEPTION(pvr)		(pvr.pvr[2] & PVR2_USE_EXTEND_FSL)
+#define PVR_D_OPB(_pvr)		(_pvr.pvr[2] & PVR2_D_OPB_MASK)
+#define PVR_D_LMB(_pvr)		(_pvr.pvr[2] & PVR2_D_LMB_MASK)
+#define PVR_I_OPB(_pvr)		(_pvr.pvr[2] & PVR2_I_OPB_MASK)
+#define PVR_I_LMB(_pvr)		(_pvr.pvr[2] & PVR2_I_LMB_MASK)
+#define PVR_INTERRUPT_IS_EDGE(_pvr) \
+			(_pvr.pvr[2] & PVR2_INTERRUPT_IS_EDGE_MASK)
+#define PVR_EDGE_IS_POSITIVE(_pvr) \
+			(_pvr.pvr[2] & PVR2_EDGE_IS_POSITIVE_MASK)
+#define PVR_USE_MSR_INSTR(_pvr)		(_pvr.pvr[2] & PVR2_USE_MSR_INSTR)
+#define PVR_USE_PCMP_INSTR(_pvr)	(_pvr.pvr[2] & PVR2_USE_PCMP_INSTR)
+#define PVR_AREA_OPTIMISED(_pvr)	(_pvr.pvr[2] & PVR2_AREA_OPTIMISED)
+#define PVR_USE_MUL64(_pvr)		(_pvr.pvr[2] & PVR2_USE_MUL64_MASK)
+#define PVR_OPCODE_0x0_ILLEGAL(_pvr) \
+			(_pvr.pvr[2] & PVR2_OPCODE_0x0_ILL_MASK)
+#define PVR_UNALIGNED_EXCEPTION(_pvr) \
+			(_pvr.pvr[2] & PVR2_UNALIGNED_EXC_MASK)
+#define PVR_ILL_OPCODE_EXCEPTION(_pvr) \
+			(_pvr.pvr[2] & PVR2_ILL_OPCODE_EXC_MASK)
+#define PVR_IOPB_BUS_EXCEPTION(_pvr) \
+			(_pvr.pvr[2] & PVR2_IOPB_BUS_EXC_MASK)
+#define PVR_DOPB_BUS_EXCEPTION(_pvr) \
+			(_pvr.pvr[2] & PVR2_DOPB_BUS_EXC_MASK)
+#define PVR_DIV_ZERO_EXCEPTION(_pvr) \
+			(_pvr.pvr[2] & PVR2_DIV_ZERO_EXC_MASK)
+#define PVR_FPU_EXCEPTION(_pvr)		(_pvr.pvr[2] & PVR2_FPU_EXC_MASK)
+#define PVR_FSL_EXCEPTION(_pvr)		(_pvr.pvr[2] & PVR2_USE_EXTEND_FSL)
 
-#define PVR_DEBUG_ENABLED(pvr)		(pvr.pvr[3] & PVR3_DEBUG_ENABLED_MASK)
-#define PVR_NUMBER_OF_PC_BRK(pvr) \
-			((pvr.pvr[3] & PVR3_NUMBER_OF_PC_BRK_MASK) >> 25)
-#define PVR_NUMBER_OF_RD_ADDR_BRK(pvr) \
-			((pvr.pvr[3] & PVR3_NUMBER_OF_RD_ADDR_BRK_MASK) >> 19)
-#define PVR_NUMBER_OF_WR_ADDR_BRK(pvr) \
-			((pvr.pvr[3] & PVR3_NUMBER_OF_WR_ADDR_BRK_MASK) >> 13)
-#define PVR_FSL_LINKS(pvr)	((pvr.pvr[3] & PVR3_FSL_LINKS_MASK) >> 7)
+#define PVR_DEBUG_ENABLED(_pvr)		(_pvr.pvr[3] & PVR3_DEBUG_ENABLED_MASK)
+#define PVR_NUMBER_OF_PC_BRK(_pvr) \
+			((_pvr.pvr[3] & PVR3_NUMBER_OF_PC_BRK_MASK) >> 25)
+#define PVR_NUMBER_OF_RD_ADDR_BRK(_pvr) \
+			((_pvr.pvr[3] & PVR3_NUMBER_OF_RD_ADDR_BRK_MASK) >> 19)
+#define PVR_NUMBER_OF_WR_ADDR_BRK(_pvr) \
+			((_pvr.pvr[3] & PVR3_NUMBER_OF_WR_ADDR_BRK_MASK) >> 13)
+#define PVR_FSL_LINKS(_pvr)	((_pvr.pvr[3] & PVR3_FSL_LINKS_MASK) >> 7)
 
-#define PVR_ICACHE_ADDR_TAG_BITS(pvr) \
-			((pvr.pvr[4] & PVR4_ICACHE_ADDR_TAG_BITS_MASK) >> 26)
-#define PVR_ICACHE_USE_FSL(pvr)		(pvr.pvr[4] & PVR4_ICACHE_USE_FSL_MASK)
-#define PVR_ICACHE_ALLOW_WR(pvr)	(pvr.pvr[4] & PVR4_ICACHE_ALLOW_WR_MASK)
-#define PVR_ICACHE_LINE_LEN(pvr) \
-			(1 << ((pvr.pvr[4] & PVR4_ICACHE_LINE_LEN_MASK) >> 21))
-#define PVR_ICACHE_BYTE_SIZE(pvr) \
-			(1 << ((pvr.pvr[4] & PVR4_ICACHE_BYTE_SIZE_MASK) >> 16))
+#define PVR_ICACHE_ADDR_TAG_BITS(_pvr) \
+		((_pvr.pvr[4] & PVR4_ICACHE_ADDR_TAG_BITS_MASK) >> 26)
+#define PVR_ICACHE_USE_FSL(_pvr) \
+		(_pvr.pvr[4] & PVR4_ICACHE_USE_FSL_MASK)
+#define PVR_ICACHE_ALLOW_WR(_pvr) \
+		(_pvr.pvr[4] & PVR4_ICACHE_ALLOW_WR_MASK)
+#define PVR_ICACHE_LINE_LEN(_pvr) \
+		(1 << ((_pvr.pvr[4] & PVR4_ICACHE_LINE_LEN_MASK) >> 21))
+#define PVR_ICACHE_BYTE_SIZE(_pvr) \
+		(1 << ((_pvr.pvr[4] & PVR4_ICACHE_BYTE_SIZE_MASK) >> 16))
 
-#define PVR_DCACHE_ADDR_TAG_BITS(pvr) \
-			((pvr.pvr[5] & PVR5_DCACHE_ADDR_TAG_BITS_MASK) >> 26)
-#define PVR_DCACHE_USE_FSL(pvr)		(pvr.pvr[5] & PVR5_DCACHE_USE_FSL_MASK)
-#define PVR_DCACHE_ALLOW_WR(pvr)	(pvr.pvr[5] & PVR5_DCACHE_ALLOW_WR_MASK)
+#define PVR_DCACHE_ADDR_TAG_BITS(_pvr) \
+			((_pvr.pvr[5] & PVR5_DCACHE_ADDR_TAG_BITS_MASK) >> 26)
+#define PVR_DCACHE_USE_FSL(_pvr)	(_pvr.pvr[5] & PVR5_DCACHE_USE_FSL_MASK)
+#define PVR_DCACHE_ALLOW_WR(_pvr) \
+			(_pvr.pvr[5] & PVR5_DCACHE_ALLOW_WR_MASK)
 /* FIXME two shifts on one line needs any comment */
-#define PVR_DCACHE_LINE_LEN(pvr) \
-			(1 << ((pvr.pvr[5] & PVR5_DCACHE_LINE_LEN_MASK) >> 21))
-#define PVR_DCACHE_BYTE_SIZE(pvr) \
-			(1 << ((pvr.pvr[5] & PVR5_DCACHE_BYTE_SIZE_MASK) >> 16))
+#define PVR_DCACHE_LINE_LEN(_pvr) \
+		(1 << ((_pvr.pvr[5] & PVR5_DCACHE_LINE_LEN_MASK) >> 21))
+#define PVR_DCACHE_BYTE_SIZE(_pvr) \
+		(1 << ((_pvr.pvr[5] & PVR5_DCACHE_BYTE_SIZE_MASK) >> 16))
 
-#define PVR_DCACHE_USE_WRITEBACK(pvr) \
-			((pvr.pvr[5] & PVR5_DCACHE_USE_WRITEBACK) >> 14)
+#define PVR_DCACHE_USE_WRITEBACK(_pvr) \
+			((_pvr.pvr[5] & PVR5_DCACHE_USE_WRITEBACK) >> 14)
 
-#define PVR_ICACHE_BASEADDR(pvr)	(pvr.pvr[6] & PVR6_ICACHE_BASEADDR_MASK)
-#define PVR_ICACHE_HIGHADDR(pvr)	(pvr.pvr[7] & PVR7_ICACHE_HIGHADDR_MASK)
+#define PVR_ICACHE_BASEADDR(_pvr) \
+			(_pvr.pvr[6] & PVR6_ICACHE_BASEADDR_MASK)
+#define PVR_ICACHE_HIGHADDR(_pvr) \
+			(_pvr.pvr[7] & PVR7_ICACHE_HIGHADDR_MASK)
+#define PVR_DCACHE_BASEADDR(_pvr) \
+			(_pvr.pvr[8] & PVR8_DCACHE_BASEADDR_MASK)
+#define PVR_DCACHE_HIGHADDR(_pvr) \
+			(_pvr.pvr[9] & PVR9_DCACHE_HIGHADDR_MASK)
 
-#define PVR_DCACHE_BASEADDR(pvr)	(pvr.pvr[8] & PVR8_DCACHE_BASEADDR_MASK)
-#define PVR_DCACHE_HIGHADDR(pvr)	(pvr.pvr[9] & PVR9_DCACHE_HIGHADDR_MASK)
+#define PVR_TARGET_FAMILY(_pvr) \
+			((_pvr.pvr[10] & PVR10_TARGET_FAMILY_MASK) >> 24)
 
-#define PVR_TARGET_FAMILY(pvr)	((pvr.pvr[10] & PVR10_TARGET_FAMILY_MASK) >> 24)
-
-#define PVR_MSR_RESET_VALUE(pvr) \
-				(pvr.pvr[11] & PVR11_MSR_RESET_VALUE_MASK)
+#define PVR_MSR_RESET_VALUE(_pvr) \
+			(_pvr.pvr[11] & PVR11_MSR_RESET_VALUE_MASK)
 
 /* mmu */
-#define PVR_USE_MMU(pvr)	((pvr.pvr[11] & PVR11_USE_MMU) >> 30)
-#define PVR_MMU_ITLB_SIZE(pvr)	(pvr.pvr[11] & PVR11_MMU_ITLB_SIZE)
-#define PVR_MMU_DTLB_SIZE(pvr)	(pvr.pvr[11] & PVR11_MMU_DTLB_SIZE)
-#define PVR_MMU_TLB_ACCESS(pvr)	(pvr.pvr[11] & PVR11_MMU_TLB_ACCESS)
-#define PVR_MMU_ZONES(pvr)	(pvr.pvr[11] & PVR11_MMU_ZONES)
+#define PVR_USE_MMU(_pvr)		((_pvr.pvr[11] & PVR11_USE_MMU) >> 30)
+#define PVR_MMU_ITLB_SIZE(_pvr)		(_pvr.pvr[11] & PVR11_MMU_ITLB_SIZE)
+#define PVR_MMU_DTLB_SIZE(_pvr)		(_pvr.pvr[11] & PVR11_MMU_DTLB_SIZE)
+#define PVR_MMU_TLB_ACCESS(_pvr)	(_pvr.pvr[11] & PVR11_MMU_TLB_ACCESS)
+#define PVR_MMU_ZONES(_pvr)		(_pvr.pvr[11] & PVR11_MMU_ZONES)
 
 /* endian */
-#define PVR_ENDIAN(pvr)	(pvr.pvr[0] & PVR0_ENDI)
+#define PVR_ENDIAN(_pvr)	(_pvr.pvr[0] & PVR0_ENDI)
 
 int cpu_has_pvr(void);
 void get_pvr(struct pvr_s *pvr);
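
The PVR hunk above renames every macro parameter from pvr to _pvr so the parameter token no longer reads like the caller's variable or the pvr[] member it expands against. A tiny host-side illustration of how these accessors are meant to be used; the struct layout and mask value are stand-ins, not the MicroBlaze definitions:

#include <stdio.h>

struct pvr_s { unsigned int pvr[12]; };

#define PVR0_VERSION_MASK 0x0000FF00          /* stand-in mask */
#define PVR_VERSION(_pvr) ((_pvr.pvr[0] & PVR0_VERSION_MASK) >> 8)

int main(void)
{
	struct pvr_s pvr = { .pvr = { 0x00001200 } };

	printf("core version code: 0x%02x\n", PVR_VERSION(pvr));  /* 0x12 */
	return 0;
}
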
diff --git a/arch/microblaze/kernel/cpu/cpuinfo.c b/arch/microblaze/kernel/cpu/cpuinfo.c
index 87c79fa..2c309fc 100644
--- a/arch/microblaze/kernel/cpu/cpuinfo.c
+++ b/arch/microblaze/kernel/cpu/cpuinfo.c
@@ -32,6 +32,7 @@
 	{"7.30.a", 0x10},
 	{"7.30.b", 0x11},
 	{"8.00.a", 0x12},
+	{"8.00.b", 0x13},
 	{NULL, 0},
 };
 
diff --git a/arch/microblaze/kernel/entry.S b/arch/microblaze/kernel/entry.S
index 819238b..41c30cd 100644
--- a/arch/microblaze/kernel/entry.S
+++ b/arch/microblaze/kernel/entry.S
@@ -287,25 +287,44 @@
  * are masked. This is nice, means we don't have to CLI before state save
  */
 C_ENTRY(_user_exception):
-	addi	r14, r14, 4	/* return address is 4 byte after call */
 	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
+	addi	r14, r14, 4	/* return address is 4 byte after call */
 
+	mfs	r1, rmsr
+	nop
+	andi	r1, r1, MSR_UMS
+	bnei	r1, 1f
+
+/* Kernel-mode state save - kernel execve */
+	lwi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* Reload kernel stack-ptr*/
+	tophys(r1,r1);
+
+	addik	r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */
+	SAVE_REGS
+
+	swi	r1, r1, PTO + PT_MODE; /* pt_regs -> kernel mode */
+	brid	2f;
+	nop;				/* Fill delay slot */
+
+/* User-mode state save.  */
+1:
 	lwi	r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
 	tophys(r1,r1);
 	lwi	r1, r1, TS_THREAD_INFO;	/* get stack from task_struct */
-	/* MS these three instructions can be added to one */
-	/* addik	r1, r1, THREAD_SIZE; */
-	/* tophys(r1,r1); */
-	/* addik	r1, r1, -STATE_SAVE_SIZE; */
-	addik r1, r1, THREAD_SIZE + CONFIG_KERNEL_BASE_ADDR - CONFIG_KERNEL_START - STATE_SAVE_SIZE;
+/* calculate kernel stack pointer from task struct 8k */
+	addik	r1, r1, THREAD_SIZE;
+	tophys(r1,r1);
+
+	addik	r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack.  */
 	SAVE_REGS
 	swi	r0, r1, PTO + PT_R3
 	swi	r0, r1, PTO + PT_R4
 
+	swi	r0, r1, PTO + PT_MODE;			/* Was in user-mode. */
 	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
 	swi	r11, r1, PTO+PT_R1;		/* Store user SP.  */
 	clear_ums;
-	lwi	CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
+2:	lwi	CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
 	/* Save away the syscall number.  */
 	swi	r12, r1, PTO+PT_R0;
 	tovirt(r1,r1)
@@ -375,6 +394,9 @@
 	swi	r3, r1, PTO + PT_R3
 	swi	r4, r1, PTO + PT_R4
 
+	lwi	r11, r1, PTO + PT_MODE;
+/* See if returning to kernel mode, if so, skip resched &c.  */
+	bnei	r11, 2f;
 	/* We're returning to user mode, so check for various conditions that
 	 * trigger rescheduling. */
 	/* FIXME: Restructure all these flag checks. */
@@ -417,6 +439,16 @@
 	RESTORE_REGS;
 	addik	r1, r1, STATE_SAVE_SIZE		/* Clean up stack space.  */
 	lwi	r1, r1, PT_R1 - PT_SIZE;/* Restore user stack pointer. */
+	bri	6f;
+
+/* Return to kernel state.  */
+2:	set_bip;			/*  Ints masked for state restore */
+	VM_OFF;
+	tophys(r1,r1);
+	RESTORE_REGS;
+	addik	r1, r1, STATE_SAVE_SIZE		/* Clean up stack space.  */
+	tovirt(r1,r1);
+6:
 TRAP_return:		/* Make global symbol for debugging */
 	rtbd	r14, 0;	/* Instructions to return from an IRQ */
 	nop;
diff --git a/arch/microblaze/kernel/exceptions.c b/arch/microblaze/kernel/exceptions.c
index 478f294..a7fa6ae7 100644
--- a/arch/microblaze/kernel/exceptions.c
+++ b/arch/microblaze/kernel/exceptions.c
@@ -25,6 +25,7 @@
 #include <linux/errno.h>
 #include <linux/ptrace.h>
 #include <asm/current.h>
+#include <asm/cacheflush.h>
 
 #define MICROBLAZE_ILL_OPCODE_EXCEPTION	0x02
 #define MICROBLAZE_IBUS_EXCEPTION	0x03
@@ -52,6 +53,8 @@
 void sw_exception(struct pt_regs *regs)
 {
 	_exception(SIGTRAP, regs, TRAP_BRKPT, regs->r16);
+	flush_dcache_range(regs->r16, regs->r16 + 0x4);
+	flush_icache_range(regs->r16, regs->r16 + 0x4);
 }
 
 void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
diff --git a/arch/microblaze/kernel/hw_exception_handler.S b/arch/microblaze/kernel/hw_exception_handler.S
index 7811954..25f6e07 100644
--- a/arch/microblaze/kernel/hw_exception_handler.S
+++ b/arch/microblaze/kernel/hw_exception_handler.S
@@ -945,11 +945,20 @@
 store4:	sbi	r3, r4, 3;	/* Delay slot */
 ex_shw_vm:
 	/* Store the lower half-word, byte-by-byte into destination address */
+#ifdef __MICROBLAZEEL__
+	lbui	r3, r5, 0;
+store5:	sbi	r3, r4, 0;
+	lbui	r3, r5, 1;
+	brid	ret_from_exc;
+store6:	sbi	r3, r4, 1;	/* Delay slot */
+#else
 	lbui	r3, r5, 2;
 store5:	sbi	r3, r4, 0;
 	lbui	r3, r5, 3;
 	brid	ret_from_exc;
 store6:	sbi	r3, r4, 1;	/* Delay slot */
+#endif
+
 ex_sw_end_vm:			/* Exception handling of store word, ends. */
 
 /* We have to prevent cases that get/put_user macros get unaligned pointer
diff --git a/arch/microblaze/kernel/prom.c b/arch/microblaze/kernel/prom.c
index a105301..c881393 100644
--- a/arch/microblaze/kernel/prom.c
+++ b/arch/microblaze/kernel/prom.c
@@ -61,14 +61,12 @@
 	char *p;
 	int *addr;
 
-	pr_debug("search \"chosen\", depth: %d, uname: %s\n", depth, uname);
+	pr_debug("search \"serial\", depth: %d, uname: %s\n", depth, uname);
 
 /* find all serial nodes */
 	if (strncmp(uname, "serial", 6) != 0)
 		return 0;
 
-	early_init_dt_check_for_initrd(node);
-
 /* find compatible node with uartlite */
 	p = of_get_flat_dt_prop(node, "compatible", &l);
 	if ((strncmp(p, "xlnx,xps-uartlite", 17) != 0) &&
diff --git a/arch/microblaze/kernel/vmlinux.lds.S b/arch/microblaze/kernel/vmlinux.lds.S
index 96a88c3..3451bde 100644
--- a/arch/microblaze/kernel/vmlinux.lds.S
+++ b/arch/microblaze/kernel/vmlinux.lds.S
@@ -123,20 +123,10 @@
 
 	__init_end_before_initramfs = .;
 
-	.init.ramfs ALIGN(PAGE_SIZE) : AT(ADDR(.init.ramfs) - LOAD_OFFSET) {
-		__initramfs_start = .;
-		*(.init.ramfs)
-		__initramfs_end = .;
-		. = ALIGN(4);
-		LONG(0);
-/*
- * FIXME this can break initramfs for MMU.
- * Pad init.ramfs up to page boundary,
- * so that __init_end == __bss_start. This will make image.elf
- * consistent with the image.bin
- */
-		/* . = ALIGN(PAGE_SIZE); */
+	.init.ramfs : AT(ADDR(.init.ramfs) - LOAD_OFFSET) {
+		INIT_RAM_FS
 	}
+
 	__init_end = .;
 
 	.bss ALIGN (PAGE_SIZE) : AT(ADDR(.bss) - LOAD_OFFSET) {
diff --git a/arch/microblaze/lib/memmove.c b/arch/microblaze/lib/memmove.c
index 123e361..810fd68 100644
--- a/arch/microblaze/lib/memmove.c
+++ b/arch/microblaze/lib/memmove.c
@@ -182,7 +182,7 @@
 			for (; c >= 4; c -= 4) {
 				value = *--i_src;
 				*--i_dst = buf_hold | ((value & 0xFF000000)>> 24);
-				buf_hold = (value & 0xFFFFFF) << 8;;
+				buf_hold = (value & 0xFFFFFF) << 8;
 			}
 #endif
 			/* Realign the source */
diff --git a/arch/microblaze/lib/muldi3.S b/arch/microblaze/lib/muldi3.S
deleted file mode 100644
index ceeaa8c..0000000
--- a/arch/microblaze/lib/muldi3.S
+++ /dev/null
@@ -1,121 +0,0 @@
-#include <linux/linkage.h>
-
-/*
- * Multiply operation for 64 bit integers, for devices with hard multiply
- *	Input :	Operand1[H] in Reg r5
- *		Operand1[L] in Reg r6
- *		Operand2[H] in Reg r7
- *		Operand2[L] in Reg r8
- *	Output: Result[H] in Reg r3
- *		Result[L] in Reg r4
- *
- * Explaination:
- *
- *	Both the input numbers are divided into 16 bit number as follows
- *		op1 = A B C D
- *		op2 = E F G H
- *	result = D * H
- *		 + (C * H + D * G) << 16
- *		 + (B * H + C * G + D * F) << 32
- *		 + (A * H + B * G + C * F + D * E) << 48
- *
- *	Only 64 bits of the output are considered
- */
-
-	.text
-	.globl	__muldi3
-	.type __muldi3, @function
-	.ent __muldi3
-
-__muldi3:
-	addi	r1, r1, -40
-
-/* Save the input operands on the caller's stack */
-	swi	r5, r1, 44
-	swi	r6, r1, 48
-	swi	r7, r1, 52
-	swi	r8, r1, 56
-
-/* Store all the callee saved registers */
-	sw	r20, r1, r0
-	swi	r21, r1, 4
-	swi	r22, r1, 8
-	swi	r23, r1, 12
-	swi	r24, r1, 16
-	swi	r25, r1, 20
-	swi	r26, r1, 24
-	swi	r27, r1, 28
-
-/* Load all the 16 bit values for A thru H */
-	lhui	r20, r1, 44 /* A */
-	lhui	r21, r1, 46 /* B */
-	lhui	r22, r1, 48 /* C */
-	lhui	r23, r1, 50 /* D */
-	lhui	r24, r1, 52 /* E */
-	lhui	r25, r1, 54 /* F */
-	lhui	r26, r1, 56 /* G */
-	lhui	r27, r1, 58 /* H */
-
-/* D * H ==> LSB of the result on stack ==> Store1 */
-	mul	r9, r23, r27
-	swi	r9, r1, 36 /* Pos2 and Pos3 */
-
-/* Hi (Store1) + C * H + D * G ==> Store2 ==> Pos1 and Pos2 */
-/* Store the carry generated in position 2 for Pos 3 */
-	lhui	r11, r1, 36 /* Pos2 */
-	mul	r9, r22, r27 /* C * H */
-	mul	r10, r23, r26 /* D * G */
-	add	r9, r9, r10
-	addc	r12, r0, r0
-	add	r9, r9, r11
-	addc	r12, r12, r0 /* Store the Carry */
-	shi	r9, r1, 36 /* Store Pos2 */
-	swi	r9, r1, 32
-	lhui	r11, r1, 32
-	shi	r11, r1, 34 /* Store Pos1 */
-
-/* Hi (Store2) + B * H + C * G + D * F ==> Store3 ==> Pos0 and Pos1 */
-	mul	r9, r21, r27 /* B * H */
-	mul	r10, r22, r26 /* C * G */
-	mul	r7, r23, r25 /* D * F */
-	add	r9, r9, r11
-	add	r9, r9, r10
-	add	r9, r9, r7
-	swi	r9, r1, 32 /* Pos0 and Pos1 */
-
-/* Hi (Store3) + A * H + B * G + C * F + D * E ==> Store3 ==> Pos0 */
-	lhui	r11, r1, 32 /* Pos0 */
-	mul	r9, r20, r27 /* A * H */
-	mul	r10, r21, r26 /* B * G */
-	mul	r7, r22, r25 /* C * F */
-	mul	r8, r23, r24 /* D * E */
-	add	r9, r9, r11
-	add 	r9, r9, r10
-	add	r9, r9, r7
-	add	r9, r9, r8
-	sext16	r9, r9 /* Sign extend the MSB */
-	shi	r9, r1, 32
-
-/* Move results to r3 and r4 */
-	lhui	r3, r1, 32
-	add	r3, r3, r12
-	shi	r3, r1, 32
-	lwi	r3, r1, 32 /* Hi Part */
-	lwi	r4, r1, 36 /* Lo Part */
-
-/* Restore Callee saved registers */
-	lw	r20, r1, r0
-	lwi	r21, r1, 4
-	lwi	r22, r1, 8
-	lwi	r23, r1, 12
-	lwi	r24, r1, 16
-	lwi	r25, r1, 20
-	lwi	r26, r1, 24
-	lwi	r27, r1, 28
-
-/* Restore Frame and return */
-	rtsd	r15, 8
-	addi	r1, r1, 40
-
-.size __muldi3, . - __muldi3
-.end __muldi3
diff --git a/arch/microblaze/lib/muldi3.c b/arch/microblaze/lib/muldi3.c
new file mode 100644
index 0000000..d4860e1
--- /dev/null
+++ b/arch/microblaze/lib/muldi3.c
@@ -0,0 +1,60 @@
+#include <linux/module.h>
+
+#include "libgcc.h"
+
+#define DWtype long long
+#define UWtype unsigned long
+#define UHWtype unsigned short
+
+#define W_TYPE_SIZE 32
+
+#define __ll_B ((UWtype) 1 << (W_TYPE_SIZE / 2))
+#define __ll_lowpart(t) ((UWtype) (t) & (__ll_B - 1))
+#define __ll_highpart(t) ((UWtype) (t) >> (W_TYPE_SIZE / 2))
+
+/* If we still don't have umul_ppmm, define it using plain C.  */
+#if !defined(umul_ppmm)
+#define umul_ppmm(w1, w0, u, v)						\
+	do {								\
+		UWtype __x0, __x1, __x2, __x3;				\
+		UHWtype __ul, __vl, __uh, __vh;				\
+									\
+		__ul = __ll_lowpart(u);					\
+		__uh = __ll_highpart(u);				\
+		__vl = __ll_lowpart(v);					\
+		__vh = __ll_highpart(v);				\
+									\
+		__x0 = (UWtype) __ul * __vl;				\
+		__x1 = (UWtype) __ul * __vh;				\
+		__x2 = (UWtype) __uh * __vl;				\
+		__x3 = (UWtype) __uh * __vh;				\
+									\
+		__x1 += __ll_highpart(__x0); /* this can't give carry */\
+		__x1 += __x2; /* but this indeed can */			\
+		if (__x1 < __x2) /* did we get it? */			\
+		__x3 += __ll_B; /* yes, add it in the proper pos */	\
+									\
+		(w1) = __x3 + __ll_highpart(__x1);			\
+		(w0) = __ll_lowpart(__x1) * __ll_B + __ll_lowpart(__x0);\
+	} while (0)
+#endif
+
+#if !defined(__umulsidi3)
+#define __umulsidi3(u, v) ({				\
+	DWunion __w;					\
+	umul_ppmm(__w.s.high, __w.s.low, u, v);		\
+	__w.ll;						\
+	})
+#endif
+
+DWtype __muldi3(DWtype u, DWtype v)
+{
+	const DWunion uu = {.ll = u};
+	const DWunion vv = {.ll = v};
+	DWunion w = {.ll = __umulsidi3(uu.s.low, vv.s.low)};
+
+	w.s.high += ((UWtype) uu.s.low * (UWtype) vv.s.high
+		+ (UWtype) uu.s.high * (UWtype) vv.s.low);
+
+	return w.ll;
+}
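
The new C __muldi3 above builds a 64-bit product from 32-bit halves: the full ul*vl partial product plus the low 32 bits of the cross terms shifted up by 32 (the uh*vh term never reaches the low 64 bits). A host-side sketch, using stdint types rather than the kernel's DWtype/UWtype, that checks this decomposition against a native multiply:

#include <stdio.h>
#include <stdint.h>

static uint64_t muldi3_by_halves(uint64_t u, uint64_t v)
{
	uint32_t ul = (uint32_t)u, uh = (uint32_t)(u >> 32);
	uint32_t vl = (uint32_t)v, vh = (uint32_t)(v >> 32);
	uint64_t low = (uint64_t)ul * vl;       /* full 64-bit partial product */
	uint32_t cross = (uint32_t)(ul * (uint64_t)vh + uh * (uint64_t)vl);

	return low + ((uint64_t)cross << 32);   /* truncated to 64 bits */
}

int main(void)
{
	uint64_t u = 0x123456789abcdef0ULL, v = 0x0fedcba987654321ULL;

	printf("halves: %llx\n", (unsigned long long)muldi3_by_halves(u, v));
	printf("native: %llx\n", (unsigned long long)(u * v));
	return 0;
}
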
diff --git a/arch/mips/alchemy/common/power.c b/arch/mips/alchemy/common/power.c
index e5916a5..647e518 100644
--- a/arch/mips/alchemy/common/power.c
+++ b/arch/mips/alchemy/common/power.c
@@ -130,7 +130,7 @@
 	au_writel(sleep_usb[1], USBD_ENABLE);
 	au_sync();
 #else
-	/* enable accces to OTG memory */
+	/* enable access to OTG memory */
 	au_writel(au_readl(USB_MSR_BASE + 4) | (1 << 6), USB_MSR_BASE + 4);
 	au_sync();
 
diff --git a/arch/mips/alchemy/devboards/pm.c b/arch/mips/alchemy/devboards/pm.c
index 4bbd313..acaf91b 100644
--- a/arch/mips/alchemy/devboards/pm.c
+++ b/arch/mips/alchemy/devboards/pm.c
@@ -110,7 +110,7 @@
 
 }
 
-static struct platform_suspend_ops db1x_pm_ops = {
+static const struct platform_suspend_ops db1x_pm_ops = {
 	.valid		= suspend_valid_only_mem,
 	.begin		= db1x_pm_begin,
 	.enter		= db1x_pm_enter,
diff --git a/arch/mips/dec/time.c b/arch/mips/dec/time.c
index 02f505f..ea57f39 100644
--- a/arch/mips/dec/time.c
+++ b/arch/mips/dec/time.c
@@ -104,7 +104,7 @@
 		CMOS_WRITE(real_seconds, RTC_SECONDS);
 		CMOS_WRITE(real_minutes, RTC_MINUTES);
 	} else {
-		printk(KERN_WARNING
+		printk_once(KERN_NOTICE
 		       "set_rtc_mmss: can't update from %d to %d\n",
 		       cmos_minutes, real_minutes);
 		retval = -1;
diff --git a/arch/mips/include/asm/mach-powertv/ioremap.h b/arch/mips/include/asm/mach-powertv/ioremap.h
index 076f2ee..c86ef09 100644
--- a/arch/mips/include/asm/mach-powertv/ioremap.h
+++ b/arch/mips/include/asm/mach-powertv/ioremap.h
@@ -88,7 +88,7 @@
 }
 
 /* These are not portable and should not be used in drivers. Drivers should
- * be using ioremap() and friends to map physical addreses to virtual
+ * be using ioremap() and friends to map physical addresses to virtual
  * addresses and dma_map*() and friends to map virtual addresses into DMA
  * addresses and back.
  */
diff --git a/arch/mips/include/asm/mc146818-time.h b/arch/mips/include/asm/mc146818-time.h
index 199b457..4a08dbe 100644
--- a/arch/mips/include/asm/mc146818-time.h
+++ b/arch/mips/include/asm/mc146818-time.h
@@ -66,7 +66,7 @@
 		CMOS_WRITE(real_seconds, RTC_SECONDS);
 		CMOS_WRITE(real_minutes, RTC_MINUTES);
 	} else {
-		printk(KERN_WARNING
+		printk_once(KERN_NOTICE
 		       "set_rtc_mmss: can't update from %d to %d\n",
 		       cmos_minutes, real_minutes);
 		retval = -1;
diff --git a/arch/mips/jz4740/board-qi_lb60.c b/arch/mips/jz4740/board-qi_lb60.c
index 5c0a357..2c0e107 100644
--- a/arch/mips/jz4740/board-qi_lb60.c
+++ b/arch/mips/jz4740/board-qi_lb60.c
@@ -65,7 +65,7 @@
 
 /* Early prototypes of the QI LB60 had only 1GB of NAND.
  * In order to support these devices aswell the partition and ecc layout is
- * initalized depending on the NAND size */
+ * initialized depending on the NAND size */
 static struct mtd_partition qi_lb60_partitions_1gb[] = {
 	{
 		.name = "NAND BOOT partition",
@@ -464,7 +464,7 @@
 	board_gpio_setup();
 
 	if (qi_lb60_init_platform_devices())
-		panic("Failed to initalize platform devices\n");
+		panic("Failed to initialize platform devices\n");
 
 	return 0;
 }
diff --git a/arch/mips/jz4740/gpio.c b/arch/mips/jz4740/gpio.c
index 38f60f3..88e6aed 100644
--- a/arch/mips/jz4740/gpio.c
+++ b/arch/mips/jz4740/gpio.c
@@ -546,7 +546,7 @@
 	for (i = 0; i < ARRAY_SIZE(jz4740_gpio_chips); ++i)
 		jz4740_gpio_chip_init(&jz4740_gpio_chips[i], i);
 
-	printk(KERN_INFO "JZ4740 GPIO initalized\n");
+	printk(KERN_INFO "JZ4740 GPIO initialized\n");
 
 	return 0;
 }
diff --git a/arch/mips/jz4740/pm.c b/arch/mips/jz4740/pm.c
index a999458..902d5b5 100644
--- a/arch/mips/jz4740/pm.c
+++ b/arch/mips/jz4740/pm.c
@@ -42,7 +42,7 @@
 	return 0;
 }
 
-static struct platform_suspend_ops jz4740_pm_ops = {
+static const struct platform_suspend_ops jz4740_pm_ops = {
 	.valid		= suspend_valid_only_mem,
 	.enter		= jz4740_pm_enter,
 };
diff --git a/arch/mips/loongson/common/pm.c b/arch/mips/loongson/common/pm.c
index 6c1fd90..f55e07a 100644
--- a/arch/mips/loongson/common/pm.c
+++ b/arch/mips/loongson/common/pm.c
@@ -147,7 +147,7 @@
 	}
 }
 
-static struct platform_suspend_ops loongson_pm_ops = {
+static const struct platform_suspend_ops loongson_pm_ops = {
 	.valid	= loongson_pm_valid_state,
 	.enter	= loongson_pm_enter,
 };
diff --git a/arch/mips/mti-malta/malta-memory.c b/arch/mips/mti-malta/malta-memory.c
index b27419c..a96d281 100644
--- a/arch/mips/mti-malta/malta-memory.c
+++ b/arch/mips/mti-malta/malta-memory.c
@@ -43,7 +43,7 @@
 static char *mtypes[3] = {
 	"Dont use memory",
 	"YAMON PROM memory",
-	"Free memmory",
+	"Free memory",
 };
 #endif
 
diff --git a/arch/mips/pci/pcie-octeon.c b/arch/mips/pci/pcie-octeon.c
index 385f035..0583c463 100644
--- a/arch/mips/pci/pcie-octeon.c
+++ b/arch/mips/pci/pcie-octeon.c
@@ -900,7 +900,7 @@
 	mem_access_subid.s.ror = 0;
 	/* Disable Relaxed Ordering for Writes. */
 	mem_access_subid.s.row = 0;
-	/* PCIe Adddress Bits <63:34>. */
+	/* PCIe Address Bits <63:34>. */
 	mem_access_subid.s.ba = 0;
 
 	/*
diff --git a/arch/mips/powertv/memory.c b/arch/mips/powertv/memory.c
index 73880ad..fb3d296 100644
--- a/arch/mips/powertv/memory.c
+++ b/arch/mips/powertv/memory.c
@@ -57,7 +57,7 @@
 unsigned long ptv_memsize;
 
 /*
- * struct low_mem_reserved - Items in low memmory that are reserved
+ * struct low_mem_reserved - Items in low memory that are reserved
  * @start:	Physical address of item
  * @size:	Size, in bytes, of this item
  * @is_aliased:	True if this is RAM aliased from another location. If false,
diff --git a/arch/mips/txx9/generic/pci.c b/arch/mips/txx9/generic/pci.c
index 9a0be81..96e69a0 100644
--- a/arch/mips/txx9/generic/pci.c
+++ b/arch/mips/txx9/generic/pci.c
@@ -107,7 +107,7 @@
 
 /*
  * allocate pci_controller and resources.
- * mem_base, io_base: physical addresss.  0 for auto assignment.
+ * mem_base, io_base: physical address.  0 for auto assignment.
  * mem_size and io_size means max size on auto assignment.
  * pcic must be &txx9_primary_pcic or NULL.
  */
diff --git a/arch/mn10300/Kconfig b/arch/mn10300/Kconfig
index 41ba385..8ed41cf 100644
--- a/arch/mn10300/Kconfig
+++ b/arch/mn10300/Kconfig
@@ -203,6 +203,7 @@
 config SMP
 	bool "Symmetric multi-processing support"
 	default y
+	select USE_GENERIC_SMP_HELPERS
 	depends on MN10300_PROC_MN2WS0038 || MN10300_PROC_MN2WS0050
 	---help---
 	  This enables support for systems with more than one CPU. If you have
@@ -226,11 +227,6 @@
 	depends on SMP
 	default "2"
 
-config USE_GENERIC_SMP_HELPERS
-	bool
-	depends on SMP
-	default y
-
 source "kernel/Kconfig.preempt"
 
 config MN10300_CURRENT_IN_E2
diff --git a/arch/mn10300/kernel/rtc.c b/arch/mn10300/kernel/rtc.c
index e9e20f9..48d7058 100644
--- a/arch/mn10300/kernel/rtc.c
+++ b/arch/mn10300/kernel/rtc.c
@@ -89,7 +89,7 @@
 		CMOS_WRITE(real_seconds, RTC_SECONDS);
 		CMOS_WRITE(real_minutes, RTC_MINUTES);
 	} else {
-		printk(KERN_WARNING
+		printk_once(KERN_NOTICE
 		       "set_rtc_mmss: can't update from %d to %d\n",
 		       cmos_minutes, real_minutes);
 		retval = -1;
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 48fb479..959f38c 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -20,6 +20,9 @@
 config ARCH_PHYS_ADDR_T_64BIT
        def_bool PPC64 || PHYS_64BIT
 
+config ARCH_DMA_ADDR_T_64BIT
+	def_bool ARCH_PHYS_ADDR_T_64BIT
+
 config MMU
 	bool
 	default y
@@ -209,7 +212,7 @@
 config ARCH_SUSPEND_POSSIBLE
 	def_bool y
 	depends on ADB_PMU || PPC_EFIKA || PPC_LITE5200 || PPC_83xx || \
-		   PPC_85xx || PPC_86xx || PPC_PSERIES
+		   PPC_85xx || PPC_86xx || PPC_PSERIES || 44x || 40x
 
 config PPC_DCR_NATIVE
 	bool
@@ -595,13 +598,11 @@
 
 	  If unsure, leave blank
 
-if !44x || BROKEN
 config ARCH_WANTS_FREEZER_CONTROL
 	def_bool y
 	depends on ADB_PMU
 
 source kernel/power/Kconfig
-endif
 
 config SECCOMP
 	bool "Enable seccomp to safely compute untrusted bytecode"
@@ -682,6 +683,15 @@
 	  Freescale MPC85xx/MPC86xx power management controller support
 	  (suspend/resume). For MPC83xx see platforms/83xx/suspend.c
 
+config PPC4xx_CPM
+	bool
+	default y
+	depends on SUSPEND && (44x || 40x)
+	help
+	  PPC4xx Clock Power Management (CPM) support (suspend/resume).
+	  It also enables support for two different idle states (idle-wait
+	  and idle-doze).
+
 config 4xx_SOC
 	bool
 
diff --git a/arch/powerpc/boot/dts/canyonlands.dts b/arch/powerpc/boot/dts/canyonlands.dts
index a303703..5b27a4b 100644
--- a/arch/powerpc/boot/dts/canyonlands.dts
+++ b/arch/powerpc/boot/dts/canyonlands.dts
@@ -105,6 +105,15 @@
 		dcr-reg = <0x00c 0x002>;
 	};
 
+	CPM0: cpm {
+		compatible = "ibm,cpm";
+		dcr-access-method = "native";
+		dcr-reg = <0x160 0x003>;
+		unused-units = <0x00000100>;
+		idle-doze = <0x02000000>;
+		standby = <0xfeff791d>;
+	};
+
 	L2C0: l2c {
 		compatible = "ibm,l2-cache-460ex", "ibm,l2-cache";
 		dcr-reg = <0x020 0x008		/* Internal SRAM DCR's */
@@ -270,28 +279,6 @@
 				interrupts = <0x1 0x4>;
 			};
 
-			UART2: serial@ef600500 {
-				device_type = "serial";
-				compatible = "ns16550";
-				reg = <0xef600500 0x00000008>;
-				virtual-reg = <0xef600500>;
-				clock-frequency = <0>; /* Filled in by U-Boot */
-				current-speed = <0>; /* Filled in by U-Boot */
-				interrupt-parent = <&UIC1>;
-				interrupts = <28 0x4>;
-			};
-
-			UART3: serial@ef600600 {
-				device_type = "serial";
-				compatible = "ns16550";
-				reg = <0xef600600 0x00000008>;
-				virtual-reg = <0xef600600>;
-				clock-frequency = <0>; /* Filled in by U-Boot */
-				current-speed = <0>; /* Filled in by U-Boot */
-				interrupt-parent = <&UIC1>;
-				interrupts = <29 0x4>;
-			};
-
 			IIC0: i2c@ef600700 {
 				compatible = "ibm,iic-460ex", "ibm,iic";
 				reg = <0xef600700 0x00000014>;
diff --git a/arch/powerpc/boot/dts/kilauea.dts b/arch/powerpc/boot/dts/kilauea.dts
index 083e68e..89edb16 100644
--- a/arch/powerpc/boot/dts/kilauea.dts
+++ b/arch/powerpc/boot/dts/kilauea.dts
@@ -82,6 +82,15 @@
 		interrupt-parent = <&UIC0>;
 	};
 
+	CPM0: cpm {
+		compatible = "ibm,cpm";
+		dcr-access-method = "native";
+		dcr-reg = <0x0b0 0x003>;
+		unused-units = <0x00000000>;
+		idle-doze = <0x02000000>;
+		standby = <0xe3e74800>;
+	};
+
 	plb {
 		compatible = "ibm,plb-405ex", "ibm,plb4";
 		#address-cells = <1>;
diff --git a/arch/powerpc/boot/dts/mpc8308_p1m.dts b/arch/powerpc/boot/dts/mpc8308_p1m.dts
index 05a76cc..697b3f6 100644
--- a/arch/powerpc/boot/dts/mpc8308_p1m.dts
+++ b/arch/powerpc/boot/dts/mpc8308_p1m.dts
@@ -297,6 +297,14 @@
 			interrupt-parent = < &ipic >;
 		};
 
+		dma@2c000 {
+			compatible = "fsl,mpc8308-dma", "fsl,mpc5121-dma";
+			reg = <0x2c000 0x1800>;
+			interrupts = <3 0x8
+					94 0x8>;
+			interrupt-parent = < &ipic >;
+		};
+
 	};
 
 	pci0: pcie@e0009000 {
diff --git a/arch/powerpc/boot/dts/mpc8308rdb.dts b/arch/powerpc/boot/dts/mpc8308rdb.dts
index a97eb2d..d3db02f 100644
--- a/arch/powerpc/boot/dts/mpc8308rdb.dts
+++ b/arch/powerpc/boot/dts/mpc8308rdb.dts
@@ -265,6 +265,14 @@
 			interrupt-parent = < &ipic >;
 		};
 
+		dma@2c000 {
+			compatible = "fsl,mpc8308-dma", "fsl,mpc5121-dma";
+			reg = <0x2c000 0x1800>;
+			interrupts = <3 0x8
+					94 0x8>;
+			interrupt-parent = < &ipic >;
+		};
+
 	};
 
 	pci0: pcie@e0009000 {
diff --git a/arch/powerpc/configs/40x/kilauea_defconfig b/arch/powerpc/configs/40x/kilauea_defconfig
index 4e19ee7..34b8c1a 100644
--- a/arch/powerpc/configs/40x/kilauea_defconfig
+++ b/arch/powerpc/configs/40x/kilauea_defconfig
@@ -12,6 +12,8 @@
 CONFIG_MODULE_UNLOAD=y
 # CONFIG_BLK_DEV_BSG is not set
 CONFIG_KILAUEA=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
 # CONFIG_WALNUT is not set
 CONFIG_SPARSE_IRQ=y
 CONFIG_PCI=y
@@ -42,6 +44,9 @@
 CONFIG_MTD_NAND=y
 CONFIG_MTD_NAND_NDFC=y
 CONFIG_PROC_DEVICETREE=y
+CONFIG_PM=y
+CONFIG_SUSPEND=y
+CONFIG_PPC4xx_CPM=y
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_SIZE=35000
 # CONFIG_MISC_DEVICES is not set
diff --git a/arch/powerpc/configs/44x/canyonlands_defconfig b/arch/powerpc/configs/44x/canyonlands_defconfig
index 45c64d8..17e4dd9 100644
--- a/arch/powerpc/configs/44x/canyonlands_defconfig
+++ b/arch/powerpc/configs/44x/canyonlands_defconfig
@@ -42,6 +42,9 @@
 CONFIG_MTD_NAND=y
 CONFIG_MTD_NAND_NDFC=y
 CONFIG_PROC_DEVICETREE=y
+CONFIG_PM=y
+CONFIG_SUSPEND=y
+CONFIG_PPC4xx_CPM=y
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_SIZE=35000
 # CONFIG_MISC_DEVICES is not set
diff --git a/arch/powerpc/include/asm/8xx_immap.h b/arch/powerpc/include/asm/8xx_immap.h
index 4b0e152..6b6dc20 100644
--- a/arch/powerpc/include/asm/8xx_immap.h
+++ b/arch/powerpc/include/asm/8xx_immap.h
@@ -93,7 +93,7 @@
 } memctl8xx_t;
 
 /*-----------------------------------------------------------------------
- * BR - Memory Controler: Base Register					16-9
+ * BR - Memory Controller: Base Register					16-9
  */
 #define BR_BA_MSK	0xffff8000	/* Base Address Mask			*/
 #define BR_AT_MSK	0x00007000	/* Address Type Mask			*/
@@ -110,7 +110,7 @@
 #define BR_V		0x00000001	/* Bank Valid				*/
 
 /*-----------------------------------------------------------------------
- * OR - Memory Controler: Option Register				16-11
+ * OR - Memory Controller: Option Register				16-11
  */
 #define OR_AM_MSK	0xffff8000	/* Address Mask Mask			*/
 #define OR_ATM_MSK	0x00007000	/* Address Type Mask Mask		*/
diff --git a/arch/powerpc/include/asm/bitops.h b/arch/powerpc/include/asm/bitops.h
index 30964ae..8a7e9314 100644
--- a/arch/powerpc/include/asm/bitops.h
+++ b/arch/powerpc/include/asm/bitops.h
@@ -267,7 +267,16 @@
 #include <asm-generic/bitops/fls64.h>
 #endif /* __powerpc64__ */
 
+#ifdef CONFIG_PPC64
+unsigned int __arch_hweight8(unsigned int w);
+unsigned int __arch_hweight16(unsigned int w);
+unsigned int __arch_hweight32(unsigned int w);
+unsigned long __arch_hweight64(__u64 w);
+#include <asm-generic/bitops/const_hweight.h>
+#else
 #include <asm-generic/bitops/hweight.h>
+#endif
+
 #include <asm-generic/bitops/find.h>
 
 /* Little-endian versions */
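
On PPC64 the hunk above routes hweight through arch helpers (backed by the POPCNT instructions added elsewhere in this merge) while other configs keep the asm-generic software path. For reference, a stand-alone sketch of the usual bit-twiddling fallback that the generic path amounts to; this is not the kernel's exact implementation:

#include <stdio.h>
#include <stdint.h>

static unsigned int sw_hweight32(uint32_t w)
{
	w = w - ((w >> 1) & 0x55555555);                 /* 2-bit sums */
	w = (w & 0x33333333) + ((w >> 2) & 0x33333333);  /* 4-bit sums */
	w = (w + (w >> 4)) & 0x0f0f0f0f;                 /* 8-bit sums */
	return (w * 0x01010101) >> 24;                   /* fold into top byte */
}

int main(void)
{
	printf("%u\n", sw_hweight32(0xf0f0000f));  /* prints 12 */
	return 0;
}
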
diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
index f3a1fdd..f0a211d 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -199,6 +199,8 @@
 #define CPU_FTR_UNALIGNED_LD_STD	LONG_ASM_CONST(0x0080000000000000)
 #define CPU_FTR_ASYM_SMT		LONG_ASM_CONST(0x0100000000000000)
 #define CPU_FTR_STCX_CHECKS_ADDRESS	LONG_ASM_CONST(0x0200000000000000)
+#define CPU_FTR_POPCNTB			LONG_ASM_CONST(0x0400000000000000)
+#define CPU_FTR_POPCNTD			LONG_ASM_CONST(0x0800000000000000)
 
 #ifndef __ASSEMBLY__
 
@@ -403,21 +405,22 @@
 	    CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
 	    CPU_FTR_MMCRA | CPU_FTR_SMT | \
 	    CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE | \
-	    CPU_FTR_PURR | CPU_FTR_STCX_CHECKS_ADDRESS)
+	    CPU_FTR_PURR | CPU_FTR_STCX_CHECKS_ADDRESS | \
+	    CPU_FTR_POPCNTB)
 #define CPU_FTRS_POWER6 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
 	    CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
 	    CPU_FTR_MMCRA | CPU_FTR_SMT | \
 	    CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE | \
 	    CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \
 	    CPU_FTR_DSCR | CPU_FTR_UNALIGNED_LD_STD | \
-	    CPU_FTR_STCX_CHECKS_ADDRESS)
+	    CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB)
 #define CPU_FTRS_POWER7 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
 	    CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
 	    CPU_FTR_MMCRA | CPU_FTR_SMT | \
 	    CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE | \
 	    CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \
 	    CPU_FTR_DSCR | CPU_FTR_SAO  | CPU_FTR_ASYM_SMT | \
-	    CPU_FTR_STCX_CHECKS_ADDRESS)
+	    CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD)
 #define CPU_FTRS_CELL	(CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
 	    CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
 	    CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT | \
diff --git a/arch/powerpc/include/asm/cputhreads.h b/arch/powerpc/include/asm/cputhreads.h
index a8e1844..f71bb4c 100644
--- a/arch/powerpc/include/asm/cputhreads.h
+++ b/arch/powerpc/include/asm/cputhreads.h
@@ -61,22 +61,25 @@
 	return cpu_thread_mask_to_cores(cpu_online_map);
 }
 
-static inline int cpu_thread_to_core(int cpu)
-{
-	return cpu >> threads_shift;
-}
+#ifdef CONFIG_SMP
+int cpu_core_index_of_thread(int cpu);
+int cpu_first_thread_of_core(int core);
+#else
+static inline int cpu_core_index_of_thread(int cpu) { return cpu; }
+static inline int cpu_first_thread_of_core(int core) { return core; }
+#endif
 
 static inline int cpu_thread_in_core(int cpu)
 {
 	return cpu & (threads_per_core - 1);
 }
 
-static inline int cpu_first_thread_in_core(int cpu)
+static inline int cpu_first_thread_sibling(int cpu)
 {
 	return cpu & ~(threads_per_core - 1);
 }
 
-static inline int cpu_last_thread_in_core(int cpu)
+static inline int cpu_last_thread_sibling(int cpu)
 {
 	return cpu | (threads_per_core - 1);
 }
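
The renamed sibling helpers above rely on threads_per_core being a power of two, so masking with (threads_per_core - 1) picks out the thread index and its complement picks out the core base. A quick illustration with assumed values (4 threads per core, cpu 6):

#include <stdio.h>

int main(void)
{
	int threads_per_core = 4;
	int cpu = 6;

	printf("first sibling: %d\n", cpu & ~(threads_per_core - 1)); /* 4 */
	printf("last sibling:  %d\n", cpu | (threads_per_core - 1));  /* 7 */
	return 0;
}
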
diff --git a/arch/powerpc/include/asm/device.h b/arch/powerpc/include/asm/device.h
index a3954e4..16d25c0 100644
--- a/arch/powerpc/include/asm/device.h
+++ b/arch/powerpc/include/asm/device.h
@@ -9,6 +9,12 @@
 struct dma_map_ops;
 struct device_node;
 
+/*
+ * Arch extensions to struct device.
+ *
+ * When adding fields, consider macio_add_one_device in
+ * drivers/macintosh/macio_asic.c
+ */
 struct dev_archdata {
 	/* DMA operations on that device */
 	struct dma_map_ops	*dma_ops;
diff --git a/arch/powerpc/include/asm/firmware.h b/arch/powerpc/include/asm/firmware.h
index 20778a4..4ef662e 100644
--- a/arch/powerpc/include/asm/firmware.h
+++ b/arch/powerpc/include/asm/firmware.h
@@ -46,6 +46,7 @@
 #define FW_FEATURE_PS3_LV1	ASM_CONST(0x0000000000800000)
 #define FW_FEATURE_BEAT		ASM_CONST(0x0000000001000000)
 #define FW_FEATURE_CMO		ASM_CONST(0x0000000002000000)
+#define FW_FEATURE_VPHN		ASM_CONST(0x0000000004000000)
 
 #ifndef __ASSEMBLY__
 
@@ -59,7 +60,7 @@
 		FW_FEATURE_VIO | FW_FEATURE_RDMA | FW_FEATURE_LLAN |
 		FW_FEATURE_BULK_REMOVE | FW_FEATURE_XDABR |
 		FW_FEATURE_MULTITCE | FW_FEATURE_SPLPAR | FW_FEATURE_LPAR |
-		FW_FEATURE_CMO,
+		FW_FEATURE_CMO | FW_FEATURE_VPHN,
 	FW_FEATURE_PSERIES_ALWAYS = 0,
 	FW_FEATURE_ISERIES_POSSIBLE = FW_FEATURE_ISERIES | FW_FEATURE_LPAR,
 	FW_FEATURE_ISERIES_ALWAYS = FW_FEATURE_ISERIES | FW_FEATURE_LPAR,
diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h
index de03ca5..ec089ac 100644
--- a/arch/powerpc/include/asm/hvcall.h
+++ b/arch/powerpc/include/asm/hvcall.h
@@ -232,7 +232,9 @@
 #define H_GET_EM_PARMS		0x2B8
 #define H_SET_MPP		0x2D0
 #define H_GET_MPP		0x2D4
-#define MAX_HCALL_OPCODE	H_GET_MPP
+#define H_HOME_NODE_ASSOCIATIVITY 0x2EC
+#define H_BEST_ENERGY		0x2F4
+#define MAX_HCALL_OPCODE	H_BEST_ENERGY
 
 #ifndef __ASSEMBLY__
 
diff --git a/arch/powerpc/include/asm/lppaca.h b/arch/powerpc/include/asm/lppaca.h
index 7f5e0fe..380d48b 100644
--- a/arch/powerpc/include/asm/lppaca.h
+++ b/arch/powerpc/include/asm/lppaca.h
@@ -62,7 +62,10 @@
 	volatile u32 dyn_pir;		// Dynamic ProcIdReg value	x20-x23
 	u32	dsei_data;           	// DSEI data                  	x24-x27
 	u64	sprg3;               	// SPRG3 value                	x28-x2F
-	u8	reserved3[80];		// Reserved			x30-x7F
+	u8	reserved3[40];		// Reserved			x30-x57
+	volatile u8 vphn_assoc_counts[8]; // Virtual processor home node
+					// associativity change counters x58-x5F
+	u8	reserved4[32];		// Reserved			x60-x7F
 
 //=============================================================================
 // CACHE_LINE_2 0x0080 - 0x00FF Contains local read-write data
diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h
index d045b01..8433d36 100644
--- a/arch/powerpc/include/asm/machdep.h
+++ b/arch/powerpc/include/asm/machdep.h
@@ -27,9 +27,7 @@
 struct rtc_time;
 struct file;
 struct pci_controller;
-#ifdef CONFIG_KEXEC
 struct kimage;
-#endif
 
 #ifdef CONFIG_SMP
 struct smp_ops_t {
@@ -72,7 +70,7 @@
 					     int psize, int ssize);
 	void		(*flush_hash_range)(unsigned long number, int local);
 
-	/* special for kexec, to be called in real mode, linar mapping is
+	/* special for kexec, to be called in real mode, linear mapping is
 	 * destroyed as well */
 	void		(*hpte_clear_all)(void);
 
@@ -324,8 +322,6 @@
 
 #endif /* CONFIG_PPC_PMAC */
 
-extern void setup_pci_ptrs(void);
-
 #ifdef CONFIG_SMP
 /* Poor default implementations */
 extern void __devinit smp_generic_give_timebase(void);
diff --git a/arch/powerpc/include/asm/mmzone.h b/arch/powerpc/include/asm/mmzone.h
index aac87cb..fd3fd58 100644
--- a/arch/powerpc/include/asm/mmzone.h
+++ b/arch/powerpc/include/asm/mmzone.h
@@ -33,6 +33,9 @@
 extern cpumask_var_t node_to_cpumask_map[];
 #ifdef CONFIG_MEMORY_HOTPLUG
 extern unsigned long max_pfn;
+u64 memory_hotplug_max(void);
+#else
+#define memory_hotplug_max() memblock_end_of_DRAM()
 #endif
 
 /*
@@ -42,6 +45,8 @@
 #define node_start_pfn(nid)	(NODE_DATA(nid)->node_start_pfn)
 #define node_end_pfn(nid)	(NODE_DATA(nid)->node_end_pfn)
 
+#else
+#define memory_hotplug_max() memblock_end_of_DRAM()
 #endif /* CONFIG_NEED_MULTIPLE_NODES */
 
 #endif /* __KERNEL__ */
diff --git a/arch/powerpc/include/asm/nvram.h b/arch/powerpc/include/asm/nvram.h
index 850b72f..92efe67 100644
--- a/arch/powerpc/include/asm/nvram.h
+++ b/arch/powerpc/include/asm/nvram.h
@@ -10,31 +10,7 @@
 #ifndef _ASM_POWERPC_NVRAM_H
 #define _ASM_POWERPC_NVRAM_H
 
-#include <linux/errno.h>
-
-#define NVRW_CNT 0x20
-#define NVRAM_HEADER_LEN 16 /* sizeof(struct nvram_header) */
-#define NVRAM_BLOCK_LEN 16
-#define NVRAM_MAX_REQ (2080/NVRAM_BLOCK_LEN)
-#define NVRAM_MIN_REQ (1056/NVRAM_BLOCK_LEN)
-
-#define NVRAM_AS0  0x74
-#define NVRAM_AS1  0x75
-#define NVRAM_DATA 0x77
-
-
-/* RTC Offsets */
-
-#define MOTO_RTC_SECONDS	0x1FF9
-#define MOTO_RTC_MINUTES	0x1FFA
-#define MOTO_RTC_HOURS		0x1FFB
-#define MOTO_RTC_DAY_OF_WEEK	0x1FFC
-#define MOTO_RTC_DAY_OF_MONTH	0x1FFD
-#define MOTO_RTC_MONTH		0x1FFE
-#define MOTO_RTC_YEAR		0x1FFF
-#define MOTO_RTC_CONTROLA       0x1FF8
-#define MOTO_RTC_CONTROLB       0x1FF9
-
+/* Signatures for nvram partitions */
 #define NVRAM_SIG_SP	0x02	/* support processor */
 #define NVRAM_SIG_OF	0x50	/* open firmware config */
 #define NVRAM_SIG_FW	0x51	/* general firmware */
@@ -49,32 +25,19 @@
 #define NVRAM_SIG_OS	0xa0	/* OS defined */
 #define NVRAM_SIG_PANIC	0xa1	/* Apple OSX "panic" */
 
-/* If change this size, then change the size of NVNAME_LEN */
-struct nvram_header {
-	unsigned char signature;
-	unsigned char checksum;
-	unsigned short length;
-	char name[12];
-};
-
 #ifdef __KERNEL__
 
+#include <linux/errno.h>
 #include <linux/list.h>
 
-struct nvram_partition {
-	struct list_head partition;
-	struct nvram_header header;
-	unsigned int index;
-};
-
-
+#ifdef CONFIG_PPC_PSERIES
 extern int nvram_write_error_log(char * buff, int length,
 					 unsigned int err_type, unsigned int err_seq);
 extern int nvram_read_error_log(char * buff, int length,
 					 unsigned int * err_type, unsigned int *err_seq);
 extern int nvram_clear_error_log(void);
-
 extern int pSeries_nvram_init(void);
+#endif /* CONFIG_PPC_PSERIES */
 
 #ifdef CONFIG_MMIO_NVRAM
 extern int mmio_nvram_init(void);
@@ -85,6 +48,13 @@
 }
 #endif
 
+extern int __init nvram_scan_partitions(void);
+extern loff_t nvram_create_partition(const char *name, int sig,
+				     int req_size, int min_size);
+extern int nvram_remove_partition(const char *name, int sig);
+extern int nvram_get_partition_size(loff_t data_index);
+extern loff_t nvram_find_partition(const char *name, int sig, int *out_size);
+
 #endif /* __KERNEL__ */
 
 /* PowerMac specific nvram stuffs */
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index 43adc8b..1255569 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -36,6 +36,8 @@
 #define PPC_INST_NOP			0x60000000
 #define PPC_INST_POPCNTB		0x7c0000f4
 #define PPC_INST_POPCNTB_MASK		0xfc0007fe
+#define PPC_INST_POPCNTD		0x7c0003f4
+#define PPC_INST_POPCNTW		0x7c0002f4
 #define PPC_INST_RFCI			0x4c000066
 #define PPC_INST_RFDI			0x4c00004e
 #define PPC_INST_RFMCI			0x4c00004c
@@ -88,6 +90,12 @@
 					__PPC_RB(b) | __PPC_EH(eh))
 #define PPC_MSGSND(b)		stringify_in_c(.long PPC_INST_MSGSND | \
 					__PPC_RB(b))
+#define PPC_POPCNTB(a, s)	stringify_in_c(.long PPC_INST_POPCNTB | \
+					__PPC_RA(a) | __PPC_RS(s))
+#define PPC_POPCNTD(a, s)	stringify_in_c(.long PPC_INST_POPCNTD | \
+					__PPC_RA(a) | __PPC_RS(s))
+#define PPC_POPCNTW(a, s)	stringify_in_c(.long PPC_INST_POPCNTW | \
+					__PPC_RA(a) | __PPC_RS(s))
 #define PPC_RFCI		stringify_in_c(.long PPC_INST_RFCI)
 #define PPC_RFDI		stringify_in_c(.long PPC_INST_RFDI)
 #define PPC_RFMCI		stringify_in_c(.long PPC_INST_RFMCI)
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index 4c14187..de1967a 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -122,7 +122,6 @@
 		TASK_UNMAPPED_BASE_USER32 : TASK_UNMAPPED_BASE_USER64 )
 #endif
 
-#ifdef __KERNEL__
 #ifdef __powerpc64__
 
 #define STACK_TOP_USER64 TASK_SIZE_USER64
@@ -139,7 +138,6 @@
 #define STACK_TOP_MAX	STACK_TOP
 
 #endif /* __powerpc64__ */
-#endif /* __KERNEL__ */
 
 typedef struct {
 	unsigned long seg;
diff --git a/arch/powerpc/include/asm/topology.h b/arch/powerpc/include/asm/topology.h
index afe4aaa..7ef0d90 100644
--- a/arch/powerpc/include/asm/topology.h
+++ b/arch/powerpc/include/asm/topology.h
@@ -106,9 +106,22 @@
 						int nid)
 {
 }
-
 #endif /* CONFIG_NUMA */
 
+#if defined(CONFIG_NUMA) && defined(CONFIG_PPC_SPLPAR)
+extern int start_topology_update(void);
+extern int stop_topology_update(void);
+#else
+static inline int start_topology_update(void)
+{
+	return 0;
+}
+static inline int stop_topology_update(void)
+{
+	return 0;
+}
+#endif /* CONFIG_NUMA && CONFIG_PPC_SPLPAR */
+
 #include <asm-generic/topology.h>
 
 #ifdef CONFIG_SMP
diff --git a/arch/powerpc/include/asm/vdso_datapage.h b/arch/powerpc/include/asm/vdso_datapage.h
index 08679c5..25e3922 100644
--- a/arch/powerpc/include/asm/vdso_datapage.h
+++ b/arch/powerpc/include/asm/vdso_datapage.h
@@ -116,9 +116,7 @@
 
 #endif /* CONFIG_PPC64 */
 
-#ifdef __KERNEL__
 extern struct vdso_data *vdso_data;
-#endif
 
 #endif /* __ASSEMBLY__ */
 
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index 36c30f3..3bb2a3e 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -29,8 +29,10 @@
 obj-y				:= cputable.o ptrace.o syscalls.o \
 				   irq.o align.o signal_32.o pmc.o vdso.o \
 				   init_task.o process.o systbl.o idle.o \
-				   signal.o sysfs.o cacheinfo.o
-obj-y				+= vdso32/
+				   signal.o sysfs.o cacheinfo.o time.o \
+				   prom.o traps.o setup-common.o \
+				   udbg.o misc.o io.o dma.o \
+				   misc_$(CONFIG_WORD_SIZE).o vdso32/
 obj-$(CONFIG_PPC64)		+= setup_64.o sys_ppc32.o \
 				   signal_64.o ptrace32.o \
 				   paca.o nvram_64.o firmware.o
@@ -80,9 +82,6 @@
 extra-$(CONFIG_8xx)		:= head_8xx.o
 extra-y				+= vmlinux.lds
 
-obj-y				+= time.o prom.o traps.o setup-common.o \
-				   udbg.o misc.o io.o dma.o \
-				   misc_$(CONFIG_WORD_SIZE).o
 obj-$(CONFIG_PPC32)		+= entry_32.o setup_32.o
 obj-$(CONFIG_PPC64)		+= dma-iommu.o iommu.o
 obj-$(CONFIG_KGDB)		+= kgdb.o
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index bd0df2e..23e6a93 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -209,7 +209,6 @@
 	DEFINE(RTASENTRY, offsetof(struct rtas_t, entry));
 
 	/* Interrupt register frame */
-	DEFINE(STACK_FRAME_OVERHEAD, STACK_FRAME_OVERHEAD);
 	DEFINE(INT_FRAME_SIZE, STACK_INT_FRAME_SIZE);
 	DEFINE(SWITCH_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs));
 #ifdef CONFIG_PPC64
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index 96a908f..be5ab18 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -457,16 +457,26 @@
 		.dcache_bsize		= 128,
 		.num_pmcs		= 6,
 		.pmc_type		= PPC_PMC_IBM,
-		.cpu_setup		= __setup_cpu_power7,
-		.cpu_restore		= __restore_cpu_power7,
 		.oprofile_cpu_type	= "ppc64/power7",
 		.oprofile_type		= PPC_OPROFILE_POWER4,
-		.oprofile_mmcra_sihv	= POWER6_MMCRA_SIHV,
-		.oprofile_mmcra_sipr	= POWER6_MMCRA_SIPR,
-		.oprofile_mmcra_clear	= POWER6_MMCRA_THRM |
-			POWER6_MMCRA_OTHER,
 		.platform		= "power7",
 	},
+	{	/* Power7+ */
+		.pvr_mask		= 0xffff0000,
+		.pvr_value		= 0x004A0000,
+		.cpu_name		= "POWER7+ (raw)",
+		.cpu_features		= CPU_FTRS_POWER7,
+		.cpu_user_features	= COMMON_USER_POWER7,
+		.mmu_features		= MMU_FTR_HPTE_TABLE |
+			MMU_FTR_TLBIE_206,
+		.icache_bsize		= 128,
+		.dcache_bsize		= 128,
+		.num_pmcs		= 6,
+		.pmc_type		= PPC_PMC_IBM,
+		.oprofile_cpu_type	= "ppc64/power7",
+		.oprofile_type		= PPC_OPROFILE_POWER4,
+		.platform		= "power7+",
+	},
 	{	/* Cell Broadband Engine */
 		.pvr_mask		= 0xffff0000,
 		.pvr_value		= 0x00700000,
diff --git a/arch/powerpc/kernel/crash_dump.c b/arch/powerpc/kernel/crash_dump.c
index 8e05c16..0a2af50 100644
--- a/arch/powerpc/kernel/crash_dump.c
+++ b/arch/powerpc/kernel/crash_dump.c
@@ -19,6 +19,7 @@
 #include <asm/prom.h>
 #include <asm/firmware.h>
 #include <asm/uaccess.h>
+#include <asm/rtas.h>
 
 #ifdef DEBUG
 #include <asm/udbg.h>
@@ -141,3 +142,35 @@
 
 	return csize;
 }
+
+#ifdef CONFIG_PPC_RTAS
+/*
+ * The crashkernel region will almost always overlap the RTAS region, so
+ * we have to be careful when shrinking the crashkernel region.
+ */
+void crash_free_reserved_phys_range(unsigned long begin, unsigned long end)
+{
+	unsigned long addr;
+	const u32 *basep, *sizep;
+	unsigned int rtas_start = 0, rtas_end = 0;
+
+	basep = of_get_property(rtas.dev, "linux,rtas-base", NULL);
+	sizep = of_get_property(rtas.dev, "rtas-size", NULL);
+
+	if (basep && sizep) {
+		rtas_start = *basep;
+		rtas_end = *basep + *sizep;
+	}
+
+	for (addr = begin; addr < end; addr += PAGE_SIZE) {
+		/* Does this page overlap with the RTAS region? */
+		if (addr <= rtas_end && ((addr + PAGE_SIZE) > rtas_start))
+			continue;
+
+		ClearPageReserved(pfn_to_page(addr >> PAGE_SHIFT));
+		init_page_count(pfn_to_page(addr >> PAGE_SHIFT));
+		free_page((unsigned long)__va(addr));
+		totalram_pages++;
+	}
+}
+#endif
diff --git a/arch/powerpc/kernel/dma-iommu.c b/arch/powerpc/kernel/dma-iommu.c
index 6e54a0f..e755415 100644
--- a/arch/powerpc/kernel/dma-iommu.c
+++ b/arch/powerpc/kernel/dma-iommu.c
@@ -19,7 +19,7 @@
 				      dma_addr_t *dma_handle, gfp_t flag)
 {
 	return iommu_alloc_coherent(dev, get_iommu_table_base(dev), size,
-				    dma_handle, device_to_mask(dev), flag,
+				    dma_handle, dev->coherent_dma_mask, flag,
 				    dev_to_node(dev));
 }
 
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index ed4aeb9..c22dc1ec 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -31,6 +31,7 @@
 #include <asm/asm-offsets.h>
 #include <asm/unistd.h>
 #include <asm/ftrace.h>
+#include <asm/ptrace.h>
 
 #undef SHOW_SYSCALLS
 #undef SHOW_SYSCALLS_TASK
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 9f8b01d..8a81799 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -13,6 +13,7 @@
  */
 
 #include <asm/exception-64s.h>
+#include <asm/ptrace.h>
 
 /*
  * We layout physical memory as follows:
diff --git a/arch/powerpc/kernel/fpu.S b/arch/powerpc/kernel/fpu.S
index e86c040..de36955 100644
--- a/arch/powerpc/kernel/fpu.S
+++ b/arch/powerpc/kernel/fpu.S
@@ -23,6 +23,7 @@
 #include <asm/thread_info.h>
 #include <asm/ppc_asm.h>
 #include <asm/asm-offsets.h>
+#include <asm/ptrace.h>
 
 #ifdef CONFIG_VSX
 #define REST_32FPVSRS(n,c,base)						\
diff --git a/arch/powerpc/kernel/head_40x.S b/arch/powerpc/kernel/head_40x.S
index 8278e8b..9dd21a8 100644
--- a/arch/powerpc/kernel/head_40x.S
+++ b/arch/powerpc/kernel/head_40x.S
@@ -40,6 +40,7 @@
 #include <asm/thread_info.h>
 #include <asm/ppc_asm.h>
 #include <asm/asm-offsets.h>
+#include <asm/ptrace.h>
 
 /* As with the other PowerPC ports, it is expected that when code
  * execution begins here, the following registers contain valid, yet
diff --git a/arch/powerpc/kernel/head_44x.S b/arch/powerpc/kernel/head_44x.S
index 562305b..cbb3436 100644
--- a/arch/powerpc/kernel/head_44x.S
+++ b/arch/powerpc/kernel/head_44x.S
@@ -37,6 +37,7 @@
 #include <asm/thread_info.h>
 #include <asm/ppc_asm.h>
 #include <asm/asm-offsets.h>
+#include <asm/ptrace.h>
 #include <asm/synch.h>
 #include "head_booke.h"
 
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index f0dd577..782f23d 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -38,6 +38,7 @@
 #include <asm/page_64.h>
 #include <asm/irqflags.h>
 #include <asm/kvm_book3s_asm.h>
+#include <asm/ptrace.h>
 
 /* The physical memory is layed out such that the secondary processor
  * spin code sits at 0x0000...0x00ff. On server, the vectors follow
@@ -96,7 +97,7 @@
 	.llong hvReleaseData-KERNELBASE
 #endif /* CONFIG_PPC_ISERIES */
 
-#ifdef CONFIG_CRASH_DUMP
+#ifdef CONFIG_RELOCATABLE
 	/* This flag is set to 1 by a loader if the kernel should run
 	 * at the loaded address instead of the linked address.  This
 	 * is used by kexec-tools to keep the the kdump kernel in the
@@ -384,12 +385,10 @@
 	/* process relocations for the final address of the kernel */
 	lis	r25,PAGE_OFFSET@highest	/* compute virtual base of kernel */
 	sldi	r25,r25,32
-#ifdef CONFIG_CRASH_DUMP
 	lwz	r7,__run_at_load-_stext(r26)
-	cmplwi	cr0,r7,1	/* kdump kernel ? - stay where we are */
+	cmplwi	cr0,r7,1	/* flagged to stay where we are ? */
 	bne	1f
 	add	r25,r25,r26
-#endif
 1:	mr	r3,r25
 	bl	.relocate
 #endif
diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S
index 1f1a04b..1cbf64e 100644
--- a/arch/powerpc/kernel/head_8xx.S
+++ b/arch/powerpc/kernel/head_8xx.S
@@ -29,6 +29,7 @@
 #include <asm/thread_info.h>
 #include <asm/ppc_asm.h>
 #include <asm/asm-offsets.h>
+#include <asm/ptrace.h>
 
 /* Macro to make the code more readable. */
 #ifdef CONFIG_8xx_CPU6
diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S
index 529b817..3e02710 100644
--- a/arch/powerpc/kernel/head_fsl_booke.S
+++ b/arch/powerpc/kernel/head_fsl_booke.S
@@ -41,6 +41,7 @@
 #include <asm/ppc_asm.h>
 #include <asm/asm-offsets.h>
 #include <asm/cache.h>
+#include <asm/ptrace.h>
 #include "head_booke.h"
 
 /* As with the other PowerPC ports, it is expected that when code
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index d583917..961bb03 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -311,8 +311,9 @@
 		/* Handle failure */
 		if (unlikely(entry == DMA_ERROR_CODE)) {
 			if (printk_ratelimit())
-				printk(KERN_INFO "iommu_alloc failed, tbl %p vaddr %lx"
-				       " npages %lx\n", tbl, vaddr, npages);
+				dev_info(dev, "iommu_alloc failed, tbl %p "
+					 "vaddr %lx npages %lu\n", tbl, vaddr,
+					 npages);
 			goto failure;
 		}
 
@@ -579,9 +580,9 @@
 					 attrs);
 		if (dma_handle == DMA_ERROR_CODE) {
 			if (printk_ratelimit())  {
-				printk(KERN_INFO "iommu_alloc failed, "
-						"tbl %p vaddr %p npages %d\n",
-						tbl, vaddr, npages);
+				dev_info(dev, "iommu_alloc failed, tbl %p "
+					 "vaddr %p npages %d\n", tbl, vaddr,
+					 npages);
 			}
 		} else
 			dma_handle |= (uaddr & ~IOMMU_PAGE_MASK);
@@ -627,7 +628,8 @@
 	 * the tce tables.
 	 */
 	if (order >= IOMAP_MAX_ORDER) {
-		printk("iommu_alloc_consistent size too large: 0x%lx\n", size);
+		dev_info(dev, "iommu_alloc_consistent size too large: 0x%lx\n",
+			 size);
 		return NULL;
 	}
 
diff --git a/arch/powerpc/kernel/misc.S b/arch/powerpc/kernel/misc.S
index 2d29752..b69463e 100644
--- a/arch/powerpc/kernel/misc.S
+++ b/arch/powerpc/kernel/misc.S
@@ -122,8 +122,3 @@
 	mtlr	r0
 	mr	r3,r4
 	blr
-
-_GLOBAL(__setup_cpu_power7)
-_GLOBAL(__restore_cpu_power7)
-	/* place holder */
-	blr
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
index a7a570d..094bd98 100644
--- a/arch/powerpc/kernel/misc_32.S
+++ b/arch/powerpc/kernel/misc_32.S
@@ -30,6 +30,7 @@
 #include <asm/processor.h>
 #include <asm/kexec.h>
 #include <asm/bug.h>
+#include <asm/ptrace.h>
 
 	.text
 
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index e514490..206a321 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -25,6 +25,7 @@
 #include <asm/cputable.h>
 #include <asm/thread_info.h>
 #include <asm/kexec.h>
+#include <asm/ptrace.h>
 
 	.text
 
diff --git a/arch/powerpc/kernel/nvram_64.c b/arch/powerpc/kernel/nvram_64.c
index 9cf197f..bb12b32 100644
--- a/arch/powerpc/kernel/nvram_64.c
+++ b/arch/powerpc/kernel/nvram_64.c
@@ -34,15 +34,26 @@
 
 #undef DEBUG_NVRAM
 
-static struct nvram_partition * nvram_part;
-static long nvram_error_log_index = -1;
-static long nvram_error_log_size = 0;
+#define NVRAM_HEADER_LEN	sizeof(struct nvram_header)
+#define NVRAM_BLOCK_LEN		NVRAM_HEADER_LEN
 
-struct err_log_info {
-	int error_type;
-	unsigned int seq_num;
+/* If you change this size, then change the size of NVNAME_LEN */
+struct nvram_header {
+	unsigned char signature;
+	unsigned char checksum;
+	unsigned short length;
+	/* Terminating null required only for names < 12 chars. */
+	char name[12];
 };
 
+struct nvram_partition {
+	struct list_head partition;
+	struct nvram_header header;
+	unsigned int index;
+};
+
+static LIST_HEAD(nvram_partitions);
+
 static loff_t dev_nvram_llseek(struct file *file, loff_t offset, int origin)
 {
 	int size;
@@ -186,14 +197,12 @@
 #ifdef DEBUG_NVRAM
 static void __init nvram_print_partitions(char * label)
 {
-	struct list_head * p;
 	struct nvram_partition * tmp_part;
 	
 	printk(KERN_WARNING "--------%s---------\n", label);
 	printk(KERN_WARNING "indx\t\tsig\tchks\tlen\tname\n");
-	list_for_each(p, &nvram_part->partition) {
-		tmp_part = list_entry(p, struct nvram_partition, partition);
-		printk(KERN_WARNING "%4d    \t%02x\t%02x\t%d\t%s\n",
+	list_for_each_entry(tmp_part, &nvram_partitions, partition) {
+		printk(KERN_WARNING "%4d    \t%02x\t%02x\t%d\t%12s\n",
 		       tmp_part->index, tmp_part->header.signature,
 		       tmp_part->header.checksum, tmp_part->header.length,
 		       tmp_part->header.name);
@@ -228,95 +237,113 @@
 	return c_sum;
 }
 
-static int __init nvram_remove_os_partition(void)
+/**
+ * nvram_remove_partition - Remove one or more partitions in nvram
+ * @name: name of the partition to remove, or NULL for a
+ *        signature only match
+ * @sig: signature of the partition(s) to remove
+ */
+
+int __init nvram_remove_partition(const char *name, int sig)
 {
-	struct list_head *i;
-	struct list_head *j;
-	struct nvram_partition * part;
-	struct nvram_partition * cur_part;
+	struct nvram_partition *part, *prev, *tmp;
 	int rc;
 
-	list_for_each(i, &nvram_part->partition) {
-		part = list_entry(i, struct nvram_partition, partition);
-		if (part->header.signature != NVRAM_SIG_OS)
+	list_for_each_entry(part, &nvram_partitions, partition) {
+		if (part->header.signature != sig)
 			continue;
-		
-		/* Make os partition a free partition */
+		if (name && strncmp(name, part->header.name, 12))
+			continue;
+
+		/* Make partition a free partition */
 		part->header.signature = NVRAM_SIG_FREE;
-		sprintf(part->header.name, "wwwwwwwwwwww");
+		strncpy(part->header.name, "wwwwwwwwwwww", 12);
 		part->header.checksum = nvram_checksum(&part->header);
-
-		/* Merge contiguous free partitions backwards */
-		list_for_each_prev(j, &part->partition) {
-			cur_part = list_entry(j, struct nvram_partition, partition);
-			if (cur_part == nvram_part || cur_part->header.signature != NVRAM_SIG_FREE) {
-				break;
-			}
-			
-			part->header.length += cur_part->header.length;
-			part->header.checksum = nvram_checksum(&part->header);
-			part->index = cur_part->index;
-
-			list_del(&cur_part->partition);
-			kfree(cur_part);
-			j = &part->partition; /* fixup our loop */
-		}
-		
-		/* Merge contiguous free partitions forwards */
-		list_for_each(j, &part->partition) {
-			cur_part = list_entry(j, struct nvram_partition, partition);
-			if (cur_part == nvram_part || cur_part->header.signature != NVRAM_SIG_FREE) {
-				break;
-			}
-
-			part->header.length += cur_part->header.length;
-			part->header.checksum = nvram_checksum(&part->header);
-
-			list_del(&cur_part->partition);
-			kfree(cur_part);
-			j = &part->partition; /* fixup our loop */
-		}
-		
 		rc = nvram_write_header(part);
 		if (rc <= 0) {
-			printk(KERN_ERR "nvram_remove_os_partition: nvram_write failed (%d)\n", rc);
+			printk(KERN_ERR "nvram_remove_partition: nvram_write failed (%d)\n", rc);
 			return rc;
 		}
+	}
 
+	/* Merge contiguous ones */
+	prev = NULL;
+	list_for_each_entry_safe(part, tmp, &nvram_partitions, partition) {
+		if (part->header.signature != NVRAM_SIG_FREE) {
+			prev = NULL;
+			continue;
+		}
+		if (prev) {
+			prev->header.length += part->header.length;
+			prev->header.checksum = nvram_checksum(&part->header);
+			rc = nvram_write_header(part);
+			if (rc <= 0) {
+				printk(KERN_ERR "nvram_remove_partition: nvram_write failed (%d)\n", rc);
+				return rc;
+			}
+			list_del(&part->partition);
+			kfree(part);
+		} else
+			prev = part;
 	}
 	
 	return 0;
 }
 
-/* nvram_create_os_partition
+/**
+ * nvram_create_partition - Create a partition in nvram
+ * @name: name of the partition to create
+ * @sig: signature of the partition to create
+ * @req_size: size of data to allocate in bytes
+ * @min_size: minimum acceptable size (0 means req_size)
  *
- * Create a OS linux partition to buffer error logs.
- * Will create a partition starting at the first free
- * space found if space has enough room.
+ * Returns a negative error code or a positive nvram index
+ * of the beginning of the data area of the newly created
+ * partition. If you provided a min_size smaller than req_size
+ * you need to query for the actual size yourself after the
+ * call using nvram_get_partition_size().
  */
-static int __init nvram_create_os_partition(void)
+loff_t __init nvram_create_partition(const char *name, int sig,
+				     int req_size, int min_size)
 {
 	struct nvram_partition *part;
 	struct nvram_partition *new_part;
 	struct nvram_partition *free_part = NULL;
-	int seq_init[2] = { 0, 0 };
+	static char nv_init_vals[16];
 	loff_t tmp_index;
 	long size = 0;
 	int rc;
-	
+
+	/* Convert sizes from bytes to blocks */
+	req_size = _ALIGN_UP(req_size, NVRAM_BLOCK_LEN) / NVRAM_BLOCK_LEN;
+	min_size = _ALIGN_UP(min_size, NVRAM_BLOCK_LEN) / NVRAM_BLOCK_LEN;
+
+	/* If no minimum size specified, make it the same as the
+	 * requested size
+	 */
+	if (min_size == 0)
+		min_size = req_size;
+	if (min_size > req_size)
+		return -EINVAL;
+
+	/* Now add one block to each for the header */
+	req_size += 1;
+	min_size += 1;
+
 	/* Find a free partition that will give us the maximum needed size 
 	   If can't find one that will give us the minimum size needed */
-	list_for_each_entry(part, &nvram_part->partition, partition) {
+	list_for_each_entry(part, &nvram_partitions, partition) {
 		if (part->header.signature != NVRAM_SIG_FREE)
 			continue;
 
-		if (part->header.length >= NVRAM_MAX_REQ) {
-			size = NVRAM_MAX_REQ;
+		if (part->header.length >= req_size) {
+			size = req_size;
 			free_part = part;
 			break;
 		}
-		if (!size && part->header.length >= NVRAM_MIN_REQ) {
-			size = NVRAM_MIN_REQ;
+		if (part->header.length > size &&
+		    part->header.length >= min_size) {
+			size = part->header.length;
 			free_part = part;
 		}
 	}
@@ -326,136 +353,95 @@
 	/* Create our OS partition */
 	new_part = kmalloc(sizeof(*new_part), GFP_KERNEL);
 	if (!new_part) {
-		printk(KERN_ERR "nvram_create_os_partition: kmalloc failed\n");
+		pr_err("nvram_create_os_partition: kmalloc failed\n");
 		return -ENOMEM;
 	}
 
 	new_part->index = free_part->index;
-	new_part->header.signature = NVRAM_SIG_OS;
+	new_part->header.signature = sig;
 	new_part->header.length = size;
-	strcpy(new_part->header.name, "ppc64,linux");
+	strncpy(new_part->header.name, name, 12);
 	new_part->header.checksum = nvram_checksum(&new_part->header);
 
 	rc = nvram_write_header(new_part);
 	if (rc <= 0) {
-		printk(KERN_ERR "nvram_create_os_partition: nvram_write_header "
-				"failed (%d)\n", rc);
-		return rc;
-	}
-
-	/* make sure and initialize to zero the sequence number and the error
-	   type logged */
-	tmp_index = new_part->index + NVRAM_HEADER_LEN;
-	rc = ppc_md.nvram_write((char *)&seq_init, sizeof(seq_init), &tmp_index);
-	if (rc <= 0) {
-		printk(KERN_ERR "nvram_create_os_partition: nvram_write "
+		pr_err("nvram_create_os_partition: nvram_write_header "
 		       "failed (%d)\n", rc);
 		return rc;
 	}
-	
-	nvram_error_log_index = new_part->index + NVRAM_HEADER_LEN;
-	nvram_error_log_size = ((part->header.length - 1) *
-				NVRAM_BLOCK_LEN) - sizeof(struct err_log_info);
-	
 	list_add_tail(&new_part->partition, &free_part->partition);
 
-	if (free_part->header.length <= size) {
+	/* Adjust or remove the partition we stole the space from */
+	if (free_part->header.length > size) {
+		free_part->index += size * NVRAM_BLOCK_LEN;
+		free_part->header.length -= size;
+		free_part->header.checksum = nvram_checksum(&free_part->header);
+		rc = nvram_write_header(free_part);
+		if (rc <= 0) {
+			pr_err("nvram_create_os_partition: nvram_write_header "
+			       "failed (%d)\n", rc);
+			return rc;
+		}
+	} else {
 		list_del(&free_part->partition);
 		kfree(free_part);
-		return 0;
 	} 
 
-	/* Adjust the partition we stole the space from */
-	free_part->index += size * NVRAM_BLOCK_LEN;
-	free_part->header.length -= size;
-	free_part->header.checksum = nvram_checksum(&free_part->header);
-	
-	rc = nvram_write_header(free_part);
-	if (rc <= 0) {
-		printk(KERN_ERR "nvram_create_os_partition: nvram_write_header "
-		       "failed (%d)\n", rc);
-		return rc;
-	}
-
-	return 0;
-}
-
-
-/* nvram_setup_partition
- *
- * This will setup the partition we need for buffering the
- * error logs and cleanup partitions if needed.
- *
- * The general strategy is the following:
- * 1.) If there is ppc64,linux partition large enough then use it.
- * 2.) If there is not a ppc64,linux partition large enough, search
- * for a free partition that is large enough.
- * 3.) If there is not a free partition large enough remove 
- * _all_ OS partitions and consolidate the space.
- * 4.) Will first try getting a chunk that will satisfy the maximum
- * error log size (NVRAM_MAX_REQ).
- * 5.) If the max chunk cannot be allocated then try finding a chunk
- * that will satisfy the minum needed (NVRAM_MIN_REQ).
- */
-static int __init nvram_setup_partition(void)
-{
-	struct list_head * p;
-	struct nvram_partition * part;
-	int rc;
-
-	/* For now, we don't do any of this on pmac, until I
-	 * have figured out if it's worth killing some unused stuffs
-	 * in our nvram, as Apple defined partitions use pretty much
-	 * all of the space
-	 */
-	if (machine_is(powermac))
-		return -ENOSPC;
-
-	/* see if we have an OS partition that meets our needs.
-	   will try getting the max we need.  If not we'll delete
-	   partitions and try again. */
-	list_for_each(p, &nvram_part->partition) {
-		part = list_entry(p, struct nvram_partition, partition);
-		if (part->header.signature != NVRAM_SIG_OS)
-			continue;
-
-		if (strcmp(part->header.name, "ppc64,linux"))
-			continue;
-
-		if (part->header.length >= NVRAM_MIN_REQ) {
-			/* found our partition */
-			nvram_error_log_index = part->index + NVRAM_HEADER_LEN;
-			nvram_error_log_size = ((part->header.length - 1) *
-						NVRAM_BLOCK_LEN) - sizeof(struct err_log_info);
-			return 0;
+	/* Clear the new partition */
+	for (tmp_index = new_part->index + NVRAM_HEADER_LEN;
+	     tmp_index <  ((size - 1) * NVRAM_BLOCK_LEN);
+	     tmp_index += NVRAM_BLOCK_LEN) {
+		rc = ppc_md.nvram_write(nv_init_vals, NVRAM_BLOCK_LEN, &tmp_index);
+		if (rc <= 0) {
+			pr_err("nvram_create_partition: nvram_write failed (%d)\n", rc);
+			return rc;
 		}
 	}
 	
-	/* try creating a partition with the free space we have */
-	rc = nvram_create_os_partition();
-	if (!rc) {
-		return 0;
-	}
-		
-	/* need to free up some space */
-	rc = nvram_remove_os_partition();
-	if (rc) {
-		return rc;
-	}
+	return new_part->index + NVRAM_HEADER_LEN;
+}
+
+/**
+ * nvram_get_partition_size - Get the data size of an nvram partition
+ * @data_index: This is the offset of the start of the data of
+ *              the partition. The same value that is returned by
+ *              nvram_create_partition().
+ */
+int nvram_get_partition_size(loff_t data_index)
+{
+	struct nvram_partition *part;
 	
-	/* create a partition in this new space */
-	rc = nvram_create_os_partition();
-	if (rc) {
-		printk(KERN_ERR "nvram_create_os_partition: Could not find a "
-		       "NVRAM partition large enough\n");
-		return rc;
+	list_for_each_entry(part, &nvram_partitions, partition) {
+		if (part->index + NVRAM_HEADER_LEN == data_index)
+			return (part->header.length - 1) * NVRAM_BLOCK_LEN;
 	}
-	
-	return 0;
+	return -1;
 }
 
 
-static int __init nvram_scan_partitions(void)
+/**
+ * nvram_find_partition - Find an nvram partition by signature and name
+ * @name: Name of the partition or NULL for any name
+ * @sig: Signature to test against
+ * @out_size: if non-NULL, returns the size of the data part of the partition
+ */
+loff_t nvram_find_partition(const char *name, int sig, int *out_size)
+{
+	struct nvram_partition *p;
+
+	list_for_each_entry(p, &nvram_partitions, partition) {
+		if (p->header.signature == sig &&
+		    (!name || !strncmp(p->header.name, name, 12))) {
+			if (out_size)
+				*out_size = (p->header.length - 1) *
+					NVRAM_BLOCK_LEN;
+			return p->index + NVRAM_HEADER_LEN;
+		}
+	}
+	return 0;
+}
+
+int __init nvram_scan_partitions(void)
 {
 	loff_t cur_index = 0;
 	struct nvram_header phead;
@@ -465,7 +451,7 @@
 	int total_size;
 	int err;
 
-	if (ppc_md.nvram_size == NULL)
+	if (ppc_md.nvram_size == NULL || ppc_md.nvram_size() <= 0)
 		return -ENODEV;
 	total_size = ppc_md.nvram_size();
 	
@@ -512,12 +498,16 @@
 		
 		memcpy(&tmp_part->header, &phead, NVRAM_HEADER_LEN);
 		tmp_part->index = cur_index;
-		list_add_tail(&tmp_part->partition, &nvram_part->partition);
+		list_add_tail(&tmp_part->partition, &nvram_partitions);
 		
 		cur_index += phead.length * NVRAM_BLOCK_LEN;
 	}
 	err = 0;
 
+#ifdef DEBUG_NVRAM
+	nvram_print_partitions("NVRAM Partitions");
+#endif
+
  out:
 	kfree(header);
 	return err;
@@ -525,9 +515,10 @@
 
 static int __init nvram_init(void)
 {
-	int error;
 	int rc;
 	
+	BUILD_BUG_ON(NVRAM_BLOCK_LEN != 16);
+
 	if (ppc_md.nvram_size == NULL || ppc_md.nvram_size() <= 0)
 		return  -ENODEV;
 
@@ -537,29 +528,6 @@
 		return rc;
 	}
   	
-  	/* initialize our anchor for the nvram partition list */
-  	nvram_part = kmalloc(sizeof(struct nvram_partition), GFP_KERNEL);
-  	if (!nvram_part) {
-  		printk(KERN_ERR "nvram_init: Failed kmalloc\n");
-  		return -ENOMEM;
-  	}
-  	INIT_LIST_HEAD(&nvram_part->partition);
-  
-  	/* Get all the NVRAM partitions */
-  	error = nvram_scan_partitions();
-  	if (error) {
-  		printk(KERN_ERR "nvram_init: Failed nvram_scan_partitions\n");
-  		return error;
-  	}
-  		
-  	if(nvram_setup_partition()) 
-  		printk(KERN_WARNING "nvram_init: Could not find nvram partition"
-  		       " for nvram buffered error logging.\n");
-  
-#ifdef DEBUG_NVRAM
-	nvram_print_partitions("NVRAM Partitions");
-#endif
-
   	return rc;
 }
 
@@ -568,135 +536,6 @@
         misc_deregister( &nvram_dev );
 }
 
-
-#ifdef CONFIG_PPC_PSERIES
-
-/* nvram_write_error_log
- *
- * We need to buffer the error logs into nvram to ensure that we have
- * the failure information to decode.  If we have a severe error there
- * is no way to guarantee that the OS or the machine is in a state to
- * get back to user land and write the error to disk.  For example if
- * the SCSI device driver causes a Machine Check by writing to a bad
- * IO address, there is no way of guaranteeing that the device driver
- * is in any state that is would also be able to write the error data
- * captured to disk, thus we buffer it in NVRAM for analysis on the
- * next boot.
- *
- * In NVRAM the partition containing the error log buffer will looks like:
- * Header (in bytes):
- * +-----------+----------+--------+------------+------------------+
- * | signature | checksum | length | name       | data             |
- * |0          |1         |2      3|4         15|16        length-1|
- * +-----------+----------+--------+------------+------------------+
- *
- * The 'data' section would look like (in bytes):
- * +--------------+------------+-----------------------------------+
- * | event_logged | sequence # | error log                         |
- * |0            3|4          7|8            nvram_error_log_size-1|
- * +--------------+------------+-----------------------------------+
- *
- * event_logged: 0 if event has not been logged to syslog, 1 if it has
- * sequence #: The unique sequence # for each event. (until it wraps)
- * error log: The error log from event_scan
- */
-int nvram_write_error_log(char * buff, int length,
-                          unsigned int err_type, unsigned int error_log_cnt)
-{
-	int rc;
-	loff_t tmp_index;
-	struct err_log_info info;
-	
-	if (nvram_error_log_index == -1) {
-		return -ESPIPE;
-	}
-
-	if (length > nvram_error_log_size) {
-		length = nvram_error_log_size;
-	}
-
-	info.error_type = err_type;
-	info.seq_num = error_log_cnt;
-
-	tmp_index = nvram_error_log_index;
-
-	rc = ppc_md.nvram_write((char *)&info, sizeof(struct err_log_info), &tmp_index);
-	if (rc <= 0) {
-		printk(KERN_ERR "nvram_write_error_log: Failed nvram_write (%d)\n", rc);
-		return rc;
-	}
-
-	rc = ppc_md.nvram_write(buff, length, &tmp_index);
-	if (rc <= 0) {
-		printk(KERN_ERR "nvram_write_error_log: Failed nvram_write (%d)\n", rc);
-		return rc;
-	}
-	
-	return 0;
-}
-
-/* nvram_read_error_log
- *
- * Reads nvram for error log for at most 'length'
- */
-int nvram_read_error_log(char * buff, int length,
-                         unsigned int * err_type, unsigned int * error_log_cnt)
-{
-	int rc;
-	loff_t tmp_index;
-	struct err_log_info info;
-	
-	if (nvram_error_log_index == -1)
-		return -1;
-
-	if (length > nvram_error_log_size)
-		length = nvram_error_log_size;
-
-	tmp_index = nvram_error_log_index;
-
-	rc = ppc_md.nvram_read((char *)&info, sizeof(struct err_log_info), &tmp_index);
-	if (rc <= 0) {
-		printk(KERN_ERR "nvram_read_error_log: Failed nvram_read (%d)\n", rc);
-		return rc;
-	}
-
-	rc = ppc_md.nvram_read(buff, length, &tmp_index);
-	if (rc <= 0) {
-		printk(KERN_ERR "nvram_read_error_log: Failed nvram_read (%d)\n", rc);
-		return rc;
-	}
-
-	*error_log_cnt = info.seq_num;
-	*err_type = info.error_type;
-
-	return 0;
-}
-
-/* This doesn't actually zero anything, but it sets the event_logged
- * word to tell that this event is safely in syslog.
- */
-int nvram_clear_error_log(void)
-{
-	loff_t tmp_index;
-	int clear_word = ERR_FLAG_ALREADY_LOGGED;
-	int rc;
-
-	if (nvram_error_log_index == -1)
-		return -1;
-
-	tmp_index = nvram_error_log_index;
-	
-	rc = ppc_md.nvram_write((char *)&clear_word, sizeof(int), &tmp_index);
-	if (rc <= 0) {
-		printk(KERN_ERR "nvram_clear_error_log: Failed nvram_write (%d)\n", rc);
-		return rc;
-	}
-
-	return 0;
-}
-
-#endif /* CONFIG_PPC_PSERIES */
-
 module_init(nvram_init);
 module_exit(nvram_cleanup);
 MODULE_LICENSE("GPL");
diff --git a/arch/powerpc/kernel/pci_64.c b/arch/powerpc/kernel/pci_64.c
index d43fc65..8515776 100644
--- a/arch/powerpc/kernel/pci_64.c
+++ b/arch/powerpc/kernel/pci_64.c
@@ -193,8 +193,7 @@
 	hose->io_resource.start += io_virt_offset;
 	hose->io_resource.end += io_virt_offset;
 
-	pr_debug("  hose->io_resource=0x%016llx...0x%016llx\n",
-		 hose->io_resource.start, hose->io_resource.end);
+	pr_debug("  hose->io_resource=%pR\n", &hose->io_resource);
 
 	return 0;
 }
diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c
index ab3e392..ef3ef56 100644
--- a/arch/powerpc/kernel/ppc_ksyms.c
+++ b/arch/powerpc/kernel/ppc_ksyms.c
@@ -186,3 +186,10 @@
 EXPORT_SYMBOL(__mfdcr);
 #endif
 EXPORT_SYMBOL(empty_zero_page);
+
+#ifdef CONFIG_PPC64
+EXPORT_SYMBOL(__arch_hweight8);
+EXPORT_SYMBOL(__arch_hweight16);
+EXPORT_SYMBOL(__arch_hweight32);
+EXPORT_SYMBOL(__arch_hweight64);
+#endif
diff --git a/arch/powerpc/kernel/ppc_save_regs.S b/arch/powerpc/kernel/ppc_save_regs.S
index 5113bd2..e83ba3f 100644
--- a/arch/powerpc/kernel/ppc_save_regs.S
+++ b/arch/powerpc/kernel/ppc_save_regs.S
@@ -11,6 +11,7 @@
 #include <asm/processor.h>
 #include <asm/ppc_asm.h>
 #include <asm/asm-offsets.h>
+#include <asm/ptrace.h>
 
 /*
  * Grab the register values as they are now.
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index a9b3296..9065369 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -1316,6 +1316,10 @@
 static long ppc_set_hwdebug(struct task_struct *child,
 		     struct ppc_hw_breakpoint *bp_info)
 {
+#ifndef CONFIG_PPC_ADV_DEBUG_REGS
+	unsigned long dabr;
+#endif
+
 	if (bp_info->version != 1)
 		return -ENOTSUPP;
 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
@@ -1353,11 +1357,10 @@
 	/*
 	 * We only support one data breakpoint
 	 */
-	if (((bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_RW) == 0) ||
-	    ((bp_info->trigger_type & ~PPC_BREAKPOINT_TRIGGER_RW) != 0) ||
-	    (bp_info->trigger_type != PPC_BREAKPOINT_TRIGGER_WRITE) ||
-	    (bp_info->addr_mode != PPC_BREAKPOINT_MODE_EXACT) ||
-	    (bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE))
+	if ((bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_RW) == 0 ||
+	    (bp_info->trigger_type & ~PPC_BREAKPOINT_TRIGGER_RW) != 0 ||
+	    bp_info->addr_mode != PPC_BREAKPOINT_MODE_EXACT ||
+	    bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE)
 		return -EINVAL;
 
 	if (child->thread.dabr)
@@ -1366,7 +1369,14 @@
 	if ((unsigned long)bp_info->addr >= TASK_SIZE)
 		return -EIO;
 
-	child->thread.dabr = (unsigned long)bp_info->addr;
+	dabr = (unsigned long)bp_info->addr & ~7UL;
+	dabr |= DABR_TRANSLATION;
+	if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
+		dabr |= DABR_DATA_READ;
+	if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
+		dabr |= DABR_DATA_WRITE;
+
+	child->thread.dabr = dabr;
 
 	return 1;
 #endif /* !CONFIG_PPC_ADV_DEBUG_DVCS */
diff --git a/arch/powerpc/kernel/ptrace32.c b/arch/powerpc/kernel/ptrace32.c
index 8a6daf4..69c4be9 100644
--- a/arch/powerpc/kernel/ptrace32.c
+++ b/arch/powerpc/kernel/ptrace32.c
@@ -280,7 +280,11 @@
 		/* We only support one DABR and no IABRS at the moment */
 		if (addr > 0)
 			break;
+#ifdef CONFIG_PPC_ADV_DEBUG_REGS
+		ret = put_user(child->thread.dac1, (u32 __user *)data);
+#else
 		ret = put_user(child->thread.dabr, (u32 __user *)data);
+#endif
 		break;
 	}
 
@@ -312,6 +316,9 @@
 	case PTRACE_SET_DEBUGREG:
 	case PTRACE_SYSCALL:
 	case PTRACE_CONT:
+	case PPC_PTRACE_GETHWDBGINFO:
+	case PPC_PTRACE_SETHWDEBUG:
+	case PPC_PTRACE_DELHWDEBUG:
 		ret = arch_ptrace(child, request, addr, data);
 		break;
 
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index 8fe8bc6..2097f2b 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -41,6 +41,7 @@
 #include <asm/atomic.h>
 #include <asm/time.h>
 #include <asm/mmu.h>
+#include <asm/topology.h>
 
 struct rtas_t rtas = {
 	.lock = __ARCH_SPIN_LOCK_UNLOCKED
@@ -713,6 +714,7 @@
 	int cpu;
 
 	slb_set_size(SLB_MIN_SIZE);
+	stop_topology_update();
 	printk(KERN_DEBUG "calling ibm,suspend-me on cpu %i\n", smp_processor_id());
 
 	while (rc == H_MULTI_THREADS_ACTIVE && !atomic_read(&data->done) &&
@@ -728,6 +730,7 @@
 		rc = atomic_read(&data->error);
 
 	atomic_set(&data->error, rc);
+	start_topology_update();
 
 	if (wake_when_done) {
 		atomic_set(&data->done, 1);
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index ce6f61c..5a0401f 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -437,8 +437,8 @@
 	unsigned int i;
 
 	/*
-	 * interrupt stacks must be under 256MB, we cannot afford to take
-	 * SLB misses on them.
+	 * Interrupt stacks must be in the first segment since we
+	 * cannot afford to take SLB misses on them.
 	 */
 	for_each_possible_cpu(i) {
 		softirq_ctx[i] = (struct thread_info *)
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 68034bb..9813605 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -466,7 +466,20 @@
 	return id;
 }
 
-/* Must be called when no change can occur to cpu_present_mask,
+/* Helper routines for cpu to core mapping */
+int cpu_core_index_of_thread(int cpu)
+{
+	return cpu >> threads_shift;
+}
+EXPORT_SYMBOL_GPL(cpu_core_index_of_thread);
+
+int cpu_first_thread_of_core(int core)
+{
+	return core << threads_shift;
+}
+EXPORT_SYMBOL_GPL(cpu_first_thread_of_core);
+
+/* Must be called when no change can occur to cpu_present_map,
  * i.e. during cpu online or offline.
  */
 static struct device_node *cpu_to_l2cache(int cpu)
@@ -514,7 +527,7 @@
 	notify_cpu_starting(cpu);
 	set_cpu_online(cpu, true);
 	/* Update sibling maps */
-	base = cpu_first_thread_in_core(cpu);
+	base = cpu_first_thread_sibling(cpu);
 	for (i = 0; i < threads_per_core; i++) {
 		if (cpu_is_offline(base + i))
 			continue;
@@ -600,7 +613,7 @@
 		return err;
 
 	/* Update sibling maps */
-	base = cpu_first_thread_in_core(cpu);
+	base = cpu_first_thread_sibling(cpu);
 	for (i = 0; i < threads_per_core; i++) {
 		cpumask_clear_cpu(cpu, cpu_sibling_mask(base + i));
 		cpumask_clear_cpu(base + i, cpu_sibling_mask(cpu));
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 0104069..09e4dea 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -155,7 +155,7 @@
 
 static u64 tb_to_ns_scale __read_mostly;
 static unsigned tb_to_ns_shift __read_mostly;
-static unsigned long boot_tb __read_mostly;
+static u64 boot_tb __read_mostly;
 
 extern struct timezone sys_tz;
 static long timezone_offset;
diff --git a/arch/powerpc/kernel/vector.S b/arch/powerpc/kernel/vector.S
index fe46048..9de6f39 100644
--- a/arch/powerpc/kernel/vector.S
+++ b/arch/powerpc/kernel/vector.S
@@ -5,6 +5,7 @@
 #include <asm/cputable.h>
 #include <asm/thread_info.h>
 #include <asm/page.h>
+#include <asm/ptrace.h>
 
 /*
  * load_up_altivec(unused, unused, tsk)
diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c
index 441d2a7..1b695fd 100644
--- a/arch/powerpc/kernel/vio.c
+++ b/arch/powerpc/kernel/vio.c
@@ -600,6 +600,11 @@
 	vio_cmo_dealloc(viodev, alloc_size);
 }
 
+static int vio_dma_iommu_dma_supported(struct device *dev, u64 mask)
+{
+	return dma_iommu_ops.dma_supported(dev, mask);
+}
+
 struct dma_map_ops vio_dma_mapping_ops = {
 	.alloc_coherent = vio_dma_iommu_alloc_coherent,
 	.free_coherent  = vio_dma_iommu_free_coherent,
@@ -607,6 +612,7 @@
 	.unmap_sg       = vio_dma_iommu_unmap_sg,
 	.map_page       = vio_dma_iommu_map_page,
 	.unmap_page     = vio_dma_iommu_unmap_page,
+	.dma_supported  = vio_dma_iommu_dma_supported,
 
 };
 
@@ -858,8 +864,7 @@
 
 static void vio_cmo_set_dma_ops(struct vio_dev *viodev)
 {
-	vio_dma_mapping_ops.dma_supported = dma_iommu_ops.dma_supported;
-	viodev->dev.archdata.dma_ops = &vio_dma_mapping_ops;
+	set_dma_ops(&viodev->dev, &vio_dma_mapping_ops);
 }
 
 /**
@@ -1244,7 +1249,7 @@
 	if (firmware_has_feature(FW_FEATURE_CMO))
 		vio_cmo_set_dma_ops(viodev);
 	else
-		viodev->dev.archdata.dma_ops = &dma_iommu_ops;
+		set_dma_ops(&viodev->dev, &dma_iommu_ops);
 	set_iommu_table_base(&viodev->dev, vio_build_iommu_table(viodev));
 	set_dev_node(&viodev->dev, of_node_to_nid(of_node));
 
@@ -1252,6 +1257,10 @@
 	viodev->dev.parent = &vio_bus_device.dev;
 	viodev->dev.bus = &vio_bus_type;
 	viodev->dev.release = vio_dev_release;
+	/* needed to ensure proper operation of coherent allocations
+	 * later, in case driver doesn't set it explicitly */
+	dma_set_mask(&viodev->dev, DMA_BIT_MASK(64));
+	dma_set_coherent_mask(&viodev->dev, DMA_BIT_MASK(64));
 
 	/* register with generic device framework */
 	if (device_register(&viodev->dev)) {
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index e316847..badc983 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -1307,12 +1307,10 @@
 	int err = -ENOMEM;
 	unsigned long p;
 
-	vcpu_book3s = vmalloc(sizeof(struct kvmppc_vcpu_book3s));
+	vcpu_book3s = vzalloc(sizeof(struct kvmppc_vcpu_book3s));
 	if (!vcpu_book3s)
 		goto out;
 
-	memset(vcpu_book3s, 0, sizeof(struct kvmppc_vcpu_book3s));
-
 	vcpu_book3s->shadow_vcpu = (struct kvmppc_book3s_shadow_vcpu *)
 		kzalloc(sizeof(*vcpu_book3s->shadow_vcpu), GFP_KERNEL);
 	if (!vcpu_book3s->shadow_vcpu)
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 38f756f..9975846 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -145,18 +145,12 @@
 	*(int *)rtn = kvmppc_core_check_processor_compat();
 }
 
-struct kvm *kvm_arch_create_vm(void)
+int kvm_arch_init_vm(struct kvm *kvm)
 {
-	struct kvm *kvm;
-
-	kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);
-	if (!kvm)
-		return ERR_PTR(-ENOMEM);
-
-	return kvm;
+	return 0;
 }
 
-static void kvmppc_free_vcpus(struct kvm *kvm)
+void kvm_arch_destroy_vm(struct kvm *kvm)
 {
 	unsigned int i;
 	struct kvm_vcpu *vcpu;
@@ -176,14 +170,6 @@
 {
 }
 
-void kvm_arch_destroy_vm(struct kvm *kvm)
-{
-	kvmppc_free_vcpus(kvm);
-	kvm_free_physmem(kvm);
-	cleanup_srcu_struct(&kvm->srcu);
-	kfree(kvm);
-}
-
 int kvm_dev_ioctl_check_extension(long ext)
 {
 	int r;
diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile
index 889f2bc..166a6a0 100644
--- a/arch/powerpc/lib/Makefile
+++ b/arch/powerpc/lib/Makefile
@@ -16,7 +16,7 @@
 
 obj-$(CONFIG_PPC64)	+= copypage_64.o copyuser_64.o \
 			   memcpy_64.o usercopy_64.o mem_64.o string.o \
-			   checksum_wrappers_64.o
+			   checksum_wrappers_64.o hweight_64.o
 obj-$(CONFIG_XMON)	+= sstep.o ldstfp.o
 obj-$(CONFIG_KPROBES)	+= sstep.o ldstfp.o
 obj-$(CONFIG_HAVE_HW_BREAKPOINT)	+= sstep.o ldstfp.o
diff --git a/arch/powerpc/lib/hweight_64.S b/arch/powerpc/lib/hweight_64.S
new file mode 100644
index 0000000..fda2786
--- /dev/null
+++ b/arch/powerpc/lib/hweight_64.S
@@ -0,0 +1,110 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) IBM Corporation, 2010
+ *
+ * Author: Anton Blanchard <anton@au.ibm.com>
+ */
+#include <asm/processor.h>
+#include <asm/ppc_asm.h>
+
+/* Note: This code relies on -mminimal-toc */
+
+_GLOBAL(__arch_hweight8)
+BEGIN_FTR_SECTION
+	b .__sw_hweight8
+	nop
+	nop
+FTR_SECTION_ELSE
+	PPC_POPCNTB(r3,r3)
+	clrldi	r3,r3,64-8
+	blr
+ALT_FTR_SECTION_END_IFCLR(CPU_FTR_POPCNTB)
+
+_GLOBAL(__arch_hweight16)
+BEGIN_FTR_SECTION
+	b .__sw_hweight16
+	nop
+	nop
+	nop
+	nop
+FTR_SECTION_ELSE
+  BEGIN_FTR_SECTION_NESTED(50)
+	PPC_POPCNTB(r3,r3)
+	srdi	r4,r3,8
+	add	r3,r4,r3
+	clrldi	r3,r3,64-8
+	blr
+  FTR_SECTION_ELSE_NESTED(50)
+	clrlwi  r3,r3,16
+	PPC_POPCNTW(r3,r3)
+	clrldi	r3,r3,64-8
+	blr
+  ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_POPCNTD, 50)
+ALT_FTR_SECTION_END_IFCLR(CPU_FTR_POPCNTB)
+
+_GLOBAL(__arch_hweight32)
+BEGIN_FTR_SECTION
+	b .__sw_hweight32
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+FTR_SECTION_ELSE
+  BEGIN_FTR_SECTION_NESTED(51)
+	PPC_POPCNTB(r3,r3)
+	srdi	r4,r3,16
+	add	r3,r4,r3
+	srdi	r4,r3,8
+	add	r3,r4,r3
+	clrldi	r3,r3,64-8
+	blr
+  FTR_SECTION_ELSE_NESTED(51)
+	PPC_POPCNTW(r3,r3)
+	clrldi	r3,r3,64-8
+	blr
+  ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_POPCNTD, 51)
+ALT_FTR_SECTION_END_IFCLR(CPU_FTR_POPCNTB)
+
+_GLOBAL(__arch_hweight64)
+BEGIN_FTR_SECTION
+	b .__sw_hweight64
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+FTR_SECTION_ELSE
+  BEGIN_FTR_SECTION_NESTED(52)
+	PPC_POPCNTB(r3,r3)
+	srdi	r4,r3,32
+	add	r3,r4,r3
+	srdi	r4,r3,16
+	add	r3,r4,r3
+	srdi	r4,r3,8
+	add	r3,r4,r3
+	clrldi	r3,r3,64-8
+	blr
+  FTR_SECTION_ELSE_NESTED(52)
+	PPC_POPCNTD(r3,r3)
+	clrldi	r3,r3,64-8
+	blr
+  ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_POPCNTD, 52)
+ALT_FTR_SECTION_END_IFCLR(CPU_FTR_POPCNTB)
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 5e95844..a5991fa 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -1070,7 +1070,7 @@
 		  unsigned long access, unsigned long trap)
 {
 	unsigned long vsid;
-	void *pgdir;
+	pgd_t *pgdir;
 	pte_t *ptep;
 	unsigned long flags;
 	int rc, ssize, local = 0;
diff --git a/arch/powerpc/mm/mmu_context_nohash.c b/arch/powerpc/mm/mmu_context_nohash.c
index 5ce9984..c0aab52 100644
--- a/arch/powerpc/mm/mmu_context_nohash.c
+++ b/arch/powerpc/mm/mmu_context_nohash.c
@@ -111,8 +111,8 @@
 		 * a core map instead but this will do for now.
 		 */
 		for_each_cpu(cpu, mm_cpumask(mm)) {
-			for (i = cpu_first_thread_in_core(cpu);
-			     i <= cpu_last_thread_in_core(cpu); i++)
+			for (i = cpu_first_thread_sibling(cpu);
+			     i <= cpu_last_thread_sibling(cpu); i++)
 				__set_bit(id, stale_map[i]);
 			cpu = i - 1;
 		}
@@ -264,14 +264,14 @@
 	 */
 	if (test_bit(id, stale_map[cpu])) {
 		pr_hardcont(" | stale flush %d [%d..%d]",
-			    id, cpu_first_thread_in_core(cpu),
-			    cpu_last_thread_in_core(cpu));
+			    id, cpu_first_thread_sibling(cpu),
+			    cpu_last_thread_sibling(cpu));
 
 		local_flush_tlb_mm(next);
 
 		/* XXX This clear should ultimately be part of local_flush_tlb_mm */
-		for (i = cpu_first_thread_in_core(cpu);
-		     i <= cpu_last_thread_in_core(cpu); i++) {
+		for (i = cpu_first_thread_sibling(cpu);
+		     i <= cpu_last_thread_sibling(cpu); i++) {
 			__clear_bit(id, stale_map[i]);
 		}
 	}
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 74505b2..bf5cb91 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -20,10 +20,15 @@
 #include <linux/memblock.h>
 #include <linux/of.h>
 #include <linux/pfn.h>
+#include <linux/cpuset.h>
+#include <linux/node.h>
 #include <asm/sparsemem.h>
 #include <asm/prom.h>
 #include <asm/system.h>
 #include <asm/smp.h>
+#include <asm/firmware.h>
+#include <asm/paca.h>
+#include <asm/hvcall.h>
 
 static int numa_enabled = 1;
 
@@ -163,7 +168,7 @@
 	work_with_active_regions(nid, get_active_region_work_fn, node_ar);
 }
 
-static void __cpuinit map_cpu_to_node(int cpu, int node)
+static void map_cpu_to_node(int cpu, int node)
 {
 	numa_cpu_lookup_table[cpu] = node;
 
@@ -173,7 +178,7 @@
 		cpumask_set_cpu(cpu, node_to_cpumask_map[node]);
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
+#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_PPC_SPLPAR)
 static void unmap_cpu_from_node(unsigned long cpu)
 {
 	int node = numa_cpu_lookup_table[cpu];
@@ -187,7 +192,7 @@
 		       cpu, node);
 	}
 }
-#endif /* CONFIG_HOTPLUG_CPU */
+#endif /* CONFIG_HOTPLUG_CPU || CONFIG_PPC_SPLPAR */
 
 /* must hold reference to node during call */
 static const int *of_get_associativity(struct device_node *dev)
@@ -246,32 +251,41 @@
 /* Returns nid in the range [0..MAX_NUMNODES-1], or -1 if no useful numa
  * info is found.
  */
-static int of_node_to_nid_single(struct device_node *device)
+static int associativity_to_nid(const unsigned int *associativity)
 {
 	int nid = -1;
-	const unsigned int *tmp;
 
 	if (min_common_depth == -1)
 		goto out;
 
-	tmp = of_get_associativity(device);
-	if (!tmp)
-		goto out;
-
-	if (tmp[0] >= min_common_depth)
-		nid = tmp[min_common_depth];
+	if (associativity[0] >= min_common_depth)
+		nid = associativity[min_common_depth];
 
 	/* POWER4 LPAR uses 0xffff as invalid node */
 	if (nid == 0xffff || nid >= MAX_NUMNODES)
 		nid = -1;
 
-	if (nid > 0 && tmp[0] >= distance_ref_points_depth)
-		initialize_distance_lookup_table(nid, tmp);
+	if (nid > 0 && associativity[0] >= distance_ref_points_depth)
+		initialize_distance_lookup_table(nid, associativity);
 
 out:
 	return nid;
 }
 
+/* Returns the nid associated with the given device tree node,
+ * or -1 if not found.
+ */
+static int of_node_to_nid_single(struct device_node *device)
+{
+	int nid = -1;
+	const unsigned int *tmp;
+
+	tmp = of_get_associativity(device);
+	if (tmp)
+		nid = associativity_to_nid(tmp);
+	return nid;
+}
+
 /* Walk the device tree upwards, looking for an associativity id */
 int of_node_to_nid(struct device_node *device)
 {
@@ -1247,4 +1261,275 @@
 	return nid;
 }
 
+static u64 hot_add_drconf_memory_max(void)
+{
+	struct device_node *memory = NULL;
+	unsigned int drconf_cell_cnt = 0;
+	u64 lmb_size = 0;
+	const u32 *dm = 0;
+
+	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
+	if (memory) {
+		drconf_cell_cnt = of_get_drconf_memory(memory, &dm);
+		lmb_size = of_get_lmb_size(memory);
+		of_node_put(memory);
+	}
+	return lmb_size * drconf_cell_cnt;
+}
+
+/*
+ * memory_hotplug_max - return max address of memory that may be added
+ *
+ * This is currently only used on systems that support drconfig memory
+ * hotplug.
+ */
+u64 memory_hotplug_max(void)
+{
+	return max(hot_add_drconf_memory_max(), memblock_end_of_DRAM());
+}
 #endif /* CONFIG_MEMORY_HOTPLUG */
+
+/* Virtual Processor Home Node (VPHN) support */
+#ifdef CONFIG_PPC_SPLPAR
+#define VPHN_NR_CHANGE_CTRS (8)
+static u8 vphn_cpu_change_counts[NR_CPUS][VPHN_NR_CHANGE_CTRS];
+static cpumask_t cpu_associativity_changes_mask;
+static int vphn_enabled;
+static void set_topology_timer(void);
+
+/*
+ * Store the current values of the associativity change counters provided
+ * by the hypervisor.
+ */
+static void setup_cpu_associativity_change_counters(void)
+{
+	int cpu = 0;
+
+	for_each_possible_cpu(cpu) {
+		int i = 0;
+		u8 *counts = vphn_cpu_change_counts[cpu];
+		volatile u8 *hypervisor_counts = lppaca[cpu].vphn_assoc_counts;
+
+		for (i = 0; i < VPHN_NR_CHANGE_CTRS; i++) {
+			counts[i] = hypervisor_counts[i];
+		}
+	}
+}
+
+/*
+ * The hypervisor maintains a set of 8 associativity change counters in
+ * the VPA of each cpu that correspond to the associativity levels in the
+ * ibm,associativity-reference-points property. When an associativity
+ * level changes, the corresponding counter is incremented.
+ *
+ * Set a bit in cpu_associativity_changes_mask for each cpu whose home
+ * node associativity levels have changed.
+ *
+ * Returns the number of cpus with unhandled associativity changes.
+ */
+static int update_cpu_associativity_changes_mask(void)
+{
+	int cpu = 0, nr_cpus = 0;
+	cpumask_t *changes = &cpu_associativity_changes_mask;
+
+	cpumask_clear(changes);
+
+	for_each_possible_cpu(cpu) {
+		int i, changed = 0;
+		u8 *counts = vphn_cpu_change_counts[cpu];
+		volatile u8 *hypervisor_counts = lppaca[cpu].vphn_assoc_counts;
+
+		for (i = 0; i < VPHN_NR_CHANGE_CTRS; i++) {
+			if (hypervisor_counts[i] > counts[i]) {
+				counts[i] = hypervisor_counts[i];
+				changed = 1;
+			}
+		}
+		if (changed) {
+			cpumask_set_cpu(cpu, changes);
+			nr_cpus++;
+		}
+	}
+
+	return nr_cpus;
+}
+
+/* 6 64-bit registers unpacked into 12 32-bit associativity values */
+#define VPHN_ASSOC_BUFSIZE (6*sizeof(u64)/sizeof(u32))
+
+/*
+ * Convert the associativity domain numbers returned from the hypervisor
+ * to the sequence they would appear in the ibm,associativity property.
+ */
+static int vphn_unpack_associativity(const long *packed, unsigned int *unpacked)
+{
+	int i = 0;
+	int nr_assoc_doms = 0;
+	const u16 *field = (const u16*) packed;
+
+#define VPHN_FIELD_UNUSED	(0xffff)
+#define VPHN_FIELD_MSB		(0x8000)
+#define VPHN_FIELD_MASK		(~VPHN_FIELD_MSB)
+
+	for (i = 0; i < VPHN_ASSOC_BUFSIZE; i++) {
+		if (*field == VPHN_FIELD_UNUSED) {
+			/* All significant fields processed, and remaining
+			 * fields contain the reserved value of all 1's.
+			 * Just store them.
+			 */
+			unpacked[i] = *((u32*)field);
+			field += 2;
+		}
+		else if (*field & VPHN_FIELD_MSB) {
+			/* Data is in the lower 15 bits of this field */
+			unpacked[i] = *field & VPHN_FIELD_MASK;
+			field++;
+			nr_assoc_doms++;
+		}
+		else {
+			/* Data is in the lower 15 bits of this field
+			 * concatenated with the next 16 bit field
+			 */
+			unpacked[i] = *((u32*)field);
+			field += 2;
+			nr_assoc_doms++;
+		}
+	}
+
+	return nr_assoc_doms;
+}
+
+/*
+ * Retrieve the new associativity information for a virtual processor's
+ * home node.
+ */
+static long hcall_vphn(unsigned long cpu, unsigned int *associativity)
+{
+	long rc = 0;
+	long retbuf[PLPAR_HCALL9_BUFSIZE] = {0};
+	u64 flags = 1;
+	int hwcpu = get_hard_smp_processor_id(cpu);
+
+	rc = plpar_hcall9(H_HOME_NODE_ASSOCIATIVITY, retbuf, flags, hwcpu);
+	vphn_unpack_associativity(retbuf, associativity);
+
+	return rc;
+}
+
+static long vphn_get_associativity(unsigned long cpu,
+					unsigned int *associativity)
+{
+	long rc = 0;
+
+	rc = hcall_vphn(cpu, associativity);
+
+	switch (rc) {
+	case H_FUNCTION:
+		printk(KERN_INFO
+			"VPHN is not supported. Disabling polling...\n");
+		stop_topology_update();
+		break;
+	case H_HARDWARE:
+		printk(KERN_ERR
+			"hcall_vphn() experienced a hardware fault "
+			"preventing VPHN. Disabling polling...\n");
+		stop_topology_update();
+	}
+
+	return rc;
+}
+
+/*
+ * Update the node maps and sysfs entries for each cpu whose home node
+ * has changed.
+ */
+int arch_update_cpu_topology(void)
+{
+	int cpu = 0, nid = 0, old_nid = 0;
+	unsigned int associativity[VPHN_ASSOC_BUFSIZE] = {0};
+	struct sys_device *sysdev = NULL;
+
+	for_each_cpu_mask(cpu, cpu_associativity_changes_mask) {
+		vphn_get_associativity(cpu, associativity);
+		nid = associativity_to_nid(associativity);
+
+		if (nid < 0 || !node_online(nid))
+			nid = first_online_node;
+
+		old_nid = numa_cpu_lookup_table[cpu];
+
+		/* Disable hotplug while we update the cpu
+		 * masks and sysfs.
+		 */
+		get_online_cpus();
+		unregister_cpu_under_node(cpu, old_nid);
+		unmap_cpu_from_node(cpu);
+		map_cpu_to_node(cpu, nid);
+		register_cpu_under_node(cpu, nid);
+		put_online_cpus();
+
+		sysdev = get_cpu_sysdev(cpu);
+		if (sysdev)
+			kobject_uevent(&sysdev->kobj, KOBJ_CHANGE);
+	}
+
+	return 1;
+}
+
+static void topology_work_fn(struct work_struct *work)
+{
+	rebuild_sched_domains();
+}
+static DECLARE_WORK(topology_work, topology_work_fn);
+
+void topology_schedule_update(void)
+{
+	schedule_work(&topology_work);
+}
+
+static void topology_timer_fn(unsigned long ignored)
+{
+	if (!vphn_enabled)
+		return;
+	if (update_cpu_associativity_changes_mask() > 0)
+		topology_schedule_update();
+	set_topology_timer();
+}
+static struct timer_list topology_timer =
+	TIMER_INITIALIZER(topology_timer_fn, 0, 0);
+
+static void set_topology_timer(void)
+{
+	topology_timer.data = 0;
+	topology_timer.expires = jiffies + 60 * HZ;
+	add_timer(&topology_timer);
+}
+
+/*
+ * Start polling for VPHN associativity changes.
+ */
+int start_topology_update(void)
+{
+	int rc = 0;
+
+	if (firmware_has_feature(FW_FEATURE_VPHN)) {
+		vphn_enabled = 1;
+		setup_cpu_associativity_change_counters();
+		init_timer_deferrable(&topology_timer);
+		set_topology_timer();
+		rc = 1;
+	}
+
+	return rc;
+}
+__initcall(start_topology_update);
+
+/*
+ * Disable polling for VPHN associativity changes.
+ */
+int stop_topology_update(void)
+{
+	vphn_enabled = 0;
+	return del_timer_sync(&topology_timer);
+}
+#endif /* CONFIG_PPC_SPLPAR */
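For illustration only (not part of the patch, and assuming the big-endian field layout used on pseries), a minimal userspace sketch of how the packed 16-bit VPHN fields decode into 32-bit associativity entries, mirroring vphn_unpack_associativity() above:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* example hcall output: one MSB-tagged field, one 32-bit pair, then unused */
	uint16_t packed[] = { 0x8001, 0x0002, 0x0003, 0xffff };

	/* 0x8001: MSB set, so the entry is the low 15 bits of this field */
	printf("%#x\n", packed[0] & 0x7fff);				/* 0x1 */

	/* 0x0002 0x0003: MSB clear, so two fields form one 32-bit entry */
	printf("%#x\n", (unsigned)(((uint32_t)packed[1] << 16) | packed[2]));	/* 0x20003 */

	/* 0xffff fields are unused (reserved all-ones) and stored as-is */
	return 0;
}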
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index a87ead0..8dc41c0 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -78,7 +78,7 @@
 
 	/* pgdir take page or two with 4K pages and a page fraction otherwise */
 #ifndef CONFIG_PPC_4K_PAGES
-	ret = (pgd_t *)kzalloc(1 << PGDIR_ORDER, GFP_KERNEL);
+	ret = kzalloc(1 << PGDIR_ORDER, GFP_KERNEL);
 #else
 	ret = (pgd_t *)__get_free_pages(GFP_KERNEL|__GFP_ZERO,
 			PGDIR_ORDER - PAGE_SHIFT);
@@ -230,6 +230,7 @@
 		area = get_vm_area_caller(size, VM_IOREMAP, caller);
 		if (area == 0)
 			return NULL;
+		area->phys_addr = p;
 		v = (unsigned long) area->addr;
 	} else {
 		v = (ioremap_bot -= size);
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index 21d6dfa..88927a0 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -223,6 +223,8 @@
 					    caller);
 		if (area == NULL)
 			return NULL;
+
+		area->phys_addr = paligned;
 		ret = __ioremap_at(paligned, area->addr, size, flags);
 		if (!ret)
 			vunmap(area->addr);
diff --git a/arch/powerpc/oprofile/op_model_cell.c b/arch/powerpc/oprofile/op_model_cell.c
index 7fd90d0..c4d2b71 100644
--- a/arch/powerpc/oprofile/op_model_cell.c
+++ b/arch/powerpc/oprofile/op_model_cell.c
@@ -1469,7 +1469,7 @@
  * The pm_interval register is setup to write the SPU PC value into the
  * trace buffer at the maximum rate possible.  The trace buffer is configured
  * to store the PCs, wrapping when it is full.  The performance counter is
- * intialized to the max hardware count minus the number of events, N, between
+ * initialized to the max hardware count minus the number of events, N, between
  * samples.  Once the N events have occured, a HW counter overflow occurs
  * causing the generation of a HW counter interrupt which also stops the
  * writing of the SPU PC values to the trace buffer.  Hence the last PC
diff --git a/arch/powerpc/platforms/44x/Makefile b/arch/powerpc/platforms/44x/Makefile
index 82ff326..c04d16d 100644
--- a/arch/powerpc/platforms/44x/Makefile
+++ b/arch/powerpc/platforms/44x/Makefile
@@ -1,4 +1,7 @@
-obj-$(CONFIG_44x)	:= misc_44x.o idle.o
+obj-$(CONFIG_44x)	+= misc_44x.o
+ifneq ($(CONFIG_PPC4xx_CPM),y)
+obj-$(CONFIG_44x)	+= idle.o
+endif
 obj-$(CONFIG_PPC44x_SIMPLE) += ppc44x_simple.o
 obj-$(CONFIG_EBONY)	+= ebony.o
 obj-$(CONFIG_SAM440EP) 	+= sam440ep.o
diff --git a/arch/powerpc/platforms/52xx/lite5200_pm.c b/arch/powerpc/platforms/52xx/lite5200_pm.c
index 80234e5..eda0fc2 100644
--- a/arch/powerpc/platforms/52xx/lite5200_pm.c
+++ b/arch/powerpc/platforms/52xx/lite5200_pm.c
@@ -232,7 +232,7 @@
 	lite5200_pm_target_state = PM_SUSPEND_ON;
 }
 
-static struct platform_suspend_ops lite5200_pm_ops = {
+static const struct platform_suspend_ops lite5200_pm_ops = {
 	.valid		= lite5200_pm_valid,
 	.begin		= lite5200_pm_begin,
 	.prepare	= lite5200_pm_prepare,
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_pm.c b/arch/powerpc/platforms/52xx/mpc52xx_pm.c
index 568cef6..8310e8b 100644
--- a/arch/powerpc/platforms/52xx/mpc52xx_pm.c
+++ b/arch/powerpc/platforms/52xx/mpc52xx_pm.c
@@ -186,7 +186,7 @@
 	iounmap(mbar);
 }
 
-static struct platform_suspend_ops mpc52xx_pm_ops = {
+static const struct platform_suspend_ops mpc52xx_pm_ops = {
 	.valid		= mpc52xx_pm_valid,
 	.prepare	= mpc52xx_pm_prepare,
 	.enter		= mpc52xx_pm_enter,
diff --git a/arch/powerpc/platforms/83xx/suspend-asm.S b/arch/powerpc/platforms/83xx/suspend-asm.S
index 1930543..3d1ecd2 100644
--- a/arch/powerpc/platforms/83xx/suspend-asm.S
+++ b/arch/powerpc/platforms/83xx/suspend-asm.S
@@ -231,7 +231,7 @@
 	ori	r4, r4, 0x002a
 	mtspr	SPRN_DBAT0L, r4
 	lis	r8, TMP_VIRT_IMMR@h
-	ori	r4, r8, 0x001e	/* 1 MByte accessable from Kernel Space only */
+	ori	r4, r8, 0x001e	/* 1 MByte accessible from Kernel Space only */
 	mtspr	SPRN_DBAT0U, r4
 	isync
 
@@ -241,7 +241,7 @@
 	ori	r4, r4, 0x002a
 	mtspr	SPRN_DBAT1L, r4
 	lis	r9, (TMP_VIRT_IMMR + 0x01000000)@h
-	ori	r4, r9, 0x001e	/* 1 MByte accessable from Kernel Space only */
+	ori	r4, r9, 0x001e	/* 1 MByte accessible from Kernel Space only */
 	mtspr	SPRN_DBAT1U, r4
 	isync
 
@@ -253,7 +253,7 @@
 	li	r4, 0x0002
 	mtspr	SPRN_DBAT2L, r4
 	lis	r4, KERNELBASE@h
-	ori	r4, r4, 0x001e	/* 1 MByte accessable from Kernel Space only */
+	ori	r4, r4, 0x001e	/* 1 MByte accessible from Kernel Space only */
 	mtspr	SPRN_DBAT2U, r4
 	isync
 
diff --git a/arch/powerpc/platforms/83xx/suspend.c b/arch/powerpc/platforms/83xx/suspend.c
index 75ae77f..fd4f2f2 100644
--- a/arch/powerpc/platforms/83xx/suspend.c
+++ b/arch/powerpc/platforms/83xx/suspend.c
@@ -311,7 +311,7 @@
 	return ret;
 }
 
-static struct platform_suspend_ops mpc83xx_suspend_ops = {
+static const struct platform_suspend_ops mpc83xx_suspend_ops = {
 	.valid = mpc83xx_suspend_valid,
 	.begin = mpc83xx_suspend_begin,
 	.enter = mpc83xx_suspend_enter,
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_mds.c b/arch/powerpc/platforms/85xx/mpc85xx_mds.c
index aa34cac..747d1ee 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_mds.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_mds.c
@@ -309,7 +309,7 @@
 			/* P1021 has pins muxed for QE and other functions. To
 			 * enable QE UEC mode, we need to set bit QE0 for UCC1
 			 * in Eth mode, QE0 and QE3 for UCC5 in Eth mode, QE9
-			 * and QE12 for QE MII management singals in PMUXCR
+			 * and QE12 for QE MII management signals in PMUXCR
 			 * register.
 			 */
 				setbits32(pmuxcr, MPC85xx_PMUXCR_QE0 |
diff --git a/arch/powerpc/platforms/Kconfig b/arch/powerpc/platforms/Kconfig
index 956154f..2057682 100644
--- a/arch/powerpc/platforms/Kconfig
+++ b/arch/powerpc/platforms/Kconfig
@@ -313,13 +313,14 @@
 source "arch/powerpc/sysdev/bestcomm/Kconfig"
 
 config MPC8xxx_GPIO
-	bool "MPC8xxx GPIO support"
-	depends on PPC_MPC831x || PPC_MPC834x || PPC_MPC837x || FSL_SOC_BOOKE || PPC_86xx
+	bool "MPC512x/MPC8xxx GPIO support"
+	depends on PPC_MPC512x || PPC_MPC831x || PPC_MPC834x || PPC_MPC837x || \
+		   FSL_SOC_BOOKE || PPC_86xx
 	select GENERIC_GPIO
 	select ARCH_REQUIRE_GPIOLIB
 	help
 	  Say Y here if you're going to use hardware that connects to the
-	  MPC831x/834x/837x/8572/8610 GPIOs.
+	  MPC512x/831x/834x/837x/8572/8610 GPIOs.
 
 config SIMPLE_GPIO
 	bool "Support for simple, memory-mapped GPIO controllers"
diff --git a/arch/powerpc/platforms/cell/beat_iommu.c b/arch/powerpc/platforms/cell/beat_iommu.c
index beec405..3ce6855 100644
--- a/arch/powerpc/platforms/cell/beat_iommu.c
+++ b/arch/powerpc/platforms/cell/beat_iommu.c
@@ -76,7 +76,7 @@
 
 static void celleb_dma_dev_setup(struct device *dev)
 {
-	dev->archdata.dma_ops = get_pci_dma_ops();
+	set_dma_ops(dev, &dma_direct_ops);
 	set_dma_offset(dev, celleb_dma_direct_offset);
 }
 
@@ -106,7 +106,6 @@
 static int __init celleb_init_iommu(void)
 {
 	celleb_init_direct_mapping();
-	set_pci_dma_ops(&dma_direct_ops);
 	ppc_md.pci_dma_dev_setup = celleb_pci_dma_dev_setup;
 	bus_register_notifier(&platform_bus_type, &celleb_of_bus_notifier);
 
diff --git a/arch/powerpc/platforms/cell/spufs/lscsa_alloc.c b/arch/powerpc/platforms/cell/spufs/lscsa_alloc.c
index a101abf..3b894f5 100644
--- a/arch/powerpc/platforms/cell/spufs/lscsa_alloc.c
+++ b/arch/powerpc/platforms/cell/spufs/lscsa_alloc.c
@@ -36,10 +36,9 @@
 	struct spu_lscsa *lscsa;
 	unsigned char *p;
 
-	lscsa = vmalloc(sizeof(struct spu_lscsa));
+	lscsa = vzalloc(sizeof(struct spu_lscsa));
 	if (!lscsa)
 		return -ENOMEM;
-	memset(lscsa, 0, sizeof(struct spu_lscsa));
 	csa->lscsa = lscsa;
 
 	/* Set LS pages reserved to allow for user-space mapping. */
diff --git a/arch/powerpc/platforms/chrp/time.c b/arch/powerpc/platforms/chrp/time.c
index 054dfe5..f803f4b 100644
--- a/arch/powerpc/platforms/chrp/time.c
+++ b/arch/powerpc/platforms/chrp/time.c
@@ -29,6 +29,10 @@
 
 extern spinlock_t rtc_lock;
 
+#define NVRAM_AS0  0x74
+#define NVRAM_AS1  0x75
+#define NVRAM_DATA 0x77
+
 static int nvram_as1 = NVRAM_AS1;
 static int nvram_as0 = NVRAM_AS0;
 static int nvram_data = NVRAM_DATA;
diff --git a/arch/powerpc/platforms/iseries/mf.c b/arch/powerpc/platforms/iseries/mf.c
index 42d0a88..b5e026b 100644
--- a/arch/powerpc/platforms/iseries/mf.c
+++ b/arch/powerpc/platforms/iseries/mf.c
@@ -1045,71 +1045,9 @@
 	.write		= mf_side_proc_write,
 };
 
-#if 0
-static void mf_getSrcHistory(char *buffer, int size)
-{
-	struct IplTypeReturnStuff return_stuff;
-	struct pending_event *ev = new_pending_event();
-	int rc = 0;
-	char *pages[4];
-
-	pages[0] = kmalloc(4096, GFP_ATOMIC);
-	pages[1] = kmalloc(4096, GFP_ATOMIC);
-	pages[2] = kmalloc(4096, GFP_ATOMIC);
-	pages[3] = kmalloc(4096, GFP_ATOMIC);
-	if ((ev == NULL) || (pages[0] == NULL) || (pages[1] == NULL)
-			 || (pages[2] == NULL) || (pages[3] == NULL))
-		return -ENOMEM;
-
-	return_stuff.xType = 0;
-	return_stuff.xRc = 0;
-	return_stuff.xDone = 0;
-	ev->event.hp_lp_event.xSubtype = 6;
-	ev->event.hp_lp_event.x.xSubtypeData =
-		subtype_data('M', 'F', 'V', 'I');
-	ev->event.data.vsp_cmd.xEvent = &return_stuff;
-	ev->event.data.vsp_cmd.cmd = 4;
-	ev->event.data.vsp_cmd.lp_index = HvLpConfig_getLpIndex();
-	ev->event.data.vsp_cmd.result_code = 0xFF;
-	ev->event.data.vsp_cmd.reserved = 0;
-	ev->event.data.vsp_cmd.sub_data.page[0] = iseries_hv_addr(pages[0]);
-	ev->event.data.vsp_cmd.sub_data.page[1] = iseries_hv_addr(pages[1]);
-	ev->event.data.vsp_cmd.sub_data.page[2] = iseries_hv_addr(pages[2]);
-	ev->event.data.vsp_cmd.sub_data.page[3] = iseries_hv_addr(pages[3]);
-	mb();
-	if (signal_event(ev) != 0)
-		return;
-
- 	while (return_stuff.xDone != 1)
- 		udelay(10);
- 	if (return_stuff.xRc == 0)
- 		memcpy(buffer, pages[0], size);
-	kfree(pages[0]);
-	kfree(pages[1]);
-	kfree(pages[2]);
-	kfree(pages[3]);
-}
-#endif
-
 static int mf_src_proc_show(struct seq_file *m, void *v)
 {
-#if 0
-	int len;
-
-	mf_getSrcHistory(page, count);
-	len = count;
-	len -= off;
-	if (len < count) {
-		*eof = 1;
-		if (len <= 0)
-			return 0;
-	} else
-		len = count;
-	*start = page + off;
-	return len;
-#else
 	return 0;
-#endif
 }
 
 static int mf_src_proc_open(struct inode *inode, struct file *file)
diff --git a/arch/powerpc/platforms/pasemi/iommu.c b/arch/powerpc/platforms/pasemi/iommu.c
index 1f9fb2c..14943ef 100644
--- a/arch/powerpc/platforms/pasemi/iommu.c
+++ b/arch/powerpc/platforms/pasemi/iommu.c
@@ -156,20 +156,12 @@
 
 static void pci_dma_bus_setup_pasemi(struct pci_bus *bus)
 {
-	struct device_node *dn;
-
 	pr_debug("pci_dma_bus_setup, bus %p, bus->self %p\n", bus, bus->self);
 
 	if (!iommu_table_iobmap_inited) {
 		iommu_table_iobmap_inited = 1;
 		iommu_table_iobmap_setup();
 	}
-
-	dn = pci_bus_to_OF_node(bus);
-
-	if (dn)
-		PCI_DN(dn)->iommu_table = &iommu_table_iobmap;
-
 }
 
 
@@ -192,9 +184,6 @@
 	set_iommu_table_base(&dev->dev, &iommu_table_iobmap);
 }
 
-static void pci_dma_bus_setup_null(struct pci_bus *b) { }
-static void pci_dma_dev_setup_null(struct pci_dev *d) { }
-
 int __init iob_init(struct device_node *dn)
 {
 	unsigned long tmp;
@@ -251,14 +240,8 @@
 	iommu_off = of_chosen &&
 			of_get_property(of_chosen, "linux,iommu-off", NULL);
 #endif
-	if (iommu_off) {
-		/* Direct I/O, IOMMU off */
-		ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_null;
-		ppc_md.pci_dma_bus_setup = pci_dma_bus_setup_null;
-		set_pci_dma_ops(&dma_direct_ops);
-
+	if (iommu_off)
 		return;
-	}
 
 	iob_init(NULL);
 
diff --git a/arch/powerpc/platforms/powermac/setup.c b/arch/powerpc/platforms/powermac/setup.c
index 9deb274..d5aceb7 100644
--- a/arch/powerpc/platforms/powermac/setup.c
+++ b/arch/powerpc/platforms/powermac/setup.c
@@ -506,6 +506,15 @@
 		of_platform_device_create(np, "smu", NULL);
 		of_node_put(np);
 	}
+	np = of_find_node_by_type(NULL, "fcu");
+	if (np == NULL) {
+		/* Some machines have strangely broken device-tree */
+		np = of_find_node_by_path("/u3@0,f8000000/i2c@f8001000/fan@15e");
+	}
+	if (np) {
+		of_platform_device_create(np, "temperature", NULL);
+		of_node_put(np);
+	}
 
 	return 0;
 }
diff --git a/arch/powerpc/platforms/ps3/device-init.c b/arch/powerpc/platforms/ps3/device-init.c
index b341018..6c4b583 100644
--- a/arch/powerpc/platforms/ps3/device-init.c
+++ b/arch/powerpc/platforms/ps3/device-init.c
@@ -566,10 +566,10 @@
 	case PS3_DEV_TYPE_STOR_DISK:
 		result = ps3_setup_storage_dev(repo, PS3_MATCH_ID_STOR_DISK);
 
-		/* Some devices are not accessable from the Other OS lpar. */
+		/* Some devices are not accessible from the Other OS lpar. */
 		if (result == -ENODEV) {
 			result = 0;
-			pr_debug("%s:%u: not accessable\n", __func__,
+			pr_debug("%s:%u: not accessible\n", __func__,
 				 __LINE__);
 		}
 
diff --git a/arch/powerpc/platforms/ps3/interrupt.c b/arch/powerpc/platforms/ps3/interrupt.c
index 59d9712..92290ff 100644
--- a/arch/powerpc/platforms/ps3/interrupt.c
+++ b/arch/powerpc/platforms/ps3/interrupt.c
@@ -44,7 +44,7 @@
  * @lock:
  * @ipi_debug_brk_mask:
  *
- * The HV mantains per SMT thread mappings of HV outlet to HV plug on
+ * The HV maintains per SMT thread mappings of HV outlet to HV plug on
  * behalf of the guest.  These mappings are implemented as 256 bit guest
  * supplied bitmaps indexed by plug number.  The addresses of the bitmaps
  * are registered with the HV through lv1_configure_irq_state_bitmap().
diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig
index 3139814..5d1b743 100644
--- a/arch/powerpc/platforms/pseries/Kconfig
+++ b/arch/powerpc/platforms/pseries/Kconfig
@@ -33,6 +33,16 @@
        depends on PCI_MSI && EEH
        default y
 
+config PSERIES_ENERGY
+	tristate "pSeries energy management capabilities driver"
+	depends on PPC_PSERIES
+	default y
+	help
+	  Provides an interface to platform energy management capabilities
+	  on supported PSERIES platforms.
+	  Provides: /sys/devices/system/cpu/pseries_(de)activate_hint_list
+	  and /sys/devices/system/cpu/cpuN/pseries_(de)activate_hint
+
 config SCANLOG
 	tristate "Scanlog dump interface"
 	depends on RTAS_PROC && PPC_PSERIES
diff --git a/arch/powerpc/platforms/pseries/Makefile b/arch/powerpc/platforms/pseries/Makefile
index 59eb8bd..fc52378 100644
--- a/arch/powerpc/platforms/pseries/Makefile
+++ b/arch/powerpc/platforms/pseries/Makefile
@@ -11,6 +11,7 @@
 obj-$(CONFIG_KEXEC)	+= kexec.o
 obj-$(CONFIG_PCI)	+= pci.o pci_dlpar.o
 obj-$(CONFIG_PSERIES_MSI)	+= msi.o
+obj-$(CONFIG_PSERIES_ENERGY)	+= pseries_energy.o
 
 obj-$(CONFIG_HOTPLUG_CPU)	+= hotplug-cpu.o
 obj-$(CONFIG_MEMORY_HOTPLUG)	+= hotplug-memory.o
diff --git a/arch/powerpc/platforms/pseries/firmware.c b/arch/powerpc/platforms/pseries/firmware.c
index 0a14d8c..0b0eff0 100644
--- a/arch/powerpc/platforms/pseries/firmware.c
+++ b/arch/powerpc/platforms/pseries/firmware.c
@@ -55,6 +55,7 @@
 	{FW_FEATURE_XDABR,		"hcall-xdabr"},
 	{FW_FEATURE_MULTITCE,		"hcall-multi-tce"},
 	{FW_FEATURE_SPLPAR,		"hcall-splpar"},
+	{FW_FEATURE_VPHN,		"hcall-vphn"},
 };
 
 /* Build up the firmware features bitmask using the contents of
diff --git a/arch/powerpc/platforms/pseries/hvCall.S b/arch/powerpc/platforms/pseries/hvCall.S
index 48d2057..fd05fde 100644
--- a/arch/powerpc/platforms/pseries/hvCall.S
+++ b/arch/powerpc/platforms/pseries/hvCall.S
@@ -11,6 +11,7 @@
 #include <asm/processor.h>
 #include <asm/ppc_asm.h>
 #include <asm/asm-offsets.h>
+#include <asm/ptrace.h>
 	
 #define STK_PARM(i)     (48 + ((i)-3)*8)
 
diff --git a/arch/powerpc/platforms/pseries/hvCall_inst.c b/arch/powerpc/platforms/pseries/hvCall_inst.c
index e19ff02..f106662 100644
--- a/arch/powerpc/platforms/pseries/hvCall_inst.c
+++ b/arch/powerpc/platforms/pseries/hvCall_inst.c
@@ -55,7 +55,7 @@
 static int hc_show(struct seq_file *m, void *p)
 {
 	unsigned long h_num = (unsigned long)p;
-	struct hcall_stats *hs = (struct hcall_stats *)m->private;
+	struct hcall_stats *hs = m->private;
 
 	if (hs[h_num].num_calls) {
 		if (cpu_has_feature(CPU_FTR_PURR))
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
index a77bcae..edea60b 100644
--- a/arch/powerpc/platforms/pseries/iommu.c
+++ b/arch/powerpc/platforms/pseries/iommu.c
@@ -140,7 +140,7 @@
 	return ret;
 }
 
-static DEFINE_PER_CPU(u64 *, tce_page) = NULL;
+static DEFINE_PER_CPU(u64 *, tce_page);
 
 static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
 				     long npages, unsigned long uaddr,
@@ -323,14 +323,13 @@
 static void iommu_table_setparms_lpar(struct pci_controller *phb,
 				      struct device_node *dn,
 				      struct iommu_table *tbl,
-				      const void *dma_window,
-				      int bussubno)
+				      const void *dma_window)
 {
 	unsigned long offset, size;
 
-	tbl->it_busno  = bussubno;
 	of_parse_dma_window(dn, dma_window, &tbl->it_index, &offset, &size);
 
+	tbl->it_busno = phb->bus->number;
 	tbl->it_base   = 0;
 	tbl->it_blocksize  = 16;
 	tbl->it_type = TCE_PCI;
@@ -450,14 +449,10 @@
 	if (!ppci->iommu_table) {
 		tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL,
 				   ppci->phb->node);
-		iommu_table_setparms_lpar(ppci->phb, pdn, tbl, dma_window,
-			bus->number);
+		iommu_table_setparms_lpar(ppci->phb, pdn, tbl, dma_window);
 		ppci->iommu_table = iommu_init_table(tbl, ppci->phb->node);
 		pr_debug("  created table: %p\n", ppci->iommu_table);
 	}
-
-	if (pdn != dn)
-		PCI_DN(dn)->iommu_table = ppci->iommu_table;
 }
 
 
@@ -533,21 +528,11 @@
 	}
 	pr_debug("  parent is %s\n", pdn->full_name);
 
-	/* Check for parent == NULL so we don't try to setup the empty EADS
-	 * slots on POWER4 machines.
-	 */
-	if (dma_window == NULL || pdn->parent == NULL) {
-		pr_debug("  no dma window for device, linking to parent\n");
-		set_iommu_table_base(&dev->dev, PCI_DN(pdn)->iommu_table);
-		return;
-	}
-
 	pci = PCI_DN(pdn);
 	if (!pci->iommu_table) {
 		tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL,
 				   pci->phb->node);
-		iommu_table_setparms_lpar(pci->phb, pdn, tbl, dma_window,
-			pci->phb->bus->number);
+		iommu_table_setparms_lpar(pci->phb, pdn, tbl, dma_window);
 		pci->iommu_table = iommu_init_table(tbl, pci->phb->node);
 		pr_debug("  created table: %p\n", pci->iommu_table);
 	} else {
@@ -571,8 +556,7 @@
 
 	switch (action) {
 	case PSERIES_RECONFIG_REMOVE:
-		if (pci && pci->iommu_table &&
-		    of_get_property(np, "ibm,dma-window", NULL))
+		if (pci && pci->iommu_table)
 			iommu_free_table(pci->iommu_table, np->full_name);
 		break;
 	default:
@@ -589,13 +573,8 @@
 /* These are called very early. */
 void iommu_init_early_pSeries(void)
 {
-	if (of_chosen && of_get_property(of_chosen, "linux,iommu-off", NULL)) {
-		/* Direct I/O, IOMMU off */
-		ppc_md.pci_dma_dev_setup = NULL;
-		ppc_md.pci_dma_bus_setup = NULL;
-		set_pci_dma_ops(&dma_direct_ops);
+	if (of_chosen && of_get_property(of_chosen, "linux,iommu-off", NULL))
 		return;
-	}
 
 	if (firmware_has_feature(FW_FEATURE_LPAR)) {
 		if (firmware_has_feature(FW_FEATURE_MULTITCE)) {
@@ -622,3 +601,17 @@
 	set_pci_dma_ops(&dma_iommu_ops);
 }
 
+static int __init disable_multitce(char *str)
+{
+	if (strcmp(str, "off") == 0 &&
+	    firmware_has_feature(FW_FEATURE_LPAR) &&
+	    firmware_has_feature(FW_FEATURE_MULTITCE)) {
+		printk(KERN_INFO "Disabling MULTITCE firmware feature\n");
+		ppc_md.tce_build = tce_build_pSeriesLP;
+		ppc_md.tce_free	 = tce_free_pSeriesLP;
+		powerpc_firmware_features &= ~FW_FEATURE_MULTITCE;
+	}
+	return 1;
+}
+
+__setup("multitce=", disable_multitce);
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index f129040..5d3ea9f 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -627,6 +627,18 @@
 		spin_unlock_irqrestore(&pSeries_lpar_tlbie_lock, flags);
 }
 
+static int __init disable_bulk_remove(char *str)
+{
+	if (strcmp(str, "off") == 0 &&
+	    firmware_has_feature(FW_FEATURE_BULK_REMOVE)) {
+		printk(KERN_INFO "Disabling BULK_REMOVE firmware feature\n");
+		powerpc_firmware_features &= ~FW_FEATURE_BULK_REMOVE;
+	}
+	return 1;
+}
+
+__setup("bulk_remove=", disable_bulk_remove);
+
 void __init hpte_init_lpar(void)
 {
 	ppc_md.hpte_invalidate	= pSeries_lpar_hpte_invalidate;
diff --git a/arch/powerpc/platforms/pseries/nvram.c b/arch/powerpc/platforms/pseries/nvram.c
index bc3c7f2..7e828ba 100644
--- a/arch/powerpc/platforms/pseries/nvram.c
+++ b/arch/powerpc/platforms/pseries/nvram.c
@@ -22,11 +22,25 @@
 #include <asm/prom.h>
 #include <asm/machdep.h>
 
+/* Max bytes to read/write in one go */
+#define NVRW_CNT 0x20
+
 static unsigned int nvram_size;
 static int nvram_fetch, nvram_store;
 static char nvram_buf[NVRW_CNT];	/* assume this is in the first 4GB */
 static DEFINE_SPINLOCK(nvram_lock);
 
+static long nvram_error_log_index = -1;
+static long nvram_error_log_size = 0;
+
+struct err_log_info {
+	int error_type;
+	unsigned int seq_num;
+};
+#define NVRAM_MAX_REQ		2079
+#define NVRAM_MIN_REQ		1055
+
+#define NVRAM_LOG_PART_NAME	"ibm,rtas-log"
 
 static ssize_t pSeries_nvram_read(char *buf, size_t count, loff_t *index)
 {
@@ -119,6 +133,197 @@
 	return nvram_size ? nvram_size : -ENODEV;
 }
 
+
+/* nvram_write_error_log
+ *
+ * We need to buffer the error logs into nvram to ensure that we have
+ * the failure information to decode.  If we have a severe error there
+ * is no way to guarantee that the OS or the machine is in a state to
+ * get back to user land and write the error to disk.  For example if
+ * the SCSI device driver causes a Machine Check by writing to a bad
+ * IO address, there is no way of guaranteeing that the device driver
+ * is in any state that it would also be able to write the error data
+ * captured to disk, thus we buffer it in NVRAM for analysis on the
+ * next boot.
+ *
+ * In NVRAM the partition containing the error log buffer will look like:
+ * Header (in bytes):
+ * +-----------+----------+--------+------------+------------------+
+ * | signature | checksum | length | name       | data             |
+ * |0          |1         |2      3|4         15|16        length-1|
+ * +-----------+----------+--------+------------+------------------+
+ *
+ * The 'data' section would look like (in bytes):
+ * +--------------+------------+-----------------------------------+
+ * | event_logged | sequence # | error log                         |
+ * |0            3|4          7|8            nvram_error_log_size-1|
+ * +--------------+------------+-----------------------------------+
+ *
+ * event_logged: 0 if event has not been logged to syslog, 1 if it has
+ * sequence #: The unique sequence # for each event. (until it wraps)
+ * error log: The error log from event_scan
+ */
+int nvram_write_error_log(char * buff, int length,
+                          unsigned int err_type, unsigned int error_log_cnt)
+{
+	int rc;
+	loff_t tmp_index;
+	struct err_log_info info;
+	
+	if (nvram_error_log_index == -1) {
+		return -ESPIPE;
+	}
+
+	if (length > nvram_error_log_size) {
+		length = nvram_error_log_size;
+	}
+
+	info.error_type = err_type;
+	info.seq_num = error_log_cnt;
+
+	tmp_index = nvram_error_log_index;
+
+	rc = ppc_md.nvram_write((char *)&info, sizeof(struct err_log_info), &tmp_index);
+	if (rc <= 0) {
+		printk(KERN_ERR "nvram_write_error_log: Failed nvram_write (%d)\n", rc);
+		return rc;
+	}
+
+	rc = ppc_md.nvram_write(buff, length, &tmp_index);
+	if (rc <= 0) {
+		printk(KERN_ERR "nvram_write_error_log: Failed nvram_write (%d)\n", rc);
+		return rc;
+	}
+	
+	return 0;
+}
+
+/* nvram_read_error_log
+ *
+ * Read the error log from nvram, at most 'length' bytes
+ */
+int nvram_read_error_log(char * buff, int length,
+                         unsigned int * err_type, unsigned int * error_log_cnt)
+{
+	int rc;
+	loff_t tmp_index;
+	struct err_log_info info;
+	
+	if (nvram_error_log_index == -1)
+		return -1;
+
+	if (length > nvram_error_log_size)
+		length = nvram_error_log_size;
+
+	tmp_index = nvram_error_log_index;
+
+	rc = ppc_md.nvram_read((char *)&info, sizeof(struct err_log_info), &tmp_index);
+	if (rc <= 0) {
+		printk(KERN_ERR "nvram_read_error_log: Failed nvram_read (%d)\n", rc);
+		return rc;
+	}
+
+	rc = ppc_md.nvram_read(buff, length, &tmp_index);
+	if (rc <= 0) {
+		printk(KERN_ERR "nvram_read_error_log: Failed nvram_read (%d)\n", rc);
+		return rc;
+	}
+
+	*error_log_cnt = info.seq_num;
+	*err_type = info.error_type;
+
+	return 0;
+}
+
+/* This doesn't actually zero anything, but it sets the event_logged
+ * word to indicate that this event is safely in syslog.
+ */
+int nvram_clear_error_log(void)
+{
+	loff_t tmp_index;
+	int clear_word = ERR_FLAG_ALREADY_LOGGED;
+	int rc;
+
+	if (nvram_error_log_index == -1)
+		return -1;
+
+	tmp_index = nvram_error_log_index;
+	
+	rc = ppc_md.nvram_write((char *)&clear_word, sizeof(int), &tmp_index);
+	if (rc <= 0) {
+		printk(KERN_ERR "nvram_clear_error_log: Failed nvram_write (%d)\n", rc);
+		return rc;
+	}
+
+	return 0;
+}
+
+/* pseries_nvram_init_log_partition
+ *
+ * This will set up the partition we need for buffering the
+ * error logs and clean up partitions if needed.
+ *
+ * The general strategy is the following:
+ * 1.) If there is a log partition large enough then use it.
+ * 2.) If there is none large enough, search
+ * for a free partition that is large enough.
+ * 3.) If there is no free partition large enough, remove
+ * _all_ OS partitions and consolidate the space.
+ * 4.) First try getting a chunk that will satisfy the maximum
+ * error log size (NVRAM_MAX_REQ).
+ * 5.) If the max chunk cannot be allocated then try finding a chunk
+ * that will satisfy the minimum needed (NVRAM_MIN_REQ).
+ */
+static int __init pseries_nvram_init_log_partition(void)
+{
+	loff_t p;
+	int size;
+
+	/* Scan nvram for partitions */
+	nvram_scan_partitions();
+
+	/* Look for ours */
+	p = nvram_find_partition(NVRAM_LOG_PART_NAME, NVRAM_SIG_OS, &size);
+
+	/* Found one but too small, remove it */
+	if (p && size < NVRAM_MIN_REQ) {
+		pr_info("nvram: Found too small "NVRAM_LOG_PART_NAME" partition, "
+			"removing it...");
+		nvram_remove_partition(NVRAM_LOG_PART_NAME, NVRAM_SIG_OS);
+		p = 0;
+	}
+
+	/* Create one if we didn't find */
+	if (!p) {
+		p = nvram_create_partition(NVRAM_LOG_PART_NAME, NVRAM_SIG_OS,
+					   NVRAM_MAX_REQ, NVRAM_MIN_REQ);
+		/* No room for it, try to get rid of any OS partition
+		 * and try again
+		 */
+		if (p == -ENOSPC) {
+			pr_info("nvram: No room to create "NVRAM_LOG_PART_NAME
+				" partition, deleting all OS partitions...");
+			nvram_remove_partition(NULL, NVRAM_SIG_OS);
+			p = nvram_create_partition(NVRAM_LOG_PART_NAME,
+						   NVRAM_SIG_OS, NVRAM_MAX_REQ,
+						   NVRAM_MIN_REQ);
+		}
+	}
+
+	if (p <= 0) {
+		pr_err("nvram: Failed to find or create "NVRAM_LOG_PART_NAME
+		       " partition, err %d\n", (int)p);
+		return 0;
+	}
+
+	nvram_error_log_index = p;
+	nvram_error_log_size = nvram_get_partition_size(p) -
+		sizeof(struct err_log_info);
+	
+	return 0;
+}
+machine_arch_initcall(pseries, pseries_nvram_init_log_partition);
+
 int __init pSeries_nvram_init(void)
 {
 	struct device_node *nvram;
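A small sketch (illustration only, not from the patch; struct and field names are assumptions) of the NVRAM partition layout described in the comment above, expressed as packed structs:

#include <stdint.h>

/* header, bytes 0..15, followed by the 'data' section */
struct nvram_part_header {
	uint8_t  signature;		/* byte 0 */
	uint8_t  checksum;		/* byte 1 */
	uint16_t length;		/* bytes 2-3 */
	char     name[12];		/* bytes 4-15 */
} __attribute__((packed));

/* start of the 'data' section used for the error log */
struct nvram_err_log_data {
	uint32_t event_logged;		/* 0 = not yet in syslog, 1 = logged */
	uint32_t seq_num;		/* unique per event, until it wraps */
	/* error log bytes follow, up to nvram_error_log_size */
} __attribute__((packed));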
diff --git a/arch/powerpc/platforms/pseries/pseries_energy.c b/arch/powerpc/platforms/pseries/pseries_energy.c
new file mode 100644
index 0000000..c8b3c69
--- /dev/null
+++ b/arch/powerpc/platforms/pseries/pseries_energy.c
@@ -0,0 +1,326 @@
+/*
+ * POWER platform energy management driver
+ * Copyright (C) 2010 IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This pseries platform device driver provides access to
+ * platform energy management capabilities.
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/seq_file.h>
+#include <linux/sysdev.h>
+#include <linux/cpu.h>
+#include <linux/of.h>
+#include <asm/cputhreads.h>
+#include <asm/page.h>
+#include <asm/hvcall.h>
+
+
+#define MODULE_VERS "1.0"
+#define MODULE_NAME "pseries_energy"
+
+/* Driver flags */
+
+static int sysfs_entries;
+
+/* Helper routines */
+
+/*
+ * Routine to detect firmware support for hcall
+ * return 1 if H_BEST_ENERGY is supported
+ * else return 0
+ */
+
+static int check_for_h_best_energy(void)
+{
+	struct device_node *rtas = NULL;
+	const char *hypertas, *s;
+	int length;
+	int rc = 0;
+
+	rtas = of_find_node_by_path("/rtas");
+	if (!rtas)
+		return 0;
+
+	hypertas = of_get_property(rtas, "ibm,hypertas-functions", &length);
+	if (!hypertas) {
+		of_node_put(rtas);
+		return 0;
+	}
+
+	/* hypertas will have list of strings with hcall names */
+	for (s = hypertas; s < hypertas + length; s += strlen(s) + 1) {
+		if (!strncmp("hcall-best-energy-1", s, 19)) {
+			rc = 1; /* Found the string */
+			break;
+		}
+	}
+	of_node_put(rtas);
+	return rc;
+}
+
+/* Helper Routines to convert between drc_index to cpu numbers */
+
+static u32 cpu_to_drc_index(int cpu)
+{
+	struct device_node *dn = NULL;
+	const int *indexes;
+	int i;
+	int rc = 1;
+	u32 ret = 0;
+
+	dn = of_find_node_by_path("/cpus");
+	if (dn == NULL)
+		goto err;
+	indexes = of_get_property(dn, "ibm,drc-indexes", NULL);
+	if (indexes == NULL)
+		goto err_of_node_put;
+	/* Convert logical cpu number to core number */
+	i = cpu_core_index_of_thread(cpu);
+	/*
+	 * The first element indexes[0] is the number of drc_indexes
+	 * returned in the list.  Hence i+1 will get the drc_index
+	 * corresponding to core number i.
+	 */
+	WARN_ON(i > indexes[0]);
+	ret = indexes[i + 1];
+	rc = 0;
+
+err_of_node_put:
+	of_node_put(dn);
+err:
+	if (rc)
+		printk(KERN_WARNING "cpu_to_drc_index(%d) failed\n", cpu);
+	return ret;
+}
+
+static int drc_index_to_cpu(u32 drc_index)
+{
+	struct device_node *dn = NULL;
+	const int *indexes;
+	int i, cpu = 0;
+	int rc = 1;
+
+	dn = of_find_node_by_path("/cpus");
+	if (dn == NULL)
+		goto err;
+	indexes = of_get_property(dn, "ibm,drc-indexes", NULL);
+	if (indexes == NULL)
+		goto err_of_node_put;
+	/*
+	 * First element in the array is the number of drc_indexes
+	 * returned.  Search through the list to find the matching
+	 * drc_index and get the core number
+	 */
+	for (i = 0; i < indexes[0]; i++) {
+		if (indexes[i + 1] == drc_index)
+			break;
+	}
+	/* Convert core number to logical cpu number */
+	cpu = cpu_first_thread_of_core(i);
+	rc = 0;
+
+err_of_node_put:
+	of_node_put(dn);
+err:
+	if (rc)
+		printk(KERN_WARNING "drc_index_to_cpu(%u) failed\n", drc_index);
+	return cpu;
+}
+
+/*
+ * pseries hypervisor call H_BEST_ENERGY provides hints to OS on
+ * preferred logical cpus to activate or deactivate for optimized
+ * energy consumption.
+ */
+
+#define FLAGS_MODE1	0x004E200000080E01
+#define FLAGS_MODE2	0x004E200000080401
+#define FLAGS_ACTIVATE  0x100
+
+static ssize_t get_best_energy_list(char *page, int activate)
+{
+	int rc, cnt, i, cpu;
+	unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];
+	unsigned long flags = 0;
+	u32 *buf_page;
+	char *s = page;
+
+	buf_page = (u32 *) get_zeroed_page(GFP_KERNEL);
+	if (!buf_page)
+		return -ENOMEM;
+
+	flags = FLAGS_MODE1;
+	if (activate)
+		flags |= FLAGS_ACTIVATE;
+
+	rc = plpar_hcall9(H_BEST_ENERGY, retbuf, flags, 0, __pa(buf_page),
+				0, 0, 0, 0, 0, 0);
+	if (rc != H_SUCCESS) {
+		free_page((unsigned long) buf_page);
+		return -EINVAL;
+	}
+
+	cnt = retbuf[0];
+	for (i = 0; i < cnt; i++) {
+		cpu = drc_index_to_cpu(buf_page[2*i+1]);
+		if ((cpu_online(cpu) && !activate) ||
+		    (!cpu_online(cpu) && activate))
+			s += sprintf(s, "%d,", cpu);
+	}
+	if (s > page) { /* Something to show */
+		s--; /* Suppress last comma */
+		s += sprintf(s, "\n");
+	}
+
+	free_page((unsigned long) buf_page);
+	return s-page;
+}
+
+static ssize_t get_best_energy_data(struct sys_device *dev,
+					char *page, int activate)
+{
+	int rc;
+	unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];
+	unsigned long flags = 0;
+
+	flags = FLAGS_MODE2;
+	if (activate)
+		flags |= FLAGS_ACTIVATE;
+
+	rc = plpar_hcall9(H_BEST_ENERGY, retbuf, flags,
+				cpu_to_drc_index(dev->id),
+				0, 0, 0, 0, 0, 0, 0);
+
+	if (rc != H_SUCCESS)
+		return -EINVAL;
+
+	return sprintf(page, "%lu\n", retbuf[1] >> 32);
+}
+
+/* Wrapper functions */
+
+static ssize_t cpu_activate_hint_list_show(struct sysdev_class *class,
+			struct sysdev_class_attribute *attr, char *page)
+{
+	return get_best_energy_list(page, 1);
+}
+
+static ssize_t cpu_deactivate_hint_list_show(struct sysdev_class *class,
+			struct sysdev_class_attribute *attr, char *page)
+{
+	return get_best_energy_list(page, 0);
+}
+
+static ssize_t percpu_activate_hint_show(struct sys_device *dev,
+			struct sysdev_attribute *attr, char *page)
+{
+	return get_best_energy_data(dev, page, 1);
+}
+
+static ssize_t percpu_deactivate_hint_show(struct sys_device *dev,
+			struct sysdev_attribute *attr, char *page)
+{
+	return get_best_energy_data(dev, page, 0);
+}
+
+/*
+ * Create sysfs interface:
+ * /sys/devices/system/cpu/pseries_activate_hint_list
+ * /sys/devices/system/cpu/pseries_deactivate_hint_list
+ *	Comma separated list of cpus to activate or deactivate
+ * /sys/devices/system/cpu/cpuN/pseries_activate_hint
+ * /sys/devices/system/cpu/cpuN/pseries_deactivate_hint
+ *	Per-cpu value of the hint
+ */
+
+struct sysdev_class_attribute attr_cpu_activate_hint_list =
+		_SYSDEV_CLASS_ATTR(pseries_activate_hint_list, 0444,
+		cpu_activate_hint_list_show, NULL);
+
+struct sysdev_class_attribute attr_cpu_deactivate_hint_list =
+		_SYSDEV_CLASS_ATTR(pseries_deactivate_hint_list, 0444,
+		cpu_deactivate_hint_list_show, NULL);
+
+struct sysdev_attribute attr_percpu_activate_hint =
+		_SYSDEV_ATTR(pseries_activate_hint, 0444,
+		percpu_activate_hint_show, NULL);
+
+struct sysdev_attribute attr_percpu_deactivate_hint =
+		_SYSDEV_ATTR(pseries_deactivate_hint, 0444,
+		percpu_deactivate_hint_show, NULL);
+
+static int __init pseries_energy_init(void)
+{
+	int cpu, err;
+	struct sys_device *cpu_sys_dev;
+
+	if (!check_for_h_best_energy()) {
+		printk(KERN_INFO "Hypercall H_BEST_ENERGY not supported\n");
+		return 0;
+	}
+	/* Create the sysfs files */
+	err = sysfs_create_file(&cpu_sysdev_class.kset.kobj,
+				&attr_cpu_activate_hint_list.attr);
+	if (!err)
+		err = sysfs_create_file(&cpu_sysdev_class.kset.kobj,
+				&attr_cpu_deactivate_hint_list.attr);
+
+	if (err)
+		return err;
+	for_each_possible_cpu(cpu) {
+		cpu_sys_dev = get_cpu_sysdev(cpu);
+		err = sysfs_create_file(&cpu_sys_dev->kobj,
+				&attr_percpu_activate_hint.attr);
+		if (err)
+			break;
+		err = sysfs_create_file(&cpu_sys_dev->kobj,
+				&attr_percpu_deactivate_hint.attr);
+		if (err)
+			break;
+	}
+
+	if (err)
+		return err;
+
+	sysfs_entries = 1; /* Remove entries on cleanup */
+	return 0;
+
+}
+
+static void __exit pseries_energy_cleanup(void)
+{
+	int cpu;
+	struct sys_device *cpu_sys_dev;
+
+	if (!sysfs_entries)
+		return;
+
+	/* Remove the sysfs files */
+	sysfs_remove_file(&cpu_sysdev_class.kset.kobj,
+				&attr_cpu_activate_hint_list.attr);
+
+	sysfs_remove_file(&cpu_sysdev_class.kset.kobj,
+				&attr_cpu_deactivate_hint_list.attr);
+
+	for_each_possible_cpu(cpu) {
+		cpu_sys_dev = get_cpu_sysdev(cpu);
+		sysfs_remove_file(&cpu_sys_dev->kobj,
+				&attr_percpu_activate_hint.attr);
+		sysfs_remove_file(&cpu_sys_dev->kobj,
+				&attr_percpu_deactivate_hint.attr);
+	}
+}
+
+module_init(pseries_energy_init);
+module_exit(pseries_energy_cleanup);
+MODULE_DESCRIPTION("Driver for pSeries platform energy management");
+MODULE_AUTHOR("Vaidyanathan Srinivasan");
+MODULE_LICENSE("GPL");
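An illustrative userspace snippet (not part of the patch) that consumes one of the sysfs files created above; the path and the comma-separated format come from the driver's comments:

#include <stdio.h>

int main(void)
{
	char buf[256];
	FILE *f = fopen("/sys/devices/system/cpu/pseries_activate_hint_list", "r");

	if (!f)
		return 1;
	if (fgets(buf, sizeof(buf), f))
		printf("cpus to activate: %s", buf);	/* comma-separated cpu list */
	fclose(f);
	return 0;
}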
diff --git a/arch/powerpc/platforms/pseries/suspend.c b/arch/powerpc/platforms/pseries/suspend.c
index ed72098..a8ca289 100644
--- a/arch/powerpc/platforms/pseries/suspend.c
+++ b/arch/powerpc/platforms/pseries/suspend.c
@@ -153,7 +153,7 @@
 	.name = "power",
 };
 
-static struct platform_suspend_ops pseries_suspend_ops = {
+static const struct platform_suspend_ops pseries_suspend_ops = {
 	.valid		= suspend_valid_only_mem,
 	.begin		= pseries_suspend_begin,
 	.prepare_late	= pseries_prepare_late,
diff --git a/arch/powerpc/sysdev/Makefile b/arch/powerpc/sysdev/Makefile
index 0bef9da..9c29734 100644
--- a/arch/powerpc/sysdev/Makefile
+++ b/arch/powerpc/sysdev/Makefile
@@ -41,6 +41,7 @@
 ifeq ($(CONFIG_PCI),y)
 obj-$(CONFIG_4xx)		+= ppc4xx_pci.o
 endif
+obj-$(CONFIG_PPC4xx_CPM)	+= ppc4xx_cpm.o
 obj-$(CONFIG_PPC4xx_GPIO)	+= ppc4xx_gpio.o
 
 obj-$(CONFIG_CPM)		+= cpm_common.o
diff --git a/arch/powerpc/sysdev/dart_iommu.c b/arch/powerpc/sysdev/dart_iommu.c
index 17cf15e..8e9e06a7c 100644
--- a/arch/powerpc/sysdev/dart_iommu.c
+++ b/arch/powerpc/sysdev/dart_iommu.c
@@ -312,17 +312,10 @@
 
 static void pci_dma_bus_setup_dart(struct pci_bus *bus)
 {
-	struct device_node *dn;
-
 	if (!iommu_table_dart_inited) {
 		iommu_table_dart_inited = 1;
 		iommu_table_dart_setup();
 	}
-
-	dn = pci_bus_to_OF_node(bus);
-
-	if (dn)
-		PCI_DN(dn)->iommu_table = &iommu_table_dart;
 }
 
 static bool dart_device_on_pcie(struct device *dev)
@@ -373,7 +366,7 @@
 	if (dn == NULL) {
 		dn = of_find_compatible_node(NULL, "dart", "u4-dart");
 		if (dn == NULL)
-			goto bail;
+			return;	/* use default direct_dma_ops */
 		dart_is_u4 = 1;
 	}
 
diff --git a/arch/powerpc/sysdev/fsl_pmc.c b/arch/powerpc/sysdev/fsl_pmc.c
index 44de855..e9381bf 100644
--- a/arch/powerpc/sysdev/fsl_pmc.c
+++ b/arch/powerpc/sysdev/fsl_pmc.c
@@ -53,7 +53,7 @@
 	return 1;
 }
 
-static struct platform_suspend_ops pmc_suspend_ops = {
+static const struct platform_suspend_ops pmc_suspend_ops = {
 	.valid = pmc_suspend_valid,
 	.enter = pmc_suspend_enter,
 };
diff --git a/arch/powerpc/sysdev/fsl_rio.c b/arch/powerpc/sysdev/fsl_rio.c
index 9725369..9f99bef 100644
--- a/arch/powerpc/sysdev/fsl_rio.c
+++ b/arch/powerpc/sysdev/fsl_rio.c
@@ -973,7 +973,6 @@
 	if (dsr & DOORBELL_DSR_QFI) {
 		pr_info("RIO: doorbell queue full\n");
 		out_be32(&priv->msg_regs->dsr, DOORBELL_DSR_QFI);
-		goto out;
 	}
 
 	/* XXX Need to check/dispatch until queue empty */
diff --git a/arch/powerpc/sysdev/mpc8xxx_gpio.c b/arch/powerpc/sysdev/mpc8xxx_gpio.c
index c0ea05e..c48cd81 100644
--- a/arch/powerpc/sysdev/mpc8xxx_gpio.c
+++ b/arch/powerpc/sysdev/mpc8xxx_gpio.c
@@ -1,5 +1,5 @@
 /*
- * GPIOs on MPC8349/8572/8610 and compatible
+ * GPIOs on MPC512x/8349/8572/8610 and compatible
  *
  * Copyright (C) 2008 Peter Korsgaard <jacmet@sunsite.dk>
  *
@@ -26,6 +26,7 @@
 #define GPIO_IER		0x0c
 #define GPIO_IMR		0x10
 #define GPIO_ICR		0x14
+#define GPIO_ICR2		0x18
 
 struct mpc8xxx_gpio_chip {
 	struct of_mm_gpio_chip mm_gc;
@@ -37,6 +38,7 @@
 	 */
 	u32 data;
 	struct irq_host *irq;
+	void *of_dev_id_data;
 };
 
 static inline u32 mpc8xxx_gpio2mask(unsigned int gpio)
@@ -215,6 +217,51 @@
 	return 0;
 }
 
+static int mpc512x_irq_set_type(unsigned int virq, unsigned int flow_type)
+{
+	struct mpc8xxx_gpio_chip *mpc8xxx_gc = get_irq_chip_data(virq);
+	struct of_mm_gpio_chip *mm = &mpc8xxx_gc->mm_gc;
+	unsigned long gpio = virq_to_hw(virq);
+	void __iomem *reg;
+	unsigned int shift;
+	unsigned long flags;
+
+	if (gpio < 16) {
+		reg = mm->regs + GPIO_ICR;
+		shift = (15 - gpio) * 2;
+	} else {
+		reg = mm->regs + GPIO_ICR2;
+		shift = (15 - (gpio % 16)) * 2;
+	}
+
+	switch (flow_type) {
+	case IRQ_TYPE_EDGE_FALLING:
+	case IRQ_TYPE_LEVEL_LOW:
+		spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
+		clrsetbits_be32(reg, 3 << shift, 2 << shift);
+		spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
+		break;
+
+	case IRQ_TYPE_EDGE_RISING:
+	case IRQ_TYPE_LEVEL_HIGH:
+		spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
+		clrsetbits_be32(reg, 3 << shift, 1 << shift);
+		spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
+		break;
+
+	case IRQ_TYPE_EDGE_BOTH:
+		spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
+		clrbits32(reg, 3 << shift);
+		spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags);
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
 static struct irq_chip mpc8xxx_irq_chip = {
 	.name		= "mpc8xxx-gpio",
 	.unmask		= mpc8xxx_irq_unmask,
@@ -226,6 +273,11 @@
 static int mpc8xxx_gpio_irq_map(struct irq_host *h, unsigned int virq,
 				irq_hw_number_t hw)
 {
+	struct mpc8xxx_gpio_chip *mpc8xxx_gc = h->host_data;
+
+	if (mpc8xxx_gc->of_dev_id_data)
+		mpc8xxx_irq_chip.set_type = mpc8xxx_gc->of_dev_id_data;
+
 	set_irq_chip_data(virq, h->host_data);
 	set_irq_chip_and_handler(virq, &mpc8xxx_irq_chip, handle_level_irq);
 	set_irq_type(virq, IRQ_TYPE_NONE);
@@ -253,11 +305,20 @@
 	.xlate	= mpc8xxx_gpio_irq_xlate,
 };
 
+static struct of_device_id mpc8xxx_gpio_ids[] __initdata = {
+	{ .compatible = "fsl,mpc8349-gpio", },
+	{ .compatible = "fsl,mpc8572-gpio", },
+	{ .compatible = "fsl,mpc8610-gpio", },
+	{ .compatible = "fsl,mpc5121-gpio", .data = mpc512x_irq_set_type, },
+	{}
+};
+
 static void __init mpc8xxx_add_controller(struct device_node *np)
 {
 	struct mpc8xxx_gpio_chip *mpc8xxx_gc;
 	struct of_mm_gpio_chip *mm_gc;
 	struct gpio_chip *gc;
+	const struct of_device_id *id;
 	unsigned hwirq;
 	int ret;
 
@@ -297,6 +358,10 @@
 	if (!mpc8xxx_gc->irq)
 		goto skip_irq;
 
+	id = of_match_node(mpc8xxx_gpio_ids, np);
+	if (id)
+		mpc8xxx_gc->of_dev_id_data = id->data;
+
 	mpc8xxx_gc->irq->host_data = mpc8xxx_gc;
 
 	/* ack and mask all irqs */
@@ -321,13 +386,7 @@
 {
 	struct device_node *np;
 
-	for_each_compatible_node(np, NULL, "fsl,mpc8349-gpio")
-		mpc8xxx_add_controller(np);
-
-	for_each_compatible_node(np, NULL, "fsl,mpc8572-gpio")
-		mpc8xxx_add_controller(np);
-
-	for_each_compatible_node(np, NULL, "fsl,mpc8610-gpio")
+	for_each_matching_node(np, mpc8xxx_gpio_ids)
 		mpc8xxx_add_controller(np);
 
 	for_each_compatible_node(np, NULL, "fsl,qoriq-gpio")
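A standalone sketch (illustration only) of the register/shift mapping computed by mpc512x_irq_set_type() above for the 32 MPC512x GPIO lines:

#include <stdio.h>

int main(void)
{
	unsigned int gpio;

	for (gpio = 0; gpio < 32; gpio++) {
		/* lines 0-15 use GPIO_ICR, lines 16-31 use GPIO_ICR2 */
		const char *reg = (gpio < 16) ? "GPIO_ICR" : "GPIO_ICR2";
		unsigned int shift = (15 - (gpio % 16)) * 2;

		printf("gpio %2u -> %s, shift %u\n", gpio, reg, shift);
	}
	return 0;
}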
diff --git a/arch/powerpc/sysdev/ppc4xx_cpm.c b/arch/powerpc/sysdev/ppc4xx_cpm.c
new file mode 100644
index 0000000..73b86cc
--- /dev/null
+++ b/arch/powerpc/sysdev/ppc4xx_cpm.c
@@ -0,0 +1,346 @@
+/*
+ * PowerPC 4xx Clock and Power Management
+ *
+ * Copyright (C) 2010, Applied Micro Circuits Corporation
+ * Victor Gallardo (vgallardo@apm.com)
+ *
+ * Based on arch/powerpc/platforms/44x/idle.c:
+ * Jerone Young <jyoung5@us.ibm.com>
+ * Copyright 2008 IBM Corp.
+ *
+ * Based on arch/powerpc/sysdev/fsl_pmc.c:
+ * Anton Vorontsov <avorontsov@ru.mvista.com>
+ * Copyright 2009  MontaVista Software, Inc.
+ *
+ * See file CREDITS for list of people who contributed to this
+ * project.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/of_platform.h>
+#include <linux/sysfs.h>
+#include <linux/cpu.h>
+#include <linux/suspend.h>
+#include <asm/dcr.h>
+#include <asm/dcr-native.h>
+#include <asm/machdep.h>
+
+#define CPM_ER	0
+#define CPM_FR	1
+#define CPM_SR	2
+
+#define CPM_IDLE_WAIT	0
+#define CPM_IDLE_DOZE	1
+
+struct cpm {
+	dcr_host_t	dcr_host;
+	unsigned int	dcr_offset[3];
+	unsigned int	powersave_off;
+	unsigned int	unused;
+	unsigned int	idle_doze;
+	unsigned int	standby;
+	unsigned int	suspend;
+};
+
+static struct cpm cpm;
+
+struct cpm_idle_mode {
+	unsigned int enabled;
+	const char  *name;
+};
+
+static struct cpm_idle_mode idle_mode[] = {
+	[CPM_IDLE_WAIT] = { 1, "wait" }, /* default */
+	[CPM_IDLE_DOZE] = { 0, "doze" },
+};
+
+static unsigned int cpm_set(unsigned int cpm_reg, unsigned int mask)
+{
+	unsigned int value;
+
+	/* CPM controller supports 3 different types of sleep interface
+	 * known as class 1, 2 and 3. For class 1 units, they are
+	 * unconditionally put to sleep when the corresponding CPM bit is
+	 * set. For class 2 and 3 units this is not the case; if they can be
+	 * put to sleep, they will. Here we do not verify, we just
+	 * set them and expect them to eventually go off when they can.
+	 */
+	value = dcr_read(cpm.dcr_host, cpm.dcr_offset[cpm_reg]);
+	dcr_write(cpm.dcr_host, cpm.dcr_offset[cpm_reg], value | mask);
+
+	/* return old state, to restore later if needed */
+	return value;
+}
+
+static void cpm_idle_wait(void)
+{
+	unsigned long msr_save;
+
+	/* save off initial state */
+	msr_save = mfmsr();
+	/* sync required when CPM0_ER[CPU] is set */
+	mb();
+	/* set wait state MSR */
+	mtmsr(msr_save|MSR_WE|MSR_EE|MSR_CE|MSR_DE);
+	isync();
+	/* return to initial state */
+	mtmsr(msr_save);
+	isync();
+}
+
+static void cpm_idle_sleep(unsigned int mask)
+{
+	unsigned int er_save;
+
+	/* update CPM_ER state */
+	er_save = cpm_set(CPM_ER, mask);
+
+	/* go to wait state so that CPM0_ER[CPU] can take effect */
+	cpm_idle_wait();
+
+	/* restore CPM_ER state */
+	dcr_write(cpm.dcr_host, cpm.dcr_offset[CPM_ER], er_save);
+}
+
+static void cpm_idle_doze(void)
+{
+	cpm_idle_sleep(cpm.idle_doze);
+}
+
+static void cpm_idle_config(int mode)
+{
+	int i;
+
+	if (idle_mode[mode].enabled)
+		return;
+
+	for (i = 0; i < ARRAY_SIZE(idle_mode); i++)
+		idle_mode[i].enabled = 0;
+
+	idle_mode[mode].enabled = 1;
+}
+
+static ssize_t cpm_idle_show(struct kobject *kobj,
+			     struct kobj_attribute *attr, char *buf)
+{
+	char *s = buf;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(idle_mode); i++) {
+		if (idle_mode[i].enabled)
+			s += sprintf(s, "[%s] ", idle_mode[i].name);
+		else
+			s += sprintf(s, "%s ", idle_mode[i].name);
+	}
+
+	*(s-1) = '\n'; /* convert the last space to a newline */
+
+	return s - buf;
+}
+
+static ssize_t cpm_idle_store(struct kobject *kobj,
+			      struct kobj_attribute *attr,
+			      const char *buf, size_t n)
+{
+	int i;
+	char *p;
+	int len;
+
+	p = memchr(buf, '\n', n);
+	len = p ? p - buf : n;
+
+	for (i = 0; i < ARRAY_SIZE(idle_mode); i++) {
+		if (strncmp(buf, idle_mode[i].name, len) == 0) {
+			cpm_idle_config(i);
+			return n;
+		}
+	}
+
+	return -EINVAL;
+}
+
+static struct kobj_attribute cpm_idle_attr =
+	__ATTR(idle, 0644, cpm_idle_show, cpm_idle_store);
+
+static void cpm_idle_config_sysfs(void)
+{
+	struct sys_device *sys_dev;
+	unsigned long ret;
+
+	sys_dev = get_cpu_sysdev(0);
+
+	ret = sysfs_create_file(&sys_dev->kobj,
+				&cpm_idle_attr.attr);
+	if (ret)
+		printk(KERN_WARNING
+		       "cpm: failed to create idle sysfs entry\n");
+}
+
+static void cpm_idle(void)
+{
+	if (idle_mode[CPM_IDLE_DOZE].enabled)
+		cpm_idle_doze();
+	else
+		cpm_idle_wait();
+}
+
+static int cpm_suspend_valid(suspend_state_t state)
+{
+	switch (state) {
+	case PM_SUSPEND_STANDBY:
+		return !!cpm.standby;
+	case PM_SUSPEND_MEM:
+		return !!cpm.suspend;
+	default:
+		return 0;
+	}
+}
+
+static void cpm_suspend_standby(unsigned int mask)
+{
+	unsigned long tcr_save;
+
+	/* disable decrement interrupt */
+	tcr_save = mfspr(SPRN_TCR);
+	mtspr(SPRN_TCR, tcr_save & ~TCR_DIE);
+
+	/* go to sleep state */
+	cpm_idle_sleep(mask);
+
+	/* restore decrement interrupt */
+	mtspr(SPRN_TCR, tcr_save);
+}
+
+static int cpm_suspend_enter(suspend_state_t state)
+{
+	switch (state) {
+	case PM_SUSPEND_STANDBY:
+		cpm_suspend_standby(cpm.standby);
+		break;
+	case PM_SUSPEND_MEM:
+		cpm_suspend_standby(cpm.suspend);
+		break;
+	}
+
+	return 0;
+}
+
+static struct platform_suspend_ops cpm_suspend_ops = {
+	.valid		= cpm_suspend_valid,
+	.enter		= cpm_suspend_enter,
+};
+
+static int cpm_get_uint_property(struct device_node *np,
+				 const char *name)
+{
+	int len;
+	const unsigned int *prop = of_get_property(np, name, &len);
+
+	if (prop == NULL || len < sizeof(u32))
+		return 0;
+
+	return *prop;
+}
+
+static int __init cpm_init(void)
+{
+	struct device_node *np;
+	int dcr_base, dcr_len;
+	int ret = 0;
+
+	if (!cpm.powersave_off) {
+		cpm_idle_config(CPM_IDLE_WAIT);
+		ppc_md.power_save = &cpm_idle;
+	}
+
+	np = of_find_compatible_node(NULL, NULL, "ibm,cpm");
+	if (!np) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	dcr_base = dcr_resource_start(np, 0);
+	dcr_len = dcr_resource_len(np, 0);
+
+	if (dcr_base == 0 || dcr_len == 0) {
+		printk(KERN_ERR "cpm: could not parse dcr property for %s\n",
+		       np->full_name);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	cpm.dcr_host = dcr_map(np, dcr_base, dcr_len);
+
+	if (!DCR_MAP_OK(cpm.dcr_host)) {
+		printk(KERN_ERR "cpm: failed to map dcr property for %s\n",
+		       np->full_name);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	/* All 4xx SoCs with a CPM controller have one of two
+	 * different orders for the CPM registers. Some have the
+	 * CPM registers in the following order (ER,FR,SR). The
+	 * others have them in the following order (SR,ER,FR).
+	 */
+
+	if (cpm_get_uint_property(np, "er-offset") == 0) {
+		cpm.dcr_offset[CPM_ER] = 0;
+		cpm.dcr_offset[CPM_FR] = 1;
+		cpm.dcr_offset[CPM_SR] = 2;
+	} else {
+		cpm.dcr_offset[CPM_ER] = 1;
+		cpm.dcr_offset[CPM_FR] = 2;
+		cpm.dcr_offset[CPM_SR] = 0;
+	}
+
+	/* Now let's see what IPs to turn off for the following modes */
+
+	cpm.unused = cpm_get_uint_property(np, "unused-units");
+	cpm.idle_doze = cpm_get_uint_property(np, "idle-doze");
+	cpm.standby = cpm_get_uint_property(np, "standby");
+	cpm.suspend = cpm_get_uint_property(np, "suspend");
+
+	/* If some IPs are unused let's turn them off now */
+
+	if (cpm.unused) {
+		cpm_set(CPM_ER, cpm.unused);
+		cpm_set(CPM_FR, cpm.unused);
+	}
+
+	/* Now let's export interfaces */
+
+	if (!cpm.powersave_off && cpm.idle_doze)
+		cpm_idle_config_sysfs();
+
+	if (cpm.standby || cpm.suspend)
+		suspend_set_ops(&cpm_suspend_ops);
+out:
+	if (np)
+		of_node_put(np);
+	return ret;
+}
+
+late_initcall(cpm_init);
+
+static int __init cpm_powersave_off(char *arg)
+{
+	cpm.powersave_off = 1;
+	return 0;
+}
+__setup("powersave=off", cpm_powersave_off);
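A small sketch (illustration only, not from the patch) of the string format produced by cpm_idle_show() above, where the active mode is bracketed:

#include <stdio.h>

int main(void)
{
	const char *names[] = { "wait", "doze" };
	int enabled = 0;	/* index of the active mode */
	char buf[32], *s = buf;
	int i;

	for (i = 0; i < 2; i++)
		s += sprintf(s, i == enabled ? "[%s] " : "%s ", names[i]);
	*(s - 1) = '\n';	/* convert the trailing space, as the driver does */
	fputs(buf, stdout);	/* prints "[wait] doze" */
	return 0;
}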
diff --git a/arch/powerpc/sysdev/tsi108_dev.c b/arch/powerpc/sysdev/tsi108_dev.c
index c2d675b..ee05680 100644
--- a/arch/powerpc/sysdev/tsi108_dev.c
+++ b/arch/powerpc/sysdev/tsi108_dev.c
@@ -84,8 +84,8 @@
 		memset(&tsi_eth_data, 0, sizeof(tsi_eth_data));
 
 		ret = of_address_to_resource(np, 0, &r[0]);
-		DBG("%s: name:start->end = %s:0x%lx-> 0x%lx\n",
-			__func__,r[0].name, r[0].start, r[0].end);
+		DBG("%s: name:start->end = %s:%pR\n",
+		    __func__, r[0].name, &r[0]);
 		if (ret)
 			goto err;
 
@@ -93,8 +93,8 @@
 		r[1].start = irq_of_parse_and_map(np, 0);
 		r[1].end = irq_of_parse_and_map(np, 0);
 		r[1].flags = IORESOURCE_IRQ;
-		DBG("%s: name:start->end = %s:0x%lx-> 0x%lx\n",
-			__func__,r[1].name, r[1].start, r[1].end);
+		DBG("%s: name:start->end = %s:%pR\n",
+			__func__, r[1].name, &r[1]);
 
 		tsi_eth_dev =
 		    platform_device_register_simple("tsi-ethernet", i++, &r[0],
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 985d825..bade533 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -164,24 +164,18 @@
 	return r;
 }
 
-struct kvm *kvm_arch_create_vm(void)
+int kvm_arch_init_vm(struct kvm *kvm)
 {
-	struct kvm *kvm;
 	int rc;
 	char debug_name[16];
 
 	rc = s390_enable_sie();
 	if (rc)
-		goto out_nokvm;
-
-	rc = -ENOMEM;
-	kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);
-	if (!kvm)
-		goto out_nokvm;
+		goto out_err;
 
 	kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
 	if (!kvm->arch.sca)
-		goto out_nosca;
+		goto out_err;
 
 	sprintf(debug_name, "kvm-%u", current->pid);
 
@@ -195,13 +189,11 @@
 	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
 	VM_EVENT(kvm, 3, "%s", "vm created");
 
-	return kvm;
+	return 0;
 out_nodbf:
 	free_page((unsigned long)(kvm->arch.sca));
-out_nosca:
-	kfree(kvm);
-out_nokvm:
-	return ERR_PTR(rc);
+out_err:
+	return rc;
 }
 
 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
@@ -240,11 +232,8 @@
 void kvm_arch_destroy_vm(struct kvm *kvm)
 {
 	kvm_free_vcpus(kvm);
-	kvm_free_physmem(kvm);
 	free_page((unsigned long)(kvm->arch.sca));
 	debug_unregister(kvm->arch.dbf);
-	cleanup_srcu_struct(&kvm->srcu);
-	kfree(kvm);
 }
 
 /* Section: vcpu related */
diff --git a/arch/sh/boards/mach-hp6xx/pm.c b/arch/sh/boards/mach-hp6xx/pm.c
index 4499a37..adc9b4b 100644
--- a/arch/sh/boards/mach-hp6xx/pm.c
+++ b/arch/sh/boards/mach-hp6xx/pm.c
@@ -143,7 +143,7 @@
 	return 0;
 }
 
-static struct platform_suspend_ops hp6x0_pm_ops = {
+static const struct platform_suspend_ops hp6x0_pm_ops = {
 	.enter		= hp6x0_pm_enter,
 	.valid		= suspend_valid_only_mem,
 };
diff --git a/arch/sh/boards/mach-se/7724/setup.c b/arch/sh/boards/mach-se/7724/setup.c
index 527a0cd..b710757 100644
--- a/arch/sh/boards/mach-se/7724/setup.c
+++ b/arch/sh/boards/mach-se/7724/setup.c
@@ -318,6 +318,10 @@
 	},
 };
 
+static struct platform_device fsi_ak4642_device = {
+	.name		= "sh_fsi_a_ak4642",
+};
+
 /* KEYSC in SoC (Needs SW33-2 set to ON) */
 static struct sh_keysc_info keysc_info = {
 	.mode = SH_KEYSC_MODE_1,
@@ -590,6 +594,7 @@
 	&sh7724_usb0_host_device,
 	&sh7724_usb1_gadget_device,
 	&fsi_device,
+	&fsi_ak4642_device,
 	&sdhi0_cn7_device,
 	&sdhi1_cn8_device,
 	&irda_device,
diff --git a/arch/sh/boards/mach-sh03/rtc.c b/arch/sh/boards/mach-sh03/rtc.c
index 1b20099..f83ac79 100644
--- a/arch/sh/boards/mach-sh03/rtc.c
+++ b/arch/sh/boards/mach-sh03/rtc.c
@@ -108,7 +108,7 @@
 		__raw_writeb(real_minutes % 10, RTC_MIN1);
 		__raw_writeb(real_minutes / 10, RTC_MIN10);
 	} else {
-		printk(KERN_WARNING
+		printk_once(KERN_NOTICE
 		       "set_rtc_mmss: can't update from %d to %d\n",
 		       cmos_minutes, real_minutes);
 		retval = -1;
diff --git a/arch/sh/kernel/cpu/shmobile/pm.c b/arch/sh/kernel/cpu/shmobile/pm.c
index e559687..a6f95ae 100644
--- a/arch/sh/kernel/cpu/shmobile/pm.c
+++ b/arch/sh/kernel/cpu/shmobile/pm.c
@@ -141,7 +141,7 @@
 	return 0;
 }
 
-static struct platform_suspend_ops sh_pm_ops = {
+static const struct platform_suspend_ops sh_pm_ops = {
 	.enter          = sh_pm_enter,
 	.valid          = suspend_valid_only_mem,
 };
diff --git a/arch/sh/mm/cache-sh5.c b/arch/sh/mm/cache-sh5.c
index eb4cc4e..d1bffbc 100644
--- a/arch/sh/mm/cache-sh5.c
+++ b/arch/sh/mm/cache-sh5.c
@@ -568,7 +568,7 @@
 }
 
 /*
- * Flush the range [start,end] of kernel virtual adddress space from
+ * Flush the range [start,end] of kernel virtual address space from
  * the I-cache.  The corresponding range must be purged from the
  * D-cache also because the SH-5 doesn't have cache snooping between
  * the caches.  The addresses will be visible through the superpage
diff --git a/arch/sparc/kernel/cpu.c b/arch/sparc/kernel/cpu.c
index e447938..0dc714f 100644
--- a/arch/sparc/kernel/cpu.c
+++ b/arch/sparc/kernel/cpu.c
@@ -375,5 +375,5 @@
 	return 0;
 }
 
-arch_initcall(cpu_type_probe);
+early_initcall(cpu_type_probe);
 #endif
diff --git a/arch/sparc/kernel/pcr.c b/arch/sparc/kernel/pcr.c
index b87873c..ae96cf5 100644
--- a/arch/sparc/kernel/pcr.c
+++ b/arch/sparc/kernel/pcr.c
@@ -168,4 +168,4 @@
 	return err;
 }
 
-arch_initcall(pcr_arch_init);
+early_initcall(pcr_arch_init);
diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
index 42ad2ba..1e97709 100644
--- a/arch/sparc/kernel/traps_64.c
+++ b/arch/sparc/kernel/traps_64.c
@@ -622,7 +622,7 @@
 static const char CHAFSR_IERR_msg[] =
 	"Internal processor error";
 static const char CHAFSR_ISAP_msg[] =
-	"System request parity error on incoming addresss";
+	"System request parity error on incoming address";
 static const char CHAFSR_UCU_msg[] =
 	"Uncorrectable E-cache ECC error for ifetch/data";
 static const char CHAFSR_UCC_msg[] =
diff --git a/arch/um/Kconfig.um b/arch/um/Kconfig.um
index 50d6aa2..f8d1d0d 100644
--- a/arch/um/Kconfig.um
+++ b/arch/um/Kconfig.um
@@ -131,7 +131,7 @@
 
 config HIGHMEM
 	bool "Highmem support (EXPERIMENTAL)"
-	depends on !64BIT && EXPERIMENTAL
+	depends on !64BIT && BROKEN
 	default n
 	help
 	  This was used to allow UML to run with big amounts of memory.
diff --git a/arch/um/drivers/line.c b/arch/um/drivers/line.c
index 1664cce..050e4dd 100644
--- a/arch/um/drivers/line.c
+++ b/arch/um/drivers/line.c
@@ -821,12 +821,12 @@
 
 static void unregister_winch(struct tty_struct *tty)
 {
-	struct list_head *ele;
+	struct list_head *ele, *next;
 	struct winch *winch;
 
 	spin_lock(&winch_handler_lock);
 
-	list_for_each(ele, &winch_handlers) {
+	list_for_each_safe(ele, next, &winch_handlers) {
 		winch = list_entry(ele, struct winch, list);
 		if (winch->tty == tty) {
 			free_winch(winch, 1);
diff --git a/arch/um/drivers/mmapper_kern.c b/arch/um/drivers/mmapper_kern.c
index 8501e7d..7e0619c 100644
--- a/arch/um/drivers/mmapper_kern.c
+++ b/arch/um/drivers/mmapper_kern.c
@@ -37,13 +37,7 @@
 	if (*ppos > mmapper_size)
 		return -EINVAL;
 
-	if (count > mmapper_size - *ppos)
-		count = mmapper_size - *ppos;
-
-	if (copy_from_user(&v_buf[*ppos], buf, count))
-		return -EFAULT;
-
-	return count;
+	return simple_write_to_buffer(v_buf, mmapper_size, ppos, buf, count);
 }
 
 static long mmapper_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
@@ -137,3 +131,4 @@
 
 MODULE_AUTHOR("Greg Lonnon <glonnon@ridgerun.com>");
 MODULE_DESCRIPTION("DSPLinux simulator mmapper driver");
+MODULE_LICENSE("GPL");
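For context, simple_write_to_buffer() (fs/libfs.c) folds the open-coded bounds check and copy_from_user() into one helper. A rough sketch of its behaviour (illustrative, not the kernel's exact code); note that unlike the removed code it also advances *ppos:

static ssize_t write_to_buffer_sketch(void *to, size_t available, loff_t *ppos,
				      const void __user *from, size_t count)
{
	loff_t pos = *ppos;

	if (pos < 0)
		return -EINVAL;
	if (pos >= available)
		return 0;
	if (count > available - pos)
		count = available - pos;
	if (copy_from_user(to + pos, from, count))
		return -EFAULT;
	*ppos = pos + count;
	return count;
}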
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index b6fccb0..36ed2e2 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -51,6 +51,7 @@
 	select HAVE_KERNEL_GZIP
 	select HAVE_KERNEL_BZIP2
 	select HAVE_KERNEL_LZMA
+	select HAVE_KERNEL_XZ
 	select HAVE_KERNEL_LZO
 	select HAVE_HW_BREAKPOINT
 	select HAVE_MIXED_BREAKPOINTS_REGS
@@ -65,6 +66,7 @@
 	select HAVE_SPARSE_IRQ
 	select GENERIC_IRQ_PROBE
 	select GENERIC_PENDING_IRQ if SMP
+	select USE_GENERIC_SMP_HELPERS if SMP
 
 config INSTRUCTION_DECODER
 	def_bool (KPROBES || PERF_EVENTS)
@@ -203,10 +205,6 @@
 	def_bool y
 	depends on EXPERIMENTAL && DMAR && ACPI
 
-config USE_GENERIC_SMP_HELPERS
-	def_bool y
-	depends on SMP
-
 config X86_32_SMP
 	def_bool y
 	depends on X86_32 && SMP
@@ -2078,11 +2076,17 @@
 	bool "Support for OLPC's Open Firmware"
 	depends on !X86_64 && !X86_PAE
 	default n
+	select OF
 	help
 	  This option adds support for the implementation of Open Firmware
 	  that is used on the OLPC XO-1 Children's Machine.
 	  If unsure, say N here.
 
+config OLPC_OPENFIRMWARE_DT
+	bool
+	default y if OLPC_OPENFIRMWARE && PROC_DEVICETREE
+	select OF_PROMTREE
+
 endif # X86_32
 
 config AMD_NB
diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
index 0c22955..09664ef 100644
--- a/arch/x86/boot/compressed/Makefile
+++ b/arch/x86/boot/compressed/Makefile
@@ -4,7 +4,7 @@
 # create a compressed vmlinux image from the original vmlinux
 #
 
-targets := vmlinux.lds vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma vmlinux.bin.lzo head_$(BITS).o misc.o string.o cmdline.o early_serial_console.o piggy.o
+targets := vmlinux.lds vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma vmlinux.bin.xz vmlinux.bin.lzo head_$(BITS).o misc.o string.o cmdline.o early_serial_console.o piggy.o
 
 KBUILD_CFLAGS := -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2
 KBUILD_CFLAGS += -fno-strict-aliasing -fPIC
@@ -49,12 +49,15 @@
 	$(call if_changed,bzip2)
 $(obj)/vmlinux.bin.lzma: $(vmlinux.bin.all-y) FORCE
 	$(call if_changed,lzma)
+$(obj)/vmlinux.bin.xz: $(vmlinux.bin.all-y) FORCE
+	$(call if_changed,xzkern)
 $(obj)/vmlinux.bin.lzo: $(vmlinux.bin.all-y) FORCE
 	$(call if_changed,lzo)
 
 suffix-$(CONFIG_KERNEL_GZIP)	:= gz
 suffix-$(CONFIG_KERNEL_BZIP2)	:= bz2
 suffix-$(CONFIG_KERNEL_LZMA)	:= lzma
+suffix-$(CONFIG_KERNEL_XZ)	:= xz
 suffix-$(CONFIG_KERNEL_LZO) 	:= lzo
 
 quiet_cmd_mkpiggy = MKPIGGY $@
diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
index 325c052..3a19d04 100644
--- a/arch/x86/boot/compressed/misc.c
+++ b/arch/x86/boot/compressed/misc.c
@@ -139,6 +139,10 @@
 #include "../../../../lib/decompress_unlzma.c"
 #endif
 
+#ifdef CONFIG_KERNEL_XZ
+#include "../../../../lib/decompress_unxz.c"
+#endif
+
 #ifdef CONFIG_KERNEL_LZO
 #include "../../../../lib/decompress_unlzo.c"
 #endif
diff --git a/arch/x86/boot/compressed/mkpiggy.c b/arch/x86/boot/compressed/mkpiggy.c
index 5c22812..646aa78 100644
--- a/arch/x86/boot/compressed/mkpiggy.c
+++ b/arch/x86/boot/compressed/mkpiggy.c
@@ -74,7 +74,7 @@
 
 	offs = (olen > ilen) ? olen - ilen : 0;
 	offs += olen >> 12;	/* Add 8 bytes for each 32K block */
-	offs += 32*1024 + 18;	/* Add 32K + 18 bytes slack */
+	offs += 64*1024 + 128;	/* Add 64K + 128 bytes slack */
 	offs = (offs+4095) & ~4095; /* Round to a 4K boundary */
 
 	printf(".section \".rodata..compressed\",\"a\",@progbits\n");
diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
index ff16756..8fe2a49 100644
--- a/arch/x86/crypto/aesni-intel_asm.S
+++ b/arch/x86/crypto/aesni-intel_asm.S
@@ -9,6 +9,20 @@
  *            Vinodh Gopal <vinodh.gopal@intel.com>
  *            Kahraman Akdemir
  *
+ * Added RFC4106 AES-GCM support for 128-bit keys under the AEAD
+ * interface for 64-bit kernels.
+ *    Authors: Erdinc Ozturk (erdinc.ozturk@intel.com)
+ *             Aidan O'Mahony (aidan.o.mahony@intel.com)
+ *             Adrian Hoban <adrian.hoban@intel.com>
+ *             James Guilford (james.guilford@intel.com)
+ *             Gabriele Paoloni <gabriele.paoloni@intel.com>
+ *             Tadeusz Struk (tadeusz.struk@intel.com)
+ *             Wajdi Feghali (wajdi.k.feghali@intel.com)
+ *    Copyright (c) 2010, Intel Corporation.
+ *
+ * Ported x86_64 version to x86:
+ *    Author: Mathias Krause <minipli@googlemail.com>
+ *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
  * the Free Software Foundation; either version 2 of the License, or
@@ -18,8 +32,62 @@
 #include <linux/linkage.h>
 #include <asm/inst.h>
 
+#ifdef __x86_64__
+.data
+POLY:   .octa 0xC2000000000000000000000000000001
+TWOONE: .octa 0x00000001000000000000000000000001
+
+# order of these constants should not change.
+# more specifically, ALL_F should follow SHIFT_MASK,
+# and ZERO should follow ALL_F
+
+SHUF_MASK:  .octa 0x000102030405060708090A0B0C0D0E0F
+MASK1:      .octa 0x0000000000000000ffffffffffffffff
+MASK2:      .octa 0xffffffffffffffff0000000000000000
+SHIFT_MASK: .octa 0x0f0e0d0c0b0a09080706050403020100
+ALL_F:      .octa 0xffffffffffffffffffffffffffffffff
+ZERO:       .octa 0x00000000000000000000000000000000
+ONE:        .octa 0x00000000000000000000000000000001
+F_MIN_MASK: .octa 0xf1f2f3f4f5f6f7f8f9fafbfcfdfeff0
+dec:        .octa 0x1
+enc:        .octa 0x2
+
+
 .text
 
+
+#define	STACK_OFFSET    8*3
+#define	HashKey		16*0	// store HashKey <<1 mod poly here
+#define	HashKey_2	16*1	// store HashKey^2 <<1 mod poly here
+#define	HashKey_3	16*2	// store HashKey^3 <<1 mod poly here
+#define	HashKey_4	16*3	// store HashKey^4 <<1 mod poly here
+#define	HashKey_k	16*4	// store XOR of High 64 bits and Low 64
+				// bits of  HashKey <<1 mod poly here
+				//(for Karatsuba purposes)
+#define	HashKey_2_k	16*5	// store XOR of High 64 bits and Low 64
+				// bits of  HashKey^2 <<1 mod poly here
+				// (for Karatsuba purposes)
+#define	HashKey_3_k	16*6	// store XOR of High 64 bits and Low 64
+				// bits of  HashKey^3 <<1 mod poly here
+				// (for Karatsuba purposes)
+#define	HashKey_4_k	16*7	// store XOR of High 64 bits and Low 64
+				// bits of  HashKey^4 <<1 mod poly here
+				// (for Karatsuba purposes)
+#define	VARIABLE_OFFSET	16*8
+
+#define arg1 rdi
+#define arg2 rsi
+#define arg3 rdx
+#define arg4 rcx
+#define arg5 r8
+#define arg6 r9
+#define arg7 STACK_OFFSET+8(%r14)
+#define arg8 STACK_OFFSET+16(%r14)
+#define arg9 STACK_OFFSET+24(%r14)
+#define arg10 STACK_OFFSET+32(%r14)
+#endif
+
+
 #define STATE1	%xmm0
 #define STATE2	%xmm4
 #define STATE3	%xmm5
@@ -32,12 +100,16 @@
 #define IN	IN1
 #define KEY	%xmm2
 #define IV	%xmm3
+
 #define BSWAP_MASK %xmm10
 #define CTR	%xmm11
 #define INC	%xmm12
 
+#ifdef __x86_64__
+#define AREG	%rax
 #define KEYP	%rdi
 #define OUTP	%rsi
+#define UKEYP	OUTP
 #define INP	%rdx
 #define LEN	%rcx
 #define IVP	%r8
@@ -46,6 +118,1588 @@
 #define TKEYP	T1
 #define T2	%r11
 #define TCTR_LOW T2
+#else
+#define AREG	%eax
+#define KEYP	%edi
+#define OUTP	AREG
+#define UKEYP	OUTP
+#define INP	%edx
+#define LEN	%esi
+#define IVP	%ebp
+#define KLEN	%ebx
+#define T1	%ecx
+#define TKEYP	T1
+#endif
+
+
+#ifdef __x86_64__
+/* GHASH_MUL MACRO to implement: Data*HashKey mod (128,127,126,121,0)
+*
+*
+* Input: A and B (128-bits each, bit-reflected)
+* Output: C = A*B*x mod poly, (i.e. >>1 )
+* To compute GH = GH*HashKey mod poly, give HK = HashKey<<1 mod poly as input
+* GH = GH * HK * x mod poly which is equivalent to GH*HashKey mod poly.
+*
+*/
+.macro GHASH_MUL GH HK TMP1 TMP2 TMP3 TMP4 TMP5
+	movdqa	  \GH, \TMP1
+	pshufd	  $78, \GH, \TMP2
+	pshufd	  $78, \HK, \TMP3
+	pxor	  \GH, \TMP2            # TMP2 = a1+a0
+	pxor	  \HK, \TMP3            # TMP3 = b1+b0
+	PCLMULQDQ 0x11, \HK, \TMP1     # TMP1 = a1*b1
+	PCLMULQDQ 0x00, \HK, \GH       # GH = a0*b0
+	PCLMULQDQ 0x00, \TMP3, \TMP2   # TMP2 = (a0+a1)*(b1+b0)
+	pxor	  \GH, \TMP2
+	pxor	  \TMP1, \TMP2          # TMP2 = a1*b0 + a0*b1 (middle term)
+	movdqa	  \TMP2, \TMP3
+	pslldq	  $8, \TMP3             # left shift TMP3 2 DWs
+	psrldq	  $8, \TMP2             # right shift TMP2 2 DWs
+	pxor	  \TMP3, \GH
+	pxor	  \TMP2, \TMP1          # TMP1:GH holds the result of GH*HK
+
+        # first phase of the reduction
+
+	movdqa    \GH, \TMP2
+	movdqa    \GH, \TMP3
+	movdqa    \GH, \TMP4            # copy GH into TMP2,TMP3 and TMP4
+					# in order to perform
+					# independent shifts
+	pslld     $31, \TMP2            # packed left shift <<31
+	pslld     $30, \TMP3            # packed left shift <<30
+	pslld     $25, \TMP4            # packed left shift <<25
+	pxor      \TMP3, \TMP2          # xor the shifted versions
+	pxor      \TMP4, \TMP2
+	movdqa    \TMP2, \TMP5
+	psrldq    $4, \TMP5             # right shift TMP5 1 DW
+	pslldq    $12, \TMP2            # left shift TMP2 3 DWs
+	pxor      \TMP2, \GH
+
+        # second phase of the reduction
+
+	movdqa    \GH,\TMP2             # copy GH into TMP2,TMP3 and TMP4
+					# in order to perform
+					# independent shifts
+	movdqa    \GH,\TMP3
+	movdqa    \GH,\TMP4
+	psrld     $1,\TMP2              # packed right shift >>1
+	psrld     $2,\TMP3              # packed right shift >>2
+	psrld     $7,\TMP4              # packed right shift >>7
+	pxor      \TMP3,\TMP2		# xor the shifted versions
+	pxor      \TMP4,\TMP2
+	pxor      \TMP5, \TMP2
+	pxor      \TMP2, \GH
+	pxor      \TMP1, \GH            # result is in GH
+.endm
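For cross-checking the carry-less-multiply path, a plain bitwise reference of the GF(2^128) multiplication GHASH uses (the textbook right-shift form from the GCM spec on the standard big-endian block layout; the macro above instead works on bit-reflected operands with PCLMULQDQ and the explicit two-phase reduction). Sketch only, far too slow for real use:

#include <stdint.h>
#include <string.h>

static void gf128_mul_ref(const uint8_t x[16], const uint8_t y[16], uint8_t z[16])
{
	uint8_t v[16];
	int i, j;

	memset(z, 0, 16);
	memcpy(v, y, 16);
	for (i = 0; i < 128; i++) {
		int bit = (x[i / 8] >> (7 - (i % 8))) & 1;	/* bits of X, MSB first   */
		int lsb = v[15] & 1;				/* sample before shifting */

		if (bit)
			for (j = 0; j < 16; j++)
				z[j] ^= v[j];
		for (j = 15; j > 0; j--)			/* V = V >> 1 */
			v[j] = (v[j] >> 1) | (v[j - 1] << 7);
		v[0] >>= 1;
		if (lsb)
			v[0] ^= 0xe1;	/* fold in R = 11100001 || 0^120 */
	}
}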
+
+/*
+* if a = number of total plaintext bytes
+* b = floor(a/16)
+* num_initial_blocks = b mod 4
+* encrypt the initial num_initial_blocks blocks and apply ghash on
+* the ciphertext
+* %r10, %r11, %r12, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
+* are clobbered
+* arg1, %arg2, %arg3, %r14 are used as a pointer only, not modified
+*/
+
+
+.macro INITIAL_BLOCKS_DEC num_initial_blocks TMP1 TMP2 TMP3 TMP4 TMP5 XMM0 XMM1 \
+XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation
+	mov	   arg7, %r10           # %r10 = AAD
+	mov	   arg8, %r12           # %r12 = aadLen
+	mov	   %r12, %r11
+	pxor	   %xmm\i, %xmm\i
+_get_AAD_loop\num_initial_blocks\operation:
+	movd	   (%r10), \TMP1
+	pslldq	   $12, \TMP1
+	psrldq	   $4, %xmm\i
+	pxor	   \TMP1, %xmm\i
+	add	   $4, %r10
+	sub	   $4, %r12
+	jne	   _get_AAD_loop\num_initial_blocks\operation
+	cmp	   $16, %r11
+	je	   _get_AAD_loop2_done\num_initial_blocks\operation
+	mov	   $16, %r12
+_get_AAD_loop2\num_initial_blocks\operation:
+	psrldq	   $4, %xmm\i
+	sub	   $4, %r12
+	cmp	   %r11, %r12
+	jne	   _get_AAD_loop2\num_initial_blocks\operation
+_get_AAD_loop2_done\num_initial_blocks\operation:
+        movdqa     SHUF_MASK(%rip), %xmm14
+	PSHUFB_XMM   %xmm14, %xmm\i # byte-reflect the AAD data
+
+	xor	   %r11, %r11 # initialise the data pointer offset as zero
+
+        # start AES for num_initial_blocks blocks
+
+	mov	   %arg5, %rax                      # %rax = *Y0
+	movdqu	   (%rax), \XMM0                    # XMM0 = Y0
+        movdqa     SHUF_MASK(%rip), %xmm14
+	PSHUFB_XMM   %xmm14, \XMM0
+
+.if (\i == 5) || (\i == 6) || (\i == 7)
+.irpc index, \i_seq
+	paddd	   ONE(%rip), \XMM0                 # INCR Y0
+	movdqa	   \XMM0, %xmm\index
+        movdqa     SHUF_MASK(%rip), %xmm14
+	PSHUFB_XMM   %xmm14, %xmm\index      # perform a 16 byte swap
+
+.endr
+.irpc index, \i_seq
+	pxor	   16*0(%arg1), %xmm\index
+.endr
+.irpc index, \i_seq
+	movaps 0x10(%rdi), \TMP1
+	AESENC     \TMP1, %xmm\index          # Round 1
+.endr
+.irpc index, \i_seq
+	movaps 0x20(%arg1), \TMP1
+	AESENC     \TMP1, %xmm\index          # Round 2
+.endr
+.irpc index, \i_seq
+	movaps 0x30(%arg1), \TMP1
+	AESENC     \TMP1, %xmm\index          # Round 3
+.endr
+.irpc index, \i_seq
+	movaps 0x40(%arg1), \TMP1
+	AESENC     \TMP1, %xmm\index          # Round 4
+.endr
+.irpc index, \i_seq
+	movaps 0x50(%arg1), \TMP1
+	AESENC     \TMP1, %xmm\index          # Round 5
+.endr
+.irpc index, \i_seq
+	movaps 0x60(%arg1), \TMP1
+	AESENC     \TMP1, %xmm\index          # Round 6
+.endr
+.irpc index, \i_seq
+	movaps 0x70(%arg1), \TMP1
+	AESENC     \TMP1, %xmm\index          # Round 7
+.endr
+.irpc index, \i_seq
+	movaps 0x80(%arg1), \TMP1
+	AESENC     \TMP1, %xmm\index          # Round 8
+.endr
+.irpc index, \i_seq
+	movaps 0x90(%arg1), \TMP1
+	AESENC     \TMP1, %xmm\index          # Round 9
+.endr
+.irpc index, \i_seq
+	movaps 0xa0(%arg1), \TMP1
+	AESENCLAST \TMP1, %xmm\index         # Round 10
+.endr
+.irpc index, \i_seq
+	movdqu	   (%arg3 , %r11, 1), \TMP1
+	pxor	   \TMP1, %xmm\index
+	movdqu	   %xmm\index, (%arg2 , %r11, 1)
+	# write back plaintext/ciphertext for num_initial_blocks
+	add	   $16, %r11
+
+	movdqa     \TMP1, %xmm\index
+        movdqa     SHUF_MASK(%rip), %xmm14
+	PSHUFB_XMM	   %xmm14, %xmm\index
+
+		# prepare plaintext/ciphertext for GHASH computation
+.endr
+.endif
+	GHASH_MUL  %xmm\i, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
+        # apply GHASH on num_initial_blocks blocks
+
+.if \i == 5
+        pxor       %xmm5, %xmm6
+	GHASH_MUL  %xmm6, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
+        pxor       %xmm6, %xmm7
+	GHASH_MUL  %xmm7, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
+        pxor       %xmm7, %xmm8
+	GHASH_MUL  %xmm8, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
+.elseif \i == 6
+        pxor       %xmm6, %xmm7
+	GHASH_MUL  %xmm7, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
+        pxor       %xmm7, %xmm8
+	GHASH_MUL  %xmm8, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
+.elseif \i == 7
+        pxor       %xmm7, %xmm8
+	GHASH_MUL  %xmm8, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
+.endif
+	cmp	   $64, %r13
+	jl	_initial_blocks_done\num_initial_blocks\operation
+	# no need for precomputed values
+/*
+*
+* Precomputations for HashKey parallel with encryption of first 4 blocks.
+* HashKey_i_k holds XORed values of the low and high parts of the HashKey_i
+*/
+	paddd	   ONE(%rip), \XMM0              # INCR Y0
+	movdqa	   \XMM0, \XMM1
+        movdqa     SHUF_MASK(%rip), %xmm14
+	PSHUFB_XMM  %xmm14, \XMM1        # perform a 16 byte swap
+
+	paddd	   ONE(%rip), \XMM0              # INCR Y0
+	movdqa	   \XMM0, \XMM2
+        movdqa     SHUF_MASK(%rip), %xmm14
+	PSHUFB_XMM  %xmm14, \XMM2        # perform a 16 byte swap
+
+	paddd	   ONE(%rip), \XMM0              # INCR Y0
+	movdqa	   \XMM0, \XMM3
+        movdqa     SHUF_MASK(%rip), %xmm14
+	PSHUFB_XMM %xmm14, \XMM3        # perform a 16 byte swap
+
+	paddd	   ONE(%rip), \XMM0              # INCR Y0
+	movdqa	   \XMM0, \XMM4
+        movdqa     SHUF_MASK(%rip), %xmm14
+	PSHUFB_XMM %xmm14, \XMM4        # perform a 16 byte swap
+
+	pxor	   16*0(%arg1), \XMM1
+	pxor	   16*0(%arg1), \XMM2
+	pxor	   16*0(%arg1), \XMM3
+	pxor	   16*0(%arg1), \XMM4
+	movdqa	   \TMP3, \TMP5
+	pshufd	   $78, \TMP3, \TMP1
+	pxor	   \TMP3, \TMP1
+	movdqa	   \TMP1, HashKey_k(%rsp)
+	GHASH_MUL  \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7
+# TMP5 = HashKey^2<<1 (mod poly)
+	movdqa	   \TMP5, HashKey_2(%rsp)
+# HashKey_2 = HashKey^2<<1 (mod poly)
+	pshufd	   $78, \TMP5, \TMP1
+	pxor	   \TMP5, \TMP1
+	movdqa	   \TMP1, HashKey_2_k(%rsp)
+.irpc index, 1234 # do 4 rounds
+	movaps 0x10*\index(%arg1), \TMP1
+	AESENC	   \TMP1, \XMM1
+	AESENC	   \TMP1, \XMM2
+	AESENC	   \TMP1, \XMM3
+	AESENC	   \TMP1, \XMM4
+.endr
+	GHASH_MUL  \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7
+# TMP5 = HashKey^3<<1 (mod poly)
+	movdqa	   \TMP5, HashKey_3(%rsp)
+	pshufd	   $78, \TMP5, \TMP1
+	pxor	   \TMP5, \TMP1
+	movdqa	   \TMP1, HashKey_3_k(%rsp)
+.irpc index, 56789 # do next 5 rounds
+	movaps 0x10*\index(%arg1), \TMP1
+	AESENC	   \TMP1, \XMM1
+	AESENC	   \TMP1, \XMM2
+	AESENC	   \TMP1, \XMM3
+	AESENC	   \TMP1, \XMM4
+.endr
+	GHASH_MUL  \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7
+# TMP5 = HashKey^4<<1 (mod poly)
+	movdqa	   \TMP5, HashKey_4(%rsp)
+	pshufd	   $78, \TMP5, \TMP1
+	pxor	   \TMP5, \TMP1
+	movdqa	   \TMP1, HashKey_4_k(%rsp)
+	movaps 0xa0(%arg1), \TMP2
+	AESENCLAST \TMP2, \XMM1
+	AESENCLAST \TMP2, \XMM2
+	AESENCLAST \TMP2, \XMM3
+	AESENCLAST \TMP2, \XMM4
+	movdqu	   16*0(%arg3 , %r11 , 1), \TMP1
+	pxor	   \TMP1, \XMM1
+	movdqu	   \XMM1, 16*0(%arg2 , %r11 , 1)
+	movdqa     \TMP1, \XMM1
+	movdqu	   16*1(%arg3 , %r11 , 1), \TMP1
+	pxor	   \TMP1, \XMM2
+	movdqu	   \XMM2, 16*1(%arg2 , %r11 , 1)
+	movdqa     \TMP1, \XMM2
+	movdqu	   16*2(%arg3 , %r11 , 1), \TMP1
+	pxor	   \TMP1, \XMM3
+	movdqu	   \XMM3, 16*2(%arg2 , %r11 , 1)
+	movdqa     \TMP1, \XMM3
+	movdqu	   16*3(%arg3 , %r11 , 1), \TMP1
+	pxor	   \TMP1, \XMM4
+	movdqu	   \XMM4, 16*3(%arg2 , %r11 , 1)
+	movdqa     \TMP1, \XMM4
+	add	   $64, %r11
+        movdqa     SHUF_MASK(%rip), %xmm14
+	PSHUFB_XMM %xmm14, \XMM1 # perform a 16 byte swap
+	pxor	   \XMMDst, \XMM1
+# combine GHASHed value with the corresponding ciphertext
+        movdqa     SHUF_MASK(%rip), %xmm14
+	PSHUFB_XMM %xmm14, \XMM2 # perform a 16 byte swap
+        movdqa     SHUF_MASK(%rip), %xmm14
+	PSHUFB_XMM %xmm14, \XMM3 # perform a 16 byte swap
+        movdqa     SHUF_MASK(%rip), %xmm14
+	PSHUFB_XMM %xmm14, \XMM4 # perform a 16 byte swap
+
+_initial_blocks_done\num_initial_blocks\operation:
+
+.endm
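To make the block-count bookkeeping described in the header comment concrete, a small stand-alone example with a hypothetical length:

#include <stdio.h>

int main(void)
{
	unsigned long len = 117;			/* total bytes (made-up value)          */
	unsigned long full = len & ~15UL;		/* 112: whole 16-byte blocks            */
	unsigned long initial = (full / 16) % 4;	/* 3: blocks done before the 4-way loop */

	printf("initial blocks: %lu, 4-way loop: %lu bytes, tail: %lu bytes\n",
	       initial, full - initial * 16, len - full);
	return 0;
}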
+
+
+/*
+* if a = number of total plaintext bytes
+* b = floor(a/16)
+* num_initial_blocks = b mod 4
+* encrypt the initial num_initial_blocks blocks and apply ghash on
+* the ciphertext
+* %r10, %r11, %r12, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
+* are clobbered
+* arg1, %arg2, %arg3, %r14 are used as a pointer only, not modified
+*/
+
+
+.macro INITIAL_BLOCKS_ENC num_initial_blocks TMP1 TMP2 TMP3 TMP4 TMP5 XMM0 XMM1 \
+XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation
+	mov	   arg7, %r10           # %r10 = AAD
+	mov	   arg8, %r12           # %r12 = aadLen
+	mov	   %r12, %r11
+	pxor	   %xmm\i, %xmm\i
+_get_AAD_loop\num_initial_blocks\operation:
+	movd	   (%r10), \TMP1
+	pslldq	   $12, \TMP1
+	psrldq	   $4, %xmm\i
+	pxor	   \TMP1, %xmm\i
+	add	   $4, %r10
+	sub	   $4, %r12
+	jne	   _get_AAD_loop\num_initial_blocks\operation
+	cmp	   $16, %r11
+	je	   _get_AAD_loop2_done\num_initial_blocks\operation
+	mov	   $16, %r12
+_get_AAD_loop2\num_initial_blocks\operation:
+	psrldq	   $4, %xmm\i
+	sub	   $4, %r12
+	cmp	   %r11, %r12
+	jne	   _get_AAD_loop2\num_initial_blocks\operation
+_get_AAD_loop2_done\num_initial_blocks\operation:
+        movdqa     SHUF_MASK(%rip), %xmm14
+	PSHUFB_XMM   %xmm14, %xmm\i # byte-reflect the AAD data
+
+	xor	   %r11, %r11 # initialise the data pointer offset as zero
+
+        # start AES for num_initial_blocks blocks
+
+	mov	   %arg5, %rax                      # %rax = *Y0
+	movdqu	   (%rax), \XMM0                    # XMM0 = Y0
+        movdqa     SHUF_MASK(%rip), %xmm14
+	PSHUFB_XMM   %xmm14, \XMM0
+
+.if (\i == 5) || (\i == 6) || (\i == 7)
+.irpc index, \i_seq
+	paddd	   ONE(%rip), \XMM0                 # INCR Y0
+	movdqa	   \XMM0, %xmm\index
+        movdqa     SHUF_MASK(%rip), %xmm14
+	PSHUFB_XMM   %xmm14, %xmm\index      # perform a 16 byte swap
+
+.endr
+.irpc index, \i_seq
+	pxor	   16*0(%arg1), %xmm\index
+.endr
+.irpc index, \i_seq
+	movaps 0x10(%rdi), \TMP1
+	AESENC     \TMP1, %xmm\index          # Round 1
+.endr
+.irpc index, \i_seq
+	movaps 0x20(%arg1), \TMP1
+	AESENC     \TMP1, %xmm\index          # Round 2
+.endr
+.irpc index, \i_seq
+	movaps 0x30(%arg1), \TMP1
+	AESENC     \TMP1, %xmm\index          # Round 3
+.endr
+.irpc index, \i_seq
+	movaps 0x40(%arg1), \TMP1
+	AESENC     \TMP1, %xmm\index          # Round 4
+.endr
+.irpc index, \i_seq
+	movaps 0x50(%arg1), \TMP1
+	AESENC     \TMP1, %xmm\index          # Round 5
+.endr
+.irpc index, \i_seq
+	movaps 0x60(%arg1), \TMP1
+	AESENC     \TMP1, %xmm\index          # Round 6
+.endr
+.irpc index, \i_seq
+	movaps 0x70(%arg1), \TMP1
+	AESENC     \TMP1, %xmm\index          # Round 7
+.endr
+.irpc index, \i_seq
+	movaps 0x80(%arg1), \TMP1
+	AESENC     \TMP1, %xmm\index          # Round 8
+.endr
+.irpc index, \i_seq
+	movaps 0x90(%arg1), \TMP1
+	AESENC     \TMP1, %xmm\index          # Round 9
+.endr
+.irpc index, \i_seq
+	movaps 0xa0(%arg1), \TMP1
+	AESENCLAST \TMP1, %xmm\index         # Round 10
+.endr
+.irpc index, \i_seq
+	movdqu	   (%arg3 , %r11, 1), \TMP1
+	pxor	   \TMP1, %xmm\index
+	movdqu	   %xmm\index, (%arg2 , %r11, 1)
+	# write back plaintext/ciphertext for num_initial_blocks
+	add	   $16, %r11
+
+        movdqa     SHUF_MASK(%rip), %xmm14
+	PSHUFB_XMM	   %xmm14, %xmm\index
+
+		# prepare plaintext/ciphertext for GHASH computation
+.endr
+.endif
+	GHASH_MUL  %xmm\i, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
+        # apply GHASH on num_initial_blocks blocks
+
+.if \i == 5
+        pxor       %xmm5, %xmm6
+	GHASH_MUL  %xmm6, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
+        pxor       %xmm6, %xmm7
+	GHASH_MUL  %xmm7, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
+        pxor       %xmm7, %xmm8
+	GHASH_MUL  %xmm8, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
+.elseif \i == 6
+        pxor       %xmm6, %xmm7
+	GHASH_MUL  %xmm7, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
+        pxor       %xmm7, %xmm8
+	GHASH_MUL  %xmm8, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
+.elseif \i == 7
+        pxor       %xmm7, %xmm8
+	GHASH_MUL  %xmm8, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
+.endif
+	cmp	   $64, %r13
+	jl	_initial_blocks_done\num_initial_blocks\operation
+	# no need for precomputed values
+/*
+*
+* Precomputations for HashKey parallel with encryption of first 4 blocks.
+* HashKey_i_k holds XORed values of the low and high parts of the HashKey_i
+*/
+	paddd	   ONE(%rip), \XMM0              # INCR Y0
+	movdqa	   \XMM0, \XMM1
+        movdqa     SHUF_MASK(%rip), %xmm14
+	PSHUFB_XMM  %xmm14, \XMM1        # perform a 16 byte swap
+
+	paddd	   ONE(%rip), \XMM0              # INCR Y0
+	movdqa	   \XMM0, \XMM2
+        movdqa     SHUF_MASK(%rip), %xmm14
+	PSHUFB_XMM  %xmm14, \XMM2        # perform a 16 byte swap
+
+	paddd	   ONE(%rip), \XMM0              # INCR Y0
+	movdqa	   \XMM0, \XMM3
+        movdqa     SHUF_MASK(%rip), %xmm14
+	PSHUFB_XMM %xmm14, \XMM3        # perform a 16 byte swap
+
+	paddd	   ONE(%rip), \XMM0              # INCR Y0
+	movdqa	   \XMM0, \XMM4
+        movdqa     SHUF_MASK(%rip), %xmm14
+	PSHUFB_XMM %xmm14, \XMM4        # perform a 16 byte swap
+
+	pxor	   16*0(%arg1), \XMM1
+	pxor	   16*0(%arg1), \XMM2
+	pxor	   16*0(%arg1), \XMM3
+	pxor	   16*0(%arg1), \XMM4
+	movdqa	   \TMP3, \TMP5
+	pshufd	   $78, \TMP3, \TMP1
+	pxor	   \TMP3, \TMP1
+	movdqa	   \TMP1, HashKey_k(%rsp)
+	GHASH_MUL  \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7
+# TMP5 = HashKey^2<<1 (mod poly)
+	movdqa	   \TMP5, HashKey_2(%rsp)
+# HashKey_2 = HashKey^2<<1 (mod poly)
+	pshufd	   $78, \TMP5, \TMP1
+	pxor	   \TMP5, \TMP1
+	movdqa	   \TMP1, HashKey_2_k(%rsp)
+.irpc index, 1234 # do 4 rounds
+	movaps 0x10*\index(%arg1), \TMP1
+	AESENC	   \TMP1, \XMM1
+	AESENC	   \TMP1, \XMM2
+	AESENC	   \TMP1, \XMM3
+	AESENC	   \TMP1, \XMM4
+.endr
+	GHASH_MUL  \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7
+# TMP5 = HashKey^3<<1 (mod poly)
+	movdqa	   \TMP5, HashKey_3(%rsp)
+	pshufd	   $78, \TMP5, \TMP1
+	pxor	   \TMP5, \TMP1
+	movdqa	   \TMP1, HashKey_3_k(%rsp)
+.irpc index, 56789 # do next 5 rounds
+	movaps 0x10*\index(%arg1), \TMP1
+	AESENC	   \TMP1, \XMM1
+	AESENC	   \TMP1, \XMM2
+	AESENC	   \TMP1, \XMM3
+	AESENC	   \TMP1, \XMM4
+.endr
+	GHASH_MUL  \TMP5, \TMP3, \TMP1, \TMP2, \TMP4, \TMP6, \TMP7
+# TMP5 = HashKey^4<<1 (mod poly)
+	movdqa	   \TMP5, HashKey_4(%rsp)
+	pshufd	   $78, \TMP5, \TMP1
+	pxor	   \TMP5, \TMP1
+	movdqa	   \TMP1, HashKey_4_k(%rsp)
+	movaps 0xa0(%arg1), \TMP2
+	AESENCLAST \TMP2, \XMM1
+	AESENCLAST \TMP2, \XMM2
+	AESENCLAST \TMP2, \XMM3
+	AESENCLAST \TMP2, \XMM4
+	movdqu	   16*0(%arg3 , %r11 , 1), \TMP1
+	pxor	   \TMP1, \XMM1
+	movdqu	   16*1(%arg3 , %r11 , 1), \TMP1
+	pxor	   \TMP1, \XMM2
+	movdqu	   16*2(%arg3 , %r11 , 1), \TMP1
+	pxor	   \TMP1, \XMM3
+	movdqu	   16*3(%arg3 , %r11 , 1), \TMP1
+	pxor	   \TMP1, \XMM4
+	movdqu     \XMM1, 16*0(%arg2 , %r11 , 1)
+	movdqu     \XMM2, 16*1(%arg2 , %r11 , 1)
+	movdqu     \XMM3, 16*2(%arg2 , %r11 , 1)
+	movdqu     \XMM4, 16*3(%arg2 , %r11 , 1)
+
+	add	   $64, %r11
+        movdqa     SHUF_MASK(%rip), %xmm14
+	PSHUFB_XMM %xmm14, \XMM1 # perform a 16 byte swap
+	pxor	   \XMMDst, \XMM1
+# combine GHASHed value with the corresponding ciphertext
+        movdqa     SHUF_MASK(%rip), %xmm14
+	PSHUFB_XMM %xmm14, \XMM2 # perform a 16 byte swap
+        movdqa     SHUF_MASK(%rip), %xmm14
+	PSHUFB_XMM %xmm14, \XMM3 # perform a 16 byte swap
+        movdqa     SHUF_MASK(%rip), %xmm14
+	PSHUFB_XMM %xmm14, \XMM4 # perform a 16 byte swap
+
+_initial_blocks_done\num_initial_blocks\operation:
+
+.endm
+
+/*
+* encrypt 4 blocks at a time
+* ghash the 4 previously encrypted ciphertext blocks
+* arg1, %arg2, %arg3 are used as pointers only, not modified
+* %r11 is the data offset value
+*/
+.macro GHASH_4_ENCRYPT_4_PARALLEL_ENC TMP1 TMP2 TMP3 TMP4 TMP5 \
+TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
+
+	movdqa	  \XMM1, \XMM5
+	movdqa	  \XMM2, \XMM6
+	movdqa	  \XMM3, \XMM7
+	movdqa	  \XMM4, \XMM8
+
+        movdqa    SHUF_MASK(%rip), %xmm15
+        # multiply TMP5 * HashKey using karatsuba
+
+	movdqa	  \XMM5, \TMP4
+	pshufd	  $78, \XMM5, \TMP6
+	pxor	  \XMM5, \TMP6
+	paddd     ONE(%rip), \XMM0		# INCR CNT
+	movdqa	  HashKey_4(%rsp), \TMP5
+	PCLMULQDQ 0x11, \TMP5, \TMP4           # TMP4 = a1*b1
+	movdqa    \XMM0, \XMM1
+	paddd     ONE(%rip), \XMM0		# INCR CNT
+	movdqa    \XMM0, \XMM2
+	paddd     ONE(%rip), \XMM0		# INCR CNT
+	movdqa    \XMM0, \XMM3
+	paddd     ONE(%rip), \XMM0		# INCR CNT
+	movdqa    \XMM0, \XMM4
+	PSHUFB_XMM %xmm15, \XMM1	# perform a 16 byte swap
+	PCLMULQDQ 0x00, \TMP5, \XMM5           # XMM5 = a0*b0
+	PSHUFB_XMM %xmm15, \XMM2	# perform a 16 byte swap
+	PSHUFB_XMM %xmm15, \XMM3	# perform a 16 byte swap
+	PSHUFB_XMM %xmm15, \XMM4	# perform a 16 byte swap
+
+	pxor	  (%arg1), \XMM1
+	pxor	  (%arg1), \XMM2
+	pxor	  (%arg1), \XMM3
+	pxor	  (%arg1), \XMM4
+	movdqa	  HashKey_4_k(%rsp), \TMP5
+	PCLMULQDQ 0x00, \TMP5, \TMP6           # TMP6 = (a1+a0)*(b1+b0)
+	movaps 0x10(%arg1), \TMP1
+	AESENC	  \TMP1, \XMM1              # Round 1
+	AESENC	  \TMP1, \XMM2
+	AESENC	  \TMP1, \XMM3
+	AESENC	  \TMP1, \XMM4
+	movaps 0x20(%arg1), \TMP1
+	AESENC	  \TMP1, \XMM1              # Round 2
+	AESENC	  \TMP1, \XMM2
+	AESENC	  \TMP1, \XMM3
+	AESENC	  \TMP1, \XMM4
+	movdqa	  \XMM6, \TMP1
+	pshufd	  $78, \XMM6, \TMP2
+	pxor	  \XMM6, \TMP2
+	movdqa	  HashKey_3(%rsp), \TMP5
+	PCLMULQDQ 0x11, \TMP5, \TMP1           # TMP1 = a1 * b1
+	movaps 0x30(%arg1), \TMP3
+	AESENC    \TMP3, \XMM1              # Round 3
+	AESENC    \TMP3, \XMM2
+	AESENC    \TMP3, \XMM3
+	AESENC    \TMP3, \XMM4
+	PCLMULQDQ 0x00, \TMP5, \XMM6           # XMM6 = a0*b0
+	movaps 0x40(%arg1), \TMP3
+	AESENC	  \TMP3, \XMM1              # Round 4
+	AESENC	  \TMP3, \XMM2
+	AESENC	  \TMP3, \XMM3
+	AESENC	  \TMP3, \XMM4
+	movdqa	  HashKey_3_k(%rsp), \TMP5
+	PCLMULQDQ 0x00, \TMP5, \TMP2           # TMP2 = (a1+a0)*(b1+b0)
+	movaps 0x50(%arg1), \TMP3
+	AESENC	  \TMP3, \XMM1              # Round 5
+	AESENC	  \TMP3, \XMM2
+	AESENC	  \TMP3, \XMM3
+	AESENC	  \TMP3, \XMM4
+	pxor	  \TMP1, \TMP4
+# accumulate the results in TMP4:XMM5, TMP6 holds the middle part
+	pxor	  \XMM6, \XMM5
+	pxor	  \TMP2, \TMP6
+	movdqa	  \XMM7, \TMP1
+	pshufd	  $78, \XMM7, \TMP2
+	pxor	  \XMM7, \TMP2
+	movdqa	  HashKey_2(%rsp ), \TMP5
+
+        # Multiply TMP5 * HashKey using karatsuba
+
+	PCLMULQDQ 0x11, \TMP5, \TMP1           # TMP1 = a1*b1
+	movaps 0x60(%arg1), \TMP3
+	AESENC	  \TMP3, \XMM1              # Round 6
+	AESENC	  \TMP3, \XMM2
+	AESENC	  \TMP3, \XMM3
+	AESENC	  \TMP3, \XMM4
+	PCLMULQDQ 0x00, \TMP5, \XMM7           # XMM7 = a0*b0
+	movaps 0x70(%arg1), \TMP3
+	AESENC	  \TMP3, \XMM1             # Round 7
+	AESENC	  \TMP3, \XMM2
+	AESENC	  \TMP3, \XMM3
+	AESENC	  \TMP3, \XMM4
+	movdqa	  HashKey_2_k(%rsp), \TMP5
+	PCLMULQDQ 0x00, \TMP5, \TMP2           # TMP2 = (a1+a0)*(b1+b0)
+	movaps 0x80(%arg1), \TMP3
+	AESENC	  \TMP3, \XMM1             # Round 8
+	AESENC	  \TMP3, \XMM2
+	AESENC	  \TMP3, \XMM3
+	AESENC	  \TMP3, \XMM4
+	pxor	  \TMP1, \TMP4
+# accumulate the results in TMP4:XMM5, TMP6 holds the middle part
+	pxor	  \XMM7, \XMM5
+	pxor	  \TMP2, \TMP6
+
+        # Multiply XMM8 * HashKey
+        # XMM8 and TMP5 hold the values for the two operands
+
+	movdqa	  \XMM8, \TMP1
+	pshufd	  $78, \XMM8, \TMP2
+	pxor	  \XMM8, \TMP2
+	movdqa	  HashKey(%rsp), \TMP5
+	PCLMULQDQ 0x11, \TMP5, \TMP1          # TMP1 = a1*b1
+	movaps 0x90(%arg1), \TMP3
+	AESENC	  \TMP3, \XMM1            # Round 9
+	AESENC	  \TMP3, \XMM2
+	AESENC	  \TMP3, \XMM3
+	AESENC	  \TMP3, \XMM4
+	PCLMULQDQ 0x00, \TMP5, \XMM8          # XMM8 = a0*b0
+	movaps 0xa0(%arg1), \TMP3
+	AESENCLAST \TMP3, \XMM1           # Round 10
+	AESENCLAST \TMP3, \XMM2
+	AESENCLAST \TMP3, \XMM3
+	AESENCLAST \TMP3, \XMM4
+	movdqa    HashKey_k(%rsp), \TMP5
+	PCLMULQDQ 0x00, \TMP5, \TMP2          # TMP2 = (a1+a0)*(b1+b0)
+	movdqu	  (%arg3,%r11,1), \TMP3
+	pxor	  \TMP3, \XMM1                 # Ciphertext/Plaintext XOR EK
+	movdqu	  16(%arg3,%r11,1), \TMP3
+	pxor	  \TMP3, \XMM2                 # Ciphertext/Plaintext XOR EK
+	movdqu	  32(%arg3,%r11,1), \TMP3
+	pxor	  \TMP3, \XMM3                 # Ciphertext/Plaintext XOR EK
+	movdqu	  48(%arg3,%r11,1), \TMP3
+	pxor	  \TMP3, \XMM4                 # Ciphertext/Plaintext XOR EK
+        movdqu    \XMM1, (%arg2,%r11,1)        # Write to the ciphertext buffer
+        movdqu    \XMM2, 16(%arg2,%r11,1)      # Write to the ciphertext buffer
+        movdqu    \XMM3, 32(%arg2,%r11,1)      # Write to the ciphertext buffer
+        movdqu    \XMM4, 48(%arg2,%r11,1)      # Write to the ciphertext buffer
+	PSHUFB_XMM %xmm15, \XMM1        # perform a 16 byte swap
+	PSHUFB_XMM %xmm15, \XMM2	# perform a 16 byte swap
+	PSHUFB_XMM %xmm15, \XMM3	# perform a 16 byte swap
+	PSHUFB_XMM %xmm15, \XMM4	# perform a 16 byte swap
+
+	pxor	  \TMP4, \TMP1
+	pxor	  \XMM8, \XMM5
+	pxor	  \TMP6, \TMP2
+	pxor	  \TMP1, \TMP2
+	pxor	  \XMM5, \TMP2
+	movdqa	  \TMP2, \TMP3
+	pslldq	  $8, \TMP3                    # left shift TMP3 2 DWs
+	psrldq	  $8, \TMP2                    # right shift TMP2 2 DWs
+	pxor	  \TMP3, \XMM5
+	pxor	  \TMP2, \TMP1	  # accumulate the results in TMP1:XMM5
+
+        # first phase of reduction
+
+	movdqa    \XMM5, \TMP2
+	movdqa    \XMM5, \TMP3
+	movdqa    \XMM5, \TMP4
+# move XMM5 into TMP2, TMP3, TMP4 in order to perform shifts independently
+	pslld     $31, \TMP2                   # packed left shift << 31
+	pslld     $30, \TMP3                   # packed left shift << 30
+	pslld     $25, \TMP4                   # packed left shift << 25
+	pxor      \TMP3, \TMP2	               # xor the shifted versions
+	pxor      \TMP4, \TMP2
+	movdqa    \TMP2, \TMP5
+	psrldq    $4, \TMP5                    # right shift T5 1 DW
+	pslldq    $12, \TMP2                   # left shift T2 3 DWs
+	pxor      \TMP2, \XMM5
+
+        # second phase of reduction
+
+	movdqa    \XMM5,\TMP2 # make 3 copies of XMM5 into TMP2, TMP3, TMP4
+	movdqa    \XMM5,\TMP3
+	movdqa    \XMM5,\TMP4
+	psrld     $1, \TMP2                    # packed right shift >>1
+	psrld     $2, \TMP3                    # packed right shift >>2
+	psrld     $7, \TMP4                    # packed right shift >>7
+	pxor      \TMP3,\TMP2		       # xor the shifted versions
+	pxor      \TMP4,\TMP2
+	pxor      \TMP5, \TMP2
+	pxor      \TMP2, \XMM5
+	pxor      \TMP1, \XMM5                 # result is in XMM5
+
+	pxor	  \XMM5, \XMM1
+.endm
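The "multiply ... using karatsuba" steps above split each 128x128 carry-less multiply into three 64x64 products; the HashKey_i_k values precomputed on the stack are exactly the pre-XORed halves fed to the middle product. The same decomposition written out in C as a slow reference (sketch only):

#include <stdint.h>

/* Bitwise 64x64 -> 128 bit carry-less multiply (reference only). */
static void clmul64(uint64_t a, uint64_t b, uint64_t *hi, uint64_t *lo)
{
	uint64_t h = 0, l = 0;
	int i;

	for (i = 0; i < 64; i++) {
		if ((b >> i) & 1) {
			l ^= a << i;
			if (i)
				h ^= a >> (64 - i);
		}
	}
	*hi = h;
	*lo = l;
}

/* Karatsuba: r = a * b over GF(2) polynomials; a, b, r as little-endian 64-bit limbs. */
static void clmul128_karatsuba(const uint64_t a[2], const uint64_t b[2], uint64_t r[4])
{
	uint64_t hh[2], ll[2], mm[2];

	clmul64(a[1], b[1], &hh[1], &hh[0]);			/* a1 * b1         */
	clmul64(a[0], b[0], &ll[1], &ll[0]);			/* a0 * b0         */
	clmul64(a[0] ^ a[1], b[0] ^ b[1], &mm[1], &mm[0]);	/* (a0^a1)*(b0^b1) */
	mm[0] ^= hh[0] ^ ll[0];					/* middle term     */
	mm[1] ^= hh[1] ^ ll[1];
	r[0] = ll[0];
	r[1] = ll[1] ^ mm[0];
	r[2] = hh[0] ^ mm[1];
	r[3] = hh[1];
}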
+
+/*
+* decrypt 4 blocks at a time
+* ghash the 4 previously decrypted ciphertext blocks
+* arg1, %arg2, %arg3 are used as pointers only, not modified
+* %r11 is the data offset value
+*/
+.macro GHASH_4_ENCRYPT_4_PARALLEL_DEC TMP1 TMP2 TMP3 TMP4 TMP5 \
+TMP6 XMM0 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 operation
+
+	movdqa	  \XMM1, \XMM5
+	movdqa	  \XMM2, \XMM6
+	movdqa	  \XMM3, \XMM7
+	movdqa	  \XMM4, \XMM8
+
+        movdqa    SHUF_MASK(%rip), %xmm15
+        # multiply TMP5 * HashKey using karatsuba
+
+	movdqa	  \XMM5, \TMP4
+	pshufd	  $78, \XMM5, \TMP6
+	pxor	  \XMM5, \TMP6
+	paddd     ONE(%rip), \XMM0		# INCR CNT
+	movdqa	  HashKey_4(%rsp), \TMP5
+	PCLMULQDQ 0x11, \TMP5, \TMP4           # TMP4 = a1*b1
+	movdqa    \XMM0, \XMM1
+	paddd     ONE(%rip), \XMM0		# INCR CNT
+	movdqa    \XMM0, \XMM2
+	paddd     ONE(%rip), \XMM0		# INCR CNT
+	movdqa    \XMM0, \XMM3
+	paddd     ONE(%rip), \XMM0		# INCR CNT
+	movdqa    \XMM0, \XMM4
+	PSHUFB_XMM %xmm15, \XMM1	# perform a 16 byte swap
+	PCLMULQDQ 0x00, \TMP5, \XMM5           # XMM5 = a0*b0
+	PSHUFB_XMM %xmm15, \XMM2	# perform a 16 byte swap
+	PSHUFB_XMM %xmm15, \XMM3	# perform a 16 byte swap
+	PSHUFB_XMM %xmm15, \XMM4	# perform a 16 byte swap
+
+	pxor	  (%arg1), \XMM1
+	pxor	  (%arg1), \XMM2
+	pxor	  (%arg1), \XMM3
+	pxor	  (%arg1), \XMM4
+	movdqa	  HashKey_4_k(%rsp), \TMP5
+	PCLMULQDQ 0x00, \TMP5, \TMP6           # TMP6 = (a1+a0)*(b1+b0)
+	movaps 0x10(%arg1), \TMP1
+	AESENC	  \TMP1, \XMM1              # Round 1
+	AESENC	  \TMP1, \XMM2
+	AESENC	  \TMP1, \XMM3
+	AESENC	  \TMP1, \XMM4
+	movaps 0x20(%arg1), \TMP1
+	AESENC	  \TMP1, \XMM1              # Round 2
+	AESENC	  \TMP1, \XMM2
+	AESENC	  \TMP1, \XMM3
+	AESENC	  \TMP1, \XMM4
+	movdqa	  \XMM6, \TMP1
+	pshufd	  $78, \XMM6, \TMP2
+	pxor	  \XMM6, \TMP2
+	movdqa	  HashKey_3(%rsp), \TMP5
+	PCLMULQDQ 0x11, \TMP5, \TMP1           # TMP1 = a1 * b1
+	movaps 0x30(%arg1), \TMP3
+	AESENC    \TMP3, \XMM1              # Round 3
+	AESENC    \TMP3, \XMM2
+	AESENC    \TMP3, \XMM3
+	AESENC    \TMP3, \XMM4
+	PCLMULQDQ 0x00, \TMP5, \XMM6           # XMM6 = a0*b0
+	movaps 0x40(%arg1), \TMP3
+	AESENC	  \TMP3, \XMM1              # Round 4
+	AESENC	  \TMP3, \XMM2
+	AESENC	  \TMP3, \XMM3
+	AESENC	  \TMP3, \XMM4
+	movdqa	  HashKey_3_k(%rsp), \TMP5
+	PCLMULQDQ 0x00, \TMP5, \TMP2           # TMP2 = (a1+a0)*(b1+b0)
+	movaps 0x50(%arg1), \TMP3
+	AESENC	  \TMP3, \XMM1              # Round 5
+	AESENC	  \TMP3, \XMM2
+	AESENC	  \TMP3, \XMM3
+	AESENC	  \TMP3, \XMM4
+	pxor	  \TMP1, \TMP4
+# accumulate the results in TMP4:XMM5, TMP6 holds the middle part
+	pxor	  \XMM6, \XMM5
+	pxor	  \TMP2, \TMP6
+	movdqa	  \XMM7, \TMP1
+	pshufd	  $78, \XMM7, \TMP2
+	pxor	  \XMM7, \TMP2
+	movdqa	  HashKey_2(%rsp ), \TMP5
+
+        # Multiply TMP5 * HashKey using karatsuba
+
+	PCLMULQDQ 0x11, \TMP5, \TMP1           # TMP1 = a1*b1
+	movaps 0x60(%arg1), \TMP3
+	AESENC	  \TMP3, \XMM1              # Round 6
+	AESENC	  \TMP3, \XMM2
+	AESENC	  \TMP3, \XMM3
+	AESENC	  \TMP3, \XMM4
+	PCLMULQDQ 0x00, \TMP5, \XMM7           # XMM7 = a0*b0
+	movaps 0x70(%arg1), \TMP3
+	AESENC	  \TMP3, \XMM1             # Round 7
+	AESENC	  \TMP3, \XMM2
+	AESENC	  \TMP3, \XMM3
+	AESENC	  \TMP3, \XMM4
+	movdqa	  HashKey_2_k(%rsp), \TMP5
+	PCLMULQDQ 0x00, \TMP5, \TMP2           # TMP2 = (a1+a0)*(b1+b0)
+	movaps 0x80(%arg1), \TMP3
+	AESENC	  \TMP3, \XMM1             # Round 8
+	AESENC	  \TMP3, \XMM2
+	AESENC	  \TMP3, \XMM3
+	AESENC	  \TMP3, \XMM4
+	pxor	  \TMP1, \TMP4
+# accumulate the results in TMP4:XMM5, TMP6 holds the middle part
+	pxor	  \XMM7, \XMM5
+	pxor	  \TMP2, \TMP6
+
+        # Multiply XMM8 * HashKey
+        # XMM8 and TMP5 hold the values for the two operands
+
+	movdqa	  \XMM8, \TMP1
+	pshufd	  $78, \XMM8, \TMP2
+	pxor	  \XMM8, \TMP2
+	movdqa	  HashKey(%rsp), \TMP5
+	PCLMULQDQ 0x11, \TMP5, \TMP1          # TMP1 = a1*b1
+	movaps 0x90(%arg1), \TMP3
+	AESENC	  \TMP3, \XMM1            # Round 9
+	AESENC	  \TMP3, \XMM2
+	AESENC	  \TMP3, \XMM3
+	AESENC	  \TMP3, \XMM4
+	PCLMULQDQ 0x00, \TMP5, \XMM8          # XMM8 = a0*b0
+	movaps 0xa0(%arg1), \TMP3
+	AESENCLAST \TMP3, \XMM1           # Round 10
+	AESENCLAST \TMP3, \XMM2
+	AESENCLAST \TMP3, \XMM3
+	AESENCLAST \TMP3, \XMM4
+	movdqa    HashKey_k(%rsp), \TMP5
+	PCLMULQDQ 0x00, \TMP5, \TMP2          # TMP2 = (a1+a0)*(b1+b0)
+	movdqu	  (%arg3,%r11,1), \TMP3
+	pxor	  \TMP3, \XMM1                 # Ciphertext/Plaintext XOR EK
+	movdqu	  \XMM1, (%arg2,%r11,1)        # Write to plaintext buffer
+	movdqa    \TMP3, \XMM1
+	movdqu	  16(%arg3,%r11,1), \TMP3
+	pxor	  \TMP3, \XMM2                 # Ciphertext/Plaintext XOR EK
+	movdqu	  \XMM2, 16(%arg2,%r11,1)      # Write to plaintext buffer
+	movdqa    \TMP3, \XMM2
+	movdqu	  32(%arg3,%r11,1), \TMP3
+	pxor	  \TMP3, \XMM3                 # Ciphertext/Plaintext XOR EK
+	movdqu	  \XMM3, 32(%arg2,%r11,1)      # Write to plaintext buffer
+	movdqa    \TMP3, \XMM3
+	movdqu	  48(%arg3,%r11,1), \TMP3
+	pxor	  \TMP3, \XMM4                 # Ciphertext/Plaintext XOR EK
+	movdqu	  \XMM4, 48(%arg2,%r11,1)      # Write to plaintext buffer
+	movdqa    \TMP3, \XMM4
+	PSHUFB_XMM %xmm15, \XMM1        # perform a 16 byte swap
+	PSHUFB_XMM %xmm15, \XMM2	# perform a 16 byte swap
+	PSHUFB_XMM %xmm15, \XMM3	# perform a 16 byte swap
+	PSHUFB_XMM %xmm15, \XMM4	# perform a 16 byte swap
+
+	pxor	  \TMP4, \TMP1
+	pxor	  \XMM8, \XMM5
+	pxor	  \TMP6, \TMP2
+	pxor	  \TMP1, \TMP2
+	pxor	  \XMM5, \TMP2
+	movdqa	  \TMP2, \TMP3
+	pslldq	  $8, \TMP3                    # left shift TMP3 2 DWs
+	psrldq	  $8, \TMP2                    # right shift TMP2 2 DWs
+	pxor	  \TMP3, \XMM5
+	pxor	  \TMP2, \TMP1	  # accumulate the results in TMP1:XMM5
+
+        # first phase of reduction
+
+	movdqa    \XMM5, \TMP2
+	movdqa    \XMM5, \TMP3
+	movdqa    \XMM5, \TMP4
+# move XMM5 into TMP2, TMP3, TMP4 in order to perform shifts independently
+	pslld     $31, \TMP2                   # packed left shift << 31
+	pslld     $30, \TMP3                   # packed left shift << 30
+	pslld     $25, \TMP4                   # packed left shift << 25
+	pxor      \TMP3, \TMP2	               # xor the shifted versions
+	pxor      \TMP4, \TMP2
+	movdqa    \TMP2, \TMP5
+	psrldq    $4, \TMP5                    # right shift T5 1 DW
+	pslldq    $12, \TMP2                   # left shift T2 3 DWs
+	pxor      \TMP2, \XMM5
+
+        # second phase of reduction
+
+	movdqa    \XMM5,\TMP2 # make 3 copies of XMM5 into TMP2, TMP3, TMP4
+	movdqa    \XMM5,\TMP3
+	movdqa    \XMM5,\TMP4
+	psrld     $1, \TMP2                    # packed right shift >>1
+	psrld     $2, \TMP3                    # packed right shift >>2
+	psrld     $7, \TMP4                    # packed right shift >>7
+	pxor      \TMP3,\TMP2		       # xor the shifted versions
+	pxor      \TMP4,\TMP2
+	pxor      \TMP5, \TMP2
+	pxor      \TMP2, \XMM5
+	pxor      \TMP1, \XMM5                 # result is in XMM5
+
+	pxor	  \XMM5, \XMM1
+.endm
+
+/* GHASH the last 4 ciphertext blocks. */
+.macro	GHASH_LAST_4 TMP1 TMP2 TMP3 TMP4 TMP5 TMP6 \
+TMP7 XMM1 XMM2 XMM3 XMM4 XMMDst
+
+        # Multiply TMP6 * HashKey (using Karatsuba)
+
+	movdqa	  \XMM1, \TMP6
+	pshufd	  $78, \XMM1, \TMP2
+	pxor	  \XMM1, \TMP2
+	movdqa	  HashKey_4(%rsp), \TMP5
+	PCLMULQDQ 0x11, \TMP5, \TMP6       # TMP6 = a1*b1
+	PCLMULQDQ 0x00, \TMP5, \XMM1       # XMM1 = a0*b0
+	movdqa	  HashKey_4_k(%rsp), \TMP4
+	PCLMULQDQ 0x00, \TMP4, \TMP2       # TMP2 = (a1+a0)*(b1+b0)
+	movdqa	  \XMM1, \XMMDst
+	movdqa	  \TMP2, \XMM1              # result in TMP6, XMMDst, XMM1
+
+        # Multiply TMP1 * HashKey (using Karatsuba)
+
+	movdqa	  \XMM2, \TMP1
+	pshufd	  $78, \XMM2, \TMP2
+	pxor	  \XMM2, \TMP2
+	movdqa	  HashKey_3(%rsp), \TMP5
+	PCLMULQDQ 0x11, \TMP5, \TMP1       # TMP1 = a1*b1
+	PCLMULQDQ 0x00, \TMP5, \XMM2       # XMM2 = a0*b0
+	movdqa	  HashKey_3_k(%rsp), \TMP4
+	PCLMULQDQ 0x00, \TMP4, \TMP2       # TMP2 = (a1+a0)*(b1+b0)
+	pxor	  \TMP1, \TMP6
+	pxor	  \XMM2, \XMMDst
+	pxor	  \TMP2, \XMM1
+# results accumulated in TMP6, XMMDst, XMM1
+
+        # Multiply TMP1 * HashKey (using Karatsuba)
+
+	movdqa	  \XMM3, \TMP1
+	pshufd	  $78, \XMM3, \TMP2
+	pxor	  \XMM3, \TMP2
+	movdqa	  HashKey_2(%rsp), \TMP5
+	PCLMULQDQ 0x11, \TMP5, \TMP1       # TMP1 = a1*b1
+	PCLMULQDQ 0x00, \TMP5, \XMM3       # XMM3 = a0*b0
+	movdqa	  HashKey_2_k(%rsp), \TMP4
+	PCLMULQDQ 0x00, \TMP4, \TMP2       # TMP2 = (a1+a0)*(b1+b0)
+	pxor	  \TMP1, \TMP6
+	pxor	  \XMM3, \XMMDst
+	pxor	  \TMP2, \XMM1   # results accumulated in TMP6, XMMDst, XMM1
+
+        # Multiply TMP1 * HashKey (using Karatsuba)
+	movdqa	  \XMM4, \TMP1
+	pshufd	  $78, \XMM4, \TMP2
+	pxor	  \XMM4, \TMP2
+	movdqa	  HashKey(%rsp), \TMP5
+	PCLMULQDQ 0x11, \TMP5, \TMP1	    # TMP1 = a1*b1
+	PCLMULQDQ 0x00, \TMP5, \XMM4       # XMM4 = a0*b0
+	movdqa	  HashKey_k(%rsp), \TMP4
+	PCLMULQDQ 0x00, \TMP4, \TMP2       # TMP2 = (a1+a0)*(b1+b0)
+	pxor	  \TMP1, \TMP6
+	pxor	  \XMM4, \XMMDst
+	pxor	  \XMM1, \TMP2
+	pxor	  \TMP6, \TMP2
+	pxor	  \XMMDst, \TMP2
+	# middle section of the temp results combined as in karatsuba algorithm
+	movdqa	  \TMP2, \TMP4
+	pslldq	  $8, \TMP4                 # left shift TMP4 2 DWs
+	psrldq	  $8, \TMP2                 # right shift TMP2 2 DWs
+	pxor	  \TMP4, \XMMDst
+	pxor	  \TMP2, \TMP6
+# TMP6:XMMDst holds the result of the accumulated carry-less multiplications
+	# first phase of the reduction
+	movdqa    \XMMDst, \TMP2
+	movdqa    \XMMDst, \TMP3
+	movdqa    \XMMDst, \TMP4
+# move XMMDst into TMP2, TMP3, TMP4 in order to perform 3 shifts independently
+	pslld     $31, \TMP2                # packed left shifting << 31
+	pslld     $30, \TMP3                # packed left shifting << 30
+	pslld     $25, \TMP4                # packed left shifting << 25
+	pxor      \TMP3, \TMP2              # xor the shifted versions
+	pxor      \TMP4, \TMP2
+	movdqa    \TMP2, \TMP7
+	psrldq    $4, \TMP7                 # right shift TMP7 1 DW
+	pslldq    $12, \TMP2                # left shift TMP2 3 DWs
+	pxor      \TMP2, \XMMDst
+
+        # second phase of the reduction
+	movdqa    \XMMDst, \TMP2
+	# make 3 copies of XMMDst for doing 3 shift operations
+	movdqa    \XMMDst, \TMP3
+	movdqa    \XMMDst, \TMP4
+	psrld     $1, \TMP2                 # packed right shift >> 1
+	psrld     $2, \TMP3                 # packed right shift >> 2
+	psrld     $7, \TMP4                 # packed right shift >> 7
+	pxor      \TMP3, \TMP2              # xor the shifted versions
+	pxor      \TMP4, \TMP2
+	pxor      \TMP7, \TMP2
+	pxor      \TMP2, \XMMDst
+	pxor      \TMP6, \XMMDst            # reduced result is in XMMDst
+.endm
+
+/* Encryption of a single block done*/
+.macro ENCRYPT_SINGLE_BLOCK XMM0 TMP1
+
+	pxor	(%arg1), \XMM0
+        movaps 16(%arg1), \TMP1
+	AESENC	\TMP1, \XMM0
+        movaps 32(%arg1), \TMP1
+	AESENC	\TMP1, \XMM0
+        movaps 48(%arg1), \TMP1
+	AESENC	\TMP1, \XMM0
+        movaps 64(%arg1), \TMP1
+	AESENC	\TMP1, \XMM0
+        movaps 80(%arg1), \TMP1
+	AESENC	\TMP1, \XMM0
+        movaps 96(%arg1), \TMP1
+	AESENC	\TMP1, \XMM0
+        movaps 112(%arg1), \TMP1
+	AESENC	\TMP1, \XMM0
+        movaps 128(%arg1), \TMP1
+	AESENC	\TMP1, \XMM0
+        movaps 144(%arg1), \TMP1
+	AESENC	\TMP1, \XMM0
+        movaps 160(%arg1), \TMP1
+	AESENCLAST	\TMP1, \XMM0
+.endm
+
+
+/*****************************************************************************
+* void aesni_gcm_dec(void *aes_ctx,    // AES Key schedule. Starts on a 16 byte boundary.
+*                   u8 *out,           // Plaintext output. Encrypt in-place is allowed.
+*                   const u8 *in,      // Ciphertext input
+*                   u64 plaintext_len, // Length of data in bytes for decryption.
+*                   u8 *iv,            // Pre-counter block j0: 4 byte salt (from Security Association)
+*                                      // concatenated with 8 byte Initialisation Vector (from IPSec ESP Payload)
+*                                      // concatenated with 0x00000001. 16-byte aligned pointer.
+*                   u8 *hash_subkey,   // H, the Hash sub key input. Data starts on a 16-byte boundary.
+*                   const u8 *aad,     // Additional Authentication Data (AAD)
+*                   u64 aad_len,       // Length of AAD in bytes. With RFC4106 this is going to be 8 or 12 bytes
+*                   u8  *auth_tag,     // Authenticated Tag output. The driver will compare this to the
+*                                      // given authentication tag and only return the plaintext if they match.
+*                   u64 auth_tag_len); // Authenticated Tag Length in bytes. Valid values are 16
+*                                      // (most likely), 12 or 8.
+*
+* Assumptions:
+*
+* keys:
+*       keys are pre-expanded and aligned to 16 bytes. we are using the first
+*       set of 11 keys in the data structure void *aes_ctx
+*
+* iv:
+*       0                   1                   2                   3
+*       0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+*       +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+*       |                             Salt  (From the SA)               |
+*       +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+*       |                     Initialization Vector                     |
+*       |         (This is the sequence number from IPSec header)       |
+*       +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+*       |                              0x1                              |
+*       +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+*
+*
+*
+* AAD:
+*       AAD padded to 128 bits with 0
+*       for example, assume AAD is a u32 vector
+*
+*       if AAD is 8 bytes:
+*       AAD[3] = {A0, A1};
+*       padded AAD in xmm register = {A1 A0 0 0}
+*
+*       0                   1                   2                   3
+*       0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+*       +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+*       |                               SPI (A1)                        |
+*       +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+*       |                     32-bit Sequence Number (A0)               |
+*       +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+*       |                              0x0                              |
+*       +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+*
+*                                       AAD Format with 32-bit Sequence Number
+*
+*       if AAD is 12 bytes:
+*       AAD[3] = {A0, A1, A2};
+*       padded AAD in xmm register = {A2 A1 A0 0}
+*
+*       0                   1                   2                   3
+*       0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+*       +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+*       |                               SPI (A2)                        |
+*       +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+*       |                 64-bit Extended Sequence Number {A1,A0}       |
+*       |                                                               |
+*       +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+*       |                              0x0                              |
+*       +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+*
+*                        AAD Format with 64-bit Extended Sequence Number
+*
+* aadLen:
+*       from the definition of the spec, aadLen can only be 8 or 12 bytes.
+*       The code supports 16 too but for other sizes, the code will fail.
+*
+* TLen:
+*       from the definition of the spec, TLen can only be 8, 12 or 16 bytes.
+*       For other sizes, the code will fail.
+*
+* poly = x^128 + x^127 + x^126 + x^121 + 1
+*
+*****************************************************************************/
+
+ENTRY(aesni_gcm_dec)
+	push	%r12
+	push	%r13
+	push	%r14
+	mov	%rsp, %r14
+/*
+* states of %xmm registers %xmm6:%xmm15 not saved
+* all %xmm registers are clobbered
+*/
+	sub	$VARIABLE_OFFSET, %rsp
+	and	$~63, %rsp                        # align rsp to 64 bytes
+	mov	%arg6, %r12
+	movdqu	(%r12), %xmm13			  # %xmm13 = HashKey
+        movdqa  SHUF_MASK(%rip), %xmm2
+	PSHUFB_XMM %xmm2, %xmm13
+
+
+# Precompute HashKey<<1 (mod poly) from the hash key (required for GHASH)
+
+	movdqa	%xmm13, %xmm2
+	psllq	$1, %xmm13
+	psrlq	$63, %xmm2
+	movdqa	%xmm2, %xmm1
+	pslldq	$8, %xmm2
+	psrldq	$8, %xmm1
+	por	%xmm2, %xmm13
+
+        # Reduction
+
+	pshufd	$0x24, %xmm1, %xmm2
+	pcmpeqd TWOONE(%rip), %xmm2
+	pand	POLY(%rip), %xmm2
+	pxor	%xmm2, %xmm13     # %xmm13 holds the HashKey<<1 (mod poly)
+
+
+        # Decrypt first few blocks
+
+	movdqa %xmm13, HashKey(%rsp)           # store HashKey<<1 (mod poly)
+	mov %arg4, %r13    # save the number of bytes of plaintext/ciphertext
+	and $-16, %r13                      # %r13 = %r13 - (%r13 mod 16)
+	mov %r13, %r12
+	and $(3<<4), %r12
+	jz _initial_num_blocks_is_0_decrypt
+	cmp $(2<<4), %r12
+	jb _initial_num_blocks_is_1_decrypt
+	je _initial_num_blocks_is_2_decrypt
+_initial_num_blocks_is_3_decrypt:
+	INITIAL_BLOCKS_DEC 3, %xmm9, %xmm10, %xmm13, %xmm11, %xmm12, %xmm0, \
+%xmm1, %xmm2, %xmm3, %xmm4, %xmm8, %xmm5, %xmm6, 5, 678, dec
+	sub	$48, %r13
+	jmp	_initial_blocks_decrypted
+_initial_num_blocks_is_2_decrypt:
+	INITIAL_BLOCKS_DEC	2, %xmm9, %xmm10, %xmm13, %xmm11, %xmm12, %xmm0, \
+%xmm1, %xmm2, %xmm3, %xmm4, %xmm8, %xmm5, %xmm6, 6, 78, dec
+	sub	$32, %r13
+	jmp	_initial_blocks_decrypted
+_initial_num_blocks_is_1_decrypt:
+	INITIAL_BLOCKS_DEC	1, %xmm9, %xmm10, %xmm13, %xmm11, %xmm12, %xmm0, \
+%xmm1, %xmm2, %xmm3, %xmm4, %xmm8, %xmm5, %xmm6, 7, 8, dec
+	sub	$16, %r13
+	jmp	_initial_blocks_decrypted
+_initial_num_blocks_is_0_decrypt:
+	INITIAL_BLOCKS_DEC	0, %xmm9, %xmm10, %xmm13, %xmm11, %xmm12, %xmm0, \
+%xmm1, %xmm2, %xmm3, %xmm4, %xmm8, %xmm5, %xmm6, 8, 0, dec
+_initial_blocks_decrypted:
+	cmp	$0, %r13
+	je	_zero_cipher_left_decrypt
+	sub	$64, %r13
+	je	_four_cipher_left_decrypt
+_decrypt_by_4:
+	GHASH_4_ENCRYPT_4_PARALLEL_DEC	%xmm9, %xmm10, %xmm11, %xmm12, %xmm13, \
+%xmm14, %xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, dec
+	add	$64, %r11
+	sub	$64, %r13
+	jne	_decrypt_by_4
+_four_cipher_left_decrypt:
+	GHASH_LAST_4	%xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, \
+%xmm15, %xmm1, %xmm2, %xmm3, %xmm4, %xmm8
+_zero_cipher_left_decrypt:
+	mov	%arg4, %r13
+	and	$15, %r13				# %r13 = arg4 (mod 16)
+	je	_multiple_of_16_bytes_decrypt
+
+        # Handle the last <16 byte block separately
+
+	paddd ONE(%rip), %xmm0         # increment CNT to get Yn
+        movdqa SHUF_MASK(%rip), %xmm10
+	PSHUFB_XMM %xmm10, %xmm0
+
+	ENCRYPT_SINGLE_BLOCK  %xmm0, %xmm1    # E(K, Yn)
+	sub $16, %r11
+	add %r13, %r11
+	movdqu (%arg3,%r11,1), %xmm1   # receive the last <16 byte block
+	lea SHIFT_MASK+16(%rip), %r12
+	sub %r13, %r12
+# adjust the shuffle mask pointer to be able to shift 16-%r13 bytes
+# (%r13 is the number of bytes in plaintext mod 16)
+	movdqu (%r12), %xmm2           # get the appropriate shuffle mask
+	PSHUFB_XMM %xmm2, %xmm1            # right shift 16-%r13 bytes
+
+	movdqa  %xmm1, %xmm2
+	pxor %xmm1, %xmm0            # Ciphertext XOR E(K, Yn)
+	movdqu ALL_F-SHIFT_MASK(%r12), %xmm1
+	# get the appropriate mask to mask out top 16-%r13 bytes of %xmm0
+	pand %xmm1, %xmm0            # mask out top 16-%r13 bytes of %xmm0
+	pand    %xmm1, %xmm2
+        movdqa SHUF_MASK(%rip), %xmm10
+	PSHUFB_XMM %xmm10 ,%xmm2
+
+	pxor %xmm2, %xmm8
+	GHASH_MUL %xmm8, %xmm13, %xmm9, %xmm10, %xmm11, %xmm5, %xmm6
+	          # GHASH computation for the last <16 byte block
+	sub %r13, %r11
+	add $16, %r11
+
+        # output %r13 bytes
+	MOVQ_R64_XMM	%xmm0, %rax
+	cmp	$8, %r13
+	jle	_less_than_8_bytes_left_decrypt
+	mov	%rax, (%arg2 , %r11, 1)
+	add	$8, %r11
+	psrldq	$8, %xmm0
+	MOVQ_R64_XMM	%xmm0, %rax
+	sub	$8, %r13
+_less_than_8_bytes_left_decrypt:
+	mov	%al,  (%arg2, %r11, 1)
+	add	$1, %r11
+	shr	$8, %rax
+	sub	$1, %r13
+	jne	_less_than_8_bytes_left_decrypt
+_multiple_of_16_bytes_decrypt:
+	mov	arg8, %r12		  # %r12 = aadLen (number of bytes)
+	shl	$3, %r12		  # convert into number of bits
+	movd	%r12d, %xmm15		  # len(A) in %xmm15
+	shl	$3, %arg4		  # len(C) in bits (*128)
+	MOVQ_R64_XMM	%arg4, %xmm1
+	pslldq	$8, %xmm15		  # %xmm15 = len(A)||0x0000000000000000
+	pxor	%xmm1, %xmm15		  # %xmm15 = len(A)||len(C)
+	pxor	%xmm15, %xmm8
+	GHASH_MUL	%xmm8, %xmm13, %xmm9, %xmm10, %xmm11, %xmm5, %xmm6
+	         # final GHASH computation
+        movdqa SHUF_MASK(%rip), %xmm10
+	PSHUFB_XMM %xmm10, %xmm8
+
+	mov	%arg5, %rax		  # %rax = *Y0
+	movdqu	(%rax), %xmm0		  # %xmm0 = Y0
+	ENCRYPT_SINGLE_BLOCK	%xmm0,  %xmm1	  # E(K, Y0)
+	pxor	%xmm8, %xmm0
+_return_T_decrypt:
+	mov	arg9, %r10                # %r10 = authTag
+	mov	arg10, %r11               # %r11 = auth_tag_len
+	cmp	$16, %r11
+	je	_T_16_decrypt
+	cmp	$12, %r11
+	je	_T_12_decrypt
+_T_8_decrypt:
+	MOVQ_R64_XMM	%xmm0, %rax
+	mov	%rax, (%r10)
+	jmp	_return_T_done_decrypt
+_T_12_decrypt:
+	MOVQ_R64_XMM	%xmm0, %rax
+	mov	%rax, (%r10)
+	psrldq	$8, %xmm0
+	movd	%xmm0, %eax
+	mov	%eax, 8(%r10)
+	jmp	_return_T_done_decrypt
+_T_16_decrypt:
+	movdqu	%xmm0, (%r10)
+_return_T_done_decrypt:
+	mov	%r14, %rsp
+	pop	%r14
+	pop	%r13
+	pop	%r12
+	ret
+
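Seen from C, the entry point above corresponds to the prototype documented in its header comment. A sketch of the declaration as the aesni-intel glue code would carry it; callers are expected to have expanded the key schedule, derived the GHASH subkey H = E(K, 0^128) into hash_subkey, and to compare the produced tag themselves:

asmlinkage void aesni_gcm_dec(void *aes_ctx, u8 *out, const u8 *in,
			      u64 plaintext_len, u8 *iv, u8 *hash_subkey,
			      const u8 *aad, u64 aad_len,
			      u8 *auth_tag, u64 auth_tag_len);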
+
+/*****************************************************************************
+* void aesni_gcm_enc(void *aes_ctx,      // AES Key schedule. Starts on a 16 byte boundary.
+*                    u8 *out,            // Ciphertext output. Encrypt in-place is allowed.
+*                    const u8 *in,       // Plaintext input
+*                    u64 plaintext_len,  // Length of data in bytes for encryption.
+*                    u8 *iv,             // Pre-counter block j0: 4 byte salt (from Security Association)
+*                                        // concatenated with 8 byte Initialisation Vector (from IPSec ESP Payload)
+*                                        // concatenated with 0x00000001. 16-byte aligned pointer.
+*                    u8 *hash_subkey,    // H, the Hash sub key input. Data starts on a 16-byte boundary.
+*                    const u8 *aad,      // Additional Authentication Data (AAD)
+*                    u64 aad_len,        // Length of AAD in bytes. With RFC4106 this is going to be 8 or 12 bytes
+*                    u8 *auth_tag,       // Authenticated Tag output.
+*                    u64 auth_tag_len);  // Authenticated Tag Length in bytes. Valid values are 16 (most likely),
+*                                        // 12 or 8.
+*
+* Assumptions:
+*
+* keys:
+*       keys are pre-expanded and aligned to 16 bytes. we are using the
+*       first set of 11 keys in the data structure void *aes_ctx
+*
+*
+* iv:
+*       0                   1                   2                   3
+*       0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+*       +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+*       |                             Salt  (From the SA)               |
+*       +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+*       |                     Initialization Vector                     |
+*       |         (This is the sequence number from IPSec header)       |
+*       +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+*       |                              0x1                              |
+*       +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+*
+*
+*
+* AAD:
+*       AAD padded to 128 bits with 0
+*       for example, assume AAD is a u32 vector
+*
+*       if AAD is 8 bytes:
+*       AAD[3] = {A0, A1};
+*       padded AAD in xmm register = {A1 A0 0 0}
+*
+*       0                   1                   2                   3
+*       0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+*       +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+*       |                               SPI (A1)                        |
+*       +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+*       |                     32-bit Sequence Number (A0)               |
+*       +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+*       |                              0x0                              |
+*       +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+*
+*                                 AAD Format with 32-bit Sequence Number
+*
+*       if AAD is 12 bytes:
+*       AAD[3] = {A0, A1, A2};
+*       padded AAD in xmm register = {A2 A1 A0 0}
+*
+*       0                   1                   2                   3
+*       0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+*       +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+*       |                               SPI (A2)                        |
+*       +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+*       |                 64-bit Extended Sequence Number {A1,A0}       |
+*       |                                                               |
+*       +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+*       |                              0x0                              |
+*       +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+*
+*                         AAD Format with 64-bit Extended Sequence Number
+*
+* aadLen:
+*       from the definition of the spec, aadLen can only be 8 or 12 bytes.
+*       The code also supports an aadLen of 16; for any other size it will fail.
+*
+* TLen:
+*       from the definition of the spec, TLen can only be 8, 12 or 16 bytes.
+*       For other sizes, the code will fail.
+*
+* poly = x^128 + x^127 + x^126 + x^121 + 1
+***************************************************************************/
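For illustration only, a minimal C sketch of how a caller could lay out the 16-byte pre-counter block j0 described above; the helper name and its salt/esp_iv parameters are hypothetical and not part of this patch:

	/* Sketch only: j0 = 4-byte salt || 8-byte ESP IV || 0x00000001 */
	static void build_gcm_j0(u8 j0[16], const u8 salt[4], const u8 esp_iv[8])
	{
		memcpy(j0, salt, 4);			/* salt from the Security Association */
		memcpy(j0 + 4, esp_iv, 8);		/* IV from the IPsec ESP payload */
		*(__be32 *)(j0 + 12) = cpu_to_be32(1);	/* fixed initial counter value */
	}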
+ENTRY(aesni_gcm_enc)
+	push	%r12
+	push	%r13
+	push	%r14
+	mov	%rsp, %r14
+#
+# states of %xmm registers %xmm6:%xmm15 not saved
+# all %xmm registers are clobbered
+#
+	sub	$VARIABLE_OFFSET, %rsp
+	and	$~63, %rsp
+	mov	%arg6, %r12
+	movdqu	(%r12), %xmm13
+        movdqa  SHUF_MASK(%rip), %xmm2
+	PSHUFB_XMM %xmm2, %xmm13
+
+
+# precompute HashKey<<1 mod poly from the HashKey (required for GHASH)
+
+	movdqa	%xmm13, %xmm2
+	psllq	$1, %xmm13
+	psrlq	$63, %xmm2
+	movdqa	%xmm2, %xmm1
+	pslldq	$8, %xmm2
+	psrldq	$8, %xmm1
+	por	%xmm2, %xmm13
+
+        # reduce HashKey<<1
+
+	pshufd	$0x24, %xmm1, %xmm2
+	pcmpeqd TWOONE(%rip), %xmm2
+	pand	POLY(%rip), %xmm2
+	pxor	%xmm2, %xmm13
+	movdqa	%xmm13, HashKey(%rsp)
+	mov	%arg4, %r13            # %xmm13 holds HashKey<<1 (mod poly)
+	and	$-16, %r13
+	mov	%r13, %r12
+
+        # Encrypt first few blocks
+
+	and	$(3<<4), %r12
+	jz	_initial_num_blocks_is_0_encrypt
+	cmp	$(2<<4), %r12
+	jb	_initial_num_blocks_is_1_encrypt
+	je	_initial_num_blocks_is_2_encrypt
+_initial_num_blocks_is_3_encrypt:
+	INITIAL_BLOCKS_ENC	3, %xmm9, %xmm10, %xmm13, %xmm11, %xmm12, %xmm0, \
+%xmm1, %xmm2, %xmm3, %xmm4, %xmm8, %xmm5, %xmm6, 5, 678, enc
+	sub	$48, %r13
+	jmp	_initial_blocks_encrypted
+_initial_num_blocks_is_2_encrypt:
+	INITIAL_BLOCKS_ENC	2, %xmm9, %xmm10, %xmm13, %xmm11, %xmm12, %xmm0, \
+%xmm1, %xmm2, %xmm3, %xmm4, %xmm8, %xmm5, %xmm6, 6, 78, enc
+	sub	$32, %r13
+	jmp	_initial_blocks_encrypted
+_initial_num_blocks_is_1_encrypt:
+	INITIAL_BLOCKS_ENC	1, %xmm9, %xmm10, %xmm13, %xmm11, %xmm12, %xmm0, \
+%xmm1, %xmm2, %xmm3, %xmm4, %xmm8, %xmm5, %xmm6, 7, 8, enc
+	sub	$16, %r13
+	jmp	_initial_blocks_encrypted
+_initial_num_blocks_is_0_encrypt:
+	INITIAL_BLOCKS_ENC	0, %xmm9, %xmm10, %xmm13, %xmm11, %xmm12, %xmm0, \
+%xmm1, %xmm2, %xmm3, %xmm4, %xmm8, %xmm5, %xmm6, 8, 0, enc
+_initial_blocks_encrypted:
+
+        # Main loop - Encrypt remaining blocks
+
+	cmp	$0, %r13
+	je	_zero_cipher_left_encrypt
+	sub	$64, %r13
+	je	_four_cipher_left_encrypt
+_encrypt_by_4_encrypt:
+	GHASH_4_ENCRYPT_4_PARALLEL_ENC	%xmm9, %xmm10, %xmm11, %xmm12, %xmm13, \
+%xmm14, %xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, enc
+	add	$64, %r11
+	sub	$64, %r13
+	jne	_encrypt_by_4_encrypt
+_four_cipher_left_encrypt:
+	GHASH_LAST_4	%xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, \
+%xmm15, %xmm1, %xmm2, %xmm3, %xmm4, %xmm8
+_zero_cipher_left_encrypt:
+	mov	%arg4, %r13
+	and	$15, %r13			# %r13 = arg4 (mod 16)
+	je	_multiple_of_16_bytes_encrypt
+
+         # Handle the last <16 Byte block separately
+	paddd ONE(%rip), %xmm0                # INCR CNT to get Yn
+        movdqa SHUF_MASK(%rip), %xmm10
+	PSHUFB_XMM %xmm10, %xmm0
+
+	ENCRYPT_SINGLE_BLOCK	%xmm0, %xmm1        # Encrypt(K, Yn)
+	sub $16, %r11
+	add %r13, %r11
+	movdqu (%arg3,%r11,1), %xmm1     # receive the last <16 byte blocks
+	lea SHIFT_MASK+16(%rip), %r12
+	sub %r13, %r12
+	# adjust the shuffle mask pointer to be able to shift 16-r13 bytes
+	# (%r13 is the number of bytes in plaintext mod 16)
+	movdqu	(%r12), %xmm2           # get the appropriate shuffle mask
+	PSHUFB_XMM	%xmm2, %xmm1            # shift right 16-r13 byte
+	pxor	%xmm1, %xmm0            # Plaintext XOR Encrypt(K, Yn)
+	movdqu	ALL_F-SHIFT_MASK(%r12), %xmm1
+	# get the appropriate mask to mask out top 16-r13 bytes of xmm0
+	pand	%xmm1, %xmm0            # mask out top 16-r13 bytes of xmm0
+        movdqa SHUF_MASK(%rip), %xmm10
+	PSHUFB_XMM %xmm10,%xmm0
+
+	pxor	%xmm0, %xmm8
+	GHASH_MUL %xmm8, %xmm13, %xmm9, %xmm10, %xmm11, %xmm5, %xmm6
+	# GHASH computation for the last <16 byte block
+	sub	%r13, %r11
+	add	$16, %r11
+	PSHUFB_XMM %xmm10, %xmm1
+
+	# shuffle xmm0 back to output as ciphertext
+
+        # Output %r13 bytes
+	MOVQ_R64_XMM %xmm0, %rax
+	cmp $8, %r13
+	jle _less_than_8_bytes_left_encrypt
+	mov %rax, (%arg2 , %r11, 1)
+	add $8, %r11
+	psrldq $8, %xmm0
+	MOVQ_R64_XMM %xmm0, %rax
+	sub $8, %r13
+_less_than_8_bytes_left_encrypt:
+	mov %al,  (%arg2, %r11, 1)
+	add $1, %r11
+	shr $8, %rax
+	sub $1, %r13
+	jne _less_than_8_bytes_left_encrypt
+_multiple_of_16_bytes_encrypt:
+	mov	arg8, %r12    # %r12 = aadLen (number of bytes)
+	shl	$3, %r12
+	movd	%r12d, %xmm15       # len(A) in %xmm15
+	shl	$3, %arg4               # len(C) in bits (*8)
+	MOVQ_R64_XMM	%arg4, %xmm1
+	pslldq	$8, %xmm15          # %xmm15 = len(A)||0x0000000000000000
+	pxor	%xmm1, %xmm15       # %xmm15 = len(A)||len(C)
+	pxor	%xmm15, %xmm8
+	GHASH_MUL	%xmm8, %xmm13, %xmm9, %xmm10, %xmm11, %xmm5, %xmm6
+	# final GHASH computation
+        movdqa SHUF_MASK(%rip), %xmm10
+	PSHUFB_XMM %xmm10, %xmm8         # perform a 16 byte swap
+
+	mov	%arg5, %rax		       # %rax  = *Y0
+	movdqu	(%rax), %xmm0		       # %xmm0 = Y0
+	ENCRYPT_SINGLE_BLOCK	%xmm0, %xmm15         # Encrypt(K, Y0)
+	pxor	%xmm8, %xmm0
+_return_T_encrypt:
+	mov	arg9, %r10                     # %r10 = authTag
+	mov	arg10, %r11                    # %r11 = auth_tag_len
+	cmp	$16, %r11
+	je	_T_16_encrypt
+	cmp	$12, %r11
+	je	_T_12_encrypt
+_T_8_encrypt:
+	MOVQ_R64_XMM	%xmm0, %rax
+	mov	%rax, (%r10)
+	jmp	_return_T_done_encrypt
+_T_12_encrypt:
+	MOVQ_R64_XMM	%xmm0, %rax
+	mov	%rax, (%r10)
+	psrldq	$8, %xmm0
+	movd	%xmm0, %eax
+	mov	%eax, 8(%r10)
+	jmp	_return_T_done_encrypt
+_T_16_encrypt:
+	movdqu	%xmm0, (%r10)
+_return_T_done_encrypt:
+	mov	%r14, %rsp
+	pop	%r14
+	pop	%r13
+	pop	%r12
+	ret
+
+#endif
+
 
 _key_expansion_128:
 _key_expansion_256a:
@@ -55,10 +1709,11 @@
 	shufps $0b10001100, %xmm0, %xmm4
 	pxor %xmm4, %xmm0
 	pxor %xmm1, %xmm0
-	movaps %xmm0, (%rcx)
-	add $0x10, %rcx
+	movaps %xmm0, (TKEYP)
+	add $0x10, TKEYP
 	ret
 
+.align 4
 _key_expansion_192a:
 	pshufd $0b01010101, %xmm1, %xmm1
 	shufps $0b00010000, %xmm0, %xmm4
@@ -76,12 +1731,13 @@
 
 	movaps %xmm0, %xmm1
 	shufps $0b01000100, %xmm0, %xmm6
-	movaps %xmm6, (%rcx)
+	movaps %xmm6, (TKEYP)
 	shufps $0b01001110, %xmm2, %xmm1
-	movaps %xmm1, 16(%rcx)
-	add $0x20, %rcx
+	movaps %xmm1, 0x10(TKEYP)
+	add $0x20, TKEYP
 	ret
 
+.align 4
 _key_expansion_192b:
 	pshufd $0b01010101, %xmm1, %xmm1
 	shufps $0b00010000, %xmm0, %xmm4
@@ -96,10 +1752,11 @@
 	pxor %xmm3, %xmm2
 	pxor %xmm5, %xmm2
 
-	movaps %xmm0, (%rcx)
-	add $0x10, %rcx
+	movaps %xmm0, (TKEYP)
+	add $0x10, TKEYP
 	ret
 
+.align 4
 _key_expansion_256b:
 	pshufd $0b10101010, %xmm1, %xmm1
 	shufps $0b00010000, %xmm2, %xmm4
@@ -107,8 +1764,8 @@
 	shufps $0b10001100, %xmm2, %xmm4
 	pxor %xmm4, %xmm2
 	pxor %xmm1, %xmm2
-	movaps %xmm2, (%rcx)
-	add $0x10, %rcx
+	movaps %xmm2, (TKEYP)
+	add $0x10, TKEYP
 	ret
 
 /*
@@ -116,17 +1773,23 @@
  *                   unsigned int key_len)
  */
 ENTRY(aesni_set_key)
-	movups (%rsi), %xmm0		# user key (first 16 bytes)
-	movaps %xmm0, (%rdi)
-	lea 0x10(%rdi), %rcx		# key addr
-	movl %edx, 480(%rdi)
+#ifndef __x86_64__
+	pushl KEYP
+	movl 8(%esp), KEYP		# ctx
+	movl 12(%esp), UKEYP		# in_key
+	movl 16(%esp), %edx		# key_len
+#endif
+	movups (UKEYP), %xmm0		# user key (first 16 bytes)
+	movaps %xmm0, (KEYP)
+	lea 0x10(KEYP), TKEYP		# key addr
+	movl %edx, 480(KEYP)
 	pxor %xmm4, %xmm4		# xmm4 is assumed 0 in _key_expansion_x
 	cmp $24, %dl
 	jb .Lenc_key128
 	je .Lenc_key192
-	movups 0x10(%rsi), %xmm2	# other user key
-	movaps %xmm2, (%rcx)
-	add $0x10, %rcx
+	movups 0x10(UKEYP), %xmm2	# other user key
+	movaps %xmm2, (TKEYP)
+	add $0x10, TKEYP
 	AESKEYGENASSIST 0x1 %xmm2 %xmm1		# round 1
 	call _key_expansion_256a
 	AESKEYGENASSIST 0x1 %xmm0 %xmm1
@@ -155,7 +1818,7 @@
 	call _key_expansion_256a
 	jmp .Ldec_key
 .Lenc_key192:
-	movq 0x10(%rsi), %xmm2		# other user key
+	movq 0x10(UKEYP), %xmm2		# other user key
 	AESKEYGENASSIST 0x1 %xmm2 %xmm1		# round 1
 	call _key_expansion_192a
 	AESKEYGENASSIST 0x2 %xmm2 %xmm1		# round 2
@@ -195,33 +1858,47 @@
 	AESKEYGENASSIST 0x36 %xmm0 %xmm1	# round 10
 	call _key_expansion_128
 .Ldec_key:
-	sub $0x10, %rcx
-	movaps (%rdi), %xmm0
-	movaps (%rcx), %xmm1
-	movaps %xmm0, 240(%rcx)
-	movaps %xmm1, 240(%rdi)
-	add $0x10, %rdi
-	lea 240-16(%rcx), %rsi
+	sub $0x10, TKEYP
+	movaps (KEYP), %xmm0
+	movaps (TKEYP), %xmm1
+	movaps %xmm0, 240(TKEYP)
+	movaps %xmm1, 240(KEYP)
+	add $0x10, KEYP
+	lea 240-16(TKEYP), UKEYP
 .align 4
 .Ldec_key_loop:
-	movaps (%rdi), %xmm0
+	movaps (KEYP), %xmm0
 	AESIMC %xmm0 %xmm1
-	movaps %xmm1, (%rsi)
-	add $0x10, %rdi
-	sub $0x10, %rsi
-	cmp %rcx, %rdi
+	movaps %xmm1, (UKEYP)
+	add $0x10, KEYP
+	sub $0x10, UKEYP
+	cmp TKEYP, KEYP
 	jb .Ldec_key_loop
-	xor %rax, %rax
+	xor AREG, AREG
+#ifndef __x86_64__
+	popl KEYP
+#endif
 	ret
 
 /*
  * void aesni_enc(struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src)
  */
 ENTRY(aesni_enc)
+#ifndef __x86_64__
+	pushl KEYP
+	pushl KLEN
+	movl 12(%esp), KEYP
+	movl 16(%esp), OUTP
+	movl 20(%esp), INP
+#endif
 	movl 480(KEYP), KLEN		# key length
 	movups (INP), STATE		# input
 	call _aesni_enc1
 	movups STATE, (OUTP)		# output
+#ifndef __x86_64__
+	popl KLEN
+	popl KEYP
+#endif
 	ret
 
 /*
@@ -236,6 +1913,7 @@
  *	KEY
  *	TKEYP (T1)
  */
+.align 4
 _aesni_enc1:
 	movaps (KEYP), KEY		# key
 	mov KEYP, TKEYP
@@ -298,6 +1976,7 @@
  *	KEY
  *	TKEYP (T1)
  */
+.align 4
 _aesni_enc4:
 	movaps (KEYP), KEY		# key
 	mov KEYP, TKEYP
@@ -391,11 +2070,22 @@
  * void aesni_dec (struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src)
  */
 ENTRY(aesni_dec)
+#ifndef __x86_64__
+	pushl KEYP
+	pushl KLEN
+	movl 12(%esp), KEYP
+	movl 16(%esp), OUTP
+	movl 20(%esp), INP
+#endif
 	mov 480(KEYP), KLEN		# key length
 	add $240, KEYP
 	movups (INP), STATE		# input
 	call _aesni_dec1
 	movups STATE, (OUTP)		#output
+#ifndef __x86_64__
+	popl KLEN
+	popl KEYP
+#endif
 	ret
 
 /*
@@ -410,6 +2100,7 @@
  *	KEY
  *	TKEYP (T1)
  */
+.align 4
 _aesni_dec1:
 	movaps (KEYP), KEY		# key
 	mov KEYP, TKEYP
@@ -472,6 +2163,7 @@
  *	KEY
  *	TKEYP (T1)
  */
+.align 4
 _aesni_dec4:
 	movaps (KEYP), KEY		# key
 	mov KEYP, TKEYP
@@ -566,6 +2258,15 @@
  *		      size_t len)
  */
 ENTRY(aesni_ecb_enc)
+#ifndef __x86_64__
+	pushl LEN
+	pushl KEYP
+	pushl KLEN
+	movl 16(%esp), KEYP
+	movl 20(%esp), OUTP
+	movl 24(%esp), INP
+	movl 28(%esp), LEN
+#endif
 	test LEN, LEN		# check length
 	jz .Lecb_enc_ret
 	mov 480(KEYP), KLEN
@@ -602,6 +2303,11 @@
 	cmp $16, LEN
 	jge .Lecb_enc_loop1
 .Lecb_enc_ret:
+#ifndef __x86_64__
+	popl KLEN
+	popl KEYP
+	popl LEN
+#endif
 	ret
 
 /*
@@ -609,6 +2315,15 @@
  *		      size_t len);
  */
 ENTRY(aesni_ecb_dec)
+#ifndef __x86_64__
+	pushl LEN
+	pushl KEYP
+	pushl KLEN
+	movl 16(%esp), KEYP
+	movl 20(%esp), OUTP
+	movl 24(%esp), INP
+	movl 28(%esp), LEN
+#endif
 	test LEN, LEN
 	jz .Lecb_dec_ret
 	mov 480(KEYP), KLEN
@@ -646,6 +2361,11 @@
 	cmp $16, LEN
 	jge .Lecb_dec_loop1
 .Lecb_dec_ret:
+#ifndef __x86_64__
+	popl KLEN
+	popl KEYP
+	popl LEN
+#endif
 	ret
 
 /*
@@ -653,6 +2373,17 @@
  *		      size_t len, u8 *iv)
  */
 ENTRY(aesni_cbc_enc)
+#ifndef __x86_64__
+	pushl IVP
+	pushl LEN
+	pushl KEYP
+	pushl KLEN
+	movl 20(%esp), KEYP
+	movl 24(%esp), OUTP
+	movl 28(%esp), INP
+	movl 32(%esp), LEN
+	movl 36(%esp), IVP
+#endif
 	cmp $16, LEN
 	jb .Lcbc_enc_ret
 	mov 480(KEYP), KLEN
@@ -670,6 +2401,12 @@
 	jge .Lcbc_enc_loop
 	movups STATE, (IVP)
 .Lcbc_enc_ret:
+#ifndef __x86_64__
+	popl KLEN
+	popl KEYP
+	popl LEN
+	popl IVP
+#endif
 	ret
 
 /*
@@ -677,6 +2414,17 @@
  *		      size_t len, u8 *iv)
  */
 ENTRY(aesni_cbc_dec)
+#ifndef __x86_64__
+	pushl IVP
+	pushl LEN
+	pushl KEYP
+	pushl KLEN
+	movl 20(%esp), KEYP
+	movl 24(%esp), OUTP
+	movl 28(%esp), INP
+	movl 32(%esp), LEN
+	movl 36(%esp), IVP
+#endif
 	cmp $16, LEN
 	jb .Lcbc_dec_just_ret
 	mov 480(KEYP), KLEN
@@ -690,16 +2438,30 @@
 	movaps IN1, STATE1
 	movups 0x10(INP), IN2
 	movaps IN2, STATE2
+#ifdef __x86_64__
 	movups 0x20(INP), IN3
 	movaps IN3, STATE3
 	movups 0x30(INP), IN4
 	movaps IN4, STATE4
+#else
+	movups 0x20(INP), IN1
+	movaps IN1, STATE3
+	movups 0x30(INP), IN2
+	movaps IN2, STATE4
+#endif
 	call _aesni_dec4
 	pxor IV, STATE1
+#ifdef __x86_64__
 	pxor IN1, STATE2
 	pxor IN2, STATE3
 	pxor IN3, STATE4
 	movaps IN4, IV
+#else
+	pxor (INP), STATE2
+	pxor 0x10(INP), STATE3
+	pxor IN1, STATE4
+	movaps IN2, IV
+#endif
 	movups STATE1, (OUTP)
 	movups STATE2, 0x10(OUTP)
 	movups STATE3, 0x20(OUTP)
@@ -727,8 +2489,15 @@
 .Lcbc_dec_ret:
 	movups IV, (IVP)
 .Lcbc_dec_just_ret:
+#ifndef __x86_64__
+	popl KLEN
+	popl KEYP
+	popl LEN
+	popl IVP
+#endif
 	ret
 
+#ifdef __x86_64__
 .align 16
 .Lbswap_mask:
 	.byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0
@@ -744,6 +2513,7 @@
  *	INC:	== 1, in little endian
  *	BSWAP_MASK == endian swapping mask
  */
+.align 4
 _aesni_inc_init:
 	movaps .Lbswap_mask, BSWAP_MASK
 	movaps IV, CTR
@@ -768,6 +2538,7 @@
  *	CTR:	== output IV, in little endian
  *	TCTR_LOW: == lower qword of CTR
  */
+.align 4
 _aesni_inc:
 	paddq INC, CTR
 	add $1, TCTR_LOW
@@ -839,3 +2610,4 @@
 	movups IV, (IVP)
 .Lctr_enc_just_ret:
 	ret
+#endif
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index 2cb3dcc..e1e60c7 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -5,6 +5,14 @@
  * Copyright (C) 2008, Intel Corp.
  *    Author: Huang Ying <ying.huang@intel.com>
  *
+ * Added RFC4106 AES-GCM support for 128-bit keys under the AEAD
+ * interface for 64-bit kernels.
+ *    Authors: Adrian Hoban <adrian.hoban@intel.com>
+ *             Gabriele Paoloni <gabriele.paoloni@intel.com>
+ *             Tadeusz Struk (tadeusz.struk@intel.com)
+ *             Aidan O'Mahony (aidan.o.mahony@intel.com)
+ *    Copyright (c) 2010, Intel Corporation.
+ *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
  * the Free Software Foundation; either version 2 of the License, or
@@ -21,6 +29,10 @@
 #include <crypto/ctr.h>
 #include <asm/i387.h>
 #include <asm/aes.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/internal/aead.h>
+#include <linux/workqueue.h>
+#include <linux/spinlock.h>
 
 #if defined(CONFIG_CRYPTO_CTR) || defined(CONFIG_CRYPTO_CTR_MODULE)
 #define HAS_CTR
@@ -42,8 +54,31 @@
 	struct cryptd_ablkcipher *cryptd_tfm;
 };
 
-#define AESNI_ALIGN	16
+/* This data is stored at the end of the crypto_tfm struct.
+ * It's a type of per "session" data storage location.
+ * This needs to be 16 byte aligned.
+ */
+struct aesni_rfc4106_gcm_ctx {
+	u8 hash_subkey[16];
+	struct crypto_aes_ctx aes_key_expanded;
+	u8 nonce[4];
+	struct cryptd_aead *cryptd_tfm;
+};
+
+struct aesni_gcm_set_hash_subkey_result {
+	int err;
+	struct completion completion;
+};
+
+struct aesni_hash_subkey_req_data {
+	u8 iv[16];
+	struct aesni_gcm_set_hash_subkey_result result;
+	struct scatterlist sg;
+};
+
+#define AESNI_ALIGN	(16)
 #define AES_BLOCK_MASK	(~(AES_BLOCK_SIZE-1))
+#define RFC4106_HASH_SUBKEY_SIZE 16
 
 asmlinkage int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
 			     unsigned int key_len);
@@ -59,9 +94,62 @@
 			      const u8 *in, unsigned int len, u8 *iv);
 asmlinkage void aesni_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out,
 			      const u8 *in, unsigned int len, u8 *iv);
+#ifdef CONFIG_X86_64
 asmlinkage void aesni_ctr_enc(struct crypto_aes_ctx *ctx, u8 *out,
 			      const u8 *in, unsigned int len, u8 *iv);
 
+/* asmlinkage void aesni_gcm_enc()
+ * void *ctx,  AES Key schedule. Starts on a 16 byte boundary.
+ * u8 *out, Ciphertext output. Encrypt in-place is allowed.
+ * const u8 *in, Plaintext input
+ * unsigned long plaintext_len, Length of data in bytes for encryption.
+ * u8 *iv, Pre-counter block j0: 4 byte salt (from Security Association)
+ *         concatenated with 8 byte Initialisation Vector (from IPSec ESP
+ *         Payload) concatenated with 0x00000001. 16-byte aligned pointer.
+ * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
+ * const u8 *aad, Additional Authentication Data (AAD)
+ * unsigned long aad_len, Length of AAD in bytes. With RFC4106 this
+ *          is going to be 8 or 12 bytes
+ * u8 *auth_tag, Authenticated Tag output.
+ * unsigned long auth_tag_len), Authenticated Tag Length in bytes.
+ *          Valid values are 16 (most likely), 12 or 8.
+ */
+asmlinkage void aesni_gcm_enc(void *ctx, u8 *out,
+			const u8 *in, unsigned long plaintext_len, u8 *iv,
+			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
+			u8 *auth_tag, unsigned long auth_tag_len);
+
+/* asmlinkage void aesni_gcm_dec()
+ * void *ctx, AES Key schedule. Starts on a 16 byte boundary.
+ * u8 *out, Plaintext output. Decrypt in-place is allowed.
+ * const u8 *in, Ciphertext input
+ * unsigned long ciphertext_len, Length of data in bytes for decryption.
+ * u8 *iv, Pre-counter block j0: 4 byte salt (from Security Association)
+ *         concatenated with 8 byte Initialisation Vector (from IPSec ESP
+ *         Payload) concatenated with 0x00000001. 16-byte aligned pointer.
+ * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
+ * const u8 *aad, Additional Authentication Data (AAD)
+ * unsigned long aad_len, Length of AAD in bytes. With RFC4106 this is going
+ * to be 8 or 12 bytes
+ * u8 *auth_tag, Authenticated Tag output.
+ * unsigned long auth_tag_len) Authenticated Tag Length in bytes.
+ * Valid values are 16 (most likely), 12 or 8.
+ */
+asmlinkage void aesni_gcm_dec(void *ctx, u8 *out,
+			const u8 *in, unsigned long ciphertext_len, u8 *iv,
+			u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
+			u8 *auth_tag, unsigned long auth_tag_len);
+
+static inline struct
+aesni_rfc4106_gcm_ctx *aesni_rfc4106_gcm_ctx_get(struct crypto_aead *tfm)
+{
+	return
+		(struct aesni_rfc4106_gcm_ctx *)
+		PTR_ALIGN((u8 *)
+		crypto_tfm_ctx(crypto_aead_tfm(tfm)), AESNI_ALIGN);
+}
+#endif
+
 static inline struct crypto_aes_ctx *aes_ctx(void *raw_ctx)
 {
 	unsigned long addr = (unsigned long)raw_ctx;
@@ -324,6 +412,7 @@
 	},
 };
 
+#ifdef CONFIG_X86_64
 static void ctr_crypt_final(struct crypto_aes_ctx *ctx,
 			    struct blkcipher_walk *walk)
 {
@@ -389,6 +478,7 @@
 		},
 	},
 };
+#endif
 
 static int ablk_set_key(struct crypto_ablkcipher *tfm, const u8 *key,
 			unsigned int key_len)
@@ -536,6 +626,7 @@
 	},
 };
 
+#ifdef CONFIG_X86_64
 static int ablk_ctr_init(struct crypto_tfm *tfm)
 {
 	struct cryptd_ablkcipher *cryptd_tfm;
@@ -612,6 +703,7 @@
 	},
 };
 #endif
+#endif
 
 #ifdef HAS_LRW
 static int ablk_lrw_init(struct crypto_tfm *tfm)
@@ -730,6 +822,424 @@
 };
 #endif
 
+#ifdef CONFIG_X86_64
+static int rfc4106_init(struct crypto_tfm *tfm)
+{
+	struct cryptd_aead *cryptd_tfm;
+	struct aesni_rfc4106_gcm_ctx *ctx = (struct aesni_rfc4106_gcm_ctx *)
+		PTR_ALIGN((u8 *)crypto_tfm_ctx(tfm), AESNI_ALIGN);
+	cryptd_tfm = cryptd_alloc_aead("__driver-gcm-aes-aesni", 0, 0);
+	if (IS_ERR(cryptd_tfm))
+		return PTR_ERR(cryptd_tfm);
+	ctx->cryptd_tfm = cryptd_tfm;
+	tfm->crt_aead.reqsize = sizeof(struct aead_request)
+		+ crypto_aead_reqsize(&cryptd_tfm->base);
+	return 0;
+}
+
+static void rfc4106_exit(struct crypto_tfm *tfm)
+{
+	struct aesni_rfc4106_gcm_ctx *ctx =
+		(struct aesni_rfc4106_gcm_ctx *)
+		PTR_ALIGN((u8 *)crypto_tfm_ctx(tfm), AESNI_ALIGN);
+	if (!IS_ERR(ctx->cryptd_tfm))
+		cryptd_free_aead(ctx->cryptd_tfm);
+	return;
+}
+
+static void
+rfc4106_set_hash_subkey_done(struct crypto_async_request *req, int err)
+{
+	struct aesni_gcm_set_hash_subkey_result *result = req->data;
+
+	if (err == -EINPROGRESS)
+		return;
+	result->err = err;
+	complete(&result->completion);
+}
+
+static int
+rfc4106_set_hash_subkey(u8 *hash_subkey, const u8 *key, unsigned int key_len)
+{
+	struct crypto_ablkcipher *ctr_tfm;
+	struct ablkcipher_request *req;
+	int ret = -EINVAL;
+	struct aesni_hash_subkey_req_data *req_data;
+
+	ctr_tfm = crypto_alloc_ablkcipher("ctr(aes)", 0, 0);
+	if (IS_ERR(ctr_tfm))
+		return PTR_ERR(ctr_tfm);
+
+	crypto_ablkcipher_clear_flags(ctr_tfm, ~0);
+
+	ret = crypto_ablkcipher_setkey(ctr_tfm, key, key_len);
+	if (ret) {
+		crypto_free_ablkcipher(ctr_tfm);
+		return ret;
+	}
+
+	req = ablkcipher_request_alloc(ctr_tfm, GFP_KERNEL);
+	if (!req) {
+		crypto_free_ablkcipher(ctr_tfm);
+		return -EINVAL;
+	}
+
+	req_data = kmalloc(sizeof(*req_data), GFP_KERNEL);
+	if (!req_data) {
+		crypto_free_ablkcipher(ctr_tfm);
+		return -ENOMEM;
+	}
+	memset(req_data->iv, 0, sizeof(req_data->iv));
+
+	/* Clear the data in the hash sub key container to zero.*/
+	/* We want to cipher all zeros to create the hash sub key. */
+	memset(hash_subkey, 0, RFC4106_HASH_SUBKEY_SIZE);
+
+	init_completion(&req_data->result.completion);
+	sg_init_one(&req_data->sg, hash_subkey, RFC4106_HASH_SUBKEY_SIZE);
+	ablkcipher_request_set_tfm(req, ctr_tfm);
+	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP |
+					CRYPTO_TFM_REQ_MAY_BACKLOG,
+					rfc4106_set_hash_subkey_done,
+					&req_data->result);
+
+	ablkcipher_request_set_crypt(req, &req_data->sg,
+		&req_data->sg, RFC4106_HASH_SUBKEY_SIZE, req_data->iv);
+
+	ret = crypto_ablkcipher_encrypt(req);
+	if (ret == -EINPROGRESS || ret == -EBUSY) {
+		ret = wait_for_completion_interruptible
+			(&req_data->result.completion);
+		if (!ret)
+			ret = req_data->result.err;
+	}
+	ablkcipher_request_free(req);
+	kfree(req_data);
+	crypto_free_ablkcipher(ctr_tfm);
+	return ret;
+}
+
+static int rfc4106_set_key(struct crypto_aead *parent, const u8 *key,
+						   unsigned int key_len)
+{
+	int ret = 0;
+	struct crypto_tfm *tfm = crypto_aead_tfm(parent);
+	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(parent);
+	u8 *new_key_mem = NULL;
+
+	if (key_len < 4) {
+		crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	}
+	/*Account for 4 byte nonce at the end.*/
+	key_len -= 4;
+	if (key_len != AES_KEYSIZE_128) {
+		crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	}
+
+	memcpy(ctx->nonce, key + key_len, sizeof(ctx->nonce));
+	/*This must be on a 16 byte boundary!*/
+	if ((unsigned long)(&(ctx->aes_key_expanded.key_enc[0])) % AESNI_ALIGN)
+		return -EINVAL;
+
+	if ((unsigned long)key % AESNI_ALIGN) {
+		/*key is not aligned: use an auxiliary aligned pointer*/
+		new_key_mem = kmalloc(key_len+AESNI_ALIGN, GFP_KERNEL);
+		if (!new_key_mem)
+			return -ENOMEM;
+
+		new_key_mem = PTR_ALIGN(new_key_mem, AESNI_ALIGN);
+		memcpy(new_key_mem, key, key_len);
+		key = new_key_mem;
+	}
+
+	if (!irq_fpu_usable())
+		ret = crypto_aes_expand_key(&(ctx->aes_key_expanded),
+		key, key_len);
+	else {
+		kernel_fpu_begin();
+		ret = aesni_set_key(&(ctx->aes_key_expanded), key, key_len);
+		kernel_fpu_end();
+	}
+	/*This must be on a 16 byte boundary!*/
+	if ((unsigned long)(&(ctx->hash_subkey[0])) % AESNI_ALIGN) {
+		ret = -EINVAL;
+		goto exit;
+	}
+	ret = rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
+exit:
+	kfree(new_key_mem);
+	return ret;
+}
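As a usage sketch (not part of this patch), a caller of the new "rfc4106(gcm(aes))" algorithm passes the 4-byte nonce appended to the AES key, so an AES-128 key takes a 20-byte setkey blob; the function and variable names below are illustrative:

	/* Sketch only: key layout is 16-byte AES key followed by the 4-byte nonce */
	static int example_setup(const u8 key_and_salt[20])
	{
		struct crypto_aead *tfm;

		tfm = crypto_alloc_aead("rfc4106(gcm(aes))", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		crypto_aead_setkey(tfm, key_and_salt, 20);	/* 16-byte key + 4-byte nonce */
		crypto_aead_setauthsize(tfm, 16);		/* ICV may be 8, 12 or 16 bytes */
		return 0;
	}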
+
+/* This is the Integrity Check Value (aka the authentication tag) length and
+ * can be 8, 12 or 16 bytes long. */
+static int rfc4106_set_authsize(struct crypto_aead *parent,
+				unsigned int authsize)
+{
+	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(parent);
+	struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
+
+	switch (authsize) {
+	case 8:
+	case 12:
+	case 16:
+		break;
+	default:
+		return -EINVAL;
+	}
+	crypto_aead_crt(parent)->authsize = authsize;
+	crypto_aead_crt(cryptd_child)->authsize = authsize;
+	return 0;
+}
+
+static int rfc4106_encrypt(struct aead_request *req)
+{
+	int ret;
+	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
+	struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
+
+	if (!irq_fpu_usable()) {
+		struct aead_request *cryptd_req =
+			(struct aead_request *) aead_request_ctx(req);
+		memcpy(cryptd_req, req, sizeof(*req));
+		aead_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
+		return crypto_aead_encrypt(cryptd_req);
+	} else {
+		kernel_fpu_begin();
+		ret = cryptd_child->base.crt_aead.encrypt(req);
+		kernel_fpu_end();
+		return ret;
+	}
+}
+
+static int rfc4106_decrypt(struct aead_request *req)
+{
+	int ret;
+	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
+	struct crypto_aead *cryptd_child = cryptd_aead_child(ctx->cryptd_tfm);
+
+	if (!irq_fpu_usable()) {
+		struct aead_request *cryptd_req =
+			(struct aead_request *) aead_request_ctx(req);
+		memcpy(cryptd_req, req, sizeof(*req));
+		aead_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
+		return crypto_aead_decrypt(cryptd_req);
+	} else {
+		kernel_fpu_begin();
+		ret = cryptd_child->base.crt_aead.decrypt(req);
+		kernel_fpu_end();
+		return ret;
+	}
+}
+
+static struct crypto_alg rfc4106_alg = {
+	.cra_name = "rfc4106(gcm(aes))",
+	.cra_driver_name = "rfc4106-gcm-aesni",
+	.cra_priority = 400,
+	.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+	.cra_blocksize = 1,
+	.cra_ctxsize = sizeof(struct aesni_rfc4106_gcm_ctx) + AESNI_ALIGN,
+	.cra_alignmask = 0,
+	.cra_type = &crypto_nivaead_type,
+	.cra_module = THIS_MODULE,
+	.cra_list = LIST_HEAD_INIT(rfc4106_alg.cra_list),
+	.cra_init = rfc4106_init,
+	.cra_exit = rfc4106_exit,
+	.cra_u = {
+		.aead = {
+			.setkey = rfc4106_set_key,
+			.setauthsize = rfc4106_set_authsize,
+			.encrypt = rfc4106_encrypt,
+			.decrypt = rfc4106_decrypt,
+			.geniv = "seqiv",
+			.ivsize = 8,
+			.maxauthsize = 16,
+		},
+	},
+};
+
+static int __driver_rfc4106_encrypt(struct aead_request *req)
+{
+	u8 one_entry_in_sg = 0;
+	u8 *src, *dst, *assoc;
+	__be32 counter = cpu_to_be32(1);
+	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
+	void *aes_ctx = &(ctx->aes_key_expanded);
+	unsigned long auth_tag_len = crypto_aead_authsize(tfm);
+	u8 iv_tab[16+AESNI_ALIGN];
+	u8* iv = (u8 *) PTR_ALIGN((u8 *)iv_tab, AESNI_ALIGN);
+	struct scatter_walk src_sg_walk;
+	struct scatter_walk assoc_sg_walk;
+	struct scatter_walk dst_sg_walk;
+	unsigned int i;
+
+	/* Assuming we are supporting rfc4106 64-bit extended */
+	/* sequence numbers, we need the AAD length to be equal */
+	/* to 8 or 12 bytes. */
+	if (unlikely(req->assoclen != 8 && req->assoclen != 12))
+		return -EINVAL;
+	/* Build the IV below */
+	for (i = 0; i < 4; i++)
+		*(iv+i) = ctx->nonce[i];
+	for (i = 0; i < 8; i++)
+		*(iv+4+i) = req->iv[i];
+	*((__be32 *)(iv+12)) = counter;
+
+	if ((sg_is_last(req->src)) && (sg_is_last(req->assoc))) {
+		one_entry_in_sg = 1;
+		scatterwalk_start(&src_sg_walk, req->src);
+		scatterwalk_start(&assoc_sg_walk, req->assoc);
+		src = scatterwalk_map(&src_sg_walk, 0);
+		assoc = scatterwalk_map(&assoc_sg_walk, 0);
+		dst = src;
+		if (unlikely(req->src != req->dst)) {
+			scatterwalk_start(&dst_sg_walk, req->dst);
+			dst = scatterwalk_map(&dst_sg_walk, 0);
+		}
+
+	} else {
+		/* Allocate memory for src, dst, assoc */
+		src = kmalloc(req->cryptlen + auth_tag_len + req->assoclen,
+			GFP_ATOMIC);
+		if (unlikely(!src))
+			return -ENOMEM;
+		assoc = (src + req->cryptlen + auth_tag_len);
+		scatterwalk_map_and_copy(src, req->src, 0, req->cryptlen, 0);
+		scatterwalk_map_and_copy(assoc, req->assoc, 0,
+					req->assoclen, 0);
+		dst = src;
+	}
+
+	aesni_gcm_enc(aes_ctx, dst, src, (unsigned long)req->cryptlen, iv,
+		ctx->hash_subkey, assoc, (unsigned long)req->assoclen, dst
+		+ ((unsigned long)req->cryptlen), auth_tag_len);
+
+	/* The authTag (aka the Integrity Check Value) needs to be written
+	 * back to the packet. */
+	if (one_entry_in_sg) {
+		if (unlikely(req->src != req->dst)) {
+			scatterwalk_unmap(dst, 0);
+			scatterwalk_done(&dst_sg_walk, 0, 0);
+		}
+		scatterwalk_unmap(src, 0);
+		scatterwalk_unmap(assoc, 0);
+		scatterwalk_done(&src_sg_walk, 0, 0);
+		scatterwalk_done(&assoc_sg_walk, 0, 0);
+	} else {
+		scatterwalk_map_and_copy(dst, req->dst, 0,
+			req->cryptlen + auth_tag_len, 1);
+		kfree(src);
+	}
+	return 0;
+}
+
+static int __driver_rfc4106_decrypt(struct aead_request *req)
+{
+	u8 one_entry_in_sg = 0;
+	u8 *src, *dst, *assoc;
+	unsigned long tempCipherLen = 0;
+	__be32 counter = cpu_to_be32(1);
+	int retval = 0;
+	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+	struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
+	void *aes_ctx = &(ctx->aes_key_expanded);
+	unsigned long auth_tag_len = crypto_aead_authsize(tfm);
+	u8 iv_and_authTag[32+AESNI_ALIGN];
+	u8 *iv = (u8 *) PTR_ALIGN((u8 *)iv_and_authTag, AESNI_ALIGN);
+	u8 *authTag = iv + 16;
+	struct scatter_walk src_sg_walk;
+	struct scatter_walk assoc_sg_walk;
+	struct scatter_walk dst_sg_walk;
+	unsigned int i;
+
+	if (unlikely((req->cryptlen < auth_tag_len) ||
+		(req->assoclen != 8 && req->assoclen != 12)))
+		return -EINVAL;
+	/* Assuming we are supporting rfc4106 64-bit extended */
+	/* sequence numbers, we need the AAD length to be */
+	/* equal to 8 or 12 bytes. */
+
+	tempCipherLen = (unsigned long)(req->cryptlen - auth_tag_len);
+	/* Build the IV below */
+	for (i = 0; i < 4; i++)
+		*(iv+i) = ctx->nonce[i];
+	for (i = 0; i < 8; i++)
+		*(iv+4+i) = req->iv[i];
+	*((__be32 *)(iv+12)) = counter;
+
+	if ((sg_is_last(req->src)) && (sg_is_last(req->assoc))) {
+		one_entry_in_sg = 1;
+		scatterwalk_start(&src_sg_walk, req->src);
+		scatterwalk_start(&assoc_sg_walk, req->assoc);
+		src = scatterwalk_map(&src_sg_walk, 0);
+		assoc = scatterwalk_map(&assoc_sg_walk, 0);
+		dst = src;
+		if (unlikely(req->src != req->dst)) {
+			scatterwalk_start(&dst_sg_walk, req->dst);
+			dst = scatterwalk_map(&dst_sg_walk, 0);
+		}
+
+	} else {
+		/* Allocate memory for src, dst, assoc */
+		src = kmalloc(req->cryptlen + req->assoclen, GFP_ATOMIC);
+		if (!src)
+			return -ENOMEM;
+		assoc = (src + req->cryptlen + auth_tag_len);
+		scatterwalk_map_and_copy(src, req->src, 0, req->cryptlen, 0);
+		scatterwalk_map_and_copy(assoc, req->assoc, 0,
+			req->assoclen, 0);
+		dst = src;
+	}
+
+	aesni_gcm_dec(aes_ctx, dst, src, tempCipherLen, iv,
+		ctx->hash_subkey, assoc, (unsigned long)req->assoclen,
+		authTag, auth_tag_len);
+
+	/* Compare generated tag with passed in tag. */
+	retval = memcmp(src + tempCipherLen, authTag, auth_tag_len) ?
+		-EBADMSG : 0;
+
+	if (one_entry_in_sg) {
+		if (unlikely(req->src != req->dst)) {
+			scatterwalk_unmap(dst, 0);
+			scatterwalk_done(&dst_sg_walk, 0, 0);
+		}
+		scatterwalk_unmap(src, 0);
+		scatterwalk_unmap(assoc, 0);
+		scatterwalk_done(&src_sg_walk, 0, 0);
+		scatterwalk_done(&assoc_sg_walk, 0, 0);
+	} else {
+		scatterwalk_map_and_copy(dst, req->dst, 0, req->cryptlen, 1);
+		kfree(src);
+	}
+	return retval;
+}
+
+static struct crypto_alg __rfc4106_alg = {
+	.cra_name		= "__gcm-aes-aesni",
+	.cra_driver_name	= "__driver-gcm-aes-aesni",
+	.cra_priority		= 0,
+	.cra_flags		= CRYPTO_ALG_TYPE_AEAD,
+	.cra_blocksize		= 1,
+	.cra_ctxsize	= sizeof(struct aesni_rfc4106_gcm_ctx) + AESNI_ALIGN,
+	.cra_alignmask		= 0,
+	.cra_type		= &crypto_aead_type,
+	.cra_module		= THIS_MODULE,
+	.cra_list		= LIST_HEAD_INIT(__rfc4106_alg.cra_list),
+	.cra_u = {
+		.aead = {
+			.encrypt	= __driver_rfc4106_encrypt,
+			.decrypt	= __driver_rfc4106_decrypt,
+		},
+	},
+};
+#endif
+
 static int __init aesni_init(void)
 {
 	int err;
@@ -738,6 +1248,7 @@
 		printk(KERN_INFO "Intel AES-NI instructions are not detected.\n");
 		return -ENODEV;
 	}
+
 	if ((err = crypto_register_alg(&aesni_alg)))
 		goto aes_err;
 	if ((err = crypto_register_alg(&__aesni_alg)))
@@ -746,18 +1257,24 @@
 		goto blk_ecb_err;
 	if ((err = crypto_register_alg(&blk_cbc_alg)))
 		goto blk_cbc_err;
-	if ((err = crypto_register_alg(&blk_ctr_alg)))
-		goto blk_ctr_err;
 	if ((err = crypto_register_alg(&ablk_ecb_alg)))
 		goto ablk_ecb_err;
 	if ((err = crypto_register_alg(&ablk_cbc_alg)))
 		goto ablk_cbc_err;
+#ifdef CONFIG_X86_64
+	if ((err = crypto_register_alg(&blk_ctr_alg)))
+		goto blk_ctr_err;
 	if ((err = crypto_register_alg(&ablk_ctr_alg)))
 		goto ablk_ctr_err;
+	if ((err = crypto_register_alg(&__rfc4106_alg)))
+		goto __aead_gcm_err;
+	if ((err = crypto_register_alg(&rfc4106_alg)))
+		goto aead_gcm_err;
 #ifdef HAS_CTR
 	if ((err = crypto_register_alg(&ablk_rfc3686_ctr_alg)))
 		goto ablk_rfc3686_ctr_err;
 #endif
+#endif
 #ifdef HAS_LRW
 	if ((err = crypto_register_alg(&ablk_lrw_alg)))
 		goto ablk_lrw_err;
@@ -770,7 +1287,6 @@
 	if ((err = crypto_register_alg(&ablk_xts_alg)))
 		goto ablk_xts_err;
 #endif
-
 	return err;
 
 #ifdef HAS_XTS
@@ -784,18 +1300,24 @@
 	crypto_unregister_alg(&ablk_lrw_alg);
 ablk_lrw_err:
 #endif
+#ifdef CONFIG_X86_64
 #ifdef HAS_CTR
 	crypto_unregister_alg(&ablk_rfc3686_ctr_alg);
 ablk_rfc3686_ctr_err:
 #endif
+	crypto_unregister_alg(&rfc4106_alg);
+aead_gcm_err:
+	crypto_unregister_alg(&__rfc4106_alg);
+__aead_gcm_err:
 	crypto_unregister_alg(&ablk_ctr_alg);
 ablk_ctr_err:
+	crypto_unregister_alg(&blk_ctr_alg);
+blk_ctr_err:
+#endif
 	crypto_unregister_alg(&ablk_cbc_alg);
 ablk_cbc_err:
 	crypto_unregister_alg(&ablk_ecb_alg);
 ablk_ecb_err:
-	crypto_unregister_alg(&blk_ctr_alg);
-blk_ctr_err:
 	crypto_unregister_alg(&blk_cbc_alg);
 blk_cbc_err:
 	crypto_unregister_alg(&blk_ecb_alg);
@@ -818,13 +1340,17 @@
 #ifdef HAS_LRW
 	crypto_unregister_alg(&ablk_lrw_alg);
 #endif
+#ifdef CONFIG_X86_64
 #ifdef HAS_CTR
 	crypto_unregister_alg(&ablk_rfc3686_ctr_alg);
 #endif
+	crypto_unregister_alg(&rfc4106_alg);
+	crypto_unregister_alg(&__rfc4106_alg);
 	crypto_unregister_alg(&ablk_ctr_alg);
+	crypto_unregister_alg(&blk_ctr_alg);
+#endif
 	crypto_unregister_alg(&ablk_cbc_alg);
 	crypto_unregister_alg(&ablk_ecb_alg);
-	crypto_unregister_alg(&blk_ctr_alg);
 	crypto_unregister_alg(&blk_cbc_alg);
 	crypto_unregister_alg(&blk_ecb_alg);
 	crypto_unregister_alg(&__aesni_alg);
diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h
index 55d106b..211ca3f 100644
--- a/arch/x86/include/asm/acpi.h
+++ b/arch/x86/include/asm/acpi.h
@@ -185,17 +185,16 @@
 
 #ifdef CONFIG_ACPI_NUMA
 extern int acpi_numa;
-extern int acpi_get_nodes(struct bootnode *physnodes);
+extern void acpi_get_nodes(struct bootnode *physnodes, unsigned long start,
+				unsigned long end);
 extern int acpi_scan_nodes(unsigned long start, unsigned long end);
 #define NR_NODE_MEMBLKS (MAX_NUMNODES*2)
+
+#ifdef CONFIG_NUMA_EMU
 extern void acpi_fake_nodes(const struct bootnode *fake_nodes,
 				   int num_nodes);
-#else
-static inline void acpi_fake_nodes(const struct bootnode *fake_nodes,
-				   int num_nodes)
-{
-}
 #endif
+#endif /* CONFIG_ACPI_NUMA */
 
 #define acpi_unlazy_tlb(x)	leave_mm(x)
 
diff --git a/arch/x86/include/asm/amd_nb.h b/arch/x86/include/asm/amd_nb.h
index 6aee50d..64dc82e 100644
--- a/arch/x86/include/asm/amd_nb.h
+++ b/arch/x86/include/asm/amd_nb.h
@@ -3,16 +3,27 @@
 
 #include <linux/pci.h>
 
+struct amd_nb_bus_dev_range {
+	u8 bus;
+	u8 dev_base;
+	u8 dev_limit;
+};
+
 extern struct pci_device_id amd_nb_misc_ids[];
+extern const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[];
 struct bootnode;
 
 extern int early_is_amd_nb(u32 value);
 extern int amd_cache_northbridges(void);
 extern void amd_flush_garts(void);
-extern int amd_get_nodes(struct bootnode *nodes);
 extern int amd_numa_init(unsigned long start_pfn, unsigned long end_pfn);
 extern int amd_scan_nodes(void);
 
+#ifdef CONFIG_NUMA_EMU
+extern void amd_fake_nodes(const struct bootnode *nodes, int nr_nodes);
+extern void amd_get_nodes(struct bootnode *nodes);
+#endif
+
 struct amd_northbridge {
 	struct pci_dev *misc;
 };
diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
index 3b62ab5..5e1a2ee 100644
--- a/arch/x86/include/asm/boot.h
+++ b/arch/x86/include/asm/boot.h
@@ -32,11 +32,7 @@
 #define BOOT_HEAP_SIZE             0x400000
 #else /* !CONFIG_KERNEL_BZIP2 */
 
-#ifdef CONFIG_X86_64
-#define BOOT_HEAP_SIZE	0x7000
-#else
-#define BOOT_HEAP_SIZE	0x4000
-#endif
+#define BOOT_HEAP_SIZE	0x8000
 
 #endif /* !CONFIG_KERNEL_BZIP2 */
 
diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h
index 0141b23..4729b2b 100644
--- a/arch/x86/include/asm/fixmap.h
+++ b/arch/x86/include/asm/fixmap.h
@@ -116,11 +116,11 @@
 #endif
 	FIX_TEXT_POKE1,	/* reserve 2 pages for text_poke() */
 	FIX_TEXT_POKE0, /* first page is last, because allocation is backward */
-	__end_of_permanent_fixed_addresses,
-
 #ifdef	CONFIG_X86_MRST
 	FIX_LNW_VRTC,
 #endif
+	__end_of_permanent_fixed_addresses,
+
 	/*
 	 * 256 temporary boot-time mappings, used by early_ioremap(),
 	 * before ioremap() is functional.
diff --git a/arch/x86/include/asm/gpio.h b/arch/x86/include/asm/gpio.h
index 49dbfdf..91d915a 100644
--- a/arch/x86/include/asm/gpio.h
+++ b/arch/x86/include/asm/gpio.h
@@ -38,12 +38,9 @@
 	return __gpio_cansleep(gpio);
 }
 
-/*
- * Not implemented, yet.
- */
 static inline int gpio_to_irq(unsigned int gpio)
 {
-	return -ENOSYS;
+	return __gpio_to_irq(gpio);
 }
 
 static inline int irq_to_gpio(unsigned int irq)
diff --git a/arch/x86/include/asm/irq.h b/arch/x86/include/asm/irq.h
index ba870bb..c704b38 100644
--- a/arch/x86/include/asm/irq.h
+++ b/arch/x86/include/asm/irq.h
@@ -10,6 +10,9 @@
 #include <asm/apicdef.h>
 #include <asm/irq_vectors.h>
 
+/* Even though we don't support this, supply it to appease OF */
+static inline void irq_dispose_mapping(unsigned int virq) { }
+
 static inline int irq_canonicalize(int irq)
 {
 	return ((irq == 2) ? 9 : irq);
diff --git a/arch/x86/include/asm/kdebug.h b/arch/x86/include/asm/kdebug.h
index f23eb25..ca242d3 100644
--- a/arch/x86/include/asm/kdebug.h
+++ b/arch/x86/include/asm/kdebug.h
@@ -18,7 +18,6 @@
 	DIE_TRAP,
 	DIE_GPF,
 	DIE_CALL,
-	DIE_NMI_IPI,
 	DIE_PAGE_FAULT,
 	DIE_NMIUNKNOWN,
 };
diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h
index b36c6b3..8e37deb 100644
--- a/arch/x86/include/asm/kvm_emulate.h
+++ b/arch/x86/include/asm/kvm_emulate.h
@@ -15,6 +15,14 @@
 
 struct x86_emulate_ctxt;
 
+struct x86_exception {
+	u8 vector;
+	bool error_code_valid;
+	u16 error_code;
+	bool nested_page_fault;
+	u64 address; /* cr2 or nested page fault gpa */
+};
+
 /*
  * x86_emulate_ops:
  *
@@ -64,7 +72,8 @@
 	 *  @bytes: [IN ] Number of bytes to read from memory.
 	 */
 	int (*read_std)(unsigned long addr, void *val,
-			unsigned int bytes, struct kvm_vcpu *vcpu, u32 *error);
+			unsigned int bytes, struct kvm_vcpu *vcpu,
+			struct x86_exception *fault);
 
 	/*
 	 * write_std: Write bytes of standard (non-emulated/special) memory.
@@ -74,7 +83,8 @@
 	 *  @bytes: [IN ] Number of bytes to write to memory.
 	 */
 	int (*write_std)(unsigned long addr, void *val,
-			 unsigned int bytes, struct kvm_vcpu *vcpu, u32 *error);
+			 unsigned int bytes, struct kvm_vcpu *vcpu,
+			 struct x86_exception *fault);
 	/*
 	 * fetch: Read bytes of standard (non-emulated/special) memory.
 	 *        Used for instruction fetch.
@@ -83,7 +93,8 @@
 	 *  @bytes: [IN ] Number of bytes to read from memory.
 	 */
 	int (*fetch)(unsigned long addr, void *val,
-			unsigned int bytes, struct kvm_vcpu *vcpu, u32 *error);
+		     unsigned int bytes, struct kvm_vcpu *vcpu,
+		     struct x86_exception *fault);
 
 	/*
 	 * read_emulated: Read bytes from emulated/special memory area.
@@ -94,7 +105,7 @@
 	int (*read_emulated)(unsigned long addr,
 			     void *val,
 			     unsigned int bytes,
-			     unsigned int *error,
+			     struct x86_exception *fault,
 			     struct kvm_vcpu *vcpu);
 
 	/*
@@ -107,7 +118,7 @@
 	int (*write_emulated)(unsigned long addr,
 			      const void *val,
 			      unsigned int bytes,
-			      unsigned int *error,
+			      struct x86_exception *fault,
 			      struct kvm_vcpu *vcpu);
 
 	/*
@@ -122,7 +133,7 @@
 				const void *old,
 				const void *new,
 				unsigned int bytes,
-				unsigned int *error,
+				struct x86_exception *fault,
 				struct kvm_vcpu *vcpu);
 
 	int (*pio_in_emulated)(int size, unsigned short port, void *val,
@@ -159,7 +170,10 @@
 	};
 	union {
 		unsigned long *reg;
-		unsigned long mem;
+		struct segmented_address {
+			ulong ea;
+			unsigned seg;
+		} mem;
 	} addr;
 	union {
 		unsigned long val;
@@ -226,9 +240,8 @@
 
 	bool perm_ok; /* do not check permissions if true */
 
-	int exception; /* exception that happens during emulation or -1 */
-	u32 error_code; /* error code for exception */
-	bool error_code_valid;
+	bool have_exception;
+	struct x86_exception exception;
 
 	/* decode cache */
 	struct decode_cache decode;
@@ -252,7 +265,7 @@
 #define X86EMUL_MODE_HOST X86EMUL_MODE_PROT64
 #endif
 
-int x86_decode_insn(struct x86_emulate_ctxt *ctxt);
+int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len);
 #define EMULATION_FAILED -1
 #define EMULATION_OK 0
 #define EMULATION_RESTART 1
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index f702f82..aa75f21 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -83,11 +83,14 @@
 #define KVM_NR_FIXED_MTRR_REGION 88
 #define KVM_NR_VAR_MTRR 8
 
+#define ASYNC_PF_PER_VCPU 64
+
 extern spinlock_t kvm_lock;
 extern struct list_head vm_list;
 
 struct kvm_vcpu;
 struct kvm;
+struct kvm_async_pf;
 
 enum kvm_reg {
 	VCPU_REGS_RAX = 0,
@@ -114,6 +117,7 @@
 
 enum kvm_reg_ex {
 	VCPU_EXREG_PDPTR = NR_VCPU_REGS,
+	VCPU_EXREG_CR3,
 };
 
 enum {
@@ -238,16 +242,18 @@
 	void (*new_cr3)(struct kvm_vcpu *vcpu);
 	void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long root);
 	unsigned long (*get_cr3)(struct kvm_vcpu *vcpu);
-	int (*page_fault)(struct kvm_vcpu *vcpu, gva_t gva, u32 err);
-	void (*inject_page_fault)(struct kvm_vcpu *vcpu);
+	int (*page_fault)(struct kvm_vcpu *vcpu, gva_t gva, u32 err,
+			  bool prefault);
+	void (*inject_page_fault)(struct kvm_vcpu *vcpu,
+				  struct x86_exception *fault);
 	void (*free)(struct kvm_vcpu *vcpu);
 	gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva, u32 access,
-			    u32 *error);
+			    struct x86_exception *exception);
 	gpa_t (*translate_gpa)(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access);
 	void (*prefetch_page)(struct kvm_vcpu *vcpu,
 			      struct kvm_mmu_page *page);
 	int (*sync_page)(struct kvm_vcpu *vcpu,
-			 struct kvm_mmu_page *sp, bool clear_unsync);
+			 struct kvm_mmu_page *sp);
 	void (*invlpg)(struct kvm_vcpu *vcpu, gva_t gva);
 	hpa_t root_hpa;
 	int root_level;
@@ -315,16 +321,6 @@
 	 */
 	struct kvm_mmu *walk_mmu;
 
-	/*
-	 * This struct is filled with the necessary information to propagate a
-	 * page fault into the guest
-	 */
-	struct {
-		u64      address;
-		unsigned error_code;
-		bool     nested;
-	} fault;
-
 	/* only needed in kvm_pv_mmu_op() path, but it's hot so
 	 * put it here to avoid allocation */
 	struct kvm_pv_mmu_op_buffer mmu_op_buffer;
@@ -412,6 +408,15 @@
 	u64 hv_vapic;
 
 	cpumask_var_t wbinvd_dirty_mask;
+
+	struct {
+		bool halted;
+		gfn_t gfns[roundup_pow_of_two(ASYNC_PF_PER_VCPU)];
+		struct gfn_to_hva_cache data;
+		u64 msr_val;
+		u32 id;
+		bool send_user_only;
+	} apf;
 };
 
 struct kvm_arch {
@@ -456,6 +461,10 @@
 	/* fields used by HYPER-V emulation */
 	u64 hv_guest_os_id;
 	u64 hv_hypercall;
+
+	#ifdef CONFIG_KVM_MMU_AUDIT
+	int audit_point;
+	#endif
 };
 
 struct kvm_vm_stat {
@@ -529,6 +538,7 @@
 			    struct kvm_segment *var, int seg);
 	void (*get_cs_db_l_bits)(struct kvm_vcpu *vcpu, int *db, int *l);
 	void (*decache_cr0_guest_bits)(struct kvm_vcpu *vcpu);
+	void (*decache_cr3)(struct kvm_vcpu *vcpu);
 	void (*decache_cr4_guest_bits)(struct kvm_vcpu *vcpu);
 	void (*set_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0);
 	void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);
@@ -582,9 +592,17 @@
 
 	void (*write_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset);
 
+	void (*get_exit_info)(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2);
 	const struct trace_print_flags *exit_reasons_str;
 };
 
+struct kvm_arch_async_pf {
+	u32 token;
+	gfn_t gfn;
+	unsigned long cr3;
+	bool direct_map;
+};
+
 extern struct kvm_x86_ops *kvm_x86_ops;
 
 int kvm_mmu_module_init(void);
@@ -594,7 +612,6 @@
 int kvm_mmu_create(struct kvm_vcpu *vcpu);
 int kvm_mmu_setup(struct kvm_vcpu *vcpu);
 void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte);
-void kvm_mmu_set_base_ptes(u64 base_pte);
 void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
 		u64 dirty_mask, u64 nx_mask, u64 x_mask);
 
@@ -623,8 +640,15 @@
 #define EMULTYPE_NO_DECODE	    (1 << 0)
 #define EMULTYPE_TRAP_UD	    (1 << 1)
 #define EMULTYPE_SKIP		    (1 << 2)
-int emulate_instruction(struct kvm_vcpu *vcpu,
-			unsigned long cr2, u16 error_code, int emulation_type);
+int x86_emulate_instruction(struct kvm_vcpu *vcpu, unsigned long cr2,
+			    int emulation_type, void *insn, int insn_len);
+
+static inline int emulate_instruction(struct kvm_vcpu *vcpu,
+			int emulation_type)
+{
+	return x86_emulate_instruction(vcpu, 0, emulation_type, NULL, 0);
+}
+
 void realmode_lgdt(struct kvm_vcpu *vcpu, u16 size, unsigned long address);
 void realmode_lidt(struct kvm_vcpu *vcpu, u16 size, unsigned long address);
 
@@ -650,7 +674,7 @@
 int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
 int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
 int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
-void kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8);
+int kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8);
 int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val);
 int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val);
 unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu);
@@ -668,11 +692,11 @@
 void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
 void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr);
 void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
-void kvm_inject_page_fault(struct kvm_vcpu *vcpu);
+void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault);
 int kvm_read_guest_page_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
 			    gfn_t gfn, void *data, int offset, int len,
 			    u32 access);
-void kvm_propagate_fault(struct kvm_vcpu *vcpu);
+void kvm_propagate_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault);
 bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl);
 
 int kvm_pic_set_irq(void *opaque, int irq, int level);
@@ -690,16 +714,21 @@
 int kvm_mmu_load(struct kvm_vcpu *vcpu);
 void kvm_mmu_unload(struct kvm_vcpu *vcpu);
 void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu);
-gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva, u32 *error);
-gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva, u32 *error);
-gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva, u32 *error);
-gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva, u32 *error);
+gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva,
+			      struct x86_exception *exception);
+gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva,
+			       struct x86_exception *exception);
+gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva,
+			       struct x86_exception *exception);
+gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva,
+				struct x86_exception *exception);
 
 int kvm_emulate_hypercall(struct kvm_vcpu *vcpu);
 
 int kvm_fix_hypercall(struct kvm_vcpu *vcpu);
 
-int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva, u32 error_code);
+int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva, u32 error_code,
+		       void *insn, int insn_len);
 void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva);
 
 void kvm_enable_tdp(void);
@@ -766,20 +795,25 @@
 #define HF_VINTR_MASK		(1 << 2)
 #define HF_NMI_MASK		(1 << 3)
 #define HF_IRET_MASK		(1 << 4)
+#define HF_GUEST_MASK		(1 << 5) /* VCPU is in guest-mode */
 
 /*
  * Hardware virtualization extension instructions may fault if a
  * reboot turns off virtualization while processes are running.
  * Trap the fault and ignore the instruction if that happens.
  */
-asmlinkage void kvm_handle_fault_on_reboot(void);
+asmlinkage void kvm_spurious_fault(void);
+extern bool kvm_rebooting;
 
 #define __kvm_handle_fault_on_reboot(insn) \
 	"666: " insn "\n\t" \
+	"668: \n\t"                           \
 	".pushsection .fixup, \"ax\" \n" \
 	"667: \n\t" \
+	"cmpb $0, kvm_rebooting \n\t"	      \
+	"jne 668b \n\t"      		      \
 	__ASM_SIZE(push) " $666b \n\t"	      \
-	"jmp kvm_handle_fault_on_reboot \n\t" \
+	"call kvm_spurious_fault \n\t"	      \
 	".popsection \n\t" \
 	".pushsection __ex_table, \"a\" \n\t" \
 	_ASM_PTR " 666b, 667b \n\t" \
@@ -799,4 +833,15 @@
 
 bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip);
 
+void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
+				     struct kvm_async_pf *work);
+void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
+				 struct kvm_async_pf *work);
+void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
+			       struct kvm_async_pf *work);
+bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu);
+extern bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn);
+
+void kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err);
+
 #endif /* _ASM_X86_KVM_HOST_H */
diff --git a/arch/x86/include/asm/kvm_para.h b/arch/x86/include/asm/kvm_para.h
index 7b562b6..a427bf7 100644
--- a/arch/x86/include/asm/kvm_para.h
+++ b/arch/x86/include/asm/kvm_para.h
@@ -20,6 +20,7 @@
  * are available. The use of 0x11 and 0x12 is deprecated
  */
 #define KVM_FEATURE_CLOCKSOURCE2        3
+#define KVM_FEATURE_ASYNC_PF		4
 
 /* The last 8 bits are used to indicate how to interpret the flags field
  * in pvclock structure. If no bits are set, all flags are ignored.
@@ -32,9 +33,13 @@
 /* Custom MSRs falls in the range 0x4b564d00-0x4b564dff */
 #define MSR_KVM_WALL_CLOCK_NEW  0x4b564d00
 #define MSR_KVM_SYSTEM_TIME_NEW 0x4b564d01
+#define MSR_KVM_ASYNC_PF_EN 0x4b564d02
 
 #define KVM_MAX_MMU_OP_BATCH           32
 
+#define KVM_ASYNC_PF_ENABLED			(1 << 0)
+#define KVM_ASYNC_PF_SEND_ALWAYS		(1 << 1)
+
 /* Operations for KVM_HC_MMU_OP */
 #define KVM_MMU_OP_WRITE_PTE            1
 #define KVM_MMU_OP_FLUSH_TLB	        2
@@ -61,10 +66,20 @@
 	__u64 pt_phys;
 };
 
+#define KVM_PV_REASON_PAGE_NOT_PRESENT 1
+#define KVM_PV_REASON_PAGE_READY 2
+
+struct kvm_vcpu_pv_apf_data {
+	__u32 reason;
+	__u8 pad[60];
+	__u32 enabled;
+};
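A guest would typically enable this mechanism by writing the physical address of a per-CPU kvm_vcpu_pv_apf_data area, ORed with the enable bits, to the new MSR. Roughly (sketch only; the apf_reason variable name is an assumption):

	/* Sketch: guest-side enabling of async page faults for this vCPU */
	u64 pa = __pa(&apf_reason);	/* apf_reason: struct kvm_vcpu_pv_apf_data */

	pa |= KVM_ASYNC_PF_ENABLED | KVM_ASYNC_PF_SEND_ALWAYS;
	wrmsrl(MSR_KVM_ASYNC_PF_EN, pa);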
+
 #ifdef __KERNEL__
 #include <asm/processor.h>
 
 extern void kvmclock_init(void);
+extern int kvm_register_clock(char *txt);
 
 
 /* This instruction is vmcall.  On non-VT architectures, it will generate a
@@ -160,8 +175,17 @@
 
 #ifdef CONFIG_KVM_GUEST
 void __init kvm_guest_init(void);
+void kvm_async_pf_task_wait(u32 token);
+void kvm_async_pf_task_wake(u32 token);
+u32 kvm_read_and_reset_pf_reason(void);
 #else
 #define kvm_guest_init() do { } while (0)
+#define kvm_async_pf_task_wait(T) do {} while(0)
+#define kvm_async_pf_task_wake(T) do {} while(0)
+static inline u32 kvm_read_and_reset_pf_reason(void)
+{
+	return 0;
+}
 #endif
 
 #endif /* __KERNEL__ */
diff --git a/arch/x86/include/asm/mach_traps.h b/arch/x86/include/asm/mach_traps.h
index f792060..72a8b52 100644
--- a/arch/x86/include/asm/mach_traps.h
+++ b/arch/x86/include/asm/mach_traps.h
@@ -7,9 +7,19 @@
 
 #include <asm/mc146818rtc.h>
 
+#define NMI_REASON_PORT		0x61
+
+#define NMI_REASON_SERR		0x80
+#define NMI_REASON_IOCHK	0x40
+#define NMI_REASON_MASK		(NMI_REASON_SERR | NMI_REASON_IOCHK)
+
+#define NMI_REASON_CLEAR_SERR	0x04
+#define NMI_REASON_CLEAR_IOCHK	0x08
+#define NMI_REASON_CLEAR_MASK	0x0f
+
 static inline unsigned char get_nmi_reason(void)
 {
-	return inb(0x61);
+	return inb(NMI_REASON_PORT);
 }
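With the named constants, a handler acknowledging an I/O check NMI would read and rewrite the reason port along these lines (sketch only, mirroring the usual clear-bit pattern rather than any specific function in this patch):

	unsigned char reason = get_nmi_reason();

	if (reason & NMI_REASON_IOCHK) {
		/* keep only the clear-mask bits and set the IOCHK clear bit */
		reason = (reason & NMI_REASON_CLEAR_MASK) | NMI_REASON_CLEAR_IOCHK;
		outb(reason, NMI_REASON_PORT);
	}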
 
 static inline void reassert_nmi(void)
diff --git a/arch/x86/include/asm/nmi.h b/arch/x86/include/asm/nmi.h
index c4021b9..c76f5b9 100644
--- a/arch/x86/include/asm/nmi.h
+++ b/arch/x86/include/asm/nmi.h
@@ -23,6 +23,26 @@
 #define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace
 #endif
 
+/*
+ * Define some priorities for the nmi notifier call chain.
+ *
+ * Create a local nmi bit that has a higher priority than
+ * external nmis, because the local ones are more frequent.
+ *
+ * Also set up some default high/normal/low settings for
+ * subsystems to register with.  Using 4 bits to separate
+ * the priorities.  This can go a lot higher if need be.
+ */
+
+#define NMI_LOCAL_SHIFT		16	/* randomly picked */
+#define NMI_LOCAL_BIT		(1ULL << NMI_LOCAL_SHIFT)
+#define NMI_HIGH_PRIOR		(1ULL << 8)
+#define NMI_NORMAL_PRIOR	(1ULL << 4)
+#define NMI_LOW_PRIOR		(1ULL << 0)
+#define NMI_LOCAL_HIGH_PRIOR	(NMI_LOCAL_BIT | NMI_HIGH_PRIOR)
+#define NMI_LOCAL_NORMAL_PRIOR	(NMI_LOCAL_BIT | NMI_NORMAL_PRIOR)
+#define NMI_LOCAL_LOW_PRIOR	(NMI_LOCAL_BIT | NMI_LOW_PRIOR)
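A subsystem would then pick one of these values as the priority of its die-chain notifier, e.g. (sketch; the handler and notifier_block names are illustrative):

	static struct notifier_block my_nmi_nb = {
		.notifier_call	= my_nmi_handler,	/* handles DIE_NMI notifications */
		.priority	= NMI_LOCAL_LOW_PRIOR,	/* local NMI, lowest priority */
	};

	register_die_notifier(&my_nmi_nb);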
+
 void stop_nmi(void);
 void restart_nmi(void);
 
diff --git a/arch/x86/include/asm/numa_64.h b/arch/x86/include/asm/numa_64.h
index 823e070..5ae8728 100644
--- a/arch/x86/include/asm/numa_64.h
+++ b/arch/x86/include/asm/numa_64.h
@@ -38,7 +38,7 @@
 extern void __cpuinit numa_remove_cpu(int cpu);
 
 #ifdef CONFIG_NUMA_EMU
-#define FAKE_NODE_MIN_SIZE	((u64)64 << 20)
+#define FAKE_NODE_MIN_SIZE	((u64)32 << 20)
 #define FAKE_NODE_MIN_HASH_MASK	(~(FAKE_NODE_MIN_SIZE - 1UL))
 #endif /* CONFIG_NUMA_EMU */
 #else
diff --git a/arch/x86/include/asm/olpc.h b/arch/x86/include/asm/olpc.h
index 42a978c..f482010 100644
--- a/arch/x86/include/asm/olpc.h
+++ b/arch/x86/include/asm/olpc.h
@@ -107,10 +107,14 @@
 /* GPIO assignments */
 
 #define OLPC_GPIO_MIC_AC	1
-#define OLPC_GPIO_DCON_IRQ	geode_gpio(7)
+#define OLPC_GPIO_DCON_STAT0	5
+#define OLPC_GPIO_DCON_STAT1	6
+#define OLPC_GPIO_DCON_IRQ	7
 #define OLPC_GPIO_THRM_ALRM	geode_gpio(10)
-#define OLPC_GPIO_SMB_CLK	geode_gpio(14)
-#define OLPC_GPIO_SMB_DATA	geode_gpio(15)
+#define OLPC_GPIO_DCON_LOAD    11
+#define OLPC_GPIO_DCON_BLANK   12
+#define OLPC_GPIO_SMB_CLK      14
+#define OLPC_GPIO_SMB_DATA     15
 #define OLPC_GPIO_WORKAUX	geode_gpio(24)
 #define OLPC_GPIO_LID		geode_gpio(26)
 #define OLPC_GPIO_ECSCI		geode_gpio(27)
diff --git a/arch/x86/include/asm/olpc_ofw.h b/arch/x86/include/asm/olpc_ofw.h
index 2a84781..641988e 100644
--- a/arch/x86/include/asm/olpc_ofw.h
+++ b/arch/x86/include/asm/olpc_ofw.h
@@ -8,6 +8,8 @@
 
 #ifdef CONFIG_OLPC_OPENFIRMWARE
 
+extern bool olpc_ofw_is_installed(void);
+
 /* run an OFW command by calling into the firmware */
 #define olpc_ofw(name, args, res) \
 	__olpc_ofw((name), ARRAY_SIZE(args), args, ARRAY_SIZE(res), res)
@@ -26,10 +28,17 @@
 
 #else /* !CONFIG_OLPC_OPENFIRMWARE */
 
+static inline bool olpc_ofw_is_installed(void) { return false; }
 static inline void olpc_ofw_detect(void) { }
 static inline void setup_olpc_ofw_pgd(void) { }
 static inline bool olpc_ofw_present(void) { return false; }
 
 #endif /* !CONFIG_OLPC_OPENFIRMWARE */
 
+#ifdef CONFIG_OLPC_OPENFIRMWARE_DT
+extern void olpc_dt_build_devicetree(void);
+#else
+static inline void olpc_dt_build_devicetree(void) { }
+#endif /* CONFIG_OLPC_OPENFIRMWARE_DT */
+
 #endif /* _ASM_X86_OLPC_OFW_H */
diff --git a/arch/x86/include/asm/perf_event_p4.h b/arch/x86/include/asm/perf_event_p4.h
index 295e2ff..e2f6a99 100644
--- a/arch/x86/include/asm/perf_event_p4.h
+++ b/arch/x86/include/asm/perf_event_p4.h
@@ -20,6 +20,9 @@
 #define ARCH_P4_MAX_ESCR	(ARCH_P4_TOTAL_ESCR - ARCH_P4_RESERVED_ESCR)
 #define ARCH_P4_MAX_CCCR	(18)
 
+#define ARCH_P4_CNTRVAL_BITS	(40)
+#define ARCH_P4_CNTRVAL_MASK	((1ULL << ARCH_P4_CNTRVAL_BITS) - 1)
+
 #define P4_ESCR_EVENT_MASK	0x7e000000U
 #define P4_ESCR_EVENT_SHIFT	25
 #define P4_ESCR_EVENTMASK_MASK	0x01fffe00U
diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
index 271de94..b4389a4 100644
--- a/arch/x86/include/asm/pgalloc.h
+++ b/arch/x86/include/asm/pgalloc.h
@@ -92,7 +92,7 @@
 extern void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd);
 
 static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
-				  unsigned long adddress)
+				  unsigned long address)
 {
 	___pmd_free_tlb(tlb, pmd);
 }
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index c6efecf8..53fd1d5 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -901,7 +901,7 @@
 /*
  * The below -8 is to reserve 8 bytes on top of the ring0 stack.
  * This is necessary to guarantee that the entire "struct pt_regs"
- * is accessable even if the CPU haven't stored the SS/ESP registers
+ * is accessible even if the CPU haven't stored the SS/ESP registers
  * on the stack (interrupt gate does not save these registers
  * when switching to the same priv ring).
  * Therefore beware: accessing the ss/esp fields of the
diff --git a/arch/x86/include/asm/prom.h b/arch/x86/include/asm/prom.h
new file mode 100644
index 0000000..b4ec95f
--- /dev/null
+++ b/arch/x86/include/asm/prom.h
@@ -0,0 +1 @@
+/* dummy prom.h; here to make linux/of.h's #includes happy */
diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h
index 0e83105..f2b83bc 100644
--- a/arch/x86/include/asm/svm.h
+++ b/arch/x86/include/asm/svm.h
@@ -47,14 +47,13 @@
 	INTERCEPT_MONITOR,
 	INTERCEPT_MWAIT,
 	INTERCEPT_MWAIT_COND,
+	INTERCEPT_XSETBV,
 };
 
 
 struct __attribute__ ((__packed__)) vmcb_control_area {
-	u16 intercept_cr_read;
-	u16 intercept_cr_write;
-	u16 intercept_dr_read;
-	u16 intercept_dr_write;
+	u32 intercept_cr;
+	u32 intercept_dr;
 	u32 intercept_exceptions;
 	u64 intercept;
 	u8 reserved_1[42];
@@ -81,14 +80,19 @@
 	u32 event_inj_err;
 	u64 nested_cr3;
 	u64 lbr_ctl;
-	u64 reserved_5;
+	u32 clean;
+	u32 reserved_5;
 	u64 next_rip;
-	u8 reserved_6[816];
+	u8 insn_len;
+	u8 insn_bytes[15];
+	u8 reserved_6[800];
 };
 
 
 #define TLB_CONTROL_DO_NOTHING 0
 #define TLB_CONTROL_FLUSH_ALL_ASID 1
+#define TLB_CONTROL_FLUSH_ASID 3
+#define TLB_CONTROL_FLUSH_ASID_LOCAL 7
 
 #define V_TPR_MASK 0x0f
 
@@ -204,19 +208,31 @@
 #define SVM_SELECTOR_READ_MASK SVM_SELECTOR_WRITE_MASK
 #define SVM_SELECTOR_CODE_MASK (1 << 3)
 
-#define INTERCEPT_CR0_MASK 1
-#define INTERCEPT_CR3_MASK (1 << 3)
-#define INTERCEPT_CR4_MASK (1 << 4)
-#define INTERCEPT_CR8_MASK (1 << 8)
+#define INTERCEPT_CR0_READ	0
+#define INTERCEPT_CR3_READ	3
+#define INTERCEPT_CR4_READ	4
+#define INTERCEPT_CR8_READ	8
+#define INTERCEPT_CR0_WRITE	(16 + 0)
+#define INTERCEPT_CR3_WRITE	(16 + 3)
+#define INTERCEPT_CR4_WRITE	(16 + 4)
+#define INTERCEPT_CR8_WRITE	(16 + 8)
 
-#define INTERCEPT_DR0_MASK 1
-#define INTERCEPT_DR1_MASK (1 << 1)
-#define INTERCEPT_DR2_MASK (1 << 2)
-#define INTERCEPT_DR3_MASK (1 << 3)
-#define INTERCEPT_DR4_MASK (1 << 4)
-#define INTERCEPT_DR5_MASK (1 << 5)
-#define INTERCEPT_DR6_MASK (1 << 6)
-#define INTERCEPT_DR7_MASK (1 << 7)
+#define INTERCEPT_DR0_READ	0
+#define INTERCEPT_DR1_READ	1
+#define INTERCEPT_DR2_READ	2
+#define INTERCEPT_DR3_READ	3
+#define INTERCEPT_DR4_READ	4
+#define INTERCEPT_DR5_READ	5
+#define INTERCEPT_DR6_READ	6
+#define INTERCEPT_DR7_READ	7
+#define INTERCEPT_DR0_WRITE	(16 + 0)
+#define INTERCEPT_DR1_WRITE	(16 + 1)
+#define INTERCEPT_DR2_WRITE	(16 + 2)
+#define INTERCEPT_DR3_WRITE	(16 + 3)
+#define INTERCEPT_DR4_WRITE	(16 + 4)
+#define INTERCEPT_DR5_WRITE	(16 + 5)
+#define INTERCEPT_DR6_WRITE	(16 + 6)
+#define INTERCEPT_DR7_WRITE	(16 + 7)
 
 #define SVM_EVTINJ_VEC_MASK 0xff
 
@@ -246,6 +262,8 @@
 #define SVM_EXITINFOSHIFT_TS_REASON_JMP 38
 #define SVM_EXITINFOSHIFT_TS_HAS_ERROR_CODE 44
 
+#define SVM_EXITINFO_REG_MASK 0x0F
+
 #define	SVM_EXIT_READ_CR0 	0x000
 #define	SVM_EXIT_READ_CR3 	0x003
 #define	SVM_EXIT_READ_CR4 	0x004
@@ -316,6 +334,7 @@
 #define SVM_EXIT_MONITOR	0x08a
 #define SVM_EXIT_MWAIT		0x08b
 #define SVM_EXIT_MWAIT_COND	0x08c
+#define SVM_EXIT_XSETBV		0x08d
 #define SVM_EXIT_NPF  		0x400
 
 #define SVM_EXIT_ERR		-1
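The combined intercept_cr/intercept_dr layout replaces the four 16-bit read/write words with two 32-bit fields: read intercepts occupy bits 0-15 and write intercepts bits 16-31, which is why the new defines above are bit positions rather than masks. A minimal sketch of how a caller would set one of them (the helper name is illustrative, not part of this patch):

static inline void set_cr_write_intercept(struct vmcb_control_area *control, int cr)
{
	/* INTERCEPT_CRn_WRITE == 16 + n, so this lands in the upper half of the u32 */
	control->intercept_cr |= 1U << (INTERCEPT_CR0_WRITE + cr);
}

/* e.g. set_cr_write_intercept(control, 3) traps guest writes to CR3 */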
diff --git a/arch/x86/include/asm/traps.h b/arch/x86/include/asm/traps.h
index f66cda5..0310da6 100644
--- a/arch/x86/include/asm/traps.h
+++ b/arch/x86/include/asm/traps.h
@@ -30,6 +30,7 @@
 asmlinkage void stack_segment(void);
 asmlinkage void general_protection(void);
 asmlinkage void page_fault(void);
+asmlinkage void async_page_fault(void);
 asmlinkage void spurious_interrupt_bug(void);
 asmlinkage void coprocessor_error(void);
 asmlinkage void alignment_check(void);
diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h
index 9f0cbd9..84471b8 100644
--- a/arch/x86/include/asm/vmx.h
+++ b/arch/x86/include/asm/vmx.h
@@ -66,15 +66,23 @@
 #define PIN_BASED_NMI_EXITING                   0x00000008
 #define PIN_BASED_VIRTUAL_NMIS                  0x00000020
 
+#define VM_EXIT_SAVE_DEBUG_CONTROLS             0x00000002
 #define VM_EXIT_HOST_ADDR_SPACE_SIZE            0x00000200
+#define VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL      0x00001000
 #define VM_EXIT_ACK_INTR_ON_EXIT                0x00008000
 #define VM_EXIT_SAVE_IA32_PAT			0x00040000
 #define VM_EXIT_LOAD_IA32_PAT			0x00080000
+#define VM_EXIT_SAVE_IA32_EFER                  0x00100000
+#define VM_EXIT_LOAD_IA32_EFER                  0x00200000
+#define VM_EXIT_SAVE_VMX_PREEMPTION_TIMER       0x00400000
 
+#define VM_ENTRY_LOAD_DEBUG_CONTROLS            0x00000002
 #define VM_ENTRY_IA32E_MODE                     0x00000200
 #define VM_ENTRY_SMM                            0x00000400
 #define VM_ENTRY_DEACT_DUAL_MONITOR             0x00000800
+#define VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL     0x00002000
 #define VM_ENTRY_LOAD_IA32_PAT			0x00004000
+#define VM_ENTRY_LOAD_IA32_EFER                 0x00008000
 
 /* VMCS Encodings */
 enum vmcs_field {
@@ -239,6 +247,7 @@
 #define EXIT_REASON_TASK_SWITCH         9
 #define EXIT_REASON_CPUID               10
 #define EXIT_REASON_HLT                 12
+#define EXIT_REASON_INVD                13
 #define EXIT_REASON_INVLPG              14
 #define EXIT_REASON_RDPMC               15
 #define EXIT_REASON_RDTSC               16
@@ -296,6 +305,12 @@
 #define GUEST_INTR_STATE_SMI		0x00000004
 #define GUEST_INTR_STATE_NMI		0x00000008
 
+/* GUEST_ACTIVITY_STATE flags */
+#define GUEST_ACTIVITY_ACTIVE		0
+#define GUEST_ACTIVITY_HLT		1
+#define GUEST_ACTIVITY_SHUTDOWN		2
+#define GUEST_ACTIVITY_WAIT_SIPI	3
+
 /*
  * Exit Qualifications for MOV for Control Register Access
  */
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index d2fdb08..57ca777 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -1086,7 +1086,7 @@
 
 	dma_dom->aperture_size += APERTURE_RANGE_SIZE;
 
-	/* Intialize the exclusion range if necessary */
+	/* Initialize the exclusion range if necessary */
 	for_each_iommu(iommu) {
 		if (iommu->exclusion_start &&
 		    iommu->exclusion_start >= dma_dom->aperture[index]->offset
@@ -1353,7 +1353,7 @@
 
 /*
  * Allocates a new protection domain usable for the dma_ops functions.
- * It also intializes the page table and the address allocator data
+ * It also initializes the page table and the address allocator data
  * structures required for the dma_ops interface
  */
 static struct dma_ops_domain *dma_ops_domain_alloc(void)
diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c
index affacb5..0a99f71 100644
--- a/arch/x86/kernel/amd_nb.c
+++ b/arch/x86/kernel/amd_nb.c
@@ -20,6 +20,13 @@
 };
 EXPORT_SYMBOL(amd_nb_misc_ids);
 
+const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[] __initconst = {
+	{ 0x00, 0x18, 0x20 },
+	{ 0xff, 0x00, 0x20 },
+	{ 0xfe, 0x00, 0x20 },
+	{ }
+};
+
 struct amd_northbridge_info amd_northbridges;
 EXPORT_SYMBOL(amd_northbridges);
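amd_nb_bus_dev_ranges becomes a shared, sentinel-terminated table (the empty { } initializer leaves dev_limit at 0), so callers such as the GART code below no longer need a private copy. A minimal sketch of the consumer pattern, with an illustrative function name:

static void __init scan_amd_nb_slots_example(void)
{
	int i, slot;

	/* the trailing { } entry has dev_limit == 0 and ends the walk */
	for (i = 0; amd_nb_bus_dev_ranges[i].dev_limit; i++)
		for (slot = amd_nb_bus_dev_ranges[i].dev_base;
		     slot < amd_nb_bus_dev_ranges[i].dev_limit; slot++)
			; /* probe bus amd_nb_bus_dev_ranges[i].bus, device 'slot' here */
}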
 
diff --git a/arch/x86/kernel/aperture_64.c b/arch/x86/kernel/aperture_64.c
index dcd7c83..5955a78 100644
--- a/arch/x86/kernel/aperture_64.c
+++ b/arch/x86/kernel/aperture_64.c
@@ -39,18 +39,6 @@
 
 int fix_aperture __initdata = 1;
 
-struct bus_dev_range {
-	int bus;
-	int dev_base;
-	int dev_limit;
-};
-
-static struct bus_dev_range bus_dev_ranges[] __initdata = {
-	{ 0x00, 0x18, 0x20},
-	{ 0xff, 0x00, 0x20},
-	{ 0xfe, 0x00, 0x20}
-};
-
 static struct resource gart_resource = {
 	.name	= "GART",
 	.flags	= IORESOURCE_MEM,
@@ -294,13 +282,13 @@
 	search_agp_bridge(&agp_aper_order, &valid_agp);
 
 	fix = 0;
-	for (i = 0; i < ARRAY_SIZE(bus_dev_ranges); i++) {
+	for (i = 0; amd_nb_bus_dev_ranges[i].dev_limit; i++) {
 		int bus;
 		int dev_base, dev_limit;
 
-		bus = bus_dev_ranges[i].bus;
-		dev_base = bus_dev_ranges[i].dev_base;
-		dev_limit = bus_dev_ranges[i].dev_limit;
+		bus = amd_nb_bus_dev_ranges[i].bus;
+		dev_base = amd_nb_bus_dev_ranges[i].dev_base;
+		dev_limit = amd_nb_bus_dev_ranges[i].dev_limit;
 
 		for (slot = dev_base; slot < dev_limit; slot++) {
 			if (!early_is_amd_nb(read_pci_config(bus, slot, 3, 0x00)))
@@ -349,13 +337,13 @@
 		return;
 
 	/* disable them all at first */
-	for (i = 0; i < ARRAY_SIZE(bus_dev_ranges); i++) {
+	for (i = 0; i < amd_nb_bus_dev_ranges[i].dev_limit; i++) {
 		int bus;
 		int dev_base, dev_limit;
 
-		bus = bus_dev_ranges[i].bus;
-		dev_base = bus_dev_ranges[i].dev_base;
-		dev_limit = bus_dev_ranges[i].dev_limit;
+		bus = amd_nb_bus_dev_ranges[i].bus;
+		dev_base = amd_nb_bus_dev_ranges[i].dev_base;
+		dev_limit = amd_nb_bus_dev_ranges[i].dev_limit;
 
 		for (slot = dev_base; slot < dev_limit; slot++) {
 			if (!early_is_amd_nb(read_pci_config(bus, slot, 3, 0x00)))
@@ -390,14 +378,14 @@
 
 	fix = 0;
 	node = 0;
-	for (i = 0; i < ARRAY_SIZE(bus_dev_ranges); i++) {
+	for (i = 0; i < amd_nb_bus_dev_ranges[i].dev_limit; i++) {
 		int bus;
 		int dev_base, dev_limit;
 		u32 ctl;
 
-		bus = bus_dev_ranges[i].bus;
-		dev_base = bus_dev_ranges[i].dev_base;
-		dev_limit = bus_dev_ranges[i].dev_limit;
+		bus = amd_nb_bus_dev_ranges[i].bus;
+		dev_base = amd_nb_bus_dev_ranges[i].dev_base;
+		dev_limit = amd_nb_bus_dev_ranges[i].dev_limit;
 
 		for (slot = dev_base; slot < dev_limit; slot++) {
 			if (!early_is_amd_nb(read_pci_config(bus, slot, 3, 0x00)))
@@ -505,7 +493,7 @@
 	}
 
 	/* Fix up the north bridges */
-	for (i = 0; i < ARRAY_SIZE(bus_dev_ranges); i++) {
+	for (i = 0; i < amd_nb_bus_dev_ranges[i].dev_limit; i++) {
 		int bus, dev_base, dev_limit;
 
 		/*
@@ -514,9 +502,9 @@
 		 */
 		u32 ctl = DISTLBWALKPRB | aper_order << 1;
 
-		bus = bus_dev_ranges[i].bus;
-		dev_base = bus_dev_ranges[i].dev_base;
-		dev_limit = bus_dev_ranges[i].dev_limit;
+		bus = amd_nb_bus_dev_ranges[i].bus;
+		dev_base = amd_nb_bus_dev_ranges[i].dev_base;
+		dev_limit = amd_nb_bus_dev_ranges[i].dev_limit;
 		for (slot = dev_base; slot < dev_limit; slot++) {
 			if (!early_is_amd_nb(read_pci_config(bus, slot, 3, 0x00)))
 				continue;
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index a51345b..06c196d 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -684,7 +684,7 @@
 	lapic_clockevent.mult = div_sc(delta, TICK_NSEC * LAPIC_CAL_LOOPS,
 				       lapic_clockevent.shift);
 	lapic_clockevent.max_delta_ns =
-		clockevent_delta2ns(0x7FFFFF, &lapic_clockevent);
+		clockevent_delta2ns(0x7FFFFFFF, &lapic_clockevent);
 	lapic_clockevent.min_delta_ns =
 		clockevent_delta2ns(0xF, &lapic_clockevent);
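The LAPIC initial-count register is a full 32-bit register, so the old 0x7FFFFF cap needlessly limited the one-shot range. As a rough worked example, assuming a 1 GHz timer clock: 0x7FFFFF is about 8.4 million ticks, roughly 8.4 ms of maximum delta, while 0x7FFFFFFF (2^31 - 1) is about 2.1 billion ticks, roughly 2.1 seconds.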
 
diff --git a/arch/x86/kernel/apic/hw_nmi.c b/arch/x86/kernel/apic/hw_nmi.c
index 72ec29e..79fd43c 100644
--- a/arch/x86/kernel/apic/hw_nmi.c
+++ b/arch/x86/kernel/apic/hw_nmi.c
@@ -68,7 +68,6 @@
 
 	switch (cmd) {
 	case DIE_NMI:
-	case DIE_NMI_IPI:
 		break;
 
 	default:
@@ -96,7 +95,7 @@
 static __read_mostly struct notifier_block backtrace_notifier = {
 	.notifier_call          = arch_trigger_all_cpu_backtrace_handler,
 	.next                   = NULL,
-	.priority               = 1
+	.priority               = NMI_LOCAL_LOW_PRIOR,
 };
 
 static int __init register_trigger_all_cpu_backtrace(void)
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index ecca5f4..bd16b58 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -378,7 +378,7 @@
 
 static __cpuinit void set_x2apic_extra_bits(int pnode)
 {
-	__this_cpu_write(x2apic_extra_bits, (pnode << 6));
+	__this_cpu_write(x2apic_extra_bits, pnode << uvh_apicid.s.pnode_shift);
 }
 
 /*
@@ -641,7 +641,7 @@
  */
 int uv_handle_nmi(struct notifier_block *self, unsigned long reason, void *data)
 {
-	if (reason != DIE_NMI_IPI)
+	if (reason != DIE_NMIUNKNOWN)
 		return NOTIFY_OK;
 
 	if (in_crash_kexec)
diff --git a/arch/x86/kernel/cpu/mcheck/mce-inject.c b/arch/x86/kernel/cpu/mcheck/mce-inject.c
index e7dbde7..a779719 100644
--- a/arch/x86/kernel/cpu/mcheck/mce-inject.c
+++ b/arch/x86/kernel/cpu/mcheck/mce-inject.c
@@ -25,6 +25,7 @@
 #include <linux/gfp.h>
 #include <asm/mce.h>
 #include <asm/apic.h>
+#include <asm/nmi.h>
 
 /* Update fake mce registers on current CPU. */
 static void inject_mce(struct mce *m)
@@ -83,7 +84,7 @@
 	struct die_args *args = (struct die_args *)data;
 	int cpu = smp_processor_id();
 	struct mce *m = &__get_cpu_var(injectm);
-	if (val != DIE_NMI_IPI || !cpumask_test_cpu(cpu, mce_inject_cpumask))
+	if (val != DIE_NMI || !cpumask_test_cpu(cpu, mce_inject_cpumask))
 		return NOTIFY_DONE;
 	cpumask_clear_cpu(cpu, mce_inject_cpumask);
 	if (m->inject_flags & MCJ_EXCEPTION)
@@ -95,7 +96,7 @@
 
 static struct notifier_block mce_raise_nb = {
 	.notifier_call = mce_raise_notify,
-	.priority = 1000,
+	.priority = NMI_LOCAL_NORMAL_PRIOR,
 };
 
 /* Inject mce on current CPU */
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 0492101..9d977a2 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -1267,7 +1267,6 @@
 
 	switch (cmd) {
 	case DIE_NMI:
-	case DIE_NMI_IPI:
 		break;
 	case DIE_NMIUNKNOWN:
 		this_nmi = percpu_read(irq_stat.__nmi_count);
@@ -1317,7 +1316,7 @@
 static __read_mostly struct notifier_block perf_event_nmi_notifier = {
 	.notifier_call		= perf_event_nmi_handler,
 	.next			= NULL,
-	.priority		= 1
+	.priority		= NMI_LOCAL_LOW_PRIOR,
 };
 
 static struct event_constraint unconstrained;
diff --git a/arch/x86/kernel/cpu/perf_event_p4.c b/arch/x86/kernel/cpu/perf_event_p4.c
index 81400b9..e56b9bf 100644
--- a/arch/x86/kernel/cpu/perf_event_p4.c
+++ b/arch/x86/kernel/cpu/perf_event_p4.c
@@ -753,19 +753,21 @@
 
 static inline int p4_pmu_clear_cccr_ovf(struct hw_perf_event *hwc)
 {
-	int overflow = 0;
-	u32 low, high;
+	u64 v;
 
-	rdmsr(hwc->config_base + hwc->idx, low, high);
-
-	/* we need to check high bit for unflagged overflows */
-	if ((low & P4_CCCR_OVF) || !(high & (1 << 31))) {
-		overflow = 1;
-		(void)checking_wrmsrl(hwc->config_base + hwc->idx,
-			((u64)low) & ~P4_CCCR_OVF);
+	/* an official way for overflow indication */
+	rdmsrl(hwc->config_base + hwc->idx, v);
+	if (v & P4_CCCR_OVF) {
+		wrmsrl(hwc->config_base + hwc->idx, v & ~P4_CCCR_OVF);
+		return 1;
 	}
 
-	return overflow;
+	/* it might be unflagged overflow */
+	rdmsrl(hwc->event_base + hwc->idx, v);
+	if (!(v & ARCH_P4_CNTRVAL_MASK))
+		return 1;
+
+	return 0;
 }
 
 static void p4_pmu_disable_pebs(void)
@@ -1152,9 +1154,9 @@
 	 */
 	.num_counters		= ARCH_P4_MAX_CCCR,
 	.apic			= 1,
-	.cntval_bits		= 40,
-	.cntval_mask		= (1ULL << 40) - 1,
-	.max_period		= (1ULL << 39) - 1,
+	.cntval_bits		= ARCH_P4_CNTRVAL_BITS,
+	.cntval_mask		= ARCH_P4_CNTRVAL_MASK,
+	.max_period		= (1ULL << (ARCH_P4_CNTRVAL_BITS - 1)) - 1,
 	.hw_config		= p4_hw_config,
 	.schedule_events	= p4_pmu_schedule_events,
 	/*
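Switching to ARCH_P4_CNTRVAL_BITS is a pure cleanup: with 40 counter bits, cntval_mask = (1ULL << 40) - 1 = 0xffffffffff and max_period = (1ULL << 39) - 1, exactly the literals that were hard-coded before. The same mask is what p4_pmu_clear_cccr_ovf() above now checks to catch an overflow the CCCR did not flag (all 40 counter bits reading back as zero).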
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
index 8474c99..d6fb146 100644
--- a/arch/x86/kernel/dumpstack.c
+++ b/arch/x86/kernel/dumpstack.c
@@ -197,14 +197,8 @@
  */
 void dump_stack(void)
 {
-	unsigned long bp = 0;
 	unsigned long stack;
 
-#ifdef CONFIG_FRAME_POINTER
-	if (!bp)
-		get_bp(bp);
-#endif
-
 	printk("Pid: %d, comm: %.20s %s %s %.*s\n",
 		current->pid, current->comm, print_tainted(),
 		init_utsname()->release,
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index 591e601..c8b4efa 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -1406,6 +1406,16 @@
 	CFI_ENDPROC
 END(general_protection)
 
+#ifdef CONFIG_KVM_GUEST
+ENTRY(async_page_fault)
+	RING0_EC_FRAME
+	pushl $do_async_page_fault
+	CFI_ADJUST_CFA_OFFSET 4
+	jmp error_code
+	CFI_ENDPROC
+END(async_page_fault)
+#endif
+
 /*
  * End of kprobes section
  */
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index e3ba417..aed1ffb 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -299,17 +299,21 @@
 ENTRY(save_args)
 	XCPT_FRAME
 	cld
-	movq_cfi rdi, RDI+16-ARGOFFSET
-	movq_cfi rsi, RSI+16-ARGOFFSET
-	movq_cfi rdx, RDX+16-ARGOFFSET
-	movq_cfi rcx, RCX+16-ARGOFFSET
-	movq_cfi rax, RAX+16-ARGOFFSET
-	movq_cfi  r8,  R8+16-ARGOFFSET
-	movq_cfi  r9,  R9+16-ARGOFFSET
-	movq_cfi r10, R10+16-ARGOFFSET
-	movq_cfi r11, R11+16-ARGOFFSET
+	/*
+	 * start from rbp in pt_regs and jump over
+	 * return address.
+	 */
+	movq_cfi rdi, RDI+8-RBP
+	movq_cfi rsi, RSI+8-RBP
+	movq_cfi rdx, RDX+8-RBP
+	movq_cfi rcx, RCX+8-RBP
+	movq_cfi rax, RAX+8-RBP
+	movq_cfi  r8,  R8+8-RBP
+	movq_cfi  r9,  R9+8-RBP
+	movq_cfi r10, R10+8-RBP
+	movq_cfi r11, R11+8-RBP
 
-	leaq -ARGOFFSET+16(%rsp),%rdi	/* arg1 for handler */
+	leaq -RBP+8(%rsp),%rdi	/* arg1 for handler */
 	movq_cfi rbp, 8		/* push %rbp */
 	leaq 8(%rsp), %rbp		/* mov %rsp, %ebp */
 	testl $3, CS(%rdi)
@@ -782,8 +786,9 @@
 
 /* 0(%rsp): ~(interrupt number) */
 	.macro interrupt func
-	subq $ORIG_RAX-ARGOFFSET+8, %rsp
-	CFI_ADJUST_CFA_OFFSET ORIG_RAX-ARGOFFSET+8
+	/* reserve pt_regs for scratch regs and rbp */
+	subq $ORIG_RAX-RBP, %rsp
+	CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
 	call save_args
 	PARTIAL_FRAME 0
 	call \func
@@ -808,9 +813,14 @@
 	TRACE_IRQS_OFF
 	decl PER_CPU_VAR(irq_count)
 	leaveq
+
 	CFI_RESTORE		rbp
 	CFI_DEF_CFA_REGISTER	rsp
 	CFI_ADJUST_CFA_OFFSET	-8
+
+	/* we did not save rbx, restore only from ARGOFFSET */
+	addq $8, %rsp
+	CFI_ADJUST_CFA_OFFSET	-8
 exit_intr:
 	GET_THREAD_INFO(%rcx)
 	testl $3,CS-ARGOFFSET(%rsp)
@@ -1319,6 +1329,9 @@
 #endif
 errorentry general_protection do_general_protection
 errorentry page_fault do_page_fault
+#ifdef CONFIG_KVM_GUEST
+errorentry async_page_fault do_async_page_fault
+#endif
 #ifdef CONFIG_X86_MCE
 paranoidzeroentry machine_check *machine_check_vector(%rip)
 #endif
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index 9f54b20..fc293dc 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -126,7 +126,7 @@
 	movsl
 	movl pa(boot_params) + NEW_CL_POINTER,%esi
 	andl %esi,%esi
-	jz 1f			# No comand line
+	jz 1f			# No command line
 	movl $pa(boot_command_line),%edi
 	movl $(COMMAND_LINE_SIZE/4),%ecx
 	rep
diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
index 58bb239..e60c38c 100644
--- a/arch/x86/kernel/i387.c
+++ b/arch/x86/kernel/i387.c
@@ -169,6 +169,7 @@
 	set_stopped_child_used_math(tsk);
 	return 0;
 }
+EXPORT_SYMBOL_GPL(init_fpu);
 
 /*
  * The xstateregs_active() routine is the same as the fpregs_active() routine,
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index 3a43caa..52945da 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -4,6 +4,7 @@
 #include <linux/cpu.h>
 #include <linux/interrupt.h>
 #include <linux/kernel_stat.h>
+#include <linux/of.h>
 #include <linux/seq_file.h>
 #include <linux/smp.h>
 #include <linux/ftrace.h>
@@ -275,6 +276,15 @@
 
 EXPORT_SYMBOL_GPL(vector_used_by_percpu_irq);
 
+#ifdef CONFIG_OF
+unsigned int irq_create_of_mapping(struct device_node *controller,
+		const u32 *intspec, unsigned int intsize)
+{
+	return intspec[0];
+}
+EXPORT_SYMBOL_GPL(irq_create_of_mapping);
+#endif
+
 #ifdef CONFIG_HOTPLUG_CPU
 /* A cpu has been removed from cpu_online_mask.  Reset irq affinities. */
 void fixup_irqs(void)
diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
index cd21b65..a413000 100644
--- a/arch/x86/kernel/kgdb.c
+++ b/arch/x86/kernel/kgdb.c
@@ -48,6 +48,7 @@
 #include <asm/apicdef.h>
 #include <asm/system.h>
 #include <asm/apic.h>
+#include <asm/nmi.h>
 
 struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] =
 {
@@ -525,10 +526,6 @@
 		}
 		return NOTIFY_DONE;
 
-	case DIE_NMI_IPI:
-		/* Just ignore, we will handle the roundup on DIE_NMI. */
-		return NOTIFY_DONE;
-
 	case DIE_NMIUNKNOWN:
 		if (was_in_debug_nmi[raw_smp_processor_id()]) {
 			was_in_debug_nmi[raw_smp_processor_id()] = 0;
@@ -606,7 +603,7 @@
 	/*
 	 * Lowest-prio notifier priority, we want to be notified last:
 	 */
-	.priority	= -INT_MAX,
+	.priority	= NMI_LOCAL_LOW_PRIOR,
 };
 
 /**
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index 63b0ec8..8dc4466 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -27,16 +27,37 @@
 #include <linux/mm.h>
 #include <linux/highmem.h>
 #include <linux/hardirq.h>
+#include <linux/notifier.h>
+#include <linux/reboot.h>
+#include <linux/hash.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/kprobes.h>
 #include <asm/timer.h>
+#include <asm/cpu.h>
+#include <asm/traps.h>
+#include <asm/desc.h>
+#include <asm/tlbflush.h>
 
 #define MMU_QUEUE_SIZE 1024
 
+static int kvmapf = 1;
+
+static int parse_no_kvmapf(char *arg)
+{
+	kvmapf = 0;
+	return 0;
+}
+
+early_param("no-kvmapf", parse_no_kvmapf);
+
 struct kvm_para_state {
 	u8 mmu_queue[MMU_QUEUE_SIZE];
 	int mmu_queue_len;
 };
 
 static DEFINE_PER_CPU(struct kvm_para_state, para_state);
+static DEFINE_PER_CPU(struct kvm_vcpu_pv_apf_data, apf_reason) __aligned(64);
 
 static struct kvm_para_state *kvm_para_state(void)
 {
@@ -50,6 +71,195 @@
 {
 }
 
+#define KVM_TASK_SLEEP_HASHBITS 8
+#define KVM_TASK_SLEEP_HASHSIZE (1<<KVM_TASK_SLEEP_HASHBITS)
+
+struct kvm_task_sleep_node {
+	struct hlist_node link;
+	wait_queue_head_t wq;
+	u32 token;
+	int cpu;
+	bool halted;
+	struct mm_struct *mm;
+};
+
+static struct kvm_task_sleep_head {
+	spinlock_t lock;
+	struct hlist_head list;
+} async_pf_sleepers[KVM_TASK_SLEEP_HASHSIZE];
+
+static struct kvm_task_sleep_node *_find_apf_task(struct kvm_task_sleep_head *b,
+						  u32 token)
+{
+	struct hlist_node *p;
+
+	hlist_for_each(p, &b->list) {
+		struct kvm_task_sleep_node *n =
+			hlist_entry(p, typeof(*n), link);
+		if (n->token == token)
+			return n;
+	}
+
+	return NULL;
+}
+
+void kvm_async_pf_task_wait(u32 token)
+{
+	u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
+	struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
+	struct kvm_task_sleep_node n, *e;
+	DEFINE_WAIT(wait);
+	int cpu, idle;
+
+	cpu = get_cpu();
+	idle = idle_cpu(cpu);
+	put_cpu();
+
+	spin_lock(&b->lock);
+	e = _find_apf_task(b, token);
+	if (e) {
+		/* dummy entry exists -> wake up was delivered ahead of PF */
+		hlist_del(&e->link);
+		kfree(e);
+		spin_unlock(&b->lock);
+		return;
+	}
+
+	n.token = token;
+	n.cpu = smp_processor_id();
+	n.mm = current->active_mm;
+	n.halted = idle || preempt_count() > 1;
+	atomic_inc(&n.mm->mm_count);
+	init_waitqueue_head(&n.wq);
+	hlist_add_head(&n.link, &b->list);
+	spin_unlock(&b->lock);
+
+	for (;;) {
+		if (!n.halted)
+			prepare_to_wait(&n.wq, &wait, TASK_UNINTERRUPTIBLE);
+		if (hlist_unhashed(&n.link))
+			break;
+
+		if (!n.halted) {
+			local_irq_enable();
+			schedule();
+			local_irq_disable();
+		} else {
+			/*
+			 * We cannot reschedule. So halt.
+			 */
+			native_safe_halt();
+			local_irq_disable();
+		}
+	}
+	if (!n.halted)
+		finish_wait(&n.wq, &wait);
+
+	return;
+}
+EXPORT_SYMBOL_GPL(kvm_async_pf_task_wait);
+
+static void apf_task_wake_one(struct kvm_task_sleep_node *n)
+{
+	hlist_del_init(&n->link);
+	if (!n->mm)
+		return;
+	mmdrop(n->mm);
+	if (n->halted)
+		smp_send_reschedule(n->cpu);
+	else if (waitqueue_active(&n->wq))
+		wake_up(&n->wq);
+}
+
+static void apf_task_wake_all(void)
+{
+	int i;
+
+	for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++) {
+		struct hlist_node *p, *next;
+		struct kvm_task_sleep_head *b = &async_pf_sleepers[i];
+		spin_lock(&b->lock);
+		hlist_for_each_safe(p, next, &b->list) {
+			struct kvm_task_sleep_node *n =
+				hlist_entry(p, typeof(*n), link);
+			if (n->cpu == smp_processor_id())
+				apf_task_wake_one(n);
+		}
+		spin_unlock(&b->lock);
+	}
+}
+
+void kvm_async_pf_task_wake(u32 token)
+{
+	u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
+	struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
+	struct kvm_task_sleep_node *n;
+
+	if (token == ~0) {
+		apf_task_wake_all();
+		return;
+	}
+
+again:
+	spin_lock(&b->lock);
+	n = _find_apf_task(b, token);
+	if (!n) {
+		/*
+		 * async PF was not yet handled.
+		 * Add dummy entry for the token.
+		 */
+		n = kmalloc(sizeof(*n), GFP_ATOMIC);
+		if (!n) {
+			/*
+			 * Allocation failed! Busy wait while other cpu
+			 * handles async PF.
+			 */
+			spin_unlock(&b->lock);
+			cpu_relax();
+			goto again;
+		}
+		n->token = token;
+		n->cpu = smp_processor_id();
+		n->mm = NULL;
+		init_waitqueue_head(&n->wq);
+		hlist_add_head(&n->link, &b->list);
+	} else
+		apf_task_wake_one(n);
+	spin_unlock(&b->lock);
+	return;
+}
+EXPORT_SYMBOL_GPL(kvm_async_pf_task_wake);
+
+u32 kvm_read_and_reset_pf_reason(void)
+{
+	u32 reason = 0;
+
+	if (__get_cpu_var(apf_reason).enabled) {
+		reason = __get_cpu_var(apf_reason).reason;
+		__get_cpu_var(apf_reason).reason = 0;
+	}
+
+	return reason;
+}
+EXPORT_SYMBOL_GPL(kvm_read_and_reset_pf_reason);
+
+dotraplinkage void __kprobes
+do_async_page_fault(struct pt_regs *regs, unsigned long error_code)
+{
+	switch (kvm_read_and_reset_pf_reason()) {
+	default:
+		do_page_fault(regs, error_code);
+		break;
+	case KVM_PV_REASON_PAGE_NOT_PRESENT:
+		/* page is swapped out by the host. */
+		kvm_async_pf_task_wait((u32)read_cr2());
+		break;
+	case KVM_PV_REASON_PAGE_READY:
+		kvm_async_pf_task_wake((u32)read_cr2());
+		break;
+	}
+}
+
 static void kvm_mmu_op(void *buffer, unsigned len)
 {
 	int r;
@@ -231,10 +441,117 @@
 #endif
 }
 
-void __init kvm_guest_init(void)
+void __cpuinit kvm_guest_cpu_init(void)
 {
 	if (!kvm_para_available())
 		return;
 
+	if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF) && kvmapf) {
+		u64 pa = __pa(&__get_cpu_var(apf_reason));
+
+#ifdef CONFIG_PREEMPT
+		pa |= KVM_ASYNC_PF_SEND_ALWAYS;
+#endif
+		wrmsrl(MSR_KVM_ASYNC_PF_EN, pa | KVM_ASYNC_PF_ENABLED);
+		__get_cpu_var(apf_reason).enabled = 1;
+		printk(KERN_INFO"KVM setup async PF for cpu %d\n",
+		       smp_processor_id());
+	}
+}
+
+static void kvm_pv_disable_apf(void *unused)
+{
+	if (!__get_cpu_var(apf_reason).enabled)
+		return;
+
+	wrmsrl(MSR_KVM_ASYNC_PF_EN, 0);
+	__get_cpu_var(apf_reason).enabled = 0;
+
+	printk(KERN_INFO"Unregister pv shared memory for cpu %d\n",
+	       smp_processor_id());
+}
+
+static int kvm_pv_reboot_notify(struct notifier_block *nb,
+				unsigned long code, void *unused)
+{
+	if (code == SYS_RESTART)
+		on_each_cpu(kvm_pv_disable_apf, NULL, 1);
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block kvm_pv_reboot_nb = {
+	.notifier_call = kvm_pv_reboot_notify,
+};
+
+#ifdef CONFIG_SMP
+static void __init kvm_smp_prepare_boot_cpu(void)
+{
+#ifdef CONFIG_KVM_CLOCK
+	WARN_ON(kvm_register_clock("primary cpu clock"));
+#endif
+	kvm_guest_cpu_init();
+	native_smp_prepare_boot_cpu();
+}
+
+static void kvm_guest_cpu_online(void *dummy)
+{
+	kvm_guest_cpu_init();
+}
+
+static void kvm_guest_cpu_offline(void *dummy)
+{
+	kvm_pv_disable_apf(NULL);
+	apf_task_wake_all();
+}
+
+static int __cpuinit kvm_cpu_notify(struct notifier_block *self,
+				    unsigned long action, void *hcpu)
+{
+	int cpu = (unsigned long)hcpu;
+	switch (action) {
+	case CPU_ONLINE:
+	case CPU_DOWN_FAILED:
+	case CPU_ONLINE_FROZEN:
+		smp_call_function_single(cpu, kvm_guest_cpu_online, NULL, 0);
+		break;
+	case CPU_DOWN_PREPARE:
+	case CPU_DOWN_PREPARE_FROZEN:
+		smp_call_function_single(cpu, kvm_guest_cpu_offline, NULL, 1);
+		break;
+	default:
+		break;
+	}
+	return NOTIFY_OK;
+}
+
+static struct notifier_block __cpuinitdata kvm_cpu_notifier = {
+	.notifier_call  = kvm_cpu_notify,
+};
+#endif
+
+static void __init kvm_apf_trap_init(void)
+{
+	set_intr_gate(14, &async_page_fault);
+}
+
+void __init kvm_guest_init(void)
+{
+	int i;
+
+	if (!kvm_para_available())
+		return;
+
 	paravirt_ops_setup();
+	register_reboot_notifier(&kvm_pv_reboot_nb);
+	for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++)
+		spin_lock_init(&async_pf_sleepers[i].lock);
+	if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF))
+		x86_init.irqs.trap_init = kvm_apf_trap_init;
+
+#ifdef CONFIG_SMP
+	smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu;
+	register_cpu_notifier(&kvm_cpu_notifier);
+#else
+	kvm_guest_cpu_init();
+#endif
 }
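On the guest side the whole async-PF setup reduces to one MSR write per CPU: kvm_guest_cpu_init() writes MSR_KVM_ASYNC_PF_EN with the physical address of that CPU's apf_reason slot plus KVM_ASYNC_PF_ENABLED (and KVM_ASYNC_PF_SEND_ALWAYS under CONFIG_PREEMPT), and kvm_pv_disable_apf() writes 0 to tear it down on reboot or CPU offline. A guest that wants the old synchronous fault behaviour can boot with

	no-kvmapf

on the kernel command line, which keeps kvmapf at 0 so the MSR is never enabled.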
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
index ca43ce3..f98d3ea 100644
--- a/arch/x86/kernel/kvmclock.c
+++ b/arch/x86/kernel/kvmclock.c
@@ -125,7 +125,7 @@
 	.flags = CLOCK_SOURCE_IS_CONTINUOUS,
 };
 
-static int kvm_register_clock(char *txt)
+int kvm_register_clock(char *txt)
 {
 	int cpu = smp_processor_id();
 	int low, high, ret;
@@ -152,14 +152,6 @@
 }
 #endif
 
-#ifdef CONFIG_SMP
-static void __init kvm_smp_prepare_boot_cpu(void)
-{
-	WARN_ON(kvm_register_clock("primary cpu clock"));
-	native_smp_prepare_boot_cpu();
-}
-#endif
-
 /*
  * After the clock is registered, the host will keep writing to the
  * registered memory location. If the guest happens to shutdown, this memory
@@ -206,9 +198,6 @@
 	x86_cpuinit.setup_percpu_clockev =
 		kvm_setup_secondary_clock;
 #endif
-#ifdef CONFIG_SMP
-	smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu;
-#endif
 	machine_ops.shutdown  = kvm_shutdown;
 #ifdef CONFIG_KEXEC
 	machine_ops.crash_shutdown  = kvm_crash_shutdown;
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index c495aa8d..fc7aae1 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -18,6 +18,7 @@
 #include <asm/pci_x86.h>
 #include <asm/virtext.h>
 #include <asm/cpu.h>
+#include <asm/nmi.h>
 
 #ifdef CONFIG_X86_32
 # include <linux/ctype.h>
@@ -747,7 +748,7 @@
 {
 	int cpu;
 
-	if (val != DIE_NMI_IPI)
+	if (val != DIE_NMI)
 		return NOTIFY_OK;
 
 	cpu = raw_smp_processor_id();
@@ -778,6 +779,8 @@
 
 static struct notifier_block crash_nmi_nb = {
 	.notifier_call = crash_nmi_callback,
+	/* we want to be the first one called */
+	.priority = NMI_LOCAL_HIGH_PRIOR+1,
 };
 
 /* Halt all other CPUs, calling the specified function on each of them
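Die notifiers are called in descending .priority order, so NMI_LOCAL_HIGH_PRIOR + 1 puts the crash IPI callback ahead of every ordinary local-NMI user (perf, kgdb, the backtrace handler). A minimal sketch of the same registration pattern, with illustrative names:

static int example_nmi_notify(struct notifier_block *nb, unsigned long val, void *data)
{
	if (val != DIE_NMI)
		return NOTIFY_DONE;	/* not an NMI, let the rest of the chain look */
	/* ... handle the NMI ... */
	return NOTIFY_STOP;		/* handled, stop walking the chain */
}

static struct notifier_block example_nmi_nb = {
	.notifier_call	= example_nmi_notify,
	.priority	= NMI_LOCAL_HIGH_PRIOR + 1,	/* run before other local-NMI users */
};

/* register_die_notifier(&example_nmi_nb); somewhere during init */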
diff --git a/arch/x86/kernel/rtc.c b/arch/x86/kernel/rtc.c
index 1cfbbfc..6f39cab 100644
--- a/arch/x86/kernel/rtc.c
+++ b/arch/x86/kernel/rtc.c
@@ -76,7 +76,7 @@
 		CMOS_WRITE(real_seconds, RTC_SECONDS);
 		CMOS_WRITE(real_minutes, RTC_MINUTES);
 	} else {
-		printk(KERN_WARNING
+		printk_once(KERN_NOTICE
 		       "set_rtc_mmss: can't update from %d to %d\n",
 		       cmos_minutes, real_minutes);
 		retval = -1;
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index c7149c9..763df77 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -97,12 +97,12 @@
  */
 static DEFINE_MUTEX(x86_cpu_hotplug_driver_mutex);
 
-void cpu_hotplug_driver_lock()
+void cpu_hotplug_driver_lock(void)
 {
         mutex_lock(&x86_cpu_hotplug_driver_mutex);
 }
 
-void cpu_hotplug_driver_unlock()
+void cpu_hotplug_driver_unlock(void)
 {
         mutex_unlock(&x86_cpu_hotplug_driver_mutex);
 }
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index c76aaca..b9b6716 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -84,6 +84,11 @@
 static int ignore_nmis;
 
 int unknown_nmi_panic;
+/*
+ * Prevent the NMI reason port (0x61) from being accessed
+ * simultaneously; this lock may only be taken from the NMI handler.
+ */
+static DEFINE_RAW_SPINLOCK(nmi_reason_lock);
 
 static inline void conditional_sti(struct pt_regs *regs)
 {
@@ -310,15 +315,15 @@
 __setup("unknown_nmi_panic", setup_unknown_nmi_panic);
 
 static notrace __kprobes void
-mem_parity_error(unsigned char reason, struct pt_regs *regs)
+pci_serr_error(unsigned char reason, struct pt_regs *regs)
 {
-	printk(KERN_EMERG
-		"Uhhuh. NMI received for unknown reason %02x on CPU %d.\n",
-			reason, smp_processor_id());
+	pr_emerg("NMI: PCI system error (SERR) for reason %02x on CPU %d.\n",
+		 reason, smp_processor_id());
 
-	printk(KERN_EMERG
-		"You have some hardware problem, likely on the PCI bus.\n");
-
+	/*
+	 * On some machines, PCI SERR line is used to report memory
+	 * errors. EDAC makes use of it.
+	 */
 #if defined(CONFIG_EDAC)
 	if (edac_handler_set()) {
 		edac_atomic_assert_error();
@@ -329,11 +334,11 @@
 	if (panic_on_unrecovered_nmi)
 		panic("NMI: Not continuing");
 
-	printk(KERN_EMERG "Dazed and confused, but trying to continue\n");
+	pr_emerg("Dazed and confused, but trying to continue\n");
 
-	/* Clear and disable the memory parity error line. */
-	reason = (reason & 0xf) | 4;
-	outb(reason, 0x61);
+	/* Clear and disable the PCI SERR error line. */
+	reason = (reason & NMI_REASON_CLEAR_MASK) | NMI_REASON_CLEAR_SERR;
+	outb(reason, NMI_REASON_PORT);
 }
 
 static notrace __kprobes void
@@ -341,15 +346,17 @@
 {
 	unsigned long i;
 
-	printk(KERN_EMERG "NMI: IOCK error (debug interrupt?)\n");
+	pr_emerg(
+	"NMI: IOCK error (debug interrupt?) for reason %02x on CPU %d.\n",
+		 reason, smp_processor_id());
 	show_registers(regs);
 
 	if (panic_on_io_nmi)
 		panic("NMI IOCK error: Not continuing");
 
 	/* Re-enable the IOCK line, wait for a few seconds */
-	reason = (reason & 0xf) | 8;
-	outb(reason, 0x61);
+	reason = (reason & NMI_REASON_CLEAR_MASK) | NMI_REASON_CLEAR_IOCHK;
+	outb(reason, NMI_REASON_PORT);
 
 	i = 20000;
 	while (--i) {
@@ -357,8 +364,8 @@
 		udelay(100);
 	}
 
-	reason &= ~8;
-	outb(reason, 0x61);
+	reason &= ~NMI_REASON_CLEAR_IOCHK;
+	outb(reason, NMI_REASON_PORT);
 }
 
 static notrace __kprobes void
@@ -377,57 +384,50 @@
 		return;
 	}
 #endif
-	printk(KERN_EMERG
-		"Uhhuh. NMI received for unknown reason %02x on CPU %d.\n",
-			reason, smp_processor_id());
+	pr_emerg("Uhhuh. NMI received for unknown reason %02x on CPU %d.\n",
+		 reason, smp_processor_id());
 
-	printk(KERN_EMERG "Do you have a strange power saving mode enabled?\n");
+	pr_emerg("Do you have a strange power saving mode enabled?\n");
 	if (unknown_nmi_panic || panic_on_unrecovered_nmi)
 		panic("NMI: Not continuing");
 
-	printk(KERN_EMERG "Dazed and confused, but trying to continue\n");
+	pr_emerg("Dazed and confused, but trying to continue\n");
 }
 
 static notrace __kprobes void default_do_nmi(struct pt_regs *regs)
 {
 	unsigned char reason = 0;
-	int cpu;
 
-	cpu = smp_processor_id();
+	/*
+	 * CPU-specific NMI must be processed before non-CPU-specific
+	 * NMI, otherwise we may lose it, because the CPU-specific
+	 * NMI cannot be detected/processed on other CPUs.
+	 */
+	if (notify_die(DIE_NMI, "nmi", regs, 0, 2, SIGINT) == NOTIFY_STOP)
+		return;
 
-	/* Only the BSP gets external NMIs from the system. */
-	if (!cpu)
-		reason = get_nmi_reason();
+	/* Non-CPU-specific NMI: NMI sources can be processed on any CPU */
+	raw_spin_lock(&nmi_reason_lock);
+	reason = get_nmi_reason();
 
-	if (!(reason & 0xc0)) {
-		if (notify_die(DIE_NMI_IPI, "nmi_ipi", regs, reason, 2, SIGINT)
-								== NOTIFY_STOP)
-			return;
-
-#ifdef CONFIG_X86_LOCAL_APIC
-		if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT)
-							== NOTIFY_STOP)
-			return;
+	if (reason & NMI_REASON_MASK) {
+		if (reason & NMI_REASON_SERR)
+			pci_serr_error(reason, regs);
+		else if (reason & NMI_REASON_IOCHK)
+			io_check_error(reason, regs);
+#ifdef CONFIG_X86_32
+		/*
+		 * Reassert NMI in case it became active
+		 * meanwhile as it's edge-triggered:
+		 */
+		reassert_nmi();
 #endif
-		unknown_nmi_error(reason, regs);
-
+		raw_spin_unlock(&nmi_reason_lock);
 		return;
 	}
-	if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT) == NOTIFY_STOP)
-		return;
+	raw_spin_unlock(&nmi_reason_lock);
 
-	/* AK: following checks seem to be broken on modern chipsets. FIXME */
-	if (reason & 0x80)
-		mem_parity_error(reason, regs);
-	if (reason & 0x40)
-		io_check_error(reason, regs);
-#ifdef CONFIG_X86_32
-	/*
-	 * Reassert NMI in case it became active meanwhile
-	 * as it's edge-triggered:
-	 */
-	reassert_nmi();
-#endif
+	unknown_nmi_error(reason, regs);
 }
 
 dotraplinkage notrace __kprobes void
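The symbolic names replace the raw port-0x61 magic numbers from the deleted code (reason & 0x80 for SERR, reason & 0x40 for IOCHK, | 4 and | 8 to clear them). A sketch of the values these macros are expected to carry to match those literals (the real definitions live in <asm/mach_traps.h>):

#define NMI_REASON_PORT		0x61

#define NMI_REASON_SERR		0x80	/* the bit the old "mem parity" path tested */
#define NMI_REASON_IOCHK	0x40
#define NMI_REASON_MASK		(NMI_REASON_SERR | NMI_REASON_IOCHK)

#define NMI_REASON_CLEAR_SERR	0x04
#define NMI_REASON_CLEAR_IOCHK	0x08
#define NMI_REASON_CLEAR_MASK	0x0f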
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 03d2ea8..823f79a 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -965,7 +965,7 @@
 
 static int __init init_tsc_clocksource(void)
 {
-	if (!cpu_has_tsc || tsc_disabled > 0)
+	if (!cpu_has_tsc || tsc_disabled > 0 || !tsc_khz)
 		return 0;
 
 	if (tsc_clocksource_reliable)
diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig
index ddc131f..50f6364 100644
--- a/arch/x86/kvm/Kconfig
+++ b/arch/x86/kvm/Kconfig
@@ -28,6 +28,7 @@
 	select HAVE_KVM_IRQCHIP
 	select HAVE_KVM_EVENTFD
 	select KVM_APIC_ARCHITECTURE
+	select KVM_ASYNC_PF
 	select USER_RETURN_NOTIFIER
 	select KVM_MMIO
 	---help---
diff --git a/arch/x86/kvm/Makefile b/arch/x86/kvm/Makefile
index 31a7035..f15501f43 100644
--- a/arch/x86/kvm/Makefile
+++ b/arch/x86/kvm/Makefile
@@ -1,5 +1,5 @@
 
-EXTRA_CFLAGS += -Ivirt/kvm -Iarch/x86/kvm
+ccflags-y += -Ivirt/kvm -Iarch/x86/kvm
 
 CFLAGS_x86.o := -I.
 CFLAGS_svm.o := -I.
@@ -9,6 +9,7 @@
 				coalesced_mmio.o irq_comm.o eventfd.o \
 				assigned-dev.o)
 kvm-$(CONFIG_IOMMU_API)	+= $(addprefix ../../../virt/kvm/, iommu.o)
+kvm-$(CONFIG_KVM_ASYNC_PF)	+= $(addprefix ../../../virt/kvm/, async_pf.o)
 
 kvm-y			+= x86.o mmu.o emulate.o i8259.o irq.o lapic.o \
 			   i8254.o timer.o
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 38b6e8d..caf9667 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -20,16 +20,8 @@
  * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
  */
 
-#ifndef __KERNEL__
-#include <stdio.h>
-#include <stdint.h>
-#include <public/xen.h>
-#define DPRINTF(_f, _a ...) printf(_f , ## _a)
-#else
 #include <linux/kvm_host.h>
 #include "kvm_cache_regs.h"
-#define DPRINTF(x...) do {} while (0)
-#endif
 #include <linux/module.h>
 #include <asm/kvm_emulate.h>
 
@@ -418,9 +410,9 @@
 }
 
 static inline unsigned long
-register_address(struct decode_cache *c, unsigned long base, unsigned long reg)
+register_address(struct decode_cache *c, unsigned long reg)
 {
-	return base + address_mask(c, reg);
+	return address_mask(c, reg);
 }
 
 static inline void
@@ -452,60 +444,55 @@
 	return ops->get_cached_segment_base(seg, ctxt->vcpu);
 }
 
-static unsigned long seg_override_base(struct x86_emulate_ctxt *ctxt,
-				       struct x86_emulate_ops *ops,
-				       struct decode_cache *c)
+static unsigned seg_override(struct x86_emulate_ctxt *ctxt,
+			     struct x86_emulate_ops *ops,
+			     struct decode_cache *c)
 {
 	if (!c->has_seg_override)
 		return 0;
 
-	return seg_base(ctxt, ops, c->seg_override);
+	return c->seg_override;
 }
 
-static unsigned long es_base(struct x86_emulate_ctxt *ctxt,
-			     struct x86_emulate_ops *ops)
+static ulong linear(struct x86_emulate_ctxt *ctxt,
+		    struct segmented_address addr)
 {
-	return seg_base(ctxt, ops, VCPU_SREG_ES);
+	struct decode_cache *c = &ctxt->decode;
+	ulong la;
+
+	la = seg_base(ctxt, ctxt->ops, addr.seg) + addr.ea;
+	if (c->ad_bytes != 8)
+		la &= (u32)-1;
+	return la;
 }
 
-static unsigned long ss_base(struct x86_emulate_ctxt *ctxt,
-			     struct x86_emulate_ops *ops)
+static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
+			     u32 error, bool valid)
 {
-	return seg_base(ctxt, ops, VCPU_SREG_SS);
+	ctxt->exception.vector = vec;
+	ctxt->exception.error_code = error;
+	ctxt->exception.error_code_valid = valid;
+	return X86EMUL_PROPAGATE_FAULT;
 }
 
-static void emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
-				      u32 error, bool valid)
+static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
 {
-	ctxt->exception = vec;
-	ctxt->error_code = error;
-	ctxt->error_code_valid = valid;
+	return emulate_exception(ctxt, GP_VECTOR, err, true);
 }
 
-static void emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
+static int emulate_ud(struct x86_emulate_ctxt *ctxt)
 {
-	emulate_exception(ctxt, GP_VECTOR, err, true);
+	return emulate_exception(ctxt, UD_VECTOR, 0, false);
 }
 
-static void emulate_pf(struct x86_emulate_ctxt *ctxt)
+static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
 {
-	emulate_exception(ctxt, PF_VECTOR, 0, true);
-}
-
-static void emulate_ud(struct x86_emulate_ctxt *ctxt)
-{
-	emulate_exception(ctxt, UD_VECTOR, 0, false);
-}
-
-static void emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
-{
-	emulate_exception(ctxt, TS_VECTOR, err, true);
+	return emulate_exception(ctxt, TS_VECTOR, err, true);
 }
 
 static int emulate_de(struct x86_emulate_ctxt *ctxt)
 {
-	emulate_exception(ctxt, DE_VECTOR, 0, false);
-	return X86EMUL_PROPAGATE_FAULT;
+	return emulate_exception(ctxt, DE_VECTOR, 0, false);
 }
 
 static int do_fetch_insn_byte(struct x86_emulate_ctxt *ctxt,
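From here on, memory operands carry a (segment, effective address) pair and the segment base is folded in only at access time via linear(). A minimal usage sketch of the new shape, mirroring what emulate_pop() below does:

	struct segmented_address addr;

	addr.ea  = register_address(c, c->regs[VCPU_REGS_RSP]);
	addr.seg = VCPU_SREG_SS;
	rc = read_emulated(ctxt, ops, linear(ctxt, addr), dest, len);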
@@ -520,7 +507,7 @@
 		cur_size = fc->end - fc->start;
 		size = min(15UL - cur_size, PAGE_SIZE - offset_in_page(eip));
 		rc = ops->fetch(ctxt->cs_base + eip, fc->data + cur_size,
-				size, ctxt->vcpu, NULL);
+				size, ctxt->vcpu, &ctxt->exception);
 		if (rc != X86EMUL_CONTINUE)
 			return rc;
 		fc->end += size;
@@ -564,7 +551,7 @@
 
 static int read_descriptor(struct x86_emulate_ctxt *ctxt,
 			   struct x86_emulate_ops *ops,
-			   ulong addr,
+			   struct segmented_address addr,
 			   u16 *size, unsigned long *address, int op_bytes)
 {
 	int rc;
@@ -572,10 +559,13 @@
 	if (op_bytes == 2)
 		op_bytes = 3;
 	*address = 0;
-	rc = ops->read_std(addr, (unsigned long *)size, 2, ctxt->vcpu, NULL);
+	rc = ops->read_std(linear(ctxt, addr), (unsigned long *)size, 2,
+			   ctxt->vcpu, &ctxt->exception);
 	if (rc != X86EMUL_CONTINUE)
 		return rc;
-	rc = ops->read_std(addr + 2, address, op_bytes, ctxt->vcpu, NULL);
+	addr.ea += 2;
+	rc = ops->read_std(linear(ctxt, addr), address, op_bytes,
+			   ctxt->vcpu, &ctxt->exception);
 	return rc;
 }
 
@@ -768,7 +758,7 @@
 			break;
 		}
 	}
-	op->addr.mem = modrm_ea;
+	op->addr.mem.ea = modrm_ea;
 done:
 	return rc;
 }
@@ -783,13 +773,13 @@
 	op->type = OP_MEM;
 	switch (c->ad_bytes) {
 	case 2:
-		op->addr.mem = insn_fetch(u16, 2, c->eip);
+		op->addr.mem.ea = insn_fetch(u16, 2, c->eip);
 		break;
 	case 4:
-		op->addr.mem = insn_fetch(u32, 4, c->eip);
+		op->addr.mem.ea = insn_fetch(u32, 4, c->eip);
 		break;
 	case 8:
-		op->addr.mem = insn_fetch(u64, 8, c->eip);
+		op->addr.mem.ea = insn_fetch(u64, 8, c->eip);
 		break;
 	}
 done:
@@ -808,7 +798,7 @@
 		else if (c->src.bytes == 4)
 			sv = (s32)c->src.val & (s32)mask;
 
-		c->dst.addr.mem += (sv >> 3);
+		c->dst.addr.mem.ea += (sv >> 3);
 	}
 
 	/* only subword offset */
@@ -821,7 +811,6 @@
 {
 	int rc;
 	struct read_cache *mc = &ctxt->decode.mem_read;
-	u32 err;
 
 	while (size) {
 		int n = min(size, 8u);
@@ -829,10 +818,8 @@
 		if (mc->pos < mc->end)
 			goto read_cached;
 
-		rc = ops->read_emulated(addr, mc->data + mc->end, n, &err,
-					ctxt->vcpu);
-		if (rc == X86EMUL_PROPAGATE_FAULT)
-			emulate_pf(ctxt);
+		rc = ops->read_emulated(addr, mc->data + mc->end, n,
+					&ctxt->exception, ctxt->vcpu);
 		if (rc != X86EMUL_CONTINUE)
 			return rc;
 		mc->end += n;
@@ -907,19 +894,15 @@
 	struct desc_ptr dt;
 	u16 index = selector >> 3;
 	int ret;
-	u32 err;
 	ulong addr;
 
 	get_descriptor_table_ptr(ctxt, ops, selector, &dt);
 
-	if (dt.size < index * 8 + 7) {
-		emulate_gp(ctxt, selector & 0xfffc);
-		return X86EMUL_PROPAGATE_FAULT;
-	}
+	if (dt.size < index * 8 + 7)
+		return emulate_gp(ctxt, selector & 0xfffc);
 	addr = dt.address + index * 8;
-	ret = ops->read_std(addr, desc, sizeof *desc, ctxt->vcpu,  &err);
-	if (ret == X86EMUL_PROPAGATE_FAULT)
-		emulate_pf(ctxt);
+	ret = ops->read_std(addr, desc, sizeof *desc, ctxt->vcpu,
+			    &ctxt->exception);
 
        return ret;
 }
@@ -931,21 +914,17 @@
 {
 	struct desc_ptr dt;
 	u16 index = selector >> 3;
-	u32 err;
 	ulong addr;
 	int ret;
 
 	get_descriptor_table_ptr(ctxt, ops, selector, &dt);
 
-	if (dt.size < index * 8 + 7) {
-		emulate_gp(ctxt, selector & 0xfffc);
-		return X86EMUL_PROPAGATE_FAULT;
-	}
+	if (dt.size < index * 8 + 7)
+		return emulate_gp(ctxt, selector & 0xfffc);
 
 	addr = dt.address + index * 8;
-	ret = ops->write_std(addr, desc, sizeof *desc, ctxt->vcpu, &err);
-	if (ret == X86EMUL_PROPAGATE_FAULT)
-		emulate_pf(ctxt);
+	ret = ops->write_std(addr, desc, sizeof *desc, ctxt->vcpu,
+			     &ctxt->exception);
 
 	return ret;
 }
@@ -1092,7 +1071,6 @@
 {
 	int rc;
 	struct decode_cache *c = &ctxt->decode;
-	u32 err;
 
 	switch (c->dst.type) {
 	case OP_REG:
@@ -1101,21 +1079,19 @@
 	case OP_MEM:
 		if (c->lock_prefix)
 			rc = ops->cmpxchg_emulated(
-					c->dst.addr.mem,
+					linear(ctxt, c->dst.addr.mem),
 					&c->dst.orig_val,
 					&c->dst.val,
 					c->dst.bytes,
-					&err,
+					&ctxt->exception,
 					ctxt->vcpu);
 		else
 			rc = ops->write_emulated(
-					c->dst.addr.mem,
+					linear(ctxt, c->dst.addr.mem),
 					&c->dst.val,
 					c->dst.bytes,
-					&err,
+					&ctxt->exception,
 					ctxt->vcpu);
-		if (rc == X86EMUL_PROPAGATE_FAULT)
-			emulate_pf(ctxt);
 		if (rc != X86EMUL_CONTINUE)
 			return rc;
 		break;
@@ -1137,8 +1113,8 @@
 	c->dst.bytes = c->op_bytes;
 	c->dst.val = c->src.val;
 	register_address_increment(c, &c->regs[VCPU_REGS_RSP], -c->op_bytes);
-	c->dst.addr.mem = register_address(c, ss_base(ctxt, ops),
-					   c->regs[VCPU_REGS_RSP]);
+	c->dst.addr.mem.ea = register_address(c, c->regs[VCPU_REGS_RSP]);
+	c->dst.addr.mem.seg = VCPU_SREG_SS;
 }
 
 static int emulate_pop(struct x86_emulate_ctxt *ctxt,
@@ -1147,10 +1123,11 @@
 {
 	struct decode_cache *c = &ctxt->decode;
 	int rc;
+	struct segmented_address addr;
 
-	rc = read_emulated(ctxt, ops, register_address(c, ss_base(ctxt, ops),
-						       c->regs[VCPU_REGS_RSP]),
-			   dest, len);
+	addr.ea = register_address(c, c->regs[VCPU_REGS_RSP]);
+	addr.seg = VCPU_SREG_SS;
+	rc = read_emulated(ctxt, ops, linear(ctxt, addr), dest, len);
 	if (rc != X86EMUL_CONTINUE)
 		return rc;
 
@@ -1184,10 +1161,8 @@
 			change_mask |= EFLG_IF;
 		break;
 	case X86EMUL_MODE_VM86:
-		if (iopl < 3) {
-			emulate_gp(ctxt, 0);
-			return X86EMUL_PROPAGATE_FAULT;
-		}
+		if (iopl < 3)
+			return emulate_gp(ctxt, 0);
 		change_mask |= EFLG_IF;
 		break;
 	default: /* real mode */
@@ -1198,9 +1173,6 @@
 	*(unsigned long *)dest =
 		(ctxt->eflags & ~change_mask) | (val & change_mask);
 
-	if (rc == X86EMUL_PROPAGATE_FAULT)
-		emulate_pf(ctxt);
-
 	return rc;
 }
 
@@ -1287,7 +1259,6 @@
 	gva_t cs_addr;
 	gva_t eip_addr;
 	u16 cs, eip;
-	u32 err;
 
 	/* TODO: Add limit checks */
 	c->src.val = ctxt->eflags;
@@ -1317,11 +1288,11 @@
 	eip_addr = dt.address + (irq << 2);
 	cs_addr = dt.address + (irq << 2) + 2;
 
-	rc = ops->read_std(cs_addr, &cs, 2, ctxt->vcpu, &err);
+	rc = ops->read_std(cs_addr, &cs, 2, ctxt->vcpu, &ctxt->exception);
 	if (rc != X86EMUL_CONTINUE)
 		return rc;
 
-	rc = ops->read_std(eip_addr, &eip, 2, ctxt->vcpu, &err);
+	rc = ops->read_std(eip_addr, &eip, 2, ctxt->vcpu, &ctxt->exception);
 	if (rc != X86EMUL_CONTINUE)
 		return rc;
 
@@ -1370,10 +1341,8 @@
 	if (rc != X86EMUL_CONTINUE)
 		return rc;
 
-	if (temp_eip & ~0xffff) {
-		emulate_gp(ctxt, 0);
-		return X86EMUL_PROPAGATE_FAULT;
-	}
+	if (temp_eip & ~0xffff)
+		return emulate_gp(ctxt, 0);
 
 	rc = emulate_pop(ctxt, ops, &cs, c->op_bytes);
 
@@ -1624,10 +1593,8 @@
 
 	/* syscall is not available in real mode */
 	if (ctxt->mode == X86EMUL_MODE_REAL ||
-	    ctxt->mode == X86EMUL_MODE_VM86) {
-		emulate_ud(ctxt);
-		return X86EMUL_PROPAGATE_FAULT;
-	}
+	    ctxt->mode == X86EMUL_MODE_VM86)
+		return emulate_ud(ctxt);
 
 	setup_syscalls_segments(ctxt, ops, &cs, &ss);
 
@@ -1678,34 +1645,26 @@
 	u16 cs_sel, ss_sel;
 
 	/* inject #GP if in real mode */
-	if (ctxt->mode == X86EMUL_MODE_REAL) {
-		emulate_gp(ctxt, 0);
-		return X86EMUL_PROPAGATE_FAULT;
-	}
+	if (ctxt->mode == X86EMUL_MODE_REAL)
+		return emulate_gp(ctxt, 0);
 
 	/* XXX sysenter/sysexit have not been tested in 64bit mode.
 	* Therefore, we inject an #UD.
 	*/
-	if (ctxt->mode == X86EMUL_MODE_PROT64) {
-		emulate_ud(ctxt);
-		return X86EMUL_PROPAGATE_FAULT;
-	}
+	if (ctxt->mode == X86EMUL_MODE_PROT64)
+		return emulate_ud(ctxt);
 
 	setup_syscalls_segments(ctxt, ops, &cs, &ss);
 
 	ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_CS, &msr_data);
 	switch (ctxt->mode) {
 	case X86EMUL_MODE_PROT32:
-		if ((msr_data & 0xfffc) == 0x0) {
-			emulate_gp(ctxt, 0);
-			return X86EMUL_PROPAGATE_FAULT;
-		}
+		if ((msr_data & 0xfffc) == 0x0)
+			return emulate_gp(ctxt, 0);
 		break;
 	case X86EMUL_MODE_PROT64:
-		if (msr_data == 0x0) {
-			emulate_gp(ctxt, 0);
-			return X86EMUL_PROPAGATE_FAULT;
-		}
+		if (msr_data == 0x0)
+			return emulate_gp(ctxt, 0);
 		break;
 	}
 
@@ -1745,10 +1704,8 @@
 
 	/* inject #GP if in real mode or Virtual 8086 mode */
 	if (ctxt->mode == X86EMUL_MODE_REAL ||
-	    ctxt->mode == X86EMUL_MODE_VM86) {
-		emulate_gp(ctxt, 0);
-		return X86EMUL_PROPAGATE_FAULT;
-	}
+	    ctxt->mode == X86EMUL_MODE_VM86)
+		return emulate_gp(ctxt, 0);
 
 	setup_syscalls_segments(ctxt, ops, &cs, &ss);
 
@@ -1763,18 +1720,14 @@
 	switch (usermode) {
 	case X86EMUL_MODE_PROT32:
 		cs_sel = (u16)(msr_data + 16);
-		if ((msr_data & 0xfffc) == 0x0) {
-			emulate_gp(ctxt, 0);
-			return X86EMUL_PROPAGATE_FAULT;
-		}
+		if ((msr_data & 0xfffc) == 0x0)
+			return emulate_gp(ctxt, 0);
 		ss_sel = (u16)(msr_data + 24);
 		break;
 	case X86EMUL_MODE_PROT64:
 		cs_sel = (u16)(msr_data + 32);
-		if (msr_data == 0x0) {
-			emulate_gp(ctxt, 0);
-			return X86EMUL_PROPAGATE_FAULT;
-		}
+		if (msr_data == 0x0)
+			return emulate_gp(ctxt, 0);
 		ss_sel = cs_sel + 8;
 		cs.d = 0;
 		cs.l = 1;
@@ -1934,33 +1887,27 @@
 {
 	struct tss_segment_16 tss_seg;
 	int ret;
-	u32 err, new_tss_base = get_desc_base(new_desc);
+	u32 new_tss_base = get_desc_base(new_desc);
 
 	ret = ops->read_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
-			    &err);
-	if (ret == X86EMUL_PROPAGATE_FAULT) {
+			    &ctxt->exception);
+	if (ret != X86EMUL_CONTINUE)
 		/* FIXME: need to provide precise fault address */
-		emulate_pf(ctxt);
 		return ret;
-	}
 
 	save_state_to_tss16(ctxt, ops, &tss_seg);
 
 	ret = ops->write_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
-			     &err);
-	if (ret == X86EMUL_PROPAGATE_FAULT) {
+			     &ctxt->exception);
+	if (ret != X86EMUL_CONTINUE)
 		/* FIXME: need to provide precise fault address */
-		emulate_pf(ctxt);
 		return ret;
-	}
 
 	ret = ops->read_std(new_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
-			    &err);
-	if (ret == X86EMUL_PROPAGATE_FAULT) {
+			    &ctxt->exception);
+	if (ret != X86EMUL_CONTINUE)
 		/* FIXME: need to provide precise fault address */
-		emulate_pf(ctxt);
 		return ret;
-	}
 
 	if (old_tss_sel != 0xffff) {
 		tss_seg.prev_task_link = old_tss_sel;
@@ -1968,12 +1915,10 @@
 		ret = ops->write_std(new_tss_base,
 				     &tss_seg.prev_task_link,
 				     sizeof tss_seg.prev_task_link,
-				     ctxt->vcpu, &err);
-		if (ret == X86EMUL_PROPAGATE_FAULT) {
+				     ctxt->vcpu, &ctxt->exception);
+		if (ret != X86EMUL_CONTINUE)
 			/* FIXME: need to provide precise fault address */
-			emulate_pf(ctxt);
 			return ret;
-		}
 	}
 
 	return load_state_from_tss16(ctxt, ops, &tss_seg);
@@ -2013,10 +1958,8 @@
 	struct decode_cache *c = &ctxt->decode;
 	int ret;
 
-	if (ops->set_cr(3, tss->cr3, ctxt->vcpu)) {
-		emulate_gp(ctxt, 0);
-		return X86EMUL_PROPAGATE_FAULT;
-	}
+	if (ops->set_cr(3, tss->cr3, ctxt->vcpu))
+		return emulate_gp(ctxt, 0);
 	c->eip = tss->eip;
 	ctxt->eflags = tss->eflags | 2;
 	c->regs[VCPU_REGS_RAX] = tss->eax;
@@ -2076,33 +2019,27 @@
 {
 	struct tss_segment_32 tss_seg;
 	int ret;
-	u32 err, new_tss_base = get_desc_base(new_desc);
+	u32 new_tss_base = get_desc_base(new_desc);
 
 	ret = ops->read_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
-			    &err);
-	if (ret == X86EMUL_PROPAGATE_FAULT) {
+			    &ctxt->exception);
+	if (ret != X86EMUL_CONTINUE)
 		/* FIXME: need to provide precise fault address */
-		emulate_pf(ctxt);
 		return ret;
-	}
 
 	save_state_to_tss32(ctxt, ops, &tss_seg);
 
 	ret = ops->write_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
-			     &err);
-	if (ret == X86EMUL_PROPAGATE_FAULT) {
+			     &ctxt->exception);
+	if (ret != X86EMUL_CONTINUE)
 		/* FIXME: need to provide precise fault address */
-		emulate_pf(ctxt);
 		return ret;
-	}
 
 	ret = ops->read_std(new_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
-			    &err);
-	if (ret == X86EMUL_PROPAGATE_FAULT) {
+			    &ctxt->exception);
+	if (ret != X86EMUL_CONTINUE)
 		/* FIXME: need to provide precise fault address */
-		emulate_pf(ctxt);
 		return ret;
-	}
 
 	if (old_tss_sel != 0xffff) {
 		tss_seg.prev_task_link = old_tss_sel;
@@ -2110,12 +2047,10 @@
 		ret = ops->write_std(new_tss_base,
 				     &tss_seg.prev_task_link,
 				     sizeof tss_seg.prev_task_link,
-				     ctxt->vcpu, &err);
-		if (ret == X86EMUL_PROPAGATE_FAULT) {
+				     ctxt->vcpu, &ctxt->exception);
+		if (ret != X86EMUL_CONTINUE)
 			/* FIXME: need to provide precise fault address */
-			emulate_pf(ctxt);
 			return ret;
-		}
 	}
 
 	return load_state_from_tss32(ctxt, ops, &tss_seg);
@@ -2146,10 +2081,8 @@
 
 	if (reason != TASK_SWITCH_IRET) {
 		if ((tss_selector & 3) > next_tss_desc.dpl ||
-		    ops->cpl(ctxt->vcpu) > next_tss_desc.dpl) {
-			emulate_gp(ctxt, 0);
-			return X86EMUL_PROPAGATE_FAULT;
-		}
+		    ops->cpl(ctxt->vcpu) > next_tss_desc.dpl)
+			return emulate_gp(ctxt, 0);
 	}
 
 	desc_limit = desc_limit_scaled(&next_tss_desc);
@@ -2231,14 +2164,15 @@
 	return (rc == X86EMUL_UNHANDLEABLE) ? -1 : 0;
 }
 
-static void string_addr_inc(struct x86_emulate_ctxt *ctxt, unsigned long base,
+static void string_addr_inc(struct x86_emulate_ctxt *ctxt, unsigned seg,
 			    int reg, struct operand *op)
 {
 	struct decode_cache *c = &ctxt->decode;
 	int df = (ctxt->eflags & EFLG_DF) ? -1 : 1;
 
 	register_address_increment(c, &c->regs[reg], df * op->bytes);
-	op->addr.mem = register_address(c,  base, c->regs[reg]);
+	op->addr.mem.ea = register_address(c, c->regs[reg]);
+	op->addr.mem.seg = seg;
 }
 
 static int em_push(struct x86_emulate_ctxt *ctxt)
@@ -2369,10 +2303,8 @@
 	struct decode_cache *c = &ctxt->decode;
 	u64 tsc = 0;
 
-	if (cpl > 0 && (ctxt->ops->get_cr(4, ctxt->vcpu) & X86_CR4_TSD)) {
-		emulate_gp(ctxt, 0);
-		return X86EMUL_PROPAGATE_FAULT;
-	}
+	if (cpl > 0 && (ctxt->ops->get_cr(4, ctxt->vcpu) & X86_CR4_TSD))
+		return emulate_gp(ctxt, 0);
 	ctxt->ops->get_msr(ctxt->vcpu, MSR_IA32_TSC, &tsc);
 	c->regs[VCPU_REGS_RAX] = (u32)tsc;
 	c->regs[VCPU_REGS_RDX] = tsc >> 32;
@@ -2647,7 +2579,7 @@
 
 	op->type = OP_IMM;
 	op->bytes = size;
-	op->addr.mem = c->eip;
+	op->addr.mem.ea = c->eip;
 	/* NB. Immediates are sign-extended as necessary. */
 	switch (op->bytes) {
 	case 1:
@@ -2678,7 +2610,7 @@
 }
 
 int
-x86_decode_insn(struct x86_emulate_ctxt *ctxt)
+x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
 {
 	struct x86_emulate_ops *ops = ctxt->ops;
 	struct decode_cache *c = &ctxt->decode;
@@ -2689,7 +2621,10 @@
 	struct operand memop = { .type = OP_NONE };
 
 	c->eip = ctxt->eip;
-	c->fetch.start = c->fetch.end = c->eip;
+	c->fetch.start = c->eip;
+	c->fetch.end = c->fetch.start + insn_len;
+	if (insn_len > 0)
+		memcpy(c->fetch.data, insn, insn_len);
 	ctxt->cs_base = seg_base(ctxt, ops, VCPU_SREG_CS);
 
 	switch (mode) {
@@ -2803,10 +2738,8 @@
 	c->execute = opcode.u.execute;
 
 	/* Unrecognised? */
-	if (c->d == 0 || (c->d & Undefined)) {
-		DPRINTF("Cannot emulate %02x\n", c->b);
+	if (c->d == 0 || (c->d & Undefined))
 		return -1;
-	}
 
 	if (mode == X86EMUL_MODE_PROT64 && (c->d & Stack))
 		c->op_bytes = 8;
@@ -2831,14 +2764,13 @@
 	if (!c->has_seg_override)
 		set_seg_override(c, VCPU_SREG_DS);
 
-	if (memop.type == OP_MEM && !(!c->twobyte && c->b == 0x8d))
-		memop.addr.mem += seg_override_base(ctxt, ops, c);
+	memop.addr.mem.seg = seg_override(ctxt, ops, c);
 
 	if (memop.type == OP_MEM && c->ad_bytes != 8)
-		memop.addr.mem = (u32)memop.addr.mem;
+		memop.addr.mem.ea = (u32)memop.addr.mem.ea;
 
 	if (memop.type == OP_MEM && c->rip_relative)
-		memop.addr.mem += c->eip;
+		memop.addr.mem.ea += c->eip;
 
 	/*
 	 * Decode and fetch the source operand: register, memory
@@ -2890,14 +2822,14 @@
 	case SrcSI:
 		c->src.type = OP_MEM;
 		c->src.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
-		c->src.addr.mem =
-			register_address(c,  seg_override_base(ctxt, ops, c),
-					 c->regs[VCPU_REGS_RSI]);
+		c->src.addr.mem.ea =
+			register_address(c, c->regs[VCPU_REGS_RSI]);
+		c->src.addr.mem.seg = seg_override(ctxt, ops, c);
 		c->src.val = 0;
 		break;
 	case SrcImmFAddr:
 		c->src.type = OP_IMM;
-		c->src.addr.mem = c->eip;
+		c->src.addr.mem.ea = c->eip;
 		c->src.bytes = c->op_bytes + 2;
 		insn_fetch_arr(c->src.valptr, c->src.bytes, c->eip);
 		break;
@@ -2944,7 +2876,7 @@
 		break;
 	case DstImmUByte:
 		c->dst.type = OP_IMM;
-		c->dst.addr.mem = c->eip;
+		c->dst.addr.mem.ea = c->eip;
 		c->dst.bytes = 1;
 		c->dst.val = insn_fetch(u8, 1, c->eip);
 		break;
@@ -2969,9 +2901,9 @@
 	case DstDI:
 		c->dst.type = OP_MEM;
 		c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
-		c->dst.addr.mem =
-			register_address(c, es_base(ctxt, ops),
-					 c->regs[VCPU_REGS_RDI]);
+		c->dst.addr.mem.ea =
+			register_address(c, c->regs[VCPU_REGS_RDI]);
+		c->dst.addr.mem.seg = VCPU_SREG_ES;
 		c->dst.val = 0;
 		break;
 	case ImplicitOps:
@@ -3020,24 +2952,24 @@
 	ctxt->decode.mem_read.pos = 0;
 
 	if (ctxt->mode == X86EMUL_MODE_PROT64 && (c->d & No64)) {
-		emulate_ud(ctxt);
+		rc = emulate_ud(ctxt);
 		goto done;
 	}
 
 	/* LOCK prefix is allowed only with some instructions */
 	if (c->lock_prefix && (!(c->d & Lock) || c->dst.type != OP_MEM)) {
-		emulate_ud(ctxt);
+		rc = emulate_ud(ctxt);
 		goto done;
 	}
 
 	if ((c->d & SrcMask) == SrcMemFAddr && c->src.type != OP_MEM) {
-		emulate_ud(ctxt);
+		rc = emulate_ud(ctxt);
 		goto done;
 	}
 
 	/* Privileged instruction can be executed only in CPL=0 */
 	if ((c->d & Priv) && ops->cpl(ctxt->vcpu)) {
-		emulate_gp(ctxt, 0);
+		rc = emulate_gp(ctxt, 0);
 		goto done;
 	}
 
@@ -3050,7 +2982,7 @@
 	}
 
 	if ((c->src.type == OP_MEM) && !(c->d & NoAccess)) {
-		rc = read_emulated(ctxt, ops, c->src.addr.mem,
+		rc = read_emulated(ctxt, ops, linear(ctxt, c->src.addr.mem),
 					c->src.valptr, c->src.bytes);
 		if (rc != X86EMUL_CONTINUE)
 			goto done;
@@ -3058,7 +2990,7 @@
 	}
 
 	if (c->src2.type == OP_MEM) {
-		rc = read_emulated(ctxt, ops, c->src2.addr.mem,
+		rc = read_emulated(ctxt, ops, linear(ctxt, c->src2.addr.mem),
 					&c->src2.val, c->src2.bytes);
 		if (rc != X86EMUL_CONTINUE)
 			goto done;
@@ -3070,7 +3002,7 @@
 
 	if ((c->dst.type == OP_MEM) && !(c->d & Mov)) {
 		/* optimisation - avoid slow emulated read if Mov */
-		rc = read_emulated(ctxt, ops, c->dst.addr.mem,
+		rc = read_emulated(ctxt, ops, linear(ctxt, c->dst.addr.mem),
 				   &c->dst.val, c->dst.bytes);
 		if (rc != X86EMUL_CONTINUE)
 			goto done;
@@ -3215,13 +3147,13 @@
 		break;
 	case 0x8c:  /* mov r/m, sreg */
 		if (c->modrm_reg > VCPU_SREG_GS) {
-			emulate_ud(ctxt);
+			rc = emulate_ud(ctxt);
 			goto done;
 		}
 		c->dst.val = ops->get_segment_selector(c->modrm_reg, ctxt->vcpu);
 		break;
 	case 0x8d: /* lea r16/r32, m */
-		c->dst.val = c->src.addr.mem;
+		c->dst.val = c->src.addr.mem.ea;
 		break;
 	case 0x8e: { /* mov seg, r/m16 */
 		uint16_t sel;
@@ -3230,7 +3162,7 @@
 
 		if (c->modrm_reg == VCPU_SREG_CS ||
 		    c->modrm_reg > VCPU_SREG_GS) {
-			emulate_ud(ctxt);
+			rc = emulate_ud(ctxt);
 			goto done;
 		}
 
@@ -3268,7 +3200,6 @@
 		break;
 	case 0xa6 ... 0xa7:	/* cmps */
 		c->dst.type = OP_NONE; /* Disable writeback. */
-		DPRINTF("cmps: mem1=0x%p mem2=0x%p\n", c->src.addr.mem, c->dst.addr.mem);
 		goto cmp;
 	case 0xa8 ... 0xa9:	/* test ax, imm */
 		goto test;
@@ -3363,7 +3294,7 @@
 	do_io_in:
 		c->dst.bytes = min(c->dst.bytes, 4u);
 		if (!emulator_io_permited(ctxt, ops, c->src.val, c->dst.bytes)) {
-			emulate_gp(ctxt, 0);
+			rc = emulate_gp(ctxt, 0);
 			goto done;
 		}
 		if (!pio_in_emulated(ctxt, ops, c->dst.bytes, c->src.val,
@@ -3377,7 +3308,7 @@
 		c->src.bytes = min(c->src.bytes, 4u);
 		if (!emulator_io_permited(ctxt, ops, c->dst.val,
 					  c->src.bytes)) {
-			emulate_gp(ctxt, 0);
+			rc = emulate_gp(ctxt, 0);
 			goto done;
 		}
 		ops->pio_out_emulated(c->src.bytes, c->dst.val,
@@ -3402,14 +3333,14 @@
 		break;
 	case 0xfa: /* cli */
 		if (emulator_bad_iopl(ctxt, ops)) {
-			emulate_gp(ctxt, 0);
+			rc = emulate_gp(ctxt, 0);
 			goto done;
 		} else
 			ctxt->eflags &= ~X86_EFLAGS_IF;
 		break;
 	case 0xfb: /* sti */
 		if (emulator_bad_iopl(ctxt, ops)) {
-			emulate_gp(ctxt, 0);
+			rc = emulate_gp(ctxt, 0);
 			goto done;
 		} else {
 			ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
@@ -3449,11 +3380,11 @@
 	c->dst.type = saved_dst_type;
 
 	if ((c->d & SrcMask) == SrcSI)
-		string_addr_inc(ctxt, seg_override_base(ctxt, ops, c),
+		string_addr_inc(ctxt, seg_override(ctxt, ops, c),
 				VCPU_REGS_RSI, &c->src);
 
 	if ((c->d & DstMask) == DstDI)
-		string_addr_inc(ctxt, es_base(ctxt, ops), VCPU_REGS_RDI,
+		string_addr_inc(ctxt, VCPU_SREG_ES, VCPU_REGS_RDI,
 				&c->dst);
 
 	if (c->rep_prefix && (c->d & String)) {
@@ -3482,6 +3413,8 @@
 	ctxt->eip = c->eip;
 
 done:
+	if (rc == X86EMUL_PROPAGATE_FAULT)
+		ctxt->have_exception = true;
 	return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
 
 twobyte_insn:
@@ -3544,9 +3477,11 @@
 			break;
 		case 5: /* not defined */
 			emulate_ud(ctxt);
+			rc = X86EMUL_PROPAGATE_FAULT;
 			goto done;
 		case 7: /* invlpg*/
-			emulate_invlpg(ctxt->vcpu, c->src.addr.mem);
+			emulate_invlpg(ctxt->vcpu,
+				       linear(ctxt, c->src.addr.mem));
 			/* Disable writeback. */
 			c->dst.type = OP_NONE;
 			break;
@@ -3573,6 +3508,7 @@
 		case 5 ... 7:
 		case 9 ... 15:
 			emulate_ud(ctxt);
+			rc = X86EMUL_PROPAGATE_FAULT;
 			goto done;
 		}
 		c->dst.val = ops->get_cr(c->modrm_reg, ctxt->vcpu);
@@ -3581,6 +3517,7 @@
 		if ((ops->get_cr(4, ctxt->vcpu) & X86_CR4_DE) &&
 		    (c->modrm_reg == 4 || c->modrm_reg == 5)) {
 			emulate_ud(ctxt);
+			rc = X86EMUL_PROPAGATE_FAULT;
 			goto done;
 		}
 		ops->get_dr(c->modrm_reg, &c->dst.val, ctxt->vcpu);
@@ -3588,6 +3525,7 @@
 	case 0x22: /* mov reg, cr */
 		if (ops->set_cr(c->modrm_reg, c->src.val, ctxt->vcpu)) {
 			emulate_gp(ctxt, 0);
+			rc = X86EMUL_PROPAGATE_FAULT;
 			goto done;
 		}
 		c->dst.type = OP_NONE;
@@ -3596,6 +3534,7 @@
 		if ((ops->get_cr(4, ctxt->vcpu) & X86_CR4_DE) &&
 		    (c->modrm_reg == 4 || c->modrm_reg == 5)) {
 			emulate_ud(ctxt);
+			rc = X86EMUL_PROPAGATE_FAULT;
 			goto done;
 		}
 
@@ -3604,6 +3543,7 @@
 				 ~0ULL : ~0U), ctxt->vcpu) < 0) {
 			/* #UD condition is already handled by the code above */
 			emulate_gp(ctxt, 0);
+			rc = X86EMUL_PROPAGATE_FAULT;
 			goto done;
 		}
 
@@ -3615,6 +3555,7 @@
 			| ((u64)c->regs[VCPU_REGS_RDX] << 32);
 		if (ops->set_msr(ctxt->vcpu, c->regs[VCPU_REGS_RCX], msr_data)) {
 			emulate_gp(ctxt, 0);
+			rc = X86EMUL_PROPAGATE_FAULT;
 			goto done;
 		}
 		rc = X86EMUL_CONTINUE;
@@ -3623,6 +3564,7 @@
 		/* rdmsr */
 		if (ops->get_msr(ctxt->vcpu, c->regs[VCPU_REGS_RCX], &msr_data)) {
 			emulate_gp(ctxt, 0);
+			rc = X86EMUL_PROPAGATE_FAULT;
 			goto done;
 		} else {
 			c->regs[VCPU_REGS_RAX] = (u32)msr_data;
@@ -3785,6 +3727,5 @@
 	goto writeback;
 
 cannot_emulate:
-	DPRINTF("Cannot emulate %02x\n", c->b);
 	return -1;
 }
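
A recurring pattern in the emulate.c hunks above is that emulate_ud()/emulate_gp() now return X86EMUL_PROPAGATE_FAULT, which the main loop records at the done: label. A reduced, self-contained sketch of that flow is below; the X86EMUL_* names and the have_exception latch come from the patch, while the struct layout, the toy guards and the simplified return value are invented for illustration and are not the kernel code.

#include <stdbool.h>
#include <stdio.h>

#define X86EMUL_CONTINUE	0
#define X86EMUL_PROPAGATE_FAULT	1	/* a fault was generated and must be injected */

struct x86_emulate_ctxt {
	bool have_exception;
};

/* The real helpers queue #UD/#GP on the vcpu; here they only return the code. */
static int emulate_ud(struct x86_emulate_ctxt *ctxt)
{
	(void)ctxt;
	return X86EMUL_PROPAGATE_FAULT;
}

static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
{
	(void)ctxt;
	(void)err;
	return X86EMUL_PROPAGATE_FAULT;
}

/* Toy emulation loop: each guard assigns rc instead of returning directly,
 * and done: is the single place that latches the pending exception. */
static int toy_emulate(struct x86_emulate_ctxt *ctxt, bool lock_ok, int cpl)
{
	int rc = X86EMUL_CONTINUE;

	if (!lock_ok) {
		rc = emulate_ud(ctxt);
		goto done;
	}
	if (cpl > 0) {
		rc = emulate_gp(ctxt, 0);
		goto done;
	}
	/* ... instruction emulation would go here ... */
done:
	if (rc == X86EMUL_PROPAGATE_FAULT)
		ctxt->have_exception = true;
	return rc;
}

int main(void)
{
	struct x86_emulate_ctxt ctxt = { .have_exception = false };

	toy_emulate(&ctxt, false, 0);
	printf("have_exception = %d\n", ctxt.have_exception);	/* prints 1 */
	return 0;
}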
diff --git a/arch/x86/kvm/kvm_cache_regs.h b/arch/x86/kvm/kvm_cache_regs.h
index 975bb45..3377d53 100644
--- a/arch/x86/kvm/kvm_cache_regs.h
+++ b/arch/x86/kvm/kvm_cache_regs.h
@@ -73,6 +73,13 @@
 	return vcpu->arch.cr4 & mask;
 }
 
+static inline ulong kvm_read_cr3(struct kvm_vcpu *vcpu)
+{
+	if (!test_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail))
+		kvm_x86_ops->decache_cr3(vcpu);
+	return vcpu->arch.cr3;
+}
+
 static inline ulong kvm_read_cr4(struct kvm_vcpu *vcpu)
 {
 	return kvm_read_cr4_bits(vcpu, ~0UL);
@@ -84,4 +91,19 @@
 		| ((u64)(kvm_register_read(vcpu, VCPU_REGS_RDX) & -1u) << 32);
 }
 
+static inline void enter_guest_mode(struct kvm_vcpu *vcpu)
+{
+	vcpu->arch.hflags |= HF_GUEST_MASK;
+}
+
+static inline void leave_guest_mode(struct kvm_vcpu *vcpu)
+{
+	vcpu->arch.hflags &= ~HF_GUEST_MASK;
+}
+
+static inline bool is_guest_mode(struct kvm_vcpu *vcpu)
+{
+	return vcpu->arch.hflags & HF_GUEST_MASK;
+}
+
 #endif
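
kvm_read_cr3() above is a read-through cache: the value is fetched from the vendor backend only when the VCPU_EXREG_CR3 bit in regs_avail is clear. A minimal user-space sketch of that pattern follows; the regs_avail/VCPU_EXREG_CR3 names mirror the patch, while the structures, the bit index and the simulated hardware value are stand-ins, not the real kvm definitions.

#include <stdio.h>

enum { VCPU_EXREG_CR3 = 0 };	/* assumed bit index, for illustration only */

struct kvm_vcpu {
	unsigned long regs_avail;	/* bitmask of up-to-date cached registers */
	unsigned long cr3;		/* cached CR3 */
	unsigned long hw_cr3;		/* stand-in for the VMCB/VMCS copy */
};

static void decache_cr3(struct kvm_vcpu *vcpu)
{
	vcpu->cr3 = vcpu->hw_cr3;
	vcpu->regs_avail |= 1UL << VCPU_EXREG_CR3;
}

static unsigned long kvm_read_cr3(struct kvm_vcpu *vcpu)
{
	if (!(vcpu->regs_avail & (1UL << VCPU_EXREG_CR3)))
		decache_cr3(vcpu);	/* slow path, at most once per exit */
	return vcpu->cr3;
}

int main(void)
{
	struct kvm_vcpu vcpu = { .regs_avail = 0, .cr3 = 0, .hw_cr3 = 0x1234000UL };

	printf("cr3 = %#lx\n", kvm_read_cr3(&vcpu));	/* refreshed from hw_cr3 */
	printf("cr3 = %#lx\n", kvm_read_cr3(&vcpu));	/* served from the cache */
	return 0;
}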
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 413f897..93cf9d0 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -277,7 +277,8 @@
 
 	if (old_ppr != ppr) {
 		apic_set_reg(apic, APIC_PROCPRI, ppr);
-		kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
+		if (ppr < old_ppr)
+			kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
 	}
 }
 
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index fbb04ae..9cafbb4 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -18,9 +19,10 @@
  *
  */
 
+#include "irq.h"
 #include "mmu.h"
 #include "x86.h"
 #include "kvm_cache_regs.h"
 
 #include <linux/kvm_host.h>
 #include <linux/types.h>
@@ -194,7 +196,6 @@
 
 static u64 __read_mostly shadow_trap_nonpresent_pte;
 static u64 __read_mostly shadow_notrap_nonpresent_pte;
-static u64 __read_mostly shadow_base_present_pte;
 static u64 __read_mostly shadow_nx_mask;
 static u64 __read_mostly shadow_x_mask;	/* mutual exclusive with nx_mask */
 static u64 __read_mostly shadow_user_mask;
@@ -213,12 +214,6 @@
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_set_nonpresent_ptes);
 
-void kvm_mmu_set_base_ptes(u64 base_pte)
-{
-	shadow_base_present_pte = base_pte;
-}
-EXPORT_SYMBOL_GPL(kvm_mmu_set_base_ptes);
-
 void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
 		u64 dirty_mask, u64 nx_mask, u64 x_mask)
 {
@@ -482,46 +477,46 @@
 }
 
 /*
- * Return the pointer to the largepage write count for a given
- * gfn, handling slots that are not large page aligned.
+ * Return the pointer to the large page information for a given gfn,
+ * handling slots that are not large page aligned.
  */
-static int *slot_largepage_idx(gfn_t gfn,
-			       struct kvm_memory_slot *slot,
-			       int level)
+static struct kvm_lpage_info *lpage_info_slot(gfn_t gfn,
+					      struct kvm_memory_slot *slot,
+					      int level)
 {
 	unsigned long idx;
 
 	idx = (gfn >> KVM_HPAGE_GFN_SHIFT(level)) -
 	      (slot->base_gfn >> KVM_HPAGE_GFN_SHIFT(level));
-	return &slot->lpage_info[level - 2][idx].write_count;
+	return &slot->lpage_info[level - 2][idx];
 }
 
 static void account_shadowed(struct kvm *kvm, gfn_t gfn)
 {
 	struct kvm_memory_slot *slot;
-	int *write_count;
+	struct kvm_lpage_info *linfo;
 	int i;
 
 	slot = gfn_to_memslot(kvm, gfn);
 	for (i = PT_DIRECTORY_LEVEL;
 	     i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
-		write_count   = slot_largepage_idx(gfn, slot, i);
-		*write_count += 1;
+		linfo = lpage_info_slot(gfn, slot, i);
+		linfo->write_count += 1;
 	}
 }
 
 static void unaccount_shadowed(struct kvm *kvm, gfn_t gfn)
 {
 	struct kvm_memory_slot *slot;
-	int *write_count;
+	struct kvm_lpage_info *linfo;
 	int i;
 
 	slot = gfn_to_memslot(kvm, gfn);
 	for (i = PT_DIRECTORY_LEVEL;
 	     i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
-		write_count   = slot_largepage_idx(gfn, slot, i);
-		*write_count -= 1;
-		WARN_ON(*write_count < 0);
+		linfo = lpage_info_slot(gfn, slot, i);
+		linfo->write_count -= 1;
+		WARN_ON(linfo->write_count < 0);
 	}
 }
 
@@ -530,12 +525,12 @@
 				int level)
 {
 	struct kvm_memory_slot *slot;
-	int *largepage_idx;
+	struct kvm_lpage_info *linfo;
 
 	slot = gfn_to_memslot(kvm, gfn);
 	if (slot) {
-		largepage_idx = slot_largepage_idx(gfn, slot, level);
-		return *largepage_idx;
+		linfo = lpage_info_slot(gfn, slot, level);
+		return linfo->write_count;
 	}
 
 	return 1;
@@ -590,16 +585,15 @@
 static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn, int level)
 {
 	struct kvm_memory_slot *slot;
-	unsigned long idx;
+	struct kvm_lpage_info *linfo;
 
 	slot = gfn_to_memslot(kvm, gfn);
 	if (likely(level == PT_PAGE_TABLE_LEVEL))
 		return &slot->rmap[gfn - slot->base_gfn];
 
-	idx = (gfn >> KVM_HPAGE_GFN_SHIFT(level)) -
-		(slot->base_gfn >> KVM_HPAGE_GFN_SHIFT(level));
+	linfo = lpage_info_slot(gfn, slot, level);
 
-	return &slot->lpage_info[level - 2][idx].rmap_pde;
+	return &linfo->rmap_pde;
 }
 
 /*
@@ -887,19 +881,16 @@
 		end = start + (memslot->npages << PAGE_SHIFT);
 		if (hva >= start && hva < end) {
 			gfn_t gfn_offset = (hva - start) >> PAGE_SHIFT;
+			gfn_t gfn = memslot->base_gfn + gfn_offset;
 
 			ret = handler(kvm, &memslot->rmap[gfn_offset], data);
 
 			for (j = 0; j < KVM_NR_PAGE_SIZES - 1; ++j) {
-				unsigned long idx;
-				int sh;
+				struct kvm_lpage_info *linfo;
 
-				sh = KVM_HPAGE_GFN_SHIFT(PT_DIRECTORY_LEVEL+j);
-				idx = ((memslot->base_gfn+gfn_offset) >> sh) -
-					(memslot->base_gfn >> sh);
-				ret |= handler(kvm,
-					&memslot->lpage_info[j][idx].rmap_pde,
-					data);
+				linfo = lpage_info_slot(gfn, memslot,
+							PT_DIRECTORY_LEVEL + j);
+				ret |= handler(kvm, &linfo->rmap_pde, data);
 			}
 			trace_kvm_age_page(hva, memslot, ret);
 			retval |= ret;
@@ -1161,7 +1152,7 @@
 }
 
 static int nonpaging_sync_page(struct kvm_vcpu *vcpu,
-			       struct kvm_mmu_page *sp, bool clear_unsync)
+			       struct kvm_mmu_page *sp)
 {
 	return 1;
 }
@@ -1291,7 +1282,7 @@
 	if (clear_unsync)
 		kvm_unlink_unsync_page(vcpu->kvm, sp);
 
-	if (vcpu->arch.mmu.sync_page(vcpu, sp, clear_unsync)) {
+	if (vcpu->arch.mmu.sync_page(vcpu, sp)) {
 		kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list);
 		return 1;
 	}
@@ -1332,12 +1323,12 @@
 			continue;
 
 		WARN_ON(s->role.level != PT_PAGE_TABLE_LEVEL);
+		kvm_unlink_unsync_page(vcpu->kvm, s);
 		if ((s->role.cr4_pae != !!is_pae(vcpu)) ||
-			(vcpu->arch.mmu.sync_page(vcpu, s, true))) {
+			(vcpu->arch.mmu.sync_page(vcpu, s))) {
 			kvm_mmu_prepare_zap_page(vcpu->kvm, s, &invalid_list);
 			continue;
 		}
-		kvm_unlink_unsync_page(vcpu->kvm, s);
 		flush = true;
 	}
 
@@ -1963,9 +1954,9 @@
 		    unsigned pte_access, int user_fault,
 		    int write_fault, int dirty, int level,
 		    gfn_t gfn, pfn_t pfn, bool speculative,
-		    bool can_unsync, bool reset_host_protection)
+		    bool can_unsync, bool host_writable)
 {
-	u64 spte;
+	u64 spte, entry = *sptep;
 	int ret = 0;
 
 	/*
@@ -1973,7 +1964,7 @@
 	 * whether the guest actually used the pte (in order to detect
 	 * demand paging).
 	 */
-	spte = shadow_base_present_pte;
+	spte = PT_PRESENT_MASK;
 	if (!speculative)
 		spte |= shadow_accessed_mask;
 	if (!dirty)
@@ -1990,8 +1981,10 @@
 		spte |= kvm_x86_ops->get_mt_mask(vcpu, gfn,
 			kvm_is_mmio_pfn(pfn));
 
-	if (reset_host_protection)
+	if (host_writable)
 		spte |= SPTE_HOST_WRITEABLE;
+	else
+		pte_access &= ~ACC_WRITE_MASK;
 
 	spte |= (u64)pfn << PAGE_SHIFT;
 
@@ -2036,6 +2029,14 @@
 
 set_pte:
 	update_spte(sptep, spte);
+	/*
+	 * If we overwrite a writable spte with a read-only one we
+	 * should flush remote TLBs. Otherwise rmap_write_protect
+	 * will find a read-only spte, even though the writable spte
+	 * might be cached on a CPU's TLB.
+	 */
+	if (is_writable_pte(entry) && !is_writable_pte(*sptep))
+		kvm_flush_remote_tlbs(vcpu->kvm);
 done:
 	return ret;
 }
@@ -2045,7 +2046,7 @@
 			 int user_fault, int write_fault, int dirty,
 			 int *ptwrite, int level, gfn_t gfn,
 			 pfn_t pfn, bool speculative,
-			 bool reset_host_protection)
+			 bool host_writable)
 {
 	int was_rmapped = 0;
 	int rmap_count;
@@ -2080,7 +2081,7 @@
 
 	if (set_spte(vcpu, sptep, pte_access, user_fault, write_fault,
 		      dirty, level, gfn, pfn, speculative, true,
-		      reset_host_protection)) {
+		      host_writable)) {
 		if (write_fault)
 			*ptwrite = 1;
 		kvm_mmu_flush_tlb(vcpu);
@@ -2211,7 +2212,8 @@
 }
 
 static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
-			int level, gfn_t gfn, pfn_t pfn)
+			int map_writable, int level, gfn_t gfn, pfn_t pfn,
+			bool prefault)
 {
 	struct kvm_shadow_walk_iterator iterator;
 	struct kvm_mmu_page *sp;
@@ -2220,9 +2222,11 @@
 
 	for_each_shadow_entry(vcpu, (u64)gfn << PAGE_SHIFT, iterator) {
 		if (iterator.level == level) {
-			mmu_set_spte(vcpu, iterator.sptep, ACC_ALL, ACC_ALL,
+			unsigned pte_access = ACC_ALL;
+
+			mmu_set_spte(vcpu, iterator.sptep, ACC_ALL, pte_access,
 				     0, write, 1, &pt_write,
-				     level, gfn, pfn, false, true);
+				     level, gfn, pfn, prefault, map_writable);
 			direct_pte_prefetch(vcpu, iterator.sptep);
 			++vcpu->stat.pf_fixed;
 			break;
@@ -2277,12 +2281,17 @@
 	return 1;
 }
 
-static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
+static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
+			 gva_t gva, pfn_t *pfn, bool write, bool *writable);
+
+static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn,
+			 bool prefault)
 {
 	int r;
 	int level;
 	pfn_t pfn;
 	unsigned long mmu_seq;
+	bool map_writable;
 
 	level = mapping_level(vcpu, gfn);
 
@@ -2297,7 +2306,9 @@
 
 	mmu_seq = vcpu->kvm->mmu_notifier_seq;
 	smp_rmb();
-	pfn = gfn_to_pfn(vcpu->kvm, gfn);
+
+	if (try_async_pf(vcpu, prefault, gfn, v, &pfn, write, &map_writable))
+		return 0;
 
 	/* mmio */
 	if (is_error_pfn(pfn))
@@ -2307,7 +2318,8 @@
 	if (mmu_notifier_retry(vcpu, mmu_seq))
 		goto out_unlock;
 	kvm_mmu_free_some_pages(vcpu);
-	r = __direct_map(vcpu, v, write, level, gfn, pfn);
+	r = __direct_map(vcpu, v, write, map_writable, level, gfn, pfn,
+			 prefault);
 	spin_unlock(&vcpu->kvm->mmu_lock);
 
 
@@ -2530,6 +2542,7 @@
 		hpa_t root = vcpu->arch.mmu.root_hpa;
 		sp = page_header(root);
 		mmu_sync_children(vcpu, sp);
+		trace_kvm_mmu_audit(vcpu, AUDIT_POST_SYNC);
 		return;
 	}
 	for (i = 0; i < 4; ++i) {
@@ -2552,23 +2565,24 @@
 }
 
 static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr,
-				  u32 access, u32 *error)
+				  u32 access, struct x86_exception *exception)
 {
-	if (error)
-		*error = 0;
+	if (exception)
+		exception->error_code = 0;
 	return vaddr;
 }
 
 static gpa_t nonpaging_gva_to_gpa_nested(struct kvm_vcpu *vcpu, gva_t vaddr,
-					 u32 access, u32 *error)
+					 u32 access,
+					 struct x86_exception *exception)
 {
-	if (error)
-		*error = 0;
+	if (exception)
+		exception->error_code = 0;
 	return vcpu->arch.nested_mmu.translate_gpa(vcpu, vaddr, access);
 }
 
 static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
-				u32 error_code)
+				u32 error_code, bool prefault)
 {
 	gfn_t gfn;
 	int r;
@@ -2584,17 +2598,67 @@
 	gfn = gva >> PAGE_SHIFT;
 
 	return nonpaging_map(vcpu, gva & PAGE_MASK,
-			     error_code & PFERR_WRITE_MASK, gfn);
+			     error_code & PFERR_WRITE_MASK, gfn, prefault);
 }
 
-static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa,
-				u32 error_code)
+static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn)
+{
+	struct kvm_arch_async_pf arch;
+
+	arch.token = (vcpu->arch.apf.id++ << 12) | vcpu->vcpu_id;
+	arch.gfn = gfn;
+	arch.direct_map = vcpu->arch.mmu.direct_map;
+	arch.cr3 = vcpu->arch.mmu.get_cr3(vcpu);
+
+	return kvm_setup_async_pf(vcpu, gva, gfn, &arch);
+}
+
+static bool can_do_async_pf(struct kvm_vcpu *vcpu)
+{
+	if (unlikely(!irqchip_in_kernel(vcpu->kvm) ||
+		     kvm_event_needs_reinjection(vcpu)))
+		return false;
+
+	return kvm_x86_ops->interrupt_allowed(vcpu);
+}
+
+static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
+			 gva_t gva, pfn_t *pfn, bool write, bool *writable)
+{
+	bool async;
+
+	*pfn = gfn_to_pfn_async(vcpu->kvm, gfn, &async, write, writable);
+
+	if (!async)
+		return false; /* *pfn has correct page already */
+
+	put_page(pfn_to_page(*pfn));
+
+	if (!prefault && can_do_async_pf(vcpu)) {
+		trace_kvm_try_async_get_page(gva, gfn);
+		if (kvm_find_async_pf_gfn(vcpu, gfn)) {
+			trace_kvm_async_pf_doublefault(gva, gfn);
+			kvm_make_request(KVM_REQ_APF_HALT, vcpu);
+			return true;
+		} else if (kvm_arch_setup_async_pf(vcpu, gva, gfn))
+			return true;
+	}
+
+	*pfn = gfn_to_pfn_prot(vcpu->kvm, gfn, write, writable);
+
+	return false;
+}
+
+static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code,
+			  bool prefault)
 {
 	pfn_t pfn;
 	int r;
 	int level;
 	gfn_t gfn = gpa >> PAGE_SHIFT;
 	unsigned long mmu_seq;
+	int write = error_code & PFERR_WRITE_MASK;
+	bool map_writable;
 
 	ASSERT(vcpu);
 	ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa));
@@ -2609,15 +2673,19 @@
 
 	mmu_seq = vcpu->kvm->mmu_notifier_seq;
 	smp_rmb();
-	pfn = gfn_to_pfn(vcpu->kvm, gfn);
+
+	if (try_async_pf(vcpu, prefault, gfn, gpa, &pfn, write, &map_writable))
+		return 0;
+
+	/* mmio */
 	if (is_error_pfn(pfn))
 		return kvm_handle_bad_page(vcpu->kvm, gfn, pfn);
 	spin_lock(&vcpu->kvm->mmu_lock);
 	if (mmu_notifier_retry(vcpu, mmu_seq))
 		goto out_unlock;
 	kvm_mmu_free_some_pages(vcpu);
-	r = __direct_map(vcpu, gpa, error_code & PFERR_WRITE_MASK,
-			 level, gfn, pfn);
+	r = __direct_map(vcpu, gpa, write, map_writable,
+			 level, gfn, pfn, prefault);
 	spin_unlock(&vcpu->kvm->mmu_lock);
 
 	return r;
@@ -2659,18 +2727,19 @@
 
 static void paging_new_cr3(struct kvm_vcpu *vcpu)
 {
-	pgprintk("%s: cr3 %lx\n", __func__, vcpu->arch.cr3);
+	pgprintk("%s: cr3 %lx\n", __func__, kvm_read_cr3(vcpu));
 	mmu_free_roots(vcpu);
 }
 
 static unsigned long get_cr3(struct kvm_vcpu *vcpu)
 {
-	return vcpu->arch.cr3;
+	return kvm_read_cr3(vcpu);
 }
 
-static void inject_page_fault(struct kvm_vcpu *vcpu)
+static void inject_page_fault(struct kvm_vcpu *vcpu,
+			      struct x86_exception *fault)
 {
-	vcpu->arch.mmu.inject_page_fault(vcpu);
+	vcpu->arch.mmu.inject_page_fault(vcpu, fault);
 }
 
 static void paging_free(struct kvm_vcpu *vcpu)
@@ -2816,6 +2885,7 @@
 {
 	struct kvm_mmu *context = vcpu->arch.walk_mmu;
 
+	context->base_role.word = 0;
 	context->new_cr3 = nonpaging_new_cr3;
 	context->page_fault = tdp_page_fault;
 	context->free = nonpaging_free;
@@ -3008,9 +3078,6 @@
 		return;
         }
 
-	if (is_rsvd_bits_set(&vcpu->arch.mmu, *(u64 *)new, PT_PAGE_TABLE_LEVEL))
-		return;
-
 	++vcpu->kvm->stat.mmu_pte_updated;
 	if (!sp->role.cr4_pae)
 		paging32_update_pte(vcpu, sp, spte, new);
@@ -3264,12 +3331,13 @@
 	}
 }
 
-int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code)
+int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code,
+		       void *insn, int insn_len)
 {
 	int r;
 	enum emulation_result er;
 
-	r = vcpu->arch.mmu.page_fault(vcpu, cr2, error_code);
+	r = vcpu->arch.mmu.page_fault(vcpu, cr2, error_code, false);
 	if (r < 0)
 		goto out;
 
@@ -3282,7 +3350,7 @@
 	if (r)
 		goto out;
 
-	er = emulate_instruction(vcpu, cr2, error_code, 0);
+	er = x86_emulate_instruction(vcpu, cr2, 0, insn, insn_len);
 
 	switch (er) {
 	case EMULATE_DONE:
@@ -3377,11 +3445,14 @@
 		if (!test_bit(slot, sp->slot_bitmap))
 			continue;
 
+		if (sp->role.level != PT_PAGE_TABLE_LEVEL)
+			continue;
+
 		pt = sp->spt;
 		for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
 			/* avoid RMW */
 			if (is_writable_pte(pt[i]))
-				pt[i] &= ~PT_WRITABLE_MASK;
+				update_spte(&pt[i], pt[i] & ~PT_WRITABLE_MASK);
 	}
 	kvm_flush_remote_tlbs(kvm);
 }
@@ -3463,13 +3534,6 @@
 		kmem_cache_destroy(mmu_page_header_cache);
 }
 
-void kvm_mmu_module_exit(void)
-{
-	mmu_destroy_caches();
-	percpu_counter_destroy(&kvm_total_used_mmu_pages);
-	unregister_shrinker(&mmu_shrinker);
-}
-
 int kvm_mmu_module_init(void)
 {
 	pte_chain_cache = kmem_cache_create("kvm_pte_chain",
@@ -3566,7 +3630,7 @@
 
 static int kvm_pv_mmu_flush_tlb(struct kvm_vcpu *vcpu)
 {
-	(void)kvm_set_cr3(vcpu, vcpu->arch.cr3);
+	(void)kvm_set_cr3(vcpu, kvm_read_cr3(vcpu));
 	return 1;
 }
 
@@ -3662,12 +3726,6 @@
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_get_spte_hierarchy);
 
-#ifdef CONFIG_KVM_MMU_AUDIT
-#include "mmu_audit.c"
-#else
-static void mmu_audit_disable(void) { }
-#endif
-
 void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
 {
 	ASSERT(vcpu);
@@ -3675,5 +3733,18 @@
 	destroy_kvm_mmu(vcpu);
 	free_mmu_pages(vcpu);
 	mmu_free_memory_caches(vcpu);
+}
+
+#ifdef CONFIG_KVM_MMU_AUDIT
+#include "mmu_audit.c"
+#else
+static void mmu_audit_disable(void) { }
+#endif
+
+void kvm_mmu_module_exit(void)
+{
+	mmu_destroy_caches();
+	percpu_counter_destroy(&kvm_total_used_mmu_pages);
+	unregister_shrinker(&mmu_shrinker);
 	mmu_audit_disable();
 }
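
lpage_info_slot() above keeps the index arithmetic of the old slot_largepage_idx() and only changes the return type to the whole kvm_lpage_info entry. A standalone sketch of that arithmetic, with a worked example, is below; the field names follow the patch, but the shift value and structure layouts are assumptions made for illustration.

#include <stdio.h>

typedef unsigned long long gfn_t;

/* assumed: each large-page level adds 9 bits of gfn (512 4 KiB pages per 2 MiB page) */
#define KVM_HPAGE_GFN_SHIFT(level)	(((level) - 1) * 9)

struct kvm_lpage_info { int write_count; unsigned long rmap_pde; };

struct kvm_memory_slot {
	gfn_t base_gfn;
	struct kvm_lpage_info *lpage_info[2];	/* indexed by level - 2 */
};

/* Same arithmetic as lpage_info_slot() in the patch: index of the large page
 * containing gfn, relative to the (possibly unaligned) slot base. */
static struct kvm_lpage_info *lpage_info_slot(gfn_t gfn,
					      struct kvm_memory_slot *slot,
					      int level)
{
	unsigned long idx;

	idx = (gfn >> KVM_HPAGE_GFN_SHIFT(level)) -
	      (slot->base_gfn >> KVM_HPAGE_GFN_SHIFT(level));
	return &slot->lpage_info[level - 2][idx];
}

int main(void)
{
	struct kvm_lpage_info info[8] = { { 0 } };
	struct kvm_memory_slot slot = { .base_gfn = 0x1000, .lpage_info = { info } };

	/* gfn 0x1207 sits one 2 MiB page above the slot base, so the index is 1 */
	lpage_info_slot(0x1207, &slot, 2)->write_count++;
	printf("write_count[1] = %d\n", info[1].write_count);
	return 0;
}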
diff --git a/arch/x86/kvm/mmu_audit.c b/arch/x86/kvm/mmu_audit.c
index ba2bcdd..5f6223b 100644
--- a/arch/x86/kvm/mmu_audit.c
+++ b/arch/x86/kvm/mmu_audit.c
@@ -19,11 +19,9 @@
 
 #include <linux/ratelimit.h>
 
-static int audit_point;
-
-#define audit_printk(fmt, args...)		\
+#define audit_printk(kvm, fmt, args...)		\
 	printk(KERN_ERR "audit: (%s) error: "	\
-		fmt, audit_point_name[audit_point], ##args)
+		fmt, audit_point_name[kvm->arch.audit_point], ##args)
 
 typedef void (*inspect_spte_fn) (struct kvm_vcpu *vcpu, u64 *sptep, int level);
 
@@ -97,18 +95,21 @@
 
 	if (sp->unsync) {
 		if (level != PT_PAGE_TABLE_LEVEL) {
-			audit_printk("unsync sp: %p level = %d\n", sp, level);
+			audit_printk(vcpu->kvm, "unsync sp: %p "
+				     "level = %d\n", sp, level);
 			return;
 		}
 
 		if (*sptep == shadow_notrap_nonpresent_pte) {
-			audit_printk("notrap spte in unsync sp: %p\n", sp);
+			audit_printk(vcpu->kvm, "notrap spte in unsync "
+				     "sp: %p\n", sp);
 			return;
 		}
 	}
 
 	if (sp->role.direct && *sptep == shadow_notrap_nonpresent_pte) {
-		audit_printk("notrap spte in direct sp: %p\n", sp);
+		audit_printk(vcpu->kvm, "notrap spte in direct sp: %p\n",
+			     sp);
 		return;
 	}
 
@@ -125,8 +126,9 @@
 
 	hpa =  pfn << PAGE_SHIFT;
 	if ((*sptep & PT64_BASE_ADDR_MASK) != hpa)
-		audit_printk("levels %d pfn %llx hpa %llx ent %llxn",
-				   vcpu->arch.mmu.root_level, pfn, hpa, *sptep);
+		audit_printk(vcpu->kvm, "levels %d pfn %llx hpa %llx "
+			     "ent %llx\n", vcpu->arch.mmu.root_level, pfn,
+			     hpa, *sptep);
 }
 
 static void inspect_spte_has_rmap(struct kvm *kvm, u64 *sptep)
@@ -142,8 +144,8 @@
 	if (!gfn_to_memslot(kvm, gfn)) {
 		if (!printk_ratelimit())
 			return;
-		audit_printk("no memslot for gfn %llx\n", gfn);
-		audit_printk("index %ld of sp (gfn=%llx)\n",
+		audit_printk(kvm, "no memslot for gfn %llx\n", gfn);
+		audit_printk(kvm, "index %ld of sp (gfn=%llx)\n",
 		       (long int)(sptep - rev_sp->spt), rev_sp->gfn);
 		dump_stack();
 		return;
@@ -153,7 +155,8 @@
 	if (!*rmapp) {
 		if (!printk_ratelimit())
 			return;
-		audit_printk("no rmap for writable spte %llx\n", *sptep);
+		audit_printk(kvm, "no rmap for writable spte %llx\n",
+			     *sptep);
 		dump_stack();
 	}
 }
@@ -168,8 +171,9 @@
 {
 	struct kvm_mmu_page *sp = page_header(__pa(sptep));
 
-	if (audit_point == AUDIT_POST_SYNC && sp->unsync)
-		audit_printk("meet unsync sp(%p) after sync root.\n", sp);
+	if (vcpu->kvm->arch.audit_point == AUDIT_POST_SYNC && sp->unsync)
+		audit_printk(vcpu->kvm, "meet unsync sp(%p) after sync "
+			     "root.\n", sp);
 }
 
 static void check_mappings_rmap(struct kvm *kvm, struct kvm_mmu_page *sp)
@@ -202,8 +206,9 @@
 	spte = rmap_next(kvm, rmapp, NULL);
 	while (spte) {
 		if (is_writable_pte(*spte))
-			audit_printk("shadow page has writable mappings: gfn "
-				     "%llx role %x\n", sp->gfn, sp->role.word);
+			audit_printk(kvm, "shadow page has writable "
+				     "mappings: gfn %llx role %x\n",
+				     sp->gfn, sp->role.word);
 		spte = rmap_next(kvm, rmapp, spte);
 	}
 }
@@ -238,7 +243,7 @@
 	if (!__ratelimit(&ratelimit_state))
 		return;
 
-	audit_point = point;
+	vcpu->kvm->arch.audit_point = point;
 	audit_all_active_sps(vcpu->kvm);
 	audit_vcpu_spte(vcpu);
 }
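
The audit_printk() rework above threads the kvm pointer through a variadic macro so the audit point is looked up per VM instead of from a global. A minimal sketch of the same fmt/##args pattern follows; the audit_point_name strings are assumed for the example and printf stands in for printk.

#include <stdio.h>

static const char *const audit_point_name[] = {
	"pre page fault", "post page fault", "pre pte write",
	"post pte write", "pre sync", "post sync",
};

struct kvm_arch { int audit_point; };
struct kvm { struct kvm_arch arch; };

/* Same shape as the patched macro: the kvm argument selects the audit point,
 * the remaining arguments are forwarded via ##args. */
#define audit_printk(kvm, fmt, args...)					\
	printf("audit: (%s) error: " fmt,				\
	       audit_point_name[(kvm)->arch.audit_point], ##args)

int main(void)
{
	struct kvm kvm = { .arch = { .audit_point = 5 } };

	audit_printk(&kvm, "unsync sp: %p level = %d\n", (void *)0, 1);
	return 0;
}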
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index cd7a833..53210f1 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -72,7 +72,7 @@
 	unsigned pt_access;
 	unsigned pte_access;
 	gfn_t gfn;
-	u32 error_code;
+	struct x86_exception fault;
 };
 
 static gfn_t gpte_to_gfn_lvl(pt_element_t gpte, int lvl)
@@ -266,21 +266,23 @@
 	return 1;
 
 error:
-	walker->error_code = 0;
+	walker->fault.vector = PF_VECTOR;
+	walker->fault.error_code_valid = true;
+	walker->fault.error_code = 0;
 	if (present)
-		walker->error_code |= PFERR_PRESENT_MASK;
+		walker->fault.error_code |= PFERR_PRESENT_MASK;
 
-	walker->error_code |= write_fault | user_fault;
+	walker->fault.error_code |= write_fault | user_fault;
 
 	if (fetch_fault && mmu->nx)
-		walker->error_code |= PFERR_FETCH_MASK;
+		walker->fault.error_code |= PFERR_FETCH_MASK;
 	if (rsvd_fault)
-		walker->error_code |= PFERR_RSVD_MASK;
+		walker->fault.error_code |= PFERR_RSVD_MASK;
 
-	vcpu->arch.fault.address    = addr;
-	vcpu->arch.fault.error_code = walker->error_code;
+	walker->fault.address = addr;
+	walker->fault.nested_page_fault = mmu != vcpu->arch.walk_mmu;
 
-	trace_kvm_mmu_walker_error(walker->error_code);
+	trace_kvm_mmu_walker_error(walker->fault.error_code);
 	return 0;
 }
 
@@ -299,25 +301,42 @@
 					addr, access);
 }
 
+static bool FNAME(prefetch_invalid_gpte)(struct kvm_vcpu *vcpu,
+				    struct kvm_mmu_page *sp, u64 *spte,
+				    pt_element_t gpte)
+{
+	u64 nonpresent = shadow_trap_nonpresent_pte;
+
+	if (is_rsvd_bits_set(&vcpu->arch.mmu, gpte, PT_PAGE_TABLE_LEVEL))
+		goto no_present;
+
+	if (!is_present_gpte(gpte)) {
+		if (!sp->unsync)
+			nonpresent = shadow_notrap_nonpresent_pte;
+		goto no_present;
+	}
+
+	if (!(gpte & PT_ACCESSED_MASK))
+		goto no_present;
+
+	return false;
+
+no_present:
+	drop_spte(vcpu->kvm, spte, nonpresent);
+	return true;
+}
+
 static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 			      u64 *spte, const void *pte)
 {
 	pt_element_t gpte;
 	unsigned pte_access;
 	pfn_t pfn;
-	u64 new_spte;
 
 	gpte = *(const pt_element_t *)pte;
-	if (~gpte & (PT_PRESENT_MASK | PT_ACCESSED_MASK)) {
-		if (!is_present_gpte(gpte)) {
-			if (sp->unsync)
-				new_spte = shadow_trap_nonpresent_pte;
-			else
-				new_spte = shadow_notrap_nonpresent_pte;
-			__set_spte(spte, new_spte);
-		}
+	if (FNAME(prefetch_invalid_gpte)(vcpu, sp, spte, gpte))
 		return;
-	}
+
 	pgprintk("%s: gpte %llx spte %p\n", __func__, (u64)gpte, spte);
 	pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte);
 	if (gpte_to_gfn(gpte) != vcpu->arch.update_pte.gfn)
@@ -329,7 +348,7 @@
 		return;
 	kvm_get_pfn(pfn);
 	/*
-	 * we call mmu_set_spte() with reset_host_protection = true beacuse that
+	 * we call mmu_set_spte() with host_writable = true because
 	 * vcpu->arch.update_pte.pfn was fetched from get_user_pages(write = 1).
 	 */
 	mmu_set_spte(vcpu, spte, sp->role.access, pte_access, 0, 0,
@@ -364,7 +383,6 @@
 				u64 *sptep)
 {
 	struct kvm_mmu_page *sp;
-	struct kvm_mmu *mmu = &vcpu->arch.mmu;
 	pt_element_t *gptep = gw->prefetch_ptes;
 	u64 *spte;
 	int i;
@@ -395,14 +413,7 @@
 
 		gpte = gptep[i];
 
-		if (!is_present_gpte(gpte) ||
-		      is_rsvd_bits_set(mmu, gpte, PT_PAGE_TABLE_LEVEL)) {
-			if (!sp->unsync)
-				__set_spte(spte, shadow_notrap_nonpresent_pte);
-			continue;
-		}
-
-		if (!(gpte & PT_ACCESSED_MASK))
+		if (FNAME(prefetch_invalid_gpte)(vcpu, sp, spte, gpte))
 			continue;
 
 		pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte);
@@ -427,7 +438,8 @@
 static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
 			 struct guest_walker *gw,
 			 int user_fault, int write_fault, int hlevel,
-			 int *ptwrite, pfn_t pfn)
+			 int *ptwrite, pfn_t pfn, bool map_writable,
+			 bool prefault)
 {
 	unsigned access = gw->pt_access;
 	struct kvm_mmu_page *sp = NULL;
@@ -501,7 +513,7 @@
 
 	mmu_set_spte(vcpu, it.sptep, access, gw->pte_access & access,
 		     user_fault, write_fault, dirty, ptwrite, it.level,
-		     gw->gfn, pfn, false, true);
+		     gw->gfn, pfn, prefault, map_writable);
 	FNAME(pte_prefetch)(vcpu, gw, it.sptep);
 
 	return it.sptep;
@@ -527,8 +539,8 @@
  *  Returns: 1 if we need to emulate the instruction, 0 otherwise, or
  *           a negative value on error.
  */
-static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
-			       u32 error_code)
+static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
+			     bool prefault)
 {
 	int write_fault = error_code & PFERR_WRITE_MASK;
 	int user_fault = error_code & PFERR_USER_MASK;
@@ -539,6 +551,7 @@
 	pfn_t pfn;
 	int level = PT_PAGE_TABLE_LEVEL;
 	unsigned long mmu_seq;
+	bool map_writable;
 
 	pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
 
@@ -556,8 +569,11 @@
 	 */
 	if (!r) {
 		pgprintk("%s: guest page fault\n", __func__);
-		inject_page_fault(vcpu);
-		vcpu->arch.last_pt_write_count = 0; /* reset fork detector */
+		if (!prefault) {
+			inject_page_fault(vcpu, &walker.fault);
+			/* reset fork detector */
+			vcpu->arch.last_pt_write_count = 0;
+		}
 		return 0;
 	}
 
@@ -568,7 +584,10 @@
 
 	mmu_seq = vcpu->kvm->mmu_notifier_seq;
 	smp_rmb();
-	pfn = gfn_to_pfn(vcpu->kvm, walker.gfn);
+
+	if (try_async_pf(vcpu, prefault, walker.gfn, addr, &pfn, write_fault,
+			 &map_writable))
+		return 0;
 
 	/* mmio */
 	if (is_error_pfn(pfn))
@@ -581,7 +600,7 @@
 	trace_kvm_mmu_audit(vcpu, AUDIT_PRE_PAGE_FAULT);
 	kvm_mmu_free_some_pages(vcpu);
 	sptep = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault,
-			     level, &write_pt, pfn);
+			     level, &write_pt, pfn, map_writable, prefault);
 	(void)sptep;
 	pgprintk("%s: shadow pte %p %llx ptwrite %d\n", __func__,
 		 sptep, *sptep, write_pt);
@@ -661,7 +680,7 @@
 }
 
 static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr, u32 access,
-			       u32 *error)
+			       struct x86_exception *exception)
 {
 	struct guest_walker walker;
 	gpa_t gpa = UNMAPPED_GVA;
@@ -672,14 +691,15 @@
 	if (r) {
 		gpa = gfn_to_gpa(walker.gfn);
 		gpa |= vaddr & ~PAGE_MASK;
-	} else if (error)
-		*error = walker.error_code;
+	} else if (exception)
+		*exception = walker.fault;
 
 	return gpa;
 }
 
 static gpa_t FNAME(gva_to_gpa_nested)(struct kvm_vcpu *vcpu, gva_t vaddr,
-				      u32 access, u32 *error)
+				      u32 access,
+				      struct x86_exception *exception)
 {
 	struct guest_walker walker;
 	gpa_t gpa = UNMAPPED_GVA;
@@ -690,8 +710,8 @@
 	if (r) {
 		gpa = gfn_to_gpa(walker.gfn);
 		gpa |= vaddr & ~PAGE_MASK;
-	} else if (error)
-		*error = walker.error_code;
+	} else if (exception)
+		*exception = walker.fault;
 
 	return gpa;
 }
@@ -730,12 +750,19 @@
  * Using the cached information from sp->gfns is safe because:
  * - The spte has a reference to the struct page, so the pfn for a given gfn
  *   can't change unless all sptes pointing to it are nuked first.
+ *
+ * Note:
+ *   We should flush all TLBs if a spte is dropped, even though the guest
+ *   is responsible for it. If we don't, kvm_mmu_notifier_invalidate_page
+ *   and kvm_mmu_notifier_invalidate_range_start may see that the mapping is
+ *   no longer used by the guest and skip the flush, allowing the guest to
+ *   keep accessing the freed pages.
+ *   We increase kvm->tlbs_dirty to delay the TLB flush in this case.
  */
-static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
-			    bool clear_unsync)
+static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 {
 	int i, offset, nr_present;
-	bool reset_host_protection;
+	bool host_writable;
 	gpa_t first_pte_gpa;
 
 	offset = nr_present = 0;
@@ -764,31 +791,27 @@
 			return -EINVAL;
 
 		gfn = gpte_to_gfn(gpte);
-		if (is_rsvd_bits_set(&vcpu->arch.mmu, gpte, PT_PAGE_TABLE_LEVEL)
-		      || gfn != sp->gfns[i] || !is_present_gpte(gpte)
-		      || !(gpte & PT_ACCESSED_MASK)) {
-			u64 nonpresent;
 
-			if (is_present_gpte(gpte) || !clear_unsync)
-				nonpresent = shadow_trap_nonpresent_pte;
-			else
-				nonpresent = shadow_notrap_nonpresent_pte;
-			drop_spte(vcpu->kvm, &sp->spt[i], nonpresent);
+		if (FNAME(prefetch_invalid_gpte)(vcpu, sp, &sp->spt[i], gpte)) {
+			vcpu->kvm->tlbs_dirty++;
+			continue;
+		}
+
+		if (gfn != sp->gfns[i]) {
+			drop_spte(vcpu->kvm, &sp->spt[i],
+				      shadow_trap_nonpresent_pte);
+			vcpu->kvm->tlbs_dirty++;
 			continue;
 		}
 
 		nr_present++;
 		pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte);
-		if (!(sp->spt[i] & SPTE_HOST_WRITEABLE)) {
-			pte_access &= ~ACC_WRITE_MASK;
-			reset_host_protection = 0;
-		} else {
-			reset_host_protection = 1;
-		}
+		host_writable = sp->spt[i] & SPTE_HOST_WRITEABLE;
+
 		set_spte(vcpu, &sp->spt[i], pte_access, 0, 0,
 			 is_dirty_gpte(gpte), PT_PAGE_TABLE_LEVEL, gfn,
 			 spte_to_pfn(sp->spt[i]), true, false,
-			 reset_host_protection);
+			 host_writable);
 	}
 
 	return !nr_present;
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index b81a9b7..25bd1bc 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -31,6 +31,7 @@
 
 #include <asm/tlbflush.h>
 #include <asm/desc.h>
+#include <asm/kvm_para.h>
 
 #include <asm/virtext.h>
 #include "trace.h"
@@ -50,6 +51,10 @@
 #define SVM_FEATURE_LBRV           (1 <<  1)
 #define SVM_FEATURE_SVML           (1 <<  2)
 #define SVM_FEATURE_NRIP           (1 <<  3)
+#define SVM_FEATURE_TSC_RATE       (1 <<  4)
+#define SVM_FEATURE_VMCB_CLEAN     (1 <<  5)
+#define SVM_FEATURE_FLUSH_ASID     (1 <<  6)
+#define SVM_FEATURE_DECODE_ASSIST  (1 <<  7)
 #define SVM_FEATURE_PAUSE_FILTER   (1 << 10)
 
 #define NESTED_EXIT_HOST	0	/* Exit handled on host level */
@@ -97,10 +102,8 @@
 	unsigned long vmexit_rax;
 
 	/* cache for intercepts of the guest */
-	u16 intercept_cr_read;
-	u16 intercept_cr_write;
-	u16 intercept_dr_read;
-	u16 intercept_dr_write;
+	u32 intercept_cr;
+	u32 intercept_dr;
 	u32 intercept_exceptions;
 	u64 intercept;
 
@@ -123,7 +126,12 @@
 	u64 next_rip;
 
 	u64 host_user_msrs[NR_HOST_SAVE_USER_MSRS];
-	u64 host_gs_base;
+	struct {
+		u16 fs;
+		u16 gs;
+		u16 ldt;
+		u64 gs_base;
+	} host;
 
 	u32 *msrpm;
 
@@ -133,6 +141,7 @@
 
 	unsigned int3_injected;
 	unsigned long int3_rip;
+	u32 apf_reason;
 };
 
 #define MSR_INVALID			0xffffffffU
@@ -180,14 +189,151 @@
 static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
 				      bool has_error_code, u32 error_code);
 
+enum {
+	VMCB_INTERCEPTS, /* Intercept vectors, TSC offset,
+			    pause filter count */
+	VMCB_PERM_MAP,   /* IOPM Base and MSRPM Base */
+	VMCB_ASID,	 /* ASID */
+	VMCB_INTR,	 /* int_ctl, int_vector */
+	VMCB_NPT,        /* npt_en, nCR3, gPAT */
+	VMCB_CR,	 /* CR0, CR3, CR4, EFER */
+	VMCB_DR,         /* DR6, DR7 */
+	VMCB_DT,         /* GDT, IDT */
+	VMCB_SEG,        /* CS, DS, SS, ES, CPL */
+	VMCB_CR2,        /* CR2 only */
+	VMCB_LBR,        /* DBGCTL, BR_FROM, BR_TO, LAST_EX_FROM, LAST_EX_TO */
+	VMCB_DIRTY_MAX,
+};
+
+/* TPR and CR2 are always written before VMRUN */
+#define VMCB_ALWAYS_DIRTY_MASK	((1U << VMCB_INTR) | (1U << VMCB_CR2))
+
+static inline void mark_all_dirty(struct vmcb *vmcb)
+{
+	vmcb->control.clean = 0;
+}
+
+static inline void mark_all_clean(struct vmcb *vmcb)
+{
+	vmcb->control.clean = ((1 << VMCB_DIRTY_MAX) - 1)
+			       & ~VMCB_ALWAYS_DIRTY_MASK;
+}
+
+static inline void mark_dirty(struct vmcb *vmcb, int bit)
+{
+	vmcb->control.clean &= ~(1 << bit);
+}
+
 static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
 {
 	return container_of(vcpu, struct vcpu_svm, vcpu);
 }
 
-static inline bool is_nested(struct vcpu_svm *svm)
+static void recalc_intercepts(struct vcpu_svm *svm)
 {
-	return svm->nested.vmcb;
+	struct vmcb_control_area *c, *h;
+	struct nested_state *g;
+
+	mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
+
+	if (!is_guest_mode(&svm->vcpu))
+		return;
+
+	c = &svm->vmcb->control;
+	h = &svm->nested.hsave->control;
+	g = &svm->nested;
+
+	c->intercept_cr = h->intercept_cr | g->intercept_cr;
+	c->intercept_dr = h->intercept_dr | g->intercept_dr;
+	c->intercept_exceptions = h->intercept_exceptions | g->intercept_exceptions;
+	c->intercept = h->intercept | g->intercept;
+}
+
+static inline struct vmcb *get_host_vmcb(struct vcpu_svm *svm)
+{
+	if (is_guest_mode(&svm->vcpu))
+		return svm->nested.hsave;
+	else
+		return svm->vmcb;
+}
+
+static inline void set_cr_intercept(struct vcpu_svm *svm, int bit)
+{
+	struct vmcb *vmcb = get_host_vmcb(svm);
+
+	vmcb->control.intercept_cr |= (1U << bit);
+
+	recalc_intercepts(svm);
+}
+
+static inline void clr_cr_intercept(struct vcpu_svm *svm, int bit)
+{
+	struct vmcb *vmcb = get_host_vmcb(svm);
+
+	vmcb->control.intercept_cr &= ~(1U << bit);
+
+	recalc_intercepts(svm);
+}
+
+static inline bool is_cr_intercept(struct vcpu_svm *svm, int bit)
+{
+	struct vmcb *vmcb = get_host_vmcb(svm);
+
+	return vmcb->control.intercept_cr & (1U << bit);
+}
+
+static inline void set_dr_intercept(struct vcpu_svm *svm, int bit)
+{
+	struct vmcb *vmcb = get_host_vmcb(svm);
+
+	vmcb->control.intercept_dr |= (1U << bit);
+
+	recalc_intercepts(svm);
+}
+
+static inline void clr_dr_intercept(struct vcpu_svm *svm, int bit)
+{
+	struct vmcb *vmcb = get_host_vmcb(svm);
+
+	vmcb->control.intercept_dr &= ~(1U << bit);
+
+	recalc_intercepts(svm);
+}
+
+static inline void set_exception_intercept(struct vcpu_svm *svm, int bit)
+{
+	struct vmcb *vmcb = get_host_vmcb(svm);
+
+	vmcb->control.intercept_exceptions |= (1U << bit);
+
+	recalc_intercepts(svm);
+}
+
+static inline void clr_exception_intercept(struct vcpu_svm *svm, int bit)
+{
+	struct vmcb *vmcb = get_host_vmcb(svm);
+
+	vmcb->control.intercept_exceptions &= ~(1U << bit);
+
+	recalc_intercepts(svm);
+}
+
+static inline void set_intercept(struct vcpu_svm *svm, int bit)
+{
+	struct vmcb *vmcb = get_host_vmcb(svm);
+
+	vmcb->control.intercept |= (1ULL << bit);
+
+	recalc_intercepts(svm);
+}
+
+static inline void clr_intercept(struct vcpu_svm *svm, int bit)
+{
+	struct vmcb *vmcb = get_host_vmcb(svm);
+
+	vmcb->control.intercept &= ~(1ULL << bit);
+
+	recalc_intercepts(svm);
 }
 
 static inline void enable_gif(struct vcpu_svm *svm)
@@ -264,11 +410,6 @@
 
 #define MAX_INST_SIZE 15
 
-static inline u32 svm_has(u32 feat)
-{
-	return svm_features & feat;
-}
-
 static inline void clgi(void)
 {
 	asm volatile (__ex(SVM_CLGI));
@@ -284,16 +425,6 @@
 	asm volatile (__ex(SVM_INVLPGA) : : "a"(addr), "c"(asid));
 }
 
-static inline void force_new_asid(struct kvm_vcpu *vcpu)
-{
-	to_svm(vcpu)->asid_generation--;
-}
-
-static inline void flush_guest_tlb(struct kvm_vcpu *vcpu)
-{
-	force_new_asid(vcpu);
-}
-
 static int get_npt_level(void)
 {
 #ifdef CONFIG_X86_64
@@ -310,6 +441,7 @@
 		efer &= ~EFER_LME;
 
 	to_svm(vcpu)->vmcb->save.efer = efer | EFER_SVME;
+	mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR);
 }
 
 static int is_external_interrupt(u32 info)
@@ -347,7 +479,7 @@
 		svm->next_rip = svm->vmcb->control.next_rip;
 
 	if (!svm->next_rip) {
-		if (emulate_instruction(vcpu, 0, 0, EMULTYPE_SKIP) !=
+		if (emulate_instruction(vcpu, EMULTYPE_SKIP) !=
 				EMULATE_DONE)
 			printk(KERN_DEBUG "%s: NOP\n", __func__);
 		return;
@@ -374,7 +506,7 @@
 	    nested_svm_check_exception(svm, nr, has_error_code, error_code))
 		return;
 
-	if (nr == BP_VECTOR && !svm_has(SVM_FEATURE_NRIP)) {
+	if (nr == BP_VECTOR && !static_cpu_has(X86_FEATURE_NRIPS)) {
 		unsigned long rip, old_rip = kvm_rip_read(&svm->vcpu);
 
 		/*
@@ -670,7 +802,7 @@
 
 	svm_features = cpuid_edx(SVM_CPUID_FUNC);
 
-	if (!svm_has(SVM_FEATURE_NPT))
+	if (!boot_cpu_has(X86_FEATURE_NPT))
 		npt_enabled = false;
 
 	if (npt_enabled && !npt) {
@@ -725,13 +857,15 @@
 	struct vcpu_svm *svm = to_svm(vcpu);
 	u64 g_tsc_offset = 0;
 
-	if (is_nested(svm)) {
+	if (is_guest_mode(vcpu)) {
 		g_tsc_offset = svm->vmcb->control.tsc_offset -
 			       svm->nested.hsave->control.tsc_offset;
 		svm->nested.hsave->control.tsc_offset = offset;
 	}
 
 	svm->vmcb->control.tsc_offset = offset + g_tsc_offset;
+
+	mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
 }
 
 static void svm_adjust_tsc_offset(struct kvm_vcpu *vcpu, s64 adjustment)
@@ -739,8 +873,9 @@
 	struct vcpu_svm *svm = to_svm(vcpu);
 
 	svm->vmcb->control.tsc_offset += adjustment;
-	if (is_nested(svm))
+	if (is_guest_mode(vcpu))
 		svm->nested.hsave->control.tsc_offset += adjustment;
+	mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
 }
 
 static void init_vmcb(struct vcpu_svm *svm)
@@ -749,62 +884,62 @@
 	struct vmcb_save_area *save = &svm->vmcb->save;
 
 	svm->vcpu.fpu_active = 1;
+	svm->vcpu.arch.hflags = 0;
 
-	control->intercept_cr_read =	INTERCEPT_CR0_MASK |
-					INTERCEPT_CR3_MASK |
-					INTERCEPT_CR4_MASK;
+	set_cr_intercept(svm, INTERCEPT_CR0_READ);
+	set_cr_intercept(svm, INTERCEPT_CR3_READ);
+	set_cr_intercept(svm, INTERCEPT_CR4_READ);
+	set_cr_intercept(svm, INTERCEPT_CR0_WRITE);
+	set_cr_intercept(svm, INTERCEPT_CR3_WRITE);
+	set_cr_intercept(svm, INTERCEPT_CR4_WRITE);
+	set_cr_intercept(svm, INTERCEPT_CR8_WRITE);
 
-	control->intercept_cr_write =	INTERCEPT_CR0_MASK |
-					INTERCEPT_CR3_MASK |
-					INTERCEPT_CR4_MASK |
-					INTERCEPT_CR8_MASK;
+	set_dr_intercept(svm, INTERCEPT_DR0_READ);
+	set_dr_intercept(svm, INTERCEPT_DR1_READ);
+	set_dr_intercept(svm, INTERCEPT_DR2_READ);
+	set_dr_intercept(svm, INTERCEPT_DR3_READ);
+	set_dr_intercept(svm, INTERCEPT_DR4_READ);
+	set_dr_intercept(svm, INTERCEPT_DR5_READ);
+	set_dr_intercept(svm, INTERCEPT_DR6_READ);
+	set_dr_intercept(svm, INTERCEPT_DR7_READ);
 
-	control->intercept_dr_read =	INTERCEPT_DR0_MASK |
-					INTERCEPT_DR1_MASK |
-					INTERCEPT_DR2_MASK |
-					INTERCEPT_DR3_MASK |
-					INTERCEPT_DR4_MASK |
-					INTERCEPT_DR5_MASK |
-					INTERCEPT_DR6_MASK |
-					INTERCEPT_DR7_MASK;
+	set_dr_intercept(svm, INTERCEPT_DR0_WRITE);
+	set_dr_intercept(svm, INTERCEPT_DR1_WRITE);
+	set_dr_intercept(svm, INTERCEPT_DR2_WRITE);
+	set_dr_intercept(svm, INTERCEPT_DR3_WRITE);
+	set_dr_intercept(svm, INTERCEPT_DR4_WRITE);
+	set_dr_intercept(svm, INTERCEPT_DR5_WRITE);
+	set_dr_intercept(svm, INTERCEPT_DR6_WRITE);
+	set_dr_intercept(svm, INTERCEPT_DR7_WRITE);
 
-	control->intercept_dr_write =	INTERCEPT_DR0_MASK |
-					INTERCEPT_DR1_MASK |
-					INTERCEPT_DR2_MASK |
-					INTERCEPT_DR3_MASK |
-					INTERCEPT_DR4_MASK |
-					INTERCEPT_DR5_MASK |
-					INTERCEPT_DR6_MASK |
-					INTERCEPT_DR7_MASK;
+	set_exception_intercept(svm, PF_VECTOR);
+	set_exception_intercept(svm, UD_VECTOR);
+	set_exception_intercept(svm, MC_VECTOR);
 
-	control->intercept_exceptions = (1 << PF_VECTOR) |
-					(1 << UD_VECTOR) |
-					(1 << MC_VECTOR);
-
-
-	control->intercept =	(1ULL << INTERCEPT_INTR) |
-				(1ULL << INTERCEPT_NMI) |
-				(1ULL << INTERCEPT_SMI) |
-				(1ULL << INTERCEPT_SELECTIVE_CR0) |
-				(1ULL << INTERCEPT_CPUID) |
-				(1ULL << INTERCEPT_INVD) |
-				(1ULL << INTERCEPT_HLT) |
-				(1ULL << INTERCEPT_INVLPG) |
-				(1ULL << INTERCEPT_INVLPGA) |
-				(1ULL << INTERCEPT_IOIO_PROT) |
-				(1ULL << INTERCEPT_MSR_PROT) |
-				(1ULL << INTERCEPT_TASK_SWITCH) |
-				(1ULL << INTERCEPT_SHUTDOWN) |
-				(1ULL << INTERCEPT_VMRUN) |
-				(1ULL << INTERCEPT_VMMCALL) |
-				(1ULL << INTERCEPT_VMLOAD) |
-				(1ULL << INTERCEPT_VMSAVE) |
-				(1ULL << INTERCEPT_STGI) |
-				(1ULL << INTERCEPT_CLGI) |
-				(1ULL << INTERCEPT_SKINIT) |
-				(1ULL << INTERCEPT_WBINVD) |
-				(1ULL << INTERCEPT_MONITOR) |
-				(1ULL << INTERCEPT_MWAIT);
+	set_intercept(svm, INTERCEPT_INTR);
+	set_intercept(svm, INTERCEPT_NMI);
+	set_intercept(svm, INTERCEPT_SMI);
+	set_intercept(svm, INTERCEPT_SELECTIVE_CR0);
+	set_intercept(svm, INTERCEPT_CPUID);
+	set_intercept(svm, INTERCEPT_INVD);
+	set_intercept(svm, INTERCEPT_HLT);
+	set_intercept(svm, INTERCEPT_INVLPG);
+	set_intercept(svm, INTERCEPT_INVLPGA);
+	set_intercept(svm, INTERCEPT_IOIO_PROT);
+	set_intercept(svm, INTERCEPT_MSR_PROT);
+	set_intercept(svm, INTERCEPT_TASK_SWITCH);
+	set_intercept(svm, INTERCEPT_SHUTDOWN);
+	set_intercept(svm, INTERCEPT_VMRUN);
+	set_intercept(svm, INTERCEPT_VMMCALL);
+	set_intercept(svm, INTERCEPT_VMLOAD);
+	set_intercept(svm, INTERCEPT_VMSAVE);
+	set_intercept(svm, INTERCEPT_STGI);
+	set_intercept(svm, INTERCEPT_CLGI);
+	set_intercept(svm, INTERCEPT_SKINIT);
+	set_intercept(svm, INTERCEPT_WBINVD);
+	set_intercept(svm, INTERCEPT_MONITOR);
+	set_intercept(svm, INTERCEPT_MWAIT);
+	set_intercept(svm, INTERCEPT_XSETBV);
 
 	control->iopm_base_pa = iopm_base;
 	control->msrpm_base_pa = __pa(svm->msrpm);
@@ -855,25 +990,27 @@
 	if (npt_enabled) {
 		/* Setup VMCB for Nested Paging */
 		control->nested_ctl = 1;
-		control->intercept &= ~((1ULL << INTERCEPT_TASK_SWITCH) |
-					(1ULL << INTERCEPT_INVLPG));
-		control->intercept_exceptions &= ~(1 << PF_VECTOR);
-		control->intercept_cr_read &= ~INTERCEPT_CR3_MASK;
-		control->intercept_cr_write &= ~INTERCEPT_CR3_MASK;
+		clr_intercept(svm, INTERCEPT_TASK_SWITCH);
+		clr_intercept(svm, INTERCEPT_INVLPG);
+		clr_exception_intercept(svm, PF_VECTOR);
+		clr_cr_intercept(svm, INTERCEPT_CR3_READ);
+		clr_cr_intercept(svm, INTERCEPT_CR3_WRITE);
 		save->g_pat = 0x0007040600070406ULL;
 		save->cr3 = 0;
 		save->cr4 = 0;
 	}
-	force_new_asid(&svm->vcpu);
+	svm->asid_generation = 0;
 
 	svm->nested.vmcb = 0;
 	svm->vcpu.arch.hflags = 0;
 
-	if (svm_has(SVM_FEATURE_PAUSE_FILTER)) {
+	if (boot_cpu_has(X86_FEATURE_PAUSEFILTER)) {
 		control->pause_filter_count = 3000;
-		control->intercept |= (1ULL << INTERCEPT_PAUSE);
+		set_intercept(svm, INTERCEPT_PAUSE);
 	}
 
+	mark_all_dirty(svm->vmcb);
+
 	enable_gif(svm);
 }
 
@@ -990,8 +1127,16 @@
 
 	if (unlikely(cpu != vcpu->cpu)) {
 		svm->asid_generation = 0;
+		mark_all_dirty(svm->vmcb);
 	}
 
+#ifdef CONFIG_X86_64
+	rdmsrl(MSR_GS_BASE, to_svm(vcpu)->host.gs_base);
+#endif
+	savesegment(fs, svm->host.fs);
+	savesegment(gs, svm->host.gs);
+	svm->host.ldt = kvm_read_ldt();
+
 	for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
 		rdmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
 }
@@ -1002,6 +1147,14 @@
 	int i;
 
 	++vcpu->stat.host_state_reload;
+	kvm_load_ldt(svm->host.ldt);
+#ifdef CONFIG_X86_64
+	loadsegment(fs, svm->host.fs);
+	load_gs_index(svm->host.gs);
+	wrmsrl(MSR_KERNEL_GS_BASE, current->thread.gs);
+#else
+	loadsegment(gs, svm->host.gs);
+#endif
 	for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
 		wrmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
 }
@@ -1021,7 +1174,7 @@
 	switch (reg) {
 	case VCPU_EXREG_PDPTR:
 		BUG_ON(!npt_enabled);
-		load_pdptrs(vcpu, vcpu->arch.walk_mmu, vcpu->arch.cr3);
+		load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu));
 		break;
 	default:
 		BUG();
@@ -1030,12 +1183,12 @@
 
 static void svm_set_vintr(struct vcpu_svm *svm)
 {
-	svm->vmcb->control.intercept |= 1ULL << INTERCEPT_VINTR;
+	set_intercept(svm, INTERCEPT_VINTR);
 }
 
 static void svm_clear_vintr(struct vcpu_svm *svm)
 {
-	svm->vmcb->control.intercept &= ~(1ULL << INTERCEPT_VINTR);
+	clr_intercept(svm, INTERCEPT_VINTR);
 }
 
 static struct vmcb_seg *svm_seg(struct kvm_vcpu *vcpu, int seg)
@@ -1150,6 +1303,7 @@
 
 	svm->vmcb->save.idtr.limit = dt->size;
 	svm->vmcb->save.idtr.base = dt->address ;
+	mark_dirty(svm->vmcb, VMCB_DT);
 }
 
 static void svm_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
@@ -1166,19 +1320,23 @@
 
 	svm->vmcb->save.gdtr.limit = dt->size;
 	svm->vmcb->save.gdtr.base = dt->address ;
+	mark_dirty(svm->vmcb, VMCB_DT);
 }
 
 static void svm_decache_cr0_guest_bits(struct kvm_vcpu *vcpu)
 {
 }
 
+static void svm_decache_cr3(struct kvm_vcpu *vcpu)
+{
+}
+
 static void svm_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
 {
 }
 
 static void update_cr0_intercept(struct vcpu_svm *svm)
 {
-	struct vmcb *vmcb = svm->vmcb;
 	ulong gcr0 = svm->vcpu.arch.cr0;
 	u64 *hcr0 = &svm->vmcb->save.cr0;
 
@@ -1188,27 +1346,14 @@
 		*hcr0 = (*hcr0 & ~SVM_CR0_SELECTIVE_MASK)
 			| (gcr0 & SVM_CR0_SELECTIVE_MASK);
 
+	mark_dirty(svm->vmcb, VMCB_CR);
 
 	if (gcr0 == *hcr0 && svm->vcpu.fpu_active) {
-		vmcb->control.intercept_cr_read &= ~INTERCEPT_CR0_MASK;
-		vmcb->control.intercept_cr_write &= ~INTERCEPT_CR0_MASK;
-		if (is_nested(svm)) {
-			struct vmcb *hsave = svm->nested.hsave;
-
-			hsave->control.intercept_cr_read  &= ~INTERCEPT_CR0_MASK;
-			hsave->control.intercept_cr_write &= ~INTERCEPT_CR0_MASK;
-			vmcb->control.intercept_cr_read  |= svm->nested.intercept_cr_read;
-			vmcb->control.intercept_cr_write |= svm->nested.intercept_cr_write;
-		}
+		clr_cr_intercept(svm, INTERCEPT_CR0_READ);
+		clr_cr_intercept(svm, INTERCEPT_CR0_WRITE);
 	} else {
-		svm->vmcb->control.intercept_cr_read |= INTERCEPT_CR0_MASK;
-		svm->vmcb->control.intercept_cr_write |= INTERCEPT_CR0_MASK;
-		if (is_nested(svm)) {
-			struct vmcb *hsave = svm->nested.hsave;
-
-			hsave->control.intercept_cr_read |= INTERCEPT_CR0_MASK;
-			hsave->control.intercept_cr_write |= INTERCEPT_CR0_MASK;
-		}
+		set_cr_intercept(svm, INTERCEPT_CR0_READ);
+		set_cr_intercept(svm, INTERCEPT_CR0_WRITE);
 	}
 }
 
@@ -1216,7 +1361,7 @@
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 
-	if (is_nested(svm)) {
+	if (is_guest_mode(vcpu)) {
 		/*
 		 * We are here because we run in nested mode, the host kvm
 		 * intercepts cr0 writes but the l1 hypervisor does not.
@@ -1268,6 +1413,7 @@
 	 */
 	cr0 &= ~(X86_CR0_CD | X86_CR0_NW);
 	svm->vmcb->save.cr0 = cr0;
+	mark_dirty(svm->vmcb, VMCB_CR);
 	update_cr0_intercept(svm);
 }
 
@@ -1277,13 +1423,14 @@
 	unsigned long old_cr4 = to_svm(vcpu)->vmcb->save.cr4;
 
 	if (npt_enabled && ((old_cr4 ^ cr4) & X86_CR4_PGE))
-		force_new_asid(vcpu);
+		svm_flush_tlb(vcpu);
 
 	vcpu->arch.cr4 = cr4;
 	if (!npt_enabled)
 		cr4 |= X86_CR4_PAE;
 	cr4 |= host_cr4_mce;
 	to_svm(vcpu)->vmcb->save.cr4 = cr4;
+	mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR);
 }
 
 static void svm_set_segment(struct kvm_vcpu *vcpu,
@@ -1312,26 +1459,25 @@
 			= (svm->vmcb->save.cs.attrib
 			   >> SVM_SELECTOR_DPL_SHIFT) & 3;
 
+	mark_dirty(svm->vmcb, VMCB_SEG);
 }
 
 static void update_db_intercept(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 
-	svm->vmcb->control.intercept_exceptions &=
-		~((1 << DB_VECTOR) | (1 << BP_VECTOR));
+	clr_exception_intercept(svm, DB_VECTOR);
+	clr_exception_intercept(svm, BP_VECTOR);
 
 	if (svm->nmi_singlestep)
-		svm->vmcb->control.intercept_exceptions |= (1 << DB_VECTOR);
+		set_exception_intercept(svm, DB_VECTOR);
 
 	if (vcpu->guest_debug & KVM_GUESTDBG_ENABLE) {
 		if (vcpu->guest_debug &
 		    (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))
-			svm->vmcb->control.intercept_exceptions |=
-				1 << DB_VECTOR;
+			set_exception_intercept(svm, DB_VECTOR);
 		if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
-			svm->vmcb->control.intercept_exceptions |=
-				1 << BP_VECTOR;
+			set_exception_intercept(svm, BP_VECTOR);
 	} else
 		vcpu->guest_debug = 0;
 }
@@ -1345,23 +1491,11 @@
 	else
 		svm->vmcb->save.dr7 = vcpu->arch.dr7;
 
+	mark_dirty(svm->vmcb, VMCB_DR);
+
 	update_db_intercept(vcpu);
 }
 
-static void load_host_msrs(struct kvm_vcpu *vcpu)
-{
-#ifdef CONFIG_X86_64
-	wrmsrl(MSR_GS_BASE, to_svm(vcpu)->host_gs_base);
-#endif
-}
-
-static void save_host_msrs(struct kvm_vcpu *vcpu)
-{
-#ifdef CONFIG_X86_64
-	rdmsrl(MSR_GS_BASE, to_svm(vcpu)->host_gs_base);
-#endif
-}
-
 static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *sd)
 {
 	if (sd->next_asid > sd->max_asid) {
@@ -1372,6 +1506,8 @@
 
 	svm->asid_generation = sd->asid_generation;
 	svm->vmcb->control.asid = sd->next_asid++;
+
+	mark_dirty(svm->vmcb, VMCB_ASID);
 }
 
 static void svm_set_dr7(struct kvm_vcpu *vcpu, unsigned long value)
@@ -1379,20 +1515,40 @@
 	struct vcpu_svm *svm = to_svm(vcpu);
 
 	svm->vmcb->save.dr7 = value;
+	mark_dirty(svm->vmcb, VMCB_DR);
 }
 
 static int pf_interception(struct vcpu_svm *svm)
 {
-	u64 fault_address;
+	u64 fault_address = svm->vmcb->control.exit_info_2;
 	u32 error_code;
+	int r = 1;
 
-	fault_address  = svm->vmcb->control.exit_info_2;
-	error_code = svm->vmcb->control.exit_info_1;
+	switch (svm->apf_reason) {
+	default:
+		error_code = svm->vmcb->control.exit_info_1;
 
-	trace_kvm_page_fault(fault_address, error_code);
-	if (!npt_enabled && kvm_event_needs_reinjection(&svm->vcpu))
-		kvm_mmu_unprotect_page_virt(&svm->vcpu, fault_address);
-	return kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code);
+		trace_kvm_page_fault(fault_address, error_code);
+		if (!npt_enabled && kvm_event_needs_reinjection(&svm->vcpu))
+			kvm_mmu_unprotect_page_virt(&svm->vcpu, fault_address);
+		r = kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code,
+			svm->vmcb->control.insn_bytes,
+			svm->vmcb->control.insn_len);
+		break;
+	case KVM_PV_REASON_PAGE_NOT_PRESENT:
+		svm->apf_reason = 0;
+		local_irq_disable();
+		kvm_async_pf_task_wait(fault_address);
+		local_irq_enable();
+		break;
+	case KVM_PV_REASON_PAGE_READY:
+		svm->apf_reason = 0;
+		local_irq_disable();
+		kvm_async_pf_task_wake(fault_address);
+		local_irq_enable();
+		break;
+	}
+	return r;
 }
 
 static int db_interception(struct vcpu_svm *svm)
@@ -1440,7 +1596,7 @@
 {
 	int er;
 
-	er = emulate_instruction(&svm->vcpu, 0, 0, EMULTYPE_TRAP_UD);
+	er = emulate_instruction(&svm->vcpu, EMULTYPE_TRAP_UD);
 	if (er != EMULATE_DONE)
 		kvm_queue_exception(&svm->vcpu, UD_VECTOR);
 	return 1;
@@ -1449,21 +1605,8 @@
 static void svm_fpu_activate(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
-	u32 excp;
 
-	if (is_nested(svm)) {
-		u32 h_excp, n_excp;
-
-		h_excp  = svm->nested.hsave->control.intercept_exceptions;
-		n_excp  = svm->nested.intercept_exceptions;
-		h_excp &= ~(1 << NM_VECTOR);
-		excp    = h_excp | n_excp;
-	} else {
-		excp  = svm->vmcb->control.intercept_exceptions;
-		excp &= ~(1 << NM_VECTOR);
-	}
-
-	svm->vmcb->control.intercept_exceptions = excp;
+	clr_exception_intercept(svm, NM_VECTOR);
 
 	svm->vcpu.fpu_active = 1;
 	update_cr0_intercept(svm);
@@ -1570,7 +1713,7 @@
 	string = (io_info & SVM_IOIO_STR_MASK) != 0;
 	in = (io_info & SVM_IOIO_TYPE_MASK) != 0;
 	if (string || in)
-		return emulate_instruction(vcpu, 0, 0, 0) == EMULATE_DONE;
+		return emulate_instruction(vcpu, 0) == EMULATE_DONE;
 
 	port = io_info >> 16;
 	size = (io_info & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT;
@@ -1624,17 +1767,19 @@
 	struct vcpu_svm *svm = to_svm(vcpu);
 
 	svm->vmcb->control.nested_cr3 = root;
-	force_new_asid(vcpu);
+	mark_dirty(svm->vmcb, VMCB_NPT);
+	svm_flush_tlb(vcpu);
 }
 
-static void nested_svm_inject_npf_exit(struct kvm_vcpu *vcpu)
+static void nested_svm_inject_npf_exit(struct kvm_vcpu *vcpu,
+				       struct x86_exception *fault)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 
 	svm->vmcb->control.exit_code = SVM_EXIT_NPF;
 	svm->vmcb->control.exit_code_hi = 0;
-	svm->vmcb->control.exit_info_1 = vcpu->arch.fault.error_code;
-	svm->vmcb->control.exit_info_2 = vcpu->arch.fault.address;
+	svm->vmcb->control.exit_info_1 = fault->error_code;
+	svm->vmcb->control.exit_info_2 = fault->address;
 
 	nested_svm_vmexit(svm);
 }
@@ -1680,7 +1825,7 @@
 {
 	int vmexit;
 
-	if (!is_nested(svm))
+	if (!is_guest_mode(&svm->vcpu))
 		return 0;
 
 	svm->vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + nr;
@@ -1698,7 +1843,7 @@
 /* This function returns true if it is save to enable the irq window */
 static inline bool nested_svm_intr(struct vcpu_svm *svm)
 {
-	if (!is_nested(svm))
+	if (!is_guest_mode(&svm->vcpu))
 		return true;
 
 	if (!(svm->vcpu.arch.hflags & HF_VINTR_MASK))
@@ -1737,7 +1882,7 @@
 /* This function returns true if it is safe to enable the nmi window */
 static inline bool nested_svm_nmi(struct vcpu_svm *svm)
 {
-	if (!is_nested(svm))
+	if (!is_guest_mode(&svm->vcpu))
 		return true;
 
 	if (!(svm->nested.intercept & (1ULL << INTERCEPT_NMI)))
@@ -1836,8 +1981,8 @@
 			return NESTED_EXIT_HOST;
 		break;
 	case SVM_EXIT_EXCP_BASE + PF_VECTOR:
-		/* When we're shadowing, trap PFs */
-		if (!npt_enabled)
+		/* When we're shadowing, trap PFs, but not async PF */
+		if (!npt_enabled && svm->apf_reason == 0)
 			return NESTED_EXIT_HOST;
 		break;
 	case SVM_EXIT_EXCP_BASE + NM_VECTOR:
@@ -1865,27 +2010,15 @@
 	case SVM_EXIT_IOIO:
 		vmexit = nested_svm_intercept_ioio(svm);
 		break;
-	case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR8: {
-		u32 cr_bits = 1 << (exit_code - SVM_EXIT_READ_CR0);
-		if (svm->nested.intercept_cr_read & cr_bits)
+	case SVM_EXIT_READ_CR0 ... SVM_EXIT_WRITE_CR8: {
+		u32 bit = 1U << (exit_code - SVM_EXIT_READ_CR0);
+		if (svm->nested.intercept_cr & bit)
 			vmexit = NESTED_EXIT_DONE;
 		break;
 	}
-	case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR8: {
-		u32 cr_bits = 1 << (exit_code - SVM_EXIT_WRITE_CR0);
-		if (svm->nested.intercept_cr_write & cr_bits)
-			vmexit = NESTED_EXIT_DONE;
-		break;
-	}
-	case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR7: {
-		u32 dr_bits = 1 << (exit_code - SVM_EXIT_READ_DR0);
-		if (svm->nested.intercept_dr_read & dr_bits)
-			vmexit = NESTED_EXIT_DONE;
-		break;
-	}
-	case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR7: {
-		u32 dr_bits = 1 << (exit_code - SVM_EXIT_WRITE_DR0);
-		if (svm->nested.intercept_dr_write & dr_bits)
+	case SVM_EXIT_READ_DR0 ... SVM_EXIT_WRITE_DR7: {
+		u32 bit = 1U << (exit_code - SVM_EXIT_READ_DR0);
+		if (svm->nested.intercept_dr & bit)
 			vmexit = NESTED_EXIT_DONE;
 		break;
 	}
@@ -1893,6 +2026,10 @@
 		u32 excp_bits = 1 << (exit_code - SVM_EXIT_EXCP_BASE);
 		if (svm->nested.intercept_exceptions & excp_bits)
 			vmexit = NESTED_EXIT_DONE;
+		/* an async page fault always causes a vmexit */
+		else if ((exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR) &&
+			 svm->apf_reason != 0)
+			vmexit = NESTED_EXIT_DONE;
 		break;
 	}
 	case SVM_EXIT_ERR: {
@@ -1926,10 +2063,8 @@
 	struct vmcb_control_area *dst  = &dst_vmcb->control;
 	struct vmcb_control_area *from = &from_vmcb->control;
 
-	dst->intercept_cr_read    = from->intercept_cr_read;
-	dst->intercept_cr_write   = from->intercept_cr_write;
-	dst->intercept_dr_read    = from->intercept_dr_read;
-	dst->intercept_dr_write   = from->intercept_dr_write;
+	dst->intercept_cr         = from->intercept_cr;
+	dst->intercept_dr         = from->intercept_dr;
 	dst->intercept_exceptions = from->intercept_exceptions;
 	dst->intercept            = from->intercept;
 	dst->iopm_base_pa         = from->iopm_base_pa;
@@ -1970,7 +2105,8 @@
 	if (!nested_vmcb)
 		return 1;
 
-	/* Exit nested SVM mode */
+	/* Exit Guest-Mode */
+	leave_guest_mode(&svm->vcpu);
 	svm->nested.vmcb = 0;
 
 	/* Give the current vmcb to the guest */
@@ -1984,7 +2120,7 @@
 	nested_vmcb->save.idtr   = vmcb->save.idtr;
 	nested_vmcb->save.efer   = svm->vcpu.arch.efer;
 	nested_vmcb->save.cr0    = kvm_read_cr0(&svm->vcpu);
-	nested_vmcb->save.cr3    = svm->vcpu.arch.cr3;
+	nested_vmcb->save.cr3    = kvm_read_cr3(&svm->vcpu);
 	nested_vmcb->save.cr2    = vmcb->save.cr2;
 	nested_vmcb->save.cr4    = svm->vcpu.arch.cr4;
 	nested_vmcb->save.rflags = vmcb->save.rflags;
@@ -2061,6 +2197,8 @@
 	svm->vmcb->save.cpl = 0;
 	svm->vmcb->control.exit_int_info = 0;
 
+	mark_all_dirty(svm->vmcb);
+
 	nested_svm_unmap(page);
 
 	nested_svm_uninit_mmu_context(&svm->vcpu);
@@ -2148,8 +2286,8 @@
 			       nested_vmcb->control.event_inj,
 			       nested_vmcb->control.nested_ctl);
 
-	trace_kvm_nested_intercepts(nested_vmcb->control.intercept_cr_read,
-				    nested_vmcb->control.intercept_cr_write,
+	trace_kvm_nested_intercepts(nested_vmcb->control.intercept_cr & 0xffff,
+				    nested_vmcb->control.intercept_cr >> 16,
 				    nested_vmcb->control.intercept_exceptions,
 				    nested_vmcb->control.intercept);
 
@@ -2177,7 +2315,7 @@
 	if (npt_enabled)
 		hsave->save.cr3    = vmcb->save.cr3;
 	else
-		hsave->save.cr3    = svm->vcpu.arch.cr3;
+		hsave->save.cr3    = kvm_read_cr3(&svm->vcpu);
 
 	copy_vmcb_control_area(hsave, vmcb);
 
@@ -2229,14 +2367,12 @@
 	svm->nested.vmcb_iopm  = nested_vmcb->control.iopm_base_pa  & ~0x0fffULL;
 
 	/* cache intercepts */
-	svm->nested.intercept_cr_read    = nested_vmcb->control.intercept_cr_read;
-	svm->nested.intercept_cr_write   = nested_vmcb->control.intercept_cr_write;
-	svm->nested.intercept_dr_read    = nested_vmcb->control.intercept_dr_read;
-	svm->nested.intercept_dr_write   = nested_vmcb->control.intercept_dr_write;
+	svm->nested.intercept_cr         = nested_vmcb->control.intercept_cr;
+	svm->nested.intercept_dr         = nested_vmcb->control.intercept_dr;
 	svm->nested.intercept_exceptions = nested_vmcb->control.intercept_exceptions;
 	svm->nested.intercept            = nested_vmcb->control.intercept;
 
-	force_new_asid(&svm->vcpu);
+	svm_flush_tlb(&svm->vcpu);
 	svm->vmcb->control.int_ctl = nested_vmcb->control.int_ctl | V_INTR_MASKING_MASK;
 	if (nested_vmcb->control.int_ctl & V_INTR_MASKING_MASK)
 		svm->vcpu.arch.hflags |= HF_VINTR_MASK;
@@ -2245,29 +2381,12 @@
 
 	if (svm->vcpu.arch.hflags & HF_VINTR_MASK) {
 		/* We only want the cr8 intercept bits of the guest */
-		svm->vmcb->control.intercept_cr_read &= ~INTERCEPT_CR8_MASK;
-		svm->vmcb->control.intercept_cr_write &= ~INTERCEPT_CR8_MASK;
+		clr_cr_intercept(svm, INTERCEPT_CR8_READ);
+		clr_cr_intercept(svm, INTERCEPT_CR8_WRITE);
 	}
 
 	/* We don't want to see VMMCALLs from a nested guest */
-	svm->vmcb->control.intercept &= ~(1ULL << INTERCEPT_VMMCALL);
-
-	/*
-	 * We don't want a nested guest to be more powerful than the guest, so
-	 * all intercepts are ORed
-	 */
-	svm->vmcb->control.intercept_cr_read |=
-		nested_vmcb->control.intercept_cr_read;
-	svm->vmcb->control.intercept_cr_write |=
-		nested_vmcb->control.intercept_cr_write;
-	svm->vmcb->control.intercept_dr_read |=
-		nested_vmcb->control.intercept_dr_read;
-	svm->vmcb->control.intercept_dr_write |=
-		nested_vmcb->control.intercept_dr_write;
-	svm->vmcb->control.intercept_exceptions |=
-		nested_vmcb->control.intercept_exceptions;
-
-	svm->vmcb->control.intercept |= nested_vmcb->control.intercept;
+	clr_intercept(svm, INTERCEPT_VMMCALL);
 
 	svm->vmcb->control.lbr_ctl = nested_vmcb->control.lbr_ctl;
 	svm->vmcb->control.int_vector = nested_vmcb->control.int_vector;
@@ -2278,11 +2397,21 @@
 
 	nested_svm_unmap(page);
 
-	/* nested_vmcb is our indicator if nested SVM is activated */
+	/* Enter Guest-Mode */
+	enter_guest_mode(&svm->vcpu);
+
+	/*
+	 * Merge guest and host intercepts - must be called with vcpu in
+	 * guest-mode to take effect here
+	 */
+	recalc_intercepts(svm);
+
 	svm->nested.vmcb = vmcb_gpa;
 
 	enable_gif(svm);
 
+	mark_all_dirty(svm->vmcb);
+
 	return true;
 }
 
@@ -2400,6 +2529,8 @@
 	svm_clear_vintr(svm);
 	svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;
 
+	mark_dirty(svm->vmcb, VMCB_INTR);
+
 	return 1;
 }
 
@@ -2426,6 +2557,19 @@
 	return 1;
 }
 
+static int xsetbv_interception(struct vcpu_svm *svm)
+{
+	u64 new_bv = kvm_read_edx_eax(&svm->vcpu);
+	u32 index = kvm_register_read(&svm->vcpu, VCPU_REGS_RCX);
+
+	if (kvm_set_xcr(&svm->vcpu, index, new_bv) == 0) {
+		svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
+		skip_emulated_instruction(&svm->vcpu);
+	}
+
+	return 1;
+}
+
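For context, the exit handled above is raised by the guest executing XSETBV (opcode 0f 01 d1, hence the +3 added to next_rip), with ECX selecting the XCR and EDX:EAX carrying the new value. A minimal guest-side sketch, purely illustrative and not part of this patch:

	/* Illustrative guest-side sequence that would trigger xsetbv_interception().
	 * The helper name is hypothetical; only the instruction encoding matters. */
	static inline void guest_write_xcr0(u64 val)
	{
		u32 eax = (u32)val;
		u32 edx = (u32)(val >> 32);

		/* xsetbv: ECX = XCR index (0 for XCR0), EDX:EAX = new value */
		asm volatile(".byte 0x0f, 0x01, 0xd1"
			     : : "a" (eax), "c" (0), "d" (edx));
	}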
 static int invalid_op_interception(struct vcpu_svm *svm)
 {
 	kvm_queue_exception(&svm->vcpu, UD_VECTOR);
@@ -2507,19 +2651,92 @@
 static int iret_interception(struct vcpu_svm *svm)
 {
 	++svm->vcpu.stat.nmi_window_exits;
-	svm->vmcb->control.intercept &= ~(1ULL << INTERCEPT_IRET);
+	clr_intercept(svm, INTERCEPT_IRET);
 	svm->vcpu.arch.hflags |= HF_IRET_MASK;
 	return 1;
 }
 
 static int invlpg_interception(struct vcpu_svm *svm)
 {
-	return emulate_instruction(&svm->vcpu, 0, 0, 0) == EMULATE_DONE;
+	if (!static_cpu_has(X86_FEATURE_DECODEASSISTS))
+		return emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE;
+
+	kvm_mmu_invlpg(&svm->vcpu, svm->vmcb->control.exit_info_1);
+	skip_emulated_instruction(&svm->vcpu);
+	return 1;
 }
 
 static int emulate_on_interception(struct vcpu_svm *svm)
 {
-	return emulate_instruction(&svm->vcpu, 0, 0, 0) == EMULATE_DONE;
+	return emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE;
+}
+
+#define CR_VALID (1ULL << 63)
+
+static int cr_interception(struct vcpu_svm *svm)
+{
+	int reg, cr;
+	unsigned long val;
+	int err;
+
+	if (!static_cpu_has(X86_FEATURE_DECODEASSISTS))
+		return emulate_on_interception(svm);
+
+	if (unlikely((svm->vmcb->control.exit_info_1 & CR_VALID) == 0))
+		return emulate_on_interception(svm);
+
+	reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK;
+	cr = svm->vmcb->control.exit_code - SVM_EXIT_READ_CR0;
+
+	err = 0;
+	if (cr >= 16) { /* mov to cr */
+		cr -= 16;
+		val = kvm_register_read(&svm->vcpu, reg);
+		switch (cr) {
+		case 0:
+			err = kvm_set_cr0(&svm->vcpu, val);
+			break;
+		case 3:
+			err = kvm_set_cr3(&svm->vcpu, val);
+			break;
+		case 4:
+			err = kvm_set_cr4(&svm->vcpu, val);
+			break;
+		case 8:
+			err = kvm_set_cr8(&svm->vcpu, val);
+			break;
+		default:
+			WARN(1, "unhandled write to CR%d", cr);
+			kvm_queue_exception(&svm->vcpu, UD_VECTOR);
+			return 1;
+		}
+	} else { /* mov from cr */
+		switch (cr) {
+		case 0:
+			val = kvm_read_cr0(&svm->vcpu);
+			break;
+		case 2:
+			val = svm->vcpu.arch.cr2;
+			break;
+		case 3:
+			val = kvm_read_cr3(&svm->vcpu);
+			break;
+		case 4:
+			val = kvm_read_cr4(&svm->vcpu);
+			break;
+		case 8:
+			val = kvm_get_cr8(&svm->vcpu);
+			break;
+		default:
+			WARN(1, "unhandled read from CR%d", cr);
+			kvm_queue_exception(&svm->vcpu, UD_VECTOR);
+			return 1;
+		}
+		kvm_register_write(&svm->vcpu, reg, val);
+	}
+	kvm_complete_insn_gp(&svm->vcpu, err);
+
+	return 1;
 }
 
 static int cr0_write_interception(struct vcpu_svm *svm)
@@ -2527,7 +2744,7 @@
 	struct kvm_vcpu *vcpu = &svm->vcpu;
 	int r;
 
-	r = emulate_instruction(&svm->vcpu, 0, 0, 0);
+	r = cr_interception(svm);
 
 	if (svm->nested.vmexit_rip) {
 		kvm_register_write(vcpu, VCPU_REGS_RIP, svm->nested.vmexit_rip);
@@ -2536,22 +2753,47 @@
 		svm->nested.vmexit_rip = 0;
 	}
 
-	return r == EMULATE_DONE;
+	return r;
+}
+
+static int dr_interception(struct vcpu_svm *svm)
+{
+	int reg, dr;
+	unsigned long val;
+	int err;
+
+	if (!boot_cpu_has(X86_FEATURE_DECODEASSISTS))
+		return emulate_on_interception(svm);
+
+	reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK;
+	dr = svm->vmcb->control.exit_code - SVM_EXIT_READ_DR0;
+
+	if (dr >= 16) { /* mov to DRn */
+		val = kvm_register_read(&svm->vcpu, reg);
+		kvm_set_dr(&svm->vcpu, dr - 16, val);
+	} else {
+		err = kvm_get_dr(&svm->vcpu, dr, &val);
+		if (!err)
+			kvm_register_write(&svm->vcpu, reg, val);
+	}
+
+	return 1;
 }
 
 static int cr8_write_interception(struct vcpu_svm *svm)
 {
 	struct kvm_run *kvm_run = svm->vcpu.run;
+	int r;
 
 	u8 cr8_prev = kvm_get_cr8(&svm->vcpu);
 	/* instruction emulation calls kvm_set_cr8() */
-	emulate_instruction(&svm->vcpu, 0, 0, 0);
+	r = cr_interception(svm);
 	if (irqchip_in_kernel(svm->vcpu.kvm)) {
-		svm->vmcb->control.intercept_cr_write &= ~INTERCEPT_CR8_MASK;
-		return 1;
+		clr_cr_intercept(svm, INTERCEPT_CR8_WRITE);
+		return r;
 	}
 	if (cr8_prev <= kvm_get_cr8(&svm->vcpu))
-		return 1;
+		return r;
 	kvm_run->exit_reason = KVM_EXIT_SET_TPR;
 	return 0;
 }
@@ -2562,14 +2804,9 @@
 
 	switch (ecx) {
 	case MSR_IA32_TSC: {
-		u64 tsc_offset;
+		struct vmcb *vmcb = get_host_vmcb(svm);
 
-		if (is_nested(svm))
-			tsc_offset = svm->nested.hsave->control.tsc_offset;
-		else
-			tsc_offset = svm->vmcb->control.tsc_offset;
-
-		*data = tsc_offset + native_read_tsc();
+		*data = vmcb->control.tsc_offset + native_read_tsc();
 		break;
 	}
 	case MSR_STAR:
@@ -2714,7 +2951,7 @@
 		svm->vmcb->save.sysenter_esp = data;
 		break;
 	case MSR_IA32_DEBUGCTLMSR:
-		if (!svm_has(SVM_FEATURE_LBRV)) {
+		if (!boot_cpu_has(X86_FEATURE_LBRV)) {
 			pr_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTL 0x%llx, nop\n",
 					__func__, data);
 			break;
@@ -2723,6 +2960,7 @@
 			return 1;
 
 		svm->vmcb->save.dbgctl = data;
+		mark_dirty(svm->vmcb, VMCB_LBR);
 		if (data & (1ULL<<0))
 			svm_enable_lbrv(svm);
 		else
@@ -2775,6 +3013,7 @@
 	kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
 	svm_clear_vintr(svm);
 	svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;
+	mark_dirty(svm->vmcb, VMCB_INTR);
 	/*
 	 * If the user space waits to inject interrupts, exit as soon as
 	 * possible
@@ -2797,31 +3036,31 @@
 }
 
 static int (*svm_exit_handlers[])(struct vcpu_svm *svm) = {
-	[SVM_EXIT_READ_CR0]			= emulate_on_interception,
-	[SVM_EXIT_READ_CR3]			= emulate_on_interception,
-	[SVM_EXIT_READ_CR4]			= emulate_on_interception,
-	[SVM_EXIT_READ_CR8]			= emulate_on_interception,
+	[SVM_EXIT_READ_CR0]			= cr_interception,
+	[SVM_EXIT_READ_CR3]			= cr_interception,
+	[SVM_EXIT_READ_CR4]			= cr_interception,
+	[SVM_EXIT_READ_CR8]			= cr_interception,
 	[SVM_EXIT_CR0_SEL_WRITE]		= emulate_on_interception,
 	[SVM_EXIT_WRITE_CR0]			= cr0_write_interception,
-	[SVM_EXIT_WRITE_CR3]			= emulate_on_interception,
-	[SVM_EXIT_WRITE_CR4]			= emulate_on_interception,
+	[SVM_EXIT_WRITE_CR3]			= cr_interception,
+	[SVM_EXIT_WRITE_CR4]			= cr_interception,
 	[SVM_EXIT_WRITE_CR8]			= cr8_write_interception,
-	[SVM_EXIT_READ_DR0]			= emulate_on_interception,
-	[SVM_EXIT_READ_DR1]			= emulate_on_interception,
-	[SVM_EXIT_READ_DR2]			= emulate_on_interception,
-	[SVM_EXIT_READ_DR3]			= emulate_on_interception,
-	[SVM_EXIT_READ_DR4]			= emulate_on_interception,
-	[SVM_EXIT_READ_DR5]			= emulate_on_interception,
-	[SVM_EXIT_READ_DR6]			= emulate_on_interception,
-	[SVM_EXIT_READ_DR7]			= emulate_on_interception,
-	[SVM_EXIT_WRITE_DR0]			= emulate_on_interception,
-	[SVM_EXIT_WRITE_DR1]			= emulate_on_interception,
-	[SVM_EXIT_WRITE_DR2]			= emulate_on_interception,
-	[SVM_EXIT_WRITE_DR3]			= emulate_on_interception,
-	[SVM_EXIT_WRITE_DR4]			= emulate_on_interception,
-	[SVM_EXIT_WRITE_DR5]			= emulate_on_interception,
-	[SVM_EXIT_WRITE_DR6]			= emulate_on_interception,
-	[SVM_EXIT_WRITE_DR7]			= emulate_on_interception,
+	[SVM_EXIT_READ_DR0]			= dr_interception,
+	[SVM_EXIT_READ_DR1]			= dr_interception,
+	[SVM_EXIT_READ_DR2]			= dr_interception,
+	[SVM_EXIT_READ_DR3]			= dr_interception,
+	[SVM_EXIT_READ_DR4]			= dr_interception,
+	[SVM_EXIT_READ_DR5]			= dr_interception,
+	[SVM_EXIT_READ_DR6]			= dr_interception,
+	[SVM_EXIT_READ_DR7]			= dr_interception,
+	[SVM_EXIT_WRITE_DR0]			= dr_interception,
+	[SVM_EXIT_WRITE_DR1]			= dr_interception,
+	[SVM_EXIT_WRITE_DR2]			= dr_interception,
+	[SVM_EXIT_WRITE_DR3]			= dr_interception,
+	[SVM_EXIT_WRITE_DR4]			= dr_interception,
+	[SVM_EXIT_WRITE_DR5]			= dr_interception,
+	[SVM_EXIT_WRITE_DR6]			= dr_interception,
+	[SVM_EXIT_WRITE_DR7]			= dr_interception,
 	[SVM_EXIT_EXCP_BASE + DB_VECTOR]	= db_interception,
 	[SVM_EXIT_EXCP_BASE + BP_VECTOR]	= bp_interception,
 	[SVM_EXIT_EXCP_BASE + UD_VECTOR]	= ud_interception,
@@ -2854,6 +3093,7 @@
 	[SVM_EXIT_WBINVD]                       = emulate_on_interception,
 	[SVM_EXIT_MONITOR]			= invalid_op_interception,
 	[SVM_EXIT_MWAIT]			= invalid_op_interception,
+	[SVM_EXIT_XSETBV]			= xsetbv_interception,
 	[SVM_EXIT_NPF]				= pf_interception,
 };
 
@@ -2864,10 +3104,10 @@
 	struct vmcb_save_area *save = &svm->vmcb->save;
 
 	pr_err("VMCB Control Area:\n");
-	pr_err("cr_read:            %04x\n", control->intercept_cr_read);
-	pr_err("cr_write:           %04x\n", control->intercept_cr_write);
-	pr_err("dr_read:            %04x\n", control->intercept_dr_read);
-	pr_err("dr_write:           %04x\n", control->intercept_dr_write);
+	pr_err("cr_read:            %04x\n", control->intercept_cr & 0xffff);
+	pr_err("cr_write:           %04x\n", control->intercept_cr >> 16);
+	pr_err("dr_read:            %04x\n", control->intercept_dr & 0xffff);
+	pr_err("dr_write:           %04x\n", control->intercept_dr >> 16);
 	pr_err("exceptions:         %08x\n", control->intercept_exceptions);
 	pr_err("intercepts:         %016llx\n", control->intercept);
 	pr_err("pause filter count: %d\n", control->pause_filter_count);
@@ -2950,15 +3190,23 @@
 
 }
 
+static void svm_get_exit_info(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2)
+{
+	struct vmcb_control_area *control = &to_svm(vcpu)->vmcb->control;
+
+	*info1 = control->exit_info_1;
+	*info2 = control->exit_info_2;
+}
+
 static int handle_exit(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 	struct kvm_run *kvm_run = vcpu->run;
 	u32 exit_code = svm->vmcb->control.exit_code;
 
-	trace_kvm_exit(exit_code, vcpu);
+	trace_kvm_exit(exit_code, vcpu, KVM_ISA_SVM);
 
-	if (!(svm->vmcb->control.intercept_cr_write & INTERCEPT_CR0_MASK))
+	if (!is_cr_intercept(svm, INTERCEPT_CR0_WRITE))
 		vcpu->arch.cr0 = svm->vmcb->save.cr0;
 	if (npt_enabled)
 		vcpu->arch.cr3 = svm->vmcb->save.cr3;
@@ -2970,7 +3218,7 @@
 		return 1;
 	}
 
-	if (is_nested(svm)) {
+	if (is_guest_mode(vcpu)) {
 		int vmexit;
 
 		trace_kvm_nested_vmexit(svm->vmcb->save.rip, exit_code,
@@ -3033,7 +3281,6 @@
 
 	struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
 
-	svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
 	/* FIXME: handle wraparound of asid_generation */
 	if (svm->asid_generation != sd->asid_generation)
 		new_asid(svm, sd);
@@ -3045,7 +3292,7 @@
 
 	svm->vmcb->control.event_inj = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;
 	vcpu->arch.hflags |= HF_NMI_MASK;
-	svm->vmcb->control.intercept |= (1ULL << INTERCEPT_IRET);
+	set_intercept(svm, INTERCEPT_IRET);
 	++vcpu->stat.nmi_injections;
 }
 
@@ -3058,6 +3305,7 @@
 	control->int_ctl &= ~V_INTR_PRIO_MASK;
 	control->int_ctl |= V_IRQ_MASK |
 		((/*control->int_vector >> 4*/ 0xf) << V_INTR_PRIO_SHIFT);
+	mark_dirty(svm->vmcb, VMCB_INTR);
 }
 
 static void svm_set_irq(struct kvm_vcpu *vcpu)
@@ -3077,14 +3325,14 @@
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 
-	if (is_nested(svm) && (vcpu->arch.hflags & HF_VINTR_MASK))
+	if (is_guest_mode(vcpu) && (vcpu->arch.hflags & HF_VINTR_MASK))
 		return;
 
 	if (irr == -1)
 		return;
 
 	if (tpr >= irr)
-		svm->vmcb->control.intercept_cr_write |= INTERCEPT_CR8_MASK;
+		set_cr_intercept(svm, INTERCEPT_CR8_WRITE);
 }
 
 static int svm_nmi_allowed(struct kvm_vcpu *vcpu)
@@ -3112,10 +3360,10 @@
 
 	if (masked) {
 		svm->vcpu.arch.hflags |= HF_NMI_MASK;
-		svm->vmcb->control.intercept |= (1ULL << INTERCEPT_IRET);
+		set_intercept(svm, INTERCEPT_IRET);
 	} else {
 		svm->vcpu.arch.hflags &= ~HF_NMI_MASK;
-		svm->vmcb->control.intercept &= ~(1ULL << INTERCEPT_IRET);
+		clr_intercept(svm, INTERCEPT_IRET);
 	}
 }
 
@@ -3131,7 +3379,7 @@
 
 	ret = !!(vmcb->save.rflags & X86_EFLAGS_IF);
 
-	if (is_nested(svm))
+	if (is_guest_mode(vcpu))
 		return ret && !(svm->vcpu.arch.hflags & HF_VINTR_MASK);
 
 	return ret;
@@ -3177,7 +3425,12 @@
 
 static void svm_flush_tlb(struct kvm_vcpu *vcpu)
 {
-	force_new_asid(vcpu);
+	struct vcpu_svm *svm = to_svm(vcpu);
+
+	if (static_cpu_has(X86_FEATURE_FLUSHBYASID))
+		svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID;
+	else
+		svm->asid_generation--;
 }
 
 static void svm_prepare_guest_switch(struct kvm_vcpu *vcpu)
@@ -3188,10 +3441,10 @@
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 
-	if (is_nested(svm) && (vcpu->arch.hflags & HF_VINTR_MASK))
+	if (is_guest_mode(vcpu) && (vcpu->arch.hflags & HF_VINTR_MASK))
 		return;
 
-	if (!(svm->vmcb->control.intercept_cr_write & INTERCEPT_CR8_MASK)) {
+	if (!is_cr_intercept(svm, INTERCEPT_CR8_WRITE)) {
 		int cr8 = svm->vmcb->control.int_ctl & V_TPR_MASK;
 		kvm_set_cr8(vcpu, cr8);
 	}
@@ -3202,7 +3455,7 @@
 	struct vcpu_svm *svm = to_svm(vcpu);
 	u64 cr8;
 
-	if (is_nested(svm) && (vcpu->arch.hflags & HF_VINTR_MASK))
+	if (is_guest_mode(vcpu) && (vcpu->arch.hflags & HF_VINTR_MASK))
 		return;
 
 	cr8 = kvm_get_cr8(vcpu);
@@ -3289,9 +3542,6 @@
 static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
-	u16 fs_selector;
-	u16 gs_selector;
-	u16 ldt_selector;
 
 	svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
 	svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
@@ -3308,10 +3558,6 @@
 
 	sync_lapic_to_cr8(vcpu);
 
-	save_host_msrs(vcpu);
-	savesegment(fs, fs_selector);
-	savesegment(gs, gs_selector);
-	ldt_selector = kvm_read_ldt();
 	svm->vmcb->save.cr2 = vcpu->arch.cr2;
 
 	clgi();
@@ -3389,19 +3635,10 @@
 #endif
 		);
 
-	vcpu->arch.cr2 = svm->vmcb->save.cr2;
-	vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax;
-	vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
-	vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip;
-
-	load_host_msrs(vcpu);
-	kvm_load_ldt(ldt_selector);
-	loadsegment(fs, fs_selector);
 #ifdef CONFIG_X86_64
-	load_gs_index(gs_selector);
-	wrmsrl(MSR_KERNEL_GS_BASE, current->thread.gs);
+	wrmsrl(MSR_GS_BASE, svm->host.gs_base);
 #else
-	loadsegment(gs, gs_selector);
+	loadsegment(fs, svm->host.fs);
 #endif
 
 	reload_tss(vcpu);
@@ -3410,10 +3647,21 @@
 
 	stgi();
 
+	vcpu->arch.cr2 = svm->vmcb->save.cr2;
+	vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax;
+	vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
+	vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip;
+
 	sync_cr8_to_lapic(vcpu);
 
 	svm->next_rip = 0;
 
+	svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
+
+	/* if the exit was due to a #PF, check for an async PF */
+	if (svm->vmcb->control.exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR)
+		svm->apf_reason = kvm_read_and_reset_pf_reason();
+
 	if (npt_enabled) {
 		vcpu->arch.regs_avail &= ~(1 << VCPU_EXREG_PDPTR);
 		vcpu->arch.regs_dirty &= ~(1 << VCPU_EXREG_PDPTR);
@@ -3426,6 +3674,8 @@
 	if (unlikely(svm->vmcb->control.exit_code ==
 		     SVM_EXIT_EXCP_BASE + MC_VECTOR))
 		svm_handle_mce(svm);
+
+	mark_all_clean(svm->vmcb);
 }
 
 #undef R
@@ -3435,7 +3685,8 @@
 	struct vcpu_svm *svm = to_svm(vcpu);
 
 	svm->vmcb->save.cr3 = root;
-	force_new_asid(vcpu);
+	mark_dirty(svm->vmcb, VMCB_CR);
+	svm_flush_tlb(vcpu);
 }
 
 static void set_tdp_cr3(struct kvm_vcpu *vcpu, unsigned long root)
@@ -3443,11 +3694,13 @@
 	struct vcpu_svm *svm = to_svm(vcpu);
 
 	svm->vmcb->control.nested_cr3 = root;
+	mark_dirty(svm->vmcb, VMCB_NPT);
 
 	/* Also sync guest cr3 here in case we live migrate */
-	svm->vmcb->save.cr3 = vcpu->arch.cr3;
+	svm->vmcb->save.cr3 = kvm_read_cr3(vcpu);
+	mark_dirty(svm->vmcb, VMCB_CR);
 
-	force_new_asid(vcpu);
+	svm_flush_tlb(vcpu);
 }
 
 static int is_disabled(void)
@@ -3494,10 +3747,6 @@
 static void svm_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry)
 {
 	switch (func) {
-	case 0x00000001:
-		/* Mask out xsave bit as long as it is not supported by SVM */
-		entry->ecx &= ~(bit(X86_FEATURE_XSAVE));
-		break;
 	case 0x80000001:
 		if (nested)
 			entry->ecx |= (1 << 2); /* Set SVM bit */
@@ -3511,7 +3760,7 @@
 				   additional features */
 
 		/* Support next_rip if host supports it */
-		if (svm_has(SVM_FEATURE_NRIP))
+		if (boot_cpu_has(X86_FEATURE_NRIPS))
 			entry->edx |= SVM_FEATURE_NRIP;
 
 		/* Support NPT for the guest if enabled */
@@ -3571,6 +3820,7 @@
 	{ SVM_EXIT_WBINVD,			"wbinvd" },
 	{ SVM_EXIT_MONITOR,			"monitor" },
 	{ SVM_EXIT_MWAIT,			"mwait" },
+	{ SVM_EXIT_XSETBV,			"xsetbv" },
 	{ SVM_EXIT_NPF,				"npf" },
 	{ -1, NULL }
 };
@@ -3594,9 +3844,7 @@
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 
-	svm->vmcb->control.intercept_exceptions |= 1 << NM_VECTOR;
-	if (is_nested(svm))
-		svm->nested.hsave->control.intercept_exceptions |= 1 << NM_VECTOR;
+	set_exception_intercept(svm, NM_VECTOR);
 	update_cr0_intercept(svm);
 }
 
@@ -3627,6 +3875,7 @@
 	.get_cpl = svm_get_cpl,
 	.get_cs_db_l_bits = kvm_get_cs_db_l_bits,
 	.decache_cr0_guest_bits = svm_decache_cr0_guest_bits,
+	.decache_cr3 = svm_decache_cr3,
 	.decache_cr4_guest_bits = svm_decache_cr4_guest_bits,
 	.set_cr0 = svm_set_cr0,
 	.set_cr3 = svm_set_cr3,
@@ -3667,7 +3916,9 @@
 	.get_tdp_level = get_npt_level,
 	.get_mt_mask = svm_get_mt_mask,
 
+	.get_exit_info = svm_get_exit_info,
 	.exit_reasons_str = svm_exit_reasons_str,
+
 	.get_lpage_level = svm_get_lpage_level,
 
 	.cpuid_update = svm_cpuid_update,
diff --git a/arch/x86/kvm/trace.h b/arch/x86/kvm/trace.h
index a6544b8..1357d7c 100644
--- a/arch/x86/kvm/trace.h
+++ b/arch/x86/kvm/trace.h
@@ -178,27 +178,36 @@
 #define trace_kvm_apic_read(reg, val)		trace_kvm_apic(0, reg, val)
 #define trace_kvm_apic_write(reg, val)		trace_kvm_apic(1, reg, val)
 
+#define KVM_ISA_VMX   1
+#define KVM_ISA_SVM   2
+
 /*
  * Tracepoint for kvm guest exit:
  */
 TRACE_EVENT(kvm_exit,
-	TP_PROTO(unsigned int exit_reason, struct kvm_vcpu *vcpu),
-	TP_ARGS(exit_reason, vcpu),
+	TP_PROTO(unsigned int exit_reason, struct kvm_vcpu *vcpu, u32 isa),
+	TP_ARGS(exit_reason, vcpu, isa),
 
 	TP_STRUCT__entry(
 		__field(	unsigned int,	exit_reason	)
 		__field(	unsigned long,	guest_rip	)
+		__field(	u32,	        isa             )
+		__field(	u64,	        info1           )
+		__field(	u64,	        info2           )
 	),
 
 	TP_fast_assign(
 		__entry->exit_reason	= exit_reason;
 		__entry->guest_rip	= kvm_rip_read(vcpu);
+		__entry->isa            = isa;
+		kvm_x86_ops->get_exit_info(vcpu, &__entry->info1,
+					   &__entry->info2);
 	),
 
-	TP_printk("reason %s rip 0x%lx",
+	TP_printk("reason %s rip 0x%lx info %llx %llx",
 		 ftrace_print_symbols_seq(p, __entry->exit_reason,
 					  kvm_x86_ops->exit_reasons_str),
-		 __entry->guest_rip)
+		 __entry->guest_rip, __entry->info1, __entry->info2)
 );
 
 /*
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 81fcbe9..bf89ec2 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -69,6 +69,9 @@
 static int __read_mostly vmm_exclusive = 1;
 module_param(vmm_exclusive, bool, S_IRUGO);
 
+static int __read_mostly yield_on_hlt = 1;
+module_param(yield_on_hlt, bool, S_IRUGO);
+
 #define KVM_GUEST_CR0_MASK_UNRESTRICTED_GUEST				\
 	(X86_CR0_WP | X86_CR0_NE | X86_CR0_NW | X86_CR0_CD)
 #define KVM_GUEST_CR0_MASK						\
@@ -177,6 +180,7 @@
 static u64 construct_eptp(unsigned long root_hpa);
 static void kvm_cpu_vmxon(u64 addr);
 static void kvm_cpu_vmxoff(void);
+static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
 
 static DEFINE_PER_CPU(struct vmcs *, vmxarea);
 static DEFINE_PER_CPU(struct vmcs *, current_vmcs);
@@ -188,6 +192,8 @@
 static unsigned long *vmx_msr_bitmap_legacy;
 static unsigned long *vmx_msr_bitmap_longmode;
 
+static bool cpu_has_load_ia32_efer;
+
 static DECLARE_BITMAP(vmx_vpid_bitmap, VMX_NR_VPIDS);
 static DEFINE_SPINLOCK(vmx_vpid_lock);
 
@@ -472,7 +478,7 @@
 	u8 error;
 
 	asm volatile (__ex(ASM_VMX_VMCLEAR_RAX) "; setna %0"
-		      : "=g"(error) : "a"(&phys_addr), "m"(phys_addr)
+		      : "=qm"(error) : "a"(&phys_addr), "m"(phys_addr)
 		      : "cc", "memory");
 	if (error)
 		printk(KERN_ERR "kvm: vmclear fail: %p/%llx\n",
@@ -485,7 +491,7 @@
 	u8 error;
 
 	asm volatile (__ex(ASM_VMX_VMPTRLD_RAX) "; setna %0"
-			: "=g"(error) : "a"(&phys_addr), "m"(phys_addr)
+			: "=qm"(error) : "a"(&phys_addr), "m"(phys_addr)
 			: "cc", "memory");
 	if (error)
 		printk(KERN_ERR "kvm: vmptrld %p/%llx fail\n",
@@ -565,10 +571,10 @@
 
 static unsigned long vmcs_readl(unsigned long field)
 {
-	unsigned long value;
+	unsigned long value = 0;
 
 	asm volatile (__ex(ASM_VMX_VMREAD_RDX_RAX)
-		      : "=a"(value) : "d"(field) : "cc");
+		      : "+a"(value) : "d"(field) : "cc");
 	return value;
 }
 
@@ -661,6 +667,12 @@
 	unsigned i;
 	struct msr_autoload *m = &vmx->msr_autoload;
 
+	if (msr == MSR_EFER && cpu_has_load_ia32_efer) {
+		vmcs_clear_bits(VM_ENTRY_CONTROLS, VM_ENTRY_LOAD_IA32_EFER);
+		vmcs_clear_bits(VM_EXIT_CONTROLS, VM_EXIT_LOAD_IA32_EFER);
+		return;
+	}
+
 	for (i = 0; i < m->nr; ++i)
 		if (m->guest[i].index == msr)
 			break;
@@ -680,6 +692,14 @@
 	unsigned i;
 	struct msr_autoload *m = &vmx->msr_autoload;
 
+	if (msr == MSR_EFER && cpu_has_load_ia32_efer) {
+		vmcs_write64(GUEST_IA32_EFER, guest_val);
+		vmcs_write64(HOST_IA32_EFER, host_val);
+		vmcs_set_bits(VM_ENTRY_CONTROLS, VM_ENTRY_LOAD_IA32_EFER);
+		vmcs_set_bits(VM_EXIT_CONTROLS, VM_EXIT_LOAD_IA32_EFER);
+		return;
+	}
+
 	for (i = 0; i < m->nr; ++i)
 		if (m->guest[i].index == msr)
 			break;
@@ -1009,6 +1029,17 @@
 	vmx_set_interrupt_shadow(vcpu, 0);
 }
 
+static void vmx_clear_hlt(struct kvm_vcpu *vcpu)
+{
+	/* Ensure that we clear the HLT state in the VMCS.  We don't need to
+	 * explicitly skip the instruction because if the HLT state is set, then
+	 * the instruction is already executing and RIP has already been
+	 * advanced. */
+	if (!yield_on_hlt &&
+	    vmcs_read32(GUEST_ACTIVITY_STATE) == GUEST_ACTIVITY_HLT)
+		vmcs_write32(GUEST_ACTIVITY_STATE, GUEST_ACTIVITY_ACTIVE);
+}
+
 static void vmx_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
 				bool has_error_code, u32 error_code,
 				bool reinject)
@@ -1035,6 +1066,7 @@
 		intr_info |= INTR_TYPE_HARD_EXCEPTION;
 
 	vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr_info);
+	vmx_clear_hlt(vcpu);
 }
 
 static bool vmx_rdtscp_supported(void)
@@ -1305,8 +1337,11 @@
 			&& tboot_enabled())
 			return 1;
 		if (!(msr & FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX)
-			&& !tboot_enabled())
+			&& !tboot_enabled()) {
+			printk(KERN_WARNING "kvm: disable TXT in the BIOS or "
+				"activate TXT before enabling KVM\n");
 			return 1;
+		}
 	}
 
 	return 0;
@@ -1400,6 +1435,14 @@
 	return 0;
 }
 
+static __init bool allow_1_setting(u32 msr, u32 ctl)
+{
+	u32 vmx_msr_low, vmx_msr_high;
+
+	rdmsr(msr, vmx_msr_low, vmx_msr_high);
+	return vmx_msr_high & ctl;
+}
+
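Per the VMX capability-MSR convention, the low 32 bits report the allowed-0 settings and the high 32 bits the allowed-1 settings, so a control may be enabled only if its bit is set in the high word. A small usage sketch of the helper above (hypothetical wrapper name), mirroring the cpu_has_load_ia32_efer probe performed further down:

	/* Sketch only: probe whether the dedicated EFER load controls may be set
	 * on both VM entry and VM exit (same check setup_vmcs_config() performs). */
	static __init bool efer_load_controls_available(void)
	{
		return allow_1_setting(MSR_IA32_VMX_ENTRY_CTLS, VM_ENTRY_LOAD_IA32_EFER)
		    && allow_1_setting(MSR_IA32_VMX_EXIT_CTLS, VM_EXIT_LOAD_IA32_EFER);
	}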
 static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
 {
 	u32 vmx_msr_low, vmx_msr_high;
@@ -1416,7 +1459,7 @@
 				&_pin_based_exec_control) < 0)
 		return -EIO;
 
-	min = CPU_BASED_HLT_EXITING |
+	min =
 #ifdef CONFIG_X86_64
 	      CPU_BASED_CR8_LOAD_EXITING |
 	      CPU_BASED_CR8_STORE_EXITING |
@@ -1429,6 +1472,10 @@
 	      CPU_BASED_MWAIT_EXITING |
 	      CPU_BASED_MONITOR_EXITING |
 	      CPU_BASED_INVLPG_EXITING;
+
+	if (yield_on_hlt)
+		min |= CPU_BASED_HLT_EXITING;
+
 	opt = CPU_BASED_TPR_SHADOW |
 	      CPU_BASED_USE_MSR_BITMAPS |
 	      CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
@@ -1510,6 +1557,12 @@
 	vmcs_conf->vmexit_ctrl         = _vmexit_control;
 	vmcs_conf->vmentry_ctrl        = _vmentry_control;
 
+	cpu_has_load_ia32_efer =
+		allow_1_setting(MSR_IA32_VMX_ENTRY_CTLS,
+				VM_ENTRY_LOAD_IA32_EFER)
+		&& allow_1_setting(MSR_IA32_VMX_EXIT_CTLS,
+				   VM_EXIT_LOAD_IA32_EFER);
+
 	return 0;
 }
 
@@ -1683,9 +1736,13 @@
 	save->limit = vmcs_read32(sf->limit);
 	save->ar = vmcs_read32(sf->ar_bytes);
 	vmcs_write16(sf->selector, save->base >> 4);
-	vmcs_write32(sf->base, save->base & 0xfffff);
+	vmcs_write32(sf->base, save->base & 0xffff0);
 	vmcs_write32(sf->limit, 0xffff);
 	vmcs_write32(sf->ar_bytes, 0xf3);
+	if (save->base & 0xf)
+		printk_once(KERN_WARNING "kvm: segment base is not paragraph"
+			    " aligned when entering protected mode (seg=%d)",
+			    seg);
 }
 
 static void enter_rmode(struct kvm_vcpu *vcpu)
@@ -1814,6 +1871,13 @@
 	vcpu->arch.cr0 |= vmcs_readl(GUEST_CR0) & cr0_guest_owned_bits;
 }
 
+static void vmx_decache_cr3(struct kvm_vcpu *vcpu)
+{
+	if (enable_ept && is_paging(vcpu))
+		vcpu->arch.cr3 = vmcs_readl(GUEST_CR3);
+	__set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
+}
+
 static void vmx_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
 {
 	ulong cr4_guest_owned_bits = vcpu->arch.cr4_guest_owned_bits;
@@ -1857,6 +1921,7 @@
 					unsigned long cr0,
 					struct kvm_vcpu *vcpu)
 {
+	vmx_decache_cr3(vcpu);
 	if (!(cr0 & X86_CR0_PG)) {
 		/* From paging/starting to nonpaging */
 		vmcs_write32(CPU_BASED_VM_EXEC_CONTROL,
@@ -1937,7 +2002,7 @@
 	if (enable_ept) {
 		eptp = construct_eptp(cr3);
 		vmcs_write64(EPT_POINTER, eptp);
-		guest_cr3 = is_paging(vcpu) ? vcpu->arch.cr3 :
+		guest_cr3 = is_paging(vcpu) ? kvm_read_cr3(vcpu) :
 			vcpu->kvm->arch.ept_identity_map_addr;
 		ept_load_pdptrs(vcpu);
 	}
@@ -2725,7 +2790,7 @@
 	vmcs_writel(GUEST_IDTR_BASE, 0);
 	vmcs_write32(GUEST_IDTR_LIMIT, 0xffff);
 
-	vmcs_write32(GUEST_ACTIVITY_STATE, 0);
+	vmcs_write32(GUEST_ACTIVITY_STATE, GUEST_ACTIVITY_ACTIVE);
 	vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, 0);
 	vmcs_write32(GUEST_PENDING_DBG_EXCEPTIONS, 0);
 
@@ -2787,6 +2852,10 @@
 		return;
 	}
 
+	if (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_STI) {
+		enable_irq_window(vcpu);
+		return;
+	}
 	cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
 	cpu_based_vm_exec_control |= CPU_BASED_VIRTUAL_NMI_PENDING;
 	vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
@@ -2814,6 +2883,7 @@
 	} else
 		intr |= INTR_TYPE_EXT_INTR;
 	vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr);
+	vmx_clear_hlt(vcpu);
 }
 
 static void vmx_inject_nmi(struct kvm_vcpu *vcpu)
@@ -2841,6 +2911,7 @@
 	}
 	vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
 			INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK | NMI_VECTOR);
+	vmx_clear_hlt(vcpu);
 }
 
 static int vmx_nmi_allowed(struct kvm_vcpu *vcpu)
@@ -2849,7 +2920,8 @@
 		return 0;
 
 	return	!(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
-			(GUEST_INTR_STATE_MOV_SS | GUEST_INTR_STATE_NMI));
+		  (GUEST_INTR_STATE_MOV_SS | GUEST_INTR_STATE_STI
+		   | GUEST_INTR_STATE_NMI));
 }
 
 static bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu)
@@ -2910,7 +2982,7 @@
 	 * Cause the #SS fault with 0 error code in VM86 mode.
 	 */
 	if (((vec == GP_VECTOR) || (vec == SS_VECTOR)) && err_code == 0)
-		if (emulate_instruction(vcpu, 0, 0, 0) == EMULATE_DONE)
+		if (emulate_instruction(vcpu, 0) == EMULATE_DONE)
 			return 1;
 	/*
 	 * Forward all other exceptions that are valid in real mode.
@@ -3007,7 +3079,7 @@
 	}
 
 	if (is_invalid_opcode(intr_info)) {
-		er = emulate_instruction(vcpu, 0, 0, EMULTYPE_TRAP_UD);
+		er = emulate_instruction(vcpu, EMULTYPE_TRAP_UD);
 		if (er != EMULATE_DONE)
 			kvm_queue_exception(vcpu, UD_VECTOR);
 		return 1;
@@ -3026,7 +3098,7 @@
 
 		if (kvm_event_needs_reinjection(vcpu))
 			kvm_mmu_unprotect_page_virt(vcpu, cr2);
-		return kvm_mmu_page_fault(vcpu, cr2, error_code);
+		return kvm_mmu_page_fault(vcpu, cr2, error_code, NULL, 0);
 	}
 
 	if (vmx->rmode.vm86_active &&
@@ -3098,7 +3170,7 @@
 	++vcpu->stat.io_exits;
 
 	if (string || in)
-		return emulate_instruction(vcpu, 0, 0, 0) == EMULATE_DONE;
+		return emulate_instruction(vcpu, 0) == EMULATE_DONE;
 
 	port = exit_qualification >> 16;
 	size = (exit_qualification & 7) + 1;
@@ -3118,14 +3190,6 @@
 	hypercall[2] = 0xc1;
 }
 
-static void complete_insn_gp(struct kvm_vcpu *vcpu, int err)
-{
-	if (err)
-		kvm_inject_gp(vcpu, 0);
-	else
-		skip_emulated_instruction(vcpu);
-}
-
 static int handle_cr(struct kvm_vcpu *vcpu)
 {
 	unsigned long exit_qualification, val;
@@ -3143,21 +3207,21 @@
 		switch (cr) {
 		case 0:
 			err = kvm_set_cr0(vcpu, val);
-			complete_insn_gp(vcpu, err);
+			kvm_complete_insn_gp(vcpu, err);
 			return 1;
 		case 3:
 			err = kvm_set_cr3(vcpu, val);
-			complete_insn_gp(vcpu, err);
+			kvm_complete_insn_gp(vcpu, err);
 			return 1;
 		case 4:
 			err = kvm_set_cr4(vcpu, val);
-			complete_insn_gp(vcpu, err);
+			kvm_complete_insn_gp(vcpu, err);
 			return 1;
 		case 8: {
 				u8 cr8_prev = kvm_get_cr8(vcpu);
 				u8 cr8 = kvm_register_read(vcpu, reg);
-				kvm_set_cr8(vcpu, cr8);
-				skip_emulated_instruction(vcpu);
+				err = kvm_set_cr8(vcpu, cr8);
+				kvm_complete_insn_gp(vcpu, err);
 				if (irqchip_in_kernel(vcpu->kvm))
 					return 1;
 				if (cr8_prev <= cr8)
@@ -3176,8 +3240,9 @@
 	case 1: /*mov from cr*/
 		switch (cr) {
 		case 3:
-			kvm_register_write(vcpu, reg, vcpu->arch.cr3);
-			trace_kvm_cr_read(cr, vcpu->arch.cr3);
+			val = kvm_read_cr3(vcpu);
+			kvm_register_write(vcpu, reg, val);
+			trace_kvm_cr_read(cr, val);
 			skip_emulated_instruction(vcpu);
 			return 1;
 		case 8:
@@ -3349,6 +3414,11 @@
 	return 1;
 }
 
+static int handle_invd(struct kvm_vcpu *vcpu)
+{
+	return emulate_instruction(vcpu, 0) == EMULATE_DONE;
+}
+
 static int handle_invlpg(struct kvm_vcpu *vcpu)
 {
 	unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
@@ -3377,7 +3447,7 @@
 
 static int handle_apic_access(struct kvm_vcpu *vcpu)
 {
-	return emulate_instruction(vcpu, 0, 0, 0) == EMULATE_DONE;
+	return emulate_instruction(vcpu, 0) == EMULATE_DONE;
 }
 
 static int handle_task_switch(struct kvm_vcpu *vcpu)
@@ -3476,7 +3546,7 @@
 
 	gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS);
 	trace_kvm_page_fault(gpa, exit_qualification);
-	return kvm_mmu_page_fault(vcpu, gpa & PAGE_MASK, 0);
+	return kvm_mmu_page_fault(vcpu, gpa, exit_qualification & 0x3, NULL, 0);
 }
 
 static u64 ept_rsvd_mask(u64 spte, int level)
@@ -3592,7 +3662,7 @@
 		    && (kvm_get_rflags(&vmx->vcpu) & X86_EFLAGS_IF))
 			return handle_interrupt_window(&vmx->vcpu);
 
-		err = emulate_instruction(vcpu, 0, 0, 0);
+		err = emulate_instruction(vcpu, 0);
 
 		if (err == EMULATE_DO_MMIO) {
 			ret = 0;
@@ -3649,6 +3719,7 @@
 	[EXIT_REASON_MSR_WRITE]               = handle_wrmsr,
 	[EXIT_REASON_PENDING_INTERRUPT]       = handle_interrupt_window,
 	[EXIT_REASON_HLT]                     = handle_halt,
+	[EXIT_REASON_INVD]		      = handle_invd,
 	[EXIT_REASON_INVLPG]		      = handle_invlpg,
 	[EXIT_REASON_VMCALL]                  = handle_vmcall,
 	[EXIT_REASON_VMCLEAR]	              = handle_vmx_insn,
@@ -3676,6 +3747,12 @@
 static const int kvm_vmx_max_exit_handlers =
 	ARRAY_SIZE(kvm_vmx_exit_handlers);
 
+static void vmx_get_exit_info(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2)
+{
+	*info1 = vmcs_readl(EXIT_QUALIFICATION);
+	*info2 = vmcs_read32(VM_EXIT_INTR_INFO);
+}
+
 /*
  * The guest has exited.  See if we can fix it or if we need userspace
  * assistance.
@@ -3686,17 +3763,12 @@
 	u32 exit_reason = vmx->exit_reason;
 	u32 vectoring_info = vmx->idt_vectoring_info;
 
-	trace_kvm_exit(exit_reason, vcpu);
+	trace_kvm_exit(exit_reason, vcpu, KVM_ISA_VMX);
 
 	/* If guest state is invalid, start emulating */
 	if (vmx->emulation_required && emulate_invalid_guest_state)
 		return handle_invalid_guest_state(vcpu);
 
-	/* Access CR3 don't cause VMExit in paging mode, so we need
-	 * to sync with guest real CR3. */
-	if (enable_ept && is_paging(vcpu))
-		vcpu->arch.cr3 = vmcs_readl(GUEST_CR3);
-
 	if (exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY) {
 		vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY;
 		vcpu->run->fail_entry.hardware_entry_failure_reason
@@ -4013,7 +4085,8 @@
 	      );
 
 	vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)
-				  | (1 << VCPU_EXREG_PDPTR));
+				  | (1 << VCPU_EXREG_PDPTR)
+				  | (1 << VCPU_EXREG_CR3));
 	vcpu->arch.regs_dirty = 0;
 
 	vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
@@ -4280,6 +4353,7 @@
 	.get_cpl = vmx_get_cpl,
 	.get_cs_db_l_bits = vmx_get_cs_db_l_bits,
 	.decache_cr0_guest_bits = vmx_decache_cr0_guest_bits,
+	.decache_cr3 = vmx_decache_cr3,
 	.decache_cr4_guest_bits = vmx_decache_cr4_guest_bits,
 	.set_cr0 = vmx_set_cr0,
 	.set_cr3 = vmx_set_cr3,
@@ -4320,7 +4394,9 @@
 	.get_tdp_level = get_ept_level,
 	.get_mt_mask = vmx_get_mt_mask,
 
+	.get_exit_info = vmx_get_exit_info,
 	.exit_reasons_str = vmx_exit_reasons_str,
+
 	.get_lpage_level = vmx_get_lpage_level,
 
 	.cpuid_update = vmx_cpuid_update,
@@ -4396,8 +4472,6 @@
 
 	if (enable_ept) {
 		bypass_guest_pf = 0;
-		kvm_mmu_set_base_ptes(VMX_EPT_READABLE_MASK |
-			VMX_EPT_WRITABLE_MASK);
 		kvm_mmu_set_mask_ptes(0ull, 0ull, 0ull, 0ull,
 				VMX_EPT_EXECUTABLE_MASK);
 		kvm_enable_tdp();
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 46a368c..bcc0efc 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -43,6 +43,7 @@
 #include <linux/slab.h>
 #include <linux/perf_event.h>
 #include <linux/uaccess.h>
+#include <linux/hash.h>
 #include <trace/events/kvm.h>
 
 #define CREATE_TRACE_POINTS
@@ -155,6 +156,13 @@
 
 u64 __read_mostly host_xcr0;
 
+static inline void kvm_async_pf_hash_reset(struct kvm_vcpu *vcpu)
+{
+	int i;
+	for (i = 0; i < roundup_pow_of_two(ASYNC_PF_PER_VCPU); i++)
+		vcpu->arch.apf.gfns[i] = ~0;
+}
+
 static void kvm_on_user_return(struct user_return_notifier *urn)
 {
 	unsigned slot;
@@ -326,23 +334,28 @@
 }
 EXPORT_SYMBOL_GPL(kvm_requeue_exception);
 
-void kvm_inject_page_fault(struct kvm_vcpu *vcpu)
+void kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err)
 {
-	unsigned error_code = vcpu->arch.fault.error_code;
+	if (err)
+		kvm_inject_gp(vcpu, 0);
+	else
+		kvm_x86_ops->skip_emulated_instruction(vcpu);
+}
+EXPORT_SYMBOL_GPL(kvm_complete_insn_gp);
 
+void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault)
+{
 	++vcpu->stat.pf_guest;
-	vcpu->arch.cr2 = vcpu->arch.fault.address;
-	kvm_queue_exception_e(vcpu, PF_VECTOR, error_code);
+	vcpu->arch.cr2 = fault->address;
+	kvm_queue_exception_e(vcpu, PF_VECTOR, fault->error_code);
 }
 
-void kvm_propagate_fault(struct kvm_vcpu *vcpu)
+void kvm_propagate_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault)
 {
-	if (mmu_is_nested(vcpu) && !vcpu->arch.fault.nested)
-		vcpu->arch.nested_mmu.inject_page_fault(vcpu);
+	if (mmu_is_nested(vcpu) && !fault->nested_page_fault)
+		vcpu->arch.nested_mmu.inject_page_fault(vcpu, fault);
 	else
-		vcpu->arch.mmu.inject_page_fault(vcpu);
-
-	vcpu->arch.fault.nested = false;
+		vcpu->arch.mmu.inject_page_fault(vcpu, fault);
 }
 
 void kvm_inject_nmi(struct kvm_vcpu *vcpu)
@@ -460,8 +473,8 @@
 		      (unsigned long *)&vcpu->arch.regs_avail))
 		return true;
 
-	gfn = (vcpu->arch.cr3 & ~31u) >> PAGE_SHIFT;
-	offset = (vcpu->arch.cr3 & ~31u) & (PAGE_SIZE - 1);
+	gfn = (kvm_read_cr3(vcpu) & ~31u) >> PAGE_SHIFT;
+	offset = (kvm_read_cr3(vcpu) & ~31u) & (PAGE_SIZE - 1);
 	r = kvm_read_nested_guest_page(vcpu, gfn, pdpte, offset, sizeof(pdpte),
 				       PFERR_USER_MASK | PFERR_WRITE_MASK);
 	if (r < 0)
@@ -506,12 +519,15 @@
 		} else
 #endif
 		if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.walk_mmu,
-						 vcpu->arch.cr3))
+						 kvm_read_cr3(vcpu)))
 			return 1;
 	}
 
 	kvm_x86_ops->set_cr0(vcpu, cr0);
 
+	if ((cr0 ^ old_cr0) & X86_CR0_PG)
+		kvm_clear_async_pf_completion_queue(vcpu);
+
 	if ((cr0 ^ old_cr0) & update_bits)
 		kvm_mmu_reset_context(vcpu);
 	return 0;
@@ -595,7 +611,8 @@
 			return 1;
 	} else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE)
 		   && ((cr4 ^ old_cr4) & pdptr_bits)
-		   && !load_pdptrs(vcpu, vcpu->arch.walk_mmu, vcpu->arch.cr3))
+		   && !load_pdptrs(vcpu, vcpu->arch.walk_mmu,
+				   kvm_read_cr3(vcpu)))
 		return 1;
 
 	if (cr4 & X86_CR4_VMXE)
@@ -615,7 +632,7 @@
 
 int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
 {
-	if (cr3 == vcpu->arch.cr3 && !pdptrs_changed(vcpu)) {
+	if (cr3 == kvm_read_cr3(vcpu) && !pdptrs_changed(vcpu)) {
 		kvm_mmu_sync_roots(vcpu);
 		kvm_mmu_flush_tlb(vcpu);
 		return 0;
@@ -650,12 +667,13 @@
 	if (unlikely(!gfn_to_memslot(vcpu->kvm, cr3 >> PAGE_SHIFT)))
 		return 1;
 	vcpu->arch.cr3 = cr3;
+	__set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
 	vcpu->arch.mmu.new_cr3(vcpu);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(kvm_set_cr3);
 
-int __kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
+int kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
 {
 	if (cr8 & CR8_RESERVED_BITS)
 		return 1;
@@ -665,12 +683,6 @@
 		vcpu->arch.cr8 = cr8;
 	return 0;
 }
-
-void kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
-{
-	if (__kvm_set_cr8(vcpu, cr8))
-		kvm_inject_gp(vcpu, 0);
-}
 EXPORT_SYMBOL_GPL(kvm_set_cr8);
 
 unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu)
@@ -775,12 +787,12 @@
  * kvm-specific. Those are put in the beginning of the list.
  */
 
-#define KVM_SAVE_MSRS_BEGIN	7
+#define KVM_SAVE_MSRS_BEGIN	8
 static u32 msrs_to_save[] = {
 	MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK,
 	MSR_KVM_SYSTEM_TIME_NEW, MSR_KVM_WALL_CLOCK_NEW,
 	HV_X64_MSR_GUEST_OS_ID, HV_X64_MSR_HYPERCALL,
-	HV_X64_MSR_APIC_ASSIST_PAGE,
+	HV_X64_MSR_APIC_ASSIST_PAGE, MSR_KVM_ASYNC_PF_EN,
 	MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
 	MSR_STAR,
 #ifdef CONFIG_X86_64
@@ -830,7 +842,6 @@
 	kvm_x86_ops->set_efer(vcpu, efer);
 
 	vcpu->arch.mmu.base_role.nxe = (efer & EFER_NX) && !tdp_enabled;
-	kvm_mmu_reset_context(vcpu);
 
 	/* Update reserved bits */
 	if ((efer ^ old_efer) & EFER_NX)
@@ -1418,6 +1429,30 @@
 	return 0;
 }
 
+static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
+{
+	gpa_t gpa = data & ~0x3f;
+
+	/* Bits 2:5 are reserved, should be zero */
+	if (data & 0x3c)
+		return 1;
+
+	vcpu->arch.apf.msr_val = data;
+
+	if (!(data & KVM_ASYNC_PF_ENABLED)) {
+		kvm_clear_async_pf_completion_queue(vcpu);
+		kvm_async_pf_hash_reset(vcpu);
+		return 0;
+	}
+
+	if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa))
+		return 1;
+
+	vcpu->arch.apf.send_user_only = !(data & KVM_ASYNC_PF_SEND_ALWAYS);
+	kvm_async_pf_wakeup_all(vcpu);
+	return 0;
+}
+
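On the guest side, this MSR is expected to be programmed with the physical address of a 64-byte aligned reason word OR'd with the enable bit, matching the layout validated above (bits 2:5 reserved, address in the upper bits). A rough sketch, with hypothetical names, of how a guest would switch async page faults on:

	/* Rough guest-side sketch, assuming the MSR layout checked in
	 * kvm_pv_enable_async_pf(): aligned address | KVM_ASYNC_PF_ENABLED. */
	static u32 apf_reason_word __aligned(64);

	static void guest_enable_async_pf(void)
	{
		wrmsrl(MSR_KVM_ASYNC_PF_EN,
		       __pa(&apf_reason_word) | KVM_ASYNC_PF_ENABLED);
	}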
 int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
 {
 	switch (msr) {
@@ -1499,6 +1534,10 @@
 		}
 		break;
 	}
+	case MSR_KVM_ASYNC_PF_EN:
+		if (kvm_pv_enable_async_pf(vcpu, data))
+			return 1;
+		break;
 	case MSR_IA32_MCG_CTL:
 	case MSR_IA32_MCG_STATUS:
 	case MSR_IA32_MC0_CTL ... MSR_IA32_MC0_CTL + 4 * KVM_MAX_MCE_BANKS - 1:
@@ -1775,6 +1814,9 @@
 	case MSR_KVM_SYSTEM_TIME_NEW:
 		data = vcpu->arch.time;
 		break;
+	case MSR_KVM_ASYNC_PF_EN:
+		data = vcpu->arch.apf.msr_val;
+		break;
 	case MSR_IA32_P5_MC_ADDR:
 	case MSR_IA32_P5_MC_TYPE:
 	case MSR_IA32_MCG_CAP:
@@ -1904,6 +1946,7 @@
 	case KVM_CAP_NOP_IO_DELAY:
 	case KVM_CAP_MP_STATE:
 	case KVM_CAP_SYNC_MMU:
+	case KVM_CAP_USER_NMI:
 	case KVM_CAP_REINJECT_CONTROL:
 	case KVM_CAP_IRQ_INJECT_STATUS:
 	case KVM_CAP_ASSIGN_DEV_IRQ:
@@ -1922,6 +1965,7 @@
 	case KVM_CAP_DEBUGREGS:
 	case KVM_CAP_X86_ROBUST_SINGLESTEP:
 	case KVM_CAP_XSAVE:
+	case KVM_CAP_ASYNC_PF:
 		r = 1;
 		break;
 	case KVM_CAP_COALESCED_MMIO:
@@ -2185,6 +2229,11 @@
 	return r;
 }
 
+static void cpuid_mask(u32 *word, int wordnum)
+{
+	*word &= boot_cpu_data.x86_capability[wordnum];
+}
+
 static void do_cpuid_1_ent(struct kvm_cpuid_entry2 *entry, u32 function,
 			   u32 index)
 {
@@ -2259,7 +2308,9 @@
 		break;
 	case 1:
 		entry->edx &= kvm_supported_word0_x86_features;
+		cpuid_mask(&entry->edx, 0);
 		entry->ecx &= kvm_supported_word4_x86_features;
+		cpuid_mask(&entry->ecx, 4);
 		/* we support x2apic emulation even if host does not support
 		 * it since we emulate x2apic in software */
 		entry->ecx |= F(X2APIC);
@@ -2350,7 +2401,9 @@
 		break;
 	case 0x80000001:
 		entry->edx &= kvm_supported_word1_x86_features;
+		cpuid_mask(&entry->edx, 1);
 		entry->ecx &= kvm_supported_word6_x86_features;
+		cpuid_mask(&entry->ecx, 6);
 		break;
 	}
 
@@ -3169,20 +3222,18 @@
 		struct kvm_memslots *slots, *old_slots;
 		unsigned long *dirty_bitmap;
 
-		r = -ENOMEM;
-		dirty_bitmap = vmalloc(n);
-		if (!dirty_bitmap)
-			goto out;
+		dirty_bitmap = memslot->dirty_bitmap_head;
+		if (memslot->dirty_bitmap == dirty_bitmap)
+			dirty_bitmap += n / sizeof(long);
 		memset(dirty_bitmap, 0, n);
 
 		r = -ENOMEM;
 		slots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
-		if (!slots) {
-			vfree(dirty_bitmap);
+		if (!slots)
 			goto out;
-		}
 		memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots));
 		slots->memslots[log->slot].dirty_bitmap = dirty_bitmap;
+		slots->generation++;
 
 		old_slots = kvm->memslots;
 		rcu_assign_pointer(kvm->memslots, slots);
@@ -3195,11 +3246,8 @@
 		spin_unlock(&kvm->mmu_lock);
 
 		r = -EFAULT;
-		if (copy_to_user(log->dirty_bitmap, dirty_bitmap, n)) {
-			vfree(dirty_bitmap);
+		if (copy_to_user(log->dirty_bitmap, dirty_bitmap, n))
 			goto out;
-		}
-		vfree(dirty_bitmap);
 	} else {
 		r = -EFAULT;
 		if (clear_user(log->dirty_bitmap, n))
@@ -3266,8 +3314,10 @@
 		if (vpic) {
 			r = kvm_ioapic_init(kvm);
 			if (r) {
+				mutex_lock(&kvm->slots_lock);
 				kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS,
 							  &vpic->dev);
+				mutex_unlock(&kvm->slots_lock);
 				kfree(vpic);
 				goto create_irqchip_unlock;
 			}
@@ -3278,10 +3328,12 @@
 		smp_wmb();
 		r = kvm_setup_default_irq_routing(kvm);
 		if (r) {
+			mutex_lock(&kvm->slots_lock);
 			mutex_lock(&kvm->irq_lock);
 			kvm_ioapic_destroy(kvm);
 			kvm_destroy_pic(kvm);
 			mutex_unlock(&kvm->irq_lock);
+			mutex_unlock(&kvm->slots_lock);
 		}
 	create_irqchip_unlock:
 		mutex_unlock(&kvm->lock);
@@ -3557,63 +3609,63 @@
 static gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access)
 {
 	gpa_t t_gpa;
-	u32 error;
+	struct x86_exception exception;
 
 	BUG_ON(!mmu_is_nested(vcpu));
 
 	/* NPT walks are always user-walks */
 	access |= PFERR_USER_MASK;
-	t_gpa  = vcpu->arch.mmu.gva_to_gpa(vcpu, gpa, access, &error);
-	if (t_gpa == UNMAPPED_GVA)
-		vcpu->arch.fault.nested = true;
+	t_gpa  = vcpu->arch.mmu.gva_to_gpa(vcpu, gpa, access, &exception);
 
 	return t_gpa;
 }
 
-gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
+gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva,
+			      struct x86_exception *exception)
 {
 	u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
-	return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, error);
+	return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
 }
 
- gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
+ gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva,
+				struct x86_exception *exception)
 {
 	u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
 	access |= PFERR_FETCH_MASK;
-	return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, error);
+	return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
 }
 
-gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
+gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva,
+			       struct x86_exception *exception)
 {
 	u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
 	access |= PFERR_WRITE_MASK;
-	return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, error);
+	return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
 }
 
 /* uses this to access any guest's mapped memory without checking CPL */
-gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
+gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva,
+				struct x86_exception *exception)
 {
-	return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, 0, error);
+	return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, 0, exception);
 }
 
 static int kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
 				      struct kvm_vcpu *vcpu, u32 access,
-				      u32 *error)
+				      struct x86_exception *exception)
 {
 	void *data = val;
 	int r = X86EMUL_CONTINUE;
 
 	while (bytes) {
 		gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, access,
-							    error);
+							    exception);
 		unsigned offset = addr & (PAGE_SIZE-1);
 		unsigned toread = min(bytes, (unsigned)PAGE_SIZE - offset);
 		int ret;
 
-		if (gpa == UNMAPPED_GVA) {
-			r = X86EMUL_PROPAGATE_FAULT;
-			goto out;
-		}
+		if (gpa == UNMAPPED_GVA)
+			return X86EMUL_PROPAGATE_FAULT;
 		ret = kvm_read_guest(vcpu->kvm, gpa, data, toread);
 		if (ret < 0) {
 			r = X86EMUL_IO_NEEDED;
@@ -3630,31 +3682,35 @@
 
 /* used for instruction fetching */
 static int kvm_fetch_guest_virt(gva_t addr, void *val, unsigned int bytes,
-				struct kvm_vcpu *vcpu, u32 *error)
+				struct kvm_vcpu *vcpu,
+				struct x86_exception *exception)
 {
 	u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
 	return kvm_read_guest_virt_helper(addr, val, bytes, vcpu,
-					  access | PFERR_FETCH_MASK, error);
+					  access | PFERR_FETCH_MASK,
+					  exception);
 }
 
 static int kvm_read_guest_virt(gva_t addr, void *val, unsigned int bytes,
-			       struct kvm_vcpu *vcpu, u32 *error)
+			       struct kvm_vcpu *vcpu,
+			       struct x86_exception *exception)
 {
 	u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
 	return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access,
-					  error);
+					  exception);
 }
 
 static int kvm_read_guest_virt_system(gva_t addr, void *val, unsigned int bytes,
-			       struct kvm_vcpu *vcpu, u32 *error)
+				      struct kvm_vcpu *vcpu,
+				      struct x86_exception *exception)
 {
-	return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, 0, error);
+	return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, 0, exception);
 }
 
 static int kvm_write_guest_virt_system(gva_t addr, void *val,
 				       unsigned int bytes,
 				       struct kvm_vcpu *vcpu,
-				       u32 *error)
+				       struct x86_exception *exception)
 {
 	void *data = val;
 	int r = X86EMUL_CONTINUE;
@@ -3662,15 +3718,13 @@
 	while (bytes) {
 		gpa_t gpa =  vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr,
 							     PFERR_WRITE_MASK,
-							     error);
+							     exception);
 		unsigned offset = addr & (PAGE_SIZE-1);
 		unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset);
 		int ret;
 
-		if (gpa == UNMAPPED_GVA) {
-			r = X86EMUL_PROPAGATE_FAULT;
-			goto out;
-		}
+		if (gpa == UNMAPPED_GVA)
+			return X86EMUL_PROPAGATE_FAULT;
 		ret = kvm_write_guest(vcpu->kvm, gpa, data, towrite);
 		if (ret < 0) {
 			r = X86EMUL_IO_NEEDED;
@@ -3688,7 +3742,7 @@
 static int emulator_read_emulated(unsigned long addr,
 				  void *val,
 				  unsigned int bytes,
-				  unsigned int *error_code,
+				  struct x86_exception *exception,
 				  struct kvm_vcpu *vcpu)
 {
 	gpa_t                 gpa;
@@ -3701,7 +3755,7 @@
 		return X86EMUL_CONTINUE;
 	}
 
-	gpa = kvm_mmu_gva_to_gpa_read(vcpu, addr, error_code);
+	gpa = kvm_mmu_gva_to_gpa_read(vcpu, addr, exception);
 
 	if (gpa == UNMAPPED_GVA)
 		return X86EMUL_PROPAGATE_FAULT;
@@ -3710,8 +3764,8 @@
 	if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
 		goto mmio;
 
-	if (kvm_read_guest_virt(addr, val, bytes, vcpu, NULL)
-				== X86EMUL_CONTINUE)
+	if (kvm_read_guest_virt(addr, val, bytes, vcpu, exception)
+	    == X86EMUL_CONTINUE)
 		return X86EMUL_CONTINUE;
 
 mmio:
@@ -3735,7 +3789,7 @@
 }
 
 int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
-			  const void *val, int bytes)
+			const void *val, int bytes)
 {
 	int ret;
 
@@ -3749,12 +3803,12 @@
 static int emulator_write_emulated_onepage(unsigned long addr,
 					   const void *val,
 					   unsigned int bytes,
-					   unsigned int *error_code,
+					   struct x86_exception *exception,
 					   struct kvm_vcpu *vcpu)
 {
 	gpa_t                 gpa;
 
-	gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, error_code);
+	gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, exception);
 
 	if (gpa == UNMAPPED_GVA)
 		return X86EMUL_PROPAGATE_FAULT;
@@ -3787,7 +3841,7 @@
 int emulator_write_emulated(unsigned long addr,
 			    const void *val,
 			    unsigned int bytes,
-			    unsigned int *error_code,
+			    struct x86_exception *exception,
 			    struct kvm_vcpu *vcpu)
 {
 	/* Crossing a page boundary? */
@@ -3795,7 +3849,7 @@
 		int rc, now;
 
 		now = -addr & ~PAGE_MASK;
-		rc = emulator_write_emulated_onepage(addr, val, now, error_code,
+		rc = emulator_write_emulated_onepage(addr, val, now, exception,
 						     vcpu);
 		if (rc != X86EMUL_CONTINUE)
 			return rc;
@@ -3803,7 +3857,7 @@
 		val += now;
 		bytes -= now;
 	}
-	return emulator_write_emulated_onepage(addr, val, bytes, error_code,
+	return emulator_write_emulated_onepage(addr, val, bytes, exception,
 					       vcpu);
 }
 
@@ -3821,7 +3875,7 @@
 				     const void *old,
 				     const void *new,
 				     unsigned int bytes,
-				     unsigned int *error_code,
+				     struct x86_exception *exception,
 				     struct kvm_vcpu *vcpu)
 {
 	gpa_t gpa;
@@ -3879,7 +3933,7 @@
 emul_write:
 	printk_once(KERN_WARNING "kvm: emulating exchange as write\n");
 
-	return emulator_write_emulated(addr, new, bytes, error_code, vcpu);
+	return emulator_write_emulated(addr, new, bytes, exception, vcpu);
 }
 
 static int kernel_pio(struct kvm_vcpu *vcpu, void *pd)
@@ -3904,7 +3958,7 @@
 	if (vcpu->arch.pio.count)
 		goto data_avail;
 
-	trace_kvm_pio(0, port, size, 1);
+	trace_kvm_pio(0, port, size, count);
 
 	vcpu->arch.pio.port = port;
 	vcpu->arch.pio.in = 1;
@@ -3932,7 +3986,7 @@
 			      const void *val, unsigned int count,
 			      struct kvm_vcpu *vcpu)
 {
-	trace_kvm_pio(1, port, size, 1);
+	trace_kvm_pio(1, port, size, count);
 
 	vcpu->arch.pio.port = port;
 	vcpu->arch.pio.in = 0;
@@ -3973,13 +4027,15 @@
 		return X86EMUL_CONTINUE;
 
 	if (kvm_x86_ops->has_wbinvd_exit()) {
-		preempt_disable();
+		int cpu = get_cpu();
+
+		cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask);
 		smp_call_function_many(vcpu->arch.wbinvd_dirty_mask,
 				wbinvd_ipi, NULL, 1);
-		preempt_enable();
+		put_cpu();
 		cpumask_clear(vcpu->arch.wbinvd_dirty_mask);
-	}
-	wbinvd();
+	} else
+		wbinvd();
 	return X86EMUL_CONTINUE;
 }
 EXPORT_SYMBOL_GPL(kvm_emulate_wbinvd);
@@ -4019,7 +4075,7 @@
 		value = vcpu->arch.cr2;
 		break;
 	case 3:
-		value = vcpu->arch.cr3;
+		value = kvm_read_cr3(vcpu);
 		break;
 	case 4:
 		value = kvm_read_cr4(vcpu);
@@ -4053,7 +4109,7 @@
 		res = kvm_set_cr4(vcpu, mk_cr_64(kvm_read_cr4(vcpu), val));
 		break;
 	case 8:
-		res = __kvm_set_cr8(vcpu, val & 0xfUL);
+		res = kvm_set_cr8(vcpu, val);
 		break;
 	default:
 		vcpu_printf(vcpu, "%s: unexpected cr %u\n", __func__, cr);
@@ -4206,12 +4262,13 @@
 static void inject_emulated_exception(struct kvm_vcpu *vcpu)
 {
 	struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
-	if (ctxt->exception == PF_VECTOR)
-		kvm_propagate_fault(vcpu);
-	else if (ctxt->error_code_valid)
-		kvm_queue_exception_e(vcpu, ctxt->exception, ctxt->error_code);
+	if (ctxt->exception.vector == PF_VECTOR)
+		kvm_propagate_fault(vcpu, &ctxt->exception);
+	else if (ctxt->exception.error_code_valid)
+		kvm_queue_exception_e(vcpu, ctxt->exception.vector,
+				      ctxt->exception.error_code);
 	else
-		kvm_queue_exception(vcpu, ctxt->exception);
+		kvm_queue_exception(vcpu, ctxt->exception.vector);
 }
 
 static void init_emulate_ctxt(struct kvm_vcpu *vcpu)
@@ -4267,13 +4324,19 @@
 
 static int handle_emulation_failure(struct kvm_vcpu *vcpu)
 {
+	int r = EMULATE_DONE;
+
 	++vcpu->stat.insn_emulation_fail;
 	trace_kvm_emulate_insn_failed(vcpu);
-	vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
-	vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
-	vcpu->run->internal.ndata = 0;
+	if (!is_guest_mode(vcpu)) {
+		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+		vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
+		vcpu->run->internal.ndata = 0;
+		r = EMULATE_FAIL;
+	}
 	kvm_queue_exception(vcpu, UD_VECTOR);
-	return EMULATE_FAIL;
+
+	return r;
 }
 
 static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t gva)
@@ -4302,10 +4365,11 @@
 	return false;
 }
 
-int emulate_instruction(struct kvm_vcpu *vcpu,
-			unsigned long cr2,
-			u16 error_code,
-			int emulation_type)
+int x86_emulate_instruction(struct kvm_vcpu *vcpu,
+			    unsigned long cr2,
+			    int emulation_type,
+			    void *insn,
+			    int insn_len)
 {
 	int r;
 	struct decode_cache *c = &vcpu->arch.emulate_ctxt.decode;
@@ -4323,10 +4387,10 @@
 	if (!(emulation_type & EMULTYPE_NO_DECODE)) {
 		init_emulate_ctxt(vcpu);
 		vcpu->arch.emulate_ctxt.interruptibility = 0;
-		vcpu->arch.emulate_ctxt.exception = -1;
+		vcpu->arch.emulate_ctxt.have_exception = false;
 		vcpu->arch.emulate_ctxt.perm_ok = false;
 
-		r = x86_decode_insn(&vcpu->arch.emulate_ctxt);
+		r = x86_decode_insn(&vcpu->arch.emulate_ctxt, insn, insn_len);
 		if (r == X86EMUL_PROPAGATE_FAULT)
 			goto done;
 
@@ -4389,7 +4453,7 @@
 	}
 
 done:
-	if (vcpu->arch.emulate_ctxt.exception >= 0) {
+	if (vcpu->arch.emulate_ctxt.have_exception) {
 		inject_emulated_exception(vcpu);
 		r = EMULATE_DONE;
 	} else if (vcpu->arch.pio.count) {
@@ -4413,7 +4477,7 @@
 
 	return r;
 }
-EXPORT_SYMBOL_GPL(emulate_instruction);
+EXPORT_SYMBOL_GPL(x86_emulate_instruction);
 
 int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size, unsigned short port)
 {
@@ -4653,7 +4717,6 @@
 
 	kvm_x86_ops = ops;
 	kvm_mmu_set_nonpresent_ptes(0ull, 0ull);
-	kvm_mmu_set_base_ptes(PT_PRESENT_MASK);
 	kvm_mmu_set_mask_ptes(PT_USER_MASK, PT_ACCESSED_MASK,
 			PT_DIRTY_MASK, PT64_NX_MASK, 0);
 
@@ -5116,6 +5179,12 @@
 			vcpu->fpu_active = 0;
 			kvm_x86_ops->fpu_deactivate(vcpu);
 		}
+		if (kvm_check_request(KVM_REQ_APF_HALT, vcpu)) {
+			/* Page is swapped out. Do synthetic halt */
+			vcpu->arch.apf.halted = true;
+			r = 1;
+			goto out;
+		}
 	}
 
 	r = kvm_mmu_reload(vcpu);
@@ -5244,7 +5313,8 @@
 
 	r = 1;
 	while (r > 0) {
-		if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE)
+		if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
+		    !vcpu->arch.apf.halted)
 			r = vcpu_enter_guest(vcpu);
 		else {
 			srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
@@ -5257,6 +5327,7 @@
 					vcpu->arch.mp_state =
 						KVM_MP_STATE_RUNNABLE;
 				case KVM_MP_STATE_RUNNABLE:
+					vcpu->arch.apf.halted = false;
 					break;
 				case KVM_MP_STATE_SIPI_RECEIVED:
 				default:
@@ -5278,6 +5349,9 @@
 			vcpu->run->exit_reason = KVM_EXIT_INTR;
 			++vcpu->stat.request_irq_exits;
 		}
+
+		kvm_check_async_pf_completion(vcpu);
+
 		if (signal_pending(current)) {
 			r = -EINTR;
 			vcpu->run->exit_reason = KVM_EXIT_INTR;
@@ -5302,6 +5376,9 @@
 	int r;
 	sigset_t sigsaved;
 
+	if (!tsk_used_math(current) && init_fpu(current))
+		return -ENOMEM;
+
 	if (vcpu->sigset_active)
 		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
 
@@ -5313,8 +5390,12 @@
 	}
 
 	/* re-sync apic's tpr */
-	if (!irqchip_in_kernel(vcpu->kvm))
-		kvm_set_cr8(vcpu, kvm_run->cr8);
+	if (!irqchip_in_kernel(vcpu->kvm)) {
+		if (kvm_set_cr8(vcpu, kvm_run->cr8) != 0) {
+			r = -EINVAL;
+			goto out;
+		}
+	}
 
 	if (vcpu->arch.pio.count || vcpu->mmio_needed) {
 		if (vcpu->mmio_needed) {
@@ -5323,7 +5404,7 @@
 			vcpu->mmio_needed = 0;
 		}
 		vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
-		r = emulate_instruction(vcpu, 0, 0, EMULTYPE_NO_DECODE);
+		r = emulate_instruction(vcpu, EMULTYPE_NO_DECODE);
 		srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
 		if (r != EMULATE_DONE) {
 			r = 0;
@@ -5436,7 +5517,7 @@
 
 	sregs->cr0 = kvm_read_cr0(vcpu);
 	sregs->cr2 = vcpu->arch.cr2;
-	sregs->cr3 = vcpu->arch.cr3;
+	sregs->cr3 = kvm_read_cr3(vcpu);
 	sregs->cr4 = kvm_read_cr4(vcpu);
 	sregs->cr8 = kvm_get_cr8(vcpu);
 	sregs->efer = vcpu->arch.efer;
@@ -5504,8 +5585,9 @@
 	kvm_x86_ops->set_gdt(vcpu, &dt);
 
 	vcpu->arch.cr2 = sregs->cr2;
-	mmu_reset_needed |= vcpu->arch.cr3 != sregs->cr3;
+	mmu_reset_needed |= kvm_read_cr3(vcpu) != sregs->cr3;
 	vcpu->arch.cr3 = sregs->cr3;
+	__set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
 
 	kvm_set_cr8(vcpu, sregs->cr8);
 
@@ -5522,7 +5604,7 @@
 	if (sregs->cr4 & X86_CR4_OSXSAVE)
 		update_cpuid(vcpu);
 	if (!is_long_mode(vcpu) && is_pae(vcpu)) {
-		load_pdptrs(vcpu, vcpu->arch.walk_mmu, vcpu->arch.cr3);
+		load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu));
 		mmu_reset_needed = 1;
 	}
 
@@ -5773,6 +5855,8 @@
 
 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
 {
+	vcpu->arch.apf.msr_val = 0;
+
 	vcpu_load(vcpu);
 	kvm_mmu_unload(vcpu);
 	vcpu_put(vcpu);
@@ -5792,6 +5876,11 @@
 	vcpu->arch.dr7 = DR7_FIXED_1;
 
 	kvm_make_request(KVM_REQ_EVENT, vcpu);
+	vcpu->arch.apf.msr_val = 0;
+
+	kvm_clear_async_pf_completion_queue(vcpu);
+	kvm_async_pf_hash_reset(vcpu);
+	vcpu->arch.apf.halted = false;
 
 	return kvm_x86_ops->vcpu_reset(vcpu);
 }
@@ -5881,6 +5970,8 @@
 	if (!zalloc_cpumask_var(&vcpu->arch.wbinvd_dirty_mask, GFP_KERNEL))
 		goto fail_free_mce_banks;
 
+	kvm_async_pf_hash_reset(vcpu);
+
 	return 0;
 fail_free_mce_banks:
 	kfree(vcpu->arch.mce_banks);
@@ -5906,13 +5997,8 @@
 	free_page((unsigned long)vcpu->arch.pio_data);
 }
 
-struct  kvm *kvm_arch_create_vm(void)
+int kvm_arch_init_vm(struct kvm *kvm)
 {
-	struct kvm *kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);
-
-	if (!kvm)
-		return ERR_PTR(-ENOMEM);
-
 	INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
 	INIT_LIST_HEAD(&kvm->arch.assigned_dev_head);
 
@@ -5921,7 +6007,7 @@
 
 	spin_lock_init(&kvm->arch.tsc_write_lock);
 
-	return kvm;
+	return 0;
 }
 
 static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
@@ -5939,8 +6025,10 @@
 	/*
 	 * Unpin any mmu pages first.
 	 */
-	kvm_for_each_vcpu(i, vcpu, kvm)
+	kvm_for_each_vcpu(i, vcpu, kvm) {
+		kvm_clear_async_pf_completion_queue(vcpu);
 		kvm_unload_vcpu_mmu(vcpu);
+	}
 	kvm_for_each_vcpu(i, vcpu, kvm)
 		kvm_arch_vcpu_free(vcpu);
 
@@ -5964,13 +6052,10 @@
 	kfree(kvm->arch.vpic);
 	kfree(kvm->arch.vioapic);
 	kvm_free_vcpus(kvm);
-	kvm_free_physmem(kvm);
 	if (kvm->arch.apic_access_page)
 		put_page(kvm->arch.apic_access_page);
 	if (kvm->arch.ept_identity_pagetable)
 		put_page(kvm->arch.ept_identity_pagetable);
-	cleanup_srcu_struct(&kvm->srcu);
-	kfree(kvm);
 }
 
 int kvm_arch_prepare_memory_region(struct kvm *kvm,
@@ -6051,7 +6136,9 @@
 
 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
 {
-	return vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE
+	return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
+		!vcpu->arch.apf.halted)
+		|| !list_empty_careful(&vcpu->async_pf.done)
 		|| vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED
 		|| vcpu->arch.nmi_pending ||
 		(kvm_arch_interrupt_allowed(vcpu) &&
@@ -6110,6 +6197,147 @@
 }
 EXPORT_SYMBOL_GPL(kvm_set_rflags);
 
+void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work)
+{
+	int r;
+
+	if ((vcpu->arch.mmu.direct_map != work->arch.direct_map) ||
+	      is_error_page(work->page))
+		return;
+
+	r = kvm_mmu_reload(vcpu);
+	if (unlikely(r))
+		return;
+
+	if (!vcpu->arch.mmu.direct_map &&
+	      work->arch.cr3 != vcpu->arch.mmu.get_cr3(vcpu))
+		return;
+
+	vcpu->arch.mmu.page_fault(vcpu, work->gva, 0, true);
+}
+
+static inline u32 kvm_async_pf_hash_fn(gfn_t gfn)
+{
+	return hash_32(gfn & 0xffffffff, order_base_2(ASYNC_PF_PER_VCPU));
+}
+
+static inline u32 kvm_async_pf_next_probe(u32 key)
+{
+	return (key + 1) & (roundup_pow_of_two(ASYNC_PF_PER_VCPU) - 1);
+}
+
+static void kvm_add_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
+{
+	u32 key = kvm_async_pf_hash_fn(gfn);
+
+	while (vcpu->arch.apf.gfns[key] != ~0)
+		key = kvm_async_pf_next_probe(key);
+
+	vcpu->arch.apf.gfns[key] = gfn;
+}
+
+static u32 kvm_async_pf_gfn_slot(struct kvm_vcpu *vcpu, gfn_t gfn)
+{
+	int i;
+	u32 key = kvm_async_pf_hash_fn(gfn);
+
+	for (i = 0; i < roundup_pow_of_two(ASYNC_PF_PER_VCPU) &&
+		     (vcpu->arch.apf.gfns[key] != gfn &&
+		      vcpu->arch.apf.gfns[key] != ~0); i++)
+		key = kvm_async_pf_next_probe(key);
+
+	return key;
+}
+
+bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
+{
+	return vcpu->arch.apf.gfns[kvm_async_pf_gfn_slot(vcpu, gfn)] == gfn;
+}
+
+static void kvm_del_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
+{
+	u32 i, j, k;
+
+	i = j = kvm_async_pf_gfn_slot(vcpu, gfn);
+	while (true) {
+		vcpu->arch.apf.gfns[i] = ~0;
+		do {
+			j = kvm_async_pf_next_probe(j);
+			if (vcpu->arch.apf.gfns[j] == ~0)
+				return;
+			k = kvm_async_pf_hash_fn(vcpu->arch.apf.gfns[j]);
+			/*
+			 * k lies cyclically in ]i,j]
+			 * |    i.k.j |
+			 * |....j i.k.| or  |.k..j i...|
+			 */
+		} while ((i <= j) ? (i < k && k <= j) : (i < k || k <= j));
+		vcpu->arch.apf.gfns[i] = vcpu->arch.apf.gfns[j];
+		i = j;
+	}
+}
+
+static int apf_put_user(struct kvm_vcpu *vcpu, u32 val)
+{
+
+	return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apf.data, &val,
+				      sizeof(val));
+}
+
+void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
+				     struct kvm_async_pf *work)
+{
+	struct x86_exception fault;
+
+	trace_kvm_async_pf_not_present(work->arch.token, work->gva);
+	kvm_add_async_pf_gfn(vcpu, work->arch.gfn);
+
+	if (!(vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED) ||
+	    (vcpu->arch.apf.send_user_only &&
+	     kvm_x86_ops->get_cpl(vcpu) == 0))
+		kvm_make_request(KVM_REQ_APF_HALT, vcpu);
+	else if (!apf_put_user(vcpu, KVM_PV_REASON_PAGE_NOT_PRESENT)) {
+		fault.vector = PF_VECTOR;
+		fault.error_code_valid = true;
+		fault.error_code = 0;
+		fault.nested_page_fault = false;
+		fault.address = work->arch.token;
+		kvm_inject_page_fault(vcpu, &fault);
+	}
+}
+
+void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
+				 struct kvm_async_pf *work)
+{
+	struct x86_exception fault;
+
+	trace_kvm_async_pf_ready(work->arch.token, work->gva);
+	if (is_error_page(work->page))
+		work->arch.token = ~0; /* broadcast wakeup */
+	else
+		kvm_del_async_pf_gfn(vcpu, work->arch.gfn);
+
+	if ((vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED) &&
+	    !apf_put_user(vcpu, KVM_PV_REASON_PAGE_READY)) {
+		fault.vector = PF_VECTOR;
+		fault.error_code_valid = true;
+		fault.error_code = 0;
+		fault.nested_page_fault = false;
+		fault.address = work->arch.token;
+		kvm_inject_page_fault(vcpu, &fault);
+	}
+	vcpu->arch.apf.halted = false;
+}
+
+bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
+{
+	if (!(vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED))
+		return true;
+	else
+		return !kvm_event_needs_reinjection(vcpu) &&
+			kvm_x86_ops->interrupt_allowed(vcpu);
+}
+
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_exit);
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_inj_virq);
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_page_fault);
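
The async page fault support added above tracks outstanding gfns per vcpu in a small open-addressed hash table: kvm_add_async_pf_gfn() linearly probes for a free slot, and kvm_del_async_pf_gfn() repacks the probe cluster so no tombstone entries are needed. Below is a stand-alone sketch of the same probe-and-repack scheme; the table size, hash function and test values are illustrative only and not taken from the patch.

/*
 * Stand-alone sketch of the open-addressed gfn table used above: linear
 * probing on insert and lookup, plus the classic "repack the cluster"
 * deletion so no tombstones are needed.  Table size, hash function and
 * the values in main() are illustrative assumptions.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define TABLE_SIZE 64U			/* must be a power of two */
#define EMPTY	   (~0ULL)		/* mirrors the ~0 sentinel above */

static uint64_t table[TABLE_SIZE];

static uint32_t hash_fn(uint64_t gfn)
{
	return (uint32_t)(gfn * 2654435761U) & (TABLE_SIZE - 1);
}

static uint32_t next_probe(uint32_t key)
{
	return (key + 1) & (TABLE_SIZE - 1);
}

static void add_gfn(uint64_t gfn)
{
	uint32_t key = hash_fn(gfn);

	while (table[key] != EMPTY)
		key = next_probe(key);
	table[key] = gfn;
}

static uint32_t gfn_slot(uint64_t gfn)
{
	uint32_t key = hash_fn(gfn);
	unsigned int i;

	for (i = 0; i < TABLE_SIZE &&
		    table[key] != gfn && table[key] != EMPTY; i++)
		key = next_probe(key);
	return key;
}

static void del_gfn(uint64_t gfn)
{
	uint32_t i, j, k;

	i = j = gfn_slot(gfn);
	while (true) {
		table[i] = EMPTY;
		do {
			j = next_probe(j);
			if (table[j] == EMPTY)
				return;
			k = hash_fn(table[j]);
			/*
			 * Keep scanning while the entry's home slot k lies
			 * cyclically in (i, j]; such entries may stay put.
			 */
		} while ((i <= j) ? (i < k && k <= j) : (i < k || k <= j));
		/* the entry at j belongs before the hole: move it into it */
		table[i] = table[j];
		i = j;
	}
}

int main(void)
{
	uint64_t a = 0x1000, b = 0x1000 + TABLE_SIZE;	/* b collides with a */

	memset(table, 0xff, sizeof(table));		/* all slots EMPTY */
	add_gfn(a);
	add_gfn(b);
	del_gfn(a);
	printf("colliding gfn still found after delete: %s\n",
	       table[gfn_slot(b)] == b ? "yes" : "no");
	return 0;
}
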
diff --git a/arch/x86/mm/amdtopology_64.c b/arch/x86/mm/amdtopology_64.c
index 08a0069..f21962c 100644
--- a/arch/x86/mm/amdtopology_64.c
+++ b/arch/x86/mm/amdtopology_64.c
@@ -27,6 +27,7 @@
 #include <asm/amd_nb.h>
 
 static struct bootnode __initdata nodes[8];
+static unsigned char __initdata nodeids[8];
 static nodemask_t __initdata nodes_parsed = NODE_MASK_NONE;
 
 static __init int find_northbridge(void)
@@ -68,19 +69,6 @@
 #endif
 }
 
-int __init amd_get_nodes(struct bootnode *physnodes)
-{
-	int i;
-	int ret = 0;
-
-	for_each_node_mask(i, nodes_parsed) {
-		physnodes[ret].start = nodes[i].start;
-		physnodes[ret].end = nodes[i].end;
-		ret++;
-	}
-	return ret;
-}
-
 int __init amd_numa_init(unsigned long start_pfn, unsigned long end_pfn)
 {
 	unsigned long start = PFN_PHYS(start_pfn);
@@ -113,7 +101,7 @@
 		base = read_pci_config(0, nb, 1, 0x40 + i*8);
 		limit = read_pci_config(0, nb, 1, 0x44 + i*8);
 
-		nodeid = limit & 7;
+		nodeids[i] = nodeid = limit & 7;
 		if ((base & 3) == 0) {
 			if (i < numnodes)
 				pr_info("Skipping disabled node %d\n", i);
@@ -193,6 +181,76 @@
 	return 0;
 }
 
+#ifdef CONFIG_NUMA_EMU
+static s16 fake_apicid_to_node[MAX_LOCAL_APIC] __initdata = {
+	[0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE
+};
+
+void __init amd_get_nodes(struct bootnode *physnodes)
+{
+	int i;
+
+	for_each_node_mask(i, nodes_parsed) {
+		physnodes[i].start = nodes[i].start;
+		physnodes[i].end = nodes[i].end;
+	}
+}
+
+static int __init find_node_by_addr(unsigned long addr)
+{
+	int ret = NUMA_NO_NODE;
+	int i;
+
+	for (i = 0; i < 8; i++)
+		if (addr >= nodes[i].start && addr < nodes[i].end) {
+			ret = i;
+			break;
+		}
+	return ret;
+}
+
+/*
+ * For NUMA emulation, fake proximity domain (_PXM) to node id mappings must be
+ * set up to represent the physical topology but reflect the emulated
+ * environment.  For each emulated node, the real node which it appears on is
+ * found and a fake pxm to nid mapping is created which mirrors the actual
+ * locality.  node_distance() then represents the correct distances between
+ * emulated nodes by using the fake acpi mappings to pxms.
+ */
+void __init amd_fake_nodes(const struct bootnode *nodes, int nr_nodes)
+{
+	unsigned int bits;
+	unsigned int cores;
+	unsigned int apicid_base = 0;
+	int i;
+
+	bits = boot_cpu_data.x86_coreid_bits;
+	cores = 1 << bits;
+	early_get_boot_cpu_id();
+	if (boot_cpu_physical_apicid > 0)
+		apicid_base = boot_cpu_physical_apicid;
+
+	for (i = 0; i < nr_nodes; i++) {
+		int index;
+		int nid;
+		int j;
+
+		nid = find_node_by_addr(nodes[i].start);
+		if (nid == NUMA_NO_NODE)
+			continue;
+
+		index = nodeids[nid] << bits;
+		if (fake_apicid_to_node[index + apicid_base] == NUMA_NO_NODE)
+			for (j = apicid_base; j < cores + apicid_base; j++)
+				fake_apicid_to_node[index + j] = i;
+#ifdef CONFIG_ACPI_NUMA
+		__acpi_map_pxm_to_node(nid, i);
+#endif
+	}
+	memcpy(apicid_to_node, fake_apicid_to_node, sizeof(apicid_to_node));
+}
+#endif /* CONFIG_NUMA_EMU */
+
 int __init amd_scan_nodes(void)
 {
 	unsigned int bits;
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index f89b5bb..c821074 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -45,6 +45,7 @@
 #include <asm/bugs.h>
 #include <asm/tlb.h>
 #include <asm/tlbflush.h>
+#include <asm/olpc_ofw.h>
 #include <asm/pgalloc.h>
 #include <asm/sections.h>
 #include <asm/paravirt.h>
@@ -715,6 +716,7 @@
 	/*
 	 * NOTE: at this point the bootmem allocator is fully available.
 	 */
+	olpc_dt_build_devicetree();
 	sparse_init();
 	zone_sizes_init();
 }
diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c
index 7762a51..1e72102 100644
--- a/arch/x86/mm/numa_64.c
+++ b/arch/x86/mm/numa_64.c
@@ -260,30 +260,30 @@
 #ifdef CONFIG_NUMA_EMU
 /* Numa emulation */
 static struct bootnode nodes[MAX_NUMNODES] __initdata;
-static struct bootnode physnodes[MAX_NUMNODES] __initdata;
+static struct bootnode physnodes[MAX_NUMNODES] __cpuinitdata;
 static char *cmdline __initdata;
 
 static int __init setup_physnodes(unsigned long start, unsigned long end,
 					int acpi, int amd)
 {
-	int nr_nodes = 0;
 	int ret = 0;
 	int i;
 
+	memset(physnodes, 0, sizeof(physnodes));
 #ifdef CONFIG_ACPI_NUMA
 	if (acpi)
-		nr_nodes = acpi_get_nodes(physnodes);
+		acpi_get_nodes(physnodes, start, end);
 #endif
 #ifdef CONFIG_AMD_NUMA
 	if (amd)
-		nr_nodes = amd_get_nodes(physnodes);
+		amd_get_nodes(physnodes);
 #endif
 	/*
 	 * Basic sanity checking on the physical node map: there may be errors
 	 * if the SRAT or AMD code incorrectly reported the topology or the mem=
 	 * kernel parameter is used.
 	 */
-	for (i = 0; i < nr_nodes; i++) {
+	for (i = 0; i < MAX_NUMNODES; i++) {
 		if (physnodes[i].start == physnodes[i].end)
 			continue;
 		if (physnodes[i].start > end) {
@@ -298,17 +298,6 @@
 			physnodes[i].start = start;
 		if (physnodes[i].end > end)
 			physnodes[i].end = end;
-	}
-
-	/*
-	 * Remove all nodes that have no memory or were truncated because of the
-	 * limited address range.
-	 */
-	for (i = 0; i < nr_nodes; i++) {
-		if (physnodes[i].start == physnodes[i].end)
-			continue;
-		physnodes[ret].start = physnodes[i].start;
-		physnodes[ret].end = physnodes[i].end;
 		ret++;
 	}
 
@@ -324,6 +313,24 @@
 	return ret;
 }
 
+static void __init fake_physnodes(int acpi, int amd, int nr_nodes)
+{
+	int i;
+
+	BUG_ON(acpi && amd);
+#ifdef CONFIG_ACPI_NUMA
+	if (acpi)
+		acpi_fake_nodes(nodes, nr_nodes);
+#endif
+#ifdef CONFIG_AMD_NUMA
+	if (amd)
+		amd_fake_nodes(nodes, nr_nodes);
+#endif
+	if (!acpi && !amd)
+		for (i = 0; i < nr_cpu_ids; i++)
+			numa_set_node(i, 0);
+}
+
 /*
  * Setups up nid to range from addr to addr + size.  If the end
  * boundary is greater than max_addr, then max_addr is used instead.
@@ -352,8 +359,7 @@
  * Sets up nr_nodes fake nodes interleaved over physical nodes ranging from addr
  * to max_addr.  The return value is the number of nodes allocated.
  */
-static int __init split_nodes_interleave(u64 addr, u64 max_addr,
-						int nr_phys_nodes, int nr_nodes)
+static int __init split_nodes_interleave(u64 addr, u64 max_addr, int nr_nodes)
 {
 	nodemask_t physnode_mask = NODE_MASK_NONE;
 	u64 size;
@@ -384,7 +390,7 @@
 		return -1;
 	}
 
-	for (i = 0; i < nr_phys_nodes; i++)
+	for (i = 0; i < MAX_NUMNODES; i++)
 		if (physnodes[i].start != physnodes[i].end)
 			node_set(i, physnode_mask);
 
@@ -553,11 +559,9 @@
 {
 	u64 addr = start_pfn << PAGE_SHIFT;
 	u64 max_addr = last_pfn << PAGE_SHIFT;
-	int num_phys_nodes;
 	int num_nodes;
 	int i;
 
-	num_phys_nodes = setup_physnodes(addr, max_addr, acpi, amd);
 	/*
 	 * If the numa=fake command-line contains a 'M' or 'G', it represents
 	 * the fixed node size.  Otherwise, if it is just a single number N,
@@ -572,7 +576,7 @@
 		unsigned long n;
 
 		n = simple_strtoul(cmdline, NULL, 0);
-		num_nodes = split_nodes_interleave(addr, max_addr, num_phys_nodes, n);
+		num_nodes = split_nodes_interleave(addr, max_addr, n);
 	}
 
 	if (num_nodes < 0)
@@ -595,7 +599,8 @@
 						nodes[i].end >> PAGE_SHIFT);
 		setup_node_bootmem(i, nodes[i].start, nodes[i].end);
 	}
-	acpi_fake_nodes(nodes, num_nodes);
+	setup_physnodes(addr, max_addr, acpi, amd);
+	fake_physnodes(acpi, amd, num_nodes);
 	numa_init_array();
 	return 0;
 }
@@ -610,8 +615,12 @@
 	nodes_clear(node_online_map);
 
 #ifdef CONFIG_NUMA_EMU
+	setup_physnodes(start_pfn << PAGE_SHIFT, last_pfn << PAGE_SHIFT,
+			acpi, amd);
 	if (cmdline && !numa_emulation(start_pfn, last_pfn, acpi, amd))
 		return;
+	setup_physnodes(start_pfn << PAGE_SHIFT, last_pfn << PAGE_SHIFT,
+			acpi, amd);
 	nodes_clear(node_possible_map);
 	nodes_clear(node_online_map);
 #endif
@@ -767,6 +776,7 @@
 
 #ifndef CONFIG_DEBUG_PER_CPU_MAPS
 
+#ifndef CONFIG_NUMA_EMU
 void __cpuinit numa_add_cpu(int cpu)
 {
 	cpumask_set_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
@@ -776,34 +786,115 @@
 {
 	cpumask_clear_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
 }
+#else
+void __cpuinit numa_add_cpu(int cpu)
+{
+	unsigned long addr;
+	u16 apicid;
+	int physnid;
+	int nid = NUMA_NO_NODE;
+
+	apicid = early_per_cpu(x86_cpu_to_apicid, cpu);
+	if (apicid != BAD_APICID)
+		nid = apicid_to_node[apicid];
+	if (nid == NUMA_NO_NODE)
+		nid = early_cpu_to_node(cpu);
+	BUG_ON(nid == NUMA_NO_NODE || !node_online(nid));
+
+	/*
+	 * Use the starting address of the emulated node to find which physical
+	 * node it is allocated on.
+	 */
+	addr = node_start_pfn(nid) << PAGE_SHIFT;
+	for (physnid = 0; physnid < MAX_NUMNODES; physnid++)
+		if (addr >= physnodes[physnid].start &&
+		    addr < physnodes[physnid].end)
+			break;
+
+	/*
+	 * Map the cpu to each emulated node that is allocated on the physical
+	 * node of the cpu's apic id.
+	 */
+	for_each_online_node(nid) {
+		addr = node_start_pfn(nid) << PAGE_SHIFT;
+		if (addr >= physnodes[physnid].start &&
+		    addr < physnodes[physnid].end)
+			cpumask_set_cpu(cpu, node_to_cpumask_map[nid]);
+	}
+}
+
+void __cpuinit numa_remove_cpu(int cpu)
+{
+	int i;
+
+	for_each_online_node(i)
+		cpumask_clear_cpu(cpu, node_to_cpumask_map[i]);
+}
+#endif /* !CONFIG_NUMA_EMU */
 
 #else /* CONFIG_DEBUG_PER_CPU_MAPS */
-
-/*
- * --------- debug versions of the numa functions ---------
- */
-static void __cpuinit numa_set_cpumask(int cpu, int enable)
+static struct cpumask __cpuinit *debug_cpumask_set_cpu(int cpu, int enable)
 {
 	int node = early_cpu_to_node(cpu);
 	struct cpumask *mask;
 	char buf[64];
 
 	mask = node_to_cpumask_map[node];
-	if (mask == NULL) {
-		printk(KERN_ERR "node_to_cpumask_map[%i] NULL\n", node);
+	if (!mask) {
+		pr_err("node_to_cpumask_map[%i] NULL\n", node);
 		dump_stack();
-		return;
+		return NULL;
 	}
 
+	cpulist_scnprintf(buf, sizeof(buf), mask);
+	printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n",
+		enable ? "numa_add_cpu" : "numa_remove_cpu",
+		cpu, node, buf);
+	return mask;
+}
+
+/*
+ * --------- debug versions of the numa functions ---------
+ */
+#ifndef CONFIG_NUMA_EMU
+static void __cpuinit numa_set_cpumask(int cpu, int enable)
+{
+	struct cpumask *mask;
+
+	mask = debug_cpumask_set_cpu(cpu, enable);
+	if (!mask)
+		return;
+
 	if (enable)
 		cpumask_set_cpu(cpu, mask);
 	else
 		cpumask_clear_cpu(cpu, mask);
-
-	cpulist_scnprintf(buf, sizeof(buf), mask);
-	printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n",
-		enable ? "numa_add_cpu" : "numa_remove_cpu", cpu, node, buf);
 }
+#else
+static void __cpuinit numa_set_cpumask(int cpu, int enable)
+{
+	int node = early_cpu_to_node(cpu);
+	struct cpumask *mask;
+	int i;
+
+	for_each_online_node(i) {
+		unsigned long addr;
+
+		addr = node_start_pfn(i) << PAGE_SHIFT;
+		if (addr < physnodes[node].start ||
+					addr >= physnodes[node].end)
+			continue;
+		mask = debug_cpumask_set_cpu(cpu, enable);
+		if (!mask)
+			return;
+
+		if (enable)
+			cpumask_set_cpu(cpu, mask);
+		else
+			cpumask_clear_cpu(cpu, mask);
+	}
+}
+#endif /* CONFIG_NUMA_EMU */
 
 void __cpuinit numa_add_cpu(int cpu)
 {
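
With NUMA emulation enabled, the numa_add_cpu() variant above places a cpu in the cpumask of every emulated node that was carved out of the physical node its apicid maps to. A rough user-space sketch of that mapping follows; the ranges, node counts and cpu-to-physical-node table are made-up test data, not values from the patch.

/*
 * Illustrative sketch of the NUMA-emulation numa_add_cpu() logic above:
 * a cpu joins the cpumask of every emulated node whose memory lies inside
 * the physical node the cpu belongs to.  All data below is fabricated.
 */
#include <stdio.h>

struct range { unsigned long start, end; };

#define NR_PHYS  2
#define NR_EMU   4
#define NR_CPUS  4

static const struct range physnodes[NR_PHYS] = {
	{ 0x00000000, 0x40000000 },	/* physical node 0: first 1G  */
	{ 0x40000000, 0x80000000 },	/* physical node 1: second 1G */
};

/* start address of each emulated node (two carved from each physical node) */
static const unsigned long emu_start[NR_EMU] = {
	0x00000000, 0x20000000, 0x40000000, 0x60000000,
};

/* physical node of each cpu (stand-in for the apicid_to_node[] lookup) */
static const int cpu_to_physnid[NR_CPUS] = { 0, 0, 1, 1 };

static unsigned long node_to_cpumask[NR_EMU];

static void numa_add_cpu(int cpu)
{
	int physnid = cpu_to_physnid[cpu];
	int nid;

	for (nid = 0; nid < NR_EMU; nid++)
		if (emu_start[nid] >= physnodes[physnid].start &&
		    emu_start[nid] <  physnodes[physnid].end)
			node_to_cpumask[nid] |= 1UL << cpu;
}

int main(void)
{
	int cpu, nid;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		numa_add_cpu(cpu);

	for (nid = 0; nid < NR_EMU; nid++)
		printf("emulated node %d: cpumask %#lx\n",
		       nid, node_to_cpumask[nid]);
	return 0;
}
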
diff --git a/arch/x86/mm/srat_64.c b/arch/x86/mm/srat_64.c
index 171a0aa..603d285 100644
--- a/arch/x86/mm/srat_64.c
+++ b/arch/x86/mm/srat_64.c
@@ -349,18 +349,19 @@
 
 void __init acpi_numa_arch_fixup(void) {}
 
-int __init acpi_get_nodes(struct bootnode *physnodes)
+#ifdef CONFIG_NUMA_EMU
+void __init acpi_get_nodes(struct bootnode *physnodes, unsigned long start,
+				unsigned long end)
 {
 	int i;
-	int ret = 0;
 
 	for_each_node_mask(i, nodes_parsed) {
-		physnodes[ret].start = nodes[i].start;
-		physnodes[ret].end = nodes[i].end;
-		ret++;
+		cutoff_node(i, start, end);
+		physnodes[i].start = nodes[i].start;
+		physnodes[i].end = nodes[i].end;
 	}
-	return ret;
 }
+#endif /* CONFIG_NUMA_EMU */
 
 /* Use the information discovered above to actually set up the nodes. */
 int __init acpi_scan_nodes(unsigned long start, unsigned long end)
@@ -505,8 +506,6 @@
 {
 	int i, j;
 
-	printk(KERN_INFO "Faking PXM affinity for fake nodes on real "
-			 "topology.\n");
 	for (i = 0; i < num_nodes; i++) {
 		int nid, pxm;
 
@@ -526,6 +525,17 @@
 			    fake_apicid_to_node[j] == NUMA_NO_NODE)
 				fake_apicid_to_node[j] = i;
 	}
+
+	/*
+	 * If there are apicid-to-node mappings for physical nodes that do not
+	 * have a corresponding emulated node, they should default to a guaranteed
+	 * value.
+	 */
+	for (i = 0; i < MAX_LOCAL_APIC; i++)
+		if (apicid_to_node[i] != NUMA_NO_NODE &&
+		    fake_apicid_to_node[i] == NUMA_NO_NODE)
+			fake_apicid_to_node[i] = 0;
+
 	for (i = 0; i < num_nodes; i++)
 		__acpi_map_pxm_to_node(fake_node_to_pxm_map[i], i);
 	memcpy(apicid_to_node, fake_apicid_to_node, sizeof(apicid_to_node));
diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
index f24a853..e2b7b0c 100644
--- a/arch/x86/oprofile/nmi_int.c
+++ b/arch/x86/oprofile/nmi_int.c
@@ -65,7 +65,6 @@
 
 	switch (val) {
 	case DIE_NMI:
-	case DIE_NMI_IPI:
 		if (ctr_running)
 			model->check_ctrs(args->regs, &__get_cpu_var(cpu_msrs));
 		else if (!nmi_enabled)
@@ -361,7 +360,7 @@
 static struct notifier_block profile_exceptions_nb = {
 	.notifier_call = profile_exceptions_notify,
 	.next = NULL,
-	.priority = 2
+	.priority = NMI_LOCAL_LOW_PRIOR,
 };
 
 static void nmi_cpu_restore_registers(struct op_msrs *msrs)
diff --git a/arch/x86/oprofile/nmi_timer_int.c b/arch/x86/oprofile/nmi_timer_int.c
index 0636dd9..720bf5a 100644
--- a/arch/x86/oprofile/nmi_timer_int.c
+++ b/arch/x86/oprofile/nmi_timer_int.c
@@ -38,7 +38,7 @@
 static struct notifier_block profile_timer_exceptions_nb = {
 	.notifier_call = profile_timer_exceptions_notify,
 	.next = NULL,
-	.priority = 0
+	.priority = NMI_LOW_PRIOR,
 };
 
 static int timer_start(void)
diff --git a/arch/x86/pci/amd_bus.c b/arch/x86/pci/amd_bus.c
index fc1e8fe..e27dffb 100644
--- a/arch/x86/pci/amd_bus.c
+++ b/arch/x86/pci/amd_bus.c
@@ -4,6 +4,7 @@
 #include <linux/cpu.h>
 #include <linux/range.h>
 
+#include <asm/amd_nb.h>
 #include <asm/pci_x86.h>
 
 #include <asm/pci-direct.h>
@@ -378,6 +379,34 @@
 	.notifier_call	= amd_cpu_notify,
 };
 
+static void __init pci_enable_pci_io_ecs(void)
+{
+#ifdef CONFIG_AMD_NB
+	unsigned int i, n;
+
+	for (n = i = 0; !n && amd_nb_bus_dev_ranges[i].dev_limit; ++i) {
+		u8 bus = amd_nb_bus_dev_ranges[i].bus;
+		u8 slot = amd_nb_bus_dev_ranges[i].dev_base;
+		u8 limit = amd_nb_bus_dev_ranges[i].dev_limit;
+
+		for (; slot < limit; ++slot) {
+			u32 val = read_pci_config(bus, slot, 3, 0);
+
+			if (!early_is_amd_nb(val))
+				continue;
+
+			val = read_pci_config(bus, slot, 3, 0x8c);
+			if (!(val & (ENABLE_CF8_EXT_CFG >> 32))) {
+				val |= ENABLE_CF8_EXT_CFG >> 32;
+				write_pci_config(bus, slot, 3, 0x8c, val);
+			}
+			++n;
+		}
+	}
+	pr_info("Extended Config Space enabled on %u nodes\n", n);
+#endif
+}
+
 static int __init pci_io_ecs_init(void)
 {
 	int cpu;
@@ -386,6 +415,10 @@
         if (boot_cpu_data.x86 < 0x10)
 		return 0;
 
+	/* Try the PCI method first. */
+	if (early_pci_allowed())
+		pci_enable_pci_io_ecs();
+
 	register_cpu_notifier(&amd_cpu_notifier);
 	for_each_online_cpu(cpu)
 		amd_cpu_notify(&amd_cpu_notifier, (unsigned long)CPU_ONLINE,
diff --git a/arch/x86/platform/mrst/early_printk_mrst.c b/arch/x86/platform/mrst/early_printk_mrst.c
index 65df603..25bfdbb 100644
--- a/arch/x86/platform/mrst/early_printk_mrst.c
+++ b/arch/x86/platform/mrst/early_printk_mrst.c
@@ -103,7 +103,7 @@
 static unsigned long mrst_spi_paddr = MRST_REGBASE_SPI0;
 
 static u32 *pclk_spi0;
-/* Always contains an accessable address, start with 0 */
+/* Always contains an accessible address, start with 0 */
 static struct dw_spi_reg *pspi;
 
 static struct kmsg_dumper dw_dumper;
diff --git a/arch/x86/platform/olpc/Makefile b/arch/x86/platform/olpc/Makefile
index c31b8fc..e797428 100644
--- a/arch/x86/platform/olpc/Makefile
+++ b/arch/x86/platform/olpc/Makefile
@@ -1,3 +1,4 @@
 obj-$(CONFIG_OLPC)		+= olpc.o
 obj-$(CONFIG_OLPC_XO1)		+= olpc-xo1.o
 obj-$(CONFIG_OLPC_OPENFIRMWARE)	+= olpc_ofw.o
+obj-$(CONFIG_OLPC_OPENFIRMWARE_DT)	+= olpc_dt.o
diff --git a/arch/x86/platform/olpc/olpc_dt.c b/arch/x86/platform/olpc/olpc_dt.c
new file mode 100644
index 0000000..dab8746
--- /dev/null
+++ b/arch/x86/platform/olpc/olpc_dt.c
@@ -0,0 +1,183 @@
+/*
+ * OLPC-specific OFW device tree support code.
+ *
+ * Paul Mackerras	August 1996.
+ * Copyright (C) 1996-2005 Paul Mackerras.
+ *
+ *  Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner.
+ *    {engebret|bergner}@us.ibm.com
+ *
+ *  Adapted for sparc by David S. Miller davem@davemloft.net
+ *  Adapted for x86/OLPC by Andres Salomon <dilinger@queued.net>
+ *
+ *      This program is free software; you can redistribute it and/or
+ *      modify it under the terms of the GNU General Public License
+ *      as published by the Free Software Foundation; either version
+ *      2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/bootmem.h>
+#include <linux/of.h>
+#include <linux/of_pdt.h>
+#include <asm/olpc_ofw.h>
+
+static phandle __init olpc_dt_getsibling(phandle node)
+{
+	const void *args[] = { (void *)node };
+	void *res[] = { &node };
+
+	if ((s32)node == -1)
+		return 0;
+
+	if (olpc_ofw("peer", args, res) || (s32)node == -1)
+		return 0;
+
+	return node;
+}
+
+static phandle __init olpc_dt_getchild(phandle node)
+{
+	const void *args[] = { (void *)node };
+	void *res[] = { &node };
+
+	if ((s32)node == -1)
+		return 0;
+
+	if (olpc_ofw("child", args, res) || (s32)node == -1) {
+		pr_err("PROM: %s: fetching child failed!\n", __func__);
+		return 0;
+	}
+
+	return node;
+}
+
+static int __init olpc_dt_getproplen(phandle node, const char *prop)
+{
+	const void *args[] = { (void *)node, prop };
+	int len;
+	void *res[] = { &len };
+
+	if ((s32)node == -1)
+		return -1;
+
+	if (olpc_ofw("getproplen", args, res)) {
+		pr_err("PROM: %s: getproplen failed!\n", __func__);
+		return -1;
+	}
+
+	return len;
+}
+
+static int __init olpc_dt_getproperty(phandle node, const char *prop,
+		char *buf, int bufsize)
+{
+	int plen;
+
+	plen = olpc_dt_getproplen(node, prop);
+	if (plen > bufsize || plen < 1) {
+		return -1;
+	} else {
+		const void *args[] = { (void *)node, prop, buf, (void *)plen };
+		void *res[] = { &plen };
+
+		if (olpc_ofw("getprop", args, res)) {
+			pr_err("PROM: %s: getprop failed!\n", __func__);
+			return -1;
+		}
+	}
+
+	return plen;
+}
+
+static int __init olpc_dt_nextprop(phandle node, char *prev, char *buf)
+{
+	const void *args[] = { (void *)node, prev, buf };
+	int success;
+	void *res[] = { &success };
+
+	buf[0] = '\0';
+
+	if ((s32)node == -1)
+		return -1;
+
+	if (olpc_ofw("nextprop", args, res) || success != 1)
+		return -1;
+
+	return 0;
+}
+
+static int __init olpc_dt_pkg2path(phandle node, char *buf,
+		const int buflen, int *len)
+{
+	const void *args[] = { (void *)node, buf, (void *)buflen };
+	void *res[] = { len };
+
+	if ((s32)node == -1)
+		return -1;
+
+	if (olpc_ofw("package-to-path", args, res) || *len < 1)
+		return -1;
+
+	return 0;
+}
+
+static unsigned int prom_early_allocated __initdata;
+
+void * __init prom_early_alloc(unsigned long size)
+{
+	static u8 *mem;
+	static size_t free_mem;
+	void *res;
+
+	if (free_mem < size) {
+		const size_t chunk_size = max(PAGE_SIZE, size);
+
+		/*
+		 * To minimize the number of allocations, grab at least
+		 * PAGE_SIZE of memory (that's an arbitrary choice that's
+		 * fast enough on the platforms we care about while minimizing
+		 * wasted bootmem) and hand off chunks of it to callers.
+		 */
+		res = alloc_bootmem(chunk_size);
+		if (!res)
+			return NULL;
+		prom_early_allocated += chunk_size;
+		memset(res, 0, chunk_size);
+		free_mem = chunk_size;
+		mem = res;
+	}
+
+	/* allocate from the local cache */
+	free_mem -= size;
+	res = mem;
+	mem += size;
+	return res;
+}
+
+static struct of_pdt_ops prom_olpc_ops __initdata = {
+	.nextprop = olpc_dt_nextprop,
+	.getproplen = olpc_dt_getproplen,
+	.getproperty = olpc_dt_getproperty,
+	.getchild = olpc_dt_getchild,
+	.getsibling = olpc_dt_getsibling,
+	.pkg2path = olpc_dt_pkg2path,
+};
+
+void __init olpc_dt_build_devicetree(void)
+{
+	phandle root;
+
+	if (!olpc_ofw_is_installed())
+		return;
+
+	root = olpc_dt_getsibling(0);
+	if (!root) {
+		pr_err("PROM: unable to get root node from OFW!\n");
+		return;
+	}
+	of_pdt_build_devicetree(root, &prom_olpc_ops);
+
+	pr_info("PROM DT: Built device tree with %u bytes of memory.\n",
+			prom_early_allocated);
+}
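
prom_early_alloc() above amortises bootmem allocations by grabbing at least a page at a time and bump-allocating pieces out of the current chunk. The following stand-alone sketch shows the same strategy, with malloc() standing in for alloc_bootmem() and no alignment handling; it is a simplification, not the kernel code.

/*
 * Sketch of the chunked bump allocator used by prom_early_alloc() above.
 * malloc() stands in for alloc_bootmem(); the remainder of a partially
 * used chunk is simply dropped when a new one is needed.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define CHUNK_SIZE 4096UL

static unsigned char *mem;
static size_t free_mem;
static size_t total_allocated;

static void *early_alloc(size_t size)
{
	void *res;

	if (free_mem < size) {
		size_t chunk = size > CHUNK_SIZE ? size : CHUNK_SIZE;

		/* grab a fresh backing chunk, zeroed like alloc_bootmem */
		res = malloc(chunk);
		if (!res)
			return NULL;
		memset(res, 0, chunk);
		total_allocated += chunk;
		free_mem = chunk;
		mem = res;
	}

	/* carve the next piece off the current chunk */
	free_mem -= size;
	res = mem;
	mem += size;
	return res;
}

int main(void)
{
	char *a = early_alloc(100);
	char *b = early_alloc(200);

	printf("a=%p b=%p backing bytes=%zu\n",
	       (void *)a, (void *)b, total_allocated);
	return 0;
}
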
diff --git a/arch/x86/platform/olpc/olpc_ofw.c b/arch/x86/platform/olpc/olpc_ofw.c
index 7873204..e7604f6 100644
--- a/arch/x86/platform/olpc/olpc_ofw.c
+++ b/arch/x86/platform/olpc/olpc_ofw.c
@@ -110,3 +110,8 @@
 			(unsigned long)olpc_ofw_cif, (-start) >> 20);
 	reserve_top_address(-start);
 }
+
+bool __init olpc_ofw_is_installed(void)
+{
+	return olpc_ofw_cif != NULL;
+}
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 4cd59b0..78ee4b1 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -1030,7 +1030,7 @@
 
 	/*
 	 * Add group onto cgroup list. It might happen that bdi->dev is
-	 * not initiliazed yet. Initialize this new group without major
+	 * not initialized yet. Initialize this new group without major
 	 * and minor info and this info will be filled in once a new thread
 	 * comes for IO. See code above.
 	 */
diff --git a/crypto/Kconfig b/crypto/Kconfig
index e4bac29..4b7cb0e 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -110,7 +110,6 @@
 
 config CRYPTO_GF128MUL
 	tristate "GF(2^128) multiplication functions (EXPERIMENTAL)"
-	depends on EXPERIMENTAL
 	help
 	  Efficient table driven implementation of multiplications in the
 	  field GF(2^128).  This is needed by some cypher modes. This
@@ -539,8 +538,9 @@
 
 config CRYPTO_AES_NI_INTEL
 	tristate "AES cipher algorithms (AES-NI)"
-	depends on (X86 || UML_X86) && 64BIT
-	select CRYPTO_AES_X86_64
+	depends on (X86 || UML_X86)
+	select CRYPTO_AES_X86_64 if 64BIT
+	select CRYPTO_AES_586 if !64BIT
 	select CRYPTO_CRYPTD
 	select CRYPTO_ALGAPI
 	select CRYPTO_FPU
@@ -563,9 +563,10 @@
 
 	  See <http://csrc.nist.gov/encryption/aes/> for more information.
 
-	  In addition to AES cipher algorithm support, the
-	  acceleration for some popular block cipher mode is supported
-	  too, including ECB, CBC, CTR, LRW, PCBC, XTS.
+	  In addition to AES cipher algorithm support, acceleration for
+	  some popular block cipher modes is supported too, including
+	  ECB, CBC, LRW, PCBC, and XTS. The 64-bit version has additional
+	  acceleration for CTR.
 
 config CRYPTO_ANUBIS
 	tristate "Anubis cipher algorithm"
@@ -841,6 +842,27 @@
 	  ANSI X9.31 A.2.4. Note that this option must be enabled if
 	  CRYPTO_FIPS is selected
 
+config CRYPTO_USER_API
+	tristate
+
+config CRYPTO_USER_API_HASH
+	tristate "User-space interface for hash algorithms"
+	depends on NET
+	select CRYPTO_HASH
+	select CRYPTO_USER_API
+	help
+	  This option enables the user-space interface for hash
+	  algorithms.
+
+config CRYPTO_USER_API_SKCIPHER
+	tristate "User-space interface for symmetric key cipher algorithms"
+	depends on NET
+	select CRYPTO_BLKCIPHER
+	select CRYPTO_USER_API
+	help
+	  This option enables the user-space interface for symmetric
+	  key cipher algorithms.
+
 source "drivers/crypto/Kconfig"
 
 endif	# if CRYPTO
diff --git a/crypto/Makefile b/crypto/Makefile
index 423b7de..e9a399c 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -3,32 +3,32 @@
 #
 
 obj-$(CONFIG_CRYPTO) += crypto.o
-crypto-objs := api.o cipher.o compress.o
+crypto-y := api.o cipher.o compress.o
 
 obj-$(CONFIG_CRYPTO_WORKQUEUE) += crypto_wq.o
 
 obj-$(CONFIG_CRYPTO_FIPS) += fips.o
 
 crypto_algapi-$(CONFIG_PROC_FS) += proc.o
-crypto_algapi-objs := algapi.o scatterwalk.o $(crypto_algapi-y)
+crypto_algapi-y := algapi.o scatterwalk.o $(crypto_algapi-y)
 obj-$(CONFIG_CRYPTO_ALGAPI2) += crypto_algapi.o
 
 obj-$(CONFIG_CRYPTO_AEAD2) += aead.o
 
-crypto_blkcipher-objs := ablkcipher.o
-crypto_blkcipher-objs += blkcipher.o
+crypto_blkcipher-y := ablkcipher.o
+crypto_blkcipher-y += blkcipher.o
 obj-$(CONFIG_CRYPTO_BLKCIPHER2) += crypto_blkcipher.o
 obj-$(CONFIG_CRYPTO_BLKCIPHER2) += chainiv.o
 obj-$(CONFIG_CRYPTO_BLKCIPHER2) += eseqiv.o
 obj-$(CONFIG_CRYPTO_SEQIV) += seqiv.o
 
-crypto_hash-objs += ahash.o
-crypto_hash-objs += shash.o
+crypto_hash-y += ahash.o
+crypto_hash-y += shash.o
 obj-$(CONFIG_CRYPTO_HASH2) += crypto_hash.o
 
 obj-$(CONFIG_CRYPTO_PCOMP2) += pcompress.o
 
-cryptomgr-objs := algboss.o testmgr.o
+cryptomgr-y := algboss.o testmgr.o
 
 obj-$(CONFIG_CRYPTO_MANAGER2) += cryptomgr.o
 obj-$(CONFIG_CRYPTO_HMAC) += hmac.o
@@ -85,6 +85,9 @@
 obj-$(CONFIG_CRYPTO_ANSI_CPRNG) += ansi_cprng.o
 obj-$(CONFIG_CRYPTO_TEST) += tcrypt.o
 obj-$(CONFIG_CRYPTO_GHASH) += ghash-generic.o
+obj-$(CONFIG_CRYPTO_USER_API) += af_alg.o
+obj-$(CONFIG_CRYPTO_USER_API_HASH) += algif_hash.o
+obj-$(CONFIG_CRYPTO_USER_API_SKCIPHER) += algif_skcipher.o
 
 #
 # generic algorithms and the async_tx api
diff --git a/crypto/af_alg.c b/crypto/af_alg.c
new file mode 100644
index 0000000..940d70c
--- /dev/null
+++ b/crypto/af_alg.c
@@ -0,0 +1,483 @@
+/*
+ * af_alg: User-space algorithm interface
+ *
+ * This file provides the user-space API for algorithms.
+ *
+ * Copyright (c) 2010 Herbert Xu <herbert@gondor.apana.org.au>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+
+#include <asm/atomic.h>
+#include <crypto/if_alg.h>
+#include <linux/crypto.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/net.h>
+#include <linux/rwsem.h>
+
+struct alg_type_list {
+	const struct af_alg_type *type;
+	struct list_head list;
+};
+
+static atomic_long_t alg_memory_allocated;
+
+static struct proto alg_proto = {
+	.name			= "ALG",
+	.owner			= THIS_MODULE,
+	.memory_allocated	= &alg_memory_allocated,
+	.obj_size		= sizeof(struct alg_sock),
+};
+
+static LIST_HEAD(alg_types);
+static DECLARE_RWSEM(alg_types_sem);
+
+static const struct af_alg_type *alg_get_type(const char *name)
+{
+	const struct af_alg_type *type = ERR_PTR(-ENOENT);
+	struct alg_type_list *node;
+
+	down_read(&alg_types_sem);
+	list_for_each_entry(node, &alg_types, list) {
+		if (strcmp(node->type->name, name))
+			continue;
+
+		if (try_module_get(node->type->owner))
+			type = node->type;
+		break;
+	}
+	up_read(&alg_types_sem);
+
+	return type;
+}
+
+int af_alg_register_type(const struct af_alg_type *type)
+{
+	struct alg_type_list *node;
+	int err = -EEXIST;
+
+	down_write(&alg_types_sem);
+	list_for_each_entry(node, &alg_types, list) {
+		if (!strcmp(node->type->name, type->name))
+			goto unlock;
+	}
+
+	node = kmalloc(sizeof(*node), GFP_KERNEL);
+	err = -ENOMEM;
+	if (!node)
+		goto unlock;
+
+	type->ops->owner = THIS_MODULE;
+	node->type = type;
+	list_add(&node->list, &alg_types);
+	err = 0;
+
+unlock:
+	up_write(&alg_types_sem);
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(af_alg_register_type);
+
+int af_alg_unregister_type(const struct af_alg_type *type)
+{
+	struct alg_type_list *node;
+	int err = -ENOENT;
+
+	down_write(&alg_types_sem);
+	list_for_each_entry(node, &alg_types, list) {
+		if (strcmp(node->type->name, type->name))
+			continue;
+
+		list_del(&node->list);
+		kfree(node);
+		err = 0;
+		break;
+	}
+	up_write(&alg_types_sem);
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(af_alg_unregister_type);
+
+static void alg_do_release(const struct af_alg_type *type, void *private)
+{
+	if (!type)
+		return;
+
+	type->release(private);
+	module_put(type->owner);
+}
+
+int af_alg_release(struct socket *sock)
+{
+	if (sock->sk)
+		sock_put(sock->sk);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(af_alg_release);
+
+static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
+{
+	struct sock *sk = sock->sk;
+	struct alg_sock *ask = alg_sk(sk);
+	struct sockaddr_alg *sa = (void *)uaddr;
+	const struct af_alg_type *type;
+	void *private;
+
+	if (sock->state == SS_CONNECTED)
+		return -EINVAL;
+
+	if (addr_len != sizeof(*sa))
+		return -EINVAL;
+
+	sa->salg_type[sizeof(sa->salg_type) - 1] = 0;
+	sa->salg_name[sizeof(sa->salg_name) - 1] = 0;
+
+	type = alg_get_type(sa->salg_type);
+	if (IS_ERR(type) && PTR_ERR(type) == -ENOENT) {
+		request_module("algif-%s", sa->salg_type);
+		type = alg_get_type(sa->salg_type);
+	}
+
+	if (IS_ERR(type))
+		return PTR_ERR(type);
+
+	private = type->bind(sa->salg_name, sa->salg_feat, sa->salg_mask);
+	if (IS_ERR(private)) {
+		module_put(type->owner);
+		return PTR_ERR(private);
+	}
+
+	lock_sock(sk);
+
+	swap(ask->type, type);
+	swap(ask->private, private);
+
+	release_sock(sk);
+
+	alg_do_release(type, private);
+
+	return 0;
+}
+
+static int alg_setkey(struct sock *sk, char __user *ukey,
+		      unsigned int keylen)
+{
+	struct alg_sock *ask = alg_sk(sk);
+	const struct af_alg_type *type = ask->type;
+	u8 *key;
+	int err;
+
+	key = sock_kmalloc(sk, keylen, GFP_KERNEL);
+	if (!key)
+		return -ENOMEM;
+
+	err = -EFAULT;
+	if (copy_from_user(key, ukey, keylen))
+		goto out;
+
+	err = type->setkey(ask->private, key, keylen);
+
+out:
+	sock_kfree_s(sk, key, keylen);
+
+	return err;
+}
+
+static int alg_setsockopt(struct socket *sock, int level, int optname,
+			  char __user *optval, unsigned int optlen)
+{
+	struct sock *sk = sock->sk;
+	struct alg_sock *ask = alg_sk(sk);
+	const struct af_alg_type *type;
+	int err = -ENOPROTOOPT;
+
+	lock_sock(sk);
+	type = ask->type;
+
+	if (level != SOL_ALG || !type)
+		goto unlock;
+
+	switch (optname) {
+	case ALG_SET_KEY:
+		if (sock->state == SS_CONNECTED)
+			goto unlock;
+		if (!type->setkey)
+			goto unlock;
+
+		err = alg_setkey(sk, optval, optlen);
+	}
+
+unlock:
+	release_sock(sk);
+
+	return err;
+}
+
+int af_alg_accept(struct sock *sk, struct socket *newsock)
+{
+	struct alg_sock *ask = alg_sk(sk);
+	const struct af_alg_type *type;
+	struct sock *sk2;
+	int err;
+
+	lock_sock(sk);
+	type = ask->type;
+
+	err = -EINVAL;
+	if (!type)
+		goto unlock;
+
+	sk2 = sk_alloc(sock_net(sk), PF_ALG, GFP_KERNEL, &alg_proto);
+	err = -ENOMEM;
+	if (!sk2)
+		goto unlock;
+
+	sock_init_data(newsock, sk2);
+	sock_graft(sk2, newsock);
+
+	err = type->accept(ask->private, sk2);
+	if (err) {
+		sk_free(sk2);
+		goto unlock;
+	}
+
+	sk2->sk_family = PF_ALG;
+
+	sock_hold(sk);
+	alg_sk(sk2)->parent = sk;
+	alg_sk(sk2)->type = type;
+
+	newsock->ops = type->ops;
+	newsock->state = SS_CONNECTED;
+
+	err = 0;
+
+unlock:
+	release_sock(sk);
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(af_alg_accept);
+
+static int alg_accept(struct socket *sock, struct socket *newsock, int flags)
+{
+	return af_alg_accept(sock->sk, newsock);
+}
+
+static const struct proto_ops alg_proto_ops = {
+	.family		=	PF_ALG,
+	.owner		=	THIS_MODULE,
+
+	.connect	=	sock_no_connect,
+	.socketpair	=	sock_no_socketpair,
+	.getname	=	sock_no_getname,
+	.ioctl		=	sock_no_ioctl,
+	.listen		=	sock_no_listen,
+	.shutdown	=	sock_no_shutdown,
+	.getsockopt	=	sock_no_getsockopt,
+	.mmap		=	sock_no_mmap,
+	.sendpage	=	sock_no_sendpage,
+	.sendmsg	=	sock_no_sendmsg,
+	.recvmsg	=	sock_no_recvmsg,
+	.poll		=	sock_no_poll,
+
+	.bind		=	alg_bind,
+	.release	=	af_alg_release,
+	.setsockopt	=	alg_setsockopt,
+	.accept		=	alg_accept,
+};
+
+static void alg_sock_destruct(struct sock *sk)
+{
+	struct alg_sock *ask = alg_sk(sk);
+
+	alg_do_release(ask->type, ask->private);
+}
+
+static int alg_create(struct net *net, struct socket *sock, int protocol,
+		      int kern)
+{
+	struct sock *sk;
+	int err;
+
+	if (sock->type != SOCK_SEQPACKET)
+		return -ESOCKTNOSUPPORT;
+	if (protocol != 0)
+		return -EPROTONOSUPPORT;
+
+	err = -ENOMEM;
+	sk = sk_alloc(net, PF_ALG, GFP_KERNEL, &alg_proto);
+	if (!sk)
+		goto out;
+
+	sock->ops = &alg_proto_ops;
+	sock_init_data(sock, sk);
+
+	sk->sk_family = PF_ALG;
+	sk->sk_destruct = alg_sock_destruct;
+
+	return 0;
+out:
+	return err;
+}
+
+static const struct net_proto_family alg_family = {
+	.family	=	PF_ALG,
+	.create	=	alg_create,
+	.owner	=	THIS_MODULE,
+};
+
+int af_alg_make_sg(struct af_alg_sgl *sgl, void __user *addr, int len,
+		   int write)
+{
+	unsigned long from = (unsigned long)addr;
+	unsigned long npages;
+	unsigned off;
+	int err;
+	int i;
+
+	err = -EFAULT;
+	if (!access_ok(write ? VERIFY_READ : VERIFY_WRITE, addr, len))
+		goto out;
+
+	off = from & ~PAGE_MASK;
+	npages = (off + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
+	if (npages > ALG_MAX_PAGES)
+		npages = ALG_MAX_PAGES;
+
+	err = get_user_pages_fast(from, npages, write, sgl->pages);
+	if (err < 0)
+		goto out;
+
+	npages = err;
+	err = -EINVAL;
+	if (WARN_ON(npages == 0))
+		goto out;
+
+	err = 0;
+
+	sg_init_table(sgl->sg, npages);
+
+	for (i = 0; i < npages; i++) {
+		int plen = min_t(int, len, PAGE_SIZE - off);
+
+		sg_set_page(sgl->sg + i, sgl->pages[i], plen, off);
+
+		off = 0;
+		len -= plen;
+		err += plen;
+	}
+
+out:
+	return err;
+}
+EXPORT_SYMBOL_GPL(af_alg_make_sg);
+
+void af_alg_free_sg(struct af_alg_sgl *sgl)
+{
+	int i;
+
+	i = 0;
+	do {
+		put_page(sgl->pages[i]);
+	} while (!sg_is_last(sgl->sg + (i++)));
+}
+EXPORT_SYMBOL_GPL(af_alg_free_sg);
+
+int af_alg_cmsg_send(struct msghdr *msg, struct af_alg_control *con)
+{
+	struct cmsghdr *cmsg;
+
+	for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
+		if (!CMSG_OK(msg, cmsg))
+			return -EINVAL;
+		if (cmsg->cmsg_level != SOL_ALG)
+			continue;
+
+		switch(cmsg->cmsg_type) {
+		case ALG_SET_IV:
+			if (cmsg->cmsg_len < CMSG_LEN(sizeof(*con->iv)))
+				return -EINVAL;
+			con->iv = (void *)CMSG_DATA(cmsg);
+			if (cmsg->cmsg_len < CMSG_LEN(con->iv->ivlen +
+						      sizeof(*con->iv)))
+				return -EINVAL;
+			break;
+
+		case ALG_SET_OP:
+			if (cmsg->cmsg_len < CMSG_LEN(sizeof(u32)))
+				return -EINVAL;
+			con->op = *(u32 *)CMSG_DATA(cmsg);
+			break;
+
+		default:
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(af_alg_cmsg_send);
+
+int af_alg_wait_for_completion(int err, struct af_alg_completion *completion)
+{
+	switch (err) {
+	case -EINPROGRESS:
+	case -EBUSY:
+		wait_for_completion(&completion->completion);
+		INIT_COMPLETION(completion->completion);
+		err = completion->err;
+		break;
+	};
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(af_alg_wait_for_completion);
+
+void af_alg_complete(struct crypto_async_request *req, int err)
+{
+	struct af_alg_completion *completion = req->data;
+
+	completion->err = err;
+	complete(&completion->completion);
+}
+EXPORT_SYMBOL_GPL(af_alg_complete);
+
+static int __init af_alg_init(void)
+{
+	int err = proto_register(&alg_proto, 0);
+
+	if (err)
+		goto out;
+
+	err = sock_register(&alg_family);
+	if (err != 0)
+		goto out_unregister_proto;
+
+out:
+	return err;
+
+out_unregister_proto:
+	proto_unregister(&alg_proto);
+	goto out;
+}
+
+static void __exit af_alg_exit(void)
+{
+	sock_unregister(PF_ALG);
+	proto_unregister(&alg_proto);
+}
+
+module_init(af_alg_init);
+module_exit(af_alg_exit);
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_NETPROTO(AF_ALG);
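
The new af_alg socket family is driven entirely through standard socket calls: bind() a SOCK_SEQPACKET socket to a (type, name) pair, accept() to get a request socket, then send data and read the result back. A minimal user of the hash interface added in the next file is sketched below; it assumes <linux/if_alg.h> is installed, defines AF_ALG itself in case the libc headers are older, and keeps error handling to a minimum.

/*
 * Minimal AF_ALG user: compute a SHA-1 digest of "abc" from user space.
 * Requires a kernel with CONFIG_CRYPTO_USER_API_HASH (added above).
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/if_alg.h>

#ifndef AF_ALG
#define AF_ALG 38		/* fallback for older libc headers */
#endif

int main(void)
{
	struct sockaddr_alg sa = {
		.salg_family = AF_ALG,
		.salg_type   = "hash",
		.salg_name   = "sha1",
	};
	unsigned char digest[20];	/* SHA-1 digest size */
	const char data[] = "abc";
	int tfmfd, opfd, i;

	tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
	if (tfmfd < 0 || bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa)) < 0)
		return 1;

	/* accept() yields the request socket that actually does the hashing */
	opfd = accept(tfmfd, NULL, 0);
	if (opfd < 0)
		return 1;

	if (write(opfd, data, strlen(data)) != (ssize_t)strlen(data))
		return 1;
	if (read(opfd, digest, sizeof(digest)) != (ssize_t)sizeof(digest))
		return 1;

	for (i = 0; i < (int)sizeof(digest); i++)
		printf("%02x", digest[i]);
	printf("\n");

	close(opfd);
	close(tfmfd);
	return 0;
}
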
diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c
new file mode 100644
index 0000000..62122a1
--- /dev/null
+++ b/crypto/algif_hash.c
@@ -0,0 +1,319 @@
+/*
+ * algif_hash: User-space interface for hash algorithms
+ *
+ * This file provides the user-space API for hash algorithms.
+ *
+ * Copyright (c) 2010 Herbert Xu <herbert@gondor.apana.org.au>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+
+#include <crypto/hash.h>
+#include <crypto/if_alg.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/net.h>
+#include <net/sock.h>
+
+struct hash_ctx {
+	struct af_alg_sgl sgl;
+
+	u8 *result;
+
+	struct af_alg_completion completion;
+
+	unsigned int len;
+	bool more;
+
+	struct ahash_request req;
+};
+
+static int hash_sendmsg(struct kiocb *unused, struct socket *sock,
+			struct msghdr *msg, size_t ignored)
+{
+	int limit = ALG_MAX_PAGES * PAGE_SIZE;
+	struct sock *sk = sock->sk;
+	struct alg_sock *ask = alg_sk(sk);
+	struct hash_ctx *ctx = ask->private;
+	unsigned long iovlen;
+	struct iovec *iov;
+	long copied = 0;
+	int err;
+
+	if (limit > sk->sk_sndbuf)
+		limit = sk->sk_sndbuf;
+
+	lock_sock(sk);
+	if (!ctx->more) {
+		err = crypto_ahash_init(&ctx->req);
+		if (err)
+			goto unlock;
+	}
+
+	ctx->more = 0;
+
+	for (iov = msg->msg_iov, iovlen = msg->msg_iovlen; iovlen > 0;
+	     iovlen--, iov++) {
+		unsigned long seglen = iov->iov_len;
+		char __user *from = iov->iov_base;
+
+		while (seglen) {
+			int len = min_t(unsigned long, seglen, limit);
+			int newlen;
+
+			newlen = af_alg_make_sg(&ctx->sgl, from, len, 0);
+			if (newlen < 0)
+				goto unlock;
+
+			ahash_request_set_crypt(&ctx->req, ctx->sgl.sg, NULL,
+						newlen);
+
+			err = af_alg_wait_for_completion(
+				crypto_ahash_update(&ctx->req),
+				&ctx->completion);
+
+			af_alg_free_sg(&ctx->sgl);
+
+			if (err)
+				goto unlock;
+
+			seglen -= newlen;
+			from += newlen;
+			copied += newlen;
+		}
+	}
+
+	err = 0;
+
+	ctx->more = msg->msg_flags & MSG_MORE;
+	if (!ctx->more) {
+		ahash_request_set_crypt(&ctx->req, NULL, ctx->result, 0);
+		err = af_alg_wait_for_completion(crypto_ahash_final(&ctx->req),
+						 &ctx->completion);
+	}
+
+unlock:
+	release_sock(sk);
+
+	return err ?: copied;
+}
+
+static ssize_t hash_sendpage(struct socket *sock, struct page *page,
+			     int offset, size_t size, int flags)
+{
+	struct sock *sk = sock->sk;
+	struct alg_sock *ask = alg_sk(sk);
+	struct hash_ctx *ctx = ask->private;
+	int err;
+
+	lock_sock(sk);
+	sg_init_table(ctx->sgl.sg, 1);
+	sg_set_page(ctx->sgl.sg, page, size, offset);
+
+	ahash_request_set_crypt(&ctx->req, ctx->sgl.sg, ctx->result, size);
+
+	if (!(flags & MSG_MORE)) {
+		if (ctx->more)
+			err = crypto_ahash_finup(&ctx->req);
+		else
+			err = crypto_ahash_digest(&ctx->req);
+	} else {
+		if (!ctx->more) {
+			err = crypto_ahash_init(&ctx->req);
+			if (err)
+				goto unlock;
+		}
+
+		err = crypto_ahash_update(&ctx->req);
+	}
+
+	err = af_alg_wait_for_completion(err, &ctx->completion);
+	if (err)
+		goto unlock;
+
+	ctx->more = flags & MSG_MORE;
+
+unlock:
+	release_sock(sk);
+
+	return err ?: size;
+}
+
+static int hash_recvmsg(struct kiocb *unused, struct socket *sock,
+			struct msghdr *msg, size_t len, int flags)
+{
+	struct sock *sk = sock->sk;
+	struct alg_sock *ask = alg_sk(sk);
+	struct hash_ctx *ctx = ask->private;
+	unsigned ds = crypto_ahash_digestsize(crypto_ahash_reqtfm(&ctx->req));
+	int err;
+
+	if (len > ds)
+		len = ds;
+	else if (len < ds)
+		msg->msg_flags |= MSG_TRUNC;
+
+	lock_sock(sk);
+	if (ctx->more) {
+		ctx->more = 0;
+		ahash_request_set_crypt(&ctx->req, NULL, ctx->result, 0);
+		err = af_alg_wait_for_completion(crypto_ahash_final(&ctx->req),
+						 &ctx->completion);
+		if (err)
+			goto unlock;
+	}
+
+	err = memcpy_toiovec(msg->msg_iov, ctx->result, len);
+
+unlock:
+	release_sock(sk);
+
+	return err ?: len;
+}
+
+static int hash_accept(struct socket *sock, struct socket *newsock, int flags)
+{
+	struct sock *sk = sock->sk;
+	struct alg_sock *ask = alg_sk(sk);
+	struct hash_ctx *ctx = ask->private;
+	struct ahash_request *req = &ctx->req;
+	char state[crypto_ahash_statesize(crypto_ahash_reqtfm(req))];
+	struct sock *sk2;
+	struct alg_sock *ask2;
+	struct hash_ctx *ctx2;
+	int err;
+
+	err = crypto_ahash_export(req, state);
+	if (err)
+		return err;
+
+	err = af_alg_accept(ask->parent, newsock);
+	if (err)
+		return err;
+
+	sk2 = newsock->sk;
+	ask2 = alg_sk(sk2);
+	ctx2 = ask2->private;
+	ctx2->more = 1;
+
+	err = crypto_ahash_import(&ctx2->req, state);
+	if (err) {
+		sock_orphan(sk2);
+		sock_put(sk2);
+	}
+
+	return err;
+}
+
+static struct proto_ops algif_hash_ops = {
+	.family		=	PF_ALG,
+
+	.connect	=	sock_no_connect,
+	.socketpair	=	sock_no_socketpair,
+	.getname	=	sock_no_getname,
+	.ioctl		=	sock_no_ioctl,
+	.listen		=	sock_no_listen,
+	.shutdown	=	sock_no_shutdown,
+	.getsockopt	=	sock_no_getsockopt,
+	.mmap		=	sock_no_mmap,
+	.bind		=	sock_no_bind,
+	.setsockopt	=	sock_no_setsockopt,
+	.poll		=	sock_no_poll,
+
+	.release	=	af_alg_release,
+	.sendmsg	=	hash_sendmsg,
+	.sendpage	=	hash_sendpage,
+	.recvmsg	=	hash_recvmsg,
+	.accept		=	hash_accept,
+};
+
+static void *hash_bind(const char *name, u32 type, u32 mask)
+{
+	return crypto_alloc_ahash(name, type, mask);
+}
+
+static void hash_release(void *private)
+{
+	crypto_free_ahash(private);
+}
+
+static int hash_setkey(void *private, const u8 *key, unsigned int keylen)
+{
+	return crypto_ahash_setkey(private, key, keylen);
+}
+
+static void hash_sock_destruct(struct sock *sk)
+{
+	struct alg_sock *ask = alg_sk(sk);
+	struct hash_ctx *ctx = ask->private;
+
+	sock_kfree_s(sk, ctx->result,
+		     crypto_ahash_digestsize(crypto_ahash_reqtfm(&ctx->req)));
+	sock_kfree_s(sk, ctx, ctx->len);
+	af_alg_release_parent(sk);
+}
+
+static int hash_accept_parent(void *private, struct sock *sk)
+{
+	struct hash_ctx *ctx;
+	struct alg_sock *ask = alg_sk(sk);
+	unsigned len = sizeof(*ctx) + crypto_ahash_reqsize(private);
+	unsigned ds = crypto_ahash_digestsize(private);
+
+	ctx = sock_kmalloc(sk, len, GFP_KERNEL);
+	if (!ctx)
+		return -ENOMEM;
+
+	ctx->result = sock_kmalloc(sk, ds, GFP_KERNEL);
+	if (!ctx->result) {
+		sock_kfree_s(sk, ctx, len);
+		return -ENOMEM;
+	}
+
+	memset(ctx->result, 0, ds);
+
+	ctx->len = len;
+	ctx->more = 0;
+	af_alg_init_completion(&ctx->completion);
+
+	ask->private = ctx;
+
+	ahash_request_set_tfm(&ctx->req, private);
+	ahash_request_set_callback(&ctx->req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+				   af_alg_complete, &ctx->completion);
+
+	sk->sk_destruct = hash_sock_destruct;
+
+	return 0;
+}
+
+static const struct af_alg_type algif_type_hash = {
+	.bind		=	hash_bind,
+	.release	=	hash_release,
+	.setkey		=	hash_setkey,
+	.accept		=	hash_accept_parent,
+	.ops		=	&algif_hash_ops,
+	.name		=	"hash",
+	.owner		=	THIS_MODULE
+};
+
+static int __init algif_hash_init(void)
+{
+	return af_alg_register_type(&algif_type_hash);
+}
+
+static void __exit algif_hash_exit(void)
+{
+	int err = af_alg_unregister_type(&algif_type_hash);
+	BUG_ON(err);
+}
+
+module_init(algif_hash_init);
+module_exit(algif_hash_exit);
+MODULE_LICENSE("GPL");
diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
new file mode 100644
index 0000000..6a6dfc0
--- /dev/null
+++ b/crypto/algif_skcipher.c
@@ -0,0 +1,632 @@
+/*
+ * algif_skcipher: User-space interface for skcipher algorithms
+ *
+ * This file provides the user-space API for symmetric key ciphers.
+ *
+ * Copyright (c) 2010 Herbert Xu <herbert@gondor.apana.org.au>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+
+#include <crypto/scatterwalk.h>
+#include <crypto/skcipher.h>
+#include <crypto/if_alg.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/net.h>
+#include <net/sock.h>
+
+struct skcipher_sg_list {
+	struct list_head list;
+
+	int cur;
+
+	struct scatterlist sg[0];
+};
+
+struct skcipher_ctx {
+	struct list_head tsgl;
+	struct af_alg_sgl rsgl;
+
+	void *iv;
+
+	struct af_alg_completion completion;
+
+	unsigned used;
+
+	unsigned int len;
+	bool more;
+	bool merge;
+	bool enc;
+
+	struct ablkcipher_request req;
+};
+
+#define MAX_SGL_ENTS ((PAGE_SIZE - sizeof(struct skcipher_sg_list)) / \
+		      sizeof(struct scatterlist) - 1)
+
+static inline int skcipher_sndbuf(struct sock *sk)
+{
+	struct alg_sock *ask = alg_sk(sk);
+	struct skcipher_ctx *ctx = ask->private;
+
+	return max_t(int, max_t(int, sk->sk_sndbuf & PAGE_MASK, PAGE_SIZE) -
+			  ctx->used, 0);
+}
+
+static inline bool skcipher_writable(struct sock *sk)
+{
+	return PAGE_SIZE <= skcipher_sndbuf(sk);
+}
+
+static int skcipher_alloc_sgl(struct sock *sk)
+{
+	struct alg_sock *ask = alg_sk(sk);
+	struct skcipher_ctx *ctx = ask->private;
+	struct skcipher_sg_list *sgl;
+	struct scatterlist *sg = NULL;
+
+	sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list);
+	if (!list_empty(&ctx->tsgl))
+		sg = sgl->sg;
+
+	if (!sg || sgl->cur >= MAX_SGL_ENTS) {
+		sgl = sock_kmalloc(sk, sizeof(*sgl) +
+				       sizeof(sgl->sg[0]) * (MAX_SGL_ENTS + 1),
+				   GFP_KERNEL);
+		if (!sgl)
+			return -ENOMEM;
+
+		sg_init_table(sgl->sg, MAX_SGL_ENTS + 1);
+		sgl->cur = 0;
+
+		if (sg)
+			scatterwalk_sg_chain(sg, MAX_SGL_ENTS + 1, sgl->sg);
+
+		list_add_tail(&sgl->list, &ctx->tsgl);
+	}
+
+	return 0;
+}
+
+static void skcipher_pull_sgl(struct sock *sk, int used)
+{
+	struct alg_sock *ask = alg_sk(sk);
+	struct skcipher_ctx *ctx = ask->private;
+	struct skcipher_sg_list *sgl;
+	struct scatterlist *sg;
+	int i;
+
+	while (!list_empty(&ctx->tsgl)) {
+		sgl = list_first_entry(&ctx->tsgl, struct skcipher_sg_list,
+				       list);
+		sg = sgl->sg;
+
+		for (i = 0; i < sgl->cur; i++) {
+			int plen = min_t(int, used, sg[i].length);
+
+			if (!sg_page(sg + i))
+				continue;
+
+			sg[i].length -= plen;
+			sg[i].offset += plen;
+
+			used -= plen;
+			ctx->used -= plen;
+
+			if (sg[i].length)
+				return;
+
+			put_page(sg_page(sg + i));
+			sg_assign_page(sg + i, NULL);
+		}
+
+		list_del(&sgl->list);
+		sock_kfree_s(sk, sgl,
+			     sizeof(*sgl) + sizeof(sgl->sg[0]) *
+					    (MAX_SGL_ENTS + 1));
+	}
+
+	if (!ctx->used)
+		ctx->merge = 0;
+}
+
+static void skcipher_free_sgl(struct sock *sk)
+{
+	struct alg_sock *ask = alg_sk(sk);
+	struct skcipher_ctx *ctx = ask->private;
+
+	skcipher_pull_sgl(sk, ctx->used);
+}
+
+static int skcipher_wait_for_wmem(struct sock *sk, unsigned flags)
+{
+	long timeout;
+	DEFINE_WAIT(wait);
+	int err = -ERESTARTSYS;
+
+	if (flags & MSG_DONTWAIT)
+		return -EAGAIN;
+
+	set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
+
+	for (;;) {
+		if (signal_pending(current))
+			break;
+		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
+		timeout = MAX_SCHEDULE_TIMEOUT;
+		if (sk_wait_event(sk, &timeout, skcipher_writable(sk))) {
+			err = 0;
+			break;
+		}
+	}
+	finish_wait(sk_sleep(sk), &wait);
+
+	return err;
+}
+
+static void skcipher_wmem_wakeup(struct sock *sk)
+{
+	struct socket_wq *wq;
+
+	if (!skcipher_writable(sk))
+		return;
+
+	rcu_read_lock();
+	wq = rcu_dereference(sk->sk_wq);
+	if (wq_has_sleeper(wq))
+		wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
+							   POLLRDNORM |
+							   POLLRDBAND);
+	sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
+	rcu_read_unlock();
+}
+
+static int skcipher_wait_for_data(struct sock *sk, unsigned flags)
+{
+	struct alg_sock *ask = alg_sk(sk);
+	struct skcipher_ctx *ctx = ask->private;
+	long timeout;
+	DEFINE_WAIT(wait);
+	int err = -ERESTARTSYS;
+
+	if (flags & MSG_DONTWAIT) {
+		return -EAGAIN;
+	}
+
+	set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
+
+	for (;;) {
+		if (signal_pending(current))
+			break;
+		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
+		timeout = MAX_SCHEDULE_TIMEOUT;
+		if (sk_wait_event(sk, &timeout, ctx->used)) {
+			err = 0;
+			break;
+		}
+	}
+	finish_wait(sk_sleep(sk), &wait);
+
+	clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
+
+	return err;
+}
+
+static void skcipher_data_wakeup(struct sock *sk)
+{
+	struct alg_sock *ask = alg_sk(sk);
+	struct skcipher_ctx *ctx = ask->private;
+	struct socket_wq *wq;
+
+	if (!ctx->used)
+		return;
+
+	rcu_read_lock();
+	wq = rcu_dereference(sk->sk_wq);
+	if (wq_has_sleeper(wq))
+		wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
+							   POLLRDNORM |
+							   POLLRDBAND);
+	sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
+	rcu_read_unlock();
+}
+
+static int skcipher_sendmsg(struct kiocb *unused, struct socket *sock,
+			    struct msghdr *msg, size_t size)
+{
+	struct sock *sk = sock->sk;
+	struct alg_sock *ask = alg_sk(sk);
+	struct skcipher_ctx *ctx = ask->private;
+	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(&ctx->req);
+	unsigned ivsize = crypto_ablkcipher_ivsize(tfm);
+	struct skcipher_sg_list *sgl;
+	struct af_alg_control con = {};
+	long copied = 0;
+	bool enc = 0;
+	int err;
+	int i;
+
+	if (msg->msg_controllen) {
+		err = af_alg_cmsg_send(msg, &con);
+		if (err)
+			return err;
+
+		switch (con.op) {
+		case ALG_OP_ENCRYPT:
+			enc = 1;
+			break;
+		case ALG_OP_DECRYPT:
+			enc = 0;
+			break;
+		default:
+			return -EINVAL;
+		}
+
+		if (con.iv && con.iv->ivlen != ivsize)
+			return -EINVAL;
+	}
+
+	err = -EINVAL;
+
+	lock_sock(sk);
+	if (!ctx->more && ctx->used)
+		goto unlock;
+
+	if (!ctx->used) {
+		ctx->enc = enc;
+		if (con.iv)
+			memcpy(ctx->iv, con.iv->iv, ivsize);
+	}
+
+	while (size) {
+		struct scatterlist *sg;
+		unsigned long len = size;
+		int plen;
+
+		if (ctx->merge) {
+			sgl = list_entry(ctx->tsgl.prev,
+					 struct skcipher_sg_list, list);
+			sg = sgl->sg + sgl->cur - 1;
+			len = min_t(unsigned long, len,
+				    PAGE_SIZE - sg->offset - sg->length);
+
+			err = memcpy_fromiovec(page_address(sg_page(sg)) +
+					       sg->offset + sg->length,
+					       msg->msg_iov, len);
+			if (err)
+				goto unlock;
+
+			sg->length += len;
+			ctx->merge = (sg->offset + sg->length) &
+				     (PAGE_SIZE - 1);
+
+			ctx->used += len;
+			copied += len;
+			size -= len;
+			continue;
+		}
+
+		if (!skcipher_writable(sk)) {
+			err = skcipher_wait_for_wmem(sk, msg->msg_flags);
+			if (err)
+				goto unlock;
+		}
+
+		len = min_t(unsigned long, len, skcipher_sndbuf(sk));
+
+		err = skcipher_alloc_sgl(sk);
+		if (err)
+			goto unlock;
+
+		sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list);
+		sg = sgl->sg;
+		do {
+			i = sgl->cur;
+			plen = min_t(int, len, PAGE_SIZE);
+
+			sg_assign_page(sg + i, alloc_page(GFP_KERNEL));
+			err = -ENOMEM;
+			if (!sg_page(sg + i))
+				goto unlock;
+
+			err = memcpy_fromiovec(page_address(sg_page(sg + i)),
+					       msg->msg_iov, plen);
+			if (err) {
+				__free_page(sg_page(sg + i));
+				sg_assign_page(sg + i, NULL);
+				goto unlock;
+			}
+
+			sg[i].length = plen;
+			len -= plen;
+			ctx->used += plen;
+			copied += plen;
+			size -= plen;
+			sgl->cur++;
+		} while (len && sgl->cur < MAX_SGL_ENTS);
+
+		ctx->merge = plen & (PAGE_SIZE - 1);
+	}
+
+	err = 0;
+
+	ctx->more = msg->msg_flags & MSG_MORE;
+	if (!ctx->more && !list_empty(&ctx->tsgl))
+		sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list);
+
+unlock:
+	skcipher_data_wakeup(sk);
+	release_sock(sk);
+
+	return copied ?: err;
+}
+
+static ssize_t skcipher_sendpage(struct socket *sock, struct page *page,
+				 int offset, size_t size, int flags)
+{
+	struct sock *sk = sock->sk;
+	struct alg_sock *ask = alg_sk(sk);
+	struct skcipher_ctx *ctx = ask->private;
+	struct skcipher_sg_list *sgl;
+	int err = -EINVAL;
+
+	lock_sock(sk);
+	if (!ctx->more && ctx->used)
+		goto unlock;
+
+	if (!size)
+		goto done;
+
+	if (!skcipher_writable(sk)) {
+		err = skcipher_wait_for_wmem(sk, flags);
+		if (err)
+			goto unlock;
+	}
+
+	err = skcipher_alloc_sgl(sk);
+	if (err)
+		goto unlock;
+
+	ctx->merge = 0;
+	sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list);
+
+	get_page(page);
+	sg_set_page(sgl->sg + sgl->cur, page, size, offset);
+	sgl->cur++;
+	ctx->used += size;
+
+done:
+	ctx->more = flags & MSG_MORE;
+	if (!ctx->more && !list_empty(&ctx->tsgl))
+		sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list);
+
+unlock:
+	skcipher_data_wakeup(sk);
+	release_sock(sk);
+
+	return err ?: size;
+}
+
+static int skcipher_recvmsg(struct kiocb *unused, struct socket *sock,
+			    struct msghdr *msg, size_t ignored, int flags)
+{
+	struct sock *sk = sock->sk;
+	struct alg_sock *ask = alg_sk(sk);
+	struct skcipher_ctx *ctx = ask->private;
+	unsigned bs = crypto_ablkcipher_blocksize(crypto_ablkcipher_reqtfm(
+		&ctx->req));
+	struct skcipher_sg_list *sgl;
+	struct scatterlist *sg;
+	unsigned long iovlen;
+	struct iovec *iov;
+	int err = -EAGAIN;
+	int used;
+	long copied = 0;
+
+	lock_sock(sk);
+	for (iov = msg->msg_iov, iovlen = msg->msg_iovlen; iovlen > 0;
+	     iovlen--, iov++) {
+		unsigned long seglen = iov->iov_len;
+		char __user *from = iov->iov_base;
+
+		while (seglen) {
+			sgl = list_first_entry(&ctx->tsgl,
+					       struct skcipher_sg_list, list);
+			sg = sgl->sg;
+
+			while (!sg->length)
+				sg++;
+
+			used = ctx->used;
+			if (!used) {
+				err = skcipher_wait_for_data(sk, flags);
+				if (err)
+					goto unlock;
+			}
+
+			used = min_t(unsigned long, used, seglen);
+
+			used = af_alg_make_sg(&ctx->rsgl, from, used, 1);
+			err = used;
+			if (err < 0)
+				goto unlock;
+
+			if (ctx->more || used < ctx->used)
+				used -= used % bs;
+
+			err = -EINVAL;
+			if (!used)
+				goto free;
+
+			ablkcipher_request_set_crypt(&ctx->req, sg,
+						     ctx->rsgl.sg, used,
+						     ctx->iv);
+
+			err = af_alg_wait_for_completion(
+				ctx->enc ?
+					crypto_ablkcipher_encrypt(&ctx->req) :
+					crypto_ablkcipher_decrypt(&ctx->req),
+				&ctx->completion);
+
+free:
+			af_alg_free_sg(&ctx->rsgl);
+
+			if (err)
+				goto unlock;
+
+			copied += used;
+			from += used;
+			seglen -= used;
+			skcipher_pull_sgl(sk, used);
+		}
+	}
+
+	err = 0;
+
+unlock:
+	skcipher_wmem_wakeup(sk);
+	release_sock(sk);
+
+	return copied ?: err;
+}
+
+
+static unsigned int skcipher_poll(struct file *file, struct socket *sock,
+				  poll_table *wait)
+{
+	struct sock *sk = sock->sk;
+	struct alg_sock *ask = alg_sk(sk);
+	struct skcipher_ctx *ctx = ask->private;
+	unsigned int mask;
+
+	sock_poll_wait(file, sk_sleep(sk), wait);
+	mask = 0;
+
+	if (ctx->used)
+		mask |= POLLIN | POLLRDNORM;
+
+	if (skcipher_writable(sk))
+		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
+
+	return mask;
+}
+
+static struct proto_ops algif_skcipher_ops = {
+	.family		=	PF_ALG,
+
+	.connect	=	sock_no_connect,
+	.socketpair	=	sock_no_socketpair,
+	.getname	=	sock_no_getname,
+	.ioctl		=	sock_no_ioctl,
+	.listen		=	sock_no_listen,
+	.shutdown	=	sock_no_shutdown,
+	.getsockopt	=	sock_no_getsockopt,
+	.mmap		=	sock_no_mmap,
+	.bind		=	sock_no_bind,
+	.accept		=	sock_no_accept,
+	.setsockopt	=	sock_no_setsockopt,
+
+	.release	=	af_alg_release,
+	.sendmsg	=	skcipher_sendmsg,
+	.sendpage	=	skcipher_sendpage,
+	.recvmsg	=	skcipher_recvmsg,
+	.poll		=	skcipher_poll,
+};
+
+static void *skcipher_bind(const char *name, u32 type, u32 mask)
+{
+	return crypto_alloc_ablkcipher(name, type, mask);
+}
+
+static void skcipher_release(void *private)
+{
+	crypto_free_ablkcipher(private);
+}
+
+static int skcipher_setkey(void *private, const u8 *key, unsigned int keylen)
+{
+	return crypto_ablkcipher_setkey(private, key, keylen);
+}
+
+static void skcipher_sock_destruct(struct sock *sk)
+{
+	struct alg_sock *ask = alg_sk(sk);
+	struct skcipher_ctx *ctx = ask->private;
+	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(&ctx->req);
+
+	skcipher_free_sgl(sk);
+	sock_kfree_s(sk, ctx->iv, crypto_ablkcipher_ivsize(tfm));
+	sock_kfree_s(sk, ctx, ctx->len);
+	af_alg_release_parent(sk);
+}
+
+static int skcipher_accept_parent(void *private, struct sock *sk)
+{
+	struct skcipher_ctx *ctx;
+	struct alg_sock *ask = alg_sk(sk);
+	unsigned int len = sizeof(*ctx) + crypto_ablkcipher_reqsize(private);
+
+	ctx = sock_kmalloc(sk, len, GFP_KERNEL);
+	if (!ctx)
+		return -ENOMEM;
+
+	ctx->iv = sock_kmalloc(sk, crypto_ablkcipher_ivsize(private),
+			       GFP_KERNEL);
+	if (!ctx->iv) {
+		sock_kfree_s(sk, ctx, len);
+		return -ENOMEM;
+	}
+
+	memset(ctx->iv, 0, crypto_ablkcipher_ivsize(private));
+
+	INIT_LIST_HEAD(&ctx->tsgl);
+	ctx->len = len;
+	ctx->used = 0;
+	ctx->more = 0;
+	ctx->merge = 0;
+	ctx->enc = 0;
+	af_alg_init_completion(&ctx->completion);
+
+	ask->private = ctx;
+
+	ablkcipher_request_set_tfm(&ctx->req, private);
+	ablkcipher_request_set_callback(&ctx->req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+					af_alg_complete, &ctx->completion);
+
+	sk->sk_destruct = skcipher_sock_destruct;
+
+	return 0;
+}
+
+static const struct af_alg_type algif_type_skcipher = {
+	.bind		=	skcipher_bind,
+	.release	=	skcipher_release,
+	.setkey		=	skcipher_setkey,
+	.accept		=	skcipher_accept_parent,
+	.ops		=	&algif_skcipher_ops,
+	.name		=	"skcipher",
+	.owner		=	THIS_MODULE
+};
+
+static int __init algif_skcipher_init(void)
+{
+	return af_alg_register_type(&algif_type_skcipher);
+}
+
+static void __exit algif_skcipher_exit(void)
+{
+	int err = af_alg_unregister_type(&algif_type_skcipher);
+	BUG_ON(err);
+}
+
+module_init(algif_skcipher_init);
+module_exit(algif_skcipher_exit);
+MODULE_LICENSE("GPL");
diff --git a/crypto/authenc.c b/crypto/authenc.c
index a5a22cf..5ef7ba6 100644
--- a/crypto/authenc.c
+++ b/crypto/authenc.c
@@ -107,20 +107,6 @@
 	goto out;
 }
 
-static void authenc_chain(struct scatterlist *head, struct scatterlist *sg,
-			  int chain)
-{
-	if (chain) {
-		head->length += sg->length;
-		sg = scatterwalk_sg_next(sg);
-	}
-
-	if (sg)
-		scatterwalk_sg_chain(head, 2, sg);
-	else
-		sg_mark_end(head);
-}
-
 static void authenc_geniv_ahash_update_done(struct crypto_async_request *areq,
 					    int err)
 {
@@ -345,7 +331,7 @@
 	if (ivsize) {
 		sg_init_table(cipher, 2);
 		sg_set_buf(cipher, iv, ivsize);
-		authenc_chain(cipher, dst, vdst == iv + ivsize);
+		scatterwalk_crypto_chain(cipher, dst, vdst == iv + ivsize, 2);
 		dst = cipher;
 		cryptlen += ivsize;
 	}
@@ -354,7 +340,7 @@
 		authenc_ahash_fn = crypto_authenc_ahash;
 		sg_init_table(asg, 2);
 		sg_set_page(asg, sg_page(assoc), assoc->length, assoc->offset);
-		authenc_chain(asg, dst, 0);
+		scatterwalk_crypto_chain(asg, dst, 0, 2);
 		dst = asg;
 		cryptlen += req->assoclen;
 	}
@@ -499,7 +485,7 @@
 	if (ivsize) {
 		sg_init_table(cipher, 2);
 		sg_set_buf(cipher, iv, ivsize);
-		authenc_chain(cipher, src, vsrc == iv + ivsize);
+		scatterwalk_crypto_chain(cipher, src, vsrc == iv + ivsize, 2);
 		src = cipher;
 		cryptlen += ivsize;
 	}
@@ -508,7 +494,7 @@
 		authenc_ahash_fn = crypto_authenc_ahash;
 		sg_init_table(asg, 2);
 		sg_set_page(asg, sg_page(assoc), assoc->length, assoc->offset);
-		authenc_chain(asg, src, 0);
+		scatterwalk_crypto_chain(asg, src, 0, 2);
 		src = asg;
 		cryptlen += req->assoclen;
 	}
diff --git a/crypto/cast5.c b/crypto/cast5.c
index a1d2294..4a230dd 100644
--- a/crypto/cast5.c
+++ b/crypto/cast5.c
@@ -604,36 +604,23 @@
 	 * Rounds 3, 6, 9, 12, and 15 use f function Type 3.
 	 */
 
+	t = l; l = r; r = t ^ F1(r, Km[0], Kr[0]);
+	t = l; l = r; r = t ^ F2(r, Km[1], Kr[1]);
+	t = l; l = r; r = t ^ F3(r, Km[2], Kr[2]);
+	t = l; l = r; r = t ^ F1(r, Km[3], Kr[3]);
+	t = l; l = r; r = t ^ F2(r, Km[4], Kr[4]);
+	t = l; l = r; r = t ^ F3(r, Km[5], Kr[5]);
+	t = l; l = r; r = t ^ F1(r, Km[6], Kr[6]);
+	t = l; l = r; r = t ^ F2(r, Km[7], Kr[7]);
+	t = l; l = r; r = t ^ F3(r, Km[8], Kr[8]);
+	t = l; l = r; r = t ^ F1(r, Km[9], Kr[9]);
+	t = l; l = r; r = t ^ F2(r, Km[10], Kr[10]);
+	t = l; l = r; r = t ^ F3(r, Km[11], Kr[11]);
 	if (!(c->rr)) {
-		t = l; l = r; r = t ^ F1(r, Km[0], Kr[0]);
-		t = l; l = r; r = t ^ F2(r, Km[1], Kr[1]);
-		t = l; l = r; r = t ^ F3(r, Km[2], Kr[2]);
-		t = l; l = r; r = t ^ F1(r, Km[3], Kr[3]);
-		t = l; l = r; r = t ^ F2(r, Km[4], Kr[4]);
-		t = l; l = r; r = t ^ F3(r, Km[5], Kr[5]);
-		t = l; l = r; r = t ^ F1(r, Km[6], Kr[6]);
-		t = l; l = r; r = t ^ F2(r, Km[7], Kr[7]);
-		t = l; l = r; r = t ^ F3(r, Km[8], Kr[8]);
-		t = l; l = r; r = t ^ F1(r, Km[9], Kr[9]);
-		t = l; l = r; r = t ^ F2(r, Km[10], Kr[10]);
-		t = l; l = r; r = t ^ F3(r, Km[11], Kr[11]);
 		t = l; l = r; r = t ^ F1(r, Km[12], Kr[12]);
 		t = l; l = r; r = t ^ F2(r, Km[13], Kr[13]);
 		t = l; l = r; r = t ^ F3(r, Km[14], Kr[14]);
 		t = l; l = r; r = t ^ F1(r, Km[15], Kr[15]);
-	} else {
-		t = l; l = r; r = t ^ F1(r, Km[0], Kr[0]);
-		t = l; l = r; r = t ^ F2(r, Km[1], Kr[1]);
-		t = l; l = r; r = t ^ F3(r, Km[2], Kr[2]);
-		t = l; l = r; r = t ^ F1(r, Km[3], Kr[3]);
-		t = l; l = r; r = t ^ F2(r, Km[4], Kr[4]);
-		t = l; l = r; r = t ^ F3(r, Km[5], Kr[5]);
-		t = l; l = r; r = t ^ F1(r, Km[6], Kr[6]);
-		t = l; l = r; r = t ^ F2(r, Km[7], Kr[7]);
-		t = l; l = r; r = t ^ F3(r, Km[8], Kr[8]);
-		t = l; l = r; r = t ^ F1(r, Km[9], Kr[9]);
-		t = l; l = r; r = t ^ F2(r, Km[10], Kr[10]);
-		t = l; l = r; r = t ^ F3(r, Km[11], Kr[11]);
 	}
 
 	/* c1...c64 <-- (R16,L16).  (Exchange final blocks L16, R16 and
@@ -663,32 +650,19 @@
 		t = l; l = r; r = t ^ F3(r, Km[14], Kr[14]);
 		t = l; l = r; r = t ^ F2(r, Km[13], Kr[13]);
 		t = l; l = r; r = t ^ F1(r, Km[12], Kr[12]);
-		t = l; l = r; r = t ^ F3(r, Km[11], Kr[11]);
-		t = l; l = r; r = t ^ F2(r, Km[10], Kr[10]);
-		t = l; l = r; r = t ^ F1(r, Km[9], Kr[9]);
-		t = l; l = r; r = t ^ F3(r, Km[8], Kr[8]);
-		t = l; l = r; r = t ^ F2(r, Km[7], Kr[7]);
-		t = l; l = r; r = t ^ F1(r, Km[6], Kr[6]);
-		t = l; l = r; r = t ^ F3(r, Km[5], Kr[5]);
-		t = l; l = r; r = t ^ F2(r, Km[4], Kr[4]);
-		t = l; l = r; r = t ^ F1(r, Km[3], Kr[3]);
-		t = l; l = r; r = t ^ F3(r, Km[2], Kr[2]);
-		t = l; l = r; r = t ^ F2(r, Km[1], Kr[1]);
-		t = l; l = r; r = t ^ F1(r, Km[0], Kr[0]);
-	} else {
-		t = l; l = r; r = t ^ F3(r, Km[11], Kr[11]);
-		t = l; l = r; r = t ^ F2(r, Km[10], Kr[10]);
-		t = l; l = r; r = t ^ F1(r, Km[9], Kr[9]);
-		t = l; l = r; r = t ^ F3(r, Km[8], Kr[8]);
-		t = l; l = r; r = t ^ F2(r, Km[7], Kr[7]);
-		t = l; l = r; r = t ^ F1(r, Km[6], Kr[6]);
-		t = l; l = r; r = t ^ F3(r, Km[5], Kr[5]);
-		t = l; l = r; r = t ^ F2(r, Km[4], Kr[4]);
-		t = l; l = r; r = t ^ F1(r, Km[3], Kr[3]);
-		t = l; l = r; r = t ^ F3(r, Km[2], Kr[2]);
-		t = l; l = r; r = t ^ F2(r, Km[1], Kr[1]);
-		t = l; l = r; r = t ^ F1(r, Km[0], Kr[0]);
 	}
+	t = l; l = r; r = t ^ F3(r, Km[11], Kr[11]);
+	t = l; l = r; r = t ^ F2(r, Km[10], Kr[10]);
+	t = l; l = r; r = t ^ F1(r, Km[9], Kr[9]);
+	t = l; l = r; r = t ^ F3(r, Km[8], Kr[8]);
+	t = l; l = r; r = t ^ F2(r, Km[7], Kr[7]);
+	t = l; l = r; r = t ^ F1(r, Km[6], Kr[6]);
+	t = l; l = r; r = t ^ F3(r, Km[5], Kr[5]);
+	t = l; l = r; r = t ^ F2(r, Km[4], Kr[4]);
+	t = l; l = r; r = t ^ F1(r, Km[3], Kr[3]);
+	t = l; l = r; r = t ^ F3(r, Km[2], Kr[2]);
+	t = l; l = r; r = t ^ F2(r, Km[1], Kr[1]);
+	t = l; l = r; r = t ^ F1(r, Km[0], Kr[0]);
 
 	dst[0] = cpu_to_be32(r);
 	dst[1] = cpu_to_be32(l);
diff --git a/crypto/crypto_wq.c b/crypto/crypto_wq.c
index fdcf624..b980ee1 100644
--- a/crypto/crypto_wq.c
+++ b/crypto/crypto_wq.c
@@ -20,7 +20,8 @@
 
 static int __init crypto_wq_init(void)
 {
-	kcrypto_wq = create_workqueue("crypto");
+	kcrypto_wq = alloc_workqueue("crypto",
+				     WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE, 1);
 	if (unlikely(!kcrypto_wq))
 		return -ENOMEM;
 	return 0;
diff --git a/crypto/deflate.c b/crypto/deflate.c
index 463dc85..cbc7a33 100644
--- a/crypto/deflate.c
+++ b/crypto/deflate.c
@@ -48,12 +48,11 @@
 	int ret = 0;
 	struct z_stream_s *stream = &ctx->comp_stream;
 
-	stream->workspace = vmalloc(zlib_deflate_workspacesize());
+	stream->workspace = vzalloc(zlib_deflate_workspacesize());
 	if (!stream->workspace) {
 		ret = -ENOMEM;
 		goto out;
 	}
-	memset(stream->workspace, 0, zlib_deflate_workspacesize());
 	ret = zlib_deflateInit2(stream, DEFLATE_DEF_LEVEL, Z_DEFLATED,
 	                        -DEFLATE_DEF_WINBITS, DEFLATE_DEF_MEMLEVEL,
 	                        Z_DEFAULT_STRATEGY);
diff --git a/crypto/eseqiv.c b/crypto/eseqiv.c
index 3ca3b66..42ce9f5 100644
--- a/crypto/eseqiv.c
+++ b/crypto/eseqiv.c
@@ -62,20 +62,6 @@
 	skcipher_givcrypt_complete(req, err);
 }
 
-static void eseqiv_chain(struct scatterlist *head, struct scatterlist *sg,
-			 int chain)
-{
-	if (chain) {
-		head->length += sg->length;
-		sg = scatterwalk_sg_next(sg);
-	}
-
-	if (sg)
-		scatterwalk_sg_chain(head, 2, sg);
-	else
-		sg_mark_end(head);
-}
-
 static int eseqiv_givencrypt(struct skcipher_givcrypt_request *req)
 {
 	struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
@@ -124,13 +110,13 @@
 
 	sg_init_table(reqctx->src, 2);
 	sg_set_buf(reqctx->src, giv, ivsize);
-	eseqiv_chain(reqctx->src, osrc, vsrc == giv + ivsize);
+	scatterwalk_crypto_chain(reqctx->src, osrc, vsrc == giv + ivsize, 2);
 
 	dst = reqctx->src;
 	if (osrc != odst) {
 		sg_init_table(reqctx->dst, 2);
 		sg_set_buf(reqctx->dst, giv, ivsize);
-		eseqiv_chain(reqctx->dst, odst, vdst == giv + ivsize);
+		scatterwalk_crypto_chain(reqctx->dst, odst, vdst == giv + ivsize, 2);
 
 		dst = reqctx->dst;
 	}
diff --git a/crypto/gcm.c b/crypto/gcm.c
index 2f5fbba..1a25263 100644
--- a/crypto/gcm.c
+++ b/crypto/gcm.c
@@ -1102,21 +1102,6 @@
 	return crypto_aead_setauthsize(ctx->child, authsize);
 }
 
-/* this is the same as crypto_authenc_chain */
-static void crypto_rfc4543_chain(struct scatterlist *head,
-				 struct scatterlist *sg, int chain)
-{
-	if (chain) {
-		head->length += sg->length;
-		sg = scatterwalk_sg_next(sg);
-	}
-
-	if (sg)
-		scatterwalk_sg_chain(head, 2, sg);
-	else
-		sg_mark_end(head);
-}
-
 static struct aead_request *crypto_rfc4543_crypt(struct aead_request *req,
 						 int enc)
 {
@@ -1154,13 +1139,13 @@
 
 	sg_init_table(payload, 2);
 	sg_set_buf(payload, req->iv, 8);
-	crypto_rfc4543_chain(payload, dst, vdst == req->iv + 8);
+	scatterwalk_crypto_chain(payload, dst, vdst == req->iv + 8, 2);
 	assoclen += 8 + req->cryptlen - (enc ? 0 : authsize);
 
 	sg_init_table(assoc, 2);
 	sg_set_page(assoc, sg_page(req->assoc), req->assoc->length,
 		    req->assoc->offset);
-	crypto_rfc4543_chain(assoc, payload, 0);
+	scatterwalk_crypto_chain(assoc, payload, 0, 2);
 
 	aead_request_set_tfm(subreq, ctx->child);
 	aead_request_set_callback(subreq, req->base.flags, req->base.complete,
diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c
index 75586f1..29a89da 100644
--- a/crypto/pcrypt.c
+++ b/crypto/pcrypt.c
@@ -455,7 +455,8 @@
 
 	get_online_cpus();
 
-	pcrypt->wq = create_workqueue(name);
+	pcrypt->wq = alloc_workqueue(name,
+				     WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE, 1);
 	if (!pcrypt->wq)
 		goto err;
 
diff --git a/crypto/rmd128.c b/crypto/rmd128.c
index 1ceb673..8a0f68b 100644
--- a/crypto/rmd128.c
+++ b/crypto/rmd128.c
@@ -5,7 +5,7 @@
  *
  * Based on the reference implementation by Antoon Bosselaers, ESAT-COSIC
  *
- * Copyright (c) 2008 Adrian-Ken Rueegsegger <rueegsegger (at) swiss-it.ch>
+ * Copyright (c) 2008 Adrian-Ken Rueegsegger <ken@codelabs.ch>
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the Free
@@ -325,4 +325,5 @@
 module_exit(rmd128_mod_fini);
 
 MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Adrian-Ken Rueegsegger <ken@codelabs.ch>");
 MODULE_DESCRIPTION("RIPEMD-128 Message Digest");
diff --git a/crypto/rmd160.c b/crypto/rmd160.c
index 472261fc..525d7bb 100644
--- a/crypto/rmd160.c
+++ b/crypto/rmd160.c
@@ -5,7 +5,7 @@
  *
  * Based on the reference implementation by Antoon Bosselaers, ESAT-COSIC
  *
- * Copyright (c) 2008 Adrian-Ken Rueegsegger <rueegsegger (at) swiss-it.ch>
+ * Copyright (c) 2008 Adrian-Ken Rueegsegger <ken@codelabs.ch>
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the Free
@@ -369,4 +369,5 @@
 module_exit(rmd160_mod_fini);
 
 MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Adrian-Ken Rueegsegger <ken@codelabs.ch>");
 MODULE_DESCRIPTION("RIPEMD-160 Message Digest");
diff --git a/crypto/rmd256.c b/crypto/rmd256.c
index 72eafa8..69293d9 100644
--- a/crypto/rmd256.c
+++ b/crypto/rmd256.c
@@ -5,7 +5,7 @@
  *
  * Based on the reference implementation by Antoon Bosselaers, ESAT-COSIC
  *
- * Copyright (c) 2008 Adrian-Ken Rueegsegger <rueegsegger (at) swiss-it.ch>
+ * Copyright (c) 2008 Adrian-Ken Rueegsegger <ken@codelabs.ch>
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the Free
@@ -344,4 +344,5 @@
 module_exit(rmd256_mod_fini);
 
 MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Adrian-Ken Rueegsegger <ken@codelabs.ch>");
 MODULE_DESCRIPTION("RIPEMD-256 Message Digest");
diff --git a/crypto/rmd320.c b/crypto/rmd320.c
index 86becab..09f97df 100644
--- a/crypto/rmd320.c
+++ b/crypto/rmd320.c
@@ -5,7 +5,7 @@
  *
  * Based on the reference implementation by Antoon Bosselaers, ESAT-COSIC
  *
- * Copyright (c) 2008 Adrian-Ken Rueegsegger <rueegsegger (at) swiss-it.ch>
+ * Copyright (c) 2008 Adrian-Ken Rueegsegger <ken@codelabs.ch>
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the Free
@@ -393,4 +393,5 @@
 module_exit(rmd320_mod_fini);
 
 MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Adrian-Ken Rueegsegger <ken@codelabs.ch>");
 MODULE_DESCRIPTION("RIPEMD-320 Message Digest");
diff --git a/crypto/shash.c b/crypto/shash.c
index 22fd943..76f74b9 100644
--- a/crypto/shash.c
+++ b/crypto/shash.c
@@ -310,7 +310,13 @@
 
 static int shash_async_import(struct ahash_request *req, const void *in)
 {
-	return crypto_shash_import(ahash_request_ctx(req), in);
+	struct crypto_shash **ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
+	struct shash_desc *desc = ahash_request_ctx(req);
+
+	desc->tfm = *ctx;
+	desc->flags = req->base.flags;
+
+	return crypto_shash_import(desc, in);
 }
 
 static void crypto_exit_shash_ops_async(struct crypto_tfm *tfm)
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index 3ca68f9..9aac5e5 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -8,6 +8,13 @@
  * Copyright (c) 2002 Jean-Francois Dive <jef@linuxbe.org>
  * Copyright (c) 2007 Nokia Siemens Networks
  *
+ * Updated RFC4106 AES-GCM testing.
+ *    Authors: Aidan O'Mahony (aidan.o.mahony@intel.com)
+ *             Adrian Hoban <adrian.hoban@intel.com>
+ *             Gabriele Paoloni <gabriele.paoloni@intel.com>
+ *             Tadeusz Struk (tadeusz.struk@intel.com)
+ *             Copyright (c) 2010, Intel Corporation.
+ *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the Free
  * Software Foundation; either version 2 of the License, or (at your option)
@@ -980,6 +987,10 @@
 		ret += tcrypt_test("ansi_cprng");
 		break;
 
+	case 151:
+		ret += tcrypt_test("rfc4106(gcm(aes))");
+		break;
+
 	case 200:
 		test_cipher_speed("ecb(aes)", ENCRYPT, sec, NULL, 0,
 				speed_template_16_24_32);
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index fa8c8f7..27ea9fe 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -6,6 +6,13 @@
  * Copyright (c) 2007 Nokia Siemens Networks
  * Copyright (c) 2008 Herbert Xu <herbert@gondor.apana.org.au>
  *
+ * Updated RFC4106 AES-GCM testing.
+ *    Authors: Aidan O'Mahony (aidan.o.mahony@intel.com)
+ *             Adrian Hoban <adrian.hoban@intel.com>
+ *             Gabriele Paoloni <gabriele.paoloni@intel.com>
+ *             Tadeusz Struk (tadeusz.struk@intel.com)
+ *    Copyright (c) 2010, Intel Corporation.
+ *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the Free
  * Software Foundation; either version 2 of the License, or (at your option)
@@ -2242,6 +2249,23 @@
 			}
 		}
 	}, {
+		.alg = "rfc4106(gcm(aes))",
+		.test = alg_test_aead,
+		.suite = {
+			.aead = {
+				.enc = {
+					.vecs = aes_gcm_rfc4106_enc_tv_template,
+					.count = AES_GCM_4106_ENC_TEST_VECTORS
+				},
+				.dec = {
+					.vecs = aes_gcm_rfc4106_dec_tv_template,
+					.count = AES_GCM_4106_DEC_TEST_VECTORS
+				}
+			}
+		}
+	}, {
+
+
 		.alg = "rfc4309(ccm(aes))",
 		.test = alg_test_aead,
 		.fips_allowed = 1,
diff --git a/crypto/testmgr.h b/crypto/testmgr.h
index 74e3537..834af7f 100644
--- a/crypto/testmgr.h
+++ b/crypto/testmgr.h
@@ -6,6 +6,15 @@
  * Copyright (c) 2007 Nokia Siemens Networks
  * Copyright (c) 2008 Herbert Xu <herbert@gondor.apana.org.au>
  *
+ * Updated RFC4106 AES-GCM testing. Some test vectors were taken from
+ * http://csrc.nist.gov/groups/ST/toolkit/BCM/documents/proposedmodes/
+ * gcm/gcm-test-vectors.tar.gz
+ *     Authors: Aidan O'Mahony (aidan.o.mahony@intel.com)
+ *              Adrian Hoban <adrian.hoban@intel.com>
+ *              Gabriele Paoloni <gabriele.paoloni@intel.com>
+ *              Tadeusz Struk (tadeusz.struk@intel.com)
+ *     Copyright (c) 2010, Intel Corporation.
+ *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the Free
  * Software Foundation; either version 2 of the License, or (at your option)
@@ -2947,6 +2956,8 @@
 #define AES_CTR_3686_DEC_TEST_VECTORS 6
 #define AES_GCM_ENC_TEST_VECTORS 9
 #define AES_GCM_DEC_TEST_VECTORS 8
+#define AES_GCM_4106_ENC_TEST_VECTORS 7
+#define AES_GCM_4106_DEC_TEST_VECTORS 7
 #define AES_CCM_ENC_TEST_VECTORS 7
 #define AES_CCM_DEC_TEST_VECTORS 7
 #define AES_CCM_4309_ENC_TEST_VECTORS 7
@@ -5829,6 +5840,356 @@
 	}
 };
 
+static struct aead_testvec aes_gcm_rfc4106_enc_tv_template[] = {
+        { /* Generated using Crypto++ */
+		.key    = zeroed_string,
+		.klen	= 20,
+                .iv     = zeroed_string,
+                .input  = zeroed_string,
+                .ilen   = 16,
+                .assoc  = zeroed_string,
+                .alen   = 8,
+		.result	= "\x03\x88\xDA\xCE\x60\xB6\xA3\x92"
+                          "\xF3\x28\xC2\xB9\x71\xB2\xFE\x78"
+                          "\x97\xFE\x4C\x23\x37\x42\x01\xE0"
+                          "\x81\x9F\x8D\xC5\xD7\x41\xA0\x1B",
+		.rlen	= 32,
+        },{
+		.key    = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
+			  "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
+                          "\x00\x00\x00\x00",
+		.klen	= 20,
+                .iv     = "\x00\x00\x00\x00\x00\x00\x00\x01"
+                          "\x00\x00\x00\x00",
+                .input  = zeroed_string,
+                .ilen   = 16,
+                .assoc  = zeroed_string,
+                .alen   = 8,
+		.result	= "\xC0\x0D\x8B\x42\x0F\x8F\x34\x18"
+                          "\x88\xB1\xC5\xBC\xC5\xB6\xD6\x28"
+                          "\x6A\x9D\xDF\x11\x5E\xFE\x5E\x9D"
+                          "\x2F\x70\x44\x92\xF7\xF2\xE3\xEF",
+		.rlen	= 32,
+
+        }, {
+		.key    = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
+			  "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
+                          "\x00\x00\x00\x00",
+		.klen	= 20,
+                .iv     = zeroed_string,
+                .input  = "\x01\x01\x01\x01\x01\x01\x01\x01"
+                          "\x01\x01\x01\x01\x01\x01\x01\x01",
+                .ilen   = 16,
+                .assoc  = zeroed_string,
+                .alen   = 8,
+		.result	= "\x4B\xB1\xB5\xE3\x25\x71\x70\xDE"
+                          "\x7F\xC9\x9C\xA5\x14\x19\xF2\xAC"
+                          "\x0B\x8F\x88\x69\x17\xE6\xB4\x3C"
+                          "\xB1\x68\xFD\x14\x52\x64\x61\xB2",
+		.rlen	= 32,
+        }, {
+		.key    = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
+			  "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
+                          "\x00\x00\x00\x00",
+		.klen	= 20,
+                .iv     = zeroed_string,
+                .input  = "\x01\x01\x01\x01\x01\x01\x01\x01"
+                          "\x01\x01\x01\x01\x01\x01\x01\x01",
+                .ilen   = 16,
+                .assoc  = "\x01\x01\x01\x01\x01\x01\x01\x01",
+                .alen   = 8,
+		.result	= "\x4B\xB1\xB5\xE3\x25\x71\x70\xDE"
+                          "\x7F\xC9\x9C\xA5\x14\x19\xF2\xAC"
+                          "\x90\x92\xB7\xE3\x5F\xA3\x9A\x63"
+                          "\x7E\xD7\x1F\xD8\xD3\x7C\x4B\xF5",
+		.rlen	= 32,
+        }, {
+		.key    = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
+			  "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
+                          "\x00\x00\x00\x00",
+		.klen	= 20,
+                .iv     = "\x00\x00\x00\x00\x00\x00\x00\x01"
+                          "\x00\x00\x00\x00",
+                .input  = "\x01\x01\x01\x01\x01\x01\x01\x01"
+                          "\x01\x01\x01\x01\x01\x01\x01\x01",
+                .ilen   = 16,
+                .assoc  = "\x01\x01\x01\x01\x01\x01\x01\x01",
+                .alen   = 8,
+		.result	= "\xC1\x0C\x8A\x43\x0E\x8E\x35\x19"
+                          "\x89\xB0\xC4\xBD\xC4\xB7\xD7\x29"
+                          "\x64\x50\xF9\x32\x13\xFB\x74\x61"
+                          "\xF4\xED\x52\xD3\xC5\x10\x55\x3C",
+		.rlen	= 32,
+        }, {
+		.key    = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
+			  "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
+                          "\x00\x00\x00\x00",
+		.klen	= 20,
+                .iv     = "\x00\x00\x00\x00\x00\x00\x00\x01"
+                          "\x00\x00\x00\x00",
+                .input  = "\x01\x01\x01\x01\x01\x01\x01\x01"
+                          "\x01\x01\x01\x01\x01\x01\x01\x01"
+                          "\x01\x01\x01\x01\x01\x01\x01\x01"
+                          "\x01\x01\x01\x01\x01\x01\x01\x01"
+                          "\x01\x01\x01\x01\x01\x01\x01\x01"
+                          "\x01\x01\x01\x01\x01\x01\x01\x01"
+                          "\x01\x01\x01\x01\x01\x01\x01\x01"
+                          "\x01\x01\x01\x01\x01\x01\x01\x01",
+                .ilen   = 64,
+                .assoc  = "\x01\x01\x01\x01\x01\x01\x01\x01",
+                .alen   = 8,
+		.result	= "\xC1\x0C\x8A\x43\x0E\x8E\x35\x19"
+                          "\x89\xB0\xC4\xBD\xC4\xB7\xD7\x29"
+                          "\x98\x14\xA1\x42\x37\x80\xFD\x90"
+                          "\x68\x12\x01\xA8\x91\x89\xB9\x83"
+                          "\x5B\x11\x77\x12\x9B\xFF\x24\x89"
+                          "\x94\x5F\x18\x12\xBA\x27\x09\x39"
+                          "\x99\x96\x76\x42\x15\x1C\xCD\xCB"
+                          "\xDC\xD3\xDA\x65\x73\xAF\x80\xCD"
+                          "\xD2\xB6\xC2\x4A\x76\xC2\x92\x85"
+                          "\xBD\xCF\x62\x98\x58\x14\xE5\xBD",
+		.rlen	= 80,
+        }, {
+		.key    = "\x00\x01\x02\x03\x04\x05\x06\x07"
+			  "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+                          "\x00\x00\x00\x00",
+		.klen	= 20,
+                .iv     = "\x00\x00\x45\x67\x89\xab\xcd\xef"
+                          "\x00\x00\x00\x00",
+                .input  = "\xff\xff\xff\xff\xff\xff\xff\xff"
+                          "\xff\xff\xff\xff\xff\xff\xff\xff"
+                          "\xff\xff\xff\xff\xff\xff\xff\xff"
+                          "\xff\xff\xff\xff\xff\xff\xff\xff"
+                          "\xff\xff\xff\xff\xff\xff\xff\xff"
+                          "\xff\xff\xff\xff\xff\xff\xff\xff"
+                          "\xff\xff\xff\xff\xff\xff\xff\xff"
+                          "\xff\xff\xff\xff\xff\xff\xff\xff"
+                          "\xff\xff\xff\xff\xff\xff\xff\xff"
+                          "\xff\xff\xff\xff\xff\xff\xff\xff"
+                          "\xff\xff\xff\xff\xff\xff\xff\xff"
+                          "\xff\xff\xff\xff\xff\xff\xff\xff"
+                          "\xff\xff\xff\xff\xff\xff\xff\xff"
+                          "\xff\xff\xff\xff\xff\xff\xff\xff"
+                          "\xff\xff\xff\xff\xff\xff\xff\xff"
+                          "\xff\xff\xff\xff\xff\xff\xff\xff"
+                          "\xff\xff\xff\xff\xff\xff\xff\xff"
+                          "\xff\xff\xff\xff\xff\xff\xff\xff"
+                          "\xff\xff\xff\xff\xff\xff\xff\xff"
+                          "\xff\xff\xff\xff\xff\xff\xff\xff"
+                          "\xff\xff\xff\xff\xff\xff\xff\xff"
+                          "\xff\xff\xff\xff\xff\xff\xff\xff"
+                          "\xff\xff\xff\xff\xff\xff\xff\xff"
+                          "\xff\xff\xff\xff\xff\xff\xff\xff",
+                .ilen   = 192,
+                .assoc  = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+                          "\xaa\xaa\xaa\xaa",
+                .alen   = 12,
+		.result	= "\xC1\x76\x33\x85\xE2\x9B\x5F\xDE"
+			  "\xDE\x89\x3D\x42\xE7\xC9\x69\x8A"
+			  "\x44\x6D\xC3\x88\x46\x2E\xC2\x01"
+			  "\x5E\xF6\x0C\x39\xF0\xC4\xA5\x82"
+			  "\xCD\xE8\x31\xCC\x0A\x4C\xE4\x44"
+			  "\x41\xA9\x82\x6F\x22\xA1\x23\x1A"
+			  "\xA8\xE3\x16\xFD\x31\x5C\x27\x31"
+			  "\xF1\x7F\x01\x63\xA3\xAF\x70\xA1"
+			  "\xCF\x07\x57\x41\x67\xD0\xC4\x42"
+			  "\xDB\x18\xC6\x4C\x4C\xE0\x3D\x9F"
+			  "\x05\x07\xFB\x13\x7D\x4A\xCA\x5B"
+			  "\xF0\xBF\x64\x7E\x05\xB1\x72\xEE"
+			  "\x7C\x3B\xD4\xCD\x14\x03\xB2\x2C"
+			  "\xD3\xA9\xEE\xFA\x17\xFC\x9C\xDF"
+			  "\xC7\x75\x40\xFF\xAE\xAD\x1E\x59"
+			  "\x2F\x30\x24\xFB\xAD\x6B\x10\xFA"
+			  "\x6C\x9F\x5B\xE7\x25\xD5\xD0\x25"
+			  "\xAC\x4A\x4B\xDA\xFC\x7A\x85\x1B"
+			  "\x7E\x13\x06\x82\x08\x17\xA4\x35"
+			  "\xEC\xC5\x8D\x63\x96\x81\x0A\x8F"
+			  "\xA3\x05\x38\x95\x20\x1A\x47\x04"
+			  "\x6F\x6D\xDA\x8F\xEF\xC1\x76\x35"
+			  "\x6B\xC7\x4D\x0F\x94\x12\xCA\x3E"
+			  "\x2E\xD5\x03\x2E\x86\x7E\xAA\x3B"
+			  "\x37\x08\x1C\xCF\xBA\x5D\x71\x46"
+			  "\x80\x72\xB0\x4C\x82\x0D\x60\x3C",
+		.rlen	= 208,
+	}
+};
+
+static struct aead_testvec aes_gcm_rfc4106_dec_tv_template[] = {
+        { /* Generated using Crypto++ */
+		.key    = zeroed_string,
+		.klen	= 20,
+                .iv     = zeroed_string,
+		.input	= "\x03\x88\xDA\xCE\x60\xB6\xA3\x92"
+                          "\xF3\x28\xC2\xB9\x71\xB2\xFE\x78"
+                          "\x97\xFE\x4C\x23\x37\x42\x01\xE0"
+                          "\x81\x9F\x8D\xC5\xD7\x41\xA0\x1B",
+		.ilen	= 32,
+                .assoc  = zeroed_string,
+                .alen   = 8,
+                .result = zeroed_string,
+                .rlen   = 16,
+
+        },{
+		.key    = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
+			  "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
+                          "\x00\x00\x00\x00",
+		.klen	= 20,
+                .iv     = "\x00\x00\x00\x00\x00\x00\x00\x01"
+                          "\x00\x00\x00\x00",
+		.input	= "\xC0\x0D\x8B\x42\x0F\x8F\x34\x18"
+                          "\x88\xB1\xC5\xBC\xC5\xB6\xD6\x28"
+                          "\x6A\x9D\xDF\x11\x5E\xFE\x5E\x9D"
+                          "\x2F\x70\x44\x92\xF7\xF2\xE3\xEF",
+		.ilen	= 32,
+                .assoc  = zeroed_string,
+                .alen   = 8,
+                .result = zeroed_string,
+                .rlen   = 16,
+        }, {
+		.key    = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
+			  "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
+                          "\x00\x00\x00\x00",
+		.klen	= 20,
+                .iv     = zeroed_string,
+		.input	= "\x4B\xB1\xB5\xE3\x25\x71\x70\xDE"
+                          "\x7F\xC9\x9C\xA5\x14\x19\xF2\xAC"
+                          "\x0B\x8F\x88\x69\x17\xE6\xB4\x3C"
+                          "\xB1\x68\xFD\x14\x52\x64\x61\xB2",
+		.ilen	= 32,
+                .assoc  = zeroed_string,
+                .alen   = 8,
+                .result = "\x01\x01\x01\x01\x01\x01\x01\x01"
+                          "\x01\x01\x01\x01\x01\x01\x01\x01",
+                .rlen   = 16,
+        }, {
+		.key    = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
+			  "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
+                          "\x00\x00\x00\x00",
+		.klen	= 20,
+                .iv     = zeroed_string,
+		.input	= "\x4B\xB1\xB5\xE3\x25\x71\x70\xDE"
+                          "\x7F\xC9\x9C\xA5\x14\x19\xF2\xAC"
+                          "\x90\x92\xB7\xE3\x5F\xA3\x9A\x63"
+                          "\x7E\xD7\x1F\xD8\xD3\x7C\x4B\xF5",
+		.ilen	= 32,
+                .assoc  = "\x01\x01\x01\x01\x01\x01\x01\x01",
+                .alen   = 8,
+                .result = "\x01\x01\x01\x01\x01\x01\x01\x01"
+                          "\x01\x01\x01\x01\x01\x01\x01\x01",
+                .rlen   = 16,
+
+        }, {
+		.key    = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
+			  "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
+                          "\x00\x00\x00\x00",
+		.klen	= 20,
+                .iv     = "\x00\x00\x00\x00\x00\x00\x00\x01"
+                          "\x00\x00\x00\x00",
+		.input	= "\xC1\x0C\x8A\x43\x0E\x8E\x35\x19"
+                          "\x89\xB0\xC4\xBD\xC4\xB7\xD7\x29"
+                          "\x64\x50\xF9\x32\x13\xFB\x74\x61"
+                          "\xF4\xED\x52\xD3\xC5\x10\x55\x3C",
+		.ilen	= 32,
+                .assoc  = "\x01\x01\x01\x01\x01\x01\x01\x01",
+                .alen   = 8,
+                .result = "\x01\x01\x01\x01\x01\x01\x01\x01"
+                          "\x01\x01\x01\x01\x01\x01\x01\x01",
+                .rlen   = 16,
+        }, {
+		.key    = "\xfe\xff\xe9\x92\x86\x65\x73\x1c"
+			  "\x6d\x6a\x8f\x94\x67\x30\x83\x08"
+                          "\x00\x00\x00\x00",
+		.klen	= 20,
+                .iv     = "\x00\x00\x00\x00\x00\x00\x00\x01"
+                          "\x00\x00\x00\x00",
+		.input	= "\xC1\x0C\x8A\x43\x0E\x8E\x35\x19"
+                          "\x89\xB0\xC4\xBD\xC4\xB7\xD7\x29"
+                          "\x98\x14\xA1\x42\x37\x80\xFD\x90"
+                          "\x68\x12\x01\xA8\x91\x89\xB9\x83"
+                          "\x5B\x11\x77\x12\x9B\xFF\x24\x89"
+                          "\x94\x5F\x18\x12\xBA\x27\x09\x39"
+                          "\x99\x96\x76\x42\x15\x1C\xCD\xCB"
+                          "\xDC\xD3\xDA\x65\x73\xAF\x80\xCD"
+                          "\xD2\xB6\xC2\x4A\x76\xC2\x92\x85"
+                          "\xBD\xCF\x62\x98\x58\x14\xE5\xBD",
+		.ilen	= 80,
+                .assoc  = "\x01\x01\x01\x01\x01\x01\x01\x01",
+                .alen   = 8,
+                .result = "\x01\x01\x01\x01\x01\x01\x01\x01"
+                          "\x01\x01\x01\x01\x01\x01\x01\x01"
+                          "\x01\x01\x01\x01\x01\x01\x01\x01"
+                          "\x01\x01\x01\x01\x01\x01\x01\x01"
+                          "\x01\x01\x01\x01\x01\x01\x01\x01"
+                          "\x01\x01\x01\x01\x01\x01\x01\x01"
+                          "\x01\x01\x01\x01\x01\x01\x01\x01"
+                          "\x01\x01\x01\x01\x01\x01\x01\x01",
+                .rlen   = 64,
+        }, {
+		.key    = "\x00\x01\x02\x03\x04\x05\x06\x07"
+			  "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+                          "\x00\x00\x00\x00",
+		.klen	= 20,
+                .iv     = "\x00\x00\x45\x67\x89\xab\xcd\xef"
+                          "\x00\x00\x00\x00",
+		.input	= "\xC1\x76\x33\x85\xE2\x9B\x5F\xDE"
+			  "\xDE\x89\x3D\x42\xE7\xC9\x69\x8A"
+			  "\x44\x6D\xC3\x88\x46\x2E\xC2\x01"
+			  "\x5E\xF6\x0C\x39\xF0\xC4\xA5\x82"
+			  "\xCD\xE8\x31\xCC\x0A\x4C\xE4\x44"
+			  "\x41\xA9\x82\x6F\x22\xA1\x23\x1A"
+			  "\xA8\xE3\x16\xFD\x31\x5C\x27\x31"
+			  "\xF1\x7F\x01\x63\xA3\xAF\x70\xA1"
+			  "\xCF\x07\x57\x41\x67\xD0\xC4\x42"
+			  "\xDB\x18\xC6\x4C\x4C\xE0\x3D\x9F"
+			  "\x05\x07\xFB\x13\x7D\x4A\xCA\x5B"
+			  "\xF0\xBF\x64\x7E\x05\xB1\x72\xEE"
+			  "\x7C\x3B\xD4\xCD\x14\x03\xB2\x2C"
+			  "\xD3\xA9\xEE\xFA\x17\xFC\x9C\xDF"
+			  "\xC7\x75\x40\xFF\xAE\xAD\x1E\x59"
+			  "\x2F\x30\x24\xFB\xAD\x6B\x10\xFA"
+			  "\x6C\x9F\x5B\xE7\x25\xD5\xD0\x25"
+			  "\xAC\x4A\x4B\xDA\xFC\x7A\x85\x1B"
+			  "\x7E\x13\x06\x82\x08\x17\xA4\x35"
+			  "\xEC\xC5\x8D\x63\x96\x81\x0A\x8F"
+			  "\xA3\x05\x38\x95\x20\x1A\x47\x04"
+			  "\x6F\x6D\xDA\x8F\xEF\xC1\x76\x35"
+			  "\x6B\xC7\x4D\x0F\x94\x12\xCA\x3E"
+			  "\x2E\xD5\x03\x2E\x86\x7E\xAA\x3B"
+			  "\x37\x08\x1C\xCF\xBA\x5D\x71\x46"
+			  "\x80\x72\xB0\x4C\x82\x0D\x60\x3C",
+		.ilen	= 208,
+                .assoc  = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+                          "\xaa\xaa\xaa\xaa",
+                .alen   = 12,
+                .result = "\xff\xff\xff\xff\xff\xff\xff\xff"
+                          "\xff\xff\xff\xff\xff\xff\xff\xff"
+                          "\xff\xff\xff\xff\xff\xff\xff\xff"
+                          "\xff\xff\xff\xff\xff\xff\xff\xff"
+                          "\xff\xff\xff\xff\xff\xff\xff\xff"
+                          "\xff\xff\xff\xff\xff\xff\xff\xff"
+                          "\xff\xff\xff\xff\xff\xff\xff\xff"
+                          "\xff\xff\xff\xff\xff\xff\xff\xff"
+                          "\xff\xff\xff\xff\xff\xff\xff\xff"
+                          "\xff\xff\xff\xff\xff\xff\xff\xff"
+                          "\xff\xff\xff\xff\xff\xff\xff\xff"
+                          "\xff\xff\xff\xff\xff\xff\xff\xff"
+                          "\xff\xff\xff\xff\xff\xff\xff\xff"
+                          "\xff\xff\xff\xff\xff\xff\xff\xff"
+                          "\xff\xff\xff\xff\xff\xff\xff\xff"
+                          "\xff\xff\xff\xff\xff\xff\xff\xff"
+                          "\xff\xff\xff\xff\xff\xff\xff\xff"
+                          "\xff\xff\xff\xff\xff\xff\xff\xff"
+                          "\xff\xff\xff\xff\xff\xff\xff\xff"
+                          "\xff\xff\xff\xff\xff\xff\xff\xff"
+                          "\xff\xff\xff\xff\xff\xff\xff\xff"
+                          "\xff\xff\xff\xff\xff\xff\xff\xff"
+                          "\xff\xff\xff\xff\xff\xff\xff\xff"
+                          "\xff\xff\xff\xff\xff\xff\xff\xff",
+                .rlen   = 192,
+
+	}
+};
+
 static struct aead_testvec aes_ccm_enc_tv_template[] = {
 	{ /* From RFC 3610 */
 		.key	= "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
diff --git a/crypto/zlib.c b/crypto/zlib.c
index c3015733..739b8fc 100644
--- a/crypto/zlib.c
+++ b/crypto/zlib.c
@@ -95,11 +95,10 @@
 	zlib_comp_exit(ctx);
 
 	workspacesize = zlib_deflate_workspacesize();
-	stream->workspace = vmalloc(workspacesize);
+	stream->workspace = vzalloc(workspacesize);
 	if (!stream->workspace)
 		return -ENOMEM;
 
-	memset(stream->workspace, 0, workspacesize);
 	ret = zlib_deflateInit2(stream,
 				tb[ZLIB_COMP_LEVEL]
 					? nla_get_u32(tb[ZLIB_COMP_LEVEL])
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 3d93b3a..dd0a5b5 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -88,6 +88,8 @@
 
 source "drivers/leds/Kconfig"
 
+source "drivers/nfc/Kconfig"
+
 source "drivers/accessibility/Kconfig"
 
 source "drivers/infiniband/Kconfig"
diff --git a/drivers/Makefile b/drivers/Makefile
index bf15ce7..ef51324 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -40,7 +40,7 @@
 
 obj-y				+= serial/
 obj-$(CONFIG_PARPORT)		+= parport/
-obj-y				+= base/ block/ misc/ mfd/
+obj-y				+= base/ block/ misc/ mfd/ nfc/
 obj-$(CONFIG_NUBUS)		+= nubus/
 obj-y				+= macintosh/
 obj-$(CONFIG_IDE)		+= ide/
diff --git a/drivers/acpi/acpica/acobject.h b/drivers/acpi/acpica/acobject.h
index bdbfaf22b..962a3cc 100644
--- a/drivers/acpi/acpica/acobject.h
+++ b/drivers/acpi/acpica/acobject.h
@@ -93,7 +93,7 @@
 
 #define AOPOBJ_AML_CONSTANT         0x01	/* Integer is an AML constant */
 #define AOPOBJ_STATIC_POINTER       0x02	/* Data is part of an ACPI table, don't delete */
-#define AOPOBJ_DATA_VALID           0x04	/* Object is intialized and data is valid */
+#define AOPOBJ_DATA_VALID           0x04	/* Object is initialized and data is valid */
 #define AOPOBJ_OBJECT_INITIALIZED   0x08	/* Region is initialized, _REG was run */
 #define AOPOBJ_SETUP_COMPLETE       0x10	/* Region setup is complete */
 #define AOPOBJ_INVALID              0x20	/* Host OS won't allow a Region address */
diff --git a/drivers/acpi/apei/einj.c b/drivers/acpi/apei/einj.c
index cf29df6..096aebf 100644
--- a/drivers/acpi/apei/einj.c
+++ b/drivers/acpi/apei/einj.c
@@ -39,7 +39,7 @@
 #define EINJ_PFX "EINJ: "
 
 #define SPIN_UNIT		100			/* 100ns */
-/* Firmware should respond within 1 miliseconds */
+/* Firmware should respond within 1 millisecond */
 #define FIRMWARE_TIMEOUT	(1 * NSEC_PER_MSEC)
 
 /*
diff --git a/drivers/acpi/apei/erst.c b/drivers/acpi/apei/erst.c
index 5850d32..cf6db6b7 100644
--- a/drivers/acpi/apei/erst.c
+++ b/drivers/acpi/apei/erst.c
@@ -53,7 +53,7 @@
 				     sizeof(struct acpi_table_erst)))
 
 #define SPIN_UNIT		100			/* 100ns */
-/* Firmware should respond within 1 miliseconds */
+/* Firmware should respond within 1 millisecond */
 #define FIRMWARE_TIMEOUT	(1 * NSEC_PER_MSEC)
 #define FIRMWARE_MAX_STALL	50			/* 50us */
 
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index febb153..c423231 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -319,7 +319,7 @@
 	}
 }
 
-static struct platform_suspend_ops acpi_suspend_ops = {
+static const struct platform_suspend_ops acpi_suspend_ops = {
 	.valid = acpi_suspend_state_valid,
 	.begin = acpi_suspend_begin,
 	.prepare_late = acpi_pm_prepare,
@@ -347,7 +347,7 @@
  * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
  * been requested.
  */
-static struct platform_suspend_ops acpi_suspend_ops_old = {
+static const struct platform_suspend_ops acpi_suspend_ops_old = {
 	.valid = acpi_suspend_state_valid,
 	.begin = acpi_suspend_begin_old,
 	.prepare_late = acpi_pm_pre_suspend,
@@ -506,7 +506,7 @@
 	acpi_enable_all_runtime_gpes();
 }
 
-static struct platform_hibernation_ops acpi_hibernation_ops = {
+static const struct platform_hibernation_ops acpi_hibernation_ops = {
 	.begin = acpi_hibernation_begin,
 	.end = acpi_pm_end,
 	.pre_snapshot = acpi_pm_prepare,
@@ -549,7 +549,7 @@
  * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
  * been requested.
  */
-static struct platform_hibernation_ops acpi_hibernation_ops_old = {
+static const struct platform_hibernation_ops acpi_hibernation_ops_old = {
 	.begin = acpi_hibernation_begin_old,
 	.end = acpi_pm_end,
 	.pre_snapshot = acpi_pm_pre_suspend,
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index 5cd0228..15a0fde 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -260,7 +260,7 @@
 				vd->brightness->levels[request_level]);
 }
 
-static struct backlight_ops acpi_backlight_ops = {
+static const struct backlight_ops acpi_backlight_ops = {
 	.get_brightness = acpi_video_get_brightness,
 	.update_status  = acpi_video_set_brightness,
 };
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 0a6a943..a31fe96 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -2240,7 +2240,7 @@
 			if (id[ATA_ID_CFA_KEY_MGMT] & 1)
 				ata_dev_printk(dev, KERN_WARNING,
 					       "supports DRM functions and may "
-					       "not be fully accessable.\n");
+					       "not be fully accessible.\n");
 			snprintf(revbuf, 7, "CFA");
 		} else {
 			snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
@@ -2248,7 +2248,7 @@
 			if (ata_id_has_tpm(id))
 				ata_dev_printk(dev, KERN_WARNING,
 					       "supports DRM functions and may "
-					       "not be fully accessable.\n");
+					       "not be fully accessible.\n");
 		}
 
 		dev->n_sectors = ata_id_n_sectors(id);
diff --git a/drivers/ata/sata_vsc.c b/drivers/ata/sata_vsc.c
index b777176..e079cf2 100644
--- a/drivers/ata/sata_vsc.c
+++ b/drivers/ata/sata_vsc.c
@@ -370,7 +370,7 @@
 	if (pci_resource_len(pdev, 0) == 0)
 		return -ENODEV;
 
-	/* map IO regions and intialize host accordingly */
+	/* map IO regions and initialize host accordingly */
 	rc = pcim_iomap_regions(pdev, 1 << VSC_MMIO_BAR, DRV_NAME);
 	if (rc == -EBUSY)
 		pcim_pin_device(pdev);
diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
index ffe9b65..9f47e86 100644
--- a/drivers/atm/ambassador.c
+++ b/drivers/atm/ambassador.c
@@ -1926,8 +1926,9 @@
   const struct firmware *fw;
   unsigned long start_address;
   const struct ihex_binrec *rec;
+  const char *errmsg = NULL;
   int res;
-  
+
   res = request_ihex_firmware(&fw, "atmsar11.fw", &dev->pci_dev->dev);
   if (res) {
     PRINTK (KERN_ERR, "Cannot load microcode data");
@@ -1937,8 +1938,8 @@
   /* First record contains just the start address */
   rec = (const struct ihex_binrec *)fw->data;
   if (be16_to_cpu(rec->len) != sizeof(__be32) || be32_to_cpu(rec->addr)) {
-    PRINTK (KERN_ERR, "Bad microcode data (no start record)");
-    return -EINVAL;
+    errmsg = "no start record";
+    goto fail;
   }
   start_address = be32_to_cpup((__be32 *)rec->data);
 
@@ -1950,12 +1951,12 @@
     PRINTD (DBG_LOAD, "starting region (%x, %u)", be32_to_cpu(rec->addr),
 	    be16_to_cpu(rec->len));
     if (be16_to_cpu(rec->len) > 4 * MAX_TRANSFER_DATA) {
-	    PRINTK (KERN_ERR, "Bad microcode data (record too long)");
-	    return -EINVAL;
+	    errmsg = "record too long";
+	    goto fail;
     }
     if (be16_to_cpu(rec->len) & 3) {
-	    PRINTK (KERN_ERR, "Bad microcode data (odd number of bytes)");
-	    return -EINVAL;
+	    errmsg = "odd number of bytes";
+	    goto fail;
     }
     res = loader_write(lb, dev, rec);
     if (res)
@@ -1970,6 +1971,10 @@
     res = loader_start(lb, dev, start_address);
 
   return res;
+fail:
+  release_firmware(fw);
+  PRINTK(KERN_ERR, "Bad microcode data (%s)", errmsg);
+  return -EINVAL;
 }
 
 /********** give adapter parameters **********/
diff --git a/drivers/atm/idt77252.h b/drivers/atm/idt77252.h
index 5042bb2..f53a43a 100644
--- a/drivers/atm/idt77252.h
+++ b/drivers/atm/idt77252.h
@@ -572,7 +572,7 @@
 #define SAR_STAT_TSQF       0x00001000 /* Transmit Status Queue full      */
 #define SAR_STAT_TMROF      0x00000800 /* Timer overflow                  */
 #define SAR_STAT_PHYI       0x00000400 /* PHY device Interrupt flag       */
-#define SAR_STAT_CMDBZ      0x00000200 /* ABR SAR Comand Busy Flag        */
+#define SAR_STAT_CMDBZ      0x00000200 /* ABR SAR Command Busy Flag       */
 #define SAR_STAT_FBQ3A      0x00000100 /* Free Buffer Queue 3 Attention   */
 #define SAR_STAT_FBQ2A      0x00000080 /* Free Buffer Queue 2 Attention   */
 #define SAR_STAT_RSQF       0x00000040 /* Receive Status Queue full       */
diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
index 7292540..d80d51b 100644
--- a/drivers/atm/iphase.c
+++ b/drivers/atm/iphase.c
@@ -2063,7 +2063,7 @@
 		- UBR Table size is 4K  
 		- UBR wait queue is 4K  
 	   since the table and wait queues are contiguous, all the bytes   
-	   can be initialized by one memeset.  
+	   can be initialized by one memset.
 	*/  
         
         vcsize_sel = 0;
@@ -2089,7 +2089,7 @@
 		- ABR Table size is 2K  
 		- ABR wait queue is 2K  
 	   since the table and wait queues are contiguous, all the bytes   
-	   can be intialized by one memeset.  
+	   can be initialized by one memset.
 	*/  
         i = ABR_SCHED_TABLE * iadev->memSize;
         writew((i >> 11) & 0xffff, iadev->seg_reg+ABR_SBPTR_BASE);
diff --git a/drivers/base/bus.c b/drivers/base/bus.c
index e243bd4..000e7b2 100644
--- a/drivers/base/bus.c
+++ b/drivers/base/bus.c
@@ -975,7 +975,7 @@
 EXPORT_SYMBOL_GPL(bus_get_device_klist);
 
 /*
- * Yes, this forcably breaks the klist abstraction temporarily.  It
+ * Yes, this forcibly breaks the klist abstraction temporarily.  It
  * just wants to sort the klist, not change reference counts and
  * take/drop locks rapidly in the process.  It does all this while
  * holding the lock for the list, so objects can't otherwise be
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 2a52270..8340497 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -8,7 +8,7 @@
  *
  *
  * The driver model core calls device_pm_add() when a device is registered.
- * This will intialize the embedded device_pm_info object in the device
+ * This will initialize the embedded device_pm_info object in the device
  * and add it to the list of power-controlled devices. sysfs entries for
  * controlling device power management will also be added.
  *
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index 4b9359a..83c32cb 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -464,6 +464,7 @@
 	tristate "Xen virtual block device support"
 	depends on XEN
 	default y
+	select XEN_XENBUS_FRONTEND
 	help
 	  This driver implements the front-end of the Xen virtual
 	  block device driver.  It communicates with a back-end driver
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 008d4a0..e1e38b1 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -1790,18 +1790,29 @@
 
 	rc = rbd_bus_add_dev(rbd_dev);
 	if (rc)
-		goto err_out_disk;
+		goto err_out_blkdev;
+
 	/* set up and announce blkdev mapping */
 	rc = rbd_init_disk(rbd_dev);
 	if (rc)
-		goto err_out_blkdev;
+		goto err_out_bus;
 
 	return count;
 
+err_out_bus:
+	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
+	list_del_init(&rbd_dev->node);
+	mutex_unlock(&ctl_mutex);
+
+	/* this will also clean up rest of rbd_dev stuff */
+
+	rbd_bus_del_dev(rbd_dev);
+	kfree(options);
+	kfree(mon_dev_name);
+	return rc;
+
 err_out_blkdev:
 	unregister_blkdev(rbd_dev->major, rbd_dev->name);
-err_out_disk:
-	rbd_free_disk(rbd_dev);
 err_out_client:
 	rbd_put_client(rbd_dev);
 	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index d4a7776..0f175a8 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -1047,15 +1047,6 @@
 	  pc8736x_gpio drivers.  If those drivers are built as
 	  modules, this one will be too, named nsc_gpio
 
-config CS5535_GPIO
-	tristate "AMD CS5535/CS5536 GPIO (Geode Companion Device)"
-	depends on X86_32
-	help
-	  Give userspace access to the GPIO pins on the AMD CS5535 and
-	  CS5536 Geode companion devices.
-
-	  If compiled as a module, it will be called cs5535_gpio.
-
 config RAW_DRIVER
 	tristate "RAW driver (/dev/raw/rawN)"
 	depends on BLOCK
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index fa0b824..1e9dffb 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -82,7 +82,6 @@
 obj-$(CONFIG_SCx200_GPIO)	+= scx200_gpio.o
 obj-$(CONFIG_PC8736x_GPIO)	+= pc8736x_gpio.o
 obj-$(CONFIG_NSC_GPIO)		+= nsc_gpio.o
-obj-$(CONFIG_CS5535_GPIO)	+= cs5535_gpio.o
 obj-$(CONFIG_GPIO_TB0219)	+= tb0219.o
 obj-$(CONFIG_TELCLOCK)		+= tlclk.o
 
diff --git a/drivers/char/agp/agp.h b/drivers/char/agp/agp.h
index 5259065..3e67ddd 100644
--- a/drivers/char/agp/agp.h
+++ b/drivers/char/agp/agp.h
@@ -120,7 +120,6 @@
 	void (*agp_destroy_page)(struct page *, int flags);
 	void (*agp_destroy_pages)(struct agp_memory *);
 	int (*agp_type_to_mask_type) (struct agp_bridge_data *, int);
-	void (*chipset_flush)(struct agp_bridge_data *);
 };
 
 struct agp_bridge_data {
diff --git a/drivers/char/agp/compat_ioctl.c b/drivers/char/agp/compat_ioctl.c
index 9d2c97a..a48e05b 100644
--- a/drivers/char/agp/compat_ioctl.c
+++ b/drivers/char/agp/compat_ioctl.c
@@ -276,7 +276,6 @@
 		break;
 
 	case AGPIOC_CHIPSET_FLUSH32:
-		ret_val = agpioc_chipset_flush_wrap(curr_priv);
 		break;
 	}
 
diff --git a/drivers/char/agp/compat_ioctl.h b/drivers/char/agp/compat_ioctl.h
index 0c9678a..f30e0fd 100644
--- a/drivers/char/agp/compat_ioctl.h
+++ b/drivers/char/agp/compat_ioctl.h
@@ -102,6 +102,5 @@
 struct agp_memory *agp_allocate_memory_wrap(size_t pg_count, u32 type);
 struct agp_memory *agp_find_mem_by_key(int key);
 struct agp_client *agp_find_client_by_pid(pid_t id);
-int agpioc_chipset_flush_wrap(struct agp_file_private *priv);
 
 #endif /* _AGP_COMPAT_H */
diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
index 3cb4539..2e04433 100644
--- a/drivers/char/agp/frontend.c
+++ b/drivers/char/agp/frontend.c
@@ -957,13 +957,6 @@
 	return agp_unbind_memory(memory);
 }
 
-int agpioc_chipset_flush_wrap(struct agp_file_private *priv)
-{
-	DBG("");
-	agp_flush_chipset(agp_bridge);
-	return 0;
-}
-
 static long agp_ioctl(struct file *file,
 		     unsigned int cmd, unsigned long arg)
 {
@@ -1039,7 +1032,6 @@
 		break;
 	       
 	case AGPIOC_CHIPSET_FLUSH:
-		ret_val = agpioc_chipset_flush_wrap(curr_priv);
 		break;
 	}
 
diff --git a/drivers/char/agp/generic.c b/drivers/char/agp/generic.c
index 4956f1c..012cba0 100644
--- a/drivers/char/agp/generic.c
+++ b/drivers/char/agp/generic.c
@@ -81,13 +81,6 @@
 	return -1;
 }
 
-void agp_flush_chipset(struct agp_bridge_data *bridge)
-{
-	if (bridge->driver->chipset_flush)
-		bridge->driver->chipset_flush(bridge);
-}
-EXPORT_SYMBOL(agp_flush_chipset);
-
 /*
  * Use kmalloc if possible for the page list. Otherwise fall back to
  * vmalloc. This speeds things up and also saves memory for small AGP
@@ -487,26 +480,6 @@
 }
 EXPORT_SYMBOL(agp_unbind_memory);
 
-/**
- *	agp_rebind_emmory  -  Rewrite the entire GATT, useful on resume
- */
-int agp_rebind_memory(void)
-{
-	struct agp_memory *curr;
-	int ret_val = 0;
-
-	spin_lock(&agp_bridge->mapped_lock);
-	list_for_each_entry(curr, &agp_bridge->mapped_list, mapped_list) {
-		ret_val = curr->bridge->driver->insert_memory(curr,
-							      curr->pg_start,
-							      curr->type);
-		if (ret_val != 0)
-			break;
-	}
-	spin_unlock(&agp_bridge->mapped_lock);
-	return ret_val;
-}
-EXPORT_SYMBOL(agp_rebind_memory);
 
 /* End - Routines for handling swapping of agp_memory into the GATT */
 
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index e72f49d..07e9796 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -828,14 +828,9 @@
 static int agp_intel_resume(struct pci_dev *pdev)
 {
 	struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
-	int ret_val;
 
 	bridge->driver->configure();
 
-	ret_val = agp_rebind_memory();
-	if (ret_val != 0)
-		return ret_val;
-
 	return 0;
 }
 #endif
diff --git a/drivers/char/agp/intel-agp.h b/drivers/char/agp/intel-agp.h
index 90539df..c195bfe 100644
--- a/drivers/char/agp/intel-agp.h
+++ b/drivers/char/agp/intel-agp.h
@@ -75,6 +75,8 @@
 #define I810_GMS_DISABLE	0x00000000
 #define I810_PGETBL_CTL		0x2020
 #define I810_PGETBL_ENABLED	0x00000001
+/* Note: PGETBL_CTL2 has a different offset on G33. */
+#define I965_PGETBL_CTL2	0x20c4
 #define I965_PGETBL_SIZE_MASK	0x0000000e
 #define I965_PGETBL_SIZE_512KB	(0 << 1)
 #define I965_PGETBL_SIZE_256KB	(1 << 1)
@@ -82,9 +84,17 @@
 #define I965_PGETBL_SIZE_1MB	(3 << 1)
 #define I965_PGETBL_SIZE_2MB	(4 << 1)
 #define I965_PGETBL_SIZE_1_5MB	(5 << 1)
-#define G33_PGETBL_SIZE_MASK    (3 << 8)
-#define G33_PGETBL_SIZE_1M      (1 << 8)
-#define G33_PGETBL_SIZE_2M      (2 << 8)
+#define G33_GMCH_SIZE_MASK	(3 << 8)
+#define G33_GMCH_SIZE_1M	(1 << 8)
+#define G33_GMCH_SIZE_2M	(2 << 8)
+#define G4x_GMCH_SIZE_MASK	(0xf << 8)
+#define G4x_GMCH_SIZE_1M	(0x1 << 8)
+#define G4x_GMCH_SIZE_2M	(0x3 << 8)
+#define G4x_GMCH_SIZE_VT_1M	(0x9 << 8)
+#define G4x_GMCH_SIZE_VT_1_5M	(0xa << 8)
+#define G4x_GMCH_SIZE_VT_2M	(0xc << 8)
+
+#define GFX_FLSH_CNTL		0x2170 /* 915+ */
 
 #define I810_DRAM_CTL		0x3000
 #define I810_DRAM_ROW_0		0x00000001
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index 29ac6d4..e921b69 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -24,7 +24,6 @@
 #include <asm/smp.h>
 #include "agp.h"
 #include "intel-agp.h"
-#include <linux/intel-gtt.h>
 #include <drm/intel-gtt.h>
 
 /*
@@ -39,40 +38,12 @@
 #define USE_PCI_DMA_API 0
 #endif
 
-/* Max amount of stolen space, anything above will be returned to Linux */
-int intel_max_stolen = 32 * 1024 * 1024;
-
-static const struct aper_size_info_fixed intel_i810_sizes[] =
-{
-	{64, 16384, 4},
-	/* The 32M mode still requires a 64k gatt */
-	{32, 8192, 4}
-};
-
-#define AGP_DCACHE_MEMORY	1
-#define AGP_PHYS_MEMORY		2
-#define INTEL_AGP_CACHED_MEMORY 3
-
-static struct gatt_mask intel_i810_masks[] =
-{
-	{.mask = I810_PTE_VALID, .type = 0},
-	{.mask = (I810_PTE_VALID | I810_PTE_LOCAL), .type = AGP_DCACHE_MEMORY},
-	{.mask = I810_PTE_VALID, .type = 0},
-	{.mask = I810_PTE_VALID | I830_PTE_SYSTEM_CACHED,
-	 .type = INTEL_AGP_CACHED_MEMORY}
-};
-
-#define INTEL_AGP_UNCACHED_MEMORY              0
-#define INTEL_AGP_CACHED_MEMORY_LLC            1
-#define INTEL_AGP_CACHED_MEMORY_LLC_GFDT       2
-#define INTEL_AGP_CACHED_MEMORY_LLC_MLC        3
-#define INTEL_AGP_CACHED_MEMORY_LLC_MLC_GFDT   4
-
 struct intel_gtt_driver {
 	unsigned int gen : 8;
 	unsigned int is_g33 : 1;
 	unsigned int is_pineview : 1;
 	unsigned int is_ironlake : 1;
+	unsigned int has_pgtbl_enable : 1;
 	unsigned int dma_mask_size : 8;
 	/* Chipset specific GTT setup */
 	int (*setup)(void);
@@ -95,13 +66,14 @@
 	u8 __iomem *registers;
 	phys_addr_t gtt_bus_addr;
 	phys_addr_t gma_bus_addr;
-	phys_addr_t pte_bus_addr;
+	u32 PGETBL_save;
 	u32 __iomem *gtt;		/* I915G */
 	int num_dcache_entries;
 	union {
 		void __iomem *i9xx_flush_page;
 		void *i8xx_flush_page;
 	};
+	char *i81x_gtt_table;
 	struct page *i8xx_page;
 	struct resource ifp_resource;
 	int resource_valid;
@@ -113,42 +85,31 @@
 #define IS_G33		intel_private.driver->is_g33
 #define IS_PINEVIEW	intel_private.driver->is_pineview
 #define IS_IRONLAKE	intel_private.driver->is_ironlake
+#define HAS_PGTBL_EN	intel_private.driver->has_pgtbl_enable
 
-static void intel_agp_free_sglist(struct agp_memory *mem)
-{
-	struct sg_table st;
-
-	st.sgl = mem->sg_list;
-	st.orig_nents = st.nents = mem->page_count;
-
-	sg_free_table(&st);
-
-	mem->sg_list = NULL;
-	mem->num_sg = 0;
-}
-
-static int intel_agp_map_memory(struct agp_memory *mem)
+int intel_gtt_map_memory(struct page **pages, unsigned int num_entries,
+			 struct scatterlist **sg_list, int *num_sg)
 {
 	struct sg_table st;
 	struct scatterlist *sg;
 	int i;
 
-	if (mem->sg_list)
+	if (*sg_list)
 		return 0; /* already mapped (for e.g. resume */
 
-	DBG("try mapping %lu pages\n", (unsigned long)mem->page_count);
+	DBG("try mapping %lu pages\n", (unsigned long)num_entries);
 
-	if (sg_alloc_table(&st, mem->page_count, GFP_KERNEL))
+	if (sg_alloc_table(&st, num_entries, GFP_KERNEL))
 		goto err;
 
-	mem->sg_list = sg = st.sgl;
+	*sg_list = sg = st.sgl;
 
-	for (i = 0 ; i < mem->page_count; i++, sg = sg_next(sg))
-		sg_set_page(sg, mem->pages[i], PAGE_SIZE, 0);
+	for (i = 0 ; i < num_entries; i++, sg = sg_next(sg))
+		sg_set_page(sg, pages[i], PAGE_SIZE, 0);
 
-	mem->num_sg = pci_map_sg(intel_private.pcidev, mem->sg_list,
-				 mem->page_count, PCI_DMA_BIDIRECTIONAL);
-	if (unlikely(!mem->num_sg))
+	*num_sg = pci_map_sg(intel_private.pcidev, *sg_list,
+				 num_entries, PCI_DMA_BIDIRECTIONAL);
+	if (unlikely(!*num_sg))
 		goto err;
 
 	return 0;
@@ -157,90 +118,22 @@
 	sg_free_table(&st);
 	return -ENOMEM;
 }
+EXPORT_SYMBOL(intel_gtt_map_memory);
 
-static void intel_agp_unmap_memory(struct agp_memory *mem)
+void intel_gtt_unmap_memory(struct scatterlist *sg_list, int num_sg)
 {
+	struct sg_table st;
 	DBG("try unmapping %lu pages\n", (unsigned long)mem->page_count);
 
-	pci_unmap_sg(intel_private.pcidev, mem->sg_list,
-		     mem->page_count, PCI_DMA_BIDIRECTIONAL);
-	intel_agp_free_sglist(mem);
+	pci_unmap_sg(intel_private.pcidev, sg_list,
+		     num_sg, PCI_DMA_BIDIRECTIONAL);
+
+	st.sgl = sg_list;
+	st.orig_nents = st.nents = num_sg;
+
+	sg_free_table(&st);
 }
-
-static int intel_i810_fetch_size(void)
-{
-	u32 smram_miscc;
-	struct aper_size_info_fixed *values;
-
-	pci_read_config_dword(intel_private.bridge_dev,
-			      I810_SMRAM_MISCC, &smram_miscc);
-	values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes);
-
-	if ((smram_miscc & I810_GMS) == I810_GMS_DISABLE) {
-		dev_warn(&intel_private.bridge_dev->dev, "i810 is disabled\n");
-		return 0;
-	}
-	if ((smram_miscc & I810_GFX_MEM_WIN_SIZE) == I810_GFX_MEM_WIN_32M) {
-		agp_bridge->current_size = (void *) (values + 1);
-		agp_bridge->aperture_size_idx = 1;
-		return values[1].size;
-	} else {
-		agp_bridge->current_size = (void *) (values);
-		agp_bridge->aperture_size_idx = 0;
-		return values[0].size;
-	}
-
-	return 0;
-}
-
-static int intel_i810_configure(void)
-{
-	struct aper_size_info_fixed *current_size;
-	u32 temp;
-	int i;
-
-	current_size = A_SIZE_FIX(agp_bridge->current_size);
-
-	if (!intel_private.registers) {
-		pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &temp);
-		temp &= 0xfff80000;
-
-		intel_private.registers = ioremap(temp, 128 * 4096);
-		if (!intel_private.registers) {
-			dev_err(&intel_private.pcidev->dev,
-				"can't remap memory\n");
-			return -ENOMEM;
-		}
-	}
-
-	if ((readl(intel_private.registers+I810_DRAM_CTL)
-		& I810_DRAM_ROW_0) == I810_DRAM_ROW_0_SDRAM) {
-		/* This will need to be dynamically assigned */
-		dev_info(&intel_private.pcidev->dev,
-			 "detected 4MB dedicated video ram\n");
-		intel_private.num_dcache_entries = 1024;
-	}
-	pci_read_config_dword(intel_private.pcidev, I810_GMADDR, &temp);
-	agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
-	writel(agp_bridge->gatt_bus_addr | I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL);
-	readl(intel_private.registers+I810_PGETBL_CTL);	/* PCI Posting. */
-
-	if (agp_bridge->driver->needs_scratch_page) {
-		for (i = 0; i < current_size->num_entries; i++) {
-			writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
-		}
-		readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));	/* PCI posting. */
-	}
-	global_cache_flush();
-	return 0;
-}
-
-static void intel_i810_cleanup(void)
-{
-	writel(0, intel_private.registers+I810_PGETBL_CTL);
-	readl(intel_private.registers);	/* PCI Posting. */
-	iounmap(intel_private.registers);
-}
+EXPORT_SYMBOL(intel_gtt_unmap_memory);
 
 static void intel_fake_agp_enable(struct agp_bridge_data *bridge, u32 mode)
 {
@@ -277,80 +170,64 @@
 	atomic_dec(&agp_bridge->current_memory_agp);
 }
 
-static int intel_i810_insert_entries(struct agp_memory *mem, off_t pg_start,
-				int type)
+#define I810_GTT_ORDER 4
+static int i810_setup(void)
 {
-	int i, j, num_entries;
-	void *temp;
-	int ret = -EINVAL;
-	int mask_type;
+	u32 reg_addr;
+	char *gtt_table;
 
-	if (mem->page_count == 0)
-		goto out;
+	/* i81x does not preallocate the gtt. It's always 64kb in size. */
+	gtt_table = alloc_gatt_pages(I810_GTT_ORDER);
+	if (gtt_table == NULL)
+		return -ENOMEM;
+	intel_private.i81x_gtt_table = gtt_table;
 
-	temp = agp_bridge->current_size;
-	num_entries = A_SIZE_FIX(temp)->num_entries;
+	pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &reg_addr);
+	reg_addr &= 0xfff80000;
 
-	if ((pg_start + mem->page_count) > num_entries)
-		goto out_err;
+	intel_private.registers = ioremap(reg_addr, KB(64));
+	if (!intel_private.registers)
+		return -ENOMEM;
 
+	writel(virt_to_phys(gtt_table) | I810_PGETBL_ENABLED,
+	       intel_private.registers+I810_PGETBL_CTL);
 
-	for (j = pg_start; j < (pg_start + mem->page_count); j++) {
-		if (!PGE_EMPTY(agp_bridge, readl(agp_bridge->gatt_table+j))) {
-			ret = -EBUSY;
-			goto out_err;
-		}
+	intel_private.gtt_bus_addr = reg_addr + I810_PTE_BASE;
+
+	if ((readl(intel_private.registers+I810_DRAM_CTL)
+		& I810_DRAM_ROW_0) == I810_DRAM_ROW_0_SDRAM) {
+		dev_info(&intel_private.pcidev->dev,
+			 "detected 4MB dedicated video ram\n");
+		intel_private.num_dcache_entries = 1024;
 	}
 
-	if (type != mem->type)
-		goto out_err;
-
-	mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);
-
-	switch (mask_type) {
-	case AGP_DCACHE_MEMORY:
-		if (!mem->is_flushed)
-			global_cache_flush();
-		for (i = pg_start; i < (pg_start + mem->page_count); i++) {
-			writel((i*4096)|I810_PTE_LOCAL|I810_PTE_VALID,
-			       intel_private.registers+I810_PTE_BASE+(i*4));
-		}
-		readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));
-		break;
-	case AGP_PHYS_MEMORY:
-	case AGP_NORMAL_MEMORY:
-		if (!mem->is_flushed)
-			global_cache_flush();
-		for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
-			writel(agp_bridge->driver->mask_memory(agp_bridge,
-					page_to_phys(mem->pages[i]), mask_type),
-			       intel_private.registers+I810_PTE_BASE+(j*4));
-		}
-		readl(intel_private.registers+I810_PTE_BASE+((j-1)*4));
-		break;
-	default:
-		goto out_err;
-	}
-
-out:
-	ret = 0;
-out_err:
-	mem->is_flushed = true;
-	return ret;
+	return 0;
 }
 
-static int intel_i810_remove_entries(struct agp_memory *mem, off_t pg_start,
-				int type)
+static void i810_cleanup(void)
+{
+	writel(0, intel_private.registers+I810_PGETBL_CTL);
+	free_gatt_pages(intel_private.i81x_gtt_table, I810_GTT_ORDER);
+}
+
+static int i810_insert_dcache_entries(struct agp_memory *mem, off_t pg_start,
+				      int type)
 {
 	int i;
 
-	if (mem->page_count == 0)
-		return 0;
+	if ((pg_start + mem->page_count)
+			> intel_private.num_dcache_entries)
+		return -EINVAL;
 
-	for (i = pg_start; i < (mem->page_count + pg_start); i++) {
-		writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
+	if (!mem->is_flushed)
+		global_cache_flush();
+
+	for (i = pg_start; i < (pg_start + mem->page_count); i++) {
+		dma_addr_t addr = i << PAGE_SHIFT;
+		intel_private.driver->write_entry(addr,
+						  i, type);
 	}
-	readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));
+	readl(intel_private.gtt+i-1);
 
 	return 0;
 }
@@ -397,29 +274,6 @@
 	return new;
 }
 
-static struct agp_memory *intel_i810_alloc_by_type(size_t pg_count, int type)
-{
-	struct agp_memory *new;
-
-	if (type == AGP_DCACHE_MEMORY) {
-		if (pg_count != intel_private.num_dcache_entries)
-			return NULL;
-
-		new = agp_create_memory(1);
-		if (new == NULL)
-			return NULL;
-
-		new->type = AGP_DCACHE_MEMORY;
-		new->page_count = pg_count;
-		new->num_scratch_pages = 0;
-		agp_free_page_array(new);
-		return new;
-	}
-	if (type == AGP_PHYS_MEMORY)
-		return alloc_agpphysmem_i8xx(pg_count, type);
-	return NULL;
-}
-
 static void intel_i810_free_by_type(struct agp_memory *curr)
 {
 	agp_free_key(curr->key);
@@ -437,13 +291,6 @@
 	kfree(curr);
 }
 
-static unsigned long intel_i810_mask_memory(struct agp_bridge_data *bridge,
-					    dma_addr_t addr, int type)
-{
-	/* Type checking must be done elsewhere */
-	return addr | bridge->driver->masks[type].mask;
-}
-
 static int intel_gtt_setup_scratch_page(void)
 {
 	struct page *page;
@@ -455,7 +302,7 @@
 	get_page(page);
 	set_pages_uc(page, 1);
 
-	if (USE_PCI_DMA_API && INTEL_GTT_GEN > 2) {
+	if (intel_private.base.needs_dmar) {
 		dma_addr = pci_map_page(intel_private.pcidev, page, 0,
 				    PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
 		if (pci_dma_mapping_error(intel_private.pcidev, dma_addr))
@@ -470,34 +317,45 @@
 	return 0;
 }
 
-static const struct aper_size_info_fixed const intel_fake_agp_sizes[] = {
+static void i810_write_entry(dma_addr_t addr, unsigned int entry,
+			     unsigned int flags)
+{
+	u32 pte_flags = I810_PTE_VALID;
+
+	switch (flags) {
+	case AGP_DCACHE_MEMORY:
+		pte_flags |= I810_PTE_LOCAL;
+		break;
+	case AGP_USER_CACHED_MEMORY:
+		pte_flags |= I830_PTE_SYSTEM_CACHED;
+		break;
+	}
+
+	writel(addr | pte_flags, intel_private.gtt + entry);
+}
+
+static const struct aper_size_info_fixed intel_fake_agp_sizes[] = {
+	{32, 8192, 3},
+	{64, 16384, 4},
 	{128, 32768, 5},
-	/* The 64M mode still requires a 128k gatt */
-	{64, 16384, 5},
 	{256, 65536, 6},
 	{512, 131072, 7},
 };
 
-static unsigned int intel_gtt_stolen_entries(void)
+static unsigned int intel_gtt_stolen_size(void)
 {
 	u16 gmch_ctrl;
 	u8 rdct;
 	int local = 0;
 	static const int ddt[4] = { 0, 16, 32, 64 };
-	unsigned int overhead_entries, stolen_entries;
 	unsigned int stolen_size = 0;
 
+	if (INTEL_GTT_GEN == 1)
+		return 0; /* no stolen mem on i81x */
+
 	pci_read_config_word(intel_private.bridge_dev,
 			     I830_GMCH_CTRL, &gmch_ctrl);
 
-	if (INTEL_GTT_GEN > 4 || IS_PINEVIEW)
-		overhead_entries = 0;
-	else
-		overhead_entries = intel_private.base.gtt_mappable_entries
-			/ 1024;
-
-	overhead_entries += 1; /* BIOS popup */
-
 	if (intel_private.bridge_dev->device == PCI_DEVICE_ID_INTEL_82830_HB ||
 	    intel_private.bridge_dev->device == PCI_DEVICE_ID_INTEL_82845G_HB) {
 		switch (gmch_ctrl & I830_GMCH_GMS_MASK) {
@@ -623,12 +481,7 @@
 		}
 	}
 
-	if (!local && stolen_size > intel_max_stolen) {
-		dev_info(&intel_private.bridge_dev->dev,
-			 "detected %dK stolen memory, trimming to %dK\n",
-			 stolen_size / KB(1), intel_max_stolen / KB(1));
-		stolen_size = intel_max_stolen;
-	} else if (stolen_size > 0) {
+	if (stolen_size > 0) {
 		dev_info(&intel_private.bridge_dev->dev, "detected %dK %s memory\n",
 		       stolen_size / KB(1), local ? "local" : "stolen");
 	} else {
@@ -637,46 +490,88 @@
 		stolen_size = 0;
 	}
 
-	stolen_entries = stolen_size/KB(4) - overhead_entries;
+	return stolen_size;
+}
 
-	return stolen_entries;
+static void i965_adjust_pgetbl_size(unsigned int size_flag)
+{
+	u32 pgetbl_ctl, pgetbl_ctl2;
+
+	/* ensure that ppgtt is disabled */
+	pgetbl_ctl2 = readl(intel_private.registers+I965_PGETBL_CTL2);
+	pgetbl_ctl2 &= ~I810_PGETBL_ENABLED;
+	writel(pgetbl_ctl2, intel_private.registers+I965_PGETBL_CTL2);
+
+	/* write the new ggtt size */
+	pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL);
+	pgetbl_ctl &= ~I965_PGETBL_SIZE_MASK;
+	pgetbl_ctl |= size_flag;
+	writel(pgetbl_ctl, intel_private.registers+I810_PGETBL_CTL);
+}
+
+static unsigned int i965_gtt_total_entries(void)
+{
+	int size;
+	u32 pgetbl_ctl;
+	u16 gmch_ctl;
+
+	pci_read_config_word(intel_private.bridge_dev,
+			     I830_GMCH_CTRL, &gmch_ctl);
+
+	if (INTEL_GTT_GEN == 5) {
+		switch (gmch_ctl & G4x_GMCH_SIZE_MASK) {
+		case G4x_GMCH_SIZE_1M:
+		case G4x_GMCH_SIZE_VT_1M:
+			i965_adjust_pgetbl_size(I965_PGETBL_SIZE_1MB);
+			break;
+		case G4x_GMCH_SIZE_VT_1_5M:
+			i965_adjust_pgetbl_size(I965_PGETBL_SIZE_1_5MB);
+			break;
+		case G4x_GMCH_SIZE_2M:
+		case G4x_GMCH_SIZE_VT_2M:
+			i965_adjust_pgetbl_size(I965_PGETBL_SIZE_2MB);
+			break;
+		}
+	}
+
+	pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL);
+
+	switch (pgetbl_ctl & I965_PGETBL_SIZE_MASK) {
+	case I965_PGETBL_SIZE_128KB:
+		size = KB(128);
+		break;
+	case I965_PGETBL_SIZE_256KB:
+		size = KB(256);
+		break;
+	case I965_PGETBL_SIZE_512KB:
+		size = KB(512);
+		break;
+	/* GTT pagetable sizes bigger than 512KB are not possible on G33! */
+	case I965_PGETBL_SIZE_1MB:
+		size = KB(1024);
+		break;
+	case I965_PGETBL_SIZE_2MB:
+		size = KB(2048);
+		break;
+	case I965_PGETBL_SIZE_1_5MB:
+		size = KB(1024 + 512);
+		break;
+	default:
+		dev_info(&intel_private.pcidev->dev,
+			 "unknown page table size, assuming 512KB\n");
+		size = KB(512);
+	}
+
+	return size/4;
 }
 
 static unsigned int intel_gtt_total_entries(void)
 {
 	int size;
 
-	if (IS_G33 || INTEL_GTT_GEN == 4 || INTEL_GTT_GEN == 5) {
-		u32 pgetbl_ctl;
-		pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL);
-
-		switch (pgetbl_ctl & I965_PGETBL_SIZE_MASK) {
-		case I965_PGETBL_SIZE_128KB:
-			size = KB(128);
-			break;
-		case I965_PGETBL_SIZE_256KB:
-			size = KB(256);
-			break;
-		case I965_PGETBL_SIZE_512KB:
-			size = KB(512);
-			break;
-		case I965_PGETBL_SIZE_1MB:
-			size = KB(1024);
-			break;
-		case I965_PGETBL_SIZE_2MB:
-			size = KB(2048);
-			break;
-		case I965_PGETBL_SIZE_1_5MB:
-			size = KB(1024 + 512);
-			break;
-		default:
-			dev_info(&intel_private.pcidev->dev,
-				 "unknown page table size, assuming 512KB\n");
-			size = KB(512);
-		}
-
-		return size/4;
-	} else if (INTEL_GTT_GEN == 6) {
+	if (IS_G33 || INTEL_GTT_GEN == 4 || INTEL_GTT_GEN == 5)
+		return i965_gtt_total_entries();
+	else if (INTEL_GTT_GEN == 6) {
 		u16 snb_gmch_ctl;
 
 		pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
@@ -706,7 +601,18 @@
 {
 	unsigned int aperture_size;
 
-	if (INTEL_GTT_GEN == 2) {
+	if (INTEL_GTT_GEN == 1) {
+		u32 smram_miscc;
+
+		pci_read_config_dword(intel_private.bridge_dev,
+				      I810_SMRAM_MISCC, &smram_miscc);
+
+		if ((smram_miscc & I810_GFX_MEM_WIN_SIZE)
+				== I810_GFX_MEM_WIN_32M)
+			aperture_size = MB(32);
+		else
+			aperture_size = MB(64);
+	} else if (INTEL_GTT_GEN == 2) {
 		u16 gmch_ctrl;
 
 		pci_read_config_word(intel_private.bridge_dev,
@@ -739,7 +645,7 @@
 
 	iounmap(intel_private.gtt);
 	iounmap(intel_private.registers);
-	
+
 	intel_gtt_teardown_scratch_page();
 }
 
@@ -755,6 +661,14 @@
 	intel_private.base.gtt_mappable_entries = intel_gtt_mappable_entries();
 	intel_private.base.gtt_total_entries = intel_gtt_total_entries();
 
+	/* save the PGETBL reg for resume */
+	intel_private.PGETBL_save =
+		readl(intel_private.registers+I810_PGETBL_CTL)
+			& ~I810_PGETBL_ENABLED;
+	/* we only ever restore the register when enabling the PGTBL... */
+	if (HAS_PGTBL_EN)
+		intel_private.PGETBL_save |= I810_PGETBL_ENABLED;
+
 	dev_info(&intel_private.bridge_dev->dev,
 			"detected gtt size: %dK total, %dK mappable\n",
 			intel_private.base.gtt_total_entries * 4,
@@ -772,14 +686,9 @@
 
 	global_cache_flush();   /* FIXME: ? */
 
-	/* we have to call this as early as possible after the MMIO base address is known */
-	intel_private.base.gtt_stolen_entries = intel_gtt_stolen_entries();
-	if (intel_private.base.gtt_stolen_entries == 0) {
-		intel_private.driver->cleanup();
-		iounmap(intel_private.registers);
-		iounmap(intel_private.gtt);
-		return -ENOMEM;
-	}
+	intel_private.base.stolen_size = intel_gtt_stolen_size();
+
+	intel_private.base.needs_dmar = USE_PCI_DMA_API && INTEL_GTT_GEN > 2;
 
 	ret = intel_gtt_setup_scratch_page();
 	if (ret != 0) {
@@ -862,25 +771,19 @@
 			     unsigned int flags)
 {
 	u32 pte_flags = I810_PTE_VALID;
-	
-	switch (flags) {
-	case AGP_DCACHE_MEMORY:
-		pte_flags |= I810_PTE_LOCAL;
-		break;
-	case AGP_USER_CACHED_MEMORY:
+
+	if (flags ==  AGP_USER_CACHED_MEMORY)
 		pte_flags |= I830_PTE_SYSTEM_CACHED;
-		break;
-	}
 
 	writel(addr | pte_flags, intel_private.gtt + entry);
 }
 
-static void intel_enable_gtt(void)
+static bool intel_enable_gtt(void)
 {
 	u32 gma_addr;
-	u16 gmch_ctrl;
+	u8 __iomem *reg;
 
-	if (INTEL_GTT_GEN == 2)
+	if (INTEL_GTT_GEN <= 2)
 		pci_read_config_dword(intel_private.pcidev, I810_GMADDR,
 				      &gma_addr);
 	else
@@ -889,13 +792,47 @@
 
 	intel_private.gma_bus_addr = (gma_addr & PCI_BASE_ADDRESS_MEM_MASK);
 
-	pci_read_config_word(intel_private.bridge_dev, I830_GMCH_CTRL, &gmch_ctrl);
-	gmch_ctrl |= I830_GMCH_ENABLED;
-	pci_write_config_word(intel_private.bridge_dev, I830_GMCH_CTRL, gmch_ctrl);
+	if (INTEL_GTT_GEN >= 6)
+	    return true;
 
-	writel(intel_private.pte_bus_addr|I810_PGETBL_ENABLED,
-	       intel_private.registers+I810_PGETBL_CTL);
-	readl(intel_private.registers+I810_PGETBL_CTL);	/* PCI Posting. */
+	if (INTEL_GTT_GEN == 2) {
+		u16 gmch_ctrl;
+
+		pci_read_config_word(intel_private.bridge_dev,
+				     I830_GMCH_CTRL, &gmch_ctrl);
+		gmch_ctrl |= I830_GMCH_ENABLED;
+		pci_write_config_word(intel_private.bridge_dev,
+				      I830_GMCH_CTRL, gmch_ctrl);
+
+		pci_read_config_word(intel_private.bridge_dev,
+				     I830_GMCH_CTRL, &gmch_ctrl);
+		if ((gmch_ctrl & I830_GMCH_ENABLED) == 0) {
+			dev_err(&intel_private.pcidev->dev,
+				"failed to enable the GTT: GMCH_CTRL=%x\n",
+				gmch_ctrl);
+			return false;
+		}
+	}
+
+	/* On the resume path we may be adjusting the PGTBL value, so
+	 * be paranoid and flush all chipset write buffers...
+	 */
+	if (INTEL_GTT_GEN >= 3)
+		writel(0, intel_private.registers+GFX_FLSH_CNTL);
+
+	reg = intel_private.registers+I810_PGETBL_CTL;
+	writel(intel_private.PGETBL_save, reg);
+	if (HAS_PGTBL_EN && (readl(reg) & I810_PGETBL_ENABLED) == 0) {
+		dev_err(&intel_private.pcidev->dev,
+			"failed to enable the GTT: PGETBL=%x [expected %x]\n",
+			readl(reg), intel_private.PGETBL_save);
+		return false;
+	}
+
+	if (INTEL_GTT_GEN >= 3)
+		writel(0, intel_private.registers+GFX_FLSH_CNTL);
+
+	return true;
 }
 
 static int i830_setup(void)
@@ -910,8 +847,6 @@
 		return -ENOMEM;
 
 	intel_private.gtt_bus_addr = reg_addr + I810_PTE_BASE;
-	intel_private.pte_bus_addr =
-		readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
 
 	intel_i830_setup_flush();
 
@@ -936,12 +871,12 @@
 {
 	int i;
 
-	intel_enable_gtt();
+	if (!intel_enable_gtt())
+	    return -EIO;
 
 	agp_bridge->gart_bus_addr = intel_private.gma_bus_addr;
 
-	for (i = intel_private.base.gtt_stolen_entries;
-			i < intel_private.base.gtt_total_entries; i++) {
+	for (i = 0; i < intel_private.base.gtt_total_entries; i++) {
 		intel_private.driver->write_entry(intel_private.scratch_page_dma,
 						  i, 0);
 	}
@@ -965,10 +900,10 @@
 	return false;
 }
 
-static void intel_gtt_insert_sg_entries(struct scatterlist *sg_list,
-					unsigned int sg_len,
-					unsigned int pg_start,
-					unsigned int flags)
+void intel_gtt_insert_sg_entries(struct scatterlist *sg_list,
+				 unsigned int sg_len,
+				 unsigned int pg_start,
+				 unsigned int flags)
 {
 	struct scatterlist *sg;
 	unsigned int len, m;
@@ -989,27 +924,34 @@
 	}
 	readl(intel_private.gtt+j-1);
 }
+EXPORT_SYMBOL(intel_gtt_insert_sg_entries);
+
+void intel_gtt_insert_pages(unsigned int first_entry, unsigned int num_entries,
+			    struct page **pages, unsigned int flags)
+{
+	int i, j;
+
+	for (i = 0, j = first_entry; i < num_entries; i++, j++) {
+		dma_addr_t addr = page_to_phys(pages[i]);
+		intel_private.driver->write_entry(addr,
+						  j, flags);
+	}
+	readl(intel_private.gtt+j-1);
+}
+EXPORT_SYMBOL(intel_gtt_insert_pages);
 
 static int intel_fake_agp_insert_entries(struct agp_memory *mem,
 					 off_t pg_start, int type)
 {
-	int i, j;
 	int ret = -EINVAL;
 
+	if (INTEL_GTT_GEN == 1 && type == AGP_DCACHE_MEMORY)
+		return i810_insert_dcache_entries(mem, pg_start, type);
+
 	if (mem->page_count == 0)
 		goto out;
 
-	if (pg_start < intel_private.base.gtt_stolen_entries) {
-		dev_printk(KERN_DEBUG, &intel_private.pcidev->dev,
-			   "pg_start == 0x%.8lx, gtt_stolen_entries == 0x%.8x\n",
-			   pg_start, intel_private.base.gtt_stolen_entries);
-
-		dev_info(&intel_private.pcidev->dev,
-			 "trying to insert into local/stolen memory\n");
-		goto out_err;
-	}
-
-	if ((pg_start + mem->page_count) > intel_private.base.gtt_total_entries)
+	if (pg_start + mem->page_count > intel_private.base.gtt_total_entries)
 		goto out_err;
 
 	if (type != mem->type)
@@ -1021,21 +963,17 @@
 	if (!mem->is_flushed)
 		global_cache_flush();
 
-	if (USE_PCI_DMA_API && INTEL_GTT_GEN > 2) {
-		ret = intel_agp_map_memory(mem);
+	if (intel_private.base.needs_dmar) {
+		ret = intel_gtt_map_memory(mem->pages, mem->page_count,
+					   &mem->sg_list, &mem->num_sg);
 		if (ret != 0)
 			return ret;
 
 		intel_gtt_insert_sg_entries(mem->sg_list, mem->num_sg,
 					    pg_start, type);
-	} else {
-		for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
-			dma_addr_t addr = page_to_phys(mem->pages[i]);
-			intel_private.driver->write_entry(addr,
-							  j, type);
-		}
-		readl(intel_private.gtt+j-1);
-	}
+	} else
+		intel_gtt_insert_pages(pg_start, mem->page_count, mem->pages,
+				       type);
 
 out:
 	ret = 0;
@@ -1044,40 +982,54 @@
 	return ret;
 }
 
-static int intel_fake_agp_remove_entries(struct agp_memory *mem,
-					 off_t pg_start, int type)
+void intel_gtt_clear_range(unsigned int first_entry, unsigned int num_entries)
 {
-	int i;
+	unsigned int i;
 
-	if (mem->page_count == 0)
-		return 0;
-
-	if (pg_start < intel_private.base.gtt_stolen_entries) {
-		dev_info(&intel_private.pcidev->dev,
-			 "trying to disable local/stolen memory\n");
-		return -EINVAL;
-	}
-
-	if (USE_PCI_DMA_API && INTEL_GTT_GEN > 2)
-		intel_agp_unmap_memory(mem);
-
-	for (i = pg_start; i < (mem->page_count + pg_start); i++) {
+	for (i = first_entry; i < (first_entry + num_entries); i++) {
 		intel_private.driver->write_entry(intel_private.scratch_page_dma,
 						  i, 0);
 	}
 	readl(intel_private.gtt+i-1);
+}
+EXPORT_SYMBOL(intel_gtt_clear_range);
+
+static int intel_fake_agp_remove_entries(struct agp_memory *mem,
+					 off_t pg_start, int type)
+{
+	if (mem->page_count == 0)
+		return 0;
+
+	intel_gtt_clear_range(pg_start, mem->page_count);
+
+	if (intel_private.base.needs_dmar) {
+		intel_gtt_unmap_memory(mem->sg_list, mem->num_sg);
+		mem->sg_list = NULL;
+		mem->num_sg = 0;
+	}
 
 	return 0;
 }
 
-static void intel_fake_agp_chipset_flush(struct agp_bridge_data *bridge)
-{
-	intel_private.driver->chipset_flush();
-}
-
 static struct agp_memory *intel_fake_agp_alloc_by_type(size_t pg_count,
 						       int type)
 {
+	struct agp_memory *new;
+
+	if (type == AGP_DCACHE_MEMORY && INTEL_GTT_GEN == 1) {
+		if (pg_count != intel_private.num_dcache_entries)
+			return NULL;
+
+		new = agp_create_memory(1);
+		if (new == NULL)
+			return NULL;
+
+		new->type = AGP_DCACHE_MEMORY;
+		new->page_count = pg_count;
+		new->num_scratch_pages = 0;
+		agp_free_page_array(new);
+		return new;
+	}
 	if (type == AGP_PHYS_MEMORY)
 		return alloc_agpphysmem_i8xx(pg_count, type);
 	/* always return NULL for other allocation types for now */
@@ -1274,40 +1226,11 @@
 		intel_private.gtt_bus_addr = reg_addr + gtt_offset;
 	}
 
-	intel_private.pte_bus_addr =
-		readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
-
 	intel_i9xx_setup_flush();
 
 	return 0;
 }
 
-static const struct agp_bridge_driver intel_810_driver = {
-	.owner			= THIS_MODULE,
-	.aperture_sizes		= intel_i810_sizes,
-	.size_type		= FIXED_APER_SIZE,
-	.num_aperture_sizes	= 2,
-	.needs_scratch_page	= true,
-	.configure		= intel_i810_configure,
-	.fetch_size		= intel_i810_fetch_size,
-	.cleanup		= intel_i810_cleanup,
-	.mask_memory		= intel_i810_mask_memory,
-	.masks			= intel_i810_masks,
-	.agp_enable		= intel_fake_agp_enable,
-	.cache_flush		= global_cache_flush,
-	.create_gatt_table	= agp_generic_create_gatt_table,
-	.free_gatt_table	= agp_generic_free_gatt_table,
-	.insert_memory		= intel_i810_insert_entries,
-	.remove_memory		= intel_i810_remove_entries,
-	.alloc_by_type		= intel_i810_alloc_by_type,
-	.free_by_type		= intel_i810_free_by_type,
-	.agp_alloc_page		= agp_generic_alloc_page,
-	.agp_alloc_pages        = agp_generic_alloc_pages,
-	.agp_destroy_page	= agp_generic_destroy_page,
-	.agp_destroy_pages      = agp_generic_destroy_pages,
-	.agp_type_to_mask_type  = agp_generic_type_to_mask_type,
-};
-
 static const struct agp_bridge_driver intel_fake_agp_driver = {
 	.owner			= THIS_MODULE,
 	.size_type		= FIXED_APER_SIZE,
@@ -1328,15 +1251,20 @@
 	.agp_alloc_pages        = agp_generic_alloc_pages,
 	.agp_destroy_page	= agp_generic_destroy_page,
 	.agp_destroy_pages      = agp_generic_destroy_pages,
-	.chipset_flush		= intel_fake_agp_chipset_flush,
 };
 
 static const struct intel_gtt_driver i81x_gtt_driver = {
 	.gen = 1,
+	.has_pgtbl_enable = 1,
 	.dma_mask_size = 32,
+	.setup = i810_setup,
+	.cleanup = i810_cleanup,
+	.check_flags = i830_check_flags,
+	.write_entry = i810_write_entry,
 };
 static const struct intel_gtt_driver i8xx_gtt_driver = {
 	.gen = 2,
+	.has_pgtbl_enable = 1,
 	.setup = i830_setup,
 	.cleanup = i830_cleanup,
 	.write_entry = i830_write_entry,
@@ -1346,10 +1274,11 @@
 };
 static const struct intel_gtt_driver i915_gtt_driver = {
 	.gen = 3,
+	.has_pgtbl_enable = 1,
 	.setup = i9xx_setup,
 	.cleanup = i9xx_cleanup,
 	/* i945 is the last gpu to need phys mem (for overlay and cursors). */
-	.write_entry = i830_write_entry, 
+	.write_entry = i830_write_entry,
 	.dma_mask_size = 32,
 	.check_flags = i830_check_flags,
 	.chipset_flush = i9xx_chipset_flush,
@@ -1376,6 +1305,7 @@
 };
 static const struct intel_gtt_driver i965_gtt_driver = {
 	.gen = 4,
+	.has_pgtbl_enable = 1,
 	.setup = i9xx_setup,
 	.cleanup = i9xx_cleanup,
 	.write_entry = i965_write_entry,
@@ -1419,93 +1349,92 @@
 static const struct intel_gtt_driver_description {
 	unsigned int gmch_chip_id;
 	char *name;
-	const struct agp_bridge_driver *gmch_driver;
 	const struct intel_gtt_driver *gtt_driver;
 } intel_gtt_chipsets[] = {
-	{ PCI_DEVICE_ID_INTEL_82810_IG1, "i810", &intel_810_driver,
+	{ PCI_DEVICE_ID_INTEL_82810_IG1, "i810",
 		&i81x_gtt_driver},
-	{ PCI_DEVICE_ID_INTEL_82810_IG3, "i810", &intel_810_driver,
+	{ PCI_DEVICE_ID_INTEL_82810_IG3, "i810",
 		&i81x_gtt_driver},
-	{ PCI_DEVICE_ID_INTEL_82810E_IG, "i810", &intel_810_driver,
+	{ PCI_DEVICE_ID_INTEL_82810E_IG, "i810",
 		&i81x_gtt_driver},
-	{ PCI_DEVICE_ID_INTEL_82815_CGC, "i815", &intel_810_driver,
+	{ PCI_DEVICE_ID_INTEL_82815_CGC, "i815",
 		&i81x_gtt_driver},
 	{ PCI_DEVICE_ID_INTEL_82830_CGC, "830M",
-		&intel_fake_agp_driver, &i8xx_gtt_driver},
+		&i8xx_gtt_driver},
 	{ PCI_DEVICE_ID_INTEL_82845G_IG, "830M",
-		&intel_fake_agp_driver, &i8xx_gtt_driver},
+		&i8xx_gtt_driver},
 	{ PCI_DEVICE_ID_INTEL_82854_IG, "854",
-		&intel_fake_agp_driver, &i8xx_gtt_driver},
+		&i8xx_gtt_driver},
 	{ PCI_DEVICE_ID_INTEL_82855GM_IG, "855GM",
-		&intel_fake_agp_driver, &i8xx_gtt_driver},
+		&i8xx_gtt_driver},
 	{ PCI_DEVICE_ID_INTEL_82865_IG, "865",
-		&intel_fake_agp_driver, &i8xx_gtt_driver},
+		&i8xx_gtt_driver},
 	{ PCI_DEVICE_ID_INTEL_E7221_IG, "E7221 (i915)",
-		&intel_fake_agp_driver, &i915_gtt_driver },
+		&i915_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_82915G_IG, "915G",
-		&intel_fake_agp_driver, &i915_gtt_driver },
+		&i915_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_82915GM_IG, "915GM",
-		&intel_fake_agp_driver, &i915_gtt_driver },
+		&i915_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_82945G_IG, "945G",
-		&intel_fake_agp_driver, &i915_gtt_driver },
+		&i915_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_82945GM_IG, "945GM",
-		&intel_fake_agp_driver, &i915_gtt_driver },
+		&i915_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_82945GME_IG, "945GME",
-		&intel_fake_agp_driver, &i915_gtt_driver },
+		&i915_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_82946GZ_IG, "946GZ",
-		&intel_fake_agp_driver, &i965_gtt_driver },
+		&i965_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_82G35_IG, "G35",
-		&intel_fake_agp_driver, &i965_gtt_driver },
+		&i965_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_82965Q_IG, "965Q",
-		&intel_fake_agp_driver, &i965_gtt_driver },
+		&i965_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_82965G_IG, "965G",
-		&intel_fake_agp_driver, &i965_gtt_driver },
+		&i965_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_82965GM_IG, "965GM",
-		&intel_fake_agp_driver, &i965_gtt_driver },
+		&i965_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_82965GME_IG, "965GME/GLE",
-		&intel_fake_agp_driver, &i965_gtt_driver },
+		&i965_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_G33_IG, "G33",
-		&intel_fake_agp_driver, &g33_gtt_driver },
+		&g33_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_Q35_IG, "Q35",
-		&intel_fake_agp_driver, &g33_gtt_driver },
+		&g33_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_Q33_IG, "Q33",
-		&intel_fake_agp_driver, &g33_gtt_driver },
+		&g33_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG, "GMA3150",
-		&intel_fake_agp_driver, &pineview_gtt_driver },
+		&pineview_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_PINEVIEW_IG, "GMA3150",
-		&intel_fake_agp_driver, &pineview_gtt_driver },
+		&pineview_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_GM45_IG, "GM45",
-		&intel_fake_agp_driver, &g4x_gtt_driver },
+		&g4x_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_EAGLELAKE_IG, "Eaglelake",
-		&intel_fake_agp_driver, &g4x_gtt_driver },
+		&g4x_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_Q45_IG, "Q45/Q43",
-		&intel_fake_agp_driver, &g4x_gtt_driver },
+		&g4x_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_G45_IG, "G45/G43",
-		&intel_fake_agp_driver, &g4x_gtt_driver },
+		&g4x_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_B43_IG, "B43",
-		&intel_fake_agp_driver, &g4x_gtt_driver },
+		&g4x_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_B43_1_IG, "B43",
-		&intel_fake_agp_driver, &g4x_gtt_driver },
+		&g4x_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_G41_IG, "G41",
-		&intel_fake_agp_driver, &g4x_gtt_driver },
+		&g4x_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG,
-	    "HD Graphics", &intel_fake_agp_driver, &ironlake_gtt_driver },
+	    "HD Graphics", &ironlake_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG,
-	    "HD Graphics", &intel_fake_agp_driver, &ironlake_gtt_driver },
+	    "HD Graphics", &ironlake_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT1_IG,
-	    "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver },
+	    "Sandybridge", &sandybridge_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_IG,
-	    "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver },
+	    "Sandybridge", &sandybridge_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_PLUS_IG,
-	    "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver },
+	    "Sandybridge", &sandybridge_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT1_IG,
-	    "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver },
+	    "Sandybridge", &sandybridge_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_IG,
-	    "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver },
+	    "Sandybridge", &sandybridge_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_PLUS_IG,
-	    "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver },
+	    "Sandybridge", &sandybridge_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_IG,
-	    "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver },
+	    "Sandybridge", &sandybridge_gtt_driver },
 	{ 0, NULL, NULL }
 };
 
@@ -1530,21 +1459,20 @@
 				      struct agp_bridge_data *bridge)
 {
 	int i, mask;
-	bridge->driver = NULL;
+	intel_private.driver = NULL;
 
 	for (i = 0; intel_gtt_chipsets[i].name != NULL; i++) {
 		if (find_gmch(intel_gtt_chipsets[i].gmch_chip_id)) {
-			bridge->driver =
-				intel_gtt_chipsets[i].gmch_driver;
-			intel_private.driver = 
+			intel_private.driver =
 				intel_gtt_chipsets[i].gtt_driver;
 			break;
 		}
 	}
 
-	if (!bridge->driver)
+	if (!intel_private.driver)
 		return 0;
 
+	bridge->driver = &intel_fake_agp_driver;
 	bridge->dev_private_data = &intel_private;
 	bridge->dev = pdev;
 
@@ -1560,8 +1488,8 @@
 		pci_set_consistent_dma_mask(intel_private.pcidev,
 					    DMA_BIT_MASK(mask));
 
-	if (bridge->driver == &intel_810_driver)
-		return 1;
+	/*if (bridge->driver == &intel_810_driver)
+		return 1;*/
 
 	if (intel_gtt_init() != 0)
 		return 0;
@@ -1570,12 +1498,19 @@
 }
 EXPORT_SYMBOL(intel_gmch_probe);
 
-struct intel_gtt *intel_gtt_get(void)
+const struct intel_gtt *intel_gtt_get(void)
 {
 	return &intel_private.base;
 }
 EXPORT_SYMBOL(intel_gtt_get);
 
+void intel_gtt_chipset_flush(void)
+{
+	if (intel_private.driver->chipset_flush)
+		intel_private.driver->chipset_flush();
+}
+EXPORT_SYMBOL(intel_gtt_chipset_flush);
+
 void intel_gmch_remove(struct pci_dev *pdev)
 {
 	if (intel_private.pcidev)
diff --git a/drivers/char/hvc_vio.c b/drivers/char/hvc_vio.c
index 27370e9..5e2f52b 100644
--- a/drivers/char/hvc_vio.c
+++ b/drivers/char/hvc_vio.c
@@ -39,7 +39,7 @@
 
 #include "hvc_console.h"
 
-char hvc_driver_name[] = "hvc_console";
+static const char hvc_driver_name[] = "hvc_console";
 
 static struct vio_device_id hvc_driver_table[] __devinitdata = {
 	{"serial", "hvterm1"},
diff --git a/drivers/char/hw_random/via-rng.c b/drivers/char/hw_random/via-rng.c
index 794aacb..d0387a8 100644
--- a/drivers/char/hw_random/via-rng.c
+++ b/drivers/char/hw_random/via-rng.c
@@ -24,6 +24,7 @@
  * warranty of any kind, whether express or implied.
  */
 
+#include <crypto/padlock.h>
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/hw_random.h>
@@ -34,7 +35,6 @@
 #include <asm/i387.h>
 
 
-#define PFX	KBUILD_MODNAME ": "
 
 
 enum {
@@ -81,8 +81,7 @@
 	ts_state = irq_ts_save();
 
 	asm(".byte 0x0F,0xA7,0xC0 /* xstore %%edi (addr=%0) */"
-		:"=m"(*addr), "=a"(eax_out)
-		:"D"(addr), "d"(edx_in));
+		: "=m" (*addr), "=a" (eax_out), "+d" (edx_in), "+D" (addr));
 
 	irq_ts_restore(ts_state);
 	return eax_out;
@@ -90,8 +89,10 @@
 
 static int via_rng_data_present(struct hwrng *rng, int wait)
 {
+	char buf[16 + PADLOCK_ALIGNMENT - STACK_ALIGN] __attribute__
+		((aligned(STACK_ALIGN)));
+	u32 *via_rng_datum = (u32 *)PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
 	u32 bytes_out;
-	u32 *via_rng_datum = (u32 *)(&rng->priv);
 	int i;
 
 	/* We choose the recommended 1-byte-per-instruction RNG rate,
@@ -115,6 +116,7 @@
 			break;
 		udelay(10);
 	}
+	rng->priv = *via_rng_datum;
 	return bytes_out ? 1 : 0;
 }
 
diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c
index f4d334f..320668f 100644
--- a/drivers/char/ipmi/ipmi_watchdog.c
+++ b/drivers/char/ipmi/ipmi_watchdog.c
@@ -1081,7 +1081,7 @@
 {
 	struct die_args *args = data;
 
-	if (val != DIE_NMI)
+	if (val != DIE_NMIUNKNOWN)
 		return NOTIFY_OK;
 
 	/* Hack, if it's a memory or I/O error, ignore it. */
diff --git a/drivers/char/ramoops.c b/drivers/char/ramoops.c
index d3d63be..1a9f5f6 100644
--- a/drivers/char/ramoops.c
+++ b/drivers/char/ramoops.c
@@ -30,7 +30,7 @@
 
 #define RAMOOPS_KERNMSG_HDR "===="
 
-#define RECORD_SIZE 4096
+#define RECORD_SIZE 4096UL
 
 static ulong mem_address;
 module_param(mem_address, ulong, 0400);
@@ -68,11 +68,16 @@
 	char *buf, *buf_orig;
 	struct timeval timestamp;
 
+	if (reason != KMSG_DUMP_OOPS &&
+	    reason != KMSG_DUMP_PANIC &&
+	    reason != KMSG_DUMP_KEXEC)
+		return;
+
 	/* Only dump oopses if dump_oops is set */
 	if (reason == KMSG_DUMP_OOPS && !dump_oops)
 		return;
 
-	buf = (char *)(cxt->virt_addr + (cxt->count * RECORD_SIZE));
+	buf = cxt->virt_addr + (cxt->count * RECORD_SIZE);
 	buf_orig = buf;
 
 	memset(buf, '\0', RECORD_SIZE);
@@ -83,8 +88,8 @@
 	buf += res;
 
 	hdr_size = buf - buf_orig;
-	l2_cpy = min(l2, (unsigned long)(RECORD_SIZE - hdr_size));
-	l1_cpy = min(l1, (unsigned long)(RECORD_SIZE - hdr_size) - l2_cpy);
+	l2_cpy = min(l2, RECORD_SIZE - hdr_size);
+	l1_cpy = min(l1, RECORD_SIZE - hdr_size - l2_cpy);
 
 	s2_start = l2 - l2_cpy;
 	s1_start = l1 - l1_cpy;
diff --git a/drivers/crypto/mv_cesa.c b/drivers/crypto/mv_cesa.c
index 7d279e5..c99305a 100644
--- a/drivers/crypto/mv_cesa.c
+++ b/drivers/crypto/mv_cesa.c
@@ -857,7 +857,7 @@
 			printk(KERN_WARNING MV_CESA
 			       "Base driver '%s' could not be loaded!\n",
 			       base_hash_name);
-			err = PTR_ERR(fallback_tfm);
+			err = PTR_ERR(base_hash);
 			goto err_bad_base;
 		}
 	}
diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c
index 76141262..80dc094 100644
--- a/drivers/crypto/n2_core.c
+++ b/drivers/crypto/n2_core.c
@@ -1542,7 +1542,7 @@
 	return err;
 }
 
-static void __exit n2_unregister_algs(void)
+static void __devexit n2_unregister_algs(void)
 {
 	mutex_lock(&spu_lock);
 	if (!--algs_registered)
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
index 799ca51..add2a1a 100644
--- a/drivers/crypto/omap-aes.c
+++ b/drivers/crypto/omap-aes.c
@@ -74,11 +74,9 @@
 #define FLAGS_CBC		BIT(1)
 #define FLAGS_GIV		BIT(2)
 
-#define FLAGS_NEW_KEY		BIT(4)
-#define FLAGS_NEW_IV		BIT(5)
-#define FLAGS_INIT		BIT(6)
-#define FLAGS_FAST		BIT(7)
-#define FLAGS_BUSY		8
+#define FLAGS_INIT		BIT(4)
+#define FLAGS_FAST		BIT(5)
+#define FLAGS_BUSY		BIT(6)
 
 struct omap_aes_ctx {
 	struct omap_aes_dev *dd;
@@ -98,19 +96,18 @@
 struct omap_aes_dev {
 	struct list_head	list;
 	unsigned long		phys_base;
-	void __iomem 		*io_base;
+	void __iomem		*io_base;
 	struct clk		*iclk;
 	struct omap_aes_ctx	*ctx;
 	struct device		*dev;
 	unsigned long		flags;
+	int			err;
 
-	u32			*iv;
-	u32			ctrl;
+	spinlock_t		lock;
+	struct crypto_queue	queue;
 
-	spinlock_t			lock;
-	struct crypto_queue		queue;
-
-	struct tasklet_struct		task;
+	struct tasklet_struct	done_task;
+	struct tasklet_struct	queue_task;
 
 	struct ablkcipher_request	*req;
 	size_t				total;
@@ -179,9 +176,13 @@
 
 static int omap_aes_hw_init(struct omap_aes_dev *dd)
 {
-	int err = 0;
-
+	/*
+	 * Clocks are enabled when a request starts and disabled when it
+	 * finishes. There may be long delays between requests, and the
+	 * device might go to off mode to save power.
+	 */
 	clk_enable(dd->iclk);
+
 	if (!(dd->flags & FLAGS_INIT)) {
 		/* is it necessary to reset before every operation? */
 		omap_aes_write_mask(dd, AES_REG_MASK, AES_REG_MASK_SOFTRESET,
@@ -193,39 +194,26 @@
 		__asm__ __volatile__("nop");
 		__asm__ __volatile__("nop");
 
-		err = omap_aes_wait(dd, AES_REG_SYSSTATUS,
-				AES_REG_SYSSTATUS_RESETDONE);
-		if (!err)
-			dd->flags |= FLAGS_INIT;
+		if (omap_aes_wait(dd, AES_REG_SYSSTATUS,
+				AES_REG_SYSSTATUS_RESETDONE))
+			return -ETIMEDOUT;
+
+		dd->flags |= FLAGS_INIT;
+		dd->err = 0;
 	}
 
-	return err;
+	return 0;
 }
 
-static void omap_aes_hw_cleanup(struct omap_aes_dev *dd)
-{
-	clk_disable(dd->iclk);
-}
-
-static void omap_aes_write_ctrl(struct omap_aes_dev *dd)
+static int omap_aes_write_ctrl(struct omap_aes_dev *dd)
 {
 	unsigned int key32;
-	int i;
+	int i, err;
 	u32 val, mask;
 
-	val = FLD_VAL(((dd->ctx->keylen >> 3) - 1), 4, 3);
-	if (dd->flags & FLAGS_CBC)
-		val |= AES_REG_CTRL_CBC;
-	if (dd->flags & FLAGS_ENCRYPT)
-		val |= AES_REG_CTRL_DIRECTION;
-
-	if (dd->ctrl == val && !(dd->flags & FLAGS_NEW_IV) &&
-		   !(dd->ctx->flags & FLAGS_NEW_KEY))
-		goto out;
-
-	/* only need to write control registers for new settings */
-
-	dd->ctrl = val;
+	err = omap_aes_hw_init(dd);
+	if (err)
+		return err;
 
 	val = 0;
 	if (dd->dma_lch_out >= 0)
@@ -237,30 +225,43 @@
 
 	omap_aes_write_mask(dd, AES_REG_MASK, val, mask);
 
-	pr_debug("Set key\n");
 	key32 = dd->ctx->keylen / sizeof(u32);
-	/* set a key */
+
+	/* it seems a key should always be set even if it has not changed */
 	for (i = 0; i < key32; i++) {
 		omap_aes_write(dd, AES_REG_KEY(i),
 			__le32_to_cpu(dd->ctx->key[i]));
 	}
-	dd->ctx->flags &= ~FLAGS_NEW_KEY;
 
-	if (dd->flags & FLAGS_NEW_IV) {
-		pr_debug("Set IV\n");
-		omap_aes_write_n(dd, AES_REG_IV(0), dd->iv, 4);
-		dd->flags &= ~FLAGS_NEW_IV;
-	}
+	if ((dd->flags & FLAGS_CBC) && dd->req->info)
+		omap_aes_write_n(dd, AES_REG_IV(0), dd->req->info, 4);
+
+	val = FLD_VAL(((dd->ctx->keylen >> 3) - 1), 4, 3);
+	if (dd->flags & FLAGS_CBC)
+		val |= AES_REG_CTRL_CBC;
+	if (dd->flags & FLAGS_ENCRYPT)
+		val |= AES_REG_CTRL_DIRECTION;
 
 	mask = AES_REG_CTRL_CBC | AES_REG_CTRL_DIRECTION |
 			AES_REG_CTRL_KEY_SIZE;
 
-	omap_aes_write_mask(dd, AES_REG_CTRL, dd->ctrl, mask);
+	omap_aes_write_mask(dd, AES_REG_CTRL, val, mask);
 
-out:
-	/* start DMA or disable idle mode */
-	omap_aes_write_mask(dd, AES_REG_MASK, AES_REG_MASK_START,
-			    AES_REG_MASK_START);
+	/* IN */
+	omap_set_dma_dest_params(dd->dma_lch_in, 0, OMAP_DMA_AMODE_CONSTANT,
+				 dd->phys_base + AES_REG_DATA, 0, 4);
+
+	omap_set_dma_dest_burst_mode(dd->dma_lch_in, OMAP_DMA_DATA_BURST_4);
+	omap_set_dma_src_burst_mode(dd->dma_lch_in, OMAP_DMA_DATA_BURST_4);
+
+	/* OUT */
+	omap_set_dma_src_params(dd->dma_lch_out, 0, OMAP_DMA_AMODE_CONSTANT,
+				dd->phys_base + AES_REG_DATA, 0, 4);
+
+	omap_set_dma_src_burst_mode(dd->dma_lch_out, OMAP_DMA_DATA_BURST_4);
+	omap_set_dma_dest_burst_mode(dd->dma_lch_out, OMAP_DMA_DATA_BURST_4);
+
+	return 0;
 }
 
 static struct omap_aes_dev *omap_aes_find_dev(struct omap_aes_ctx *ctx)
@@ -288,8 +289,16 @@
 {
 	struct omap_aes_dev *dd = data;
 
-	if (lch == dd->dma_lch_out)
-		tasklet_schedule(&dd->task);
+	if (ch_status != OMAP_DMA_BLOCK_IRQ) {
+		pr_err("omap-aes DMA error status: 0x%hx\n", ch_status);
+		dd->err = -EIO;
+		dd->flags &= ~FLAGS_INIT; /* request to re-initialize */
+	} else if (lch == dd->dma_lch_in) {
+		return;
+	}
+
+	/* dma_lch_out - completed */
+	tasklet_schedule(&dd->done_task);
 }
 
 static int omap_aes_dma_init(struct omap_aes_dev *dd)
@@ -339,18 +348,6 @@
 		goto err_dma_out;
 	}
 
-	omap_set_dma_dest_params(dd->dma_lch_in, 0, OMAP_DMA_AMODE_CONSTANT,
-				 dd->phys_base + AES_REG_DATA, 0, 4);
-
-	omap_set_dma_dest_burst_mode(dd->dma_lch_in, OMAP_DMA_DATA_BURST_4);
-	omap_set_dma_src_burst_mode(dd->dma_lch_in, OMAP_DMA_DATA_BURST_4);
-
-	omap_set_dma_src_params(dd->dma_lch_out, 0, OMAP_DMA_AMODE_CONSTANT,
-				dd->phys_base + AES_REG_DATA, 0, 4);
-
-	omap_set_dma_src_burst_mode(dd->dma_lch_out, OMAP_DMA_DATA_BURST_4);
-	omap_set_dma_dest_burst_mode(dd->dma_lch_out, OMAP_DMA_DATA_BURST_4);
-
 	return 0;
 
 err_dma_out:
@@ -406,6 +403,11 @@
 		if (!count)
 			return off;
 
+		/*
+		 * buflen and total are aligned to AES_BLOCK_SIZE,
+		 * so count should also be aligned
+		 */
+
 		sg_copy_buf(buf + off, *sg, *offset, count, out);
 
 		off += count;
@@ -461,7 +463,9 @@
 	omap_start_dma(dd->dma_lch_in);
 	omap_start_dma(dd->dma_lch_out);
 
-	omap_aes_write_ctrl(dd);
+	/* start DMA or disable idle mode */
+	omap_aes_write_mask(dd, AES_REG_MASK, AES_REG_MASK_START,
+			    AES_REG_MASK_START);
 
 	return 0;
 }
@@ -488,8 +492,10 @@
 		count = min(dd->total, sg_dma_len(dd->in_sg));
 		count = min(count, sg_dma_len(dd->out_sg));
 
-		if (count != dd->total)
+		if (count != dd->total) {
+			pr_err("request length != buffer length\n");
 			return -EINVAL;
+		}
 
 		pr_debug("fast\n");
 
@@ -525,23 +531,25 @@
 
 	dd->total -= count;
 
-	err = omap_aes_hw_init(dd);
-
 	err = omap_aes_crypt_dma(tfm, addr_in, addr_out, count);
+	if (err) {
+		dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE);
+		dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_TO_DEVICE);
+	}
 
 	return err;
 }
 
 static void omap_aes_finish_req(struct omap_aes_dev *dd, int err)
 {
-	struct omap_aes_ctx *ctx;
+	struct ablkcipher_request *req = dd->req;
 
 	pr_debug("err: %d\n", err);
 
-	ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(dd->req));
+	clk_disable(dd->iclk);
+	dd->flags &= ~FLAGS_BUSY;
 
-	if (!dd->total)
-		dd->req->base.complete(&dd->req->base, err);
+	req->base.complete(&req->base, err);
 }
 
 static int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd)
@@ -553,8 +561,6 @@
 
 	omap_aes_write_mask(dd, AES_REG_MASK, 0, AES_REG_MASK_START);
 
-	omap_aes_hw_cleanup(dd);
-
 	omap_stop_dma(dd->dma_lch_in);
 	omap_stop_dma(dd->dma_lch_out);
 
@@ -574,40 +580,39 @@
 		}
 	}
 
-	if (err || !dd->total)
-		omap_aes_finish_req(dd, err);
-
 	return err;
 }
 
-static int omap_aes_handle_req(struct omap_aes_dev *dd)
+static int omap_aes_handle_queue(struct omap_aes_dev *dd,
+			       struct ablkcipher_request *req)
 {
 	struct crypto_async_request *async_req, *backlog;
 	struct omap_aes_ctx *ctx;
 	struct omap_aes_reqctx *rctx;
-	struct ablkcipher_request *req;
 	unsigned long flags;
-
-	if (dd->total)
-		goto start;
+	int err, ret = 0;
 
 	spin_lock_irqsave(&dd->lock, flags);
+	if (req)
+		ret = ablkcipher_enqueue_request(&dd->queue, req);
+	if (dd->flags & FLAGS_BUSY) {
+		spin_unlock_irqrestore(&dd->lock, flags);
+		return ret;
+	}
 	backlog = crypto_get_backlog(&dd->queue);
 	async_req = crypto_dequeue_request(&dd->queue);
-	if (!async_req)
-		clear_bit(FLAGS_BUSY, &dd->flags);
+	if (async_req)
+		dd->flags |= FLAGS_BUSY;
 	spin_unlock_irqrestore(&dd->lock, flags);
 
 	if (!async_req)
-		return 0;
+		return ret;
 
 	if (backlog)
 		backlog->complete(backlog, -EINPROGRESS);
 
 	req = ablkcipher_request_cast(async_req);
 
-	pr_debug("get new req\n");
-
 	/* assign new request to device */
 	dd->req = req;
 	dd->total = req->nbytes;
@@ -621,27 +626,22 @@
 	rctx->mode &= FLAGS_MODE_MASK;
 	dd->flags = (dd->flags & ~FLAGS_MODE_MASK) | rctx->mode;
 
-	dd->iv = req->info;
-	if ((dd->flags & FLAGS_CBC) && dd->iv)
-		dd->flags |= FLAGS_NEW_IV;
-	else
-		dd->flags &= ~FLAGS_NEW_IV;
-
+	dd->ctx = ctx;
 	ctx->dd = dd;
-	if (dd->ctx != ctx) {
-		/* assign new context to device */
-		dd->ctx = ctx;
-		ctx->flags |= FLAGS_NEW_KEY;
+
+	err = omap_aes_write_ctrl(dd);
+	if (!err)
+		err = omap_aes_crypt_dma_start(dd);
+	if (err) {
+		/* aes_task will not finish it, so do it here */
+		omap_aes_finish_req(dd, err);
+		tasklet_schedule(&dd->queue_task);
 	}
 
-	if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE))
-		pr_err("request size is not exact amount of AES blocks\n");
-
-start:
-	return omap_aes_crypt_dma_start(dd);
+	return ret; /* return ret, which is enqueue return value */
 }
 
-static void omap_aes_task(unsigned long data)
+static void omap_aes_done_task(unsigned long data)
 {
 	struct omap_aes_dev *dd = (struct omap_aes_dev *)data;
 	int err;
@@ -650,40 +650,50 @@
 
 	err = omap_aes_crypt_dma_stop(dd);
 
-	err = omap_aes_handle_req(dd);
+	err = dd->err ? : err;
+
+	if (dd->total && !err) {
+		err = omap_aes_crypt_dma_start(dd);
+		if (!err)
+			return; /* DMA started. Not finishing. */
+	}
+
+	omap_aes_finish_req(dd, err);
+	omap_aes_handle_queue(dd, NULL);
 
 	pr_debug("exit\n");
 }
 
+static void omap_aes_queue_task(unsigned long data)
+{
+	struct omap_aes_dev *dd = (struct omap_aes_dev *)data;
+
+	omap_aes_handle_queue(dd, NULL);
+}
+
 static int omap_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
 {
 	struct omap_aes_ctx *ctx = crypto_ablkcipher_ctx(
 			crypto_ablkcipher_reqtfm(req));
 	struct omap_aes_reqctx *rctx = ablkcipher_request_ctx(req);
 	struct omap_aes_dev *dd;
-	unsigned long flags;
-	int err;
 
 	pr_debug("nbytes: %d, enc: %d, cbc: %d\n", req->nbytes,
 		  !!(mode & FLAGS_ENCRYPT),
 		  !!(mode & FLAGS_CBC));
 
+	if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) {
+		pr_err("request size is not exact amount of AES blocks\n");
+		return -EINVAL;
+	}
+
 	dd = omap_aes_find_dev(ctx);
 	if (!dd)
 		return -ENODEV;
 
 	rctx->mode = mode;
 
-	spin_lock_irqsave(&dd->lock, flags);
-	err = ablkcipher_enqueue_request(&dd->queue, req);
-	spin_unlock_irqrestore(&dd->lock, flags);
-
-	if (!test_and_set_bit(FLAGS_BUSY, &dd->flags))
-		omap_aes_handle_req(dd);
-
-	pr_debug("exit\n");
-
-	return err;
+	return omap_aes_handle_queue(dd, req);
 }
 
 /* ********************** ALG API ************************************ */
@@ -701,7 +711,6 @@
 
 	memcpy(ctx->key, key, keylen);
 	ctx->keylen = keylen;
-	ctx->flags |= FLAGS_NEW_KEY;
 
 	return 0;
 }
@@ -750,7 +759,7 @@
 	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
 	.cra_blocksize		= AES_BLOCK_SIZE,
 	.cra_ctxsize		= sizeof(struct omap_aes_ctx),
-	.cra_alignmask	 	= 0,
+	.cra_alignmask		= 0,
 	.cra_type		= &crypto_ablkcipher_type,
 	.cra_module		= THIS_MODULE,
 	.cra_init		= omap_aes_cra_init,
@@ -770,7 +779,7 @@
 	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
 	.cra_blocksize		= AES_BLOCK_SIZE,
 	.cra_ctxsize		= sizeof(struct omap_aes_ctx),
-	.cra_alignmask	 	= 0,
+	.cra_alignmask		= 0,
 	.cra_type		= &crypto_ablkcipher_type,
 	.cra_module		= THIS_MODULE,
 	.cra_init		= omap_aes_cra_init,
@@ -849,7 +858,8 @@
 		 (reg & AES_REG_REV_MAJOR) >> 4, reg & AES_REG_REV_MINOR);
 	clk_disable(dd->iclk);
 
-	tasklet_init(&dd->task, omap_aes_task, (unsigned long)dd);
+	tasklet_init(&dd->done_task, omap_aes_done_task, (unsigned long)dd);
+	tasklet_init(&dd->queue_task, omap_aes_queue_task, (unsigned long)dd);
 
 	err = omap_aes_dma_init(dd);
 	if (err)
@@ -876,7 +886,8 @@
 		crypto_unregister_alg(&algs[j]);
 	omap_aes_dma_cleanup(dd);
 err_dma:
-	tasklet_kill(&dd->task);
+	tasklet_kill(&dd->done_task);
+	tasklet_kill(&dd->queue_task);
 	iounmap(dd->io_base);
 err_io:
 	clk_put(dd->iclk);
@@ -903,7 +914,8 @@
 	for (i = 0; i < ARRAY_SIZE(algs); i++)
 		crypto_unregister_alg(&algs[i]);
 
-	tasklet_kill(&dd->task);
+	tasklet_kill(&dd->done_task);
+	tasklet_kill(&dd->queue_task);
 	omap_aes_dma_cleanup(dd);
 	iounmap(dd->io_base);
 	clk_put(dd->iclk);
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
index a081c7c..2e71123 100644
--- a/drivers/crypto/omap-sham.c
+++ b/drivers/crypto/omap-sham.c
@@ -72,10 +72,9 @@
 
 #define DEFAULT_TIMEOUT_INTERVAL	HZ
 
-#define FLAGS_FIRST		0x0001
 #define FLAGS_FINUP		0x0002
 #define FLAGS_FINAL		0x0004
-#define FLAGS_FAST		0x0008
+#define FLAGS_SG		0x0008
 #define FLAGS_SHA1		0x0010
 #define FLAGS_DMA_ACTIVE	0x0020
 #define FLAGS_OUTPUT_READY	0x0040
@@ -83,13 +82,17 @@
 #define FLAGS_INIT		0x0100
 #define FLAGS_CPU		0x0200
 #define FLAGS_HMAC		0x0400
-
-/* 3rd byte */
-#define FLAGS_BUSY		16
+#define FLAGS_ERROR		0x0800
+#define FLAGS_BUSY		0x1000
 
 #define OP_UPDATE	1
 #define OP_FINAL	2
 
+#define OMAP_ALIGN_MASK		(sizeof(u32)-1)
+#define OMAP_ALIGNED		__attribute__((aligned(sizeof(u32))))
+
+#define BUFLEN		PAGE_SIZE
+
 struct omap_sham_dev;
 
 struct omap_sham_reqctx {
@@ -97,8 +100,8 @@
 	unsigned long		flags;
 	unsigned long		op;
 
+	u8			digest[SHA1_DIGEST_SIZE] OMAP_ALIGNED;
 	size_t			digcnt;
-	u8			*buffer;
 	size_t			bufcnt;
 	size_t			buflen;
 	dma_addr_t		dma_addr;
@@ -107,6 +110,8 @@
 	struct scatterlist	*sg;
 	unsigned int		offset;	/* offset in current sg */
 	unsigned int		total;	/* total request */
+
+	u8			buffer[0] OMAP_ALIGNED;
 };
 
 struct omap_sham_hmac_ctx {
@@ -136,6 +141,7 @@
 	int			irq;
 	struct clk		*iclk;
 	spinlock_t		lock;
+	int			err;
 	int			dma;
 	int			dma_lch;
 	struct tasklet_struct	done_task;
@@ -194,53 +200,68 @@
 static void omap_sham_copy_hash(struct ahash_request *req, int out)
 {
 	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
+	u32 *hash = (u32 *)ctx->digest;
+	int i;
+
+	/* MD5 is almost unused. So copy sha1 size to reduce code */
+	for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++) {
+		if (out)
+			hash[i] = omap_sham_read(ctx->dd,
+						SHA_REG_DIGEST(i));
+		else
+			omap_sham_write(ctx->dd,
+					SHA_REG_DIGEST(i), hash[i]);
+	}
+}
+
+static void omap_sham_copy_ready_hash(struct ahash_request *req)
+{
+	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
+	u32 *in = (u32 *)ctx->digest;
 	u32 *hash = (u32 *)req->result;
 	int i;
 
+	if (!hash)
+		return;
+
 	if (likely(ctx->flags & FLAGS_SHA1)) {
 		/* SHA1 results are in big endian */
 		for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++)
-			if (out)
-				hash[i] = be32_to_cpu(omap_sham_read(ctx->dd,
-							SHA_REG_DIGEST(i)));
-			else
-				omap_sham_write(ctx->dd, SHA_REG_DIGEST(i),
-							cpu_to_be32(hash[i]));
+			hash[i] = be32_to_cpu(in[i]);
 	} else {
 		/* MD5 results are in little endian */
 		for (i = 0; i < MD5_DIGEST_SIZE / sizeof(u32); i++)
-			if (out)
-				hash[i] = le32_to_cpu(omap_sham_read(ctx->dd,
-							SHA_REG_DIGEST(i)));
-			else
-				omap_sham_write(ctx->dd, SHA_REG_DIGEST(i),
-							cpu_to_le32(hash[i]));
+			hash[i] = le32_to_cpu(in[i]);
 	}
 }
 
-static int omap_sham_write_ctrl(struct omap_sham_dev *dd, size_t length,
+static int omap_sham_hw_init(struct omap_sham_dev *dd)
+{
+	clk_enable(dd->iclk);
+
+	if (!(dd->flags & FLAGS_INIT)) {
+		omap_sham_write_mask(dd, SHA_REG_MASK,
+			SHA_REG_MASK_SOFTRESET, SHA_REG_MASK_SOFTRESET);
+
+		if (omap_sham_wait(dd, SHA_REG_SYSSTATUS,
+					SHA_REG_SYSSTATUS_RESETDONE))
+			return -ETIMEDOUT;
+
+		dd->flags |= FLAGS_INIT;
+		dd->err = 0;
+	}
+
+	return 0;
+}
+
+static void omap_sham_write_ctrl(struct omap_sham_dev *dd, size_t length,
 				 int final, int dma)
 {
 	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
 	u32 val = length << 5, mask;
 
-	if (unlikely(!ctx->digcnt)) {
-
-		clk_enable(dd->iclk);
-
-		if (!(dd->flags & FLAGS_INIT)) {
-			omap_sham_write_mask(dd, SHA_REG_MASK,
-				SHA_REG_MASK_SOFTRESET, SHA_REG_MASK_SOFTRESET);
-
-			if (omap_sham_wait(dd, SHA_REG_SYSSTATUS,
-						SHA_REG_SYSSTATUS_RESETDONE))
-				return -ETIMEDOUT;
-
-			dd->flags |= FLAGS_INIT;
-		}
-	} else {
+	if (likely(ctx->digcnt))
 		omap_sham_write(dd, SHA_REG_DIGCNT, ctx->digcnt);
-	}
 
 	omap_sham_write_mask(dd, SHA_REG_MASK,
 		SHA_REG_MASK_IT_EN | (dma ? SHA_REG_MASK_DMA_EN : 0),
@@ -260,29 +281,26 @@
 			SHA_REG_CTRL_ALGO | SHA_REG_CTRL_LENGTH;
 
 	omap_sham_write_mask(dd, SHA_REG_CTRL, val, mask);
-
-	return 0;
 }
 
 static int omap_sham_xmit_cpu(struct omap_sham_dev *dd, const u8 *buf,
 			      size_t length, int final)
 {
 	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
-	int err, count, len32;
+	int count, len32;
 	const u32 *buffer = (const u32 *)buf;
 
 	dev_dbg(dd->dev, "xmit_cpu: digcnt: %d, length: %d, final: %d\n",
 						ctx->digcnt, length, final);
 
-	err = omap_sham_write_ctrl(dd, length, final, 0);
-	if (err)
-		return err;
+	omap_sham_write_ctrl(dd, length, final, 0);
+
+	/* update digcnt before the possible early return below, so the clock gets disabled later */
+	ctx->digcnt += length;
 
 	if (omap_sham_wait(dd, SHA_REG_CTRL, SHA_REG_CTRL_INPUT_READY))
 		return -ETIMEDOUT;
 
-	ctx->digcnt += length;
-
 	if (final)
 		ctx->flags |= FLAGS_FINAL; /* catch last interrupt */
 
@@ -298,16 +316,11 @@
 			      size_t length, int final)
 {
 	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
-	int err, len32;
+	int len32;
 
 	dev_dbg(dd->dev, "xmit_dma: digcnt: %d, length: %d, final: %d\n",
 						ctx->digcnt, length, final);
 
-	/* flush cache entries related to our page */
-	if (dma_addr == ctx->dma_addr)
-		dma_sync_single_for_device(dd->dev, dma_addr, length,
-					   DMA_TO_DEVICE);
-
 	len32 = DIV_ROUND_UP(length, sizeof(u32));
 
 	omap_set_dma_transfer_params(dd->dma_lch, OMAP_DMA_DATA_TYPE_S32, len32,
@@ -317,9 +330,7 @@
 	omap_set_dma_src_params(dd->dma_lch, 0, OMAP_DMA_AMODE_POST_INC,
 				dma_addr, 0, 0);
 
-	err = omap_sham_write_ctrl(dd, length, final, 1);
-	if (err)
-		return err;
+	omap_sham_write_ctrl(dd, length, final, 1);
 
 	ctx->digcnt += length;
 
@@ -371,15 +382,29 @@
 	return 0;
 }
 
+static int omap_sham_xmit_dma_map(struct omap_sham_dev *dd,
+					struct omap_sham_reqctx *ctx,
+					size_t length, int final)
+{
+	ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer, ctx->buflen,
+				       DMA_TO_DEVICE);
+	if (dma_mapping_error(dd->dev, ctx->dma_addr)) {
+		dev_err(dd->dev, "dma %u bytes error\n", ctx->buflen);
+		return -EINVAL;
+	}
+
+	ctx->flags &= ~FLAGS_SG;
+
+	/* next call does not fail... so no unmap in the case of error */
+	return omap_sham_xmit_dma(dd, ctx->dma_addr, length, final);
+}
+
 static int omap_sham_update_dma_slow(struct omap_sham_dev *dd)
 {
 	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
 	unsigned int final;
 	size_t count;
 
-	if (!ctx->total)
-		return 0;
-
 	omap_sham_append_sg(ctx);
 
 	final = (ctx->flags & FLAGS_FINUP) && !ctx->total;
@@ -390,30 +415,68 @@
 	if (final || (ctx->bufcnt == ctx->buflen && ctx->total)) {
 		count = ctx->bufcnt;
 		ctx->bufcnt = 0;
-		return omap_sham_xmit_dma(dd, ctx->dma_addr, count, final);
+		return omap_sham_xmit_dma_map(dd, ctx, count, final);
 	}
 
 	return 0;
 }
 
-static int omap_sham_update_dma_fast(struct omap_sham_dev *dd)
+/* Start address alignment */
+#define SG_AA(sg)	(IS_ALIGNED(sg->offset, sizeof(u32)))
+/* SHA1 block size alignment */
+#define SG_SA(sg)	(IS_ALIGNED(sg->length, SHA1_MD5_BLOCK_SIZE))
+
+static int omap_sham_update_dma_start(struct omap_sham_dev *dd)
 {
 	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
-	unsigned int length;
+	unsigned int length, final, tail;
+	struct scatterlist *sg;
 
-	ctx->flags |= FLAGS_FAST;
+	if (!ctx->total)
+		return 0;
 
-	length = min(ctx->total, sg_dma_len(ctx->sg));
-	ctx->total = length;
+	if (ctx->bufcnt || ctx->offset)
+		return omap_sham_update_dma_slow(dd);
+
+	dev_dbg(dd->dev, "fast: digcnt: %d, bufcnt: %u, total: %u\n",
+			ctx->digcnt, ctx->bufcnt, ctx->total);
+
+	sg = ctx->sg;
+
+	if (!SG_AA(sg))
+		return omap_sham_update_dma_slow(dd);
+
+	if (!sg_is_last(sg) && !SG_SA(sg))
+		/* size is not SHA1_BLOCK_SIZE aligned */
+		return omap_sham_update_dma_slow(dd);
+
+	length = min(ctx->total, sg->length);
+
+	if (sg_is_last(sg)) {
+		if (!(ctx->flags & FLAGS_FINUP)) {
+			/* anything but the final chunk must stay SHA1_MD5_BLOCK_SIZE aligned */
+			tail = length & (SHA1_MD5_BLOCK_SIZE - 1);
+			/* without finup() we need one block to close hash */
+			if (!tail)
+				tail = SHA1_MD5_BLOCK_SIZE;
+			length -= tail;
+		}
+	}
 
 	if (!dma_map_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE)) {
 		dev_err(dd->dev, "dma_map_sg  error\n");
 		return -EINVAL;
 	}
 
-	ctx->total -= length;
+	ctx->flags |= FLAGS_SG;
 
-	return omap_sham_xmit_dma(dd, sg_dma_address(ctx->sg), length, 1);
+	ctx->total -= length;
+	ctx->offset = length; /* offset where to start slow */
+
+	final = (ctx->flags & FLAGS_FINUP) && !ctx->total;
+
+	/* next call does not fail... so no unmap in the case of error */
+	return omap_sham_xmit_dma(dd, sg_dma_address(ctx->sg), length, final);
 }
 
 static int omap_sham_update_cpu(struct omap_sham_dev *dd)
@@ -433,8 +496,17 @@
 	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
 
 	omap_stop_dma(dd->dma_lch);
-	if (ctx->flags & FLAGS_FAST)
+	if (ctx->flags & FLAGS_SG) {
 		dma_unmap_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE);
+		if (ctx->sg->length == ctx->offset) {
+			ctx->sg = sg_next(ctx->sg);
+			if (ctx->sg)
+				ctx->offset = 0;
+		}
+	} else {
+		dma_unmap_single(dd->dev, ctx->dma_addr, ctx->buflen,
+				 DMA_TO_DEVICE);
+	}
 
 	return 0;
 }
@@ -454,14 +526,7 @@
 	spin_unlock_irqrestore(&dd->lock, flags);
 
 	if (ctx->digcnt)
-		clk_disable(dd->iclk);
-
-	if (ctx->dma_addr)
-		dma_unmap_single(dd->dev, ctx->dma_addr, ctx->buflen,
-				 DMA_TO_DEVICE);
-
-	if (ctx->buffer)
-		free_page((unsigned long)ctx->buffer);
+		omap_sham_copy_ready_hash(req);
 
 	dev_dbg(dd->dev, "digcnt: %d, bufcnt: %d\n", ctx->digcnt, ctx->bufcnt);
 }
@@ -489,8 +554,6 @@
 
 	ctx->flags = 0;
 
-	ctx->flags |= FLAGS_FIRST;
-
 	dev_dbg(dd->dev, "init: digest size: %d\n",
 		crypto_ahash_digestsize(tfm));
 
@@ -499,21 +562,7 @@
 
 	ctx->bufcnt = 0;
 	ctx->digcnt = 0;
-
-	ctx->buflen = PAGE_SIZE;
-	ctx->buffer = (void *)__get_free_page(
-				(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
-				GFP_KERNEL : GFP_ATOMIC);
-	if (!ctx->buffer)
-		return -ENOMEM;
-
-	ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer, ctx->buflen,
-					DMA_TO_DEVICE);
-	if (dma_mapping_error(dd->dev, ctx->dma_addr)) {
-		dev_err(dd->dev, "dma %u bytes error\n", ctx->buflen);
-		free_page((unsigned long)ctx->buffer);
-		return -EINVAL;
-	}
+	ctx->buflen = BUFLEN;
 
 	if (tctx->flags & FLAGS_HMAC) {
 		struct omap_sham_hmac_ctx *bctx = tctx->base;
@@ -538,10 +587,8 @@
 
 	if (ctx->flags & FLAGS_CPU)
 		err = omap_sham_update_cpu(dd);
-	else if (ctx->flags & FLAGS_FAST)
-		err = omap_sham_update_dma_fast(dd);
 	else
-		err = omap_sham_update_dma_slow(dd);
+		err = omap_sham_update_dma_start(dd);
 
 	/* wait for dma completion before can take more data */
 	dev_dbg(dd->dev, "update: err: %d, digcnt: %d\n", err, ctx->digcnt);
@@ -560,15 +607,12 @@
 		use_dma = 0;
 
 	if (use_dma)
-		err = omap_sham_xmit_dma(dd, ctx->dma_addr, ctx->bufcnt, 1);
+		err = omap_sham_xmit_dma_map(dd, ctx, ctx->bufcnt, 1);
 	else
 		err = omap_sham_xmit_cpu(dd, ctx->buffer, ctx->bufcnt, 1);
 
 	ctx->bufcnt = 0;
 
-	if (err != -EINPROGRESS)
-		omap_sham_cleanup(req);
-
 	dev_dbg(dd->dev, "final_req: err: %d\n", err);
 
 	return err;
@@ -576,6 +620,7 @@
 
 static int omap_sham_finish_req_hmac(struct ahash_request *req)
 {
+	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
 	struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
 	struct omap_sham_hmac_ctx *bctx = tctx->base;
 	int bs = crypto_shash_blocksize(bctx->shash);
@@ -590,48 +635,56 @@
 
 	return crypto_shash_init(&desc.shash) ?:
 	       crypto_shash_update(&desc.shash, bctx->opad, bs) ?:
-	       crypto_shash_finup(&desc.shash, req->result, ds, req->result);
+	       crypto_shash_finup(&desc.shash, ctx->digest, ds, ctx->digest);
 }
 
 static void omap_sham_finish_req(struct ahash_request *req, int err)
 {
 	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
+	struct omap_sham_dev *dd = ctx->dd;
 
 	if (!err) {
 		omap_sham_copy_hash(ctx->dd->req, 1);
 		if (ctx->flags & FLAGS_HMAC)
 			err = omap_sham_finish_req_hmac(req);
+	} else {
+		ctx->flags |= FLAGS_ERROR;
 	}
 
-	if (ctx->flags & FLAGS_FINAL)
+	if ((ctx->flags & FLAGS_FINAL) || err)
 		omap_sham_cleanup(req);
 
-	clear_bit(FLAGS_BUSY, &ctx->dd->flags);
+	clk_disable(dd->iclk);
+	dd->flags &= ~FLAGS_BUSY;
 
 	if (req->base.complete)
 		req->base.complete(&req->base, err);
 }
 
-static int omap_sham_handle_queue(struct omap_sham_dev *dd)
+static int omap_sham_handle_queue(struct omap_sham_dev *dd,
+				  struct ahash_request *req)
 {
 	struct crypto_async_request *async_req, *backlog;
 	struct omap_sham_reqctx *ctx;
-	struct ahash_request *req, *prev_req;
+	struct ahash_request *prev_req;
 	unsigned long flags;
-	int err = 0;
-
-	if (test_and_set_bit(FLAGS_BUSY, &dd->flags))
-		return 0;
+	int err = 0, ret = 0;
 
 	spin_lock_irqsave(&dd->lock, flags);
+	if (req)
+		ret = ahash_enqueue_request(&dd->queue, req);
+	if (dd->flags & FLAGS_BUSY) {
+		spin_unlock_irqrestore(&dd->lock, flags);
+		return ret;
+	}
 	backlog = crypto_get_backlog(&dd->queue);
 	async_req = crypto_dequeue_request(&dd->queue);
-	if (!async_req)
-		clear_bit(FLAGS_BUSY, &dd->flags);
+	if (async_req)
+		dd->flags |= FLAGS_BUSY;
 	spin_unlock_irqrestore(&dd->lock, flags);
 
 	if (!async_req)
-		return 0;
+		return ret;
 
 	if (backlog)
 		backlog->complete(backlog, -EINPROGRESS);
@@ -646,7 +699,22 @@
 	dev_dbg(dd->dev, "handling new req, op: %lu, nbytes: %d\n",
 						ctx->op, req->nbytes);
 
-	if (req != prev_req && ctx->digcnt)
+
+	err = omap_sham_hw_init(dd);
+	if (err)
+		goto err1;
+
+	omap_set_dma_dest_params(dd->dma_lch, 0,
+			OMAP_DMA_AMODE_CONSTANT,
+			dd->phys_base + SHA_REG_DIN(0), 0, 16);
+
+	omap_set_dma_dest_burst_mode(dd->dma_lch,
+			OMAP_DMA_DATA_BURST_16);
+
+	omap_set_dma_src_burst_mode(dd->dma_lch,
+			OMAP_DMA_DATA_BURST_4);
+
+	if (ctx->digcnt)
 		/* request has changed - restore hash */
 		omap_sham_copy_hash(req, 0);
 
@@ -658,7 +726,7 @@
 	} else if (ctx->op == OP_FINAL) {
 		err = omap_sham_final_req(dd);
 	}
-
+err1:
 	if (err != -EINPROGRESS) {
 		/* done_task will not finish it, so do it here */
 		omap_sham_finish_req(req, err);
@@ -667,7 +735,7 @@
 
 	dev_dbg(dd->dev, "exit, err: %d\n", err);
 
-	return err;
+	return ret;
 }
 
 static int omap_sham_enqueue(struct ahash_request *req, unsigned int op)
@@ -675,18 +743,10 @@
 	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
 	struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
 	struct omap_sham_dev *dd = tctx->dd;
-	unsigned long flags;
-	int err;
 
 	ctx->op = op;
 
-	spin_lock_irqsave(&dd->lock, flags);
-	err = ahash_enqueue_request(&dd->queue, req);
-	spin_unlock_irqrestore(&dd->lock, flags);
-
-	omap_sham_handle_queue(dd);
-
-	return err;
+	return omap_sham_handle_queue(dd, req);
 }
 
 static int omap_sham_update(struct ahash_request *req)
@@ -709,21 +769,13 @@
 			*/
 			omap_sham_append_sg(ctx);
 			return 0;
-		} else if (ctx->bufcnt + ctx->total <= 64) {
+		} else if (ctx->bufcnt + ctx->total <= SHA1_MD5_BLOCK_SIZE) {
+			/*
+			 * faster to use CPU for short transfers
+			 */
 			ctx->flags |= FLAGS_CPU;
-		} else if (!ctx->bufcnt && sg_is_last(ctx->sg)) {
-			/* may be can use faster functions */
-			int aligned = IS_ALIGNED((u32)ctx->sg->offset,
-								sizeof(u32));
-
-			if (aligned && (ctx->flags & FLAGS_FIRST))
-				/* digest: first and final */
-				ctx->flags |= FLAGS_FAST;
-
-			ctx->flags &= ~FLAGS_FIRST;
 		}
-	} else if (ctx->bufcnt + ctx->total <= ctx->buflen) {
-		/* if not finaup -> not fast */
+	} else if (ctx->bufcnt + ctx->total < ctx->buflen) {
 		omap_sham_append_sg(ctx);
 		return 0;
 	}
@@ -761,12 +813,14 @@
 
 	ctx->flags |= FLAGS_FINUP;
 
-	/* OMAP HW accel works only with buffers >= 9 */
-	/* HMAC is always >= 9 because of ipad */
-	if ((ctx->digcnt + ctx->bufcnt) < 9)
-		err = omap_sham_final_shash(req);
-	else if (ctx->bufcnt)
-		return omap_sham_enqueue(req, OP_FINAL);
+	if (!(ctx->flags & FLAGS_ERROR)) {
+		/* OMAP HW accel works only with buffers >= 9 */
+		/* HMAC is always >= 9 because of ipad */
+		if ((ctx->digcnt + ctx->bufcnt) < 9)
+			err = omap_sham_final_shash(req);
+		else if (ctx->bufcnt)
+			return omap_sham_enqueue(req, OP_FINAL);
+	}
 
 	omap_sham_cleanup(req);
 
@@ -836,6 +890,8 @@
 	struct omap_sham_ctx *tctx = crypto_tfm_ctx(tfm);
 	const char *alg_name = crypto_tfm_alg_name(tfm);
 
+	pr_info("enter\n");
+
 	/* Allocate a fallback and abort if it failed. */
 	tctx->fallback = crypto_alloc_shash(alg_name, 0,
 					    CRYPTO_ALG_NEED_FALLBACK);
@@ -846,7 +902,7 @@
 	}
 
 	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
-				 sizeof(struct omap_sham_reqctx));
+				 sizeof(struct omap_sham_reqctx) + BUFLEN);
 
 	if (alg_base) {
 		struct omap_sham_hmac_ctx *bctx = tctx->base;
@@ -932,7 +988,7 @@
 						CRYPTO_ALG_NEED_FALLBACK,
 		.cra_blocksize		= SHA1_BLOCK_SIZE,
 		.cra_ctxsize		= sizeof(struct omap_sham_ctx),
-		.cra_alignmask		= 0,
+		.cra_alignmask		= OMAP_ALIGN_MASK,
 		.cra_module		= THIS_MODULE,
 		.cra_init		= omap_sham_cra_init,
 		.cra_exit		= omap_sham_cra_exit,
@@ -956,7 +1012,7 @@
 		.cra_blocksize		= SHA1_BLOCK_SIZE,
 		.cra_ctxsize		= sizeof(struct omap_sham_ctx) +
 					sizeof(struct omap_sham_hmac_ctx),
-		.cra_alignmask		= 0,
+		.cra_alignmask		= OMAP_ALIGN_MASK,
 		.cra_module		= THIS_MODULE,
 		.cra_init		= omap_sham_cra_sha1_init,
 		.cra_exit		= omap_sham_cra_exit,
@@ -980,7 +1036,7 @@
 		.cra_blocksize		= SHA1_BLOCK_SIZE,
 		.cra_ctxsize		= sizeof(struct omap_sham_ctx) +
 					sizeof(struct omap_sham_hmac_ctx),
-		.cra_alignmask		= 0,
+		.cra_alignmask		= OMAP_ALIGN_MASK,
 		.cra_module		= THIS_MODULE,
 		.cra_init		= omap_sham_cra_md5_init,
 		.cra_exit		= omap_sham_cra_exit,
@@ -993,7 +1049,7 @@
 	struct omap_sham_dev *dd = (struct omap_sham_dev *)data;
 	struct ahash_request *req = dd->req;
 	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
-	int ready = 1;
+	int ready = 0, err = 0;
 
 	if (ctx->flags & FLAGS_OUTPUT_READY) {
 		ctx->flags &= ~FLAGS_OUTPUT_READY;
@@ -1003,15 +1059,18 @@
 	if (dd->flags & FLAGS_DMA_ACTIVE) {
 		dd->flags &= ~FLAGS_DMA_ACTIVE;
 		omap_sham_update_dma_stop(dd);
-		omap_sham_update_dma_slow(dd);
+		if (!dd->err)
+			err = omap_sham_update_dma_start(dd);
 	}
 
-	if (ready && !(dd->flags & FLAGS_DMA_ACTIVE)) {
-		dev_dbg(dd->dev, "update done\n");
+	err = dd->err ? : err;
+
+	if (err != -EINPROGRESS && (ready || err)) {
+		dev_dbg(dd->dev, "update done: err: %d\n", err);
 		/* finish current request */
-		omap_sham_finish_req(req, 0);
+		omap_sham_finish_req(req, err);
 		/* start new request */
-		omap_sham_handle_queue(dd);
+		omap_sham_handle_queue(dd, NULL);
 	}
 }
 
@@ -1019,7 +1078,7 @@
 {
 	struct omap_sham_dev *dd = (struct omap_sham_dev *)data;
 
-	omap_sham_handle_queue(dd);
+	omap_sham_handle_queue(dd, NULL);
 }
 
 static irqreturn_t omap_sham_irq(int irq, void *dev_id)
@@ -1041,6 +1100,7 @@
 	omap_sham_read(dd, SHA_REG_CTRL);
 
 	ctx->flags |= FLAGS_OUTPUT_READY;
+	dd->err = 0;
 	tasklet_schedule(&dd->done_task);
 
 	return IRQ_HANDLED;
@@ -1050,8 +1110,13 @@
 {
 	struct omap_sham_dev *dd = data;
 
-	if (likely(lch == dd->dma_lch))
-		tasklet_schedule(&dd->done_task);
+	if (ch_status != OMAP_DMA_BLOCK_IRQ) {
+		pr_err("omap-sham DMA error status: 0x%hx\n", ch_status);
+		dd->err = -EIO;
+		dd->flags &= ~FLAGS_INIT; /* request to re-initialize */
+	}
+
+	tasklet_schedule(&dd->done_task);
 }
 
 static int omap_sham_dma_init(struct omap_sham_dev *dd)
@@ -1066,15 +1131,6 @@
 		dev_err(dd->dev, "Unable to request DMA channel\n");
 		return err;
 	}
-	omap_set_dma_dest_params(dd->dma_lch, 0,
-			OMAP_DMA_AMODE_CONSTANT,
-			dd->phys_base + SHA_REG_DIN(0), 0, 16);
-
-	omap_set_dma_dest_burst_mode(dd->dma_lch,
-			OMAP_DMA_DATA_BURST_16);
-
-	omap_set_dma_src_burst_mode(dd->dma_lch,
-			OMAP_DMA_DATA_BURST_4);
 
 	return 0;
 }
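
Besides adding the scatter-gather fast path, the omap-sham rework above stops keeping the bounce buffer DMA-mapped for the whole request: omap_sham_xmit_dma_map() maps it right before each transfer and omap_sham_update_dma_stop() unmaps it, which is why the dma_sync_single_for_device() call disappears. A small, self-contained sketch of that per-transfer mapping pattern; start_hw_dma() is a placeholder for the device-specific programming, not a real kernel function:

#include <linux/device.h>
#include <linux/dma-mapping.h>

/* stand-in for the device-specific DMA kick (assumption) */
static void start_hw_dma(dma_addr_t addr, size_t len)
{
}

static int example_xmit_mapped(struct device *dev, void *buf, size_t len)
{
	dma_addr_t addr;

	/* map just before the transfer; the API handles cache maintenance */
	addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, addr))
		return -EINVAL;

	start_hw_dma(addr, len);

	/* ... later, from the DMA-complete path ... */
	dma_unmap_single(dev, addr, len, DMA_TO_DEVICE);
	return 0;
}
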
diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
index 8a515ba..db33d30 100644
--- a/drivers/crypto/padlock-aes.c
+++ b/drivers/crypto/padlock-aes.c
@@ -9,6 +9,7 @@
 
 #include <crypto/algapi.h>
 #include <crypto/aes.h>
+#include <crypto/padlock.h>
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/types.h>
@@ -21,7 +22,6 @@
 #include <asm/byteorder.h>
 #include <asm/processor.h>
 #include <asm/i387.h>
-#include "padlock.h"
 
 /*
  * Number of data blocks actually fetched for each xcrypt insn.
diff --git a/drivers/crypto/padlock-sha.c b/drivers/crypto/padlock-sha.c
index d3a27e0..adf075b 100644
--- a/drivers/crypto/padlock-sha.c
+++ b/drivers/crypto/padlock-sha.c
@@ -13,6 +13,7 @@
  */
 
 #include <crypto/internal/hash.h>
+#include <crypto/padlock.h>
 #include <crypto/sha.h>
 #include <linux/err.h>
 #include <linux/module.h>
@@ -22,13 +23,6 @@
 #include <linux/kernel.h>
 #include <linux/scatterlist.h>
 #include <asm/i387.h>
-#include "padlock.h"
-
-#ifdef CONFIG_64BIT
-#define STACK_ALIGN 16
-#else
-#define STACK_ALIGN 4
-#endif
 
 struct padlock_sha_desc {
 	struct shash_desc fallback;
diff --git a/drivers/dca/dca-core.c b/drivers/dca/dca-core.c
index b98c676..c461eda 100644
--- a/drivers/dca/dca-core.c
+++ b/drivers/dca/dca-core.c
@@ -110,8 +110,6 @@
 
 	/* at this point only one domain in the list is expected */
 	domain = list_first_entry(&dca_domains, struct dca_domain, node);
-	if (!domain)
-		return;
 
 	list_for_each_entry_safe(dca, _dca, &domain->dca_providers, node) {
 		list_del(&dca->node);
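
The dca-core hunk above drops a NULL check on list_first_entry(): the macro is just container_of() applied to head->next, so it can never yield NULL and only list_empty() is a meaningful emptiness test. A short illustrative sketch (struct foo is made up):

#include <linux/list.h>
#include <linux/stddef.h>

struct foo {
	struct list_head node;
	int val;
};

static struct foo *first_foo(struct list_head *head)
{
	/* the only meaningful emptiness check is on the list itself */
	if (list_empty(head))
		return NULL;

	/* never NULL: simply container_of(head->next, struct foo, node) */
	return list_first_entry(head, struct foo, node);
}
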
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 6ee2359..ef13873 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -109,7 +109,7 @@
 
 config MPC512X_DMA
 	tristate "Freescale MPC512x built-in DMA engine support"
-	depends on PPC_MPC512x
+	depends on PPC_MPC512x || PPC_MPC831x
 	select DMA_ENGINE
 	---help---
 	  Enable support for the Freescale MPC512x built-in DMA engine.
diff --git a/drivers/dma/intel_mid_dma.c b/drivers/dma/intel_mid_dma.c
index 3109bd9..7826638 100644
--- a/drivers/dma/intel_mid_dma.c
+++ b/drivers/dma/intel_mid_dma.c
@@ -1060,8 +1060,8 @@
  * mid_setup_dma -	Setup the DMA controller
  * @pdev: Controller PCI device structure
  *
- * Initilize the DMA controller, channels, registers with DMA engine,
- * ISR. Initilize DMA controller channels.
+ * Initialize the DMA controller, channels, registers with DMA engine,
+ * ISR. Initialize DMA controller channels.
  */
 static int mid_setup_dma(struct pci_dev *pdev)
 {
@@ -1217,7 +1217,7 @@
  * @pdev: Controller PCI device structure
  * @id: pci device id structure
  *
- * Initilize the PCI device, map BARs, query driver data.
+ * Initialize the PCI device, map BARs, query driver data.
  * Call setup_dma to complete controller and channel initialization
  */
 static int __devinit intel_mid_dma_probe(struct pci_dev *pdev,
diff --git a/drivers/dma/mpc512x_dma.c b/drivers/dma/mpc512x_dma.c
index 4e9cbf3..59c2701 100644
--- a/drivers/dma/mpc512x_dma.c
+++ b/drivers/dma/mpc512x_dma.c
@@ -1,6 +1,7 @@
 /*
  * Copyright (C) Freescale Semicondutor, Inc. 2007, 2008.
  * Copyright (C) Semihalf 2009
+ * Copyright (C) Ilya Yanok, Emcraft Systems 2010
  *
  * Written by Piotr Ziecik <kosmo@semihalf.com>. Hardware description
  * (defines, structures and comments) was taken from MPC5121 DMA driver
@@ -70,6 +71,8 @@
 #define MPC_DMA_DMAES_SBE	(1 << 1)
 #define MPC_DMA_DMAES_DBE	(1 << 0)
 
+#define MPC_DMA_DMAGPOR_SNOOP_ENABLE	(1 << 6)
+
 #define MPC_DMA_TSIZE_1		0x00
 #define MPC_DMA_TSIZE_2		0x01
 #define MPC_DMA_TSIZE_4		0x02
@@ -104,7 +107,10 @@
 	/* 0x30 */
 	u32 dmahrsh;		/* DMA hw request status high(ch63~32) */
 	u32 dmahrsl;		/* DMA hardware request status low(ch31~0) */
-	u32 dmaihsa;		/* DMA interrupt high select AXE(ch63~32) */
+	union {
+		u32 dmaihsa;	/* DMA interrupt high select AXE(ch63~32) */
+		u32 dmagpor;	/* (General purpose register on MPC8308) */
+	};
 	u32 dmailsa;		/* DMA interrupt low select AXE(ch31~0) */
 	/* 0x40 ~ 0xff */
 	u32 reserve0[48];	/* Reserved */
@@ -195,7 +201,9 @@
 	struct mpc_dma_regs __iomem	*regs;
 	struct mpc_dma_tcd __iomem	*tcd;
 	int				irq;
+	int				irq2;
 	uint				error_status;
+	int				is_mpc8308;
 
 	/* Lock for error_status field in this structure */
 	spinlock_t			error_status_lock;
@@ -252,11 +260,13 @@
 		prev = mdesc;
 	}
 
-	prev->tcd->start = 0;
 	prev->tcd->int_maj = 1;
 
 	/* Send first descriptor in chain into hardware */
 	memcpy_toio(&mdma->tcd[cid], first->tcd, sizeof(struct mpc_dma_tcd));
+
+	if (first != prev)
+		mdma->tcd[cid].e_sg = 1;
 	out_8(&mdma->regs->dmassrt, cid);
 }
 
@@ -274,6 +284,9 @@
 
 		spin_lock(&mchan->lock);
 
+		out_8(&mdma->regs->dmacint, ch + off);
+		out_8(&mdma->regs->dmacerr, ch + off);
+
 		/* Check error status */
 		if (es & (1 << ch))
 			list_for_each_entry(mdesc, &mchan->active, node)
@@ -302,36 +315,68 @@
 	spin_unlock(&mdma->error_status_lock);
 
 	/* Handle interrupt on each channel */
-	mpc_dma_irq_process(mdma, in_be32(&mdma->regs->dmainth),
+	if (mdma->dma.chancnt > 32) {
+		mpc_dma_irq_process(mdma, in_be32(&mdma->regs->dmainth),
 					in_be32(&mdma->regs->dmaerrh), 32);
+	}
 	mpc_dma_irq_process(mdma, in_be32(&mdma->regs->dmaintl),
 					in_be32(&mdma->regs->dmaerrl), 0);
 
-	/* Ack interrupt on all channels */
-	out_be32(&mdma->regs->dmainth, 0xFFFFFFFF);
-	out_be32(&mdma->regs->dmaintl, 0xFFFFFFFF);
-	out_be32(&mdma->regs->dmaerrh, 0xFFFFFFFF);
-	out_be32(&mdma->regs->dmaerrl, 0xFFFFFFFF);
-
 	/* Schedule tasklet */
 	tasklet_schedule(&mdma->tasklet);
 
 	return IRQ_HANDLED;
 }
 
-/* DMA Tasklet */
-static void mpc_dma_tasklet(unsigned long data)
+/* process completed descriptors */
+static void mpc_dma_process_completed(struct mpc_dma *mdma)
 {
-	struct mpc_dma *mdma = (void *)data;
 	dma_cookie_t last_cookie = 0;
 	struct mpc_dma_chan *mchan;
 	struct mpc_dma_desc *mdesc;
 	struct dma_async_tx_descriptor *desc;
 	unsigned long flags;
 	LIST_HEAD(list);
-	uint es;
 	int i;
 
+	for (i = 0; i < mdma->dma.chancnt; i++) {
+		mchan = &mdma->channels[i];
+
+		/* Get all completed descriptors */
+		spin_lock_irqsave(&mchan->lock, flags);
+		if (!list_empty(&mchan->completed))
+			list_splice_tail_init(&mchan->completed, &list);
+		spin_unlock_irqrestore(&mchan->lock, flags);
+
+		if (list_empty(&list))
+			continue;
+
+		/* Execute callbacks and run dependencies */
+		list_for_each_entry(mdesc, &list, node) {
+			desc = &mdesc->desc;
+
+			if (desc->callback)
+				desc->callback(desc->callback_param);
+
+			last_cookie = desc->cookie;
+			dma_run_dependencies(desc);
+		}
+
+		/* Free descriptors */
+		spin_lock_irqsave(&mchan->lock, flags);
+		list_splice_tail_init(&list, &mchan->free);
+		mchan->completed_cookie = last_cookie;
+		spin_unlock_irqrestore(&mchan->lock, flags);
+	}
+}
+
+/* DMA Tasklet */
+static void mpc_dma_tasklet(unsigned long data)
+{
+	struct mpc_dma *mdma = (void *)data;
+	unsigned long flags;
+	uint es;
+
 	spin_lock_irqsave(&mdma->error_status_lock, flags);
 	es = mdma->error_status;
 	mdma->error_status = 0;
@@ -370,35 +415,7 @@
 			dev_err(mdma->dma.dev, "- Destination Bus Error\n");
 	}
 
-	for (i = 0; i < mdma->dma.chancnt; i++) {
-		mchan = &mdma->channels[i];
-
-		/* Get all completed descriptors */
-		spin_lock_irqsave(&mchan->lock, flags);
-		if (!list_empty(&mchan->completed))
-			list_splice_tail_init(&mchan->completed, &list);
-		spin_unlock_irqrestore(&mchan->lock, flags);
-
-		if (list_empty(&list))
-			continue;
-
-		/* Execute callbacks and run dependencies */
-		list_for_each_entry(mdesc, &list, node) {
-			desc = &mdesc->desc;
-
-			if (desc->callback)
-				desc->callback(desc->callback_param);
-
-			last_cookie = desc->cookie;
-			dma_run_dependencies(desc);
-		}
-
-		/* Free descriptors */
-		spin_lock_irqsave(&mchan->lock, flags);
-		list_splice_tail_init(&list, &mchan->free);
-		mchan->completed_cookie = last_cookie;
-		spin_unlock_irqrestore(&mchan->lock, flags);
-	}
+	mpc_dma_process_completed(mdma);
 }
 
 /* Submit descriptor to hardware */
@@ -563,6 +580,7 @@
 mpc_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
 					size_t len, unsigned long flags)
 {
+	struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
 	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
 	struct mpc_dma_desc *mdesc = NULL;
 	struct mpc_dma_tcd *tcd;
@@ -577,8 +595,11 @@
 	}
 	spin_unlock_irqrestore(&mchan->lock, iflags);
 
-	if (!mdesc)
+	if (!mdesc) {
+		/* try to free completed descriptors */
+		mpc_dma_process_completed(mdma);
 		return NULL;
+	}
 
 	mdesc->error = 0;
 	tcd = mdesc->tcd;
@@ -591,7 +612,8 @@
 		tcd->dsize = MPC_DMA_TSIZE_32;
 		tcd->soff = 32;
 		tcd->doff = 32;
-	} else if (IS_ALIGNED(src | dst | len, 16)) {
+	} else if (!mdma->is_mpc8308 && IS_ALIGNED(src | dst | len, 16)) {
+		/* MPC8308 doesn't support 16 byte transfers */
 		tcd->ssize = MPC_DMA_TSIZE_16;
 		tcd->dsize = MPC_DMA_TSIZE_16;
 		tcd->soff = 16;
@@ -651,6 +673,15 @@
 		return -EINVAL;
 	}
 
+	if (of_device_is_compatible(dn, "fsl,mpc8308-dma")) {
+		mdma->is_mpc8308 = 1;
+		mdma->irq2 = irq_of_parse_and_map(dn, 1);
+		if (mdma->irq2 == NO_IRQ) {
+			dev_err(dev, "Error mapping IRQ!\n");
+			return -EINVAL;
+		}
+	}
+
 	retval = of_address_to_resource(dn, 0, &res);
 	if (retval) {
 		dev_err(dev, "Error parsing memory region!\n");
@@ -681,11 +712,23 @@
 		return -EINVAL;
 	}
 
+	if (mdma->is_mpc8308) {
+		retval = devm_request_irq(dev, mdma->irq2, &mpc_dma_irq, 0,
+				DRV_NAME, mdma);
+		if (retval) {
+			dev_err(dev, "Error requesting IRQ2!\n");
+			return -EINVAL;
+		}
+	}
+
 	spin_lock_init(&mdma->error_status_lock);
 
 	dma = &mdma->dma;
 	dma->dev = dev;
-	dma->chancnt = MPC_DMA_CHANNELS;
+	if (!mdma->is_mpc8308)
+		dma->chancnt = MPC_DMA_CHANNELS;
+	else
+		dma->chancnt = 16; /* MPC8308 DMA has only 16 channels */
 	dma->device_alloc_chan_resources = mpc_dma_alloc_chan_resources;
 	dma->device_free_chan_resources = mpc_dma_free_chan_resources;
 	dma->device_issue_pending = mpc_dma_issue_pending;
@@ -721,26 +764,40 @@
 	 * - Round-robin group arbitration,
 	 * - Round-robin channel arbitration.
 	 */
-	out_be32(&mdma->regs->dmacr, MPC_DMA_DMACR_EDCG |
-				MPC_DMA_DMACR_ERGA | MPC_DMA_DMACR_ERCA);
+	if (!mdma->is_mpc8308) {
+		out_be32(&mdma->regs->dmacr, MPC_DMA_DMACR_EDCG |
+					MPC_DMA_DMACR_ERGA | MPC_DMA_DMACR_ERCA);
 
-	/* Disable hardware DMA requests */
-	out_be32(&mdma->regs->dmaerqh, 0);
-	out_be32(&mdma->regs->dmaerql, 0);
+		/* Disable hardware DMA requests */
+		out_be32(&mdma->regs->dmaerqh, 0);
+		out_be32(&mdma->regs->dmaerql, 0);
 
-	/* Disable error interrupts */
-	out_be32(&mdma->regs->dmaeeih, 0);
-	out_be32(&mdma->regs->dmaeeil, 0);
+		/* Disable error interrupts */
+		out_be32(&mdma->regs->dmaeeih, 0);
+		out_be32(&mdma->regs->dmaeeil, 0);
 
-	/* Clear interrupts status */
-	out_be32(&mdma->regs->dmainth, 0xFFFFFFFF);
-	out_be32(&mdma->regs->dmaintl, 0xFFFFFFFF);
-	out_be32(&mdma->regs->dmaerrh, 0xFFFFFFFF);
-	out_be32(&mdma->regs->dmaerrl, 0xFFFFFFFF);
+		/* Clear interrupts status */
+		out_be32(&mdma->regs->dmainth, 0xFFFFFFFF);
+		out_be32(&mdma->regs->dmaintl, 0xFFFFFFFF);
+		out_be32(&mdma->regs->dmaerrh, 0xFFFFFFFF);
+		out_be32(&mdma->regs->dmaerrl, 0xFFFFFFFF);
 
-	/* Route interrupts to IPIC */
-	out_be32(&mdma->regs->dmaihsa, 0);
-	out_be32(&mdma->regs->dmailsa, 0);
+		/* Route interrupts to IPIC */
+		out_be32(&mdma->regs->dmaihsa, 0);
+		out_be32(&mdma->regs->dmailsa, 0);
+	} else {
+		/* MPC8308 has 16 channels and lacks some registers */
+		out_be32(&mdma->regs->dmacr, MPC_DMA_DMACR_ERCA);
+
+		/* enable snooping */
+		out_be32(&mdma->regs->dmagpor, MPC_DMA_DMAGPOR_SNOOP_ENABLE);
+		/* Disable error interrupts */
+		out_be32(&mdma->regs->dmaeeil, 0);
+
+		/* Clear interrupts status */
+		out_be32(&mdma->regs->dmaintl, 0xFFFF);
+		out_be32(&mdma->regs->dmaerrl, 0xFFFF);
+	}
 
 	/* Register DMA engine */
 	dev_set_drvdata(dev, mdma);
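
The mpc512x_dma changes above bolt MPC8308 support onto the existing driver by keying every hardware difference (16 channels, a second interrupt line, the dmagpor register, no IPIC routing registers) off one flag derived from the device tree compatible string. A condensed sketch of that probe-time detection, reusing the field names visible in the hunks above and therefore assuming it is compiled inside the driver where struct mpc_dma and MPC_DMA_CHANNELS are defined:

#include <linux/of.h>

static void example_detect_mpc8308(struct mpc_dma *mdma,
				   struct device_node *dn)
{
	if (of_device_is_compatible(dn, "fsl,mpc8308-dma")) {
		mdma->is_mpc8308 = 1;
		mdma->dma.chancnt = 16;	/* MPC8308 DMA has only 16 channels */
	} else {
		mdma->is_mpc8308 = 0;
		mdma->dma.chancnt = MPC_DMA_CHANNELS;
	}
}
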
diff --git a/drivers/edac/amd8131_edac.h b/drivers/edac/amd8131_edac.h
index 60e0d1c..6f8b071 100644
--- a/drivers/edac/amd8131_edac.h
+++ b/drivers/edac/amd8131_edac.h
@@ -99,7 +99,7 @@
 
 /*
  * AMD8131 chipset has two pairs of PCIX Bridge and related IOAPIC
- * Controler, and ATCA-6101 has two AMD8131 chipsets, so there are
+ * Controller, and ATCA-6101 has two AMD8131 chipsets, so there are
  * four PCIX Bridges on ATCA-6101 altogether.
  *
  * These PCIX Bridges share the same PCI Device ID and are all of
diff --git a/drivers/edac/cell_edac.c b/drivers/edac/cell_edac.c
index c973004..db1df59 100644
--- a/drivers/edac/cell_edac.c
+++ b/drivers/edac/cell_edac.c
@@ -47,7 +47,7 @@
 	offset = address & ~PAGE_MASK;
 	syndrome = (ar & 0x000000001fe00000ul) >> 21;
 
-	/* TODO: Decoding of the error addresss */
+	/* TODO: Decoding of the error address */
 	edac_mc_handle_ce(mci, csrow->first_page + pfn, offset,
 			  syndrome, 0, chan, "");
 }
@@ -68,7 +68,7 @@
 	pfn = address >> PAGE_SHIFT;
 	offset = address & ~PAGE_MASK;
 
-	/* TODO: Decoding of the error addresss */
+	/* TODO: Decoding of the error address */
 	edac_mc_handle_ue(mci, csrow->first_page + pfn, offset, 0, "");
 }
 
diff --git a/drivers/edac/edac_core.h b/drivers/edac/edac_core.h
index ff1eb7b..3d96534 100644
--- a/drivers/edac/edac_core.h
+++ b/drivers/edac/edac_core.h
@@ -259,7 +259,7 @@
  *			for single channel are 64 bits, for dual channel 128
  *			bits.
  *
- * Single-Ranked stick:	A Single-ranked stick has 1 chip-select row of memmory.
+ * Single-Ranked stick:	A Single-ranked stick has 1 chip-select row of memory.
  *			Motherboards commonly drive two chip-select pins to
  *			a memory stick. A single-ranked stick, will occupy
  *			only one of those rows. The other will be unused.
diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
index 362861c..81154ab 100644
--- a/drivers/edac/i7core_edac.c
+++ b/drivers/edac/i7core_edac.c
@@ -1,6 +1,6 @@
 /* Intel i7 core/Nehalem Memory Controller kernel module
  *
- * This driver supports yhe memory controllers found on the Intel
+ * This driver supports the memory controllers found on the Intel
  * processor families i7core, i7core 7xx/8xx, i5core, Xeon 35xx,
  * Xeon 55xx and Xeon 56xx also known as Nehalem, Nehalem-EP, Lynnfield
  * and Westmere-EP.
@@ -1271,7 +1271,7 @@
 	int i;
 
 	/*
-	 * On Xeon 55xx, the Intel Quckpath Arch Generic Non-core pci buses
+	 * On Xeon 55xx, the Intel Quick Path Arch Generic Non-core pci buses
 	 * aren't announced by acpi. So, we need to use a legacy scan probing
 	 * to detect them
 	 */
@@ -1864,7 +1864,7 @@
 	if (mce->mcgstatus & 1)
 		i7core_check_error(mci);
 
-	/* Advice mcelog that the error were handled */
+	/* Advise mcelog that the errors were handled */
 	return 1;
 }
 
diff --git a/drivers/edac/ppc4xx_edac.c b/drivers/edac/ppc4xx_edac.c
index 070cea4..b9f0c20 100644
--- a/drivers/edac/ppc4xx_edac.c
+++ b/drivers/edac/ppc4xx_edac.c
@@ -873,7 +873,7 @@
 }
 
 /**
- * ppc4xx_edac_init_csrows - intialize driver instance rows
+ * ppc4xx_edac_init_csrows - initialize driver instance rows
  * @mci: A pointer to the EDAC memory controller instance
  *       associated with the ibm,sdram-4xx-ddr2 controller for which
  *       the csrows (i.e. banks/ranks) are being initialized.
@@ -881,7 +881,7 @@
  *          currently set for the controller, from which bank width
  *          and memory type information is derived.
  *
- * This routine intializes the virtual "chip select rows" associated
+ * This routine initializes the virtual "chip select rows" associated
  * with the EDAC memory controller instance. An ibm,sdram-4xx-ddr2
  * controller bank/rank is mapped to a row.
  *
@@ -992,7 +992,7 @@
 }
 
 /**
- * ppc4xx_edac_mc_init - intialize driver instance
+ * ppc4xx_edac_mc_init - initialize driver instance
  * @mci: A pointer to the EDAC memory controller instance being
  *       initialized.
  * @op: A pointer to the OpenFirmware device tree node associated
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 082495b..664660e 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -118,7 +118,7 @@
 
 config GPIO_VX855
 	tristate "VIA VX855/VX875 GPIO"
-	depends on GPIOLIB
+	depends on GPIOLIB && MFD_SUPPORT && PCI
 	select MFD_CORE
 	select MFD_VX855
 	help
@@ -295,7 +295,7 @@
 
 config GPIO_CS5535
 	tristate "AMD CS5535/CS5536 GPIO support"
-	depends on PCI && !CS5535_GPIO
+	depends on PCI && X86 && !CS5535_GPIO
 	help
 	  The AMD CS5535 and CS5536 southbridges support 28 GPIO pins that
 	  can be used for quite a number of things.  The CS5535/6 is found on
@@ -333,6 +333,15 @@
 	  which is an IOH(Input/Output Hub) for x86 embedded processor.
 	  This driver can access PCH GPIO device.
 
+config GPIO_ML_IOH
+	tristate "OKI SEMICONDUCTOR ML7213 IOH GPIO support"
+	depends on PCI
+	help
+	  ML7213 is a companion chip for the Intel Atom E6xx series.
+	  This driver can be used for the OKI SEMICONDUCTOR ML7213 IOH
+	  (Input/Output Hub), which is intended for IVI (In-Vehicle
+	  Infotainment) use.
+	  This driver can access the IOH's GPIO device.
+
 config GPIO_TIMBERDALE
 	bool "Support for timberdale GPIO IP"
 	depends on MFD_TIMBERDALE && GPIOLIB && HAS_IOMEM
@@ -342,6 +351,7 @@
 config GPIO_RDC321X
 	tristate "RDC R-321x GPIO support"
 	depends on PCI && GPIOLIB
+	select MFD_SUPPORT
 	select MFD_CORE
 	select MFD_RDC321X
 	help
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index 39bfd7a..3351cf8 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -41,3 +41,4 @@
 obj-$(CONFIG_GPIO_JANZ_TTL)	+= janz-ttl.o
 obj-$(CONFIG_GPIO_SX150X)	+= sx150x.o
 obj-$(CONFIG_GPIO_VX855)	+= vx855_gpio.o
+obj-$(CONFIG_GPIO_ML_IOH)	+= ml_ioh_gpio.o
diff --git a/drivers/gpio/adp5588-gpio.c b/drivers/gpio/adp5588-gpio.c
index 0871f78..33fc685 100644
--- a/drivers/gpio/adp5588-gpio.c
+++ b/drivers/gpio/adp5588-gpio.c
@@ -146,9 +146,10 @@
 	return dev->irq_base + off;
 }
 
-static void adp5588_irq_bus_lock(unsigned int irq)
+static void adp5588_irq_bus_lock(struct irq_data *d)
 {
-	struct adp5588_gpio *dev = get_irq_chip_data(irq);
+	struct adp5588_gpio *dev = irq_data_get_irq_chip_data(d);
+
 	mutex_lock(&dev->irq_lock);
 }
 
@@ -160,9 +161,9 @@
   * and unlocks the bus.
   */
 
-static void adp5588_irq_bus_sync_unlock(unsigned int irq)
+static void adp5588_irq_bus_sync_unlock(struct irq_data *d)
 {
-	struct adp5588_gpio *dev = get_irq_chip_data(irq);
+	struct adp5588_gpio *dev = irq_data_get_irq_chip_data(d);
 	int i;
 
 	for (i = 0; i <= ADP5588_BANK(ADP5588_MAXGPIO); i++)
@@ -175,31 +176,31 @@
 	mutex_unlock(&dev->irq_lock);
 }
 
-static void adp5588_irq_mask(unsigned int irq)
+static void adp5588_irq_mask(struct irq_data *d)
 {
-	struct adp5588_gpio *dev = get_irq_chip_data(irq);
-	unsigned gpio = irq - dev->irq_base;
+	struct adp5588_gpio *dev = irq_data_get_irq_chip_data(d);
+	unsigned gpio = d->irq - dev->irq_base;
 
 	dev->irq_mask[ADP5588_BANK(gpio)] &= ~ADP5588_BIT(gpio);
 }
 
-static void adp5588_irq_unmask(unsigned int irq)
+static void adp5588_irq_unmask(struct irq_data *d)
 {
-	struct adp5588_gpio *dev = get_irq_chip_data(irq);
-	unsigned gpio = irq - dev->irq_base;
+	struct adp5588_gpio *dev = irq_data_get_irq_chip_data(d);
+	unsigned gpio = d->irq - dev->irq_base;
 
 	dev->irq_mask[ADP5588_BANK(gpio)] |= ADP5588_BIT(gpio);
 }
 
-static int adp5588_irq_set_type(unsigned int irq, unsigned int type)
+static int adp5588_irq_set_type(struct irq_data *d, unsigned int type)
 {
-	struct adp5588_gpio *dev = get_irq_chip_data(irq);
-	uint16_t gpio = irq - dev->irq_base;
+	struct adp5588_gpio *dev = irq_data_get_irq_chip_data(d);
+	uint16_t gpio = d->irq - dev->irq_base;
 	unsigned bank, bit;
 
 	if ((type & IRQ_TYPE_EDGE_BOTH)) {
 		dev_err(&dev->client->dev, "irq %d: unsupported type %d\n",
-			irq, type);
+			d->irq, type);
 		return -EINVAL;
 	}
 
@@ -222,11 +223,11 @@
 
 static struct irq_chip adp5588_irq_chip = {
 	.name			= "adp5588",
-	.mask			= adp5588_irq_mask,
-	.unmask			= adp5588_irq_unmask,
-	.bus_lock		= adp5588_irq_bus_lock,
-	.bus_sync_unlock	= adp5588_irq_bus_sync_unlock,
-	.set_type		= adp5588_irq_set_type,
+	.irq_mask		= adp5588_irq_mask,
+	.irq_unmask		= adp5588_irq_unmask,
+	.irq_bus_lock		= adp5588_irq_bus_lock,
+	.irq_bus_sync_unlock	= adp5588_irq_bus_sync_unlock,
+	.irq_set_type		= adp5588_irq_set_type,
 };
 
 static int adp5588_gpio_read_intstat(struct i2c_client *client, u8 *buf)
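
The adp5588 conversion above, like the langwell, max732x, pca953x, pl061 and stmpe-gpio hunks further down, is the same mechanical change: irq_chip callbacks now take a struct irq_data pointer instead of a bare IRQ number and are wired up through the irq_-prefixed fields. A minimal sketch of the new callback shape, with a made-up demo_chip as the chip data:

#include <linux/irq.h>

struct demo_chip {
	unsigned long	irq_enabled;	/* software copy of the mask state */
	unsigned int	irq_base;
};

static void demo_irq_mask(struct irq_data *d)
{
	struct demo_chip *chip = irq_data_get_irq_chip_data(d);

	chip->irq_enabled &= ~(1UL << (d->irq - chip->irq_base));
}

static void demo_irq_unmask(struct irq_data *d)
{
	struct demo_chip *chip = irq_data_get_irq_chip_data(d);

	chip->irq_enabled |= 1UL << (d->irq - chip->irq_base);
}

static struct irq_chip demo_irq_chip = {
	.name		= "demo",
	.irq_mask	= demo_irq_mask,
	.irq_unmask	= demo_irq_unmask,
};

Registration is unchanged: set_irq_chip_data() and set_irq_chip_and_handler() still associate the chip data and flow handler with each IRQ number, as these drivers already do.
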
diff --git a/drivers/gpio/cs5535-gpio.c b/drivers/gpio/cs5535-gpio.c
index d3e55a0..815d98b2 100644
--- a/drivers/gpio/cs5535-gpio.c
+++ b/drivers/gpio/cs5535-gpio.c
@@ -15,6 +15,7 @@
 #include <linux/gpio.h>
 #include <linux/io.h>
 #include <linux/cs5535.h>
+#include <asm/msr.h>
 
 #define DRV_NAME "cs5535-gpio"
 #define GPIO_BAR 1
@@ -144,6 +145,57 @@
 }
 EXPORT_SYMBOL_GPL(cs5535_gpio_isset);
 
+int cs5535_gpio_set_irq(unsigned group, unsigned irq)
+{
+	uint32_t lo, hi;
+
+	if (group > 7 || irq > 15)
+		return -EINVAL;
+
+	rdmsr(MSR_PIC_ZSEL_HIGH, lo, hi);
+
+	lo &= ~(0xF << (group * 4));
+	lo |= (irq & 0xF) << (group * 4);
+
+	wrmsr(MSR_PIC_ZSEL_HIGH, lo, hi);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(cs5535_gpio_set_irq);
+
+void cs5535_gpio_setup_event(unsigned offset, int pair, int pme)
+{
+	struct cs5535_gpio_chip *chip = &cs5535_gpio_chip;
+	uint32_t shift = (offset % 8) * 4;
+	unsigned long flags;
+	uint32_t val;
+
+	if (offset >= 24)
+		offset = GPIO_MAP_W;
+	else if (offset >= 16)
+		offset = GPIO_MAP_Z;
+	else if (offset >= 8)
+		offset = GPIO_MAP_Y;
+	else
+		offset = GPIO_MAP_X;
+
+	spin_lock_irqsave(&chip->lock, flags);
+	val = inl(chip->base + offset);
+
+	/* Clear whatever was there before */
+	val &= ~(0xF << shift);
+
+	/* Set the new value */
+	val |= ((pair & 7) << shift);
+
+	/* Set the PME bit if this is a PME event */
+	if (pme)
+		val |= (1 << (shift + 3));
+
+	outl(val, chip->base + offset);
+	spin_unlock_irqrestore(&chip->lock, flags);
+}
+EXPORT_SYMBOL_GPL(cs5535_gpio_setup_event);
+
 /*
  * Generic gpio_chip API support.
  */
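
cs5535_gpio_set_irq() above routes one of the eight GPIO filter/event groups to a PIC interrupt via the ZSEL MSR, and cs5535_gpio_setup_event() points an individual GPIO at such a group through the GPIO_MAP_X..W registers, optionally as a PME. A hypothetical consumer (the pin, group and IRQ numbers are made up, and the prototypes are assumed to be exported through <linux/cs5535.h> alongside the existing helpers) could wire a GPIO to an interrupt like this:

#include <linux/cs5535.h>

static int example_route_gpio_irq(void)
{
	int err;

	/* route filter/event group 2 to PIC IRQ 12 (numbers are made up) */
	err = cs5535_gpio_set_irq(2, 12);
	if (err)
		return err;

	/* have GPIO 18 report through group 2 as an IRQ, not a PME */
	cs5535_gpio_setup_event(18, 2, 0);

	return 0;
}
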
diff --git a/drivers/gpio/langwell_gpio.c b/drivers/gpio/langwell_gpio.c
index 64db9dc..d81cc74 100644
--- a/drivers/gpio/langwell_gpio.c
+++ b/drivers/gpio/langwell_gpio.c
@@ -134,10 +134,10 @@
 	return lnw->irq_base + offset;
 }
 
-static int lnw_irq_type(unsigned irq, unsigned type)
+static int lnw_irq_type(struct irq_data *d, unsigned type)
 {
-	struct lnw_gpio *lnw = get_irq_chip_data(irq);
-	u32 gpio = irq - lnw->irq_base;
+	struct lnw_gpio *lnw = irq_data_get_irq_chip_data(d);
+	u32 gpio = d->irq - lnw->irq_base;
 	unsigned long flags;
 	u32 value;
 	void __iomem *grer = gpio_reg(&lnw->chip, gpio, GRER);
@@ -162,19 +162,19 @@
 	return 0;
 }
 
-static void lnw_irq_unmask(unsigned irq)
+static void lnw_irq_unmask(struct irq_data *d)
 {
 }
 
-static void lnw_irq_mask(unsigned irq)
+static void lnw_irq_mask(struct irq_data *d)
 {
 }
 
 static struct irq_chip lnw_irqchip = {
 	.name		= "LNW-GPIO",
-	.mask		= lnw_irq_mask,
-	.unmask		= lnw_irq_unmask,
-	.set_type	= lnw_irq_type,
+	.irq_mask	= lnw_irq_mask,
+	.irq_unmask	= lnw_irq_unmask,
+	.irq_set_type	= lnw_irq_type,
 };
 
 static DEFINE_PCI_DEVICE_TABLE(lnw_gpio_ids) = {   /* pin number */
diff --git a/drivers/gpio/max732x.c b/drivers/gpio/max732x.c
index 9cad60f..9e1d01f 100644
--- a/drivers/gpio/max732x.c
+++ b/drivers/gpio/max732x.c
@@ -327,40 +327,40 @@
 	return chip->irq_base + off;
 }
 
-static void max732x_irq_mask(unsigned int irq)
+static void max732x_irq_mask(struct irq_data *d)
 {
-	struct max732x_chip *chip = get_irq_chip_data(irq);
+	struct max732x_chip *chip = irq_data_get_irq_chip_data(d);
 
-	chip->irq_mask_cur &= ~(1 << (irq - chip->irq_base));
+	chip->irq_mask_cur &= ~(1 << (d->irq - chip->irq_base));
 }
 
-static void max732x_irq_unmask(unsigned int irq)
+static void max732x_irq_unmask(struct irq_data *d)
 {
-	struct max732x_chip *chip = get_irq_chip_data(irq);
+	struct max732x_chip *chip = irq_data_get_irq_chip_data(d);
 
-	chip->irq_mask_cur |= 1 << (irq - chip->irq_base);
+	chip->irq_mask_cur |= 1 << (d->irq - chip->irq_base);
 }
 
-static void max732x_irq_bus_lock(unsigned int irq)
+static void max732x_irq_bus_lock(struct irq_data *d)
 {
-	struct max732x_chip *chip = get_irq_chip_data(irq);
+	struct max732x_chip *chip = irq_data_get_irq_chip_data(d);
 
 	mutex_lock(&chip->irq_lock);
 	chip->irq_mask_cur = chip->irq_mask;
 }
 
-static void max732x_irq_bus_sync_unlock(unsigned int irq)
+static void max732x_irq_bus_sync_unlock(struct irq_data *d)
 {
-	struct max732x_chip *chip = get_irq_chip_data(irq);
+	struct max732x_chip *chip = irq_data_get_irq_chip_data(d);
 
 	max732x_irq_update_mask(chip);
 	mutex_unlock(&chip->irq_lock);
 }
 
-static int max732x_irq_set_type(unsigned int irq, unsigned int type)
+static int max732x_irq_set_type(struct irq_data *d, unsigned int type)
 {
-	struct max732x_chip *chip = get_irq_chip_data(irq);
-	uint16_t off = irq - chip->irq_base;
+	struct max732x_chip *chip = irq_data_get_irq_chip_data(d);
+	uint16_t off = d->irq - chip->irq_base;
 	uint16_t mask = 1 << off;
 
 	if (!(mask & chip->dir_input)) {
@@ -371,7 +371,7 @@
 
 	if (!(type & IRQ_TYPE_EDGE_BOTH)) {
 		dev_err(&chip->client->dev, "irq %d: unsupported type %d\n",
-			irq, type);
+			d->irq, type);
 		return -EINVAL;
 	}
 
@@ -390,11 +390,11 @@
 
 static struct irq_chip max732x_irq_chip = {
 	.name			= "max732x",
-	.mask			= max732x_irq_mask,
-	.unmask			= max732x_irq_unmask,
-	.bus_lock		= max732x_irq_bus_lock,
-	.bus_sync_unlock	= max732x_irq_bus_sync_unlock,
-	.set_type		= max732x_irq_set_type,
+	.irq_mask		= max732x_irq_mask,
+	.irq_unmask		= max732x_irq_unmask,
+	.irq_bus_lock		= max732x_irq_bus_lock,
+	.irq_bus_sync_unlock	= max732x_irq_bus_sync_unlock,
+	.irq_set_type		= max732x_irq_set_type,
 };
 
 static uint8_t max732x_irq_pending(struct max732x_chip *chip)
diff --git a/drivers/gpio/ml_ioh_gpio.c b/drivers/gpio/ml_ioh_gpio.c
new file mode 100644
index 0000000..cead8e6
--- /dev/null
+++ b/drivers/gpio/ml_ioh_gpio.c
@@ -0,0 +1,352 @@
+/*
+ * Copyright (C) 2010 OKI SEMICONDUCTOR Co., LTD.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/gpio.h>
+
+#define PCI_VENDOR_ID_ROHM             0x10DB
+
+struct ioh_reg_comn {
+	u32	ien;
+	u32	istatus;
+	u32	idisp;
+	u32	iclr;
+	u32	imask;
+	u32	imaskclr;
+	u32	po;
+	u32	pi;
+	u32	pm;
+	u32	im_0;
+	u32	im_1;
+	u32	reserved;
+};
+
+struct ioh_regs {
+	struct ioh_reg_comn regs[8];
+	u32 reserve1[16];
+	u32 ioh_sel_reg[4];
+	u32 reserve2[11];
+	u32 srst;
+};
+
+/**
+ * struct ioh_gpio_reg_data - The register store data.
+ * @po_reg:	To store contents of PO register.
+ * @pm_reg:	To store contents of PM register.
+ */
+struct ioh_gpio_reg_data {
+	u32 po_reg;
+	u32 pm_reg;
+};
+
+/**
+ * struct ioh_gpio - GPIO private data structure.
+ * @base:			PCI base address of Memory mapped I/O register.
+ * @reg:			Memory mapped IOH GPIO register list.
+ * @dev:			Pointer to device structure.
+ * @gpio:			Data for GPIO infrastructure.
+ * @ioh_gpio_reg:		Register contents saved here across
+ *				suspend/resume.
+ * @lock:			Protects accesses to the PO/PM registers.
+ * @ch:				GPIO channel number.
+ */
+struct ioh_gpio {
+	void __iomem *base;
+	struct ioh_regs __iomem *reg;
+	struct device *dev;
+	struct gpio_chip gpio;
+	struct ioh_gpio_reg_data ioh_gpio_reg;
+	struct mutex lock;
+	int ch;
+};
+
+static const int num_ports[] = {6, 12, 16, 16, 15, 16, 16, 12};
+
+static void ioh_gpio_set(struct gpio_chip *gpio, unsigned nr, int val)
+{
+	u32 reg_val;
+	struct ioh_gpio *chip =	container_of(gpio, struct ioh_gpio, gpio);
+
+	mutex_lock(&chip->lock);
+	reg_val = ioread32(&chip->reg->regs[chip->ch].po);
+	if (val)
+		reg_val |= (1 << nr);
+	else
+		reg_val &= ~(1 << nr);
+
+	iowrite32(reg_val, &chip->reg->regs[chip->ch].po);
+	mutex_unlock(&chip->lock);
+}
+
+static int ioh_gpio_get(struct gpio_chip *gpio, unsigned nr)
+{
+	struct ioh_gpio *chip =	container_of(gpio, struct ioh_gpio, gpio);
+
+	return ioread32(&chip->reg->regs[chip->ch].pi) & (1 << nr);
+}
+
+static int ioh_gpio_direction_output(struct gpio_chip *gpio, unsigned nr,
+				     int val)
+{
+	struct ioh_gpio *chip =	container_of(gpio, struct ioh_gpio, gpio);
+	u32 pm;
+	u32 reg_val;
+
+	mutex_lock(&chip->lock);
+	pm = ioread32(&chip->reg->regs[chip->ch].pm) &
+					((1 << num_ports[chip->ch]) - 1);
+	pm |= (1 << nr);
+	iowrite32(pm, &chip->reg->regs[chip->ch].pm);
+
+	reg_val = ioread32(&chip->reg->regs[chip->ch].po);
+	if (val)
+		reg_val |= (1 << nr);
+	else
+		reg_val &= ~(1 << nr);
+
+	iowrite32(reg_val, &chip->reg->regs[chip->ch].po);
+	mutex_unlock(&chip->lock);
+
+	return 0;
+}
+
+static int ioh_gpio_direction_input(struct gpio_chip *gpio, unsigned nr)
+{
+	struct ioh_gpio *chip =	container_of(gpio, struct ioh_gpio, gpio);
+	u32 pm;
+
+	mutex_lock(&chip->lock);
+	pm = ioread32(&chip->reg->regs[chip->ch].pm) &
+				((1 << num_ports[chip->ch]) - 1);
+	pm &= ~(1 << nr);
+	iowrite32(pm, &chip->reg->regs[chip->ch].pm);
+	mutex_unlock(&chip->lock);
+
+	return 0;
+}
+
+/*
+ * Save register configuration and disable interrupts.
+ */
+static void ioh_gpio_save_reg_conf(struct ioh_gpio *chip)
+{
+	chip->ioh_gpio_reg.po_reg = ioread32(&chip->reg->regs[chip->ch].po);
+	chip->ioh_gpio_reg.pm_reg = ioread32(&chip->reg->regs[chip->ch].pm);
+}
+
+/*
+ * This function restores the register configuration of the GPIO device.
+ */
+static void ioh_gpio_restore_reg_conf(struct ioh_gpio *chip)
+{
+	/* to store contents of PO register */
+	iowrite32(chip->ioh_gpio_reg.po_reg, &chip->reg->regs[chip->ch].po);
+	/* to store contents of PM register */
+	iowrite32(chip->ioh_gpio_reg.pm_reg, &chip->reg->regs[chip->ch].pm);
+}
+
+static void ioh_gpio_setup(struct ioh_gpio *chip, int num_port)
+{
+	struct gpio_chip *gpio = &chip->gpio;
+
+	gpio->label = dev_name(chip->dev);
+	gpio->owner = THIS_MODULE;
+	gpio->direction_input = ioh_gpio_direction_input;
+	gpio->get = ioh_gpio_get;
+	gpio->direction_output = ioh_gpio_direction_output;
+	gpio->set = ioh_gpio_set;
+	gpio->dbg_show = NULL;
+	gpio->base = -1;
+	gpio->ngpio = num_port;
+	gpio->can_sleep = 0;
+}
+
+static int __devinit ioh_gpio_probe(struct pci_dev *pdev,
+				    const struct pci_device_id *id)
+{
+	int ret;
+	int i;
+	struct ioh_gpio *chip;
+	void __iomem *base;
+	struct ioh_gpio *chip_save;
+
+	ret = pci_enable_device(pdev);
+	if (ret) {
+		dev_err(&pdev->dev, "%s : pci_enable_device failed", __func__);
+		goto err_pci_enable;
+	}
+
+	ret = pci_request_regions(pdev, KBUILD_MODNAME);
+	if (ret) {
+		dev_err(&pdev->dev, "pci_request_regions failed-%d", ret);
+		goto err_request_regions;
+	}
+
+	base = pci_iomap(pdev, 1, 0);
+	if (!base) {
+		dev_err(&pdev->dev, "%s : pci_iomap failed", __func__);
+		ret = -ENOMEM;
+		goto err_iomap;
+	}
+
+	chip_save = kzalloc(sizeof(*chip) * 8, GFP_KERNEL);
+	if (chip_save == NULL) {
+		dev_err(&pdev->dev, "%s : kzalloc failed", __func__);
+		ret = -ENOMEM;
+		goto err_kzalloc;
+	}
+
+	chip = chip_save;
+	for (i = 0; i < 8; i++, chip++) {
+		chip->dev = &pdev->dev;
+		chip->base = base;
+		chip->reg = chip->base;
+		chip->ch = i;
+		mutex_init(&chip->lock);
+		ioh_gpio_setup(chip, num_ports[i]);
+		ret = gpiochip_add(&chip->gpio);
+		if (ret) {
+			dev_err(&pdev->dev, "IOH gpio: Failed to register GPIO\n");
+			goto err_gpiochip_add;
+		}
+	}
+
+	chip = chip_save;
+	pci_set_drvdata(pdev, chip);
+
+	return 0;
+
+err_gpiochip_add:
+	for (; i != 0; i--) {
+		chip--;
+		ret = gpiochip_remove(&chip->gpio);
+		if (ret)
+			dev_err(&pdev->dev, "Failed gpiochip_remove(%d)\n", i);
+	}
+	kfree(chip_save);
+
+err_kzalloc:
+	pci_iounmap(pdev, base);
+
+err_iomap:
+	pci_release_regions(pdev);
+
+err_request_regions:
+	pci_disable_device(pdev);
+
+err_pci_enable:
+
+	dev_err(&pdev->dev, "%s Failed returns %d\n", __func__, ret);
+	return ret;
+}
+
+static void __devexit ioh_gpio_remove(struct pci_dev *pdev)
+{
+	int err;
+	int i;
+	struct ioh_gpio *chip = pci_get_drvdata(pdev);
+	struct ioh_gpio *chip_save;
+
+	chip_save = chip;
+	for (i = 0; i < 8; i++, chip++) {
+		err = gpiochip_remove(&chip->gpio);
+		if (err)
+			dev_err(&pdev->dev, "Failed gpiochip_remove\n");
+	}
+
+	chip = chip_save;
+	pci_iounmap(pdev, chip->base);
+	pci_release_regions(pdev);
+	pci_disable_device(pdev);
+	kfree(chip);
+}
+
+#ifdef CONFIG_PM
+static int ioh_gpio_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+	s32 ret;
+	struct ioh_gpio *chip = pci_get_drvdata(pdev);
+
+	ioh_gpio_save_reg_conf(chip);
+	ioh_gpio_restore_reg_conf(chip);
+
+	ret = pci_save_state(pdev);
+	if (ret) {
+		dev_err(&pdev->dev, "pci_save_state Failed-%d\n", ret);
+		return ret;
+	}
+	pci_disable_device(pdev);
+	pci_set_power_state(pdev, PCI_D0);
+	ret = pci_enable_wake(pdev, PCI_D0, 1);
+	if (ret)
+		dev_err(&pdev->dev, "pci_enable_wake Failed -%d\n", ret);
+
+	return 0;
+}
+
+static int ioh_gpio_resume(struct pci_dev *pdev)
+{
+	s32 ret;
+	struct ioh_gpio *chip = pci_get_drvdata(pdev);
+
+	ret = pci_enable_wake(pdev, PCI_D0, 0);
+
+	pci_set_power_state(pdev, PCI_D0);
+	ret = pci_enable_device(pdev);
+	if (ret) {
+		dev_err(&pdev->dev, "pci_enable_device Failed-%d ", ret);
+		return ret;
+	}
+	pci_restore_state(pdev);
+
+	iowrite32(0x01, &chip->reg->srst);
+	iowrite32(0x00, &chip->reg->srst);
+	ioh_gpio_restore_reg_conf(chip);
+
+	return 0;
+}
+#else
+#define ioh_gpio_suspend NULL
+#define ioh_gpio_resume NULL
+#endif
+
+static DEFINE_PCI_DEVICE_TABLE(ioh_gpio_pcidev_id) = {
+	{ PCI_DEVICE(PCI_VENDOR_ID_ROHM, 0x802E) },
+	{ 0, }
+};
+
+static struct pci_driver ioh_gpio_driver = {
+	.name = "ml_ioh_gpio",
+	.id_table = ioh_gpio_pcidev_id,
+	.probe = ioh_gpio_probe,
+	.remove = __devexit_p(ioh_gpio_remove),
+	.suspend = ioh_gpio_suspend,
+	.resume = ioh_gpio_resume
+};
+
+static int __init ioh_gpio_pci_init(void)
+{
+	return pci_register_driver(&ioh_gpio_driver);
+}
+module_init(ioh_gpio_pci_init);
+
+static void __exit ioh_gpio_pci_exit(void)
+{
+	pci_unregister_driver(&ioh_gpio_driver);
+}
+module_exit(ioh_gpio_pci_exit);
+
+MODULE_DESCRIPTION("OKI SEMICONDUCTOR ML-IOH series GPIO Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpio/pca953x.c b/drivers/gpio/pca953x.c
index 5018666..a261972 100644
--- a/drivers/gpio/pca953x.c
+++ b/drivers/gpio/pca953x.c
@@ -228,30 +228,30 @@
 	return chip->irq_base + off;
 }
 
-static void pca953x_irq_mask(unsigned int irq)
+static void pca953x_irq_mask(struct irq_data *d)
 {
-	struct pca953x_chip *chip = get_irq_chip_data(irq);
+	struct pca953x_chip *chip = irq_data_get_irq_chip_data(d);
 
-	chip->irq_mask &= ~(1 << (irq - chip->irq_base));
+	chip->irq_mask &= ~(1 << (d->irq - chip->irq_base));
 }
 
-static void pca953x_irq_unmask(unsigned int irq)
+static void pca953x_irq_unmask(struct irq_data *d)
 {
-	struct pca953x_chip *chip = get_irq_chip_data(irq);
+	struct pca953x_chip *chip = irq_data_get_irq_chip_data(d);
 
-	chip->irq_mask |= 1 << (irq - chip->irq_base);
+	chip->irq_mask |= 1 << (d->irq - chip->irq_base);
 }
 
-static void pca953x_irq_bus_lock(unsigned int irq)
+static void pca953x_irq_bus_lock(struct irq_data *d)
 {
-	struct pca953x_chip *chip = get_irq_chip_data(irq);
+	struct pca953x_chip *chip = irq_data_get_irq_chip_data(d);
 
 	mutex_lock(&chip->irq_lock);
 }
 
-static void pca953x_irq_bus_sync_unlock(unsigned int irq)
+static void pca953x_irq_bus_sync_unlock(struct irq_data *d)
 {
-	struct pca953x_chip *chip = get_irq_chip_data(irq);
+	struct pca953x_chip *chip = irq_data_get_irq_chip_data(d);
 	uint16_t new_irqs;
 	uint16_t level;
 
@@ -268,15 +268,15 @@
 	mutex_unlock(&chip->irq_lock);
 }
 
-static int pca953x_irq_set_type(unsigned int irq, unsigned int type)
+static int pca953x_irq_set_type(struct irq_data *d, unsigned int type)
 {
-	struct pca953x_chip *chip = get_irq_chip_data(irq);
-	uint16_t level = irq - chip->irq_base;
+	struct pca953x_chip *chip = irq_data_get_irq_chip_data(d);
+	uint16_t level = d->irq - chip->irq_base;
 	uint16_t mask = 1 << level;
 
 	if (!(type & IRQ_TYPE_EDGE_BOTH)) {
 		dev_err(&chip->client->dev, "irq %d: unsupported type %d\n",
-			irq, type);
+			d->irq, type);
 		return -EINVAL;
 	}
 
@@ -295,11 +295,11 @@
 
 static struct irq_chip pca953x_irq_chip = {
 	.name			= "pca953x",
-	.mask			= pca953x_irq_mask,
-	.unmask			= pca953x_irq_unmask,
-	.bus_lock		= pca953x_irq_bus_lock,
-	.bus_sync_unlock	= pca953x_irq_bus_sync_unlock,
-	.set_type		= pca953x_irq_set_type,
+	.irq_mask		= pca953x_irq_mask,
+	.irq_unmask		= pca953x_irq_unmask,
+	.irq_bus_lock		= pca953x_irq_bus_lock,
+	.irq_bus_sync_unlock	= pca953x_irq_bus_sync_unlock,
+	.irq_set_type		= pca953x_irq_set_type,
 };
 
 static uint16_t pca953x_irq_pending(struct pca953x_chip *chip)
diff --git a/drivers/gpio/pl061.c b/drivers/gpio/pl061.c
index 5005990..2975d22 100644
--- a/drivers/gpio/pl061.c
+++ b/drivers/gpio/pl061.c
@@ -129,10 +129,10 @@
 /*
  * PL061 GPIO IRQ
  */
-static void pl061_irq_disable(unsigned irq)
+static void pl061_irq_disable(struct irq_data *d)
 {
-	struct pl061_gpio *chip = get_irq_chip_data(irq);
-	int offset = irq - chip->irq_base;
+	struct pl061_gpio *chip = irq_data_get_irq_chip_data(d);
+	int offset = d->irq - chip->irq_base;
 	unsigned long flags;
 	u8 gpioie;
 
@@ -143,10 +143,10 @@
 	spin_unlock_irqrestore(&chip->irq_lock, flags);
 }
 
-static void pl061_irq_enable(unsigned irq)
+static void pl061_irq_enable(struct irq_data *d)
 {
-	struct pl061_gpio *chip = get_irq_chip_data(irq);
-	int offset = irq - chip->irq_base;
+	struct pl061_gpio *chip = irq_data_get_irq_chip_data(d);
+	int offset = d->irq - chip->irq_base;
 	unsigned long flags;
 	u8 gpioie;
 
@@ -157,10 +157,10 @@
 	spin_unlock_irqrestore(&chip->irq_lock, flags);
 }
 
-static int pl061_irq_type(unsigned irq, unsigned trigger)
+static int pl061_irq_type(struct irq_data *d, unsigned trigger)
 {
-	struct pl061_gpio *chip = get_irq_chip_data(irq);
-	int offset = irq - chip->irq_base;
+	struct pl061_gpio *chip = irq_data_get_irq_chip_data(d);
+	int offset = d->irq - chip->irq_base;
 	unsigned long flags;
 	u8 gpiois, gpioibe, gpioiev;
 
@@ -203,9 +203,9 @@
 
 static struct irq_chip pl061_irqchip = {
 	.name		= "GPIO",
-	.enable		= pl061_irq_enable,
-	.disable	= pl061_irq_disable,
-	.set_type	= pl061_irq_type,
+	.irq_enable	= pl061_irq_enable,
+	.irq_disable	= pl061_irq_disable,
+	.irq_set_type	= pl061_irq_type,
 };
 
 static void pl061_irq_handler(unsigned irq, struct irq_desc *desc)
@@ -214,7 +214,7 @@
 	struct list_head *ptr;
 	struct pl061_gpio *chip;
 
-	desc->chip->ack(irq);
+	desc->irq_data.chip->irq_ack(&desc->irq_data);
 	list_for_each(ptr, chip_list) {
 		unsigned long pending;
 		int offset;
@@ -229,7 +229,7 @@
 		for_each_set_bit(offset, &pending, PL061_GPIO_NR)
 			generic_handle_irq(pl061_to_irq(&chip->gc, offset));
 	}
-	desc->chip->unmask(irq);
+	desc->irq_data.chip->irq_unmask(&desc->irq_data);
 }
 
 static int pl061_probe(struct amba_device *dev, struct amba_id *id)
diff --git a/drivers/gpio/stmpe-gpio.c b/drivers/gpio/stmpe-gpio.c
index 7c9e6a0..eb2901f 100644
--- a/drivers/gpio/stmpe-gpio.c
+++ b/drivers/gpio/stmpe-gpio.c
@@ -122,10 +122,10 @@
 	.can_sleep		= 1,
 };
 
-static int stmpe_gpio_irq_set_type(unsigned int irq, unsigned int type)
+static int stmpe_gpio_irq_set_type(struct irq_data *d, unsigned int type)
 {
-	struct stmpe_gpio *stmpe_gpio = get_irq_chip_data(irq);
-	int offset = irq - stmpe_gpio->irq_base;
+	struct stmpe_gpio *stmpe_gpio = irq_data_get_irq_chip_data(d);
+	int offset = d->irq - stmpe_gpio->irq_base;
 	int regoffset = offset / 8;
 	int mask = 1 << (offset % 8);
 
@@ -145,16 +145,16 @@
 	return 0;
 }
 
-static void stmpe_gpio_irq_lock(unsigned int irq)
+static void stmpe_gpio_irq_lock(struct irq_data *d)
 {
-	struct stmpe_gpio *stmpe_gpio = get_irq_chip_data(irq);
+	struct stmpe_gpio *stmpe_gpio = irq_data_get_irq_chip_data(d);
 
 	mutex_lock(&stmpe_gpio->irq_lock);
 }
 
-static void stmpe_gpio_irq_sync_unlock(unsigned int irq)
+static void stmpe_gpio_irq_sync_unlock(struct irq_data *d)
 {
-	struct stmpe_gpio *stmpe_gpio = get_irq_chip_data(irq);
+	struct stmpe_gpio *stmpe_gpio = irq_data_get_irq_chip_data(d);
 	struct stmpe *stmpe = stmpe_gpio->stmpe;
 	int num_banks = DIV_ROUND_UP(stmpe->num_gpios, 8);
 	static const u8 regmap[] = {
@@ -180,20 +180,20 @@
 	mutex_unlock(&stmpe_gpio->irq_lock);
 }
 
-static void stmpe_gpio_irq_mask(unsigned int irq)
+static void stmpe_gpio_irq_mask(struct irq_data *d)
 {
-	struct stmpe_gpio *stmpe_gpio = get_irq_chip_data(irq);
-	int offset = irq - stmpe_gpio->irq_base;
+	struct stmpe_gpio *stmpe_gpio = irq_data_get_irq_chip_data(d);
+	int offset = d->irq - stmpe_gpio->irq_base;
 	int regoffset = offset / 8;
 	int mask = 1 << (offset % 8);
 
 	stmpe_gpio->regs[REG_IE][regoffset] &= ~mask;
 }
 
-static void stmpe_gpio_irq_unmask(unsigned int irq)
+static void stmpe_gpio_irq_unmask(struct irq_data *d)
 {
-	struct stmpe_gpio *stmpe_gpio = get_irq_chip_data(irq);
-	int offset = irq - stmpe_gpio->irq_base;
+	struct stmpe_gpio *stmpe_gpio = irq_data_get_irq_chip_data(d);
+	int offset = d->irq - stmpe_gpio->irq_base;
 	int regoffset = offset / 8;
 	int mask = 1 << (offset % 8);
 
@@ -202,11 +202,11 @@
 
 static struct irq_chip stmpe_gpio_irq_chip = {
 	.name			= "stmpe-gpio",
-	.bus_lock		= stmpe_gpio_irq_lock,
-	.bus_sync_unlock	= stmpe_gpio_irq_sync_unlock,
-	.mask			= stmpe_gpio_irq_mask,
-	.unmask			= stmpe_gpio_irq_unmask,
-	.set_type		= stmpe_gpio_irq_set_type,
+	.irq_bus_lock		= stmpe_gpio_irq_lock,
+	.irq_bus_sync_unlock	= stmpe_gpio_irq_sync_unlock,
+	.irq_mask		= stmpe_gpio_irq_mask,
+	.irq_unmask		= stmpe_gpio_irq_unmask,
+	.irq_set_type		= stmpe_gpio_irq_set_type,
 };
 
 static irqreturn_t stmpe_gpio_irq(int irq, void *dev)
diff --git a/drivers/gpio/sx150x.c b/drivers/gpio/sx150x.c
index 823559a..e60be001 100644
--- a/drivers/gpio/sx150x.c
+++ b/drivers/gpio/sx150x.c
@@ -304,36 +304,36 @@
 	return chip->irq_base + offset;
 }
 
-static void sx150x_irq_mask(unsigned int irq)
+static void sx150x_irq_mask(struct irq_data *d)
 {
-	struct irq_chip *ic = get_irq_chip(irq);
+	struct irq_chip *ic = irq_data_get_irq_chip(d);
 	struct sx150x_chip *chip;
 	unsigned n;
 
 	chip = container_of(ic, struct sx150x_chip, irq_chip);
-	n = irq - chip->irq_base;
+	n = d->irq - chip->irq_base;
 
 	sx150x_write_cfg(chip, n, 1, chip->dev_cfg->reg_irq_mask, 1);
 	sx150x_write_cfg(chip, n, 2, chip->dev_cfg->reg_sense, 0);
 }
 
-static void sx150x_irq_unmask(unsigned int irq)
+static void sx150x_irq_unmask(struct irq_data *d)
 {
-	struct irq_chip *ic = get_irq_chip(irq);
+	struct irq_chip *ic = irq_data_get_irq_chip(d);
 	struct sx150x_chip *chip;
 	unsigned n;
 
 	chip = container_of(ic, struct sx150x_chip, irq_chip);
-	n = irq - chip->irq_base;
+	n = d->irq - chip->irq_base;
 
 	sx150x_write_cfg(chip, n, 1, chip->dev_cfg->reg_irq_mask, 0);
 	sx150x_write_cfg(chip, n, 2, chip->dev_cfg->reg_sense,
 			 chip->irq_sense >> (n * 2));
 }
 
-static int sx150x_irq_set_type(unsigned int irq, unsigned int flow_type)
+static int sx150x_irq_set_type(struct irq_data *d, unsigned int flow_type)
 {
-	struct irq_chip *ic = get_irq_chip(irq);
+	struct irq_chip *ic = irq_data_get_irq_chip(d);
 	struct sx150x_chip *chip;
 	unsigned n, val = 0;
 
@@ -341,7 +341,7 @@
 		return -EINVAL;
 
 	chip = container_of(ic, struct sx150x_chip, irq_chip);
-	n = irq - chip->irq_base;
+	n = d->irq - chip->irq_base;
 
 	if (flow_type & IRQ_TYPE_EDGE_RISING)
 		val |= 0x1;
@@ -386,9 +386,9 @@
 	return (nhandled > 0 ? IRQ_HANDLED : IRQ_NONE);
 }
 
-static void sx150x_irq_bus_lock(unsigned int irq)
+static void sx150x_irq_bus_lock(struct irq_data *d)
 {
-	struct irq_chip *ic = get_irq_chip(irq);
+	struct irq_chip *ic = irq_data_get_irq_chip(d);
 	struct sx150x_chip *chip;
 
 	chip = container_of(ic, struct sx150x_chip, irq_chip);
@@ -396,9 +396,9 @@
 	mutex_lock(&chip->lock);
 }
 
-static void sx150x_irq_bus_sync_unlock(unsigned int irq)
+static void sx150x_irq_bus_sync_unlock(struct irq_data *d)
 {
-	struct irq_chip *ic = get_irq_chip(irq);
+	struct irq_chip *ic = irq_data_get_irq_chip(d);
 	struct sx150x_chip *chip;
 	unsigned n;
 
@@ -437,16 +437,16 @@
 	if (pdata->oscio_is_gpo)
 		++chip->gpio_chip.ngpio;
 
-	chip->irq_chip.name            = client->name;
-	chip->irq_chip.mask            = sx150x_irq_mask;
-	chip->irq_chip.unmask          = sx150x_irq_unmask;
-	chip->irq_chip.set_type        = sx150x_irq_set_type;
-	chip->irq_chip.bus_lock        = sx150x_irq_bus_lock;
-	chip->irq_chip.bus_sync_unlock = sx150x_irq_bus_sync_unlock;
-	chip->irq_summary              = -1;
-	chip->irq_base                 = -1;
-	chip->irq_sense                = 0;
-	chip->irq_set_type_pending     = 0;
+	chip->irq_chip.name                = client->name;
+	chip->irq_chip.irq_mask            = sx150x_irq_mask;
+	chip->irq_chip.irq_unmask          = sx150x_irq_unmask;
+	chip->irq_chip.irq_set_type        = sx150x_irq_set_type;
+	chip->irq_chip.irq_bus_lock        = sx150x_irq_bus_lock;
+	chip->irq_chip.irq_bus_sync_unlock = sx150x_irq_bus_sync_unlock;
+	chip->irq_summary                  = -1;
+	chip->irq_base                     = -1;
+	chip->irq_sense                    = 0;
+	chip->irq_set_type_pending         = 0;
 }
 
 static int sx150x_init_io(struct sx150x_chip *chip, u8 base, u16 cfg)
diff --git a/drivers/gpio/tc3589x-gpio.c b/drivers/gpio/tc3589x-gpio.c
index 180d584..27200af 100644
--- a/drivers/gpio/tc3589x-gpio.c
+++ b/drivers/gpio/tc3589x-gpio.c
@@ -110,10 +110,10 @@
 	.can_sleep		= 1,
 };
 
-static int tc3589x_gpio_irq_set_type(unsigned int irq, unsigned int type)
+static int tc3589x_gpio_irq_set_type(struct irq_data *d, unsigned int type)
 {
-	struct tc3589x_gpio *tc3589x_gpio = get_irq_chip_data(irq);
-	int offset = irq - tc3589x_gpio->irq_base;
+	struct tc3589x_gpio *tc3589x_gpio = irq_data_get_irq_chip_data(d);
+	int offset = d->irq - tc3589x_gpio->irq_base;
 	int regoffset = offset / 8;
 	int mask = 1 << (offset % 8);
 
@@ -137,16 +137,16 @@
 	return 0;
 }
 
-static void tc3589x_gpio_irq_lock(unsigned int irq)
+static void tc3589x_gpio_irq_lock(struct irq_data *d)
 {
-	struct tc3589x_gpio *tc3589x_gpio = get_irq_chip_data(irq);
+	struct tc3589x_gpio *tc3589x_gpio = irq_data_get_irq_chip_data(d);
 
 	mutex_lock(&tc3589x_gpio->irq_lock);
 }
 
-static void tc3589x_gpio_irq_sync_unlock(unsigned int irq)
+static void tc3589x_gpio_irq_sync_unlock(struct irq_data *d)
 {
-	struct tc3589x_gpio *tc3589x_gpio = get_irq_chip_data(irq);
+	struct tc3589x_gpio *tc3589x_gpio = irq_data_get_irq_chip_data(d);
 	struct tc3589x *tc3589x = tc3589x_gpio->tc3589x;
 	static const u8 regmap[] = {
 		[REG_IBE]	= TC3589x_GPIOIBE0,
@@ -172,20 +172,20 @@
 	mutex_unlock(&tc3589x_gpio->irq_lock);
 }
 
-static void tc3589x_gpio_irq_mask(unsigned int irq)
+static void tc3589x_gpio_irq_mask(struct irq_data *d)
 {
-	struct tc3589x_gpio *tc3589x_gpio = get_irq_chip_data(irq);
-	int offset = irq - tc3589x_gpio->irq_base;
+	struct tc3589x_gpio *tc3589x_gpio = irq_data_get_irq_chip_data(d);
+	int offset = d->irq - tc3589x_gpio->irq_base;
 	int regoffset = offset / 8;
 	int mask = 1 << (offset % 8);
 
 	tc3589x_gpio->regs[REG_IE][regoffset] &= ~mask;
 }
 
-static void tc3589x_gpio_irq_unmask(unsigned int irq)
+static void tc3589x_gpio_irq_unmask(struct irq_data *d)
 {
-	struct tc3589x_gpio *tc3589x_gpio = get_irq_chip_data(irq);
-	int offset = irq - tc3589x_gpio->irq_base;
+	struct tc3589x_gpio *tc3589x_gpio = irq_data_get_irq_chip_data(d);
+	int offset = d->irq - tc3589x_gpio->irq_base;
 	int regoffset = offset / 8;
 	int mask = 1 << (offset % 8);
 
@@ -194,11 +194,11 @@
 
 static struct irq_chip tc3589x_gpio_irq_chip = {
 	.name			= "tc3589x-gpio",
-	.bus_lock		= tc3589x_gpio_irq_lock,
-	.bus_sync_unlock	= tc3589x_gpio_irq_sync_unlock,
-	.mask			= tc3589x_gpio_irq_mask,
-	.unmask			= tc3589x_gpio_irq_unmask,
-	.set_type		= tc3589x_gpio_irq_set_type,
+	.irq_bus_lock		= tc3589x_gpio_irq_lock,
+	.irq_bus_sync_unlock	= tc3589x_gpio_irq_sync_unlock,
+	.irq_mask		= tc3589x_gpio_irq_mask,
+	.irq_unmask		= tc3589x_gpio_irq_unmask,
+	.irq_set_type		= tc3589x_gpio_irq_set_type,
 };
 
 static irqreturn_t tc3589x_gpio_irq(int irq, void *dev)
diff --git a/drivers/gpio/timbgpio.c b/drivers/gpio/timbgpio.c
index 4529366..349131e 100644
--- a/drivers/gpio/timbgpio.c
+++ b/drivers/gpio/timbgpio.c
@@ -109,10 +109,10 @@
 /*
  * GPIO IRQ
  */
-static void timbgpio_irq_disable(unsigned irq)
+static void timbgpio_irq_disable(struct irq_data *d)
 {
-	struct timbgpio *tgpio = get_irq_chip_data(irq);
-	int offset = irq - tgpio->irq_base;
+	struct timbgpio *tgpio = irq_data_get_irq_chip_data(d);
+	int offset = d->irq - tgpio->irq_base;
 	unsigned long flags;
 
 	spin_lock_irqsave(&tgpio->lock, flags);
@@ -121,10 +121,10 @@
 	spin_unlock_irqrestore(&tgpio->lock, flags);
 }
 
-static void timbgpio_irq_enable(unsigned irq)
+static void timbgpio_irq_enable(struct irq_data *d)
 {
-	struct timbgpio *tgpio = get_irq_chip_data(irq);
-	int offset = irq - tgpio->irq_base;
+	struct timbgpio *tgpio = irq_data_get_irq_chip_data(d);
+	int offset = d->irq - tgpio->irq_base;
 	unsigned long flags;
 
 	spin_lock_irqsave(&tgpio->lock, flags);
@@ -133,10 +133,10 @@
 	spin_unlock_irqrestore(&tgpio->lock, flags);
 }
 
-static int timbgpio_irq_type(unsigned irq, unsigned trigger)
+static int timbgpio_irq_type(struct irq_data *d, unsigned trigger)
 {
-	struct timbgpio *tgpio = get_irq_chip_data(irq);
-	int offset = irq - tgpio->irq_base;
+	struct timbgpio *tgpio = irq_data_get_irq_chip_data(d);
+	int offset = d->irq - tgpio->irq_base;
 	unsigned long flags;
 	u32 lvr, flr, bflr = 0;
 	u32 ver;
@@ -193,13 +193,13 @@
 	return ret;
 }
 
-static void timbgpio_irq(unsigned int irq, struct irq_desc *desc)
+static void timbgpio_irq(struct irq_data *d, struct irq_desc *desc)
 {
-	struct timbgpio *tgpio = get_irq_data(irq);
+	struct timbgpio *tgpio = irq_data_get_irq_data(d);
 	unsigned long ipr;
 	int offset;
 
-	desc->chip->ack(irq);
+	desc->irq_data.chip->ack(irq_get_irq_data(d));
 	ipr = ioread32(tgpio->membase + TGPIO_IPR);
 	iowrite32(ipr, tgpio->membase + TGPIO_ICR);
 
@@ -217,9 +217,9 @@
 
 static struct irq_chip timbgpio_irqchip = {
 	.name		= "GPIO",
-	.enable		= timbgpio_irq_enable,
-	.disable	= timbgpio_irq_disable,
-	.set_type	= timbgpio_irq_type,
+	.irq_enable	= timbgpio_irq_enable,
+	.irq_disable	= timbgpio_irq_disable,
+	.irq_set_type	= timbgpio_irq_type,
 };
 
 static int __devinit timbgpio_probe(struct platform_device *pdev)
diff --git a/drivers/gpio/vr41xx_giu.c b/drivers/gpio/vr41xx_giu.c
index b16c9a8..cffa3bd 100644
--- a/drivers/gpio/vr41xx_giu.c
+++ b/drivers/gpio/vr41xx_giu.c
@@ -111,69 +111,69 @@
 	return data;
 }
 
-static void ack_giuint_low(unsigned int irq)
+static void ack_giuint_low(struct irq_data *d)
 {
-	giu_write(GIUINTSTATL, 1 << GPIO_PIN_OF_IRQ(irq));
+	giu_write(GIUINTSTATL, 1 << GPIO_PIN_OF_IRQ(d->irq));
 }
 
-static void mask_giuint_low(unsigned int irq)
+static void mask_giuint_low(struct irq_data *d)
 {
-	giu_clear(GIUINTENL, 1 << GPIO_PIN_OF_IRQ(irq));
+	giu_clear(GIUINTENL, 1 << GPIO_PIN_OF_IRQ(d->irq));
 }
 
-static void mask_ack_giuint_low(unsigned int irq)
+static void mask_ack_giuint_low(struct irq_data *d)
 {
 	unsigned int pin;
 
-	pin = GPIO_PIN_OF_IRQ(irq);
+	pin = GPIO_PIN_OF_IRQ(d->irq);
 	giu_clear(GIUINTENL, 1 << pin);
 	giu_write(GIUINTSTATL, 1 << pin);
 }
 
-static void unmask_giuint_low(unsigned int irq)
+static void unmask_giuint_low(struct irq_data *d)
 {
-	giu_set(GIUINTENL, 1 << GPIO_PIN_OF_IRQ(irq));
+	giu_set(GIUINTENL, 1 << GPIO_PIN_OF_IRQ(d->irq));
 }
 
 static struct irq_chip giuint_low_irq_chip = {
 	.name		= "GIUINTL",
-	.ack		= ack_giuint_low,
-	.mask		= mask_giuint_low,
-	.mask_ack	= mask_ack_giuint_low,
-	.unmask		= unmask_giuint_low,
+	.irq_ack	= ack_giuint_low,
+	.irq_mask	= mask_giuint_low,
+	.irq_mask_ack	= mask_ack_giuint_low,
+	.irq_unmask	= unmask_giuint_low,
 };
 
-static void ack_giuint_high(unsigned int irq)
+static void ack_giuint_high(struct irq_data *d)
 {
 	giu_write(GIUINTSTATH,
-		  1 << (GPIO_PIN_OF_IRQ(irq) - GIUINT_HIGH_OFFSET));
+		  1 << (GPIO_PIN_OF_IRQ(d->irq) - GIUINT_HIGH_OFFSET));
 }
 
-static void mask_giuint_high(unsigned int irq)
+static void mask_giuint_high(struct irq_data *d)
 {
-	giu_clear(GIUINTENH, 1 << (GPIO_PIN_OF_IRQ(irq) - GIUINT_HIGH_OFFSET));
+	giu_clear(GIUINTENH, 1 << (GPIO_PIN_OF_IRQ(d->irq) - GIUINT_HIGH_OFFSET));
 }
 
-static void mask_ack_giuint_high(unsigned int irq)
+static void mask_ack_giuint_high(struct irq_data *d)
 {
 	unsigned int pin;
 
-	pin = GPIO_PIN_OF_IRQ(irq) - GIUINT_HIGH_OFFSET;
+	pin = GPIO_PIN_OF_IRQ(d->irq) - GIUINT_HIGH_OFFSET;
 	giu_clear(GIUINTENH, 1 << pin);
 	giu_write(GIUINTSTATH, 1 << pin);
 }
 
-static void unmask_giuint_high(unsigned int irq)
+static void unmask_giuint_high(struct irq_data *d)
 {
-	giu_set(GIUINTENH, 1 << (GPIO_PIN_OF_IRQ(irq) - GIUINT_HIGH_OFFSET));
+	giu_set(GIUINTENH, 1 << (GPIO_PIN_OF_IRQ(d->irq) - GIUINT_HIGH_OFFSET));
 }
 
 static struct irq_chip giuint_high_irq_chip = {
 	.name		= "GIUINTH",
-	.ack		= ack_giuint_high,
-	.mask		= mask_giuint_high,
-	.mask_ack	= mask_ack_giuint_high,
-	.unmask		= unmask_giuint_high,
+	.irq_ack	= ack_giuint_high,
+	.irq_mask	= mask_giuint_high,
+	.irq_mask_ack	= mask_ack_giuint_high,
+	.irq_unmask	= unmask_giuint_high,
 };
 
 static int giu_get_irq(unsigned int irq)
diff --git a/drivers/gpio/wm8994-gpio.c b/drivers/gpio/wm8994-gpio.c
index 618398e..c822baa 100644
--- a/drivers/gpio/wm8994-gpio.c
+++ b/drivers/gpio/wm8994-gpio.c
@@ -35,6 +35,29 @@
 	return container_of(chip, struct wm8994_gpio, gpio_chip);
 }
 
+static int wm8994_gpio_request(struct gpio_chip *chip, unsigned offset)
+{
+	struct wm8994_gpio *wm8994_gpio = to_wm8994_gpio(chip);
+	struct wm8994 *wm8994 = wm8994_gpio->wm8994;
+
+	switch (wm8994->type) {
+	case WM8958:
+		switch (offset) {
+		case 1:
+		case 2:
+		case 3:
+		case 4:
+		case 6:
+			return -EINVAL;
+		}
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
 static int wm8994_gpio_direction_in(struct gpio_chip *chip, unsigned offset)
 {
 	struct wm8994_gpio *wm8994_gpio = to_wm8994_gpio(chip);
@@ -136,6 +159,7 @@
 static struct gpio_chip template_chip = {
 	.label			= "wm8994",
 	.owner			= THIS_MODULE,
+	.request		= wm8994_gpio_request,
 	.direction_input	= wm8994_gpio_direction_in,
 	.get			= wm8994_gpio_get,
 	.direction_output	= wm8994_gpio_direction_out,
diff --git a/drivers/gpu/drm/drm_agpsupport.c b/drivers/gpu/drm/drm_agpsupport.c
index 252fdb9..0cb2ba5 100644
--- a/drivers/gpu/drm/drm_agpsupport.c
+++ b/drivers/gpu/drm/drm_agpsupport.c
@@ -466,10 +466,4 @@
 }
 EXPORT_SYMBOL(drm_agp_bind_pages);
 
-void drm_agp_chipset_flush(struct drm_device *dev)
-{
-	agp_flush_chipset(dev->agp->bridge);
-}
-EXPORT_SYMBOL(drm_agp_chipset_flush);
-
 #endif /* __OS_HAS_AGP */
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 2d4e17a..952b3d4 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -336,7 +336,7 @@
 			      struct drm_framebuffer *old_fb)
 {
 	struct drm_device *dev = crtc->dev;
-	struct drm_display_mode *adjusted_mode, saved_mode;
+	struct drm_display_mode *adjusted_mode, saved_mode, saved_hwmode;
 	struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
 	struct drm_encoder_helper_funcs *encoder_funcs;
 	int saved_x, saved_y;
@@ -350,6 +350,7 @@
 	if (!crtc->enabled)
 		return true;
 
+	saved_hwmode = crtc->hwmode;
 	saved_mode = crtc->mode;
 	saved_x = crtc->x;
 	saved_y = crtc->y;
@@ -427,11 +428,21 @@
 
 	}
 
+	/* Store real post-adjustment hardware mode. */
+	crtc->hwmode = *adjusted_mode;
+
+	/* Calculate and store various constants which
+	 * are later needed by vblank and swap-completion
+	 * timestamping. They are derived from true hwmode.
+	 */
+	drm_calc_timestamping_constants(crtc);
+
 	/* XXX free adjustedmode */
 	drm_mode_destroy(dev, adjusted_mode);
 	/* FIXME: add subpixel order */
 done:
 	if (!ret) {
+		crtc->hwmode = saved_hwmode;
 		crtc->mode = saved_mode;
 		crtc->x = saved_x;
 		crtc->y = saved_y;
@@ -650,6 +661,7 @@
 						      old_fb)) {
 				DRM_ERROR("failed to set mode on [CRTC:%d]\n",
 					  set->crtc->base.id);
+				set->crtc->fb = old_fb;
 				ret = -EINVAL;
 				goto fail;
 			}
@@ -664,8 +676,10 @@
 			set->crtc->fb = set->fb;
 		ret = crtc_funcs->mode_set_base(set->crtc,
 						set->x, set->y, old_fb);
-		if (ret != 0)
+		if (ret != 0) {
+			set->crtc->fb = old_fb;
 			goto fail;
+		}
 	}
 	DRM_DEBUG_KMS("Setting connector DPMS state to on\n");
 	for (i = 0; i < set->num_connectors; i++) {
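
Two behavioural details in the drm_crtc_helper changes above are worth noting. First, the helper now snapshots crtc->hwmode alongside crtc->mode, x and y, so a failed mode set restores the previously programmed hardware timings as well as the user-visible state, and the error paths in the set_config path put the old framebuffer back into set->crtc->fb instead of leaving a pointer to a framebuffer that was never actually programmed. Second, the stored hwmode is the mode after the encoder/crtc mode_fixup adjustments, and it is this mode that drm_calc_timestamping_constants() consumes, so vblank and swap-completion timestamps are derived from the timings the hardware really scans out rather than from the mode the caller requested.
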
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index d2849e4..0307d60 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -607,6 +607,25 @@
 }
 EXPORT_SYMBOL(drm_fb_helper_fini);
 
+void drm_fb_helper_fill_fix(struct fb_info *info, struct drm_framebuffer *fb)
+{
+	info->fix.type = FB_TYPE_PACKED_PIXELS;
+	info->fix.visual = fb->depth == 8 ? FB_VISUAL_PSEUDOCOLOR :
+		FB_VISUAL_TRUECOLOR;
+	info->fix.mmio_start = 0;
+	info->fix.mmio_len = 0;
+	info->fix.type_aux = 0;
+	info->fix.xpanstep = 1; /* doing it in hw */
+	info->fix.ypanstep = 1; /* doing it in hw */
+	info->fix.ywrapstep = 0;
+	info->fix.accel = FB_ACCEL_NONE;
+	info->fix.type_aux = 0;
+
+	info->fix.line_length = fb->pitch;
+	return;
+}
+EXPORT_SYMBOL(drm_fb_helper_fill_fix);
+
 static int setcolreg(struct drm_crtc *crtc, u16 red, u16 green,
 		     u16 blue, u16 regno, struct fb_info *info)
 {
@@ -816,6 +835,7 @@
 			mutex_unlock(&dev->mode_config.mutex);
 			return ret;
 		}
+		drm_fb_helper_fill_fix(info, fb_helper->fb);
 	}
 	mutex_unlock(&dev->mode_config.mutex);
 
@@ -953,6 +973,7 @@
 
 	if (new_fb) {
 		info->var.pixclock = 0;
+		drm_fb_helper_fill_fix(info, fb_helper->fb);
 		if (register_framebuffer(info) < 0) {
 			return -EINVAL;
 		}
@@ -979,24 +1000,6 @@
 }
 EXPORT_SYMBOL(drm_fb_helper_single_fb_probe);
 
-void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch,
-			    uint32_t depth)
-{
-	info->fix.type = FB_TYPE_PACKED_PIXELS;
-	info->fix.visual = depth == 8 ? FB_VISUAL_PSEUDOCOLOR :
-		FB_VISUAL_TRUECOLOR;
-	info->fix.type_aux = 0;
-	info->fix.xpanstep = 1; /* doing it in hw */
-	info->fix.ypanstep = 1; /* doing it in hw */
-	info->fix.ywrapstep = 0;
-	info->fix.accel = FB_ACCEL_NONE;
-	info->fix.type_aux = 0;
-
-	info->fix.line_length = pitch;
-	return;
-}
-EXPORT_SYMBOL(drm_fb_helper_fill_fix);
-
 void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helper,
 			    uint32_t fb_width, uint32_t fb_height)
 {
@@ -1005,6 +1008,7 @@
 	info->var.xres_virtual = fb->width;
 	info->var.yres_virtual = fb->height;
 	info->var.bits_per_pixel = fb->bits_per_pixel;
+	info->var.accel_flags = FB_ACCELF_TEXT;
 	info->var.xoffset = 0;
 	info->var.yoffset = 0;
 	info->var.activate = FB_ACTIVATE_NOW;
@@ -1530,3 +1534,24 @@
 }
 EXPORT_SYMBOL(drm_fb_helper_hotplug_event);
 
+/* The Kconfig DRM_KMS_HELPER selects FRAMEBUFFER_CONSOLE (if !EMBEDDED)
+ * but the module doesn't depend on any fb console symbols.  At least
+ * attempt to load fbcon to avoid leaving the system without a usable console.
+ */
+#if defined(CONFIG_FRAMEBUFFER_CONSOLE_MODULE) && !defined(CONFIG_EMBEDDED)
+static int __init drm_fb_helper_modinit(void)
+{
+	const char *name = "fbcon";
+	struct module *fbcon;
+
+	mutex_lock(&module_mutex);
+	fbcon = find_module(name);
+	mutex_unlock(&module_mutex);
+
+	if (!fbcon)
+		request_module_nowait(name);
+	return 0;
+}
+
+module_init(drm_fb_helper_modinit);
+#endif
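
With the drm_fb_helper changes above, drm_fb_helper_fill_fix() now derives everything from the drm_framebuffer, and the helper core calls it itself after a framebuffer is created or reconfigured (the two added call sites above). A driver-side sketch of adapting an old call site to the new signature; mydrv_fbdev_fill and its arguments are hypothetical, only the two fill_* helpers come from the code above:

	static void mydrv_fbdev_fill(struct drm_fb_helper *helper,
				     struct fb_info *info,
				     struct drm_framebuffer *fb)
	{
		/* Old signature: drm_fb_helper_fill_fix(info, fb->pitch, fb->depth); */
		drm_fb_helper_fill_fix(info, fb);

		drm_fb_helper_fill_var(info, helper, fb->width, fb->height);
	}
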
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index a39794b..2ec7d48 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -236,6 +236,8 @@
 		return -EBUSY;	/* No exclusive opens */
 	if (!drm_cpu_valid())
 		return -EINVAL;
+	if (dev->switch_power_state != DRM_SWITCH_POWER_ON)
+		return -EINVAL;
 
 	DRM_DEBUG("pid = %d, minor = %d\n", task_pid_nr(current), minor_id);
 
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 16d5155..0054e95 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -40,6 +40,22 @@
 #include <linux/slab.h>
 
 #include <linux/vgaarb.h>
+
+/* Access macro for slots in vblank timestamp ringbuffer. */
+#define vblanktimestamp(dev, crtc, count) ( \
+	(dev)->_vblank_time[(crtc) * DRM_VBLANKTIME_RBSIZE + \
+	((count) % DRM_VBLANKTIME_RBSIZE)])
+
+/* Retry timestamp calculation up to 3 times to satisfy
+ * drm_timestamp_precision before giving up.
+ */
+#define DRM_TIMESTAMP_MAXRETRIES 3
+
+/* Threshold in nanoseconds for detection of redundant
+ * vblank irq in drm_handle_vblank(). 1 msec should be ok.
+ */
+#define DRM_REDUNDANT_VBLIRQ_THRESH_NS 1000000
+
 /**
  * Get interrupt from bus id.
  *
@@ -77,6 +93,87 @@
 	return 0;
 }
 
+/*
+ * Clear vblank timestamp buffer for a crtc.
+ */
+static void clear_vblank_timestamps(struct drm_device *dev, int crtc)
+{
+	memset(&dev->_vblank_time[crtc * DRM_VBLANKTIME_RBSIZE], 0,
+		DRM_VBLANKTIME_RBSIZE * sizeof(struct timeval));
+}
+
+/*
+ * Disable vblank irq's on crtc, make sure that last vblank count
+ * of hardware and corresponding consistent software vblank counter
+ * are preserved, even if there are any spurious vblank irq's after
+ * disable.
+ */
+static void vblank_disable_and_save(struct drm_device *dev, int crtc)
+{
+	unsigned long irqflags;
+	u32 vblcount;
+	s64 diff_ns;
+	int vblrc;
+	struct timeval tvblank;
+
+	/* Prevent vblank irq processing while disabling vblank irqs,
+	 * so no updates of timestamps or count can happen after we've
+	 * disabled. Needed to prevent races in case of delayed irq's.
+	 * Disable preemption, so vblank_time_lock is held as short as
+	 * possible, even under a kernel with PREEMPT_RT patches.
+	 */
+	preempt_disable();
+	spin_lock_irqsave(&dev->vblank_time_lock, irqflags);
+
+	dev->driver->disable_vblank(dev, crtc);
+	dev->vblank_enabled[crtc] = 0;
+
+	/* No further vblank irq's will be processed after
+	 * this point. Get current hardware vblank count and
+	 * vblank timestamp, repeat until they are consistent.
+	 *
+	 * FIXME: There is still a race condition here and in
+	 * drm_update_vblank_count() which can cause off-by-one
+	 * reinitialization of software vblank counter. If gpu
+	 * vblank counter doesn't increment exactly at the leading
+	 * edge of a vblank interval, then we can lose 1 count if
+	 * we happen to execute between start of vblank and the
+	 * delayed gpu counter increment.
+	 */
+	do {
+		dev->last_vblank[crtc] = dev->driver->get_vblank_counter(dev, crtc);
+		vblrc = drm_get_last_vbltimestamp(dev, crtc, &tvblank, 0);
+	} while (dev->last_vblank[crtc] != dev->driver->get_vblank_counter(dev, crtc));
+
+	/* Compute time difference to stored timestamp of last vblank
+	 * as updated by last invocation of drm_handle_vblank() in vblank irq.
+	 */
+	vblcount = atomic_read(&dev->_vblank_count[crtc]);
+	diff_ns = timeval_to_ns(&tvblank) -
+		  timeval_to_ns(&vblanktimestamp(dev, crtc, vblcount));
+
+	/* If there is at least 1 msec difference between the last stored
+	 * timestamp and tvblank, then we are currently executing our
+	 * disable inside a new vblank interval, the tvblank timestamp
+	 * corresponds to this new vblank interval and the irq handler
+	 * for this vblank didn't run yet and won't run due to our disable.
+	 * Therefore we need to do the job of drm_handle_vblank() and
+	 * increment the vblank counter by one to account for this vblank.
+	 *
+	 * Skip this step if there isn't any high precision timestamp
+	 * available. In that case we can't account for this and just
+	 * hope for the best.
+	 */
+	if ((vblrc > 0) && (abs(diff_ns) > 1000000))
+		atomic_inc(&dev->_vblank_count[crtc]);
+
+	/* Invalidate all timestamps while vblank irq's are off. */
+	clear_vblank_timestamps(dev, crtc);
+
+	spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);
+	preempt_enable();
+}
+
 static void vblank_disable_fn(unsigned long arg)
 {
 	struct drm_device *dev = (struct drm_device *)arg;
@@ -91,10 +188,7 @@
 		if (atomic_read(&dev->vblank_refcount[i]) == 0 &&
 		    dev->vblank_enabled[i]) {
 			DRM_DEBUG("disabling vblank on crtc %d\n", i);
-			dev->last_vblank[i] =
-				dev->driver->get_vblank_counter(dev, i);
-			dev->driver->disable_vblank(dev, i);
-			dev->vblank_enabled[i] = 0;
+			vblank_disable_and_save(dev, i);
 		}
 		spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
 	}
@@ -117,6 +211,7 @@
 	kfree(dev->last_vblank);
 	kfree(dev->last_vblank_wait);
 	kfree(dev->vblank_inmodeset);
+	kfree(dev->_vblank_time);
 
 	dev->num_crtcs = 0;
 }
@@ -129,6 +224,8 @@
 	setup_timer(&dev->vblank_disable_timer, vblank_disable_fn,
 		    (unsigned long)dev);
 	spin_lock_init(&dev->vbl_lock);
+	spin_lock_init(&dev->vblank_time_lock);
+
 	dev->num_crtcs = num_crtcs;
 
 	dev->vbl_queue = kmalloc(sizeof(wait_queue_head_t) * num_crtcs,
@@ -161,6 +258,19 @@
 	if (!dev->vblank_inmodeset)
 		goto err;
 
+	dev->_vblank_time = kcalloc(num_crtcs * DRM_VBLANKTIME_RBSIZE,
+				    sizeof(struct timeval), GFP_KERNEL);
+	if (!dev->_vblank_time)
+		goto err;
+
+	DRM_INFO("Supports vblank timestamp caching Rev 1 (10.10.2010).\n");
+
+	/* Driver specific high-precision vblank timestamping supported? */
+	if (dev->driver->get_vblank_timestamp)
+		DRM_INFO("Driver supports precise vblank timestamp query.\n");
+	else
+		DRM_INFO("No driver support for vblank timestamp query.\n");
+
 	/* Zero per-crtc vblank stuff */
 	for (i = 0; i < num_crtcs; i++) {
 		init_waitqueue_head(&dev->vbl_queue[i]);
@@ -279,7 +389,7 @@
  *
  * Calls the driver's \c drm_driver_irq_uninstall() function, and stops the irq.
  */
-int drm_irq_uninstall(struct drm_device * dev)
+int drm_irq_uninstall(struct drm_device *dev)
 {
 	unsigned long irqflags;
 	int irq_enabled, i;
@@ -335,7 +445,9 @@
 {
 	struct drm_control *ctl = data;
 
-	/* if we haven't irq we fallback for compatibility reasons - this used to be a separate function in drm_dma.h */
+	/* if we don't have an irq, fall back for compatibility reasons -
+	 * this used to be a separate function in drm_dma.h
+	 */
 
 
 	switch (ctl->func) {
@@ -360,6 +472,287 @@
 }
 
 /**
+ * drm_calc_timestamping_constants - Calculate and
+ * store various constants which are later needed by
+ * vblank and swap-completion timestamping, e.g, by
+ * drm_calc_vbltimestamp_from_scanoutpos().
+ * They are derived from crtc's true scanout timing,
+ * so they take things like panel scaling or other
+ * adjustments into account.
+ *
+ * @crtc: drm_crtc whose timestamp constants should be updated.
+ *
+ */
+void drm_calc_timestamping_constants(struct drm_crtc *crtc)
+{
+	s64 linedur_ns = 0, pixeldur_ns = 0, framedur_ns = 0;
+	u64 dotclock;
+
+	/* Dot clock in Hz: */
+	dotclock = (u64) crtc->hwmode.clock * 1000;
+
+	/* Valid dotclock? */
+	if (dotclock > 0) {
+		/* Convert scanline length in pixels and video dot clock to
+		 * line duration, frame duration and pixel duration in
+		 * nanoseconds:
+		 */
+		pixeldur_ns = (s64) div64_u64(1000000000, dotclock);
+		linedur_ns  = (s64) div64_u64(((u64) crtc->hwmode.crtc_htotal *
+					      1000000000), dotclock);
+		framedur_ns = (s64) crtc->hwmode.crtc_vtotal * linedur_ns;
+	} else
+		DRM_ERROR("crtc %d: Can't calculate constants, dotclock = 0!\n",
+			  crtc->base.id);
+
+	crtc->pixeldur_ns = pixeldur_ns;
+	crtc->linedur_ns  = linedur_ns;
+	crtc->framedur_ns = framedur_ns;
+
+	DRM_DEBUG("crtc %d: hwmode: htotal %d, vtotal %d, vdisplay %d\n",
+		  crtc->base.id, crtc->hwmode.crtc_htotal,
+		  crtc->hwmode.crtc_vtotal, crtc->hwmode.crtc_vdisplay);
+	DRM_DEBUG("crtc %d: clock %d kHz framedur %d linedur %d, pixeldur %d\n",
+		  crtc->base.id, (int) dotclock/1000, (int) framedur_ns,
+		  (int) linedur_ns, (int) pixeldur_ns);
+}
+EXPORT_SYMBOL(drm_calc_timestamping_constants);
+
+/**
+ * drm_calc_vbltimestamp_from_scanoutpos - helper routine for kms
+ * drivers. Implements calculation of exact vblank timestamps from
+ * given drm_display_mode timings and current video scanout position
+ * of a crtc. This can be called from within get_vblank_timestamp()
+ * implementation of a kms driver to implement the actual timestamping.
+ *
+ * Should return timestamps conforming to the OML_sync_control OpenML
+ * extension specification. The timestamp corresponds to the end of
+ * the vblank interval, aka start of scanout of topmost-leftmost display
+ * pixel in the following video frame.
+ *
+ * Requires support for optional dev->driver->get_scanout_position()
+ * in kms driver, plus a bit of setup code to provide a drm_display_mode
+ * that corresponds to the true scanout timing.
+ *
+ * The current implementation only handles standard video modes. It
+ * returns as no operation if a doublescan or interlaced video mode is
+ * active. Higher level code is expected to handle this.
+ *
+ * @dev: DRM device.
+ * @crtc: Which crtc's vblank timestamp to retrieve.
+ * @max_error: Desired maximum allowable error in timestamps (nanosecs).
+ *             On return contains true maximum error of timestamp.
+ * @vblank_time: Pointer to struct timeval which should receive the timestamp.
+ * @flags: Flags to pass to driver:
+ *         0 = Default.
+ *         DRM_CALLED_FROM_VBLIRQ = If function is called from vbl irq handler.
+ * @refcrtc: drm_crtc* of crtc which defines scanout timing.
+ *
+ * Returns negative value on error, failure or if not supported in current
+ * video mode:
+ *
+ * -EINVAL   - Invalid crtc.
+ * -EAGAIN   - Temporarily unavailable, e.g., called before initial modeset.
+ * -ENOTSUPP - Function not supported in current display mode.
+ * -EIO      - Failed, e.g., due to failed scanout position query.
+ *
+ * Returns or'ed positive status flags on success:
+ *
+ * DRM_VBLANKTIME_SCANOUTPOS_METHOD - Signal this method used for timestamping.
+ * DRM_VBLANKTIME_INVBL - Timestamp taken while scanout was in vblank interval.
+ *
+ */
+int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
+					  int *max_error,
+					  struct timeval *vblank_time,
+					  unsigned flags,
+					  struct drm_crtc *refcrtc)
+{
+	struct timeval stime, raw_time;
+	struct drm_display_mode *mode;
+	int vbl_status, vtotal, vdisplay;
+	int vpos, hpos, i;
+	s64 framedur_ns, linedur_ns, pixeldur_ns, delta_ns, duration_ns;
+	bool invbl;
+
+	if (crtc < 0 || crtc >= dev->num_crtcs) {
+		DRM_ERROR("Invalid crtc %d\n", crtc);
+		return -EINVAL;
+	}
+
+	/* Scanout position query not supported? Should not happen. */
+	if (!dev->driver->get_scanout_position) {
+		DRM_ERROR("Called from driver w/o get_scanout_position()!?\n");
+		return -EIO;
+	}
+
+	mode = &refcrtc->hwmode;
+	vtotal = mode->crtc_vtotal;
+	vdisplay = mode->crtc_vdisplay;
+
+	/* Durations of frames, lines, pixels in nanoseconds. */
+	framedur_ns = refcrtc->framedur_ns;
+	linedur_ns  = refcrtc->linedur_ns;
+	pixeldur_ns = refcrtc->pixeldur_ns;
+
+	/* If mode timing undefined, just return as no-op:
+	 * Happens during initial modesetting of a crtc.
+	 */
+	if (vtotal <= 0 || vdisplay <= 0 || framedur_ns == 0) {
+		DRM_DEBUG("crtc %d: Noop due to uninitialized mode.\n", crtc);
+		return -EAGAIN;
+	}
+
+	/* Don't know yet how to handle interlaced or
+	 * double scan modes. Just no-op for now.
+	 */
+	if (mode->flags & (DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLSCAN)) {
+		DRM_DEBUG("crtc %d: Noop due to unsupported mode.\n", crtc);
+		return -ENOTSUPP;
+	}
+
+	/* Get current scanout position with system timestamp.
+	 * Repeat query up to DRM_TIMESTAMP_MAXRETRIES times
+	 * if single query takes longer than max_error nanoseconds.
+	 *
+	 * This guarantees a tight bound on maximum error if
+	 * code gets preempted or delayed for some reason.
+	 */
+	for (i = 0; i < DRM_TIMESTAMP_MAXRETRIES; i++) {
+		/* Disable preemption to make it very likely to
+		 * succeed in the first iteration even on PREEMPT_RT kernel.
+		 */
+		preempt_disable();
+
+		/* Get system timestamp before query. */
+		do_gettimeofday(&stime);
+
+		/* Get vertical and horizontal scanout pos. vpos, hpos. */
+		vbl_status = dev->driver->get_scanout_position(dev, crtc, &vpos, &hpos);
+
+		/* Get system timestamp after query. */
+		do_gettimeofday(&raw_time);
+
+		preempt_enable();
+
+		/* Return as no-op if scanout query unsupported or failed. */
+		if (!(vbl_status & DRM_SCANOUTPOS_VALID)) {
+			DRM_DEBUG("crtc %d : scanoutpos query failed [%d].\n",
+				  crtc, vbl_status);
+			return -EIO;
+		}
+
+		duration_ns = timeval_to_ns(&raw_time) - timeval_to_ns(&stime);
+
+		/* Accept result with <  max_error nsecs timing uncertainty. */
+		if (duration_ns <= (s64) *max_error)
+			break;
+	}
+
+	/* Noisy system timing? */
+	if (i == DRM_TIMESTAMP_MAXRETRIES) {
+		DRM_DEBUG("crtc %d: Noisy timestamp %d us > %d us [%d reps].\n",
+			  crtc, (int) duration_ns/1000, *max_error/1000, i);
+	}
+
+	/* Return upper bound of timestamp precision error. */
+	*max_error = (int) duration_ns;
+
+	/* Check if in vblank area:
+	 * vpos is >=0 in video scanout area, but negative
+	 * within vblank area, counting down the number of lines until
+	 * start of scanout.
+	 */
+	invbl = vbl_status & DRM_SCANOUTPOS_INVBL;
+
+	/* Convert scanout position into elapsed time at raw_time query
+	 * since start of scanout at first display scanline. delta_ns
+	 * can be negative if start of scanout hasn't happened yet.
+	 */
+	delta_ns = (s64) vpos * linedur_ns + (s64) hpos * pixeldur_ns;
+
+	/* Is vpos outside nominal vblank area, but less than
+	 * 1/100 of a frame height away from start of vblank?
+	 * If so, assume this isn't a massively delayed vblank
+	 * interrupt, but a vblank interrupt that fired a few
+	 * microseconds before true start of vblank. Compensate
+	 * by adding a full frame duration to the final timestamp.
+	 * Happens, e.g., on ATI R500, R600.
+	 *
+	 * We only do this if DRM_CALLED_FROM_VBLIRQ.
+	 */
+	if ((flags & DRM_CALLED_FROM_VBLIRQ) && !invbl &&
+	    ((vdisplay - vpos) < vtotal / 100)) {
+		delta_ns = delta_ns - framedur_ns;
+
+		/* Signal this correction as "applied". */
+		vbl_status |= 0x8;
+	}
+
+	/* Subtract time delta from raw timestamp to get final
+	 * vblank_time timestamp for end of vblank.
+	 */
+	*vblank_time = ns_to_timeval(timeval_to_ns(&raw_time) - delta_ns);
+
+	DRM_DEBUG("crtc %d : v %d p(%d,%d)@ %d.%d -> %d.%d [e %d us, %d rep]\n",
+		  crtc, (int) vbl_status, hpos, vpos, raw_time.tv_sec,
+		  raw_time.tv_usec, vblank_time->tv_sec, vblank_time->tv_usec,
+		  (int) duration_ns/1000, i);
+
+	vbl_status = DRM_VBLANKTIME_SCANOUTPOS_METHOD;
+	if (invbl)
+		vbl_status |= DRM_VBLANKTIME_INVBL;
+
+	return vbl_status;
+}
+EXPORT_SYMBOL(drm_calc_vbltimestamp_from_scanoutpos);
+
+/**
+ * drm_get_last_vbltimestamp - retrieve raw timestamp for the most recent
+ * vblank interval.
+ *
+ * @dev: DRM device
+ * @crtc: which crtc's vblank timestamp to retrieve
+ * @tvblank: Pointer to target struct timeval which should receive the timestamp
+ * @flags: Flags to pass to driver:
+ *         0 = Default.
+ *         DRM_CALLED_FROM_VBLIRQ = If function is called from vbl irq handler.
+ *
+ * Fetches the system timestamp corresponding to the time of the most recent
+ * vblank interval on specified crtc. May call into kms-driver to
+ * compute the timestamp with a high-precision GPU specific method.
+ *
+ * Returns zero if timestamp originates from uncorrected do_gettimeofday()
+ * call, i.e., it isn't very precisely locked to the true vblank.
+ *
+ * Returns non-zero if timestamp is considered to be very precise.
+ */
+u32 drm_get_last_vbltimestamp(struct drm_device *dev, int crtc,
+			      struct timeval *tvblank, unsigned flags)
+{
+	int ret = 0;
+
+	/* Define requested maximum error on timestamps (nanoseconds). */
+	int max_error = (int) drm_timestamp_precision * 1000;
+
+	/* Query driver if possible and precision timestamping enabled. */
+	if (dev->driver->get_vblank_timestamp && (max_error > 0)) {
+		ret = dev->driver->get_vblank_timestamp(dev, crtc, &max_error,
+							tvblank, flags);
+		if (ret > 0)
+			return (u32) ret;
+	}
+
+	/* GPU high precision timestamp query unsupported or failed.
+	 * Return gettimeofday timestamp as best estimate.
+	 */
+	do_gettimeofday(tvblank);
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_get_last_vbltimestamp);
+
+/**
  * drm_vblank_count - retrieve "cooked" vblank counter value
  * @dev: DRM device
  * @crtc: which counter to retrieve
@@ -375,6 +768,40 @@
 EXPORT_SYMBOL(drm_vblank_count);
 
 /**
+ * drm_vblank_count_and_time - retrieve "cooked" vblank counter value
+ * and the system timestamp corresponding to that vblank counter value.
+ *
+ * @dev: DRM device
+ * @crtc: which counter to retrieve
+ * @vblanktime: Pointer to struct timeval to receive the vblank timestamp.
+ *
+ * Fetches the "cooked" vblank count value that represents the number of
+ * vblank events since the system was booted, including lost events due to
+ * modesetting activity. Also returns, in @vblanktime, the system timestamp
+ * of the vblank interval that corresponds to the current vblank counter
+ * value.
+ */
+u32 drm_vblank_count_and_time(struct drm_device *dev, int crtc,
+			      struct timeval *vblanktime)
+{
+	u32 cur_vblank;
+
+	/* Read timestamp from slot of _vblank_time ringbuffer
+	 * that corresponds to current vblank count. Retry if
+	 * count has incremented during readout. This works like
+	 * a seqlock.
+	 */
+	do {
+		cur_vblank = atomic_read(&dev->_vblank_count[crtc]);
+		*vblanktime = vblanktimestamp(dev, crtc, cur_vblank);
+		smp_rmb();
+	} while (cur_vblank != atomic_read(&dev->_vblank_count[crtc]));
+
+	return cur_vblank;
+}
+EXPORT_SYMBOL(drm_vblank_count_and_time);
+
+/**
  * drm_update_vblank_count - update the master vblank counter
  * @dev: DRM device
  * @crtc: counter to update
@@ -392,7 +819,8 @@
  */
 static void drm_update_vblank_count(struct drm_device *dev, int crtc)
 {
-	u32 cur_vblank, diff;
+	u32 cur_vblank, diff, tslot, rc;
+	struct timeval t_vblank;
 
 	/*
 	 * Interrupts were disabled prior to this call, so deal with counter
@@ -400,8 +828,18 @@
 	 * NOTE!  It's possible we lost a full dev->max_vblank_count events
 	 * here if the register is small or we had vblank interrupts off for
 	 * a long time.
+	 *
+	 * We repeat the hardware vblank counter & timestamp query until
+	 * we get consistent results. This to prevent races between gpu
+	 * updating its hardware counter while we are retrieving the
+	 * corresponding vblank timestamp.
 	 */
-	cur_vblank = dev->driver->get_vblank_counter(dev, crtc);
+	do {
+		cur_vblank = dev->driver->get_vblank_counter(dev, crtc);
+		rc = drm_get_last_vbltimestamp(dev, crtc, &t_vblank, 0);
+	} while (cur_vblank != dev->driver->get_vblank_counter(dev, crtc));
+
+	/* Deal with counter wrap */
 	diff = cur_vblank - dev->last_vblank[crtc];
 	if (cur_vblank < dev->last_vblank[crtc]) {
 		diff += dev->max_vblank_count;
@@ -413,6 +851,16 @@
 	DRM_DEBUG("enabling vblank interrupts on crtc %d, missed %d\n",
 		  crtc, diff);
 
+	/* Reinitialize corresponding vblank timestamp if high-precision query
+	 * available. Skip this step if query unsupported or failed. Will
+	 * reinitialize delayed at next vblank interrupt in that case.
+	 */
+	if (rc) {
+		tslot = atomic_read(&dev->_vblank_count[crtc]) + diff;
+		vblanktimestamp(dev, crtc, tslot) = t_vblank;
+		smp_wmb();
+	}
+
 	atomic_add(diff, &dev->_vblank_count[crtc]);
 }
 
@@ -429,15 +877,27 @@
  */
 int drm_vblank_get(struct drm_device *dev, int crtc)
 {
-	unsigned long irqflags;
+	unsigned long irqflags, irqflags2;
 	int ret = 0;
 
 	spin_lock_irqsave(&dev->vbl_lock, irqflags);
 	/* Going from 0->1 means we have to enable interrupts again */
 	if (atomic_add_return(1, &dev->vblank_refcount[crtc]) == 1) {
+		/* Disable preemption while holding vblank_time_lock. Do
+		 * it explicitly to guard against PREEMPT_RT kernel.
+		 */
+		preempt_disable();
+		spin_lock_irqsave(&dev->vblank_time_lock, irqflags2);
 		if (!dev->vblank_enabled[crtc]) {
+			/* Enable vblank irqs under vblank_time_lock protection.
+			 * All vblank count & timestamp updates are held off
+			 * until we are done reinitializing master counter and
+			 * timestamps. Filter code in drm_handle_vblank() will
+			 * prevent double-accounting of same vblank interval.
+			 */
 			ret = dev->driver->enable_vblank(dev, crtc);
-			DRM_DEBUG("enabling vblank on crtc %d, ret: %d\n", crtc, ret);
+			DRM_DEBUG("enabling vblank on crtc %d, ret: %d\n",
+				  crtc, ret);
 			if (ret)
 				atomic_dec(&dev->vblank_refcount[crtc]);
 			else {
@@ -445,6 +905,8 @@
 				drm_update_vblank_count(dev, crtc);
 			}
 		}
+		spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags2);
+		preempt_enable();
 	} else {
 		if (!dev->vblank_enabled[crtc]) {
 			atomic_dec(&dev->vblank_refcount[crtc]);
@@ -463,15 +925,17 @@
  * @crtc: which counter to give up
  *
  * Release ownership of a given vblank counter, turning off interrupts
- * if possible.
+ * if possible. Disable interrupts after drm_vblank_offdelay milliseconds.
  */
 void drm_vblank_put(struct drm_device *dev, int crtc)
 {
-	BUG_ON (atomic_read (&dev->vblank_refcount[crtc]) == 0);
+	BUG_ON(atomic_read(&dev->vblank_refcount[crtc]) == 0);
 
 	/* Last user schedules interrupt disable */
-	if (atomic_dec_and_test(&dev->vblank_refcount[crtc]))
-		mod_timer(&dev->vblank_disable_timer, jiffies + 5*DRM_HZ);
+	if (atomic_dec_and_test(&dev->vblank_refcount[crtc]) &&
+	    (drm_vblank_offdelay > 0))
+		mod_timer(&dev->vblank_disable_timer,
+			  jiffies + ((drm_vblank_offdelay * DRM_HZ)/1000));
 }
 EXPORT_SYMBOL(drm_vblank_put);
 
@@ -480,10 +944,8 @@
 	unsigned long irqflags;
 
 	spin_lock_irqsave(&dev->vbl_lock, irqflags);
-	dev->driver->disable_vblank(dev, crtc);
+	vblank_disable_and_save(dev, crtc);
 	DRM_WAKEUP(&dev->vbl_queue[crtc]);
-	dev->vblank_enabled[crtc] = 0;
-	dev->last_vblank[crtc] = dev->driver->get_vblank_counter(dev, crtc);
 	spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
 }
 EXPORT_SYMBOL(drm_vblank_off);
@@ -602,7 +1064,6 @@
 	e->base.file_priv = file_priv;
 	e->base.destroy = (void (*) (struct drm_pending_event *)) kfree;
 
-	do_gettimeofday(&now);
 	spin_lock_irqsave(&dev->event_lock, flags);
 
 	if (file_priv->event_space < sizeof e->event) {
@@ -611,7 +1072,8 @@
 	}
 
 	file_priv->event_space -= sizeof e->event;
-	seq = drm_vblank_count(dev, pipe);
+	seq = drm_vblank_count_and_time(dev, pipe, &now);
+
 	if ((vblwait->request.type & _DRM_VBLANK_NEXTONMISS) &&
 	    (seq - vblwait->request.sequence) <= (1 << 23)) {
 		vblwait->request.sequence = seq + 1;
@@ -626,15 +1088,18 @@
 
 	e->event.sequence = vblwait->request.sequence;
 	if ((seq - vblwait->request.sequence) <= (1 << 23)) {
+		e->event.sequence = seq;
 		e->event.tv_sec = now.tv_sec;
 		e->event.tv_usec = now.tv_usec;
 		drm_vblank_put(dev, pipe);
 		list_add_tail(&e->base.link, &e->base.file_priv->event_list);
 		wake_up_interruptible(&e->base.file_priv->event_wait);
+		vblwait->reply.sequence = seq;
 		trace_drm_vblank_event_delivered(current->pid, pipe,
 						 vblwait->request.sequence);
 	} else {
 		list_add_tail(&e->base.link, &dev->vblank_event_list);
+		vblwait->reply.sequence = vblwait->request.sequence;
 	}
 
 	spin_unlock_irqrestore(&dev->event_lock, flags);
@@ -727,11 +1192,10 @@
 	if (ret != -EINTR) {
 		struct timeval now;
 
-		do_gettimeofday(&now);
-
+		vblwait->reply.sequence = drm_vblank_count_and_time(dev, crtc, &now);
 		vblwait->reply.tval_sec = now.tv_sec;
 		vblwait->reply.tval_usec = now.tv_usec;
-		vblwait->reply.sequence = drm_vblank_count(dev, crtc);
+
 		DRM_DEBUG("returning %d to client\n",
 			  vblwait->reply.sequence);
 	} else {
@@ -750,8 +1214,7 @@
 	unsigned long flags;
 	unsigned int seq;
 
-	do_gettimeofday(&now);
-	seq = drm_vblank_count(dev, crtc);
+	seq = drm_vblank_count_and_time(dev, crtc, &now);
 
 	spin_lock_irqsave(&dev->event_lock, flags);
 
@@ -789,11 +1252,64 @@
  */
 void drm_handle_vblank(struct drm_device *dev, int crtc)
 {
+	u32 vblcount;
+	s64 diff_ns;
+	struct timeval tvblank;
+	unsigned long irqflags;
+
 	if (!dev->num_crtcs)
 		return;
 
-	atomic_inc(&dev->_vblank_count[crtc]);
+	/* Need timestamp lock to prevent concurrent execution with
+	 * vblank enable/disable, as this would cause inconsistent
+	 * or corrupted timestamps and vblank counts.
+	 */
+	spin_lock_irqsave(&dev->vblank_time_lock, irqflags);
+
+	/* Vblank irq handling disabled. Nothing to do. */
+	if (!dev->vblank_enabled[crtc]) {
+		spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);
+		return;
+	}
+
+	/* Fetch corresponding timestamp for this vblank interval from
+	 * driver and store it in proper slot of timestamp ringbuffer.
+	 */
+
+	/* Get current timestamp and count. */
+	vblcount = atomic_read(&dev->_vblank_count[crtc]);
+	drm_get_last_vbltimestamp(dev, crtc, &tvblank, DRM_CALLED_FROM_VBLIRQ);
+
+	/* Compute time difference to timestamp of last vblank */
+	diff_ns = timeval_to_ns(&tvblank) -
+		  timeval_to_ns(&vblanktimestamp(dev, crtc, vblcount));
+
+	/* Update vblank timestamp and count if at least
+	 * DRM_REDUNDANT_VBLIRQ_THRESH_NS nanoseconds
+	 * difference between last stored timestamp and current
+	 * timestamp. A smaller difference means basically
+	 * identical timestamps. Happens if this vblank has
+	 * been already processed and this is a redundant call,
+	 * e.g., due to spurious vblank interrupts. We need to
+	 * ignore those for accounting.
+	 */
+	if (abs(diff_ns) > DRM_REDUNDANT_VBLIRQ_THRESH_NS) {
+		/* Store new timestamp in ringbuffer. */
+		vblanktimestamp(dev, crtc, vblcount + 1) = tvblank;
+		smp_wmb();
+
+		/* Increment cooked vblank count. This also atomically commits
+		 * the timestamp computed above.
+		 */
+		atomic_inc(&dev->_vblank_count[crtc]);
+	} else {
+		DRM_DEBUG("crtc %d: Redundant vblirq ignored. diff_ns = %d\n",
+			  crtc, (int) diff_ns);
+	}
+
 	DRM_WAKEUP(&dev->vbl_queue[crtc]);
 	drm_handle_vblank_events(dev, crtc);
+
+	spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);
 }
 EXPORT_SYMBOL(drm_handle_vblank);
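
The arithmetic behind drm_calc_timestamping_constants() and drm_calc_vbltimestamp_from_scanoutpos() is easiest to follow with concrete numbers. A worked example for an illustrative 1024x768 mode (crtc_htotal = 1344, crtc_vtotal = 806, clock = 65000 kHz); these figures are assumptions for illustration, not values from the patch:

	/*
	 * dotclock    = 65000 * 1000              = 65,000,000 Hz
	 * pixeldur_ns = 10^9 / 65,000,000         = 15 ns (integer division)
	 * linedur_ns  = 1344 * 10^9 / 65,000,000  = 20,676 ns (~20.7 us per line)
	 * framedur_ns = 806 * 20,676              = 16,664,856 ns (~16.66 ms, ~60 Hz)
	 *
	 * drm_calc_vbltimestamp_from_scanoutpos() converts a scanout position
	 * (vpos, hpos) into the time elapsed since scanout of the first
	 * visible pixel:
	 *
	 *	delta_ns = vpos * linedur_ns + hpos * pixeldur_ns
	 *
	 * and subtracts it from the system timestamp sampled around the
	 * scanout-position query, yielding the end-of-vblank timestamp that
	 * the OML_sync_control semantics call for.
	 */

drm_vblank_count_and_time() then pairs such a timestamp with the cooked counter using a seqlock-style retry: it rereads the atomic counter after copying the ringbuffer slot and loops if the counter moved, so callers never see a count/timestamp combination taken from two different vblank intervals.
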
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index a6bfc30..c59515b 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -392,10 +392,36 @@
 	mm->scanned_blocks = 0;
 	mm->scan_hit_start = 0;
 	mm->scan_hit_size = 0;
+	mm->scan_check_range = 0;
 }
 EXPORT_SYMBOL(drm_mm_init_scan);
 
 /**
+ * Initialize lru scanning.
+ *
+ * This simply sets up the scanning routines with the parameters for the desired
+ * hole. This version is for range-restricted scans.
+ *
+ * Warning: As long as the scan list is non-empty, no other operations than
+ * adding/removing nodes to/from the scan list are allowed.
+ */
+void drm_mm_init_scan_with_range(struct drm_mm *mm, unsigned long size,
+				 unsigned alignment,
+				 unsigned long start,
+				 unsigned long end)
+{
+	mm->scan_alignment = alignment;
+	mm->scan_size = size;
+	mm->scanned_blocks = 0;
+	mm->scan_hit_start = 0;
+	mm->scan_hit_size = 0;
+	mm->scan_start = start;
+	mm->scan_end = end;
+	mm->scan_check_range = 1;
+}
+EXPORT_SYMBOL(drm_mm_init_scan_with_range);
+
+/**
  * Add a node to the scan list that might be freed to make space for the desired
  * hole.
  *
@@ -406,6 +432,8 @@
 	struct drm_mm *mm = node->mm;
 	struct list_head *prev_free, *next_free;
 	struct drm_mm_node *prev_node, *next_node;
+	unsigned long adj_start;
+	unsigned long adj_end;
 
 	mm->scanned_blocks++;
 
@@ -452,7 +480,17 @@
 	node->free_stack.prev = prev_free;
 	node->free_stack.next = next_free;
 
-	if (check_free_hole(node->start, node->start + node->size,
+	if (mm->scan_check_range) {
+		adj_start = node->start < mm->scan_start ?
+			mm->scan_start : node->start;
+		adj_end = node->start + node->size > mm->scan_end ?
+			mm->scan_end : node->start + node->size;
+	} else {
+		adj_start = node->start;
+		adj_end = node->start + node->size;
+	}
+
+	if (check_free_hole(adj_start, adj_end,
 			    mm->scan_size, mm->scan_alignment)) {
 		mm->scan_hit_start = node->start;
 		mm->scan_hit_size = node->size;
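
drm_mm_init_scan_with_range() behaves like drm_mm_init_scan() except that each candidate node is clamped to the requested [start, end) window before check_free_hole() runs, so only the part of a node overlapping that range can contribute to the hole. A minimal usage sketch; the mm pointer, size, alignment and mappable_end are placeholders rather than values from this patch:

	/* Scan for an eviction hole restricted to a mappable aperture. */
	drm_mm_init_scan_with_range(mm, size, alignment, 0, mappable_end);

	/* Candidate nodes are then fed to drm_mm_scan_add_block() until it
	 * reports a hit, and unwound with drm_mm_scan_remove_block() in
	 * reverse order, exactly as with the unrestricted drm_mm_init_scan().
	 */
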
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
index cdc89ee..d59edc1 100644
--- a/drivers/gpu/drm/drm_stub.c
+++ b/drivers/gpu/drm/drm_stub.c
@@ -40,12 +40,22 @@
 unsigned int drm_debug = 0;	/* 1 to enable debug output */
 EXPORT_SYMBOL(drm_debug);
 
+unsigned int drm_vblank_offdelay = 5000;    /* Default to 5000 msecs. */
+EXPORT_SYMBOL(drm_vblank_offdelay);
+
+unsigned int drm_timestamp_precision = 20;  /* Default to 20 usecs. */
+EXPORT_SYMBOL(drm_timestamp_precision);
+
 MODULE_AUTHOR(CORE_AUTHOR);
 MODULE_DESCRIPTION(CORE_DESC);
 MODULE_LICENSE("GPL and additional rights");
 MODULE_PARM_DESC(debug, "Enable debug output");
+MODULE_PARM_DESC(vblankoffdelay, "Delay until vblank irq auto-disable [msecs]");
+MODULE_PARM_DESC(timestamp_precision_usec, "Max. error on timestamps [usecs]");
 
 module_param_named(debug, drm_debug, int, 0600);
+module_param_named(vblankoffdelay, drm_vblank_offdelay, int, 0600);
+module_param_named(timestamp_precision_usec, drm_timestamp_precision, int, 0600);
 
 struct idr drm_minors_idr;
 
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index fdc833d..0ae6a7c 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -9,6 +9,8 @@
 	  i915_gem.o \
 	  i915_gem_debug.o \
 	  i915_gem_evict.o \
+	  i915_gem_execbuffer.o \
+	  i915_gem_gtt.o \
 	  i915_gem_tiling.o \
 	  i915_trace_points.o \
 	  intel_display.o \
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 1f4f3ce..19a3d58 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -32,6 +32,7 @@
 #include "drmP.h"
 #include "drm.h"
 #include "intel_drv.h"
+#include "intel_ringbuffer.h"
 #include "i915_drm.h"
 #include "i915_drv.h"
 
@@ -72,7 +73,6 @@
 	B(is_broadwater);
 	B(is_crestline);
 	B(has_fbc);
-	B(has_rc6);
 	B(has_pipe_cxsr);
 	B(has_hotplug);
 	B(cursor_needs_physical);
@@ -86,19 +86,19 @@
 	return 0;
 }
 
-static const char *get_pin_flag(struct drm_i915_gem_object *obj_priv)
+static const char *get_pin_flag(struct drm_i915_gem_object *obj)
 {
-	if (obj_priv->user_pin_count > 0)
+	if (obj->user_pin_count > 0)
 		return "P";
-	else if (obj_priv->pin_count > 0)
+	else if (obj->pin_count > 0)
 		return "p";
 	else
 		return " ";
 }
 
-static const char *get_tiling_flag(struct drm_i915_gem_object *obj_priv)
+static const char *get_tiling_flag(struct drm_i915_gem_object *obj)
 {
-    switch (obj_priv->tiling_mode) {
+    switch (obj->tiling_mode) {
     default:
     case I915_TILING_NONE: return " ";
     case I915_TILING_X: return "X";
@@ -106,10 +106,19 @@
     }
 }
 
+static const char *agp_type_str(int type)
+{
+	switch (type) {
+	case 0: return " uncached";
+	case 1: return " snooped";
+	default: return "";
+	}
+}
+
 static void
 describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 {
-	seq_printf(m, "%p: %s%s %8zd %08x %08x %d%s%s",
+	seq_printf(m, "%p: %s%s %8zd %04x %04x %d %d%s%s%s",
 		   &obj->base,
 		   get_pin_flag(obj),
 		   get_tiling_flag(obj),
@@ -117,6 +126,8 @@
 		   obj->base.read_domains,
 		   obj->base.write_domain,
 		   obj->last_rendering_seqno,
+		   obj->last_fenced_seqno,
+		   agp_type_str(obj->agp_type == AGP_USER_CACHED_MEMORY),
 		   obj->dirty ? " dirty" : "",
 		   obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
 	if (obj->base.name)
@@ -124,7 +135,17 @@
 	if (obj->fence_reg != I915_FENCE_REG_NONE)
 		seq_printf(m, " (fence: %d)", obj->fence_reg);
 	if (obj->gtt_space != NULL)
-		seq_printf(m, " (gtt_offset: %08x)", obj->gtt_offset);
+		seq_printf(m, " (gtt offset: %08x, size: %08x)",
+			   obj->gtt_offset, (unsigned int)obj->gtt_space->size);
+	if (obj->pin_mappable || obj->fault_mappable) {
+		char s[3], *t = s;
+		if (obj->pin_mappable)
+			*t++ = 'p';
+		if (obj->fault_mappable)
+			*t++ = 'f';
+		*t = '\0';
+		seq_printf(m, " (%s mappable)", s);
+	}
 	if (obj->ring != NULL)
 		seq_printf(m, " (%s)", obj->ring->name);
 }
@@ -136,7 +157,7 @@
 	struct list_head *head;
 	struct drm_device *dev = node->minor->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *obj_priv;
+	struct drm_i915_gem_object *obj;
 	size_t total_obj_size, total_gtt_size;
 	int count, ret;
 
@@ -171,12 +192,12 @@
 	}
 
 	total_obj_size = total_gtt_size = count = 0;
-	list_for_each_entry(obj_priv, head, mm_list) {
+	list_for_each_entry(obj, head, mm_list) {
 		seq_printf(m, "   ");
-		describe_obj(m, obj_priv);
+		describe_obj(m, obj);
 		seq_printf(m, "\n");
-		total_obj_size += obj_priv->base.size;
-		total_gtt_size += obj_priv->gtt_space->size;
+		total_obj_size += obj->base.size;
+		total_gtt_size += obj->gtt_space->size;
 		count++;
 	}
 	mutex_unlock(&dev->struct_mutex);
@@ -186,30 +207,116 @@
 	return 0;
 }
 
+#define count_objects(list, member) do { \
+	list_for_each_entry(obj, list, member) { \
+		size += obj->gtt_space->size; \
+		++count; \
+		if (obj->map_and_fenceable) { \
+			mappable_size += obj->gtt_space->size; \
+			++mappable_count; \
+		} \
+	} \
+} while(0)
+
 static int i915_gem_object_info(struct seq_file *m, void* data)
 {
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 count, mappable_count;
+	size_t size, mappable_size;
+	struct drm_i915_gem_object *obj;
 	int ret;
 
 	ret = mutex_lock_interruptible(&dev->struct_mutex);
 	if (ret)
 		return ret;
 
-	seq_printf(m, "%u objects\n", dev_priv->mm.object_count);
-	seq_printf(m, "%zu object bytes\n", dev_priv->mm.object_memory);
-	seq_printf(m, "%u pinned\n", dev_priv->mm.pin_count);
-	seq_printf(m, "%zu pin bytes\n", dev_priv->mm.pin_memory);
-	seq_printf(m, "%u objects in gtt\n", dev_priv->mm.gtt_count);
-	seq_printf(m, "%zu gtt bytes\n", dev_priv->mm.gtt_memory);
-	seq_printf(m, "%zu gtt total\n", dev_priv->mm.gtt_total);
+	seq_printf(m, "%u objects, %zu bytes\n",
+		   dev_priv->mm.object_count,
+		   dev_priv->mm.object_memory);
+
+	size = count = mappable_size = mappable_count = 0;
+	count_objects(&dev_priv->mm.gtt_list, gtt_list);
+	seq_printf(m, "%u [%u] objects, %zu [%zu] bytes in gtt\n",
+		   count, mappable_count, size, mappable_size);
+
+	size = count = mappable_size = mappable_count = 0;
+	count_objects(&dev_priv->mm.active_list, mm_list);
+	count_objects(&dev_priv->mm.flushing_list, mm_list);
+	seq_printf(m, "  %u [%u] active objects, %zu [%zu] bytes\n",
+		   count, mappable_count, size, mappable_size);
+
+	size = count = mappable_size = mappable_count = 0;
+	count_objects(&dev_priv->mm.pinned_list, mm_list);
+	seq_printf(m, "  %u [%u] pinned objects, %zu [%zu] bytes\n",
+		   count, mappable_count, size, mappable_size);
+
+	size = count = mappable_size = mappable_count = 0;
+	count_objects(&dev_priv->mm.inactive_list, mm_list);
+	seq_printf(m, "  %u [%u] inactive objects, %zu [%zu] bytes\n",
+		   count, mappable_count, size, mappable_size);
+
+	size = count = mappable_size = mappable_count = 0;
+	count_objects(&dev_priv->mm.deferred_free_list, mm_list);
+	seq_printf(m, "  %u [%u] freed objects, %zu [%zu] bytes\n",
+		   count, mappable_count, size, mappable_size);
+
+	size = count = mappable_size = mappable_count = 0;
+	list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
+		if (obj->fault_mappable) {
+			size += obj->gtt_space->size;
+			++count;
+		}
+		if (obj->pin_mappable) {
+			mappable_size += obj->gtt_space->size;
+			++mappable_count;
+		}
+	}
+	seq_printf(m, "%u pinned mappable objects, %zu bytes\n",
+		   mappable_count, mappable_size);
+	seq_printf(m, "%u fault mappable objects, %zu bytes\n",
+		   count, size);
+
+	seq_printf(m, "%zu [%zu] gtt total\n",
+		   dev_priv->mm.gtt_total, dev_priv->mm.mappable_gtt_total);
 
 	mutex_unlock(&dev->struct_mutex);
 
 	return 0;
 }
 
+static int i915_gem_gtt_info(struct seq_file *m, void* data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_gem_object *obj;
+	size_t total_obj_size, total_gtt_size;
+	int count, ret;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
+	total_obj_size = total_gtt_size = count = 0;
+	list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
+		seq_printf(m, "   ");
+		describe_obj(m, obj);
+		seq_printf(m, "\n");
+		total_obj_size += obj->base.size;
+		total_gtt_size += obj->gtt_space->size;
+		count++;
+	}
+
+	mutex_unlock(&dev->struct_mutex);
+
+	seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
+		   count, total_obj_size, total_gtt_size);
+
+	return 0;
+}
+
 
 static int i915_gem_pageflip_info(struct seq_file *m, void *data)
 {
@@ -243,14 +350,14 @@
 			seq_printf(m, "%d prepares\n", work->pending);
 
 			if (work->old_fb_obj) {
-				struct drm_i915_gem_object *obj_priv = to_intel_bo(work->old_fb_obj);
-				if(obj_priv)
-					seq_printf(m, "Old framebuffer gtt_offset 0x%08x\n", obj_priv->gtt_offset );
+				struct drm_i915_gem_object *obj = work->old_fb_obj;
+				if (obj)
+					seq_printf(m, "Old framebuffer gtt_offset 0x%08x\n", obj->gtt_offset);
 			}
 			if (work->pending_flip_obj) {
-				struct drm_i915_gem_object *obj_priv = to_intel_bo(work->pending_flip_obj);
-				if(obj_priv)
-					seq_printf(m, "New framebuffer gtt_offset 0x%08x\n", obj_priv->gtt_offset );
+				struct drm_i915_gem_object *obj = work->pending_flip_obj;
+				if (obj)
+					seq_printf(m, "New framebuffer gtt_offset 0x%08x\n", obj->gtt_offset);
 			}
 		}
 		spin_unlock_irqrestore(&dev->event_lock, flags);
@@ -265,44 +372,80 @@
 	struct drm_device *dev = node->minor->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_gem_request *gem_request;
-	int ret;
+	int ret, count;
 
 	ret = mutex_lock_interruptible(&dev->struct_mutex);
 	if (ret)
 		return ret;
 
-	seq_printf(m, "Request:\n");
-	list_for_each_entry(gem_request, &dev_priv->render_ring.request_list,
-			list) {
-		seq_printf(m, "    %d @ %d\n",
-			   gem_request->seqno,
-			   (int) (jiffies - gem_request->emitted_jiffies));
+	count = 0;
+	if (!list_empty(&dev_priv->ring[RCS].request_list)) {
+		seq_printf(m, "Render requests:\n");
+		list_for_each_entry(gem_request,
+				    &dev_priv->ring[RCS].request_list,
+				    list) {
+			seq_printf(m, "    %d @ %d\n",
+				   gem_request->seqno,
+				   (int) (jiffies - gem_request->emitted_jiffies));
+		}
+		count++;
+	}
+	if (!list_empty(&dev_priv->ring[VCS].request_list)) {
+		seq_printf(m, "BSD requests:\n");
+		list_for_each_entry(gem_request,
+				    &dev_priv->ring[VCS].request_list,
+				    list) {
+			seq_printf(m, "    %d @ %d\n",
+				   gem_request->seqno,
+				   (int) (jiffies - gem_request->emitted_jiffies));
+		}
+		count++;
+	}
+	if (!list_empty(&dev_priv->ring[BCS].request_list)) {
+		seq_printf(m, "BLT requests:\n");
+		list_for_each_entry(gem_request,
+				    &dev_priv->ring[BCS].request_list,
+				    list) {
+			seq_printf(m, "    %d @ %d\n",
+				   gem_request->seqno,
+				   (int) (jiffies - gem_request->emitted_jiffies));
+		}
+		count++;
 	}
 	mutex_unlock(&dev->struct_mutex);
 
+	if (count == 0)
+		seq_printf(m, "No requests\n");
+
 	return 0;
 }
 
+static void i915_ring_seqno_info(struct seq_file *m,
+				 struct intel_ring_buffer *ring)
+{
+	if (ring->get_seqno) {
+		seq_printf(m, "Current sequence (%s): %d\n",
+			   ring->name, ring->get_seqno(ring));
+		seq_printf(m, "Waiter sequence (%s):  %d\n",
+			   ring->name, ring->waiting_seqno);
+		seq_printf(m, "IRQ sequence (%s):     %d\n",
+			   ring->name, ring->irq_seqno);
+	}
+}
+
 static int i915_gem_seqno_info(struct seq_file *m, void *data)
 {
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	int ret;
+	int ret, i;
 
 	ret = mutex_lock_interruptible(&dev->struct_mutex);
 	if (ret)
 		return ret;
 
-	if (dev_priv->render_ring.status_page.page_addr != NULL) {
-		seq_printf(m, "Current sequence: %d\n",
-			   dev_priv->render_ring.get_seqno(dev, &dev_priv->render_ring));
-	} else {
-		seq_printf(m, "Current sequence: hws uninitialized\n");
-	}
-	seq_printf(m, "Waiter sequence:  %d\n",
-			dev_priv->mm.waiting_gem_seqno);
-	seq_printf(m, "IRQ sequence:     %d\n", dev_priv->mm.irq_gem_seqno);
+	for (i = 0; i < I915_NUM_RINGS; i++)
+		i915_ring_seqno_info(m, &dev_priv->ring[i]);
 
 	mutex_unlock(&dev->struct_mutex);
 
@@ -315,7 +458,7 @@
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	int ret;
+	int ret, i;
 
 	ret = mutex_lock_interruptible(&dev->struct_mutex);
 	if (ret)
@@ -354,16 +497,14 @@
 	}
 	seq_printf(m, "Interrupts received: %d\n",
 		   atomic_read(&dev_priv->irq_received));
-	if (dev_priv->render_ring.status_page.page_addr != NULL) {
-		seq_printf(m, "Current sequence:    %d\n",
-			   dev_priv->render_ring.get_seqno(dev, &dev_priv->render_ring));
-	} else {
-		seq_printf(m, "Current sequence:    hws uninitialized\n");
+	for (i = 0; i < I915_NUM_RINGS; i++) {
+		if (IS_GEN6(dev)) {
+			seq_printf(m, "Graphics Interrupt mask (%s):	%08x\n",
+				   dev_priv->ring[i].name,
+				   I915_READ_IMR(&dev_priv->ring[i]));
+		}
+		i915_ring_seqno_info(m, &dev_priv->ring[i]);
 	}
-	seq_printf(m, "Waiter sequence:     %d\n",
-		   dev_priv->mm.waiting_gem_seqno);
-	seq_printf(m, "IRQ sequence:        %d\n",
-		   dev_priv->mm.irq_gem_seqno);
 	mutex_unlock(&dev->struct_mutex);
 
 	return 0;
@@ -383,29 +524,17 @@
 	seq_printf(m, "Reserved fences = %d\n", dev_priv->fence_reg_start);
 	seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
 	for (i = 0; i < dev_priv->num_fence_regs; i++) {
-		struct drm_gem_object *obj = dev_priv->fence_regs[i].obj;
+		struct drm_i915_gem_object *obj = dev_priv->fence_regs[i].obj;
 
-		if (obj == NULL) {
-			seq_printf(m, "Fenced object[%2d] = unused\n", i);
-		} else {
-			struct drm_i915_gem_object *obj_priv;
-
-			obj_priv = to_intel_bo(obj);
-			seq_printf(m, "Fenced object[%2d] = %p: %s "
-				   "%08x %08zx %08x %s %08x %08x %d",
-				   i, obj, get_pin_flag(obj_priv),
-				   obj_priv->gtt_offset,
-				   obj->size, obj_priv->stride,
-				   get_tiling_flag(obj_priv),
-				   obj->read_domains, obj->write_domain,
-				   obj_priv->last_rendering_seqno);
-			if (obj->name)
-				seq_printf(m, " (name: %d)", obj->name);
-			seq_printf(m, "\n");
-		}
+		seq_printf(m, "Fenced object[%2d] = ", i);
+		if (obj == NULL)
+			seq_printf(m, "unused");
+		else
+			describe_obj(m, obj);
+		seq_printf(m, "\n");
 	}
-	mutex_unlock(&dev->struct_mutex);
 
+	mutex_unlock(&dev->struct_mutex);
 	return 0;
 }
 
@@ -414,10 +543,12 @@
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	int i;
+	struct intel_ring_buffer *ring;
 	volatile u32 *hws;
+	int i;
 
-	hws = (volatile u32 *)dev_priv->render_ring.status_page.page_addr;
+	ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
+	hws = (volatile u32 *)ring->status_page.page_addr;
 	if (hws == NULL)
 		return 0;
 
@@ -431,14 +562,14 @@
 
 static void i915_dump_object(struct seq_file *m,
 			     struct io_mapping *mapping,
-			     struct drm_i915_gem_object *obj_priv)
+			     struct drm_i915_gem_object *obj)
 {
 	int page, page_count, i;
 
-	page_count = obj_priv->base.size / PAGE_SIZE;
+	page_count = obj->base.size / PAGE_SIZE;
 	for (page = 0; page < page_count; page++) {
 		u32 *mem = io_mapping_map_wc(mapping,
-					     obj_priv->gtt_offset + page * PAGE_SIZE);
+					     obj->gtt_offset + page * PAGE_SIZE);
 		for (i = 0; i < PAGE_SIZE; i += 4)
 			seq_printf(m, "%08x :  %08x\n", i, mem[i / 4]);
 		io_mapping_unmap(mem);
@@ -450,25 +581,21 @@
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_gem_object *obj;
-	struct drm_i915_gem_object *obj_priv;
+	struct drm_i915_gem_object *obj;
 	int ret;
 
 	ret = mutex_lock_interruptible(&dev->struct_mutex);
 	if (ret)
 		return ret;
 
-	list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) {
-		obj = &obj_priv->base;
-		if (obj->read_domains & I915_GEM_DOMAIN_COMMAND) {
-		    seq_printf(m, "--- gtt_offset = 0x%08x\n",
-			       obj_priv->gtt_offset);
-		    i915_dump_object(m, dev_priv->mm.gtt_mapping, obj_priv);
+	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
+		if (obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) {
+		    seq_printf(m, "--- gtt_offset = 0x%08x\n", obj->gtt_offset);
+		    i915_dump_object(m, dev_priv->mm.gtt_mapping, obj);
 		}
 	}
 
 	mutex_unlock(&dev->struct_mutex);
-
 	return 0;
 }
 
@@ -477,19 +604,21 @@
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct intel_ring_buffer *ring;
 	int ret;
 
 	ret = mutex_lock_interruptible(&dev->struct_mutex);
 	if (ret)
 		return ret;
 
-	if (!dev_priv->render_ring.gem_object) {
+	ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
+	if (!ring->obj) {
 		seq_printf(m, "No ringbuffer setup\n");
 	} else {
-		u8 *virt = dev_priv->render_ring.virtual_start;
+		u8 *virt = ring->virtual_start;
 		uint32_t off;
 
-		for (off = 0; off < dev_priv->render_ring.size; off += 4) {
+		for (off = 0; off < ring->size; off += 4) {
 			uint32_t *ptr = (uint32_t *)(virt + off);
 			seq_printf(m, "%08x :  %08x\n", off, *ptr);
 		}
@@ -504,19 +633,38 @@
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	unsigned int head, tail;
+	struct intel_ring_buffer *ring;
 
-	head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
-	tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
+	ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
+	if (ring->size == 0)
+		return 0;
 
-	seq_printf(m, "RingHead :  %08x\n", head);
-	seq_printf(m, "RingTail :  %08x\n", tail);
-	seq_printf(m, "RingSize :  %08lx\n", dev_priv->render_ring.size);
-	seq_printf(m, "Acthd :     %08x\n", I915_READ(INTEL_INFO(dev)->gen >= 4 ? ACTHD_I965 : ACTHD));
+	seq_printf(m, "Ring %s:\n", ring->name);
+	seq_printf(m, "  Head :    %08x\n", I915_READ_HEAD(ring) & HEAD_ADDR);
+	seq_printf(m, "  Tail :    %08x\n", I915_READ_TAIL(ring) & TAIL_ADDR);
+	seq_printf(m, "  Size :    %08x\n", ring->size);
+	seq_printf(m, "  Active :  %08x\n", intel_ring_get_active_head(ring));
+	seq_printf(m, "  NOPID :   %08x\n", I915_READ_NOPID(ring));
+	if (IS_GEN6(dev)) {
+		seq_printf(m, "  Sync 0 :   %08x\n", I915_READ_SYNC_0(ring));
+		seq_printf(m, "  Sync 1 :   %08x\n", I915_READ_SYNC_1(ring));
+	}
+	seq_printf(m, "  Control : %08x\n", I915_READ_CTL(ring));
+	seq_printf(m, "  Start :   %08x\n", I915_READ_START(ring));
 
 	return 0;
 }
 
+static const char *ring_str(int ring)
+{
+	switch (ring) {
+	case RING_RENDER: return " render";
+	case RING_BSD: return " bsd";
+	case RING_BLT: return " blt";
+	default: return "";
+	}
+}
+
 static const char *pin_flag(int pinned)
 {
 	if (pinned > 0)
@@ -547,6 +695,37 @@
 	return purgeable ? " purgeable" : "";
 }
 
+static void print_error_buffers(struct seq_file *m,
+				const char *name,
+				struct drm_i915_error_buffer *err,
+				int count)
+{
+	seq_printf(m, "%s [%d]:\n", name, count);
+
+	while (count--) {
+		seq_printf(m, "  %08x %8zd %04x %04x %08x%s%s%s%s%s%s",
+			   err->gtt_offset,
+			   err->size,
+			   err->read_domains,
+			   err->write_domain,
+			   err->seqno,
+			   pin_flag(err->pinned),
+			   tiling_flag(err->tiling),
+			   dirty_flag(err->dirty),
+			   purgeable_flag(err->purgeable),
+			   ring_str(err->ring),
+			   agp_type_str(err->agp_type));
+
+		if (err->name)
+			seq_printf(m, " (name: %d)", err->name);
+		if (err->fence_reg != I915_FENCE_REG_NONE)
+			seq_printf(m, " (fence: %d)", err->fence_reg);
+
+		seq_printf(m, "\n");
+		err++;
+	}
+}
+
 static int i915_error_state(struct seq_file *m, void *unused)
 {
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -568,47 +747,54 @@
 		   error->time.tv_usec);
 	seq_printf(m, "PCI ID: 0x%04x\n", dev->pci_device);
 	seq_printf(m, "EIR: 0x%08x\n", error->eir);
-	seq_printf(m, "  PGTBL_ER: 0x%08x\n", error->pgtbl_er);
-	seq_printf(m, "  INSTPM: 0x%08x\n", error->instpm);
+	seq_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
+	if (INTEL_INFO(dev)->gen >= 6) {
+		seq_printf(m, "ERROR: 0x%08x\n", error->error);
+		seq_printf(m, "Blitter command stream:\n");
+		seq_printf(m, "  ACTHD:    0x%08x\n", error->bcs_acthd);
+		seq_printf(m, "  IPEIR:    0x%08x\n", error->bcs_ipeir);
+		seq_printf(m, "  IPEHR:    0x%08x\n", error->bcs_ipehr);
+		seq_printf(m, "  INSTDONE: 0x%08x\n", error->bcs_instdone);
+		seq_printf(m, "  seqno:    0x%08x\n", error->bcs_seqno);
+		seq_printf(m, "Video (BSD) command stream:\n");
+		seq_printf(m, "  ACTHD:    0x%08x\n", error->vcs_acthd);
+		seq_printf(m, "  IPEIR:    0x%08x\n", error->vcs_ipeir);
+		seq_printf(m, "  IPEHR:    0x%08x\n", error->vcs_ipehr);
+		seq_printf(m, "  INSTDONE: 0x%08x\n", error->vcs_instdone);
+		seq_printf(m, "  seqno:    0x%08x\n", error->vcs_seqno);
+	}
+	seq_printf(m, "Render command stream:\n");
+	seq_printf(m, "  ACTHD: 0x%08x\n", error->acthd);
 	seq_printf(m, "  IPEIR: 0x%08x\n", error->ipeir);
 	seq_printf(m, "  IPEHR: 0x%08x\n", error->ipehr);
 	seq_printf(m, "  INSTDONE: 0x%08x\n", error->instdone);
-	seq_printf(m, "  ACTHD: 0x%08x\n", error->acthd);
 	if (INTEL_INFO(dev)->gen >= 4) {
-		seq_printf(m, "  INSTPS: 0x%08x\n", error->instps);
 		seq_printf(m, "  INSTDONE1: 0x%08x\n", error->instdone1);
+		seq_printf(m, "  INSTPS: 0x%08x\n", error->instps);
 	}
-	seq_printf(m, "seqno: 0x%08x\n", error->seqno);
+	seq_printf(m, "  INSTPM: 0x%08x\n", error->instpm);
+	seq_printf(m, "  seqno: 0x%08x\n", error->seqno);
 
-	if (error->active_bo_count) {
-		seq_printf(m, "Buffers [%d]:\n", error->active_bo_count);
+	for (i = 0; i < 16; i++)
+		seq_printf(m, "  fence[%d] = %08llx\n", i, error->fence[i]);
 
-		for (i = 0; i < error->active_bo_count; i++) {
-			seq_printf(m, "  %08x %8zd %08x %08x %08x%s%s%s%s",
-				   error->active_bo[i].gtt_offset,
-				   error->active_bo[i].size,
-				   error->active_bo[i].read_domains,
-				   error->active_bo[i].write_domain,
-				   error->active_bo[i].seqno,
-				   pin_flag(error->active_bo[i].pinned),
-				   tiling_flag(error->active_bo[i].tiling),
-				   dirty_flag(error->active_bo[i].dirty),
-				   purgeable_flag(error->active_bo[i].purgeable));
+	if (error->active_bo)
+		print_error_buffers(m, "Active",
+				    error->active_bo,
+				    error->active_bo_count);
 
-			if (error->active_bo[i].name)
-				seq_printf(m, " (name: %d)", error->active_bo[i].name);
-			if (error->active_bo[i].fence_reg != I915_FENCE_REG_NONE)
-				seq_printf(m, " (fence: %d)", error->active_bo[i].fence_reg);
-
-			seq_printf(m, "\n");
-		}
-	}
+	if (error->pinned_bo)
+		print_error_buffers(m, "Pinned",
+				    error->pinned_bo,
+				    error->pinned_bo_count);
 
 	for (i = 0; i < ARRAY_SIZE(error->batchbuffer); i++) {
 		if (error->batchbuffer[i]) {
 			struct drm_i915_error_object *obj = error->batchbuffer[i];
 
-			seq_printf(m, "--- gtt_offset = 0x%08x\n", obj->gtt_offset);
+			seq_printf(m, "%s --- gtt_offset = 0x%08x\n",
+				   dev_priv->ring[i].name,
+				   obj->gtt_offset);
 			offset = 0;
 			for (page = 0; page < obj->page_count; page++) {
 				for (elt = 0; elt < PAGE_SIZE/4; elt++) {
@@ -635,6 +821,9 @@
 	if (error->overlay)
 		intel_overlay_print_error_state(m, error->overlay);
 
+	if (error->display)
+		intel_display_print_error_state(m, dev, error->display);
+
 out:
 	spin_unlock_irqrestore(&dev_priv->error_lock, flags);
 
@@ -658,15 +847,51 @@
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	u16 rgvswctl = I915_READ16(MEMSWCTL);
-	u16 rgvstat = I915_READ16(MEMSTAT_ILK);
 
-	seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
-	seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
-	seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
-		   MEMSTAT_VID_SHIFT);
-	seq_printf(m, "Current P-state: %d\n",
-		   (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
+	if (IS_GEN5(dev)) {
+		u16 rgvswctl = I915_READ16(MEMSWCTL);
+		u16 rgvstat = I915_READ16(MEMSTAT_ILK);
+
+		seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
+		seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
+		seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
+			   MEMSTAT_VID_SHIFT);
+		seq_printf(m, "Current P-state: %d\n",
+			   (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
+	} else if (IS_GEN6(dev)) {
+		u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
+		u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
+		u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
+		int max_freq;
+
+		/* RPSTAT1 is in the GT power well */
+		__gen6_force_wake_get(dev_priv);
+
+		seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
+		seq_printf(m, "RPSTAT1: 0x%08x\n", I915_READ(GEN6_RPSTAT1));
+		seq_printf(m, "Render p-state ratio: %d\n",
+			   (gt_perf_status & 0xff00) >> 8);
+		seq_printf(m, "Render p-state VID: %d\n",
+			   gt_perf_status & 0xff);
+		seq_printf(m, "Render p-state limit: %d\n",
+			   rp_state_limits & 0xff);
+
+		max_freq = (rp_state_cap & 0xff0000) >> 16;
+		seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
+			   max_freq * 100);
+
+		max_freq = (rp_state_cap & 0xff00) >> 8;
+		seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
+			   max_freq * 100);
+
+		max_freq = rp_state_cap & 0xff;
+		seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
+			   max_freq * 100);
+
+		__gen6_force_wake_put(dev_priv);
+	} else {
+		seq_printf(m, "no P-state info available\n");
+	}
 
 	return 0;
 }
@@ -715,7 +940,7 @@
 	struct drm_device *dev = node->minor->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	u32 rgvmodectl = I915_READ(MEMMODECTL);
-	u32 rstdbyctl = I915_READ(MCHBAR_RENDER_STANDBY);
+	u32 rstdbyctl = I915_READ(RSTDBYCTL);
 	u16 crstandvid = I915_READ16(CRSTANDVID);
 
 	seq_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ?
@@ -738,6 +963,30 @@
 	seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
 	seq_printf(m, "Render standby enabled: %s\n",
 		   (rstdbyctl & RCX_SW_EXIT) ? "no" : "yes");
+	seq_printf(m, "Current RS state: ");
+	switch (rstdbyctl & RSX_STATUS_MASK) {
+	case RSX_STATUS_ON:
+		seq_printf(m, "on\n");
+		break;
+	case RSX_STATUS_RC1:
+		seq_printf(m, "RC1\n");
+		break;
+	case RSX_STATUS_RC1E:
+		seq_printf(m, "RC1E\n");
+		break;
+	case RSX_STATUS_RS1:
+		seq_printf(m, "RS1\n");
+		break;
+	case RSX_STATUS_RS2:
+		seq_printf(m, "RS2 (RC6)\n");
+		break;
+	case RSX_STATUS_RS3:
+		seq_printf(m, "RC3 (RC6+)\n");
+		break;
+	default:
+		seq_printf(m, "unknown\n");
+		break;
+	}
 
 	return 0;
 }
@@ -794,7 +1043,7 @@
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	bool sr_enabled = false;
 
-	if (IS_GEN5(dev))
+	if (HAS_PCH_SPLIT(dev))
 		sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
 	else if (IS_CRESTLINE(dev) || IS_I945G(dev) || IS_I945GM(dev))
 		sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
@@ -886,7 +1135,7 @@
 		   fb->base.height,
 		   fb->base.depth,
 		   fb->base.bits_per_pixel);
-	describe_obj(m, to_intel_bo(fb->obj));
+	describe_obj(m, fb->obj);
 	seq_printf(m, "\n");
 
 	list_for_each_entry(fb, &dev->mode_config.fb_list, base.head) {
@@ -898,7 +1147,7 @@
 			   fb->base.height,
 			   fb->base.depth,
 			   fb->base.bits_per_pixel);
-		describe_obj(m, to_intel_bo(fb->obj));
+		describe_obj(m, fb->obj);
 		seq_printf(m, "\n");
 	}
 
@@ -943,7 +1192,6 @@
 		  loff_t *ppos)
 {
 	struct drm_device *dev = filp->private_data;
-	drm_i915_private_t *dev_priv = dev->dev_private;
 	char buf[20];
 	int val = 1;
 
@@ -959,12 +1207,7 @@
 	}
 
 	DRM_INFO("Manually setting wedged to %d\n", val);
-
-	atomic_set(&dev_priv->mm.wedged, val);
-	if (val) {
-		wake_up_all(&dev_priv->irq_queue);
-		queue_work(dev_priv->wq, &dev_priv->error_work);
-	}
+	i915_handle_error(dev, val);
 
 	return cnt;
 }
@@ -1018,6 +1261,7 @@
 static struct drm_info_list i915_debugfs_list[] = {
 	{"i915_capabilities", i915_capabilities, 0, 0},
 	{"i915_gem_objects", i915_gem_object_info, 0},
+	{"i915_gem_gtt", i915_gem_gtt_info, 0},
 	{"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
 	{"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST},
 	{"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
@@ -1028,9 +1272,15 @@
 	{"i915_gem_seqno", i915_gem_seqno_info, 0},
 	{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
 	{"i915_gem_interrupt", i915_interrupt_info, 0},
-	{"i915_gem_hws", i915_hws_info, 0},
-	{"i915_ringbuffer_data", i915_ringbuffer_data, 0},
-	{"i915_ringbuffer_info", i915_ringbuffer_info, 0},
+	{"i915_gem_hws", i915_hws_info, 0, (void *)RCS},
+	{"i915_gem_hws_blt", i915_hws_info, 0, (void *)BCS},
+	{"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS},
+	{"i915_ringbuffer_data", i915_ringbuffer_data, 0, (void *)RCS},
+	{"i915_ringbuffer_info", i915_ringbuffer_info, 0, (void *)RCS},
+	{"i915_bsd_ringbuffer_data", i915_ringbuffer_data, 0, (void *)VCS},
+	{"i915_bsd_ringbuffer_info", i915_ringbuffer_info, 0, (void *)VCS},
+	{"i915_blt_ringbuffer_data", i915_ringbuffer_data, 0, (void *)BCS},
+	{"i915_blt_ringbuffer_info", i915_ringbuffer_info, 0, (void *)BCS},
 	{"i915_batchbuffers", i915_batchbuffer_info, 0},
 	{"i915_error_state", i915_error_state, 0},
 	{"i915_rstdby_delays", i915_rstdby_delays, 0},
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index cb900dc..844f3c9 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -50,6 +50,8 @@
 static int i915_init_phys_hws(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct intel_ring_buffer *ring = LP_RING(dev_priv);
+
 	/* Program Hardware Status Page */
 	dev_priv->status_page_dmah =
 		drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE);
@@ -58,11 +60,10 @@
 		DRM_ERROR("Can not allocate hardware status page\n");
 		return -ENOMEM;
 	}
-	dev_priv->render_ring.status_page.page_addr
-		= dev_priv->status_page_dmah->vaddr;
+	ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
 	dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;
 
-	memset(dev_priv->render_ring.status_page.page_addr, 0, PAGE_SIZE);
+	memset(ring->status_page.page_addr, 0, PAGE_SIZE);
 
 	if (INTEL_INFO(dev)->gen >= 4)
 		dev_priv->dma_status_page |= (dev_priv->dma_status_page >> 28) &
@@ -80,13 +81,15 @@
 static void i915_free_hws(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct intel_ring_buffer *ring = LP_RING(dev_priv);
+
 	if (dev_priv->status_page_dmah) {
 		drm_pci_free(dev, dev_priv->status_page_dmah);
 		dev_priv->status_page_dmah = NULL;
 	}
 
-	if (dev_priv->render_ring.status_page.gfx_addr) {
-		dev_priv->render_ring.status_page.gfx_addr = 0;
+	if (ring->status_page.gfx_addr) {
+		ring->status_page.gfx_addr = 0;
 		drm_core_ioremapfree(&dev_priv->hws_map, dev);
 	}
 
@@ -98,7 +101,7 @@
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_master_private *master_priv;
-	struct intel_ring_buffer *ring = &dev_priv->render_ring;
+	struct intel_ring_buffer *ring = LP_RING(dev_priv);
 
 	/*
 	 * We should never lose context on the ring with modesetting
@@ -107,8 +110,8 @@
 	if (drm_core_check_feature(dev, DRIVER_MODESET))
 		return;
 
-	ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
-	ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
+	ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
+	ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
 	ring->space = ring->head - (ring->tail + 8);
 	if (ring->space < 0)
 		ring->space += ring->size;
@@ -124,6 +127,8 @@
 static int i915_dma_cleanup(struct drm_device * dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	int i;
+
 	/* Make sure interrupts are disabled here because the uninstall ioctl
 	 * may not have been called from userspace and after dev_private
 	 * is freed, it's too late.
@@ -132,9 +137,8 @@
 		drm_irq_uninstall(dev);
 
 	mutex_lock(&dev->struct_mutex);
-	intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
-	intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring);
-	intel_cleanup_ring_buffer(dev, &dev_priv->blt_ring);
+	for (i = 0; i < I915_NUM_RINGS; i++)
+		intel_cleanup_ring_buffer(&dev_priv->ring[i]);
 	mutex_unlock(&dev->struct_mutex);
 
 	/* Clear the HWS virtual address at teardown */
@@ -148,6 +152,7 @@
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
+	struct intel_ring_buffer *ring = LP_RING(dev_priv);
 
 	master_priv->sarea = drm_getsarea(dev);
 	if (master_priv->sarea) {
@@ -158,24 +163,24 @@
 	}
 
 	if (init->ring_size != 0) {
-		if (dev_priv->render_ring.gem_object != NULL) {
+		if (ring->obj != NULL) {
 			i915_dma_cleanup(dev);
 			DRM_ERROR("Client tried to initialize ringbuffer in "
 				  "GEM mode\n");
 			return -EINVAL;
 		}
 
-		dev_priv->render_ring.size = init->ring_size;
+		ring->size = init->ring_size;
 
-		dev_priv->render_ring.map.offset = init->ring_start;
-		dev_priv->render_ring.map.size = init->ring_size;
-		dev_priv->render_ring.map.type = 0;
-		dev_priv->render_ring.map.flags = 0;
-		dev_priv->render_ring.map.mtrr = 0;
+		ring->map.offset = init->ring_start;
+		ring->map.size = init->ring_size;
+		ring->map.type = 0;
+		ring->map.flags = 0;
+		ring->map.mtrr = 0;
 
-		drm_core_ioremap_wc(&dev_priv->render_ring.map, dev);
+		drm_core_ioremap_wc(&ring->map, dev);
 
-		if (dev_priv->render_ring.map.handle == NULL) {
+		if (ring->map.handle == NULL) {
 			i915_dma_cleanup(dev);
 			DRM_ERROR("can not ioremap virtual address for"
 				  " ring buffer\n");
@@ -183,7 +188,7 @@
 		}
 	}
 
-	dev_priv->render_ring.virtual_start = dev_priv->render_ring.map.handle;
+	ring->virtual_start = ring->map.handle;
 
 	dev_priv->cpp = init->cpp;
 	dev_priv->back_offset = init->back_offset;
@@ -202,12 +207,10 @@
 static int i915_dma_resume(struct drm_device * dev)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	struct intel_ring_buffer *ring = LP_RING(dev_priv);
 
-	struct intel_ring_buffer *ring;
 	DRM_DEBUG_DRIVER("%s\n", __func__);
 
-	ring = &dev_priv->render_ring;
-
 	if (ring->map.handle == NULL) {
 		DRM_ERROR("can not ioremap virtual address for"
 			  " ring buffer\n");
@@ -222,7 +225,7 @@
 	DRM_DEBUG_DRIVER("hw status page @ %p\n",
 				ring->status_page.page_addr);
 	if (ring->status_page.gfx_addr != 0)
-		intel_ring_setup_status_page(dev, ring);
+		intel_ring_setup_status_page(ring);
 	else
 		I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
 
@@ -264,7 +267,7 @@
  * instruction detected will be given a size of zero, which is a
  * signal to abort the rest of the buffer.
  */
-static int do_validate_cmd(int cmd)
+static int validate_cmd(int cmd)
 {
 	switch (((cmd >> 29) & 0x7)) {
 	case 0x0:
@@ -322,40 +325,27 @@
 	return 0;
 }
 
-static int validate_cmd(int cmd)
-{
-	int ret = do_validate_cmd(cmd);
-
-/*	printk("validate_cmd( %x ): %d\n", cmd, ret); */
-
-	return ret;
-}
-
 static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	int i;
+	int i, ret;
 
-	if ((dwords+1) * sizeof(int) >= dev_priv->render_ring.size - 8)
+	if ((dwords+1) * sizeof(int) >= LP_RING(dev_priv)->size - 8)
 		return -EINVAL;
 
-	BEGIN_LP_RING((dwords+1)&~1);
-
 	for (i = 0; i < dwords;) {
-		int cmd, sz;
-
-		cmd = buffer[i];
-
-		if ((sz = validate_cmd(cmd)) == 0 || i + sz > dwords)
+		int sz = validate_cmd(buffer[i]);
+		if (sz == 0 || i + sz > dwords)
 			return -EINVAL;
-
-		OUT_RING(cmd);
-
-		while (++i, --sz) {
-			OUT_RING(buffer[i]);
-		}
+		i += sz;
 	}
 
+	ret = BEGIN_LP_RING((dwords+1)&~1);
+	if (ret)
+		return ret;
+
+	for (i = 0; i < dwords; i++)
+		OUT_RING(buffer[i]);
 	if (dwords & 1)
 		OUT_RING(0);
 
@@ -366,34 +356,41 @@
 
 int
 i915_emit_box(struct drm_device *dev,
-	      struct drm_clip_rect *boxes,
-	      int i, int DR1, int DR4)
+	      struct drm_clip_rect *box,
+	      int DR1, int DR4)
 {
-	struct drm_clip_rect box = boxes[i];
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret;
 
-	if (box.y2 <= box.y1 || box.x2 <= box.x1 || box.y2 <= 0 || box.x2 <= 0) {
+	if (box->y2 <= box->y1 || box->x2 <= box->x1 ||
+	    box->y2 <= 0 || box->x2 <= 0) {
 		DRM_ERROR("Bad box %d,%d..%d,%d\n",
-			  box.x1, box.y1, box.x2, box.y2);
+			  box->x1, box->y1, box->x2, box->y2);
 		return -EINVAL;
 	}
 
 	if (INTEL_INFO(dev)->gen >= 4) {
-		BEGIN_LP_RING(4);
+		ret = BEGIN_LP_RING(4);
+		if (ret)
+			return ret;
+
 		OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
-		OUT_RING((box.x1 & 0xffff) | (box.y1 << 16));
-		OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16));
+		OUT_RING((box->x1 & 0xffff) | (box->y1 << 16));
+		OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16));
 		OUT_RING(DR4);
-		ADVANCE_LP_RING();
 	} else {
-		BEGIN_LP_RING(6);
+		ret = BEGIN_LP_RING(6);
+		if (ret)
+			return ret;
+
 		OUT_RING(GFX_OP_DRAWRECT_INFO);
 		OUT_RING(DR1);
-		OUT_RING((box.x1 & 0xffff) | (box.y1 << 16));
-		OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16));
+		OUT_RING((box->x1 & 0xffff) | (box->y1 << 16));
+		OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16));
 		OUT_RING(DR4);
 		OUT_RING(0);
-		ADVANCE_LP_RING();
 	}
+	ADVANCE_LP_RING();
 
 	return 0;
 }
@@ -413,12 +410,13 @@
 	if (master_priv->sarea_priv)
 		master_priv->sarea_priv->last_enqueue = dev_priv->counter;
 
-	BEGIN_LP_RING(4);
-	OUT_RING(MI_STORE_DWORD_INDEX);
-	OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-	OUT_RING(dev_priv->counter);
-	OUT_RING(0);
-	ADVANCE_LP_RING();
+	if (BEGIN_LP_RING(4) == 0) {
+		OUT_RING(MI_STORE_DWORD_INDEX);
+		OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+		OUT_RING(dev_priv->counter);
+		OUT_RING(0);
+		ADVANCE_LP_RING();
+	}
 }
 
 static int i915_dispatch_cmdbuffer(struct drm_device * dev,
@@ -440,7 +438,7 @@
 
 	for (i = 0; i < count; i++) {
 		if (i < nbox) {
-			ret = i915_emit_box(dev, cliprects, i,
+			ret = i915_emit_box(dev, &cliprects[i],
 					    cmd->DR1, cmd->DR4);
 			if (ret)
 				return ret;
@@ -459,8 +457,9 @@
 				     drm_i915_batchbuffer_t * batch,
 				     struct drm_clip_rect *cliprects)
 {
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	int nbox = batch->num_cliprects;
-	int i = 0, count;
+	int i, count, ret;
 
 	if ((batch->start | batch->used) & 0x7) {
 		DRM_ERROR("alignment");
@@ -470,17 +469,19 @@
 	i915_kernel_lost_context(dev);
 
 	count = nbox ? nbox : 1;
-
 	for (i = 0; i < count; i++) {
 		if (i < nbox) {
-			int ret = i915_emit_box(dev, cliprects, i,
-						batch->DR1, batch->DR4);
+			ret = i915_emit_box(dev, &cliprects[i],
+					    batch->DR1, batch->DR4);
 			if (ret)
 				return ret;
 		}
 
 		if (!IS_I830(dev) && !IS_845G(dev)) {
-			BEGIN_LP_RING(2);
+			ret = BEGIN_LP_RING(2);
+			if (ret)
+				return ret;
+
 			if (INTEL_INFO(dev)->gen >= 4) {
 				OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965);
 				OUT_RING(batch->start);
@@ -488,26 +489,29 @@
 				OUT_RING(MI_BATCH_BUFFER_START | (2 << 6));
 				OUT_RING(batch->start | MI_BATCH_NON_SECURE);
 			}
-			ADVANCE_LP_RING();
 		} else {
-			BEGIN_LP_RING(4);
+			ret = BEGIN_LP_RING(4);
+			if (ret)
+				return ret;
+
 			OUT_RING(MI_BATCH_BUFFER);
 			OUT_RING(batch->start | MI_BATCH_NON_SECURE);
 			OUT_RING(batch->start + batch->used - 4);
 			OUT_RING(0);
-			ADVANCE_LP_RING();
 		}
+		ADVANCE_LP_RING();
 	}
 
 
 	if (IS_G4X(dev) || IS_GEN5(dev)) {
-		BEGIN_LP_RING(2);
-		OUT_RING(MI_FLUSH | MI_NO_WRITE_FLUSH | MI_INVALIDATE_ISP);
-		OUT_RING(MI_NOOP);
-		ADVANCE_LP_RING();
+		if (BEGIN_LP_RING(2) == 0) {
+			OUT_RING(MI_FLUSH | MI_NO_WRITE_FLUSH | MI_INVALIDATE_ISP);
+			OUT_RING(MI_NOOP);
+			ADVANCE_LP_RING();
+		}
 	}
-	i915_emit_breadcrumb(dev);
 
+	i915_emit_breadcrumb(dev);
 	return 0;
 }
 
@@ -516,6 +520,7 @@
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_master_private *master_priv =
 		dev->primary->master->driver_priv;
+	int ret;
 
 	if (!master_priv->sarea_priv)
 		return -EINVAL;
@@ -527,12 +532,13 @@
 
 	i915_kernel_lost_context(dev);
 
-	BEGIN_LP_RING(2);
+	ret = BEGIN_LP_RING(10);
+	if (ret)
+		return ret;
+
 	OUT_RING(MI_FLUSH | MI_READ_FLUSH);
 	OUT_RING(0);
-	ADVANCE_LP_RING();
 
-	BEGIN_LP_RING(6);
 	OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP);
 	OUT_RING(0);
 	if (dev_priv->current_page == 0) {
@@ -543,33 +549,32 @@
 		dev_priv->current_page = 0;
 	}
 	OUT_RING(0);
-	ADVANCE_LP_RING();
 
-	BEGIN_LP_RING(2);
 	OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP);
 	OUT_RING(0);
+
 	ADVANCE_LP_RING();
 
 	master_priv->sarea_priv->last_enqueue = dev_priv->counter++;
 
-	BEGIN_LP_RING(4);
-	OUT_RING(MI_STORE_DWORD_INDEX);
-	OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-	OUT_RING(dev_priv->counter);
-	OUT_RING(0);
-	ADVANCE_LP_RING();
+	if (BEGIN_LP_RING(4) == 0) {
+		OUT_RING(MI_STORE_DWORD_INDEX);
+		OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+		OUT_RING(dev_priv->counter);
+		OUT_RING(0);
+		ADVANCE_LP_RING();
+	}
 
 	master_priv->sarea_priv->pf_current_page = dev_priv->current_page;
 	return 0;
 }
 
-static int i915_quiescent(struct drm_device * dev)
+static int i915_quiescent(struct drm_device *dev)
 {
-	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct intel_ring_buffer *ring = LP_RING(dev->dev_private);
 
 	i915_kernel_lost_context(dev);
-	return intel_wait_ring_buffer(dev, &dev_priv->render_ring,
-				      dev_priv->render_ring.size - 8);
+	return intel_wait_ring_buffer(ring, ring->size - 8);
 }
 
 static int i915_flush_ioctl(struct drm_device *dev, void *data,
@@ -768,9 +773,15 @@
 	case I915_PARAM_HAS_BLT:
 		value = HAS_BLT(dev);
 		break;
+	case I915_PARAM_HAS_RELAXED_FENCING:
+		value = 1;
+		break;
 	case I915_PARAM_HAS_COHERENT_RINGS:
 		value = 1;
 		break;
+	case I915_PARAM_HAS_EXEC_CONSTANTS:
+		value = INTEL_INFO(dev)->gen >= 4;
+		break;
 	default:
 		DRM_DEBUG_DRIVER("Unknown parameter %d\n",
 				 param->param);
@@ -826,7 +837,7 @@
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	drm_i915_hws_addr_t *hws = data;
-	struct intel_ring_buffer *ring = &dev_priv->render_ring;
+	struct intel_ring_buffer *ring = LP_RING(dev_priv);
 
 	if (!I915_NEED_GFX_HWS(dev))
 		return -EINVAL;
@@ -1005,73 +1016,47 @@
 #define PTE_VALID			(1 << 0)
 
 /**
- * i915_gtt_to_phys - take a GTT address and turn it into a physical one
+ * i915_stolen_to_phys - take an offset into stolen memory and turn it into
+ *                       a physical one
  * @dev: drm device
- * @gtt_addr: address to translate
+ * @offset: address to translate
  *
- * Some chip functions require allocations from stolen space but need the
- * physical address of the memory in question.  We use this routine
- * to get a physical address suitable for register programming from a given
- * GTT address.
+ * Some chip functions require allocations from stolen space and need the
+ * physical address of the memory in question.
  */
-static unsigned long i915_gtt_to_phys(struct drm_device *dev,
-				      unsigned long gtt_addr)
+static unsigned long i915_stolen_to_phys(struct drm_device *dev, u32 offset)
 {
-	unsigned long *gtt;
-	unsigned long entry, phys;
-	int gtt_bar = IS_GEN2(dev) ? 1 : 0;
-	int gtt_offset, gtt_size;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct pci_dev *pdev = dev_priv->bridge_dev;
+	u32 base;
 
-	if (INTEL_INFO(dev)->gen >= 4) {
-		if (IS_G4X(dev) || INTEL_INFO(dev)->gen > 4) {
-			gtt_offset = 2*1024*1024;
-			gtt_size = 2*1024*1024;
-		} else {
-			gtt_offset = 512*1024;
-			gtt_size = 512*1024;
-		}
+#if 0
+	/* On the machines I have tested the Graphics Base of Stolen Memory
+	 * is unreliable, so compute the base by subtracting the stolen memory
+	 * from the Top of Low Usable DRAM which is where the BIOS places
+	 * the graphics stolen memory.
+	 */
+	if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) {
+		/* top 32bits are reserved = 0 */
+		pci_read_config_dword(pdev, 0xA4, &base);
 	} else {
-		gtt_bar = 3;
-		gtt_offset = 0;
-		gtt_size = pci_resource_len(dev->pdev, gtt_bar);
+		/* XXX presume 8xx is the same as i915 */
+		pci_bus_read_config_dword(pdev->bus, 2, 0x5C, &base);
 	}
-
-	gtt = ioremap_wc(pci_resource_start(dev->pdev, gtt_bar) + gtt_offset,
-			 gtt_size);
-	if (!gtt) {
-		DRM_ERROR("ioremap of GTT failed\n");
-		return 0;
+#else
+	if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) {
+		u16 val;
+		pci_read_config_word(pdev, 0xb0, &val);
+		base = val >> 4 << 20;
+	} else {
+		u8 val;
+		pci_read_config_byte(pdev, 0x9c, &val);
+		base = val >> 3 << 27;
 	}
+	base -= dev_priv->mm.gtt->stolen_size;
+#endif
 
-	entry = *(volatile u32 *)(gtt + (gtt_addr / 1024));
-
-	DRM_DEBUG_DRIVER("GTT addr: 0x%08lx, PTE: 0x%08lx\n", gtt_addr, entry);
-
-	/* Mask out these reserved bits on this hardware. */
-	if (INTEL_INFO(dev)->gen < 4 && !IS_G33(dev))
-		entry &= ~PTE_ADDRESS_MASK_HIGH;
-
-	/* If it's not a mapping type we know, then bail. */
-	if ((entry & PTE_MAPPING_TYPE_MASK) != PTE_MAPPING_TYPE_UNCACHED &&
-	    (entry & PTE_MAPPING_TYPE_MASK) != PTE_MAPPING_TYPE_CACHED)	{
-		iounmap(gtt);
-		return 0;
-	}
-
-	if (!(entry & PTE_VALID)) {
-		DRM_ERROR("bad GTT entry in stolen space\n");
-		iounmap(gtt);
-		return 0;
-	}
-
-	iounmap(gtt);
-
-	phys =(entry & PTE_ADDRESS_MASK) |
-		((uint64_t)(entry & PTE_ADDRESS_MASK_HIGH) << (32 - 4));
-
-	DRM_DEBUG_DRIVER("GTT addr: 0x%08lx, phys addr: 0x%08lx\n", gtt_addr, phys);
-
-	return phys;
+	return base + offset;
 }
 
 static void i915_warn_stolen(struct drm_device *dev)
@@ -1087,54 +1072,35 @@
 	unsigned long cfb_base;
 	unsigned long ll_base = 0;
 
-	/* Leave 1M for line length buffer & misc. */
-	compressed_fb = drm_mm_search_free(&dev_priv->mm.vram, size, 4096, 0);
-	if (!compressed_fb) {
-		dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
-		i915_warn_stolen(dev);
-		return;
-	}
+	compressed_fb = drm_mm_search_free(&dev_priv->mm.stolen, size, 4096, 0);
+	if (compressed_fb)
+		compressed_fb = drm_mm_get_block(compressed_fb, size, 4096);
+	if (!compressed_fb)
+		goto err;
 
-	compressed_fb = drm_mm_get_block(compressed_fb, size, 4096);
-	if (!compressed_fb) {
-		i915_warn_stolen(dev);
-		dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
-		return;
-	}
+	cfb_base = i915_stolen_to_phys(dev, compressed_fb->start);
+	if (!cfb_base)
+		goto err_fb;
 
-	cfb_base = i915_gtt_to_phys(dev, compressed_fb->start);
-	if (!cfb_base) {
-		DRM_ERROR("failed to get stolen phys addr, disabling FBC\n");
-		drm_mm_put_block(compressed_fb);
-	}
+	if (!(IS_GM45(dev) || HAS_PCH_SPLIT(dev))) {
+		compressed_llb = drm_mm_search_free(&dev_priv->mm.stolen,
+						    4096, 4096, 0);
+		if (compressed_llb)
+			compressed_llb = drm_mm_get_block(compressed_llb,
+							  4096, 4096);
+		if (!compressed_llb)
+			goto err_fb;
 
-	if (!(IS_GM45(dev) || IS_IRONLAKE_M(dev))) {
-		compressed_llb = drm_mm_search_free(&dev_priv->mm.vram, 4096,
-						    4096, 0);
-		if (!compressed_llb) {
-			i915_warn_stolen(dev);
-			return;
-		}
-
-		compressed_llb = drm_mm_get_block(compressed_llb, 4096, 4096);
-		if (!compressed_llb) {
-			i915_warn_stolen(dev);
-			return;
-		}
-
-		ll_base = i915_gtt_to_phys(dev, compressed_llb->start);
-		if (!ll_base) {
-			DRM_ERROR("failed to get stolen phys addr, disabling FBC\n");
-			drm_mm_put_block(compressed_fb);
-			drm_mm_put_block(compressed_llb);
-		}
+		ll_base = i915_stolen_to_phys(dev, compressed_llb->start);
+		if (!ll_base)
+			goto err_llb;
 	}
 
 	dev_priv->cfb_size = size;
 
 	intel_disable_fbc(dev);
 	dev_priv->compressed_fb = compressed_fb;
-	if (IS_IRONLAKE_M(dev))
+	if (HAS_PCH_SPLIT(dev))
 		I915_WRITE(ILK_DPFC_CB_BASE, compressed_fb->start);
 	else if (IS_GM45(dev)) {
 		I915_WRITE(DPFC_CB_BASE, compressed_fb->start);
@@ -1144,8 +1110,17 @@
 		dev_priv->compressed_llb = compressed_llb;
 	}
 
-	DRM_DEBUG_KMS("FBC base 0x%08lx, ll base 0x%08lx, size %dM\n", cfb_base,
-		  ll_base, size >> 20);
+	DRM_DEBUG_KMS("FBC base 0x%08lx, ll base 0x%08lx, size %dM\n",
+		      cfb_base, ll_base, size >> 20);
+	return;
+
+err_llb:
+	drm_mm_put_block(compressed_llb);
+err_fb:
+	drm_mm_put_block(compressed_fb);
+err:
+	dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
+	i915_warn_stolen(dev);
 }
 
 static void i915_cleanup_compression(struct drm_device *dev)
@@ -1176,12 +1151,16 @@
 	pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
 	if (state == VGA_SWITCHEROO_ON) {
 		printk(KERN_INFO "i915: switched on\n");
+		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
 		/* i915 resume handler doesn't set to D0 */
 		pci_set_power_state(dev->pdev, PCI_D0);
 		i915_resume(dev);
+		dev->switch_power_state = DRM_SWITCH_POWER_ON;
 	} else {
 		printk(KERN_ERR "i915: switched off\n");
+		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
 		i915_suspend(dev, pmm);
+		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
 	}
 }
 
@@ -1196,17 +1175,20 @@
 	return can_switch;
 }
 
-static int i915_load_modeset_init(struct drm_device *dev,
-				  unsigned long prealloc_size,
-				  unsigned long agp_size)
+static int i915_load_modeset_init(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	unsigned long prealloc_size, gtt_size, mappable_size;
 	int ret = 0;
 
-	/* Basic memrange allocator for stolen space (aka mm.vram) */
-	drm_mm_init(&dev_priv->mm.vram, 0, prealloc_size);
+	prealloc_size = dev_priv->mm.gtt->stolen_size;
+	gtt_size = dev_priv->mm.gtt->gtt_total_entries << PAGE_SHIFT;
+	mappable_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
 
-	/* Let GEM Manage from end of prealloc space to end of aperture.
+	/* Basic memrange allocator for stolen space */
+	drm_mm_init(&dev_priv->mm.stolen, 0, prealloc_size);
+
+	/* Let GEM Manage all of the aperture.
 	 *
 	 * However, leave one page at the end still bound to the scratch page.
 	 * There are a number of places where the hardware apparently
@@ -1215,7 +1197,7 @@
 	 * at the last page of the aperture.  One page should be enough to
 	 * keep any prefetching inside of the aperture.
 	 */
-	i915_gem_do_init(dev, prealloc_size, agp_size - 4096);
+	i915_gem_do_init(dev, 0, mappable_size, gtt_size - PAGE_SIZE);
 
 	mutex_lock(&dev->struct_mutex);
 	ret = i915_gem_init_ringbuffer(dev);
@@ -1227,16 +1209,17 @@
 	if (I915_HAS_FBC(dev) && i915_powersave) {
 		int cfb_size;
 
-		/* Try to get an 8M buffer... */
-		if (prealloc_size > (9*1024*1024))
-			cfb_size = 8*1024*1024;
+		/* Leave 1M for line length buffer & misc. */
+
+		/* Try to get a 32M buffer... */
+		if (prealloc_size > (36*1024*1024))
+			cfb_size = 32*1024*1024;
 		else /* fall back to 7/8 of the stolen space */
 			cfb_size = prealloc_size * 7 / 8;
 		i915_setup_compression(dev, cfb_size);
 	}
 
-	/* Allow hardware batchbuffers unless told otherwise.
-	 */
+	/* Allow hardware batchbuffers unless told otherwise. */
 	dev_priv->allow_batchbuffer = 1;
 
 	ret = intel_parse_bios(dev);
@@ -1252,6 +1235,7 @@
 
 	ret = vga_switcheroo_register_client(dev->pdev,
 					     i915_switcheroo_set_state,
+					     NULL,
 					     i915_switcheroo_can_switch);
 	if (ret)
 		goto cleanup_vga_client;
@@ -1426,152 +1410,12 @@
 	}
 }
 
-struct v_table {
-	u8 vid;
-	unsigned long vd; /* in .1 mil */
-	unsigned long vm; /* in .1 mil */
-	u8 pvid;
-};
-
-static struct v_table v_table[] = {
-	{ 0, 16125, 15000, 0x7f, },
-	{ 1, 16000, 14875, 0x7e, },
-	{ 2, 15875, 14750, 0x7d, },
-	{ 3, 15750, 14625, 0x7c, },
-	{ 4, 15625, 14500, 0x7b, },
-	{ 5, 15500, 14375, 0x7a, },
-	{ 6, 15375, 14250, 0x79, },
-	{ 7, 15250, 14125, 0x78, },
-	{ 8, 15125, 14000, 0x77, },
-	{ 9, 15000, 13875, 0x76, },
-	{ 10, 14875, 13750, 0x75, },
-	{ 11, 14750, 13625, 0x74, },
-	{ 12, 14625, 13500, 0x73, },
-	{ 13, 14500, 13375, 0x72, },
-	{ 14, 14375, 13250, 0x71, },
-	{ 15, 14250, 13125, 0x70, },
-	{ 16, 14125, 13000, 0x6f, },
-	{ 17, 14000, 12875, 0x6e, },
-	{ 18, 13875, 12750, 0x6d, },
-	{ 19, 13750, 12625, 0x6c, },
-	{ 20, 13625, 12500, 0x6b, },
-	{ 21, 13500, 12375, 0x6a, },
-	{ 22, 13375, 12250, 0x69, },
-	{ 23, 13250, 12125, 0x68, },
-	{ 24, 13125, 12000, 0x67, },
-	{ 25, 13000, 11875, 0x66, },
-	{ 26, 12875, 11750, 0x65, },
-	{ 27, 12750, 11625, 0x64, },
-	{ 28, 12625, 11500, 0x63, },
-	{ 29, 12500, 11375, 0x62, },
-	{ 30, 12375, 11250, 0x61, },
-	{ 31, 12250, 11125, 0x60, },
-	{ 32, 12125, 11000, 0x5f, },
-	{ 33, 12000, 10875, 0x5e, },
-	{ 34, 11875, 10750, 0x5d, },
-	{ 35, 11750, 10625, 0x5c, },
-	{ 36, 11625, 10500, 0x5b, },
-	{ 37, 11500, 10375, 0x5a, },
-	{ 38, 11375, 10250, 0x59, },
-	{ 39, 11250, 10125, 0x58, },
-	{ 40, 11125, 10000, 0x57, },
-	{ 41, 11000, 9875, 0x56, },
-	{ 42, 10875, 9750, 0x55, },
-	{ 43, 10750, 9625, 0x54, },
-	{ 44, 10625, 9500, 0x53, },
-	{ 45, 10500, 9375, 0x52, },
-	{ 46, 10375, 9250, 0x51, },
-	{ 47, 10250, 9125, 0x50, },
-	{ 48, 10125, 9000, 0x4f, },
-	{ 49, 10000, 8875, 0x4e, },
-	{ 50, 9875, 8750, 0x4d, },
-	{ 51, 9750, 8625, 0x4c, },
-	{ 52, 9625, 8500, 0x4b, },
-	{ 53, 9500, 8375, 0x4a, },
-	{ 54, 9375, 8250, 0x49, },
-	{ 55, 9250, 8125, 0x48, },
-	{ 56, 9125, 8000, 0x47, },
-	{ 57, 9000, 7875, 0x46, },
-	{ 58, 8875, 7750, 0x45, },
-	{ 59, 8750, 7625, 0x44, },
-	{ 60, 8625, 7500, 0x43, },
-	{ 61, 8500, 7375, 0x42, },
-	{ 62, 8375, 7250, 0x41, },
-	{ 63, 8250, 7125, 0x40, },
-	{ 64, 8125, 7000, 0x3f, },
-	{ 65, 8000, 6875, 0x3e, },
-	{ 66, 7875, 6750, 0x3d, },
-	{ 67, 7750, 6625, 0x3c, },
-	{ 68, 7625, 6500, 0x3b, },
-	{ 69, 7500, 6375, 0x3a, },
-	{ 70, 7375, 6250, 0x39, },
-	{ 71, 7250, 6125, 0x38, },
-	{ 72, 7125, 6000, 0x37, },
-	{ 73, 7000, 5875, 0x36, },
-	{ 74, 6875, 5750, 0x35, },
-	{ 75, 6750, 5625, 0x34, },
-	{ 76, 6625, 5500, 0x33, },
-	{ 77, 6500, 5375, 0x32, },
-	{ 78, 6375, 5250, 0x31, },
-	{ 79, 6250, 5125, 0x30, },
-	{ 80, 6125, 5000, 0x2f, },
-	{ 81, 6000, 4875, 0x2e, },
-	{ 82, 5875, 4750, 0x2d, },
-	{ 83, 5750, 4625, 0x2c, },
-	{ 84, 5625, 4500, 0x2b, },
-	{ 85, 5500, 4375, 0x2a, },
-	{ 86, 5375, 4250, 0x29, },
-	{ 87, 5250, 4125, 0x28, },
-	{ 88, 5125, 4000, 0x27, },
-	{ 89, 5000, 3875, 0x26, },
-	{ 90, 4875, 3750, 0x25, },
-	{ 91, 4750, 3625, 0x24, },
-	{ 92, 4625, 3500, 0x23, },
-	{ 93, 4500, 3375, 0x22, },
-	{ 94, 4375, 3250, 0x21, },
-	{ 95, 4250, 3125, 0x20, },
-	{ 96, 4125, 3000, 0x1f, },
-	{ 97, 4125, 3000, 0x1e, },
-	{ 98, 4125, 3000, 0x1d, },
-	{ 99, 4125, 3000, 0x1c, },
-	{ 100, 4125, 3000, 0x1b, },
-	{ 101, 4125, 3000, 0x1a, },
-	{ 102, 4125, 3000, 0x19, },
-	{ 103, 4125, 3000, 0x18, },
-	{ 104, 4125, 3000, 0x17, },
-	{ 105, 4125, 3000, 0x16, },
-	{ 106, 4125, 3000, 0x15, },
-	{ 107, 4125, 3000, 0x14, },
-	{ 108, 4125, 3000, 0x13, },
-	{ 109, 4125, 3000, 0x12, },
-	{ 110, 4125, 3000, 0x11, },
-	{ 111, 4125, 3000, 0x10, },
-	{ 112, 4125, 3000, 0x0f, },
-	{ 113, 4125, 3000, 0x0e, },
-	{ 114, 4125, 3000, 0x0d, },
-	{ 115, 4125, 3000, 0x0c, },
-	{ 116, 4125, 3000, 0x0b, },
-	{ 117, 4125, 3000, 0x0a, },
-	{ 118, 4125, 3000, 0x09, },
-	{ 119, 4125, 3000, 0x08, },
-	{ 120, 1125, 0, 0x07, },
-	{ 121, 1000, 0, 0x06, },
-	{ 122, 875, 0, 0x05, },
-	{ 123, 750, 0, 0x04, },
-	{ 124, 625, 0, 0x03, },
-	{ 125, 500, 0, 0x02, },
-	{ 126, 375, 0, 0x01, },
-	{ 127, 0, 0, 0x00, },
-};
-
-struct cparams {
-	int i;
-	int t;
-	int m;
-	int c;
-};
-
-static struct cparams cparams[] = {
+static const struct cparams {
+	u16 i;
+	u16 t;
+	u16 m;
+	u16 c;
+} cparams[] = {
 	{ 1, 1333, 301, 28664 },
 	{ 1, 1066, 294, 24460 },
 	{ 1, 800, 294, 25192 },
@@ -1637,21 +1481,145 @@
 	return ((m * x) / 127) - b;
 }
 
-static unsigned long pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
+static u16 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
 {
-	unsigned long val = 0;
-	int i;
-
-	for (i = 0; i < ARRAY_SIZE(v_table); i++) {
-		if (v_table[i].pvid == pxvid) {
-			if (IS_MOBILE(dev_priv->dev))
-				val = v_table[i].vm;
-			else
-				val = v_table[i].vd;
-		}
-	}
-
-	return val;
+	static const struct v_table {
+		u16 vd; /* in .1 mil */
+		u16 vm; /* in .1 mil */
+	} v_table[] = {
+		{ 0, 0, },
+		{ 375, 0, },
+		{ 500, 0, },
+		{ 625, 0, },
+		{ 750, 0, },
+		{ 875, 0, },
+		{ 1000, 0, },
+		{ 1125, 0, },
+		{ 4125, 3000, },
+		{ 4125, 3000, },
+		{ 4125, 3000, },
+		{ 4125, 3000, },
+		{ 4125, 3000, },
+		{ 4125, 3000, },
+		{ 4125, 3000, },
+		{ 4125, 3000, },
+		{ 4125, 3000, },
+		{ 4125, 3000, },
+		{ 4125, 3000, },
+		{ 4125, 3000, },
+		{ 4125, 3000, },
+		{ 4125, 3000, },
+		{ 4125, 3000, },
+		{ 4125, 3000, },
+		{ 4125, 3000, },
+		{ 4125, 3000, },
+		{ 4125, 3000, },
+		{ 4125, 3000, },
+		{ 4125, 3000, },
+		{ 4125, 3000, },
+		{ 4125, 3000, },
+		{ 4125, 3000, },
+		{ 4250, 3125, },
+		{ 4375, 3250, },
+		{ 4500, 3375, },
+		{ 4625, 3500, },
+		{ 4750, 3625, },
+		{ 4875, 3750, },
+		{ 5000, 3875, },
+		{ 5125, 4000, },
+		{ 5250, 4125, },
+		{ 5375, 4250, },
+		{ 5500, 4375, },
+		{ 5625, 4500, },
+		{ 5750, 4625, },
+		{ 5875, 4750, },
+		{ 6000, 4875, },
+		{ 6125, 5000, },
+		{ 6250, 5125, },
+		{ 6375, 5250, },
+		{ 6500, 5375, },
+		{ 6625, 5500, },
+		{ 6750, 5625, },
+		{ 6875, 5750, },
+		{ 7000, 5875, },
+		{ 7125, 6000, },
+		{ 7250, 6125, },
+		{ 7375, 6250, },
+		{ 7500, 6375, },
+		{ 7625, 6500, },
+		{ 7750, 6625, },
+		{ 7875, 6750, },
+		{ 8000, 6875, },
+		{ 8125, 7000, },
+		{ 8250, 7125, },
+		{ 8375, 7250, },
+		{ 8500, 7375, },
+		{ 8625, 7500, },
+		{ 8750, 7625, },
+		{ 8875, 7750, },
+		{ 9000, 7875, },
+		{ 9125, 8000, },
+		{ 9250, 8125, },
+		{ 9375, 8250, },
+		{ 9500, 8375, },
+		{ 9625, 8500, },
+		{ 9750, 8625, },
+		{ 9875, 8750, },
+		{ 10000, 8875, },
+		{ 10125, 9000, },
+		{ 10250, 9125, },
+		{ 10375, 9250, },
+		{ 10500, 9375, },
+		{ 10625, 9500, },
+		{ 10750, 9625, },
+		{ 10875, 9750, },
+		{ 11000, 9875, },
+		{ 11125, 10000, },
+		{ 11250, 10125, },
+		{ 11375, 10250, },
+		{ 11500, 10375, },
+		{ 11625, 10500, },
+		{ 11750, 10625, },
+		{ 11875, 10750, },
+		{ 12000, 10875, },
+		{ 12125, 11000, },
+		{ 12250, 11125, },
+		{ 12375, 11250, },
+		{ 12500, 11375, },
+		{ 12625, 11500, },
+		{ 12750, 11625, },
+		{ 12875, 11750, },
+		{ 13000, 11875, },
+		{ 13125, 12000, },
+		{ 13250, 12125, },
+		{ 13375, 12250, },
+		{ 13500, 12375, },
+		{ 13625, 12500, },
+		{ 13750, 12625, },
+		{ 13875, 12750, },
+		{ 14000, 12875, },
+		{ 14125, 13000, },
+		{ 14250, 13125, },
+		{ 14375, 13250, },
+		{ 14500, 13375, },
+		{ 14625, 13500, },
+		{ 14750, 13625, },
+		{ 14875, 13750, },
+		{ 15000, 13875, },
+		{ 15125, 14000, },
+		{ 15250, 14125, },
+		{ 15375, 14250, },
+		{ 15500, 14375, },
+		{ 15625, 14500, },
+		{ 15750, 14625, },
+		{ 15875, 14750, },
+		{ 16000, 14875, },
+		{ 16125, 15000, },
+	};
+	if (dev_priv->info->is_mobile)
+		return v_table[pxvid].vm;
+	else
+		return v_table[pxvid].vd;
 }
 
 void i915_update_gfx_val(struct drm_i915_private *dev_priv)
@@ -1905,9 +1873,9 @@
 int i915_driver_load(struct drm_device *dev, unsigned long flags)
 {
 	struct drm_i915_private *dev_priv;
-	resource_size_t base, size;
 	int ret = 0, mmio_bar;
-	uint32_t agp_size, prealloc_size;
+	uint32_t agp_size;
+
 	/* i915 has 4 more counters */
 	dev->counters += 4;
 	dev->types[6] = _DRM_STAT_IRQ;
@@ -1923,11 +1891,6 @@
 	dev_priv->dev = dev;
 	dev_priv->info = (struct intel_device_info *) flags;
 
-	/* Add register map (needed for suspend/resume) */
-	mmio_bar = IS_GEN2(dev) ? 1 : 0;
-	base = pci_resource_start(dev->pdev, mmio_bar);
-	size = pci_resource_len(dev->pdev, mmio_bar);
-
 	if (i915_get_bridge_dev(dev)) {
 		ret = -EIO;
 		goto free_priv;
@@ -1937,16 +1900,25 @@
 	if (IS_GEN2(dev))
 		dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30));
 
-	dev_priv->regs = ioremap(base, size);
+	mmio_bar = IS_GEN2(dev) ? 1 : 0;
+	dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, 0);
 	if (!dev_priv->regs) {
 		DRM_ERROR("failed to map registers\n");
 		ret = -EIO;
 		goto put_bridge;
 	}
 
+	dev_priv->mm.gtt = intel_gtt_get();
+	if (!dev_priv->mm.gtt) {
+		DRM_ERROR("Failed to initialize GTT\n");
+		ret = -ENODEV;
+		goto out_iomapfree;
+	}
+
+	agp_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
+
         dev_priv->mm.gtt_mapping =
-		io_mapping_create_wc(dev->agp->base,
-				     dev->agp->agp_info.aper_size * 1024*1024);
+		io_mapping_create_wc(dev->agp->base, agp_size);
 	if (dev_priv->mm.gtt_mapping == NULL) {
 		ret = -EIO;
 		goto out_rmmap;
@@ -1958,24 +1930,13 @@
 	 * MTRR if present.  Even if a UC MTRR isn't present.
 	 */
 	dev_priv->mm.gtt_mtrr = mtrr_add(dev->agp->base,
-					 dev->agp->agp_info.aper_size *
-					 1024 * 1024,
+					 agp_size,
 					 MTRR_TYPE_WRCOMB, 1);
 	if (dev_priv->mm.gtt_mtrr < 0) {
 		DRM_INFO("MTRR allocation failed.  Graphics "
 			 "performance may suffer.\n");
 	}
 
-	dev_priv->mm.gtt = intel_gtt_get();
-	if (!dev_priv->mm.gtt) {
-		DRM_ERROR("Failed to initialize GTT\n");
-		ret = -ENODEV;
-		goto out_iomapfree;
-	}
-
-	prealloc_size = dev_priv->mm.gtt->gtt_stolen_entries << PAGE_SHIFT;
-	agp_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
-
 	/* The i915 workqueue is primarily used for batched retirement of
 	 * requests (and thus managing bo) once the task has been completed
 	 * by the GPU. i915_gem_retire_requests() is called directly when we
@@ -1983,7 +1944,7 @@
 	 * bo.
 	 *
 	 * It is also used for periodic low-priority events, such as
-	 * idle-timers and hangcheck.
+	 * idle-timers and recording error state.
 	 *
 	 * All tasks on the workqueue are expected to acquire the dev mutex
 	 * so there is no point in running more than one instance of the
@@ -2001,22 +1962,6 @@
 	/* enable GEM by default */
 	dev_priv->has_gem = 1;
 
-	if (prealloc_size > agp_size * 3 / 4) {
-		DRM_ERROR("Detected broken video BIOS with %d/%dkB of video "
-			  "memory stolen.\n",
-			  prealloc_size / 1024, agp_size / 1024);
-		DRM_ERROR("Disabling GEM. (try reducing stolen memory or "
-			  "updating the BIOS to fix).\n");
-		dev_priv->has_gem = 0;
-	}
-
-	if (dev_priv->has_gem == 0 &&
-	    drm_core_check_feature(dev, DRIVER_MODESET)) {
-		DRM_ERROR("kernel modesetting requires GEM, disabling driver.\n");
-		ret = -ENODEV;
-		goto out_iomapfree;
-	}
-
 	dev->driver->get_vblank_counter = i915_get_vblank_counter;
 	dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
 	if (IS_G4X(dev) || IS_GEN5(dev) || IS_GEN6(dev)) {
@@ -2037,8 +1982,8 @@
 	/* Init HWS */
 	if (!I915_NEED_GFX_HWS(dev)) {
 		ret = i915_init_phys_hws(dev);
-		if (ret != 0)
-			goto out_workqueue_free;
+		if (ret)
+			goto out_gem_unload;
 	}
 
 	if (IS_PINEVIEW(dev))
@@ -2060,16 +2005,13 @@
 	if (!IS_I945G(dev) && !IS_I945GM(dev))
 		pci_enable_msi(dev->pdev);
 
-	spin_lock_init(&dev_priv->user_irq_lock);
+	spin_lock_init(&dev_priv->irq_lock);
 	spin_lock_init(&dev_priv->error_lock);
 	dev_priv->trace_irq_seqno = 0;
 
 	ret = drm_vblank_init(dev, I915_NUM_PIPE);
-
-	if (ret) {
-		(void) i915_driver_unload(dev);
-		return ret;
-	}
+	if (ret)
+		goto out_gem_unload;
 
 	/* Start out suspended */
 	dev_priv->mm.suspended = 1;
@@ -2077,10 +2019,10 @@
 	intel_detect_pch(dev);
 
 	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
-		ret = i915_load_modeset_init(dev, prealloc_size, agp_size);
+		ret = i915_load_modeset_init(dev);
 		if (ret < 0) {
 			DRM_ERROR("failed to init modeset\n");
-			goto out_workqueue_free;
+			goto out_gem_unload;
 		}
 	}
 
@@ -2100,12 +2042,17 @@
 
 	return 0;
 
-out_workqueue_free:
+out_gem_unload:
+	if (dev->pdev->msi_enabled)
+		pci_disable_msi(dev->pdev);
+
+	intel_teardown_gmbus(dev);
+	intel_teardown_mchbar(dev);
 	destroy_workqueue(dev_priv->wq);
 out_iomapfree:
 	io_mapping_free(dev_priv->mm.gtt_mapping);
 out_rmmap:
-	iounmap(dev_priv->regs);
+	pci_iounmap(dev->pdev, dev_priv->regs);
 put_bridge:
 	pci_dev_put(dev_priv->bridge_dev);
 free_priv:
@@ -2122,6 +2069,9 @@
 	i915_mch_dev = NULL;
 	spin_unlock(&mchdev_lock);
 
+	if (dev_priv->mm.inactive_shrinker.shrink)
+		unregister_shrinker(&dev_priv->mm.inactive_shrinker);
+
 	mutex_lock(&dev->struct_mutex);
 	ret = i915_gpu_idle(dev);
 	if (ret)
@@ -2179,7 +2129,7 @@
 		mutex_unlock(&dev->struct_mutex);
 		if (I915_HAS_FBC(dev) && i915_powersave)
 			i915_cleanup_compression(dev);
-		drm_mm_takedown(&dev_priv->mm.vram);
+		drm_mm_takedown(&dev_priv->mm.stolen);
 
 		intel_cleanup_overlay(dev);
 
@@ -2188,7 +2138,7 @@
 	}
 
 	if (dev_priv->regs != NULL)
-		iounmap(dev_priv->regs);
+		pci_iounmap(dev->pdev, dev_priv->regs);
 
 	intel_teardown_gmbus(dev);
 	intel_teardown_mchbar(dev);
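Side note on the register-mapping change in the hunks above: pci_iomap() derives the BAR's base and length itself, which is why the explicit pci_resource_start()/pci_resource_len() calls could be dropped. A minimal sketch of the map/unmap pairing (function names below are illustrative, not from the patch):

/* Illustrative sketch only -- not part of the patch. */
#include <linux/pci.h>

static int example_map_mmio(struct pci_dev *pdev, int bar, void __iomem **regs)
{
	/* maxlen == 0 maps the whole BAR */
	*regs = pci_iomap(pdev, bar, 0);
	if (*regs == NULL)
		return -EIO;
	return 0;
}

static void example_unmap_mmio(struct pci_dev *pdev, void __iomem *regs)
{
	/* pci_iounmap() is the matching teardown for pci_iomap() */
	pci_iounmap(pdev, regs);
}
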
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index f737960..0de75a2 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -49,6 +49,9 @@
 unsigned int i915_lvds_downclock = 0;
 module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400);
 
+bool i915_try_reset = true;
+module_param_named(reset, i915_try_reset, bool, 0600);
+
 static struct drm_driver driver;
 extern int intel_agp_enabled;
 
@@ -111,7 +114,7 @@
 
 static const struct intel_device_info intel_i965gm_info = {
 	.gen = 4, .is_crestline = 1,
-	.is_mobile = 1, .has_fbc = 1, .has_rc6 = 1, .has_hotplug = 1,
+	.is_mobile = 1, .has_fbc = 1, .has_hotplug = 1,
 	.has_overlay = 1,
 	.supports_tv = 1,
 };
@@ -130,7 +133,7 @@
 
 static const struct intel_device_info intel_gm45_info = {
 	.gen = 4, .is_g4x = 1,
-	.is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1,
+	.is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1,
 	.has_pipe_cxsr = 1, .has_hotplug = 1,
 	.supports_tv = 1,
 	.has_bsd_ring = 1,
@@ -150,7 +153,7 @@
 
 static const struct intel_device_info intel_ironlake_m_info = {
 	.gen = 5, .is_mobile = 1,
-	.need_gfx_hws = 1, .has_rc6 = 1, .has_hotplug = 1,
+	.need_gfx_hws = 1, .has_hotplug = 1,
 	.has_fbc = 0, /* disabled due to buggy hardware */
 	.has_bsd_ring = 1,
 };
@@ -165,6 +168,7 @@
 static const struct intel_device_info intel_sandybridge_m_info = {
 	.gen = 6, .is_mobile = 1,
 	.need_gfx_hws = 1, .has_hotplug = 1,
+	.has_fbc = 1,
 	.has_bsd_ring = 1,
 	.has_blt_ring = 1,
 };
@@ -244,10 +248,34 @@
 	}
 }
 
+void __gen6_force_wake_get(struct drm_i915_private *dev_priv)
+{
+	int count;
+
+	count = 0;
+	while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1))
+		udelay(10);
+
+	I915_WRITE_NOTRACE(FORCEWAKE, 1);
+	POSTING_READ(FORCEWAKE);
+
+	count = 0;
+	while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1) == 0)
+		udelay(10);
+}
+
+void __gen6_force_wake_put(struct drm_i915_private *dev_priv)
+{
+	I915_WRITE_NOTRACE(FORCEWAKE, 0);
+	POSTING_READ(FORCEWAKE);
+}
+
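/*
 * Illustrative note, not part of the patch: the get/put pair above is a
 * simple handshake -- wait for FORCEWAKE_ACK to clear, assert FORCEWAKE,
 * then wait for the ACK to latch.  A generic busy-wait of that shape,
 * assuming a caller-supplied read_ack() helper, could look like this:
 */
static inline int example_wait_for_ack(u32 (*read_ack)(void), bool set)
{
	int tries = 50;

	while (tries--) {
		if (!!(read_ack() & 1) == set)
			return 0;		/* ACK reached the expected state */
		udelay(10);			/* needs <linux/delay.h> */
	}
	return -ETIMEDOUT;			/* hardware never answered */
}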
 static int i915_drm_freeze(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
+	drm_kms_helper_poll_disable(dev);
+
 	pci_save_state(dev->pdev);
 
 	/* If KMS is active, we do the leavevt stuff here */
@@ -284,7 +312,9 @@
 	if (state.event == PM_EVENT_PRETHAW)
 		return 0;
 
-	drm_kms_helper_poll_disable(dev);
+
+	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+		return 0;
 
 	error = i915_drm_freeze(dev);
 	if (error)
@@ -304,6 +334,12 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int error = 0;
 
+	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+		mutex_lock(&dev->struct_mutex);
+		i915_gem_restore_gtt_mappings(dev);
+		mutex_unlock(&dev->struct_mutex);
+	}
+
 	i915_restore_state(dev);
 	intel_opregion_setup(dev);
 
@@ -319,6 +355,9 @@
 
 		/* Resume the modeset for every activated CRTC */
 		drm_helper_resume_force_mode(dev);
+
+		if (dev_priv->renderctx && dev_priv->pwrctx)
+			ironlake_enable_rc6(dev);
 	}
 
 	intel_opregion_init(dev);
@@ -332,6 +371,9 @@
 {
 	int ret;
 
+	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+		return 0;
+
 	if (pci_enable_device(dev->pdev))
 		return -EIO;
 
@@ -405,6 +447,14 @@
 	return wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
 }
 
+static int gen6_do_reset(struct drm_device *dev, u8 flags)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	I915_WRITE(GEN6_GDRST, GEN6_GRDOM_FULL);
+	return wait_for((I915_READ(GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500);
+}
+
 /**
  * i965_reset - reset chip after a hang
  * @dev: drm device to reset
@@ -431,7 +481,11 @@
 	bool need_display = true;
 	int ret;
 
-	mutex_lock(&dev->struct_mutex);
+	if (!i915_try_reset)
+		return 0;
+
+	if (!mutex_trylock(&dev->struct_mutex))
+		return -EBUSY;
 
 	i915_gem_reset(dev);
 
@@ -439,6 +493,9 @@
 	if (get_seconds() - dev_priv->last_gpu_reset < 5) {
 		DRM_ERROR("GPU hanging too fast, declaring wedged!\n");
 	} else switch (INTEL_INFO(dev)->gen) {
+	case 6:
+		ret = gen6_do_reset(dev, flags);
+		break;
 	case 5:
 		ret = ironlake_do_reset(dev, flags);
 		break;
@@ -472,9 +529,14 @@
 	 */
 	if (drm_core_check_feature(dev, DRIVER_MODESET) ||
 			!dev_priv->mm.suspended) {
-		struct intel_ring_buffer *ring = &dev_priv->render_ring;
 		dev_priv->mm.suspended = 0;
-		ring->init(dev, ring);
+
+		dev_priv->ring[RCS].init(&dev_priv->ring[RCS]);
+		if (HAS_BSD(dev))
+		    dev_priv->ring[VCS].init(&dev_priv->ring[VCS]);
+		if (HAS_BLT(dev))
+		    dev_priv->ring[BCS].init(&dev_priv->ring[BCS]);
+
 		mutex_unlock(&dev->struct_mutex);
 		drm_irq_uninstall(dev);
 		drm_irq_install(dev);
@@ -523,6 +585,9 @@
 		return -ENODEV;
 	}
 
+	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+		return 0;
+
 	error = i915_drm_freeze(drm_dev);
 	if (error)
 		return error;
@@ -606,6 +671,8 @@
 	.device_is_agp = i915_driver_device_is_agp,
 	.enable_vblank = i915_enable_vblank,
 	.disable_vblank = i915_disable_vblank,
+	.get_vblank_timestamp = i915_get_vblank_timestamp,
+	.get_scanout_position = i915_get_crtc_scanoutpos,
 	.irq_preinstall = i915_driver_irq_preinstall,
 	.irq_postinstall = i915_driver_irq_postinstall,
 	.irq_uninstall = i915_driver_irq_uninstall,
@@ -661,8 +728,6 @@
 
 	driver.num_ioctls = i915_max_ioctl;
 
-	i915_gem_shrinker_init();
-
 	/*
 	 * If CONFIG_DRM_I915_KMS is set, default to KMS unless
 	 * explicitly disabled with the module parameter.
@@ -684,17 +749,11 @@
 		driver.driver_features &= ~DRIVER_MODESET;
 #endif
 
-	if (!(driver.driver_features & DRIVER_MODESET)) {
-		driver.suspend = i915_suspend;
-		driver.resume = i915_resume;
-	}
-
 	return drm_init(&driver);
 }
 
 static void __exit i915_exit(void)
 {
-	i915_gem_shrinker_exit();
 	drm_exit(&driver);
 }
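For reference on the i915.reset option added above: a boolean module parameter gating a recovery path typically follows the pattern sketched below (names are made up for illustration):

#include <linux/module.h>

static bool example_try_recovery = true;	/* writable via sysfs (0600) */
module_param_named(recovery, example_try_recovery, bool, 0600);

static int example_reset(void)
{
	if (!example_try_recovery)
		return 0;	/* user disabled resets, e.g. modprobe drv recovery=0 */
	/* ... perform the actual reset ... */
	return 0;
}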
 
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 409826d..385fc7e 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -89,7 +89,7 @@
 	int id;
 	struct page **page_list;
 	drm_dma_handle_t *handle;
-	struct drm_gem_object *cur_obj;
+	struct drm_i915_gem_object *cur_obj;
 };
 
 struct mem_block {
@@ -124,9 +124,9 @@
 #define I915_FENCE_REG_NONE -1
 
 struct drm_i915_fence_reg {
-	struct drm_gem_object *obj;
 	struct list_head lru_list;
-	bool gpu;
+	struct drm_i915_gem_object *obj;
+	uint32_t setup_seqno;
 };
 
 struct sdvo_device_mapping {
@@ -139,6 +139,8 @@
 	u8 ddc_pin;
 };
 
+struct intel_display_error_state;
+
 struct drm_i915_error_state {
 	u32 eir;
 	u32 pgtbl_er;
@@ -148,32 +150,47 @@
 	u32 ipehr;
 	u32 instdone;
 	u32 acthd;
+	u32 error; /* gen6+ */
+	u32 bcs_acthd; /* gen6+ blt engine */
+	u32 bcs_ipehr;
+	u32 bcs_ipeir;
+	u32 bcs_instdone;
+	u32 bcs_seqno;
+	u32 vcs_acthd; /* gen6+ bsd engine */
+	u32 vcs_ipehr;
+	u32 vcs_ipeir;
+	u32 vcs_instdone;
+	u32 vcs_seqno;
 	u32 instpm;
 	u32 instps;
 	u32 instdone1;
 	u32 seqno;
 	u64 bbaddr;
+	u64 fence[16];
 	struct timeval time;
 	struct drm_i915_error_object {
 		int page_count;
 		u32 gtt_offset;
 		u32 *pages[0];
-	} *ringbuffer, *batchbuffer[2];
+	} *ringbuffer, *batchbuffer[I915_NUM_RINGS];
 	struct drm_i915_error_buffer {
-		size_t size;
+		u32 size;
 		u32 name;
 		u32 seqno;
 		u32 gtt_offset;
 		u32 read_domains;
 		u32 write_domain;
-		u32 fence_reg;
+		s32 fence_reg:5;
 		s32 pinned:2;
 		u32 tiling:2;
 		u32 dirty:1;
 		u32 purgeable:1;
-	} *active_bo;
-	u32 active_bo_count;
+		u32 ring:4;
+		u32 agp_type:1;
+	} *active_bo, *pinned_bo;
+	u32 active_bo_count, pinned_bo_count;
 	struct intel_overlay_error_state *overlay;
+	struct intel_display_error_state *display;
 };
 
 struct drm_i915_display_funcs {
@@ -207,7 +224,6 @@
 	u8 is_broadwater : 1;
 	u8 is_crestline : 1;
 	u8 has_fbc : 1;
-	u8 has_rc6 : 1;
 	u8 has_pipe_cxsr : 1;
 	u8 has_hotplug : 1;
 	u8 cursor_needs_physical : 1;
@@ -243,6 +259,7 @@
 	const struct intel_device_info *info;
 
 	int has_gem;
+	int relative_constants_mode;
 
 	void __iomem *regs;
 
@@ -253,20 +270,15 @@
 	} *gmbus;
 
 	struct pci_dev *bridge_dev;
-	struct intel_ring_buffer render_ring;
-	struct intel_ring_buffer bsd_ring;
-	struct intel_ring_buffer blt_ring;
+	struct intel_ring_buffer ring[I915_NUM_RINGS];
 	uint32_t next_seqno;
 
 	drm_dma_handle_t *status_page_dmah;
-	void *seqno_page;
 	dma_addr_t dma_status_page;
 	uint32_t counter;
-	unsigned int seqno_gfx_addr;
 	drm_local_map_t hws_map;
-	struct drm_gem_object *seqno_obj;
-	struct drm_gem_object *pwrctx;
-	struct drm_gem_object *renderctx;
+	struct drm_i915_gem_object *pwrctx;
+	struct drm_i915_gem_object *renderctx;
 
 	struct resource mch_res;
 
@@ -275,25 +287,17 @@
 	int front_offset;
 	int current_page;
 	int page_flipping;
-#define I915_DEBUG_READ (1<<0)
-#define I915_DEBUG_WRITE (1<<1)
-	unsigned long debug_flags;
 
-	wait_queue_head_t irq_queue;
 	atomic_t irq_received;
-	/** Protects user_irq_refcount and irq_mask_reg */
-	spinlock_t user_irq_lock;
 	u32 trace_irq_seqno;
+
+	/* protects the irq masks */
+	spinlock_t irq_lock;
 	/** Cached value of IMR to avoid reads in updating the bitfield */
-	u32 irq_mask_reg;
 	u32 pipestat[2];
-	/** splitted irq regs for graphics and display engine on Ironlake,
-	    irq_mask_reg is still used for display irq. */
-	u32 gt_irq_mask_reg;
-	u32 gt_irq_enable_reg;
-	u32 de_irq_enable_reg;
-	u32 pch_irq_mask_reg;
-	u32 pch_irq_enable_reg;
+	u32 irq_mask;
+	u32 gt_irq_mask;
+	u32 pch_irq_mask;
 
 	u32 hotplug_supported_mask;
 	struct work_struct hotplug_work;
@@ -306,7 +310,7 @@
 	int num_pipe;
 
 	/* For hangcheck timer */
-#define DRM_I915_HANGCHECK_PERIOD 250 /* in ms */
+#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
 	struct timer_list hangcheck_timer;
 	int hangcheck_count;
 	uint32_t last_acthd;
@@ -329,6 +333,7 @@
 
 	/* LVDS info */
 	int backlight_level;  /* restore backlight to this value */
+	bool backlight_enabled;
 	struct drm_display_mode *panel_fixed_mode;
 	struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
 	struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
@@ -530,23 +535,21 @@
 
 	struct {
 		/** Bridge to intel-gtt-ko */
-		struct intel_gtt *gtt;
+		const struct intel_gtt *gtt;
 		/** Memory allocator for GTT stolen memory */
-		struct drm_mm vram;
+		struct drm_mm stolen;
 		/** Memory allocator for GTT */
 		struct drm_mm gtt_space;
+		/** List of all objects in gtt_space. Used to restore gtt
+		 * mappings on resume */
+		struct list_head gtt_list;
+		/** End of mappable part of GTT */
+		unsigned long gtt_mappable_end;
 
 		struct io_mapping *gtt_mapping;
 		int gtt_mtrr;
 
-		/**
-		 * Membership on list of all loaded devices, used to evict
-		 * inactive buffers under memory pressure.
-		 *
-		 * Modifications should only be done whilst holding the
-		 * shrink_list_lock spinlock.
-		 */
-		struct list_head shrink_list;
+		struct shrinker inactive_shrinker;
 
 		/**
 		 * List of objects currently involved in rendering.
@@ -609,16 +612,6 @@
 		struct delayed_work retire_work;
 
 		/**
-		 * Waiting sequence number, if any
-		 */
-		uint32_t waiting_gem_seqno;
-
-		/**
-		 * Last seq seen at irq time
-		 */
-		uint32_t irq_gem_seqno;
-
-		/**
 		 * Flag if the X Server, and thus DRM, is not currently in
 		 * control of the device.
 		 *
@@ -645,16 +638,11 @@
 		/* storage for physical objects */
 		struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT];
 
-		uint32_t flush_rings;
-
 		/* accounting, useful for userland debugging */
-		size_t object_memory;
-		size_t pin_memory;
-		size_t gtt_memory;
 		size_t gtt_total;
+		size_t mappable_gtt_total;
+		size_t object_memory;
 		u32 object_count;
-		u32 pin_count;
-		u32 gtt_count;
 	} mm;
 	struct sdvo_device_mapping sdvo_mappings[2];
 	/* indicate whether the LVDS_BORDER should be enabled or not */
@@ -688,14 +676,14 @@
 	u8 fmax;
 	u8 fstart;
 
- 	u64 last_count1;
- 	unsigned long last_time1;
- 	u64 last_count2;
- 	struct timespec last_time2;
- 	unsigned long gfx_power;
- 	int c_m;
- 	int r_t;
- 	u8 corr;
+	u64 last_count1;
+	unsigned long last_time1;
+	u64 last_count2;
+	struct timespec last_time2;
+	unsigned long gfx_power;
+	int c_m;
+	int r_t;
+	u8 corr;
 	spinlock_t *mchdev_lock;
 
 	enum no_fbc_reason no_fbc_reason;
@@ -709,20 +697,20 @@
 	struct intel_fbdev *fbdev;
 } drm_i915_private_t;
 
-/** driver private structure attached to each drm_gem_object */
 struct drm_i915_gem_object {
 	struct drm_gem_object base;
 
 	/** Current space allocated to this object in the GTT, if any. */
 	struct drm_mm_node *gtt_space;
+	struct list_head gtt_list;
 
 	/** This object's place on the active/flushing/inactive lists */
 	struct list_head ring_list;
 	struct list_head mm_list;
 	/** This object's place on GPU write list */
 	struct list_head gpu_write_list;
-	/** This object's place on eviction list */
-	struct list_head evict_list;
+	/** This object's place in the batchbuffer or on the eviction list */
+	struct list_head exec_list;
 
 	/**
 	 * This is set if the object is on the active or flushing lists
@@ -738,6 +726,12 @@
 	unsigned int dirty : 1;
 
 	/**
+	 * This is set if the object has been written to since the last
+	 * GPU flush.
+	 */
+	unsigned int pending_gpu_write : 1;
+
+	/**
 	 * Fence register bits (if any) for this object.  Will be set
 	 * as needed when mapped into the GTT.
 	 * Protected by dev->struct_mutex.
@@ -747,29 +741,15 @@
 	signed int fence_reg : 5;
 
 	/**
-	 * Used for checking the object doesn't appear more than once
-	 * in an execbuffer object list.
-	 */
-	unsigned int in_execbuffer : 1;
-
-	/**
 	 * Advice: are the backing pages purgeable?
 	 */
 	unsigned int madv : 2;
 
 	/**
-	 * Refcount for the pages array. With the current locking scheme, there
-	 * are at most two concurrent users: Binding a bo to the gtt and
-	 * pwrite/pread using physical addresses. So two bits for a maximum
-	 * of two users are enough.
-	 */
-	unsigned int pages_refcount : 2;
-#define DRM_I915_GEM_OBJECT_MAX_PAGES_REFCOUNT 0x3
-
-	/**
 	 * Current tiling mode for the object.
 	 */
 	unsigned int tiling_mode : 2;
+	unsigned int tiling_changed : 1;
 
 	/** How many users have pinned this object in GTT space. The following
 	 * users can each hold at most one reference: pwrite/pread, pin_ioctl
@@ -783,28 +763,55 @@
 	unsigned int pin_count : 4;
 #define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf
 
-	/** AGP memory structure for our GTT binding. */
-	DRM_AGP_MEM *agp_mem;
+	/**
+	 * Is the object at the current location in the gtt mappable and
+	 * fenceable? Used to avoid costly recalculations.
+	 */
+	unsigned int map_and_fenceable : 1;
+
+	/**
+	 * Whether the current gtt mapping needs to be mappable (and isn't just
+	 * mappable by accident). Track pin and fault separately for a more
+	 * accurate mappable working set.
+	 */
+	unsigned int fault_mappable : 1;
+	unsigned int pin_mappable : 1;
+
+	/*
+	 * Is the GPU currently using a fence to access this buffer?
+	 */
+	unsigned int pending_fenced_gpu_access:1;
+	unsigned int fenced_gpu_access:1;
 
 	struct page **pages;
 
 	/**
+	 * DMAR support
+	 */
+	struct scatterlist *sg_list;
+	int num_sg;
+
+	/**
+	 * Used for performing relocations during execbuffer insertion.
+	 */
+	struct hlist_node exec_node;
+	unsigned long exec_handle;
+	struct drm_i915_gem_exec_object2 *exec_entry;
+
+	/**
 	 * Current offset of the object in GTT space.
 	 *
 	 * This is the same as gtt_space->start
 	 */
 	uint32_t gtt_offset;
 
-	/* Which ring is refering to is this object */
-	struct intel_ring_buffer *ring;
-
-	/**
-	 * Fake offset for use by mmap(2)
-	 */
-	uint64_t mmap_offset;
-
 	/** Breadcrumb of last rendering to the buffer. */
 	uint32_t last_rendering_seqno;
+	struct intel_ring_buffer *ring;
+
+	/** Breadcrumb of last fenced GPU access to the buffer. */
+	uint32_t last_fenced_seqno;
+	struct intel_ring_buffer *last_fenced_ring;
 
 	/** Current tiling stride for the object, if it's tiled. */
 	uint32_t stride;
@@ -880,6 +887,68 @@
 	CHIP_I965 = 0x08,
 };
 
+#define INTEL_INFO(dev)	(((struct drm_i915_private *) (dev)->dev_private)->info)
+
+#define IS_I830(dev)		((dev)->pci_device == 0x3577)
+#define IS_845G(dev)		((dev)->pci_device == 0x2562)
+#define IS_I85X(dev)		(INTEL_INFO(dev)->is_i85x)
+#define IS_I865G(dev)		((dev)->pci_device == 0x2572)
+#define IS_I915G(dev)		(INTEL_INFO(dev)->is_i915g)
+#define IS_I915GM(dev)		((dev)->pci_device == 0x2592)
+#define IS_I945G(dev)		((dev)->pci_device == 0x2772)
+#define IS_I945GM(dev)		(INTEL_INFO(dev)->is_i945gm)
+#define IS_BROADWATER(dev)	(INTEL_INFO(dev)->is_broadwater)
+#define IS_CRESTLINE(dev)	(INTEL_INFO(dev)->is_crestline)
+#define IS_GM45(dev)		((dev)->pci_device == 0x2A42)
+#define IS_G4X(dev)		(INTEL_INFO(dev)->is_g4x)
+#define IS_PINEVIEW_G(dev)	((dev)->pci_device == 0xa001)
+#define IS_PINEVIEW_M(dev)	((dev)->pci_device == 0xa011)
+#define IS_PINEVIEW(dev)	(INTEL_INFO(dev)->is_pineview)
+#define IS_G33(dev)		(INTEL_INFO(dev)->is_g33)
+#define IS_IRONLAKE_D(dev)	((dev)->pci_device == 0x0042)
+#define IS_IRONLAKE_M(dev)	((dev)->pci_device == 0x0046)
+#define IS_MOBILE(dev)		(INTEL_INFO(dev)->is_mobile)
+
+#define IS_GEN2(dev)	(INTEL_INFO(dev)->gen == 2)
+#define IS_GEN3(dev)	(INTEL_INFO(dev)->gen == 3)
+#define IS_GEN4(dev)	(INTEL_INFO(dev)->gen == 4)
+#define IS_GEN5(dev)	(INTEL_INFO(dev)->gen == 5)
+#define IS_GEN6(dev)	(INTEL_INFO(dev)->gen == 6)
+
+#define HAS_BSD(dev)            (INTEL_INFO(dev)->has_bsd_ring)
+#define HAS_BLT(dev)            (INTEL_INFO(dev)->has_blt_ring)
+#define I915_NEED_GFX_HWS(dev)	(INTEL_INFO(dev)->need_gfx_hws)
+
+#define HAS_OVERLAY(dev)		(INTEL_INFO(dev)->has_overlay)
+#define OVERLAY_NEEDS_PHYSICAL(dev)	(INTEL_INFO(dev)->overlay_needs_physical)
+
+/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
+ * rows, which changed the alignment requirements and fence programming.
+ */
+#define HAS_128_BYTE_Y_TILING(dev) (!IS_GEN2(dev) && !(IS_I915G(dev) || \
+						      IS_I915GM(dev)))
+#define SUPPORTS_DIGITAL_OUTPUTS(dev)	(!IS_GEN2(dev) && !IS_PINEVIEW(dev))
+#define SUPPORTS_INTEGRATED_HDMI(dev)	(IS_G4X(dev) || IS_GEN5(dev))
+#define SUPPORTS_INTEGRATED_DP(dev)	(IS_G4X(dev) || IS_GEN5(dev))
+#define SUPPORTS_EDP(dev)		(IS_IRONLAKE_M(dev))
+#define SUPPORTS_TV(dev)		(INTEL_INFO(dev)->supports_tv)
+#define I915_HAS_HOTPLUG(dev)		 (INTEL_INFO(dev)->has_hotplug)
+/* dsparb controlled by hw only */
+#define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IRONLAKE(dev))
+
+#define HAS_FW_BLC(dev) (INTEL_INFO(dev)->gen > 2)
+#define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr)
+#define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)
+
+#define HAS_PCH_SPLIT(dev) (IS_GEN5(dev) || IS_GEN6(dev))
+#define HAS_PIPE_CONTROL(dev) (IS_GEN5(dev) || IS_GEN6(dev))
+
+#define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type)
+#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
+#define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
+
+#include "i915_trace.h"
+
 extern struct drm_ioctl_desc i915_ioctls[];
 extern int i915_max_ioctl;
 extern unsigned int i915_fbpercrtc;
@@ -907,8 +976,8 @@
 extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
 			      unsigned long arg);
 extern int i915_emit_box(struct drm_device *dev,
-			 struct drm_clip_rect *boxes,
-			 int i, int DR1, int DR4);
+			 struct drm_clip_rect *box,
+			 int DR1, int DR4);
 extern int i915_reset(struct drm_device *dev, u8 flags);
 extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
 extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
@@ -918,6 +987,7 @@
 
 /* i915_irq.c */
 void i915_hangcheck_elapsed(unsigned long data);
+void i915_handle_error(struct drm_device *dev, bool wedged);
 extern int i915_irq_emit(struct drm_device *dev, void *data,
 			 struct drm_file *file_priv);
 extern int i915_irq_wait(struct drm_device *dev, void *data,
@@ -939,12 +1009,6 @@
 extern u32 gm45_get_vblank_counter(struct drm_device *dev, int crtc);
 extern int i915_vblank_swap(struct drm_device *dev, void *data,
 			    struct drm_file *file_priv);
-extern void i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask);
-extern void i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask);
-extern void ironlake_enable_graphics_irq(drm_i915_private_t *dev_priv,
-		u32 mask);
-extern void ironlake_disable_graphics_irq(drm_i915_private_t *dev_priv,
-		u32 mask);
 
 void
 i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
@@ -953,6 +1017,13 @@
 i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
 
 void intel_enable_asle (struct drm_device *dev);
+int i915_get_vblank_timestamp(struct drm_device *dev, int crtc,
+			      int *max_error,
+			      struct timeval *vblank_time,
+			      unsigned flags);
+
+int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
+			     int *vpos, int *hpos);
 
 #ifdef CONFIG_DEBUG_FS
 extern void i915_destroy_error_state(struct drm_device *dev);
@@ -1017,15 +1088,28 @@
 				struct drm_file *file_priv);
 void i915_gem_load(struct drm_device *dev);
 int i915_gem_init_object(struct drm_gem_object *obj);
-struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev,
-					      size_t size);
+int __must_check i915_gem_flush_ring(struct drm_device *dev,
+				     struct intel_ring_buffer *ring,
+				     uint32_t invalidate_domains,
+				     uint32_t flush_domains);
+struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
+						  size_t size);
 void i915_gem_free_object(struct drm_gem_object *obj);
-int i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment);
-void i915_gem_object_unpin(struct drm_gem_object *obj);
-int i915_gem_object_unbind(struct drm_gem_object *obj);
-void i915_gem_release_mmap(struct drm_gem_object *obj);
+int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,
+				     uint32_t alignment,
+				     bool map_and_fenceable);
+void i915_gem_object_unpin(struct drm_i915_gem_object *obj);
+int __must_check i915_gem_object_unbind(struct drm_i915_gem_object *obj);
+void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
 void i915_gem_lastclose(struct drm_device *dev);
 
+int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
+int __must_check i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
+						bool interruptible);
+void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
+				    struct intel_ring_buffer *ring,
+				    u32 seqno);
+
 /**
  * Returns true if seq1 is later than seq2.
  */
@@ -1035,73 +1119,88 @@
 	return (int32_t)(seq1 - seq2) >= 0;
 }
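/*
 * Illustrative note, not part of the patch: the signed subtraction above is
 * wrap-around safe.  With seq1 = 0x00000002 and seq2 = 0xfffffffe the
 * difference is 4, so the int32_t cast yields +4 and seq1 is correctly
 * treated as later even though it is numerically smaller.  The same idiom,
 * spelled out with hypothetical names:
 */
static inline bool example_request_done(u32 completed, u32 wanted)
{
	return (int32_t)(completed - wanted) >= 0;
}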
 
-int i915_gem_object_get_fence_reg(struct drm_gem_object *obj,
-				  bool interruptible);
-int i915_gem_object_put_fence_reg(struct drm_gem_object *obj,
-				  bool interruptible);
+static inline u32
+i915_gem_next_request_seqno(struct drm_device *dev,
+			    struct intel_ring_buffer *ring)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	return ring->outstanding_lazy_request = dev_priv->next_seqno;
+}
+
+int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
+					   struct intel_ring_buffer *pipelined,
+					   bool interruptible);
+int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj);
+
 void i915_gem_retire_requests(struct drm_device *dev);
 void i915_gem_reset(struct drm_device *dev);
-void i915_gem_clflush_object(struct drm_gem_object *obj);
-int i915_gem_object_set_domain(struct drm_gem_object *obj,
-			       uint32_t read_domains,
-			       uint32_t write_domain);
-int i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj,
-			      bool interruptible);
-int i915_gem_init_ringbuffer(struct drm_device *dev);
+void i915_gem_clflush_object(struct drm_i915_gem_object *obj);
+int __must_check i915_gem_object_set_domain(struct drm_i915_gem_object *obj,
+					    uint32_t read_domains,
+					    uint32_t write_domain);
+int __must_check i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj,
+					   bool interruptible);
+int __must_check i915_gem_init_ringbuffer(struct drm_device *dev);
 void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
-int i915_gem_do_init(struct drm_device *dev, unsigned long start,
-		     unsigned long end);
-int i915_gpu_idle(struct drm_device *dev);
-int i915_gem_idle(struct drm_device *dev);
-uint32_t i915_add_request(struct drm_device *dev,
-			  struct drm_file *file_priv,
-			  struct drm_i915_gem_request *request,
-			  struct intel_ring_buffer *ring);
-int i915_do_wait_request(struct drm_device *dev,
-			 uint32_t seqno,
-			 bool interruptible,
-			 struct intel_ring_buffer *ring);
+void i915_gem_do_init(struct drm_device *dev,
+		      unsigned long start,
+		      unsigned long mappable_end,
+		      unsigned long end);
+int __must_check i915_gpu_idle(struct drm_device *dev);
+int __must_check i915_gem_idle(struct drm_device *dev);
+int __must_check i915_add_request(struct drm_device *dev,
+				  struct drm_file *file_priv,
+				  struct drm_i915_gem_request *request,
+				  struct intel_ring_buffer *ring);
+int __must_check i915_do_wait_request(struct drm_device *dev,
+				      uint32_t seqno,
+				      bool interruptible,
+				      struct intel_ring_buffer *ring);
 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
-int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj,
-				      int write);
-int i915_gem_object_set_to_display_plane(struct drm_gem_object *obj,
-					 bool pipelined);
+int __must_check
+i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
+				  bool write);
+int __must_check
+i915_gem_object_set_to_display_plane(struct drm_i915_gem_object *obj,
+				     struct intel_ring_buffer *pipelined);
 int i915_gem_attach_phys_object(struct drm_device *dev,
-				struct drm_gem_object *obj,
+				struct drm_i915_gem_object *obj,
 				int id,
 				int align);
 void i915_gem_detach_phys_object(struct drm_device *dev,
-				 struct drm_gem_object *obj);
+				 struct drm_i915_gem_object *obj);
 void i915_gem_free_all_phys_object(struct drm_device *dev);
-void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv);
+void i915_gem_release(struct drm_device *dev, struct drm_file *file);
 
-void i915_gem_shrinker_init(void);
-void i915_gem_shrinker_exit(void);
+/* i915_gem_gtt.c */
+void i915_gem_restore_gtt_mappings(struct drm_device *dev);
+int __must_check i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj);
+void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj);
 
 /* i915_gem_evict.c */
-int i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignment);
-int i915_gem_evict_everything(struct drm_device *dev);
-int i915_gem_evict_inactive(struct drm_device *dev);
+int __must_check i915_gem_evict_something(struct drm_device *dev, int min_size,
+					  unsigned alignment, bool mappable);
+int __must_check i915_gem_evict_everything(struct drm_device *dev,
+					   bool purgeable_only);
+int __must_check i915_gem_evict_inactive(struct drm_device *dev,
+					 bool purgeable_only);
 
 /* i915_gem_tiling.c */
 void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
-void i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj);
-void i915_gem_object_save_bit_17_swizzle(struct drm_gem_object *obj);
-bool i915_tiling_ok(struct drm_device *dev, int stride, int size,
-		    int tiling_mode);
-bool i915_gem_object_fence_offset_ok(struct drm_gem_object *obj,
-				     int tiling_mode);
+void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj);
+void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj);
 
 /* i915_gem_debug.c */
-void i915_gem_dump_object(struct drm_gem_object *obj, int len,
+void i915_gem_dump_object(struct drm_i915_gem_object *obj, int len,
 			  const char *where, uint32_t mark);
 #if WATCH_LISTS
 int i915_verify_lists(struct drm_device *dev);
 #else
 #define i915_verify_lists(dev) 0
 #endif
-void i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle);
-void i915_gem_dump_object(struct drm_gem_object *obj, int len,
+void i915_gem_object_check_coherency(struct drm_i915_gem_object *obj,
+				     int handle);
+void i915_gem_dump_object(struct drm_i915_gem_object *obj, int len,
 			  const char *where, uint32_t mark);
 
 /* i915_debugfs.c */
@@ -1163,6 +1262,8 @@
 extern void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval);
 extern bool intel_fbc_enabled(struct drm_device *dev);
 extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
+extern void ironlake_enable_rc6(struct drm_device *dev);
+extern void gen6_set_rps(struct drm_device *dev, u8 val);
 extern void intel_detect_pch (struct drm_device *dev);
 extern int intel_trans_dp_port_sel (struct drm_crtc *crtc);
 
@@ -1170,79 +1271,120 @@
 #ifdef CONFIG_DEBUG_FS
 extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
 extern void intel_overlay_print_error_state(struct seq_file *m, struct intel_overlay_error_state *error);
+
+extern struct intel_display_error_state *intel_display_capture_error_state(struct drm_device *dev);
+extern void intel_display_print_error_state(struct seq_file *m,
+					    struct drm_device *dev,
+					    struct intel_display_error_state *error);
 #endif
 
+#define LP_RING(d) (&((struct drm_i915_private *)(d))->ring[RCS])
+
+#define BEGIN_LP_RING(n) \
+	intel_ring_begin(LP_RING(dev_priv), (n))
+
+#define OUT_RING(x) \
+	intel_ring_emit(LP_RING(dev_priv), x)
+
+#define ADVANCE_LP_RING() \
+	intel_ring_advance(LP_RING(dev_priv))
+
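/*
 * Hypothetical usage of the helpers above (not part of the patch); in this
 * series intel_ring_begin() returns an errno, so BEGIN_LP_RING() must be
 * checked:
 *
 *	ret = BEGIN_LP_RING(2);
 *	if (ret == 0) {
 *		OUT_RING(MI_FLUSH);
 *		OUT_RING(MI_NOOP);
 *		ADVANCE_LP_RING();
 *	}
 */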
 /**
  * Lock test for when it's just for synchronization of ring access.
  *
  * In that case, we don't need to do it when GEM is initialized as nobody else
  * has access to the ring.
  */
-#define RING_LOCK_TEST_WITH_RETURN(dev, file_priv) do {			\
-	if (((drm_i915_private_t *)dev->dev_private)->render_ring.gem_object \
-			== NULL)					\
-		LOCK_TEST_WITH_RETURN(dev, file_priv);			\
+#define RING_LOCK_TEST_WITH_RETURN(dev, file) do {			\
+	if (LP_RING(dev->dev_private)->obj == NULL)			\
+		LOCK_TEST_WITH_RETURN(dev, file);			\
 } while (0)
 
-static inline u32 i915_read(struct drm_i915_private *dev_priv, u32 reg)
+
+#define __i915_read(x, y) \
+static inline u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
+	u##x val = read##y(dev_priv->regs + reg); \
+	trace_i915_reg_rw('R', reg, val, sizeof(val)); \
+	return val; \
+}
+__i915_read(8, b)
+__i915_read(16, w)
+__i915_read(32, l)
+__i915_read(64, q)
+#undef __i915_read
+
+#define __i915_write(x, y) \
+static inline void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \
+	trace_i915_reg_rw('W', reg, val, sizeof(val)); \
+	write##y(val, dev_priv->regs + reg); \
+}
+__i915_write(8, b)
+__i915_write(16, w)
+__i915_write(32, l)
+__i915_write(64, q)
+#undef __i915_write
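/*
 * Illustrative expansion, not part of the patch: __i915_read(32, l) above
 * generates roughly the following accessor, pairing readl() with the
 * register-trace hook.
 */
static inline u32 example_i915_read32(struct drm_i915_private *dev_priv, u32 reg)
{
	u32 val = readl(dev_priv->regs + reg);		/* raw MMIO read */
	trace_i915_reg_rw('R', reg, val, sizeof(val));	/* tracepoint */
	return val;
}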
+
+#define I915_READ8(reg)		i915_read8(dev_priv, (reg))
+#define I915_WRITE8(reg, val)	i915_write8(dev_priv, (reg), (val))
+
+#define I915_READ16(reg)	i915_read16(dev_priv, (reg))
+#define I915_WRITE16(reg, val)	i915_write16(dev_priv, (reg), (val))
+#define I915_READ16_NOTRACE(reg)	readw(dev_priv->regs + (reg))
+#define I915_WRITE16_NOTRACE(reg, val)	writew(val, dev_priv->regs + (reg))
+
+#define I915_READ(reg)		i915_read32(dev_priv, (reg))
+#define I915_WRITE(reg, val)	i915_write32(dev_priv, (reg), (val))
+#define I915_READ_NOTRACE(reg)		readl(dev_priv->regs + (reg))
+#define I915_WRITE_NOTRACE(reg, val)	writel(val, dev_priv->regs + (reg))
+
+#define I915_WRITE64(reg, val)	i915_write64(dev_priv, (reg), (val))
+#define I915_READ64(reg)	i915_read64(dev_priv, (reg))
+
+#define POSTING_READ(reg)	(void)I915_READ_NOTRACE(reg)
+#define POSTING_READ16(reg)	(void)I915_READ16_NOTRACE(reg)
+
+
+/* On the SNB platform, the forcewake bit must be set before reading ring
+ * registers, to keep the GT core from powering down and returning stale
+ * values.
+ */
+void __gen6_force_wake_get(struct drm_i915_private *dev_priv);
+void __gen6_force_wake_put(struct drm_i915_private *dev_priv);
+static inline u32 i915_safe_read(struct drm_i915_private *dev_priv, u32 reg)
 {
 	u32 val;
 
-	val = readl(dev_priv->regs + reg);
-	if (dev_priv->debug_flags & I915_DEBUG_READ)
-		printk(KERN_ERR "read 0x%08x from 0x%08x\n", val, reg);
+	if (dev_priv->info->gen >= 6) {
+		__gen6_force_wake_get(dev_priv);
+		val = I915_READ(reg);
+		__gen6_force_wake_put(dev_priv);
+	} else
+		val = I915_READ(reg);
+
 	return val;
 }
 
-static inline void i915_write(struct drm_i915_private *dev_priv, u32 reg,
-			      u32 val)
+static inline void
+i915_write(struct drm_i915_private *dev_priv, u32 reg, u64 val, int len)
 {
-	writel(val, dev_priv->regs + reg);
-	if (dev_priv->debug_flags & I915_DEBUG_WRITE)
-		printk(KERN_ERR "wrote 0x%08x to 0x%08x\n", val, reg);
+       /* Trace the write operation before performing the real write */
+       trace_i915_reg_rw('W', reg, val, len);
+       switch (len) {
+       case 8:
+               writeq(val, dev_priv->regs + reg);
+               break;
+       case 4:
+               writel(val, dev_priv->regs + reg);
+               break;
+       case 2:
+               writew(val, dev_priv->regs + reg);
+               break;
+       case 1:
+               writeb(val, dev_priv->regs + reg);
+               break;
+       }
 }
 
-#define I915_READ(reg)          i915_read(dev_priv, (reg))
-#define I915_WRITE(reg, val)    i915_write(dev_priv, (reg), (val))
-#define I915_READ16(reg)	readw(dev_priv->regs + (reg))
-#define I915_WRITE16(reg, val)	writel(val, dev_priv->regs + (reg))
-#define I915_READ8(reg)		readb(dev_priv->regs + (reg))
-#define I915_WRITE8(reg, val)	writeb(val, dev_priv->regs + (reg))
-#define I915_WRITE64(reg, val)	writeq(val, dev_priv->regs + (reg))
-#define I915_READ64(reg)	readq(dev_priv->regs + (reg))
-#define POSTING_READ(reg)	(void)I915_READ(reg)
-#define POSTING_READ16(reg)	(void)I915_READ16(reg)
-
-#define I915_DEBUG_ENABLE_IO() (dev_priv->debug_flags |= I915_DEBUG_READ | \
-				I915_DEBUG_WRITE)
-#define I915_DEBUG_DISABLE_IO() (dev_priv->debug_flags &= ~(I915_DEBUG_READ | \
-							    I915_DEBUG_WRITE))
-
-#define I915_VERBOSE 0
-
-#define BEGIN_LP_RING(n)  do { \
-	drm_i915_private_t *dev_priv__ = dev->dev_private;                \
-	if (I915_VERBOSE)						\
-		DRM_DEBUG("   BEGIN_LP_RING %x\n", (int)(n));		\
-	intel_ring_begin(dev, &dev_priv__->render_ring, (n));		\
-} while (0)
-
-
-#define OUT_RING(x) do {						\
-	drm_i915_private_t *dev_priv__ = dev->dev_private;		\
-	if (I915_VERBOSE)						\
-		DRM_DEBUG("   OUT_RING %x\n", (int)(x));		\
-	intel_ring_emit(dev, &dev_priv__->render_ring, x);		\
-} while (0)
-
-#define ADVANCE_LP_RING() do {						\
-	drm_i915_private_t *dev_priv__ = dev->dev_private;                \
-	if (I915_VERBOSE)						\
-		DRM_DEBUG("ADVANCE_LP_RING %x\n",			\
-				dev_priv__->render_ring.tail);		\
-	intel_ring_advance(dev, &dev_priv__->render_ring);		\
-} while(0)
-
 /**
  * Reads a dword out of the status page, which is written to from the command
  * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
@@ -1259,72 +1401,9 @@
  * The area from dword 0x20 to 0x3ff is available for driver usage.
  */
 #define READ_HWSP(dev_priv, reg)  (((volatile u32 *)\
-			(dev_priv->render_ring.status_page.page_addr))[reg])
+			(LP_RING(dev_priv)->status_page.page_addr))[reg])
 #define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX)
 #define I915_GEM_HWS_INDEX		0x20
 #define I915_BREADCRUMB_INDEX		0x21
 
-#define INTEL_INFO(dev)	(((struct drm_i915_private *) (dev)->dev_private)->info)
-
-#define IS_I830(dev)		((dev)->pci_device == 0x3577)
-#define IS_845G(dev)		((dev)->pci_device == 0x2562)
-#define IS_I85X(dev)		(INTEL_INFO(dev)->is_i85x)
-#define IS_I865G(dev)		((dev)->pci_device == 0x2572)
-#define IS_I915G(dev)		(INTEL_INFO(dev)->is_i915g)
-#define IS_I915GM(dev)		((dev)->pci_device == 0x2592)
-#define IS_I945G(dev)		((dev)->pci_device == 0x2772)
-#define IS_I945GM(dev)		(INTEL_INFO(dev)->is_i945gm)
-#define IS_BROADWATER(dev)	(INTEL_INFO(dev)->is_broadwater)
-#define IS_CRESTLINE(dev)	(INTEL_INFO(dev)->is_crestline)
-#define IS_GM45(dev)		((dev)->pci_device == 0x2A42)
-#define IS_G4X(dev)		(INTEL_INFO(dev)->is_g4x)
-#define IS_PINEVIEW_G(dev)	((dev)->pci_device == 0xa001)
-#define IS_PINEVIEW_M(dev)	((dev)->pci_device == 0xa011)
-#define IS_PINEVIEW(dev)	(INTEL_INFO(dev)->is_pineview)
-#define IS_G33(dev)		(INTEL_INFO(dev)->is_g33)
-#define IS_IRONLAKE_D(dev)	((dev)->pci_device == 0x0042)
-#define IS_IRONLAKE_M(dev)	((dev)->pci_device == 0x0046)
-#define IS_MOBILE(dev)		(INTEL_INFO(dev)->is_mobile)
-
-#define IS_GEN2(dev)	(INTEL_INFO(dev)->gen == 2)
-#define IS_GEN3(dev)	(INTEL_INFO(dev)->gen == 3)
-#define IS_GEN4(dev)	(INTEL_INFO(dev)->gen == 4)
-#define IS_GEN5(dev)	(INTEL_INFO(dev)->gen == 5)
-#define IS_GEN6(dev)	(INTEL_INFO(dev)->gen == 6)
-
-#define HAS_BSD(dev)            (INTEL_INFO(dev)->has_bsd_ring)
-#define HAS_BLT(dev)            (INTEL_INFO(dev)->has_blt_ring)
-#define I915_NEED_GFX_HWS(dev)	(INTEL_INFO(dev)->need_gfx_hws)
-
-#define HAS_OVERLAY(dev) 		(INTEL_INFO(dev)->has_overlay)
-#define OVERLAY_NEEDS_PHYSICAL(dev)	(INTEL_INFO(dev)->overlay_needs_physical)
-
-/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
- * rows, which changed the alignment requirements and fence programming.
- */
-#define HAS_128_BYTE_Y_TILING(dev) (!IS_GEN2(dev) && !(IS_I915G(dev) || \
-						      IS_I915GM(dev)))
-#define SUPPORTS_DIGITAL_OUTPUTS(dev)	(!IS_GEN2(dev) && !IS_PINEVIEW(dev))
-#define SUPPORTS_INTEGRATED_HDMI(dev)	(IS_G4X(dev) || IS_GEN5(dev))
-#define SUPPORTS_INTEGRATED_DP(dev)	(IS_G4X(dev) || IS_GEN5(dev))
-#define SUPPORTS_EDP(dev)		(IS_IRONLAKE_M(dev))
-#define SUPPORTS_TV(dev)		(INTEL_INFO(dev)->supports_tv)
-#define I915_HAS_HOTPLUG(dev)		 (INTEL_INFO(dev)->has_hotplug)
-/* dsparb controlled by hw only */
-#define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IRONLAKE(dev))
-
-#define HAS_FW_BLC(dev) (INTEL_INFO(dev)->gen > 2)
-#define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr)
-#define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)
-#define I915_HAS_RC6(dev) (INTEL_INFO(dev)->has_rc6)
-
-#define HAS_PCH_SPLIT(dev) (IS_GEN5(dev) || IS_GEN6(dev))
-#define HAS_PIPE_CONTROL(dev) (IS_GEN5(dev) || IS_GEN6(dev))
-
-#define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type)
-#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
-#define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
-
-#define PRIMARY_RINGBUFFER_SIZE         (128*1024)
-
 #endif
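Background for the inactive_shrinker field introduced above: with the 2.6.37-era shrinker API, per-device registration looks roughly like the sketch below (names are illustrative, not from the patch):

static int example_shrink(struct shrinker *shrinker, int nr_to_scan, gfp_t gfp_mask)
{
	/* nr_to_scan == 0 asks only for the count of reclaimable objects;
	 * otherwise try to free up to nr_to_scan of them. */
	return 0;
}

static struct shrinker example_shrinker = {
	.shrink	= example_shrink,
	.seeks	= DEFAULT_SEEKS,
};

/* register_shrinker(&example_shrinker) at load time;
 * unregister_shrinker(&example_shrinker) on unload, as i915_driver_unload()
 * now does for dev_priv->mm.inactive_shrinker. */
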
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 275ec6e..3dfc848 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -34,38 +34,31 @@
 #include <linux/slab.h>
 #include <linux/swap.h>
 #include <linux/pci.h>
-#include <linux/intel-gtt.h>
 
-static uint32_t i915_gem_get_gtt_alignment(struct drm_gem_object *obj);
-
-static int i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
-static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
-static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj);
-static int i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj,
-					     int write);
-static int i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
-						     uint64_t offset,
-						     uint64_t size);
-static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj);
-static int i915_gem_object_wait_rendering(struct drm_gem_object *obj,
-					  bool interruptible);
-static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
-					   unsigned alignment);
-static void i915_gem_clear_fence_reg(struct drm_gem_object *obj);
-static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
+static __must_check int i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj);
+static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
+static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
+static __must_check int i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj,
+							  bool write);
+static __must_check int i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj,
+								  uint64_t offset,
+								  uint64_t size);
+static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_i915_gem_object *obj);
+static __must_check int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
+						    unsigned alignment,
+						    bool map_and_fenceable);
+static void i915_gem_clear_fence_reg(struct drm_device *dev,
+				     struct drm_i915_fence_reg *reg);
+static int i915_gem_phys_pwrite(struct drm_device *dev,
+				struct drm_i915_gem_object *obj,
 				struct drm_i915_gem_pwrite *args,
-				struct drm_file *file_priv);
-static void i915_gem_free_object_tail(struct drm_gem_object *obj);
+				struct drm_file *file);
+static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj);
 
-static int
-i915_gem_object_get_pages(struct drm_gem_object *obj,
-			  gfp_t gfpmask);
+static int i915_gem_inactive_shrink(struct shrinker *shrinker,
+				    int nr_to_scan,
+				    gfp_t gfp_mask);
 
-static void
-i915_gem_object_put_pages(struct drm_gem_object *obj);
-
-static LIST_HEAD(shrink_list);
-static DEFINE_SPINLOCK(shrink_list_lock);
 
 /* some bookkeeping */
 static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
@@ -82,34 +75,6 @@
 	dev_priv->mm.object_memory -= size;
 }
 
-static void i915_gem_info_add_gtt(struct drm_i915_private *dev_priv,
-				  size_t size)
-{
-	dev_priv->mm.gtt_count++;
-	dev_priv->mm.gtt_memory += size;
-}
-
-static void i915_gem_info_remove_gtt(struct drm_i915_private *dev_priv,
-				     size_t size)
-{
-	dev_priv->mm.gtt_count--;
-	dev_priv->mm.gtt_memory -= size;
-}
-
-static void i915_gem_info_add_pin(struct drm_i915_private *dev_priv,
-				  size_t size)
-{
-	dev_priv->mm.pin_count++;
-	dev_priv->mm.pin_memory += size;
-}
-
-static void i915_gem_info_remove_pin(struct drm_i915_private *dev_priv,
-				     size_t size)
-{
-	dev_priv->mm.pin_count--;
-	dev_priv->mm.pin_memory -= size;
-}
-
 int
 i915_gem_check_is_wedged(struct drm_device *dev)
 {
@@ -140,7 +105,7 @@
 	return -EIO;
 }
 
-static int i915_mutex_lock_interruptible(struct drm_device *dev)
+int i915_mutex_lock_interruptible(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret;
@@ -163,75 +128,76 @@
 }
 
 static inline bool
-i915_gem_object_is_inactive(struct drm_i915_gem_object *obj_priv)
+i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
 {
-	return obj_priv->gtt_space &&
-		!obj_priv->active &&
-		obj_priv->pin_count == 0;
+	return obj->gtt_space && !obj->active && obj->pin_count == 0;
 }
 
-int i915_gem_do_init(struct drm_device *dev,
-		     unsigned long start,
-		     unsigned long end)
+void i915_gem_do_init(struct drm_device *dev,
+		      unsigned long start,
+		      unsigned long mappable_end,
+		      unsigned long end)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 
-	if (start >= end ||
-	    (start & (PAGE_SIZE - 1)) != 0 ||
-	    (end & (PAGE_SIZE - 1)) != 0) {
-		return -EINVAL;
-	}
-
 	drm_mm_init(&dev_priv->mm.gtt_space, start,
 		    end - start);
 
 	dev_priv->mm.gtt_total = end - start;
-
-	return 0;
+	dev_priv->mm.mappable_gtt_total = min(end, mappable_end) - start;
+	dev_priv->mm.gtt_mappable_end = mappable_end;
 }
 
 int
 i915_gem_init_ioctl(struct drm_device *dev, void *data,
-		    struct drm_file *file_priv)
+		    struct drm_file *file)
 {
 	struct drm_i915_gem_init *args = data;
-	int ret;
+
+	if (args->gtt_start >= args->gtt_end ||
+	    (args->gtt_end | args->gtt_start) & (PAGE_SIZE - 1))
+		return -EINVAL;
 
 	mutex_lock(&dev->struct_mutex);
-	ret = i915_gem_do_init(dev, args->gtt_start, args->gtt_end);
-	mutex_unlock(&dev->struct_mutex);
-
-	return ret;
-}
-
-int
-i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
-			    struct drm_file *file_priv)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_i915_gem_get_aperture *args = data;
-
-	if (!(dev->driver->driver_features & DRIVER_GEM))
-		return -ENODEV;
-
-	mutex_lock(&dev->struct_mutex);
-	args->aper_size = dev_priv->mm.gtt_total;
-	args->aper_available_size = args->aper_size - dev_priv->mm.pin_memory;
+	i915_gem_do_init(dev, args->gtt_start, args->gtt_end, args->gtt_end);
 	mutex_unlock(&dev->struct_mutex);
 
 	return 0;
 }
 
+int
+i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
+			    struct drm_file *file)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_gem_get_aperture *args = data;
+	struct drm_i915_gem_object *obj;
+	size_t pinned;
+
+	if (!(dev->driver->driver_features & DRIVER_GEM))
+		return -ENODEV;
+
+	pinned = 0;
+	mutex_lock(&dev->struct_mutex);
+	list_for_each_entry(obj, &dev_priv->mm.pinned_list, mm_list)
+		pinned += obj->gtt_space->size;
+	mutex_unlock(&dev->struct_mutex);
+
+	args->aper_size = dev_priv->mm.gtt_total;
+	args->aper_available_size = args->aper_size - pinned;
+
+	return 0;
+}
 
 /**
  * Creates a new mm object and returns a handle to it.
  */
 int
 i915_gem_create_ioctl(struct drm_device *dev, void *data,
-		      struct drm_file *file_priv)
+		      struct drm_file *file)
 {
 	struct drm_i915_gem_create *args = data;
-	struct drm_gem_object *obj;
+	struct drm_i915_gem_object *obj;
 	int ret;
 	u32 handle;
 
@@ -242,45 +208,28 @@
 	if (obj == NULL)
 		return -ENOMEM;
 
-	ret = drm_gem_handle_create(file_priv, obj, &handle);
+	ret = drm_gem_handle_create(file, &obj->base, &handle);
 	if (ret) {
-		drm_gem_object_release(obj);
-		i915_gem_info_remove_obj(dev->dev_private, obj->size);
+		drm_gem_object_release(&obj->base);
+		i915_gem_info_remove_obj(dev->dev_private, obj->base.size);
 		kfree(obj);
 		return ret;
 	}
 
 	/* drop reference from allocate - handle holds it now */
-	drm_gem_object_unreference(obj);
+	drm_gem_object_unreference(&obj->base);
 	trace_i915_gem_object_create(obj);
 
 	args->handle = handle;
 	return 0;
 }
 
-static inline int
-fast_shmem_read(struct page **pages,
-		loff_t page_base, int page_offset,
-		char __user *data,
-		int length)
+static int i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
 {
-	char *vaddr;
-	int ret;
-
-	vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT]);
-	ret = __copy_to_user_inatomic(data, vaddr + page_offset, length);
-	kunmap_atomic(vaddr);
-
-	return ret;
-}
-
-static int i915_gem_object_needs_bit17_swizzle(struct drm_gem_object *obj)
-{
-	drm_i915_private_t *dev_priv = obj->dev->dev_private;
-	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+	drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
 
 	return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
-		obj_priv->tiling_mode != I915_TILING_NONE;
+		obj->tiling_mode != I915_TILING_NONE;
 }
 
 static inline void
@@ -356,38 +305,51 @@
  * fault, it fails so we can fall back to i915_gem_shmem_pwrite_slow().
  */
 static int
-i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
+i915_gem_shmem_pread_fast(struct drm_device *dev,
+			  struct drm_i915_gem_object *obj,
 			  struct drm_i915_gem_pread *args,
-			  struct drm_file *file_priv)
+			  struct drm_file *file)
 {
-	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+	struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
 	ssize_t remain;
-	loff_t offset, page_base;
+	loff_t offset;
 	char __user *user_data;
 	int page_offset, page_length;
 
 	user_data = (char __user *) (uintptr_t) args->data_ptr;
 	remain = args->size;
 
-	obj_priv = to_intel_bo(obj);
 	offset = args->offset;
 
 	while (remain > 0) {
+		struct page *page;
+		char *vaddr;
+		int ret;
+
 		/* Operation in this page
 		 *
-		 * page_base = page offset within aperture
 		 * page_offset = offset within page
 		 * page_length = bytes to copy for this page
 		 */
-		page_base = (offset & ~(PAGE_SIZE-1));
 		page_offset = offset & (PAGE_SIZE-1);
 		page_length = remain;
 		if ((page_offset + remain) > PAGE_SIZE)
 			page_length = PAGE_SIZE - page_offset;
 
-		if (fast_shmem_read(obj_priv->pages,
-				    page_base, page_offset,
-				    user_data, page_length))
+		page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
+					   GFP_HIGHUSER | __GFP_RECLAIMABLE);
+		if (IS_ERR(page))
+			return PTR_ERR(page);
+
+		vaddr = kmap_atomic(page);
+		ret = __copy_to_user_inatomic(user_data,
+					      vaddr + page_offset,
+					      page_length);
+		kunmap_atomic(vaddr);
+
+		mark_page_accessed(page);
+		page_cache_release(page);
+		if (ret)
 			return -EFAULT;
 
 		remain -= page_length;
@@ -398,30 +360,6 @@
 	return 0;
 }
 
-static int
-i915_gem_object_get_pages_or_evict(struct drm_gem_object *obj)
-{
-	int ret;
-
-	ret = i915_gem_object_get_pages(obj, __GFP_NORETRY | __GFP_NOWARN);
-
-	/* If we've insufficient memory to map in the pages, attempt
-	 * to make some space by throwing out some old buffers.
-	 */
-	if (ret == -ENOMEM) {
-		struct drm_device *dev = obj->dev;
-
-		ret = i915_gem_evict_something(dev, obj->size,
-					       i915_gem_get_gtt_alignment(obj));
-		if (ret)
-			return ret;
-
-		ret = i915_gem_object_get_pages(obj, 0);
-	}
-
-	return ret;
-}
-
 /**
  * This is the fallback shmem pread path, which allocates temporary storage
  * in kernel space to copy_to_user into outside of the struct_mutex, so we
@@ -429,18 +367,19 @@
  * and not take page faults.
  */
 static int
-i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
+i915_gem_shmem_pread_slow(struct drm_device *dev,
+			  struct drm_i915_gem_object *obj,
 			  struct drm_i915_gem_pread *args,
-			  struct drm_file *file_priv)
+			  struct drm_file *file)
 {
-	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+	struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
 	struct mm_struct *mm = current->mm;
 	struct page **user_pages;
 	ssize_t remain;
 	loff_t offset, pinned_pages, i;
 	loff_t first_data_page, last_data_page, num_pages;
-	int shmem_page_index, shmem_page_offset;
-	int data_page_index,  data_page_offset;
+	int shmem_page_offset;
+	int data_page_index, data_page_offset;
 	int page_length;
 	int ret;
 	uint64_t data_ptr = args->data_ptr;
@@ -479,19 +418,18 @@
 
 	do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
 
-	obj_priv = to_intel_bo(obj);
 	offset = args->offset;
 
 	while (remain > 0) {
+		struct page *page;
+
 		/* Operation in this page
 		 *
-		 * shmem_page_index = page number within shmem file
 		 * shmem_page_offset = offset within page in shmem file
 		 * data_page_index = page number in get_user_pages return
 		 * data_page_offset = offset with data_page_index page.
 		 * page_length = bytes to copy for this page
 		 */
-		shmem_page_index = offset / PAGE_SIZE;
 		shmem_page_offset = offset & ~PAGE_MASK;
 		data_page_index = data_ptr / PAGE_SIZE - first_data_page;
 		data_page_offset = data_ptr & ~PAGE_MASK;
@@ -502,8 +440,13 @@
 		if ((data_page_offset + page_length) > PAGE_SIZE)
 			page_length = PAGE_SIZE - data_page_offset;
 
+		page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
+					   GFP_HIGHUSER | __GFP_RECLAIMABLE);
+		if (IS_ERR(page))
+			return PTR_ERR(page);
+
 		if (do_bit17_swizzling) {
-			slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
+			slow_shmem_bit17_copy(page,
 					      shmem_page_offset,
 					      user_pages[data_page_index],
 					      data_page_offset,
@@ -512,11 +455,14 @@
 		} else {
 			slow_shmem_copy(user_pages[data_page_index],
 					data_page_offset,
-					obj_priv->pages[shmem_page_index],
+					page,
 					shmem_page_offset,
 					page_length);
 		}
 
+		mark_page_accessed(page);
+		page_cache_release(page);
+
 		remain -= page_length;
 		data_ptr += page_length;
 		offset += page_length;
@@ -525,6 +471,7 @@
 out:
 	for (i = 0; i < pinned_pages; i++) {
 		SetPageDirty(user_pages[i]);
+		mark_page_accessed(user_pages[i]);
 		page_cache_release(user_pages[i]);
 	}
 	drm_free_large(user_pages);
@@ -539,11 +486,10 @@
  */
 int
 i915_gem_pread_ioctl(struct drm_device *dev, void *data,
-		     struct drm_file *file_priv)
+		     struct drm_file *file)
 {
 	struct drm_i915_gem_pread *args = data;
-	struct drm_gem_object *obj;
-	struct drm_i915_gem_object *obj_priv;
+	struct drm_i915_gem_object *obj;
 	int ret = 0;
 
 	if (args->size == 0)
@@ -563,39 +509,33 @@
 	if (ret)
 		return ret;
 
-	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
 	if (obj == NULL) {
 		ret = -ENOENT;
 		goto unlock;
 	}
-	obj_priv = to_intel_bo(obj);
 
 	/* Bounds check source.  */
-	if (args->offset > obj->size || args->size > obj->size - args->offset) {
+	if (args->offset > obj->base.size ||
+	    args->size > obj->base.size - args->offset) {
 		ret = -EINVAL;
 		goto out;
 	}
 
-	ret = i915_gem_object_get_pages_or_evict(obj);
-	if (ret)
-		goto out;
-
 	ret = i915_gem_object_set_cpu_read_domain_range(obj,
 							args->offset,
 							args->size);
 	if (ret)
-		goto out_put;
+		goto out;
 
 	ret = -EFAULT;
 	if (!i915_gem_object_needs_bit17_swizzle(obj))
-		ret = i915_gem_shmem_pread_fast(dev, obj, args, file_priv);
+		ret = i915_gem_shmem_pread_fast(dev, obj, args, file);
 	if (ret == -EFAULT)
-		ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv);
+		ret = i915_gem_shmem_pread_slow(dev, obj, args, file);
 
-out_put:
-	i915_gem_object_put_pages(obj);
 out:
-	drm_gem_object_unreference(obj);
+	drm_gem_object_unreference(&obj->base);
 unlock:
 	mutex_unlock(&dev->struct_mutex);
 	return ret;
@@ -645,32 +585,16 @@
 	io_mapping_unmap(dst_vaddr);
 }
 
-static inline int
-fast_shmem_write(struct page **pages,
-		 loff_t page_base, int page_offset,
-		 char __user *data,
-		 int length)
-{
-	char *vaddr;
-	int ret;
-
-	vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT]);
-	ret = __copy_from_user_inatomic(vaddr + page_offset, data, length);
-	kunmap_atomic(vaddr);
-
-	return ret;
-}
-
 /**
  * This is the fast pwrite path, where we copy the data directly from the
  * user into the GTT, uncached.
  */
 static int
-i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
+i915_gem_gtt_pwrite_fast(struct drm_device *dev,
+			 struct drm_i915_gem_object *obj,
 			 struct drm_i915_gem_pwrite *args,
-			 struct drm_file *file_priv)
+			 struct drm_file *file)
 {
-	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	ssize_t remain;
 	loff_t offset, page_base;
@@ -680,8 +604,7 @@
 	user_data = (char __user *) (uintptr_t) args->data_ptr;
 	remain = args->size;
 
-	obj_priv = to_intel_bo(obj);
-	offset = obj_priv->gtt_offset + args->offset;
+	offset = obj->gtt_offset + args->offset;
 
 	while (remain > 0) {
 		/* Operation in this page
@@ -721,11 +644,11 @@
  * than using i915_gem_gtt_pwrite_fast on a G45 (32-bit).
  */
 static int
-i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
+i915_gem_gtt_pwrite_slow(struct drm_device *dev,
+			 struct drm_i915_gem_object *obj,
 			 struct drm_i915_gem_pwrite *args,
-			 struct drm_file *file_priv)
+			 struct drm_file *file)
 {
-	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	ssize_t remain;
 	loff_t gtt_page_base, offset;
@@ -762,12 +685,15 @@
 		goto out_unpin_pages;
 	}
 
-	ret = i915_gem_object_set_to_gtt_domain(obj, 1);
+	ret = i915_gem_object_set_to_gtt_domain(obj, true);
 	if (ret)
 		goto out_unpin_pages;
 
-	obj_priv = to_intel_bo(obj);
-	offset = obj_priv->gtt_offset + args->offset;
+	ret = i915_gem_object_put_fence(obj);
+	if (ret)
+		goto out_unpin_pages;
+
+	offset = obj->gtt_offset + args->offset;
 
 	while (remain > 0) {
 		/* Operation in this page
@@ -813,39 +739,58 @@
  * copy_from_user into the kmapped pages backing the object.
  */
 static int
-i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
+i915_gem_shmem_pwrite_fast(struct drm_device *dev,
+			   struct drm_i915_gem_object *obj,
 			   struct drm_i915_gem_pwrite *args,
-			   struct drm_file *file_priv)
+			   struct drm_file *file)
 {
-	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+	struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
 	ssize_t remain;
-	loff_t offset, page_base;
+	loff_t offset;
 	char __user *user_data;
 	int page_offset, page_length;
 
 	user_data = (char __user *) (uintptr_t) args->data_ptr;
 	remain = args->size;
 
-	obj_priv = to_intel_bo(obj);
 	offset = args->offset;
-	obj_priv->dirty = 1;
+	obj->dirty = 1;
 
 	while (remain > 0) {
+		struct page *page;
+		char *vaddr;
+		int ret;
+
 		/* Operation in this page
 		 *
-		 * page_base = page offset within aperture
 		 * page_offset = offset within page
 		 * page_length = bytes to copy for this page
 		 */
-		page_base = (offset & ~(PAGE_SIZE-1));
 		page_offset = offset & (PAGE_SIZE-1);
 		page_length = remain;
 		if ((page_offset + remain) > PAGE_SIZE)
 			page_length = PAGE_SIZE - page_offset;
 
-		if (fast_shmem_write(obj_priv->pages,
-				       page_base, page_offset,
-				       user_data, page_length))
+		page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
+					   GFP_HIGHUSER | __GFP_RECLAIMABLE);
+		if (IS_ERR(page))
+			return PTR_ERR(page);
+
+		vaddr = kmap_atomic(page, KM_USER0);
+		ret = __copy_from_user_inatomic(vaddr + page_offset,
+						user_data,
+						page_length);
+		kunmap_atomic(vaddr, KM_USER0);
+
+		set_page_dirty(page);
+		mark_page_accessed(page);
+		page_cache_release(page);
+
+		/* If we get a fault while copying data, then (presumably) our
+		 * source page isn't available.  Return the error and we'll
+		 * retry in the slow path.
+		 */
+		if (ret)
 			return -EFAULT;
 
 		remain -= page_length;
@@ -864,17 +809,18 @@
  * struct_mutex is held.
  */
 static int
-i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
+i915_gem_shmem_pwrite_slow(struct drm_device *dev,
+			   struct drm_i915_gem_object *obj,
 			   struct drm_i915_gem_pwrite *args,
-			   struct drm_file *file_priv)
+			   struct drm_file *file)
 {
-	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+	struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
 	struct mm_struct *mm = current->mm;
 	struct page **user_pages;
 	ssize_t remain;
 	loff_t offset, pinned_pages, i;
 	loff_t first_data_page, last_data_page, num_pages;
-	int shmem_page_index, shmem_page_offset;
+	int shmem_page_offset;
 	int data_page_index,  data_page_offset;
 	int page_length;
 	int ret;
@@ -912,20 +858,19 @@
 
 	do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
 
-	obj_priv = to_intel_bo(obj);
 	offset = args->offset;
-	obj_priv->dirty = 1;
+	obj->dirty = 1;
 
 	while (remain > 0) {
+		struct page *page;
+
 		/* Operation in this page
 		 *
-		 * shmem_page_index = page number within shmem file
 		 * shmem_page_offset = offset within page in shmem file
 		 * data_page_index = page number in get_user_pages return
 		 * data_page_offset = offset with data_page_index page.
 		 * page_length = bytes to copy for this page
 		 */
-		shmem_page_index = offset / PAGE_SIZE;
 		shmem_page_offset = offset & ~PAGE_MASK;
 		data_page_index = data_ptr / PAGE_SIZE - first_data_page;
 		data_page_offset = data_ptr & ~PAGE_MASK;
@@ -936,21 +881,32 @@
 		if ((data_page_offset + page_length) > PAGE_SIZE)
 			page_length = PAGE_SIZE - data_page_offset;
 
+		page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
+					   GFP_HIGHUSER | __GFP_RECLAIMABLE);
+		if (IS_ERR(page)) {
+			ret = PTR_ERR(page);
+			goto out;
+		}
+
 		if (do_bit17_swizzling) {
-			slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
+			slow_shmem_bit17_copy(page,
 					      shmem_page_offset,
 					      user_pages[data_page_index],
 					      data_page_offset,
 					      page_length,
 					      0);
 		} else {
-			slow_shmem_copy(obj_priv->pages[shmem_page_index],
+			slow_shmem_copy(page,
 					shmem_page_offset,
 					user_pages[data_page_index],
 					data_page_offset,
 					page_length);
 		}
 
+		set_page_dirty(page);
+		mark_page_accessed(page);
+		page_cache_release(page);
+
 		remain -= page_length;
 		data_ptr += page_length;
 		offset += page_length;
@@ -974,8 +930,7 @@
 		      struct drm_file *file)
 {
 	struct drm_i915_gem_pwrite *args = data;
-	struct drm_gem_object *obj;
-	struct drm_i915_gem_object *obj_priv;
+	struct drm_i915_gem_object *obj;
 	int ret;
 
 	if (args->size == 0)
@@ -995,15 +950,15 @@
 	if (ret)
 		return ret;
 
-	obj = drm_gem_object_lookup(dev, file, args->handle);
+	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
 	if (obj == NULL) {
 		ret = -ENOENT;
 		goto unlock;
 	}
-	obj_priv = to_intel_bo(obj);
 
 	/* Bounds check destination. */
-	if (args->offset > obj->size || args->size > obj->size - args->offset) {
+	if (args->offset > obj->base.size ||
+	    args->size > obj->base.size - args->offset) {
 		ret = -EINVAL;
 		goto out;
 	}
@@ -1014,16 +969,19 @@
 	 * pread/pwrite currently are reading and writing from the CPU
 	 * perspective, requiring manual detiling by the client.
 	 */
-	if (obj_priv->phys_obj)
+	if (obj->phys_obj)
 		ret = i915_gem_phys_pwrite(dev, obj, args, file);
-	else if (obj_priv->tiling_mode == I915_TILING_NONE &&
-		 obj_priv->gtt_space &&
-		 obj->write_domain != I915_GEM_DOMAIN_CPU) {
-		ret = i915_gem_object_pin(obj, 0);
+	else if (obj->gtt_space &&
+		 obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
+		ret = i915_gem_object_pin(obj, 0, true);
 		if (ret)
 			goto out;
 
-		ret = i915_gem_object_set_to_gtt_domain(obj, 1);
+		ret = i915_gem_object_set_to_gtt_domain(obj, true);
+		if (ret)
+			goto out_unpin;
+
+		ret = i915_gem_object_put_fence(obj);
 		if (ret)
 			goto out_unpin;
 
@@ -1034,26 +992,19 @@
 out_unpin:
 		i915_gem_object_unpin(obj);
 	} else {
-		ret = i915_gem_object_get_pages_or_evict(obj);
-		if (ret)
-			goto out;
-
 		ret = i915_gem_object_set_to_cpu_domain(obj, 1);
 		if (ret)
-			goto out_put;
+			goto out;
 
 		ret = -EFAULT;
 		if (!i915_gem_object_needs_bit17_swizzle(obj))
 			ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file);
 		if (ret == -EFAULT)
 			ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file);
-
-out_put:
-		i915_gem_object_put_pages(obj);
 	}
 
 out:
-	drm_gem_object_unreference(obj);
+	drm_gem_object_unreference(&obj->base);
 unlock:
 	mutex_unlock(&dev->struct_mutex);
 	return ret;
@@ -1065,12 +1016,10 @@
  */
 int
 i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
-			  struct drm_file *file_priv)
+			  struct drm_file *file)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_set_domain *args = data;
-	struct drm_gem_object *obj;
-	struct drm_i915_gem_object *obj_priv;
+	struct drm_i915_gem_object *obj;
 	uint32_t read_domains = args->read_domains;
 	uint32_t write_domain = args->write_domain;
 	int ret;
@@ -1095,28 +1044,15 @@
 	if (ret)
 		return ret;
 
-	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
 	if (obj == NULL) {
 		ret = -ENOENT;
 		goto unlock;
 	}
-	obj_priv = to_intel_bo(obj);
-
-	intel_mark_busy(dev, obj);
 
 	if (read_domains & I915_GEM_DOMAIN_GTT) {
 		ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
 
-		/* Update the LRU on the fence for the CPU access that's
-		 * about to occur.
-		 */
-		if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
-			struct drm_i915_fence_reg *reg =
-				&dev_priv->fence_regs[obj_priv->fence_reg];
-			list_move_tail(&reg->lru_list,
-				       &dev_priv->mm.fence_list);
-		}
-
 		/* Silently promote "you're not bound, there was nothing to do"
 		 * to success, since the client was just asking us to
 		 * make sure everything was done.
@@ -1127,11 +1063,7 @@
 		ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
 	}
 
-	/* Maintain LRU order of "inactive" objects */
-	if (ret == 0 && i915_gem_object_is_inactive(obj_priv))
-		list_move_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list);
-
-	drm_gem_object_unreference(obj);
+	drm_gem_object_unreference(&obj->base);
 unlock:
 	mutex_unlock(&dev->struct_mutex);
 	return ret;
@@ -1142,10 +1074,10 @@
  */
 int
 i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
-		      struct drm_file *file_priv)
+			 struct drm_file *file)
 {
 	struct drm_i915_gem_sw_finish *args = data;
-	struct drm_gem_object *obj;
+	struct drm_i915_gem_object *obj;
 	int ret = 0;
 
 	if (!(dev->driver->driver_features & DRIVER_GEM))
@@ -1155,17 +1087,17 @@
 	if (ret)
 		return ret;
 
-	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
 	if (obj == NULL) {
 		ret = -ENOENT;
 		goto unlock;
 	}
 
 	/* Pinned buffers may be scanout, so flush the cache */
-	if (to_intel_bo(obj)->pin_count)
+	if (obj->pin_count)
 		i915_gem_object_flush_cpu_write_domain(obj);
 
-	drm_gem_object_unreference(obj);
+	drm_gem_object_unreference(&obj->base);
 unlock:
 	mutex_unlock(&dev->struct_mutex);
 	return ret;
@@ -1180,8 +1112,9 @@
  */
 int
 i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
-		   struct drm_file *file_priv)
+		    struct drm_file *file)
 {
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_mmap *args = data;
 	struct drm_gem_object *obj;
 	loff_t offset;
@@ -1190,10 +1123,15 @@
 	if (!(dev->driver->driver_features & DRIVER_GEM))
 		return -ENODEV;
 
-	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+	obj = drm_gem_object_lookup(dev, file, args->handle);
 	if (obj == NULL)
 		return -ENOENT;
 
+	if (obj->size > dev_priv->mm.gtt_mappable_end) {
+		drm_gem_object_unreference_unlocked(obj);
+		return -E2BIG;
+	}
+
 	offset = args->offset;
 
 	down_write(&current->mm->mmap_sem);
@@ -1228,10 +1166,9 @@
  */
 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
-	struct drm_gem_object *obj = vma->vm_private_data;
-	struct drm_device *dev = obj->dev;
+	struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data);
+	struct drm_device *dev = obj->base.dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 	pgoff_t page_offset;
 	unsigned long pfn;
 	int ret = 0;
@@ -1243,27 +1180,35 @@
 
 	/* Now bind it into the GTT if needed */
 	mutex_lock(&dev->struct_mutex);
-	if (!obj_priv->gtt_space) {
-		ret = i915_gem_object_bind_to_gtt(obj, 0);
+
+	if (!obj->map_and_fenceable) {
+		ret = i915_gem_object_unbind(obj);
 		if (ret)
 			goto unlock;
-
-		ret = i915_gem_object_set_to_gtt_domain(obj, write);
+	}
+	if (!obj->gtt_space) {
+		ret = i915_gem_object_bind_to_gtt(obj, 0, true);
 		if (ret)
 			goto unlock;
 	}
 
-	/* Need a new fence register? */
-	if (obj_priv->tiling_mode != I915_TILING_NONE) {
-		ret = i915_gem_object_get_fence_reg(obj, true);
-		if (ret)
-			goto unlock;
-	}
+	ret = i915_gem_object_set_to_gtt_domain(obj, write);
+	if (ret)
+		goto unlock;
 
-	if (i915_gem_object_is_inactive(obj_priv))
-		list_move_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list);
+	if (obj->tiling_mode == I915_TILING_NONE)
+		ret = i915_gem_object_put_fence(obj);
+	else
+		ret = i915_gem_object_get_fence(obj, NULL, true);
+	if (ret)
+		goto unlock;
 
-	pfn = ((dev->agp->base + obj_priv->gtt_offset) >> PAGE_SHIFT) +
+	if (i915_gem_object_is_inactive(obj))
+		list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
+
+	obj->fault_mappable = true;
+
+	pfn = ((dev->agp->base + obj->gtt_offset) >> PAGE_SHIFT) +
 		page_offset;
 
 	/* Finally, remap it using the new GTT offset */
@@ -1272,11 +1217,12 @@
 	mutex_unlock(&dev->struct_mutex);
 
 	switch (ret) {
+	case -EAGAIN:
+		set_need_resched();
 	case 0:
 	case -ERESTARTSYS:
 		return VM_FAULT_NOPAGE;
 	case -ENOMEM:
-	case -EAGAIN:
 		return VM_FAULT_OOM;
 	default:
 		return VM_FAULT_SIGBUS;
@@ -1295,37 +1241,39 @@
  * This routine allocates and attaches a fake offset for @obj.
  */
 static int
-i915_gem_create_mmap_offset(struct drm_gem_object *obj)
+i915_gem_create_mmap_offset(struct drm_i915_gem_object *obj)
 {
-	struct drm_device *dev = obj->dev;
+	struct drm_device *dev = obj->base.dev;
 	struct drm_gem_mm *mm = dev->mm_private;
-	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 	struct drm_map_list *list;
 	struct drm_local_map *map;
 	int ret = 0;
 
 	/* Set the object up for mmap'ing */
-	list = &obj->map_list;
+	list = &obj->base.map_list;
 	list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
 	if (!list->map)
 		return -ENOMEM;
 
 	map = list->map;
 	map->type = _DRM_GEM;
-	map->size = obj->size;
+	map->size = obj->base.size;
 	map->handle = obj;
 
 	/* Get a DRM GEM mmap offset allocated... */
 	list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
-						    obj->size / PAGE_SIZE, 0, 0);
+						    obj->base.size / PAGE_SIZE,
+						    0, 0);
 	if (!list->file_offset_node) {
-		DRM_ERROR("failed to allocate offset for bo %d\n", obj->name);
+		DRM_ERROR("failed to allocate offset for bo %d\n",
+			  obj->base.name);
 		ret = -ENOSPC;
 		goto out_free_list;
 	}
 
 	list->file_offset_node = drm_mm_get_block(list->file_offset_node,
-						  obj->size / PAGE_SIZE, 0);
+						  obj->base.size / PAGE_SIZE,
+						  0);
 	if (!list->file_offset_node) {
 		ret = -ENOMEM;
 		goto out_free_list;
@@ -1338,16 +1286,13 @@
 		goto out_free_mm;
 	}
 
-	/* By now we should be all set, any drm_mmap request on the offset
-	 * below will get to our mmap & fault handler */
-	obj_priv->mmap_offset = ((uint64_t) list->hash.key) << PAGE_SHIFT;
-
 	return 0;
 
 out_free_mm:
 	drm_mm_put_block(list->file_offset_node);
 out_free_list:
 	kfree(list->map);
+	list->map = NULL;
 
 	return ret;
 }
@@ -1367,38 +1312,51 @@
  * fixup by i915_gem_fault().
  */
 void
-i915_gem_release_mmap(struct drm_gem_object *obj)
+i915_gem_release_mmap(struct drm_i915_gem_object *obj)
 {
-	struct drm_device *dev = obj->dev;
-	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+	if (!obj->fault_mappable)
+		return;
 
-	if (dev->dev_mapping)
-		unmap_mapping_range(dev->dev_mapping,
-				    obj_priv->mmap_offset, obj->size, 1);
+	unmap_mapping_range(obj->base.dev->dev_mapping,
+			    (loff_t)obj->base.map_list.hash.key<<PAGE_SHIFT,
+			    obj->base.size, 1);
+
+	obj->fault_mappable = false;
 }
 
 static void
-i915_gem_free_mmap_offset(struct drm_gem_object *obj)
+i915_gem_free_mmap_offset(struct drm_i915_gem_object *obj)
 {
-	struct drm_device *dev = obj->dev;
-	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+	struct drm_device *dev = obj->base.dev;
 	struct drm_gem_mm *mm = dev->mm_private;
-	struct drm_map_list *list;
+	struct drm_map_list *list = &obj->base.map_list;
 
-	list = &obj->map_list;
 	drm_ht_remove_item(&mm->offset_hash, &list->hash);
+	drm_mm_put_block(list->file_offset_node);
+	kfree(list->map);
+	list->map = NULL;
+}
 
-	if (list->file_offset_node) {
-		drm_mm_put_block(list->file_offset_node);
-		list->file_offset_node = NULL;
-	}
+static uint32_t
+i915_gem_get_gtt_size(struct drm_i915_gem_object *obj)
+{
+	struct drm_device *dev = obj->base.dev;
+	uint32_t size;
 
-	if (list->map) {
-		kfree(list->map);
-		list->map = NULL;
-	}
+	if (INTEL_INFO(dev)->gen >= 4 ||
+	    obj->tiling_mode == I915_TILING_NONE)
+		return obj->base.size;
 
-	obj_priv->mmap_offset = 0;
+	/* Previous chips need a power-of-two fence region when tiling */
+	if (INTEL_INFO(dev)->gen == 3)
+		size = 1024*1024;
+	else
+		size = 512*1024;
+
+	while (size < obj->base.size)
+		size <<= 1;
+
+	return size;
 }
 
 /**
@@ -1406,42 +1364,68 @@
  * @obj: object to check
  *
  * Return the required GTT alignment for an object, taking into account
- * potential fence register mapping if needed.
+ * potential fence register mapping.
  */
 static uint32_t
-i915_gem_get_gtt_alignment(struct drm_gem_object *obj)
+i915_gem_get_gtt_alignment(struct drm_i915_gem_object *obj)
 {
-	struct drm_device *dev = obj->dev;
-	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
-	int start, i;
+	struct drm_device *dev = obj->base.dev;
 
 	/*
 	 * Minimum alignment is 4k (GTT page size), but might be greater
 	 * if a fence register is needed for the object.
 	 */
-	if (INTEL_INFO(dev)->gen >= 4 || obj_priv->tiling_mode == I915_TILING_NONE)
+	if (INTEL_INFO(dev)->gen >= 4 ||
+	    obj->tiling_mode == I915_TILING_NONE)
 		return 4096;
 
 	/*
 	 * Previous chips need to be aligned to the size of the smallest
 	 * fence register that can contain the object.
 	 */
-	if (INTEL_INFO(dev)->gen == 3)
-		start = 1024*1024;
+	return i915_gem_get_gtt_size(obj);
+}
+
+/**
+ * i915_gem_get_unfenced_gtt_alignment - return required GTT alignment for an
+ *					 unfenced object
+ * @obj: object to check
+ *
+ * Return the required GTT alignment for an object, only taking into account
+ * unfenced tiled surface requirements.
+ */
+static uint32_t
+i915_gem_get_unfenced_gtt_alignment(struct drm_i915_gem_object *obj)
+{
+	struct drm_device *dev = obj->base.dev;
+	int tile_height;
+
+	/*
+	 * Minimum alignment is 4k (GTT page size) for sane hw.
+	 */
+	if (INTEL_INFO(dev)->gen >= 4 || IS_G33(dev) ||
+	    obj->tiling_mode == I915_TILING_NONE)
+		return 4096;
+
+	/*
+	 * Older chips need unfenced tiled buffers to be aligned to the left
+	 * edge of an even tile row (where tile rows are counted as if the bo is
+	 * placed in a fenced gtt region).
+	 */
+	if (IS_GEN2(dev) ||
+	    (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev)))
+		tile_height = 32;
 	else
-		start = 512*1024;
+		tile_height = 8;
 
-	for (i = start; i < obj->size; i <<= 1)
-		;
-
-	return i;
+	return tile_height * obj->stride * 2;
 }
 
 /**
  * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
  * @dev: DRM device
  * @data: GTT mapping ioctl data
- * @file_priv: GEM object info
+ * @file: GEM object info
  *
  * Simply returns the fake offset to userspace so it can mmap it.
  * The mmap call will end up in drm_gem_mmap(), which will set things
@@ -1454,11 +1438,11 @@
  */
 int
 i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
-			struct drm_file *file_priv)
+			struct drm_file *file)
 {
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_mmap_gtt *args = data;
-	struct drm_gem_object *obj;
-	struct drm_i915_gem_object *obj_priv;
+	struct drm_i915_gem_object *obj;
 	int ret;
 
 	if (!(dev->driver->driver_features & DRIVER_GEM))
@@ -1468,130 +1452,196 @@
 	if (ret)
 		return ret;
 
-	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
 	if (obj == NULL) {
 		ret = -ENOENT;
 		goto unlock;
 	}
-	obj_priv = to_intel_bo(obj);
 
-	if (obj_priv->madv != I915_MADV_WILLNEED) {
+	if (obj->base.size > dev_priv->mm.gtt_mappable_end) {
+		ret = -E2BIG;
+		goto unlock;
+	}
+
+	if (obj->madv != I915_MADV_WILLNEED) {
 		DRM_ERROR("Attempting to mmap a purgeable buffer\n");
 		ret = -EINVAL;
 		goto out;
 	}
 
-	if (!obj_priv->mmap_offset) {
+	if (!obj->base.map_list.map) {
 		ret = i915_gem_create_mmap_offset(obj);
 		if (ret)
 			goto out;
 	}
 
-	args->offset = obj_priv->mmap_offset;
-
-	/*
-	 * Pull it into the GTT so that we have a page list (makes the
-	 * initial fault faster and any subsequent flushing possible).
-	 */
-	if (!obj_priv->agp_mem) {
-		ret = i915_gem_object_bind_to_gtt(obj, 0);
-		if (ret)
-			goto out;
-	}
+	args->offset = (u64)obj->base.map_list.hash.key << PAGE_SHIFT;
 
 out:
-	drm_gem_object_unreference(obj);
+	drm_gem_object_unreference(&obj->base);
 unlock:
 	mutex_unlock(&dev->struct_mutex);
 	return ret;
 }
 
-static void
-i915_gem_object_put_pages(struct drm_gem_object *obj)
+static int
+i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
+			      gfp_t gfpmask)
 {
-	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
-	int page_count = obj->size / PAGE_SIZE;
+	int page_count, i;
+	struct address_space *mapping;
+	struct inode *inode;
+	struct page *page;
+
+	/* Get the list of pages out of our struct file.  They'll be pinned
+	 * at this point until we release them.
+	 */
+	page_count = obj->base.size / PAGE_SIZE;
+	BUG_ON(obj->pages != NULL);
+	obj->pages = drm_malloc_ab(page_count, sizeof(struct page *));
+	if (obj->pages == NULL)
+		return -ENOMEM;
+
+	inode = obj->base.filp->f_path.dentry->d_inode;
+	mapping = inode->i_mapping;
+	for (i = 0; i < page_count; i++) {
+		page = read_cache_page_gfp(mapping, i,
+					   GFP_HIGHUSER |
+					   __GFP_COLD |
+					   __GFP_RECLAIMABLE |
+					   gfpmask);
+		if (IS_ERR(page))
+			goto err_pages;
+
+		obj->pages[i] = page;
+	}
+
+	if (obj->tiling_mode != I915_TILING_NONE)
+		i915_gem_object_do_bit_17_swizzle(obj);
+
+	return 0;
+
+err_pages:
+	while (i--)
+		page_cache_release(obj->pages[i]);
+
+	drm_free_large(obj->pages);
+	obj->pages = NULL;
+	return PTR_ERR(page);
+}
+
+static void
+i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
+{
+	int page_count = obj->base.size / PAGE_SIZE;
 	int i;
 
-	BUG_ON(obj_priv->pages_refcount == 0);
-	BUG_ON(obj_priv->madv == __I915_MADV_PURGED);
+	BUG_ON(obj->madv == __I915_MADV_PURGED);
 
-	if (--obj_priv->pages_refcount != 0)
-		return;
-
-	if (obj_priv->tiling_mode != I915_TILING_NONE)
+	if (obj->tiling_mode != I915_TILING_NONE)
 		i915_gem_object_save_bit_17_swizzle(obj);
 
-	if (obj_priv->madv == I915_MADV_DONTNEED)
-		obj_priv->dirty = 0;
+	if (obj->madv == I915_MADV_DONTNEED)
+		obj->dirty = 0;
 
 	for (i = 0; i < page_count; i++) {
-		if (obj_priv->dirty)
-			set_page_dirty(obj_priv->pages[i]);
+		if (obj->dirty)
+			set_page_dirty(obj->pages[i]);
 
-		if (obj_priv->madv == I915_MADV_WILLNEED)
-			mark_page_accessed(obj_priv->pages[i]);
+		if (obj->madv == I915_MADV_WILLNEED)
+			mark_page_accessed(obj->pages[i]);
 
-		page_cache_release(obj_priv->pages[i]);
+		page_cache_release(obj->pages[i]);
 	}
-	obj_priv->dirty = 0;
+	obj->dirty = 0;
 
-	drm_free_large(obj_priv->pages);
-	obj_priv->pages = NULL;
+	drm_free_large(obj->pages);
+	obj->pages = NULL;
 }
 
-static uint32_t
-i915_gem_next_request_seqno(struct drm_device *dev,
-			    struct intel_ring_buffer *ring)
+void
+i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
+			       struct intel_ring_buffer *ring,
+			       u32 seqno)
 {
-	drm_i915_private_t *dev_priv = dev->dev_private;
-
-	ring->outstanding_lazy_request = true;
-	return dev_priv->next_seqno;
-}
-
-static void
-i915_gem_object_move_to_active(struct drm_gem_object *obj,
-			       struct intel_ring_buffer *ring)
-{
-	struct drm_device *dev = obj->dev;
+	struct drm_device *dev = obj->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
-	uint32_t seqno = i915_gem_next_request_seqno(dev, ring);
 
 	BUG_ON(ring == NULL);
-	obj_priv->ring = ring;
+	obj->ring = ring;
 
 	/* Add a reference if we're newly entering the active list. */
-	if (!obj_priv->active) {
-		drm_gem_object_reference(obj);
-		obj_priv->active = 1;
+	if (!obj->active) {
+		drm_gem_object_reference(&obj->base);
+		obj->active = 1;
 	}
 
 	/* Move from whatever list we were on to the tail of execution. */
-	list_move_tail(&obj_priv->mm_list, &dev_priv->mm.active_list);
-	list_move_tail(&obj_priv->ring_list, &ring->active_list);
-	obj_priv->last_rendering_seqno = seqno;
+	list_move_tail(&obj->mm_list, &dev_priv->mm.active_list);
+	list_move_tail(&obj->ring_list, &ring->active_list);
+
+	obj->last_rendering_seqno = seqno;
+	if (obj->fenced_gpu_access) {
+		struct drm_i915_fence_reg *reg;
+
+		BUG_ON(obj->fence_reg == I915_FENCE_REG_NONE);
+
+		obj->last_fenced_seqno = seqno;
+		obj->last_fenced_ring = ring;
+
+		reg = &dev_priv->fence_regs[obj->fence_reg];
+		list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
+	}
 }
 
 static void
-i915_gem_object_move_to_flushing(struct drm_gem_object *obj)
+i915_gem_object_move_off_active(struct drm_i915_gem_object *obj)
 {
-	struct drm_device *dev = obj->dev;
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+	list_del_init(&obj->ring_list);
+	obj->last_rendering_seqno = 0;
+}
 
-	BUG_ON(!obj_priv->active);
-	list_move_tail(&obj_priv->mm_list, &dev_priv->mm.flushing_list);
-	list_del_init(&obj_priv->ring_list);
-	obj_priv->last_rendering_seqno = 0;
+static void
+i915_gem_object_move_to_flushing(struct drm_i915_gem_object *obj)
+{
+	struct drm_device *dev = obj->base.dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+
+	BUG_ON(!obj->active);
+	list_move_tail(&obj->mm_list, &dev_priv->mm.flushing_list);
+
+	i915_gem_object_move_off_active(obj);
+}
+
+static void
+i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
+{
+	struct drm_device *dev = obj->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (obj->pin_count != 0)
+		list_move_tail(&obj->mm_list, &dev_priv->mm.pinned_list);
+	else
+		list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
+
+	BUG_ON(!list_empty(&obj->gpu_write_list));
+	BUG_ON(!obj->active);
+	obj->ring = NULL;
+
+	i915_gem_object_move_off_active(obj);
+	obj->fenced_gpu_access = false;
+
+	obj->active = 0;
+	obj->pending_gpu_write = false;
+	drm_gem_object_unreference(&obj->base);
+
+	WARN_ON(i915_verify_lists(dev));
 }
 
 /* Immediately discard the backing storage */
 static void
-i915_gem_object_truncate(struct drm_gem_object *obj)
+i915_gem_object_truncate(struct drm_i915_gem_object *obj)
 {
-	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 	struct inode *inode;
 
 	/* Our goal here is to return as much of the memory as
@@ -1600,42 +1650,18 @@
 	 * backing pages, *now*. Here we mirror the actions taken
 	 * when by shmem_delete_inode() to release the backing store.
 	 */
-	inode = obj->filp->f_path.dentry->d_inode;
+	inode = obj->base.filp->f_path.dentry->d_inode;
 	truncate_inode_pages(inode->i_mapping, 0);
 	if (inode->i_op->truncate_range)
 		inode->i_op->truncate_range(inode, 0, (loff_t)-1);
 
-	obj_priv->madv = __I915_MADV_PURGED;
+	obj->madv = __I915_MADV_PURGED;
 }
 
 static inline int
-i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj_priv)
+i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
 {
-	return obj_priv->madv == I915_MADV_DONTNEED;
-}
-
-static void
-i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
-{
-	struct drm_device *dev = obj->dev;
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
-
-	if (obj_priv->pin_count != 0)
-		list_move_tail(&obj_priv->mm_list, &dev_priv->mm.pinned_list);
-	else
-		list_move_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list);
-	list_del_init(&obj_priv->ring_list);
-
-	BUG_ON(!list_empty(&obj_priv->gpu_write_list));
-
-	obj_priv->last_rendering_seqno = 0;
-	obj_priv->ring = NULL;
-	if (obj_priv->active) {
-		obj_priv->active = 0;
-		drm_gem_object_unreference(obj);
-	}
-	WARN_ON(i915_verify_lists(dev));
+	return obj->madv == I915_MADV_DONTNEED;
 }
 
 static void
@@ -1643,37 +1669,27 @@
 			       uint32_t flush_domains,
 			       struct intel_ring_buffer *ring)
 {
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *obj_priv, *next;
+	struct drm_i915_gem_object *obj, *next;
 
-	list_for_each_entry_safe(obj_priv, next,
+	list_for_each_entry_safe(obj, next,
 				 &ring->gpu_write_list,
 				 gpu_write_list) {
-		struct drm_gem_object *obj = &obj_priv->base;
+		if (obj->base.write_domain & flush_domains) {
+			uint32_t old_write_domain = obj->base.write_domain;
 
-		if (obj->write_domain & flush_domains) {
-			uint32_t old_write_domain = obj->write_domain;
-
-			obj->write_domain = 0;
-			list_del_init(&obj_priv->gpu_write_list);
-			i915_gem_object_move_to_active(obj, ring);
-
-			/* update the fence lru list */
-			if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
-				struct drm_i915_fence_reg *reg =
-					&dev_priv->fence_regs[obj_priv->fence_reg];
-				list_move_tail(&reg->lru_list,
-						&dev_priv->mm.fence_list);
-			}
+			obj->base.write_domain = 0;
+			list_del_init(&obj->gpu_write_list);
+			i915_gem_object_move_to_active(obj, ring,
+						       i915_gem_next_request_seqno(dev, ring));
 
 			trace_i915_gem_object_change_domain(obj,
-							    obj->read_domains,
+							    obj->base.read_domains,
 							    old_write_domain);
 		}
 	}
 }
 
-uint32_t
+int
 i915_add_request(struct drm_device *dev,
 		 struct drm_file *file,
 		 struct drm_i915_gem_request *request,
@@ -1683,17 +1699,17 @@
 	struct drm_i915_file_private *file_priv = NULL;
 	uint32_t seqno;
 	int was_empty;
+	int ret;
+
+	BUG_ON(request == NULL);
 
 	if (file != NULL)
 		file_priv = file->driver_priv;
 
-	if (request == NULL) {
-		request = kzalloc(sizeof(*request), GFP_KERNEL);
-		if (request == NULL)
-			return 0;
-	}
+	ret = ring->add_request(ring, &seqno);
+	if (ret)
+		return ret;
 
-	seqno = ring->add_request(dev, ring, 0);
 	ring->outstanding_lazy_request = false;
 
 	request->seqno = seqno;
@@ -1717,26 +1733,7 @@
 			queue_delayed_work(dev_priv->wq,
 					   &dev_priv->mm.retire_work, HZ);
 	}
-	return seqno;
-}
-
-/**
- * Command execution barrier
- *
- * Ensures that all commands in the ring are finished
- * before signalling the CPU
- */
-static void
-i915_retire_commands(struct drm_device *dev, struct intel_ring_buffer *ring)
-{
-	uint32_t flush_domains = 0;
-
-	/* The sampler always gets flushed on i965 (sigh) */
-	if (INTEL_INFO(dev)->gen >= 4)
-		flush_domains |= I915_GEM_DOMAIN_SAMPLER;
-
-	ring->flush(dev, ring,
-			I915_GEM_DOMAIN_COMMAND, flush_domains);
+	return 0;
 }
 
 static inline void
@@ -1769,62 +1766,76 @@
 	}
 
 	while (!list_empty(&ring->active_list)) {
-		struct drm_i915_gem_object *obj_priv;
+		struct drm_i915_gem_object *obj;
 
-		obj_priv = list_first_entry(&ring->active_list,
-					    struct drm_i915_gem_object,
-					    ring_list);
+		obj = list_first_entry(&ring->active_list,
+				       struct drm_i915_gem_object,
+				       ring_list);
 
-		obj_priv->base.write_domain = 0;
-		list_del_init(&obj_priv->gpu_write_list);
-		i915_gem_object_move_to_inactive(&obj_priv->base);
+		obj->base.write_domain = 0;
+		list_del_init(&obj->gpu_write_list);
+		i915_gem_object_move_to_inactive(obj);
+	}
+}
+
+static void i915_gem_reset_fences(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int i;
+
+	for (i = 0; i < 16; i++) {
+		struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
+		struct drm_i915_gem_object *obj = reg->obj;
+
+		if (!obj)
+			continue;
+
+		if (obj->tiling_mode)
+			i915_gem_release_mmap(obj);
+
+		reg->obj->fence_reg = I915_FENCE_REG_NONE;
+		reg->obj->fenced_gpu_access = false;
+		reg->obj->last_fenced_seqno = 0;
+		reg->obj->last_fenced_ring = NULL;
+		i915_gem_clear_fence_reg(dev, reg);
 	}
 }
 
 void i915_gem_reset(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *obj_priv;
+	struct drm_i915_gem_object *obj;
 	int i;
 
-	i915_gem_reset_ring_lists(dev_priv, &dev_priv->render_ring);
-	i915_gem_reset_ring_lists(dev_priv, &dev_priv->bsd_ring);
-	i915_gem_reset_ring_lists(dev_priv, &dev_priv->blt_ring);
+	for (i = 0; i < I915_NUM_RINGS; i++)
+		i915_gem_reset_ring_lists(dev_priv, &dev_priv->ring[i]);
 
 	/* Remove anything from the flushing lists. The GPU cache is likely
 	 * to be lost on reset along with the data, so simply move the
 	 * lost bo to the inactive list.
 	 */
 	while (!list_empty(&dev_priv->mm.flushing_list)) {
-		obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
-					    struct drm_i915_gem_object,
-					    mm_list);
+		obj = list_first_entry(&dev_priv->mm.flushing_list,
+				       struct drm_i915_gem_object,
+				       mm_list);
 
-		obj_priv->base.write_domain = 0;
-		list_del_init(&obj_priv->gpu_write_list);
-		i915_gem_object_move_to_inactive(&obj_priv->base);
+		obj->base.write_domain = 0;
+		list_del_init(&obj->gpu_write_list);
+		i915_gem_object_move_to_inactive(obj);
 	}
 
 	/* Move everything out of the GPU domains to ensure we do any
 	 * necessary invalidation upon reuse.
 	 */
-	list_for_each_entry(obj_priv,
+	list_for_each_entry(obj,
 			    &dev_priv->mm.inactive_list,
 			    mm_list)
 	{
-		obj_priv->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
+		obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
 	}
 
 	/* The fence registers are invalidated so clear them out */
-	for (i = 0; i < 16; i++) {
-		struct drm_i915_fence_reg *reg;
-
-		reg = &dev_priv->fence_regs[i];
-		if (!reg->obj)
-			continue;
-
-		i915_gem_clear_fence_reg(reg->obj);
-	}
+	i915_gem_reset_fences(dev);
 }
 
 /**
@@ -1836,6 +1847,7 @@
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	uint32_t seqno;
+	int i;
 
 	if (!ring->status_page.page_addr ||
 	    list_empty(&ring->request_list))
@@ -1843,7 +1855,12 @@
 
 	WARN_ON(i915_verify_lists(dev));
 
-	seqno = ring->get_seqno(dev, ring);
+	seqno = ring->get_seqno(ring);
+
+	for (i = 0; i < I915_NUM_RINGS; i++)
+		if (seqno >= ring->sync_seqno[i])
+			ring->sync_seqno[i] = 0;
+
 	while (!list_empty(&ring->request_list)) {
 		struct drm_i915_gem_request *request;
 
@@ -1865,18 +1882,16 @@
 	 * by the ringbuffer to the flushing/inactive lists as appropriate.
 	 */
 	while (!list_empty(&ring->active_list)) {
-		struct drm_gem_object *obj;
-		struct drm_i915_gem_object *obj_priv;
+		struct drm_i915_gem_object *obj;
 
-		obj_priv = list_first_entry(&ring->active_list,
-					    struct drm_i915_gem_object,
-					    ring_list);
+		obj = list_first_entry(&ring->active_list,
+				       struct drm_i915_gem_object,
+				       ring_list);
 
-		if (!i915_seqno_passed(seqno, obj_priv->last_rendering_seqno))
+		if (!i915_seqno_passed(seqno, obj->last_rendering_seqno))
 			break;
 
-		obj = &obj_priv->base;
-		if (obj->write_domain != 0)
+		if (obj->base.write_domain != 0)
 			i915_gem_object_move_to_flushing(obj);
 		else
 			i915_gem_object_move_to_inactive(obj);
@@ -1884,7 +1899,7 @@
 
 	if (unlikely (dev_priv->trace_irq_seqno &&
 		      i915_seqno_passed(dev_priv->trace_irq_seqno, seqno))) {
-		ring->user_irq_put(dev, ring);
+		ring->irq_put(ring);
 		dev_priv->trace_irq_seqno = 0;
 	}
 
@@ -1895,24 +1910,24 @@
 i915_gem_retire_requests(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	int i;
 
 	if (!list_empty(&dev_priv->mm.deferred_free_list)) {
-	    struct drm_i915_gem_object *obj_priv, *tmp;
+	    struct drm_i915_gem_object *obj, *next;
 
 	    /* We must be careful that during unbind() we do not
 	     * accidentally infinitely recurse into retire requests.
 	     * Currently:
 	     *   retire -> free -> unbind -> wait -> retire_ring
 	     */
-	    list_for_each_entry_safe(obj_priv, tmp,
+	    list_for_each_entry_safe(obj, next,
 				     &dev_priv->mm.deferred_free_list,
 				     mm_list)
-		    i915_gem_free_object_tail(&obj_priv->base);
+		    i915_gem_free_object_tail(obj);
 	}
 
-	i915_gem_retire_requests_ring(dev, &dev_priv->render_ring);
-	i915_gem_retire_requests_ring(dev, &dev_priv->bsd_ring);
-	i915_gem_retire_requests_ring(dev, &dev_priv->blt_ring);
+	for (i = 0; i < I915_NUM_RINGS; i++)
+		i915_gem_retire_requests_ring(dev, &dev_priv->ring[i]);
 }
 
 static void
@@ -1920,6 +1935,8 @@
 {
 	drm_i915_private_t *dev_priv;
 	struct drm_device *dev;
+	bool idle;
+	int i;
 
 	dev_priv = container_of(work, drm_i915_private_t,
 				mm.retire_work.work);
@@ -1933,11 +1950,31 @@
 
 	i915_gem_retire_requests(dev);
 
-	if (!dev_priv->mm.suspended &&
-		(!list_empty(&dev_priv->render_ring.request_list) ||
-		 !list_empty(&dev_priv->bsd_ring.request_list) ||
-		 !list_empty(&dev_priv->blt_ring.request_list)))
+	/* Send a periodic flush down the ring so we don't hold onto GEM
+	 * objects indefinitely.
+	 */
+	idle = true;
+	for (i = 0; i < I915_NUM_RINGS; i++) {
+		struct intel_ring_buffer *ring = &dev_priv->ring[i];
+
+		if (!list_empty(&ring->gpu_write_list)) {
+			struct drm_i915_gem_request *request;
+			int ret;
+
+			ret = i915_gem_flush_ring(dev, ring, 0,
+						  I915_GEM_GPU_DOMAINS);
+			request = kzalloc(sizeof(*request), GFP_KERNEL);
+			if (ret || request == NULL ||
+			    i915_add_request(dev, NULL, request, ring))
+				kfree(request);
+		}
+
+		idle &= list_empty(&ring->request_list);
+	}
+
+	if (!dev_priv->mm.suspended && !idle)
 		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
+
 	mutex_unlock(&dev->struct_mutex);
 }
 
@@ -1954,14 +1991,23 @@
 	if (atomic_read(&dev_priv->mm.wedged))
 		return -EAGAIN;
 
-	if (ring->outstanding_lazy_request) {
-		seqno = i915_add_request(dev, NULL, NULL, ring);
-		if (seqno == 0)
-			return -ENOMEM;
-	}
-	BUG_ON(seqno == dev_priv->next_seqno);
+	if (seqno == ring->outstanding_lazy_request) {
+		struct drm_i915_gem_request *request;
 
-	if (!i915_seqno_passed(ring->get_seqno(dev, ring), seqno)) {
+		request = kzalloc(sizeof(*request), GFP_KERNEL);
+		if (request == NULL)
+			return -ENOMEM;
+
+		ret = i915_add_request(dev, NULL, request, ring);
+		if (ret) {
+			kfree(request);
+			return ret;
+		}
+
+		seqno = request->seqno;
+	}
+
+	if (!i915_seqno_passed(ring->get_seqno(ring), seqno)) {
 		if (HAS_PCH_SPLIT(dev))
 			ier = I915_READ(DEIER) | I915_READ(GTIER);
 		else
@@ -1975,21 +2021,23 @@
 
 		trace_i915_gem_request_wait_begin(dev, seqno);
 
-		ring->waiting_gem_seqno = seqno;
-		ring->user_irq_get(dev, ring);
-		if (interruptible)
-			ret = wait_event_interruptible(ring->irq_queue,
-				i915_seqno_passed(
-					ring->get_seqno(dev, ring), seqno)
-				|| atomic_read(&dev_priv->mm.wedged));
-		else
-			wait_event(ring->irq_queue,
-				i915_seqno_passed(
-					ring->get_seqno(dev, ring), seqno)
-				|| atomic_read(&dev_priv->mm.wedged));
+		ring->waiting_seqno = seqno;
+		if (ring->irq_get(ring)) {
+			if (interruptible)
+				ret = wait_event_interruptible(ring->irq_queue,
+							       i915_seqno_passed(ring->get_seqno(ring), seqno)
+							       || atomic_read(&dev_priv->mm.wedged));
+			else
+				wait_event(ring->irq_queue,
+					   i915_seqno_passed(ring->get_seqno(ring), seqno)
+					   || atomic_read(&dev_priv->mm.wedged));
 
-		ring->user_irq_put(dev, ring);
-		ring->waiting_gem_seqno = 0;
+			ring->irq_put(ring);
+		} else if (wait_for(i915_seqno_passed(ring->get_seqno(ring),
+						      seqno) ||
+				    atomic_read(&dev_priv->mm.wedged), 3000))
+			ret = -EBUSY;
+		ring->waiting_seqno = 0;
 
 		trace_i915_gem_request_wait_end(dev, seqno);
 	}
@@ -1998,7 +2046,7 @@
 
 	if (ret && ret != -ERESTARTSYS)
 		DRM_ERROR("%s returns %d (awaiting %d at %d, next %d)\n",
-			  __func__, ret, seqno, ring->get_seqno(dev, ring),
+			  __func__, ret, seqno, ring->get_seqno(ring),
 			  dev_priv->next_seqno);
 
 	/* Directly dispatch request retiring.  While we have the work queue
@@ -2023,70 +2071,30 @@
 	return i915_do_wait_request(dev, seqno, 1, ring);
 }
 
-static void
-i915_gem_flush_ring(struct drm_device *dev,
-		    struct drm_file *file_priv,
-		    struct intel_ring_buffer *ring,
-		    uint32_t invalidate_domains,
-		    uint32_t flush_domains)
-{
-	ring->flush(dev, ring, invalidate_domains, flush_domains);
-	i915_gem_process_flushing_list(dev, flush_domains, ring);
-}
-
-static void
-i915_gem_flush(struct drm_device *dev,
-	       struct drm_file *file_priv,
-	       uint32_t invalidate_domains,
-	       uint32_t flush_domains,
-	       uint32_t flush_rings)
-{
-	drm_i915_private_t *dev_priv = dev->dev_private;
-
-	if (flush_domains & I915_GEM_DOMAIN_CPU)
-		drm_agp_chipset_flush(dev);
-
-	if ((flush_domains | invalidate_domains) & I915_GEM_GPU_DOMAINS) {
-		if (flush_rings & RING_RENDER)
-			i915_gem_flush_ring(dev, file_priv,
-					    &dev_priv->render_ring,
-					    invalidate_domains, flush_domains);
-		if (flush_rings & RING_BSD)
-			i915_gem_flush_ring(dev, file_priv,
-					    &dev_priv->bsd_ring,
-					    invalidate_domains, flush_domains);
-		if (flush_rings & RING_BLT)
-			i915_gem_flush_ring(dev, file_priv,
-					    &dev_priv->blt_ring,
-					    invalidate_domains, flush_domains);
-	}
-}
-
 /**
  * Ensures that all rendering to the object has completed and the object is
  * safe to unbind from the GTT or access from the CPU.
  */
-static int
-i915_gem_object_wait_rendering(struct drm_gem_object *obj,
+int
+i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
 			       bool interruptible)
 {
-	struct drm_device *dev = obj->dev;
-	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+	struct drm_device *dev = obj->base.dev;
 	int ret;
 
 	/* This function only exists to support waiting for existing rendering,
 	 * not for emitting required flushes.
 	 */
-	BUG_ON((obj->write_domain & I915_GEM_GPU_DOMAINS) != 0);
+	BUG_ON((obj->base.write_domain & I915_GEM_GPU_DOMAINS) != 0);
 
 	/* If there is rendering queued on the buffer being evicted, wait for
 	 * it.
 	 */
-	if (obj_priv->active) {
+	if (obj->active) {
 		ret = i915_do_wait_request(dev,
-					   obj_priv->last_rendering_seqno,
+					   obj->last_rendering_seqno,
 					   interruptible,
-					   obj_priv->ring);
+					   obj->ring);
 		if (ret)
 			return ret;
 	}
@@ -2098,17 +2106,14 @@
  * Unbinds an object from the GTT aperture.
  */
 int
-i915_gem_object_unbind(struct drm_gem_object *obj)
+i915_gem_object_unbind(struct drm_i915_gem_object *obj)
 {
-	struct drm_device *dev = obj->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 	int ret = 0;
 
-	if (obj_priv->gtt_space == NULL)
+	if (obj->gtt_space == NULL)
 		return 0;
 
-	if (obj_priv->pin_count != 0) {
+	if (obj->pin_count != 0) {
 		DRM_ERROR("Attempting to unbind pinned buffer\n");
 		return -EINVAL;
 	}
@@ -2131,27 +2136,27 @@
 	 */
 	if (ret) {
 		i915_gem_clflush_object(obj);
-		obj->read_domains = obj->write_domain = I915_GEM_DOMAIN_CPU;
+		obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
 	}
 
 	/* release the fence reg _after_ flushing */
-	if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
-		i915_gem_clear_fence_reg(obj);
+	ret = i915_gem_object_put_fence(obj);
+	if (ret == -ERESTARTSYS)
+		return ret;
 
-	drm_unbind_agp(obj_priv->agp_mem);
-	drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE);
+	i915_gem_gtt_unbind_object(obj);
+	i915_gem_object_put_pages_gtt(obj);
 
-	i915_gem_object_put_pages(obj);
-	BUG_ON(obj_priv->pages_refcount);
+	list_del_init(&obj->gtt_list);
+	list_del_init(&obj->mm_list);
+	/* Avoid an unnecessary call to unbind on rebind. */
+	obj->map_and_fenceable = true;
 
-	i915_gem_info_remove_gtt(dev_priv, obj->size);
-	list_del_init(&obj_priv->mm_list);
+	drm_mm_put_block(obj->gtt_space);
+	obj->gtt_space = NULL;
+	obj->gtt_offset = 0;
 
-	drm_mm_put_block(obj_priv->gtt_space);
-	obj_priv->gtt_space = NULL;
-	obj_priv->gtt_offset = 0;
-
-	if (i915_gem_object_is_purgeable(obj_priv))
+	if (i915_gem_object_is_purgeable(obj))
 		i915_gem_object_truncate(obj);
 
 	trace_i915_gem_object_unbind(obj);
@@ -2159,14 +2164,37 @@
 	return ret;
 }
 
+int
+i915_gem_flush_ring(struct drm_device *dev,
+		    struct intel_ring_buffer *ring,
+		    uint32_t invalidate_domains,
+		    uint32_t flush_domains)
+{
+	int ret;
+
+	ret = ring->flush(ring, invalidate_domains, flush_domains);
+	if (ret)
+		return ret;
+
+	i915_gem_process_flushing_list(dev, flush_domains, ring);
+	return 0;
+}
+
 static int i915_ring_idle(struct drm_device *dev,
 			  struct intel_ring_buffer *ring)
 {
+	int ret;
+
 	if (list_empty(&ring->gpu_write_list) && list_empty(&ring->active_list))
 		return 0;
 
-	i915_gem_flush_ring(dev, NULL, ring,
-			    I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+	if (!list_empty(&ring->gpu_write_list)) {
+		ret = i915_gem_flush_ring(dev, ring,
+				    I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+		if (ret)
+			return ret;
+	}
+
 	return i915_wait_request(dev,
 				 i915_gem_next_request_seqno(dev, ring),
 				 ring);
@@ -2177,7 +2205,7 @@
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	bool lists_empty;
-	int ret;
+	int ret, i;
 
 	lists_empty = (list_empty(&dev_priv->mm.flushing_list) &&
 		       list_empty(&dev_priv->mm.active_list));
@@ -2185,258 +2213,305 @@
 		return 0;
 
 	/* Flush everything onto the inactive list. */
-	ret = i915_ring_idle(dev, &dev_priv->render_ring);
-	if (ret)
-		return ret;
-
-	ret = i915_ring_idle(dev, &dev_priv->bsd_ring);
-	if (ret)
-		return ret;
-
-	ret = i915_ring_idle(dev, &dev_priv->blt_ring);
-	if (ret)
-		return ret;
+	for (i = 0; i < I915_NUM_RINGS; i++) {
+		ret = i915_ring_idle(dev, &dev_priv->ring[i]);
+		if (ret)
+			return ret;
+	}
 
 	return 0;
 }
 
-static int
-i915_gem_object_get_pages(struct drm_gem_object *obj,
-			  gfp_t gfpmask)
+static int sandybridge_write_fence_reg(struct drm_i915_gem_object *obj,
+				       struct intel_ring_buffer *pipelined)
 {
-	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
-	int page_count, i;
-	struct address_space *mapping;
-	struct inode *inode;
-	struct page *page;
-
-	BUG_ON(obj_priv->pages_refcount
-			== DRM_I915_GEM_OBJECT_MAX_PAGES_REFCOUNT);
-
-	if (obj_priv->pages_refcount++ != 0)
-		return 0;
-
-	/* Get the list of pages out of our struct file.  They'll be pinned
-	 * at this point until we release them.
-	 */
-	page_count = obj->size / PAGE_SIZE;
-	BUG_ON(obj_priv->pages != NULL);
-	obj_priv->pages = drm_calloc_large(page_count, sizeof(struct page *));
-	if (obj_priv->pages == NULL) {
-		obj_priv->pages_refcount--;
-		return -ENOMEM;
-	}
-
-	inode = obj->filp->f_path.dentry->d_inode;
-	mapping = inode->i_mapping;
-	for (i = 0; i < page_count; i++) {
-		page = read_cache_page_gfp(mapping, i,
-					   GFP_HIGHUSER |
-					   __GFP_COLD |
-					   __GFP_RECLAIMABLE |
-					   gfpmask);
-		if (IS_ERR(page))
-			goto err_pages;
-
-		obj_priv->pages[i] = page;
-	}
-
-	if (obj_priv->tiling_mode != I915_TILING_NONE)
-		i915_gem_object_do_bit_17_swizzle(obj);
-
-	return 0;
-
-err_pages:
-	while (i--)
-		page_cache_release(obj_priv->pages[i]);
-
-	drm_free_large(obj_priv->pages);
-	obj_priv->pages = NULL;
-	obj_priv->pages_refcount--;
-	return PTR_ERR(page);
-}
-
-static void sandybridge_write_fence_reg(struct drm_i915_fence_reg *reg)
-{
-	struct drm_gem_object *obj = reg->obj;
-	struct drm_device *dev = obj->dev;
+	struct drm_device *dev = obj->base.dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
-	int regnum = obj_priv->fence_reg;
+	u32 size = obj->gtt_space->size;
+	int regnum = obj->fence_reg;
 	uint64_t val;
 
-	val = (uint64_t)((obj_priv->gtt_offset + obj->size - 4096) &
-		    0xfffff000) << 32;
-	val |= obj_priv->gtt_offset & 0xfffff000;
-	val |= (uint64_t)((obj_priv->stride / 128) - 1) <<
+	val = (uint64_t)((obj->gtt_offset + size - 4096) &
+			 0xfffff000) << 32;
+	val |= obj->gtt_offset & 0xfffff000;
+	val |= (uint64_t)((obj->stride / 128) - 1) <<
 		SANDYBRIDGE_FENCE_PITCH_SHIFT;
 
-	if (obj_priv->tiling_mode == I915_TILING_Y)
+	if (obj->tiling_mode == I915_TILING_Y)
 		val |= 1 << I965_FENCE_TILING_Y_SHIFT;
 	val |= I965_FENCE_REG_VALID;
 
-	I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (regnum * 8), val);
+	if (pipelined) {
+		int ret = intel_ring_begin(pipelined, 6);
+		if (ret)
+			return ret;
+
+		intel_ring_emit(pipelined, MI_NOOP);
+		intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(2));
+		intel_ring_emit(pipelined, FENCE_REG_SANDYBRIDGE_0 + regnum*8);
+		intel_ring_emit(pipelined, (u32)val);
+		intel_ring_emit(pipelined, FENCE_REG_SANDYBRIDGE_0 + regnum*8 + 4);
+		intel_ring_emit(pipelined, (u32)(val >> 32));
+		intel_ring_advance(pipelined);
+	} else
+		I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + regnum * 8, val);
+
+	return 0;
 }
 
-static void i965_write_fence_reg(struct drm_i915_fence_reg *reg)
+static int i965_write_fence_reg(struct drm_i915_gem_object *obj,
+				struct intel_ring_buffer *pipelined)
 {
-	struct drm_gem_object *obj = reg->obj;
-	struct drm_device *dev = obj->dev;
+	struct drm_device *dev = obj->base.dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
-	int regnum = obj_priv->fence_reg;
+	u32 size = obj->gtt_space->size;
+	int regnum = obj->fence_reg;
 	uint64_t val;
 
-	val = (uint64_t)((obj_priv->gtt_offset + obj->size - 4096) &
+	val = (uint64_t)((obj->gtt_offset + size - 4096) &
 		    0xfffff000) << 32;
-	val |= obj_priv->gtt_offset & 0xfffff000;
-	val |= ((obj_priv->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
-	if (obj_priv->tiling_mode == I915_TILING_Y)
+	val |= obj->gtt_offset & 0xfffff000;
+	val |= ((obj->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
+	if (obj->tiling_mode == I915_TILING_Y)
 		val |= 1 << I965_FENCE_TILING_Y_SHIFT;
 	val |= I965_FENCE_REG_VALID;
 
-	I915_WRITE64(FENCE_REG_965_0 + (regnum * 8), val);
+	if (pipelined) {
+		int ret = intel_ring_begin(pipelined, 6);
+		if (ret)
+			return ret;
+
+		intel_ring_emit(pipelined, MI_NOOP);
+		intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(2));
+		intel_ring_emit(pipelined, FENCE_REG_965_0 + regnum*8);
+		intel_ring_emit(pipelined, (u32)val);
+		intel_ring_emit(pipelined, FENCE_REG_965_0 + regnum*8 + 4);
+		intel_ring_emit(pipelined, (u32)(val >> 32));
+		intel_ring_advance(pipelined);
+	} else
+		I915_WRITE64(FENCE_REG_965_0 + regnum * 8, val);
+
+	return 0;
 }
 
-static void i915_write_fence_reg(struct drm_i915_fence_reg *reg)
+static int i915_write_fence_reg(struct drm_i915_gem_object *obj,
+				struct intel_ring_buffer *pipelined)
 {
-	struct drm_gem_object *obj = reg->obj;
-	struct drm_device *dev = obj->dev;
+	struct drm_device *dev = obj->base.dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
-	int regnum = obj_priv->fence_reg;
+	u32 size = obj->gtt_space->size;
+	u32 fence_reg, val, pitch_val;
 	int tile_width;
-	uint32_t fence_reg, val;
-	uint32_t pitch_val;
 
-	if ((obj_priv->gtt_offset & ~I915_FENCE_START_MASK) ||
-	    (obj_priv->gtt_offset & (obj->size - 1))) {
-		WARN(1, "%s: object 0x%08x not 1M or size (0x%zx) aligned\n",
-		     __func__, obj_priv->gtt_offset, obj->size);
-		return;
-	}
+	if (WARN((obj->gtt_offset & ~I915_FENCE_START_MASK) ||
+		 (size & -size) != size ||
+		 (obj->gtt_offset & (size - 1)),
+		 "object 0x%08x [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
+		 obj->gtt_offset, obj->map_and_fenceable, size))
+		return -EINVAL;
 
-	if (obj_priv->tiling_mode == I915_TILING_Y &&
-	    HAS_128_BYTE_Y_TILING(dev))
+	if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
 		tile_width = 128;
 	else
 		tile_width = 512;
 
 	/* Note: pitch better be a power of two tile widths */
-	pitch_val = obj_priv->stride / tile_width;
+	pitch_val = obj->stride / tile_width;
 	pitch_val = ffs(pitch_val) - 1;
 
-	if (obj_priv->tiling_mode == I915_TILING_Y &&
-	    HAS_128_BYTE_Y_TILING(dev))
-		WARN_ON(pitch_val > I830_FENCE_MAX_PITCH_VAL);
-	else
-		WARN_ON(pitch_val > I915_FENCE_MAX_PITCH_VAL);
-
-	val = obj_priv->gtt_offset;
-	if (obj_priv->tiling_mode == I915_TILING_Y)
+	val = obj->gtt_offset;
+	if (obj->tiling_mode == I915_TILING_Y)
 		val |= 1 << I830_FENCE_TILING_Y_SHIFT;
-	val |= I915_FENCE_SIZE_BITS(obj->size);
+	val |= I915_FENCE_SIZE_BITS(size);
 	val |= pitch_val << I830_FENCE_PITCH_SHIFT;
 	val |= I830_FENCE_REG_VALID;
 
-	if (regnum < 8)
-		fence_reg = FENCE_REG_830_0 + (regnum * 4);
+	fence_reg = obj->fence_reg;
+	if (fence_reg < 8)
+		fence_reg = FENCE_REG_830_0 + fence_reg * 4;
 	else
-		fence_reg = FENCE_REG_945_8 + ((regnum - 8) * 4);
-	I915_WRITE(fence_reg, val);
+		fence_reg = FENCE_REG_945_8 + (fence_reg - 8) * 4;
+
+	if (pipelined) {
+		int ret = intel_ring_begin(pipelined, 4);
+		if (ret)
+			return ret;
+
+		intel_ring_emit(pipelined, MI_NOOP);
+		intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(1));
+		intel_ring_emit(pipelined, fence_reg);
+		intel_ring_emit(pipelined, val);
+		intel_ring_advance(pipelined);
+	} else
+		I915_WRITE(fence_reg, val);
+
+	return 0;
 }
 
-static void i830_write_fence_reg(struct drm_i915_fence_reg *reg)
+static int i830_write_fence_reg(struct drm_i915_gem_object *obj,
+				struct intel_ring_buffer *pipelined)
 {
-	struct drm_gem_object *obj = reg->obj;
-	struct drm_device *dev = obj->dev;
+	struct drm_device *dev = obj->base.dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
-	int regnum = obj_priv->fence_reg;
+	u32 size = obj->gtt_space->size;
+	int regnum = obj->fence_reg;
 	uint32_t val;
 	uint32_t pitch_val;
-	uint32_t fence_size_bits;
 
-	if ((obj_priv->gtt_offset & ~I830_FENCE_START_MASK) ||
-	    (obj_priv->gtt_offset & (obj->size - 1))) {
-		WARN(1, "%s: object 0x%08x not 512K or size aligned\n",
-		     __func__, obj_priv->gtt_offset);
-		return;
-	}
+	if (WARN((obj->gtt_offset & ~I830_FENCE_START_MASK) ||
+		 (size & -size) != size ||
+		 (obj->gtt_offset & (size - 1)),
+		 "object 0x%08x not 512K or pot-size 0x%08x aligned\n",
+		 obj->gtt_offset, size))
+		return -EINVAL;
 
-	pitch_val = obj_priv->stride / 128;
+	pitch_val = obj->stride / 128;
 	pitch_val = ffs(pitch_val) - 1;
-	WARN_ON(pitch_val > I830_FENCE_MAX_PITCH_VAL);
 
-	val = obj_priv->gtt_offset;
-	if (obj_priv->tiling_mode == I915_TILING_Y)
+	val = obj->gtt_offset;
+	if (obj->tiling_mode == I915_TILING_Y)
 		val |= 1 << I830_FENCE_TILING_Y_SHIFT;
-	fence_size_bits = I830_FENCE_SIZE_BITS(obj->size);
-	WARN_ON(fence_size_bits & ~0x00000f00);
-	val |= fence_size_bits;
+	val |= I830_FENCE_SIZE_BITS(size);
 	val |= pitch_val << I830_FENCE_PITCH_SHIFT;
 	val |= I830_FENCE_REG_VALID;
 
-	I915_WRITE(FENCE_REG_830_0 + (regnum * 4), val);
+	if (pipelined) {
+		int ret = intel_ring_begin(pipelined, 4);
+		if (ret)
+			return ret;
+
+		intel_ring_emit(pipelined, MI_NOOP);
+		intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(1));
+		intel_ring_emit(pipelined, FENCE_REG_830_0 + regnum*4);
+		intel_ring_emit(pipelined, val);
+		intel_ring_advance(pipelined);
+	} else
+		I915_WRITE(FENCE_REG_830_0 + regnum * 4, val);
+
+	return 0;
 }
 
-static int i915_find_fence_reg(struct drm_device *dev,
-			       bool interruptible)
+static bool ring_passed_seqno(struct intel_ring_buffer *ring, u32 seqno)
 {
-	struct drm_i915_fence_reg *reg = NULL;
-	struct drm_i915_gem_object *obj_priv = NULL;
+	return i915_seqno_passed(ring->get_seqno(ring), seqno);
+}
+
+static int
+i915_gem_object_flush_fence(struct drm_i915_gem_object *obj,
+			    struct intel_ring_buffer *pipelined,
+			    bool interruptible)
+{
+	int ret;
+
+	if (obj->fenced_gpu_access) {
+		if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
+			ret = i915_gem_flush_ring(obj->base.dev,
+						  obj->last_fenced_ring,
+						  0, obj->base.write_domain);
+			if (ret)
+				return ret;
+		}
+
+		obj->fenced_gpu_access = false;
+	}
+
+	if (obj->last_fenced_seqno && pipelined != obj->last_fenced_ring) {
+		if (!ring_passed_seqno(obj->last_fenced_ring,
+				       obj->last_fenced_seqno)) {
+			ret = i915_do_wait_request(obj->base.dev,
+						   obj->last_fenced_seqno,
+						   interruptible,
+						   obj->last_fenced_ring);
+			if (ret)
+				return ret;
+		}
+
+		obj->last_fenced_seqno = 0;
+		obj->last_fenced_ring = NULL;
+	}
+
+	/* Ensure that all CPU reads are completed before installing a fence
+	 * and all writes before removing the fence.
+	 */
+	if (obj->base.read_domains & I915_GEM_DOMAIN_GTT)
+		mb();
+
+	return 0;
+}
+
+int
+i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
+{
+	int ret;
+
+	if (obj->tiling_mode)
+		i915_gem_release_mmap(obj);
+
+	ret = i915_gem_object_flush_fence(obj, NULL, true);
+	if (ret)
+		return ret;
+
+	if (obj->fence_reg != I915_FENCE_REG_NONE) {
+		struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+		i915_gem_clear_fence_reg(obj->base.dev,
+					 &dev_priv->fence_regs[obj->fence_reg]);
+
+		obj->fence_reg = I915_FENCE_REG_NONE;
+	}
+
+	return 0;
+}
+
+static struct drm_i915_fence_reg *
+i915_find_fence_reg(struct drm_device *dev,
+		    struct intel_ring_buffer *pipelined)
+{
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_gem_object *obj = NULL;
-	int i, avail, ret;
+	struct drm_i915_fence_reg *reg, *first, *avail;
+	int i;
 
 	/* First try to find a free reg */
-	avail = 0;
+	avail = NULL;
 	for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
 		reg = &dev_priv->fence_regs[i];
 		if (!reg->obj)
-			return i;
+			return reg;
 
-		obj_priv = to_intel_bo(reg->obj);
-		if (!obj_priv->pin_count)
-		    avail++;
+		if (!reg->obj->pin_count)
+			avail = reg;
 	}
 
-	if (avail == 0)
-		return -ENOSPC;
+	if (avail == NULL)
+		return NULL;
 
 	/* None available, try to steal one or wait for a user to finish */
-	i = I915_FENCE_REG_NONE;
-	list_for_each_entry(reg, &dev_priv->mm.fence_list,
-			    lru_list) {
-		obj = reg->obj;
-		obj_priv = to_intel_bo(obj);
-
-		if (obj_priv->pin_count)
+	avail = first = NULL;
+	list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
+		if (reg->obj->pin_count)
 			continue;
 
-		/* found one! */
-		i = obj_priv->fence_reg;
-		break;
+		if (first == NULL)
+			first = reg;
+
+		if (!pipelined ||
+		    !reg->obj->last_fenced_ring ||
+		    reg->obj->last_fenced_ring == pipelined) {
+			avail = reg;
+			break;
+		}
 	}
 
-	BUG_ON(i == I915_FENCE_REG_NONE);
+	if (avail == NULL)
+		avail = first;
 
-	/* We only have a reference on obj from the active list. put_fence_reg
-	 * might drop that one, causing a use-after-free in it. So hold a
-	 * private reference to obj like the other callers of put_fence_reg
-	 * (set_tiling ioctl) do. */
-	drm_gem_object_reference(obj);
-	ret = i915_gem_object_put_fence_reg(obj, interruptible);
-	drm_gem_object_unreference(obj);
-	if (ret != 0)
-		return ret;
-
-	return i;
+	return avail;
 }
 
 /**
- * i915_gem_object_get_fence_reg - set up a fence reg for an object
+ * i915_gem_object_get_fence - set up a fence reg for an object
  * @obj: object to map through a fence reg
+ * @pipelined: ring on which to queue the change, or NULL for CPU access
+ * @interruptible: must we wait uninterruptibly for the register to retire?
  *
  * When mapping objects through the GTT, userspace wants to be able to write
  * to them without having to worry about swizzling if the object is tiled.
@@ -2448,72 +2523,141 @@
  * and tiling format.
  */
 int
-i915_gem_object_get_fence_reg(struct drm_gem_object *obj,
-			      bool interruptible)
+i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
+			  struct intel_ring_buffer *pipelined,
+			  bool interruptible)
 {
-	struct drm_device *dev = obj->dev;
+	struct drm_device *dev = obj->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
-	struct drm_i915_fence_reg *reg = NULL;
+	struct drm_i915_fence_reg *reg;
 	int ret;
 
-	/* Just update our place in the LRU if our fence is getting used. */
-	if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
-		reg = &dev_priv->fence_regs[obj_priv->fence_reg];
+	/* XXX disable pipelining. There are bugs. Shocking. */
+	pipelined = NULL;
+
+	/* Just update our place in the LRU if our fence is getting reused. */
+	if (obj->fence_reg != I915_FENCE_REG_NONE) {
+		reg = &dev_priv->fence_regs[obj->fence_reg];
 		list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
+
+		if (!obj->fenced_gpu_access && !obj->last_fenced_seqno)
+			pipelined = NULL;
+
+		if (!pipelined) {
+			if (reg->setup_seqno) {
+				if (!ring_passed_seqno(obj->last_fenced_ring,
+						       reg->setup_seqno)) {
+					ret = i915_do_wait_request(obj->base.dev,
+								   reg->setup_seqno,
+								   interruptible,
+								   obj->last_fenced_ring);
+					if (ret)
+						return ret;
+				}
+
+				reg->setup_seqno = 0;
+			}
+		} else if (obj->last_fenced_ring &&
+			   obj->last_fenced_ring != pipelined) {
+			ret = i915_gem_object_flush_fence(obj,
+							  pipelined,
+							  interruptible);
+			if (ret)
+				return ret;
+		} else if (obj->tiling_changed) {
+			if (obj->fenced_gpu_access) {
+				if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
+					ret = i915_gem_flush_ring(obj->base.dev, obj->ring,
+								  0, obj->base.write_domain);
+					if (ret)
+						return ret;
+				}
+
+				obj->fenced_gpu_access = false;
+			}
+		}
+
+		if (!obj->fenced_gpu_access && !obj->last_fenced_seqno)
+			pipelined = NULL;
+		BUG_ON(!pipelined && reg->setup_seqno);
+
+		if (obj->tiling_changed) {
+			if (pipelined) {
+				reg->setup_seqno =
+					i915_gem_next_request_seqno(dev, pipelined);
+				obj->last_fenced_seqno = reg->setup_seqno;
+				obj->last_fenced_ring = pipelined;
+			}
+			goto update;
+		}
+
 		return 0;
 	}
 
-	switch (obj_priv->tiling_mode) {
-	case I915_TILING_NONE:
-		WARN(1, "allocating a fence for non-tiled object?\n");
-		break;
-	case I915_TILING_X:
-		if (!obj_priv->stride)
-			return -EINVAL;
-		WARN((obj_priv->stride & (512 - 1)),
-		     "object 0x%08x is X tiled but has non-512B pitch\n",
-		     obj_priv->gtt_offset);
-		break;
-	case I915_TILING_Y:
-		if (!obj_priv->stride)
-			return -EINVAL;
-		WARN((obj_priv->stride & (128 - 1)),
-		     "object 0x%08x is Y tiled but has non-128B pitch\n",
-		     obj_priv->gtt_offset);
-		break;
-	}
+	reg = i915_find_fence_reg(dev, pipelined);
+	if (reg == NULL)
+		return -ENOSPC;
 
-	ret = i915_find_fence_reg(dev, interruptible);
-	if (ret < 0)
+	ret = i915_gem_object_flush_fence(obj, pipelined, interruptible);
+	if (ret)
 		return ret;
 
-	obj_priv->fence_reg = ret;
-	reg = &dev_priv->fence_regs[obj_priv->fence_reg];
-	list_add_tail(&reg->lru_list, &dev_priv->mm.fence_list);
+	if (reg->obj) {
+		struct drm_i915_gem_object *old = reg->obj;
+
+		drm_gem_object_reference(&old->base);
+
+		if (old->tiling_mode)
+			i915_gem_release_mmap(old);
+
+		ret = i915_gem_object_flush_fence(old,
+						  pipelined,
+						  interruptible);
+		if (ret) {
+			drm_gem_object_unreference(&old->base);
+			return ret;
+		}
+
+		if (old->last_fenced_seqno == 0 && obj->last_fenced_seqno == 0)
+			pipelined = NULL;
+
+		old->fence_reg = I915_FENCE_REG_NONE;
+		old->last_fenced_ring = pipelined;
+		old->last_fenced_seqno =
+			pipelined ? i915_gem_next_request_seqno(dev, pipelined) : 0;
+
+		drm_gem_object_unreference(&old->base);
+	} else if (obj->last_fenced_seqno == 0)
+		pipelined = NULL;
 
 	reg->obj = obj;
+	list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
+	obj->fence_reg = reg - dev_priv->fence_regs;
+	obj->last_fenced_ring = pipelined;
 
+	reg->setup_seqno =
+		pipelined ? i915_gem_next_request_seqno(dev, pipelined) : 0;
+	obj->last_fenced_seqno = reg->setup_seqno;
+
+update:
+	obj->tiling_changed = false;
 	switch (INTEL_INFO(dev)->gen) {
 	case 6:
-		sandybridge_write_fence_reg(reg);
+		ret = sandybridge_write_fence_reg(obj, pipelined);
 		break;
 	case 5:
 	case 4:
-		i965_write_fence_reg(reg);
+		ret = i965_write_fence_reg(obj, pipelined);
 		break;
 	case 3:
-		i915_write_fence_reg(reg);
+		ret = i915_write_fence_reg(obj, pipelined);
 		break;
 	case 2:
-		i830_write_fence_reg(reg);
+		ret = i830_write_fence_reg(obj, pipelined);
 		break;
 	}
 
-	trace_i915_gem_object_get_fence(obj, obj_priv->fence_reg,
-			obj_priv->tiling_mode);
-
-	return 0;
+	return ret;
 }
 
 /**
@@ -2521,154 +2665,125 @@
  * @obj: object to clear
  *
  * Zeroes out the fence register itself and clears out the associated
- * data structures in dev_priv and obj_priv.
+ * data structures in dev_priv and obj.
  */
 static void
-i915_gem_clear_fence_reg(struct drm_gem_object *obj)
+i915_gem_clear_fence_reg(struct drm_device *dev,
+			 struct drm_i915_fence_reg *reg)
 {
-	struct drm_device *dev = obj->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
-	struct drm_i915_fence_reg *reg =
-		&dev_priv->fence_regs[obj_priv->fence_reg];
-	uint32_t fence_reg;
+	uint32_t fence_reg = reg - dev_priv->fence_regs;
 
 	switch (INTEL_INFO(dev)->gen) {
 	case 6:
-		I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 +
-			     (obj_priv->fence_reg * 8), 0);
+		I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + fence_reg*8, 0);
 		break;
 	case 5:
 	case 4:
-		I915_WRITE64(FENCE_REG_965_0 + (obj_priv->fence_reg * 8), 0);
+		I915_WRITE64(FENCE_REG_965_0 + fence_reg*8, 0);
 		break;
 	case 3:
-		if (obj_priv->fence_reg >= 8)
-			fence_reg = FENCE_REG_945_8 + (obj_priv->fence_reg - 8) * 4;
+		if (fence_reg >= 8)
+			fence_reg = FENCE_REG_945_8 + (fence_reg - 8) * 4;
 		else
 	case 2:
-			fence_reg = FENCE_REG_830_0 + obj_priv->fence_reg * 4;
+			fence_reg = FENCE_REG_830_0 + fence_reg * 4;
 
 		I915_WRITE(fence_reg, 0);
 		break;
 	}
 
-	reg->obj = NULL;
-	obj_priv->fence_reg = I915_FENCE_REG_NONE;
 	list_del_init(&reg->lru_list);
-}
-
-/**
- * i915_gem_object_put_fence_reg - waits on outstanding fenced access
- * to the buffer to finish, and then resets the fence register.
- * @obj: tiled object holding a fence register.
- * @bool: whether the wait upon the fence is interruptible
- *
- * Zeroes out the fence register itself and clears out the associated
- * data structures in dev_priv and obj_priv.
- */
-int
-i915_gem_object_put_fence_reg(struct drm_gem_object *obj,
-			      bool interruptible)
-{
-	struct drm_device *dev = obj->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
-	struct drm_i915_fence_reg *reg;
-
-	if (obj_priv->fence_reg == I915_FENCE_REG_NONE)
-		return 0;
-
-	/* If we've changed tiling, GTT-mappings of the object
-	 * need to re-fault to ensure that the correct fence register
-	 * setup is in place.
-	 */
-	i915_gem_release_mmap(obj);
-
-	/* On the i915, GPU access to tiled buffers is via a fence,
-	 * therefore we must wait for any outstanding access to complete
-	 * before clearing the fence.
-	 */
-	reg = &dev_priv->fence_regs[obj_priv->fence_reg];
-	if (reg->gpu) {
-		int ret;
-
-		ret = i915_gem_object_flush_gpu_write_domain(obj);
-		if (ret)
-			return ret;
-
-		ret = i915_gem_object_wait_rendering(obj, interruptible);
-		if (ret)
-			return ret;
-
-		reg->gpu = false;
-	}
-
-	i915_gem_object_flush_gtt_write_domain(obj);
-	i915_gem_clear_fence_reg(obj);
-
-	return 0;
+	reg->obj = NULL;
+	reg->setup_seqno = 0;
 }
 
 /**
  * Finds free space in the GTT aperture and binds the object there.
  */
 static int
-i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
+i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
+			    unsigned alignment,
+			    bool map_and_fenceable)
 {
-	struct drm_device *dev = obj->dev;
+	struct drm_device *dev = obj->base.dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 	struct drm_mm_node *free_space;
-	gfp_t gfpmask =  __GFP_NORETRY | __GFP_NOWARN;
+	gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN;
+	u32 size, fence_size, fence_alignment, unfenced_alignment;
+	bool mappable, fenceable;
 	int ret;
 
-	if (obj_priv->madv != I915_MADV_WILLNEED) {
+	if (obj->madv != I915_MADV_WILLNEED) {
 		DRM_ERROR("Attempting to bind a purgeable object\n");
 		return -EINVAL;
 	}
 
+	fence_size = i915_gem_get_gtt_size(obj);
+	fence_alignment = i915_gem_get_gtt_alignment(obj);
+	unfenced_alignment = i915_gem_get_unfenced_gtt_alignment(obj);
+
 	if (alignment == 0)
-		alignment = i915_gem_get_gtt_alignment(obj);
-	if (alignment & (i915_gem_get_gtt_alignment(obj) - 1)) {
+		alignment = map_and_fenceable ? fence_alignment :
+						unfenced_alignment;
+	if (map_and_fenceable && alignment & (fence_alignment - 1)) {
 		DRM_ERROR("Invalid object alignment requested %u\n", alignment);
 		return -EINVAL;
 	}
 
+	size = map_and_fenceable ? fence_size : obj->base.size;
+
 	/* If the object is bigger than the entire aperture, reject it early
 	 * before evicting everything in a vain attempt to find space.
 	 */
-	if (obj->size > dev_priv->mm.gtt_total) {
+	if (obj->base.size >
+	    (map_and_fenceable ? dev_priv->mm.gtt_mappable_end : dev_priv->mm.gtt_total)) {
 		DRM_ERROR("Attempting to bind an object larger than the aperture\n");
 		return -E2BIG;
 	}
 
  search_free:
-	free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
-					obj->size, alignment, 0);
-	if (free_space != NULL)
-		obj_priv->gtt_space = drm_mm_get_block(free_space, obj->size,
-						       alignment);
-	if (obj_priv->gtt_space == NULL) {
+	if (map_and_fenceable)
+		free_space =
+			drm_mm_search_free_in_range(&dev_priv->mm.gtt_space,
+						    size, alignment, 0,
+						    dev_priv->mm.gtt_mappable_end,
+						    0);
+	else
+		free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
+						size, alignment, 0);
+
+	if (free_space != NULL) {
+		if (map_and_fenceable)
+			obj->gtt_space =
+				drm_mm_get_block_range_generic(free_space,
+							       size, alignment, 0,
+							       dev_priv->mm.gtt_mappable_end,
+							       0);
+		else
+			obj->gtt_space =
+				drm_mm_get_block(free_space, size, alignment);
+	}
+	if (obj->gtt_space == NULL) {
 		/* If the gtt is empty and we're still having trouble
 		 * fitting our object in, we're out of memory.
 		 */
-		ret = i915_gem_evict_something(dev, obj->size, alignment);
+		ret = i915_gem_evict_something(dev, size, alignment,
+					       map_and_fenceable);
 		if (ret)
 			return ret;
 
 		goto search_free;
 	}
 
-	ret = i915_gem_object_get_pages(obj, gfpmask);
+	ret = i915_gem_object_get_pages_gtt(obj, gfpmask);
 	if (ret) {
-		drm_mm_put_block(obj_priv->gtt_space);
-		obj_priv->gtt_space = NULL;
+		drm_mm_put_block(obj->gtt_space);
+		obj->gtt_space = NULL;
 
 		if (ret == -ENOMEM) {
-			/* first try to clear up some space from the GTT */
-			ret = i915_gem_evict_something(dev, obj->size,
-						       alignment);
+			/* first try to reclaim some memory by clearing the GTT */
+			ret = i915_gem_evict_everything(dev, false);
 			if (ret) {
 				/* now try to shrink everyone else */
 				if (gfpmask) {
@@ -2676,7 +2791,7 @@
 					goto search_free;
 				}
 
-				return ret;
+				return -ENOMEM;
 			}
 
 			goto search_free;
@@ -2685,122 +2800,116 @@
 		return ret;
 	}
 
-	/* Create an AGP memory structure pointing at our pages, and bind it
-	 * into the GTT.
-	 */
-	obj_priv->agp_mem = drm_agp_bind_pages(dev,
-					       obj_priv->pages,
-					       obj->size >> PAGE_SHIFT,
-					       obj_priv->gtt_space->start,
-					       obj_priv->agp_type);
-	if (obj_priv->agp_mem == NULL) {
-		i915_gem_object_put_pages(obj);
-		drm_mm_put_block(obj_priv->gtt_space);
-		obj_priv->gtt_space = NULL;
+	ret = i915_gem_gtt_bind_object(obj);
+	if (ret) {
+		i915_gem_object_put_pages_gtt(obj);
+		drm_mm_put_block(obj->gtt_space);
+		obj->gtt_space = NULL;
 
-		ret = i915_gem_evict_something(dev, obj->size, alignment);
-		if (ret)
+		if (i915_gem_evict_everything(dev, false))
 			return ret;
 
 		goto search_free;
 	}
 
-	/* keep track of bounds object by adding it to the inactive list */
-	list_add_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list);
-	i915_gem_info_add_gtt(dev_priv, obj->size);
+	list_add_tail(&obj->gtt_list, &dev_priv->mm.gtt_list);
+	list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
 
 	/* Assert that the object is not currently in any GPU domain. As it
 	 * wasn't in the GTT, there shouldn't be any way it could have been in
 	 * a GPU cache
 	 */
-	BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS);
-	BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS);
+	BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
+	BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
 
-	obj_priv->gtt_offset = obj_priv->gtt_space->start;
-	trace_i915_gem_object_bind(obj, obj_priv->gtt_offset);
+	obj->gtt_offset = obj->gtt_space->start;
 
+	fenceable =
+		obj->gtt_space->size == fence_size &&
+		(obj->gtt_space->start & (fence_alignment - 1)) == 0;
+
+	mappable =
+		obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end;
+
+	obj->map_and_fenceable = mappable && fenceable;
+
+	trace_i915_gem_object_bind(obj, obj->gtt_offset, map_and_fenceable);
 	return 0;
 }
 
 void
-i915_gem_clflush_object(struct drm_gem_object *obj)
+i915_gem_clflush_object(struct drm_i915_gem_object *obj)
 {
-	struct drm_i915_gem_object	*obj_priv = to_intel_bo(obj);
-
 	/* If we don't have a page list set up, then we're not pinned
 	 * to GPU, and we can ignore the cache flush because it'll happen
 	 * again at bind time.
 	 */
-	if (obj_priv->pages == NULL)
+	if (obj->pages == NULL)
 		return;
 
 	trace_i915_gem_object_clflush(obj);
 
-	drm_clflush_pages(obj_priv->pages, obj->size / PAGE_SIZE);
+	drm_clflush_pages(obj->pages, obj->base.size / PAGE_SIZE);
 }
 
 /** Flushes any GPU write domain for the object if it's dirty. */
 static int
-i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
+i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj)
 {
-	struct drm_device *dev = obj->dev;
-	uint32_t old_write_domain;
+	struct drm_device *dev = obj->base.dev;
 
-	if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
+	if ((obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0)
 		return 0;
 
 	/* Queue the GPU write cache flushing we need. */
-	old_write_domain = obj->write_domain;
-	i915_gem_flush_ring(dev, NULL,
-			    to_intel_bo(obj)->ring,
-			    0, obj->write_domain);
-	BUG_ON(obj->write_domain);
-
-	trace_i915_gem_object_change_domain(obj,
-					    obj->read_domains,
-					    old_write_domain);
-
-	return 0;
+	return i915_gem_flush_ring(dev, obj->ring, 0, obj->base.write_domain);
 }
 
 /** Flushes the GTT write domain for the object if it's dirty. */
 static void
-i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj)
+i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
 {
 	uint32_t old_write_domain;
 
-	if (obj->write_domain != I915_GEM_DOMAIN_GTT)
+	if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
 		return;
 
-	/* No actual flushing is required for the GTT write domain.   Writes
+	/* No actual flushing is required for the GTT write domain.  Writes
 	 * to it immediately go to main memory as far as we know, so there's
 	 * no chipset flush.  It also doesn't land in render cache.
+	 *
+	 * However, we do have to enforce the order so that all writes through
+	 * the GTT land before any writes to the device, such as updates to
+	 * the GATT itself.
 	 */
-	old_write_domain = obj->write_domain;
-	obj->write_domain = 0;
+	wmb();
+
+	i915_gem_release_mmap(obj);
+
+	old_write_domain = obj->base.write_domain;
+	obj->base.write_domain = 0;
 
 	trace_i915_gem_object_change_domain(obj,
-					    obj->read_domains,
+					    obj->base.read_domains,
 					    old_write_domain);
 }
 
 /** Flushes the CPU write domain for the object if it's dirty. */
 static void
-i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj)
+i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
 {
-	struct drm_device *dev = obj->dev;
 	uint32_t old_write_domain;
 
-	if (obj->write_domain != I915_GEM_DOMAIN_CPU)
+	if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
 		return;
 
 	i915_gem_clflush_object(obj);
-	drm_agp_chipset_flush(dev);
-	old_write_domain = obj->write_domain;
-	obj->write_domain = 0;
+	intel_gtt_chipset_flush();
+	old_write_domain = obj->base.write_domain;
+	obj->base.write_domain = 0;
 
 	trace_i915_gem_object_change_domain(obj,
-					    obj->read_domains,
+					    obj->base.read_domains,
 					    old_write_domain);
 }
 
@@ -2811,37 +2920,39 @@
  * flushes to occur.
  */
 int
-i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
+i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
 {
-	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 	uint32_t old_write_domain, old_read_domains;
 	int ret;
 
 	/* Not valid to be called on unbound objects. */
-	if (obj_priv->gtt_space == NULL)
+	if (obj->gtt_space == NULL)
 		return -EINVAL;
 
 	ret = i915_gem_object_flush_gpu_write_domain(obj);
-	if (ret != 0)
-		return ret;
-	ret = i915_gem_object_wait_rendering(obj, true);
 	if (ret)
 		return ret;
 
+	if (obj->pending_gpu_write || write) {
+		ret = i915_gem_object_wait_rendering(obj, true);
+		if (ret)
+			return ret;
+	}
+
 	i915_gem_object_flush_cpu_write_domain(obj);
 
-	old_write_domain = obj->write_domain;
-	old_read_domains = obj->read_domains;
+	old_write_domain = obj->base.write_domain;
+	old_read_domains = obj->base.read_domains;
 
 	/* It should now be out of any other write domains, and we can update
 	 * the domain values for our changes.
 	 */
-	BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
-	obj->read_domains |= I915_GEM_DOMAIN_GTT;
+	BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
+	obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
 	if (write) {
-		obj->read_domains = I915_GEM_DOMAIN_GTT;
-		obj->write_domain = I915_GEM_DOMAIN_GTT;
-		obj_priv->dirty = 1;
+		obj->base.read_domains = I915_GEM_DOMAIN_GTT;
+		obj->base.write_domain = I915_GEM_DOMAIN_GTT;
+		obj->dirty = 1;
 	}
 
 	trace_i915_gem_object_change_domain(obj,
@@ -2856,23 +2967,23 @@
  * wait, as in modesetting process we're not supposed to be interrupted.
  */
 int
-i915_gem_object_set_to_display_plane(struct drm_gem_object *obj,
-				     bool pipelined)
+i915_gem_object_set_to_display_plane(struct drm_i915_gem_object *obj,
+				     struct intel_ring_buffer *pipelined)
 {
-	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 	uint32_t old_read_domains;
 	int ret;
 
 	/* Not valid to be called on unbound objects. */
-	if (obj_priv->gtt_space == NULL)
+	if (obj->gtt_space == NULL)
 		return -EINVAL;
 
 	ret = i915_gem_object_flush_gpu_write_domain(obj);
 	if (ret)
 		return ret;
 
 	/* Currently, we are always called from a non-interruptible context. */
-	if (!pipelined) {
+	if (pipelined != obj->ring) {
 		ret = i915_gem_object_wait_rendering(obj, false);
 		if (ret)
 			return ret;
@@ -2880,12 +2991,12 @@
 
 	i915_gem_object_flush_cpu_write_domain(obj);
 
-	old_read_domains = obj->read_domains;
-	obj->read_domains |= I915_GEM_DOMAIN_GTT;
+	old_read_domains = obj->base.read_domains;
+	obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
 
 	trace_i915_gem_object_change_domain(obj,
 					    old_read_domains,
-					    obj->write_domain);
+					    obj->base.write_domain);
 
 	return 0;
 }
@@ -2894,14 +3005,19 @@
 i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj,
 			  bool interruptible)
 {
+	int ret;
+
 	if (!obj->active)
 		return 0;
 
-	if (obj->base.write_domain & I915_GEM_GPU_DOMAINS)
-		i915_gem_flush_ring(obj->base.dev, NULL, obj->ring,
-				    0, obj->base.write_domain);
+	if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
+		ret = i915_gem_flush_ring(obj->base.dev, obj->ring,
+					  0, obj->base.write_domain);
+		if (ret)
+			return ret;
+	}
 
-	return i915_gem_object_wait_rendering(&obj->base, interruptible);
+	return i915_gem_object_wait_rendering(obj, interruptible);
 }
 
 /**
@@ -2911,14 +3027,15 @@
  * flushes to occur.
  */
 static int
-i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
+i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
 {
 	uint32_t old_write_domain, old_read_domains;
 	int ret;
 
 	ret = i915_gem_object_flush_gpu_write_domain(obj);
-	if (ret != 0)
+	if (ret)
 		return ret;
+
 	ret = i915_gem_object_wait_rendering(obj, true);
 	if (ret)
 		return ret;
@@ -2930,27 +3047,27 @@
 	 */
 	i915_gem_object_set_to_full_cpu_read_domain(obj);
 
-	old_write_domain = obj->write_domain;
-	old_read_domains = obj->read_domains;
+	old_write_domain = obj->base.write_domain;
+	old_read_domains = obj->base.read_domains;
 
 	/* Flush the CPU cache if it's still invalid. */
-	if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) {
+	if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
 		i915_gem_clflush_object(obj);
 
-		obj->read_domains |= I915_GEM_DOMAIN_CPU;
+		obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
 	}
 
 	/* It should now be out of any other write domains, and we can update
 	 * the domain values for our changes.
 	 */
-	BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
+	BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
 
 	/* If we're writing through the CPU, then the GPU read domains will
 	 * need to be invalidated at next use.
 	 */
 	if (write) {
-		obj->read_domains = I915_GEM_DOMAIN_CPU;
-		obj->write_domain = I915_GEM_DOMAIN_CPU;
+		obj->base.read_domains = I915_GEM_DOMAIN_CPU;
+		obj->base.write_domain = I915_GEM_DOMAIN_CPU;
 	}
 
 	trace_i915_gem_object_change_domain(obj,
@@ -2960,184 +3077,6 @@
 	return 0;
 }
 
-/*
- * Set the next domain for the specified object. This
- * may not actually perform the necessary flushing/invaliding though,
- * as that may want to be batched with other set_domain operations
- *
- * This is (we hope) the only really tricky part of gem. The goal
- * is fairly simple -- track which caches hold bits of the object
- * and make sure they remain coherent. A few concrete examples may
- * help to explain how it works. For shorthand, we use the notation
- * (read_domains, write_domain), e.g. (CPU, CPU) to indicate the
- * a pair of read and write domain masks.
- *
- * Case 1: the batch buffer
- *
- *	1. Allocated
- *	2. Written by CPU
- *	3. Mapped to GTT
- *	4. Read by GPU
- *	5. Unmapped from GTT
- *	6. Freed
- *
- *	Let's take these a step at a time
- *
- *	1. Allocated
- *		Pages allocated from the kernel may still have
- *		cache contents, so we set them to (CPU, CPU) always.
- *	2. Written by CPU (using pwrite)
- *		The pwrite function calls set_domain (CPU, CPU) and
- *		this function does nothing (as nothing changes)
- *	3. Mapped by GTT
- *		This function asserts that the object is not
- *		currently in any GPU-based read or write domains
- *	4. Read by GPU
- *		i915_gem_execbuffer calls set_domain (COMMAND, 0).
- *		As write_domain is zero, this function adds in the
- *		current read domains (CPU+COMMAND, 0).
- *		flush_domains is set to CPU.
- *		invalidate_domains is set to COMMAND
- *		clflush is run to get data out of the CPU caches
- *		then i915_dev_set_domain calls i915_gem_flush to
- *		emit an MI_FLUSH and drm_agp_chipset_flush
- *	5. Unmapped from GTT
- *		i915_gem_object_unbind calls set_domain (CPU, CPU)
- *		flush_domains and invalidate_domains end up both zero
- *		so no flushing/invalidating happens
- *	6. Freed
- *		yay, done
- *
- * Case 2: The shared render buffer
- *
- *	1. Allocated
- *	2. Mapped to GTT
- *	3. Read/written by GPU
- *	4. set_domain to (CPU,CPU)
- *	5. Read/written by CPU
- *	6. Read/written by GPU
- *
- *	1. Allocated
- *		Same as last example, (CPU, CPU)
- *	2. Mapped to GTT
- *		Nothing changes (assertions find that it is not in the GPU)
- *	3. Read/written by GPU
- *		execbuffer calls set_domain (RENDER, RENDER)
- *		flush_domains gets CPU
- *		invalidate_domains gets GPU
- *		clflush (obj)
- *		MI_FLUSH and drm_agp_chipset_flush
- *	4. set_domain (CPU, CPU)
- *		flush_domains gets GPU
- *		invalidate_domains gets CPU
- *		wait_rendering (obj) to make sure all drawing is complete.
- *		This will include an MI_FLUSH to get the data from GPU
- *		to memory
- *		clflush (obj) to invalidate the CPU cache
- *		Another MI_FLUSH in i915_gem_flush (eliminate this somehow?)
- *	5. Read/written by CPU
- *		cache lines are loaded and dirtied
- *	6. Read written by GPU
- *		Same as last GPU access
- *
- * Case 3: The constant buffer
- *
- *	1. Allocated
- *	2. Written by CPU
- *	3. Read by GPU
- *	4. Updated (written) by CPU again
- *	5. Read by GPU
- *
- *	1. Allocated
- *		(CPU, CPU)
- *	2. Written by CPU
- *		(CPU, CPU)
- *	3. Read by GPU
- *		(CPU+RENDER, 0)
- *		flush_domains = CPU
- *		invalidate_domains = RENDER
- *		clflush (obj)
- *		MI_FLUSH
- *		drm_agp_chipset_flush
- *	4. Updated (written) by CPU again
- *		(CPU, CPU)
- *		flush_domains = 0 (no previous write domain)
- *		invalidate_domains = 0 (no new read domains)
- *	5. Read by GPU
- *		(CPU+RENDER, 0)
- *		flush_domains = CPU
- *		invalidate_domains = RENDER
- *		clflush (obj)
- *		MI_FLUSH
- *		drm_agp_chipset_flush
- */
-static void
-i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
-				  struct intel_ring_buffer *ring)
-{
-	struct drm_device		*dev = obj->dev;
-	struct drm_i915_private		*dev_priv = dev->dev_private;
-	struct drm_i915_gem_object	*obj_priv = to_intel_bo(obj);
-	uint32_t			invalidate_domains = 0;
-	uint32_t			flush_domains = 0;
-	uint32_t			old_read_domains;
-
-	intel_mark_busy(dev, obj);
-
-	/*
-	 * If the object isn't moving to a new write domain,
-	 * let the object stay in multiple read domains
-	 */
-	if (obj->pending_write_domain == 0)
-		obj->pending_read_domains |= obj->read_domains;
-	else
-		obj_priv->dirty = 1;
-
-	/*
-	 * Flush the current write domain if
-	 * the new read domains don't match. Invalidate
-	 * any read domains which differ from the old
-	 * write domain
-	 */
-	if (obj->write_domain &&
-	    (obj->write_domain != obj->pending_read_domains ||
-	     obj_priv->ring != ring)) {
-		flush_domains |= obj->write_domain;
-		invalidate_domains |=
-			obj->pending_read_domains & ~obj->write_domain;
-	}
-	/*
-	 * Invalidate any read caches which may have
-	 * stale data. That is, any new read domains.
-	 */
-	invalidate_domains |= obj->pending_read_domains & ~obj->read_domains;
-	if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU)
-		i915_gem_clflush_object(obj);
-
-	old_read_domains = obj->read_domains;
-
-	/* The actual obj->write_domain will be updated with
-	 * pending_write_domain after we emit the accumulated flush for all
-	 * of our domain changes in execbuffers (which clears objects'
-	 * write_domains).  So if we have a current write domain that we
-	 * aren't changing, set pending_write_domain to that.
-	 */
-	if (flush_domains == 0 && obj->pending_write_domain == 0)
-		obj->pending_write_domain = obj->write_domain;
-	obj->read_domains = obj->pending_read_domains;
-
-	dev->invalidate_domains |= invalidate_domains;
-	dev->flush_domains |= flush_domains;
-	if (flush_domains & I915_GEM_GPU_DOMAINS)
-		dev_priv->mm.flush_rings |= obj_priv->ring->id;
-	if (invalidate_domains & I915_GEM_GPU_DOMAINS)
-		dev_priv->mm.flush_rings |= ring->id;
-
-	trace_i915_gem_object_change_domain(obj,
-					    old_read_domains,
-					    obj->write_domain);
-}
-
 /**
  * Moves the object from a partially CPU read to a full one.
  *
@@ -3145,30 +3084,28 @@
  * and doesn't handle transitioning from !(read_domains & I915_GEM_DOMAIN_CPU).
  */
 static void
-i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj)
+i915_gem_object_set_to_full_cpu_read_domain(struct drm_i915_gem_object *obj)
 {
-	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
-
-	if (!obj_priv->page_cpu_valid)
+	if (!obj->page_cpu_valid)
 		return;
 
 	/* If we're partially in the CPU read domain, finish moving it in.
 	 */
-	if (obj->read_domains & I915_GEM_DOMAIN_CPU) {
+	if (obj->base.read_domains & I915_GEM_DOMAIN_CPU) {
 		int i;
 
-		for (i = 0; i <= (obj->size - 1) / PAGE_SIZE; i++) {
-			if (obj_priv->page_cpu_valid[i])
+		for (i = 0; i <= (obj->base.size - 1) / PAGE_SIZE; i++) {
+			if (obj->page_cpu_valid[i])
 				continue;
-			drm_clflush_pages(obj_priv->pages + i, 1);
+			drm_clflush_pages(obj->pages + i, 1);
 		}
 	}
 
 	/* Free the page_cpu_valid mappings which are now stale, whether
 	 * or not we've got I915_GEM_DOMAIN_CPU.
 	 */
-	kfree(obj_priv->page_cpu_valid);
-	obj_priv->page_cpu_valid = NULL;
+	kfree(obj->page_cpu_valid);
+	obj->page_cpu_valid = NULL;
 }
 
 /**
@@ -3184,19 +3121,19 @@
  * flushes to occur.
  */
 static int
-i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
+i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj,
 					  uint64_t offset, uint64_t size)
 {
-	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 	uint32_t old_read_domains;
 	int i, ret;
 
-	if (offset == 0 && size == obj->size)
+	if (offset == 0 && size == obj->base.size)
 		return i915_gem_object_set_to_cpu_domain(obj, 0);
 
 	ret = i915_gem_object_flush_gpu_write_domain(obj);
-	if (ret != 0)
+	if (ret)
 		return ret;
+
 	ret = i915_gem_object_wait_rendering(obj, true);
 	if (ret)
 		return ret;
@@ -3204,457 +3141,45 @@
 	i915_gem_object_flush_gtt_write_domain(obj);
 
 	/* If we're already fully in the CPU read domain, we're done. */
-	if (obj_priv->page_cpu_valid == NULL &&
-	    (obj->read_domains & I915_GEM_DOMAIN_CPU) != 0)
+	if (obj->page_cpu_valid == NULL &&
+	    (obj->base.read_domains & I915_GEM_DOMAIN_CPU) != 0)
 		return 0;
 
 	/* Otherwise, create/clear the per-page CPU read domain flag if we're
 	 * newly adding I915_GEM_DOMAIN_CPU
 	 */
-	if (obj_priv->page_cpu_valid == NULL) {
-		obj_priv->page_cpu_valid = kzalloc(obj->size / PAGE_SIZE,
-						   GFP_KERNEL);
-		if (obj_priv->page_cpu_valid == NULL)
+	if (obj->page_cpu_valid == NULL) {
+		obj->page_cpu_valid = kzalloc(obj->base.size / PAGE_SIZE,
+					      GFP_KERNEL);
+		if (obj->page_cpu_valid == NULL)
 			return -ENOMEM;
-	} else if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0)
-		memset(obj_priv->page_cpu_valid, 0, obj->size / PAGE_SIZE);
+	} else if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0)
+		memset(obj->page_cpu_valid, 0, obj->base.size / PAGE_SIZE);
 
 	/* Flush the cache on any pages that are still invalid from the CPU's
 	 * perspective.
 	 */
 	for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE;
 	     i++) {
-		if (obj_priv->page_cpu_valid[i])
+		if (obj->page_cpu_valid[i])
 			continue;
 
-		drm_clflush_pages(obj_priv->pages + i, 1);
+		drm_clflush_pages(obj->pages + i, 1);
 
-		obj_priv->page_cpu_valid[i] = 1;
+		obj->page_cpu_valid[i] = 1;
 	}
 
 	/* It should now be out of any other write domains, and we can update
 	 * the domain values for our changes.
 	 */
-	BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
+	BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
 
-	old_read_domains = obj->read_domains;
-	obj->read_domains |= I915_GEM_DOMAIN_CPU;
+	old_read_domains = obj->base.read_domains;
+	obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
 
 	trace_i915_gem_object_change_domain(obj,
 					    old_read_domains,
-					    obj->write_domain);
-
-	return 0;
-}
-
-static int
-i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
-				   struct drm_file *file_priv,
-				   struct drm_i915_gem_exec_object2 *entry,
-				   struct drm_i915_gem_relocation_entry *reloc)
-{
-	struct drm_device *dev = obj->base.dev;
-	struct drm_gem_object *target_obj;
-	uint32_t target_offset;
-	int ret = -EINVAL;
-
-	target_obj = drm_gem_object_lookup(dev, file_priv,
-					   reloc->target_handle);
-	if (target_obj == NULL)
-		return -ENOENT;
-
-	target_offset = to_intel_bo(target_obj)->gtt_offset;
-
-#if WATCH_RELOC
-	DRM_INFO("%s: obj %p offset %08x target %d "
-		 "read %08x write %08x gtt %08x "
-		 "presumed %08x delta %08x\n",
-		 __func__,
-		 obj,
-		 (int) reloc->offset,
-		 (int) reloc->target_handle,
-		 (int) reloc->read_domains,
-		 (int) reloc->write_domain,
-		 (int) target_offset,
-		 (int) reloc->presumed_offset,
-		 reloc->delta);
-#endif
-
-	/* The target buffer should have appeared before us in the
-	 * exec_object list, so it should have a GTT space bound by now.
-	 */
-	if (target_offset == 0) {
-		DRM_ERROR("No GTT space found for object %d\n",
-			  reloc->target_handle);
-		goto err;
-	}
-
-	/* Validate that the target is in a valid r/w GPU domain */
-	if (reloc->write_domain & (reloc->write_domain - 1)) {
-		DRM_ERROR("reloc with multiple write domains: "
-			  "obj %p target %d offset %d "
-			  "read %08x write %08x",
-			  obj, reloc->target_handle,
-			  (int) reloc->offset,
-			  reloc->read_domains,
-			  reloc->write_domain);
-		goto err;
-	}
-	if (reloc->write_domain & I915_GEM_DOMAIN_CPU ||
-	    reloc->read_domains & I915_GEM_DOMAIN_CPU) {
-		DRM_ERROR("reloc with read/write CPU domains: "
-			  "obj %p target %d offset %d "
-			  "read %08x write %08x",
-			  obj, reloc->target_handle,
-			  (int) reloc->offset,
-			  reloc->read_domains,
-			  reloc->write_domain);
-		goto err;
-	}
-	if (reloc->write_domain && target_obj->pending_write_domain &&
-	    reloc->write_domain != target_obj->pending_write_domain) {
-		DRM_ERROR("Write domain conflict: "
-			  "obj %p target %d offset %d "
-			  "new %08x old %08x\n",
-			  obj, reloc->target_handle,
-			  (int) reloc->offset,
-			  reloc->write_domain,
-			  target_obj->pending_write_domain);
-		goto err;
-	}
-
-	target_obj->pending_read_domains |= reloc->read_domains;
-	target_obj->pending_write_domain |= reloc->write_domain;
-
-	/* If the relocation already has the right value in it, no
-	 * more work needs to be done.
-	 */
-	if (target_offset == reloc->presumed_offset)
-		goto out;
-
-	/* Check that the relocation address is valid... */
-	if (reloc->offset > obj->base.size - 4) {
-		DRM_ERROR("Relocation beyond object bounds: "
-			  "obj %p target %d offset %d size %d.\n",
-			  obj, reloc->target_handle,
-			  (int) reloc->offset,
-			  (int) obj->base.size);
-		goto err;
-	}
-	if (reloc->offset & 3) {
-		DRM_ERROR("Relocation not 4-byte aligned: "
-			  "obj %p target %d offset %d.\n",
-			  obj, reloc->target_handle,
-			  (int) reloc->offset);
-		goto err;
-	}
-
-	/* and points to somewhere within the target object. */
-	if (reloc->delta >= target_obj->size) {
-		DRM_ERROR("Relocation beyond target object bounds: "
-			  "obj %p target %d delta %d size %d.\n",
-			  obj, reloc->target_handle,
-			  (int) reloc->delta,
-			  (int) target_obj->size);
-		goto err;
-	}
-
-	reloc->delta += target_offset;
-	if (obj->base.write_domain == I915_GEM_DOMAIN_CPU) {
-		uint32_t page_offset = reloc->offset & ~PAGE_MASK;
-		char *vaddr;
-
-		vaddr = kmap_atomic(obj->pages[reloc->offset >> PAGE_SHIFT]);
-		*(uint32_t *)(vaddr + page_offset) = reloc->delta;
-		kunmap_atomic(vaddr);
-	} else {
-		struct drm_i915_private *dev_priv = dev->dev_private;
-		uint32_t __iomem *reloc_entry;
-		void __iomem *reloc_page;
-
-		ret = i915_gem_object_set_to_gtt_domain(&obj->base, 1);
-		if (ret)
-			goto err;
-
-		/* Map the page containing the relocation we're going to perform.  */
-		reloc->offset += obj->gtt_offset;
-		reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
-						      reloc->offset & PAGE_MASK);
-		reloc_entry = (uint32_t __iomem *)
-			(reloc_page + (reloc->offset & ~PAGE_MASK));
-		iowrite32(reloc->delta, reloc_entry);
-		io_mapping_unmap_atomic(reloc_page);
-	}
-
-	/* and update the user's relocation entry */
-	reloc->presumed_offset = target_offset;
-
-out:
-	ret = 0;
-err:
-	drm_gem_object_unreference(target_obj);
-	return ret;
-}
-
-static int
-i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
-				    struct drm_file *file_priv,
-				    struct drm_i915_gem_exec_object2 *entry)
-{
-	struct drm_i915_gem_relocation_entry __user *user_relocs;
-	int i, ret;
-
-	user_relocs = (void __user *)(uintptr_t)entry->relocs_ptr;
-	for (i = 0; i < entry->relocation_count; i++) {
-		struct drm_i915_gem_relocation_entry reloc;
-
-		if (__copy_from_user_inatomic(&reloc,
-					      user_relocs+i,
-					      sizeof(reloc)))
-			return -EFAULT;
-
-		ret = i915_gem_execbuffer_relocate_entry(obj, file_priv, entry, &reloc);
-		if (ret)
-			return ret;
-
-		if (__copy_to_user_inatomic(&user_relocs[i].presumed_offset,
-					    &reloc.presumed_offset,
-					    sizeof(reloc.presumed_offset)))
-			return -EFAULT;
-	}
-
-	return 0;
-}
-
-static int
-i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj,
-					 struct drm_file *file_priv,
-					 struct drm_i915_gem_exec_object2 *entry,
-					 struct drm_i915_gem_relocation_entry *relocs)
-{
-	int i, ret;
-
-	for (i = 0; i < entry->relocation_count; i++) {
-		ret = i915_gem_execbuffer_relocate_entry(obj, file_priv, entry, &relocs[i]);
-		if (ret)
-			return ret;
-	}
-
-	return 0;
-}
-
-static int
-i915_gem_execbuffer_relocate(struct drm_device *dev,
-			     struct drm_file *file,
-			     struct drm_gem_object **object_list,
-			     struct drm_i915_gem_exec_object2 *exec_list,
-			     int count)
-{
-	int i, ret;
-
-	for (i = 0; i < count; i++) {
-		struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]);
-		obj->base.pending_read_domains = 0;
-		obj->base.pending_write_domain = 0;
-		ret = i915_gem_execbuffer_relocate_object(obj, file,
-							  &exec_list[i]);
-		if (ret)
-			return ret;
-	}
-
-	return 0;
-}
-
-static int
-i915_gem_execbuffer_reserve(struct drm_device *dev,
-			    struct drm_file *file,
-			    struct drm_gem_object **object_list,
-			    struct drm_i915_gem_exec_object2 *exec_list,
-			    int count)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	int ret, i, retry;
-
-	/* attempt to pin all of the buffers into the GTT */
-	for (retry = 0; retry < 2; retry++) {
-		ret = 0;
-		for (i = 0; i < count; i++) {
-			struct drm_i915_gem_exec_object2 *entry = &exec_list[i];
-			struct drm_i915_gem_object *obj= to_intel_bo(object_list[i]);
-			bool need_fence =
-				entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
-				obj->tiling_mode != I915_TILING_NONE;
-
-			/* Check fence reg constraints and rebind if necessary */
-			if (need_fence &&
-			    !i915_gem_object_fence_offset_ok(&obj->base,
-							     obj->tiling_mode)) {
-				ret = i915_gem_object_unbind(&obj->base);
-				if (ret)
-					break;
-			}
-
-			ret = i915_gem_object_pin(&obj->base, entry->alignment);
-			if (ret)
-				break;
-
-			/*
-			 * Pre-965 chips need a fence register set up in order
-			 * to properly handle blits to/from tiled surfaces.
-			 */
-			if (need_fence) {
-				ret = i915_gem_object_get_fence_reg(&obj->base, true);
-				if (ret) {
-					i915_gem_object_unpin(&obj->base);
-					break;
-				}
-
-				dev_priv->fence_regs[obj->fence_reg].gpu = true;
-			}
-
-			entry->offset = obj->gtt_offset;
-		}
-
-		while (i--)
-			i915_gem_object_unpin(object_list[i]);
-
-		if (ret == 0)
-			break;
-
-		if (ret != -ENOSPC || retry)
-			return ret;
-
-		ret = i915_gem_evict_everything(dev);
-		if (ret)
-			return ret;
-	}
-
-	return 0;
-}
-
-static int
-i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
-				  struct drm_file *file,
-				  struct drm_gem_object **object_list,
-				  struct drm_i915_gem_exec_object2 *exec_list,
-				  int count)
-{
-	struct drm_i915_gem_relocation_entry *reloc;
-	int i, total, ret;
-
-	for (i = 0; i < count; i++) {
-		struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]);
-		obj->in_execbuffer = false;
-	}
-
-	mutex_unlock(&dev->struct_mutex);
-
-	total = 0;
-	for (i = 0; i < count; i++)
-		total += exec_list[i].relocation_count;
-
-	reloc = drm_malloc_ab(total, sizeof(*reloc));
-	if (reloc == NULL) {
-		mutex_lock(&dev->struct_mutex);
-		return -ENOMEM;
-	}
-
-	total = 0;
-	for (i = 0; i < count; i++) {
-		struct drm_i915_gem_relocation_entry __user *user_relocs;
-
-		user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr;
-
-		if (copy_from_user(reloc+total, user_relocs,
-				   exec_list[i].relocation_count *
-				   sizeof(*reloc))) {
-			ret = -EFAULT;
-			mutex_lock(&dev->struct_mutex);
-			goto err;
-		}
-
-		total += exec_list[i].relocation_count;
-	}
-
-	ret = i915_mutex_lock_interruptible(dev);
-	if (ret) {
-		mutex_lock(&dev->struct_mutex);
-		goto err;
-	}
-
-	ret = i915_gem_execbuffer_reserve(dev, file,
-					  object_list, exec_list,
-					  count);
-	if (ret)
-		goto err;
-
-	total = 0;
-	for (i = 0; i < count; i++) {
-		struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]);
-		obj->base.pending_read_domains = 0;
-		obj->base.pending_write_domain = 0;
-		ret = i915_gem_execbuffer_relocate_object_slow(obj, file,
-							       &exec_list[i],
-							       reloc + total);
-		if (ret)
-			goto err;
-
-		total += exec_list[i].relocation_count;
-	}
-
-	/* Leave the user relocations as are, this is the painfully slow path,
-	 * and we want to avoid the complication of dropping the lock whilst
-	 * having buffers reserved in the aperture and so causing spurious
-	 * ENOSPC for random operations.
-	 */
-
-err:
-	drm_free_large(reloc);
-	return ret;
-}
-
-static int
-i915_gem_execbuffer_move_to_gpu(struct drm_device *dev,
-				struct drm_file *file,
-				struct intel_ring_buffer *ring,
-				struct drm_gem_object **objects,
-				int count)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	int ret, i;
-
-	/* Zero the global flush/invalidate flags. These
-	 * will be modified as new domains are computed
-	 * for each object
-	 */
-	dev->invalidate_domains = 0;
-	dev->flush_domains = 0;
-	dev_priv->mm.flush_rings = 0;
-	for (i = 0; i < count; i++)
-		i915_gem_object_set_to_gpu_domain(objects[i], ring);
-
-	if (dev->invalidate_domains | dev->flush_domains) {
-#if WATCH_EXEC
-		DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
-			  __func__,
-			 dev->invalidate_domains,
-			 dev->flush_domains);
-#endif
-		i915_gem_flush(dev, file,
-			       dev->invalidate_domains,
-			       dev->flush_domains,
-			       dev_priv->mm.flush_rings);
-	}
-
-	for (i = 0; i < count; i++) {
-		struct drm_i915_gem_object *obj = to_intel_bo(objects[i]);
-		/* XXX replace with semaphores */
-		if (obj->ring && ring != obj->ring) {
-			ret = i915_gem_object_wait_rendering(&obj->base, true);
-			if (ret)
-				return ret;
-		}
-	}
+					    obj->base.write_domain);
 
 	return 0;
 }
@@ -3694,20 +3219,21 @@
 		return 0;
 
 	ret = 0;
-	if (!i915_seqno_passed(ring->get_seqno(dev, ring), seqno)) {
+	if (!i915_seqno_passed(ring->get_seqno(ring), seqno)) {
 		/* And wait for the seqno passing without holding any locks and
 		 * causing extra latency for others. This is safe as the irq
 		 * generation is designed to be run atomically and so is
 		 * lockless.
 		 */
-		ring->user_irq_get(dev, ring);
-		ret = wait_event_interruptible(ring->irq_queue,
-					       i915_seqno_passed(ring->get_seqno(dev, ring), seqno)
-					       || atomic_read(&dev_priv->mm.wedged));
-		ring->user_irq_put(dev, ring);
+		if (ring->irq_get(ring)) {
+			ret = wait_event_interruptible(ring->irq_queue,
+						       i915_seqno_passed(ring->get_seqno(ring), seqno)
+						       || atomic_read(&dev_priv->mm.wedged));
+			ring->irq_put(ring);
 
-		if (ret == 0 && atomic_read(&dev_priv->mm.wedged))
-			ret = -EIO;
+			if (ret == 0 && atomic_read(&dev_priv->mm.wedged))
+				ret = -EIO;
+		}
 	}
 
 	if (ret == 0)
@@ -3716,577 +3242,106 @@
 	return ret;
 }
 
-static int
-i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec,
-			  uint64_t exec_offset)
-{
-	uint32_t exec_start, exec_len;
-
-	exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
-	exec_len = (uint32_t) exec->batch_len;
-
-	if ((exec_start | exec_len) & 0x7)
-		return -EINVAL;
-
-	if (!exec_start)
-		return -EINVAL;
-
-	return 0;
-}
-
-static int
-validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
-		   int count)
-{
-	int i;
-
-	for (i = 0; i < count; i++) {
-		char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr;
-		int length; /* limited by fault_in_pages_readable() */
-
-		/* First check for malicious input causing overflow */
-		if (exec[i].relocation_count >
-		    INT_MAX / sizeof(struct drm_i915_gem_relocation_entry))
-			return -EINVAL;
-
-		length = exec[i].relocation_count *
-			sizeof(struct drm_i915_gem_relocation_entry);
-		if (!access_ok(VERIFY_READ, ptr, length))
-			return -EFAULT;
-
-		/* we may also need to update the presumed offsets */
-		if (!access_ok(VERIFY_WRITE, ptr, length))
-			return -EFAULT;
-
-		if (fault_in_pages_readable(ptr, length))
-			return -EFAULT;
-	}
-
-	return 0;
-}
-
-static int
-i915_gem_do_execbuffer(struct drm_device *dev, void *data,
-		       struct drm_file *file,
-		       struct drm_i915_gem_execbuffer2 *args,
-		       struct drm_i915_gem_exec_object2 *exec_list)
-{
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_gem_object **object_list = NULL;
-	struct drm_gem_object *batch_obj;
-	struct drm_i915_gem_object *obj_priv;
-	struct drm_clip_rect *cliprects = NULL;
-	struct drm_i915_gem_request *request = NULL;
-	int ret, i, flips;
-	uint64_t exec_offset;
-
-	struct intel_ring_buffer *ring = NULL;
-
-	ret = i915_gem_check_is_wedged(dev);
-	if (ret)
-		return ret;
-
-	ret = validate_exec_list(exec_list, args->buffer_count);
-	if (ret)
-		return ret;
-
-#if WATCH_EXEC
-	DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
-		  (int) args->buffers_ptr, args->buffer_count, args->batch_len);
-#endif
-	switch (args->flags & I915_EXEC_RING_MASK) {
-	case I915_EXEC_DEFAULT:
-	case I915_EXEC_RENDER:
-		ring = &dev_priv->render_ring;
-		break;
-	case I915_EXEC_BSD:
-		if (!HAS_BSD(dev)) {
-			DRM_ERROR("execbuf with invalid ring (BSD)\n");
-			return -EINVAL;
-		}
-		ring = &dev_priv->bsd_ring;
-		break;
-	case I915_EXEC_BLT:
-		if (!HAS_BLT(dev)) {
-			DRM_ERROR("execbuf with invalid ring (BLT)\n");
-			return -EINVAL;
-		}
-		ring = &dev_priv->blt_ring;
-		break;
-	default:
-		DRM_ERROR("execbuf with unknown ring: %d\n",
-			  (int)(args->flags & I915_EXEC_RING_MASK));
-		return -EINVAL;
-	}
-
-	if (args->buffer_count < 1) {
-		DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
-		return -EINVAL;
-	}
-	object_list = drm_malloc_ab(sizeof(*object_list), args->buffer_count);
-	if (object_list == NULL) {
-		DRM_ERROR("Failed to allocate object list for %d buffers\n",
-			  args->buffer_count);
-		ret = -ENOMEM;
-		goto pre_mutex_err;
-	}
-
-	if (args->num_cliprects != 0) {
-		cliprects = kcalloc(args->num_cliprects, sizeof(*cliprects),
-				    GFP_KERNEL);
-		if (cliprects == NULL) {
-			ret = -ENOMEM;
-			goto pre_mutex_err;
-		}
-
-		ret = copy_from_user(cliprects,
-				     (struct drm_clip_rect __user *)
-				     (uintptr_t) args->cliprects_ptr,
-				     sizeof(*cliprects) * args->num_cliprects);
-		if (ret != 0) {
-			DRM_ERROR("copy %d cliprects failed: %d\n",
-				  args->num_cliprects, ret);
-			ret = -EFAULT;
-			goto pre_mutex_err;
-		}
-	}
-
-	request = kzalloc(sizeof(*request), GFP_KERNEL);
-	if (request == NULL) {
-		ret = -ENOMEM;
-		goto pre_mutex_err;
-	}
-
-	ret = i915_mutex_lock_interruptible(dev);
-	if (ret)
-		goto pre_mutex_err;
-
-	if (dev_priv->mm.suspended) {
-		mutex_unlock(&dev->struct_mutex);
-		ret = -EBUSY;
-		goto pre_mutex_err;
-	}
-
-	/* Look up object handles */
-	for (i = 0; i < args->buffer_count; i++) {
-		object_list[i] = drm_gem_object_lookup(dev, file,
-						       exec_list[i].handle);
-		if (object_list[i] == NULL) {
-			DRM_ERROR("Invalid object handle %d at index %d\n",
-				   exec_list[i].handle, i);
-			/* prevent error path from reading uninitialized data */
-			args->buffer_count = i + 1;
-			ret = -ENOENT;
-			goto err;
-		}
-
-		obj_priv = to_intel_bo(object_list[i]);
-		if (obj_priv->in_execbuffer) {
-			DRM_ERROR("Object %p appears more than once in object list\n",
-				   object_list[i]);
-			/* prevent error path from reading uninitialized data */
-			args->buffer_count = i + 1;
-			ret = -EINVAL;
-			goto err;
-		}
-		obj_priv->in_execbuffer = true;
-	}
-
-	/* Move the objects en-masse into the GTT, evicting if necessary. */
-	ret = i915_gem_execbuffer_reserve(dev, file,
-					  object_list, exec_list,
-					  args->buffer_count);
-	if (ret)
-		goto err;
-
-	/* The objects are in their final locations, apply the relocations. */
-	ret = i915_gem_execbuffer_relocate(dev, file,
-					   object_list, exec_list,
-					   args->buffer_count);
-	if (ret) {
-		if (ret == -EFAULT) {
-			ret = i915_gem_execbuffer_relocate_slow(dev, file,
-								object_list,
-								exec_list,
-								args->buffer_count);
-			BUG_ON(!mutex_is_locked(&dev->struct_mutex));
-		}
-		if (ret)
-			goto err;
-	}
-
-	/* Set the pending read domains for the batch buffer to COMMAND */
-	batch_obj = object_list[args->buffer_count-1];
-	if (batch_obj->pending_write_domain) {
-		DRM_ERROR("Attempting to use self-modifying batch buffer\n");
-		ret = -EINVAL;
-		goto err;
-	}
-	batch_obj->pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
-
-	/* Sanity check the batch buffer */
-	exec_offset = to_intel_bo(batch_obj)->gtt_offset;
-	ret = i915_gem_check_execbuffer(args, exec_offset);
-	if (ret != 0) {
-		DRM_ERROR("execbuf with invalid offset/length\n");
-		goto err;
-	}
-
-	ret = i915_gem_execbuffer_move_to_gpu(dev, file, ring,
-					      object_list, args->buffer_count);
-	if (ret)
-		goto err;
-
-	for (i = 0; i < args->buffer_count; i++) {
-		struct drm_gem_object *obj = object_list[i];
-		uint32_t old_write_domain = obj->write_domain;
-		obj->write_domain = obj->pending_write_domain;
-		trace_i915_gem_object_change_domain(obj,
-						    obj->read_domains,
-						    old_write_domain);
-	}
-
-#if WATCH_COHERENCY
-	for (i = 0; i < args->buffer_count; i++) {
-		i915_gem_object_check_coherency(object_list[i],
-						exec_list[i].handle);
-	}
-#endif
-
-#if WATCH_EXEC
-	i915_gem_dump_object(batch_obj,
-			      args->batch_len,
-			      __func__,
-			      ~0);
-#endif
-
-	/* Check for any pending flips. As we only maintain a flip queue depth
-	 * of 1, we can simply insert a WAIT for the next display flip prior
-	 * to executing the batch and avoid stalling the CPU.
-	 */
-	flips = 0;
-	for (i = 0; i < args->buffer_count; i++) {
-		if (object_list[i]->write_domain)
-			flips |= atomic_read(&to_intel_bo(object_list[i])->pending_flip);
-	}
-	if (flips) {
-		int plane, flip_mask;
-
-		for (plane = 0; flips >> plane; plane++) {
-			if (((flips >> plane) & 1) == 0)
-				continue;
-
-			if (plane)
-				flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
-			else
-				flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
-
-			intel_ring_begin(dev, ring, 2);
-			intel_ring_emit(dev, ring,
-					MI_WAIT_FOR_EVENT | flip_mask);
-			intel_ring_emit(dev, ring, MI_NOOP);
-			intel_ring_advance(dev, ring);
-		}
-	}
-
-	/* Exec the batchbuffer */
-	ret = ring->dispatch_gem_execbuffer(dev, ring, args,
-					    cliprects, exec_offset);
-	if (ret) {
-		DRM_ERROR("dispatch failed %d\n", ret);
-		goto err;
-	}
-
-	/*
-	 * Ensure that the commands in the batch buffer are
-	 * finished before the interrupt fires
-	 */
-	i915_retire_commands(dev, ring);
-
-	for (i = 0; i < args->buffer_count; i++) {
-		struct drm_gem_object *obj = object_list[i];
-
-		i915_gem_object_move_to_active(obj, ring);
-		if (obj->write_domain)
-			list_move_tail(&to_intel_bo(obj)->gpu_write_list,
-				       &ring->gpu_write_list);
-	}
-
-	i915_add_request(dev, file, request, ring);
-	request = NULL;
-
-err:
-	for (i = 0; i < args->buffer_count; i++) {
-		if (object_list[i]) {
-			obj_priv = to_intel_bo(object_list[i]);
-			obj_priv->in_execbuffer = false;
-		}
-		drm_gem_object_unreference(object_list[i]);
-	}
-
-	mutex_unlock(&dev->struct_mutex);
-
-pre_mutex_err:
-	drm_free_large(object_list);
-	kfree(cliprects);
-	kfree(request);
-
-	return ret;
-}
-
-/*
- * Legacy execbuffer just creates an exec2 list from the original exec object
- * list array and passes it to the real function.
- */
 int
-i915_gem_execbuffer(struct drm_device *dev, void *data,
-		    struct drm_file *file_priv)
+i915_gem_object_pin(struct drm_i915_gem_object *obj,
+		    uint32_t alignment,
+		    bool map_and_fenceable)
 {
-	struct drm_i915_gem_execbuffer *args = data;
-	struct drm_i915_gem_execbuffer2 exec2;
-	struct drm_i915_gem_exec_object *exec_list = NULL;
-	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
-	int ret, i;
-
-#if WATCH_EXEC
-	DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
-		  (int) args->buffers_ptr, args->buffer_count, args->batch_len);
-#endif
-
-	if (args->buffer_count < 1) {
-		DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
-		return -EINVAL;
-	}
-
-	/* Copy in the exec list from userland */
-	exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
-	exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
-	if (exec_list == NULL || exec2_list == NULL) {
-		DRM_ERROR("Failed to allocate exec list for %d buffers\n",
-			  args->buffer_count);
-		drm_free_large(exec_list);
-		drm_free_large(exec2_list);
-		return -ENOMEM;
-	}
-	ret = copy_from_user(exec_list,
-			     (struct drm_i915_relocation_entry __user *)
-			     (uintptr_t) args->buffers_ptr,
-			     sizeof(*exec_list) * args->buffer_count);
-	if (ret != 0) {
-		DRM_ERROR("copy %d exec entries failed %d\n",
-			  args->buffer_count, ret);
-		drm_free_large(exec_list);
-		drm_free_large(exec2_list);
-		return -EFAULT;
-	}
-
-	for (i = 0; i < args->buffer_count; i++) {
-		exec2_list[i].handle = exec_list[i].handle;
-		exec2_list[i].relocation_count = exec_list[i].relocation_count;
-		exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
-		exec2_list[i].alignment = exec_list[i].alignment;
-		exec2_list[i].offset = exec_list[i].offset;
-		if (INTEL_INFO(dev)->gen < 4)
-			exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
-		else
-			exec2_list[i].flags = 0;
-	}
-
-	exec2.buffers_ptr = args->buffers_ptr;
-	exec2.buffer_count = args->buffer_count;
-	exec2.batch_start_offset = args->batch_start_offset;
-	exec2.batch_len = args->batch_len;
-	exec2.DR1 = args->DR1;
-	exec2.DR4 = args->DR4;
-	exec2.num_cliprects = args->num_cliprects;
-	exec2.cliprects_ptr = args->cliprects_ptr;
-	exec2.flags = I915_EXEC_RENDER;
-
-	ret = i915_gem_do_execbuffer(dev, data, file_priv, &exec2, exec2_list);
-	if (!ret) {
-		/* Copy the new buffer offsets back to the user's exec list. */
-		for (i = 0; i < args->buffer_count; i++)
-			exec_list[i].offset = exec2_list[i].offset;
-		/* ... and back out to userspace */
-		ret = copy_to_user((struct drm_i915_relocation_entry __user *)
-				   (uintptr_t) args->buffers_ptr,
-				   exec_list,
-				   sizeof(*exec_list) * args->buffer_count);
-		if (ret) {
-			ret = -EFAULT;
-			DRM_ERROR("failed to copy %d exec entries "
-				  "back to user (%d)\n",
-				  args->buffer_count, ret);
-		}
-	}
-
-	drm_free_large(exec_list);
-	drm_free_large(exec2_list);
-	return ret;
-}
-
-int
-i915_gem_execbuffer2(struct drm_device *dev, void *data,
-		     struct drm_file *file_priv)
-{
-	struct drm_i915_gem_execbuffer2 *args = data;
-	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
-	int ret;
-
-#if WATCH_EXEC
-	DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
-		  (int) args->buffers_ptr, args->buffer_count, args->batch_len);
-#endif
-
-	if (args->buffer_count < 1) {
-		DRM_ERROR("execbuf2 with %d buffers\n", args->buffer_count);
-		return -EINVAL;
-	}
-
-	exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
-	if (exec2_list == NULL) {
-		DRM_ERROR("Failed to allocate exec list for %d buffers\n",
-			  args->buffer_count);
-		return -ENOMEM;
-	}
-	ret = copy_from_user(exec2_list,
-			     (struct drm_i915_relocation_entry __user *)
-			     (uintptr_t) args->buffers_ptr,
-			     sizeof(*exec2_list) * args->buffer_count);
-	if (ret != 0) {
-		DRM_ERROR("copy %d exec entries failed %d\n",
-			  args->buffer_count, ret);
-		drm_free_large(exec2_list);
-		return -EFAULT;
-	}
-
-	ret = i915_gem_do_execbuffer(dev, data, file_priv, args, exec2_list);
-	if (!ret) {
-		/* Copy the new buffer offsets back to the user's exec list. */
-		ret = copy_to_user((struct drm_i915_relocation_entry __user *)
-				   (uintptr_t) args->buffers_ptr,
-				   exec2_list,
-				   sizeof(*exec2_list) * args->buffer_count);
-		if (ret) {
-			ret = -EFAULT;
-			DRM_ERROR("failed to copy %d exec entries "
-				  "back to user (%d)\n",
-				  args->buffer_count, ret);
-		}
-	}
-
-	drm_free_large(exec2_list);
-	return ret;
-}
-
-int
-i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
-{
-	struct drm_device *dev = obj->dev;
+	struct drm_device *dev = obj->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 	int ret;
 
-	BUG_ON(obj_priv->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT);
+	BUG_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT);
 	WARN_ON(i915_verify_lists(dev));
 
-	if (obj_priv->gtt_space != NULL) {
-		if (alignment == 0)
-			alignment = i915_gem_get_gtt_alignment(obj);
-		if (obj_priv->gtt_offset & (alignment - 1)) {
-			WARN(obj_priv->pin_count,
-			     "bo is already pinned with incorrect alignment: offset=%x, req.alignment=%x\n",
-			     obj_priv->gtt_offset, alignment);
+	if (obj->gtt_space != NULL) {
+		if ((alignment && obj->gtt_offset & (alignment - 1)) ||
+		    (map_and_fenceable && !obj->map_and_fenceable)) {
+			WARN(obj->pin_count,
+			     "bo is already pinned with incorrect alignment:"
+			     " offset=%x, req.alignment=%x, req.map_and_fenceable=%d,"
+			     " obj->map_and_fenceable=%d\n",
+			     obj->gtt_offset, alignment,
+			     map_and_fenceable,
+			     obj->map_and_fenceable);
 			ret = i915_gem_object_unbind(obj);
 			if (ret)
 				return ret;
 		}
 	}
 
-	if (obj_priv->gtt_space == NULL) {
-		ret = i915_gem_object_bind_to_gtt(obj, alignment);
+	if (obj->gtt_space == NULL) {
+		ret = i915_gem_object_bind_to_gtt(obj, alignment,
+						  map_and_fenceable);
 		if (ret)
 			return ret;
 	}
 
-	obj_priv->pin_count++;
-
-	/* If the object is not active and not pending a flush,
-	 * remove it from the inactive list
-	 */
-	if (obj_priv->pin_count == 1) {
-		i915_gem_info_add_pin(dev_priv, obj->size);
-		if (!obj_priv->active)
-			list_move_tail(&obj_priv->mm_list,
+	if (obj->pin_count++ == 0) {
+		if (!obj->active)
+			list_move_tail(&obj->mm_list,
 				       &dev_priv->mm.pinned_list);
 	}
+	obj->pin_mappable |= map_and_fenceable;
 
 	WARN_ON(i915_verify_lists(dev));
 	return 0;
 }
 
 void
-i915_gem_object_unpin(struct drm_gem_object *obj)
+i915_gem_object_unpin(struct drm_i915_gem_object *obj)
 {
-	struct drm_device *dev = obj->dev;
+	struct drm_device *dev = obj->base.dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 
 	WARN_ON(i915_verify_lists(dev));
-	obj_priv->pin_count--;
-	BUG_ON(obj_priv->pin_count < 0);
-	BUG_ON(obj_priv->gtt_space == NULL);
+	BUG_ON(obj->pin_count == 0);
+	BUG_ON(obj->gtt_space == NULL);
 
-	/* If the object is no longer pinned, and is
-	 * neither active nor being flushed, then stick it on
-	 * the inactive list
-	 */
-	if (obj_priv->pin_count == 0) {
-		if (!obj_priv->active)
-			list_move_tail(&obj_priv->mm_list,
+	if (--obj->pin_count == 0) {
+		if (!obj->active)
+			list_move_tail(&obj->mm_list,
 				       &dev_priv->mm.inactive_list);
-		i915_gem_info_remove_pin(dev_priv, obj->size);
+		obj->pin_mappable = false;
 	}
 	WARN_ON(i915_verify_lists(dev));
 }
 
 int
 i915_gem_pin_ioctl(struct drm_device *dev, void *data,
-		   struct drm_file *file_priv)
+		   struct drm_file *file)
 {
 	struct drm_i915_gem_pin *args = data;
-	struct drm_gem_object *obj;
-	struct drm_i915_gem_object *obj_priv;
+	struct drm_i915_gem_object *obj;
 	int ret;
 
 	ret = i915_mutex_lock_interruptible(dev);
 	if (ret)
 		return ret;
 
-	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
 	if (obj == NULL) {
 		ret = -ENOENT;
 		goto unlock;
 	}
-	obj_priv = to_intel_bo(obj);
 
-	if (obj_priv->madv != I915_MADV_WILLNEED) {
+	if (obj->madv != I915_MADV_WILLNEED) {
 		DRM_ERROR("Attempting to pin a purgeable buffer\n");
 		ret = -EINVAL;
 		goto out;
 	}
 
-	if (obj_priv->pin_filp != NULL && obj_priv->pin_filp != file_priv) {
+	if (obj->pin_filp != NULL && obj->pin_filp != file) {
 		DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
 			  args->handle);
 		ret = -EINVAL;
 		goto out;
 	}
 
-	obj_priv->user_pin_count++;
-	obj_priv->pin_filp = file_priv;
-	if (obj_priv->user_pin_count == 1) {
-		ret = i915_gem_object_pin(obj, args->alignment);
+	obj->user_pin_count++;
+	obj->pin_filp = file;
+	if (obj->user_pin_count == 1) {
+		ret = i915_gem_object_pin(obj, args->alignment, true);
 		if (ret)
 			goto out;
 	}
@@ -4295,9 +3350,9 @@
 	 * as the X server doesn't manage domains yet
 	 */
 	i915_gem_object_flush_cpu_write_domain(obj);
-	args->offset = obj_priv->gtt_offset;
+	args->offset = obj->gtt_offset;
 out:
-	drm_gem_object_unreference(obj);
+	drm_gem_object_unreference(&obj->base);
 unlock:
 	mutex_unlock(&dev->struct_mutex);
 	return ret;
@@ -4305,38 +3360,36 @@
 
 int
 i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
-		     struct drm_file *file_priv)
+		     struct drm_file *file)
 {
 	struct drm_i915_gem_pin *args = data;
-	struct drm_gem_object *obj;
-	struct drm_i915_gem_object *obj_priv;
+	struct drm_i915_gem_object *obj;
 	int ret;
 
 	ret = i915_mutex_lock_interruptible(dev);
 	if (ret)
 		return ret;
 
-	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
 	if (obj == NULL) {
 		ret = -ENOENT;
 		goto unlock;
 	}
-	obj_priv = to_intel_bo(obj);
 
-	if (obj_priv->pin_filp != file_priv) {
+	if (obj->pin_filp != file) {
 		DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
 			  args->handle);
 		ret = -EINVAL;
 		goto out;
 	}
-	obj_priv->user_pin_count--;
-	if (obj_priv->user_pin_count == 0) {
-		obj_priv->pin_filp = NULL;
+	obj->user_pin_count--;
+	if (obj->user_pin_count == 0) {
+		obj->pin_filp = NULL;
 		i915_gem_object_unpin(obj);
 	}
 
 out:
-	drm_gem_object_unreference(obj);
+	drm_gem_object_unreference(&obj->base);
 unlock:
 	mutex_unlock(&dev->struct_mutex);
 	return ret;
@@ -4344,48 +3397,50 @@
 
 int
 i915_gem_busy_ioctl(struct drm_device *dev, void *data,
-		    struct drm_file *file_priv)
+		    struct drm_file *file)
 {
 	struct drm_i915_gem_busy *args = data;
-	struct drm_gem_object *obj;
-	struct drm_i915_gem_object *obj_priv;
+	struct drm_i915_gem_object *obj;
 	int ret;
 
 	ret = i915_mutex_lock_interruptible(dev);
 	if (ret)
 		return ret;
 
-	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
 	if (obj == NULL) {
 		ret = -ENOENT;
 		goto unlock;
 	}
-	obj_priv = to_intel_bo(obj);
 
 	/* Count all active objects as busy, even if they are currently not used
 	 * by the gpu. Users of this interface expect objects to eventually
 	 * become non-busy without any further actions, therefore emit any
 	 * necessary flushes here.
 	 */
-	args->busy = obj_priv->active;
+	args->busy = obj->active;
 	if (args->busy) {
 		/* Unconditionally flush objects, even when the gpu still uses this
 		 * object. Userspace calling this function indicates that it wants to
 		 * use this buffer rather sooner than later, so issuing the required
 		 * flush earlier is beneficial.
 		 */
-		if (obj->write_domain & I915_GEM_GPU_DOMAINS) {
-			i915_gem_flush_ring(dev, file_priv,
-					    obj_priv->ring,
-					    0, obj->write_domain);
-		} else if (obj_priv->ring->outstanding_lazy_request) {
+		if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
+			ret = i915_gem_flush_ring(dev, obj->ring,
+						  0, obj->base.write_domain);
+		} else if (obj->ring->outstanding_lazy_request ==
+			   obj->last_rendering_seqno) {
+			struct drm_i915_gem_request *request;
+
 			/* This ring is not being cleared by active usage,
 			 * so emit a request to do so.
 			 */
-			u32 seqno = i915_add_request(dev,
-						     NULL, NULL,
-						     obj_priv->ring);
-			if (seqno == 0)
+			request = kzalloc(sizeof(*request), GFP_KERNEL);
+			if (request)
+				ret = i915_add_request(dev,
+						       NULL, request,
+						       obj->ring);
+			else
 				ret = -ENOMEM;
 		}
 
@@ -4394,12 +3449,12 @@
 		 * are actually unmasked, and our working set ends up being
 		 * larger than required.
 		 */
-		i915_gem_retire_requests_ring(dev, obj_priv->ring);
+		i915_gem_retire_requests_ring(dev, obj->ring);
 
-		args->busy = obj_priv->active;
+		args->busy = obj->active;
 	}
 
-	drm_gem_object_unreference(obj);
+	drm_gem_object_unreference(&obj->base);
 unlock:
 	mutex_unlock(&dev->struct_mutex);
 	return ret;
@@ -4417,8 +3472,7 @@
 		       struct drm_file *file_priv)
 {
 	struct drm_i915_gem_madvise *args = data;
-	struct drm_gem_object *obj;
-	struct drm_i915_gem_object *obj_priv;
+	struct drm_i915_gem_object *obj;
 	int ret;
 
 	switch (args->madv) {
@@ -4433,37 +3487,36 @@
 	if (ret)
 		return ret;
 
-	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+	obj = to_intel_bo(drm_gem_object_lookup(dev, file_priv, args->handle));
 	if (obj == NULL) {
 		ret = -ENOENT;
 		goto unlock;
 	}
-	obj_priv = to_intel_bo(obj);
 
-	if (obj_priv->pin_count) {
+	if (obj->pin_count) {
 		ret = -EINVAL;
 		goto out;
 	}
 
-	if (obj_priv->madv != __I915_MADV_PURGED)
-		obj_priv->madv = args->madv;
+	if (obj->madv != __I915_MADV_PURGED)
+		obj->madv = args->madv;
 
 	/* if the object is no longer bound, discard its backing storage */
-	if (i915_gem_object_is_purgeable(obj_priv) &&
-	    obj_priv->gtt_space == NULL)
+	if (i915_gem_object_is_purgeable(obj) &&
+	    obj->gtt_space == NULL)
 		i915_gem_object_truncate(obj);
 
-	args->retained = obj_priv->madv != __I915_MADV_PURGED;
+	args->retained = obj->madv != __I915_MADV_PURGED;
 
 out:
-	drm_gem_object_unreference(obj);
+	drm_gem_object_unreference(&obj->base);
 unlock:
 	mutex_unlock(&dev->struct_mutex);
 	return ret;
 }
 
-struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev,
-					      size_t size)
+struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
+						  size_t size)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj;
@@ -4486,11 +3539,15 @@
 	obj->base.driver_private = NULL;
 	obj->fence_reg = I915_FENCE_REG_NONE;
 	INIT_LIST_HEAD(&obj->mm_list);
+	INIT_LIST_HEAD(&obj->gtt_list);
 	INIT_LIST_HEAD(&obj->ring_list);
+	INIT_LIST_HEAD(&obj->exec_list);
 	INIT_LIST_HEAD(&obj->gpu_write_list);
 	obj->madv = I915_MADV_WILLNEED;
+	/* Avoid an unnecessary call to unbind on the first bind. */
+	obj->map_and_fenceable = true;
 
-	return &obj->base;
+	return obj;
 }
 
 int i915_gem_init_object(struct drm_gem_object *obj)
@@ -4500,42 +3557,41 @@
 	return 0;
 }
 
-static void i915_gem_free_object_tail(struct drm_gem_object *obj)
+static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj)
 {
-	struct drm_device *dev = obj->dev;
+	struct drm_device *dev = obj->base.dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 	int ret;
 
 	ret = i915_gem_object_unbind(obj);
 	if (ret == -ERESTARTSYS) {
-		list_move(&obj_priv->mm_list,
+		list_move(&obj->mm_list,
 			  &dev_priv->mm.deferred_free_list);
 		return;
 	}
 
-	if (obj_priv->mmap_offset)
+	if (obj->base.map_list.map)
 		i915_gem_free_mmap_offset(obj);
 
-	drm_gem_object_release(obj);
-	i915_gem_info_remove_obj(dev_priv, obj->size);
+	drm_gem_object_release(&obj->base);
+	i915_gem_info_remove_obj(dev_priv, obj->base.size);
 
-	kfree(obj_priv->page_cpu_valid);
-	kfree(obj_priv->bit_17);
-	kfree(obj_priv);
+	kfree(obj->page_cpu_valid);
+	kfree(obj->bit_17);
+	kfree(obj);
 }
 
-void i915_gem_free_object(struct drm_gem_object *obj)
+void i915_gem_free_object(struct drm_gem_object *gem_obj)
 {
-	struct drm_device *dev = obj->dev;
-	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
+	struct drm_device *dev = obj->base.dev;
 
 	trace_i915_gem_object_destroy(obj);
 
-	while (obj_priv->pin_count > 0)
+	while (obj->pin_count > 0)
 		i915_gem_object_unpin(obj);
 
-	if (obj_priv->phys_obj)
+	if (obj->phys_obj)
 		i915_gem_detach_phys_object(dev, obj);
 
 	i915_gem_free_object_tail(obj);
@@ -4562,13 +3618,15 @@
 
 	/* Under UMS, be paranoid and evict. */
 	if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
-		ret = i915_gem_evict_inactive(dev);
+		ret = i915_gem_evict_inactive(dev, false);
 		if (ret) {
 			mutex_unlock(&dev->struct_mutex);
 			return ret;
 		}
 	}
 
+	i915_gem_reset_fences(dev);
+
 	/* Hack!  Don't let anybody do execbuf while we don't control the chip.
 	 * We need to replace this with a semaphore, or something.
 	 * And not confound mm.suspended!
@@ -4587,82 +3645,15 @@
 	return 0;
 }
 
-/*
- * 965+ support PIPE_CONTROL commands, which provide finer grained control
- * over cache flushing.
- */
-static int
-i915_gem_init_pipe_control(struct drm_device *dev)
-{
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_gem_object *obj;
-	struct drm_i915_gem_object *obj_priv;
-	int ret;
-
-	obj = i915_gem_alloc_object(dev, 4096);
-	if (obj == NULL) {
-		DRM_ERROR("Failed to allocate seqno page\n");
-		ret = -ENOMEM;
-		goto err;
-	}
-	obj_priv = to_intel_bo(obj);
-	obj_priv->agp_type = AGP_USER_CACHED_MEMORY;
-
-	ret = i915_gem_object_pin(obj, 4096);
-	if (ret)
-		goto err_unref;
-
-	dev_priv->seqno_gfx_addr = obj_priv->gtt_offset;
-	dev_priv->seqno_page =  kmap(obj_priv->pages[0]);
-	if (dev_priv->seqno_page == NULL)
-		goto err_unpin;
-
-	dev_priv->seqno_obj = obj;
-	memset(dev_priv->seqno_page, 0, PAGE_SIZE);
-
-	return 0;
-
-err_unpin:
-	i915_gem_object_unpin(obj);
-err_unref:
-	drm_gem_object_unreference(obj);
-err:
-	return ret;
-}
-
-
-static void
-i915_gem_cleanup_pipe_control(struct drm_device *dev)
-{
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_gem_object *obj;
-	struct drm_i915_gem_object *obj_priv;
-
-	obj = dev_priv->seqno_obj;
-	obj_priv = to_intel_bo(obj);
-	kunmap(obj_priv->pages[0]);
-	i915_gem_object_unpin(obj);
-	drm_gem_object_unreference(obj);
-	dev_priv->seqno_obj = NULL;
-
-	dev_priv->seqno_page = NULL;
-}
-
 int
 i915_gem_init_ringbuffer(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	int ret;
 
-	if (HAS_PIPE_CONTROL(dev)) {
-		ret = i915_gem_init_pipe_control(dev);
-		if (ret)
-			return ret;
-	}
-
 	ret = intel_init_render_ring_buffer(dev);
 	if (ret)
-		goto cleanup_pipe_control;
+		return ret;
 
 	if (HAS_BSD(dev)) {
 		ret = intel_init_bsd_ring_buffer(dev);
@@ -4681,12 +3672,9 @@
 	return 0;
 
 cleanup_bsd_ring:
-	intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring);
+	intel_cleanup_ring_buffer(&dev_priv->ring[VCS]);
 cleanup_render_ring:
-	intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
-cleanup_pipe_control:
-	if (HAS_PIPE_CONTROL(dev))
-		i915_gem_cleanup_pipe_control(dev);
+	intel_cleanup_ring_buffer(&dev_priv->ring[RCS]);
 	return ret;
 }
 
@@ -4694,12 +3682,10 @@
 i915_gem_cleanup_ringbuffer(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	int i;
 
-	intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
-	intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring);
-	intel_cleanup_ring_buffer(dev, &dev_priv->blt_ring);
-	if (HAS_PIPE_CONTROL(dev))
-		i915_gem_cleanup_pipe_control(dev);
+	for (i = 0; i < I915_NUM_RINGS; i++)
+		intel_cleanup_ring_buffer(&dev_priv->ring[i]);
 }
 
 int
@@ -4707,7 +3693,7 @@
 		       struct drm_file *file_priv)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	int ret;
+	int ret, i;
 
 	if (drm_core_check_feature(dev, DRIVER_MODESET))
 		return 0;
@@ -4727,14 +3713,12 @@
 	}
 
 	BUG_ON(!list_empty(&dev_priv->mm.active_list));
-	BUG_ON(!list_empty(&dev_priv->render_ring.active_list));
-	BUG_ON(!list_empty(&dev_priv->bsd_ring.active_list));
-	BUG_ON(!list_empty(&dev_priv->blt_ring.active_list));
 	BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
 	BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
-	BUG_ON(!list_empty(&dev_priv->render_ring.request_list));
-	BUG_ON(!list_empty(&dev_priv->bsd_ring.request_list));
-	BUG_ON(!list_empty(&dev_priv->blt_ring.request_list));
+	for (i = 0; i < I915_NUM_RINGS; i++) {
+		BUG_ON(!list_empty(&dev_priv->ring[i].active_list));
+		BUG_ON(!list_empty(&dev_priv->ring[i].request_list));
+	}
 	mutex_unlock(&dev->struct_mutex);
 
 	ret = drm_irq_install(dev);
@@ -4796,17 +3780,14 @@
 	INIT_LIST_HEAD(&dev_priv->mm.pinned_list);
 	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
 	INIT_LIST_HEAD(&dev_priv->mm.deferred_free_list);
-	init_ring_lists(&dev_priv->render_ring);
-	init_ring_lists(&dev_priv->bsd_ring);
-	init_ring_lists(&dev_priv->blt_ring);
+	INIT_LIST_HEAD(&dev_priv->mm.gtt_list);
+	for (i = 0; i < I915_NUM_RINGS; i++)
+		init_ring_lists(&dev_priv->ring[i]);
 	for (i = 0; i < 16; i++)
 		INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
 	INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
 			  i915_gem_retire_work_handler);
 	init_completion(&dev_priv->error_completion);
-	spin_lock(&shrink_list_lock);
-	list_add(&dev_priv->mm.shrink_list, &shrink_list);
-	spin_unlock(&shrink_list_lock);
 
 	/* On GEN3 we really need to make sure the ARB C3 LP bit is set */
 	if (IS_GEN3(dev)) {
@@ -4818,6 +3799,8 @@
 		}
 	}
 
+	dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
+
 	/* Old X drivers will take 0-2 for front, back, depth buffers */
 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
 		dev_priv->fence_reg_start = 3;
@@ -4849,6 +3832,10 @@
 	}
 	i915_gem_detect_bit_6_swizzle(dev);
 	init_waitqueue_head(&dev_priv->pending_flip_queue);
+
+	dev_priv->mm.inactive_shrinker.shrink = i915_gem_inactive_shrink;
+	dev_priv->mm.inactive_shrinker.seeks = DEFAULT_SEEKS;
+	register_shrinker(&dev_priv->mm.inactive_shrinker);
 }
 
 /*
@@ -4918,47 +3905,47 @@
 }
 
 void i915_gem_detach_phys_object(struct drm_device *dev,
-				 struct drm_gem_object *obj)
+				 struct drm_i915_gem_object *obj)
 {
-	struct drm_i915_gem_object *obj_priv;
+	struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
+	char *vaddr;
 	int i;
-	int ret;
 	int page_count;
 
-	obj_priv = to_intel_bo(obj);
-	if (!obj_priv->phys_obj)
+	if (!obj->phys_obj)
 		return;
+	vaddr = obj->phys_obj->handle->vaddr;
 
-	ret = i915_gem_object_get_pages(obj, 0);
-	if (ret)
-		goto out;
-
-	page_count = obj->size / PAGE_SIZE;
-
+	page_count = obj->base.size / PAGE_SIZE;
 	for (i = 0; i < page_count; i++) {
-		char *dst = kmap_atomic(obj_priv->pages[i]);
-		char *src = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);
+		struct page *page = read_cache_page_gfp(mapping, i,
+							GFP_HIGHUSER | __GFP_RECLAIMABLE);
+		if (!IS_ERR(page)) {
+			char *dst = kmap_atomic(page);
+			memcpy(dst, vaddr + i*PAGE_SIZE, PAGE_SIZE);
+			kunmap_atomic(dst);
 
-		memcpy(dst, src, PAGE_SIZE);
-		kunmap_atomic(dst);
+			drm_clflush_pages(&page, 1);
+
+			set_page_dirty(page);
+			mark_page_accessed(page);
+			page_cache_release(page);
+		}
 	}
-	drm_clflush_pages(obj_priv->pages, page_count);
-	drm_agp_chipset_flush(dev);
+	intel_gtt_chipset_flush();
 
-	i915_gem_object_put_pages(obj);
-out:
-	obj_priv->phys_obj->cur_obj = NULL;
-	obj_priv->phys_obj = NULL;
+	obj->phys_obj->cur_obj = NULL;
+	obj->phys_obj = NULL;
 }
 
 int
 i915_gem_attach_phys_object(struct drm_device *dev,
-			    struct drm_gem_object *obj,
+			    struct drm_i915_gem_object *obj,
 			    int id,
 			    int align)
 {
+	struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *obj_priv;
 	int ret = 0;
 	int page_count;
 	int i;
@@ -4966,10 +3953,8 @@
 	if (id > I915_MAX_PHYS_OBJECT)
 		return -EINVAL;
 
-	obj_priv = to_intel_bo(obj);
-
-	if (obj_priv->phys_obj) {
-		if (obj_priv->phys_obj->id == id)
+	if (obj->phys_obj) {
+		if (obj->phys_obj->id == id)
 			return 0;
 		i915_gem_detach_phys_object(dev, obj);
 	}
@@ -4977,51 +3962,50 @@
 	/* create a new object */
 	if (!dev_priv->mm.phys_objs[id - 1]) {
 		ret = i915_gem_init_phys_object(dev, id,
-						obj->size, align);
+						obj->base.size, align);
 		if (ret) {
-			DRM_ERROR("failed to init phys object %d size: %zu\n", id, obj->size);
-			goto out;
+			DRM_ERROR("failed to init phys object %d size: %zu\n",
+				  id, obj->base.size);
+			return ret;
 		}
 	}
 
 	/* bind to the object */
-	obj_priv->phys_obj = dev_priv->mm.phys_objs[id - 1];
-	obj_priv->phys_obj->cur_obj = obj;
+	obj->phys_obj = dev_priv->mm.phys_objs[id - 1];
+	obj->phys_obj->cur_obj = obj;
 
-	ret = i915_gem_object_get_pages(obj, 0);
-	if (ret) {
-		DRM_ERROR("failed to get page list\n");
-		goto out;
-	}
-
-	page_count = obj->size / PAGE_SIZE;
+	page_count = obj->base.size / PAGE_SIZE;
 
 	for (i = 0; i < page_count; i++) {
-		char *src = kmap_atomic(obj_priv->pages[i]);
-		char *dst = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);
+		struct page *page;
+		char *dst, *src;
 
+		page = read_cache_page_gfp(mapping, i,
+					   GFP_HIGHUSER | __GFP_RECLAIMABLE);
+		if (IS_ERR(page))
+			return PTR_ERR(page);
+
+		src = kmap_atomic(page);
+		dst = obj->phys_obj->handle->vaddr + (i * PAGE_SIZE);
 		memcpy(dst, src, PAGE_SIZE);
 		kunmap_atomic(src);
+
+		mark_page_accessed(page);
+		page_cache_release(page);
 	}
 
-	i915_gem_object_put_pages(obj);
-
 	return 0;
-out:
-	return ret;
 }
 
 static int
-i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
+i915_gem_phys_pwrite(struct drm_device *dev,
+		     struct drm_i915_gem_object *obj,
 		     struct drm_i915_gem_pwrite *args,
 		     struct drm_file *file_priv)
 {
-	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
-	void *vaddr = obj_priv->phys_obj->handle->vaddr + args->offset;
+	void *vaddr = obj->phys_obj->handle->vaddr + args->offset;
 	char __user *user_data = (char __user *) (uintptr_t) args->data_ptr;
 
-	DRM_DEBUG_DRIVER("vaddr %p, %lld\n", vaddr, args->size);
-
 	if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
 		unsigned long unwritten;
 
@@ -5036,7 +4020,7 @@
 			return -EFAULT;
 	}
 
-	drm_agp_chipset_flush(dev);
+	intel_gtt_chipset_flush();
 	return 0;
 }
 
@@ -5074,144 +4058,68 @@
 }
 
 static int
-i915_gem_shrink(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
+i915_gem_inactive_shrink(struct shrinker *shrinker,
+			 int nr_to_scan,
+			 gfp_t gfp_mask)
 {
-	drm_i915_private_t *dev_priv, *next_dev;
-	struct drm_i915_gem_object *obj_priv, *next_obj;
-	int cnt = 0;
-	int would_deadlock = 1;
+	struct drm_i915_private *dev_priv =
+		container_of(shrinker,
+			     struct drm_i915_private,
+			     mm.inactive_shrinker);
+	struct drm_device *dev = dev_priv->dev;
+	struct drm_i915_gem_object *obj, *next;
+	int cnt;
+
+	if (!mutex_trylock(&dev->struct_mutex))
+		return 0;
 
 	/* "fast-path" to count number of available objects */
 	if (nr_to_scan == 0) {
-		spin_lock(&shrink_list_lock);
-		list_for_each_entry(dev_priv, &shrink_list, mm.shrink_list) {
-			struct drm_device *dev = dev_priv->dev;
-
-			if (mutex_trylock(&dev->struct_mutex)) {
-				list_for_each_entry(obj_priv,
-						    &dev_priv->mm.inactive_list,
-						    mm_list)
-					cnt++;
-				mutex_unlock(&dev->struct_mutex);
-			}
-		}
-		spin_unlock(&shrink_list_lock);
-
-		return (cnt / 100) * sysctl_vfs_cache_pressure;
+		cnt = 0;
+		list_for_each_entry(obj,
+				    &dev_priv->mm.inactive_list,
+				    mm_list)
+			cnt++;
+		mutex_unlock(&dev->struct_mutex);
+		return cnt / 100 * sysctl_vfs_cache_pressure;
 	}
 
-	spin_lock(&shrink_list_lock);
-
 rescan:
 	/* first scan for clean buffers */
-	list_for_each_entry_safe(dev_priv, next_dev,
-				 &shrink_list, mm.shrink_list) {
-		struct drm_device *dev = dev_priv->dev;
+	i915_gem_retire_requests(dev);
 
-		if (! mutex_trylock(&dev->struct_mutex))
-			continue;
-
-		spin_unlock(&shrink_list_lock);
-		i915_gem_retire_requests(dev);
-
-		list_for_each_entry_safe(obj_priv, next_obj,
-					 &dev_priv->mm.inactive_list,
-					 mm_list) {
-			if (i915_gem_object_is_purgeable(obj_priv)) {
-				i915_gem_object_unbind(&obj_priv->base);
-				if (--nr_to_scan <= 0)
-					break;
-			}
+	list_for_each_entry_safe(obj, next,
+				 &dev_priv->mm.inactive_list,
+				 mm_list) {
+		if (i915_gem_object_is_purgeable(obj)) {
+			if (i915_gem_object_unbind(obj) == 0 &&
+			    --nr_to_scan == 0)
+				break;
 		}
-
-		spin_lock(&shrink_list_lock);
-		mutex_unlock(&dev->struct_mutex);
-
-		would_deadlock = 0;
-
-		if (nr_to_scan <= 0)
-			break;
 	}
 
 	/* second pass, evict/count anything still on the inactive list */
-	list_for_each_entry_safe(dev_priv, next_dev,
-				 &shrink_list, mm.shrink_list) {
-		struct drm_device *dev = dev_priv->dev;
-
-		if (! mutex_trylock(&dev->struct_mutex))
-			continue;
-
-		spin_unlock(&shrink_list_lock);
-
-		list_for_each_entry_safe(obj_priv, next_obj,
-					 &dev_priv->mm.inactive_list,
-					 mm_list) {
-			if (nr_to_scan > 0) {
-				i915_gem_object_unbind(&obj_priv->base);
-				nr_to_scan--;
-			} else
-				cnt++;
-		}
-
-		spin_lock(&shrink_list_lock);
-		mutex_unlock(&dev->struct_mutex);
-
-		would_deadlock = 0;
+	cnt = 0;
+	list_for_each_entry_safe(obj, next,
+				 &dev_priv->mm.inactive_list,
+				 mm_list) {
+		if (nr_to_scan &&
+		    i915_gem_object_unbind(obj) == 0)
+			nr_to_scan--;
+		else
+			cnt++;
 	}
 
-	if (nr_to_scan) {
-		int active = 0;
-
+	if (nr_to_scan && i915_gpu_is_active(dev)) {
 		/*
 		 * We are desperate for pages, so as a last resort, wait
 		 * for the GPU to finish and discard whatever we can.
 		 * This has a dramatic impact to reduce the number of
 		 * OOM-killer events whilst running the GPU aggressively.
 		 */
-		list_for_each_entry(dev_priv, &shrink_list, mm.shrink_list) {
-			struct drm_device *dev = dev_priv->dev;
-
-			if (!mutex_trylock(&dev->struct_mutex))
-				continue;
-
-			spin_unlock(&shrink_list_lock);
-
-			if (i915_gpu_is_active(dev)) {
-				i915_gpu_idle(dev);
-				active++;
-			}
-
-			spin_lock(&shrink_list_lock);
-			mutex_unlock(&dev->struct_mutex);
-		}
-
-		if (active)
+		if (i915_gpu_idle(dev) == 0)
 			goto rescan;
 	}
-
-	spin_unlock(&shrink_list_lock);
-
-	if (would_deadlock)
-		return -1;
-	else if (cnt > 0)
-		return (cnt / 100) * sysctl_vfs_cache_pressure;
-	else
-		return 0;
-}
-
-static struct shrinker shrinker = {
-	.shrink = i915_gem_shrink,
-	.seeks = DEFAULT_SEEKS,
-};
-
-__init void
-i915_gem_shrinker_init(void)
-{
-    register_shrinker(&shrinker);
-}
-
-__exit void
-i915_gem_shrinker_exit(void)
-{
-    unregister_shrinker(&shrinker);
+	mutex_unlock(&dev->struct_mutex);
+	return cnt / 100 * sysctl_vfs_cache_pressure;
 }
diff --git a/drivers/gpu/drm/i915/i915_gem_debug.c b/drivers/gpu/drm/i915/i915_gem_debug.c
index 48644b8..29d014c 100644
--- a/drivers/gpu/drm/i915/i915_gem_debug.c
+++ b/drivers/gpu/drm/i915/i915_gem_debug.c
@@ -152,13 +152,12 @@
 }
 
 void
-i915_gem_dump_object(struct drm_gem_object *obj, int len,
+i915_gem_dump_object(struct drm_i915_gem_object *obj, int len,
 		     const char *where, uint32_t mark)
 {
-	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 	int page;
 
-	DRM_INFO("%s: object at offset %08x\n", where, obj_priv->gtt_offset);
+	DRM_INFO("%s: object at offset %08x\n", where, obj->gtt_offset);
 	for (page = 0; page < (len + PAGE_SIZE-1) / PAGE_SIZE; page++) {
 		int page_len, chunk, chunk_len;
 
@@ -170,9 +169,9 @@
 			chunk_len = page_len - chunk;
 			if (chunk_len > 128)
 				chunk_len = 128;
-			i915_gem_dump_page(obj_priv->pages[page],
+			i915_gem_dump_page(obj->pages[page],
 					   chunk, chunk + chunk_len,
-					   obj_priv->gtt_offset +
+					   obj->gtt_offset +
 					   page * PAGE_SIZE,
 					   mark);
 		}
@@ -182,21 +181,19 @@
 
 #if WATCH_COHERENCY
 void
-i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle)
+i915_gem_object_check_coherency(struct drm_i915_gem_object *obj, int handle)
 {
-	struct drm_device *dev = obj->dev;
-	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+	struct drm_device *dev = obj->base.dev;
 	int page;
 	uint32_t *gtt_mapping;
 	uint32_t *backing_map = NULL;
 	int bad_count = 0;
 
 	DRM_INFO("%s: checking coherency of object %p@0x%08x (%d, %zdkb):\n",
-		 __func__, obj, obj_priv->gtt_offset, handle,
+		 __func__, obj, obj->gtt_offset, handle,
 		 obj->size / 1024);
 
-	gtt_mapping = ioremap(dev->agp->base + obj_priv->gtt_offset,
-			      obj->size);
+	gtt_mapping = ioremap(dev->agp->base + obj->gtt_offset, obj->base.size);
 	if (gtt_mapping == NULL) {
 		DRM_ERROR("failed to map GTT space\n");
 		return;
@@ -205,7 +202,7 @@
 	for (page = 0; page < obj->size / PAGE_SIZE; page++) {
 		int i;
 
-		backing_map = kmap_atomic(obj_priv->pages[page], KM_USER0);
+		backing_map = kmap_atomic(obj->pages[page], KM_USER0);
 
 		if (backing_map == NULL) {
 			DRM_ERROR("failed to map backing page\n");
@@ -220,7 +217,7 @@
 			if (cpuval != gttval) {
 				DRM_INFO("incoherent CPU vs GPU at 0x%08x: "
 					 "0x%08x vs 0x%08x\n",
-					 (int)(obj_priv->gtt_offset +
+					 (int)(obj->gtt_offset +
 					       page * PAGE_SIZE + i * 4),
 					 cpuval, gttval);
 				if (bad_count++ >= 8) {
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index d8ae7d1..3d39005 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -32,28 +32,36 @@
 #include "i915_drm.h"
 
 static bool
-mark_free(struct drm_i915_gem_object *obj_priv,
-	   struct list_head *unwind)
+mark_free(struct drm_i915_gem_object *obj, struct list_head *unwind)
 {
-	list_add(&obj_priv->evict_list, unwind);
-	drm_gem_object_reference(&obj_priv->base);
-	return drm_mm_scan_add_block(obj_priv->gtt_space);
+	list_add(&obj->exec_list, unwind);
+	drm_gem_object_reference(&obj->base);
+	return drm_mm_scan_add_block(obj->gtt_space);
 }
 
 int
-i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignment)
+i915_gem_evict_something(struct drm_device *dev, int min_size,
+			 unsigned alignment, bool mappable)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct list_head eviction_list, unwind_list;
-	struct drm_i915_gem_object *obj_priv;
+	struct drm_i915_gem_object *obj;
 	int ret = 0;
 
 	i915_gem_retire_requests(dev);
 
 	/* Re-check for free space after retiring requests */
-	if (drm_mm_search_free(&dev_priv->mm.gtt_space,
-			       min_size, alignment, 0))
-		return 0;
+	if (mappable) {
+		if (drm_mm_search_free_in_range(&dev_priv->mm.gtt_space,
+						min_size, alignment, 0,
+						dev_priv->mm.gtt_mappable_end,
+						0))
+			return 0;
+	} else {
+		if (drm_mm_search_free(&dev_priv->mm.gtt_space,
+				       min_size, alignment, 0))
+			return 0;
+	}
 
 	/*
 	 * The goal is to evict objects and amalgamate space in LRU order.
@@ -79,45 +87,56 @@
 	 */
 
 	INIT_LIST_HEAD(&unwind_list);
-	drm_mm_init_scan(&dev_priv->mm.gtt_space, min_size, alignment);
+	if (mappable)
+		drm_mm_init_scan_with_range(&dev_priv->mm.gtt_space, min_size,
+					    alignment, 0,
+					    dev_priv->mm.gtt_mappable_end);
+	else
+		drm_mm_init_scan(&dev_priv->mm.gtt_space, min_size, alignment);
 
 	/* First see if there is a large enough contiguous idle region... */
-	list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, mm_list) {
-		if (mark_free(obj_priv, &unwind_list))
+	list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list) {
+		if (mark_free(obj, &unwind_list))
 			goto found;
 	}
 
 	/* Now merge in the soon-to-be-expired objects... */
-	list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) {
+	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
 		/* Does the object require an outstanding flush? */
-		if (obj_priv->base.write_domain || obj_priv->pin_count)
+		if (obj->base.write_domain || obj->pin_count)
 			continue;
 
-		if (mark_free(obj_priv, &unwind_list))
+		if (mark_free(obj, &unwind_list))
 			goto found;
 	}
 
 	/* Finally add anything with a pending flush (in order of retirement) */
-	list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, mm_list) {
-		if (obj_priv->pin_count)
+	list_for_each_entry(obj, &dev_priv->mm.flushing_list, mm_list) {
+		if (obj->pin_count)
 			continue;
 
-		if (mark_free(obj_priv, &unwind_list))
+		if (mark_free(obj, &unwind_list))
 			goto found;
 	}
-	list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) {
-		if (! obj_priv->base.write_domain || obj_priv->pin_count)
+	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
+		if (! obj->base.write_domain || obj->pin_count)
 			continue;
 
-		if (mark_free(obj_priv, &unwind_list))
+		if (mark_free(obj, &unwind_list))
 			goto found;
 	}
 
 	/* Nothing found, clean up and bail out! */
-	list_for_each_entry(obj_priv, &unwind_list, evict_list) {
-		ret = drm_mm_scan_remove_block(obj_priv->gtt_space);
+	while (!list_empty(&unwind_list)) {
+		obj = list_first_entry(&unwind_list,
+				       struct drm_i915_gem_object,
+				       exec_list);
+
+		ret = drm_mm_scan_remove_block(obj->gtt_space);
 		BUG_ON(ret);
-		drm_gem_object_unreference(&obj_priv->base);
+
+		list_del_init(&obj->exec_list);
+		drm_gem_object_unreference(&obj->base);
 	}
 
 	/* We expect the caller to unpin, evict all and try again, or give up.
@@ -131,33 +150,34 @@
 	 * temporary list. */
 	INIT_LIST_HEAD(&eviction_list);
 	while (!list_empty(&unwind_list)) {
-		obj_priv = list_first_entry(&unwind_list,
-					    struct drm_i915_gem_object,
-					    evict_list);
-		if (drm_mm_scan_remove_block(obj_priv->gtt_space)) {
-			list_move(&obj_priv->evict_list, &eviction_list);
+		obj = list_first_entry(&unwind_list,
+				       struct drm_i915_gem_object,
+				       exec_list);
+		if (drm_mm_scan_remove_block(obj->gtt_space)) {
+			list_move(&obj->exec_list, &eviction_list);
 			continue;
 		}
-		list_del(&obj_priv->evict_list);
-		drm_gem_object_unreference(&obj_priv->base);
+		list_del_init(&obj->exec_list);
+		drm_gem_object_unreference(&obj->base);
 	}
 
 	/* Unbinding will emit any required flushes */
 	while (!list_empty(&eviction_list)) {
-		obj_priv = list_first_entry(&eviction_list,
-					    struct drm_i915_gem_object,
-					    evict_list);
+		obj = list_first_entry(&eviction_list,
+				       struct drm_i915_gem_object,
+				       exec_list);
 		if (ret == 0)
-			ret = i915_gem_object_unbind(&obj_priv->base);
-		list_del(&obj_priv->evict_list);
-		drm_gem_object_unreference(&obj_priv->base);
+			ret = i915_gem_object_unbind(obj);
+
+		list_del_init(&obj->exec_list);
+		drm_gem_object_unreference(&obj->base);
 	}
 
 	return ret;
 }
 
 int
-i915_gem_evict_everything(struct drm_device *dev)
+i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	int ret;
@@ -176,36 +196,22 @@
 
 	BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
 
-	ret = i915_gem_evict_inactive(dev);
-	if (ret)
-		return ret;
-
-	lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
-		       list_empty(&dev_priv->mm.flushing_list) &&
-		       list_empty(&dev_priv->mm.active_list));
-	BUG_ON(!lists_empty);
-
-	return 0;
+	return i915_gem_evict_inactive(dev, purgeable_only);
 }
 
 /** Unbinds all inactive objects. */
 int
-i915_gem_evict_inactive(struct drm_device *dev)
+i915_gem_evict_inactive(struct drm_device *dev, bool purgeable_only)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_gem_object *obj, *next;
 
-	while (!list_empty(&dev_priv->mm.inactive_list)) {
-		struct drm_gem_object *obj;
-		int ret;
-
-		obj = &list_first_entry(&dev_priv->mm.inactive_list,
-					struct drm_i915_gem_object,
-					mm_list)->base;
-
-		ret = i915_gem_object_unbind(obj);
-		if (ret != 0) {
-			DRM_ERROR("Error unbinding object: %d\n", ret);
-			return ret;
+	list_for_each_entry_safe(obj, next,
+				 &dev_priv->mm.inactive_list, mm_list) {
+		if (!purgeable_only || obj->madv != I915_MADV_WILLNEED) {
+			int ret = i915_gem_object_unbind(obj);
+			if (ret)
+				return ret;
 		}
 	}
 
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
new file mode 100644
index 0000000..e698343
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -0,0 +1,1374 @@
+/*
+ * Copyright © 2008,2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Eric Anholt <eric@anholt.net>
+ *    Chris Wilson <chris@chris-wilson.co.uk>
+ *
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "i915_drm.h"
+#include "i915_drv.h"
+#include "i915_trace.h"
+#include "intel_drv.h"
+
+struct change_domains {
+	uint32_t invalidate_domains;
+	uint32_t flush_domains;
+	uint32_t flush_rings;
+};
+
+/*
+ * Set the next domain for the specified object. This
+ * may not actually perform the necessary flushing/invalidating though,
+ * as that may want to be batched with other set_domain operations
+ *
+ * This is (we hope) the only really tricky part of gem. The goal
+ * is fairly simple -- track which caches hold bits of the object
+ * and make sure they remain coherent. A few concrete examples may
+ * help to explain how it works. For shorthand, we use the notation
+ * (read_domains, write_domain), e.g. (CPU, CPU) to indicate
+ * a pair of read and write domain masks.
+ *
+ * Case 1: the batch buffer
+ *
+ *	1. Allocated
+ *	2. Written by CPU
+ *	3. Mapped to GTT
+ *	4. Read by GPU
+ *	5. Unmapped from GTT
+ *	6. Freed
+ *
+ *	Let's take these a step at a time
+ *
+ *	1. Allocated
+ *		Pages allocated from the kernel may still have
+ *		cache contents, so we set them to (CPU, CPU) always.
+ *	2. Written by CPU (using pwrite)
+ *		The pwrite function calls set_domain (CPU, CPU) and
+ *		this function does nothing (as nothing changes)
+ *	3. Mapped by GTT
+ *		This function asserts that the object is not
+ *		currently in any GPU-based read or write domains
+ *	4. Read by GPU
+ *		i915_gem_execbuffer calls set_domain (COMMAND, 0).
+ *		As write_domain is zero, this function adds in the
+ *		current read domains (CPU+COMMAND, 0).
+ *		flush_domains is set to CPU.
+ *		invalidate_domains is set to COMMAND
+ *		clflush is run to get data out of the CPU caches
+ *		then i915_dev_set_domain calls i915_gem_flush to
+ *		emit an MI_FLUSH and drm_agp_chipset_flush
+ *	5. Unmapped from GTT
+ *		i915_gem_object_unbind calls set_domain (CPU, CPU)
+ *		flush_domains and invalidate_domains end up both zero
+ *		so no flushing/invalidating happens
+ *	6. Freed
+ *		yay, done
+ *
+ * Case 2: The shared render buffer
+ *
+ *	1. Allocated
+ *	2. Mapped to GTT
+ *	3. Read/written by GPU
+ *	4. set_domain to (CPU,CPU)
+ *	5. Read/written by CPU
+ *	6. Read/written by GPU
+ *
+ *	1. Allocated
+ *		Same as last example, (CPU, CPU)
+ *	2. Mapped to GTT
+ *		Nothing changes (assertions find that it is not in the GPU)
+ *	3. Read/written by GPU
+ *		execbuffer calls set_domain (RENDER, RENDER)
+ *		flush_domains gets CPU
+ *		invalidate_domains gets GPU
+ *		clflush (obj)
+ *		MI_FLUSH and drm_agp_chipset_flush
+ *	4. set_domain (CPU, CPU)
+ *		flush_domains gets GPU
+ *		invalidate_domains gets CPU
+ *		wait_rendering (obj) to make sure all drawing is complete.
+ *		This will include an MI_FLUSH to get the data from GPU
+ *		to memory
+ *		clflush (obj) to invalidate the CPU cache
+ *		Another MI_FLUSH in i915_gem_flush (eliminate this somehow?)
+ *	5. Read/written by CPU
+ *		cache lines are loaded and dirtied
+ *	6. Read written by GPU
+ *		Same as last GPU access
+ *
+ * Case 3: The constant buffer
+ *
+ *	1. Allocated
+ *	2. Written by CPU
+ *	3. Read by GPU
+ *	4. Updated (written) by CPU again
+ *	5. Read by GPU
+ *
+ *	1. Allocated
+ *		(CPU, CPU)
+ *	2. Written by CPU
+ *		(CPU, CPU)
+ *	3. Read by GPU
+ *		(CPU+RENDER, 0)
+ *		flush_domains = CPU
+ *		invalidate_domains = RENDER
+ *		clflush (obj)
+ *		MI_FLUSH
+ *		drm_agp_chipset_flush
+ *	4. Updated (written) by CPU again
+ *		(CPU, CPU)
+ *		flush_domains = 0 (no previous write domain)
+ *		invalidate_domains = 0 (no new read domains)
+ *	5. Read by GPU
+ *		(CPU+RENDER, 0)
+ *		flush_domains = CPU
+ *		invalidate_domains = RENDER
+ *		clflush (obj)
+ *		MI_FLUSH
+ *		drm_agp_chipset_flush
+ */
+static void
+i915_gem_object_set_to_gpu_domain(struct drm_i915_gem_object *obj,
+				  struct intel_ring_buffer *ring,
+				  struct change_domains *cd)
+{
+	uint32_t invalidate_domains = 0, flush_domains = 0;
+
+	/*
+	 * If the object isn't moving to a new write domain,
+	 * let the object stay in multiple read domains
+	 */
+	if (obj->base.pending_write_domain == 0)
+		obj->base.pending_read_domains |= obj->base.read_domains;
+
+	/*
+	 * Flush the current write domain if
+	 * the new read domains don't match. Invalidate
+	 * any read domains which differ from the old
+	 * write domain
+	 */
+	if (obj->base.write_domain &&
+	    (((obj->base.write_domain != obj->base.pending_read_domains ||
+	       obj->ring != ring)) ||
+	     (obj->fenced_gpu_access && !obj->pending_fenced_gpu_access))) {
+		flush_domains |= obj->base.write_domain;
+		invalidate_domains |=
+			obj->base.pending_read_domains & ~obj->base.write_domain;
+	}
+	/*
+	 * Invalidate any read caches which may have
+	 * stale data. That is, any new read domains.
+	 */
+	invalidate_domains |= obj->base.pending_read_domains & ~obj->base.read_domains;
+	if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU)
+		i915_gem_clflush_object(obj);
+
+	/* blow away mappings if mapped through GTT */
+	if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_GTT)
+		i915_gem_release_mmap(obj);
+
+	/* The actual obj->write_domain will be updated with
+	 * pending_write_domain after we emit the accumulated flush for all
+	 * of our domain changes in execbuffers (which clears objects'
+	 * write_domains).  So if we have a current write domain that we
+	 * aren't changing, set pending_write_domain to that.
+	 */
+	if (flush_domains == 0 && obj->base.pending_write_domain == 0)
+		obj->base.pending_write_domain = obj->base.write_domain;
+
+	cd->invalidate_domains |= invalidate_domains;
+	cd->flush_domains |= flush_domains;
+	if (flush_domains & I915_GEM_GPU_DOMAINS)
+		cd->flush_rings |= obj->ring->id;
+	if (invalidate_domains & I915_GEM_GPU_DOMAINS)
+		cd->flush_rings |= ring->id;
+}
+
+struct eb_objects {
+	int and;
+	struct hlist_head buckets[0];
+};
+
+static struct eb_objects *
+eb_create(int size)
+{
+	struct eb_objects *eb;
+	int count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
+	while (count > size)
+		count >>= 1;
+	eb = kzalloc(count*sizeof(struct hlist_head) +
+		     sizeof(struct eb_objects),
+		     GFP_KERNEL);
+	if (eb == NULL)
+		return eb;
+
+	eb->and = count - 1;
+	return eb;
+}
+
+static void
+eb_reset(struct eb_objects *eb)
+{
+	memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head));
+}
+
+static void
+eb_add_object(struct eb_objects *eb, struct drm_i915_gem_object *obj)
+{
+	hlist_add_head(&obj->exec_node,
+		       &eb->buckets[obj->exec_handle & eb->and]);
+}
+
+static struct drm_i915_gem_object *
+eb_get_object(struct eb_objects *eb, unsigned long handle)
+{
+	struct hlist_head *head;
+	struct hlist_node *node;
+	struct drm_i915_gem_object *obj;
+
+	head = &eb->buckets[handle & eb->and];
+	hlist_for_each(node, head) {
+		obj = hlist_entry(node, struct drm_i915_gem_object, exec_node);
+		if (obj->exec_handle == handle)
+			return obj;
+	}
+
+	return NULL;
+}
+
+static void
+eb_destroy(struct eb_objects *eb)
+{
+	kfree(eb);
+}
+
+static int
+i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
+				   struct eb_objects *eb,
+				   struct drm_i915_gem_relocation_entry *reloc)
+{
+	struct drm_device *dev = obj->base.dev;
+	struct drm_gem_object *target_obj;
+	uint32_t target_offset;
+	int ret = -EINVAL;
+
+	/* we already hold a reference to all valid objects */
+	target_obj = &eb_get_object(eb, reloc->target_handle)->base;
+	if (unlikely(target_obj == NULL))
+		return -ENOENT;
+
+	target_offset = to_intel_bo(target_obj)->gtt_offset;
+
+#if WATCH_RELOC
+	DRM_INFO("%s: obj %p offset %08x target %d "
+		 "read %08x write %08x gtt %08x "
+		 "presumed %08x delta %08x\n",
+		 __func__,
+		 obj,
+		 (int) reloc->offset,
+		 (int) reloc->target_handle,
+		 (int) reloc->read_domains,
+		 (int) reloc->write_domain,
+		 (int) target_offset,
+		 (int) reloc->presumed_offset,
+		 reloc->delta);
+#endif
+
+	/* The target buffer should have appeared before us in the
+	 * exec_object list, so it should have a GTT space bound by now.
+	 */
+	if (unlikely(target_offset == 0)) {
+		DRM_ERROR("No GTT space found for object %d\n",
+			  reloc->target_handle);
+		return ret;
+	}
+
+	/* Validate that the target is in a valid r/w GPU domain */
+	if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
+		DRM_ERROR("reloc with multiple write domains: "
+			  "obj %p target %d offset %d "
+			  "read %08x write %08x",
+			  obj, reloc->target_handle,
+			  (int) reloc->offset,
+			  reloc->read_domains,
+			  reloc->write_domain);
+		return ret;
+	}
+	if (unlikely((reloc->write_domain | reloc->read_domains) & I915_GEM_DOMAIN_CPU)) {
+		DRM_ERROR("reloc with read/write CPU domains: "
+			  "obj %p target %d offset %d "
+			  "read %08x write %08x",
+			  obj, reloc->target_handle,
+			  (int) reloc->offset,
+			  reloc->read_domains,
+			  reloc->write_domain);
+		return ret;
+	}
+	if (unlikely(reloc->write_domain && target_obj->pending_write_domain &&
+		     reloc->write_domain != target_obj->pending_write_domain)) {
+		DRM_ERROR("Write domain conflict: "
+			  "obj %p target %d offset %d "
+			  "new %08x old %08x\n",
+			  obj, reloc->target_handle,
+			  (int) reloc->offset,
+			  reloc->write_domain,
+			  target_obj->pending_write_domain);
+		return ret;
+	}
+
+	target_obj->pending_read_domains |= reloc->read_domains;
+	target_obj->pending_write_domain |= reloc->write_domain;
+
+	/* If the relocation already has the right value in it, no
+	 * more work needs to be done.
+	 */
+	if (target_offset == reloc->presumed_offset)
+		return 0;
+
+	/* Check that the relocation address is valid... */
+	if (unlikely(reloc->offset > obj->base.size - 4)) {
+		DRM_ERROR("Relocation beyond object bounds: "
+			  "obj %p target %d offset %d size %d.\n",
+			  obj, reloc->target_handle,
+			  (int) reloc->offset,
+			  (int) obj->base.size);
+		return ret;
+	}
+	if (unlikely(reloc->offset & 3)) {
+		DRM_ERROR("Relocation not 4-byte aligned: "
+			  "obj %p target %d offset %d.\n",
+			  obj, reloc->target_handle,
+			  (int) reloc->offset);
+		return ret;
+	}
+
+	/* and points to somewhere within the target object. */
+	if (unlikely(reloc->delta >= target_obj->size)) {
+		DRM_ERROR("Relocation beyond target object bounds: "
+			  "obj %p target %d delta %d size %d.\n",
+			  obj, reloc->target_handle,
+			  (int) reloc->delta,
+			  (int) target_obj->size);
+		return ret;
+	}
+
+	reloc->delta += target_offset;
+	if (obj->base.write_domain == I915_GEM_DOMAIN_CPU) {
+		uint32_t page_offset = reloc->offset & ~PAGE_MASK;
+		char *vaddr;
+
+		vaddr = kmap_atomic(obj->pages[reloc->offset >> PAGE_SHIFT]);
+		*(uint32_t *)(vaddr + page_offset) = reloc->delta;
+		kunmap_atomic(vaddr);
+	} else {
+		struct drm_i915_private *dev_priv = dev->dev_private;
+		uint32_t __iomem *reloc_entry;
+		void __iomem *reloc_page;
+
+		ret = i915_gem_object_set_to_gtt_domain(obj, 1);
+		if (ret)
+			return ret;
+
+		/* Map the page containing the relocation we're going to perform.  */
+		reloc->offset += obj->gtt_offset;
+		reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
+						      reloc->offset & PAGE_MASK);
+		reloc_entry = (uint32_t __iomem *)
+			(reloc_page + (reloc->offset & ~PAGE_MASK));
+		iowrite32(reloc->delta, reloc_entry);
+		io_mapping_unmap_atomic(reloc_page);
+	}
+
+	/* and update the user's relocation entry */
+	reloc->presumed_offset = target_offset;
+
+	return 0;
+}
+
+static int
+i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
+				    struct eb_objects *eb)
+{
+	struct drm_i915_gem_relocation_entry __user *user_relocs;
+	struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
+	int i, ret;
+
+	user_relocs = (void __user *)(uintptr_t)entry->relocs_ptr;
+	for (i = 0; i < entry->relocation_count; i++) {
+		struct drm_i915_gem_relocation_entry reloc;
+
+		if (__copy_from_user_inatomic(&reloc,
+					      user_relocs+i,
+					      sizeof(reloc)))
+			return -EFAULT;
+
+		ret = i915_gem_execbuffer_relocate_entry(obj, eb, &reloc);
+		if (ret)
+			return ret;
+
+		if (__copy_to_user_inatomic(&user_relocs[i].presumed_offset,
+					    &reloc.presumed_offset,
+					    sizeof(reloc.presumed_offset)))
+			return -EFAULT;
+	}
+
+	return 0;
+}
+
+static int
+i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj,
+					 struct eb_objects *eb,
+					 struct drm_i915_gem_relocation_entry *relocs)
+{
+	const struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
+	int i, ret;
+
+	for (i = 0; i < entry->relocation_count; i++) {
+		ret = i915_gem_execbuffer_relocate_entry(obj, eb, &relocs[i]);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+static int
+i915_gem_execbuffer_relocate(struct drm_device *dev,
+			     struct eb_objects *eb,
+			     struct list_head *objects)
+{
+	struct drm_i915_gem_object *obj;
+	int ret;
+
+	list_for_each_entry(obj, objects, exec_list) {
+		obj->base.pending_read_domains = 0;
+		obj->base.pending_write_domain = 0;
+		ret = i915_gem_execbuffer_relocate_object(obj, eb);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+static int
+i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
+			    struct drm_file *file,
+			    struct list_head *objects)
+{
+	struct drm_i915_gem_object *obj;
+	int ret, retry;
+	bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
+	struct list_head ordered_objects;
+
+	INIT_LIST_HEAD(&ordered_objects);
+	while (!list_empty(objects)) {
+		struct drm_i915_gem_exec_object2 *entry;
+		bool need_fence, need_mappable;
+
+		obj = list_first_entry(objects,
+				       struct drm_i915_gem_object,
+				       exec_list);
+		entry = obj->exec_entry;
+
+		need_fence =
+			has_fenced_gpu_access &&
+			entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
+			obj->tiling_mode != I915_TILING_NONE;
+		need_mappable =
+			entry->relocation_count ? true : need_fence;
+
+		if (need_mappable)
+			list_move(&obj->exec_list, &ordered_objects);
+		else
+			list_move_tail(&obj->exec_list, &ordered_objects);
+	}
+	list_splice(&ordered_objects, objects);
+
+	/* Attempt to pin all of the buffers into the GTT.
+	 * This is done in 3 phases:
+	 *
+	 * 1a. Unbind all objects that do not match the GTT constraints for
+	 *     the execbuffer (fenceable, mappable, alignment etc).
+	 * 1b. Increment pin count for already bound objects.
+	 * 2.  Bind new objects.
+	 * 3.  Decrement pin count.
+	 *
+	 * This avoids unnecessary unbinding of later objects in order to make
+	 * room for the earlier objects *unless* we need to defragment.
+	 */
+	retry = 0;
+	do {
+		ret = 0;
+
+		/* Unbind any ill-fitting objects, otherwise pin them. */
+		list_for_each_entry(obj, objects, exec_list) {
+			struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
+			bool need_fence, need_mappable;
+			if (!obj->gtt_space)
+				continue;
+
+			need_fence =
+				has_fenced_gpu_access &&
+				entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
+				obj->tiling_mode != I915_TILING_NONE;
+			need_mappable =
+				entry->relocation_count ? true : need_fence;
+
+			if ((entry->alignment && obj->gtt_offset & (entry->alignment - 1)) ||
+			    (need_mappable && !obj->map_and_fenceable))
+				ret = i915_gem_object_unbind(obj);
+			else
+				ret = i915_gem_object_pin(obj,
+							  entry->alignment,
+							  need_mappable);
+			if (ret)
+				goto err;
+
+			entry++;
+		}
+
+		/* Bind fresh objects */
+		list_for_each_entry(obj, objects, exec_list) {
+			struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
+			bool need_fence;
+
+			need_fence =
+				has_fenced_gpu_access &&
+				entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
+				obj->tiling_mode != I915_TILING_NONE;
+
+			if (!obj->gtt_space) {
+				bool need_mappable =
+					entry->relocation_count ? true : need_fence;
+
+				ret = i915_gem_object_pin(obj,
+							  entry->alignment,
+							  need_mappable);
+				if (ret)
+					break;
+			}
+
+			if (has_fenced_gpu_access) {
+				if (need_fence) {
+					ret = i915_gem_object_get_fence(obj, ring, 1);
+					if (ret)
+						break;
+				} else if (entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
+					   obj->tiling_mode == I915_TILING_NONE) {
+					/* XXX pipelined! */
+					ret = i915_gem_object_put_fence(obj);
+					if (ret)
+						break;
+				}
+				obj->pending_fenced_gpu_access = need_fence;
+			}
+
+			entry->offset = obj->gtt_offset;
+		}
+
+		/* Decrement pin count for bound objects */
+		list_for_each_entry(obj, objects, exec_list) {
+			if (obj->gtt_space)
+				i915_gem_object_unpin(obj);
+		}
+
+		if (ret != -ENOSPC || retry > 1)
+			return ret;
+
+		/* First attempt, just clear anything that is purgeable.
+		 * Second attempt, clear the entire GTT.
+		 */
+		ret = i915_gem_evict_everything(ring->dev, retry == 0);
+		if (ret)
+			return ret;
+
+		retry++;
+	} while (1);
+
+err:
+	obj = list_entry(obj->exec_list.prev,
+			 struct drm_i915_gem_object,
+			 exec_list);
+	while (objects != &obj->exec_list) {
+		if (obj->gtt_space)
+			i915_gem_object_unpin(obj);
+
+		obj = list_entry(obj->exec_list.prev,
+				 struct drm_i915_gem_object,
+				 exec_list);
+	}
+
+	return ret;
+}
+
+static int
+i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
+				  struct drm_file *file,
+				  struct intel_ring_buffer *ring,
+				  struct list_head *objects,
+				  struct eb_objects *eb,
+				  struct drm_i915_gem_exec_object2 *exec,
+				  int count)
+{
+	struct drm_i915_gem_relocation_entry *reloc;
+	struct drm_i915_gem_object *obj;
+	int i, total, ret;
+
+	/* We may process another execbuffer during the unlock... */
+	while (!list_empty(objects)) {
+		obj = list_first_entry(objects,
+				       struct drm_i915_gem_object,
+				       exec_list);
+		list_del_init(&obj->exec_list);
+		drm_gem_object_unreference(&obj->base);
+	}
+
+	mutex_unlock(&dev->struct_mutex);
+
+	total = 0;
+	for (i = 0; i < count; i++)
+		total += exec[i].relocation_count;
+
+	reloc = drm_malloc_ab(total, sizeof(*reloc));
+	if (reloc == NULL) {
+		mutex_lock(&dev->struct_mutex);
+		return -ENOMEM;
+	}
+
+	total = 0;
+	for (i = 0; i < count; i++) {
+		struct drm_i915_gem_relocation_entry __user *user_relocs;
+
+		user_relocs = (void __user *)(uintptr_t)exec[i].relocs_ptr;
+
+		if (copy_from_user(reloc+total, user_relocs,
+				   exec[i].relocation_count * sizeof(*reloc))) {
+			ret = -EFAULT;
+			mutex_lock(&dev->struct_mutex);
+			goto err;
+		}
+
+		total += exec[i].relocation_count;
+	}
+
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret) {
+		mutex_lock(&dev->struct_mutex);
+		goto err;
+	}
+
+	/* reacquire the objects */
+	eb_reset(eb);
+	for (i = 0; i < count; i++) {
+		struct drm_i915_gem_object *obj;
+
+		obj = to_intel_bo(drm_gem_object_lookup(dev, file,
+							exec[i].handle));
+		if (obj == NULL) {
+			DRM_ERROR("Invalid object handle %d at index %d\n",
+				   exec[i].handle, i);
+			ret = -ENOENT;
+			goto err;
+		}
+
+		list_add_tail(&obj->exec_list, objects);
+		obj->exec_handle = exec[i].handle;
+		obj->exec_entry = &exec[i];
+		eb_add_object(eb, obj);
+	}
+
+	ret = i915_gem_execbuffer_reserve(ring, file, objects);
+	if (ret)
+		goto err;
+
+	total = 0;
+	list_for_each_entry(obj, objects, exec_list) {
+		obj->base.pending_read_domains = 0;
+		obj->base.pending_write_domain = 0;
+		ret = i915_gem_execbuffer_relocate_object_slow(obj, eb,
+							       reloc + total);
+		if (ret)
+			goto err;
+
+		total += exec->relocation_count;
+		exec++;
+	}
+
+	/* Leave the user relocations untouched; this is the painfully slow path,
+	 * and we want to avoid the complication of dropping the lock whilst
+	 * having buffers reserved in the aperture and so causing spurious
+	 * ENOSPC for random operations.
+	 */
+
+err:
+	drm_free_large(reloc);
+	return ret;
+}
+
+static int
+i915_gem_execbuffer_flush(struct drm_device *dev,
+			  uint32_t invalidate_domains,
+			  uint32_t flush_domains,
+			  uint32_t flush_rings)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	int i, ret;
+
+	if (flush_domains & I915_GEM_DOMAIN_CPU)
+		intel_gtt_chipset_flush();
+
+	if (flush_domains & I915_GEM_DOMAIN_GTT)
+		wmb();
+
+	if ((flush_domains | invalidate_domains) & I915_GEM_GPU_DOMAINS) {
+		for (i = 0; i < I915_NUM_RINGS; i++)
+			if (flush_rings & (1 << i)) {
+				ret = i915_gem_flush_ring(dev,
+							  &dev_priv->ring[i],
+							  invalidate_domains,
+							  flush_domains);
+				if (ret)
+					return ret;
+			}
+	}
+
+	return 0;
+}
+
+static int
+i915_gem_execbuffer_sync_rings(struct drm_i915_gem_object *obj,
+			       struct intel_ring_buffer *to)
+{
+	struct intel_ring_buffer *from = obj->ring;
+	u32 seqno;
+	int ret, idx;
+
+	if (from == NULL || to == from)
+		return 0;
+
+	if (INTEL_INFO(obj->base.dev)->gen < 6)
+		return i915_gem_object_wait_rendering(obj, true);
+
+	idx = intel_ring_sync_index(from, to);
+
+	seqno = obj->last_rendering_seqno;
+	if (seqno <= from->sync_seqno[idx])
+		return 0;
+
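+	/* If the seqno we need to wait for has not yet been emitted, queue
+	 * a request on the signalling ring so that a breadcrumb exists to
+	 * wait upon.
+	 */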
+	if (seqno == from->outstanding_lazy_request) {
+		struct drm_i915_gem_request *request;
+
+		request = kzalloc(sizeof(*request), GFP_KERNEL);
+		if (request == NULL)
+			return -ENOMEM;
+
+		ret = i915_add_request(obj->base.dev, NULL, request, from);
+		if (ret) {
+			kfree(request);
+			return ret;
+		}
+
+		seqno = request->seqno;
+	}
+
+	from->sync_seqno[idx] = seqno;
+	return intel_ring_sync(to, from, seqno - 1);
+}
+
+static int
+i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
+				struct list_head *objects)
+{
+	struct drm_i915_gem_object *obj;
+	struct change_domains cd;
+	int ret;
+
+	cd.invalidate_domains = 0;
+	cd.flush_domains = 0;
+	cd.flush_rings = 0;
+	list_for_each_entry(obj, objects, exec_list)
+		i915_gem_object_set_to_gpu_domain(obj, ring, &cd);
+
+	if (cd.invalidate_domains | cd.flush_domains) {
+#if WATCH_EXEC
+		DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
+			  __func__,
+			 cd.invalidate_domains,
+			 cd.flush_domains);
+#endif
+		ret = i915_gem_execbuffer_flush(ring->dev,
+						cd.invalidate_domains,
+						cd.flush_domains,
+						cd.flush_rings);
+		if (ret)
+			return ret;
+	}
+
+	list_for_each_entry(obj, objects, exec_list) {
+		ret = i915_gem_execbuffer_sync_rings(obj, ring);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+static bool
+i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
+{
+	return ((exec->batch_start_offset | exec->batch_len) & 0x7) == 0;
+}
+
+static int
+validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
+		   int count)
+{
+	int i;
+
+	for (i = 0; i < count; i++) {
+		char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr;
+		int length; /* limited by fault_in_pages_readable() */
+
+		/* First check for malicious input causing overflow */
+		if (exec[i].relocation_count >
+		    INT_MAX / sizeof(struct drm_i915_gem_relocation_entry))
+			return -EINVAL;
+
+		length = exec[i].relocation_count *
+			sizeof(struct drm_i915_gem_relocation_entry);
+		if (!access_ok(VERIFY_READ, ptr, length))
+			return -EFAULT;
+
+		/* we may also need to update the presumed offsets */
+		if (!access_ok(VERIFY_WRITE, ptr, length))
+			return -EFAULT;
+
+		if (fault_in_pages_readable(ptr, length))
+			return -EFAULT;
+	}
+
+	return 0;
+}
+
+static int
+i915_gem_execbuffer_wait_for_flips(struct intel_ring_buffer *ring,
+				   struct list_head *objects)
+{
+	struct drm_i915_gem_object *obj;
+	int flips;
+
+	/* Check for any pending flips. As we only maintain a flip queue depth
+	 * of 1, we can simply insert a WAIT for the next display flip prior
+	 * to executing the batch and avoid stalling the CPU.
+	 */
+	flips = 0;
+	list_for_each_entry(obj, objects, exec_list) {
+		if (obj->base.write_domain)
+			flips |= atomic_read(&obj->pending_flip);
+	}
+	if (flips) {
+		int plane, flip_mask, ret;
+
+		for (plane = 0; flips >> plane; plane++) {
+			if (((flips >> plane) & 1) == 0)
+				continue;
+
+			if (plane)
+				flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
+			else
+				flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
+
+			ret = intel_ring_begin(ring, 2);
+			if (ret)
+				return ret;
+
+			intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
+			intel_ring_emit(ring, MI_NOOP);
+			intel_ring_advance(ring);
+		}
+	}
+
+	return 0;
+}
+
+static void
+i915_gem_execbuffer_move_to_active(struct list_head *objects,
+				   struct intel_ring_buffer *ring,
+				   u32 seqno)
+{
+	struct drm_i915_gem_object *obj;
+
+	list_for_each_entry(obj, objects, exec_list) {
+		obj->base.read_domains = obj->base.pending_read_domains;
+		obj->base.write_domain = obj->base.pending_write_domain;
+		obj->fenced_gpu_access = obj->pending_fenced_gpu_access;
+
+		i915_gem_object_move_to_active(obj, ring, seqno);
+		if (obj->base.write_domain) {
+			obj->dirty = 1;
+			obj->pending_gpu_write = true;
+			list_move_tail(&obj->gpu_write_list,
+				       &ring->gpu_write_list);
+			intel_mark_busy(ring->dev, obj);
+		}
+
+		trace_i915_gem_object_change_domain(obj,
+						    obj->base.read_domains,
+						    obj->base.write_domain);
+	}
+}
+
+static void
+i915_gem_execbuffer_retire_commands(struct drm_device *dev,
+				    struct drm_file *file,
+				    struct intel_ring_buffer *ring)
+{
+	struct drm_i915_gem_request *request;
+	u32 invalidate;
+
+	/*
+	 * Ensure that the commands in the batch buffer are
+	 * finished before the interrupt fires.
+	 *
+	 * The sampler always gets flushed on i965 (sigh).
+	 */
+	invalidate = I915_GEM_DOMAIN_COMMAND;
+	if (INTEL_INFO(dev)->gen >= 4)
+		invalidate |= I915_GEM_DOMAIN_SAMPLER;
+	if (ring->flush(ring, invalidate, 0)) {
+		i915_gem_next_request_seqno(dev, ring);
+		return;
+	}
+
+	/* Add a breadcrumb for the completion of the batch buffer */
+	request = kzalloc(sizeof(*request), GFP_KERNEL);
+	if (request == NULL || i915_add_request(dev, file, request, ring)) {
+		i915_gem_next_request_seqno(dev, ring);
+		kfree(request);
+	}
+}
+
+static int
+i915_gem_do_execbuffer(struct drm_device *dev, void *data,
+		       struct drm_file *file,
+		       struct drm_i915_gem_execbuffer2 *args,
+		       struct drm_i915_gem_exec_object2 *exec)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct list_head objects;
+	struct eb_objects *eb;
+	struct drm_i915_gem_object *batch_obj;
+	struct drm_clip_rect *cliprects = NULL;
+	struct intel_ring_buffer *ring;
+	u32 exec_start, exec_len;
+	u32 seqno;
+	int ret, mode, i;
+
+	if (!i915_gem_check_execbuffer(args)) {
+		DRM_ERROR("execbuf with invalid offset/length\n");
+		return -EINVAL;
+	}
+
+	ret = validate_exec_list(exec, args->buffer_count);
+	if (ret)
+		return ret;
+
+#if WATCH_EXEC
+	DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
+		  (int) args->buffers_ptr, args->buffer_count, args->batch_len);
+#endif
+	switch (args->flags & I915_EXEC_RING_MASK) {
+	case I915_EXEC_DEFAULT:
+	case I915_EXEC_RENDER:
+		ring = &dev_priv->ring[RCS];
+		break;
+	case I915_EXEC_BSD:
+		if (!HAS_BSD(dev)) {
+			DRM_ERROR("execbuf with invalid ring (BSD)\n");
+			return -EINVAL;
+		}
+		ring = &dev_priv->ring[VCS];
+		break;
+	case I915_EXEC_BLT:
+		if (!HAS_BLT(dev)) {
+			DRM_ERROR("execbuf with invalid ring (BLT)\n");
+			return -EINVAL;
+		}
+		ring = &dev_priv->ring[BCS];
+		break;
+	default:
+		DRM_ERROR("execbuf with unknown ring: %d\n",
+			  (int)(args->flags & I915_EXEC_RING_MASK));
+		return -EINVAL;
+	}
+
+	mode = args->flags & I915_EXEC_CONSTANTS_MASK;
+	switch (mode) {
+	case I915_EXEC_CONSTANTS_REL_GENERAL:
+	case I915_EXEC_CONSTANTS_ABSOLUTE:
+	case I915_EXEC_CONSTANTS_REL_SURFACE:
+		if (ring == &dev_priv->ring[RCS] &&
+		    mode != dev_priv->relative_constants_mode) {
+			if (INTEL_INFO(dev)->gen < 4)
+				return -EINVAL;
+
+			if (INTEL_INFO(dev)->gen > 5 &&
+			    mode == I915_EXEC_CONSTANTS_REL_SURFACE)
+				return -EINVAL;
+
+			ret = intel_ring_begin(ring, 4);
+			if (ret)
+				return ret;
+
+			intel_ring_emit(ring, MI_NOOP);
+			intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
+			intel_ring_emit(ring, INSTPM);
+			intel_ring_emit(ring,
+					I915_EXEC_CONSTANTS_MASK << 16 | mode);
+			intel_ring_advance(ring);
+
+			dev_priv->relative_constants_mode = mode;
+		}
+		break;
+	default:
+		DRM_ERROR("execbuf with unknown constants: %d\n", mode);
+		return -EINVAL;
+	}
+
+	if (args->buffer_count < 1) {
+		DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
+		return -EINVAL;
+	}
+
+	if (args->num_cliprects != 0) {
+		if (ring != &dev_priv->ring[RCS]) {
+			DRM_ERROR("clip rectangles are only valid with the render ring\n");
+			return -EINVAL;
+		}
+
+		cliprects = kmalloc(args->num_cliprects * sizeof(*cliprects),
+				    GFP_KERNEL);
+		if (cliprects == NULL) {
+			ret = -ENOMEM;
+			goto pre_mutex_err;
+		}
+
+		if (copy_from_user(cliprects,
+				     (struct drm_clip_rect __user *)(uintptr_t)
+				     args->cliprects_ptr,
+				     sizeof(*cliprects)*args->num_cliprects)) {
+			ret = -EFAULT;
+			goto pre_mutex_err;
+		}
+	}
+
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret)
+		goto pre_mutex_err;
+
+	if (dev_priv->mm.suspended) {
+		mutex_unlock(&dev->struct_mutex);
+		ret = -EBUSY;
+		goto pre_mutex_err;
+	}
+
+	eb = eb_create(args->buffer_count);
+	if (eb == NULL) {
+		mutex_unlock(&dev->struct_mutex);
+		ret = -ENOMEM;
+		goto pre_mutex_err;
+	}
+
+	/* Look up object handles */
+	INIT_LIST_HEAD(&objects);
+	for (i = 0; i < args->buffer_count; i++) {
+		struct drm_i915_gem_object *obj;
+
+		obj = to_intel_bo(drm_gem_object_lookup(dev, file,
+							exec[i].handle));
+		if (obj == NULL) {
+			DRM_ERROR("Invalid object handle %d at index %d\n",
+				   exec[i].handle, i);
+			/* prevent error path from reading uninitialized data */
+			ret = -ENOENT;
+			goto err;
+		}
+
+		if (!list_empty(&obj->exec_list)) {
+			DRM_ERROR("Object %p [handle %d, index %d] appears more than once in object list\n",
+				   obj, exec[i].handle, i);
+			ret = -EINVAL;
+			goto err;
+		}
+
+		list_add_tail(&obj->exec_list, &objects);
+		obj->exec_handle = exec[i].handle;
+		obj->exec_entry = &exec[i];
+		eb_add_object(eb, obj);
+	}
+
+	/* take note of the batch buffer before we might reorder the lists */
+	batch_obj = list_entry(objects.prev,
+			       struct drm_i915_gem_object,
+			       exec_list);
+
+	/* Move the objects en-masse into the GTT, evicting if necessary. */
+	ret = i915_gem_execbuffer_reserve(ring, file, &objects);
+	if (ret)
+		goto err;
+
+	/* The objects are in their final locations, apply the relocations. */
+	ret = i915_gem_execbuffer_relocate(dev, eb, &objects);
+	if (ret) {
+		if (ret == -EFAULT) {
+			ret = i915_gem_execbuffer_relocate_slow(dev, file, ring,
+								&objects, eb,
+								exec,
+								args->buffer_count);
+			BUG_ON(!mutex_is_locked(&dev->struct_mutex));
+		}
+		if (ret)
+			goto err;
+	}
+
+	/* Set the pending read domains for the batch buffer to COMMAND */
+	if (batch_obj->base.pending_write_domain) {
+		DRM_ERROR("Attempting to use self-modifying batch buffer\n");
+		ret = -EINVAL;
+		goto err;
+	}
+	batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
+
+	ret = i915_gem_execbuffer_move_to_gpu(ring, &objects);
+	if (ret)
+		goto err;
+
+	ret = i915_gem_execbuffer_wait_for_flips(ring, &objects);
+	if (ret)
+		goto err;
+
+	seqno = i915_gem_next_request_seqno(dev, ring);
+	for (i = 0; i < I915_NUM_RINGS-1; i++) {
+		if (seqno < ring->sync_seqno[i]) {
+			/* The GPU cannot handle its semaphore value wrapping,
+			 * so every billion or so execbuffers, we need to stall
+			 * the GPU in order to reset the counters.
+			 */
+			ret = i915_gpu_idle(dev);
+			if (ret)
+				goto err;
+
+			BUG_ON(ring->sync_seqno[i]);
+		}
+	}
+
+	exec_start = batch_obj->gtt_offset + args->batch_start_offset;
+	exec_len = args->batch_len;
+	if (cliprects) {
+		for (i = 0; i < args->num_cliprects; i++) {
+			ret = i915_emit_box(dev, &cliprects[i],
+					    args->DR1, args->DR4);
+			if (ret)
+				goto err;
+
+			ret = ring->dispatch_execbuffer(ring,
+							exec_start, exec_len);
+			if (ret)
+				goto err;
+		}
+	} else {
+		ret = ring->dispatch_execbuffer(ring, exec_start, exec_len);
+		if (ret)
+			goto err;
+	}
+
+	i915_gem_execbuffer_move_to_active(&objects, ring, seqno);
+	i915_gem_execbuffer_retire_commands(dev, file, ring);
+
+err:
+	eb_destroy(eb);
+	while (!list_empty(&objects)) {
+		struct drm_i915_gem_object *obj;
+
+		obj = list_first_entry(&objects,
+				       struct drm_i915_gem_object,
+				       exec_list);
+		list_del_init(&obj->exec_list);
+		drm_gem_object_unreference(&obj->base);
+	}
+
+	mutex_unlock(&dev->struct_mutex);
+
+pre_mutex_err:
+	kfree(cliprects);
+	return ret;
+}
+
+/*
+ * Legacy execbuffer just creates an exec2 list from the original exec object
+ * list array and passes it to the real function.
+ */
+int
+i915_gem_execbuffer(struct drm_device *dev, void *data,
+		    struct drm_file *file)
+{
+	struct drm_i915_gem_execbuffer *args = data;
+	struct drm_i915_gem_execbuffer2 exec2;
+	struct drm_i915_gem_exec_object *exec_list = NULL;
+	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
+	int ret, i;
+
+#if WATCH_EXEC
+	DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
+		  (int) args->buffers_ptr, args->buffer_count, args->batch_len);
+#endif
+
+	if (args->buffer_count < 1) {
+		DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
+		return -EINVAL;
+	}
+
+	/* Copy in the exec list from userland */
+	exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
+	exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
+	if (exec_list == NULL || exec2_list == NULL) {
+		DRM_ERROR("Failed to allocate exec list for %d buffers\n",
+			  args->buffer_count);
+		drm_free_large(exec_list);
+		drm_free_large(exec2_list);
+		return -ENOMEM;
+	}
+	ret = copy_from_user(exec_list,
+			     (struct drm_i915_relocation_entry __user *)
+			     (uintptr_t) args->buffers_ptr,
+			     sizeof(*exec_list) * args->buffer_count);
+	if (ret != 0) {
+		DRM_ERROR("copy %d exec entries failed %d\n",
+			  args->buffer_count, ret);
+		drm_free_large(exec_list);
+		drm_free_large(exec2_list);
+		return -EFAULT;
+	}
+
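+	/* Translate the legacy exec entries into the exec2 layout.  Pre-gen4
+	 * chips need a fence register for tiled GPU access, so conservatively
+	 * flag every object on those.
+	 */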
+	for (i = 0; i < args->buffer_count; i++) {
+		exec2_list[i].handle = exec_list[i].handle;
+		exec2_list[i].relocation_count = exec_list[i].relocation_count;
+		exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
+		exec2_list[i].alignment = exec_list[i].alignment;
+		exec2_list[i].offset = exec_list[i].offset;
+		if (INTEL_INFO(dev)->gen < 4)
+			exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
+		else
+			exec2_list[i].flags = 0;
+	}
+
+	exec2.buffers_ptr = args->buffers_ptr;
+	exec2.buffer_count = args->buffer_count;
+	exec2.batch_start_offset = args->batch_start_offset;
+	exec2.batch_len = args->batch_len;
+	exec2.DR1 = args->DR1;
+	exec2.DR4 = args->DR4;
+	exec2.num_cliprects = args->num_cliprects;
+	exec2.cliprects_ptr = args->cliprects_ptr;
+	exec2.flags = I915_EXEC_RENDER;
+
+	ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list);
+	if (!ret) {
+		/* Copy the new buffer offsets back to the user's exec list. */
+		for (i = 0; i < args->buffer_count; i++)
+			exec_list[i].offset = exec2_list[i].offset;
+		/* ... and back out to userspace */
+		ret = copy_to_user((struct drm_i915_relocation_entry __user *)
+				   (uintptr_t) args->buffers_ptr,
+				   exec_list,
+				   sizeof(*exec_list) * args->buffer_count);
+		if (ret) {
+			ret = -EFAULT;
+			DRM_ERROR("failed to copy %d exec entries "
+				  "back to user (%d)\n",
+				  args->buffer_count, ret);
+		}
+	}
+
+	drm_free_large(exec_list);
+	drm_free_large(exec2_list);
+	return ret;
+}
+
+int
+i915_gem_execbuffer2(struct drm_device *dev, void *data,
+		     struct drm_file *file)
+{
+	struct drm_i915_gem_execbuffer2 *args = data;
+	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
+	int ret;
+
+#if WATCH_EXEC
+	DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
+		  (int) args->buffers_ptr, args->buffer_count, args->batch_len);
+#endif
+
+	if (args->buffer_count < 1) {
+		DRM_ERROR("execbuf2 with %d buffers\n", args->buffer_count);
+		return -EINVAL;
+	}
+
+	exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
+	if (exec2_list == NULL) {
+		DRM_ERROR("Failed to allocate exec list for %d buffers\n",
+			  args->buffer_count);
+		return -ENOMEM;
+	}
+	ret = copy_from_user(exec2_list,
+			     (struct drm_i915_relocation_entry __user *)
+			     (uintptr_t) args->buffers_ptr,
+			     sizeof(*exec2_list) * args->buffer_count);
+	if (ret != 0) {
+		DRM_ERROR("copy %d exec entries failed %d\n",
+			  args->buffer_count, ret);
+		drm_free_large(exec2_list);
+		return -EFAULT;
+	}
+
+	ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
+	if (!ret) {
+		/* Copy the new buffer offsets back to the user's exec list. */
+		ret = copy_to_user((struct drm_i915_relocation_entry __user *)
+				   (uintptr_t) args->buffers_ptr,
+				   exec2_list,
+				   sizeof(*exec2_list) * args->buffer_count);
+		if (ret) {
+			ret = -EFAULT;
+			DRM_ERROR("failed to copy %d exec entries "
+				  "back to user (%d)\n",
+				  args->buffer_count, ret);
+		}
+	}
+
+	drm_free_large(exec2_list);
+	return ret;
+}
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
new file mode 100644
index 0000000..70433ae
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -0,0 +1,95 @@
+/*
+ * Copyright © 2010 Daniel Vetter
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "i915_drm.h"
+#include "i915_drv.h"
+#include "i915_trace.h"
+#include "intel_drv.h"
+
+void i915_gem_restore_gtt_mappings(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_gem_object *obj;
+
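+	/* Re-establish the GTT mapping of every bound object, e.g. after
+	 * resume when the previous translation entries are no longer valid.
+	 */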
+	list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
+		i915_gem_clflush_object(obj);
+
+		if (dev_priv->mm.gtt->needs_dmar) {
+			BUG_ON(!obj->sg_list);
+
+			intel_gtt_insert_sg_entries(obj->sg_list,
+						    obj->num_sg,
+						    obj->gtt_space->start
+							>> PAGE_SHIFT,
+						    obj->agp_type);
+		} else
+			intel_gtt_insert_pages(obj->gtt_space->start
+						   >> PAGE_SHIFT,
+					       obj->base.size >> PAGE_SHIFT,
+					       obj->pages,
+					       obj->agp_type);
+	}
+
+	intel_gtt_chipset_flush();
+}
+
+int i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj)
+{
+	struct drm_device *dev = obj->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret;
+
+	if (dev_priv->mm.gtt->needs_dmar) {
+		ret = intel_gtt_map_memory(obj->pages,
+					   obj->base.size >> PAGE_SHIFT,
+					   &obj->sg_list,
+					   &obj->num_sg);
+		if (ret != 0)
+			return ret;
+
+		intel_gtt_insert_sg_entries(obj->sg_list,
+					    obj->num_sg,
+					    obj->gtt_space->start >> PAGE_SHIFT,
+					    obj->agp_type);
+	} else
+		intel_gtt_insert_pages(obj->gtt_space->start >> PAGE_SHIFT,
+				       obj->base.size >> PAGE_SHIFT,
+				       obj->pages,
+				       obj->agp_type);
+
+	return 0;
+}
+
+void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
+{
+	intel_gtt_clear_range(obj->gtt_space->start >> PAGE_SHIFT,
+			      obj->base.size >> PAGE_SHIFT);
+
+	if (obj->sg_list) {
+		intel_gtt_unmap_memory(obj->sg_list, obj->num_sg);
+		obj->sg_list = NULL;
+	}
+}
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index af352de..22a32b9 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -181,7 +181,7 @@
 }
 
 /* Check pitch constraints for all chips & tiling formats */
-bool
+static bool
 i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
 {
 	int tile_width;
@@ -232,32 +232,44 @@
 	return true;
 }
 
-bool
-i915_gem_object_fence_offset_ok(struct drm_gem_object *obj, int tiling_mode)
+/* Is the current GTT allocation valid for the change in tiling? */
+static bool
+i915_gem_object_fence_ok(struct drm_i915_gem_object *obj, int tiling_mode)
 {
-	struct drm_device *dev = obj->dev;
-	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
-
-	if (obj_priv->gtt_space == NULL)
-		return true;
+	u32 size;
 
 	if (tiling_mode == I915_TILING_NONE)
 		return true;
 
-	if (INTEL_INFO(dev)->gen >= 4)
+	if (INTEL_INFO(obj->base.dev)->gen >= 4)
 		return true;
 
-	if (obj_priv->gtt_offset & (obj->size - 1))
-		return false;
-
-	if (IS_GEN3(dev)) {
-		if (obj_priv->gtt_offset & ~I915_FENCE_START_MASK)
+	if (INTEL_INFO(obj->base.dev)->gen == 3) {
+		if (obj->gtt_offset & ~I915_FENCE_START_MASK)
 			return false;
 	} else {
-		if (obj_priv->gtt_offset & ~I830_FENCE_START_MASK)
+		if (obj->gtt_offset & ~I830_FENCE_START_MASK)
 			return false;
 	}
 
+	/*
+	 * Previous chips need to be aligned to the size of the smallest
+	 * fence register that can contain the object.
+	 */
+	if (INTEL_INFO(obj->base.dev)->gen == 3)
+		size = 1024*1024;
+	else
+		size = 512*1024;
+
+	while (size < obj->base.size)
+		size <<= 1;
+
+	if (obj->gtt_space->size != size)
+		return false;
+
+	if (obj->gtt_offset & (size - 1))
+		return false;
+
 	return true;
 }
 
@@ -267,30 +279,29 @@
  */
 int
 i915_gem_set_tiling(struct drm_device *dev, void *data,
-		   struct drm_file *file_priv)
+		   struct drm_file *file)
 {
 	struct drm_i915_gem_set_tiling *args = data;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_gem_object *obj;
-	struct drm_i915_gem_object *obj_priv;
+	struct drm_i915_gem_object *obj;
 	int ret;
 
 	ret = i915_gem_check_is_wedged(dev);
 	if (ret)
 		return ret;
 
-	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
 	if (obj == NULL)
 		return -ENOENT;
-	obj_priv = to_intel_bo(obj);
 
-	if (!i915_tiling_ok(dev, args->stride, obj->size, args->tiling_mode)) {
-		drm_gem_object_unreference_unlocked(obj);
+	if (!i915_tiling_ok(dev,
+			    args->stride, obj->base.size, args->tiling_mode)) {
+		drm_gem_object_unreference_unlocked(&obj->base);
 		return -EINVAL;
 	}
 
-	if (obj_priv->pin_count) {
-		drm_gem_object_unreference_unlocked(obj);
+	if (obj->pin_count) {
+		drm_gem_object_unreference_unlocked(&obj->base);
 		return -EBUSY;
 	}
 
@@ -324,34 +335,28 @@
 	}
 
 	mutex_lock(&dev->struct_mutex);
-	if (args->tiling_mode != obj_priv->tiling_mode ||
-	    args->stride != obj_priv->stride) {
+	if (args->tiling_mode != obj->tiling_mode ||
+	    args->stride != obj->stride) {
 		/* We need to rebind the object if its current allocation
 		 * no longer meets the alignment restrictions for its new
 		 * tiling mode. Otherwise we can just leave it alone, but
 		 * need to ensure that any fence register is cleared.
 		 */
-		if (!i915_gem_object_fence_offset_ok(obj, args->tiling_mode))
-			ret = i915_gem_object_unbind(obj);
-		else if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
-			ret = i915_gem_object_put_fence_reg(obj, true);
-		else
-			i915_gem_release_mmap(obj);
+		i915_gem_release_mmap(obj);
 
-		if (ret != 0) {
-			args->tiling_mode = obj_priv->tiling_mode;
-			args->stride = obj_priv->stride;
-			goto err;
-		}
+		obj->map_and_fenceable =
+			obj->gtt_space == NULL ||
+			(obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end &&
+			 i915_gem_object_fence_ok(obj, args->tiling_mode));
 
-		obj_priv->tiling_mode = args->tiling_mode;
-		obj_priv->stride = args->stride;
+		obj->tiling_changed = true;
+		obj->tiling_mode = args->tiling_mode;
+		obj->stride = args->stride;
 	}
-err:
-	drm_gem_object_unreference(obj);
+	drm_gem_object_unreference(&obj->base);
 	mutex_unlock(&dev->struct_mutex);
 
-	return ret;
+	return 0;
 }
 
 /**
@@ -359,22 +364,20 @@
  */
 int
 i915_gem_get_tiling(struct drm_device *dev, void *data,
-		   struct drm_file *file_priv)
+		   struct drm_file *file)
 {
 	struct drm_i915_gem_get_tiling *args = data;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_gem_object *obj;
-	struct drm_i915_gem_object *obj_priv;
+	struct drm_i915_gem_object *obj;
 
-	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
 	if (obj == NULL)
 		return -ENOENT;
-	obj_priv = to_intel_bo(obj);
 
 	mutex_lock(&dev->struct_mutex);
 
-	args->tiling_mode = obj_priv->tiling_mode;
-	switch (obj_priv->tiling_mode) {
+	args->tiling_mode = obj->tiling_mode;
+	switch (obj->tiling_mode) {
 	case I915_TILING_X:
 		args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
 		break;
@@ -394,7 +397,7 @@
 	if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17)
 		args->swizzle_mode = I915_BIT_6_SWIZZLE_9_10;
 
-	drm_gem_object_unreference(obj);
+	drm_gem_object_unreference(&obj->base);
 	mutex_unlock(&dev->struct_mutex);
 
 	return 0;
@@ -424,46 +427,44 @@
 }
 
 void
-i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj)
+i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
 {
-	struct drm_device *dev = obj->dev;
+	struct drm_device *dev = obj->base.dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
-	int page_count = obj->size >> PAGE_SHIFT;
+	int page_count = obj->base.size >> PAGE_SHIFT;
 	int i;
 
 	if (dev_priv->mm.bit_6_swizzle_x != I915_BIT_6_SWIZZLE_9_10_17)
 		return;
 
-	if (obj_priv->bit_17 == NULL)
+	if (obj->bit_17 == NULL)
 		return;
 
 	for (i = 0; i < page_count; i++) {
-		char new_bit_17 = page_to_phys(obj_priv->pages[i]) >> 17;
+		char new_bit_17 = page_to_phys(obj->pages[i]) >> 17;
 		if ((new_bit_17 & 0x1) !=
-		    (test_bit(i, obj_priv->bit_17) != 0)) {
-			i915_gem_swizzle_page(obj_priv->pages[i]);
-			set_page_dirty(obj_priv->pages[i]);
+		    (test_bit(i, obj->bit_17) != 0)) {
+			i915_gem_swizzle_page(obj->pages[i]);
+			set_page_dirty(obj->pages[i]);
 		}
 	}
 }
 
 void
-i915_gem_object_save_bit_17_swizzle(struct drm_gem_object *obj)
+i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
 {
-	struct drm_device *dev = obj->dev;
+	struct drm_device *dev = obj->base.dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
-	int page_count = obj->size >> PAGE_SHIFT;
+	int page_count = obj->base.size >> PAGE_SHIFT;
 	int i;
 
 	if (dev_priv->mm.bit_6_swizzle_x != I915_BIT_6_SWIZZLE_9_10_17)
 		return;
 
-	if (obj_priv->bit_17 == NULL) {
-		obj_priv->bit_17 = kmalloc(BITS_TO_LONGS(page_count) *
+	if (obj->bit_17 == NULL) {
+		obj->bit_17 = kmalloc(BITS_TO_LONGS(page_count) *
 					   sizeof(long), GFP_KERNEL);
-		if (obj_priv->bit_17 == NULL) {
+		if (obj->bit_17 == NULL) {
 			DRM_ERROR("Failed to allocate memory for bit 17 "
 				  "record\n");
 			return;
@@ -471,9 +472,9 @@
 	}
 
 	for (i = 0; i < page_count; i++) {
-		if (page_to_phys(obj_priv->pages[i]) & (1 << 17))
-			__set_bit(i, obj_priv->bit_17);
+		if (page_to_phys(obj->pages[i]) & (1 << 17))
+			__set_bit(i, obj->bit_17);
 		else
-			__clear_bit(i, obj_priv->bit_17);
+			__clear_bit(i, obj->bit_17);
 	}
 }
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 729fd0c..e418e8b 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -64,64 +64,24 @@
 #define DRM_I915_VBLANK_PIPE_ALL	(DRM_I915_VBLANK_PIPE_A | \
 					 DRM_I915_VBLANK_PIPE_B)
 
-void
-ironlake_enable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
-{
-	if ((dev_priv->gt_irq_mask_reg & mask) != 0) {
-		dev_priv->gt_irq_mask_reg &= ~mask;
-		I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg);
-		(void) I915_READ(GTIMR);
-	}
-}
-
-void
-ironlake_disable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
-{
-	if ((dev_priv->gt_irq_mask_reg & mask) != mask) {
-		dev_priv->gt_irq_mask_reg |= mask;
-		I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg);
-		(void) I915_READ(GTIMR);
-	}
-}
-
 /* For display hotplug interrupt */
 static void
 ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
 {
-	if ((dev_priv->irq_mask_reg & mask) != 0) {
-		dev_priv->irq_mask_reg &= ~mask;
-		I915_WRITE(DEIMR, dev_priv->irq_mask_reg);
-		(void) I915_READ(DEIMR);
+	if ((dev_priv->irq_mask & mask) != 0) {
+		dev_priv->irq_mask &= ~mask;
+		I915_WRITE(DEIMR, dev_priv->irq_mask);
+		POSTING_READ(DEIMR);
 	}
 }
 
 static inline void
 ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
 {
-	if ((dev_priv->irq_mask_reg & mask) != mask) {
-		dev_priv->irq_mask_reg |= mask;
-		I915_WRITE(DEIMR, dev_priv->irq_mask_reg);
-		(void) I915_READ(DEIMR);
-	}
-}
-
-void
-i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
-{
-	if ((dev_priv->irq_mask_reg & mask) != 0) {
-		dev_priv->irq_mask_reg &= ~mask;
-		I915_WRITE(IMR, dev_priv->irq_mask_reg);
-		(void) I915_READ(IMR);
-	}
-}
-
-void
-i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
-{
-	if ((dev_priv->irq_mask_reg & mask) != mask) {
-		dev_priv->irq_mask_reg |= mask;
-		I915_WRITE(IMR, dev_priv->irq_mask_reg);
-		(void) I915_READ(IMR);
+	if ((dev_priv->irq_mask & mask) != mask) {
+		dev_priv->irq_mask |= mask;
+		I915_WRITE(DEIMR, dev_priv->irq_mask);
+		POSTING_READ(DEIMR);
 	}
 }
 
@@ -144,7 +104,7 @@
 		dev_priv->pipestat[pipe] |= mask;
 		/* Enable the interrupt, clear any pending status */
 		I915_WRITE(reg, dev_priv->pipestat[pipe] | (mask >> 16));
-		(void) I915_READ(reg);
+		POSTING_READ(reg);
 	}
 }
 
@@ -156,16 +116,19 @@
 
 		dev_priv->pipestat[pipe] &= ~mask;
 		I915_WRITE(reg, dev_priv->pipestat[pipe]);
-		(void) I915_READ(reg);
+		POSTING_READ(reg);
 	}
 }
 
 /**
  * intel_enable_asle - enable ASLE interrupt for OpRegion
  */
-void intel_enable_asle (struct drm_device *dev)
+void intel_enable_asle(struct drm_device *dev)
 {
-	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	unsigned long irqflags;
+
+	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
 
 	if (HAS_PCH_SPLIT(dev))
 		ironlake_enable_display_irq(dev_priv, DE_GSE);
@@ -176,6 +139,8 @@
 			i915_enable_pipestat(dev_priv, 0,
 					     PIPE_LEGACY_BLC_EVENT_ENABLE);
 	}
+
+	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 }
 
 /**
@@ -243,6 +208,92 @@
 	return I915_READ(reg);
 }
 
+int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
+			     int *vpos, int *hpos)
+{
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	u32 vbl = 0, position = 0;
+	int vbl_start, vbl_end, htotal, vtotal;
+	bool in_vbl = true;
+	int ret = 0;
+
+	if (!i915_pipe_enabled(dev, pipe)) {
+		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
+					"pipe %d\n", pipe);
+		return 0;
+	}
+
+	/* Get vtotal. */
+	vtotal = 1 + ((I915_READ(VTOTAL(pipe)) >> 16) & 0x1fff);
+
+	if (INTEL_INFO(dev)->gen >= 4) {
+		/* No obvious pixelcount register. Only query vertical
+		 * scanout position from Display scan line register.
+		 */
+		position = I915_READ(PIPEDSL(pipe));
+
+		/* Decode into vertical scanout position. Don't have
+		 * horizontal scanout position.
+		 */
+		*vpos = position & 0x1fff;
+		*hpos = 0;
+	} else {
+		/* Have access to pixelcount since start of frame.
+		 * We can split this into vertical and horizontal
+		 * scanout position.
+		 */
+		position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
+
+		htotal = 1 + ((I915_READ(HTOTAL(pipe)) >> 16) & 0x1fff);
+		*vpos = position / htotal;
+		*hpos = position - (*vpos * htotal);
+	}
+
+	/* Query vblank area. */
+	vbl = I915_READ(VBLANK(pipe));
+
+	/* Test position against vblank region. */
+	vbl_start = vbl & 0x1fff;
+	vbl_end = (vbl >> 16) & 0x1fff;
+
+	if ((*vpos < vbl_start) || (*vpos > vbl_end))
+		in_vbl = false;
+
+	/* Inside "upper part" of vblank area? Apply corrective offset: */
+	if (in_vbl && (*vpos >= vbl_start))
+		*vpos = *vpos - vtotal;
+
+	/* Readouts valid? */
+	if (vbl > 0)
+		ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;
+
+	/* In vblank? */
+	if (in_vbl)
+		ret |= DRM_SCANOUTPOS_INVBL;
+
+	return ret;
+}
+
+int i915_get_vblank_timestamp(struct drm_device *dev, int crtc,
+			      int *max_error,
+			      struct timeval *vblank_time,
+			      unsigned flags)
+{
+	struct drm_crtc *drmcrtc;
+
+	if (crtc < 0 || crtc >= dev->num_crtcs) {
+		DRM_ERROR("Invalid crtc %d\n", crtc);
+		return -EINVAL;
+	}
+
+	/* Get drm_crtc to timestamp: */
+	drmcrtc = intel_get_crtc_for_pipe(dev, crtc);
+
+	/* Helper routine in DRM core does all the work: */
+	return drm_calc_vbltimestamp_from_scanoutpos(dev, crtc, max_error,
+						     vblank_time, flags, drmcrtc);
+}
+
 /*
  * Handle hotplug events outside the interrupt handler proper.
  */
@@ -297,20 +348,105 @@
 			struct intel_ring_buffer *ring)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 seqno = ring->get_seqno(dev, ring);
-	ring->irq_gem_seqno = seqno;
+	u32 seqno = ring->get_seqno(ring);
+
 	trace_i915_gem_request_complete(dev, seqno);
+
+	ring->irq_seqno = seqno;
 	wake_up_all(&ring->irq_queue);
+
 	dev_priv->hangcheck_count = 0;
 	mod_timer(&dev_priv->hangcheck_timer,
 		  jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
 }
 
+static void gen6_pm_irq_handler(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	u8 new_delay = dev_priv->cur_delay;
+	u32 pm_iir;
+
+	pm_iir = I915_READ(GEN6_PMIIR);
+	if (!pm_iir)
+		return;
+
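+	/* Render P-state request: step the GPU frequency up on an
+	 * up-threshold interrupt and down on a down-threshold or timeout,
+	 * clamped to the [min_delay, max_delay] range.
+	 */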
+	if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
+		if (dev_priv->cur_delay != dev_priv->max_delay)
+			new_delay = dev_priv->cur_delay + 1;
+		if (new_delay > dev_priv->max_delay)
+			new_delay = dev_priv->max_delay;
+	} else if (pm_iir & (GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT)) {
+		if (dev_priv->cur_delay != dev_priv->min_delay)
+			new_delay = dev_priv->cur_delay - 1;
+		if (new_delay < dev_priv->min_delay) {
+			new_delay = dev_priv->min_delay;
+			I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
+				   I915_READ(GEN6_RP_INTERRUPT_LIMITS) |
+				   ((new_delay << 16) & 0x3f0000));
+		} else {
+			/* Make sure we continue to get down interrupts
+			 * until we hit the minimum frequency */
+			I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
+				   I915_READ(GEN6_RP_INTERRUPT_LIMITS) & ~0x3f0000);
+		}
+
+	}
+
+	gen6_set_rps(dev, new_delay);
+	dev_priv->cur_delay = new_delay;
+
+	I915_WRITE(GEN6_PMIIR, pm_iir);
+}
+
+static void pch_irq_handler(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	u32 pch_iir;
+
+	pch_iir = I915_READ(SDEIIR);
+
+	if (pch_iir & SDE_AUDIO_POWER_MASK)
+		DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
+				 (pch_iir & SDE_AUDIO_POWER_MASK) >>
+				 SDE_AUDIO_POWER_SHIFT);
+
+	if (pch_iir & SDE_GMBUS)
+		DRM_DEBUG_DRIVER("PCH GMBUS interrupt\n");
+
+	if (pch_iir & SDE_AUDIO_HDCP_MASK)
+		DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");
+
+	if (pch_iir & SDE_AUDIO_TRANS_MASK)
+		DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");
+
+	if (pch_iir & SDE_POISON)
+		DRM_ERROR("PCH poison interrupt\n");
+
+	if (pch_iir & SDE_FDI_MASK) {
+		u32 fdia, fdib;
+
+		fdia = I915_READ(FDI_RXA_IIR);
+		fdib = I915_READ(FDI_RXB_IIR);
+		DRM_DEBUG_DRIVER("PCH FDI RX interrupt; FDI RXA IIR: 0x%08x, FDI RXB IIR: 0x%08x\n", fdia, fdib);
+	}
+
+	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
+		DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");
+
+	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
+		DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");
+
+	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
+		DRM_DEBUG_DRIVER("PCH transcoder B underrun interrupt\n");
+	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
+		DRM_DEBUG_DRIVER("PCH transcoder A underrun interrupt\n");
+}
+
 static irqreturn_t ironlake_irq_handler(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 	int ret = IRQ_NONE;
-	u32 de_iir, gt_iir, de_ier, pch_iir;
+	u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
 	u32 hotplug_mask;
 	struct drm_i915_master_private *master_priv;
 	u32 bsd_usr_interrupt = GT_BSD_USER_INTERRUPT;
@@ -321,13 +457,15 @@
 	/* disable master interrupt before clearing iir  */
 	de_ier = I915_READ(DEIER);
 	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
-	(void)I915_READ(DEIER);
+	POSTING_READ(DEIER);
 
 	de_iir = I915_READ(DEIIR);
 	gt_iir = I915_READ(GTIIR);
 	pch_iir = I915_READ(SDEIIR);
+	pm_iir = I915_READ(GEN6_PMIIR);
 
-	if (de_iir == 0 && gt_iir == 0 && pch_iir == 0)
+	if (de_iir == 0 && gt_iir == 0 && pch_iir == 0 &&
+	    (!IS_GEN6(dev) || pm_iir == 0))
 		goto done;
 
 	if (HAS_PCH_CPT(dev))
@@ -344,12 +482,12 @@
 				READ_BREADCRUMB(dev_priv);
 	}
 
-	if (gt_iir & GT_PIPE_NOTIFY)
-		notify_ring(dev, &dev_priv->render_ring);
+	if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY))
+		notify_ring(dev, &dev_priv->ring[RCS]);
 	if (gt_iir & bsd_usr_interrupt)
-		notify_ring(dev, &dev_priv->bsd_ring);
-	if (HAS_BLT(dev) && gt_iir & GT_BLT_USER_INTERRUPT)
-		notify_ring(dev, &dev_priv->blt_ring);
+		notify_ring(dev, &dev_priv->ring[VCS]);
+	if (gt_iir & GT_BLT_USER_INTERRUPT)
+		notify_ring(dev, &dev_priv->ring[BCS]);
 
 	if (de_iir & DE_GSE)
 		intel_opregion_gse_intr(dev);
@@ -371,14 +509,20 @@
 		drm_handle_vblank(dev, 1);
 
 	/* check event from PCH */
-	if ((de_iir & DE_PCH_EVENT) && (pch_iir & hotplug_mask))
-		queue_work(dev_priv->wq, &dev_priv->hotplug_work);
+	if (de_iir & DE_PCH_EVENT) {
+		if (pch_iir & hotplug_mask)
+			queue_work(dev_priv->wq, &dev_priv->hotplug_work);
+		pch_irq_handler(dev);
+	}
 
 	if (de_iir & DE_PCU_EVENT) {
 		I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
 		i915_handle_rps_change(dev);
 	}
 
+	if (IS_GEN6(dev))
+		gen6_pm_irq_handler(dev);
+
 	/* should clear PCH hotplug event before clear CPU irq */
 	I915_WRITE(SDEIIR, pch_iir);
 	I915_WRITE(GTIIR, gt_iir);
@@ -386,7 +530,7 @@
 
 done:
 	I915_WRITE(DEIER, de_ier);
-	(void)I915_READ(DEIER);
+	POSTING_READ(DEIER);
 
 	return ret;
 }
@@ -422,29 +566,23 @@
 
 #ifdef CONFIG_DEBUG_FS
 static struct drm_i915_error_object *
-i915_error_object_create(struct drm_device *dev,
-			 struct drm_gem_object *src)
+i915_error_object_create(struct drm_i915_private *dev_priv,
+			 struct drm_i915_gem_object *src)
 {
-	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_error_object *dst;
-	struct drm_i915_gem_object *src_priv;
 	int page, page_count;
 	u32 reloc_offset;
 
-	if (src == NULL)
+	if (src == NULL || src->pages == NULL)
 		return NULL;
 
-	src_priv = to_intel_bo(src);
-	if (src_priv->pages == NULL)
-		return NULL;
-
-	page_count = src->size / PAGE_SIZE;
+	page_count = src->base.size / PAGE_SIZE;
 
 	dst = kmalloc(sizeof(*dst) + page_count * sizeof (u32 *), GFP_ATOMIC);
 	if (dst == NULL)
 		return NULL;
 
-	reloc_offset = src_priv->gtt_offset;
+	reloc_offset = src->gtt_offset;
 	for (page = 0; page < page_count; page++) {
 		unsigned long flags;
 		void __iomem *s;
@@ -466,7 +604,7 @@
 		reloc_offset += PAGE_SIZE;
 	}
 	dst->page_count = page_count;
-	dst->gtt_offset = src_priv->gtt_offset;
+	dst->gtt_offset = src->gtt_offset;
 
 	return dst;
 
@@ -503,53 +641,98 @@
 	kfree(error);
 }
 
-static u32
-i915_get_bbaddr(struct drm_device *dev, u32 *ring)
+static u32 capture_bo_list(struct drm_i915_error_buffer *err,
+			   int count,
+			   struct list_head *head)
 {
-	u32 cmd;
+	struct drm_i915_gem_object *obj;
+	int i = 0;
 
-	if (IS_I830(dev) || IS_845G(dev))
-		cmd = MI_BATCH_BUFFER;
-	else if (INTEL_INFO(dev)->gen >= 4)
-		cmd = (MI_BATCH_BUFFER_START | (2 << 6) |
-		       MI_BATCH_NON_SECURE_I965);
-	else
-		cmd = (MI_BATCH_BUFFER_START | (2 << 6));
+	list_for_each_entry(obj, head, mm_list) {
+		err->size = obj->base.size;
+		err->name = obj->base.name;
+		err->seqno = obj->last_rendering_seqno;
+		err->gtt_offset = obj->gtt_offset;
+		err->read_domains = obj->base.read_domains;
+		err->write_domain = obj->base.write_domain;
+		err->fence_reg = obj->fence_reg;
+		err->pinned = 0;
+		if (obj->pin_count > 0)
+			err->pinned = 1;
+		if (obj->user_pin_count > 0)
+			err->pinned = -1;
+		err->tiling = obj->tiling_mode;
+		err->dirty = obj->dirty;
+		err->purgeable = obj->madv != I915_MADV_WILLNEED;
+		err->ring = obj->ring ? obj->ring->id : 0;
+		err->agp_type = obj->agp_type == AGP_USER_CACHED_MEMORY;
 
-	return ring[0] == cmd ? ring[1] : 0;
+		if (++i == count)
+			break;
+
+		err++;
+	}
+
+	return i;
 }
 
-static u32
-i915_ringbuffer_last_batch(struct drm_device *dev)
+static void i915_gem_record_fences(struct drm_device *dev,
+				   struct drm_i915_error_state *error)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 head, bbaddr;
-	u32 *ring;
+	int i;
 
-	/* Locate the current position in the ringbuffer and walk back
-	 * to find the most recently dispatched batch buffer.
-	 */
-	bbaddr = 0;
-	head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
-	ring = (u32 *)(dev_priv->render_ring.virtual_start + head);
+	/* Fences */
+	switch (INTEL_INFO(dev)->gen) {
+	case 6:
+		for (i = 0; i < 16; i++)
+			error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
+		break;
+	case 5:
+	case 4:
+		for (i = 0; i < 16; i++)
+			error->fence[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
+		break;
+	case 3:
+		if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
+			for (i = 0; i < 8; i++)
+				error->fence[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
+	case 2:
+		for (i = 0; i < 8; i++)
+			error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
+		break;
 
-	while (--ring >= (u32 *)dev_priv->render_ring.virtual_start) {
-		bbaddr = i915_get_bbaddr(dev, ring);
-		if (bbaddr)
-			break;
+	}
+}
+
+static struct drm_i915_error_object *
+i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
+			     struct intel_ring_buffer *ring)
+{
+	struct drm_i915_gem_object *obj;
+	u32 seqno;
+
+	if (!ring->get_seqno)
+		return NULL;
+
+	seqno = ring->get_seqno(ring);
+	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
+		if (obj->ring != ring)
+			continue;
+
+		if (!i915_seqno_passed(obj->last_rendering_seqno, seqno))
+			continue;
+
+		if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0)
+			continue;
+
+		/* We need to copy these to an anonymous buffer as the simplest
+		 * method to avoid being overwritten by userspace.
+		 */
+		return i915_error_object_create(dev_priv, obj);
 	}
 
-	if (bbaddr == 0) {
-		ring = (u32 *)(dev_priv->render_ring.virtual_start
-				+ dev_priv->render_ring.size);
-		while (--ring >= (u32 *)dev_priv->render_ring.virtual_start) {
-			bbaddr = i915_get_bbaddr(dev, ring);
-			if (bbaddr)
-				break;
-		}
-	}
-
-	return bbaddr;
+	return NULL;
 }
 
 /**
@@ -564,12 +747,10 @@
 static void i915_capture_error_state(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *obj_priv;
+	struct drm_i915_gem_object *obj;
 	struct drm_i915_error_state *error;
-	struct drm_gem_object *batchbuffer[2];
 	unsigned long flags;
-	u32 bbaddr;
-	int count;
+	int i;
 
 	spin_lock_irqsave(&dev_priv->error_lock, flags);
 	error = dev_priv->first_error;
@@ -585,20 +766,33 @@
 
 	DRM_DEBUG_DRIVER("generating error event\n");
 
-	error->seqno =
-		dev_priv->render_ring.get_seqno(dev, &dev_priv->render_ring);
+	error->seqno = dev_priv->ring[RCS].get_seqno(&dev_priv->ring[RCS]);
 	error->eir = I915_READ(EIR);
 	error->pgtbl_er = I915_READ(PGTBL_ER);
 	error->pipeastat = I915_READ(PIPEASTAT);
 	error->pipebstat = I915_READ(PIPEBSTAT);
 	error->instpm = I915_READ(INSTPM);
-	if (INTEL_INFO(dev)->gen < 4) {
-		error->ipeir = I915_READ(IPEIR);
-		error->ipehr = I915_READ(IPEHR);
-		error->instdone = I915_READ(INSTDONE);
-		error->acthd = I915_READ(ACTHD);
-		error->bbaddr = 0;
-	} else {
+	error->error = 0;
+	if (INTEL_INFO(dev)->gen >= 6) {
+		error->error = I915_READ(ERROR_GEN6);
+
+		error->bcs_acthd = I915_READ(BCS_ACTHD);
+		error->bcs_ipehr = I915_READ(BCS_IPEHR);
+		error->bcs_ipeir = I915_READ(BCS_IPEIR);
+		error->bcs_instdone = I915_READ(BCS_INSTDONE);
+		error->bcs_seqno = 0;
+		if (dev_priv->ring[BCS].get_seqno)
+			error->bcs_seqno = dev_priv->ring[BCS].get_seqno(&dev_priv->ring[BCS]);
+
+		error->vcs_acthd = I915_READ(VCS_ACTHD);
+		error->vcs_ipehr = I915_READ(VCS_IPEHR);
+		error->vcs_ipeir = I915_READ(VCS_IPEIR);
+		error->vcs_instdone = I915_READ(VCS_INSTDONE);
+		error->vcs_seqno = 0;
+		if (dev_priv->ring[VCS].get_seqno)
+			error->vcs_seqno = dev_priv->ring[VCS].get_seqno(&dev_priv->ring[VCS]);
+	}
+	if (INTEL_INFO(dev)->gen >= 4) {
 		error->ipeir = I915_READ(IPEIR_I965);
 		error->ipehr = I915_READ(IPEHR_I965);
 		error->instdone = I915_READ(INSTDONE_I965);
@@ -606,118 +800,61 @@
 		error->instdone1 = I915_READ(INSTDONE1);
 		error->acthd = I915_READ(ACTHD_I965);
 		error->bbaddr = I915_READ64(BB_ADDR);
+	} else {
+		error->ipeir = I915_READ(IPEIR);
+		error->ipehr = I915_READ(IPEHR);
+		error->instdone = I915_READ(INSTDONE);
+		error->acthd = I915_READ(ACTHD);
+		error->bbaddr = 0;
 	}
+	i915_gem_record_fences(dev, error);
 
-	bbaddr = i915_ringbuffer_last_batch(dev);
-
-	/* Grab the current batchbuffer, most likely to have crashed. */
-	batchbuffer[0] = NULL;
-	batchbuffer[1] = NULL;
-	count = 0;
-	list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) {
-		struct drm_gem_object *obj = &obj_priv->base;
-
-		if (batchbuffer[0] == NULL &&
-		    bbaddr >= obj_priv->gtt_offset &&
-		    bbaddr < obj_priv->gtt_offset + obj->size)
-			batchbuffer[0] = obj;
-
-		if (batchbuffer[1] == NULL &&
-		    error->acthd >= obj_priv->gtt_offset &&
-		    error->acthd < obj_priv->gtt_offset + obj->size)
-			batchbuffer[1] = obj;
-
-		count++;
-	}
-	/* Scan the other lists for completeness for those bizarre errors. */
-	if (batchbuffer[0] == NULL || batchbuffer[1] == NULL) {
-		list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, mm_list) {
-			struct drm_gem_object *obj = &obj_priv->base;
-
-			if (batchbuffer[0] == NULL &&
-			    bbaddr >= obj_priv->gtt_offset &&
-			    bbaddr < obj_priv->gtt_offset + obj->size)
-				batchbuffer[0] = obj;
-
-			if (batchbuffer[1] == NULL &&
-			    error->acthd >= obj_priv->gtt_offset &&
-			    error->acthd < obj_priv->gtt_offset + obj->size)
-				batchbuffer[1] = obj;
-
-			if (batchbuffer[0] && batchbuffer[1])
-				break;
-		}
-	}
-	if (batchbuffer[0] == NULL || batchbuffer[1] == NULL) {
-		list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, mm_list) {
-			struct drm_gem_object *obj = &obj_priv->base;
-
-			if (batchbuffer[0] == NULL &&
-			    bbaddr >= obj_priv->gtt_offset &&
-			    bbaddr < obj_priv->gtt_offset + obj->size)
-				batchbuffer[0] = obj;
-
-			if (batchbuffer[1] == NULL &&
-			    error->acthd >= obj_priv->gtt_offset &&
-			    error->acthd < obj_priv->gtt_offset + obj->size)
-				batchbuffer[1] = obj;
-
-			if (batchbuffer[0] && batchbuffer[1])
-				break;
-		}
-	}
-
-	/* We need to copy these to an anonymous buffer as the simplest
-	 * method to avoid being overwritten by userspace.
-	 */
-	error->batchbuffer[0] = i915_error_object_create(dev, batchbuffer[0]);
-	if (batchbuffer[1] != batchbuffer[0])
-		error->batchbuffer[1] = i915_error_object_create(dev, batchbuffer[1]);
-	else
-		error->batchbuffer[1] = NULL;
+	/* Record the active batchbuffers */
+	for (i = 0; i < I915_NUM_RINGS; i++)
+		error->batchbuffer[i] =
+			i915_error_first_batchbuffer(dev_priv,
+						     &dev_priv->ring[i]);
 
 	/* Record the ringbuffer */
-	error->ringbuffer = i915_error_object_create(dev,
-			dev_priv->render_ring.gem_object);
+	error->ringbuffer = i915_error_object_create(dev_priv,
+						     dev_priv->ring[RCS].obj);
 
-	/* Record buffers on the active list. */
+	/* Record buffers on the active and pinned lists. */
 	error->active_bo = NULL;
-	error->active_bo_count = 0;
+	error->pinned_bo = NULL;
 
-	if (count)
-		error->active_bo = kmalloc(sizeof(*error->active_bo)*count,
+	i = 0;
+	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list)
+		i++;
+	error->active_bo_count = i;
+	list_for_each_entry(obj, &dev_priv->mm.pinned_list, mm_list)
+		i++;
+	error->pinned_bo_count = i - error->active_bo_count;
+
+	if (i) {
+		error->active_bo = kmalloc(sizeof(*error->active_bo)*i,
 					   GFP_ATOMIC);
-
-	if (error->active_bo) {
-		int i = 0;
-		list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) {
-			struct drm_gem_object *obj = &obj_priv->base;
-
-			error->active_bo[i].size = obj->size;
-			error->active_bo[i].name = obj->name;
-			error->active_bo[i].seqno = obj_priv->last_rendering_seqno;
-			error->active_bo[i].gtt_offset = obj_priv->gtt_offset;
-			error->active_bo[i].read_domains = obj->read_domains;
-			error->active_bo[i].write_domain = obj->write_domain;
-			error->active_bo[i].fence_reg = obj_priv->fence_reg;
-			error->active_bo[i].pinned = 0;
-			if (obj_priv->pin_count > 0)
-				error->active_bo[i].pinned = 1;
-			if (obj_priv->user_pin_count > 0)
-				error->active_bo[i].pinned = -1;
-			error->active_bo[i].tiling = obj_priv->tiling_mode;
-			error->active_bo[i].dirty = obj_priv->dirty;
-			error->active_bo[i].purgeable = obj_priv->madv != I915_MADV_WILLNEED;
-
-			if (++i == count)
-				break;
-		}
-		error->active_bo_count = i;
+		if (error->active_bo)
+			error->pinned_bo =
+				error->active_bo + error->active_bo_count;
 	}
 
+	if (error->active_bo)
+		error->active_bo_count =
+			capture_bo_list(error->active_bo,
+					error->active_bo_count,
+					&dev_priv->mm.active_list);
+
+	if (error->pinned_bo)
+		error->pinned_bo_count =
+			capture_bo_list(error->pinned_bo,
+					error->pinned_bo_count,
+					&dev_priv->mm.pinned_list);
+
 	do_gettimeofday(&error->time);
 
 	error->overlay = intel_overlay_capture_error_state(dev);
+	error->display = intel_display_capture_error_state(dev);
 
 	spin_lock_irqsave(&dev_priv->error_lock, flags);
 	if (dev_priv->first_error == NULL) {
@@ -775,7 +912,7 @@
 			printk(KERN_ERR "  ACTHD: 0x%08x\n",
 			       I915_READ(ACTHD_I965));
 			I915_WRITE(IPEIR_I965, ipeir);
-			(void)I915_READ(IPEIR_I965);
+			POSTING_READ(IPEIR_I965);
 		}
 		if (eir & GM45_ERROR_PAGE_TABLE) {
 			u32 pgtbl_err = I915_READ(PGTBL_ER);
@@ -783,7 +920,7 @@
 			printk(KERN_ERR "  PGTBL_ER: 0x%08x\n",
 			       pgtbl_err);
 			I915_WRITE(PGTBL_ER, pgtbl_err);
-			(void)I915_READ(PGTBL_ER);
+			POSTING_READ(PGTBL_ER);
 		}
 	}
 
@@ -794,7 +931,7 @@
 			printk(KERN_ERR "  PGTBL_ER: 0x%08x\n",
 			       pgtbl_err);
 			I915_WRITE(PGTBL_ER, pgtbl_err);
-			(void)I915_READ(PGTBL_ER);
+			POSTING_READ(PGTBL_ER);
 		}
 	}
 
@@ -825,7 +962,7 @@
 			printk(KERN_ERR "  ACTHD: 0x%08x\n",
 			       I915_READ(ACTHD));
 			I915_WRITE(IPEIR, ipeir);
-			(void)I915_READ(IPEIR);
+			POSTING_READ(IPEIR);
 		} else {
 			u32 ipeir = I915_READ(IPEIR_I965);
 
@@ -842,12 +979,12 @@
 			printk(KERN_ERR "  ACTHD: 0x%08x\n",
 			       I915_READ(ACTHD_I965));
 			I915_WRITE(IPEIR_I965, ipeir);
-			(void)I915_READ(IPEIR_I965);
+			POSTING_READ(IPEIR_I965);
 		}
 	}
 
 	I915_WRITE(EIR, eir);
-	(void)I915_READ(EIR);
+	POSTING_READ(EIR);
 	eir = I915_READ(EIR);
 	if (eir) {
 		/*
@@ -870,7 +1007,7 @@
  * so userspace knows something bad happened (should trigger collection
  * of a ring dump etc.).
  */
-static void i915_handle_error(struct drm_device *dev, bool wedged)
+void i915_handle_error(struct drm_device *dev, bool wedged)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
@@ -884,11 +1021,11 @@
 		/*
 		 * Wakeup waiting processes so they don't hang
 		 */
-		wake_up_all(&dev_priv->render_ring.irq_queue);
+		wake_up_all(&dev_priv->ring[RCS].irq_queue);
 		if (HAS_BSD(dev))
-			wake_up_all(&dev_priv->bsd_ring.irq_queue);
+			wake_up_all(&dev_priv->ring[VCS].irq_queue);
 		if (HAS_BLT(dev))
-			wake_up_all(&dev_priv->blt_ring.irq_queue);
+			wake_up_all(&dev_priv->ring[BCS].irq_queue);
 	}
 
 	queue_work(dev_priv->wq, &dev_priv->error_work);
@@ -899,7 +1036,7 @@
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	struct drm_i915_gem_object *obj_priv;
+	struct drm_i915_gem_object *obj;
 	struct intel_unpin_work *work;
 	unsigned long flags;
 	bool stall_detected;
@@ -918,13 +1055,13 @@
 	}
 
 	/* Potential stall - if we see that the flip has happened, assume a missed interrupt */
-	obj_priv = to_intel_bo(work->pending_flip_obj);
+	obj = work->pending_flip_obj;
 	if (INTEL_INFO(dev)->gen >= 4) {
 		int dspsurf = intel_crtc->plane == 0 ? DSPASURF : DSPBSURF;
-		stall_detected = I915_READ(dspsurf) == obj_priv->gtt_offset;
+		stall_detected = I915_READ(dspsurf) == obj->gtt_offset;
 	} else {
 		int dspaddr = intel_crtc->plane == 0 ? DSPAADDR : DSPBADDR;
-		stall_detected = I915_READ(dspaddr) == (obj_priv->gtt_offset +
+		stall_detected = I915_READ(dspaddr) == (obj->gtt_offset +
 							crtc->y * crtc->fb->pitch +
 							crtc->x * crtc->fb->bits_per_pixel/8);
 	}
@@ -970,7 +1107,7 @@
 		 * It doesn't set the bit in iir again, but it still produces
 		 * interrupts (for non-MSI).
 		 */
-		spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
+		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
 		pipea_stats = I915_READ(PIPEASTAT);
 		pipeb_stats = I915_READ(PIPEBSTAT);
 
@@ -993,7 +1130,7 @@
 			I915_WRITE(PIPEBSTAT, pipeb_stats);
 			irq_received = 1;
 		}
-		spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
+		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 
 		if (!irq_received)
 			break;
@@ -1026,9 +1163,9 @@
 		}
 
 		if (iir & I915_USER_INTERRUPT)
-			notify_ring(dev, &dev_priv->render_ring);
-		if (HAS_BSD(dev) && (iir & I915_BSD_USER_INTERRUPT))
-			notify_ring(dev, &dev_priv->bsd_ring);
+			notify_ring(dev, &dev_priv->ring[RCS]);
+		if (iir & I915_BSD_USER_INTERRUPT)
+			notify_ring(dev, &dev_priv->ring[VCS]);
 
 		if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) {
 			intel_prepare_page_flip(dev, 0);
@@ -1101,12 +1238,13 @@
 	if (master_priv->sarea_priv)
 		master_priv->sarea_priv->last_enqueue = dev_priv->counter;
 
-	BEGIN_LP_RING(4);
-	OUT_RING(MI_STORE_DWORD_INDEX);
-	OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-	OUT_RING(dev_priv->counter);
-	OUT_RING(MI_USER_INTERRUPT);
-	ADVANCE_LP_RING();
+	if (BEGIN_LP_RING(4) == 0) {
+		OUT_RING(MI_STORE_DWORD_INDEX);
+		OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+		OUT_RING(dev_priv->counter);
+		OUT_RING(MI_USER_INTERRUPT);
+		ADVANCE_LP_RING();
+	}
 
 	return dev_priv->counter;
 }
@@ -1114,12 +1252,11 @@
 void i915_trace_irq_get(struct drm_device *dev, u32 seqno)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-	struct intel_ring_buffer *render_ring = &dev_priv->render_ring;
+	struct intel_ring_buffer *ring = LP_RING(dev_priv);
 
-	if (dev_priv->trace_irq_seqno == 0)
-		render_ring->user_irq_get(dev, render_ring);
-
-	dev_priv->trace_irq_seqno = seqno;
+	if (dev_priv->trace_irq_seqno == 0 &&
+	    ring->irq_get(ring))
+		dev_priv->trace_irq_seqno = seqno;
 }
 
 static int i915_wait_irq(struct drm_device * dev, int irq_nr)
@@ -1127,7 +1264,7 @@
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
 	int ret = 0;
-	struct intel_ring_buffer *render_ring = &dev_priv->render_ring;
+	struct intel_ring_buffer *ring = LP_RING(dev_priv);
 
 	DRM_DEBUG_DRIVER("irq_nr=%d breadcrumb=%d\n", irq_nr,
 		  READ_BREADCRUMB(dev_priv));
@@ -1141,10 +1278,12 @@
 	if (master_priv->sarea_priv)
 		master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
 
-	render_ring->user_irq_get(dev, render_ring);
-	DRM_WAIT_ON(ret, dev_priv->render_ring.irq_queue, 3 * DRM_HZ,
-		    READ_BREADCRUMB(dev_priv) >= irq_nr);
-	render_ring->user_irq_put(dev, render_ring);
+	ret = -ENODEV;
+	if (ring->irq_get(ring)) {
+		DRM_WAIT_ON(ret, ring->irq_queue, 3 * DRM_HZ,
+			    READ_BREADCRUMB(dev_priv) >= irq_nr);
+		ring->irq_put(ring);
+	}
 
 	if (ret == -EBUSY) {
 		DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
@@ -1163,7 +1302,7 @@
 	drm_i915_irq_emit_t *emit = data;
 	int result;
 
-	if (!dev_priv || !dev_priv->render_ring.virtual_start) {
+	if (!dev_priv || !LP_RING(dev_priv)->virtual_start) {
 		DRM_ERROR("called with no initialization\n");
 		return -EINVAL;
 	}
@@ -1209,9 +1348,9 @@
 	if (!i915_pipe_enabled(dev, pipe))
 		return -EINVAL;
 
-	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
+	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
 	if (HAS_PCH_SPLIT(dev))
-		ironlake_enable_display_irq(dev_priv, (pipe == 0) ? 
+		ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
 					    DE_PIPEA_VBLANK: DE_PIPEB_VBLANK);
 	else if (INTEL_INFO(dev)->gen >= 4)
 		i915_enable_pipestat(dev_priv, pipe,
@@ -1219,7 +1358,7 @@
 	else
 		i915_enable_pipestat(dev_priv, pipe,
 				     PIPE_VBLANK_INTERRUPT_ENABLE);
-	spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
+	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 	return 0;
 }
 
@@ -1231,15 +1370,15 @@
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 	unsigned long irqflags;
 
-	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
+	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
 	if (HAS_PCH_SPLIT(dev))
-		ironlake_disable_display_irq(dev_priv, (pipe == 0) ? 
+		ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
 					     DE_PIPEA_VBLANK: DE_PIPEB_VBLANK);
 	else
 		i915_disable_pipestat(dev_priv, pipe,
 				      PIPE_VBLANK_INTERRUPT_ENABLE |
 				      PIPE_START_VBLANK_INTERRUPT_ENABLE);
-	spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
+	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 }
 
 void i915_enable_interrupt (struct drm_device *dev)
@@ -1306,12 +1445,50 @@
 	return -EINVAL;
 }
 
-static struct drm_i915_gem_request *
-i915_get_tail_request(struct drm_device *dev)
+static u32
+ring_last_seqno(struct intel_ring_buffer *ring)
 {
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	return list_entry(dev_priv->render_ring.request_list.prev,
-			struct drm_i915_gem_request, list);
+	return list_entry(ring->request_list.prev,
+			  struct drm_i915_gem_request, list)->seqno;
+}
+
+static bool i915_hangcheck_ring_idle(struct intel_ring_buffer *ring, bool *err)
+{
+	if (list_empty(&ring->request_list) ||
+	    i915_seqno_passed(ring->get_seqno(ring), ring_last_seqno(ring))) {
+		/* Issue a wake-up to catch stuck h/w. */
+		if (ring->waiting_seqno && waitqueue_active(&ring->irq_queue)) {
+			DRM_ERROR("Hangcheck timer elapsed... %s idle [waiting on %d, at %d], missed IRQ?\n",
+				  ring->name,
+				  ring->waiting_seqno,
+				  ring->get_seqno(ring));
+			wake_up_all(&ring->irq_queue);
+			*err = true;
+		}
+		return true;
+	}
+	return false;
+}
+
+static bool kick_ring(struct intel_ring_buffer *ring)
+{
+	struct drm_device *dev = ring->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 tmp = I915_READ_CTL(ring);
+	if (tmp & RING_WAIT) {
+		DRM_ERROR("Kicking stuck wait on %s\n",
+			  ring->name);
+		I915_WRITE_CTL(ring, tmp);
+		return true;
+	}
+	if (IS_GEN6(dev) &&
+	    (tmp & RING_WAIT_SEMAPHORE)) {
+		DRM_ERROR("Kicking stuck semaphore on %s\n",
+			  ring->name);
+		I915_WRITE_CTL(ring, tmp);
+		return true;
+	}
+	return false;
 }
 
 /**
@@ -1325,6 +1502,17 @@
 	struct drm_device *dev = (struct drm_device *)data;
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	uint32_t acthd, instdone, instdone1;
+	bool err = false;
+
+	/* If all work is done then ACTHD clearly hasn't advanced. */
+	if (i915_hangcheck_ring_idle(&dev_priv->ring[RCS], &err) &&
+	    i915_hangcheck_ring_idle(&dev_priv->ring[VCS], &err) &&
+	    i915_hangcheck_ring_idle(&dev_priv->ring[BCS], &err)) {
+		dev_priv->hangcheck_count = 0;
+		if (err)
+			goto repeat;
+		return;
+	}
 
 	if (INTEL_INFO(dev)->gen < 4) {
 		acthd = I915_READ(ACTHD);
@@ -1336,38 +1524,6 @@
 		instdone1 = I915_READ(INSTDONE1);
 	}
 
-	/* If all work is done then ACTHD clearly hasn't advanced. */
-	if (list_empty(&dev_priv->render_ring.request_list) ||
-		i915_seqno_passed(dev_priv->render_ring.get_seqno(dev, &dev_priv->render_ring),
-				  i915_get_tail_request(dev)->seqno)) {
-		bool missed_wakeup = false;
-
-		dev_priv->hangcheck_count = 0;
-
-		/* Issue a wake-up to catch stuck h/w. */
-		if (dev_priv->render_ring.waiting_gem_seqno &&
-		    waitqueue_active(&dev_priv->render_ring.irq_queue)) {
-			wake_up_all(&dev_priv->render_ring.irq_queue);
-			missed_wakeup = true;
-		}
-
-		if (dev_priv->bsd_ring.waiting_gem_seqno &&
-		    waitqueue_active(&dev_priv->bsd_ring.irq_queue)) {
-			wake_up_all(&dev_priv->bsd_ring.irq_queue);
-			missed_wakeup = true;
-		}
-
-		if (dev_priv->blt_ring.waiting_gem_seqno &&
-		    waitqueue_active(&dev_priv->blt_ring.irq_queue)) {
-			wake_up_all(&dev_priv->blt_ring.irq_queue);
-			missed_wakeup = true;
-		}
-
-		if (missed_wakeup)
-			DRM_ERROR("Hangcheck timer elapsed... GPU idle, missed IRQ.\n");
-		return;
-	}
-
 	if (dev_priv->last_acthd == acthd &&
 	    dev_priv->last_instdone == instdone &&
 	    dev_priv->last_instdone1 == instdone1) {
@@ -1380,12 +1536,17 @@
 				 * and break the hang. This should work on
 				 * all but the second generation chipsets.
 				 */
-				u32 tmp = I915_READ(PRB0_CTL);
-				if (tmp & RING_WAIT) {
-					I915_WRITE(PRB0_CTL, tmp);
-					POSTING_READ(PRB0_CTL);
-					goto out;
-				}
+
+				if (kick_ring(&dev_priv->ring[RCS]))
+					goto repeat;
+
+				if (HAS_BSD(dev) &&
+				    kick_ring(&dev_priv->ring[VCS]))
+					goto repeat;
+
+				if (HAS_BLT(dev) &&
+				    kick_ring(&dev_priv->ring[BCS]))
+					goto repeat;
 			}
 
 			i915_handle_error(dev, true);
@@ -1399,7 +1560,7 @@
 		dev_priv->last_instdone1 = instdone1;
 	}
 
-out:
+repeat:
 	/* Reset timer case chip hangs without another request being added */
 	mod_timer(&dev_priv->hangcheck_timer,
 		  jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
@@ -1417,17 +1578,17 @@
 
 	I915_WRITE(DEIMR, 0xffffffff);
 	I915_WRITE(DEIER, 0x0);
-	(void) I915_READ(DEIER);
+	POSTING_READ(DEIER);
 
 	/* and GT */
 	I915_WRITE(GTIMR, 0xffffffff);
 	I915_WRITE(GTIER, 0x0);
-	(void) I915_READ(GTIER);
+	POSTING_READ(GTIER);
 
 	/* south display irq */
 	I915_WRITE(SDEIMR, 0xffffffff);
 	I915_WRITE(SDEIER, 0x0);
-	(void) I915_READ(SDEIER);
+	POSTING_READ(SDEIER);
 }
 
 static int ironlake_irq_postinstall(struct drm_device *dev)
@@ -1436,38 +1597,34 @@
 	/* enable kind of interrupts always enabled */
 	u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
 			   DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE;
-	u32 render_mask = GT_PIPE_NOTIFY | GT_BSD_USER_INTERRUPT;
+	u32 render_irqs;
 	u32 hotplug_mask;
 
-	dev_priv->irq_mask_reg = ~display_mask;
-	dev_priv->de_irq_enable_reg = display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK;
+	dev_priv->irq_mask = ~display_mask;
 
 	/* should always can generate irq */
 	I915_WRITE(DEIIR, I915_READ(DEIIR));
-	I915_WRITE(DEIMR, dev_priv->irq_mask_reg);
-	I915_WRITE(DEIER, dev_priv->de_irq_enable_reg);
-	(void) I915_READ(DEIER);
+	I915_WRITE(DEIMR, dev_priv->irq_mask);
+	I915_WRITE(DEIER, display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK);
+	POSTING_READ(DEIER);
 
-	if (IS_GEN6(dev)) {
-		render_mask =
-			GT_PIPE_NOTIFY |
-			GT_GEN6_BSD_USER_INTERRUPT |
-			GT_BLT_USER_INTERRUPT;
-	}
-
-	dev_priv->gt_irq_mask_reg = ~render_mask;
-	dev_priv->gt_irq_enable_reg = render_mask;
+	dev_priv->gt_irq_mask = ~0;
 
 	I915_WRITE(GTIIR, I915_READ(GTIIR));
-	I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg);
-	if (IS_GEN6(dev)) {
-		I915_WRITE(GEN6_RENDER_IMR, ~GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT);
-		I915_WRITE(GEN6_BSD_IMR, ~GEN6_BSD_IMR_USER_INTERRUPT);
-		I915_WRITE(GEN6_BLITTER_IMR, ~GEN6_BLITTER_USER_INTERRUPT);
-	}
+	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
 
-	I915_WRITE(GTIER, dev_priv->gt_irq_enable_reg);
-	(void) I915_READ(GTIER);
+	if (IS_GEN6(dev))
+		render_irqs =
+			GT_USER_INTERRUPT |
+			GT_GEN6_BSD_USER_INTERRUPT |
+			GT_BLT_USER_INTERRUPT;
+	else
+		render_irqs =
+			GT_USER_INTERRUPT |
+			GT_PIPE_NOTIFY |
+			GT_BSD_USER_INTERRUPT;
+	I915_WRITE(GTIER, render_irqs);
+	POSTING_READ(GTIER);
 
 	if (HAS_PCH_CPT(dev)) {
 		hotplug_mask = SDE_CRT_HOTPLUG_CPT | SDE_PORTB_HOTPLUG_CPT  |
@@ -1475,15 +1632,17 @@
 	} else {
 		hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG |
 			       SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG;
+		hotplug_mask |= SDE_AUX_MASK | SDE_FDI_MASK | SDE_TRANS_MASK;
+		I915_WRITE(FDI_RXA_IMR, 0);
+		I915_WRITE(FDI_RXB_IMR, 0);
 	}
 
-	dev_priv->pch_irq_mask_reg = ~hotplug_mask;
-	dev_priv->pch_irq_enable_reg = hotplug_mask;
+	dev_priv->pch_irq_mask = ~hotplug_mask;
 
 	I915_WRITE(SDEIIR, I915_READ(SDEIIR));
-	I915_WRITE(SDEIMR, dev_priv->pch_irq_mask_reg);
-	I915_WRITE(SDEIER, dev_priv->pch_irq_enable_reg);
-	(void) I915_READ(SDEIER);
+	I915_WRITE(SDEIMR, dev_priv->pch_irq_mask);
+	I915_WRITE(SDEIER, hotplug_mask);
+	POSTING_READ(SDEIER);
 
 	if (IS_IRONLAKE_M(dev)) {
 		/* Clear & enable PCU event interrupts */
@@ -1519,7 +1678,7 @@
 	I915_WRITE(PIPEBSTAT, 0);
 	I915_WRITE(IMR, 0xffffffff);
 	I915_WRITE(IER, 0x0);
-	(void) I915_READ(IER);
+	POSTING_READ(IER);
 }
 
 /*
@@ -1532,11 +1691,11 @@
 	u32 enable_mask = I915_INTERRUPT_ENABLE_FIX | I915_INTERRUPT_ENABLE_VAR;
 	u32 error_mask;
 
-	DRM_INIT_WAITQUEUE(&dev_priv->render_ring.irq_queue);
+	DRM_INIT_WAITQUEUE(&dev_priv->ring[RCS].irq_queue);
 	if (HAS_BSD(dev))
-		DRM_INIT_WAITQUEUE(&dev_priv->bsd_ring.irq_queue);
+		DRM_INIT_WAITQUEUE(&dev_priv->ring[VCS].irq_queue);
 	if (HAS_BLT(dev))
-		DRM_INIT_WAITQUEUE(&dev_priv->blt_ring.irq_queue);
+		DRM_INIT_WAITQUEUE(&dev_priv->ring[BCS].irq_queue);
 
 	dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
 
@@ -1544,7 +1703,7 @@
 		return ironlake_irq_postinstall(dev);
 
 	/* Unmask the interrupts that we always want on. */
-	dev_priv->irq_mask_reg = ~I915_INTERRUPT_ENABLE_FIX;
+	dev_priv->irq_mask = ~I915_INTERRUPT_ENABLE_FIX;
 
 	dev_priv->pipestat[0] = 0;
 	dev_priv->pipestat[1] = 0;
@@ -1553,7 +1712,7 @@
 		/* Enable in IER... */
 		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
 		/* and unmask in IMR */
-		dev_priv->irq_mask_reg &= ~I915_DISPLAY_PORT_INTERRUPT;
+		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
 	}
 
 	/*
@@ -1571,9 +1730,9 @@
 	}
 	I915_WRITE(EMR, error_mask);
 
-	I915_WRITE(IMR, dev_priv->irq_mask_reg);
+	I915_WRITE(IMR, dev_priv->irq_mask);
 	I915_WRITE(IER, enable_mask);
-	(void) I915_READ(IER);
+	POSTING_READ(IER);
 
 	if (I915_HAS_HOTPLUG(dev)) {
 		u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index cb8f434..40a407f 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -78,6 +78,12 @@
 #define  GRDOM_RENDER	(1<<2)
 #define  GRDOM_MEDIA	(3<<2)
 
+#define GEN6_GDRST	0x941c
+#define  GEN6_GRDOM_FULL		(1 << 0)
+#define  GEN6_GRDOM_RENDER		(1 << 1)
+#define  GEN6_GRDOM_MEDIA		(1 << 2)
+#define  GEN6_GRDOM_BLT			(1 << 3)
+
 /* VGA stuff */
 
 #define VGA_ST01_MDA 0x3ba
@@ -139,6 +145,8 @@
 #define   MI_END_SCENE		(1 << 4) /* flush binner and incr scene count */
 #define   MI_INVALIDATE_ISP	(1 << 5) /* invalidate indirect state pointers */
 #define MI_BATCH_BUFFER_END	MI_INSTR(0x0a, 0)
+#define MI_SUSPEND_FLUSH	MI_INSTR(0x0b, 0)
+#define   MI_SUSPEND_FLUSH_EN	(1<<0)
 #define MI_REPORT_HEAD		MI_INSTR(0x07, 0)
 #define MI_OVERLAY_FLIP		MI_INSTR(0x11,0)
 #define   MI_OVERLAY_CONTINUE	(0x0<<21)
@@ -153,17 +161,29 @@
 #define   MI_MM_SPACE_PHYSICAL		(0<<8)
 #define   MI_SAVE_EXT_STATE_EN		(1<<3)
 #define   MI_RESTORE_EXT_STATE_EN	(1<<2)
+#define   MI_FORCE_RESTORE		(1<<1)
 #define   MI_RESTORE_INHIBIT		(1<<0)
 #define MI_STORE_DWORD_IMM	MI_INSTR(0x20, 1)
 #define   MI_MEM_VIRTUAL	(1 << 22) /* 965+ only */
 #define MI_STORE_DWORD_INDEX	MI_INSTR(0x21, 1)
 #define   MI_STORE_DWORD_INDEX_SHIFT 2
-#define MI_LOAD_REGISTER_IMM	MI_INSTR(0x22, 1)
+/* Official intel docs are somewhat sloppy concerning MI_LOAD_REGISTER_IMM:
+ * - Always issue a MI_NOOP _before_ the MI_LOAD_REGISTER_IMM - otherwise hw
+ *   simply ignores the register load under certain conditions.
+ * - One can actually load arbitrarily many arbitrary registers: simply issue x
+ *   address/value pairs. Don't overdo it, though; x <= 2^4 must hold!
+ */
+#define MI_LOAD_REGISTER_IMM(x)	MI_INSTR(0x22, 2*(x)-1)
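+/* Illustrative usage only (not part of this patch), assuming 4 dwords of
+ * ring space have been reserved; per the note above the load is preceded
+ * by an MI_NOOP:
+ *   OUT_RING(MI_NOOP);
+ *   OUT_RING(MI_LOAD_REGISTER_IMM(1));
+ *   OUT_RING(reg);
+ *   OUT_RING(value);
+ */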
 #define MI_FLUSH_DW		MI_INSTR(0x26, 2) /* for GEN6 */
 #define MI_BATCH_BUFFER		MI_INSTR(0x30, 1)
 #define   MI_BATCH_NON_SECURE	(1)
 #define   MI_BATCH_NON_SECURE_I965 (1<<8)
 #define MI_BATCH_BUFFER_START	MI_INSTR(0x31, 0)
+#define MI_SEMAPHORE_MBOX	MI_INSTR(0x16, 1) /* gen6+ */
+#define  MI_SEMAPHORE_GLOBAL_GTT    (1<<22)
+#define  MI_SEMAPHORE_UPDATE	    (1<<21)
+#define  MI_SEMAPHORE_COMPARE	    (1<<20)
+#define  MI_SEMAPHORE_REGISTER	    (1<<18)
 /*
  * 3D instructions used by the kernel
  */
@@ -256,10 +276,6 @@
  * Instruction and interrupt control regs
  */
 #define PGTBL_ER	0x02024
-#define PRB0_TAIL	0x02030
-#define PRB0_HEAD	0x02034
-#define PRB0_START	0x02038
-#define PRB0_CTL	0x0203c
 #define RENDER_RING_BASE	0x02000
 #define BSD_RING_BASE		0x04000
 #define GEN6_BSD_RING_BASE	0x12000
@@ -268,9 +284,14 @@
 #define RING_HEAD(base)		((base)+0x34)
 #define RING_START(base)	((base)+0x38)
 #define RING_CTL(base)		((base)+0x3c)
+#define RING_SYNC_0(base)	((base)+0x40)
+#define RING_SYNC_1(base)	((base)+0x44)
+#define RING_MAX_IDLE(base)	((base)+0x54)
 #define RING_HWS_PGA(base)	((base)+0x80)
 #define RING_HWS_PGA_GEN6(base)	((base)+0x2080)
 #define RING_ACTHD(base)	((base)+0x74)
+#define RING_NOPID(base)	((base)+0x94)
+#define RING_IMR(base)		((base)+0xa8)
 #define   TAIL_ADDR		0x001FFFF8
 #define   HEAD_WRAP_COUNT	0xFFE00000
 #define   HEAD_WRAP_ONE		0x00200000
@@ -285,10 +306,17 @@
 #define   RING_INVALID		0x00000000
 #define   RING_WAIT_I8XX	(1<<0) /* gen2, PRBx_HEAD */
 #define   RING_WAIT		(1<<11) /* gen3+, PRBx_CTL */
+#define   RING_WAIT_SEMAPHORE	(1<<10) /* gen6+ */
+#if 0
+#define PRB0_TAIL	0x02030
+#define PRB0_HEAD	0x02034
+#define PRB0_START	0x02038
+#define PRB0_CTL	0x0203c
 #define PRB1_TAIL	0x02040 /* 915+ only */
 #define PRB1_HEAD	0x02044 /* 915+ only */
 #define PRB1_START	0x02048 /* 915+ only */
 #define PRB1_CTL	0x0204c /* 915+ only */
+#endif
 #define IPEIR_I965	0x02064
 #define IPEHR_I965	0x02068
 #define INSTDONE_I965	0x0206c
@@ -305,11 +333,42 @@
 #define INSTDONE	0x02090
 #define NOPID		0x02094
 #define HWSTAM		0x02098
+#define VCS_INSTDONE	0x1206C
+#define VCS_IPEIR	0x12064
+#define VCS_IPEHR	0x12068
+#define VCS_ACTHD	0x12074
+#define BCS_INSTDONE	0x2206C
+#define BCS_IPEIR	0x22064
+#define BCS_IPEHR	0x22068
+#define BCS_ACTHD	0x22074
+
+#define ERROR_GEN6	0x040a0
+
+/* GM45+ chicken bits -- debug workaround bits that may be required
+ * for various sorts of correct behavior.  The top 16 bits of each are
+ * the enables for writing to the corresponding low bit.
+ */
+#define _3D_CHICKEN	0x02084
+#define _3D_CHICKEN2	0x0208c
+/* Disables pipelining of read flushes past the SF-WIZ interface.
+ * Required on all Ironlake steppings according to the B-Spec, but the
+ * particular danger of not doing so is not specified.
+ */
+# define _3D_CHICKEN2_WM_READ_PIPELINED			(1 << 14)
+#define _3D_CHICKEN3	0x02090
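+/* Illustrative only (not part of this patch): since the top 16 bits are the
+ * write enables, setting a chicken bit typically looks like
+ *   I915_WRITE(_3D_CHICKEN2, (_3D_CHICKEN2_WM_READ_PIPELINED << 16) |
+ *              _3D_CHICKEN2_WM_READ_PIPELINED);
+ */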
 
 #define MI_MODE		0x0209c
 # define VS_TIMER_DISPATCH				(1 << 6)
 # define MI_FLUSH_ENABLE				(1 << 11)
 
+#define GFX_MODE	0x02520
+#define   GFX_RUN_LIST_ENABLE		(1<<15)
+#define   GFX_TLB_INVALIDATE_ALWAYS	(1<<13)
+#define   GFX_SURFACE_FAULT_ENABLE	(1<<12)
+#define   GFX_REPLAY_MODE		(1<<11)
+#define   GFX_PSMI_GRANULARITY		(1<<10)
+#define   GFX_PPGTT_ENABLE		(1<<9)
+
 #define SCPD0		0x0209c /* 915+ only */
 #define IER		0x020a0
 #define IIR		0x020a4
@@ -461,7 +520,7 @@
 #define   GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR			(1 << 3)
 
 #define GEN6_BSD_IMR			0x120a8
-#define   GEN6_BSD_IMR_USER_INTERRUPT	(1 << 12)
+#define   GEN6_BSD_USER_INTERRUPT	(1 << 12)
 
 #define GEN6_BSD_RNCID			0x12198
 
@@ -541,6 +600,18 @@
 
 #define ILK_DISPLAY_CHICKEN1	0x42000
 #define   ILK_FBCQ_DIS		(1<<22)
+#define   ILK_PABSTRETCH_DIS 	(1<<21)
+
+
+/*
+ * Framebuffer compression for Sandybridge
+ *
+ * The following two registers are of type GTTMMADR
+ */
+#define SNB_DPFC_CTL_SA		0x100100
+#define   SNB_CPU_FENCE_ENABLE	(1<<29)
+#define DPFC_CPU_FENCE_OFFSET	0x100104
+
 
 /*
  * GPIO regs
@@ -900,6 +971,8 @@
  */
 #define MCHBAR_MIRROR_BASE	0x10000
 
+#define MCHBAR_MIRROR_BASE_SNB	0x140000
+
 /** 915-945 and GM965 MCH register controlling DRAM channel access */
 #define DCC			0x10200
 #define DCC_ADDRESSING_MODE_SINGLE_CHANNEL		(0 << 0)
@@ -1061,9 +1134,50 @@
 #define RCBMINAVG		0x111a0
 #define RCUPEI			0x111b0
 #define RCDNEI			0x111b4
-#define MCHBAR_RENDER_STANDBY		0x111b8
-#define   RCX_SW_EXIT		(1<<23)
-#define   RSX_STATUS_MASK	0x00700000
+#define RSTDBYCTL		0x111b8
+#define   RS1EN			(1<<31)
+#define   RS2EN			(1<<30)
+#define   RS3EN			(1<<29)
+#define   D3RS3EN		(1<<28) /* Display D3 implies RS3 */
+#define   SWPROMORSX		(1<<27) /* RSx promotion timers ignored */
+#define   RCWAKERW		(1<<26) /* Resetwarn from PCH causes wakeup */
+#define   DPRSLPVREN		(1<<25) /* Fast voltage ramp enable */
+#define   GFXTGHYST		(1<<24) /* Hysteresis to allow trunk gating */
+#define   RCX_SW_EXIT		(1<<23) /* Leave RSx and prevent re-entry */
+#define   RSX_STATUS_MASK	(7<<20)
+#define   RSX_STATUS_ON		(0<<20)
+#define   RSX_STATUS_RC1	(1<<20)
+#define   RSX_STATUS_RC1E	(2<<20)
+#define   RSX_STATUS_RS1	(3<<20)
+#define   RSX_STATUS_RS2	(4<<20) /* aka rc6 */
+#define   RSX_STATUS_RSVD	(5<<20) /* deep rc6 unsupported on ilk */
+#define   RSX_STATUS_RS3	(6<<20) /* rs3 unsupported on ilk */
+#define   RSX_STATUS_RSVD2	(7<<20)
+#define   UWRCRSXE		(1<<19) /* wake counter limit prevents rsx */
+#define   RSCRP			(1<<18) /* rs requests control on rs1/2 reqs */
+#define   JRSC			(1<<17) /* rsx coupled to cpu c-state */
+#define   RS2INC0		(1<<16) /* allow rs2 in cpu c0 */
+#define   RS1CONTSAV_MASK	(3<<14)
+#define   RS1CONTSAV_NO_RS1	(0<<14) /* rs1 doesn't save/restore context */
+#define   RS1CONTSAV_RSVD	(1<<14)
+#define   RS1CONTSAV_SAVE_RS1	(2<<14) /* rs1 saves context */
+#define   RS1CONTSAV_FULL_RS1	(3<<14) /* rs1 saves and restores context */
+#define   NORMSLEXLAT_MASK	(3<<12)
+#define   SLOW_RS123		(0<<12)
+#define   SLOW_RS23		(1<<12)
+#define   SLOW_RS3		(2<<12)
+#define   NORMAL_RS123		(3<<12)
+#define   RCMODE_TIMEOUT	(1<<11) /* 0 is eval interval method */
+#define   IMPROMOEN		(1<<10) /* promo is immediate or delayed until next idle interval (only for timeout method above) */
+#define   RCENTSYNC		(1<<9) /* rs coupled to cpu c-state (3/6/7) */
+#define   STATELOCK		(1<<7) /* locked to rs_cstate if 0 */
+#define   RS_CSTATE_MASK	(3<<4)
+#define   RS_CSTATE_C367_RS1	(0<<4)
+#define   RS_CSTATE_C36_RS1_C7_RS2 (1<<4)
+#define   RS_CSTATE_RSVD	(2<<4)
+#define   RS_CSTATE_C367_RS2	(3<<4)
+#define   REDSAVES		(1<<3) /* no context save if was idle during rs0 */
+#define   REDRESTORES		(1<<2) /* no restore if was idle during rs0 */
 #define VIDCTL			0x111c0
 #define VIDSTS			0x111c8
 #define VIDSTART		0x111cc /* 8 bits */
@@ -1119,6 +1233,10 @@
 #define DDRMPLL1		0X12c20
 #define PEG_BAND_GAP_DATA	0x14d68
 
+#define GEN6_GT_PERF_STATUS	0x145948
+#define GEN6_RP_STATE_LIMITS	0x145994
+#define GEN6_RP_STATE_CAP	0x145998
+
 /*
  * Logical Context regs
  */
@@ -1168,7 +1286,6 @@
 #define VTOTAL(pipe) _PIPE(pipe, VTOTAL_A, VTOTAL_B)
 #define VBLANK(pipe) _PIPE(pipe, VBLANK_A, VBLANK_B)
 #define VSYNC(pipe) _PIPE(pipe, VSYNC_A, VSYNC_B)
-#define PIPESRC(pipe) _PIPE(pipe, PIPEASRC, PIPEBSRC)
 #define BCLRPAT(pipe) _PIPE(pipe, BCLRPAT_A, BCLRPAT_B)
 
 /* VGA port control */
@@ -2182,8 +2299,10 @@
 #define   PIPE_6BPC				(2 << 5)
 #define   PIPE_12BPC				(3 << 5)
 
+#define PIPESRC(pipe) _PIPE(pipe, PIPEASRC, PIPEBSRC)
 #define PIPECONF(pipe) _PIPE(pipe, PIPEACONF, PIPEBCONF)
 #define PIPEDSL(pipe)  _PIPE(pipe, PIPEADSL, PIPEBDSL)
+#define PIPEFRAMEPIXEL(pipe)  _PIPE(pipe, PIPEAFRAMEPIXEL, PIPEBFRAMEPIXEL)
 
 #define DSPARB			0x70030
 #define   DSPARB_CSTART_MASK	(0x7f << 7)
@@ -2271,8 +2390,13 @@
 
 /* Memory latency timer register */
 #define MLTR_ILK		0x11222
+#define  MLTR_WM1_SHIFT		0
+#define  MLTR_WM2_SHIFT		8
 /* the unit of memory self-refresh latency time is 0.5us */
 #define  ILK_SRLT_MASK		0x3f
+#define ILK_LATENCY(shift)	(I915_READ(MLTR_ILK) >> (shift) & ILK_SRLT_MASK)
+#define ILK_READ_WM1_LATENCY()	ILK_LATENCY(MLTR_WM1_SHIFT)
+#define ILK_READ_WM2_LATENCY()	ILK_LATENCY(MLTR_WM2_SHIFT)
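+/* Illustrative note (not part of this patch): the latency fields are in
+ * 0.5us units, so a raw value of 4 from ILK_READ_WM1_LATENCY() corresponds
+ * to 2us of memory self-refresh latency.
+ */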
 
 /* define the fifo size on Ironlake */
 #define ILK_DISPLAY_FIFO	128
@@ -2291,6 +2415,40 @@
 
 #define ILK_FIFO_LINE_SIZE	64
 
+/* define the WM info on Sandybridge */
+#define SNB_DISPLAY_FIFO	128
+#define SNB_DISPLAY_MAXWM	0x7f	/* bit 16:22 */
+#define SNB_DISPLAY_DFTWM	8
+#define SNB_CURSOR_FIFO		32
+#define SNB_CURSOR_MAXWM	0x1f	/* bit 4:0 */
+#define SNB_CURSOR_DFTWM	8
+
+#define SNB_DISPLAY_SR_FIFO	512
+#define SNB_DISPLAY_MAX_SRWM	0x1ff	/* bit 16:8 */
+#define SNB_DISPLAY_DFT_SRWM	0x3f
+#define SNB_CURSOR_SR_FIFO	64
+#define SNB_CURSOR_MAX_SRWM	0x3f	/* bit 5:0 */
+#define SNB_CURSOR_DFT_SRWM	8
+
+#define SNB_FBC_MAX_SRWM	0xf	/* bit 23:20 */
+
+#define SNB_FIFO_LINE_SIZE	64
+
+
+/* the address where we read all of the memory latency values */
+#define SSKPD			0x5d10
+#define SSKPD_WM_MASK		0x3f
+#define SSKPD_WM0_SHIFT		0
+#define SSKPD_WM1_SHIFT		8
+#define SSKPD_WM2_SHIFT		16
+#define SSKPD_WM3_SHIFT		24
+
+#define SNB_LATENCY(shift)	(I915_READ(MCHBAR_MIRROR_BASE_SNB + SSKPD) >> (shift) & SSKPD_WM_MASK)
+#define SNB_READ_WM0_LATENCY()		SNB_LATENCY(SSKPD_WM0_SHIFT)
+#define SNB_READ_WM1_LATENCY()		SNB_LATENCY(SSKPD_WM1_SHIFT)
+#define SNB_READ_WM2_LATENCY()		SNB_LATENCY(SSKPD_WM2_SHIFT)
+#define SNB_READ_WM3_LATENCY()		SNB_LATENCY(SSKPD_WM3_SHIFT)
+
 /*
  * The two pipe frame counter registers are not synchronized, so
  * reading a stable value is somewhat tricky. The following code
@@ -2351,6 +2509,10 @@
 #define CURBBASE		0x700c4
 #define CURBPOS			0x700c8
 
+#define CURCNTR(pipe) _PIPE(pipe, CURACNTR, CURBCNTR)
+#define CURBASE(pipe) _PIPE(pipe, CURABASE, CURBBASE)
+#define CURPOS(pipe) _PIPE(pipe, CURAPOS, CURBPOS)
+
 /* Display A control */
 #define DSPACNTR                0x70180
 #define   DISPLAY_PLANE_ENABLE			(1<<31)
@@ -2589,6 +2751,8 @@
 #define GTIER   0x4401c
 
 #define ILK_DISPLAY_CHICKEN2	0x42004
+/* Required on all Ironlake and Sandybridge according to the B-Spec. */
+#define  ILK_ELPIN_409_SELECT	(1 << 25)
 #define  ILK_DPARB_GATE	(1<<22)
 #define  ILK_VSDPFD_FULL	(1<<21)
 #define ILK_DISPLAY_CHICKEN_FUSES	0x42014
@@ -2600,6 +2764,8 @@
 #define  ILK_DESKTOP			(1<<23)
 #define ILK_DSPCLK_GATE		0x42020
 #define  ILK_DPARB_CLK_GATE	(1<<5)
+#define  ILK_DPFD_CLK_GATE	(1<<7)
+
 /* According to spec this bit 7/8/9 of 0x42020 should be set to enable FBC */
 #define   ILK_CLK_FBC		(1<<7)
 #define   ILK_DPFC_DIS1		(1<<8)
@@ -2612,12 +2778,41 @@
 /* PCH */
 
 /* south display engine interrupt */
+#define SDE_AUDIO_POWER_D	(1 << 27)
+#define SDE_AUDIO_POWER_C	(1 << 26)
+#define SDE_AUDIO_POWER_B	(1 << 25)
+#define SDE_AUDIO_POWER_SHIFT	(25)
+#define SDE_AUDIO_POWER_MASK	(7 << SDE_AUDIO_POWER_SHIFT)
+#define SDE_GMBUS		(1 << 24)
+#define SDE_AUDIO_HDCP_TRANSB	(1 << 23)
+#define SDE_AUDIO_HDCP_TRANSA	(1 << 22)
+#define SDE_AUDIO_HDCP_MASK	(3 << 22)
+#define SDE_AUDIO_TRANSB	(1 << 21)
+#define SDE_AUDIO_TRANSA	(1 << 20)
+#define SDE_AUDIO_TRANS_MASK	(3 << 20)
+#define SDE_POISON		(1 << 19)
+/* 18 reserved */
+#define SDE_FDI_RXB		(1 << 17)
+#define SDE_FDI_RXA		(1 << 16)
+#define SDE_FDI_MASK		(3 << 16)
+#define SDE_AUXD		(1 << 15)
+#define SDE_AUXC		(1 << 14)
+#define SDE_AUXB		(1 << 13)
+#define SDE_AUX_MASK		(7 << 13)
+/* 12 reserved */
 #define SDE_CRT_HOTPLUG         (1 << 11)
 #define SDE_PORTD_HOTPLUG       (1 << 10)
 #define SDE_PORTC_HOTPLUG       (1 << 9)
 #define SDE_PORTB_HOTPLUG       (1 << 8)
 #define SDE_SDVOB_HOTPLUG       (1 << 6)
 #define SDE_HOTPLUG_MASK	(0xf << 8)
+#define SDE_TRANSB_CRC_DONE	(1 << 5)
+#define SDE_TRANSB_CRC_ERR	(1 << 4)
+#define SDE_TRANSB_FIFO_UNDER	(1 << 3)
+#define SDE_TRANSA_CRC_DONE	(1 << 2)
+#define SDE_TRANSA_CRC_ERR	(1 << 1)
+#define SDE_TRANSA_FIFO_UNDER	(1 << 0)
+#define SDE_TRANS_MASK		(0x3f)
 /* CPT */
 #define SDE_CRT_HOTPLUG_CPT	(1 << 19)
 #define SDE_PORTD_HOTPLUG_CPT	(1 << 23)
@@ -2679,6 +2874,7 @@
 #define PCH_DPLL(pipe) _PIPE(pipe, PCH_DPLL_A, PCH_DPLL_B)
 
 #define PCH_FPA0                0xc6040
+#define  FP_CB_TUNE		(0x3<<22)
 #define PCH_FPA1                0xc6044
 #define PCH_FPB0                0xc6048
 #define PCH_FPB1                0xc604c
@@ -3057,10 +3253,74 @@
 #define  EDP_LINK_TRAIN_600MV_3_5DB_SNB_A	(0x01<<22)
 #define  EDP_LINK_TRAIN_800MV_0DB_SNB_A		(0x0<<22)
 /* SNB B-stepping */
-#define  EDP_LINK_TRAIN_400MV_0DB_SNB_B		(0x0<<22)
-#define  EDP_LINK_TRAIN_400MV_6DB_SNB_B		(0x3a<<22)
-#define  EDP_LINK_TRAIN_600MV_3_5DB_SNB_B	(0x39<<22)
-#define  EDP_LINK_TRAIN_800MV_0DB_SNB_B		(0x38<<22)
+#define  EDP_LINK_TRAIN_400_600MV_0DB_SNB_B	(0x0<<22)
+#define  EDP_LINK_TRAIN_400MV_3_5DB_SNB_B	(0x1<<22)
+#define  EDP_LINK_TRAIN_400_600MV_6DB_SNB_B	(0x3a<<22)
+#define  EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B	(0x39<<22)
+#define  EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B	(0x38<<22)
 #define  EDP_LINK_TRAIN_VOL_EMP_MASK_SNB	(0x3f<<22)
 
+#define  FORCEWAKE				0xA18C
+#define  FORCEWAKE_ACK				0x130090
+
+#define GEN6_RPNSWREQ				0xA008
+#define   GEN6_TURBO_DISABLE			(1<<31)
+#define   GEN6_FREQUENCY(x)			((x)<<25)
+#define   GEN6_OFFSET(x)			((x)<<19)
+#define   GEN6_AGGRESSIVE_TURBO			(0<<15)
+#define GEN6_RC_VIDEO_FREQ			0xA00C
+#define GEN6_RC_CONTROL				0xA090
+#define   GEN6_RC_CTL_RC6pp_ENABLE		(1<<16)
+#define   GEN6_RC_CTL_RC6p_ENABLE		(1<<17)
+#define   GEN6_RC_CTL_RC6_ENABLE		(1<<18)
+#define   GEN6_RC_CTL_RC1e_ENABLE		(1<<20)
+#define   GEN6_RC_CTL_RC7_ENABLE		(1<<22)
+#define   GEN6_RC_CTL_EI_MODE(x)		((x)<<27)
+#define   GEN6_RC_CTL_HW_ENABLE			(1<<31)
+#define GEN6_RP_DOWN_TIMEOUT			0xA010
+#define GEN6_RP_INTERRUPT_LIMITS		0xA014
+#define GEN6_RPSTAT1				0xA01C
+#define GEN6_RP_CONTROL				0xA024
+#define   GEN6_RP_MEDIA_TURBO			(1<<11)
+#define   GEN6_RP_USE_NORMAL_FREQ		(1<<9)
+#define   GEN6_RP_MEDIA_IS_GFX			(1<<8)
+#define   GEN6_RP_ENABLE			(1<<7)
+#define   GEN6_RP_UP_BUSY_MAX			(0x2<<3)
+#define   GEN6_RP_DOWN_BUSY_MIN			(0x2<<0)
+#define GEN6_RP_UP_THRESHOLD			0xA02C
+#define GEN6_RP_DOWN_THRESHOLD			0xA030
+#define GEN6_RP_UP_EI				0xA068
+#define GEN6_RP_DOWN_EI				0xA06C
+#define GEN6_RP_IDLE_HYSTERSIS			0xA070
+#define GEN6_RC_STATE				0xA094
+#define GEN6_RC1_WAKE_RATE_LIMIT		0xA098
+#define GEN6_RC6_WAKE_RATE_LIMIT		0xA09C
+#define GEN6_RC6pp_WAKE_RATE_LIMIT		0xA0A0
+#define GEN6_RC_EVALUATION_INTERVAL		0xA0A8
+#define GEN6_RC_IDLE_HYSTERSIS			0xA0AC
+#define GEN6_RC_SLEEP				0xA0B0
+#define GEN6_RC1e_THRESHOLD			0xA0B4
+#define GEN6_RC6_THRESHOLD			0xA0B8
+#define GEN6_RC6p_THRESHOLD			0xA0BC
+#define GEN6_RC6pp_THRESHOLD			0xA0C0
+#define GEN6_PMINTRMSK				0xA168
+
+#define GEN6_PMISR				0x44020
+#define GEN6_PMIMR				0x44024
+#define GEN6_PMIIR				0x44028
+#define GEN6_PMIER				0x4402C
+#define  GEN6_PM_MBOX_EVENT			(1<<25)
+#define  GEN6_PM_THERMAL_EVENT			(1<<24)
+#define  GEN6_PM_RP_DOWN_TIMEOUT		(1<<6)
+#define  GEN6_PM_RP_UP_THRESHOLD		(1<<5)
+#define  GEN6_PM_RP_DOWN_THRESHOLD		(1<<4)
+#define  GEN6_PM_RP_UP_EI_EXPIRED		(1<<2)
+#define  GEN6_PM_RP_DOWN_EI_EXPIRED		(1<<1)
+
+#define GEN6_PCODE_MAILBOX			0x138124
+#define   GEN6_PCODE_READY			(1<<31)
+#define   GEN6_READ_OC_PARAMS			0xc
+#define   GEN6_PCODE_WRITE_MIN_FREQ_TABLE	0x9
+#define GEN6_PCODE_DATA				0x138128
+
 #endif /* _I915_REG_H_ */
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 42729d2..0521ecf2 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -235,6 +235,7 @@
 static void i915_save_modeset_reg(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	int i;
 
 	if (drm_core_check_feature(dev, DRIVER_MODESET))
 		return;
@@ -367,6 +368,28 @@
 	}
 	i915_save_palette(dev, PIPE_B);
 	dev_priv->savePIPEBSTAT = I915_READ(PIPEBSTAT);
+
+	/* Fences */
+	switch (INTEL_INFO(dev)->gen) {
+	case 6:
+		for (i = 0; i < 16; i++)
+			dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
+		break;
+	case 5:
+	case 4:
+		for (i = 0; i < 16; i++)
+			dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
+		break;
+	case 3:
+		if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
+			for (i = 0; i < 8; i++)
+				dev_priv->saveFENCE[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
+	case 2:
+		for (i = 0; i < 8; i++)
+			dev_priv->saveFENCE[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
+		break;
+	}
+
 	return;
 }
 
@@ -375,10 +398,33 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int dpll_a_reg, fpa0_reg, fpa1_reg;
 	int dpll_b_reg, fpb0_reg, fpb1_reg;
+	int i;
 
 	if (drm_core_check_feature(dev, DRIVER_MODESET))
 		return;
 
+	/* Fences */
+	switch (INTEL_INFO(dev)->gen) {
+	case 6:
+		for (i = 0; i < 16; i++)
+			I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), dev_priv->saveFENCE[i]);
+		break;
+	case 5:
+	case 4:
+		for (i = 0; i < 16; i++)
+			I915_WRITE64(FENCE_REG_965_0 + (i * 8), dev_priv->saveFENCE[i]);
+		break;
+	case 3:
+	case 2:
+		if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
+			for (i = 0; i < 8; i++)
+				I915_WRITE(FENCE_REG_945_8 + (i * 4), dev_priv->saveFENCE[i+8]);
+		for (i = 0; i < 8; i++)
+			I915_WRITE(FENCE_REG_830_0 + (i * 4), dev_priv->saveFENCE[i]);
+		break;
+	}
+
+
 	if (HAS_PCH_SPLIT(dev)) {
 		dpll_a_reg = PCH_DPLL_A;
 		dpll_b_reg = PCH_DPLL_B;
@@ -694,7 +740,7 @@
 		I915_WRITE(PCH_PP_OFF_DELAYS, dev_priv->savePP_OFF_DELAYS);
 		I915_WRITE(PCH_PP_DIVISOR, dev_priv->savePP_DIVISOR);
 		I915_WRITE(PCH_PP_CONTROL, dev_priv->savePP_CONTROL);
-		I915_WRITE(MCHBAR_RENDER_STANDBY,
+		I915_WRITE(RSTDBYCTL,
 			   dev_priv->saveMCHBAR_RENDER_STANDBY);
 	} else {
 		I915_WRITE(PFIT_PGM_RATIOS, dev_priv->savePFIT_PGM_RATIOS);
@@ -765,14 +811,16 @@
 		dev_priv->saveFDI_RXA_IMR = I915_READ(FDI_RXA_IMR);
 		dev_priv->saveFDI_RXB_IMR = I915_READ(FDI_RXB_IMR);
 		dev_priv->saveMCHBAR_RENDER_STANDBY =
-			I915_READ(MCHBAR_RENDER_STANDBY);
+			I915_READ(RSTDBYCTL);
 	} else {
 		dev_priv->saveIER = I915_READ(IER);
 		dev_priv->saveIMR = I915_READ(IMR);
 	}
 
-	if (HAS_PCH_SPLIT(dev))
+	if (IS_IRONLAKE_M(dev))
 		ironlake_disable_drps(dev);
+	if (IS_GEN6(dev))
+		gen6_disable_rps(dev);
 
 	/* Cache mode state */
 	dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);
@@ -788,28 +836,6 @@
 	for (i = 0; i < 3; i++)
 		dev_priv->saveSWF2[i] = I915_READ(SWF30 + (i << 2));
 
-	/* Fences */
-	switch (INTEL_INFO(dev)->gen) {
-	case 6:
-		for (i = 0; i < 16; i++)
-			dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
-		break;
-	case 5:
-	case 4:
-		for (i = 0; i < 16; i++)
-			dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
-		break;
-	case 3:
-		if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
-			for (i = 0; i < 8; i++)
-				dev_priv->saveFENCE[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
-	case 2:
-		for (i = 0; i < 8; i++)
-			dev_priv->saveFENCE[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
-		break;
-
-	}
-
 	return 0;
 }
 
@@ -823,27 +849,6 @@
 	/* Hardware status page */
 	I915_WRITE(HWS_PGA, dev_priv->saveHWS);
 
-	/* Fences */
-	switch (INTEL_INFO(dev)->gen) {
-	case 6:
-		for (i = 0; i < 16; i++)
-			I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), dev_priv->saveFENCE[i]);
-		break;
-	case 5:
-	case 4:
-		for (i = 0; i < 16; i++)
-			I915_WRITE64(FENCE_REG_965_0 + (i * 8), dev_priv->saveFENCE[i]);
-		break;
-	case 3:
-	case 2:
-		if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
-			for (i = 0; i < 8; i++)
-				I915_WRITE(FENCE_REG_945_8 + (i * 4), dev_priv->saveFENCE[i+8]);
-		for (i = 0; i < 8; i++)
-			I915_WRITE(FENCE_REG_830_0 + (i * 4), dev_priv->saveFENCE[i]);
-		break;
-	}
-
 	i915_restore_display(dev);
 
 	/* Interrupt state */
@@ -860,13 +865,16 @@
 	}
 
 	/* Clock gating state */
-	intel_init_clock_gating(dev);
+	intel_enable_clock_gating(dev);
 
-	if (HAS_PCH_SPLIT(dev)) {
+	if (IS_IRONLAKE_M(dev)) {
 		ironlake_enable_drps(dev);
 		intel_init_emon(dev);
 	}
 
+	if (IS_GEN6(dev))
+		gen6_enable_rps(dev_priv);
+
 	/* Cache mode state */
 	I915_WRITE (CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000);
 
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index fea97a2..7f0fc3ed 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -6,6 +6,7 @@
 #include <linux/tracepoint.h>
 
 #include <drm/drmP.h>
+#include "i915_drv.h"
 
 #undef TRACE_SYSTEM
 #define TRACE_SYSTEM i915
@@ -16,18 +17,18 @@
 
 TRACE_EVENT(i915_gem_object_create,
 
-	    TP_PROTO(struct drm_gem_object *obj),
+	    TP_PROTO(struct drm_i915_gem_object *obj),
 
 	    TP_ARGS(obj),
 
 	    TP_STRUCT__entry(
-			     __field(struct drm_gem_object *, obj)
+			     __field(struct drm_i915_gem_object *, obj)
 			     __field(u32, size)
 			     ),
 
 	    TP_fast_assign(
 			   __entry->obj = obj;
-			   __entry->size = obj->size;
+			   __entry->size = obj->base.size;
 			   ),
 
 	    TP_printk("obj=%p, size=%u", __entry->obj, __entry->size)
@@ -35,40 +36,43 @@
 
 TRACE_EVENT(i915_gem_object_bind,
 
-	    TP_PROTO(struct drm_gem_object *obj, u32 gtt_offset),
+	    TP_PROTO(struct drm_i915_gem_object *obj, u32 gtt_offset, bool mappable),
 
-	    TP_ARGS(obj, gtt_offset),
+	    TP_ARGS(obj, gtt_offset, mappable),
 
 	    TP_STRUCT__entry(
-			     __field(struct drm_gem_object *, obj)
+			     __field(struct drm_i915_gem_object *, obj)
 			     __field(u32, gtt_offset)
+			     __field(bool, mappable)
 			     ),
 
 	    TP_fast_assign(
 			   __entry->obj = obj;
 			   __entry->gtt_offset = gtt_offset;
+			   __entry->mappable = mappable;
 			   ),
 
-	    TP_printk("obj=%p, gtt_offset=%08x",
-		      __entry->obj, __entry->gtt_offset)
+	    TP_printk("obj=%p, gtt_offset=%08x%s",
+		      __entry->obj, __entry->gtt_offset,
+		      __entry->mappable ? ", mappable" : "")
 );
 
 TRACE_EVENT(i915_gem_object_change_domain,
 
-	    TP_PROTO(struct drm_gem_object *obj, uint32_t old_read_domains, uint32_t old_write_domain),
+	    TP_PROTO(struct drm_i915_gem_object *obj, uint32_t old_read_domains, uint32_t old_write_domain),
 
 	    TP_ARGS(obj, old_read_domains, old_write_domain),
 
 	    TP_STRUCT__entry(
-			     __field(struct drm_gem_object *, obj)
+			     __field(struct drm_i915_gem_object *, obj)
 			     __field(u32, read_domains)
 			     __field(u32, write_domain)
 			     ),
 
 	    TP_fast_assign(
 			   __entry->obj = obj;
-			   __entry->read_domains = obj->read_domains | (old_read_domains << 16);
-			   __entry->write_domain = obj->write_domain | (old_write_domain << 16);
+			   __entry->read_domains = obj->base.read_domains | (old_read_domains << 16);
+			   __entry->write_domain = obj->base.write_domain | (old_write_domain << 16);
 			   ),
 
 	    TP_printk("obj=%p, read=%04x, write=%04x",
@@ -76,36 +80,14 @@
 		      __entry->read_domains, __entry->write_domain)
 );
 
-TRACE_EVENT(i915_gem_object_get_fence,
-
-	    TP_PROTO(struct drm_gem_object *obj, int fence, int tiling_mode),
-
-	    TP_ARGS(obj, fence, tiling_mode),
-
-	    TP_STRUCT__entry(
-			     __field(struct drm_gem_object *, obj)
-			     __field(int, fence)
-			     __field(int, tiling_mode)
-			     ),
-
-	    TP_fast_assign(
-			   __entry->obj = obj;
-			   __entry->fence = fence;
-			   __entry->tiling_mode = tiling_mode;
-			   ),
-
-	    TP_printk("obj=%p, fence=%d, tiling=%d",
-		      __entry->obj, __entry->fence, __entry->tiling_mode)
-);
-
 DECLARE_EVENT_CLASS(i915_gem_object,
 
-	    TP_PROTO(struct drm_gem_object *obj),
+	    TP_PROTO(struct drm_i915_gem_object *obj),
 
 	    TP_ARGS(obj),
 
 	    TP_STRUCT__entry(
-			     __field(struct drm_gem_object *, obj)
+			     __field(struct drm_i915_gem_object *, obj)
 			     ),
 
 	    TP_fast_assign(
@@ -117,21 +99,21 @@
 
 DEFINE_EVENT(i915_gem_object, i915_gem_object_clflush,
 
-	    TP_PROTO(struct drm_gem_object *obj),
+	    TP_PROTO(struct drm_i915_gem_object *obj),
 
 	    TP_ARGS(obj)
 );
 
 DEFINE_EVENT(i915_gem_object, i915_gem_object_unbind,
 
-	    TP_PROTO(struct drm_gem_object *obj),
+	    TP_PROTO(struct drm_i915_gem_object *obj),
 
 	    TP_ARGS(obj)
 );
 
 DEFINE_EVENT(i915_gem_object, i915_gem_object_destroy,
 
-	    TP_PROTO(struct drm_gem_object *obj),
+	    TP_PROTO(struct drm_i915_gem_object *obj),
 
 	    TP_ARGS(obj)
 );
@@ -263,13 +245,13 @@
 );
 
 TRACE_EVENT(i915_flip_request,
-	    TP_PROTO(int plane, struct drm_gem_object *obj),
+	    TP_PROTO(int plane, struct drm_i915_gem_object *obj),
 
 	    TP_ARGS(plane, obj),
 
 	    TP_STRUCT__entry(
 		    __field(int, plane)
-		    __field(struct drm_gem_object *, obj)
+		    __field(struct drm_i915_gem_object *, obj)
 		    ),
 
 	    TP_fast_assign(
@@ -281,13 +263,13 @@
 );
 
 TRACE_EVENT(i915_flip_complete,
-	    TP_PROTO(int plane, struct drm_gem_object *obj),
+	    TP_PROTO(int plane, struct drm_i915_gem_object *obj),
 
 	    TP_ARGS(plane, obj),
 
 	    TP_STRUCT__entry(
 		    __field(int, plane)
-		    __field(struct drm_gem_object *, obj)
+		    __field(struct drm_i915_gem_object *, obj)
 		    ),
 
 	    TP_fast_assign(
@@ -298,6 +280,29 @@
 	    TP_printk("plane=%d, obj=%p", __entry->plane, __entry->obj)
 );
 
+TRACE_EVENT(i915_reg_rw,
+           TP_PROTO(int cmd, uint32_t reg, uint64_t val, int len),
+
+           TP_ARGS(cmd, reg, val, len),
+
+           TP_STRUCT__entry(
+                   __field(int, cmd)
+                   __field(uint32_t, reg)
+                   __field(uint64_t, val)
+                   __field(int, len)
+                   ),
+
+           TP_fast_assign(
+                   __entry->cmd = cmd;
+                   __entry->reg = reg;
+                   __entry->val = (uint64_t)val;
+                   __entry->len = len;
+                   ),
+
+           TP_printk("cmd=%c, reg=0x%x, val=0x%llx, len=%d",
+                     __entry->cmd, __entry->reg, __entry->val, __entry->len)
+);
+
 #endif /* _I915_TRACE_H_ */
 
 /* This part must be outside protection */
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 8df5743..17035b8 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -30,6 +30,7 @@
 #include "drm.h"
 #include "drm_crtc.h"
 #include "drm_crtc_helper.h"
+#include "drm_edid.h"
 #include "intel_drv.h"
 #include "i915_drm.h"
 #include "i915_drv.h"
@@ -287,8 +288,9 @@
 	return i2c_transfer(&dev_priv->gmbus[ddc_bus].adapter, msgs, 1) == 1;
 }
 
-static bool intel_crt_detect_ddc(struct intel_crt *crt)
+static bool intel_crt_detect_ddc(struct drm_connector *connector)
 {
+	struct intel_crt *crt = intel_attached_crt(connector);
 	struct drm_i915_private *dev_priv = crt->base.base.dev->dev_private;
 
 	/* CRT should always be at 0, but check anyway */
@@ -301,8 +303,26 @@
 	}
 
 	if (intel_ddc_probe(&crt->base, dev_priv->crt_ddc_pin)) {
-		DRM_DEBUG_KMS("CRT detected via DDC:0x50 [EDID]\n");
-		return true;
+		struct edid *edid;
+		bool is_digital = false;
+
+		edid = drm_get_edid(connector,
+			&dev_priv->gmbus[dev_priv->crt_ddc_pin].adapter);
+		/*
+		 * This may be a DVI-I connector with a shared DDC
+		 * link between analog and digital outputs, so we
+		 * have to check the EDID input spec of the attached device.
+		 */
+		if (edid != NULL) {
+			is_digital = edid->input & DRM_EDID_INPUT_DIGITAL;
+			connector->display_info.raw_edid = NULL;
+			kfree(edid);
+		}
+
+		if (!is_digital) {
+			DRM_DEBUG_KMS("CRT detected via DDC:0x50 [EDID]\n");
+			return true;
+		}
 	}
 
 	return false;
@@ -458,7 +478,7 @@
 		}
 	}
 
-	if (intel_crt_detect_ddc(crt))
+	if (intel_crt_detect_ddc(connector))
 		return connector_status_connected;
 
 	if (!force)
@@ -472,7 +492,7 @@
 		crtc = intel_get_load_detect_pipe(&crt->base, connector,
 						  NULL, &dpms_mode);
 		if (crtc) {
-			if (intel_crt_detect_ddc(crt))
+			if (intel_crt_detect_ddc(connector))
 				status = connector_status_connected;
 			else
 				status = intel_crt_load_detect(crtc, crt);
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index fca5232..25d9688 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -642,26 +642,23 @@
         .find_pll = intel_find_pll_ironlake_dp,
 };
 
-static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc)
+static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
+						int refclk)
 {
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	const intel_limit_t *limit;
-	int refclk = 120;
 
 	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
-		if (dev_priv->lvds_use_ssc && dev_priv->lvds_ssc_freq == 100)
-			refclk = 100;
-
 		if ((I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) ==
 		    LVDS_CLKB_POWER_UP) {
 			/* LVDS dual channel */
-			if (refclk == 100)
+			if (refclk == 100000)
 				limit = &intel_limits_ironlake_dual_lvds_100m;
 			else
 				limit = &intel_limits_ironlake_dual_lvds;
 		} else {
-			if (refclk == 100)
+			if (refclk == 100000)
 				limit = &intel_limits_ironlake_single_lvds_100m;
 			else
 				limit = &intel_limits_ironlake_single_lvds;
@@ -702,13 +699,13 @@
 	return limit;
 }
 
-static const intel_limit_t *intel_limit(struct drm_crtc *crtc)
+static const intel_limit_t *intel_limit(struct drm_crtc *crtc, int refclk)
 {
 	struct drm_device *dev = crtc->dev;
 	const intel_limit_t *limit;
 
 	if (HAS_PCH_SPLIT(dev))
-		limit = intel_ironlake_limit(crtc);
+		limit = intel_ironlake_limit(crtc, refclk);
 	else if (IS_G4X(dev)) {
 		limit = intel_g4x_limit(crtc);
 	} else if (IS_PINEVIEW(dev)) {
@@ -773,11 +770,10 @@
  * the given connectors.
  */
 
-static bool intel_PLL_is_valid(struct drm_crtc *crtc, intel_clock_t *clock)
+static bool intel_PLL_is_valid(struct drm_device *dev,
+			       const intel_limit_t *limit,
+			       const intel_clock_t *clock)
 {
-	const intel_limit_t *limit = intel_limit (crtc);
-	struct drm_device *dev = crtc->dev;
-
 	if (clock->p1  < limit->p1.min  || limit->p1.max  < clock->p1)
 		INTELPllInvalid ("p1 out of range\n");
 	if (clock->p   < limit->p.min   || limit->p.max   < clock->p)
@@ -849,8 +845,8 @@
 					int this_err;
 
 					intel_clock(dev, refclk, &clock);
-
-					if (!intel_PLL_is_valid(crtc, &clock))
+					if (!intel_PLL_is_valid(dev, limit,
+								&clock))
 						continue;
 
 					this_err = abs(clock.dot - target);
@@ -912,9 +908,11 @@
 					int this_err;
 
 					intel_clock(dev, refclk, &clock);
-					if (!intel_PLL_is_valid(crtc, &clock))
+					if (!intel_PLL_is_valid(dev, limit,
+								&clock))
 						continue;
-					this_err = abs(clock.dot - target) ;
+
+					this_err = abs(clock.dot - target);
 					if (this_err < err_most) {
 						*best_clock = clock;
 						err_most = this_err;
@@ -1066,13 +1064,13 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_framebuffer *fb = crtc->fb;
 	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
-	struct drm_i915_gem_object *obj_priv = to_intel_bo(intel_fb->obj);
+	struct drm_i915_gem_object *obj = intel_fb->obj;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	int plane, i;
 	u32 fbc_ctl, fbc_ctl2;
 
 	if (fb->pitch == dev_priv->cfb_pitch &&
-	    obj_priv->fence_reg == dev_priv->cfb_fence &&
+	    obj->fence_reg == dev_priv->cfb_fence &&
 	    intel_crtc->plane == dev_priv->cfb_plane &&
 	    I915_READ(FBC_CONTROL) & FBC_CTL_EN)
 		return;
@@ -1086,7 +1084,7 @@
 
 	/* FBC_CTL wants 64B units */
 	dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1;
-	dev_priv->cfb_fence = obj_priv->fence_reg;
+	dev_priv->cfb_fence = obj->fence_reg;
 	dev_priv->cfb_plane = intel_crtc->plane;
 	plane = dev_priv->cfb_plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB;
 
@@ -1096,7 +1094,7 @@
 
 	/* Set it up... */
 	fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | plane;
-	if (obj_priv->tiling_mode != I915_TILING_NONE)
+	if (obj->tiling_mode != I915_TILING_NONE)
 		fbc_ctl2 |= FBC_CTL_CPU_FENCE;
 	I915_WRITE(FBC_CONTROL2, fbc_ctl2);
 	I915_WRITE(FBC_FENCE_OFF, crtc->y);
@@ -1107,7 +1105,7 @@
 		fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
 	fbc_ctl |= (dev_priv->cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
 	fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT;
-	if (obj_priv->tiling_mode != I915_TILING_NONE)
+	if (obj->tiling_mode != I915_TILING_NONE)
 		fbc_ctl |= dev_priv->cfb_fence;
 	I915_WRITE(FBC_CONTROL, fbc_ctl);
 
@@ -1150,7 +1148,7 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_framebuffer *fb = crtc->fb;
 	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
-	struct drm_i915_gem_object *obj_priv = to_intel_bo(intel_fb->obj);
+	struct drm_i915_gem_object *obj = intel_fb->obj;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
 	unsigned long stall_watermark = 200;
@@ -1159,7 +1157,7 @@
 	dpfc_ctl = I915_READ(DPFC_CONTROL);
 	if (dpfc_ctl & DPFC_CTL_EN) {
 		if (dev_priv->cfb_pitch == dev_priv->cfb_pitch / 64 - 1 &&
-		    dev_priv->cfb_fence == obj_priv->fence_reg &&
+		    dev_priv->cfb_fence == obj->fence_reg &&
 		    dev_priv->cfb_plane == intel_crtc->plane &&
 		    dev_priv->cfb_y == crtc->y)
 			return;
@@ -1170,12 +1168,12 @@
 	}
 
 	dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1;
-	dev_priv->cfb_fence = obj_priv->fence_reg;
+	dev_priv->cfb_fence = obj->fence_reg;
 	dev_priv->cfb_plane = intel_crtc->plane;
 	dev_priv->cfb_y = crtc->y;
 
 	dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X;
-	if (obj_priv->tiling_mode != I915_TILING_NONE) {
+	if (obj->tiling_mode != I915_TILING_NONE) {
 		dpfc_ctl |= DPFC_CTL_FENCE_EN | dev_priv->cfb_fence;
 		I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY);
 	} else {
@@ -1221,7 +1219,7 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_framebuffer *fb = crtc->fb;
 	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
-	struct drm_i915_gem_object *obj_priv = to_intel_bo(intel_fb->obj);
+	struct drm_i915_gem_object *obj = intel_fb->obj;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
 	unsigned long stall_watermark = 200;
@@ -1230,9 +1228,9 @@
 	dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
 	if (dpfc_ctl & DPFC_CTL_EN) {
 		if (dev_priv->cfb_pitch == dev_priv->cfb_pitch / 64 - 1 &&
-		    dev_priv->cfb_fence == obj_priv->fence_reg &&
+		    dev_priv->cfb_fence == obj->fence_reg &&
 		    dev_priv->cfb_plane == intel_crtc->plane &&
-		    dev_priv->cfb_offset == obj_priv->gtt_offset &&
+		    dev_priv->cfb_offset == obj->gtt_offset &&
 		    dev_priv->cfb_y == crtc->y)
 			return;
 
@@ -1242,14 +1240,14 @@
 	}
 
 	dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1;
-	dev_priv->cfb_fence = obj_priv->fence_reg;
+	dev_priv->cfb_fence = obj->fence_reg;
 	dev_priv->cfb_plane = intel_crtc->plane;
-	dev_priv->cfb_offset = obj_priv->gtt_offset;
+	dev_priv->cfb_offset = obj->gtt_offset;
 	dev_priv->cfb_y = crtc->y;
 
 	dpfc_ctl &= DPFC_RESERVED;
 	dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X);
-	if (obj_priv->tiling_mode != I915_TILING_NONE) {
+	if (obj->tiling_mode != I915_TILING_NONE) {
 		dpfc_ctl |= (DPFC_CTL_FENCE_EN | dev_priv->cfb_fence);
 		I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY);
 	} else {
@@ -1260,10 +1258,16 @@
 		   (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
 		   (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
 	I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
-	I915_WRITE(ILK_FBC_RT_BASE, obj_priv->gtt_offset | ILK_FBC_RT_VALID);
+	I915_WRITE(ILK_FBC_RT_BASE, obj->gtt_offset | ILK_FBC_RT_VALID);
 	/* enable it... */
 	I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
 
+	if (IS_GEN6(dev)) {
+		I915_WRITE(SNB_DPFC_CTL_SA,
+			   SNB_CPU_FENCE_ENABLE | dev_priv->cfb_fence);
+		I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
+	}
+
 	DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
 }
 
@@ -1345,7 +1349,7 @@
 	struct intel_crtc *intel_crtc;
 	struct drm_framebuffer *fb;
 	struct intel_framebuffer *intel_fb;
-	struct drm_i915_gem_object *obj_priv;
+	struct drm_i915_gem_object *obj;
 
 	DRM_DEBUG_KMS("\n");
 
@@ -1384,9 +1388,9 @@
 	intel_crtc = to_intel_crtc(crtc);
 	fb = crtc->fb;
 	intel_fb = to_intel_framebuffer(fb);
-	obj_priv = to_intel_bo(intel_fb->obj);
+	obj = intel_fb->obj;
 
-	if (intel_fb->obj->size > dev_priv->cfb_size) {
+	if (intel_fb->obj->base.size > dev_priv->cfb_size) {
 		DRM_DEBUG_KMS("framebuffer too large, disabling "
 			      "compression\n");
 		dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
@@ -1410,7 +1414,7 @@
 		dev_priv->no_fbc_reason = FBC_BAD_PLANE;
 		goto out_disable;
 	}
-	if (obj_priv->tiling_mode != I915_TILING_X) {
+	if (obj->tiling_mode != I915_TILING_X) {
 		DRM_DEBUG_KMS("framebuffer not tiled, disabling compression\n");
 		dev_priv->no_fbc_reason = FBC_NOT_TILED;
 		goto out_disable;
@@ -1433,14 +1437,13 @@
 
 int
 intel_pin_and_fence_fb_obj(struct drm_device *dev,
-			   struct drm_gem_object *obj,
-			   bool pipelined)
+			   struct drm_i915_gem_object *obj,
+			   struct intel_ring_buffer *pipelined)
 {
-	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 	u32 alignment;
 	int ret;
 
-	switch (obj_priv->tiling_mode) {
+	switch (obj->tiling_mode) {
 	case I915_TILING_NONE:
 		if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
 			alignment = 128 * 1024;
@@ -1461,7 +1464,7 @@
 		BUG();
 	}
 
-	ret = i915_gem_object_pin(obj, alignment);
+	ret = i915_gem_object_pin(obj, alignment, true);
 	if (ret)
 		return ret;
 
@@ -1474,9 +1477,8 @@
 	 * framebuffer compression.  For simplicity, we always install
 	 * a fence as the cost is not that onerous.
 	 */
-	if (obj_priv->fence_reg == I915_FENCE_REG_NONE &&
-	    obj_priv->tiling_mode != I915_TILING_NONE) {
-		ret = i915_gem_object_get_fence_reg(obj, false);
+	if (obj->tiling_mode != I915_TILING_NONE) {
+		ret = i915_gem_object_get_fence(obj, pipelined, false);
 		if (ret)
 			goto err_unpin;
 	}
@@ -1497,8 +1499,7 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	struct intel_framebuffer *intel_fb;
-	struct drm_i915_gem_object *obj_priv;
-	struct drm_gem_object *obj;
+	struct drm_i915_gem_object *obj;
 	int plane = intel_crtc->plane;
 	unsigned long Start, Offset;
 	u32 dspcntr;
@@ -1515,7 +1516,6 @@
 
 	intel_fb = to_intel_framebuffer(fb);
 	obj = intel_fb->obj;
-	obj_priv = to_intel_bo(obj);
 
 	reg = DSPCNTR(plane);
 	dspcntr = I915_READ(reg);
@@ -1540,7 +1540,7 @@
 		return -EINVAL;
 	}
 	if (INTEL_INFO(dev)->gen >= 4) {
-		if (obj_priv->tiling_mode != I915_TILING_NONE)
+		if (obj->tiling_mode != I915_TILING_NONE)
 			dspcntr |= DISPPLANE_TILED;
 		else
 			dspcntr &= ~DISPPLANE_TILED;
@@ -1552,7 +1552,7 @@
 
 	I915_WRITE(reg, dspcntr);
 
-	Start = obj_priv->gtt_offset;
+	Start = obj->gtt_offset;
 	Offset = y * fb->pitch + x * (fb->bits_per_pixel / 8);
 
 	DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
@@ -1598,7 +1598,7 @@
 	mutex_lock(&dev->struct_mutex);
 	ret = intel_pin_and_fence_fb_obj(dev,
 					 to_intel_framebuffer(crtc->fb)->obj,
-					 false);
+					 NULL);
 	if (ret != 0) {
 		mutex_unlock(&dev->struct_mutex);
 		return ret;
@@ -1606,18 +1606,17 @@
 
 	if (old_fb) {
 		struct drm_i915_private *dev_priv = dev->dev_private;
-		struct drm_gem_object *obj = to_intel_framebuffer(old_fb)->obj;
-		struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+		struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj;
 
 		wait_event(dev_priv->pending_flip_queue,
-			   atomic_read(&obj_priv->pending_flip) == 0);
+			   atomic_read(&obj->pending_flip) == 0);
 
 		/* Big Hammer, we also need to ensure that any pending
 		 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
 		 * current scanout is retired before unpinning the old
 		 * framebuffer.
 		 */
-		ret = i915_gem_object_flush_gpu(obj_priv, false);
+		ret = i915_gem_object_flush_gpu(obj, false);
 		if (ret) {
 			i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj);
 			mutex_unlock(&dev->struct_mutex);
@@ -1633,8 +1632,10 @@
 		return ret;
 	}
 
-	if (old_fb)
+	if (old_fb) {
+		intel_wait_for_vblank(dev, intel_crtc->pipe);
 		i915_gem_object_unpin(to_intel_framebuffer(old_fb)->obj);
+	}
 
 	mutex_unlock(&dev->struct_mutex);
 
@@ -1996,31 +1997,31 @@
 static void intel_clear_scanline_wait(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_ring_buffer *ring;
 	u32 tmp;
 
 	if (IS_GEN2(dev))
 		/* Can't break the hang on i8xx */
 		return;
 
-	tmp = I915_READ(PRB0_CTL);
-	if (tmp & RING_WAIT) {
-		I915_WRITE(PRB0_CTL, tmp);
-		POSTING_READ(PRB0_CTL);
-	}
+	ring = LP_RING(dev_priv);
+	tmp = I915_READ_CTL(ring);
+	if (tmp & RING_WAIT)
+		I915_WRITE_CTL(ring, tmp);
 }
 
 static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
 {
-	struct drm_i915_gem_object *obj_priv;
+	struct drm_i915_gem_object *obj;
 	struct drm_i915_private *dev_priv;
 
 	if (crtc->fb == NULL)
 		return;
 
-	obj_priv = to_intel_bo(to_intel_framebuffer(crtc->fb)->obj);
+	obj = to_intel_framebuffer(crtc->fb)->obj;
 	dev_priv = crtc->dev->dev_private;
 	wait_event(dev_priv->pending_flip_queue,
-		   atomic_read(&obj_priv->pending_flip) == 0);
+		   atomic_read(&obj->pending_flip) == 0);
 }
 
 static void ironlake_crtc_enable(struct drm_crtc *crtc)
@@ -2850,6 +2851,39 @@
 	ILK_FIFO_LINE_SIZE
 };
 
+static struct intel_watermark_params sandybridge_display_wm_info = {
+	SNB_DISPLAY_FIFO,
+	SNB_DISPLAY_MAXWM,
+	SNB_DISPLAY_DFTWM,
+	2,
+	SNB_FIFO_LINE_SIZE
+};
+
+static struct intel_watermark_params sandybridge_cursor_wm_info = {
+	SNB_CURSOR_FIFO,
+	SNB_CURSOR_MAXWM,
+	SNB_CURSOR_DFTWM,
+	2,
+	SNB_FIFO_LINE_SIZE
+};
+
+static struct intel_watermark_params sandybridge_display_srwm_info = {
+	SNB_DISPLAY_SR_FIFO,
+	SNB_DISPLAY_MAX_SRWM,
+	SNB_DISPLAY_DFT_SRWM,
+	2,
+	SNB_FIFO_LINE_SIZE
+};
+
+static struct intel_watermark_params sandybridge_cursor_srwm_info = {
+	SNB_CURSOR_SR_FIFO,
+	SNB_CURSOR_MAX_SRWM,
+	SNB_CURSOR_DFT_SRWM,
+	2,
+	SNB_FIFO_LINE_SIZE
+};
+
+
 /**
  * intel_calculate_wm - calculate watermark level
  * @clock_in_khz: pixel clock
@@ -3383,12 +3417,17 @@
 
 static bool ironlake_compute_wm0(struct drm_device *dev,
 				 int pipe,
+				 const struct intel_watermark_params *display,
+				 int display_latency_ns,
+				 const struct intel_watermark_params *cursor,
+				 int cursor_latency_ns,
 				 int *plane_wm,
 				 int *cursor_wm)
 {
 	struct drm_crtc *crtc;
-	int htotal, hdisplay, clock, pixel_size = 0;
-	int line_time_us, line_count, entries;
+	int htotal, hdisplay, clock, pixel_size;
+	int line_time_us, line_count;
+	int entries, tlb_miss;
 
 	crtc = intel_get_crtc_for_pipe(dev, pipe);
 	if (crtc->fb == NULL || !crtc->enabled)
@@ -3400,37 +3439,141 @@
 	pixel_size = crtc->fb->bits_per_pixel / 8;
 
 	/* Use the small buffer method to calculate plane watermark */
-	entries = ((clock * pixel_size / 1000) * ILK_LP0_PLANE_LATENCY) / 1000;
-	entries = DIV_ROUND_UP(entries,
-			       ironlake_display_wm_info.cacheline_size);
-	*plane_wm = entries + ironlake_display_wm_info.guard_size;
-	if (*plane_wm > (int)ironlake_display_wm_info.max_wm)
-		*plane_wm = ironlake_display_wm_info.max_wm;
+	entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
+	tlb_miss = display->fifo_size * display->cacheline_size - hdisplay * 8;
+	if (tlb_miss > 0)
+		entries += tlb_miss;
+	entries = DIV_ROUND_UP(entries, display->cacheline_size);
+	*plane_wm = entries + display->guard_size;
+	if (*plane_wm > (int)display->max_wm)
+		*plane_wm = display->max_wm;
 
 	/* Use the large buffer method to calculate cursor watermark */
 	line_time_us = ((htotal * 1000) / clock);
-	line_count = (ILK_LP0_CURSOR_LATENCY / line_time_us + 1000) / 1000;
+	line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
 	entries = line_count * 64 * pixel_size;
-	entries = DIV_ROUND_UP(entries,
-			       ironlake_cursor_wm_info.cacheline_size);
-	*cursor_wm = entries + ironlake_cursor_wm_info.guard_size;
-	if (*cursor_wm > ironlake_cursor_wm_info.max_wm)
-		*cursor_wm = ironlake_cursor_wm_info.max_wm;
+	tlb_miss = cursor->fifo_size * cursor->cacheline_size - hdisplay * 8;
+	if (tlb_miss > 0)
+		entries += tlb_miss;
+	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
+	*cursor_wm = entries + cursor->guard_size;
+	if (*cursor_wm > (int)cursor->max_wm)
+		*cursor_wm = (int)cursor->max_wm;
 
 	return true;
 }
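+
+/*
+ * A worked example, with made-up numbers, of the small-buffer plane
+ * watermark computed above: assume a hypothetical 1080p mode with a
+ * 148500kHz pixel clock, 4 bytes per pixel, a 700ns latency and 64-byte
+ * cachelines, so the negative tlb_miss term is skipped:
+ *
+ *   entries  = ((148500 * 4 / 1000) * 700) / 1000 = 415
+ *   entries  = DIV_ROUND_UP(415, 64)              = 7
+ *   plane_wm = 7 + guard_size
+ *
+ * The fifo_size, cacheline_size and guard_size values come from the
+ * intel_watermark_params tables above; the mode numbers are illustrative.
+ */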
 
+/*
+ * Check the wm result.
+ *
+ * If any calculated watermark value is larger than the maximum value that
+ * can be programmed into the associated watermark register, that watermark
+ * must be disabled.
+ */
+static bool ironlake_check_srwm(struct drm_device *dev, int level,
+				int fbc_wm, int display_wm, int cursor_wm,
+				const struct intel_watermark_params *display,
+				const struct intel_watermark_params *cursor)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	DRM_DEBUG_KMS("watermark %d: display plane %d, fbc lines %d,"
+		      " cursor %d\n", level, display_wm, fbc_wm, cursor_wm);
+
+	if (fbc_wm > SNB_FBC_MAX_SRWM) {
+		DRM_DEBUG_KMS("fbc watermark(%d) is too large(%d), disabling wm%d+\n",
+			      fbc_wm, SNB_FBC_MAX_SRWM, level);
+
+		/* FBC has its own way to disable the FBC WM */
+		I915_WRITE(DISP_ARB_CTL,
+			   I915_READ(DISP_ARB_CTL) | DISP_FBC_WM_DIS);
+		return false;
+	}
+
+	if (display_wm > display->max_wm) {
+		DRM_DEBUG_KMS("display watermark(%d) is too large(%d), disabling wm%d+\n",
+			      display_wm, SNB_DISPLAY_MAX_SRWM, level);
+		return false;
+	}
+
+	if (cursor_wm > cursor->max_wm) {
+		DRM_DEBUG_KMS("cursor watermark(%d) is too large(%d), disabling wm%d+\n",
+			      cursor_wm, SNB_CURSOR_MAX_SRWM, level);
+		return false;
+	}
+
+	if (!(fbc_wm || display_wm || cursor_wm)) {
+		DRM_DEBUG_KMS("latency %d is 0, disabling wm%d+\n", level, level);
+		return false;
+	}
+
+	return true;
+}
+
+/*
+ * Compute the watermark values for WM[1-3].
+ */
+static bool ironlake_compute_srwm(struct drm_device *dev, int level,
+				  int hdisplay, int htotal,
+				  int pixel_size, int clock, int latency_ns,
+				  const struct intel_watermark_params *display,
+				  const struct intel_watermark_params *cursor,
+				  int *fbc_wm, int *display_wm, int *cursor_wm)
+{
+
+	unsigned long line_time_us;
+	int line_count, line_size;
+	int small, large;
+	int entries;
+
+	if (!latency_ns) {
+		*fbc_wm = *display_wm = *cursor_wm = 0;
+		return false;
+	}
+
+	line_time_us = (htotal * 1000) / clock;
+	line_count = (latency_ns / line_time_us + 1000) / 1000;
+	line_size = hdisplay * pixel_size;
+
+	/* Use the minimum of the small and large buffer method for primary */
+	small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
+	large = line_count * line_size;
+
+	entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
+	*display_wm = entries + display->guard_size;
+
+	/*
+	 * Spec says:
+	 * FBC WM = ((Final Primary WM * 64) / number of bytes per line) + 2
+	 */
+	*fbc_wm = DIV_ROUND_UP(*display_wm * 64, line_size) + 2;
+
+	/* calculate the self-refresh watermark for display cursor */
+	entries = line_count * pixel_size * 64;
+	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
+	*cursor_wm = entries + cursor->guard_size;
+
+	return ironlake_check_srwm(dev, level,
+				   *fbc_wm, *display_wm, *cursor_wm,
+				   display, cursor);
+}
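+
+/*
+ * A worked example of the FBC watermark formula above, assuming a
+ * hypothetical display_wm of 16 and a 1920-pixel-wide mode at 4 bytes per
+ * pixel, i.e. line_size = 7680:
+ *
+ *   fbc_wm = DIV_ROUND_UP(16 * 64, 7680) + 2 = 1 + 2 = 3
+ *
+ * so the FBC watermark scales with the ratio of the primary watermark
+ * (converted back to bytes) to the line length.
+ */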
+
 static void ironlake_update_wm(struct drm_device *dev,
 			       int planea_clock, int planeb_clock,
-			       int sr_hdisplay, int sr_htotal,
+			       int hdisplay, int htotal,
 			       int pixel_size)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	int plane_wm, cursor_wm, enabled;
-	int tmp;
+	int fbc_wm, plane_wm, cursor_wm, enabled;
+	int clock;
 
 	enabled = 0;
-	if (ironlake_compute_wm0(dev, 0, &plane_wm, &cursor_wm)) {
+	if (ironlake_compute_wm0(dev, 0,
+				 &ironlake_display_wm_info,
+				 ILK_LP0_PLANE_LATENCY,
+				 &ironlake_cursor_wm_info,
+				 ILK_LP0_CURSOR_LATENCY,
+				 &plane_wm, &cursor_wm)) {
 		I915_WRITE(WM0_PIPEA_ILK,
 			   (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
 		DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
@@ -3439,7 +3582,12 @@
 		enabled++;
 	}
 
-	if (ironlake_compute_wm0(dev, 1, &plane_wm, &cursor_wm)) {
+	if (ironlake_compute_wm0(dev, 1,
+				 &ironlake_display_wm_info,
+				 ILK_LP0_PLANE_LATENCY,
+				 &ironlake_cursor_wm_info,
+				 ILK_LP0_CURSOR_LATENCY,
+				 &plane_wm, &cursor_wm)) {
 		I915_WRITE(WM0_PIPEB_ILK,
 			   (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
 		DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
@@ -3452,57 +3600,151 @@
 	 * Calculate and update the self-refresh watermark only when one
 	 * display plane is used.
 	 */
-	tmp = 0;
-	if (enabled == 1 && /* XXX disabled due to buggy implmentation? */ 0) {
-		unsigned long line_time_us;
-		int small, large, plane_fbc;
-		int sr_clock, entries;
-		int line_count, line_size;
-		/* Read the self-refresh latency. The unit is 0.5us */
-		int ilk_sr_latency = I915_READ(MLTR_ILK) & ILK_SRLT_MASK;
+	I915_WRITE(WM3_LP_ILK, 0);
+	I915_WRITE(WM2_LP_ILK, 0);
+	I915_WRITE(WM1_LP_ILK, 0);
 
-		sr_clock = planea_clock ? planea_clock : planeb_clock;
-		line_time_us = (sr_htotal * 1000) / sr_clock;
+	if (enabled != 1)
+		return;
 
-		/* Use ns/us then divide to preserve precision */
-		line_count = ((ilk_sr_latency * 500) / line_time_us + 1000)
-			/ 1000;
-		line_size = sr_hdisplay * pixel_size;
+	clock = planea_clock ? planea_clock : planeb_clock;
 
-		/* Use the minimum of the small and large buffer method for primary */
-		small = ((sr_clock * pixel_size / 1000) * (ilk_sr_latency * 500)) / 1000;
-		large = line_count * line_size;
+	/* WM1 */
+	if (!ironlake_compute_srwm(dev, 1, hdisplay, htotal, pixel_size,
+				   clock, ILK_READ_WM1_LATENCY() * 500,
+				   &ironlake_display_srwm_info,
+				   &ironlake_cursor_srwm_info,
+				   &fbc_wm, &plane_wm, &cursor_wm))
+		return;
 
-		entries = DIV_ROUND_UP(min(small, large),
-				       ironlake_display_srwm_info.cacheline_size);
+	I915_WRITE(WM1_LP_ILK,
+		   WM1_LP_SR_EN |
+		   (ILK_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
+		   (fbc_wm << WM1_LP_FBC_SHIFT) |
+		   (plane_wm << WM1_LP_SR_SHIFT) |
+		   cursor_wm);
 
-		plane_fbc = entries * 64;
-		plane_fbc = DIV_ROUND_UP(plane_fbc, line_size);
+	/* WM2 */
+	if (!ironlake_compute_srwm(dev, 2, hdisplay, htotal, pixel_size,
+				   clock, ILK_READ_WM2_LATENCY() * 500,
+				   &ironlake_display_srwm_info,
+				   &ironlake_cursor_srwm_info,
+				   &fbc_wm, &plane_wm, &cursor_wm))
+		return;
 
-		plane_wm = entries + ironlake_display_srwm_info.guard_size;
-		if (plane_wm > (int)ironlake_display_srwm_info.max_wm)
-			plane_wm = ironlake_display_srwm_info.max_wm;
+	I915_WRITE(WM2_LP_ILK,
+		   WM2_LP_EN |
+		   (ILK_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
+		   (fbc_wm << WM1_LP_FBC_SHIFT) |
+		   (plane_wm << WM1_LP_SR_SHIFT) |
+		   cursor_wm);
 
-		/* calculate the self-refresh watermark for display cursor */
-		entries = line_count * pixel_size * 64;
-		entries = DIV_ROUND_UP(entries,
-				       ironlake_cursor_srwm_info.cacheline_size);
+	/*
+	 * WM3 is unsupported on ILK, probably because we don't have latency
+	 * data for that power state
+	 */
+}
 
-		cursor_wm = entries + ironlake_cursor_srwm_info.guard_size;
-		if (cursor_wm > (int)ironlake_cursor_srwm_info.max_wm)
-			cursor_wm = ironlake_cursor_srwm_info.max_wm;
+static void sandybridge_update_wm(struct drm_device *dev,
+			       int planea_clock, int planeb_clock,
+			       int hdisplay, int htotal,
+			       int pixel_size)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int latency = SNB_READ_WM0_LATENCY() * 100;	/* In unit 0.1us */
+	int fbc_wm, plane_wm, cursor_wm, enabled;
+	int clock;
 
-		/* configure watermark and enable self-refresh */
-		tmp = (WM1_LP_SR_EN |
-		       (ilk_sr_latency << WM1_LP_LATENCY_SHIFT) |
-		       (plane_fbc << WM1_LP_FBC_SHIFT) |
-		       (plane_wm << WM1_LP_SR_SHIFT) |
-		       cursor_wm);
-		DRM_DEBUG_KMS("self-refresh watermark: display plane %d, fbc lines %d,"
-			      " cursor %d\n", plane_wm, plane_fbc, cursor_wm);
+	enabled = 0;
+	if (ironlake_compute_wm0(dev, 0,
+				 &sandybridge_display_wm_info, latency,
+				 &sandybridge_cursor_wm_info, latency,
+				 &plane_wm, &cursor_wm)) {
+		I915_WRITE(WM0_PIPEA_ILK,
+			   (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
+		DRM_DEBUG_KMS("FIFO watermarks for pipe A -"
+			      " plane %d, cursor: %d\n",
+			      plane_wm, cursor_wm);
+		enabled++;
 	}
-	I915_WRITE(WM1_LP_ILK, tmp);
-	/* XXX setup WM2 and WM3 */
+
+	if (ironlake_compute_wm0(dev, 1,
+				 &sandybridge_display_wm_info, latency,
+				 &sandybridge_cursor_wm_info, latency,
+				 &plane_wm, &cursor_wm)) {
+		I915_WRITE(WM0_PIPEB_ILK,
+			   (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
+		DRM_DEBUG_KMS("FIFO watermarks for pipe B -"
+			      " plane %d, cursor: %d\n",
+			      plane_wm, cursor_wm);
+		enabled++;
+	}
+
+	/*
+	 * Calculate and update the self-refresh watermark only when one
+	 * display plane is used.
+	 *
+	 * SNB supports 3 levels of watermarks.
+	 *
+	 * WM1/WM2/WM3 watermarks have to be enabled in ascending order,
+	 * and disabled in descending order.
+	 *
+	 */
+	I915_WRITE(WM3_LP_ILK, 0);
+	I915_WRITE(WM2_LP_ILK, 0);
+	I915_WRITE(WM1_LP_ILK, 0);
+
+	if (enabled != 1)
+		return;
+
+	clock = planea_clock ? planea_clock : planeb_clock;
+
+	/* WM1 */
+	if (!ironlake_compute_srwm(dev, 1, hdisplay, htotal, pixel_size,
+				   clock, SNB_READ_WM1_LATENCY() * 500,
+				   &sandybridge_display_srwm_info,
+				   &sandybridge_cursor_srwm_info,
+				   &fbc_wm, &plane_wm, &cursor_wm))
+		return;
+
+	I915_WRITE(WM1_LP_ILK,
+		   WM1_LP_SR_EN |
+		   (SNB_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
+		   (fbc_wm << WM1_LP_FBC_SHIFT) |
+		   (plane_wm << WM1_LP_SR_SHIFT) |
+		   cursor_wm);
+
+	/* WM2 */
+	if (!ironlake_compute_srwm(dev, 2,
+				   hdisplay, htotal, pixel_size,
+				   clock, SNB_READ_WM2_LATENCY() * 500,
+				   &sandybridge_display_srwm_info,
+				   &sandybridge_cursor_srwm_info,
+				   &fbc_wm, &plane_wm, &cursor_wm))
+		return;
+
+	I915_WRITE(WM2_LP_ILK,
+		   WM2_LP_EN |
+		   (SNB_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
+		   (fbc_wm << WM1_LP_FBC_SHIFT) |
+		   (plane_wm << WM1_LP_SR_SHIFT) |
+		   cursor_wm);
+
+	/* WM3 */
+	if (!ironlake_compute_srwm(dev, 3,
+				   hdisplay, htotal, pixel_size,
+				   clock, SNB_READ_WM3_LATENCY() * 500,
+				   &sandybridge_display_srwm_info,
+				   &sandybridge_cursor_srwm_info,
+				   &fbc_wm, &plane_wm, &cursor_wm))
+		return;
+
+	I915_WRITE(WM3_LP_ILK,
+		   WM3_LP_EN |
+		   (SNB_READ_WM3_LATENCY() << WM1_LP_LATENCY_SHIFT) |
+		   (fbc_wm << WM1_LP_FBC_SHIFT) |
+		   (plane_wm << WM1_LP_SR_SHIFT) |
+		   cursor_wm);
 }
 
 /**
@@ -3660,7 +3902,7 @@
 	 * refclk, or FALSE.  The returned values represent the clock equation:
 	 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 	 */
-	limit = intel_limit(crtc);
+	limit = intel_limit(crtc, refclk);
 	ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, &clock);
 	if (!ok) {
 		DRM_ERROR("Couldn't find PLL settings for mode!\n");
@@ -3714,7 +3956,7 @@
 		int lane = 0, link_bw, bpp;
 		/* CPU eDP doesn't require FDI link, so just set DP M/N
 		   according to current link config */
-		if (has_edp_encoder && !intel_encoder_is_pch_edp(&encoder->base)) {
+		if (has_edp_encoder && !intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
 			target_clock = mode->clock;
 			intel_edp_link_config(has_edp_encoder,
 					      &lane, &link_bw);
@@ -3857,6 +4099,22 @@
 				reduced_clock.m2;
 	}
 
+	/* Enable autotuning of the PLL clock (if permissible) */
+	if (HAS_PCH_SPLIT(dev)) {
+		int factor = 21;
+
+		if (is_lvds) {
+			if ((dev_priv->lvds_use_ssc &&
+			     dev_priv->lvds_ssc_freq == 100) ||
+			    (I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP)
+				factor = 25;
+		} else if (is_sdvo && is_tv)
+			factor = 20;
+
+		if (clock.m1 < factor * clock.n)
+			fp |= FP_CB_TUNE;
+	}
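+
+	/*
+	 * For illustration: with the default factor of 21, a hypothetical
+	 * divider pair of n = 2 and m1 = 10 gives 10 < 42, so FP_CB_TUNE
+	 * would be set; the LVDS and SDVO TV cases above merely shift that
+	 * threshold (factor 25 and 20 respectively).
+	 */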
+
 	dpll = 0;
 	if (!HAS_PCH_SPLIT(dev))
 		dpll = DPLL_VGA_MODE_DIS;
@@ -4071,7 +4329,6 @@
 	}
 
 	if (!has_edp_encoder || intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
-		I915_WRITE(fp_reg, fp);
 		I915_WRITE(dpll_reg, dpll);
 
 		/* Wait for the clocks to stabilize. */
@@ -4089,13 +4346,13 @@
 			}
 			I915_WRITE(DPLL_MD(pipe), temp);
 		} else {
-			/* write it again -- the BIOS does, after all */
+			/* The pixel multiplier can only be updated once the
+			 * DPLL is enabled and the clocks are stable.
+			 *
+			 * So write it again.
+			 */
 			I915_WRITE(dpll_reg, dpll);
 		}
-
-		/* Wait for the clocks to stabilize. */
-		POSTING_READ(dpll_reg);
-		udelay(150);
 	}
 
 	intel_crtc->lowfreq_avail = false;
@@ -4331,15 +4588,14 @@
 }
 
 static int intel_crtc_cursor_set(struct drm_crtc *crtc,
-				 struct drm_file *file_priv,
+				 struct drm_file *file,
 				 uint32_t handle,
 				 uint32_t width, uint32_t height)
 {
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	struct drm_gem_object *bo;
-	struct drm_i915_gem_object *obj_priv;
+	struct drm_i915_gem_object *obj;
 	uint32_t addr;
 	int ret;
 
@@ -4349,7 +4605,7 @@
 	if (!handle) {
 		DRM_DEBUG_KMS("cursor off\n");
 		addr = 0;
-		bo = NULL;
+		obj = NULL;
 		mutex_lock(&dev->struct_mutex);
 		goto finish;
 	}
@@ -4360,13 +4616,11 @@
 		return -EINVAL;
 	}
 
-	bo = drm_gem_object_lookup(dev, file_priv, handle);
-	if (!bo)
+	obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
+	if (!obj)
 		return -ENOENT;
 
-	obj_priv = to_intel_bo(bo);
-
-	if (bo->size < width * height * 4) {
+	if (obj->base.size < width * height * 4) {
 		DRM_ERROR("buffer is to small\n");
 		ret = -ENOMEM;
 		goto fail;
@@ -4375,29 +4629,41 @@
 	/* we only need to pin inside GTT if cursor is non-phy */
 	mutex_lock(&dev->struct_mutex);
 	if (!dev_priv->info->cursor_needs_physical) {
-		ret = i915_gem_object_pin(bo, PAGE_SIZE);
+		if (obj->tiling_mode) {
+			DRM_ERROR("cursor cannot be tiled\n");
+			ret = -EINVAL;
+			goto fail_locked;
+		}
+
+		ret = i915_gem_object_pin(obj, PAGE_SIZE, true);
 		if (ret) {
 			DRM_ERROR("failed to pin cursor bo\n");
 			goto fail_locked;
 		}
 
-		ret = i915_gem_object_set_to_gtt_domain(bo, 0);
+		ret = i915_gem_object_set_to_gtt_domain(obj, 0);
 		if (ret) {
 			DRM_ERROR("failed to move cursor bo into the GTT\n");
 			goto fail_unpin;
 		}
 
-		addr = obj_priv->gtt_offset;
+		ret = i915_gem_object_put_fence(obj);
+		if (ret) {
+			DRM_ERROR("failed to move cursor bo into the GTT\n");
+			goto fail_unpin;
+		}
+
+		addr = obj->gtt_offset;
 	} else {
 		int align = IS_I830(dev) ? 16 * 1024 : 256;
-		ret = i915_gem_attach_phys_object(dev, bo,
+		ret = i915_gem_attach_phys_object(dev, obj,
 						  (intel_crtc->pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1,
 						  align);
 		if (ret) {
 			DRM_ERROR("failed to attach phys object\n");
 			goto fail_locked;
 		}
-		addr = obj_priv->phys_obj->handle->busaddr;
+		addr = obj->phys_obj->handle->busaddr;
 	}
 
 	if (IS_GEN2(dev))
@@ -4406,17 +4672,17 @@
  finish:
 	if (intel_crtc->cursor_bo) {
 		if (dev_priv->info->cursor_needs_physical) {
-			if (intel_crtc->cursor_bo != bo)
+			if (intel_crtc->cursor_bo != obj)
 				i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo);
 		} else
 			i915_gem_object_unpin(intel_crtc->cursor_bo);
-		drm_gem_object_unreference(intel_crtc->cursor_bo);
+		drm_gem_object_unreference(&intel_crtc->cursor_bo->base);
 	}
 
 	mutex_unlock(&dev->struct_mutex);
 
 	intel_crtc->cursor_addr = addr;
-	intel_crtc->cursor_bo = bo;
+	intel_crtc->cursor_bo = obj;
 	intel_crtc->cursor_width = width;
 	intel_crtc->cursor_height = height;
 
@@ -4424,11 +4690,11 @@
 
 	return 0;
 fail_unpin:
-	i915_gem_object_unpin(bo);
+	i915_gem_object_unpin(obj);
 fail_locked:
 	mutex_unlock(&dev->struct_mutex);
 fail:
-	drm_gem_object_unreference_unlocked(bo);
+	drm_gem_object_unreference_unlocked(&obj->base);
 	return ret;
 }
 
@@ -4739,8 +5005,14 @@
 	struct drm_device *dev = (struct drm_device *)arg;
 	drm_i915_private_t *dev_priv = dev->dev_private;
 
-	dev_priv->busy = false;
+	if (!list_empty(&dev_priv->mm.active_list)) {
+		/* Still processing requests, so just re-arm the timer. */
+		mod_timer(&dev_priv->idle_timer, jiffies +
+			  msecs_to_jiffies(GPU_IDLE_TIMEOUT));
+		return;
+	}
 
+	dev_priv->busy = false;
 	queue_work(dev_priv->wq, &dev_priv->idle_work);
 }
 
@@ -4751,9 +5023,17 @@
 	struct intel_crtc *intel_crtc = (struct intel_crtc *)arg;
 	struct drm_crtc *crtc = &intel_crtc->base;
 	drm_i915_private_t *dev_priv = crtc->dev->dev_private;
+	struct intel_framebuffer *intel_fb;
+
+	intel_fb = to_intel_framebuffer(crtc->fb);
+	if (intel_fb && intel_fb->obj->active) {
+		/* The framebuffer is still being accessed by the GPU. */
+		mod_timer(&intel_crtc->idle_timer, jiffies +
+			  msecs_to_jiffies(CRTC_IDLE_TIMEOUT));
+		return;
+	}
 
 	intel_crtc->busy = false;
-
 	queue_work(dev_priv->wq, &dev_priv->idle_work);
 }
 
@@ -4763,8 +5043,8 @@
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	int pipe = intel_crtc->pipe;
-	int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
-	int dpll = I915_READ(dpll_reg);
+	int dpll_reg = DPLL(pipe);
+	int dpll;
 
 	if (HAS_PCH_SPLIT(dev))
 		return;
@@ -4772,17 +5052,19 @@
 	if (!dev_priv->lvds_downclock_avail)
 		return;
 
+	dpll = I915_READ(dpll_reg);
 	if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) {
 		DRM_DEBUG_DRIVER("upclocking LVDS\n");
 
 		/* Unlock panel regs */
-		I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) |
-			   PANEL_UNLOCK_REGS);
+		I915_WRITE(PP_CONTROL,
+			   I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS);
 
 		dpll &= ~DISPLAY_RATE_SELECT_FPA1;
 		I915_WRITE(dpll_reg, dpll);
-		dpll = I915_READ(dpll_reg);
+		POSTING_READ(dpll_reg);
 		intel_wait_for_vblank(dev, pipe);
+
 		dpll = I915_READ(dpll_reg);
 		if (dpll & DISPLAY_RATE_SELECT_FPA1)
 			DRM_DEBUG_DRIVER("failed to upclock LVDS!\n");
@@ -4888,7 +5170,7 @@
  * buffer), we'll also mark the display as busy, so we know to increase its
  * clock frequency.
  */
-void intel_mark_busy(struct drm_device *dev, struct drm_gem_object *obj)
+void intel_mark_busy(struct drm_device *dev, struct drm_i915_gem_object *obj)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_crtc *crtc = NULL;
@@ -4969,8 +5251,9 @@
 
 	mutex_lock(&work->dev->struct_mutex);
 	i915_gem_object_unpin(work->old_fb_obj);
-	drm_gem_object_unreference(work->pending_flip_obj);
-	drm_gem_object_unreference(work->old_fb_obj);
+	drm_gem_object_unreference(&work->pending_flip_obj->base);
+	drm_gem_object_unreference(&work->old_fb_obj->base);
+
 	mutex_unlock(&work->dev->struct_mutex);
 	kfree(work);
 }
@@ -4981,15 +5264,17 @@
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	struct intel_unpin_work *work;
-	struct drm_i915_gem_object *obj_priv;
+	struct drm_i915_gem_object *obj;
 	struct drm_pending_vblank_event *e;
-	struct timeval now;
+	struct timeval tnow, tvbl;
 	unsigned long flags;
 
 	/* Ignore early vblank irqs */
 	if (intel_crtc == NULL)
 		return;
 
+	do_gettimeofday(&tnow);
+
 	spin_lock_irqsave(&dev->event_lock, flags);
 	work = intel_crtc->unpin_work;
 	if (work == NULL || !work->pending) {
@@ -4998,26 +5283,49 @@
 	}
 
 	intel_crtc->unpin_work = NULL;
-	drm_vblank_put(dev, intel_crtc->pipe);
 
 	if (work->event) {
 		e = work->event;
-		do_gettimeofday(&now);
-		e->event.sequence = drm_vblank_count(dev, intel_crtc->pipe);
-		e->event.tv_sec = now.tv_sec;
-		e->event.tv_usec = now.tv_usec;
+		e->event.sequence = drm_vblank_count_and_time(dev, intel_crtc->pipe, &tvbl);
+
+		/* Were we called before the vblank count and timestamps
+		 * were updated for the vblank interval of the flip
+		 * completion? If so, increment the vblank count and add
+		 * one video refresh duration to the returned timestamp
+		 * to account for it. We assume this happened if we are
+		 * called more than 0.9 frame durations after the last
+		 * timestamped vblank.
+		 *
+		 * This calculation cannot be used with vrefresh rates
+		 * below 5Hz (10Hz to be on the safe side) without
+		 * promoting to 64-bit integers.
+		 */
+		if (10 * (timeval_to_ns(&tnow) - timeval_to_ns(&tvbl)) >
+		    9 * crtc->framedur_ns) {
+			e->event.sequence++;
+			tvbl = ns_to_timeval(timeval_to_ns(&tvbl) +
+					     crtc->framedur_ns);
+		}
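+
+		/*
+		 * For illustration: at a hypothetical 60Hz refresh,
+		 * framedur_ns is roughly 16666667ns, so the check above
+		 * fires when this handler runs more than ~15ms (0.9
+		 * frames) after the last timestamped vblank; the event
+		 * then reports the next vblank count and a timestamp one
+		 * frame later.
+		 */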
+
+		e->event.tv_sec = tvbl.tv_sec;
+		e->event.tv_usec = tvbl.tv_usec;
+
 		list_add_tail(&e->base.link,
 			      &e->base.file_priv->event_list);
 		wake_up_interruptible(&e->base.file_priv->event_wait);
 	}
 
+	drm_vblank_put(dev, intel_crtc->pipe);
+
 	spin_unlock_irqrestore(&dev->event_lock, flags);
 
-	obj_priv = to_intel_bo(work->old_fb_obj);
+	obj = work->old_fb_obj;
+
 	atomic_clear_mask(1 << intel_crtc->plane,
-			  &obj_priv->pending_flip.counter);
-	if (atomic_read(&obj_priv->pending_flip) == 0)
+			  &obj->pending_flip.counter);
+	if (atomic_read(&obj->pending_flip) == 0)
 		wake_up(&dev_priv->pending_flip_queue);
+
 	schedule_work(&work->work);
 
 	trace_i915_flip_complete(intel_crtc->plane, work->pending_flip_obj);
@@ -5063,8 +5371,7 @@
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_framebuffer *intel_fb;
-	struct drm_i915_gem_object *obj_priv;
-	struct drm_gem_object *obj;
+	struct drm_i915_gem_object *obj;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	struct intel_unpin_work *work;
 	unsigned long flags, offset;
@@ -5098,13 +5405,13 @@
 	obj = intel_fb->obj;
 
 	mutex_lock(&dev->struct_mutex);
-	ret = intel_pin_and_fence_fb_obj(dev, obj, true);
+	ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
 	if (ret)
 		goto cleanup_work;
 
 	/* Reference the objects for the scheduled work. */
-	drm_gem_object_reference(work->old_fb_obj);
-	drm_gem_object_reference(obj);
+	drm_gem_object_reference(&work->old_fb_obj->base);
+	drm_gem_object_reference(&obj->base);
 
 	crtc->fb = fb;
 
@@ -5112,22 +5419,16 @@
 	if (ret)
 		goto cleanup_objs;
 
-	/* Block clients from rendering to the new back buffer until
-	 * the flip occurs and the object is no longer visible.
-	 */
-	atomic_add(1 << intel_crtc->plane,
-		   &to_intel_bo(work->old_fb_obj)->pending_flip);
-
-	work->pending_flip_obj = obj;
-	obj_priv = to_intel_bo(obj);
-
 	if (IS_GEN3(dev) || IS_GEN2(dev)) {
 		u32 flip_mask;
 
 		/* Can't queue multiple flips, so wait for the previous
 		 * one to finish before executing the next.
 		 */
-		BEGIN_LP_RING(2);
+		ret = BEGIN_LP_RING(2);
+		if (ret)
+			goto cleanup_objs;
+
 		if (intel_crtc->plane)
 			flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
 		else
@@ -5137,18 +5438,28 @@
 		ADVANCE_LP_RING();
 	}
 
+	work->pending_flip_obj = obj;
+
 	work->enable_stall_check = true;
 
 	/* Offset into the new buffer for cases of shared fbs between CRTCs */
 	offset = crtc->y * fb->pitch + crtc->x * fb->bits_per_pixel/8;
 
-	BEGIN_LP_RING(4);
-	switch(INTEL_INFO(dev)->gen) {
+	ret = BEGIN_LP_RING(4);
+	if (ret)
+		goto cleanup_objs;
+
+	/* Block clients from rendering to the new back buffer until
+	 * the flip occurs and the object is no longer visible.
+	 */
+	atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
+
+	switch (INTEL_INFO(dev)->gen) {
 	case 2:
 		OUT_RING(MI_DISPLAY_FLIP |
 			 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
 		OUT_RING(fb->pitch);
-		OUT_RING(obj_priv->gtt_offset + offset);
+		OUT_RING(obj->gtt_offset + offset);
 		OUT_RING(MI_NOOP);
 		break;
 
@@ -5156,7 +5467,7 @@
 		OUT_RING(MI_DISPLAY_FLIP_I915 |
 			 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
 		OUT_RING(fb->pitch);
-		OUT_RING(obj_priv->gtt_offset + offset);
+		OUT_RING(obj->gtt_offset + offset);
 		OUT_RING(MI_NOOP);
 		break;
 
@@ -5169,7 +5480,7 @@
 		OUT_RING(MI_DISPLAY_FLIP |
 			 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
 		OUT_RING(fb->pitch);
-		OUT_RING(obj_priv->gtt_offset | obj_priv->tiling_mode);
+		OUT_RING(obj->gtt_offset | obj->tiling_mode);
 
 		/* XXX Enabling the panel-fitter across page-flip is so far
 		 * untested on non-native modes, so ignore it for now.
@@ -5183,8 +5494,8 @@
 	case 6:
 		OUT_RING(MI_DISPLAY_FLIP |
 			 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
-		OUT_RING(fb->pitch | obj_priv->tiling_mode);
-		OUT_RING(obj_priv->gtt_offset);
+		OUT_RING(fb->pitch | obj->tiling_mode);
+		OUT_RING(obj->gtt_offset);
 
 		pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
 		pipesrc = I915_READ(pipe == 0 ? PIPEASRC : PIPEBSRC) & 0x0fff0fff;
@@ -5200,8 +5511,8 @@
 	return 0;
 
 cleanup_objs:
-	drm_gem_object_unreference(work->old_fb_obj);
-	drm_gem_object_unreference(obj);
+	drm_gem_object_unreference(&work->old_fb_obj->base);
+	drm_gem_object_unreference(&obj->base);
 cleanup_work:
 	mutex_unlock(&dev->struct_mutex);
 
@@ -5338,7 +5649,7 @@
 }
 
 int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
-				struct drm_file *file_priv)
+				struct drm_file *file)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
@@ -5498,6 +5809,8 @@
 		encoder->base.possible_clones =
 			intel_encoder_clones(dev, encoder->clone_mask);
 	}
+
+	intel_panel_setup_backlight(dev);
 }
 
 static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
@@ -5505,19 +5818,19 @@
 	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
 
 	drm_framebuffer_cleanup(fb);
-	drm_gem_object_unreference_unlocked(intel_fb->obj);
+	drm_gem_object_unreference_unlocked(&intel_fb->obj->base);
 
 	kfree(intel_fb);
 }
 
 static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
-						struct drm_file *file_priv,
+						struct drm_file *file,
 						unsigned int *handle)
 {
 	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
-	struct drm_gem_object *object = intel_fb->obj;
+	struct drm_i915_gem_object *obj = intel_fb->obj;
 
-	return drm_gem_handle_create(file_priv, object, handle);
+	return drm_gem_handle_create(file, &obj->base, handle);
 }
 
 static const struct drm_framebuffer_funcs intel_fb_funcs = {
@@ -5528,12 +5841,11 @@
 int intel_framebuffer_init(struct drm_device *dev,
 			   struct intel_framebuffer *intel_fb,
 			   struct drm_mode_fb_cmd *mode_cmd,
-			   struct drm_gem_object *obj)
+			   struct drm_i915_gem_object *obj)
 {
-	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 	int ret;
 
-	if (obj_priv->tiling_mode == I915_TILING_Y)
+	if (obj->tiling_mode == I915_TILING_Y)
 		return -EINVAL;
 
 	if (mode_cmd->pitch & 63)
@@ -5565,11 +5877,11 @@
 			      struct drm_file *filp,
 			      struct drm_mode_fb_cmd *mode_cmd)
 {
-	struct drm_gem_object *obj;
+	struct drm_i915_gem_object *obj;
 	struct intel_framebuffer *intel_fb;
 	int ret;
 
-	obj = drm_gem_object_lookup(dev, filp, mode_cmd->handle);
+	obj = to_intel_bo(drm_gem_object_lookup(dev, filp, mode_cmd->handle));
 	if (!obj)
 		return ERR_PTR(-ENOENT);
 
@@ -5577,10 +5889,9 @@
 	if (!intel_fb)
 		return ERR_PTR(-ENOMEM);
 
-	ret = intel_framebuffer_init(dev, intel_fb,
-				     mode_cmd, obj);
+	ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj);
 	if (ret) {
-		drm_gem_object_unreference_unlocked(obj);
+		drm_gem_object_unreference_unlocked(&obj->base);
 		kfree(intel_fb);
 		return ERR_PTR(ret);
 	}
@@ -5593,10 +5904,10 @@
 	.output_poll_changed = intel_fb_output_poll_changed,
 };
 
-static struct drm_gem_object *
+static struct drm_i915_gem_object *
 intel_alloc_context_page(struct drm_device *dev)
 {
-	struct drm_gem_object *ctx;
+	struct drm_i915_gem_object *ctx;
 	int ret;
 
 	ctx = i915_gem_alloc_object(dev, 4096);
@@ -5606,7 +5917,7 @@
 	}
 
 	mutex_lock(&dev->struct_mutex);
-	ret = i915_gem_object_pin(ctx, 4096);
+	ret = i915_gem_object_pin(ctx, 4096, true);
 	if (ret) {
 		DRM_ERROR("failed to pin power context: %d\n", ret);
 		goto err_unref;
@@ -5624,7 +5935,7 @@
 err_unpin:
 	i915_gem_object_unpin(ctx);
 err_unref:
-	drm_gem_object_unreference(ctx);
+	drm_gem_object_unreference(&ctx->base);
 	mutex_unlock(&dev->struct_mutex);
 	return NULL;
 }
@@ -5736,6 +6047,25 @@
 
 }
 
+void gen6_set_rps(struct drm_device *dev, u8 val)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 swreq;
+
+	swreq = (val & 0x3ff) << 25;
+	I915_WRITE(GEN6_RPNSWREQ, swreq);
+}
+
+void gen6_disable_rps(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
+	I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
+	I915_WRITE(GEN6_PMIER, 0);
+	I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
+}
+
 static unsigned long intel_pxfreq(u32 vidfreq)
 {
 	unsigned long freq;
@@ -5822,7 +6152,123 @@
 	dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK);
 }
 
-void intel_init_clock_gating(struct drm_device *dev)
+void gen6_enable_rps(struct drm_i915_private *dev_priv)
+{
+	u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
+	u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
+	u32 pcu_mbox;
+	int cur_freq, min_freq, max_freq;
+	int i;
+
+	/* Here begins a magic sequence of register writes to enable
+	 * auto-downclocking.
+	 *
+	 * There may be some value in exposing these to
+	 * userspace...
+	 */
+	I915_WRITE(GEN6_RC_STATE, 0);
+	__gen6_force_wake_get(dev_priv);
+
+	/* disable the counters and set deterministic thresholds */
+	I915_WRITE(GEN6_RC_CONTROL, 0);
+
+	I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16);
+	I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30);
+	I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30);
+	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
+	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
+
+	for (i = 0; i < I915_NUM_RINGS; i++)
+		I915_WRITE(RING_MAX_IDLE(dev_priv->ring[i].mmio_base), 10);
+
+	I915_WRITE(GEN6_RC_SLEEP, 0);
+	I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
+	I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
+	I915_WRITE(GEN6_RC6p_THRESHOLD, 100000);
+	I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
+
+	I915_WRITE(GEN6_RC_CONTROL,
+		   GEN6_RC_CTL_RC6p_ENABLE |
+		   GEN6_RC_CTL_RC6_ENABLE |
+		   GEN6_RC_CTL_EI_MODE(1) |
+		   GEN6_RC_CTL_HW_ENABLE);
+
+	I915_WRITE(GEN6_RPNSWREQ,
+		   GEN6_FREQUENCY(10) |
+		   GEN6_OFFSET(0) |
+		   GEN6_AGGRESSIVE_TURBO);
+	I915_WRITE(GEN6_RC_VIDEO_FREQ,
+		   GEN6_FREQUENCY(12));
+
+	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
+	I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
+		   18 << 24 |
+		   6 << 16);
+	I915_WRITE(GEN6_RP_UP_THRESHOLD, 90000);
+	I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 100000);
+	I915_WRITE(GEN6_RP_UP_EI, 100000);
+	I915_WRITE(GEN6_RP_DOWN_EI, 300000);
+	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
+	I915_WRITE(GEN6_RP_CONTROL,
+		   GEN6_RP_MEDIA_TURBO |
+		   GEN6_RP_USE_NORMAL_FREQ |
+		   GEN6_RP_MEDIA_IS_GFX |
+		   GEN6_RP_ENABLE |
+		   GEN6_RP_UP_BUSY_MAX |
+		   GEN6_RP_DOWN_BUSY_MIN);
+
+	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
+		     500))
+		DRM_ERROR("timeout waiting for pcode mailbox to become idle\n");
+
+	I915_WRITE(GEN6_PCODE_DATA, 0);
+	I915_WRITE(GEN6_PCODE_MAILBOX,
+		   GEN6_PCODE_READY |
+		   GEN6_PCODE_WRITE_MIN_FREQ_TABLE);
+	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
+		     500))
+		DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
+
+	min_freq = (rp_state_cap & 0xff0000) >> 16;
+	max_freq = rp_state_cap & 0xff;
+	cur_freq = (gt_perf_status & 0xff00) >> 8;
+
+	/* Check for overclock support */
+	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
+		     500))
+		DRM_ERROR("timeout waiting for pcode mailbox to become idle\n");
+	I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_READ_OC_PARAMS);
+	pcu_mbox = I915_READ(GEN6_PCODE_DATA);
+	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
+		     500))
+		DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
+	if (pcu_mbox & (1<<31)) { /* OC supported */
+		max_freq = pcu_mbox & 0xff;
+		DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", max_freq * 100);
+	}
+
+	/* In units of 100MHz */
+	dev_priv->max_delay = max_freq;
+	dev_priv->min_delay = min_freq;
+	dev_priv->cur_delay = cur_freq;
+
+	/* requires MSI enabled */
+	I915_WRITE(GEN6_PMIER,
+		   GEN6_PM_MBOX_EVENT |
+		   GEN6_PM_THERMAL_EVENT |
+		   GEN6_PM_RP_DOWN_TIMEOUT |
+		   GEN6_PM_RP_UP_THRESHOLD |
+		   GEN6_PM_RP_DOWN_THRESHOLD |
+		   GEN6_PM_RP_UP_EI_EXPIRED |
+		   GEN6_PM_RP_DOWN_EI_EXPIRED);
+	I915_WRITE(GEN6_PMIMR, 0);
+	/* enable all PM interrupts */
+	I915_WRITE(GEN6_PMINTRMSK, 0);
+
+	__gen6_force_wake_put(dev_priv);
+}
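+
+/*
+ * For illustration: the RP_STATE_CAP / GT_PERF_STATUS decoding above is
+ * plain bit-field extraction.  With made-up register values
+ * rp_state_cap = 0x00110d25 and gt_perf_status = 0x00001100:
+ *
+ *   min_freq = (0x00110d25 & 0xff0000) >> 16 = 0x11
+ *   max_freq =  0x00110d25 & 0xff            = 0x25
+ *   cur_freq = (0x00001100 & 0xff00) >> 8    = 0x11
+ *
+ * all in the 100MHz units noted above.
+ */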
+
+void intel_enable_clock_gating(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
@@ -5872,9 +6318,9 @@
 			I915_WRITE(DISP_ARB_CTL,
 					(I915_READ(DISP_ARB_CTL) |
 						DISP_FBC_WM_DIS));
-		I915_WRITE(WM3_LP_ILK, 0);
-		I915_WRITE(WM2_LP_ILK, 0);
-		I915_WRITE(WM1_LP_ILK, 0);
+			I915_WRITE(WM3_LP_ILK, 0);
+			I915_WRITE(WM2_LP_ILK, 0);
+			I915_WRITE(WM1_LP_ILK, 0);
 		}
 		/*
 		 * Based on the document from hardware guys the following bits
@@ -5896,7 +6342,49 @@
 				   ILK_DPFC_DIS2 |
 				   ILK_CLK_FBC);
 		}
-		return;
+
+		I915_WRITE(ILK_DISPLAY_CHICKEN2,
+			   I915_READ(ILK_DISPLAY_CHICKEN2) |
+			   ILK_ELPIN_409_SELECT);
+
+		if (IS_GEN5(dev)) {
+			I915_WRITE(_3D_CHICKEN2,
+				   _3D_CHICKEN2_WM_READ_PIPELINED << 16 |
+				   _3D_CHICKEN2_WM_READ_PIPELINED);
+		}
+
+		if (IS_GEN6(dev)) {
+			I915_WRITE(WM3_LP_ILK, 0);
+			I915_WRITE(WM2_LP_ILK, 0);
+			I915_WRITE(WM1_LP_ILK, 0);
+
+			/*
+			 * According to the spec the following bits should be
+			 * set in order to enable memory self-refresh and fbc:
+			 * bits 21 and 22 of 0x42000
+			 * bits 21 and 22 of 0x42004
+			 * bits 5 and 7 of 0x42020
+			 * bit 14 of 0x70180
+			 * bit 14 of 0x71180
+			 */
+			I915_WRITE(ILK_DISPLAY_CHICKEN1,
+				   I915_READ(ILK_DISPLAY_CHICKEN1) |
+				   ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
+			I915_WRITE(ILK_DISPLAY_CHICKEN2,
+				   I915_READ(ILK_DISPLAY_CHICKEN2) |
+				   ILK_DPARB_GATE | ILK_VSDPFD_FULL);
+			I915_WRITE(ILK_DSPCLK_GATE,
+				   I915_READ(ILK_DSPCLK_GATE) |
+				   ILK_DPARB_CLK_GATE  |
+				   ILK_DPFD_CLK_GATE);
+
+			I915_WRITE(DSPACNTR,
+				   I915_READ(DSPACNTR) |
+				   DISPPLANE_TRICKLE_FEED_DISABLE);
+			I915_WRITE(DSPBCNTR,
+				   I915_READ(DSPBCNTR) |
+				   DISPPLANE_TRICKLE_FEED_DISABLE);
+		}
 	} else if (IS_G4X(dev)) {
 		uint32_t dspclk_gate;
 		I915_WRITE(RENCLK_GATE_D1, 0);
@@ -5934,55 +6422,84 @@
 	} else if (IS_I830(dev)) {
 		I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
 	}
+}
+
+void intel_disable_clock_gating(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (dev_priv->renderctx) {
+		struct drm_i915_gem_object *obj = dev_priv->renderctx;
+
+		I915_WRITE(CCID, 0);
+		POSTING_READ(CCID);
+
+		i915_gem_object_unpin(obj);
+		drm_gem_object_unreference(&obj->base);
+		dev_priv->renderctx = NULL;
+	}
+
+	if (dev_priv->pwrctx) {
+		struct drm_i915_gem_object *obj = dev_priv->pwrctx;
+
+		I915_WRITE(PWRCTXA, 0);
+		POSTING_READ(PWRCTXA);
+
+		i915_gem_object_unpin(obj);
+		drm_gem_object_unreference(&obj->base);
+		dev_priv->pwrctx = NULL;
+	}
+}
+
+static void ironlake_disable_rc6(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	/* Wake the GPU, prevent RC6, then restore RSTDBYCTL */
+	I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT);
+	wait_for(((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON),
+		 10);
+	POSTING_READ(CCID);
+	I915_WRITE(PWRCTXA, 0);
+	POSTING_READ(PWRCTXA);
+	I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
+	POSTING_READ(RSTDBYCTL);
+	i915_gem_object_unpin(dev_priv->renderctx);
+	drm_gem_object_unreference(&dev_priv->renderctx->base);
+	dev_priv->renderctx = NULL;
+	i915_gem_object_unpin(dev_priv->pwrctx);
+	drm_gem_object_unreference(&dev_priv->pwrctx->base);
+	dev_priv->pwrctx = NULL;
+}
+
+void ironlake_enable_rc6(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret;
 
 	/*
 	 * GPU can automatically power down the render unit if given a page
 	 * to save state.
 	 */
-	if (IS_IRONLAKE_M(dev)) {
-		if (dev_priv->renderctx == NULL)
-			dev_priv->renderctx = intel_alloc_context_page(dev);
-		if (dev_priv->renderctx) {
-			struct drm_i915_gem_object *obj_priv;
-			obj_priv = to_intel_bo(dev_priv->renderctx);
-			if (obj_priv) {
-				BEGIN_LP_RING(4);
-				OUT_RING(MI_SET_CONTEXT);
-				OUT_RING(obj_priv->gtt_offset |
-						MI_MM_SPACE_GTT |
-						MI_SAVE_EXT_STATE_EN |
-						MI_RESTORE_EXT_STATE_EN |
-						MI_RESTORE_INHIBIT);
-				OUT_RING(MI_NOOP);
-				OUT_RING(MI_FLUSH);
-				ADVANCE_LP_RING();
-			}
-		} else
-			DRM_DEBUG_KMS("Failed to allocate render context."
-				       "Disable RC6\n");
+	ret = BEGIN_LP_RING(6);
+	if (ret) {
+		ironlake_disable_rc6(dev);
+		return;
 	}
+	OUT_RING(MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
+	OUT_RING(MI_SET_CONTEXT);
+	OUT_RING(dev_priv->renderctx->gtt_offset |
+		 MI_MM_SPACE_GTT |
+		 MI_SAVE_EXT_STATE_EN |
+		 MI_RESTORE_EXT_STATE_EN |
+		 MI_RESTORE_INHIBIT);
+	OUT_RING(MI_SUSPEND_FLUSH);
+	OUT_RING(MI_NOOP);
+	OUT_RING(MI_FLUSH);
+	ADVANCE_LP_RING();
 
-	if (I915_HAS_RC6(dev) && drm_core_check_feature(dev, DRIVER_MODESET)) {
-		struct drm_i915_gem_object *obj_priv = NULL;
-
-		if (dev_priv->pwrctx) {
-			obj_priv = to_intel_bo(dev_priv->pwrctx);
-		} else {
-			struct drm_gem_object *pwrctx;
-
-			pwrctx = intel_alloc_context_page(dev);
-			if (pwrctx) {
-				dev_priv->pwrctx = pwrctx;
-				obj_priv = to_intel_bo(pwrctx);
-			}
-		}
-
-		if (obj_priv) {
-			I915_WRITE(PWRCTXA, obj_priv->gtt_offset | PWRCTX_EN);
-			I915_WRITE(MCHBAR_RENDER_STANDBY,
-				   I915_READ(MCHBAR_RENDER_STANDBY) & ~RCX_SW_EXIT);
-		}
-	}
+	I915_WRITE(PWRCTXA, dev_priv->pwrctx->gtt_offset | PWRCTX_EN);
+	I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
 }
 
 /* Set up chip specific display functions */
@@ -5997,7 +6514,7 @@
 		dev_priv->display.dpms = i9xx_crtc_dpms;
 
 	if (I915_HAS_FBC(dev)) {
-		if (IS_IRONLAKE_M(dev)) {
+		if (HAS_PCH_SPLIT(dev)) {
 			dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
 			dev_priv->display.enable_fbc = ironlake_enable_fbc;
 			dev_priv->display.disable_fbc = ironlake_disable_fbc;
@@ -6046,6 +6563,14 @@
 					      "Disable CxSR\n");
 				dev_priv->display.update_wm = NULL;
 			}
+		} else if (IS_GEN6(dev)) {
+			if (SNB_READ_WM0_LATENCY()) {
+				dev_priv->display.update_wm = sandybridge_update_wm;
+			} else {
+				DRM_DEBUG_KMS("Failed to read display plane latency. "
+					      "Disable CxSR\n");
+				dev_priv->display.update_wm = NULL;
+			}
 		} else
 			dev_priv->display.update_wm = NULL;
 	} else if (IS_PINEVIEW(dev)) {
@@ -6191,12 +6716,7 @@
 		dev->mode_config.max_width = 8192;
 		dev->mode_config.max_height = 8192;
 	}
-
-	/* set memory base */
-	if (IS_GEN2(dev))
-		dev->mode_config.fb_base = pci_resource_start(dev->pdev, 0);
-	else
-		dev->mode_config.fb_base = pci_resource_start(dev->pdev, 2);
+	dev->mode_config.fb_base = dev->agp->base;
 
 	if (IS_MOBILE(dev) || !IS_GEN2(dev))
 		dev_priv->num_pipe = 2;
@@ -6211,7 +6731,7 @@
 
 	intel_setup_outputs(dev);
 
-	intel_init_clock_gating(dev);
+	intel_enable_clock_gating(dev);
 
 	/* Just disable it once at startup */
 	i915_disable_vga(dev);
@@ -6221,6 +6741,24 @@
 		intel_init_emon(dev);
 	}
 
+	if (IS_GEN6(dev))
+		gen6_enable_rps(dev_priv);
+
+	if (IS_IRONLAKE_M(dev)) {
+		dev_priv->renderctx = intel_alloc_context_page(dev);
+		if (!dev_priv->renderctx)
+			goto skip_rc6;
+		dev_priv->pwrctx = intel_alloc_context_page(dev);
+		if (!dev_priv->pwrctx) {
+			i915_gem_object_unpin(dev_priv->renderctx);
+			drm_gem_object_unreference(&dev_priv->renderctx->base);
+			dev_priv->renderctx = NULL;
+			goto skip_rc6;
+		}
+		ironlake_enable_rc6(dev);
+	}
+
+skip_rc6:
 	INIT_WORK(&dev_priv->idle_work, intel_idle_update);
 	setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer,
 		    (unsigned long)dev);
@@ -6252,28 +6790,13 @@
 	if (dev_priv->display.disable_fbc)
 		dev_priv->display.disable_fbc(dev);
 
-	if (dev_priv->renderctx) {
-		struct drm_i915_gem_object *obj_priv;
-
-		obj_priv = to_intel_bo(dev_priv->renderctx);
-		I915_WRITE(CCID, obj_priv->gtt_offset &~ CCID_EN);
-		I915_READ(CCID);
-		i915_gem_object_unpin(dev_priv->renderctx);
-		drm_gem_object_unreference(dev_priv->renderctx);
-	}
-
-	if (dev_priv->pwrctx) {
-		struct drm_i915_gem_object *obj_priv;
-
-		obj_priv = to_intel_bo(dev_priv->pwrctx);
-		I915_WRITE(PWRCTXA, obj_priv->gtt_offset &~ PWRCTX_EN);
-		I915_READ(PWRCTXA);
-		i915_gem_object_unpin(dev_priv->pwrctx);
-		drm_gem_object_unreference(dev_priv->pwrctx);
-	}
-
 	if (IS_IRONLAKE_M(dev))
 		ironlake_disable_drps(dev);
+	if (IS_GEN6(dev))
+		gen6_disable_rps(dev);
+
+	if (IS_IRONLAKE_M(dev))
+		ironlake_disable_rc6(dev);
 
 	mutex_unlock(&dev->struct_mutex);
 
@@ -6325,3 +6848,113 @@
 	pci_write_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, gmch_ctrl);
 	return 0;
 }
+
+#ifdef CONFIG_DEBUG_FS
+#include <linux/seq_file.h>
+
+struct intel_display_error_state {
+	struct intel_cursor_error_state {
+		u32 control;
+		u32 position;
+		u32 base;
+		u32 size;
+	} cursor[2];
+
+	struct intel_pipe_error_state {
+		u32 conf;
+		u32 source;
+
+		u32 htotal;
+		u32 hblank;
+		u32 hsync;
+		u32 vtotal;
+		u32 vblank;
+		u32 vsync;
+	} pipe[2];
+
+	struct intel_plane_error_state {
+		u32 control;
+		u32 stride;
+		u32 size;
+		u32 pos;
+		u32 addr;
+		u32 surface;
+		u32 tile_offset;
+	} plane[2];
+};
+
+struct intel_display_error_state *
+intel_display_capture_error_state(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct intel_display_error_state *error;
+	int i;
+
+	error = kmalloc(sizeof(*error), GFP_ATOMIC);
+	if (error == NULL)
+		return NULL;
+
+	for (i = 0; i < 2; i++) {
+		error->cursor[i].control = I915_READ(CURCNTR(i));
+		error->cursor[i].position = I915_READ(CURPOS(i));
+		error->cursor[i].base = I915_READ(CURBASE(i));
+
+		error->plane[i].control = I915_READ(DSPCNTR(i));
+		error->plane[i].stride = I915_READ(DSPSTRIDE(i));
+		error->plane[i].size = I915_READ(DSPSIZE(i));
+		error->plane[i].pos = I915_READ(DSPPOS(i));
+		error->plane[i].addr = I915_READ(DSPADDR(i));
+		if (INTEL_INFO(dev)->gen >= 4) {
+			error->plane[i].surface = I915_READ(DSPSURF(i));
+			error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
+		}
+
+		error->pipe[i].conf = I915_READ(PIPECONF(i));
+		error->pipe[i].source = I915_READ(PIPESRC(i));
+		error->pipe[i].htotal = I915_READ(HTOTAL(i));
+		error->pipe[i].hblank = I915_READ(HBLANK(i));
+		error->pipe[i].hsync = I915_READ(HSYNC(i));
+		error->pipe[i].vtotal = I915_READ(VTOTAL(i));
+		error->pipe[i].vblank = I915_READ(VBLANK(i));
+		error->pipe[i].vsync = I915_READ(VSYNC(i));
+	}
+
+	return error;
+}
+
+void
+intel_display_print_error_state(struct seq_file *m,
+				struct drm_device *dev,
+				struct intel_display_error_state *error)
+{
+	int i;
+
+	for (i = 0; i < 2; i++) {
+		seq_printf(m, "Pipe [%d]:\n", i);
+		seq_printf(m, "  CONF: %08x\n", error->pipe[i].conf);
+		seq_printf(m, "  SRC: %08x\n", error->pipe[i].source);
+		seq_printf(m, "  HTOTAL: %08x\n", error->pipe[i].htotal);
+		seq_printf(m, "  HBLANK: %08x\n", error->pipe[i].hblank);
+		seq_printf(m, "  HSYNC: %08x\n", error->pipe[i].hsync);
+		seq_printf(m, "  VTOTAL: %08x\n", error->pipe[i].vtotal);
+		seq_printf(m, "  VBLANK: %08x\n", error->pipe[i].vblank);
+		seq_printf(m, "  VSYNC: %08x\n", error->pipe[i].vsync);
+
+		seq_printf(m, "Plane [%d]:\n", i);
+		seq_printf(m, "  CNTR: %08x\n", error->plane[i].control);
+		seq_printf(m, "  STRIDE: %08x\n", error->plane[i].stride);
+		seq_printf(m, "  SIZE: %08x\n", error->plane[i].size);
+		seq_printf(m, "  POS: %08x\n", error->plane[i].pos);
+		seq_printf(m, "  ADDR: %08x\n", error->plane[i].addr);
+		if (INTEL_INFO(dev)->gen >= 4) {
+			seq_printf(m, "  SURF: %08x\n", error->plane[i].surface);
+			seq_printf(m, "  TILEOFF: %08x\n", error->plane[i].tile_offset);
+		}
+
+		seq_printf(m, "Cursor [%d]:\n", i);
+		seq_printf(m, "  CNTR: %08x\n", error->cursor[i].control);
+		seq_printf(m, "  POS: %08x\n", error->cursor[i].position);
+		seq_printf(m, "  BASE: %08x\n", error->cursor[i].base);
+	}
+}
+#endif
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 864417c..1f4242b 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -1153,18 +1153,27 @@
 static uint32_t
 intel_gen6_edp_signal_levels(uint8_t train_set)
 {
-	switch (train_set & (DP_TRAIN_VOLTAGE_SWING_MASK|DP_TRAIN_PRE_EMPHASIS_MASK)) {
+	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
+					 DP_TRAIN_PRE_EMPHASIS_MASK);
+	switch (signal_levels) {
 	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
-		return EDP_LINK_TRAIN_400MV_0DB_SNB_B;
+	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
+		return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
+	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
+		return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
 	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
-		return EDP_LINK_TRAIN_400MV_6DB_SNB_B;
+	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6:
+		return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
 	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
-		return EDP_LINK_TRAIN_600MV_3_5DB_SNB_B;
+	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
+		return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
 	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
-		return EDP_LINK_TRAIN_800MV_0DB_SNB_B;
+	case DP_TRAIN_VOLTAGE_SWING_1200 | DP_TRAIN_PRE_EMPHASIS_0:
+		return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
 	default:
-		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level\n");
-		return EDP_LINK_TRAIN_400MV_0DB_SNB_B;
+		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level: "
+			      "0x%x\n", signal_levels);
+		return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
 	}
 }
 
@@ -1334,17 +1343,24 @@
 	struct drm_device *dev = intel_dp->base.base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	bool channel_eq = false;
-	int tries;
+	int tries, cr_tries;
 	u32 reg;
 	uint32_t DP = intel_dp->DP;
 
 	/* channel equalization */
 	tries = 0;
+	cr_tries = 0;
 	channel_eq = false;
 	for (;;) {
 		/* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */
 		uint32_t    signal_levels;
 
+		if (cr_tries > 5) {
+			DRM_ERROR("failed to train DP, aborting\n");
+			intel_dp_link_down(intel_dp);
+			break;
+		}
+
 		if (IS_GEN6(dev) && is_edp(intel_dp)) {
 			signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]);
 			DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
@@ -1367,14 +1383,26 @@
 		if (!intel_dp_get_link_status(intel_dp))
 			break;
 
+		/* Make sure clock is still ok */
+		if (!intel_clock_recovery_ok(intel_dp->link_status, intel_dp->lane_count)) {
+			intel_dp_start_link_train(intel_dp);
+			cr_tries++;
+			continue;
+		}
+
 		if (intel_channel_eq_ok(intel_dp)) {
 			channel_eq = true;
 			break;
 		}
 
-		/* Try 5 times */
-		if (tries > 5)
-			break;
+		/* Try 5 times, then try clock recovery if that fails */
+		if (tries > 5) {
+			intel_dp_link_down(intel_dp);
+			intel_dp_start_link_train(intel_dp);
+			tries = 0;
+			cr_tries++;
+			continue;
+		}
 
 		/* Compute new intel_dp->train_set as requested by target */
 		intel_get_adjust_train(intel_dp);
@@ -1442,8 +1470,7 @@
 		/* Changes to enable or select take place the vblank
 		 * after being written.
 		 */
-		intel_wait_for_vblank(intel_dp->base.base.dev,
-				      intel_crtc->pipe);
+		intel_wait_for_vblank(dev, intel_crtc->pipe);
 	}
 
 	I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index e52c612..74db255 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -127,7 +127,7 @@
 
 struct intel_framebuffer {
 	struct drm_framebuffer base;
-	struct drm_gem_object *obj;
+	struct drm_i915_gem_object *obj;
 };
 
 struct intel_fbdev {
@@ -166,7 +166,7 @@
 	struct intel_unpin_work *unpin_work;
 	int fdi_lanes;
 
-	struct drm_gem_object *cursor_bo;
+	struct drm_i915_gem_object *cursor_bo;
 	uint32_t cursor_addr;
 	int16_t cursor_x, cursor_y;
 	int16_t cursor_width, cursor_height;
@@ -220,8 +220,8 @@
 struct intel_unpin_work {
 	struct work_struct work;
 	struct drm_device *dev;
-	struct drm_gem_object *old_fb_obj;
-	struct drm_gem_object *pending_flip_obj;
+	struct drm_i915_gem_object *old_fb_obj;
+	struct drm_i915_gem_object *pending_flip_obj;
 	struct drm_pending_vblank_event *event;
 	int pending;
 	bool enable_stall_check;
@@ -236,7 +236,8 @@
 extern bool intel_sdvo_init(struct drm_device *dev, int output_device);
 extern void intel_dvo_init(struct drm_device *dev);
 extern void intel_tv_init(struct drm_device *dev);
-extern void intel_mark_busy(struct drm_device *dev, struct drm_gem_object *obj);
+extern void intel_mark_busy(struct drm_device *dev,
+			    struct drm_i915_gem_object *obj);
 extern bool intel_lvds_init(struct drm_device *dev);
 extern void intel_dp_init(struct drm_device *dev, int dp_reg);
 void
@@ -256,6 +257,9 @@
 extern u32 intel_panel_get_max_backlight(struct drm_device *dev);
 extern u32 intel_panel_get_backlight(struct drm_device *dev);
 extern void intel_panel_set_backlight(struct drm_device *dev, u32 level);
+extern void intel_panel_setup_backlight(struct drm_device *dev);
+extern void intel_panel_enable_backlight(struct drm_device *dev);
+extern void intel_panel_disable_backlight(struct drm_device *dev);
 
 extern void intel_crtc_load_lut(struct drm_crtc *crtc);
 extern void intel_encoder_prepare (struct drm_encoder *encoder);
@@ -293,19 +297,22 @@
 				    u16 blue, int regno);
 extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
 				    u16 *blue, int regno);
-extern void intel_init_clock_gating(struct drm_device *dev);
+extern void intel_enable_clock_gating(struct drm_device *dev);
+extern void intel_disable_clock_gating(struct drm_device *dev);
 extern void ironlake_enable_drps(struct drm_device *dev);
 extern void ironlake_disable_drps(struct drm_device *dev);
+extern void gen6_enable_rps(struct drm_i915_private *dev_priv);
+extern void gen6_disable_rps(struct drm_device *dev);
 extern void intel_init_emon(struct drm_device *dev);
 
 extern int intel_pin_and_fence_fb_obj(struct drm_device *dev,
-				      struct drm_gem_object *obj,
-				      bool pipelined);
+				      struct drm_i915_gem_object *obj,
+				      struct intel_ring_buffer *pipelined);
 
 extern int intel_framebuffer_init(struct drm_device *dev,
 				  struct intel_framebuffer *ifb,
 				  struct drm_mode_fb_cmd *mode_cmd,
-				  struct drm_gem_object *obj);
+				  struct drm_i915_gem_object *obj);
 extern int intel_fbdev_init(struct drm_device *dev);
 extern void intel_fbdev_fini(struct drm_device *dev);
 
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index af2a1dd..ee145a2 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -62,13 +62,13 @@
 			  struct drm_fb_helper_surface_size *sizes)
 {
 	struct drm_device *dev = ifbdev->helper.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct fb_info *info;
 	struct drm_framebuffer *fb;
 	struct drm_mode_fb_cmd mode_cmd;
-	struct drm_gem_object *fbo = NULL;
-	struct drm_i915_gem_object *obj_priv;
+	struct drm_i915_gem_object *obj;
 	struct device *device = &dev->pdev->dev;
-	int size, ret, mmio_bar = IS_GEN2(dev) ? 1 : 0;
+	int size, ret;
 
 	/* we don't do packed 24bpp */
 	if (sizes->surface_bpp == 24)
@@ -78,23 +78,22 @@
 	mode_cmd.height = sizes->surface_height;
 
 	mode_cmd.bpp = sizes->surface_bpp;
-	mode_cmd.pitch = ALIGN(mode_cmd.width * ((mode_cmd.bpp + 1) / 8), 64);
+	mode_cmd.pitch = ALIGN(mode_cmd.width * ((mode_cmd.bpp + 7) / 8), 64);
 	mode_cmd.depth = sizes->surface_depth;
 
 	size = mode_cmd.pitch * mode_cmd.height;
 	size = ALIGN(size, PAGE_SIZE);
-	fbo = i915_gem_alloc_object(dev, size);
-	if (!fbo) {
+	obj = i915_gem_alloc_object(dev, size);
+	if (!obj) {
 		DRM_ERROR("failed to allocate framebuffer\n");
 		ret = -ENOMEM;
 		goto out;
 	}
-	obj_priv = to_intel_bo(fbo);
 
 	mutex_lock(&dev->struct_mutex);
 
 	/* Flush everything out, we'll be doing GTT only from now on */
-	ret = intel_pin_and_fence_fb_obj(dev, fbo, false);
+	ret = intel_pin_and_fence_fb_obj(dev, obj, false);
 	if (ret) {
 		DRM_ERROR("failed to pin fb: %d\n", ret);
 		goto out_unref;
@@ -108,7 +107,7 @@
 
 	info->par = ifbdev;
 
-	ret = intel_framebuffer_init(dev, &ifbdev->ifb, &mode_cmd, fbo);
+	ret = intel_framebuffer_init(dev, &ifbdev->ifb, &mode_cmd, obj);
 	if (ret)
 		goto out_unpin;
 
@@ -122,6 +121,11 @@
 	info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
 	info->fbops = &intelfb_ops;
 
+	ret = fb_alloc_cmap(&info->cmap, 256, 0);
+	if (ret) {
+		ret = -ENOMEM;
+		goto out_unpin;
+	}
 	/* setup aperture base/size for vesafb takeover */
 	info->apertures = alloc_apertures(1);
 	if (!info->apertures) {
@@ -129,37 +133,23 @@
 		goto out_unpin;
 	}
 	info->apertures->ranges[0].base = dev->mode_config.fb_base;
-	if (!IS_GEN2(dev))
-		info->apertures->ranges[0].size = pci_resource_len(dev->pdev, 2);
-	else
-		info->apertures->ranges[0].size = pci_resource_len(dev->pdev, 0);
+	info->apertures->ranges[0].size =
+		dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
 
-	info->fix.smem_start = dev->mode_config.fb_base + obj_priv->gtt_offset;
+	info->fix.smem_start = dev->mode_config.fb_base + obj->gtt_offset;
 	info->fix.smem_len = size;
 
-	info->screen_base = ioremap_wc(dev->agp->base + obj_priv->gtt_offset,
-				       size);
+	info->screen_base = ioremap_wc(dev->agp->base + obj->gtt_offset, size);
 	if (!info->screen_base) {
 		ret = -ENOSPC;
 		goto out_unpin;
 	}
-
-	ret = fb_alloc_cmap(&info->cmap, 256, 0);
-	if (ret) {
-		ret = -ENOMEM;
-		goto out_unpin;
-	}
 	info->screen_size = size;
 
 //	memset(info->screen_base, 0, size);
 
-	drm_fb_helper_fill_fix(info, fb->pitch, fb->depth);
 	drm_fb_helper_fill_var(info, &ifbdev->helper, sizes->fb_width, sizes->fb_height);
 
-	/* FIXME: we really shouldn't expose mmio space at all */
-	info->fix.mmio_start = pci_resource_start(dev->pdev, mmio_bar);
-	info->fix.mmio_len = pci_resource_len(dev->pdev, mmio_bar);
-
 	info->pixmap.size = 64*1024;
 	info->pixmap.buf_align = 8;
 	info->pixmap.access_align = 32;
@@ -168,7 +158,7 @@
 
 	DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08x, bo %p\n",
 		      fb->width, fb->height,
-		      obj_priv->gtt_offset, fbo);
+		      obj->gtt_offset, obj);
 
 
 	mutex_unlock(&dev->struct_mutex);
@@ -176,9 +166,9 @@
 	return 0;
 
 out_unpin:
-	i915_gem_object_unpin(fbo);
+	i915_gem_object_unpin(obj);
 out_unref:
-	drm_gem_object_unreference(fbo);
+	drm_gem_object_unreference(&obj->base);
 	mutex_unlock(&dev->struct_mutex);
 out:
 	return ret;
@@ -225,7 +215,7 @@
 
 	drm_framebuffer_cleanup(&ifb->base);
 	if (ifb->obj) {
-		drm_gem_object_unreference_unlocked(ifb->obj);
+		drm_gem_object_unreference_unlocked(&ifb->obj->base);
 		ifb->obj = NULL;
 	}
 }
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index 3dba086..58040f6 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -85,8 +85,9 @@
 
 	/* On most chips, these bits must be preserved in software. */
 	if (!IS_I830(dev) && !IS_845G(dev))
-		reserved = I915_READ(gpio->reg) & (GPIO_DATA_PULLUP_DISABLE |
-						   GPIO_CLOCK_PULLUP_DISABLE);
+		reserved = I915_READ_NOTRACE(gpio->reg) &
+					     (GPIO_DATA_PULLUP_DISABLE |
+					      GPIO_CLOCK_PULLUP_DISABLE);
 
 	return reserved;
 }
@@ -96,9 +97,9 @@
 	struct intel_gpio *gpio = data;
 	struct drm_i915_private *dev_priv = gpio->dev_priv;
 	u32 reserved = get_reserved(gpio);
-	I915_WRITE(gpio->reg, reserved | GPIO_CLOCK_DIR_MASK);
-	I915_WRITE(gpio->reg, reserved);
-	return (I915_READ(gpio->reg) & GPIO_CLOCK_VAL_IN) != 0;
+	I915_WRITE_NOTRACE(gpio->reg, reserved | GPIO_CLOCK_DIR_MASK);
+	I915_WRITE_NOTRACE(gpio->reg, reserved);
+	return (I915_READ_NOTRACE(gpio->reg) & GPIO_CLOCK_VAL_IN) != 0;
 }
 
 static int get_data(void *data)
@@ -106,9 +107,9 @@
 	struct intel_gpio *gpio = data;
 	struct drm_i915_private *dev_priv = gpio->dev_priv;
 	u32 reserved = get_reserved(gpio);
-	I915_WRITE(gpio->reg, reserved | GPIO_DATA_DIR_MASK);
-	I915_WRITE(gpio->reg, reserved);
-	return (I915_READ(gpio->reg) & GPIO_DATA_VAL_IN) != 0;
+	I915_WRITE_NOTRACE(gpio->reg, reserved | GPIO_DATA_DIR_MASK);
+	I915_WRITE_NOTRACE(gpio->reg, reserved);
+	return (I915_READ_NOTRACE(gpio->reg) & GPIO_DATA_VAL_IN) != 0;
 }
 
 static void set_clock(void *data, int state_high)
@@ -124,7 +125,7 @@
 		clock_bits = GPIO_CLOCK_DIR_OUT | GPIO_CLOCK_DIR_MASK |
 			GPIO_CLOCK_VAL_MASK;
 
-	I915_WRITE(gpio->reg, reserved | clock_bits);
+	I915_WRITE_NOTRACE(gpio->reg, reserved | clock_bits);
 	POSTING_READ(gpio->reg);
 }
 
@@ -141,7 +142,7 @@
 		data_bits = GPIO_DATA_DIR_OUT | GPIO_DATA_DIR_MASK |
 			GPIO_DATA_VAL_MASK;
 
-	I915_WRITE(gpio->reg, reserved | data_bits);
+	I915_WRITE_NOTRACE(gpio->reg, reserved | data_bits);
 	POSTING_READ(gpio->reg);
 }
 
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 25bcedf..8f4f6bd 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -106,7 +106,7 @@
 	I915_WRITE(ctl_reg, I915_READ(ctl_reg) | POWER_TARGET_ON);
 	POSTING_READ(lvds_reg);
 
-	intel_panel_set_backlight(dev, dev_priv->backlight_level);
+	intel_panel_enable_backlight(dev);
 }
 
 static void intel_lvds_disable(struct intel_lvds *intel_lvds)
@@ -123,8 +123,7 @@
 		lvds_reg = LVDS;
 	}
 
-	dev_priv->backlight_level = intel_panel_get_backlight(dev);
-	intel_panel_set_backlight(dev, 0);
+	intel_panel_disable_backlight(dev);
 
 	I915_WRITE(ctl_reg, I915_READ(ctl_reg) & ~POWER_TARGET_ON);
 
@@ -304,14 +303,13 @@
 			u32 scaled_width = adjusted_mode->hdisplay * mode->vdisplay;
 			u32 scaled_height = mode->hdisplay * adjusted_mode->vdisplay;
 
-			pfit_control |= PFIT_ENABLE;
 			/* 965+ is easy, it does everything in hw */
 			if (scaled_width > scaled_height)
-				pfit_control |= PFIT_SCALING_PILLAR;
+				pfit_control |= PFIT_ENABLE | PFIT_SCALING_PILLAR;
 			else if (scaled_width < scaled_height)
-				pfit_control |= PFIT_SCALING_LETTER;
-			else
-				pfit_control |= PFIT_SCALING_AUTO;
+				pfit_control |= PFIT_ENABLE | PFIT_SCALING_LETTER;
+			else if (adjusted_mode->hdisplay != mode->hdisplay)
+				pfit_control |= PFIT_ENABLE | PFIT_SCALING_AUTO;
 		} else {
 			u32 scaled_width = adjusted_mode->hdisplay * mode->vdisplay;
 			u32 scaled_height = mode->hdisplay * adjusted_mode->vdisplay;
@@ -358,13 +356,17 @@
 		 * Full scaling, even if it changes the aspect ratio.
 		 * Fortunately this is all done for us in hw.
 		 */
-		pfit_control |= PFIT_ENABLE;
-		if (INTEL_INFO(dev)->gen >= 4)
-			pfit_control |= PFIT_SCALING_AUTO;
-		else
-			pfit_control |= (VERT_AUTO_SCALE | HORIZ_AUTO_SCALE |
-					 VERT_INTERP_BILINEAR |
-					 HORIZ_INTERP_BILINEAR);
+		if (mode->vdisplay != adjusted_mode->vdisplay ||
+		    mode->hdisplay != adjusted_mode->hdisplay) {
+			pfit_control |= PFIT_ENABLE;
+			if (INTEL_INFO(dev)->gen >= 4)
+				pfit_control |= PFIT_SCALING_AUTO;
+			else
+				pfit_control |= (VERT_AUTO_SCALE |
+						 VERT_INTERP_BILINEAR |
+						 HORIZ_AUTO_SCALE |
+						 HORIZ_INTERP_BILINEAR);
+		}
 		break;
 
 	default:
@@ -372,6 +374,10 @@
 	}
 
 out:
+	if ((pfit_control & PFIT_ENABLE) == 0) {
+		pfit_control = 0;
+		pfit_pgm_ratios = 0;
+	}
 	if (pfit_control != intel_lvds->pfit_control ||
 	    pfit_pgm_ratios != intel_lvds->pfit_pgm_ratios) {
 		intel_lvds->pfit_control = pfit_control;
@@ -395,8 +401,6 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
 
-	dev_priv->backlight_level = intel_panel_get_backlight(dev);
-
 	/* We try to do the minimum that is necessary in order to unlock
 	 * the registers for mode setting.
 	 *
@@ -427,9 +431,6 @@
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
 
-	if (dev_priv->backlight_level == 0)
-		dev_priv->backlight_level = intel_panel_get_max_backlight(dev);
-
 	/* Undo any unlocking done in prepare to prevent accidental
 	 * adjustment of the registers.
 	 */
@@ -914,6 +915,8 @@
 
 	intel_encoder->clone_mask = (1 << INTEL_LVDS_CLONE_BIT);
 	intel_encoder->crtc_mask = (1 << 1);
+	if (INTEL_INFO(dev)->gen >= 5)
+		intel_encoder->crtc_mask |= (1 << 0);
 	drm_encoder_helper_add(encoder, &intel_lvds_helper_funcs);
 	drm_connector_helper_add(connector, &intel_lvds_connector_helper_funcs);
 	connector->display_info.subpixel_order = SubPixelHorizontalRGB;
@@ -1019,10 +1022,18 @@
 out:
 	if (HAS_PCH_SPLIT(dev)) {
 		u32 pwm;
-		/* make sure PWM is enabled */
+
+		pipe = (I915_READ(PCH_LVDS) & LVDS_PIPEB_SELECT) ? 1 : 0;
+
+		/* make sure PWM is enabled and locked to the LVDS pipe */
 		pwm = I915_READ(BLC_PWM_CPU_CTL2);
-		pwm |= (PWM_ENABLE | PWM_PIPE_B);
-		I915_WRITE(BLC_PWM_CPU_CTL2, pwm);
+		if (pipe == 0 && (pwm & PWM_PIPE_B))
+			I915_WRITE(BLC_PWM_CPU_CTL2, pwm & ~PWM_ENABLE);
+		if (pipe)
+			pwm |= PWM_PIPE_B;
+		else
+			pwm &= ~PWM_PIPE_B;
+		I915_WRITE(BLC_PWM_CPU_CTL2, pwm | PWM_ENABLE);
 
 		pwm = I915_READ(BLC_PWM_PCH_CTL1);
 		pwm |= PWM_PCH_ENABLE;
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index 9b0d9a8..f295a7a 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -273,14 +273,8 @@
 	struct opregion_asle *asle = dev_priv->opregion.asle;
 
 	if (asle) {
-		if (IS_MOBILE(dev)) {
-			unsigned long irqflags;
-
-			spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
+		if (IS_MOBILE(dev))
 			intel_enable_asle(dev);
-			spin_unlock_irqrestore(&dev_priv->user_irq_lock,
-					       irqflags);
-		}
 
 		asle->tche = ASLE_ALS_EN | ASLE_BLC_EN | ASLE_PFIT_EN |
 			ASLE_PFMB_EN;
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 02ff0a4..3fbb98b 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -221,15 +221,16 @@
 	int ret;
 
 	BUG_ON(overlay->last_flip_req);
-	overlay->last_flip_req =
-		i915_add_request(dev, NULL, request, &dev_priv->render_ring);
-	if (overlay->last_flip_req == 0)
-		return -ENOMEM;
-
+	ret = i915_add_request(dev, NULL, request, LP_RING(dev_priv));
+	if (ret) {
+		kfree(request);
+		return ret;
+	}
+	overlay->last_flip_req = request->seqno;
 	overlay->flip_tail = tail;
 	ret = i915_do_wait_request(dev,
 				   overlay->last_flip_req, true,
-				   &dev_priv->render_ring);
+				   LP_RING(dev_priv));
 	if (ret)
 		return ret;
 
@@ -289,6 +290,7 @@
 static int intel_overlay_on(struct intel_overlay *overlay)
 {
 	struct drm_device *dev = overlay->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_request *request;
 	int pipe_a_quirk = 0;
 	int ret;
@@ -308,7 +310,12 @@
 		goto out;
 	}
 
-	BEGIN_LP_RING(4);
+	ret = BEGIN_LP_RING(4);
+	if (ret) {
+		kfree(request);
+		goto out;
+	}
+
 	OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_ON);
 	OUT_RING(overlay->flip_addr | OFC_UPDATE);
 	OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
@@ -332,6 +339,7 @@
 	struct drm_i915_gem_request *request;
 	u32 flip_addr = overlay->flip_addr;
 	u32 tmp;
+	int ret;
 
 	BUG_ON(!overlay->active);
 
@@ -347,36 +355,44 @@
 	if (tmp & (1 << 17))
 		DRM_DEBUG("overlay underrun, DOVSTA: %x\n", tmp);
 
-	BEGIN_LP_RING(2);
+	ret = BEGIN_LP_RING(2);
+	if (ret) {
+		kfree(request);
+		return ret;
+	}
 	OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
 	OUT_RING(flip_addr);
         ADVANCE_LP_RING();
 
-	overlay->last_flip_req =
-		i915_add_request(dev, NULL, request, &dev_priv->render_ring);
+	ret = i915_add_request(dev, NULL, request, LP_RING(dev_priv));
+	if (ret) {
+		kfree(request);
+		return ret;
+	}
+
+	overlay->last_flip_req = request->seqno;
 	return 0;
 }
 
 static void intel_overlay_release_old_vid_tail(struct intel_overlay *overlay)
 {
-	struct drm_gem_object *obj = &overlay->old_vid_bo->base;
+	struct drm_i915_gem_object *obj = overlay->old_vid_bo;
 
 	i915_gem_object_unpin(obj);
-	drm_gem_object_unreference(obj);
+	drm_gem_object_unreference(&obj->base);
 
 	overlay->old_vid_bo = NULL;
 }
 
 static void intel_overlay_off_tail(struct intel_overlay *overlay)
 {
-	struct drm_gem_object *obj;
+	struct drm_i915_gem_object *obj = overlay->vid_bo;
 
 	/* never have the overlay hw on without showing a frame */
 	BUG_ON(!overlay->vid_bo);
-	obj = &overlay->vid_bo->base;
 
 	i915_gem_object_unpin(obj);
-	drm_gem_object_unreference(obj);
+	drm_gem_object_unreference(&obj->base);
 	overlay->vid_bo = NULL;
 
 	overlay->crtc->overlay = NULL;
@@ -389,8 +405,10 @@
 			     bool interruptible)
 {
 	struct drm_device *dev = overlay->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 flip_addr = overlay->flip_addr;
 	struct drm_i915_gem_request *request;
+	int ret;
 
 	BUG_ON(!overlay->active);
 
@@ -404,7 +422,11 @@
 	 * of the hw. Do it in both cases */
 	flip_addr |= OFC_UPDATE;
 
-	BEGIN_LP_RING(6);
+	ret = BEGIN_LP_RING(6);
+	if (ret) {
+		kfree(request);
+		return ret;
+	}
 	/* wait for overlay to go idle */
 	OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
 	OUT_RING(flip_addr);
@@ -432,7 +454,7 @@
 		return 0;
 
 	ret = i915_do_wait_request(dev, overlay->last_flip_req,
-				   interruptible, &dev_priv->render_ring);
+				   interruptible, LP_RING(dev_priv));
 	if (ret)
 		return ret;
 
@@ -467,7 +489,12 @@
 		if (request == NULL)
 			return -ENOMEM;
 
-		BEGIN_LP_RING(2);
+		ret = BEGIN_LP_RING(2);
+		if (ret) {
+			kfree(request);
+			return ret;
+		}
+
 		OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
 		OUT_RING(MI_NOOP);
 		ADVANCE_LP_RING();
@@ -736,13 +763,12 @@
 }
 
 static int intel_overlay_do_put_image(struct intel_overlay *overlay,
-				      struct drm_gem_object *new_bo,
+				      struct drm_i915_gem_object *new_bo,
 				      struct put_image_params *params)
 {
 	int ret, tmp_width;
 	struct overlay_registers *regs;
 	bool scale_changed = false;
-	struct drm_i915_gem_object *bo_priv = to_intel_bo(new_bo);
 	struct drm_device *dev = overlay->dev;
 
 	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
@@ -753,7 +779,7 @@
 	if (ret != 0)
 		return ret;
 
-	ret = i915_gem_object_pin(new_bo, PAGE_SIZE);
+	ret = i915_gem_object_pin(new_bo, PAGE_SIZE, true);
 	if (ret != 0)
 		return ret;
 
@@ -761,6 +787,10 @@
 	if (ret != 0)
 		goto out_unpin;
 
+	ret = i915_gem_object_put_fence(new_bo);
+	if (ret)
+		goto out_unpin;
+
 	if (!overlay->active) {
 		regs = intel_overlay_map_regs(overlay);
 		if (!regs) {
@@ -797,7 +827,7 @@
 	regs->SWIDTHSW = calc_swidthsw(overlay->dev,
 				       params->offset_Y, tmp_width);
 	regs->SHEIGHT = params->src_h;
-	regs->OBUF_0Y = bo_priv->gtt_offset + params-> offset_Y;
+	regs->OBUF_0Y = new_bo->gtt_offset + params-> offset_Y;
 	regs->OSTRIDE = params->stride_Y;
 
 	if (params->format & I915_OVERLAY_YUV_PLANAR) {
@@ -811,8 +841,8 @@
 				      params->src_w/uv_hscale);
 		regs->SWIDTHSW |= max_t(u32, tmp_U, tmp_V) << 16;
 		regs->SHEIGHT |= (params->src_h/uv_vscale) << 16;
-		regs->OBUF_0U = bo_priv->gtt_offset + params->offset_U;
-		regs->OBUF_0V = bo_priv->gtt_offset + params->offset_V;
+		regs->OBUF_0U = new_bo->gtt_offset + params->offset_U;
+		regs->OBUF_0V = new_bo->gtt_offset + params->offset_V;
 		regs->OSTRIDE |= params->stride_UV << 16;
 	}
 
@@ -829,7 +859,7 @@
 		goto out_unpin;
 
 	overlay->old_vid_bo = overlay->vid_bo;
-	overlay->vid_bo = to_intel_bo(new_bo);
+	overlay->vid_bo = new_bo;
 
 	return 0;
 
@@ -942,7 +972,7 @@
 
 static int check_overlay_src(struct drm_device *dev,
 			     struct drm_intel_overlay_put_image *rec,
-			     struct drm_gem_object *new_bo)
+			     struct drm_i915_gem_object *new_bo)
 {
 	int uv_hscale = uv_hsubsampling(rec->flags);
 	int uv_vscale = uv_vsubsampling(rec->flags);
@@ -1027,7 +1057,7 @@
 			return -EINVAL;
 
 		tmp = rec->stride_Y*rec->src_height;
-		if (rec->offset_Y + tmp > new_bo->size)
+		if (rec->offset_Y + tmp > new_bo->base.size)
 			return -EINVAL;
 		break;
 
@@ -1038,12 +1068,12 @@
 			return -EINVAL;
 
 		tmp = rec->stride_Y * rec->src_height;
-		if (rec->offset_Y + tmp > new_bo->size)
+		if (rec->offset_Y + tmp > new_bo->base.size)
 			return -EINVAL;
 
 		tmp = rec->stride_UV * (rec->src_height / uv_vscale);
-		if (rec->offset_U + tmp > new_bo->size ||
-		    rec->offset_V + tmp > new_bo->size)
+		if (rec->offset_U + tmp > new_bo->base.size ||
+		    rec->offset_V + tmp > new_bo->base.size)
 			return -EINVAL;
 		break;
 	}
@@ -1086,7 +1116,7 @@
 	struct intel_overlay *overlay;
 	struct drm_mode_object *drmmode_obj;
 	struct intel_crtc *crtc;
-	struct drm_gem_object *new_bo;
+	struct drm_i915_gem_object *new_bo;
 	struct put_image_params *params;
 	int ret;
 
@@ -1125,8 +1155,8 @@
 	}
 	crtc = to_intel_crtc(obj_to_crtc(drmmode_obj));
 
-	new_bo = drm_gem_object_lookup(dev, file_priv,
-				       put_image_rec->bo_handle);
+	new_bo = to_intel_bo(drm_gem_object_lookup(dev, file_priv,
+						   put_image_rec->bo_handle));
 	if (!new_bo) {
 		ret = -ENOENT;
 		goto out_free;
@@ -1135,6 +1165,12 @@
 	mutex_lock(&dev->mode_config.mutex);
 	mutex_lock(&dev->struct_mutex);
 
+	if (new_bo->tiling_mode) {
+		DRM_ERROR("buffer used for overlay image can not be tiled\n");
+		ret = -EINVAL;
+		goto out_unlock;
+	}
+
 	ret = intel_overlay_recover_from_interrupt(overlay, true);
 	if (ret != 0)
 		goto out_unlock;
@@ -1217,7 +1253,7 @@
 out_unlock:
 	mutex_unlock(&dev->struct_mutex);
 	mutex_unlock(&dev->mode_config.mutex);
-	drm_gem_object_unreference_unlocked(new_bo);
+	drm_gem_object_unreference_unlocked(&new_bo->base);
 out_free:
 	kfree(params);
 
@@ -1370,7 +1406,7 @@
 {
         drm_i915_private_t *dev_priv = dev->dev_private;
 	struct intel_overlay *overlay;
-	struct drm_gem_object *reg_bo;
+	struct drm_i915_gem_object *reg_bo;
 	struct overlay_registers *regs;
 	int ret;
 
@@ -1385,7 +1421,7 @@
 	reg_bo = i915_gem_alloc_object(dev, PAGE_SIZE);
 	if (!reg_bo)
 		goto out_free;
-	overlay->reg_bo = to_intel_bo(reg_bo);
+	overlay->reg_bo = reg_bo;
 
 	if (OVERLAY_NEEDS_PHYSICAL(dev)) {
 		ret = i915_gem_attach_phys_object(dev, reg_bo,
@@ -1395,14 +1431,14 @@
                         DRM_ERROR("failed to attach phys overlay regs\n");
                         goto out_free_bo;
                 }
-		overlay->flip_addr = overlay->reg_bo->phys_obj->handle->busaddr;
+		overlay->flip_addr = reg_bo->phys_obj->handle->busaddr;
 	} else {
-		ret = i915_gem_object_pin(reg_bo, PAGE_SIZE);
+		ret = i915_gem_object_pin(reg_bo, PAGE_SIZE, true);
 		if (ret) {
                         DRM_ERROR("failed to pin overlay register bo\n");
                         goto out_free_bo;
                 }
-		overlay->flip_addr = overlay->reg_bo->gtt_offset;
+		overlay->flip_addr = reg_bo->gtt_offset;
 
 		ret = i915_gem_object_set_to_gtt_domain(reg_bo, true);
 		if (ret) {
@@ -1434,7 +1470,7 @@
 out_unpin_bo:
 	i915_gem_object_unpin(reg_bo);
 out_free_bo:
-	drm_gem_object_unreference(reg_bo);
+	drm_gem_object_unreference(&reg_bo->base);
 out_free:
 	kfree(overlay);
 	return;
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 92ff8f3..e00d200 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -125,15 +125,55 @@
 	return 0;
 }
 
+static u32 i915_read_blc_pwm_ctl(struct drm_i915_private *dev_priv)
+{
+	u32 val;
+
+	/* Restore the CTL value if it was lost, e.g. after a GPU reset */
+
+	if (HAS_PCH_SPLIT(dev_priv->dev)) {
+		val = I915_READ(BLC_PWM_PCH_CTL2);
+		if (dev_priv->saveBLC_PWM_CTL2 == 0) {
+			dev_priv->saveBLC_PWM_CTL2 = val;
+		} else if (val == 0) {
+			I915_WRITE(BLC_PWM_PCH_CTL2,
+				   dev_priv->saveBLC_PWM_CTL);
+			val = dev_priv->saveBLC_PWM_CTL;
+		}
+	} else {
+		val = I915_READ(BLC_PWM_CTL);
+		if (dev_priv->saveBLC_PWM_CTL == 0) {
+			dev_priv->saveBLC_PWM_CTL = val;
+			dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2);
+		} else if (val == 0) {
+			I915_WRITE(BLC_PWM_CTL,
+				   dev_priv->saveBLC_PWM_CTL);
+			I915_WRITE(BLC_PWM_CTL2,
+				   dev_priv->saveBLC_PWM_CTL2);
+			val = dev_priv->saveBLC_PWM_CTL;
+		}
+	}
+
+	return val;
+}
+
 u32 intel_panel_get_max_backlight(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 max;
 
+	max = i915_read_blc_pwm_ctl(dev_priv);
+	if (max == 0) {
+		/* XXX add code here to query mode clock or hardware clock
+		 * and program max PWM appropriately.
+		 */
+		printk_once(KERN_WARNING "fixme: max PWM is zero.\n");
+		return 1;
+	}
+
 	if (HAS_PCH_SPLIT(dev)) {
-		max = I915_READ(BLC_PWM_PCH_CTL2) >> 16;
+		max >>= 16;
 	} else {
-		max = I915_READ(BLC_PWM_CTL);
 		if (IS_PINEVIEW(dev)) {
 			max >>= 17;
 		} else {
@@ -146,14 +186,6 @@
 			max *= 0xff;
 	}
 
-	if (max == 0) {
-		/* XXX add code here to query mode clock or hardware clock
-		 * and program max PWM appropriately.
-		 */
-		DRM_ERROR("fixme: max PWM is zero.\n");
-		max = 1;
-	}
-
 	DRM_DEBUG_DRIVER("max backlight PWM = %d\n", max);
 	return max;
 }
@@ -218,3 +250,34 @@
 		tmp &= ~BACKLIGHT_DUTY_CYCLE_MASK;
 	I915_WRITE(BLC_PWM_CTL, tmp | level);
 }
+
+void intel_panel_disable_backlight(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (dev_priv->backlight_enabled) {
+		dev_priv->backlight_level = intel_panel_get_backlight(dev);
+		dev_priv->backlight_enabled = false;
+	}
+
+	intel_panel_set_backlight(dev, 0);
+}
+
+void intel_panel_enable_backlight(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (dev_priv->backlight_level == 0)
+		dev_priv->backlight_level = intel_panel_get_max_backlight(dev);
+
+	intel_panel_set_backlight(dev, dev_priv->backlight_level);
+	dev_priv->backlight_enabled = true;
+}
+
+void intel_panel_setup_backlight(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	dev_priv->backlight_level = intel_panel_get_max_backlight(dev);
+	dev_priv->backlight_enabled = dev_priv->backlight_level != 0;
+}
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 31cd7e3..03e3370 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -48,14 +48,15 @@
 	return seqno;
 }
 
-static void
-render_ring_flush(struct drm_device *dev,
-		  struct intel_ring_buffer *ring,
+static int
+render_ring_flush(struct intel_ring_buffer *ring,
 		  u32	invalidate_domains,
 		  u32	flush_domains)
 {
+	struct drm_device *dev = ring->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	u32 cmd;
+	int ret;
 
 #if WATCH_EXEC
 	DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
@@ -109,49 +110,54 @@
 		if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
 			cmd |= MI_EXE_FLUSH;
 
+		if (invalidate_domains & I915_GEM_DOMAIN_COMMAND &&
+		    (IS_G4X(dev) || IS_GEN5(dev)))
+			cmd |= MI_INVALIDATE_ISP;
+
 #if WATCH_EXEC
 		DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);
 #endif
-		intel_ring_begin(dev, ring, 2);
-		intel_ring_emit(dev, ring, cmd);
-		intel_ring_emit(dev, ring, MI_NOOP);
-		intel_ring_advance(dev, ring);
+		ret = intel_ring_begin(ring, 2);
+		if (ret)
+			return ret;
+
+		intel_ring_emit(ring, cmd);
+		intel_ring_emit(ring, MI_NOOP);
+		intel_ring_advance(ring);
 	}
+
+	return 0;
 }
 
-static void ring_write_tail(struct drm_device *dev,
-			    struct intel_ring_buffer *ring,
+static void ring_write_tail(struct intel_ring_buffer *ring,
 			    u32 value)
 {
-	drm_i915_private_t *dev_priv = dev->dev_private;
+	drm_i915_private_t *dev_priv = ring->dev->dev_private;
 	I915_WRITE_TAIL(ring, value);
 }
 
-u32 intel_ring_get_active_head(struct drm_device *dev,
-			       struct intel_ring_buffer *ring)
+u32 intel_ring_get_active_head(struct intel_ring_buffer *ring)
 {
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	u32 acthd_reg = INTEL_INFO(dev)->gen >= 4 ?
+	drm_i915_private_t *dev_priv = ring->dev->dev_private;
+	u32 acthd_reg = INTEL_INFO(ring->dev)->gen >= 4 ?
 			RING_ACTHD(ring->mmio_base) : ACTHD;
 
 	return I915_READ(acthd_reg);
 }
 
-static int init_ring_common(struct drm_device *dev,
-			    struct intel_ring_buffer *ring)
+static int init_ring_common(struct intel_ring_buffer *ring)
 {
+	drm_i915_private_t *dev_priv = ring->dev->dev_private;
+	struct drm_i915_gem_object *obj = ring->obj;
 	u32 head;
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *obj_priv;
-	obj_priv = to_intel_bo(ring->gem_object);
 
 	/* Stop the ring if it's running. */
 	I915_WRITE_CTL(ring, 0);
 	I915_WRITE_HEAD(ring, 0);
-	ring->write_tail(dev, ring, 0);
+	ring->write_tail(ring, 0);
 
 	/* Initialize the ring. */
-	I915_WRITE_START(ring, obj_priv->gtt_offset);
+	I915_WRITE_START(ring, obj->gtt_offset);
 	head = I915_READ_HEAD(ring) & HEAD_ADDR;
 
 	/* G45 ring initialization fails to reset head to zero */
@@ -178,12 +184,13 @@
 	}
 
 	I915_WRITE_CTL(ring,
-			((ring->gem_object->size - PAGE_SIZE) & RING_NR_PAGES)
+			((ring->size - PAGE_SIZE) & RING_NR_PAGES)
 			| RING_REPORT_64K | RING_VALID);
 
-	head = I915_READ_HEAD(ring) & HEAD_ADDR;
 	/* If the head is still not zero, the ring is dead */
-	if (head != 0) {
+	if ((I915_READ_CTL(ring) & RING_VALID) == 0 ||
+	    I915_READ_START(ring) != obj->gtt_offset ||
+	    (I915_READ_HEAD(ring) & HEAD_ADDR) != 0) {
 		DRM_ERROR("%s initialization failed "
 				"ctl %08x head %08x tail %08x start %08x\n",
 				ring->name,
@@ -194,8 +201,8 @@
 		return -EIO;
 	}
 
-	if (!drm_core_check_feature(dev, DRIVER_MODESET))
-		i915_kernel_lost_context(dev);
+	if (!drm_core_check_feature(ring->dev, DRIVER_MODESET))
+		i915_kernel_lost_context(ring->dev);
 	else {
 		ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
 		ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
@@ -203,335 +210,562 @@
 		if (ring->space < 0)
 			ring->space += ring->size;
 	}
+
 	return 0;
 }
 
-static int init_render_ring(struct drm_device *dev,
-			    struct intel_ring_buffer *ring)
+/*
+ * 965+ support PIPE_CONTROL commands, which provide finer grained control
+ * over cache flushing.
+ */
+struct pipe_control {
+	struct drm_i915_gem_object *obj;
+	volatile u32 *cpu_page;
+	u32 gtt_offset;
+};
+
+static int
+init_pipe_control(struct intel_ring_buffer *ring)
 {
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	int ret = init_ring_common(dev, ring);
-	int mode;
+	struct pipe_control *pc;
+	struct drm_i915_gem_object *obj;
+	int ret;
+
+	if (ring->private)
+		return 0;
+
+	pc = kmalloc(sizeof(*pc), GFP_KERNEL);
+	if (!pc)
+		return -ENOMEM;
+
+	obj = i915_gem_alloc_object(ring->dev, 4096);
+	if (obj == NULL) {
+		DRM_ERROR("Failed to allocate seqno page\n");
+		ret = -ENOMEM;
+		goto err;
+	}
+	obj->agp_type = AGP_USER_CACHED_MEMORY;
+
+	ret = i915_gem_object_pin(obj, 4096, true);
+	if (ret)
+		goto err_unref;
+
+	pc->gtt_offset = obj->gtt_offset;
+	pc->cpu_page =  kmap(obj->pages[0]);
+	if (pc->cpu_page == NULL)
+		goto err_unpin;
+
+	pc->obj = obj;
+	ring->private = pc;
+	return 0;
+
+err_unpin:
+	i915_gem_object_unpin(obj);
+err_unref:
+	drm_gem_object_unreference(&obj->base);
+err:
+	kfree(pc);
+	return ret;
+}
+
+static void
+cleanup_pipe_control(struct intel_ring_buffer *ring)
+{
+	struct pipe_control *pc = ring->private;
+	struct drm_i915_gem_object *obj;
+
+	if (!ring->private)
+		return;
+
+	obj = pc->obj;
+	kunmap(obj->pages[0]);
+	i915_gem_object_unpin(obj);
+	drm_gem_object_unreference(&obj->base);
+
+	kfree(pc);
+	ring->private = NULL;
+}
+
+static int init_render_ring(struct intel_ring_buffer *ring)
+{
+	struct drm_device *dev = ring->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret = init_ring_common(ring);
 
 	if (INTEL_INFO(dev)->gen > 3) {
-		mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH;
+		int mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH;
 		if (IS_GEN6(dev))
 			mode |= MI_FLUSH_ENABLE << 16 | MI_FLUSH_ENABLE;
 		I915_WRITE(MI_MODE, mode);
 	}
+
+	if (INTEL_INFO(dev)->gen >= 6) {
+	} else if (IS_GEN5(dev)) {
+		ret = init_pipe_control(ring);
+		if (ret)
+			return ret;
+	}
+
 	return ret;
 }
 
-#define PIPE_CONTROL_FLUSH(addr)					\
+static void render_ring_cleanup(struct intel_ring_buffer *ring)
+{
+	if (!ring->private)
+		return;
+
+	cleanup_pipe_control(ring);
+}
+
+static void
+update_semaphore(struct intel_ring_buffer *ring, int i, u32 seqno)
+{
+	struct drm_device *dev = ring->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int id;
+
+	/*
+	 * cs -> 1 = vcs, 0 = bcs
+	 * vcs -> 1 = bcs, 0 = cs,
+	 * bcs -> 1 = cs, 0 = vcs.
+	 */
+	id = ring - dev_priv->ring;
+	id += 2 - i;
+	id %= 3;
+
+	intel_ring_emit(ring,
+			MI_SEMAPHORE_MBOX |
+			MI_SEMAPHORE_REGISTER |
+			MI_SEMAPHORE_UPDATE);
+	intel_ring_emit(ring, seqno);
+	intel_ring_emit(ring,
+			RING_SYNC_0(dev_priv->ring[id].mmio_base) + 4*i);
+}
+
+static int
+gen6_add_request(struct intel_ring_buffer *ring,
+		 u32 *result)
+{
+	u32 seqno;
+	int ret;
+
+	ret = intel_ring_begin(ring, 10);
+	if (ret)
+		return ret;
+
+	seqno = i915_gem_get_seqno(ring->dev);
+	update_semaphore(ring, 0, seqno);
+	update_semaphore(ring, 1, seqno);
+
+	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
+	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+	intel_ring_emit(ring, seqno);
+	intel_ring_emit(ring, MI_USER_INTERRUPT);
+	intel_ring_advance(ring);
+
+	*result = seqno;
+	return 0;
+}
+
+int
+intel_ring_sync(struct intel_ring_buffer *ring,
+		struct intel_ring_buffer *to,
+		u32 seqno)
+{
+	int ret;
+
+	ret = intel_ring_begin(ring, 4);
+	if (ret)
+		return ret;
+
+	intel_ring_emit(ring,
+			MI_SEMAPHORE_MBOX |
+			MI_SEMAPHORE_REGISTER |
+			intel_ring_sync_index(ring, to) << 17 |
+			MI_SEMAPHORE_COMPARE);
+	intel_ring_emit(ring, seqno);
+	intel_ring_emit(ring, 0);
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_advance(ring);
+
+	return 0;
+}
+
+#define PIPE_CONTROL_FLUSH(ring__, addr__)					\
 do {									\
-	OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |		\
+	intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |		\
 		 PIPE_CONTROL_DEPTH_STALL | 2);				\
-	OUT_RING(addr | PIPE_CONTROL_GLOBAL_GTT);			\
-	OUT_RING(0);							\
-	OUT_RING(0);							\
+	intel_ring_emit(ring__, (addr__) | PIPE_CONTROL_GLOBAL_GTT);			\
+	intel_ring_emit(ring__, 0);							\
+	intel_ring_emit(ring__, 0);							\
 } while (0)
 
-/**
- * Creates a new sequence number, emitting a write of it to the status page
- * plus an interrupt, which will trigger i915_user_interrupt_handler.
- *
- * Must be called with struct_lock held.
- *
- * Returned sequence numbers are nonzero on success.
- */
-static u32
-render_ring_add_request(struct drm_device *dev,
-			struct intel_ring_buffer *ring,
-			u32 flush_domains)
+static int
+pc_render_add_request(struct intel_ring_buffer *ring,
+		      u32 *result)
 {
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	u32 seqno;
+	struct drm_device *dev = ring->dev;
+	u32 seqno = i915_gem_get_seqno(dev);
+	struct pipe_control *pc = ring->private;
+	u32 scratch_addr = pc->gtt_offset + 128;
+	int ret;
 
-	seqno = i915_gem_get_seqno(dev);
+	/* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently
+	 * incoherent with writes to memory, i.e. completely fubar,
+	 * so we need to use PIPE_NOTIFY instead.
+	 *
+	 * However, we also need to workaround the qword write
+	 * incoherence by flushing the 6 PIPE_NOTIFY buffers out to
+	 * memory before requesting an interrupt.
+	 */
+	ret = intel_ring_begin(ring, 32);
+	if (ret)
+		return ret;
 
-	if (IS_GEN6(dev)) {
-		BEGIN_LP_RING(6);
-		OUT_RING(GFX_OP_PIPE_CONTROL | 3);
-		OUT_RING(PIPE_CONTROL_QW_WRITE |
-			 PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_IS_FLUSH |
-			 PIPE_CONTROL_NOTIFY);
-		OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
-		OUT_RING(seqno);
-		OUT_RING(0);
-		OUT_RING(0);
-		ADVANCE_LP_RING();
-	} else if (HAS_PIPE_CONTROL(dev)) {
-		u32 scratch_addr = dev_priv->seqno_gfx_addr + 128;
+	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
+			PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH);
+	intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
+	intel_ring_emit(ring, seqno);
+	intel_ring_emit(ring, 0);
+	PIPE_CONTROL_FLUSH(ring, scratch_addr);
+	scratch_addr += 128; /* write to separate cachelines */
+	PIPE_CONTROL_FLUSH(ring, scratch_addr);
+	scratch_addr += 128;
+	PIPE_CONTROL_FLUSH(ring, scratch_addr);
+	scratch_addr += 128;
+	PIPE_CONTROL_FLUSH(ring, scratch_addr);
+	scratch_addr += 128;
+	PIPE_CONTROL_FLUSH(ring, scratch_addr);
+	scratch_addr += 128;
+	PIPE_CONTROL_FLUSH(ring, scratch_addr);
+	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
+			PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH |
+			PIPE_CONTROL_NOTIFY);
+	intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
+	intel_ring_emit(ring, seqno);
+	intel_ring_emit(ring, 0);
+	intel_ring_advance(ring);
 
-		/*
-		 * Workaround qword write incoherence by flushing the
-		 * PIPE_NOTIFY buffers out to memory before requesting
-		 * an interrupt.
-		 */
-		BEGIN_LP_RING(32);
-		OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
-			 PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH);
-		OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
-		OUT_RING(seqno);
-		OUT_RING(0);
-		PIPE_CONTROL_FLUSH(scratch_addr);
-		scratch_addr += 128; /* write to separate cachelines */
-		PIPE_CONTROL_FLUSH(scratch_addr);
-		scratch_addr += 128;
-		PIPE_CONTROL_FLUSH(scratch_addr);
-		scratch_addr += 128;
-		PIPE_CONTROL_FLUSH(scratch_addr);
-		scratch_addr += 128;
-		PIPE_CONTROL_FLUSH(scratch_addr);
-		scratch_addr += 128;
-		PIPE_CONTROL_FLUSH(scratch_addr);
-		OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
-			 PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH |
-			 PIPE_CONTROL_NOTIFY);
-		OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
-		OUT_RING(seqno);
-		OUT_RING(0);
-		ADVANCE_LP_RING();
-	} else {
-		BEGIN_LP_RING(4);
-		OUT_RING(MI_STORE_DWORD_INDEX);
-		OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-		OUT_RING(seqno);
+	*result = seqno;
+	return 0;
+}
 
-		OUT_RING(MI_USER_INTERRUPT);
-		ADVANCE_LP_RING();
-	}
-	return seqno;
+static int
+render_ring_add_request(struct intel_ring_buffer *ring,
+			u32 *result)
+{
+	struct drm_device *dev = ring->dev;
+	u32 seqno = i915_gem_get_seqno(dev);
+	int ret;
+
+	ret = intel_ring_begin(ring, 4);
+	if (ret)
+		return ret;
+
+	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
+	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+	intel_ring_emit(ring, seqno);
+	intel_ring_emit(ring, MI_USER_INTERRUPT);
+	intel_ring_advance(ring);
+
+	*result = seqno;
+	return 0;
 }
 
 static u32
-render_ring_get_seqno(struct drm_device *dev,
-		      struct intel_ring_buffer *ring)
-{
-	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-	if (HAS_PIPE_CONTROL(dev))
-		return ((volatile u32 *)(dev_priv->seqno_page))[0];
-	else
-		return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
-}
-
-static void
-render_ring_get_user_irq(struct drm_device *dev,
-			 struct intel_ring_buffer *ring)
-{
-	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-	unsigned long irqflags;
-
-	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
-	if (dev->irq_enabled && (++ring->user_irq_refcount == 1)) {
-		if (HAS_PCH_SPLIT(dev))
-			ironlake_enable_graphics_irq(dev_priv, GT_PIPE_NOTIFY);
-		else
-			i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
-	}
-	spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
-}
-
-static void
-render_ring_put_user_irq(struct drm_device *dev,
-			 struct intel_ring_buffer *ring)
-{
-	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-	unsigned long irqflags;
-
-	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
-	BUG_ON(dev->irq_enabled && ring->user_irq_refcount <= 0);
-	if (dev->irq_enabled && (--ring->user_irq_refcount == 0)) {
-		if (HAS_PCH_SPLIT(dev))
-			ironlake_disable_graphics_irq(dev_priv, GT_PIPE_NOTIFY);
-		else
-			i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
-	}
-	spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
-}
-
-void intel_ring_setup_status_page(struct drm_device *dev,
-				  struct intel_ring_buffer *ring)
-{
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	if (IS_GEN6(dev)) {
-		I915_WRITE(RING_HWS_PGA_GEN6(ring->mmio_base),
-			   ring->status_page.gfx_addr);
-		I915_READ(RING_HWS_PGA_GEN6(ring->mmio_base)); /* posting read */
-	} else {
-		I915_WRITE(RING_HWS_PGA(ring->mmio_base),
-			   ring->status_page.gfx_addr);
-		I915_READ(RING_HWS_PGA(ring->mmio_base)); /* posting read */
-	}
-
-}
-
-static void
-bsd_ring_flush(struct drm_device *dev,
-		struct intel_ring_buffer *ring,
-		u32     invalidate_domains,
-		u32     flush_domains)
-{
-	intel_ring_begin(dev, ring, 2);
-	intel_ring_emit(dev, ring, MI_FLUSH);
-	intel_ring_emit(dev, ring, MI_NOOP);
-	intel_ring_advance(dev, ring);
-}
-
-static int init_bsd_ring(struct drm_device *dev,
-			 struct intel_ring_buffer *ring)
-{
-	return init_ring_common(dev, ring);
-}
-
-static u32
-ring_add_request(struct drm_device *dev,
-		 struct intel_ring_buffer *ring,
-		 u32 flush_domains)
-{
-	u32 seqno;
-
-	seqno = i915_gem_get_seqno(dev);
-
-	intel_ring_begin(dev, ring, 4);
-	intel_ring_emit(dev, ring, MI_STORE_DWORD_INDEX);
-	intel_ring_emit(dev, ring,
-			I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-	intel_ring_emit(dev, ring, seqno);
-	intel_ring_emit(dev, ring, MI_USER_INTERRUPT);
-	intel_ring_advance(dev, ring);
-
-	DRM_DEBUG_DRIVER("%s %d\n", ring->name, seqno);
-
-	return seqno;
-}
-
-static void
-bsd_ring_get_user_irq(struct drm_device *dev,
-		      struct intel_ring_buffer *ring)
-{
-	/* do nothing */
-}
-static void
-bsd_ring_put_user_irq(struct drm_device *dev,
-		      struct intel_ring_buffer *ring)
-{
-	/* do nothing */
-}
-
-static u32
-ring_status_page_get_seqno(struct drm_device *dev,
-			   struct intel_ring_buffer *ring)
+ring_get_seqno(struct intel_ring_buffer *ring)
 {
 	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
 }
 
-static int
-ring_dispatch_gem_execbuffer(struct drm_device *dev,
-			     struct intel_ring_buffer *ring,
-			     struct drm_i915_gem_execbuffer2 *exec,
-			     struct drm_clip_rect *cliprects,
-			     uint64_t exec_offset)
+static u32
+pc_render_get_seqno(struct intel_ring_buffer *ring)
 {
-	uint32_t exec_start;
-	exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
-	intel_ring_begin(dev, ring, 2);
-	intel_ring_emit(dev, ring, MI_BATCH_BUFFER_START |
-			(2 << 6) | MI_BATCH_NON_SECURE_I965);
-	intel_ring_emit(dev, ring, exec_start);
-	intel_ring_advance(dev, ring);
+	struct pipe_control *pc = ring->private;
+	return pc->cpu_page[0];
+}
+
+static void
+ironlake_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
+{
+	dev_priv->gt_irq_mask &= ~mask;
+	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
+	POSTING_READ(GTIMR);
+}
+
+static void
+ironlake_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
+{
+	dev_priv->gt_irq_mask |= mask;
+	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
+	POSTING_READ(GTIMR);
+}
+
+static void
+i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
+{
+	dev_priv->irq_mask &= ~mask;
+	I915_WRITE(IMR, dev_priv->irq_mask);
+	POSTING_READ(IMR);
+}
+
+static void
+i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
+{
+	dev_priv->irq_mask |= mask;
+	I915_WRITE(IMR, dev_priv->irq_mask);
+	POSTING_READ(IMR);
+}
+
+static bool
+render_ring_get_irq(struct intel_ring_buffer *ring)
+{
+	struct drm_device *dev = ring->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+
+	if (!dev->irq_enabled)
+		return false;
+
+	spin_lock(&ring->irq_lock);
+	if (ring->irq_refcount++ == 0) {
+		if (HAS_PCH_SPLIT(dev))
+			ironlake_enable_irq(dev_priv,
+					    GT_PIPE_NOTIFY | GT_USER_INTERRUPT);
+		else
+			i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
+	}
+	spin_unlock(&ring->irq_lock);
+
+	return true;
+}
+
+static void
+render_ring_put_irq(struct intel_ring_buffer *ring)
+{
+	struct drm_device *dev = ring->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+
+	spin_lock(&ring->irq_lock);
+	if (--ring->irq_refcount == 0) {
+		if (HAS_PCH_SPLIT(dev))
+			ironlake_disable_irq(dev_priv,
+					     GT_USER_INTERRUPT |
+					     GT_PIPE_NOTIFY);
+		else
+			i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
+	}
+	spin_unlock(&ring->irq_lock);
+}
+
+void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
+{
+	drm_i915_private_t *dev_priv = ring->dev->dev_private;
+	u32 mmio = IS_GEN6(ring->dev) ?
+		RING_HWS_PGA_GEN6(ring->mmio_base) :
+		RING_HWS_PGA(ring->mmio_base);
+	I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
+	POSTING_READ(mmio);
+}
+
+static int
+bsd_ring_flush(struct intel_ring_buffer *ring,
+	       u32     invalidate_domains,
+	       u32     flush_domains)
+{
+	int ret;
+
+	if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0)
+		return 0;
+
+	ret = intel_ring_begin(ring, 2);
+	if (ret)
+		return ret;
+
+	intel_ring_emit(ring, MI_FLUSH);
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_advance(ring);
 	return 0;
 }
 
 static int
-render_ring_dispatch_gem_execbuffer(struct drm_device *dev,
-				    struct intel_ring_buffer *ring,
-				    struct drm_i915_gem_execbuffer2 *exec,
-				    struct drm_clip_rect *cliprects,
-				    uint64_t exec_offset)
+ring_add_request(struct intel_ring_buffer *ring,
+		 u32 *result)
 {
+	u32 seqno;
+	int ret;
+
+	ret = intel_ring_begin(ring, 4);
+	if (ret)
+		return ret;
+
+	seqno = i915_gem_get_seqno(ring->dev);
+
+	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
+	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+	intel_ring_emit(ring, seqno);
+	intel_ring_emit(ring, MI_USER_INTERRUPT);
+	intel_ring_advance(ring);
+
+	DRM_DEBUG_DRIVER("%s %d\n", ring->name, seqno);
+	*result = seqno;
+	return 0;
+}
+
+static bool
+ring_get_irq(struct intel_ring_buffer *ring, u32 flag)
+{
+	struct drm_device *dev = ring->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	int nbox = exec->num_cliprects;
-	int i = 0, count;
-	uint32_t exec_start, exec_len;
-	exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
-	exec_len = (uint32_t) exec->batch_len;
+
+	if (!dev->irq_enabled)
+	       return false;
+
+	spin_lock(&ring->irq_lock);
+	if (ring->irq_refcount++ == 0)
+		ironlake_enable_irq(dev_priv, flag);
+	spin_unlock(&ring->irq_lock);
+
+	return true;
+}
+
+static void
+ring_put_irq(struct intel_ring_buffer *ring, u32 flag)
+{
+	struct drm_device *dev = ring->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+
+	spin_lock(&ring->irq_lock);
+	if (--ring->irq_refcount == 0)
+		ironlake_disable_irq(dev_priv, flag);
+	spin_unlock(&ring->irq_lock);
+}
+
+static bool
+gen6_ring_get_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
+{
+	struct drm_device *dev = ring->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+
+	if (!dev->irq_enabled)
+	       return false;
+
+	spin_lock(&ring->irq_lock);
+	if (ring->irq_refcount++ == 0) {
+		ring->irq_mask &= ~rflag;
+		I915_WRITE_IMR(ring, ring->irq_mask);
+		ironlake_enable_irq(dev_priv, gflag);
+	}
+	spin_unlock(&ring->irq_lock);
+
+	return true;
+}
+
+static void
+gen6_ring_put_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
+{
+	struct drm_device *dev = ring->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+
+	spin_lock(&ring->irq_lock);
+	if (--ring->irq_refcount == 0) {
+		ring->irq_mask |= rflag;
+		I915_WRITE_IMR(ring, ring->irq_mask);
+		ironlake_disable_irq(dev_priv, gflag);
+	}
+	spin_unlock(&ring->irq_lock);
+}
+
+static bool
+bsd_ring_get_irq(struct intel_ring_buffer *ring)
+{
+	return ring_get_irq(ring, GT_BSD_USER_INTERRUPT);
+}
+static void
+bsd_ring_put_irq(struct intel_ring_buffer *ring)
+{
+	ring_put_irq(ring, GT_BSD_USER_INTERRUPT);
+}
+
+static int
+ring_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length)
+{
+	int ret;
+
+	ret = intel_ring_begin(ring, 2);
+	if (ret)
+		return ret;
+
+	intel_ring_emit(ring,
+			MI_BATCH_BUFFER_START | (2 << 6) |
+			MI_BATCH_NON_SECURE_I965);
+	intel_ring_emit(ring, offset);
+	intel_ring_advance(ring);
+
+	return 0;
+}
+
+static int
+render_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
+				u32 offset, u32 len)
+{
+	struct drm_device *dev = ring->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	int ret;
 
 	trace_i915_gem_request_submit(dev, dev_priv->next_seqno + 1);
 
-	count = nbox ? nbox : 1;
+	if (IS_I830(dev) || IS_845G(dev)) {
+		ret = intel_ring_begin(ring, 4);
+		if (ret)
+			return ret;
 
-	for (i = 0; i < count; i++) {
-		if (i < nbox) {
-			int ret = i915_emit_box(dev, cliprects, i,
-						exec->DR1, exec->DR4);
-			if (ret)
-				return ret;
-		}
+		intel_ring_emit(ring, MI_BATCH_BUFFER);
+		intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
+		intel_ring_emit(ring, offset + len - 8);
+		intel_ring_emit(ring, 0);
+	} else {
+		ret = intel_ring_begin(ring, 2);
+		if (ret)
+			return ret;
 
-		if (IS_I830(dev) || IS_845G(dev)) {
-			intel_ring_begin(dev, ring, 4);
-			intel_ring_emit(dev, ring, MI_BATCH_BUFFER);
-			intel_ring_emit(dev, ring,
-					exec_start | MI_BATCH_NON_SECURE);
-			intel_ring_emit(dev, ring, exec_start + exec_len - 4);
-			intel_ring_emit(dev, ring, 0);
+		if (INTEL_INFO(dev)->gen >= 4) {
+			intel_ring_emit(ring,
+					MI_BATCH_BUFFER_START | (2 << 6) |
+					MI_BATCH_NON_SECURE_I965);
+			intel_ring_emit(ring, offset);
 		} else {
-			intel_ring_begin(dev, ring, 2);
-			if (INTEL_INFO(dev)->gen >= 4) {
-				intel_ring_emit(dev, ring,
-						MI_BATCH_BUFFER_START | (2 << 6)
-						| MI_BATCH_NON_SECURE_I965);
-				intel_ring_emit(dev, ring, exec_start);
-			} else {
-				intel_ring_emit(dev, ring, MI_BATCH_BUFFER_START
-						| (2 << 6));
-				intel_ring_emit(dev, ring, exec_start |
-						MI_BATCH_NON_SECURE);
-			}
+			intel_ring_emit(ring,
+					MI_BATCH_BUFFER_START | (2 << 6));
+			intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
 		}
-		intel_ring_advance(dev, ring);
 	}
-
-	if (IS_G4X(dev) || IS_GEN5(dev)) {
-		intel_ring_begin(dev, ring, 2);
-		intel_ring_emit(dev, ring, MI_FLUSH |
-				MI_NO_WRITE_FLUSH |
-				MI_INVALIDATE_ISP );
-		intel_ring_emit(dev, ring, MI_NOOP);
-		intel_ring_advance(dev, ring);
-	}
-	/* XXX breadcrumb */
+	intel_ring_advance(ring);
 
 	return 0;
 }
 
-static void cleanup_status_page(struct drm_device *dev,
-				struct intel_ring_buffer *ring)
+static void cleanup_status_page(struct intel_ring_buffer *ring)
 {
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_gem_object *obj;
-	struct drm_i915_gem_object *obj_priv;
+	drm_i915_private_t *dev_priv = ring->dev->dev_private;
+	struct drm_i915_gem_object *obj;
 
 	obj = ring->status_page.obj;
 	if (obj == NULL)
 		return;
-	obj_priv = to_intel_bo(obj);
 
-	kunmap(obj_priv->pages[0]);
+	kunmap(obj->pages[0]);
 	i915_gem_object_unpin(obj);
-	drm_gem_object_unreference(obj);
+	drm_gem_object_unreference(&obj->base);
 	ring->status_page.obj = NULL;
 
 	memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
 }
 
-static int init_status_page(struct drm_device *dev,
-			    struct intel_ring_buffer *ring)
+static int init_status_page(struct intel_ring_buffer *ring)
 {
+	struct drm_device *dev = ring->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_gem_object *obj;
-	struct drm_i915_gem_object *obj_priv;
+	struct drm_i915_gem_object *obj;
 	int ret;
 
 	obj = i915_gem_alloc_object(dev, 4096);
@@ -540,16 +774,15 @@
 		ret = -ENOMEM;
 		goto err;
 	}
-	obj_priv = to_intel_bo(obj);
-	obj_priv->agp_type = AGP_USER_CACHED_MEMORY;
+	obj->agp_type = AGP_USER_CACHED_MEMORY;
 
-	ret = i915_gem_object_pin(obj, 4096);
+	ret = i915_gem_object_pin(obj, 4096, true);
 	if (ret != 0) {
 		goto err_unref;
 	}
 
-	ring->status_page.gfx_addr = obj_priv->gtt_offset;
-	ring->status_page.page_addr = kmap(obj_priv->pages[0]);
+	ring->status_page.gfx_addr = obj->gtt_offset;
+	ring->status_page.page_addr = kmap(obj->pages[0]);
 	if (ring->status_page.page_addr == NULL) {
 		memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
 		goto err_unpin;
@@ -557,7 +790,7 @@
 	ring->status_page.obj = obj;
 	memset(ring->status_page.page_addr, 0, PAGE_SIZE);
 
-	intel_ring_setup_status_page(dev, ring);
+	intel_ring_setup_status_page(ring);
 	DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
 			ring->name, ring->status_page.gfx_addr);
 
@@ -566,7 +799,7 @@
 err_unpin:
 	i915_gem_object_unpin(obj);
 err_unref:
-	drm_gem_object_unreference(obj);
+	drm_gem_object_unreference(&obj->base);
 err:
 	return ret;
 }
@@ -574,9 +807,7 @@
 int intel_init_ring_buffer(struct drm_device *dev,
 			   struct intel_ring_buffer *ring)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *obj_priv;
-	struct drm_gem_object *obj;
+	struct drm_i915_gem_object *obj;
 	int ret;
 
 	ring->dev = dev;
@@ -584,8 +815,11 @@
 	INIT_LIST_HEAD(&ring->request_list);
 	INIT_LIST_HEAD(&ring->gpu_write_list);
 
+	spin_lock_init(&ring->irq_lock);
+	ring->irq_mask = ~0;
+
 	if (I915_NEED_GFX_HWS(dev)) {
-		ret = init_status_page(dev, ring);
+		ret = init_status_page(ring);
 		if (ret)
 			return ret;
 	}
@@ -597,15 +831,14 @@
 		goto err_hws;
 	}
 
-	ring->gem_object = obj;
+	ring->obj = obj;
 
-	ret = i915_gem_object_pin(obj, PAGE_SIZE);
+	ret = i915_gem_object_pin(obj, PAGE_SIZE, true);
 	if (ret)
 		goto err_unref;
 
-	obj_priv = to_intel_bo(obj);
 	ring->map.size = ring->size;
-	ring->map.offset = dev->agp->base + obj_priv->gtt_offset;
+	ring->map.offset = dev->agp->base + obj->gtt_offset;
 	ring->map.type = 0;
 	ring->map.flags = 0;
 	ring->map.mtrr = 0;
@@ -618,60 +851,64 @@
 	}
 
 	ring->virtual_start = ring->map.handle;
-	ret = ring->init(dev, ring);
+	ret = ring->init(ring);
 	if (ret)
 		goto err_unmap;
 
-	if (!drm_core_check_feature(dev, DRIVER_MODESET))
-		i915_kernel_lost_context(dev);
-	else {
-		ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
-		ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
-		ring->space = ring->head - (ring->tail + 8);
-		if (ring->space < 0)
-			ring->space += ring->size;
-	}
-	return ret;
+	/* Workaround an erratum on the i830 which causes a hang if
+	 * the TAIL pointer points to within the last 2 cachelines
+	 * of the buffer.
+	 */
+	ring->effective_size = ring->size;
+	if (IS_I830(ring->dev))
+		ring->effective_size -= 128;
+
+	return 0;
 
 err_unmap:
 	drm_core_ioremapfree(&ring->map, dev);
 err_unpin:
 	i915_gem_object_unpin(obj);
 err_unref:
-	drm_gem_object_unreference(obj);
-	ring->gem_object = NULL;
+	drm_gem_object_unreference(&obj->base);
+	ring->obj = NULL;
 err_hws:
-	cleanup_status_page(dev, ring);
+	cleanup_status_page(ring);
 	return ret;
 }
 
-void intel_cleanup_ring_buffer(struct drm_device *dev,
-			       struct intel_ring_buffer *ring)
+void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
 {
-	if (ring->gem_object == NULL)
+	struct drm_i915_private *dev_priv;
+	int ret;
+
+	if (ring->obj == NULL)
 		return;
 
-	drm_core_ioremapfree(&ring->map, dev);
+	/* Disable the ring buffer. The ring must be idle at this point */
+	dev_priv = ring->dev->dev_private;
+	ret = intel_wait_ring_buffer(ring, ring->size - 8);
+	I915_WRITE_CTL(ring, 0);
 
-	i915_gem_object_unpin(ring->gem_object);
-	drm_gem_object_unreference(ring->gem_object);
-	ring->gem_object = NULL;
+	drm_core_ioremapfree(&ring->map, ring->dev);
+
+	i915_gem_object_unpin(ring->obj);
+	drm_gem_object_unreference(&ring->obj->base);
+	ring->obj = NULL;
 
 	if (ring->cleanup)
 		ring->cleanup(ring);
 
-	cleanup_status_page(dev, ring);
+	cleanup_status_page(ring);
 }
 
-static int intel_wrap_ring_buffer(struct drm_device *dev,
-				  struct intel_ring_buffer *ring)
+static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
 {
 	unsigned int *virt;
-	int rem;
-	rem = ring->size - ring->tail;
+	int rem = ring->size - ring->tail;
 
 	if (ring->space < rem) {
-		int ret = intel_wait_ring_buffer(dev, ring, rem);
+		int ret = intel_wait_ring_buffer(ring, rem);
 		if (ret)
 			return ret;
 	}
@@ -689,11 +926,11 @@
 	return 0;
 }
 
-int intel_wait_ring_buffer(struct drm_device *dev,
-			   struct intel_ring_buffer *ring, int n)
+int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
 {
+	struct drm_device *dev = ring->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	unsigned long end;
-	drm_i915_private_t *dev_priv = dev->dev_private;
 	u32 head;
 
 	trace_i915_ring_wait_begin (dev);
@@ -711,7 +948,7 @@
 		if (ring->space < 0)
 			ring->space += ring->size;
 		if (ring->space >= n) {
-			trace_i915_ring_wait_end (dev);
+			trace_i915_ring_wait_end(dev);
 			return 0;
 		}
 
@@ -722,29 +959,39 @@
 		}
 
 		msleep(1);
+		if (atomic_read(&dev_priv->mm.wedged))
+			return -EAGAIN;
 	} while (!time_after(jiffies, end));
 	trace_i915_ring_wait_end (dev);
 	return -EBUSY;
 }
 
-void intel_ring_begin(struct drm_device *dev,
-		      struct intel_ring_buffer *ring,
-		      int num_dwords)
+int intel_ring_begin(struct intel_ring_buffer *ring,
+		     int num_dwords)
 {
 	int n = 4*num_dwords;
-	if (unlikely(ring->tail + n > ring->size))
-		intel_wrap_ring_buffer(dev, ring);
-	if (unlikely(ring->space < n))
-		intel_wait_ring_buffer(dev, ring, n);
+	int ret;
+
+	if (unlikely(ring->tail + n > ring->effective_size)) {
+		ret = intel_wrap_ring_buffer(ring);
+		if (unlikely(ret))
+			return ret;
+	}
+
+	if (unlikely(ring->space < n)) {
+		ret = intel_wait_ring_buffer(ring, n);
+		if (unlikely(ret))
+			return ret;
+	}
 
 	ring->space -= n;
+	return 0;
 }
 
-void intel_ring_advance(struct drm_device *dev,
-			struct intel_ring_buffer *ring)
+void intel_ring_advance(struct intel_ring_buffer *ring)
 {
 	ring->tail &= ring->size - 1;
-	ring->write_tail(dev, ring, ring->tail);
+	ring->write_tail(ring, ring->tail);
 }
 
 static const struct intel_ring_buffer render_ring = {
@@ -756,10 +1003,11 @@
 	.write_tail		= ring_write_tail,
 	.flush			= render_ring_flush,
 	.add_request		= render_ring_add_request,
-	.get_seqno		= render_ring_get_seqno,
-	.user_irq_get		= render_ring_get_user_irq,
-	.user_irq_put		= render_ring_put_user_irq,
-	.dispatch_gem_execbuffer = render_ring_dispatch_gem_execbuffer,
+	.get_seqno		= ring_get_seqno,
+	.irq_get		= render_ring_get_irq,
+	.irq_put		= render_ring_put_irq,
+	.dispatch_execbuffer	= render_ring_dispatch_execbuffer,
+	.cleanup		= render_ring_cleanup,
 };
 
 /* ring buffer for bit-stream decoder */
@@ -769,22 +1017,21 @@
 	.id			= RING_BSD,
 	.mmio_base		= BSD_RING_BASE,
 	.size			= 32 * PAGE_SIZE,
-	.init			= init_bsd_ring,
+	.init			= init_ring_common,
 	.write_tail		= ring_write_tail,
 	.flush			= bsd_ring_flush,
 	.add_request		= ring_add_request,
-	.get_seqno		= ring_status_page_get_seqno,
-	.user_irq_get		= bsd_ring_get_user_irq,
-	.user_irq_put		= bsd_ring_put_user_irq,
-	.dispatch_gem_execbuffer = ring_dispatch_gem_execbuffer,
+	.get_seqno		= ring_get_seqno,
+	.irq_get		= bsd_ring_get_irq,
+	.irq_put		= bsd_ring_put_irq,
+	.dispatch_execbuffer	= ring_dispatch_execbuffer,
 };
 
 
-static void gen6_bsd_ring_write_tail(struct drm_device *dev,
-				     struct intel_ring_buffer *ring,
+static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring,
 				     u32 value)
 {
-       drm_i915_private_t *dev_priv = dev->dev_private;
+       drm_i915_private_t *dev_priv = ring->dev->dev_private;
 
        /* Every tail move must follow the sequence below */
        I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
@@ -803,69 +1050,109 @@
 	       GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE);
 }
 
-static void gen6_ring_flush(struct drm_device *dev,
-			    struct intel_ring_buffer *ring,
-			    u32 invalidate_domains,
-			    u32 flush_domains)
+static int gen6_ring_flush(struct intel_ring_buffer *ring,
+			   u32 invalidate_domains,
+			   u32 flush_domains)
 {
-       intel_ring_begin(dev, ring, 4);
-       intel_ring_emit(dev, ring, MI_FLUSH_DW);
-       intel_ring_emit(dev, ring, 0);
-       intel_ring_emit(dev, ring, 0);
-       intel_ring_emit(dev, ring, 0);
-       intel_ring_advance(dev, ring);
+	int ret;
+
+	if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0)
+		return 0;
+
+	ret = intel_ring_begin(ring, 4);
+	if (ret)
+		return ret;
+
+	intel_ring_emit(ring, MI_FLUSH_DW);
+	intel_ring_emit(ring, 0);
+	intel_ring_emit(ring, 0);
+	intel_ring_emit(ring, 0);
+	intel_ring_advance(ring);
+	return 0;
 }
 
 static int
-gen6_ring_dispatch_gem_execbuffer(struct drm_device *dev,
-				  struct intel_ring_buffer *ring,
-				  struct drm_i915_gem_execbuffer2 *exec,
-				  struct drm_clip_rect *cliprects,
-				  uint64_t exec_offset)
+gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
+			      u32 offset, u32 len)
 {
-       uint32_t exec_start;
+       int ret;
 
-       exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
+       ret = intel_ring_begin(ring, 2);
+       if (ret)
+	       return ret;
 
-       intel_ring_begin(dev, ring, 2);
-       intel_ring_emit(dev, ring,
-		       MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965);
+       intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965);
        /* bit0-7 is the length on GEN6+ */
-       intel_ring_emit(dev, ring, exec_start);
-       intel_ring_advance(dev, ring);
+       intel_ring_emit(ring, offset);
+       intel_ring_advance(ring);
 
        return 0;
 }
 
+static bool
+gen6_render_ring_get_irq(struct intel_ring_buffer *ring)
+{
+	return gen6_ring_get_irq(ring,
+				 GT_USER_INTERRUPT,
+				 GEN6_RENDER_USER_INTERRUPT);
+}
+
+static void
+gen6_render_ring_put_irq(struct intel_ring_buffer *ring)
+{
+	return gen6_ring_put_irq(ring,
+				 GT_USER_INTERRUPT,
+				 GEN6_RENDER_USER_INTERRUPT);
+}
+
+static bool
+gen6_bsd_ring_get_irq(struct intel_ring_buffer *ring)
+{
+	return gen6_ring_get_irq(ring,
+				 GT_GEN6_BSD_USER_INTERRUPT,
+				 GEN6_BSD_USER_INTERRUPT);
+}
+
+static void
+gen6_bsd_ring_put_irq(struct intel_ring_buffer *ring)
+{
+	return gen6_ring_put_irq(ring,
+				 GT_GEN6_BSD_USER_INTERRUPT,
+				 GEN6_BSD_USER_INTERRUPT);
+}
+
 /* ring buffer for Video Codec for Gen6+ */
 static const struct intel_ring_buffer gen6_bsd_ring = {
-       .name			= "gen6 bsd ring",
-       .id			= RING_BSD,
-       .mmio_base		= GEN6_BSD_RING_BASE,
-       .size			= 32 * PAGE_SIZE,
-       .init			= init_bsd_ring,
-       .write_tail		= gen6_bsd_ring_write_tail,
-       .flush			= gen6_ring_flush,
-       .add_request		= ring_add_request,
-       .get_seqno		= ring_status_page_get_seqno,
-       .user_irq_get		= bsd_ring_get_user_irq,
-       .user_irq_put		= bsd_ring_put_user_irq,
-       .dispatch_gem_execbuffer	= gen6_ring_dispatch_gem_execbuffer,
+	.name			= "gen6 bsd ring",
+	.id			= RING_BSD,
+	.mmio_base		= GEN6_BSD_RING_BASE,
+	.size			= 32 * PAGE_SIZE,
+	.init			= init_ring_common,
+	.write_tail		= gen6_bsd_ring_write_tail,
+	.flush			= gen6_ring_flush,
+	.add_request		= gen6_add_request,
+	.get_seqno		= ring_get_seqno,
+	.irq_get		= gen6_bsd_ring_get_irq,
+	.irq_put		= gen6_bsd_ring_put_irq,
+	.dispatch_execbuffer	= gen6_ring_dispatch_execbuffer,
 };
 
 /* Blitter support (SandyBridge+) */
 
-static void
-blt_ring_get_user_irq(struct drm_device *dev,
-		      struct intel_ring_buffer *ring)
+static bool
+blt_ring_get_irq(struct intel_ring_buffer *ring)
 {
-	/* do nothing */
+	return gen6_ring_get_irq(ring,
+				 GT_BLT_USER_INTERRUPT,
+				 GEN6_BLITTER_USER_INTERRUPT);
 }
+
 static void
-blt_ring_put_user_irq(struct drm_device *dev,
-		      struct intel_ring_buffer *ring)
+blt_ring_put_irq(struct intel_ring_buffer *ring)
 {
-	/* do nothing */
+	gen6_ring_put_irq(ring,
+			  GT_BLT_USER_INTERRUPT,
+			  GEN6_BLITTER_USER_INTERRUPT);
 }
 
 
@@ -883,32 +1170,31 @@
 	return ring->private;
 }
 
-static int blt_ring_init(struct drm_device *dev,
-			 struct intel_ring_buffer *ring)
+static int blt_ring_init(struct intel_ring_buffer *ring)
 {
-	if (NEED_BLT_WORKAROUND(dev)) {
+	if (NEED_BLT_WORKAROUND(ring->dev)) {
 		struct drm_i915_gem_object *obj;
-		u32 __iomem *ptr;
+		u32 *ptr;
 		int ret;
 
-		obj = to_intel_bo(i915_gem_alloc_object(dev, 4096));
+		obj = i915_gem_alloc_object(ring->dev, 4096);
 		if (obj == NULL)
 			return -ENOMEM;
 
-		ret = i915_gem_object_pin(&obj->base, 4096);
+		ret = i915_gem_object_pin(obj, 4096, true);
 		if (ret) {
 			drm_gem_object_unreference(&obj->base);
 			return ret;
 		}
 
 		ptr = kmap(obj->pages[0]);
-		iowrite32(MI_BATCH_BUFFER_END, ptr);
-		iowrite32(MI_NOOP, ptr+1);
+		*ptr++ = MI_BATCH_BUFFER_END;
+		*ptr++ = MI_NOOP;
 		kunmap(obj->pages[0]);
 
-		ret = i915_gem_object_set_to_gtt_domain(&obj->base, false);
+		ret = i915_gem_object_set_to_gtt_domain(obj, false);
 		if (ret) {
-			i915_gem_object_unpin(&obj->base);
+			i915_gem_object_unpin(obj);
 			drm_gem_object_unreference(&obj->base);
 			return ret;
 		}
@@ -916,51 +1202,44 @@
 		ring->private = obj;
 	}
 
-	return init_ring_common(dev, ring);
+	return init_ring_common(ring);
 }
 
-static void blt_ring_begin(struct drm_device *dev,
-			   struct intel_ring_buffer *ring,
+static int blt_ring_begin(struct intel_ring_buffer *ring,
 			  int num_dwords)
 {
 	if (ring->private) {
-		intel_ring_begin(dev, ring, num_dwords+2);
-		intel_ring_emit(dev, ring, MI_BATCH_BUFFER_START);
-		intel_ring_emit(dev, ring, to_blt_workaround(ring)->gtt_offset);
+		int ret = intel_ring_begin(ring, num_dwords+2);
+		if (ret)
+			return ret;
+
+		intel_ring_emit(ring, MI_BATCH_BUFFER_START);
+		intel_ring_emit(ring, to_blt_workaround(ring)->gtt_offset);
+
+		return 0;
 	} else
-		intel_ring_begin(dev, ring, 4);
+		return intel_ring_begin(ring, 4);
 }
 
-static void blt_ring_flush(struct drm_device *dev,
-			   struct intel_ring_buffer *ring,
+static int blt_ring_flush(struct intel_ring_buffer *ring,
 			   u32 invalidate_domains,
 			   u32 flush_domains)
 {
-	blt_ring_begin(dev, ring, 4);
-	intel_ring_emit(dev, ring, MI_FLUSH_DW);
-	intel_ring_emit(dev, ring, 0);
-	intel_ring_emit(dev, ring, 0);
-	intel_ring_emit(dev, ring, 0);
-	intel_ring_advance(dev, ring);
-}
+	int ret;
 
-static u32
-blt_ring_add_request(struct drm_device *dev,
-		     struct intel_ring_buffer *ring,
-		     u32 flush_domains)
-{
-	u32 seqno = i915_gem_get_seqno(dev);
+	if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0)
+		return 0;
 
-	blt_ring_begin(dev, ring, 4);
-	intel_ring_emit(dev, ring, MI_STORE_DWORD_INDEX);
-	intel_ring_emit(dev, ring,
-			I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-	intel_ring_emit(dev, ring, seqno);
-	intel_ring_emit(dev, ring, MI_USER_INTERRUPT);
-	intel_ring_advance(dev, ring);
+	ret = blt_ring_begin(ring, 4);
+	if (ret)
+		return ret;
 
-	DRM_DEBUG_DRIVER("%s %d\n", ring->name, seqno);
-	return seqno;
+	intel_ring_emit(ring, MI_FLUSH_DW);
+	intel_ring_emit(ring, 0);
+	intel_ring_emit(ring, 0);
+	intel_ring_emit(ring, 0);
+	intel_ring_advance(ring);
+	return 0;
 }
 
 static void blt_ring_cleanup(struct intel_ring_buffer *ring)
@@ -981,47 +1260,56 @@
        .init			= blt_ring_init,
        .write_tail		= ring_write_tail,
        .flush			= blt_ring_flush,
-       .add_request		= blt_ring_add_request,
-       .get_seqno		= ring_status_page_get_seqno,
-       .user_irq_get		= blt_ring_get_user_irq,
-       .user_irq_put		= blt_ring_put_user_irq,
-       .dispatch_gem_execbuffer	= gen6_ring_dispatch_gem_execbuffer,
+       .add_request		= gen6_add_request,
+       .get_seqno		= ring_get_seqno,
+       .irq_get			= blt_ring_get_irq,
+       .irq_put			= blt_ring_put_irq,
+       .dispatch_execbuffer	= gen6_ring_dispatch_execbuffer,
        .cleanup			= blt_ring_cleanup,
 };
 
 int intel_init_render_ring_buffer(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
 
-	dev_priv->render_ring = render_ring;
-
-	if (!I915_NEED_GFX_HWS(dev)) {
-		dev_priv->render_ring.status_page.page_addr
-			= dev_priv->status_page_dmah->vaddr;
-		memset(dev_priv->render_ring.status_page.page_addr,
-				0, PAGE_SIZE);
+	*ring = render_ring;
+	if (INTEL_INFO(dev)->gen >= 6) {
+		ring->add_request = gen6_add_request;
+		ring->irq_get = gen6_render_ring_get_irq;
+		ring->irq_put = gen6_render_ring_put_irq;
+	} else if (IS_GEN5(dev)) {
+		ring->add_request = pc_render_add_request;
+		ring->get_seqno = pc_render_get_seqno;
 	}
 
-	return intel_init_ring_buffer(dev, &dev_priv->render_ring);
+	if (!I915_NEED_GFX_HWS(dev)) {
+		ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
+		memset(ring->status_page.page_addr, 0, PAGE_SIZE);
+	}
+
+	return intel_init_ring_buffer(dev, ring);
 }
 
 int intel_init_bsd_ring_buffer(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct intel_ring_buffer *ring = &dev_priv->ring[VCS];
 
 	if (IS_GEN6(dev))
-		dev_priv->bsd_ring = gen6_bsd_ring;
+		*ring = gen6_bsd_ring;
 	else
-		dev_priv->bsd_ring = bsd_ring;
+		*ring = bsd_ring;
 
-	return intel_init_ring_buffer(dev, &dev_priv->bsd_ring);
+	return intel_init_ring_buffer(dev, ring);
 }
 
 int intel_init_blt_ring_buffer(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct intel_ring_buffer *ring = &dev_priv->ring[BCS];
 
-	dev_priv->blt_ring = gen6_blt_ring;
+	*ring = gen6_blt_ring;
 
-	return intel_init_ring_buffer(dev, &dev_priv->blt_ring);
+	return intel_init_ring_buffer(dev, ring);
 }
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index d2cd0f1..be9087e 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -1,22 +1,40 @@
 #ifndef _INTEL_RINGBUFFER_H_
 #define _INTEL_RINGBUFFER_H_
 
-struct  intel_hw_status_page {
-	void		*page_addr;
-	unsigned int	gfx_addr;
-	struct		drm_gem_object *obj;
+enum {
+    RCS = 0x0,
+    VCS,
+    BCS,
+    I915_NUM_RINGS,
 };
 
-#define I915_READ_TAIL(ring) I915_READ(RING_TAIL(ring->mmio_base))
-#define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL(ring->mmio_base), val)
-#define I915_READ_START(ring) I915_READ(RING_START(ring->mmio_base))
-#define I915_WRITE_START(ring, val) I915_WRITE(RING_START(ring->mmio_base), val)
-#define I915_READ_HEAD(ring) I915_READ(RING_HEAD(ring->mmio_base))
-#define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD(ring->mmio_base), val)
-#define I915_READ_CTL(ring) I915_READ(RING_CTL(ring->mmio_base))
-#define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL(ring->mmio_base), val)
+struct  intel_hw_status_page {
+	u32	__iomem	*page_addr;
+	unsigned int	gfx_addr;
+	struct		drm_i915_gem_object *obj;
+};
 
-struct drm_i915_gem_execbuffer2;
+#define I915_RING_READ(reg) i915_safe_read(dev_priv, reg)
+
+#define I915_READ_TAIL(ring) I915_RING_READ(RING_TAIL((ring)->mmio_base))
+#define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL((ring)->mmio_base), val)
+
+#define I915_READ_START(ring) I915_RING_READ(RING_START((ring)->mmio_base))
+#define I915_WRITE_START(ring, val) I915_WRITE(RING_START((ring)->mmio_base), val)
+
+#define I915_READ_HEAD(ring)  I915_RING_READ(RING_HEAD((ring)->mmio_base))
+#define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD((ring)->mmio_base), val)
+
+#define I915_READ_CTL(ring) I915_RING_READ(RING_CTL((ring)->mmio_base))
+#define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL((ring)->mmio_base), val)
+
+#define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val)
+#define I915_READ_IMR(ring) I915_RING_READ(RING_IMR((ring)->mmio_base))
+
+#define I915_READ_NOPID(ring) I915_RING_READ(RING_NOPID((ring)->mmio_base))
+#define I915_READ_SYNC_0(ring) I915_RING_READ(RING_SYNC_0((ring)->mmio_base))
+#define I915_READ_SYNC_1(ring) I915_RING_READ(RING_SYNC_1((ring)->mmio_base))
+
 struct  intel_ring_buffer {
 	const char	*name;
 	enum intel_ring_id {
@@ -25,45 +43,39 @@
 		RING_BLT = 0x4,
 	} id;
 	u32		mmio_base;
-	unsigned long	size;
 	void		*virtual_start;
 	struct		drm_device *dev;
-	struct		drm_gem_object *gem_object;
+	struct		drm_i915_gem_object *obj;
 
 	u32		actual_head;
 	u32		head;
 	u32		tail;
 	int		space;
+	int		size;
+	int		effective_size;
 	struct intel_hw_status_page status_page;
 
-	u32		irq_gem_seqno;		/* last seq seem at irq time */
-	u32		waiting_gem_seqno;
-	int		user_irq_refcount;
-	void		(*user_irq_get)(struct drm_device *dev,
-			struct intel_ring_buffer *ring);
-	void		(*user_irq_put)(struct drm_device *dev,
-			struct intel_ring_buffer *ring);
+	spinlock_t	irq_lock;
+	u32		irq_refcount;
+	u32		irq_mask;
+	u32		irq_seqno;		/* last seq seen at irq time */
+	u32		waiting_seqno;
+	u32		sync_seqno[I915_NUM_RINGS-1];
+	bool __must_check (*irq_get)(struct intel_ring_buffer *ring);
+	void		(*irq_put)(struct intel_ring_buffer *ring);
 
-	int		(*init)(struct drm_device *dev,
-			struct intel_ring_buffer *ring);
+	int		(*init)(struct intel_ring_buffer *ring);
 
-	void		(*write_tail)(struct drm_device *dev,
-				      struct intel_ring_buffer *ring,
+	void		(*write_tail)(struct intel_ring_buffer *ring,
 				      u32 value);
-	void		(*flush)(struct drm_device *dev,
-			struct intel_ring_buffer *ring,
-			u32	invalidate_domains,
-			u32	flush_domains);
-	u32		(*add_request)(struct drm_device *dev,
-			struct intel_ring_buffer *ring,
-			u32 flush_domains);
-	u32		(*get_seqno)(struct drm_device *dev,
-				     struct intel_ring_buffer *ring);
-	int		(*dispatch_gem_execbuffer)(struct drm_device *dev,
-			struct intel_ring_buffer *ring,
-			struct drm_i915_gem_execbuffer2 *exec,
-			struct drm_clip_rect *cliprects,
-			uint64_t exec_offset);
+	int __must_check (*flush)(struct intel_ring_buffer *ring,
+				  u32	invalidate_domains,
+				  u32	flush_domains);
+	int		(*add_request)(struct intel_ring_buffer *ring,
+				       u32 *seqno);
+	u32		(*get_seqno)(struct intel_ring_buffer *ring);
+	int		(*dispatch_execbuffer)(struct intel_ring_buffer *ring,
+					       u32 offset, u32 length);
 	void		(*cleanup)(struct intel_ring_buffer *ring);
 
 	/**
@@ -96,7 +108,7 @@
 	/**
 	 * Do we have some not yet emitted requests outstanding?
 	 */
-	bool outstanding_lazy_request;
+	u32 outstanding_lazy_request;
 
 	wait_queue_head_t irq_queue;
 	drm_local_map_t map;
@@ -105,44 +117,54 @@
 };
 
 static inline u32
-intel_read_status_page(struct intel_ring_buffer *ring,
-		int reg)
+intel_ring_sync_index(struct intel_ring_buffer *ring,
+		      struct intel_ring_buffer *other)
 {
-	u32 *regs = ring->status_page.page_addr;
-	return regs[reg];
+	int idx;
+
+	/*
+	 * cs -> 0 = vcs, 1 = bcs
+	 * vcs -> 0 = bcs, 1 = cs,
+	 * bcs -> 0 = cs, 1 = vcs.
+	 */
+
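+	/* offset of 'other' from 'ring' in the ring array, wrapping modulo the ring count */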
+	idx = (other - ring) - 1;
+	if (idx < 0)
+		idx += I915_NUM_RINGS;
+
+	return idx;
 }
 
-int intel_init_ring_buffer(struct drm_device *dev,
-			   struct intel_ring_buffer *ring);
-void intel_cleanup_ring_buffer(struct drm_device *dev,
-			       struct intel_ring_buffer *ring);
-int intel_wait_ring_buffer(struct drm_device *dev,
-			   struct intel_ring_buffer *ring, int n);
-void intel_ring_begin(struct drm_device *dev,
-		      struct intel_ring_buffer *ring, int n);
-
-static inline void intel_ring_emit(struct drm_device *dev,
-				   struct intel_ring_buffer *ring,
-				   unsigned int data)
+static inline u32
+intel_read_status_page(struct intel_ring_buffer *ring,
+		       int reg)
 {
-	unsigned int *virt = ring->virtual_start + ring->tail;
-	*virt = data;
+	return ioread32(ring->status_page.page_addr + reg);
+}
+
+void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring);
+int __must_check intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n);
+int __must_check intel_ring_begin(struct intel_ring_buffer *ring, int n);
+
+static inline void intel_ring_emit(struct intel_ring_buffer *ring,
+				   u32 data)
+{
+	iowrite32(data, ring->virtual_start + ring->tail);
 	ring->tail += 4;
 }
 
-void intel_ring_advance(struct drm_device *dev,
-		struct intel_ring_buffer *ring);
+void intel_ring_advance(struct intel_ring_buffer *ring);
 
-u32 intel_ring_get_seqno(struct drm_device *dev,
-		struct intel_ring_buffer *ring);
+u32 intel_ring_get_seqno(struct intel_ring_buffer *ring);
+int intel_ring_sync(struct intel_ring_buffer *ring,
+		    struct intel_ring_buffer *to,
+		    u32 seqno);
 
 int intel_init_render_ring_buffer(struct drm_device *dev);
 int intel_init_bsd_ring_buffer(struct drm_device *dev);
 int intel_init_blt_ring_buffer(struct drm_device *dev);
 
-u32 intel_ring_get_active_head(struct drm_device *dev,
-			       struct intel_ring_buffer *ring);
-void intel_ring_setup_status_page(struct drm_device *dev,
-				  struct intel_ring_buffer *ring);
+u32 intel_ring_get_active_head(struct intel_ring_buffer *ring);
+void intel_ring_setup_status_page(struct intel_ring_buffer *ring);
 
 #endif /* _INTEL_RINGBUFFER_H_ */
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 6bc42fa..45cd376 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -1024,9 +1024,13 @@
 	if (!intel_sdvo_set_target_input(intel_sdvo))
 		return;
 
-	if (intel_sdvo->has_hdmi_monitor &&
-	    !intel_sdvo_set_avi_infoframe(intel_sdvo))
-		return;
+	if (intel_sdvo->has_hdmi_monitor) {
+		intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_HDMI);
+		intel_sdvo_set_colorimetry(intel_sdvo,
+					   SDVO_COLORIMETRY_RGB256);
+		intel_sdvo_set_avi_infoframe(intel_sdvo);
+	} else
+		intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_DVI);
 
 	if (intel_sdvo->is_tv &&
 	    !intel_sdvo_set_tv_format(intel_sdvo))
@@ -1045,7 +1049,9 @@
 
 	/* Set the SDVO control regs. */
 	if (INTEL_INFO(dev)->gen >= 4) {
-		sdvox = SDVO_BORDER_ENABLE;
+		sdvox = 0;
+		if (INTEL_INFO(dev)->gen < 5)
+			sdvox |= SDVO_BORDER_ENABLE;
 		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
 			sdvox |= SDVO_VSYNC_ACTIVE_HIGH;
 		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
@@ -1075,7 +1081,8 @@
 		sdvox |= (pixel_multiplier - 1) << SDVO_PORT_MULTIPLY_SHIFT;
 	}
 
-	if (input_dtd.part2.sdvo_flags & SDVO_NEED_TO_STALL)
+	if (input_dtd.part2.sdvo_flags & SDVO_NEED_TO_STALL &&
+	    INTEL_INFO(dev)->gen < 5)
 		sdvox |= SDVO_STALL_SELECT;
 	intel_sdvo_write_sdvox(intel_sdvo, sdvox);
 }
@@ -1395,6 +1402,9 @@
 
 	intel_sdvo->attached_output = response;
 
+	intel_sdvo->has_hdmi_monitor = false;
+	intel_sdvo->has_hdmi_audio = false;
+
 	if ((intel_sdvo_connector->output_flag & response) == 0)
 		ret = connector_status_disconnected;
 	else if (response & SDVO_TMDS_MASK)
@@ -1919,20 +1929,7 @@
 static bool
 intel_sdvo_is_hdmi_connector(struct intel_sdvo *intel_sdvo, int device)
 {
-	int is_hdmi;
-
-	if (!intel_sdvo_check_supp_encode(intel_sdvo))
-		return false;
-
-	if (!intel_sdvo_set_target_output(intel_sdvo,
-					  device == 0 ? SDVO_OUTPUT_TMDS0 : SDVO_OUTPUT_TMDS1))
-		return false;
-
-	is_hdmi = 0;
-	if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_ENCODE, &is_hdmi, 1))
-		return false;
-
-	return !!is_hdmi;
+	return intel_sdvo_check_supp_encode(intel_sdvo);
 }
 
 static u8
@@ -2034,12 +2031,7 @@
 	connector->connector_type = DRM_MODE_CONNECTOR_DVID;
 
 	if (intel_sdvo_is_hdmi_connector(intel_sdvo, device)) {
-		/* enable hdmi encoding mode if supported */
-		intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_HDMI);
-		intel_sdvo_set_colorimetry(intel_sdvo,
-					   SDVO_COLORIMETRY_RGB256);
 		connector->connector_type = DRM_MODE_CONNECTOR_HDMIA;
-
 		intel_sdvo->is_hdmi = true;
 	}
 	intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 2f76819..93206e4 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1245,10 +1245,11 @@
 	int type;
 
 	/* Disable TV interrupts around load detect or we'll recurse */
-	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
-	i915_disable_pipestat(dev_priv, 0, PIPE_HOTPLUG_INTERRUPT_ENABLE |
+	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+	i915_disable_pipestat(dev_priv, 0,
+			      PIPE_HOTPLUG_INTERRUPT_ENABLE |
 			      PIPE_HOTPLUG_TV_INTERRUPT_ENABLE);
-	spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
+	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 
 	save_tv_dac = tv_dac = I915_READ(TV_DAC);
 	save_tv_ctl = tv_ctl = I915_READ(TV_CTL);
@@ -1301,10 +1302,11 @@
 	I915_WRITE(TV_CTL, save_tv_ctl);
 
 	/* Restore interrupt config */
-	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
-	i915_enable_pipestat(dev_priv, 0, PIPE_HOTPLUG_INTERRUPT_ENABLE |
+	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+	i915_enable_pipestat(dev_priv, 0,
+			     PIPE_HOTPLUG_INTERRUPT_ENABLE |
 			     PIPE_HOTPLUG_TV_INTERRUPT_ENABLE);
-	spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
+	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 
 	return type;
 }
diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig
index 72730e9..21d6c29 100644
--- a/drivers/gpu/drm/nouveau/Kconfig
+++ b/drivers/gpu/drm/nouveau/Kconfig
@@ -10,7 +10,7 @@
 	select FB
 	select FRAMEBUFFER_CONSOLE if !EMBEDDED
 	select FB_BACKLIGHT if DRM_NOUVEAU_BACKLIGHT
-	select ACPI_VIDEO if ACPI
+	select ACPI_VIDEO if ACPI && X86 && BACKLIGHT_CLASS_DEVICE && VIDEO_OUTPUT_CONTROL && INPUT
 	help
 	  Choose this option for open-source nVidia support.
 
diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile
index 23fa82d..e12c97f 100644
--- a/drivers/gpu/drm/nouveau/Makefile
+++ b/drivers/gpu/drm/nouveau/Makefile
@@ -5,27 +5,32 @@
 ccflags-y := -Iinclude/drm
 nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \
              nouveau_object.o nouveau_irq.o nouveau_notifier.o \
-             nouveau_sgdma.o nouveau_dma.o \
+             nouveau_sgdma.o nouveau_dma.o nouveau_util.o \
              nouveau_bo.o nouveau_fence.o nouveau_gem.o nouveau_ttm.o \
              nouveau_hw.o nouveau_calc.o nouveau_bios.o nouveau_i2c.o \
              nouveau_display.o nouveau_connector.o nouveau_fbcon.o \
              nouveau_dp.o nouveau_ramht.o \
 	     nouveau_pm.o nouveau_volt.o nouveau_perf.o nouveau_temp.o \
+	     nouveau_mm.o nouveau_vm.o \
              nv04_timer.o \
              nv04_mc.o nv40_mc.o nv50_mc.o \
              nv04_fb.o nv10_fb.o nv30_fb.o nv40_fb.o nv50_fb.o nvc0_fb.o \
              nv04_fifo.o nv10_fifo.o nv40_fifo.o nv50_fifo.o nvc0_fifo.o \
              nv04_graph.o nv10_graph.o nv20_graph.o \
              nv40_graph.o nv50_graph.o nvc0_graph.o \
-             nv40_grctx.o nv50_grctx.o \
+             nv40_grctx.o nv50_grctx.o nvc0_grctx.o \
+             nv84_crypt.o \
              nv04_instmem.o nv50_instmem.o nvc0_instmem.o \
-             nv50_crtc.o nv50_dac.o nv50_sor.o \
-             nv50_cursor.o nv50_display.o nv50_fbcon.o \
+             nv50_evo.o nv50_crtc.o nv50_dac.o nv50_sor.o \
+             nv50_cursor.o nv50_display.o \
              nv04_dac.o nv04_dfp.o nv04_tv.o nv17_tv.o nv17_tv_modes.o \
-             nv04_crtc.o nv04_display.o nv04_cursor.o nv04_fbcon.o \
+             nv04_crtc.o nv04_display.o nv04_cursor.o \
+             nv04_fbcon.o nv50_fbcon.o nvc0_fbcon.o \
              nv10_gpio.o nv50_gpio.o \
 	     nv50_calc.o \
-	     nv04_pm.o nv50_pm.o nva3_pm.o
+	     nv04_pm.o nv50_pm.o nva3_pm.o \
+	     nv50_vram.o nvc0_vram.o \
+	     nv50_vm.o nvc0_vm.o
 
 nouveau-$(CONFIG_DRM_NOUVEAU_DEBUG) += nouveau_debugfs.o
 nouveau-$(CONFIG_COMPAT) += nouveau_ioc32.o
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index 1191526..a542380 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -130,10 +130,15 @@
 
 static int nouveau_dsm_get_client_id(struct pci_dev *pdev)
 {
-	if (nouveau_dsm_priv.dhandle == DEVICE_ACPI_HANDLE(&pdev->dev))
+	/* easy option one - intel vendor ID means Integrated */
+	if (pdev->vendor == PCI_VENDOR_ID_INTEL)
 		return VGA_SWITCHEROO_IGD;
-	else
-		return VGA_SWITCHEROO_DIS;
+
+	/* is this device on Bus 0? - this may need improving */
+	if (pdev->bus->number == 0)
+		return VGA_SWITCHEROO_IGD;
+
+	return VGA_SWITCHEROO_DIS;
 }
 
 static struct vga_switcheroo_handler nouveau_dsm_handler = {
diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c
index b14c811..d3a9c6e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_backlight.c
+++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c
@@ -59,7 +59,7 @@
 	return 0;
 }
 
-static struct backlight_ops nv40_bl_ops = {
+static const struct backlight_ops nv40_bl_ops = {
 	.options = BL_CORE_SUSPENDRESUME,
 	.get_brightness = nv40_get_intensity,
 	.update_status = nv40_set_intensity,
@@ -82,7 +82,7 @@
 	return 0;
 }
 
-static struct backlight_ops nv50_bl_ops = {
+static const struct backlight_ops nv50_bl_ops = {
 	.options = BL_CORE_SUSPENDRESUME,
 	.get_brightness = nv50_get_intensity,
 	.update_status = nv50_set_intensity,
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index b229357..2aef5cd 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -1927,7 +1927,7 @@
 	 * offset      (8  bit): opcode
 	 * offset + 1  (16 bit): time
 	 *
-	 * Sleep for "time" miliseconds.
+	 * Sleep for "time" milliseconds.
 	 */
 
 	unsigned time = ROM16(bios->data[offset + 1]);
@@ -1935,7 +1935,7 @@
 	if (!iexec->execute)
 		return 3;
 
-	BIOSLOG(bios, "0x%04X: Sleeping for 0x%04X miliseconds\n",
+	BIOSLOG(bios, "0x%04X: Sleeping for 0x%04X milliseconds\n",
 		offset, time);
 
 	msleep(time);
@@ -6053,52 +6053,17 @@
 	return entry;
 }
 
-static void fabricate_vga_output(struct dcb_table *dcb, int i2c, int heads)
+static void fabricate_dcb_output(struct dcb_table *dcb, int type, int i2c,
+				 int heads, int or)
 {
 	struct dcb_entry *entry = new_dcb_entry(dcb);
 
-	entry->type = 0;
+	entry->type = type;
 	entry->i2c_index = i2c;
 	entry->heads = heads;
-	entry->location = DCB_LOC_ON_CHIP;
-	entry->or = 1;
-}
-
-static void fabricate_dvi_i_output(struct dcb_table *dcb, bool twoHeads)
-{
-	struct dcb_entry *entry = new_dcb_entry(dcb);
-
-	entry->type = 2;
-	entry->i2c_index = LEGACY_I2C_PANEL;
-	entry->heads = twoHeads ? 3 : 1;
-	entry->location = !DCB_LOC_ON_CHIP;	/* ie OFF CHIP */
-	entry->or = 1;	/* means |0x10 gets set on CRE_LCD__INDEX */
-	entry->duallink_possible = false; /* SiI164 and co. are single link */
-
-#if 0
-	/*
-	 * For dvi-a either crtc probably works, but my card appears to only
-	 * support dvi-d.  "nvidia" still attempts to program it for dvi-a,
-	 * doing the full fp output setup (program 0x6808.. fp dimension regs,
-	 * setting 0x680848 to 0x10000111 to enable, maybe setting 0x680880);
-	 * the monitor picks up the mode res ok and lights up, but no pixel
-	 * data appears, so the board manufacturer probably connected up the
-	 * sync lines, but missed the video traces / components
-	 *
-	 * with this introduction, dvi-a left as an exercise for the reader.
-	 */
-	fabricate_vga_output(dcb, LEGACY_I2C_PANEL, entry->heads);
-#endif
-}
-
-static void fabricate_tv_output(struct dcb_table *dcb, bool twoHeads)
-{
-	struct dcb_entry *entry = new_dcb_entry(dcb);
-
-	entry->type = 1;
-	entry->i2c_index = LEGACY_I2C_TV;
-	entry->heads = twoHeads ? 3 : 1;
-	entry->location = !DCB_LOC_ON_CHIP;	/* ie OFF CHIP */
+	if (type != OUTPUT_ANALOG)
+		entry->location = !DCB_LOC_ON_CHIP; /* ie OFF CHIP */
+	entry->or = or;
 }
 
 static bool
@@ -6365,8 +6330,36 @@
 	return true;
 }
 
+static void
+fabricate_dcb_encoder_table(struct drm_device *dev, struct nvbios *bios)
+{
+	struct dcb_table *dcb = &bios->dcb;
+	int all_heads = (nv_two_heads(dev) ? 3 : 1);
+
+#ifdef __powerpc__
+	/* Apple iMac G4 NV17 */
+	if (of_machine_is_compatible("PowerMac4,5")) {
+		fabricate_dcb_output(dcb, OUTPUT_TMDS, 0, all_heads, 1);
+		fabricate_dcb_output(dcb, OUTPUT_ANALOG, 1, all_heads, 2);
+		return;
+	}
+#endif
+
+	/* Make up some sane defaults */
+	fabricate_dcb_output(dcb, OUTPUT_ANALOG, LEGACY_I2C_CRT, 1, 1);
+
+	if (nv04_tv_identify(dev, bios->legacy.i2c_indices.tv) >= 0)
+		fabricate_dcb_output(dcb, OUTPUT_TV, LEGACY_I2C_TV,
+				     all_heads, 0);
+
+	else if (bios->tmds.output0_script_ptr ||
+		 bios->tmds.output1_script_ptr)
+		fabricate_dcb_output(dcb, OUTPUT_TMDS, LEGACY_I2C_PANEL,
+				     all_heads, 1);
+}
+
 static int
-parse_dcb_table(struct drm_device *dev, struct nvbios *bios, bool twoHeads)
+parse_dcb_table(struct drm_device *dev, struct nvbios *bios)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct dcb_table *dcb = &bios->dcb;
@@ -6386,12 +6379,7 @@
 
 	/* this situation likely means a really old card, pre DCB */
 	if (dcbptr == 0x0) {
-		NV_INFO(dev, "Assuming a CRT output exists\n");
-		fabricate_vga_output(dcb, LEGACY_I2C_CRT, 1);
-
-		if (nv04_tv_identify(dev, bios->legacy.i2c_indices.tv) >= 0)
-			fabricate_tv_output(dcb, twoHeads);
-
+		fabricate_dcb_encoder_table(dev, bios);
 		return 0;
 	}
 
@@ -6451,21 +6439,7 @@
 		 */
 		NV_TRACEWARN(dev, "No useful information in BIOS output table; "
 				  "adding all possible outputs\n");
-		fabricate_vga_output(dcb, LEGACY_I2C_CRT, 1);
-
-		/*
-		 * Attempt to detect TV before DVI because the test
-		 * for the former is more accurate and it rules the
-		 * latter out.
-		 */
-		if (nv04_tv_identify(dev,
-				     bios->legacy.i2c_indices.tv) >= 0)
-			fabricate_tv_output(dcb, twoHeads);
-
-		else if (bios->tmds.output0_script_ptr ||
-			 bios->tmds.output1_script_ptr)
-			fabricate_dvi_i_output(dcb, twoHeads);
-
+		fabricate_dcb_encoder_table(dev, bios);
 		return 0;
 	}
 
@@ -6859,7 +6833,7 @@
 	if (ret)
 		return ret;
 
-	ret = parse_dcb_table(dev, bios, nv_two_heads(dev));
+	ret = parse_dcb_table(dev, bios);
 	if (ret)
 		return ret;
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index c41e1c2..a7fae26 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -32,6 +32,8 @@
 #include "nouveau_drm.h"
 #include "nouveau_drv.h"
 #include "nouveau_dma.h"
+#include "nouveau_mm.h"
+#include "nouveau_vm.h"
 
 #include <linux/log2.h>
 #include <linux/slab.h>
@@ -46,82 +48,51 @@
 	if (unlikely(nvbo->gem))
 		DRM_ERROR("bo %p still attached to GEM object\n", bo);
 
-	if (nvbo->tile)
-		nv10_mem_expire_tiling(dev, nvbo->tile, NULL);
-
+	nv10_mem_put_tile_region(dev, nvbo->tile, NULL);
+	nouveau_vm_put(&nvbo->vma);
 	kfree(nvbo);
 }
 
 static void
-nouveau_bo_fixup_align(struct drm_device *dev,
-		       uint32_t tile_mode, uint32_t tile_flags,
-		       int *align, int *size)
+nouveau_bo_fixup_align(struct nouveau_bo *nvbo, int *align, int *size,
+		       int *page_shift)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
 
-	/*
-	 * Some of the tile_flags have a periodic structure of N*4096 bytes,
-	 * align to to that as well as the page size. Align the size to the
-	 * appropriate boundaries. This does imply that sizes are rounded up
-	 * 3-7 pages, so be aware of this and do not waste memory by allocating
-	 * many small buffers.
-	 */
-	if (dev_priv->card_type == NV_50) {
-		uint32_t block_size = dev_priv->vram_size >> 15;
-		int i;
-
-		switch (tile_flags) {
-		case 0x1800:
-		case 0x2800:
-		case 0x4800:
-		case 0x7a00:
-			if (is_power_of_2(block_size)) {
-				for (i = 1; i < 10; i++) {
-					*align = 12 * i * block_size;
-					if (!(*align % 65536))
-						break;
-				}
-			} else {
-				for (i = 1; i < 10; i++) {
-					*align = 8 * i * block_size;
-					if (!(*align % 65536))
-						break;
-				}
-			}
-			*size = roundup(*size, *align);
-			break;
-		default:
-			break;
-		}
-
-	} else {
-		if (tile_mode) {
+	if (dev_priv->card_type < NV_50) {
+		if (nvbo->tile_mode) {
 			if (dev_priv->chipset >= 0x40) {
 				*align = 65536;
-				*size = roundup(*size, 64 * tile_mode);
+				*size = roundup(*size, 64 * nvbo->tile_mode);
 
 			} else if (dev_priv->chipset >= 0x30) {
 				*align = 32768;
-				*size = roundup(*size, 64 * tile_mode);
+				*size = roundup(*size, 64 * nvbo->tile_mode);
 
 			} else if (dev_priv->chipset >= 0x20) {
 				*align = 16384;
-				*size = roundup(*size, 64 * tile_mode);
+				*size = roundup(*size, 64 * nvbo->tile_mode);
 
 			} else if (dev_priv->chipset >= 0x10) {
 				*align = 16384;
-				*size = roundup(*size, 32 * tile_mode);
+				*size = roundup(*size, 32 * nvbo->tile_mode);
 			}
 		}
+	} else {
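+		/* NV50+: pick large pages for buffers over 256KiB when a channel VM exists, else 4KiB pages */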
+		if (likely(dev_priv->chan_vm)) {
+			if (*size > 256 * 1024)
+				*page_shift = dev_priv->chan_vm->lpg_shift;
+			else
+				*page_shift = dev_priv->chan_vm->spg_shift;
+		} else {
+			*page_shift = 12;
+		}
+
+		*size = roundup(*size, (1 << *page_shift));
+		*align = max((1 << *page_shift), *align);
 	}
 
-	/* ALIGN works only on powers of two. */
 	*size = roundup(*size, PAGE_SIZE);
-
-	if (dev_priv->card_type == NV_50) {
-		*size = roundup(*size, 65536);
-		*align = max(65536, *align);
-	}
 }
 
 int
@@ -132,7 +103,7 @@
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_bo *nvbo;
-	int ret = 0;
+	int ret = 0, page_shift = 0;
 
 	nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
 	if (!nvbo)
@@ -145,10 +116,18 @@
 	nvbo->tile_flags = tile_flags;
 	nvbo->bo.bdev = &dev_priv->ttm.bdev;
 
-	nouveau_bo_fixup_align(dev, tile_mode, nouveau_bo_tile_layout(nvbo),
-			       &align, &size);
+	nouveau_bo_fixup_align(nvbo, &align, &size, &page_shift);
 	align >>= PAGE_SHIFT;
 
+	if (!nvbo->no_vm && dev_priv->chan_vm) {
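+		/* reserve a virtual address range for this BO in the channel VM */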
+		ret = nouveau_vm_get(dev_priv->chan_vm, size, page_shift,
+				     NV_MEM_ACCESS_RW, &nvbo->vma);
+		if (ret) {
+			kfree(nvbo);
+			return ret;
+		}
+	}
+
 	nouveau_bo_placement_set(nvbo, flags, 0);
 
 	nvbo->channel = chan;
@@ -161,6 +140,11 @@
 	}
 	nvbo->channel = NULL;
 
+	if (nvbo->vma.node) {
+		if (nvbo->bo.mem.mem_type == TTM_PL_VRAM)
+			nvbo->bo.offset = nvbo->vma.offset;
+	}
+
 	*pnvbo = nvbo;
 	return 0;
 }
@@ -244,7 +228,7 @@
 
 	nouveau_bo_placement_set(nvbo, memtype, 0);
 
-	ret = ttm_bo_validate(bo, &nvbo->placement, false, false, false);
+	ret = nouveau_bo_validate(nvbo, false, false, false);
 	if (ret == 0) {
 		switch (bo->mem.mem_type) {
 		case TTM_PL_VRAM:
@@ -280,7 +264,7 @@
 
 	nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);
 
-	ret = ttm_bo_validate(bo, &nvbo->placement, false, false, false);
+	ret = nouveau_bo_validate(nvbo, false, false, false);
 	if (ret == 0) {
 		switch (bo->mem.mem_type) {
 		case TTM_PL_VRAM:
@@ -319,6 +303,25 @@
 		ttm_bo_kunmap(&nvbo->kmap);
 }
 
+int
+nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
+		    bool no_wait_reserve, bool no_wait_gpu)
+{
+	int ret;
+
+	ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement, interruptible,
+			      no_wait_reserve, no_wait_gpu);
+	if (ret)
+		return ret;
+
+	if (nvbo->vma.node) {
+		if (nvbo->bo.mem.mem_type == TTM_PL_VRAM)
+			nvbo->bo.offset = nvbo->vma.offset;
+	}
+
+	return 0;
+}
+
 u16
 nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index)
 {
@@ -410,37 +413,40 @@
 		man->default_caching = TTM_PL_FLAG_CACHED;
 		break;
 	case TTM_PL_VRAM:
-		man->func = &ttm_bo_manager_func;
+		if (dev_priv->card_type >= NV_50) {
+			man->func = &nouveau_vram_manager;
+			man->io_reserve_fastpath = false;
+			man->use_io_reserve_lru = true;
+		} else {
+			man->func = &ttm_bo_manager_func;
+		}
 		man->flags = TTM_MEMTYPE_FLAG_FIXED |
 			     TTM_MEMTYPE_FLAG_MAPPABLE;
 		man->available_caching = TTM_PL_FLAG_UNCACHED |
 					 TTM_PL_FLAG_WC;
 		man->default_caching = TTM_PL_FLAG_WC;
-		if (dev_priv->card_type == NV_50)
-			man->gpu_offset = 0x40000000;
-		else
-			man->gpu_offset = 0;
 		break;
 	case TTM_PL_TT:
 		man->func = &ttm_bo_manager_func;
 		switch (dev_priv->gart_info.type) {
 		case NOUVEAU_GART_AGP:
 			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
-			man->available_caching = TTM_PL_FLAG_UNCACHED;
-			man->default_caching = TTM_PL_FLAG_UNCACHED;
+			man->available_caching = TTM_PL_FLAG_UNCACHED |
+				TTM_PL_FLAG_WC;
+			man->default_caching = TTM_PL_FLAG_WC;
 			break;
 		case NOUVEAU_GART_SGDMA:
 			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
 				     TTM_MEMTYPE_FLAG_CMA;
 			man->available_caching = TTM_PL_MASK_CACHING;
 			man->default_caching = TTM_PL_FLAG_CACHED;
+			man->gpu_offset = dev_priv->gart_info.aper_base;
 			break;
 		default:
 			NV_ERROR(dev, "Unknown GART type: %d\n",
 				 dev_priv->gart_info.type);
 			return -EINVAL;
 		}
-		man->gpu_offset = dev_priv->vm_gart_base;
 		break;
 	default:
 		NV_ERROR(dev, "Unsupported memory type %u\n", (unsigned)type);
@@ -485,16 +491,9 @@
 	if (ret)
 		return ret;
 
-	if (nvbo->channel) {
-		ret = nouveau_fence_sync(fence, nvbo->channel);
-		if (ret)
-			goto out;
-	}
-
 	ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL, evict,
 					no_wait_reserve, no_wait_gpu, new_mem);
-out:
-	nouveau_fence_unref((void *)&fence);
+	nouveau_fence_unref(&fence);
 	return ret;
 }
 
@@ -516,6 +515,58 @@
 }
 
 static int
+nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
+		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
+{
+	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
+	struct nouveau_bo *nvbo = nouveau_bo(bo);
+	u64 src_offset = old_mem->start << PAGE_SHIFT;
+	u64 dst_offset = new_mem->start << PAGE_SHIFT;
+	u32 page_count = new_mem->num_pages;
+	int ret;
+
+	if (!nvbo->no_vm) {
+		if (old_mem->mem_type == TTM_PL_VRAM)
+			src_offset  = nvbo->vma.offset;
+		else
+			src_offset += dev_priv->gart_info.aper_base;
+
+		if (new_mem->mem_type == TTM_PL_VRAM)
+			dst_offset  = nvbo->vma.offset;
+		else
+			dst_offset += dev_priv->gart_info.aper_base;
+	}
+
+	page_count = new_mem->num_pages;
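+	/* copy one page per line; each pass submits at most 2047 lines */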
+	while (page_count) {
+		int line_count = (page_count > 2047) ? 2047 : page_count;
+
+		ret = RING_SPACE(chan, 12);
+		if (ret)
+			return ret;
+
+		BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0238, 2);
+		OUT_RING  (chan, upper_32_bits(dst_offset));
+		OUT_RING  (chan, lower_32_bits(dst_offset));
+		BEGIN_NVC0(chan, 2, NvSubM2MF, 0x030c, 6);
+		OUT_RING  (chan, upper_32_bits(src_offset));
+		OUT_RING  (chan, lower_32_bits(src_offset));
+		OUT_RING  (chan, PAGE_SIZE); /* src_pitch */
+		OUT_RING  (chan, PAGE_SIZE); /* dst_pitch */
+		OUT_RING  (chan, PAGE_SIZE); /* line_length */
+		OUT_RING  (chan, line_count);
+		BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0300, 1);
+		OUT_RING  (chan, 0x00100110);
+
+		page_count -= line_count;
+		src_offset += (PAGE_SIZE * line_count);
+		dst_offset += (PAGE_SIZE * line_count);
+	}
+
+	return 0;
+}
+
+static int
 nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
 		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
 {
@@ -529,14 +580,14 @@
 	dst_offset = new_mem->start << PAGE_SHIFT;
 	if (!nvbo->no_vm) {
 		if (old_mem->mem_type == TTM_PL_VRAM)
-			src_offset += dev_priv->vm_vram_base;
+			src_offset  = nvbo->vma.offset;
 		else
-			src_offset += dev_priv->vm_gart_base;
+			src_offset += dev_priv->gart_info.aper_base;
 
 		if (new_mem->mem_type == TTM_PL_VRAM)
-			dst_offset += dev_priv->vm_vram_base;
+			dst_offset  = nvbo->vma.offset;
 		else
-			dst_offset += dev_priv->vm_gart_base;
+			dst_offset += dev_priv->gart_info.aper_base;
 	}
 
 	ret = RING_SPACE(chan, 3);
@@ -683,17 +734,27 @@
 	int ret;
 
 	chan = nvbo->channel;
-	if (!chan || nvbo->no_vm)
+	if (!chan || nvbo->no_vm) {
 		chan = dev_priv->channel;
+		mutex_lock_nested(&chan->mutex, NOUVEAU_KCHANNEL_MUTEX);
+	}
 
 	if (dev_priv->card_type < NV_50)
 		ret = nv04_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
 	else
+	if (dev_priv->card_type < NV_C0)
 		ret = nv50_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
-	if (ret)
-		return ret;
+	else
+		ret = nvc0_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
+	if (ret == 0) {
+		ret = nouveau_bo_move_accel_cleanup(chan, nvbo, evict,
+						    no_wait_reserve,
+						    no_wait_gpu, new_mem);
+	}
 
-	return nouveau_bo_move_accel_cleanup(chan, nvbo, evict, no_wait_reserve, no_wait_gpu, new_mem);
+	if (chan == dev_priv->channel)
+		mutex_unlock(&chan->mutex);
+	return ret;
 }
 
 static int
@@ -771,7 +832,6 @@
 	struct drm_device *dev = dev_priv->dev;
 	struct nouveau_bo *nvbo = nouveau_bo(bo);
 	uint64_t offset;
-	int ret;
 
 	if (nvbo->no_vm || new_mem->mem_type != TTM_PL_VRAM) {
 		/* Nothing to do. */
@@ -781,18 +841,12 @@
 
 	offset = new_mem->start << PAGE_SHIFT;
 
-	if (dev_priv->card_type == NV_50) {
-		ret = nv50_mem_vm_bind_linear(dev,
-					      offset + dev_priv->vm_vram_base,
-					      new_mem->size,
-					      nouveau_bo_tile_layout(nvbo),
-					      offset);
-		if (ret)
-			return ret;
-
+	if (dev_priv->chan_vm) {
+		nouveau_vm_map(&nvbo->vma, new_mem->mm_node);
 	} else if (dev_priv->card_type >= NV_10) {
 		*new_tile = nv10_mem_set_tiling(dev, offset, new_mem->size,
-						nvbo->tile_mode);
+						nvbo->tile_mode,
+						nvbo->tile_flags);
 	}
 
 	return 0;
@@ -808,9 +862,7 @@
 
 	if (dev_priv->card_type >= NV_10 &&
 	    dev_priv->card_type < NV_50) {
-		if (*old_tile)
-			nv10_mem_expire_tiling(dev, *old_tile, bo->sync_obj);
-
+		nv10_mem_put_tile_region(dev, *old_tile, bo->sync_obj);
 		*old_tile = new_tile;
 	}
 }
@@ -879,6 +931,7 @@
 	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
 	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
 	struct drm_device *dev = dev_priv->dev;
+	int ret;
 
 	mem->bus.addr = NULL;
 	mem->bus.offset = 0;
@@ -901,9 +954,40 @@
 #endif
 		break;
 	case TTM_PL_VRAM:
-		mem->bus.offset = mem->start << PAGE_SHIFT;
+	{
+		struct nouveau_vram *vram = mem->mm_node;
+		u8 page_shift;
+
+		if (!dev_priv->bar1_vm) {
+			mem->bus.offset = mem->start << PAGE_SHIFT;
+			mem->bus.base = pci_resource_start(dev->pdev, 1);
+			mem->bus.is_iomem = true;
+			break;
+		}
+
+		if (dev_priv->card_type == NV_C0)
+			page_shift = vram->page_shift;
+		else
+			page_shift = 12;
+
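+		/* map this VRAM node through the BAR1 VM so the CPU can reach it via the aperture */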
+		ret = nouveau_vm_get(dev_priv->bar1_vm, mem->bus.size,
+				     page_shift, NV_MEM_ACCESS_RW,
+				     &vram->bar_vma);
+		if (ret)
+			return ret;
+
+		nouveau_vm_map(&vram->bar_vma, vram);
+		if (ret) {
+			nouveau_vm_put(&vram->bar_vma);
+			return ret;
+		}
+
+		mem->bus.offset = vram->bar_vma.offset;
+		if (dev_priv->card_type == NV_50) /*XXX*/
+			mem->bus.offset -= 0x0020000000ULL;
 		mem->bus.base = pci_resource_start(dev->pdev, 1);
 		mem->bus.is_iomem = true;
+	}
 		break;
 	default:
 		return -EINVAL;
@@ -914,6 +998,17 @@
 static void
 nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
 {
+	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
+	struct nouveau_vram *vram = mem->mm_node;
+
+	if (!dev_priv->bar1_vm || mem->mem_type != TTM_PL_VRAM)
+		return;
+
+	if (!vram->bar_vma.node)
+		return;
+
+	nouveau_vm_unmap(&vram->bar_vma);
+	nouveau_vm_put(&vram->bar_vma);
 }
 
 static int
@@ -939,7 +1034,23 @@
 	nvbo->placement.fpfn = 0;
 	nvbo->placement.lpfn = dev_priv->fb_mappable_pages;
 	nouveau_bo_placement_set(nvbo, TTM_PL_VRAM, 0);
-	return ttm_bo_validate(bo, &nvbo->placement, false, true, false);
+	return nouveau_bo_validate(nvbo, false, true, false);
+}
+
+void
+nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence)
+{
+	struct nouveau_fence *old_fence;
+
+	if (likely(fence))
+		nouveau_fence_ref(fence);
+
+	spin_lock(&nvbo->bo.bdev->fence_lock);
+	old_fence = nvbo->bo.sync_obj;
+	nvbo->bo.sync_obj = fence;
+	spin_unlock(&nvbo->bo.bdev->fence_lock);
+
+	nouveau_fence_unref(&old_fence);
 }
 
 struct ttm_bo_driver nouveau_bo_driver = {
@@ -949,11 +1060,11 @@
 	.evict_flags = nouveau_bo_evict_flags,
 	.move = nouveau_bo_move,
 	.verify_access = nouveau_bo_verify_access,
-	.sync_obj_signaled = nouveau_fence_signalled,
-	.sync_obj_wait = nouveau_fence_wait,
-	.sync_obj_flush = nouveau_fence_flush,
-	.sync_obj_unref = nouveau_fence_unref,
-	.sync_obj_ref = nouveau_fence_ref,
+	.sync_obj_signaled = __nouveau_fence_signalled,
+	.sync_obj_wait = __nouveau_fence_wait,
+	.sync_obj_flush = __nouveau_fence_flush,
+	.sync_obj_unref = __nouveau_fence_unref,
+	.sync_obj_ref = __nouveau_fence_ref,
 	.fault_reserve_notify = &nouveau_ttm_fault_reserve_notify,
 	.io_mem_reserve = &nouveau_ttm_io_mem_reserve,
 	.io_mem_free = &nouveau_ttm_io_mem_free,
diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c
index 373950e..3960d66 100644
--- a/drivers/gpu/drm/nouveau/nouveau_channel.c
+++ b/drivers/gpu/drm/nouveau/nouveau_channel.c
@@ -38,23 +38,28 @@
 	int ret;
 
 	if (dev_priv->card_type >= NV_50) {
-		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0,
-					     dev_priv->vm_end, NV_DMA_ACCESS_RO,
-					     NV_DMA_TARGET_AGP, &pushbuf);
+		if (dev_priv->card_type < NV_C0) {
+			ret = nouveau_gpuobj_dma_new(chan,
+						     NV_CLASS_DMA_IN_MEMORY, 0,
+						     (1ULL << 40),
+						     NV_MEM_ACCESS_RO,
+						     NV_MEM_TARGET_VM,
+						     &pushbuf);
+		}
 		chan->pushbuf_base = pb->bo.offset;
 	} else
 	if (pb->bo.mem.mem_type == TTM_PL_TT) {
-		ret = nouveau_gpuobj_gart_dma_new(chan, 0,
-						  dev_priv->gart_info.aper_size,
-						  NV_DMA_ACCESS_RO, &pushbuf,
-						  NULL);
+		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0,
+					     dev_priv->gart_info.aper_size,
+					     NV_MEM_ACCESS_RO,
+					     NV_MEM_TARGET_GART, &pushbuf);
 		chan->pushbuf_base = pb->bo.mem.start << PAGE_SHIFT;
 	} else
 	if (dev_priv->card_type != NV_04) {
 		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0,
 					     dev_priv->fb_available_size,
-					     NV_DMA_ACCESS_RO,
-					     NV_DMA_TARGET_VIDMEM, &pushbuf);
+					     NV_MEM_ACCESS_RO,
+					     NV_MEM_TARGET_VRAM, &pushbuf);
 		chan->pushbuf_base = pb->bo.mem.start << PAGE_SHIFT;
 	} else {
 		/* NV04 cmdbuf hack, from original ddx.. not sure of its
@@ -62,17 +67,16 @@
 		 * VRAM.
 		 */
 		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
-					     pci_resource_start(dev->pdev,
-					     1),
+					     pci_resource_start(dev->pdev, 1),
 					     dev_priv->fb_available_size,
-					     NV_DMA_ACCESS_RO,
-					     NV_DMA_TARGET_PCI, &pushbuf);
+					     NV_MEM_ACCESS_RO,
+					     NV_MEM_TARGET_PCI, &pushbuf);
 		chan->pushbuf_base = pb->bo.mem.start << PAGE_SHIFT;
 	}
 
 	nouveau_gpuobj_ref(pushbuf, &chan->pushbuf);
 	nouveau_gpuobj_ref(NULL, &pushbuf);
-	return 0;
+	return ret;
 }
 
 static struct nouveau_bo *
@@ -100,6 +104,13 @@
 		return NULL;
 	}
 
+	ret = nouveau_bo_map(pushbuf);
+	if (ret) {
+		nouveau_bo_unpin(pushbuf);
+		nouveau_bo_ref(NULL, &pushbuf);
+		return NULL;
+	}
+
 	return pushbuf;
 }
 
@@ -107,74 +118,59 @@
 int
 nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
 		      struct drm_file *file_priv,
-		      uint32_t vram_handle, uint32_t tt_handle)
+		      uint32_t vram_handle, uint32_t gart_handle)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
 	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
 	struct nouveau_channel *chan;
-	int channel, user;
+	unsigned long flags;
 	int ret;
 
-	/*
-	 * Alright, here is the full story
-	 * Nvidia cards have multiple hw fifo contexts (praise them for that,
-	 * no complicated crash-prone context switches)
-	 * We allocate a new context for each app and let it write to it
-	 * directly (woo, full userspace command submission !)
-	 * When there are no more contexts, you lost
-	 */
-	for (channel = 0; channel < pfifo->channels; channel++) {
-		if (dev_priv->fifos[channel] == NULL)
-			break;
-	}
-
-	/* no more fifos. you lost. */
-	if (channel == pfifo->channels)
-		return -EINVAL;
-
-	dev_priv->fifos[channel] = kzalloc(sizeof(struct nouveau_channel),
-					   GFP_KERNEL);
-	if (!dev_priv->fifos[channel])
+	/* allocate and lock channel structure */
+	chan = kzalloc(sizeof(*chan), GFP_KERNEL);
+	if (!chan)
 		return -ENOMEM;
-	chan = dev_priv->fifos[channel];
-	INIT_LIST_HEAD(&chan->nvsw.vbl_wait);
-	INIT_LIST_HEAD(&chan->fence.pending);
 	chan->dev = dev;
-	chan->id = channel;
 	chan->file_priv = file_priv;
 	chan->vram_handle = vram_handle;
-	chan->gart_handle = tt_handle;
+	chan->gart_handle = gart_handle;
 
-	NV_INFO(dev, "Allocating FIFO number %d\n", channel);
+	kref_init(&chan->ref);
+	atomic_set(&chan->users, 1);
+	mutex_init(&chan->mutex);
+	mutex_lock(&chan->mutex);
+
+	/* allocate hw channel id */
+	spin_lock_irqsave(&dev_priv->channels.lock, flags);
+	for (chan->id = 0; chan->id < pfifo->channels; chan->id++) {
+		if (!dev_priv->channels.ptr[chan->id]) {
+			nouveau_channel_ref(chan, &dev_priv->channels.ptr[chan->id]);
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
+
+	if (chan->id == pfifo->channels) {
+		mutex_unlock(&chan->mutex);
+		kfree(chan);
+		return -ENODEV;
+	}
+
+	NV_DEBUG(dev, "initialising channel %d\n", chan->id);
+	INIT_LIST_HEAD(&chan->nvsw.vbl_wait);
+	INIT_LIST_HEAD(&chan->nvsw.flip);
+	INIT_LIST_HEAD(&chan->fence.pending);
 
 	/* Allocate DMA push buffer */
 	chan->pushbuf_bo = nouveau_channel_user_pushbuf_alloc(dev);
 	if (!chan->pushbuf_bo) {
 		ret = -ENOMEM;
 		NV_ERROR(dev, "pushbuf %d\n", ret);
-		nouveau_channel_free(chan);
+		nouveau_channel_put(&chan);
 		return ret;
 	}
 
 	nouveau_dma_pre_init(chan);
-
-	/* Locate channel's user control regs */
-	if (dev_priv->card_type < NV_40)
-		user = NV03_USER(channel);
-	else
-	if (dev_priv->card_type < NV_50)
-		user = NV40_USER(channel);
-	else
-		user = NV50_USER(channel);
-
-	chan->user = ioremap(pci_resource_start(dev->pdev, 0) + user,
-								PAGE_SIZE);
-	if (!chan->user) {
-		NV_ERROR(dev, "ioremap of regs failed.\n");
-		nouveau_channel_free(chan);
-		return -ENOMEM;
-	}
 	chan->user_put = 0x40;
 	chan->user_get = 0x44;
 
@@ -182,15 +178,15 @@
 	ret = nouveau_notifier_init_channel(chan);
 	if (ret) {
 		NV_ERROR(dev, "ntfy %d\n", ret);
-		nouveau_channel_free(chan);
+		nouveau_channel_put(&chan);
 		return ret;
 	}
 
 	/* Setup channel's default objects */
-	ret = nouveau_gpuobj_channel_init(chan, vram_handle, tt_handle);
+	ret = nouveau_gpuobj_channel_init(chan, vram_handle, gart_handle);
 	if (ret) {
 		NV_ERROR(dev, "gpuobj %d\n", ret);
-		nouveau_channel_free(chan);
+		nouveau_channel_put(&chan);
 		return ret;
 	}
 
@@ -198,24 +194,17 @@
 	ret = nouveau_channel_pushbuf_ctxdma_init(chan);
 	if (ret) {
 		NV_ERROR(dev, "pbctxdma %d\n", ret);
-		nouveau_channel_free(chan);
+		nouveau_channel_put(&chan);
 		return ret;
 	}
 
 	/* disable the fifo caches */
 	pfifo->reassign(dev, false);
 
-	/* Create a graphics context for new channel */
-	ret = pgraph->create_context(chan);
-	if (ret) {
-		nouveau_channel_free(chan);
-		return ret;
-	}
-
 	/* Construct initial RAMFC for new channel */
 	ret = pfifo->create_context(chan);
 	if (ret) {
-		nouveau_channel_free(chan);
+		nouveau_channel_put(&chan);
 		return ret;
 	}
 
@@ -225,83 +214,111 @@
 	if (!ret)
 		ret = nouveau_fence_channel_init(chan);
 	if (ret) {
-		nouveau_channel_free(chan);
+		nouveau_channel_put(&chan);
 		return ret;
 	}
 
 	nouveau_debugfs_channel_init(chan);
 
-	NV_INFO(dev, "%s: initialised FIFO %d\n", __func__, channel);
+	NV_DEBUG(dev, "channel %d initialised\n", chan->id);
 	*chan_ret = chan;
 	return 0;
 }
 
-/* stops a fifo */
-void
-nouveau_channel_free(struct nouveau_channel *chan)
+struct nouveau_channel *
+nouveau_channel_get_unlocked(struct nouveau_channel *ref)
 {
-	struct drm_device *dev = chan->dev;
+	struct nouveau_channel *chan = NULL;
+
+	if (likely(ref && atomic_inc_not_zero(&ref->users)))
+		nouveau_channel_ref(ref, &chan);
+
+	return chan;
+}
+
+struct nouveau_channel *
+nouveau_channel_get(struct drm_device *dev, struct drm_file *file_priv, int id)
+{
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
-	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
+	struct nouveau_channel *chan;
 	unsigned long flags;
-	int ret;
 
-	NV_INFO(dev, "%s: freeing fifo %d\n", __func__, chan->id);
+	if (unlikely(id < 0 || id >= NOUVEAU_MAX_CHANNEL_NR))
+		return ERR_PTR(-EINVAL);
 
-	nouveau_debugfs_channel_fini(chan);
+	spin_lock_irqsave(&dev_priv->channels.lock, flags);
+	chan = nouveau_channel_get_unlocked(dev_priv->channels.ptr[id]);
+	spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
 
-	/* Give outstanding push buffers a chance to complete */
-	nouveau_fence_update(chan);
-	if (chan->fence.sequence != chan->fence.sequence_ack) {
-		struct nouveau_fence *fence = NULL;
+	if (unlikely(!chan))
+		return ERR_PTR(-EINVAL);
 
-		ret = nouveau_fence_new(chan, &fence, true);
-		if (ret == 0) {
-			ret = nouveau_fence_wait(fence, NULL, false, false);
-			nouveau_fence_unref((void *)&fence);
-		}
-
-		if (ret)
-			NV_ERROR(dev, "Failed to idle channel %d.\n", chan->id);
+	if (unlikely(file_priv && chan->file_priv != file_priv)) {
+		nouveau_channel_put_unlocked(&chan);
+		return ERR_PTR(-EINVAL);
 	}
 
-	/* Ensure all outstanding fences are signaled.  They should be if the
+	mutex_lock(&chan->mutex);
+	return chan;
+}
+
+void
+nouveau_channel_put_unlocked(struct nouveau_channel **pchan)
+{
+	struct nouveau_channel *chan = *pchan;
+	struct drm_device *dev = chan->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
+	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
+	struct nouveau_crypt_engine *pcrypt = &dev_priv->engine.crypt;
+	unsigned long flags;
+
+	/* decrement the refcount, and we're done if there's still refs */
+	if (likely(!atomic_dec_and_test(&chan->users))) {
+		nouveau_channel_ref(NULL, pchan);
+		return;
+	}
+
+	/* no one wants the channel anymore */
+	NV_DEBUG(dev, "freeing channel %d\n", chan->id);
+	nouveau_debugfs_channel_fini(chan);
+
+	/* give it chance to idle */
+	nouveau_channel_idle(chan);
+
+	/* ensure all outstanding fences are signaled.  they should be if the
 	 * above attempts at idling were OK, but if we failed this'll tell TTM
 	 * we're done with the buffers.
 	 */
 	nouveau_fence_channel_fini(chan);
 
-	/* This will prevent pfifo from switching channels. */
+	/* boot it off the hardware */
 	pfifo->reassign(dev, false);
 
-	/* We want to give pgraph a chance to idle and get rid of all potential
-	 * errors. We need to do this before the lock, otherwise the irq handler
-	 * is unable to process them.
+	/* We want to give pgraph a chance to idle and get rid of all
+	 * potential errors. We need to do this without the context
+	 * switch lock held, otherwise the irq handler is unable to
+	 * process them.
 	 */
 	if (pgraph->channel(dev) == chan)
 		nouveau_wait_for_idle(dev);
 
-	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
-
-	pgraph->fifo_access(dev, false);
-	if (pgraph->channel(dev) == chan)
-		pgraph->unload_context(dev);
-	pgraph->destroy_context(chan);
-	pgraph->fifo_access(dev, true);
-
-	if (pfifo->channel_id(dev) == chan->id) {
-		pfifo->disable(dev);
-		pfifo->unload_context(dev);
-		pfifo->enable(dev);
-	}
+	/* destroy the engine specific contexts */
 	pfifo->destroy_context(chan);
+	pgraph->destroy_context(chan);
+	if (pcrypt->destroy_context)
+		pcrypt->destroy_context(chan);
 
 	pfifo->reassign(dev, true);
 
-	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
+	/* aside from its resources, the channel should now be dead,
+	 * remove it from the channel list
+	 */
+	spin_lock_irqsave(&dev_priv->channels.lock, flags);
+	nouveau_channel_ref(NULL, &dev_priv->channels.ptr[chan->id]);
+	spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
 
-	/* Release the channel's resources */
+	/* destroy any resources the channel owned */
 	nouveau_gpuobj_ref(NULL, &chan->pushbuf);
 	if (chan->pushbuf_bo) {
 		nouveau_bo_unmap(chan->pushbuf_bo);
@@ -310,44 +327,80 @@
 	}
 	nouveau_gpuobj_channel_takedown(chan);
 	nouveau_notifier_takedown_channel(chan);
-	if (chan->user)
-		iounmap(chan->user);
 
-	dev_priv->fifos[chan->id] = NULL;
+	nouveau_channel_ref(NULL, pchan);
+}
+
+void
+nouveau_channel_put(struct nouveau_channel **pchan)
+{
+	mutex_unlock(&(*pchan)->mutex);
+	nouveau_channel_put_unlocked(pchan);
+}
+
+static void
+nouveau_channel_del(struct kref *ref)
+{
+	struct nouveau_channel *chan =
+		container_of(ref, struct nouveau_channel, ref);
+
 	kfree(chan);
 }
 
+void
+nouveau_channel_ref(struct nouveau_channel *chan,
+		    struct nouveau_channel **pchan)
+{
+	if (chan)
+		kref_get(&chan->ref);
+
+	if (*pchan)
+		kref_put(&(*pchan)->ref, nouveau_channel_del);
+
+	*pchan = chan;
+}
+
+void
+nouveau_channel_idle(struct nouveau_channel *chan)
+{
+	struct drm_device *dev = chan->dev;
+	struct nouveau_fence *fence = NULL;
+	int ret;
+
+	nouveau_fence_update(chan);
+
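+	/* if requests are still outstanding, emit a final fence and wait on it */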
+	if (chan->fence.sequence != chan->fence.sequence_ack) {
+		ret = nouveau_fence_new(chan, &fence, true);
+		if (!ret) {
+			ret = nouveau_fence_wait(fence, false, false);
+			nouveau_fence_unref(&fence);
+		}
+
+		if (ret)
+			NV_ERROR(dev, "Failed to idle channel %d.\n", chan->id);
+	}
+}
+
 /* cleans up all the fifos from file_priv */
 void
 nouveau_channel_cleanup(struct drm_device *dev, struct drm_file *file_priv)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_engine *engine = &dev_priv->engine;
+	struct nouveau_channel *chan;
 	int i;
 
 	NV_DEBUG(dev, "clearing FIFO enables from file_priv\n");
 	for (i = 0; i < engine->fifo.channels; i++) {
-		struct nouveau_channel *chan = dev_priv->fifos[i];
+		chan = nouveau_channel_get(dev, file_priv, i);
+		if (IS_ERR(chan))
+			continue;
 
-		if (chan && chan->file_priv == file_priv)
-			nouveau_channel_free(chan);
+		atomic_dec(&chan->users);
+		nouveau_channel_put(&chan);
 	}
 }
 
-int
-nouveau_channel_owner(struct drm_device *dev, struct drm_file *file_priv,
-		      int channel)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_engine *engine = &dev_priv->engine;
-
-	if (channel >= engine->fifo.channels)
-		return 0;
-	if (dev_priv->fifos[channel] == NULL)
-		return 0;
-
-	return (dev_priv->fifos[channel]->file_priv == file_priv);
-}
 
 /***********************************
  * ioctls wrapping the functions
@@ -383,36 +436,44 @@
 	else
 		init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_GART;
 
-	init->subchan[0].handle = NvM2MF;
-	if (dev_priv->card_type < NV_50)
-		init->subchan[0].grclass = 0x0039;
-	else
-		init->subchan[0].grclass = 0x5039;
-	init->subchan[1].handle = NvSw;
-	init->subchan[1].grclass = NV_SW;
-	init->nr_subchan = 2;
+	if (dev_priv->card_type < NV_C0) {
+		init->subchan[0].handle = NvM2MF;
+		if (dev_priv->card_type < NV_50)
+			init->subchan[0].grclass = 0x0039;
+		else
+			init->subchan[0].grclass = 0x5039;
+		init->subchan[1].handle = NvSw;
+		init->subchan[1].grclass = NV_SW;
+		init->nr_subchan = 2;
+	} else {
+		init->subchan[0].handle  = 0x9039;
+		init->subchan[0].grclass = 0x9039;
+		init->nr_subchan = 1;
+	}
 
 	/* Named memory object area */
 	ret = drm_gem_handle_create(file_priv, chan->notifier_bo->gem,
 				    &init->notifier_handle);
-	if (ret) {
-		nouveau_channel_free(chan);
-		return ret;
-	}
 
-	return 0;
+	if (ret == 0)
+		atomic_inc(&chan->users); /* userspace reference */
+	nouveau_channel_put(&chan);
+	return ret;
 }
 
 static int
 nouveau_ioctl_fifo_free(struct drm_device *dev, void *data,
 			struct drm_file *file_priv)
 {
-	struct drm_nouveau_channel_free *cfree = data;
+	struct drm_nouveau_channel_free *req = data;
 	struct nouveau_channel *chan;
 
-	NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(cfree->channel, file_priv, chan);
+	chan = nouveau_channel_get(dev, file_priv, req->channel);
+	if (IS_ERR(chan))
+		return PTR_ERR(chan);
 
-	nouveau_channel_free(chan);
+	atomic_dec(&chan->users);
+	nouveau_channel_put(&chan);
 	return 0;
 }
 
@@ -421,18 +482,18 @@
  ***********************************/
 
 struct drm_ioctl_desc nouveau_ioctls[] = {
-	DRM_IOCTL_DEF_DRV(NOUVEAU_GETPARAM, nouveau_ioctl_getparam, DRM_AUTH),
-	DRM_IOCTL_DEF_DRV(NOUVEAU_SETPARAM, nouveau_ioctl_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-	DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_ALLOC, nouveau_ioctl_fifo_alloc, DRM_AUTH),
-	DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_FREE, nouveau_ioctl_fifo_free, DRM_AUTH),
-	DRM_IOCTL_DEF_DRV(NOUVEAU_GROBJ_ALLOC, nouveau_ioctl_grobj_alloc, DRM_AUTH),
-	DRM_IOCTL_DEF_DRV(NOUVEAU_NOTIFIEROBJ_ALLOC, nouveau_ioctl_notifier_alloc, DRM_AUTH),
-	DRM_IOCTL_DEF_DRV(NOUVEAU_GPUOBJ_FREE, nouveau_ioctl_gpuobj_free, DRM_AUTH),
-	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_NEW, nouveau_gem_ioctl_new, DRM_AUTH),
-	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_PUSHBUF, nouveau_gem_ioctl_pushbuf, DRM_AUTH),
-	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_AUTH),
-	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_FINI, nouveau_gem_ioctl_cpu_fini, DRM_AUTH),
-	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(NOUVEAU_GETPARAM, nouveau_ioctl_getparam, DRM_UNLOCKED|DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(NOUVEAU_SETPARAM, nouveau_ioctl_setparam, DRM_UNLOCKED|DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_ALLOC, nouveau_ioctl_fifo_alloc, DRM_UNLOCKED|DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_FREE, nouveau_ioctl_fifo_free, DRM_UNLOCKED|DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(NOUVEAU_GROBJ_ALLOC, nouveau_ioctl_grobj_alloc, DRM_UNLOCKED|DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(NOUVEAU_NOTIFIEROBJ_ALLOC, nouveau_ioctl_notifier_alloc, DRM_UNLOCKED|DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(NOUVEAU_GPUOBJ_FREE, nouveau_ioctl_gpuobj_free, DRM_UNLOCKED|DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_NEW, nouveau_gem_ioctl_new, DRM_UNLOCKED|DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_PUSHBUF, nouveau_gem_ioctl_pushbuf, DRM_UNLOCKED|DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_UNLOCKED|DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_FINI, nouveau_gem_ioctl_cpu_fini, DRM_UNLOCKED|DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_UNLOCKED|DRM_AUTH),
 };
 
 int nouveau_max_ioctl = DRM_ARRAY_SIZE(nouveau_ioctls);
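The hunks above replace direct fifos[] indexing and nouveau_channel_free() with nouveau_channel_get()/nouveau_channel_put(), splitting channel lifetime into a structural reference count and a hardware-user count ("users"): when the last user goes away the hardware context can be kicked, while the structure itself survives until the last reference is dropped. A rough userspace sketch of that two-counter idea follows; the names and the combined get/put behaviour are simplifications for illustration, not the nouveau API.

/* Illustrative userspace sketch of the two-counter channel lifetime:
 * "users" counts holders of the hardware channel, "refs" counts holders
 * of the structure itself.  Names are invented for the example. */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct chan {
	atomic_int refs;   /* structure references */
	atomic_int users;  /* hardware users; context kicked at zero */
	int id;
};

static struct chan *chan_get(struct chan *c)
{
	atomic_fetch_add(&c->refs, 1);
	atomic_fetch_add(&c->users, 1);
	return c;
}

static void chan_put(struct chan **pc)
{
	struct chan *c = *pc;

	*pc = NULL;
	if (atomic_fetch_sub(&c->users, 1) == 1)
		printf("channel %d: kick hardware context\n", c->id);
	if (atomic_fetch_sub(&c->refs, 1) == 1) {
		printf("channel %d: free structure\n", c->id);
		free(c);
	}
}

int main(void)
{
	struct chan *c = calloc(1, sizeof(*c)), *user;

	c->id = 1;
	atomic_init(&c->refs, 1);
	atomic_init(&c->users, 1);

	user = chan_get(c);    /* e.g. an ioctl taking the channel */
	chan_put(&user);       /* ioctl done */
	chan_put(&c);          /* last user: context kicked, struct freed */
	return 0;
}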
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 52c356e..a21e000 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -37,6 +37,8 @@
 #include "nouveau_connector.h"
 #include "nouveau_hw.h"
 
+static void nouveau_connector_hotplug(void *, int);
+
 static struct nouveau_encoder *
 find_encoder_by_type(struct drm_connector *connector, int type)
 {
@@ -94,22 +96,30 @@
 }
 
 static void
-nouveau_connector_destroy(struct drm_connector *drm_connector)
+nouveau_connector_destroy(struct drm_connector *connector)
 {
-	struct nouveau_connector *nv_connector =
-		nouveau_connector(drm_connector);
+	struct nouveau_connector *nv_connector = nouveau_connector(connector);
+	struct drm_nouveau_private *dev_priv;
+	struct nouveau_gpio_engine *pgpio;
 	struct drm_device *dev;
 
 	if (!nv_connector)
 		return;
 
 	dev = nv_connector->base.dev;
+	dev_priv = dev->dev_private;
 	NV_DEBUG_KMS(dev, "\n");
 
+	pgpio = &dev_priv->engine.gpio;
+	if (pgpio->irq_unregister) {
+		pgpio->irq_unregister(dev, nv_connector->dcb->gpio_tag,
+				      nouveau_connector_hotplug, connector);
+	}
+
 	kfree(nv_connector->edid);
-	drm_sysfs_connector_remove(drm_connector);
-	drm_connector_cleanup(drm_connector);
-	kfree(drm_connector);
+	drm_sysfs_connector_remove(connector);
+	drm_connector_cleanup(connector);
+	kfree(connector);
 }
 
 static struct nouveau_i2c_chan *
@@ -760,6 +770,7 @@
 {
 	const struct drm_connector_funcs *funcs = &nouveau_connector_funcs;
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
 	struct nouveau_connector *nv_connector = NULL;
 	struct dcb_connector_table_entry *dcb = NULL;
 	struct drm_connector *connector;
@@ -876,6 +887,11 @@
 		break;
 	}
 
+	if (pgpio->irq_register) {
+		pgpio->irq_register(dev, nv_connector->dcb->gpio_tag,
+				    nouveau_connector_hotplug, connector);
+	}
+
 	drm_sysfs_connector_add(connector);
 	dcb->drm = connector;
 	return dcb->drm;
@@ -886,3 +902,29 @@
 	return ERR_PTR(ret);
 
 }
+
+static void
+nouveau_connector_hotplug(void *data, int plugged)
+{
+	struct drm_connector *connector = data;
+	struct drm_device *dev = connector->dev;
+
+	NV_INFO(dev, "%splugged %s\n", plugged ? "" : "un",
+		drm_get_connector_name(connector));
+
+	if (connector->encoder && connector->encoder->crtc &&
+	    connector->encoder->crtc->enabled) {
+		struct nouveau_encoder *nv_encoder = nouveau_encoder(connector->encoder);
+		struct drm_encoder_helper_funcs *helper =
+			connector->encoder->helper_private;
+
+		if (nv_encoder->dcb->type == OUTPUT_DP) {
+			if (plugged)
+				helper->dpms(connector->encoder, DRM_MODE_DPMS_ON);
+			else
+				helper->dpms(connector->encoder, DRM_MODE_DPMS_OFF);
+		}
+	}
+
+	drm_helper_hpd_irq_event(dev);
+}
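The connector code above registers a per-GPIO-tag hotplug handler with the GPIO engine when the connector is created and unregisters it on destroy; the handler receives an opaque pointer (the connector) and the new plug state. A small standalone sketch of that registration shape, with invented names:

/* Sketch of the GPIO hotplug callback registration added above: a handler
 * plus an opaque data pointer are registered per GPIO tag and invoked with
 * the new plug state when the line changes.  Purely illustrative. */
#include <stdio.h>

struct gpio_hook {
	void (*handler)(void *data, int plugged);
	void *data;
};

static struct gpio_hook hooks[4];   /* one slot per GPIO tag */

static void gpio_irq_register(int tag, void (*h)(void *, int), void *data)
{
	hooks[tag].handler = h;
	hooks[tag].data = data;
}

static void gpio_line_changed(int tag, int plugged)
{
	if (hooks[tag].handler)
		hooks[tag].handler(hooks[tag].data, plugged);
}

static void connector_hotplug(void *data, int plugged)
{
	printf("%splugged %s\n", plugged ? "" : "un", (const char *)data);
}

int main(void)
{
	gpio_irq_register(2, connector_hotplug, "DP-1");
	gpio_line_changed(2, 1);   /* cable inserted */
	gpio_line_changed(2, 0);   /* cable removed */
	return 0;
}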
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 2e11fd6..505c6bf 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -29,6 +29,9 @@
 #include "nouveau_drv.h"
 #include "nouveau_fb.h"
 #include "nouveau_fbcon.h"
+#include "nouveau_hw.h"
+#include "nouveau_crtc.h"
+#include "nouveau_dma.h"
 
 static void
 nouveau_user_framebuffer_destroy(struct drm_framebuffer *drm_fb)
@@ -104,3 +107,207 @@
 	.output_poll_changed = nouveau_fbcon_output_poll_changed,
 };
 
+int
+nouveau_vblank_enable(struct drm_device *dev, int crtc)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+	if (dev_priv->card_type >= NV_50)
+		nv_mask(dev, NV50_PDISPLAY_INTR_EN_1, 0,
+			NV50_PDISPLAY_INTR_EN_1_VBLANK_CRTC_(crtc));
+	else
+		NVWriteCRTC(dev, crtc, NV_PCRTC_INTR_EN_0,
+			    NV_PCRTC_INTR_0_VBLANK);
+
+	return 0;
+}
+
+void
+nouveau_vblank_disable(struct drm_device *dev, int crtc)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+	if (dev_priv->card_type >= NV_50)
+		nv_mask(dev, NV50_PDISPLAY_INTR_EN_1,
+			NV50_PDISPLAY_INTR_EN_1_VBLANK_CRTC_(crtc), 0);
+	else
+		NVWriteCRTC(dev, crtc, NV_PCRTC_INTR_EN_0, 0);
+}
+
+static int
+nouveau_page_flip_reserve(struct nouveau_bo *old_bo,
+			  struct nouveau_bo *new_bo)
+{
+	int ret;
+
+	ret = nouveau_bo_pin(new_bo, TTM_PL_FLAG_VRAM);
+	if (ret)
+		return ret;
+
+	ret = ttm_bo_reserve(&new_bo->bo, false, false, false, 0);
+	if (ret)
+		goto fail;
+
+	ret = ttm_bo_reserve(&old_bo->bo, false, false, false, 0);
+	if (ret)
+		goto fail_unreserve;
+
+	return 0;
+
+fail_unreserve:
+	ttm_bo_unreserve(&new_bo->bo);
+fail:
+	nouveau_bo_unpin(new_bo);
+	return ret;
+}
+
+static void
+nouveau_page_flip_unreserve(struct nouveau_bo *old_bo,
+			    struct nouveau_bo *new_bo,
+			    struct nouveau_fence *fence)
+{
+	nouveau_bo_fence(new_bo, fence);
+	ttm_bo_unreserve(&new_bo->bo);
+
+	nouveau_bo_fence(old_bo, fence);
+	ttm_bo_unreserve(&old_bo->bo);
+
+	nouveau_bo_unpin(old_bo);
+}
+
+static int
+nouveau_page_flip_emit(struct nouveau_channel *chan,
+		       struct nouveau_bo *old_bo,
+		       struct nouveau_bo *new_bo,
+		       struct nouveau_page_flip_state *s,
+		       struct nouveau_fence **pfence)
+{
+	struct drm_device *dev = chan->dev;
+	unsigned long flags;
+	int ret;
+
+	/* Queue it to the pending list */
+	spin_lock_irqsave(&dev->event_lock, flags);
+	list_add_tail(&s->head, &chan->nvsw.flip);
+	spin_unlock_irqrestore(&dev->event_lock, flags);
+
+	/* Synchronize with the old framebuffer */
+	ret = nouveau_fence_sync(old_bo->bo.sync_obj, chan);
+	if (ret)
+		goto fail;
+
+	/* Emit the pageflip */
+	ret = RING_SPACE(chan, 2);
+	if (ret)
+		goto fail;
+
+	BEGIN_RING(chan, NvSubSw, NV_SW_PAGE_FLIP, 1);
+	OUT_RING(chan, 0);
+	FIRE_RING(chan);
+
+	ret = nouveau_fence_new(chan, pfence, true);
+	if (ret)
+		goto fail;
+
+	return 0;
+fail:
+	spin_lock_irqsave(&dev->event_lock, flags);
+	list_del(&s->head);
+	spin_unlock_irqrestore(&dev->event_lock, flags);
+	return ret;
+}
+
+int
+nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
+		       struct drm_pending_vblank_event *event)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_bo *old_bo = nouveau_framebuffer(crtc->fb)->nvbo;
+	struct nouveau_bo *new_bo = nouveau_framebuffer(fb)->nvbo;
+	struct nouveau_page_flip_state *s;
+	struct nouveau_channel *chan;
+	struct nouveau_fence *fence;
+	int ret;
+
+	if (dev_priv->engine.graph.accel_blocked)
+		return -ENODEV;
+
+	s = kzalloc(sizeof(*s), GFP_KERNEL);
+	if (!s)
+		return -ENOMEM;
+
+	/* Don't let the buffers go away while we flip */
+	ret = nouveau_page_flip_reserve(old_bo, new_bo);
+	if (ret)
+		goto fail_free;
+
+	/* Initialize a page flip struct */
+	*s = (struct nouveau_page_flip_state)
+		{ { }, s->event, nouveau_crtc(crtc)->index,
+		  fb->bits_per_pixel, fb->pitch, crtc->x, crtc->y,
+		  new_bo->bo.offset };
+
+	/* Choose the channel the flip will be handled in */
+	chan = nouveau_fence_channel(new_bo->bo.sync_obj);
+	if (!chan)
+		chan = nouveau_channel_get_unlocked(dev_priv->channel);
+	mutex_lock(&chan->mutex);
+
+	/* Emit a page flip */
+	ret = nouveau_page_flip_emit(chan, old_bo, new_bo, s, &fence);
+	nouveau_channel_put(&chan);
+	if (ret)
+		goto fail_unreserve;
+
+	/* Update the crtc struct and cleanup */
+	crtc->fb = fb;
+
+	nouveau_page_flip_unreserve(old_bo, new_bo, fence);
+	nouveau_fence_unref(&fence);
+	return 0;
+
+fail_unreserve:
+	nouveau_page_flip_unreserve(old_bo, new_bo, NULL);
+fail_free:
+	kfree(s);
+	return ret;
+}
+
+int
+nouveau_finish_page_flip(struct nouveau_channel *chan,
+			 struct nouveau_page_flip_state *ps)
+{
+	struct drm_device *dev = chan->dev;
+	struct nouveau_page_flip_state *s;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->event_lock, flags);
+
+	if (list_empty(&chan->nvsw.flip)) {
+		NV_ERROR(dev, "Unexpected pageflip in channel %d.\n", chan->id);
+		spin_unlock_irqrestore(&dev->event_lock, flags);
+		return -EINVAL;
+	}
+
+	s = list_first_entry(&chan->nvsw.flip,
+			     struct nouveau_page_flip_state, head);
+	if (s->event) {
+		struct drm_pending_vblank_event *e = s->event;
+		struct timeval now;
+
+		do_gettimeofday(&now);
+		e->event.sequence = 0;
+		e->event.tv_sec = now.tv_sec;
+		e->event.tv_usec = now.tv_usec;
+		list_add_tail(&e->base.link, &e->base.file_priv->event_list);
+		wake_up_interruptible(&e->base.file_priv->event_wait);
+	}
+
+	list_del(&s->head);
+	*ps = *s;
+	kfree(s);
+
+	spin_unlock_irqrestore(&dev->event_lock, flags);
+	return 0;
+}
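nouveau_page_flip_emit() above queues the flip state on the channel's list under dev->event_lock before any GPU work is emitted, and nouveau_finish_page_flip() later pops the oldest entry and delivers its vblank event. A stripped-down userspace sketch of that queue-then-complete pattern follows, using a plain pthread mutex and list rather than the DRM event machinery.

/* Minimal sketch of the queue-then-complete pattern used by the pageflip
 * code above: the flip request is queued under a lock before the work is
 * emitted, and the completion path pops the oldest entry and delivers its
 * event.  Illustrative only. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct flip_state {
	struct flip_state *next;
	int crtc;
};

static pthread_mutex_t flip_lock = PTHREAD_MUTEX_INITIALIZER;
static struct flip_state *flip_head, **flip_tail = &flip_head;

static void flip_emit(int crtc)
{
	struct flip_state *s = calloc(1, sizeof(*s));

	s->crtc = crtc;
	pthread_mutex_lock(&flip_lock);
	*flip_tail = s;                 /* append, preserving FIFO order */
	flip_tail = &s->next;
	pthread_mutex_unlock(&flip_lock);
	/* ... the real code emits the flip command to the channel here ... */
}

static int flip_finish(void)
{
	struct flip_state *s;

	pthread_mutex_lock(&flip_lock);
	s = flip_head;
	if (!s) {
		pthread_mutex_unlock(&flip_lock);
		return -1;              /* unexpected completion */
	}
	flip_head = s->next;
	if (!flip_head)
		flip_tail = &flip_head;
	pthread_mutex_unlock(&flip_lock);

	printf("deliver vblank event for crtc %d\n", s->crtc);
	free(s);
	return 0;
}

int main(void)
{
	flip_emit(0);
	flip_emit(1);
	while (flip_finish() == 0)
		;
	return 0;
}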
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c
index 82581e6..65699bf 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.c
@@ -36,7 +36,7 @@
 	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
 	struct nouveau_bo *pushbuf = chan->pushbuf_bo;
 
-	if (dev_priv->card_type == NV_50) {
+	if (dev_priv->card_type >= NV_50) {
 		const int ib_size = pushbuf->bo.mem.size / 2;
 
 		chan->dma.ib_base = (pushbuf->bo.mem.size - ib_size) >> 2;
@@ -59,17 +59,26 @@
 {
 	struct drm_device *dev = chan->dev;
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_gpuobj *obj = NULL;
 	int ret, i;
 
-	/* Create NV_MEMORY_TO_MEMORY_FORMAT for buffer moves */
-	ret = nouveau_gpuobj_gr_new(chan, dev_priv->card_type < NV_50 ?
-				    0x0039 : 0x5039, &obj);
-	if (ret)
-		return ret;
+	if (dev_priv->card_type >= NV_C0) {
+		ret = nouveau_gpuobj_gr_new(chan, 0x9039, 0x9039);
+		if (ret)
+			return ret;
 
-	ret = nouveau_ramht_insert(chan, NvM2MF, obj);
-	nouveau_gpuobj_ref(NULL, &obj);
+		ret = RING_SPACE(chan, 2);
+		if (ret)
+			return ret;
+
+		BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0000, 1);
+		OUT_RING  (chan, 0x00009039);
+		FIRE_RING (chan);
+		return 0;
+	}
+
+	/* Create NV_MEMORY_TO_MEMORY_FORMAT for buffer moves */
+	ret = nouveau_gpuobj_gr_new(chan, NvM2MF, dev_priv->card_type < NV_50 ?
+				    0x0039 : 0x5039);
 	if (ret)
 		return ret;
 
@@ -78,11 +87,6 @@
 	if (ret)
 		return ret;
 
-	/* Map push buffer */
-	ret = nouveau_bo_map(chan->pushbuf_bo);
-	if (ret)
-		return ret;
-
 	/* Insert NOPS for NOUVEAU_DMA_SKIPS */
 	ret = RING_SPACE(chan, NOUVEAU_DMA_SKIPS);
 	if (ret)
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.h b/drivers/gpu/drm/nouveau/nouveau_dma.h
index d578c21..c36f176 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.h
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.h
@@ -77,7 +77,8 @@
 	/* G80+ display objects */
 	NvEvoVRAM	= 0x01000000,
 	NvEvoFB16	= 0x01000001,
-	NvEvoFB32	= 0x01000002
+	NvEvoFB32	= 0x01000002,
+	NvEvoVRAM_LP	= 0x01000003
 };
 
 #define NV_MEMORY_TO_MEMORY_FORMAT                                    0x00000039
@@ -125,6 +126,12 @@
 OUT_RINGp(struct nouveau_channel *chan, const void *data, unsigned nr_dwords);
 
 static inline void
+BEGIN_NVC0(struct nouveau_channel *chan, int op, int subc, int mthd, int size)
+{
+	OUT_RING(chan, (op << 28) | (size << 16) | (subc << 13) | (mthd >> 2));
+}
+
+static inline void
 BEGIN_RING(struct nouveau_channel *chan, int subc, int mthd, int size)
 {
 	OUT_RING(chan, (subc << 13) | (size << 18) | mthd);
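The BEGIN_NVC0() helper added above packs command headers differently from the pre-Fermi BEGIN_RING(): the method address moves to the low bits shifted right by two, the size field sits at bit 16 instead of 18, and an opcode occupies the top nibble. A quick standalone program that prints both encodings for an arbitrary example method makes the field layout visible; the sample values are arbitrary.

/* Standalone illustration of the two command-header layouts in the hunk
 * above.  Values below are arbitrary examples. */
#include <stdio.h>
#include <stdint.h>

static uint32_t begin_ring(int subc, int mthd, int size)
{
	return (subc << 13) | (size << 18) | mthd;
}

static uint32_t begin_nvc0(int op, int subc, int mthd, int size)
{
	return (op << 28) | (size << 16) | (subc << 13) | (mthd >> 2);
}

int main(void)
{
	/* e.g. subchannel 2, method 0x0100, one data word */
	printf("pre-NVC0 header: 0x%08x\n", begin_ring(2, 0x0100, 1));
	printf("NVC0 header:     0x%08x\n", begin_nvc0(2, 2, 0x0100, 1));
	return 0;
}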
diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c
index 4562f30..38d5995 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dp.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dp.c
@@ -279,7 +279,7 @@
 	struct bit_displayport_encoder_table *dpe;
 	int dpe_headerlen;
 	uint8_t config[4], status[3];
-	bool cr_done, cr_max_vs, eq_done;
+	bool cr_done, cr_max_vs, eq_done, hpd_state;
 	int ret = 0, i, tries, voltage;
 
 	NV_DEBUG_KMS(dev, "link training!!\n");
@@ -297,7 +297,7 @@
 	/* disable hotplug detect, this flips around on some panels during
 	 * link training.
 	 */
-	pgpio->irq_enable(dev, nv_connector->dcb->gpio_tag, false);
+	hpd_state = pgpio->irq_enable(dev, nv_connector->dcb->gpio_tag, false);
 
 	if (dpe->script0) {
 		NV_DEBUG_KMS(dev, "SOR-%d: running DP script 0\n", nv_encoder->or);
@@ -439,7 +439,7 @@
 	}
 
 	/* re-enable hotplug detect */
-	pgpio->irq_enable(dev, nv_connector->dcb->gpio_tag, true);
+	pgpio->irq_enable(dev, nv_connector->dcb->gpio_tag, hpd_state);
 
 	return eq_done;
 }
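The link-training change above makes irq_enable() return the previous hotplug-detect state, so the code restores whatever was configured before training rather than unconditionally re-enabling it. A trivial sketch of that save-and-restore idiom, with illustrative names only:

/* Sketch of the save/restore idiom introduced above: the enable routine
 * returns the previous state so the caller can put back exactly what it
 * found.  Illustrative only. */
#include <stdbool.h>
#include <stdio.h>

static bool hpd_enabled = true;

static bool hpd_irq_enable(bool on)
{
	bool prev = hpd_enabled;

	hpd_enabled = on;
	return prev;
}

int main(void)
{
	bool saved = hpd_irq_enable(false);   /* quiesce during training */
	/* ... link training ... */
	hpd_irq_enable(saved);                /* restore prior state */
	printf("hpd restored to %d\n", hpd_enabled);
	return 0;
}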
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c
index 9087549..13bb672 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.c
@@ -115,6 +115,10 @@
 int nouveau_perflvl_wr;
 module_param_named(perflvl_wr, nouveau_perflvl_wr, int, 0400);
 
+MODULE_PARM_DESC(msi, "Enable MSI (default: off)");
+int nouveau_msi;
+module_param_named(msi, nouveau_msi, int, 0400);
+
 int nouveau_fbpercrtc;
 #if 0
 module_param_named(fbpercrtc, nouveau_fbpercrtc, int, 0400);
@@ -167,6 +171,9 @@
 	if (pm_state.event == PM_EVENT_PRETHAW)
 		return 0;
 
+	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+		return 0;
+
 	NV_INFO(dev, "Disabling fbcon acceleration...\n");
 	nouveau_fbcon_save_disable_accel(dev);
 
@@ -193,23 +200,10 @@
 
 	NV_INFO(dev, "Idling channels...\n");
 	for (i = 0; i < pfifo->channels; i++) {
-		struct nouveau_fence *fence = NULL;
+		chan = dev_priv->channels.ptr[i];
 
-		chan = dev_priv->fifos[i];
-		if (!chan || (dev_priv->card_type >= NV_50 &&
-			      chan == dev_priv->fifos[0]))
-			continue;
-
-		ret = nouveau_fence_new(chan, &fence, true);
-		if (ret == 0) {
-			ret = nouveau_fence_wait(fence, NULL, false, false);
-			nouveau_fence_unref((void *)&fence);
-		}
-
-		if (ret) {
-			NV_ERROR(dev, "Failed to idle channel %d for suspend\n",
-				 chan->id);
-		}
+		if (chan && chan->pushbuf_bo)
+			nouveau_channel_idle(chan);
 	}
 
 	pgraph->fifo_access(dev, false);
@@ -219,17 +213,17 @@
 	pfifo->unload_context(dev);
 	pgraph->unload_context(dev);
 
-	NV_INFO(dev, "Suspending GPU objects...\n");
-	ret = nouveau_gpuobj_suspend(dev);
+	ret = pinstmem->suspend(dev);
 	if (ret) {
 		NV_ERROR(dev, "... failed: %d\n", ret);
 		goto out_abort;
 	}
 
-	ret = pinstmem->suspend(dev);
+	NV_INFO(dev, "Suspending GPU objects...\n");
+	ret = nouveau_gpuobj_suspend(dev);
 	if (ret) {
 		NV_ERROR(dev, "... failed: %d\n", ret);
-		nouveau_gpuobj_suspend_cleanup(dev);
+		pinstmem->resume(dev);
 		goto out_abort;
 	}
 
@@ -263,6 +257,9 @@
 	struct drm_crtc *crtc;
 	int ret, i;
 
+	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+		return 0;
+
 	nouveau_fbcon_save_disable_accel(dev);
 
 	NV_INFO(dev, "We're back, enabling device...\n");
@@ -294,17 +291,18 @@
 		}
 	}
 
+	NV_INFO(dev, "Restoring GPU objects...\n");
+	nouveau_gpuobj_resume(dev);
+
 	NV_INFO(dev, "Reinitialising engines...\n");
 	engine->instmem.resume(dev);
 	engine->mc.init(dev);
 	engine->timer.init(dev);
 	engine->fb.init(dev);
 	engine->graph.init(dev);
+	engine->crypt.init(dev);
 	engine->fifo.init(dev);
 
-	NV_INFO(dev, "Restoring GPU objects...\n");
-	nouveau_gpuobj_resume(dev);
-
 	nouveau_irq_postinstall(dev);
 
 	/* Re-write SKIPS, they'll have been lost over the suspend */
@@ -313,7 +311,7 @@
 		int j;
 
 		for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
-			chan = dev_priv->fifos[i];
+			chan = dev_priv->channels.ptr[i];
 			if (!chan || !chan->pushbuf_bo)
 				continue;
 
@@ -347,13 +345,11 @@
 
 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
 		struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+		u32 offset = nv_crtc->cursor.nvbo->bo.mem.start << PAGE_SHIFT;
 
-		nv_crtc->cursor.set_offset(nv_crtc,
-					nv_crtc->cursor.nvbo->bo.offset -
-					dev_priv->vm_vram_base);
-
+		nv_crtc->cursor.set_offset(nv_crtc, offset);
 		nv_crtc->cursor.set_pos(nv_crtc, nv_crtc->cursor_saved_x,
-			nv_crtc->cursor_saved_y);
+						 nv_crtc->cursor_saved_y);
 	}
 
 	/* Force CLUT to get re-loaded during modeset */
@@ -393,6 +389,9 @@
 	.irq_postinstall = nouveau_irq_postinstall,
 	.irq_uninstall = nouveau_irq_uninstall,
 	.irq_handler = nouveau_irq_handler,
+	.get_vblank_counter = drm_vblank_count,
+	.enable_vblank = nouveau_vblank_enable,
+	.disable_vblank = nouveau_vblank_disable,
 	.reclaim_buffers = drm_core_reclaim_buffers,
 	.ioctls = nouveau_ioctls,
 	.fops = {
@@ -403,6 +402,7 @@
 		.mmap = nouveau_ttm_mmap,
 		.poll = drm_poll,
 		.fasync = drm_fasync,
+		.read = drm_read,
 #if defined(CONFIG_COMPAT)
 		.compat_ioctl = nouveau_compat_ioctl,
 #endif
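The suspend path above now idles each channel through nouveau_channel_idle(), which (per the nouveau_channel.c hunk earlier) emits one more fence if submitted work is still outstanding and waits for it to signal. A toy sketch of that idle-by-fencing idea, with plain counters standing in for the channel's sequence/sequence_ack pair; this is not the real fence code.

/* Rough sketch of "idle by fencing": if the channel has submitted work
 * that has not been acknowledged yet, queue one more marker (the fence)
 * and wait for it.  The counters below are stand-ins. */
#include <stdio.h>

struct fake_chan {
	unsigned sequence;      /* last marker submitted */
	unsigned sequence_ack;  /* last marker the "GPU" completed */
};

static void fake_gpu_run(struct fake_chan *c)
{
	c->sequence_ack = c->sequence;   /* pretend all work finished */
}

static void chan_idle(struct fake_chan *c)
{
	if (c->sequence != c->sequence_ack) {
		unsigned fence = ++c->sequence;  /* emit a marker */

		fake_gpu_run(c);                 /* stand-in for the wait */
		if (c->sequence_ack >= fence)
			printf("channel idle at seq %u\n", fence);
	} else {
		printf("channel already idle\n");
	}
}

int main(void)
{
	struct fake_chan c = { .sequence = 3, .sequence_ack = 1 };

	chan_idle(&c);
	return 0;
}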
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 1c7db64..46e3257 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -54,22 +54,37 @@
 #include "nouveau_drm.h"
 #include "nouveau_reg.h"
 #include "nouveau_bios.h"
+#include "nouveau_util.h"
+
 struct nouveau_grctx;
+struct nouveau_vram;
+#include "nouveau_vm.h"
 
 #define MAX_NUM_DCB_ENTRIES 16
 
 #define NOUVEAU_MAX_CHANNEL_NR 128
 #define NOUVEAU_MAX_TILE_NR 15
 
-#define NV50_VM_MAX_VRAM (2*1024*1024*1024ULL)
-#define NV50_VM_BLOCK    (512*1024*1024ULL)
-#define NV50_VM_VRAM_NR  (NV50_VM_MAX_VRAM / NV50_VM_BLOCK)
+struct nouveau_vram {
+	struct drm_device *dev;
+
+	struct nouveau_vma bar_vma;
+	u8  page_shift;
+
+	struct list_head regions;
+	u32 memtype;
+	u64 offset;
+	u64 size;
+};
 
 struct nouveau_tile_reg {
-	struct nouveau_fence *fence;
-	uint32_t addr;
-	uint32_t size;
 	bool used;
+	uint32_t addr;
+	uint32_t limit;
+	uint32_t pitch;
+	uint32_t zcomp;
+	struct drm_mm_node *tag_mem;
+	struct nouveau_fence *fence;
 };
 
 struct nouveau_bo {
@@ -88,6 +103,7 @@
 
 	struct nouveau_channel *channel;
 
+	struct nouveau_vma vma;
 	bool mappable;
 	bool no_vm;
 
@@ -96,7 +112,6 @@
 	struct nouveau_tile_reg *tile;
 
 	struct drm_gem_object *gem;
-	struct drm_file *cpu_filp;
 	int pin_refcnt;
 };
 
@@ -133,20 +148,28 @@
 
 #define NVOBJ_ENGINE_SW		0
 #define NVOBJ_ENGINE_GR		1
-#define NVOBJ_ENGINE_DISPLAY	2
+#define NVOBJ_ENGINE_PPP	2
+#define NVOBJ_ENGINE_COPY	3
+#define NVOBJ_ENGINE_VP		4
+#define NVOBJ_ENGINE_CRYPT      5
+#define NVOBJ_ENGINE_BSP	6
+#define NVOBJ_ENGINE_DISPLAY	0xcafe0001
 #define NVOBJ_ENGINE_INT	0xdeadbeef
 
+#define NVOBJ_FLAG_DONT_MAP             (1 << 0)
 #define NVOBJ_FLAG_ZERO_ALLOC		(1 << 1)
 #define NVOBJ_FLAG_ZERO_FREE		(1 << 2)
+#define NVOBJ_FLAG_VM			(1 << 3)
+
+#define NVOBJ_CINST_GLOBAL	0xdeadbeef
+
 struct nouveau_gpuobj {
 	struct drm_device *dev;
 	struct kref refcount;
 	struct list_head list;
 
-	struct drm_mm_node *im_pramin;
-	struct nouveau_bo *im_backing;
-	uint32_t *im_backing_suspend;
-	int im_bound;
+	void *node;
+	u32 *suspend;
 
 	uint32_t flags;
 
@@ -162,10 +185,29 @@
 	void *priv;
 };
 
+struct nouveau_page_flip_state {
+	struct list_head head;
+	struct drm_pending_vblank_event *event;
+	int crtc, bpp, pitch, x, y;
+	uint64_t offset;
+};
+
+enum nouveau_channel_mutex_class {
+	NOUVEAU_UCHANNEL_MUTEX,
+	NOUVEAU_KCHANNEL_MUTEX
+};
+
 struct nouveau_channel {
 	struct drm_device *dev;
 	int id;
 
+	/* references to the channel data structure */
+	struct kref ref;
+	/* users of the hardware channel resources; the hardware
+	 * context will be kicked off when it reaches zero. */
+	atomic_t users;
+	struct mutex mutex;
+
 	/* owner of this fifo */
 	struct drm_file *file_priv;
 	/* mapping of the fifo itself */
@@ -198,16 +240,17 @@
 	/* PFIFO context */
 	struct nouveau_gpuobj *ramfc;
 	struct nouveau_gpuobj *cache;
+	void *fifo_priv;
 
 	/* PGRAPH context */
 	/* XXX may be merge 2 pointers as private data ??? */
 	struct nouveau_gpuobj *ramin_grctx;
+	struct nouveau_gpuobj *crypt_ctx;
 	void *pgraph_ctx;
 
 	/* NV50 VM */
+	struct nouveau_vm     *vm;
 	struct nouveau_gpuobj *vm_pd;
-	struct nouveau_gpuobj *vm_gart_pt;
-	struct nouveau_gpuobj *vm_vram_pt[NV50_VM_VRAM_NR];
 
 	/* Objects */
 	struct nouveau_gpuobj *ramin; /* Private instmem */
@@ -238,9 +281,11 @@
 
 	struct {
 		struct nouveau_gpuobj *vblsem;
+		uint32_t vblsem_head;
 		uint32_t vblsem_offset;
 		uint32_t vblsem_rval;
 		struct list_head vbl_wait;
+		struct list_head flip;
 	} nvsw;
 
 	struct {
@@ -258,11 +303,11 @@
 	int	(*suspend)(struct drm_device *dev);
 	void	(*resume)(struct drm_device *dev);
 
-	int	(*populate)(struct drm_device *, struct nouveau_gpuobj *,
-			    uint32_t *size);
-	void	(*clear)(struct drm_device *, struct nouveau_gpuobj *);
-	int	(*bind)(struct drm_device *, struct nouveau_gpuobj *);
-	int	(*unbind)(struct drm_device *, struct nouveau_gpuobj *);
+	int	(*get)(struct nouveau_gpuobj *, u32 size, u32 align);
+	void	(*put)(struct nouveau_gpuobj *);
+	int	(*map)(struct nouveau_gpuobj *);
+	void	(*unmap)(struct nouveau_gpuobj *);
+
 	void	(*flush)(struct drm_device *);
 };
 
@@ -279,15 +324,21 @@
 
 struct nouveau_fb_engine {
 	int num_tiles;
+	struct drm_mm tag_heap;
+	void *priv;
 
 	int  (*init)(struct drm_device *dev);
 	void (*takedown)(struct drm_device *dev);
 
-	void (*set_region_tiling)(struct drm_device *dev, int i, uint32_t addr,
-				 uint32_t size, uint32_t pitch);
+	void (*init_tile_region)(struct drm_device *dev, int i,
+				 uint32_t addr, uint32_t size,
+				 uint32_t pitch, uint32_t flags);
+	void (*set_tile_region)(struct drm_device *dev, int i);
+	void (*free_tile_region)(struct drm_device *dev, int i);
 };
 
 struct nouveau_fifo_engine {
+	void *priv;
 	int  channels;
 
 	struct nouveau_gpuobj *playlist[2];
@@ -310,22 +361,11 @@
 	void (*tlb_flush)(struct drm_device *dev);
 };
 
-struct nouveau_pgraph_object_method {
-	int id;
-	int (*exec)(struct nouveau_channel *chan, int grclass, int mthd,
-		      uint32_t data);
-};
-
-struct nouveau_pgraph_object_class {
-	int id;
-	bool software;
-	struct nouveau_pgraph_object_method *methods;
-};
-
 struct nouveau_pgraph_engine {
-	struct nouveau_pgraph_object_class *grclass;
 	bool accel_blocked;
+	bool registered;
 	int grctx_size;
+	void *priv;
 
 	/* NV2x/NV3x context table (0x400780) */
 	struct nouveau_gpuobj *ctx_table;
@@ -342,8 +382,7 @@
 	int  (*unload_context)(struct drm_device *);
 	void (*tlb_flush)(struct drm_device *dev);
 
-	void (*set_region_tiling)(struct drm_device *dev, int i, uint32_t addr,
-				  uint32_t size, uint32_t pitch);
+	void (*set_tile_region)(struct drm_device *dev, int i);
 };
 
 struct nouveau_display_engine {
@@ -355,13 +394,19 @@
 };
 
 struct nouveau_gpio_engine {
+	void *priv;
+
 	int  (*init)(struct drm_device *);
 	void (*takedown)(struct drm_device *);
 
 	int  (*get)(struct drm_device *, enum dcb_gpio_tag);
 	int  (*set)(struct drm_device *, enum dcb_gpio_tag, int state);
 
-	void (*irq_enable)(struct drm_device *, enum dcb_gpio_tag, bool on);
+	int  (*irq_register)(struct drm_device *, enum dcb_gpio_tag,
+			     void (*)(void *, int), void *);
+	void (*irq_unregister)(struct drm_device *, enum dcb_gpio_tag,
+			       void (*)(void *, int), void *);
+	bool (*irq_enable)(struct drm_device *, enum dcb_gpio_tag, bool on);
 };
 
 struct nouveau_pm_voltage_level {
@@ -437,6 +482,7 @@
 	struct nouveau_pm_level *cur;
 
 	struct device *hwmon;
+	struct notifier_block acpi_nb;
 
 	int (*clock_get)(struct drm_device *, u32 id);
 	void *(*clock_pre)(struct drm_device *, struct nouveau_pm_level *,
@@ -449,6 +495,25 @@
 	int (*temp_get)(struct drm_device *);
 };
 
+struct nouveau_crypt_engine {
+	bool registered;
+
+	int  (*init)(struct drm_device *);
+	void (*takedown)(struct drm_device *);
+	int  (*create_context)(struct nouveau_channel *);
+	void (*destroy_context)(struct nouveau_channel *);
+	void (*tlb_flush)(struct drm_device *dev);
+};
+
+struct nouveau_vram_engine {
+	int  (*init)(struct drm_device *);
+	int  (*get)(struct drm_device *, u64, u32 align, u32 size_nc,
+		    u32 type, struct nouveau_vram **);
+	void (*put)(struct drm_device *, struct nouveau_vram **);
+
+	bool (*flags_valid)(struct drm_device *, u32 tile_flags);
+};
+
 struct nouveau_engine {
 	struct nouveau_instmem_engine instmem;
 	struct nouveau_mc_engine      mc;
@@ -459,6 +524,8 @@
 	struct nouveau_display_engine display;
 	struct nouveau_gpio_engine    gpio;
 	struct nouveau_pm_engine      pm;
+	struct nouveau_crypt_engine   crypt;
+	struct nouveau_vram_engine    vram;
 };
 
 struct nouveau_pll_vals {
@@ -577,18 +644,15 @@
 	bool ramin_available;
 	struct drm_mm ramin_heap;
 	struct list_head gpuobj_list;
+	struct list_head classes;
 
 	struct nouveau_bo *vga_ram;
 
+	/* interrupt handling */
+	void (*irq_handler[32])(struct drm_device *);
+	bool msi_enabled;
 	struct workqueue_struct *wq;
 	struct work_struct irq_work;
-	struct work_struct hpd_work;
-
-	struct {
-		spinlock_t lock;
-		uint32_t hpd0_bits;
-		uint32_t hpd1_bits;
-	} hpd_state;
 
 	struct list_head vbl_waiting;
 
@@ -605,8 +669,10 @@
 		struct nouveau_bo *bo;
 	} fence;
 
-	int fifo_alloc_count;
-	struct nouveau_channel *fifos[NOUVEAU_MAX_CHANNEL_NR];
+	struct {
+		spinlock_t lock;
+		struct nouveau_channel *ptr[NOUVEAU_MAX_CHANNEL_NR];
+	} channels;
 
 	struct nouveau_engine engine;
 	struct nouveau_channel *channel;
@@ -632,12 +698,14 @@
 		uint64_t aper_free;
 
 		struct nouveau_gpuobj *sg_ctxdma;
-		struct page *sg_dummy_page;
-		dma_addr_t sg_dummy_bus;
+		struct nouveau_vma vma;
 	} gart_info;
 
 	/* nv10-nv40 tiling regions */
-	struct nouveau_tile_reg tile[NOUVEAU_MAX_TILE_NR];
+	struct {
+		struct nouveau_tile_reg reg[NOUVEAU_MAX_TILE_NR];
+		spinlock_t lock;
+	} tile;
 
 	/* VRAM/fb configuration */
 	uint64_t vram_size;
@@ -650,14 +718,12 @@
 	uint64_t fb_aper_free;
 	int fb_mtrr;
 
+	/* BAR control (NV50-) */
+	struct nouveau_vm *bar1_vm;
+	struct nouveau_vm *bar3_vm;
+
 	/* G8x/G9x virtual address space */
-	uint64_t vm_gart_base;
-	uint64_t vm_gart_size;
-	uint64_t vm_vram_base;
-	uint64_t vm_vram_size;
-	uint64_t vm_end;
-	struct nouveau_gpuobj *vm_vram_pt[NV50_VM_VRAM_NR];
-	int vm_vram_pt_nr;
+	struct nouveau_vm *chan_vm;
 
 	struct nvbios vbios;
 
@@ -674,6 +740,7 @@
 	struct backlight_device *backlight;
 
 	struct nouveau_channel *evo;
+	u32 evo_alloc;
 	struct {
 		struct dcb_entry *dcb;
 		u16 script;
@@ -686,6 +753,8 @@
 
 	struct nouveau_fbdev *nfbdev;
 	struct apertures_struct *apertures;
+
+	bool powered_down;
 };
 
 static inline struct drm_nouveau_private *
@@ -719,16 +788,6 @@
 	return 0;
 }
 
-#define NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(id, cl, ch) do {    \
-	struct drm_nouveau_private *nv = dev->dev_private;       \
-	if (!nouveau_channel_owner(dev, (cl), (id))) {           \
-		NV_ERROR(dev, "pid %d doesn't own channel %d\n", \
-			 DRM_CURRENTPID, (id));                  \
-		return -EPERM;                                   \
-	}                                                        \
-	(ch) = nv->fifos[(id)];                                  \
-} while (0)
-
 /* nouveau_drv.c */
 extern int nouveau_agpmode;
 extern int nouveau_duallink;
@@ -748,6 +807,7 @@
 extern int nouveau_override_conntype;
 extern char *nouveau_perflvl;
 extern int nouveau_perflvl_wr;
+extern int nouveau_msi;
 
 extern int nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state);
 extern int nouveau_pci_resume(struct pci_dev *pdev);
@@ -762,8 +822,10 @@
 				   struct drm_file *);
 extern int  nouveau_ioctl_setparam(struct drm_device *, void *data,
 				   struct drm_file *);
-extern bool nouveau_wait_until(struct drm_device *, uint64_t timeout,
-			       uint32_t reg, uint32_t mask, uint32_t val);
+extern bool nouveau_wait_eq(struct drm_device *, uint64_t timeout,
+			    uint32_t reg, uint32_t mask, uint32_t val);
+extern bool nouveau_wait_ne(struct drm_device *, uint64_t timeout,
+			    uint32_t reg, uint32_t mask, uint32_t val);
 extern bool nouveau_wait_for_idle(struct drm_device *);
 extern int  nouveau_card_init(struct drm_device *);
 
@@ -775,18 +837,18 @@
 extern int  nouveau_mem_init_agp(struct drm_device *);
 extern int  nouveau_mem_reset_agp(struct drm_device *);
 extern void nouveau_mem_close(struct drm_device *);
-extern struct nouveau_tile_reg *nv10_mem_set_tiling(struct drm_device *dev,
-						    uint32_t addr,
-						    uint32_t size,
-						    uint32_t pitch);
-extern void nv10_mem_expire_tiling(struct drm_device *dev,
-				   struct nouveau_tile_reg *tile,
-				   struct nouveau_fence *fence);
-extern int  nv50_mem_vm_bind_linear(struct drm_device *, uint64_t virt,
-				    uint32_t size, uint32_t flags,
-				    uint64_t phys);
-extern void nv50_mem_vm_unbind(struct drm_device *, uint64_t virt,
-			       uint32_t size);
+extern int  nouveau_mem_detect(struct drm_device *);
+extern bool nouveau_mem_flags_valid(struct drm_device *, u32 tile_flags);
+extern struct nouveau_tile_reg *nv10_mem_set_tiling(
+	struct drm_device *dev, uint32_t addr, uint32_t size,
+	uint32_t pitch, uint32_t flags);
+extern void nv10_mem_put_tile_region(struct drm_device *dev,
+				     struct nouveau_tile_reg *tile,
+				     struct nouveau_fence *fence);
+extern const struct ttm_mem_type_manager_func nouveau_vram_manager;
+
+/* nvc0_vram.c */
+extern const struct ttm_mem_type_manager_func nvc0_vram_manager;
 
 /* nouveau_notifier.c */
 extern int  nouveau_notifier_init_channel(struct nouveau_channel *);
@@ -803,21 +865,44 @@
 extern struct drm_ioctl_desc nouveau_ioctls[];
 extern int nouveau_max_ioctl;
 extern void nouveau_channel_cleanup(struct drm_device *, struct drm_file *);
-extern int  nouveau_channel_owner(struct drm_device *, struct drm_file *,
-				  int channel);
 extern int  nouveau_channel_alloc(struct drm_device *dev,
 				  struct nouveau_channel **chan,
 				  struct drm_file *file_priv,
 				  uint32_t fb_ctxdma, uint32_t tt_ctxdma);
-extern void nouveau_channel_free(struct nouveau_channel *);
+extern struct nouveau_channel *
+nouveau_channel_get_unlocked(struct nouveau_channel *);
+extern struct nouveau_channel *
+nouveau_channel_get(struct drm_device *, struct drm_file *, int id);
+extern void nouveau_channel_put_unlocked(struct nouveau_channel **);
+extern void nouveau_channel_put(struct nouveau_channel **);
+extern void nouveau_channel_ref(struct nouveau_channel *chan,
+				struct nouveau_channel **pchan);
+extern void nouveau_channel_idle(struct nouveau_channel *chan);
 
 /* nouveau_object.c */
+#define NVOBJ_CLASS(d,c,e) do {                                                \
+	int ret = nouveau_gpuobj_class_new((d), (c), NVOBJ_ENGINE_##e);        \
+	if (ret)                                                               \
+		return ret;                                                    \
+} while(0)
+
+#define NVOBJ_MTHD(d,c,m,e) do {                                               \
+	int ret = nouveau_gpuobj_mthd_new((d), (c), (m), (e));                 \
+	if (ret)                                                               \
+		return ret;                                                    \
+} while(0)
+
 extern int  nouveau_gpuobj_early_init(struct drm_device *);
 extern int  nouveau_gpuobj_init(struct drm_device *);
 extern void nouveau_gpuobj_takedown(struct drm_device *);
 extern int  nouveau_gpuobj_suspend(struct drm_device *dev);
-extern void nouveau_gpuobj_suspend_cleanup(struct drm_device *dev);
 extern void nouveau_gpuobj_resume(struct drm_device *dev);
+extern int  nouveau_gpuobj_class_new(struct drm_device *, u32 class, u32 eng);
+extern int  nouveau_gpuobj_mthd_new(struct drm_device *, u32 class, u32 mthd,
+				    int (*exec)(struct nouveau_channel *,
+					        u32 class, u32 mthd, u32 data));
+extern int  nouveau_gpuobj_mthd_call(struct nouveau_channel *, u32, u32, u32);
+extern int  nouveau_gpuobj_mthd_call2(struct drm_device *, int, u32, u32, u32);
 extern int nouveau_gpuobj_channel_init(struct nouveau_channel *,
 				       uint32_t vram_h, uint32_t tt_h);
 extern void nouveau_gpuobj_channel_takedown(struct nouveau_channel *);
@@ -832,21 +917,25 @@
 extern int nouveau_gpuobj_dma_new(struct nouveau_channel *, int class,
 				  uint64_t offset, uint64_t size, int access,
 				  int target, struct nouveau_gpuobj **);
-extern int nouveau_gpuobj_gart_dma_new(struct nouveau_channel *,
-				       uint64_t offset, uint64_t size,
-				       int access, struct nouveau_gpuobj **,
-				       uint32_t *o_ret);
-extern int nouveau_gpuobj_gr_new(struct nouveau_channel *, int class,
-				 struct nouveau_gpuobj **);
-extern int nouveau_gpuobj_sw_new(struct nouveau_channel *, int class,
-				 struct nouveau_gpuobj **);
+extern int nouveau_gpuobj_gr_new(struct nouveau_channel *, u32 handle, int class);
+extern int nv50_gpuobj_dma_new(struct nouveau_channel *, int class, u64 base,
+			       u64 size, int target, int access, u32 type,
+			       u32 comp, struct nouveau_gpuobj **pobj);
+extern void nv50_gpuobj_dma_init(struct nouveau_gpuobj *, u32 offset,
+				 int class, u64 base, u64 size, int target,
+				 int access, u32 type, u32 comp);
 extern int nouveau_ioctl_grobj_alloc(struct drm_device *, void *data,
 				     struct drm_file *);
 extern int nouveau_ioctl_gpuobj_free(struct drm_device *, void *data,
 				     struct drm_file *);
 
 /* nouveau_irq.c */
+extern int         nouveau_irq_init(struct drm_device *);
+extern void        nouveau_irq_fini(struct drm_device *);
 extern irqreturn_t nouveau_irq_handler(DRM_IRQ_ARGS);
+extern void        nouveau_irq_register(struct drm_device *, int status_bit,
+					void (*)(struct drm_device *));
+extern void        nouveau_irq_unregister(struct drm_device *, int status_bit);
 extern void        nouveau_irq_preinstall(struct drm_device *);
 extern int         nouveau_irq_postinstall(struct drm_device *);
 extern void        nouveau_irq_uninstall(struct drm_device *);
@@ -854,8 +943,8 @@
 /* nouveau_sgdma.c */
 extern int nouveau_sgdma_init(struct drm_device *);
 extern void nouveau_sgdma_takedown(struct drm_device *);
-extern int nouveau_sgdma_get_page(struct drm_device *, uint32_t offset,
-				  uint32_t *page);
+extern uint32_t nouveau_sgdma_get_physical(struct drm_device *,
+					   uint32_t offset);
 extern struct ttm_backend *nouveau_sgdma_init_ttm(struct drm_device *);
 
 /* nouveau_debugfs.c */
@@ -966,18 +1055,25 @@
 /* nv10_fb.c */
 extern int  nv10_fb_init(struct drm_device *);
 extern void nv10_fb_takedown(struct drm_device *);
-extern void nv10_fb_set_region_tiling(struct drm_device *, int, uint32_t,
-				      uint32_t, uint32_t);
+extern void nv10_fb_init_tile_region(struct drm_device *dev, int i,
+				     uint32_t addr, uint32_t size,
+				     uint32_t pitch, uint32_t flags);
+extern void nv10_fb_set_tile_region(struct drm_device *dev, int i);
+extern void nv10_fb_free_tile_region(struct drm_device *dev, int i);
 
 /* nv30_fb.c */
 extern int  nv30_fb_init(struct drm_device *);
 extern void nv30_fb_takedown(struct drm_device *);
+extern void nv30_fb_init_tile_region(struct drm_device *dev, int i,
+				     uint32_t addr, uint32_t size,
+				     uint32_t pitch, uint32_t flags);
+extern void nv30_fb_free_tile_region(struct drm_device *dev, int i);
 
 /* nv40_fb.c */
 extern int  nv40_fb_init(struct drm_device *);
 extern void nv40_fb_takedown(struct drm_device *);
-extern void nv40_fb_set_region_tiling(struct drm_device *, int, uint32_t,
-				      uint32_t, uint32_t);
+extern void nv40_fb_set_tile_region(struct drm_device *dev, int i);
+
 /* nv50_fb.c */
 extern int  nv50_fb_init(struct drm_device *);
 extern void nv50_fb_takedown(struct drm_device *);
@@ -989,6 +1085,7 @@
 
 /* nv04_fifo.c */
 extern int  nv04_fifo_init(struct drm_device *);
+extern void nv04_fifo_fini(struct drm_device *);
 extern void nv04_fifo_disable(struct drm_device *);
 extern void nv04_fifo_enable(struct drm_device *);
 extern bool nv04_fifo_reassign(struct drm_device *, bool);
@@ -998,19 +1095,18 @@
 extern void nv04_fifo_destroy_context(struct nouveau_channel *);
 extern int  nv04_fifo_load_context(struct nouveau_channel *);
 extern int  nv04_fifo_unload_context(struct drm_device *);
+extern void nv04_fifo_isr(struct drm_device *);
 
 /* nv10_fifo.c */
 extern int  nv10_fifo_init(struct drm_device *);
 extern int  nv10_fifo_channel_id(struct drm_device *);
 extern int  nv10_fifo_create_context(struct nouveau_channel *);
-extern void nv10_fifo_destroy_context(struct nouveau_channel *);
 extern int  nv10_fifo_load_context(struct nouveau_channel *);
 extern int  nv10_fifo_unload_context(struct drm_device *);
 
 /* nv40_fifo.c */
 extern int  nv40_fifo_init(struct drm_device *);
 extern int  nv40_fifo_create_context(struct nouveau_channel *);
-extern void nv40_fifo_destroy_context(struct nouveau_channel *);
 extern int  nv40_fifo_load_context(struct nouveau_channel *);
 extern int  nv40_fifo_unload_context(struct drm_device *);
 
@@ -1038,7 +1134,6 @@
 extern int  nvc0_fifo_unload_context(struct drm_device *);
 
 /* nv04_graph.c */
-extern struct nouveau_pgraph_object_class nv04_graph_grclass[];
 extern int  nv04_graph_init(struct drm_device *);
 extern void nv04_graph_takedown(struct drm_device *);
 extern void nv04_graph_fifo_access(struct drm_device *, bool);
@@ -1047,10 +1142,11 @@
 extern void nv04_graph_destroy_context(struct nouveau_channel *);
 extern int  nv04_graph_load_context(struct nouveau_channel *);
 extern int  nv04_graph_unload_context(struct drm_device *);
-extern void nv04_graph_context_switch(struct drm_device *);
+extern int  nv04_graph_mthd_page_flip(struct nouveau_channel *chan,
+				      u32 class, u32 mthd, u32 data);
+extern struct nouveau_bitfield nv04_graph_nsource[];
 
 /* nv10_graph.c */
-extern struct nouveau_pgraph_object_class nv10_graph_grclass[];
 extern int  nv10_graph_init(struct drm_device *);
 extern void nv10_graph_takedown(struct drm_device *);
 extern struct nouveau_channel *nv10_graph_channel(struct drm_device *);
@@ -1058,13 +1154,11 @@
 extern void nv10_graph_destroy_context(struct nouveau_channel *);
 extern int  nv10_graph_load_context(struct nouveau_channel *);
 extern int  nv10_graph_unload_context(struct drm_device *);
-extern void nv10_graph_context_switch(struct drm_device *);
-extern void nv10_graph_set_region_tiling(struct drm_device *, int, uint32_t,
-					 uint32_t, uint32_t);
+extern void nv10_graph_set_tile_region(struct drm_device *dev, int i);
+extern struct nouveau_bitfield nv10_graph_intr[];
+extern struct nouveau_bitfield nv10_graph_nstatus[];
 
 /* nv20_graph.c */
-extern struct nouveau_pgraph_object_class nv20_graph_grclass[];
-extern struct nouveau_pgraph_object_class nv30_graph_grclass[];
 extern int  nv20_graph_create_context(struct nouveau_channel *);
 extern void nv20_graph_destroy_context(struct nouveau_channel *);
 extern int  nv20_graph_load_context(struct nouveau_channel *);
@@ -1072,11 +1166,9 @@
 extern int  nv20_graph_init(struct drm_device *);
 extern void nv20_graph_takedown(struct drm_device *);
 extern int  nv30_graph_init(struct drm_device *);
-extern void nv20_graph_set_region_tiling(struct drm_device *, int, uint32_t,
-					 uint32_t, uint32_t);
+extern void nv20_graph_set_tile_region(struct drm_device *dev, int i);
 
 /* nv40_graph.c */
-extern struct nouveau_pgraph_object_class nv40_graph_grclass[];
 extern int  nv40_graph_init(struct drm_device *);
 extern void nv40_graph_takedown(struct drm_device *);
 extern struct nouveau_channel *nv40_graph_channel(struct drm_device *);
@@ -1085,11 +1177,9 @@
 extern int  nv40_graph_load_context(struct nouveau_channel *);
 extern int  nv40_graph_unload_context(struct drm_device *);
 extern void nv40_grctx_init(struct nouveau_grctx *);
-extern void nv40_graph_set_region_tiling(struct drm_device *, int, uint32_t,
-					 uint32_t, uint32_t);
+extern void nv40_graph_set_tile_region(struct drm_device *dev, int i);
 
 /* nv50_graph.c */
-extern struct nouveau_pgraph_object_class nv50_graph_grclass[];
 extern int  nv50_graph_init(struct drm_device *);
 extern void nv50_graph_takedown(struct drm_device *);
 extern void nv50_graph_fifo_access(struct drm_device *, bool);
@@ -1098,10 +1188,10 @@
 extern void nv50_graph_destroy_context(struct nouveau_channel *);
 extern int  nv50_graph_load_context(struct nouveau_channel *);
 extern int  nv50_graph_unload_context(struct drm_device *);
-extern void nv50_graph_context_switch(struct drm_device *);
 extern int  nv50_grctx_init(struct nouveau_grctx *);
 extern void nv50_graph_tlb_flush(struct drm_device *dev);
 extern void nv86_graph_tlb_flush(struct drm_device *dev);
+extern struct nouveau_enum nv50_data_error_names[];
 
 /* nvc0_graph.c */
 extern int  nvc0_graph_init(struct drm_device *);
@@ -1113,16 +1203,22 @@
 extern int  nvc0_graph_load_context(struct nouveau_channel *);
 extern int  nvc0_graph_unload_context(struct drm_device *);
 
+/* nv84_crypt.c */
+extern int  nv84_crypt_init(struct drm_device *dev);
+extern void nv84_crypt_fini(struct drm_device *dev);
+extern int  nv84_crypt_create_context(struct nouveau_channel *);
+extern void nv84_crypt_destroy_context(struct nouveau_channel *);
+extern void nv84_crypt_tlb_flush(struct drm_device *dev);
+
 /* nv04_instmem.c */
 extern int  nv04_instmem_init(struct drm_device *);
 extern void nv04_instmem_takedown(struct drm_device *);
 extern int  nv04_instmem_suspend(struct drm_device *);
 extern void nv04_instmem_resume(struct drm_device *);
-extern int  nv04_instmem_populate(struct drm_device *, struct nouveau_gpuobj *,
-				  uint32_t *size);
-extern void nv04_instmem_clear(struct drm_device *, struct nouveau_gpuobj *);
-extern int  nv04_instmem_bind(struct drm_device *, struct nouveau_gpuobj *);
-extern int  nv04_instmem_unbind(struct drm_device *, struct nouveau_gpuobj *);
+extern int  nv04_instmem_get(struct nouveau_gpuobj *, u32 size, u32 align);
+extern void nv04_instmem_put(struct nouveau_gpuobj *);
+extern int  nv04_instmem_map(struct nouveau_gpuobj *);
+extern void nv04_instmem_unmap(struct nouveau_gpuobj *);
 extern void nv04_instmem_flush(struct drm_device *);
 
 /* nv50_instmem.c */
@@ -1130,26 +1226,18 @@
 extern void nv50_instmem_takedown(struct drm_device *);
 extern int  nv50_instmem_suspend(struct drm_device *);
 extern void nv50_instmem_resume(struct drm_device *);
-extern int  nv50_instmem_populate(struct drm_device *, struct nouveau_gpuobj *,
-				  uint32_t *size);
-extern void nv50_instmem_clear(struct drm_device *, struct nouveau_gpuobj *);
-extern int  nv50_instmem_bind(struct drm_device *, struct nouveau_gpuobj *);
-extern int  nv50_instmem_unbind(struct drm_device *, struct nouveau_gpuobj *);
+extern int  nv50_instmem_get(struct nouveau_gpuobj *, u32 size, u32 align);
+extern void nv50_instmem_put(struct nouveau_gpuobj *);
+extern int  nv50_instmem_map(struct nouveau_gpuobj *);
+extern void nv50_instmem_unmap(struct nouveau_gpuobj *);
 extern void nv50_instmem_flush(struct drm_device *);
 extern void nv84_instmem_flush(struct drm_device *);
-extern void nv50_vm_flush(struct drm_device *, int engine);
 
 /* nvc0_instmem.c */
 extern int  nvc0_instmem_init(struct drm_device *);
 extern void nvc0_instmem_takedown(struct drm_device *);
 extern int  nvc0_instmem_suspend(struct drm_device *);
 extern void nvc0_instmem_resume(struct drm_device *);
-extern int  nvc0_instmem_populate(struct drm_device *, struct nouveau_gpuobj *,
-				  uint32_t *size);
-extern void nvc0_instmem_clear(struct drm_device *, struct nouveau_gpuobj *);
-extern int  nvc0_instmem_bind(struct drm_device *, struct nouveau_gpuobj *);
-extern int  nvc0_instmem_unbind(struct drm_device *, struct nouveau_gpuobj *);
-extern void nvc0_instmem_flush(struct drm_device *);
 
 /* nv04_mc.c */
 extern int  nv04_mc_init(struct drm_device *);
@@ -1219,6 +1307,9 @@
 extern void nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val);
 extern u32 nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index);
 extern void nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val);
+extern void nouveau_bo_fence(struct nouveau_bo *, struct nouveau_fence *);
+extern int nouveau_bo_validate(struct nouveau_bo *, bool interruptible,
+			       bool no_wait_reserve, bool no_wait_gpu);
 
 /* nouveau_fence.c */
 struct nouveau_fence;
@@ -1234,12 +1325,35 @@
 			       void (*work)(void *priv, bool signalled),
 			       void *priv);
 struct nouveau_channel *nouveau_fence_channel(struct nouveau_fence *);
-extern bool nouveau_fence_signalled(void *obj, void *arg);
-extern int nouveau_fence_wait(void *obj, void *arg, bool lazy, bool intr);
+
+extern bool __nouveau_fence_signalled(void *obj, void *arg);
+extern int __nouveau_fence_wait(void *obj, void *arg, bool lazy, bool intr);
+extern int __nouveau_fence_flush(void *obj, void *arg);
+extern void __nouveau_fence_unref(void **obj);
+extern void *__nouveau_fence_ref(void *obj);
+
+static inline bool nouveau_fence_signalled(struct nouveau_fence *obj)
+{
+	return __nouveau_fence_signalled(obj, NULL);
+}
+static inline int
+nouveau_fence_wait(struct nouveau_fence *obj, bool lazy, bool intr)
+{
+	return __nouveau_fence_wait(obj, NULL, lazy, intr);
+}
 extern int nouveau_fence_sync(struct nouveau_fence *, struct nouveau_channel *);
-extern int nouveau_fence_flush(void *obj, void *arg);
-extern void nouveau_fence_unref(void **obj);
-extern void *nouveau_fence_ref(void *obj);
+static inline int nouveau_fence_flush(struct nouveau_fence *obj)
+{
+	return __nouveau_fence_flush(obj, NULL);
+}
+static inline void nouveau_fence_unref(struct nouveau_fence **obj)
+{
+	__nouveau_fence_unref((void **)obj);
+}
+static inline struct nouveau_fence *nouveau_fence_ref(struct nouveau_fence *obj)
+{
+	return __nouveau_fence_ref(obj);
+}
 
 /* nouveau_gem.c */
 extern int nouveau_gem_new(struct drm_device *, struct nouveau_channel *,
@@ -1259,15 +1373,28 @@
 extern int nouveau_gem_ioctl_info(struct drm_device *, void *,
 				  struct drm_file *);
 
+/* nouveau_display.c */
+int nouveau_vblank_enable(struct drm_device *dev, int crtc);
+void nouveau_vblank_disable(struct drm_device *dev, int crtc);
+int nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
+			   struct drm_pending_vblank_event *event);
+int nouveau_finish_page_flip(struct nouveau_channel *,
+			     struct nouveau_page_flip_state *);
+
 /* nv10_gpio.c */
 int nv10_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag);
 int nv10_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state);
 
 /* nv50_gpio.c */
 int nv50_gpio_init(struct drm_device *dev);
+void nv50_gpio_fini(struct drm_device *dev);
 int nv50_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag);
 int nv50_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state);
-void nv50_gpio_irq_enable(struct drm_device *, enum dcb_gpio_tag, bool on);
+int  nv50_gpio_irq_register(struct drm_device *, enum dcb_gpio_tag,
+			    void (*)(void *, int), void *);
+void nv50_gpio_irq_unregister(struct drm_device *, enum dcb_gpio_tag,
+			      void (*)(void *, int), void *);
+bool nv50_gpio_irq_enable(struct drm_device *, enum dcb_gpio_tag, bool on);
 
 /* nv50_calc. */
 int nv50_calc_pll(struct drm_device *, struct pll_lims *, int clk,
@@ -1334,7 +1461,9 @@
 }
 
 #define nv_wait(dev, reg, mask, val) \
-	nouveau_wait_until(dev, 2000000000ULL, (reg), (mask), (val))
+	nouveau_wait_eq(dev, 2000000000ULL, (reg), (mask), (val))
+#define nv_wait_ne(dev, reg, mask, val) \
+	nouveau_wait_ne(dev, 2000000000ULL, (reg), (mask), (val))
 
 /* PRAMIN access */
 static inline u32 nv_ri32(struct drm_device *dev, unsigned offset)
@@ -1447,6 +1576,23 @@
 		dev->pdev->subsystem_device == sub_device;
 }
 
+/* memory type/access flags, do not match hardware values */
+#define NV_MEM_ACCESS_RO  1
+#define NV_MEM_ACCESS_WO  2
+#define NV_MEM_ACCESS_RW (NV_MEM_ACCESS_RO | NV_MEM_ACCESS_WO)
+#define NV_MEM_ACCESS_SYS 4
+#define NV_MEM_ACCESS_VM  8
+
+#define NV_MEM_TARGET_VRAM        0
+#define NV_MEM_TARGET_PCI         1
+#define NV_MEM_TARGET_PCI_NOSNOOP 2
+#define NV_MEM_TARGET_VM          3
+#define NV_MEM_TARGET_GART        4
+
+#define NV_MEM_TYPE_VM 0x7f
+#define NV_MEM_COMP_VM 0x03
+
+/* NV_SW object class */
 #define NV_SW                                                        0x0000506e
 #define NV_SW_DMA_SEMAPHORE                                          0x00000060
 #define NV_SW_SEMAPHORE_OFFSET                                       0x00000064
@@ -1457,5 +1603,6 @@
 #define NV_SW_VBLSEM_OFFSET                                          0x00000400
 #define NV_SW_VBLSEM_RELEASE_VALUE                                   0x00000404
 #define NV_SW_VBLSEM_RELEASE                                         0x00000408
+#define NV_SW_PAGE_FLIP                                              0x00000500
 
 #endif /* __NOUVEAU_DRV_H__ */
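Among the header changes above, the fence helpers become static inline wrappers around __nouveau_fence_*() functions that keep a generic void * signature, which lets the same functions be plugged into generic callback tables while ordinary driver code gets a type-checked interface. A standalone sketch of that wrapper-over-generic-callback pattern, with invented names:

/* Sketch of the typed-wrapper pattern: the __-prefixed function keeps the
 * void * signature a generic callback table expects, the static inline
 * gives callers a type-checked front end.  Names are placeholders. */
#include <stdbool.h>
#include <stdio.h>

struct fence { bool signalled; };

/* void * form, suitable for a generic callback table */
static bool __fence_signalled(void *obj, void *arg)
{
	(void)arg;
	return ((struct fence *)obj)->signalled;
}

/* typed convenience wrapper used by ordinary callers */
static inline bool fence_signalled(struct fence *f)
{
	return __fence_signalled(f, NULL);
}

int main(void)
{
	struct fence f = { .signalled = true };
	bool (*cb)(void *, void *) = __fence_signalled;  /* table slot */

	printf("via wrapper: %d, via callback: %d\n",
	       fence_signalled(&f), cb(&f, NULL));
	return 0;
}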
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 02a4d1f..a26d047 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -49,6 +49,102 @@
 #include "nouveau_fbcon.h"
 #include "nouveau_dma.h"
 
+static void
+nouveau_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
+{
+	struct nouveau_fbdev *nfbdev = info->par;
+	struct drm_device *dev = nfbdev->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	int ret;
+
+	if (info->state != FBINFO_STATE_RUNNING)
+		return;
+
+	ret = -ENODEV;
+	if (!in_interrupt() && !(info->flags & FBINFO_HWACCEL_DISABLED) &&
+	    mutex_trylock(&dev_priv->channel->mutex)) {
+		if (dev_priv->card_type < NV_50)
+			ret = nv04_fbcon_fillrect(info, rect);
+		else
+		if (dev_priv->card_type < NV_C0)
+			ret = nv50_fbcon_fillrect(info, rect);
+		else
+			ret = nvc0_fbcon_fillrect(info, rect);
+		mutex_unlock(&dev_priv->channel->mutex);
+	}
+
+	if (ret == 0)
+		return;
+
+	if (ret != -ENODEV)
+		nouveau_fbcon_gpu_lockup(info);
+	cfb_fillrect(info, rect);
+}
+
+static void
+nouveau_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *image)
+{
+	struct nouveau_fbdev *nfbdev = info->par;
+	struct drm_device *dev = nfbdev->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	int ret;
+
+	if (info->state != FBINFO_STATE_RUNNING)
+		return;
+
+	ret = -ENODEV;
+	if (!in_interrupt() && !(info->flags & FBINFO_HWACCEL_DISABLED) &&
+	    mutex_trylock(&dev_priv->channel->mutex)) {
+		if (dev_priv->card_type < NV_50)
+			ret = nv04_fbcon_copyarea(info, image);
+		else
+		if (dev_priv->card_type < NV_C0)
+			ret = nv50_fbcon_copyarea(info, image);
+		else
+			ret = nvc0_fbcon_copyarea(info, image);
+		mutex_unlock(&dev_priv->channel->mutex);
+	}
+
+	if (ret == 0)
+		return;
+
+	if (ret != -ENODEV)
+		nouveau_fbcon_gpu_lockup(info);
+	cfb_copyarea(info, image);
+}
+
+static void
+nouveau_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
+{
+	struct nouveau_fbdev *nfbdev = info->par;
+	struct drm_device *dev = nfbdev->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	int ret;
+
+	if (info->state != FBINFO_STATE_RUNNING)
+		return;
+
+	ret = -ENODEV;
+	if (!in_interrupt() && !(info->flags & FBINFO_HWACCEL_DISABLED) &&
+	    mutex_trylock(&dev_priv->channel->mutex)) {
+		if (dev_priv->card_type < NV_50)
+			ret = nv04_fbcon_imageblit(info, image);
+		else
+		if (dev_priv->card_type < NV_C0)
+			ret = nv50_fbcon_imageblit(info, image);
+		else
+			ret = nvc0_fbcon_imageblit(info, image);
+		mutex_unlock(&dev_priv->channel->mutex);
+	}
+
+	if (ret == 0)
+		return;
+
+	if (ret != -ENODEV)
+		nouveau_fbcon_gpu_lockup(info);
+	cfb_imageblit(info, image);
+}
+
 static int
 nouveau_fbcon_sync(struct fb_info *info)
 {
@@ -58,22 +154,36 @@
 	struct nouveau_channel *chan = dev_priv->channel;
 	int ret, i;
 
-	if (!chan || !chan->accel_done ||
+	if (!chan || !chan->accel_done || in_interrupt() ||
 	    info->state != FBINFO_STATE_RUNNING ||
 	    info->flags & FBINFO_HWACCEL_DISABLED)
 		return 0;
 
-	if (RING_SPACE(chan, 4)) {
+	if (!mutex_trylock(&chan->mutex))
+		return 0;
+
+	ret = RING_SPACE(chan, 4);
+	if (ret) {
+		mutex_unlock(&chan->mutex);
 		nouveau_fbcon_gpu_lockup(info);
 		return 0;
 	}
 
-	BEGIN_RING(chan, 0, 0x0104, 1);
-	OUT_RING(chan, 0);
-	BEGIN_RING(chan, 0, 0x0100, 1);
-	OUT_RING(chan, 0);
+	if (dev_priv->card_type >= NV_C0) {
+		BEGIN_NVC0(chan, 2, NvSub2D, 0x010c, 1);
+		OUT_RING  (chan, 0);
+		BEGIN_NVC0(chan, 2, NvSub2D, 0x0100, 1);
+		OUT_RING  (chan, 0);
+	} else {
+		BEGIN_RING(chan, 0, 0x0104, 1);
+		OUT_RING  (chan, 0);
+		BEGIN_RING(chan, 0, 0x0100, 1);
+		OUT_RING  (chan, 0);
+	}
+
 	nouveau_bo_wr32(chan->notifier_bo, chan->m2mf_ntfy + 3, 0xffffffff);
 	FIRE_RING(chan);
+	mutex_unlock(&chan->mutex);
 
 	ret = -EBUSY;
 	for (i = 0; i < 100000; i++) {
@@ -97,40 +207,24 @@
 	.owner = THIS_MODULE,
 	.fb_check_var = drm_fb_helper_check_var,
 	.fb_set_par = drm_fb_helper_set_par,
+	.fb_fillrect = nouveau_fbcon_fillrect,
+	.fb_copyarea = nouveau_fbcon_copyarea,
+	.fb_imageblit = nouveau_fbcon_imageblit,
+	.fb_sync = nouveau_fbcon_sync,
+	.fb_pan_display = drm_fb_helper_pan_display,
+	.fb_blank = drm_fb_helper_blank,
+	.fb_setcmap = drm_fb_helper_setcmap,
+	.fb_debug_enter = drm_fb_helper_debug_enter,
+	.fb_debug_leave = drm_fb_helper_debug_leave,
+};
+
+static struct fb_ops nouveau_fbcon_sw_ops = {
+	.owner = THIS_MODULE,
+	.fb_check_var = drm_fb_helper_check_var,
+	.fb_set_par = drm_fb_helper_set_par,
 	.fb_fillrect = cfb_fillrect,
 	.fb_copyarea = cfb_copyarea,
 	.fb_imageblit = cfb_imageblit,
-	.fb_sync = nouveau_fbcon_sync,
-	.fb_pan_display = drm_fb_helper_pan_display,
-	.fb_blank = drm_fb_helper_blank,
-	.fb_setcmap = drm_fb_helper_setcmap,
-	.fb_debug_enter = drm_fb_helper_debug_enter,
-	.fb_debug_leave = drm_fb_helper_debug_leave,
-};
-
-static struct fb_ops nv04_fbcon_ops = {
-	.owner = THIS_MODULE,
-	.fb_check_var = drm_fb_helper_check_var,
-	.fb_set_par = drm_fb_helper_set_par,
-	.fb_fillrect = nv04_fbcon_fillrect,
-	.fb_copyarea = nv04_fbcon_copyarea,
-	.fb_imageblit = nv04_fbcon_imageblit,
-	.fb_sync = nouveau_fbcon_sync,
-	.fb_pan_display = drm_fb_helper_pan_display,
-	.fb_blank = drm_fb_helper_blank,
-	.fb_setcmap = drm_fb_helper_setcmap,
-	.fb_debug_enter = drm_fb_helper_debug_enter,
-	.fb_debug_leave = drm_fb_helper_debug_leave,
-};
-
-static struct fb_ops nv50_fbcon_ops = {
-	.owner = THIS_MODULE,
-	.fb_check_var = drm_fb_helper_check_var,
-	.fb_set_par = drm_fb_helper_set_par,
-	.fb_fillrect = nv50_fbcon_fillrect,
-	.fb_copyarea = nv50_fbcon_copyarea,
-	.fb_imageblit = nv50_fbcon_imageblit,
-	.fb_sync = nouveau_fbcon_sync,
 	.fb_pan_display = drm_fb_helper_pan_display,
 	.fb_blank = drm_fb_helper_blank,
 	.fb_setcmap = drm_fb_helper_setcmap,
@@ -257,21 +351,16 @@
 			      FBINFO_HWACCEL_FILLRECT |
 			      FBINFO_HWACCEL_IMAGEBLIT;
 	info->flags |= FBINFO_CAN_FORCE_OUTPUT;
-	info->fbops = &nouveau_fbcon_ops;
-	info->fix.smem_start = dev->mode_config.fb_base + nvbo->bo.offset -
-			       dev_priv->vm_vram_base;
+	info->fbops = &nouveau_fbcon_sw_ops;
+	info->fix.smem_start = dev->mode_config.fb_base +
+			       (nvbo->bo.mem.start << PAGE_SHIFT);
 	info->fix.smem_len = size;
 
 	info->screen_base = nvbo_kmap_obj_iovirtual(nouveau_fb->nvbo);
 	info->screen_size = size;
 
-	drm_fb_helper_fill_fix(info, fb->pitch, fb->depth);
 	drm_fb_helper_fill_var(info, &nfbdev->helper, sizes->fb_width, sizes->fb_height);
 
-	/* FIXME: we really shouldn't expose mmio space at all */
-	info->fix.mmio_start = pci_resource_start(pdev, 1);
-	info->fix.mmio_len = pci_resource_len(pdev, 1);
-
 	/* Set aperture base/size for vesafb takeover */
 	info->apertures = dev_priv->apertures;
 	if (!info->apertures) {
@@ -285,19 +374,20 @@
 	info->pixmap.flags = FB_PIXMAP_SYSTEM;
 	info->pixmap.scan_align = 1;
 
+	mutex_unlock(&dev->struct_mutex);
+
 	if (dev_priv->channel && !nouveau_nofbaccel) {
-		switch (dev_priv->card_type) {
-		case NV_C0:
-			break;
-		case NV_50:
-			nv50_fbcon_accel_init(info);
-			info->fbops = &nv50_fbcon_ops;
-			break;
-		default:
-			nv04_fbcon_accel_init(info);
-			info->fbops = &nv04_fbcon_ops;
-			break;
-		};
+		ret = -ENODEV;
+		if (dev_priv->card_type < NV_50)
+			ret = nv04_fbcon_accel_init(info);
+		else
+		if (dev_priv->card_type < NV_C0)
+			ret = nv50_fbcon_accel_init(info);
+		else
+			ret = nvc0_fbcon_accel_init(info);
+
+		if (ret == 0)
+			info->fbops = &nouveau_fbcon_ops;
 	}
 
 	nouveau_fbcon_zfill(dev, nfbdev);
@@ -308,7 +398,6 @@
 						nouveau_fb->base.height,
 						nvbo->bo.offset, nvbo);
 
-	mutex_unlock(&dev->struct_mutex);
 	vga_switcheroo_client_fb_set(dev->pdev, info);
 	return 0;
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.h b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
index e7e1268..b73c29f8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
@@ -40,15 +40,21 @@
 
 void nouveau_fbcon_restore(void);
 
-void nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region);
-void nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
-void nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image);
+int nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region);
+int nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
+int nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image);
 int nv04_fbcon_accel_init(struct fb_info *info);
-void nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
-void nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region);
-void nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image);
+
+int nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
+int nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region);
+int nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image);
 int nv50_fbcon_accel_init(struct fb_info *info);
 
+int nvc0_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
+int nvc0_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region);
+int nvc0_fbcon_imageblit(struct fb_info *info, const struct fb_image *image);
+int nvc0_fbcon_accel_init(struct fb_info *info);
+
 void nouveau_fbcon_gpu_lockup(struct fb_info *info);
 
 int nouveau_fbcon_init(struct drm_device *dev);
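
Together with the fb_ops changes in the previous hunks, this header now has every
per-generation drawing hook return int instead of void, so a failed ring submission
can be reported and the console can drop back to the plain cfb_* routines. A minimal
standalone sketch of that shape, assuming invented stand-in names rather than the
driver's actual symbols:

#include <errno.h>
#include <stdio.h>

struct fillrect { int x, y, w, h; };

/* Stand-in for an accelerated fill hook that now returns int: it reports
 * -errno when the ring is unavailable instead of silently dropping the op. */
static int hw_fillrect(const struct fillrect *r, int ring_ok)
{
        if (!ring_ok)
                return -EBUSY;
        printf("HW fill %dx%d at (%d,%d)\n", r->w, r->h, r->x, r->y);
        return 0;
}

/* Stand-in for the plain software path (cfb_fillrect-like). */
static void sw_fillrect(const struct fillrect *r)
{
        printf("SW fill %dx%d at (%d,%d)\n", r->w, r->h, r->x, r->y);
}

/* Wrapper in the spirit of the new nouveau_fbcon_fillrect entry point:
 * try the accelerated path, punt to software when it reports failure. */
static void fillrect(const struct fillrect *r, int ring_ok)
{
        if (hw_fillrect(r, ring_ok) != 0)
                sw_fillrect(r);
}

int main(void)
{
        struct fillrect r = { 0, 0, 64, 32 };

        fillrect(&r, 1);        /* channel healthy: accelerated */
        fillrect(&r, 0);        /* ring unavailable: software fallback */
        return 0;
}

The same idea drives the accel-init hunk earlier: info->fbops starts out pointing
at the software ops and is only switched to the accelerated ops once the
per-generation accel_init call returns 0.
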
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index ab1bbfb..221b846 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -32,7 +32,8 @@
 #include "nouveau_dma.h"
 
 #define USE_REFCNT(dev) (nouveau_private(dev)->chipset >= 0x10)
-#define USE_SEMA(dev) (nouveau_private(dev)->chipset >= 0x17)
+#define USE_SEMA(dev) (nouveau_private(dev)->chipset >= 0x17 && \
+		       nouveau_private(dev)->card_type < NV_C0)
 
 struct nouveau_fence {
 	struct nouveau_channel *channel;
@@ -64,6 +65,7 @@
 	struct nouveau_fence *fence =
 		container_of(ref, struct nouveau_fence, refcount);
 
+	nouveau_channel_ref(NULL, &fence->channel);
 	kfree(fence);
 }
 
@@ -76,14 +78,17 @@
 
 	spin_lock(&chan->fence.lock);
 
-	if (USE_REFCNT(dev))
-		sequence = nvchan_rd32(chan, 0x48);
-	else
-		sequence = atomic_read(&chan->fence.last_sequence_irq);
+	/* Fetch the last sequence if the channel is still up and running */
+	if (likely(!list_empty(&chan->fence.pending))) {
+		if (USE_REFCNT(dev))
+			sequence = nvchan_rd32(chan, 0x48);
+		else
+			sequence = atomic_read(&chan->fence.last_sequence_irq);
 
-	if (chan->fence.sequence_ack == sequence)
-		goto out;
-	chan->fence.sequence_ack = sequence;
+		if (chan->fence.sequence_ack == sequence)
+			goto out;
+		chan->fence.sequence_ack = sequence;
+	}
 
 	list_for_each_entry_safe(fence, tmp, &chan->fence.pending, entry) {
 		sequence = fence->sequence;
@@ -113,13 +118,13 @@
 	if (!fence)
 		return -ENOMEM;
 	kref_init(&fence->refcount);
-	fence->channel = chan;
+	nouveau_channel_ref(chan, &fence->channel);
 
 	if (emit)
 		ret = nouveau_fence_emit(fence);
 
 	if (ret)
-		nouveau_fence_unref((void *)&fence);
+		nouveau_fence_unref(&fence);
 	*pfence = fence;
 	return ret;
 }
@@ -127,7 +132,7 @@
 struct nouveau_channel *
 nouveau_fence_channel(struct nouveau_fence *fence)
 {
-	return fence ? fence->channel : NULL;
+	return fence ? nouveau_channel_get_unlocked(fence->channel) : NULL;
 }
 
 int
@@ -135,6 +140,7 @@
 {
 	struct nouveau_channel *chan = fence->channel;
 	struct drm_device *dev = chan->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	int ret;
 
 	ret = RING_SPACE(chan, 2);
@@ -155,8 +161,15 @@
 	list_add_tail(&fence->entry, &chan->fence.pending);
 	spin_unlock(&chan->fence.lock);
 
-	BEGIN_RING(chan, NvSubSw, USE_REFCNT(dev) ? 0x0050 : 0x0150, 1);
-	OUT_RING(chan, fence->sequence);
+	if (USE_REFCNT(dev)) {
+		if (dev_priv->card_type < NV_C0)
+			BEGIN_RING(chan, NvSubSw, 0x0050, 1);
+		else
+			BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0050, 1);
+	} else {
+		BEGIN_RING(chan, NvSubSw, 0x0150, 1);
+	}
+	OUT_RING (chan, fence->sequence);
 	FIRE_RING(chan);
 
 	return 0;
@@ -182,7 +195,7 @@
 }
 
 void
-nouveau_fence_unref(void **sync_obj)
+__nouveau_fence_unref(void **sync_obj)
 {
 	struct nouveau_fence *fence = nouveau_fence(*sync_obj);
 
@@ -192,7 +205,7 @@
 }
 
 void *
-nouveau_fence_ref(void *sync_obj)
+__nouveau_fence_ref(void *sync_obj)
 {
 	struct nouveau_fence *fence = nouveau_fence(sync_obj);
 
@@ -201,7 +214,7 @@
 }
 
 bool
-nouveau_fence_signalled(void *sync_obj, void *sync_arg)
+__nouveau_fence_signalled(void *sync_obj, void *sync_arg)
 {
 	struct nouveau_fence *fence = nouveau_fence(sync_obj);
 	struct nouveau_channel *chan = fence->channel;
@@ -214,13 +227,14 @@
 }
 
 int
-nouveau_fence_wait(void *sync_obj, void *sync_arg, bool lazy, bool intr)
+__nouveau_fence_wait(void *sync_obj, void *sync_arg, bool lazy, bool intr)
 {
 	unsigned long timeout = jiffies + (3 * DRM_HZ);
+	unsigned long sleep_time = jiffies + 1;
 	int ret = 0;
 
 	while (1) {
-		if (nouveau_fence_signalled(sync_obj, sync_arg))
+		if (__nouveau_fence_signalled(sync_obj, sync_arg))
 			break;
 
 		if (time_after_eq(jiffies, timeout)) {
@@ -230,7 +244,7 @@
 
 		__set_current_state(intr ? TASK_INTERRUPTIBLE
 			: TASK_UNINTERRUPTIBLE);
-		if (lazy)
+		if (lazy && time_after_eq(jiffies, sleep_time))
 			schedule_timeout(1);
 
 		if (intr && signal_pending(current)) {
@@ -368,7 +382,7 @@
 
 	kref_get(&sema->ref);
 	nouveau_fence_work(fence, semaphore_work, sema);
-	nouveau_fence_unref((void *)&fence);
+	nouveau_fence_unref(&fence);
 
 	return 0;
 }
@@ -380,33 +394,49 @@
 	struct nouveau_channel *chan = nouveau_fence_channel(fence);
 	struct drm_device *dev = wchan->dev;
 	struct nouveau_semaphore *sema;
-	int ret;
+	int ret = 0;
 
-	if (likely(!fence || chan == wchan ||
-		   nouveau_fence_signalled(fence, NULL)))
-		return 0;
+	if (likely(!chan || chan == wchan ||
+		   nouveau_fence_signalled(fence)))
+		goto out;
 
 	sema = alloc_semaphore(dev);
 	if (!sema) {
 		/* Early card or broken userspace, fall back to
 		 * software sync. */
-		return nouveau_fence_wait(fence, NULL, false, false);
+		ret = nouveau_fence_wait(fence, true, false);
+		goto out;
+	}
+
+	/* Try to take chan's mutex; if we can't take it right away
+	 * we have to fall back to software sync to prevent lock
+	 * ordering issues.
+	 */
+	if (!mutex_trylock(&chan->mutex)) {
+		ret = nouveau_fence_wait(fence, true, false);
+		goto out_unref;
 	}
 
 	/* Make wchan wait until it gets signalled */
 	ret = emit_semaphore(wchan, NV_SW_SEMAPHORE_ACQUIRE, sema);
 	if (ret)
-		goto out;
+		goto out_unlock;
 
 	/* Signal the semaphore from chan */
 	ret = emit_semaphore(chan, NV_SW_SEMAPHORE_RELEASE, sema);
-out:
+
+out_unlock:
+	mutex_unlock(&chan->mutex);
+out_unref:
 	kref_put(&sema->ref, free_semaphore);
+out:
+	if (chan)
+		nouveau_channel_put_unlocked(&chan);
 	return ret;
 }
 
 int
-nouveau_fence_flush(void *sync_obj, void *sync_arg)
+__nouveau_fence_flush(void *sync_obj, void *sync_arg)
 {
 	return 0;
 }
@@ -420,30 +450,27 @@
 	int ret;
 
 	/* Create an NV_SW object for various sync purposes */
-	ret = nouveau_gpuobj_sw_new(chan, NV_SW, &obj);
+	ret = nouveau_gpuobj_gr_new(chan, NvSw, NV_SW);
 	if (ret)
 		return ret;
 
-	ret = nouveau_ramht_insert(chan, NvSw, obj);
-	nouveau_gpuobj_ref(NULL, &obj);
-	if (ret)
-		return ret;
-
-	ret = RING_SPACE(chan, 2);
-	if (ret)
-		return ret;
-	BEGIN_RING(chan, NvSubSw, 0, 1);
-	OUT_RING(chan, NvSw);
+	/* we leave subchannel empty for nvc0 */
+	if (dev_priv->card_type < NV_C0) {
+		ret = RING_SPACE(chan, 2);
+		if (ret)
+			return ret;
+		BEGIN_RING(chan, NvSubSw, 0, 1);
+		OUT_RING(chan, NvSw);
+	}
 
 	/* Create a DMA object for the shared cross-channel sync area. */
 	if (USE_SEMA(dev)) {
-		struct drm_mm_node *mem = dev_priv->fence.bo->bo.mem.mm_node;
+		struct ttm_mem_reg *mem = &dev_priv->fence.bo->bo.mem;
 
 		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
 					     mem->start << PAGE_SHIFT,
-					     mem->size << PAGE_SHIFT,
-					     NV_DMA_ACCESS_RW,
-					     NV_DMA_TARGET_VIDMEM, &obj);
+					     mem->size, NV_MEM_ACCESS_RW,
+					     NV_MEM_TARGET_VRAM, &obj);
 		if (ret)
 			return ret;
 
@@ -473,6 +500,8 @@
 {
 	struct nouveau_fence *tmp, *fence;
 
+	spin_lock(&chan->fence.lock);
+
 	list_for_each_entry_safe(fence, tmp, &chan->fence.pending, entry) {
 		fence->signalled = true;
 		list_del(&fence->entry);
@@ -482,6 +511,8 @@
 
 		kref_put(&fence->refcount, nouveau_fence_del);
 	}
+
+	spin_unlock(&chan->fence.lock);
 }
 
 int
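
The reworked wait path above keeps the same overall shape as before: poll the
signalled test, give up after a fixed timeout, and only yield the CPU on a lazy
wait (and, with the new sleep_time check, only once a tick has elapsed). A rough
userspace approximation of that loop, with the fence check reduced to a counter
and names that are illustrative only:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

/* Stand-in for the fence-signalled test; here it simply flips to true
 * after a few polls. */
static bool fence_signalled(void)
{
        static int polls;
        return ++polls > 5;
}

/* Poll until the fence signals, fail after timeout_s seconds, and only
 * sleep between polls when the caller asked for a lazy wait. */
static int wait_fence(bool lazy, double timeout_s)
{
        struct timespec start, now;
        clock_gettime(CLOCK_MONOTONIC, &start);

        for (;;) {
                if (fence_signalled())
                        return 0;

                clock_gettime(CLOCK_MONOTONIC, &now);
                double elapsed = (now.tv_sec - start.tv_sec) +
                                 (now.tv_nsec - start.tv_nsec) / 1e9;
                if (elapsed >= timeout_s)
                        return -1;      /* the driver returns -EBUSY here */

                if (lazy)
                        usleep(1000);   /* analogue of schedule_timeout(1) */
        }
}

int main(void)
{
        printf("wait returned %d\n", wait_fence(true, 3.0));
        return 0;
}

The cross-channel semaphore path above follows the same spirit: if the other
channel's mutex cannot be taken without blocking, it falls back to this kind of
CPU-side wait rather than risking a lock-ordering problem.
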
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 9a1fdcf..506c508 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -48,9 +48,6 @@
 		return;
 	nvbo->gem = NULL;
 
-	if (unlikely(nvbo->cpu_filp))
-		ttm_bo_synccpu_write_release(bo);
-
 	if (unlikely(nvbo->pin_refcnt)) {
 		nvbo->pin_refcnt = 1;
 		nouveau_bo_unpin(nvbo);
@@ -106,32 +103,6 @@
 	return 0;
 }
 
-static bool
-nouveau_gem_tile_flags_valid(struct drm_device *dev, uint32_t tile_flags)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-
-	if (dev_priv->card_type >= NV_50) {
-		switch (tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK) {
-		case 0x0000:
-		case 0x1800:
-		case 0x2800:
-		case 0x4800:
-		case 0x7000:
-		case 0x7400:
-		case 0x7a00:
-		case 0xe000:
-			return true;
-		}
-	} else {
-		if (!(tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK))
-			return true;
-	}
-
-	NV_ERROR(dev, "bad page flags: 0x%08x\n", tile_flags);
-	return false;
-}
-
 int
 nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
 		      struct drm_file *file_priv)
@@ -146,11 +117,6 @@
 	if (unlikely(dev_priv->ttm.bdev.dev_mapping == NULL))
 		dev_priv->ttm.bdev.dev_mapping = dev_priv->dev->dev_mapping;
 
-	if (req->channel_hint) {
-		NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(req->channel_hint,
-						     file_priv, chan);
-	}
-
 	if (req->info.domain & NOUVEAU_GEM_DOMAIN_VRAM)
 		flags |= TTM_PL_FLAG_VRAM;
 	if (req->info.domain & NOUVEAU_GEM_DOMAIN_GART)
@@ -158,13 +124,23 @@
 	if (!flags || req->info.domain & NOUVEAU_GEM_DOMAIN_CPU)
 		flags |= TTM_PL_FLAG_SYSTEM;
 
-	if (!nouveau_gem_tile_flags_valid(dev, req->info.tile_flags))
+	if (!dev_priv->engine.vram.flags_valid(dev, req->info.tile_flags)) {
+		NV_ERROR(dev, "bad page flags: 0x%08x\n", req->info.tile_flags);
 		return -EINVAL;
+	}
+
+	if (req->channel_hint) {
+		chan = nouveau_channel_get(dev, file_priv, req->channel_hint);
+		if (IS_ERR(chan))
+			return PTR_ERR(chan);
+	}
 
 	ret = nouveau_gem_new(dev, chan, req->info.size, req->align, flags,
 			      req->info.tile_mode, req->info.tile_flags, false,
 			      (req->info.domain & NOUVEAU_GEM_DOMAIN_MAPPABLE),
 			      &nvbo);
+	if (chan)
+		nouveau_channel_put(&chan);
 	if (ret)
 		return ret;
 
@@ -231,15 +207,8 @@
 
 	list_for_each_safe(entry, tmp, list) {
 		nvbo = list_entry(entry, struct nouveau_bo, entry);
-		if (likely(fence)) {
-			struct nouveau_fence *prev_fence;
 
-			spin_lock(&nvbo->bo.lock);
-			prev_fence = nvbo->bo.sync_obj;
-			nvbo->bo.sync_obj = nouveau_fence_ref(fence);
-			spin_unlock(&nvbo->bo.lock);
-			nouveau_fence_unref((void *)&prev_fence);
-		}
+		nouveau_bo_fence(nvbo, fence);
 
 		if (unlikely(nvbo->validate_mapped)) {
 			ttm_bo_kunmap(&nvbo->kmap);
@@ -299,14 +268,15 @@
 			return -EINVAL;
 		}
 
-		ret = ttm_bo_reserve(&nvbo->bo, false, false, true, sequence);
+		ret = ttm_bo_reserve(&nvbo->bo, true, false, true, sequence);
 		if (ret) {
 			validate_fini(op, NULL);
-			if (ret == -EAGAIN)
-				ret = ttm_bo_wait_unreserved(&nvbo->bo, false);
+			if (unlikely(ret == -EAGAIN))
+				ret = ttm_bo_wait_unreserved(&nvbo->bo, true);
 			drm_gem_object_unreference_unlocked(gem);
-			if (ret) {
-				NV_ERROR(dev, "fail reserve\n");
+			if (unlikely(ret)) {
+				if (ret != -ERESTARTSYS)
+					NV_ERROR(dev, "fail reserve\n");
 				return ret;
 			}
 			goto retry;
@@ -331,25 +301,6 @@
 			validate_fini(op, NULL);
 			return -EINVAL;
 		}
-
-		if (unlikely(atomic_read(&nvbo->bo.cpu_writers) > 0)) {
-			validate_fini(op, NULL);
-
-			if (nvbo->cpu_filp == file_priv) {
-				NV_ERROR(dev, "bo %p mapped by process trying "
-					      "to validate it!\n", nvbo);
-				return -EINVAL;
-			}
-
-			mutex_unlock(&drm_global_mutex);
-			ret = ttm_bo_wait_cpu(&nvbo->bo, false);
-			mutex_lock(&drm_global_mutex);
-			if (ret) {
-				NV_ERROR(dev, "fail wait_cpu\n");
-				return ret;
-			}
-			goto retry;
-		}
 	}
 
 	return 0;
@@ -383,11 +334,11 @@
 		}
 
 		nvbo->channel = (b->read_domains & (1 << 31)) ? NULL : chan;
-		ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement,
-				      false, false, false);
+		ret = nouveau_bo_validate(nvbo, true, false, false);
 		nvbo->channel = NULL;
 		if (unlikely(ret)) {
-			NV_ERROR(dev, "fail ttm_validate\n");
+			if (ret != -ERESTARTSYS)
+				NV_ERROR(dev, "fail ttm_validate\n");
 			return ret;
 		}
 
@@ -439,13 +390,15 @@
 
 	ret = validate_init(chan, file_priv, pbbo, nr_buffers, op);
 	if (unlikely(ret)) {
-		NV_ERROR(dev, "validate_init\n");
+		if (ret != -ERESTARTSYS)
+			NV_ERROR(dev, "validate_init\n");
 		return ret;
 	}
 
 	ret = validate_list(chan, &op->vram_list, pbbo, user_buffers);
 	if (unlikely(ret < 0)) {
-		NV_ERROR(dev, "validate vram_list\n");
+		if (ret != -ERESTARTSYS)
+			NV_ERROR(dev, "validate vram_list\n");
 		validate_fini(op, NULL);
 		return ret;
 	}
@@ -453,7 +406,8 @@
 
 	ret = validate_list(chan, &op->gart_list, pbbo, user_buffers);
 	if (unlikely(ret < 0)) {
-		NV_ERROR(dev, "validate gart_list\n");
+		if (ret != -ERESTARTSYS)
+			NV_ERROR(dev, "validate gart_list\n");
 		validate_fini(op, NULL);
 		return ret;
 	}
@@ -461,7 +415,8 @@
 
 	ret = validate_list(chan, &op->both_list, pbbo, user_buffers);
 	if (unlikely(ret < 0)) {
-		NV_ERROR(dev, "validate both_list\n");
+		if (ret != -ERESTARTSYS)
+			NV_ERROR(dev, "validate both_list\n");
 		validate_fini(op, NULL);
 		return ret;
 	}
@@ -557,9 +512,9 @@
 				data |= r->vor;
 		}
 
-		spin_lock(&nvbo->bo.lock);
+		spin_lock(&nvbo->bo.bdev->fence_lock);
 		ret = ttm_bo_wait(&nvbo->bo, false, false, false);
-		spin_unlock(&nvbo->bo.lock);
+		spin_unlock(&nvbo->bo.bdev->fence_lock);
 		if (ret) {
 			NV_ERROR(dev, "reloc wait_idle failed: %d\n", ret);
 			break;
@@ -585,7 +540,9 @@
 	struct nouveau_fence *fence = NULL;
 	int i, j, ret = 0, do_reloc = 0;
 
-	NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(req->channel, file_priv, chan);
+	chan = nouveau_channel_get(dev, file_priv, req->channel);
+	if (IS_ERR(chan))
+		return PTR_ERR(chan);
 
 	req->vram_available = dev_priv->fb_aper_free;
 	req->gart_available = dev_priv->gart_info.aper_free;
@@ -595,28 +552,34 @@
 	if (unlikely(req->nr_push > NOUVEAU_GEM_MAX_PUSH)) {
 		NV_ERROR(dev, "pushbuf push count exceeds limit: %d max %d\n",
 			 req->nr_push, NOUVEAU_GEM_MAX_PUSH);
+		nouveau_channel_put(&chan);
 		return -EINVAL;
 	}
 
 	if (unlikely(req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS)) {
 		NV_ERROR(dev, "pushbuf bo count exceeds limit: %d max %d\n",
 			 req->nr_buffers, NOUVEAU_GEM_MAX_BUFFERS);
+		nouveau_channel_put(&chan);
 		return -EINVAL;
 	}
 
 	if (unlikely(req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS)) {
 		NV_ERROR(dev, "pushbuf reloc count exceeds limit: %d max %d\n",
 			 req->nr_relocs, NOUVEAU_GEM_MAX_RELOCS);
+		nouveau_channel_put(&chan);
 		return -EINVAL;
 	}
 
 	push = u_memcpya(req->push, req->nr_push, sizeof(*push));
-	if (IS_ERR(push))
+	if (IS_ERR(push)) {
+		nouveau_channel_put(&chan);
 		return PTR_ERR(push);
+	}
 
 	bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo));
 	if (IS_ERR(bo)) {
 		kfree(push);
+		nouveau_channel_put(&chan);
 		return PTR_ERR(bo);
 	}
 
@@ -639,7 +602,8 @@
 	ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo, req->buffers,
 					   req->nr_buffers, &op, &do_reloc);
 	if (ret) {
-		NV_ERROR(dev, "validate: %d\n", ret);
+		if (ret != -ERESTARTSYS)
+			NV_ERROR(dev, "validate: %d\n", ret);
 		goto out;
 	}
 
@@ -732,7 +696,7 @@
 
 out:
 	validate_fini(&op, fence);
-	nouveau_fence_unref((void**)&fence);
+	nouveau_fence_unref(&fence);
 	kfree(bo);
 	kfree(push);
 
@@ -750,6 +714,7 @@
 		req->suffix1 = 0x00000000;
 	}
 
+	nouveau_channel_put(&chan);
 	return ret;
 }
 
@@ -781,26 +746,9 @@
 		return -ENOENT;
 	nvbo = nouveau_gem_object(gem);
 
-	if (nvbo->cpu_filp) {
-		if (nvbo->cpu_filp == file_priv)
-			goto out;
-
-		ret = ttm_bo_wait_cpu(&nvbo->bo, no_wait);
-		if (ret)
-			goto out;
-	}
-
-	if (req->flags & NOUVEAU_GEM_CPU_PREP_NOBLOCK) {
-		spin_lock(&nvbo->bo.lock);
-		ret = ttm_bo_wait(&nvbo->bo, false, false, no_wait);
-		spin_unlock(&nvbo->bo.lock);
-	} else {
-		ret = ttm_bo_synccpu_write_grab(&nvbo->bo, no_wait);
-		if (ret == 0)
-			nvbo->cpu_filp = file_priv;
-	}
-
-out:
+	spin_lock(&nvbo->bo.bdev->fence_lock);
+	ret = ttm_bo_wait(&nvbo->bo, true, true, no_wait);
+	spin_unlock(&nvbo->bo.bdev->fence_lock);
 	drm_gem_object_unreference_unlocked(gem);
 	return ret;
 }
@@ -809,26 +757,7 @@
 nouveau_gem_ioctl_cpu_fini(struct drm_device *dev, void *data,
 			   struct drm_file *file_priv)
 {
-	struct drm_nouveau_gem_cpu_prep *req = data;
-	struct drm_gem_object *gem;
-	struct nouveau_bo *nvbo;
-	int ret = -EINVAL;
-
-	gem = drm_gem_object_lookup(dev, file_priv, req->handle);
-	if (!gem)
-		return -ENOENT;
-	nvbo = nouveau_gem_object(gem);
-
-	if (nvbo->cpu_filp != file_priv)
-		goto out;
-	nvbo->cpu_filp = NULL;
-
-	ttm_bo_synccpu_write_release(&nvbo->bo);
-	ret = 0;
-
-out:
-	drm_gem_object_unreference_unlocked(gem);
-	return ret;
+	return 0;
 }
 
 int
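
Most of the churn in nouveau_gem.c above comes from replacing the old
channel-lookup macro with nouveau_channel_get()/nouveau_channel_put(), which
means every early return in the pushbuf ioctl now has to drop the channel
reference first. The shape of that pattern, as a standalone sketch with
hypothetical names standing in for the driver's helpers:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct channel { int refs; };

static struct channel *channel_get(void)
{
        struct channel *ch = calloc(1, sizeof(*ch));
        if (ch)
                ch->refs = 1;
        return ch;
}

static void channel_put(struct channel **pch)
{
        free(*pch);
        *pch = NULL;
}

/* Every failure after channel_get() must drop the reference before
 * returning, mirroring the nouveau_channel_put() calls added above. */
static int pushbuf_ioctl(int nr_push, int max_push)
{
        struct channel *chan = channel_get();
        if (!chan)
                return -ENOMEM;

        if (nr_push > max_push) {
                channel_put(&chan);
                return -EINVAL;
        }

        /* ... validate buffers and submit work ... */

        channel_put(&chan);
        return 0;
}

int main(void)
{
        printf("ok: %d, too big: %d\n",
               pushbuf_ioctl(4, 8), pushbuf_ioctl(16, 8));
        return 0;
}
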
diff --git a/drivers/gpu/drm/nouveau/nouveau_hw.c b/drivers/gpu/drm/nouveau/nouveau_hw.c
index b9672a0..053edf9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_hw.c
+++ b/drivers/gpu/drm/nouveau/nouveau_hw.c
@@ -953,7 +953,7 @@
 			NVWriteCRTC(dev, head, NV_PCRTC_850, regp->crtc_850);
 
 			reg900 = NVReadRAMDAC(dev, head, NV_PRAMDAC_900);
-			if (regp->crtc_cfg == NV_PCRTC_CONFIG_START_ADDRESS_HSYNC)
+			if (regp->crtc_cfg == NV10_PCRTC_CONFIG_START_ADDRESS_HSYNC)
 				NVWriteRAMDAC(dev, head, NV_PRAMDAC_900, reg900 | 0x10000);
 			else
 				NVWriteRAMDAC(dev, head, NV_PRAMDAC_900, reg900 & ~0x10000);
@@ -999,8 +999,8 @@
 		if (dev_priv->card_type == NV_10) {
 			/* Not waiting for vertical retrace before modifying
 			   CRE_53/CRE_54 causes lockups. */
-			nouveau_wait_until(dev, 650000000, NV_PRMCIO_INP0__COLOR, 0x8, 0x8);
-			nouveau_wait_until(dev, 650000000, NV_PRMCIO_INP0__COLOR, 0x8, 0x0);
+			nouveau_wait_eq(dev, 650000000, NV_PRMCIO_INP0__COLOR, 0x8, 0x8);
+			nouveau_wait_eq(dev, 650000000, NV_PRMCIO_INP0__COLOR, 0x8, 0x0);
 		}
 
 		wr_cio_state(dev, head, regp, NV_CIO_CRE_53);
@@ -1017,8 +1017,9 @@
 
 	NVWriteCRTC(dev, head, NV_PCRTC_START, regp->fb_start);
 
-	/* Setting 1 on this value gives you interrupts for every vblank period. */
-	NVWriteCRTC(dev, head, NV_PCRTC_INTR_EN_0, 0);
+	/* Enable vblank interrupts. */
+	NVWriteCRTC(dev, head, NV_PCRTC_INTR_EN_0,
+		    (dev->vblank_enabled[head] ? 1 : 0));
 	NVWriteCRTC(dev, head, NV_PCRTC_INTR_0, NV_PCRTC_INTR_0_VBLANK);
 }
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_irq.c b/drivers/gpu/drm/nouveau/nouveau_irq.c
index 7bfd9e6..2ba7265 100644
--- a/drivers/gpu/drm/nouveau/nouveau_irq.c
+++ b/drivers/gpu/drm/nouveau/nouveau_irq.c
@@ -36,18 +36,7 @@
 #include "nouveau_drv.h"
 #include "nouveau_reg.h"
 #include "nouveau_ramht.h"
-#include <linux/ratelimit.h>
-
-/* needed for hotplug irq */
-#include "nouveau_connector.h"
-#include "nv50_display.h"
-
-static DEFINE_RATELIMIT_STATE(nouveau_ratelimit_state, 3 * HZ, 20);
-
-static int nouveau_ratelimit(void)
-{
-	return __ratelimit(&nouveau_ratelimit_state);
-}
+#include "nouveau_util.h"
 
 void
 nouveau_irq_preinstall(struct drm_device *dev)
@@ -57,19 +46,19 @@
 	/* Master disable */
 	nv_wr32(dev, NV03_PMC_INTR_EN_0, 0);
 
-	if (dev_priv->card_type >= NV_50) {
-		INIT_WORK(&dev_priv->irq_work, nv50_display_irq_handler_bh);
-		INIT_WORK(&dev_priv->hpd_work, nv50_display_irq_hotplug_bh);
-		spin_lock_init(&dev_priv->hpd_state.lock);
-		INIT_LIST_HEAD(&dev_priv->vbl_waiting);
-	}
+	INIT_LIST_HEAD(&dev_priv->vbl_waiting);
 }
 
 int
 nouveau_irq_postinstall(struct drm_device *dev)
 {
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+
 	/* Master enable */
 	nv_wr32(dev, NV03_PMC_INTR_EN_0, NV_PMC_INTR_EN_0_MASTER_ENABLE);
+	if (dev_priv->msi_enabled)
+		nv_wr08(dev, 0x00088068, 0xff);
+
 	return 0;
 }
 
@@ -80,1178 +69,83 @@
 	nv_wr32(dev, NV03_PMC_INTR_EN_0, 0);
 }
 
-static int
-nouveau_call_method(struct nouveau_channel *chan, int class, int mthd, int data)
-{
-	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
-	struct nouveau_pgraph_object_method *grm;
-	struct nouveau_pgraph_object_class *grc;
-
-	grc = dev_priv->engine.graph.grclass;
-	while (grc->id) {
-		if (grc->id == class)
-			break;
-		grc++;
-	}
-
-	if (grc->id != class || !grc->methods)
-		return -ENOENT;
-
-	grm = grc->methods;
-	while (grm->id) {
-		if (grm->id == mthd)
-			return grm->exec(chan, class, mthd, data);
-		grm++;
-	}
-
-	return -ENOENT;
-}
-
-static bool
-nouveau_fifo_swmthd(struct nouveau_channel *chan, uint32_t addr, uint32_t data)
-{
-	struct drm_device *dev = chan->dev;
-	const int subc = (addr >> 13) & 0x7;
-	const int mthd = addr & 0x1ffc;
-
-	if (mthd == 0x0000) {
-		struct nouveau_gpuobj *gpuobj;
-
-		gpuobj = nouveau_ramht_find(chan, data);
-		if (!gpuobj)
-			return false;
-
-		if (gpuobj->engine != NVOBJ_ENGINE_SW)
-			return false;
-
-		chan->sw_subchannel[subc] = gpuobj->class;
-		nv_wr32(dev, NV04_PFIFO_CACHE1_ENGINE, nv_rd32(dev,
-			NV04_PFIFO_CACHE1_ENGINE) & ~(0xf << subc * 4));
-		return true;
-	}
-
-	/* hw object */
-	if (nv_rd32(dev, NV04_PFIFO_CACHE1_ENGINE) & (1 << (subc*4)))
-		return false;
-
-	if (nouveau_call_method(chan, chan->sw_subchannel[subc], mthd, data))
-		return false;
-
-	return true;
-}
-
-static void
-nouveau_fifo_irq_handler(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_engine *engine = &dev_priv->engine;
-	uint32_t status, reassign;
-	int cnt = 0;
-
-	reassign = nv_rd32(dev, NV03_PFIFO_CACHES) & 1;
-	while ((status = nv_rd32(dev, NV03_PFIFO_INTR_0)) && (cnt++ < 100)) {
-		struct nouveau_channel *chan = NULL;
-		uint32_t chid, get;
-
-		nv_wr32(dev, NV03_PFIFO_CACHES, 0);
-
-		chid = engine->fifo.channel_id(dev);
-		if (chid >= 0 && chid < engine->fifo.channels)
-			chan = dev_priv->fifos[chid];
-		get  = nv_rd32(dev, NV03_PFIFO_CACHE1_GET);
-
-		if (status & NV_PFIFO_INTR_CACHE_ERROR) {
-			uint32_t mthd, data;
-			int ptr;
-
-			/* NV_PFIFO_CACHE1_GET actually goes to 0xffc before
-			 * wrapping on my G80 chips, but CACHE1 isn't big
-			 * enough for this much data.. Tests show that it
-			 * wraps around to the start at GET=0x800.. No clue
-			 * as to why..
-			 */
-			ptr = (get & 0x7ff) >> 2;
-
-			if (dev_priv->card_type < NV_40) {
-				mthd = nv_rd32(dev,
-					NV04_PFIFO_CACHE1_METHOD(ptr));
-				data = nv_rd32(dev,
-					NV04_PFIFO_CACHE1_DATA(ptr));
-			} else {
-				mthd = nv_rd32(dev,
-					NV40_PFIFO_CACHE1_METHOD(ptr));
-				data = nv_rd32(dev,
-					NV40_PFIFO_CACHE1_DATA(ptr));
-			}
-
-			if (!chan || !nouveau_fifo_swmthd(chan, mthd, data)) {
-				NV_INFO(dev, "PFIFO_CACHE_ERROR - Ch %d/%d "
-					     "Mthd 0x%04x Data 0x%08x\n",
-					chid, (mthd >> 13) & 7, mthd & 0x1ffc,
-					data);
-			}
-
-			nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH, 0);
-			nv_wr32(dev, NV03_PFIFO_INTR_0,
-						NV_PFIFO_INTR_CACHE_ERROR);
-
-			nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0,
-				nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH0) & ~1);
-			nv_wr32(dev, NV03_PFIFO_CACHE1_GET, get + 4);
-			nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0,
-				nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH0) | 1);
-			nv_wr32(dev, NV04_PFIFO_CACHE1_HASH, 0);
-
-			nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH,
-				nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUSH) | 1);
-			nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
-
-			status &= ~NV_PFIFO_INTR_CACHE_ERROR;
-		}
-
-		if (status & NV_PFIFO_INTR_DMA_PUSHER) {
-			u32 dma_get = nv_rd32(dev, 0x003244);
-			u32 dma_put = nv_rd32(dev, 0x003240);
-			u32 push = nv_rd32(dev, 0x003220);
-			u32 state = nv_rd32(dev, 0x003228);
-
-			if (dev_priv->card_type == NV_50) {
-				u32 ho_get = nv_rd32(dev, 0x003328);
-				u32 ho_put = nv_rd32(dev, 0x003320);
-				u32 ib_get = nv_rd32(dev, 0x003334);
-				u32 ib_put = nv_rd32(dev, 0x003330);
-
-				if (nouveau_ratelimit())
-					NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%02x%08x "
-					     "Put 0x%02x%08x IbGet 0x%08x IbPut 0x%08x "
-					     "State 0x%08x Push 0x%08x\n",
-						chid, ho_get, dma_get, ho_put,
-						dma_put, ib_get, ib_put, state,
-						push);
-
-				/* METHOD_COUNT, in DMA_STATE on earlier chipsets */
-				nv_wr32(dev, 0x003364, 0x00000000);
-				if (dma_get != dma_put || ho_get != ho_put) {
-					nv_wr32(dev, 0x003244, dma_put);
-					nv_wr32(dev, 0x003328, ho_put);
-				} else
-				if (ib_get != ib_put) {
-					nv_wr32(dev, 0x003334, ib_put);
-				}
-			} else {
-				NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%08x "
-					     "Put 0x%08x State 0x%08x Push 0x%08x\n",
-					chid, dma_get, dma_put, state, push);
-
-				if (dma_get != dma_put)
-					nv_wr32(dev, 0x003244, dma_put);
-			}
-
-			nv_wr32(dev, 0x003228, 0x00000000);
-			nv_wr32(dev, 0x003220, 0x00000001);
-			nv_wr32(dev, 0x002100, NV_PFIFO_INTR_DMA_PUSHER);
-			status &= ~NV_PFIFO_INTR_DMA_PUSHER;
-		}
-
-		if (status & NV_PFIFO_INTR_SEMAPHORE) {
-			uint32_t sem;
-
-			status &= ~NV_PFIFO_INTR_SEMAPHORE;
-			nv_wr32(dev, NV03_PFIFO_INTR_0,
-				NV_PFIFO_INTR_SEMAPHORE);
-
-			sem = nv_rd32(dev, NV10_PFIFO_CACHE1_SEMAPHORE);
-			nv_wr32(dev, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1);
-
-			nv_wr32(dev, NV03_PFIFO_CACHE1_GET, get + 4);
-			nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
-		}
-
-		if (dev_priv->card_type == NV_50) {
-			if (status & 0x00000010) {
-				nv50_fb_vm_trap(dev, 1, "PFIFO_BAR_FAULT");
-				status &= ~0x00000010;
-				nv_wr32(dev, 0x002100, 0x00000010);
-			}
-		}
-
-		if (status) {
-			if (nouveau_ratelimit())
-				NV_INFO(dev, "PFIFO_INTR 0x%08x - Ch %d\n",
-					status, chid);
-			nv_wr32(dev, NV03_PFIFO_INTR_0, status);
-			status = 0;
-		}
-
-		nv_wr32(dev, NV03_PFIFO_CACHES, reassign);
-	}
-
-	if (status) {
-		NV_INFO(dev, "PFIFO still angry after %d spins, halt\n", cnt);
-		nv_wr32(dev, 0x2140, 0);
-		nv_wr32(dev, 0x140, 0);
-	}
-
-	nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PFIFO_PENDING);
-}
-
-struct nouveau_bitfield_names {
-	uint32_t mask;
-	const char *name;
-};
-
-static struct nouveau_bitfield_names nstatus_names[] =
-{
-	{ NV04_PGRAPH_NSTATUS_STATE_IN_USE,       "STATE_IN_USE" },
-	{ NV04_PGRAPH_NSTATUS_INVALID_STATE,      "INVALID_STATE" },
-	{ NV04_PGRAPH_NSTATUS_BAD_ARGUMENT,       "BAD_ARGUMENT" },
-	{ NV04_PGRAPH_NSTATUS_PROTECTION_FAULT,   "PROTECTION_FAULT" }
-};
-
-static struct nouveau_bitfield_names nstatus_names_nv10[] =
-{
-	{ NV10_PGRAPH_NSTATUS_STATE_IN_USE,       "STATE_IN_USE" },
-	{ NV10_PGRAPH_NSTATUS_INVALID_STATE,      "INVALID_STATE" },
-	{ NV10_PGRAPH_NSTATUS_BAD_ARGUMENT,       "BAD_ARGUMENT" },
-	{ NV10_PGRAPH_NSTATUS_PROTECTION_FAULT,   "PROTECTION_FAULT" }
-};
-
-static struct nouveau_bitfield_names nsource_names[] =
-{
-	{ NV03_PGRAPH_NSOURCE_NOTIFICATION,       "NOTIFICATION" },
-	{ NV03_PGRAPH_NSOURCE_DATA_ERROR,         "DATA_ERROR" },
-	{ NV03_PGRAPH_NSOURCE_PROTECTION_ERROR,   "PROTECTION_ERROR" },
-	{ NV03_PGRAPH_NSOURCE_RANGE_EXCEPTION,    "RANGE_EXCEPTION" },
-	{ NV03_PGRAPH_NSOURCE_LIMIT_COLOR,        "LIMIT_COLOR" },
-	{ NV03_PGRAPH_NSOURCE_LIMIT_ZETA,         "LIMIT_ZETA" },
-	{ NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD,       "ILLEGAL_MTHD" },
-	{ NV03_PGRAPH_NSOURCE_DMA_R_PROTECTION,   "DMA_R_PROTECTION" },
-	{ NV03_PGRAPH_NSOURCE_DMA_W_PROTECTION,   "DMA_W_PROTECTION" },
-	{ NV03_PGRAPH_NSOURCE_FORMAT_EXCEPTION,   "FORMAT_EXCEPTION" },
-	{ NV03_PGRAPH_NSOURCE_PATCH_EXCEPTION,    "PATCH_EXCEPTION" },
-	{ NV03_PGRAPH_NSOURCE_STATE_INVALID,      "STATE_INVALID" },
-	{ NV03_PGRAPH_NSOURCE_DOUBLE_NOTIFY,      "DOUBLE_NOTIFY" },
-	{ NV03_PGRAPH_NSOURCE_NOTIFY_IN_USE,      "NOTIFY_IN_USE" },
-	{ NV03_PGRAPH_NSOURCE_METHOD_CNT,         "METHOD_CNT" },
-	{ NV03_PGRAPH_NSOURCE_BFR_NOTIFICATION,   "BFR_NOTIFICATION" },
-	{ NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION, "DMA_VTX_PROTECTION" },
-	{ NV03_PGRAPH_NSOURCE_DMA_WIDTH_A,        "DMA_WIDTH_A" },
-	{ NV03_PGRAPH_NSOURCE_DMA_WIDTH_B,        "DMA_WIDTH_B" },
-};
-
-static void
-nouveau_print_bitfield_names_(uint32_t value,
-				const struct nouveau_bitfield_names *namelist,
-				const int namelist_len)
-{
-	/*
-	 * Caller must have already printed the KERN_* log level for us.
-	 * Also the caller is responsible for adding the newline.
-	 */
-	int i;
-	for (i = 0; i < namelist_len; ++i) {
-		uint32_t mask = namelist[i].mask;
-		if (value & mask) {
-			printk(" %s", namelist[i].name);
-			value &= ~mask;
-		}
-	}
-	if (value)
-		printk(" (unknown bits 0x%08x)", value);
-}
-#define nouveau_print_bitfield_names(val, namelist) \
-	nouveau_print_bitfield_names_((val), (namelist), ARRAY_SIZE(namelist))
-
-struct nouveau_enum_names {
-	uint32_t value;
-	const char *name;
-};
-
-static void
-nouveau_print_enum_names_(uint32_t value,
-				const struct nouveau_enum_names *namelist,
-				const int namelist_len)
-{
-	/*
-	 * Caller must have already printed the KERN_* log level for us.
-	 * Also the caller is responsible for adding the newline.
-	 */
-	int i;
-	for (i = 0; i < namelist_len; ++i) {
-		if (value == namelist[i].value) {
-			printk("%s", namelist[i].name);
-			return;
-		}
-	}
-	printk("unknown value 0x%08x", value);
-}
-#define nouveau_print_enum_names(val, namelist) \
-	nouveau_print_enum_names_((val), (namelist), ARRAY_SIZE(namelist))
-
-static int
-nouveau_graph_chid_from_grctx(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	uint32_t inst;
-	int i;
-
-	if (dev_priv->card_type < NV_40)
-		return dev_priv->engine.fifo.channels;
-	else
-	if (dev_priv->card_type < NV_50) {
-		inst = (nv_rd32(dev, 0x40032c) & 0xfffff) << 4;
-
-		for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
-			struct nouveau_channel *chan = dev_priv->fifos[i];
-
-			if (!chan || !chan->ramin_grctx)
-				continue;
-
-			if (inst == chan->ramin_grctx->pinst)
-				break;
-		}
-	} else {
-		inst = (nv_rd32(dev, 0x40032c) & 0xfffff) << 12;
-
-		for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
-			struct nouveau_channel *chan = dev_priv->fifos[i];
-
-			if (!chan || !chan->ramin)
-				continue;
-
-			if (inst == chan->ramin->vinst)
-				break;
-		}
-	}
-
-
-	return i;
-}
-
-static int
-nouveau_graph_trapped_channel(struct drm_device *dev, int *channel_ret)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_engine *engine = &dev_priv->engine;
-	int channel;
-
-	if (dev_priv->card_type < NV_10)
-		channel = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 24) & 0xf;
-	else
-	if (dev_priv->card_type < NV_40)
-		channel = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 20) & 0x1f;
-	else
-		channel = nouveau_graph_chid_from_grctx(dev);
-
-	if (channel >= engine->fifo.channels || !dev_priv->fifos[channel]) {
-		NV_ERROR(dev, "AIII, invalid/inactive channel id %d\n", channel);
-		return -EINVAL;
-	}
-
-	*channel_ret = channel;
-	return 0;
-}
-
-struct nouveau_pgraph_trap {
-	int channel;
-	int class;
-	int subc, mthd, size;
-	uint32_t data, data2;
-	uint32_t nsource, nstatus;
-};
-
-static void
-nouveau_graph_trap_info(struct drm_device *dev,
-			struct nouveau_pgraph_trap *trap)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	uint32_t address;
-
-	trap->nsource = trap->nstatus = 0;
-	if (dev_priv->card_type < NV_50) {
-		trap->nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
-		trap->nstatus = nv_rd32(dev, NV03_PGRAPH_NSTATUS);
-	}
-
-	if (nouveau_graph_trapped_channel(dev, &trap->channel))
-		trap->channel = -1;
-	address = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR);
-
-	trap->mthd = address & 0x1FFC;
-	trap->data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA);
-	if (dev_priv->card_type < NV_10) {
-		trap->subc  = (address >> 13) & 0x7;
-	} else {
-		trap->subc  = (address >> 16) & 0x7;
-		trap->data2 = nv_rd32(dev, NV10_PGRAPH_TRAPPED_DATA_HIGH);
-	}
-
-	if (dev_priv->card_type < NV_10)
-		trap->class = nv_rd32(dev, 0x400180 + trap->subc*4) & 0xFF;
-	else if (dev_priv->card_type < NV_40)
-		trap->class = nv_rd32(dev, 0x400160 + trap->subc*4) & 0xFFF;
-	else if (dev_priv->card_type < NV_50)
-		trap->class = nv_rd32(dev, 0x400160 + trap->subc*4) & 0xFFFF;
-	else
-		trap->class = nv_rd32(dev, 0x400814);
-}
-
-static void
-nouveau_graph_dump_trap_info(struct drm_device *dev, const char *id,
-			     struct nouveau_pgraph_trap *trap)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	uint32_t nsource = trap->nsource, nstatus = trap->nstatus;
-
-	if (dev_priv->card_type < NV_50) {
-		NV_INFO(dev, "%s - nSource:", id);
-		nouveau_print_bitfield_names(nsource, nsource_names);
-		printk(", nStatus:");
-		if (dev_priv->card_type < NV_10)
-			nouveau_print_bitfield_names(nstatus, nstatus_names);
-		else
-			nouveau_print_bitfield_names(nstatus, nstatus_names_nv10);
-		printk("\n");
-	}
-
-	NV_INFO(dev, "%s - Ch %d/%d Class 0x%04x Mthd 0x%04x "
-					"Data 0x%08x:0x%08x\n",
-					id, trap->channel, trap->subc,
-					trap->class, trap->mthd,
-					trap->data2, trap->data);
-}
-
-static int
-nouveau_pgraph_intr_swmthd(struct drm_device *dev,
-			   struct nouveau_pgraph_trap *trap)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-
-	if (trap->channel < 0 ||
-	    trap->channel >= dev_priv->engine.fifo.channels ||
-	    !dev_priv->fifos[trap->channel])
-		return -ENODEV;
-
-	return nouveau_call_method(dev_priv->fifos[trap->channel],
-				   trap->class, trap->mthd, trap->data);
-}
-
-static inline void
-nouveau_pgraph_intr_notify(struct drm_device *dev, uint32_t nsource)
-{
-	struct nouveau_pgraph_trap trap;
-	int unhandled = 0;
-
-	nouveau_graph_trap_info(dev, &trap);
-
-	if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
-		if (nouveau_pgraph_intr_swmthd(dev, &trap))
-			unhandled = 1;
-	} else {
-		unhandled = 1;
-	}
-
-	if (unhandled)
-		nouveau_graph_dump_trap_info(dev, "PGRAPH_NOTIFY", &trap);
-}
-
-
-static inline void
-nouveau_pgraph_intr_error(struct drm_device *dev, uint32_t nsource)
-{
-	struct nouveau_pgraph_trap trap;
-	int unhandled = 0;
-
-	nouveau_graph_trap_info(dev, &trap);
-	trap.nsource = nsource;
-
-	if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
-		if (nouveau_pgraph_intr_swmthd(dev, &trap))
-			unhandled = 1;
-	} else if (nsource & NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION) {
-		uint32_t v = nv_rd32(dev, 0x402000);
-		nv_wr32(dev, 0x402000, v);
-
-		/* dump the error anyway for now: it's useful for
-		   Gallium development */
-		unhandled = 1;
-	} else {
-		unhandled = 1;
-	}
-
-	if (unhandled && nouveau_ratelimit())
-		nouveau_graph_dump_trap_info(dev, "PGRAPH_ERROR", &trap);
-}
-
-static inline void
-nouveau_pgraph_intr_context_switch(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_engine *engine = &dev_priv->engine;
-	uint32_t chid;
-
-	chid = engine->fifo.channel_id(dev);
-	NV_DEBUG(dev, "PGRAPH context switch interrupt channel %x\n", chid);
-
-	switch (dev_priv->card_type) {
-	case NV_04:
-		nv04_graph_context_switch(dev);
-		break;
-	case NV_10:
-		nv10_graph_context_switch(dev);
-		break;
-	default:
-		NV_ERROR(dev, "Context switch not implemented\n");
-		break;
-	}
-}
-
-static void
-nouveau_pgraph_irq_handler(struct drm_device *dev)
-{
-	uint32_t status;
-
-	while ((status = nv_rd32(dev, NV03_PGRAPH_INTR))) {
-		uint32_t nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
-
-		if (status & NV_PGRAPH_INTR_NOTIFY) {
-			nouveau_pgraph_intr_notify(dev, nsource);
-
-			status &= ~NV_PGRAPH_INTR_NOTIFY;
-			nv_wr32(dev, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_NOTIFY);
-		}
-
-		if (status & NV_PGRAPH_INTR_ERROR) {
-			nouveau_pgraph_intr_error(dev, nsource);
-
-			status &= ~NV_PGRAPH_INTR_ERROR;
-			nv_wr32(dev, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_ERROR);
-		}
-
-		if (status & NV_PGRAPH_INTR_CONTEXT_SWITCH) {
-			status &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
-			nv_wr32(dev, NV03_PGRAPH_INTR,
-				 NV_PGRAPH_INTR_CONTEXT_SWITCH);
-
-			nouveau_pgraph_intr_context_switch(dev);
-		}
-
-		if (status) {
-			NV_INFO(dev, "Unhandled PGRAPH_INTR - 0x%08x\n", status);
-			nv_wr32(dev, NV03_PGRAPH_INTR, status);
-		}
-
-		if ((nv_rd32(dev, NV04_PGRAPH_FIFO) & (1 << 0)) == 0)
-			nv_wr32(dev, NV04_PGRAPH_FIFO, 1);
-	}
-
-	nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING);
-}
-
-static struct nouveau_enum_names nv50_mp_exec_error_names[] =
-{
-	{ 3, "STACK_UNDERFLOW" },
-	{ 4, "QUADON_ACTIVE" },
-	{ 8, "TIMEOUT" },
-	{ 0x10, "INVALID_OPCODE" },
-	{ 0x40, "BREAKPOINT" },
-};
-
-static void
-nv50_pgraph_mp_trap(struct drm_device *dev, int tpid, int display)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	uint32_t units = nv_rd32(dev, 0x1540);
-	uint32_t addr, mp10, status, pc, oplow, ophigh;
-	int i;
-	int mps = 0;
-	for (i = 0; i < 4; i++) {
-		if (!(units & 1 << (i+24)))
-			continue;
-		if (dev_priv->chipset < 0xa0)
-			addr = 0x408200 + (tpid << 12) + (i << 7);
-		else
-			addr = 0x408100 + (tpid << 11) + (i << 7);
-		mp10 = nv_rd32(dev, addr + 0x10);
-		status = nv_rd32(dev, addr + 0x14);
-		if (!status)
-			continue;
-		if (display) {
-			nv_rd32(dev, addr + 0x20);
-			pc = nv_rd32(dev, addr + 0x24);
-			oplow = nv_rd32(dev, addr + 0x70);
-			ophigh= nv_rd32(dev, addr + 0x74);
-			NV_INFO(dev, "PGRAPH_TRAP_MP_EXEC - "
-					"TP %d MP %d: ", tpid, i);
-			nouveau_print_enum_names(status,
-					nv50_mp_exec_error_names);
-			printk(" at %06x warp %d, opcode %08x %08x\n",
-					pc&0xffffff, pc >> 24,
-					oplow, ophigh);
-		}
-		nv_wr32(dev, addr + 0x10, mp10);
-		nv_wr32(dev, addr + 0x14, 0);
-		mps++;
-	}
-	if (!mps && display)
-		NV_INFO(dev, "PGRAPH_TRAP_MP_EXEC - TP %d: "
-				"No MPs claiming errors?\n", tpid);
-}
-
-static void
-nv50_pgraph_tp_trap(struct drm_device *dev, int type, uint32_t ustatus_old,
-		uint32_t ustatus_new, int display, const char *name)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	int tps = 0;
-	uint32_t units = nv_rd32(dev, 0x1540);
-	int i, r;
-	uint32_t ustatus_addr, ustatus;
-	for (i = 0; i < 16; i++) {
-		if (!(units & (1 << i)))
-			continue;
-		if (dev_priv->chipset < 0xa0)
-			ustatus_addr = ustatus_old + (i << 12);
-		else
-			ustatus_addr = ustatus_new + (i << 11);
-		ustatus = nv_rd32(dev, ustatus_addr) & 0x7fffffff;
-		if (!ustatus)
-			continue;
-		tps++;
-		switch (type) {
-		case 6: /* texture error... unknown for now */
-			nv50_fb_vm_trap(dev, display, name);
-			if (display) {
-				NV_ERROR(dev, "magic set %d:\n", i);
-				for (r = ustatus_addr + 4; r <= ustatus_addr + 0x10; r += 4)
-					NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
-						nv_rd32(dev, r));
-			}
-			break;
-		case 7: /* MP error */
-			if (ustatus & 0x00010000) {
-				nv50_pgraph_mp_trap(dev, i, display);
-				ustatus &= ~0x00010000;
-			}
-			break;
-		case 8: /* TPDMA error */
-			{
-			uint32_t e0c = nv_rd32(dev, ustatus_addr + 4);
-			uint32_t e10 = nv_rd32(dev, ustatus_addr + 8);
-			uint32_t e14 = nv_rd32(dev, ustatus_addr + 0xc);
-			uint32_t e18 = nv_rd32(dev, ustatus_addr + 0x10);
-			uint32_t e1c = nv_rd32(dev, ustatus_addr + 0x14);
-			uint32_t e20 = nv_rd32(dev, ustatus_addr + 0x18);
-			uint32_t e24 = nv_rd32(dev, ustatus_addr + 0x1c);
-			nv50_fb_vm_trap(dev, display, name);
-			/* 2d engine destination */
-			if (ustatus & 0x00000010) {
-				if (display) {
-					NV_INFO(dev, "PGRAPH_TRAP_TPDMA_2D - TP %d - Unknown fault at address %02x%08x\n",
-							i, e14, e10);
-					NV_INFO(dev, "PGRAPH_TRAP_TPDMA_2D - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
-							i, e0c, e18, e1c, e20, e24);
-				}
-				ustatus &= ~0x00000010;
-			}
-			/* Render target */
-			if (ustatus & 0x00000040) {
-				if (display) {
-					NV_INFO(dev, "PGRAPH_TRAP_TPDMA_RT - TP %d - Unknown fault at address %02x%08x\n",
-							i, e14, e10);
-					NV_INFO(dev, "PGRAPH_TRAP_TPDMA_RT - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
-							i, e0c, e18, e1c, e20, e24);
-				}
-				ustatus &= ~0x00000040;
-			}
-			/* CUDA memory: l[], g[] or stack. */
-			if (ustatus & 0x00000080) {
-				if (display) {
-					if (e18 & 0x80000000) {
-						/* g[] read fault? */
-						NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Global read fault at address %02x%08x\n",
-								i, e14, e10 | ((e18 >> 24) & 0x1f));
-						e18 &= ~0x1f000000;
-					} else if (e18 & 0xc) {
-						/* g[] write fault? */
-						NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Global write fault at address %02x%08x\n",
-								i, e14, e10 | ((e18 >> 7) & 0x1f));
-						e18 &= ~0x00000f80;
-					} else {
-						NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Unknown CUDA fault at address %02x%08x\n",
-								i, e14, e10);
-					}
-					NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
-							i, e0c, e18, e1c, e20, e24);
-				}
-				ustatus &= ~0x00000080;
-			}
-			}
-			break;
-		}
-		if (ustatus) {
-			if (display)
-				NV_INFO(dev, "%s - TP%d: Unhandled ustatus 0x%08x\n", name, i, ustatus);
-		}
-		nv_wr32(dev, ustatus_addr, 0xc0000000);
-	}
-
-	if (!tps && display)
-		NV_INFO(dev, "%s - No TPs claiming errors?\n", name);
-}
-
-static void
-nv50_pgraph_trap_handler(struct drm_device *dev)
-{
-	struct nouveau_pgraph_trap trap;
-	uint32_t status = nv_rd32(dev, 0x400108);
-	uint32_t ustatus;
-	int display = nouveau_ratelimit();
-
-
-	if (!status && display) {
-		nouveau_graph_trap_info(dev, &trap);
-		nouveau_graph_dump_trap_info(dev, "PGRAPH_TRAP", &trap);
-		NV_INFO(dev, "PGRAPH_TRAP - no units reporting traps?\n");
-	}
-
-	/* DISPATCH: Relays commands to other units and handles NOTIFY,
-	 * COND, QUERY. If you get a trap from it, the command is still stuck
-	 * in DISPATCH and you need to do something about it. */
-	if (status & 0x001) {
-		ustatus = nv_rd32(dev, 0x400804) & 0x7fffffff;
-		if (!ustatus && display) {
-			NV_INFO(dev, "PGRAPH_TRAP_DISPATCH - no ustatus?\n");
-		}
-
-		/* Known to be triggered by screwed up NOTIFY and COND... */
-		if (ustatus & 0x00000001) {
-			nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_DISPATCH_FAULT");
-			nv_wr32(dev, 0x400500, 0);
-			if (nv_rd32(dev, 0x400808) & 0x80000000) {
-				if (display) {
-					if (nouveau_graph_trapped_channel(dev, &trap.channel))
-						trap.channel = -1;
-					trap.class = nv_rd32(dev, 0x400814);
-					trap.mthd = nv_rd32(dev, 0x400808) & 0x1ffc;
-					trap.subc = (nv_rd32(dev, 0x400808) >> 16) & 0x7;
-					trap.data = nv_rd32(dev, 0x40080c);
-					trap.data2 = nv_rd32(dev, 0x400810);
-					nouveau_graph_dump_trap_info(dev,
-							"PGRAPH_TRAP_DISPATCH_FAULT", &trap);
-					NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_FAULT - 400808: %08x\n", nv_rd32(dev, 0x400808));
-					NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_FAULT - 400848: %08x\n", nv_rd32(dev, 0x400848));
-				}
-				nv_wr32(dev, 0x400808, 0);
-			} else if (display) {
-				NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_FAULT - No stuck command?\n");
-			}
-			nv_wr32(dev, 0x4008e8, nv_rd32(dev, 0x4008e8) & 3);
-			nv_wr32(dev, 0x400848, 0);
-			ustatus &= ~0x00000001;
-		}
-		if (ustatus & 0x00000002) {
-			nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_DISPATCH_QUERY");
-			nv_wr32(dev, 0x400500, 0);
-			if (nv_rd32(dev, 0x40084c) & 0x80000000) {
-				if (display) {
-					if (nouveau_graph_trapped_channel(dev, &trap.channel))
-						trap.channel = -1;
-					trap.class = nv_rd32(dev, 0x400814);
-					trap.mthd = nv_rd32(dev, 0x40084c) & 0x1ffc;
-					trap.subc = (nv_rd32(dev, 0x40084c) >> 16) & 0x7;
-					trap.data = nv_rd32(dev, 0x40085c);
-					trap.data2 = 0;
-					nouveau_graph_dump_trap_info(dev,
-							"PGRAPH_TRAP_DISPATCH_QUERY", &trap);
-					NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_QUERY - 40084c: %08x\n", nv_rd32(dev, 0x40084c));
-				}
-				nv_wr32(dev, 0x40084c, 0);
-			} else if (display) {
-				NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_QUERY - No stuck command?\n");
-			}
-			ustatus &= ~0x00000002;
-		}
-		if (ustatus && display)
-			NV_INFO(dev, "PGRAPH_TRAP_DISPATCH - Unhandled ustatus 0x%08x\n", ustatus);
-		nv_wr32(dev, 0x400804, 0xc0000000);
-		nv_wr32(dev, 0x400108, 0x001);
-		status &= ~0x001;
-	}
-
-	/* TRAPs other than dispatch use the "normal" trap regs. */
-	if (status && display) {
-		nouveau_graph_trap_info(dev, &trap);
-		nouveau_graph_dump_trap_info(dev,
-				"PGRAPH_TRAP", &trap);
-	}
-
-	/* M2MF: Memory to memory copy engine. */
-	if (status & 0x002) {
-		ustatus = nv_rd32(dev, 0x406800) & 0x7fffffff;
-		if (!ustatus && display) {
-			NV_INFO(dev, "PGRAPH_TRAP_M2MF - no ustatus?\n");
-		}
-		if (ustatus & 0x00000001) {
-			nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_NOTIFY");
-			ustatus &= ~0x00000001;
-		}
-		if (ustatus & 0x00000002) {
-			nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_IN");
-			ustatus &= ~0x00000002;
-		}
-		if (ustatus & 0x00000004) {
-			nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_OUT");
-			ustatus &= ~0x00000004;
-		}
-		NV_INFO (dev, "PGRAPH_TRAP_M2MF - %08x %08x %08x %08x\n",
-				nv_rd32(dev, 0x406804),
-				nv_rd32(dev, 0x406808),
-				nv_rd32(dev, 0x40680c),
-				nv_rd32(dev, 0x406810));
-		if (ustatus && display)
-			NV_INFO(dev, "PGRAPH_TRAP_M2MF - Unhandled ustatus 0x%08x\n", ustatus);
-		/* No sane way found yet -- just reset the bugger. */
-		nv_wr32(dev, 0x400040, 2);
-		nv_wr32(dev, 0x400040, 0);
-		nv_wr32(dev, 0x406800, 0xc0000000);
-		nv_wr32(dev, 0x400108, 0x002);
-		status &= ~0x002;
-	}
-
-	/* VFETCH: Fetches data from vertex buffers. */
-	if (status & 0x004) {
-		ustatus = nv_rd32(dev, 0x400c04) & 0x7fffffff;
-		if (!ustatus && display) {
-			NV_INFO(dev, "PGRAPH_TRAP_VFETCH - no ustatus?\n");
-		}
-		if (ustatus & 0x00000001) {
-			nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_VFETCH_FAULT");
-			NV_INFO (dev, "PGRAPH_TRAP_VFETCH_FAULT - %08x %08x %08x %08x\n",
-					nv_rd32(dev, 0x400c00),
-					nv_rd32(dev, 0x400c08),
-					nv_rd32(dev, 0x400c0c),
-					nv_rd32(dev, 0x400c10));
-			ustatus &= ~0x00000001;
-		}
-		if (ustatus && display)
-			NV_INFO(dev, "PGRAPH_TRAP_VFETCH - Unhandled ustatus 0x%08x\n", ustatus);
-		nv_wr32(dev, 0x400c04, 0xc0000000);
-		nv_wr32(dev, 0x400108, 0x004);
-		status &= ~0x004;
-	}
-
-	/* STRMOUT: DirectX streamout / OpenGL transform feedback. */
-	if (status & 0x008) {
-		ustatus = nv_rd32(dev, 0x401800) & 0x7fffffff;
-		if (!ustatus && display) {
-			NV_INFO(dev, "PGRAPH_TRAP_STRMOUT - no ustatus?\n");
-		}
-		if (ustatus & 0x00000001) {
-			nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_STRMOUT_FAULT");
-			NV_INFO (dev, "PGRAPH_TRAP_STRMOUT_FAULT - %08x %08x %08x %08x\n",
-					nv_rd32(dev, 0x401804),
-					nv_rd32(dev, 0x401808),
-					nv_rd32(dev, 0x40180c),
-					nv_rd32(dev, 0x401810));
-			ustatus &= ~0x00000001;
-		}
-		if (ustatus && display)
-			NV_INFO(dev, "PGRAPH_TRAP_STRMOUT - Unhandled ustatus 0x%08x\n", ustatus);
-		/* No sane way found yet -- just reset the bugger. */
-		nv_wr32(dev, 0x400040, 0x80);
-		nv_wr32(dev, 0x400040, 0);
-		nv_wr32(dev, 0x401800, 0xc0000000);
-		nv_wr32(dev, 0x400108, 0x008);
-		status &= ~0x008;
-	}
-
-	/* CCACHE: Handles code and c[] caches and fills them. */
-	if (status & 0x010) {
-		ustatus = nv_rd32(dev, 0x405018) & 0x7fffffff;
-		if (!ustatus && display) {
-			NV_INFO(dev, "PGRAPH_TRAP_CCACHE - no ustatus?\n");
-		}
-		if (ustatus & 0x00000001) {
-			nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_CCACHE_FAULT");
-			NV_INFO (dev, "PGRAPH_TRAP_CCACHE_FAULT - %08x %08x %08x %08x %08x %08x %08x\n",
-					nv_rd32(dev, 0x405800),
-					nv_rd32(dev, 0x405804),
-					nv_rd32(dev, 0x405808),
-					nv_rd32(dev, 0x40580c),
-					nv_rd32(dev, 0x405810),
-					nv_rd32(dev, 0x405814),
-					nv_rd32(dev, 0x40581c));
-			ustatus &= ~0x00000001;
-		}
-		if (ustatus && display)
-			NV_INFO(dev, "PGRAPH_TRAP_CCACHE - Unhandled ustatus 0x%08x\n", ustatus);
-		nv_wr32(dev, 0x405018, 0xc0000000);
-		nv_wr32(dev, 0x400108, 0x010);
-		status &= ~0x010;
-	}
-
-	/* Unknown, not seen yet... 0x402000 is the only trap status reg
-	 * remaining, so try to handle it anyway. Perhaps related to that
-	 * unknown DMA slot on tesla? */
-	if (status & 0x20) {
-		nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_UNKC04");
-		ustatus = nv_rd32(dev, 0x402000) & 0x7fffffff;
-		if (display)
-			NV_INFO(dev, "PGRAPH_TRAP_UNKC04 - Unhandled ustatus 0x%08x\n", ustatus);
-		nv_wr32(dev, 0x402000, 0xc0000000);
-		/* no status modifiction on purpose */
-	}
-
-	/* TEXTURE: CUDA texturing units */
-	if (status & 0x040) {
-		nv50_pgraph_tp_trap (dev, 6, 0x408900, 0x408600, display,
-				"PGRAPH_TRAP_TEXTURE");
-		nv_wr32(dev, 0x400108, 0x040);
-		status &= ~0x040;
-	}
-
-	/* MP: CUDA execution engines. */
-	if (status & 0x080) {
-		nv50_pgraph_tp_trap (dev, 7, 0x408314, 0x40831c, display,
-				"PGRAPH_TRAP_MP");
-		nv_wr32(dev, 0x400108, 0x080);
-		status &= ~0x080;
-	}
-
-	/* TPDMA:  Handles TP-initiated uncached memory accesses:
-	 * l[], g[], stack, 2d surfaces, render targets. */
-	if (status & 0x100) {
-		nv50_pgraph_tp_trap (dev, 8, 0x408e08, 0x408708, display,
-				"PGRAPH_TRAP_TPDMA");
-		nv_wr32(dev, 0x400108, 0x100);
-		status &= ~0x100;
-	}
-
-	if (status) {
-		if (display)
-			NV_INFO(dev, "PGRAPH_TRAP - Unknown trap 0x%08x\n",
-				status);
-		nv_wr32(dev, 0x400108, status);
-	}
-}
-
-/* There must be a *lot* of these. Will take some time to gather them up. */
-static struct nouveau_enum_names nv50_data_error_names[] =
-{
-	{ 4,	"INVALID_VALUE" },
-	{ 5,	"INVALID_ENUM" },
-	{ 8,	"INVALID_OBJECT" },
-	{ 0xc,	"INVALID_BITFIELD" },
-	{ 0x28,	"MP_NO_REG_SPACE" },
-	{ 0x2b,	"MP_BLOCK_SIZE_MISMATCH" },
-};
-
-static void
-nv50_pgraph_irq_handler(struct drm_device *dev)
-{
-	struct nouveau_pgraph_trap trap;
-	int unhandled = 0;
-	uint32_t status;
-
-	while ((status = nv_rd32(dev, NV03_PGRAPH_INTR))) {
-		/* NOTIFY: You've set a NOTIFY an a command and it's done. */
-		if (status & 0x00000001) {
-			nouveau_graph_trap_info(dev, &trap);
-			if (nouveau_ratelimit())
-				nouveau_graph_dump_trap_info(dev,
-						"PGRAPH_NOTIFY", &trap);
-			status &= ~0x00000001;
-			nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000001);
-		}
-
-		/* COMPUTE_QUERY: Purpose and exact cause unknown, happens
-		 * when you write 0x200 to 0x50c0 method 0x31c. */
-		if (status & 0x00000002) {
-			nouveau_graph_trap_info(dev, &trap);
-			if (nouveau_ratelimit())
-				nouveau_graph_dump_trap_info(dev,
-						"PGRAPH_COMPUTE_QUERY", &trap);
-			status &= ~0x00000002;
-			nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000002);
-		}
-
-		/* Unknown, never seen: 0x4 */
-
-		/* ILLEGAL_MTHD: You used a wrong method for this class. */
-		if (status & 0x00000010) {
-			nouveau_graph_trap_info(dev, &trap);
-			if (nouveau_pgraph_intr_swmthd(dev, &trap))
-				unhandled = 1;
-			if (unhandled && nouveau_ratelimit())
-				nouveau_graph_dump_trap_info(dev,
-						"PGRAPH_ILLEGAL_MTHD", &trap);
-			status &= ~0x00000010;
-			nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000010);
-		}
-
-		/* ILLEGAL_CLASS: You used a wrong class. */
-		if (status & 0x00000020) {
-			nouveau_graph_trap_info(dev, &trap);
-			if (nouveau_ratelimit())
-				nouveau_graph_dump_trap_info(dev,
-						"PGRAPH_ILLEGAL_CLASS", &trap);
-			status &= ~0x00000020;
-			nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000020);
-		}
-
-		/* DOUBLE_NOTIFY: You tried to set a NOTIFY on another NOTIFY. */
-		if (status & 0x00000040) {
-			nouveau_graph_trap_info(dev, &trap);
-			if (nouveau_ratelimit())
-				nouveau_graph_dump_trap_info(dev,
-						"PGRAPH_DOUBLE_NOTIFY", &trap);
-			status &= ~0x00000040;
-			nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000040);
-		}
-
-		/* CONTEXT_SWITCH: PGRAPH needs us to load a new context */
-		if (status & 0x00001000) {
-			nv_wr32(dev, 0x400500, 0x00000000);
-			nv_wr32(dev, NV03_PGRAPH_INTR,
-				NV_PGRAPH_INTR_CONTEXT_SWITCH);
-			nv_wr32(dev, NV40_PGRAPH_INTR_EN, nv_rd32(dev,
-				NV40_PGRAPH_INTR_EN) &
-				~NV_PGRAPH_INTR_CONTEXT_SWITCH);
-			nv_wr32(dev, 0x400500, 0x00010001);
-
-			nv50_graph_context_switch(dev);
-
-			status &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
-		}
-
-		/* BUFFER_NOTIFY: Your m2mf transfer finished */
-		if (status & 0x00010000) {
-			nouveau_graph_trap_info(dev, &trap);
-			if (nouveau_ratelimit())
-				nouveau_graph_dump_trap_info(dev,
-						"PGRAPH_BUFFER_NOTIFY", &trap);
-			status &= ~0x00010000;
-			nv_wr32(dev, NV03_PGRAPH_INTR, 0x00010000);
-		}
-
-		/* DATA_ERROR: Invalid value for this method, or invalid
-		 * state in current PGRAPH context for this operation */
-		if (status & 0x00100000) {
-			nouveau_graph_trap_info(dev, &trap);
-			if (nouveau_ratelimit()) {
-				nouveau_graph_dump_trap_info(dev,
-						"PGRAPH_DATA_ERROR", &trap);
-				NV_INFO (dev, "PGRAPH_DATA_ERROR - ");
-				nouveau_print_enum_names(nv_rd32(dev, 0x400110),
-						nv50_data_error_names);
-				printk("\n");
-			}
-			status &= ~0x00100000;
-			nv_wr32(dev, NV03_PGRAPH_INTR, 0x00100000);
-		}
-
-		/* TRAP: Something bad happened in the middle of command
-		 * execution.  Has a billion types, subtypes, and even
-		 * subsubtypes. */
-		if (status & 0x00200000) {
-			nv50_pgraph_trap_handler(dev);
-			status &= ~0x00200000;
-			nv_wr32(dev, NV03_PGRAPH_INTR, 0x00200000);
-		}
-
-		/* Unknown, never seen: 0x00400000 */
-
-		/* SINGLE_STEP: Happens on every method if you turned on
-		 * single stepping in 40008c */
-		if (status & 0x01000000) {
-			nouveau_graph_trap_info(dev, &trap);
-			if (nouveau_ratelimit())
-				nouveau_graph_dump_trap_info(dev,
-						"PGRAPH_SINGLE_STEP", &trap);
-			status &= ~0x01000000;
-			nv_wr32(dev, NV03_PGRAPH_INTR, 0x01000000);
-		}
-
-		/* 0x02000000 happens when you pause a ctxprog...
-		 * but the only way this can happen that I know is by
-		 * poking the relevant MMIO register, and we don't
-		 * do that. */
-
-		if (status) {
-			NV_INFO(dev, "Unhandled PGRAPH_INTR - 0x%08x\n",
-				status);
-			nv_wr32(dev, NV03_PGRAPH_INTR, status);
-		}
-
-		{
-			const int isb = (1 << 16) | (1 << 0);
-
-			if ((nv_rd32(dev, 0x400500) & isb) != isb)
-				nv_wr32(dev, 0x400500,
-					nv_rd32(dev, 0x400500) | isb);
-		}
-	}
-
-	nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING);
-	if (nv_rd32(dev, 0x400824) & (1 << 31))
-		nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) & ~(1 << 31));
-}
-
-static void
-nouveau_crtc_irq_handler(struct drm_device *dev, int crtc)
-{
-	if (crtc & 1)
-		nv_wr32(dev, NV_CRTC0_INTSTAT, NV_CRTC_INTR_VBLANK);
-
-	if (crtc & 2)
-		nv_wr32(dev, NV_CRTC1_INTSTAT, NV_CRTC_INTR_VBLANK);
-}
-
 irqreturn_t
 nouveau_irq_handler(DRM_IRQ_ARGS)
 {
 	struct drm_device *dev = (struct drm_device *)arg;
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	uint32_t status;
 	unsigned long flags;
+	u32 stat;
+	int i;
 
-	status = nv_rd32(dev, NV03_PMC_INTR_0);
-	if (!status)
+	stat = nv_rd32(dev, NV03_PMC_INTR_0);
+	if (!stat)
 		return IRQ_NONE;
 
 	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+	for (i = 0; i < 32 && stat; i++) {
+		if (!(stat & (1 << i)) || !dev_priv->irq_handler[i])
+			continue;
 
-	if (status & NV_PMC_INTR_0_PFIFO_PENDING) {
-		nouveau_fifo_irq_handler(dev);
-		status &= ~NV_PMC_INTR_0_PFIFO_PENDING;
+		dev_priv->irq_handler[i](dev);
+		stat &= ~(1 << i);
 	}
 
-	if (status & NV_PMC_INTR_0_PGRAPH_PENDING) {
-		if (dev_priv->card_type >= NV_50)
-			nv50_pgraph_irq_handler(dev);
-		else
-			nouveau_pgraph_irq_handler(dev);
-
-		status &= ~NV_PMC_INTR_0_PGRAPH_PENDING;
-	}
-
-	if (status & NV_PMC_INTR_0_CRTCn_PENDING) {
-		nouveau_crtc_irq_handler(dev, (status>>24)&3);
-		status &= ~NV_PMC_INTR_0_CRTCn_PENDING;
-	}
-
-	if (status & (NV_PMC_INTR_0_NV50_DISPLAY_PENDING |
-		      NV_PMC_INTR_0_NV50_I2C_PENDING)) {
-		nv50_display_irq_handler(dev);
-		status &= ~(NV_PMC_INTR_0_NV50_DISPLAY_PENDING |
-			    NV_PMC_INTR_0_NV50_I2C_PENDING);
-	}
-
-	if (status)
-		NV_ERROR(dev, "Unhandled PMC INTR status bits 0x%08x\n", status);
-
+	if (dev_priv->msi_enabled)
+		nv_wr08(dev, 0x00088068, 0xff);
 	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
 
+	if (stat && nouveau_ratelimit())
+		NV_ERROR(dev, "PMC - unhandled INTR 0x%08x\n", stat);
 	return IRQ_HANDLED;
 }
+
+int
+nouveau_irq_init(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	int ret;
+
+	if (nouveau_msi != 0 && dev_priv->card_type >= NV_50) {
+		ret = pci_enable_msi(dev->pdev);
+		if (ret == 0) {
+			NV_INFO(dev, "enabled MSI\n");
+			dev_priv->msi_enabled = true;
+		}
+	}
+
+	return drm_irq_install(dev);
+}
+
+void
+nouveau_irq_fini(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+	drm_irq_uninstall(dev);
+	if (dev_priv->msi_enabled)
+		pci_disable_msi(dev->pdev);
+}
+
+void
+nouveau_irq_register(struct drm_device *dev, int status_bit,
+		     void (*handler)(struct drm_device *))
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+	dev_priv->irq_handler[status_bit] = handler;
+	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
+}
+
+void
+nouveau_irq_unregister(struct drm_device *dev, int status_bit)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+	dev_priv->irq_handler[status_bit] = NULL;
+	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
+}
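
Editor's note: the rewritten handler above dispatches on individual PMC interrupt status bits through dev_priv->irq_handler[], so each engine now installs its own service routine with nouveau_irq_register() and removes it with nouveau_irq_unregister(). The following is only an illustrative sketch of that usage; the function names and the status bit are placeholders, not taken from this diff.

/* Illustrative only: hooking a per-engine service routine into the new
 * per-bit dispatch table.  Bit 12 is chosen for illustration; real
 * callers pass the PMC INTR_0 bit that their engine owns. */
static void
example_engine_isr(struct drm_device *dev)
{
	/* read and acknowledge the engine's own interrupt status here */
}

static int
example_engine_init(struct drm_device *dev)
{
	nouveau_irq_register(dev, 12, example_engine_isr);
	return 0;
}

static void
example_engine_takedown(struct drm_device *dev)
{
	nouveau_irq_unregister(dev, 12);
}
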
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index fe4a30d..69044eb 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -36,183 +36,112 @@
 
 #include "nouveau_drv.h"
 #include "nouveau_pm.h"
+#include "nouveau_mm.h"
+#include "nouveau_vm.h"
 
 /*
  * NV10-NV40 tiling helpers
  */
 
 static void
-nv10_mem_set_region_tiling(struct drm_device *dev, int i, uint32_t addr,
-			   uint32_t size, uint32_t pitch)
+nv10_mem_update_tile_region(struct drm_device *dev,
+			    struct nouveau_tile_reg *tile, uint32_t addr,
+			    uint32_t size, uint32_t pitch, uint32_t flags)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
 	struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
 	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
-	struct nouveau_tile_reg *tile = &dev_priv->tile[i];
+	int i = tile - dev_priv->tile.reg;
+	unsigned long save;
 
-	tile->addr = addr;
-	tile->size = size;
-	tile->used = !!pitch;
-	nouveau_fence_unref((void **)&tile->fence);
+	nouveau_fence_unref(&tile->fence);
 
+	if (tile->pitch)
+		pfb->free_tile_region(dev, i);
+
+	if (pitch)
+		pfb->init_tile_region(dev, i, addr, size, pitch, flags);
+
+	spin_lock_irqsave(&dev_priv->context_switch_lock, save);
 	pfifo->reassign(dev, false);
 	pfifo->cache_pull(dev, false);
 
 	nouveau_wait_for_idle(dev);
 
-	pgraph->set_region_tiling(dev, i, addr, size, pitch);
-	pfb->set_region_tiling(dev, i, addr, size, pitch);
+	pfb->set_tile_region(dev, i);
+	pgraph->set_tile_region(dev, i);
 
 	pfifo->cache_pull(dev, true);
 	pfifo->reassign(dev, true);
+	spin_unlock_irqrestore(&dev_priv->context_switch_lock, save);
+}
+
+static struct nouveau_tile_reg *
+nv10_mem_get_tile_region(struct drm_device *dev, int i)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
+
+	spin_lock(&dev_priv->tile.lock);
+
+	if (!tile->used &&
+	    (!tile->fence || nouveau_fence_signalled(tile->fence)))
+		tile->used = true;
+	else
+		tile = NULL;
+
+	spin_unlock(&dev_priv->tile.lock);
+	return tile;
+}
+
+void
+nv10_mem_put_tile_region(struct drm_device *dev, struct nouveau_tile_reg *tile,
+			 struct nouveau_fence *fence)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+	if (tile) {
+		spin_lock(&dev_priv->tile.lock);
+		if (fence) {
+			/* Mark it as pending. */
+			tile->fence = fence;
+			nouveau_fence_ref(fence);
+		}
+
+		tile->used = false;
+		spin_unlock(&dev_priv->tile.lock);
+	}
 }
 
 struct nouveau_tile_reg *
 nv10_mem_set_tiling(struct drm_device *dev, uint32_t addr, uint32_t size,
-		    uint32_t pitch)
+		    uint32_t pitch, uint32_t flags)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
-	struct nouveau_tile_reg *found = NULL;
-	unsigned long i, flags;
-
-	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
-
-	for (i = 0; i < pfb->num_tiles; i++) {
-		struct nouveau_tile_reg *tile = &dev_priv->tile[i];
-
-		if (tile->used)
-			/* Tile region in use. */
-			continue;
-
-		if (tile->fence &&
-		    !nouveau_fence_signalled(tile->fence, NULL))
-			/* Pending tile region. */
-			continue;
-
-		if (max(tile->addr, addr) <
-		    min(tile->addr + tile->size, addr + size))
-			/* Kill an intersecting tile region. */
-			nv10_mem_set_region_tiling(dev, i, 0, 0, 0);
-
-		if (pitch && !found) {
-			/* Free tile region. */
-			nv10_mem_set_region_tiling(dev, i, addr, size, pitch);
-			found = tile;
-		}
-	}
-
-	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
-
-	return found;
-}
-
-void
-nv10_mem_expire_tiling(struct drm_device *dev, struct nouveau_tile_reg *tile,
-		       struct nouveau_fence *fence)
-{
-	if (fence) {
-		/* Mark it as pending. */
-		tile->fence = fence;
-		nouveau_fence_ref(fence);
-	}
-
-	tile->used = false;
-}
-
-/*
- * NV50 VM helpers
- */
-int
-nv50_mem_vm_bind_linear(struct drm_device *dev, uint64_t virt, uint32_t size,
-			uint32_t flags, uint64_t phys)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_gpuobj *pgt;
-	unsigned block;
+	struct nouveau_tile_reg *tile, *found = NULL;
 	int i;
 
-	virt = ((virt - dev_priv->vm_vram_base) >> 16) << 1;
-	size = (size >> 16) << 1;
+	for (i = 0; i < pfb->num_tiles; i++) {
+		tile = nv10_mem_get_tile_region(dev, i);
 
-	phys |= ((uint64_t)flags << 32);
-	phys |= 1;
-	if (dev_priv->vram_sys_base) {
-		phys += dev_priv->vram_sys_base;
-		phys |= 0x30;
+		if (pitch && !found) {
+			found = tile;
+			continue;
+
+		} else if (tile && tile->pitch) {
+			/* Kill an unused tile region. */
+			nv10_mem_update_tile_region(dev, tile, 0, 0, 0, 0);
+		}
+
+		nv10_mem_put_tile_region(dev, tile, NULL);
 	}
 
-	while (size) {
-		unsigned offset_h = upper_32_bits(phys);
-		unsigned offset_l = lower_32_bits(phys);
-		unsigned pte, end;
-
-		for (i = 7; i >= 0; i--) {
-			block = 1 << (i + 1);
-			if (size >= block && !(virt & (block - 1)))
-				break;
-		}
-		offset_l |= (i << 7);
-
-		phys += block << 15;
-		size -= block;
-
-		while (block) {
-			pgt = dev_priv->vm_vram_pt[virt >> 14];
-			pte = virt & 0x3ffe;
-
-			end = pte + block;
-			if (end > 16384)
-				end = 16384;
-			block -= (end - pte);
-			virt  += (end - pte);
-
-			while (pte < end) {
-				nv_wo32(pgt, (pte * 4) + 0, offset_l);
-				nv_wo32(pgt, (pte * 4) + 4, offset_h);
-				pte += 2;
-			}
-		}
-	}
-
-	dev_priv->engine.instmem.flush(dev);
-	dev_priv->engine.fifo.tlb_flush(dev);
-	dev_priv->engine.graph.tlb_flush(dev);
-	nv50_vm_flush(dev, 6);
-	return 0;
-}
-
-void
-nv50_mem_vm_unbind(struct drm_device *dev, uint64_t virt, uint32_t size)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_gpuobj *pgt;
-	unsigned pages, pte, end;
-
-	virt -= dev_priv->vm_vram_base;
-	pages = (size >> 16) << 1;
-
-	while (pages) {
-		pgt = dev_priv->vm_vram_pt[virt >> 29];
-		pte = (virt & 0x1ffe0000ULL) >> 15;
-
-		end = pte + pages;
-		if (end > 16384)
-			end = 16384;
-		pages -= (end - pte);
-		virt  += (end - pte) << 15;
-
-		while (pte < end) {
-			nv_wo32(pgt, (pte * 4), 0);
-			pte++;
-		}
-	}
-
-	dev_priv->engine.instmem.flush(dev);
-	dev_priv->engine.fifo.tlb_flush(dev);
-	dev_priv->engine.graph.tlb_flush(dev);
-	nv50_vm_flush(dev, 6);
+	if (found)
+		nv10_mem_update_tile_region(dev, found, addr, size,
+					    pitch, flags);
+	return found;
 }
 
 /*
@@ -312,62 +241,7 @@
 	return 0;
 }
 
-static void
-nv50_vram_preinit(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	int i, parts, colbits, rowbitsa, rowbitsb, banks;
-	u64 rowsize, predicted;
-	u32 r0, r4, rt, ru;
-
-	r0 = nv_rd32(dev, 0x100200);
-	r4 = nv_rd32(dev, 0x100204);
-	rt = nv_rd32(dev, 0x100250);
-	ru = nv_rd32(dev, 0x001540);
-	NV_DEBUG(dev, "memcfg 0x%08x 0x%08x 0x%08x 0x%08x\n", r0, r4, rt, ru);
-
-	for (i = 0, parts = 0; i < 8; i++) {
-		if (ru & (0x00010000 << i))
-			parts++;
-	}
-
-	colbits  =  (r4 & 0x0000f000) >> 12;
-	rowbitsa = ((r4 & 0x000f0000) >> 16) + 8;
-	rowbitsb = ((r4 & 0x00f00000) >> 20) + 8;
-	banks    = ((r4 & 0x01000000) ? 8 : 4);
-
-	rowsize = parts * banks * (1 << colbits) * 8;
-	predicted = rowsize << rowbitsa;
-	if (r0 & 0x00000004)
-		predicted += rowsize << rowbitsb;
-
-	if (predicted != dev_priv->vram_size) {
-		NV_WARN(dev, "memory controller reports %dMiB VRAM\n",
-			(u32)(dev_priv->vram_size >> 20));
-		NV_WARN(dev, "we calculated %dMiB VRAM\n",
-			(u32)(predicted >> 20));
-	}
-
-	dev_priv->vram_rblock_size = rowsize >> 12;
-	if (rt & 1)
-		dev_priv->vram_rblock_size *= 3;
-
-	NV_DEBUG(dev, "rblock %lld bytes\n",
-		 (u64)dev_priv->vram_rblock_size << 12);
-}
-
-static void
-nvaa_vram_preinit(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-
-	/* To our knowledge, there's no large scale reordering of pages
-	 * that occurs on IGP chipsets.
-	 */
-	dev_priv->vram_rblock_size = 1;
-}
-
-static int
+int
 nouveau_mem_detect(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
@@ -381,33 +255,6 @@
 	if (dev_priv->card_type < NV_50) {
 		dev_priv->vram_size  = nv_rd32(dev, NV04_PFB_FIFO_DATA);
 		dev_priv->vram_size &= NV10_PFB_FIFO_DATA_RAM_AMOUNT_MB_MASK;
-	} else
-	if (dev_priv->card_type < NV_C0) {
-		dev_priv->vram_size = nv_rd32(dev, NV04_PFB_FIFO_DATA);
-		dev_priv->vram_size |= (dev_priv->vram_size & 0xff) << 32;
-		dev_priv->vram_size &= 0xffffffff00ll;
-
-		switch (dev_priv->chipset) {
-		case 0xaa:
-		case 0xac:
-		case 0xaf:
-			dev_priv->vram_sys_base = nv_rd32(dev, 0x100e10);
-			dev_priv->vram_sys_base <<= 12;
-			nvaa_vram_preinit(dev);
-			break;
-		default:
-			nv50_vram_preinit(dev);
-			break;
-		}
-	} else {
-		dev_priv->vram_size  = nv_rd32(dev, 0x10f20c) << 20;
-		dev_priv->vram_size *= nv_rd32(dev, 0x121c74);
-	}
-
-	NV_INFO(dev, "Detected %dMiB VRAM\n", (int)(dev_priv->vram_size >> 20));
-	if (dev_priv->vram_sys_base) {
-		NV_INFO(dev, "Stolen system memory at: 0x%010llx\n",
-			dev_priv->vram_sys_base);
 	}
 
 	if (dev_priv->vram_size)
@@ -415,6 +262,15 @@
 	return -ENOMEM;
 }
 
+bool
+nouveau_mem_flags_valid(struct drm_device *dev, u32 tile_flags)
+{
+	if (!(tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK))
+		return true;
+
+	return false;
+}
+
 #if __OS_HAS_AGP
 static unsigned long
 get_agp_mode(struct drm_device *dev, unsigned long mode)
@@ -547,10 +403,6 @@
 	if (ret)
 		return ret;
 
-	ret = nouveau_mem_detect(dev);
-	if (ret)
-		return ret;
-
 	dev_priv->fb_phys = pci_resource_start(dev->pdev, 1);
 
 	ret = nouveau_ttm_global_init(dev_priv);
@@ -566,13 +418,6 @@
 		return ret;
 	}
 
-	dev_priv->fb_available_size = dev_priv->vram_size;
-	dev_priv->fb_mappable_pages = dev_priv->fb_available_size;
-	if (dev_priv->fb_mappable_pages > pci_resource_len(dev->pdev, 1))
-		dev_priv->fb_mappable_pages =
-			pci_resource_len(dev->pdev, 1);
-	dev_priv->fb_mappable_pages >>= PAGE_SHIFT;
-
 	/* reserve space at end of VRAM for PRAMIN */
 	if (dev_priv->chipset == 0x40 || dev_priv->chipset == 0x47 ||
 	    dev_priv->chipset == 0x49 || dev_priv->chipset == 0x4b)
@@ -583,6 +428,22 @@
 	else
 		dev_priv->ramin_rsvd_vram = (512 * 1024);
 
+	ret = dev_priv->engine.vram.init(dev);
+	if (ret)
+		return ret;
+
+	NV_INFO(dev, "Detected %dMiB VRAM\n", (int)(dev_priv->vram_size >> 20));
+	if (dev_priv->vram_sys_base) {
+		NV_INFO(dev, "Stolen system memory at: 0x%010llx\n",
+			dev_priv->vram_sys_base);
+	}
+
+	dev_priv->fb_available_size = dev_priv->vram_size;
+	dev_priv->fb_mappable_pages = dev_priv->fb_available_size;
+	if (dev_priv->fb_mappable_pages > pci_resource_len(dev->pdev, 1))
+		dev_priv->fb_mappable_pages = pci_resource_len(dev->pdev, 1);
+	dev_priv->fb_mappable_pages >>= PAGE_SHIFT;
+
 	dev_priv->fb_available_size -= dev_priv->ramin_rsvd_vram;
 	dev_priv->fb_aper_free = dev_priv->fb_available_size;
 
@@ -799,3 +660,118 @@
 
 	kfree(mem->timing);
 }
+
+static int
+nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long p_size)
+{
+	struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev);
+	struct nouveau_mm *mm;
+	u32 b_size;
+	int ret;
+
+	p_size = (p_size << PAGE_SHIFT) >> 12;
+	b_size = dev_priv->vram_rblock_size >> 12;
+
+	ret = nouveau_mm_init(&mm, 0, p_size, b_size);
+	if (ret)
+		return ret;
+
+	man->priv = mm;
+	return 0;
+}
+
+static int
+nouveau_vram_manager_fini(struct ttm_mem_type_manager *man)
+{
+	struct nouveau_mm *mm = man->priv;
+	int ret;
+
+	ret = nouveau_mm_fini(&mm);
+	if (ret)
+		return ret;
+
+	man->priv = NULL;
+	return 0;
+}
+
+static void
+nouveau_vram_manager_del(struct ttm_mem_type_manager *man,
+			 struct ttm_mem_reg *mem)
+{
+	struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev);
+	struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
+	struct drm_device *dev = dev_priv->dev;
+
+	vram->put(dev, (struct nouveau_vram **)&mem->mm_node);
+}
+
+static int
+nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
+			 struct ttm_buffer_object *bo,
+			 struct ttm_placement *placement,
+			 struct ttm_mem_reg *mem)
+{
+	struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev);
+	struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
+	struct drm_device *dev = dev_priv->dev;
+	struct nouveau_bo *nvbo = nouveau_bo(bo);
+	struct nouveau_vram *node;
+	u32 size_nc = 0;
+	int ret;
+
+	if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG)
+		size_nc = 1 << nvbo->vma.node->type;
+
+	ret = vram->get(dev, mem->num_pages << PAGE_SHIFT,
+			mem->page_alignment << PAGE_SHIFT, size_nc,
+			(nvbo->tile_flags >> 8) & 0xff, &node);
+	if (ret)
+		return ret;
+
+	node->page_shift = 12;
+	if (nvbo->vma.node)
+		node->page_shift = nvbo->vma.node->type;
+
+	mem->mm_node = node;
+	mem->start   = node->offset >> PAGE_SHIFT;
+	return 0;
+}
+
+void
+nouveau_vram_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
+{
+	struct nouveau_mm *mm = man->priv;
+	struct nouveau_mm_node *r;
+	u64 total = 0, ttotal[3] = {}, tused[3] = {}, tfree[3] = {};
+	int i;
+
+	mutex_lock(&mm->mutex);
+	list_for_each_entry(r, &mm->nodes, nl_entry) {
+		printk(KERN_DEBUG "%s %s-%d: 0x%010llx 0x%010llx\n",
+		       prefix, r->free ? "free" : "used", r->type,
+		       ((u64)r->offset << 12),
+		       (((u64)r->offset + r->length) << 12));
+		total += r->length;
+		ttotal[r->type] += r->length;
+		if (r->free)
+			tfree[r->type] += r->length;
+		else
+			tused[r->type] += r->length;
+	}
+	mutex_unlock(&mm->mutex);
+
+	printk(KERN_DEBUG "%s  total: 0x%010llx\n", prefix, total << 12);
+	for (i = 0; i < 3; i++) {
+		printk(KERN_DEBUG "%s type %d: 0x%010llx, "
+				  "used 0x%010llx, free 0x%010llx\n", prefix,
+		       i, ttotal[i] << 12, tused[i] << 12, tfree[i] << 12);
+	}
+}
+
+const struct ttm_mem_type_manager_func nouveau_vram_manager = {
+	nouveau_vram_manager_init,
+	nouveau_vram_manager_fini,
+	nouveau_vram_manager_new,
+	nouveau_vram_manager_del,
+	nouveau_vram_manager_debug
+};
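
Editor's note: the five functions above form a TTM memory-type manager backed by the new nouveau_mm allocator; TTM invokes them through the nouveau_vram_manager function table whenever buffers are placed in VRAM. The wiring of that table into TTM happens outside this hunk (presumably in the driver's init_mem_type() hook); a hedged sketch of what that would look like:

/* Sketch only (not part of this diff): plugging the table above into
 * TTM for the VRAM memory type.  Other fields of 'man' (flags,
 * caching) are omitted here. */
static int
example_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
		      struct ttm_mem_type_manager *man)
{
	if (type != TTM_PL_VRAM)
		return -EINVAL;

	man->func = &nouveau_vram_manager;	/* init/fini/new/del/debug */
	return 0;
}
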
diff --git a/drivers/gpu/drm/nouveau/nouveau_mm.c b/drivers/gpu/drm/nouveau/nouveau_mm.c
new file mode 100644
index 0000000..cdbb11e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_mm.c
@@ -0,0 +1,271 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_mm.h"
+
+static inline void
+region_put(struct nouveau_mm *rmm, struct nouveau_mm_node *a)
+{
+	list_del(&a->nl_entry);
+	list_del(&a->fl_entry);
+	kfree(a);
+}
+
+static struct nouveau_mm_node *
+region_split(struct nouveau_mm *rmm, struct nouveau_mm_node *a, u32 size)
+{
+	struct nouveau_mm_node *b;
+
+	if (a->length == size)
+		return a;
+
+	b = kmalloc(sizeof(*b), GFP_KERNEL);
+	if (unlikely(b == NULL))
+		return NULL;
+
+	b->offset = a->offset;
+	b->length = size;
+	b->free   = a->free;
+	b->type   = a->type;
+	a->offset += size;
+	a->length -= size;
+	list_add_tail(&b->nl_entry, &a->nl_entry);
+	if (b->free)
+		list_add_tail(&b->fl_entry, &a->fl_entry);
+	return b;
+}
+
+static struct nouveau_mm_node *
+nouveau_mm_merge(struct nouveau_mm *rmm, struct nouveau_mm_node *this)
+{
+	struct nouveau_mm_node *prev, *next;
+
+	/* try to merge with free adjacent entries of same type */
+	prev = list_entry(this->nl_entry.prev, struct nouveau_mm_node, nl_entry);
+	if (this->nl_entry.prev != &rmm->nodes) {
+		if (prev->free && prev->type == this->type) {
+			prev->length += this->length;
+			region_put(rmm, this);
+			this = prev;
+		}
+	}
+
+	next = list_entry(this->nl_entry.next, struct nouveau_mm_node, nl_entry);
+	if (this->nl_entry.next != &rmm->nodes) {
+		if (next->free && next->type == this->type) {
+			next->offset  = this->offset;
+			next->length += this->length;
+			region_put(rmm, this);
+			this = next;
+		}
+	}
+
+	return this;
+}
+
+void
+nouveau_mm_put(struct nouveau_mm *rmm, struct nouveau_mm_node *this)
+{
+	u32 block_s, block_l;
+
+	this->free = true;
+	list_add(&this->fl_entry, &rmm->free);
+	this = nouveau_mm_merge(rmm, this);
+
+	/* any entirely free blocks now?  we'll want to remove typing
+	 * on them now so they can be used for any memory allocation
+	 */
+	block_s = roundup(this->offset, rmm->block_size);
+	if (block_s + rmm->block_size > this->offset + this->length)
+		return;
+
+	/* split off any still-typed region at the start */
+	if (block_s != this->offset) {
+		if (!region_split(rmm, this, block_s - this->offset))
+			return;
+	}
+
+	/* split off the soon-to-be-untyped block(s) */
+	block_l = rounddown(this->length, rmm->block_size);
+	if (block_l != this->length) {
+		this = region_split(rmm, this, block_l);
+		if (!this)
+			return;
+	}
+
+	/* mark as having no type, and retry merge with any adjacent
+	 * untyped blocks
+	 */
+	this->type = 0;
+	nouveau_mm_merge(rmm, this);
+}
+
+int
+nouveau_mm_get(struct nouveau_mm *rmm, int type, u32 size, u32 size_nc,
+	       u32 align, struct nouveau_mm_node **pnode)
+{
+	struct nouveau_mm_node *this, *tmp, *next;
+	u32 splitoff, avail, alloc;
+
+	list_for_each_entry_safe(this, tmp, &rmm->free, fl_entry) {
+		next = list_entry(this->nl_entry.next, struct nouveau_mm_node, nl_entry);
+		if (this->nl_entry.next == &rmm->nodes)
+			next = NULL;
+
+		/* skip wrongly typed blocks */
+		if (this->type && this->type != type)
+			continue;
+
+		/* account for alignment */
+		splitoff = this->offset & (align - 1);
+		if (splitoff)
+			splitoff = align - splitoff;
+
+		if (this->length <= splitoff)
+			continue;
+
+		/* determine total memory available from this, and
+		 * the next block (if appropriate)
+		 */
+		avail = this->length;
+		if (next && next->free && (!next->type || next->type == type))
+			avail += next->length;
+
+		avail -= splitoff;
+
+		/* determine allocation size */
+		if (size_nc) {
+			alloc = min(avail, size);
+			alloc = rounddown(alloc, size_nc);
+			if (alloc == 0)
+				continue;
+		} else {
+			alloc = size;
+			if (avail < alloc)
+				continue;
+		}
+
+		/* untyped block, split off a chunk that's a multiple
+		 * of block_size and type it
+		 */
+		if (!this->type) {
+			u32 block = roundup(alloc + splitoff, rmm->block_size);
+			if (this->length < block)
+				continue;
+
+			this = region_split(rmm, this, block);
+			if (!this)
+				return -ENOMEM;
+
+			this->type = type;
+		}
+
+		/* stealing memory from adjacent block */
+		if (alloc > this->length) {
+			u32 amount = alloc - (this->length - splitoff);
+
+			if (!next->type) {
+				amount = roundup(amount, rmm->block_size);
+
+				next = region_split(rmm, next, amount);
+				if (!next)
+					return -ENOMEM;
+
+				next->type = type;
+			}
+
+			this->length += amount;
+			next->offset += amount;
+			next->length -= amount;
+			if (!next->length) {
+				list_del(&next->nl_entry);
+				list_del(&next->fl_entry);
+				kfree(next);
+			}
+		}
+
+		if (splitoff) {
+			if (!region_split(rmm, this, splitoff))
+				return -ENOMEM;
+		}
+
+		this = region_split(rmm, this, alloc);
+		if (this == NULL)
+			return -ENOMEM;
+
+		this->free = false;
+		list_del(&this->fl_entry);
+		*pnode = this;
+		return 0;
+	}
+
+	return -ENOMEM;
+}
+
+int
+nouveau_mm_init(struct nouveau_mm **prmm, u32 offset, u32 length, u32 block)
+{
+	struct nouveau_mm *rmm;
+	struct nouveau_mm_node *heap;
+
+	heap = kzalloc(sizeof(*heap), GFP_KERNEL);
+	if (!heap)
+		return -ENOMEM;
+	heap->free = true;
+	heap->offset = roundup(offset, block);
+	heap->length = rounddown(offset + length, block) - heap->offset;
+
+	rmm = kzalloc(sizeof(*rmm), GFP_KERNEL);
+	if (!rmm) {
+		kfree(heap);
+		return -ENOMEM;
+	}
+	rmm->block_size = block;
+	mutex_init(&rmm->mutex);
+	INIT_LIST_HEAD(&rmm->nodes);
+	INIT_LIST_HEAD(&rmm->free);
+	list_add(&heap->nl_entry, &rmm->nodes);
+	list_add(&heap->fl_entry, &rmm->free);
+
+	*prmm = rmm;
+	return 0;
+}
+
+int
+nouveau_mm_fini(struct nouveau_mm **prmm)
+{
+	struct nouveau_mm *rmm = *prmm;
+	struct nouveau_mm_node *heap =
+		list_first_entry(&rmm->nodes, struct nouveau_mm_node, nl_entry);
+
+	if (!list_is_singular(&rmm->nodes))
+		return -EBUSY;
+
+	kfree(heap);
+	kfree(rmm);
+	*prmm = NULL;
+	return 0;
+}
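
Editor's note: the allocator added above keeps one ordered node list plus a free list, splitting and merging typed regions on block_size boundaries. Its external lifecycle is the four calls used by the VRAM manager earlier in this series: init, get, put, fini; callers are expected to serialize access themselves (the debug helper takes mm->mutex). A minimal sketch, with placeholder values:

/* Illustrative lifecycle of the allocator; sizes are in the caller's
 * units (the VRAM manager passes 4KiB pages, i.e. bytes >> 12). */
static int
example_mm_roundtrip(u32 length, u32 block_size)
{
	struct nouveau_mm *mm;
	struct nouveau_mm_node *node;
	int ret;

	ret = nouveau_mm_init(&mm, 0, length, block_size);
	if (ret)
		return ret;

	/* type 1, 8 units long, no non-contig minimum, 1-unit alignment */
	ret = nouveau_mm_get(mm, 1, 8, 0, 1, &node);
	if (ret == 0)
		nouveau_mm_put(mm, node);	/* frees and re-merges the region */

	return nouveau_mm_fini(&mm);		/* -EBUSY if allocations remain */
}
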
diff --git a/drivers/gpu/drm/nouveau/nouveau_mm.h b/drivers/gpu/drm/nouveau/nouveau_mm.h
new file mode 100644
index 0000000..af38449
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_mm.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#ifndef __NOUVEAU_REGION_H__
+#define __NOUVEAU_REGION_H__
+
+struct nouveau_mm_node {
+	struct list_head nl_entry;
+	struct list_head fl_entry;
+	struct list_head rl_entry;
+
+	bool free;
+	int  type;
+
+	u32 offset;
+	u32 length;
+};
+
+struct nouveau_mm {
+	struct list_head nodes;
+	struct list_head free;
+
+	struct mutex mutex;
+
+	u32 block_size;
+};
+
+int  nouveau_mm_init(struct nouveau_mm **, u32 offset, u32 length, u32 block);
+int  nouveau_mm_fini(struct nouveau_mm **);
+int  nouveau_mm_pre(struct nouveau_mm *);
+int  nouveau_mm_get(struct nouveau_mm *, int type, u32 size, u32 size_nc,
+		    u32 align, struct nouveau_mm_node **);
+void nouveau_mm_put(struct nouveau_mm *, struct nouveau_mm_node *);
+
+int  nv50_vram_init(struct drm_device *);
+int  nv50_vram_new(struct drm_device *, u64 size, u32 align, u32 size_nc,
+		    u32 memtype, struct nouveau_vram **);
+void nv50_vram_del(struct drm_device *, struct nouveau_vram **);
+bool nv50_vram_flags_valid(struct drm_device *, u32 tile_flags);
+
+int  nvc0_vram_init(struct drm_device *);
+int  nvc0_vram_new(struct drm_device *, u64 size, u32 align, u32 ncmin,
+		    u32 memtype, struct nouveau_vram **);
+bool nvc0_vram_flags_valid(struct drm_device *, u32 tile_flags);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_notifier.c b/drivers/gpu/drm/nouveau/nouveau_notifier.c
index 2cc59f8..fe29d60 100644
--- a/drivers/gpu/drm/nouveau/nouveau_notifier.c
+++ b/drivers/gpu/drm/nouveau/nouveau_notifier.c
@@ -99,7 +99,6 @@
 		       int size, uint32_t *b_offset)
 {
 	struct drm_device *dev = chan->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_gpuobj *nobj = NULL;
 	struct drm_mm_node *mem;
 	uint32_t offset;
@@ -113,31 +112,15 @@
 		return -ENOMEM;
 	}
 
-	offset = chan->notifier_bo->bo.mem.start << PAGE_SHIFT;
-	if (chan->notifier_bo->bo.mem.mem_type == TTM_PL_VRAM) {
-		target = NV_DMA_TARGET_VIDMEM;
-	} else
-	if (chan->notifier_bo->bo.mem.mem_type == TTM_PL_TT) {
-		if (dev_priv->gart_info.type == NOUVEAU_GART_SGDMA &&
-		    dev_priv->card_type < NV_50) {
-			ret = nouveau_sgdma_get_page(dev, offset, &offset);
-			if (ret)
-				return ret;
-			target = NV_DMA_TARGET_PCI;
-		} else {
-			target = NV_DMA_TARGET_AGP;
-			if (dev_priv->card_type >= NV_50)
-				offset += dev_priv->vm_gart_base;
-		}
-	} else {
-		NV_ERROR(dev, "Bad DMA target, mem_type %d!\n",
-			 chan->notifier_bo->bo.mem.mem_type);
-		return -EINVAL;
-	}
+	if (chan->notifier_bo->bo.mem.mem_type == TTM_PL_VRAM)
+		target = NV_MEM_TARGET_VRAM;
+	else
+		target = NV_MEM_TARGET_GART;
+	offset  = chan->notifier_bo->bo.mem.start << PAGE_SHIFT;
 	offset += mem->start;
 
 	ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, offset,
-				     mem->size, NV_DMA_ACCESS_RW, target,
+				     mem->size, NV_MEM_ACCESS_RW, target,
 				     &nobj);
 	if (ret) {
 		drm_mm_put_block(mem);
@@ -181,15 +164,20 @@
 nouveau_ioctl_notifier_alloc(struct drm_device *dev, void *data,
 			     struct drm_file *file_priv)
 {
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct drm_nouveau_notifierobj_alloc *na = data;
 	struct nouveau_channel *chan;
 	int ret;
 
-	NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(na->channel, file_priv, chan);
+	/* completely unnecessary for these chipsets... */
+	if (unlikely(dev_priv->card_type >= NV_C0))
+		return -EINVAL;
+
+	chan = nouveau_channel_get(dev, file_priv, na->channel);
+	if (IS_ERR(chan))
+		return PTR_ERR(chan);
 
 	ret = nouveau_notifier_alloc(chan, na->handle, na->size, &na->offset);
-	if (ret)
-		return ret;
-
-	return 0;
+	nouveau_channel_put(&chan);
+	return ret;
 }
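
Editor's note: this ioctl, like the object ioctls later in the diff, moves from the old channel lookup macro to reference-counted lookup: nouveau_channel_get() returns an ERR_PTR on failure, and every successful lookup must be balanced by nouveau_channel_put(). A minimal sketch of the pattern, with placeholder names:

/* The channel get/put pattern used by the reworked ioctls; the body
 * stands in for whatever work the ioctl actually does. */
static int
example_ioctl_body(struct drm_device *dev, struct drm_file *file_priv,
		   int channel_id)
{
	struct nouveau_channel *chan;
	int ret;

	chan = nouveau_channel_get(dev, file_priv, channel_id);
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	ret = 0;	/* work on chan */

	nouveau_channel_put(&chan);	/* drops the reference taken above */
	return ret;
}
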
diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c
index dd572ad..30b6544 100644
--- a/drivers/gpu/drm/nouveau/nouveau_object.c
+++ b/drivers/gpu/drm/nouveau/nouveau_object.c
@@ -35,6 +35,102 @@
 #include "nouveau_drv.h"
 #include "nouveau_drm.h"
 #include "nouveau_ramht.h"
+#include "nouveau_vm.h"
+
+struct nouveau_gpuobj_method {
+	struct list_head head;
+	u32 mthd;
+	int (*exec)(struct nouveau_channel *, u32 class, u32 mthd, u32 data);
+};
+
+struct nouveau_gpuobj_class {
+	struct list_head head;
+	struct list_head methods;
+	u32 id;
+	u32 engine;
+};
+
+int
+nouveau_gpuobj_class_new(struct drm_device *dev, u32 class, u32 engine)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_gpuobj_class *oc;
+
+	oc = kzalloc(sizeof(*oc), GFP_KERNEL);
+	if (!oc)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&oc->methods);
+	oc->id = class;
+	oc->engine = engine;
+	list_add(&oc->head, &dev_priv->classes);
+	return 0;
+}
+
+int
+nouveau_gpuobj_mthd_new(struct drm_device *dev, u32 class, u32 mthd,
+			int (*exec)(struct nouveau_channel *, u32, u32, u32))
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_gpuobj_method *om;
+	struct nouveau_gpuobj_class *oc;
+
+	list_for_each_entry(oc, &dev_priv->classes, head) {
+		if (oc->id == class)
+			goto found;
+	}
+
+	return -EINVAL;
+
+found:
+	om = kzalloc(sizeof(*om), GFP_KERNEL);
+	if (!om)
+		return -ENOMEM;
+
+	om->mthd = mthd;
+	om->exec = exec;
+	list_add(&om->head, &oc->methods);
+	return 0;
+}
+
+int
+nouveau_gpuobj_mthd_call(struct nouveau_channel *chan,
+			 u32 class, u32 mthd, u32 data)
+{
+	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
+	struct nouveau_gpuobj_method *om;
+	struct nouveau_gpuobj_class *oc;
+
+	list_for_each_entry(oc, &dev_priv->classes, head) {
+		if (oc->id != class)
+			continue;
+
+		list_for_each_entry(om, &oc->methods, head) {
+			if (om->mthd == mthd)
+				return om->exec(chan, class, mthd, data);
+		}
+	}
+
+	return -ENOENT;
+}
+
+int
+nouveau_gpuobj_mthd_call2(struct drm_device *dev, int chid,
+			  u32 class, u32 mthd, u32 data)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_channel *chan = NULL;
+	unsigned long flags;
+	int ret = -EINVAL;
+
+	spin_lock_irqsave(&dev_priv->channels.lock, flags);
+	if (chid > 0 && chid < dev_priv->engine.fifo.channels)
+		chan = dev_priv->channels.ptr[chid];
+	if (chan)
+		ret = nouveau_gpuobj_mthd_call(chan, class, mthd, data);
+	spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
+	return ret;
+}
 
 /* NVidia uses context objects to drive drawing operations.
 
@@ -73,17 +169,14 @@
 		   struct nouveau_gpuobj **gpuobj_ret)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_engine *engine = &dev_priv->engine;
+	struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
 	struct nouveau_gpuobj *gpuobj;
 	struct drm_mm_node *ramin = NULL;
-	int ret;
+	int ret, i;
 
 	NV_DEBUG(dev, "ch%d size=%u align=%d flags=0x%08x\n",
 		 chan ? chan->id : -1, size, align, flags);
 
-	if (!dev_priv || !gpuobj_ret || *gpuobj_ret != NULL)
-		return -EINVAL;
-
 	gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
 	if (!gpuobj)
 		return -ENOMEM;
@@ -98,88 +191,41 @@
 	spin_unlock(&dev_priv->ramin_lock);
 
 	if (chan) {
-		NV_DEBUG(dev, "channel heap\n");
-
 		ramin = drm_mm_search_free(&chan->ramin_heap, size, align, 0);
 		if (ramin)
 			ramin = drm_mm_get_block(ramin, size, align);
-
 		if (!ramin) {
 			nouveau_gpuobj_ref(NULL, &gpuobj);
 			return -ENOMEM;
 		}
-	} else {
-		NV_DEBUG(dev, "global heap\n");
 
-		/* allocate backing pages, sets vinst */
-		ret = engine->instmem.populate(dev, gpuobj, &size);
-		if (ret) {
-			nouveau_gpuobj_ref(NULL, &gpuobj);
-			return ret;
-		}
-
-		/* try and get aperture space */
-		do {
-			if (drm_mm_pre_get(&dev_priv->ramin_heap))
-				return -ENOMEM;
-
-			spin_lock(&dev_priv->ramin_lock);
-			ramin = drm_mm_search_free(&dev_priv->ramin_heap, size,
-						   align, 0);
-			if (ramin == NULL) {
-				spin_unlock(&dev_priv->ramin_lock);
-				nouveau_gpuobj_ref(NULL, &gpuobj);
-				return -ENOMEM;
-			}
-
-			ramin = drm_mm_get_block_atomic(ramin, size, align);
-			spin_unlock(&dev_priv->ramin_lock);
-		} while (ramin == NULL);
-
-		/* on nv50 it's ok to fail, we have a fallback path */
-		if (!ramin && dev_priv->card_type < NV_50) {
-			nouveau_gpuobj_ref(NULL, &gpuobj);
-			return -ENOMEM;
-		}
-	}
-
-	/* if we got a chunk of the aperture, map pages into it */
-	gpuobj->im_pramin = ramin;
-	if (!chan && gpuobj->im_pramin && dev_priv->ramin_available) {
-		ret = engine->instmem.bind(dev, gpuobj);
-		if (ret) {
-			nouveau_gpuobj_ref(NULL, &gpuobj);
-			return ret;
-		}
-	}
-
-	/* calculate the various different addresses for the object */
-	if (chan) {
 		gpuobj->pinst = chan->ramin->pinst;
 		if (gpuobj->pinst != ~0)
-			gpuobj->pinst += gpuobj->im_pramin->start;
+			gpuobj->pinst += ramin->start;
 
-		if (dev_priv->card_type < NV_50) {
-			gpuobj->cinst = gpuobj->pinst;
-		} else {
-			gpuobj->cinst = gpuobj->im_pramin->start;
-			gpuobj->vinst = gpuobj->im_pramin->start +
-					chan->ramin->vinst;
-		}
+		gpuobj->cinst = ramin->start;
+		gpuobj->vinst = ramin->start + chan->ramin->vinst;
+		gpuobj->node  = ramin;
 	} else {
-		if (gpuobj->im_pramin)
-			gpuobj->pinst = gpuobj->im_pramin->start;
-		else
+		ret = instmem->get(gpuobj, size, align);
+		if (ret) {
+			nouveau_gpuobj_ref(NULL, &gpuobj);
+			return ret;
+		}
+
+		ret = -ENOSYS;
+		if (!(flags & NVOBJ_FLAG_DONT_MAP))
+			ret = instmem->map(gpuobj);
+		if (ret)
 			gpuobj->pinst = ~0;
-		gpuobj->cinst = 0xdeadbeef;
+
+		gpuobj->cinst = NVOBJ_CINST_GLOBAL;
 	}
 
 	if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) {
-		int i;
-
 		for (i = 0; i < gpuobj->size; i += 4)
 			nv_wo32(gpuobj, i, 0);
-		engine->instmem.flush(dev);
+		instmem->flush(dev);
 	}
 
 
@@ -195,6 +241,7 @@
 	NV_DEBUG(dev, "\n");
 
 	INIT_LIST_HEAD(&dev_priv->gpuobj_list);
+	INIT_LIST_HEAD(&dev_priv->classes);
 	spin_lock_init(&dev_priv->ramin_lock);
 	dev_priv->ramin_base = ~0;
 
@@ -205,9 +252,20 @@
 nouveau_gpuobj_takedown(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_gpuobj_method *om, *tm;
+	struct nouveau_gpuobj_class *oc, *tc;
 
 	NV_DEBUG(dev, "\n");
 
+	list_for_each_entry_safe(oc, tc, &dev_priv->classes, head) {
+		list_for_each_entry_safe(om, tm, &oc->methods, head) {
+			list_del(&om->head);
+			kfree(om);
+		}
+		list_del(&oc->head);
+		kfree(oc);
+	}
+
 	BUG_ON(!list_empty(&dev_priv->gpuobj_list));
 }
 
@@ -219,26 +277,34 @@
 		container_of(ref, struct nouveau_gpuobj, refcount);
 	struct drm_device *dev = gpuobj->dev;
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_engine *engine = &dev_priv->engine;
+	struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
 	int i;
 
 	NV_DEBUG(dev, "gpuobj %p\n", gpuobj);
 
-	if (gpuobj->im_pramin && (gpuobj->flags & NVOBJ_FLAG_ZERO_FREE)) {
+	if (gpuobj->node && (gpuobj->flags & NVOBJ_FLAG_ZERO_FREE)) {
 		for (i = 0; i < gpuobj->size; i += 4)
 			nv_wo32(gpuobj, i, 0);
-		engine->instmem.flush(dev);
+		instmem->flush(dev);
 	}
 
 	if (gpuobj->dtor)
 		gpuobj->dtor(dev, gpuobj);
 
-	if (gpuobj->im_backing)
-		engine->instmem.clear(dev, gpuobj);
+	if (gpuobj->cinst == NVOBJ_CINST_GLOBAL) {
+		if (gpuobj->node) {
+			instmem->unmap(gpuobj);
+			instmem->put(gpuobj);
+		}
+	} else {
+		if (gpuobj->node) {
+			spin_lock(&dev_priv->ramin_lock);
+			drm_mm_put_block(gpuobj->node);
+			spin_unlock(&dev_priv->ramin_lock);
+		}
+	}
 
 	spin_lock(&dev_priv->ramin_lock);
-	if (gpuobj->im_pramin)
-		drm_mm_put_block(gpuobj->im_pramin);
 	list_del(&gpuobj->list);
 	spin_unlock(&dev_priv->ramin_lock);
 
@@ -278,7 +344,7 @@
 	kref_init(&gpuobj->refcount);
 	gpuobj->size  = size;
 	gpuobj->pinst = pinst;
-	gpuobj->cinst = 0xdeadbeef;
+	gpuobj->cinst = NVOBJ_CINST_GLOBAL;
 	gpuobj->vinst = vinst;
 
 	if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) {
@@ -335,115 +401,152 @@
    The method below creates a DMA object in instance RAM and returns a handle
    to it that can be used to set up context objects.
 */
-int
-nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class,
-		       uint64_t offset, uint64_t size, int access,
-		       int target, struct nouveau_gpuobj **gpuobj)
-{
-	struct drm_device *dev = chan->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
-	int ret;
 
-	NV_DEBUG(dev, "ch%d class=0x%04x offset=0x%llx size=0x%llx\n",
-		 chan->id, class, offset, size);
-	NV_DEBUG(dev, "access=%d target=%d\n", access, target);
+void
+nv50_gpuobj_dma_init(struct nouveau_gpuobj *obj, u32 offset, int class,
+		     u64 base, u64 size, int target, int access,
+		     u32 type, u32 comp)
+{
+	struct drm_nouveau_private *dev_priv = obj->dev->dev_private;
+	struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
+	u32 flags0;
+
+	flags0  = (comp << 29) | (type << 22) | class;
+	flags0 |= 0x00100000;
+
+	switch (access) {
+	case NV_MEM_ACCESS_RO: flags0 |= 0x00040000; break;
+	case NV_MEM_ACCESS_RW:
+	case NV_MEM_ACCESS_WO: flags0 |= 0x00080000; break;
+	default:
+		break;
+	}
 
 	switch (target) {
-	case NV_DMA_TARGET_AGP:
-		offset += dev_priv->gart_info.aper_base;
+	case NV_MEM_TARGET_VRAM:
+		flags0 |= 0x00010000;
+		break;
+	case NV_MEM_TARGET_PCI:
+		flags0 |= 0x00020000;
+		break;
+	case NV_MEM_TARGET_PCI_NOSNOOP:
+		flags0 |= 0x00030000;
+		break;
+	case NV_MEM_TARGET_GART:
+		base += dev_priv->gart_info.aper_base;
+	default:
+		flags0 &= ~0x00100000;
+		break;
+	}
+
+	/* convert to base + limit */
+	size = (base + size) - 1;
+
+	nv_wo32(obj, offset + 0x00, flags0);
+	nv_wo32(obj, offset + 0x04, lower_32_bits(size));
+	nv_wo32(obj, offset + 0x08, lower_32_bits(base));
+	nv_wo32(obj, offset + 0x0c, upper_32_bits(size) << 24 |
+				    upper_32_bits(base));
+	nv_wo32(obj, offset + 0x10, 0x00000000);
+	nv_wo32(obj, offset + 0x14, 0x00000000);
+
+	pinstmem->flush(obj->dev);
+}
+
+int
+nv50_gpuobj_dma_new(struct nouveau_channel *chan, int class, u64 base, u64 size,
+		    int target, int access, u32 type, u32 comp,
+		    struct nouveau_gpuobj **pobj)
+{
+	struct drm_device *dev = chan->dev;
+	int ret;
+
+	ret = nouveau_gpuobj_new(dev, chan, 24, 16, NVOBJ_FLAG_ZERO_FREE, pobj);
+	if (ret)
+		return ret;
+
+	nv50_gpuobj_dma_init(*pobj, 0, class, base, size, target,
+			     access, type, comp);
+	return 0;
+}
+
+int
+nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class, u64 base,
+		       u64 size, int access, int target,
+		       struct nouveau_gpuobj **pobj)
+{
+	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
+	struct drm_device *dev = chan->dev;
+	struct nouveau_gpuobj *obj;
+	u32 flags0, flags2;
+	int ret;
+
+	if (dev_priv->card_type >= NV_50) {
+		u32 comp = (target == NV_MEM_TARGET_VM) ? NV_MEM_COMP_VM : 0;
+		u32 type = (target == NV_MEM_TARGET_VM) ? NV_MEM_TYPE_VM : 0;
+
+		return nv50_gpuobj_dma_new(chan, class, base, size,
+					   target, access, type, comp, pobj);
+	}
+
+	if (target == NV_MEM_TARGET_GART) {
+		if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
+			target = NV_MEM_TARGET_PCI_NOSNOOP;
+			base  += dev_priv->gart_info.aper_base;
+		} else
+		if (base != 0) {
+			base = nouveau_sgdma_get_physical(dev, base);
+			target = NV_MEM_TARGET_PCI;
+		} else {
+			nouveau_gpuobj_ref(dev_priv->gart_info.sg_ctxdma, pobj);
+			return 0;
+		}
+	}
+
+	flags0  = class;
+	flags0 |= 0x00003000; /* PT present, PT linear */
+	flags2  = 0;
+
+	switch (target) {
+	case NV_MEM_TARGET_PCI:
+		flags0 |= 0x00020000;
+		break;
+	case NV_MEM_TARGET_PCI_NOSNOOP:
+		flags0 |= 0x00030000;
 		break;
 	default:
 		break;
 	}
 
-	ret = nouveau_gpuobj_new(dev, chan,
-				 nouveau_gpuobj_class_instmem_size(dev, class),
-				 16, NVOBJ_FLAG_ZERO_ALLOC |
-				 NVOBJ_FLAG_ZERO_FREE, gpuobj);
-	if (ret) {
-		NV_ERROR(dev, "Error creating gpuobj: %d\n", ret);
+	switch (access) {
+	case NV_MEM_ACCESS_RO:
+		flags0 |= 0x00004000;
+		break;
+	case NV_MEM_ACCESS_WO:
+		flags0 |= 0x00008000;
+	default:
+		flags2 |= 0x00000002;
+		break;
+	}
+
+	flags0 |= (base & 0x00000fff) << 20;
+	flags2 |= (base & 0xfffff000);
+
+	ret = nouveau_gpuobj_new(dev, chan, 16, 16, NVOBJ_FLAG_ZERO_FREE, &obj);
+	if (ret)
 		return ret;
-	}
 
-	if (dev_priv->card_type < NV_50) {
-		uint32_t frame, adjust, pte_flags = 0;
+	nv_wo32(obj, 0x00, flags0);
+	nv_wo32(obj, 0x04, size - 1);
+	nv_wo32(obj, 0x08, flags2);
+	nv_wo32(obj, 0x0c, flags2);
 
-		if (access != NV_DMA_ACCESS_RO)
-			pte_flags |= (1<<1);
-		adjust = offset &  0x00000fff;
-		frame  = offset & ~0x00000fff;
-
-		nv_wo32(*gpuobj,  0, ((1<<12) | (1<<13) | (adjust << 20) |
-				      (access << 14) | (target << 16) |
-				      class));
-		nv_wo32(*gpuobj,  4, size - 1);
-		nv_wo32(*gpuobj,  8, frame | pte_flags);
-		nv_wo32(*gpuobj, 12, frame | pte_flags);
-	} else {
-		uint64_t limit = offset + size - 1;
-		uint32_t flags0, flags5;
-
-		if (target == NV_DMA_TARGET_VIDMEM) {
-			flags0 = 0x00190000;
-			flags5 = 0x00010000;
-		} else {
-			flags0 = 0x7fc00000;
-			flags5 = 0x00080000;
-		}
-
-		nv_wo32(*gpuobj,  0, flags0 | class);
-		nv_wo32(*gpuobj,  4, lower_32_bits(limit));
-		nv_wo32(*gpuobj,  8, lower_32_bits(offset));
-		nv_wo32(*gpuobj, 12, ((upper_32_bits(limit) & 0xff) << 24) |
-				      (upper_32_bits(offset) & 0xff));
-		nv_wo32(*gpuobj, 20, flags5);
-	}
-
-	instmem->flush(dev);
-
-	(*gpuobj)->engine = NVOBJ_ENGINE_SW;
-	(*gpuobj)->class  = class;
+	obj->engine = NVOBJ_ENGINE_SW;
+	obj->class  = class;
+	*pobj = obj;
 	return 0;
 }
 
-int
-nouveau_gpuobj_gart_dma_new(struct nouveau_channel *chan,
-			    uint64_t offset, uint64_t size, int access,
-			    struct nouveau_gpuobj **gpuobj,
-			    uint32_t *o_ret)
-{
-	struct drm_device *dev = chan->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	int ret;
-
-	if (dev_priv->gart_info.type == NOUVEAU_GART_AGP ||
-	    (dev_priv->card_type >= NV_50 &&
-	     dev_priv->gart_info.type == NOUVEAU_GART_SGDMA)) {
-		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
-					     offset + dev_priv->vm_gart_base,
-					     size, access, NV_DMA_TARGET_AGP,
-					     gpuobj);
-		if (o_ret)
-			*o_ret = 0;
-	} else
-	if (dev_priv->gart_info.type == NOUVEAU_GART_SGDMA) {
-		nouveau_gpuobj_ref(dev_priv->gart_info.sg_ctxdma, gpuobj);
-		if (offset & ~0xffffffffULL) {
-			NV_ERROR(dev, "obj offset exceeds 32-bits\n");
-			return -EINVAL;
-		}
-		if (o_ret)
-			*o_ret = (uint32_t)offset;
-		ret = (*gpuobj != NULL) ? 0 : -EINVAL;
-	} else {
-		NV_ERROR(dev, "Invalid GART type %d\n", dev_priv->gart_info.type);
-		return -EINVAL;
-	}
-
-	return ret;
-}
-
 /* Context objects in the instance RAM have the following structure.
  * On NV40 they are 32 bytes long, on NV30 and smaller 16 bytes.
 
@@ -495,68 +598,13 @@
    entry[5]:
    set to 0?
 */
-int
-nouveau_gpuobj_gr_new(struct nouveau_channel *chan, int class,
-		      struct nouveau_gpuobj **gpuobj)
-{
-	struct drm_device *dev = chan->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	int ret;
-
-	NV_DEBUG(dev, "ch%d class=0x%04x\n", chan->id, class);
-
-	ret = nouveau_gpuobj_new(dev, chan,
-				 nouveau_gpuobj_class_instmem_size(dev, class),
-				 16,
-				 NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ZERO_FREE,
-				 gpuobj);
-	if (ret) {
-		NV_ERROR(dev, "Error creating gpuobj: %d\n", ret);
-		return ret;
-	}
-
-	if (dev_priv->card_type >= NV_50) {
-		nv_wo32(*gpuobj,  0, class);
-		nv_wo32(*gpuobj, 20, 0x00010000);
-	} else {
-		switch (class) {
-		case NV_CLASS_NULL:
-			nv_wo32(*gpuobj, 0, 0x00001030);
-			nv_wo32(*gpuobj, 4, 0xFFFFFFFF);
-			break;
-		default:
-			if (dev_priv->card_type >= NV_40) {
-				nv_wo32(*gpuobj, 0, class);
-#ifdef __BIG_ENDIAN
-				nv_wo32(*gpuobj, 8, 0x01000000);
-#endif
-			} else {
-#ifdef __BIG_ENDIAN
-				nv_wo32(*gpuobj, 0, class | 0x00080000);
-#else
-				nv_wo32(*gpuobj, 0, class);
-#endif
-			}
-		}
-	}
-	dev_priv->engine.instmem.flush(dev);
-
-	(*gpuobj)->engine = NVOBJ_ENGINE_GR;
-	(*gpuobj)->class  = class;
-	return 0;
-}
-
-int
+static int
 nouveau_gpuobj_sw_new(struct nouveau_channel *chan, int class,
 		      struct nouveau_gpuobj **gpuobj_ret)
 {
-	struct drm_nouveau_private *dev_priv;
+	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
 	struct nouveau_gpuobj *gpuobj;
 
-	if (!chan || !gpuobj_ret || *gpuobj_ret != NULL)
-		return -EINVAL;
-	dev_priv = chan->dev->dev_private;
-
 	gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
 	if (!gpuobj)
 		return -ENOMEM;
@@ -573,6 +621,109 @@
 	return 0;
 }
 
+int
+nouveau_gpuobj_gr_new(struct nouveau_channel *chan, u32 handle, int class)
+{
+	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
+	struct drm_device *dev = chan->dev;
+	struct nouveau_gpuobj_class *oc;
+	struct nouveau_gpuobj *gpuobj;
+	int ret;
+
+	NV_DEBUG(dev, "ch%d class=0x%04x\n", chan->id, class);
+
+	list_for_each_entry(oc, &dev_priv->classes, head) {
+		if (oc->id == class)
+			goto found;
+	}
+
+	NV_ERROR(dev, "illegal object class: 0x%x\n", class);
+	return -EINVAL;
+
+found:
+	switch (oc->engine) {
+	case NVOBJ_ENGINE_SW:
+		if (dev_priv->card_type < NV_C0) {
+			ret = nouveau_gpuobj_sw_new(chan, class, &gpuobj);
+			if (ret)
+				return ret;
+			goto insert;
+		}
+		break;
+	case NVOBJ_ENGINE_GR:
+		if ((dev_priv->card_type >= NV_20 && !chan->ramin_grctx) ||
+		    (dev_priv->card_type  < NV_20 && !chan->pgraph_ctx)) {
+			struct nouveau_pgraph_engine *pgraph =
+				&dev_priv->engine.graph;
+
+			ret = pgraph->create_context(chan);
+			if (ret)
+				return ret;
+		}
+		break;
+	case NVOBJ_ENGINE_CRYPT:
+		if (!chan->crypt_ctx) {
+			struct nouveau_crypt_engine *pcrypt =
+				&dev_priv->engine.crypt;
+
+			ret = pcrypt->create_context(chan);
+			if (ret)
+				return ret;
+		}
+		break;
+	}
+
+	/* we're done if this is fermi */
+	if (dev_priv->card_type >= NV_C0)
+		return 0;
+
+	ret = nouveau_gpuobj_new(dev, chan,
+				 nouveau_gpuobj_class_instmem_size(dev, class),
+				 16,
+				 NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ZERO_FREE,
+				 &gpuobj);
+	if (ret) {
+		NV_ERROR(dev, "error creating gpuobj: %d\n", ret);
+		return ret;
+	}
+
+	if (dev_priv->card_type >= NV_50) {
+		nv_wo32(gpuobj,  0, class);
+		nv_wo32(gpuobj, 20, 0x00010000);
+	} else {
+		switch (class) {
+		case NV_CLASS_NULL:
+			nv_wo32(gpuobj, 0, 0x00001030);
+			nv_wo32(gpuobj, 4, 0xFFFFFFFF);
+			break;
+		default:
+			if (dev_priv->card_type >= NV_40) {
+				nv_wo32(gpuobj, 0, class);
+#ifdef __BIG_ENDIAN
+				nv_wo32(gpuobj, 8, 0x01000000);
+#endif
+			} else {
+#ifdef __BIG_ENDIAN
+				nv_wo32(gpuobj, 0, class | 0x00080000);
+#else
+				nv_wo32(gpuobj, 0, class);
+#endif
+			}
+		}
+	}
+	dev_priv->engine.instmem.flush(dev);
+
+	gpuobj->engine = oc->engine;
+	gpuobj->class  = oc->id;
+
+insert:
+	ret = nouveau_ramht_insert(chan, handle, gpuobj);
+	if (ret)
+		NV_ERROR(dev, "error adding gpuobj to RAMHT: %d\n", ret);
+	nouveau_gpuobj_ref(NULL, &gpuobj);
+	return ret;
+}
+
 static int
 nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan)
 {
@@ -585,7 +736,7 @@
 	NV_DEBUG(dev, "ch%d\n", chan->id);
 
 	/* Base amount for object storage (4KiB enough?) */
-	size = 0x1000;
+	size = 0x2000;
 	base = 0;
 
 	/* PGRAPH context */
@@ -624,12 +775,30 @@
 {
 	struct drm_device *dev = chan->dev;
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
 	struct nouveau_gpuobj *vram = NULL, *tt = NULL;
-	int ret, i;
+	int ret;
 
 	NV_DEBUG(dev, "ch%d vram=0x%08x tt=0x%08x\n", chan->id, vram_h, tt_h);
 
+	if (dev_priv->card_type == NV_C0) {
+		struct nouveau_vm *vm = dev_priv->chan_vm;
+		struct nouveau_vm_pgd *vpgd;
+
+		ret = nouveau_gpuobj_new(dev, NULL, 4096, 0x1000, 0,
+					 &chan->ramin);
+		if (ret)
+			return ret;
+
+		nouveau_vm_ref(vm, &chan->vm, NULL);
+
+		vpgd = list_first_entry(&vm->pgd_list, struct nouveau_vm_pgd, head);
+		nv_wo32(chan->ramin, 0x0200, lower_32_bits(vpgd->obj->vinst));
+		nv_wo32(chan->ramin, 0x0204, upper_32_bits(vpgd->obj->vinst));
+		nv_wo32(chan->ramin, 0x0208, 0xffffffff);
+		nv_wo32(chan->ramin, 0x020c, 0x000000ff);
+		return 0;
+	}
+
 	/* Allocate a chunk of memory for per-channel object storage */
 	ret = nouveau_gpuobj_channel_init_pramin(chan);
 	if (ret) {
@@ -639,14 +808,12 @@
 
 	/* NV50 VM
 	 *  - Allocate per-channel page-directory
-	 *  - Map GART and VRAM into the channel's address space at the
-	 *    locations determined during init.
+	 *  - Link with shared channel VM
 	 */
-	if (dev_priv->card_type >= NV_50) {
+	if (dev_priv->chan_vm) {
 		u32 pgd_offs = (dev_priv->chipset == 0x50) ? 0x1400 : 0x0200;
 		u64 vm_vinst = chan->ramin->vinst + pgd_offs;
 		u32 vm_pinst = chan->ramin->pinst;
-		u32 pde;
 
 		if (vm_pinst != ~0)
 			vm_pinst += pgd_offs;
@@ -655,29 +822,8 @@
 					      0, &chan->vm_pd);
 		if (ret)
 			return ret;
-		for (i = 0; i < 0x4000; i += 8) {
-			nv_wo32(chan->vm_pd, i + 0, 0x00000000);
-			nv_wo32(chan->vm_pd, i + 4, 0xdeadcafe);
-		}
 
-		nouveau_gpuobj_ref(dev_priv->gart_info.sg_ctxdma,
-				   &chan->vm_gart_pt);
-		pde = (dev_priv->vm_gart_base / (512*1024*1024)) * 8;
-		nv_wo32(chan->vm_pd, pde + 0, chan->vm_gart_pt->vinst | 3);
-		nv_wo32(chan->vm_pd, pde + 4, 0x00000000);
-
-		pde = (dev_priv->vm_vram_base / (512*1024*1024)) * 8;
-		for (i = 0; i < dev_priv->vm_vram_pt_nr; i++) {
-			nouveau_gpuobj_ref(dev_priv->vm_vram_pt[i],
-					   &chan->vm_vram_pt[i]);
-
-			nv_wo32(chan->vm_pd, pde + 0,
-				chan->vm_vram_pt[i]->vinst | 0x61);
-			nv_wo32(chan->vm_pd, pde + 4, 0x00000000);
-			pde += 8;
-		}
-
-		instmem->flush(dev);
+		nouveau_vm_ref(dev_priv->chan_vm, &chan->vm, chan->vm_pd);
 	}
 
 	/* RAMHT */
@@ -700,9 +846,8 @@
 	/* VRAM ctxdma */
 	if (dev_priv->card_type >= NV_50) {
 		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
-					     0, dev_priv->vm_end,
-					     NV_DMA_ACCESS_RW,
-					     NV_DMA_TARGET_AGP, &vram);
+					     0, (1ULL << 40), NV_MEM_ACCESS_RW,
+					     NV_MEM_TARGET_VM, &vram);
 		if (ret) {
 			NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret);
 			return ret;
@@ -710,8 +855,8 @@
 	} else {
 		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
 					     0, dev_priv->fb_available_size,
-					     NV_DMA_ACCESS_RW,
-					     NV_DMA_TARGET_VIDMEM, &vram);
+					     NV_MEM_ACCESS_RW,
+					     NV_MEM_TARGET_VRAM, &vram);
 		if (ret) {
 			NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret);
 			return ret;
@@ -728,21 +873,13 @@
 	/* TT memory ctxdma */
 	if (dev_priv->card_type >= NV_50) {
 		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
-					     0, dev_priv->vm_end,
-					     NV_DMA_ACCESS_RW,
-					     NV_DMA_TARGET_AGP, &tt);
-		if (ret) {
-			NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret);
-			return ret;
-		}
-	} else
-	if (dev_priv->gart_info.type != NOUVEAU_GART_NONE) {
-		ret = nouveau_gpuobj_gart_dma_new(chan, 0,
-						  dev_priv->gart_info.aper_size,
-						  NV_DMA_ACCESS_RW, &tt, NULL);
+					     0, (1ULL << 40), NV_MEM_ACCESS_RW,
+					     NV_MEM_TARGET_VM, &tt);
 	} else {
-		NV_ERROR(dev, "Invalid GART type %d\n", dev_priv->gart_info.type);
-		ret = -EINVAL;
+		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
+					     0, dev_priv->gart_info.aper_size,
+					     NV_MEM_ACCESS_RW,
+					     NV_MEM_TARGET_GART, &tt);
 	}
 
 	if (ret) {
@@ -763,21 +900,14 @@
 void
 nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan)
 {
-	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
 	struct drm_device *dev = chan->dev;
-	int i;
 
 	NV_DEBUG(dev, "ch%d\n", chan->id);
 
-	if (!chan->ramht)
-		return;
-
 	nouveau_ramht_ref(NULL, &chan->ramht, chan);
 
+	nouveau_vm_ref(NULL, &chan->vm, chan->vm_pd);
 	nouveau_gpuobj_ref(NULL, &chan->vm_pd);
-	nouveau_gpuobj_ref(NULL, &chan->vm_gart_pt);
-	for (i = 0; i < dev_priv->vm_vram_pt_nr; i++)
-		nouveau_gpuobj_ref(NULL, &chan->vm_vram_pt[i]);
 
 	if (chan->ramin_heap.free_stack.next)
 		drm_mm_takedown(&chan->ramin_heap);
@@ -791,147 +921,91 @@
 	struct nouveau_gpuobj *gpuobj;
 	int i;
 
-	if (dev_priv->card_type < NV_50) {
-		dev_priv->susres.ramin_copy = vmalloc(dev_priv->ramin_rsvd_vram);
-		if (!dev_priv->susres.ramin_copy)
-			return -ENOMEM;
-
-		for (i = 0; i < dev_priv->ramin_rsvd_vram; i += 4)
-			dev_priv->susres.ramin_copy[i/4] = nv_ri32(dev, i);
-		return 0;
-	}
-
 	list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) {
-		if (!gpuobj->im_backing)
+		if (gpuobj->cinst != NVOBJ_CINST_GLOBAL)
 			continue;
 
-		gpuobj->im_backing_suspend = vmalloc(gpuobj->size);
-		if (!gpuobj->im_backing_suspend) {
+		gpuobj->suspend = vmalloc(gpuobj->size);
+		if (!gpuobj->suspend) {
 			nouveau_gpuobj_resume(dev);
 			return -ENOMEM;
 		}
 
 		for (i = 0; i < gpuobj->size; i += 4)
-			gpuobj->im_backing_suspend[i/4] = nv_ro32(gpuobj, i);
+			gpuobj->suspend[i/4] = nv_ro32(gpuobj, i);
 	}
 
 	return 0;
 }
 
 void
-nouveau_gpuobj_suspend_cleanup(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_gpuobj *gpuobj;
-
-	if (dev_priv->card_type < NV_50) {
-		vfree(dev_priv->susres.ramin_copy);
-		dev_priv->susres.ramin_copy = NULL;
-		return;
-	}
-
-	list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) {
-		if (!gpuobj->im_backing_suspend)
-			continue;
-
-		vfree(gpuobj->im_backing_suspend);
-		gpuobj->im_backing_suspend = NULL;
-	}
-}
-
-void
 nouveau_gpuobj_resume(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_gpuobj *gpuobj;
 	int i;
 
-	if (dev_priv->card_type < NV_50) {
-		for (i = 0; i < dev_priv->ramin_rsvd_vram; i += 4)
-			nv_wi32(dev, i, dev_priv->susres.ramin_copy[i/4]);
-		nouveau_gpuobj_suspend_cleanup(dev);
-		return;
-	}
-
 	list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) {
-		if (!gpuobj->im_backing_suspend)
+		if (!gpuobj->suspend)
 			continue;
 
 		for (i = 0; i < gpuobj->size; i += 4)
-			nv_wo32(gpuobj, i, gpuobj->im_backing_suspend[i/4]);
-		dev_priv->engine.instmem.flush(dev);
+			nv_wo32(gpuobj, i, gpuobj->suspend[i/4]);
+
+		vfree(gpuobj->suspend);
+		gpuobj->suspend = NULL;
 	}
 
-	nouveau_gpuobj_suspend_cleanup(dev);
+	dev_priv->engine.instmem.flush(dev);
 }
 
 int nouveau_ioctl_grobj_alloc(struct drm_device *dev, void *data,
 			      struct drm_file *file_priv)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct drm_nouveau_grobj_alloc *init = data;
-	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
-	struct nouveau_pgraph_object_class *grc;
-	struct nouveau_gpuobj *gr = NULL;
 	struct nouveau_channel *chan;
 	int ret;
 
-	NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(init->channel, file_priv, chan);
-
 	if (init->handle == ~0)
 		return -EINVAL;
 
-	grc = pgraph->grclass;
-	while (grc->id) {
-		if (grc->id == init->class)
-			break;
-		grc++;
+	chan = nouveau_channel_get(dev, file_priv, init->channel);
+	if (IS_ERR(chan))
+		return PTR_ERR(chan);
+
+	if (nouveau_ramht_find(chan, init->handle)) {
+		ret = -EEXIST;
+		goto out;
 	}
 
-	if (!grc->id) {
-		NV_ERROR(dev, "Illegal object class: 0x%x\n", init->class);
-		return -EPERM;
-	}
-
-	if (nouveau_ramht_find(chan, init->handle))
-		return -EEXIST;
-
-	if (!grc->software)
-		ret = nouveau_gpuobj_gr_new(chan, grc->id, &gr);
-	else
-		ret = nouveau_gpuobj_sw_new(chan, grc->id, &gr);
+	ret = nouveau_gpuobj_gr_new(chan, init->handle, init->class);
 	if (ret) {
 		NV_ERROR(dev, "Error creating object: %d (%d/0x%08x)\n",
 			 ret, init->channel, init->handle);
-		return ret;
 	}
 
-	ret = nouveau_ramht_insert(chan, init->handle, gr);
-	nouveau_gpuobj_ref(NULL, &gr);
-	if (ret) {
-		NV_ERROR(dev, "Error referencing object: %d (%d/0x%08x)\n",
-			 ret, init->channel, init->handle);
-		return ret;
-	}
-
-	return 0;
+out:
+	nouveau_channel_put(&chan);
+	return ret;
 }
 
 int nouveau_ioctl_gpuobj_free(struct drm_device *dev, void *data,
 			      struct drm_file *file_priv)
 {
 	struct drm_nouveau_gpuobj_free *objfree = data;
-	struct nouveau_gpuobj *gpuobj;
 	struct nouveau_channel *chan;
+	int ret;
 
-	NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(objfree->channel, file_priv, chan);
+	chan = nouveau_channel_get(dev, file_priv, objfree->channel);
+	if (IS_ERR(chan))
+		return PTR_ERR(chan);
 
-	gpuobj = nouveau_ramht_find(chan, objfree->handle);
-	if (!gpuobj)
-		return -ENOENT;
+	/* Synchronize with the user channel */
+	nouveau_channel_idle(chan);
 
-	nouveau_ramht_remove(chan, objfree->handle);
-	return 0;
+	ret = nouveau_ramht_remove(chan, objfree->handle);
+	nouveau_channel_put(&chan);
+	return ret;
 }
 
 u32
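
Editor's note: with this change, object classes are registered per engine at init time via nouveau_gpuobj_class_new() and resolved by nouveau_gpuobj_gr_new(), while software methods are attached per class with nouveau_gpuobj_mthd_new() and dispatched through nouveau_gpuobj_mthd_call(); the per-chipset grclass tables in the grobj_alloc ioctl are gone. A hedged sketch of the registration side (class id and method offset are placeholders, not taken from the driver):

/* Illustrative registration of a software class and one method for it,
 * as an engine's create/init path might do with the new interfaces. */
static int
example_sw_mthd(struct nouveau_channel *chan, u32 class, u32 mthd, u32 data)
{
	return 0;	/* act on the method's data */
}

static int
example_engine_create(struct drm_device *dev)
{
	int ret;

	ret = nouveau_gpuobj_class_new(dev, 0x006e, NVOBJ_ENGINE_SW);
	if (ret)
		return ret;

	return nouveau_gpuobj_mthd_new(dev, 0x006e, 0x0150, example_sw_mthd);
}

Once registered this way, nouveau_gpuobj_gr_new(chan, handle, class) looks the class up on the same list, so the grobj_alloc ioctl above no longer needs to know which classes exist per chipset.
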
diff --git a/drivers/gpu/drm/nouveau/nouveau_pm.c b/drivers/gpu/drm/nouveau/nouveau_pm.c
index 9f7b158..fb846a3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_pm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_pm.c
@@ -27,6 +27,10 @@
 #include "nouveau_drv.h"
 #include "nouveau_pm.h"
 
+#ifdef CONFIG_ACPI
+#include <linux/acpi.h>
+#endif
+#include <linux/power_supply.h>
 #include <linux/hwmon.h>
 #include <linux/hwmon-sysfs.h>
 
@@ -418,8 +422,7 @@
 		return ret;
 	}
 	dev_set_drvdata(hwmon_dev, dev);
-	ret = sysfs_create_group(&hwmon_dev->kobj,
-					&hwmon_attrgroup);
+	ret = sysfs_create_group(&dev->pdev->dev.kobj, &hwmon_attrgroup);
 	if (ret) {
 		NV_ERROR(dev,
 			"Unable to create hwmon sysfs file: %d\n", ret);
@@ -446,6 +449,25 @@
 #endif
 }
 
+#ifdef CONFIG_ACPI
+static int
+nouveau_pm_acpi_event(struct notifier_block *nb, unsigned long val, void *data)
+{
+	struct drm_nouveau_private *dev_priv =
+		container_of(nb, struct drm_nouveau_private, engine.pm.acpi_nb);
+	struct drm_device *dev = dev_priv->dev;
+	struct acpi_bus_event *entry = (struct acpi_bus_event *)data;
+
+	if (strcmp(entry->device_class, "ac_adapter") == 0) {
+		bool ac = power_supply_is_system_supplied();
+
+		NV_DEBUG(dev, "power supply changed: %s\n", ac ? "AC" : "DC");
+	}
+
+	return NOTIFY_OK;
+}
+#endif
+
 int
 nouveau_pm_init(struct drm_device *dev)
 {
@@ -485,6 +507,10 @@
 
 	nouveau_sysfs_init(dev);
 	nouveau_hwmon_init(dev);
+#ifdef CONFIG_ACPI
+	pm->acpi_nb.notifier_call = nouveau_pm_acpi_event;
+	register_acpi_notifier(&pm->acpi_nb);
+#endif
 
 	return 0;
 }
@@ -503,6 +529,9 @@
 	nouveau_perf_fini(dev);
 	nouveau_volt_fini(dev);
 
+#ifdef CONFIG_ACPI
+	unregister_acpi_notifier(&pm->acpi_nb);
+#endif
 	nouveau_hwmon_fini(dev);
 	nouveau_sysfs_fini(dev);
 }
diff --git a/drivers/gpu/drm/nouveau/nouveau_ramht.c b/drivers/gpu/drm/nouveau/nouveau_ramht.c
index 2d85809..bef3e69 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ramht.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ramht.c
@@ -104,17 +104,17 @@
 	nouveau_gpuobj_ref(gpuobj, &entry->gpuobj);
 
 	if (dev_priv->card_type < NV_40) {
-		ctx = NV_RAMHT_CONTEXT_VALID | (gpuobj->cinst >> 4) |
+		ctx = NV_RAMHT_CONTEXT_VALID | (gpuobj->pinst >> 4) |
 		      (chan->id << NV_RAMHT_CONTEXT_CHANNEL_SHIFT) |
 		      (gpuobj->engine << NV_RAMHT_CONTEXT_ENGINE_SHIFT);
 	} else
 	if (dev_priv->card_type < NV_50) {
-		ctx = (gpuobj->cinst >> 4) |
+		ctx = (gpuobj->pinst >> 4) |
 		      (chan->id << NV40_RAMHT_CONTEXT_CHANNEL_SHIFT) |
 		      (gpuobj->engine << NV40_RAMHT_CONTEXT_ENGINE_SHIFT);
 	} else {
 		if (gpuobj->engine == NVOBJ_ENGINE_DISPLAY) {
-			ctx = (gpuobj->cinst << 10) | 2;
+			ctx = (gpuobj->cinst << 10) | chan->id;
 		} else {
 			ctx = (gpuobj->cinst >> 4) |
 			      ((gpuobj->engine <<
@@ -214,18 +214,19 @@
 	spin_unlock_irqrestore(&chan->ramht->lock, flags);
 }
 
-void
+int
 nouveau_ramht_remove(struct nouveau_channel *chan, u32 handle)
 {
 	struct nouveau_ramht_entry *entry;
 
 	entry = nouveau_ramht_remove_entry(chan, handle);
 	if (!entry)
-		return;
+		return -ENOENT;
 
 	nouveau_ramht_remove_hash(chan, entry->handle);
 	nouveau_gpuobj_ref(NULL, &entry->gpuobj);
 	kfree(entry);
+	return 0;
 }
 
 struct nouveau_gpuobj *
diff --git a/drivers/gpu/drm/nouveau/nouveau_ramht.h b/drivers/gpu/drm/nouveau/nouveau_ramht.h
index b79cb5e..c82de98 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ramht.h
+++ b/drivers/gpu/drm/nouveau/nouveau_ramht.h
@@ -48,7 +48,7 @@
 
 extern int  nouveau_ramht_insert(struct nouveau_channel *, u32 handle,
 				 struct nouveau_gpuobj *);
-extern void nouveau_ramht_remove(struct nouveau_channel *, u32 handle);
+extern int  nouveau_ramht_remove(struct nouveau_channel *, u32 handle);
 extern struct nouveau_gpuobj *
 nouveau_ramht_find(struct nouveau_channel *chan, u32 handle);
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_reg.h b/drivers/gpu/drm/nouveau/nouveau_reg.h
index 1b42541..04e8fb7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_reg.h
+++ b/drivers/gpu/drm/nouveau/nouveau_reg.h
@@ -45,6 +45,11 @@
 #	define NV04_PFB_REF_CMD_REFRESH				(1 << 0)
 #define NV04_PFB_PRE						0x001002d4
 #	define NV04_PFB_PRE_CMD_PRECHARGE			(1 << 0)
+#define NV20_PFB_ZCOMP(i)                              (0x00100300 + 4*(i))
+#	define NV20_PFB_ZCOMP_MODE_32				(4 << 24)
+#	define NV20_PFB_ZCOMP_EN				(1 << 31)
+#	define NV25_PFB_ZCOMP_MODE_16				(1 << 20)
+#	define NV25_PFB_ZCOMP_MODE_32				(2 << 20)
 #define NV10_PFB_CLOSE_PAGE2					0x0010033c
 #define NV04_PFB_SCRAMBLE(i)                         (0x00100400 + 4 * (i))
 #define NV40_PFB_TILE(i)                              (0x00100600 + (i*16))
@@ -74,17 +79,6 @@
 #    define NV40_RAMHT_CONTEXT_ENGINE_SHIFT                20
 #    define NV40_RAMHT_CONTEXT_INSTANCE_SHIFT              0
 
-/* DMA object defines */
-#define NV_DMA_ACCESS_RW 0
-#define NV_DMA_ACCESS_RO 1
-#define NV_DMA_ACCESS_WO 2
-#define NV_DMA_TARGET_VIDMEM 0
-#define NV_DMA_TARGET_PCI    2
-#define NV_DMA_TARGET_AGP    3
-/* The following is not a real value used by the card, it's changed by
- * nouveau_object_dma_create */
-#define NV_DMA_TARGET_PCI_NONLINEAR 8
-
 /* Some object classes we care about in the drm */
 #define NV_CLASS_DMA_FROM_MEMORY                           0x00000002
 #define NV_CLASS_DMA_TO_MEMORY                             0x00000003
@@ -332,6 +326,7 @@
 #define NV04_PGRAPH_BSWIZZLE5                              0x004006A0
 #define NV03_PGRAPH_STATUS                                 0x004006B0
 #define NV04_PGRAPH_STATUS                                 0x00400700
+#    define NV40_PGRAPH_STATUS_SYNC_STALL                  0x00004000
 #define NV04_PGRAPH_TRAPPED_ADDR                           0x00400704
 #define NV04_PGRAPH_TRAPPED_DATA                           0x00400708
 #define NV04_PGRAPH_SURFACE                                0x0040070C
@@ -378,6 +373,7 @@
 #define NV20_PGRAPH_TLIMIT(i)                              (0x00400904 + (i*16))
 #define NV20_PGRAPH_TSIZE(i)                               (0x00400908 + (i*16))
 #define NV20_PGRAPH_TSTATUS(i)                             (0x0040090C + (i*16))
+#define NV20_PGRAPH_ZCOMP(i)                               (0x00400980 + 4*(i))
 #define NV10_PGRAPH_TILE(i)                                (0x00400B00 + (i*16))
 #define NV10_PGRAPH_TLIMIT(i)                              (0x00400B04 + (i*16))
 #define NV10_PGRAPH_TSIZE(i)                               (0x00400B08 + (i*16))
@@ -714,31 +710,32 @@
 #define NV50_PDISPLAY_INTR_1_CLK_UNK10                               0x00000010
 #define NV50_PDISPLAY_INTR_1_CLK_UNK20                               0x00000020
 #define NV50_PDISPLAY_INTR_1_CLK_UNK40                               0x00000040
-#define NV50_PDISPLAY_INTR_EN                                        0x0061002c
-#define NV50_PDISPLAY_INTR_EN_VBLANK_CRTC                            0x0000000c
-#define NV50_PDISPLAY_INTR_EN_VBLANK_CRTC_(n)                   (1 << ((n) + 2))
-#define NV50_PDISPLAY_INTR_EN_VBLANK_CRTC_0                          0x00000004
-#define NV50_PDISPLAY_INTR_EN_VBLANK_CRTC_1                          0x00000008
-#define NV50_PDISPLAY_INTR_EN_CLK_UNK10                              0x00000010
-#define NV50_PDISPLAY_INTR_EN_CLK_UNK20                              0x00000020
-#define NV50_PDISPLAY_INTR_EN_CLK_UNK40                              0x00000040
+#define NV50_PDISPLAY_INTR_EN_0                                      0x00610028
+#define NV50_PDISPLAY_INTR_EN_1                                      0x0061002c
+#define NV50_PDISPLAY_INTR_EN_1_VBLANK_CRTC                          0x0000000c
+#define NV50_PDISPLAY_INTR_EN_1_VBLANK_CRTC_(n)                 (1 << ((n) + 2))
+#define NV50_PDISPLAY_INTR_EN_1_VBLANK_CRTC_0                        0x00000004
+#define NV50_PDISPLAY_INTR_EN_1_VBLANK_CRTC_1                        0x00000008
+#define NV50_PDISPLAY_INTR_EN_1_CLK_UNK10                            0x00000010
+#define NV50_PDISPLAY_INTR_EN_1_CLK_UNK20                            0x00000020
+#define NV50_PDISPLAY_INTR_EN_1_CLK_UNK40                            0x00000040
 #define NV50_PDISPLAY_UNK30_CTRL                                     0x00610030
 #define NV50_PDISPLAY_UNK30_CTRL_UPDATE_VCLK0                        0x00000200
 #define NV50_PDISPLAY_UNK30_CTRL_UPDATE_VCLK1                        0x00000400
 #define NV50_PDISPLAY_UNK30_CTRL_PENDING                             0x80000000
-#define NV50_PDISPLAY_TRAPPED_ADDR                                   0x00610080
-#define NV50_PDISPLAY_TRAPPED_DATA                                   0x00610084
-#define NV50_PDISPLAY_CHANNEL_STAT(i)                  ((i) * 0x10 + 0x00610200)
-#define NV50_PDISPLAY_CHANNEL_STAT_DMA                               0x00000010
-#define NV50_PDISPLAY_CHANNEL_STAT_DMA_DISABLED                      0x00000000
-#define NV50_PDISPLAY_CHANNEL_STAT_DMA_ENABLED                       0x00000010
-#define NV50_PDISPLAY_CHANNEL_DMA_CB(i)                ((i) * 0x10 + 0x00610204)
-#define NV50_PDISPLAY_CHANNEL_DMA_CB_LOCATION                        0x00000002
-#define NV50_PDISPLAY_CHANNEL_DMA_CB_LOCATION_VRAM                   0x00000000
-#define NV50_PDISPLAY_CHANNEL_DMA_CB_LOCATION_SYSTEM                 0x00000002
-#define NV50_PDISPLAY_CHANNEL_DMA_CB_VALID                           0x00000001
-#define NV50_PDISPLAY_CHANNEL_UNK2(i)                  ((i) * 0x10 + 0x00610208)
-#define NV50_PDISPLAY_CHANNEL_UNK3(i)                  ((i) * 0x10 + 0x0061020c)
+#define NV50_PDISPLAY_TRAPPED_ADDR(i)                  ((i) * 0x08 + 0x00610080)
+#define NV50_PDISPLAY_TRAPPED_DATA(i)                  ((i) * 0x08 + 0x00610084)
+#define NV50_PDISPLAY_EVO_CTRL(i)                      ((i) * 0x10 + 0x00610200)
+#define NV50_PDISPLAY_EVO_CTRL_DMA                                   0x00000010
+#define NV50_PDISPLAY_EVO_CTRL_DMA_DISABLED                          0x00000000
+#define NV50_PDISPLAY_EVO_CTRL_DMA_ENABLED                           0x00000010
+#define NV50_PDISPLAY_EVO_DMA_CB(i)                    ((i) * 0x10 + 0x00610204)
+#define NV50_PDISPLAY_EVO_DMA_CB_LOCATION                            0x00000002
+#define NV50_PDISPLAY_EVO_DMA_CB_LOCATION_VRAM                       0x00000000
+#define NV50_PDISPLAY_EVO_DMA_CB_LOCATION_SYSTEM                     0x00000002
+#define NV50_PDISPLAY_EVO_DMA_CB_VALID                               0x00000001
+#define NV50_PDISPLAY_EVO_UNK2(i)                      ((i) * 0x10 + 0x00610208)
+#define NV50_PDISPLAY_EVO_HASH_TAG(i)                  ((i) * 0x10 + 0x0061020c)
 
 #define NV50_PDISPLAY_CURSOR                                         0x00610270
 #define NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i)           ((i) * 0x10 + 0x00610270)
@@ -746,15 +743,11 @@
 #define NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS                     0x00030000
 #define NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS_ACTIVE              0x00010000
 
-#define NV50_PDISPLAY_CTRL_STATE                                     0x00610300
-#define NV50_PDISPLAY_CTRL_STATE_PENDING                             0x80000000
-#define NV50_PDISPLAY_CTRL_STATE_METHOD                              0x00001ffc
-#define NV50_PDISPLAY_CTRL_STATE_ENABLE                              0x00000001
-#define NV50_PDISPLAY_CTRL_VAL                                       0x00610304
-#define NV50_PDISPLAY_UNK_380                                        0x00610380
-#define NV50_PDISPLAY_RAM_AMOUNT                                     0x00610384
-#define NV50_PDISPLAY_UNK_388                                        0x00610388
-#define NV50_PDISPLAY_UNK_38C                                        0x0061038c
+#define NV50_PDISPLAY_PIO_CTRL                                       0x00610300
+#define NV50_PDISPLAY_PIO_CTRL_PENDING                               0x80000000
+#define NV50_PDISPLAY_PIO_CTRL_MTHD                                  0x00001ffc
+#define NV50_PDISPLAY_PIO_CTRL_ENABLED                               0x00000001
+#define NV50_PDISPLAY_PIO_DATA                                       0x00610304
 
 #define NV50_PDISPLAY_CRTC_P(i, r)        ((i) * 0x540 + NV50_PDISPLAY_CRTC_##r)
 #define NV50_PDISPLAY_CRTC_C(i, r)    (4 + (i) * 0x540 + NV50_PDISPLAY_CRTC_##r)
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index d4ac970..9a250eb 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -14,7 +14,7 @@
 	dma_addr_t *pages;
 	unsigned nr_pages;
 
-	unsigned pte_start;
+	u64 offset;
 	bool bound;
 };
 
@@ -74,18 +74,6 @@
 	}
 }
 
-static inline unsigned
-nouveau_sgdma_pte(struct drm_device *dev, uint64_t offset)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	unsigned pte = (offset >> NV_CTXDMA_PAGE_SHIFT);
-
-	if (dev_priv->card_type < NV_50)
-		return pte + 2;
-
-	return pte << 1;
-}
-
 static int
 nouveau_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
 {
@@ -97,32 +85,17 @@
 
 	NV_DEBUG(dev, "pg=0x%lx\n", mem->start);
 
-	pte = nouveau_sgdma_pte(nvbe->dev, mem->start << PAGE_SHIFT);
-	nvbe->pte_start = pte;
+	nvbe->offset = mem->start << PAGE_SHIFT;
+	pte = (nvbe->offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
 	for (i = 0; i < nvbe->nr_pages; i++) {
 		dma_addr_t dma_offset = nvbe->pages[i];
 		uint32_t offset_l = lower_32_bits(dma_offset);
-		uint32_t offset_h = upper_32_bits(dma_offset);
 
-		for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++) {
-			if (dev_priv->card_type < NV_50) {
-				nv_wo32(gpuobj, (pte * 4) + 0, offset_l | 3);
-				pte += 1;
-			} else {
-				nv_wo32(gpuobj, (pte * 4) + 0, offset_l | 0x21);
-				nv_wo32(gpuobj, (pte * 4) + 4, offset_h & 0xff);
-				pte += 2;
-			}
-
+		for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++) {
+			nv_wo32(gpuobj, (pte * 4) + 0, offset_l | 3);
 			dma_offset += NV_CTXDMA_PAGE_SIZE;
 		}
 	}
-	dev_priv->engine.instmem.flush(nvbe->dev);
-
-	if (dev_priv->card_type == NV_50) {
-		dev_priv->engine.fifo.tlb_flush(dev);
-		dev_priv->engine.graph.tlb_flush(dev);
-	}
 
 	nvbe->bound = true;
 	return 0;
@@ -142,28 +115,10 @@
 	if (!nvbe->bound)
 		return 0;
 
-	pte = nvbe->pte_start;
+	pte = (nvbe->offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
 	for (i = 0; i < nvbe->nr_pages; i++) {
-		dma_addr_t dma_offset = dev_priv->gart_info.sg_dummy_bus;
-
-		for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++) {
-			if (dev_priv->card_type < NV_50) {
-				nv_wo32(gpuobj, (pte * 4) + 0, dma_offset | 3);
-				pte += 1;
-			} else {
-				nv_wo32(gpuobj, (pte * 4) + 0, 0x00000000);
-				nv_wo32(gpuobj, (pte * 4) + 4, 0x00000000);
-				pte += 2;
-			}
-
-			dma_offset += NV_CTXDMA_PAGE_SIZE;
-		}
-	}
-	dev_priv->engine.instmem.flush(nvbe->dev);
-
-	if (dev_priv->card_type == NV_50) {
-		dev_priv->engine.fifo.tlb_flush(dev);
-		dev_priv->engine.graph.tlb_flush(dev);
+		for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++)
+			nv_wo32(gpuobj, (pte * 4) + 0, 0x00000000);
 	}
 
 	nvbe->bound = false;
@@ -186,6 +141,35 @@
 	}
 }
 
+static int
+nv50_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
+{
+	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+	struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
+
+	nvbe->offset = mem->start << PAGE_SHIFT;
+
+	nouveau_vm_map_sg(&dev_priv->gart_info.vma, nvbe->offset,
+			  nvbe->nr_pages << PAGE_SHIFT, nvbe->pages);
+	nvbe->bound = true;
+	return 0;
+}
+
+static int
+nv50_sgdma_unbind(struct ttm_backend *be)
+{
+	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+	struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
+
+	if (!nvbe->bound)
+		return 0;
+
+	nouveau_vm_unmap_at(&dev_priv->gart_info.vma, nvbe->offset,
+			    nvbe->nr_pages << PAGE_SHIFT);
+	nvbe->bound = false;
+	return 0;
+}
+
 static struct ttm_backend_func nouveau_sgdma_backend = {
 	.populate		= nouveau_sgdma_populate,
 	.clear			= nouveau_sgdma_clear,
@@ -194,23 +178,30 @@
 	.destroy		= nouveau_sgdma_destroy
 };
 
+static struct ttm_backend_func nv50_sgdma_backend = {
+	.populate		= nouveau_sgdma_populate,
+	.clear			= nouveau_sgdma_clear,
+	.bind			= nv50_sgdma_bind,
+	.unbind			= nv50_sgdma_unbind,
+	.destroy		= nouveau_sgdma_destroy
+};
+
 struct ttm_backend *
 nouveau_sgdma_init_ttm(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_sgdma_be *nvbe;
 
-	if (!dev_priv->gart_info.sg_ctxdma)
-		return NULL;
-
 	nvbe = kzalloc(sizeof(*nvbe), GFP_KERNEL);
 	if (!nvbe)
 		return NULL;
 
 	nvbe->dev = dev;
 
-	nvbe->backend.func	= &nouveau_sgdma_backend;
-
+	if (dev_priv->card_type < NV_50)
+		nvbe->backend.func = &nouveau_sgdma_backend;
+	else
+		nvbe->backend.func = &nv50_sgdma_backend;
 	return &nvbe->backend;
 }
 
@@ -218,7 +209,6 @@
 nouveau_sgdma_init(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct pci_dev *pdev = dev->pdev;
 	struct nouveau_gpuobj *gpuobj = NULL;
 	uint32_t aper_size, obj_size;
 	int i, ret;
@@ -231,68 +221,40 @@
 
 		obj_size  = (aper_size >> NV_CTXDMA_PAGE_SHIFT) * 4;
 		obj_size += 8; /* ctxdma header */
-	} else {
-		/* 1 entire VM page table */
-		aper_size = (512 * 1024 * 1024);
-		obj_size  = (aper_size >> NV_CTXDMA_PAGE_SHIFT) * 8;
-	}
 
-	ret = nouveau_gpuobj_new(dev, NULL, obj_size, 16,
-				      NVOBJ_FLAG_ZERO_ALLOC |
-				      NVOBJ_FLAG_ZERO_FREE, &gpuobj);
-	if (ret) {
-		NV_ERROR(dev, "Error creating sgdma object: %d\n", ret);
-		return ret;
-	}
+		ret = nouveau_gpuobj_new(dev, NULL, obj_size, 16,
+					      NVOBJ_FLAG_ZERO_ALLOC |
+					      NVOBJ_FLAG_ZERO_FREE, &gpuobj);
+		if (ret) {
+			NV_ERROR(dev, "Error creating sgdma object: %d\n", ret);
+			return ret;
+		}
 
-	dev_priv->gart_info.sg_dummy_page =
-		alloc_page(GFP_KERNEL|__GFP_DMA32|__GFP_ZERO);
-	if (!dev_priv->gart_info.sg_dummy_page) {
-		nouveau_gpuobj_ref(NULL, &gpuobj);
-		return -ENOMEM;
-	}
-
-	set_bit(PG_locked, &dev_priv->gart_info.sg_dummy_page->flags);
-	dev_priv->gart_info.sg_dummy_bus =
-		pci_map_page(pdev, dev_priv->gart_info.sg_dummy_page, 0,
-			     PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
-	if (pci_dma_mapping_error(pdev, dev_priv->gart_info.sg_dummy_bus)) {
-		nouveau_gpuobj_ref(NULL, &gpuobj);
-		return -EFAULT;
-	}
-
-	if (dev_priv->card_type < NV_50) {
-		/* special case, allocated from global instmem heap so
-		 * cinst is invalid, we use it on all channels though so
-		 * cinst needs to be valid, set it the same as pinst
-		 */
-		gpuobj->cinst = gpuobj->pinst;
-
-		/* Maybe use NV_DMA_TARGET_AGP for PCIE? NVIDIA do this, and
-		 * confirmed to work on c51.  Perhaps means NV_DMA_TARGET_PCIE
-		 * on those cards? */
 		nv_wo32(gpuobj, 0, NV_CLASS_DMA_IN_MEMORY |
 				   (1 << 12) /* PT present */ |
 				   (0 << 13) /* PT *not* linear */ |
-				   (NV_DMA_ACCESS_RW  << 14) |
-				   (NV_DMA_TARGET_PCI << 16));
+				   (0 << 14) /* RW */ |
+				   (2 << 16) /* PCI */);
 		nv_wo32(gpuobj, 4, aper_size - 1);
-		for (i = 2; i < 2 + (aper_size >> 12); i++) {
-			nv_wo32(gpuobj, i * 4,
-				dev_priv->gart_info.sg_dummy_bus | 3);
-		}
-	} else {
-		for (i = 0; i < obj_size; i += 8) {
-			nv_wo32(gpuobj, i + 0, 0x00000000);
-			nv_wo32(gpuobj, i + 4, 0x00000000);
-		}
+		for (i = 2; i < 2 + (aper_size >> 12); i++)
+			nv_wo32(gpuobj, i * 4, 0x00000000);
+
+		dev_priv->gart_info.sg_ctxdma = gpuobj;
+		dev_priv->gart_info.aper_base = 0;
+		dev_priv->gart_info.aper_size = aper_size;
+	} else
+	if (dev_priv->chan_vm) {
+		ret = nouveau_vm_get(dev_priv->chan_vm, 512 * 1024 * 1024,
+				     12, NV_MEM_ACCESS_RW,
+				     &dev_priv->gart_info.vma);
+		if (ret)
+			return ret;
+
+		dev_priv->gart_info.aper_base = dev_priv->gart_info.vma.offset;
+		dev_priv->gart_info.aper_size = 512 * 1024 * 1024;
 	}
-	dev_priv->engine.instmem.flush(dev);
 
 	dev_priv->gart_info.type      = NOUVEAU_GART_SGDMA;
-	dev_priv->gart_info.aper_base = 0;
-	dev_priv->gart_info.aper_size = aper_size;
-	dev_priv->gart_info.sg_ctxdma = gpuobj;
 	return 0;
 }
 
@@ -301,31 +263,19 @@
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 
-	if (dev_priv->gart_info.sg_dummy_page) {
-		pci_unmap_page(dev->pdev, dev_priv->gart_info.sg_dummy_bus,
-			       NV_CTXDMA_PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
-		unlock_page(dev_priv->gart_info.sg_dummy_page);
-		__free_page(dev_priv->gart_info.sg_dummy_page);
-		dev_priv->gart_info.sg_dummy_page = NULL;
-		dev_priv->gart_info.sg_dummy_bus = 0;
-	}
-
 	nouveau_gpuobj_ref(NULL, &dev_priv->gart_info.sg_ctxdma);
+	nouveau_vm_put(&dev_priv->gart_info.vma);
 }
 
-int
-nouveau_sgdma_get_page(struct drm_device *dev, uint32_t offset, uint32_t *page)
+uint32_t
+nouveau_sgdma_get_physical(struct drm_device *dev, uint32_t offset)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
-	int pte;
+	int pte = (offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
 
-	pte = (offset >> NV_CTXDMA_PAGE_SHIFT) << 2;
-	if (dev_priv->card_type < NV_50) {
-		*page = nv_ro32(gpuobj, (pte + 8)) & ~NV_CTXDMA_PAGE_MASK;
-		return 0;
-	}
+	BUG_ON(dev_priv->card_type >= NV_50);
 
-	NV_ERROR(dev, "Unimplemented on NV50\n");
-	return -EINVAL;
+	return (nv_ro32(gpuobj, 4 * pte) & ~NV_CTXDMA_PAGE_MASK) |
+		(offset & NV_CTXDMA_PAGE_MASK);
 }
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
index 049f755..a54fc431 100644
--- a/drivers/gpu/drm/nouveau/nouveau_state.c
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -53,10 +53,10 @@
 		engine->instmem.takedown	= nv04_instmem_takedown;
 		engine->instmem.suspend		= nv04_instmem_suspend;
 		engine->instmem.resume		= nv04_instmem_resume;
-		engine->instmem.populate	= nv04_instmem_populate;
-		engine->instmem.clear		= nv04_instmem_clear;
-		engine->instmem.bind		= nv04_instmem_bind;
-		engine->instmem.unbind		= nv04_instmem_unbind;
+		engine->instmem.get		= nv04_instmem_get;
+		engine->instmem.put		= nv04_instmem_put;
+		engine->instmem.map		= nv04_instmem_map;
+		engine->instmem.unmap		= nv04_instmem_unmap;
 		engine->instmem.flush		= nv04_instmem_flush;
 		engine->mc.init			= nv04_mc_init;
 		engine->mc.takedown		= nv04_mc_takedown;
@@ -65,7 +65,6 @@
 		engine->timer.takedown		= nv04_timer_takedown;
 		engine->fb.init			= nv04_fb_init;
 		engine->fb.takedown		= nv04_fb_takedown;
-		engine->graph.grclass		= nv04_graph_grclass;
 		engine->graph.init		= nv04_graph_init;
 		engine->graph.takedown		= nv04_graph_takedown;
 		engine->graph.fifo_access	= nv04_graph_fifo_access;
@@ -76,7 +75,7 @@
 		engine->graph.unload_context	= nv04_graph_unload_context;
 		engine->fifo.channels		= 16;
 		engine->fifo.init		= nv04_fifo_init;
-		engine->fifo.takedown		= nouveau_stub_takedown;
+		engine->fifo.takedown		= nv04_fifo_fini;
 		engine->fifo.disable		= nv04_fifo_disable;
 		engine->fifo.enable		= nv04_fifo_enable;
 		engine->fifo.reassign		= nv04_fifo_reassign;
@@ -99,16 +98,20 @@
 		engine->pm.clock_get		= nv04_pm_clock_get;
 		engine->pm.clock_pre		= nv04_pm_clock_pre;
 		engine->pm.clock_set		= nv04_pm_clock_set;
+		engine->crypt.init		= nouveau_stub_init;
+		engine->crypt.takedown		= nouveau_stub_takedown;
+		engine->vram.init		= nouveau_mem_detect;
+		engine->vram.flags_valid	= nouveau_mem_flags_valid;
 		break;
 	case 0x10:
 		engine->instmem.init		= nv04_instmem_init;
 		engine->instmem.takedown	= nv04_instmem_takedown;
 		engine->instmem.suspend		= nv04_instmem_suspend;
 		engine->instmem.resume		= nv04_instmem_resume;
-		engine->instmem.populate	= nv04_instmem_populate;
-		engine->instmem.clear		= nv04_instmem_clear;
-		engine->instmem.bind		= nv04_instmem_bind;
-		engine->instmem.unbind		= nv04_instmem_unbind;
+		engine->instmem.get		= nv04_instmem_get;
+		engine->instmem.put		= nv04_instmem_put;
+		engine->instmem.map		= nv04_instmem_map;
+		engine->instmem.unmap		= nv04_instmem_unmap;
 		engine->instmem.flush		= nv04_instmem_flush;
 		engine->mc.init			= nv04_mc_init;
 		engine->mc.takedown		= nv04_mc_takedown;
@@ -117,8 +120,9 @@
 		engine->timer.takedown		= nv04_timer_takedown;
 		engine->fb.init			= nv10_fb_init;
 		engine->fb.takedown		= nv10_fb_takedown;
-		engine->fb.set_region_tiling	= nv10_fb_set_region_tiling;
-		engine->graph.grclass		= nv10_graph_grclass;
+		engine->fb.init_tile_region	= nv10_fb_init_tile_region;
+		engine->fb.set_tile_region	= nv10_fb_set_tile_region;
+		engine->fb.free_tile_region	= nv10_fb_free_tile_region;
 		engine->graph.init		= nv10_graph_init;
 		engine->graph.takedown		= nv10_graph_takedown;
 		engine->graph.channel		= nv10_graph_channel;
@@ -127,17 +131,17 @@
 		engine->graph.fifo_access	= nv04_graph_fifo_access;
 		engine->graph.load_context	= nv10_graph_load_context;
 		engine->graph.unload_context	= nv10_graph_unload_context;
-		engine->graph.set_region_tiling	= nv10_graph_set_region_tiling;
+		engine->graph.set_tile_region	= nv10_graph_set_tile_region;
 		engine->fifo.channels		= 32;
 		engine->fifo.init		= nv10_fifo_init;
-		engine->fifo.takedown		= nouveau_stub_takedown;
+		engine->fifo.takedown		= nv04_fifo_fini;
 		engine->fifo.disable		= nv04_fifo_disable;
 		engine->fifo.enable		= nv04_fifo_enable;
 		engine->fifo.reassign		= nv04_fifo_reassign;
 		engine->fifo.cache_pull		= nv04_fifo_cache_pull;
 		engine->fifo.channel_id		= nv10_fifo_channel_id;
 		engine->fifo.create_context	= nv10_fifo_create_context;
-		engine->fifo.destroy_context	= nv10_fifo_destroy_context;
+		engine->fifo.destroy_context	= nv04_fifo_destroy_context;
 		engine->fifo.load_context	= nv10_fifo_load_context;
 		engine->fifo.unload_context	= nv10_fifo_unload_context;
 		engine->display.early_init	= nv04_display_early_init;
@@ -153,16 +157,20 @@
 		engine->pm.clock_get		= nv04_pm_clock_get;
 		engine->pm.clock_pre		= nv04_pm_clock_pre;
 		engine->pm.clock_set		= nv04_pm_clock_set;
+		engine->crypt.init		= nouveau_stub_init;
+		engine->crypt.takedown		= nouveau_stub_takedown;
+		engine->vram.init		= nouveau_mem_detect;
+		engine->vram.flags_valid	= nouveau_mem_flags_valid;
 		break;
 	case 0x20:
 		engine->instmem.init		= nv04_instmem_init;
 		engine->instmem.takedown	= nv04_instmem_takedown;
 		engine->instmem.suspend		= nv04_instmem_suspend;
 		engine->instmem.resume		= nv04_instmem_resume;
-		engine->instmem.populate	= nv04_instmem_populate;
-		engine->instmem.clear		= nv04_instmem_clear;
-		engine->instmem.bind		= nv04_instmem_bind;
-		engine->instmem.unbind		= nv04_instmem_unbind;
+		engine->instmem.get		= nv04_instmem_get;
+		engine->instmem.put		= nv04_instmem_put;
+		engine->instmem.map		= nv04_instmem_map;
+		engine->instmem.unmap		= nv04_instmem_unmap;
 		engine->instmem.flush		= nv04_instmem_flush;
 		engine->mc.init			= nv04_mc_init;
 		engine->mc.takedown		= nv04_mc_takedown;
@@ -171,8 +179,9 @@
 		engine->timer.takedown		= nv04_timer_takedown;
 		engine->fb.init			= nv10_fb_init;
 		engine->fb.takedown		= nv10_fb_takedown;
-		engine->fb.set_region_tiling	= nv10_fb_set_region_tiling;
-		engine->graph.grclass		= nv20_graph_grclass;
+		engine->fb.init_tile_region	= nv10_fb_init_tile_region;
+		engine->fb.set_tile_region	= nv10_fb_set_tile_region;
+		engine->fb.free_tile_region	= nv10_fb_free_tile_region;
 		engine->graph.init		= nv20_graph_init;
 		engine->graph.takedown		= nv20_graph_takedown;
 		engine->graph.channel		= nv10_graph_channel;
@@ -181,17 +190,17 @@
 		engine->graph.fifo_access	= nv04_graph_fifo_access;
 		engine->graph.load_context	= nv20_graph_load_context;
 		engine->graph.unload_context	= nv20_graph_unload_context;
-		engine->graph.set_region_tiling	= nv20_graph_set_region_tiling;
+		engine->graph.set_tile_region	= nv20_graph_set_tile_region;
 		engine->fifo.channels		= 32;
 		engine->fifo.init		= nv10_fifo_init;
-		engine->fifo.takedown		= nouveau_stub_takedown;
+		engine->fifo.takedown		= nv04_fifo_fini;
 		engine->fifo.disable		= nv04_fifo_disable;
 		engine->fifo.enable		= nv04_fifo_enable;
 		engine->fifo.reassign		= nv04_fifo_reassign;
 		engine->fifo.cache_pull		= nv04_fifo_cache_pull;
 		engine->fifo.channel_id		= nv10_fifo_channel_id;
 		engine->fifo.create_context	= nv10_fifo_create_context;
-		engine->fifo.destroy_context	= nv10_fifo_destroy_context;
+		engine->fifo.destroy_context	= nv04_fifo_destroy_context;
 		engine->fifo.load_context	= nv10_fifo_load_context;
 		engine->fifo.unload_context	= nv10_fifo_unload_context;
 		engine->display.early_init	= nv04_display_early_init;
@@ -207,16 +216,20 @@
 		engine->pm.clock_get		= nv04_pm_clock_get;
 		engine->pm.clock_pre		= nv04_pm_clock_pre;
 		engine->pm.clock_set		= nv04_pm_clock_set;
+		engine->crypt.init		= nouveau_stub_init;
+		engine->crypt.takedown		= nouveau_stub_takedown;
+		engine->vram.init		= nouveau_mem_detect;
+		engine->vram.flags_valid	= nouveau_mem_flags_valid;
 		break;
 	case 0x30:
 		engine->instmem.init		= nv04_instmem_init;
 		engine->instmem.takedown	= nv04_instmem_takedown;
 		engine->instmem.suspend		= nv04_instmem_suspend;
 		engine->instmem.resume		= nv04_instmem_resume;
-		engine->instmem.populate	= nv04_instmem_populate;
-		engine->instmem.clear		= nv04_instmem_clear;
-		engine->instmem.bind		= nv04_instmem_bind;
-		engine->instmem.unbind		= nv04_instmem_unbind;
+		engine->instmem.get		= nv04_instmem_get;
+		engine->instmem.put		= nv04_instmem_put;
+		engine->instmem.map		= nv04_instmem_map;
+		engine->instmem.unmap		= nv04_instmem_unmap;
 		engine->instmem.flush		= nv04_instmem_flush;
 		engine->mc.init			= nv04_mc_init;
 		engine->mc.takedown		= nv04_mc_takedown;
@@ -225,8 +238,9 @@
 		engine->timer.takedown		= nv04_timer_takedown;
 		engine->fb.init			= nv30_fb_init;
 		engine->fb.takedown		= nv30_fb_takedown;
-		engine->fb.set_region_tiling	= nv10_fb_set_region_tiling;
-		engine->graph.grclass		= nv30_graph_grclass;
+		engine->fb.init_tile_region	= nv30_fb_init_tile_region;
+		engine->fb.set_tile_region	= nv10_fb_set_tile_region;
+		engine->fb.free_tile_region	= nv30_fb_free_tile_region;
 		engine->graph.init		= nv30_graph_init;
 		engine->graph.takedown		= nv20_graph_takedown;
 		engine->graph.fifo_access	= nv04_graph_fifo_access;
@@ -235,17 +249,17 @@
 		engine->graph.destroy_context	= nv20_graph_destroy_context;
 		engine->graph.load_context	= nv20_graph_load_context;
 		engine->graph.unload_context	= nv20_graph_unload_context;
-		engine->graph.set_region_tiling	= nv20_graph_set_region_tiling;
+		engine->graph.set_tile_region	= nv20_graph_set_tile_region;
 		engine->fifo.channels		= 32;
 		engine->fifo.init		= nv10_fifo_init;
-		engine->fifo.takedown		= nouveau_stub_takedown;
+		engine->fifo.takedown		= nv04_fifo_fini;
 		engine->fifo.disable		= nv04_fifo_disable;
 		engine->fifo.enable		= nv04_fifo_enable;
 		engine->fifo.reassign		= nv04_fifo_reassign;
 		engine->fifo.cache_pull		= nv04_fifo_cache_pull;
 		engine->fifo.channel_id		= nv10_fifo_channel_id;
 		engine->fifo.create_context	= nv10_fifo_create_context;
-		engine->fifo.destroy_context	= nv10_fifo_destroy_context;
+		engine->fifo.destroy_context	= nv04_fifo_destroy_context;
 		engine->fifo.load_context	= nv10_fifo_load_context;
 		engine->fifo.unload_context	= nv10_fifo_unload_context;
 		engine->display.early_init	= nv04_display_early_init;
@@ -263,6 +277,10 @@
 		engine->pm.clock_set		= nv04_pm_clock_set;
 		engine->pm.voltage_get		= nouveau_voltage_gpio_get;
 		engine->pm.voltage_set		= nouveau_voltage_gpio_set;
+		engine->crypt.init		= nouveau_stub_init;
+		engine->crypt.takedown		= nouveau_stub_takedown;
+		engine->vram.init		= nouveau_mem_detect;
+		engine->vram.flags_valid	= nouveau_mem_flags_valid;
 		break;
 	case 0x40:
 	case 0x60:
@@ -270,10 +288,10 @@
 		engine->instmem.takedown	= nv04_instmem_takedown;
 		engine->instmem.suspend		= nv04_instmem_suspend;
 		engine->instmem.resume		= nv04_instmem_resume;
-		engine->instmem.populate	= nv04_instmem_populate;
-		engine->instmem.clear		= nv04_instmem_clear;
-		engine->instmem.bind		= nv04_instmem_bind;
-		engine->instmem.unbind		= nv04_instmem_unbind;
+		engine->instmem.get		= nv04_instmem_get;
+		engine->instmem.put		= nv04_instmem_put;
+		engine->instmem.map		= nv04_instmem_map;
+		engine->instmem.unmap		= nv04_instmem_unmap;
 		engine->instmem.flush		= nv04_instmem_flush;
 		engine->mc.init			= nv40_mc_init;
 		engine->mc.takedown		= nv40_mc_takedown;
@@ -282,8 +300,9 @@
 		engine->timer.takedown		= nv04_timer_takedown;
 		engine->fb.init			= nv40_fb_init;
 		engine->fb.takedown		= nv40_fb_takedown;
-		engine->fb.set_region_tiling	= nv40_fb_set_region_tiling;
-		engine->graph.grclass		= nv40_graph_grclass;
+		engine->fb.init_tile_region	= nv30_fb_init_tile_region;
+		engine->fb.set_tile_region	= nv40_fb_set_tile_region;
+		engine->fb.free_tile_region	= nv30_fb_free_tile_region;
 		engine->graph.init		= nv40_graph_init;
 		engine->graph.takedown		= nv40_graph_takedown;
 		engine->graph.fifo_access	= nv04_graph_fifo_access;
@@ -292,17 +311,17 @@
 		engine->graph.destroy_context	= nv40_graph_destroy_context;
 		engine->graph.load_context	= nv40_graph_load_context;
 		engine->graph.unload_context	= nv40_graph_unload_context;
-		engine->graph.set_region_tiling	= nv40_graph_set_region_tiling;
+		engine->graph.set_tile_region	= nv40_graph_set_tile_region;
 		engine->fifo.channels		= 32;
 		engine->fifo.init		= nv40_fifo_init;
-		engine->fifo.takedown		= nouveau_stub_takedown;
+		engine->fifo.takedown		= nv04_fifo_fini;
 		engine->fifo.disable		= nv04_fifo_disable;
 		engine->fifo.enable		= nv04_fifo_enable;
 		engine->fifo.reassign		= nv04_fifo_reassign;
 		engine->fifo.cache_pull		= nv04_fifo_cache_pull;
 		engine->fifo.channel_id		= nv10_fifo_channel_id;
 		engine->fifo.create_context	= nv40_fifo_create_context;
-		engine->fifo.destroy_context	= nv40_fifo_destroy_context;
+		engine->fifo.destroy_context	= nv04_fifo_destroy_context;
 		engine->fifo.load_context	= nv40_fifo_load_context;
 		engine->fifo.unload_context	= nv40_fifo_unload_context;
 		engine->display.early_init	= nv04_display_early_init;
@@ -321,6 +340,10 @@
 		engine->pm.voltage_get		= nouveau_voltage_gpio_get;
 		engine->pm.voltage_set		= nouveau_voltage_gpio_set;
 		engine->pm.temp_get		= nv40_temp_get;
+		engine->crypt.init		= nouveau_stub_init;
+		engine->crypt.takedown		= nouveau_stub_takedown;
+		engine->vram.init		= nouveau_mem_detect;
+		engine->vram.flags_valid	= nouveau_mem_flags_valid;
 		break;
 	case 0x50:
 	case 0x80: /* gotta love NVIDIA's consistency.. */
@@ -330,10 +353,10 @@
 		engine->instmem.takedown	= nv50_instmem_takedown;
 		engine->instmem.suspend		= nv50_instmem_suspend;
 		engine->instmem.resume		= nv50_instmem_resume;
-		engine->instmem.populate	= nv50_instmem_populate;
-		engine->instmem.clear		= nv50_instmem_clear;
-		engine->instmem.bind		= nv50_instmem_bind;
-		engine->instmem.unbind		= nv50_instmem_unbind;
+		engine->instmem.get		= nv50_instmem_get;
+		engine->instmem.put		= nv50_instmem_put;
+		engine->instmem.map		= nv50_instmem_map;
+		engine->instmem.unmap		= nv50_instmem_unmap;
 		if (dev_priv->chipset == 0x50)
 			engine->instmem.flush	= nv50_instmem_flush;
 		else
@@ -345,7 +368,6 @@
 		engine->timer.takedown		= nv04_timer_takedown;
 		engine->fb.init			= nv50_fb_init;
 		engine->fb.takedown		= nv50_fb_takedown;
-		engine->graph.grclass		= nv50_graph_grclass;
 		engine->graph.init		= nv50_graph_init;
 		engine->graph.takedown		= nv50_graph_takedown;
 		engine->graph.fifo_access	= nv50_graph_fifo_access;
@@ -381,24 +403,32 @@
 		engine->display.init		= nv50_display_init;
 		engine->display.destroy		= nv50_display_destroy;
 		engine->gpio.init		= nv50_gpio_init;
-		engine->gpio.takedown		= nouveau_stub_takedown;
+		engine->gpio.takedown		= nv50_gpio_fini;
 		engine->gpio.get		= nv50_gpio_get;
 		engine->gpio.set		= nv50_gpio_set;
+		engine->gpio.irq_register	= nv50_gpio_irq_register;
+		engine->gpio.irq_unregister	= nv50_gpio_irq_unregister;
 		engine->gpio.irq_enable		= nv50_gpio_irq_enable;
 		switch (dev_priv->chipset) {
-		case 0xa3:
-		case 0xa5:
-		case 0xa8:
-		case 0xaf:
-			engine->pm.clock_get	= nva3_pm_clock_get;
-			engine->pm.clock_pre	= nva3_pm_clock_pre;
-			engine->pm.clock_set	= nva3_pm_clock_set;
-			break;
-		default:
+		case 0x84:
+		case 0x86:
+		case 0x92:
+		case 0x94:
+		case 0x96:
+		case 0x98:
+		case 0xa0:
+		case 0xaa:
+		case 0xac:
+		case 0x50:
 			engine->pm.clock_get	= nv50_pm_clock_get;
 			engine->pm.clock_pre	= nv50_pm_clock_pre;
 			engine->pm.clock_set	= nv50_pm_clock_set;
 			break;
+		default:
+			engine->pm.clock_get	= nva3_pm_clock_get;
+			engine->pm.clock_pre	= nva3_pm_clock_pre;
+			engine->pm.clock_set	= nva3_pm_clock_set;
+			break;
 		}
 		engine->pm.voltage_get		= nouveau_voltage_gpio_get;
 		engine->pm.voltage_set		= nouveau_voltage_gpio_set;
@@ -406,17 +436,39 @@
 			engine->pm.temp_get	= nv84_temp_get;
 		else
 			engine->pm.temp_get	= nv40_temp_get;
+		switch (dev_priv->chipset) {
+		case 0x84:
+		case 0x86:
+		case 0x92:
+		case 0x94:
+		case 0x96:
+		case 0xa0:
+			engine->crypt.init	= nv84_crypt_init;
+			engine->crypt.takedown	= nv84_crypt_fini;
+			engine->crypt.create_context = nv84_crypt_create_context;
+			engine->crypt.destroy_context = nv84_crypt_destroy_context;
+			engine->crypt.tlb_flush	= nv84_crypt_tlb_flush;
+			break;
+		default:
+			engine->crypt.init	= nouveau_stub_init;
+			engine->crypt.takedown	= nouveau_stub_takedown;
+			break;
+		}
+		engine->vram.init		= nv50_vram_init;
+		engine->vram.get		= nv50_vram_new;
+		engine->vram.put		= nv50_vram_del;
+		engine->vram.flags_valid	= nv50_vram_flags_valid;
 		break;
 	case 0xC0:
 		engine->instmem.init		= nvc0_instmem_init;
 		engine->instmem.takedown	= nvc0_instmem_takedown;
 		engine->instmem.suspend		= nvc0_instmem_suspend;
 		engine->instmem.resume		= nvc0_instmem_resume;
-		engine->instmem.populate	= nvc0_instmem_populate;
-		engine->instmem.clear		= nvc0_instmem_clear;
-		engine->instmem.bind		= nvc0_instmem_bind;
-		engine->instmem.unbind		= nvc0_instmem_unbind;
-		engine->instmem.flush		= nvc0_instmem_flush;
+		engine->instmem.get		= nv50_instmem_get;
+		engine->instmem.put		= nv50_instmem_put;
+		engine->instmem.map		= nv50_instmem_map;
+		engine->instmem.unmap		= nv50_instmem_unmap;
+		engine->instmem.flush		= nv84_instmem_flush;
 		engine->mc.init			= nv50_mc_init;
 		engine->mc.takedown		= nv50_mc_takedown;
 		engine->timer.init		= nv04_timer_init;
@@ -424,7 +476,6 @@
 		engine->timer.takedown		= nv04_timer_takedown;
 		engine->fb.init			= nvc0_fb_init;
 		engine->fb.takedown		= nvc0_fb_takedown;
-		engine->graph.grclass		= NULL;  //nvc0_graph_grclass;
 		engine->graph.init		= nvc0_graph_init;
 		engine->graph.takedown		= nvc0_graph_takedown;
 		engine->graph.fifo_access	= nvc0_graph_fifo_access;
@@ -453,7 +504,15 @@
 		engine->gpio.takedown		= nouveau_stub_takedown;
 		engine->gpio.get		= nv50_gpio_get;
 		engine->gpio.set		= nv50_gpio_set;
+		engine->gpio.irq_register	= nv50_gpio_irq_register;
+		engine->gpio.irq_unregister	= nv50_gpio_irq_unregister;
 		engine->gpio.irq_enable		= nv50_gpio_irq_enable;
+		engine->crypt.init		= nouveau_stub_init;
+		engine->crypt.takedown		= nouveau_stub_takedown;
+		engine->vram.init		= nvc0_vram_init;
+		engine->vram.get		= nvc0_vram_new;
+		engine->vram.put		= nv50_vram_del;
+		engine->vram.flags_valid	= nvc0_vram_flags_valid;
 		break;
 	default:
 		NV_ERROR(dev, "NV%02x unsupported\n", dev_priv->chipset);
@@ -493,9 +552,13 @@
 	if (ret)
 		return ret;
 
+	/* no dma objects on fermi... */
+	if (dev_priv->card_type >= NV_C0)
+		goto out_done;
+
 	ret = nouveau_gpuobj_dma_new(dev_priv->channel, NV_CLASS_DMA_IN_MEMORY,
 				     0, dev_priv->vram_size,
-				     NV_DMA_ACCESS_RW, NV_DMA_TARGET_VIDMEM,
+				     NV_MEM_ACCESS_RW, NV_MEM_TARGET_VRAM,
 				     &gpuobj);
 	if (ret)
 		goto out_err;
@@ -505,9 +568,10 @@
 	if (ret)
 		goto out_err;
 
-	ret = nouveau_gpuobj_gart_dma_new(dev_priv->channel, 0,
-					  dev_priv->gart_info.aper_size,
-					  NV_DMA_ACCESS_RW, &gpuobj, NULL);
+	ret = nouveau_gpuobj_dma_new(dev_priv->channel, NV_CLASS_DMA_IN_MEMORY,
+				     0, dev_priv->gart_info.aper_size,
+				     NV_MEM_ACCESS_RW, NV_MEM_TARGET_GART,
+				     &gpuobj);
 	if (ret)
 		goto out_err;
 
@@ -516,11 +580,12 @@
 	if (ret)
 		goto out_err;
 
+out_done:
+	mutex_unlock(&dev_priv->channel->mutex);
 	return 0;
 
 out_err:
-	nouveau_channel_free(dev_priv->channel);
-	dev_priv->channel = NULL;
+	nouveau_channel_put(&dev_priv->channel);
 	return ret;
 }
 
@@ -531,15 +596,25 @@
 	pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
 	if (state == VGA_SWITCHEROO_ON) {
 		printk(KERN_ERR "VGA switcheroo: switched nouveau on\n");
+		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
 		nouveau_pci_resume(pdev);
 		drm_kms_helper_poll_enable(dev);
+		dev->switch_power_state = DRM_SWITCH_POWER_ON;
 	} else {
 		printk(KERN_ERR "VGA switcheroo: switched nouveau off\n");
+		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
 		drm_kms_helper_poll_disable(dev);
 		nouveau_pci_suspend(pdev, pmm);
+		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
 	}
 }
 
+static void nouveau_switcheroo_reprobe(struct pci_dev *pdev)
+{
+	struct drm_device *dev = pci_get_drvdata(pdev);
+	nouveau_fbcon_output_poll_changed(dev);
+}
+
 static bool nouveau_switcheroo_can_switch(struct pci_dev *pdev)
 {
 	struct drm_device *dev = pci_get_drvdata(pdev);
@@ -560,6 +635,7 @@
 
 	vga_client_register(dev->pdev, dev, NULL, nouveau_vga_set_decode);
 	vga_switcheroo_register_client(dev->pdev, nouveau_switcheroo_set_state,
+				       nouveau_switcheroo_reprobe,
 				       nouveau_switcheroo_can_switch);
 
 	/* Initialise internal driver API hooks */
@@ -567,6 +643,8 @@
 	if (ret)
 		goto out;
 	engine = &dev_priv->engine;
+	spin_lock_init(&dev_priv->channels.lock);
+	spin_lock_init(&dev_priv->tile.lock);
 	spin_lock_init(&dev_priv->context_switch_lock);
 
 	/* Make the CRTCs and I2C buses accessible */
@@ -625,26 +703,28 @@
 		if (ret)
 			goto out_fb;
 
+		/* PCRYPT */
+		ret = engine->crypt.init(dev);
+		if (ret)
+			goto out_graph;
+
 		/* PFIFO */
 		ret = engine->fifo.init(dev);
 		if (ret)
-			goto out_graph;
+			goto out_crypt;
 	}
 
 	ret = engine->display.create(dev);
 	if (ret)
 		goto out_fifo;
 
-	/* this call irq_preinstall, register irq handler and
-	 * call irq_postinstall
-	 */
-	ret = drm_irq_install(dev);
+	ret = drm_vblank_init(dev, nv_two_heads(dev) ? 2 : 1);
 	if (ret)
-		goto out_display;
+		goto out_vblank;
 
-	ret = drm_vblank_init(dev, 0);
+	ret = nouveau_irq_init(dev);
 	if (ret)
-		goto out_irq;
+		goto out_vblank;
 
 	/* what about PVIDEO/PCRTC/PRAMDAC etc? */
 
@@ -669,12 +749,16 @@
 out_fence:
 	nouveau_fence_fini(dev);
 out_irq:
-	drm_irq_uninstall(dev);
-out_display:
+	nouveau_irq_fini(dev);
+out_vblank:
+	drm_vblank_cleanup(dev);
 	engine->display.destroy(dev);
 out_fifo:
 	if (!nouveau_noaccel)
 		engine->fifo.takedown(dev);
+out_crypt:
+	if (!nouveau_noaccel)
+		engine->crypt.takedown(dev);
 out_graph:
 	if (!nouveau_noaccel)
 		engine->graph.takedown(dev);
@@ -713,12 +797,12 @@
 
 	if (!engine->graph.accel_blocked) {
 		nouveau_fence_fini(dev);
-		nouveau_channel_free(dev_priv->channel);
-		dev_priv->channel = NULL;
+		nouveau_channel_put_unlocked(&dev_priv->channel);
 	}
 
 	if (!nouveau_noaccel) {
 		engine->fifo.takedown(dev);
+		engine->crypt.takedown(dev);
 		engine->graph.takedown(dev);
 	}
 	engine->fb.takedown(dev);
@@ -737,7 +821,8 @@
 	nouveau_gpuobj_takedown(dev);
 	nouveau_mem_vram_fini(dev);
 
-	drm_irq_uninstall(dev);
+	nouveau_irq_fini(dev);
+	drm_vblank_cleanup(dev);
 
 	nouveau_pm_fini(dev);
 	nouveau_bios_takedown(dev);
@@ -980,6 +1065,7 @@
 
 void nouveau_lastclose(struct drm_device *dev)
 {
+	vga_switcheroo_process_delayed_switch();
 }
 
 int nouveau_unload(struct drm_device *dev)
@@ -1024,21 +1110,6 @@
 		else
 			getparam->value = NV_PCI;
 		break;
-	case NOUVEAU_GETPARAM_FB_PHYSICAL:
-		getparam->value = dev_priv->fb_phys;
-		break;
-	case NOUVEAU_GETPARAM_AGP_PHYSICAL:
-		getparam->value = dev_priv->gart_info.aper_base;
-		break;
-	case NOUVEAU_GETPARAM_PCI_PHYSICAL:
-		if (dev->sg) {
-			getparam->value = (unsigned long)dev->sg->virtual;
-		} else {
-			NV_ERROR(dev, "Requested PCIGART address, "
-					"while no PCIGART was created\n");
-			return -EINVAL;
-		}
-		break;
 	case NOUVEAU_GETPARAM_FB_SIZE:
 		getparam->value = dev_priv->fb_available_size;
 		break;
@@ -1046,7 +1117,7 @@
 		getparam->value = dev_priv->gart_info.aper_size;
 		break;
 	case NOUVEAU_GETPARAM_VM_VRAM_BASE:
-		getparam->value = dev_priv->vm_vram_base;
+		getparam->value = 0; /* deprecated */
 		break;
 	case NOUVEAU_GETPARAM_PTIMER_TIME:
 		getparam->value = dev_priv->engine.timer.read(dev);
@@ -1054,6 +1125,9 @@
 	case NOUVEAU_GETPARAM_HAS_BO_USAGE:
 		getparam->value = 1;
 		break;
+	case NOUVEAU_GETPARAM_HAS_PAGEFLIP:
+		getparam->value = (dev_priv->card_type < NV_50);
+		break;
 	case NOUVEAU_GETPARAM_GRAPH_UNITS:
 		/* NV40 and NV50 versions are quite different, but register
 		 * address is the same. User is supposed to know the card
@@ -1087,8 +1161,9 @@
 }
 
 /* Wait until (value(reg) & mask) == val, up until timeout has hit */
-bool nouveau_wait_until(struct drm_device *dev, uint64_t timeout,
-			uint32_t reg, uint32_t mask, uint32_t val)
+bool
+nouveau_wait_eq(struct drm_device *dev, uint64_t timeout,
+		uint32_t reg, uint32_t mask, uint32_t val)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
@@ -1102,10 +1177,33 @@
 	return false;
 }
 
+/* Wait until (value(reg) & mask) != val, up until timeout has hit */
+bool
+nouveau_wait_ne(struct drm_device *dev, uint64_t timeout,
+		uint32_t reg, uint32_t mask, uint32_t val)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
+	uint64_t start = ptimer->read(dev);
+
+	do {
+		if ((nv_rd32(dev, reg) & mask) != val)
+			return true;
+	} while (ptimer->read(dev) - start < timeout);
+
+	return false;
+}
+
 /* Waits for PGRAPH to go completely idle */
 bool nouveau_wait_for_idle(struct drm_device *dev)
 {
-	if (!nv_wait(dev, NV04_PGRAPH_STATUS, 0xffffffff, 0x00000000)) {
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	uint32_t mask = ~0;
+
+	if (dev_priv->card_type == NV_40)
+		mask &= ~NV40_PGRAPH_STATUS_SYNC_STALL;
+
+	if (!nv_wait(dev, NV04_PGRAPH_STATUS, mask, 0)) {
 		NV_ERROR(dev, "PGRAPH idle timed out with status 0x%08x\n",
 			 nv_rd32(dev, NV04_PGRAPH_STATUS));
 		return false;
diff --git a/drivers/gpu/drm/nouveau/nouveau_util.c b/drivers/gpu/drm/nouveau/nouveau_util.c
new file mode 100644
index 0000000..fbe0fb1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_util.c
@@ -0,0 +1,69 @@
+/*
+ * Copyright (C) 2010 Nouveau Project
+ *
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/ratelimit.h>
+
+#include "nouveau_util.h"
+
+static DEFINE_RATELIMIT_STATE(nouveau_ratelimit_state, 3 * HZ, 20);
+
+void
+nouveau_bitfield_print(const struct nouveau_bitfield *bf, u32 value)
+{
+	while (bf->name) {
+		if (value & bf->mask) {
+			printk(" %s", bf->name);
+			value &= ~bf->mask;
+		}
+
+		bf++;
+	}
+
+	if (value)
+		printk(" (unknown bits 0x%08x)", value);
+}
+
+void
+nouveau_enum_print(const struct nouveau_enum *en, u32 value)
+{
+	while (en->name) {
+		if (value == en->value) {
+			printk("%s", en->name);
+			return;
+		}
+
+		en++;
+	}
+
+	printk("(unknown enum 0x%08x)", value);
+}
+
+int
+nouveau_ratelimit(void)
+{
+	return __ratelimit(&nouveau_ratelimit_state);
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_util.h b/drivers/gpu/drm/nouveau/nouveau_util.h
new file mode 100644
index 0000000..d9ceaea
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_util.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2010 Nouveau Project
+ *
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __NOUVEAU_UTIL_H__
+#define __NOUVEAU_UTIL_H__
+
+struct nouveau_bitfield {
+	u32 mask;
+	const char *name;
+};
+
+struct nouveau_enum {
+	u32 value;
+	const char *name;
+};
+
+void nouveau_bitfield_print(const struct nouveau_bitfield *, u32 value);
+void nouveau_enum_print(const struct nouveau_enum *, u32 value);
+int nouveau_ratelimit(void);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_vm.c b/drivers/gpu/drm/nouveau/nouveau_vm.c
new file mode 100644
index 0000000..97d82ae
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_vm.c
@@ -0,0 +1,439 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_mm.h"
+#include "nouveau_vm.h"
+
+void
+nouveau_vm_map_at(struct nouveau_vma *vma, u64 delta, struct nouveau_vram *vram)
+{
+	struct nouveau_vm *vm = vma->vm;
+	struct nouveau_mm_node *r;
+	int big = vma->node->type != vm->spg_shift;
+	u32 offset = vma->node->offset + (delta >> 12);
+	u32 bits = vma->node->type - 12;
+	u32 pde  = (offset >> vm->pgt_bits) - vm->fpde;
+	u32 pte  = (offset & ((1 << vm->pgt_bits) - 1)) >> bits;
+	u32 max  = 1 << (vm->pgt_bits - bits);
+	u32 end, len;
+
+	list_for_each_entry(r, &vram->regions, rl_entry) {
+		u64 phys = (u64)r->offset << 12;
+		u32 num  = r->length >> bits;
+
+		while (num) {
+			struct nouveau_gpuobj *pgt = vm->pgt[pde].obj[big];
+
+			end = (pte + num);
+			if (unlikely(end >= max))
+				end = max;
+			len = end - pte;
+
+			vm->map(vma, pgt, vram, pte, len, phys);
+
+			num -= len;
+			pte += len;
+			if (unlikely(end >= max)) {
+				pde++;
+				pte = 0;
+			}
+		}
+	}
+
+	vm->flush(vm);
+}
+
+void
+nouveau_vm_map(struct nouveau_vma *vma, struct nouveau_vram *vram)
+{
+	nouveau_vm_map_at(vma, 0, vram);
+}
+
+void
+nouveau_vm_map_sg(struct nouveau_vma *vma, u64 delta, u64 length,
+		  dma_addr_t *list)
+{
+	struct nouveau_vm *vm = vma->vm;
+	int big = vma->node->type != vm->spg_shift;
+	u32 offset = vma->node->offset + (delta >> 12);
+	u32 bits = vma->node->type - 12;
+	u32 num  = length >> vma->node->type;
+	u32 pde  = (offset >> vm->pgt_bits) - vm->fpde;
+	u32 pte  = (offset & ((1 << vm->pgt_bits) - 1)) >> bits;
+	u32 max  = 1 << (vm->pgt_bits - bits);
+	u32 end, len;
+
+	while (num) {
+		struct nouveau_gpuobj *pgt = vm->pgt[pde].obj[big];
+
+		end = (pte + num);
+		if (unlikely(end >= max))
+			end = max;
+		len = end - pte;
+
+		vm->map_sg(vma, pgt, pte, list, len);
+
+		num  -= len;
+		pte  += len;
+		list += len;
+		if (unlikely(end >= max)) {
+			pde++;
+			pte = 0;
+		}
+	}
+
+	vm->flush(vm);
+}
+
+void
+nouveau_vm_unmap_at(struct nouveau_vma *vma, u64 delta, u64 length)
+{
+	struct nouveau_vm *vm = vma->vm;
+	int big = vma->node->type != vm->spg_shift;
+	u32 offset = vma->node->offset + (delta >> 12);
+	u32 bits = vma->node->type - 12;
+	u32 num  = length >> vma->node->type;
+	u32 pde  = (offset >> vm->pgt_bits) - vm->fpde;
+	u32 pte  = (offset & ((1 << vm->pgt_bits) - 1)) >> bits;
+	u32 max  = 1 << (vm->pgt_bits - bits);
+	u32 end, len;
+
+	while (num) {
+		struct nouveau_gpuobj *pgt = vm->pgt[pde].obj[big];
+
+		end = (pte + num);
+		if (unlikely(end >= max))
+			end = max;
+		len = end - pte;
+
+		vm->unmap(pgt, pte, len);
+
+		num -= len;
+		pte += len;
+		if (unlikely(end >= max)) {
+			pde++;
+			pte = 0;
+		}
+	}
+
+	vm->flush(vm);
+}
+
+void
+nouveau_vm_unmap(struct nouveau_vma *vma)
+{
+	nouveau_vm_unmap_at(vma, 0, (u64)vma->node->length << 12);
+}
+
+static void
+nouveau_vm_unmap_pgt(struct nouveau_vm *vm, int big, u32 fpde, u32 lpde)
+{
+	struct nouveau_vm_pgd *vpgd;
+	struct nouveau_vm_pgt *vpgt;
+	struct nouveau_gpuobj *pgt;
+	u32 pde;
+
+	for (pde = fpde; pde <= lpde; pde++) {
+		vpgt = &vm->pgt[pde - vm->fpde];
+		if (--vpgt->refcount[big])
+			continue;
+
+		pgt = vpgt->obj[big];
+		vpgt->obj[big] = NULL;
+
+		list_for_each_entry(vpgd, &vm->pgd_list, head) {
+			vm->map_pgt(vpgd->obj, pde, vpgt->obj);
+		}
+
+		mutex_unlock(&vm->mm->mutex);
+		nouveau_gpuobj_ref(NULL, &pgt);
+		mutex_lock(&vm->mm->mutex);
+	}
+}
+
+static int
+nouveau_vm_map_pgt(struct nouveau_vm *vm, u32 pde, u32 type)
+{
+	struct nouveau_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde];
+	struct nouveau_vm_pgd *vpgd;
+	struct nouveau_gpuobj *pgt;
+	int big = (type != vm->spg_shift);
+	u32 pgt_size;
+	int ret;
+
+	pgt_size  = (1 << (vm->pgt_bits + 12)) >> type;
+	pgt_size *= 8;
+
+	mutex_unlock(&vm->mm->mutex);
+	ret = nouveau_gpuobj_new(vm->dev, NULL, pgt_size, 0x1000,
+				 NVOBJ_FLAG_ZERO_ALLOC, &pgt);
+	mutex_lock(&vm->mm->mutex);
+	if (unlikely(ret))
+		return ret;
+
+	/* someone beat us to filling the PDE while we didn't have the lock */
+	if (unlikely(vpgt->refcount[big]++)) {
+		mutex_unlock(&vm->mm->mutex);
+		nouveau_gpuobj_ref(NULL, &pgt);
+		mutex_lock(&vm->mm->mutex);
+		return 0;
+	}
+
+	vpgt->obj[big] = pgt;
+	list_for_each_entry(vpgd, &vm->pgd_list, head) {
+		vm->map_pgt(vpgd->obj, pde, vpgt->obj);
+	}
+
+	return 0;
+}
+
+int
+nouveau_vm_get(struct nouveau_vm *vm, u64 size, u32 page_shift,
+	       u32 access, struct nouveau_vma *vma)
+{
+	u32 align = (1 << page_shift) >> 12;
+	u32 msize = size >> 12;
+	u32 fpde, lpde, pde;
+	int ret;
+
+	mutex_lock(&vm->mm->mutex);
+	ret = nouveau_mm_get(vm->mm, page_shift, msize, 0, align, &vma->node);
+	if (unlikely(ret != 0)) {
+		mutex_unlock(&vm->mm->mutex);
+		return ret;
+	}
+
+	fpde = (vma->node->offset >> vm->pgt_bits);
+	lpde = (vma->node->offset + vma->node->length - 1) >> vm->pgt_bits;
+	for (pde = fpde; pde <= lpde; pde++) {
+		struct nouveau_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde];
+		int big = (vma->node->type != vm->spg_shift);
+
+		if (likely(vpgt->refcount[big])) {
+			vpgt->refcount[big]++;
+			continue;
+		}
+
+		ret = nouveau_vm_map_pgt(vm, pde, vma->node->type);
+		if (ret) {
+			if (pde != fpde)
+				nouveau_vm_unmap_pgt(vm, big, fpde, pde - 1);
+			nouveau_mm_put(vm->mm, vma->node);
+			mutex_unlock(&vm->mm->mutex);
+			vma->node = NULL;
+			return ret;
+		}
+	}
+	mutex_unlock(&vm->mm->mutex);
+
+	vma->vm     = vm;
+	vma->offset = (u64)vma->node->offset << 12;
+	vma->access = access;
+	return 0;
+}
+
+void
+nouveau_vm_put(struct nouveau_vma *vma)
+{
+	struct nouveau_vm *vm = vma->vm;
+	u32 fpde, lpde;
+
+	if (unlikely(vma->node == NULL))
+		return;
+	fpde = (vma->node->offset >> vm->pgt_bits);
+	lpde = (vma->node->offset + vma->node->length - 1) >> vm->pgt_bits;
+
+	mutex_lock(&vm->mm->mutex);
+	nouveau_vm_unmap_pgt(vm, vma->node->type != vm->spg_shift, fpde, lpde);
+	nouveau_mm_put(vm->mm, vma->node);
+	vma->node = NULL;
+	mutex_unlock(&vm->mm->mutex);
+}
+
+int
+nouveau_vm_new(struct drm_device *dev, u64 offset, u64 length, u64 mm_offset,
+	       struct nouveau_vm **pvm)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_vm *vm;
+	u64 mm_length = (offset + length) - mm_offset;
+	u32 block, pgt_bits;
+	int ret;
+
+	vm = kzalloc(sizeof(*vm), GFP_KERNEL);
+	if (!vm)
+		return -ENOMEM;
+
+	if (dev_priv->card_type == NV_50) {
+		vm->map_pgt = nv50_vm_map_pgt;
+		vm->map = nv50_vm_map;
+		vm->map_sg = nv50_vm_map_sg;
+		vm->unmap = nv50_vm_unmap;
+		vm->flush = nv50_vm_flush;
+		vm->spg_shift = 12;
+		vm->lpg_shift = 16;
+
+		pgt_bits = 29;
+		block = (1 << pgt_bits);
+		if (length < block)
+			block = length;
+
+	} else
+	if (dev_priv->card_type == NV_C0) {
+		vm->map_pgt = nvc0_vm_map_pgt;
+		vm->map = nvc0_vm_map;
+		vm->map_sg = nvc0_vm_map_sg;
+		vm->unmap = nvc0_vm_unmap;
+		vm->flush = nvc0_vm_flush;
+		vm->spg_shift = 12;
+		vm->lpg_shift = 17;
+		pgt_bits = 27;
+
+		/* Should be 4096 everywhere, this is a hack that's
+		 * currently necessary to avoid an elusive bug that
+		 * causes corruption when mixing small/large pages
+		 */
+		if (length < (1ULL << 40))
+			block = 4096;
+		else {
+			block = (1 << pgt_bits);
+			if (length < block)
+				block = length;
+		}
+	} else {
+		kfree(vm);
+		return -ENOSYS;
+	}
+
+	vm->fpde   = offset >> pgt_bits;
+	vm->lpde   = (offset + length - 1) >> pgt_bits;
+	vm->pgt = kcalloc(vm->lpde - vm->fpde + 1, sizeof(*vm->pgt), GFP_KERNEL);
+	if (!vm->pgt) {
+		kfree(vm);
+		return -ENOMEM;
+	}
+
+	INIT_LIST_HEAD(&vm->pgd_list);
+	vm->dev = dev;
+	vm->refcount = 1;
+	vm->pgt_bits = pgt_bits - 12;
+
+	ret = nouveau_mm_init(&vm->mm, mm_offset >> 12, mm_length >> 12,
+			      block >> 12);
+	if (ret) {
+		kfree(vm);
+		return ret;
+	}
+
+	*pvm = vm;
+	return 0;
+}
+
+static int
+nouveau_vm_link(struct nouveau_vm *vm, struct nouveau_gpuobj *pgd)
+{
+	struct nouveau_vm_pgd *vpgd;
+	int i;
+
+	if (!pgd)
+		return 0;
+
+	vpgd = kzalloc(sizeof(*vpgd), GFP_KERNEL);
+	if (!vpgd)
+		return -ENOMEM;
+
+	nouveau_gpuobj_ref(pgd, &vpgd->obj);
+
+	mutex_lock(&vm->mm->mutex);
+	for (i = vm->fpde; i <= vm->lpde; i++)
+		vm->map_pgt(pgd, i, vm->pgt[i - vm->fpde].obj);
+	list_add(&vpgd->head, &vm->pgd_list);
+	mutex_unlock(&vm->mm->mutex);
+	return 0;
+}
+
+static void
+nouveau_vm_unlink(struct nouveau_vm *vm, struct nouveau_gpuobj *pgd)
+{
+	struct nouveau_vm_pgd *vpgd, *tmp;
+
+	if (!pgd)
+		return;
+
+	mutex_lock(&vm->mm->mutex);
+	list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) {
+		if (vpgd->obj != pgd)
+			continue;
+
+		list_del(&vpgd->head);
+		nouveau_gpuobj_ref(NULL, &vpgd->obj);
+		kfree(vpgd);
+	}
+	mutex_unlock(&vm->mm->mutex);
+}
+
+static void
+nouveau_vm_del(struct nouveau_vm *vm)
+{
+	struct nouveau_vm_pgd *vpgd, *tmp;
+
+	list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) {
+		nouveau_vm_unlink(vm, vpgd->obj);
+	}
+	WARN_ON(nouveau_mm_fini(&vm->mm) != 0);
+
+	kfree(vm->pgt);
+	kfree(vm);
+}
+
+int
+nouveau_vm_ref(struct nouveau_vm *ref, struct nouveau_vm **ptr,
+	       struct nouveau_gpuobj *pgd)
+{
+	struct nouveau_vm *vm;
+	int ret;
+
+	vm = ref;
+	if (vm) {
+		ret = nouveau_vm_link(vm, pgd);
+		if (ret)
+			return ret;
+
+		vm->refcount++;
+	}
+
+	vm = *ptr;
+	*ptr = ref;
+
+	if (vm) {
+		nouveau_vm_unlink(vm, pgd);
+
+		if (--vm->refcount == 0)
+			nouveau_vm_del(vm);
+	}
+
+	return 0;
+}
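
For reference, the address arithmetic in nouveau_vm_map_at()/nouveau_vm_unmap_at() above splits a linear range into per-page-table segments, bumping the PDE index whenever the PTE index runs off the end of a table. Below is a minimal userspace sketch of that walk (not part of the patch); the shift values are illustrative examples, not taken from real hardware.

/* Illustrative model of the per-PDE span splitting used by
 * nouveau_vm_map_at()/nouveau_vm_unmap_at().  Userspace only;
 * page_shift/pgt_bits below are example values.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const uint32_t page_shift = 12;                /* 4 KiB pages */
	const uint32_t pgt_bits   = 17;                /* 4 KiB units per PDE */
	const uint32_t bits = page_shift - 12;         /* 0 for small pages */
	const uint32_t max  = 1u << (pgt_bits - bits); /* PTEs per page table */

	uint64_t delta  = 0x1ff40000;                  /* byte offset into the VMA */
	uint64_t length = 0x00300000;                  /* bytes to map */

	uint32_t offset = (uint32_t)(delta >> 12);     /* offset in 4 KiB units */
	uint32_t num = (uint32_t)(length >> page_shift);
	uint32_t pde = offset >> pgt_bits;
	uint32_t pte = (offset & ((1u << pgt_bits) - 1)) >> bits;

	while (num) {
		uint32_t end = pte + num;
		uint32_t len;

		if (end >= max)
			end = max;
		len = end - pte;

		printf("pde %u: pte %u..%u (%u entries)\n",
		       pde, pte, pte + len - 1, len);

		num -= len;
		pte += len;
		if (end >= max) {      /* crossed into the next page table */
			pde++;
			pte = 0;
		}
	}
	return 0;
}

With the example numbers used here the range crosses one page-table boundary, so the walk emits two segments: the tail of one PDE and the start of the next.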
diff --git a/drivers/gpu/drm/nouveau/nouveau_vm.h b/drivers/gpu/drm/nouveau/nouveau_vm.h
new file mode 100644
index 0000000..e119351
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_vm.h
@@ -0,0 +1,113 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#ifndef __NOUVEAU_VM_H__
+#define __NOUVEAU_VM_H__
+
+#include "drmP.h"
+
+#include "nouveau_drv.h"
+#include "nouveau_mm.h"
+
+struct nouveau_vm_pgt {
+	struct nouveau_gpuobj *obj[2];
+	u32 refcount[2];
+};
+
+struct nouveau_vm_pgd {
+	struct list_head head;
+	struct nouveau_gpuobj *obj;
+};
+
+struct nouveau_vma {
+	struct nouveau_vm *vm;
+	struct nouveau_mm_node *node;
+	u64 offset;
+	u32 access;
+};
+
+struct nouveau_vm {
+	struct drm_device *dev;
+	struct nouveau_mm *mm;
+	int refcount;
+
+	struct list_head pgd_list;
+	atomic_t pgraph_refs;
+	atomic_t pcrypt_refs;
+
+	struct nouveau_vm_pgt *pgt;
+	u32 fpde;
+	u32 lpde;
+
+	u32 pgt_bits;
+	u8  spg_shift;
+	u8  lpg_shift;
+
+	void (*map_pgt)(struct nouveau_gpuobj *pgd, u32 pde,
+			struct nouveau_gpuobj *pgt[2]);
+	void (*map)(struct nouveau_vma *, struct nouveau_gpuobj *,
+		    struct nouveau_vram *, u32 pte, u32 cnt, u64 phys);
+	void (*map_sg)(struct nouveau_vma *, struct nouveau_gpuobj *,
+		       u32 pte, dma_addr_t *, u32 cnt);
+	void (*unmap)(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt);
+	void (*flush)(struct nouveau_vm *);
+};
+
+/* nouveau_vm.c */
+int  nouveau_vm_new(struct drm_device *, u64 offset, u64 length, u64 mm_offset,
+		    struct nouveau_vm **);
+int  nouveau_vm_ref(struct nouveau_vm *, struct nouveau_vm **,
+		    struct nouveau_gpuobj *pgd);
+int  nouveau_vm_get(struct nouveau_vm *, u64 size, u32 page_shift,
+		    u32 access, struct nouveau_vma *);
+void nouveau_vm_put(struct nouveau_vma *);
+void nouveau_vm_map(struct nouveau_vma *, struct nouveau_vram *);
+void nouveau_vm_map_at(struct nouveau_vma *, u64 offset, struct nouveau_vram *);
+void nouveau_vm_unmap(struct nouveau_vma *);
+void nouveau_vm_unmap_at(struct nouveau_vma *, u64 offset, u64 length);
+void nouveau_vm_map_sg(struct nouveau_vma *, u64 offset, u64 length,
+		       dma_addr_t *);
+
+/* nv50_vm.c */
+void nv50_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde,
+		     struct nouveau_gpuobj *pgt[2]);
+void nv50_vm_map(struct nouveau_vma *, struct nouveau_gpuobj *,
+		 struct nouveau_vram *, u32 pte, u32 cnt, u64 phys);
+void nv50_vm_map_sg(struct nouveau_vma *, struct nouveau_gpuobj *,
+		    u32 pte, dma_addr_t *, u32 cnt);
+void nv50_vm_unmap(struct nouveau_gpuobj *, u32 pte, u32 cnt);
+void nv50_vm_flush(struct nouveau_vm *);
+void nv50_vm_flush_engine(struct drm_device *, int engine);
+
+/* nvc0_vm.c */
+void nvc0_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde,
+		     struct nouveau_gpuobj *pgt[2]);
+void nvc0_vm_map(struct nouveau_vma *, struct nouveau_gpuobj *,
+		 struct nouveau_vram *, u32 pte, u32 cnt, u64 phys);
+void nvc0_vm_map_sg(struct nouveau_vma *, struct nouveau_gpuobj *,
+		    u32 pte, dma_addr_t *, u32 cnt);
+void nvc0_vm_unmap(struct nouveau_gpuobj *, u32 pte, u32 cnt);
+void nvc0_vm_flush(struct nouveau_vm *);
+
+#endif
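
The refcount[2] array in struct nouveau_vm_pgt is what lets overlapping allocations share page tables: nouveau_vm_get() bumps the count for every PDE a new allocation touches and only allocates a table on the 0->1 transition, while nouveau_vm_put() frees the table again when the last reference goes away. A rough userspace model of that bookkeeping (assumed simplifications: one page size, no locking, allocation replaced by a printf; illustration only, not driver code):

/* Rough model of the per-PDE reference counting performed by
 * nouveau_vm_get()/nouveau_vm_put().  Illustration only.
 */
#include <stdio.h>

#define NUM_PDE 8

static unsigned int refcount[NUM_PDE];

static void vm_get(unsigned int fpde, unsigned int lpde)
{
	for (unsigned int pde = fpde; pde <= lpde; pde++) {
		if (refcount[pde]++ == 0)
			printf("allocate page table for pde %u\n", pde);
	}
}

static void vm_put(unsigned int fpde, unsigned int lpde)
{
	for (unsigned int pde = fpde; pde <= lpde; pde++) {
		if (--refcount[pde] == 0)
			printf("free page table for pde %u\n", pde);
	}
}

int main(void)
{
	vm_get(0, 3);   /* first mapping allocates PDEs 0-3 */
	vm_get(2, 5);   /* second mapping shares 2-3, allocates 4-5 */
	vm_put(0, 3);   /* PDEs 2-3 stay referenced, 0-1 are freed */
	vm_put(2, 5);   /* last reference gone, 2-5 are freed */
	return 0;
}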
diff --git a/drivers/gpu/drm/nouveau/nv04_crtc.c b/drivers/gpu/drm/nouveau/nv04_crtc.c
index 40e1807..297505e 100644
--- a/drivers/gpu/drm/nouveau/nv04_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv04_crtc.c
@@ -551,7 +551,10 @@
 	if (dev_priv->card_type >= NV_30)
 		regp->gpio_ext = NVReadCRTC(dev, 0, NV_PCRTC_GPIO_EXT);
 
-	regp->crtc_cfg = NV_PCRTC_CONFIG_START_ADDRESS_HSYNC;
+	if (dev_priv->card_type >= NV_10)
+		regp->crtc_cfg = NV10_PCRTC_CONFIG_START_ADDRESS_HSYNC;
+	else
+		regp->crtc_cfg = NV04_PCRTC_CONFIG_START_ADDRESS_HSYNC;
 
 	/* Some misc regs */
 	if (dev_priv->card_type == NV_40) {
@@ -669,6 +672,7 @@
 	if (nv_two_heads(dev))
 		NVSetOwner(dev, nv_crtc->index);
 
+	drm_vblank_pre_modeset(dev, nv_crtc->index);
 	funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
 
 	NVBlankScreen(dev, nv_crtc->index, true);
@@ -701,6 +705,7 @@
 #endif
 
 	funcs->dpms(crtc, DRM_MODE_DPMS_ON);
+	drm_vblank_post_modeset(dev, nv_crtc->index);
 }
 
 static void nv_crtc_destroy(struct drm_crtc *crtc)
@@ -986,6 +991,7 @@
 	.cursor_move = nv04_crtc_cursor_move,
 	.gamma_set = nv_crtc_gamma_set,
 	.set_config = drm_crtc_helper_set_config,
+	.page_flip = nouveau_crtc_page_flip,
 	.destroy = nv_crtc_destroy,
 };
 
diff --git a/drivers/gpu/drm/nouveau/nv04_dac.c b/drivers/gpu/drm/nouveau/nv04_dac.c
index ba6423f..e000455 100644
--- a/drivers/gpu/drm/nouveau/nv04_dac.c
+++ b/drivers/gpu/drm/nouveau/nv04_dac.c
@@ -74,14 +74,14 @@
 		 * use a 10ms timeout (guards against crtc being inactive, in
 		 * which case blank state would never change)
 		 */
-		if (!nouveau_wait_until(dev, 10000000, NV_PRMCIO_INP0__COLOR,
-					0x00000001, 0x00000000))
+		if (!nouveau_wait_eq(dev, 10000000, NV_PRMCIO_INP0__COLOR,
+				     0x00000001, 0x00000000))
 			return -EBUSY;
-		if (!nouveau_wait_until(dev, 10000000, NV_PRMCIO_INP0__COLOR,
-					0x00000001, 0x00000001))
+		if (!nouveau_wait_eq(dev, 10000000, NV_PRMCIO_INP0__COLOR,
+				     0x00000001, 0x00000001))
 			return -EBUSY;
-		if (!nouveau_wait_until(dev, 10000000, NV_PRMCIO_INP0__COLOR,
-					0x00000001, 0x00000000))
+		if (!nouveau_wait_eq(dev, 10000000, NV_PRMCIO_INP0__COLOR,
+				     0x00000001, 0x00000000))
 			return -EBUSY;
 
 		udelay(100);
diff --git a/drivers/gpu/drm/nouveau/nv04_display.c b/drivers/gpu/drm/nouveau/nv04_display.c
index 9e28cf7..1715e14 100644
--- a/drivers/gpu/drm/nouveau/nv04_display.c
+++ b/drivers/gpu/drm/nouveau/nv04_display.c
@@ -32,6 +32,9 @@
 #include "nouveau_encoder.h"
 #include "nouveau_connector.h"
 
+static void nv04_vblank_crtc0_isr(struct drm_device *);
+static void nv04_vblank_crtc1_isr(struct drm_device *);
+
 static void
 nv04_display_store_initial_head_owner(struct drm_device *dev)
 {
@@ -197,6 +200,8 @@
 		func->save(encoder);
 	}
 
+	nouveau_irq_register(dev, 24, nv04_vblank_crtc0_isr);
+	nouveau_irq_register(dev, 25, nv04_vblank_crtc1_isr);
 	return 0;
 }
 
@@ -208,6 +213,9 @@
 
 	NV_DEBUG_KMS(dev, "\n");
 
+	nouveau_irq_unregister(dev, 24);
+	nouveau_irq_unregister(dev, 25);
+
 	/* Turn every CRTC off. */
 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
 		struct drm_mode_set modeset = {
@@ -258,3 +266,16 @@
 	return 0;
 }
 
+static void
+nv04_vblank_crtc0_isr(struct drm_device *dev)
+{
+	nv_wr32(dev, NV_CRTC0_INTSTAT, NV_CRTC_INTR_VBLANK);
+	drm_handle_vblank(dev, 0);
+}
+
+static void
+nv04_vblank_crtc1_isr(struct drm_device *dev)
+{
+	nv_wr32(dev, NV_CRTC1_INTSTAT, NV_CRTC_INTR_VBLANK);
+	drm_handle_vblank(dev, 1);
+}
diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c
index 33e4c93..7a11893 100644
--- a/drivers/gpu/drm/nouveau/nv04_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c
@@ -28,52 +28,39 @@
 #include "nouveau_ramht.h"
 #include "nouveau_fbcon.h"
 
-void
+int
 nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
 {
 	struct nouveau_fbdev *nfbdev = info->par;
 	struct drm_device *dev = nfbdev->dev;
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_channel *chan = dev_priv->channel;
+	int ret;
 
-	if (info->state != FBINFO_STATE_RUNNING)
-		return;
-
-	if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 4)) {
-		nouveau_fbcon_gpu_lockup(info);
-	}
-
-	if (info->flags & FBINFO_HWACCEL_DISABLED) {
-		cfb_copyarea(info, region);
-		return;
-	}
+	ret = RING_SPACE(chan, 4);
+	if (ret)
+		return ret;
 
 	BEGIN_RING(chan, NvSubImageBlit, 0x0300, 3);
 	OUT_RING(chan, (region->sy << 16) | region->sx);
 	OUT_RING(chan, (region->dy << 16) | region->dx);
 	OUT_RING(chan, (region->height << 16) | region->width);
 	FIRE_RING(chan);
+	return 0;
 }
 
-void
+int
 nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
 {
 	struct nouveau_fbdev *nfbdev = info->par;
 	struct drm_device *dev = nfbdev->dev;
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_channel *chan = dev_priv->channel;
+	int ret;
 
-	if (info->state != FBINFO_STATE_RUNNING)
-		return;
-
-	if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 7)) {
-		nouveau_fbcon_gpu_lockup(info);
-	}
-
-	if (info->flags & FBINFO_HWACCEL_DISABLED) {
-		cfb_fillrect(info, rect);
-		return;
-	}
+	ret = RING_SPACE(chan, 7);
+	if (ret)
+		return ret;
 
 	BEGIN_RING(chan, NvSubGdiRect, 0x02fc, 1);
 	OUT_RING(chan, (rect->rop != ROP_COPY) ? 1 : 3);
@@ -87,9 +74,10 @@
 	OUT_RING(chan, (rect->dx << 16) | rect->dy);
 	OUT_RING(chan, (rect->width << 16) | rect->height);
 	FIRE_RING(chan);
+	return 0;
 }
 
-void
+int
 nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
 {
 	struct nouveau_fbdev *nfbdev = info->par;
@@ -101,23 +89,14 @@
 	uint32_t dsize;
 	uint32_t width;
 	uint32_t *data = (uint32_t *)image->data;
+	int ret;
 
-	if (info->state != FBINFO_STATE_RUNNING)
-		return;
+	if (image->depth != 1)
+		return -ENODEV;
 
-	if (image->depth != 1) {
-		cfb_imageblit(info, image);
-		return;
-	}
-
-	if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 8)) {
-		nouveau_fbcon_gpu_lockup(info);
-	}
-
-	if (info->flags & FBINFO_HWACCEL_DISABLED) {
-		cfb_imageblit(info, image);
-		return;
-	}
+	ret = RING_SPACE(chan, 8);
+	if (ret)
+		return ret;
 
 	width = ALIGN(image->width, 8);
 	dsize = ALIGN(width * image->height, 32) >> 5;
@@ -144,11 +123,9 @@
 	while (dsize) {
 		int iter_len = dsize > 128 ? 128 : dsize;
 
-		if (RING_SPACE(chan, iter_len + 1)) {
-			nouveau_fbcon_gpu_lockup(info);
-			cfb_imageblit(info, image);
-			return;
-		}
+		ret = RING_SPACE(chan, iter_len + 1);
+		if (ret)
+			return ret;
 
 		BEGIN_RING(chan, NvSubGdiRect, 0x0c00, iter_len);
 		OUT_RINGp(chan, data, iter_len);
@@ -157,22 +134,7 @@
 	}
 
 	FIRE_RING(chan);
-}
-
-static int
-nv04_fbcon_grobj_new(struct drm_device *dev, int class, uint32_t handle)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_gpuobj *obj = NULL;
-	int ret;
-
-	ret = nouveau_gpuobj_gr_new(dev_priv->channel, class, &obj);
-	if (ret)
-		return ret;
-
-	ret = nouveau_ramht_insert(dev_priv->channel, handle, obj);
-	nouveau_gpuobj_ref(NULL, &obj);
-	return ret;
+	return 0;
 }
 
 int
@@ -214,29 +176,31 @@
 		return -EINVAL;
 	}
 
-	ret = nv04_fbcon_grobj_new(dev, dev_priv->card_type >= NV_10 ?
-				   0x0062 : 0x0042, NvCtxSurf2D);
+	ret = nouveau_gpuobj_gr_new(chan, NvCtxSurf2D,
+				    dev_priv->card_type >= NV_10 ?
+				    0x0062 : 0x0042);
 	if (ret)
 		return ret;
 
-	ret = nv04_fbcon_grobj_new(dev, 0x0019, NvClipRect);
+	ret = nouveau_gpuobj_gr_new(chan, NvClipRect, 0x0019);
 	if (ret)
 		return ret;
 
-	ret = nv04_fbcon_grobj_new(dev, 0x0043, NvRop);
+	ret = nouveau_gpuobj_gr_new(chan, NvRop, 0x0043);
 	if (ret)
 		return ret;
 
-	ret = nv04_fbcon_grobj_new(dev, 0x0044, NvImagePatt);
+	ret = nouveau_gpuobj_gr_new(chan, NvImagePatt, 0x0044);
 	if (ret)
 		return ret;
 
-	ret = nv04_fbcon_grobj_new(dev, 0x004a, NvGdiRect);
+	ret = nouveau_gpuobj_gr_new(chan, NvGdiRect, 0x004a);
 	if (ret)
 		return ret;
 
-	ret = nv04_fbcon_grobj_new(dev, dev_priv->chipset >= 0x11 ?
-				   0x009f : 0x005f, NvImageBlit);
+	ret = nouveau_gpuobj_gr_new(chan, NvImageBlit,
+				    dev_priv->chipset >= 0x11 ?
+				    0x009f : 0x005f);
 	if (ret)
 		return ret;
 
diff --git a/drivers/gpu/drm/nouveau/nv04_fifo.c b/drivers/gpu/drm/nouveau/nv04_fifo.c
index 708293b..f89d104 100644
--- a/drivers/gpu/drm/nouveau/nv04_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv04_fifo.c
@@ -28,6 +28,7 @@
 #include "drm.h"
 #include "nouveau_drv.h"
 #include "nouveau_ramht.h"
+#include "nouveau_util.h"
 
 #define NV04_RAMFC(c) (dev_priv->ramfc->pinst + ((c) * NV04_RAMFC__SIZE))
 #define NV04_RAMFC__SIZE 32
@@ -128,6 +129,11 @@
 	if (ret)
 		return ret;
 
+	chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
+			     NV03_USER(chan->id), PAGE_SIZE);
+	if (!chan->user)
+		return -ENOMEM;
+
 	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
 
 	/* Setup initial state */
@@ -151,10 +157,31 @@
 nv04_fifo_destroy_context(struct nouveau_channel *chan)
 {
 	struct drm_device *dev = chan->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
+	unsigned long flags;
 
-	nv_wr32(dev, NV04_PFIFO_MODE,
-		nv_rd32(dev, NV04_PFIFO_MODE) & ~(1 << chan->id));
+	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+	pfifo->reassign(dev, false);
 
+	/* Unload the context if it's the currently active one */
+	if (pfifo->channel_id(dev) == chan->id) {
+		pfifo->disable(dev);
+		pfifo->unload_context(dev);
+		pfifo->enable(dev);
+	}
+
+	/* Keep it from being rescheduled */
+	nv_mask(dev, NV04_PFIFO_MODE, 1 << chan->id, 0);
+
+	pfifo->reassign(dev, true);
+	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
+
+	/* Free the channel resources */
+	if (chan->user) {
+		iounmap(chan->user);
+		chan->user = NULL;
+	}
 	nouveau_gpuobj_ref(NULL, &chan->ramfc);
 }
 
@@ -208,7 +235,7 @@
 	if (chid < 0 || chid >= dev_priv->engine.fifo.channels)
 		return 0;
 
-	chan = dev_priv->fifos[chid];
+	chan = dev_priv->channels.ptr[chid];
 	if (!chan) {
 		NV_ERROR(dev, "Inactive channel on PFIFO: %d\n", chid);
 		return -EINVAL;
@@ -267,6 +294,7 @@
 static void
 nv04_fifo_init_intr(struct drm_device *dev)
 {
+	nouveau_irq_register(dev, 8, nv04_fifo_isr);
 	nv_wr32(dev, 0x002100, 0xffffffff);
 	nv_wr32(dev, 0x002140, 0xffffffff);
 }
@@ -289,7 +317,7 @@
 	pfifo->reassign(dev, true);
 
 	for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
-		if (dev_priv->fifos[i]) {
+		if (dev_priv->channels.ptr[i]) {
 			uint32_t mode = nv_rd32(dev, NV04_PFIFO_MODE);
 			nv_wr32(dev, NV04_PFIFO_MODE, mode | (1 << i));
 		}
@@ -298,3 +326,207 @@
 	return 0;
 }
 
+void
+nv04_fifo_fini(struct drm_device *dev)
+{
+	nv_wr32(dev, 0x2140, 0x00000000);
+	nouveau_irq_unregister(dev, 8);
+}
+
+static bool
+nouveau_fifo_swmthd(struct drm_device *dev, u32 chid, u32 addr, u32 data)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_channel *chan = NULL;
+	struct nouveau_gpuobj *obj;
+	unsigned long flags;
+	const int subc = (addr >> 13) & 0x7;
+	const int mthd = addr & 0x1ffc;
+	bool handled = false;
+	u32 engine;
+
+	spin_lock_irqsave(&dev_priv->channels.lock, flags);
+	if (likely(chid >= 0 && chid < dev_priv->engine.fifo.channels))
+		chan = dev_priv->channels.ptr[chid];
+	if (unlikely(!chan))
+		goto out;
+
+	switch (mthd) {
+	case 0x0000: /* bind object to subchannel */
+		obj = nouveau_ramht_find(chan, data);
+		if (unlikely(!obj || obj->engine != NVOBJ_ENGINE_SW))
+			break;
+
+		chan->sw_subchannel[subc] = obj->class;
+		engine = 0x0000000f << (subc * 4);
+
+		nv_mask(dev, NV04_PFIFO_CACHE1_ENGINE, engine, 0x00000000);
+		handled = true;
+		break;
+	default:
+		engine = nv_rd32(dev, NV04_PFIFO_CACHE1_ENGINE);
+		if (unlikely(((engine >> (subc * 4)) & 0xf) != 0))
+			break;
+
+		if (!nouveau_gpuobj_mthd_call(chan, chan->sw_subchannel[subc],
+					      mthd, data))
+			handled = true;
+		break;
+	}
+
+out:
+	spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
+	return handled;
+}
+
+void
+nv04_fifo_isr(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_engine *engine = &dev_priv->engine;
+	uint32_t status, reassign;
+	int cnt = 0;
+
+	reassign = nv_rd32(dev, NV03_PFIFO_CACHES) & 1;
+	while ((status = nv_rd32(dev, NV03_PFIFO_INTR_0)) && (cnt++ < 100)) {
+		uint32_t chid, get;
+
+		nv_wr32(dev, NV03_PFIFO_CACHES, 0);
+
+		chid = engine->fifo.channel_id(dev);
+		get  = nv_rd32(dev, NV03_PFIFO_CACHE1_GET);
+
+		if (status & NV_PFIFO_INTR_CACHE_ERROR) {
+			uint32_t mthd, data;
+			int ptr;
+
+			/* NV_PFIFO_CACHE1_GET actually goes to 0xffc before
+			 * wrapping on my G80 chips, but CACHE1 isn't big
+			 * enough for this much data.  Tests show that it
+			 * wraps around to the start at GET=0x800; no clue
+			 * as to why.
+			 */
+			ptr = (get & 0x7ff) >> 2;
+
+			if (dev_priv->card_type < NV_40) {
+				mthd = nv_rd32(dev,
+					NV04_PFIFO_CACHE1_METHOD(ptr));
+				data = nv_rd32(dev,
+					NV04_PFIFO_CACHE1_DATA(ptr));
+			} else {
+				mthd = nv_rd32(dev,
+					NV40_PFIFO_CACHE1_METHOD(ptr));
+				data = nv_rd32(dev,
+					NV40_PFIFO_CACHE1_DATA(ptr));
+			}
+
+			if (!nouveau_fifo_swmthd(dev, chid, mthd, data)) {
+				NV_INFO(dev, "PFIFO_CACHE_ERROR - Ch %d/%d "
+					     "Mthd 0x%04x Data 0x%08x\n",
+					chid, (mthd >> 13) & 7, mthd & 0x1ffc,
+					data);
+			}
+
+			nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH, 0);
+			nv_wr32(dev, NV03_PFIFO_INTR_0,
+						NV_PFIFO_INTR_CACHE_ERROR);
+
+			nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0,
+				nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH0) & ~1);
+			nv_wr32(dev, NV03_PFIFO_CACHE1_GET, get + 4);
+			nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0,
+				nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH0) | 1);
+			nv_wr32(dev, NV04_PFIFO_CACHE1_HASH, 0);
+
+			nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH,
+				nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUSH) | 1);
+			nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
+
+			status &= ~NV_PFIFO_INTR_CACHE_ERROR;
+		}
+
+		if (status & NV_PFIFO_INTR_DMA_PUSHER) {
+			u32 dma_get = nv_rd32(dev, 0x003244);
+			u32 dma_put = nv_rd32(dev, 0x003240);
+			u32 push = nv_rd32(dev, 0x003220);
+			u32 state = nv_rd32(dev, 0x003228);
+
+			if (dev_priv->card_type == NV_50) {
+				u32 ho_get = nv_rd32(dev, 0x003328);
+				u32 ho_put = nv_rd32(dev, 0x003320);
+				u32 ib_get = nv_rd32(dev, 0x003334);
+				u32 ib_put = nv_rd32(dev, 0x003330);
+
+				if (nouveau_ratelimit())
+					NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%02x%08x "
+					     "Put 0x%02x%08x IbGet 0x%08x IbPut 0x%08x "
+					     "State 0x%08x Push 0x%08x\n",
+						chid, ho_get, dma_get, ho_put,
+						dma_put, ib_get, ib_put, state,
+						push);
+
+				/* METHOD_COUNT, in DMA_STATE on earlier chipsets */
+				nv_wr32(dev, 0x003364, 0x00000000);
+				if (dma_get != dma_put || ho_get != ho_put) {
+					nv_wr32(dev, 0x003244, dma_put);
+					nv_wr32(dev, 0x003328, ho_put);
+				} else
+				if (ib_get != ib_put) {
+					nv_wr32(dev, 0x003334, ib_put);
+				}
+			} else {
+				NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%08x "
+					     "Put 0x%08x State 0x%08x Push 0x%08x\n",
+					chid, dma_get, dma_put, state, push);
+
+				if (dma_get != dma_put)
+					nv_wr32(dev, 0x003244, dma_put);
+			}
+
+			nv_wr32(dev, 0x003228, 0x00000000);
+			nv_wr32(dev, 0x003220, 0x00000001);
+			nv_wr32(dev, 0x002100, NV_PFIFO_INTR_DMA_PUSHER);
+			status &= ~NV_PFIFO_INTR_DMA_PUSHER;
+		}
+
+		if (status & NV_PFIFO_INTR_SEMAPHORE) {
+			uint32_t sem;
+
+			status &= ~NV_PFIFO_INTR_SEMAPHORE;
+			nv_wr32(dev, NV03_PFIFO_INTR_0,
+				NV_PFIFO_INTR_SEMAPHORE);
+
+			sem = nv_rd32(dev, NV10_PFIFO_CACHE1_SEMAPHORE);
+			nv_wr32(dev, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1);
+
+			nv_wr32(dev, NV03_PFIFO_CACHE1_GET, get + 4);
+			nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
+		}
+
+		if (dev_priv->card_type == NV_50) {
+			if (status & 0x00000010) {
+				nv50_fb_vm_trap(dev, 1, "PFIFO_BAR_FAULT");
+				status &= ~0x00000010;
+				nv_wr32(dev, 0x002100, 0x00000010);
+			}
+		}
+
+		if (status) {
+			if (nouveau_ratelimit())
+				NV_INFO(dev, "PFIFO_INTR 0x%08x - Ch %d\n",
+					status, chid);
+			nv_wr32(dev, NV03_PFIFO_INTR_0, status);
+			status = 0;
+		}
+
+		nv_wr32(dev, NV03_PFIFO_CACHES, reassign);
+	}
+
+	if (status) {
+		NV_INFO(dev, "PFIFO still angry after %d spins, halt\n", cnt);
+		nv_wr32(dev, 0x2140, 0);
+		nv_wr32(dev, 0x140, 0);
+	}
+
+	nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PFIFO_PENDING);
+}
diff --git a/drivers/gpu/drm/nouveau/nv04_graph.c b/drivers/gpu/drm/nouveau/nv04_graph.c
index c897342..af75015 100644
--- a/drivers/gpu/drm/nouveau/nv04_graph.c
+++ b/drivers/gpu/drm/nouveau/nv04_graph.c
@@ -26,6 +26,11 @@
 #include "drm.h"
 #include "nouveau_drm.h"
 #include "nouveau_drv.h"
+#include "nouveau_hw.h"
+#include "nouveau_util.h"
+
+static int  nv04_graph_register(struct drm_device *dev);
+static void nv04_graph_isr(struct drm_device *dev);
 
 static uint32_t nv04_graph_ctx_regs[] = {
 	0x0040053c,
@@ -357,10 +362,10 @@
 	if (chid >= dev_priv->engine.fifo.channels)
 		return NULL;
 
-	return dev_priv->fifos[chid];
+	return dev_priv->channels.ptr[chid];
 }
 
-void
+static void
 nv04_graph_context_switch(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
@@ -368,7 +373,6 @@
 	struct nouveau_channel *chan = NULL;
 	int chid;
 
-	pgraph->fifo_access(dev, false);
 	nouveau_wait_for_idle(dev);
 
 	/* If previous context is valid, we need to save it */
@@ -376,11 +380,9 @@
 
 	/* Load context for next channel */
 	chid = dev_priv->engine.fifo.channel_id(dev);
-	chan = dev_priv->fifos[chid];
+	chan = dev_priv->channels.ptr[chid];
 	if (chan)
 		nv04_graph_load_context(chan);
-
-	pgraph->fifo_access(dev, true);
 }
 
 static uint32_t *ctx_reg(struct graph_state *ctx, uint32_t reg)
@@ -412,10 +414,25 @@
 
 void nv04_graph_destroy_context(struct nouveau_channel *chan)
 {
+	struct drm_device *dev = chan->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
 	struct graph_state *pgraph_ctx = chan->pgraph_ctx;
+	unsigned long flags;
 
+	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+	pgraph->fifo_access(dev, false);
+
+	/* Unload the context if it's the currently active one */
+	if (pgraph->channel(dev) == chan)
+		pgraph->unload_context(dev);
+
+	/* Free the context resources */
 	kfree(pgraph_ctx);
 	chan->pgraph_ctx = NULL;
+
+	pgraph->fifo_access(dev, true);
+	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
 }
 
 int nv04_graph_load_context(struct nouveau_channel *chan)
@@ -468,13 +485,19 @@
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	uint32_t tmp;
+	int ret;
 
 	nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) &
 			~NV_PMC_ENABLE_PGRAPH);
 	nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) |
 			 NV_PMC_ENABLE_PGRAPH);
 
+	ret = nv04_graph_register(dev);
+	if (ret)
+		return ret;
+
 	/* Enable PGRAPH interrupts */
+	nouveau_irq_register(dev, 12, nv04_graph_isr);
 	nv_wr32(dev, NV03_PGRAPH_INTR, 0xFFFFFFFF);
 	nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
 
@@ -510,6 +533,8 @@
 
 void nv04_graph_takedown(struct drm_device *dev)
 {
+	nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0x00000000);
+	nouveau_irq_unregister(dev, 12);
 }
 
 void
@@ -524,13 +549,27 @@
 }
 
 static int
-nv04_graph_mthd_set_ref(struct nouveau_channel *chan, int grclass,
-			int mthd, uint32_t data)
+nv04_graph_mthd_set_ref(struct nouveau_channel *chan,
+			u32 class, u32 mthd, u32 data)
 {
 	atomic_set(&chan->fence.last_sequence_irq, data);
 	return 0;
 }
 
+int
+nv04_graph_mthd_page_flip(struct nouveau_channel *chan,
+			  u32 class, u32 mthd, u32 data)
+{
+	struct drm_device *dev = chan->dev;
+	struct nouveau_page_flip_state s;
+
+	if (!nouveau_finish_page_flip(chan, &s))
+		nv_set_crtc_base(dev, s.crtc,
+				 s.offset + s.y * s.pitch + s.x * s.bpp / 8);
+
+	return 0;
+}
+
 /*
  * Software methods, why they are needed, and how they all work:
  *
@@ -606,12 +645,12 @@
  */
 
 static void
-nv04_graph_set_ctx1(struct nouveau_channel *chan, uint32_t mask, uint32_t value)
+nv04_graph_set_ctx1(struct nouveau_channel *chan, u32 mask, u32 value)
 {
 	struct drm_device *dev = chan->dev;
-	uint32_t instance = (nv_rd32(dev, NV04_PGRAPH_CTX_SWITCH4) & 0xffff) << 4;
+	u32 instance = (nv_rd32(dev, NV04_PGRAPH_CTX_SWITCH4) & 0xffff) << 4;
 	int subc = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 13) & 0x7;
-	uint32_t tmp;
+	u32 tmp;
 
 	tmp  = nv_ri32(dev, instance);
 	tmp &= ~mask;
@@ -623,11 +662,11 @@
 }
 
 static void
-nv04_graph_set_ctx_val(struct nouveau_channel *chan, uint32_t mask, uint32_t value)
+nv04_graph_set_ctx_val(struct nouveau_channel *chan, u32 mask, u32 value)
 {
 	struct drm_device *dev = chan->dev;
-	uint32_t instance = (nv_rd32(dev, NV04_PGRAPH_CTX_SWITCH4) & 0xffff) << 4;
-	uint32_t tmp, ctx1;
+	u32 instance = (nv_rd32(dev, NV04_PGRAPH_CTX_SWITCH4) & 0xffff) << 4;
+	u32 tmp, ctx1;
 	int class, op, valid = 1;
 
 	ctx1 = nv_ri32(dev, instance);
@@ -672,13 +711,13 @@
 }
 
 static int
-nv04_graph_mthd_set_operation(struct nouveau_channel *chan, int grclass,
-			      int mthd, uint32_t data)
+nv04_graph_mthd_set_operation(struct nouveau_channel *chan,
+			      u32 class, u32 mthd, u32 data)
 {
 	if (data > 5)
 		return 1;
 	/* Old versions of the objects only accept first three operations. */
-	if (data > 2 && grclass < 0x40)
+	if (data > 2 && class < 0x40)
 		return 1;
 	nv04_graph_set_ctx1(chan, 0x00038000, data << 15);
 	/* changing operation changes set of objects needed for validation */
@@ -687,8 +726,8 @@
 }
 
 static int
-nv04_graph_mthd_surf3d_clip_h(struct nouveau_channel *chan, int grclass,
-			      int mthd, uint32_t data)
+nv04_graph_mthd_surf3d_clip_h(struct nouveau_channel *chan,
+			      u32 class, u32 mthd, u32 data)
 {
 	uint32_t min = data & 0xffff, max;
 	uint32_t w = data >> 16;
@@ -706,8 +745,8 @@
 }
 
 static int
-nv04_graph_mthd_surf3d_clip_v(struct nouveau_channel *chan, int grclass,
-			      int mthd, uint32_t data)
+nv04_graph_mthd_surf3d_clip_v(struct nouveau_channel *chan,
+			      u32 class, u32 mthd, u32 data)
 {
 	uint32_t min = data & 0xffff, max;
 	uint32_t w = data >> 16;
@@ -725,8 +764,8 @@
 }
 
 static int
-nv04_graph_mthd_bind_surf2d(struct nouveau_channel *chan, int grclass,
-			      int mthd, uint32_t data)
+nv04_graph_mthd_bind_surf2d(struct nouveau_channel *chan,
+			    u32 class, u32 mthd, u32 data)
 {
 	switch (nv_ri32(chan->dev, data << 4) & 0xff) {
 	case 0x30:
@@ -742,8 +781,8 @@
 }
 
 static int
-nv04_graph_mthd_bind_surf2d_swzsurf(struct nouveau_channel *chan, int grclass,
-			      int mthd, uint32_t data)
+nv04_graph_mthd_bind_surf2d_swzsurf(struct nouveau_channel *chan,
+				    u32 class, u32 mthd, u32 data)
 {
 	switch (nv_ri32(chan->dev, data << 4) & 0xff) {
 	case 0x30:
@@ -763,8 +802,8 @@
 }
 
 static int
-nv04_graph_mthd_bind_nv01_patt(struct nouveau_channel *chan, int grclass,
-			      int mthd, uint32_t data)
+nv04_graph_mthd_bind_nv01_patt(struct nouveau_channel *chan,
+			       u32 class, u32 mthd, u32 data)
 {
 	switch (nv_ri32(chan->dev, data << 4) & 0xff) {
 	case 0x30:
@@ -778,8 +817,8 @@
 }
 
 static int
-nv04_graph_mthd_bind_nv04_patt(struct nouveau_channel *chan, int grclass,
-			      int mthd, uint32_t data)
+nv04_graph_mthd_bind_nv04_patt(struct nouveau_channel *chan,
+			       u32 class, u32 mthd, u32 data)
 {
 	switch (nv_ri32(chan->dev, data << 4) & 0xff) {
 	case 0x30:
@@ -793,8 +832,8 @@
 }
 
 static int
-nv04_graph_mthd_bind_rop(struct nouveau_channel *chan, int grclass,
-			      int mthd, uint32_t data)
+nv04_graph_mthd_bind_rop(struct nouveau_channel *chan,
+			 u32 class, u32 mthd, u32 data)
 {
 	switch (nv_ri32(chan->dev, data << 4) & 0xff) {
 	case 0x30:
@@ -808,8 +847,8 @@
 }
 
 static int
-nv04_graph_mthd_bind_beta1(struct nouveau_channel *chan, int grclass,
-			      int mthd, uint32_t data)
+nv04_graph_mthd_bind_beta1(struct nouveau_channel *chan,
+			   u32 class, u32 mthd, u32 data)
 {
 	switch (nv_ri32(chan->dev, data << 4) & 0xff) {
 	case 0x30:
@@ -823,8 +862,8 @@
 }
 
 static int
-nv04_graph_mthd_bind_beta4(struct nouveau_channel *chan, int grclass,
-			      int mthd, uint32_t data)
+nv04_graph_mthd_bind_beta4(struct nouveau_channel *chan,
+			   u32 class, u32 mthd, u32 data)
 {
 	switch (nv_ri32(chan->dev, data << 4) & 0xff) {
 	case 0x30:
@@ -838,8 +877,8 @@
 }
 
 static int
-nv04_graph_mthd_bind_surf_dst(struct nouveau_channel *chan, int grclass,
-			      int mthd, uint32_t data)
+nv04_graph_mthd_bind_surf_dst(struct nouveau_channel *chan,
+			      u32 class, u32 mthd, u32 data)
 {
 	switch (nv_ri32(chan->dev, data << 4) & 0xff) {
 	case 0x30:
@@ -853,8 +892,8 @@
 }
 
 static int
-nv04_graph_mthd_bind_surf_src(struct nouveau_channel *chan, int grclass,
-			      int mthd, uint32_t data)
+nv04_graph_mthd_bind_surf_src(struct nouveau_channel *chan,
+			      u32 class, u32 mthd, u32 data)
 {
 	switch (nv_ri32(chan->dev, data << 4) & 0xff) {
 	case 0x30:
@@ -868,8 +907,8 @@
 }
 
 static int
-nv04_graph_mthd_bind_surf_color(struct nouveau_channel *chan, int grclass,
-			      int mthd, uint32_t data)
+nv04_graph_mthd_bind_surf_color(struct nouveau_channel *chan,
+				u32 class, u32 mthd, u32 data)
 {
 	switch (nv_ri32(chan->dev, data << 4) & 0xff) {
 	case 0x30:
@@ -883,8 +922,8 @@
 }
 
 static int
-nv04_graph_mthd_bind_surf_zeta(struct nouveau_channel *chan, int grclass,
-			      int mthd, uint32_t data)
+nv04_graph_mthd_bind_surf_zeta(struct nouveau_channel *chan,
+			       u32 class, u32 mthd, u32 data)
 {
 	switch (nv_ri32(chan->dev, data << 4) & 0xff) {
 	case 0x30:
@@ -898,8 +937,8 @@
 }
 
 static int
-nv04_graph_mthd_bind_clip(struct nouveau_channel *chan, int grclass,
-			      int mthd, uint32_t data)
+nv04_graph_mthd_bind_clip(struct nouveau_channel *chan,
+			  u32 class, u32 mthd, u32 data)
 {
 	switch (nv_ri32(chan->dev, data << 4) & 0xff) {
 	case 0x30:
@@ -913,8 +952,8 @@
 }
 
 static int
-nv04_graph_mthd_bind_chroma(struct nouveau_channel *chan, int grclass,
-			      int mthd, uint32_t data)
+nv04_graph_mthd_bind_chroma(struct nouveau_channel *chan,
+			    u32 class, u32 mthd, u32 data)
 {
 	switch (nv_ri32(chan->dev, data << 4) & 0xff) {
 	case 0x30:
@@ -930,194 +969,346 @@
 	return 1;
 }
 
-static struct nouveau_pgraph_object_method nv04_graph_mthds_sw[] = {
-	{ 0x0150, nv04_graph_mthd_set_ref },
+static int
+nv04_graph_register(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+	if (dev_priv->engine.graph.registered)
+		return 0;
+
+	/* dvd subpicture */
+	NVOBJ_CLASS(dev, 0x0038, GR);
+
+	/* m2mf */
+	NVOBJ_CLASS(dev, 0x0039, GR);
+
+	/* nv03 gdirect */
+	NVOBJ_CLASS(dev, 0x004b, GR);
+	NVOBJ_MTHD (dev, 0x004b, 0x0184, nv04_graph_mthd_bind_nv01_patt);
+	NVOBJ_MTHD (dev, 0x004b, 0x0188, nv04_graph_mthd_bind_rop);
+	NVOBJ_MTHD (dev, 0x004b, 0x018c, nv04_graph_mthd_bind_beta1);
+	NVOBJ_MTHD (dev, 0x004b, 0x0190, nv04_graph_mthd_bind_surf_dst);
+	NVOBJ_MTHD (dev, 0x004b, 0x02fc, nv04_graph_mthd_set_operation);
+
+	/* nv04 gdirect */
+	NVOBJ_CLASS(dev, 0x004a, GR);
+	NVOBJ_MTHD (dev, 0x004a, 0x0188, nv04_graph_mthd_bind_nv04_patt);
+	NVOBJ_MTHD (dev, 0x004a, 0x018c, nv04_graph_mthd_bind_rop);
+	NVOBJ_MTHD (dev, 0x004a, 0x0190, nv04_graph_mthd_bind_beta1);
+	NVOBJ_MTHD (dev, 0x004a, 0x0194, nv04_graph_mthd_bind_beta4);
+	NVOBJ_MTHD (dev, 0x004a, 0x0198, nv04_graph_mthd_bind_surf2d);
+	NVOBJ_MTHD (dev, 0x004a, 0x02fc, nv04_graph_mthd_set_operation);
+
+	/* nv01 imageblit */
+	NVOBJ_CLASS(dev, 0x001f, GR);
+	NVOBJ_MTHD (dev, 0x001f, 0x0184, nv04_graph_mthd_bind_chroma);
+	NVOBJ_MTHD (dev, 0x001f, 0x0188, nv04_graph_mthd_bind_clip);
+	NVOBJ_MTHD (dev, 0x001f, 0x018c, nv04_graph_mthd_bind_nv01_patt);
+	NVOBJ_MTHD (dev, 0x001f, 0x0190, nv04_graph_mthd_bind_rop);
+	NVOBJ_MTHD (dev, 0x001f, 0x0194, nv04_graph_mthd_bind_beta1);
+	NVOBJ_MTHD (dev, 0x001f, 0x0198, nv04_graph_mthd_bind_surf_dst);
+	NVOBJ_MTHD (dev, 0x001f, 0x019c, nv04_graph_mthd_bind_surf_src);
+	NVOBJ_MTHD (dev, 0x001f, 0x02fc, nv04_graph_mthd_set_operation);
+
+	/* nv04 imageblit */
+	NVOBJ_CLASS(dev, 0x005f, GR);
+	NVOBJ_MTHD (dev, 0x005f, 0x0184, nv04_graph_mthd_bind_chroma);
+	NVOBJ_MTHD (dev, 0x005f, 0x0188, nv04_graph_mthd_bind_clip);
+	NVOBJ_MTHD (dev, 0x005f, 0x018c, nv04_graph_mthd_bind_nv04_patt);
+	NVOBJ_MTHD (dev, 0x005f, 0x0190, nv04_graph_mthd_bind_rop);
+	NVOBJ_MTHD (dev, 0x005f, 0x0194, nv04_graph_mthd_bind_beta1);
+	NVOBJ_MTHD (dev, 0x005f, 0x0198, nv04_graph_mthd_bind_beta4);
+	NVOBJ_MTHD (dev, 0x005f, 0x019c, nv04_graph_mthd_bind_surf2d);
+	NVOBJ_MTHD (dev, 0x005f, 0x02fc, nv04_graph_mthd_set_operation);
+
+	/* nv04 iifc */
+	NVOBJ_CLASS(dev, 0x0060, GR);
+	NVOBJ_MTHD (dev, 0x0060, 0x0188, nv04_graph_mthd_bind_chroma);
+	NVOBJ_MTHD (dev, 0x0060, 0x018c, nv04_graph_mthd_bind_clip);
+	NVOBJ_MTHD (dev, 0x0060, 0x0190, nv04_graph_mthd_bind_nv04_patt);
+	NVOBJ_MTHD (dev, 0x0060, 0x0194, nv04_graph_mthd_bind_rop);
+	NVOBJ_MTHD (dev, 0x0060, 0x0198, nv04_graph_mthd_bind_beta1);
+	NVOBJ_MTHD (dev, 0x0060, 0x019c, nv04_graph_mthd_bind_beta4);
+	NVOBJ_MTHD (dev, 0x0060, 0x01a0, nv04_graph_mthd_bind_surf2d_swzsurf);
+	NVOBJ_MTHD (dev, 0x0060, 0x03e4, nv04_graph_mthd_set_operation);
+
+	/* nv05 iifc */
+	NVOBJ_CLASS(dev, 0x0064, GR);
+
+	/* nv01 ifc */
+	NVOBJ_CLASS(dev, 0x0021, GR);
+	NVOBJ_MTHD (dev, 0x0021, 0x0184, nv04_graph_mthd_bind_chroma);
+	NVOBJ_MTHD (dev, 0x0021, 0x0188, nv04_graph_mthd_bind_clip);
+	NVOBJ_MTHD (dev, 0x0021, 0x018c, nv04_graph_mthd_bind_nv01_patt);
+	NVOBJ_MTHD (dev, 0x0021, 0x0190, nv04_graph_mthd_bind_rop);
+	NVOBJ_MTHD (dev, 0x0021, 0x0194, nv04_graph_mthd_bind_beta1);
+	NVOBJ_MTHD (dev, 0x0021, 0x0198, nv04_graph_mthd_bind_surf_dst);
+	NVOBJ_MTHD (dev, 0x0021, 0x02fc, nv04_graph_mthd_set_operation);
+
+	/* nv04 ifc */
+	NVOBJ_CLASS(dev, 0x0061, GR);
+	NVOBJ_MTHD (dev, 0x0061, 0x0184, nv04_graph_mthd_bind_chroma);
+	NVOBJ_MTHD (dev, 0x0061, 0x0188, nv04_graph_mthd_bind_clip);
+	NVOBJ_MTHD (dev, 0x0061, 0x018c, nv04_graph_mthd_bind_nv04_patt);
+	NVOBJ_MTHD (dev, 0x0061, 0x0190, nv04_graph_mthd_bind_rop);
+	NVOBJ_MTHD (dev, 0x0061, 0x0194, nv04_graph_mthd_bind_beta1);
+	NVOBJ_MTHD (dev, 0x0061, 0x0198, nv04_graph_mthd_bind_beta4);
+	NVOBJ_MTHD (dev, 0x0061, 0x019c, nv04_graph_mthd_bind_surf2d);
+	NVOBJ_MTHD (dev, 0x0061, 0x02fc, nv04_graph_mthd_set_operation);
+
+	/* nv05 ifc */
+	NVOBJ_CLASS(dev, 0x0065, GR);
+
+	/* nv03 sifc */
+	NVOBJ_CLASS(dev, 0x0036, GR);
+	NVOBJ_MTHD (dev, 0x0036, 0x0184, nv04_graph_mthd_bind_chroma);
+	NVOBJ_MTHD (dev, 0x0036, 0x0188, nv04_graph_mthd_bind_nv01_patt);
+	NVOBJ_MTHD (dev, 0x0036, 0x018c, nv04_graph_mthd_bind_rop);
+	NVOBJ_MTHD (dev, 0x0036, 0x0190, nv04_graph_mthd_bind_beta1);
+	NVOBJ_MTHD (dev, 0x0036, 0x0194, nv04_graph_mthd_bind_surf_dst);
+	NVOBJ_MTHD (dev, 0x0036, 0x02fc, nv04_graph_mthd_set_operation);
+
+	/* nv04 sifc */
+	NVOBJ_CLASS(dev, 0x0076, GR);
+	NVOBJ_MTHD (dev, 0x0076, 0x0184, nv04_graph_mthd_bind_chroma);
+	NVOBJ_MTHD (dev, 0x0076, 0x0188, nv04_graph_mthd_bind_nv04_patt);
+	NVOBJ_MTHD (dev, 0x0076, 0x018c, nv04_graph_mthd_bind_rop);
+	NVOBJ_MTHD (dev, 0x0076, 0x0190, nv04_graph_mthd_bind_beta1);
+	NVOBJ_MTHD (dev, 0x0076, 0x0194, nv04_graph_mthd_bind_beta4);
+	NVOBJ_MTHD (dev, 0x0076, 0x0198, nv04_graph_mthd_bind_surf2d);
+	NVOBJ_MTHD (dev, 0x0076, 0x02fc, nv04_graph_mthd_set_operation);
+
+	/* nv05 sifc */
+	NVOBJ_CLASS(dev, 0x0066, GR);
+
+	/* nv03 sifm */
+	NVOBJ_CLASS(dev, 0x0037, GR);
+	NVOBJ_MTHD (dev, 0x0037, 0x0188, nv04_graph_mthd_bind_nv01_patt);
+	NVOBJ_MTHD (dev, 0x0037, 0x018c, nv04_graph_mthd_bind_rop);
+	NVOBJ_MTHD (dev, 0x0037, 0x0190, nv04_graph_mthd_bind_beta1);
+	NVOBJ_MTHD (dev, 0x0037, 0x0194, nv04_graph_mthd_bind_surf_dst);
+	NVOBJ_MTHD (dev, 0x0037, 0x0304, nv04_graph_mthd_set_operation);
+
+	/* nv04 sifm */
+	NVOBJ_CLASS(dev, 0x0077, GR);
+	NVOBJ_MTHD (dev, 0x0077, 0x0188, nv04_graph_mthd_bind_nv04_patt);
+	NVOBJ_MTHD (dev, 0x0077, 0x018c, nv04_graph_mthd_bind_rop);
+	NVOBJ_MTHD (dev, 0x0077, 0x0190, nv04_graph_mthd_bind_beta1);
+	NVOBJ_MTHD (dev, 0x0077, 0x0194, nv04_graph_mthd_bind_beta4);
+	NVOBJ_MTHD (dev, 0x0077, 0x0198, nv04_graph_mthd_bind_surf2d_swzsurf);
+	NVOBJ_MTHD (dev, 0x0077, 0x0304, nv04_graph_mthd_set_operation);
+
+	/* null */
+	NVOBJ_CLASS(dev, 0x0030, GR);
+
+	/* surf2d */
+	NVOBJ_CLASS(dev, 0x0042, GR);
+
+	/* rop */
+	NVOBJ_CLASS(dev, 0x0043, GR);
+
+	/* beta1 */
+	NVOBJ_CLASS(dev, 0x0012, GR);
+
+	/* beta4 */
+	NVOBJ_CLASS(dev, 0x0072, GR);
+
+	/* cliprect */
+	NVOBJ_CLASS(dev, 0x0019, GR);
+
+	/* nv01 pattern */
+	NVOBJ_CLASS(dev, 0x0018, GR);
+
+	/* nv04 pattern */
+	NVOBJ_CLASS(dev, 0x0044, GR);
+
+	/* swzsurf */
+	NVOBJ_CLASS(dev, 0x0052, GR);
+
+	/* surf3d */
+	NVOBJ_CLASS(dev, 0x0053, GR);
+	NVOBJ_MTHD (dev, 0x0053, 0x02f8, nv04_graph_mthd_surf3d_clip_h);
+	NVOBJ_MTHD (dev, 0x0053, 0x02fc, nv04_graph_mthd_surf3d_clip_v);
+
+	/* nv03 tex_tri */
+	NVOBJ_CLASS(dev, 0x0048, GR);
+	NVOBJ_MTHD (dev, 0x0048, 0x0188, nv04_graph_mthd_bind_clip);
+	NVOBJ_MTHD (dev, 0x0048, 0x018c, nv04_graph_mthd_bind_surf_color);
+	NVOBJ_MTHD (dev, 0x0048, 0x0190, nv04_graph_mthd_bind_surf_zeta);
+
+	/* tex_tri */
+	NVOBJ_CLASS(dev, 0x0054, GR);
+
+	/* multitex_tri */
+	NVOBJ_CLASS(dev, 0x0055, GR);
+
+	/* nv01 chroma */
+	NVOBJ_CLASS(dev, 0x0017, GR);
+
+	/* nv04 chroma */
+	NVOBJ_CLASS(dev, 0x0057, GR);
+
+	/* surf_dst */
+	NVOBJ_CLASS(dev, 0x0058, GR);
+
+	/* surf_src */
+	NVOBJ_CLASS(dev, 0x0059, GR);
+
+	/* surf_color */
+	NVOBJ_CLASS(dev, 0x005a, GR);
+
+	/* surf_zeta */
+	NVOBJ_CLASS(dev, 0x005b, GR);
+
+	/* nv01 line */
+	NVOBJ_CLASS(dev, 0x001c, GR);
+	NVOBJ_MTHD (dev, 0x001c, 0x0184, nv04_graph_mthd_bind_clip);
+	NVOBJ_MTHD (dev, 0x001c, 0x0188, nv04_graph_mthd_bind_nv01_patt);
+	NVOBJ_MTHD (dev, 0x001c, 0x018c, nv04_graph_mthd_bind_rop);
+	NVOBJ_MTHD (dev, 0x001c, 0x0190, nv04_graph_mthd_bind_beta1);
+	NVOBJ_MTHD (dev, 0x001c, 0x0194, nv04_graph_mthd_bind_surf_dst);
+	NVOBJ_MTHD (dev, 0x001c, 0x02fc, nv04_graph_mthd_set_operation);
+
+	/* nv04 line */
+	NVOBJ_CLASS(dev, 0x005c, GR);
+	NVOBJ_MTHD (dev, 0x005c, 0x0184, nv04_graph_mthd_bind_clip);
+	NVOBJ_MTHD (dev, 0x005c, 0x0188, nv04_graph_mthd_bind_nv04_patt);
+	NVOBJ_MTHD (dev, 0x005c, 0x018c, nv04_graph_mthd_bind_rop);
+	NVOBJ_MTHD (dev, 0x005c, 0x0190, nv04_graph_mthd_bind_beta1);
+	NVOBJ_MTHD (dev, 0x005c, 0x0194, nv04_graph_mthd_bind_beta4);
+	NVOBJ_MTHD (dev, 0x005c, 0x0198, nv04_graph_mthd_bind_surf2d);
+	NVOBJ_MTHD (dev, 0x005c, 0x02fc, nv04_graph_mthd_set_operation);
+
+	/* nv01 tri */
+	NVOBJ_CLASS(dev, 0x001d, GR);
+	NVOBJ_MTHD (dev, 0x001d, 0x0184, nv04_graph_mthd_bind_clip);
+	NVOBJ_MTHD (dev, 0x001d, 0x0188, nv04_graph_mthd_bind_nv01_patt);
+	NVOBJ_MTHD (dev, 0x001d, 0x018c, nv04_graph_mthd_bind_rop);
+	NVOBJ_MTHD (dev, 0x001d, 0x0190, nv04_graph_mthd_bind_beta1);
+	NVOBJ_MTHD (dev, 0x001d, 0x0194, nv04_graph_mthd_bind_surf_dst);
+	NVOBJ_MTHD (dev, 0x001d, 0x02fc, nv04_graph_mthd_set_operation);
+
+	/* nv04 tri */
+	NVOBJ_CLASS(dev, 0x005d, GR);
+	NVOBJ_MTHD (dev, 0x005d, 0x0184, nv04_graph_mthd_bind_clip);
+	NVOBJ_MTHD (dev, 0x005d, 0x0188, nv04_graph_mthd_bind_nv04_patt);
+	NVOBJ_MTHD (dev, 0x005d, 0x018c, nv04_graph_mthd_bind_rop);
+	NVOBJ_MTHD (dev, 0x005d, 0x0190, nv04_graph_mthd_bind_beta1);
+	NVOBJ_MTHD (dev, 0x005d, 0x0194, nv04_graph_mthd_bind_beta4);
+	NVOBJ_MTHD (dev, 0x005d, 0x0198, nv04_graph_mthd_bind_surf2d);
+	NVOBJ_MTHD (dev, 0x005d, 0x02fc, nv04_graph_mthd_set_operation);
+
+	/* nv01 rect */
+	NVOBJ_CLASS(dev, 0x001e, GR);
+	NVOBJ_MTHD (dev, 0x001e, 0x0184, nv04_graph_mthd_bind_clip);
+	NVOBJ_MTHD (dev, 0x001e, 0x0188, nv04_graph_mthd_bind_nv01_patt);
+	NVOBJ_MTHD (dev, 0x001e, 0x018c, nv04_graph_mthd_bind_rop);
+	NVOBJ_MTHD (dev, 0x001e, 0x0190, nv04_graph_mthd_bind_beta1);
+	NVOBJ_MTHD (dev, 0x001e, 0x0194, nv04_graph_mthd_bind_surf_dst);
+	NVOBJ_MTHD (dev, 0x001e, 0x02fc, nv04_graph_mthd_set_operation);
+
+	/* nv04 rect */
+	NVOBJ_CLASS(dev, 0x005e, GR);
+	NVOBJ_MTHD (dev, 0x005e, 0x0184, nv04_graph_mthd_bind_clip);
+	NVOBJ_MTHD (dev, 0x005e, 0x0188, nv04_graph_mthd_bind_nv04_patt);
+	NVOBJ_MTHD (dev, 0x005e, 0x018c, nv04_graph_mthd_bind_rop);
+	NVOBJ_MTHD (dev, 0x005e, 0x0190, nv04_graph_mthd_bind_beta1);
+	NVOBJ_MTHD (dev, 0x005e, 0x0194, nv04_graph_mthd_bind_beta4);
+	NVOBJ_MTHD (dev, 0x005e, 0x0198, nv04_graph_mthd_bind_surf2d);
+	NVOBJ_MTHD (dev, 0x005e, 0x02fc, nv04_graph_mthd_set_operation);
+
+	/* nvsw */
+	NVOBJ_CLASS(dev, 0x506e, SW);
+	NVOBJ_MTHD (dev, 0x506e, 0x0150, nv04_graph_mthd_set_ref);
+	NVOBJ_MTHD (dev, 0x506e, 0x0500, nv04_graph_mthd_page_flip);
+
+	dev_priv->engine.graph.registered = true;
+	return 0;
+}
+
+static struct nouveau_bitfield nv04_graph_intr[] = {
+	{ NV_PGRAPH_INTR_NOTIFY, "NOTIFY" },
 	{}
 };
 
-static struct nouveau_pgraph_object_method nv04_graph_mthds_nv03_gdirect[] = {
-	{ 0x0184, nv04_graph_mthd_bind_nv01_patt },
-	{ 0x0188, nv04_graph_mthd_bind_rop },
-	{ 0x018c, nv04_graph_mthd_bind_beta1 },
-	{ 0x0190, nv04_graph_mthd_bind_surf_dst },
-	{ 0x02fc, nv04_graph_mthd_set_operation },
-	{},
-};
-
-static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_gdirect[] = {
-	{ 0x0188, nv04_graph_mthd_bind_nv04_patt },
-	{ 0x018c, nv04_graph_mthd_bind_rop },
-	{ 0x0190, nv04_graph_mthd_bind_beta1 },
-	{ 0x0194, nv04_graph_mthd_bind_beta4 },
-	{ 0x0198, nv04_graph_mthd_bind_surf2d },
-	{ 0x02fc, nv04_graph_mthd_set_operation },
-	{},
-};
-
-static struct nouveau_pgraph_object_method nv04_graph_mthds_nv01_imageblit[] = {
-	{ 0x0184, nv04_graph_mthd_bind_chroma },
-	{ 0x0188, nv04_graph_mthd_bind_clip },
-	{ 0x018c, nv04_graph_mthd_bind_nv01_patt },
-	{ 0x0190, nv04_graph_mthd_bind_rop },
-	{ 0x0194, nv04_graph_mthd_bind_beta1 },
-	{ 0x0198, nv04_graph_mthd_bind_surf_dst },
-	{ 0x019c, nv04_graph_mthd_bind_surf_src },
-	{ 0x02fc, nv04_graph_mthd_set_operation },
-	{},
-};
-
-static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_imageblit_ifc[] = {
-	{ 0x0184, nv04_graph_mthd_bind_chroma },
-	{ 0x0188, nv04_graph_mthd_bind_clip },
-	{ 0x018c, nv04_graph_mthd_bind_nv04_patt },
-	{ 0x0190, nv04_graph_mthd_bind_rop },
-	{ 0x0194, nv04_graph_mthd_bind_beta1 },
-	{ 0x0198, nv04_graph_mthd_bind_beta4 },
-	{ 0x019c, nv04_graph_mthd_bind_surf2d },
-	{ 0x02fc, nv04_graph_mthd_set_operation },
-	{},
-};
-
-static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_iifc[] = {
-	{ 0x0188, nv04_graph_mthd_bind_chroma },
-	{ 0x018c, nv04_graph_mthd_bind_clip },
-	{ 0x0190, nv04_graph_mthd_bind_nv04_patt },
-	{ 0x0194, nv04_graph_mthd_bind_rop },
-	{ 0x0198, nv04_graph_mthd_bind_beta1 },
-	{ 0x019c, nv04_graph_mthd_bind_beta4 },
-	{ 0x01a0, nv04_graph_mthd_bind_surf2d_swzsurf },
-	{ 0x03e4, nv04_graph_mthd_set_operation },
-	{},
-};
-
-static struct nouveau_pgraph_object_method nv04_graph_mthds_nv01_ifc[] = {
-	{ 0x0184, nv04_graph_mthd_bind_chroma },
-	{ 0x0188, nv04_graph_mthd_bind_clip },
-	{ 0x018c, nv04_graph_mthd_bind_nv01_patt },
-	{ 0x0190, nv04_graph_mthd_bind_rop },
-	{ 0x0194, nv04_graph_mthd_bind_beta1 },
-	{ 0x0198, nv04_graph_mthd_bind_surf_dst },
-	{ 0x02fc, nv04_graph_mthd_set_operation },
-	{},
-};
-
-static struct nouveau_pgraph_object_method nv04_graph_mthds_nv03_sifc[] = {
-	{ 0x0184, nv04_graph_mthd_bind_chroma },
-	{ 0x0188, nv04_graph_mthd_bind_nv01_patt },
-	{ 0x018c, nv04_graph_mthd_bind_rop },
-	{ 0x0190, nv04_graph_mthd_bind_beta1 },
-	{ 0x0194, nv04_graph_mthd_bind_surf_dst },
-	{ 0x02fc, nv04_graph_mthd_set_operation },
-	{},
-};
-
-static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_sifc[] = {
-	{ 0x0184, nv04_graph_mthd_bind_chroma },
-	{ 0x0188, nv04_graph_mthd_bind_nv04_patt },
-	{ 0x018c, nv04_graph_mthd_bind_rop },
-	{ 0x0190, nv04_graph_mthd_bind_beta1 },
-	{ 0x0194, nv04_graph_mthd_bind_beta4 },
-	{ 0x0198, nv04_graph_mthd_bind_surf2d },
-	{ 0x02fc, nv04_graph_mthd_set_operation },
-	{},
-};
-
-static struct nouveau_pgraph_object_method nv04_graph_mthds_nv03_sifm[] = {
-	{ 0x0188, nv04_graph_mthd_bind_nv01_patt },
-	{ 0x018c, nv04_graph_mthd_bind_rop },
-	{ 0x0190, nv04_graph_mthd_bind_beta1 },
-	{ 0x0194, nv04_graph_mthd_bind_surf_dst },
-	{ 0x0304, nv04_graph_mthd_set_operation },
-	{},
-};
-
-static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_sifm[] = {
-	{ 0x0188, nv04_graph_mthd_bind_nv04_patt },
-	{ 0x018c, nv04_graph_mthd_bind_rop },
-	{ 0x0190, nv04_graph_mthd_bind_beta1 },
-	{ 0x0194, nv04_graph_mthd_bind_beta4 },
-	{ 0x0198, nv04_graph_mthd_bind_surf2d_swzsurf },
-	{ 0x0304, nv04_graph_mthd_set_operation },
-	{},
-};
-
-static struct nouveau_pgraph_object_method nv04_graph_mthds_nv01_shape[] = {
-	{ 0x0184, nv04_graph_mthd_bind_clip },
-	{ 0x0188, nv04_graph_mthd_bind_nv01_patt },
-	{ 0x018c, nv04_graph_mthd_bind_rop },
-	{ 0x0190, nv04_graph_mthd_bind_beta1 },
-	{ 0x0194, nv04_graph_mthd_bind_surf_dst },
-	{ 0x02fc, nv04_graph_mthd_set_operation },
-	{},
-};
-
-static struct nouveau_pgraph_object_method nv04_graph_mthds_nv04_shape[] = {
-	{ 0x0184, nv04_graph_mthd_bind_clip },
-	{ 0x0188, nv04_graph_mthd_bind_nv04_patt },
-	{ 0x018c, nv04_graph_mthd_bind_rop },
-	{ 0x0190, nv04_graph_mthd_bind_beta1 },
-	{ 0x0194, nv04_graph_mthd_bind_beta4 },
-	{ 0x0198, nv04_graph_mthd_bind_surf2d },
-	{ 0x02fc, nv04_graph_mthd_set_operation },
-	{},
-};
-
-static struct nouveau_pgraph_object_method nv04_graph_mthds_nv03_tex_tri[] = {
-	{ 0x0188, nv04_graph_mthd_bind_clip },
-	{ 0x018c, nv04_graph_mthd_bind_surf_color },
-	{ 0x0190, nv04_graph_mthd_bind_surf_zeta },
-	{},
-};
-
-static struct nouveau_pgraph_object_method nv04_graph_mthds_surf3d[] = {
-	{ 0x02f8, nv04_graph_mthd_surf3d_clip_h },
-	{ 0x02fc, nv04_graph_mthd_surf3d_clip_v },
-	{},
-};
-
-struct nouveau_pgraph_object_class nv04_graph_grclass[] = {
-	{ 0x0038, false, NULL }, /* dvd subpicture */
-	{ 0x0039, false, NULL }, /* m2mf */
-	{ 0x004b, false, nv04_graph_mthds_nv03_gdirect }, /* nv03 gdirect */
-	{ 0x004a, false, nv04_graph_mthds_nv04_gdirect }, /* nv04 gdirect */
-	{ 0x001f, false, nv04_graph_mthds_nv01_imageblit }, /* nv01 imageblit */
-	{ 0x005f, false, nv04_graph_mthds_nv04_imageblit_ifc }, /* nv04 imageblit */
-	{ 0x0060, false, nv04_graph_mthds_nv04_iifc }, /* nv04 iifc */
-	{ 0x0064, false, NULL }, /* nv05 iifc */
-	{ 0x0021, false, nv04_graph_mthds_nv01_ifc }, /* nv01 ifc */
-	{ 0x0061, false, nv04_graph_mthds_nv04_imageblit_ifc }, /* nv04 ifc */
-	{ 0x0065, false, NULL }, /* nv05 ifc */
-	{ 0x0036, false, nv04_graph_mthds_nv03_sifc }, /* nv03 sifc */
-	{ 0x0076, false, nv04_graph_mthds_nv04_sifc }, /* nv04 sifc */
-	{ 0x0066, false, NULL }, /* nv05 sifc */
-	{ 0x0037, false, nv04_graph_mthds_nv03_sifm }, /* nv03 sifm */
-	{ 0x0077, false, nv04_graph_mthds_nv04_sifm }, /* nv04 sifm */
-	{ 0x0030, false, NULL }, /* null */
-	{ 0x0042, false, NULL }, /* surf2d */
-	{ 0x0043, false, NULL }, /* rop */
-	{ 0x0012, false, NULL }, /* beta1 */
-	{ 0x0072, false, NULL }, /* beta4 */
-	{ 0x0019, false, NULL }, /* cliprect */
-	{ 0x0018, false, NULL }, /* nv01 pattern */
-	{ 0x0044, false, NULL }, /* nv04 pattern */
-	{ 0x0052, false, NULL }, /* swzsurf */
-	{ 0x0053, false, nv04_graph_mthds_surf3d }, /* surf3d */
-	{ 0x0048, false, nv04_graph_mthds_nv03_tex_tri }, /* nv03 tex_tri */
-	{ 0x0054, false, NULL }, /* tex_tri */
-	{ 0x0055, false, NULL }, /* multitex_tri */
-	{ 0x0017, false, NULL }, /* nv01 chroma */
-	{ 0x0057, false, NULL }, /* nv04 chroma */
-	{ 0x0058, false, NULL }, /* surf_dst */
-	{ 0x0059, false, NULL }, /* surf_src */
-	{ 0x005a, false, NULL }, /* surf_color */
-	{ 0x005b, false, NULL }, /* surf_zeta */
-	{ 0x001c, false, nv04_graph_mthds_nv01_shape }, /* nv01 line */
-	{ 0x005c, false, nv04_graph_mthds_nv04_shape }, /* nv04 line */
-	{ 0x001d, false, nv04_graph_mthds_nv01_shape }, /* nv01 tri */
-	{ 0x005d, false, nv04_graph_mthds_nv04_shape }, /* nv04 tri */
-	{ 0x001e, false, nv04_graph_mthds_nv01_shape }, /* nv01 rect */
-	{ 0x005e, false, nv04_graph_mthds_nv04_shape }, /* nv04 rect */
-	{ 0x506e, true, nv04_graph_mthds_sw },
+static struct nouveau_bitfield nv04_graph_nstatus[] =
+{
+	{ NV04_PGRAPH_NSTATUS_STATE_IN_USE,       "STATE_IN_USE" },
+	{ NV04_PGRAPH_NSTATUS_INVALID_STATE,      "INVALID_STATE" },
+	{ NV04_PGRAPH_NSTATUS_BAD_ARGUMENT,       "BAD_ARGUMENT" },
+	{ NV04_PGRAPH_NSTATUS_PROTECTION_FAULT,   "PROTECTION_FAULT" },
 	{}
 };
 
+struct nouveau_bitfield nv04_graph_nsource[] =
+{
+	{ NV03_PGRAPH_NSOURCE_NOTIFICATION,       "NOTIFICATION" },
+	{ NV03_PGRAPH_NSOURCE_DATA_ERROR,         "DATA_ERROR" },
+	{ NV03_PGRAPH_NSOURCE_PROTECTION_ERROR,   "PROTECTION_ERROR" },
+	{ NV03_PGRAPH_NSOURCE_RANGE_EXCEPTION,    "RANGE_EXCEPTION" },
+	{ NV03_PGRAPH_NSOURCE_LIMIT_COLOR,        "LIMIT_COLOR" },
+	{ NV03_PGRAPH_NSOURCE_LIMIT_ZETA,         "LIMIT_ZETA" },
+	{ NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD,       "ILLEGAL_MTHD" },
+	{ NV03_PGRAPH_NSOURCE_DMA_R_PROTECTION,   "DMA_R_PROTECTION" },
+	{ NV03_PGRAPH_NSOURCE_DMA_W_PROTECTION,   "DMA_W_PROTECTION" },
+	{ NV03_PGRAPH_NSOURCE_FORMAT_EXCEPTION,   "FORMAT_EXCEPTION" },
+	{ NV03_PGRAPH_NSOURCE_PATCH_EXCEPTION,    "PATCH_EXCEPTION" },
+	{ NV03_PGRAPH_NSOURCE_STATE_INVALID,      "STATE_INVALID" },
+	{ NV03_PGRAPH_NSOURCE_DOUBLE_NOTIFY,      "DOUBLE_NOTIFY" },
+	{ NV03_PGRAPH_NSOURCE_NOTIFY_IN_USE,      "NOTIFY_IN_USE" },
+	{ NV03_PGRAPH_NSOURCE_METHOD_CNT,         "METHOD_CNT" },
+	{ NV03_PGRAPH_NSOURCE_BFR_NOTIFICATION,   "BFR_NOTIFICATION" },
+	{ NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION, "DMA_VTX_PROTECTION" },
+	{ NV03_PGRAPH_NSOURCE_DMA_WIDTH_A,        "DMA_WIDTH_A" },
+	{ NV03_PGRAPH_NSOURCE_DMA_WIDTH_B,        "DMA_WIDTH_B" },
+	{}
+};
+
+static void
+nv04_graph_isr(struct drm_device *dev)
+{
+	u32 stat;
+
+	while ((stat = nv_rd32(dev, NV03_PGRAPH_INTR))) {
+		u32 nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
+		u32 nstatus = nv_rd32(dev, NV03_PGRAPH_NSTATUS);
+		u32 addr = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR);
+		u32 chid = (addr & 0x0f000000) >> 24;
+		u32 subc = (addr & 0x0000e000) >> 13;
+		u32 mthd = (addr & 0x00001ffc);
+		u32 data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA);
+		u32 class = nv_rd32(dev, 0x400180 + subc * 4) & 0xff;
+		u32 show = stat;
+
+		if (stat & NV_PGRAPH_INTR_NOTIFY) {
+			if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
+				if (!nouveau_gpuobj_mthd_call2(dev, chid, class, mthd, data))
+					show &= ~NV_PGRAPH_INTR_NOTIFY;
+			}
+		}
+
+		if (stat & NV_PGRAPH_INTR_CONTEXT_SWITCH) {
+			nv_wr32(dev, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_CONTEXT_SWITCH);
+			stat &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
+			show &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
+			nv04_graph_context_switch(dev);
+		}
+
+		nv_wr32(dev, NV03_PGRAPH_INTR, stat);
+		nv_wr32(dev, NV04_PGRAPH_FIFO, 0x00000001);
+
+		if (show && nouveau_ratelimit()) {
+			NV_INFO(dev, "PGRAPH -");
+			nouveau_bitfield_print(nv04_graph_intr, show);
+			printk(" nsource:");
+			nouveau_bitfield_print(nv04_graph_nsource, nsource);
+			printk(" nstatus:");
+			nouveau_bitfield_print(nv04_graph_nstatus, nstatus);
+			printk("\n");
+			NV_INFO(dev, "PGRAPH - ch %d/%d class 0x%04x "
+				     "mthd 0x%04x data 0x%08x\n",
+				chid, subc, class, mthd, data);
+		}
+	}
+}
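
The PGRAPH interrupt handler added above recovers the channel, subchannel and method from NV04_PGRAPH_TRAPPED_ADDR with fixed bit masks. A quick standalone illustration of that decode (the sample register value is made up):

/* Decode of a trapped PGRAPH address as done in nv04_graph_isr().
 * The sample value is arbitrary; only the masks match the code above.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t addr = 0x0300a1f4;                 /* example trapped address */
	uint32_t chid = (addr & 0x0f000000) >> 24;  /* channel id */
	uint32_t subc = (addr & 0x0000e000) >> 13;  /* subchannel */
	uint32_t mthd = (addr & 0x00001ffc);        /* method offset */

	printf("ch %u subc %u mthd 0x%04x\n", chid, subc, mthd);
	return 0;
}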
diff --git a/drivers/gpu/drm/nouveau/nv04_instmem.c b/drivers/gpu/drm/nouveau/nv04_instmem.c
index 0b5ae29..b8e3edb 100644
--- a/drivers/gpu/drm/nouveau/nv04_instmem.c
+++ b/drivers/gpu/drm/nouveau/nv04_instmem.c
@@ -98,35 +98,6 @@
 }
 
 int
-nv04_instmem_populate(struct drm_device *dev, struct nouveau_gpuobj *gpuobj,
-		      uint32_t *sz)
-{
-	return 0;
-}
-
-void
-nv04_instmem_clear(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
-{
-}
-
-int
-nv04_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
-{
-	return 0;
-}
-
-int
-nv04_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
-{
-	return 0;
-}
-
-void
-nv04_instmem_flush(struct drm_device *dev)
-{
-}
-
-int
 nv04_instmem_suspend(struct drm_device *dev)
 {
 	return 0;
@@ -137,3 +108,56 @@
 {
 }
 
+int
+nv04_instmem_get(struct nouveau_gpuobj *gpuobj, u32 size, u32 align)
+{
+	struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
+	struct drm_mm_node *ramin = NULL;
+
+	do {
+		if (drm_mm_pre_get(&dev_priv->ramin_heap))
+			return -ENOMEM;
+
+		spin_lock(&dev_priv->ramin_lock);
+		ramin = drm_mm_search_free(&dev_priv->ramin_heap, size, align, 0);
+		if (ramin == NULL) {
+			spin_unlock(&dev_priv->ramin_lock);
+			return -ENOMEM;
+		}
+
+		ramin = drm_mm_get_block_atomic(ramin, size, align);
+		spin_unlock(&dev_priv->ramin_lock);
+	} while (ramin == NULL);
+
+	gpuobj->node  = ramin;
+	gpuobj->vinst = ramin->start;
+	return 0;
+}
+
+void
+nv04_instmem_put(struct nouveau_gpuobj *gpuobj)
+{
+	struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
+
+	spin_lock(&dev_priv->ramin_lock);
+	drm_mm_put_block(gpuobj->node);
+	gpuobj->node = NULL;
+	spin_unlock(&dev_priv->ramin_lock);
+}
+
+int
+nv04_instmem_map(struct nouveau_gpuobj *gpuobj)
+{
+	gpuobj->pinst = gpuobj->vinst;
+	return 0;
+}
+
+void
+nv04_instmem_unmap(struct nouveau_gpuobj *gpuobj)
+{
+}
+
+void
+nv04_instmem_flush(struct drm_device *dev)
+{
+}
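
nv04_instmem_get() above follows the usual drm_mm pattern for allocating under a spinlock: drm_mm_pre_get() reserves node memory while sleeping is still allowed, the search and the atomic claim happen with ramin_lock held, and the whole thing retries if the candidate block is lost to a racing allocator between iterations. Condensed into a generic helper (a sketch against the drm_mm API used in this patch, not new driver code):

/* assumes the in-tree drm_mm.h / linux/spinlock.h used elsewhere in this patch */
static struct drm_mm_node *
ramin_alloc_atomic(struct drm_mm *heap, spinlock_t *lock, u32 size, u32 align)
{
	struct drm_mm_node *node;

	do {
		/* pre-allocate node memory; the only step that may sleep */
		if (drm_mm_pre_get(heap))
			return NULL;

		spin_lock(lock);
		node = drm_mm_search_free(heap, size, align, 0);
		if (node == NULL) {
			spin_unlock(lock);
			return NULL;	/* heap genuinely full */
		}

		/* may still fail if the pre-got nodes were consumed by a
		 * racing allocation; loop around and pre-get again */
		node = drm_mm_get_block_atomic(node, size, align);
		spin_unlock(lock);
	} while (node == NULL);

	return node;
}
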
diff --git a/drivers/gpu/drm/nouveau/nv10_fb.c b/drivers/gpu/drm/nouveau/nv10_fb.c
index cc5cda4..f78181a 100644
--- a/drivers/gpu/drm/nouveau/nv10_fb.c
+++ b/drivers/gpu/drm/nouveau/nv10_fb.c
@@ -3,23 +3,109 @@
 #include "nouveau_drv.h"
 #include "nouveau_drm.h"
 
-void
-nv10_fb_set_region_tiling(struct drm_device *dev, int i, uint32_t addr,
-			  uint32_t size, uint32_t pitch)
+static struct drm_mm_node *
+nv20_fb_alloc_tag(struct drm_device *dev, uint32_t size)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	uint32_t limit = max(1u, addr + size) - 1;
+	struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
+	struct drm_mm_node *mem;
+	int ret;
 
-	if (pitch) {
-		if (dev_priv->card_type >= NV_20)
-			addr |= 1;
-		else
-			addr |= 1 << 31;
+	ret = drm_mm_pre_get(&pfb->tag_heap);
+	if (ret)
+		return NULL;
+
+	spin_lock(&dev_priv->tile.lock);
+	mem = drm_mm_search_free(&pfb->tag_heap, size, 0, 0);
+	if (mem)
+		mem = drm_mm_get_block_atomic(mem, size, 0);
+	spin_unlock(&dev_priv->tile.lock);
+
+	return mem;
+}
+
+static void
+nv20_fb_free_tag(struct drm_device *dev, struct drm_mm_node *mem)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+	spin_lock(&dev_priv->tile.lock);
+	drm_mm_put_block(mem);
+	spin_unlock(&dev_priv->tile.lock);
+}
+
+void
+nv10_fb_init_tile_region(struct drm_device *dev, int i, uint32_t addr,
+			 uint32_t size, uint32_t pitch, uint32_t flags)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
+	int bpp = (flags & NOUVEAU_GEM_TILE_32BPP ? 32 : 16);
+
+	tile->addr = addr;
+	tile->limit = max(1u, addr + size) - 1;
+	tile->pitch = pitch;
+
+	if (dev_priv->card_type == NV_20) {
+		if (flags & NOUVEAU_GEM_TILE_ZETA) {
+			/*
+			 * Allocate some of the on-die tag memory,
+			 * used to store Z compression meta-data (most
+			 * likely just a bitmap determining if a given
+			 * tile is compressed or not).
+			 */
+			tile->tag_mem = nv20_fb_alloc_tag(dev, size / 256);
+
+			if (tile->tag_mem) {
+				/* Enable Z compression */
+				if (dev_priv->chipset >= 0x25)
+					tile->zcomp = tile->tag_mem->start |
+						(bpp == 16 ?
+						 NV25_PFB_ZCOMP_MODE_16 :
+						 NV25_PFB_ZCOMP_MODE_32);
+				else
+					tile->zcomp = tile->tag_mem->start |
+						NV20_PFB_ZCOMP_EN |
+						(bpp == 16 ? 0 :
+						 NV20_PFB_ZCOMP_MODE_32);
+			}
+
+			tile->addr |= 3;
+		} else {
+			tile->addr |= 1;
+		}
+
+	} else {
+		tile->addr |= 1 << 31;
+	}
+}
+
+void
+nv10_fb_free_tile_region(struct drm_device *dev, int i)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
+
+	if (tile->tag_mem) {
+		nv20_fb_free_tag(dev, tile->tag_mem);
+		tile->tag_mem = NULL;
 	}
 
-	nv_wr32(dev, NV10_PFB_TLIMIT(i), limit);
-	nv_wr32(dev, NV10_PFB_TSIZE(i), pitch);
-	nv_wr32(dev, NV10_PFB_TILE(i), addr);
+	tile->addr = tile->limit = tile->pitch = tile->zcomp = 0;
+}
+
+void
+nv10_fb_set_tile_region(struct drm_device *dev, int i)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
+
+	nv_wr32(dev, NV10_PFB_TLIMIT(i), tile->limit);
+	nv_wr32(dev, NV10_PFB_TSIZE(i), tile->pitch);
+	nv_wr32(dev, NV10_PFB_TILE(i), tile->addr);
+
+	if (dev_priv->card_type == NV_20)
+		nv_wr32(dev, NV20_PFB_ZCOMP(i), tile->zcomp);
 }
 
 int
@@ -31,9 +117,14 @@
 
 	pfb->num_tiles = NV10_PFB_TILE__SIZE;
 
+	if (dev_priv->card_type == NV_20)
+		drm_mm_init(&pfb->tag_heap, 0,
+			    (dev_priv->chipset >= 0x25 ?
+			     64 * 1024 : 32 * 1024));
+
 	/* Turn all the tiling regions off. */
 	for (i = 0; i < pfb->num_tiles; i++)
-		pfb->set_region_tiling(dev, i, 0, 0, 0);
+		pfb->set_tile_region(dev, i);
 
 	return 0;
 }
@@ -41,4 +132,13 @@
 void
 nv10_fb_takedown(struct drm_device *dev)
 {
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
+	int i;
+
+	for (i = 0; i < pfb->num_tiles; i++)
+		pfb->free_tile_region(dev, i);
+
+	if (dev_priv->card_type == NV_20)
+		drm_mm_takedown(&pfb->tag_heap);
 }
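
Worth noting for the NV20 path above: nv20_fb_alloc_tag() takes size / 256 bytes of tag RAM per tiled zeta region, and the tag heap initialised in nv10_fb_init() is 32 KiB on nv20 and 64 KiB on nv25+, so the amount of depth buffer that can be covered by Z compression tops out at 8 MiB and 16 MiB respectively. Quick stand-alone check of that arithmetic (not driver code):

#include <stdio.h>

int main(void)
{
	unsigned long heap[2] = { 32 * 1024, 64 * 1024 }; /* nv20, nv25+ */
	int i;

	for (i = 0; i < 2; i++)
		printf("tag heap %lu KiB -> covers %lu MiB of tiled zeta\n",
		       heap[i] / 1024, heap[i] * 256 / (1024 * 1024));
	return 0;
}
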
diff --git a/drivers/gpu/drm/nouveau/nv10_fifo.c b/drivers/gpu/drm/nouveau/nv10_fifo.c
index f1b03ad..d2ecbff 100644
--- a/drivers/gpu/drm/nouveau/nv10_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv10_fifo.c
@@ -53,6 +53,11 @@
 	if (ret)
 		return ret;
 
+	chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
+			     NV03_USER(chan->id), PAGE_SIZE);
+	if (!chan->user)
+		return -ENOMEM;
+
 	/* Fill entries that are seen filled in dumps of nvidia driver just
 	 * after channel's is put into DMA mode
 	 */
@@ -73,17 +78,6 @@
 	return 0;
 }
 
-void
-nv10_fifo_destroy_context(struct nouveau_channel *chan)
-{
-	struct drm_device *dev = chan->dev;
-
-	nv_wr32(dev, NV04_PFIFO_MODE,
-			nv_rd32(dev, NV04_PFIFO_MODE) & ~(1 << chan->id));
-
-	nouveau_gpuobj_ref(NULL, &chan->ramfc);
-}
-
 static void
 nv10_fifo_do_load_context(struct drm_device *dev, int chid)
 {
@@ -219,6 +213,7 @@
 static void
 nv10_fifo_init_intr(struct drm_device *dev)
 {
+	nouveau_irq_register(dev, 8, nv04_fifo_isr);
 	nv_wr32(dev, 0x002100, 0xffffffff);
 	nv_wr32(dev, 0x002140, 0xffffffff);
 }
@@ -241,7 +236,7 @@
 	pfifo->reassign(dev, true);
 
 	for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
-		if (dev_priv->fifos[i]) {
+		if (dev_priv->channels.ptr[i]) {
 			uint32_t mode = nv_rd32(dev, NV04_PFIFO_MODE);
 			nv_wr32(dev, NV04_PFIFO_MODE, mode | (1 << i));
 		}
diff --git a/drivers/gpu/drm/nouveau/nv10_graph.c b/drivers/gpu/drm/nouveau/nv10_graph.c
index 8e68c97..8c92edb 100644
--- a/drivers/gpu/drm/nouveau/nv10_graph.c
+++ b/drivers/gpu/drm/nouveau/nv10_graph.c
@@ -26,6 +26,10 @@
 #include "drm.h"
 #include "nouveau_drm.h"
 #include "nouveau_drv.h"
+#include "nouveau_util.h"
+
+static int  nv10_graph_register(struct drm_device *);
+static void nv10_graph_isr(struct drm_device *);
 
 #define NV10_FIFO_NUMBER 32
 
@@ -786,15 +790,13 @@
 	return 0;
 }
 
-void
+static void
 nv10_graph_context_switch(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
 	struct nouveau_channel *chan = NULL;
 	int chid;
 
-	pgraph->fifo_access(dev, false);
 	nouveau_wait_for_idle(dev);
 
 	/* If previous context is valid, we need to save it */
@@ -802,11 +804,9 @@
 
 	/* Load context for next channel */
 	chid = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 20) & 0x1f;
-	chan = dev_priv->fifos[chid];
+	chan = dev_priv->channels.ptr[chid];
 	if (chan && chan->pgraph_ctx)
 		nv10_graph_load_context(chan);
-
-	pgraph->fifo_access(dev, true);
 }
 
 #define NV_WRITE_CTX(reg, val) do { \
@@ -833,7 +833,7 @@
 	if (chid >= dev_priv->engine.fifo.channels)
 		return NULL;
 
-	return dev_priv->fifos[chid];
+	return dev_priv->channels.ptr[chid];
 }
 
 int nv10_graph_create_context(struct nouveau_channel *chan)
@@ -875,37 +875,54 @@
 
 void nv10_graph_destroy_context(struct nouveau_channel *chan)
 {
+	struct drm_device *dev = chan->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
 	struct graph_state *pgraph_ctx = chan->pgraph_ctx;
+	unsigned long flags;
 
+	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+	pgraph->fifo_access(dev, false);
+
+	/* Unload the context if it's the currently active one */
+	if (pgraph->channel(dev) == chan)
+		pgraph->unload_context(dev);
+
+	/* Free the context resources */
 	kfree(pgraph_ctx);
 	chan->pgraph_ctx = NULL;
+
+	pgraph->fifo_access(dev, true);
+	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
 }
 
 void
-nv10_graph_set_region_tiling(struct drm_device *dev, int i, uint32_t addr,
-			     uint32_t size, uint32_t pitch)
+nv10_graph_set_tile_region(struct drm_device *dev, int i)
 {
-	uint32_t limit = max(1u, addr + size) - 1;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
 
-	if (pitch)
-		addr |= 1 << 31;
-
-	nv_wr32(dev, NV10_PGRAPH_TLIMIT(i), limit);
-	nv_wr32(dev, NV10_PGRAPH_TSIZE(i), pitch);
-	nv_wr32(dev, NV10_PGRAPH_TILE(i), addr);
+	nv_wr32(dev, NV10_PGRAPH_TLIMIT(i), tile->limit);
+	nv_wr32(dev, NV10_PGRAPH_TSIZE(i), tile->pitch);
+	nv_wr32(dev, NV10_PGRAPH_TILE(i), tile->addr);
 }
 
 int nv10_graph_init(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	uint32_t tmp;
-	int i;
+	int ret, i;
 
 	nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) &
 			~NV_PMC_ENABLE_PGRAPH);
 	nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) |
 			 NV_PMC_ENABLE_PGRAPH);
 
+	ret = nv10_graph_register(dev);
+	if (ret)
+		return ret;
+
+	nouveau_irq_register(dev, 12, nv10_graph_isr);
 	nv_wr32(dev, NV03_PGRAPH_INTR   , 0xFFFFFFFF);
 	nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
 
@@ -928,7 +945,7 @@
 
 	/* Turn all the tiling regions off. */
 	for (i = 0; i < NV10_PFB_TILE__SIZE; i++)
-		nv10_graph_set_region_tiling(dev, i, 0, 0, 0);
+		nv10_graph_set_tile_region(dev, i);
 
 	nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH(0), 0x00000000);
 	nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH(1), 0x00000000);
@@ -948,17 +965,17 @@
 
 void nv10_graph_takedown(struct drm_device *dev)
 {
+	nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0x00000000);
+	nouveau_irq_unregister(dev, 12);
 }
 
 static int
-nv17_graph_mthd_lma_window(struct nouveau_channel *chan, int grclass,
-			   int mthd, uint32_t data)
+nv17_graph_mthd_lma_window(struct nouveau_channel *chan,
+			   u32 class, u32 mthd, u32 data)
 {
 	struct drm_device *dev = chan->dev;
 	struct graph_state *ctx = chan->pgraph_ctx;
 	struct pipe_state *pipe = &ctx->pipe_state;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
 	uint32_t pipe_0x0040[1], pipe_0x64c0[8], pipe_0x6a80[3], pipe_0x6ab0[3];
 	uint32_t xfmode0, xfmode1;
 	int i;
@@ -1025,18 +1042,14 @@
 
 	nouveau_wait_for_idle(dev);
 
-	pgraph->fifo_access(dev, true);
-
 	return 0;
 }
 
 static int
-nv17_graph_mthd_lma_enable(struct nouveau_channel *chan, int grclass,
-			   int mthd, uint32_t data)
+nv17_graph_mthd_lma_enable(struct nouveau_channel *chan,
+			   u32 class, u32 mthd, u32 data)
 {
 	struct drm_device *dev = chan->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
 
 	nouveau_wait_for_idle(dev);
 
@@ -1045,40 +1058,118 @@
 	nv_wr32(dev, 0x004006b0,
 		nv_rd32(dev, 0x004006b0) | 0x8 << 24);
 
-	pgraph->fifo_access(dev, true);
-
 	return 0;
 }
 
-static struct nouveau_pgraph_object_method nv17_graph_celsius_mthds[] = {
-	{ 0x1638, nv17_graph_mthd_lma_window },
-	{ 0x163c, nv17_graph_mthd_lma_window },
-	{ 0x1640, nv17_graph_mthd_lma_window },
-	{ 0x1644, nv17_graph_mthd_lma_window },
-	{ 0x1658, nv17_graph_mthd_lma_enable },
+static int
+nv10_graph_register(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+	if (dev_priv->engine.graph.registered)
+		return 0;
+
+	NVOBJ_CLASS(dev, 0x506e, SW); /* nvsw */
+	NVOBJ_CLASS(dev, 0x0030, GR); /* null */
+	NVOBJ_CLASS(dev, 0x0039, GR); /* m2mf */
+	NVOBJ_CLASS(dev, 0x004a, GR); /* gdirect */
+	NVOBJ_CLASS(dev, 0x005f, GR); /* imageblit */
+	NVOBJ_CLASS(dev, 0x009f, GR); /* imageblit (nv12) */
+	NVOBJ_CLASS(dev, 0x008a, GR); /* ifc */
+	NVOBJ_CLASS(dev, 0x0089, GR); /* sifm */
+	NVOBJ_CLASS(dev, 0x0062, GR); /* surf2d */
+	NVOBJ_CLASS(dev, 0x0043, GR); /* rop */
+	NVOBJ_CLASS(dev, 0x0012, GR); /* beta1 */
+	NVOBJ_CLASS(dev, 0x0072, GR); /* beta4 */
+	NVOBJ_CLASS(dev, 0x0019, GR); /* cliprect */
+	NVOBJ_CLASS(dev, 0x0044, GR); /* pattern */
+	NVOBJ_CLASS(dev, 0x0052, GR); /* swzsurf */
+	NVOBJ_CLASS(dev, 0x0093, GR); /* surf3d */
+	NVOBJ_CLASS(dev, 0x0094, GR); /* tex_tri */
+	NVOBJ_CLASS(dev, 0x0095, GR); /* multitex_tri */
+
+	/* celsius */
+	if (dev_priv->chipset <= 0x10) {
+		NVOBJ_CLASS(dev, 0x0056, GR);
+	} else
+	if (dev_priv->chipset < 0x17 || dev_priv->chipset == 0x1a) {
+		NVOBJ_CLASS(dev, 0x0096, GR);
+	} else {
+		NVOBJ_CLASS(dev, 0x0099, GR);
+		NVOBJ_MTHD (dev, 0x0099, 0x1638, nv17_graph_mthd_lma_window);
+		NVOBJ_MTHD (dev, 0x0099, 0x163c, nv17_graph_mthd_lma_window);
+		NVOBJ_MTHD (dev, 0x0099, 0x1640, nv17_graph_mthd_lma_window);
+		NVOBJ_MTHD (dev, 0x0099, 0x1644, nv17_graph_mthd_lma_window);
+		NVOBJ_MTHD (dev, 0x0099, 0x1658, nv17_graph_mthd_lma_enable);
+	}
+
+	/* nvsw */
+	NVOBJ_CLASS(dev, 0x506e, SW);
+	NVOBJ_MTHD (dev, 0x506e, 0x0500, nv04_graph_mthd_page_flip);
+
+	dev_priv->engine.graph.registered = true;
+	return 0;
+}
+
+struct nouveau_bitfield nv10_graph_intr[] = {
+	{ NV_PGRAPH_INTR_NOTIFY, "NOTIFY" },
+	{ NV_PGRAPH_INTR_ERROR,  "ERROR"  },
 	{}
 };
 
-struct nouveau_pgraph_object_class nv10_graph_grclass[] = {
-	{ 0x0030, false, NULL }, /* null */
-	{ 0x0039, false, NULL }, /* m2mf */
-	{ 0x004a, false, NULL }, /* gdirect */
-	{ 0x005f, false, NULL }, /* imageblit */
-	{ 0x009f, false, NULL }, /* imageblit (nv12) */
-	{ 0x008a, false, NULL }, /* ifc */
-	{ 0x0089, false, NULL }, /* sifm */
-	{ 0x0062, false, NULL }, /* surf2d */
-	{ 0x0043, false, NULL }, /* rop */
-	{ 0x0012, false, NULL }, /* beta1 */
-	{ 0x0072, false, NULL }, /* beta4 */
-	{ 0x0019, false, NULL }, /* cliprect */
-	{ 0x0044, false, NULL }, /* pattern */
-	{ 0x0052, false, NULL }, /* swzsurf */
-	{ 0x0093, false, NULL }, /* surf3d */
-	{ 0x0094, false, NULL }, /* tex_tri */
-	{ 0x0095, false, NULL }, /* multitex_tri */
-	{ 0x0056, false, NULL }, /* celcius (nv10) */
-	{ 0x0096, false, NULL }, /* celcius (nv11) */
-	{ 0x0099, false, nv17_graph_celsius_mthds }, /* celcius (nv17) */
+struct nouveau_bitfield nv10_graph_nstatus[] =
+{
+	{ NV10_PGRAPH_NSTATUS_STATE_IN_USE,       "STATE_IN_USE" },
+	{ NV10_PGRAPH_NSTATUS_INVALID_STATE,      "INVALID_STATE" },
+	{ NV10_PGRAPH_NSTATUS_BAD_ARGUMENT,       "BAD_ARGUMENT" },
+	{ NV10_PGRAPH_NSTATUS_PROTECTION_FAULT,   "PROTECTION_FAULT" },
 	{}
 };
+
+static void
+nv10_graph_isr(struct drm_device *dev)
+{
+	u32 stat;
+
+	while ((stat = nv_rd32(dev, NV03_PGRAPH_INTR))) {
+		u32 nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
+		u32 nstatus = nv_rd32(dev, NV03_PGRAPH_NSTATUS);
+		u32 addr = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR);
+		u32 chid = (addr & 0x01f00000) >> 20;
+		u32 subc = (addr & 0x00070000) >> 16;
+		u32 mthd = (addr & 0x00001ffc);
+		u32 data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA);
+		u32 class = nv_rd32(dev, 0x400160 + subc * 4) & 0xfff;
+		u32 show = stat;
+
+		if (stat & NV_PGRAPH_INTR_ERROR) {
+			if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
+				if (!nouveau_gpuobj_mthd_call2(dev, chid, class, mthd, data))
+					show &= ~NV_PGRAPH_INTR_ERROR;
+			}
+		}
+
+		if (stat & NV_PGRAPH_INTR_CONTEXT_SWITCH) {
+			nv_wr32(dev, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_CONTEXT_SWITCH);
+			stat &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
+			show &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
+			nv10_graph_context_switch(dev);
+		}
+
+		nv_wr32(dev, NV03_PGRAPH_INTR, stat);
+		nv_wr32(dev, NV04_PGRAPH_FIFO, 0x00000001);
+
+		if (show && nouveau_ratelimit()) {
+			NV_INFO(dev, "PGRAPH -");
+			nouveau_bitfield_print(nv10_graph_intr, show);
+			printk(" nsource:");
+			nouveau_bitfield_print(nv04_graph_nsource, nsource);
+			printk(" nstatus:");
+			nouveau_bitfield_print(nv10_graph_nstatus, nstatus);
+			printk("\n");
+			NV_INFO(dev, "PGRAPH - ch %d/%d class 0x%04x "
+				     "mthd 0x%04x data 0x%08x\n",
+				chid, subc, class, mthd, data);
+		}
+	}
+}
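
The destroy_context paths reworked in this series (nv10 above, nv20 and nv40 below) all converge on the same shape: quiesce PGRAPH under context_switch_lock before touching the channel's context, and only free per-channel memory once the hardware can no longer reference it. The shared skeleton, condensed (a sketch built from the engine hooks used in this patch, not an additional function):

static void
graph_destroy_context_skeleton(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
	pgraph->fifo_access(dev, false);	/* stop PGRAPH fetching work */

	if (pgraph->channel(dev) == chan)	/* context live on the HW? */
		pgraph->unload_context(dev);	/* evict it first */

	pgraph->fifo_access(dev, true);
	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);

	/* per-channel context objects are released only after this point */
}
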
diff --git a/drivers/gpu/drm/nouveau/nv20_graph.c b/drivers/gpu/drm/nouveau/nv20_graph.c
index 12ab9cd..8464b76 100644
--- a/drivers/gpu/drm/nouveau/nv20_graph.c
+++ b/drivers/gpu/drm/nouveau/nv20_graph.c
@@ -32,6 +32,10 @@
 #define NV34_GRCTX_SIZE    (18140)
 #define NV35_36_GRCTX_SIZE (22396)
 
+static int nv20_graph_register(struct drm_device *);
+static int nv30_graph_register(struct drm_device *);
+static void nv20_graph_isr(struct drm_device *);
+
 static void
 nv20_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
 {
@@ -425,9 +429,21 @@
 	struct drm_device *dev = chan->dev;
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
+	unsigned long flags;
 
-	nouveau_gpuobj_ref(NULL, &chan->ramin_grctx);
+	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+	pgraph->fifo_access(dev, false);
+
+	/* Unload the context if it's the currently active one */
+	if (pgraph->channel(dev) == chan)
+		pgraph->unload_context(dev);
+
+	pgraph->fifo_access(dev, true);
+	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
+
+	/* Free the context resources */
 	nv_wo32(pgraph->ctx_table, chan->id * 4, 0);
+	nouveau_gpuobj_ref(NULL, &chan->ramin_grctx);
 }
 
 int
@@ -496,24 +512,27 @@
 }
 
 void
-nv20_graph_set_region_tiling(struct drm_device *dev, int i, uint32_t addr,
-			     uint32_t size, uint32_t pitch)
+nv20_graph_set_tile_region(struct drm_device *dev, int i)
 {
-	uint32_t limit = max(1u, addr + size) - 1;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
 
-	if (pitch)
-		addr |= 1;
-
-	nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), limit);
-	nv_wr32(dev, NV20_PGRAPH_TSIZE(i), pitch);
-	nv_wr32(dev, NV20_PGRAPH_TILE(i), addr);
+	nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), tile->limit);
+	nv_wr32(dev, NV20_PGRAPH_TSIZE(i), tile->pitch);
+	nv_wr32(dev, NV20_PGRAPH_TILE(i), tile->addr);
 
 	nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0030 + 4 * i);
-	nv_wr32(dev, NV10_PGRAPH_RDI_DATA, limit);
+	nv_wr32(dev, NV10_PGRAPH_RDI_DATA, tile->limit);
 	nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0050 + 4 * i);
-	nv_wr32(dev, NV10_PGRAPH_RDI_DATA, pitch);
+	nv_wr32(dev, NV10_PGRAPH_RDI_DATA, tile->pitch);
 	nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0010 + 4 * i);
-	nv_wr32(dev, NV10_PGRAPH_RDI_DATA, addr);
+	nv_wr32(dev, NV10_PGRAPH_RDI_DATA, tile->addr);
+
+	if (dev_priv->card_type == NV_20) {
+		nv_wr32(dev, NV20_PGRAPH_ZCOMP(i), tile->zcomp);
+		nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00ea0090 + 4 * i);
+		nv_wr32(dev, NV10_PGRAPH_RDI_DATA, tile->zcomp);
+	}
 }
 
 int
@@ -560,6 +579,13 @@
 
 	nv20_graph_rdi(dev);
 
+	ret = nv20_graph_register(dev);
+	if (ret) {
+		nouveau_gpuobj_ref(NULL, &pgraph->ctx_table);
+		return ret;
+	}
+
+	nouveau_irq_register(dev, 12, nv20_graph_isr);
 	nv_wr32(dev, NV03_PGRAPH_INTR   , 0xFFFFFFFF);
 	nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
 
@@ -571,16 +597,17 @@
 	nv_wr32(dev, 0x40009C           , 0x00000040);
 
 	if (dev_priv->chipset >= 0x25) {
-		nv_wr32(dev, 0x400890, 0x00080000);
+		nv_wr32(dev, 0x400890, 0x00a8cfff);
 		nv_wr32(dev, 0x400610, 0x304B1FB6);
-		nv_wr32(dev, 0x400B80, 0x18B82880);
+		nv_wr32(dev, 0x400B80, 0x1cbd3883);
 		nv_wr32(dev, 0x400B84, 0x44000000);
 		nv_wr32(dev, 0x400098, 0x40000080);
 		nv_wr32(dev, 0x400B88, 0x000000ff);
+
 	} else {
-		nv_wr32(dev, 0x400880, 0x00080000); /* 0x0008c7df */
+		nv_wr32(dev, 0x400880, 0x0008c7df);
 		nv_wr32(dev, 0x400094, 0x00000005);
-		nv_wr32(dev, 0x400B80, 0x45CAA208); /* 0x45eae20e */
+		nv_wr32(dev, 0x400B80, 0x45eae20e);
 		nv_wr32(dev, 0x400B84, 0x24000000);
 		nv_wr32(dev, 0x400098, 0x00000040);
 		nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00E00038);
@@ -591,14 +618,8 @@
 
 	/* Turn all the tiling regions off. */
 	for (i = 0; i < NV10_PFB_TILE__SIZE; i++)
-		nv20_graph_set_region_tiling(dev, i, 0, 0, 0);
+		nv20_graph_set_tile_region(dev, i);
 
-	for (i = 0; i < 8; i++) {
-		nv_wr32(dev, 0x400980 + i * 4, nv_rd32(dev, 0x100300 + i * 4));
-		nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0090 + i * 4);
-		nv_wr32(dev, NV10_PGRAPH_RDI_DATA,
-					nv_rd32(dev, 0x100300 + i * 4));
-	}
 	nv_wr32(dev, 0x4009a0, nv_rd32(dev, 0x100324));
 	nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA000C);
 	nv_wr32(dev, NV10_PGRAPH_RDI_DATA, nv_rd32(dev, 0x100324));
@@ -642,6 +663,9 @@
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
 
+	nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0x00000000);
+	nouveau_irq_unregister(dev, 12);
+
 	nouveau_gpuobj_ref(NULL, &pgraph->ctx_table);
 }
 
@@ -684,9 +708,16 @@
 			return ret;
 	}
 
+	ret = nv30_graph_register(dev);
+	if (ret) {
+		nouveau_gpuobj_ref(NULL, &pgraph->ctx_table);
+		return ret;
+	}
+
 	nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_TABLE,
 		     pgraph->ctx_table->pinst >> 4);
 
+	nouveau_irq_register(dev, 12, nv20_graph_isr);
 	nv_wr32(dev, NV03_PGRAPH_INTR   , 0xFFFFFFFF);
 	nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
 
@@ -724,7 +755,7 @@
 
 	/* Turn all the tiling regions off. */
 	for (i = 0; i < NV10_PFB_TILE__SIZE; i++)
-		nv20_graph_set_region_tiling(dev, i, 0, 0, 0);
+		nv20_graph_set_tile_region(dev, i);
 
 	nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000100);
 	nv_wr32(dev, NV10_PGRAPH_STATE      , 0xFFFFFFFF);
@@ -744,46 +775,125 @@
 	return 0;
 }
 
-struct nouveau_pgraph_object_class nv20_graph_grclass[] = {
-	{ 0x0030, false, NULL }, /* null */
-	{ 0x0039, false, NULL }, /* m2mf */
-	{ 0x004a, false, NULL }, /* gdirect */
-	{ 0x009f, false, NULL }, /* imageblit (nv12) */
-	{ 0x008a, false, NULL }, /* ifc */
-	{ 0x0089, false, NULL }, /* sifm */
-	{ 0x0062, false, NULL }, /* surf2d */
-	{ 0x0043, false, NULL }, /* rop */
-	{ 0x0012, false, NULL }, /* beta1 */
-	{ 0x0072, false, NULL }, /* beta4 */
-	{ 0x0019, false, NULL }, /* cliprect */
-	{ 0x0044, false, NULL }, /* pattern */
-	{ 0x009e, false, NULL }, /* swzsurf */
-	{ 0x0096, false, NULL }, /* celcius */
-	{ 0x0097, false, NULL }, /* kelvin (nv20) */
-	{ 0x0597, false, NULL }, /* kelvin (nv25) */
-	{}
-};
+static int
+nv20_graph_register(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
 
-struct nouveau_pgraph_object_class nv30_graph_grclass[] = {
-	{ 0x0030, false, NULL }, /* null */
-	{ 0x0039, false, NULL }, /* m2mf */
-	{ 0x004a, false, NULL }, /* gdirect */
-	{ 0x009f, false, NULL }, /* imageblit (nv12) */
-	{ 0x008a, false, NULL }, /* ifc */
-	{ 0x038a, false, NULL }, /* ifc (nv30) */
-	{ 0x0089, false, NULL }, /* sifm */
-	{ 0x0389, false, NULL }, /* sifm (nv30) */
-	{ 0x0062, false, NULL }, /* surf2d */
-	{ 0x0362, false, NULL }, /* surf2d (nv30) */
-	{ 0x0043, false, NULL }, /* rop */
-	{ 0x0012, false, NULL }, /* beta1 */
-	{ 0x0072, false, NULL }, /* beta4 */
-	{ 0x0019, false, NULL }, /* cliprect */
-	{ 0x0044, false, NULL }, /* pattern */
-	{ 0x039e, false, NULL }, /* swzsurf */
-	{ 0x0397, false, NULL }, /* rankine (nv30) */
-	{ 0x0497, false, NULL }, /* rankine (nv35) */
-	{ 0x0697, false, NULL }, /* rankine (nv34) */
-	{}
-};
+	if (dev_priv->engine.graph.registered)
+		return 0;
 
+	NVOBJ_CLASS(dev, 0x506e, SW); /* nvsw */
+	NVOBJ_CLASS(dev, 0x0030, GR); /* null */
+	NVOBJ_CLASS(dev, 0x0039, GR); /* m2mf */
+	NVOBJ_CLASS(dev, 0x004a, GR); /* gdirect */
+	NVOBJ_CLASS(dev, 0x009f, GR); /* imageblit (nv12) */
+	NVOBJ_CLASS(dev, 0x008a, GR); /* ifc */
+	NVOBJ_CLASS(dev, 0x0089, GR); /* sifm */
+	NVOBJ_CLASS(dev, 0x0062, GR); /* surf2d */
+	NVOBJ_CLASS(dev, 0x0043, GR); /* rop */
+	NVOBJ_CLASS(dev, 0x0012, GR); /* beta1 */
+	NVOBJ_CLASS(dev, 0x0072, GR); /* beta4 */
+	NVOBJ_CLASS(dev, 0x0019, GR); /* cliprect */
+	NVOBJ_CLASS(dev, 0x0044, GR); /* pattern */
+	NVOBJ_CLASS(dev, 0x009e, GR); /* swzsurf */
+	NVOBJ_CLASS(dev, 0x0096, GR); /* celsius */
+
+	/* kelvin */
+	if (dev_priv->chipset < 0x25)
+		NVOBJ_CLASS(dev, 0x0097, GR);
+	else
+		NVOBJ_CLASS(dev, 0x0597, GR);
+
+	/* nvsw */
+	NVOBJ_CLASS(dev, 0x506e, SW);
+	NVOBJ_MTHD (dev, 0x506e, 0x0500, nv04_graph_mthd_page_flip);
+
+	dev_priv->engine.graph.registered = true;
+	return 0;
+}
+
+static int
+nv30_graph_register(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+	if (dev_priv->engine.graph.registered)
+		return 0;
+
+	NVOBJ_CLASS(dev, 0x506e, SW); /* nvsw */
+	NVOBJ_CLASS(dev, 0x0030, GR); /* null */
+	NVOBJ_CLASS(dev, 0x0039, GR); /* m2mf */
+	NVOBJ_CLASS(dev, 0x004a, GR); /* gdirect */
+	NVOBJ_CLASS(dev, 0x009f, GR); /* imageblit (nv12) */
+	NVOBJ_CLASS(dev, 0x008a, GR); /* ifc */
+	NVOBJ_CLASS(dev, 0x038a, GR); /* ifc (nv30) */
+	NVOBJ_CLASS(dev, 0x0089, GR); /* sifm */
+	NVOBJ_CLASS(dev, 0x0389, GR); /* sifm (nv30) */
+	NVOBJ_CLASS(dev, 0x0062, GR); /* surf2d */
+	NVOBJ_CLASS(dev, 0x0362, GR); /* surf2d (nv30) */
+	NVOBJ_CLASS(dev, 0x0043, GR); /* rop */
+	NVOBJ_CLASS(dev, 0x0012, GR); /* beta1 */
+	NVOBJ_CLASS(dev, 0x0072, GR); /* beta4 */
+	NVOBJ_CLASS(dev, 0x0019, GR); /* cliprect */
+	NVOBJ_CLASS(dev, 0x0044, GR); /* pattern */
+	NVOBJ_CLASS(dev, 0x039e, GR); /* swzsurf */
+
+	/* rankine */
+	if (0x00000003 & (1 << (dev_priv->chipset & 0x0f)))
+		NVOBJ_CLASS(dev, 0x0397, GR);
+	else
+	if (0x00000010 & (1 << (dev_priv->chipset & 0x0f)))
+		NVOBJ_CLASS(dev, 0x0697, GR);
+	else
+	if (0x000001e0 & (1 << (dev_priv->chipset & 0x0f)))
+		NVOBJ_CLASS(dev, 0x0497, GR);
+
+	/* nvsw */
+	NVOBJ_CLASS(dev, 0x506e, SW);
+	NVOBJ_MTHD (dev, 0x506e, 0x0500, nv04_graph_mthd_page_flip);
+
+	dev_priv->engine.graph.registered = true;
+	return 0;
+}
+
+static void
+nv20_graph_isr(struct drm_device *dev)
+{
+	u32 stat;
+
+	while ((stat = nv_rd32(dev, NV03_PGRAPH_INTR))) {
+		u32 nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
+		u32 nstatus = nv_rd32(dev, NV03_PGRAPH_NSTATUS);
+		u32 addr = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR);
+		u32 chid = (addr & 0x01f00000) >> 20;
+		u32 subc = (addr & 0x00070000) >> 16;
+		u32 mthd = (addr & 0x00001ffc);
+		u32 data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA);
+		u32 class = nv_rd32(dev, 0x400160 + subc * 4) & 0xfff;
+		u32 show = stat;
+
+		if (stat & NV_PGRAPH_INTR_ERROR) {
+			if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
+				if (!nouveau_gpuobj_mthd_call2(dev, chid, class, mthd, data))
+					show &= ~NV_PGRAPH_INTR_ERROR;
+			}
+		}
+
+		nv_wr32(dev, NV03_PGRAPH_INTR, stat);
+		nv_wr32(dev, NV04_PGRAPH_FIFO, 0x00000001);
+
+		if (show && nouveau_ratelimit()) {
+			NV_INFO(dev, "PGRAPH -");
+			nouveau_bitfield_print(nv10_graph_intr, show);
+			printk(" nsource:");
+			nouveau_bitfield_print(nv04_graph_nsource, nsource);
+			printk(" nstatus:");
+			nouveau_bitfield_print(nv10_graph_nstatus, nstatus);
+			printk("\n");
+			NV_INFO(dev, "PGRAPH - ch %d/%d class 0x%04x "
+				     "mthd 0x%04x data 0x%08x\n",
+				chid, subc, class, mthd, data);
+		}
+	}
+}
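
nv30_graph_register() above selects the rankine class with a chipset bitmask: bit (chipset & 0x0f) is tested against 0x00000003 (nv30/nv31 -> 0x0397), 0x00000010 (nv34 -> 0x0697) and 0x000001e0 (nv35/nv36 -> 0x0497). A stand-alone illustration of that selection (chipset list chosen just for the printout):

#include <stdio.h>

static unsigned int rankine_class(unsigned int chipset)
{
	unsigned int bit = 1u << (chipset & 0x0f);

	if (bit & 0x00000003)
		return 0x0397;	/* nv30/nv31 */
	if (bit & 0x00000010)
		return 0x0697;	/* nv34 */
	if (bit & 0x000001e0)
		return 0x0497;	/* nv35/nv36 */
	return 0;		/* no rankine class registered */
}

int main(void)
{
	unsigned int chipsets[] = { 0x30, 0x31, 0x34, 0x35, 0x36 };
	unsigned int i;

	for (i = 0; i < sizeof(chipsets) / sizeof(chipsets[0]); i++)
		printf("nv%02x -> class 0x%04x\n",
		       chipsets[i], rankine_class(chipsets[i]));
	return 0;
}
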
diff --git a/drivers/gpu/drm/nouveau/nv30_fb.c b/drivers/gpu/drm/nouveau/nv30_fb.c
index 4a3f2f0..e0135f0 100644
--- a/drivers/gpu/drm/nouveau/nv30_fb.c
+++ b/drivers/gpu/drm/nouveau/nv30_fb.c
@@ -29,6 +29,27 @@
 #include "nouveau_drv.h"
 #include "nouveau_drm.h"
 
+void
+nv30_fb_init_tile_region(struct drm_device *dev, int i, uint32_t addr,
+			 uint32_t size, uint32_t pitch, uint32_t flags)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
+
+	tile->addr = addr | 1;
+	tile->limit = max(1u, addr + size) - 1;
+	tile->pitch = pitch;
+}
+
+void
+nv30_fb_free_tile_region(struct drm_device *dev, int i)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
+
+	tile->addr = tile->limit = tile->pitch = 0;
+}
+
 static int
 calc_bias(struct drm_device *dev, int k, int i, int j)
 {
@@ -65,7 +86,7 @@
 
 	/* Turn all the tiling regions off. */
 	for (i = 0; i < pfb->num_tiles; i++)
-		pfb->set_region_tiling(dev, i, 0, 0, 0);
+		pfb->set_tile_region(dev, i);
 
 	/* Init the memory timing regs at 0x10037c/0x1003ac */
 	if (dev_priv->chipset == 0x30 ||
diff --git a/drivers/gpu/drm/nouveau/nv40_fb.c b/drivers/gpu/drm/nouveau/nv40_fb.c
index 3cd07d8..f3d9c05 100644
--- a/drivers/gpu/drm/nouveau/nv40_fb.c
+++ b/drivers/gpu/drm/nouveau/nv40_fb.c
@@ -4,26 +4,22 @@
 #include "nouveau_drm.h"
 
 void
-nv40_fb_set_region_tiling(struct drm_device *dev, int i, uint32_t addr,
-			  uint32_t size, uint32_t pitch)
+nv40_fb_set_tile_region(struct drm_device *dev, int i)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	uint32_t limit = max(1u, addr + size) - 1;
-
-	if (pitch)
-		addr |= 1;
+	struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
 
 	switch (dev_priv->chipset) {
 	case 0x40:
-		nv_wr32(dev, NV10_PFB_TLIMIT(i), limit);
-		nv_wr32(dev, NV10_PFB_TSIZE(i), pitch);
-		nv_wr32(dev, NV10_PFB_TILE(i), addr);
+		nv_wr32(dev, NV10_PFB_TLIMIT(i), tile->limit);
+		nv_wr32(dev, NV10_PFB_TSIZE(i), tile->pitch);
+		nv_wr32(dev, NV10_PFB_TILE(i), tile->addr);
 		break;
 
 	default:
-		nv_wr32(dev, NV40_PFB_TLIMIT(i), limit);
-		nv_wr32(dev, NV40_PFB_TSIZE(i), pitch);
-		nv_wr32(dev, NV40_PFB_TILE(i), addr);
+		nv_wr32(dev, NV40_PFB_TLIMIT(i), tile->limit);
+		nv_wr32(dev, NV40_PFB_TSIZE(i), tile->pitch);
+		nv_wr32(dev, NV40_PFB_TILE(i), tile->addr);
 		break;
 	}
 }
@@ -64,7 +60,7 @@
 
 	/* Turn all the tiling regions off. */
 	for (i = 0; i < pfb->num_tiles; i++)
-		pfb->set_region_tiling(dev, i, 0, 0, 0);
+		pfb->set_tile_region(dev, i);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/nouveau/nv40_fifo.c b/drivers/gpu/drm/nouveau/nv40_fifo.c
index d337b8b..49b9a35 100644
--- a/drivers/gpu/drm/nouveau/nv40_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv40_fifo.c
@@ -47,6 +47,11 @@
 	if (ret)
 		return ret;
 
+	chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
+			     NV40_USER(chan->id), PAGE_SIZE);
+	if (!chan->user)
+		return -ENOMEM;
+
 	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
 
 	nv_wi32(dev, fc +  0, chan->pushbuf_base);
@@ -59,7 +64,6 @@
 			      NV_PFIFO_CACHE1_BIG_ENDIAN |
 #endif
 			      0x30000000 /* no idea.. */);
-	nv_wi32(dev, fc + 56, chan->ramin_grctx->pinst >> 4);
 	nv_wi32(dev, fc + 60, 0x0001FFFF);
 
 	/* enable the fifo dma operation */
@@ -70,17 +74,6 @@
 	return 0;
 }
 
-void
-nv40_fifo_destroy_context(struct nouveau_channel *chan)
-{
-	struct drm_device *dev = chan->dev;
-
-	nv_wr32(dev, NV04_PFIFO_MODE,
-		nv_rd32(dev, NV04_PFIFO_MODE) & ~(1 << chan->id));
-
-	nouveau_gpuobj_ref(NULL, &chan->ramfc);
-}
-
 static void
 nv40_fifo_do_load_context(struct drm_device *dev, int chid)
 {
@@ -279,6 +272,7 @@
 static void
 nv40_fifo_init_intr(struct drm_device *dev)
 {
+	nouveau_irq_register(dev, 8, nv04_fifo_isr);
 	nv_wr32(dev, 0x002100, 0xffffffff);
 	nv_wr32(dev, 0x002140, 0xffffffff);
 }
@@ -301,7 +295,7 @@
 	pfifo->reassign(dev, true);
 
 	for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
-		if (dev_priv->fifos[i]) {
+		if (dev_priv->channels.ptr[i]) {
 			uint32_t mode = nv_rd32(dev, NV04_PFIFO_MODE);
 			nv_wr32(dev, NV04_PFIFO_MODE, mode | (1 << i));
 		}
diff --git a/drivers/gpu/drm/nouveau/nv40_graph.c b/drivers/gpu/drm/nouveau/nv40_graph.c
index 7ee1b91..19ef92a 100644
--- a/drivers/gpu/drm/nouveau/nv40_graph.c
+++ b/drivers/gpu/drm/nouveau/nv40_graph.c
@@ -29,6 +29,9 @@
 #include "nouveau_drv.h"
 #include "nouveau_grctx.h"
 
+static int nv40_graph_register(struct drm_device *);
+static void nv40_graph_isr(struct drm_device *);
+
 struct nouveau_channel *
 nv40_graph_channel(struct drm_device *dev)
 {
@@ -42,7 +45,7 @@
 	inst = (inst & NV40_PGRAPH_CTXCTL_CUR_INSTANCE) << 4;
 
 	for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
-		struct nouveau_channel *chan = dev_priv->fifos[i];
+		struct nouveau_channel *chan = dev_priv->channels.ptr[i];
 
 		if (chan && chan->ramin_grctx &&
 		    chan->ramin_grctx->pinst == inst)
@@ -59,6 +62,7 @@
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
 	struct nouveau_grctx ctx = {};
+	unsigned long flags;
 	int ret;
 
 	ret = nouveau_gpuobj_new(dev, chan, pgraph->grctx_size, 16,
@@ -73,12 +77,39 @@
 	nv40_grctx_init(&ctx);
 
 	nv_wo32(chan->ramin_grctx, 0, chan->ramin_grctx->pinst);
+
+	/* init grctx pointer in ramfc, and on PFIFO if channel is
+	 * already active there
+	 */
+	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+	nv_wo32(chan->ramfc, 0x38, chan->ramin_grctx->pinst >> 4);
+	nv_mask(dev, 0x002500, 0x00000001, 0x00000000);
+	if ((nv_rd32(dev, 0x003204) & 0x0000001f) == chan->id)
+		nv_wr32(dev, 0x0032e0, chan->ramin_grctx->pinst >> 4);
+	nv_mask(dev, 0x002500, 0x00000001, 0x00000001);
+	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
 	return 0;
 }
 
 void
 nv40_graph_destroy_context(struct nouveau_channel *chan)
 {
+	struct drm_device *dev = chan->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+	pgraph->fifo_access(dev, false);
+
+	/* Unload the context if it's the currently active one */
+	if (pgraph->channel(dev) == chan)
+		pgraph->unload_context(dev);
+
+	pgraph->fifo_access(dev, true);
+	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
+
+	/* Free the context resources */
 	nouveau_gpuobj_ref(NULL, &chan->ramin_grctx);
 }
 
@@ -174,43 +205,39 @@
 }
 
 void
-nv40_graph_set_region_tiling(struct drm_device *dev, int i, uint32_t addr,
-			     uint32_t size, uint32_t pitch)
+nv40_graph_set_tile_region(struct drm_device *dev, int i)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	uint32_t limit = max(1u, addr + size) - 1;
-
-	if (pitch)
-		addr |= 1;
+	struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
 
 	switch (dev_priv->chipset) {
 	case 0x44:
 	case 0x4a:
 	case 0x4e:
-		nv_wr32(dev, NV20_PGRAPH_TSIZE(i), pitch);
-		nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), limit);
-		nv_wr32(dev, NV20_PGRAPH_TILE(i), addr);
+		nv_wr32(dev, NV20_PGRAPH_TSIZE(i), tile->pitch);
+		nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), tile->limit);
+		nv_wr32(dev, NV20_PGRAPH_TILE(i), tile->addr);
 		break;
 
 	case 0x46:
 	case 0x47:
 	case 0x49:
 	case 0x4b:
-		nv_wr32(dev, NV47_PGRAPH_TSIZE(i), pitch);
-		nv_wr32(dev, NV47_PGRAPH_TLIMIT(i), limit);
-		nv_wr32(dev, NV47_PGRAPH_TILE(i), addr);
-		nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), pitch);
-		nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), limit);
-		nv_wr32(dev, NV40_PGRAPH_TILE1(i), addr);
+		nv_wr32(dev, NV47_PGRAPH_TSIZE(i), tile->pitch);
+		nv_wr32(dev, NV47_PGRAPH_TLIMIT(i), tile->limit);
+		nv_wr32(dev, NV47_PGRAPH_TILE(i), tile->addr);
+		nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), tile->pitch);
+		nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tile->limit);
+		nv_wr32(dev, NV40_PGRAPH_TILE1(i), tile->addr);
 		break;
 
 	default:
-		nv_wr32(dev, NV20_PGRAPH_TSIZE(i), pitch);
-		nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), limit);
-		nv_wr32(dev, NV20_PGRAPH_TILE(i), addr);
-		nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), pitch);
-		nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), limit);
-		nv_wr32(dev, NV40_PGRAPH_TILE1(i), addr);
+		nv_wr32(dev, NV20_PGRAPH_TSIZE(i), tile->pitch);
+		nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), tile->limit);
+		nv_wr32(dev, NV20_PGRAPH_TILE(i), tile->addr);
+		nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), tile->pitch);
+		nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tile->limit);
+		nv_wr32(dev, NV40_PGRAPH_TILE1(i), tile->addr);
 		break;
 	}
 }
@@ -232,7 +259,7 @@
 	struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
 	struct nouveau_grctx ctx = {};
 	uint32_t vramsz, *cp;
-	int i, j;
+	int ret, i, j;
 
 	nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) &
 			~NV_PMC_ENABLE_PGRAPH);
@@ -256,9 +283,14 @@
 
 	kfree(cp);
 
+	ret = nv40_graph_register(dev);
+	if (ret)
+		return ret;
+
 	/* No context present currently */
 	nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, 0x00000000);
 
+	nouveau_irq_register(dev, 12, nv40_graph_isr);
 	nv_wr32(dev, NV03_PGRAPH_INTR   , 0xFFFFFFFF);
 	nv_wr32(dev, NV40_PGRAPH_INTR_EN, 0xFFFFFFFF);
 
@@ -347,7 +379,7 @@
 
 	/* Turn all the tiling regions off. */
 	for (i = 0; i < pfb->num_tiles; i++)
-		nv40_graph_set_region_tiling(dev, i, 0, 0, 0);
+		nv40_graph_set_tile_region(dev, i);
 
 	/* begin RAM config */
 	vramsz = pci_resource_len(dev->pdev, 0) - 1;
@@ -390,26 +422,111 @@
 
 void nv40_graph_takedown(struct drm_device *dev)
 {
+	nouveau_irq_unregister(dev, 12);
 }
 
-struct nouveau_pgraph_object_class nv40_graph_grclass[] = {
-	{ 0x0030, false, NULL }, /* null */
-	{ 0x0039, false, NULL }, /* m2mf */
-	{ 0x004a, false, NULL }, /* gdirect */
-	{ 0x009f, false, NULL }, /* imageblit (nv12) */
-	{ 0x008a, false, NULL }, /* ifc */
-	{ 0x0089, false, NULL }, /* sifm */
-	{ 0x3089, false, NULL }, /* sifm (nv40) */
-	{ 0x0062, false, NULL }, /* surf2d */
-	{ 0x3062, false, NULL }, /* surf2d (nv40) */
-	{ 0x0043, false, NULL }, /* rop */
-	{ 0x0012, false, NULL }, /* beta1 */
-	{ 0x0072, false, NULL }, /* beta4 */
-	{ 0x0019, false, NULL }, /* cliprect */
-	{ 0x0044, false, NULL }, /* pattern */
-	{ 0x309e, false, NULL }, /* swzsurf */
-	{ 0x4097, false, NULL }, /* curie (nv40) */
-	{ 0x4497, false, NULL }, /* curie (nv44) */
-	{}
-};
+static int
+nv40_graph_register(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
 
+	if (dev_priv->engine.graph.registered)
+		return 0;
+
+	NVOBJ_CLASS(dev, 0x506e, SW); /* nvsw */
+	NVOBJ_CLASS(dev, 0x0030, GR); /* null */
+	NVOBJ_CLASS(dev, 0x0039, GR); /* m2mf */
+	NVOBJ_CLASS(dev, 0x004a, GR); /* gdirect */
+	NVOBJ_CLASS(dev, 0x009f, GR); /* imageblit (nv12) */
+	NVOBJ_CLASS(dev, 0x008a, GR); /* ifc */
+	NVOBJ_CLASS(dev, 0x0089, GR); /* sifm */
+	NVOBJ_CLASS(dev, 0x3089, GR); /* sifm (nv40) */
+	NVOBJ_CLASS(dev, 0x0062, GR); /* surf2d */
+	NVOBJ_CLASS(dev, 0x3062, GR); /* surf2d (nv40) */
+	NVOBJ_CLASS(dev, 0x0043, GR); /* rop */
+	NVOBJ_CLASS(dev, 0x0012, GR); /* beta1 */
+	NVOBJ_CLASS(dev, 0x0072, GR); /* beta4 */
+	NVOBJ_CLASS(dev, 0x0019, GR); /* cliprect */
+	NVOBJ_CLASS(dev, 0x0044, GR); /* pattern */
+	NVOBJ_CLASS(dev, 0x309e, GR); /* swzsurf */
+
+	/* curie */
+	if (dev_priv->chipset >= 0x60 ||
+	    0x00005450 & (1 << (dev_priv->chipset & 0x0f)))
+		NVOBJ_CLASS(dev, 0x4497, GR);
+	else
+		NVOBJ_CLASS(dev, 0x4097, GR);
+
+	/* nvsw */
+	NVOBJ_CLASS(dev, 0x506e, SW);
+	NVOBJ_MTHD (dev, 0x506e, 0x0500, nv04_graph_mthd_page_flip);
+
+	dev_priv->engine.graph.registered = true;
+	return 0;
+}
+
+static int
+nv40_graph_isr_chid(struct drm_device *dev, u32 inst)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_channel *chan;
+	unsigned long flags;
+	int i;
+
+	spin_lock_irqsave(&dev_priv->channels.lock, flags);
+	for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
+		chan = dev_priv->channels.ptr[i];
+		if (!chan || !chan->ramin_grctx)
+			continue;
+
+		if (inst == chan->ramin_grctx->pinst)
+			break;
+	}
+	spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
+	return i;
+}
+
+static void
+nv40_graph_isr(struct drm_device *dev)
+{
+	u32 stat;
+
+	while ((stat = nv_rd32(dev, NV03_PGRAPH_INTR))) {
+		u32 nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
+		u32 nstatus = nv_rd32(dev, NV03_PGRAPH_NSTATUS);
+		u32 inst = (nv_rd32(dev, 0x40032c) & 0x000fffff) << 4;
+		u32 chid = nv40_graph_isr_chid(dev, inst);
+		u32 addr = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR);
+		u32 subc = (addr & 0x00070000) >> 16;
+		u32 mthd = (addr & 0x00001ffc);
+		u32 data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA);
+		u32 class = nv_rd32(dev, 0x400160 + subc * 4) & 0xffff;
+		u32 show = stat;
+
+		if (stat & NV_PGRAPH_INTR_ERROR) {
+			if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
+				if (!nouveau_gpuobj_mthd_call2(dev, chid, class, mthd, data))
+					show &= ~NV_PGRAPH_INTR_ERROR;
+			} else
+			if (nsource & NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION) {
+				nv_mask(dev, 0x402000, 0, 0);
+			}
+		}
+
+		nv_wr32(dev, NV03_PGRAPH_INTR, stat);
+		nv_wr32(dev, NV04_PGRAPH_FIFO, 0x00000001);
+
+		if (show && nouveau_ratelimit()) {
+			NV_INFO(dev, "PGRAPH -");
+			nouveau_bitfield_print(nv10_graph_intr, show);
+			printk(" nsource:");
+			nouveau_bitfield_print(nv04_graph_nsource, nsource);
+			printk(" nstatus:");
+			nouveau_bitfield_print(nv10_graph_nstatus, nstatus);
+			printk("\n");
+			NV_INFO(dev, "PGRAPH - ch %d (0x%08x) subc %d "
+				     "class 0x%04x mthd 0x%04x data 0x%08x\n",
+				chid, inst, subc, class, mthd, data);
+		}
+	}
+}
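
Unlike the nv04/nv10 handlers, the nv40 trapped address no longer encodes the channel id, so nv40_graph_isr_chid() above recovers it by matching the active grctx instance (0x40032c) against every channel's ramin_grctx->pinst. A toy user-space analogue of that reverse lookup (table contents made up):

#include <stdint.h>
#include <stdio.h>

#define NCHAN 32

static int chid_from_inst(const uint32_t *grctx_pinst, uint32_t inst)
{
	int i;

	for (i = 0; i < NCHAN; i++)
		if (grctx_pinst[i] && grctx_pinst[i] == inst)
			return i;
	return NCHAN;	/* not found; mirrors the loop falling through */
}

int main(void)
{
	uint32_t pinst[NCHAN] = { 0 };

	pinst[3] = 0x00a000;
	pinst[7] = 0x00b000;
	printf("inst 0x%06x -> ch %d\n", 0x00b000,
	       chid_from_inst(pinst, 0x00b000));
	return 0;
}
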
diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c
index 56476d0..9023c4d 100644
--- a/drivers/gpu/drm/nouveau/nv50_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv50_crtc.c
@@ -115,15 +115,16 @@
 		OUT_RING(evo, 0);
 		BEGIN_RING(evo, 0, NV50_EVO_CRTC(index, FB_DMA), 1);
 		if (dev_priv->chipset != 0x50)
-			if (nv_crtc->fb.tile_flags == 0x7a00)
+			if (nv_crtc->fb.tile_flags == 0x7a00 ||
+			    nv_crtc->fb.tile_flags == 0xfe00)
 				OUT_RING(evo, NvEvoFB32);
 			else
 			if (nv_crtc->fb.tile_flags == 0x7000)
 				OUT_RING(evo, NvEvoFB16);
 			else
-				OUT_RING(evo, NvEvoVRAM);
+				OUT_RING(evo, NvEvoVRAM_LP);
 		else
-			OUT_RING(evo, NvEvoVRAM);
+			OUT_RING(evo, NvEvoVRAM_LP);
 	}
 
 	nv_crtc->fb.blanked = blanked;
@@ -345,7 +346,6 @@
 		     uint32_t buffer_handle, uint32_t width, uint32_t height)
 {
 	struct drm_device *dev = crtc->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
 	struct nouveau_bo *cursor = NULL;
 	struct drm_gem_object *gem;
@@ -374,8 +374,7 @@
 
 	nouveau_bo_unmap(cursor);
 
-	nv_crtc->cursor.set_offset(nv_crtc, nv_crtc->cursor.nvbo->bo.offset -
-					    dev_priv->vm_vram_base);
+	nv_crtc->cursor.set_offset(nv_crtc, nv_crtc->cursor.nvbo->bo.mem.start << PAGE_SHIFT);
 	nv_crtc->cursor.show(nv_crtc, true);
 
 out:
@@ -437,6 +436,7 @@
 	.cursor_move = nv50_crtc_cursor_move,
 	.gamma_set = nv50_crtc_gamma_set,
 	.set_config = drm_crtc_helper_set_config,
+	.page_flip = nouveau_crtc_page_flip,
 	.destroy = nv50_crtc_destroy,
 };
 
@@ -453,6 +453,7 @@
 
 	NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index);
 
+	drm_vblank_pre_modeset(dev, nv_crtc->index);
 	nv50_crtc_blank(nv_crtc, true);
 }
 
@@ -468,6 +469,7 @@
 	NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index);
 
 	nv50_crtc_blank(nv_crtc, false);
+	drm_vblank_post_modeset(dev, nv_crtc->index);
 
 	ret = RING_SPACE(evo, 2);
 	if (ret) {
@@ -545,7 +547,7 @@
 		 return -EINVAL;
 	}
 
-	nv_crtc->fb.offset = fb->nvbo->bo.offset - dev_priv->vm_vram_base;
+	nv_crtc->fb.offset = fb->nvbo->bo.mem.start << PAGE_SHIFT;
 	nv_crtc->fb.tile_flags = nouveau_bo_tile_layout(fb->nvbo);
 	nv_crtc->fb.cpp = drm_fb->bits_per_pixel / 8;
 	if (!nv_crtc->fb.blanked && dev_priv->chipset != 0x50) {
@@ -554,13 +556,14 @@
 			return ret;
 
 		BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, FB_DMA), 1);
-		if (nv_crtc->fb.tile_flags == 0x7a00)
+		if (nv_crtc->fb.tile_flags == 0x7a00 ||
+		    nv_crtc->fb.tile_flags == 0xfe00)
 			OUT_RING(evo, NvEvoFB32);
 		else
 		if (nv_crtc->fb.tile_flags == 0x7000)
 			OUT_RING(evo, NvEvoFB16);
 		else
-			OUT_RING(evo, NvEvoVRAM);
+			OUT_RING(evo, NvEvoVRAM_LP);
 	}
 
 	ret = RING_SPACE(evo, 12);
@@ -574,8 +577,10 @@
 	if (!nv_crtc->fb.tile_flags) {
 		OUT_RING(evo, drm_fb->pitch | (1 << 20));
 	} else {
-		OUT_RING(evo, ((drm_fb->pitch / 4) << 4) |
-				  fb->nvbo->tile_mode);
+		u32 tile_mode = fb->nvbo->tile_mode;
+		if (dev_priv->card_type >= NV_C0)
+			tile_mode >>= 4;
+		OUT_RING(evo, ((drm_fb->pitch / 4) << 4) | tile_mode);
 	}
 	if (dev_priv->chipset == 0x50)
 		OUT_RING(evo, (nv_crtc->fb.tile_flags << 8) | format);
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index f624c61..7cc94ed 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -33,6 +33,8 @@
 #include "nouveau_ramht.h"
 #include "drm_crtc_helper.h"
 
+static void nv50_display_isr(struct drm_device *);
+
 static inline int
 nv50_sor_nr(struct drm_device *dev)
 {
@@ -46,159 +48,6 @@
 	return 4;
 }
 
-static void
-nv50_evo_channel_del(struct nouveau_channel **pchan)
-{
-	struct nouveau_channel *chan = *pchan;
-
-	if (!chan)
-		return;
-	*pchan = NULL;
-
-	nouveau_gpuobj_channel_takedown(chan);
-	nouveau_bo_unmap(chan->pushbuf_bo);
-	nouveau_bo_ref(NULL, &chan->pushbuf_bo);
-
-	if (chan->user)
-		iounmap(chan->user);
-
-	kfree(chan);
-}
-
-static int
-nv50_evo_dmaobj_new(struct nouveau_channel *evo, uint32_t class, uint32_t name,
-		    uint32_t tile_flags, uint32_t magic_flags,
-		    uint32_t offset, uint32_t limit)
-{
-	struct drm_nouveau_private *dev_priv = evo->dev->dev_private;
-	struct drm_device *dev = evo->dev;
-	struct nouveau_gpuobj *obj = NULL;
-	int ret;
-
-	ret = nouveau_gpuobj_new(dev, evo, 6*4, 32, 0, &obj);
-	if (ret)
-		return ret;
-	obj->engine = NVOBJ_ENGINE_DISPLAY;
-
-	nv_wo32(obj,  0, (tile_flags << 22) | (magic_flags << 16) | class);
-	nv_wo32(obj,  4, limit);
-	nv_wo32(obj,  8, offset);
-	nv_wo32(obj, 12, 0x00000000);
-	nv_wo32(obj, 16, 0x00000000);
-	if (dev_priv->card_type < NV_C0)
-		nv_wo32(obj, 20, 0x00010000);
-	else
-		nv_wo32(obj, 20, 0x00020000);
-	dev_priv->engine.instmem.flush(dev);
-
-	ret = nouveau_ramht_insert(evo, name, obj);
-	nouveau_gpuobj_ref(NULL, &obj);
-	if (ret) {
-		return ret;
-	}
-
-	return 0;
-}
-
-static int
-nv50_evo_channel_new(struct drm_device *dev, struct nouveau_channel **pchan)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_gpuobj *ramht = NULL;
-	struct nouveau_channel *chan;
-	int ret;
-
-	chan = kzalloc(sizeof(struct nouveau_channel), GFP_KERNEL);
-	if (!chan)
-		return -ENOMEM;
-	*pchan = chan;
-
-	chan->id = -1;
-	chan->dev = dev;
-	chan->user_get = 4;
-	chan->user_put = 0;
-
-	ret = nouveau_gpuobj_new(dev, NULL, 32768, 0x1000,
-				 NVOBJ_FLAG_ZERO_ALLOC, &chan->ramin);
-	if (ret) {
-		NV_ERROR(dev, "Error allocating EVO channel memory: %d\n", ret);
-		nv50_evo_channel_del(pchan);
-		return ret;
-	}
-
-	ret = drm_mm_init(&chan->ramin_heap, 0, 32768);
-	if (ret) {
-		NV_ERROR(dev, "Error initialising EVO PRAMIN heap: %d\n", ret);
-		nv50_evo_channel_del(pchan);
-		return ret;
-	}
-
-	ret = nouveau_gpuobj_new(dev, chan, 4096, 16, 0, &ramht);
-	if (ret) {
-		NV_ERROR(dev, "Unable to allocate EVO RAMHT: %d\n", ret);
-		nv50_evo_channel_del(pchan);
-		return ret;
-	}
-
-	ret = nouveau_ramht_new(dev, ramht, &chan->ramht);
-	nouveau_gpuobj_ref(NULL, &ramht);
-	if (ret) {
-		nv50_evo_channel_del(pchan);
-		return ret;
-	}
-
-	if (dev_priv->chipset != 0x50) {
-		ret = nv50_evo_dmaobj_new(chan, 0x3d, NvEvoFB16, 0x70, 0x19,
-					  0, 0xffffffff);
-		if (ret) {
-			nv50_evo_channel_del(pchan);
-			return ret;
-		}
-
-
-		ret = nv50_evo_dmaobj_new(chan, 0x3d, NvEvoFB32, 0x7a, 0x19,
-					  0, 0xffffffff);
-		if (ret) {
-			nv50_evo_channel_del(pchan);
-			return ret;
-		}
-	}
-
-	ret = nv50_evo_dmaobj_new(chan, 0x3d, NvEvoVRAM, 0, 0x19,
-				  0, dev_priv->vram_size);
-	if (ret) {
-		nv50_evo_channel_del(pchan);
-		return ret;
-	}
-
-	ret = nouveau_bo_new(dev, NULL, 4096, 0, TTM_PL_FLAG_VRAM, 0, 0,
-			     false, true, &chan->pushbuf_bo);
-	if (ret == 0)
-		ret = nouveau_bo_pin(chan->pushbuf_bo, TTM_PL_FLAG_VRAM);
-	if (ret) {
-		NV_ERROR(dev, "Error creating EVO DMA push buffer: %d\n", ret);
-		nv50_evo_channel_del(pchan);
-		return ret;
-	}
-
-	ret = nouveau_bo_map(chan->pushbuf_bo);
-	if (ret) {
-		NV_ERROR(dev, "Error mapping EVO DMA push buffer: %d\n", ret);
-		nv50_evo_channel_del(pchan);
-		return ret;
-	}
-
-	chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
-					NV50_PDISPLAY_USER(0), PAGE_SIZE);
-	if (!chan->user) {
-		NV_ERROR(dev, "Error mapping EVO control regs.\n");
-		nv50_evo_channel_del(pchan);
-		return -ENOMEM;
-	}
-
-	return 0;
-}
-
 int
 nv50_display_early_init(struct drm_device *dev)
 {
@@ -214,17 +63,16 @@
 nv50_display_init(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
 	struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
-	struct nouveau_channel *evo = dev_priv->evo;
 	struct drm_connector *connector;
-	uint32_t val, ram_amount;
-	uint64_t start;
+	struct nouveau_channel *evo;
 	int ret, i;
+	u32 val;
 
 	NV_DEBUG_KMS(dev, "\n");
 
 	nv_wr32(dev, 0x00610184, nv_rd32(dev, 0x00614004));
+
 	/*
 	 * I think the 0x006101XX range is some kind of main control area
 	 * that enables things.
@@ -240,16 +88,19 @@
 		val = nv_rd32(dev, 0x0061610c + (i * 0x800));
 		nv_wr32(dev, 0x0061019c + (i * 0x10), val);
 	}
+
 	/* DAC */
 	for (i = 0; i < 3; i++) {
 		val = nv_rd32(dev, 0x0061a000 + (i * 0x800));
 		nv_wr32(dev, 0x006101d0 + (i * 0x04), val);
 	}
+
 	/* SOR */
 	for (i = 0; i < nv50_sor_nr(dev); i++) {
 		val = nv_rd32(dev, 0x0061c000 + (i * 0x800));
 		nv_wr32(dev, 0x006101e0 + (i * 0x04), val);
 	}
+
 	/* EXT */
 	for (i = 0; i < 3; i++) {
 		val = nv_rd32(dev, 0x0061e000 + (i * 0x800));
@@ -262,17 +113,6 @@
 		nv_wr32(dev, NV50_PDISPLAY_DAC_CLK_CTRL1(i), 0x00000001);
 	}
 
-	/* This used to be in crtc unblank, but seems out of place there. */
-	nv_wr32(dev, NV50_PDISPLAY_UNK_380, 0);
-	/* RAM is clamped to 256 MiB. */
-	ram_amount = dev_priv->vram_size;
-	NV_DEBUG_KMS(dev, "ram_amount %d\n", ram_amount);
-	if (ram_amount > 256*1024*1024)
-		ram_amount = 256*1024*1024;
-	nv_wr32(dev, NV50_PDISPLAY_RAM_AMOUNT, ram_amount - 1);
-	nv_wr32(dev, NV50_PDISPLAY_UNK_388, 0x150000);
-	nv_wr32(dev, NV50_PDISPLAY_UNK_38C, 0);
-
 	/* The precise purpose is unknown, i suspect it has something to do
 	 * with text mode.
 	 */
@@ -287,37 +127,6 @@
 		}
 	}
 
-	/* taken from nv bug #12637, attempts to un-wedge the hw if it's
-	 * stuck in some unspecified state
-	 */
-	start = ptimer->read(dev);
-	nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0), 0x2b00);
-	while ((val = nv_rd32(dev, NV50_PDISPLAY_CHANNEL_STAT(0))) & 0x1e0000) {
-		if ((val & 0x9f0000) == 0x20000)
-			nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0),
-							val | 0x800000);
-
-		if ((val & 0x3f0000) == 0x30000)
-			nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0),
-							val | 0x200000);
-
-		if (ptimer->read(dev) - start > 1000000000ULL) {
-			NV_ERROR(dev, "timeout: (0x610200 & 0x1e0000) != 0\n");
-			NV_ERROR(dev, "0x610200 = 0x%08x\n", val);
-			return -EBUSY;
-		}
-	}
-
-	nv_wr32(dev, NV50_PDISPLAY_CTRL_STATE, NV50_PDISPLAY_CTRL_STATE_ENABLE);
-	nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0), 0x1000b03);
-	if (!nv_wait(dev, NV50_PDISPLAY_CHANNEL_STAT(0),
-		     0x40000000, 0x40000000)) {
-		NV_ERROR(dev, "timeout: (0x610200 & 0x40000000) == 0x40000000\n");
-		NV_ERROR(dev, "0x610200 = 0x%08x\n",
-			  nv_rd32(dev, NV50_PDISPLAY_CHANNEL_STAT(0)));
-		return -EBUSY;
-	}
-
 	for (i = 0; i < 2; i++) {
 		nv_wr32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i), 0x2000);
 		if (!nv_wait(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i),
@@ -341,39 +150,31 @@
 		}
 	}
 
-	nv_wr32(dev, NV50_PDISPLAY_OBJECTS, (evo->ramin->vinst >> 8) | 9);
+	nv_wr32(dev, NV50_PDISPLAY_PIO_CTRL, 0x00000000);
+	nv_mask(dev, NV50_PDISPLAY_INTR_0, 0x00000000, 0x00000000);
+	nv_wr32(dev, NV50_PDISPLAY_INTR_EN_0, 0x00000000);
+	nv_mask(dev, NV50_PDISPLAY_INTR_1, 0x00000000, 0x00000000);
+	nv_wr32(dev, NV50_PDISPLAY_INTR_EN_1,
+		     NV50_PDISPLAY_INTR_EN_1_CLK_UNK10 |
+		     NV50_PDISPLAY_INTR_EN_1_CLK_UNK20 |
+		     NV50_PDISPLAY_INTR_EN_1_CLK_UNK40);
 
-	/* initialise fifo */
-	nv_wr32(dev, NV50_PDISPLAY_CHANNEL_DMA_CB(0),
-		((evo->pushbuf_bo->bo.mem.start << PAGE_SHIFT) >> 8) |
-		NV50_PDISPLAY_CHANNEL_DMA_CB_LOCATION_VRAM |
-		NV50_PDISPLAY_CHANNEL_DMA_CB_VALID);
-	nv_wr32(dev, NV50_PDISPLAY_CHANNEL_UNK2(0), 0x00010000);
-	nv_wr32(dev, NV50_PDISPLAY_CHANNEL_UNK3(0), 0x00000002);
-	if (!nv_wait(dev, 0x610200, 0x80000000, 0x00000000)) {
-		NV_ERROR(dev, "timeout: (0x610200 & 0x80000000) == 0\n");
-		NV_ERROR(dev, "0x610200 = 0x%08x\n", nv_rd32(dev, 0x610200));
-		return -EBUSY;
+	/* enable hotplug interrupts */
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		struct nouveau_connector *conn = nouveau_connector(connector);
+
+		if (conn->dcb->gpio_tag == 0xff)
+			continue;
+
+		pgpio->irq_enable(dev, conn->dcb->gpio_tag, true);
 	}
-	nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0),
-		(nv_rd32(dev, NV50_PDISPLAY_CHANNEL_STAT(0)) & ~0x00000003) |
-		 NV50_PDISPLAY_CHANNEL_STAT_DMA_ENABLED);
-	nv_wr32(dev, NV50_PDISPLAY_USER_PUT(0), 0);
-	nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0), 0x01000003 |
-		NV50_PDISPLAY_CHANNEL_STAT_DMA_ENABLED);
-	nv_wr32(dev, 0x610300, nv_rd32(dev, 0x610300) & ~1);
 
-	evo->dma.max = (4096/4) - 2;
-	evo->dma.put = 0;
-	evo->dma.cur = evo->dma.put;
-	evo->dma.free = evo->dma.max - evo->dma.cur;
-
-	ret = RING_SPACE(evo, NOUVEAU_DMA_SKIPS);
+	ret = nv50_evo_init(dev);
 	if (ret)
 		return ret;
+	evo = dev_priv->evo;
 
-	for (i = 0; i < NOUVEAU_DMA_SKIPS; i++)
-		OUT_RING(evo, 0);
+	nv_wr32(dev, NV50_PDISPLAY_OBJECTS, (evo->ramin->vinst >> 8) | 9);
 
 	ret = RING_SPACE(evo, 11);
 	if (ret)
@@ -393,21 +194,6 @@
 	if (!nv_wait(dev, 0x640004, 0xffffffff, evo->dma.put << 2))
 		NV_ERROR(dev, "evo pushbuf stalled\n");
 
-	/* enable clock change interrupts. */
-	nv_wr32(dev, 0x610028, 0x00010001);
-	nv_wr32(dev, NV50_PDISPLAY_INTR_EN, (NV50_PDISPLAY_INTR_EN_CLK_UNK10 |
-					     NV50_PDISPLAY_INTR_EN_CLK_UNK20 |
-					     NV50_PDISPLAY_INTR_EN_CLK_UNK40));
-
-	/* enable hotplug interrupts */
-	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-		struct nouveau_connector *conn = nouveau_connector(connector);
-
-		if (conn->dcb->gpio_tag == 0xff)
-			continue;
-
-		pgpio->irq_enable(dev, conn->dcb->gpio_tag, true);
-	}
 
 	return 0;
 }
@@ -452,13 +238,7 @@
 		}
 	}
 
-	nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0), 0);
-	nv_wr32(dev, NV50_PDISPLAY_CTRL_STATE, 0);
-	if (!nv_wait(dev, NV50_PDISPLAY_CHANNEL_STAT(0), 0x1e0000, 0)) {
-		NV_ERROR(dev, "timeout: (0x610200 & 0x1e0000) == 0\n");
-		NV_ERROR(dev, "0x610200 = 0x%08x\n",
-			  nv_rd32(dev, NV50_PDISPLAY_CHANNEL_STAT(0)));
-	}
+	nv50_evo_fini(dev);
 
 	for (i = 0; i < 3; i++) {
 		if (!nv_wait(dev, NV50_PDISPLAY_SOR_DPMS_STATE(i),
@@ -470,7 +250,7 @@
 	}
 
 	/* disable interrupts. */
-	nv_wr32(dev, NV50_PDISPLAY_INTR_EN, 0x00000000);
+	nv_wr32(dev, NV50_PDISPLAY_INTR_EN_1, 0x00000000);
 
 	/* disable hotplug interrupts */
 	nv_wr32(dev, 0xe054, 0xffffffff);
@@ -508,13 +288,6 @@
 
 	dev->mode_config.fb_base = dev_priv->fb_phys;
 
-	/* Create EVO channel */
-	ret = nv50_evo_channel_new(dev, &dev_priv->evo);
-	if (ret) {
-		NV_ERROR(dev, "Error creating EVO channel: %d\n", ret);
-		return ret;
-	}
-
 	/* Create CRTC objects */
 	for (i = 0; i < 2; i++)
 		nv50_crtc_create(dev, i);
@@ -557,6 +330,9 @@
 		}
 	}
 
+	INIT_WORK(&dev_priv->irq_work, nv50_display_irq_handler_bh);
+	nouveau_irq_register(dev, 26, nv50_display_isr);
+
 	ret = nv50_display_init(dev);
 	if (ret) {
 		nv50_display_destroy(dev);
@@ -569,14 +345,12 @@
 void
 nv50_display_destroy(struct drm_device *dev)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-
 	NV_DEBUG_KMS(dev, "\n");
 
 	drm_mode_config_cleanup(dev);
 
 	nv50_display_disable(dev);
-	nv50_evo_channel_del(&dev_priv->evo);
+	nouveau_irq_unregister(dev, 26);
 }
 
 static u16
@@ -660,32 +434,32 @@
 nv50_display_vblank_crtc_handler(struct drm_device *dev, int crtc)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_channel *chan;
-	struct list_head *entry, *tmp;
+	struct nouveau_channel *chan, *tmp;
 
-	list_for_each_safe(entry, tmp, &dev_priv->vbl_waiting) {
-		chan = list_entry(entry, struct nouveau_channel, nvsw.vbl_wait);
+	list_for_each_entry_safe(chan, tmp, &dev_priv->vbl_waiting,
+				 nvsw.vbl_wait) {
+		if (chan->nvsw.vblsem_head != crtc)
+			continue;
 
 		nouveau_bo_wr32(chan->notifier_bo, chan->nvsw.vblsem_offset,
 						chan->nvsw.vblsem_rval);
 		list_del(&chan->nvsw.vbl_wait);
+		drm_vblank_put(dev, crtc);
 	}
+
+	drm_handle_vblank(dev, crtc);
 }
 
 static void
 nv50_display_vblank_handler(struct drm_device *dev, uint32_t intr)
 {
-	intr &= NV50_PDISPLAY_INTR_1_VBLANK_CRTC;
-
 	if (intr & NV50_PDISPLAY_INTR_1_VBLANK_CRTC_0)
 		nv50_display_vblank_crtc_handler(dev, 0);
 
 	if (intr & NV50_PDISPLAY_INTR_1_VBLANK_CRTC_1)
 		nv50_display_vblank_crtc_handler(dev, 1);
 
-	nv_wr32(dev, NV50_PDISPLAY_INTR_EN, nv_rd32(dev,
-		     NV50_PDISPLAY_INTR_EN) & ~intr);
-	nv_wr32(dev, NV50_PDISPLAY_INTR_1, intr);
+	nv_wr32(dev, NV50_PDISPLAY_INTR_1, NV50_PDISPLAY_INTR_1_VBLANK_CRTC);
 }
 
 static void
@@ -1011,108 +785,31 @@
 static void
 nv50_display_error_handler(struct drm_device *dev)
 {
-	uint32_t addr, data;
+	u32 channels = (nv_rd32(dev, NV50_PDISPLAY_INTR_0) & 0x001f0000) >> 16;
+	u32 addr, data;
+	int chid;
 
-	nv_wr32(dev, NV50_PDISPLAY_INTR_0, 0x00010000);
-	addr = nv_rd32(dev, NV50_PDISPLAY_TRAPPED_ADDR);
-	data = nv_rd32(dev, NV50_PDISPLAY_TRAPPED_DATA);
-
-	NV_ERROR(dev, "EvoCh %d Mthd 0x%04x Data 0x%08x (0x%04x 0x%02x)\n",
-		 0, addr & 0xffc, data, addr >> 16, (addr >> 12) & 0xf);
-
-	nv_wr32(dev, NV50_PDISPLAY_TRAPPED_ADDR, 0x90000000);
-}
-
-void
-nv50_display_irq_hotplug_bh(struct work_struct *work)
-{
-	struct drm_nouveau_private *dev_priv =
-		container_of(work, struct drm_nouveau_private, hpd_work);
-	struct drm_device *dev = dev_priv->dev;
-	struct drm_connector *connector;
-	const uint32_t gpio_reg[4] = { 0xe104, 0xe108, 0xe280, 0xe284 };
-	uint32_t unplug_mask, plug_mask, change_mask;
-	uint32_t hpd0, hpd1;
-
-	spin_lock_irq(&dev_priv->hpd_state.lock);
-	hpd0 = dev_priv->hpd_state.hpd0_bits;
-	dev_priv->hpd_state.hpd0_bits = 0;
-	hpd1 = dev_priv->hpd_state.hpd1_bits;
-	dev_priv->hpd_state.hpd1_bits = 0;
-	spin_unlock_irq(&dev_priv->hpd_state.lock);
-
-	hpd0 &= nv_rd32(dev, 0xe050);
-	if (dev_priv->chipset >= 0x90)
-		hpd1 &= nv_rd32(dev, 0xe070);
-
-	plug_mask   = (hpd0 & 0x0000ffff) | (hpd1 << 16);
-	unplug_mask = (hpd0 >> 16) | (hpd1 & 0xffff0000);
-	change_mask = plug_mask | unplug_mask;
-
-	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-		struct drm_encoder_helper_funcs *helper;
-		struct nouveau_connector *nv_connector =
-			nouveau_connector(connector);
-		struct nouveau_encoder *nv_encoder;
-		struct dcb_gpio_entry *gpio;
-		uint32_t reg;
-		bool plugged;
-
-		if (!nv_connector->dcb)
+	for (chid = 0; chid < 5; chid++) {
+		if (!(channels & (1 << chid)))
 			continue;
 
-		gpio = nouveau_bios_gpio_entry(dev, nv_connector->dcb->gpio_tag);
-		if (!gpio || !(change_mask & (1 << gpio->line)))
-			continue;
+		nv_wr32(dev, NV50_PDISPLAY_INTR_0, 0x00010000 << chid);
+		addr = nv_rd32(dev, NV50_PDISPLAY_TRAPPED_ADDR(chid));
+		data = nv_rd32(dev, NV50_PDISPLAY_TRAPPED_DATA(chid));
+		NV_ERROR(dev, "EvoCh %d Mthd 0x%04x Data 0x%08x "
+			      "(0x%04x 0x%02x)\n", chid,
+			 addr & 0xffc, data, addr >> 16, (addr >> 12) & 0xf);
 
-		reg = nv_rd32(dev, gpio_reg[gpio->line >> 3]);
-		plugged = !!(reg & (4 << ((gpio->line & 7) << 2)));
-		NV_INFO(dev, "%splugged %s\n", plugged ? "" : "un",
-			drm_get_connector_name(connector)) ;
-
-		if (!connector->encoder || !connector->encoder->crtc ||
-		    !connector->encoder->crtc->enabled)
-			continue;
-		nv_encoder = nouveau_encoder(connector->encoder);
-		helper = connector->encoder->helper_private;
-
-		if (nv_encoder->dcb->type != OUTPUT_DP)
-			continue;
-
-		if (plugged)
-			helper->dpms(connector->encoder, DRM_MODE_DPMS_ON);
-		else
-			helper->dpms(connector->encoder, DRM_MODE_DPMS_OFF);
+		nv_wr32(dev, NV50_PDISPLAY_TRAPPED_ADDR(chid), 0x90000000);
 	}
-
-	drm_helper_hpd_irq_event(dev);
 }
 
-void
-nv50_display_irq_handler(struct drm_device *dev)
+static void
+nv50_display_isr(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	uint32_t delayed = 0;
 
-	if (nv_rd32(dev, NV50_PMC_INTR_0) & NV50_PMC_INTR_0_HOTPLUG) {
-		uint32_t hpd0_bits, hpd1_bits = 0;
-
-		hpd0_bits = nv_rd32(dev, 0xe054);
-		nv_wr32(dev, 0xe054, hpd0_bits);
-
-		if (dev_priv->chipset >= 0x90) {
-			hpd1_bits = nv_rd32(dev, 0xe074);
-			nv_wr32(dev, 0xe074, hpd1_bits);
-		}
-
-		spin_lock(&dev_priv->hpd_state.lock);
-		dev_priv->hpd_state.hpd0_bits |= hpd0_bits;
-		dev_priv->hpd_state.hpd1_bits |= hpd1_bits;
-		spin_unlock(&dev_priv->hpd_state.lock);
-
-		queue_work(dev_priv->wq, &dev_priv->hpd_work);
-	}
-
 	while (nv_rd32(dev, NV50_PMC_INTR_0) & NV50_PMC_INTR_0_DISPLAY) {
 		uint32_t intr0 = nv_rd32(dev, NV50_PDISPLAY_INTR_0);
 		uint32_t intr1 = nv_rd32(dev, NV50_PDISPLAY_INTR_1);
@@ -1123,9 +820,9 @@
 		if (!intr0 && !(intr1 & ~delayed))
 			break;
 
-		if (intr0 & 0x00010000) {
+		if (intr0 & 0x001f0000) {
 			nv50_display_error_handler(dev);
-			intr0 &= ~0x00010000;
+			intr0 &= ~0x001f0000;
 		}
 
 		if (intr1 & NV50_PDISPLAY_INTR_1_VBLANK_CRTC) {
@@ -1156,4 +853,3 @@
 		}
 	}
 }
-
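The reworked nv50_display_error_handler() above no longer assumes EVO channel 0: bits
16-20 of NV50_PDISPLAY_INTR_0 form a mask of which of the five EVO channels trapped,
and each set bit is acked and reported individually. A standalone sketch of that decode
(illustration only, not part of this patch; the example intr0 value is made up):

	#include <stdio.h>

	int main(void)
	{
		unsigned int intr0 = 0x00050000;	/* pretend EVO channels 0 and 2 trapped */
		unsigned int channels = (intr0 & 0x001f0000) >> 16;
		int chid;

		for (chid = 0; chid < 5; chid++) {
			if (!(channels & (1u << chid)))
				continue;
			/* the driver acks 0x00010000 << chid and dumps the
			 * trapped method address/data for this channel here */
			printf("EvoCh %d trapped\n", chid);
		}
		return 0;
	}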
diff --git a/drivers/gpu/drm/nouveau/nv50_display.h b/drivers/gpu/drm/nouveau/nv50_display.h
index c551f0b..f0e30b78 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.h
+++ b/drivers/gpu/drm/nouveau/nv50_display.h
@@ -35,9 +35,7 @@
 #include "nouveau_crtc.h"
 #include "nv50_evo.h"
 
-void nv50_display_irq_handler(struct drm_device *dev);
 void nv50_display_irq_handler_bh(struct work_struct *work);
-void nv50_display_irq_hotplug_bh(struct work_struct *work);
 int nv50_display_early_init(struct drm_device *dev);
 void nv50_display_late_takedown(struct drm_device *dev);
 int nv50_display_create(struct drm_device *dev);
diff --git a/drivers/gpu/drm/nouveau/nv50_evo.c b/drivers/gpu/drm/nouveau/nv50_evo.c
new file mode 100644
index 0000000..14e24e9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv50_evo.c
@@ -0,0 +1,345 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+
+#include "nouveau_drv.h"
+#include "nouveau_dma.h"
+#include "nouveau_ramht.h"
+
+static void
+nv50_evo_channel_del(struct nouveau_channel **pevo)
+{
+	struct drm_nouveau_private *dev_priv;
+	struct nouveau_channel *evo = *pevo;
+
+	if (!evo)
+		return;
+	*pevo = NULL;
+
+	dev_priv = evo->dev->dev_private;
+	dev_priv->evo_alloc &= ~(1 << evo->id);
+
+	nouveau_gpuobj_channel_takedown(evo);
+	nouveau_bo_unmap(evo->pushbuf_bo);
+	nouveau_bo_ref(NULL, &evo->pushbuf_bo);
+
+	if (evo->user)
+		iounmap(evo->user);
+
+	kfree(evo);
+}
+
+int
+nv50_evo_dmaobj_new(struct nouveau_channel *evo, u32 class, u32 name,
+		    u32 tile_flags, u32 magic_flags, u32 offset, u32 limit,
+		    u32 flags5)
+{
+	struct drm_nouveau_private *dev_priv = evo->dev->dev_private;
+	struct drm_device *dev = evo->dev;
+	struct nouveau_gpuobj *obj = NULL;
+	int ret;
+
+	ret = nouveau_gpuobj_new(dev, dev_priv->evo, 6*4, 32, 0, &obj);
+	if (ret)
+		return ret;
+	obj->engine = NVOBJ_ENGINE_DISPLAY;
+
+	nv_wo32(obj,  0, (tile_flags << 22) | (magic_flags << 16) | class);
+	nv_wo32(obj,  4, limit);
+	nv_wo32(obj,  8, offset);
+	nv_wo32(obj, 12, 0x00000000);
+	nv_wo32(obj, 16, 0x00000000);
+	nv_wo32(obj, 20, flags5);
+	dev_priv->engine.instmem.flush(dev);
+
+	ret = nouveau_ramht_insert(evo, name, obj);
+	nouveau_gpuobj_ref(NULL, &obj);
+	if (ret) {
+		return ret;
+	}
+
+	return 0;
+}
+
+static int
+nv50_evo_channel_new(struct drm_device *dev, struct nouveau_channel **pevo)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_channel *evo;
+	int ret;
+
+	evo = kzalloc(sizeof(struct nouveau_channel), GFP_KERNEL);
+	if (!evo)
+		return -ENOMEM;
+	*pevo = evo;
+
+	for (evo->id = 0; evo->id < 5; evo->id++) {
+		if (dev_priv->evo_alloc & (1 << evo->id))
+			continue;
+
+		dev_priv->evo_alloc |= (1 << evo->id);
+		break;
+	}
+
+	if (evo->id == 5) {
+		kfree(evo);
+		return -ENODEV;
+	}
+
+	evo->dev = dev;
+	evo->user_get = 4;
+	evo->user_put = 0;
+
+	ret = nouveau_bo_new(dev, NULL, 4096, 0, TTM_PL_FLAG_VRAM, 0, 0,
+			     false, true, &evo->pushbuf_bo);
+	if (ret == 0)
+		ret = nouveau_bo_pin(evo->pushbuf_bo, TTM_PL_FLAG_VRAM);
+	if (ret) {
+		NV_ERROR(dev, "Error creating EVO DMA push buffer: %d\n", ret);
+		nv50_evo_channel_del(pevo);
+		return ret;
+	}
+
+	ret = nouveau_bo_map(evo->pushbuf_bo);
+	if (ret) {
+		NV_ERROR(dev, "Error mapping EVO DMA push buffer: %d\n", ret);
+		nv50_evo_channel_del(pevo);
+		return ret;
+	}
+
+	evo->user = ioremap(pci_resource_start(dev->pdev, 0) +
+			    NV50_PDISPLAY_USER(evo->id), PAGE_SIZE);
+	if (!evo->user) {
+		NV_ERROR(dev, "Error mapping EVO control regs.\n");
+		nv50_evo_channel_del(pevo);
+		return -ENOMEM;
+	}
+
+	/* bind primary evo channel's ramht to the channel */
+	if (dev_priv->evo && evo != dev_priv->evo)
+		nouveau_ramht_ref(dev_priv->evo->ramht, &evo->ramht, NULL);
+
+	return 0;
+}
+
+static int
+nv50_evo_channel_init(struct nouveau_channel *evo)
+{
+	struct drm_device *dev = evo->dev;
+	int id = evo->id, ret, i;
+	u64 pushbuf = evo->pushbuf_bo->bo.mem.start << PAGE_SHIFT;
+	u32 tmp;
+
+	tmp = nv_rd32(dev, NV50_PDISPLAY_EVO_CTRL(id));
+	if ((tmp & 0x009f0000) == 0x00020000)
+		nv_wr32(dev, NV50_PDISPLAY_EVO_CTRL(id), tmp | 0x00800000);
+
+	tmp = nv_rd32(dev, NV50_PDISPLAY_EVO_CTRL(id));
+	if ((tmp & 0x003f0000) == 0x00030000)
+		nv_wr32(dev, NV50_PDISPLAY_EVO_CTRL(id), tmp | 0x00600000);
+
+	/* initialise fifo */
+	nv_wr32(dev, NV50_PDISPLAY_EVO_DMA_CB(id), pushbuf >> 8 |
+		     NV50_PDISPLAY_EVO_DMA_CB_LOCATION_VRAM |
+		     NV50_PDISPLAY_EVO_DMA_CB_VALID);
+	nv_wr32(dev, NV50_PDISPLAY_EVO_UNK2(id), 0x00010000);
+	nv_wr32(dev, NV50_PDISPLAY_EVO_HASH_TAG(id), id);
+	nv_mask(dev, NV50_PDISPLAY_EVO_CTRL(id), NV50_PDISPLAY_EVO_CTRL_DMA,
+		     NV50_PDISPLAY_EVO_CTRL_DMA_ENABLED);
+
+	nv_wr32(dev, NV50_PDISPLAY_USER_PUT(id), 0x00000000);
+	nv_wr32(dev, NV50_PDISPLAY_EVO_CTRL(id), 0x01000003 |
+		     NV50_PDISPLAY_EVO_CTRL_DMA_ENABLED);
+	if (!nv_wait(dev, NV50_PDISPLAY_EVO_CTRL(id), 0x80000000, 0x00000000)) {
+		NV_ERROR(dev, "EvoCh %d init timeout: 0x%08x\n", id,
+			 nv_rd32(dev, NV50_PDISPLAY_EVO_CTRL(id)));
+		return -EBUSY;
+	}
+
+	/* enable error reporting on the channel */
+	nv_mask(dev, 0x610028, 0x00000000, 0x00010001 << id);
+
+	evo->dma.max = (4096/4) - 2;
+	evo->dma.put = 0;
+	evo->dma.cur = evo->dma.put;
+	evo->dma.free = evo->dma.max - evo->dma.cur;
+
+	ret = RING_SPACE(evo, NOUVEAU_DMA_SKIPS);
+	if (ret)
+		return ret;
+
+	for (i = 0; i < NOUVEAU_DMA_SKIPS; i++)
+		OUT_RING(evo, 0);
+
+	return 0;
+}
+
+static void
+nv50_evo_channel_fini(struct nouveau_channel *evo)
+{
+	struct drm_device *dev = evo->dev;
+	int id = evo->id;
+
+	nv_mask(dev, 0x610028, 0x00010001 << id, 0x00000000);
+	nv_mask(dev, NV50_PDISPLAY_EVO_CTRL(id), 0x00001010, 0x00001000);
+	nv_wr32(dev, NV50_PDISPLAY_INTR_0, (1 << id));
+	nv_mask(dev, NV50_PDISPLAY_EVO_CTRL(id), 0x00000003, 0x00000000);
+	if (!nv_wait(dev, NV50_PDISPLAY_EVO_CTRL(id), 0x001e0000, 0x00000000)) {
+		NV_ERROR(dev, "EvoCh %d takedown timeout: 0x%08x\n", id,
+			 nv_rd32(dev, NV50_PDISPLAY_EVO_CTRL(id)));
+	}
+}
+
+static int
+nv50_evo_create(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_gpuobj *ramht = NULL;
+	struct nouveau_channel *evo;
+	int ret;
+
+	/* create primary evo channel, the one we use for modesetting
+	 * purposes
+	 */
+	ret = nv50_evo_channel_new(dev, &dev_priv->evo);
+	if (ret)
+		return ret;
+	evo = dev_priv->evo;
+
+	/* setup object management on it, any other evo channel will
+	 * use this also as there's no per-channel support on the
+	 * hardware
+	 */
+	ret = nouveau_gpuobj_new(dev, NULL, 32768, 65536,
+				 NVOBJ_FLAG_ZERO_ALLOC, &evo->ramin);
+	if (ret) {
+		NV_ERROR(dev, "Error allocating EVO channel memory: %d\n", ret);
+		nv50_evo_channel_del(&dev_priv->evo);
+		return ret;
+	}
+
+	ret = drm_mm_init(&evo->ramin_heap, 0, 32768);
+	if (ret) {
+		NV_ERROR(dev, "Error initialising EVO PRAMIN heap: %d\n", ret);
+		nv50_evo_channel_del(&dev_priv->evo);
+		return ret;
+	}
+
+	ret = nouveau_gpuobj_new(dev, evo, 4096, 16, 0, &ramht);
+	if (ret) {
+		NV_ERROR(dev, "Unable to allocate EVO RAMHT: %d\n", ret);
+		nv50_evo_channel_del(&dev_priv->evo);
+		return ret;
+	}
+
+	ret = nouveau_ramht_new(dev, ramht, &evo->ramht);
+	nouveau_gpuobj_ref(NULL, &ramht);
+	if (ret) {
+		nv50_evo_channel_del(&dev_priv->evo);
+		return ret;
+	}
+
+	/* create some default objects for the scanout memtypes we support */
+	if (dev_priv->card_type >= NV_C0) {
+		ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoFB32, 0xfe, 0x19,
+					  0, 0xffffffff, 0x00000000);
+		if (ret) {
+			nv50_evo_channel_del(&dev_priv->evo);
+			return ret;
+		}
+
+		ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoVRAM, 0, 0x19,
+					  0, dev_priv->vram_size, 0x00020000);
+		if (ret) {
+			nv50_evo_channel_del(&dev_priv->evo);
+			return ret;
+		}
+
+		ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoVRAM_LP, 0, 0x19,
+					  0, dev_priv->vram_size, 0x00000000);
+		if (ret) {
+			nv50_evo_channel_del(&dev_priv->evo);
+			return ret;
+		}
+	} else
+	if (dev_priv->chipset != 0x50) {
+		ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoFB16, 0x70, 0x19,
+					  0, 0xffffffff, 0x00010000);
+		if (ret) {
+			nv50_evo_channel_del(&dev_priv->evo);
+			return ret;
+		}
+
+
+		ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoFB32, 0x7a, 0x19,
+					  0, 0xffffffff, 0x00010000);
+		if (ret) {
+			nv50_evo_channel_del(&dev_priv->evo);
+			return ret;
+		}
+
+		ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoVRAM, 0, 0x19,
+					  0, dev_priv->vram_size, 0x00010000);
+		if (ret) {
+			nv50_evo_channel_del(&dev_priv->evo);
+			return ret;
+		}
+
+		ret = nv50_evo_dmaobj_new(evo, 0x3d, NvEvoVRAM_LP, 0, 0x19,
+					  0, dev_priv->vram_size, 0x00010000);
+		if (ret) {
+			nv50_evo_channel_del(&dev_priv->evo);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+int
+nv50_evo_init(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	int ret;
+
+	if (!dev_priv->evo) {
+		ret = nv50_evo_create(dev);
+		if (ret)
+			return ret;
+	}
+
+	return nv50_evo_channel_init(dev_priv->evo);
+}
+
+void
+nv50_evo_fini(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+	if (dev_priv->evo) {
+		nv50_evo_channel_fini(dev_priv->evo);
+		nv50_evo_channel_del(&dev_priv->evo);
+	}
+}
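nv50_evo_channel_new() above hands out channel ids with a simple bitmask allocator in
dev_priv->evo_alloc: the first clear bit among the five EVO slots is claimed, and falling
off the end of the loop (id == 5) means every slot is in use, so the function fails with
-ENODEV. A standalone sketch of that allocator (illustration only; NUM_SLOTS and
alloc_evo_id are demo names, not driver symbols):

	#include <stdio.h>

	#define NUM_SLOTS 5

	static int alloc_evo_id(unsigned int *evo_alloc)
	{
		int id;

		for (id = 0; id < NUM_SLOTS; id++) {
			if (*evo_alloc & (1u << id))
				continue;		/* slot already taken */
			*evo_alloc |= (1u << id);
			return id;			/* claimed */
		}
		return -1;				/* all busy; the driver returns -ENODEV */
	}

	int main(void)
	{
		unsigned int evo_alloc = 0;
		int i;

		for (i = 0; i < NUM_SLOTS + 1; i++)
			printf("alloc -> %d\n", alloc_evo_id(&evo_alloc));
		return 0;
	}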
diff --git a/drivers/gpu/drm/nouveau/nv50_evo.h b/drivers/gpu/drm/nouveau/nv50_evo.h
index aae1334..aa4f0d3 100644
--- a/drivers/gpu/drm/nouveau/nv50_evo.h
+++ b/drivers/gpu/drm/nouveau/nv50_evo.h
@@ -24,6 +24,15 @@
  *
  */
 
+#ifndef __NV50_EVO_H__
+#define __NV50_EVO_H__
+
+int  nv50_evo_init(struct drm_device *dev);
+void nv50_evo_fini(struct drm_device *dev);
+int  nv50_evo_dmaobj_new(struct nouveau_channel *, u32 class, u32 name,
+			 u32 tile_flags, u32 magic_flags,
+			 u32 offset, u32 limit);
+
 #define NV50_EVO_UPDATE                                              0x00000080
 #define NV50_EVO_UNK84                                               0x00000084
 #define NV50_EVO_UNK84_NOTIFY                                        0x40000000
@@ -111,3 +120,4 @@
 #define NV50_EVO_CRTC_SCALE_RES1                                     0x000008d8
 #define NV50_EVO_CRTC_SCALE_RES2                                     0x000008dc
 
+#endif
diff --git a/drivers/gpu/drm/nouveau/nv50_fb.c b/drivers/gpu/drm/nouveau/nv50_fb.c
index cd1988b..50290de 100644
--- a/drivers/gpu/drm/nouveau/nv50_fb.c
+++ b/drivers/gpu/drm/nouveau/nv50_fb.c
@@ -3,30 +3,75 @@
 #include "nouveau_drv.h"
 #include "nouveau_drm.h"
 
+struct nv50_fb_priv {
+	struct page *r100c08_page;
+	dma_addr_t r100c08;
+};
+
+static int
+nv50_fb_create(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nv50_fb_priv *priv;
+
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	priv->r100c08_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+	if (!priv->r100c08_page) {
+		kfree(priv);
+		return -ENOMEM;
+	}
+
+	priv->r100c08 = pci_map_page(dev->pdev, priv->r100c08_page, 0,
+				     PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+	if (pci_dma_mapping_error(dev->pdev, priv->r100c08)) {
+		__free_page(priv->r100c08_page);
+		kfree(priv);
+		return -EFAULT;
+	}
+
+	dev_priv->engine.fb.priv = priv;
+	return 0;
+}
+
 int
 nv50_fb_init(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nv50_fb_priv *priv;
+	int ret;
+
+	if (!dev_priv->engine.fb.priv) {
+		ret = nv50_fb_create(dev);
+		if (ret)
+			return ret;
+	}
+	priv = dev_priv->engine.fb.priv;
 
 	/* Not a clue what this is exactly.  Without pointing it at a
 	 * scratch page, VRAM->GART blits with M2MF (as in DDX DFS)
 	 * cause IOMMU "read from address 0" errors (rh#561267)
 	 */
-	nv_wr32(dev, 0x100c08, dev_priv->gart_info.sg_dummy_bus >> 8);
+	nv_wr32(dev, 0x100c08, priv->r100c08 >> 8);
 
 	/* This is needed to get meaningful information from 100c90
 	 * on traps. No idea what these values mean exactly. */
 	switch (dev_priv->chipset) {
 	case 0x50:
-		nv_wr32(dev, 0x100c90, 0x0707ff);
+		nv_wr32(dev, 0x100c90, 0x000707ff);
 		break;
 	case 0xa3:
 	case 0xa5:
 	case 0xa8:
-		nv_wr32(dev, 0x100c90, 0x0d0fff);
+		nv_wr32(dev, 0x100c90, 0x000d0fff);
+		break;
+	case 0xaf:
+		nv_wr32(dev, 0x100c90, 0x089d1fff);
 		break;
 	default:
-		nv_wr32(dev, 0x100c90, 0x1d07ff);
+		nv_wr32(dev, 0x100c90, 0x001d07ff);
 		break;
 	}
 
@@ -36,12 +81,25 @@
 void
 nv50_fb_takedown(struct drm_device *dev)
 {
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nv50_fb_priv *priv;
+
+	priv = dev_priv->engine.fb.priv;
+	if (!priv)
+		return;
+	dev_priv->engine.fb.priv = NULL;
+
+	pci_unmap_page(dev->pdev, priv->r100c08, PAGE_SIZE,
+		       PCI_DMA_BIDIRECTIONAL);
+	__free_page(priv->r100c08_page);
+	kfree(priv);
 }
 
 void
 nv50_fb_vm_trap(struct drm_device *dev, int display, const char *name)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	unsigned long flags;
 	u32 trap[6], idx, chinst;
 	int i, ch;
 
@@ -60,8 +118,10 @@
 		return;
 
 	chinst = (trap[2] << 16) | trap[1];
+
+	spin_lock_irqsave(&dev_priv->channels.lock, flags);
 	for (ch = 0; ch < dev_priv->engine.fifo.channels; ch++) {
-		struct nouveau_channel *chan = dev_priv->fifos[ch];
+		struct nouveau_channel *chan = dev_priv->channels.ptr[ch];
 
 		if (!chan || !chan->ramin)
 			continue;
@@ -69,6 +129,7 @@
 		if (chinst == chan->ramin->vinst >> 12)
 			break;
 	}
+	spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
 
 	NV_INFO(dev, "%s - VM: Trapped %s at %02x%04x%04x status %08x "
 		     "channel %d (0x%08x)\n",
diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c
index 6dcf048..791ded1 100644
--- a/drivers/gpu/drm/nouveau/nv50_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c
@@ -1,29 +1,46 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
 #include "drmP.h"
 #include "nouveau_drv.h"
 #include "nouveau_dma.h"
 #include "nouveau_ramht.h"
 #include "nouveau_fbcon.h"
+#include "nouveau_mm.h"
 
-void
+int
 nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
 {
 	struct nouveau_fbdev *nfbdev = info->par;
 	struct drm_device *dev = nfbdev->dev;
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_channel *chan = dev_priv->channel;
+	int ret;
 
-	if (info->state != FBINFO_STATE_RUNNING)
-		return;
-
-	if (!(info->flags & FBINFO_HWACCEL_DISABLED) &&
-	     RING_SPACE(chan, rect->rop == ROP_COPY ? 7 : 11)) {
-		nouveau_fbcon_gpu_lockup(info);
-	}
-
-	if (info->flags & FBINFO_HWACCEL_DISABLED) {
-		cfb_fillrect(info, rect);
-		return;
-	}
+	ret = RING_SPACE(chan, rect->rop == ROP_COPY ? 7 : 11);
+	if (ret)
+		return ret;
 
 	if (rect->rop != ROP_COPY) {
 		BEGIN_RING(chan, NvSub2D, 0x02ac, 1);
@@ -45,27 +62,21 @@
 		OUT_RING(chan, 3);
 	}
 	FIRE_RING(chan);
+	return 0;
 }
 
-void
+int
 nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
 {
 	struct nouveau_fbdev *nfbdev = info->par;
 	struct drm_device *dev = nfbdev->dev;
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_channel *chan = dev_priv->channel;
+	int ret;
 
-	if (info->state != FBINFO_STATE_RUNNING)
-		return;
-
-	if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 12)) {
-		nouveau_fbcon_gpu_lockup(info);
-	}
-
-	if (info->flags & FBINFO_HWACCEL_DISABLED) {
-		cfb_copyarea(info, region);
-		return;
-	}
+	ret = RING_SPACE(chan, 12);
+	if (ret)
+		return ret;
 
 	BEGIN_RING(chan, NvSub2D, 0x0110, 1);
 	OUT_RING(chan, 0);
@@ -80,9 +91,10 @@
 	OUT_RING(chan, 0);
 	OUT_RING(chan, region->sy);
 	FIRE_RING(chan);
+	return 0;
 }
 
-void
+int
 nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
 {
 	struct nouveau_fbdev *nfbdev = info->par;
@@ -92,23 +104,14 @@
 	uint32_t width, dwords, *data = (uint32_t *)image->data;
 	uint32_t mask = ~(~0 >> (32 - info->var.bits_per_pixel));
 	uint32_t *palette = info->pseudo_palette;
+	int ret;
 
-	if (info->state != FBINFO_STATE_RUNNING)
-		return;
+	if (image->depth != 1)
+		return -ENODEV;
 
-	if (image->depth != 1) {
-		cfb_imageblit(info, image);
-		return;
-	}
-
-	if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 11)) {
-		nouveau_fbcon_gpu_lockup(info);
-	}
-
-	if (info->flags & FBINFO_HWACCEL_DISABLED) {
-		cfb_imageblit(info, image);
-		return;
-	}
+	ret = RING_SPACE(chan, 11);
+	if (ret)
+		return ret;
 
 	width = ALIGN(image->width, 32);
 	dwords = (width * image->height) >> 5;
@@ -134,11 +137,9 @@
 	while (dwords) {
 		int push = dwords > 2047 ? 2047 : dwords;
 
-		if (RING_SPACE(chan, push + 1)) {
-			nouveau_fbcon_gpu_lockup(info);
-			cfb_imageblit(info, image);
-			return;
-		}
+		ret = RING_SPACE(chan, push + 1);
+		if (ret)
+			return ret;
 
 		dwords -= push;
 
@@ -148,6 +149,7 @@
 	}
 
 	FIRE_RING(chan);
+	return 0;
 }
 
 int
@@ -157,12 +159,9 @@
 	struct drm_device *dev = nfbdev->dev;
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_channel *chan = dev_priv->channel;
-	struct nouveau_gpuobj *eng2d = NULL;
-	uint64_t fb;
+	struct nouveau_bo *nvbo = nfbdev->nouveau_fb.nvbo;
 	int ret, format;
 
-	fb = info->fix.smem_start - dev_priv->fb_phys + dev_priv->vm_vram_base;
-
 	switch (info->var.bits_per_pixel) {
 	case 8:
 		format = 0xf3;
@@ -190,12 +189,7 @@
 		return -EINVAL;
 	}
 
-	ret = nouveau_gpuobj_gr_new(dev_priv->channel, 0x502d, &eng2d);
-	if (ret)
-		return ret;
-
-	ret = nouveau_ramht_insert(dev_priv->channel, Nv2D, eng2d);
-	nouveau_gpuobj_ref(NULL, &eng2d);
+	ret = nouveau_gpuobj_gr_new(dev_priv->channel, Nv2D, 0x502d);
 	if (ret)
 		return ret;
 
@@ -253,8 +247,8 @@
 	OUT_RING(chan, info->fix.line_length);
 	OUT_RING(chan, info->var.xres_virtual);
 	OUT_RING(chan, info->var.yres_virtual);
-	OUT_RING(chan, upper_32_bits(fb));
-	OUT_RING(chan, lower_32_bits(fb));
+	OUT_RING(chan, upper_32_bits(nvbo->vma.offset));
+	OUT_RING(chan, lower_32_bits(nvbo->vma.offset));
 	BEGIN_RING(chan, NvSub2D, 0x0230, 2);
 	OUT_RING(chan, format);
 	OUT_RING(chan, 1);
@@ -262,8 +256,8 @@
 	OUT_RING(chan, info->fix.line_length);
 	OUT_RING(chan, info->var.xres_virtual);
 	OUT_RING(chan, info->var.yres_virtual);
-	OUT_RING(chan, upper_32_bits(fb));
-	OUT_RING(chan, lower_32_bits(fb));
+	OUT_RING(chan, upper_32_bits(nvbo->vma.offset));
+	OUT_RING(chan, lower_32_bits(nvbo->vma.offset));
 
 	return 0;
 }
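With this change the accelerated fbcon hooks report failure (no ring space, unsupported
image depth) instead of calling the cfb_* software helpers themselves, so the decision to
fall back moves out of these functions. A standalone sketch of that split (illustration
only; the wrapper shown is hypothetical and not the driver's actual code):

	#include <stdio.h>

	static int accel_fillrect(int have_ring_space)
	{
		if (!have_ring_space)
			return -1;	/* nonzero, as RING_SPACE() reports failure */
		/* ... emit the GPU commands ... */
		return 0;
	}

	static void sw_fillrect(void)
	{
		printf("software fallback\n");	/* stands in for cfb_fillrect() */
	}

	static void fillrect(int have_ring_space)
	{
		if (accel_fillrect(have_ring_space))
			sw_fillrect();
	}

	int main(void)
	{
		fillrect(1);	/* accelerated path */
		fillrect(0);	/* falls back to software */
		return 0;
	}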
diff --git a/drivers/gpu/drm/nouveau/nv50_fifo.c b/drivers/gpu/drm/nouveau/nv50_fifo.c
index 1da65bd..8dd04c5 100644
--- a/drivers/gpu/drm/nouveau/nv50_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv50_fifo.c
@@ -28,6 +28,7 @@
 #include "drm.h"
 #include "nouveau_drv.h"
 #include "nouveau_ramht.h"
+#include "nouveau_vm.h"
 
 static void
 nv50_fifo_playlist_update(struct drm_device *dev)
@@ -44,7 +45,8 @@
 
 	/* We never schedule channel 0 or 127 */
 	for (i = 1, nr = 0; i < 127; i++) {
-		if (dev_priv->fifos[i] && dev_priv->fifos[i]->ramfc) {
+		if (dev_priv->channels.ptr[i] &&
+		    dev_priv->channels.ptr[i]->ramfc) {
 			nv_wo32(cur, (nr * 4), i);
 			nr++;
 		}
@@ -60,7 +62,7 @@
 nv50_fifo_channel_enable(struct drm_device *dev, int channel)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_channel *chan = dev_priv->fifos[channel];
+	struct nouveau_channel *chan = dev_priv->channels.ptr[channel];
 	uint32_t inst;
 
 	NV_DEBUG(dev, "ch%d\n", channel);
@@ -105,6 +107,7 @@
 {
 	NV_DEBUG(dev, "\n");
 
+	nouveau_irq_register(dev, 8, nv04_fifo_isr);
 	nv_wr32(dev, NV03_PFIFO_INTR_0, 0xFFFFFFFF);
 	nv_wr32(dev, NV03_PFIFO_INTR_EN_0, 0xFFFFFFFF);
 }
@@ -118,7 +121,7 @@
 	NV_DEBUG(dev, "\n");
 
 	for (i = 0; i < NV50_PFIFO_CTX_TABLE__SIZE; i++) {
-		if (dev_priv->fifos[i])
+		if (dev_priv->channels.ptr[i])
 			nv50_fifo_channel_enable(dev, i);
 		else
 			nv50_fifo_channel_disable(dev, i);
@@ -206,6 +209,9 @@
 	if (!pfifo->playlist[0])
 		return;
 
+	nv_wr32(dev, 0x2140, 0x00000000);
+	nouveau_irq_unregister(dev, 8);
+
 	nouveau_gpuobj_ref(NULL, &pfifo->playlist[0]);
 	nouveau_gpuobj_ref(NULL, &pfifo->playlist[1]);
 }
@@ -256,6 +262,11 @@
 	}
 	ramfc = chan->ramfc;
 
+	chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
+			     NV50_USER(chan->id), PAGE_SIZE);
+	if (!chan->user)
+		return -ENOMEM;
+
 	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
 
 	nv_wo32(ramfc, 0x48, chan->pushbuf->cinst >> 4);
@@ -291,10 +302,23 @@
 nv50_fifo_destroy_context(struct nouveau_channel *chan)
 {
 	struct drm_device *dev = chan->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
 	struct nouveau_gpuobj *ramfc = NULL;
+	unsigned long flags;
 
 	NV_DEBUG(dev, "ch%d\n", chan->id);
 
+	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+	pfifo->reassign(dev, false);
+
+	/* Unload the context if it's the currently active one */
+	if (pfifo->channel_id(dev) == chan->id) {
+		pfifo->disable(dev);
+		pfifo->unload_context(dev);
+		pfifo->enable(dev);
+	}
+
 	/* This will ensure the channel is seen as disabled. */
 	nouveau_gpuobj_ref(chan->ramfc, &ramfc);
 	nouveau_gpuobj_ref(NULL, &chan->ramfc);
@@ -305,6 +329,14 @@
 		nv50_fifo_channel_disable(dev, 127);
 	nv50_fifo_playlist_update(dev);
 
+	pfifo->reassign(dev, true);
+	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
+
+	/* Free the channel resources */
+	if (chan->user) {
+		iounmap(chan->user);
+		chan->user = NULL;
+	}
 	nouveau_gpuobj_ref(NULL, &ramfc);
 	nouveau_gpuobj_ref(NULL, &chan->cache);
 }
@@ -392,7 +424,7 @@
 	if (chid < 1 || chid >= dev_priv->engine.fifo.channels - 1)
 		return 0;
 
-	chan = dev_priv->fifos[chid];
+	chan = dev_priv->channels.ptr[chid];
 	if (!chan) {
 		NV_ERROR(dev, "Inactive channel on PFIFO: %d\n", chid);
 		return -EINVAL;
@@ -467,5 +499,5 @@
 void
 nv50_fifo_tlb_flush(struct drm_device *dev)
 {
-	nv50_vm_flush(dev, 5);
+	nv50_vm_flush_engine(dev, 5);
 }
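nv50_fifo_playlist_update() builds the runlist by walking dev_priv->channels.ptr[] and
adding every channel that currently owns a RAMFC, never scheduling channels 0 or 127. A
standalone sketch of that selection (illustration only; the populated slots are made up):

	#include <stdio.h>

	#define NUM_CHANNELS 128

	int main(void)
	{
		int has_ramfc[NUM_CHANNELS] = { [3] = 1, [40] = 1 };	/* pretend two channels are live */
		int playlist[NUM_CHANNELS];
		int i, nr = 0;

		/* channels 0 and 127 are never scheduled */
		for (i = 1; i < 127; i++) {
			if (has_ramfc[i])
				playlist[nr++] = i;
		}

		for (i = 0; i < nr; i++)
			printf("playlist[%d] = channel %d\n", i, playlist[i]);
		return 0;
	}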
diff --git a/drivers/gpu/drm/nouveau/nv50_gpio.c b/drivers/gpu/drm/nouveau/nv50_gpio.c
index b2fab2b..6b149c0 100644
--- a/drivers/gpu/drm/nouveau/nv50_gpio.c
+++ b/drivers/gpu/drm/nouveau/nv50_gpio.c
@@ -26,6 +26,28 @@
 #include "nouveau_drv.h"
 #include "nouveau_hw.h"
 
+#include "nv50_display.h"
+
+static void nv50_gpio_isr(struct drm_device *dev);
+static void nv50_gpio_isr_bh(struct work_struct *work);
+
+struct nv50_gpio_priv {
+	struct list_head handlers;
+	spinlock_t lock;
+};
+
+struct nv50_gpio_handler {
+	struct drm_device *dev;
+	struct list_head head;
+	struct work_struct work;
+	bool inhibit;
+
+	struct dcb_gpio_entry *gpio;
+
+	void (*handler)(void *data, int state);
+	void *data;
+};
+
 static int
 nv50_gpio_location(struct dcb_gpio_entry *gpio, uint32_t *reg, uint32_t *shift)
 {
@@ -75,29 +97,123 @@
 	return 0;
 }
 
+int
+nv50_gpio_irq_register(struct drm_device *dev, enum dcb_gpio_tag tag,
+		       void (*handler)(void *, int), void *data)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
+	struct nv50_gpio_priv *priv = pgpio->priv;
+	struct nv50_gpio_handler *gpioh;
+	struct dcb_gpio_entry *gpio;
+	unsigned long flags;
+
+	gpio = nouveau_bios_gpio_entry(dev, tag);
+	if (!gpio)
+		return -ENOENT;
+
+	gpioh = kzalloc(sizeof(*gpioh), GFP_KERNEL);
+	if (!gpioh)
+		return -ENOMEM;
+
+	INIT_WORK(&gpioh->work, nv50_gpio_isr_bh);
+	gpioh->dev  = dev;
+	gpioh->gpio = gpio;
+	gpioh->handler = handler;
+	gpioh->data = data;
+
+	spin_lock_irqsave(&priv->lock, flags);
+	list_add(&gpioh->head, &priv->handlers);
+	spin_unlock_irqrestore(&priv->lock, flags);
+	return 0;
+}
+
 void
+nv50_gpio_irq_unregister(struct drm_device *dev, enum dcb_gpio_tag tag,
+			 void (*handler)(void *, int), void *data)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
+	struct nv50_gpio_priv *priv = pgpio->priv;
+	struct nv50_gpio_handler *gpioh, *tmp;
+	struct dcb_gpio_entry *gpio;
+	unsigned long flags;
+
+	gpio = nouveau_bios_gpio_entry(dev, tag);
+	if (!gpio)
+		return;
+
+	spin_lock_irqsave(&priv->lock, flags);
+	list_for_each_entry_safe(gpioh, tmp, &priv->handlers, head) {
+		if (gpioh->gpio != gpio ||
+		    gpioh->handler != handler ||
+		    gpioh->data != data)
+			continue;
+		list_del(&gpioh->head);
+		kfree(gpioh);
+	}
+	spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+bool
 nv50_gpio_irq_enable(struct drm_device *dev, enum dcb_gpio_tag tag, bool on)
 {
 	struct dcb_gpio_entry *gpio;
 	u32 reg, mask;
 
 	gpio = nouveau_bios_gpio_entry(dev, tag);
-	if (!gpio) {
-		NV_ERROR(dev, "gpio tag 0x%02x not found\n", tag);
-		return;
-	}
+	if (!gpio)
+		return false;
 
 	reg  = gpio->line < 16 ? 0xe050 : 0xe070;
 	mask = 0x00010001 << (gpio->line & 0xf);
 
 	nv_wr32(dev, reg + 4, mask);
-	nv_mask(dev, reg + 0, mask, on ? mask : 0);
+	reg = nv_mask(dev, reg + 0, mask, on ? mask : 0);
+	return (reg & mask) == mask;
+}
+
+static int
+nv50_gpio_create(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
+	struct nv50_gpio_priv *priv;
+
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&priv->handlers);
+	spin_lock_init(&priv->lock);
+	pgpio->priv = priv;
+	return 0;
+}
+
+static void
+nv50_gpio_destroy(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
+
+	kfree(pgpio->priv);
+	pgpio->priv = NULL;
 }
 
 int
 nv50_gpio_init(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
+	struct nv50_gpio_priv *priv;
+	int ret;
+
+	if (!pgpio->priv) {
+		ret = nv50_gpio_create(dev);
+		if (ret)
+			return ret;
+	}
+	priv = pgpio->priv;
 
 	/* disable, and ack any pending gpio interrupts */
 	nv_wr32(dev, 0xe050, 0x00000000);
@@ -107,5 +223,77 @@
 		nv_wr32(dev, 0xe074, 0xffffffff);
 	}
 
+	nouveau_irq_register(dev, 21, nv50_gpio_isr);
 	return 0;
 }
+
+void
+nv50_gpio_fini(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+	nv_wr32(dev, 0xe050, 0x00000000);
+	if (dev_priv->chipset >= 0x90)
+		nv_wr32(dev, 0xe070, 0x00000000);
+	nouveau_irq_unregister(dev, 21);
+
+	nv50_gpio_destroy(dev);
+}
+
+static void
+nv50_gpio_isr_bh(struct work_struct *work)
+{
+	struct nv50_gpio_handler *gpioh =
+		container_of(work, struct nv50_gpio_handler, work);
+	struct drm_nouveau_private *dev_priv = gpioh->dev->dev_private;
+	struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
+	struct nv50_gpio_priv *priv = pgpio->priv;
+	unsigned long flags;
+	int state;
+
+	state = pgpio->get(gpioh->dev, gpioh->gpio->tag);
+	if (state < 0)
+		return;
+
+	gpioh->handler(gpioh->data, state);
+
+	spin_lock_irqsave(&priv->lock, flags);
+	gpioh->inhibit = false;
+	spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+static void
+nv50_gpio_isr(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
+	struct nv50_gpio_priv *priv = pgpio->priv;
+	struct nv50_gpio_handler *gpioh;
+	u32 intr0, intr1 = 0;
+	u32 hi, lo, ch;
+
+	intr0 = nv_rd32(dev, 0xe054) & nv_rd32(dev, 0xe050);
+	if (dev_priv->chipset >= 0x90)
+		intr1 = nv_rd32(dev, 0xe074) & nv_rd32(dev, 0xe070);
+
+	hi = (intr0 & 0x0000ffff) | (intr1 << 16);
+	lo = (intr0 >> 16) | (intr1 & 0xffff0000);
+	ch = hi | lo;
+
+	nv_wr32(dev, 0xe054, intr0);
+	if (dev_priv->chipset >= 0x90)
+		nv_wr32(dev, 0xe074, intr1);
+
+	spin_lock(&priv->lock);
+	list_for_each_entry(gpioh, &priv->handlers, head) {
+		if (!(ch & (1 << gpioh->gpio->line)))
+			continue;
+
+		if (gpioh->inhibit)
+			continue;
+		gpioh->inhibit = true;
+
+		queue_work(dev_priv->wq, &gpioh->work);
+	}
+	spin_unlock(&priv->lock);
+}
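In nv50_gpio_isr() above, 0xe054 carries the change events for GPIO lines 0-15
(went-high events in the low half, went-low events in the high half) and 0xe074 the same
for lines 16-31; the two words are folded into a single 32-bit "line changed" mask before
the matching handlers are queued. A standalone sketch of that folding (illustration only;
the interrupt values are made up):

	#include <stdio.h>

	int main(void)
	{
		/* example: line 2 went high (intr0 bit 2), line 20 went low (intr1 bit 20) */
		unsigned int intr0 = 0x00000004;
		unsigned int intr1 = 0x00100000;
		unsigned int hi, lo, ch;
		int line;

		hi = (intr0 & 0x0000ffff) | (intr1 << 16);	/* went-high events, lines 0-31 */
		lo = (intr0 >> 16) | (intr1 & 0xffff0000);	/* went-low events, lines 0-31 */
		ch = hi | lo;					/* one bit per changed line */

		for (line = 0; line < 32; line++)
			if (ch & (1u << line))
				printf("line %d changed (now %s)\n", line,
				       (hi & (1u << line)) ? "high" : "low");
		return 0;
	}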
diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c
index 8b669d0..2d7ea75 100644
--- a/drivers/gpu/drm/nouveau/nv50_graph.c
+++ b/drivers/gpu/drm/nouveau/nv50_graph.c
@@ -29,6 +29,12 @@
 #include "nouveau_drv.h"
 #include "nouveau_ramht.h"
 #include "nouveau_grctx.h"
+#include "nouveau_dma.h"
+#include "nouveau_vm.h"
+#include "nv50_evo.h"
+
+static int  nv50_graph_register(struct drm_device *);
+static void nv50_graph_isr(struct drm_device *);
 
 static void
 nv50_graph_init_reset(struct drm_device *dev)
@@ -46,6 +52,7 @@
 {
 	NV_DEBUG(dev, "\n");
 
+	nouveau_irq_register(dev, 12, nv50_graph_isr);
 	nv_wr32(dev, NV03_PGRAPH_INTR, 0xffffffff);
 	nv_wr32(dev, 0x400138, 0xffffffff);
 	nv_wr32(dev, NV40_PGRAPH_INTR_EN, 0xffffffff);
@@ -145,12 +152,15 @@
 	nv50_graph_init_reset(dev);
 	nv50_graph_init_regs__nv(dev);
 	nv50_graph_init_regs(dev);
-	nv50_graph_init_intr(dev);
 
 	ret = nv50_graph_init_ctxctl(dev);
 	if (ret)
 		return ret;
 
+	ret = nv50_graph_register(dev);
+	if (ret)
+		return ret;
+	nv50_graph_init_intr(dev);
 	return 0;
 }
 
@@ -158,6 +168,8 @@
 nv50_graph_takedown(struct drm_device *dev)
 {
 	NV_DEBUG(dev, "\n");
+	nv_wr32(dev, 0x40013c, 0x00000000);
+	nouveau_irq_unregister(dev, 12);
 }
 
 void
@@ -190,7 +202,7 @@
 	inst = (inst & NV50_PGRAPH_CTXCTL_CUR_INSTANCE) << 12;
 
 	for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
-		struct nouveau_channel *chan = dev_priv->fifos[i];
+		struct nouveau_channel *chan = dev_priv->channels.ptr[i];
 
 		if (chan && chan->ramin && chan->ramin->vinst == inst)
 			return chan;
@@ -211,7 +223,7 @@
 
 	NV_DEBUG(dev, "ch%d\n", chan->id);
 
-	ret = nouveau_gpuobj_new(dev, chan, pgraph->grctx_size, 0x1000,
+	ret = nouveau_gpuobj_new(dev, chan, pgraph->grctx_size, 0,
 				 NVOBJ_FLAG_ZERO_ALLOC |
 				 NVOBJ_FLAG_ZERO_FREE, &chan->ramin_grctx);
 	if (ret)
@@ -234,6 +246,7 @@
 	nv_wo32(chan->ramin_grctx, 0x00000, chan->ramin->vinst >> 12);
 
 	dev_priv->engine.instmem.flush(dev);
+	atomic_inc(&chan->vm->pgraph_refs);
 	return 0;
 }
 
@@ -242,18 +255,31 @@
 {
 	struct drm_device *dev = chan->dev;
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
 	int i, hdr = (dev_priv->chipset == 0x50) ? 0x200 : 0x20;
+	unsigned long flags;
 
 	NV_DEBUG(dev, "ch%d\n", chan->id);
 
 	if (!chan->ramin)
 		return;
 
+	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+	pgraph->fifo_access(dev, false);
+
+	if (pgraph->channel(dev) == chan)
+		pgraph->unload_context(dev);
+
 	for (i = hdr; i < hdr + 24; i += 4)
 		nv_wo32(chan->ramin, i, 0);
 	dev_priv->engine.instmem.flush(dev);
 
+	pgraph->fifo_access(dev, true);
+	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
+
 	nouveau_gpuobj_ref(NULL, &chan->ramin_grctx);
+
+	atomic_dec(&chan->vm->pgraph_refs);
 }
 
 static int
@@ -306,7 +332,7 @@
 	return 0;
 }
 
-void
+static void
 nv50_graph_context_switch(struct drm_device *dev)
 {
 	uint32_t inst;
@@ -322,8 +348,8 @@
 }
 
 static int
-nv50_graph_nvsw_dma_vblsem(struct nouveau_channel *chan, int grclass,
-			   int mthd, uint32_t data)
+nv50_graph_nvsw_dma_vblsem(struct nouveau_channel *chan,
+			   u32 class, u32 mthd, u32 data)
 {
 	struct nouveau_gpuobj *gpuobj;
 
@@ -340,8 +366,8 @@
 }
 
 static int
-nv50_graph_nvsw_vblsem_offset(struct nouveau_channel *chan, int grclass,
-			      int mthd, uint32_t data)
+nv50_graph_nvsw_vblsem_offset(struct nouveau_channel *chan,
+			      u32 class, u32 mthd, u32 data)
 {
 	if (nouveau_notifier_offset(chan->nvsw.vblsem, &data))
 		return -ERANGE;
@@ -351,16 +377,16 @@
 }
 
 static int
-nv50_graph_nvsw_vblsem_release_val(struct nouveau_channel *chan, int grclass,
-				   int mthd, uint32_t data)
+nv50_graph_nvsw_vblsem_release_val(struct nouveau_channel *chan,
+				   u32 class, u32 mthd, u32 data)
 {
 	chan->nvsw.vblsem_rval = data;
 	return 0;
 }
 
 static int
-nv50_graph_nvsw_vblsem_release(struct nouveau_channel *chan, int grclass,
-			       int mthd, uint32_t data)
+nv50_graph_nvsw_vblsem_release(struct nouveau_channel *chan,
+			       u32 class, u32 mthd, u32 data)
 {
 	struct drm_device *dev = chan->dev;
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
@@ -368,45 +394,85 @@
 	if (!chan->nvsw.vblsem || chan->nvsw.vblsem_offset == ~0 || data > 1)
 		return -EINVAL;
 
-	if (!(nv_rd32(dev, NV50_PDISPLAY_INTR_EN) &
-		      NV50_PDISPLAY_INTR_EN_VBLANK_CRTC_(data))) {
-		nv_wr32(dev, NV50_PDISPLAY_INTR_1,
-			NV50_PDISPLAY_INTR_1_VBLANK_CRTC_(data));
-		nv_wr32(dev, NV50_PDISPLAY_INTR_EN, nv_rd32(dev,
-			NV50_PDISPLAY_INTR_EN) |
-			NV50_PDISPLAY_INTR_EN_VBLANK_CRTC_(data));
-	}
+	drm_vblank_get(dev, data);
 
+	chan->nvsw.vblsem_head = data;
 	list_add(&chan->nvsw.vbl_wait, &dev_priv->vbl_waiting);
+
 	return 0;
 }
 
-static struct nouveau_pgraph_object_method nv50_graph_nvsw_methods[] = {
-	{ 0x018c, nv50_graph_nvsw_dma_vblsem },
-	{ 0x0400, nv50_graph_nvsw_vblsem_offset },
-	{ 0x0404, nv50_graph_nvsw_vblsem_release_val },
-	{ 0x0408, nv50_graph_nvsw_vblsem_release },
-	{}
-};
+static int
+nv50_graph_nvsw_mthd_page_flip(struct nouveau_channel *chan,
+			       u32 class, u32 mthd, u32 data)
+{
+	struct nouveau_page_flip_state s;
 
-struct nouveau_pgraph_object_class nv50_graph_grclass[] = {
-	{ 0x506e, true, nv50_graph_nvsw_methods }, /* nvsw */
-	{ 0x0030, false, NULL }, /* null */
-	{ 0x5039, false, NULL }, /* m2mf */
-	{ 0x502d, false, NULL }, /* 2d */
-	{ 0x50c0, false, NULL }, /* compute */
-	{ 0x85c0, false, NULL }, /* compute (nva3, nva5, nva8) */
-	{ 0x5097, false, NULL }, /* tesla (nv50) */
-	{ 0x8297, false, NULL }, /* tesla (nv8x/nv9x) */
-	{ 0x8397, false, NULL }, /* tesla (nva0, nvaa, nvac) */
-	{ 0x8597, false, NULL }, /* tesla (nva3, nva5, nva8) */
-	{}
-};
+	if (!nouveau_finish_page_flip(chan, &s)) {
+		/* XXX - Do something here */
+	}
+
+	return 0;
+}
+
+static int
+nv50_graph_register(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+	if (dev_priv->engine.graph.registered)
+		return 0;
+
+	NVOBJ_CLASS(dev, 0x506e, SW); /* nvsw */
+	NVOBJ_MTHD (dev, 0x506e, 0x018c, nv50_graph_nvsw_dma_vblsem);
+	NVOBJ_MTHD (dev, 0x506e, 0x0400, nv50_graph_nvsw_vblsem_offset);
+	NVOBJ_MTHD (dev, 0x506e, 0x0404, nv50_graph_nvsw_vblsem_release_val);
+	NVOBJ_MTHD (dev, 0x506e, 0x0408, nv50_graph_nvsw_vblsem_release);
+	NVOBJ_MTHD (dev, 0x506e, 0x0500, nv50_graph_nvsw_mthd_page_flip);
+
+	NVOBJ_CLASS(dev, 0x0030, GR); /* null */
+	NVOBJ_CLASS(dev, 0x5039, GR); /* m2mf */
+	NVOBJ_CLASS(dev, 0x502d, GR); /* 2d */
+
+	/* tesla */
+	if (dev_priv->chipset == 0x50)
+		NVOBJ_CLASS(dev, 0x5097, GR); /* tesla (nv50) */
+	else
+	if (dev_priv->chipset < 0xa0)
+		NVOBJ_CLASS(dev, 0x8297, GR); /* tesla (nv8x/nv9x) */
+	else {
+		switch (dev_priv->chipset) {
+		case 0xa0:
+		case 0xaa:
+		case 0xac:
+			NVOBJ_CLASS(dev, 0x8397, GR);
+			break;
+		case 0xa3:
+		case 0xa5:
+		case 0xa8:
+			NVOBJ_CLASS(dev, 0x8597, GR);
+			break;
+		case 0xaf:
+			NVOBJ_CLASS(dev, 0x8697, GR);
+			break;
+		}
+	}
+
+	/* compute */
+	NVOBJ_CLASS(dev, 0x50c0, GR);
+	if (dev_priv->chipset  > 0xa0 &&
+	    dev_priv->chipset != 0xaa &&
+	    dev_priv->chipset != 0xac)
+		NVOBJ_CLASS(dev, 0x85c0, GR);
+
+	dev_priv->engine.graph.registered = true;
+	return 0;
+}
 
 void
 nv50_graph_tlb_flush(struct drm_device *dev)
 {
-	nv50_vm_flush(dev, 0);
+	nv50_vm_flush_engine(dev, 0);
 }
 
 void
@@ -449,8 +515,535 @@
 			 nv_rd32(dev, 0x400384), nv_rd32(dev, 0x400388));
 	}
 
-	nv50_vm_flush(dev, 0);
+	nv50_vm_flush_engine(dev, 0);
 
 	nv_mask(dev, 0x400500, 0x00000001, 0x00000001);
 	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
 }
+
+static struct nouveau_enum nv50_mp_exec_error_names[] =
+{
+	{ 3, "STACK_UNDERFLOW" },
+	{ 4, "QUADON_ACTIVE" },
+	{ 8, "TIMEOUT" },
+	{ 0x10, "INVALID_OPCODE" },
+	{ 0x40, "BREAKPOINT" },
+	{}
+};
+
+static struct nouveau_bitfield nv50_graph_trap_m2mf[] = {
+	{ 0x00000001, "NOTIFY" },
+	{ 0x00000002, "IN" },
+	{ 0x00000004, "OUT" },
+	{}
+};
+
+static struct nouveau_bitfield nv50_graph_trap_vfetch[] = {
+	{ 0x00000001, "FAULT" },
+	{}
+};
+
+static struct nouveau_bitfield nv50_graph_trap_strmout[] = {
+	{ 0x00000001, "FAULT" },
+	{}
+};
+
+static struct nouveau_bitfield nv50_graph_trap_ccache[] = {
+	{ 0x00000001, "FAULT" },
+	{}
+};
+
+/* There must be a *lot* of these. Will take some time to gather them up. */
+struct nouveau_enum nv50_data_error_names[] = {
+	{ 0x00000003, "INVALID_QUERY_OR_TEXTURE" },
+	{ 0x00000004, "INVALID_VALUE" },
+	{ 0x00000005, "INVALID_ENUM" },
+	{ 0x00000008, "INVALID_OBJECT" },
+	{ 0x00000009, "READ_ONLY_OBJECT" },
+	{ 0x0000000a, "SUPERVISOR_OBJECT" },
+	{ 0x0000000b, "INVALID_ADDRESS_ALIGNMENT" },
+	{ 0x0000000c, "INVALID_BITFIELD" },
+	{ 0x0000000d, "BEGIN_END_ACTIVE" },
+	{ 0x0000000e, "SEMANTIC_COLOR_BACK_OVER_LIMIT" },
+	{ 0x0000000f, "VIEWPORT_ID_NEEDS_GP" },
+	{ 0x00000010, "RT_DOUBLE_BIND" },
+	{ 0x00000011, "RT_TYPES_MISMATCH" },
+	{ 0x00000012, "RT_LINEAR_WITH_ZETA" },
+	{ 0x00000015, "FP_TOO_FEW_REGS" },
+	{ 0x00000016, "ZETA_FORMAT_CSAA_MISMATCH" },
+	{ 0x00000017, "RT_LINEAR_WITH_MSAA" },
+	{ 0x00000018, "FP_INTERPOLANT_START_OVER_LIMIT" },
+	{ 0x00000019, "SEMANTIC_LAYER_OVER_LIMIT" },
+	{ 0x0000001a, "RT_INVALID_ALIGNMENT" },
+	{ 0x0000001b, "SAMPLER_OVER_LIMIT" },
+	{ 0x0000001c, "TEXTURE_OVER_LIMIT" },
+	{ 0x0000001e, "GP_TOO_MANY_OUTPUTS" },
+	{ 0x0000001f, "RT_BPP128_WITH_MS8" },
+	{ 0x00000021, "Z_OUT_OF_BOUNDS" },
+	{ 0x00000023, "XY_OUT_OF_BOUNDS" },
+	{ 0x00000027, "CP_MORE_PARAMS_THAN_SHARED" },
+	{ 0x00000028, "CP_NO_REG_SPACE_STRIPED" },
+	{ 0x00000029, "CP_NO_REG_SPACE_PACKED" },
+	{ 0x0000002a, "CP_NOT_ENOUGH_WARPS" },
+	{ 0x0000002b, "CP_BLOCK_SIZE_MISMATCH" },
+	{ 0x0000002c, "CP_NOT_ENOUGH_LOCAL_WARPS" },
+	{ 0x0000002d, "CP_NOT_ENOUGH_STACK_WARPS" },
+	{ 0x0000002e, "CP_NO_BLOCKDIM_LATCH" },
+	{ 0x00000031, "ENG2D_FORMAT_MISMATCH" },
+	{ 0x0000003f, "PRIMITIVE_ID_NEEDS_GP" },
+	{ 0x00000044, "SEMANTIC_VIEWPORT_OVER_LIMIT" },
+	{ 0x00000045, "SEMANTIC_COLOR_FRONT_OVER_LIMIT" },
+	{ 0x00000046, "LAYER_ID_NEEDS_GP" },
+	{ 0x00000047, "SEMANTIC_CLIP_OVER_LIMIT" },
+	{ 0x00000048, "SEMANTIC_PTSZ_OVER_LIMIT" },
+	{}
+};
+
+static struct nouveau_bitfield nv50_graph_intr[] = {
+	{ 0x00000001, "NOTIFY" },
+	{ 0x00000002, "COMPUTE_QUERY" },
+	{ 0x00000010, "ILLEGAL_MTHD" },
+	{ 0x00000020, "ILLEGAL_CLASS" },
+	{ 0x00000040, "DOUBLE_NOTIFY" },
+	{ 0x00001000, "CONTEXT_SWITCH" },
+	{ 0x00010000, "BUFFER_NOTIFY" },
+	{ 0x00100000, "DATA_ERROR" },
+	{ 0x00200000, "TRAP" },
+	{ 0x01000000, "SINGLE_STEP" },
+	{}
+};
+
+static void
+nv50_pgraph_mp_trap(struct drm_device *dev, int tpid, int display)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	uint32_t units = nv_rd32(dev, 0x1540);
+	uint32_t addr, mp10, status, pc, oplow, ophigh;
+	int i;
+	int mps = 0;
+	for (i = 0; i < 4; i++) {
+		if (!(units & 1 << (i+24)))
+			continue;
+		if (dev_priv->chipset < 0xa0)
+			addr = 0x408200 + (tpid << 12) + (i << 7);
+		else
+			addr = 0x408100 + (tpid << 11) + (i << 7);
+		mp10 = nv_rd32(dev, addr + 0x10);
+		status = nv_rd32(dev, addr + 0x14);
+		if (!status)
+			continue;
+		if (display) {
+			nv_rd32(dev, addr + 0x20);
+			pc = nv_rd32(dev, addr + 0x24);
+			oplow = nv_rd32(dev, addr + 0x70);
+			ophigh= nv_rd32(dev, addr + 0x74);
+			NV_INFO(dev, "PGRAPH_TRAP_MP_EXEC - "
+					"TP %d MP %d: ", tpid, i);
+			nouveau_enum_print(nv50_mp_exec_error_names, status);
+			printk(" at %06x warp %d, opcode %08x %08x\n",
+					pc&0xffffff, pc >> 24,
+					oplow, ophigh);
+		}
+		nv_wr32(dev, addr + 0x10, mp10);
+		nv_wr32(dev, addr + 0x14, 0);
+		mps++;
+	}
+	if (!mps && display)
+		NV_INFO(dev, "PGRAPH_TRAP_MP_EXEC - TP %d: "
+				"No MPs claiming errors?\n", tpid);
+}
+
+static void
+nv50_pgraph_tp_trap(struct drm_device *dev, int type, uint32_t ustatus_old,
+		uint32_t ustatus_new, int display, const char *name)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	int tps = 0;
+	uint32_t units = nv_rd32(dev, 0x1540);
+	int i, r;
+	uint32_t ustatus_addr, ustatus;
+	for (i = 0; i < 16; i++) {
+		if (!(units & (1 << i)))
+			continue;
+		if (dev_priv->chipset < 0xa0)
+			ustatus_addr = ustatus_old + (i << 12);
+		else
+			ustatus_addr = ustatus_new + (i << 11);
+		ustatus = nv_rd32(dev, ustatus_addr) & 0x7fffffff;
+		if (!ustatus)
+			continue;
+		tps++;
+		switch (type) {
+		case 6: /* texture error... unknown for now */
+			nv50_fb_vm_trap(dev, display, name);
+			if (display) {
+				NV_ERROR(dev, "magic set %d:\n", i);
+				for (r = ustatus_addr + 4; r <= ustatus_addr + 0x10; r += 4)
+					NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
+						nv_rd32(dev, r));
+			}
+			break;
+		case 7: /* MP error */
+			if (ustatus & 0x00010000) {
+				nv50_pgraph_mp_trap(dev, i, display);
+				ustatus &= ~0x00010000;
+			}
+			break;
+		case 8: /* TPDMA error */
+			{
+			uint32_t e0c = nv_rd32(dev, ustatus_addr + 4);
+			uint32_t e10 = nv_rd32(dev, ustatus_addr + 8);
+			uint32_t e14 = nv_rd32(dev, ustatus_addr + 0xc);
+			uint32_t e18 = nv_rd32(dev, ustatus_addr + 0x10);
+			uint32_t e1c = nv_rd32(dev, ustatus_addr + 0x14);
+			uint32_t e20 = nv_rd32(dev, ustatus_addr + 0x18);
+			uint32_t e24 = nv_rd32(dev, ustatus_addr + 0x1c);
+			nv50_fb_vm_trap(dev, display, name);
+			/* 2d engine destination */
+			if (ustatus & 0x00000010) {
+				if (display) {
+					NV_INFO(dev, "PGRAPH_TRAP_TPDMA_2D - TP %d - Unknown fault at address %02x%08x\n",
+							i, e14, e10);
+					NV_INFO(dev, "PGRAPH_TRAP_TPDMA_2D - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
+							i, e0c, e18, e1c, e20, e24);
+				}
+				ustatus &= ~0x00000010;
+			}
+			/* Render target */
+			if (ustatus & 0x00000040) {
+				if (display) {
+					NV_INFO(dev, "PGRAPH_TRAP_TPDMA_RT - TP %d - Unknown fault at address %02x%08x\n",
+							i, e14, e10);
+					NV_INFO(dev, "PGRAPH_TRAP_TPDMA_RT - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
+							i, e0c, e18, e1c, e20, e24);
+				}
+				ustatus &= ~0x00000040;
+			}
+			/* CUDA memory: l[], g[] or stack. */
+			if (ustatus & 0x00000080) {
+				if (display) {
+					if (e18 & 0x80000000) {
+						/* g[] read fault? */
+						NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Global read fault at address %02x%08x\n",
+								i, e14, e10 | ((e18 >> 24) & 0x1f));
+						e18 &= ~0x1f000000;
+					} else if (e18 & 0xc) {
+						/* g[] write fault? */
+						NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Global write fault at address %02x%08x\n",
+								i, e14, e10 | ((e18 >> 7) & 0x1f));
+						e18 &= ~0x00000f80;
+					} else {
+						NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Unknown CUDA fault at address %02x%08x\n",
+								i, e14, e10);
+					}
+					NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
+							i, e0c, e18, e1c, e20, e24);
+				}
+				ustatus &= ~0x00000080;
+			}
+			}
+			break;
+		}
+		if (ustatus) {
+			if (display)
+				NV_INFO(dev, "%s - TP%d: Unhandled ustatus 0x%08x\n", name, i, ustatus);
+		}
+		nv_wr32(dev, ustatus_addr, 0xc0000000);
+	}
+
+	if (!tps && display)
+		NV_INFO(dev, "%s - No TPs claiming errors?\n", name);
+}
+
+static int
+nv50_pgraph_trap_handler(struct drm_device *dev, u32 display, u64 inst, u32 chid)
+{
+	u32 status = nv_rd32(dev, 0x400108);
+	u32 ustatus;
+
+	if (!status && display) {
+		NV_INFO(dev, "PGRAPH - TRAP: no units reporting traps?\n");
+		return 1;
+	}
+
+	/* DISPATCH: Relays commands to other units and handles NOTIFY,
+	 * COND, QUERY. If you get a trap from it, the command is still stuck
+	 * in DISPATCH and you need to do something about it. */
+	if (status & 0x001) {
+		ustatus = nv_rd32(dev, 0x400804) & 0x7fffffff;
+		if (!ustatus && display) {
+			NV_INFO(dev, "PGRAPH_TRAP_DISPATCH - no ustatus?\n");
+		}
+
+		nv_wr32(dev, 0x400500, 0x00000000);
+
+		/* Known to be triggered by screwed up NOTIFY and COND... */
+		if (ustatus & 0x00000001) {
+			u32 addr = nv_rd32(dev, 0x400808);
+			u32 subc = (addr & 0x00070000) >> 16;
+			u32 mthd = (addr & 0x00001ffc);
+			u32 datal = nv_rd32(dev, 0x40080c);
+			u32 datah = nv_rd32(dev, 0x400810);
+			u32 class = nv_rd32(dev, 0x400814);
+			u32 r848 = nv_rd32(dev, 0x400848);
+
+			NV_INFO(dev, "PGRAPH - TRAP DISPATCH_FAULT\n");
+			if (display && (addr & 0x80000000)) {
+				NV_INFO(dev, "PGRAPH - ch %d (0x%010llx) "
+					     "subc %d class 0x%04x mthd 0x%04x "
+					     "data 0x%08x%08x "
+					     "400808 0x%08x 400848 0x%08x\n",
+					chid, inst, subc, class, mthd, datah,
+					datal, addr, r848);
+			} else
+			if (display) {
+				NV_INFO(dev, "PGRAPH - no stuck command?\n");
+			}
+
+			nv_wr32(dev, 0x400808, 0);
+			nv_wr32(dev, 0x4008e8, nv_rd32(dev, 0x4008e8) & 3);
+			nv_wr32(dev, 0x400848, 0);
+			ustatus &= ~0x00000001;
+		}
+
+		if (ustatus & 0x00000002) {
+			u32 addr = nv_rd32(dev, 0x40084c);
+			u32 subc = (addr & 0x00070000) >> 16;
+			u32 mthd = (addr & 0x00001ffc);
+			u32 data = nv_rd32(dev, 0x40085c);
+			u32 class = nv_rd32(dev, 0x400814);
+
+			NV_INFO(dev, "PGRAPH - TRAP DISPATCH_QUERY\n");
+			if (display && (addr & 0x80000000)) {
+				NV_INFO(dev, "PGRAPH - ch %d (0x%010llx) "
+					     "subc %d class 0x%04x mthd 0x%04x "
+					     "data 0x%08x 40084c 0x%08x\n",
+					chid, inst, subc, class, mthd,
+					data, addr);
+			} else
+			if (display) {
+				NV_INFO(dev, "PGRAPH - no stuck command?\n");
+			}
+
+			nv_wr32(dev, 0x40084c, 0);
+			ustatus &= ~0x00000002;
+		}
+
+		if (ustatus && display) {
+			NV_INFO(dev, "PGRAPH - TRAP_DISPATCH (unknown "
+				      "0x%08x)\n", ustatus);
+		}
+
+		nv_wr32(dev, 0x400804, 0xc0000000);
+		nv_wr32(dev, 0x400108, 0x001);
+		status &= ~0x001;
+		if (!status)
+			return 0;
+	}
+
+	/* M2MF: Memory to memory copy engine. */
+	if (status & 0x002) {
+		u32 ustatus = nv_rd32(dev, 0x406800) & 0x7fffffff;
+		if (display) {
+			NV_INFO(dev, "PGRAPH - TRAP_M2MF");
+			nouveau_bitfield_print(nv50_graph_trap_m2mf, ustatus);
+			printk("\n");
+			NV_INFO(dev, "PGRAPH - TRAP_M2MF %08x %08x %08x %08x\n",
+				nv_rd32(dev, 0x406804), nv_rd32(dev, 0x406808),
+				nv_rd32(dev, 0x40680c), nv_rd32(dev, 0x406810));
+
+		}
+
+		/* No sane way found yet -- just reset the bugger. */
+		nv_wr32(dev, 0x400040, 2);
+		nv_wr32(dev, 0x400040, 0);
+		nv_wr32(dev, 0x406800, 0xc0000000);
+		nv_wr32(dev, 0x400108, 0x002);
+		status &= ~0x002;
+	}
+
+	/* VFETCH: Fetches data from vertex buffers. */
+	if (status & 0x004) {
+		u32 ustatus = nv_rd32(dev, 0x400c04) & 0x7fffffff;
+		if (display) {
+			NV_INFO(dev, "PGRAPH - TRAP_VFETCH");
+			nouveau_bitfield_print(nv50_graph_trap_vfetch, ustatus);
+			printk("\n");
+			NV_INFO(dev, "PGRAPH - TRAP_VFETCH %08x %08x %08x %08x\n",
+				nv_rd32(dev, 0x400c00), nv_rd32(dev, 0x400c08),
+				nv_rd32(dev, 0x400c0c), nv_rd32(dev, 0x400c10));
+		}
+
+		nv_wr32(dev, 0x400c04, 0xc0000000);
+		nv_wr32(dev, 0x400108, 0x004);
+		status &= ~0x004;
+	}
+
+	/* STRMOUT: DirectX streamout / OpenGL transform feedback. */
+	if (status & 0x008) {
+		ustatus = nv_rd32(dev, 0x401800) & 0x7fffffff;
+		if (display) {
+			NV_INFO(dev, "PGRAPH - TRAP_STRMOUT");
+			nouveau_bitfield_print(nv50_graph_trap_strmout, ustatus);
+			printk("\n");
+			NV_INFO(dev, "PGRAPH - TRAP_STRMOUT %08x %08x %08x %08x\n",
+				nv_rd32(dev, 0x401804), nv_rd32(dev, 0x401808),
+				nv_rd32(dev, 0x40180c), nv_rd32(dev, 0x401810));
+
+		}
+
+		/* No sane way found yet -- just reset the bugger. */
+		nv_wr32(dev, 0x400040, 0x80);
+		nv_wr32(dev, 0x400040, 0);
+		nv_wr32(dev, 0x401800, 0xc0000000);
+		nv_wr32(dev, 0x400108, 0x008);
+		status &= ~0x008;
+	}
+
+	/* CCACHE: Handles code and c[] caches and fills them. */
+	if (status & 0x010) {
+		ustatus = nv_rd32(dev, 0x405018) & 0x7fffffff;
+		if (display) {
+			NV_INFO(dev, "PGRAPH - TRAP_CCACHE");
+			nouveau_bitfield_print(nv50_graph_trap_ccache, ustatus);
+			printk("\n");
+			NV_INFO(dev, "PGRAPH - TRAP_CCACHE %08x %08x %08x %08x"
+				     " %08x %08x %08x\n",
+				nv_rd32(dev, 0x405800), nv_rd32(dev, 0x405804),
+				nv_rd32(dev, 0x405808), nv_rd32(dev, 0x40580c),
+				nv_rd32(dev, 0x405810), nv_rd32(dev, 0x405814),
+				nv_rd32(dev, 0x40581c));
+
+		}
+
+		nv_wr32(dev, 0x405018, 0xc0000000);
+		nv_wr32(dev, 0x400108, 0x010);
+		status &= ~0x010;
+	}
+
+	/* Unknown, not seen yet... 0x402000 is the only trap status reg
+	 * remaining, so try to handle it anyway. Perhaps related to that
+	 * unknown DMA slot on tesla? */
+	if (status & 0x20) {
+		ustatus = nv_rd32(dev, 0x402000) & 0x7fffffff;
+		if (display)
+			NV_INFO(dev, "PGRAPH - TRAP_UNKC04 0x%08x\n", ustatus);
+		nv_wr32(dev, 0x402000, 0xc0000000);
+		/* no status modification on purpose */
+	}
+
+	/* TEXTURE: CUDA texturing units */
+	if (status & 0x040) {
+		nv50_pgraph_tp_trap(dev, 6, 0x408900, 0x408600, display,
+				    "PGRAPH - TRAP_TEXTURE");
+		nv_wr32(dev, 0x400108, 0x040);
+		status &= ~0x040;
+	}
+
+	/* MP: CUDA execution engines. */
+	if (status & 0x080) {
+		nv50_pgraph_tp_trap(dev, 7, 0x408314, 0x40831c, display,
+				    "PGRAPH - TRAP_MP");
+		nv_wr32(dev, 0x400108, 0x080);
+		status &= ~0x080;
+	}
+
+	/* TPDMA:  Handles TP-initiated uncached memory accesses:
+	 * l[], g[], stack, 2d surfaces, render targets. */
+	if (status & 0x100) {
+		nv50_pgraph_tp_trap(dev, 8, 0x408e08, 0x408708, display,
+				    "PGRAPH - TRAP_TPDMA");
+		nv_wr32(dev, 0x400108, 0x100);
+		status &= ~0x100;
+	}
+
+	if (status) {
+		if (display)
+			NV_INFO(dev, "PGRAPH - TRAP: unknown 0x%08x\n", status);
+		nv_wr32(dev, 0x400108, status);
+	}
+
+	return 1;
+}
+
+static int
+nv50_graph_isr_chid(struct drm_device *dev, u64 inst)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_channel *chan;
+	unsigned long flags;
+	int i;
+
+	spin_lock_irqsave(&dev_priv->channels.lock, flags);
+	for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
+		chan = dev_priv->channels.ptr[i];
+		if (!chan || !chan->ramin)
+			continue;
+
+		if (inst == chan->ramin->vinst)
+			break;
+	}
+	spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
+	return i;
+}
+
+static void
+nv50_graph_isr(struct drm_device *dev)
+{
+	u32 stat;
+
+	while ((stat = nv_rd32(dev, 0x400100))) {
+		u64 inst = (u64)(nv_rd32(dev, 0x40032c) & 0x0fffffff) << 12;
+		u32 chid = nv50_graph_isr_chid(dev, inst);
+		u32 addr = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR);
+		u32 subc = (addr & 0x00070000) >> 16;
+		u32 mthd = (addr & 0x00001ffc);
+		u32 data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA);
+		u32 class = nv_rd32(dev, 0x400814);
+		u32 show = stat;
+
+		if (stat & 0x00000010) {
+			if (!nouveau_gpuobj_mthd_call2(dev, chid, class,
+						       mthd, data))
+				show &= ~0x00000010;
+		}
+
+		if (stat & 0x00001000) {
+			nv_wr32(dev, 0x400500, 0x00000000);
+			nv_wr32(dev, 0x400100, 0x00001000);
+			nv_mask(dev, 0x40013c, 0x00001000, 0x00000000);
+			nv50_graph_context_switch(dev);
+			stat &= ~0x00001000;
+			show &= ~0x00001000;
+		}
+
+		show = (show && nouveau_ratelimit()) ? show : 0;
+
+		if (show & 0x00100000) {
+			u32 ecode = nv_rd32(dev, 0x400110);
+			NV_INFO(dev, "PGRAPH - DATA_ERROR ");
+			nouveau_enum_print(nv50_data_error_names, ecode);
+			printk("\n");
+		}
+
+		if (stat & 0x00200000) {
+			if (!nv50_pgraph_trap_handler(dev, show, inst, chid))
+				show &= ~0x00200000;
+		}
+
+		nv_wr32(dev, 0x400100, stat);
+		nv_wr32(dev, 0x400500, 0x00010001);
+
+		if (show) {
+			NV_INFO(dev, "PGRAPH -");
+			nouveau_bitfield_print(nv50_graph_intr, show);
+			printk("\n");
+			NV_INFO(dev, "PGRAPH - ch %d (0x%010llx) subc %d "
+				     "class 0x%04x mthd 0x%04x data 0x%08x\n",
+				chid, inst, subc, class, mthd, data);
+		}
+	}
+
+	if (nv_rd32(dev, 0x400824) & (1 << 31))
+		nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) & ~(1 << 31));
+}
diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c
index b773229..2e1b1cd 100644
--- a/drivers/gpu/drm/nouveau/nv50_instmem.c
+++ b/drivers/gpu/drm/nouveau/nv50_instmem.c
@@ -27,14 +27,20 @@
 
 #include "drmP.h"
 #include "drm.h"
+
 #include "nouveau_drv.h"
+#include "nouveau_vm.h"
+
+#define BAR1_VM_BASE 0x0020000000ULL
+#define BAR1_VM_SIZE pci_resource_len(dev->pdev, 1)
+#define BAR3_VM_BASE 0x0000000000ULL
+#define BAR3_VM_SIZE pci_resource_len(dev->pdev, 3)
 
 struct nv50_instmem_priv {
 	uint32_t save1700[5]; /* 0x1700->0x1710 */
 
-	struct nouveau_gpuobj *pramin_pt;
-	struct nouveau_gpuobj *pramin_bar;
-	struct nouveau_gpuobj *fb_bar;
+	struct nouveau_gpuobj *bar1_dmaobj;
+	struct nouveau_gpuobj *bar3_dmaobj;
 };
 
 static void
@@ -48,6 +54,7 @@
 		return;
 
 	nouveau_gpuobj_ref(NULL, &chan->ramfc);
+	nouveau_vm_ref(NULL, &chan->vm, chan->vm_pd);
 	nouveau_gpuobj_ref(NULL, &chan->vm_pd);
 	if (chan->ramin_heap.free_stack.next)
 		drm_mm_takedown(&chan->ramin_heap);
@@ -56,14 +63,14 @@
 }
 
 static int
-nv50_channel_new(struct drm_device *dev, u32 size,
+nv50_channel_new(struct drm_device *dev, u32 size, struct nouveau_vm *vm,
 		 struct nouveau_channel **pchan)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	u32 pgd = (dev_priv->chipset == 0x50) ? 0x1400 : 0x0200;
 	u32  fc = (dev_priv->chipset == 0x50) ? 0x0000 : 0x4200;
 	struct nouveau_channel *chan;
-	int ret;
+	int ret, i;
 
 	chan = kzalloc(sizeof(*chan), GFP_KERNEL);
 	if (!chan)
@@ -92,6 +99,17 @@
 		return ret;
 	}
 
+	for (i = 0; i < 0x4000; i += 8) {
+		nv_wo32(chan->vm_pd, i + 0, 0x00000000);
+		nv_wo32(chan->vm_pd, i + 4, 0xdeadcafe);
+	}
+
+	ret = nouveau_vm_ref(vm, &chan->vm, chan->vm_pd);
+	if (ret) {
+		nv50_channel_del(&chan);
+		return ret;
+	}
+
 	ret = nouveau_gpuobj_new_fake(dev, chan->ramin->pinst == ~0 ? ~0 :
 				      chan->ramin->pinst + fc,
 				      chan->ramin->vinst + fc, 0x100,
@@ -111,6 +129,7 @@
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nv50_instmem_priv *priv;
 	struct nouveau_channel *chan;
+	struct nouveau_vm *vm;
 	int ret, i;
 	u32 tmp;
 
@@ -127,112 +146,87 @@
 	ret = drm_mm_init(&dev_priv->ramin_heap, 0, dev_priv->ramin_size);
 	if (ret) {
 		NV_ERROR(dev, "Failed to init RAMIN heap\n");
-		return -ENOMEM;
+		goto error;
 	}
 
-	/* we need a channel to plug into the hw to control the BARs */
-	ret = nv50_channel_new(dev, 128*1024, &dev_priv->fifos[0]);
+	/* BAR3 */
+	ret = nouveau_vm_new(dev, BAR3_VM_BASE, BAR3_VM_SIZE, BAR3_VM_BASE,
+			     &dev_priv->bar3_vm);
 	if (ret)
-		return ret;
-	chan = dev_priv->fifos[127] = dev_priv->fifos[0];
+		goto error;
 
-	/* allocate page table for PRAMIN BAR */
-	ret = nouveau_gpuobj_new(dev, chan, (dev_priv->ramin_size >> 12) * 8,
-				 0x1000, NVOBJ_FLAG_ZERO_ALLOC,
-				 &priv->pramin_pt);
+	ret = nouveau_gpuobj_new(dev, NULL, (BAR3_VM_SIZE >> 12) * 8,
+				 0x1000, NVOBJ_FLAG_DONT_MAP |
+				 NVOBJ_FLAG_ZERO_ALLOC,
+				 &dev_priv->bar3_vm->pgt[0].obj[0]);
 	if (ret)
-		return ret;
+		goto error;
+	dev_priv->bar3_vm->pgt[0].refcount[0] = 1;
 
-	nv_wo32(chan->vm_pd, 0x0000, priv->pramin_pt->vinst | 0x63);
-	nv_wo32(chan->vm_pd, 0x0004, 0);
+	nv50_instmem_map(dev_priv->bar3_vm->pgt[0].obj[0]);
 
-	/* DMA object for PRAMIN BAR */
-	ret = nouveau_gpuobj_new(dev, chan, 6*4, 16, 0, &priv->pramin_bar);
+	ret = nv50_channel_new(dev, 128 * 1024, dev_priv->bar3_vm, &chan);
 	if (ret)
-		return ret;
-	nv_wo32(priv->pramin_bar, 0x00, 0x7fc00000);
-	nv_wo32(priv->pramin_bar, 0x04, dev_priv->ramin_size - 1);
-	nv_wo32(priv->pramin_bar, 0x08, 0x00000000);
-	nv_wo32(priv->pramin_bar, 0x0c, 0x00000000);
-	nv_wo32(priv->pramin_bar, 0x10, 0x00000000);
-	nv_wo32(priv->pramin_bar, 0x14, 0x00000000);
+		goto error;
+	dev_priv->channels.ptr[0] = dev_priv->channels.ptr[127] = chan;
 
-	/* map channel into PRAMIN, gpuobj didn't do it for us */
-	ret = nv50_instmem_bind(dev, chan->ramin);
+	ret = nv50_gpuobj_dma_new(chan, 0x0000, BAR3_VM_BASE, BAR3_VM_SIZE,
+				  NV_MEM_TARGET_VM, NV_MEM_ACCESS_VM,
+				  NV_MEM_TYPE_VM, NV_MEM_COMP_VM,
+				  &priv->bar3_dmaobj);
 	if (ret)
-		return ret;
+		goto error;
 
-	/* poke regs... */
 	nv_wr32(dev, 0x001704, 0x00000000 | (chan->ramin->vinst >> 12));
 	nv_wr32(dev, 0x001704, 0x40000000 | (chan->ramin->vinst >> 12));
-	nv_wr32(dev, 0x00170c, 0x80000000 | (priv->pramin_bar->cinst >> 4));
-
-	tmp = nv_ri32(dev, 0);
-	nv_wi32(dev, 0, ~tmp);
-	if (nv_ri32(dev, 0) != ~tmp) {
-		NV_ERROR(dev, "PRAMIN readback failed\n");
-		return -EIO;
-	}
-	nv_wi32(dev, 0, tmp);
-
-	dev_priv->ramin_available = true;
-
-	/* Determine VM layout */
-	dev_priv->vm_gart_base = roundup(NV50_VM_BLOCK, NV50_VM_BLOCK);
-	dev_priv->vm_gart_size = NV50_VM_BLOCK;
-
-	dev_priv->vm_vram_base = dev_priv->vm_gart_base + dev_priv->vm_gart_size;
-	dev_priv->vm_vram_size = dev_priv->vram_size;
-	if (dev_priv->vm_vram_size > NV50_VM_MAX_VRAM)
-		dev_priv->vm_vram_size = NV50_VM_MAX_VRAM;
-	dev_priv->vm_vram_size = roundup(dev_priv->vm_vram_size, NV50_VM_BLOCK);
-	dev_priv->vm_vram_pt_nr = dev_priv->vm_vram_size / NV50_VM_BLOCK;
-
-	dev_priv->vm_end = dev_priv->vm_vram_base + dev_priv->vm_vram_size;
-
-	NV_DEBUG(dev, "NV50VM: GART 0x%016llx-0x%016llx\n",
-		 dev_priv->vm_gart_base,
-		 dev_priv->vm_gart_base + dev_priv->vm_gart_size - 1);
-	NV_DEBUG(dev, "NV50VM: VRAM 0x%016llx-0x%016llx\n",
-		 dev_priv->vm_vram_base,
-		 dev_priv->vm_vram_base + dev_priv->vm_vram_size - 1);
-
-	/* VRAM page table(s), mapped into VM at +1GiB  */
-	for (i = 0; i < dev_priv->vm_vram_pt_nr; i++) {
-		ret = nouveau_gpuobj_new(dev, NULL, NV50_VM_BLOCK / 0x10000 * 8,
-					 0, NVOBJ_FLAG_ZERO_ALLOC,
-					 &chan->vm_vram_pt[i]);
-		if (ret) {
-			NV_ERROR(dev, "Error creating VRAM PGT: %d\n", ret);
-			dev_priv->vm_vram_pt_nr = i;
-			return ret;
-		}
-		dev_priv->vm_vram_pt[i] = chan->vm_vram_pt[i];
-
-		nv_wo32(chan->vm_pd, 0x10 + (i*8),
-			chan->vm_vram_pt[i]->vinst | 0x61);
-		nv_wo32(chan->vm_pd, 0x14 + (i*8), 0);
-	}
-
-	/* DMA object for FB BAR */
-	ret = nouveau_gpuobj_new(dev, chan, 6*4, 16, 0, &priv->fb_bar);
-	if (ret)
-		return ret;
-	nv_wo32(priv->fb_bar, 0x00, 0x7fc00000);
-	nv_wo32(priv->fb_bar, 0x04, 0x40000000 +
-				    pci_resource_len(dev->pdev, 1) - 1);
-	nv_wo32(priv->fb_bar, 0x08, 0x40000000);
-	nv_wo32(priv->fb_bar, 0x0c, 0x00000000);
-	nv_wo32(priv->fb_bar, 0x10, 0x00000000);
-	nv_wo32(priv->fb_bar, 0x14, 0x00000000);
+	nv_wr32(dev, 0x00170c, 0x80000000 | (priv->bar3_dmaobj->cinst >> 4));
 
 	dev_priv->engine.instmem.flush(dev);
+	dev_priv->ramin_available = true;
 
-	nv_wr32(dev, 0x001708, 0x80000000 | (priv->fb_bar->cinst >> 4));
+	tmp = nv_ro32(chan->ramin, 0);
+	nv_wo32(chan->ramin, 0, ~tmp);
+	if (nv_ro32(chan->ramin, 0) != ~tmp) {
+		NV_ERROR(dev, "PRAMIN readback failed\n");
+		ret = -EIO;
+		goto error;
+	}
+	nv_wo32(chan->ramin, 0, tmp);
+
+	/* BAR1 */
+	ret = nouveau_vm_new(dev, BAR1_VM_BASE, BAR1_VM_SIZE, BAR1_VM_BASE, &vm);
+	if (ret)
+		goto error;
+
+	ret = nouveau_vm_ref(vm, &dev_priv->bar1_vm, chan->vm_pd);
+	if (ret)
+		goto error;
+	nouveau_vm_ref(NULL, &vm, NULL);
+
+	ret = nv50_gpuobj_dma_new(chan, 0x0000, BAR1_VM_BASE, BAR1_VM_SIZE,
+				  NV_MEM_TARGET_VM, NV_MEM_ACCESS_VM,
+				  NV_MEM_TYPE_VM, NV_MEM_COMP_VM,
+				  &priv->bar1_dmaobj);
+	if (ret)
+		goto error;
+
+	nv_wr32(dev, 0x001708, 0x80000000 | (priv->bar1_dmaobj->cinst >> 4));
 	for (i = 0; i < 8; i++)
 		nv_wr32(dev, 0x1900 + (i*4), 0);
 
+	/* Create shared channel VM, space is reserved at the beginning
+	 * to catch "NULL pointer" references
+	 */
+	ret = nouveau_vm_new(dev, 0, (1ULL << 40), 0x0020000000ULL,
+			     &dev_priv->chan_vm);
+	if (ret)
+		return ret;
+
 	return 0;
+
+error:
+	nv50_instmem_takedown(dev);
+	return ret;
 }
 
 void
@@ -240,7 +234,7 @@
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv;
-	struct nouveau_channel *chan = dev_priv->fifos[0];
+	struct nouveau_channel *chan = dev_priv->channels.ptr[0];
 	int i;
 
 	NV_DEBUG(dev, "\n");
@@ -250,23 +244,23 @@
 
 	dev_priv->ramin_available = false;
 
-	/* Restore state from before init */
+	nouveau_vm_ref(NULL, &dev_priv->chan_vm, NULL);
+
 	for (i = 0x1700; i <= 0x1710; i += 4)
 		nv_wr32(dev, i, priv->save1700[(i - 0x1700) / 4]);
 
-	nouveau_gpuobj_ref(NULL, &priv->fb_bar);
-	nouveau_gpuobj_ref(NULL, &priv->pramin_bar);
-	nouveau_gpuobj_ref(NULL, &priv->pramin_pt);
+	nouveau_gpuobj_ref(NULL, &priv->bar3_dmaobj);
+	nouveau_gpuobj_ref(NULL, &priv->bar1_dmaobj);
 
-	/* Destroy dummy channel */
-	if (chan) {
-		for (i = 0; i < dev_priv->vm_vram_pt_nr; i++)
-			nouveau_gpuobj_ref(NULL, &chan->vm_vram_pt[i]);
-		dev_priv->vm_vram_pt_nr = 0;
+	nouveau_vm_ref(NULL, &dev_priv->bar1_vm, chan->vm_pd);
+	dev_priv->channels.ptr[127] = 0;
+	nv50_channel_del(&dev_priv->channels.ptr[0]);
 
-		nv50_channel_del(&dev_priv->fifos[0]);
-		dev_priv->fifos[127] = NULL;
-	}
+	nouveau_gpuobj_ref(NULL, &dev_priv->bar3_vm->pgt[0].obj[0]);
+	nouveau_vm_ref(NULL, &dev_priv->bar3_vm, NULL);
+
+	if (dev_priv->ramin_heap.free_stack.next)
+		drm_mm_takedown(&dev_priv->ramin_heap);
 
 	dev_priv->engine.instmem.priv = NULL;
 	kfree(priv);
@@ -276,16 +270,8 @@
 nv50_instmem_suspend(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_channel *chan = dev_priv->fifos[0];
-	struct nouveau_gpuobj *ramin = chan->ramin;
-	int i;
 
-	ramin->im_backing_suspend = vmalloc(ramin->size);
-	if (!ramin->im_backing_suspend)
-		return -ENOMEM;
-
-	for (i = 0; i < ramin->size; i += 4)
-		ramin->im_backing_suspend[i/4] = nv_ri32(dev, i);
+	dev_priv->ramin_available = false;
 	return 0;
 }
 
@@ -294,146 +280,121 @@
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv;
-	struct nouveau_channel *chan = dev_priv->fifos[0];
-	struct nouveau_gpuobj *ramin = chan->ramin;
+	struct nouveau_channel *chan = dev_priv->channels.ptr[0];
 	int i;
 
-	dev_priv->ramin_available = false;
-	dev_priv->ramin_base = ~0;
-	for (i = 0; i < ramin->size; i += 4)
-		nv_wo32(ramin, i, ramin->im_backing_suspend[i/4]);
-	dev_priv->ramin_available = true;
-	vfree(ramin->im_backing_suspend);
-	ramin->im_backing_suspend = NULL;
-
 	/* Poke the relevant regs, and pray it works :) */
 	nv_wr32(dev, NV50_PUNK_BAR_CFG_BASE, (chan->ramin->vinst >> 12));
 	nv_wr32(dev, NV50_PUNK_UNK1710, 0);
 	nv_wr32(dev, NV50_PUNK_BAR_CFG_BASE, (chan->ramin->vinst >> 12) |
 					 NV50_PUNK_BAR_CFG_BASE_VALID);
-	nv_wr32(dev, NV50_PUNK_BAR1_CTXDMA, (priv->fb_bar->cinst >> 4) |
+	nv_wr32(dev, NV50_PUNK_BAR1_CTXDMA, (priv->bar1_dmaobj->cinst >> 4) |
 					NV50_PUNK_BAR1_CTXDMA_VALID);
-	nv_wr32(dev, NV50_PUNK_BAR3_CTXDMA, (priv->pramin_bar->cinst >> 4) |
+	nv_wr32(dev, NV50_PUNK_BAR3_CTXDMA, (priv->bar3_dmaobj->cinst >> 4) |
 					NV50_PUNK_BAR3_CTXDMA_VALID);
 
 	for (i = 0; i < 8; i++)
 		nv_wr32(dev, 0x1900 + (i*4), 0);
+
+	dev_priv->ramin_available = true;
 }
 
+struct nv50_gpuobj_node {
+	struct nouveau_vram *vram;
+	struct nouveau_vma chan_vma;
+	u32 align;
+};
+
+
 int
-nv50_instmem_populate(struct drm_device *dev, struct nouveau_gpuobj *gpuobj,
-		      uint32_t *sz)
+nv50_instmem_get(struct nouveau_gpuobj *gpuobj, u32 size, u32 align)
 {
+	struct drm_device *dev = gpuobj->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
+	struct nv50_gpuobj_node *node = NULL;
 	int ret;
 
-	if (gpuobj->im_backing)
-		return -EINVAL;
+	node = kzalloc(sizeof(*node), GFP_KERNEL);
+	if (!node)
+		return -ENOMEM;
+	node->align = align;
 
-	*sz = ALIGN(*sz, 4096);
-	if (*sz == 0)
-		return -EINVAL;
+	size  = (size + 4095) & ~4095;
+	align = max(align, (u32)4096);
 
-	ret = nouveau_bo_new(dev, NULL, *sz, 0, TTM_PL_FLAG_VRAM, 0, 0x0000,
-			     true, false, &gpuobj->im_backing);
+	ret = vram->get(dev, size, align, 0, 0, &node->vram);
 	if (ret) {
-		NV_ERROR(dev, "error getting PRAMIN backing pages: %d\n", ret);
+		kfree(node);
 		return ret;
 	}
 
-	ret = nouveau_bo_pin(gpuobj->im_backing, TTM_PL_FLAG_VRAM);
-	if (ret) {
-		NV_ERROR(dev, "error pinning PRAMIN backing VRAM: %d\n", ret);
-		nouveau_bo_ref(NULL, &gpuobj->im_backing);
-		return ret;
+	gpuobj->vinst = node->vram->offset;
+
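+	/* Objects flagged NVOBJ_FLAG_VM are also mapped into the shared
+	 * channel VM; the resulting virtual address then stands in for the
+	 * raw VRAM offset as the instance address. */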
+	if (gpuobj->flags & NVOBJ_FLAG_VM) {
+		ret = nouveau_vm_get(dev_priv->chan_vm, size, 12,
+				     NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS,
+				     &node->chan_vma);
+		if (ret) {
+			vram->put(dev, &node->vram);
+			kfree(node);
+			return ret;
+		}
+
+		nouveau_vm_map(&node->chan_vma, node->vram);
+		gpuobj->vinst = node->chan_vma.offset;
 	}
 
-	gpuobj->vinst = gpuobj->im_backing->bo.mem.start << PAGE_SHIFT;
+	gpuobj->size = size;
+	gpuobj->node = node;
 	return 0;
 }
 
 void
-nv50_instmem_clear(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
+nv50_instmem_put(struct nouveau_gpuobj *gpuobj)
 {
+	struct drm_device *dev = gpuobj->dev;
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
+	struct nv50_gpuobj_node *node;
 
-	if (gpuobj && gpuobj->im_backing) {
-		if (gpuobj->im_bound)
-			dev_priv->engine.instmem.unbind(dev, gpuobj);
-		nouveau_bo_unpin(gpuobj->im_backing);
-		nouveau_bo_ref(NULL, &gpuobj->im_backing);
-		gpuobj->im_backing = NULL;
+	node = gpuobj->node;
+	gpuobj->node = NULL;
+
+	if (node->chan_vma.node) {
+		nouveau_vm_unmap(&node->chan_vma);
+		nouveau_vm_put(&node->chan_vma);
 	}
+	vram->put(dev, &node->vram);
+	kfree(node);
 }
 
 int
-nv50_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
+nv50_instmem_map(struct nouveau_gpuobj *gpuobj)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv;
-	struct nouveau_gpuobj *pramin_pt = priv->pramin_pt;
-	uint32_t pte, pte_end;
-	uint64_t vram;
+	struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
+	struct nv50_gpuobj_node *node = gpuobj->node;
+	int ret;
 
-	if (!gpuobj->im_backing || !gpuobj->im_pramin || gpuobj->im_bound)
-		return -EINVAL;
+	ret = nouveau_vm_get(dev_priv->bar3_vm, gpuobj->size, 12,
+			     NV_MEM_ACCESS_RW, &node->vram->bar_vma);
+	if (ret)
+		return ret;
 
-	NV_DEBUG(dev, "st=0x%lx sz=0x%lx\n",
-		 gpuobj->im_pramin->start, gpuobj->im_pramin->size);
-
-	pte     = (gpuobj->im_pramin->start >> 12) << 1;
-	pte_end = ((gpuobj->im_pramin->size >> 12) << 1) + pte;
-	vram    = gpuobj->vinst;
-
-	NV_DEBUG(dev, "pramin=0x%lx, pte=%d, pte_end=%d\n",
-		 gpuobj->im_pramin->start, pte, pte_end);
-	NV_DEBUG(dev, "first vram page: 0x%010llx\n", gpuobj->vinst);
-
-	vram |= 1;
-	if (dev_priv->vram_sys_base) {
-		vram += dev_priv->vram_sys_base;
-		vram |= 0x30;
-	}
-
-	while (pte < pte_end) {
-		nv_wo32(pramin_pt, (pte * 4) + 0, lower_32_bits(vram));
-		nv_wo32(pramin_pt, (pte * 4) + 4, upper_32_bits(vram));
-		vram += 0x1000;
-		pte += 2;
-	}
-	dev_priv->engine.instmem.flush(dev);
-
-	nv50_vm_flush(dev, 6);
-
-	gpuobj->im_bound = 1;
+	nouveau_vm_map(&node->vram->bar_vma, node->vram);
+	gpuobj->pinst = node->vram->bar_vma.offset;
 	return 0;
 }
 
-int
-nv50_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
+void
+nv50_instmem_unmap(struct nouveau_gpuobj *gpuobj)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv;
-	uint32_t pte, pte_end;
+	struct nv50_gpuobj_node *node = gpuobj->node;
 
-	if (gpuobj->im_bound == 0)
-		return -EINVAL;
-
-	/* can happen during late takedown */
-	if (unlikely(!dev_priv->ramin_available))
-		return 0;
-
-	pte     = (gpuobj->im_pramin->start >> 12) << 1;
-	pte_end = ((gpuobj->im_pramin->size >> 12) << 1) + pte;
-
-	while (pte < pte_end) {
-		nv_wo32(priv->pramin_pt, (pte * 4) + 0, 0x00000000);
-		nv_wo32(priv->pramin_pt, (pte * 4) + 4, 0x00000000);
-		pte += 2;
+	if (node->vram->bar_vma.node) {
+		nouveau_vm_unmap(&node->vram->bar_vma);
+		nouveau_vm_put(&node->vram->bar_vma);
 	}
-	dev_priv->engine.instmem.flush(dev);
-
-	gpuobj->im_bound = 0;
-	return 0;
 }
 
 void
@@ -452,11 +413,3 @@
 		NV_ERROR(dev, "PRAMIN flush timeout\n");
 }
 
-void
-nv50_vm_flush(struct drm_device *dev, int engine)
-{
-	nv_wr32(dev, 0x100c80, (engine << 16) | 1);
-	if (!nv_wait(dev, 0x100c80, 0x00000001, 0x00000000))
-		NV_ERROR(dev, "vm flush timeout: engine %d\n", engine);
-}
-
diff --git a/drivers/gpu/drm/nouveau/nv50_vm.c b/drivers/gpu/drm/nouveau/nv50_vm.c
new file mode 100644
index 0000000..38e523e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv50_vm.c
@@ -0,0 +1,180 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+
+#include "nouveau_drv.h"
+#include "nouveau_vm.h"
+
+void
+nv50_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde,
+		struct nouveau_gpuobj *pgt[2])
+{
+	struct drm_nouveau_private *dev_priv = pgd->dev->dev_private;
+	u64 phys = 0xdeadcafe00000000ULL;
+	u32 coverage = 0;
+
+	if (pgt[0]) {
+		phys = 0x00000003 | pgt[0]->vinst; /* present, 4KiB pages */
+		coverage = (pgt[0]->size >> 3) << 12;
+	} else
+	if (pgt[1]) {
+		phys = 0x00000001 | pgt[1]->vinst; /* present */
+		coverage = (pgt[1]->size >> 3) << 16;
+	}
+
+	if (phys & 1) {
+		if (dev_priv->vram_sys_base) {
+			phys += dev_priv->vram_sys_base;
+			phys |= 0x30;
+		}
+
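+		/* Bits 5:6 look like a page-table size hint derived from how
+		 * much address space the table covers; the exact meaning
+		 * comes from reverse engineering and isn't fully understood. */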
+		if (coverage <= 32 * 1024 * 1024)
+			phys |= 0x60;
+		else if (coverage <= 64 * 1024 * 1024)
+			phys |= 0x40;
+		else if (coverage < 128 * 1024 * 1024)
+			phys |= 0x20;
+	}
+
+	nv_wo32(pgd, (pde * 8) + 0, lower_32_bits(phys));
+	nv_wo32(pgd, (pde * 8) + 4, upper_32_bits(phys));
+}
+
+static inline u64
+nv50_vm_addr(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
+	     u64 phys, u32 memtype, u32 target)
+{
+	struct drm_nouveau_private *dev_priv = pgt->dev->dev_private;
+
+	phys |= 1; /* present */
+	phys |= (u64)memtype << 40;
+
+	/* IGPs don't have real VRAM, re-target to stolen system memory */
+	if (target == 0 && dev_priv->vram_sys_base) {
+		phys  += dev_priv->vram_sys_base;
+		target = 3;
+	}
+
+	phys |= target << 4;
+
+	if (vma->access & NV_MEM_ACCESS_SYS)
+		phys |= (1 << 6);
+
+	if (!(vma->access & NV_MEM_ACCESS_WO))
+		phys |= (1 << 3);
+
+	return phys;
+}
+
+void
+nv50_vm_map(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
+	    struct nouveau_vram *mem, u32 pte, u32 cnt, u64 phys)
+{
+	u32 block;
+	int i;
+
+	phys  = nv50_vm_addr(vma, pgt, phys, mem->memtype, 0);
+	pte <<= 3;
+	cnt <<= 3;
+
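+	/* Write PTEs in the largest naturally-aligned runs possible; the
+	 * value stored in bits 7+ of the low word appears to tell the MMU
+	 * how many contiguous pages share this mapping (log2 encoded). */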
+	while (cnt) {
+		u32 offset_h = upper_32_bits(phys);
+		u32 offset_l = lower_32_bits(phys);
+
+		for (i = 7; i >= 0; i--) {
+			block = 1 << (i + 3);
+			if (cnt >= block && !(pte & (block - 1)))
+				break;
+		}
+		offset_l |= (i << 7);
+
+		phys += block << (vma->node->type - 3);
+		cnt  -= block;
+
+		while (block) {
+			nv_wo32(pgt, pte + 0, offset_l);
+			nv_wo32(pgt, pte + 4, offset_h);
+			pte += 8;
+			block -= 8;
+		}
+	}
+}
+
+void
+nv50_vm_map_sg(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
+	       u32 pte, dma_addr_t *list, u32 cnt)
+{
+	pte <<= 3;
+	while (cnt--) {
+		u64 phys = nv50_vm_addr(vma, pgt, (u64)*list++, 0, 2);
+		nv_wo32(pgt, pte + 0, lower_32_bits(phys));
+		nv_wo32(pgt, pte + 4, upper_32_bits(phys));
+		pte += 8;
+	}
+}
+
+void
+nv50_vm_unmap(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt)
+{
+	pte <<= 3;
+	while (cnt--) {
+		nv_wo32(pgt, pte + 0, 0x00000000);
+		nv_wo32(pgt, pte + 4, 0x00000000);
+		pte += 8;
+	}
+}
+
+void
+nv50_vm_flush(struct nouveau_vm *vm)
+{
+	struct drm_nouveau_private *dev_priv = vm->dev->dev_private;
+	struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
+	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
+	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
+	struct nouveau_crypt_engine *pcrypt = &dev_priv->engine.crypt;
+
+	pinstmem->flush(vm->dev);
+
+	/* BAR */
+	if (vm != dev_priv->chan_vm) {
+		nv50_vm_flush_engine(vm->dev, 6);
+		return;
+	}
+
+	pfifo->tlb_flush(vm->dev);
+
+	if (atomic_read(&vm->pgraph_refs))
+		pgraph->tlb_flush(vm->dev);
+	if (atomic_read(&vm->pcrypt_refs))
+		pcrypt->tlb_flush(vm->dev);
+}
+
+void
+nv50_vm_flush_engine(struct drm_device *dev, int engine)
+{
+	nv_wr32(dev, 0x100c80, (engine << 16) | 1);
+	if (!nv_wait(dev, 0x100c80, 0x00000001, 0x00000000))
+		NV_ERROR(dev, "vm flush timeout: engine %d\n", engine);
+}
diff --git a/drivers/gpu/drm/nouveau/nv50_vram.c b/drivers/gpu/drm/nouveau/nv50_vram.c
new file mode 100644
index 0000000..58e98ad
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv50_vram.c
@@ -0,0 +1,190 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_mm.h"
+
+static int types[0x80] = {
+	1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	1, 1, 1, 1, 0, 0, 0, 0, 2, 2, 2, 2, 0, 0, 0, 0,
+	1, 1, 1, 1, 1, 1, 1, 0, 2, 2, 2, 2, 2, 2, 2, 0,
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 0, 0,
+	0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 2, 2, 2, 2,
+	1, 0, 2, 0, 1, 0, 2, 0, 1, 1, 2, 2, 1, 1, 0, 0
+};
+
+bool
+nv50_vram_flags_valid(struct drm_device *dev, u32 tile_flags)
+{
+	int type = (tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK) >> 8;
+
+	if (likely(type < ARRAY_SIZE(types) && types[type]))
+		return true;
+	return false;
+}
+
+void
+nv50_vram_del(struct drm_device *dev, struct nouveau_vram **pvram)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;
+	struct ttm_mem_type_manager *man = &bdev->man[TTM_PL_VRAM];
+	struct nouveau_mm *mm = man->priv;
+	struct nouveau_mm_node *this;
+	struct nouveau_vram *vram;
+
+	vram = *pvram;
+	*pvram = NULL;
+	if (unlikely(vram == NULL))
+		return;
+
+	mutex_lock(&mm->mutex);
+	while (!list_empty(&vram->regions)) {
+		this = list_first_entry(&vram->regions, struct nouveau_mm_node, rl_entry);
+
+		list_del(&this->rl_entry);
+		nouveau_mm_put(mm, this);
+	}
+	mutex_unlock(&mm->mutex);
+
+	kfree(vram);
+}
+
+int
+nv50_vram_new(struct drm_device *dev, u64 size, u32 align, u32 size_nc,
+	      u32 type, struct nouveau_vram **pvram)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;
+	struct ttm_mem_type_manager *man = &bdev->man[TTM_PL_VRAM];
+	struct nouveau_mm *mm = man->priv;
+	struct nouveau_mm_node *r;
+	struct nouveau_vram *vram;
+	int ret;
+
+	if (!types[type])
+		return -EINVAL;
+	size >>= 12;
+	align >>= 12;
+	size_nc >>= 12;
+
+	vram = kzalloc(sizeof(*vram), GFP_KERNEL);
+	if (!vram)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&vram->regions);
+	vram->dev = dev_priv->dev;
+	vram->memtype = type;
+	vram->size = size;
+
+	mutex_lock(&mm->mutex);
+	do {
+		ret = nouveau_mm_get(mm, types[type], size, size_nc, align, &r);
+		if (ret) {
+			mutex_unlock(&mm->mutex);
+			nv50_vram_del(dev, &vram);
+			return ret;
+		}
+
+		list_add_tail(&r->rl_entry, &vram->regions);
+		size -= r->length;
+	} while (size);
+	mutex_unlock(&mm->mutex);
+
+	r = list_first_entry(&vram->regions, struct nouveau_mm_node, rl_entry);
+	vram->offset = (u64)r->offset << 12;
+	*pvram = vram;
+	return 0;
+}
+
+static u32
+nv50_vram_rblock(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	int i, parts, colbits, rowbitsa, rowbitsb, banks;
+	u64 rowsize, predicted;
+	u32 r0, r4, rt, ru, rblock_size;
+
+	r0 = nv_rd32(dev, 0x100200);
+	r4 = nv_rd32(dev, 0x100204);
+	rt = nv_rd32(dev, 0x100250);
+	ru = nv_rd32(dev, 0x001540);
+	NV_DEBUG(dev, "memcfg 0x%08x 0x%08x 0x%08x 0x%08x\n", r0, r4, rt, ru);
+
+	for (i = 0, parts = 0; i < 8; i++) {
+		if (ru & (0x00010000 << i))
+			parts++;
+	}
+
+	colbits  =  (r4 & 0x0000f000) >> 12;
+	rowbitsa = ((r4 & 0x000f0000) >> 16) + 8;
+	rowbitsb = ((r4 & 0x00f00000) >> 20) + 8;
+	banks    = ((r4 & 0x01000000) ? 8 : 4);
+
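+	/* A full row spans 8 bytes per column across every partition and
+	 * bank; the predicted size is compared against the reported VRAM
+	 * size below purely as a sanity check. */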
+	rowsize = parts * banks * (1 << colbits) * 8;
+	predicted = rowsize << rowbitsa;
+	if (r0 & 0x00000004)
+		predicted += rowsize << rowbitsb;
+
+	if (predicted != dev_priv->vram_size) {
+		NV_WARN(dev, "memory controller reports %dMiB VRAM\n",
+			(u32)(dev_priv->vram_size >> 20));
+		NV_WARN(dev, "we calculated %dMiB VRAM\n",
+			(u32)(predicted >> 20));
+	}
+
+	rblock_size = rowsize;
+	if (rt & 1)
+		rblock_size *= 3;
+
+	NV_DEBUG(dev, "rblock %d bytes\n", rblock_size);
+	return rblock_size;
+}
+
+int
+nv50_vram_init(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+	dev_priv->vram_size  = nv_rd32(dev, 0x10020c);
+	dev_priv->vram_size |= (dev_priv->vram_size & 0xff) << 32;
+	dev_priv->vram_size &= 0xffffffff00ULL;
+
+	switch (dev_priv->chipset) {
+	case 0xaa:
+	case 0xac:
+	case 0xaf:
+		dev_priv->vram_sys_base = (u64)nv_rd32(dev, 0x100e10) << 12;
+		dev_priv->vram_rblock_size = 4096;
+		break;
+	default:
+		dev_priv->vram_rblock_size = nv50_vram_rblock(dev);
+		break;
+	}
+
+	return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nv84_crypt.c b/drivers/gpu/drm/nouveau/nv84_crypt.c
new file mode 100644
index 0000000..ec18ae1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv84_crypt.c
@@ -0,0 +1,140 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_util.h"
+#include "nouveau_vm.h"
+
+static void nv84_crypt_isr(struct drm_device *);
+
+int
+nv84_crypt_create_context(struct nouveau_channel *chan)
+{
+	struct drm_device *dev = chan->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_gpuobj *ramin = chan->ramin;
+	int ret;
+
+	NV_DEBUG(dev, "ch%d\n", chan->id);
+
+	ret = nouveau_gpuobj_new(dev, chan, 256, 0,
+				 NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ZERO_FREE,
+				 &chan->crypt_ctx);
+	if (ret)
+		return ret;
+
+	nv_wo32(ramin, 0xa0, 0x00190000);
+	nv_wo32(ramin, 0xa4, chan->crypt_ctx->vinst + 0xff);
+	nv_wo32(ramin, 0xa8, chan->crypt_ctx->vinst);
+	nv_wo32(ramin, 0xac, 0);
+	nv_wo32(ramin, 0xb0, 0);
+	nv_wo32(ramin, 0xb4, 0);
+
+	dev_priv->engine.instmem.flush(dev);
+	atomic_inc(&chan->vm->pcrypt_refs);
+	return 0;
+}
+
+void
+nv84_crypt_destroy_context(struct nouveau_channel *chan)
+{
+	struct drm_device *dev = chan->dev;
+	u32 inst;
+
+	if (!chan->crypt_ctx)
+		return;
+
+	inst  = (chan->ramin->vinst >> 12);
+	inst |= 0x80000000;
+
+	/* mark context as invalid if still on the hardware, not
+	 * doing this causes issues the next time PCRYPT is used,
+	 * unsurprisingly :)
+	 */
+	nv_wr32(dev, 0x10200c, 0x00000000);
+	if (nv_rd32(dev, 0x102188) == inst)
+		nv_mask(dev, 0x102188, 0x80000000, 0x00000000);
+	if (nv_rd32(dev, 0x10218c) == inst)
+		nv_mask(dev, 0x10218c, 0x80000000, 0x00000000);
+	nv_wr32(dev, 0x10200c, 0x00000010);
+
+	nouveau_gpuobj_ref(NULL, &chan->crypt_ctx);
+	atomic_dec(&chan->vm->pcrypt_refs);
+}
+
+void
+nv84_crypt_tlb_flush(struct drm_device *dev)
+{
+	nv50_vm_flush_engine(dev, 0x0a);
+}
+
+int
+nv84_crypt_init(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_crypt_engine *pcrypt = &dev_priv->engine.crypt;
+
+	if (!pcrypt->registered) {
+		NVOBJ_CLASS(dev, 0x74c1, CRYPT);
+		pcrypt->registered = true;
+	}
+
+	nv_mask(dev, 0x000200, 0x00004000, 0x00000000);
+	nv_mask(dev, 0x000200, 0x00004000, 0x00004000);
+
+	nouveau_irq_register(dev, 14, nv84_crypt_isr);
+	nv_wr32(dev, 0x102130, 0xffffffff);
+	nv_wr32(dev, 0x102140, 0xffffffbf);
+
+	nv_wr32(dev, 0x10200c, 0x00000010);
+	return 0;
+}
+
+void
+nv84_crypt_fini(struct drm_device *dev)
+{
+	nv_wr32(dev, 0x102140, 0x00000000);
+	nouveau_irq_unregister(dev, 14);
+}
+
+static void
+nv84_crypt_isr(struct drm_device *dev)
+{
+	u32 stat = nv_rd32(dev, 0x102130);
+	u32 mthd = nv_rd32(dev, 0x102190);
+	u32 data = nv_rd32(dev, 0x102194);
+	u32 inst = nv_rd32(dev, 0x102188) & 0x7fffffff;
+	int show = nouveau_ratelimit();
+
+	if (show) {
+		NV_INFO(dev, "PCRYPT_INTR: 0x%08x 0x%08x 0x%08x 0x%08x\n",
+			     stat, mthd, data, inst);
+	}
+
+	nv_wr32(dev, 0x102130, stat);
+	nv_wr32(dev, 0x10200c, 0x10);
+
+	nv50_fb_vm_trap(dev, show, "PCRYPT");
+}
diff --git a/drivers/gpu/drm/nouveau/nvc0_fbcon.c b/drivers/gpu/drm/nouveau/nvc0_fbcon.c
new file mode 100644
index 0000000..fa5d4c2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvc0_fbcon.c
@@ -0,0 +1,269 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_dma.h"
+#include "nouveau_ramht.h"
+#include "nouveau_fbcon.h"
+#include "nouveau_mm.h"
+
+int
+nvc0_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
+{
+	struct nouveau_fbdev *nfbdev = info->par;
+	struct drm_device *dev = nfbdev->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_channel *chan = dev_priv->channel;
+	int ret;
+
+	ret = RING_SPACE(chan, rect->rop == ROP_COPY ? 7 : 11);
+	if (ret)
+		return ret;
+
+	if (rect->rop != ROP_COPY) {
+		BEGIN_NVC0(chan, 2, NvSub2D, 0x02ac, 1);
+		OUT_RING  (chan, 1);
+	}
+	BEGIN_NVC0(chan, 2, NvSub2D, 0x0588, 1);
+	if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
+	    info->fix.visual == FB_VISUAL_DIRECTCOLOR)
+		OUT_RING  (chan, ((uint32_t *)info->pseudo_palette)[rect->color]);
+	else
+		OUT_RING  (chan, rect->color);
+	BEGIN_NVC0(chan, 2, NvSub2D, 0x0600, 4);
+	OUT_RING  (chan, rect->dx);
+	OUT_RING  (chan, rect->dy);
+	OUT_RING  (chan, rect->dx + rect->width);
+	OUT_RING  (chan, rect->dy + rect->height);
+	if (rect->rop != ROP_COPY) {
+		BEGIN_NVC0(chan, 2, NvSub2D, 0x02ac, 1);
+		OUT_RING  (chan, 3);
+	}
+	FIRE_RING(chan);
+	return 0;
+}
+
+int
+nvc0_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
+{
+	struct nouveau_fbdev *nfbdev = info->par;
+	struct drm_device *dev = nfbdev->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_channel *chan = dev_priv->channel;
+	int ret;
+
+	ret = RING_SPACE(chan, 12);
+	if (ret)
+		return ret;
+
+	BEGIN_NVC0(chan, 2, NvSub2D, 0x0110, 1);
+	OUT_RING  (chan, 0);
+	BEGIN_NVC0(chan, 2, NvSub2D, 0x08b0, 4);
+	OUT_RING  (chan, region->dx);
+	OUT_RING  (chan, region->dy);
+	OUT_RING  (chan, region->width);
+	OUT_RING  (chan, region->height);
+	BEGIN_NVC0(chan, 2, NvSub2D, 0x08d0, 4);
+	OUT_RING  (chan, 0);
+	OUT_RING  (chan, region->sx);
+	OUT_RING  (chan, 0);
+	OUT_RING  (chan, region->sy);
+	FIRE_RING(chan);
+	return 0;
+}
+
+int
+nvc0_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
+{
+	struct nouveau_fbdev *nfbdev = info->par;
+	struct drm_device *dev = nfbdev->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_channel *chan = dev_priv->channel;
+	uint32_t width, dwords, *data = (uint32_t *)image->data;
+	uint32_t mask = ~(~0 >> (32 - info->var.bits_per_pixel));
+	uint32_t *palette = info->pseudo_palette;
+	int ret;
+
+	if (image->depth != 1)
+		return -ENODEV;
+
+	ret = RING_SPACE(chan, 11);
+	if (ret)
+		return ret;
+
+	width = ALIGN(image->width, 32);
+	dwords = (width * image->height) >> 5;
+
+	BEGIN_NVC0(chan, 2, NvSub2D, 0x0814, 2);
+	if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
+	    info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
+		OUT_RING  (chan, palette[image->bg_color] | mask);
+		OUT_RING  (chan, palette[image->fg_color] | mask);
+	} else {
+		OUT_RING  (chan, image->bg_color);
+		OUT_RING  (chan, image->fg_color);
+	}
+	BEGIN_NVC0(chan, 2, NvSub2D, 0x0838, 2);
+	OUT_RING  (chan, image->width);
+	OUT_RING  (chan, image->height);
+	BEGIN_NVC0(chan, 2, NvSub2D, 0x0850, 4);
+	OUT_RING  (chan, 0);
+	OUT_RING  (chan, image->dx);
+	OUT_RING  (chan, 0);
+	OUT_RING  (chan, image->dy);
+
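+	/* The inline-data method seems limited to 2047 dwords per call, so
+	 * large images are pushed in chunks. */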
+	while (dwords) {
+		int push = dwords > 2047 ? 2047 : dwords;
+
+		ret = RING_SPACE(chan, push + 1);
+		if (ret)
+			return ret;
+
+		dwords -= push;
+
+		BEGIN_NVC0(chan, 6, NvSub2D, 0x0860, push);
+		OUT_RINGp(chan, data, push);
+		data += push;
+	}
+
+	FIRE_RING(chan);
+	return 0;
+}
+
+int
+nvc0_fbcon_accel_init(struct fb_info *info)
+{
+	struct nouveau_fbdev *nfbdev = info->par;
+	struct drm_device *dev = nfbdev->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_channel *chan = dev_priv->channel;
+	struct nouveau_bo *nvbo = nfbdev->nouveau_fb.nvbo;
+	int ret, format;
+
+	ret = nouveau_gpuobj_gr_new(chan, 0x902d, 0x902d);
+	if (ret)
+		return ret;
+
+	switch (info->var.bits_per_pixel) {
+	case 8:
+		format = 0xf3;
+		break;
+	case 15:
+		format = 0xf8;
+		break;
+	case 16:
+		format = 0xe8;
+		break;
+	case 32:
+		switch (info->var.transp.length) {
+		case 0: /* depth 24 */
+		case 8: /* depth 32, just use 24.. */
+			format = 0xe6;
+			break;
+		case 2: /* depth 30 */
+			format = 0xd1;
+			break;
+		default:
+			return -EINVAL;
+		}
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	ret = RING_SPACE(chan, 60);
+	if (ret) {
+		WARN_ON(1);
+		nouveau_fbcon_gpu_lockup(info);
+		return ret;
+	}
+
+	BEGIN_NVC0(chan, 2, NvSub2D, 0x0000, 1);
+	OUT_RING  (chan, 0x0000902d);
+	BEGIN_NVC0(chan, 2, NvSub2D, 0x0104, 2);
+	OUT_RING  (chan, upper_32_bits(chan->notifier_bo->bo.offset));
+	OUT_RING  (chan, lower_32_bits(chan->notifier_bo->bo.offset));
+	BEGIN_NVC0(chan, 2, NvSub2D, 0x0290, 1);
+	OUT_RING  (chan, 0);
+	BEGIN_NVC0(chan, 2, NvSub2D, 0x0888, 1);
+	OUT_RING  (chan, 1);
+	BEGIN_NVC0(chan, 2, NvSub2D, 0x02ac, 1);
+	OUT_RING  (chan, 3);
+	BEGIN_NVC0(chan, 2, NvSub2D, 0x02a0, 1);
+	OUT_RING  (chan, 0x55);
+	BEGIN_NVC0(chan, 2, NvSub2D, 0x08c0, 4);
+	OUT_RING  (chan, 0);
+	OUT_RING  (chan, 1);
+	OUT_RING  (chan, 0);
+	OUT_RING  (chan, 1);
+	BEGIN_NVC0(chan, 2, NvSub2D, 0x0580, 2);
+	OUT_RING  (chan, 4);
+	OUT_RING  (chan, format);
+	BEGIN_NVC0(chan, 2, NvSub2D, 0x02e8, 2);
+	OUT_RING  (chan, 2);
+	OUT_RING  (chan, 1);
+
+	BEGIN_NVC0(chan, 2, NvSub2D, 0x0804, 1);
+	OUT_RING  (chan, format);
+	BEGIN_NVC0(chan, 2, NvSub2D, 0x0800, 1);
+	OUT_RING  (chan, 1);
+	BEGIN_NVC0(chan, 2, NvSub2D, 0x0808, 3);
+	OUT_RING  (chan, 0);
+	OUT_RING  (chan, 0);
+	OUT_RING  (chan, 1);
+	BEGIN_NVC0(chan, 2, NvSub2D, 0x081c, 1);
+	OUT_RING  (chan, 1);
+	BEGIN_NVC0(chan, 2, NvSub2D, 0x0840, 4);
+	OUT_RING  (chan, 0);
+	OUT_RING  (chan, 1);
+	OUT_RING  (chan, 0);
+	OUT_RING  (chan, 1);
+	BEGIN_NVC0(chan, 2, NvSub2D, 0x0200, 10);
+	OUT_RING  (chan, format);
+	OUT_RING  (chan, 1);
+	OUT_RING  (chan, 0);
+	OUT_RING  (chan, 1);
+	OUT_RING  (chan, 0);
+	OUT_RING  (chan, info->fix.line_length);
+	OUT_RING  (chan, info->var.xres_virtual);
+	OUT_RING  (chan, info->var.yres_virtual);
+	OUT_RING  (chan, upper_32_bits(nvbo->vma.offset));
+	OUT_RING  (chan, lower_32_bits(nvbo->vma.offset));
+	BEGIN_NVC0(chan, 2, NvSub2D, 0x0230, 10);
+	OUT_RING  (chan, format);
+	OUT_RING  (chan, 1);
+	OUT_RING  (chan, 0);
+	OUT_RING  (chan, 1);
+	OUT_RING  (chan, 0);
+	OUT_RING  (chan, info->fix.line_length);
+	OUT_RING  (chan, info->var.xres_virtual);
+	OUT_RING  (chan, info->var.yres_virtual);
+	OUT_RING  (chan, upper_32_bits(nvbo->vma.offset));
+	OUT_RING  (chan, lower_32_bits(nvbo->vma.offset));
+	FIRE_RING (chan);
+
+	return 0;
+}
+
diff --git a/drivers/gpu/drm/nouveau/nvc0_fifo.c b/drivers/gpu/drm/nouveau/nvc0_fifo.c
index 890c2b9..e6f92c54 100644
--- a/drivers/gpu/drm/nouveau/nvc0_fifo.c
+++ b/drivers/gpu/drm/nouveau/nvc0_fifo.c
@@ -25,6 +25,49 @@
 #include "drmP.h"
 
 #include "nouveau_drv.h"
+#include "nouveau_mm.h"
+
+static void nvc0_fifo_isr(struct drm_device *);
+
+struct nvc0_fifo_priv {
+	struct nouveau_gpuobj *playlist[2];
+	int cur_playlist;
+	struct nouveau_vma user_vma;
+	int spoon_nr;
+};
+
+struct nvc0_fifo_chan {
+	struct nouveau_bo *user;
+	struct nouveau_gpuobj *ramfc;
+};
+
+static void
+nvc0_fifo_playlist_update(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
+	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
+	struct nvc0_fifo_priv *priv = pfifo->priv;
+	struct nouveau_gpuobj *cur;
+	int i, p;
+
+	cur = priv->playlist[priv->cur_playlist];
+	priv->cur_playlist = !priv->cur_playlist;
+
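+	/* Rebuild the playlist from scratch: each enabled channel gets an
+	 * 8-byte entry, then PFIFO is pointed at the new buffer; two buffers
+	 * are alternated, presumably so the hardware never reads a
+	 * half-written list. */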
+	for (i = 0, p = 0; i < 128; i++) {
+		if (!(nv_rd32(dev, 0x3004 + (i * 8)) & 1))
+			continue;
+		nv_wo32(cur, p + 0, i);
+		nv_wo32(cur, p + 4, 0x00000004);
+		p += 8;
+	}
+	pinstmem->flush(dev);
+
+	nv_wr32(dev, 0x002270, cur->vinst >> 12);
+	nv_wr32(dev, 0x002274, 0x01f00000 | (p >> 3));
+	if (!nv_wait(dev, 0x00227c, 0x00100000, 0x00000000))
+		NV_ERROR(dev, "PFIFO - playlist update failed\n");
+}
 
 void
 nvc0_fifo_disable(struct drm_device *dev)
@@ -57,12 +100,135 @@
 int
 nvc0_fifo_create_context(struct nouveau_channel *chan)
 {
+	struct drm_device *dev = chan->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
+	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
+	struct nvc0_fifo_priv *priv = pfifo->priv;
+	struct nvc0_fifo_chan *fifoch;
+	u64 ib_virt, user_vinst;
+	int ret;
+
+	chan->fifo_priv = kzalloc(sizeof(*fifoch), GFP_KERNEL);
+	if (!chan->fifo_priv)
+		return -ENOMEM;
+	fifoch = chan->fifo_priv;
+
+	/* allocate vram for control regs, map into polling area */
+	ret = nouveau_bo_new(dev, NULL, 0x1000, 0, TTM_PL_FLAG_VRAM,
+			     0, 0, true, true, &fifoch->user);
+	if (ret)
+		goto error;
+
+	ret = nouveau_bo_pin(fifoch->user, TTM_PL_FLAG_VRAM);
+	if (ret) {
+		nouveau_bo_ref(NULL, &fifoch->user);
+		goto error;
+	}
+
+	user_vinst = fifoch->user->bo.mem.start << PAGE_SHIFT;
+
+	ret = nouveau_bo_map(fifoch->user);
+	if (ret) {
+		nouveau_bo_unpin(fifoch->user);
+		nouveau_bo_ref(NULL, &fifoch->user);
+		goto error;
+	}
+
+	nouveau_vm_map_at(&priv->user_vma, chan->id * 0x1000,
+			  fifoch->user->bo.mem.mm_node);
+
+	chan->user = ioremap_wc(pci_resource_start(dev->pdev, 1) +
+				priv->user_vma.offset + (chan->id * 0x1000),
+				PAGE_SIZE);
+	if (!chan->user) {
+		ret = -ENOMEM;
+		goto error;
+	}
+
+	ib_virt = chan->pushbuf_base + chan->dma.ib_base * 4;
+
+	/* zero channel regs */
+	nouveau_bo_wr32(fifoch->user, 0x0040/4, 0);
+	nouveau_bo_wr32(fifoch->user, 0x0044/4, 0);
+	nouveau_bo_wr32(fifoch->user, 0x0048/4, 0);
+	nouveau_bo_wr32(fifoch->user, 0x004c/4, 0);
+	nouveau_bo_wr32(fifoch->user, 0x0050/4, 0);
+	nouveau_bo_wr32(fifoch->user, 0x0058/4, 0);
+	nouveau_bo_wr32(fifoch->user, 0x005c/4, 0);
+	nouveau_bo_wr32(fifoch->user, 0x0060/4, 0);
+	nouveau_bo_wr32(fifoch->user, 0x0088/4, 0);
+	nouveau_bo_wr32(fifoch->user, 0x008c/4, 0);
+
+	/* ramfc */
+	ret = nouveau_gpuobj_new_fake(dev, chan->ramin->pinst,
+				      chan->ramin->vinst, 0x100,
+				      NVOBJ_FLAG_ZERO_ALLOC, &fifoch->ramfc);
+	if (ret)
+		goto error;
+
+	nv_wo32(fifoch->ramfc, 0x08, lower_32_bits(user_vinst));
+	nv_wo32(fifoch->ramfc, 0x0c, upper_32_bits(user_vinst));
+	nv_wo32(fifoch->ramfc, 0x10, 0x0000face);
+	nv_wo32(fifoch->ramfc, 0x30, 0xfffff902);
+	nv_wo32(fifoch->ramfc, 0x48, lower_32_bits(ib_virt));
+	nv_wo32(fifoch->ramfc, 0x4c, drm_order(chan->dma.ib_max + 1) << 16 |
+				   upper_32_bits(ib_virt));
+	nv_wo32(fifoch->ramfc, 0x54, 0x00000002);
+	nv_wo32(fifoch->ramfc, 0x84, 0x20400000);
+	nv_wo32(fifoch->ramfc, 0x94, 0x30000001);
+	nv_wo32(fifoch->ramfc, 0x9c, 0x00000100);
+	nv_wo32(fifoch->ramfc, 0xa4, 0x1f1f1f1f);
+	nv_wo32(fifoch->ramfc, 0xa8, 0x1f1f1f1f);
+	nv_wo32(fifoch->ramfc, 0xac, 0x0000001f);
+	nv_wo32(fifoch->ramfc, 0xb8, 0xf8000000);
+	nv_wo32(fifoch->ramfc, 0xf8, 0x10003080); /* 0x002310 */
+	nv_wo32(fifoch->ramfc, 0xfc, 0x10000010); /* 0x002350 */
+	pinstmem->flush(dev);
+
+	nv_wr32(dev, 0x003000 + (chan->id * 8), 0xc0000000 |
+						(chan->ramin->vinst >> 12));
+	nv_wr32(dev, 0x003004 + (chan->id * 8), 0x001f0001);
+	nvc0_fifo_playlist_update(dev);
 	return 0;
+
+error:
+	pfifo->destroy_context(chan);
+	return ret;
 }
 
 void
 nvc0_fifo_destroy_context(struct nouveau_channel *chan)
 {
+	struct drm_device *dev = chan->dev;
+	struct nvc0_fifo_chan *fifoch;
+
+	nv_mask(dev, 0x003004 + (chan->id * 8), 0x00000001, 0x00000000);
+	nv_wr32(dev, 0x002634, chan->id);
+	if (!nv_wait(dev, 0x0002634, 0xffffffff, chan->id))
+		NV_WARN(dev, "0x2634 != chid: 0x%08x\n", nv_rd32(dev, 0x2634));
+
+	nvc0_fifo_playlist_update(dev);
+
+	nv_wr32(dev, 0x003000 + (chan->id * 8), 0x00000000);
+
+	if (chan->user) {
+		iounmap(chan->user);
+		chan->user = NULL;
+	}
+
+	fifoch = chan->fifo_priv;
+	chan->fifo_priv = NULL;
+	if (!fifoch)
+		return;
+
+	nouveau_gpuobj_ref(NULL, &fifoch->ramfc);
+	if (fifoch->user) {
+		nouveau_bo_unmap(fifoch->user);
+		nouveau_bo_unpin(fifoch->user);
+		nouveau_bo_ref(NULL, &fifoch->user);
+	}
+	kfree(fifoch);
 }
 
 int
@@ -77,14 +243,213 @@
 	return 0;
 }
 
+static void
+nvc0_fifo_destroy(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
+	struct nvc0_fifo_priv *priv;
+
+	priv = pfifo->priv;
+	if (!priv)
+		return;
+
+	nouveau_vm_put(&priv->user_vma);
+	nouveau_gpuobj_ref(NULL, &priv->playlist[1]);
+	nouveau_gpuobj_ref(NULL, &priv->playlist[0]);
+	kfree(priv);
+}
+
 void
 nvc0_fifo_takedown(struct drm_device *dev)
 {
+	nv_wr32(dev, 0x002140, 0x00000000);
+	nvc0_fifo_destroy(dev);
+}
+
+static int
+nvc0_fifo_create(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
+	struct nvc0_fifo_priv *priv;
+	int ret;
+
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+	pfifo->priv = priv;
+
+	ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 0x1000, 0,
+				 &priv->playlist[0]);
+	if (ret)
+		goto error;
+
+	ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 0x1000, 0,
+				 &priv->playlist[1]);
+	if (ret)
+		goto error;
+
+	ret = nouveau_vm_get(dev_priv->bar1_vm, pfifo->channels * 0x1000,
+			     12, NV_MEM_ACCESS_RW, &priv->user_vma);
+	if (ret)
+		goto error;
+
+	nouveau_irq_register(dev, 8, nvc0_fifo_isr);
+	NVOBJ_CLASS(dev, 0x506e, SW); /* nvsw */
+	return 0;
+
+error:
+	nvc0_fifo_destroy(dev);
+	return ret;
 }
 
 int
 nvc0_fifo_init(struct drm_device *dev)
 {
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
+	struct nvc0_fifo_priv *priv;
+	int ret, i;
+
+	if (!pfifo->priv) {
+		ret = nvc0_fifo_create(dev);
+		if (ret)
+			return ret;
+	}
+	priv = pfifo->priv;
+
+	/* reset PFIFO, enable all available PSUBFIFO areas */
+	nv_mask(dev, 0x000200, 0x00000100, 0x00000000);
+	nv_mask(dev, 0x000200, 0x00000100, 0x00000100);
+	nv_wr32(dev, 0x000204, 0xffffffff);
+	nv_wr32(dev, 0x002204, 0xffffffff);
+
+	priv->spoon_nr = hweight32(nv_rd32(dev, 0x002204));
+	NV_DEBUG(dev, "PFIFO: %d subfifo(s)\n", priv->spoon_nr);
+
+	/* assign engines to subfifos */
+	if (priv->spoon_nr >= 3) {
+		nv_wr32(dev, 0x002208, ~(1 << 0)); /* PGRAPH */
+		nv_wr32(dev, 0x00220c, ~(1 << 1)); /* PVP */
+		nv_wr32(dev, 0x002210, ~(1 << 1)); /* PPP */
+		nv_wr32(dev, 0x002214, ~(1 << 1)); /* PBSP */
+		nv_wr32(dev, 0x002218, ~(1 << 2)); /* PCE0 */
+		nv_wr32(dev, 0x00221c, ~(1 << 1)); /* PCE1 */
+	}
+
+	/* PSUBFIFO[n] */
+	for (i = 0; i < 3; i++) {
+		nv_mask(dev, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
+		nv_wr32(dev, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
+		nv_wr32(dev, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTR_EN */
+	}
+
+	nv_mask(dev, 0x002200, 0x00000001, 0x00000001);
+	nv_wr32(dev, 0x002254, 0x10000000 | priv->user_vma.offset >> 12);
+
+	nv_wr32(dev, 0x002a00, 0xffffffff); /* clears PFIFO.INTR bit 30 */
+	nv_wr32(dev, 0x002100, 0xffffffff);
+	nv_wr32(dev, 0x002140, 0xbfffffff);
 	return 0;
 }
 
+struct nouveau_enum nvc0_fifo_fault_unit[] = {
+	{ 0, "PGRAPH" },
+	{ 3, "PEEPHOLE" },
+	{ 4, "BAR1" },
+	{ 5, "BAR3" },
+	{ 7, "PFIFO" },
+	{}
+};
+
+struct nouveau_enum nvc0_fifo_fault_reason[] = {
+	{ 0, "PT_NOT_PRESENT" },
+	{ 1, "PT_TOO_SHORT" },
+	{ 2, "PAGE_NOT_PRESENT" },
+	{ 3, "VM_LIMIT_EXCEEDED" },
+	{}
+};
+
+struct nouveau_bitfield nvc0_fifo_subfifo_intr[] = {
+/*	{ 0x00008000, "" }	seen with null ib push */
+	{ 0x00200000, "ILLEGAL_MTHD" },
+	{ 0x00800000, "EMPTY_SUBC" },
+	{}
+};
+
+static void
+nvc0_fifo_isr_vm_fault(struct drm_device *dev, int unit)
+{
+	u32 inst = nv_rd32(dev, 0x2800 + (unit * 0x10));
+	u32 valo = nv_rd32(dev, 0x2804 + (unit * 0x10));
+	u32 vahi = nv_rd32(dev, 0x2808 + (unit * 0x10));
+	u32 stat = nv_rd32(dev, 0x280c + (unit * 0x10));
+
+	NV_INFO(dev, "PFIFO: %s fault at 0x%010llx [",
+		(stat & 0x00000080) ? "write" : "read", (u64)vahi << 32 | valo);
+	nouveau_enum_print(nvc0_fifo_fault_reason, stat & 0x0000000f);
+	printk("] from ");
+	nouveau_enum_print(nvc0_fifo_fault_unit, unit);
+	printk(" on channel 0x%010llx\n", (u64)inst << 12);
+}
+
+static void
+nvc0_fifo_isr_subfifo_intr(struct drm_device *dev, int unit)
+{
+	u32 stat = nv_rd32(dev, 0x040108 + (unit * 0x2000));
+	u32 addr = nv_rd32(dev, 0x0400c0 + (unit * 0x2000));
+	u32 data = nv_rd32(dev, 0x0400c4 + (unit * 0x2000));
+	u32 chid = nv_rd32(dev, 0x040120 + (unit * 0x2000)) & 0x7f;
+	u32 subc = (addr & 0x00070000) >> 16;
+	u32 mthd = (addr & 0x00003ffc);
+
+	NV_INFO(dev, "PSUBFIFO %d:", unit);
+	nouveau_bitfield_print(nvc0_fifo_subfifo_intr, stat);
+	printk("\n");
+	NV_INFO(dev, "PSUBFIFO %d: ch %d subc %d mthd 0x%04x data 0x%08x\n",
+		unit, chid, subc, mthd, data);
+
+	nv_wr32(dev, 0x0400c0 + (unit * 0x2000), 0x80600008);
+	nv_wr32(dev, 0x040108 + (unit * 0x2000), stat);
+}
+
+static void
+nvc0_fifo_isr(struct drm_device *dev)
+{
+	u32 stat = nv_rd32(dev, 0x002100);
+
+	if (stat & 0x10000000) {
+		u32 units = nv_rd32(dev, 0x00259c);
+		u32 u = units;
+
+		while (u) {
+			int i = ffs(u) - 1;
+			nvc0_fifo_isr_vm_fault(dev, i);
+			u &= ~(1 << i);
+		}
+
+		nv_wr32(dev, 0x00259c, units);
+		stat &= ~0x10000000;
+	}
+
+	if (stat & 0x20000000) {
+		u32 units = nv_rd32(dev, 0x0025a0);
+		u32 u = units;
+
+		while (u) {
+			int i = ffs(u) - 1;
+			nvc0_fifo_isr_subfifo_intr(dev, i);
+			u &= ~(1 << i);
+		}
+
+		nv_wr32(dev, 0x0025a0, units);
+		stat &= ~0x20000000;
+	}
+
+	if (stat) {
+		NV_INFO(dev, "PFIFO: unhandled status 0x%08x\n", stat);
+		nv_wr32(dev, 0x002100, stat);
+	}
+
+	nv_wr32(dev, 0x2140, 0);
+}
diff --git a/drivers/gpu/drm/nouveau/nvc0_graph.c b/drivers/gpu/drm/nouveau/nvc0_graph.c
index 717a517..5feacd5 100644
--- a/drivers/gpu/drm/nouveau/nvc0_graph.c
+++ b/drivers/gpu/drm/nouveau/nvc0_graph.c
@@ -22,9 +22,16 @@
  * Authors: Ben Skeggs
  */
 
+#include <linux/firmware.h>
+
 #include "drmP.h"
 
 #include "nouveau_drv.h"
+#include "nouveau_mm.h"
+#include "nvc0_graph.h"
+
+static void nvc0_graph_isr(struct drm_device *);
+static int  nvc0_graph_unload_context_to(struct drm_device *dev, u64 chan);
 
 void
 nvc0_graph_fifo_access(struct drm_device *dev, bool enabled)
@@ -37,39 +44,735 @@
 	return NULL;
 }
 
+static int
+nvc0_graph_construct_context(struct nouveau_channel *chan)
+{
+	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
+	struct nvc0_graph_priv *priv = dev_priv->engine.graph.priv;
+	struct nvc0_graph_chan *grch = chan->pgraph_ctx;
+	struct drm_device *dev = chan->dev;
+	int ret, i;
+	u32 *ctx;
+
+	ctx = kmalloc(priv->grctx_size, GFP_KERNEL);
+	if (!ctx)
+		return -ENOMEM;
+
+	nvc0_graph_load_context(chan);
+
+	nv_wo32(grch->grctx, 0x1c, 1);
+	nv_wo32(grch->grctx, 0x20, 0);
+	nv_wo32(grch->grctx, 0x28, 0);
+	nv_wo32(grch->grctx, 0x2c, 0);
+	dev_priv->engine.instmem.flush(dev);
+
+	ret = nvc0_grctx_generate(chan);
+	if (ret) {
+		kfree(ctx);
+		return ret;
+	}
+
+	ret = nvc0_graph_unload_context_to(dev, chan->ramin->vinst);
+	if (ret) {
+		kfree(ctx);
+		return ret;
+	}
+
+	for (i = 0; i < priv->grctx_size; i += 4)
+		ctx[i / 4] = nv_ro32(grch->grctx, i);
+
+	priv->grctx_vals = ctx;
+	return 0;
+}
+
+static int
+nvc0_graph_create_context_mmio_list(struct nouveau_channel *chan)
+{
+	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
+	struct nvc0_graph_priv *priv = dev_priv->engine.graph.priv;
+	struct nvc0_graph_chan *grch = chan->pgraph_ctx;
+	struct drm_device *dev = chan->dev;
+	int i = 0, gpc, tp, ret;
+	u32 magic;
+
+	ret = nouveau_gpuobj_new(dev, NULL, 0x2000, 256, NVOBJ_FLAG_VM,
+				 &grch->unk408004);
+	if (ret)
+		return ret;
+
+	ret = nouveau_gpuobj_new(dev, NULL, 0x8000, 256, NVOBJ_FLAG_VM,
+				 &grch->unk40800c);
+	if (ret)
+		return ret;
+
+	ret = nouveau_gpuobj_new(dev, NULL, 384 * 1024, 4096, NVOBJ_FLAG_VM,
+				 &grch->unk418810);
+	if (ret)
+		return ret;
+
+	ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 0, NVOBJ_FLAG_VM,
+				 &grch->mmio);
+	if (ret)
+		return ret;
+
+
+	nv_wo32(grch->mmio, i++ * 4, 0x00408004);
+	nv_wo32(grch->mmio, i++ * 4, grch->unk408004->vinst >> 8);
+	nv_wo32(grch->mmio, i++ * 4, 0x00408008);
+	nv_wo32(grch->mmio, i++ * 4, 0x80000018);
+
+	nv_wo32(grch->mmio, i++ * 4, 0x0040800c);
+	nv_wo32(grch->mmio, i++ * 4, grch->unk40800c->vinst >> 8);
+	nv_wo32(grch->mmio, i++ * 4, 0x00408010);
+	nv_wo32(grch->mmio, i++ * 4, 0x80000000);
+
+	nv_wo32(grch->mmio, i++ * 4, 0x00418810);
+	nv_wo32(grch->mmio, i++ * 4, 0x80000000 | grch->unk418810->vinst >> 12);
+	nv_wo32(grch->mmio, i++ * 4, 0x00419848);
+	nv_wo32(grch->mmio, i++ * 4, 0x10000000 | grch->unk418810->vinst >> 12);
+
+	nv_wo32(grch->mmio, i++ * 4, 0x00419004);
+	nv_wo32(grch->mmio, i++ * 4, grch->unk40800c->vinst >> 8);
+	nv_wo32(grch->mmio, i++ * 4, 0x00419008);
+	nv_wo32(grch->mmio, i++ * 4, 0x00000000);
+
+	nv_wo32(grch->mmio, i++ * 4, 0x00418808);
+	nv_wo32(grch->mmio, i++ * 4, grch->unk408004->vinst >> 8);
+	nv_wo32(grch->mmio, i++ * 4, 0x0041880c);
+	nv_wo32(grch->mmio, i++ * 4, 0x80000018);
+
+	magic = 0x02180000;
+	nv_wo32(grch->mmio, i++ * 4, 0x00405830);
+	nv_wo32(grch->mmio, i++ * 4, magic);
+	for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
+		for (tp = 0; tp < priv->tp_nr[gpc]; tp++, magic += 0x02fc) {
+			u32 reg = 0x504520 + (gpc * 0x8000) + (tp * 0x0800);
+			nv_wo32(grch->mmio, i++ * 4, reg);
+			nv_wo32(grch->mmio, i++ * 4, magic);
+		}
+	}
+
+	grch->mmio_nr = i / 2;
+	return 0;
+}
+
 int
 nvc0_graph_create_context(struct nouveau_channel *chan)
 {
+	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
+	struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
+	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
+	struct nvc0_graph_priv *priv = pgraph->priv;
+	struct nvc0_graph_chan *grch;
+	struct drm_device *dev = chan->dev;
+	struct nouveau_gpuobj *grctx;
+	int ret, i;
+
+	chan->pgraph_ctx = kzalloc(sizeof(*grch), GFP_KERNEL);
+	if (!chan->pgraph_ctx)
+		return -ENOMEM;
+	grch = chan->pgraph_ctx;
+
+	ret = nouveau_gpuobj_new(dev, NULL, priv->grctx_size, 256,
+				 NVOBJ_FLAG_VM | NVOBJ_FLAG_ZERO_ALLOC,
+				 &grch->grctx);
+	if (ret)
+		goto error;
+	chan->ramin_grctx = grch->grctx;
+	grctx = grch->grctx;
+
+	ret = nvc0_graph_create_context_mmio_list(chan);
+	if (ret)
+		goto error;
+
+	nv_wo32(chan->ramin, 0x0210, lower_32_bits(grctx->vinst) | 4);
+	nv_wo32(chan->ramin, 0x0214, upper_32_bits(grctx->vinst));
+	pinstmem->flush(dev);
+
+	if (!priv->grctx_vals) {
+		ret = nvc0_graph_construct_context(chan);
+		if (ret)
+			goto error;
+	}
+
+	for (i = 0; i < priv->grctx_size; i += 4)
+		nv_wo32(grctx, i, priv->grctx_vals[i / 4]);
+
+	nv_wo32(grctx, 0xf4, 0);
+	nv_wo32(grctx, 0xf8, 0);
+	nv_wo32(grctx, 0x10, grch->mmio_nr);
+	nv_wo32(grctx, 0x14, lower_32_bits(grch->mmio->vinst));
+	nv_wo32(grctx, 0x18, upper_32_bits(grch->mmio->vinst));
+	nv_wo32(grctx, 0x1c, 1);
+	nv_wo32(grctx, 0x20, 0);
+	nv_wo32(grctx, 0x28, 0);
+	nv_wo32(grctx, 0x2c, 0);
+	pinstmem->flush(dev);
 	return 0;
+
+error:
+	pgraph->destroy_context(chan);
+	return ret;
 }
 
 void
 nvc0_graph_destroy_context(struct nouveau_channel *chan)
 {
+	struct nvc0_graph_chan *grch;
+
+	grch = chan->pgraph_ctx;
+	chan->pgraph_ctx = NULL;
+	if (!grch)
+		return;
+
+	nouveau_gpuobj_ref(NULL, &grch->mmio);
+	nouveau_gpuobj_ref(NULL, &grch->unk418810);
+	nouveau_gpuobj_ref(NULL, &grch->unk40800c);
+	nouveau_gpuobj_ref(NULL, &grch->unk408004);
+	nouveau_gpuobj_ref(NULL, &grch->grctx);
+	chan->ramin_grctx = NULL;
 }
 
 int
 nvc0_graph_load_context(struct nouveau_channel *chan)
 {
+	struct drm_device *dev = chan->dev;
+
+	nv_wr32(dev, 0x409840, 0x00000030);
+	nv_wr32(dev, 0x409500, 0x80000000 | chan->ramin->vinst >> 12);
+	nv_wr32(dev, 0x409504, 0x00000003);
+	if (!nv_wait(dev, 0x409800, 0x00000010, 0x00000010))
+		NV_ERROR(dev, "PGRAPH: load_ctx timeout\n");
+
+	return 0;
+}
+
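+/*
+ * Request a context save for the given instance; command 0x00000009
+ * appears to mean "save/unload" to the ctxctl microcode.
+ */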
+static int
+nvc0_graph_unload_context_to(struct drm_device *dev, u64 chan)
+{
+	nv_wr32(dev, 0x409840, 0x00000003);
+	nv_wr32(dev, 0x409500, 0x80000000 | chan >> 12);
+	nv_wr32(dev, 0x409504, 0x00000009);
+	if (!nv_wait(dev, 0x409800, 0x00000001, 0x00000000)) {
+		NV_ERROR(dev, "PGRAPH: unload_ctx timeout\n");
+		return -EBUSY;
+	}
+
 	return 0;
 }
 
 int
 nvc0_graph_unload_context(struct drm_device *dev)
 {
-	return 0;
+	u64 inst = (u64)(nv_rd32(dev, 0x409b00) & 0x0fffffff) << 12;
+	return nvc0_graph_unload_context_to(dev, inst);
+}
+
+static void
+nvc0_graph_destroy(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
+	struct nvc0_graph_priv *priv;
+
+	priv = pgraph->priv;
+	if (!priv)
+		return;
+
+	nouveau_irq_unregister(dev, 12);
+
+	nouveau_gpuobj_ref(NULL, &priv->unk4188b8);
+	nouveau_gpuobj_ref(NULL, &priv->unk4188b4);
+
+	kfree(priv->grctx_vals);
+	kfree(priv);
 }
 
 void
 nvc0_graph_takedown(struct drm_device *dev)
 {
+	nvc0_graph_destroy(dev);
+}
+
+static int
+nvc0_graph_create(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
+	struct nvc0_graph_priv *priv;
+	int ret, gpc, i;
+
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+	pgraph->priv = priv;
+
+	ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 256, 0, &priv->unk4188b4);
+	if (ret)
+		goto error;
+
+	ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 256, 0, &priv->unk4188b8);
+	if (ret)
+		goto error;
+
+	for (i = 0; i < 0x1000; i += 4) {
+		nv_wo32(priv->unk4188b4, i, 0x00000010);
+		nv_wo32(priv->unk4188b8, i, 0x00000010);
+	}
+
+	priv->gpc_nr  =  nv_rd32(dev, 0x409604) & 0x0000001f;
+	priv->rop_nr = (nv_rd32(dev, 0x409604) & 0x001f0000) >> 16;
+	for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
+		priv->tp_nr[gpc] = nv_rd32(dev, GPC_UNIT(gpc, 0x2608));
+		priv->tp_total += priv->tp_nr[gpc];
+	}
+
+	/*XXX: these need figuring out... */
+	switch (dev_priv->chipset) {
+	case 0xc0:
+		if (priv->tp_total == 11) { /* 465, 3/4/4/0, 4 */
+			priv->magic_not_rop_nr = 0x07;
+			/* filled values up to tp_total, the rest 0 */
+			priv->magicgpc980[0]   = 0x22111000;
+			priv->magicgpc980[1]   = 0x00000233;
+			priv->magicgpc980[2]   = 0x00000000;
+			priv->magicgpc980[3]   = 0x00000000;
+			priv->magicgpc918      = 0x000ba2e9;
+		} else
+		if (priv->tp_total == 14) { /* 470, 3/3/4/4, 5 */
+			priv->magic_not_rop_nr = 0x05;
+			priv->magicgpc980[0]   = 0x11110000;
+			priv->magicgpc980[1]   = 0x00233222;
+			priv->magicgpc980[2]   = 0x00000000;
+			priv->magicgpc980[3]   = 0x00000000;
+			priv->magicgpc918      = 0x00092493;
+		} else
+		if (priv->tp_total == 15) { /* 480, 3/4/4/4, 6 */
+			priv->magic_not_rop_nr = 0x06;
+			priv->magicgpc980[0]   = 0x11110000;
+			priv->magicgpc980[1]   = 0x03332222;
+			priv->magicgpc980[2]   = 0x00000000;
+			priv->magicgpc980[3]   = 0x00000000;
+			priv->magicgpc918      = 0x00088889;
+		}
+		break;
+	case 0xc3: /* 450, 4/0/0/0, 2 */
+		priv->magic_not_rop_nr = 0x03;
+		priv->magicgpc980[0]   = 0x00003210;
+		priv->magicgpc980[1]   = 0x00000000;
+		priv->magicgpc980[2]   = 0x00000000;
+		priv->magicgpc980[3]   = 0x00000000;
+		priv->magicgpc918      = 0x00200000;
+		break;
+	case 0xc4: /* 460, 3/4/0/0, 4 */
+		priv->magic_not_rop_nr = 0x01;
+		priv->magicgpc980[0]   = 0x02321100;
+		priv->magicgpc980[1]   = 0x00000000;
+		priv->magicgpc980[2]   = 0x00000000;
+		priv->magicgpc980[3]   = 0x00000000;
+		priv->magicgpc918      = 0x00124925;
+		break;
+	}
+
+	if (!priv->magic_not_rop_nr) {
+		NV_ERROR(dev, "PGRAPH: unknown config: %d/%d/%d/%d, %d\n",
+			 priv->tp_nr[0], priv->tp_nr[1], priv->tp_nr[2],
+			 priv->tp_nr[3], priv->rop_nr);
+		/* use 0xc3's values... */
+		priv->magic_not_rop_nr = 0x03;
+		priv->magicgpc980[0]   = 0x00003210;
+		priv->magicgpc980[1]   = 0x00000000;
+		priv->magicgpc980[2]   = 0x00000000;
+		priv->magicgpc980[3]   = 0x00000000;
+		priv->magicgpc918      = 0x00200000;
+	}
+
+	nouveau_irq_register(dev, 12, nvc0_graph_isr);
+	NVOBJ_CLASS(dev, 0x902d, GR); /* 2D */
+	NVOBJ_CLASS(dev, 0x9039, GR); /* M2MF */
+	NVOBJ_CLASS(dev, 0x9097, GR); /* 3D */
+	NVOBJ_CLASS(dev, 0x90c0, GR); /* COMPUTE */
+	return 0;
+
+error:
+	nvc0_graph_destroy(dev);
+	return ret;
+}
+
+static void
+nvc0_graph_init_obj418880(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
+	struct nvc0_graph_priv *priv = pgraph->priv;
+	int i;
+
+	nv_wr32(dev, GPC_BCAST(0x0880), 0x00000000);
+	nv_wr32(dev, GPC_BCAST(0x08a4), 0x00000000);
+	for (i = 0; i < 4; i++)
+		nv_wr32(dev, GPC_BCAST(0x0888) + (i * 4), 0x00000000);
+	nv_wr32(dev, GPC_BCAST(0x08b4), priv->unk4188b4->vinst >> 8);
+	nv_wr32(dev, GPC_BCAST(0x08b8), priv->unk4188b8->vinst >> 8);
+}
+
+static void
+nvc0_graph_init_regs(struct drm_device *dev)
+{
+	nv_wr32(dev, 0x400080, 0x003083c2);
+	nv_wr32(dev, 0x400088, 0x00006fe7);
+	nv_wr32(dev, 0x40008c, 0x00000000);
+	nv_wr32(dev, 0x400090, 0x00000030);
+	nv_wr32(dev, 0x40013c, 0x013901f7);
+	nv_wr32(dev, 0x400140, 0x00000100);
+	nv_wr32(dev, 0x400144, 0x00000000);
+	nv_wr32(dev, 0x400148, 0x00000110);
+	nv_wr32(dev, 0x400138, 0x00000000);
+	nv_wr32(dev, 0x400130, 0x00000000);
+	nv_wr32(dev, 0x400134, 0x00000000);
+	nv_wr32(dev, 0x400124, 0x00000002);
+}
+
+static void
+nvc0_graph_init_gpc_0(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nvc0_graph_priv *priv = dev_priv->engine.graph.priv;
+	int gpc;
+
+	/*
+	 *      TP      ROP UNKVAL(magic_not_rop_nr)
+	 * 450: 4/0/0/0 2        3
+	 * 460: 3/4/0/0 4        1
+	 * 465: 3/4/4/0 4        7
+	 * 470: 3/3/4/4 5        5
+	 * 480: 3/4/4/4 6        6
+	 *
+	 * magicgpc918
+	 * 450: 00200000 00000000001000000000000000000000
+	 * 460: 00124925 00000000000100100100100100100101
+	 * 465: 000ba2e9 00000000000010111010001011101001
+	 * 470: 00092493 00000000000010010010010010010011
+	 * 480: 00088889 00000000000010001000100010001001
+	 *
+	 * magicgpc980: filled values up to tp_total, remainder 0
+	 * 450: 00003210 00000000 00000000 00000000
+	 * 460: 02321100 00000000 00000000 00000000
+	 * 465: 22111000 00000233 00000000 00000000
+	 * 470: 11110000 00233222 00000000 00000000
+	 * 480: 11110000 03332222 00000000 00000000
+	 */
+
+	nv_wr32(dev, GPC_BCAST(0x0980), priv->magicgpc980[0]);
+	nv_wr32(dev, GPC_BCAST(0x0984), priv->magicgpc980[1]);
+	nv_wr32(dev, GPC_BCAST(0x0988), priv->magicgpc980[2]);
+	nv_wr32(dev, GPC_BCAST(0x098c), priv->magicgpc980[3]);
+
+	for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
+		nv_wr32(dev, GPC_UNIT(gpc, 0x0914), priv->magic_not_rop_nr << 8 |
+						  priv->tp_nr[gpc]);
+		nv_wr32(dev, GPC_UNIT(gpc, 0x0910), 0x00040000 | priv->tp_total);
+		nv_wr32(dev, GPC_UNIT(gpc, 0x0918), priv->magicgpc918);
+	}
+
+	nv_wr32(dev, GPC_BCAST(0x1bd4), priv->magicgpc918);
+	nv_wr32(dev, GPC_BCAST(0x08ac), priv->rop_nr);
+}
+
+static void
+nvc0_graph_init_units(struct drm_device *dev)
+{
+	nv_wr32(dev, 0x409c24, 0x000f0000);
+	nv_wr32(dev, 0x404000, 0xc0000000); /* DISPATCH */
+	nv_wr32(dev, 0x404600, 0xc0000000); /* M2MF */
+	nv_wr32(dev, 0x408030, 0xc0000000);
+	nv_wr32(dev, 0x40601c, 0xc0000000);
+	nv_wr32(dev, 0x404490, 0xc0000000); /* MACRO */
+	nv_wr32(dev, 0x406018, 0xc0000000);
+	nv_wr32(dev, 0x405840, 0xc0000000);
+	nv_wr32(dev, 0x405844, 0x00ffffff);
+	nv_mask(dev, 0x419cc0, 0x00000008, 0x00000008);
+	nv_mask(dev, 0x419eb4, 0x00001000, 0x00001000);
+}
+
+static void
+nvc0_graph_init_gpc_1(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nvc0_graph_priv *priv = dev_priv->engine.graph.priv;
+	int gpc, tp;
+
+	for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
+		nv_wr32(dev, GPC_UNIT(gpc, 0x0420), 0xc0000000);
+		nv_wr32(dev, GPC_UNIT(gpc, 0x0900), 0xc0000000);
+		nv_wr32(dev, GPC_UNIT(gpc, 0x1028), 0xc0000000);
+		nv_wr32(dev, GPC_UNIT(gpc, 0x0824), 0xc0000000);
+		for (tp = 0; tp < priv->tp_nr[gpc]; tp++) {
+			nv_wr32(dev, TP_UNIT(gpc, tp, 0x508), 0xffffffff);
+			nv_wr32(dev, TP_UNIT(gpc, tp, 0x50c), 0xffffffff);
+			nv_wr32(dev, TP_UNIT(gpc, tp, 0x224), 0xc0000000);
+			nv_wr32(dev, TP_UNIT(gpc, tp, 0x48c), 0xc0000000);
+			nv_wr32(dev, TP_UNIT(gpc, tp, 0x084), 0xc0000000);
+			nv_wr32(dev, TP_UNIT(gpc, tp, 0xe44), 0x001ffffe);
+			nv_wr32(dev, TP_UNIT(gpc, tp, 0xe4c), 0x0000000f);
+		}
+		nv_wr32(dev, GPC_UNIT(gpc, 0x2c90), 0xffffffff);
+		nv_wr32(dev, GPC_UNIT(gpc, 0x2c94), 0xffffffff);
+	}
+}
+
+static void
+nvc0_graph_init_rop(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nvc0_graph_priv *priv = dev_priv->engine.graph.priv;
+	int rop;
+
+	for (rop = 0; rop < priv->rop_nr; rop++) {
+		nv_wr32(dev, ROP_UNIT(rop, 0x144), 0xc0000000);
+		nv_wr32(dev, ROP_UNIT(rop, 0x070), 0xc0000000);
+		nv_wr32(dev, ROP_UNIT(rop, 0x204), 0xffffffff);
+		nv_wr32(dev, ROP_UNIT(rop, 0x208), 0xffffffff);
+	}
+}
+
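+/*
+ * Upload a falcon (fuc) microcode image: the data segment is streamed
+ * through the 0x01c0/0x01c4 port pair, the code segment through
+ * 0x0180/0x0184 with 0x0188 selecting the destination 256-byte page.
+ */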
+static int
+nvc0_fuc_load_fw(struct drm_device *dev, u32 fuc_base,
+		 const char *code_fw, const char *data_fw)
+{
+	const struct firmware *fw;
+	char name[32];
+	int ret, i;
+
+	snprintf(name, sizeof(name), "nouveau/%s", data_fw);
+	ret = request_firmware(&fw, name, &dev->pdev->dev);
+	if (ret) {
+		NV_ERROR(dev, "failed to load %s\n", data_fw);
+		return ret;
+	}
+
+	nv_wr32(dev, fuc_base + 0x01c0, 0x01000000);
+	for (i = 0; i < fw->size / 4; i++)
+		nv_wr32(dev, fuc_base + 0x01c4, ((u32 *)fw->data)[i]);
+	release_firmware(fw);
+
+	snprintf(name, sizeof(name), "nouveau/%s", code_fw);
+	ret = request_firmware(&fw, name, &dev->pdev->dev);
+	if (ret) {
+		NV_ERROR(dev, "failed to load %s\n", code_fw);
+		return ret;
+	}
+
+	nv_wr32(dev, fuc_base + 0x0180, 0x01000000);
+	for (i = 0; i < fw->size / 4; i++) {
+		if ((i & 0x3f) == 0)
+			nv_wr32(dev, fuc_base + 0x0188, i >> 6);
+		nv_wr32(dev, fuc_base + 0x0184, ((u32 *)fw->data)[i]);
+	}
+	release_firmware(fw);
+
+	return 0;
+}
+
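+/*
+ * Load and start the HUB (0x409xxx) and GPC (0x41axxx) ctxctl microcode,
+ * then perform the initial handshake: request 0x10 reports the size of
+ * the context image, while the remaining requests are not yet understood.
+ */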
+static int
+nvc0_graph_init_ctxctl(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nvc0_graph_priv *priv = dev_priv->engine.graph.priv;
+	u32 r000260;
+	int ret;
+
+	/* load fuc microcode */
+	r000260 = nv_mask(dev, 0x000260, 0x00000001, 0x00000000);
+	ret = nvc0_fuc_load_fw(dev, 0x409000, "fuc409c", "fuc409d");
+	if (ret == 0)
+		ret = nvc0_fuc_load_fw(dev, 0x41a000, "fuc41ac", "fuc41ad");
+	nv_wr32(dev, 0x000260, r000260);
+
+	if (ret)
+		return ret;
+
+	/* start both of them running */
+	nv_wr32(dev, 0x409840, 0xffffffff);
+	nv_wr32(dev, 0x41a10c, 0x00000000);
+	nv_wr32(dev, 0x40910c, 0x00000000);
+	nv_wr32(dev, 0x41a100, 0x00000002);
+	nv_wr32(dev, 0x409100, 0x00000002);
+	if (!nv_wait(dev, 0x409800, 0x00000001, 0x00000001))
+		NV_INFO(dev, "0x409800 wait failed\n");
+
+	nv_wr32(dev, 0x409840, 0xffffffff);
+	nv_wr32(dev, 0x409500, 0x7fffffff);
+	nv_wr32(dev, 0x409504, 0x00000021);
+
+	nv_wr32(dev, 0x409840, 0xffffffff);
+	nv_wr32(dev, 0x409500, 0x00000000);
+	nv_wr32(dev, 0x409504, 0x00000010);
+	if (!nv_wait_ne(dev, 0x409800, 0xffffffff, 0x00000000)) {
+		NV_ERROR(dev, "fuc09 req 0x10 timeout\n");
+		return -EBUSY;
+	}
+	priv->grctx_size = nv_rd32(dev, 0x409800);
+
+	nv_wr32(dev, 0x409840, 0xffffffff);
+	nv_wr32(dev, 0x409500, 0x00000000);
+	nv_wr32(dev, 0x409504, 0x00000016);
+	if (!nv_wait_ne(dev, 0x409800, 0xffffffff, 0x00000000)) {
+		NV_ERROR(dev, "fuc09 req 0x16 timeout\n");
+		return -EBUSY;
+	}
+
+	nv_wr32(dev, 0x409840, 0xffffffff);
+	nv_wr32(dev, 0x409500, 0x00000000);
+	nv_wr32(dev, 0x409504, 0x00000025);
+	if (!nv_wait_ne(dev, 0x409800, 0xffffffff, 0x00000000)) {
+		NV_ERROR(dev, "fuc09 req 0x25 timeout\n");
+		return -EBUSY;
+	}
+
+	return 0;
 }
 
 int
 nvc0_graph_init(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
+	struct nvc0_graph_priv *priv;
+	int ret;
+
 	dev_priv->engine.graph.accel_blocked = true;
+
+	switch (dev_priv->chipset) {
+	case 0xc0:
+	case 0xc3:
+	case 0xc4:
+		break;
+	default:
+		NV_ERROR(dev, "PGRAPH: unsupported chipset, please report!\n");
+		if (nouveau_noaccel != 0)
+			return 0;
+		break;
+	}
+
+	nv_mask(dev, 0x000200, 0x18001000, 0x00000000);
+	nv_mask(dev, 0x000200, 0x18001000, 0x18001000);
+
+	if (!pgraph->priv) {
+		ret = nvc0_graph_create(dev);
+		if (ret)
+			return ret;
+	}
+	priv = pgraph->priv;
+
+	nvc0_graph_init_obj418880(dev);
+	nvc0_graph_init_regs(dev);
+	//nvc0_graph_init_unimplemented_magics(dev);
+	nvc0_graph_init_gpc_0(dev);
+	//nvc0_graph_init_unimplemented_c242(dev);
+
+	nv_wr32(dev, 0x400500, 0x00010001);
+	nv_wr32(dev, 0x400100, 0xffffffff);
+	nv_wr32(dev, 0x40013c, 0xffffffff);
+
+	nvc0_graph_init_units(dev);
+	nvc0_graph_init_gpc_1(dev);
+	nvc0_graph_init_rop(dev);
+
+	nv_wr32(dev, 0x400108, 0xffffffff);
+	nv_wr32(dev, 0x400138, 0xffffffff);
+	nv_wr32(dev, 0x400118, 0xffffffff);
+	nv_wr32(dev, 0x400130, 0xffffffff);
+	nv_wr32(dev, 0x40011c, 0xffffffff);
+	nv_wr32(dev, 0x400134, 0xffffffff);
+	nv_wr32(dev, 0x400054, 0x34ce3464);
+
+	ret = nvc0_graph_init_ctxctl(dev);
+	if (ret == 0)
+		dev_priv->engine.graph.accel_blocked = false;
 	return 0;
 }
 
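+/*
+ * Translate the instance address reported by PGRAPH back into a channel
+ * id by scanning the active channel list.
+ */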
+static int
+nvc0_graph_isr_chid(struct drm_device *dev, u64 inst)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_channel *chan;
+	unsigned long flags;
+	int i;
+
+	spin_lock_irqsave(&dev_priv->channels.lock, flags);
+	for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
+		chan = dev_priv->channels.ptr[i];
+		if (!chan || !chan->ramin)
+			continue;
+
+		if (inst == chan->ramin->vinst)
+			break;
+	}
+	spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
+	return i;
+}
+
+static void
+nvc0_graph_isr(struct drm_device *dev)
+{
+	u64 inst = (u64)(nv_rd32(dev, 0x409b00) & 0x0fffffff) << 12;
+	u32 chid = nvc0_graph_isr_chid(dev, inst);
+	u32 stat = nv_rd32(dev, 0x400100);
+	u32 addr = nv_rd32(dev, 0x400704);
+	u32 mthd = (addr & 0x00003ffc);
+	u32 subc = (addr & 0x00070000) >> 16;
+	u32 data = nv_rd32(dev, 0x400708);
+	u32 code = nv_rd32(dev, 0x400110);
+	u32 class = nv_rd32(dev, 0x404200 + (subc * 4));
+
+	if (stat & 0x00000010) {
+		NV_INFO(dev, "PGRAPH: ILLEGAL_MTHD ch %d [0x%010llx] subc %d "
+			     "class 0x%04x mthd 0x%04x data 0x%08x\n",
+			chid, inst, subc, class, mthd, data);
+		nv_wr32(dev, 0x400100, 0x00000010);
+		stat &= ~0x00000010;
+	}
+
+	if (stat & 0x00000020) {
+		NV_INFO(dev, "PGRAPH: ILLEGAL_CLASS ch %d [0x%010llx] subc %d "
+			     "class 0x%04x mthd 0x%04x data 0x%08x\n",
+			chid, inst, subc, class, mthd, data);
+		nv_wr32(dev, 0x400100, 0x00000020);
+		stat &= ~0x00000020;
+	}
+
+	if (stat & 0x00100000) {
+		NV_INFO(dev, "PGRAPH: DATA_ERROR [");
+		nouveau_enum_print(nv50_data_error_names, code);
+		printk("] ch %d [0x%010llx] subc %d class 0x%04x "
+		       "mthd 0x%04x data 0x%08x\n",
+		       chid, inst, subc, class, mthd, data);
+		nv_wr32(dev, 0x400100, 0x00100000);
+		stat &= ~0x00100000;
+	}
+
+	if (stat & 0x00200000) {
+		u32 trap = nv_rd32(dev, 0x400108);
+		NV_INFO(dev, "PGRAPH: TRAP ch %d status 0x%08x\n", chid, trap);
+		nv_wr32(dev, 0x400108, trap);
+		nv_wr32(dev, 0x400100, 0x00200000);
+		stat &= ~0x00200000;
+	}
+
+	if (stat & 0x00080000) {
+		u32 ustat = nv_rd32(dev, 0x409c18);
+
+		NV_INFO(dev, "PGRAPH: CTXCTRL ustat 0x%08x\n", ustat);
+
+		nv_wr32(dev, 0x409c20, ustat);
+		nv_wr32(dev, 0x400100, 0x00080000);
+		stat &= ~0x00080000;
+	}
+
+	if (stat) {
+		NV_INFO(dev, "PGRAPH: unknown stat 0x%08x\n", stat);
+		nv_wr32(dev, 0x400100, stat);
+	}
+
+	nv_wr32(dev, 0x400500, 0x00010001);
+}
diff --git a/drivers/gpu/drm/nouveau/nvc0_graph.h b/drivers/gpu/drm/nouveau/nvc0_graph.h
new file mode 100644
index 0000000..40e26f9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvc0_graph.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#ifndef __NVC0_GRAPH_H__
+#define __NVC0_GRAPH_H__
+
+#define GPC_MAX 4
+#define TP_MAX 32
+
+#define ROP_BCAST(r)   (0x408800 + (r))
+#define ROP_UNIT(u,r)  (0x410000 + (u) * 0x400 + (r))
+#define GPC_BCAST(r)   (0x418000 + (r))
+#define GPC_UNIT(t,r)  (0x500000 + (t) * 0x8000 + (r))
+#define TP_UNIT(t,m,r) (0x504000 + (t) * 0x8000 + (m) * 0x800 + (r))
+
+struct nvc0_graph_priv {
+	u8 gpc_nr;
+	u8 rop_nr;
+	u8 tp_nr[GPC_MAX];
+	u8 tp_total;
+
+	u32  grctx_size;
+	u32 *grctx_vals;
+	struct nouveau_gpuobj *unk4188b4;
+	struct nouveau_gpuobj *unk4188b8;
+
+	u8  magic_not_rop_nr;
+	u32 magicgpc980[4];
+	u32 magicgpc918;
+};
+
+struct nvc0_graph_chan {
+	struct nouveau_gpuobj *grctx;
+	struct nouveau_gpuobj *unk408004; /* 0x418810 too */
+	struct nouveau_gpuobj *unk40800c; /* 0x419004 too */
+	struct nouveau_gpuobj *unk418810; /* 0x419848 too */
+	struct nouveau_gpuobj *mmio;
+	int mmio_nr;
+};
+
+int nvc0_grctx_generate(struct nouveau_channel *);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvc0_grctx.c b/drivers/gpu/drm/nouveau/nvc0_grctx.c
new file mode 100644
index 0000000..b9e68b2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvc0_grctx.c
@@ -0,0 +1,2874 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_mm.h"
+#include "nvc0_graph.h"
+
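+/*
+ * Low-level helpers used while constructing the default context image:
+ * nv_icmd() pokes an initial-state command through 0x400200/0x400204 and
+ * nv_mthd() injects a method for a given class through 0x404488/0x40448c;
+ * register meanings come from reverse engineering.
+ */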
+static void
+nv_icmd(struct drm_device *dev, u32 icmd, u32 data)
+{
+	nv_wr32(dev, 0x400204, data);
+	nv_wr32(dev, 0x400200, icmd);
+	while (nv_rd32(dev, 0x400700) & 2) {}
+}
+
+static void
+nv_mthd(struct drm_device *dev, u32 class, u32 mthd, u32 data)
+{
+	nv_wr32(dev, 0x40448c, data);
+	nv_wr32(dev, 0x404488, 0x80000000 | (mthd << 14) | class);
+}
+
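+/*
+ * Default method state for the Fermi 3D class (0x9097), programmed while
+ * generating the reference context image.
+ */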
+static void
+nvc0_grctx_generate_9097(struct drm_device *dev)
+{
+	nv_mthd(dev, 0x9097, 0x0800, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0840, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0880, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x08c0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0900, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0940, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0980, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x09c0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0804, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0844, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0884, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x08c4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0904, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0944, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0984, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x09c4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0808, 0x00000400);
+	nv_mthd(dev, 0x9097, 0x0848, 0x00000400);
+	nv_mthd(dev, 0x9097, 0x0888, 0x00000400);
+	nv_mthd(dev, 0x9097, 0x08c8, 0x00000400);
+	nv_mthd(dev, 0x9097, 0x0908, 0x00000400);
+	nv_mthd(dev, 0x9097, 0x0948, 0x00000400);
+	nv_mthd(dev, 0x9097, 0x0988, 0x00000400);
+	nv_mthd(dev, 0x9097, 0x09c8, 0x00000400);
+	nv_mthd(dev, 0x9097, 0x080c, 0x00000300);
+	nv_mthd(dev, 0x9097, 0x084c, 0x00000300);
+	nv_mthd(dev, 0x9097, 0x088c, 0x00000300);
+	nv_mthd(dev, 0x9097, 0x08cc, 0x00000300);
+	nv_mthd(dev, 0x9097, 0x090c, 0x00000300);
+	nv_mthd(dev, 0x9097, 0x094c, 0x00000300);
+	nv_mthd(dev, 0x9097, 0x098c, 0x00000300);
+	nv_mthd(dev, 0x9097, 0x09cc, 0x00000300);
+	nv_mthd(dev, 0x9097, 0x0810, 0x000000cf);
+	nv_mthd(dev, 0x9097, 0x0850, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0890, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x08d0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0910, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0950, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0990, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x09d0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0814, 0x00000040);
+	nv_mthd(dev, 0x9097, 0x0854, 0x00000040);
+	nv_mthd(dev, 0x9097, 0x0894, 0x00000040);
+	nv_mthd(dev, 0x9097, 0x08d4, 0x00000040);
+	nv_mthd(dev, 0x9097, 0x0914, 0x00000040);
+	nv_mthd(dev, 0x9097, 0x0954, 0x00000040);
+	nv_mthd(dev, 0x9097, 0x0994, 0x00000040);
+	nv_mthd(dev, 0x9097, 0x09d4, 0x00000040);
+	nv_mthd(dev, 0x9097, 0x0818, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x0858, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x0898, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x08d8, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x0918, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x0958, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x0998, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x09d8, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x081c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x085c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x089c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x08dc, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x091c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x095c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x099c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x09dc, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0820, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0860, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x08a0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x08e0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0920, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0960, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x09a0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x09e0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2700, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2720, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2740, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2760, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2780, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x27a0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x27c0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x27e0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2704, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2724, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2744, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2764, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2784, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x27a4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x27c4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x27e4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2708, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2728, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2748, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2768, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2788, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x27a8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x27c8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x27e8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x270c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x272c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x274c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x276c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x278c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x27ac, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x27cc, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x27ec, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2710, 0x00014000);
+	nv_mthd(dev, 0x9097, 0x2730, 0x00014000);
+	nv_mthd(dev, 0x9097, 0x2750, 0x00014000);
+	nv_mthd(dev, 0x9097, 0x2770, 0x00014000);
+	nv_mthd(dev, 0x9097, 0x2790, 0x00014000);
+	nv_mthd(dev, 0x9097, 0x27b0, 0x00014000);
+	nv_mthd(dev, 0x9097, 0x27d0, 0x00014000);
+	nv_mthd(dev, 0x9097, 0x27f0, 0x00014000);
+	nv_mthd(dev, 0x9097, 0x2714, 0x00000040);
+	nv_mthd(dev, 0x9097, 0x2734, 0x00000040);
+	nv_mthd(dev, 0x9097, 0x2754, 0x00000040);
+	nv_mthd(dev, 0x9097, 0x2774, 0x00000040);
+	nv_mthd(dev, 0x9097, 0x2794, 0x00000040);
+	nv_mthd(dev, 0x9097, 0x27b4, 0x00000040);
+	nv_mthd(dev, 0x9097, 0x27d4, 0x00000040);
+	nv_mthd(dev, 0x9097, 0x27f4, 0x00000040);
+	nv_mthd(dev, 0x9097, 0x1c00, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1c10, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1c20, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1c30, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1c40, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1c50, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1c60, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1c70, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1c80, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1c90, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1ca0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1cb0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1cc0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1cd0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1ce0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1cf0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1c04, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1c14, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1c24, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1c34, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1c44, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1c54, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1c64, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1c74, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1c84, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1c94, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1ca4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1cb4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1cc4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1cd4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1ce4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1cf4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1c08, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1c18, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1c28, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1c38, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1c48, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1c58, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1c68, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1c78, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1c88, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1c98, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1ca8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1cb8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1cc8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1cd8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1ce8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1cf8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1c0c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1c1c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1c2c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1c3c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1c4c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1c5c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1c6c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1c7c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1c8c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1c9c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1cac, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1cbc, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1ccc, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1cdc, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1cec, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1cfc, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1d00, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1d10, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1d20, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1d30, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1d40, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1d50, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1d60, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1d70, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1d80, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1d90, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1da0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1db0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1dc0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1dd0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1de0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1df0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1d04, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1d14, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1d24, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1d34, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1d44, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1d54, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1d64, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1d74, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1d84, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1d94, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1da4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1db4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1dc4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1dd4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1de4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1df4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1d08, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1d18, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1d28, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1d38, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1d48, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1d58, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1d68, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1d78, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1d88, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1d98, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1da8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1db8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1dc8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1dd8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1de8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1df8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1d0c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1d1c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1d2c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1d3c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1d4c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1d5c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1d6c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1d7c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1d8c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1d9c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1dac, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1dbc, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1dcc, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1ddc, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1dec, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1dfc, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1f00, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1f08, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1f10, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1f18, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1f20, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1f28, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1f30, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1f38, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1f40, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1f48, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1f50, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1f58, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1f60, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1f68, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1f70, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1f78, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1f04, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1f0c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1f14, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1f1c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1f24, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1f2c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1f34, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1f3c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1f44, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1f4c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1f54, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1f5c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1f64, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1f6c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1f74, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1f7c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1f80, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1f88, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1f90, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1f98, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1fa0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1fa8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1fb0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1fb8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1fc0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1fc8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1fd0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1fd8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1fe0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1fe8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1ff0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1ff8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1f84, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1f8c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1f94, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1f9c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1fa4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1fac, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1fb4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1fbc, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1fc4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1fcc, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1fd4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1fdc, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1fe4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1fec, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1ff4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1ffc, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2200, 0x00000022);
+	nv_mthd(dev, 0x9097, 0x2210, 0x00000022);
+	nv_mthd(dev, 0x9097, 0x2220, 0x00000022);
+	nv_mthd(dev, 0x9097, 0x2230, 0x00000022);
+	nv_mthd(dev, 0x9097, 0x2240, 0x00000022);
+	nv_mthd(dev, 0x9097, 0x2000, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2040, 0x00000011);
+	nv_mthd(dev, 0x9097, 0x2080, 0x00000020);
+	nv_mthd(dev, 0x9097, 0x20c0, 0x00000030);
+	nv_mthd(dev, 0x9097, 0x2100, 0x00000040);
+	nv_mthd(dev, 0x9097, 0x2140, 0x00000051);
+	nv_mthd(dev, 0x9097, 0x200c, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x204c, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x208c, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x20cc, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x210c, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x214c, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x2010, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2050, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2090, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x20d0, 0x00000002);
+	nv_mthd(dev, 0x9097, 0x2110, 0x00000003);
+	nv_mthd(dev, 0x9097, 0x2150, 0x00000004);
+	nv_mthd(dev, 0x9097, 0x0380, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x03a0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x03c0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x03e0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0384, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x03a4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x03c4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x03e4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0388, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x03a8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x03c8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x03e8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x038c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x03ac, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x03cc, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x03ec, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0700, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0710, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0720, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0730, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0704, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0714, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0724, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0734, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0708, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0718, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0728, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0738, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2800, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2804, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2808, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x280c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2810, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2814, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2818, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x281c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2820, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2824, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2828, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x282c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2830, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2834, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2838, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x283c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2840, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2844, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2848, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x284c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2850, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2854, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2858, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x285c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2860, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2864, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2868, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x286c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2870, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2874, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2878, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x287c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2880, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2884, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2888, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x288c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2890, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2894, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2898, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x289c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x28a0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x28a4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x28a8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x28ac, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x28b0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x28b4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x28b8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x28bc, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x28c0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x28c4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x28c8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x28cc, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x28d0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x28d4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x28d8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x28dc, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x28e0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x28e4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x28e8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x28ec, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x28f0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x28f4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x28f8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x28fc, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2900, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2904, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2908, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x290c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2910, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2914, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2918, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x291c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2920, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2924, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2928, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x292c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2930, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2934, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2938, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x293c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2940, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2944, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2948, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x294c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2950, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2954, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2958, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x295c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2960, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2964, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2968, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x296c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2970, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2974, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2978, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x297c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2980, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2984, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2988, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x298c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2990, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2994, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2998, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x299c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x29a0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x29a4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x29a8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x29ac, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x29b0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x29b4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x29b8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x29bc, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x29c0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x29c4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x29c8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x29cc, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x29d0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x29d4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x29d8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x29dc, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x29e0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x29e4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x29e8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x29ec, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x29f0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x29f4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x29f8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x29fc, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0a00, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0a20, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0a40, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0a60, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0a80, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0aa0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0ac0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0ae0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0b00, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0b20, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0b40, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0b60, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0b80, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0ba0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0bc0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0be0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0a04, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0a24, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0a44, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0a64, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0a84, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0aa4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0ac4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0ae4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0b04, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0b24, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0b44, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0b64, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0b84, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0ba4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0bc4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0be4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0a08, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0a28, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0a48, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0a68, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0a88, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0aa8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0ac8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0ae8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0b08, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0b28, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0b48, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0b68, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0b88, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0ba8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0bc8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0be8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0a0c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0a2c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0a4c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0a6c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0a8c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0aac, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0acc, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0aec, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0b0c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0b2c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0b4c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0b6c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0b8c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0bac, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0bcc, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0bec, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0a10, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0a30, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0a50, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0a70, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0a90, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0ab0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0ad0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0af0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0b10, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0b30, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0b50, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0b70, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0b90, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0bb0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0bd0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0bf0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0a14, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0a34, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0a54, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0a74, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0a94, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0ab4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0ad4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0af4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0b14, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0b34, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0b54, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0b74, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0b94, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0bb4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0bd4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0bf4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0c00, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0c10, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0c20, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0c30, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0c40, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0c50, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0c60, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0c70, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0c80, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0c90, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0ca0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0cb0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0cc0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0cd0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0ce0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0cf0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0c04, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0c14, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0c24, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0c34, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0c44, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0c54, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0c64, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0c74, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0c84, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0c94, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0ca4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0cb4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0cc4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0cd4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0ce4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0cf4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0c08, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0c18, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0c28, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0c38, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0c48, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0c58, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0c68, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0c78, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0c88, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0c98, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0ca8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0cb8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0cc8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0cd8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0ce8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0cf8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0c0c, 0x3f800000);
+	nv_mthd(dev, 0x9097, 0x0c1c, 0x3f800000);
+	nv_mthd(dev, 0x9097, 0x0c2c, 0x3f800000);
+	nv_mthd(dev, 0x9097, 0x0c3c, 0x3f800000);
+	nv_mthd(dev, 0x9097, 0x0c4c, 0x3f800000);
+	nv_mthd(dev, 0x9097, 0x0c5c, 0x3f800000);
+	nv_mthd(dev, 0x9097, 0x0c6c, 0x3f800000);
+	nv_mthd(dev, 0x9097, 0x0c7c, 0x3f800000);
+	nv_mthd(dev, 0x9097, 0x0c8c, 0x3f800000);
+	nv_mthd(dev, 0x9097, 0x0c9c, 0x3f800000);
+	nv_mthd(dev, 0x9097, 0x0cac, 0x3f800000);
+	nv_mthd(dev, 0x9097, 0x0cbc, 0x3f800000);
+	nv_mthd(dev, 0x9097, 0x0ccc, 0x3f800000);
+	nv_mthd(dev, 0x9097, 0x0cdc, 0x3f800000);
+	nv_mthd(dev, 0x9097, 0x0cec, 0x3f800000);
+	nv_mthd(dev, 0x9097, 0x0cfc, 0x3f800000);
+	nv_mthd(dev, 0x9097, 0x0d00, 0xffff0000);
+	nv_mthd(dev, 0x9097, 0x0d08, 0xffff0000);
+	nv_mthd(dev, 0x9097, 0x0d10, 0xffff0000);
+	nv_mthd(dev, 0x9097, 0x0d18, 0xffff0000);
+	nv_mthd(dev, 0x9097, 0x0d20, 0xffff0000);
+	nv_mthd(dev, 0x9097, 0x0d28, 0xffff0000);
+	nv_mthd(dev, 0x9097, 0x0d30, 0xffff0000);
+	nv_mthd(dev, 0x9097, 0x0d38, 0xffff0000);
+	nv_mthd(dev, 0x9097, 0x0d04, 0xffff0000);
+	nv_mthd(dev, 0x9097, 0x0d0c, 0xffff0000);
+	nv_mthd(dev, 0x9097, 0x0d14, 0xffff0000);
+	nv_mthd(dev, 0x9097, 0x0d1c, 0xffff0000);
+	nv_mthd(dev, 0x9097, 0x0d24, 0xffff0000);
+	nv_mthd(dev, 0x9097, 0x0d2c, 0xffff0000);
+	nv_mthd(dev, 0x9097, 0x0d34, 0xffff0000);
+	nv_mthd(dev, 0x9097, 0x0d3c, 0xffff0000);
+	nv_mthd(dev, 0x9097, 0x0e00, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0e10, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0e20, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0e30, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0e40, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0e50, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0e60, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0e70, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0e80, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0e90, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0ea0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0eb0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0ec0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0ed0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0ee0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0ef0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0e04, 0xffff0000);
+	nv_mthd(dev, 0x9097, 0x0e14, 0xffff0000);
+	nv_mthd(dev, 0x9097, 0x0e24, 0xffff0000);
+	nv_mthd(dev, 0x9097, 0x0e34, 0xffff0000);
+	nv_mthd(dev, 0x9097, 0x0e44, 0xffff0000);
+	nv_mthd(dev, 0x9097, 0x0e54, 0xffff0000);
+	nv_mthd(dev, 0x9097, 0x0e64, 0xffff0000);
+	nv_mthd(dev, 0x9097, 0x0e74, 0xffff0000);
+	nv_mthd(dev, 0x9097, 0x0e84, 0xffff0000);
+	nv_mthd(dev, 0x9097, 0x0e94, 0xffff0000);
+	nv_mthd(dev, 0x9097, 0x0ea4, 0xffff0000);
+	nv_mthd(dev, 0x9097, 0x0eb4, 0xffff0000);
+	nv_mthd(dev, 0x9097, 0x0ec4, 0xffff0000);
+	nv_mthd(dev, 0x9097, 0x0ed4, 0xffff0000);
+	nv_mthd(dev, 0x9097, 0x0ee4, 0xffff0000);
+	nv_mthd(dev, 0x9097, 0x0ef4, 0xffff0000);
+	nv_mthd(dev, 0x9097, 0x0e08, 0xffff0000);
+	nv_mthd(dev, 0x9097, 0x0e18, 0xffff0000);
+	nv_mthd(dev, 0x9097, 0x0e28, 0xffff0000);
+	nv_mthd(dev, 0x9097, 0x0e38, 0xffff0000);
+	nv_mthd(dev, 0x9097, 0x0e48, 0xffff0000);
+	nv_mthd(dev, 0x9097, 0x0e58, 0xffff0000);
+	nv_mthd(dev, 0x9097, 0x0e68, 0xffff0000);
+	nv_mthd(dev, 0x9097, 0x0e78, 0xffff0000);
+	nv_mthd(dev, 0x9097, 0x0e88, 0xffff0000);
+	nv_mthd(dev, 0x9097, 0x0e98, 0xffff0000);
+	nv_mthd(dev, 0x9097, 0x0ea8, 0xffff0000);
+	nv_mthd(dev, 0x9097, 0x0eb8, 0xffff0000);
+	nv_mthd(dev, 0x9097, 0x0ec8, 0xffff0000);
+	nv_mthd(dev, 0x9097, 0x0ed8, 0xffff0000);
+	nv_mthd(dev, 0x9097, 0x0ee8, 0xffff0000);
+	nv_mthd(dev, 0x9097, 0x0ef8, 0xffff0000);
+	nv_mthd(dev, 0x9097, 0x0d40, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0d48, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0d50, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0d58, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0d44, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0d4c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0d54, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0d5c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1e00, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x1e20, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x1e40, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x1e60, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x1e80, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x1ea0, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x1ec0, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x1ee0, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x1e04, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x1e24, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x1e44, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x1e64, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x1e84, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x1ea4, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x1ec4, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x1ee4, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x1e08, 0x00000002);
+	nv_mthd(dev, 0x9097, 0x1e28, 0x00000002);
+	nv_mthd(dev, 0x9097, 0x1e48, 0x00000002);
+	nv_mthd(dev, 0x9097, 0x1e68, 0x00000002);
+	nv_mthd(dev, 0x9097, 0x1e88, 0x00000002);
+	nv_mthd(dev, 0x9097, 0x1ea8, 0x00000002);
+	nv_mthd(dev, 0x9097, 0x1ec8, 0x00000002);
+	nv_mthd(dev, 0x9097, 0x1ee8, 0x00000002);
+	nv_mthd(dev, 0x9097, 0x1e0c, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x1e2c, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x1e4c, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x1e6c, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x1e8c, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x1eac, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x1ecc, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x1eec, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x1e10, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x1e30, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x1e50, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x1e70, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x1e90, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x1eb0, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x1ed0, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x1ef0, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x1e14, 0x00000002);
+	nv_mthd(dev, 0x9097, 0x1e34, 0x00000002);
+	nv_mthd(dev, 0x9097, 0x1e54, 0x00000002);
+	nv_mthd(dev, 0x9097, 0x1e74, 0x00000002);
+	nv_mthd(dev, 0x9097, 0x1e94, 0x00000002);
+	nv_mthd(dev, 0x9097, 0x1eb4, 0x00000002);
+	nv_mthd(dev, 0x9097, 0x1ed4, 0x00000002);
+	nv_mthd(dev, 0x9097, 0x1ef4, 0x00000002);
+	nv_mthd(dev, 0x9097, 0x1e18, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x1e38, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x1e58, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x1e78, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x1e98, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x1eb8, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x1ed8, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x1ef8, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x3400, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x3404, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x3408, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x340c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x3410, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x3414, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x3418, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x341c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x3420, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x3424, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x3428, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x342c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x3430, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x3434, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x3438, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x343c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x3440, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x3444, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x3448, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x344c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x3450, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x3454, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x3458, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x345c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x3460, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x3464, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x3468, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x346c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x3470, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x3474, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x3478, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x347c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x3480, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x3484, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x3488, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x348c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x3490, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x3494, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x3498, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x349c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x34a0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x34a4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x34a8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x34ac, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x34b0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x34b4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x34b8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x34bc, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x34c0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x34c4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x34c8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x34cc, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x34d0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x34d4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x34d8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x34dc, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x34e0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x34e4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x34e8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x34ec, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x34f0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x34f4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x34f8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x34fc, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x3500, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x3504, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x3508, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x350c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x3510, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x3514, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x3518, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x351c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x3520, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x3524, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x3528, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x352c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x3530, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x3534, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x3538, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x353c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x3540, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x3544, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x3548, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x354c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x3550, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x3554, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x3558, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x355c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x3560, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x3564, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x3568, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x356c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x3570, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x3574, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x3578, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x357c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x3580, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x3584, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x3588, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x358c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x3590, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x3594, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x3598, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x359c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x35a0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x35a4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x35a8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x35ac, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x35b0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x35b4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x35b8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x35bc, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x35c0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x35c4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x35c8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x35cc, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x35d0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x35d4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x35d8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x35dc, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x35e0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x35e4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x35e8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x35ec, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x35f0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x35f4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x35f8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x35fc, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x030c, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x1944, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1514, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0d68, 0x0000ffff);
+	nv_mthd(dev, 0x9097, 0x121c, 0x0fac6881);
+	nv_mthd(dev, 0x9097, 0x0fac, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x1538, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x0fe0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0fe4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0fe8, 0x00000014);
+	nv_mthd(dev, 0x9097, 0x0fec, 0x00000040);
+	nv_mthd(dev, 0x9097, 0x0ff0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x179c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1228, 0x00000400);
+	nv_mthd(dev, 0x9097, 0x122c, 0x00000300);
+	nv_mthd(dev, 0x9097, 0x1230, 0x00010001);
+	nv_mthd(dev, 0x9097, 0x07f8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x15b4, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x15cc, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1534, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0fb0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x15d0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x153c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x16b4, 0x00000003);
+	nv_mthd(dev, 0x9097, 0x0fbc, 0x0000ffff);
+	nv_mthd(dev, 0x9097, 0x0fc0, 0x0000ffff);
+	nv_mthd(dev, 0x9097, 0x0fc4, 0x0000ffff);
+	nv_mthd(dev, 0x9097, 0x0fc8, 0x0000ffff);
+	nv_mthd(dev, 0x9097, 0x0df8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0dfc, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1948, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1970, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x161c, 0x000009f0);
+	nv_mthd(dev, 0x9097, 0x0dcc, 0x00000010);
+	nv_mthd(dev, 0x9097, 0x163c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x15e4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1160, 0x25e00040);
+	nv_mthd(dev, 0x9097, 0x1164, 0x25e00040);
+	nv_mthd(dev, 0x9097, 0x1168, 0x25e00040);
+	nv_mthd(dev, 0x9097, 0x116c, 0x25e00040);
+	nv_mthd(dev, 0x9097, 0x1170, 0x25e00040);
+	nv_mthd(dev, 0x9097, 0x1174, 0x25e00040);
+	nv_mthd(dev, 0x9097, 0x1178, 0x25e00040);
+	nv_mthd(dev, 0x9097, 0x117c, 0x25e00040);
+	nv_mthd(dev, 0x9097, 0x1180, 0x25e00040);
+	nv_mthd(dev, 0x9097, 0x1184, 0x25e00040);
+	nv_mthd(dev, 0x9097, 0x1188, 0x25e00040);
+	nv_mthd(dev, 0x9097, 0x118c, 0x25e00040);
+	nv_mthd(dev, 0x9097, 0x1190, 0x25e00040);
+	nv_mthd(dev, 0x9097, 0x1194, 0x25e00040);
+	nv_mthd(dev, 0x9097, 0x1198, 0x25e00040);
+	nv_mthd(dev, 0x9097, 0x119c, 0x25e00040);
+	nv_mthd(dev, 0x9097, 0x11a0, 0x25e00040);
+	nv_mthd(dev, 0x9097, 0x11a4, 0x25e00040);
+	nv_mthd(dev, 0x9097, 0x11a8, 0x25e00040);
+	nv_mthd(dev, 0x9097, 0x11ac, 0x25e00040);
+	nv_mthd(dev, 0x9097, 0x11b0, 0x25e00040);
+	nv_mthd(dev, 0x9097, 0x11b4, 0x25e00040);
+	nv_mthd(dev, 0x9097, 0x11b8, 0x25e00040);
+	nv_mthd(dev, 0x9097, 0x11bc, 0x25e00040);
+	nv_mthd(dev, 0x9097, 0x11c0, 0x25e00040);
+	nv_mthd(dev, 0x9097, 0x11c4, 0x25e00040);
+	nv_mthd(dev, 0x9097, 0x11c8, 0x25e00040);
+	nv_mthd(dev, 0x9097, 0x11cc, 0x25e00040);
+	nv_mthd(dev, 0x9097, 0x11d0, 0x25e00040);
+	nv_mthd(dev, 0x9097, 0x11d4, 0x25e00040);
+	nv_mthd(dev, 0x9097, 0x11d8, 0x25e00040);
+	nv_mthd(dev, 0x9097, 0x11dc, 0x25e00040);
+	nv_mthd(dev, 0x9097, 0x1880, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1884, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1888, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x188c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1890, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1894, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1898, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x189c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x18a0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x18a4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x18a8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x18ac, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x18b0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x18b4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x18b8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x18bc, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x18c0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x18c4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x18c8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x18cc, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x18d0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x18d4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x18d8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x18dc, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x18e0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x18e4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x18e8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x18ec, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x18f0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x18f4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x18f8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x18fc, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0f84, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0f88, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x17c8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x17cc, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x17d0, 0x000000ff);
+	nv_mthd(dev, 0x9097, 0x17d4, 0xffffffff);
+	nv_mthd(dev, 0x9097, 0x17d8, 0x00000002);
+	nv_mthd(dev, 0x9097, 0x17dc, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x15f4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x15f8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1434, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1438, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0d74, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0dec, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x13a4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1318, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x1644, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0748, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0de8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1648, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x12a4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1120, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1124, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1128, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x112c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1118, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x164c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1658, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1910, 0x00000290);
+	nv_mthd(dev, 0x9097, 0x1518, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x165c, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x1520, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1604, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1570, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x13b0, 0x3f800000);
+	nv_mthd(dev, 0x9097, 0x13b4, 0x3f800000);
+	nv_mthd(dev, 0x9097, 0x020c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1670, 0x30201000);
+	nv_mthd(dev, 0x9097, 0x1674, 0x70605040);
+	nv_mthd(dev, 0x9097, 0x1678, 0xb8a89888);
+	nv_mthd(dev, 0x9097, 0x167c, 0xf8e8d8c8);
+	nv_mthd(dev, 0x9097, 0x166c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1680, 0x00ffff00);
+	nv_mthd(dev, 0x9097, 0x12d0, 0x00000003);
+	nv_mthd(dev, 0x9097, 0x12d4, 0x00000002);
+	nv_mthd(dev, 0x9097, 0x1684, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1688, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0dac, 0x00001b02);
+	nv_mthd(dev, 0x9097, 0x0db0, 0x00001b02);
+	nv_mthd(dev, 0x9097, 0x0db4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x168c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x15bc, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x156c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x187c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1110, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x0dc0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0dc4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0dc8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1234, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1690, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x12ac, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x02c4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0790, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0794, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0798, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x079c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x07a0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x077c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1000, 0x00000010);
+	nv_mthd(dev, 0x9097, 0x10fc, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1290, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0218, 0x00000010);
+	nv_mthd(dev, 0x9097, 0x12d8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x12dc, 0x00000010);
+	nv_mthd(dev, 0x9097, 0x0d94, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x155c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1560, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1564, 0x00001fff);
+	nv_mthd(dev, 0x9097, 0x1574, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1578, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x157c, 0x003fffff);
+	nv_mthd(dev, 0x9097, 0x1354, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1664, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1610, 0x00000012);
+	nv_mthd(dev, 0x9097, 0x1608, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x160c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x162c, 0x00000003);
+	nv_mthd(dev, 0x9097, 0x0210, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0320, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0324, 0x3f800000);
+	nv_mthd(dev, 0x9097, 0x0328, 0x3f800000);
+	nv_mthd(dev, 0x9097, 0x032c, 0x3f800000);
+	nv_mthd(dev, 0x9097, 0x0330, 0x3f800000);
+	nv_mthd(dev, 0x9097, 0x0334, 0x3f800000);
+	nv_mthd(dev, 0x9097, 0x0338, 0x3f800000);
+	nv_mthd(dev, 0x9097, 0x0750, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0760, 0x39291909);
+	nv_mthd(dev, 0x9097, 0x0764, 0x79695949);
+	nv_mthd(dev, 0x9097, 0x0768, 0xb9a99989);
+	nv_mthd(dev, 0x9097, 0x076c, 0xf9e9d9c9);
+	nv_mthd(dev, 0x9097, 0x0770, 0x30201000);
+	nv_mthd(dev, 0x9097, 0x0774, 0x70605040);
+	nv_mthd(dev, 0x9097, 0x0778, 0x00009080);
+	nv_mthd(dev, 0x9097, 0x0780, 0x39291909);
+	nv_mthd(dev, 0x9097, 0x0784, 0x79695949);
+	nv_mthd(dev, 0x9097, 0x0788, 0xb9a99989);
+	nv_mthd(dev, 0x9097, 0x078c, 0xf9e9d9c9);
+	nv_mthd(dev, 0x9097, 0x07d0, 0x30201000);
+	nv_mthd(dev, 0x9097, 0x07d4, 0x70605040);
+	nv_mthd(dev, 0x9097, 0x07d8, 0x00009080);
+	nv_mthd(dev, 0x9097, 0x037c, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x0740, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0744, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x2600, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1918, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x191c, 0x00000900);
+	nv_mthd(dev, 0x9097, 0x1920, 0x00000405);
+	nv_mthd(dev, 0x9097, 0x1308, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x1924, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x13ac, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x192c, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x193c, 0x00002c1c);
+	nv_mthd(dev, 0x9097, 0x0d7c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0f8c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x02c0, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x1510, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1940, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0ff4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0ff8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x194c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1950, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1968, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1590, 0x0000003f);
+	nv_mthd(dev, 0x9097, 0x07e8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x07ec, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x07f0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x07f4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x196c, 0x00000011);
+	nv_mthd(dev, 0x9097, 0x197c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0fcc, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0fd0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x02d8, 0x00000040);
+	nv_mthd(dev, 0x9097, 0x1980, 0x00000080);
+	nv_mthd(dev, 0x9097, 0x1504, 0x00000080);
+	nv_mthd(dev, 0x9097, 0x1984, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0300, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x13a8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x12ec, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1310, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1314, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x1380, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1384, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x1388, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x138c, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x1390, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x1394, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x139c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1398, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1594, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1598, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x159c, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x15a0, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x15a4, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x0f54, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0f58, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0f5c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x19bc, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0f9c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0fa0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x12cc, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x12e8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x130c, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x1360, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1364, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1368, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x136c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1370, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1374, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1378, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x137c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x133c, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x1340, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x1344, 0x00000002);
+	nv_mthd(dev, 0x9097, 0x1348, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x134c, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x1350, 0x00000002);
+	nv_mthd(dev, 0x9097, 0x1358, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x12e4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x131c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1320, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1324, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1328, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x19c0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1140, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x19c4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x19c8, 0x00001500);
+	nv_mthd(dev, 0x9097, 0x135c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0f90, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x19e0, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x19e4, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x19e8, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x19ec, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x19f0, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x19f4, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x19f8, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x19fc, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x19cc, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x15b8, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1a00, 0x00001111);
+	nv_mthd(dev, 0x9097, 0x1a04, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1a08, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1a0c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1a10, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1a14, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1a18, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1a1c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0d6c, 0xffff0000);
+	nv_mthd(dev, 0x9097, 0x0d70, 0xffff0000);
+	nv_mthd(dev, 0x9097, 0x10f8, 0x00001010);
+	nv_mthd(dev, 0x9097, 0x0d80, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0d84, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0d88, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0d8c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0d90, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0da0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1508, 0x80000000);
+	nv_mthd(dev, 0x9097, 0x150c, 0x40000000);
+	nv_mthd(dev, 0x9097, 0x1668, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0318, 0x00000008);
+	nv_mthd(dev, 0x9097, 0x031c, 0x00000008);
+	nv_mthd(dev, 0x9097, 0x0d9c, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x07dc, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x074c, 0x00000055);
+	nv_mthd(dev, 0x9097, 0x1420, 0x00000003);
+	nv_mthd(dev, 0x9097, 0x17bc, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x17c0, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x17c4, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x1008, 0x00000008);
+	nv_mthd(dev, 0x9097, 0x100c, 0x00000040);
+	nv_mthd(dev, 0x9097, 0x1010, 0x0000012c);
+	nv_mthd(dev, 0x9097, 0x0d60, 0x00000040);
+	nv_mthd(dev, 0x9097, 0x075c, 0x00000003);
+	nv_mthd(dev, 0x9097, 0x1018, 0x00000020);
+	nv_mthd(dev, 0x9097, 0x101c, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x1020, 0x00000020);
+	nv_mthd(dev, 0x9097, 0x1024, 0x00000001);
+	nv_mthd(dev, 0x9097, 0x1444, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x1448, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x144c, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0360, 0x20164010);
+	nv_mthd(dev, 0x9097, 0x0364, 0x00000020);
+	nv_mthd(dev, 0x9097, 0x0368, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0de4, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0204, 0x00000006);
+	nv_mthd(dev, 0x9097, 0x0208, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x02cc, 0x003fffff);
+	nv_mthd(dev, 0x9097, 0x02d0, 0x00000c48);
+	nv_mthd(dev, 0x9097, 0x1220, 0x00000005);
+	nv_mthd(dev, 0x9097, 0x0fdc, 0x00000000);
+	nv_mthd(dev, 0x9097, 0x0f98, 0x00300008);
+	nv_mthd(dev, 0x9097, 0x1284, 0x04000080);
+	nv_mthd(dev, 0x9097, 0x1450, 0x00300008);
+	nv_mthd(dev, 0x9097, 0x1454, 0x04000080);
+	nv_mthd(dev, 0x9097, 0x0214, 0x00000000);
+	/* in trace, right after 0x90c0, not here */
+	nv_mthd(dev, 0x9097, 0x3410, 0x80002006);
+}
+
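+/* default method state for object class 0x902d (the Fermi 2D class) */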
+static void
+nvc0_grctx_generate_902d(struct drm_device *dev)
+{
+	nv_mthd(dev, 0x902d, 0x0200, 0x000000cf);
+	nv_mthd(dev, 0x902d, 0x0204, 0x00000001);
+	nv_mthd(dev, 0x902d, 0x0208, 0x00000020);
+	nv_mthd(dev, 0x902d, 0x020c, 0x00000001);
+	nv_mthd(dev, 0x902d, 0x0210, 0x00000000);
+	nv_mthd(dev, 0x902d, 0x0214, 0x00000080);
+	nv_mthd(dev, 0x902d, 0x0218, 0x00000100);
+	nv_mthd(dev, 0x902d, 0x021c, 0x00000100);
+	nv_mthd(dev, 0x902d, 0x0220, 0x00000000);
+	nv_mthd(dev, 0x902d, 0x0224, 0x00000000);
+	nv_mthd(dev, 0x902d, 0x0230, 0x000000cf);
+	nv_mthd(dev, 0x902d, 0x0234, 0x00000001);
+	nv_mthd(dev, 0x902d, 0x0238, 0x00000020);
+	nv_mthd(dev, 0x902d, 0x023c, 0x00000001);
+	nv_mthd(dev, 0x902d, 0x0244, 0x00000080);
+	nv_mthd(dev, 0x902d, 0x0248, 0x00000100);
+	nv_mthd(dev, 0x902d, 0x024c, 0x00000100);
+}
+
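+/* default method state for object class 0x9039 (Fermi M2MF) */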
+static void
+nvc0_grctx_generate_9039(struct drm_device *dev)
+{
+	nv_mthd(dev, 0x9039, 0x030c, 0x00000000);
+	nv_mthd(dev, 0x9039, 0x0310, 0x00000000);
+	nv_mthd(dev, 0x9039, 0x0314, 0x00000000);
+	nv_mthd(dev, 0x9039, 0x0320, 0x00000000);
+	nv_mthd(dev, 0x9039, 0x0238, 0x00000000);
+	nv_mthd(dev, 0x9039, 0x023c, 0x00000000);
+	nv_mthd(dev, 0x9039, 0x0318, 0x00000000);
+	nv_mthd(dev, 0x9039, 0x031c, 0x00000000);
+}
+
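+/* default method state for object class 0x90c0 (Fermi compute) */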
+static void
+nvc0_grctx_generate_90c0(struct drm_device *dev)
+{
+	nv_mthd(dev, 0x90c0, 0x270c, 0x00000000);
+	nv_mthd(dev, 0x90c0, 0x272c, 0x00000000);
+	nv_mthd(dev, 0x90c0, 0x274c, 0x00000000);
+	nv_mthd(dev, 0x90c0, 0x276c, 0x00000000);
+	nv_mthd(dev, 0x90c0, 0x278c, 0x00000000);
+	nv_mthd(dev, 0x90c0, 0x27ac, 0x00000000);
+	nv_mthd(dev, 0x90c0, 0x27cc, 0x00000000);
+	nv_mthd(dev, 0x90c0, 0x27ec, 0x00000000);
+	nv_mthd(dev, 0x90c0, 0x030c, 0x00000001);
+	nv_mthd(dev, 0x90c0, 0x1944, 0x00000000);
+	nv_mthd(dev, 0x90c0, 0x0758, 0x00000100);
+	nv_mthd(dev, 0x90c0, 0x02c4, 0x00000000);
+	nv_mthd(dev, 0x90c0, 0x0790, 0x00000000);
+	nv_mthd(dev, 0x90c0, 0x0794, 0x00000000);
+	nv_mthd(dev, 0x90c0, 0x0798, 0x00000000);
+	nv_mthd(dev, 0x90c0, 0x079c, 0x00000000);
+	nv_mthd(dev, 0x90c0, 0x07a0, 0x00000000);
+	nv_mthd(dev, 0x90c0, 0x077c, 0x00000000);
+	nv_mthd(dev, 0x90c0, 0x0204, 0x00000000);
+	nv_mthd(dev, 0x90c0, 0x0208, 0x00000000);
+	nv_mthd(dev, 0x90c0, 0x020c, 0x00000000);
+	nv_mthd(dev, 0x90c0, 0x0214, 0x00000000);
+	nv_mthd(dev, 0x90c0, 0x024c, 0x00000000);
+	nv_mthd(dev, 0x90c0, 0x0d94, 0x00000001);
+	nv_mthd(dev, 0x90c0, 0x1608, 0x00000000);
+	nv_mthd(dev, 0x90c0, 0x160c, 0x00000000);
+	nv_mthd(dev, 0x90c0, 0x1664, 0x00000000);
+}
+
+static void
+nvc0_grctx_generate_dispatch(struct drm_device *dev)
+{
+	int i;
+
+	nv_wr32(dev, 0x404004, 0x00000000);
+	nv_wr32(dev, 0x404008, 0x00000000);
+	nv_wr32(dev, 0x40400c, 0x00000000);
+	nv_wr32(dev, 0x404010, 0x00000000);
+	nv_wr32(dev, 0x404014, 0x00000000);
+	nv_wr32(dev, 0x404018, 0x00000000);
+	nv_wr32(dev, 0x40401c, 0x00000000);
+	nv_wr32(dev, 0x404020, 0x00000000);
+	nv_wr32(dev, 0x404024, 0x00000000);
+	nv_wr32(dev, 0x404028, 0x00000000);
+	nv_wr32(dev, 0x40402c, 0x00000000);
+	nv_wr32(dev, 0x404044, 0x00000000);
+	nv_wr32(dev, 0x404094, 0x00000000);
+	nv_wr32(dev, 0x404098, 0x00000000);
+	nv_wr32(dev, 0x40409c, 0x00000000);
+	nv_wr32(dev, 0x4040a0, 0x00000000);
+	nv_wr32(dev, 0x4040a4, 0x00000000);
+	nv_wr32(dev, 0x4040a8, 0x00000000);
+	nv_wr32(dev, 0x4040ac, 0x00000000);
+	nv_wr32(dev, 0x4040b0, 0x00000000);
+	nv_wr32(dev, 0x4040b4, 0x00000000);
+	nv_wr32(dev, 0x4040b8, 0x00000000);
+	nv_wr32(dev, 0x4040bc, 0x00000000);
+	nv_wr32(dev, 0x4040c0, 0x00000000);
+	nv_wr32(dev, 0x4040c4, 0x00000000);
+	nv_wr32(dev, 0x4040c8, 0xf0000087);
+	nv_wr32(dev, 0x4040d4, 0x00000000);
+	nv_wr32(dev, 0x4040d8, 0x00000000);
+	nv_wr32(dev, 0x4040dc, 0x00000000);
+	nv_wr32(dev, 0x4040e0, 0x00000000);
+	nv_wr32(dev, 0x4040e4, 0x00000000);
+	nv_wr32(dev, 0x4040e8, 0x00001000);
+	nv_wr32(dev, 0x4040f8, 0x00000000);
+	nv_wr32(dev, 0x404130, 0x00000000);
+	nv_wr32(dev, 0x404134, 0x00000000);
+	nv_wr32(dev, 0x404138, 0x20000040);
+	nv_wr32(dev, 0x404150, 0x0000002e);
+	nv_wr32(dev, 0x404154, 0x00000400);
+	nv_wr32(dev, 0x404158, 0x00000200);
+	nv_wr32(dev, 0x404164, 0x00000055);
+	nv_wr32(dev, 0x404168, 0x00000000);
+	nv_wr32(dev, 0x404174, 0x00000000);
+	nv_wr32(dev, 0x404178, 0x00000000);
+	nv_wr32(dev, 0x40417c, 0x00000000);
+	for (i = 0; i < 8; i++)
+		nv_wr32(dev, 0x404200 + (i * 4), 0x00000000); /* subc */
+}
+
+static void
+nvc0_grctx_generate_macro(struct drm_device *dev)
+{
+	nv_wr32(dev, 0x404404, 0x00000000);
+	nv_wr32(dev, 0x404408, 0x00000000);
+	nv_wr32(dev, 0x40440c, 0x00000000);
+	nv_wr32(dev, 0x404410, 0x00000000);
+	nv_wr32(dev, 0x404414, 0x00000000);
+	nv_wr32(dev, 0x404418, 0x00000000);
+	nv_wr32(dev, 0x40441c, 0x00000000);
+	nv_wr32(dev, 0x404420, 0x00000000);
+	nv_wr32(dev, 0x404424, 0x00000000);
+	nv_wr32(dev, 0x404428, 0x00000000);
+	nv_wr32(dev, 0x40442c, 0x00000000);
+	nv_wr32(dev, 0x404430, 0x00000000);
+	nv_wr32(dev, 0x404434, 0x00000000);
+	nv_wr32(dev, 0x404438, 0x00000000);
+	nv_wr32(dev, 0x404460, 0x00000000);
+	nv_wr32(dev, 0x404464, 0x00000000);
+	nv_wr32(dev, 0x404468, 0x00ffffff);
+	nv_wr32(dev, 0x40446c, 0x00000000);
+	nv_wr32(dev, 0x404480, 0x00000001);
+	nv_wr32(dev, 0x404498, 0x00000001);
+}
+
+static void
+nvc0_grctx_generate_m2mf(struct drm_device *dev)
+{
+	nv_wr32(dev, 0x404604, 0x00000015);
+	nv_wr32(dev, 0x404608, 0x00000000);
+	nv_wr32(dev, 0x40460c, 0x00002e00);
+	nv_wr32(dev, 0x404610, 0x00000100);
+	nv_wr32(dev, 0x404618, 0x00000000);
+	nv_wr32(dev, 0x40461c, 0x00000000);
+	nv_wr32(dev, 0x404620, 0x00000000);
+	nv_wr32(dev, 0x404624, 0x00000000);
+	nv_wr32(dev, 0x404628, 0x00000000);
+	nv_wr32(dev, 0x40462c, 0x00000000);
+	nv_wr32(dev, 0x404630, 0x00000000);
+	nv_wr32(dev, 0x404634, 0x00000000);
+	nv_wr32(dev, 0x404638, 0x00000004);
+	nv_wr32(dev, 0x40463c, 0x00000000);
+	nv_wr32(dev, 0x404640, 0x00000000);
+	nv_wr32(dev, 0x404644, 0x00000000);
+	nv_wr32(dev, 0x404648, 0x00000000);
+	nv_wr32(dev, 0x40464c, 0x00000000);
+	nv_wr32(dev, 0x404650, 0x00000000);
+	nv_wr32(dev, 0x404654, 0x00000000);
+	nv_wr32(dev, 0x404658, 0x00000000);
+	nv_wr32(dev, 0x40465c, 0x007f0100);
+	nv_wr32(dev, 0x404660, 0x00000000);
+	nv_wr32(dev, 0x404664, 0x00000000);
+	nv_wr32(dev, 0x404668, 0x00000000);
+	nv_wr32(dev, 0x40466c, 0x00000000);
+	nv_wr32(dev, 0x404670, 0x00000000);
+	nv_wr32(dev, 0x404674, 0x00000000);
+	nv_wr32(dev, 0x404678, 0x00000000);
+	nv_wr32(dev, 0x40467c, 0x00000002);
+	nv_wr32(dev, 0x404680, 0x00000000);
+	nv_wr32(dev, 0x404684, 0x00000000);
+	nv_wr32(dev, 0x404688, 0x00000000);
+	nv_wr32(dev, 0x40468c, 0x00000000);
+	nv_wr32(dev, 0x404690, 0x00000000);
+	nv_wr32(dev, 0x404694, 0x00000000);
+	nv_wr32(dev, 0x404698, 0x00000000);
+	nv_wr32(dev, 0x40469c, 0x00000000);
+	nv_wr32(dev, 0x4046a0, 0x007f0080);
+	nv_wr32(dev, 0x4046a4, 0x00000000);
+	nv_wr32(dev, 0x4046a8, 0x00000000);
+	nv_wr32(dev, 0x4046ac, 0x00000000);
+	nv_wr32(dev, 0x4046b0, 0x00000000);
+	nv_wr32(dev, 0x4046b4, 0x00000000);
+	nv_wr32(dev, 0x4046b8, 0x00000000);
+	nv_wr32(dev, 0x4046bc, 0x00000000);
+	nv_wr32(dev, 0x4046c0, 0x00000000);
+	nv_wr32(dev, 0x4046c4, 0x00000000);
+	nv_wr32(dev, 0x4046c8, 0x00000000);
+	nv_wr32(dev, 0x4046cc, 0x00000000);
+	nv_wr32(dev, 0x4046d0, 0x00000000);
+	nv_wr32(dev, 0x4046d4, 0x00000000);
+	nv_wr32(dev, 0x4046d8, 0x00000000);
+	nv_wr32(dev, 0x4046dc, 0x00000000);
+	nv_wr32(dev, 0x4046e0, 0x00000000);
+	nv_wr32(dev, 0x4046e4, 0x00000000);
+	nv_wr32(dev, 0x4046e8, 0x00000000);
+	nv_wr32(dev, 0x4046f0, 0x00000000);
+	nv_wr32(dev, 0x4046f4, 0x00000000);
+}
+
+static void
+nvc0_grctx_generate_unk47xx(struct drm_device *dev)
+{
+	nv_wr32(dev, 0x404700, 0x00000000);
+	nv_wr32(dev, 0x404704, 0x00000000);
+	nv_wr32(dev, 0x404708, 0x00000000);
+	nv_wr32(dev, 0x40470c, 0x00000000);
+	nv_wr32(dev, 0x404710, 0x00000000);
+	nv_wr32(dev, 0x404714, 0x00000000);
+	nv_wr32(dev, 0x404718, 0x00000000);
+	nv_wr32(dev, 0x40471c, 0x00000000);
+	nv_wr32(dev, 0x404720, 0x00000000);
+	nv_wr32(dev, 0x404724, 0x00000000);
+	nv_wr32(dev, 0x404728, 0x00000000);
+	nv_wr32(dev, 0x40472c, 0x00000000);
+	nv_wr32(dev, 0x404730, 0x00000000);
+	nv_wr32(dev, 0x404734, 0x00000100);
+	nv_wr32(dev, 0x404738, 0x00000000);
+	nv_wr32(dev, 0x40473c, 0x00000000);
+	nv_wr32(dev, 0x404740, 0x00000000);
+	nv_wr32(dev, 0x404744, 0x00000000);
+	nv_wr32(dev, 0x404748, 0x00000000);
+	nv_wr32(dev, 0x40474c, 0x00000000);
+	nv_wr32(dev, 0x404750, 0x00000000);
+	nv_wr32(dev, 0x404754, 0x00000000);
+}
+
+static void
+nvc0_grctx_generate_shaders(struct drm_device *dev)
+{
+	nv_wr32(dev, 0x405800, 0x078000bf);
+	nv_wr32(dev, 0x405830, 0x02180000);
+	nv_wr32(dev, 0x405834, 0x00000000);
+	nv_wr32(dev, 0x405838, 0x00000000);
+	nv_wr32(dev, 0x405854, 0x00000000);
+	nv_wr32(dev, 0x405870, 0x00000001);
+	nv_wr32(dev, 0x405874, 0x00000001);
+	nv_wr32(dev, 0x405878, 0x00000001);
+	nv_wr32(dev, 0x40587c, 0x00000001);
+	nv_wr32(dev, 0x405a00, 0x00000000);
+	nv_wr32(dev, 0x405a04, 0x00000000);
+	nv_wr32(dev, 0x405a18, 0x00000000);
+}
+
+static void
+nvc0_grctx_generate_unk60xx(struct drm_device *dev)
+{
+	nv_wr32(dev, 0x406020, 0x000103c1);
+	nv_wr32(dev, 0x406028, 0x00000001);
+	nv_wr32(dev, 0x40602c, 0x00000001);
+	nv_wr32(dev, 0x406030, 0x00000001);
+	nv_wr32(dev, 0x406034, 0x00000001);
+}
+
+static void
+nvc0_grctx_generate_unk64xx(struct drm_device *dev)
+{
+	nv_wr32(dev, 0x4064a8, 0x00000000);
+	nv_wr32(dev, 0x4064ac, 0x00003fff);
+	nv_wr32(dev, 0x4064b4, 0x00000000);
+	nv_wr32(dev, 0x4064b8, 0x00000000);
+}
+
+static void
+nvc0_grctx_generate_tpbus(struct drm_device *dev)
+{
+	nv_wr32(dev, 0x407804, 0x00000023);
+	nv_wr32(dev, 0x40780c, 0x0a418820);
+	nv_wr32(dev, 0x407810, 0x062080e6);
+	nv_wr32(dev, 0x407814, 0x020398a4);
+	nv_wr32(dev, 0x407818, 0x0e629062);
+	nv_wr32(dev, 0x40781c, 0x0a418820);
+	nv_wr32(dev, 0x407820, 0x000000e6);
+	nv_wr32(dev, 0x4078bc, 0x00000103);
+}
+
+static void
+nvc0_grctx_generate_ccache(struct drm_device *dev)
+{
+	nv_wr32(dev, 0x408000, 0x00000000);
+	nv_wr32(dev, 0x408004, 0x00000000);
+	nv_wr32(dev, 0x408008, 0x00000018);
+	nv_wr32(dev, 0x40800c, 0x00000000);
+	nv_wr32(dev, 0x408010, 0x00000000);
+	nv_wr32(dev, 0x408014, 0x00000069);
+	nv_wr32(dev, 0x408018, 0xe100e100);
+	nv_wr32(dev, 0x408064, 0x00000000);
+}
+
+static void
+nvc0_grctx_generate_rop(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+	/* ROPC_BROADCAST */
+	nv_wr32(dev, 0x408800, 0x02802a3c);
+	nv_wr32(dev, 0x408804, 0x00000040);
+	nv_wr32(dev, 0x408808, 0x0003e00d);
+	switch (dev_priv->chipset) {
+	case 0xc0:
+		nv_wr32(dev, 0x408900, 0x0080b801);
+		break;
+	case 0xc3:
+	case 0xc4:
+		nv_wr32(dev, 0x408900, 0x3080b801);
+		break;
+	}
+	nv_wr32(dev, 0x408904, 0x02000001);
+	nv_wr32(dev, 0x408908, 0x00c80929);
+	nv_wr32(dev, 0x40890c, 0x00000000);
+	nv_wr32(dev, 0x408980, 0x0000011d);
+}
+
+static void
+nvc0_grctx_generate_gpc(struct drm_device *dev)
+{
+	int i;
+
+	/* GPC_BROADCAST */
+	nv_wr32(dev, 0x418380, 0x00000016);
+	nv_wr32(dev, 0x418400, 0x38004e00);
+	nv_wr32(dev, 0x418404, 0x71e0ffff);
+	nv_wr32(dev, 0x418408, 0x00000000);
+	nv_wr32(dev, 0x41840c, 0x00001008);
+	nv_wr32(dev, 0x418410, 0x0fff0fff);
+	nv_wr32(dev, 0x418414, 0x00200fff);
+	nv_wr32(dev, 0x418450, 0x00000000);
+	nv_wr32(dev, 0x418454, 0x00000000);
+	nv_wr32(dev, 0x418458, 0x00000000);
+	nv_wr32(dev, 0x41845c, 0x00000000);
+	nv_wr32(dev, 0x418460, 0x00000000);
+	nv_wr32(dev, 0x418464, 0x00000000);
+	nv_wr32(dev, 0x418468, 0x00000001);
+	nv_wr32(dev, 0x41846c, 0x00000000);
+	nv_wr32(dev, 0x418470, 0x00000000);
+	nv_wr32(dev, 0x418600, 0x0000001f);
+	nv_wr32(dev, 0x418684, 0x0000000f);
+	nv_wr32(dev, 0x418700, 0x00000002);
+	nv_wr32(dev, 0x418704, 0x00000080);
+	nv_wr32(dev, 0x418708, 0x00000000);
+	nv_wr32(dev, 0x41870c, 0x07c80000);
+	nv_wr32(dev, 0x418710, 0x00000000);
+	nv_wr32(dev, 0x418800, 0x0006860a);
+	nv_wr32(dev, 0x418808, 0x00000000);
+	nv_wr32(dev, 0x41880c, 0x00000000);
+	nv_wr32(dev, 0x418810, 0x00000000);
+	nv_wr32(dev, 0x418828, 0x00008442);
+	nv_wr32(dev, 0x418830, 0x00000001);
+	nv_wr32(dev, 0x4188d8, 0x00000008);
+	nv_wr32(dev, 0x4188e0, 0x01000000);
+	nv_wr32(dev, 0x4188e8, 0x00000000);
+	nv_wr32(dev, 0x4188ec, 0x00000000);
+	nv_wr32(dev, 0x4188f0, 0x00000000);
+	nv_wr32(dev, 0x4188f4, 0x00000000);
+	nv_wr32(dev, 0x4188f8, 0x00000000);
+	nv_wr32(dev, 0x4188fc, 0x00100000);
+	nv_wr32(dev, 0x41891c, 0x00ff00ff);
+	nv_wr32(dev, 0x418924, 0x00000000);
+	nv_wr32(dev, 0x418928, 0x00ffff00);
+	nv_wr32(dev, 0x41892c, 0x0000ff00);
+	for (i = 0; i < 8; i++) {
+		nv_wr32(dev, 0x418a00 + (i * 0x20), 0x00000000);
+		nv_wr32(dev, 0x418a04 + (i * 0x20), 0x00000000);
+		nv_wr32(dev, 0x418a08 + (i * 0x20), 0x00000000);
+		nv_wr32(dev, 0x418a0c + (i * 0x20), 0x00010000);
+		nv_wr32(dev, 0x418a10 + (i * 0x20), 0x00000000);
+		nv_wr32(dev, 0x418a14 + (i * 0x20), 0x00000000);
+		nv_wr32(dev, 0x418a18 + (i * 0x20), 0x00000000);
+	}
+	nv_wr32(dev, 0x418b00, 0x00000000);
+	nv_wr32(dev, 0x418b08, 0x0a418820);
+	nv_wr32(dev, 0x418b0c, 0x062080e6);
+	nv_wr32(dev, 0x418b10, 0x020398a4);
+	nv_wr32(dev, 0x418b14, 0x0e629062);
+	nv_wr32(dev, 0x418b18, 0x0a418820);
+	nv_wr32(dev, 0x418b1c, 0x000000e6);
+	nv_wr32(dev, 0x418bb8, 0x00000103);
+	nv_wr32(dev, 0x418c08, 0x00000001);
+	nv_wr32(dev, 0x418c10, 0x00000000);
+	nv_wr32(dev, 0x418c14, 0x00000000);
+	nv_wr32(dev, 0x418c18, 0x00000000);
+	nv_wr32(dev, 0x418c1c, 0x00000000);
+	nv_wr32(dev, 0x418c20, 0x00000000);
+	nv_wr32(dev, 0x418c24, 0x00000000);
+	nv_wr32(dev, 0x418c28, 0x00000000);
+	nv_wr32(dev, 0x418c2c, 0x00000000);
+	nv_wr32(dev, 0x418c80, 0x20200004);
+	nv_wr32(dev, 0x418c8c, 0x00000001);
+	nv_wr32(dev, 0x419000, 0x00000780);
+	nv_wr32(dev, 0x419004, 0x00000000);
+	nv_wr32(dev, 0x419008, 0x00000000);
+	nv_wr32(dev, 0x419014, 0x00000004);
+}
+
+static void
+nvc0_grctx_generate_tp(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+	/* GPC_BROADCAST.TP_BROADCAST */
+	nv_wr32(dev, 0x419848, 0x00000000);
+	nv_wr32(dev, 0x419864, 0x0000012a);
+	nv_wr32(dev, 0x419888, 0x00000000);
+	nv_wr32(dev, 0x419a00, 0x000001f0);
+	nv_wr32(dev, 0x419a04, 0x00000001);
+	nv_wr32(dev, 0x419a08, 0x00000023);
+	nv_wr32(dev, 0x419a0c, 0x00020000);
+	nv_wr32(dev, 0x419a10, 0x00000000);
+	nv_wr32(dev, 0x419a14, 0x00000200);
+	nv_wr32(dev, 0x419a1c, 0x00000000);
+	nv_wr32(dev, 0x419a20, 0x00000800);
+	if (dev_priv->chipset != 0xc0)
+		nv_wr32(dev, 0x00419ac4, 0x0007f440); /* 0xc3 */
+	nv_wr32(dev, 0x419b00, 0x0a418820);
+	nv_wr32(dev, 0x419b04, 0x062080e6);
+	nv_wr32(dev, 0x419b08, 0x020398a4);
+	nv_wr32(dev, 0x419b0c, 0x0e629062);
+	nv_wr32(dev, 0x419b10, 0x0a418820);
+	nv_wr32(dev, 0x419b14, 0x000000e6);
+	nv_wr32(dev, 0x419bd0, 0x00900103);
+	nv_wr32(dev, 0x419be0, 0x00000001);
+	nv_wr32(dev, 0x419be4, 0x00000000);
+	nv_wr32(dev, 0x419c00, 0x00000002);
+	nv_wr32(dev, 0x419c04, 0x00000006);
+	nv_wr32(dev, 0x419c08, 0x00000002);
+	nv_wr32(dev, 0x419c20, 0x00000000);
+	nv_wr32(dev, 0x419cbc, 0x28137606);
+	nv_wr32(dev, 0x419ce8, 0x00000000);
+	nv_wr32(dev, 0x419cf4, 0x00000183);
+	nv_wr32(dev, 0x419d20, 0x02180000);
+	nv_wr32(dev, 0x419d24, 0x00001fff);
+	nv_wr32(dev, 0x419e04, 0x00000000);
+	nv_wr32(dev, 0x419e08, 0x00000000);
+	nv_wr32(dev, 0x419e0c, 0x00000000);
+	nv_wr32(dev, 0x419e10, 0x00000002);
+	nv_wr32(dev, 0x419e44, 0x001beff2);
+	nv_wr32(dev, 0x419e48, 0x00000000);
+	nv_wr32(dev, 0x419e4c, 0x0000000f);
+	nv_wr32(dev, 0x419e50, 0x00000000);
+	nv_wr32(dev, 0x419e54, 0x00000000);
+	nv_wr32(dev, 0x419e58, 0x00000000);
+	nv_wr32(dev, 0x419e5c, 0x00000000);
+	nv_wr32(dev, 0x419e60, 0x00000000);
+	nv_wr32(dev, 0x419e64, 0x00000000);
+	nv_wr32(dev, 0x419e68, 0x00000000);
+	nv_wr32(dev, 0x419e6c, 0x00000000);
+	nv_wr32(dev, 0x419e70, 0x00000000);
+	nv_wr32(dev, 0x419e74, 0x00000000);
+	nv_wr32(dev, 0x419e78, 0x00000000);
+	nv_wr32(dev, 0x419e7c, 0x00000000);
+	nv_wr32(dev, 0x419e80, 0x00000000);
+	nv_wr32(dev, 0x419e84, 0x00000000);
+	nv_wr32(dev, 0x419e88, 0x00000000);
+	nv_wr32(dev, 0x419e8c, 0x00000000);
+	nv_wr32(dev, 0x419e90, 0x00000000);
+	nv_wr32(dev, 0x419e98, 0x00000000);
+	if (dev_priv->chipset != 0xc0)
+		nv_wr32(dev, 0x419ee0, 0x00011110);
+	nv_wr32(dev, 0x419f50, 0x00000000);
+	nv_wr32(dev, 0x419f54, 0x00000000);
+	if (dev_priv->chipset != 0xc0)
+		nv_wr32(dev, 0x419f58, 0x00000000);
+}
+
+int
+nvc0_grctx_generate(struct nouveau_channel *chan)
+{
+	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
+	struct nvc0_graph_priv *priv = dev_priv->engine.graph.priv;
+	struct nvc0_graph_chan *grch = chan->pgraph_ctx;
+	struct drm_device *dev = chan->dev;
+	int i, gpc, tp, id;
+	u32 r000260, tmp;
+
+	r000260 = nv_rd32(dev, 0x000260);
+	nv_wr32(dev, 0x000260, r000260 & ~1);
+	nv_wr32(dev, 0x400208, 0x00000000);
+
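+	/* write the default register state for each PGRAPH unit */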
+	nvc0_grctx_generate_dispatch(dev);
+	nvc0_grctx_generate_macro(dev);
+	nvc0_grctx_generate_m2mf(dev);
+	nvc0_grctx_generate_unk47xx(dev);
+	nvc0_grctx_generate_shaders(dev);
+	nvc0_grctx_generate_unk60xx(dev);
+	nvc0_grctx_generate_unk64xx(dev);
+	nvc0_grctx_generate_tpbus(dev);
+	nvc0_grctx_generate_ccache(dev);
+	nvc0_grctx_generate_rop(dev);
+	nvc0_grctx_generate_gpc(dev);
+	nvc0_grctx_generate_tp(dev);
+
+	nv_wr32(dev, 0x404154, 0x00000000);
+
+	/* fuc "mmio list" writes */
+	for (i = 0; i < grch->mmio_nr * 8; i += 8) {
+		u32 reg = nv_ro32(grch->mmio, i + 0);
+		nv_wr32(dev, reg, nv_ro32(grch->mmio, i + 4));
+	}
+
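+	/* hand out a sequential global ID to each present TP (slot 0 of every
+	 * GPC first, then slot 1, ...) and record each GPC's TP count
+	 */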
+	for (tp = 0, id = 0; tp < 4; tp++) {
+		for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
+			if (tp < priv->tp_nr[gpc]) {
+				nv_wr32(dev, TP_UNIT(gpc, tp, 0x698), id);
+				nv_wr32(dev, TP_UNIT(gpc, tp, 0x4e8), id);
+				nv_wr32(dev, GPC_UNIT(gpc, 0x0c10 + tp * 4), id);
+				nv_wr32(dev, TP_UNIT(gpc, tp, 0x088), id);
+				id++;
+			}
+
+			nv_wr32(dev, GPC_UNIT(gpc, 0x0c08), priv->tp_nr[gpc]);
+			nv_wr32(dev, GPC_UNIT(gpc, 0x0c8c), priv->tp_nr[gpc]);
+		}
+	}
+
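+	/* pack the per-GPC TP counts, one nibble per GPC */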
+	tmp = 0;
+	for (i = 0; i < priv->gpc_nr; i++)
+		tmp |= priv->tp_nr[i] << (i * 4);
+	nv_wr32(dev, 0x406028, tmp);
+	nv_wr32(dev, 0x405870, tmp);
+
+	nv_wr32(dev, 0x40602c, 0x00000000);
+	nv_wr32(dev, 0x405874, 0x00000000);
+	nv_wr32(dev, 0x406030, 0x00000000);
+	nv_wr32(dev, 0x405878, 0x00000000);
+	nv_wr32(dev, 0x406034, 0x00000000);
+	nv_wr32(dev, 0x40587c, 0x00000000);
+
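+	/* build a global-TP-index -> owning-GPC table (0x1f for unused slots)
+	 * and upload it four entries per register, starting at 0x4060a8
+	 */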
+	if (1) {
+		const u8 chipset_tp_max[] = { 16, 0, 0, 4, 8 };
+		u8 max = chipset_tp_max[dev_priv->chipset & 0x0f];
+		u8 tpnr[GPC_MAX];
+		u8 data[32];
+
+		memcpy(tpnr, priv->tp_nr, sizeof(priv->tp_nr));
+		memset(data, 0x1f, sizeof(data));
+
+		gpc = -1;
+		for (tp = 0; tp < priv->tp_total; tp++) {
+			do {
+				gpc = (gpc + 1) % priv->gpc_nr;
+			} while (!tpnr[gpc]);
+			tpnr[gpc]--;
+			data[tp] = gpc;
+		}
+
+		for (i = 0; i < max / 4; i++)
+			nv_wr32(dev, 0x4060a8 + (i * 4), ((u32 *)data)[i]);
+	}
+
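+	/* pack the TP -> GPC assignments five bits at a time (six per word),
+	 * derive a second set of magic values from the normalised TP total,
+	 * and broadcast both to the GPC, TP and unk78xx register copies
+	 * (semantics not documented)
+	 */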
+	if (1) {
+		u32 data[6] = {}, data2[2] = {};
+		u8 tpnr[GPC_MAX];
+		u8 shift, ntpcv;
+
+		/* calculate first set of magics */
+		memcpy(tpnr, priv->tp_nr, sizeof(priv->tp_nr));
+
+		for (tp = 0; tp < priv->tp_total; tp++) {
+			do {
+				gpc = (gpc + 1) % priv->gpc_nr;
+			} while (!tpnr[gpc]);
+			tpnr[gpc]--;
+
+			data[tp / 6] |= gpc << ((tp % 6) * 5);
+		}
+
+		for (; tp < 32; tp++)
+			data[tp / 6] |= 7 << ((tp % 6) * 5);
+
+		/* and the second... */
+		shift = 0;
+		ntpcv = priv->tp_total;
+		while (!(ntpcv & (1 << 4))) {
+			ntpcv <<= 1;
+			shift++;
+		}
+
+		data2[0]  = (ntpcv << 16);
+		data2[0] |= (shift << 21);
+		data2[0] |= (((1 << (0 + 5)) % ntpcv) << 24);
+		for (i = 1; i < 7; i++)
+			data2[1] |= ((1 << (i + 5)) % ntpcv) << ((i - 1) * 5);
+
+		/* GPC_BROADCAST */
+		nv_wr32(dev, 0x418bb8, (priv->tp_total << 8) |
+					priv->magic_not_rop_nr);
+		for (i = 0; i < 6; i++)
+			nv_wr32(dev, 0x418b08 + (i * 4), data[i]);
+
+		/* GPC_BROADCAST.TP_BROADCAST */
+		nv_wr32(dev, 0x419bd0, (priv->tp_total << 8) |
+				       priv->magic_not_rop_nr |
+				       data2[0]);
+		nv_wr32(dev, 0x419be4, data2[1]);
+		for (i = 0; i < 6; i++)
+			nv_wr32(dev, 0x419b00 + (i * 4), data[i]);
+
+		/* UNK78xx */
+		nv_wr32(dev, 0x4078bc, (priv->tp_total << 8) |
+					priv->magic_not_rop_nr);
+		for (i = 0; i < 6; i++)
+			nv_wr32(dev, 0x40780c + (i * 4), data[i]);
+	}
+
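+	/* spread the 32 entries of the 0x406800/0x406c00 tables across the
+	 * present TPs; tp_mask has one bit per TP (eight bits per GPC) and
+	 * 0x406c00 receives the complement of what 0x406800 receives
+	 */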
+	if (1) {
+		u32 tp_mask = 0, tp_set = 0;
+		u8  tpnr[GPC_MAX];
+
+		memcpy(tpnr, priv->tp_nr, sizeof(priv->tp_nr));
+		for (gpc = 0; gpc < priv->gpc_nr; gpc++)
+			tp_mask |= ((1 << priv->tp_nr[gpc]) - 1) << (gpc * 8);
+
+		gpc = -1;
+		for (i = 0, gpc = -1; i < 32; i++) {
+			int ltp = i * (priv->tp_total - 1) / 32;
+
+			do {
+				gpc = (gpc + 1) % priv->gpc_nr;
+			} while (!tpnr[gpc]);
+			tp = priv->tp_nr[gpc] - tpnr[gpc]--;
+
+			tp_set |= 1 << ((gpc * 8) + tp);
+
+			do {
+				nv_wr32(dev, 0x406800 + (i * 0x20), tp_set);
+				tp_set ^= tp_mask;
+				nv_wr32(dev, 0x406c00 + (i * 0x20), tp_set);
+				tp_set ^= tp_mask;
+			} while (ltp == (++i * (priv->tp_total - 1) / 32));
+			i--;
+		}
+	}
+
+	nv_wr32(dev, 0x400208, 0x80000000);
+
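+	/* apparently per-method initial state, written while 0x400208 bit 31
+	 * is set; the meanings of these addresses/values are largely unknown
+	 */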
+	nv_icmd(dev, 0x00001000, 0x00000004);
+	nv_icmd(dev, 0x000000a9, 0x0000ffff);
+	nv_icmd(dev, 0x00000038, 0x0fac6881);
+	nv_icmd(dev, 0x0000003d, 0x00000001);
+	nv_icmd(dev, 0x000000e8, 0x00000400);
+	nv_icmd(dev, 0x000000e9, 0x00000400);
+	nv_icmd(dev, 0x000000ea, 0x00000400);
+	nv_icmd(dev, 0x000000eb, 0x00000400);
+	nv_icmd(dev, 0x000000ec, 0x00000400);
+	nv_icmd(dev, 0x000000ed, 0x00000400);
+	nv_icmd(dev, 0x000000ee, 0x00000400);
+	nv_icmd(dev, 0x000000ef, 0x00000400);
+	nv_icmd(dev, 0x00000078, 0x00000300);
+	nv_icmd(dev, 0x00000079, 0x00000300);
+	nv_icmd(dev, 0x0000007a, 0x00000300);
+	nv_icmd(dev, 0x0000007b, 0x00000300);
+	nv_icmd(dev, 0x0000007c, 0x00000300);
+	nv_icmd(dev, 0x0000007d, 0x00000300);
+	nv_icmd(dev, 0x0000007e, 0x00000300);
+	nv_icmd(dev, 0x0000007f, 0x00000300);
+	nv_icmd(dev, 0x00000050, 0x00000011);
+	nv_icmd(dev, 0x00000058, 0x00000008);
+	nv_icmd(dev, 0x00000059, 0x00000008);
+	nv_icmd(dev, 0x0000005a, 0x00000008);
+	nv_icmd(dev, 0x0000005b, 0x00000008);
+	nv_icmd(dev, 0x0000005c, 0x00000008);
+	nv_icmd(dev, 0x0000005d, 0x00000008);
+	nv_icmd(dev, 0x0000005e, 0x00000008);
+	nv_icmd(dev, 0x0000005f, 0x00000008);
+	nv_icmd(dev, 0x00000208, 0x00000001);
+	nv_icmd(dev, 0x00000209, 0x00000001);
+	nv_icmd(dev, 0x0000020a, 0x00000001);
+	nv_icmd(dev, 0x0000020b, 0x00000001);
+	nv_icmd(dev, 0x0000020c, 0x00000001);
+	nv_icmd(dev, 0x0000020d, 0x00000001);
+	nv_icmd(dev, 0x0000020e, 0x00000001);
+	nv_icmd(dev, 0x0000020f, 0x00000001);
+	nv_icmd(dev, 0x00000081, 0x00000001);
+	nv_icmd(dev, 0x00000085, 0x00000004);
+	nv_icmd(dev, 0x00000088, 0x00000400);
+	nv_icmd(dev, 0x00000090, 0x00000300);
+	nv_icmd(dev, 0x00000098, 0x00001001);
+	nv_icmd(dev, 0x000000e3, 0x00000001);
+	nv_icmd(dev, 0x000000da, 0x00000001);
+	nv_icmd(dev, 0x000000f8, 0x00000003);
+	nv_icmd(dev, 0x000000fa, 0x00000001);
+	nv_icmd(dev, 0x0000009f, 0x0000ffff);
+	nv_icmd(dev, 0x000000a0, 0x0000ffff);
+	nv_icmd(dev, 0x000000a1, 0x0000ffff);
+	nv_icmd(dev, 0x000000a2, 0x0000ffff);
+	nv_icmd(dev, 0x000000b1, 0x00000001);
+	nv_icmd(dev, 0x000000b2, 0x00000000);
+	nv_icmd(dev, 0x000000b3, 0x00000000);
+	nv_icmd(dev, 0x000000b4, 0x00000000);
+	nv_icmd(dev, 0x000000b5, 0x00000000);
+	nv_icmd(dev, 0x000000b6, 0x00000000);
+	nv_icmd(dev, 0x000000b7, 0x00000000);
+	nv_icmd(dev, 0x000000b8, 0x00000000);
+	nv_icmd(dev, 0x000000b9, 0x00000000);
+	nv_icmd(dev, 0x000000ba, 0x00000000);
+	nv_icmd(dev, 0x000000bb, 0x00000000);
+	nv_icmd(dev, 0x000000bc, 0x00000000);
+	nv_icmd(dev, 0x000000bd, 0x00000000);
+	nv_icmd(dev, 0x000000be, 0x00000000);
+	nv_icmd(dev, 0x000000bf, 0x00000000);
+	nv_icmd(dev, 0x000000c0, 0x00000000);
+	nv_icmd(dev, 0x000000c1, 0x00000000);
+	nv_icmd(dev, 0x000000c2, 0x00000000);
+	nv_icmd(dev, 0x000000c3, 0x00000000);
+	nv_icmd(dev, 0x000000c4, 0x00000000);
+	nv_icmd(dev, 0x000000c5, 0x00000000);
+	nv_icmd(dev, 0x000000c6, 0x00000000);
+	nv_icmd(dev, 0x000000c7, 0x00000000);
+	nv_icmd(dev, 0x000000c8, 0x00000000);
+	nv_icmd(dev, 0x000000c9, 0x00000000);
+	nv_icmd(dev, 0x000000ca, 0x00000000);
+	nv_icmd(dev, 0x000000cb, 0x00000000);
+	nv_icmd(dev, 0x000000cc, 0x00000000);
+	nv_icmd(dev, 0x000000cd, 0x00000000);
+	nv_icmd(dev, 0x000000ce, 0x00000000);
+	nv_icmd(dev, 0x000000cf, 0x00000000);
+	nv_icmd(dev, 0x000000d0, 0x00000000);
+	nv_icmd(dev, 0x000000d1, 0x00000000);
+	nv_icmd(dev, 0x000000d2, 0x00000000);
+	nv_icmd(dev, 0x000000d3, 0x00000000);
+	nv_icmd(dev, 0x000000d4, 0x00000000);
+	nv_icmd(dev, 0x000000d5, 0x00000000);
+	nv_icmd(dev, 0x000000d6, 0x00000000);
+	nv_icmd(dev, 0x000000d7, 0x00000000);
+	nv_icmd(dev, 0x000000d8, 0x00000000);
+	nv_icmd(dev, 0x000000d9, 0x00000000);
+	nv_icmd(dev, 0x00000210, 0x00000040);
+	nv_icmd(dev, 0x00000211, 0x00000040);
+	nv_icmd(dev, 0x00000212, 0x00000040);
+	nv_icmd(dev, 0x00000213, 0x00000040);
+	nv_icmd(dev, 0x00000214, 0x00000040);
+	nv_icmd(dev, 0x00000215, 0x00000040);
+	nv_icmd(dev, 0x00000216, 0x00000040);
+	nv_icmd(dev, 0x00000217, 0x00000040);
+	nv_icmd(dev, 0x00000218, 0x0000c080);
+	nv_icmd(dev, 0x00000219, 0x0000c080);
+	nv_icmd(dev, 0x0000021a, 0x0000c080);
+	nv_icmd(dev, 0x0000021b, 0x0000c080);
+	nv_icmd(dev, 0x0000021c, 0x0000c080);
+	nv_icmd(dev, 0x0000021d, 0x0000c080);
+	nv_icmd(dev, 0x0000021e, 0x0000c080);
+	nv_icmd(dev, 0x0000021f, 0x0000c080);
+	nv_icmd(dev, 0x000000ad, 0x0000013e);
+	nv_icmd(dev, 0x000000e1, 0x00000010);
+	nv_icmd(dev, 0x00000290, 0x00000000);
+	nv_icmd(dev, 0x00000291, 0x00000000);
+	nv_icmd(dev, 0x00000292, 0x00000000);
+	nv_icmd(dev, 0x00000293, 0x00000000);
+	nv_icmd(dev, 0x00000294, 0x00000000);
+	nv_icmd(dev, 0x00000295, 0x00000000);
+	nv_icmd(dev, 0x00000296, 0x00000000);
+	nv_icmd(dev, 0x00000297, 0x00000000);
+	nv_icmd(dev, 0x00000298, 0x00000000);
+	nv_icmd(dev, 0x00000299, 0x00000000);
+	nv_icmd(dev, 0x0000029a, 0x00000000);
+	nv_icmd(dev, 0x0000029b, 0x00000000);
+	nv_icmd(dev, 0x0000029c, 0x00000000);
+	nv_icmd(dev, 0x0000029d, 0x00000000);
+	nv_icmd(dev, 0x0000029e, 0x00000000);
+	nv_icmd(dev, 0x0000029f, 0x00000000);
+	nv_icmd(dev, 0x000003b0, 0x00000000);
+	nv_icmd(dev, 0x000003b1, 0x00000000);
+	nv_icmd(dev, 0x000003b2, 0x00000000);
+	nv_icmd(dev, 0x000003b3, 0x00000000);
+	nv_icmd(dev, 0x000003b4, 0x00000000);
+	nv_icmd(dev, 0x000003b5, 0x00000000);
+	nv_icmd(dev, 0x000003b6, 0x00000000);
+	nv_icmd(dev, 0x000003b7, 0x00000000);
+	nv_icmd(dev, 0x000003b8, 0x00000000);
+	nv_icmd(dev, 0x000003b9, 0x00000000);
+	nv_icmd(dev, 0x000003ba, 0x00000000);
+	nv_icmd(dev, 0x000003bb, 0x00000000);
+	nv_icmd(dev, 0x000003bc, 0x00000000);
+	nv_icmd(dev, 0x000003bd, 0x00000000);
+	nv_icmd(dev, 0x000003be, 0x00000000);
+	nv_icmd(dev, 0x000003bf, 0x00000000);
+	nv_icmd(dev, 0x000002a0, 0x00000000);
+	nv_icmd(dev, 0x000002a1, 0x00000000);
+	nv_icmd(dev, 0x000002a2, 0x00000000);
+	nv_icmd(dev, 0x000002a3, 0x00000000);
+	nv_icmd(dev, 0x000002a4, 0x00000000);
+	nv_icmd(dev, 0x000002a5, 0x00000000);
+	nv_icmd(dev, 0x000002a6, 0x00000000);
+	nv_icmd(dev, 0x000002a7, 0x00000000);
+	nv_icmd(dev, 0x000002a8, 0x00000000);
+	nv_icmd(dev, 0x000002a9, 0x00000000);
+	nv_icmd(dev, 0x000002aa, 0x00000000);
+	nv_icmd(dev, 0x000002ab, 0x00000000);
+	nv_icmd(dev, 0x000002ac, 0x00000000);
+	nv_icmd(dev, 0x000002ad, 0x00000000);
+	nv_icmd(dev, 0x000002ae, 0x00000000);
+	nv_icmd(dev, 0x000002af, 0x00000000);
+	nv_icmd(dev, 0x00000420, 0x00000000);
+	nv_icmd(dev, 0x00000421, 0x00000000);
+	nv_icmd(dev, 0x00000422, 0x00000000);
+	nv_icmd(dev, 0x00000423, 0x00000000);
+	nv_icmd(dev, 0x00000424, 0x00000000);
+	nv_icmd(dev, 0x00000425, 0x00000000);
+	nv_icmd(dev, 0x00000426, 0x00000000);
+	nv_icmd(dev, 0x00000427, 0x00000000);
+	nv_icmd(dev, 0x00000428, 0x00000000);
+	nv_icmd(dev, 0x00000429, 0x00000000);
+	nv_icmd(dev, 0x0000042a, 0x00000000);
+	nv_icmd(dev, 0x0000042b, 0x00000000);
+	nv_icmd(dev, 0x0000042c, 0x00000000);
+	nv_icmd(dev, 0x0000042d, 0x00000000);
+	nv_icmd(dev, 0x0000042e, 0x00000000);
+	nv_icmd(dev, 0x0000042f, 0x00000000);
+	nv_icmd(dev, 0x000002b0, 0x00000000);
+	nv_icmd(dev, 0x000002b1, 0x00000000);
+	nv_icmd(dev, 0x000002b2, 0x00000000);
+	nv_icmd(dev, 0x000002b3, 0x00000000);
+	nv_icmd(dev, 0x000002b4, 0x00000000);
+	nv_icmd(dev, 0x000002b5, 0x00000000);
+	nv_icmd(dev, 0x000002b6, 0x00000000);
+	nv_icmd(dev, 0x000002b7, 0x00000000);
+	nv_icmd(dev, 0x000002b8, 0x00000000);
+	nv_icmd(dev, 0x000002b9, 0x00000000);
+	nv_icmd(dev, 0x000002ba, 0x00000000);
+	nv_icmd(dev, 0x000002bb, 0x00000000);
+	nv_icmd(dev, 0x000002bc, 0x00000000);
+	nv_icmd(dev, 0x000002bd, 0x00000000);
+	nv_icmd(dev, 0x000002be, 0x00000000);
+	nv_icmd(dev, 0x000002bf, 0x00000000);
+	nv_icmd(dev, 0x00000430, 0x00000000);
+	nv_icmd(dev, 0x00000431, 0x00000000);
+	nv_icmd(dev, 0x00000432, 0x00000000);
+	nv_icmd(dev, 0x00000433, 0x00000000);
+	nv_icmd(dev, 0x00000434, 0x00000000);
+	nv_icmd(dev, 0x00000435, 0x00000000);
+	nv_icmd(dev, 0x00000436, 0x00000000);
+	nv_icmd(dev, 0x00000437, 0x00000000);
+	nv_icmd(dev, 0x00000438, 0x00000000);
+	nv_icmd(dev, 0x00000439, 0x00000000);
+	nv_icmd(dev, 0x0000043a, 0x00000000);
+	nv_icmd(dev, 0x0000043b, 0x00000000);
+	nv_icmd(dev, 0x0000043c, 0x00000000);
+	nv_icmd(dev, 0x0000043d, 0x00000000);
+	nv_icmd(dev, 0x0000043e, 0x00000000);
+	nv_icmd(dev, 0x0000043f, 0x00000000);
+	nv_icmd(dev, 0x000002c0, 0x00000000);
+	nv_icmd(dev, 0x000002c1, 0x00000000);
+	nv_icmd(dev, 0x000002c2, 0x00000000);
+	nv_icmd(dev, 0x000002c3, 0x00000000);
+	nv_icmd(dev, 0x000002c4, 0x00000000);
+	nv_icmd(dev, 0x000002c5, 0x00000000);
+	nv_icmd(dev, 0x000002c6, 0x00000000);
+	nv_icmd(dev, 0x000002c7, 0x00000000);
+	nv_icmd(dev, 0x000002c8, 0x00000000);
+	nv_icmd(dev, 0x000002c9, 0x00000000);
+	nv_icmd(dev, 0x000002ca, 0x00000000);
+	nv_icmd(dev, 0x000002cb, 0x00000000);
+	nv_icmd(dev, 0x000002cc, 0x00000000);
+	nv_icmd(dev, 0x000002cd, 0x00000000);
+	nv_icmd(dev, 0x000002ce, 0x00000000);
+	nv_icmd(dev, 0x000002cf, 0x00000000);
+	nv_icmd(dev, 0x000004d0, 0x00000000);
+	nv_icmd(dev, 0x000004d1, 0x00000000);
+	nv_icmd(dev, 0x000004d2, 0x00000000);
+	nv_icmd(dev, 0x000004d3, 0x00000000);
+	nv_icmd(dev, 0x000004d4, 0x00000000);
+	nv_icmd(dev, 0x000004d5, 0x00000000);
+	nv_icmd(dev, 0x000004d6, 0x00000000);
+	nv_icmd(dev, 0x000004d7, 0x00000000);
+	nv_icmd(dev, 0x000004d8, 0x00000000);
+	nv_icmd(dev, 0x000004d9, 0x00000000);
+	nv_icmd(dev, 0x000004da, 0x00000000);
+	nv_icmd(dev, 0x000004db, 0x00000000);
+	nv_icmd(dev, 0x000004dc, 0x00000000);
+	nv_icmd(dev, 0x000004dd, 0x00000000);
+	nv_icmd(dev, 0x000004de, 0x00000000);
+	nv_icmd(dev, 0x000004df, 0x00000000);
+	nv_icmd(dev, 0x00000720, 0x00000000);
+	nv_icmd(dev, 0x00000721, 0x00000000);
+	nv_icmd(dev, 0x00000722, 0x00000000);
+	nv_icmd(dev, 0x00000723, 0x00000000);
+	nv_icmd(dev, 0x00000724, 0x00000000);
+	nv_icmd(dev, 0x00000725, 0x00000000);
+	nv_icmd(dev, 0x00000726, 0x00000000);
+	nv_icmd(dev, 0x00000727, 0x00000000);
+	nv_icmd(dev, 0x00000728, 0x00000000);
+	nv_icmd(dev, 0x00000729, 0x00000000);
+	nv_icmd(dev, 0x0000072a, 0x00000000);
+	nv_icmd(dev, 0x0000072b, 0x00000000);
+	nv_icmd(dev, 0x0000072c, 0x00000000);
+	nv_icmd(dev, 0x0000072d, 0x00000000);
+	nv_icmd(dev, 0x0000072e, 0x00000000);
+	nv_icmd(dev, 0x0000072f, 0x00000000);
+	nv_icmd(dev, 0x000008c0, 0x00000000);
+	nv_icmd(dev, 0x000008c1, 0x00000000);
+	nv_icmd(dev, 0x000008c2, 0x00000000);
+	nv_icmd(dev, 0x000008c3, 0x00000000);
+	nv_icmd(dev, 0x000008c4, 0x00000000);
+	nv_icmd(dev, 0x000008c5, 0x00000000);
+	nv_icmd(dev, 0x000008c6, 0x00000000);
+	nv_icmd(dev, 0x000008c7, 0x00000000);
+	nv_icmd(dev, 0x000008c8, 0x00000000);
+	nv_icmd(dev, 0x000008c9, 0x00000000);
+	nv_icmd(dev, 0x000008ca, 0x00000000);
+	nv_icmd(dev, 0x000008cb, 0x00000000);
+	nv_icmd(dev, 0x000008cc, 0x00000000);
+	nv_icmd(dev, 0x000008cd, 0x00000000);
+	nv_icmd(dev, 0x000008ce, 0x00000000);
+	nv_icmd(dev, 0x000008cf, 0x00000000);
+	nv_icmd(dev, 0x00000890, 0x00000000);
+	nv_icmd(dev, 0x00000891, 0x00000000);
+	nv_icmd(dev, 0x00000892, 0x00000000);
+	nv_icmd(dev, 0x00000893, 0x00000000);
+	nv_icmd(dev, 0x00000894, 0x00000000);
+	nv_icmd(dev, 0x00000895, 0x00000000);
+	nv_icmd(dev, 0x00000896, 0x00000000);
+	nv_icmd(dev, 0x00000897, 0x00000000);
+	nv_icmd(dev, 0x00000898, 0x00000000);
+	nv_icmd(dev, 0x00000899, 0x00000000);
+	nv_icmd(dev, 0x0000089a, 0x00000000);
+	nv_icmd(dev, 0x0000089b, 0x00000000);
+	nv_icmd(dev, 0x0000089c, 0x00000000);
+	nv_icmd(dev, 0x0000089d, 0x00000000);
+	nv_icmd(dev, 0x0000089e, 0x00000000);
+	nv_icmd(dev, 0x0000089f, 0x00000000);
+	nv_icmd(dev, 0x000008e0, 0x00000000);
+	nv_icmd(dev, 0x000008e1, 0x00000000);
+	nv_icmd(dev, 0x000008e2, 0x00000000);
+	nv_icmd(dev, 0x000008e3, 0x00000000);
+	nv_icmd(dev, 0x000008e4, 0x00000000);
+	nv_icmd(dev, 0x000008e5, 0x00000000);
+	nv_icmd(dev, 0x000008e6, 0x00000000);
+	nv_icmd(dev, 0x000008e7, 0x00000000);
+	nv_icmd(dev, 0x000008e8, 0x00000000);
+	nv_icmd(dev, 0x000008e9, 0x00000000);
+	nv_icmd(dev, 0x000008ea, 0x00000000);
+	nv_icmd(dev, 0x000008eb, 0x00000000);
+	nv_icmd(dev, 0x000008ec, 0x00000000);
+	nv_icmd(dev, 0x000008ed, 0x00000000);
+	nv_icmd(dev, 0x000008ee, 0x00000000);
+	nv_icmd(dev, 0x000008ef, 0x00000000);
+	nv_icmd(dev, 0x000008a0, 0x00000000);
+	nv_icmd(dev, 0x000008a1, 0x00000000);
+	nv_icmd(dev, 0x000008a2, 0x00000000);
+	nv_icmd(dev, 0x000008a3, 0x00000000);
+	nv_icmd(dev, 0x000008a4, 0x00000000);
+	nv_icmd(dev, 0x000008a5, 0x00000000);
+	nv_icmd(dev, 0x000008a6, 0x00000000);
+	nv_icmd(dev, 0x000008a7, 0x00000000);
+	nv_icmd(dev, 0x000008a8, 0x00000000);
+	nv_icmd(dev, 0x000008a9, 0x00000000);
+	nv_icmd(dev, 0x000008aa, 0x00000000);
+	nv_icmd(dev, 0x000008ab, 0x00000000);
+	nv_icmd(dev, 0x000008ac, 0x00000000);
+	nv_icmd(dev, 0x000008ad, 0x00000000);
+	nv_icmd(dev, 0x000008ae, 0x00000000);
+	nv_icmd(dev, 0x000008af, 0x00000000);
+	nv_icmd(dev, 0x000008f0, 0x00000000);
+	nv_icmd(dev, 0x000008f1, 0x00000000);
+	nv_icmd(dev, 0x000008f2, 0x00000000);
+	nv_icmd(dev, 0x000008f3, 0x00000000);
+	nv_icmd(dev, 0x000008f4, 0x00000000);
+	nv_icmd(dev, 0x000008f5, 0x00000000);
+	nv_icmd(dev, 0x000008f6, 0x00000000);
+	nv_icmd(dev, 0x000008f7, 0x00000000);
+	nv_icmd(dev, 0x000008f8, 0x00000000);
+	nv_icmd(dev, 0x000008f9, 0x00000000);
+	nv_icmd(dev, 0x000008fa, 0x00000000);
+	nv_icmd(dev, 0x000008fb, 0x00000000);
+	nv_icmd(dev, 0x000008fc, 0x00000000);
+	nv_icmd(dev, 0x000008fd, 0x00000000);
+	nv_icmd(dev, 0x000008fe, 0x00000000);
+	nv_icmd(dev, 0x000008ff, 0x00000000);
+	nv_icmd(dev, 0x0000094c, 0x000000ff);
+	nv_icmd(dev, 0x0000094d, 0xffffffff);
+	nv_icmd(dev, 0x0000094e, 0x00000002);
+	nv_icmd(dev, 0x000002ec, 0x00000001);
+	nv_icmd(dev, 0x00000303, 0x00000001);
+	nv_icmd(dev, 0x000002e6, 0x00000001);
+	nv_icmd(dev, 0x00000466, 0x00000052);
+	nv_icmd(dev, 0x00000301, 0x3f800000);
+	nv_icmd(dev, 0x00000304, 0x30201000);
+	nv_icmd(dev, 0x00000305, 0x70605040);
+	nv_icmd(dev, 0x00000306, 0xb8a89888);
+	nv_icmd(dev, 0x00000307, 0xf8e8d8c8);
+	nv_icmd(dev, 0x0000030a, 0x00ffff00);
+	nv_icmd(dev, 0x0000030b, 0x0000001a);
+	nv_icmd(dev, 0x0000030c, 0x00000001);
+	nv_icmd(dev, 0x00000318, 0x00000001);
+	nv_icmd(dev, 0x00000340, 0x00000000);
+	nv_icmd(dev, 0x00000375, 0x00000001);
+	nv_icmd(dev, 0x00000351, 0x00000100);
+	nv_icmd(dev, 0x0000037d, 0x00000006);
+	nv_icmd(dev, 0x000003a0, 0x00000002);
+	nv_icmd(dev, 0x000003aa, 0x00000001);
+	nv_icmd(dev, 0x000003a9, 0x00000001);
+	nv_icmd(dev, 0x00000380, 0x00000001);
+	nv_icmd(dev, 0x00000360, 0x00000040);
+	nv_icmd(dev, 0x00000366, 0x00000000);
+	nv_icmd(dev, 0x00000367, 0x00000000);
+	nv_icmd(dev, 0x00000368, 0x00001fff);
+	nv_icmd(dev, 0x00000370, 0x00000000);
+	nv_icmd(dev, 0x00000371, 0x00000000);
+	nv_icmd(dev, 0x00000372, 0x003fffff);
+	nv_icmd(dev, 0x0000037a, 0x00000012);
+	nv_icmd(dev, 0x000005e0, 0x00000022);
+	nv_icmd(dev, 0x000005e1, 0x00000022);
+	nv_icmd(dev, 0x000005e2, 0x00000022);
+	nv_icmd(dev, 0x000005e3, 0x00000022);
+	nv_icmd(dev, 0x000005e4, 0x00000022);
+	nv_icmd(dev, 0x00000619, 0x00000003);
+	nv_icmd(dev, 0x00000811, 0x00000003);
+	nv_icmd(dev, 0x00000812, 0x00000004);
+	nv_icmd(dev, 0x00000813, 0x00000006);
+	nv_icmd(dev, 0x00000814, 0x00000008);
+	nv_icmd(dev, 0x00000815, 0x0000000b);
+	nv_icmd(dev, 0x00000800, 0x00000001);
+	nv_icmd(dev, 0x00000801, 0x00000001);
+	nv_icmd(dev, 0x00000802, 0x00000001);
+	nv_icmd(dev, 0x00000803, 0x00000001);
+	nv_icmd(dev, 0x00000804, 0x00000001);
+	nv_icmd(dev, 0x00000805, 0x00000001);
+	nv_icmd(dev, 0x00000632, 0x00000001);
+	nv_icmd(dev, 0x00000633, 0x00000002);
+	nv_icmd(dev, 0x00000634, 0x00000003);
+	nv_icmd(dev, 0x00000635, 0x00000004);
+	nv_icmd(dev, 0x00000654, 0x3f800000);
+	nv_icmd(dev, 0x00000657, 0x3f800000);
+	nv_icmd(dev, 0x00000655, 0x3f800000);
+	nv_icmd(dev, 0x00000656, 0x3f800000);
+	nv_icmd(dev, 0x000006cd, 0x3f800000);
+	nv_icmd(dev, 0x000007f5, 0x3f800000);
+	nv_icmd(dev, 0x000007dc, 0x39291909);
+	nv_icmd(dev, 0x000007dd, 0x79695949);
+	nv_icmd(dev, 0x000007de, 0xb9a99989);
+	nv_icmd(dev, 0x000007df, 0xf9e9d9c9);
+	nv_icmd(dev, 0x000007e8, 0x00003210);
+	nv_icmd(dev, 0x000007e9, 0x00007654);
+	nv_icmd(dev, 0x000007ea, 0x00000098);
+	nv_icmd(dev, 0x000007ec, 0x39291909);
+	nv_icmd(dev, 0x000007ed, 0x79695949);
+	nv_icmd(dev, 0x000007ee, 0xb9a99989);
+	nv_icmd(dev, 0x000007ef, 0xf9e9d9c9);
+	nv_icmd(dev, 0x000007f0, 0x00003210);
+	nv_icmd(dev, 0x000007f1, 0x00007654);
+	nv_icmd(dev, 0x000007f2, 0x00000098);
+	nv_icmd(dev, 0x000005a5, 0x00000001);
+	nv_icmd(dev, 0x00000980, 0x00000000);
+	nv_icmd(dev, 0x00000981, 0x00000000);
+	nv_icmd(dev, 0x00000982, 0x00000000);
+	nv_icmd(dev, 0x00000983, 0x00000000);
+	nv_icmd(dev, 0x00000984, 0x00000000);
+	nv_icmd(dev, 0x00000985, 0x00000000);
+	nv_icmd(dev, 0x00000986, 0x00000000);
+	nv_icmd(dev, 0x00000987, 0x00000000);
+	nv_icmd(dev, 0x00000988, 0x00000000);
+	nv_icmd(dev, 0x00000989, 0x00000000);
+	nv_icmd(dev, 0x0000098a, 0x00000000);
+	nv_icmd(dev, 0x0000098b, 0x00000000);
+	nv_icmd(dev, 0x0000098c, 0x00000000);
+	nv_icmd(dev, 0x0000098d, 0x00000000);
+	nv_icmd(dev, 0x0000098e, 0x00000000);
+	nv_icmd(dev, 0x0000098f, 0x00000000);
+	nv_icmd(dev, 0x00000990, 0x00000000);
+	nv_icmd(dev, 0x00000991, 0x00000000);
+	nv_icmd(dev, 0x00000992, 0x00000000);
+	nv_icmd(dev, 0x00000993, 0x00000000);
+	nv_icmd(dev, 0x00000994, 0x00000000);
+	nv_icmd(dev, 0x00000995, 0x00000000);
+	nv_icmd(dev, 0x00000996, 0x00000000);
+	nv_icmd(dev, 0x00000997, 0x00000000);
+	nv_icmd(dev, 0x00000998, 0x00000000);
+	nv_icmd(dev, 0x00000999, 0x00000000);
+	nv_icmd(dev, 0x0000099a, 0x00000000);
+	nv_icmd(dev, 0x0000099b, 0x00000000);
+	nv_icmd(dev, 0x0000099c, 0x00000000);
+	nv_icmd(dev, 0x0000099d, 0x00000000);
+	nv_icmd(dev, 0x0000099e, 0x00000000);
+	nv_icmd(dev, 0x0000099f, 0x00000000);
+	nv_icmd(dev, 0x000009a0, 0x00000000);
+	nv_icmd(dev, 0x000009a1, 0x00000000);
+	nv_icmd(dev, 0x000009a2, 0x00000000);
+	nv_icmd(dev, 0x000009a3, 0x00000000);
+	nv_icmd(dev, 0x000009a4, 0x00000000);
+	nv_icmd(dev, 0x000009a5, 0x00000000);
+	nv_icmd(dev, 0x000009a6, 0x00000000);
+	nv_icmd(dev, 0x000009a7, 0x00000000);
+	nv_icmd(dev, 0x000009a8, 0x00000000);
+	nv_icmd(dev, 0x000009a9, 0x00000000);
+	nv_icmd(dev, 0x000009aa, 0x00000000);
+	nv_icmd(dev, 0x000009ab, 0x00000000);
+	nv_icmd(dev, 0x000009ac, 0x00000000);
+	nv_icmd(dev, 0x000009ad, 0x00000000);
+	nv_icmd(dev, 0x000009ae, 0x00000000);
+	nv_icmd(dev, 0x000009af, 0x00000000);
+	nv_icmd(dev, 0x000009b0, 0x00000000);
+	nv_icmd(dev, 0x000009b1, 0x00000000);
+	nv_icmd(dev, 0x000009b2, 0x00000000);
+	nv_icmd(dev, 0x000009b3, 0x00000000);
+	nv_icmd(dev, 0x000009b4, 0x00000000);
+	nv_icmd(dev, 0x000009b5, 0x00000000);
+	nv_icmd(dev, 0x000009b6, 0x00000000);
+	nv_icmd(dev, 0x000009b7, 0x00000000);
+	nv_icmd(dev, 0x000009b8, 0x00000000);
+	nv_icmd(dev, 0x000009b9, 0x00000000);
+	nv_icmd(dev, 0x000009ba, 0x00000000);
+	nv_icmd(dev, 0x000009bb, 0x00000000);
+	nv_icmd(dev, 0x000009bc, 0x00000000);
+	nv_icmd(dev, 0x000009bd, 0x00000000);
+	nv_icmd(dev, 0x000009be, 0x00000000);
+	nv_icmd(dev, 0x000009bf, 0x00000000);
+	nv_icmd(dev, 0x000009c0, 0x00000000);
+	nv_icmd(dev, 0x000009c1, 0x00000000);
+	nv_icmd(dev, 0x000009c2, 0x00000000);
+	nv_icmd(dev, 0x000009c3, 0x00000000);
+	nv_icmd(dev, 0x000009c4, 0x00000000);
+	nv_icmd(dev, 0x000009c5, 0x00000000);
+	nv_icmd(dev, 0x000009c6, 0x00000000);
+	nv_icmd(dev, 0x000009c7, 0x00000000);
+	nv_icmd(dev, 0x000009c8, 0x00000000);
+	nv_icmd(dev, 0x000009c9, 0x00000000);
+	nv_icmd(dev, 0x000009ca, 0x00000000);
+	nv_icmd(dev, 0x000009cb, 0x00000000);
+	nv_icmd(dev, 0x000009cc, 0x00000000);
+	nv_icmd(dev, 0x000009cd, 0x00000000);
+	nv_icmd(dev, 0x000009ce, 0x00000000);
+	nv_icmd(dev, 0x000009cf, 0x00000000);
+	nv_icmd(dev, 0x000009d0, 0x00000000);
+	nv_icmd(dev, 0x000009d1, 0x00000000);
+	nv_icmd(dev, 0x000009d2, 0x00000000);
+	nv_icmd(dev, 0x000009d3, 0x00000000);
+	nv_icmd(dev, 0x000009d4, 0x00000000);
+	nv_icmd(dev, 0x000009d5, 0x00000000);
+	nv_icmd(dev, 0x000009d6, 0x00000000);
+	nv_icmd(dev, 0x000009d7, 0x00000000);
+	nv_icmd(dev, 0x000009d8, 0x00000000);
+	nv_icmd(dev, 0x000009d9, 0x00000000);
+	nv_icmd(dev, 0x000009da, 0x00000000);
+	nv_icmd(dev, 0x000009db, 0x00000000);
+	nv_icmd(dev, 0x000009dc, 0x00000000);
+	nv_icmd(dev, 0x000009dd, 0x00000000);
+	nv_icmd(dev, 0x000009de, 0x00000000);
+	nv_icmd(dev, 0x000009df, 0x00000000);
+	nv_icmd(dev, 0x000009e0, 0x00000000);
+	nv_icmd(dev, 0x000009e1, 0x00000000);
+	nv_icmd(dev, 0x000009e2, 0x00000000);
+	nv_icmd(dev, 0x000009e3, 0x00000000);
+	nv_icmd(dev, 0x000009e4, 0x00000000);
+	nv_icmd(dev, 0x000009e5, 0x00000000);
+	nv_icmd(dev, 0x000009e6, 0x00000000);
+	nv_icmd(dev, 0x000009e7, 0x00000000);
+	nv_icmd(dev, 0x000009e8, 0x00000000);
+	nv_icmd(dev, 0x000009e9, 0x00000000);
+	nv_icmd(dev, 0x000009ea, 0x00000000);
+	nv_icmd(dev, 0x000009eb, 0x00000000);
+	nv_icmd(dev, 0x000009ec, 0x00000000);
+	nv_icmd(dev, 0x000009ed, 0x00000000);
+	nv_icmd(dev, 0x000009ee, 0x00000000);
+	nv_icmd(dev, 0x000009ef, 0x00000000);
+	nv_icmd(dev, 0x000009f0, 0x00000000);
+	nv_icmd(dev, 0x000009f1, 0x00000000);
+	nv_icmd(dev, 0x000009f2, 0x00000000);
+	nv_icmd(dev, 0x000009f3, 0x00000000);
+	nv_icmd(dev, 0x000009f4, 0x00000000);
+	nv_icmd(dev, 0x000009f5, 0x00000000);
+	nv_icmd(dev, 0x000009f6, 0x00000000);
+	nv_icmd(dev, 0x000009f7, 0x00000000);
+	nv_icmd(dev, 0x000009f8, 0x00000000);
+	nv_icmd(dev, 0x000009f9, 0x00000000);
+	nv_icmd(dev, 0x000009fa, 0x00000000);
+	nv_icmd(dev, 0x000009fb, 0x00000000);
+	nv_icmd(dev, 0x000009fc, 0x00000000);
+	nv_icmd(dev, 0x000009fd, 0x00000000);
+	nv_icmd(dev, 0x000009fe, 0x00000000);
+	nv_icmd(dev, 0x000009ff, 0x00000000);
+	nv_icmd(dev, 0x00000468, 0x00000004);
+	nv_icmd(dev, 0x0000046c, 0x00000001);
+	nv_icmd(dev, 0x00000470, 0x00000000);
+	nv_icmd(dev, 0x00000471, 0x00000000);
+	nv_icmd(dev, 0x00000472, 0x00000000);
+	nv_icmd(dev, 0x00000473, 0x00000000);
+	nv_icmd(dev, 0x00000474, 0x00000000);
+	nv_icmd(dev, 0x00000475, 0x00000000);
+	nv_icmd(dev, 0x00000476, 0x00000000);
+	nv_icmd(dev, 0x00000477, 0x00000000);
+	nv_icmd(dev, 0x00000478, 0x00000000);
+	nv_icmd(dev, 0x00000479, 0x00000000);
+	nv_icmd(dev, 0x0000047a, 0x00000000);
+	nv_icmd(dev, 0x0000047b, 0x00000000);
+	nv_icmd(dev, 0x0000047c, 0x00000000);
+	nv_icmd(dev, 0x0000047d, 0x00000000);
+	nv_icmd(dev, 0x0000047e, 0x00000000);
+	nv_icmd(dev, 0x0000047f, 0x00000000);
+	nv_icmd(dev, 0x00000480, 0x00000000);
+	nv_icmd(dev, 0x00000481, 0x00000000);
+	nv_icmd(dev, 0x00000482, 0x00000000);
+	nv_icmd(dev, 0x00000483, 0x00000000);
+	nv_icmd(dev, 0x00000484, 0x00000000);
+	nv_icmd(dev, 0x00000485, 0x00000000);
+	nv_icmd(dev, 0x00000486, 0x00000000);
+	nv_icmd(dev, 0x00000487, 0x00000000);
+	nv_icmd(dev, 0x00000488, 0x00000000);
+	nv_icmd(dev, 0x00000489, 0x00000000);
+	nv_icmd(dev, 0x0000048a, 0x00000000);
+	nv_icmd(dev, 0x0000048b, 0x00000000);
+	nv_icmd(dev, 0x0000048c, 0x00000000);
+	nv_icmd(dev, 0x0000048d, 0x00000000);
+	nv_icmd(dev, 0x0000048e, 0x00000000);
+	nv_icmd(dev, 0x0000048f, 0x00000000);
+	nv_icmd(dev, 0x00000490, 0x00000000);
+	nv_icmd(dev, 0x00000491, 0x00000000);
+	nv_icmd(dev, 0x00000492, 0x00000000);
+	nv_icmd(dev, 0x00000493, 0x00000000);
+	nv_icmd(dev, 0x00000494, 0x00000000);
+	nv_icmd(dev, 0x00000495, 0x00000000);
+	nv_icmd(dev, 0x00000496, 0x00000000);
+	nv_icmd(dev, 0x00000497, 0x00000000);
+	nv_icmd(dev, 0x00000498, 0x00000000);
+	nv_icmd(dev, 0x00000499, 0x00000000);
+	nv_icmd(dev, 0x0000049a, 0x00000000);
+	nv_icmd(dev, 0x0000049b, 0x00000000);
+	nv_icmd(dev, 0x0000049c, 0x00000000);
+	nv_icmd(dev, 0x0000049d, 0x00000000);
+	nv_icmd(dev, 0x0000049e, 0x00000000);
+	nv_icmd(dev, 0x0000049f, 0x00000000);
+	nv_icmd(dev, 0x000004a0, 0x00000000);
+	nv_icmd(dev, 0x000004a1, 0x00000000);
+	nv_icmd(dev, 0x000004a2, 0x00000000);
+	nv_icmd(dev, 0x000004a3, 0x00000000);
+	nv_icmd(dev, 0x000004a4, 0x00000000);
+	nv_icmd(dev, 0x000004a5, 0x00000000);
+	nv_icmd(dev, 0x000004a6, 0x00000000);
+	nv_icmd(dev, 0x000004a7, 0x00000000);
+	nv_icmd(dev, 0x000004a8, 0x00000000);
+	nv_icmd(dev, 0x000004a9, 0x00000000);
+	nv_icmd(dev, 0x000004aa, 0x00000000);
+	nv_icmd(dev, 0x000004ab, 0x00000000);
+	nv_icmd(dev, 0x000004ac, 0x00000000);
+	nv_icmd(dev, 0x000004ad, 0x00000000);
+	nv_icmd(dev, 0x000004ae, 0x00000000);
+	nv_icmd(dev, 0x000004af, 0x00000000);
+	nv_icmd(dev, 0x000004b0, 0x00000000);
+	nv_icmd(dev, 0x000004b1, 0x00000000);
+	nv_icmd(dev, 0x000004b2, 0x00000000);
+	nv_icmd(dev, 0x000004b3, 0x00000000);
+	nv_icmd(dev, 0x000004b4, 0x00000000);
+	nv_icmd(dev, 0x000004b5, 0x00000000);
+	nv_icmd(dev, 0x000004b6, 0x00000000);
+	nv_icmd(dev, 0x000004b7, 0x00000000);
+	nv_icmd(dev, 0x000004b8, 0x00000000);
+	nv_icmd(dev, 0x000004b9, 0x00000000);
+	nv_icmd(dev, 0x000004ba, 0x00000000);
+	nv_icmd(dev, 0x000004bb, 0x00000000);
+	nv_icmd(dev, 0x000004bc, 0x00000000);
+	nv_icmd(dev, 0x000004bd, 0x00000000);
+	nv_icmd(dev, 0x000004be, 0x00000000);
+	nv_icmd(dev, 0x000004bf, 0x00000000);
+	nv_icmd(dev, 0x000004c0, 0x00000000);
+	nv_icmd(dev, 0x000004c1, 0x00000000);
+	nv_icmd(dev, 0x000004c2, 0x00000000);
+	nv_icmd(dev, 0x000004c3, 0x00000000);
+	nv_icmd(dev, 0x000004c4, 0x00000000);
+	nv_icmd(dev, 0x000004c5, 0x00000000);
+	nv_icmd(dev, 0x000004c6, 0x00000000);
+	nv_icmd(dev, 0x000004c7, 0x00000000);
+	nv_icmd(dev, 0x000004c8, 0x00000000);
+	nv_icmd(dev, 0x000004c9, 0x00000000);
+	nv_icmd(dev, 0x000004ca, 0x00000000);
+	nv_icmd(dev, 0x000004cb, 0x00000000);
+	nv_icmd(dev, 0x000004cc, 0x00000000);
+	nv_icmd(dev, 0x000004cd, 0x00000000);
+	nv_icmd(dev, 0x000004ce, 0x00000000);
+	nv_icmd(dev, 0x000004cf, 0x00000000);
+	nv_icmd(dev, 0x00000510, 0x3f800000);
+	nv_icmd(dev, 0x00000511, 0x3f800000);
+	nv_icmd(dev, 0x00000512, 0x3f800000);
+	nv_icmd(dev, 0x00000513, 0x3f800000);
+	nv_icmd(dev, 0x00000514, 0x3f800000);
+	nv_icmd(dev, 0x00000515, 0x3f800000);
+	nv_icmd(dev, 0x00000516, 0x3f800000);
+	nv_icmd(dev, 0x00000517, 0x3f800000);
+	nv_icmd(dev, 0x00000518, 0x3f800000);
+	nv_icmd(dev, 0x00000519, 0x3f800000);
+	nv_icmd(dev, 0x0000051a, 0x3f800000);
+	nv_icmd(dev, 0x0000051b, 0x3f800000);
+	nv_icmd(dev, 0x0000051c, 0x3f800000);
+	nv_icmd(dev, 0x0000051d, 0x3f800000);
+	nv_icmd(dev, 0x0000051e, 0x3f800000);
+	nv_icmd(dev, 0x0000051f, 0x3f800000);
+	nv_icmd(dev, 0x00000520, 0x000002b6);
+	nv_icmd(dev, 0x00000529, 0x00000001);
+	nv_icmd(dev, 0x00000530, 0xffff0000);
+	nv_icmd(dev, 0x00000531, 0xffff0000);
+	nv_icmd(dev, 0x00000532, 0xffff0000);
+	nv_icmd(dev, 0x00000533, 0xffff0000);
+	nv_icmd(dev, 0x00000534, 0xffff0000);
+	nv_icmd(dev, 0x00000535, 0xffff0000);
+	nv_icmd(dev, 0x00000536, 0xffff0000);
+	nv_icmd(dev, 0x00000537, 0xffff0000);
+	nv_icmd(dev, 0x00000538, 0xffff0000);
+	nv_icmd(dev, 0x00000539, 0xffff0000);
+	nv_icmd(dev, 0x0000053a, 0xffff0000);
+	nv_icmd(dev, 0x0000053b, 0xffff0000);
+	nv_icmd(dev, 0x0000053c, 0xffff0000);
+	nv_icmd(dev, 0x0000053d, 0xffff0000);
+	nv_icmd(dev, 0x0000053e, 0xffff0000);
+	nv_icmd(dev, 0x0000053f, 0xffff0000);
+	nv_icmd(dev, 0x00000585, 0x0000003f);
+	nv_icmd(dev, 0x00000576, 0x00000003);
+	nv_icmd(dev, 0x00000586, 0x00000040);
+	nv_icmd(dev, 0x00000582, 0x00000080);
+	nv_icmd(dev, 0x00000583, 0x00000080);
+	nv_icmd(dev, 0x000005c2, 0x00000001);
+	nv_icmd(dev, 0x00000638, 0x00000001);
+	nv_icmd(dev, 0x00000639, 0x00000001);
+	nv_icmd(dev, 0x0000063a, 0x00000002);
+	nv_icmd(dev, 0x0000063b, 0x00000001);
+	nv_icmd(dev, 0x0000063c, 0x00000001);
+	nv_icmd(dev, 0x0000063d, 0x00000002);
+	nv_icmd(dev, 0x0000063e, 0x00000001);
+	nv_icmd(dev, 0x000008b8, 0x00000001);
+	nv_icmd(dev, 0x000008b9, 0x00000001);
+	nv_icmd(dev, 0x000008ba, 0x00000001);
+	nv_icmd(dev, 0x000008bb, 0x00000001);
+	nv_icmd(dev, 0x000008bc, 0x00000001);
+	nv_icmd(dev, 0x000008bd, 0x00000001);
+	nv_icmd(dev, 0x000008be, 0x00000001);
+	nv_icmd(dev, 0x000008bf, 0x00000001);
+	nv_icmd(dev, 0x00000900, 0x00000001);
+	nv_icmd(dev, 0x00000901, 0x00000001);
+	nv_icmd(dev, 0x00000902, 0x00000001);
+	nv_icmd(dev, 0x00000903, 0x00000001);
+	nv_icmd(dev, 0x00000904, 0x00000001);
+	nv_icmd(dev, 0x00000905, 0x00000001);
+	nv_icmd(dev, 0x00000906, 0x00000001);
+	nv_icmd(dev, 0x00000907, 0x00000001);
+	nv_icmd(dev, 0x00000908, 0x00000002);
+	nv_icmd(dev, 0x00000909, 0x00000002);
+	nv_icmd(dev, 0x0000090a, 0x00000002);
+	nv_icmd(dev, 0x0000090b, 0x00000002);
+	nv_icmd(dev, 0x0000090c, 0x00000002);
+	nv_icmd(dev, 0x0000090d, 0x00000002);
+	nv_icmd(dev, 0x0000090e, 0x00000002);
+	nv_icmd(dev, 0x0000090f, 0x00000002);
+	nv_icmd(dev, 0x00000910, 0x00000001);
+	nv_icmd(dev, 0x00000911, 0x00000001);
+	nv_icmd(dev, 0x00000912, 0x00000001);
+	nv_icmd(dev, 0x00000913, 0x00000001);
+	nv_icmd(dev, 0x00000914, 0x00000001);
+	nv_icmd(dev, 0x00000915, 0x00000001);
+	nv_icmd(dev, 0x00000916, 0x00000001);
+	nv_icmd(dev, 0x00000917, 0x00000001);
+	nv_icmd(dev, 0x00000918, 0x00000001);
+	nv_icmd(dev, 0x00000919, 0x00000001);
+	nv_icmd(dev, 0x0000091a, 0x00000001);
+	nv_icmd(dev, 0x0000091b, 0x00000001);
+	nv_icmd(dev, 0x0000091c, 0x00000001);
+	nv_icmd(dev, 0x0000091d, 0x00000001);
+	nv_icmd(dev, 0x0000091e, 0x00000001);
+	nv_icmd(dev, 0x0000091f, 0x00000001);
+	nv_icmd(dev, 0x00000920, 0x00000002);
+	nv_icmd(dev, 0x00000921, 0x00000002);
+	nv_icmd(dev, 0x00000922, 0x00000002);
+	nv_icmd(dev, 0x00000923, 0x00000002);
+	nv_icmd(dev, 0x00000924, 0x00000002);
+	nv_icmd(dev, 0x00000925, 0x00000002);
+	nv_icmd(dev, 0x00000926, 0x00000002);
+	nv_icmd(dev, 0x00000927, 0x00000002);
+	nv_icmd(dev, 0x00000928, 0x00000001);
+	nv_icmd(dev, 0x00000929, 0x00000001);
+	nv_icmd(dev, 0x0000092a, 0x00000001);
+	nv_icmd(dev, 0x0000092b, 0x00000001);
+	nv_icmd(dev, 0x0000092c, 0x00000001);
+	nv_icmd(dev, 0x0000092d, 0x00000001);
+	nv_icmd(dev, 0x0000092e, 0x00000001);
+	nv_icmd(dev, 0x0000092f, 0x00000001);
+	nv_icmd(dev, 0x00000648, 0x00000001);
+	nv_icmd(dev, 0x00000649, 0x00000001);
+	nv_icmd(dev, 0x0000064a, 0x00000001);
+	nv_icmd(dev, 0x0000064b, 0x00000001);
+	nv_icmd(dev, 0x0000064c, 0x00000001);
+	nv_icmd(dev, 0x0000064d, 0x00000001);
+	nv_icmd(dev, 0x0000064e, 0x00000001);
+	nv_icmd(dev, 0x0000064f, 0x00000001);
+	nv_icmd(dev, 0x00000650, 0x00000001);
+	nv_icmd(dev, 0x00000658, 0x0000000f);
+	nv_icmd(dev, 0x000007ff, 0x0000000a);
+	nv_icmd(dev, 0x0000066a, 0x40000000);
+	nv_icmd(dev, 0x0000066b, 0x10000000);
+	nv_icmd(dev, 0x0000066c, 0xffff0000);
+	nv_icmd(dev, 0x0000066d, 0xffff0000);
+	nv_icmd(dev, 0x000007af, 0x00000008);
+	nv_icmd(dev, 0x000007b0, 0x00000008);
+	nv_icmd(dev, 0x000007f6, 0x00000001);
+	nv_icmd(dev, 0x000006b2, 0x00000055);
+	nv_icmd(dev, 0x000007ad, 0x00000003);
+	nv_icmd(dev, 0x00000937, 0x00000001);
+	nv_icmd(dev, 0x00000971, 0x00000008);
+	nv_icmd(dev, 0x00000972, 0x00000040);
+	nv_icmd(dev, 0x00000973, 0x0000012c);
+	nv_icmd(dev, 0x0000097c, 0x00000040);
+	nv_icmd(dev, 0x00000979, 0x00000003);
+	nv_icmd(dev, 0x00000975, 0x00000020);
+	nv_icmd(dev, 0x00000976, 0x00000001);
+	nv_icmd(dev, 0x00000977, 0x00000020);
+	nv_icmd(dev, 0x00000978, 0x00000001);
+	nv_icmd(dev, 0x00000957, 0x00000003);
+	nv_icmd(dev, 0x0000095e, 0x20164010);
+	nv_icmd(dev, 0x0000095f, 0x00000020);
+	nv_icmd(dev, 0x00000683, 0x00000006);
+	nv_icmd(dev, 0x00000685, 0x003fffff);
+	nv_icmd(dev, 0x00000687, 0x00000c48);
+	nv_icmd(dev, 0x000006a0, 0x00000005);
+	nv_icmd(dev, 0x00000840, 0x00300008);
+	nv_icmd(dev, 0x00000841, 0x04000080);
+	nv_icmd(dev, 0x00000842, 0x00300008);
+	nv_icmd(dev, 0x00000843, 0x04000080);
+	nv_icmd(dev, 0x00000818, 0x00000000);
+	nv_icmd(dev, 0x00000819, 0x00000000);
+	nv_icmd(dev, 0x0000081a, 0x00000000);
+	nv_icmd(dev, 0x0000081b, 0x00000000);
+	nv_icmd(dev, 0x0000081c, 0x00000000);
+	nv_icmd(dev, 0x0000081d, 0x00000000);
+	nv_icmd(dev, 0x0000081e, 0x00000000);
+	nv_icmd(dev, 0x0000081f, 0x00000000);
+	nv_icmd(dev, 0x00000848, 0x00000000);
+	nv_icmd(dev, 0x00000849, 0x00000000);
+	nv_icmd(dev, 0x0000084a, 0x00000000);
+	nv_icmd(dev, 0x0000084b, 0x00000000);
+	nv_icmd(dev, 0x0000084c, 0x00000000);
+	nv_icmd(dev, 0x0000084d, 0x00000000);
+	nv_icmd(dev, 0x0000084e, 0x00000000);
+	nv_icmd(dev, 0x0000084f, 0x00000000);
+	nv_icmd(dev, 0x00000850, 0x00000000);
+	nv_icmd(dev, 0x00000851, 0x00000000);
+	nv_icmd(dev, 0x00000852, 0x00000000);
+	nv_icmd(dev, 0x00000853, 0x00000000);
+	nv_icmd(dev, 0x00000854, 0x00000000);
+	nv_icmd(dev, 0x00000855, 0x00000000);
+	nv_icmd(dev, 0x00000856, 0x00000000);
+	nv_icmd(dev, 0x00000857, 0x00000000);
+	nv_icmd(dev, 0x00000738, 0x00000000);
+	nv_icmd(dev, 0x000006aa, 0x00000001);
+	nv_icmd(dev, 0x000006ab, 0x00000002);
+	nv_icmd(dev, 0x000006ac, 0x00000080);
+	nv_icmd(dev, 0x000006ad, 0x00000100);
+	nv_icmd(dev, 0x000006ae, 0x00000100);
+	nv_icmd(dev, 0x000006b1, 0x00000011);
+	nv_icmd(dev, 0x000006bb, 0x000000cf);
+	nv_icmd(dev, 0x000006ce, 0x2a712488);
+	nv_icmd(dev, 0x00000739, 0x4085c000);
+	nv_icmd(dev, 0x0000073a, 0x00000080);
+	nv_icmd(dev, 0x00000786, 0x80000100);
+	nv_icmd(dev, 0x0000073c, 0x00010100);
+	nv_icmd(dev, 0x0000073d, 0x02800000);
+	nv_icmd(dev, 0x00000787, 0x000000cf);
+	nv_icmd(dev, 0x0000078c, 0x00000008);
+	nv_icmd(dev, 0x00000792, 0x00000001);
+	nv_icmd(dev, 0x00000794, 0x00000001);
+	nv_icmd(dev, 0x00000795, 0x00000001);
+	nv_icmd(dev, 0x00000796, 0x00000001);
+	nv_icmd(dev, 0x00000797, 0x000000cf);
+	nv_icmd(dev, 0x00000836, 0x00000001);
+	nv_icmd(dev, 0x0000079a, 0x00000002);
+	nv_icmd(dev, 0x00000833, 0x04444480);
+	nv_icmd(dev, 0x000007a1, 0x00000001);
+	nv_icmd(dev, 0x000007a3, 0x00000001);
+	nv_icmd(dev, 0x000007a4, 0x00000001);
+	nv_icmd(dev, 0x000007a5, 0x00000001);
+	nv_icmd(dev, 0x00000831, 0x00000004);
+	nv_icmd(dev, 0x0000080c, 0x00000002);
+	nv_icmd(dev, 0x0000080d, 0x00000100);
+	nv_icmd(dev, 0x0000080e, 0x00000100);
+	nv_icmd(dev, 0x0000080f, 0x00000001);
+	nv_icmd(dev, 0x00000823, 0x00000002);
+	nv_icmd(dev, 0x00000824, 0x00000100);
+	nv_icmd(dev, 0x00000825, 0x00000100);
+	nv_icmd(dev, 0x00000826, 0x00000001);
+	nv_icmd(dev, 0x0000095d, 0x00000001);
+	nv_icmd(dev, 0x0000082b, 0x00000004);
+	nv_icmd(dev, 0x00000942, 0x00010001);
+	nv_icmd(dev, 0x00000943, 0x00000001);
+	nv_icmd(dev, 0x00000944, 0x00000022);
+	nv_icmd(dev, 0x000007c5, 0x00010001);
+	nv_icmd(dev, 0x00000834, 0x00000001);
+	nv_icmd(dev, 0x000007c7, 0x00000001);
+	nv_icmd(dev, 0x0000c1b0, 0x0000000f);
+	nv_icmd(dev, 0x0000c1b1, 0x0000000f);
+	nv_icmd(dev, 0x0000c1b2, 0x0000000f);
+	nv_icmd(dev, 0x0000c1b3, 0x0000000f);
+	nv_icmd(dev, 0x0000c1b4, 0x0000000f);
+	nv_icmd(dev, 0x0000c1b5, 0x0000000f);
+	nv_icmd(dev, 0x0000c1b6, 0x0000000f);
+	nv_icmd(dev, 0x0000c1b7, 0x0000000f);
+	nv_icmd(dev, 0x0000c1b8, 0x0fac6881);
+	nv_icmd(dev, 0x0000c1b9, 0x00fac688);
+	nv_icmd(dev, 0x0001e100, 0x00000001);
+	nv_icmd(dev, 0x00001000, 0x00000002);
+	nv_icmd(dev, 0x000006aa, 0x00000001);
+	nv_icmd(dev, 0x000006ad, 0x00000100);
+	nv_icmd(dev, 0x000006ae, 0x00000100);
+	nv_icmd(dev, 0x000006b1, 0x00000011);
+	nv_icmd(dev, 0x0000078c, 0x00000008);
+	nv_icmd(dev, 0x00000792, 0x00000001);
+	nv_icmd(dev, 0x00000794, 0x00000001);
+	nv_icmd(dev, 0x00000795, 0x00000001);
+	nv_icmd(dev, 0x00000796, 0x00000001);
+	nv_icmd(dev, 0x00000797, 0x000000cf);
+	nv_icmd(dev, 0x0000079a, 0x00000002);
+	nv_icmd(dev, 0x00000833, 0x04444480);
+	nv_icmd(dev, 0x000007a1, 0x00000001);
+	nv_icmd(dev, 0x000007a3, 0x00000001);
+	nv_icmd(dev, 0x000007a4, 0x00000001);
+	nv_icmd(dev, 0x000007a5, 0x00000001);
+	nv_icmd(dev, 0x00000831, 0x00000004);
+	nv_icmd(dev, 0x0001e100, 0x00000001);
+	nv_icmd(dev, 0x00001000, 0x00000014);
+	nv_icmd(dev, 0x00000351, 0x00000100);
+	nv_icmd(dev, 0x00000957, 0x00000003);
+	nv_icmd(dev, 0x0000095d, 0x00000001);
+	nv_icmd(dev, 0x0000082b, 0x00000004);
+	nv_icmd(dev, 0x00000942, 0x00010001);
+	nv_icmd(dev, 0x00000943, 0x00000001);
+	nv_icmd(dev, 0x000007c5, 0x00010001);
+	nv_icmd(dev, 0x00000834, 0x00000001);
+	nv_icmd(dev, 0x000007c7, 0x00000001);
+	nv_icmd(dev, 0x0001e100, 0x00000001);
+	nv_icmd(dev, 0x00001000, 0x00000001);
+	nv_icmd(dev, 0x0000080c, 0x00000002);
+	nv_icmd(dev, 0x0000080d, 0x00000100);
+	nv_icmd(dev, 0x0000080e, 0x00000100);
+	nv_icmd(dev, 0x0000080f, 0x00000001);
+	nv_icmd(dev, 0x00000823, 0x00000002);
+	nv_icmd(dev, 0x00000824, 0x00000100);
+	nv_icmd(dev, 0x00000825, 0x00000100);
+	nv_icmd(dev, 0x00000826, 0x00000001);
+	nv_icmd(dev, 0x0001e100, 0x00000001);
+	nv_wr32(dev, 0x400208, 0x00000000);
+	nv_wr32(dev, 0x404154, 0x00000400);
+
+	nvc0_grctx_generate_9097(dev);
+	nvc0_grctx_generate_902d(dev);
+	nvc0_grctx_generate_9039(dev);
+	nvc0_grctx_generate_90c0(dev);
+
+	nv_wr32(dev, 0x000260, r000260);
+	return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nvc0_instmem.c b/drivers/gpu/drm/nouveau/nvc0_instmem.c
index 13a0f78..c090917 100644
--- a/drivers/gpu/drm/nouveau/nvc0_instmem.c
+++ b/drivers/gpu/drm/nouveau/nvc0_instmem.c
@@ -25,132 +25,22 @@
 #include "drmP.h"
 
 #include "nouveau_drv.h"
+#include "nouveau_vm.h"
 
-int
-nvc0_instmem_populate(struct drm_device *dev, struct nouveau_gpuobj *gpuobj,
-		      uint32_t *size)
-{
-	int ret;
-
-	*size = ALIGN(*size, 4096);
-	if (*size == 0)
-		return -EINVAL;
-
-	ret = nouveau_bo_new(dev, NULL, *size, 0, TTM_PL_FLAG_VRAM, 0, 0x0000,
-			     true, false, &gpuobj->im_backing);
-	if (ret) {
-		NV_ERROR(dev, "error getting PRAMIN backing pages: %d\n", ret);
-		return ret;
-	}
-
-	ret = nouveau_bo_pin(gpuobj->im_backing, TTM_PL_FLAG_VRAM);
-	if (ret) {
-		NV_ERROR(dev, "error pinning PRAMIN backing VRAM: %d\n", ret);
-		nouveau_bo_ref(NULL, &gpuobj->im_backing);
-		return ret;
-	}
-
-	gpuobj->vinst = gpuobj->im_backing->bo.mem.start << PAGE_SHIFT;
-	return 0;
-}
-
-void
-nvc0_instmem_clear(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-
-	if (gpuobj && gpuobj->im_backing) {
-		if (gpuobj->im_bound)
-			dev_priv->engine.instmem.unbind(dev, gpuobj);
-		nouveau_bo_unpin(gpuobj->im_backing);
-		nouveau_bo_ref(NULL, &gpuobj->im_backing);
-		gpuobj->im_backing = NULL;
-	}
-}
-
-int
-nvc0_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	uint32_t pte, pte_end;
-	uint64_t vram;
-
-	if (!gpuobj->im_backing || !gpuobj->im_pramin || gpuobj->im_bound)
-		return -EINVAL;
-
-	NV_DEBUG(dev, "st=0x%lx sz=0x%lx\n",
-		 gpuobj->im_pramin->start, gpuobj->im_pramin->size);
-
-	pte     = gpuobj->im_pramin->start >> 12;
-	pte_end = (gpuobj->im_pramin->size >> 12) + pte;
-	vram    = gpuobj->vinst;
-
-	NV_DEBUG(dev, "pramin=0x%lx, pte=%d, pte_end=%d\n",
-		 gpuobj->im_pramin->start, pte, pte_end);
-	NV_DEBUG(dev, "first vram page: 0x%010llx\n", gpuobj->vinst);
-
-	while (pte < pte_end) {
-		nv_wr32(dev, 0x702000 + (pte * 8), (vram >> 8) | 1);
-		nv_wr32(dev, 0x702004 + (pte * 8), 0);
-		vram += 4096;
-		pte++;
-	}
-	dev_priv->engine.instmem.flush(dev);
-
-	if (1) {
-		u32 chan = nv_rd32(dev, 0x1700) << 16;
-		nv_wr32(dev, 0x100cb8, (chan + 0x1000) >> 8);
-		nv_wr32(dev, 0x100cbc, 0x80000005);
-	}
-
-	gpuobj->im_bound = 1;
-	return 0;
-}
-
-int
-nvc0_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	uint32_t pte, pte_end;
-
-	if (gpuobj->im_bound == 0)
-		return -EINVAL;
-
-	pte     = gpuobj->im_pramin->start >> 12;
-	pte_end = (gpuobj->im_pramin->size >> 12) + pte;
-	while (pte < pte_end) {
-		nv_wr32(dev, 0x702000 + (pte * 8), 0);
-		nv_wr32(dev, 0x702004 + (pte * 8), 0);
-		pte++;
-	}
-	dev_priv->engine.instmem.flush(dev);
-
-	gpuobj->im_bound = 0;
-	return 0;
-}
-
-void
-nvc0_instmem_flush(struct drm_device *dev)
-{
-	nv_wr32(dev, 0x070000, 1);
-	if (!nv_wait(dev, 0x070000, 0x00000002, 0x00000000))
-		NV_ERROR(dev, "PRAMIN flush timeout\n");
-}
+struct nvc0_instmem_priv {
+	struct nouveau_gpuobj  *bar1_pgd;
+	struct nouveau_channel *bar1;
+	struct nouveau_gpuobj  *bar3_pgd;
+	struct nouveau_channel *bar3;
+	struct nouveau_gpuobj  *chan_pgd;
+};
 
 int
 nvc0_instmem_suspend(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	u32 *buf;
-	int i;
 
-	dev_priv->susres.ramin_copy = vmalloc(65536);
-	if (!dev_priv->susres.ramin_copy)
-		return -ENOMEM;
-	buf = dev_priv->susres.ramin_copy;
-
-	for (i = 0; i < 65536; i += 4)
-		buf[i/4] = nv_rd32(dev, NV04_PRAMIN + i);
+	dev_priv->ramin_available = false;
 	return 0;
 }
 
@@ -158,73 +48,184 @@
 nvc0_instmem_resume(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	u32 *buf = dev_priv->susres.ramin_copy;
-	u64 chan;
-	int i;
+	struct nvc0_instmem_priv *priv = dev_priv->engine.instmem.priv;
 
-	chan = dev_priv->vram_size - dev_priv->ramin_rsvd_vram;
-	nv_wr32(dev, 0x001700, chan >> 16);
+	nv_mask(dev, 0x100c80, 0x00000001, 0x00000000);
+	nv_wr32(dev, 0x001704, 0x80000000 | priv->bar1->ramin->vinst >> 12);
+	nv_wr32(dev, 0x001714, 0xc0000000 | priv->bar3->ramin->vinst >> 12);
+	dev_priv->ramin_available = true;
+}
 
-	for (i = 0; i < 65536; i += 4)
-		nv_wr32(dev, NV04_PRAMIN + i, buf[i/4]);
-	vfree(dev_priv->susres.ramin_copy);
-	dev_priv->susres.ramin_copy = NULL;
+static void
+nvc0_channel_del(struct nouveau_channel **pchan)
+{
+	struct nouveau_channel *chan;
 
-	nv_wr32(dev, 0x001714, 0xc0000000 | (chan >> 12));
+	chan = *pchan;
+	*pchan = NULL;
+	if (!chan)
+		return;
+
+	nouveau_vm_ref(NULL, &chan->vm, NULL);
+	if (chan->ramin_heap.free_stack.next)
+		drm_mm_takedown(&chan->ramin_heap);
+	nouveau_gpuobj_ref(NULL, &chan->ramin);
+	kfree(chan);
+}
+
+static int
+nvc0_channel_new(struct drm_device *dev, u32 size, struct nouveau_vm *vm,
+		 struct nouveau_channel **pchan,
+		 struct nouveau_gpuobj *pgd, u64 vm_size)
+{
+	struct nouveau_channel *chan;
+	int ret;
+
+	chan = kzalloc(sizeof(*chan), GFP_KERNEL);
+	if (!chan)
+		return -ENOMEM;
+	chan->dev = dev;
+
+	ret = nouveau_gpuobj_new(dev, NULL, size, 0x1000, 0, &chan->ramin);
+	if (ret) {
+		nvc0_channel_del(&chan);
+		return ret;
+	}
+
+	ret = drm_mm_init(&chan->ramin_heap, 0x1000, size - 0x1000);
+	if (ret) {
+		nvc0_channel_del(&chan);
+		return ret;
+	}
+
+	ret = nouveau_vm_ref(vm, &chan->vm, NULL);
+	if (ret) {
+		nvc0_channel_del(&chan);
+		return ret;
+	}
+
+	nv_wo32(chan->ramin, 0x0200, lower_32_bits(pgd->vinst));
+	nv_wo32(chan->ramin, 0x0204, upper_32_bits(pgd->vinst));
+	nv_wo32(chan->ramin, 0x0208, lower_32_bits(vm_size - 1));
+	nv_wo32(chan->ramin, 0x020c, upper_32_bits(vm_size - 1));
+
+	*pchan = chan;
+	return 0;
 }
 
 int
 nvc0_instmem_init(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	u64 chan, pgt3, imem, lim3 = dev_priv->ramin_size - 1;
-	int ret, i;
+	struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
+	struct pci_dev *pdev = dev->pdev;
+	struct nvc0_instmem_priv *priv;
+	struct nouveau_vm *vm = NULL;
+	int ret;
 
-	dev_priv->ramin_rsvd_vram = 1 * 1024 * 1024;
-	chan = dev_priv->vram_size - dev_priv->ramin_rsvd_vram;
-	imem = 4096 + 4096 + 32768;
-
-	nv_wr32(dev, 0x001700, chan >> 16);
-
-	/* channel setup */
-	nv_wr32(dev, 0x700200, lower_32_bits(chan + 0x1000));
-	nv_wr32(dev, 0x700204, upper_32_bits(chan + 0x1000));
-	nv_wr32(dev, 0x700208, lower_32_bits(lim3));
-	nv_wr32(dev, 0x70020c, upper_32_bits(lim3));
-
-	/* point pgd -> pgt */
-	nv_wr32(dev, 0x701000, 0);
-	nv_wr32(dev, 0x701004, ((chan + 0x2000) >> 8) | 1);
-
-	/* point pgt -> physical vram for channel */
-	pgt3 = 0x2000;
-	for (i = 0; i < dev_priv->ramin_rsvd_vram; i += 4096, pgt3 += 8) {
-		nv_wr32(dev, 0x700000 + pgt3, ((chan + i) >> 8) | 1);
-		nv_wr32(dev, 0x700004 + pgt3, 0);
-	}
-
-	/* clear rest of pgt */
-	for (; i < dev_priv->ramin_size; i += 4096, pgt3 += 8) {
-		nv_wr32(dev, 0x700000 + pgt3, 0);
-		nv_wr32(dev, 0x700004 + pgt3, 0);
-	}
-
-	/* point bar3 at the channel */
-	nv_wr32(dev, 0x001714, 0xc0000000 | (chan >> 12));
-
-	/* Global PRAMIN heap */
-	ret = drm_mm_init(&dev_priv->ramin_heap, imem,
-			  dev_priv->ramin_size - imem);
-	if (ret) {
-		NV_ERROR(dev, "Failed to init RAMIN heap\n");
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	if (!priv)
 		return -ENOMEM;
-	}
+	pinstmem->priv = priv;
 
+	/* BAR3 VM */
+	ret = nouveau_vm_new(dev, 0, pci_resource_len(pdev, 3), 0,
+			     &dev_priv->bar3_vm);
+	if (ret)
+		goto error;
+
+	ret = nouveau_gpuobj_new(dev, NULL,
+				 (pci_resource_len(pdev, 3) >> 12) * 8, 0,
+				 NVOBJ_FLAG_DONT_MAP |
+				 NVOBJ_FLAG_ZERO_ALLOC,
+				 &dev_priv->bar3_vm->pgt[0].obj[0]);
+	if (ret)
+		goto error;
+	dev_priv->bar3_vm->pgt[0].refcount[0] = 1;
+
+	nv50_instmem_map(dev_priv->bar3_vm->pgt[0].obj[0]);
+
+	ret = nouveau_gpuobj_new(dev, NULL, 0x8000, 4096,
+				 NVOBJ_FLAG_ZERO_ALLOC, &priv->bar3_pgd);
+	if (ret)
+		goto error;
+
+	ret = nouveau_vm_ref(dev_priv->bar3_vm, &vm, priv->bar3_pgd);
+	if (ret)
+		goto error;
+	nouveau_vm_ref(NULL, &vm, NULL);
+
+	ret = nvc0_channel_new(dev, 8192, dev_priv->bar3_vm, &priv->bar3,
+			       priv->bar3_pgd, pci_resource_len(dev->pdev, 3));
+	if (ret)
+		goto error;
+
+	/* BAR1 VM */
+	ret = nouveau_vm_new(dev, 0, pci_resource_len(pdev, 1), 0, &vm);
+	if (ret)
+		goto error;
+
+	ret = nouveau_gpuobj_new(dev, NULL, 0x8000, 4096,
+				 NVOBJ_FLAG_ZERO_ALLOC, &priv->bar1_pgd);
+	if (ret)
+		goto error;
+
+	ret = nouveau_vm_ref(vm, &dev_priv->bar1_vm, priv->bar1_pgd);
+	if (ret)
+		goto error;
+	nouveau_vm_ref(NULL, &vm, NULL);
+
+	ret = nvc0_channel_new(dev, 8192, dev_priv->bar1_vm, &priv->bar1,
+			       priv->bar1_pgd, pci_resource_len(dev->pdev, 1));
+	if (ret)
+		goto error;
+
+	/* channel vm */
+	ret = nouveau_vm_new(dev, 0, (1ULL << 40), 0x0008000000ULL, &vm);
+	if (ret)
+		goto error;
+
+	ret = nouveau_gpuobj_new(dev, NULL, 0x8000, 4096, 0, &priv->chan_pgd);
+	if (ret)
+		goto error;
+
+	nouveau_vm_ref(vm, &dev_priv->chan_vm, priv->chan_pgd);
+	nouveau_vm_ref(NULL, &vm, NULL);
+
+	nvc0_instmem_resume(dev);
 	return 0;
+error:
+	nvc0_instmem_takedown(dev);
+	return ret;
 }
 
 void
 nvc0_instmem_takedown(struct drm_device *dev)
 {
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nvc0_instmem_priv *priv = dev_priv->engine.instmem.priv;
+	struct nouveau_vm *vm = NULL;
+
+	nvc0_instmem_suspend(dev);
+
+	nv_wr32(dev, 0x1704, 0x00000000);
+	nv_wr32(dev, 0x1714, 0x00000000);
+
+	nouveau_vm_ref(NULL, &dev_priv->chan_vm, priv->chan_pgd);
+	nouveau_gpuobj_ref(NULL, &priv->chan_pgd);
+
+	nvc0_channel_del(&priv->bar1);
+	nouveau_vm_ref(NULL, &dev_priv->bar1_vm, priv->bar1_pgd);
+	nouveau_gpuobj_ref(NULL, &priv->bar1_pgd);
+
+	nvc0_channel_del(&priv->bar3);
+	nouveau_vm_ref(dev_priv->bar3_vm, &vm, NULL);
+	nouveau_vm_ref(NULL, &vm, priv->bar3_pgd);
+	nouveau_gpuobj_ref(NULL, &priv->bar3_pgd);
+	nouveau_gpuobj_ref(NULL, &dev_priv->bar3_vm->pgt[0].obj[0]);
+	nouveau_vm_ref(NULL, &dev_priv->bar3_vm, NULL);
+
+	dev_priv->engine.instmem.priv = NULL;
+	kfree(priv);
 }
 
diff --git a/drivers/gpu/drm/nouveau/nvc0_vm.c b/drivers/gpu/drm/nouveau/nvc0_vm.c
new file mode 100644
index 0000000..4b9251b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvc0_vm.c
@@ -0,0 +1,123 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+
+#include "nouveau_drv.h"
+#include "nouveau_vm.h"
+
+void
+nvc0_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 index,
+		struct nouveau_gpuobj *pgt[2])
+{
+	u32 pde[2] = { 0, 0 };
+
+	if (pgt[0])
+		pde[1] = 0x00000001 | (pgt[0]->vinst >> 8);
+	if (pgt[1])
+		pde[0] = 0x00000001 | (pgt[1]->vinst >> 8);
+
+	nv_wo32(pgd, (index * 8) + 0, pde[0]);
+	nv_wo32(pgd, (index * 8) + 4, pde[1]);
+}
+
+static inline u64
+nvc0_vm_addr(struct nouveau_vma *vma, u64 phys, u32 memtype, u32 target)
+{
+	phys >>= 8;
+
+	phys |= 0x00000001; /* present */
+//	if (vma->access & NV_MEM_ACCESS_SYS)
+//		phys |= 0x00000002;
+
+	phys |= ((u64)target  << 32);
+	phys |= ((u64)memtype << 36);
+
+	return phys;
+}
+
+void
+nvc0_vm_map(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
+	    struct nouveau_vram *mem, u32 pte, u32 cnt, u64 phys)
+{
+	u32 next = 1 << (vma->node->type - 8);
+
+	phys  = nvc0_vm_addr(vma, phys, mem->memtype, 0);
+	pte <<= 3;
+	while (cnt--) {
+		nv_wo32(pgt, pte + 0, lower_32_bits(phys));
+		nv_wo32(pgt, pte + 4, upper_32_bits(phys));
+		phys += next;
+		pte  += 8;
+	}
+}
+
+void
+nvc0_vm_map_sg(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
+	       u32 pte, dma_addr_t *list, u32 cnt)
+{
+	pte <<= 3;
+	while (cnt--) {
+		u64 phys = nvc0_vm_addr(vma, *list++, 0, 5);
+		nv_wo32(pgt, pte + 0, lower_32_bits(phys));
+		nv_wo32(pgt, pte + 4, upper_32_bits(phys));
+		pte += 8;
+	}
+}
+
+void
+nvc0_vm_unmap(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt)
+{
+	pte <<= 3;
+	while (cnt--) {
+		nv_wo32(pgt, pte + 0, 0x00000000);
+		nv_wo32(pgt, pte + 4, 0x00000000);
+		pte += 8;
+	}
+}
+
+void
+nvc0_vm_flush(struct nouveau_vm *vm)
+{
+	struct drm_nouveau_private *dev_priv = vm->dev->dev_private;
+	struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
+	struct drm_device *dev = vm->dev;
+	struct nouveau_vm_pgd *vpgd;
+	u32 r100c80, engine;
+
+	pinstmem->flush(vm->dev);
+
+	if (vm == dev_priv->chan_vm)
+		engine = 1;
+	else
+		engine = 5;
+
+	list_for_each_entry(vpgd, &vm->pgd_list, head) {
+		r100c80 = nv_rd32(dev, 0x100c80);
+		nv_wr32(dev, 0x100cb8, vpgd->obj->vinst >> 8);
+		nv_wr32(dev, 0x100cbc, 0x80000000 | engine);
+		if (!nv_wait(dev, 0x100c80, 0xffffffff, r100c80))
+			NV_ERROR(dev, "vm flush timeout eng %d\n", engine);
+	}
+}
diff --git a/drivers/gpu/drm/nouveau/nvc0_vram.c b/drivers/gpu/drm/nouveau/nvc0_vram.c
new file mode 100644
index 0000000..858eda5
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvc0_vram.c
@@ -0,0 +1,99 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_mm.h"
+
+bool
+nvc0_vram_flags_valid(struct drm_device *dev, u32 tile_flags)
+{
+	switch (tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK) {
+	case 0x0000:
+	case 0xfe00:
+	case 0xdb00:
+	case 0x1100:
+		return true;
+	default:
+		break;
+	}
+
+	return false;
+}
+
+int
+nvc0_vram_new(struct drm_device *dev, u64 size, u32 align, u32 ncmin,
+	      u32 type, struct nouveau_vram **pvram)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;
+	struct ttm_mem_type_manager *man = &bdev->man[TTM_PL_VRAM];
+	struct nouveau_mm *mm = man->priv;
+	struct nouveau_mm_node *r;
+	struct nouveau_vram *vram;
+	int ret;
+
+	size  >>= 12;
+	align >>= 12;
+	ncmin >>= 12;
+
+	vram = kzalloc(sizeof(*vram), GFP_KERNEL);
+	if (!vram)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&vram->regions);
+	vram->dev = dev_priv->dev;
+	vram->memtype = type;
+	vram->size = size;
+
+	mutex_lock(&mm->mutex);
+	do {
+		ret = nouveau_mm_get(mm, 1, size, ncmin, align, &r);
+		if (ret) {
+			mutex_unlock(&mm->mutex);
+			nv50_vram_del(dev, &vram);
+			return ret;
+		}
+
+		list_add_tail(&r->rl_entry, &vram->regions);
+		size -= r->length;
+	} while (size);
+	mutex_unlock(&mm->mutex);
+
+	r = list_first_entry(&vram->regions, struct nouveau_mm_node, rl_entry);
+	vram->offset = (u64)r->offset << 12;
+	*pvram = vram;
+	return 0;
+}
+
+int
+nvc0_vram_init(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+	dev_priv->vram_size  = nv_rd32(dev, 0x10f20c) << 20;
+	dev_priv->vram_size *= nv_rd32(dev, 0x121c74);
+	dev_priv->vram_rblock_size = 4096;
+	return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nvreg.h b/drivers/gpu/drm/nouveau/nvreg.h
index 881f8a5..fe0f253 100644
--- a/drivers/gpu/drm/nouveau/nvreg.h
+++ b/drivers/gpu/drm/nouveau/nvreg.h
@@ -153,7 +153,8 @@
 #define NV_PCRTC_START					0x00600800
 #define NV_PCRTC_CONFIG					0x00600804
 #	define NV_PCRTC_CONFIG_START_ADDRESS_NON_VGA		(1 << 0)
-#	define NV_PCRTC_CONFIG_START_ADDRESS_HSYNC		(2 << 0)
+#	define NV04_PCRTC_CONFIG_START_ADDRESS_HSYNC		(4 << 0)
+#	define NV10_PCRTC_CONFIG_START_ADDRESS_HSYNC		(2 << 0)
 #define NV_PCRTC_CURSOR_CONFIG				0x00600810
 #	define NV_PCRTC_CURSOR_CONFIG_ENABLE_ENABLE		(1 << 0)
 #	define NV_PCRTC_CURSOR_CONFIG_DOUBLE_SCAN_ENABLE	(1 << 4)
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile
index 6cae4f2..e47eecf 100644
--- a/drivers/gpu/drm/radeon/Makefile
+++ b/drivers/gpu/drm/radeon/Makefile
@@ -65,10 +65,13 @@
 	rs400.o rs600.o rs690.o rv515.o r520.o r600.o rv770.o radeon_test.o \
 	r200.o radeon_legacy_tv.o r600_cs.o r600_blit.o r600_blit_shaders.o \
 	r600_blit_kms.o radeon_pm.o atombios_dp.o r600_audio.o r600_hdmi.o \
-	evergreen.o evergreen_cs.o evergreen_blit_shaders.o evergreen_blit_kms.o
+	evergreen.o evergreen_cs.o evergreen_blit_shaders.o evergreen_blit_kms.o \
+	radeon_trace_points.o ni.o
 
 radeon-$(CONFIG_COMPAT) += radeon_ioc32.o
 radeon-$(CONFIG_VGA_SWITCHEROO) += radeon_atpx_handler.o
 radeon-$(CONFIG_ACPI) += radeon_acpi.o
 
 obj-$(CONFIG_DRM_RADEON)+= radeon.o
+
+CFLAGS_radeon_trace_points.o := -I$(src)
\ No newline at end of file
diff --git a/drivers/gpu/drm/radeon/ObjectID.h b/drivers/gpu/drm/radeon/ObjectID.h
index c714179..c61c3fe 100644
--- a/drivers/gpu/drm/radeon/ObjectID.h
+++ b/drivers/gpu/drm/radeon/ObjectID.h
@@ -37,6 +37,8 @@
 #define GRAPH_OBJECT_TYPE_CONNECTOR               0x3
 #define GRAPH_OBJECT_TYPE_ROUTER                  0x4
 /* deleted */
+#define GRAPH_OBJECT_TYPE_DISPLAY_PATH            0x6  
+#define GRAPH_OBJECT_TYPE_GENERIC                 0x7
 
 /****************************************************/
 /* Encoder Object ID Definition                     */
@@ -64,6 +66,9 @@
 #define ENCODER_OBJECT_ID_VT1623                  0x10
 #define ENCODER_OBJECT_ID_HDMI_SI1930             0x11
 #define ENCODER_OBJECT_ID_HDMI_INTERNAL           0x12
+#define ENCODER_OBJECT_ID_ALMOND                  0x22
+#define ENCODER_OBJECT_ID_TRAVIS                  0x23
+#define ENCODER_OBJECT_ID_NUTMEG                  0x22
 /* Kaleidoscope (KLDSCP) Class Display Hardware (internal) */
 #define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1   0x13
 #define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1    0x14
@@ -108,6 +113,7 @@
 #define CONNECTOR_OBJECT_ID_DISPLAYPORT           0x13
 #define CONNECTOR_OBJECT_ID_eDP                   0x14
 #define CONNECTOR_OBJECT_ID_MXM                   0x15
+#define CONNECTOR_OBJECT_ID_LVDS_eDP              0x16
 
 /* deleted */
 
@@ -124,6 +130,7 @@
 #define GENERIC_OBJECT_ID_GLSYNC                  0x01
 #define GENERIC_OBJECT_ID_PX2_NON_DRIVABLE        0x02
 #define GENERIC_OBJECT_ID_MXM_OPM                 0x03
+#define GENERIC_OBJECT_ID_STEREO_PIN              0x04        //This object could show up from Misc Object table, it follows ATOM_OBJECT format, and contains one ATOM_OBJECT_GPIO_CNTL_RECORD for the stereo pin
 
 /****************************************************/
 /* Graphics Object ENUM ID Definition               */
@@ -360,6 +367,26 @@
                                                   GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
                                                   ENCODER_OBJECT_ID_GENERAL_EXTERNAL_DVO << OBJECT_ID_SHIFT)
 
+#define ENCODER_ALMOND_ENUM_ID1                  ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                                  GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                  ENCODER_OBJECT_ID_ALMOND << OBJECT_ID_SHIFT)
+
+#define ENCODER_ALMOND_ENUM_ID2                  ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                                  GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                  ENCODER_OBJECT_ID_ALMOND << OBJECT_ID_SHIFT)
+
+#define ENCODER_TRAVIS_ENUM_ID1                  ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                                  GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                  ENCODER_OBJECT_ID_TRAVIS << OBJECT_ID_SHIFT)
+
+#define ENCODER_TRAVIS_ENUM_ID2                  ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                                  GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                  ENCODER_OBJECT_ID_TRAVIS << OBJECT_ID_SHIFT)
+
+#define ENCODER_NUTMEG_ENUM_ID1                  ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                                  GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                  ENCODER_OBJECT_ID_NUTMEG << OBJECT_ID_SHIFT)
+
 /****************************************************/
 /* Connector Object ID definition - Shared with BIOS */
 /****************************************************/
@@ -421,6 +448,14 @@
                                                  GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
                                                  CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D << OBJECT_ID_SHIFT)
 
+#define CONNECTOR_SINGLE_LINK_DVI_D_ENUM_ID3   ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID3 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_SINGLE_LINK_DVI_D_ENUM_ID4   ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID4 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D << OBJECT_ID_SHIFT)
+
 #define CONNECTOR_DUAL_LINK_DVI_D_ENUM_ID1     ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
                                                  GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
                                                  CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D << OBJECT_ID_SHIFT)
@@ -512,6 +547,7 @@
 #define CONNECTOR_7PIN_DIN_ENUM_ID1            ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
                                                  GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
                                                  CONNECTOR_OBJECT_ID_7PIN_DIN << OBJECT_ID_SHIFT)
+
 #define CONNECTOR_7PIN_DIN_ENUM_ID2            ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
                                                  GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
                                                  CONNECTOR_OBJECT_ID_7PIN_DIN << OBJECT_ID_SHIFT)
@@ -593,6 +629,14 @@
                                                  GRAPH_OBJECT_ENUM_ID7 << ENUM_ID_SHIFT |\
                                                  CONNECTOR_OBJECT_ID_MXM << OBJECT_ID_SHIFT)          //Mapping to MXM_DAC
 
+#define CONNECTOR_LVDS_eDP_ENUM_ID1            ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_LVDS_eDP << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_LVDS_eDP_ENUM_ID2            ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_LVDS_eDP << OBJECT_ID_SHIFT)
+
 /****************************************************/
 /* Router Object ID definition - Shared with BIOS   */
 /****************************************************/
@@ -621,6 +665,10 @@
                                                  GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
                                                  GENERIC_OBJECT_ID_MXM_OPM << OBJECT_ID_SHIFT)
 
+#define GENERICOBJECT_STEREO_PIN_ENUM_ID1        (GRAPH_OBJECT_TYPE_GENERIC << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 GENERIC_OBJECT_ID_STEREO_PIN << OBJECT_ID_SHIFT)
+
 /****************************************************/
 /* Object Cap definition - Shared with BIOS         */
 /****************************************************/
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
index 05efb5b..258fa5e 100644
--- a/drivers/gpu/drm/radeon/atom.c
+++ b/drivers/gpu/drm/radeon/atom.c
@@ -734,16 +734,16 @@
 static void atom_op_mask(atom_exec_context *ctx, int *ptr, int arg)
 {
 	uint8_t attr = U8((*ptr)++);
-	uint32_t dst, src1, src2, saved;
+	uint32_t dst, mask, src, saved;
 	int dptr = *ptr;
 	SDEBUG("   dst: ");
 	dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
-	SDEBUG("   src1: ");
-	src1 = atom_get_src_direct(ctx, ((attr >> 3) & 7), ptr);
-	SDEBUG("   src2: ");
-	src2 = atom_get_src(ctx, attr, ptr);
-	dst &= src1;
-	dst |= src2;
+	mask = atom_get_src_direct(ctx, ((attr >> 3) & 7), ptr);
+	SDEBUG("   mask: 0x%08x", mask);
+	SDEBUG("   src: ");
+	src = atom_get_src(ctx, attr, ptr);
+	dst &= mask;
+	dst |= src;
 	SDEBUG("   dst: ");
 	atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
 }
diff --git a/drivers/gpu/drm/radeon/atombios.h b/drivers/gpu/drm/radeon/atombios.h
index fe359a2..04b269d 100644
--- a/drivers/gpu/drm/radeon/atombios.h
+++ b/drivers/gpu/drm/radeon/atombios.h
@@ -73,8 +73,18 @@
 #define ATOM_PPLL1            0
 #define ATOM_PPLL2            1
 #define ATOM_DCPLL            2
+#define ATOM_PPLL0            2
+#define ATOM_EXT_PLL1         8
+#define ATOM_EXT_PLL2         9
+#define ATOM_EXT_CLOCK        10
 #define ATOM_PPLL_INVALID     0xFF
 
+#define ENCODER_REFCLK_SRC_P1PLL       0       
+#define ENCODER_REFCLK_SRC_P2PLL       1
+#define ENCODER_REFCLK_SRC_DCPLL       2
+#define ENCODER_REFCLK_SRC_EXTCLK      3
+#define ENCODER_REFCLK_SRC_INVALID     0xFF
+
 #define ATOM_SCALER1          0
 #define ATOM_SCALER2          1
 
@@ -192,6 +202,9 @@
                                   /*Image can't be updated, while Driver needs to carry the new table! */
 }ATOM_COMMON_TABLE_HEADER;
 
+/****************************************************************************/	
+// Structure stores the ROM header.
+/****************************************************************************/	
 typedef struct _ATOM_ROM_HEADER
 {
   ATOM_COMMON_TABLE_HEADER		sHeader;
@@ -221,6 +234,9 @@
 	#define	USHORT	void*
 #endif
 
+/****************************************************************************/	
+// Structures used in Command.mtb 
+/****************************************************************************/	
 typedef struct _ATOM_MASTER_LIST_OF_COMMAND_TABLES{
   USHORT ASIC_Init;                              //Function Table, used by various SW components,latest version 1.1
   USHORT GetDisplaySurfaceSize;                  //Atomic Table,  Used by Bios when enabling HW ICON
@@ -312,6 +328,7 @@
 #define SetUniphyInstance                        ASIC_StaticPwrMgtStatusChange
 #define HPDInterruptService                      ReadHWAssistedI2CStatus
 #define EnableVGA_Access                         GetSCLKOverMCLKRatio
+#define GetDispObjectInfo                        EnableYUV 
 
 typedef struct _ATOM_MASTER_COMMAND_TABLE
 {
@@ -357,6 +374,24 @@
 /****************************************************************************/	
 #define COMPUTE_MEMORY_PLL_PARAM        1
 #define COMPUTE_ENGINE_PLL_PARAM        2
+#define ADJUST_MC_SETTING_PARAM         3
+
+/****************************************************************************/	
+// Structures used by AdjustMemoryControllerTable
+/****************************************************************************/	
+typedef struct _ATOM_ADJUST_MEMORY_CLOCK_FREQ
+{
+#if ATOM_BIG_ENDIAN
+  ULONG ulPointerReturnFlag:1;      // BYTE_3[7]=1 - Return the pointer to the right Data Block; BYTE_3[7]=0 - Program the right Data Block 
+  ULONG ulMemoryModuleNumber:7;     // BYTE_3[6:0]
+  ULONG ulClockFreq:24;
+#else
+  ULONG ulClockFreq:24;
+  ULONG ulMemoryModuleNumber:7;     // BYTE_3[6:0]
+  ULONG ulPointerReturnFlag:1;      // BYTE_3[7]=1 - Return the pointer to the right Data Block; BYTE_3[7]=0 - Program the right Data Block 
+#endif
+}ATOM_ADJUST_MEMORY_CLOCK_FREQ;
+#define POINTER_RETURN_FLAG             0x80
 
 typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS
 {
@@ -440,6 +475,26 @@
 #endif
 }COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V4;
 
+typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V5
+{
+  union
+  {
+    ATOM_COMPUTE_CLOCK_FREQ  ulClock;         //Input Parameter
+    ATOM_S_MPLL_FB_DIVIDER   ulFbDiv;         //Output Parameter
+  };
+  UCHAR   ucRefDiv;                           //Output Parameter      
+  UCHAR   ucPostDiv;                          //Output Parameter      
+  union
+  {
+    UCHAR   ucCntlFlag;                       //Output Flags
+    UCHAR   ucInputFlag;                      //Input Flags. ucInputFlag[0] - Strobe(1)/Performance(0) mode
+  };
+  UCHAR   ucReserved;                       
+}COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V5;
+
+// ucInputFlag
+#define ATOM_PLL_INPUT_FLAG_PLL_STROBE_MODE_EN  1   // 1-StrobeMode, 0-PerformanceMode
+
 typedef struct _DYNAMICE_MEMORY_SETTINGS_PARAMETER
 {
   ATOM_COMPUTE_CLOCK_FREQ ulClock;
@@ -583,6 +638,7 @@
 #define ATOM_ENCODER_CONFIG_DPLINKRATE_MASK				0x01
 #define ATOM_ENCODER_CONFIG_DPLINKRATE_1_62GHZ		0x00
 #define ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ		0x01
+#define ATOM_ENCODER_CONFIG_DPLINKRATE_5_40GHZ		0x02
 #define ATOM_ENCODER_CONFIG_LINK_SEL_MASK				  0x04
 #define ATOM_ENCODER_CONFIG_LINKA								  0x00
 #define ATOM_ENCODER_CONFIG_LINKB								  0x04
@@ -608,6 +664,9 @@
 #define ATOM_ENCODER_MODE_TV											13
 #define ATOM_ENCODER_MODE_CV											14
 #define ATOM_ENCODER_MODE_CRT											15
+#define ATOM_ENCODER_MODE_DVO											16
+#define ATOM_ENCODER_MODE_DP_SST                  ATOM_ENCODER_MODE_DP    // For DP1.2
+#define ATOM_ENCODER_MODE_DP_MST                  5                       // For DP1.2
 
 typedef struct _ATOM_DIG_ENCODER_CONFIG_V2
 {
@@ -661,6 +720,7 @@
 #define ATOM_ENCODER_CMD_DP_LINK_TRAINING_START       0x08
 #define ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN1    0x09
 #define ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN2    0x0a
+#define ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN3    0x13
 #define ATOM_ENCODER_CMD_DP_LINK_TRAINING_COMPLETE    0x0b
 #define ATOM_ENCODER_CMD_DP_VIDEO_OFF                 0x0c
 #define ATOM_ENCODER_CMD_DP_VIDEO_ON                  0x0d
@@ -671,24 +731,34 @@
 #define ATOM_ENCODER_STATUS_LINK_TRAINING_COMPLETE    0x10
 #define ATOM_ENCODER_STATUS_LINK_TRAINING_INCOMPLETE  0x00
 
+//ucTableFormatRevision=1
+//ucTableContentRevision=3
 // Following function ENABLE sub-function will be used by driver when TMDS/HDMI/LVDS is used, disable function will be used by driver
 typedef struct _ATOM_DIG_ENCODER_CONFIG_V3
 {
 #if ATOM_BIG_ENDIAN
     UCHAR ucReserved1:1;
-    UCHAR ucDigSel:3;             // =0: DIGA/B/C/D/E/F
+    UCHAR ucDigSel:3;             // =0/1/2/3/4/5: DIG0/1/2/3/4/5 (In register spec also referred to as DIGA/B/C/D/E/F)
     UCHAR ucReserved:3;
     UCHAR ucDPLinkRate:1;         // =0: 1.62Ghz, =1: 2.7Ghz
 #else
     UCHAR ucDPLinkRate:1;         // =0: 1.62Ghz, =1: 2.7Ghz
     UCHAR ucReserved:3;
-    UCHAR ucDigSel:3;             // =0: DIGA/B/C/D/E/F
+    UCHAR ucDigSel:3;             // =0/1/2/3/4/5: DIG0/1/2/3/4/5 (In register spec also referred to as DIGA/B/C/D/E/F)
     UCHAR ucReserved1:1;
 #endif
 }ATOM_DIG_ENCODER_CONFIG_V3;
 
+#define ATOM_ENCODER_CONFIG_V3_DPLINKRATE_MASK				0x03
+#define ATOM_ENCODER_CONFIG_V3_DPLINKRATE_1_62GHZ		  0x00
+#define ATOM_ENCODER_CONFIG_V3_DPLINKRATE_2_70GHZ		  0x01
 #define ATOM_ENCODER_CONFIG_V3_ENCODER_SEL					  0x70
-
+#define ATOM_ENCODER_CONFIG_V3_DIG0_ENCODER					  0x00
+#define ATOM_ENCODER_CONFIG_V3_DIG1_ENCODER					  0x10
+#define ATOM_ENCODER_CONFIG_V3_DIG2_ENCODER					  0x20
+#define ATOM_ENCODER_CONFIG_V3_DIG3_ENCODER					  0x30
+#define ATOM_ENCODER_CONFIG_V3_DIG4_ENCODER					  0x40
+#define ATOM_ENCODER_CONFIG_V3_DIG5_ENCODER					  0x50
 
 typedef struct _DIG_ENCODER_CONTROL_PARAMETERS_V3
 {
@@ -707,6 +777,56 @@
   UCHAR ucReserved;
 }DIG_ENCODER_CONTROL_PARAMETERS_V3;
 
+//ucTableFormatRevision=1
+//ucTableContentRevision=4
+// start from NI           
+// Following function ENABLE sub-function will be used by driver when TMDS/HDMI/LVDS is used, disable function will be used by driver
+typedef struct _ATOM_DIG_ENCODER_CONFIG_V4
+{
+#if ATOM_BIG_ENDIAN
+    UCHAR ucReserved1:1;
+    UCHAR ucDigSel:3;             // =0/1/2/3/4/5: DIG0/1/2/3/4/5 (In register spec also referred to as DIGA/B/C/D/E/F)
+    UCHAR ucReserved:2;
+    UCHAR ucDPLinkRate:2;         // =0: 1.62Ghz, =1: 2.7Ghz, 2=5.4Ghz    <= Changed comparing to previous version
+#else
+    UCHAR ucDPLinkRate:2;         // =0: 1.62Ghz, =1: 2.7Ghz, 2=5.4Ghz    <= Changed comparing to previous version
+    UCHAR ucReserved:2;
+    UCHAR ucDigSel:3;             // =0/1/2/3/4/5: DIG0/1/2/3/4/5 (In register spec also referred to as DIGA/B/C/D/E/F)
+    UCHAR ucReserved1:1;
+#endif
+}ATOM_DIG_ENCODER_CONFIG_V4;
+
+#define ATOM_ENCODER_CONFIG_V4_DPLINKRATE_MASK				0x03
+#define ATOM_ENCODER_CONFIG_V4_DPLINKRATE_1_62GHZ		  0x00
+#define ATOM_ENCODER_CONFIG_V4_DPLINKRATE_2_70GHZ		  0x01
+#define ATOM_ENCODER_CONFIG_V4_DPLINKRATE_5_40GHZ		  0x02
+#define ATOM_ENCODER_CONFIG_V4_ENCODER_SEL					  0x70
+#define ATOM_ENCODER_CONFIG_V4_DIG0_ENCODER					  0x00
+#define ATOM_ENCODER_CONFIG_V4_DIG1_ENCODER					  0x10
+#define ATOM_ENCODER_CONFIG_V4_DIG2_ENCODER					  0x20
+#define ATOM_ENCODER_CONFIG_V4_DIG3_ENCODER					  0x30
+#define ATOM_ENCODER_CONFIG_V4_DIG4_ENCODER					  0x40
+#define ATOM_ENCODER_CONFIG_V4_DIG5_ENCODER					  0x50
+
+typedef struct _DIG_ENCODER_CONTROL_PARAMETERS_V4
+{
+  USHORT usPixelClock;      // in 10KHz; for bios convenience
+  union{
+  ATOM_DIG_ENCODER_CONFIG_V4 acConfig;
+  UCHAR ucConfig;
+  };
+  UCHAR ucAction;                              
+  UCHAR ucEncoderMode;
+                            // =0: DP   encoder      
+                            // =1: LVDS encoder          
+                            // =2: DVI  encoder  
+                            // =3: HDMI encoder
+                            // =4: SDVO encoder
+                            // =5: DP audio
+  UCHAR ucLaneNum;          // how many lanes to enable
+  UCHAR ucBitPerColor;      // only valid for DP mode when ucAction = ATOM_ENCODER_CMD_SETUP
+  UCHAR ucHPD_ID;           // HPD ID (1-6). =0 means to skip HPD programming. New compared to previous version
+}DIG_ENCODER_CONTROL_PARAMETERS_V4;
 
 // define ucBitPerColor: 
 #define PANEL_BPC_UNDEFINE                               0x00
@@ -893,6 +1013,7 @@
 #endif
 }ATOM_DIG_TRANSMITTER_CONFIG_V3;
 
+
 typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS_V3
 {
 	union
@@ -936,6 +1057,149 @@
 #define ATOM_TRANSMITTER_CONFIG_V3_TRANSMITTER2           	0x40	//CD
 #define ATOM_TRANSMITTER_CONFIG_V3_TRANSMITTER3           	0x80	//EF
 
+
+/****************************************************************************/	
+// Structures used by UNIPHYTransmitterControlTable V1.4
+// ASIC Families: NI
+// ucTableFormatRevision=1
+// ucTableContentRevision=4
+/****************************************************************************/	
+typedef struct _ATOM_DP_VS_MODE_V4
+{
+  UCHAR ucLaneSel;
+ 	union
+ 	{  
+ 	  UCHAR ucLaneSet;
+ 	  struct {
+#if ATOM_BIG_ENDIAN
+ 		  UCHAR ucPOST_CURSOR2:2;         //Bit[7:6] Post Cursor2 Level      <= New in V4
+ 		  UCHAR ucPRE_EMPHASIS:3;         //Bit[5:3] Pre-emphasis Level
+ 		  UCHAR ucVOLTAGE_SWING:3;        //Bit[2:0] Voltage Swing Level
+#else
+ 		  UCHAR ucVOLTAGE_SWING:3;        //Bit[2:0] Voltage Swing Level
+ 		  UCHAR ucPRE_EMPHASIS:3;         //Bit[5:3] Pre-emphasis Level
+ 		  UCHAR ucPOST_CURSOR2:2;         //Bit[7:6] Post Cursor2 Level      <= New in V4
+#endif
+ 		};
+ 	}; 
+}ATOM_DP_VS_MODE_V4;
+ 
+typedef struct _ATOM_DIG_TRANSMITTER_CONFIG_V4
+{
+#if ATOM_BIG_ENDIAN
+  UCHAR ucTransmitterSel:2;         //bit7:6: =0 Dig Transmitter 1 ( Uniphy AB )
+                                    //        =1 Dig Transmitter 2 ( Uniphy CD )
+                                    //        =2 Dig Transmitter 3 ( Uniphy EF )
+  UCHAR ucRefClkSource:2;           //bit5:4: PPLL1 =0, PPLL2=1, DCPLL=2, EXT_CLK=3   <= New
+  UCHAR ucEncoderSel:1;             //bit3=0: Data/Clk path source from DIGA/C/E. =1: Data/clk path source from DIGB/D/F
+  UCHAR ucLinkSel:1;                //bit2=0: Uniphy LINKA or C or E when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is A or C or E
+                                    //    =1: Uniphy LINKB or D or F when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is B or D or F
+  UCHAR fCoherentMode:1;            //bit1=1: Coherent Mode ( for DVI/HDMI mode )
+  UCHAR fDualLinkConnector:1;       //bit0=1: Dual Link DVI connector
+#else
+  UCHAR fDualLinkConnector:1;       //bit0=1: Dual Link DVI connector
+  UCHAR fCoherentMode:1;            //bit1=1: Coherent Mode ( for DVI/HDMI mode )
+  UCHAR ucLinkSel:1;                //bit2=0: Uniphy LINKA or C or E when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is A or C or E
+                                    //    =1: Uniphy LINKB or D or F when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is B or D or F
+  UCHAR ucEncoderSel:1;             //bit3=0: Data/Clk path source from DIGA/C/E. =1: Data/clk path source from DIGB/D/F
+  UCHAR ucRefClkSource:2;           //bit5:4: PPLL1 =0, PPLL2=1, DCPLL=2, EXT_CLK=3   <= New
+  UCHAR ucTransmitterSel:2;         //bit7:6: =0 Dig Transmitter 1 ( Uniphy AB )
+                                    //        =1 Dig Transmitter 2 ( Uniphy CD )
+                                    //        =2 Dig Transmitter 3 ( Uniphy EF )
+#endif
+}ATOM_DIG_TRANSMITTER_CONFIG_V4;
+
+typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS_V4
+{
+  union
+  {
+    USHORT usPixelClock;		// in 10kHz; for BIOS convenience
+    USHORT usInitInfo;			// when initializing UNIPHY, the lower 8 bits are the connector type defined in objectid.h
+    ATOM_DP_VS_MODE_V4 asMode; // DP voltage swing mode. Redefined compared to the previous version
+  };
+  union
+  {
+  ATOM_DIG_TRANSMITTER_CONFIG_V4 acConfig;
+  UCHAR ucConfig;
+  };
+  UCHAR ucAction;				    // defined as ATOM_TRANSMITTER_ACTION_XXX
+  UCHAR ucLaneNum;
+  UCHAR ucReserved[3];
+}DIG_TRANSMITTER_CONTROL_PARAMETERS_V4;
+
+//ucConfig 
+//Bit0
+#define ATOM_TRANSMITTER_CONFIG_V4_DUAL_LINK_CONNECTOR			0x01
+//Bit1
+#define ATOM_TRANSMITTER_CONFIG_V4_COHERENT				          0x02
+//Bit2
+#define ATOM_TRANSMITTER_CONFIG_V4_LINK_SEL_MASK		        0x04
+#define ATOM_TRANSMITTER_CONFIG_V4_LINKA  			            0x00			
+#define ATOM_TRANSMITTER_CONFIG_V4_LINKB				            0x04
+// Bit3
+#define ATOM_TRANSMITTER_CONFIG_V4_ENCODER_SEL_MASK	        0x08
+#define ATOM_TRANSMITTER_CONFIG_V4_DIG1_ENCODER		          0x00				 
+#define ATOM_TRANSMITTER_CONFIG_V4_DIG2_ENCODER		          0x08				
+// Bit5:4
+#define ATOM_TRANSMITTER_CONFIG_V4_REFCLK_SEL_MASK 	        0x30
+#define ATOM_TRANSMITTER_CONFIG_V4_P1PLL         		        0x00
+#define ATOM_TRANSMITTER_CONFIG_V4_P2PLL		                0x10
+#define ATOM_TRANSMITTER_CONFIG_V4_DCPLL		                0x20   // New in _V4
+#define ATOM_TRANSMITTER_CONFIG_V4_REFCLK_SRC_EXT           0x30   // Changed compared to V3
+// Bit7:6
+#define ATOM_TRANSMITTER_CONFIG_V4_TRANSMITTER_SEL_MASK     0xC0
+#define ATOM_TRANSMITTER_CONFIG_V4_TRANSMITTER1           	0x00	//AB
+#define ATOM_TRANSMITTER_CONFIG_V4_TRANSMITTER2           	0x40	//CD
+#define ATOM_TRANSMITTER_CONFIG_V4_TRANSMITTER3           	0x80	//EF
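+/* Editor's illustrative sketch, not part of the original header: reading the
+ * reference clock selection packed into ucConfig of
+ * DIG_TRANSMITTER_CONTROL_PARAMETERS_V4 using the V4 masks above. */
+static UCHAR example_transmitter_v4_refclk(UCHAR ucConfig)
+{
+	return ucConfig & ATOM_TRANSMITTER_CONFIG_V4_REFCLK_SEL_MASK;	/* P1PLL/P2PLL/DCPLL/EXT */
+}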
+
+
+/****************************************************************************/	
+// Structures used by ExternalEncoderControlTable V1.3
+// ASIC Families: Evergreen, Llano, NI
+// ucTableFormatRevision=1
+// ucTableContentRevision=3
+/****************************************************************************/	
+
+typedef struct _EXTERNAL_ENCODER_CONTROL_PARAMETERS_V3
+{
+  union{
+  USHORT usPixelClock;      // pixel clock in 10Khz, valid when ucAction=SETUP/ENABLE_OUTPUT 
+  USHORT usConnectorId;     // connector id, valid when ucAction = INIT
+  };
+  UCHAR  ucConfig;          // indicate which encoder, and DP link rate when ucAction = SETUP/ENABLE_OUTPUT  
+  UCHAR  ucAction;          // 
+  UCHAR  ucEncoderMode;     // encoder mode, only used when ucAction = SETUP/ENABLE_OUTPUT
+  UCHAR  ucLaneNum;         // lane number, only used when ucAction = SETUP/ENABLE_OUTPUT  
+  UCHAR  ucBitPerColor;     // output bit per color, only valid when ucAction = SETUP/ENABLE_OUTPUT and ucEncodeMode= DP
+  UCHAR  ucReserved;        
+}EXTERNAL_ENCODER_CONTROL_PARAMETERS_V3;
+
+// ucAction
+#define EXTERNAL_ENCODER_ACTION_V3_DISABLE_OUTPUT         0x00
+#define EXTERNAL_ENCODER_ACTION_V3_ENABLE_OUTPUT          0x01
+#define EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT           0x07
+#define EXTERNAL_ENCODER_ACTION_V3_ENCODER_SETUP          0x0f
+#define EXTERNAL_ENCODER_ACTION_V3_ENCODER_BLANKING_OFF   0x10
+#define EXTERNAL_ENCODER_ACTION_V3_ENCODER_BLANKING       0x11
+#define EXTERNAL_ENCODER_ACTION_V3_DACLOAD_DETECTION      0x12
+
+// ucConfig
+#define EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_MASK				0x03
+#define EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_1_62GHZ		  0x00
+#define EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_2_70GHZ		  0x01
+#define EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_5_40GHZ		  0x02
+#define EXTERNAL_ENCODER_CONFIG_V3_ENCODER_SEL_MASK		    0x70
+#define EXTERNAL_ENCODER_CONFIG_V3_ENCODER1		            0x00
+#define EXTERNAL_ENCODER_CONFIG_V3_ENCODER2		            0x10
+#define EXTERNAL_ENCODER_CONFIG_V3_ENCODER3		            0x20
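+/* Editor's illustrative sketch, not part of the original header: extracting
+ * the DP link rate selection from ucConfig of
+ * EXTERNAL_ENCODER_CONTROL_PARAMETERS_V3. */
+static UCHAR example_ext_enc_v3_link_rate(UCHAR ucConfig)
+{
+	/* 0x00 = 1.62 GHz, 0x01 = 2.70 GHz, 0x02 = 5.40 GHz */
+	return ucConfig & EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_MASK;
+}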
+
+typedef struct _EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION_V3
+{
+  EXTERNAL_ENCODER_CONTROL_PARAMETERS_V3 sExtEncoder;
+  ULONG ulReserved[2];
+}EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION_V3;
+
+
 /****************************************************************************/	
 // Structures used by DAC1OuputControlTable
 //                    DAC2OuputControlTable
@@ -1142,6 +1406,7 @@
 #define PIXEL_CLOCK_V4_MISC_SS_ENABLE               0x10
 #define PIXEL_CLOCK_V4_MISC_COHERENT_MODE           0x20
 
+
 typedef struct _PIXEL_CLOCK_PARAMETERS_V3
 {
   USHORT usPixelClock;                // in 10kHz unit; for bios convenient = (RefClk*FB_Div)/(Ref_Div*Post_Div)
@@ -1202,6 +1467,55 @@
 #define PIXEL_CLOCK_V5_MISC_HDMI_32BPP              0x08
 #define PIXEL_CLOCK_V5_MISC_REF_DIV_SRC             0x10
 
+typedef struct _CRTC_PIXEL_CLOCK_FREQ
+{
+#if ATOM_BIG_ENDIAN
+  ULONG  ucCRTC:8;            // ATOM_CRTC1~6, indicate the CRTC controller to 
+                              // drive the pixel clock. not used for DCPLL case.
+  ULONG  ulPixelClock:24;     // target pixel clock to drive the CRTC timing. 
+                              // 0 means disable PPLL/DCPLL. Expanded to 24 bits compared to the previous version.
+#else
+  ULONG  ulPixelClock:24;     // target pixel clock to drive the CRTC timing. 
+                              // 0 means disable PPLL/DCPLL. Expanded to 24 bits compared to the previous version.
+  ULONG  ucCRTC:8;            // ATOM_CRTC1~6, indicate the CRTC controller to 
+                              // drive the pixel clock. not used for DCPLL case.
+#endif
+}CRTC_PIXEL_CLOCK_FREQ;
+
+typedef struct _PIXEL_CLOCK_PARAMETERS_V6
+{
+  union{
+    CRTC_PIXEL_CLOCK_FREQ ulCrtcPclkFreq;    // pixel clock and CRTC id frequency 
+    ULONG ulDispEngClkFreq;                  // dispclk frequency
+  };
+  USHORT usFbDiv;            // feedback divider integer part. 
+  UCHAR  ucPostDiv;          // post divider. 
+  UCHAR  ucRefDiv;           // Reference divider
+  UCHAR  ucPpll;             // ATOM_PPLL1/ATOM_PPLL2/ATOM_DCPLL
+  UCHAR  ucTransmitterID;    // ASIC encoder id defined in objectId.h, 
+                             // indicate which graphic encoder will be used. 
+  UCHAR  ucEncoderMode;      // Encoder mode: 
+  UCHAR  ucMiscInfo;         // bit[0]= Force program PPLL 
+                             // bit[1]= when VGA timing is used. 
+                             // bit[3:2]= HDMI panel bit depth: =0: 24bpp, =1: 36bpp, =2: 30bpp, =3: 48bpp (see the PIXEL_CLOCK_V6_MISC_HDMI defines below)
+                             // bit[4]= RefClock source for PPLL. 
+                             // =0: XTLAIN( default mode )
+	                           // =1: other external clock source, which is pre-defined                                            
+                             //     by VBIOS depend on the feature required.
+                             // bit[7:5]: reserved.
+  ULONG  ulFbDivDecFrac;     // 20 bit feedback divider decimal fraction part, range from 1~999999 ( 0.000001 to 0.999999 )
+
+}PIXEL_CLOCK_PARAMETERS_V6;
+
+#define PIXEL_CLOCK_V6_MISC_FORCE_PROG_PPLL					0x01
+#define PIXEL_CLOCK_V6_MISC_VGA_MODE								0x02
+#define PIXEL_CLOCK_V6_MISC_HDMI_BPP_MASK           0x0c
+#define PIXEL_CLOCK_V6_MISC_HDMI_24BPP              0x00
+#define PIXEL_CLOCK_V6_MISC_HDMI_36BPP              0x04
+#define PIXEL_CLOCK_V6_MISC_HDMI_30BPP              0x08
+#define PIXEL_CLOCK_V6_MISC_HDMI_48BPP              0x0c
+#define PIXEL_CLOCK_V6_MISC_REF_DIV_SRC             0x10
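+/* Editor's illustrative sketch, not part of the original header: composing
+ * ucMiscInfo for PIXEL_CLOCK_PARAMETERS_V6 from the MISC defines above. */
+static UCHAR example_pclk_v6_misc(int force_ppll, int hdmi_30bpp)
+{
+	UCHAR ucMisc = 0;
+	if (force_ppll)
+		ucMisc |= PIXEL_CLOCK_V6_MISC_FORCE_PROG_PPLL;	/* bit[0] */
+	if (hdmi_30bpp)
+		ucMisc |= PIXEL_CLOCK_V6_MISC_HDMI_30BPP;	/* bit[3:2] field */
+	return ucMisc;
+}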
+
 typedef struct _GET_DISP_PLL_STATUS_INPUT_PARAMETERS_V2
 {
   PIXEL_CLOCK_PARAMETERS_V3 sDispClkInput;
@@ -1241,10 +1555,11 @@
 typedef struct _ADJUST_DISPLAY_PLL_INPUT_PARAMETERS_V3
 {
 	USHORT usPixelClock;                    // target pixel clock
-	UCHAR ucTransmitterID;                  // transmitter id defined in objectid.h
+	UCHAR ucTransmitterID;                  // GPU transmitter id defined in objectid.h
 	UCHAR ucEncodeMode;                     // encoder mode: CRT, LVDS, DP, TMDS or HDMI
   UCHAR ucDispPllConfig;                 // display pll configure parameter defined as following DISPPLL_CONFIG_XXXX
-	UCHAR ucReserved[3];
+  UCHAR ucExtTransmitterID;               // external encoder id.
+	UCHAR ucReserved[2];
 }ADJUST_DISPLAY_PLL_INPUT_PARAMETERS_V3;
 
 // usDispPllConfig v1.2 for RoadRunner
@@ -1314,7 +1629,7 @@
 typedef struct _READ_EDID_FROM_HW_I2C_DATA_PARAMETERS
 {
   USHORT    usPrescale;         //Ratio between Engine clock and I2C clock
-  USHORT    usVRAMAddress;      //Adress in Frame Buffer where to pace raw EDID
+  USHORT    usVRAMAddress;      //Address in Frame Buffer where to place raw EDID
   USHORT    usStatus;           //When use output: lower byte EDID checksum, high byte hardware status
                                 //WHen use input:  lower byte as 'byte to read':currently limited to 128byte or 1byte
   UCHAR     ucSlaveAddr;        //Read from which slave
@@ -1358,6 +1673,7 @@
 /**************************************************************************/
 #define SPEED_FAN_CONTROL_PS_ALLOCATION   WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS
 
+
 /****************************************************************************/	
 // Structures used by PowerConnectorDetectionTable
 /****************************************************************************/	
@@ -1438,6 +1754,31 @@
 #define ATOM_PPLL_SS_AMOUNT_V2_NFRAC_MASK     0x0F00
 #define ATOM_PPLL_SS_AMOUNT_V2_NFRAC_SHIFT    8
 
+// Used by DCE5.0
+ typedef struct _ENABLE_SPREAD_SPECTRUM_ON_PPLL_V3
+{
+  USHORT  usSpreadSpectrumAmountFrac;   // SS_AMOUNT_DSFRAC New in DCE5.0
+  UCHAR   ucSpreadSpectrumType;	        // Bit[0]: 0-Down Spread,1-Center Spread. 
+                                        // Bit[1]: 1-Ext. 0-Int. 
+                                        // Bit[3:2]: =0 P1PLL =1 P2PLL =2 DCPLL
+                                        // Bits[7:4] reserved
+  UCHAR   ucEnable;	                    // ATOM_ENABLE or ATOM_DISABLE
+  USHORT  usSpreadSpectrumAmount;      	// Includes SS_AMOUNT_FBDIV[7:0] and SS_AMOUNT_NFRAC_SLIP[11:8]    
+  USHORT  usSpreadSpectrumStep;	        // SS_STEP_SIZE_DSFRAC
+}ENABLE_SPREAD_SPECTRUM_ON_PPLL_V3;
+    
+#define ATOM_PPLL_SS_TYPE_V3_DOWN_SPREAD      0x00
+#define ATOM_PPLL_SS_TYPE_V3_CENTRE_SPREAD    0x01
+#define ATOM_PPLL_SS_TYPE_V3_EXT_SPREAD       0x02
+#define ATOM_PPLL_SS_TYPE_V3_PPLL_SEL_MASK    0x0c
+#define ATOM_PPLL_SS_TYPE_V3_P1PLL            0x00
+#define ATOM_PPLL_SS_TYPE_V3_P2PLL            0x04
+#define ATOM_PPLL_SS_TYPE_V3_DCPLL            0x08
+#define ATOM_PPLL_SS_AMOUNT_V3_FBDIV_MASK     0x00FF
+#define ATOM_PPLL_SS_AMOUNT_V3_FBDIV_SHIFT    0
+#define ATOM_PPLL_SS_AMOUNT_V3_NFRAC_MASK     0x0F00
+#define ATOM_PPLL_SS_AMOUNT_V3_NFRAC_SHIFT    8
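+/* Editor's illustrative sketch, not part of the original header: bits [3:2] of
+ * ucSpreadSpectrumType select which PLL the V3 spread spectrum setting applies to. */
+static int example_ss_v3_is_dcpll(UCHAR ucSpreadSpectrumType)
+{
+	return (ucSpreadSpectrumType & ATOM_PPLL_SS_TYPE_V3_PPLL_SEL_MASK) ==
+	       ATOM_PPLL_SS_TYPE_V3_DCPLL;
+}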
+
 #define ENABLE_SPREAD_SPECTRUM_ON_PPLL_PS_ALLOCATION  ENABLE_SPREAD_SPECTRUM_ON_PPLL
 
 /**************************************************************************/
@@ -1706,7 +2047,7 @@
   USHORT        StandardVESA_Timing;      // Only used by Bios
   USHORT        FirmwareInfo;             // Shared by various SW components,latest version 1.4
   USHORT        DAC_Info;                 // Will be obsolete from R600
-  USHORT        LVDS_Info;                // Shared by various SW components,latest version 1.1 
+  USHORT        LCD_Info;                 // Shared by various SW components,latest version 1.3, was called LVDS_Info 
   USHORT        TMDS_Info;                // Will be obsolete from R600
   USHORT        AnalogTV_Info;            // Shared by various SW components,latest version 1.1 
   USHORT        SupportedDevicesInfo;     // Will be obsolete from R600
@@ -1736,12 +2077,16 @@
 	USHORT				PowerSourceInfo;					// Shared by various SW components, latest versoin 1.1
 }ATOM_MASTER_LIST_OF_DATA_TABLES;
 
+// For backward compatibility 
+#define LVDS_Info                LCD_Info
+
 typedef struct _ATOM_MASTER_DATA_TABLE
 { 
   ATOM_COMMON_TABLE_HEADER sHeader;  
   ATOM_MASTER_LIST_OF_DATA_TABLES   ListOfDataTables;
 }ATOM_MASTER_DATA_TABLE;
 
+
 /****************************************************************************/	
 // Structure used in MultimediaCapabilityInfoTable
 /****************************************************************************/	
@@ -1776,6 +2121,7 @@
   UCHAR                    ucVideoInput4Info;// Video Input 4 Type (1:0) F/B setting (2) physical connector ID (5:3) reserved (7:6)
 }ATOM_MULTIMEDIA_CONFIG_INFO;
 
+
 /****************************************************************************/	
 // Structures used in FirmwareInfoTable
 /****************************************************************************/	
@@ -2031,8 +2377,47 @@
   UCHAR                           ucReserved4[3];
 }ATOM_FIRMWARE_INFO_V2_1;
 
+//the structure below is to be used starting from NI
+//ucTableFormatRevision=2
+//ucTableContentRevision=2
+typedef struct _ATOM_FIRMWARE_INFO_V2_2
+{
+  ATOM_COMMON_TABLE_HEADER        sHeader; 
+  ULONG                           ulFirmwareRevision;
+  ULONG                           ulDefaultEngineClock;       //In 10Khz unit
+  ULONG                           ulDefaultMemoryClock;       //In 10Khz unit
+  ULONG                           ulReserved[2];
+  ULONG                           ulReserved1;                //Was ulMaxEngineClockPLL_Output; //In 10Khz unit*
+  ULONG                           ulReserved2;                //Was ulMaxMemoryClockPLL_Output; //In 10Khz unit*
+  ULONG                           ulMaxPixelClockPLL_Output;  //In 10Khz unit
+  ULONG                           ulBinaryAlteredInfo;        //Was ulASICMaxEngineClock  ?
+  ULONG                           ulDefaultDispEngineClkFreq; //In 10Khz unit. This is the frequency before DCDTO, corresponding to usBootUpVDDCVoltage.          
+  UCHAR                           ucReserved3;                //Was ucASICMaxTemperature;
+  UCHAR                           ucMinAllowedBL_Level;
+  USHORT                          usBootUpVDDCVoltage;        //In MV unit
+  USHORT                          usLcdMinPixelClockPLL_Output; // In MHz unit
+  USHORT                          usLcdMaxPixelClockPLL_Output; // In MHz unit
+  ULONG                           ulReserved4;                //Was ulAsicMaximumVoltage
+  ULONG                           ulMinPixelClockPLL_Output;  //In 10Khz unit
+  ULONG                           ulReserved5;                //Was usMinEngineClockPLL_Input and usMaxEngineClockPLL_Input
+  ULONG                           ulReserved6;                //Was usMinEngineClockPLL_Output and usMinMemoryClockPLL_Input
+  ULONG                           ulReserved7;                //Was usMaxMemoryClockPLL_Input and usMinMemoryClockPLL_Output
+  USHORT                          usReserved11;               //Was usMaxPixelClock;  //In 10Khz unit, Max.  Pclk used only for DAC
+  USHORT                          usMinPixelClockPLL_Input;   //In 10Khz unit
+  USHORT                          usMaxPixelClockPLL_Input;   //In 10Khz unit
+  USHORT                          usBootUpVDDCIVoltage;       //In unit of mv; Was usMinPixelClockPLL_Output;
+  ATOM_FIRMWARE_CAPABILITY_ACCESS usFirmwareCapability;
+  USHORT                          usCoreReferenceClock;       //In 10Khz unit	
+  USHORT                          usMemoryReferenceClock;     //In 10Khz unit	
+  USHORT                          usUniphyDPModeExtClkFreq;   //In 10Khz unit, if it is 0, In DP Mode Uniphy Input clock from internal PPLL, otherwise Input clock from external Spread clock
+  UCHAR                           ucMemoryModule_ID;          //Indicate what is the board design
+  UCHAR                           ucReserved9[3];
+  USHORT                          usBootUpMVDDCVoltage;       //In unit of mv; Was usMinPixelClockPLL_Output;
+  USHORT                          usReserved12;
+  ULONG                           ulReserved10[3];            // Newly added compared to the previous version
+}ATOM_FIRMWARE_INFO_V2_2;
 
-#define ATOM_FIRMWARE_INFO_LAST  ATOM_FIRMWARE_INFO_V2_1
+#define ATOM_FIRMWARE_INFO_LAST  ATOM_FIRMWARE_INFO_V2_2
 
 /****************************************************************************/	
 // Structures used in IntegratedSystemInfoTable
@@ -2212,7 +2597,7 @@
 ucDockingPinBit:     which bit in this register to read the pin status;
 ucDockingPinPolarity:Polarity of the pin when docked;
 
-ulCPUCapInfo:        [7:0]=1:Griffin;[7:0]=2:Greyhound;[7:0]=3:K8, other bits reserved for now and must be 0x0
+ulCPUCapInfo:        [7:0]=1:Griffin;[7:0]=2:Greyhound;[7:0]=3:K8, [7:0]=4:Pharaoh, other bits reserved for now and must be 0x0
 
 usNumberOfCyclesInPeriod:Indicate how many cycles when PWM duty is 100%.
 
@@ -2250,6 +2635,14 @@
 usMinDownStreamHTLinkWidth:  same as above.
 */
 
+// ATOM_INTEGRATED_SYSTEM_INFO::ulCPUCapInfo  - CPU type definition 
+#define    INTEGRATED_SYSTEM_INFO__UNKNOWN_CPU             0
+#define    INTEGRATED_SYSTEM_INFO__AMD_CPU__GRIFFIN        1
+#define    INTEGRATED_SYSTEM_INFO__AMD_CPU__GREYHOUND      2
+#define    INTEGRATED_SYSTEM_INFO__AMD_CPU__K8             3
+#define    INTEGRATED_SYSTEM_INFO__AMD_CPU__PHARAOH        4
+
+#define    INTEGRATED_SYSTEM_INFO__AMD_CPU__MAX_CODE       INTEGRATED_SYSTEM_INFO__AMD_CPU__PHARAOH    // this define reflects the max defined CPU code
 
 #define SYSTEM_CONFIG_POWEREXPRESS_ENABLE                 0x00000001
 #define SYSTEM_CONFIG_RUN_AT_OVERDRIVE_ENGINE             0x00000002
@@ -2778,8 +3171,88 @@
 #define PANEL_RANDOM_DITHER   0x80
 #define PANEL_RANDOM_DITHER_MASK   0x80
 
+#define ATOM_LVDS_INFO_LAST  ATOM_LVDS_INFO_V12   // no need to change this 
 
-#define ATOM_LVDS_INFO_LAST  ATOM_LVDS_INFO_V12
+/****************************************************************************/	
+// Structures used by LCD_InfoTable V1.3    Note: previous version was called ATOM_LVDS_INFO_V12
+// ASIC Families:  NI
+// ucTableFormatRevision=1
+// ucTableContentRevision=3
+/****************************************************************************/	
+typedef struct _ATOM_LCD_INFO_V13
+{
+  ATOM_COMMON_TABLE_HEADER sHeader;  
+  ATOM_DTD_FORMAT     sLCDTiming;
+  USHORT              usExtInfoTableOffset;
+  USHORT              usSupportedRefreshRate;     //Refer to panel info table in ATOMBIOS extension Spec.
+  ULONG               ulReserved0;
+  UCHAR               ucLCD_Misc;                // Reorganized in V13
+                                                 // Bit0: {=0:single, =1:dual},
+                                                 // Bit1: {=0:LDI format for RGB888, =1 FPDI format for RGB888}  // was {=0:666RGB, =1:888RGB},
+                                                 // Bit3:2: {Grey level}
+                                                 // Bit6:4 Color Bit Depth definition (see below definition in EDID V1.4 @BYTE 14h) 
+                                                 // Bit7   Reserved.  was for ATOM_PANEL_MISC_API_ENABLED, still need it?  
+  UCHAR               ucPanelDefaultRefreshRate;
+  UCHAR               ucPanelIdentification;
+  UCHAR               ucSS_Id;
+  USHORT              usLCDVenderID;
+  USHORT              usLCDProductID;
+  UCHAR               ucLCDPanel_SpecialHandlingCap;  // Reorganized in V13 
+                                                 // Bit0: Once DAL sees this CAP is set, it will read EDID from LCD on its own
+                                                 // Bit1: See LCDPANEL_CAP_DRR_SUPPORTED
+                                                 // Bit2: a quick reference whether an embedded panel (LCD1) is LVDS (0) or eDP (1)
+                                                 // Bit7-3: Reserved 
+  UCHAR               ucPanelInfoSize;					 //  start from ATOM_DTD_FORMAT to end of panel info, include ExtInfoTable
+  USHORT              usBacklightPWM;            //  Backlight PWM in Hz. New in _V13
+
+  UCHAR               ucPowerSequenceDIGONtoDE_in4Ms;
+  UCHAR               ucPowerSequenceDEtoVARY_BL_in4Ms;
+  UCHAR               ucPowerSequenceDEtoDIGON_in4Ms;
+  UCHAR               ucPowerSequenceVARY_BLtoDE_in4Ms;
+
+  UCHAR               ucOffDelay_in4Ms;
+  UCHAR               ucPowerSequenceVARY_BLtoBLON_in4Ms;
+  UCHAR               ucPowerSequenceBLONtoVARY_BL_in4Ms;
+  UCHAR               ucReserved1;
+
+  ULONG               ulReserved[4];
+}ATOM_LCD_INFO_V13;  
+
+#define ATOM_LCD_INFO_LAST  ATOM_LCD_INFO_V13    
+
+//Definitions for ucLCD_Misc
+#define ATOM_PANEL_MISC_V13_DUAL                   0x00000001
+#define ATOM_PANEL_MISC_V13_FPDI                   0x00000002
+#define ATOM_PANEL_MISC_V13_GREY_LEVEL             0x0000000C
+#define ATOM_PANEL_MISC_V13_GREY_LEVEL_SHIFT       2
+#define ATOM_PANEL_MISC_V13_COLOR_BIT_DEPTH_MASK   0x70
+#define ATOM_PANEL_MISC_V13_6BIT_PER_COLOR         0x10
+#define ATOM_PANEL_MISC_V13_8BIT_PER_COLOR         0x20
+
+//Color Bit Depth definition in EDID V1.4 @BYTE 14h
+//Bit 6  5  4
+                              //      0  0  0  -  Color bit depth is undefined
+                              //      0  0  1  -  6 Bits per Primary Color
+                              //      0  1  0  -  8 Bits per Primary Color
+                              //      0  1  1  - 10 Bits per Primary Color
+                              //      1  0  0  - 12 Bits per Primary Color
+                              //      1  0  1  - 14 Bits per Primary Color
+                              //      1  1  0  - 16 Bits per Primary Color
+                              //      1  1  1  - Reserved
+ 
+//Definitions for ucLCDPanel_SpecialHandlingCap:
+
+//Once DAL sees this CAP is set, it will read EDID from LCD on its own instead of using sLCDTiming in ATOM_LVDS_INFO_V12. 
+//Other entries in ATOM_LVDS_INFO_V12 are still valid/useful to DAL 
+#define	LCDPANEL_CAP_V13_READ_EDID              0x1        // = LCDPANEL_CAP_READ_EDID, no change compared to the previous version
+
+//If a design supports DRR (dynamic refresh rate) on internal panels (LVDS or eDP), this cap is set in ucLCDPanel_SpecialHandlingCap together
+//with multiple supported refresh rates@usSupportedRefreshRate. This cap should not be set when only slow refresh rate is supported (static
+//refresh rate switch by SW). This is only valid from ATOM_LVDS_INFO_V12.
+#define	LCDPANEL_CAP_V13_DRR_SUPPORTED          0x2        // = LCDPANEL_CAP_DRR_SUPPORTED, no change compared to the previous version
+
+//Use this cap bit for a quick reference whether an embedded panel (LCD1) is LVDS or eDP.
+#define	LCDPANEL_CAP_V13_eDP                    0x4        // = LCDPANEL_CAP_eDP, no change compared to the previous version
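+/* Editor's illustrative sketch, not part of the original header: ucLCD_Misc
+ * bits [6:4] hold the EDID 1.4 style color bit depth code listed above
+ * (1 = 6 bpc, 2 = 8 bpc, ...). */
+static UCHAR example_lcd_v13_color_depth_code(UCHAR ucLCD_Misc)
+{
+	return (ucLCD_Misc & ATOM_PANEL_MISC_V13_COLOR_BIT_DEPTH_MASK) >> 4;
+}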
 
 typedef struct  _ATOM_PATCH_RECORD_MODE
 {
@@ -2944,9 +3417,9 @@
 #define MAX_DTD_MODE_IN_VRAM            6
 #define ATOM_DTD_MODE_SUPPORT_TBL_SIZE  (MAX_DTD_MODE_IN_VRAM*28)    //28= (SIZEOF ATOM_DTD_FORMAT) 
 #define ATOM_STD_MODE_SUPPORT_TBL_SIZE  32*8                         //32 is a predefined number,8= (SIZEOF ATOM_STD_FORMAT)
-#define DFP_ENCODER_TYPE_OFFSET					0x80
-#define DP_ENCODER_LANE_NUM_OFFSET			0x84
-#define DP_ENCODER_LINK_RATE_OFFSET			0x88
+//20 bytes for Encoder Type and DPCD in STD EDID area
+#define DFP_ENCODER_TYPE_OFFSET         (ATOM_EDID_RAW_DATASIZE + ATOM_DTD_MODE_SUPPORT_TBL_SIZE + ATOM_STD_MODE_SUPPORT_TBL_SIZE - 20)    
+#define ATOM_DP_DPCD_OFFSET             (DFP_ENCODER_TYPE_OFFSET + 4 )        
 
 #define ATOM_HWICON1_SURFACE_ADDR       0
 #define ATOM_HWICON2_SURFACE_ADDR       (ATOM_HWICON1_SURFACE_ADDR + ATOM_HWICON_SURFACE_SIZE)
@@ -2997,14 +3470,16 @@
 #define ATOM_DFP5_DTD_MODE_TBL_ADDR     (ATOM_DFP5_EDID_ADDR + ATOM_EDID_RAW_DATASIZE)
 #define ATOM_DFP5_STD_MODE_TBL_ADDR     (ATOM_DFP5_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
 
-#define ATOM_DP_TRAINING_TBL_ADDR				(ATOM_DFP5_STD_MODE_TBL_ADDR+ATOM_STD_MODE_SUPPORT_TBL_SIZE)       
+#define ATOM_DP_TRAINING_TBL_ADDR       (ATOM_DFP5_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE)
 
-#define ATOM_STACK_STORAGE_START        (ATOM_DP_TRAINING_TBL_ADDR+256)       
-#define ATOM_STACK_STORAGE_END          ATOM_STACK_STORAGE_START+512        
+#define ATOM_STACK_STORAGE_START        (ATOM_DP_TRAINING_TBL_ADDR + 1024)       
+#define ATOM_STACK_STORAGE_END          (ATOM_STACK_STORAGE_START + 512)
 
 //The size below is in Kb!
 #define ATOM_VRAM_RESERVE_SIZE         ((((ATOM_STACK_STORAGE_END - ATOM_HWICON1_SURFACE_ADDR)>>10)+4)&0xFFFC)
    
+#define ATOM_VRAM_RESERVE_V2_SIZE      32
+
 #define	ATOM_VRAM_OPERATION_FLAGS_MASK         0xC0000000L
 #define ATOM_VRAM_OPERATION_FLAGS_SHIFT        30
 #define	ATOM_VRAM_BLOCK_NEEDS_NO_RESERVATION   0x1
@@ -3206,6 +3681,15 @@
   USHORT    usGraphicObjIds[1];                             //1st Encoder Obj source from GPU to last Graphic Obj destinate to connector.
 }ATOM_DISPLAY_OBJECT_PATH;
 
+typedef struct  _ATOM_DISPLAY_EXTERNAL_OBJECT_PATH
+{
+  USHORT    usDeviceTag;                                   //supported device 
+  USHORT    usSize;                                        //the size of ATOM_DISPLAY_OBJECT_PATH
+  USHORT    usConnObjectId;                                //Connector Object ID 
+  USHORT    usGPUObjectId;                                 //GPU ID 
+  USHORT    usGraphicObjIds[2];                            //usGraphicObjIds[0]= GPU internal encoder, usGraphicObjIds[1]= external encoder 
+}ATOM_DISPLAY_EXTERNAL_OBJECT_PATH;
+
 typedef struct _ATOM_DISPLAY_OBJECT_PATH_TABLE
 {
   UCHAR                           ucNumOfDispPath;
@@ -3261,6 +3745,47 @@
 #define EXT_AUXDDC_LUTINDEX_7                   7
 #define MAX_NUMBER_OF_EXT_AUXDDC_LUT_ENTRIES   (EXT_AUXDDC_LUTINDEX_7+1)
 
+//ucChannelMapping are defined as following
+//for DP connector, eDP, DP to VGA/LVDS 
+//Bit[1:0]: Defines which pin connects to DP connector DP_Lane0, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3: from GPU pin TX3
+//Bit[3:2]: Defines which pin connects to DP connector DP_Lane1, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3: from GPU pin TX3
+//Bit[5:4]: Defines which pin connects to DP connector DP_Lane2, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3: from GPU pin TX3
+//Bit[7:6]: Defines which pin connects to DP connector DP_Lane3, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3: from GPU pin TX3
+typedef struct _ATOM_DP_CONN_CHANNEL_MAPPING
+{
+#if ATOM_BIG_ENDIAN
+  UCHAR ucDP_Lane3_Source:2;
+  UCHAR ucDP_Lane2_Source:2;
+  UCHAR ucDP_Lane1_Source:2;
+  UCHAR ucDP_Lane0_Source:2;
+#else
+  UCHAR ucDP_Lane0_Source:2;
+  UCHAR ucDP_Lane1_Source:2;
+  UCHAR ucDP_Lane2_Source:2;
+  UCHAR ucDP_Lane3_Source:2;
+#endif
+}ATOM_DP_CONN_CHANNEL_MAPPING;
+
+//for DVI/HDMI, in dual link case, both links have to have same mapping. 
+//Bit[1:0]: Defines which pin connects to DVI connector data Lane2, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3: from GPU pin TX3
+//Bit[3:2]: Defines which pin connects to DVI connector data Lane1, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3: from GPU pin TX3
+//Bit[5:4]: Defines which pin connects to DVI connector data Lane0, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3: from GPU pin TX3
+//Bit[7:6]: Defines which pin connects to DVI connector clock lane, =0: source from GPU pin TX0, =1: from GPU pin TX1, =2: from GPU pin TX2, =3: from GPU pin TX3
+typedef struct _ATOM_DVI_CONN_CHANNEL_MAPPING
+{
+#if ATOM_BIG_ENDIAN
+  UCHAR ucDVI_CLK_Source:2;
+  UCHAR ucDVI_DATA0_Source:2;
+  UCHAR ucDVI_DATA1_Source:2;
+  UCHAR ucDVI_DATA2_Source:2;
+#else
+  UCHAR ucDVI_DATA2_Source:2;
+  UCHAR ucDVI_DATA1_Source:2;
+  UCHAR ucDVI_DATA0_Source:2;
+  UCHAR ucDVI_CLK_Source:2;
+#endif
+}ATOM_DVI_CONN_CHANNEL_MAPPING;
+
 typedef struct _EXT_DISPLAY_PATH
 {
   USHORT  usDeviceTag;                    //A bit vector to show what devices are supported 
@@ -3269,7 +3794,13 @@
   UCHAR   ucExtAUXDDCLutIndex;            //An index into external AUX/DDC channel LUT
   UCHAR   ucExtHPDPINLutIndex;            //An index into external HPD pin LUT
   USHORT  usExtEncoderObjId;              //external encoder object id
-  USHORT  usReserved[3]; 
+  union{
+    UCHAR   ucChannelMapping;                  // if ucChannelMapping=0, use the default one-to-one mapping
+    ATOM_DP_CONN_CHANNEL_MAPPING asDPMapping;
+    ATOM_DVI_CONN_CHANNEL_MAPPING asDVIMapping;
+  };
+  UCHAR   ucReserved;
+  USHORT  usReserved[2]; 
 }EXT_DISPLAY_PATH;
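+/* Editor's illustrative sketch, not part of the original header: reading the
+ * DP lane-to-TX-pin mapping from an EXT_DISPLAY_PATH entry; ucChannelMapping=0
+ * means the default one-to-one mapping. */
+static UCHAR example_dp_lane0_tx_pin(const EXT_DISPLAY_PATH *path)
+{
+	if (path->ucChannelMapping == 0)
+		return 0;				/* lane0 from TX0 */
+	return path->asDPMapping.ucDP_Lane0_Source;	/* 0..3 => TX0..TX3 */
+}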
    
 #define NUMBER_OF_UCHAR_FOR_GUID          16
@@ -3281,7 +3812,8 @@
   UCHAR                    ucGuid [NUMBER_OF_UCHAR_FOR_GUID];     // a GUID is a 16 byte long string
   EXT_DISPLAY_PATH         sPath[MAX_NUMBER_OF_EXT_DISPLAY_PATH]; // total of fixed 7 entries.
   UCHAR                    ucChecksum;                            // a  simple Checksum of the sum of whole structure equal to 0x0. 
-  UCHAR                    Reserved [7];                          // for potential expansion
+  UCHAR                    uc3DStereoPinId;                       // used for eDP panel
+  UCHAR                    Reserved [6];                          // for potential expansion
 }ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO;
 
 //Related definitions, all records are differnt but they have a commond header
@@ -3311,10 +3843,11 @@
 #define ATOM_CONNECTOR_AUXDDC_LUT_RECORD_TYPE          17 //This is for the case when connectors are not known to object table
 #define ATOM_OBJECT_LINK_RECORD_TYPE                   18 //Once this record is present under one object, it indicats the oobject is linked to another obj described by the record
 #define ATOM_CONNECTOR_REMOTE_CAP_RECORD_TYPE          19
+#define ATOM_ENCODER_CAP_RECORD_TYPE                   20
 
 
 //Must be updated when new record type is added,equal to that record definition!
-#define ATOM_MAX_OBJECT_RECORD_NUMBER             ATOM_CONNECTOR_REMOTE_CAP_RECORD_TYPE
+#define ATOM_MAX_OBJECT_RECORD_NUMBER             ATOM_ENCODER_CAP_RECORD_TYPE
 
 typedef struct  _ATOM_I2C_RECORD
 {
@@ -3441,6 +3974,26 @@
   UCHAR                       ucPadding[2];
 }ATOM_ENCODER_DVO_CF_RECORD;
 
+// Bit maps for ATOM_ENCODER_CAP_RECORD.ucEncoderCap
+#define ATOM_ENCODER_CAP_RECORD_HBR2     0x01         // DP1.2 HBR2 is supported by this path
+
+typedef struct  _ATOM_ENCODER_CAP_RECORD
+{
+  ATOM_COMMON_RECORD_HEADER   sheader;
+  union {
+    USHORT                    usEncoderCap;         
+    struct {
+#if ATOM_BIG_ENDIAN
+      USHORT                  usReserved:15;        // Bit1-15 may be defined for other capability in future
+      USHORT                  usHBR2Cap:1;          // Bit0 is for DP1.2 HBR2 capability. 
+#else
+      USHORT                  usHBR2Cap:1;          // Bit0 is for DP1.2 HBR2 capability. 
+      USHORT                  usReserved:15;        // Bit1-15 may be defined for other capability in future
+#endif
+    };
+  }; 
+}ATOM_ENCODER_CAP_RECORD;                             
+
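+/* Editor's illustrative sketch, not part of the original header: a record
+ * parser can test DP 1.2 HBR2 support with the cap bit defined above. */
+static int example_encoder_has_hbr2(const ATOM_ENCODER_CAP_RECORD *record)
+{
+	return (record->usEncoderCap & ATOM_ENCODER_CAP_RECORD_HBR2) != 0;
+}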
 // value for ATOM_CONNECTOR_CF_RECORD.ucConnectedDvoBundle
 #define ATOM_CONNECTOR_CF_RECORD_CONNECTED_UPPER12BITBUNDLEA   1
 #define ATOM_CONNECTOR_CF_RECORD_CONNECTED_LOWER12BITBUNDLEB   2
@@ -3580,6 +4133,11 @@
 #define	VOLTAGE_CONTROL_ID_DAC								0x02									//I2C control, used for R5xx/R6xx MVDDC,MVDDQ or VDDCI
 #define	VOLTAGE_CONTROL_ID_VT116xM						0x03									//I2C control, used for R6xx Core Voltage
 #define VOLTAGE_CONTROL_ID_DS4402							0x04									
+#define VOLTAGE_CONTROL_ID_UP6266 						0x05									
+#define VOLTAGE_CONTROL_ID_SCORPIO						0x06
+#define	VOLTAGE_CONTROL_ID_VT1556M						0x07									
+#define	VOLTAGE_CONTROL_ID_CHL822x						0x08									
+#define	VOLTAGE_CONTROL_ID_VT1586M						0x09
 
 typedef struct  _ATOM_VOLTAGE_OBJECT
 {
@@ -3670,66 +4228,157 @@
 #define POWER_SENSOR_GPIO								0x01
 #define POWER_SENSOR_I2C								0x02
 
+typedef struct _ATOM_CLK_VOLT_CAPABILITY
+{
+  ULONG      ulVoltageIndex;                      // The Voltage Index indicated by FUSE, same voltage index shared with SCLK DPM fuse table        
+  ULONG      ulMaximumSupportedCLK;               // Maximum clock supported with specified voltage index, unit in 10kHz
+}ATOM_CLK_VOLT_CAPABILITY;
+
+typedef struct _ATOM_AVAILABLE_SCLK_LIST
+{
+  ULONG      ulSupportedSCLK;               // Maximum clock supported with specified voltage index,  unit in 10kHz
+  USHORT     usVoltageIndex;                // The Voltage Index indicated by FUSE for specified SCLK  
+  USHORT     usVoltageID;                   // The Voltage ID indicated by FUSE for specified SCLK 
+}ATOM_AVAILABLE_SCLK_LIST;
+
+// ATOM_INTEGRATED_SYSTEM_INFO_V6 ulSystemConfig cap definition
+#define ATOM_IGP_INFO_V6_SYSTEM_CONFIG__PCIE_POWER_GATING_ENABLE             1       // refer to ulSystemConfig bit[0]
+
+// this IntegratedSystemInfoTable is used for the Llano/Ontario APU
 typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V6
 {
   ATOM_COMMON_TABLE_HEADER   sHeader;
   ULONG  ulBootUpEngineClock;
   ULONG  ulDentistVCOFreq;          
   ULONG  ulBootUpUMAClock;          
-  ULONG  ulReserved1[8];            
+  ATOM_CLK_VOLT_CAPABILITY   sDISPCLK_Voltage[4];            
   ULONG  ulBootUpReqDisplayVector;
   ULONG  ulOtherDisplayMisc;
   ULONG  ulGPUCapInfo;
-  ULONG  ulReserved2[3];            
+  ULONG  ulSB_MMIO_Base_Addr;
+  USHORT usRequestedPWMFreqInHz;
+  UCHAR  ucHtcTmpLmt;   
+  UCHAR  ucHtcHystLmt;
+  ULONG  ulMinEngineClock;           
   ULONG  ulSystemConfig;            
   ULONG  ulCPUCapInfo;              
-  USHORT usMaxNBVoltage;  
-  USHORT usMinNBVoltage;  
-  USHORT usBootUpNBVoltage;         
-  USHORT usExtDispConnInfoOffset;  
-  UCHAR  ucHtcTmpLmt;   
-  UCHAR  ucTjOffset;    
+  USHORT usNBP0Voltage;               
+  USHORT usNBP1Voltage;
+  USHORT usBootUpNBVoltage;                       
+  USHORT usExtDispConnInfoOffset;
+  USHORT usPanelRefreshRateRange;     
   UCHAR  ucMemoryType;  
   UCHAR  ucUMAChannelNumber;
   ULONG  ulCSR_M3_ARB_CNTL_DEFAULT[10];  
   ULONG  ulCSR_M3_ARB_CNTL_UVD[10]; 
   ULONG  ulCSR_M3_ARB_CNTL_FS3D[10];
-  ULONG  ulReserved3[42]; 
+  ATOM_AVAILABLE_SCLK_LIST   sAvail_SCLK[5];
+  ULONG  ulGMCRestoreResetTime;
+  ULONG  ulMinimumNClk;
+  ULONG  ulIdleNClk;
+  ULONG  ulDDR_DLL_PowerUpTime;
+  ULONG  ulDDR_PLL_PowerUpTime;
+  USHORT usPCIEClkSSPercentage;
+  USHORT usPCIEClkSSType;
+  USHORT usLvdsSSPercentage;
+  USHORT usLvdsSSpreadRateIn10Hz;
+  USHORT usHDMISSPercentage;
+  USHORT usHDMISSpreadRateIn10Hz;
+  USHORT usDVISSPercentage;
+  USHORT usDVISSpreadRateIn10Hz;
+  ULONG  ulReserved3[21]; 
   ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO sExtDispConnInfo;   
 }ATOM_INTEGRATED_SYSTEM_INFO_V6;   
 
+// ulGPUCapInfo
+#define INTEGRATED_SYSTEM_INFO_V6_GPUCAPINFO__TMDSHDMI_COHERENT_SINGLEPLL_MODE       0x01
+#define INTEGRATED_SYSTEM_INFO_V6_GPUCAPINFO__DISABLE_AUX_HW_MODE_DETECTION          0x08
+
+// ulOtherDisplayMisc
+#define INTEGRATED_SYSTEM_INFO__GET_EDID_CALLBACK_FUNC_SUPPORT                       0x01
+
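+/* Editor's illustrative sketch, not part of the original header: driver-side
+ * test of the single-PLL coherent mode capability bit defined above. */
+static int example_igp_v6_single_pll(const ATOM_INTEGRATED_SYSTEM_INFO_V6 *info)
+{
+	return (info->ulGPUCapInfo &
+		INTEGRATED_SYSTEM_INFO_V6_GPUCAPINFO__TMDSHDMI_COHERENT_SINGLEPLL_MODE) != 0;
+}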
+
 /**********************************************************************************************************************
-// ATOM_INTEGRATED_SYSTEM_INFO_V6 Description
-//ulBootUpEngineClock:              VBIOS bootup Engine clock frequency, in 10kHz unit. 
-//ulDentistVCOFreq:                 Dentist VCO clock in 10kHz unit. 
-//ulBootUpUMAClock:                 System memory boot up clock frequency in 10Khz unit. 
-//ulReserved1[8]                    Reserved by now, must be 0x0. 
-//ulBootUpReqDisplayVector	        VBIOS boot up display IDs
-//                                  ATOM_DEVICE_CRT1_SUPPORT                  0x0001
-//                                  ATOM_DEVICE_CRT2_SUPPORT                  0x0010
-//                                  ATOM_DEVICE_DFP1_SUPPORT                  0x0008 
-//                                  ATOM_DEVICE_DFP6_SUPPORT                  0x0040 
-//                                  ATOM_DEVICE_DFP2_SUPPORT                  0x0080       
-//                                  ATOM_DEVICE_DFP3_SUPPORT                  0x0200       
-//                                  ATOM_DEVICE_DFP4_SUPPORT                  0x0400        
-//                                  ATOM_DEVICE_DFP5_SUPPORT                  0x0800
-//                                  ATOM_DEVICE_LCD1_SUPPORT                  0x0002
-//ulOtherDisplayMisc      	        Other display related flags, not defined yet. 
-//ulGPUCapInfo                      TBD
-//ulReserved2[3]                    must be 0x0 for the reserved.
-//ulSystemConfig                    TBD
-//ulCPUCapInfo                      TBD
-//usMaxNBVoltage                    High NB voltage in unit of mv, calculated using current VDDNB (D24F2xDC) and VDDNB offset fuse. 
-//usMinNBVoltage                    Low NB voltage in unit of mv, calculated using current VDDNB (D24F2xDC) and VDDNB offset fuse.
-//usBootUpNBVoltage                 Boot up NB voltage in unit of mv.
-//ucHtcTmpLmt                       Bit [22:16] of D24F3x64 Thermal Control (HTC) Register.
-//ucTjOffset                        Bit [28:22] of D24F3xE4 Thermtrip Status Register,may not be needed.
-//ucMemoryType                      [3:0]=1:DDR1;=2:DDR2;=3:DDR3.[7:4] is reserved.
-//ucUMAChannelNumber      	        System memory channel numbers. 
-//usExtDispConnectionInfoOffset     ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO offset relative to beginning of this table. 
-//ulCSR_M3_ARB_CNTL_DEFAULT[10]     Arrays with values for CSR M3 arbiter for default
-//ulCSR_M3_ARB_CNTL_UVD[10]         Arrays with values for CSR M3 arbiter for UVD playback.
-//ulCSR_M3_ARB_CNTL_FS3D[10]        Arrays with values for CSR M3 arbiter for Full Screen 3D applications.
+  ATOM_INTEGRATED_SYSTEM_INFO_V6 Description
+ulBootUpEngineClock:              VBIOS bootup Engine clock frequency, in 10kHz unit. If it is 0, VBIOS uses a pre-defined bootup engine clock.
+ulDentistVCOFreq:                 Dentist VCO clock in 10kHz unit. 
+ulBootUpUMAClock:                 System memory boot up clock frequency in 10Khz unit. 
+sDISPCLK_Voltage:                 Report Display clock voltage requirement.
+ 
+ulBootUpReqDisplayVector:         VBIOS boot up display IDs; the following devices are supported in Llano/Ontario projects:
+                                  ATOM_DEVICE_CRT1_SUPPORT                  0x0001
+                                  ATOM_DEVICE_CRT2_SUPPORT                  0x0010
+                                  ATOM_DEVICE_DFP1_SUPPORT                  0x0008 
+                                  ATOM_DEVICE_DFP6_SUPPORT                  0x0040 
+                                  ATOM_DEVICE_DFP2_SUPPORT                  0x0080       
+                                  ATOM_DEVICE_DFP3_SUPPORT                  0x0200       
+                                  ATOM_DEVICE_DFP4_SUPPORT                  0x0400        
+                                  ATOM_DEVICE_DFP5_SUPPORT                  0x0800
+                                  ATOM_DEVICE_LCD1_SUPPORT                  0x0002
+ulOtherDisplayMisc:      	        Other display related flags, not defined yet. 
+ulGPUCapInfo:                     bit[0]=0: TMDS/HDMI Coherent Mode uses cascade PLL mode.
+                                        =1: TMDS/HDMI Coherent Mode uses single PLL mode.
+                                  bit[3]=0: Enable HW AUX mode detection logic
+                                        =1: Disable HW AUX mode detection logic
+ulSB_MMIO_Base_Addr:              Physical Base address to SB MMIO space. Driver needs to initialize it for SMU usage.
+
+usRequestedPWMFreqInHz:           When it's set to 0x0 by SBIOS: the LCD BackLight is not controlled by GPU(SW). 
+                                  Any attempt to change BL using VBIOS function or enable VariBri from PP table is not effective since ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU==0;
+                                  
+                                  When it's set to a non-zero frequency, the BackLight is controlled by GPU (SW) in one of two ways below:
+                                  1. SW uses the GPU BL PWM output to control the BL; in this case, this non-zero frequency determines what frequency the GPU should use;
+                                  VBIOS will set up the proper PWM frequency and ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU==1; as a result,
+                                  Changing BL using VBIOS function is functional in both driver and non-driver present environment; 
+                                  and enabling VariBri under the driver environment from PP table is optional.
+
+                                  2. SW uses other means to control BL (like DPCD),this non-zero frequency serves as a flag only indicating
+                                  that BL control from GPU is expected.
+                                  VBIOS will NOT set up PWM frequency but make ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU==1
+                                  Changing BL using VBIOS function could be functional in both driver and non-driver present environments, but
+                                  it is platform dependent, 
+                                  and enabling VariBri under the driver environment from PP table is optional.
+
+ucHtcTmpLmt:                      Refer to D18F3x64 bit[22:16], HtcTmpLmt. 
+                                  Threshold on value to enter HTC_active state.
+ucHtcHystLmt:                     Refer to D18F3x64 bit[27:24], HtcHystLmt. 
+                                  Used to calculate the threshold-off value to exit HTC_active state, which is the threshold-on value minus ucHtcHystLmt.
+ulMinEngineClock:                 Minimum SCLK allowed in 10kHz unit. This is calculated based on WRCK Fuse settings.
+ulSystemConfig:                   Bit[0]=0: PCIE Power Gating Disabled 
+                                        =1: PCIE Power Gating Enabled
+                                  Bit[1]=0: DDR-DLL shut-down feature disabled.
+                                         1: DDR-DLL shut-down feature enabled.
+                                  Bit[2]=0: DDR-PLL Power down feature disabled.
+                                         1: DDR-PLL Power down feature enabled.                                 
+ulCPUCapInfo:                     TBD
+usNBP0Voltage:                    VID for voltage on NB P0 State
+usNBP1Voltage:                    VID for voltage on NB P1 State  
+usBootUpNBVoltage:                Voltage Index of GNB voltage configured by SBIOS, which is sufficient to support the VBIOS DISPCLK requirement.
+usExtDispConnInfoOffset:          Offset to sExtDispConnInfo inside the structure
+usPanelRefreshRateRange:          Bit vector for LCD supported refresh rate range. If DRR is requested by the platform, at least two bits need to be set
+                                  to indicate a range.
+                                  SUPPORTED_LCD_REFRESHRATE_30Hz          0x0004
+                                  SUPPORTED_LCD_REFRESHRATE_40Hz          0x0008
+                                  SUPPORTED_LCD_REFRESHRATE_50Hz          0x0010
+                                  SUPPORTED_LCD_REFRESHRATE_60Hz          0x0020
+ucMemoryType:                     [3:0]=1:DDR1;=2:DDR2;=3:DDR3.[7:4] is reserved.
+ucUMAChannelNumber:      	        System memory channel numbers. 
+ulCSR_M3_ARB_CNTL_DEFAULT[10]:    Arrays with values for CSR M3 arbiter for default
+ulCSR_M3_ARB_CNTL_UVD[10]:        Arrays with values for CSR M3 arbiter for UVD playback.
+ulCSR_M3_ARB_CNTL_FS3D[10]:       Arrays with values for CSR M3 arbiter for Full Screen 3D applications.
+sAvail_SCLK[5]:                   Array providing the available list of SCLKs and corresponding voltages, ordered from low to high  
+ulGMCRestoreResetTime:            GMC power restore and GMC reset time to calculate data reconnection latency. Unit in ns. 
+ulMinimumNClk:                    Minimum NCLK speed among all NB-Pstates to calculate data reconnection latency. Unit in 10kHz. 
+ulIdleNClk:                       NCLK speed while memory runs in self-refresh state. Unit in 10kHz.
+ulDDR_DLL_PowerUpTime:            DDR PHY DLL power up time. Unit in ns.
+ulDDR_PLL_PowerUpTime:            DDR PHY PLL power up time. Unit in ns.
+usPCIEClkSSPercentage:            PCIE Clock Spread Spectrum Percentage in unit 0.01%; 100 means 1%.
+usPCIEClkSSType:                  PCIE Clock Spread Spectrum Type. 0 for Down spread (default); 1 for Center spread.
+usLvdsSSPercentage:               LVDS panel (not including eDP) Spread Spectrum Percentage in unit of 0.01%; =0, use VBIOS default setting. 
+usLvdsSSpreadRateIn10Hz:          LVDS panel (not including eDP) Spread Spectrum frequency in unit of 10Hz; =0, use VBIOS default setting. 
+usHDMISSPercentage:               HDMI Spread Spectrum Percentage in unit 0.01%; 100 means 1%; =0, use VBIOS default setting. 
+usHDMISSpreadRateIn10Hz:          HDMI Spread Spectrum frequency in unit of 10Hz; =0, use VBIOS default setting. 
+usDVISSPercentage:                DVI Spread Spectrum Percentage in unit 0.01%; 100 means 1%; =0, use VBIOS default setting. 
+usDVISSpreadRateIn10Hz:           DVI Spread Spectrum frequency in unit of 10Hz; =0, use VBIOS default setting. 
 **********************************************************************************************************************/
 
 /**************************************************************************/
@@ -3790,6 +4439,7 @@
 #define ASIC_INTERNAL_SS_ON_LVDS    6
 #define ASIC_INTERNAL_SS_ON_DP      7
 #define ASIC_INTERNAL_SS_ON_DCPLL   8
+#define ASIC_EXTERNAL_SS_ON_DP_CLOCK 9
 
 typedef struct _ATOM_ASIC_SS_ASSIGNMENT_V2
 {
@@ -3903,6 +4553,7 @@
 #define ATOM_S0_SYSTEM_POWER_STATE_VALUE_AC     1
 #define ATOM_S0_SYSTEM_POWER_STATE_VALUE_DC     2
 #define ATOM_S0_SYSTEM_POWER_STATE_VALUE_LITEAC 3
+#define ATOM_S0_SYSTEM_POWER_STATE_VALUE_LIT2AC 4
 
 //Byte aligned defintion for BIOS usage
 #define ATOM_S0_CRT1_MONOb0             0x01
@@ -4529,7 +5180,8 @@
 #define INDEX_ACCESS_RANGE_BEGIN	    (VALUE_DWORD + 1)
 #define INDEX_ACCESS_RANGE_END		    (INDEX_ACCESS_RANGE_BEGIN + 1)
 #define VALUE_INDEX_ACCESS_SINGLE	    (INDEX_ACCESS_RANGE_END + 1)
-
+//#define ACCESS_MCIODEBUGIND            0x40       //defined in BIOS code
+#define ACCESS_PLACEHOLDER             0x80
 
 typedef struct _ATOM_MC_INIT_PARAM_TABLE
 { 
@@ -4554,6 +5206,10 @@
 #define _32Mx32             0x33
 #define _64Mx8              0x41
 #define _64Mx16             0x42
+#define _64Mx32             0x43
+#define _128Mx8             0x51
+#define _128Mx16            0x52
+#define _256Mx8             0x61
 
 #define SAMSUNG             0x1
 #define INFINEON            0x2
@@ -4569,10 +5225,11 @@
 #define QIMONDA             INFINEON
 #define PROMOS              MOSEL
 #define KRETON              INFINEON
+#define ELIXIR              NANYA
 
 /////////////Support for GDDR5 MC uCode to reside in upper 64K of ROM/////////////
 
-#define UCODE_ROM_START_ADDRESS		0x1c000
+#define UCODE_ROM_START_ADDRESS		0x1b800
 #define	UCODE_SIGNATURE			0x4375434d // 'MCuC' - MC uCode
 
 //uCode block header for reference
@@ -4903,7 +5560,34 @@
   ATOM_MEMORY_TIMING_FORMAT_V2  asMemTiming[5];//Memory Timing block sort from lower clock to higher clock
 }ATOM_VRAM_MODULE_V6;
 
-
+typedef struct _ATOM_VRAM_MODULE_V7
+{
+// Design Specific Values
+  ULONG	  ulChannelMapCfg;	                // mmMC_SHARED_CHREMAP
+  USHORT  usModuleSize;                     // Size of ATOM_VRAM_MODULE_V7
+  USHORT  usPrivateReserved;                // MC_ARB_RAMCFG (includes NOOFBANK,NOOFRANKS,NOOFROWS,NOOFCOLS)
+  USHORT  usReserved;
+  UCHAR   ucExtMemoryID;                    // Current memory module ID
+  UCHAR   ucMemoryType;                     // MEM_TYPE_DDR2/DDR3/GDDR3/GDDR5
+  UCHAR   ucChannelNum;                     // Number of mem. channels supported in this module
+  UCHAR   ucChannelWidth;                   // CHANNEL_16BIT/CHANNEL_32BIT/CHANNEL_64BIT
+  UCHAR   ucDensity;                        // _8Mx32, _16Mx32, _16Mx16, _32Mx16
+  UCHAR	  ucReserve;                        // Former container for Mx_FLAGS like DBI_AC_MODE_ENABLE_ASIC for GDDR4. Not used now.
+  UCHAR	  ucMisc;                           // RANK_OF_THISMEMORY etc.
+  UCHAR	  ucVREFI;                          // Not used.
+  UCHAR   ucNPL_RT;                         // Round trip delay (MC_SEQ_CAS_TIMING [28:24]:TCL=CL+NPL_RT-2). Always 2.
+  UCHAR	  ucPreamble;                       // [7:4] Write Preamble, [3:0] Read Preamble
+  UCHAR   ucMemorySize;                     // Total memory size in unit of 16MB for CONFIG_MEMSIZE - bit[23:0] zeros
+  UCHAR   ucReserved[3];
+// Memory Module specific values
+  USHORT  usEMRS2Value;                     // EMRS2/MR2 Value. 
+  USHORT  usEMRS3Value;                     // EMRS3/MR3 Value.
+  UCHAR   ucMemoryVenderID;                 // [7:4] Revision, [3:0] Vendor code
+  UCHAR	  ucRefreshRateFactor;              // [1:0]=RefreshFactor (00=8ms, 01=16ms, 10=32ms,11=64ms)
+  UCHAR	  ucFIFODepth;                      // FIFO depth can be detected during vendor detection, here is hardcoded per memory
+  UCHAR   ucCDR_Bandwidth;                  // [0:3]=Read CDR bandwidth, [4:7] - Write CDR Bandwidth
+  char    strMemPNString[20];               // part number end with '0'. 
+}ATOM_VRAM_MODULE_V7;
 
 typedef struct _ATOM_VRAM_INFO_V2
 {
@@ -4942,6 +5626,20 @@
 																																						 //	ATOM_INIT_REG_BLOCK				 aMemAdjust;
 }ATOM_VRAM_INFO_V4;
 
+typedef struct _ATOM_VRAM_INFO_HEADER_V2_1
+{
+  ATOM_COMMON_TABLE_HEADER   sHeader;
+	USHORT										 usMemAdjustTblOffset;													 // offset of ATOM_INIT_REG_BLOCK structure for memory vendor specific MC adjust setting
+	USHORT										 usMemClkPatchTblOffset;												 //	offset of ATOM_INIT_REG_BLOCK structure for memory clock specific MC setting
+	USHORT										 usReserved[4];
+  UCHAR                      ucNumOfVRAMModule;                              // indicates the number of VRAM modules
+  UCHAR                      ucMemoryClkPatchTblVer;                         // version of memory AC timing register list
+  UCHAR                      ucVramModuleVer;                                // indicates the ATOM_VRAM_MODULE version
+  UCHAR                      ucReserved; 
+  ATOM_VRAM_MODULE_V7		     aVramInfo[ATOM_MAX_NUMBER_OF_VRAM_MODULE];      // just for allocation, real number of blocks is in ucNumOfVRAMModule;
+}ATOM_VRAM_INFO_HEADER_V2_1;
+
+
 typedef struct _ATOM_VRAM_GPIO_DETECTION_INFO
 {
   ATOM_COMMON_TABLE_HEADER   sHeader;
@@ -5182,6 +5880,16 @@
 	UCHAR  ucReserved;
 }ASIC_TRANSMITTER_INFO;
 
+#define ASIC_TRANSMITTER_INFO_CONFIG__DVO_SDR_MODE          0x01
+#define ASIC_TRANSMITTER_INFO_CONFIG__COHERENT_MODE         0x02
+#define ASIC_TRANSMITTER_INFO_CONFIG__ENCODEROBJ_ID_MASK    0xc4
+#define ASIC_TRANSMITTER_INFO_CONFIG__ENCODER_A             0x00
+#define ASIC_TRANSMITTER_INFO_CONFIG__ENCODER_B             0x04
+#define ASIC_TRANSMITTER_INFO_CONFIG__ENCODER_C             0x40
+#define ASIC_TRANSMITTER_INFO_CONFIG__ENCODER_D             0x44
+#define ASIC_TRANSMITTER_INFO_CONFIG__ENCODER_E             0x80
+#define ASIC_TRANSMITTER_INFO_CONFIG__ENCODER_F             0x84
+
 typedef struct _ASIC_ENCODER_INFO
 {
 	UCHAR ucEncoderID;
@@ -5284,6 +5992,28 @@
 /* /obselete */
 #define DP_ENCODER_SERVICE_PS_ALLOCATION				WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS
 
+
+typedef struct _DP_ENCODER_SERVICE_PARAMETERS_V2
+{
+	USHORT usExtEncoderObjId;   // External Encoder Object Id, output parameter only, use when ucAction = DP_SERVICE_V2_ACTION_DET_EXT_CONNECTION
+  UCHAR  ucAuxId;
+  UCHAR  ucAction;
+  UCHAR  ucSinkType;          // Input and Output parameter. 
+  UCHAR  ucHPDId;             // Input parameter, used when ucAction = DP_SERVICE_V2_ACTION_DET_EXT_CONNECTION
+	UCHAR  ucReserved[2];
+}DP_ENCODER_SERVICE_PARAMETERS_V2;
+
+typedef struct _DP_ENCODER_SERVICE_PS_ALLOCATION_V2
+{
+  DP_ENCODER_SERVICE_PARAMETERS_V2 asDPServiceParam;
+  PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS_V2 asAuxParam;
+}DP_ENCODER_SERVICE_PS_ALLOCATION_V2;
+
+// ucAction
+#define DP_SERVICE_V2_ACTION_GET_SINK_TYPE							0x01
+#define DP_SERVICE_V2_ACTION_DET_LCD_CONNECTION			    0x02
+
+
 // DP_TRAINING_TABLE
 #define DPCD_SET_LINKRATE_LANENUM_PATTERN1_TBL_ADDR				ATOM_DP_TRAINING_TBL_ADDR		
 #define DPCD_SET_SS_CNTL_TBL_ADDR													(ATOM_DP_TRAINING_TBL_ADDR + 8 )
@@ -5339,6 +6069,7 @@
 #define SELECT_DCIO_IMPCAL            4
 #define SELECT_DCIO_DIG               6
 #define SELECT_CRTC_PIXEL_RATE        7
+#define SELECT_VGA_BLK                8
 
 /****************************************************************************/	
 //Portion VI: Definitinos for vbios MC scratch registers that driver used
@@ -5744,7 +6475,17 @@
 #define ATOM_PP_THERMALCONTROLLER_ADT7473   9
 #define ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO     11
 #define ATOM_PP_THERMALCONTROLLER_EVERGREEN 12
+#define ATOM_PP_THERMALCONTROLLER_EMC2103   13  /* 0x0D */ // Only fan control will be implemented, do NOT show this in PPGen.
+#define ATOM_PP_THERMALCONTROLLER_SUMO      14  /* 0x0E */ // Sumo type, used internally
+#define ATOM_PP_THERMALCONTROLLER_NISLANDS  15
+
+// Thermal controller 'combo type' to use an external controller for Fan control and an internal controller for thermal.
+// We probably should reserve the bit 0x80 for this use.
+// To keep the number of these types low we should also use the same code for all ASICs (i.e. do not distinguish RV6xx and RV7xx Internal here).
+// The driver can pick the correct internal controller based on the ASIC.
+
 #define ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL   0x89    // ADT7473 Fan Control + Internal Thermal Controller
+#define ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL   0x8D    // EMC2103 Fan Control + Internal Thermal Controller
 
 typedef struct _ATOM_PPLIB_STATE
 {
@@ -5841,6 +6582,29 @@
     USHORT                     usExtendendedHeaderOffset;
 } ATOM_PPLIB_POWERPLAYTABLE3, *LPATOM_PPLIB_POWERPLAYTABLE3;
 
+typedef struct _ATOM_PPLIB_POWERPLAYTABLE4
+{
+    ATOM_PPLIB_POWERPLAYTABLE3 basicTable3;
+    ULONG                      ulGoldenPPID;                    // PPGen use only     
+    ULONG                      ulGoldenRevision;                // PPGen use only
+    USHORT                     usVddcDependencyOnSCLKOffset;
+    USHORT                     usVddciDependencyOnMCLKOffset;
+    USHORT                     usVddcDependencyOnMCLKOffset;
+    USHORT                     usMaxClockVoltageOnDCOffset;
+    USHORT                     usReserved[2];  
+} ATOM_PPLIB_POWERPLAYTABLE4, *LPATOM_PPLIB_POWERPLAYTABLE4;
+
+typedef struct _ATOM_PPLIB_POWERPLAYTABLE5
+{
+    ATOM_PPLIB_POWERPLAYTABLE4 basicTable4;
+    ULONG                      ulTDPLimit;
+    ULONG                      ulNearTDPLimit;
+    ULONG                      ulSQRampingThreshold;
+    USHORT                     usCACLeakageTableOffset;         // Points to ATOM_PPLIB_CAC_Leakage_Table
+    ULONG                      ulCACLeakage;                    // TBD, this parameter is still under discussion.  Change to ulReserved if not needed.
+    ULONG                      ulReserved;
+} ATOM_PPLIB_POWERPLAYTABLE5, *LPATOM_PPLIB_POWERPLAYTABLE5;
+
 //// ATOM_PPLIB_NONCLOCK_INFO::usClassification
 #define ATOM_PPLIB_CLASSIFICATION_UI_MASK          0x0007
 #define ATOM_PPLIB_CLASSIFICATION_UI_SHIFT         0
@@ -5864,6 +6628,10 @@
 #define ATOM_PPLIB_CLASSIFICATION_HDSTATE                0x4000
 #define ATOM_PPLIB_CLASSIFICATION_SDSTATE                0x8000
 
+//// ATOM_PPLIB_NONCLOCK_INFO::usClassification2
+#define ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2     0x0001
+#define ATOM_PPLIB_CLASSIFICATION2_ULV                      0x0002
+
 //// ATOM_PPLIB_NONCLOCK_INFO::ulCapsAndSettings
 #define ATOM_PPLIB_SINGLE_DISPLAY_ONLY           0x00000001
 #define ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK         0x00000002
@@ -5896,9 +6664,21 @@
 #define ATOM_PPLIB_M3ARB_MASK                       0x00060000
 #define ATOM_PPLIB_M3ARB_SHIFT                      17
 
+#define ATOM_PPLIB_ENABLE_DRR                       0x00080000
+
+// remaining 16 bits are reserved
+typedef struct _ATOM_PPLIB_THERMAL_STATE
+{
+    UCHAR   ucMinTemperature;
+    UCHAR   ucMaxTemperature;
+    UCHAR   ucThermalAction;
+}ATOM_PPLIB_THERMAL_STATE, *LPATOM_PPLIB_THERMAL_STATE;
+
 // Contained in an array starting at the offset
 // in ATOM_PPLIB_POWERPLAYTABLE::usNonClockInfoArrayOffset.
 // referenced from ATOM_PPLIB_STATE_INFO::ucNonClockStateIndex
+#define ATOM_PPLIB_NONCLOCKINFO_VER1      12
+#define ATOM_PPLIB_NONCLOCKINFO_VER2      24
 typedef struct _ATOM_PPLIB_NONCLOCK_INFO
 {
       USHORT usClassification;
@@ -5906,15 +6686,15 @@
       UCHAR  ucMaxTemperature;
       ULONG  ulCapsAndSettings;
       UCHAR  ucRequiredPower;
-      UCHAR  ucUnused1[3];
+      USHORT usClassification2;
+      ULONG  ulVCLK;
+      ULONG  ulDCLK;
+      UCHAR  ucUnused[5];
 } ATOM_PPLIB_NONCLOCK_INFO;
 
 // Contained in an array starting at the offset
 // in ATOM_PPLIB_POWERPLAYTABLE::usClockInfoArrayOffset.
 // referenced from ATOM_PPLIB_STATE::ucClockStateIndices
-#define ATOM_PPLIB_NONCLOCKINFO_VER1      12
-#define ATOM_PPLIB_NONCLOCKINFO_VER2      24
-
 typedef struct _ATOM_PPLIB_R600_CLOCK_INFO
 {
       USHORT usEngineClockLow;
@@ -5985,6 +6765,93 @@
 #define ATOM_PPLIB_RS780_HTLINKFREQ_LOW        1 
 #define ATOM_PPLIB_RS780_HTLINKFREQ_HIGH       2 
 
+typedef struct _ATOM_PPLIB_SUMO_CLOCK_INFO{
+      USHORT usEngineClockLow;  //clock frequency & 0xFFFF. The unit is in 10 kHz
+      UCHAR  ucEngineClockHigh; //clock frequency >> 16.
+      UCHAR  vddcIndex;         //2-bit vddc index;
+      UCHAR  leakage;          //please use 8-bit absolute value, not the 6-bit % value 
+      //please initialize to 0
+      UCHAR  rsv;
+      //please initialize to 0
+      USHORT rsv1;
+      //please initialize to 0s
+      ULONG rsv2[2];
+}ATOM_PPLIB_SUMO_CLOCK_INFO;
+
+
+
+typedef struct _ATOM_PPLIB_STATE_V2
+{
+      //number of valid dpm levels in this state; Driver uses it to calculate the whole 
+      //size of the state: sizeof(ATOM_PPLIB_STATE_V2) + (ucNumDPMLevels - 1) * sizeof(UCHAR)
+      UCHAR ucNumDPMLevels;
+      
+      //an index into the array of nonClockInfos
+      UCHAR nonClockInfoIndex;
+      /**
+      * Driver will read the first ucNumDPMLevels in this array
+      */
+      UCHAR clockInfoIndex[1];
+} ATOM_PPLIB_STATE_V2;
+
+typedef struct StateArray{
+    //how many states we have 
+    UCHAR ucNumEntries;
+    
+    ATOM_PPLIB_STATE_V2 states[1];
+}StateArray;
+
+
+typedef struct ClockInfoArray{
+    //how many clock levels we have
+    UCHAR ucNumEntries;
+    
+    //sizeof(ATOM_PPLIB_SUMO_CLOCK_INFO)
+    UCHAR ucEntrySize;
+    
+    //this is for Sumo
+    ATOM_PPLIB_SUMO_CLOCK_INFO clockInfo[1];
+}ClockInfoArray;
+
+typedef struct NonClockInfoArray{
+
+    //how many non-clock levels we have. Normally this should be the same as the number of states
+    UCHAR ucNumEntries;
+    //sizeof(ATOM_PPLIB_NONCLOCK_INFO)
+    UCHAR ucEntrySize;
+    
+    ATOM_PPLIB_NONCLOCK_INFO nonClockInfo[1];
+}NonClockInfoArray;
+
+typedef struct _ATOM_PPLIB_Clock_Voltage_Dependency_Record
+{
+    USHORT usClockLow;
+    UCHAR  ucClockHigh;
+    USHORT usVoltage;
+}ATOM_PPLIB_Clock_Voltage_Dependency_Record;
+
+typedef struct _ATOM_PPLIB_Clock_Voltage_Dependency_Table
+{
+    UCHAR ucNumEntries;                                                // Number of entries.
+    ATOM_PPLIB_Clock_Voltage_Dependency_Record entries[1];             // Dynamically allocate entries.
+}ATOM_PPLIB_Clock_Voltage_Dependency_Table;
+
+typedef struct _ATOM_PPLIB_Clock_Voltage_Limit_Record
+{
+    USHORT usSclkLow;
+    UCHAR  ucSclkHigh;
+    USHORT usMclkLow;
+    UCHAR  ucMclkHigh;
+    USHORT usVddc;
+    USHORT usVddci;
+}ATOM_PPLIB_Clock_Voltage_Limit_Record;
+
+typedef struct _ATOM_PPLIB_Clock_Voltage_Limit_Table
+{
+    UCHAR ucNumEntries;                                                // Number of entries.
+    ATOM_PPLIB_Clock_Voltage_Limit_Record entries[1];                  // Dynamically allocate entries.
+}ATOM_PPLIB_Clock_Voltage_Limit_Table;
+
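The new powerplay structures above follow the header's usual conventions: clocks are split into a 16-bit low word plus an 8-bit high byte in 10 kHz units, and the entries[1] arrays are variable length, sized by ucNumEntries (with the header's packed layout, the relocated ATOM_PPLIB_NONCLOCKINFO_VER1/VER2 values of 12 and 24 match the old and new ATOM_PPLIB_NONCLOCK_INFO sizes). As a hedged sketch only, assuming kernel types and that the table has already been located through usVddcDependencyOnSCLKOffset in ATOM_PPLIB_POWERPLAYTABLE4, a driver-side walk of one of the new dependency tables could look like this (the helper name is illustrative, not part of the patch):

/* sketch only, not part of the patch */
static void example_walk_vddc_dep_table(const ATOM_PPLIB_Clock_Voltage_Dependency_Table *dep)
{
	int i;

	for (i = 0; i < dep->ucNumEntries; i++) {
		const ATOM_PPLIB_Clock_Voltage_Dependency_Record *rec = &dep->entries[i];
		/* clock is carried low/high, in 10 kHz units */
		u32 clk_10khz = rec->usClockLow | ((u32)rec->ucClockHigh << 16);
		u16 vddc = rec->usVoltage;	/* voltage as encoded by the vbios */

		(void)clk_10khz;		/* consume the (clock, voltage) pair here */
		(void)vddc;
	}
}
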
 /**************************************************************************/
 
 
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 9fbabaa..b0ab185 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -403,6 +403,7 @@
 	ENABLE_LVDS_SS_PARAMETERS_V2 lvds_ss_2;
 	ENABLE_SPREAD_SPECTRUM_ON_PPLL_PS_ALLOCATION v1;
 	ENABLE_SPREAD_SPECTRUM_ON_PPLL_V2 v2;
+	ENABLE_SPREAD_SPECTRUM_ON_PPLL_V3 v3;
 };
 
 static void atombios_crtc_program_ss(struct drm_crtc *crtc,
@@ -417,7 +418,30 @@
 
 	memset(&args, 0, sizeof(args));
 
-	if (ASIC_IS_DCE4(rdev)) {
+	if (ASIC_IS_DCE5(rdev)) {
+		args.v3.usSpreadSpectrumAmountFrac = 0;
+		args.v3.ucSpreadSpectrumType = ss->type;
+		switch (pll_id) {
+		case ATOM_PPLL1:
+			args.v3.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V3_P1PLL;
+			args.v3.usSpreadSpectrumAmount = ss->amount;
+			args.v3.usSpreadSpectrumStep = ss->step;
+			break;
+		case ATOM_PPLL2:
+			args.v3.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V3_P2PLL;
+			args.v3.usSpreadSpectrumAmount = ss->amount;
+			args.v3.usSpreadSpectrumStep = ss->step;
+			break;
+		case ATOM_DCPLL:
+			args.v3.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V3_DCPLL;
+			args.v3.usSpreadSpectrumAmount = 0;
+			args.v3.usSpreadSpectrumStep = 0;
+			break;
+		case ATOM_PPLL_INVALID:
+			return;
+		}
+		args.v2.ucEnable = enable;
+	} else if (ASIC_IS_DCE4(rdev)) {
 		args.v2.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage);
 		args.v2.ucSpreadSpectrumType = ss->type;
 		switch (pll_id) {
@@ -673,9 +697,14 @@
 	PIXEL_CLOCK_PARAMETERS_V2 v2;
 	PIXEL_CLOCK_PARAMETERS_V3 v3;
 	PIXEL_CLOCK_PARAMETERS_V5 v5;
+	PIXEL_CLOCK_PARAMETERS_V6 v6;
 };
 
-static void atombios_crtc_set_dcpll(struct drm_crtc *crtc)
+/* on DCE5, make sure the voltage is high enough to support the
+ * required disp clk.
+ */
+static void atombios_crtc_set_dcpll(struct drm_crtc *crtc,
+				    u32 dispclk)
 {
 	struct drm_device *dev = crtc->dev;
 	struct radeon_device *rdev = dev->dev_private;
@@ -698,9 +727,16 @@
 			 * SetPixelClock provides the dividers
 			 */
 			args.v5.ucCRTC = ATOM_CRTC_INVALID;
-			args.v5.usPixelClock = rdev->clock.default_dispclk;
+			args.v5.usPixelClock = dispclk;
 			args.v5.ucPpll = ATOM_DCPLL;
 			break;
+		case 6:
+			/* if the default dcpll clock is specified,
+			 * SetPixelClock provides the dividers
+			 */
+			args.v6.ulDispEngClkFreq = dispclk;
+			args.v6.ucPpll = ATOM_DCPLL;
+			break;
 		default:
 			DRM_ERROR("Unknown table version %d %d\n", frev, crev);
 			return;
@@ -784,6 +820,18 @@
 			args.v5.ucEncoderMode = encoder_mode;
 			args.v5.ucPpll = pll_id;
 			break;
+		case 6:
+			args.v6.ulCrtcPclkFreq.ucCRTC = crtc_id;
+			args.v6.ulCrtcPclkFreq.ulPixelClock = cpu_to_le32(clock / 10);
+			args.v6.ucRefDiv = ref_div;
+			args.v6.usFbDiv = cpu_to_le16(fb_div);
+			args.v6.ulFbDivDecFrac = cpu_to_le32(frac_fb_div * 100000);
+			args.v6.ucPostDiv = post_div;
+			args.v6.ucMiscInfo = 0; /* HDMI depth, etc. */
+			args.v6.ucTransmitterID = encoder_id;
+			args.v6.ucEncoderMode = encoder_mode;
+			args.v6.ucPpll = pll_id;
+			break;
 		default:
 			DRM_ERROR("Unknown table version %d %d\n", frev, crev);
 			return;
@@ -1377,7 +1425,8 @@
 								   rdev->clock.default_dispclk);
 		if (ss_enabled)
 			atombios_crtc_program_ss(crtc, ATOM_DISABLE, ATOM_DCPLL, &ss);
-		atombios_crtc_set_dcpll(crtc);
+		/* XXX: DCE5, make sure voltage, dispclk is high enough */
+		atombios_crtc_set_dcpll(crtc, rdev->clock.default_dispclk);
 		if (ss_enabled)
 			atombios_crtc_program_ss(crtc, ATOM_ENABLE, ATOM_DCPLL, &ss);
 	}
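The DCE5 branches added above follow this file's usual pattern: query the command-table revision from the BIOS, fill only the matching member of the parameter union, and execute the table. A hedged sketch of that pattern for the new crev 6 SetPixelClock case, assuming the union and helpers (atom_parse_cmd_header()/atom_execute_table()) behave as in the rest of this file; the function name is illustrative and error handling is elided:

/* sketch only, not part of the patch */
static void example_set_dcpll_v6(struct radeon_device *rdev, u32 dispclk)
{
	union set_pixel_clock args;
	int index = GetIndexIntoMasterTable(COMMAND, SetPixelClock);
	u8 frev, crev;

	memset(&args, 0, sizeof(args));
	if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
		return;
	if (crev == 6) {
		/* DCE5: display engine clock in 10 kHz units; the table computes the dividers */
		args.v6.ulDispEngClkFreq = dispclk;
		args.v6.ucPpll = ATOM_DCPLL;
	}
	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}
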
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 7b337c3..7fe8ebd 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -39,6 +39,62 @@
 
 static void evergreen_gpu_init(struct radeon_device *rdev);
 void evergreen_fini(struct radeon_device *rdev);
+static void evergreen_pcie_gen2_enable(struct radeon_device *rdev);
+
+void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc)
+{
+	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc];
+	u32 tmp;
+
+	/* make sure flip is at vb rather than hb */
+	tmp = RREG32(EVERGREEN_GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset);
+	tmp &= ~EVERGREEN_GRPH_SURFACE_UPDATE_H_RETRACE_EN;
+	WREG32(EVERGREEN_GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset, tmp);
+
+	/* set pageflip to happen anywhere in vblank interval */
+	WREG32(EVERGREEN_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 0);
+
+	/* enable the pflip int */
+	radeon_irq_kms_pflip_irq_get(rdev, crtc);
+}
+
+void evergreen_post_page_flip(struct radeon_device *rdev, int crtc)
+{
+	/* disable the pflip int */
+	radeon_irq_kms_pflip_irq_put(rdev, crtc);
+}
+
+u32 evergreen_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
+{
+	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
+	u32 tmp = RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset);
+
+	/* Lock the graphics update lock */
+	tmp |= EVERGREEN_GRPH_UPDATE_LOCK;
+	WREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);
+
+	/* update the scanout addresses */
+	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
+	       upper_32_bits(crtc_base));
+	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
+	       (u32)crtc_base);
+
+	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
+	       upper_32_bits(crtc_base));
+	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
+	       (u32)crtc_base);
+
+	/* Wait for update_pending to go high. */
+	while (!(RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING));
+	DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
+
+	/* Unlock the lock, so double-buffering can take place inside vblank */
+	tmp &= ~EVERGREEN_GRPH_UPDATE_LOCK;
+	WREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);
+
+	/* Return current update_pending status: */
+	return RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING;
+}
 
 /* get temperature in millidegrees */
 u32 evergreen_get_temp(struct radeon_device *rdev)
@@ -57,6 +113,14 @@
 	return actual_temp * 1000;
 }
 
+u32 sumo_get_temp(struct radeon_device *rdev)
+{
+	u32 temp = RREG32(CG_THERMAL_STATUS) & 0xff;
+	u32 actual_temp = (temp >> 1) & 0xff;
+
+	return actual_temp * 1000;
+}
+
 void evergreen_pm_misc(struct radeon_device *rdev)
 {
 	int req_ps_idx = rdev->pm.requested_power_state_index;
@@ -337,16 +401,28 @@
 	case 0:
 	case 4:
 	default:
-		return 3840 * 2;
+		if (ASIC_IS_DCE5(rdev))
+			return 4096 * 2;
+		else
+			return 3840 * 2;
 	case 1:
 	case 5:
-		return 5760 * 2;
+		if (ASIC_IS_DCE5(rdev))
+			return 6144 * 2;
+		else
+			return 5760 * 2;
 	case 2:
 	case 6:
-		return 7680 * 2;
+		if (ASIC_IS_DCE5(rdev))
+			return 8192 * 2;
+		else
+			return 7680 * 2;
 	case 3:
 	case 7:
-		return 1920 * 2;
+		if (ASIC_IS_DCE5(rdev))
+			return 2048 * 2;
+		else
+			return 1920 * 2;
 	}
 }
 
@@ -890,31 +966,39 @@
 	save->vga_hdp_control = RREG32(VGA_HDP_CONTROL);
 	save->crtc_control[0] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET);
 	save->crtc_control[1] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
-	save->crtc_control[2] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET);
-	save->crtc_control[3] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET);
-	save->crtc_control[4] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET);
-	save->crtc_control[5] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
+	if (!(rdev->flags & RADEON_IS_IGP)) {
+		save->crtc_control[2] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET);
+		save->crtc_control[3] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET);
+		save->crtc_control[4] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET);
+		save->crtc_control[5] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
+	}
 
 	/* Stop all video */
 	WREG32(VGA_RENDER_CONTROL, 0);
 	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 1);
 	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 1);
-	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 1);
-	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 1);
-	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 1);
-	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 1);
+	if (!(rdev->flags & RADEON_IS_IGP)) {
+		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 1);
+		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 1);
+		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 1);
+		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 1);
+	}
 	WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
 	WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
-	WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
-	WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
-	WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
-	WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
+	if (!(rdev->flags & RADEON_IS_IGP)) {
+		WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
+		WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
+		WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
+		WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
+	}
 	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
 	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
-	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
-	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
-	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
-	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
+	if (!(rdev->flags & RADEON_IS_IGP)) {
+		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
+		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
+		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
+		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
+	}
 
 	WREG32(D1VGA_CONTROL, 0);
 	WREG32(D2VGA_CONTROL, 0);
@@ -944,41 +1028,43 @@
 	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC1_REGISTER_OFFSET,
 	       (u32)rdev->mc.vram_start);
 
-	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC2_REGISTER_OFFSET,
-	       upper_32_bits(rdev->mc.vram_start));
-	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC2_REGISTER_OFFSET,
-	       upper_32_bits(rdev->mc.vram_start));
-	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC2_REGISTER_OFFSET,
-	       (u32)rdev->mc.vram_start);
-	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC2_REGISTER_OFFSET,
-	       (u32)rdev->mc.vram_start);
+	if (!(rdev->flags & RADEON_IS_IGP)) {
+		WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC2_REGISTER_OFFSET,
+		       upper_32_bits(rdev->mc.vram_start));
+		WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC2_REGISTER_OFFSET,
+		       upper_32_bits(rdev->mc.vram_start));
+		WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC2_REGISTER_OFFSET,
+		       (u32)rdev->mc.vram_start);
+		WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC2_REGISTER_OFFSET,
+		       (u32)rdev->mc.vram_start);
 
-	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC3_REGISTER_OFFSET,
-	       upper_32_bits(rdev->mc.vram_start));
-	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC3_REGISTER_OFFSET,
-	       upper_32_bits(rdev->mc.vram_start));
-	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC3_REGISTER_OFFSET,
-	       (u32)rdev->mc.vram_start);
-	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC3_REGISTER_OFFSET,
-	       (u32)rdev->mc.vram_start);
+		WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC3_REGISTER_OFFSET,
+		       upper_32_bits(rdev->mc.vram_start));
+		WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC3_REGISTER_OFFSET,
+		       upper_32_bits(rdev->mc.vram_start));
+		WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC3_REGISTER_OFFSET,
+		       (u32)rdev->mc.vram_start);
+		WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC3_REGISTER_OFFSET,
+		       (u32)rdev->mc.vram_start);
 
-	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC4_REGISTER_OFFSET,
-	       upper_32_bits(rdev->mc.vram_start));
-	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC4_REGISTER_OFFSET,
-	       upper_32_bits(rdev->mc.vram_start));
-	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC4_REGISTER_OFFSET,
-	       (u32)rdev->mc.vram_start);
-	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC4_REGISTER_OFFSET,
-	       (u32)rdev->mc.vram_start);
+		WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC4_REGISTER_OFFSET,
+		       upper_32_bits(rdev->mc.vram_start));
+		WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC4_REGISTER_OFFSET,
+		       upper_32_bits(rdev->mc.vram_start));
+		WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC4_REGISTER_OFFSET,
+		       (u32)rdev->mc.vram_start);
+		WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC4_REGISTER_OFFSET,
+		       (u32)rdev->mc.vram_start);
 
-	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC5_REGISTER_OFFSET,
-	       upper_32_bits(rdev->mc.vram_start));
-	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC5_REGISTER_OFFSET,
-	       upper_32_bits(rdev->mc.vram_start));
-	WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC5_REGISTER_OFFSET,
-	       (u32)rdev->mc.vram_start);
-	WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC5_REGISTER_OFFSET,
-	       (u32)rdev->mc.vram_start);
+		WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC5_REGISTER_OFFSET,
+		       upper_32_bits(rdev->mc.vram_start));
+		WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC5_REGISTER_OFFSET,
+		       upper_32_bits(rdev->mc.vram_start));
+		WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC5_REGISTER_OFFSET,
+		       (u32)rdev->mc.vram_start);
+		WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC5_REGISTER_OFFSET,
+		       (u32)rdev->mc.vram_start);
+	}
 
 	WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(rdev->mc.vram_start));
 	WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS, (u32)rdev->mc.vram_start);
@@ -994,22 +1080,28 @@
 	WREG32(EVERGREEN_D6VGA_CONTROL, save->vga_control[5]);
 	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 1);
 	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 1);
-	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 1);
-	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 1);
-	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 1);
-	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 1);
+	if (!(rdev->flags & RADEON_IS_IGP)) {
+		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 1);
+		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 1);
+		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 1);
+		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 1);
+	}
 	WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, save->crtc_control[0]);
 	WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, save->crtc_control[1]);
-	WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, save->crtc_control[2]);
-	WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, save->crtc_control[3]);
-	WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, save->crtc_control[4]);
-	WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, save->crtc_control[5]);
+	if (!(rdev->flags & RADEON_IS_IGP)) {
+		WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, save->crtc_control[2]);
+		WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, save->crtc_control[3]);
+		WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, save->crtc_control[4]);
+		WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, save->crtc_control[5]);
+	}
 	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
 	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
-	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
-	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
-	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
-	WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
+	if (!(rdev->flags & RADEON_IS_IGP)) {
+		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
+		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
+		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
+		WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
+	}
 	WREG32(VGA_RENDER_CONTROL, save->vga_render_control);
 }
 
@@ -1057,11 +1149,17 @@
 			rdev->mc.vram_end >> 12);
 	}
 	WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0);
+	if (rdev->flags & RADEON_IS_IGP) {
+		tmp = RREG32(MC_FUS_VM_FB_OFFSET) & 0x000FFFFF;
+		tmp |= ((rdev->mc.vram_end >> 20) & 0xF) << 24;
+		tmp |= ((rdev->mc.vram_start >> 20) & 0xF) << 20;
+		WREG32(MC_FUS_VM_FB_OFFSET, tmp);
+	}
 	tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
 	tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
 	WREG32(MC_VM_FB_LOCATION, tmp);
 	WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
-	WREG32(HDP_NONSURFACE_INFO, (2 << 7));
+	WREG32(HDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
 	WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
 	if (rdev->flags & RADEON_IS_AGP) {
 		WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 16);
@@ -1285,11 +1383,15 @@
 	switch (rdev->family) {
 	case CHIP_CEDAR:
 	case CHIP_REDWOOD:
+	case CHIP_PALM:
+	case CHIP_TURKS:
+	case CHIP_CAICOS:
 		force_no_swizzle = false;
 		break;
 	case CHIP_CYPRESS:
 	case CHIP_HEMLOCK:
 	case CHIP_JUNIPER:
+	case CHIP_BARTS:
 	default:
 		force_no_swizzle = true;
 		break;
@@ -1384,6 +1486,46 @@
 	return backend_map;
 }
 
+static void evergreen_program_channel_remap(struct radeon_device *rdev)
+{
+	u32 tcp_chan_steer_lo, tcp_chan_steer_hi, mc_shared_chremap, tmp;
+
+	tmp = RREG32(MC_SHARED_CHMAP);
+	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
+	case 0:
+	case 1:
+	case 2:
+	case 3:
+	default:
+		/* default mapping */
+		mc_shared_chremap = 0x00fac688;
+		break;
+	}
+
+	switch (rdev->family) {
+	case CHIP_HEMLOCK:
+	case CHIP_CYPRESS:
+	case CHIP_BARTS:
+		tcp_chan_steer_lo = 0x54763210;
+		tcp_chan_steer_hi = 0x0000ba98;
+		break;
+	case CHIP_JUNIPER:
+	case CHIP_REDWOOD:
+	case CHIP_CEDAR:
+	case CHIP_PALM:
+	case CHIP_TURKS:
+	case CHIP_CAICOS:
+	default:
+		tcp_chan_steer_lo = 0x76543210;
+		tcp_chan_steer_hi = 0x0000ba98;
+		break;
+	}
+
+	WREG32(TCP_CHAN_STEER_LO, tcp_chan_steer_lo);
+	WREG32(TCP_CHAN_STEER_HI, tcp_chan_steer_hi);
+	WREG32(MC_SHARED_CHREMAP, mc_shared_chremap);
+}
+
 static void evergreen_gpu_init(struct radeon_device *rdev)
 {
 	u32 cc_rb_backend_disable = 0;
@@ -1495,6 +1637,90 @@
 		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
 		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
 		break;
+	case CHIP_PALM:
+		rdev->config.evergreen.num_ses = 1;
+		rdev->config.evergreen.max_pipes = 2;
+		rdev->config.evergreen.max_tile_pipes = 2;
+		rdev->config.evergreen.max_simds = 2;
+		rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses;
+		rdev->config.evergreen.max_gprs = 256;
+		rdev->config.evergreen.max_threads = 192;
+		rdev->config.evergreen.max_gs_threads = 16;
+		rdev->config.evergreen.max_stack_entries = 256;
+		rdev->config.evergreen.sx_num_of_sets = 4;
+		rdev->config.evergreen.sx_max_export_size = 128;
+		rdev->config.evergreen.sx_max_export_pos_size = 32;
+		rdev->config.evergreen.sx_max_export_smx_size = 96;
+		rdev->config.evergreen.max_hw_contexts = 4;
+		rdev->config.evergreen.sq_num_cf_insts = 1;
+
+		rdev->config.evergreen.sc_prim_fifo_size = 0x40;
+		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
+		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+		break;
+	case CHIP_BARTS:
+		rdev->config.evergreen.num_ses = 2;
+		rdev->config.evergreen.max_pipes = 4;
+		rdev->config.evergreen.max_tile_pipes = 8;
+		rdev->config.evergreen.max_simds = 7;
+		rdev->config.evergreen.max_backends = 4 * rdev->config.evergreen.num_ses;
+		rdev->config.evergreen.max_gprs = 256;
+		rdev->config.evergreen.max_threads = 248;
+		rdev->config.evergreen.max_gs_threads = 32;
+		rdev->config.evergreen.max_stack_entries = 512;
+		rdev->config.evergreen.sx_num_of_sets = 4;
+		rdev->config.evergreen.sx_max_export_size = 256;
+		rdev->config.evergreen.sx_max_export_pos_size = 64;
+		rdev->config.evergreen.sx_max_export_smx_size = 192;
+		rdev->config.evergreen.max_hw_contexts = 8;
+		rdev->config.evergreen.sq_num_cf_insts = 2;
+
+		rdev->config.evergreen.sc_prim_fifo_size = 0x100;
+		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
+		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+		break;
+	case CHIP_TURKS:
+		rdev->config.evergreen.num_ses = 1;
+		rdev->config.evergreen.max_pipes = 4;
+		rdev->config.evergreen.max_tile_pipes = 4;
+		rdev->config.evergreen.max_simds = 6;
+		rdev->config.evergreen.max_backends = 2 * rdev->config.evergreen.num_ses;
+		rdev->config.evergreen.max_gprs = 256;
+		rdev->config.evergreen.max_threads = 248;
+		rdev->config.evergreen.max_gs_threads = 32;
+		rdev->config.evergreen.max_stack_entries = 256;
+		rdev->config.evergreen.sx_num_of_sets = 4;
+		rdev->config.evergreen.sx_max_export_size = 256;
+		rdev->config.evergreen.sx_max_export_pos_size = 64;
+		rdev->config.evergreen.sx_max_export_smx_size = 192;
+		rdev->config.evergreen.max_hw_contexts = 8;
+		rdev->config.evergreen.sq_num_cf_insts = 2;
+
+		rdev->config.evergreen.sc_prim_fifo_size = 0x100;
+		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
+		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+		break;
+	case CHIP_CAICOS:
+		rdev->config.evergreen.num_ses = 1;
+		rdev->config.evergreen.max_pipes = 4;
+		rdev->config.evergreen.max_tile_pipes = 2;
+		rdev->config.evergreen.max_simds = 2;
+		rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses;
+		rdev->config.evergreen.max_gprs = 256;
+		rdev->config.evergreen.max_threads = 192;
+		rdev->config.evergreen.max_gs_threads = 16;
+		rdev->config.evergreen.max_stack_entries = 256;
+		rdev->config.evergreen.sx_num_of_sets = 4;
+		rdev->config.evergreen.sx_max_export_size = 128;
+		rdev->config.evergreen.sx_max_export_pos_size = 32;
+		rdev->config.evergreen.sx_max_export_smx_size = 96;
+		rdev->config.evergreen.max_hw_contexts = 4;
+		rdev->config.evergreen.sq_num_cf_insts = 1;
+
+		rdev->config.evergreen.sc_prim_fifo_size = 0x40;
+		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
+		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
+		break;
 	}
 
 	/* Initialize HDP */
@@ -1636,6 +1862,7 @@
 		switch (rdev->family) {
 		case CHIP_CYPRESS:
 		case CHIP_HEMLOCK:
+		case CHIP_BARTS:
 			gb_backend_map = 0x66442200;
 			break;
 		case CHIP_JUNIPER:
@@ -1687,6 +1914,8 @@
 	WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
 	WREG32(HDP_ADDR_CONFIG, gb_addr_config);
 
+	evergreen_program_channel_remap(rdev);
+
 	num_shader_engines = ((RREG32(GB_ADDR_CONFIG) & NUM_SHADER_ENGINES(3)) >> 12) + 1;
 	grbm_gfx_index = INSTANCE_BROADCAST_WRITES;
 
@@ -1769,9 +1998,16 @@
 		      GS_PRIO(2) |
 		      ES_PRIO(3));
 
-	if (rdev->family == CHIP_CEDAR)
+	switch (rdev->family) {
+	case CHIP_CEDAR:
+	case CHIP_PALM:
+	case CHIP_CAICOS:
 		/* no vertex cache */
 		sq_config &= ~VC_ENABLE;
+		break;
+	default:
+		break;
+	}
 
 	sq_lds_resource_mgmt = RREG32(SQ_LDS_RESOURCE_MGMT);
 
@@ -1783,10 +2019,15 @@
 	sq_gpr_resource_mgmt_3 = NUM_HS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 3 / 32);
 	sq_gpr_resource_mgmt_3 |= NUM_LS_GPRS((rdev->config.evergreen.max_gprs - (4 * 2)) * 3 / 32);
 
-	if (rdev->family == CHIP_CEDAR)
+	switch (rdev->family) {
+	case CHIP_CEDAR:
+	case CHIP_PALM:
 		ps_thread_count = 96;
-	else
+		break;
+	default:
 		ps_thread_count = 128;
+		break;
+	}
 
 	sq_thread_resource_mgmt = NUM_PS_THREADS(ps_thread_count);
 	sq_thread_resource_mgmt |= NUM_VS_THREADS((((rdev->config.evergreen.max_threads - ps_thread_count) / 6) / 8) * 8);
@@ -1817,10 +2058,16 @@
 	WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) |
 					  FORCE_EOV_MAX_REZ_CNT(255)));
 
-	if (rdev->family == CHIP_CEDAR)
+	switch (rdev->family) {
+	case CHIP_CEDAR:
+	case CHIP_PALM:
+	case CHIP_CAICOS:
 		vgt_cache_invalidation = CACHE_INVALIDATION(TC_ONLY);
-	else
+		break;
+	default:
 		vgt_cache_invalidation = CACHE_INVALIDATION(VC_AND_TC);
+		break;
+	}
 	vgt_cache_invalidation |= AUTO_INVLD_EN(ES_AND_GS_AUTO);
 	WREG32(VGT_CACHE_INVALIDATION, vgt_cache_invalidation);
 
@@ -1904,12 +2151,18 @@
 	rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
 	rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
 	/* Setup GPU memory space */
-	/* size in MB on evergreen */
-	rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
-	rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
+	if (rdev->flags & RADEON_IS_IGP) {
+		/* size in bytes on fusion */
+		rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
+		rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
+	} else {
+		/* size in MB on evergreen */
+		rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
+		rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
+	}
 	rdev->mc.visible_vram_size = rdev->mc.aper_size;
 	rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
-	r600_vram_gtt_location(rdev, &rdev->mc);
+	r700_vram_gtt_location(rdev, &rdev->mc);
 	radeon_update_bandwidth_info(rdev);
 
 	return 0;
@@ -1917,8 +2170,30 @@
 
 bool evergreen_gpu_is_lockup(struct radeon_device *rdev)
 {
-	/* FIXME: implement for evergreen */
-	return false;
+	u32 srbm_status;
+	u32 grbm_status;
+	u32 grbm_status_se0, grbm_status_se1;
+	struct r100_gpu_lockup *lockup = &rdev->config.evergreen.lockup;
+	int r;
+
+	srbm_status = RREG32(SRBM_STATUS);
+	grbm_status = RREG32(GRBM_STATUS);
+	grbm_status_se0 = RREG32(GRBM_STATUS_SE0);
+	grbm_status_se1 = RREG32(GRBM_STATUS_SE1);
+	if (!(grbm_status & GUI_ACTIVE)) {
+		r100_gpu_lockup_update(lockup, &rdev->cp);
+		return false;
+	}
+	/* force CP activities */
+	r = radeon_ring_lock(rdev, 2);
+	if (!r) {
+		/* PACKET2 NOP */
+		radeon_ring_write(rdev, 0x80000000);
+		radeon_ring_write(rdev, 0x80000000);
+		radeon_ring_unlock_commit(rdev);
+	}
+	rdev->cp.rptr = RREG32(CP_RB_RPTR);
+	return r100_gpu_cp_is_lockup(rdev, lockup, &rdev->cp);
 }
 
 static int evergreen_gpu_soft_reset(struct radeon_device *rdev)
@@ -2011,17 +2286,21 @@
 	WREG32(GRBM_INT_CNTL, 0);
 	WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
 	WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
-	WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
-	WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
-	WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
-	WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
+	if (!(rdev->flags & RADEON_IS_IGP)) {
+		WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
+		WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
+		WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
+		WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
+	}
 
 	WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
 	WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
-	WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
-	WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
-	WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
-	WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
+	if (!(rdev->flags & RADEON_IS_IGP)) {
+		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
+		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
+		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
+		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
+	}
 
 	WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
 	WREG32(DACB_AUTODETECT_INT_CONTROL, 0);
@@ -2047,6 +2326,7 @@
 	u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0;
 	u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6;
 	u32 grbm_int_cntl = 0;
+	u32 grph1 = 0, grph2 = 0, grph3 = 0, grph4 = 0, grph5 = 0, grph6 = 0;
 
 	if (!rdev->irq.installed) {
 		WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
@@ -2072,27 +2352,33 @@
 		cp_int_cntl |= RB_INT_ENABLE;
 		cp_int_cntl |= TIME_STAMP_INT_ENABLE;
 	}
-	if (rdev->irq.crtc_vblank_int[0]) {
+	if (rdev->irq.crtc_vblank_int[0] ||
+	    rdev->irq.pflip[0]) {
 		DRM_DEBUG("evergreen_irq_set: vblank 0\n");
 		crtc1 |= VBLANK_INT_MASK;
 	}
-	if (rdev->irq.crtc_vblank_int[1]) {
+	if (rdev->irq.crtc_vblank_int[1] ||
+	    rdev->irq.pflip[1]) {
 		DRM_DEBUG("evergreen_irq_set: vblank 1\n");
 		crtc2 |= VBLANK_INT_MASK;
 	}
-	if (rdev->irq.crtc_vblank_int[2]) {
+	if (rdev->irq.crtc_vblank_int[2] ||
+	    rdev->irq.pflip[2]) {
 		DRM_DEBUG("evergreen_irq_set: vblank 2\n");
 		crtc3 |= VBLANK_INT_MASK;
 	}
-	if (rdev->irq.crtc_vblank_int[3]) {
+	if (rdev->irq.crtc_vblank_int[3] ||
+	    rdev->irq.pflip[3]) {
 		DRM_DEBUG("evergreen_irq_set: vblank 3\n");
 		crtc4 |= VBLANK_INT_MASK;
 	}
-	if (rdev->irq.crtc_vblank_int[4]) {
+	if (rdev->irq.crtc_vblank_int[4] ||
+	    rdev->irq.pflip[4]) {
 		DRM_DEBUG("evergreen_irq_set: vblank 4\n");
 		crtc5 |= VBLANK_INT_MASK;
 	}
-	if (rdev->irq.crtc_vblank_int[5]) {
+	if (rdev->irq.crtc_vblank_int[5] ||
+	    rdev->irq.pflip[5]) {
 		DRM_DEBUG("evergreen_irq_set: vblank 5\n");
 		crtc6 |= VBLANK_INT_MASK;
 	}
@@ -2130,10 +2416,19 @@
 
 	WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1);
 	WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, crtc2);
-	WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, crtc3);
-	WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, crtc4);
-	WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, crtc5);
-	WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6);
+	if (!(rdev->flags & RADEON_IS_IGP)) {
+		WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, crtc3);
+		WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, crtc4);
+		WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, crtc5);
+		WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6);
+	}
+
+	WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, grph1);
+	WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, grph2);
+	WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, grph3);
+	WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, grph4);
+	WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, grph5);
+	WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, grph6);
 
 	WREG32(DC_HPD1_INT_CONTROL, hpd1);
 	WREG32(DC_HPD2_INT_CONTROL, hpd2);
@@ -2145,79 +2440,92 @@
 	return 0;
 }
 
-static inline void evergreen_irq_ack(struct radeon_device *rdev,
-				     u32 *disp_int,
-				     u32 *disp_int_cont,
-				     u32 *disp_int_cont2,
-				     u32 *disp_int_cont3,
-				     u32 *disp_int_cont4,
-				     u32 *disp_int_cont5)
+static inline void evergreen_irq_ack(struct radeon_device *rdev)
 {
 	u32 tmp;
 
-	*disp_int = RREG32(DISP_INTERRUPT_STATUS);
-	*disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
-	*disp_int_cont2 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE2);
-	*disp_int_cont3 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE3);
-	*disp_int_cont4 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE4);
-	*disp_int_cont5 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE5);
+	rdev->irq.stat_regs.evergreen.disp_int = RREG32(DISP_INTERRUPT_STATUS);
+	rdev->irq.stat_regs.evergreen.disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
+	rdev->irq.stat_regs.evergreen.disp_int_cont2 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE2);
+	rdev->irq.stat_regs.evergreen.disp_int_cont3 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE3);
+	rdev->irq.stat_regs.evergreen.disp_int_cont4 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE4);
+	rdev->irq.stat_regs.evergreen.disp_int_cont5 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE5);
+	rdev->irq.stat_regs.evergreen.d1grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET);
+	rdev->irq.stat_regs.evergreen.d2grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET);
+	rdev->irq.stat_regs.evergreen.d3grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET);
+	rdev->irq.stat_regs.evergreen.d4grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET);
+	rdev->irq.stat_regs.evergreen.d5grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET);
+	rdev->irq.stat_regs.evergreen.d6grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET);
 
-	if (*disp_int & LB_D1_VBLANK_INTERRUPT)
+	if (rdev->irq.stat_regs.evergreen.d1grph_int & GRPH_PFLIP_INT_OCCURRED)
+		WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
+	if (rdev->irq.stat_regs.evergreen.d2grph_int & GRPH_PFLIP_INT_OCCURRED)
+		WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
+	if (rdev->irq.stat_regs.evergreen.d3grph_int & GRPH_PFLIP_INT_OCCURRED)
+		WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
+	if (rdev->irq.stat_regs.evergreen.d4grph_int & GRPH_PFLIP_INT_OCCURRED)
+		WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
+	if (rdev->irq.stat_regs.evergreen.d5grph_int & GRPH_PFLIP_INT_OCCURRED)
+		WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
+	if (rdev->irq.stat_regs.evergreen.d6grph_int & GRPH_PFLIP_INT_OCCURRED)
+		WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
+
+	if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT)
 		WREG32(VBLANK_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VBLANK_ACK);
-	if (*disp_int & LB_D1_VLINE_INTERRUPT)
+	if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT)
 		WREG32(VLINE_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VLINE_ACK);
 
-	if (*disp_int_cont & LB_D2_VBLANK_INTERRUPT)
+	if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT)
 		WREG32(VBLANK_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VBLANK_ACK);
-	if (*disp_int_cont & LB_D2_VLINE_INTERRUPT)
+	if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT)
 		WREG32(VLINE_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VLINE_ACK);
 
-	if (*disp_int_cont2 & LB_D3_VBLANK_INTERRUPT)
+	if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT)
 		WREG32(VBLANK_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VBLANK_ACK);
-	if (*disp_int_cont2 & LB_D3_VLINE_INTERRUPT)
+	if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT)
 		WREG32(VLINE_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VLINE_ACK);
 
-	if (*disp_int_cont3 & LB_D4_VBLANK_INTERRUPT)
+	if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT)
 		WREG32(VBLANK_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VBLANK_ACK);
-	if (*disp_int_cont3 & LB_D4_VLINE_INTERRUPT)
+	if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT)
 		WREG32(VLINE_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VLINE_ACK);
 
-	if (*disp_int_cont4 & LB_D5_VBLANK_INTERRUPT)
+	if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT)
 		WREG32(VBLANK_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VBLANK_ACK);
-	if (*disp_int_cont4 & LB_D5_VLINE_INTERRUPT)
+	if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT)
 		WREG32(VLINE_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VLINE_ACK);
 
-	if (*disp_int_cont5 & LB_D6_VBLANK_INTERRUPT)
+	if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT)
 		WREG32(VBLANK_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VBLANK_ACK);
-	if (*disp_int_cont5 & LB_D6_VLINE_INTERRUPT)
+	if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT)
 		WREG32(VLINE_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VLINE_ACK);
 
-	if (*disp_int & DC_HPD1_INTERRUPT) {
+	if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT) {
 		tmp = RREG32(DC_HPD1_INT_CONTROL);
 		tmp |= DC_HPDx_INT_ACK;
 		WREG32(DC_HPD1_INT_CONTROL, tmp);
 	}
-	if (*disp_int_cont & DC_HPD2_INTERRUPT) {
+	if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT) {
 		tmp = RREG32(DC_HPD2_INT_CONTROL);
 		tmp |= DC_HPDx_INT_ACK;
 		WREG32(DC_HPD2_INT_CONTROL, tmp);
 	}
-	if (*disp_int_cont2 & DC_HPD3_INTERRUPT) {
+	if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT) {
 		tmp = RREG32(DC_HPD3_INT_CONTROL);
 		tmp |= DC_HPDx_INT_ACK;
 		WREG32(DC_HPD3_INT_CONTROL, tmp);
 	}
-	if (*disp_int_cont3 & DC_HPD4_INTERRUPT) {
+	if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT) {
 		tmp = RREG32(DC_HPD4_INT_CONTROL);
 		tmp |= DC_HPDx_INT_ACK;
 		WREG32(DC_HPD4_INT_CONTROL, tmp);
 	}
-	if (*disp_int_cont4 & DC_HPD5_INTERRUPT) {
+	if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT) {
 		tmp = RREG32(DC_HPD5_INT_CONTROL);
 		tmp |= DC_HPDx_INT_ACK;
 		WREG32(DC_HPD5_INT_CONTROL, tmp);
 	}
-	if (*disp_int_cont5 & DC_HPD6_INTERRUPT) {
+	if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
 		tmp = RREG32(DC_HPD5_INT_CONTROL);
 		tmp |= DC_HPDx_INT_ACK;
 		WREG32(DC_HPD6_INT_CONTROL, tmp);
@@ -2226,14 +2534,10 @@
 
 void evergreen_irq_disable(struct radeon_device *rdev)
 {
-	u32 disp_int, disp_int_cont, disp_int_cont2;
-	u32 disp_int_cont3, disp_int_cont4, disp_int_cont5;
-
 	r600_disable_interrupts(rdev);
 	/* Wait and acknowledge irq */
 	mdelay(1);
-	evergreen_irq_ack(rdev, &disp_int, &disp_int_cont, &disp_int_cont2,
-			  &disp_int_cont3, &disp_int_cont4, &disp_int_cont5);
+	evergreen_irq_ack(rdev);
 	evergreen_disable_interrupt_state(rdev);
 }
 
@@ -2273,8 +2577,6 @@
 	u32 rptr = rdev->ih.rptr;
 	u32 src_id, src_data;
 	u32 ring_index;
-	u32 disp_int, disp_int_cont, disp_int_cont2;
-	u32 disp_int_cont3, disp_int_cont4, disp_int_cont5;
 	unsigned long flags;
 	bool queue_hotplug = false;
 
@@ -2295,8 +2597,7 @@
 
 restart_ih:
 	/* display interrupts */
-	evergreen_irq_ack(rdev, &disp_int, &disp_int_cont, &disp_int_cont2,
-			  &disp_int_cont3, &disp_int_cont4, &disp_int_cont5);
+	evergreen_irq_ack(rdev);
 
 	rdev->ih.wptr = wptr;
 	while (rptr != wptr) {
@@ -2309,17 +2610,21 @@
 		case 1: /* D1 vblank/vline */
 			switch (src_data) {
 			case 0: /* D1 vblank */
-				if (disp_int & LB_D1_VBLANK_INTERRUPT) {
-					drm_handle_vblank(rdev->ddev, 0);
-					rdev->pm.vblank_sync = true;
-					wake_up(&rdev->irq.vblank_queue);
-					disp_int &= ~LB_D1_VBLANK_INTERRUPT;
+				if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT) {
+					if (rdev->irq.crtc_vblank_int[0]) {
+						drm_handle_vblank(rdev->ddev, 0);
+						rdev->pm.vblank_sync = true;
+						wake_up(&rdev->irq.vblank_queue);
+					}
+					if (rdev->irq.pflip[0])
+						radeon_crtc_handle_flip(rdev, 0);
+					rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
 					DRM_DEBUG("IH: D1 vblank\n");
 				}
 				break;
 			case 1: /* D1 vline */
-				if (disp_int & LB_D1_VLINE_INTERRUPT) {
-					disp_int &= ~LB_D1_VLINE_INTERRUPT;
+				if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT) {
+					rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VLINE_INTERRUPT;
 					DRM_DEBUG("IH: D1 vline\n");
 				}
 				break;
@@ -2331,17 +2636,21 @@
 		case 2: /* D2 vblank/vline */
 			switch (src_data) {
 			case 0: /* D2 vblank */
-				if (disp_int_cont & LB_D2_VBLANK_INTERRUPT) {
-					drm_handle_vblank(rdev->ddev, 1);
-					rdev->pm.vblank_sync = true;
-					wake_up(&rdev->irq.vblank_queue);
-					disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
+				if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT) {
+					if (rdev->irq.crtc_vblank_int[1]) {
+						drm_handle_vblank(rdev->ddev, 1);
+						rdev->pm.vblank_sync = true;
+						wake_up(&rdev->irq.vblank_queue);
+					}
+					if (rdev->irq.pflip[1])
+						radeon_crtc_handle_flip(rdev, 1);
+					rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
 					DRM_DEBUG("IH: D2 vblank\n");
 				}
 				break;
 			case 1: /* D2 vline */
-				if (disp_int_cont & LB_D2_VLINE_INTERRUPT) {
-					disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
+				if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT) {
+					rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
 					DRM_DEBUG("IH: D2 vline\n");
 				}
 				break;
@@ -2353,17 +2662,21 @@
 		case 3: /* D3 vblank/vline */
 			switch (src_data) {
 			case 0: /* D3 vblank */
-				if (disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) {
-					drm_handle_vblank(rdev->ddev, 2);
-					rdev->pm.vblank_sync = true;
-					wake_up(&rdev->irq.vblank_queue);
-					disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
+				if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) {
+					if (rdev->irq.crtc_vblank_int[2]) {
+						drm_handle_vblank(rdev->ddev, 2);
+						rdev->pm.vblank_sync = true;
+						wake_up(&rdev->irq.vblank_queue);
+					}
+					if (rdev->irq.pflip[2])
+						radeon_crtc_handle_flip(rdev, 2);
+					rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
 					DRM_DEBUG("IH: D3 vblank\n");
 				}
 				break;
 			case 1: /* D3 vline */
-				if (disp_int_cont2 & LB_D3_VLINE_INTERRUPT) {
-					disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
+				if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT) {
+					rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
 					DRM_DEBUG("IH: D3 vline\n");
 				}
 				break;
@@ -2375,17 +2688,21 @@
 		case 4: /* D4 vblank/vline */
 			switch (src_data) {
 			case 0: /* D4 vblank */
-				if (disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) {
-					drm_handle_vblank(rdev->ddev, 3);
-					rdev->pm.vblank_sync = true;
-					wake_up(&rdev->irq.vblank_queue);
-					disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
+				if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) {
+					if (rdev->irq.crtc_vblank_int[3]) {
+						drm_handle_vblank(rdev->ddev, 3);
+						rdev->pm.vblank_sync = true;
+						wake_up(&rdev->irq.vblank_queue);
+					}
+					if (rdev->irq.pflip[3])
+						radeon_crtc_handle_flip(rdev, 3);
+					rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
 					DRM_DEBUG("IH: D4 vblank\n");
 				}
 				break;
 			case 1: /* D4 vline */
-				if (disp_int_cont3 & LB_D4_VLINE_INTERRUPT) {
-					disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
+				if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT) {
+					rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
 					DRM_DEBUG("IH: D4 vline\n");
 				}
 				break;
@@ -2397,17 +2714,21 @@
 		case 5: /* D5 vblank/vline */
 			switch (src_data) {
 			case 0: /* D5 vblank */
-				if (disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) {
-					drm_handle_vblank(rdev->ddev, 4);
-					rdev->pm.vblank_sync = true;
-					wake_up(&rdev->irq.vblank_queue);
-					disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
+				if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) {
+					if (rdev->irq.crtc_vblank_int[4]) {
+						drm_handle_vblank(rdev->ddev, 4);
+						rdev->pm.vblank_sync = true;
+						wake_up(&rdev->irq.vblank_queue);
+					}
+					if (rdev->irq.pflip[4])
+						radeon_crtc_handle_flip(rdev, 4);
+					rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
 					DRM_DEBUG("IH: D5 vblank\n");
 				}
 				break;
 			case 1: /* D5 vline */
-				if (disp_int_cont4 & LB_D5_VLINE_INTERRUPT) {
-					disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
+				if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT) {
+					rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
 					DRM_DEBUG("IH: D5 vline\n");
 				}
 				break;
@@ -2419,17 +2740,21 @@
 		case 6: /* D6 vblank/vline */
 			switch (src_data) {
 			case 0: /* D6 vblank */
-				if (disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) {
-					drm_handle_vblank(rdev->ddev, 5);
-					rdev->pm.vblank_sync = true;
-					wake_up(&rdev->irq.vblank_queue);
-					disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
+				if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) {
+					if (rdev->irq.crtc_vblank_int[5]) {
+						drm_handle_vblank(rdev->ddev, 5);
+						rdev->pm.vblank_sync = true;
+						wake_up(&rdev->irq.vblank_queue);
+					}
+					if (rdev->irq.pflip[5])
+						radeon_crtc_handle_flip(rdev, 5);
+					rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
 					DRM_DEBUG("IH: D6 vblank\n");
 				}
 				break;
 			case 1: /* D6 vline */
-				if (disp_int_cont5 & LB_D6_VLINE_INTERRUPT) {
-					disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
+				if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT) {
+					rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
 					DRM_DEBUG("IH: D6 vline\n");
 				}
 				break;
@@ -2441,43 +2766,43 @@
 		case 42: /* HPD hotplug */
 			switch (src_data) {
 			case 0:
-				if (disp_int & DC_HPD1_INTERRUPT) {
-					disp_int &= ~DC_HPD1_INTERRUPT;
+				if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT) {
+					rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_INTERRUPT;
 					queue_hotplug = true;
 					DRM_DEBUG("IH: HPD1\n");
 				}
 				break;
 			case 1:
-				if (disp_int_cont & DC_HPD2_INTERRUPT) {
-					disp_int_cont &= ~DC_HPD2_INTERRUPT;
+				if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT) {
+					rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_INTERRUPT;
 					queue_hotplug = true;
 					DRM_DEBUG("IH: HPD2\n");
 				}
 				break;
 			case 2:
-				if (disp_int_cont2 & DC_HPD3_INTERRUPT) {
-					disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
+				if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT) {
+					rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
 					queue_hotplug = true;
 					DRM_DEBUG("IH: HPD3\n");
 				}
 				break;
 			case 3:
-				if (disp_int_cont3 & DC_HPD4_INTERRUPT) {
-					disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
+				if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT) {
+					rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
 					queue_hotplug = true;
 					DRM_DEBUG("IH: HPD4\n");
 				}
 				break;
 			case 4:
-				if (disp_int_cont4 & DC_HPD5_INTERRUPT) {
-					disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
+				if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT) {
+					rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
 					queue_hotplug = true;
 					DRM_DEBUG("IH: HPD5\n");
 				}
 				break;
 			case 5:
-				if (disp_int_cont5 & DC_HPD6_INTERRUPT) {
-					disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
+				if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
+					rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
 					queue_hotplug = true;
 					DRM_DEBUG("IH: HPD6\n");
 				}
@@ -2516,7 +2841,7 @@
 	if (wptr != rdev->ih.wptr)
 		goto restart_ih;
 	if (queue_hotplug)
-		queue_work(rdev->wq, &rdev->hotplug_work);
+		schedule_work(&rdev->hotplug_work);
 	rdev->ih.rptr = rptr;
 	WREG32(IH_RB_RPTR, rdev->ih.rptr);
 	spin_unlock_irqrestore(&rdev->ih.lock, flags);
@@ -2527,12 +2852,31 @@
 {
 	int r;
 
-	if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
-		r = r600_init_microcode(rdev);
+	/* enable pcie gen2 link */
+	if (!ASIC_IS_DCE5(rdev))
+		evergreen_pcie_gen2_enable(rdev);
+
+	if (ASIC_IS_DCE5(rdev)) {
+		if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) {
+			r = ni_init_microcode(rdev);
+			if (r) {
+				DRM_ERROR("Failed to load firmware!\n");
+				return r;
+			}
+		}
+		r = btc_mc_load_microcode(rdev);
 		if (r) {
-			DRM_ERROR("Failed to load firmware!\n");
+			DRM_ERROR("Failed to load MC firmware!\n");
 			return r;
 		}
+	} else {
+		if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
+			r = r600_init_microcode(rdev);
+			if (r) {
+				DRM_ERROR("Failed to load firmware!\n");
+				return r;
+			}
+		}
 	}
 
 	evergreen_mc_program(rdev);
@@ -2551,6 +2895,11 @@
 		rdev->asic->copy = NULL;
 		dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
 	}
+	/* XXX: ontario has problems blitting to gart at the moment */
+	if (rdev->family == CHIP_PALM) {
+		rdev->asic->copy = NULL;
+		rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
+	}
 
 	/* allocate wb buffer */
 	r = radeon_wb_init(rdev);
@@ -2658,12 +3007,16 @@
 	u32 reg;
 
 	/* first check CRTCs */
-	reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
-		RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET) |
-		RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) |
-		RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET) |
-		RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) |
-		RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
+	if (rdev->flags & RADEON_IS_IGP)
+		reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
+			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
+	else
+		reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
+			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET) |
+			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) |
+			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET) |
+			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) |
+			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
 	if (reg & EVERGREEN_CRTC_MASTER_EN)
 		return true;
 
@@ -2800,3 +3153,52 @@
 	rdev->bios = NULL;
 	radeon_dummy_page_fini(rdev);
 }
+
+static void evergreen_pcie_gen2_enable(struct radeon_device *rdev)
+{
+	u32 link_width_cntl, speed_cntl;
+
+	if (rdev->flags & RADEON_IS_IGP)
+		return;
+
+	if (!(rdev->flags & RADEON_IS_PCIE))
+		return;
+
+	/* x2 cards have a special sequence */
+	if (ASIC_IS_X2(rdev))
+		return;
+
+	speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+	if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) ||
+	    (speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) {
+
+		link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
+		link_width_cntl &= ~LC_UPCONFIGURE_DIS;
+		WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+
+		speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+		speed_cntl &= ~LC_TARGET_LINK_SPEED_OVERRIDE_EN;
+		WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
+
+		speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+		speed_cntl |= LC_CLR_FAILED_SPD_CHANGE_CNT;
+		WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
+
+		speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+		speed_cntl &= ~LC_CLR_FAILED_SPD_CHANGE_CNT;
+		WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
+
+		speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+		speed_cntl |= LC_GEN2_EN_STRAP;
+		WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
+
+	} else {
+		link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
+		/* XXX: only disable it if gen1 bridge vendor == 0x111d or 0x1106 */
+		if (1)
+			link_width_cntl |= LC_UPCONFIGURE_DIS;
+		else
+			link_width_cntl &= ~LC_UPCONFIGURE_DIS;
+		WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+	}
+}
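Taken together, the evergreen.c changes above add pflip interrupt plumbing and three page-flip hooks. A hedged sketch of the order in which those hooks are intended to be driven; the wrapper name is illustrative, and the fence waits and common DRM bookkeeping that normally sit between these calls are elided:

/* sketch only, not part of the patch */
static void example_flip_sequence(struct radeon_device *rdev, int crtc, u64 new_base)
{
	/* force flips into the vblank interval and take a pflip irq reference */
	evergreen_pre_page_flip(rdev, crtc);

	/* latch the new scanout address under GRPH_UPDATE_LOCK;
	 * the return value is the SURFACE_UPDATE_PENDING status */
	evergreen_page_flip(rdev, crtc, new_base);

	/* completion is signalled through the pflip interrupt, which lands in
	 * radeon_crtc_handle_flip(); afterwards the irq reference is dropped */
	evergreen_post_page_flip(rdev, crtc);
}
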
diff --git a/drivers/gpu/drm/radeon/evergreen_blit_kms.c b/drivers/gpu/drm/radeon/evergreen_blit_kms.c
index e0e5901..b758dc7 100644
--- a/drivers/gpu/drm/radeon/evergreen_blit_kms.c
+++ b/drivers/gpu/drm/radeon/evergreen_blit_kms.c
@@ -147,7 +147,9 @@
 	radeon_ring_write(rdev, 0);
 	radeon_ring_write(rdev, SQ_TEX_VTX_VALID_BUFFER << 30);
 
-	if (rdev->family == CHIP_CEDAR)
+	if ((rdev->family == CHIP_CEDAR) ||
+	    (rdev->family == CHIP_PALM) ||
+	    (rdev->family == CHIP_CAICOS))
 		cp_set_surface_sync(rdev,
 				    PACKET3_TC_ACTION_ENA, 48, gpu_addr);
 	else
@@ -331,9 +333,95 @@
 		num_hs_stack_entries = 85;
 		num_ls_stack_entries = 85;
 		break;
+	case CHIP_PALM:
+		num_ps_gprs = 93;
+		num_vs_gprs = 46;
+		num_temp_gprs = 4;
+		num_gs_gprs = 31;
+		num_es_gprs = 31;
+		num_hs_gprs = 23;
+		num_ls_gprs = 23;
+		num_ps_threads = 96;
+		num_vs_threads = 16;
+		num_gs_threads = 16;
+		num_es_threads = 16;
+		num_hs_threads = 16;
+		num_ls_threads = 16;
+		num_ps_stack_entries = 42;
+		num_vs_stack_entries = 42;
+		num_gs_stack_entries = 42;
+		num_es_stack_entries = 42;
+		num_hs_stack_entries = 42;
+		num_ls_stack_entries = 42;
+		break;
+	case CHIP_BARTS:
+		num_ps_gprs = 93;
+		num_vs_gprs = 46;
+		num_temp_gprs = 4;
+		num_gs_gprs = 31;
+		num_es_gprs = 31;
+		num_hs_gprs = 23;
+		num_ls_gprs = 23;
+		num_ps_threads = 128;
+		num_vs_threads = 20;
+		num_gs_threads = 20;
+		num_es_threads = 20;
+		num_hs_threads = 20;
+		num_ls_threads = 20;
+		num_ps_stack_entries = 85;
+		num_vs_stack_entries = 85;
+		num_gs_stack_entries = 85;
+		num_es_stack_entries = 85;
+		num_hs_stack_entries = 85;
+		num_ls_stack_entries = 85;
+		break;
+	case CHIP_TURKS:
+		num_ps_gprs = 93;
+		num_vs_gprs = 46;
+		num_temp_gprs = 4;
+		num_gs_gprs = 31;
+		num_es_gprs = 31;
+		num_hs_gprs = 23;
+		num_ls_gprs = 23;
+		num_ps_threads = 128;
+		num_vs_threads = 20;
+		num_gs_threads = 20;
+		num_es_threads = 20;
+		num_hs_threads = 20;
+		num_ls_threads = 20;
+		num_ps_stack_entries = 42;
+		num_vs_stack_entries = 42;
+		num_gs_stack_entries = 42;
+		num_es_stack_entries = 42;
+		num_hs_stack_entries = 42;
+		num_ls_stack_entries = 42;
+		break;
+	case CHIP_CAICOS:
+		num_ps_gprs = 93;
+		num_vs_gprs = 46;
+		num_temp_gprs = 4;
+		num_gs_gprs = 31;
+		num_es_gprs = 31;
+		num_hs_gprs = 23;
+		num_ls_gprs = 23;
+		num_ps_threads = 128;
+		num_vs_threads = 10;
+		num_gs_threads = 10;
+		num_es_threads = 10;
+		num_hs_threads = 10;
+		num_ls_threads = 10;
+		num_ps_stack_entries = 42;
+		num_vs_stack_entries = 42;
+		num_gs_stack_entries = 42;
+		num_es_stack_entries = 42;
+		num_hs_stack_entries = 42;
+		num_ls_stack_entries = 42;
+		break;
 	}
 
-	if (rdev->family == CHIP_CEDAR)
+	if ((rdev->family == CHIP_CEDAR) ||
+	    (rdev->family == CHIP_PALM) ||
+	    (rdev->family == CHIP_CAICOS))
 		sq_config = 0;
 	else
 		sq_config = VC_ENABLE;
diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h
index 2330f3a..c781c92 100644
--- a/drivers/gpu/drm/radeon/evergreen_reg.h
+++ b/drivers/gpu/drm/radeon/evergreen_reg.h
@@ -105,6 +105,11 @@
 #define EVERGREEN_GRPH_Y_START                          0x6830
 #define EVERGREEN_GRPH_X_END                            0x6834
 #define EVERGREEN_GRPH_Y_END                            0x6838
+#define EVERGREEN_GRPH_UPDATE                           0x6844
+#       define EVERGREEN_GRPH_SURFACE_UPDATE_PENDING    (1 << 2)
+#       define EVERGREEN_GRPH_UPDATE_LOCK               (1 << 16)
+#define EVERGREEN_GRPH_FLIP_CONTROL                     0x6848
+#       define EVERGREEN_GRPH_SURFACE_UPDATE_H_RETRACE_EN (1 << 0)
 
 /* CUR blocks at 0x6998, 0x7598, 0x10198, 0x10d98, 0x11998, 0x12598 */
 #define EVERGREEN_CUR_CONTROL                           0x6998
@@ -178,6 +183,7 @@
 #       define EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE (1 << 24)
 #define EVERGREEN_CRTC_STATUS                           0x6e8c
 #define EVERGREEN_CRTC_STATUS_POSITION                  0x6e90
+#define EVERGREEN_MASTER_UPDATE_MODE                    0x6ef8
 #define EVERGREEN_CRTC_UPDATE_LOCK                      0x6ed4
 
 #define EVERGREEN_DC_GPIO_HPD_MASK                      0x64b0
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index a73b53c..36d32d8 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -164,11 +164,13 @@
 #define		SE_SC_BUSY					(1 << 29)
 #define		SE_DB_BUSY					(1 << 30)
 #define		SE_CB_BUSY					(1 << 31)
-
+/* evergreen */
 #define	CG_MULT_THERMAL_STATUS				0x740
 #define		ASIC_T(x)			        ((x) << 16)
 #define		ASIC_T_MASK			        0x7FF0000
 #define		ASIC_T_SHIFT			        16
+/* APU */
+#define	CG_THERMAL_STATUS			        0x678
 
 #define	HDP_HOST_PATH_CNTL				0x2C00
 #define	HDP_NONSURFACE_BASE				0x2C04
@@ -181,6 +183,7 @@
 #define MC_SHARED_CHMAP						0x2004
 #define		NOOFCHAN_SHIFT					12
 #define		NOOFCHAN_MASK					0x00003000
+#define MC_SHARED_CHREMAP					0x2008
 
 #define	MC_ARB_RAMCFG					0x2760
 #define		NOOFBANK_SHIFT					0
@@ -200,6 +203,7 @@
 #define	MC_VM_AGP_BOT					0x202C
 #define	MC_VM_AGP_BASE					0x2030
 #define	MC_VM_FB_LOCATION				0x2024
+#define	MC_FUS_VM_FB_OFFSET				0x2898
 #define	MC_VM_MB_L1_TLB0_CNTL				0x2234
 #define	MC_VM_MB_L1_TLB1_CNTL				0x2238
 #define	MC_VM_MB_L1_TLB2_CNTL				0x223C
@@ -349,6 +353,9 @@
 #define		SYNC_WALKER					(1 << 25)
 #define		SYNC_ALIGNER					(1 << 26)
 
+#define	TCP_CHAN_STEER_LO				0x960c
+#define	TCP_CHAN_STEER_HI				0x9610
+
 #define	VGT_CACHE_INVALIDATION				0x88C4
 #define		CACHE_INVALIDATION(x)				((x) << 0)
 #define			VC_ONLY						0
@@ -574,6 +581,44 @@
 #       define DC_HPDx_RX_INT_TIMER(x)                    ((x) << 16)
 #       define DC_HPDx_EN                                 (1 << 28)
 
+/* PCIE link stuff */
+#define PCIE_LC_TRAINING_CNTL                             0xa1 /* PCIE_P */
+#define PCIE_LC_LINK_WIDTH_CNTL                           0xa2 /* PCIE_P */
+#       define LC_LINK_WIDTH_SHIFT                        0
+#       define LC_LINK_WIDTH_MASK                         0x7
+#       define LC_LINK_WIDTH_X0                           0
+#       define LC_LINK_WIDTH_X1                           1
+#       define LC_LINK_WIDTH_X2                           2
+#       define LC_LINK_WIDTH_X4                           3
+#       define LC_LINK_WIDTH_X8                           4
+#       define LC_LINK_WIDTH_X16                          6
+#       define LC_LINK_WIDTH_RD_SHIFT                     4
+#       define LC_LINK_WIDTH_RD_MASK                      0x70
+#       define LC_RECONFIG_ARC_MISSING_ESCAPE             (1 << 7)
+#       define LC_RECONFIG_NOW                            (1 << 8)
+#       define LC_RENEGOTIATION_SUPPORT                   (1 << 9)
+#       define LC_RENEGOTIATE_EN                          (1 << 10)
+#       define LC_SHORT_RECONFIG_EN                       (1 << 11)
+#       define LC_UPCONFIGURE_SUPPORT                     (1 << 12)
+#       define LC_UPCONFIGURE_DIS                         (1 << 13)
+#define PCIE_LC_SPEED_CNTL                                0xa4 /* PCIE_P */
+#       define LC_GEN2_EN_STRAP                           (1 << 0)
+#       define LC_TARGET_LINK_SPEED_OVERRIDE_EN           (1 << 1)
+#       define LC_FORCE_EN_HW_SPEED_CHANGE                (1 << 5)
+#       define LC_FORCE_DIS_HW_SPEED_CHANGE               (1 << 6)
+#       define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK      (0x3 << 8)
+#       define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_SHIFT     3
+#       define LC_CURRENT_DATA_RATE                       (1 << 11)
+#       define LC_VOLTAGE_TIMER_SEL_MASK                  (0xf << 14)
+#       define LC_CLR_FAILED_SPD_CHANGE_CNT               (1 << 21)
+#       define LC_OTHER_SIDE_EVER_SENT_GEN2               (1 << 23)
+#       define LC_OTHER_SIDE_SUPPORTS_GEN2                (1 << 24)
+#define MM_CFGREGS_CNTL                                   0x544c
+#       define MM_WR_TO_CFG_EN                            (1 << 3)
+#define LINK_CNTL2                                        0x88 /* F0 */
+#       define TARGET_LINK_SPEED_MASK                     (0xf << 0)
+#       define SELECTABLE_DEEMPHASIS                      (1 << 6)
+
 /*
  * PM4
  */
@@ -603,7 +648,7 @@
 #define	PACKET3_NOP					0x10
 #define	PACKET3_SET_BASE				0x11
 #define	PACKET3_CLEAR_STATE				0x12
-#define	PACKET3_INDIRECT_BUFFER_SIZE			0x13
+#define	PACKET3_INDEX_BUFFER_SIZE			0x13
 #define	PACKET3_DISPATCH_DIRECT				0x15
 #define	PACKET3_DISPATCH_INDIRECT			0x16
 #define	PACKET3_INDIRECT_BUFFER_END			0x17
@@ -644,14 +689,14 @@
 #              define PACKET3_CB8_DEST_BASE_ENA    (1 << 15)
 #              define PACKET3_CB9_DEST_BASE_ENA    (1 << 16)
 #              define PACKET3_CB10_DEST_BASE_ENA   (1 << 17)
-#              define PACKET3_CB11_DEST_BASE_ENA   (1 << 17)
+#              define PACKET3_CB11_DEST_BASE_ENA   (1 << 18)
 #              define PACKET3_FULL_CACHE_ENA       (1 << 20)
 #              define PACKET3_TC_ACTION_ENA        (1 << 23)
 #              define PACKET3_VC_ACTION_ENA        (1 << 24)
 #              define PACKET3_CB_ACTION_ENA        (1 << 25)
 #              define PACKET3_DB_ACTION_ENA        (1 << 26)
 #              define PACKET3_SH_ACTION_ENA        (1 << 27)
-#              define PACKET3_SMX_ACTION_ENA       (1 << 28)
+#              define PACKET3_SX_ACTION_ENA        (1 << 28)
 #define	PACKET3_ME_INITIALIZE				0x44
 #define		PACKET3_ME_INITIALIZE_DEVICE_ID(x) ((x) << 16)
 #define	PACKET3_COND_WRITE				0x45
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
new file mode 100644
index 0000000..5e0bef8
--- /dev/null
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -0,0 +1,316 @@
+/*
+ * Copyright 2010 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+#include <linux/firmware.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include "drmP.h"
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "radeon_drm.h"
+#include "nid.h"
+#include "atom.h"
+#include "ni_reg.h"
+
+#define EVERGREEN_PFP_UCODE_SIZE 1120
+#define EVERGREEN_PM4_UCODE_SIZE 1376
+#define EVERGREEN_RLC_UCODE_SIZE 768
+#define BTC_MC_UCODE_SIZE 6024
+
+/* Firmware Names */
+MODULE_FIRMWARE("radeon/BARTS_pfp.bin");
+MODULE_FIRMWARE("radeon/BARTS_me.bin");
+MODULE_FIRMWARE("radeon/BARTS_mc.bin");
+MODULE_FIRMWARE("radeon/BTC_rlc.bin");
+MODULE_FIRMWARE("radeon/TURKS_pfp.bin");
+MODULE_FIRMWARE("radeon/TURKS_me.bin");
+MODULE_FIRMWARE("radeon/TURKS_mc.bin");
+MODULE_FIRMWARE("radeon/CAICOS_pfp.bin");
+MODULE_FIRMWARE("radeon/CAICOS_me.bin");
+MODULE_FIRMWARE("radeon/CAICOS_mc.bin");
+
+#define BTC_IO_MC_REGS_SIZE 29
+
+static const u32 barts_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
+	{0x00000077, 0xff010100},
+	{0x00000078, 0x00000000},
+	{0x00000079, 0x00001434},
+	{0x0000007a, 0xcc08ec08},
+	{0x0000007b, 0x00040000},
+	{0x0000007c, 0x000080c0},
+	{0x0000007d, 0x09000000},
+	{0x0000007e, 0x00210404},
+	{0x00000081, 0x08a8e800},
+	{0x00000082, 0x00030444},
+	{0x00000083, 0x00000000},
+	{0x00000085, 0x00000001},
+	{0x00000086, 0x00000002},
+	{0x00000087, 0x48490000},
+	{0x00000088, 0x20244647},
+	{0x00000089, 0x00000005},
+	{0x0000008b, 0x66030000},
+	{0x0000008c, 0x00006603},
+	{0x0000008d, 0x00000100},
+	{0x0000008f, 0x00001c0a},
+	{0x00000090, 0xff000001},
+	{0x00000094, 0x00101101},
+	{0x00000095, 0x00000fff},
+	{0x00000096, 0x00116fff},
+	{0x00000097, 0x60010000},
+	{0x00000098, 0x10010000},
+	{0x00000099, 0x00006000},
+	{0x0000009a, 0x00001000},
+	{0x0000009f, 0x00946a00}
+};
+
+static const u32 turks_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
+	{0x00000077, 0xff010100},
+	{0x00000078, 0x00000000},
+	{0x00000079, 0x00001434},
+	{0x0000007a, 0xcc08ec08},
+	{0x0000007b, 0x00040000},
+	{0x0000007c, 0x000080c0},
+	{0x0000007d, 0x09000000},
+	{0x0000007e, 0x00210404},
+	{0x00000081, 0x08a8e800},
+	{0x00000082, 0x00030444},
+	{0x00000083, 0x00000000},
+	{0x00000085, 0x00000001},
+	{0x00000086, 0x00000002},
+	{0x00000087, 0x48490000},
+	{0x00000088, 0x20244647},
+	{0x00000089, 0x00000005},
+	{0x0000008b, 0x66030000},
+	{0x0000008c, 0x00006603},
+	{0x0000008d, 0x00000100},
+	{0x0000008f, 0x00001c0a},
+	{0x00000090, 0xff000001},
+	{0x00000094, 0x00101101},
+	{0x00000095, 0x00000fff},
+	{0x00000096, 0x00116fff},
+	{0x00000097, 0x60010000},
+	{0x00000098, 0x10010000},
+	{0x00000099, 0x00006000},
+	{0x0000009a, 0x00001000},
+	{0x0000009f, 0x00936a00}
+};
+
+static const u32 caicos_io_mc_regs[BTC_IO_MC_REGS_SIZE][2] = {
+	{0x00000077, 0xff010100},
+	{0x00000078, 0x00000000},
+	{0x00000079, 0x00001434},
+	{0x0000007a, 0xcc08ec08},
+	{0x0000007b, 0x00040000},
+	{0x0000007c, 0x000080c0},
+	{0x0000007d, 0x09000000},
+	{0x0000007e, 0x00210404},
+	{0x00000081, 0x08a8e800},
+	{0x00000082, 0x00030444},
+	{0x00000083, 0x00000000},
+	{0x00000085, 0x00000001},
+	{0x00000086, 0x00000002},
+	{0x00000087, 0x48490000},
+	{0x00000088, 0x20244647},
+	{0x00000089, 0x00000005},
+	{0x0000008b, 0x66030000},
+	{0x0000008c, 0x00006603},
+	{0x0000008d, 0x00000100},
+	{0x0000008f, 0x00001c0a},
+	{0x00000090, 0xff000001},
+	{0x00000094, 0x00101101},
+	{0x00000095, 0x00000fff},
+	{0x00000096, 0x00116fff},
+	{0x00000097, 0x60010000},
+	{0x00000098, 0x10010000},
+	{0x00000099, 0x00006000},
+	{0x0000009a, 0x00001000},
+	{0x0000009f, 0x00916a00}
+};
+
+int btc_mc_load_microcode(struct radeon_device *rdev)
+{
+	const __be32 *fw_data;
+	u32 mem_type, running, blackout = 0;
+	u32 *io_mc_regs;
+	int i;
+
+	if (!rdev->mc_fw)
+		return -EINVAL;
+
+	switch (rdev->family) {
+	case CHIP_BARTS:
+		io_mc_regs = (u32 *)&barts_io_mc_regs;
+		break;
+	case CHIP_TURKS:
+		io_mc_regs = (u32 *)&turks_io_mc_regs;
+		break;
+	case CHIP_CAICOS:
+	default:
+		io_mc_regs = (u32 *)&caicos_io_mc_regs;
+		break;
+	}
+
+	mem_type = (RREG32(MC_SEQ_MISC0) & MC_SEQ_MISC0_GDDR5_MASK) >> MC_SEQ_MISC0_GDDR5_SHIFT;
+	running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK;
+
+	if ((mem_type == MC_SEQ_MISC0_GDDR5_VALUE) && (running == 0)) {
+		if (running) {
+			blackout = RREG32(MC_SHARED_BLACKOUT_CNTL);
+			WREG32(MC_SHARED_BLACKOUT_CNTL, 1);
+		}
+
+		/* reset the engine and set to writable */
+		WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
+		WREG32(MC_SEQ_SUP_CNTL, 0x00000010);
+
+		/* load mc io regs */
+		for (i = 0; i < BTC_IO_MC_REGS_SIZE; i++) {
+			WREG32(MC_SEQ_IO_DEBUG_INDEX, io_mc_regs[(i << 1)]);
+			WREG32(MC_SEQ_IO_DEBUG_DATA, io_mc_regs[(i << 1) + 1]);
+		}
+		/* load the MC ucode */
+		fw_data = (const __be32 *)rdev->mc_fw->data;
+		for (i = 0; i < BTC_MC_UCODE_SIZE; i++)
+			WREG32(MC_SEQ_SUP_PGM, be32_to_cpup(fw_data++));
+
+		/* put the engine back into the active state */
+		WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
+		WREG32(MC_SEQ_SUP_CNTL, 0x00000004);
+		WREG32(MC_SEQ_SUP_CNTL, 0x00000001);
+
+		/* wait for training to complete */
+		while (!(RREG32(MC_IO_PAD_CNTL_D0) & MEM_FALL_OUT_CMD))
+			udelay(10);
+
+		if (running)
+			WREG32(MC_SHARED_BLACKOUT_CNTL, blackout);
+	}
+
+	return 0;
+}
+
+int ni_init_microcode(struct radeon_device *rdev)
+{
+	struct platform_device *pdev;
+	const char *chip_name;
+	const char *rlc_chip_name;
+	size_t pfp_req_size, me_req_size, rlc_req_size, mc_req_size;
+	char fw_name[30];
+	int err;
+
+	DRM_DEBUG("\n");
+
+	pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0);
+	err = IS_ERR(pdev);
+	if (err) {
+		printk(KERN_ERR "radeon_cp: Failed to register firmware\n");
+		return -EINVAL;
+	}
+
+	switch (rdev->family) {
+	case CHIP_BARTS:
+		chip_name = "BARTS";
+		rlc_chip_name = "BTC";
+		break;
+	case CHIP_TURKS:
+		chip_name = "TURKS";
+		rlc_chip_name = "BTC";
+		break;
+	case CHIP_CAICOS:
+		chip_name = "CAICOS";
+		rlc_chip_name = "BTC";
+		break;
+	default: BUG();
+	}
+
+	pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
+	me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
+	rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
+	mc_req_size = BTC_MC_UCODE_SIZE * 4;
+
+	DRM_INFO("Loading %s Microcode\n", chip_name);
+
+	snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
+	err = request_firmware(&rdev->pfp_fw, fw_name, &pdev->dev);
+	if (err)
+		goto out;
+	if (rdev->pfp_fw->size != pfp_req_size) {
+		printk(KERN_ERR
+		       "ni_cp: Bogus length %zu in firmware \"%s\"\n",
+		       rdev->pfp_fw->size, fw_name);
+		err = -EINVAL;
+		goto out;
+	}
+
+	snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
+	err = request_firmware(&rdev->me_fw, fw_name, &pdev->dev);
+	if (err)
+		goto out;
+	if (rdev->me_fw->size != me_req_size) {
+		printk(KERN_ERR
+		       "ni_cp: Bogus length %zu in firmware \"%s\"\n",
+		       rdev->me_fw->size, fw_name);
+		err = -EINVAL;
+	}
+
+	snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name);
+	err = request_firmware(&rdev->rlc_fw, fw_name, &pdev->dev);
+	if (err)
+		goto out;
+	if (rdev->rlc_fw->size != rlc_req_size) {
+		printk(KERN_ERR
+		       "ni_rlc: Bogus length %zu in firmware \"%s\"\n",
+		       rdev->rlc_fw->size, fw_name);
+		err = -EINVAL;
+	}
+
+	snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
+	err = request_firmware(&rdev->mc_fw, fw_name, &pdev->dev);
+	if (err)
+		goto out;
+	if (rdev->mc_fw->size != mc_req_size) {
+		printk(KERN_ERR
+		       "ni_mc: Bogus length %zu in firmware \"%s\"\n",
+		       rdev->mc_fw->size, fw_name);
+		err = -EINVAL;
+	}
+out:
+	platform_device_unregister(pdev);
+
+	if (err) {
+		if (err != -EINVAL)
+			printk(KERN_ERR
+			       "ni_cp: Failed to load firmware \"%s\"\n",
+			       fw_name);
+		release_firmware(rdev->pfp_fw);
+		rdev->pfp_fw = NULL;
+		release_firmware(rdev->me_fw);
+		rdev->me_fw = NULL;
+		release_firmware(rdev->rlc_fw);
+		rdev->rlc_fw = NULL;
+		release_firmware(rdev->mc_fw);
+		rdev->mc_fw = NULL;
+	}
+	return err;
+}
+
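
ni_init_microcode() above applies the same fetch-and-validate step to every blob: request_firmware(), then reject the image unless its byte size equals the expected dword count times four. The sketch below reproduces only that size check in user space under the assumption that the blobs are plain local files; stat() stands in for request_firmware() and the file names are hypothetical.

/* User-space analogue of the size check ni_init_microcode() applies to
 * each firmware blob; check_blob() stands in for request_firmware()
 * followed by the ->size comparison.
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/stat.h>

#define EVERGREEN_PFP_UCODE_SIZE 1120   /* dwords, from ni.c */
#define BTC_MC_UCODE_SIZE        6024   /* dwords, from ni.c */

static int check_blob(const char *path, size_t expected_dwords)
{
	struct stat st;

	if (stat(path, &st) != 0) {
		perror(path);
		return -1;
	}
	if ((size_t)st.st_size != expected_dwords * 4) {
		fprintf(stderr, "%s: bogus length %lld (expected %zu)\n",
			path, (long long)st.st_size, expected_dwords * 4);
		return -1;
	}
	return 0;
}

int main(void)
{
	/* hypothetical local copies of the blobs shipped in linux-firmware */
	int err = 0;

	err |= check_blob("BARTS_pfp.bin", EVERGREEN_PFP_UCODE_SIZE);
	err |= check_blob("BARTS_mc.bin", BTC_MC_UCODE_SIZE);
	return err ? EXIT_FAILURE : EXIT_SUCCESS;
}
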
diff --git a/drivers/gpu/drm/radeon/ni_reg.h b/drivers/gpu/drm/radeon/ni_reg.h
new file mode 100644
index 0000000..5db7b7d
--- /dev/null
+++ b/drivers/gpu/drm/radeon/ni_reg.h
@@ -0,0 +1,86 @@
+/*
+ * Copyright 2010 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+#ifndef __NI_REG_H__
+#define __NI_REG_H__
+
+/* northern islands - DCE5 */
+
+#define NI_INPUT_GAMMA_CONTROL                         0x6840
+#       define NI_GRPH_INPUT_GAMMA_MODE(x)             (((x) & 0x3) << 0)
+#       define NI_INPUT_GAMMA_USE_LUT                  0
+#       define NI_INPUT_GAMMA_BYPASS                   1
+#       define NI_INPUT_GAMMA_SRGB_24                  2
+#       define NI_INPUT_GAMMA_XVYCC_222                3
+#       define NI_OVL_INPUT_GAMMA_MODE(x)              (((x) & 0x3) << 4)
+
+#define NI_PRESCALE_GRPH_CONTROL                       0x68b4
+#       define NI_GRPH_PRESCALE_BYPASS                 (1 << 4)
+
+#define NI_PRESCALE_OVL_CONTROL                        0x68c4
+#       define NI_OVL_PRESCALE_BYPASS                  (1 << 4)
+
+#define NI_INPUT_CSC_CONTROL                           0x68d4
+#       define NI_INPUT_CSC_GRPH_MODE(x)               (((x) & 0x3) << 0)
+#       define NI_INPUT_CSC_BYPASS                     0
+#       define NI_INPUT_CSC_PROG_COEFF                 1
+#       define NI_INPUT_CSC_PROG_SHARED_MATRIXA        2
+#       define NI_INPUT_CSC_OVL_MODE(x)                (((x) & 0x3) << 4)
+
+#define NI_OUTPUT_CSC_CONTROL                          0x68f0
+#       define NI_OUTPUT_CSC_GRPH_MODE(x)              (((x) & 0x7) << 0)
+#       define NI_OUTPUT_CSC_BYPASS                    0
+#       define NI_OUTPUT_CSC_TV_RGB                    1
+#       define NI_OUTPUT_CSC_YCBCR_601                 2
+#       define NI_OUTPUT_CSC_YCBCR_709                 3
+#       define NI_OUTPUT_CSC_PROG_COEFF                4
+#       define NI_OUTPUT_CSC_PROG_SHARED_MATRIXB       5
+#       define NI_OUTPUT_CSC_OVL_MODE(x)               (((x) & 0x7) << 4)
+
+#define NI_DEGAMMA_CONTROL                             0x6960
+#       define NI_GRPH_DEGAMMA_MODE(x)                 (((x) & 0x3) << 0)
+#       define NI_DEGAMMA_BYPASS                       0
+#       define NI_DEGAMMA_SRGB_24                      1
+#       define NI_DEGAMMA_XVYCC_222                    2
+#       define NI_OVL_DEGAMMA_MODE(x)                  (((x) & 0x3) << 4)
+#       define NI_ICON_DEGAMMA_MODE(x)                 (((x) & 0x3) << 8)
+#       define NI_CURSOR_DEGAMMA_MODE(x)               (((x) & 0x3) << 12)
+
+#define NI_GAMUT_REMAP_CONTROL                         0x6964
+#       define NI_GRPH_GAMUT_REMAP_MODE(x)             (((x) & 0x3) << 0)
+#       define NI_GAMUT_REMAP_BYPASS                   0
+#       define NI_GAMUT_REMAP_PROG_COEFF               1
+#       define NI_GAMUT_REMAP_PROG_SHARED_MATRIXA      2
+#       define NI_GAMUT_REMAP_PROG_SHARED_MATRIXB      3
+#       define NI_OVL_GAMUT_REMAP_MODE(x)              (((x) & 0x3) << 4)
+
+#define NI_REGAMMA_CONTROL                             0x6a80
+#       define NI_GRPH_REGAMMA_MODE(x)                 (((x) & 0x7) << 0)
+#       define NI_REGAMMA_BYPASS                       0
+#       define NI_REGAMMA_SRGB_24                      1
+#       define NI_REGAMMA_XVYCC_222                    2
+#       define NI_REGAMMA_PROG_A                       3
+#       define NI_REGAMMA_PROG_B                       4
+#       define NI_OVL_REGAMMA_MODE(x)                  (((x) & 0x7) << 4)
+
+#endif
diff --git a/drivers/gpu/drm/radeon/nid.h b/drivers/gpu/drm/radeon/nid.h
new file mode 100644
index 0000000..f7b4453
--- /dev/null
+++ b/drivers/gpu/drm/radeon/nid.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2010 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+#ifndef NI_H
+#define NI_H
+
+#define MC_SHARED_BLACKOUT_CNTL           		0x20ac
+#define MC_SEQ_SUP_CNTL           			0x28c8
+#define		RUN_MASK      				(1 << 0)
+#define MC_SEQ_SUP_PGM           			0x28cc
+#define MC_IO_PAD_CNTL_D0           			0x29d0
+#define		MEM_FALL_OUT_CMD      			(1 << 8)
+#define MC_SEQ_MISC0           				0x2a00
+#define		MC_SEQ_MISC0_GDDR5_SHIFT      		28
+#define		MC_SEQ_MISC0_GDDR5_MASK      		0xf0000000
+#define		MC_SEQ_MISC0_GDDR5_VALUE      		5
+#define MC_SEQ_IO_DEBUG_INDEX           		0x2a44
+#define MC_SEQ_IO_DEBUG_DATA           			0x2a48
+
+#endif
+
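
btc_mc_load_microcode() only reprograms the MC when the memory type decoded from MC_SEQ_MISC0 is GDDR5, using the mask/shift/value trio added in nid.h above. A minimal decode of that field, with a made-up sample readback, looks like this:

/* Decode the GDDR5 field of MC_SEQ_MISC0 using the nid.h definitions;
 * the sample register value is invented for illustration.
 */
#include <stdint.h>
#include <stdio.h>

#define MC_SEQ_MISC0_GDDR5_SHIFT 28
#define MC_SEQ_MISC0_GDDR5_MASK  0xf0000000
#define MC_SEQ_MISC0_GDDR5_VALUE 5

int main(void)
{
	uint32_t mc_seq_misc0 = 0x50000000;   /* hypothetical readback */
	uint32_t mem_type = (mc_seq_misc0 & MC_SEQ_MISC0_GDDR5_MASK) >>
			    MC_SEQ_MISC0_GDDR5_SHIFT;

	printf("mem_type=%u -> %s\n", mem_type,
	       mem_type == MC_SEQ_MISC0_GDDR5_VALUE ? "GDDR5" : "not GDDR5");
	return 0;
}
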
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 8e10aa9f..f637595 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -68,6 +68,56 @@
  * r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280
  */
 
+void r100_pre_page_flip(struct radeon_device *rdev, int crtc)
+{
+	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc];
+	u32 tmp;
+
+	/* make sure flip is at vb rather than hb */
+	tmp = RREG32(RADEON_CRTC_OFFSET_CNTL + radeon_crtc->crtc_offset);
+	tmp &= ~RADEON_CRTC_OFFSET_FLIP_CNTL;
+	/* make sure pending bit is asserted */
+	tmp |= RADEON_CRTC_GUI_TRIG_OFFSET_LEFT_EN;
+	WREG32(RADEON_CRTC_OFFSET_CNTL + radeon_crtc->crtc_offset, tmp);
+
+	/* set pageflip to happen as late as possible in the vblank interval.
+	 * same field for crtc1/2
+	 */
+	tmp = RREG32(RADEON_CRTC_GEN_CNTL);
+	tmp &= ~RADEON_CRTC_VSTAT_MODE_MASK;
+	WREG32(RADEON_CRTC_GEN_CNTL, tmp);
+
+	/* enable the pflip int */
+	radeon_irq_kms_pflip_irq_get(rdev, crtc);
+}
+
+void r100_post_page_flip(struct radeon_device *rdev, int crtc)
+{
+	/* disable the pflip int */
+	radeon_irq_kms_pflip_irq_put(rdev, crtc);
+}
+
+u32 r100_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
+{
+	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
+	u32 tmp = ((u32)crtc_base) | RADEON_CRTC_OFFSET__OFFSET_LOCK;
+
+	/* Lock the graphics update lock */
+	/* update the scanout addresses */
+	WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, tmp);
+
+	/* Wait for update_pending to go high. */
+	while (!(RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset) & RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET));
+	DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
+
+	/* Unlock the lock, so double-buffering can take place inside vblank */
+	tmp &= ~RADEON_CRTC_OFFSET__OFFSET_LOCK;
+	WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, tmp);
+
+	/* Return current update_pending status: */
+	return RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset) & RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET;
+}
+
 void r100_pm_get_dynpm_state(struct radeon_device *rdev)
 {
 	int i;
@@ -526,10 +576,12 @@
 	if (rdev->irq.gui_idle) {
 		tmp |= RADEON_GUI_IDLE_MASK;
 	}
-	if (rdev->irq.crtc_vblank_int[0]) {
+	if (rdev->irq.crtc_vblank_int[0] ||
+	    rdev->irq.pflip[0]) {
 		tmp |= RADEON_CRTC_VBLANK_MASK;
 	}
-	if (rdev->irq.crtc_vblank_int[1]) {
+	if (rdev->irq.crtc_vblank_int[1] ||
+	    rdev->irq.pflip[1]) {
 		tmp |= RADEON_CRTC2_VBLANK_MASK;
 	}
 	if (rdev->irq.hpd[0]) {
@@ -600,14 +652,22 @@
 		}
 		/* Vertical blank interrupts */
 		if (status & RADEON_CRTC_VBLANK_STAT) {
-			drm_handle_vblank(rdev->ddev, 0);
-			rdev->pm.vblank_sync = true;
-			wake_up(&rdev->irq.vblank_queue);
+			if (rdev->irq.crtc_vblank_int[0]) {
+				drm_handle_vblank(rdev->ddev, 0);
+				rdev->pm.vblank_sync = true;
+				wake_up(&rdev->irq.vblank_queue);
+			}
+			if (rdev->irq.pflip[0])
+				radeon_crtc_handle_flip(rdev, 0);
 		}
 		if (status & RADEON_CRTC2_VBLANK_STAT) {
-			drm_handle_vblank(rdev->ddev, 1);
-			rdev->pm.vblank_sync = true;
-			wake_up(&rdev->irq.vblank_queue);
+			if (rdev->irq.crtc_vblank_int[1]) {
+				drm_handle_vblank(rdev->ddev, 1);
+				rdev->pm.vblank_sync = true;
+				wake_up(&rdev->irq.vblank_queue);
+			}
+			if (rdev->irq.pflip[1])
+				radeon_crtc_handle_flip(rdev, 1);
 		}
 		if (status & RADEON_FP_DETECT_STAT) {
 			queue_hotplug = true;
@@ -622,7 +682,7 @@
 	/* reset gui idle ack.  the status bit is broken */
 	rdev->irq.gui_idle_acked = false;
 	if (queue_hotplug)
-		queue_work(rdev->wq, &rdev->hotplug_work);
+		schedule_work(&rdev->hotplug_work);
 	if (rdev->msi_enabled) {
 		switch (rdev->family) {
 		case CHIP_RS400:
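
The r100 vblank handler above now calls radeon_irq_kms_pflip_irq_get()/put(), whose bodies are not part of this hunk. The sketch below only illustrates the refcounted enable/disable idea suggested by the pflip_refcount/pflip_lock fields added to struct radeon_irq further down; a pthread mutex and printf() stand in for the driver's spinlock and radeon_irq_set(), so this is an assumption about the shape of the helpers, not their actual implementation.

/* Refcounted interrupt enable/disable, as suggested by the pflip_refcount/
 * pflip_lock fields added to struct radeon_irq below.  A pthread mutex and
 * printf() stand in for the driver's spinlock and radeon_irq_set().
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t pflip_lock = PTHREAD_MUTEX_INITIALIZER;
static int pflip_refcount;
static int pflip_enabled;

static void irq_set(void)
{
	/* placeholder for reprogramming the interrupt mask registers */
	printf("pflip irq %s\n", pflip_enabled ? "enabled" : "disabled");
}

static void pflip_irq_get(void)
{
	pthread_mutex_lock(&pflip_lock);
	if (pflip_refcount++ == 0) {        /* first user: turn the irq on */
		pflip_enabled = 1;
		irq_set();
	}
	pthread_mutex_unlock(&pflip_lock);
}

static void pflip_irq_put(void)
{
	pthread_mutex_lock(&pflip_lock);
	if (--pflip_refcount == 0) {        /* last user: turn the irq off */
		pflip_enabled = 0;
		irq_set();
	}
	pthread_mutex_unlock(&pflip_lock);
}

int main(void)
{
	pflip_irq_get();    /* pre_page_flip(): first flip queued */
	pflip_irq_get();    /* second flip queued before the first completes */
	pflip_irq_put();
	pflip_irq_put();    /* post_page_flip(): last user gone, irq masked again */
	return 0;
}
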
diff --git a/drivers/gpu/drm/radeon/r100d.h b/drivers/gpu/drm/radeon/r100d.h
index b121b6c..eab9176 100644
--- a/drivers/gpu/drm/radeon/r100d.h
+++ b/drivers/gpu/drm/radeon/r100d.h
@@ -551,7 +551,7 @@
 #define   S_000360_CUR2_LOCK(x)                        (((x) & 0x1) << 31)
 #define   G_000360_CUR2_LOCK(x)                        (((x) >> 31) & 0x1)
 #define   C_000360_CUR2_LOCK                           0x7FFFFFFF
-#define R_0003C2_GENMO_WT                            0x0003C0
+#define R_0003C2_GENMO_WT                            0x0003C2
 #define   S_0003C2_GENMO_MONO_ADDRESS_B(x)             (((x) & 0x1) << 0)
 #define   G_0003C2_GENMO_MONO_ADDRESS_B(x)             (((x) >> 0) & 0x1)
 #define   C_0003C2_GENMO_MONO_ADDRESS_B                0xFE
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index cde1d34..fae5e70 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -558,10 +558,7 @@
 
 	/* FIXME wait for idle */
 
-	if (rdev->family < CHIP_R600)
-		link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
-	else
-		link_width_cntl = RREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
+	link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
 
 	switch ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) >> RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT) {
 	case RADEON_PCIE_LC_LINK_WIDTH_X0:
@@ -745,6 +742,11 @@
 		break;
 	case 0x4E00:
 		/* RB3D_CCTL */
+		if ((idx_value & (1 << 10)) && /* CMASK_ENABLE */
+		    p->rdev->cmask_filp != p->filp) {
+			DRM_ERROR("Invalid RB3D_CCTL: Cannot enable CMASK.\n");
+			return -EINVAL;
+		}
 		track->num_cb = ((idx_value >> 5) & 0x3) + 1;
 		break;
 	case 0x4E38:
@@ -787,6 +789,13 @@
 		case 15:
 			track->cb[i].cpp = 2;
 			break;
+		case 5:
+			if (p->rdev->family < CHIP_RV515) {
+				DRM_ERROR("Invalid color buffer format (%d)!\n",
+					  ((idx_value >> 21) & 0xF));
+				return -EINVAL;
+			}
+			/* Pass through. */
 		case 6:
 			track->cb[i].cpp = 4;
 			break;
@@ -1199,6 +1208,10 @@
 		if (p->rdev->hyperz_filp != p->filp)
 			return -EINVAL;
 		break;
+	case PACKET3_3D_CLEAR_CMASK:
+		if (p->rdev->cmask_filp != p->filp)
+			return -EINVAL;
+		break;
 	case PACKET3_NOP:
 		break;
 	default:
diff --git a/drivers/gpu/drm/radeon/r300d.h b/drivers/gpu/drm/radeon/r300d.h
index 0c036c60d..1f519a5 100644
--- a/drivers/gpu/drm/radeon/r300d.h
+++ b/drivers/gpu/drm/radeon/r300d.h
@@ -54,6 +54,7 @@
 #define		PACKET3_3D_DRAW_IMMD_2		0x35
 #define		PACKET3_3D_DRAW_INDX_2		0x36
 #define		PACKET3_3D_CLEAR_HIZ		0x37
+#define		PACKET3_3D_CLEAR_CMASK		0x38
 #define		PACKET3_BITBLT_MULTI		0x9B
 
 #define PACKET0(reg, n)	(CP_PACKET0 |					\
diff --git a/drivers/gpu/drm/radeon/r500_reg.h b/drivers/gpu/drm/radeon/r500_reg.h
index 6ac1f60..fc43705 100644
--- a/drivers/gpu/drm/radeon/r500_reg.h
+++ b/drivers/gpu/drm/radeon/r500_reg.h
@@ -355,6 +355,8 @@
 #define AVIVO_D1CRTC_FRAME_COUNT                                0x60a4
 #define AVIVO_D1CRTC_STEREO_CONTROL                             0x60c4
 
+#define AVIVO_D1MODE_MASTER_UPDATE_MODE                         0x60e4
+
 /* master controls */
 #define AVIVO_DC_CRTC_MASTER_EN                                 0x60f8
 #define AVIVO_DC_CRTC_TV_CONTROL                                0x60fc
@@ -409,8 +411,10 @@
 #define AVIVO_D1GRPH_X_END                                      0x6134
 #define AVIVO_D1GRPH_Y_END                                      0x6138
 #define AVIVO_D1GRPH_UPDATE                                     0x6144
+#       define AVIVO_D1GRPH_SURFACE_UPDATE_PENDING              (1 << 2)
 #       define AVIVO_D1GRPH_UPDATE_LOCK                         (1 << 16)
 #define AVIVO_D1GRPH_FLIP_CONTROL                               0x6148
+#       define AVIVO_D1GRPH_SURFACE_UPDATE_H_RETRACE_EN         (1 << 0)
 
 #define AVIVO_D1CUR_CONTROL                     0x6400
 #       define AVIVO_D1CURSOR_EN                (1 << 0)
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 9c92db7..6b50716 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -83,6 +83,9 @@
 MODULE_FIRMWARE("radeon/CYPRESS_pfp.bin");
 MODULE_FIRMWARE("radeon/CYPRESS_me.bin");
 MODULE_FIRMWARE("radeon/CYPRESS_rlc.bin");
+MODULE_FIRMWARE("radeon/PALM_pfp.bin");
+MODULE_FIRMWARE("radeon/PALM_me.bin");
+MODULE_FIRMWARE("radeon/SUMO_rlc.bin");
 
 int r600_debugfs_mc_info_init(struct radeon_device *rdev);
 
@@ -91,6 +94,7 @@
 void r600_gpu_init(struct radeon_device *rdev);
 void r600_fini(struct radeon_device *rdev);
 void r600_irq_disable(struct radeon_device *rdev);
+static void r600_pcie_gen2_enable(struct radeon_device *rdev);
 
 /* get temperature in millidegrees */
 u32 rv6xx_get_temp(struct radeon_device *rdev)
@@ -1164,7 +1168,7 @@
  * Note: GTT start, end, size should be initialized before calling this
  * function on AGP platform.
  */
-void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
+static void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
 {
 	u64 size_bf, size_af;
 
@@ -2009,6 +2013,10 @@
 		chip_name = "CYPRESS";
 		rlc_chip_name = "CYPRESS";
 		break;
+	case CHIP_PALM:
+		chip_name = "PALM";
+		rlc_chip_name = "SUMO";
+		break;
 	default: BUG();
 	}
 
@@ -2372,6 +2380,9 @@
 {
 	int r;
 
+	/* enable pcie gen2 link */
+	r600_pcie_gen2_enable(rdev);
+
 	if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
 		r = r600_init_microcode(rdev);
 		if (r) {
@@ -2874,6 +2885,8 @@
 	WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
 	WREG32(GRBM_INT_CNTL, 0);
 	WREG32(DxMODE_INT_MASK, 0);
+	WREG32(D1GRPH_INTERRUPT_CONTROL, 0);
+	WREG32(D2GRPH_INTERRUPT_CONTROL, 0);
 	if (ASIC_IS_DCE3(rdev)) {
 		WREG32(DCE3_DACA_AUTODETECT_INT_CONTROL, 0);
 		WREG32(DCE3_DACB_AUTODETECT_INT_CONTROL, 0);
@@ -2998,6 +3011,7 @@
 	u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0;
 	u32 grbm_int_cntl = 0;
 	u32 hdmi1, hdmi2;
+	u32 d1grph = 0, d2grph = 0;
 
 	if (!rdev->irq.installed) {
 		WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
@@ -3034,11 +3048,13 @@
 		cp_int_cntl |= RB_INT_ENABLE;
 		cp_int_cntl |= TIME_STAMP_INT_ENABLE;
 	}
-	if (rdev->irq.crtc_vblank_int[0]) {
+	if (rdev->irq.crtc_vblank_int[0] ||
+	    rdev->irq.pflip[0]) {
 		DRM_DEBUG("r600_irq_set: vblank 0\n");
 		mode_int |= D1MODE_VBLANK_INT_MASK;
 	}
-	if (rdev->irq.crtc_vblank_int[1]) {
+	if (rdev->irq.crtc_vblank_int[1] ||
+	    rdev->irq.pflip[1]) {
 		DRM_DEBUG("r600_irq_set: vblank 1\n");
 		mode_int |= D2MODE_VBLANK_INT_MASK;
 	}
@@ -3081,6 +3097,8 @@
 
 	WREG32(CP_INT_CNTL, cp_int_cntl);
 	WREG32(DxMODE_INT_MASK, mode_int);
+	WREG32(D1GRPH_INTERRUPT_CONTROL, d1grph);
+	WREG32(D2GRPH_INTERRUPT_CONTROL, d2grph);
 	WREG32(GRBM_INT_CNTL, grbm_int_cntl);
 	WREG32(R600_HDMI_BLOCK1 + R600_HDMI_CNTL, hdmi1);
 	if (ASIC_IS_DCE3(rdev)) {
@@ -3103,32 +3121,35 @@
 	return 0;
 }
 
-static inline void r600_irq_ack(struct radeon_device *rdev,
-				u32 *disp_int,
-				u32 *disp_int_cont,
-				u32 *disp_int_cont2)
+static inline void r600_irq_ack(struct radeon_device *rdev)
 {
 	u32 tmp;
 
 	if (ASIC_IS_DCE3(rdev)) {
-		*disp_int = RREG32(DCE3_DISP_INTERRUPT_STATUS);
-		*disp_int_cont = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE);
-		*disp_int_cont2 = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE2);
+		rdev->irq.stat_regs.r600.disp_int = RREG32(DCE3_DISP_INTERRUPT_STATUS);
+		rdev->irq.stat_regs.r600.disp_int_cont = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE);
+		rdev->irq.stat_regs.r600.disp_int_cont2 = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE2);
 	} else {
-		*disp_int = RREG32(DISP_INTERRUPT_STATUS);
-		*disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
-		*disp_int_cont2 = 0;
+		rdev->irq.stat_regs.r600.disp_int = RREG32(DISP_INTERRUPT_STATUS);
+		rdev->irq.stat_regs.r600.disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
+		rdev->irq.stat_regs.r600.disp_int_cont2 = 0;
 	}
+	rdev->irq.stat_regs.r600.d1grph_int = RREG32(D1GRPH_INTERRUPT_STATUS);
+	rdev->irq.stat_regs.r600.d2grph_int = RREG32(D2GRPH_INTERRUPT_STATUS);
 
-	if (*disp_int & LB_D1_VBLANK_INTERRUPT)
+	if (rdev->irq.stat_regs.r600.d1grph_int & DxGRPH_PFLIP_INT_OCCURRED)
+		WREG32(D1GRPH_INTERRUPT_STATUS, DxGRPH_PFLIP_INT_CLEAR);
+	if (rdev->irq.stat_regs.r600.d2grph_int & DxGRPH_PFLIP_INT_OCCURRED)
+		WREG32(D2GRPH_INTERRUPT_STATUS, DxGRPH_PFLIP_INT_CLEAR);
+	if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT)
 		WREG32(D1MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
-	if (*disp_int & LB_D1_VLINE_INTERRUPT)
+	if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VLINE_INTERRUPT)
 		WREG32(D1MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
-	if (*disp_int & LB_D2_VBLANK_INTERRUPT)
+	if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT)
 		WREG32(D2MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
-	if (*disp_int & LB_D2_VLINE_INTERRUPT)
+	if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VLINE_INTERRUPT)
 		WREG32(D2MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
-	if (*disp_int & DC_HPD1_INTERRUPT) {
+	if (rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT) {
 		if (ASIC_IS_DCE3(rdev)) {
 			tmp = RREG32(DC_HPD1_INT_CONTROL);
 			tmp |= DC_HPDx_INT_ACK;
@@ -3139,7 +3160,7 @@
 			WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
 		}
 	}
-	if (*disp_int & DC_HPD2_INTERRUPT) {
+	if (rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT) {
 		if (ASIC_IS_DCE3(rdev)) {
 			tmp = RREG32(DC_HPD2_INT_CONTROL);
 			tmp |= DC_HPDx_INT_ACK;
@@ -3150,7 +3171,7 @@
 			WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
 		}
 	}
-	if (*disp_int_cont & DC_HPD3_INTERRUPT) {
+	if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD3_INTERRUPT) {
 		if (ASIC_IS_DCE3(rdev)) {
 			tmp = RREG32(DC_HPD3_INT_CONTROL);
 			tmp |= DC_HPDx_INT_ACK;
@@ -3161,18 +3182,18 @@
 			WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
 		}
 	}
-	if (*disp_int_cont & DC_HPD4_INTERRUPT) {
+	if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT) {
 		tmp = RREG32(DC_HPD4_INT_CONTROL);
 		tmp |= DC_HPDx_INT_ACK;
 		WREG32(DC_HPD4_INT_CONTROL, tmp);
 	}
 	if (ASIC_IS_DCE32(rdev)) {
-		if (*disp_int_cont2 & DC_HPD5_INTERRUPT) {
+		if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT) {
 			tmp = RREG32(DC_HPD5_INT_CONTROL);
 			tmp |= DC_HPDx_INT_ACK;
 			WREG32(DC_HPD5_INT_CONTROL, tmp);
 		}
-		if (*disp_int_cont2 & DC_HPD6_INTERRUPT) {
+		if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT) {
 			tmp = RREG32(DC_HPD5_INT_CONTROL);
 			tmp |= DC_HPDx_INT_ACK;
 			WREG32(DC_HPD6_INT_CONTROL, tmp);
@@ -3194,12 +3215,10 @@
 
 void r600_irq_disable(struct radeon_device *rdev)
 {
-	u32 disp_int, disp_int_cont, disp_int_cont2;
-
 	r600_disable_interrupts(rdev);
 	/* Wait and acknowledge irq */
 	mdelay(1);
-	r600_irq_ack(rdev, &disp_int, &disp_int_cont, &disp_int_cont2);
+	r600_irq_ack(rdev);
 	r600_disable_interrupt_state(rdev);
 }
 
@@ -3262,7 +3281,7 @@
 	u32 wptr = r600_get_ih_wptr(rdev);
 	u32 rptr = rdev->ih.rptr;
 	u32 src_id, src_data;
-	u32 ring_index, disp_int, disp_int_cont, disp_int_cont2;
+	u32 ring_index;
 	unsigned long flags;
 	bool queue_hotplug = false;
 
@@ -3283,7 +3302,7 @@
 
 restart_ih:
 	/* display interrupts */
-	r600_irq_ack(rdev, &disp_int, &disp_int_cont, &disp_int_cont2);
+	r600_irq_ack(rdev);
 
 	rdev->ih.wptr = wptr;
 	while (rptr != wptr) {
@@ -3296,17 +3315,21 @@
 		case 1: /* D1 vblank/vline */
 			switch (src_data) {
 			case 0: /* D1 vblank */
-				if (disp_int & LB_D1_VBLANK_INTERRUPT) {
-					drm_handle_vblank(rdev->ddev, 0);
-					rdev->pm.vblank_sync = true;
-					wake_up(&rdev->irq.vblank_queue);
-					disp_int &= ~LB_D1_VBLANK_INTERRUPT;
+				if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT) {
+					if (rdev->irq.crtc_vblank_int[0]) {
+						drm_handle_vblank(rdev->ddev, 0);
+						rdev->pm.vblank_sync = true;
+						wake_up(&rdev->irq.vblank_queue);
+					}
+					if (rdev->irq.pflip[0])
+						radeon_crtc_handle_flip(rdev, 0);
+					rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
 					DRM_DEBUG("IH: D1 vblank\n");
 				}
 				break;
 			case 1: /* D1 vline */
-				if (disp_int & LB_D1_VLINE_INTERRUPT) {
-					disp_int &= ~LB_D1_VLINE_INTERRUPT;
+				if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VLINE_INTERRUPT) {
+					rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VLINE_INTERRUPT;
 					DRM_DEBUG("IH: D1 vline\n");
 				}
 				break;
@@ -3318,17 +3341,21 @@
 		case 5: /* D2 vblank/vline */
 			switch (src_data) {
 			case 0: /* D2 vblank */
-				if (disp_int & LB_D2_VBLANK_INTERRUPT) {
-					drm_handle_vblank(rdev->ddev, 1);
-					rdev->pm.vblank_sync = true;
-					wake_up(&rdev->irq.vblank_queue);
-					disp_int &= ~LB_D2_VBLANK_INTERRUPT;
+				if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT) {
+					if (rdev->irq.crtc_vblank_int[1]) {
+						drm_handle_vblank(rdev->ddev, 1);
+						rdev->pm.vblank_sync = true;
+						wake_up(&rdev->irq.vblank_queue);
+					}
+					if (rdev->irq.pflip[1])
+						radeon_crtc_handle_flip(rdev, 1);
+					rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VBLANK_INTERRUPT;
 					DRM_DEBUG("IH: D2 vblank\n");
 				}
 				break;
 			case 1: /* D1 vline */
-				if (disp_int & LB_D2_VLINE_INTERRUPT) {
-					disp_int &= ~LB_D2_VLINE_INTERRUPT;
+				if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VLINE_INTERRUPT) {
+					rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VLINE_INTERRUPT;
 					DRM_DEBUG("IH: D2 vline\n");
 				}
 				break;
@@ -3340,43 +3367,43 @@
 		case 19: /* HPD/DAC hotplug */
 			switch (src_data) {
 			case 0:
-				if (disp_int & DC_HPD1_INTERRUPT) {
-					disp_int &= ~DC_HPD1_INTERRUPT;
+				if (rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT) {
+					rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD1_INTERRUPT;
 					queue_hotplug = true;
 					DRM_DEBUG("IH: HPD1\n");
 				}
 				break;
 			case 1:
-				if (disp_int & DC_HPD2_INTERRUPT) {
-					disp_int &= ~DC_HPD2_INTERRUPT;
+				if (rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT) {
+					rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD2_INTERRUPT;
 					queue_hotplug = true;
 					DRM_DEBUG("IH: HPD2\n");
 				}
 				break;
 			case 4:
-				if (disp_int_cont & DC_HPD3_INTERRUPT) {
-					disp_int_cont &= ~DC_HPD3_INTERRUPT;
+				if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD3_INTERRUPT) {
+					rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD3_INTERRUPT;
 					queue_hotplug = true;
 					DRM_DEBUG("IH: HPD3\n");
 				}
 				break;
 			case 5:
-				if (disp_int_cont & DC_HPD4_INTERRUPT) {
-					disp_int_cont &= ~DC_HPD4_INTERRUPT;
+				if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT) {
+					rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD4_INTERRUPT;
 					queue_hotplug = true;
 					DRM_DEBUG("IH: HPD4\n");
 				}
 				break;
 			case 10:
-				if (disp_int_cont2 & DC_HPD5_INTERRUPT) {
-					disp_int_cont2 &= ~DC_HPD5_INTERRUPT;
+				if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT) {
+					rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD5_INTERRUPT;
 					queue_hotplug = true;
 					DRM_DEBUG("IH: HPD5\n");
 				}
 				break;
 			case 12:
-				if (disp_int_cont2 & DC_HPD6_INTERRUPT) {
-					disp_int_cont2 &= ~DC_HPD6_INTERRUPT;
+				if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT) {
+					rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD6_INTERRUPT;
 					queue_hotplug = true;
 					DRM_DEBUG("IH: HPD6\n");
 				}
@@ -3419,7 +3446,7 @@
 	if (wptr != rdev->ih.wptr)
 		goto restart_ih;
 	if (queue_hotplug)
-		queue_work(rdev->wq, &rdev->hotplug_work);
+		schedule_work(&rdev->hotplug_work);
 	rdev->ih.rptr = rptr;
 	WREG32(IH_RB_RPTR, rdev->ih.rptr);
 	spin_unlock_irqrestore(&rdev->ih.lock, flags);
@@ -3508,3 +3535,219 @@
 	} else
 		WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
 }
+
+void r600_set_pcie_lanes(struct radeon_device *rdev, int lanes)
+{
+	u32 link_width_cntl, mask, target_reg;
+
+	if (rdev->flags & RADEON_IS_IGP)
+		return;
+
+	if (!(rdev->flags & RADEON_IS_PCIE))
+		return;
+
+	/* x2 cards have a special sequence */
+	if (ASIC_IS_X2(rdev))
+		return;
+
+	/* FIXME wait for idle */
+
+	switch (lanes) {
+	case 0:
+		mask = RADEON_PCIE_LC_LINK_WIDTH_X0;
+		break;
+	case 1:
+		mask = RADEON_PCIE_LC_LINK_WIDTH_X1;
+		break;
+	case 2:
+		mask = RADEON_PCIE_LC_LINK_WIDTH_X2;
+		break;
+	case 4:
+		mask = RADEON_PCIE_LC_LINK_WIDTH_X4;
+		break;
+	case 8:
+		mask = RADEON_PCIE_LC_LINK_WIDTH_X8;
+		break;
+	case 12:
+		mask = RADEON_PCIE_LC_LINK_WIDTH_X12;
+		break;
+	case 16:
+	default:
+		mask = RADEON_PCIE_LC_LINK_WIDTH_X16;
+		break;
+	}
+
+	link_width_cntl = RREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
+
+	if ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) ==
+	    (mask << RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT))
+		return;
+
+	if (link_width_cntl & R600_PCIE_LC_UPCONFIGURE_DIS)
+		return;
+
+	link_width_cntl &= ~(RADEON_PCIE_LC_LINK_WIDTH_MASK |
+			     RADEON_PCIE_LC_RECONFIG_NOW |
+			     R600_PCIE_LC_RENEGOTIATE_EN |
+			     R600_PCIE_LC_RECONFIG_ARC_MISSING_ESCAPE);
+	link_width_cntl |= mask;
+
+	WREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+
+	/* some northbridges can renegotiate the link rather than requiring
+	 * a complete re-config.
+	 * e.g., AMD 780/790 northbridges (pci ids: 0x5956, 0x5957, 0x5958, etc.)
+	 */
+	if (link_width_cntl & R600_PCIE_LC_RENEGOTIATION_SUPPORT)
+		link_width_cntl |= R600_PCIE_LC_RENEGOTIATE_EN | R600_PCIE_LC_UPCONFIGURE_SUPPORT;
+	else
+		link_width_cntl |= R600_PCIE_LC_RECONFIG_ARC_MISSING_ESCAPE;
+
+	WREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL, (link_width_cntl |
+						       RADEON_PCIE_LC_RECONFIG_NOW));
+
+	if (rdev->family >= CHIP_RV770)
+		target_reg = R700_TARGET_AND_CURRENT_PROFILE_INDEX;
+	else
+		target_reg = R600_TARGET_AND_CURRENT_PROFILE_INDEX;
+
+	/* wait for lane set to complete */
+	link_width_cntl = RREG32(target_reg);
+	while (link_width_cntl == 0xffffffff)
+		link_width_cntl = RREG32(target_reg);
+
+}
+
+int r600_get_pcie_lanes(struct radeon_device *rdev)
+{
+	u32 link_width_cntl;
+
+	if (rdev->flags & RADEON_IS_IGP)
+		return 0;
+
+	if (!(rdev->flags & RADEON_IS_PCIE))
+		return 0;
+
+	/* x2 cards have a special sequence */
+	if (ASIC_IS_X2(rdev))
+		return 0;
+
+	/* FIXME wait for idle */
+
+	link_width_cntl = RREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
+
+	switch ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) >> RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT) {
+	case RADEON_PCIE_LC_LINK_WIDTH_X0:
+		return 0;
+	case RADEON_PCIE_LC_LINK_WIDTH_X1:
+		return 1;
+	case RADEON_PCIE_LC_LINK_WIDTH_X2:
+		return 2;
+	case RADEON_PCIE_LC_LINK_WIDTH_X4:
+		return 4;
+	case RADEON_PCIE_LC_LINK_WIDTH_X8:
+		return 8;
+	case RADEON_PCIE_LC_LINK_WIDTH_X16:
+	default:
+		return 16;
+	}
+}
+
+static void r600_pcie_gen2_enable(struct radeon_device *rdev)
+{
+	u32 link_width_cntl, lanes, speed_cntl, training_cntl, tmp;
+	u16 link_cntl2;
+
+	if (rdev->flags & RADEON_IS_IGP)
+		return;
+
+	if (!(rdev->flags & RADEON_IS_PCIE))
+		return;
+
+	/* x2 cards have a special sequence */
+	if (ASIC_IS_X2(rdev))
+		return;
+
+	/* only RV6xx+ chips are supported */
+	if (rdev->family <= CHIP_R600)
+		return;
+
+	/* 55 nm r6xx asics */
+	if ((rdev->family == CHIP_RV670) ||
+	    (rdev->family == CHIP_RV620) ||
+	    (rdev->family == CHIP_RV635)) {
+		/* advertise upconfig capability */
+		link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
+		link_width_cntl &= ~LC_UPCONFIGURE_DIS;
+		WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+		link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
+		if (link_width_cntl & LC_RENEGOTIATION_SUPPORT) {
+			lanes = (link_width_cntl & LC_LINK_WIDTH_RD_MASK) >> LC_LINK_WIDTH_RD_SHIFT;
+			link_width_cntl &= ~(LC_LINK_WIDTH_MASK |
+					     LC_RECONFIG_ARC_MISSING_ESCAPE);
+			link_width_cntl |= lanes | LC_RECONFIG_NOW | LC_RENEGOTIATE_EN;
+			WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+		} else {
+			link_width_cntl |= LC_UPCONFIGURE_DIS;
+			WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+		}
+	}
+
+	speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+	if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) &&
+	    (speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) {
+
+		/* 55 nm r6xx asics */
+		if ((rdev->family == CHIP_RV670) ||
+		    (rdev->family == CHIP_RV620) ||
+		    (rdev->family == CHIP_RV635)) {
+			WREG32(MM_CFGREGS_CNTL, 0x8);
+			link_cntl2 = RREG32(0x4088);
+			WREG32(MM_CFGREGS_CNTL, 0);
+			/* not supported yet */
+			if (link_cntl2 & SELECTABLE_DEEMPHASIS)
+				return;
+		}
+
+		speed_cntl &= ~LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK;
+		speed_cntl |= (0x3 << LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_SHIFT);
+		speed_cntl &= ~LC_VOLTAGE_TIMER_SEL_MASK;
+		speed_cntl &= ~LC_FORCE_DIS_HW_SPEED_CHANGE;
+		speed_cntl |= LC_FORCE_EN_HW_SPEED_CHANGE;
+		WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
+
+		tmp = RREG32(0x541c);
+		WREG32(0x541c, tmp | 0x8);
+		WREG32(MM_CFGREGS_CNTL, MM_WR_TO_CFG_EN);
+		link_cntl2 = RREG16(0x4088);
+		link_cntl2 &= ~TARGET_LINK_SPEED_MASK;
+		link_cntl2 |= 0x2;
+		WREG16(0x4088, link_cntl2);
+		WREG32(MM_CFGREGS_CNTL, 0);
+
+		if ((rdev->family == CHIP_RV670) ||
+		    (rdev->family == CHIP_RV620) ||
+		    (rdev->family == CHIP_RV635)) {
+			training_cntl = RREG32_PCIE_P(PCIE_LC_TRAINING_CNTL);
+			training_cntl &= ~LC_POINT_7_PLUS_EN;
+			WREG32_PCIE_P(PCIE_LC_TRAINING_CNTL, training_cntl);
+		} else {
+			speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+			speed_cntl &= ~LC_TARGET_LINK_SPEED_OVERRIDE_EN;
+			WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
+		}
+
+		speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+		speed_cntl |= LC_GEN2_EN_STRAP;
+		WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
+
+	} else {
+		link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
+		/* XXX: only disable it if gen1 bridge vendor == 0x111d or 0x1106 */
+		if (1)
+			link_width_cntl |= LC_UPCONFIGURE_DIS;
+		else
+			link_width_cntl &= ~LC_UPCONFIGURE_DIS;
+		WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+	}
+}
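
r600_get_pcie_lanes() above maps the hardware link-width encoding back to a lane count; the encoding (X0=0, X1=1, X2=2, X4=3, X8=4, X16=6) is the one added to r600d.h in the next file. A compact user-space decode of the RD field, with a made-up register value:

/* Decode the read-back link width field of PCIE_LC_LINK_WIDTH_CNTL using
 * the r600d.h encodings; the register value here is invented for the example.
 */
#include <stdint.h>
#include <stdio.h>

#define LC_LINK_WIDTH_RD_SHIFT 4
#define LC_LINK_WIDTH_RD_MASK  0x70

static int decode_lanes(uint32_t link_width_cntl)
{
	switch ((link_width_cntl & LC_LINK_WIDTH_RD_MASK) >> LC_LINK_WIDTH_RD_SHIFT) {
	case 0: return 0;    /* LC_LINK_WIDTH_X0  */
	case 1: return 1;    /* LC_LINK_WIDTH_X1  */
	case 2: return 2;    /* LC_LINK_WIDTH_X2  */
	case 3: return 4;    /* LC_LINK_WIDTH_X4  */
	case 4: return 8;    /* LC_LINK_WIDTH_X8  */
	case 6:
	default: return 16;  /* LC_LINK_WIDTH_X16 */
	}
}

int main(void)
{
	uint32_t link_width_cntl = 0x40;   /* hypothetical readback: RD field = 4 */

	printf("link is x%d\n", decode_lanes(link_width_cntl));
	return 0;
}
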
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index bff4dc4..a5d898b 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -728,6 +728,54 @@
 /* DCE 3.2 */
 #       define DC_HPDx_EN                                 (1 << 28)
 
+#define D1GRPH_INTERRUPT_STATUS                           0x6158
+#define D2GRPH_INTERRUPT_STATUS                           0x6958
+#       define DxGRPH_PFLIP_INT_OCCURRED                  (1 << 0)
+#       define DxGRPH_PFLIP_INT_CLEAR                     (1 << 8)
+#define D1GRPH_INTERRUPT_CONTROL                          0x615c
+#define D2GRPH_INTERRUPT_CONTROL                          0x695c
+#       define DxGRPH_PFLIP_INT_MASK                      (1 << 0)
+#       define DxGRPH_PFLIP_INT_TYPE                      (1 << 8)
+
+/* PCIE link stuff */
+#define PCIE_LC_TRAINING_CNTL                             0xa1 /* PCIE_P */
+#       define LC_POINT_7_PLUS_EN                         (1 << 6)
+#define PCIE_LC_LINK_WIDTH_CNTL                           0xa2 /* PCIE_P */
+#       define LC_LINK_WIDTH_SHIFT                        0
+#       define LC_LINK_WIDTH_MASK                         0x7
+#       define LC_LINK_WIDTH_X0                           0
+#       define LC_LINK_WIDTH_X1                           1
+#       define LC_LINK_WIDTH_X2                           2
+#       define LC_LINK_WIDTH_X4                           3
+#       define LC_LINK_WIDTH_X8                           4
+#       define LC_LINK_WIDTH_X16                          6
+#       define LC_LINK_WIDTH_RD_SHIFT                     4
+#       define LC_LINK_WIDTH_RD_MASK                      0x70
+#       define LC_RECONFIG_ARC_MISSING_ESCAPE             (1 << 7)
+#       define LC_RECONFIG_NOW                            (1 << 8)
+#       define LC_RENEGOTIATION_SUPPORT                   (1 << 9)
+#       define LC_RENEGOTIATE_EN                          (1 << 10)
+#       define LC_SHORT_RECONFIG_EN                       (1 << 11)
+#       define LC_UPCONFIGURE_SUPPORT                     (1 << 12)
+#       define LC_UPCONFIGURE_DIS                         (1 << 13)
+#define PCIE_LC_SPEED_CNTL                                0xa4 /* PCIE_P */
+#       define LC_GEN2_EN_STRAP                           (1 << 0)
+#       define LC_TARGET_LINK_SPEED_OVERRIDE_EN           (1 << 1)
+#       define LC_FORCE_EN_HW_SPEED_CHANGE                (1 << 5)
+#       define LC_FORCE_DIS_HW_SPEED_CHANGE               (1 << 6)
+#       define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK      (0x3 << 8)
+#       define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_SHIFT     3
+#       define LC_CURRENT_DATA_RATE                       (1 << 11)
+#       define LC_VOLTAGE_TIMER_SEL_MASK                  (0xf << 14)
+#       define LC_CLR_FAILED_SPD_CHANGE_CNT               (1 << 21)
+#       define LC_OTHER_SIDE_EVER_SENT_GEN2               (1 << 23)
+#       define LC_OTHER_SIDE_SUPPORTS_GEN2                (1 << 24)
+#define MM_CFGREGS_CNTL                                   0x544c
+#       define MM_WR_TO_CFG_EN                            (1 << 3)
+#define LINK_CNTL2                                        0x88 /* F0 */
+#       define TARGET_LINK_SPEED_MASK                     (0xf << 0)
+#       define SELECTABLE_DEEMPHASIS                      (1 << 6)
+
 /*
  * PM4
  */
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 3a70957..e948663 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -69,6 +69,7 @@
 #include <ttm/ttm_bo_driver.h>
 #include <ttm/ttm_placement.h>
 #include <ttm/ttm_module.h>
+#include <ttm/ttm_execbuf_util.h>
 
 #include "radeon_family.h"
 #include "radeon_mode.h"
@@ -180,6 +181,7 @@
 extern u32 rv6xx_get_temp(struct radeon_device *rdev);
 extern u32 rv770_get_temp(struct radeon_device *rdev);
 extern u32 evergreen_get_temp(struct radeon_device *rdev);
+extern u32 sumo_get_temp(struct radeon_device *rdev);
 
 /*
  * Fences.
@@ -259,13 +261,12 @@
 };
 
 struct radeon_bo_list {
-	struct list_head	list;
+	struct ttm_validate_buffer tv;
 	struct radeon_bo	*bo;
 	uint64_t		gpu_offset;
 	unsigned		rdomain;
 	unsigned		wdomain;
 	u32			tiling_flags;
-	bool			reserved;
 };
 
 /*
@@ -377,11 +378,56 @@
 /*
  * IRQS.
  */
+
+struct radeon_unpin_work {
+	struct work_struct work;
+	struct radeon_device *rdev;
+	int crtc_id;
+	struct radeon_fence *fence;
+	struct drm_pending_vblank_event *event;
+	struct radeon_bo *old_rbo;
+	u64 new_crtc_base;
+};
+
+struct r500_irq_stat_regs {
+	u32 disp_int;
+};
+
+struct r600_irq_stat_regs {
+	u32 disp_int;
+	u32 disp_int_cont;
+	u32 disp_int_cont2;
+	u32 d1grph_int;
+	u32 d2grph_int;
+};
+
+struct evergreen_irq_stat_regs {
+	u32 disp_int;
+	u32 disp_int_cont;
+	u32 disp_int_cont2;
+	u32 disp_int_cont3;
+	u32 disp_int_cont4;
+	u32 disp_int_cont5;
+	u32 d1grph_int;
+	u32 d2grph_int;
+	u32 d3grph_int;
+	u32 d4grph_int;
+	u32 d5grph_int;
+	u32 d6grph_int;
+};
+
+union radeon_irq_stat_regs {
+	struct r500_irq_stat_regs r500;
+	struct r600_irq_stat_regs r600;
+	struct evergreen_irq_stat_regs evergreen;
+};
+
 struct radeon_irq {
 	bool		installed;
 	bool		sw_int;
 	/* FIXME: use a define max crtc rather than hardcode it */
 	bool		crtc_vblank_int[6];
+	bool		pflip[6];
 	wait_queue_head_t	vblank_queue;
 	/* FIXME: use defines for max hpd/dacs */
 	bool            hpd[6];
@@ -392,12 +438,17 @@
 	bool		hdmi[2];
 	spinlock_t sw_lock;
 	int sw_refcount;
+	union radeon_irq_stat_regs stat_regs;
+	spinlock_t pflip_lock[6];
+	int pflip_refcount[6];
 };
 
 int radeon_irq_kms_init(struct radeon_device *rdev);
 void radeon_irq_kms_fini(struct radeon_device *rdev);
 void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev);
 void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev);
+void radeon_irq_kms_pflip_irq_get(struct radeon_device *rdev, int crtc);
+void radeon_irq_kms_pflip_irq_put(struct radeon_device *rdev, int crtc);
 
 /*
  * CP & ring.
@@ -687,6 +738,8 @@
 	THERMAL_TYPE_RV6XX,
 	THERMAL_TYPE_RV770,
 	THERMAL_TYPE_EVERGREEN,
+	THERMAL_TYPE_SUMO,
+	THERMAL_TYPE_NI,
 };
 
 struct radeon_voltage {
@@ -770,6 +823,9 @@
 	u32                     current_sclk;
 	u32                     current_mclk;
 	u32                     current_vddc;
+	u32                     default_sclk;
+	u32                     default_mclk;
+	u32                     default_vddc;
 	struct radeon_i2c_chan *i2c_bus;
 	/* selected pm method */
 	enum radeon_pm_method     pm_method;
@@ -881,6 +937,10 @@
 	void (*pm_finish)(struct radeon_device *rdev);
 	void (*pm_init_profile)(struct radeon_device *rdev);
 	void (*pm_get_dynpm_state)(struct radeon_device *rdev);
+	/* pageflipping */
+	void (*pre_page_flip)(struct radeon_device *rdev, int crtc);
+	u32 (*page_flip)(struct radeon_device *rdev, int crtc, u64 crtc_base);
+	void (*post_page_flip)(struct radeon_device *rdev, int crtc);
 };
 
 /*
@@ -975,6 +1035,7 @@
 	unsigned tiling_npipes;
 	unsigned tiling_group_size;
 	unsigned tile_config;
+	struct r100_gpu_lockup	lockup;
 };
 
 union radeon_asic_config {
@@ -1091,11 +1152,11 @@
 	const struct firmware *me_fw;	/* all family ME firmware */
 	const struct firmware *pfp_fw;	/* r6/700 PFP firmware */
 	const struct firmware *rlc_fw;	/* r6/700 RLC firmware */
+	const struct firmware *mc_fw;	/* NI MC firmware */
 	struct r600_blit r600_blit;
 	struct r700_vram_scratch vram_scratch;
 	int msi_enabled; /* msi enabled */
 	struct r600_ih ih; /* r6/700 interrupt ring */
-	struct workqueue_struct *wq;
 	struct work_struct hotplug_work;
 	int num_crtc; /* number of crtcs */
 	struct mutex dc_hw_i2c_mutex; /* display controller hw i2c mutex */
@@ -1110,10 +1171,10 @@
 	uint8_t			audio_status_bits;
 	uint8_t			audio_category_code;
 
-	bool powered_down;
 	struct notifier_block acpi_nb;
-	/* only one userspace can use Hyperz features at a time */
+	/* only one userspace can use Hyperz features or CMASK at a time */
 	struct drm_file *hyperz_filp;
+	struct drm_file *cmask_filp;
 	/* i2c buses */
 	struct radeon_i2c_chan *i2c_bus[RADEON_MAX_I2C_BUS];
 };
@@ -1188,6 +1249,8 @@
  */
 #define RREG8(reg) readb(((void __iomem *)rdev->rmmio) + (reg))
 #define WREG8(reg, v) writeb(v, ((void __iomem *)rdev->rmmio) + (reg))
+#define RREG16(reg) readw(((void __iomem *)rdev->rmmio) + (reg))
+#define WREG16(reg, v) writew(v, ((void __iomem *)rdev->rmmio) + (reg))
 #define RREG32(reg) r100_mm_rreg(rdev, (reg))
 #define DREG32(reg) printk(KERN_INFO "REGISTER: " #reg " : 0x%08X\n", r100_mm_rreg(rdev, (reg)))
 #define WREG32(reg, v) r100_mm_wreg(rdev, (reg), (v))
@@ -1261,6 +1324,14 @@
 		(rdev->family == CHIP_RV410) ||			\
 		(rdev->family == CHIP_RS400) ||			\
 		(rdev->family == CHIP_RS480))
+#define ASIC_IS_X2(rdev) ((rdev->ddev->pdev->device == 0x9441) || \
+		(rdev->ddev->pdev->device == 0x9443) || \
+		(rdev->ddev->pdev->device == 0x944B) || \
+		(rdev->ddev->pdev->device == 0x9506) || \
+		(rdev->ddev->pdev->device == 0x9509) || \
+		(rdev->ddev->pdev->device == 0x950F) || \
+		(rdev->ddev->pdev->device == 0x689C) || \
+		(rdev->ddev->pdev->device == 0x689D))
 #define ASIC_IS_AVIVO(rdev) ((rdev->family >= CHIP_RS600))
 #define ASIC_IS_DCE2(rdev) ((rdev->family == CHIP_RS600)  ||	\
 			    (rdev->family == CHIP_RS690)  ||	\
@@ -1269,6 +1340,9 @@
 #define ASIC_IS_DCE3(rdev) ((rdev->family >= CHIP_RV620))
 #define ASIC_IS_DCE32(rdev) ((rdev->family >= CHIP_RV730))
 #define ASIC_IS_DCE4(rdev) ((rdev->family >= CHIP_CEDAR))
+#define ASIC_IS_DCE41(rdev) ((rdev->family >= CHIP_PALM) && \
+			     (rdev->flags & RADEON_IS_IGP))
+#define ASIC_IS_DCE5(rdev) ((rdev->family >= CHIP_BARTS))
 
 /*
  * BIOS helpers.
@@ -1344,6 +1418,9 @@
 #define radeon_pm_finish(rdev) (rdev)->asic->pm_finish((rdev))
 #define radeon_pm_init_profile(rdev) (rdev)->asic->pm_init_profile((rdev))
 #define radeon_pm_get_dynpm_state(rdev) (rdev)->asic->pm_get_dynpm_state((rdev))
+#define radeon_pre_page_flip(rdev, crtc) rdev->asic->pre_page_flip((rdev), (crtc))
+#define radeon_page_flip(rdev, crtc, base) rdev->asic->page_flip((rdev), (crtc), (base))
+#define radeon_post_page_flip(rdev, crtc) rdev->asic->post_page_flip((rdev), (crtc))
 
 /* Common functions */
 /* AGP */
@@ -1372,67 +1449,7 @@
 extern int radeon_resume_kms(struct drm_device *dev);
 extern int radeon_suspend_kms(struct drm_device *dev, pm_message_t state);
 
-/* r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 */
-extern void r100_gpu_lockup_update(struct r100_gpu_lockup *lockup, struct radeon_cp *cp);
-extern bool r100_gpu_cp_is_lockup(struct radeon_device *rdev, struct r100_gpu_lockup *lockup, struct radeon_cp *cp);
-
-/* rv200,rv250,rv280 */
-extern void r200_set_safe_registers(struct radeon_device *rdev);
-
-/* r300,r350,rv350,rv370,rv380 */
-extern void r300_set_reg_safe(struct radeon_device *rdev);
-extern void r300_mc_program(struct radeon_device *rdev);
-extern void r300_mc_init(struct radeon_device *rdev);
-extern void r300_clock_startup(struct radeon_device *rdev);
-extern int r300_mc_wait_for_idle(struct radeon_device *rdev);
-extern int rv370_pcie_gart_init(struct radeon_device *rdev);
-extern void rv370_pcie_gart_fini(struct radeon_device *rdev);
-extern int rv370_pcie_gart_enable(struct radeon_device *rdev);
-extern void rv370_pcie_gart_disable(struct radeon_device *rdev);
-
-/* r420,r423,rv410 */
-extern u32 r420_mc_rreg(struct radeon_device *rdev, u32 reg);
-extern void r420_mc_wreg(struct radeon_device *rdev, u32 reg, u32 v);
-extern int r420_debugfs_pipes_info_init(struct radeon_device *rdev);
-extern void r420_pipes_init(struct radeon_device *rdev);
-
-/* rv515 */
-struct rv515_mc_save {
-	u32 d1vga_control;
-	u32 d2vga_control;
-	u32 vga_render_control;
-	u32 vga_hdp_control;
-	u32 d1crtc_control;
-	u32 d2crtc_control;
-};
-extern void rv515_bandwidth_avivo_update(struct radeon_device *rdev);
-extern void rv515_vga_render_disable(struct radeon_device *rdev);
-extern void rv515_set_safe_registers(struct radeon_device *rdev);
-extern void rv515_mc_stop(struct radeon_device *rdev, struct rv515_mc_save *save);
-extern void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save);
-extern void rv515_clock_startup(struct radeon_device *rdev);
-extern void rv515_debugfs(struct radeon_device *rdev);
-extern int rv515_suspend(struct radeon_device *rdev);
-
-/* rs400 */
-extern int rs400_gart_init(struct radeon_device *rdev);
-extern int rs400_gart_enable(struct radeon_device *rdev);
-extern void rs400_gart_adjust_size(struct radeon_device *rdev);
-extern void rs400_gart_disable(struct radeon_device *rdev);
-extern void rs400_gart_fini(struct radeon_device *rdev);
-
-/* rs600 */
-extern void rs600_set_safe_registers(struct radeon_device *rdev);
-extern int rs600_irq_set(struct radeon_device *rdev);
-extern void rs600_irq_disable(struct radeon_device *rdev);
-
-/* rs690, rs740 */
-extern void rs690_line_buffer_adjust(struct radeon_device *rdev,
-					struct drm_display_mode *mode1,
-					struct drm_display_mode *mode2);
-
 /* r600, rv610, rv630, rv620, rv635, rv670, rs780, rs880 */
-extern void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
 extern bool r600_card_posted(struct radeon_device *rdev);
 extern void r600_cp_stop(struct radeon_device *rdev);
 extern int r600_cp_start(struct radeon_device *rdev);
@@ -1478,6 +1495,7 @@
 extern int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder);
 extern void r600_hdmi_update_audio_settings(struct drm_encoder *encoder);
 
+extern void r700_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
 extern void r700_cp_stop(struct radeon_device *rdev);
 extern void r700_cp_fini(struct radeon_device *rdev);
 extern void evergreen_disable_interrupt_state(struct radeon_device *rdev);
@@ -1485,6 +1503,9 @@
 extern int evergreen_blit_init(struct radeon_device *rdev);
 extern void evergreen_blit_fini(struct radeon_device *rdev);
 
+extern int ni_init_microcode(struct radeon_device *rdev);
+extern int btc_mc_load_microcode(struct radeon_device *rdev);
+
 /* radeon_acpi.c */ 
 #if defined(CONFIG_ACPI) 
 extern int radeon_acpi_init(struct radeon_device *rdev); 
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index 64fb89e..3a1b161 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -94,7 +94,7 @@
 		rdev->mc_rreg = &rs600_mc_rreg;
 		rdev->mc_wreg = &rs600_mc_wreg;
 	}
-	if ((rdev->family >= CHIP_R600) && (rdev->family <= CHIP_RV740)) {
+	if ((rdev->family >= CHIP_R600) && (rdev->family <= CHIP_HEMLOCK)) {
 		rdev->pciep_rreg = &r600_pciep_rreg;
 		rdev->pciep_wreg = &r600_pciep_wreg;
 	}
@@ -171,6 +171,9 @@
 	.pm_finish = &r100_pm_finish,
 	.pm_init_profile = &r100_pm_init_profile,
 	.pm_get_dynpm_state = &r100_pm_get_dynpm_state,
+	.pre_page_flip = &r100_pre_page_flip,
+	.page_flip = &r100_page_flip,
+	.post_page_flip = &r100_post_page_flip,
 };
 
 static struct radeon_asic r200_asic = {
@@ -215,6 +218,9 @@
 	.pm_finish = &r100_pm_finish,
 	.pm_init_profile = &r100_pm_init_profile,
 	.pm_get_dynpm_state = &r100_pm_get_dynpm_state,
+	.pre_page_flip = &r100_pre_page_flip,
+	.page_flip = &r100_page_flip,
+	.post_page_flip = &r100_post_page_flip,
 };
 
 static struct radeon_asic r300_asic = {
@@ -260,6 +266,9 @@
 	.pm_finish = &r100_pm_finish,
 	.pm_init_profile = &r100_pm_init_profile,
 	.pm_get_dynpm_state = &r100_pm_get_dynpm_state,
+	.pre_page_flip = &r100_pre_page_flip,
+	.page_flip = &r100_page_flip,
+	.post_page_flip = &r100_post_page_flip,
 };
 
 static struct radeon_asic r300_asic_pcie = {
@@ -304,6 +313,9 @@
 	.pm_finish = &r100_pm_finish,
 	.pm_init_profile = &r100_pm_init_profile,
 	.pm_get_dynpm_state = &r100_pm_get_dynpm_state,
+	.pre_page_flip = &r100_pre_page_flip,
+	.page_flip = &r100_page_flip,
+	.post_page_flip = &r100_post_page_flip,
 };
 
 static struct radeon_asic r420_asic = {
@@ -349,6 +361,9 @@
 	.pm_finish = &r100_pm_finish,
 	.pm_init_profile = &r420_pm_init_profile,
 	.pm_get_dynpm_state = &r100_pm_get_dynpm_state,
+	.pre_page_flip = &r100_pre_page_flip,
+	.page_flip = &r100_page_flip,
+	.post_page_flip = &r100_post_page_flip,
 };
 
 static struct radeon_asic rs400_asic = {
@@ -394,6 +409,9 @@
 	.pm_finish = &r100_pm_finish,
 	.pm_init_profile = &r100_pm_init_profile,
 	.pm_get_dynpm_state = &r100_pm_get_dynpm_state,
+	.pre_page_flip = &r100_pre_page_flip,
+	.page_flip = &r100_page_flip,
+	.post_page_flip = &r100_post_page_flip,
 };
 
 static struct radeon_asic rs600_asic = {
@@ -439,6 +457,9 @@
 	.pm_finish = &rs600_pm_finish,
 	.pm_init_profile = &r420_pm_init_profile,
 	.pm_get_dynpm_state = &r100_pm_get_dynpm_state,
+	.pre_page_flip = &rs600_pre_page_flip,
+	.page_flip = &rs600_page_flip,
+	.post_page_flip = &rs600_post_page_flip,
 };
 
 static struct radeon_asic rs690_asic = {
@@ -484,6 +505,9 @@
 	.pm_finish = &rs600_pm_finish,
 	.pm_init_profile = &r420_pm_init_profile,
 	.pm_get_dynpm_state = &r100_pm_get_dynpm_state,
+	.pre_page_flip = &rs600_pre_page_flip,
+	.page_flip = &rs600_page_flip,
+	.post_page_flip = &rs600_post_page_flip,
 };
 
 static struct radeon_asic rv515_asic = {
@@ -529,6 +553,9 @@
 	.pm_finish = &rs600_pm_finish,
 	.pm_init_profile = &r420_pm_init_profile,
 	.pm_get_dynpm_state = &r100_pm_get_dynpm_state,
+	.pre_page_flip = &rs600_pre_page_flip,
+	.page_flip = &rs600_page_flip,
+	.post_page_flip = &rs600_post_page_flip,
 };
 
 static struct radeon_asic r520_asic = {
@@ -574,6 +601,9 @@
 	.pm_finish = &rs600_pm_finish,
 	.pm_init_profile = &r420_pm_init_profile,
 	.pm_get_dynpm_state = &r100_pm_get_dynpm_state,
+	.pre_page_flip = &rs600_pre_page_flip,
+	.page_flip = &rs600_page_flip,
+	.post_page_flip = &rs600_post_page_flip,
 };
 
 static struct radeon_asic r600_asic = {
@@ -601,8 +631,8 @@
 	.set_engine_clock = &radeon_atom_set_engine_clock,
 	.get_memory_clock = &radeon_atom_get_memory_clock,
 	.set_memory_clock = &radeon_atom_set_memory_clock,
-	.get_pcie_lanes = &rv370_get_pcie_lanes,
-	.set_pcie_lanes = NULL,
+	.get_pcie_lanes = &r600_get_pcie_lanes,
+	.set_pcie_lanes = &r600_set_pcie_lanes,
 	.set_clock_gating = NULL,
 	.set_surface_reg = r600_set_surface_reg,
 	.clear_surface_reg = r600_clear_surface_reg,
@@ -618,6 +648,9 @@
 	.pm_finish = &rs600_pm_finish,
 	.pm_init_profile = &r600_pm_init_profile,
 	.pm_get_dynpm_state = &r600_pm_get_dynpm_state,
+	.pre_page_flip = &rs600_pre_page_flip,
+	.page_flip = &rs600_page_flip,
+	.post_page_flip = &rs600_post_page_flip,
 };
 
 static struct radeon_asic rs780_asic = {
@@ -662,6 +695,9 @@
 	.pm_finish = &rs600_pm_finish,
 	.pm_init_profile = &rs780_pm_init_profile,
 	.pm_get_dynpm_state = &r600_pm_get_dynpm_state,
+	.pre_page_flip = &rs600_pre_page_flip,
+	.page_flip = &rs600_page_flip,
+	.post_page_flip = &rs600_post_page_flip,
 };
 
 static struct radeon_asic rv770_asic = {
@@ -689,8 +725,8 @@
 	.set_engine_clock = &radeon_atom_set_engine_clock,
 	.get_memory_clock = &radeon_atom_get_memory_clock,
 	.set_memory_clock = &radeon_atom_set_memory_clock,
-	.get_pcie_lanes = &rv370_get_pcie_lanes,
-	.set_pcie_lanes = NULL,
+	.get_pcie_lanes = &r600_get_pcie_lanes,
+	.set_pcie_lanes = &r600_set_pcie_lanes,
 	.set_clock_gating = &radeon_atom_set_clock_gating,
 	.set_surface_reg = r600_set_surface_reg,
 	.clear_surface_reg = r600_clear_surface_reg,
@@ -706,6 +742,9 @@
 	.pm_finish = &rs600_pm_finish,
 	.pm_init_profile = &r600_pm_init_profile,
 	.pm_get_dynpm_state = &r600_pm_get_dynpm_state,
+	.pre_page_flip = &rs600_pre_page_flip,
+	.page_flip = &rv770_page_flip,
+	.post_page_flip = &rs600_post_page_flip,
 };
 
 static struct radeon_asic evergreen_asic = {
@@ -733,6 +772,95 @@
 	.set_engine_clock = &radeon_atom_set_engine_clock,
 	.get_memory_clock = &radeon_atom_get_memory_clock,
 	.set_memory_clock = &radeon_atom_set_memory_clock,
+	.get_pcie_lanes = &r600_get_pcie_lanes,
+	.set_pcie_lanes = &r600_set_pcie_lanes,
+	.set_clock_gating = NULL,
+	.set_surface_reg = r600_set_surface_reg,
+	.clear_surface_reg = r600_clear_surface_reg,
+	.bandwidth_update = &evergreen_bandwidth_update,
+	.hpd_init = &evergreen_hpd_init,
+	.hpd_fini = &evergreen_hpd_fini,
+	.hpd_sense = &evergreen_hpd_sense,
+	.hpd_set_polarity = &evergreen_hpd_set_polarity,
+	.gui_idle = &r600_gui_idle,
+	.pm_misc = &evergreen_pm_misc,
+	.pm_prepare = &evergreen_pm_prepare,
+	.pm_finish = &evergreen_pm_finish,
+	.pm_init_profile = &r600_pm_init_profile,
+	.pm_get_dynpm_state = &r600_pm_get_dynpm_state,
+	.pre_page_flip = &evergreen_pre_page_flip,
+	.page_flip = &evergreen_page_flip,
+	.post_page_flip = &evergreen_post_page_flip,
+};
+
+static struct radeon_asic sumo_asic = {
+	.init = &evergreen_init,
+	.fini = &evergreen_fini,
+	.suspend = &evergreen_suspend,
+	.resume = &evergreen_resume,
+	.cp_commit = &r600_cp_commit,
+	.gpu_is_lockup = &evergreen_gpu_is_lockup,
+	.asic_reset = &evergreen_asic_reset,
+	.vga_set_state = &r600_vga_set_state,
+	.gart_tlb_flush = &evergreen_pcie_gart_tlb_flush,
+	.gart_set_page = &rs600_gart_set_page,
+	.ring_test = &r600_ring_test,
+	.ring_ib_execute = &r600_ring_ib_execute,
+	.irq_set = &evergreen_irq_set,
+	.irq_process = &evergreen_irq_process,
+	.get_vblank_counter = &evergreen_get_vblank_counter,
+	.fence_ring_emit = &r600_fence_ring_emit,
+	.cs_parse = &evergreen_cs_parse,
+	.copy_blit = &evergreen_copy_blit,
+	.copy_dma = &evergreen_copy_blit,
+	.copy = &evergreen_copy_blit,
+	.get_engine_clock = &radeon_atom_get_engine_clock,
+	.set_engine_clock = &radeon_atom_set_engine_clock,
+	.get_memory_clock = NULL,
+	.set_memory_clock = NULL,
+	.get_pcie_lanes = NULL,
+	.set_pcie_lanes = NULL,
+	.set_clock_gating = NULL,
+	.set_surface_reg = r600_set_surface_reg,
+	.clear_surface_reg = r600_clear_surface_reg,
+	.bandwidth_update = &evergreen_bandwidth_update,
+	.hpd_init = &evergreen_hpd_init,
+	.hpd_fini = &evergreen_hpd_fini,
+	.hpd_sense = &evergreen_hpd_sense,
+	.hpd_set_polarity = &evergreen_hpd_set_polarity,
+	.gui_idle = &r600_gui_idle,
+	.pm_misc = &evergreen_pm_misc,
+	.pm_prepare = &evergreen_pm_prepare,
+	.pm_finish = &evergreen_pm_finish,
+	.pm_init_profile = &rs780_pm_init_profile,
+	.pm_get_dynpm_state = &r600_pm_get_dynpm_state,
+};
+
+static struct radeon_asic btc_asic = {
+	.init = &evergreen_init,
+	.fini = &evergreen_fini,
+	.suspend = &evergreen_suspend,
+	.resume = &evergreen_resume,
+	.cp_commit = &r600_cp_commit,
+	.gpu_is_lockup = &evergreen_gpu_is_lockup,
+	.asic_reset = &evergreen_asic_reset,
+	.vga_set_state = &r600_vga_set_state,
+	.gart_tlb_flush = &evergreen_pcie_gart_tlb_flush,
+	.gart_set_page = &rs600_gart_set_page,
+	.ring_test = &r600_ring_test,
+	.ring_ib_execute = &r600_ring_ib_execute,
+	.irq_set = &evergreen_irq_set,
+	.irq_process = &evergreen_irq_process,
+	.get_vblank_counter = &evergreen_get_vblank_counter,
+	.fence_ring_emit = &r600_fence_ring_emit,
+	.cs_parse = &evergreen_cs_parse,
+	.copy_blit = &evergreen_copy_blit,
+	.copy_dma = &evergreen_copy_blit,
+	.copy = &evergreen_copy_blit,
+	.get_engine_clock = &radeon_atom_get_engine_clock,
+	.set_engine_clock = &radeon_atom_set_engine_clock,
+	.get_memory_clock = &radeon_atom_get_memory_clock,
+	.set_memory_clock = &radeon_atom_set_memory_clock,
 	.get_pcie_lanes = NULL,
 	.set_pcie_lanes = NULL,
 	.set_clock_gating = NULL,
@@ -749,6 +877,9 @@
 	.pm_finish = &evergreen_pm_finish,
 	.pm_init_profile = &r600_pm_init_profile,
 	.pm_get_dynpm_state = &r600_pm_get_dynpm_state,
+	.pre_page_flip = &evergreen_pre_page_flip,
+	.page_flip = &evergreen_page_flip,
+	.post_page_flip = &evergreen_post_page_flip,
 };
 
 int radeon_asic_init(struct radeon_device *rdev)
@@ -835,6 +966,14 @@
 	case CHIP_HEMLOCK:
 		rdev->asic = &evergreen_asic;
 		break;
+	case CHIP_PALM:
+		rdev->asic = &sumo_asic;
+		break;
+	case CHIP_BARTS:
+	case CHIP_TURKS:
+	case CHIP_CAICOS:
+		rdev->asic = &btc_asic;
+		break;
 	default:
 		/* FIXME: not supported yet */
 		return -EINVAL;
@@ -849,7 +988,9 @@
 	if (rdev->flags & RADEON_SINGLE_CRTC)
 		rdev->num_crtc = 1;
 	else {
-		if (ASIC_IS_DCE4(rdev))
+		if (ASIC_IS_DCE41(rdev))
+			rdev->num_crtc = 2;
+		else if (ASIC_IS_DCE4(rdev))
 			rdev->num_crtc = 6;
 		else
 			rdev->num_crtc = 2;
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index 7409882..e01f0771 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -102,6 +102,11 @@
 void r100_pci_gart_disable(struct radeon_device *rdev);
 int r100_debugfs_mc_info_init(struct radeon_device *rdev);
 int r100_gui_wait_for_idle(struct radeon_device *rdev);
+void r100_gpu_lockup_update(struct r100_gpu_lockup *lockup,
+			    struct radeon_cp *cp);
+bool r100_gpu_cp_is_lockup(struct radeon_device *rdev,
+			   struct r100_gpu_lockup *lockup,
+			   struct radeon_cp *cp);
 void r100_ib_fini(struct radeon_device *rdev);
 int r100_ib_init(struct radeon_device *rdev);
 void r100_irq_disable(struct radeon_device *rdev);
@@ -130,15 +135,19 @@
 extern void r100_pm_finish(struct radeon_device *rdev);
 extern void r100_pm_init_profile(struct radeon_device *rdev);
 extern void r100_pm_get_dynpm_state(struct radeon_device *rdev);
+extern void r100_pre_page_flip(struct radeon_device *rdev, int crtc);
+extern u32 r100_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base);
+extern void r100_post_page_flip(struct radeon_device *rdev, int crtc);
 
 /*
  * r200,rv250,rs300,rv280
  */
 extern int r200_copy_dma(struct radeon_device *rdev,
-			uint64_t src_offset,
-			uint64_t dst_offset,
-			unsigned num_pages,
+			 uint64_t src_offset,
+			 uint64_t dst_offset,
+			 unsigned num_pages,
 			 struct radeon_fence *fence);
+void r200_set_safe_registers(struct radeon_device *rdev);
 
 /*
  * r300,r350,rv350,rv380
@@ -159,6 +168,15 @@
 extern void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
 extern void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes);
 extern int rv370_get_pcie_lanes(struct radeon_device *rdev);
+extern void r300_set_reg_safe(struct radeon_device *rdev);
+extern void r300_mc_program(struct radeon_device *rdev);
+extern void r300_mc_init(struct radeon_device *rdev);
+extern void r300_clock_startup(struct radeon_device *rdev);
+extern int r300_mc_wait_for_idle(struct radeon_device *rdev);
+extern int rv370_pcie_gart_init(struct radeon_device *rdev);
+extern void rv370_pcie_gart_fini(struct radeon_device *rdev);
+extern int rv370_pcie_gart_enable(struct radeon_device *rdev);
+extern void rv370_pcie_gart_disable(struct radeon_device *rdev);
 
 /*
  * r420,r423,rv410
@@ -168,6 +186,10 @@
 extern int r420_suspend(struct radeon_device *rdev);
 extern int r420_resume(struct radeon_device *rdev);
 extern void r420_pm_init_profile(struct radeon_device *rdev);
+extern u32 r420_mc_rreg(struct radeon_device *rdev, u32 reg);
+extern void r420_mc_wreg(struct radeon_device *rdev, u32 reg, u32 v);
+extern int r420_debugfs_pipes_info_init(struct radeon_device *rdev);
+extern void r420_pipes_init(struct radeon_device *rdev);
 
 /*
  * rs400,rs480
@@ -180,6 +202,12 @@
 int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
 uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg);
 void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
+int rs400_gart_init(struct radeon_device *rdev);
+int rs400_gart_enable(struct radeon_device *rdev);
+void rs400_gart_adjust_size(struct radeon_device *rdev);
+void rs400_gart_disable(struct radeon_device *rdev);
+void rs400_gart_fini(struct radeon_device *rdev);
+
 
 /*
  * rs600.
@@ -191,6 +219,7 @@
 extern int rs600_resume(struct radeon_device *rdev);
 int rs600_irq_set(struct radeon_device *rdev);
 int rs600_irq_process(struct radeon_device *rdev);
+void rs600_irq_disable(struct radeon_device *rdev);
 u32 rs600_get_vblank_counter(struct radeon_device *rdev, int crtc);
 void rs600_gart_tlb_flush(struct radeon_device *rdev);
 int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
@@ -205,6 +234,11 @@
 extern void rs600_pm_misc(struct radeon_device *rdev);
 extern void rs600_pm_prepare(struct radeon_device *rdev);
 extern void rs600_pm_finish(struct radeon_device *rdev);
+extern void rs600_pre_page_flip(struct radeon_device *rdev, int crtc);
+extern u32 rs600_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base);
+extern void rs600_post_page_flip(struct radeon_device *rdev, int crtc);
+void rs600_set_safe_registers(struct radeon_device *rdev);
+
 
 /*
  * rs690,rs740
@@ -216,10 +250,21 @@
 uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg);
 void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
 void rs690_bandwidth_update(struct radeon_device *rdev);
+void rs690_line_buffer_adjust(struct radeon_device *rdev,
+					struct drm_display_mode *mode1,
+					struct drm_display_mode *mode2);
 
 /*
  * rv515
  */
+struct rv515_mc_save {
+	u32 d1vga_control;
+	u32 d2vga_control;
+	u32 vga_render_control;
+	u32 vga_hdp_control;
+	u32 d1crtc_control;
+	u32 d2crtc_control;
+};
 int rv515_init(struct radeon_device *rdev);
 void rv515_fini(struct radeon_device *rdev);
 uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg);
@@ -230,6 +275,14 @@
 void rv515_bandwidth_update(struct radeon_device *rdev);
 int rv515_resume(struct radeon_device *rdev);
 int rv515_suspend(struct radeon_device *rdev);
+void rv515_bandwidth_avivo_update(struct radeon_device *rdev);
+void rv515_vga_render_disable(struct radeon_device *rdev);
+void rv515_set_safe_registers(struct radeon_device *rdev);
+void rv515_mc_stop(struct radeon_device *rdev, struct rv515_mc_save *save);
+void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save);
+void rv515_clock_startup(struct radeon_device *rdev);
+void rv515_debugfs(struct radeon_device *rdev);
+
 
 /*
  * r520,rv530,rv560,rv570,r580
@@ -278,6 +331,8 @@
 extern void r600_pm_init_profile(struct radeon_device *rdev);
 extern void rs780_pm_init_profile(struct radeon_device *rdev);
 extern void r600_pm_get_dynpm_state(struct radeon_device *rdev);
+extern void r600_set_pcie_lanes(struct radeon_device *rdev, int lanes);
+extern int r600_get_pcie_lanes(struct radeon_device *rdev);
 
 /*
  * rv770,rv730,rv710,rv740
@@ -287,6 +342,7 @@
 int rv770_suspend(struct radeon_device *rdev);
 int rv770_resume(struct radeon_device *rdev);
 extern void rv770_pm_misc(struct radeon_device *rdev);
+extern u32 rv770_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base);
 
 /*
  * evergreen
@@ -314,5 +370,8 @@
 extern void evergreen_pm_misc(struct radeon_device *rdev);
 extern void evergreen_pm_prepare(struct radeon_device *rdev);
 extern void evergreen_pm_finish(struct radeon_device *rdev);
+extern void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc);
+extern u32 evergreen_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base);
+extern void evergreen_post_page_flip(struct radeon_device *rdev, int crtc);
 
 #endif
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index bc5a2c3..1573202 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -37,7 +37,7 @@
 extern void radeon_link_encoder_connector(struct drm_device *dev);
 extern void
 radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_enum,
-			uint32_t supported_device);
+			uint32_t supported_device, u16 caps);
 
 /* from radeon_connector.c */
 extern void
@@ -313,7 +313,6 @@
 				     uint16_t *line_mux,
 				     struct radeon_hpd *hpd)
 {
-	struct radeon_device *rdev = dev->dev_private;
 
 	/* Asus M2A-VM HDMI board lists the DVI port as HDMI */
 	if ((dev->pdev->device == 0x791e) &&
@@ -388,6 +387,17 @@
 			*line_mux = 0x90;
 	}
 
+	/* mac rv630 */
+	if ((dev->pdev->device == 0x9588) &&
+	    (dev->pdev->subsystem_vendor == 0x106b) &&
+	    (dev->pdev->subsystem_device == 0x00a6)) {
+		if ((supported_device == ATOM_DEVICE_TV1_SUPPORT) &&
+		    (*connector_type == DRM_MODE_CONNECTOR_DVII)) {
+			*connector_type = DRM_MODE_CONNECTOR_9PinDIN;
+			*line_mux = CONNECTOR_7PIN_DIN_ENUM_ID1;
+		}
+	}
+
 	/* ASUS HD 3600 XT board lists the DVI port as HDMI */
 	if ((dev->pdev->device == 0x9598) &&
 	    (dev->pdev->subsystem_vendor == 0x1043) &&
@@ -425,21 +435,23 @@
 		}
 	}
 
-	/* Acer laptop reports DVI-D as DVI-I and hpd pins reversed */
+	/* Acer laptop (Acer TravelMate 5730G) has an HDMI port
+	 * on the laptop and a DVI port on the docking station and
+	 * both share the same encoder, hpd pin, and ddc line.
+	 * So while the bios table is technically correct,
+	 * we drop the DVI port here since xrandr has no concept of
+	 * encoders and will try to drive both connectors
+	 * with different crtcs which isn't possible on the hardware
+	 * side and leaves no crtcs for LVDS or VGA.
+	 */
 	if ((dev->pdev->device == 0x95c4) &&
 	    (dev->pdev->subsystem_vendor == 0x1025) &&
 	    (dev->pdev->subsystem_device == 0x013c)) {
-		struct radeon_gpio_rec gpio;
-
 		if ((*connector_type == DRM_MODE_CONNECTOR_DVII) &&
 		    (supported_device == ATOM_DEVICE_DFP1_SUPPORT)) {
-			gpio = radeon_lookup_gpio(rdev, 6);
-			*hpd = radeon_atom_get_hpd_info_from_gpio(rdev, &gpio);
+			/* actually it's a DVI-D port not DVI-I */
 			*connector_type = DRM_MODE_CONNECTOR_DVID;
-		} else if ((*connector_type == DRM_MODE_CONNECTOR_HDMIA) &&
-			   (supported_device == ATOM_DEVICE_DFP1_SUPPORT)) {
-			gpio = radeon_lookup_gpio(rdev, 7);
-			*hpd = radeon_atom_get_hpd_info_from_gpio(rdev, &gpio);
+			return false;
 		}
 	}
 
@@ -525,6 +537,7 @@
 	u16 size, data_offset;
 	u8 frev, crev;
 	ATOM_CONNECTOR_OBJECT_TABLE *con_obj;
+	ATOM_ENCODER_OBJECT_TABLE *enc_obj;
 	ATOM_OBJECT_TABLE *router_obj;
 	ATOM_DISPLAY_OBJECT_PATH_TABLE *path_obj;
 	ATOM_OBJECT_HEADER *obj_header;
@@ -549,6 +562,9 @@
 	con_obj = (ATOM_CONNECTOR_OBJECT_TABLE *)
 	    (ctx->bios + data_offset +
 	     le16_to_cpu(obj_header->usConnectorObjectTableOffset));
+	enc_obj = (ATOM_ENCODER_OBJECT_TABLE *)
+	    (ctx->bios + data_offset +
+	     le16_to_cpu(obj_header->usEncoderObjectTableOffset));
 	router_obj = (ATOM_OBJECT_TABLE *)
 		(ctx->bios + data_offset +
 		 le16_to_cpu(obj_header->usRouterObjectTableOffset));
@@ -654,14 +670,35 @@
 				     OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT;
 
 				if (grph_obj_type == GRAPH_OBJECT_TYPE_ENCODER) {
-					u16 encoder_obj = le16_to_cpu(path->usGraphicObjIds[j]);
+					for (k = 0; k < enc_obj->ucNumberOfObjects; k++) {
+						u16 encoder_obj = le16_to_cpu(enc_obj->asObjects[k].usObjectID);
+						if (le16_to_cpu(path->usGraphicObjIds[j]) == encoder_obj) {
+							ATOM_COMMON_RECORD_HEADER *record = (ATOM_COMMON_RECORD_HEADER *)
+								(ctx->bios + data_offset +
+								 le16_to_cpu(enc_obj->asObjects[k].usRecordOffset));
+							ATOM_ENCODER_CAP_RECORD *cap_record;
+							u16 caps = 0;
 
-					radeon_add_atom_encoder(dev,
-								encoder_obj,
-								le16_to_cpu
-								(path->
-								 usDeviceTag));
-
+							while (record->ucRecordType > 0 &&
+							       record->ucRecordType <= ATOM_MAX_OBJECT_RECORD_NUMBER) {
+								switch (record->ucRecordType) {
+								case ATOM_ENCODER_CAP_RECORD_TYPE:
+									cap_record =(ATOM_ENCODER_CAP_RECORD *)
+										record;
+									caps = le16_to_cpu(cap_record->usEncoderCap);
+									break;
+								}
+								record = (ATOM_COMMON_RECORD_HEADER *)
+									((char *)record + record->ucRecordSize);
+							}
+							radeon_add_atom_encoder(dev,
+										encoder_obj,
+										le16_to_cpu
+										(path->
+										 usDeviceTag),
+										caps);
+						}
+					}
 				} else if (grph_obj_type == GRAPH_OBJECT_TYPE_ROUTER) {
 					for (k = 0; k < router_obj->ucNumberOfObjects; k++) {
 						u16 router_obj_id = le16_to_cpu(router_obj->asObjects[k].usObjectID);
@@ -995,7 +1032,8 @@
 						radeon_get_encoder_enum(dev,
 								      (1 << i),
 								      dac),
-						(1 << i));
+						(1 << i),
+						0);
 		else
 			radeon_add_legacy_encoder(dev,
 						  radeon_get_encoder_enum(dev,
@@ -1074,6 +1112,7 @@
 	ATOM_FIRMWARE_INFO_V1_3 info_13;
 	ATOM_FIRMWARE_INFO_V1_4 info_14;
 	ATOM_FIRMWARE_INFO_V2_1 info_21;
+	ATOM_FIRMWARE_INFO_V2_2 info_22;
 };
 
 bool radeon_atom_get_clock_info(struct drm_device *dev)
@@ -1148,8 +1187,12 @@
 		*p2pll = *p1pll;
 
 		/* system clock */
-		spll->reference_freq =
-		    le16_to_cpu(firmware_info->info.usReferenceClock);
+		if (ASIC_IS_DCE4(rdev))
+			spll->reference_freq =
+				le16_to_cpu(firmware_info->info_21.usCoreReferenceClock);
+		else
+			spll->reference_freq =
+				le16_to_cpu(firmware_info->info.usReferenceClock);
 		spll->reference_div = 0;
 
 		spll->pll_out_min =
@@ -1171,8 +1214,12 @@
 		    le16_to_cpu(firmware_info->info.usMaxEngineClockPLL_Input);
 
 		/* memory clock */
-		mpll->reference_freq =
-		    le16_to_cpu(firmware_info->info.usReferenceClock);
+		if (ASIC_IS_DCE4(rdev))
+			mpll->reference_freq =
+				le16_to_cpu(firmware_info->info_21.usMemoryReferenceClock);
+		else
+			mpll->reference_freq =
+				le16_to_cpu(firmware_info->info.usReferenceClock);
 		mpll->reference_div = 0;
 
 		mpll->pll_out_min =
@@ -1201,8 +1248,12 @@
 		if (ASIC_IS_DCE4(rdev)) {
 			rdev->clock.default_dispclk =
 				le32_to_cpu(firmware_info->info_21.ulDefaultDispEngineClkFreq);
-			if (rdev->clock.default_dispclk == 0)
-				rdev->clock.default_dispclk = 60000; /* 600 Mhz */
+			if (rdev->clock.default_dispclk == 0) {
+				if (ASIC_IS_DCE5(rdev))
+					rdev->clock.default_dispclk = 54000; /* 540 MHz */
+				else
+					rdev->clock.default_dispclk = 60000; /* 600 MHz */
+			}
 			rdev->clock.dp_extclk =
 				le16_to_cpu(firmware_info->info_21.usUniphyDPModeExtClkFreq);
 		}
@@ -1337,6 +1388,43 @@
 	return false;
 }
 
+static void radeon_atombios_get_igp_ss_overrides(struct radeon_device *rdev,
+						 struct radeon_atom_ss *ss,
+						 int id)
+{
+	struct radeon_mode_info *mode_info = &rdev->mode_info;
+	int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
+	u16 data_offset, size;
+	struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 *igp_info;
+	u8 frev, crev;
+	u16 percentage = 0, rate = 0;
+
+	/* get any igp specific overrides */
+	if (atom_parse_data_header(mode_info->atom_context, index, &size,
+				   &frev, &crev, &data_offset)) {
+		igp_info = (struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 *)
+			(mode_info->atom_context->bios + data_offset);
+		switch (id) {
+		case ASIC_INTERNAL_SS_ON_TMDS:
+			percentage = le16_to_cpu(igp_info->usDVISSPercentage);
+			rate = le16_to_cpu(igp_info->usDVISSpreadRateIn10Hz);
+			break;
+		case ASIC_INTERNAL_SS_ON_HDMI:
+			percentage = le16_to_cpu(igp_info->usHDMISSPercentage);
+			rate = le16_to_cpu(igp_info->usHDMISSpreadRateIn10Hz);
+			break;
+		case ASIC_INTERNAL_SS_ON_LVDS:
+			percentage = le16_to_cpu(igp_info->usLvdsSSPercentage);
+			rate = le16_to_cpu(igp_info->usLvdsSSpreadRateIn10Hz);
+			break;
+		}
+		if (percentage)
+			ss->percentage = percentage;
+		if (rate)
+			ss->rate = rate;
+	}
+}
+
 union asic_ss_info {
 	struct _ATOM_ASIC_INTERNAL_SS_INFO info;
 	struct _ATOM_ASIC_INTERNAL_SS_INFO_V2 info_2;
@@ -1401,6 +1489,8 @@
 						le16_to_cpu(ss_info->info_3.asSpreadSpectrum[i].usSpreadSpectrumPercentage);
 					ss->type = ss_info->info_3.asSpreadSpectrum[i].ucSpreadSpectrumMode;
 					ss->rate = le16_to_cpu(ss_info->info_3.asSpreadSpectrum[i].usSpreadRateIn10Hz);
+					if (rdev->flags & RADEON_IS_IGP)
+						radeon_atombios_get_igp_ss_overrides(rdev, ss, id);
 					return true;
 				}
 			}
@@ -1477,6 +1567,9 @@
 		if (misc & ATOM_DOUBLE_CLOCK_MODE)
 			lvds->native_mode.flags |= DRM_MODE_FLAG_DBLSCAN;
 
+		lvds->native_mode.width_mm = lvds_info->info.sLCDTiming.usImageHSize;
+		lvds->native_mode.height_mm = lvds_info->info.sLCDTiming.usImageVSize;
+
 		/* set crtc values */
 		drm_mode_set_crtcinfo(&lvds->native_mode, CRTC_INTERLACE_HALVE_V);
 
@@ -1489,6 +1582,59 @@
 		else
 			lvds->linkb = false;
 
+		/* parse the lcd record table */
+		if (lvds_info->info.usModePatchTableOffset) {
+			ATOM_FAKE_EDID_PATCH_RECORD *fake_edid_record;
+			ATOM_PANEL_RESOLUTION_PATCH_RECORD *panel_res_record;
+			bool bad_record = false;
+			u8 *record = (u8 *)(mode_info->atom_context->bios +
+					    data_offset +
+					    lvds_info->info.usModePatchTableOffset);
+			while (*record != ATOM_RECORD_END_TYPE) {
+				switch (*record) {
+				case LCD_MODE_PATCH_RECORD_MODE_TYPE:
+					record += sizeof(ATOM_PATCH_RECORD_MODE);
+					break;
+				case LCD_RTS_RECORD_TYPE:
+					record += sizeof(ATOM_LCD_RTS_RECORD);
+					break;
+				case LCD_CAP_RECORD_TYPE:
+					record += sizeof(ATOM_LCD_MODE_CONTROL_CAP);
+					break;
+				case LCD_FAKE_EDID_PATCH_RECORD_TYPE:
+					fake_edid_record = (ATOM_FAKE_EDID_PATCH_RECORD *)record;
+					if (fake_edid_record->ucFakeEDIDLength) {
+						struct edid *edid;
+						int edid_size =
+							max((int)EDID_LENGTH, (int)fake_edid_record->ucFakeEDIDLength);
+						edid = kmalloc(edid_size, GFP_KERNEL);
+						if (edid) {
+							memcpy((u8 *)edid, (u8 *)&fake_edid_record->ucFakeEDIDString[0],
+							       fake_edid_record->ucFakeEDIDLength);
+
+							if (drm_edid_is_valid(edid))
+								rdev->mode_info.bios_hardcoded_edid = edid;
+							else
+								kfree(edid);
+						}
+					}
+					record += sizeof(ATOM_FAKE_EDID_PATCH_RECORD);
+					break;
+				case LCD_PANEL_RESOLUTION_RECORD_TYPE:
+					panel_res_record = (ATOM_PANEL_RESOLUTION_PATCH_RECORD *)record;
+					lvds->native_mode.width_mm = panel_res_record->usHSize;
+					lvds->native_mode.height_mm = panel_res_record->usVSize;
+					record += sizeof(ATOM_PANEL_RESOLUTION_PATCH_RECORD);
+					break;
+				default:
+					DRM_ERROR("Bad LCD record %d\n", *record);
+					bad_record = true;
+					break;
+				}
+				if (bad_record)
+					break;
+			}
+		}
 	}
 	return lvds;
 }
@@ -1740,495 +1886,613 @@
 	"RV6xx",
 	"RV770",
 	"adt7473",
+	"NONE",
 	"External GPIO",
 	"Evergreen",
-	"adt7473 with internal",
-
+	"emc2103",
+	"Sumo",
+	"Northern Islands",
 };
 
 union power_info {
 	struct _ATOM_POWERPLAY_INFO info;
 	struct _ATOM_POWERPLAY_INFO_V2 info_2;
 	struct _ATOM_POWERPLAY_INFO_V3 info_3;
-	struct _ATOM_PPLIB_POWERPLAYTABLE info_4;
+	struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
+	struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
+	struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
 };
 
+union pplib_clock_info {
+	struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
+	struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
+	struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
+	struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
+};
+
+union pplib_power_state {
+	struct _ATOM_PPLIB_STATE v1;
+	struct _ATOM_PPLIB_STATE_V2 v2;
+};
+
+static void radeon_atombios_parse_misc_flags_1_3(struct radeon_device *rdev,
+						 int state_index,
+						 u32 misc, u32 misc2)
+{
+	rdev->pm.power_state[state_index].misc = misc;
+	rdev->pm.power_state[state_index].misc2 = misc2;
+	/* order matters! */
+	if (misc & ATOM_PM_MISCINFO_POWER_SAVING_MODE)
+		rdev->pm.power_state[state_index].type =
+			POWER_STATE_TYPE_POWERSAVE;
+	if (misc & ATOM_PM_MISCINFO_DEFAULT_DC_STATE_ENTRY_TRUE)
+		rdev->pm.power_state[state_index].type =
+			POWER_STATE_TYPE_BATTERY;
+	if (misc & ATOM_PM_MISCINFO_DEFAULT_LOW_DC_STATE_ENTRY_TRUE)
+		rdev->pm.power_state[state_index].type =
+			POWER_STATE_TYPE_BATTERY;
+	if (misc & ATOM_PM_MISCINFO_LOAD_BALANCE_EN)
+		rdev->pm.power_state[state_index].type =
+			POWER_STATE_TYPE_BALANCED;
+	if (misc & ATOM_PM_MISCINFO_3D_ACCELERATION_EN) {
+		rdev->pm.power_state[state_index].type =
+			POWER_STATE_TYPE_PERFORMANCE;
+		rdev->pm.power_state[state_index].flags &=
+			~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
+	}
+	if (misc2 & ATOM_PM_MISCINFO2_SYSTEM_AC_LITE_MODE)
+		rdev->pm.power_state[state_index].type =
+			POWER_STATE_TYPE_BALANCED;
+	if (misc & ATOM_PM_MISCINFO_DRIVER_DEFAULT_MODE) {
+		rdev->pm.power_state[state_index].type =
+			POWER_STATE_TYPE_DEFAULT;
+		rdev->pm.default_power_state_index = state_index;
+		rdev->pm.power_state[state_index].default_clock_mode =
+			&rdev->pm.power_state[state_index].clock_info[0];
+	} else if (state_index == 0) {
+		rdev->pm.power_state[state_index].clock_info[0].flags |=
+			RADEON_PM_MODE_NO_DISPLAY;
+	}
+}
+
+static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev)
+{
+	struct radeon_mode_info *mode_info = &rdev->mode_info;
+	u32 misc, misc2 = 0;
+	int num_modes = 0, i;
+	int state_index = 0;
+	struct radeon_i2c_bus_rec i2c_bus;
+	union power_info *power_info;
+	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
+	u16 data_offset;
+	u8 frev, crev;
+
+	if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
+				   &frev, &crev, &data_offset))
+		return state_index;
+	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
+
+	/* add the i2c bus for thermal/fan chip */
+	if (power_info->info.ucOverdriveThermalController > 0) {
+		DRM_INFO("Possible %s thermal controller at 0x%02x\n",
+			 thermal_controller_names[power_info->info.ucOverdriveThermalController],
+			 power_info->info.ucOverdriveControllerAddress >> 1);
+		i2c_bus = radeon_lookup_i2c_gpio(rdev, power_info->info.ucOverdriveI2cLine);
+		rdev->pm.i2c_bus = radeon_i2c_lookup(rdev, &i2c_bus);
+		if (rdev->pm.i2c_bus) {
+			struct i2c_board_info info = { };
+			const char *name = thermal_controller_names[power_info->info.
+								    ucOverdriveThermalController];
+			info.addr = power_info->info.ucOverdriveControllerAddress >> 1;
+			strlcpy(info.type, name, sizeof(info.type));
+			i2c_new_device(&rdev->pm.i2c_bus->adapter, &info);
+		}
+	}
+	num_modes = power_info->info.ucNumOfPowerModeEntries;
+	if (num_modes > ATOM_MAX_NUMBEROF_POWER_BLOCK)
+		num_modes = ATOM_MAX_NUMBEROF_POWER_BLOCK;
+	/* last mode is usually default, array is low to high */
+	for (i = 0; i < num_modes; i++) {
+		rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
+		switch (frev) {
+		case 1:
+			rdev->pm.power_state[state_index].num_clock_modes = 1;
+			rdev->pm.power_state[state_index].clock_info[0].mclk =
+				le16_to_cpu(power_info->info.asPowerPlayInfo[i].usMemoryClock);
+			rdev->pm.power_state[state_index].clock_info[0].sclk =
+				le16_to_cpu(power_info->info.asPowerPlayInfo[i].usEngineClock);
+			/* skip invalid modes */
+			if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) ||
+			    (rdev->pm.power_state[state_index].clock_info[0].sclk == 0))
+				continue;
+			rdev->pm.power_state[state_index].pcie_lanes =
+				power_info->info.asPowerPlayInfo[i].ucNumPciELanes;
+			misc = le32_to_cpu(power_info->info.asPowerPlayInfo[i].ulMiscInfo);
+			if ((misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) ||
+			    (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)) {
+				rdev->pm.power_state[state_index].clock_info[0].voltage.type =
+					VOLTAGE_GPIO;
+				rdev->pm.power_state[state_index].clock_info[0].voltage.gpio =
+					radeon_lookup_gpio(rdev,
+							   power_info->info.asPowerPlayInfo[i].ucVoltageDropIndex);
+				if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)
+					rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
+						true;
+				else
+					rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
+						false;
+			} else if (misc & ATOM_PM_MISCINFO_PROGRAM_VOLTAGE) {
+				rdev->pm.power_state[state_index].clock_info[0].voltage.type =
+					VOLTAGE_VDDC;
+				rdev->pm.power_state[state_index].clock_info[0].voltage.vddc_id =
+					power_info->info.asPowerPlayInfo[i].ucVoltageDropIndex;
+			}
+			rdev->pm.power_state[state_index].flags = RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
+			radeon_atombios_parse_misc_flags_1_3(rdev, state_index, misc, 0);
+			state_index++;
+			break;
+		case 2:
+			rdev->pm.power_state[state_index].num_clock_modes = 1;
+			rdev->pm.power_state[state_index].clock_info[0].mclk =
+				le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMemoryClock);
+			rdev->pm.power_state[state_index].clock_info[0].sclk =
+				le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulEngineClock);
+			/* skip invalid modes */
+			if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) ||
+			    (rdev->pm.power_state[state_index].clock_info[0].sclk == 0))
+				continue;
+			rdev->pm.power_state[state_index].pcie_lanes =
+				power_info->info_2.asPowerPlayInfo[i].ucNumPciELanes;
+			misc = le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMiscInfo);
+			misc2 = le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMiscInfo2);
+			if ((misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) ||
+			    (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)) {
+				rdev->pm.power_state[state_index].clock_info[0].voltage.type =
+					VOLTAGE_GPIO;
+				rdev->pm.power_state[state_index].clock_info[0].voltage.gpio =
+					radeon_lookup_gpio(rdev,
+							   power_info->info_2.asPowerPlayInfo[i].ucVoltageDropIndex);
+				if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)
+					rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
+						true;
+				else
+					rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
+						false;
+			} else if (misc & ATOM_PM_MISCINFO_PROGRAM_VOLTAGE) {
+				rdev->pm.power_state[state_index].clock_info[0].voltage.type =
+					VOLTAGE_VDDC;
+				rdev->pm.power_state[state_index].clock_info[0].voltage.vddc_id =
+					power_info->info_2.asPowerPlayInfo[i].ucVoltageDropIndex;
+			}
+			rdev->pm.power_state[state_index].flags = RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
+			radeon_atombios_parse_misc_flags_1_3(rdev, state_index, misc, misc2);
+			state_index++;
+			break;
+		case 3:
+			rdev->pm.power_state[state_index].num_clock_modes = 1;
+			rdev->pm.power_state[state_index].clock_info[0].mclk =
+				le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMemoryClock);
+			rdev->pm.power_state[state_index].clock_info[0].sclk =
+				le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulEngineClock);
+			/* skip invalid modes */
+			if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) ||
+			    (rdev->pm.power_state[state_index].clock_info[0].sclk == 0))
+				continue;
+			rdev->pm.power_state[state_index].pcie_lanes =
+				power_info->info_3.asPowerPlayInfo[i].ucNumPciELanes;
+			misc = le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMiscInfo);
+			misc2 = le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMiscInfo2);
+			if ((misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) ||
+			    (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)) {
+				rdev->pm.power_state[state_index].clock_info[0].voltage.type =
+					VOLTAGE_GPIO;
+				rdev->pm.power_state[state_index].clock_info[0].voltage.gpio =
+					radeon_lookup_gpio(rdev,
+							   power_info->info_3.asPowerPlayInfo[i].ucVoltageDropIndex);
+				if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)
+					rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
+						true;
+				else
+					rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
+						false;
+			} else if (misc & ATOM_PM_MISCINFO_PROGRAM_VOLTAGE) {
+				rdev->pm.power_state[state_index].clock_info[0].voltage.type =
+					VOLTAGE_VDDC;
+				rdev->pm.power_state[state_index].clock_info[0].voltage.vddc_id =
+					power_info->info_3.asPowerPlayInfo[i].ucVoltageDropIndex;
+				if (misc2 & ATOM_PM_MISCINFO2_VDDCI_DYNAMIC_VOLTAGE_EN) {
+					rdev->pm.power_state[state_index].clock_info[0].voltage.vddci_enabled =
+						true;
+					rdev->pm.power_state[state_index].clock_info[0].voltage.vddci_id =
+						power_info->info_3.asPowerPlayInfo[i].ucVDDCI_VoltageDropIndex;
+				}
+			}
+			rdev->pm.power_state[state_index].flags = RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
+			radeon_atombios_parse_misc_flags_1_3(rdev, state_index, misc, misc2);
+			state_index++;
+			break;
+		}
+	}
+	/* last mode is usually default */
+	if (rdev->pm.default_power_state_index == -1) {
+		rdev->pm.power_state[state_index - 1].type =
+			POWER_STATE_TYPE_DEFAULT;
+		rdev->pm.default_power_state_index = state_index - 1;
+		rdev->pm.power_state[state_index - 1].default_clock_mode =
+			&rdev->pm.power_state[state_index - 1].clock_info[0];
+		rdev->pm.power_state[state_index].flags &=
+			~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
+		rdev->pm.power_state[state_index].misc = 0;
+		rdev->pm.power_state[state_index].misc2 = 0;
+	}
+	return state_index;
+}
+
+static void radeon_atombios_add_pplib_thermal_controller(struct radeon_device *rdev,
+							 ATOM_PPLIB_THERMALCONTROLLER *controller)
+{
+	struct radeon_i2c_bus_rec i2c_bus;
+
+	/* add the i2c bus for thermal/fan chip */
+	if (controller->ucType > 0) {
+		if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) {
+			DRM_INFO("Internal thermal controller %s fan control\n",
+				 (controller->ucFanParameters &
+				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+			rdev->pm.int_thermal_type = THERMAL_TYPE_RV6XX;
+		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV770) {
+			DRM_INFO("Internal thermal controller %s fan control\n",
+				 (controller->ucFanParameters &
+				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+			rdev->pm.int_thermal_type = THERMAL_TYPE_RV770;
+		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EVERGREEN) {
+			DRM_INFO("Internal thermal controller %s fan control\n",
+				 (controller->ucFanParameters &
+				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+			rdev->pm.int_thermal_type = THERMAL_TYPE_EVERGREEN;
+		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SUMO) {
+			DRM_INFO("Internal thermal controller %s fan control\n",
+				 (controller->ucFanParameters &
+				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+			rdev->pm.int_thermal_type = THERMAL_TYPE_SUMO;
+		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_NISLANDS) {
+			DRM_INFO("Internal thermal controller %s fan control\n",
+				 (controller->ucFanParameters &
+				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+			rdev->pm.int_thermal_type = THERMAL_TYPE_NI;
+		} else if ((controller->ucType ==
+			    ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) ||
+			   (controller->ucType ==
+			    ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL) ||
+			   (controller->ucType ==
+			    ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL)) {
+			DRM_INFO("Special thermal controller config\n");
+		} else {
+			DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n",
+				 pp_lib_thermal_controller_names[controller->ucType],
+				 controller->ucI2cAddress >> 1,
+				 (controller->ucFanParameters &
+				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+			i2c_bus = radeon_lookup_i2c_gpio(rdev, controller->ucI2cLine);
+			rdev->pm.i2c_bus = radeon_i2c_lookup(rdev, &i2c_bus);
+			if (rdev->pm.i2c_bus) {
+				struct i2c_board_info info = { };
+				const char *name = pp_lib_thermal_controller_names[controller->ucType];
+				info.addr = controller->ucI2cAddress >> 1;
+				strlcpy(info.type, name, sizeof(info.type));
+				i2c_new_device(&rdev->pm.i2c_bus->adapter, &info);
+			}
+		}
+	}
+}
+
+static u16 radeon_atombios_get_default_vddc(struct radeon_device *rdev)
+{
+	struct radeon_mode_info *mode_info = &rdev->mode_info;
+	int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
+	u8 frev, crev;
+	u16 data_offset;
+	union firmware_info *firmware_info;
+	u16 vddc = 0;
+
+	if (atom_parse_data_header(mode_info->atom_context, index, NULL,
+				   &frev, &crev, &data_offset)) {
+		firmware_info =
+			(union firmware_info *)(mode_info->atom_context->bios +
+						data_offset);
+		vddc = firmware_info->info_14.usBootUpVDDCVoltage;
+	}
+
+	return vddc;
+}
+
+static void radeon_atombios_parse_pplib_non_clock_info(struct radeon_device *rdev,
+						       int state_index, int mode_index,
+						       struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info)
+{
+	int j;
+	u32 misc = le32_to_cpu(non_clock_info->ulCapsAndSettings);
+	u32 misc2 = le16_to_cpu(non_clock_info->usClassification);
+	u16 vddc = radeon_atombios_get_default_vddc(rdev);
+
+	rdev->pm.power_state[state_index].misc = misc;
+	rdev->pm.power_state[state_index].misc2 = misc2;
+	rdev->pm.power_state[state_index].pcie_lanes =
+		((misc & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >>
+		 ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1;
+	switch (misc2 & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
+	case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
+		rdev->pm.power_state[state_index].type =
+			POWER_STATE_TYPE_BATTERY;
+		break;
+	case ATOM_PPLIB_CLASSIFICATION_UI_BALANCED:
+		rdev->pm.power_state[state_index].type =
+			POWER_STATE_TYPE_BALANCED;
+		break;
+	case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
+		rdev->pm.power_state[state_index].type =
+			POWER_STATE_TYPE_PERFORMANCE;
+		break;
+	case ATOM_PPLIB_CLASSIFICATION_UI_NONE:
+		if (misc2 & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
+			rdev->pm.power_state[state_index].type =
+				POWER_STATE_TYPE_PERFORMANCE;
+		break;
+	}
+	rdev->pm.power_state[state_index].flags = 0;
+	if (misc & ATOM_PPLIB_SINGLE_DISPLAY_ONLY)
+		rdev->pm.power_state[state_index].flags |=
+			RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
+	if (misc2 & ATOM_PPLIB_CLASSIFICATION_BOOT) {
+		rdev->pm.power_state[state_index].type =
+			POWER_STATE_TYPE_DEFAULT;
+		rdev->pm.default_power_state_index = state_index;
+		rdev->pm.power_state[state_index].default_clock_mode =
+			&rdev->pm.power_state[state_index].clock_info[mode_index - 1];
+		if (ASIC_IS_DCE5(rdev)) {
+			/* NI chips post without MC ucode, so default clocks are strobe mode only */
+			rdev->pm.default_sclk = rdev->pm.power_state[state_index].clock_info[0].sclk;
+			rdev->pm.default_mclk = rdev->pm.power_state[state_index].clock_info[0].mclk;
+			rdev->pm.default_vddc = rdev->pm.power_state[state_index].clock_info[0].voltage.voltage;
+		} else {
+			/* patch the table values with the default sclk/mclk from firmware info */
+			for (j = 0; j < mode_index; j++) {
+				rdev->pm.power_state[state_index].clock_info[j].mclk =
+					rdev->clock.default_mclk;
+				rdev->pm.power_state[state_index].clock_info[j].sclk =
+					rdev->clock.default_sclk;
+				if (vddc)
+					rdev->pm.power_state[state_index].clock_info[j].voltage.voltage =
+						vddc;
+			}
+		}
+	}
+}
+
+static bool radeon_atombios_parse_pplib_clock_info(struct radeon_device *rdev,
+						   int state_index, int mode_index,
+						   union pplib_clock_info *clock_info)
+{
+	u32 sclk, mclk;
+
+	if (rdev->flags & RADEON_IS_IGP) {
+		if (rdev->family >= CHIP_PALM) {
+			sclk = le16_to_cpu(clock_info->sumo.usEngineClockLow);
+			sclk |= clock_info->sumo.ucEngineClockHigh << 16;
+			rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk;
+		} else {
+			sclk = le16_to_cpu(clock_info->rs780.usLowEngineClockLow);
+			sclk |= clock_info->rs780.ucLowEngineClockHigh << 16;
+			rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk;
+		}
+	} else if (ASIC_IS_DCE4(rdev)) {
+		sclk = le16_to_cpu(clock_info->evergreen.usEngineClockLow);
+		sclk |= clock_info->evergreen.ucEngineClockHigh << 16;
+		mclk = le16_to_cpu(clock_info->evergreen.usMemoryClockLow);
+		mclk |= clock_info->evergreen.ucMemoryClockHigh << 16;
+		rdev->pm.power_state[state_index].clock_info[mode_index].mclk = mclk;
+		rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk;
+		rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type =
+			VOLTAGE_SW;
+		rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage =
+			clock_info->evergreen.usVDDC;
+	} else {
+		sclk = le16_to_cpu(clock_info->r600.usEngineClockLow);
+		sclk |= clock_info->r600.ucEngineClockHigh << 16;
+		mclk = le16_to_cpu(clock_info->r600.usMemoryClockLow);
+		mclk |= clock_info->r600.ucMemoryClockHigh << 16;
+		rdev->pm.power_state[state_index].clock_info[mode_index].mclk = mclk;
+		rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk;
+		rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type =
+			VOLTAGE_SW;
+		rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage =
+			clock_info->r600.usVDDC;
+	}
+
+	if (rdev->flags & RADEON_IS_IGP) {
+		/* skip invalid modes */
+		if (rdev->pm.power_state[state_index].clock_info[mode_index].sclk == 0)
+			return false;
+	} else {
+		/* skip invalid modes */
+		if ((rdev->pm.power_state[state_index].clock_info[mode_index].mclk == 0) ||
+		    (rdev->pm.power_state[state_index].clock_info[mode_index].sclk == 0))
+			return false;
+	}
+	return true;
+}
+
+static int radeon_atombios_parse_power_table_4_5(struct radeon_device *rdev)
+{
+	struct radeon_mode_info *mode_info = &rdev->mode_info;
+	struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
+	union pplib_power_state *power_state;
+	int i, j;
+	int state_index = 0, mode_index = 0;
+	union pplib_clock_info *clock_info;
+	bool valid;
+	union power_info *power_info;
+	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
+	u16 data_offset;
+	u8 frev, crev;
+
+	if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
+				   &frev, &crev, &data_offset))
+		return state_index;
+	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
+
+	radeon_atombios_add_pplib_thermal_controller(rdev, &power_info->pplib.sThermalController);
+	/* first mode is usually default, followed by low to high */
+	for (i = 0; i < power_info->pplib.ucNumStates; i++) {
+		mode_index = 0;
+		power_state = (union pplib_power_state *)
+			(mode_info->atom_context->bios + data_offset +
+			 le16_to_cpu(power_info->pplib.usStateArrayOffset) +
+			 i * power_info->pplib.ucStateEntrySize);
+		non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
+			(mode_info->atom_context->bios + data_offset +
+			 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset) +
+			 (power_state->v1.ucNonClockStateIndex *
+			  power_info->pplib.ucNonClockSize));
+		for (j = 0; j < (power_info->pplib.ucStateEntrySize - 1); j++) {
+			clock_info = (union pplib_clock_info *)
+				(mode_info->atom_context->bios + data_offset +
+				 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset) +
+				 (power_state->v1.ucClockStateIndices[j] *
+				  power_info->pplib.ucClockInfoSize));
+			valid = radeon_atombios_parse_pplib_clock_info(rdev,
+								       state_index, mode_index,
+								       clock_info);
+			if (valid)
+				mode_index++;
+		}
+		rdev->pm.power_state[state_index].num_clock_modes = mode_index;
+		if (mode_index) {
+			radeon_atombios_parse_pplib_non_clock_info(rdev, state_index, mode_index,
+								   non_clock_info);
+			state_index++;
+		}
+	}
+	/* if multiple clock modes, mark the lowest as no display */
+	for (i = 0; i < state_index; i++) {
+		if (rdev->pm.power_state[i].num_clock_modes > 1)
+			rdev->pm.power_state[i].clock_info[0].flags |=
+				RADEON_PM_MODE_NO_DISPLAY;
+	}
+	/* first mode is usually default */
+	if (rdev->pm.default_power_state_index == -1) {
+		rdev->pm.power_state[0].type =
+			POWER_STATE_TYPE_DEFAULT;
+		rdev->pm.default_power_state_index = 0;
+		rdev->pm.power_state[0].default_clock_mode =
+			&rdev->pm.power_state[0].clock_info[0];
+	}
+	return state_index;
+}
+
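radeon_atombios_parse_power_table_4_5() walks the state and non-clock arrays by hand because the BIOS gives the entry stride at run time (ucStateEntrySize, ucNonClockSize), so plain C array indexing cannot be used. A hedged sketch of that addressing pattern, with made-up names:

#include <stdint.h>
#include <stddef.h>

/* Sketch only: compute the address of entry 'index' in a packed BIOS table
 * whose per-entry size is only known at run time. All names are made up;
 * the driver applies this pattern to the state, clock-info and
 * non-clock-info arrays. */
static const void *bios_table_entry(const uint8_t *bios, size_t data_offset,
				    uint16_t array_offset, uint8_t entry_size,
				    unsigned int index)
{
	return bios + data_offset + array_offset + (size_t)index * entry_size;
}
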
+static int radeon_atombios_parse_power_table_6(struct radeon_device *rdev)
+{
+	struct radeon_mode_info *mode_info = &rdev->mode_info;
+	struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
+	union pplib_power_state *power_state;
+	int i, j, non_clock_array_index, clock_array_index;
+	int state_index = 0, mode_index = 0;
+	union pplib_clock_info *clock_info;
+	struct StateArray *state_array;
+	struct ClockInfoArray *clock_info_array;
+	struct NonClockInfoArray *non_clock_info_array;
+	bool valid;
+	union power_info *power_info;
+	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
+	u16 data_offset;
+	u8 frev, crev;
+
+	if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
+				   &frev, &crev, &data_offset))
+		return state_index;
+	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
+
+	radeon_atombios_add_pplib_thermal_controller(rdev, &power_info->pplib.sThermalController);
+	state_array = (struct StateArray *)
+		(mode_info->atom_context->bios + data_offset +
+		 power_info->pplib.usStateArrayOffset);
+	clock_info_array = (struct ClockInfoArray *)
+		(mode_info->atom_context->bios + data_offset +
+		 power_info->pplib.usClockInfoArrayOffset);
+	non_clock_info_array = (struct NonClockInfoArray *)
+		(mode_info->atom_context->bios + data_offset +
+		 power_info->pplib.usNonClockInfoArrayOffset);
+	for (i = 0; i < state_array->ucNumEntries; i++) {
+		mode_index = 0;
+		power_state = (union pplib_power_state *)&state_array->states[i];
+		/* XXX this might be an inagua bug... */
+		non_clock_array_index = i; /* power_state->v2.nonClockInfoIndex */
+		non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
+			&non_clock_info_array->nonClockInfo[non_clock_array_index];
+		for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
+			clock_array_index = power_state->v2.clockInfoIndex[j];
+			/* XXX this might be an inagua bug... */
+			if (clock_array_index >= clock_info_array->ucNumEntries)
+				continue;
+			clock_info = (union pplib_clock_info *)
+				&clock_info_array->clockInfo[clock_array_index];
+			valid = radeon_atombios_parse_pplib_clock_info(rdev,
+								       state_index, mode_index,
+								       clock_info);
+			if (valid)
+				mode_index++;
+		}
+		rdev->pm.power_state[state_index].num_clock_modes = mode_index;
+		if (mode_index) {
+			radeon_atombios_parse_pplib_non_clock_info(rdev, state_index, mode_index,
+								   non_clock_info);
+			state_index++;
+		}
+	}
+	/* if multiple clock modes, mark the lowest as no display */
+	for (i = 0; i < state_index; i++) {
+		if (rdev->pm.power_state[i].num_clock_modes > 1)
+			rdev->pm.power_state[i].clock_info[0].flags |=
+				RADEON_PM_MODE_NO_DISPLAY;
+	}
+	/* first mode is usually default */
+	if (rdev->pm.default_power_state_index == -1) {
+		rdev->pm.power_state[0].type =
+			POWER_STATE_TYPE_DEFAULT;
+		rdev->pm.default_power_state_index = 0;
+		rdev->pm.power_state[0].default_clock_mode =
+			&rdev->pm.power_state[0].clock_info[0];
+	}
+	return state_index;
+}
+
 void radeon_atombios_get_power_modes(struct radeon_device *rdev)
 {
 	struct radeon_mode_info *mode_info = &rdev->mode_info;
 	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
 	u16 data_offset;
 	u8 frev, crev;
-	u32 misc, misc2 = 0, sclk, mclk;
-	union power_info *power_info;
-	struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
-	struct _ATOM_PPLIB_STATE *power_state;
-	int num_modes = 0, i, j;
-	int state_index = 0, mode_index = 0;
-	struct radeon_i2c_bus_rec i2c_bus;
+	int state_index = 0;
 
 	rdev->pm.default_power_state_index = -1;
 
 	if (atom_parse_data_header(mode_info->atom_context, index, NULL,
 				   &frev, &crev, &data_offset)) {
-		power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
-		if (frev < 4) {
-			/* add the i2c bus for thermal/fan chip */
-			if (power_info->info.ucOverdriveThermalController > 0) {
-				DRM_INFO("Possible %s thermal controller at 0x%02x\n",
-					 thermal_controller_names[power_info->info.ucOverdriveThermalController],
-					 power_info->info.ucOverdriveControllerAddress >> 1);
-				i2c_bus = radeon_lookup_i2c_gpio(rdev, power_info->info.ucOverdriveI2cLine);
-				rdev->pm.i2c_bus = radeon_i2c_lookup(rdev, &i2c_bus);
-				if (rdev->pm.i2c_bus) {
-					struct i2c_board_info info = { };
-					const char *name = thermal_controller_names[power_info->info.
-										    ucOverdriveThermalController];
-					info.addr = power_info->info.ucOverdriveControllerAddress >> 1;
-					strlcpy(info.type, name, sizeof(info.type));
-					i2c_new_device(&rdev->pm.i2c_bus->adapter, &info);
-				}
-			}
-			num_modes = power_info->info.ucNumOfPowerModeEntries;
-			if (num_modes > ATOM_MAX_NUMBEROF_POWER_BLOCK)
-				num_modes = ATOM_MAX_NUMBEROF_POWER_BLOCK;
-			/* last mode is usually default, array is low to high */
-			for (i = 0; i < num_modes; i++) {
-				rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
-				switch (frev) {
-				case 1:
-					rdev->pm.power_state[state_index].num_clock_modes = 1;
-					rdev->pm.power_state[state_index].clock_info[0].mclk =
-						le16_to_cpu(power_info->info.asPowerPlayInfo[i].usMemoryClock);
-					rdev->pm.power_state[state_index].clock_info[0].sclk =
-						le16_to_cpu(power_info->info.asPowerPlayInfo[i].usEngineClock);
-					/* skip invalid modes */
-					if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) ||
-					    (rdev->pm.power_state[state_index].clock_info[0].sclk == 0))
-						continue;
-					rdev->pm.power_state[state_index].pcie_lanes =
-						power_info->info.asPowerPlayInfo[i].ucNumPciELanes;
-					misc = le32_to_cpu(power_info->info.asPowerPlayInfo[i].ulMiscInfo);
-					if ((misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) ||
-					    (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)) {
-						rdev->pm.power_state[state_index].clock_info[0].voltage.type =
-							VOLTAGE_GPIO;
-						rdev->pm.power_state[state_index].clock_info[0].voltage.gpio =
-							radeon_lookup_gpio(rdev,
-							power_info->info.asPowerPlayInfo[i].ucVoltageDropIndex);
-						if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)
-							rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
-								true;
-						else
-							rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
-								false;
-					} else if (misc & ATOM_PM_MISCINFO_PROGRAM_VOLTAGE) {
-						rdev->pm.power_state[state_index].clock_info[0].voltage.type =
-							VOLTAGE_VDDC;
-						rdev->pm.power_state[state_index].clock_info[0].voltage.vddc_id =
-							power_info->info.asPowerPlayInfo[i].ucVoltageDropIndex;
-					}
-					rdev->pm.power_state[state_index].flags = RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
-					rdev->pm.power_state[state_index].misc = misc;
-					/* order matters! */
-					if (misc & ATOM_PM_MISCINFO_POWER_SAVING_MODE)
-						rdev->pm.power_state[state_index].type =
-							POWER_STATE_TYPE_POWERSAVE;
-					if (misc & ATOM_PM_MISCINFO_DEFAULT_DC_STATE_ENTRY_TRUE)
-						rdev->pm.power_state[state_index].type =
-							POWER_STATE_TYPE_BATTERY;
-					if (misc & ATOM_PM_MISCINFO_DEFAULT_LOW_DC_STATE_ENTRY_TRUE)
-						rdev->pm.power_state[state_index].type =
-							POWER_STATE_TYPE_BATTERY;
-					if (misc & ATOM_PM_MISCINFO_LOAD_BALANCE_EN)
-						rdev->pm.power_state[state_index].type =
-							POWER_STATE_TYPE_BALANCED;
-					if (misc & ATOM_PM_MISCINFO_3D_ACCELERATION_EN) {
-						rdev->pm.power_state[state_index].type =
-							POWER_STATE_TYPE_PERFORMANCE;
-						rdev->pm.power_state[state_index].flags &=
-							~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
-					}
-					if (misc & ATOM_PM_MISCINFO_DRIVER_DEFAULT_MODE) {
-						rdev->pm.power_state[state_index].type =
-							POWER_STATE_TYPE_DEFAULT;
-						rdev->pm.default_power_state_index = state_index;
-						rdev->pm.power_state[state_index].default_clock_mode =
-							&rdev->pm.power_state[state_index].clock_info[0];
-						rdev->pm.power_state[state_index].flags &=
-							~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
-					} else if (state_index == 0) {
-						rdev->pm.power_state[state_index].clock_info[0].flags |=
-							RADEON_PM_MODE_NO_DISPLAY;
-					}
-					state_index++;
-					break;
-				case 2:
-					rdev->pm.power_state[state_index].num_clock_modes = 1;
-					rdev->pm.power_state[state_index].clock_info[0].mclk =
-						le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMemoryClock);
-					rdev->pm.power_state[state_index].clock_info[0].sclk =
-						le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulEngineClock);
-					/* skip invalid modes */
-					if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) ||
-					    (rdev->pm.power_state[state_index].clock_info[0].sclk == 0))
-						continue;
-					rdev->pm.power_state[state_index].pcie_lanes =
-						power_info->info_2.asPowerPlayInfo[i].ucNumPciELanes;
-					misc = le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMiscInfo);
-					misc2 = le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMiscInfo2);
-					if ((misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) ||
-					    (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)) {
-						rdev->pm.power_state[state_index].clock_info[0].voltage.type =
-							VOLTAGE_GPIO;
-						rdev->pm.power_state[state_index].clock_info[0].voltage.gpio =
-							radeon_lookup_gpio(rdev,
-							power_info->info_2.asPowerPlayInfo[i].ucVoltageDropIndex);
-						if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)
-							rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
-								true;
-						else
-							rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
-								false;
-					} else if (misc & ATOM_PM_MISCINFO_PROGRAM_VOLTAGE) {
-						rdev->pm.power_state[state_index].clock_info[0].voltage.type =
-							VOLTAGE_VDDC;
-						rdev->pm.power_state[state_index].clock_info[0].voltage.vddc_id =
-							power_info->info_2.asPowerPlayInfo[i].ucVoltageDropIndex;
-					}
-					rdev->pm.power_state[state_index].flags = RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
-					rdev->pm.power_state[state_index].misc = misc;
-					rdev->pm.power_state[state_index].misc2 = misc2;
-					/* order matters! */
-					if (misc & ATOM_PM_MISCINFO_POWER_SAVING_MODE)
-						rdev->pm.power_state[state_index].type =
-							POWER_STATE_TYPE_POWERSAVE;
-					if (misc & ATOM_PM_MISCINFO_DEFAULT_DC_STATE_ENTRY_TRUE)
-						rdev->pm.power_state[state_index].type =
-							POWER_STATE_TYPE_BATTERY;
-					if (misc & ATOM_PM_MISCINFO_DEFAULT_LOW_DC_STATE_ENTRY_TRUE)
-						rdev->pm.power_state[state_index].type =
-							POWER_STATE_TYPE_BATTERY;
-					if (misc & ATOM_PM_MISCINFO_LOAD_BALANCE_EN)
-						rdev->pm.power_state[state_index].type =
-							POWER_STATE_TYPE_BALANCED;
-					if (misc & ATOM_PM_MISCINFO_3D_ACCELERATION_EN) {
-						rdev->pm.power_state[state_index].type =
-							POWER_STATE_TYPE_PERFORMANCE;
-						rdev->pm.power_state[state_index].flags &=
-							~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
-					}
-					if (misc2 & ATOM_PM_MISCINFO2_SYSTEM_AC_LITE_MODE)
-						rdev->pm.power_state[state_index].type =
-							POWER_STATE_TYPE_BALANCED;
-					if (misc2 & ATOM_PM_MISCINFO2_MULTI_DISPLAY_SUPPORT)
-						rdev->pm.power_state[state_index].flags &=
-							~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
-					if (misc & ATOM_PM_MISCINFO_DRIVER_DEFAULT_MODE) {
-						rdev->pm.power_state[state_index].type =
-							POWER_STATE_TYPE_DEFAULT;
-						rdev->pm.default_power_state_index = state_index;
-						rdev->pm.power_state[state_index].default_clock_mode =
-							&rdev->pm.power_state[state_index].clock_info[0];
-						rdev->pm.power_state[state_index].flags &=
-							~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
-					} else if (state_index == 0) {
-						rdev->pm.power_state[state_index].clock_info[0].flags |=
-							RADEON_PM_MODE_NO_DISPLAY;
-					}
-					state_index++;
-					break;
-				case 3:
-					rdev->pm.power_state[state_index].num_clock_modes = 1;
-					rdev->pm.power_state[state_index].clock_info[0].mclk =
-						le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMemoryClock);
-					rdev->pm.power_state[state_index].clock_info[0].sclk =
-						le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulEngineClock);
-					/* skip invalid modes */
-					if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) ||
-					    (rdev->pm.power_state[state_index].clock_info[0].sclk == 0))
-						continue;
-					rdev->pm.power_state[state_index].pcie_lanes =
-						power_info->info_3.asPowerPlayInfo[i].ucNumPciELanes;
-					misc = le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMiscInfo);
-					misc2 = le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMiscInfo2);
-					if ((misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) ||
-					    (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)) {
-						rdev->pm.power_state[state_index].clock_info[0].voltage.type =
-							VOLTAGE_GPIO;
-						rdev->pm.power_state[state_index].clock_info[0].voltage.gpio =
-							radeon_lookup_gpio(rdev,
-							power_info->info_3.asPowerPlayInfo[i].ucVoltageDropIndex);
-						if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)
-							rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
-								true;
-						else
-							rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
-								false;
-					} else if (misc & ATOM_PM_MISCINFO_PROGRAM_VOLTAGE) {
-						rdev->pm.power_state[state_index].clock_info[0].voltage.type =
-							VOLTAGE_VDDC;
-						rdev->pm.power_state[state_index].clock_info[0].voltage.vddc_id =
-							power_info->info_3.asPowerPlayInfo[i].ucVoltageDropIndex;
-						if (misc2 & ATOM_PM_MISCINFO2_VDDCI_DYNAMIC_VOLTAGE_EN) {
-							rdev->pm.power_state[state_index].clock_info[0].voltage.vddci_enabled =
-								true;
-							rdev->pm.power_state[state_index].clock_info[0].voltage.vddci_id =
-							power_info->info_3.asPowerPlayInfo[i].ucVDDCI_VoltageDropIndex;
-						}
-					}
-					rdev->pm.power_state[state_index].flags = RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
-					rdev->pm.power_state[state_index].misc = misc;
-					rdev->pm.power_state[state_index].misc2 = misc2;
-					/* order matters! */
-					if (misc & ATOM_PM_MISCINFO_POWER_SAVING_MODE)
-						rdev->pm.power_state[state_index].type =
-							POWER_STATE_TYPE_POWERSAVE;
-					if (misc & ATOM_PM_MISCINFO_DEFAULT_DC_STATE_ENTRY_TRUE)
-						rdev->pm.power_state[state_index].type =
-							POWER_STATE_TYPE_BATTERY;
-					if (misc & ATOM_PM_MISCINFO_DEFAULT_LOW_DC_STATE_ENTRY_TRUE)
-						rdev->pm.power_state[state_index].type =
-							POWER_STATE_TYPE_BATTERY;
-					if (misc & ATOM_PM_MISCINFO_LOAD_BALANCE_EN)
-						rdev->pm.power_state[state_index].type =
-							POWER_STATE_TYPE_BALANCED;
-					if (misc & ATOM_PM_MISCINFO_3D_ACCELERATION_EN) {
-						rdev->pm.power_state[state_index].type =
-							POWER_STATE_TYPE_PERFORMANCE;
-						rdev->pm.power_state[state_index].flags &=
-							~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
-					}
-					if (misc2 & ATOM_PM_MISCINFO2_SYSTEM_AC_LITE_MODE)
-						rdev->pm.power_state[state_index].type =
-							POWER_STATE_TYPE_BALANCED;
-					if (misc & ATOM_PM_MISCINFO_DRIVER_DEFAULT_MODE) {
-						rdev->pm.power_state[state_index].type =
-							POWER_STATE_TYPE_DEFAULT;
-						rdev->pm.default_power_state_index = state_index;
-						rdev->pm.power_state[state_index].default_clock_mode =
-							&rdev->pm.power_state[state_index].clock_info[0];
-					} else if (state_index == 0) {
-						rdev->pm.power_state[state_index].clock_info[0].flags |=
-							RADEON_PM_MODE_NO_DISPLAY;
-					}
-					state_index++;
-					break;
-				}
-			}
-			/* last mode is usually default */
-			if (rdev->pm.default_power_state_index == -1) {
-				rdev->pm.power_state[state_index - 1].type =
-					POWER_STATE_TYPE_DEFAULT;
-				rdev->pm.default_power_state_index = state_index - 1;
-				rdev->pm.power_state[state_index - 1].default_clock_mode =
-					&rdev->pm.power_state[state_index - 1].clock_info[0];
-				rdev->pm.power_state[state_index].flags &=
-					~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
-				rdev->pm.power_state[state_index].misc = 0;
-				rdev->pm.power_state[state_index].misc2 = 0;
-			}
-		} else {
-			int fw_index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
-			uint8_t fw_frev, fw_crev;
-			uint16_t fw_data_offset, vddc = 0;
-			union firmware_info *firmware_info;
-			ATOM_PPLIB_THERMALCONTROLLER *controller = &power_info->info_4.sThermalController;
-
-			if (atom_parse_data_header(mode_info->atom_context, fw_index, NULL,
-						   &fw_frev, &fw_crev, &fw_data_offset)) {
-				firmware_info =
-					(union firmware_info *)(mode_info->atom_context->bios +
-								fw_data_offset);
-				vddc = firmware_info->info_14.usBootUpVDDCVoltage;
-			}
-
-			/* add the i2c bus for thermal/fan chip */
-			if (controller->ucType > 0) {
-				if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) {
-					DRM_INFO("Internal thermal controller %s fan control\n",
-						 (controller->ucFanParameters &
-						  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
-					rdev->pm.int_thermal_type = THERMAL_TYPE_RV6XX;
-				} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV770) {
-					DRM_INFO("Internal thermal controller %s fan control\n",
-						 (controller->ucFanParameters &
-						  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
-					rdev->pm.int_thermal_type = THERMAL_TYPE_RV770;
-				} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EVERGREEN) {
-					DRM_INFO("Internal thermal controller %s fan control\n",
-						 (controller->ucFanParameters &
-						  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
-					rdev->pm.int_thermal_type = THERMAL_TYPE_EVERGREEN;
-				} else if ((controller->ucType ==
-					    ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) ||
-					   (controller->ucType ==
-					    ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL)) {
-					DRM_INFO("Special thermal controller config\n");
-				} else {
-					DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n",
-						 pp_lib_thermal_controller_names[controller->ucType],
-						 controller->ucI2cAddress >> 1,
-						 (controller->ucFanParameters &
-						  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
-					i2c_bus = radeon_lookup_i2c_gpio(rdev, controller->ucI2cLine);
-					rdev->pm.i2c_bus = radeon_i2c_lookup(rdev, &i2c_bus);
-					if (rdev->pm.i2c_bus) {
-						struct i2c_board_info info = { };
-						const char *name = pp_lib_thermal_controller_names[controller->ucType];
-						info.addr = controller->ucI2cAddress >> 1;
-						strlcpy(info.type, name, sizeof(info.type));
-						i2c_new_device(&rdev->pm.i2c_bus->adapter, &info);
-					}
-
-				}
-			}
-			/* first mode is usually default, followed by low to high */
-			for (i = 0; i < power_info->info_4.ucNumStates; i++) {
-				mode_index = 0;
-				power_state = (struct _ATOM_PPLIB_STATE *)
-					(mode_info->atom_context->bios +
-					 data_offset +
-					 le16_to_cpu(power_info->info_4.usStateArrayOffset) +
-					 i * power_info->info_4.ucStateEntrySize);
-				non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
-					(mode_info->atom_context->bios +
-					 data_offset +
-					 le16_to_cpu(power_info->info_4.usNonClockInfoArrayOffset) +
-					 (power_state->ucNonClockStateIndex *
-					  power_info->info_4.ucNonClockSize));
-				for (j = 0; j < (power_info->info_4.ucStateEntrySize - 1); j++) {
-					if (rdev->flags & RADEON_IS_IGP) {
-						struct _ATOM_PPLIB_RS780_CLOCK_INFO *clock_info =
-							(struct _ATOM_PPLIB_RS780_CLOCK_INFO *)
-							(mode_info->atom_context->bios +
-							 data_offset +
-							 le16_to_cpu(power_info->info_4.usClockInfoArrayOffset) +
-							 (power_state->ucClockStateIndices[j] *
-							  power_info->info_4.ucClockInfoSize));
-						sclk = le16_to_cpu(clock_info->usLowEngineClockLow);
-						sclk |= clock_info->ucLowEngineClockHigh << 16;
-						rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk;
-						/* skip invalid modes */
-						if (rdev->pm.power_state[state_index].clock_info[mode_index].sclk == 0)
-							continue;
-						/* voltage works differently on IGPs */
-						mode_index++;
-					} else if (ASIC_IS_DCE4(rdev)) {
-						struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO *clock_info =
-							(struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO *)
-							(mode_info->atom_context->bios +
-							 data_offset +
-							 le16_to_cpu(power_info->info_4.usClockInfoArrayOffset) +
-							 (power_state->ucClockStateIndices[j] *
-							  power_info->info_4.ucClockInfoSize));
-						sclk = le16_to_cpu(clock_info->usEngineClockLow);
-						sclk |= clock_info->ucEngineClockHigh << 16;
-						mclk = le16_to_cpu(clock_info->usMemoryClockLow);
-						mclk |= clock_info->ucMemoryClockHigh << 16;
-						rdev->pm.power_state[state_index].clock_info[mode_index].mclk = mclk;
-						rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk;
-						/* skip invalid modes */
-						if ((rdev->pm.power_state[state_index].clock_info[mode_index].mclk == 0) ||
-						    (rdev->pm.power_state[state_index].clock_info[mode_index].sclk == 0))
-							continue;
-						rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type =
-							VOLTAGE_SW;
-						rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage =
-							clock_info->usVDDC;
-						/* XXX usVDDCI */
-						mode_index++;
-					} else {
-						struct _ATOM_PPLIB_R600_CLOCK_INFO *clock_info =
-							(struct _ATOM_PPLIB_R600_CLOCK_INFO *)
-							(mode_info->atom_context->bios +
-							 data_offset +
-							 le16_to_cpu(power_info->info_4.usClockInfoArrayOffset) +
-							 (power_state->ucClockStateIndices[j] *
-							  power_info->info_4.ucClockInfoSize));
-						sclk = le16_to_cpu(clock_info->usEngineClockLow);
-						sclk |= clock_info->ucEngineClockHigh << 16;
-						mclk = le16_to_cpu(clock_info->usMemoryClockLow);
-						mclk |= clock_info->ucMemoryClockHigh << 16;
-						rdev->pm.power_state[state_index].clock_info[mode_index].mclk = mclk;
-						rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk;
-						/* skip invalid modes */
-						if ((rdev->pm.power_state[state_index].clock_info[mode_index].mclk == 0) ||
-						    (rdev->pm.power_state[state_index].clock_info[mode_index].sclk == 0))
-							continue;
-						rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type =
-							VOLTAGE_SW;
-						rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage =
-							clock_info->usVDDC;
-						mode_index++;
-					}
-				}
-				rdev->pm.power_state[state_index].num_clock_modes = mode_index;
-				if (mode_index) {
-					misc = le32_to_cpu(non_clock_info->ulCapsAndSettings);
-					misc2 = le16_to_cpu(non_clock_info->usClassification);
-					rdev->pm.power_state[state_index].misc = misc;
-					rdev->pm.power_state[state_index].misc2 = misc2;
-					rdev->pm.power_state[state_index].pcie_lanes =
-						((misc & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >>
-						ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1;
-					switch (misc2 & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
-					case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
-						rdev->pm.power_state[state_index].type =
-							POWER_STATE_TYPE_BATTERY;
-						break;
-					case ATOM_PPLIB_CLASSIFICATION_UI_BALANCED:
-						rdev->pm.power_state[state_index].type =
-							POWER_STATE_TYPE_BALANCED;
-						break;
-					case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
-						rdev->pm.power_state[state_index].type =
-							POWER_STATE_TYPE_PERFORMANCE;
-						break;
-					case ATOM_PPLIB_CLASSIFICATION_UI_NONE:
-						if (misc2 & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
-							rdev->pm.power_state[state_index].type =
-								POWER_STATE_TYPE_PERFORMANCE;
-						break;
-					}
-					rdev->pm.power_state[state_index].flags = 0;
-					if (misc & ATOM_PPLIB_SINGLE_DISPLAY_ONLY)
-						rdev->pm.power_state[state_index].flags |=
-							RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
-					if (misc2 & ATOM_PPLIB_CLASSIFICATION_BOOT) {
-						rdev->pm.power_state[state_index].type =
-							POWER_STATE_TYPE_DEFAULT;
-						rdev->pm.default_power_state_index = state_index;
-						rdev->pm.power_state[state_index].default_clock_mode =
-							&rdev->pm.power_state[state_index].clock_info[mode_index - 1];
-						/* patch the table values with the default slck/mclk from firmware info */
-						for (j = 0; j < mode_index; j++) {
-							rdev->pm.power_state[state_index].clock_info[j].mclk =
-								rdev->clock.default_mclk;
-							rdev->pm.power_state[state_index].clock_info[j].sclk =
-								rdev->clock.default_sclk;
-							if (vddc)
-								rdev->pm.power_state[state_index].clock_info[j].voltage.voltage =
-									vddc;
-						}
-					}
-					state_index++;
-				}
-			}
-			/* if multiple clock modes, mark the lowest as no display */
-			for (i = 0; i < state_index; i++) {
-				if (rdev->pm.power_state[i].num_clock_modes > 1)
-					rdev->pm.power_state[i].clock_info[0].flags |=
-						RADEON_PM_MODE_NO_DISPLAY;
-			}
-			/* first mode is usually default */
-			if (rdev->pm.default_power_state_index == -1) {
-				rdev->pm.power_state[0].type =
-					POWER_STATE_TYPE_DEFAULT;
-				rdev->pm.default_power_state_index = 0;
-				rdev->pm.power_state[0].default_clock_mode =
-					&rdev->pm.power_state[0].clock_info[0];
-			}
+		switch (frev) {
+		case 1:
+		case 2:
+		case 3:
+			state_index = radeon_atombios_parse_power_table_1_3(rdev);
+			break;
+		case 4:
+		case 5:
+			state_index = radeon_atombios_parse_power_table_4_5(rdev);
+			break;
+		case 6:
+			state_index = radeon_atombios_parse_power_table_6(rdev);
+			break;
+		default:
+			break;
 		}
 	} else {
 		/* add the default mode */
diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c
index 8f2c7b5..1aba85c 100644
--- a/drivers/gpu/drm/radeon/radeon_bios.c
+++ b/drivers/gpu/drm/radeon/radeon_bios.c
@@ -131,6 +131,45 @@
 	return true;
 }
 
+static bool ni_read_disabled_bios(struct radeon_device *rdev)
+{
+	u32 bus_cntl;
+	u32 d1vga_control;
+	u32 d2vga_control;
+	u32 vga_render_control;
+	u32 rom_cntl;
+	bool r;
+
+	bus_cntl = RREG32(R600_BUS_CNTL);
+	d1vga_control = RREG32(AVIVO_D1VGA_CONTROL);
+	d2vga_control = RREG32(AVIVO_D2VGA_CONTROL);
+	vga_render_control = RREG32(AVIVO_VGA_RENDER_CONTROL);
+	rom_cntl = RREG32(R600_ROM_CNTL);
+
+	/* enable the rom */
+	WREG32(R600_BUS_CNTL, (bus_cntl & ~R600_BIOS_ROM_DIS));
+	/* Disable VGA mode */
+	WREG32(AVIVO_D1VGA_CONTROL,
+	       (d1vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE |
+		AVIVO_DVGA_CONTROL_TIMING_SELECT)));
+	WREG32(AVIVO_D2VGA_CONTROL,
+	       (d2vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE |
+		AVIVO_DVGA_CONTROL_TIMING_SELECT)));
+	WREG32(AVIVO_VGA_RENDER_CONTROL,
+	       (vga_render_control & ~AVIVO_VGA_VSTATUS_CNTL_MASK));
+	WREG32(R600_ROM_CNTL, rom_cntl | R600_SCK_OVERWRITE);
+
+	r = radeon_read_bios(rdev);
+
+	/* restore regs */
+	WREG32(R600_BUS_CNTL, bus_cntl);
+	WREG32(AVIVO_D1VGA_CONTROL, d1vga_control);
+	WREG32(AVIVO_D2VGA_CONTROL, d2vga_control);
+	WREG32(AVIVO_VGA_RENDER_CONTROL, vga_render_control);
+	WREG32(R600_ROM_CNTL, rom_cntl);
+	return r;
+}
+
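ni_read_disabled_bios() follows the same save/modify/restore pattern as the older r700/r600 variants: snapshot the registers, flip the bits that enable the ROM and disable VGA, read the BIOS, then put everything back. A self-contained sketch of the idiom against a fake register file (all names invented for illustration):

#include <stdint.h>
#include <stdbool.h>

/* Sketch only: the save/modify/restore idiom around the ROM read, using a
 * fake in-memory register instead of MMIO. FAKE_ROM_DIS and fake_bus_cntl
 * stand in for R600_BIOS_ROM_DIS and R600_BUS_CNTL. */
#define FAKE_ROM_DIS (1u << 1)

static uint32_t fake_bus_cntl;

static bool read_bios_with_rom_enabled(bool (*read_bios)(void))
{
	uint32_t saved = fake_bus_cntl;
	bool ok;

	fake_bus_cntl = saved & ~FAKE_ROM_DIS;	/* enable the ROM */
	ok = read_bios();			/* do the actual read */
	fake_bus_cntl = saved;			/* restore the register */
	return ok;
}
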
 static bool r700_read_disabled_bios(struct radeon_device *rdev)
 {
 	uint32_t viph_control;
@@ -416,6 +455,8 @@
 {
 	if (rdev->flags & RADEON_IS_IGP)
 		return igp_read_bios_from_vram(rdev);
+	else if (rdev->family >= CHIP_BARTS)
+		return ni_read_disabled_bios(rdev);
 	else if (rdev->family >= CHIP_RV770)
 		return r700_read_disabled_bios(rdev);
 	else if (rdev->family >= CHIP_R600)
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 137b807..591fcae 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -471,8 +471,9 @@
 	return true;
 }
 
+/* this is used for atom LCDs as well */
 struct edid *
-radeon_combios_get_hardcoded_edid(struct radeon_device *rdev)
+radeon_bios_get_hardcoded_edid(struct radeon_device *rdev)
 {
 	if (rdev->mode_info.bios_hardcoded_edid)
 		return rdev->mode_info.bios_hardcoded_edid;
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 8afaf7a..22b7e3d 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -472,6 +472,9 @@
 	if (mode) {
 		ret = 1;
 		drm_mode_probed_add(connector, mode);
+		/* add the width/height from vbios tables if available */
+		connector->display_info.width_mm = mode->width_mm;
+		connector->display_info.height_mm = mode->height_mm;
 		/* add scaled modes */
 		radeon_add_common_modes(encoder, connector);
 	}
@@ -1216,7 +1219,7 @@
 		if (ASIC_IS_AVIVO(rdev)) {
 			drm_connector_attach_property(&radeon_connector->base,
 						      rdev->mode_info.underscan_property,
-						      UNDERSCAN_AUTO);
+						      UNDERSCAN_OFF);
 			drm_connector_attach_property(&radeon_connector->base,
 						      rdev->mode_info.underscan_hborder_property,
 						      0);
@@ -1256,7 +1259,7 @@
 		if (ASIC_IS_AVIVO(rdev)) {
 			drm_connector_attach_property(&radeon_connector->base,
 						      rdev->mode_info.underscan_property,
-						      UNDERSCAN_AUTO);
+						      UNDERSCAN_OFF);
 			drm_connector_attach_property(&radeon_connector->base,
 						      rdev->mode_info.underscan_hborder_property,
 						      0);
@@ -1299,7 +1302,7 @@
 		if (ASIC_IS_AVIVO(rdev)) {
 			drm_connector_attach_property(&radeon_connector->base,
 						      rdev->mode_info.underscan_property,
-						      UNDERSCAN_AUTO);
+						      UNDERSCAN_OFF);
 			drm_connector_attach_property(&radeon_connector->base,
 						      rdev->mode_info.underscan_hborder_property,
 						      0);
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 6d64a27..35b5eb8 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -77,13 +77,13 @@
 			p->relocs_ptr[i] = &p->relocs[i];
 			p->relocs[i].robj = p->relocs[i].gobj->driver_private;
 			p->relocs[i].lobj.bo = p->relocs[i].robj;
-			p->relocs[i].lobj.rdomain = r->read_domains;
 			p->relocs[i].lobj.wdomain = r->write_domain;
+			p->relocs[i].lobj.rdomain = r->read_domains;
+			p->relocs[i].lobj.tv.bo = &p->relocs[i].robj->tbo;
 			p->relocs[i].handle = r->handle;
 			p->relocs[i].flags = r->flags;
-			INIT_LIST_HEAD(&p->relocs[i].lobj.list);
 			radeon_bo_list_add_object(&p->relocs[i].lobj,
-						&p->validated);
+						  &p->validated);
 		}
 	}
 	return radeon_bo_list_validate(&p->validated);
@@ -189,10 +189,13 @@
 {
 	unsigned i;
 
-	if (!error && parser->ib) {
-		radeon_bo_list_fence(&parser->validated, parser->ib->fence);
-	}
-	radeon_bo_list_unreserve(&parser->validated);
+
+	if (!error && parser->ib)
+		ttm_eu_fence_buffer_objects(&parser->validated,
+					    parser->ib->fence);
+	else
+		ttm_eu_backoff_reservation(&parser->validated);
+
 	if (parser->relocs != NULL) {
 		for (i = 0; i < parser->nrelocs; i++) {
 			if (parser->relocs[i].gobj)
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 501966a..26091d6 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -81,6 +81,10 @@
 	"JUNIPER",
 	"CYPRESS",
 	"HEMLOCK",
+	"PALM",
+	"BARTS",
+	"TURKS",
+	"CAICOS",
 	"LAST",
 };
 
@@ -224,6 +228,11 @@
 				rdev->wb.use_event = true;
 		}
 	}
+	/* always use writeback/events on NI */
+	if (ASIC_IS_DCE5(rdev)) {
+		rdev->wb.enabled = true;
+		rdev->wb.use_event = true;
+	}
 
 	dev_info(rdev->dev, "WB %sabled\n", rdev->wb.enabled ? "en" : "dis");
 
@@ -335,7 +344,12 @@
 	uint32_t reg;
 
 	/* first check CRTCs */
-	if (ASIC_IS_DCE4(rdev)) {
+	if (ASIC_IS_DCE41(rdev)) {
+		reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
+			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
+		if (reg & EVERGREEN_CRTC_MASTER_EN)
+			return true;
+	} else if (ASIC_IS_DCE4(rdev)) {
 		reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
 			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET) |
 			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) |
@@ -636,20 +650,20 @@
 static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
 {
 	struct drm_device *dev = pci_get_drvdata(pdev);
-	struct radeon_device *rdev = dev->dev_private;
 	pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
 	if (state == VGA_SWITCHEROO_ON) {
 		printk(KERN_INFO "radeon: switched on\n");
 		/* don't suspend or resume card normally */
-		rdev->powered_down = false;
+		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
 		radeon_resume_kms(dev);
+		dev->switch_power_state = DRM_SWITCH_POWER_ON;
 		drm_kms_helper_poll_enable(dev);
 	} else {
 		printk(KERN_INFO "radeon: switched off\n");
 		drm_kms_helper_poll_disable(dev);
+		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
 		radeon_suspend_kms(dev, pmm);
-		/* don't suspend or resume card normally */
-		rdev->powered_down = true;
+		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
 	}
 }
 
@@ -704,11 +718,6 @@
 	init_waitqueue_head(&rdev->irq.vblank_queue);
 	init_waitqueue_head(&rdev->irq.idle_queue);
 
-	/* setup workqueue */
-	rdev->wq = create_workqueue("radeon");
-	if (rdev->wq == NULL)
-		return -ENOMEM;
-
 	/* Set asic functions */
 	r = radeon_asic_init(rdev);
 	if (r)
@@ -773,6 +782,7 @@
 	vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode);
 	vga_switcheroo_register_client(rdev->pdev,
 				       radeon_switcheroo_set_state,
+				       NULL,
 				       radeon_switcheroo_can_switch);
 
 	r = radeon_init(rdev);
@@ -806,7 +816,6 @@
 	/* evict vram memory */
 	radeon_bo_evict_vram(rdev);
 	radeon_fini(rdev);
-	destroy_workqueue(rdev->wq);
 	vga_switcheroo_unregister_client(rdev->pdev);
 	vga_client_register(rdev->pdev, NULL, NULL, NULL);
 	if (rdev->rio_mem)
@@ -835,7 +844,7 @@
 	}
 	rdev = dev->dev_private;
 
-	if (rdev->powered_down)
+	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
 		return 0;
 
 	/* turn off display hw */
@@ -893,7 +902,7 @@
 	struct drm_connector *connector;
 	struct radeon_device *rdev = dev->dev_private;
 
-	if (rdev->powered_down)
+	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
 		return 0;
 
 	acquire_console_sem();
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 1df4dc6..d26dabf 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -68,7 +68,7 @@
 	WREG32(AVIVO_D1GRPH_LUT_SEL + radeon_crtc->crtc_offset, radeon_crtc->crtc_id);
 }
 
-static void evergreen_crtc_load_lut(struct drm_crtc *crtc)
+static void dce4_crtc_load_lut(struct drm_crtc *crtc)
 {
 	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
 	struct drm_device *dev = crtc->dev;
@@ -98,6 +98,66 @@
 	}
 }
 
+static void dce5_crtc_load_lut(struct drm_crtc *crtc)
+{
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	int i;
+
+	DRM_DEBUG_KMS("%d\n", radeon_crtc->crtc_id);
+
+	WREG32(NI_INPUT_CSC_CONTROL + radeon_crtc->crtc_offset,
+	       (NI_INPUT_CSC_GRPH_MODE(NI_INPUT_CSC_BYPASS) |
+		NI_INPUT_CSC_OVL_MODE(NI_INPUT_CSC_BYPASS)));
+	WREG32(NI_PRESCALE_GRPH_CONTROL + radeon_crtc->crtc_offset,
+	       NI_GRPH_PRESCALE_BYPASS);
+	WREG32(NI_PRESCALE_OVL_CONTROL + radeon_crtc->crtc_offset,
+	       NI_OVL_PRESCALE_BYPASS);
+	WREG32(NI_INPUT_GAMMA_CONTROL + radeon_crtc->crtc_offset,
+	       (NI_GRPH_INPUT_GAMMA_MODE(NI_INPUT_GAMMA_USE_LUT) |
+		NI_OVL_INPUT_GAMMA_MODE(NI_INPUT_GAMMA_USE_LUT)));
+
+	WREG32(EVERGREEN_DC_LUT_CONTROL + radeon_crtc->crtc_offset, 0);
+
+	WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_BLUE + radeon_crtc->crtc_offset, 0);
+	WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_GREEN + radeon_crtc->crtc_offset, 0);
+	WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_RED + radeon_crtc->crtc_offset, 0);
+
+	WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_BLUE + radeon_crtc->crtc_offset, 0xffff);
+	WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_GREEN + radeon_crtc->crtc_offset, 0xffff);
+	WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_RED + radeon_crtc->crtc_offset, 0xffff);
+
+	WREG32(EVERGREEN_DC_LUT_RW_MODE + radeon_crtc->crtc_offset, 0);
+	WREG32(EVERGREEN_DC_LUT_WRITE_EN_MASK + radeon_crtc->crtc_offset, 0x00000007);
+
+	WREG32(EVERGREEN_DC_LUT_RW_INDEX + radeon_crtc->crtc_offset, 0);
+	for (i = 0; i < 256; i++) {
+		WREG32(EVERGREEN_DC_LUT_30_COLOR + radeon_crtc->crtc_offset,
+		       (radeon_crtc->lut_r[i] << 20) |
+		       (radeon_crtc->lut_g[i] << 10) |
+		       (radeon_crtc->lut_b[i] << 0));
+	}
+
+	WREG32(NI_DEGAMMA_CONTROL + radeon_crtc->crtc_offset,
+	       (NI_GRPH_DEGAMMA_MODE(NI_DEGAMMA_BYPASS) |
+		NI_OVL_DEGAMMA_MODE(NI_DEGAMMA_BYPASS) |
+		NI_ICON_DEGAMMA_MODE(NI_DEGAMMA_BYPASS) |
+		NI_CURSOR_DEGAMMA_MODE(NI_DEGAMMA_BYPASS)));
+	WREG32(NI_GAMUT_REMAP_CONTROL + radeon_crtc->crtc_offset,
+	       (NI_GRPH_GAMUT_REMAP_MODE(NI_GAMUT_REMAP_BYPASS) |
+		NI_OVL_GAMUT_REMAP_MODE(NI_GAMUT_REMAP_BYPASS)));
+	WREG32(NI_REGAMMA_CONTROL + radeon_crtc->crtc_offset,
+	       (NI_GRPH_REGAMMA_MODE(NI_REGAMMA_BYPASS) |
+		NI_OVL_REGAMMA_MODE(NI_REGAMMA_BYPASS)));
+	WREG32(NI_OUTPUT_CSC_CONTROL + radeon_crtc->crtc_offset,
+	       (NI_OUTPUT_CSC_GRPH_MODE(NI_OUTPUT_CSC_BYPASS) |
+		NI_OUTPUT_CSC_OVL_MODE(NI_OUTPUT_CSC_BYPASS)));
+	/* XXX match this to the depth of the crtc fmt block, move to modeset? */
+	WREG32(0x6940 + radeon_crtc->crtc_offset, 0);
+
+}
+
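dce5_crtc_load_lut() programs the gamma table through EVERGREEN_DC_LUT_30_COLOR, packing one 10-bit value per channel into a 30-bit word. A small sketch of that packing (the driver's lut_r/g/b entries are assumed to already be 10-bit):

#include <stdint.h>

/* Sketch only: pack one LUT entry as written to EVERGREEN_DC_LUT_30_COLOR,
 * red in bits 29:20, green in 19:10, blue in 9:0. */
static uint32_t pack_lut_entry(uint16_t r, uint16_t g, uint16_t b)
{
	return ((uint32_t)(r & 0x3ff) << 20) |
	       ((uint32_t)(g & 0x3ff) << 10) |
	       ((uint32_t)(b & 0x3ff) << 0);
}
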
 static void legacy_crtc_load_lut(struct drm_crtc *crtc)
 {
 	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
@@ -130,8 +190,10 @@
 	if (!crtc->enabled)
 		return;
 
-	if (ASIC_IS_DCE4(rdev))
-		evergreen_crtc_load_lut(crtc);
+	if (ASIC_IS_DCE5(rdev))
+		dce5_crtc_load_lut(crtc);
+	else if (ASIC_IS_DCE4(rdev))
+		dce4_crtc_load_lut(crtc);
 	else if (ASIC_IS_AVIVO(rdev))
 		avivo_crtc_load_lut(crtc);
 	else
@@ -183,12 +245,272 @@
 	kfree(radeon_crtc);
 }
 
+/*
+ * Handle unpin events outside the interrupt handler proper.
+ */
+static void radeon_unpin_work_func(struct work_struct *__work)
+{
+	struct radeon_unpin_work *work =
+		container_of(__work, struct radeon_unpin_work, work);
+	int r;
+
+	/* unpin of the old buffer */
+	r = radeon_bo_reserve(work->old_rbo, false);
+	if (likely(r == 0)) {
+		r = radeon_bo_unpin(work->old_rbo);
+		if (unlikely(r != 0)) {
+			DRM_ERROR("failed to unpin buffer after flip\n");
+		}
+		radeon_bo_unreserve(work->old_rbo);
+	} else
+		DRM_ERROR("failed to reserve buffer after flip\n");
+	kfree(work);
+}
+
+void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id)
+{
+	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
+	struct radeon_unpin_work *work;
+	struct drm_pending_vblank_event *e;
+	struct timeval now;
+	unsigned long flags;
+	u32 update_pending;
+	int vpos, hpos;
+
+	spin_lock_irqsave(&rdev->ddev->event_lock, flags);
+	work = radeon_crtc->unpin_work;
+	if (work == NULL ||
+	    !radeon_fence_signaled(work->fence)) {
+		spin_unlock_irqrestore(&rdev->ddev->event_lock, flags);
+		return;
+	}
+	/* New pageflip, or just completion of a previous one? */
+	if (!radeon_crtc->deferred_flip_completion) {
+		/* do the flip (mmio) */
+		update_pending = radeon_page_flip(rdev, crtc_id, work->new_crtc_base);
+	} else {
+		/* This is just a completion of a flip queued in crtc
+		 * at last invocation. Make sure we go directly to
+		 * completion routine.
+		 */
+		update_pending = 0;
+		radeon_crtc->deferred_flip_completion = 0;
+	}
+
+	/* Has the pageflip already completed in crtc, or is it certain
+	 * to complete in this vblank?
+	 */
+	if (update_pending &&
+	    (DRM_SCANOUTPOS_VALID & radeon_get_crtc_scanoutpos(rdev->ddev, crtc_id,
+							       &vpos, &hpos)) &&
+	    (vpos >= 0) &&
+	    (vpos < (99 * rdev->mode_info.crtcs[crtc_id]->base.hwmode.crtc_vdisplay)/100)) {
+		/* The crtc didn't flip in this target vblank interval,
+		 * but a flip is pending in the crtc. It will complete
+		 * in the next vblank interval, so complete the flip at
+		 * the next vblank irq.
+		 */
+		radeon_crtc->deferred_flip_completion = 1;
+		spin_unlock_irqrestore(&rdev->ddev->event_lock, flags);
+		return;
+	}
+
+	/* Pageflip (will be) certainly completed in this vblank. Clean up. */
+	radeon_crtc->unpin_work = NULL;
+
+	/* wakeup userspace */
+	if (work->event) {
+		e = work->event;
+		e->event.sequence = drm_vblank_count_and_time(rdev->ddev, crtc_id, &now);
+		e->event.tv_sec = now.tv_sec;
+		e->event.tv_usec = now.tv_usec;
+		list_add_tail(&e->base.link, &e->base.file_priv->event_list);
+		wake_up_interruptible(&e->base.file_priv->event_wait);
+	}
+	spin_unlock_irqrestore(&rdev->ddev->event_lock, flags);
+
+	drm_vblank_put(rdev->ddev, radeon_crtc->crtc_id);
+	radeon_fence_unref(&work->fence);
+	radeon_post_page_flip(work->rdev, work->crtc_id);
+	schedule_work(&work->work);
+}
+
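radeon_crtc_handle_flip() decides whether a still-pending mmio flip can complete in the current vblank by looking at the scanout position: if the beam is inside roughly the first 99% of the active area, the flip will only latch at the next vblank and completion is deferred. A sketch of that check (vdisplay taken from the crtc's hardware mode):

#include <stdbool.h>

/* Sketch only: true if a flip that is still pending in the crtc can be
 * treated as completing in the current vblank; false means defer the
 * completion to the next vblank irq, as the handler above does. */
static bool flip_completes_this_vblank(int vpos, int vdisplay)
{
	return !(vpos >= 0 && vpos < (99 * vdisplay) / 100);
}
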
+static int radeon_crtc_page_flip(struct drm_crtc *crtc,
+				 struct drm_framebuffer *fb,
+				 struct drm_pending_vblank_event *event)
+{
+	struct drm_device *dev = crtc->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	struct radeon_framebuffer *old_radeon_fb;
+	struct radeon_framebuffer *new_radeon_fb;
+	struct drm_gem_object *obj;
+	struct radeon_bo *rbo;
+	struct radeon_fence *fence;
+	struct radeon_unpin_work *work;
+	unsigned long flags;
+	u32 tiling_flags, pitch_pixels;
+	u64 base;
+	int r;
+
+	work = kzalloc(sizeof *work, GFP_KERNEL);
+	if (work == NULL)
+		return -ENOMEM;
+
+	r = radeon_fence_create(rdev, &fence);
+	if (unlikely(r != 0)) {
+		kfree(work);
+		DRM_ERROR("flip queue: failed to create fence.\n");
+		return -ENOMEM;
+	}
+	work->event = event;
+	work->rdev = rdev;
+	work->crtc_id = radeon_crtc->crtc_id;
+	work->fence = radeon_fence_ref(fence);
+	old_radeon_fb = to_radeon_framebuffer(crtc->fb);
+	new_radeon_fb = to_radeon_framebuffer(fb);
+	/* schedule unpin of the old buffer */
+	obj = old_radeon_fb->obj;
+	rbo = obj->driver_private;
+	work->old_rbo = rbo;
+	INIT_WORK(&work->work, radeon_unpin_work_func);
+
+	/* We borrow the event spin lock for protecting unpin_work */
+	spin_lock_irqsave(&dev->event_lock, flags);
+	if (radeon_crtc->unpin_work) {
+		spin_unlock_irqrestore(&dev->event_lock, flags);
+		kfree(work);
+		radeon_fence_unref(&fence);
+
+		DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
+		return -EBUSY;
+	}
+	radeon_crtc->unpin_work = work;
+	radeon_crtc->deferred_flip_completion = 0;
+	spin_unlock_irqrestore(&dev->event_lock, flags);
+
+	/* pin the new buffer */
+	obj = new_radeon_fb->obj;
+	rbo = obj->driver_private;
+
+	DRM_DEBUG_DRIVER("flip-ioctl() cur_fbo = %p, cur_bbo = %p\n",
+			 work->old_rbo, rbo);
+
+	r = radeon_bo_reserve(rbo, false);
+	if (unlikely(r != 0)) {
+		DRM_ERROR("failed to reserve new rbo buffer before flip\n");
+		goto pflip_cleanup;
+	}
+	r = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, &base);
+	if (unlikely(r != 0)) {
+		radeon_bo_unreserve(rbo);
+		r = -EINVAL;
+		DRM_ERROR("failed to pin new rbo buffer before flip\n");
+		goto pflip_cleanup;
+	}
+	radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL);
+	radeon_bo_unreserve(rbo);
+
+	if (!ASIC_IS_AVIVO(rdev)) {
+		/* crtc offset is from display base addr not FB location */
+		base -= radeon_crtc->legacy_display_base_addr;
+		pitch_pixels = fb->pitch / (fb->bits_per_pixel / 8);
+
+		if (tiling_flags & RADEON_TILING_MACRO) {
+			if (ASIC_IS_R300(rdev)) {
+				base &= ~0x7ff;
+			} else {
+				int byteshift = fb->bits_per_pixel >> 4;
+				int tile_addr = (((crtc->y >> 3) * pitch_pixels +  crtc->x) >> (8 - byteshift)) << 11;
+				base += tile_addr + ((crtc->x << byteshift) % 256) + ((crtc->y % 8) << 8);
+			}
+		} else {
+			int offset = crtc->y * pitch_pixels + crtc->x;
+			switch (fb->bits_per_pixel) {
+			case 8:
+			default:
+				offset *= 1;
+				break;
+			case 15:
+			case 16:
+				offset *= 2;
+				break;
+			case 24:
+				offset *= 3;
+				break;
+			case 32:
+				offset *= 4;
+				break;
+			}
+			base += offset;
+		}
+		base &= ~7;
+	}
+
+	spin_lock_irqsave(&dev->event_lock, flags);
+	work->new_crtc_base = base;
+	spin_unlock_irqrestore(&dev->event_lock, flags);
+
+	/* update crtc fb */
+	crtc->fb = fb;
+
+	r = drm_vblank_get(dev, radeon_crtc->crtc_id);
+	if (r) {
+		DRM_ERROR("failed to get vblank before flip\n");
+		goto pflip_cleanup1;
+	}
+
+	/* 32 ought to cover us */
+	r = radeon_ring_lock(rdev, 32);
+	if (r) {
+		DRM_ERROR("failed to lock the ring before flip\n");
+		goto pflip_cleanup2;
+	}
+
+	/* emit the fence */
+	radeon_fence_emit(rdev, fence);
+	/* set the proper interrupt */
+	radeon_pre_page_flip(rdev, radeon_crtc->crtc_id);
+	/* fire the ring */
+	radeon_ring_unlock_commit(rdev);
+
+	return 0;
+
+pflip_cleanup2:
+	drm_vblank_put(dev, radeon_crtc->crtc_id);
+
+pflip_cleanup1:
+	r = radeon_bo_reserve(rbo, false);
+	if (unlikely(r != 0)) {
+		DRM_ERROR("failed to reserve new rbo in error path\n");
+		goto pflip_cleanup;
+	}
+	r = radeon_bo_unpin(rbo);
+	if (unlikely(r != 0)) {
+		radeon_bo_unreserve(rbo);
+		r = -EINVAL;
+		DRM_ERROR("failed to unpin new rbo in error path\n");
+		goto pflip_cleanup;
+	}
+	radeon_bo_unreserve(rbo);
+
+pflip_cleanup:
+	spin_lock_irqsave(&dev->event_lock, flags);
+	radeon_crtc->unpin_work = NULL;
+	spin_unlock_irqrestore(&dev->event_lock, flags);
+	radeon_fence_unref(&fence);
+	kfree(work);
+
+	return r;
+}
+
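In the pre-AVIVO path of radeon_crtc_page_flip(), the non-tiled scanout offset is plain linear math: y times the pitch in pixels plus x, scaled by the bytes per pixel, with the final base then aligned down to 8 bytes. A sketch of that arithmetic:

#include <stdint.h>

/* Sketch only: linear (non-tiled) framebuffer offset in bytes for a scanout
 * origin of (x, y). The driver adds this to the pinned buffer address and
 * then applies 'base &= ~7'. 15 bpp is stored as 2 bytes per pixel. */
static uint64_t linear_scanout_offset(uint32_t x, uint32_t y,
				      uint32_t pitch_pixels, uint32_t bpp)
{
	uint32_t bytes_pp = (bpp + 7) / 8;

	return ((uint64_t)y * pitch_pixels + x) * bytes_pp;
}
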
 static const struct drm_crtc_funcs radeon_crtc_funcs = {
 	.cursor_set = radeon_crtc_cursor_set,
 	.cursor_move = radeon_crtc_cursor_move,
 	.gamma_set = radeon_crtc_gamma_set,
 	.set_config = drm_crtc_helper_set_config,
 	.destroy = radeon_crtc_destroy,
+	.page_flip = radeon_crtc_page_flip,
 };
 
 static void radeon_crtc_init(struct drm_device *dev, int index)
@@ -225,7 +547,7 @@
 		radeon_legacy_init_crtc(dev, radeon_crtc);
 }
 
-static const char *encoder_names[34] = {
+static const char *encoder_names[36] = {
 	"NONE",
 	"INTERNAL_LVDS",
 	"INTERNAL_TMDS1",
@@ -260,6 +582,8 @@
 	"INTERNAL_KLDSCP_LVTMA",
 	"INTERNAL_UNIPHY1",
 	"INTERNAL_UNIPHY2",
+	"NUTMEG",
+	"TRAVIS",
 };
 
 static const char *connector_names[15] = {
@@ -417,9 +741,17 @@
 	if (!radeon_connector->edid) {
 		radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter);
 	}
-	/* some servers provide a hardcoded edid in rom for KVMs */
-	if (!radeon_connector->edid)
-		radeon_connector->edid = radeon_combios_get_hardcoded_edid(rdev);
+
+	if (!radeon_connector->edid) {
+		if (rdev->is_atom_bios) {
+			/* some laptops provide a hardcoded edid in rom for LCDs */
+			if (((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_LVDS) ||
+			     (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)))
+				radeon_connector->edid = radeon_bios_get_hardcoded_edid(rdev);
+		} else
+			/* some servers provide a hardcoded edid in rom for KVMs */
+			radeon_connector->edid = radeon_bios_get_hardcoded_edid(rdev);
+	}
 	if (radeon_connector->edid) {
 		drm_mode_connector_update_edid_property(&radeon_connector->base, radeon_connector->edid);
 		ret = drm_add_edid_modes(&radeon_connector->base, radeon_connector->edid);
@@ -849,7 +1181,10 @@
 
 	rdev->ddev->mode_config.funcs = (void *)&radeon_mode_funcs;
 
-	if (ASIC_IS_AVIVO(rdev)) {
+	if (ASIC_IS_DCE5(rdev)) {
+		rdev->ddev->mode_config.max_width = 16384;
+		rdev->ddev->mode_config.max_height = 16384;
+	} else if (ASIC_IS_AVIVO(rdev)) {
 		rdev->ddev->mode_config.max_width = 8192;
 		rdev->ddev->mode_config.max_height = 8192;
 	} else {
@@ -1019,7 +1354,7 @@
 /*
  * Retrieve current video scanout position of crtc on a given gpu.
  *
- * \param rdev Device to query.
+ * \param dev Device to query.
  * \param crtc Crtc to query.
  * \param *vpos Location where vertical scanout position should be stored.
  * \param *hpos Location where horizontal scanout position should go.
@@ -1031,72 +1366,74 @@
  *
  * \return Flags, or'ed together as follows:
  *
- * RADEON_SCANOUTPOS_VALID = Query successfull.
- * RADEON_SCANOUTPOS_INVBL = Inside vblank.
- * RADEON_SCANOUTPOS_ACCURATE = Returned position is accurate. A lack of
+ * DRM_SCANOUTPOS_VALID = Query successful.
+ * DRM_SCANOUTPOS_INVBL = Inside vblank.
+ * DRM_SCANOUTPOS_ACCURATE = Returned position is accurate. A lack of
  * this flag means that returned position may be offset by a constant but
  * unknown small number of scanlines wrt. real scanout position.
  *
  */
-int radeon_get_crtc_scanoutpos(struct radeon_device *rdev, int crtc, int *vpos, int *hpos)
+int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc, int *vpos, int *hpos)
 {
 	u32 stat_crtc = 0, vbl = 0, position = 0;
 	int vbl_start, vbl_end, vtotal, ret = 0;
 	bool in_vbl = true;
 
+	struct radeon_device *rdev = dev->dev_private;
+
 	if (ASIC_IS_DCE4(rdev)) {
 		if (crtc == 0) {
 			vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
 				     EVERGREEN_CRTC0_REGISTER_OFFSET);
 			position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
 					  EVERGREEN_CRTC0_REGISTER_OFFSET);
-			ret |= RADEON_SCANOUTPOS_VALID;
+			ret |= DRM_SCANOUTPOS_VALID;
 		}
 		if (crtc == 1) {
 			vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
 				     EVERGREEN_CRTC1_REGISTER_OFFSET);
 			position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
 					  EVERGREEN_CRTC1_REGISTER_OFFSET);
-			ret |= RADEON_SCANOUTPOS_VALID;
+			ret |= DRM_SCANOUTPOS_VALID;
 		}
 		if (crtc == 2) {
 			vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
 				     EVERGREEN_CRTC2_REGISTER_OFFSET);
 			position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
 					  EVERGREEN_CRTC2_REGISTER_OFFSET);
-			ret |= RADEON_SCANOUTPOS_VALID;
+			ret |= DRM_SCANOUTPOS_VALID;
 		}
 		if (crtc == 3) {
 			vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
 				     EVERGREEN_CRTC3_REGISTER_OFFSET);
 			position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
 					  EVERGREEN_CRTC3_REGISTER_OFFSET);
-			ret |= RADEON_SCANOUTPOS_VALID;
+			ret |= DRM_SCANOUTPOS_VALID;
 		}
 		if (crtc == 4) {
 			vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
 				     EVERGREEN_CRTC4_REGISTER_OFFSET);
 			position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
 					  EVERGREEN_CRTC4_REGISTER_OFFSET);
-			ret |= RADEON_SCANOUTPOS_VALID;
+			ret |= DRM_SCANOUTPOS_VALID;
 		}
 		if (crtc == 5) {
 			vbl = RREG32(EVERGREEN_CRTC_V_BLANK_START_END +
 				     EVERGREEN_CRTC5_REGISTER_OFFSET);
 			position = RREG32(EVERGREEN_CRTC_STATUS_POSITION +
 					  EVERGREEN_CRTC5_REGISTER_OFFSET);
-			ret |= RADEON_SCANOUTPOS_VALID;
+			ret |= DRM_SCANOUTPOS_VALID;
 		}
 	} else if (ASIC_IS_AVIVO(rdev)) {
 		if (crtc == 0) {
 			vbl = RREG32(AVIVO_D1CRTC_V_BLANK_START_END);
 			position = RREG32(AVIVO_D1CRTC_STATUS_POSITION);
-			ret |= RADEON_SCANOUTPOS_VALID;
+			ret |= DRM_SCANOUTPOS_VALID;
 		}
 		if (crtc == 1) {
 			vbl = RREG32(AVIVO_D2CRTC_V_BLANK_START_END);
 			position = RREG32(AVIVO_D2CRTC_STATUS_POSITION);
-			ret |= RADEON_SCANOUTPOS_VALID;
+			ret |= DRM_SCANOUTPOS_VALID;
 		}
 	} else {
 		/* Pre-AVIVO: Different encoding of scanout pos and vblank interval. */
@@ -1112,7 +1449,7 @@
 			if (!(stat_crtc & 1))
 				in_vbl = false;
 
-			ret |= RADEON_SCANOUTPOS_VALID;
+			ret |= DRM_SCANOUTPOS_VALID;
 		}
 		if (crtc == 1) {
 			vbl = (RREG32(RADEON_CRTC2_V_TOTAL_DISP) &
@@ -1122,7 +1459,7 @@
 			if (!(stat_crtc & 1))
 				in_vbl = false;
 
-			ret |= RADEON_SCANOUTPOS_VALID;
+			ret |= DRM_SCANOUTPOS_VALID;
 		}
 	}
 
@@ -1133,13 +1470,13 @@
 	/* Valid vblank area boundaries from gpu retrieved? */
 	if (vbl > 0) {
 		/* Yes: Decode. */
-		ret |= RADEON_SCANOUTPOS_ACCURATE;
+		ret |= DRM_SCANOUTPOS_ACCURATE;
 		vbl_start = vbl & 0x1fff;
 		vbl_end = (vbl >> 16) & 0x1fff;
 	}
 	else {
 		/* No: Fake something reasonable which gives at least ok results. */
-		vbl_start = rdev->mode_info.crtcs[crtc]->base.mode.crtc_vdisplay;
+		vbl_start = rdev->mode_info.crtcs[crtc]->base.hwmode.crtc_vdisplay;
 		vbl_end = 0;
 	}
 
@@ -1155,7 +1492,7 @@
 
 	/* Inside "upper part" of vblank area? Apply corrective offset if so: */
 	if (in_vbl && (*vpos >= vbl_start)) {
-		vtotal = rdev->mode_info.crtcs[crtc]->base.mode.crtc_vtotal;
+		vtotal = rdev->mode_info.crtcs[crtc]->base.hwmode.crtc_vtotal;
 		*vpos = *vpos - vtotal;
 	}
 
@@ -1164,7 +1501,7 @@
 
 	/* In vblank? */
 	if (in_vbl)
-		ret |= RADEON_SCANOUTPOS_INVBL;
+		ret |= DRM_SCANOUTPOS_INVBL;
 
 	return ret;
 }
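radeon_get_crtc_scanoutpos() reads the packed *_V_BLANK_START_END register and splits it into the vblank start and end scanlines, 13 bits each, start in the low half and end in the high half. A sketch of that decode:

#include <stdint.h>

/* Sketch only: split a packed V_BLANK_START_END value into start/end
 * scanlines, matching the 'vbl & 0x1fff' / '(vbl >> 16) & 0x1fff' decode
 * in the function above. */
static void decode_vblank_start_end(uint32_t vbl, int *start, int *end)
{
	*start = vbl & 0x1fff;
	*end = (vbl >> 16) & 0x1fff;
}
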
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 60e689f..be5cb4f 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -48,9 +48,10 @@
  * - 2.5.0 - add get accel 2 to work around ddx breakage for evergreen
  * - 2.6.0 - add tiling config query (r6xx+), add initial HiZ support (r300->r500)
  *   2.7.0 - fixups for r600 2D tiling support. (no external ABI change), add eg dyn gpr regs
+ *   2.8.0 - pageflip support, r500 US_FORMAT regs. r500 ARGB2101010 colorbuf, r300->r500 CMASK
  */
 #define KMS_DRIVER_MAJOR	2
-#define KMS_DRIVER_MINOR	7
+#define KMS_DRIVER_MINOR	8
 #define KMS_DRIVER_PATCHLEVEL	0
 int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
 int radeon_driver_unload_kms(struct drm_device *dev);
@@ -66,6 +67,10 @@
 u32 radeon_get_vblank_counter_kms(struct drm_device *dev, int crtc);
 int radeon_enable_vblank_kms(struct drm_device *dev, int crtc);
 void radeon_disable_vblank_kms(struct drm_device *dev, int crtc);
+int radeon_get_vblank_timestamp_kms(struct drm_device *dev, int crtc,
+				    int *max_error,
+				    struct timeval *vblank_time,
+				    unsigned flags);
 void radeon_driver_irq_preinstall_kms(struct drm_device *dev);
 int radeon_driver_irq_postinstall_kms(struct drm_device *dev);
 void radeon_driver_irq_uninstall_kms(struct drm_device *dev);
@@ -74,6 +79,8 @@
 			 struct drm_file *file_priv);
 int radeon_gem_object_init(struct drm_gem_object *obj);
 void radeon_gem_object_free(struct drm_gem_object *obj);
+extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc,
+				      int *vpos, int *hpos);
 extern struct drm_ioctl_desc radeon_ioctls_kms[];
 extern int radeon_max_kms_ioctl;
 int radeon_mmap(struct file *filp, struct vm_area_struct *vma);
@@ -296,6 +303,8 @@
 	.get_vblank_counter = radeon_get_vblank_counter_kms,
 	.enable_vblank = radeon_enable_vblank_kms,
 	.disable_vblank = radeon_disable_vblank_kms,
+	.get_vblank_timestamp = radeon_get_vblank_timestamp_kms,
+	.get_scanout_position = radeon_get_crtc_scanoutpos,
 #if defined(CONFIG_DEBUG_FS)
 	.debugfs_init = radeon_debugfs_init,
 	.debugfs_cleanup = radeon_debugfs_cleanup,
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index 041943d..8fd1842 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -641,7 +641,7 @@
 	switch (connector->connector_type) {
 	case DRM_MODE_CONNECTOR_DVII:
 	case DRM_MODE_CONNECTOR_HDMIB: /* HDMI-B is basically DL-DVI; analog works fine */
-		if (drm_detect_hdmi_monitor(radeon_connector->edid)) {
+		if (drm_detect_monitor_audio(radeon_connector->edid)) {
 			/* fix me */
 			if (ASIC_IS_DCE4(rdev))
 				return ATOM_ENCODER_MODE_DVI;
@@ -655,7 +655,7 @@
 	case DRM_MODE_CONNECTOR_DVID:
 	case DRM_MODE_CONNECTOR_HDMIA:
 	default:
-		if (drm_detect_hdmi_monitor(radeon_connector->edid)) {
+		if (drm_detect_monitor_audio(radeon_connector->edid)) {
 			/* fix me */
 			if (ASIC_IS_DCE4(rdev))
 				return ATOM_ENCODER_MODE_DVI;
@@ -673,7 +673,7 @@
 		if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
 		    (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP))
 			return ATOM_ENCODER_MODE_DP;
-		else if (drm_detect_hdmi_monitor(radeon_connector->edid)) {
+		else if (drm_detect_monitor_audio(radeon_connector->edid)) {
 			/* fix me */
 			if (ASIC_IS_DCE4(rdev))
 				return ATOM_ENCODER_MODE_DVI;
@@ -712,8 +712,8 @@
  * - 2 DIG encoder blocks.
  * DIG1/2 can drive UNIPHY0/1/2 link A or link B
  *
- * DCE 4.0
- * - 3 DIG transmitter blocks UNPHY0/1/2 (links A and B).
+ * DCE 4.0/5.0
+ * - 3 DIG transmitter blocks UNIPHY0/1/2 (links A and B).
  * Supports up to 6 digital outputs
  * - 6 DIG encoder blocks.
  * - DIG to PHY mapping is hardcoded
@@ -724,6 +724,12 @@
  * DIG5 drives UNIPHY2 link A, A+B
  * DIG6 drives UNIPHY2 link B
  *
+ * DCE 4.1
+ * - 3 DIG transmitter blocks UNIPHY0/1/2 (links A and B).
+ * Supports up to 6 digital outputs
+ * - 2 DIG encoder blocks.
+ * DIG1/2 can drive UNIPHY0/1/2 link A or link B
+ *
  * Routing
  * crtc -> dig encoder -> UNIPHY/LVTMA (1 or 2 links)
  * Examples:
@@ -737,6 +743,7 @@
 	DIG_ENCODER_CONTROL_PS_ALLOCATION v1;
 	DIG_ENCODER_CONTROL_PARAMETERS_V2 v2;
 	DIG_ENCODER_CONTROL_PARAMETERS_V3 v3;
+	DIG_ENCODER_CONTROL_PARAMETERS_V4 v4;
 };
 
 void
@@ -752,6 +759,7 @@
 	uint8_t frev, crev;
 	int dp_clock = 0;
 	int dp_lane_count = 0;
+	int hpd_id = RADEON_HPD_NONE;
 
 	if (connector) {
 		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
@@ -760,6 +768,7 @@
 
 		dp_clock = dig_connector->dp_clock;
 		dp_lane_count = dig_connector->dp_lane_count;
+		hpd_id = radeon_connector->hpd.hpd;
 	}
 
 	/* no dig encoder assigned */
@@ -784,19 +793,36 @@
 	args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
 	args.v1.ucEncoderMode = atombios_get_encoder_mode(encoder);
 
-	if (args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP) {
-		if (dp_clock == 270000)
-			args.v1.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ;
+	if ((args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP) ||
+	    (args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP_MST))
 		args.v1.ucLaneNum = dp_lane_count;
-	} else if (radeon_encoder->pixel_clock > 165000)
+	else if (radeon_encoder->pixel_clock > 165000)
 		args.v1.ucLaneNum = 8;
 	else
 		args.v1.ucLaneNum = 4;
 
-	if (ASIC_IS_DCE4(rdev)) {
+	if (ASIC_IS_DCE5(rdev)) {
+		if ((args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP) ||
+		    (args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP_MST)) {
+			if (dp_clock == 270000)
+				args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V4_DPLINKRATE_2_70GHZ;
+			else if (dp_clock == 540000)
+				args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V4_DPLINKRATE_5_40GHZ;
+		}
+		args.v4.acConfig.ucDigSel = dig->dig_encoder;
+		args.v4.ucBitPerColor = PANEL_8BIT_PER_COLOR;
+		if (hpd_id == RADEON_HPD_NONE)
+			args.v4.ucHPD_ID = 0;
+		else
+			args.v4.ucHPD_ID = hpd_id + 1;
+	} else if (ASIC_IS_DCE4(rdev)) {
+		if ((args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP) && (dp_clock == 270000))
+			args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V3_DPLINKRATE_2_70GHZ;
 		args.v3.acConfig.ucDigSel = dig->dig_encoder;
 		args.v3.ucBitPerColor = PANEL_8BIT_PER_COLOR;
 	} else {
+		if ((args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP) && (dp_clock == 270000))
+			args.v1.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ;
 		switch (radeon_encoder->encoder_id) {
 		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
 			args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER1;
@@ -823,6 +849,7 @@
 	DIG_TRANSMITTER_CONTROL_PS_ALLOCATION v1;
 	DIG_TRANSMITTER_CONTROL_PARAMETERS_V2 v2;
 	DIG_TRANSMITTER_CONTROL_PARAMETERS_V3 v3;
+	DIG_TRANSMITTER_CONTROL_PARAMETERS_V4 v4;
 };
 
 void
@@ -917,10 +944,18 @@
 			struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
 			pll_id = radeon_crtc->pll_id;
 		}
-		if (is_dp && rdev->clock.dp_extclk)
-			args.v3.acConfig.ucRefClkSource = 2; /* external src */
-		else
-			args.v3.acConfig.ucRefClkSource = pll_id;
+
+		if (ASIC_IS_DCE5(rdev)) {
+			if (is_dp && rdev->clock.dp_extclk)
+				args.v4.acConfig.ucRefClkSource = 3; /* external src */
+			else
+				args.v4.acConfig.ucRefClkSource = pll_id;
+		} else {
+			if (is_dp && rdev->clock.dp_extclk)
+				args.v3.acConfig.ucRefClkSource = 2; /* external src */
+			else
+				args.v3.acConfig.ucRefClkSource = pll_id;
+		}
 
 		switch (radeon_encoder->encoder_id) {
 		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
@@ -1044,6 +1079,7 @@
 
 union external_encoder_control {
 	EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION v1;
+	EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION_V3 v3;
 };
 
 static void
@@ -1054,6 +1090,7 @@
 	struct drm_device *dev = encoder->dev;
 	struct radeon_device *rdev = dev->dev_private;
 	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_encoder *ext_radeon_encoder = to_radeon_encoder(ext_encoder);
 	union external_encoder_control args;
 	struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
 	int index = GetIndexIntoMasterTable(COMMAND, ExternalEncoderControl);
@@ -1061,6 +1098,7 @@
 	int dp_clock = 0;
 	int dp_lane_count = 0;
 	int connector_object_id = 0;
+	u32 ext_enum = (ext_radeon_encoder->encoder_enum & ENUM_ID_MASK) >> ENUM_ID_SHIFT;
 
 	if (connector) {
 		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
@@ -1099,6 +1137,37 @@
 			else
 				args.v1.sDigEncoder.ucLaneNum = 4;
 			break;
+		case 3:
+			args.v3.sExtEncoder.ucAction = action;
+			if (action == EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT)
+				args.v3.sExtEncoder.usConnectorId = connector_object_id;
+			else
+				args.v3.sExtEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+			args.v3.sExtEncoder.ucEncoderMode = atombios_get_encoder_mode(encoder);
+
+			if (args.v3.sExtEncoder.ucEncoderMode == ATOM_ENCODER_MODE_DP) {
+				if (dp_clock == 270000)
+					args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_2_70GHZ;
+				else if (dp_clock == 540000)
+					args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_5_40GHZ;
+				args.v3.sExtEncoder.ucLaneNum = dp_lane_count;
+			} else if (radeon_encoder->pixel_clock > 165000)
+				args.v3.sExtEncoder.ucLaneNum = 8;
+			else
+				args.v3.sExtEncoder.ucLaneNum = 4;
+			switch (ext_enum) {
+			case GRAPH_OBJECT_ENUM_ID1:
+				args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_ENCODER1;
+				break;
+			case GRAPH_OBJECT_ENUM_ID2:
+				args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_ENCODER2;
+				break;
+			case GRAPH_OBJECT_ENUM_ID3:
+				args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_ENCODER3;
+				break;
+			}
+			args.v3.sExtEncoder.ucBitPerColor = PANEL_8BIT_PER_COLOR;
+			break;
 		default:
 			DRM_ERROR("Unknown table version: %d, %d\n", frev, crev);
 			return;
@@ -1158,6 +1227,8 @@
 	DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION args;
 	int index = 0;
 	bool is_dig = false;
+	bool is_dce5_dac = false;
+	bool is_dce5_dvo = false;
 
 	memset(&args, 0, sizeof(args));
 
@@ -1180,7 +1251,9 @@
 		index = GetIndexIntoMasterTable(COMMAND, DVOOutputControl);
 		break;
 	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
-		if (ASIC_IS_DCE3(rdev))
+		if (ASIC_IS_DCE5(rdev))
+			is_dce5_dvo = true;
+		else if (ASIC_IS_DCE3(rdev))
 			is_dig = true;
 		else
 			index = GetIndexIntoMasterTable(COMMAND, DVOOutputControl);
@@ -1196,12 +1269,16 @@
 		break;
 	case ENCODER_OBJECT_ID_INTERNAL_DAC1:
 	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
-		if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
-			index = GetIndexIntoMasterTable(COMMAND, TV1OutputControl);
-		else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
-			index = GetIndexIntoMasterTable(COMMAND, CV1OutputControl);
-		else
-			index = GetIndexIntoMasterTable(COMMAND, DAC1OutputControl);
+		if (ASIC_IS_DCE5(rdev))
+			is_dce5_dac = true;
+		else {
+			if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
+				index = GetIndexIntoMasterTable(COMMAND, TV1OutputControl);
+			else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
+				index = GetIndexIntoMasterTable(COMMAND, CV1OutputControl);
+			else
+				index = GetIndexIntoMasterTable(COMMAND, DAC1OutputControl);
+		}
 		break;
 	case ENCODER_OBJECT_ID_INTERNAL_DAC2:
 	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
@@ -1260,6 +1337,28 @@
 				atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_LCD_BLOFF, 0, 0);
 			break;
 		}
+	} else if (is_dce5_dac) {
+		switch (mode) {
+		case DRM_MODE_DPMS_ON:
+			atombios_dac_setup(encoder, ATOM_ENABLE);
+			break;
+		case DRM_MODE_DPMS_STANDBY:
+		case DRM_MODE_DPMS_SUSPEND:
+		case DRM_MODE_DPMS_OFF:
+			atombios_dac_setup(encoder, ATOM_DISABLE);
+			break;
+		}
+	} else if (is_dce5_dvo) {
+		switch (mode) {
+		case DRM_MODE_DPMS_ON:
+			atombios_dvo_setup(encoder, ATOM_ENABLE);
+			break;
+		case DRM_MODE_DPMS_STANDBY:
+		case DRM_MODE_DPMS_SUSPEND:
+		case DRM_MODE_DPMS_OFF:
+			atombios_dvo_setup(encoder, ATOM_DISABLE);
+			break;
+		}
 	} else {
 		switch (mode) {
 		case DRM_MODE_DPMS_ON:
@@ -1289,12 +1388,18 @@
 		switch (mode) {
 		case DRM_MODE_DPMS_ON:
 		default:
-			action = ATOM_ENABLE;
+			if (ASIC_IS_DCE41(rdev))
+				action = EXTERNAL_ENCODER_ACTION_V3_ENABLE_OUTPUT;
+			else
+				action = ATOM_ENABLE;
 			break;
 		case DRM_MODE_DPMS_STANDBY:
 		case DRM_MODE_DPMS_SUSPEND:
 		case DRM_MODE_DPMS_OFF:
-			action = ATOM_DISABLE;
+			if (ASIC_IS_DCE41(rdev))
+				action = EXTERNAL_ENCODER_ACTION_V3_DISABLE_OUTPUT;
+			else
+				action = ATOM_DISABLE;
 			break;
 		}
 		atombios_external_encoder_setup(encoder, ext_encoder, action);
@@ -1483,27 +1588,35 @@
 	struct radeon_encoder_atom_dig *dig;
 	uint32_t dig_enc_in_use = 0;
 
+	/* DCE4/5 */
 	if (ASIC_IS_DCE4(rdev)) {
 		dig = radeon_encoder->enc_priv;
-		switch (radeon_encoder->encoder_id) {
-		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+		if (ASIC_IS_DCE41(rdev)) {
 			if (dig->linkb)
 				return 1;
 			else
 				return 0;
-			break;
-		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
-			if (dig->linkb)
-				return 3;
-			else
-				return 2;
-			break;
-		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
-			if (dig->linkb)
-				return 5;
-			else
-				return 4;
-			break;
+		} else {
+			switch (radeon_encoder->encoder_id) {
+			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+				if (dig->linkb)
+					return 1;
+				else
+					return 0;
+				break;
+			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+				if (dig->linkb)
+					return 3;
+				else
+					return 2;
+				break;
+			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+				if (dig->linkb)
+					return 5;
+				else
+					return 4;
+				break;
+			}
 		}
 	}
 
@@ -1610,7 +1723,13 @@
 	}
 
 	if (ext_encoder) {
-		atombios_external_encoder_setup(encoder, ext_encoder, ATOM_ENABLE);
+		if (ASIC_IS_DCE41(rdev)) {
+			atombios_external_encoder_setup(encoder, ext_encoder,
+							EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT);
+			atombios_external_encoder_setup(encoder, ext_encoder,
+							EXTERNAL_ENCODER_ACTION_V3_ENCODER_SETUP);
+		} else
+			atombios_external_encoder_setup(encoder, ext_encoder, ATOM_ENABLE);
 	}
 
 	atombios_apply_encoder_quirks(encoder, adjusted_mode);
@@ -1927,7 +2046,10 @@
 }
 
 void
-radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_enum, uint32_t supported_device)
+radeon_add_atom_encoder(struct drm_device *dev,
+			uint32_t encoder_enum,
+			uint32_t supported_device,
+			u16 caps)
 {
 	struct radeon_device *rdev = dev->dev_private;
 	struct drm_encoder *encoder;
@@ -1970,6 +2092,7 @@
 	radeon_encoder->rmx_type = RMX_OFF;
 	radeon_encoder->underscan_type = UNDERSCAN_OFF;
 	radeon_encoder->is_ext_encoder = false;
+	radeon_encoder->caps = caps;
 
 	switch (radeon_encoder->encoder_id) {
 	case ENCODER_OBJECT_ID_INTERNAL_LVDS:
@@ -2029,6 +2152,8 @@
 	case ENCODER_OBJECT_ID_TITFP513:
 	case ENCODER_OBJECT_ID_VT1623:
 	case ENCODER_OBJECT_ID_HDMI_SI1930:
+	case ENCODER_OBJECT_ID_TRAVIS:
+	case ENCODER_OBJECT_ID_NUTMEG:
 		/* these are handled by the primary encoders */
 		radeon_encoder->is_ext_encoder = true;
 		if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
diff --git a/drivers/gpu/drm/radeon/radeon_family.h b/drivers/gpu/drm/radeon/radeon_family.h
index e329066..1ca55eb 100644
--- a/drivers/gpu/drm/radeon/radeon_family.h
+++ b/drivers/gpu/drm/radeon/radeon_family.h
@@ -80,6 +80,10 @@
 	CHIP_JUNIPER,
 	CHIP_CYPRESS,
 	CHIP_HEMLOCK,
+	CHIP_PALM,
+	CHIP_BARTS,
+	CHIP_TURKS,
+	CHIP_CAICOS,
 	CHIP_LAST,
 };
 
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index 6abea32..ca32e9c 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -225,8 +225,6 @@
 
 	strcpy(info->fix.id, "radeondrmfb");
 
-	drm_fb_helper_fill_fix(info, fb->pitch, fb->depth);
-
 	info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
 	info->fbops = &radeonfb_ops;
 
@@ -247,8 +245,6 @@
 	info->apertures->ranges[0].base = rdev->ddev->mode_config.fb_base;
 	info->apertures->ranges[0].size = rdev->mc.aper_size;
 
-	info->fix.mmio_start = 0;
-	info->fix.mmio_len = 0;
 	info->pixmap.size = 64*1024;
 	info->pixmap.buf_align = 8;
 	info->pixmap.access_align = 32;
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index daacb28..171b0b2 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -38,6 +38,7 @@
 #include "drm.h"
 #include "radeon_reg.h"
 #include "radeon.h"
+#include "radeon_trace.h"
 
 int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
 {
@@ -57,6 +58,7 @@
 	} else
 		radeon_fence_ring_emit(rdev, fence);
 
+	trace_radeon_fence_emit(rdev->ddev, fence->seq);
 	fence->emited = true;
 	list_del(&fence->list);
 	list_add_tail(&fence->list, &rdev->fence_drv.emited);
@@ -213,6 +215,7 @@
 retry:
 	/* save current sequence used to check for GPU lockup */
 	seq = rdev->fence_drv.last_seq;
+	trace_radeon_fence_wait_begin(rdev->ddev, seq);
 	if (intr) {
 		radeon_irq_kms_sw_irq_get(rdev);
 		r = wait_event_interruptible_timeout(rdev->fence_drv.queue,
@@ -227,6 +230,7 @@
 			 radeon_fence_signaled(fence), timeout);
 		radeon_irq_kms_sw_irq_put(rdev);
 	}
+	trace_radeon_fence_wait_end(rdev->ddev, seq);
 	if (unlikely(!radeon_fence_signaled(fence))) {
 		/* we were interrupted for some reason and fence
 		 * isn't signaled yet, resume wait
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index a108c7e..a289646 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -64,15 +64,15 @@
 	struct radeon_device *rdev = dev->dev_private;
 	unsigned i;
 
-	INIT_WORK(&rdev->hotplug_work, radeon_hotplug_work_func);
-
 	/* Disable *all* interrupts */
 	rdev->irq.sw_int = false;
 	rdev->irq.gui_idle = false;
 	for (i = 0; i < rdev->num_crtc; i++)
 		rdev->irq.crtc_vblank_int[i] = false;
-	for (i = 0; i < 6; i++)
+	for (i = 0; i < 6; i++) {
 		rdev->irq.hpd[i] = false;
+		rdev->irq.pflip[i] = false;
+	}
 	radeon_irq_set(rdev);
 	/* Clear bits */
 	radeon_irq_process(rdev);
@@ -101,8 +101,10 @@
 	rdev->irq.gui_idle = false;
 	for (i = 0; i < rdev->num_crtc; i++)
 		rdev->irq.crtc_vblank_int[i] = false;
-	for (i = 0; i < 6; i++)
+	for (i = 0; i < 6; i++) {
 		rdev->irq.hpd[i] = false;
+		rdev->irq.pflip[i] = false;
+	}
 	radeon_irq_set(rdev);
 }
 
@@ -110,6 +112,8 @@
 {
 	int r = 0;
 
+	INIT_WORK(&rdev->hotplug_work, radeon_hotplug_work_func);
+
 	spin_lock_init(&rdev->irq.sw_lock);
 	r = drm_vblank_init(rdev->ddev, rdev->num_crtc);
 	if (r) {
@@ -121,7 +125,7 @@
 	 * chips.  Disable MSI on them for now.
 	 */
 	if ((rdev->family >= CHIP_RV380) &&
-	    (!(rdev->flags & RADEON_IS_IGP)) &&
+	    ((!(rdev->flags & RADEON_IS_IGP)) || (rdev->family >= CHIP_PALM)) &&
 	    (!(rdev->flags & RADEON_IS_AGP))) {
 		int ret = pci_enable_msi(rdev->pdev);
 		if (!ret) {
@@ -148,6 +152,7 @@
 		if (rdev->msi_enabled)
 			pci_disable_msi(rdev->pdev);
 	}
+	flush_work_sync(&rdev->hotplug_work);
 }
 
 void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev)
@@ -175,3 +180,34 @@
 	spin_unlock_irqrestore(&rdev->irq.sw_lock, irqflags);
 }
 
+void radeon_irq_kms_pflip_irq_get(struct radeon_device *rdev, int crtc)
+{
+	unsigned long irqflags;
+
+	if (crtc < 0 || crtc >= rdev->num_crtc)
+		return;
+
+	spin_lock_irqsave(&rdev->irq.pflip_lock[crtc], irqflags);
+	if (rdev->ddev->irq_enabled && (++rdev->irq.pflip_refcount[crtc] == 1)) {
+		rdev->irq.pflip[crtc] = true;
+		radeon_irq_set(rdev);
+	}
+	spin_unlock_irqrestore(&rdev->irq.pflip_lock[crtc], irqflags);
+}
+
+void radeon_irq_kms_pflip_irq_put(struct radeon_device *rdev, int crtc)
+{
+	unsigned long irqflags;
+
+	if (crtc < 0 || crtc >= rdev->num_crtc)
+		return;
+
+	spin_lock_irqsave(&rdev->irq.pflip_lock[crtc], irqflags);
+	BUG_ON(rdev->ddev->irq_enabled && rdev->irq.pflip_refcount[crtc] <= 0);
+	if (rdev->ddev->irq_enabled && (--rdev->irq.pflip_refcount[crtc] == 0)) {
+		rdev->irq.pflip[crtc] = false;
+		radeon_irq_set(rdev);
+	}
+	spin_unlock_irqrestore(&rdev->irq.pflip_lock[crtc], irqflags);
+}
+
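
Note: radeon_irq_kms_pflip_irq_get()/put() above reference-count users of the per-CRTC page-flip interrupt under pflip_lock, so the interrupt mask is only reprogrammed on the first get and the last put. The following is a minimal user-space C analogue of that get/put discipline, just to illustrate the refcount pattern; the struct, the hw_enable()/hw_disable() stubs and the pthread mutex are made-up stand-ins, not kernel API.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in for one CRTC's page-flip interrupt state. */
struct crtc_irq {
	pthread_mutex_t lock;
	int refcount;      /* how many callers currently need the IRQ */
	bool enabled;      /* mirrors rdev->irq.pflip[crtc] */
};

static void hw_enable(void)  { puts("program IRQ mask: pflip on"); }
static void hw_disable(void) { puts("program IRQ mask: pflip off"); }

/* First caller enables the interrupt; later callers only bump the count. */
static void pflip_irq_get(struct crtc_irq *c)
{
	pthread_mutex_lock(&c->lock);
	if (++c->refcount == 1) {
		c->enabled = true;
		hw_enable();
	}
	pthread_mutex_unlock(&c->lock);
}

/* Last caller disables it again; unbalanced puts would be a bug. */
static void pflip_irq_put(struct crtc_irq *c)
{
	pthread_mutex_lock(&c->lock);
	if (c->refcount > 0 && --c->refcount == 0) {
		c->enabled = false;
		hw_disable();
	}
	pthread_mutex_unlock(&c->lock);
}

int main(void)
{
	struct crtc_irq c = { PTHREAD_MUTEX_INITIALIZER, 0, false };

	pflip_irq_get(&c);   /* enables the interrupt */
	pflip_irq_get(&c);   /* no hardware access, just refcount */
	pflip_irq_put(&c);   /* still enabled */
	pflip_irq_put(&c);   /* disables the interrupt */
	return 0;
}
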
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 8fbbe1c..28a53e4a 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -96,9 +96,27 @@
 	return r;
 }
 
+static void radeon_set_filp_rights(struct drm_device *dev,
+				   struct drm_file **owner,
+				   struct drm_file *applier,
+				   uint32_t *value)
+{
+	mutex_lock(&dev->struct_mutex);
+	if (*value == 1) {
+		/* wants rights */
+		if (!*owner)
+			*owner = applier;
+	} else if (*value == 0) {
+		/* revokes rights */
+		if (*owner == applier)
+			*owner = NULL;
+	}
+	*value = *owner == applier ? 1 : 0;
+	mutex_unlock(&dev->struct_mutex);
+}
 
 /*
- * Userspace get informations ioctl
+ * Userspace get information ioctl
  */
 int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 {
@@ -173,18 +191,15 @@
 			DRM_DEBUG_KMS("WANT_HYPERZ: invalid value %d\n", value);
 			return -EINVAL;
 		}
-		mutex_lock(&dev->struct_mutex);
-		if (value == 1) {
-			/* wants hyper-z */
-			if (!rdev->hyperz_filp)
-				rdev->hyperz_filp = filp;
-		} else if (value == 0) {
-			/* revokes hyper-z */
-			if (rdev->hyperz_filp == filp)
-				rdev->hyperz_filp = NULL;
+		radeon_set_filp_rights(dev, &rdev->hyperz_filp, filp, &value);
+		break;
+	case RADEON_INFO_WANT_CMASK:
+		/* The same logic as Hyper-Z. */
+		if (value >= 2) {
+			DRM_DEBUG_KMS("WANT_CMASK: invalid value %d\n", value);
+			return -EINVAL;
 		}
-		value = rdev->hyperz_filp == filp ?  1 : 0;
-		mutex_unlock(&dev->struct_mutex);
+		radeon_set_filp_rights(dev, &rdev->cmask_filp, filp, &value);
 		break;
 	default:
 		DRM_DEBUG_KMS("Invalid request %d\n", info->request);
@@ -203,10 +218,6 @@
  */
 int radeon_driver_firstopen_kms(struct drm_device *dev)
 {
-	struct radeon_device *rdev = dev->dev_private;
-
-	if (rdev->powered_down)
-		return -EINVAL;
 	return 0;
 }
 
@@ -277,6 +288,27 @@
 	radeon_irq_set(rdev);
 }
 
+int radeon_get_vblank_timestamp_kms(struct drm_device *dev, int crtc,
+				    int *max_error,
+				    struct timeval *vblank_time,
+				    unsigned flags)
+{
+	struct drm_crtc *drmcrtc;
+	struct radeon_device *rdev = dev->dev_private;
+
+	if (crtc < 0 || crtc >= dev->num_crtcs) {
+		DRM_ERROR("Invalid crtc %d\n", crtc);
+		return -EINVAL;
+	}
+
+	/* Get associated drm_crtc: */
+	drmcrtc = &rdev->mode_info.crtcs[crtc]->base;
+
+	/* Helper routine in DRM core does all the work: */
+	return drm_calc_vbltimestamp_from_scanoutpos(dev, crtc, max_error,
+						     vblank_time, flags,
+						     drmcrtc);
+}
 
 /*
  * IOCTL.
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index e301c6f..12bdeab 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -277,6 +277,9 @@
 	fixed20_12 hsc;
 	struct drm_display_mode native_mode;
 	int pll_id;
+	/* page flipping */
+	struct radeon_unpin_work *unpin_work;
+	int deferred_flip_completion;
 };
 
 struct radeon_encoder_primary_dac {
@@ -376,6 +379,7 @@
 	int hdmi_audio_workaround;
 	int hdmi_buffer_status;
 	bool is_ext_encoder;
+	u16 caps;
 };
 
 struct radeon_connector_atom_dig {
@@ -442,10 +446,6 @@
 	struct drm_gem_object *obj;
 };
 
-/* radeon_get_crtc_scanoutpos() return flags */
-#define RADEON_SCANOUTPOS_VALID        (1 << 0)
-#define RADEON_SCANOUTPOS_INVBL        (1 << 1)
-#define RADEON_SCANOUTPOS_ACCURATE     (1 << 2)
 
 extern enum radeon_tv_std
 radeon_combios_get_tv_info(struct radeon_device *rdev);
@@ -562,11 +562,12 @@
 extern int radeon_crtc_cursor_move(struct drm_crtc *crtc,
 				   int x, int y);
 
-extern int radeon_get_crtc_scanoutpos(struct radeon_device *rdev, int crtc, int *vpos, int *hpos);
+extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc,
+				      int *vpos, int *hpos);
 
 extern bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev);
 extern struct edid *
-radeon_combios_get_hardcoded_edid(struct radeon_device *rdev);
+radeon_bios_get_hardcoded_edid(struct radeon_device *rdev);
 extern bool radeon_atom_get_clock_info(struct drm_device *dev);
 extern bool radeon_combios_get_clock_info(struct drm_device *dev);
 extern struct radeon_encoder_atom_dig *
@@ -662,4 +663,7 @@
 bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj);
 
 void radeon_fb_output_poll_changed(struct radeon_device *rdev);
+
+void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id);
+
 #endif
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index a598d00..7d6b8e8 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -34,6 +34,7 @@
 #include <drm/drmP.h>
 #include "radeon_drm.h"
 #include "radeon.h"
+#include "radeon_trace.h"
 
 
 int radeon_ttm_init(struct radeon_device *rdev);
@@ -146,6 +147,7 @@
 		list_add_tail(&bo->list, &rdev->gem.objects);
 		mutex_unlock(&bo->rdev->gem.mutex);
 	}
+	trace_radeon_bo_create(bo);
 	return 0;
 }
 
@@ -302,34 +304,9 @@
 				struct list_head *head)
 {
 	if (lobj->wdomain) {
-		list_add(&lobj->list, head);
+		list_add(&lobj->tv.head, head);
 	} else {
-		list_add_tail(&lobj->list, head);
-	}
-}
-
-int radeon_bo_list_reserve(struct list_head *head)
-{
-	struct radeon_bo_list *lobj;
-	int r;
-
-	list_for_each_entry(lobj, head, list){
-		r = radeon_bo_reserve(lobj->bo, false);
-		if (unlikely(r != 0))
-			return r;
-		lobj->reserved = true;
-	}
-	return 0;
-}
-
-void radeon_bo_list_unreserve(struct list_head *head)
-{
-	struct radeon_bo_list *lobj;
-
-	list_for_each_entry(lobj, head, list) {
-		/* only unreserve object we successfully reserved */
-		if (lobj->reserved && radeon_bo_is_reserved(lobj->bo))
-			radeon_bo_unreserve(lobj->bo);
+		list_add_tail(&lobj->tv.head, head);
 	}
 }
 
@@ -340,14 +317,11 @@
 	u32 domain;
 	int r;
 
-	list_for_each_entry(lobj, head, list) {
-		lobj->reserved = false;
-	}
-	r = radeon_bo_list_reserve(head);
+	r = ttm_eu_reserve_buffers(head);
 	if (unlikely(r != 0)) {
 		return r;
 	}
-	list_for_each_entry(lobj, head, list) {
+	list_for_each_entry(lobj, head, tv.head) {
 		bo = lobj->bo;
 		if (!bo->pin_count) {
 			domain = lobj->wdomain ? lobj->wdomain : lobj->rdomain;
@@ -370,25 +344,6 @@
 	return 0;
 }
 
-void radeon_bo_list_fence(struct list_head *head, void *fence)
-{
-	struct radeon_bo_list *lobj;
-	struct radeon_bo *bo;
-	struct radeon_fence *old_fence = NULL;
-
-	list_for_each_entry(lobj, head, list) {
-		bo = lobj->bo;
-		spin_lock(&bo->tbo.lock);
-		old_fence = (struct radeon_fence *)bo->tbo.sync_obj;
-		bo->tbo.sync_obj = radeon_fence_ref(fence);
-		bo->tbo.sync_obj_arg = NULL;
-		spin_unlock(&bo->tbo.lock);
-		if (old_fence) {
-			radeon_fence_unref(&old_fence);
-		}
-	}
-}
-
 int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
 			     struct vm_area_struct *vma)
 {
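
Note: the radeon_object.c changes above drop the driver-private reserve/unreserve/fence list walkers in favour of the shared TTM execbuf utilities; radeon_bo_list_validate() now calls ttm_eu_reserve_buffers(), and ttm_eu_fence_buffer_objects() fences and unreserves a whole list in one call. A short kernel-style sketch of that flow is below; the function name and the per-buffer validation step are placeholders, while the ttm_eu_* calls and struct ttm_validate_buffer are the TTM interfaces of this era.

#include <linux/list.h>
#include "ttm/ttm_execbuf_util.h"

/* Sketch of the execbuf-util flow the radeon list code now relies on. */
static int reserve_validate_fence(struct list_head *head, void *fence)
{
	struct ttm_validate_buffer *entry;
	int r;

	/* Reserve every buffer on the list; the helper backs off and
	 * retries internally to avoid reservation deadlocks. */
	r = ttm_eu_reserve_buffers(head);
	if (r)
		return r;

	list_for_each_entry(entry, head, head) {
		/* driver-specific: validate/pin entry->bo here */
	}

	/* Attach the fence to every buffer and unreserve them. */
	ttm_eu_fence_buffer_objects(head, fence);
	return 0;
}
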
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index d143702..22d4c23 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -126,12 +126,12 @@
 	r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0);
 	if (unlikely(r != 0))
 		return r;
-	spin_lock(&bo->tbo.lock);
+	spin_lock(&bo->tbo.bdev->fence_lock);
 	if (mem_type)
 		*mem_type = bo->tbo.mem.mem_type;
 	if (bo->tbo.sync_obj)
 		r = ttm_bo_wait(&bo->tbo, true, true, no_wait);
-	spin_unlock(&bo->tbo.lock);
+	spin_unlock(&bo->tbo.bdev->fence_lock);
 	ttm_bo_unreserve(&bo->tbo);
 	return r;
 }
@@ -152,10 +152,7 @@
 extern void radeon_bo_fini(struct radeon_device *rdev);
 extern void radeon_bo_list_add_object(struct radeon_bo_list *lobj,
 				struct list_head *head);
-extern int radeon_bo_list_reserve(struct list_head *head);
-extern void radeon_bo_list_unreserve(struct list_head *head);
 extern int radeon_bo_list_validate(struct list_head *head);
-extern void radeon_bo_list_fence(struct list_head *head, void *fence);
 extern int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
 				struct vm_area_struct *vma);
 extern int radeon_bo_set_tiling_flags(struct radeon_bo *bo,
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 8c9b2ef..3b1b2bf 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -167,13 +167,13 @@
 	if (radeon_gui_idle(rdev)) {
 		sclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
 			clock_info[rdev->pm.requested_clock_mode_index].sclk;
-		if (sclk > rdev->clock.default_sclk)
-			sclk = rdev->clock.default_sclk;
+		if (sclk > rdev->pm.default_sclk)
+			sclk = rdev->pm.default_sclk;
 
 		mclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
 			clock_info[rdev->pm.requested_clock_mode_index].mclk;
-		if (mclk > rdev->clock.default_mclk)
-			mclk = rdev->clock.default_mclk;
+		if (mclk > rdev->pm.default_mclk)
+			mclk = rdev->pm.default_mclk;
 
 		/* upvolt before raising clocks, downvolt after lowering clocks */
 		if (sclk < rdev->pm.current_sclk)
@@ -405,20 +405,13 @@
 		rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
 		mutex_unlock(&rdev->pm.mutex);
 	} else if (strncmp("profile", buf, strlen("profile")) == 0) {
-		bool flush_wq = false;
-
 		mutex_lock(&rdev->pm.mutex);
-		if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
-			cancel_delayed_work(&rdev->pm.dynpm_idle_work);
-			flush_wq = true;
-		}
 		/* disable dynpm */
 		rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
 		rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
 		rdev->pm.pm_method = PM_METHOD_PROFILE;
 		mutex_unlock(&rdev->pm.mutex);
-		if (flush_wq)
-			flush_workqueue(rdev->wq);
+		cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work);
 	} else {
 		DRM_ERROR("invalid power method!\n");
 		goto fail;
@@ -447,8 +440,12 @@
 		temp = rv770_get_temp(rdev);
 		break;
 	case THERMAL_TYPE_EVERGREEN:
+	case THERMAL_TYPE_NI:
 		temp = evergreen_get_temp(rdev);
 		break;
+	case THERMAL_TYPE_SUMO:
+		temp = sumo_get_temp(rdev);
+		break;
 	default:
 		temp = 0;
 		break;
@@ -487,6 +484,7 @@
 	case THERMAL_TYPE_RV6XX:
 	case THERMAL_TYPE_RV770:
 	case THERMAL_TYPE_EVERGREEN:
+	case THERMAL_TYPE_SUMO:
 		rdev->pm.int_hwmon_dev = hwmon_device_register(rdev->dev);
 		if (IS_ERR(rdev->pm.int_hwmon_dev)) {
 			err = PTR_ERR(rdev->pm.int_hwmon_dev);
@@ -520,34 +518,39 @@
 
 void radeon_pm_suspend(struct radeon_device *rdev)
 {
-	bool flush_wq = false;
-
 	mutex_lock(&rdev->pm.mutex);
 	if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
-		cancel_delayed_work(&rdev->pm.dynpm_idle_work);
 		if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE)
 			rdev->pm.dynpm_state = DYNPM_STATE_SUSPENDED;
-		flush_wq = true;
 	}
 	mutex_unlock(&rdev->pm.mutex);
-	if (flush_wq)
-		flush_workqueue(rdev->wq);
+
+	cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work);
 }
 
 void radeon_pm_resume(struct radeon_device *rdev)
 {
+	/* set up the default clocks if the MC ucode is loaded */
+	if (ASIC_IS_DCE5(rdev) && rdev->mc_fw) {
+		if (rdev->pm.default_vddc)
+			radeon_atom_set_voltage(rdev, rdev->pm.default_vddc);
+		if (rdev->pm.default_sclk)
+			radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
+		if (rdev->pm.default_mclk)
+			radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
+	}
 	/* asic init will reset the default power state */
 	mutex_lock(&rdev->pm.mutex);
 	rdev->pm.current_power_state_index = rdev->pm.default_power_state_index;
 	rdev->pm.current_clock_mode_index = 0;
-	rdev->pm.current_sclk = rdev->clock.default_sclk;
-	rdev->pm.current_mclk = rdev->clock.default_mclk;
+	rdev->pm.current_sclk = rdev->pm.default_sclk;
+	rdev->pm.current_mclk = rdev->pm.default_mclk;
 	rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage;
 	if (rdev->pm.pm_method == PM_METHOD_DYNPM
 	    && rdev->pm.dynpm_state == DYNPM_STATE_SUSPENDED) {
 		rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
-		queue_delayed_work(rdev->wq, &rdev->pm.dynpm_idle_work,
-					msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
+		schedule_delayed_work(&rdev->pm.dynpm_idle_work,
+				      msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
 	}
 	mutex_unlock(&rdev->pm.mutex);
 	radeon_pm_compute_clocks(rdev);
@@ -564,6 +567,8 @@
 	rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
 	rdev->pm.dynpm_can_upclock = true;
 	rdev->pm.dynpm_can_downclock = true;
+	rdev->pm.default_sclk = rdev->clock.default_sclk;
+	rdev->pm.default_mclk = rdev->clock.default_mclk;
 	rdev->pm.current_sclk = rdev->clock.default_sclk;
 	rdev->pm.current_mclk = rdev->clock.default_mclk;
 	rdev->pm.int_thermal_type = THERMAL_TYPE_NONE;
@@ -575,12 +580,24 @@
 			radeon_combios_get_power_modes(rdev);
 		radeon_pm_print_states(rdev);
 		radeon_pm_init_profile(rdev);
+		/* set up the default clocks if the MC ucode is loaded */
+		if (ASIC_IS_DCE5(rdev) && rdev->mc_fw) {
+			if (rdev->pm.default_vddc)
+				radeon_atom_set_voltage(rdev, rdev->pm.default_vddc);
+			if (rdev->pm.default_sclk)
+				radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
+			if (rdev->pm.default_mclk)
+				radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
+		}
 	}
 
 	/* set up the internal thermal sensor if applicable */
 	ret = radeon_hwmon_init(rdev);
 	if (ret)
 		return ret;
+
+	INIT_DELAYED_WORK(&rdev->pm.dynpm_idle_work, radeon_dynpm_idle_work_handler);
+
 	if (rdev->pm.num_power_states > 1) {
 		/* where's the best place to put these? */
 		ret = device_create_file(rdev->dev, &dev_attr_power_profile);
@@ -594,8 +611,6 @@
 		rdev->acpi_nb.notifier_call = radeon_acpi_event;
 		register_acpi_notifier(&rdev->acpi_nb);
 #endif
-		INIT_DELAYED_WORK(&rdev->pm.dynpm_idle_work, radeon_dynpm_idle_work_handler);
-
 		if (radeon_debugfs_pm_init(rdev)) {
 			DRM_ERROR("Failed to register debugfs file for PM!\n");
 		}
@@ -609,25 +624,20 @@
 void radeon_pm_fini(struct radeon_device *rdev)
 {
 	if (rdev->pm.num_power_states > 1) {
-		bool flush_wq = false;
-
 		mutex_lock(&rdev->pm.mutex);
 		if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
 			rdev->pm.profile = PM_PROFILE_DEFAULT;
 			radeon_pm_update_profile(rdev);
 			radeon_pm_set_clocks(rdev);
 		} else if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
-			/* cancel work */
-			cancel_delayed_work(&rdev->pm.dynpm_idle_work);
-			flush_wq = true;
 			/* reset default clocks */
 			rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
 			rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
 			radeon_pm_set_clocks(rdev);
 		}
 		mutex_unlock(&rdev->pm.mutex);
-		if (flush_wq)
-			flush_workqueue(rdev->wq);
+
+		cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work);
 
 		device_remove_file(rdev->dev, &dev_attr_power_profile);
 		device_remove_file(rdev->dev, &dev_attr_power_method);
@@ -686,12 +696,12 @@
 					radeon_pm_get_dynpm_state(rdev);
 					radeon_pm_set_clocks(rdev);
 
-					queue_delayed_work(rdev->wq, &rdev->pm.dynpm_idle_work,
-							   msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
+					schedule_delayed_work(&rdev->pm.dynpm_idle_work,
+							      msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
 				} else if (rdev->pm.dynpm_state == DYNPM_STATE_PAUSED) {
 					rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
-					queue_delayed_work(rdev->wq, &rdev->pm.dynpm_idle_work,
-							   msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
+					schedule_delayed_work(&rdev->pm.dynpm_idle_work,
+							      msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
 					DRM_DEBUG_DRIVER("radeon: dynamic power management activated\n");
 				}
 			} else { /* count == 0 */
@@ -720,9 +730,9 @@
 	 */
 	for (crtc = 0; (crtc < rdev->num_crtc) && in_vbl; crtc++) {
 		if (rdev->pm.active_crtcs & (1 << crtc)) {
-			vbl_status = radeon_get_crtc_scanoutpos(rdev, crtc, &vpos, &hpos);
-			if ((vbl_status & RADEON_SCANOUTPOS_VALID) &&
-			    !(vbl_status & RADEON_SCANOUTPOS_INVBL))
+			vbl_status = radeon_get_crtc_scanoutpos(rdev->ddev, crtc, &vpos, &hpos);
+			if ((vbl_status & DRM_SCANOUTPOS_VALID) &&
+			    !(vbl_status & DRM_SCANOUTPOS_INVBL))
 				in_vbl = false;
 		}
 	}
@@ -796,8 +806,8 @@
 			radeon_pm_set_clocks(rdev);
 		}
 
-		queue_delayed_work(rdev->wq, &rdev->pm.dynpm_idle_work,
-					msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
+		schedule_delayed_work(&rdev->pm.dynpm_idle_work,
+				      msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
 	}
 	mutex_unlock(&rdev->pm.mutex);
 	ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
@@ -814,9 +824,9 @@
 	struct drm_device *dev = node->minor->dev;
 	struct radeon_device *rdev = dev->dev_private;
 
-	seq_printf(m, "default engine clock: %u0 kHz\n", rdev->clock.default_sclk);
+	seq_printf(m, "default engine clock: %u0 kHz\n", rdev->pm.default_sclk);
 	seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev));
-	seq_printf(m, "default memory clock: %u0 kHz\n", rdev->clock.default_mclk);
+	seq_printf(m, "default memory clock: %u0 kHz\n", rdev->pm.default_mclk);
 	if (rdev->asic->get_memory_clock)
 		seq_printf(m, "current memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev));
 	if (rdev->pm.current_vddc)
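
Note: the power-management changes above retire the driver-private workqueue; the dynpm idle work is now queued on the system workqueue with schedule_delayed_work() and torn down with a single cancel_delayed_work_sync(), which also waits for a handler that is still running, replacing the old cancel_delayed_work() + flush_workqueue() pair. A minimal kernel-module sketch of that pairing follows; the module and handler names are illustrative, not radeon symbols.

#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

/* Illustrative self-rearming delayed work, mirroring the dynpm idle work. */
static struct delayed_work idle_work;

static void idle_work_handler(struct work_struct *work)
{
	pr_info("periodic idle check\n");
	/* re-arm, as radeon_dynpm_idle_work_handler does */
	schedule_delayed_work(&idle_work, msecs_to_jiffies(100));
}

static int __init demo_init(void)
{
	INIT_DELAYED_WORK(&idle_work, idle_work_handler);
	schedule_delayed_work(&idle_work, msecs_to_jiffies(100));
	return 0;
}

static void __exit demo_exit(void)
{
	/* One call replaces cancel_delayed_work() + flush_workqueue():
	 * it waits for a running handler and copes with self-rearming work. */
	cancel_delayed_work_sync(&idle_work);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
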
diff --git a/drivers/gpu/drm/radeon/radeon_reg.h b/drivers/gpu/drm/radeon/radeon_reg.h
index 6492881..3cd4dac 100644
--- a/drivers/gpu/drm/radeon/radeon_reg.h
+++ b/drivers/gpu/drm/radeon/radeon_reg.h
@@ -55,6 +55,7 @@
 #include "r500_reg.h"
 #include "r600_reg.h"
 #include "evergreen_reg.h"
+#include "ni_reg.h"
 
 #define RADEON_MC_AGP_LOCATION		0x014c
 #define		RADEON_MC_AGP_START_MASK	0x0000FFFF
@@ -320,6 +321,15 @@
 #       define RADEON_PCIE_LC_RECONFIG_NOW         (1 << 8)
 #       define RADEON_PCIE_LC_RECONFIG_LATER       (1 << 9)
 #       define RADEON_PCIE_LC_SHORT_RECONFIG_EN    (1 << 10)
+#       define R600_PCIE_LC_RECONFIG_ARC_MISSING_ESCAPE   (1 << 7)
+#       define R600_PCIE_LC_RENEGOTIATION_SUPPORT  (1 << 9)
+#       define R600_PCIE_LC_RENEGOTIATE_EN         (1 << 10)
+#       define R600_PCIE_LC_SHORT_RECONFIG_EN      (1 << 11)
+#       define R600_PCIE_LC_UPCONFIGURE_SUPPORT    (1 << 12)
+#       define R600_PCIE_LC_UPCONFIGURE_DIS        (1 << 13)
+
+#define R600_TARGET_AND_CURRENT_PROFILE_INDEX      0x70c
+#define R700_TARGET_AND_CURRENT_PROFILE_INDEX      0x66c
 
 #define RADEON_CACHE_CNTL                   0x1724
 #define RADEON_CACHE_LINE                   0x0f0c /* PCI */
@@ -422,6 +432,7 @@
 #       define RADEON_CRTC_CSYNC_EN         (1 <<  4)
 #       define RADEON_CRTC_ICON_EN          (1 << 15)
 #       define RADEON_CRTC_CUR_EN           (1 << 16)
+#       define RADEON_CRTC_VSTAT_MODE_MASK  (3 << 17)
 #       define RADEON_CRTC_CUR_MODE_MASK    (7 << 20)
 #       define RADEON_CRTC_CUR_MODE_SHIFT   20
 #       define RADEON_CRTC_CUR_MODE_MONO    0
@@ -509,6 +520,8 @@
 #       define RADEON_CRTC_TILE_EN                      (1 << 15)
 #       define RADEON_CRTC_OFFSET_FLIP_CNTL             (1 << 16)
 #       define RADEON_CRTC_STEREO_OFFSET_EN             (1 << 17)
+#       define RADEON_CRTC_GUI_TRIG_OFFSET_LEFT_EN      (1 << 28)
+#       define RADEON_CRTC_GUI_TRIG_OFFSET_RIGHT_EN     (1 << 29)
 
 #define R300_CRTC_TILE_X0_Y0	            0x0350
 #define R300_CRTC2_TILE_X0_Y0	            0x0358
diff --git a/drivers/gpu/drm/radeon/radeon_trace.h b/drivers/gpu/drm/radeon/radeon_trace.h
new file mode 100644
index 0000000..eafd816
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_trace.h
@@ -0,0 +1,82 @@
+#if !defined(_RADEON_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _RADEON_TRACE_H_
+
+#include <linux/stringify.h>
+#include <linux/types.h>
+#include <linux/tracepoint.h>
+
+#include <drm/drmP.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM radeon
+#define TRACE_SYSTEM_STRING __stringify(TRACE_SYSTEM)
+#define TRACE_INCLUDE_FILE radeon_trace
+
+TRACE_EVENT(radeon_bo_create,
+	    TP_PROTO(struct radeon_bo *bo),
+	    TP_ARGS(bo),
+	    TP_STRUCT__entry(
+			     __field(struct radeon_bo *, bo)
+			     __field(u32, pages)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->bo = bo;
+			   __entry->pages = bo->tbo.num_pages;
+			   ),
+	    TP_printk("bo=%p, pages=%u", __entry->bo, __entry->pages)
+);
+
+DECLARE_EVENT_CLASS(radeon_fence_request,
+
+	    TP_PROTO(struct drm_device *dev, u32 seqno),
+
+	    TP_ARGS(dev, seqno),
+
+	    TP_STRUCT__entry(
+			     __field(u32, dev)
+			     __field(u32, seqno)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->dev = dev->primary->index;
+			   __entry->seqno = seqno;
+			   ),
+
+	    TP_printk("dev=%u, seqno=%u", __entry->dev, __entry->seqno)
+);
+
+DEFINE_EVENT(radeon_fence_request, radeon_fence_emit,
+
+	    TP_PROTO(struct drm_device *dev, u32 seqno),
+
+	    TP_ARGS(dev, seqno)
+);
+
+DEFINE_EVENT(radeon_fence_request, radeon_fence_retire,
+
+	    TP_PROTO(struct drm_device *dev, u32 seqno),
+
+	    TP_ARGS(dev, seqno)
+);
+
+DEFINE_EVENT(radeon_fence_request, radeon_fence_wait_begin,
+
+	    TP_PROTO(struct drm_device *dev, u32 seqno),
+
+	    TP_ARGS(dev, seqno)
+);
+
+DEFINE_EVENT(radeon_fence_request, radeon_fence_wait_end,
+
+	    TP_PROTO(struct drm_device *dev, u32 seqno),
+
+	    TP_ARGS(dev, seqno)
+);
+
+#endif
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#include <trace/define_trace.h>
diff --git a/drivers/gpu/drm/radeon/radeon_trace_points.c b/drivers/gpu/drm/radeon/radeon_trace_points.c
new file mode 100644
index 0000000..8175993
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_trace_points.c
@@ -0,0 +1,9 @@
+/* Copyright Red Hat Inc 2010.
+ * Author : Dave Airlie <airlied@redhat.com>
+ */
+#include <drm/drmP.h>
+#include "radeon_drm.h"
+#include "radeon.h"
+
+#define CREATE_TRACE_POINTS
+#include "radeon_trace.h"
diff --git a/drivers/gpu/drm/radeon/reg_srcs/rv515 b/drivers/gpu/drm/radeon/reg_srcs/rv515
index b3f9f1d..ef422bb 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/rv515
+++ b/drivers/gpu/drm/radeon/reg_srcs/rv515
@@ -304,6 +304,22 @@
 0x4630 US_CODE_ADDR
 0x4634 US_CODE_RANGE
 0x4638 US_CODE_OFFSET
+0x4640 US_FORMAT0_0
+0x4644 US_FORMAT0_1
+0x4648 US_FORMAT0_2
+0x464C US_FORMAT0_3
+0x4650 US_FORMAT0_4
+0x4654 US_FORMAT0_5
+0x4658 US_FORMAT0_6
+0x465C US_FORMAT0_7
+0x4660 US_FORMAT0_8
+0x4664 US_FORMAT0_9
+0x4668 US_FORMAT0_10
+0x466C US_FORMAT0_11
+0x4670 US_FORMAT0_12
+0x4674 US_FORMAT0_13
+0x4678 US_FORMAT0_14
+0x467C US_FORMAT0_15
 0x46A4 US_OUT_FMT_0
 0x46A8 US_OUT_FMT_1
 0x46AC US_OUT_FMT_2
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index f1c6e02c..b4192ac 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -46,6 +46,56 @@
 void rs600_gpu_init(struct radeon_device *rdev);
 int rs600_mc_wait_for_idle(struct radeon_device *rdev);
 
+void rs600_pre_page_flip(struct radeon_device *rdev, int crtc)
+{
+	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc];
+	u32 tmp;
+
+	/* make sure flip is at vb rather than hb */
+	tmp = RREG32(AVIVO_D1GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset);
+	tmp &= ~AVIVO_D1GRPH_SURFACE_UPDATE_H_RETRACE_EN;
+	WREG32(AVIVO_D1GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset, tmp);
+
+	/* set pageflip to happen anywhere in vblank interval */
+	WREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 0);
+
+	/* enable the pflip int */
+	radeon_irq_kms_pflip_irq_get(rdev, crtc);
+}
+
+void rs600_post_page_flip(struct radeon_device *rdev, int crtc)
+{
+	/* disable the pflip int */
+	radeon_irq_kms_pflip_irq_put(rdev, crtc);
+}
+
+u32 rs600_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
+{
+	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
+	u32 tmp = RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset);
+
+	/* Lock the graphics update lock */
+	tmp |= AVIVO_D1GRPH_UPDATE_LOCK;
+	WREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);
+
+	/* update the scanout addresses */
+	WREG32(AVIVO_D1GRPH_SECONDARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
+	       (u32)crtc_base);
+	WREG32(AVIVO_D1GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
+	       (u32)crtc_base);
+
+	/* Wait for update_pending to go high. */
+	while (!(RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING));
+	DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
+
+	/* Unlock the lock, so double-buffering can take place inside vblank */
+	tmp &= ~AVIVO_D1GRPH_UPDATE_LOCK;
+	WREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);
+
+	/* Return current update_pending status: */
+	return RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING;
+}
+
 void rs600_pm_misc(struct radeon_device *rdev)
 {
 	int requested_index = rdev->pm.requested_power_state_index;
@@ -515,10 +565,12 @@
 	if (rdev->irq.gui_idle) {
 		tmp |= S_000040_GUI_IDLE(1);
 	}
-	if (rdev->irq.crtc_vblank_int[0]) {
+	if (rdev->irq.crtc_vblank_int[0] ||
+	    rdev->irq.pflip[0]) {
 		mode_int |= S_006540_D1MODE_VBLANK_INT_MASK(1);
 	}
-	if (rdev->irq.crtc_vblank_int[1]) {
+	if (rdev->irq.crtc_vblank_int[1] ||
+	    rdev->irq.pflip[1]) {
 		mode_int |= S_006540_D2MODE_VBLANK_INT_MASK(1);
 	}
 	if (rdev->irq.hpd[0]) {
@@ -534,7 +586,7 @@
 	return 0;
 }
 
-static inline uint32_t rs600_irq_ack(struct radeon_device *rdev, u32 *r500_disp_int)
+static inline u32 rs600_irq_ack(struct radeon_device *rdev)
 {
 	uint32_t irqs = RREG32(R_000044_GEN_INT_STATUS);
 	uint32_t irq_mask = S_000044_SW_INT(1);
@@ -547,27 +599,27 @@
 	}
 
 	if (G_000044_DISPLAY_INT_STAT(irqs)) {
-		*r500_disp_int = RREG32(R_007EDC_DISP_INTERRUPT_STATUS);
-		if (G_007EDC_LB_D1_VBLANK_INTERRUPT(*r500_disp_int)) {
+		rdev->irq.stat_regs.r500.disp_int = RREG32(R_007EDC_DISP_INTERRUPT_STATUS);
+		if (G_007EDC_LB_D1_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
 			WREG32(R_006534_D1MODE_VBLANK_STATUS,
 				S_006534_D1MODE_VBLANK_ACK(1));
 		}
-		if (G_007EDC_LB_D2_VBLANK_INTERRUPT(*r500_disp_int)) {
+		if (G_007EDC_LB_D2_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
 			WREG32(R_006D34_D2MODE_VBLANK_STATUS,
 				S_006D34_D2MODE_VBLANK_ACK(1));
 		}
-		if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(*r500_disp_int)) {
+		if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
 			tmp = RREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL);
 			tmp |= S_007D08_DC_HOT_PLUG_DETECT1_INT_ACK(1);
 			WREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
 		}
-		if (G_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(*r500_disp_int)) {
+		if (G_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
 			tmp = RREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL);
 			tmp |= S_007D18_DC_HOT_PLUG_DETECT2_INT_ACK(1);
 			WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
 		}
 	} else {
-		*r500_disp_int = 0;
+		rdev->irq.stat_regs.r500.disp_int = 0;
 	}
 
 	if (irqs) {
@@ -578,32 +630,30 @@
 
 void rs600_irq_disable(struct radeon_device *rdev)
 {
-	u32 tmp;
-
 	WREG32(R_000040_GEN_INT_CNTL, 0);
 	WREG32(R_006540_DxMODE_INT_MASK, 0);
 	/* Wait and acknowledge irq */
 	mdelay(1);
-	rs600_irq_ack(rdev, &tmp);
+	rs600_irq_ack(rdev);
 }
 
 int rs600_irq_process(struct radeon_device *rdev)
 {
-	uint32_t status, msi_rearm;
-	uint32_t r500_disp_int;
+	u32 status, msi_rearm;
 	bool queue_hotplug = false;
 
 	/* reset gui idle ack.  the status bit is broken */
 	rdev->irq.gui_idle_acked = false;
 
-	status = rs600_irq_ack(rdev, &r500_disp_int);
-	if (!status && !r500_disp_int) {
+	status = rs600_irq_ack(rdev);
+	if (!status && !rdev->irq.stat_regs.r500.disp_int) {
 		return IRQ_NONE;
 	}
-	while (status || r500_disp_int) {
+	while (status || rdev->irq.stat_regs.r500.disp_int) {
 		/* SW interrupt */
-		if (G_000044_SW_INT(status))
+		if (G_000044_SW_INT(status)) {
 			radeon_fence_process(rdev);
+		}
 		/* GUI idle */
 		if (G_000040_GUI_IDLE(status)) {
 			rdev->irq.gui_idle_acked = true;
@@ -611,30 +661,38 @@
 			wake_up(&rdev->irq.idle_queue);
 		}
 		/* Vertical blank interrupts */
-		if (G_007EDC_LB_D1_VBLANK_INTERRUPT(r500_disp_int)) {
-			drm_handle_vblank(rdev->ddev, 0);
-			rdev->pm.vblank_sync = true;
-			wake_up(&rdev->irq.vblank_queue);
+		if (G_007EDC_LB_D1_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
+			if (rdev->irq.crtc_vblank_int[0]) {
+				drm_handle_vblank(rdev->ddev, 0);
+				rdev->pm.vblank_sync = true;
+				wake_up(&rdev->irq.vblank_queue);
+			}
+			if (rdev->irq.pflip[0])
+				radeon_crtc_handle_flip(rdev, 0);
 		}
-		if (G_007EDC_LB_D2_VBLANK_INTERRUPT(r500_disp_int)) {
-			drm_handle_vblank(rdev->ddev, 1);
-			rdev->pm.vblank_sync = true;
-			wake_up(&rdev->irq.vblank_queue);
+		if (G_007EDC_LB_D2_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
+			if (rdev->irq.crtc_vblank_int[1]) {
+				drm_handle_vblank(rdev->ddev, 1);
+				rdev->pm.vblank_sync = true;
+				wake_up(&rdev->irq.vblank_queue);
+			}
+			if (rdev->irq.pflip[1])
+				radeon_crtc_handle_flip(rdev, 1);
 		}
-		if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(r500_disp_int)) {
+		if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
 			queue_hotplug = true;
 			DRM_DEBUG("HPD1\n");
 		}
-		if (G_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(r500_disp_int)) {
+		if (G_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
 			queue_hotplug = true;
 			DRM_DEBUG("HPD2\n");
 		}
-		status = rs600_irq_ack(rdev, &r500_disp_int);
+		status = rs600_irq_ack(rdev);
 	}
 	/* reset gui idle ack.  the status bit is broken */
 	rdev->irq.gui_idle_acked = false;
 	if (queue_hotplug)
-		queue_work(rdev->wq, &rdev->hotplug_work);
+		schedule_work(&rdev->hotplug_work);
 	if (rdev->msi_enabled) {
 		switch (rdev->family) {
 		case CHIP_RS600:
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 4dfead8..3a264aa 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -41,6 +41,41 @@
 
 static void rv770_gpu_init(struct radeon_device *rdev);
 void rv770_fini(struct radeon_device *rdev);
+static void rv770_pcie_gen2_enable(struct radeon_device *rdev);
+
+u32 rv770_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
+{
+	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
+	u32 tmp = RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset);
+
+	/* Lock the graphics update lock */
+	tmp |= AVIVO_D1GRPH_UPDATE_LOCK;
+	WREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);
+
+	/* update the scanout addresses */
+	if (radeon_crtc->crtc_id) {
+		WREG32(D2GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, upper_32_bits(crtc_base));
+		WREG32(D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, upper_32_bits(crtc_base));
+	} else {
+		WREG32(D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, upper_32_bits(crtc_base));
+		WREG32(D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, upper_32_bits(crtc_base));
+	}
+	WREG32(D1GRPH_SECONDARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
+	       (u32)crtc_base);
+	WREG32(D1GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
+	       (u32)crtc_base);
+
+	/* Wait for update_pending to go high. */
+	while (!(RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING));
+	DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
+
+	/* Unlock the lock, so double-buffering can take place inside vblank */
+	tmp &= ~AVIVO_D1GRPH_UPDATE_LOCK;
+	WREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);
+
+	/* Return current update_pending status: */
+	return RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING;
+}
 
 /* get temperature in millidegrees */
 u32 rv770_get_temp(struct radeon_device *rdev)
@@ -489,6 +524,49 @@
 	return backend_map;
 }
 
+static void rv770_program_channel_remap(struct radeon_device *rdev)
+{
+	u32 tcp_chan_steer, mc_shared_chremap, tmp;
+	bool force_no_swizzle;
+
+	switch (rdev->family) {
+	case CHIP_RV770:
+	case CHIP_RV730:
+		force_no_swizzle = false;
+		break;
+	case CHIP_RV710:
+	case CHIP_RV740:
+	default:
+		force_no_swizzle = true;
+		break;
+	}
+
+	tmp = RREG32(MC_SHARED_CHMAP);
+	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
+	case 0:
+	case 1:
+	default:
+		/* default mapping */
+		mc_shared_chremap = 0x00fac688;
+		break;
+	case 2:
+	case 3:
+		if (force_no_swizzle)
+			mc_shared_chremap = 0x00fac688;
+		else
+			mc_shared_chremap = 0x00bbc298;
+		break;
+	}
+
+	if (rdev->family == CHIP_RV740)
+		tcp_chan_steer = 0x00ef2a60;
+	else
+		tcp_chan_steer = 0x00fac688;
+
+	WREG32(TCP_CHAN_STEER, tcp_chan_steer);
+	WREG32(MC_SHARED_CHREMAP, mc_shared_chremap);
+}
+
 static void rv770_gpu_init(struct radeon_device *rdev)
 {
 	int i, j, num_qd_pipes;
@@ -688,6 +766,8 @@
 	WREG32(DCP_TILING_CONFIG, (gb_tiling_config & 0xffff));
 	WREG32(HDP_TILING_CONFIG, (gb_tiling_config & 0xffff));
 
+	rv770_program_channel_remap(rdev);
+
 	WREG32(CC_RB_BACKEND_DISABLE,      cc_rb_backend_disable);
 	WREG32(CC_GC_SHADER_PIPE_CONFIG,   cc_gc_shader_pipe_config);
 	WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
@@ -956,6 +1036,45 @@
 	radeon_bo_unref(&rdev->vram_scratch.robj);
 }
 
+void r700_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
+{
+	u64 size_bf, size_af;
+
+	if (mc->mc_vram_size > 0xE0000000) {
+		/* leave room for at least 512M GTT */
+		dev_warn(rdev->dev, "limiting VRAM\n");
+		mc->real_vram_size = 0xE0000000;
+		mc->mc_vram_size = 0xE0000000;
+	}
+	if (rdev->flags & RADEON_IS_AGP) {
+		size_bf = mc->gtt_start;
+		size_af = 0xFFFFFFFF - mc->gtt_end + 1;
+		if (size_bf > size_af) {
+			if (mc->mc_vram_size > size_bf) {
+				dev_warn(rdev->dev, "limiting VRAM\n");
+				mc->real_vram_size = size_bf;
+				mc->mc_vram_size = size_bf;
+			}
+			mc->vram_start = mc->gtt_start - mc->mc_vram_size;
+		} else {
+			if (mc->mc_vram_size > size_af) {
+				dev_warn(rdev->dev, "limiting VRAM\n");
+				mc->real_vram_size = size_af;
+				mc->mc_vram_size = size_af;
+			}
+			mc->vram_start = mc->gtt_end;
+		}
+		mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
+		dev_info(rdev->dev, "VRAM: %lluM 0x%08llX - 0x%08llX (%lluM used)\n",
+				mc->mc_vram_size >> 20, mc->vram_start,
+				mc->vram_end, mc->real_vram_size >> 20);
+	} else {
+		radeon_vram_location(rdev, &rdev->mc, 0);
+		rdev->mc.gtt_base_align = 0;
+		radeon_gtt_location(rdev, mc);
+	}
+}
+
 int rv770_mc_init(struct radeon_device *rdev)
 {
 	u32 tmp;
@@ -996,7 +1115,7 @@
 	rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
 	rdev->mc.visible_vram_size = rdev->mc.aper_size;
 	rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
-	r600_vram_gtt_location(rdev, &rdev->mc);
+	r700_vram_gtt_location(rdev, &rdev->mc);
 	radeon_update_bandwidth_info(rdev);
 
 	return 0;
@@ -1006,6 +1125,9 @@
 {
 	int r;
 
+	/* enable pcie gen2 link */
+	rv770_pcie_gen2_enable(rdev);
+
 	if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
 		r = r600_init_microcode(rdev);
 		if (r) {
@@ -1244,3 +1366,75 @@
 	rdev->bios = NULL;
 	radeon_dummy_page_fini(rdev);
 }
+
+static void rv770_pcie_gen2_enable(struct radeon_device *rdev)
+{
+	u32 link_width_cntl, lanes, speed_cntl, tmp;
+	u16 link_cntl2;
+
+	if (rdev->flags & RADEON_IS_IGP)
+		return;
+
+	if (!(rdev->flags & RADEON_IS_PCIE))
+		return;
+
+	/* x2 cards have a special sequence */
+	if (ASIC_IS_X2(rdev))
+		return;
+
+	/* advertise upconfig capability */
+	link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
+	link_width_cntl &= ~LC_UPCONFIGURE_DIS;
+	WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+	link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
+	if (link_width_cntl & LC_RENEGOTIATION_SUPPORT) {
+		lanes = (link_width_cntl & LC_LINK_WIDTH_RD_MASK) >> LC_LINK_WIDTH_RD_SHIFT;
+		link_width_cntl &= ~(LC_LINK_WIDTH_MASK |
+				     LC_RECONFIG_ARC_MISSING_ESCAPE);
+		link_width_cntl |= lanes | LC_RECONFIG_NOW |
+			LC_RENEGOTIATE_EN | LC_UPCONFIGURE_SUPPORT;
+		WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+	} else {
+		link_width_cntl |= LC_UPCONFIGURE_DIS;
+		WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+	}
+
+	speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+	if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) &&
+	    (speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) {
+
+		tmp = RREG32(0x541c);
+		WREG32(0x541c, tmp | 0x8);
+		WREG32(MM_CFGREGS_CNTL, MM_WR_TO_CFG_EN);
+		link_cntl2 = RREG16(0x4088);
+		link_cntl2 &= ~TARGET_LINK_SPEED_MASK;
+		link_cntl2 |= 0x2;
+		WREG16(0x4088, link_cntl2);
+		WREG32(MM_CFGREGS_CNTL, 0);
+
+		speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+		speed_cntl &= ~LC_TARGET_LINK_SPEED_OVERRIDE_EN;
+		WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
+
+		speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+		speed_cntl |= LC_CLR_FAILED_SPD_CHANGE_CNT;
+		WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
+
+		speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+		speed_cntl &= ~LC_CLR_FAILED_SPD_CHANGE_CNT;
+		WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
+
+		speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+		speed_cntl |= LC_GEN2_EN_STRAP;
+		WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
+
+	} else {
+		link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
+		/* XXX: only disable it if gen1 bridge vendor == 0x111d or 0x1106 */
+		if (1)
+			link_width_cntl |= LC_UPCONFIGURE_DIS;
+		else
+			link_width_cntl &= ~LC_UPCONFIGURE_DIS;
+		WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+	}
+}
diff --git a/drivers/gpu/drm/radeon/rv770d.h b/drivers/gpu/drm/radeon/rv770d.h
index b7a5a20..abc8cf5 100644
--- a/drivers/gpu/drm/radeon/rv770d.h
+++ b/drivers/gpu/drm/radeon/rv770d.h
@@ -138,6 +138,7 @@
 #define MC_SHARED_CHMAP						0x2004
 #define		NOOFCHAN_SHIFT					12
 #define		NOOFCHAN_MASK					0x00003000
+#define MC_SHARED_CHREMAP					0x2008
 
 #define	MC_ARB_RAMCFG					0x2760
 #define		NOOFBANK_SHIFT					0
@@ -303,6 +304,7 @@
 #define		BILINEAR_PRECISION_8_BIT			(1 << 31)
 
 #define	TCP_CNTL					0x9610
+#define	TCP_CHAN_STEER					0x9614
 
 #define	VGT_CACHE_INVALIDATION				0x88C4
 #define		CACHE_INVALIDATION(x)				((x)<<0)
@@ -351,4 +353,49 @@
 
 #define	SRBM_STATUS				        0x0E50
 
+#define D1GRPH_PRIMARY_SURFACE_ADDRESS                    0x6110
+#define D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH               0x6914
+#define D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH               0x6114
+#define D1GRPH_SECONDARY_SURFACE_ADDRESS                  0x6118
+#define D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH             0x691c
+#define D2GRPH_SECONDARY_SURFACE_ADDRESS_HIGH             0x611c
+
+/* PCIE link stuff */
+#define PCIE_LC_TRAINING_CNTL                             0xa1 /* PCIE_P */
+#define PCIE_LC_LINK_WIDTH_CNTL                           0xa2 /* PCIE_P */
+#       define LC_LINK_WIDTH_SHIFT                        0
+#       define LC_LINK_WIDTH_MASK                         0x7
+#       define LC_LINK_WIDTH_X0                           0
+#       define LC_LINK_WIDTH_X1                           1
+#       define LC_LINK_WIDTH_X2                           2
+#       define LC_LINK_WIDTH_X4                           3
+#       define LC_LINK_WIDTH_X8                           4
+#       define LC_LINK_WIDTH_X16                          6
+#       define LC_LINK_WIDTH_RD_SHIFT                     4
+#       define LC_LINK_WIDTH_RD_MASK                      0x70
+#       define LC_RECONFIG_ARC_MISSING_ESCAPE             (1 << 7)
+#       define LC_RECONFIG_NOW                            (1 << 8)
+#       define LC_RENEGOTIATION_SUPPORT                   (1 << 9)
+#       define LC_RENEGOTIATE_EN                          (1 << 10)
+#       define LC_SHORT_RECONFIG_EN                       (1 << 11)
+#       define LC_UPCONFIGURE_SUPPORT                     (1 << 12)
+#       define LC_UPCONFIGURE_DIS                         (1 << 13)
+#define PCIE_LC_SPEED_CNTL                                0xa4 /* PCIE_P */
+#       define LC_GEN2_EN_STRAP                           (1 << 0)
+#       define LC_TARGET_LINK_SPEED_OVERRIDE_EN           (1 << 1)
+#       define LC_FORCE_EN_HW_SPEED_CHANGE                (1 << 5)
+#       define LC_FORCE_DIS_HW_SPEED_CHANGE               (1 << 6)
+#       define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK      (0x3 << 8)
+#       define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_SHIFT     3
+#       define LC_CURRENT_DATA_RATE                       (1 << 11)
+#       define LC_VOLTAGE_TIMER_SEL_MASK                  (0xf << 14)
+#       define LC_CLR_FAILED_SPD_CHANGE_CNT               (1 << 21)
+#       define LC_OTHER_SIDE_EVER_SENT_GEN2               (1 << 23)
+#       define LC_OTHER_SIDE_SUPPORTS_GEN2                (1 << 24)
+#define MM_CFGREGS_CNTL                                   0x544c
+#       define MM_WR_TO_CFG_EN                            (1 << 3)
+#define LINK_CNTL2                                        0x88 /* F0 */
+#       define TARGET_LINK_SPEED_MASK                     (0xf << 0)
+#       define SELECTABLE_DEEMPHASIS                      (1 << 6)
+
 #endif
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 934a96a..af61fc2 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -169,7 +169,7 @@
 }
 EXPORT_SYMBOL(ttm_bo_wait_unreserved);
 
-static void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
+void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
 	struct ttm_mem_type_manager *man;
@@ -191,11 +191,7 @@
 	}
 }
 
-/**
- * Call with the lru_lock held.
- */
-
-static int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
+int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
 {
 	int put_count = 0;
 
@@ -227,9 +223,18 @@
 		/**
 		 * Deadlock avoidance for multi-bo reserving.
 		 */
-		if (use_sequence && bo->seq_valid &&
-			(sequence - bo->val_seq < (1 << 31))) {
-			return -EAGAIN;
+		if (use_sequence && bo->seq_valid) {
+			/**
+			 * We've already reserved this one.
+			 */
+			if (unlikely(sequence == bo->val_seq))
+				return -EDEADLK;
+			/**
+			 * Already reserved by a thread that will not back
+			 * off for us. We need to back off.
+			 */
+			if (unlikely(sequence - bo->val_seq < (1 << 31)))
+				return -EAGAIN;
 		}
 
 		if (no_wait)
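
The -EAGAIN test above relies on unsigned wrap-around arithmetic: with a 32-bit sequence counter, sequence - bo->val_seq < (1 << 31) holds exactly when sequence is at or ahead of val_seq on the wrapping number line, so the thread holding the older sequence is the one that backs off. A standalone sketch of the same predicate, with illustrative names only:

#include <stdbool.h>
#include <stdint.h>

/* True when "a" is the same as or newer than "b" in a wrapping 32-bit
 * sequence space -- the comparison used for the -EAGAIN case above. */
static bool seq_after_eq(uint32_t a, uint32_t b)
{
	return a - b < (1u << 31);
}
/* seq_after_eq(5, 3) and seq_after_eq(2, 0xfffffffeu) are true;
 * seq_after_eq(3, 5) is false. */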
@@ -267,6 +272,13 @@
 	BUG();
 }
 
+void ttm_bo_list_ref_sub(struct ttm_buffer_object *bo, int count,
+			 bool never_free)
+{
+	kref_sub(&bo->list_kref, count,
+		 (never_free) ? ttm_bo_ref_bug : ttm_bo_release_list);
+}
+
 int ttm_bo_reserve(struct ttm_buffer_object *bo,
 		   bool interruptible,
 		   bool no_wait, bool use_sequence, uint32_t sequence)
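
ttm_bo_list_ref_sub() above is a drop-in replacement for the open-coded kref_put() loops that the rest of this patch removes; kref_sub() drops all the list references in one step, and never_free selects whether the release path is the BUG trap or the normal release callback. A minimal before/after sketch of a call site:

/* before: drop the references one at a time */
while (put_count--)
	kref_put(&bo->list_kref, ttm_bo_ref_bug);

/* after: one atomic subtraction with the same semantics */
ttm_bo_list_ref_sub(bo, put_count, true);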
@@ -282,20 +294,24 @@
 		put_count = ttm_bo_del_from_lru(bo);
 	spin_unlock(&glob->lru_lock);
 
-	while (put_count--)
-		kref_put(&bo->list_kref, ttm_bo_ref_bug);
+	ttm_bo_list_ref_sub(bo, put_count, true);
 
 	return ret;
 }
 
+void ttm_bo_unreserve_locked(struct ttm_buffer_object *bo)
+{
+	ttm_bo_add_to_lru(bo);
+	atomic_set(&bo->reserved, 0);
+	wake_up_all(&bo->event_queue);
+}
+
 void ttm_bo_unreserve(struct ttm_buffer_object *bo)
 {
 	struct ttm_bo_global *glob = bo->glob;
 
 	spin_lock(&glob->lru_lock);
-	ttm_bo_add_to_lru(bo);
-	atomic_set(&bo->reserved, 0);
-	wake_up_all(&bo->event_queue);
+	ttm_bo_unreserve_locked(bo);
 	spin_unlock(&glob->lru_lock);
 }
 EXPORT_SYMBOL(ttm_bo_unreserve);
@@ -362,8 +378,13 @@
 	int ret = 0;
 
 	if (old_is_pci || new_is_pci ||
-	    ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0))
-		ttm_bo_unmap_virtual(bo);
+	    ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0)) {
+		ret = ttm_mem_io_lock(old_man, true);
+		if (unlikely(ret != 0))
+			goto out_err;
+		ttm_bo_unmap_virtual_locked(bo);
+		ttm_mem_io_unlock(old_man);
+	}
 
 	/*
 	 * Create and bind a ttm if required.
@@ -416,11 +437,9 @@
 	}
 
 	if (bo->mem.mm_node) {
-		spin_lock(&bo->lock);
 		bo->offset = (bo->mem.start << PAGE_SHIFT) +
 		    bdev->man[bo->mem.mem_type].gpu_offset;
 		bo->cur_placement = bo->mem.placement;
-		spin_unlock(&bo->lock);
 	} else
 		bo->offset = 0;
 
@@ -452,7 +471,6 @@
 		ttm_tt_destroy(bo->ttm);
 		bo->ttm = NULL;
 	}
-
 	ttm_bo_mem_put(bo, &bo->mem);
 
 	atomic_set(&bo->reserved, 0);
@@ -474,14 +492,14 @@
 	int put_count;
 	int ret;
 
-	spin_lock(&bo->lock);
+	spin_lock(&bdev->fence_lock);
 	(void) ttm_bo_wait(bo, false, false, true);
 	if (!bo->sync_obj) {
 
 		spin_lock(&glob->lru_lock);
 
 		/**
-		 * Lock inversion between bo::reserve and bo::lock here,
+		 * Lock inversion between bo::reserve and bdev::fence_lock here,
 		 * but that's OK, since we're only trylocking.
 		 */
 
@@ -490,14 +508,13 @@
 		if (unlikely(ret == -EBUSY))
 			goto queue;
 
-		spin_unlock(&bo->lock);
+		spin_unlock(&bdev->fence_lock);
 		put_count = ttm_bo_del_from_lru(bo);
 
 		spin_unlock(&glob->lru_lock);
 		ttm_bo_cleanup_memtype_use(bo);
 
-		while (put_count--)
-			kref_put(&bo->list_kref, ttm_bo_ref_bug);
+		ttm_bo_list_ref_sub(bo, put_count, true);
 
 		return;
 	} else {
@@ -512,7 +529,7 @@
 	kref_get(&bo->list_kref);
 	list_add_tail(&bo->ddestroy, &bdev->ddestroy);
 	spin_unlock(&glob->lru_lock);
-	spin_unlock(&bo->lock);
+	spin_unlock(&bdev->fence_lock);
 
 	if (sync_obj) {
 		driver->sync_obj_flush(sync_obj, sync_obj_arg);
@@ -537,14 +554,15 @@
 			       bool no_wait_reserve,
 			       bool no_wait_gpu)
 {
+	struct ttm_bo_device *bdev = bo->bdev;
 	struct ttm_bo_global *glob = bo->glob;
 	int put_count;
 	int ret = 0;
 
 retry:
-	spin_lock(&bo->lock);
+	spin_lock(&bdev->fence_lock);
 	ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
-	spin_unlock(&bo->lock);
+	spin_unlock(&bdev->fence_lock);
 
 	if (unlikely(ret != 0))
 		return ret;
@@ -580,8 +598,7 @@
 	spin_unlock(&glob->lru_lock);
 	ttm_bo_cleanup_memtype_use(bo);
 
-	while (put_count--)
-		kref_put(&bo->list_kref, ttm_bo_ref_bug);
+	ttm_bo_list_ref_sub(bo, put_count, true);
 
 	return 0;
 }
@@ -652,6 +669,7 @@
 	struct ttm_buffer_object *bo =
 	    container_of(kref, struct ttm_buffer_object, kref);
 	struct ttm_bo_device *bdev = bo->bdev;
+	struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];
 
 	if (likely(bo->vm_node != NULL)) {
 		rb_erase(&bo->vm_rb, &bdev->addr_space_rb);
@@ -659,6 +677,9 @@
 		bo->vm_node = NULL;
 	}
 	write_unlock(&bdev->vm_lock);
+	ttm_mem_io_lock(man, false);
+	ttm_mem_io_free_vm(bo);
+	ttm_mem_io_unlock(man);
 	ttm_bo_cleanup_refs_or_queue(bo);
 	kref_put(&bo->list_kref, ttm_bo_release_list);
 	write_lock(&bdev->vm_lock);
@@ -698,9 +719,9 @@
 	struct ttm_placement placement;
 	int ret = 0;
 
-	spin_lock(&bo->lock);
+	spin_lock(&bdev->fence_lock);
 	ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
-	spin_unlock(&bo->lock);
+	spin_unlock(&bdev->fence_lock);
 
 	if (unlikely(ret != 0)) {
 		if (ret != -ERESTARTSYS) {
@@ -715,7 +736,8 @@
 
 	evict_mem = bo->mem;
 	evict_mem.mm_node = NULL;
-	evict_mem.bus.io_reserved = false;
+	evict_mem.bus.io_reserved_vm = false;
+	evict_mem.bus.io_reserved_count = 0;
 
 	placement.fpfn = 0;
 	placement.lpfn = 0;
@@ -802,8 +824,7 @@
 
 	BUG_ON(ret != 0);
 
-	while (put_count--)
-		kref_put(&bo->list_kref, ttm_bo_ref_bug);
+	ttm_bo_list_ref_sub(bo, put_count, true);
 
 	ret = ttm_bo_evict(bo, interruptible, no_wait_reserve, no_wait_gpu);
 	ttm_bo_unreserve(bo);
@@ -1036,6 +1057,7 @@
 {
 	int ret = 0;
 	struct ttm_mem_reg mem;
+	struct ttm_bo_device *bdev = bo->bdev;
 
 	BUG_ON(!atomic_read(&bo->reserved));
 
@@ -1044,15 +1066,16 @@
 	 * Have the driver move function wait for idle when necessary,
 	 * instead of doing it here.
 	 */
-	spin_lock(&bo->lock);
+	spin_lock(&bdev->fence_lock);
 	ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
-	spin_unlock(&bo->lock);
+	spin_unlock(&bdev->fence_lock);
 	if (ret)
 		return ret;
 	mem.num_pages = bo->num_pages;
 	mem.size = mem.num_pages << PAGE_SHIFT;
 	mem.page_alignment = bo->mem.page_alignment;
-	mem.bus.io_reserved = false;
+	mem.bus.io_reserved_vm = false;
+	mem.bus.io_reserved_count = 0;
 	/*
 	 * Determine where to move the buffer.
 	 */
@@ -1163,7 +1186,6 @@
 	}
 	bo->destroy = destroy;
 
-	spin_lock_init(&bo->lock);
 	kref_init(&bo->kref);
 	kref_init(&bo->list_kref);
 	atomic_set(&bo->cpu_writers, 0);
@@ -1172,6 +1194,7 @@
 	INIT_LIST_HEAD(&bo->lru);
 	INIT_LIST_HEAD(&bo->ddestroy);
 	INIT_LIST_HEAD(&bo->swap);
+	INIT_LIST_HEAD(&bo->io_reserve_lru);
 	bo->bdev = bdev;
 	bo->glob = bdev->glob;
 	bo->type = type;
@@ -1181,7 +1204,8 @@
 	bo->mem.num_pages = bo->num_pages;
 	bo->mem.mm_node = NULL;
 	bo->mem.page_alignment = page_alignment;
-	bo->mem.bus.io_reserved = false;
+	bo->mem.bus.io_reserved_vm = false;
+	bo->mem.bus.io_reserved_count = 0;
 	bo->buffer_start = buffer_start & PAGE_MASK;
 	bo->priv_flags = 0;
 	bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
@@ -1355,6 +1379,10 @@
 	BUG_ON(type >= TTM_NUM_MEM_TYPES);
 	man = &bdev->man[type];
 	BUG_ON(man->has_type);
+	man->io_reserve_fastpath = true;
+	man->use_io_reserve_lru = false;
+	mutex_init(&man->io_reserve_mutex);
+	INIT_LIST_HEAD(&man->io_reserve_lru);
 
 	ret = bdev->driver->init_mem_type(bdev, type, man);
 	if (ret)
@@ -1526,7 +1554,8 @@
 	bdev->dev_mapping = NULL;
 	bdev->glob = glob;
 	bdev->need_dma32 = need_dma32;
-
+	bdev->val_seq = 0;
+	spin_lock_init(&bdev->fence_lock);
 	mutex_lock(&glob->device_list_mutex);
 	list_add_tail(&bdev->device_list, &glob->device_list);
 	mutex_unlock(&glob->device_list_mutex);
@@ -1560,7 +1589,7 @@
 	return true;
 }
 
-void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
+void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
 	loff_t offset = (loff_t) bo->addr_space_offset;
@@ -1569,8 +1598,20 @@
 	if (!bdev->dev_mapping)
 		return;
 	unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1);
-	ttm_mem_io_free(bdev, &bo->mem);
+	ttm_mem_io_free_vm(bo);
 }
+
+void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
+{
+	struct ttm_bo_device *bdev = bo->bdev;
+	struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];
+
+	ttm_mem_io_lock(man, false);
+	ttm_bo_unmap_virtual_locked(bo);
+	ttm_mem_io_unlock(man);
+}
+
+
 EXPORT_SYMBOL(ttm_bo_unmap_virtual);
 
 static void ttm_bo_vm_insert_rb(struct ttm_buffer_object *bo)
@@ -1650,6 +1691,7 @@
 		bool lazy, bool interruptible, bool no_wait)
 {
 	struct ttm_bo_driver *driver = bo->bdev->driver;
+	struct ttm_bo_device *bdev = bo->bdev;
 	void *sync_obj;
 	void *sync_obj_arg;
 	int ret = 0;
@@ -1663,9 +1705,9 @@
 			void *tmp_obj = bo->sync_obj;
 			bo->sync_obj = NULL;
 			clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
-			spin_unlock(&bo->lock);
+			spin_unlock(&bdev->fence_lock);
 			driver->sync_obj_unref(&tmp_obj);
-			spin_lock(&bo->lock);
+			spin_lock(&bdev->fence_lock);
 			continue;
 		}
 
@@ -1674,29 +1716,29 @@
 
 		sync_obj = driver->sync_obj_ref(bo->sync_obj);
 		sync_obj_arg = bo->sync_obj_arg;
-		spin_unlock(&bo->lock);
+		spin_unlock(&bdev->fence_lock);
 		ret = driver->sync_obj_wait(sync_obj, sync_obj_arg,
 					    lazy, interruptible);
 		if (unlikely(ret != 0)) {
 			driver->sync_obj_unref(&sync_obj);
-			spin_lock(&bo->lock);
+			spin_lock(&bdev->fence_lock);
 			return ret;
 		}
-		spin_lock(&bo->lock);
+		spin_lock(&bdev->fence_lock);
 		if (likely(bo->sync_obj == sync_obj &&
 			   bo->sync_obj_arg == sync_obj_arg)) {
 			void *tmp_obj = bo->sync_obj;
 			bo->sync_obj = NULL;
 			clear_bit(TTM_BO_PRIV_FLAG_MOVING,
 				  &bo->priv_flags);
-			spin_unlock(&bo->lock);
+			spin_unlock(&bdev->fence_lock);
 			driver->sync_obj_unref(&sync_obj);
 			driver->sync_obj_unref(&tmp_obj);
-			spin_lock(&bo->lock);
+			spin_lock(&bdev->fence_lock);
 		} else {
-			spin_unlock(&bo->lock);
+			spin_unlock(&bdev->fence_lock);
 			driver->sync_obj_unref(&sync_obj);
-			spin_lock(&bo->lock);
+			spin_lock(&bdev->fence_lock);
 		}
 	}
 	return 0;
@@ -1705,6 +1747,7 @@
 
 int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
 {
+	struct ttm_bo_device *bdev = bo->bdev;
 	int ret = 0;
 
 	/*
@@ -1714,9 +1757,9 @@
 	ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
 	if (unlikely(ret != 0))
 		return ret;
-	spin_lock(&bo->lock);
+	spin_lock(&bdev->fence_lock);
 	ret = ttm_bo_wait(bo, false, true, no_wait);
-	spin_unlock(&bo->lock);
+	spin_unlock(&bdev->fence_lock);
 	if (likely(ret == 0))
 		atomic_inc(&bo->cpu_writers);
 	ttm_bo_unreserve(bo);
@@ -1782,16 +1825,15 @@
 	put_count = ttm_bo_del_from_lru(bo);
 	spin_unlock(&glob->lru_lock);
 
-	while (put_count--)
-		kref_put(&bo->list_kref, ttm_bo_ref_bug);
+	ttm_bo_list_ref_sub(bo, put_count, true);
 
 	/**
 	 * Wait for GPU, then move to system cached.
 	 */
 
-	spin_lock(&bo->lock);
+	spin_lock(&bo->bdev->fence_lock);
 	ret = ttm_bo_wait(bo, false, false, false);
-	spin_unlock(&bo->lock);
+	spin_unlock(&bo->bdev->fence_lock);
 
 	if (unlikely(ret != 0))
 		goto out;
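
Throughout this file the per-object bo->lock spinlock is replaced by the device-wide bdev->fence_lock, so all fence and sync-object state is now serialized on one lock per device. A minimal sketch of the resulting wait-for-idle pattern, using an illustrative wrapper name that is not part of the patch:

/* Illustrative only: wait for a buffer to go idle under the new
 * device-wide fence lock (the pattern used repeatedly above). */
static int example_wait_idle(struct ttm_buffer_object *bo,
			     bool interruptible)
{
	struct ttm_bo_device *bdev = bo->bdev;
	int ret;

	spin_lock(&bdev->fence_lock);
	ret = ttm_bo_wait(bo, false, interruptible, false);
	spin_unlock(&bdev->fence_lock);
	return ret;
}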
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 3106d5b..77dbf40 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -75,37 +75,123 @@
 }
 EXPORT_SYMBOL(ttm_bo_move_ttm);
 
-int ttm_mem_io_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
 {
+	if (likely(man->io_reserve_fastpath))
+		return 0;
+
+	if (interruptible)
+		return mutex_lock_interruptible(&man->io_reserve_mutex);
+
+	mutex_lock(&man->io_reserve_mutex);
+	return 0;
+}
+
+void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
+{
+	if (likely(man->io_reserve_fastpath))
+		return;
+
+	mutex_unlock(&man->io_reserve_mutex);
+}
+
+static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
+{
+	struct ttm_buffer_object *bo;
+
+	if (!man->use_io_reserve_lru || list_empty(&man->io_reserve_lru))
+		return -EAGAIN;
+
+	bo = list_first_entry(&man->io_reserve_lru,
+			      struct ttm_buffer_object,
+			      io_reserve_lru);
+	list_del_init(&bo->io_reserve_lru);
+	ttm_bo_unmap_virtual_locked(bo);
+
+	return 0;
+}
+
+static int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
+			      struct ttm_mem_reg *mem)
+{
+	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+	int ret = 0;
+
+	if (!bdev->driver->io_mem_reserve)
+		return 0;
+	if (likely(man->io_reserve_fastpath))
+		return bdev->driver->io_mem_reserve(bdev, mem);
+
+	if (bdev->driver->io_mem_reserve &&
+	    mem->bus.io_reserved_count++ == 0) {
+retry:
+		ret = bdev->driver->io_mem_reserve(bdev, mem);
+		if (ret == -EAGAIN) {
+			ret = ttm_mem_io_evict(man);
+			if (ret == 0)
+				goto retry;
+		}
+	}
+	return ret;
+}
+
+static void ttm_mem_io_free(struct ttm_bo_device *bdev,
+			    struct ttm_mem_reg *mem)
+{
+	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+
+	if (likely(man->io_reserve_fastpath))
+		return;
+
+	if (bdev->driver->io_mem_reserve &&
+	    --mem->bus.io_reserved_count == 0 &&
+	    bdev->driver->io_mem_free)
+		bdev->driver->io_mem_free(bdev, mem);
+
+}
+
+int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
+{
+	struct ttm_mem_reg *mem = &bo->mem;
 	int ret;
 
-	if (!mem->bus.io_reserved) {
-		mem->bus.io_reserved = true;
-		ret = bdev->driver->io_mem_reserve(bdev, mem);
+	if (!mem->bus.io_reserved_vm) {
+		struct ttm_mem_type_manager *man =
+			&bo->bdev->man[mem->mem_type];
+
+		ret = ttm_mem_io_reserve(bo->bdev, mem);
 		if (unlikely(ret != 0))
 			return ret;
+		mem->bus.io_reserved_vm = true;
+		if (man->use_io_reserve_lru)
+			list_add_tail(&bo->io_reserve_lru,
+				      &man->io_reserve_lru);
 	}
 	return 0;
 }
 
-void ttm_mem_io_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+void ttm_mem_io_free_vm(struct ttm_buffer_object *bo)
 {
-	if (bdev->driver->io_mem_reserve) {
-		if (mem->bus.io_reserved) {
-			mem->bus.io_reserved = false;
-			bdev->driver->io_mem_free(bdev, mem);
-		}
+	struct ttm_mem_reg *mem = &bo->mem;
+
+	if (mem->bus.io_reserved_vm) {
+		mem->bus.io_reserved_vm = false;
+		list_del_init(&bo->io_reserve_lru);
+		ttm_mem_io_free(bo->bdev, mem);
 	}
 }
 
 int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
 			void **virtual)
 {
+	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
 	int ret;
 	void *addr;
 
 	*virtual = NULL;
+	(void) ttm_mem_io_lock(man, false);
 	ret = ttm_mem_io_reserve(bdev, mem);
+	ttm_mem_io_unlock(man);
 	if (ret || !mem->bus.is_iomem)
 		return ret;
 
@@ -117,7 +203,9 @@
 		else
 			addr = ioremap_nocache(mem->bus.base + mem->bus.offset, mem->bus.size);
 		if (!addr) {
+			(void) ttm_mem_io_lock(man, false);
 			ttm_mem_io_free(bdev, mem);
+			ttm_mem_io_unlock(man);
 			return -ENOMEM;
 		}
 	}
@@ -134,7 +222,9 @@
 
 	if (virtual && mem->bus.addr == NULL)
 		iounmap(virtual);
+	(void) ttm_mem_io_lock(man, false);
 	ttm_mem_io_free(bdev, mem);
+	ttm_mem_io_unlock(man);
 }
 
 static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
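
ttm_mem_io_reserve()/ttm_mem_io_free() are now static and, outside the fastpath, must be bracketed by ttm_mem_io_lock()/ttm_mem_io_unlock() on the memory type's manager, as ttm_mem_reg_ioremap() and ttm_mem_reg_iounmap() above do. A hedged sketch of that caller pattern (the helper name is illustrative, and it would have to live in this file since the reserve/free functions are static):

/* Illustrative: reserve the io region for "mem" under the manager's
 * io_reserve mutex, mirroring ttm_mem_reg_ioremap() above. */
static int example_io_reserve(struct ttm_bo_device *bdev,
			      struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	int ret;

	ret = ttm_mem_io_lock(man, true);	/* interruptible */
	if (unlikely(ret != 0))
		return ret;
	ret = ttm_mem_io_reserve(bdev, mem);
	ttm_mem_io_unlock(man);
	return ret;
}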
@@ -231,7 +321,7 @@
 	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
 	struct ttm_tt *ttm = bo->ttm;
 	struct ttm_mem_reg *old_mem = &bo->mem;
-	struct ttm_mem_reg old_copy = *old_mem;
+	struct ttm_mem_reg old_copy;
 	void *old_iomap;
 	void *new_iomap;
 	int ret;
@@ -280,8 +370,7 @@
 	}
 	mb();
 out2:
-	ttm_bo_free_old_node(bo);
-
+	old_copy = *old_mem;
 	*old_mem = *new_mem;
 	new_mem->mm_node = NULL;
 
@@ -292,9 +381,10 @@
 	}
 
 out1:
-	ttm_mem_reg_iounmap(bdev, new_mem, new_iomap);
+	ttm_mem_reg_iounmap(bdev, old_mem, new_iomap);
 out:
 	ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);
+	ttm_bo_mem_put(bo, &old_copy);
 	return ret;
 }
 EXPORT_SYMBOL(ttm_bo_move_memcpy);
@@ -337,11 +427,11 @@
 	 * TODO: Explicit member copy would probably be better here.
 	 */
 
-	spin_lock_init(&fbo->lock);
 	init_waitqueue_head(&fbo->event_queue);
 	INIT_LIST_HEAD(&fbo->ddestroy);
 	INIT_LIST_HEAD(&fbo->lru);
 	INIT_LIST_HEAD(&fbo->swap);
+	INIT_LIST_HEAD(&fbo->io_reserve_lru);
 	fbo->vm_node = NULL;
 	atomic_set(&fbo->cpu_writers, 0);
 
@@ -453,6 +543,8 @@
 		unsigned long start_page, unsigned long num_pages,
 		struct ttm_bo_kmap_obj *map)
 {
+	struct ttm_mem_type_manager *man =
+		&bo->bdev->man[bo->mem.mem_type];
 	unsigned long offset, size;
 	int ret;
 
@@ -467,7 +559,9 @@
 	if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC))
 		return -EPERM;
 #endif
+	(void) ttm_mem_io_lock(man, false);
 	ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
+	ttm_mem_io_unlock(man);
 	if (ret)
 		return ret;
 	if (!bo->mem.bus.is_iomem) {
@@ -482,12 +576,15 @@
 
 void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
 {
+	struct ttm_buffer_object *bo = map->bo;
+	struct ttm_mem_type_manager *man =
+		&bo->bdev->man[bo->mem.mem_type];
+
 	if (!map->virtual)
 		return;
 	switch (map->bo_kmap_type) {
 	case ttm_bo_map_iomap:
 		iounmap(map->virtual);
-		ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
 		break;
 	case ttm_bo_map_vmap:
 		vunmap(map->virtual);
@@ -500,6 +597,9 @@
 	default:
 		BUG();
 	}
+	(void) ttm_mem_io_lock(man, false);
+	ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
+	ttm_mem_io_unlock(man);
 	map->virtual = NULL;
 	map->page = NULL;
 }
@@ -520,7 +620,7 @@
 	struct ttm_buffer_object *ghost_obj;
 	void *tmp_obj = NULL;
 
-	spin_lock(&bo->lock);
+	spin_lock(&bdev->fence_lock);
 	if (bo->sync_obj) {
 		tmp_obj = bo->sync_obj;
 		bo->sync_obj = NULL;
@@ -529,7 +629,7 @@
 	bo->sync_obj_arg = sync_obj_arg;
 	if (evict) {
 		ret = ttm_bo_wait(bo, false, false, false);
-		spin_unlock(&bo->lock);
+		spin_unlock(&bdev->fence_lock);
 		if (tmp_obj)
 			driver->sync_obj_unref(&tmp_obj);
 		if (ret)
@@ -552,7 +652,7 @@
 		 */
 
 		set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
-		spin_unlock(&bo->lock);
+		spin_unlock(&bdev->fence_lock);
 		if (tmp_obj)
 			driver->sync_obj_unref(&tmp_obj);
 
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index fe6cb77..221b924 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -83,6 +83,8 @@
 	int i;
 	unsigned long address = (unsigned long)vmf->virtual_address;
 	int retval = VM_FAULT_NOPAGE;
+	struct ttm_mem_type_manager *man =
+		&bdev->man[bo->mem.mem_type];
 
 	/*
 	 * Work around locking order reversal in fault / nopfn
@@ -118,24 +120,28 @@
 	 * move.
 	 */
 
-	spin_lock(&bo->lock);
+	spin_lock(&bdev->fence_lock);
 	if (test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags)) {
 		ret = ttm_bo_wait(bo, false, true, false);
-		spin_unlock(&bo->lock);
+		spin_unlock(&bdev->fence_lock);
 		if (unlikely(ret != 0)) {
 			retval = (ret != -ERESTARTSYS) ?
 			    VM_FAULT_SIGBUS : VM_FAULT_NOPAGE;
 			goto out_unlock;
 		}
 	} else
-		spin_unlock(&bo->lock);
+		spin_unlock(&bdev->fence_lock);
 
-
-	ret = ttm_mem_io_reserve(bdev, &bo->mem);
-	if (ret) {
-		retval = VM_FAULT_SIGBUS;
+	ret = ttm_mem_io_lock(man, true);
+	if (unlikely(ret != 0)) {
+		retval = VM_FAULT_NOPAGE;
 		goto out_unlock;
 	}
+	ret = ttm_mem_io_reserve_vm(bo);
+	if (unlikely(ret != 0)) {
+		retval = VM_FAULT_SIGBUS;
+		goto out_io_unlock;
+	}
 
 	page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
 	    bo->vm_node->start - vma->vm_pgoff;
@@ -144,7 +150,7 @@
 
 	if (unlikely(page_offset >= bo->num_pages)) {
 		retval = VM_FAULT_SIGBUS;
-		goto out_unlock;
+		goto out_io_unlock;
 	}
 
 	/*
@@ -182,7 +188,7 @@
 			page = ttm_tt_get_page(ttm, page_offset);
 			if (unlikely(!page && i == 0)) {
 				retval = VM_FAULT_OOM;
-				goto out_unlock;
+				goto out_io_unlock;
 			} else if (unlikely(!page)) {
 				break;
 			}
@@ -200,14 +206,15 @@
 		else if (unlikely(ret != 0)) {
 			retval =
 			    (ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
-			goto out_unlock;
+			goto out_io_unlock;
 		}
 
 		address += PAGE_SIZE;
 		if (unlikely(++page_offset >= page_last))
 			break;
 	}
-
+out_io_unlock:
+	ttm_mem_io_unlock(man);
 out_unlock:
 	ttm_bo_unreserve(bo);
 	return retval;
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index c285c29..3832fe1 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -32,7 +32,7 @@
 #include <linux/sched.h>
 #include <linux/module.h>
 
-void ttm_eu_backoff_reservation(struct list_head *list)
+static void ttm_eu_backoff_reservation_locked(struct list_head *list)
 {
 	struct ttm_validate_buffer *entry;
 
@@ -41,10 +41,77 @@
 		if (!entry->reserved)
 			continue;
 
+		if (entry->removed) {
+			ttm_bo_add_to_lru(bo);
+			entry->removed = false;
+
+		}
 		entry->reserved = false;
-		ttm_bo_unreserve(bo);
+		atomic_set(&bo->reserved, 0);
+		wake_up_all(&bo->event_queue);
 	}
 }
+
+static void ttm_eu_del_from_lru_locked(struct list_head *list)
+{
+	struct ttm_validate_buffer *entry;
+
+	list_for_each_entry(entry, list, head) {
+		struct ttm_buffer_object *bo = entry->bo;
+		if (!entry->reserved)
+			continue;
+
+		if (!entry->removed) {
+			entry->put_count = ttm_bo_del_from_lru(bo);
+			entry->removed = true;
+		}
+	}
+}
+
+static void ttm_eu_list_ref_sub(struct list_head *list)
+{
+	struct ttm_validate_buffer *entry;
+
+	list_for_each_entry(entry, list, head) {
+		struct ttm_buffer_object *bo = entry->bo;
+
+		if (entry->put_count) {
+			ttm_bo_list_ref_sub(bo, entry->put_count, true);
+			entry->put_count = 0;
+		}
+	}
+}
+
+static int ttm_eu_wait_unreserved_locked(struct list_head *list,
+					 struct ttm_buffer_object *bo)
+{
+	struct ttm_bo_global *glob = bo->glob;
+	int ret;
+
+	ttm_eu_del_from_lru_locked(list);
+	spin_unlock(&glob->lru_lock);
+	ret = ttm_bo_wait_unreserved(bo, true);
+	spin_lock(&glob->lru_lock);
+	if (unlikely(ret != 0))
+		ttm_eu_backoff_reservation_locked(list);
+	return ret;
+}
+
+
+void ttm_eu_backoff_reservation(struct list_head *list)
+{
+	struct ttm_validate_buffer *entry;
+	struct ttm_bo_global *glob;
+
+	if (list_empty(list))
+		return;
+
+	entry = list_first_entry(list, struct ttm_validate_buffer, head);
+	glob = entry->bo->glob;
+	spin_lock(&glob->lru_lock);
+	ttm_eu_backoff_reservation_locked(list);
+	spin_unlock(&glob->lru_lock);
+}
 EXPORT_SYMBOL(ttm_eu_backoff_reservation);
 
 /*
@@ -59,37 +126,76 @@
  * buffers in different orders.
  */
 
-int ttm_eu_reserve_buffers(struct list_head *list, uint32_t val_seq)
+int ttm_eu_reserve_buffers(struct list_head *list)
 {
+	struct ttm_bo_global *glob;
 	struct ttm_validate_buffer *entry;
 	int ret;
+	uint32_t val_seq;
+
+	if (list_empty(list))
+		return 0;
+
+	list_for_each_entry(entry, list, head) {
+		entry->reserved = false;
+		entry->put_count = 0;
+		entry->removed = false;
+	}
+
+	entry = list_first_entry(list, struct ttm_validate_buffer, head);
+	glob = entry->bo->glob;
 
 retry:
+	spin_lock(&glob->lru_lock);
+	val_seq = entry->bo->bdev->val_seq++;
+
 	list_for_each_entry(entry, list, head) {
 		struct ttm_buffer_object *bo = entry->bo;
 
-		entry->reserved = false;
-		ret = ttm_bo_reserve(bo, true, false, true, val_seq);
-		if (ret != 0) {
-			ttm_eu_backoff_reservation(list);
-			if (ret == -EAGAIN) {
-				ret = ttm_bo_wait_unreserved(bo, true);
-				if (unlikely(ret != 0))
-					return ret;
-				goto retry;
-			} else
+retry_this_bo:
+		ret = ttm_bo_reserve_locked(bo, true, true, true, val_seq);
+		switch (ret) {
+		case 0:
+			break;
+		case -EBUSY:
+			ret = ttm_eu_wait_unreserved_locked(list, bo);
+			if (unlikely(ret != 0)) {
+				spin_unlock(&glob->lru_lock);
+				ttm_eu_list_ref_sub(list);
 				return ret;
+			}
+			goto retry_this_bo;
+		case -EAGAIN:
+			ttm_eu_backoff_reservation_locked(list);
+			spin_unlock(&glob->lru_lock);
+			ttm_eu_list_ref_sub(list);
+			ret = ttm_bo_wait_unreserved(bo, true);
+			if (unlikely(ret != 0))
+				return ret;
+			goto retry;
+		default:
+			ttm_eu_backoff_reservation_locked(list);
+			spin_unlock(&glob->lru_lock);
+			ttm_eu_list_ref_sub(list);
+			return ret;
 		}
 
 		entry->reserved = true;
 		if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
-			ttm_eu_backoff_reservation(list);
+			ttm_eu_backoff_reservation_locked(list);
+			spin_unlock(&glob->lru_lock);
+			ttm_eu_list_ref_sub(list);
 			ret = ttm_bo_wait_cpu(bo, false);
 			if (ret)
 				return ret;
 			goto retry;
 		}
 	}
+
+	ttm_eu_del_from_lru_locked(list);
+	spin_unlock(&glob->lru_lock);
+	ttm_eu_list_ref_sub(list);
+
 	return 0;
 }
 EXPORT_SYMBOL(ttm_eu_reserve_buffers);
@@ -97,21 +203,36 @@
 void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj)
 {
 	struct ttm_validate_buffer *entry;
+	struct ttm_buffer_object *bo;
+	struct ttm_bo_global *glob;
+	struct ttm_bo_device *bdev;
+	struct ttm_bo_driver *driver;
+
+	if (list_empty(list))
+		return;
+
+	bo = list_first_entry(list, struct ttm_validate_buffer, head)->bo;
+	bdev = bo->bdev;
+	driver = bdev->driver;
+	glob = bo->glob;
+
+	spin_lock(&bdev->fence_lock);
+	spin_lock(&glob->lru_lock);
 
 	list_for_each_entry(entry, list, head) {
-		struct ttm_buffer_object *bo = entry->bo;
-		struct ttm_bo_driver *driver = bo->bdev->driver;
-		void *old_sync_obj;
-
-		spin_lock(&bo->lock);
-		old_sync_obj = bo->sync_obj;
+		bo = entry->bo;
+		entry->old_sync_obj = bo->sync_obj;
 		bo->sync_obj = driver->sync_obj_ref(sync_obj);
 		bo->sync_obj_arg = entry->new_sync_obj_arg;
-		spin_unlock(&bo->lock);
-		ttm_bo_unreserve(bo);
+		ttm_bo_unreserve_locked(bo);
 		entry->reserved = false;
-		if (old_sync_obj)
-			driver->sync_obj_unref(&old_sync_obj);
+	}
+	spin_unlock(&glob->lru_lock);
+	spin_unlock(&bdev->fence_lock);
+
+	list_for_each_entry(entry, list, head) {
+		if (entry->old_sync_obj)
+			driver->sync_obj_unref(&entry->old_sync_obj);
 	}
 }
 EXPORT_SYMBOL(ttm_eu_fence_buffer_objects);
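
With val_seq now generated inside ttm_eu_reserve_buffers() from the new bdev->val_seq counter, callers no longer pass a sequence number. A hedged sketch of the reserve/validate/fence flow a driver would follow after this change; my_validate() and the fence object are placeholders, only the ttm_eu_* calls are real:

static int my_validate(struct list_head *list);	/* driver-specific placeholder */

static int example_submit(struct list_head *validate_list, void *fence)
{
	int ret;

	ret = ttm_eu_reserve_buffers(validate_list);	/* no val_seq argument */
	if (unlikely(ret != 0))
		return ret;

	ret = my_validate(validate_list);
	if (unlikely(ret != 0)) {
		ttm_eu_backoff_reservation(validate_list);
		return ret;
	}

	ttm_eu_fence_buffer_objects(validate_list, fence);
	return 0;
}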
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index e7a58d0..10fc01f 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -264,7 +264,6 @@
 	 */
 
 	struct vmw_sw_context ctx;
-	uint32_t val_seq;
 	struct mutex cmdbuf_mutex;
 
 	/**
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 76954e3..41b95ed 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -653,8 +653,7 @@
 	ret = vmw_cmd_check_all(dev_priv, sw_context, cmd, arg->command_size);
 	if (unlikely(ret != 0))
 		goto out_err;
-	ret = ttm_eu_reserve_buffers(&sw_context->validate_nodes,
-				     dev_priv->val_seq++);
+	ret = ttm_eu_reserve_buffers(&sw_context->validate_nodes);
 	if (unlikely(ret != 0))
 		goto out_err;
 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index fe096a7..bfab60c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -480,9 +480,6 @@
 	info->fix.smem_start = 0;
 	info->fix.smem_len = fb_size;
 
-	info->fix.mmio_start = 0;
-	info->fix.mmio_len = 0;
-
 	info->pseudo_palette = par->pseudo_palette;
 	info->screen_base = par->vmalloc;
 	info->screen_size = fb_size;
diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c
index c8768f3..e01cacb 100644
--- a/drivers/gpu/vga/vga_switcheroo.c
+++ b/drivers/gpu/vga/vga_switcheroo.c
@@ -33,6 +33,7 @@
 	struct fb_info *fb_info;
 	int pwr_state;
 	void (*set_gpu_state)(struct pci_dev *pdev, enum vga_switcheroo_state);
+	void (*reprobe)(struct pci_dev *pdev);
 	bool (*can_switch)(struct pci_dev *pdev);
 	int id;
 	bool active;
@@ -103,6 +104,7 @@
 
 int vga_switcheroo_register_client(struct pci_dev *pdev,
 				   void (*set_gpu_state)(struct pci_dev *pdev, enum vga_switcheroo_state),
+				   void (*reprobe)(struct pci_dev *pdev),
 				   bool (*can_switch)(struct pci_dev *pdev))
 {
 	int index;
@@ -117,6 +119,7 @@
 	vgasr_priv.clients[index].pwr_state = VGA_SWITCHEROO_ON;
 	vgasr_priv.clients[index].pdev = pdev;
 	vgasr_priv.clients[index].set_gpu_state = set_gpu_state;
+	vgasr_priv.clients[index].reprobe = reprobe;
 	vgasr_priv.clients[index].can_switch = can_switch;
 	vgasr_priv.clients[index].id = -1;
 	if (pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW)
@@ -174,7 +177,8 @@
 	int i;
 	mutex_lock(&vgasr_mutex);
 	for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) {
-		seq_printf(m, "%d:%c:%s:%s\n", i,
+		seq_printf(m, "%d:%s:%c:%s:%s\n", i,
+			   vgasr_priv.clients[i].id == VGA_SWITCHEROO_DIS ? "DIS" : "IGD",
 			   vgasr_priv.clients[i].active ? '+' : ' ',
 			   vgasr_priv.clients[i].pwr_state ? "Pwr" : "Off",
 			   pci_name(vgasr_priv.clients[i].pdev));
@@ -190,9 +194,8 @@
 
 static int vga_switchon(struct vga_switcheroo_client *client)
 {
-	int ret;
-
-	ret = vgasr_priv.handler->power_state(client->id, VGA_SWITCHEROO_ON);
+	if (vgasr_priv.handler->power_state)
+		vgasr_priv.handler->power_state(client->id, VGA_SWITCHEROO_ON);
 	/* call the driver callback to turn on device */
 	client->set_gpu_state(client->pdev, VGA_SWITCHEROO_ON);
 	client->pwr_state = VGA_SWITCHEROO_ON;
@@ -203,12 +206,14 @@
 {
 	/* call the driver callback to turn off device */
 	client->set_gpu_state(client->pdev, VGA_SWITCHEROO_OFF);
-	vgasr_priv.handler->power_state(client->id, VGA_SWITCHEROO_OFF);
+	if (vgasr_priv.handler->power_state)
+		vgasr_priv.handler->power_state(client->id, VGA_SWITCHEROO_OFF);
 	client->pwr_state = VGA_SWITCHEROO_OFF;
 	return 0;
 }
 
-static int vga_switchto(struct vga_switcheroo_client *new_client)
+/* stage one happens before delay */
+static int vga_switchto_stage1(struct vga_switcheroo_client *new_client)
 {
 	int ret;
 	int i;
@@ -235,10 +240,28 @@
 		vga_switchon(new_client);
 
 	/* swap shadow resource to denote boot VGA device has changed so X starts on new device */
-	active->active = false;
-
 	active->pdev->resource[PCI_ROM_RESOURCE].flags &= ~IORESOURCE_ROM_SHADOW;
 	new_client->pdev->resource[PCI_ROM_RESOURCE].flags |= IORESOURCE_ROM_SHADOW;
+	return 0;
+}
+
+/* post delay */
+static int vga_switchto_stage2(struct vga_switcheroo_client *new_client)
+{
+	int ret;
+	int i;
+	struct vga_switcheroo_client *active = NULL;
+
+	for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) {
+		if (vgasr_priv.clients[i].active == true) {
+			active = &vgasr_priv.clients[i];
+			break;
+		}
+	}
+	if (!active)
+		return 0;
+
+	active->active = false;
 
 	if (new_client->fb_info) {
 		struct fb_event event;
@@ -250,6 +273,9 @@
 	if (ret)
 		return ret;
 
+	if (new_client->reprobe)
+		new_client->reprobe(new_client->pdev);
+
 	if (active->pwr_state == VGA_SWITCHEROO_ON)
 		vga_switchoff(active);
 
@@ -265,6 +291,7 @@
 	const char *pdev_name;
 	int i, ret;
 	bool delay = false, can_switch;
+	bool just_mux = false;
 	int client_id = -1;
 	struct vga_switcheroo_client *client = NULL;
 
@@ -319,6 +346,15 @@
 	if (strncmp(usercmd, "DIS", 3) == 0)
 		client_id = VGA_SWITCHEROO_DIS;
 
+	if (strncmp(usercmd, "MIGD", 4) == 0) {
+		just_mux = true;
+		client_id = VGA_SWITCHEROO_IGD;
+	}
+	if (strncmp(usercmd, "MDIS", 4) == 0) {
+		just_mux = true;
+		client_id = VGA_SWITCHEROO_DIS;
+	}
+
 	if (client_id == -1)
 		goto out;
 
@@ -330,6 +366,12 @@
 	}
 
 	vgasr_priv.delayed_switch_active = false;
+
+	if (just_mux) {
+		ret = vgasr_priv.handler->switchto(client_id);
+		goto out;
+	}
+
 	/* okay we want a switch - test if devices are willing to switch */
 	can_switch = true;
 	for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) {
@@ -345,18 +387,22 @@
 
 	if (can_switch == true) {
 		pdev_name = pci_name(client->pdev);
-		ret = vga_switchto(client);
+		ret = vga_switchto_stage1(client);
 		if (ret)
-			printk(KERN_ERR "vga_switcheroo: switching failed %d\n", ret);
+			printk(KERN_ERR "vga_switcheroo: switching failed stage 1 %d\n", ret);
+
+		ret = vga_switchto_stage2(client);
+		if (ret)
+			printk(KERN_ERR "vga_switcheroo: switching failed stage 2 %d\n", ret);
+
 	} else {
 		printk(KERN_INFO "vga_switcheroo: setting delayed switch to client %d\n", client->id);
 		vgasr_priv.delayed_switch_active = true;
 		vgasr_priv.delayed_client_id = client_id;
 
-		/* we should at least power up the card to
-		   make the switch faster */
-		if (client->pwr_state == VGA_SWITCHEROO_OFF)
-			vga_switchon(client);
+		ret = vga_switchto_stage1(client);
+		if (ret)
+			printk(KERN_ERR "vga_switcheroo: delayed switching stage 1 failed %d\n", ret);
 	}
 
 out:
@@ -438,9 +484,9 @@
 		goto err;
 
 	pdev_name = pci_name(client->pdev);
-	ret = vga_switchto(client);
+	ret = vga_switchto_stage2(client);
 	if (ret)
-		printk(KERN_ERR "vga_switcheroo: delayed switching failed %d\n", ret);
+		printk(KERN_ERR "vga_switcheroo: delayed switching failed stage 2 %d\n", ret);
 
 	vgasr_priv.delayed_switch_active = false;
 	err = 0;
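
The new MIGD/MDIS commands flip only the mux, without the power-state handling and framebuffer handover of a full IGD/DIS switch. A hypothetical userspace snippet driving it through the usual debugfs node (path as exposed by vga_switcheroo; error handling kept minimal):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Route the mux to the discrete GPU without a full switch. */
	int fd = open("/sys/kernel/debug/vgaswitcheroo/switch", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, "MDIS", 4) != 4)
		perror("write");
	close(fd);
	return 0;
}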
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index ffbc278..24cca2f 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -295,6 +295,23 @@
 	---help---
 	Support for Monterey Genius KB29E.
 
+config HID_MULTITOUCH
+	tristate "HID Multitouch panels"
+	depends on USB_HID
+	---help---
+	  Generic support for HID multitouch panels.
+
+	  Say Y here if you have one of the following devices:
+	  - Cypress TrueTouch panels
+	  - Hanvon dual touch panels
+	  - Pixcir dual touch panels
+	  - 'Sensing Win7-TwoFinger' panel by GeneralTouch
+
+	  If unsure, say N.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called hid-multitouch.
+
 config HID_NTRIG
 	tristate "N-Trig touch screen"
 	depends on USB_HID
diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile
index 6eae9a9..6efc2a0 100644
--- a/drivers/hid/Makefile
+++ b/drivers/hid/Makefile
@@ -47,6 +47,7 @@
 obj-$(CONFIG_HID_MICROSOFT)	+= hid-microsoft.o
 obj-$(CONFIG_HID_MONTEREY)	+= hid-monterey.o
 obj-$(CONFIG_HID_MOSART)	+= hid-mosart.o
+obj-$(CONFIG_HID_MULTITOUCH)	+= hid-multitouch.o
 obj-$(CONFIG_HID_NTRIG)		+= hid-ntrig.o
 obj-$(CONFIG_HID_ORTEK)		+= hid-ortek.o
 obj-$(CONFIG_HID_PRODIKEYS)	+= hid-prodikeys.o
diff --git a/drivers/hid/hid-cando.c b/drivers/hid/hid-cando.c
index 375b509..1ea066c 100644
--- a/drivers/hid/hid-cando.c
+++ b/drivers/hid/hid-cando.c
@@ -236,6 +236,8 @@
 	{ HID_USB_DEVICE(USB_VENDOR_ID_CANDO,
 			USB_DEVICE_ID_CANDO_MULTI_TOUCH) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_CANDO,
+			USB_DEVICE_ID_CANDO_MULTI_TOUCH_10_1) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_CANDO,
 			USB_DEVICE_ID_CANDO_MULTI_TOUCH_11_6) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_CANDO,
 		USB_DEVICE_ID_CANDO_MULTI_TOUCH_15_6) },
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 2611686..d678cf3 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1312,7 +1312,9 @@
 	{ HID_USB_DEVICE(USB_VENDOR_ID_BELKIN, USB_DEVICE_ID_FLIP_KVM) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE_2) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_CANDO, USB_DEVICE_ID_CANDO_PIXCIR_MULTI_TOUCH) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_CANDO, USB_DEVICE_ID_CANDO_MULTI_TOUCH) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_CANDO, USB_DEVICE_ID_CANDO_MULTI_TOUCH_10_1) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_CANDO, USB_DEVICE_ID_CANDO_MULTI_TOUCH_11_6) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_CANDO, USB_DEVICE_ID_CANDO_MULTI_TOUCH_15_6) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION) },
@@ -1324,6 +1326,7 @@
 	{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_2) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_3) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_MOUSE) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_TRUETOUCH) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0006) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH1) },
@@ -1335,11 +1338,13 @@
 	{ HID_USB_DEVICE(USB_VENDOR_ID_EZKEY, USB_DEVICE_ID_BTC_8193) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_GAMERON, USB_DEVICE_ID_GAMERON_DUAL_PSX_ADAPTOR) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_GAMERON, USB_DEVICE_ID_GAMERON_DUAL_PCS_ADAPTOR) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, USB_DEVICE_ID_GENERAL_TOUCH_WIN7_TWOFINGERS) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_GREENASIA, 0x0003) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_GREENASIA, 0x0012) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_2) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_3) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_HANVON, USB_DEVICE_ID_HANVON_MULTITOUCH) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_KENSINGTON, USB_DEVICE_ID_KS_SLIMBLADE) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_ERGO_525V) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_LABTEC, USB_DEVICE_ID_LABTEC_WIRELESS_KEYBOARD) },
@@ -1422,6 +1427,7 @@
 	{ HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED, USB_DEVICE_ID_TOPSEED_CYBERLINK) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED2, USB_DEVICE_ID_TOPSEED2_RF_COMBO) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_TWINHAN, USB_DEVICE_ID_TWINHAN_IR_REMOTE) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_TURBOX, USB_DEVICE_ID_TURBOX_TOUCHSCREEN_MOSART) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_PF1209) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP4030U) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP5540U) },
@@ -1640,7 +1646,6 @@
 	{ HID_USB_DEVICE(USB_VENDOR_ID_ESSENTIAL_REALITY, USB_DEVICE_ID_ESSENTIAL_REALITY_P5) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC5UH) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC4UM) },
-	{ HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0001) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0002) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0003) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0004) },
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index f65cace..92a0d61 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -140,7 +140,9 @@
 #define USB_DEVICE_ID_BTC_EMPREX_REMOTE_2	0x5577
 
 #define USB_VENDOR_ID_CANDO		0x2087
+#define USB_DEVICE_ID_CANDO_PIXCIR_MULTI_TOUCH 0x0703
 #define USB_DEVICE_ID_CANDO_MULTI_TOUCH	0x0a01
+#define USB_DEVICE_ID_CANDO_MULTI_TOUCH_10_1 0x0a02
 #define USB_DEVICE_ID_CANDO_MULTI_TOUCH_11_6 0x0b03
 #define USB_DEVICE_ID_CANDO_MULTI_TOUCH_15_6 0x0f01
 
@@ -186,6 +188,7 @@
 #define USB_DEVICE_ID_CYPRESS_BARCODE_1	0xde61
 #define USB_DEVICE_ID_CYPRESS_BARCODE_2	0xde64
 #define USB_DEVICE_ID_CYPRESS_BARCODE_3	0xbca1
+#define USB_DEVICE_ID_CYPRESS_TRUETOUCH	0xc001
 
 #define USB_VENDOR_ID_DEALEXTREAME	0x10c5
 #define USB_DEVICE_ID_DEALEXTREAME_RADIO_SI4701	0x819a
@@ -236,6 +239,7 @@
 #define USB_DEVICE_ID_GAMERON_DUAL_PCS_ADAPTOR	0x0002
 
 #define USB_VENDOR_ID_GENERAL_TOUCH	0x0dfc
+#define USB_DEVICE_ID_GENERAL_TOUCH_WIN7_TWOFINGERS 0x0001
 
 #define USB_VENDOR_ID_GLAB		0x06c2
 #define USB_DEVICE_ID_4_PHIDGETSERVO_30	0x0038
@@ -318,6 +322,9 @@
 #define USB_DEVICE_ID_HANWANG_TABLET_FIRST	0x5000
 #define USB_DEVICE_ID_HANWANG_TABLET_LAST	0x8fff
 
+#define USB_VENDOR_ID_HANVON		0x20b3
+#define USB_DEVICE_ID_HANVON_MULTITOUCH	0x0a18
+
 #define USB_VENDOR_ID_HAPP		0x078b
 #define USB_DEVICE_ID_UGCI_DRIVING	0x0010
 #define USB_DEVICE_ID_UGCI_FLYING	0x0020
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index e60fdb8..7f552bf 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -290,6 +290,14 @@
 		goto ignore;
 	}
 
+	if (field->report_type == HID_FEATURE_REPORT) {
+		if (device->driver->feature_mapping) {
+			device->driver->feature_mapping(device, hidinput, field,
+				usage);
+		}
+		goto ignore;
+	}
+
 	if (device->driver->input_mapping) {
 		int ret = device->driver->input_mapping(device, hidinput, field,
 				usage, &bit, &max);
@@ -839,7 +847,6 @@
 	struct hid_input *hidinput = NULL;
 	struct input_dev *input_dev;
 	int i, j, k;
-	int max_report_type = HID_OUTPUT_REPORT;
 
 	INIT_LIST_HEAD(&hid->inputs);
 
@@ -856,10 +863,11 @@
 			return -1;
 	}
 
-	if (hid->quirks & HID_QUIRK_SKIP_OUTPUT_REPORTS)
-		max_report_type = HID_INPUT_REPORT;
+	for (k = HID_INPUT_REPORT; k <= HID_FEATURE_REPORT; k++) {
+		if (k == HID_OUTPUT_REPORT &&
+			hid->quirks & HID_QUIRK_SKIP_OUTPUT_REPORTS)
+			continue;
 
-	for (k = HID_INPUT_REPORT; k <= max_report_type; k++)
 		list_for_each_entry(report, &hid->report_enum[k].report_list, list) {
 
 			if (!report->maxfield)
@@ -912,6 +920,7 @@
 				hidinput = NULL;
 			}
 		}
+	}
 
 	if (hidinput && input_register_device(hidinput->input))
 		goto out_cleanup;
diff --git a/drivers/hid/hid-mosart.c b/drivers/hid/hid-mosart.c
index 9fb050c..aed7ffe 100644
--- a/drivers/hid/hid-mosart.c
+++ b/drivers/hid/hid-mosart.c
@@ -257,6 +257,7 @@
 static const struct hid_device_id mosart_devices[] = {
 	{ HID_USB_DEVICE(USB_VENDOR_ID_ASUS, USB_DEVICE_ID_ASUS_T91MT) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_ASUS, USB_DEVICE_ID_ASUSTEK_MULTITOUCH_YFO) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_TURBOX, USB_DEVICE_ID_TURBOX_TOUCHSCREEN_MOSART) },
 	{ }
 };
 MODULE_DEVICE_TABLE(hid, mosart_devices);
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
new file mode 100644
index 0000000..07d3183
--- /dev/null
+++ b/drivers/hid/hid-multitouch.c
@@ -0,0 +1,516 @@
+/*
+ *  HID driver for multitouch panels
+ *
+ *  Copyright (c) 2010-2011 Stephane Chatty <chatty@enac.fr>
+ *  Copyright (c) 2010-2011 Benjamin Tissoires <benjamin.tissoires@gmail.com>
+ *  Copyright (c) 2010-2011 Ecole Nationale de l'Aviation Civile, France
+ *
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <linux/device.h>
+#include <linux/hid.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/usb.h>
+#include <linux/input/mt.h>
+#include "usbhid/usbhid.h"
+
+
+MODULE_AUTHOR("Stephane Chatty <chatty@enac.fr>");
+MODULE_DESCRIPTION("HID multitouch panels");
+MODULE_LICENSE("GPL");
+
+#include "hid-ids.h"
+
+/* quirks to control the device */
+#define MT_QUIRK_NOT_SEEN_MEANS_UP	(1 << 0)
+#define MT_QUIRK_SLOT_IS_CONTACTID	(1 << 1)
+#define MT_QUIRK_CYPRESS		(1 << 2)
+#define MT_QUIRK_SLOT_IS_CONTACTNUMBER	(1 << 3)
+#define MT_QUIRK_VALID_IS_INRANGE	(1 << 4)
+#define MT_QUIRK_VALID_IS_CONFIDENCE	(1 << 5)
+
+struct mt_slot {
+	__s32 x, y, p, w, h;
+	__s32 contactid;	/* the device ContactID assigned to this slot */
+	bool touch_state;	/* is the touch valid? */
+	bool seen_in_this_frame;/* has this slot been updated */
+};
+
+struct mt_device {
+	struct mt_slot curdata;	/* placeholder of incoming data */
+	struct mt_class *mtclass;	/* our mt device class */
+	unsigned last_field_index;	/* last field index of the report */
+	unsigned last_slot_field;	/* the last field of a slot */
+	__s8 inputmode;		/* InputMode HID feature, -1 if non-existent */
+	__u8 num_received;	/* how many contacts we received */
+	__u8 num_expected;	/* expected last contact index */
+	bool curvalid;		/* is the current contact valid? */
+	struct mt_slot slots[0];	/* first slot */
+};
+
+struct mt_class {
+	__s32 name;	/* MT_CLS */
+	__s32 quirks;
+	__s32 sn_move;	/* Signal/noise ratio for move events */
+	__s32 sn_pressure;	/* Signal/noise ratio for pressure events */
+	__u8 maxcontacts;
+};
+
+/* classes of device behavior */
+#define MT_CLS_DEFAULT	1
+#define MT_CLS_DUAL1	2
+#define MT_CLS_DUAL2	3
+#define MT_CLS_CYPRESS	4
+
+/*
+ * these device-dependent functions determine which slot corresponds
+ * to a valid contact that was just read.
+ */
+
+static int cypress_compute_slot(struct mt_device *td)
+{
+	if (td->curdata.contactid != 0 || td->num_received == 0)
+		return td->curdata.contactid;
+	else
+		return -1;
+}
+
+static int find_slot_from_contactid(struct mt_device *td)
+{
+	int i;
+	for (i = 0; i < td->mtclass->maxcontacts; ++i) {
+		if (td->slots[i].contactid == td->curdata.contactid &&
+			td->slots[i].touch_state)
+			return i;
+	}
+	for (i = 0; i < td->mtclass->maxcontacts; ++i) {
+		if (!td->slots[i].seen_in_this_frame &&
+			!td->slots[i].touch_state)
+			return i;
+	}
+	/* should not occur. If this happens, it means that the device
+	 * sent more touches than it declares in the report descriptor.
+	 * The extra touch is then ignored. */
+	return -1;
+}
+
+struct mt_class mt_classes[] = {
+	{ .name = MT_CLS_DEFAULT,
+		.quirks = MT_QUIRK_VALID_IS_INRANGE,
+		.maxcontacts = 10 },
+	{ .name = MT_CLS_DUAL1,
+		.quirks = MT_QUIRK_VALID_IS_INRANGE |
+			MT_QUIRK_SLOT_IS_CONTACTID,
+		.maxcontacts = 2 },
+	{ .name = MT_CLS_DUAL2,
+		.quirks = MT_QUIRK_VALID_IS_INRANGE |
+			MT_QUIRK_SLOT_IS_CONTACTNUMBER,
+		.maxcontacts = 2 },
+	{ .name = MT_CLS_CYPRESS,
+		.quirks = MT_QUIRK_NOT_SEEN_MEANS_UP |
+			MT_QUIRK_CYPRESS,
+		.maxcontacts = 10 },
+
+	{ }
+};
+
+static void mt_feature_mapping(struct hid_device *hdev, struct hid_input *hi,
+		struct hid_field *field, struct hid_usage *usage)
+{
+	if (usage->hid == HID_DG_INPUTMODE) {
+		struct mt_device *td = hid_get_drvdata(hdev);
+		td->inputmode = field->report->id;
+	}
+}
+
+static void set_abs(struct input_dev *input, unsigned int code,
+		struct hid_field *field, int snratio)
+{
+	int fmin = field->logical_minimum;
+	int fmax = field->logical_maximum;
+	int fuzz = snratio ? (fmax - fmin) / snratio : 0;
+	input_set_abs_params(input, code, fmin, fmax, fuzz, 0);
+}
+
+static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi,
+		struct hid_field *field, struct hid_usage *usage,
+		unsigned long **bit, int *max)
+{
+	struct mt_device *td = hid_get_drvdata(hdev);
+	struct mt_class *cls = td->mtclass;
+	switch (usage->hid & HID_USAGE_PAGE) {
+
+	case HID_UP_GENDESK:
+		switch (usage->hid) {
+		case HID_GD_X:
+			hid_map_usage(hi, usage, bit, max,
+					EV_ABS, ABS_MT_POSITION_X);
+			set_abs(hi->input, ABS_MT_POSITION_X, field,
+				cls->sn_move);
+			/* touchscreen emulation */
+			set_abs(hi->input, ABS_X, field, cls->sn_move);
+			td->last_slot_field = usage->hid;
+			return 1;
+		case HID_GD_Y:
+			hid_map_usage(hi, usage, bit, max,
+					EV_ABS, ABS_MT_POSITION_Y);
+			set_abs(hi->input, ABS_MT_POSITION_Y, field,
+				cls->sn_move);
+			/* touchscreen emulation */
+			set_abs(hi->input, ABS_Y, field, cls->sn_move);
+			td->last_slot_field = usage->hid;
+			return 1;
+		}
+		return 0;
+
+	case HID_UP_DIGITIZER:
+		switch (usage->hid) {
+		case HID_DG_INRANGE:
+			td->last_slot_field = usage->hid;
+			return 1;
+		case HID_DG_CONFIDENCE:
+			td->last_slot_field = usage->hid;
+			return 1;
+		case HID_DG_TIPSWITCH:
+			hid_map_usage(hi, usage, bit, max, EV_KEY, BTN_TOUCH);
+			input_set_capability(hi->input, EV_KEY, BTN_TOUCH);
+			td->last_slot_field = usage->hid;
+			return 1;
+		case HID_DG_CONTACTID:
+			input_mt_init_slots(hi->input,
+					td->mtclass->maxcontacts);
+			td->last_slot_field = usage->hid;
+			return 1;
+		case HID_DG_WIDTH:
+			hid_map_usage(hi, usage, bit, max,
+					EV_ABS, ABS_MT_TOUCH_MAJOR);
+			td->last_slot_field = usage->hid;
+			return 1;
+		case HID_DG_HEIGHT:
+			hid_map_usage(hi, usage, bit, max,
+					EV_ABS, ABS_MT_TOUCH_MINOR);
+			field->logical_maximum = 1;
+			field->logical_minimum = 0;
+			set_abs(hi->input, ABS_MT_ORIENTATION, field, 0);
+			td->last_slot_field = usage->hid;
+			return 1;
+		case HID_DG_TIPPRESSURE:
+			hid_map_usage(hi, usage, bit, max,
+					EV_ABS, ABS_MT_PRESSURE);
+			set_abs(hi->input, ABS_MT_PRESSURE, field,
+				cls->sn_pressure);
+			/* touchscreen emulation */
+			set_abs(hi->input, ABS_PRESSURE, field,
+				cls->sn_pressure);
+			td->last_slot_field = usage->hid;
+			return 1;
+		case HID_DG_CONTACTCOUNT:
+			td->last_field_index = field->report->maxfield - 1;
+			return 1;
+		case HID_DG_CONTACTMAX:
+			/* we don't set td->last_slot_field as contactcount and
+			 * contact max are global to the report */
+			return -1;
+		}
+		/* let hid-input decide for the others */
+		return 0;
+
+	case 0xff000000:
+		/* we do not want to map these: no input-oriented meaning */
+		return -1;
+	}
+
+	return 0;
+}
+
+static int mt_input_mapped(struct hid_device *hdev, struct hid_input *hi,
+		struct hid_field *field, struct hid_usage *usage,
+		unsigned long **bit, int *max)
+{
+	if (usage->type == EV_KEY || usage->type == EV_ABS)
+		set_bit(usage->type, hi->input->evbit);
+
+	return -1;
+}
+
+static int mt_compute_slot(struct mt_device *td)
+{
+	__s32 quirks = td->mtclass->quirks;
+
+	if (quirks & MT_QUIRK_SLOT_IS_CONTACTID)
+		return td->curdata.contactid;
+
+	if (quirks & MT_QUIRK_CYPRESS)
+		return cypress_compute_slot(td);
+
+	if (quirks & MT_QUIRK_SLOT_IS_CONTACTNUMBER)
+		return td->num_received;
+
+	return find_slot_from_contactid(td);
+}
+
+/*
+ * this function is called when a whole contact has been processed,
+ * so that it can assign it to a slot and store the data there
+ */
+static void mt_complete_slot(struct mt_device *td)
+{
+	td->curdata.seen_in_this_frame = true;
+	if (td->curvalid) {
+		int slotnum = mt_compute_slot(td);
+
+		if (slotnum >= 0 && slotnum < td->mtclass->maxcontacts)
+			td->slots[slotnum] = td->curdata;
+	}
+	td->num_received++;
+}
+
+
+/*
+ * this function is called when a whole packet has been received and processed,
+ * so that it can decide what to send to the input layer.
+ */
+static void mt_emit_event(struct mt_device *td, struct input_dev *input)
+{
+	int i;
+
+	for (i = 0; i < td->mtclass->maxcontacts; ++i) {
+		struct mt_slot *s = &(td->slots[i]);
+		if ((td->mtclass->quirks & MT_QUIRK_NOT_SEEN_MEANS_UP) &&
+			!s->seen_in_this_frame) {
+			s->touch_state = false;
+		}
+
+		input_mt_slot(input, i);
+		input_mt_report_slot_state(input, MT_TOOL_FINGER,
+			s->touch_state);
+		if (s->touch_state) {
+			input_event(input, EV_ABS, ABS_MT_POSITION_X, s->x);
+			input_event(input, EV_ABS, ABS_MT_POSITION_Y, s->y);
+			input_event(input, EV_ABS, ABS_MT_PRESSURE, s->p);
+			input_event(input, EV_ABS, ABS_MT_TOUCH_MAJOR, s->w);
+			input_event(input, EV_ABS, ABS_MT_TOUCH_MINOR, s->h);
+		}
+		s->seen_in_this_frame = false;
+
+	}
+
+	input_mt_report_pointer_emulation(input, true);
+	input_sync(input);
+	td->num_received = 0;
+}
+
+
+
+static int mt_event(struct hid_device *hid, struct hid_field *field,
+				struct hid_usage *usage, __s32 value)
+{
+	struct mt_device *td = hid_get_drvdata(hid);
+	__s32 quirks = td->mtclass->quirks;
+
+	if (hid->claimed & HID_CLAIMED_INPUT) {
+		switch (usage->hid) {
+		case HID_DG_INRANGE:
+			if (quirks & MT_QUIRK_VALID_IS_INRANGE)
+				td->curvalid = value;
+			break;
+		case HID_DG_TIPSWITCH:
+			if (quirks & MT_QUIRK_NOT_SEEN_MEANS_UP)
+				td->curvalid = value;
+			td->curdata.touch_state = value;
+			break;
+		case HID_DG_CONFIDENCE:
+			if (quirks & MT_QUIRK_VALID_IS_CONFIDENCE)
+				td->curvalid = value;
+			break;
+		case HID_DG_CONTACTID:
+			td->curdata.contactid = value;
+			break;
+		case HID_DG_TIPPRESSURE:
+			td->curdata.p = value;
+			break;
+		case HID_GD_X:
+			td->curdata.x = value;
+			break;
+		case HID_GD_Y:
+			td->curdata.y = value;
+			break;
+		case HID_DG_WIDTH:
+			td->curdata.w = value;
+			break;
+		case HID_DG_HEIGHT:
+			td->curdata.h = value;
+			break;
+		case HID_DG_CONTACTCOUNT:
+			/*
+			 * Includes multi-packet support where subsequent
+			 * packets are sent with zero contactcount.
+			 */
+			if (value)
+				td->num_expected = value;
+			break;
+
+		default:
+			/* fallback to the generic hidinput handling */
+			return 0;
+		}
+
+		if (usage->hid == td->last_slot_field)
+			mt_complete_slot(td);
+
+		if (field->index == td->last_field_index
+			&& td->num_received >= td->num_expected)
+			mt_emit_event(td, field->hidinput->input);
+
+	}
+
+	/* we have handled the hidinput part, now handle hiddev */
+	if (hid->claimed & HID_CLAIMED_HIDDEV && hid->hiddev_hid_event)
+		hid->hiddev_hid_event(hid, field, usage, value);
+
+	return 1;
+}
+
+static void mt_set_input_mode(struct hid_device *hdev)
+{
+	struct mt_device *td = hid_get_drvdata(hdev);
+	struct hid_report *r;
+	struct hid_report_enum *re;
+
+	if (td->inputmode < 0)
+		return;
+
+	re = &(hdev->report_enum[HID_FEATURE_REPORT]);
+	r = re->report_id_hash[td->inputmode];
+	if (r) {
+		r->field[0]->value[0] = 0x02;
+		usbhid_submit_report(hdev, r, USB_DIR_OUT);
+	}
+}
+
+static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
+{
+	int ret, i;
+	struct mt_device *td;
+	struct mt_class *mtclass = mt_classes; /* MT_CLS_DEFAULT */
+
+	for (i = 0; mt_classes[i].name ; i++) {
+		if (id->driver_data == mt_classes[i].name) {
+			mtclass = &(mt_classes[i]);
+			break;
+		}
+	}
+
+	/* This allows the driver to correctly support devices
+	 * that emit events over several HID messages.
+	 */
+	hdev->quirks |= HID_QUIRK_NO_INPUT_SYNC;
+
+	td = kzalloc(sizeof(struct mt_device) +
+				mtclass->maxcontacts * sizeof(struct mt_slot),
+				GFP_KERNEL);
+	if (!td) {
+		dev_err(&hdev->dev, "cannot allocate multitouch data\n");
+		return -ENOMEM;
+	}
+	td->mtclass = mtclass;
+	td->inputmode = -1;
+	hid_set_drvdata(hdev, td);
+
+	ret = hid_parse(hdev);
+	if (ret != 0)
+		goto fail;
+
+	ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
+	if (ret)
+		goto fail;
+
+	mt_set_input_mode(hdev);
+
+	return 0;
+
+fail:
+	kfree(td);
+	return ret;
+}
+
+#ifdef CONFIG_PM
+static int mt_reset_resume(struct hid_device *hdev)
+{
+	mt_set_input_mode(hdev);
+	return 0;
+}
+#endif
+
+static void mt_remove(struct hid_device *hdev)
+{
+	struct mt_device *td = hid_get_drvdata(hdev);
+	hid_hw_stop(hdev);
+	kfree(td);
+	hid_set_drvdata(hdev, NULL);
+}
+
+static const struct hid_device_id mt_devices[] = {
+
+	/* Cypress panel */
+	{ .driver_data = MT_CLS_CYPRESS,
+		HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS,
+			USB_DEVICE_ID_CYPRESS_TRUETOUCH) },
+
+	/* GeneralTouch panel */
+	{ .driver_data = MT_CLS_DUAL2,
+		HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH,
+			USB_DEVICE_ID_GENERAL_TOUCH_WIN7_TWOFINGERS) },
+
+	/* PixCir-based panels */
+	{ .driver_data = MT_CLS_DUAL1,
+		HID_USB_DEVICE(USB_VENDOR_ID_HANVON,
+			USB_DEVICE_ID_HANVON_MULTITOUCH) },
+	{ .driver_data = MT_CLS_DUAL1,
+		HID_USB_DEVICE(USB_VENDOR_ID_CANDO,
+			USB_DEVICE_ID_CANDO_PIXCIR_MULTI_TOUCH) },
+
+	{ }
+};
+MODULE_DEVICE_TABLE(hid, mt_devices);
+
+static const struct hid_usage_id mt_grabbed_usages[] = {
+	{ HID_ANY_ID, HID_ANY_ID, HID_ANY_ID },
+	{ HID_ANY_ID - 1, HID_ANY_ID - 1, HID_ANY_ID - 1}
+};
+
+static struct hid_driver mt_driver = {
+	.name = "hid-multitouch",
+	.id_table = mt_devices,
+	.probe = mt_probe,
+	.remove = mt_remove,
+	.input_mapping = mt_input_mapping,
+	.input_mapped = mt_input_mapped,
+	.feature_mapping = mt_feature_mapping,
+	.usage_table = mt_grabbed_usages,
+	.event = mt_event,
+#ifdef CONFIG_PM
+	.reset_resume = mt_reset_resume,
+#endif
+};
+
+static int __init mt_init(void)
+{
+	return hid_register_driver(&mt_driver);
+}
+
+static void __exit mt_exit(void)
+{
+	hid_unregister_driver(&mt_driver);
+}
+
+module_init(mt_init);
+module_exit(mt_exit);
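
Adding support for another panel to hid-multitouch mostly means choosing (or defining) a device class and listing the device. The sketch below is purely illustrative; the class value, quirk combination, and vendor/product macros are made up, not real IDs:

/* Hypothetical class: slots addressed by ContactID, validity taken
 * from the Confidence usage. */
#define MT_CLS_EXAMPLE	5

/* entry added to mt_classes[] */
	{ .name = MT_CLS_EXAMPLE,
		.quirks = MT_QUIRK_VALID_IS_CONFIDENCE |
			MT_QUIRK_SLOT_IS_CONTACTID,
		.maxcontacts = 4 },

/* entry added to mt_devices[] */
	{ .driver_data = MT_CLS_EXAMPLE,
		HID_USB_DEVICE(USB_VENDOR_ID_EXAMPLE,
			USB_DEVICE_ID_EXAMPLE_PANEL) },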
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index 76b9a14..9a94b64 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -35,7 +35,6 @@
 	{ USB_VENDOR_ID_CHIC, USB_DEVICE_ID_CHIC_GAMEPAD, HID_QUIRK_BADPAD },
 	{ USB_VENDOR_ID_DWAV, USB_DEVICE_ID_EGALAX_TOUCHCONTROLLER, HID_QUIRK_MULTI_INPUT | HID_QUIRK_NOGET },
 	{ USB_VENDOR_ID_MOJO, USB_DEVICE_ID_RETRO_ADAPTER, HID_QUIRK_MULTI_INPUT },
-	{ USB_VENDOR_ID_TURBOX, USB_DEVICE_ID_TURBOX_TOUCHSCREEN_MOSART, HID_QUIRK_MULTI_INPUT },
 	{ USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_DRIVING, HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT },
 	{ USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_FLYING, HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT },
 	{ USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_FIGHTING, HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT },
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index bdc13d2..35f00da 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -809,10 +809,10 @@
 	  will be called dme1737.
 
 config SENSORS_EMC1403
-	tristate "SMSC EMC1403 thermal sensor"
+	tristate "SMSC EMC1403/23 thermal sensor"
 	depends on I2C
 	help
-	  If you say yes here you get support for the SMSC EMC1403
+	  If you say yes here you get support for the SMSC EMC1403/23
 	  temperature monitoring chip.
 
 	  Threshold values can be configured using sysfs.
diff --git a/drivers/hwmon/adm9240.c b/drivers/hwmon/adm9240.c
index 0727ad2..9e234b9 100644
--- a/drivers/hwmon/adm9240.c
+++ b/drivers/hwmon/adm9240.c
@@ -20,7 +20,7 @@
  * Alarms	16-bit map of active alarms
  * Analog Out	0..1250 mV output
  *
- * Chassis Intrusion: clear CI latch with 'echo 1 > chassis_clear'
+ * Chassis Intrusion: clear CI latch with 'echo 0 > intrusion0_alarm'
  *
  * Test hardware: Intel SE440BX-2 desktop motherboard --Grant
  *
@@ -476,13 +476,16 @@
 static DEVICE_ATTR(aout_output, S_IRUGO | S_IWUSR, show_aout, set_aout);
 
 /* chassis_clear */
-static ssize_t chassis_clear(struct device *dev,
+static ssize_t chassis_clear_legacy(struct device *dev,
 		struct device_attribute *attr,
 		const char *buf, size_t count)
 {
 	struct i2c_client *client = to_i2c_client(dev);
 	unsigned long val = simple_strtol(buf, NULL, 10);
 
+	dev_warn(dev, "Attribute chassis_clear is deprecated, "
+		 "use intrusion0_alarm instead\n");
+
 	if (val == 1) {
 		i2c_smbus_write_byte_data(client,
 				ADM9240_REG_CHASSIS_CLEAR, 0x80);
@@ -490,7 +493,29 @@
 	}
 	return count;
 }
-static DEVICE_ATTR(chassis_clear, S_IWUSR, NULL, chassis_clear);
+static DEVICE_ATTR(chassis_clear, S_IWUSR, NULL, chassis_clear_legacy);
+
+static ssize_t chassis_clear(struct device *dev,
+		struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+	struct adm9240_data *data = i2c_get_clientdata(client);
+	unsigned long val;
+
+	if (strict_strtoul(buf, 10, &val) || val != 0)
+		return -EINVAL;
+
+	mutex_lock(&data->update_lock);
+	i2c_smbus_write_byte_data(client, ADM9240_REG_CHASSIS_CLEAR, 0x80);
+	data->valid = 0;		/* Force cache refresh */
+	mutex_unlock(&data->update_lock);
+	dev_dbg(&client->dev, "chassis intrusion latch cleared\n");
+
+	return count;
+}
+static SENSOR_DEVICE_ATTR(intrusion0_alarm, S_IRUGO | S_IWUSR, show_alarm,
+		chassis_clear, 12);
 
 static struct attribute *adm9240_attributes[] = {
 	&sensor_dev_attr_in0_input.dev_attr.attr,
@@ -532,6 +557,7 @@
 	&dev_attr_alarms.attr,
 	&dev_attr_aout_output.attr,
 	&dev_attr_chassis_clear.attr,
+	&sensor_dev_attr_intrusion0_alarm.dev_attr.attr,
 	&dev_attr_cpu0_vid.attr,
 	NULL
 };
diff --git a/drivers/hwmon/ads7828.c b/drivers/hwmon/ads7828.c
index aac85f3..c42c5a6 100644
--- a/drivers/hwmon/ads7828.c
+++ b/drivers/hwmon/ads7828.c
@@ -4,7 +4,7 @@
 
 	This driver is based on the lm75 and other lm_sensors/hwmon drivers
 
-	Written by Steve Hardy <steve@linuxrealtime.co.uk>
+	Written by Steve Hardy <shardy@redhat.com>
 
 	Datasheet available at: http://focus.ti.com/lit/ds/symlink/ads7828.pdf
 
@@ -271,7 +271,7 @@
 	i2c_del_driver(&ads7828_driver);
 }
 
-MODULE_AUTHOR("Steve Hardy <steve@linuxrealtime.co.uk>");
+MODULE_AUTHOR("Steve Hardy <shardy@redhat.com>");
 MODULE_DESCRIPTION("ADS7828 driver");
 MODULE_LICENSE("GPL");
 
diff --git a/drivers/hwmon/dme1737.c b/drivers/hwmon/dme1737.c
index e9a610b..d9c5927 100644
--- a/drivers/hwmon/dme1737.c
+++ b/drivers/hwmon/dme1737.c
@@ -77,12 +77,14 @@
  * in4   +12V
  * in5   VTR   (+3.3V stby)
  * in6   Vbat
+ * in7   Vtrip (sch5127 only)
  *
  * --------------------------------------------------------------------- */
 
-/* Voltages (in) numbered 0-6 (ix) */
-#define	DME1737_REG_IN(ix)		((ix) < 5 ? 0x20 + (ix) \
-						  : 0x94 + (ix))
+/* Voltages (in) numbered 0-7 (ix) */
+#define	DME1737_REG_IN(ix)		((ix) < 5 ? 0x20 + (ix) : \
+					 (ix) < 7 ? 0x94 + (ix) : \
+						    0x1f)
 #define	DME1737_REG_IN_MIN(ix)		((ix) < 5 ? 0x44 + (ix) * 2 \
 						  : 0x91 + (ix) * 2)
 #define	DME1737_REG_IN_MAX(ix)		((ix) < 5 ? 0x45 + (ix) * 2 \
@@ -101,10 +103,11 @@
  *    IN_TEMP_LSB(1) = [temp3, temp1]
  *    IN_TEMP_LSB(2) = [in4, temp2]
  *    IN_TEMP_LSB(3) = [in3, in0]
- *    IN_TEMP_LSB(4) = [in2, in1] */
+ *    IN_TEMP_LSB(4) = [in2, in1]
+ *    IN_TEMP_LSB(5) = [res, in7] */
 #define DME1737_REG_IN_TEMP_LSB(ix)	(0x84 + (ix))
-static const u8 DME1737_REG_IN_LSB[] = {3, 4, 4, 3, 2, 0, 0};
-static const u8 DME1737_REG_IN_LSB_SHL[] = {4, 4, 0, 0, 0, 0, 4};
+static const u8 DME1737_REG_IN_LSB[] = {3, 4, 4, 3, 2, 0, 0, 5};
+static const u8 DME1737_REG_IN_LSB_SHL[] = {4, 4, 0, 0, 0, 0, 4, 4};
 static const u8 DME1737_REG_TEMP_LSB[] = {1, 2, 1};
 static const u8 DME1737_REG_TEMP_LSB_SHL[] = {4, 4, 0};
 
@@ -145,7 +148,7 @@
 #define DME1737_REG_ALARM1		0x41
 #define DME1737_REG_ALARM2		0x42
 #define DME1737_REG_ALARM3		0x83
-static const u8 DME1737_BIT_ALARM_IN[] = {0, 1, 2, 3, 8, 16, 17};
+static const u8 DME1737_BIT_ALARM_IN[] = {0, 1, 2, 3, 8, 16, 17, 18};
 static const u8 DME1737_BIT_ALARM_TEMP[] = {4, 5, 6};
 static const u8 DME1737_BIT_ALARM_FAN[] = {10, 11, 12, 13, 22, 23};
 
@@ -190,6 +193,7 @@
 #define HAS_PWM_MIN		(1 << 4)		/* bit 4 */
 #define HAS_FAN(ix)		(1 << ((ix) + 5))	/* bits 5-10 */
 #define HAS_PWM(ix)		(1 << ((ix) + 11))	/* bits 11-16 */
+#define HAS_IN7			(1 << 17)		/* bit 17 */
 
 /* ---------------------------------------------------------------------
  * Data structures and manipulation thereof
@@ -213,9 +217,9 @@
 	u32 has_features;
 
 	/* Register values */
-	u16 in[7];
-	u8  in_min[7];
-	u8  in_max[7];
+	u16 in[8];
+	u8  in_min[8];
+	u8  in_max[8];
 	s16 temp[3];
 	s8  temp_min[3];
 	s8  temp_max[3];
@@ -247,7 +251,7 @@
 static const int IN_NOMINAL_SCH5027[] = {5000, 2250, 3300, 1125, 1125, 3300,
 					 3300};
 static const int IN_NOMINAL_SCH5127[] = {2500, 2250, 3300, 1125, 1125, 3300,
-					 3300};
+					 3300, 1500};
 #define IN_NOMINAL(type)	((type) == sch311x ? IN_NOMINAL_SCH311x : \
 				 (type) == sch5027 ? IN_NOMINAL_SCH5027 : \
 				 (type) == sch5127 ? IN_NOMINAL_SCH5127 : \
@@ -580,7 +584,7 @@
 {
 	struct dme1737_data *data = dev_get_drvdata(dev);
 	int ix;
-	u8 lsb[5];
+	u8 lsb[6];
 
 	mutex_lock(&data->update_lock);
 
@@ -603,6 +607,9 @@
 			/* Voltage inputs are stored as 16 bit values even
 			 * though they have only 12 bits resolution. This is
 			 * to make it consistent with the temp inputs. */
+			if (ix == 7 && !(data->has_features & HAS_IN7)) {
+				continue;
+			}
 			data->in[ix] = dme1737_read(data,
 					DME1737_REG_IN(ix)) << 8;
 			data->in_min[ix] = dme1737_read(data,
@@ -635,10 +642,16 @@
 		 * which the registers are read (MSB first, then LSB) is
 		 * important! */
 		for (ix = 0; ix < ARRAY_SIZE(lsb); ix++) {
+			if (ix == 5 && !(data->has_features & HAS_IN7)) {
+				continue;
+			}
 			lsb[ix] = dme1737_read(data,
 					DME1737_REG_IN_TEMP_LSB(ix));
 		}
 		for (ix = 0; ix < ARRAY_SIZE(data->in); ix++) {
+			if (ix == 7 && !(data->has_features & HAS_IN7)) {
+				continue;
+			}
 			data->in[ix] |= (lsb[DME1737_REG_IN_LSB[ix]] <<
 					DME1737_REG_IN_LSB_SHL[ix]) & 0xf0;
 		}
@@ -762,7 +775,7 @@
 
 /* ---------------------------------------------------------------------
  * Voltage sysfs attributes
- * ix = [0-5]
+ * ix = [0-7]
  * --------------------------------------------------------------------- */
 
 #define SYS_IN_INPUT	0
@@ -1439,7 +1452,7 @@
  * Sysfs device attribute defines and structs
  * --------------------------------------------------------------------- */
 
-/* Voltages 0-6 */
+/* Voltages 0-7 */
 
 #define SENSOR_DEVICE_ATTR_IN(ix) \
 static SENSOR_DEVICE_ATTR_2(in##ix##_input, S_IRUGO, \
@@ -1458,6 +1471,7 @@
 SENSOR_DEVICE_ATTR_IN(4);
 SENSOR_DEVICE_ATTR_IN(5);
 SENSOR_DEVICE_ATTR_IN(6);
+SENSOR_DEVICE_ATTR_IN(7);
 
 /* Temperatures 1-3 */
 
@@ -1576,7 +1590,7 @@
  * created unconditionally. The attributes that need modification of their
  * permissions are created read-only and write permissions are added or removed
  * on the fly when required */
-static struct attribute *dme1737_attr[] ={
+static struct attribute *dme1737_attr[] = {
 	/* Voltages */
 	&sensor_dev_attr_in0_input.dev_attr.attr,
 	&sensor_dev_attr_in0_min.dev_attr.attr,
@@ -1681,7 +1695,7 @@
 };
 
 
-/* The following struct holds temp zone hysteresis  related attributes, which
+/* The following struct holds temp zone hysteresis related attributes, which
  * are not available in all chips. The following chips support them:
  * DME1737, SCH311x */
 static struct attribute *dme1737_zone_hyst_attr[] = {
@@ -1695,6 +1709,21 @@
 	.attrs = dme1737_zone_hyst_attr,
 };
 
+/* The following struct holds voltage in7 related attributes, which
+ * are not available in all chips. The following chips support them:
+ * SCH5127 */
+static struct attribute *dme1737_in7_attr[] = {
+	&sensor_dev_attr_in7_input.dev_attr.attr,
+	&sensor_dev_attr_in7_min.dev_attr.attr,
+	&sensor_dev_attr_in7_max.dev_attr.attr,
+	&sensor_dev_attr_in7_alarm.dev_attr.attr,
+	NULL
+};
+
+static const struct attribute_group dme1737_in7_group = {
+	.attrs = dme1737_in7_attr,
+};
+
 /* The following structs hold the PWM attributes, some of which are optional.
  * Their creation depends on the chip configuration which is determined during
  * module load. */
@@ -1986,6 +2015,9 @@
 	if (data->has_features & HAS_ZONE_HYST) {
 		sysfs_remove_group(&dev->kobj, &dme1737_zone_hyst_group);
 	}
+	if (data->has_features & HAS_IN7) {
+		sysfs_remove_group(&dev->kobj, &dme1737_in7_group);
+	}
 	sysfs_remove_group(&dev->kobj, &dme1737_group);
 
 	if (!data->client) {
@@ -1999,43 +2031,58 @@
 	int err, ix;
 
 	/* Create a name attribute for ISA devices */
-	if (!data->client &&
-	    (err = sysfs_create_file(&dev->kobj, &dev_attr_name.attr))) {
-		goto exit;
+	if (!data->client) {
+		err = sysfs_create_file(&dev->kobj, &dev_attr_name.attr);
+		if (err) {
+			goto exit;
+		}
 	}
 
 	/* Create standard sysfs attributes */
-	if ((err = sysfs_create_group(&dev->kobj, &dme1737_group))) {
+	err = sysfs_create_group(&dev->kobj, &dme1737_group);
+	if (err) {
 		goto exit_remove;
 	}
 
 	/* Create chip-dependent sysfs attributes */
-	if ((data->has_features & HAS_TEMP_OFFSET) &&
-	    (err = sysfs_create_group(&dev->kobj,
-				      &dme1737_temp_offset_group))) {
-		goto exit_remove;
+	if (data->has_features & HAS_TEMP_OFFSET) {
+		err = sysfs_create_group(&dev->kobj,
+					 &dme1737_temp_offset_group);
+		if (err) {
+			goto exit_remove;
+		}
 	}
-	if ((data->has_features & HAS_VID) &&
-	    (err = sysfs_create_group(&dev->kobj,
-				      &dme1737_vid_group))) {
-		goto exit_remove;
+	if (data->has_features & HAS_VID) {
+		err = sysfs_create_group(&dev->kobj, &dme1737_vid_group);
+		if (err) {
+			goto exit_remove;
+		}
 	}
-	if ((data->has_features & HAS_ZONE3) &&
-	    (err = sysfs_create_group(&dev->kobj,
-				      &dme1737_zone3_group))) {
-		goto exit_remove;
+	if (data->has_features & HAS_ZONE3) {
+		err = sysfs_create_group(&dev->kobj, &dme1737_zone3_group);
+		if (err) {
+			goto exit_remove;
+		}
 	}
-	if ((data->has_features & HAS_ZONE_HYST) &&
-	    (err = sysfs_create_group(&dev->kobj,
-				      &dme1737_zone_hyst_group))) {
-		goto exit_remove;
+	if (data->has_features & HAS_ZONE_HYST) {
+		err = sysfs_create_group(&dev->kobj, &dme1737_zone_hyst_group);
+		if (err) {
+			goto exit_remove;
+		}
+	}
+	if (data->has_features & HAS_IN7) {
+		err = sysfs_create_group(&dev->kobj, &dme1737_in7_group);
+		if (err) {
+			goto exit_remove;
+		}
 	}
 
 	/* Create fan sysfs attributes */
 	for (ix = 0; ix < ARRAY_SIZE(dme1737_fan_group); ix++) {
 		if (data->has_features & HAS_FAN(ix)) {
-			if ((err = sysfs_create_group(&dev->kobj,
-						&dme1737_fan_group[ix]))) {
+			err = sysfs_create_group(&dev->kobj,
+						 &dme1737_fan_group[ix]);
+			if (err) {
 				goto exit_remove;
 			}
 		}
@@ -2044,14 +2091,17 @@
 	/* Create PWM sysfs attributes */
 	for (ix = 0; ix < ARRAY_SIZE(dme1737_pwm_group); ix++) {
 		if (data->has_features & HAS_PWM(ix)) {
-			if ((err = sysfs_create_group(&dev->kobj,
-						&dme1737_pwm_group[ix]))) {
+			err = sysfs_create_group(&dev->kobj,
+						 &dme1737_pwm_group[ix]);
+			if (err) {
 				goto exit_remove;
 			}
-			if ((data->has_features & HAS_PWM_MIN) && ix < 3 &&
-			    (err = sysfs_create_file(&dev->kobj,
-					dme1737_auto_pwm_min_attr[ix]))) {
-				goto exit_remove;
+			if ((data->has_features & HAS_PWM_MIN) && (ix < 3)) {
+				err = sysfs_create_file(&dev->kobj,
+						dme1737_auto_pwm_min_attr[ix]);
+				if (err) {
+					goto exit_remove;
+				}
 			}
 		}
 	}
@@ -2188,7 +2238,7 @@
 		data->has_features |= HAS_ZONE3;
 		break;
 	case sch5127:
-		data->has_features |= HAS_FAN(2) | HAS_PWM(2);
+		data->has_features |= HAS_FAN(2) | HAS_PWM(2) | HAS_IN7;
 		break;
 	default:
 		break;
@@ -2281,8 +2331,9 @@
 	dme1737_sio_outb(sio_cip, 0x07, 0x0a);
 
 	/* Get the base address of the runtime registers */
-	if (!(addr = (dme1737_sio_inb(sio_cip, 0x60) << 8) |
-		      dme1737_sio_inb(sio_cip, 0x61))) {
+	addr = (dme1737_sio_inb(sio_cip, 0x60) << 8) |
+		dme1737_sio_inb(sio_cip, 0x61);
+	if (!addr) {
 		err = -ENODEV;
 		goto exit;
 	}
@@ -2363,13 +2414,15 @@
 	mutex_init(&data->update_lock);
 
 	/* Initialize the DME1737 chip */
-	if ((err = dme1737_init_device(dev))) {
+	err = dme1737_init_device(dev);
+	if (err) {
 		dev_err(dev, "Failed to initialize device.\n");
 		goto exit_kfree;
 	}
 
 	/* Create sysfs files */
-	if ((err = dme1737_create_files(dev))) {
+	err = dme1737_create_files(dev);
+	if (err) {
 		dev_err(dev, "Failed to create sysfs files.\n");
 		goto exit_kfree;
 	}
@@ -2446,8 +2499,9 @@
 	dme1737_sio_outb(sio_cip, 0x07, 0x0a);
 
 	/* Get the base address of the runtime registers */
-	if (!(base_addr = (dme1737_sio_inb(sio_cip, 0x60) << 8) |
-			   dme1737_sio_inb(sio_cip, 0x61))) {
+	base_addr = (dme1737_sio_inb(sio_cip, 0x60) << 8) |
+		     dme1737_sio_inb(sio_cip, 0x61);
+	if (!base_addr) {
 		pr_err("Base address not set\n");
 		err = -ENODEV;
 		goto exit;
@@ -2476,18 +2530,21 @@
 	if (err)
 		goto exit;
 
-	if (!(pdev = platform_device_alloc("dme1737", addr))) {
+	pdev = platform_device_alloc("dme1737", addr);
+	if (!pdev) {
 		pr_err("Failed to allocate device\n");
 		err = -ENOMEM;
 		goto exit;
 	}
 
-	if ((err = platform_device_add_resources(pdev, &res, 1))) {
+	err = platform_device_add_resources(pdev, &res, 1);
+	if (err) {
 		pr_err("Failed to add device resource (err = %d)\n", err);
 		goto exit_device_put;
 	}
 
-	if ((err = platform_device_add(pdev))) {
+	err = platform_device_add(pdev);
+	if (err) {
 		pr_err("Failed to add device (err = %d)\n", err);
 		goto exit_device_put;
 	}
@@ -2514,11 +2571,12 @@
 		dev_err(dev, "Failed to request region 0x%04x-0x%04x.\n",
 			(unsigned short)res->start,
 			(unsigned short)res->start + DME1737_EXTENT - 1);
-                err = -EBUSY;
-                goto exit;
-        }
+		err = -EBUSY;
+		goto exit;
+	}
 
-	if (!(data = kzalloc(sizeof(struct dme1737_data), GFP_KERNEL))) {
+	data = kzalloc(sizeof(struct dme1737_data), GFP_KERNEL);
+	if (!data) {
 		err = -ENOMEM;
 		goto exit_release_region;
 	}
@@ -2565,13 +2623,15 @@
 		 data->type == sch5127 ? "SCH5127" : "SCH311x", data->addr);
 
 	/* Initialize the chip */
-	if ((err = dme1737_init_device(dev))) {
+	err = dme1737_init_device(dev);
+	if (err) {
 		dev_err(dev, "Failed to initialize device.\n");
 		goto exit_kfree;
 	}
 
 	/* Create sysfs files */
-	if ((err = dme1737_create_files(dev))) {
+	err = dme1737_create_files(dev);
+	if (err) {
 		dev_err(dev, "Failed to create sysfs files.\n");
 		goto exit_kfree;
 	}
@@ -2628,7 +2688,8 @@
 	int err;
 	unsigned short addr;
 
-	if ((err = i2c_add_driver(&dme1737_i2c_driver))) {
+	err = i2c_add_driver(&dme1737_i2c_driver);
+	if (err) {
 		goto exit;
 	}
 
@@ -2641,12 +2702,14 @@
 		return 0;
 	}
 
-	if ((err = platform_driver_register(&dme1737_isa_driver))) {
+	err = platform_driver_register(&dme1737_isa_driver);
+	if (err) {
 		goto exit_del_i2c_driver;
 	}
 
 	/* Sets global pdev as a side effect */
-	if ((err = dme1737_isa_device_add(addr))) {
+	err = dme1737_isa_device_add(addr);
+	if (err) {
 		goto exit_del_isa_driver;
 	}
 
diff --git a/drivers/hwmon/emc1403.c b/drivers/hwmon/emc1403.c
index 8dee3f3..5dea9fa 100644
--- a/drivers/hwmon/emc1403.c
+++ b/drivers/hwmon/emc1403.c
@@ -269,23 +269,30 @@
 			struct i2c_board_info *info)
 {
 	int id;
-	/* Check if thermal chip is SMSC and EMC1403 */
+	/* Check if thermal chip is SMSC and EMC1403 or EMC1423 */
 
 	id = i2c_smbus_read_byte_data(client, THERMAL_SMSC_ID_REG);
 	if (id != 0x5d)
 		return -ENODEV;
 
+	id = i2c_smbus_read_byte_data(client, THERMAL_PID_REG);
+	switch (id) {
+	case 0x21:
+		strlcpy(info->type, "emc1403", I2C_NAME_SIZE);
+		break;
+	case 0x23:
+		strlcpy(info->type, "emc1423", I2C_NAME_SIZE);
+		break;
 	/* Note: 0x25 is the 1404 which is very similar and this
 	   driver could be extended */
-	id = i2c_smbus_read_byte_data(client, THERMAL_PID_REG);
-	if (id != 0x21)
+	default:
 		return -ENODEV;
+	}
 
 	id = i2c_smbus_read_byte_data(client, THERMAL_REVISION_REG);
 	if (id != 0x01)
 		return -ENODEV;
 
-	strlcpy(info->type, "emc1403", I2C_NAME_SIZE);
 	return 0;
 }
 
@@ -342,6 +349,7 @@
 
 static const struct i2c_device_id emc1403_idtable[] = {
 	{ "emc1403", 0 },
+	{ "emc1423", 0 },
 	{ }
 };
 MODULE_DEVICE_TABLE(i2c, emc1403_idtable);
diff --git a/drivers/hwmon/fschmd.c b/drivers/hwmon/fschmd.c
index d4d4ca6..aa6d8b6 100644
--- a/drivers/hwmon/fschmd.c
+++ b/drivers/hwmon/fschmd.c
@@ -49,7 +49,6 @@
 #include <linux/kref.h>
 
 /* Addresses to scan */
-static DEFINE_MUTEX(watchdog_mutex);
 static const unsigned short normal_i2c[] = { 0x73, I2C_CLIENT_END };
 
 /* Insmod parameters */
@@ -850,7 +849,7 @@
 
 static long watchdog_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 {
-	static struct watchdog_info ident = {
+	struct watchdog_info ident = {
 		.options = WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT |
 				WDIOF_CARDRESET,
 		.identity = "FSC watchdog"
@@ -858,7 +857,6 @@
 	int i, ret = 0;
 	struct fschmd_data *data = filp->private_data;
 
-	mutex_lock(&watchdog_mutex);
 	switch (cmd) {
 	case WDIOC_GETSUPPORT:
 		ident.firmware_version = data->revision;
@@ -915,7 +913,6 @@
 	default:
 		ret = -ENOTTY;
 	}
-	mutex_unlock(&watchdog_mutex);
 	return ret;
 }
 
diff --git a/drivers/hwmon/it87.c b/drivers/hwmon/it87.c
index a428a92..316b648 100644
--- a/drivers/hwmon/it87.c
+++ b/drivers/hwmon/it87.c
@@ -38,6 +38,8 @@
  *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/slab.h>
@@ -1570,26 +1572,25 @@
 	case 0xffff:	/* No device at all */
 		goto exit;
 	default:
-		pr_debug(DRVNAME ": Unsupported chip (DEVID=0x%x)\n",
-			 chip_type);
+		pr_debug("Unsupported chip (DEVID=0x%x)\n", chip_type);
 		goto exit;
 	}
 
 	superio_select(PME);
 	if (!(superio_inb(IT87_ACT_REG) & 0x01)) {
-		pr_info("it87: Device not activated, skipping\n");
+		pr_info("Device not activated, skipping\n");
 		goto exit;
 	}
 
 	*address = superio_inw(IT87_BASE_REG) & ~(IT87_EXTENT - 1);
 	if (*address == 0) {
-		pr_info("it87: Base address not set, skipping\n");
+		pr_info("Base address not set, skipping\n");
 		goto exit;
 	}
 
 	err = 0;
 	sio_data->revision = superio_inb(DEVREV) & 0x0f;
-	pr_info("it87: Found IT%04xF chip at 0x%x, revision %d\n",
+	pr_info("Found IT%04xF chip at 0x%x, revision %d\n",
 		chip_type, *address, sio_data->revision);
 
 	/* in8 (Vbat) is always internal */
@@ -1615,7 +1616,7 @@
 		} else {
 			/* We need at least 4 VID pins */
 			if (reg & 0x0f) {
-				pr_info("it87: VID is disabled (pins used for GPIO)\n");
+				pr_info("VID is disabled (pins used for GPIO)\n");
 				sio_data->skip_vid = 1;
 			}
 		}
@@ -1651,7 +1652,7 @@
 		if (sio_data->type == it8720 && !(reg & (1 << 1))) {
 			reg |= (1 << 1);
 			superio_outb(IT87_SIO_PINX2_REG, reg);
-			pr_notice("it87: Routing internal VCCH to in7\n");
+			pr_notice("Routing internal VCCH to in7\n");
 		}
 		if (reg & (1 << 0))
 			sio_data->internal |= (1 << 0);
@@ -1661,7 +1662,7 @@
 		sio_data->beep_pin = superio_inb(IT87_SIO_BEEP_PIN_REG) & 0x3f;
 	}
 	if (sio_data->beep_pin)
-		pr_info("it87: Beeping is supported\n");
+		pr_info("Beeping is supported\n");
 
 	/* Disable specific features based on DMI strings */
 	board_vendor = dmi_get_system_info(DMI_BOARD_VENDOR);
@@ -1675,8 +1676,7 @@
 			   the PWM2 duty cycle, so we disable it.
 			   I use the board name string as the trigger in case
 			   the same board is ever used in other systems. */
-			pr_info("it87: Disabling pwm2 due to "
-				"hardware constraints\n");
+			pr_info("Disabling pwm2 due to hardware constraints\n");
 			sio_data->skip_pwm = (1 << 1);
 		}
 	}
@@ -2189,28 +2189,26 @@
 	pdev = platform_device_alloc(DRVNAME, address);
 	if (!pdev) {
 		err = -ENOMEM;
-		printk(KERN_ERR DRVNAME ": Device allocation failed\n");
+		pr_err("Device allocation failed\n");
 		goto exit;
 	}
 
 	err = platform_device_add_resources(pdev, &res, 1);
 	if (err) {
-		printk(KERN_ERR DRVNAME ": Device resource addition failed "
-		       "(%d)\n", err);
+		pr_err("Device resource addition failed (%d)\n", err);
 		goto exit_device_put;
 	}
 
 	err = platform_device_add_data(pdev, sio_data,
 				       sizeof(struct it87_sio_data));
 	if (err) {
-		printk(KERN_ERR DRVNAME ": Platform data allocation failed\n");
+		pr_err("Platform data allocation failed\n");
 		goto exit_device_put;
 	}
 
 	err = platform_device_add(pdev);
 	if (err) {
-		printk(KERN_ERR DRVNAME ": Device addition failed (%d)\n",
-		       err);
+		pr_err("Device addition failed (%d)\n", err);
 		goto exit_device_put;
 	}
 
diff --git a/drivers/hwmon/lm78.c b/drivers/hwmon/lm78.c
index 72ff2c4..4cb24ea 100644
--- a/drivers/hwmon/lm78.c
+++ b/drivers/hwmon/lm78.c
@@ -19,6 +19,8 @@
     Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/slab.h>
@@ -858,7 +860,7 @@
 	 * individually for the probing phase. */
 	for (port = address; port < address + LM78_EXTENT; port++) {
 		if (!request_region(port, 1, "lm78")) {
-			pr_debug("lm78: Failed to request port 0x%x\n", port);
+			pr_debug("Failed to request port 0x%x\n", port);
 			goto release;
 		}
 	}
@@ -920,7 +922,7 @@
 		found = 1;
 
 	if (found)
-		pr_info("lm78: Found an %s chip at %#x\n",
+		pr_info("Found an %s chip at %#x\n",
 			val & 0x80 ? "LM79" : "LM78", (int)address);
 
  release:
@@ -942,21 +944,19 @@
 	pdev = platform_device_alloc("lm78", address);
 	if (!pdev) {
 		err = -ENOMEM;
-		printk(KERN_ERR "lm78: Device allocation failed\n");
+		pr_err("Device allocation failed\n");
 		goto exit;
 	}
 
 	err = platform_device_add_resources(pdev, &res, 1);
 	if (err) {
-		printk(KERN_ERR "lm78: Device resource addition failed "
-		       "(%d)\n", err);
+		pr_err("Device resource addition failed (%d)\n", err);
 		goto exit_device_put;
 	}
 
 	err = platform_device_add(pdev);
 	if (err) {
-		printk(KERN_ERR "lm78: Device addition failed (%d)\n",
-		       err);
+		pr_err("Device addition failed (%d)\n", err);
 		goto exit_device_put;
 	}
 
diff --git a/drivers/hwmon/pc87360.c b/drivers/hwmon/pc87360.c
index 68e69a4..3d99b88 100644
--- a/drivers/hwmon/pc87360.c
+++ b/drivers/hwmon/pc87360.c
@@ -33,6 +33,8 @@
  *  the standard Super-I/O addresses is used (0x2E/0x2F or 0x4E/0x4F).
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/slab.h>
@@ -1031,16 +1033,15 @@
 
 		val = superio_inb(sioaddr, ACT);
 		if (!(val & 0x01)) {
-			printk(KERN_INFO "pc87360: Device 0x%02x not "
-			       "activated\n", logdev[i]);
+			pr_info("Device 0x%02x not activated\n", logdev[i]);
 			continue;
 		}
 
 		val = (superio_inb(sioaddr, BASE) << 8)
 		    | superio_inb(sioaddr, BASE + 1);
 		if (!val) {
-			printk(KERN_INFO "pc87360: Base address not set for "
-			       "device 0x%02x\n", logdev[i]);
+			pr_info("Base address not set for device 0x%02x\n",
+				logdev[i]);
 			continue;
 		}
 
@@ -1050,17 +1051,15 @@
 			confreg[0] = superio_inb(sioaddr, 0xF0);
 			confreg[1] = superio_inb(sioaddr, 0xF1);
 
-#ifdef DEBUG
-			printk(KERN_DEBUG "pc87360: Fan 1: mon=%d "
-			       "ctrl=%d inv=%d\n", (confreg[0]>>2)&1,
-			       (confreg[0]>>3)&1, (confreg[0]>>4)&1);
-			printk(KERN_DEBUG "pc87360: Fan 2: mon=%d "
-			       "ctrl=%d inv=%d\n", (confreg[0]>>5)&1,
-			       (confreg[0]>>6)&1, (confreg[0]>>7)&1);
-			printk(KERN_DEBUG "pc87360: Fan 3: mon=%d "
-			       "ctrl=%d inv=%d\n", confreg[1]&1,
-			       (confreg[1]>>1)&1, (confreg[1]>>2)&1);
-#endif
+			pr_debug("Fan %d: mon=%d ctrl=%d inv=%d\n", 1,
+				 (confreg[0] >> 2) & 1, (confreg[0] >> 3) & 1,
+				 (confreg[0] >> 4) & 1);
+			pr_debug("Fan %d: mon=%d ctrl=%d inv=%d\n", 2,
+				 (confreg[0] >> 5) & 1, (confreg[0] >> 6) & 1,
+				 (confreg[0] >> 7) & 1);
+			pr_debug("Fan %d: mon=%d ctrl=%d inv=%d\n", 3,
+				 confreg[1] & 1, (confreg[1] >> 1) & 1,
+				 (confreg[1] >> 2) & 1);
 		} else if (i==1) { /* Voltages */
 			/* Are we using thermistors? */
 			if (*devid == 0xE9) { /* PC87366 */
@@ -1071,14 +1070,12 @@
 				confreg[3] = superio_inb(sioaddr, 0x25);
 
 				if (confreg[2] & 0x40) {
-					printk(KERN_INFO "pc87360: Using "
-					       "thermistors for temperature "
-					       "monitoring\n");
+					pr_info("Using thermistors for "
+						"temperature monitoring\n");
 				}
 				if (confreg[3] & 0xE0) {
-					printk(KERN_INFO "pc87360: VID "
-					       "inputs routed (mode %u)\n",
-					       confreg[3] >> 5);
+					pr_info("VID inputs routed (mode %u)\n",
+						confreg[3] >> 5);
 				}
 			}
 		}
@@ -1616,7 +1613,7 @@
 	pdev = platform_device_alloc("pc87360", address);
 	if (!pdev) {
 		err = -ENOMEM;
-		printk(KERN_ERR "pc87360: Device allocation failed\n");
+		pr_err("Device allocation failed\n");
 		goto exit;
 	}
 
@@ -1639,15 +1636,13 @@
 
 	err = platform_device_add_resources(pdev, res, res_count);
 	if (err) {
-		printk(KERN_ERR "pc87360: Device resources addition failed "
-		       "(%d)\n", err);
+		pr_err("Device resources addition failed (%d)\n", err);
 		goto exit_device_put;
 	}
 
 	err = platform_device_add(pdev);
 	if (err) {
-		printk(KERN_ERR "pc87360: Device addition failed (%d)\n",
-		       err);
+		pr_err("Device addition failed (%d)\n", err);
 		goto exit_device_put;
 	}
 
@@ -1666,8 +1661,7 @@
 
 	if (pc87360_find(0x2e, &devid, extra_isa)
 	 && pc87360_find(0x4e, &devid, extra_isa)) {
-		printk(KERN_WARNING "pc87360: PC8736x not detected, "
-		       "module not inserted.\n");
+		pr_warn("PC8736x not detected, module not inserted\n");
 		return -ENODEV;
 	}
 
@@ -1680,8 +1674,7 @@
 	}
 
 	if (address == 0x0000) {
-		printk(KERN_WARNING "pc87360: No active logical device, "
-		       "module not inserted.\n");
+		pr_warn("No active logical device, module not inserted\n");
 		return -ENODEV;
 	}
 
diff --git a/drivers/hwmon/pc87427.c b/drivers/hwmon/pc87427.c
index 9ec4daa..8da2181 100644
--- a/drivers/hwmon/pc87427.c
+++ b/drivers/hwmon/pc87427.c
@@ -22,6 +22,8 @@
  *  mode, and voltages aren't supported at all.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/slab.h>
@@ -1077,7 +1079,7 @@
 	data = kzalloc(sizeof(struct pc87427_data), GFP_KERNEL);
 	if (!data) {
 		err = -ENOMEM;
-		printk(KERN_ERR DRVNAME ": Out of memory\n");
+		pr_err("Out of memory\n");
 		goto exit;
 	}
 
@@ -1196,28 +1198,26 @@
 	pdev = platform_device_alloc(DRVNAME, res[0].start);
 	if (!pdev) {
 		err = -ENOMEM;
-		printk(KERN_ERR DRVNAME ": Device allocation failed\n");
+		pr_err("Device allocation failed\n");
 		goto exit;
 	}
 
 	err = platform_device_add_resources(pdev, res, res_count);
 	if (err) {
-		printk(KERN_ERR DRVNAME ": Device resource addition failed "
-		       "(%d)\n", err);
+		pr_err("Device resource addition failed (%d)\n", err);
 		goto exit_device_put;
 	}
 
 	err = platform_device_add_data(pdev, sio_data,
 				       sizeof(struct pc87427_sio_data));
 	if (err) {
-		printk(KERN_ERR DRVNAME ": Platform data allocation failed\n");
+		pr_err("Platform data allocation failed\n");
 		goto exit_device_put;
 	}
 
 	err = platform_device_add(pdev);
 	if (err) {
-		printk(KERN_ERR DRVNAME ": Device addition failed (%d)\n",
-		       err);
+		pr_err("Device addition failed (%d)\n", err);
 		goto exit_device_put;
 	}
 
@@ -1249,23 +1249,23 @@
 
 		val = superio_inb(sioaddr, SIOREG_ACT);
 		if (!(val & 0x01)) {
-			printk(KERN_INFO DRVNAME ": Logical device 0x%02x "
-			       "not activated\n", logdev[i]);
+			pr_info("Logical device 0x%02x not activated\n",
+				logdev[i]);
 			continue;
 		}
 
 		val = superio_inb(sioaddr, SIOREG_MAP);
 		if (val & 0x01) {
-			printk(KERN_WARNING DRVNAME ": Logical device 0x%02x "
-			       "is memory-mapped, can't use\n", logdev[i]);
+			pr_warn("Logical device 0x%02x is memory-mapped, "
+				"can't use\n", logdev[i]);
 			continue;
 		}
 
 		val = (superio_inb(sioaddr, SIOREG_IOBASE) << 8)
 		    | superio_inb(sioaddr, SIOREG_IOBASE + 1);
 		if (!val) {
-			printk(KERN_INFO DRVNAME ": I/O base address not set "
-			       "for logical device 0x%02x\n", logdev[i]);
+			pr_info("I/O base address not set for logical device "
+				"0x%02x\n", logdev[i]);
 			continue;
 		}
 		sio_data->address[i] = val;
diff --git a/drivers/hwmon/via686a.c b/drivers/hwmon/via686a.c
index 13e8d21..25e9166 100644
--- a/drivers/hwmon/via686a.c
+++ b/drivers/hwmon/via686a.c
@@ -689,6 +689,13 @@
 	return 0;
 }
 
+static void via686a_update_fan_div(struct via686a_data *data)
+{
+	int reg = via686a_read_value(data, VIA686A_REG_FANDIV);
+	data->fan_div[0] = (reg >> 4) & 0x03;
+	data->fan_div[1] = reg >> 6;
+}
+
 static void __devinit via686a_init_device(struct via686a_data *data)
 {
 	u8 reg;
@@ -702,6 +709,9 @@
 	via686a_write_value(data, VIA686A_REG_TEMP_MODE,
 			    (reg & ~VIA686A_TEMP_MODE_MASK)
 			    | VIA686A_TEMP_MODE_CONTINUOUS);
+
+	/* Pre-read fan clock divisor values */
+	via686a_update_fan_div(data);
 }
 
 static struct via686a_data *via686a_update_device(struct device *dev)
@@ -753,9 +763,7 @@
 		    (via686a_read_value(data, VIA686A_REG_TEMP_LOW23) &
 		     0xc0) >> 6;
 
-		i = via686a_read_value(data, VIA686A_REG_FANDIV);
-		data->fan_div[0] = (i >> 4) & 0x03;
-		data->fan_div[1] = i >> 6;
+		via686a_update_fan_div(data);
 		data->alarms =
 		    via686a_read_value(data,
 				       VIA686A_REG_ALARM1) |
diff --git a/drivers/hwmon/w83781d.c b/drivers/hwmon/w83781d.c
index c84b9b4..eed43a0 100644
--- a/drivers/hwmon/w83781d.c
+++ b/drivers/hwmon/w83781d.c
@@ -33,6 +33,8 @@
 
 */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/slab.h>
@@ -1798,8 +1800,7 @@
 	 * individually for the probing phase. */
 	for (port = address; port < address + W83781D_EXTENT; port++) {
 		if (!request_region(port, 1, "w83781d")) {
-			pr_debug("w83781d: Failed to request port 0x%x\n",
-				 port);
+			pr_debug("Failed to request port 0x%x\n", port);
 			goto release;
 		}
 	}
@@ -1811,7 +1812,7 @@
 	if (inb_p(address + 2) != val
 	 || inb_p(address + 3) != val
 	 || inb_p(address + 7) != val) {
-		pr_debug("w83781d: Detection failed at step 1\n");
+		pr_debug("Detection failed at step %d\n", 1);
 		goto release;
 	}
 #undef REALLY_SLOW_IO
@@ -1820,14 +1821,14 @@
 	   MSB (busy flag) should be clear initially, set after the write. */
 	save = inb_p(address + W83781D_ADDR_REG_OFFSET);
 	if (save & 0x80) {
-		pr_debug("w83781d: Detection failed at step 2\n");
+		pr_debug("Detection failed at step %d\n", 2);
 		goto release;
 	}
 	val = ~save & 0x7f;
 	outb_p(val, address + W83781D_ADDR_REG_OFFSET);
 	if (inb_p(address + W83781D_ADDR_REG_OFFSET) != (val | 0x80)) {
 		outb_p(save, address + W83781D_ADDR_REG_OFFSET);
-		pr_debug("w83781d: Detection failed at step 3\n");
+		pr_debug("Detection failed at step %d\n", 3);
 		goto release;
 	}
 
@@ -1835,7 +1836,7 @@
 	outb_p(W83781D_REG_CONFIG, address + W83781D_ADDR_REG_OFFSET);
 	val = inb_p(address + W83781D_DATA_REG_OFFSET);
 	if (val & 0x80) {
-		pr_debug("w83781d: Detection failed at step 4\n");
+		pr_debug("Detection failed at step %d\n", 4);
 		goto release;
 	}
 	outb_p(W83781D_REG_BANK, address + W83781D_ADDR_REG_OFFSET);
@@ -1844,19 +1845,19 @@
 	val = inb_p(address + W83781D_DATA_REG_OFFSET);
 	if ((!(save & 0x80) && (val != 0xa3))
 	 || ((save & 0x80) && (val != 0x5c))) {
-		pr_debug("w83781d: Detection failed at step 5\n");
+		pr_debug("Detection failed at step %d\n", 5);
 		goto release;
 	}
 	outb_p(W83781D_REG_I2C_ADDR, address + W83781D_ADDR_REG_OFFSET);
 	val = inb_p(address + W83781D_DATA_REG_OFFSET);
 	if (val < 0x03 || val > 0x77) {	/* Not a valid I2C address */
-		pr_debug("w83781d: Detection failed at step 6\n");
+		pr_debug("Detection failed at step %d\n", 6);
 		goto release;
 	}
 
 	/* The busy flag should be clear again */
 	if (inb_p(address + W83781D_ADDR_REG_OFFSET) & 0x80) {
-		pr_debug("w83781d: Detection failed at step 7\n");
+		pr_debug("Detection failed at step %d\n", 7);
 		goto release;
 	}
 
@@ -1871,7 +1872,7 @@
 		found = 1;
 
 	if (found)
-		pr_info("w83781d: Found a %s chip at %#x\n",
+		pr_info("Found a %s chip at %#x\n",
 			val == 0x30 ? "W83782D" : "W83781D", (int)address);
 
  release:
@@ -1894,21 +1895,19 @@
 	pdev = platform_device_alloc("w83781d", address);
 	if (!pdev) {
 		err = -ENOMEM;
-		printk(KERN_ERR "w83781d: Device allocation failed\n");
+		pr_err("Device allocation failed\n");
 		goto exit;
 	}
 
 	err = platform_device_add_resources(pdev, &res, 1);
 	if (err) {
-		printk(KERN_ERR "w83781d: Device resource addition failed "
-		       "(%d)\n", err);
+		pr_err("Device resource addition failed (%d)\n", err);
 		goto exit_device_put;
 	}
 
 	err = platform_device_add(pdev);
 	if (err) {
-		printk(KERN_ERR "w83781d: Device addition failed (%d)\n",
-		       err);
+		pr_err("Device addition failed (%d)\n", err);
 		goto exit_device_put;
 	}
 
diff --git a/drivers/hwmon/w83792d.c b/drivers/hwmon/w83792d.c
index 679718e..63841f8 100644
--- a/drivers/hwmon/w83792d.c
+++ b/drivers/hwmon/w83792d.c
@@ -691,7 +691,7 @@
 }
 
 static ssize_t
-show_regs_chassis(struct device *dev, struct device_attribute *attr,
+show_chassis(struct device *dev, struct device_attribute *attr,
 			char *buf)
 {
 	struct w83792d_data *data = w83792d_update_device(dev);
@@ -699,6 +699,16 @@
 }
 
 static ssize_t
+show_regs_chassis(struct device *dev, struct device_attribute *attr,
+			char *buf)
+{
+	dev_warn(dev,
+		 "Attribute %s is deprecated, use intrusion0_alarm instead\n",
+		 "chassis");
+	return show_chassis(dev, attr, buf);
+}
+
+static ssize_t
 show_chassis_clear(struct device *dev, struct device_attribute *attr, char *buf)
 {
 	struct w83792d_data *data = w83792d_update_device(dev);
@@ -706,7 +716,7 @@
 }
 
 static ssize_t
-store_chassis_clear(struct device *dev, struct device_attribute *attr,
+store_chassis_clear_legacy(struct device *dev, struct device_attribute *attr,
 			const char *buf, size_t count)
 {
 	struct i2c_client *client = to_i2c_client(dev);
@@ -714,6 +724,10 @@
 	u32 val;
 	u8 temp1 = 0, temp2 = 0;
 
+	dev_warn(dev,
+		 "Attribute %s is deprecated, use intrusion0_alarm instead\n",
+		 "chassis_clear");
+
 	val = simple_strtoul(buf, NULL, 10);
 	mutex_lock(&data->update_lock);
 	data->chassis_clear = SENSORS_LIMIT(val, 0 ,1);
@@ -726,6 +740,27 @@
 	return count;
 }
 
+static ssize_t
+store_chassis_clear(struct device *dev, struct device_attribute *attr,
+			const char *buf, size_t count)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+	struct w83792d_data *data = i2c_get_clientdata(client);
+	unsigned long val;
+	u8 reg;
+
+	if (strict_strtoul(buf, 10, &val) || val != 0)
+		return -EINVAL;
+
+	mutex_lock(&data->update_lock);
+	reg = w83792d_read_value(client, W83792D_REG_CHASSIS_CLR);
+	w83792d_write_value(client, W83792D_REG_CHASSIS_CLR, reg | 0x80);
+	data->valid = 0;		/* Force cache refresh */
+	mutex_unlock(&data->update_lock);
+
+	return count;
+}
+
 /* For Smart Fan I / Thermal Cruise */
 static ssize_t
 show_thermal_cruise(struct device *dev, struct device_attribute *attr,
@@ -1012,7 +1047,9 @@
 static SENSOR_DEVICE_ATTR(fan6_alarm, S_IRUGO, show_alarm, NULL, 23);
 static DEVICE_ATTR(chassis, S_IRUGO, show_regs_chassis, NULL);
 static DEVICE_ATTR(chassis_clear, S_IRUGO | S_IWUSR,
-			show_chassis_clear, store_chassis_clear);
+			show_chassis_clear, store_chassis_clear_legacy);
+static DEVICE_ATTR(intrusion0_alarm, S_IRUGO | S_IWUSR,
+			show_chassis, store_chassis_clear);
 static SENSOR_DEVICE_ATTR(pwm1, S_IWUSR | S_IRUGO, show_pwm, store_pwm, 0);
 static SENSOR_DEVICE_ATTR(pwm2, S_IWUSR | S_IRUGO, show_pwm, store_pwm, 1);
 static SENSOR_DEVICE_ATTR(pwm3, S_IWUSR | S_IRUGO, show_pwm, store_pwm, 2);
@@ -1214,6 +1251,7 @@
 	&dev_attr_alarms.attr,
 	&dev_attr_chassis.attr,
 	&dev_attr_chassis_clear.attr,
+	&dev_attr_intrusion0_alarm.attr,
 	&sensor_dev_attr_tolerance1.dev_attr.attr,
 	&sensor_dev_attr_thermal_cruise1.dev_attr.attr,
 	&sensor_dev_attr_tolerance2.dev_attr.attr,
diff --git a/drivers/hwmon/w83793.c b/drivers/hwmon/w83793.c
index 8e540ad..e3bdedf 100644
--- a/drivers/hwmon/w83793.c
+++ b/drivers/hwmon/w83793.c
@@ -51,7 +51,6 @@
 #define WATCHDOG_TIMEOUT 2	/* 2 minute default timeout */
 
 /* Addresses to scan */
-static DEFINE_MUTEX(watchdog_mutex);
 static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, 0x2f,
 						I2C_CLIENT_END };
 
@@ -421,18 +420,43 @@
 
 /* Write any value to clear chassis alarm */
 static ssize_t
+store_chassis_clear_legacy(struct device *dev,
+			   struct device_attribute *attr, const char *buf,
+			   size_t count)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+	struct w83793_data *data = i2c_get_clientdata(client);
+	u8 val;
+
+	dev_warn(dev, "Attribute chassis is deprecated, "
+		 "use intrusion0_alarm instead\n");
+
+	mutex_lock(&data->update_lock);
+	val = w83793_read_value(client, W83793_REG_CLR_CHASSIS);
+	val |= 0x80;
+	w83793_write_value(client, W83793_REG_CLR_CHASSIS, val);
+	mutex_unlock(&data->update_lock);
+	return count;
+}
+
+/* Write 0 to clear chassis alarm */
+static ssize_t
 store_chassis_clear(struct device *dev,
 		    struct device_attribute *attr, const char *buf,
 		    size_t count)
 {
 	struct i2c_client *client = to_i2c_client(dev);
 	struct w83793_data *data = i2c_get_clientdata(client);
-	u8 val;
+	unsigned long val;
+	u8 reg;
+
+	if (strict_strtoul(buf, 10, &val) || val != 0)
+		return -EINVAL;
 
 	mutex_lock(&data->update_lock);
-	val = w83793_read_value(client, W83793_REG_CLR_CHASSIS);
-	val |= 0x80;
-	w83793_write_value(client, W83793_REG_CLR_CHASSIS, val);
+	reg = w83793_read_value(client, W83793_REG_CLR_CHASSIS);
+	w83793_write_value(client, W83793_REG_CLR_CHASSIS, reg | 0x80);
+	data->valid = 0;		/* Force cache refresh */
 	mutex_unlock(&data->update_lock);
 	return count;
 }
@@ -1102,6 +1126,8 @@
 
 static struct sensor_device_attribute_2 sda_single_files[] = {
 	SENSOR_ATTR_2(chassis, S_IWUSR | S_IRUGO, show_alarm_beep,
+		      store_chassis_clear_legacy, ALARM_STATUS, 30),
+	SENSOR_ATTR_2(intrusion0_alarm, S_IWUSR | S_IRUGO, show_alarm_beep,
 		      store_chassis_clear, ALARM_STATUS, 30),
 	SENSOR_ATTR_2(beep_enable, S_IWUSR | S_IRUGO, show_beep_enable,
 		      store_beep_enable, NOT_USED, NOT_USED),
@@ -1323,7 +1349,7 @@
 static long watchdog_ioctl(struct file *filp, unsigned int cmd,
 			   unsigned long arg)
 {
-	static struct watchdog_info ident = {
+	struct watchdog_info ident = {
 		.options = WDIOF_KEEPALIVEPING |
 			   WDIOF_SETTIMEOUT |
 			   WDIOF_CARDRESET,
@@ -1333,7 +1359,6 @@
 	int val, ret = 0;
 	struct w83793_data *data = filp->private_data;
 
-	mutex_lock(&watchdog_mutex);
 	switch (cmd) {
 	case WDIOC_GETSUPPORT:
 		if (!nowayout)
@@ -1387,7 +1412,6 @@
 	default:
 		ret = -ENOTTY;
 	}
-	mutex_unlock(&watchdog_mutex);
 	return ret;
 }
 
diff --git a/drivers/hwmon/w83795.c b/drivers/hwmon/w83795.c
index cdbc744..845232d 100644
--- a/drivers/hwmon/w83795.c
+++ b/drivers/hwmon/w83795.c
@@ -458,6 +458,7 @@
 {
 	struct w83795_data *data = i2c_get_clientdata(client);
 	int i, limit;
+	u8 lsb;
 
 	/* Read the voltage limits */
 	for (i = 0; i < ARRAY_SIZE(data->in); i++) {
@@ -479,9 +480,8 @@
 	}
 
 	/* Read the fan limits */
+	lsb = 0; /* Silence a false gcc warning */
 	for (i = 0; i < ARRAY_SIZE(data->fan); i++) {
-		u8 lsb;
-
 		/* Each register contains LSB for 2 fans, but we want to
 		 * read it only once to save time */
 		if ((i & 1) == 0 && (data->has_fan & (3 << i)))
diff --git a/drivers/i2c/algos/i2c-algo-bit.c b/drivers/i2c/algos/i2c-algo-bit.c
index a39e6cf..38319a6 100644
--- a/drivers/i2c/algos/i2c-algo-bit.c
+++ b/drivers/i2c/algos/i2c-algo-bit.c
@@ -600,12 +600,14 @@
 /*
  * registering functions to load algorithms at runtime
  */
-static int i2c_bit_prepare_bus(struct i2c_adapter *adap)
+static int __i2c_bit_add_bus(struct i2c_adapter *adap,
+			     int (*add_adapter)(struct i2c_adapter *))
 {
 	struct i2c_algo_bit_data *bit_adap = adap->algo_data;
+	int ret;
 
 	if (bit_test) {
-		int ret = test_bus(bit_adap, adap->name);
+		ret = test_bus(bit_adap, adap->name);
 		if (ret < 0)
 			return -ENODEV;
 	}
@@ -614,30 +616,27 @@
 	adap->algo = &i2c_bit_algo;
 	adap->retries = 3;
 
+	ret = add_adapter(adap);
+	if (ret < 0)
+		return ret;
+
+	/* Complain if SCL can't be read */
+	if (bit_adap->getscl == NULL) {
+		dev_warn(&adap->dev, "Not I2C compliant: can't read SCL\n");
+		dev_warn(&adap->dev, "Bus may be unreliable\n");
+	}
 	return 0;
 }
 
 int i2c_bit_add_bus(struct i2c_adapter *adap)
 {
-	int err;
-
-	err = i2c_bit_prepare_bus(adap);
-	if (err)
-		return err;
-
-	return i2c_add_adapter(adap);
+	return __i2c_bit_add_bus(adap, i2c_add_adapter);
 }
 EXPORT_SYMBOL(i2c_bit_add_bus);
 
 int i2c_bit_add_numbered_bus(struct i2c_adapter *adap)
 {
-	int err;
-
-	err = i2c_bit_prepare_bus(adap);
-	if (err)
-		return err;
-
-	return i2c_add_numbered_adapter(adap);
+	return __i2c_bit_add_bus(adap, i2c_add_numbered_adapter);
 }
 EXPORT_SYMBOL(i2c_bit_add_numbered_bus);
 
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index 02835ce..7979aef7e 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -72,6 +72,7 @@
 #include <linux/acpi.h>
 #include <linux/io.h>
 #include <linux/dmi.h>
+#include <linux/slab.h>
 
 /* I801 SMBus address offsets */
 #define SMBHSTSTS(p)	(0 + (p)->smba)
diff --git a/drivers/i2c/busses/i2c-nforce2.c b/drivers/i2c/busses/i2c-nforce2.c
index a605a50..ff1e127 100644
--- a/drivers/i2c/busses/i2c-nforce2.c
+++ b/drivers/i2c/busses/i2c-nforce2.c
@@ -432,7 +432,7 @@
 
 static void __devexit nforce2_remove(struct pci_dev *dev)
 {
-	struct nforce2_smbus *smbuses = (void*) pci_get_drvdata(dev);
+	struct nforce2_smbus *smbuses = pci_get_drvdata(dev);
 
 	nforce2_set_reference(NULL);
 	if (smbuses[0].base) {
diff --git a/drivers/i2c/busses/i2c-nomadik.c b/drivers/i2c/busses/i2c-nomadik.c
index c9fffd0..2bd3469 100644
--- a/drivers/i2c/busses/i2c-nomadik.c
+++ b/drivers/i2c/busses/i2c-nomadik.c
@@ -434,7 +434,7 @@
 	}
 
 	if (timeout == 0) {
-		/* controler has timedout, re-init the h/w */
+		/* controller has timed out, re-init the h/w */
 		dev_err(&dev->pdev->dev, "controller timed out, re-init h/w\n");
 		(void) init_hw(dev);
 		status = -ETIMEDOUT;
@@ -498,7 +498,7 @@
 	}
 
 	if (timeout == 0) {
-		/* controler has timedout, re-init the h/w */
+		/* controller has timed out, re-init the h/w */
 		dev_err(&dev->pdev->dev, "controller timed out, re-init h/w\n");
 		(void) init_hw(dev);
 		status = -ETIMEDOUT;
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index 6b4cc56..c7db698 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -1362,7 +1362,7 @@
  *
  * Returns negative errno, or else the number of bytes written.
  */
-int i2c_master_send(struct i2c_client *client, const char *buf, int count)
+int i2c_master_send(const struct i2c_client *client, const char *buf, int count)
 {
 	int ret;
 	struct i2c_adapter *adap = client->adapter;
@@ -1389,7 +1389,7 @@
  *
  * Returns negative errno, or else the number of bytes read.
  */
-int i2c_master_recv(struct i2c_client *client, char *buf, int count)
+int i2c_master_recv(const struct i2c_client *client, char *buf, int count)
 {
 	struct i2c_adapter *adap = client->adapter;
 	struct i2c_msg msg;
@@ -1679,7 +1679,7 @@
  * This executes the SMBus "receive byte" protocol, returning negative errno
  * else the byte received from the device.
  */
-s32 i2c_smbus_read_byte(struct i2c_client *client)
+s32 i2c_smbus_read_byte(const struct i2c_client *client)
 {
 	union i2c_smbus_data data;
 	int status;
@@ -1699,7 +1699,7 @@
  * This executes the SMBus "send byte" protocol, returning negative errno
  * else zero on success.
  */
-s32 i2c_smbus_write_byte(struct i2c_client *client, u8 value)
+s32 i2c_smbus_write_byte(const struct i2c_client *client, u8 value)
 {
 	return i2c_smbus_xfer(client->adapter, client->addr, client->flags,
 	                      I2C_SMBUS_WRITE, value, I2C_SMBUS_BYTE, NULL);
@@ -1714,7 +1714,7 @@
  * This executes the SMBus "read byte" protocol, returning negative errno
  * else a data byte received from the device.
  */
-s32 i2c_smbus_read_byte_data(struct i2c_client *client, u8 command)
+s32 i2c_smbus_read_byte_data(const struct i2c_client *client, u8 command)
 {
 	union i2c_smbus_data data;
 	int status;
@@ -1735,7 +1735,8 @@
  * This executes the SMBus "write byte" protocol, returning negative errno
  * else zero on success.
  */
-s32 i2c_smbus_write_byte_data(struct i2c_client *client, u8 command, u8 value)
+s32 i2c_smbus_write_byte_data(const struct i2c_client *client, u8 command,
+			      u8 value)
 {
 	union i2c_smbus_data data;
 	data.byte = value;
@@ -1753,7 +1754,7 @@
  * This executes the SMBus "read word" protocol, returning negative errno
  * else a 16-bit unsigned "word" received from the device.
  */
-s32 i2c_smbus_read_word_data(struct i2c_client *client, u8 command)
+s32 i2c_smbus_read_word_data(const struct i2c_client *client, u8 command)
 {
 	union i2c_smbus_data data;
 	int status;
@@ -1774,7 +1775,8 @@
  * This executes the SMBus "write word" protocol, returning negative errno
  * else zero on success.
  */
-s32 i2c_smbus_write_word_data(struct i2c_client *client, u8 command, u16 value)
+s32 i2c_smbus_write_word_data(const struct i2c_client *client, u8 command,
+			      u16 value)
 {
 	union i2c_smbus_data data;
 	data.word = value;
@@ -1793,7 +1795,8 @@
  * This executes the SMBus "process call" protocol, returning negative errno
  * else a 16-bit unsigned "word" received from the device.
  */
-s32 i2c_smbus_process_call(struct i2c_client *client, u8 command, u16 value)
+s32 i2c_smbus_process_call(const struct i2c_client *client, u8 command,
+			   u16 value)
 {
 	union i2c_smbus_data data;
 	int status;
@@ -1821,7 +1824,7 @@
  * support this; its emulation through I2C messaging relies on a specific
  * mechanism (I2C_M_RECV_LEN) which may not be implemented.
  */
-s32 i2c_smbus_read_block_data(struct i2c_client *client, u8 command,
+s32 i2c_smbus_read_block_data(const struct i2c_client *client, u8 command,
 			      u8 *values)
 {
 	union i2c_smbus_data data;
@@ -1848,7 +1851,7 @@
  * This executes the SMBus "block write" protocol, returning negative errno
  * else zero on success.
  */
-s32 i2c_smbus_write_block_data(struct i2c_client *client, u8 command,
+s32 i2c_smbus_write_block_data(const struct i2c_client *client, u8 command,
 			       u8 length, const u8 *values)
 {
 	union i2c_smbus_data data;
@@ -1864,7 +1867,7 @@
 EXPORT_SYMBOL(i2c_smbus_write_block_data);
 
 /* Returns the number of read bytes */
-s32 i2c_smbus_read_i2c_block_data(struct i2c_client *client, u8 command,
+s32 i2c_smbus_read_i2c_block_data(const struct i2c_client *client, u8 command,
 				  u8 length, u8 *values)
 {
 	union i2c_smbus_data data;
@@ -1884,7 +1887,7 @@
 }
 EXPORT_SYMBOL(i2c_smbus_read_i2c_block_data);
 
-s32 i2c_smbus_write_i2c_block_data(struct i2c_client *client, u8 command,
+s32 i2c_smbus_write_i2c_block_data(const struct i2c_client *client, u8 command,
 				   u8 length, const u8 *values)
 {
 	union i2c_smbus_data data;
diff --git a/drivers/i2c/muxes/Kconfig b/drivers/i2c/muxes/Kconfig
index 4d91d80..90b7a01 100644
--- a/drivers/i2c/muxes/Kconfig
+++ b/drivers/i2c/muxes/Kconfig
@@ -5,6 +5,18 @@
 menu "Multiplexer I2C Chip support"
 	depends on I2C_MUX
 
+config I2C_MUX_GPIO
+	tristate "GPIO-based I2C multiplexer"
+	depends on GENERIC_GPIO
+	help
+	  If you say yes to this option, support will be included for a
+	  GPIO based I2C multiplexer. This driver provides access to
+	  I2C busses connected through a MUX, which is controlled
+	  through GPIO pins.
+
+	  This driver can also be built as a module.  If so, the module
+	  will be called gpio-i2cmux.
+
 config I2C_MUX_PCA9541
 	tristate "NXP PCA9541 I2C Master Selector"
 	depends on EXPERIMENTAL
diff --git a/drivers/i2c/muxes/Makefile b/drivers/i2c/muxes/Makefile
index d743806..4640436 100644
--- a/drivers/i2c/muxes/Makefile
+++ b/drivers/i2c/muxes/Makefile
@@ -1,6 +1,7 @@
 #
 # Makefile for multiplexer I2C chip drivers.
 
+obj-$(CONFIG_I2C_MUX_GPIO)	+= gpio-i2cmux.o
 obj-$(CONFIG_I2C_MUX_PCA9541)	+= pca9541.o
 obj-$(CONFIG_I2C_MUX_PCA954x)	+= pca954x.o
 
diff --git a/drivers/i2c/muxes/gpio-i2cmux.c b/drivers/i2c/muxes/gpio-i2cmux.c
new file mode 100644
index 0000000..7b6ce62
--- /dev/null
+++ b/drivers/i2c/muxes/gpio-i2cmux.c
@@ -0,0 +1,184 @@
+/*
+ * I2C multiplexer using GPIO API
+ *
+ * Peter Korsgaard <peter.korsgaard@barco.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/i2c.h>
+#include <linux/i2c-mux.h>
+#include <linux/gpio-i2cmux.h>
+#include <linux/platform_device.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/gpio.h>
+
+struct gpiomux {
+	struct i2c_adapter *parent;
+	struct i2c_adapter **adap; /* child busses */
+	struct gpio_i2cmux_platform_data data;
+};
+
+static void gpiomux_set(const struct gpiomux *mux, unsigned val)
+{
+	int i;
+
+	for (i = 0; i < mux->data.n_gpios; i++)
+		gpio_set_value(mux->data.gpios[i], val & (1 << i));
+}
+
+static int gpiomux_select(struct i2c_adapter *adap, void *data, u32 chan)
+{
+	struct gpiomux *mux = data;
+
+	gpiomux_set(mux, mux->data.values[chan]);
+
+	return 0;
+}
+
+static int gpiomux_deselect(struct i2c_adapter *adap, void *data, u32 chan)
+{
+	struct gpiomux *mux = data;
+
+	gpiomux_set(mux, mux->data.idle);
+
+	return 0;
+}
+
+static int __devinit gpiomux_probe(struct platform_device *pdev)
+{
+	struct gpiomux *mux;
+	struct gpio_i2cmux_platform_data *pdata;
+	struct i2c_adapter *parent;
+	int (*deselect) (struct i2c_adapter *, void *, u32);
+	unsigned initial_state;
+	int i, ret;
+
+	pdata = pdev->dev.platform_data;
+	if (!pdata) {
+		dev_err(&pdev->dev, "Missing platform data\n");
+		return -ENODEV;
+	}
+
+	parent = i2c_get_adapter(pdata->parent);
+	if (!parent) {
+		dev_err(&pdev->dev, "Parent adapter (%d) not found\n",
+			pdata->parent);
+		return -ENODEV;
+	}
+
+	mux = kzalloc(sizeof(*mux), GFP_KERNEL);
+	if (!mux) {
+		ret = -ENOMEM;
+		goto alloc_failed;
+	}
+
+	mux->parent = parent;
+	mux->data = *pdata;
+	mux->adap = kzalloc(sizeof(struct i2c_adapter *) * pdata->n_values,
+			    GFP_KERNEL);
+	if (!mux->adap) {
+		ret = -ENOMEM;
+		goto alloc_failed2;
+	}
+
+	if (pdata->idle != GPIO_I2CMUX_NO_IDLE) {
+		initial_state = pdata->idle;
+		deselect = gpiomux_deselect;
+	} else {
+		initial_state = pdata->values[0];
+		deselect = NULL;
+	}
+
+	for (i = 0; i < pdata->n_gpios; i++) {
+		ret = gpio_request(pdata->gpios[i], "gpio-i2cmux");
+		if (ret)
+			goto err_request_gpio;
+		gpio_direction_output(pdata->gpios[i],
+				      initial_state & (1 << i));
+	}
+
+	for (i = 0; i < pdata->n_values; i++) {
+		u32 nr = pdata->base_nr ? (pdata->base_nr + i) : 0;
+
+		mux->adap[i] = i2c_add_mux_adapter(parent, mux, nr, i,
+						   gpiomux_select, deselect);
+		if (!mux->adap[i]) {
+			ret = -ENODEV;
+			dev_err(&pdev->dev, "Failed to add adapter %d\n", i);
+			goto add_adapter_failed;
+		}
+	}
+
+	dev_info(&pdev->dev, "%d port mux on %s adapter\n",
+		 pdata->n_values, parent->name);
+
+	platform_set_drvdata(pdev, mux);
+
+	return 0;
+
+add_adapter_failed:
+	for (; i > 0; i--)
+		i2c_del_mux_adapter(mux->adap[i - 1]);
+	i = pdata->n_gpios;
+err_request_gpio:
+	for (; i > 0; i--)
+		gpio_free(pdata->gpios[i - 1]);
+	kfree(mux->adap);
+alloc_failed2:
+	kfree(mux);
+alloc_failed:
+	i2c_put_adapter(parent);
+
+	return ret;
+}
+
+static int __devexit gpiomux_remove(struct platform_device *pdev)
+{
+	struct gpiomux *mux = platform_get_drvdata(pdev);
+	int i;
+
+	for (i = 0; i < mux->data.n_values; i++)
+		i2c_del_mux_adapter(mux->adap[i]);
+
+	for (i = 0; i < mux->data.n_gpios; i++)
+		gpio_free(mux->data.gpios[i]);
+
+	platform_set_drvdata(pdev, NULL);
+	i2c_put_adapter(mux->parent);
+	kfree(mux->adap);
+	kfree(mux);
+
+	return 0;
+}
+
+static struct platform_driver gpiomux_driver = {
+	.probe	= gpiomux_probe,
+	.remove	= __devexit_p(gpiomux_remove),
+	.driver	= {
+		.owner	= THIS_MODULE,
+		.name	= "gpio-i2cmux",
+	},
+};
+
+static int __init gpiomux_init(void)
+{
+	return platform_driver_register(&gpiomux_driver);
+}
+
+static void __exit gpiomux_exit(void)
+{
+	platform_driver_unregister(&gpiomux_driver);
+}
+
+module_init(gpiomux_init);
+module_exit(gpiomux_exit);
+
+MODULE_DESCRIPTION("GPIO-based I2C multiplexer driver");
+MODULE_AUTHOR("Peter Korsgaard <peter.korsgaard@barco.com>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:gpio-i2cmux");
diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.c b/drivers/infiniband/hw/cxgb3/cxio_hal.c
index 09dda0b..c3f5aca 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_hal.c
+++ b/drivers/infiniband/hw/cxgb3/cxio_hal.c
@@ -189,6 +189,7 @@
 	return (rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, RDMA_CQ_SETUP, &setup));
 }
 
+#ifdef notyet
 int cxio_resize_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq)
 {
 	struct rdma_cq_setup setup;
@@ -200,6 +201,7 @@
 	setup.ovfl_mode = 1;
 	return (rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, RDMA_CQ_SETUP, &setup));
 }
+#endif
 
 static u32 get_qpid(struct cxio_rdev *rdev_p, struct cxio_ucontext *uctx)
 {
diff --git a/drivers/infiniband/hw/cxgb3/cxio_wr.h b/drivers/infiniband/hw/cxgb3/cxio_wr.h
index 4bb997a..83d2e19 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_wr.h
+++ b/drivers/infiniband/hw/cxgb3/cxio_wr.h
@@ -689,7 +689,7 @@
  * A T3 WQ implements both the SQ and RQ.
  */
 struct t3_wq {
-	union t3_wr *queue;		/* DMA accessable memory */
+	union t3_wr *queue;		/* DMA accessible memory */
 	dma_addr_t dma_addr;		/* DMA address for HW */
 	DEFINE_DMA_UNMAP_ADDR(mapping); /* unmap kruft */
 	u32 error;			/* 1 once we go to ERROR */
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.h b/drivers/infiniband/hw/cxgb3/iwch_provider.h
index a237d49..c5406da 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.h
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.h
@@ -335,8 +335,6 @@
 int iwch_post_zb_read(struct iwch_qp *qhp);
 int iwch_register_device(struct iwch_dev *dev);
 void iwch_unregister_device(struct iwch_dev *dev);
-int iwch_quiesce_qps(struct iwch_cq *chp);
-int iwch_resume_qps(struct iwch_cq *chp);
 void stop_read_rep_timer(struct iwch_qp *qhp);
 int iwch_register_mem(struct iwch_dev *rhp, struct iwch_pd *php,
 		      struct iwch_mr *mhp, int shift);
diff --git a/drivers/infiniband/hw/cxgb3/iwch_qp.c b/drivers/infiniband/hw/cxgb3/iwch_qp.c
index 0993137..1b4cd09 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_qp.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_qp.c
@@ -1149,59 +1149,3 @@
 	PDBG("%s exit state %d\n", __func__, qhp->attr.state);
 	return ret;
 }
-
-static int quiesce_qp(struct iwch_qp *qhp)
-{
-	spin_lock_irq(&qhp->lock);
-	iwch_quiesce_tid(qhp->ep);
-	qhp->flags |= QP_QUIESCED;
-	spin_unlock_irq(&qhp->lock);
-	return 0;
-}
-
-static int resume_qp(struct iwch_qp *qhp)
-{
-	spin_lock_irq(&qhp->lock);
-	iwch_resume_tid(qhp->ep);
-	qhp->flags &= ~QP_QUIESCED;
-	spin_unlock_irq(&qhp->lock);
-	return 0;
-}
-
-int iwch_quiesce_qps(struct iwch_cq *chp)
-{
-	int i;
-	struct iwch_qp *qhp;
-
-	for (i=0; i < T3_MAX_NUM_QP; i++) {
-		qhp = get_qhp(chp->rhp, i);
-		if (!qhp)
-			continue;
-		if ((qhp->attr.rcq == chp->cq.cqid) && !qp_quiesced(qhp)) {
-			quiesce_qp(qhp);
-			continue;
-		}
-		if ((qhp->attr.scq == chp->cq.cqid) && !qp_quiesced(qhp))
-			quiesce_qp(qhp);
-	}
-	return 0;
-}
-
-int iwch_resume_qps(struct iwch_cq *chp)
-{
-	int i;
-	struct iwch_qp *qhp;
-
-	for (i=0; i < T3_MAX_NUM_QP; i++) {
-		qhp = get_qhp(chp->rhp, i);
-		if (!qhp)
-			continue;
-		if ((qhp->attr.rcq == chp->cq.cqid) && qp_quiesced(qhp)) {
-			resume_qp(qhp);
-			continue;
-		}
-		if ((qhp->attr.scq == chp->cq.cqid) && qp_quiesced(qhp))
-			resume_qp(qhp);
-	}
-	return 0;
-}
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index 16032cd..2fe19ec 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -46,7 +46,6 @@
 #include <linux/timer.h>
 #include <linux/io.h>
 #include <linux/kfifo.h>
-#include <linux/mutex.h>
 
 #include <asm/byteorder.h>
 
@@ -760,7 +759,6 @@
 int c4iw_flush_sq(struct t4_wq *wq, struct t4_cq *cq, int count);
 int c4iw_ev_handler(struct c4iw_dev *rnicp, u32 qid);
 u16 c4iw_rqes_posted(struct c4iw_qp *qhp);
-int c4iw_post_zb_read(struct c4iw_qp *qhp);
 int c4iw_post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe);
 u32 c4iw_get_cqid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx);
 void c4iw_put_cqid(struct c4iw_rdev *rdev, u32 qid,
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index 057cb25..2080090 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -892,36 +892,6 @@
 	}
 }
 
-int c4iw_post_zb_read(struct c4iw_qp *qhp)
-{
-	union t4_wr *wqe;
-	struct sk_buff *skb;
-	u8 len16;
-
-	PDBG("%s enter\n", __func__);
-	skb = alloc_skb(40, GFP_KERNEL);
-	if (!skb) {
-		printk(KERN_ERR "%s cannot send zb_read!!\n", __func__);
-		return -ENOMEM;
-	}
-	set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);
-
-	wqe = (union t4_wr *)skb_put(skb, sizeof wqe->read);
-	memset(wqe, 0, sizeof wqe->read);
-	wqe->read.r2 = cpu_to_be64(0);
-	wqe->read.stag_sink = cpu_to_be32(1);
-	wqe->read.to_sink_hi = cpu_to_be32(0);
-	wqe->read.to_sink_lo = cpu_to_be32(1);
-	wqe->read.stag_src = cpu_to_be32(1);
-	wqe->read.plen = cpu_to_be32(0);
-	wqe->read.to_src_hi = cpu_to_be32(0);
-	wqe->read.to_src_lo = cpu_to_be32(1);
-	len16 = DIV_ROUND_UP(sizeof wqe->read, 16);
-	init_wr_hdr(wqe, 0, FW_RI_RDMA_READ_WR, FW_RI_COMPLETION_FLAG, len16);
-
-	return c4iw_ofld_send(&qhp->rhp->rdev, skb);
-}
-
 static void post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe,
 			   gfp_t gfp)
 {
@@ -1029,7 +999,6 @@
 	wqe->cookie = (unsigned long) &ep->com.wr_wait;
 
 	wqe->u.fini.type = FW_RI_TYPE_FINI;
-	c4iw_init_wr_wait(&ep->com.wr_wait);
 	ret = c4iw_ofld_send(&rhp->rdev, skb);
 	if (ret)
 		goto out;
@@ -1125,7 +1094,6 @@
 	if (qhp->attr.mpa_attr.initiator)
 		build_rtr_msg(qhp->attr.mpa_attr.p2p_type, &wqe->u.init);
 
-	c4iw_init_wr_wait(&qhp->ep->com.wr_wait);
 	ret = c4iw_ofld_send(&rhp->rdev, skb);
 	if (ret)
 		goto out;
diff --git a/drivers/infiniband/hw/ipath/ipath_driver.c b/drivers/infiniband/hw/ipath/ipath_driver.c
index 765f0fc..b33f045 100644
--- a/drivers/infiniband/hw/ipath/ipath_driver.c
+++ b/drivers/infiniband/hw/ipath/ipath_driver.c
@@ -530,9 +530,8 @@
 	for (j = 0; j < 6; j++) {
 		if (!pdev->resource[j].start)
 			continue;
-		ipath_cdbg(VERBOSE, "BAR %d start %llx, end %llx, len %llx\n",
-			   j, (unsigned long long)pdev->resource[j].start,
-			   (unsigned long long)pdev->resource[j].end,
+		ipath_cdbg(VERBOSE, "BAR %d %pR, len %llx\n",
+			   j, &pdev->resource[j],
 			   (unsigned long long)pci_resource_len(pdev, j));
 	}
 
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index 5a219a2..e8df155 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -397,10 +397,14 @@
 		cq->resize_buf = NULL;
 		cq->resize_umem = NULL;
 	} else {
+		struct mlx4_ib_cq_buf tmp_buf;
+		int tmp_cqe = 0;
+
 		spin_lock_irq(&cq->lock);
 		if (cq->resize_buf) {
 			mlx4_ib_cq_resize_copy_cqes(cq);
-			mlx4_ib_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);
+			tmp_buf = cq->buf;
+			tmp_cqe = cq->ibcq.cqe;
 			cq->buf      = cq->resize_buf->buf;
 			cq->ibcq.cqe = cq->resize_buf->cqe;
 
@@ -408,6 +412,9 @@
 			cq->resize_buf = NULL;
 		}
 		spin_unlock_irq(&cq->lock);
+
+		if (tmp_cqe)
+			mlx4_ib_free_cq_buf(dev, &tmp_buf, tmp_cqe);
 	}
 
 	goto out;
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index c9a8dd6..57ffa50 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -211,6 +211,8 @@
 	if (agent) {
 		send_buf = ib_create_send_mad(agent, qpn, 0, 0, IB_MGMT_MAD_HDR,
 					      IB_MGMT_MAD_DATA, GFP_ATOMIC);
+		if (IS_ERR(send_buf))
+			return;
 		/*
 		 * We rely here on the fact that MLX QPs don't use the
 		 * address handle after the send is posted (this is
diff --git a/drivers/infiniband/hw/mthca/mthca_mad.c b/drivers/infiniband/hw/mthca/mthca_mad.c
index 5648659..03a59534 100644
--- a/drivers/infiniband/hw/mthca/mthca_mad.c
+++ b/drivers/infiniband/hw/mthca/mthca_mad.c
@@ -171,6 +171,8 @@
 	if (agent) {
 		send_buf = ib_create_send_mad(agent, qpn, 0, 0, IB_MGMT_MAD_HDR,
 					      IB_MGMT_MAD_DATA, GFP_ATOMIC);
+		if (IS_ERR(send_buf))
+			return;
 		/*
 		 * We rely here on the fact that MLX QPs don't use the
 		 * address handle after the send is posted (this is
diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
index 3892e2c..5a4c364 100644
--- a/drivers/infiniband/hw/nes/nes_nic.c
+++ b/drivers/infiniband/hw/nes/nes_nic.c
@@ -908,8 +908,8 @@
 					nesvnic->nic_index &&
 					mc_index < max_pft_entries_avaiable) {
 						nes_debug(NES_DBG_NIC_RX,
-					"mc_index=%d skipping nic_index=%d,\
-					used for=%d \n", mc_index,
+					"mc_index=%d skipping nic_index=%d, "
+					"used for=%d \n", mc_index,
 					nesvnic->nic_index,
 					nesadapter->pft_mcast_map[mc_index]);
 				mc_index++;
diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
index 64c9e7d..73225ee 100644
--- a/drivers/infiniband/hw/qib/qib.h
+++ b/drivers/infiniband/hw/qib/qib.h
@@ -766,7 +766,7 @@
 	void (*f_sdma_hw_start_up)(struct qib_pportdata *);
 	void (*f_sdma_init_early)(struct qib_pportdata *);
 	void (*f_set_cntr_sample)(struct qib_pportdata *, u32, u32);
-	void (*f_update_usrhead)(struct qib_ctxtdata *, u64, u32, u32);
+	void (*f_update_usrhead)(struct qib_ctxtdata *, u64, u32, u32, u32);
 	u32 (*f_hdrqempty)(struct qib_ctxtdata *);
 	u64 (*f_portcntr)(struct qib_pportdata *, u32);
 	u32 (*f_read_cntrs)(struct qib_devdata *, loff_t, char **,
diff --git a/drivers/infiniband/hw/qib/qib_cq.c b/drivers/infiniband/hw/qib/qib_cq.c
index a86cbf8..5246aa4 100644
--- a/drivers/infiniband/hw/qib/qib_cq.c
+++ b/drivers/infiniband/hw/qib/qib_cq.c
@@ -100,7 +100,8 @@
 	wc->head = next;
 
 	if (cq->notify == IB_CQ_NEXT_COMP ||
-	    (cq->notify == IB_CQ_SOLICITED && solicited)) {
+	    (cq->notify == IB_CQ_SOLICITED &&
+	     (solicited || entry->status != IB_WC_SUCCESS))) {
 		cq->notify = IB_CQ_NONE;
 		cq->triggered++;
 		/*
diff --git a/drivers/infiniband/hw/qib/qib_driver.c b/drivers/infiniband/hw/qib/qib_driver.c
index 9cd1936..23e584f 100644
--- a/drivers/infiniband/hw/qib/qib_driver.c
+++ b/drivers/infiniband/hw/qib/qib_driver.c
@@ -71,6 +71,11 @@
  */
 #define QIB_PIO_MAXIBHDR 128
 
+/*
+ * QIB_MAX_PKT_RECV is the max # of packets processed per receive interrupt.
+ */
+#define QIB_MAX_PKT_RECV 64
+
 struct qlogic_ib_stats qib_stats;
 
 const char *qib_get_unit_name(int unit)
@@ -284,14 +289,147 @@
  * Returns 1 if error was a CRC, else 0.
  * Needed for some chip's synthesized error counters.
  */
-static u32 qib_rcv_hdrerr(struct qib_pportdata *ppd, u32 ctxt,
-			  u32 eflags, u32 l, u32 etail, __le32 *rhf_addr,
-			  struct qib_message_header *hdr)
+static u32 qib_rcv_hdrerr(struct qib_ctxtdata *rcd, struct qib_pportdata *ppd,
+			  u32 ctxt, u32 eflags, u32 l, u32 etail,
+			  __le32 *rhf_addr, struct qib_message_header *rhdr)
 {
 	u32 ret = 0;
 
 	if (eflags & (QLOGIC_IB_RHF_H_ICRCERR | QLOGIC_IB_RHF_H_VCRCERR))
 		ret = 1;
+	else if (eflags == QLOGIC_IB_RHF_H_TIDERR) {
+		/* For TIDERR and RC QPs preemptively schedule a NAK */
+		struct qib_ib_header *hdr = (struct qib_ib_header *) rhdr;
+		struct qib_other_headers *ohdr = NULL;
+		struct qib_ibport *ibp = &ppd->ibport_data;
+		struct qib_qp *qp = NULL;
+		u32 tlen = qib_hdrget_length_in_bytes(rhf_addr);
+		u16 lid  = be16_to_cpu(hdr->lrh[1]);
+		int lnh = be16_to_cpu(hdr->lrh[0]) & 3;
+		u32 qp_num;
+		u32 opcode;
+		u32 psn;
+		int diff;
+		unsigned long flags;
+
+		/* Sanity check packet */
+		if (tlen < 24)
+			goto drop;
+
+		if (lid < QIB_MULTICAST_LID_BASE) {
+			lid &= ~((1 << ppd->lmc) - 1);
+			if (unlikely(lid != ppd->lid))
+				goto drop;
+		}
+
+		/* Check for GRH */
+		if (lnh == QIB_LRH_BTH)
+			ohdr = &hdr->u.oth;
+		else if (lnh == QIB_LRH_GRH) {
+			u32 vtf;
+
+			ohdr = &hdr->u.l.oth;
+			if (hdr->u.l.grh.next_hdr != IB_GRH_NEXT_HDR)
+				goto drop;
+			vtf = be32_to_cpu(hdr->u.l.grh.version_tclass_flow);
+			if ((vtf >> IB_GRH_VERSION_SHIFT) != IB_GRH_VERSION)
+				goto drop;
+		} else
+			goto drop;
+
+		/* Get opcode and PSN from packet */
+		opcode = be32_to_cpu(ohdr->bth[0]);
+		opcode >>= 24;
+		psn = be32_to_cpu(ohdr->bth[2]);
+
+		/* Get the destination QP number. */
+		qp_num = be32_to_cpu(ohdr->bth[1]) & QIB_QPN_MASK;
+		if (qp_num != QIB_MULTICAST_QPN) {
+			int ruc_res;
+			qp = qib_lookup_qpn(ibp, qp_num);
+			if (!qp)
+				goto drop;
+
+			/*
+			 * Handle only RC QPs - for other QP types drop error
+			 * packet.
+			 */
+			spin_lock(&qp->r_lock);
+
+			/* Check for valid receive state. */
+			if (!(ib_qib_state_ops[qp->state] &
+			      QIB_PROCESS_RECV_OK)) {
+				ibp->n_pkt_drops++;
+				goto unlock;
+			}
+
+			switch (qp->ibqp.qp_type) {
+			case IB_QPT_RC:
+				spin_lock_irqsave(&qp->s_lock, flags);
+				ruc_res =
+					qib_ruc_check_hdr(
+						ibp, hdr,
+						lnh == QIB_LRH_GRH,
+						qp,
+						be32_to_cpu(ohdr->bth[0]));
+				if (ruc_res) {
+					spin_unlock_irqrestore(&qp->s_lock,
+							       flags);
+					goto unlock;
+				}
+				spin_unlock_irqrestore(&qp->s_lock, flags);
+
+				/* Only deal with RDMA Writes for now */
+				if (opcode <
+				    IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST) {
+					diff = qib_cmp24(psn, qp->r_psn);
+					if (!qp->r_nak_state && diff >= 0) {
+						ibp->n_rc_seqnak++;
+						qp->r_nak_state =
+							IB_NAK_PSN_ERROR;
+						/* Use the expected PSN. */
+						qp->r_ack_psn = qp->r_psn;
+						/*
+						 * Wait to send the sequence
+						 * NAK until all packets
+						 * in the receive queue have
+						 * been processed.
+						 * Otherwise, we end up
+						 * propagating congestion.
+						 */
+						if (list_empty(&qp->rspwait)) {
+							qp->r_flags |=
+								QIB_R_RSP_NAK;
+							atomic_inc(
+								&qp->refcount);
+							list_add_tail(
+							 &qp->rspwait,
+							 &rcd->qp_wait_list);
+						}
+					} /* Out of sequence NAK */
+				} /* QP Request NAKs */
+				break;
+			case IB_QPT_SMI:
+			case IB_QPT_GSI:
+			case IB_QPT_UD:
+			case IB_QPT_UC:
+			default:
+				/* For now don't handle any other QP types */
+				break;
+			}
+
+unlock:
+			spin_unlock(&qp->r_lock);
+			/*
+			 * Notify qib_destroy_qp() if it is waiting
+			 * for us to finish.
+			 */
+			if (atomic_dec_and_test(&qp->refcount))
+				wake_up(&qp->wait);
+		} /* Unicast QP */
+	} /* Valid packet with TIDErr */
+
+drop:
 	return ret;
 }
 
@@ -335,7 +473,7 @@
 		smp_rmb();  /* prevent speculative reads of dma'ed hdrq */
 	}
 
-	for (last = 0, i = 1; !last && i <= 64; i += !last) {
+	for (last = 0, i = 1; !last; i += !last) {
 		hdr = dd->f_get_msgheader(dd, rhf_addr);
 		eflags = qib_hdrget_err_flags(rhf_addr);
 		etype = qib_hdrget_rcv_type(rhf_addr);
@@ -371,7 +509,7 @@
 		 * packets; only qibhdrerr should be set.
 		 */
 		if (unlikely(eflags))
-			crcs += qib_rcv_hdrerr(ppd, rcd->ctxt, eflags, l,
+			crcs += qib_rcv_hdrerr(rcd, ppd, rcd->ctxt, eflags, l,
 					       etail, rhf_addr, hdr);
 		else if (etype == RCVHQ_RCV_TYPE_NON_KD) {
 			qib_ib_rcv(rcd, hdr, ebuf, tlen);
@@ -384,6 +522,9 @@
 		l += rsize;
 		if (l >= maxcnt)
 			l = 0;
+		if (i == QIB_MAX_PKT_RECV)
+			last = 1;
+
 		rhf_addr = (__le32 *) rcd->rcvhdrq + l + dd->rhf_offset;
 		if (dd->flags & QIB_NODMA_RTAIL) {
 			u32 seq = qib_hdrget_seq(rhf_addr);
@@ -402,7 +543,7 @@
 		 */
 		lval = l;
 		if (!last && !(i & 0xf)) {
-			dd->f_update_usrhead(rcd, lval, updegr, etail);
+			dd->f_update_usrhead(rcd, lval, updegr, etail, i);
 			updegr = 0;
 		}
 	}
@@ -444,7 +585,7 @@
 	 * if no packets were processed.
 	 */
 	lval = (u64)rcd->head | dd->rhdrhead_intr_off;
-	dd->f_update_usrhead(rcd, lval, updegr, etail);
+	dd->f_update_usrhead(rcd, lval, updegr, etail, i);
 	return crcs;
 }
 
diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c
index 79d9971..75bfad1 100644
--- a/drivers/infiniband/hw/qib/qib_file_ops.c
+++ b/drivers/infiniband/hw/qib/qib_file_ops.c
@@ -1379,17 +1379,17 @@
 		/* find device (with ACTIVE ports) with fewest ctxts in use */
 		for (ndev = 0; ndev < devmax; ndev++) {
 			struct qib_devdata *dd = qib_lookup(ndev);
-			unsigned cused = 0, cfree = 0;
+			unsigned cused = 0, cfree = 0, pusable = 0;
 			if (!dd)
 				continue;
 			if (port && port <= dd->num_pports &&
 			    usable(dd->pport + port - 1))
-				dusable = 1;
+				pusable = 1;
 			else
 				for (i = 0; i < dd->num_pports; i++)
 					if (usable(dd->pport + i))
-						dusable++;
-			if (!dusable)
+						pusable++;
+			if (!pusable)
 				continue;
 			for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts;
 			     ctxt++)
@@ -1397,7 +1397,7 @@
 					cused++;
 				else
 					cfree++;
-			if (cfree && cused < inuse) {
+			if (pusable && cfree && cused < inuse) {
 				udd = dd;
 				inuse = cused;
 			}
diff --git a/drivers/infiniband/hw/qib/qib_iba6120.c b/drivers/infiniband/hw/qib/qib_iba6120.c
index a5e29db..774dea8 100644
--- a/drivers/infiniband/hw/qib/qib_iba6120.c
+++ b/drivers/infiniband/hw/qib/qib_iba6120.c
@@ -2074,7 +2074,7 @@
 }
 
 static void qib_update_6120_usrhead(struct qib_ctxtdata *rcd, u64 hd,
-				    u32 updegr, u32 egrhd)
+				    u32 updegr, u32 egrhd, u32 npkts)
 {
 	qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
 	if (updegr)
diff --git a/drivers/infiniband/hw/qib/qib_iba7220.c b/drivers/infiniband/hw/qib/qib_iba7220.c
index 6fd8d74..127a0d5 100644
--- a/drivers/infiniband/hw/qib/qib_iba7220.c
+++ b/drivers/infiniband/hw/qib/qib_iba7220.c
@@ -2297,7 +2297,7 @@
 	nchipctxts = qib_read_kreg32(dd, kr_portcnt);
 	dd->cspec->numctxts = nchipctxts;
 	if (qib_n_krcv_queues > 1) {
-		dd->qpn_mask = 0x3f;
+		dd->qpn_mask = 0x3e;
 		dd->first_user_ctxt = qib_n_krcv_queues * dd->num_pports;
 		if (dd->first_user_ctxt > nchipctxts)
 			dd->first_user_ctxt = nchipctxts;
@@ -2703,7 +2703,7 @@
 }
 
 static void qib_update_7220_usrhead(struct qib_ctxtdata *rcd, u64 hd,
-				    u32 updegr, u32 egrhd)
+				    u32 updegr, u32 egrhd, u32 npkts)
 {
 	qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
 	if (updegr)
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
index 584d443..abd409d 100644
--- a/drivers/infiniband/hw/qib/qib_iba7322.c
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -71,6 +71,9 @@
 
 static u32 ahb_mod(struct qib_devdata *, int, int, int, u32, u32);
 static void ibsd_wr_allchans(struct qib_pportdata *, int, unsigned, unsigned);
+static void serdes_7322_los_enable(struct qib_pportdata *, int);
+static int serdes_7322_init_old(struct qib_pportdata *);
+static int serdes_7322_init_new(struct qib_pportdata *);
 
 #define BMASK(msb, lsb) (((1 << ((msb) + 1 - (lsb))) - 1) << (lsb))
 
@@ -111,6 +114,21 @@
 module_param_named(singleport, qib_singleport, ushort, S_IRUGO);
 MODULE_PARM_DESC(singleport, "Use only IB port 1; more per-port buffer space");
 
+/*
+ * Receive header queue sizes
+ */
+static unsigned qib_rcvhdrcnt;
+module_param_named(rcvhdrcnt, qib_rcvhdrcnt, uint, S_IRUGO);
+MODULE_PARM_DESC(rcvhdrcnt, "receive header count");
+
+static unsigned qib_rcvhdrsize;
+module_param_named(rcvhdrsize, qib_rcvhdrsize, uint, S_IRUGO);
+MODULE_PARM_DESC(rcvhdrsize, "receive header size in 32-bit words");
+
+static unsigned qib_rcvhdrentsize;
+module_param_named(rcvhdrentsize, qib_rcvhdrentsize, uint, S_IRUGO);
+MODULE_PARM_DESC(rcvhdrentsize, "receive header entry size in 32-bit words");
+
 #define MAX_ATTEN_LEN 64 /* plenty for any real system */
 /* for read back, default index is ~5m copper cable */
 static char txselect_list[MAX_ATTEN_LEN] = "10";
@@ -314,7 +332,7 @@
 #define krp_serdesctrl KREG_IBPORT_IDX(IBSerdesCtrl)
 
 /*
- * Per-context kernel registers.  Acess only with qib_read_kreg_ctxt()
+ * Per-context kernel registers.  Access only with qib_read_kreg_ctxt()
  * or qib_write_kreg_ctxt()
  */
 #define krc_rcvhdraddr KREG_IDX(RcvHdrAddr0)
@@ -544,6 +562,7 @@
 
 #define TXDDS_TABLE_SZ 16 /* number of entries per speed in onchip table */
 #define TXDDS_EXTRA_SZ 13 /* number of extra tx settings entries */
+#define TXDDS_MFG_SZ 2    /* number of mfg tx settings entries */
 #define SERDES_CHANS 4 /* yes, it's obvious, but one less magic number */
 
 #define H1_FORCE_VAL 8
@@ -604,6 +623,7 @@
 	u8 ibmalfusesnap;
 	struct qib_qsfp_data qsfp_data;
 	char epmsgbuf[192]; /* for port error interrupt msg buffer */
+	u8 bounced;
 };
 
 static struct {
@@ -1677,6 +1697,8 @@
 	    (ibcst & SYM_MASK(IBCStatusA_0, LinkSpeedQDR))) {
 		force_h1(ppd);
 		ppd->cpspec->qdr_reforce = 1;
+		if (!ppd->dd->cspec->r1)
+			serdes_7322_los_enable(ppd, 0);
 	} else if (ppd->cpspec->qdr_reforce &&
 		(ibcst & SYM_MASK(IBCStatusA_0, LinkSpeedQDR)) &&
 		 (ibclt == IB_7322_LT_STATE_CFGENH ||
@@ -1692,18 +1714,37 @@
 	      ibclt <= IB_7322_LT_STATE_SLEEPQUIET)))
 		adj_tx_serdes(ppd);
 
-	if (!ppd->cpspec->qdr_dfe_on && ibclt != IB_7322_LT_STATE_LINKUP &&
-	    ibclt <= IB_7322_LT_STATE_SLEEPQUIET) {
-		ppd->cpspec->qdr_dfe_on = 1;
-		ppd->cpspec->qdr_dfe_time = 0;
-		/* On link down, reenable QDR adaptation */
-		qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
-			ppd->dd->cspec->r1 ?
-				    QDR_STATIC_ADAPT_DOWN_R1 :
-				    QDR_STATIC_ADAPT_DOWN);
+	if (ibclt != IB_7322_LT_STATE_LINKUP) {
+		u8 ltstate = qib_7322_phys_portstate(ibcst);
+		u8 pibclt = (u8)SYM_FIELD(ppd->lastibcstat, IBCStatusA_0,
+					  LinkTrainingState);
+		if (!ppd->dd->cspec->r1 &&
+		    pibclt == IB_7322_LT_STATE_LINKUP &&
+		    ltstate != IB_PHYSPORTSTATE_LINK_ERR_RECOVER &&
+		    ltstate != IB_PHYSPORTSTATE_RECOVERY_RETRAIN &&
+		    ltstate != IB_PHYSPORTSTATE_RECOVERY_WAITRMT &&
+		    ltstate != IB_PHYSPORTSTATE_RECOVERY_IDLE)
+			/* If the link went down (but not into recovery),
+			 * turn LOS back on */
+			serdes_7322_los_enable(ppd, 1);
+		if (!ppd->cpspec->qdr_dfe_on &&
+		    ibclt <= IB_7322_LT_STATE_SLEEPQUIET) {
+			ppd->cpspec->qdr_dfe_on = 1;
+			ppd->cpspec->qdr_dfe_time = 0;
+			/* On link down, reenable QDR adaptation */
+			qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
+					    ppd->dd->cspec->r1 ?
+					    QDR_STATIC_ADAPT_DOWN_R1 :
+					    QDR_STATIC_ADAPT_DOWN);
+			printk(KERN_INFO QIB_DRV_NAME
+				" IB%u:%u re-enabled QDR adaptation "
+				"ibclt %x\n", ppd->dd->unit, ppd->port, ibclt);
+		}
 	}
 }
 
+static int qib_7322_set_ib_cfg(struct qib_pportdata *, int, u32);
+
 /*
  * This is per-pport error handling.
  * will likely get it's own MSIx interrupt (one for each port,
@@ -1840,7 +1881,23 @@
 		    IB_PHYSPORTSTATE_DISABLED)
 			qib_set_ib_7322_lstate(ppd, 0,
 			       QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
-		else
+		else {
+			u32 lstate;
+			/*
+			 * We need the current logical link state before
+			 * lflags are set in handle_e_ibstatuschanged.
+			 */
+			lstate = qib_7322_iblink_state(ibcs);
+
+			if (IS_QMH(dd) && !ppd->cpspec->bounced &&
+			    ltstate == IB_PHYSPORTSTATE_LINKUP &&
+			    (lstate >= IB_PORT_INIT &&
+				lstate <= IB_PORT_ACTIVE)) {
+				ppd->cpspec->bounced = 1;
+				qib_7322_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE,
+					IB_LINKCMD_DOWN | IB_LINKINITCMD_POLL);
+			}
+
 			/*
 			 * Since going into a recovery state causes the link
 			 * state to go down and since recovery is transitory,
@@ -1854,6 +1911,7 @@
 			    ltstate != IB_PHYSPORTSTATE_RECOVERY_WAITRMT &&
 			    ltstate != IB_PHYSPORTSTATE_RECOVERY_IDLE)
 				qib_handle_e_ibstatuschanged(ppd, ibcs);
+		}
 	}
 	if (*msg && iserr)
 		qib_dev_porterr(dd, ppd->port, "%s error\n", msg);
@@ -2785,7 +2843,6 @@
 				ctxtrbits &= ~rmask;
 				if (dd->rcd[i]) {
 					qib_kreceive(dd->rcd[i], NULL, &npkts);
-					adjust_rcv_timeout(dd->rcd[i], npkts);
 				}
 			}
 			rmask <<= 1;
@@ -2835,7 +2892,6 @@
 		       (1ULL << QIB_I_RCVURG_LSB)) << rcd->ctxt);
 
 	qib_kreceive(rcd, NULL, &npkts);
-	adjust_rcv_timeout(rcd, npkts);
 
 	return IRQ_HANDLED;
 }
@@ -3157,6 +3213,10 @@
 	case BOARD_QME7342:
 		n = "InfiniPath_QME7342";
 		break;
+	case 8:
+		n = "InfiniPath_QME7362";
+		dd->flags |= QIB_HAS_QSFP;
+		break;
 	case 15:
 		n = "InfiniPath_QLE7342_TEST";
 		dd->flags |= QIB_HAS_QSFP;
@@ -3475,11 +3535,6 @@
 	nchipctxts = qib_read_kreg32(dd, kr_contextcnt);
 	dd->cspec->numctxts = nchipctxts;
 	if (qib_n_krcv_queues > 1 && dd->num_pports) {
-		/*
-		 * Set the mask for which bits from the QPN are used
-		 * to select a context number.
-		 */
-		dd->qpn_mask = 0x3f;
 		dd->first_user_ctxt = NUM_IB_PORTS +
 			(qib_n_krcv_queues - 1) * dd->num_pports;
 		if (dd->first_user_ctxt > nchipctxts)
@@ -3530,8 +3585,11 @@
 
 	/* kr_rcvegrcnt changes based on the number of contexts enabled */
 	dd->cspec->rcvegrcnt = qib_read_kreg32(dd, kr_rcvegrcnt);
-	dd->rcvhdrcnt = max(dd->cspec->rcvegrcnt,
-				dd->num_pports > 1 ? 1024U : 2048U);
+	if (qib_rcvhdrcnt)
+		dd->rcvhdrcnt = max(dd->cspec->rcvegrcnt, qib_rcvhdrcnt);
+	else
+		dd->rcvhdrcnt = max(dd->cspec->rcvegrcnt,
+				    dd->num_pports > 1 ? 1024U : 2048U);
 }
 
 static int qib_7322_get_ib_cfg(struct qib_pportdata *ppd, int which)
@@ -4002,8 +4060,14 @@
 }
 
 static void qib_update_7322_usrhead(struct qib_ctxtdata *rcd, u64 hd,
-				    u32 updegr, u32 egrhd)
+				    u32 updegr, u32 egrhd, u32 npkts)
 {
+	/*
+	 * Need to write timeout register before updating rcvhdrhead to ensure
+	 * that the timer is enabled on reception of a packet.
+	 */
+	if (hd >> IBA7322_HDRHEAD_PKTINT_SHIFT)
+		adjust_rcv_timeout(rcd, npkts);
 	qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
 	qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
 	if (updegr)
@@ -5522,7 +5586,7 @@
 		u64 now = get_jiffies_64();
 		if (time_after64(now, pwrup))
 			break;
-		msleep(1);
+		msleep(20);
 	}
 	ret = qib_refresh_qsfp_cache(ppd, &qd->cache);
 	/*
@@ -5579,6 +5643,7 @@
 	u32 pidx, unit, port, deflt, h1;
 	unsigned long val;
 	int any = 0, seth1;
+	int txdds_size;
 
 	str = txselect_list;
 
@@ -5587,6 +5652,10 @@
 	for (pidx = 0; pidx < dd->num_pports; ++pidx)
 		dd->pport[pidx].cpspec->no_eep = deflt;
 
+	txdds_size = TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ;
+	if (IS_QME(dd) || IS_QMH(dd))
+		txdds_size += TXDDS_MFG_SZ;
+
 	while (*nxt && nxt[1]) {
 		str = ++nxt;
 		unit = simple_strtoul(str, &nxt, 0);
@@ -5609,7 +5678,7 @@
 				;
 			continue;
 		}
-		if (val >= TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ)
+		if (val >= txdds_size)
 			continue;
 		seth1 = 0;
 		h1 = 0; /* gcc thinks it might be used uninitted */
@@ -5661,10 +5730,11 @@
 		return -ENOSPC;
 	}
 	val = simple_strtoul(str, &n, 0);
-	if (n == str || val >= (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ)) {
+	if (n == str || val >= (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ +
+				TXDDS_MFG_SZ)) {
 		printk(KERN_INFO QIB_DRV_NAME
 		       "txselect_values must start with a number < %d\n",
-			TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ);
+			TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ + TXDDS_MFG_SZ);
 		return -EINVAL;
 	}
 	strcpy(txselect_list, str);
@@ -5810,7 +5880,8 @@
 		unsigned n, regno;
 		unsigned long flags;
 
-		if (!dd->qpn_mask || !dd->pport[pidx].link_speed_supported)
+		if (dd->n_krcv_queues < 2 ||
+			!dd->pport[pidx].link_speed_supported)
 			continue;
 
 		ppd = &dd->pport[pidx];
@@ -6097,8 +6168,10 @@
 		ppd++;
 	}
 
-	dd->rcvhdrentsize = QIB_RCVHDR_ENTSIZE;
-	dd->rcvhdrsize = QIB_DFLT_RCVHDRSIZE;
+	dd->rcvhdrentsize = qib_rcvhdrentsize ?
+		qib_rcvhdrentsize : QIB_RCVHDR_ENTSIZE;
+	dd->rcvhdrsize = qib_rcvhdrsize ?
+		qib_rcvhdrsize : QIB_DFLT_RCVHDRSIZE;
 	dd->rhf_offset = dd->rcvhdrentsize - sizeof(u64) / sizeof(u32);
 
 	/* we always allocate at least 2048 bytes for eager buffers */
@@ -6495,7 +6568,7 @@
 		/* make sure we see an updated copy next time around */
 		sendctrl_7322_mod(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
 		sleeps++;
-		msleep(1);
+		msleep(20);
 	}
 
 	switch (which) {
@@ -6993,6 +7066,12 @@
 	{  0, 1,  0, 12 },	/* QMH7342 backplane settings */
 };
 
+static const struct txdds_ent txdds_extra_mfg[TXDDS_MFG_SZ] = {
+	/* amp, pre, main, post */
+	{ 0, 0, 0, 0 },         /* QME7342 mfg settings */
+	{ 0, 0, 0, 6 },         /* QME7342 P2 mfg settings */
+};
+
 static const struct txdds_ent *get_atten_table(const struct txdds_ent *txdds,
 					       unsigned atten)
 {
@@ -7066,6 +7145,16 @@
 		*sdr_dds = &txdds_extra_sdr[idx];
 		*ddr_dds = &txdds_extra_ddr[idx];
 		*qdr_dds = &txdds_extra_qdr[idx];
+	} else if ((IS_QME(ppd->dd) || IS_QMH(ppd->dd)) &&
+		   ppd->cpspec->no_eep < (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ +
+					  TXDDS_MFG_SZ)) {
+		idx = ppd->cpspec->no_eep - (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ);
+		printk(KERN_INFO QIB_DRV_NAME
+			" IB%u:%u use idx %u into txdds_mfg\n",
+			ppd->dd->unit, ppd->port, idx);
+		*sdr_dds = &txdds_extra_mfg[idx];
+		*ddr_dds = &txdds_extra_mfg[idx];
+		*qdr_dds = &txdds_extra_mfg[idx];
 	} else {
 		/* this shouldn't happen, it's range checked */
 		*sdr_dds = txdds_sdr + qib_long_atten;
@@ -7210,9 +7299,30 @@
 	}
 }
 
+static void serdes_7322_los_enable(struct qib_pportdata *ppd, int enable)
+{
+	u64 data = qib_read_kreg_port(ppd, krp_serdesctrl);
+	printk(KERN_INFO QIB_DRV_NAME " IB%u:%u Turning LOS %s\n",
+		ppd->dd->unit, ppd->port, (enable ? "on" : "off"));
+	if (enable)
+		data |= SYM_MASK(IBSerdesCtrl_0, RXLOSEN);
+	else
+		data &= ~SYM_MASK(IBSerdesCtrl_0, RXLOSEN);
+	qib_write_kreg_port(ppd, krp_serdesctrl, data);
+}
+
 static int serdes_7322_init(struct qib_pportdata *ppd)
 {
-	u64 data;
+	int ret = 0;
+	if (ppd->dd->cspec->r1)
+		ret = serdes_7322_init_old(ppd);
+	else
+		ret = serdes_7322_init_new(ppd);
+	return ret;
+}
+
+static int serdes_7322_init_old(struct qib_pportdata *ppd)
+{
 	u32 le_val;
 
 	/*
@@ -7270,11 +7380,7 @@
 	ibsd_wr_allchans(ppd, 20, (2 << 10), BMASK(12, 10)); /* DDR */
 	ibsd_wr_allchans(ppd, 20, (4 << 13), BMASK(15, 13)); /* SDR */
 
-	data = qib_read_kreg_port(ppd, krp_serdesctrl);
-	/* Turn off IB latency mode */
-	data &= ~SYM_MASK(IBSerdesCtrl_0, IB_LAT_MODE);
-	qib_write_kreg_port(ppd, krp_serdesctrl, data |
-		SYM_MASK(IBSerdesCtrl_0, RXLOSEN));
+	serdes_7322_los_enable(ppd, 1);
 
 	/* rxbistena; set 0 to avoid effects of it switch later */
 	ibsd_wr_allchans(ppd, 9, 0 << 15, 1 << 15);
@@ -7314,6 +7420,205 @@
 	return 0;
 }
 
+static int serdes_7322_init_new(struct qib_pportdata *ppd)
+{
+	u64 tstart;
+	u32 le_val, rxcaldone;
+	int chan, chan_done = (1 << SERDES_CHANS) - 1;
+
+	/*
+	 * Initialize the Tx DDS tables.  Also done every QSFP event,
+	 * for adapters with QSFP
+	 */
+	init_txdds_table(ppd, 0);
+
+	/* Clear cmode-override, may be set from older driver */
+	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 0 << 14, 1 << 14);
+
+	/* ensure no tx overrides from earlier driver loads */
+	qib_write_kreg_port(ppd, krp_tx_deemph_override,
+		SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
+		reset_tx_deemphasis_override));
+
+	/* START OF LSI SUGGESTED SERDES BRINGUP */
+	/* Reset - Calibration Setup */
+	/*       Stop DFE adaptation */
+	ibsd_wr_allchans(ppd, 1, 0, BMASK(9, 1));
+	/*       Disable LE1 */
+	ibsd_wr_allchans(ppd, 13, 0, BMASK(5, 5));
+	/*       Disable autoadapt for LE1 */
+	ibsd_wr_allchans(ppd, 1, 0, BMASK(15, 15));
+	/*       Disable LE2 */
+	ibsd_wr_allchans(ppd, 13, 0, BMASK(6, 6));
+	/*       Disable VGA */
+	ibsd_wr_allchans(ppd, 5, 0, BMASK(0, 0));
+	/*       Disable AFE Offset Cancel */
+	ibsd_wr_allchans(ppd, 12, 0, BMASK(12, 12));
+	/*       Disable Timing Loop */
+	ibsd_wr_allchans(ppd, 2, 0, BMASK(3, 3));
+	/*       Disable Frequency Loop */
+	ibsd_wr_allchans(ppd, 2, 0, BMASK(4, 4));
+	/*       Disable Baseline Wander Correction */
+	ibsd_wr_allchans(ppd, 13, 0, BMASK(13, 13));
+	/*       Disable RX Calibration */
+	ibsd_wr_allchans(ppd, 4, 0, BMASK(10, 10));
+	/*       Disable RX Offset Calibration */
+	ibsd_wr_allchans(ppd, 12, 0, BMASK(4, 4));
+	/*       Select BB CDR */
+	ibsd_wr_allchans(ppd, 2, (1 << 15), BMASK(15, 15));
+	/*       CDR Step Size */
+	ibsd_wr_allchans(ppd, 5, 0, BMASK(9, 8));
+	/*       Enable phase Calibration */
+	ibsd_wr_allchans(ppd, 12, (1 << 5), BMASK(5, 5));
+	/*       DFE Bandwidth [2:14-12] */
+	ibsd_wr_allchans(ppd, 2, (4 << 12), BMASK(14, 12));
+	/*       DFE Config (4 taps only) */
+	ibsd_wr_allchans(ppd, 16, 0, BMASK(1, 0));
+	/*       Gain Loop Bandwidth */
+	if (!ppd->dd->cspec->r1) {
+		ibsd_wr_allchans(ppd, 12, 1 << 12, BMASK(12, 12));
+		ibsd_wr_allchans(ppd, 12, 2 << 8, BMASK(11, 8));
+	} else {
+		ibsd_wr_allchans(ppd, 19, (3 << 11), BMASK(13, 11));
+	}
+	/*       Baseline Wander Correction Gain [13:4-0] (leave as default) */
+	/*       Baseline Wander Correction Gain [3:7-5] (leave as default) */
+	/*       Data Rate Select [5:7-6] (leave as default) */
+	/*       RX Parallel Word Width [3:10-8] (leave as default) */
+
+	/* RX RESET */
+	/*       Single- or Multi-channel reset */
+	/*       RX Analog reset */
+	/*       RX Digital reset */
+	ibsd_wr_allchans(ppd, 0, 0, BMASK(15, 13));
+	msleep(20);
+	/*       RX Analog reset */
+	ibsd_wr_allchans(ppd, 0, (1 << 14), BMASK(14, 14));
+	msleep(20);
+	/*       RX Digital reset */
+	ibsd_wr_allchans(ppd, 0, (1 << 13), BMASK(13, 13));
+	msleep(20);
+
+	/* setup LoS params; these are subsystem, so chan == 5 */
+	/* LoS filter threshold_count on, ch 0-3, set to 8 */
+	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 5, 8 << 11, BMASK(14, 11));
+	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 8 << 4, BMASK(7, 4));
+	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 8, 8 << 11, BMASK(14, 11));
+	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 8 << 4, BMASK(7, 4));
+
+	/* LoS filter threshold_count off, ch 0-3, set to 4 */
+	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 6, 4 << 0, BMASK(3, 0));
+	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 7, 4 << 8, BMASK(11, 8));
+	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 4 << 0, BMASK(3, 0));
+	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 10, 4 << 8, BMASK(11, 8));
+
+	/* LoS filter select enabled */
+	ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), 5, 9, 1 << 15, 1 << 15);
+
+	/* LoS target data:  SDR=4, DDR=2, QDR=1 */
+	ibsd_wr_allchans(ppd, 14, (1 << 3), BMASK(5, 3)); /* QDR */
+	ibsd_wr_allchans(ppd, 20, (2 << 10), BMASK(12, 10)); /* DDR */
+	ibsd_wr_allchans(ppd, 20, (4 << 13), BMASK(15, 13)); /* SDR */
+
+	/* Turn on LOS on initial SERDES init */
+	serdes_7322_los_enable(ppd, 1);
+	/* FLoop LOS gate: PPM filter  enabled */
+	ibsd_wr_allchans(ppd, 38, 0 << 10, 1 << 10);
+
+	/* RX LATCH CALIBRATION */
+	/*       Enable Eyefinder Phase Calibration latch */
+	ibsd_wr_allchans(ppd, 15, 1, BMASK(0, 0));
+	/*       Enable RX Offset Calibration latch */
+	ibsd_wr_allchans(ppd, 12, (1 << 4), BMASK(4, 4));
+	msleep(20);
+	/*       Start Calibration */
+	ibsd_wr_allchans(ppd, 4, (1 << 10), BMASK(10, 10));
+	tstart = get_jiffies_64();
+	while (chan_done &&
+	       !time_after64(tstart, tstart + msecs_to_jiffies(500))) {
+		msleep(20);
+		for (chan = 0; chan < SERDES_CHANS; ++chan) {
+			rxcaldone = ahb_mod(ppd->dd, IBSD(ppd->hw_pidx),
+					    (chan + (chan >> 1)),
+					    25, 0, 0);
+			if ((~rxcaldone & (u32)BMASK(9, 9)) == 0 &&
+			    (~chan_done & (1 << chan)) == 0)
+				chan_done &= ~(1 << chan);
+		}
+	}
+	if (chan_done) {
+		printk(KERN_INFO QIB_DRV_NAME
+			 " Serdes %d calibration not done after .5 sec: 0x%x\n",
+			 IBSD(ppd->hw_pidx), chan_done);
+	} else {
+		for (chan = 0; chan < SERDES_CHANS; ++chan) {
+			rxcaldone = ahb_mod(ppd->dd, IBSD(ppd->hw_pidx),
+					    (chan + (chan >> 1)),
+					    25, 0, 0);
+			if ((~rxcaldone & (u32)BMASK(10, 10)) == 0)
+				printk(KERN_INFO QIB_DRV_NAME
+					 " Serdes %d chan %d calibration "
+					 "failed\n", IBSD(ppd->hw_pidx), chan);
+		}
+	}
+
+	/*       Turn off Calibration */
+	ibsd_wr_allchans(ppd, 4, 0, BMASK(10, 10));
+	msleep(20);
+
+	/* BRING RX UP */
+	/*       Set LE2 value (May be overridden in qsfp_7322_event) */
+	le_val = IS_QME(ppd->dd) ? LE2_QME : LE2_DEFAULT;
+	ibsd_wr_allchans(ppd, 13, (le_val << 7), BMASK(9, 7));
+	/*       Set LE2 Loop bandwidth */
+	ibsd_wr_allchans(ppd, 3, (7 << 5), BMASK(7, 5));
+	/*       Enable LE2 */
+	ibsd_wr_allchans(ppd, 13, (1 << 6), BMASK(6, 6));
+	msleep(20);
+	/*       Enable H0 only */
+	ibsd_wr_allchans(ppd, 1, 1, BMASK(9, 1));
+	/* gain hi stop 32 (22) (6:1) lo stop 7 (10:7) target 22 (13) (15:11) */
+	le_val = (ppd->dd->cspec->r1 || IS_QME(ppd->dd)) ? 0xb6c0 : 0x6bac;
+	ibsd_wr_allchans(ppd, 21, le_val, 0xfffe);
+	/*       Enable VGA */
+	ibsd_wr_allchans(ppd, 5, 0, BMASK(0, 0));
+	msleep(20);
+	/*       Set Frequency Loop Bandwidth */
+	ibsd_wr_allchans(ppd, 2, (7 << 5), BMASK(8, 5));
+	/*       Enable Frequency Loop */
+	ibsd_wr_allchans(ppd, 2, (1 << 4), BMASK(4, 4));
+	/*       Set Timing Loop Bandwidth */
+	ibsd_wr_allchans(ppd, 2, 0, BMASK(11, 9));
+	/*       Enable Timing Loop */
+	ibsd_wr_allchans(ppd, 2, (1 << 3), BMASK(3, 3));
+	msleep(50);
+	/*       Enable DFE
+	 *       Set receive adaptation mode.  SDR and DDR adaptation are
+	 *       always on, and QDR is initially enabled; later disabled.
+	 */
+	qib_write_kreg_port(ppd, krp_static_adapt_dis(0), 0ULL);
+	qib_write_kreg_port(ppd, krp_static_adapt_dis(1), 0ULL);
+	qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
+			    ppd->dd->cspec->r1 ?
+			    QDR_STATIC_ADAPT_DOWN_R1 : QDR_STATIC_ADAPT_DOWN);
+	ppd->cpspec->qdr_dfe_on = 1;
+	/*       Disable LE1  */
+	ibsd_wr_allchans(ppd, 13, (0 << 5), (1 << 5));
+	/*       Disable auto adapt for LE1 */
+	ibsd_wr_allchans(ppd, 1, (0 << 15), BMASK(15, 15));
+	msleep(20);
+	/*       Enable AFE Offset Cancel */
+	ibsd_wr_allchans(ppd, 12, (1 << 12), BMASK(12, 12));
+	/*       Enable Baseline Wander Correction */
+	ibsd_wr_allchans(ppd, 12, (1 << 13), BMASK(13, 13));
+	/* Termination: rxtermctrl_r2d addr 11 bits [12:11] = 1 */
+	ibsd_wr_allchans(ppd, 11, (1 << 11), BMASK(12, 11));
+	/* VGA output common mode */
+	ibsd_wr_allchans(ppd, 12, (3 << 2), BMASK(3, 2));
+
+	return 0;
+}
+
 /* start adjust QMH serdes parameters */
 
 static void set_man_code(struct qib_pportdata *ppd, int chan, int code)
diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c
index f3b5039..7896afb 100644
--- a/drivers/infiniband/hw/qib/qib_init.c
+++ b/drivers/infiniband/hw/qib/qib_init.c
@@ -92,9 +92,11 @@
 /* set number of contexts we'll actually use */
 void qib_set_ctxtcnt(struct qib_devdata *dd)
 {
-	if (!qib_cfgctxts)
+	if (!qib_cfgctxts) {
 		dd->cfgctxts = dd->first_user_ctxt + num_online_cpus();
-	else if (qib_cfgctxts < dd->num_pports)
+		if (dd->cfgctxts > dd->ctxtcnt)
+			dd->cfgctxts = dd->ctxtcnt;
+	} else if (qib_cfgctxts < dd->num_pports)
 		dd->cfgctxts = dd->ctxtcnt;
 	else if (qib_cfgctxts <= dd->ctxtcnt)
 		dd->cfgctxts = qib_cfgctxts;
diff --git a/drivers/infiniband/hw/qib/qib_intr.c b/drivers/infiniband/hw/qib/qib_intr.c
index 54a40828..a693c56 100644
--- a/drivers/infiniband/hw/qib/qib_intr.c
+++ b/drivers/infiniband/hw/qib/qib_intr.c
@@ -131,7 +131,8 @@
 			/* start a 75msec timer to clear symbol errors */
 			mod_timer(&ppd->symerr_clear_timer,
 				  msecs_to_jiffies(75));
-		} else if (ltstate == IB_PHYSPORTSTATE_LINKUP) {
+		} else if (ltstate == IB_PHYSPORTSTATE_LINKUP &&
+			   !(ppd->lflags & QIBL_LINKACTIVE)) {
 			/* active, but not active defered */
 			qib_hol_up(ppd); /* useful only for 6120 now */
 			*ppd->statusp |=
diff --git a/drivers/infiniband/hw/qib/qib_keys.c b/drivers/infiniband/hw/qib/qib_keys.c
index 4b80eb1..8fd19a4 100644
--- a/drivers/infiniband/hw/qib/qib_keys.c
+++ b/drivers/infiniband/hw/qib/qib_keys.c
@@ -136,7 +136,6 @@
 	struct qib_mregion *mr;
 	unsigned n, m;
 	size_t off;
-	int ret = 0;
 	unsigned long flags;
 
 	/*
@@ -152,6 +151,8 @@
 		if (!dev->dma_mr)
 			goto bail;
 		atomic_inc(&dev->dma_mr->refcount);
+		spin_unlock_irqrestore(&rkt->lock, flags);
+
 		isge->mr = dev->dma_mr;
 		isge->vaddr = (void *) sge->addr;
 		isge->length = sge->length;
@@ -170,19 +171,34 @@
 		     off + sge->length > mr->length ||
 		     (mr->access_flags & acc) != acc))
 		goto bail;
+	atomic_inc(&mr->refcount);
+	spin_unlock_irqrestore(&rkt->lock, flags);
 
 	off += mr->offset;
-	m = 0;
-	n = 0;
-	while (off >= mr->map[m]->segs[n].length) {
-		off -= mr->map[m]->segs[n].length;
-		n++;
-		if (n >= QIB_SEGSZ) {
-			m++;
-			n = 0;
+	if (mr->page_shift) {
+		/*
+		 * Page sizes are a uniform power of 2, so no loop is
+		 * necessary.  entries_spanned_by_off is the number of times
+		 * the loop below would have executed.
+		 */
+		size_t entries_spanned_by_off;
+
+		entries_spanned_by_off = off >> mr->page_shift;
+		off -= (entries_spanned_by_off << mr->page_shift);
+		m = entries_spanned_by_off/QIB_SEGSZ;
+		n = entries_spanned_by_off%QIB_SEGSZ;
+	} else {
+		m = 0;
+		n = 0;
+		while (off >= mr->map[m]->segs[n].length) {
+			off -= mr->map[m]->segs[n].length;
+			n++;
+			if (n >= QIB_SEGSZ) {
+				m++;
+				n = 0;
+			}
 		}
 	}
-	atomic_inc(&mr->refcount);
 	isge->mr = mr;
 	isge->vaddr = mr->map[m]->segs[n].vaddr + off;
 	isge->length = mr->map[m]->segs[n].length - off;
@@ -190,10 +206,10 @@
 	isge->m = m;
 	isge->n = n;
 ok:
-	ret = 1;
+	return 1;
 bail:
 	spin_unlock_irqrestore(&rkt->lock, flags);
-	return ret;
+	return 0;
 }
 
 /**
@@ -214,7 +230,6 @@
 	struct qib_mregion *mr;
 	unsigned n, m;
 	size_t off;
-	int ret = 0;
 	unsigned long flags;
 
 	/*
@@ -231,6 +246,8 @@
 		if (!dev->dma_mr)
 			goto bail;
 		atomic_inc(&dev->dma_mr->refcount);
+		spin_unlock_irqrestore(&rkt->lock, flags);
+
 		sge->mr = dev->dma_mr;
 		sge->vaddr = (void *) vaddr;
 		sge->length = len;
@@ -248,19 +265,34 @@
 	if (unlikely(vaddr < mr->iova || off + len > mr->length ||
 		     (mr->access_flags & acc) == 0))
 		goto bail;
+	atomic_inc(&mr->refcount);
+	spin_unlock_irqrestore(&rkt->lock, flags);
 
 	off += mr->offset;
-	m = 0;
-	n = 0;
-	while (off >= mr->map[m]->segs[n].length) {
-		off -= mr->map[m]->segs[n].length;
-		n++;
-		if (n >= QIB_SEGSZ) {
-			m++;
-			n = 0;
+	if (mr->page_shift) {
+		/*
+		 * Page sizes are a uniform power of 2, so no loop is
+		 * necessary.  entries_spanned_by_off is the number of times
+		 * the loop below would have executed.
+		 */
+		size_t entries_spanned_by_off;
+
+		entries_spanned_by_off = off >> mr->page_shift;
+		off -= (entries_spanned_by_off << mr->page_shift);
+		m = entries_spanned_by_off/QIB_SEGSZ;
+		n = entries_spanned_by_off%QIB_SEGSZ;
+	} else {
+		m = 0;
+		n = 0;
+		while (off >= mr->map[m]->segs[n].length) {
+			off -= mr->map[m]->segs[n].length;
+			n++;
+			if (n >= QIB_SEGSZ) {
+				m++;
+				n = 0;
+			}
 		}
 	}
-	atomic_inc(&mr->refcount);
 	sge->mr = mr;
 	sge->vaddr = mr->map[m]->segs[n].vaddr + off;
 	sge->length = mr->map[m]->segs[n].length - off;
@@ -268,10 +300,10 @@
 	sge->m = m;
 	sge->n = n;
 ok:
-	ret = 1;
+	return 1;
 bail:
 	spin_unlock_irqrestore(&rkt->lock, flags);
-	return ret;
+	return 0;
 }
 
 /*
diff --git a/drivers/infiniband/hw/qib/qib_mad.c b/drivers/infiniband/hw/qib/qib_mad.c
index 94b0d1f..5ad224e 100644
--- a/drivers/infiniband/hw/qib/qib_mad.c
+++ b/drivers/infiniband/hw/qib/qib_mad.c
@@ -668,8 +668,8 @@
 	lid = be16_to_cpu(pip->lid);
 	/* Must be a valid unicast LID address. */
 	if (lid == 0 || lid >= QIB_MULTICAST_LID_BASE)
-		goto err;
-	if (ppd->lid != lid || ppd->lmc != (pip->mkeyprot_resv_lmc & 7)) {
+		smp->status |= IB_SMP_INVALID_FIELD;
+	else if (ppd->lid != lid || ppd->lmc != (pip->mkeyprot_resv_lmc & 7)) {
 		if (ppd->lid != lid)
 			qib_set_uevent_bits(ppd, _QIB_EVENT_LID_CHANGE_BIT);
 		if (ppd->lmc != (pip->mkeyprot_resv_lmc & 7))
@@ -683,8 +683,8 @@
 	msl = pip->neighbormtu_mastersmsl & 0xF;
 	/* Must be a valid unicast LID address. */
 	if (smlid == 0 || smlid >= QIB_MULTICAST_LID_BASE)
-		goto err;
-	if (smlid != ibp->sm_lid || msl != ibp->sm_sl) {
+		smp->status |= IB_SMP_INVALID_FIELD;
+	else if (smlid != ibp->sm_lid || msl != ibp->sm_sl) {
 		spin_lock_irqsave(&ibp->lock, flags);
 		if (ibp->sm_ah) {
 			if (smlid != ibp->sm_lid)
@@ -707,8 +707,9 @@
 		if (lwe == 0xFF)
 			lwe = ppd->link_width_supported;
 		else if (lwe >= 16 || (lwe & ~ppd->link_width_supported))
-			goto err;
-		set_link_width_enabled(ppd, lwe);
+			smp->status |= IB_SMP_INVALID_FIELD;
+		else if (lwe != ppd->link_width_enabled)
+			set_link_width_enabled(ppd, lwe);
 	}
 
 	lse = pip->linkspeedactive_enabled & 0xF;
@@ -721,8 +722,9 @@
 		if (lse == 15)
 			lse = ppd->link_speed_supported;
 		else if (lse >= 8 || (lse & ~ppd->link_speed_supported))
-			goto err;
-		set_link_speed_enabled(ppd, lse);
+			smp->status |= IB_SMP_INVALID_FIELD;
+		else if (lse != ppd->link_speed_enabled)
+			set_link_speed_enabled(ppd, lse);
 	}
 
 	/* Set link down default state. */
@@ -738,7 +740,7 @@
 					IB_LINKINITCMD_POLL);
 		break;
 	default:
-		goto err;
+		smp->status |= IB_SMP_INVALID_FIELD;
 	}
 
 	ibp->mkeyprot = pip->mkeyprot_resv_lmc >> 6;
@@ -748,15 +750,17 @@
 
 	mtu = ib_mtu_enum_to_int((pip->neighbormtu_mastersmsl >> 4) & 0xF);
 	if (mtu == -1)
-		goto err;
-	qib_set_mtu(ppd, mtu);
+		smp->status |= IB_SMP_INVALID_FIELD;
+	else
+		qib_set_mtu(ppd, mtu);
 
 	/* Set operational VLs */
 	vls = (pip->operationalvl_pei_peo_fpi_fpo >> 4) & 0xF;
 	if (vls) {
 		if (vls > ppd->vls_supported)
-			goto err;
-		(void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_OP_VLS, vls);
+			smp->status |= IB_SMP_INVALID_FIELD;
+		else
+			(void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_OP_VLS, vls);
 	}
 
 	if (pip->mkey_violations == 0)
@@ -770,10 +774,10 @@
 
 	ore = pip->localphyerrors_overrunerrors;
 	if (set_phyerrthreshold(ppd, (ore >> 4) & 0xF))
-		goto err;
+		smp->status |= IB_SMP_INVALID_FIELD;
 
 	if (set_overrunthreshold(ppd, (ore & 0xF)))
-		goto err;
+		smp->status |= IB_SMP_INVALID_FIELD;
 
 	ibp->subnet_timeout = pip->clientrereg_resv_subnetto & 0x1F;
 
@@ -792,7 +796,7 @@
 	state = pip->linkspeed_portstate & 0xF;
 	lstate = (pip->portphysstate_linkdown >> 4) & 0xF;
 	if (lstate && !(state == IB_PORT_DOWN || state == IB_PORT_NOP))
-		goto err;
+		smp->status |= IB_SMP_INVALID_FIELD;
 
 	/*
 	 * Only state changes of DOWN, ARM, and ACTIVE are valid
@@ -812,8 +816,10 @@
 			lstate = QIB_IB_LINKDOWN;
 		else if (lstate == 3)
 			lstate = QIB_IB_LINKDOWN_DISABLE;
-		else
-			goto err;
+		else {
+			smp->status |= IB_SMP_INVALID_FIELD;
+			break;
+		}
 		spin_lock_irqsave(&ppd->lflags_lock, flags);
 		ppd->lflags &= ~QIBL_LINKV;
 		spin_unlock_irqrestore(&ppd->lflags_lock, flags);
@@ -835,8 +841,7 @@
 		qib_set_linkstate(ppd, QIB_IB_LINKACTIVE);
 		break;
 	default:
-		/* XXX We have already partially updated our state! */
-		goto err;
+		smp->status |= IB_SMP_INVALID_FIELD;
 	}
 
 	ret = subn_get_portinfo(smp, ibdev, port);
diff --git a/drivers/infiniband/hw/qib/qib_mr.c b/drivers/infiniband/hw/qib/qib_mr.c
index 5f95f0f..08944e2 100644
--- a/drivers/infiniband/hw/qib/qib_mr.c
+++ b/drivers/infiniband/hw/qib/qib_mr.c
@@ -39,7 +39,6 @@
 /* Fast memory region */
 struct qib_fmr {
 	struct ib_fmr ibfmr;
-	u8 page_shift;
 	struct qib_mregion mr;        /* must be last */
 };
 
@@ -107,6 +106,7 @@
 			goto bail;
 	}
 	mr->mr.mapsz = m;
+	mr->mr.page_shift = 0;
 	mr->mr.max_segs = count;
 
 	/*
@@ -231,6 +231,8 @@
 	mr->mr.access_flags = mr_access_flags;
 	mr->umem = umem;
 
+	if (is_power_of_2(umem->page_size))
+		mr->mr.page_shift = ilog2(umem->page_size);
 	m = 0;
 	n = 0;
 	list_for_each_entry(chunk, &umem->chunk_list, list) {
@@ -390,7 +392,7 @@
 	fmr->mr.offset = 0;
 	fmr->mr.access_flags = mr_access_flags;
 	fmr->mr.max_segs = fmr_attr->max_pages;
-	fmr->page_shift = fmr_attr->page_shift;
+	fmr->mr.page_shift = fmr_attr->page_shift;
 
 	atomic_set(&fmr->mr.refcount, 0);
 	ret = &fmr->ibfmr;
@@ -437,7 +439,7 @@
 	spin_lock_irqsave(&rkt->lock, flags);
 	fmr->mr.user_base = iova;
 	fmr->mr.iova = iova;
-	ps = 1 << fmr->page_shift;
+	ps = 1 << fmr->mr.page_shift;
 	fmr->mr.length = list_len * ps;
 	m = 0;
 	n = 0;
diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c
index 6c39851..e16751f 100644
--- a/drivers/infiniband/hw/qib/qib_qp.c
+++ b/drivers/infiniband/hw/qib/qib_qp.c
@@ -48,13 +48,12 @@
 
 static inline unsigned find_next_offset(struct qib_qpn_table *qpt,
 					struct qpn_map *map, unsigned off,
-					unsigned r)
+					unsigned n)
 {
 	if (qpt->mask) {
 		off++;
-		if ((off & qpt->mask) >> 1 != r)
-			off = ((off & qpt->mask) ?
-				(off | qpt->mask) + 1 : off) | (r << 1);
+		if (((off & qpt->mask) >> 1) >= n)
+			off = (off | qpt->mask) + 2;
 	} else
 		off = find_next_zero_bit(map->page, BITS_PER_PAGE, off);
 	return off;
@@ -123,7 +122,6 @@
 	u32 i, offset, max_scan, qpn;
 	struct qpn_map *map;
 	u32 ret;
-	int r;
 
 	if (type == IB_QPT_SMI || type == IB_QPT_GSI) {
 		unsigned n;
@@ -139,15 +137,11 @@
 		goto bail;
 	}
 
-	r = smp_processor_id();
-	if (r >= dd->n_krcv_queues)
-		r %= dd->n_krcv_queues;
-	qpn = qpt->last + 1;
+	qpn = qpt->last + 2;
 	if (qpn >= QPN_MAX)
 		qpn = 2;
-	if (qpt->mask && ((qpn & qpt->mask) >> 1) != r)
-		qpn = ((qpn & qpt->mask) ? (qpn | qpt->mask) + 1 : qpn) |
-			(r << 1);
+	if (qpt->mask && ((qpn & qpt->mask) >> 1) >= dd->n_krcv_queues)
+		qpn = (qpn | qpt->mask) + 2;
 	offset = qpn & BITS_PER_PAGE_MASK;
 	map = &qpt->map[qpn / BITS_PER_PAGE];
 	max_scan = qpt->nmaps - !offset;
@@ -163,7 +157,8 @@
 				ret = qpn;
 				goto bail;
 			}
-			offset = find_next_offset(qpt, map, offset, r);
+			offset = find_next_offset(qpt, map, offset,
+				dd->n_krcv_queues);
 			qpn = mk_qpn(qpt, map, offset);
 			/*
 			 * This test differs from alloc_pidmap().
@@ -183,13 +178,13 @@
 			if (qpt->nmaps == QPNMAP_ENTRIES)
 				break;
 			map = &qpt->map[qpt->nmaps++];
-			offset = qpt->mask ? (r << 1) : 0;
+			offset = 0;
 		} else if (map < &qpt->map[qpt->nmaps]) {
 			++map;
-			offset = qpt->mask ? (r << 1) : 0;
+			offset = 0;
 		} else {
 			map = &qpt->map[0];
-			offset = qpt->mask ? (r << 1) : 2;
+			offset = 2;
 		}
 		qpn = mk_qpn(qpt, map, offset);
 	}
@@ -468,6 +463,10 @@
 		qp->s_flags &= ~(QIB_S_TIMER | QIB_S_WAIT_RNR);
 		del_timer(&qp->s_timer);
 	}
+
+	if (qp->s_flags & QIB_S_ANY_WAIT_SEND)
+		qp->s_flags &= ~QIB_S_ANY_WAIT_SEND;
+
 	spin_lock(&dev->pending_lock);
 	if (!list_empty(&qp->iowait) && !(qp->s_flags & QIB_S_BUSY)) {
 		qp->s_flags &= ~QIB_S_ANY_WAIT_IO;
@@ -1061,7 +1060,6 @@
 		}
 		qp->ibqp.qp_num = err;
 		qp->port_num = init_attr->port_num;
-		qp->processor_id = smp_processor_id();
 		qib_reset_qp(qp, init_attr->qp_type);
 		break;
 
diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c
index 955fb71..8245237 100644
--- a/drivers/infiniband/hw/qib/qib_rc.c
+++ b/drivers/infiniband/hw/qib/qib_rc.c
@@ -1407,6 +1407,7 @@
 			    struct qib_ctxtdata *rcd)
 {
 	struct qib_swqe *wqe;
+	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
 	enum ib_wc_status status;
 	unsigned long flags;
 	int diff;
@@ -1414,6 +1415,29 @@
 	u32 aeth;
 	u64 val;
 
+	if (opcode != OP(RDMA_READ_RESPONSE_MIDDLE)) {
+		/*
+		 * If ACK'd PSN on SDMA busy list try to make progress to
+		 * reclaim SDMA credits.
+		 */
+		if ((qib_cmp24(psn, qp->s_sending_psn) >= 0) &&
+		    (qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) <= 0)) {
+
+			/*
+			 * If send tasklet not running attempt to progress
+			 * SDMA queue.
+			 */
+			if (!(qp->s_flags & QIB_S_BUSY)) {
+				/* Acquire SDMA Lock */
+				spin_lock_irqsave(&ppd->sdma_lock, flags);
+				/* Invoke sdma make progress */
+				qib_sdma_make_progress(ppd);
+				/* Release SDMA Lock */
+				spin_unlock_irqrestore(&ppd->sdma_lock, flags);
+			}
+		}
+	}
+
 	spin_lock_irqsave(&qp->s_lock, flags);
 
 	/* Ignore invalid responses. */
diff --git a/drivers/infiniband/hw/qib/qib_ud.c b/drivers/infiniband/hw/qib/qib_ud.c
index e1b3da2..4a51fd1 100644
--- a/drivers/infiniband/hw/qib/qib_ud.c
+++ b/drivers/infiniband/hw/qib/qib_ud.c
@@ -445,13 +445,14 @@
 	qkey = be32_to_cpu(ohdr->u.ud.deth[0]);
 	src_qp = be32_to_cpu(ohdr->u.ud.deth[1]) & QIB_QPN_MASK;
 
-	/* Get the number of bytes the message was padded by. */
+	/*
+	 * Get the number of bytes the message was padded by
+	 * and drop incomplete packets.
+	 */
 	pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
-	if (unlikely(tlen < (hdrsize + pad + 4))) {
-		/* Drop incomplete packets. */
-		ibp->n_pkt_drops++;
-		goto bail;
-	}
+	if (unlikely(tlen < (hdrsize + pad + 4)))
+		goto drop;
+
 	tlen -= hdrsize + pad + 4;
 
 	/*
@@ -460,10 +461,8 @@
 	 */
 	if (qp->ibqp.qp_num) {
 		if (unlikely(hdr->lrh[1] == IB_LID_PERMISSIVE ||
-			     hdr->lrh[3] == IB_LID_PERMISSIVE)) {
-			ibp->n_pkt_drops++;
-			goto bail;
-		}
+			     hdr->lrh[3] == IB_LID_PERMISSIVE))
+			goto drop;
 		if (qp->ibqp.qp_num > 1) {
 			u16 pkey1, pkey2;
 
@@ -476,7 +475,7 @@
 						0xF,
 					      src_qp, qp->ibqp.qp_num,
 					      hdr->lrh[3], hdr->lrh[1]);
-				goto bail;
+				return;
 			}
 		}
 		if (unlikely(qkey != qp->qkey)) {
@@ -484,30 +483,24 @@
 				      (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF,
 				      src_qp, qp->ibqp.qp_num,
 				      hdr->lrh[3], hdr->lrh[1]);
-			goto bail;
+			return;
 		}
 		/* Drop invalid MAD packets (see 13.5.3.1). */
 		if (unlikely(qp->ibqp.qp_num == 1 &&
 			     (tlen != 256 ||
-			      (be16_to_cpu(hdr->lrh[0]) >> 12) == 15))) {
-			ibp->n_pkt_drops++;
-			goto bail;
-		}
+			      (be16_to_cpu(hdr->lrh[0]) >> 12) == 15)))
+			goto drop;
 	} else {
 		struct ib_smp *smp;
 
 		/* Drop invalid MAD packets (see 13.5.3.1). */
-		if (tlen != 256 || (be16_to_cpu(hdr->lrh[0]) >> 12) != 15) {
-			ibp->n_pkt_drops++;
-			goto bail;
-		}
+		if (tlen != 256 || (be16_to_cpu(hdr->lrh[0]) >> 12) != 15)
+			goto drop;
 		smp = (struct ib_smp *) data;
 		if ((hdr->lrh[1] == IB_LID_PERMISSIVE ||
 		     hdr->lrh[3] == IB_LID_PERMISSIVE) &&
-		    smp->mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
-			ibp->n_pkt_drops++;
-			goto bail;
-		}
+		    smp->mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
+			goto drop;
 	}
 
 	/*
@@ -519,14 +512,12 @@
 	    opcode == IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE) {
 		wc.ex.imm_data = ohdr->u.ud.imm_data;
 		wc.wc_flags = IB_WC_WITH_IMM;
-		hdrsize += sizeof(u32);
+		tlen -= sizeof(u32);
 	} else if (opcode == IB_OPCODE_UD_SEND_ONLY) {
 		wc.ex.imm_data = 0;
 		wc.wc_flags = 0;
-	} else {
-		ibp->n_pkt_drops++;
-		goto bail;
-	}
+	} else
+		goto drop;
 
 	/*
 	 * A GRH is expected to preceed the data even if not
@@ -556,8 +547,7 @@
 	/* Silently drop packets which are too big. */
 	if (unlikely(wc.byte_len > qp->r_len)) {
 		qp->r_flags |= QIB_R_REUSE_SGE;
-		ibp->n_pkt_drops++;
-		return;
+		goto drop;
 	}
 	if (has_grh) {
 		qib_copy_sge(&qp->r_sge, &hdr->u.l.grh,
@@ -594,5 +584,8 @@
 	qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
 		     (ohdr->bth[0] &
 			cpu_to_be32(IB_BTH_SOLICITED)) != 0);
-bail:;
+	return;
+
+drop:
+	ibp->n_pkt_drops++;
 }
diff --git a/drivers/infiniband/hw/qib/qib_user_sdma.c b/drivers/infiniband/hw/qib/qib_user_sdma.c
index 4c19e06..66208bc 100644
--- a/drivers/infiniband/hw/qib/qib_user_sdma.c
+++ b/drivers/infiniband/hw/qib/qib_user_sdma.c
@@ -382,6 +382,7 @@
 
 		kmem_cache_free(pq->pkt_slab, pkt);
 	}
+	INIT_LIST_HEAD(list);
 }
 
 /*
diff --git a/drivers/infiniband/hw/qib/qib_verbs.h b/drivers/infiniband/hw/qib/qib_verbs.h
index bd57c12..63b22a9 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.h
+++ b/drivers/infiniband/hw/qib/qib_verbs.h
@@ -301,6 +301,7 @@
 	int access_flags;
 	u32 max_segs;           /* number of qib_segs in all the arrays */
 	u32 mapsz;              /* size of the map array */
+	u8  page_shift;         /* 0 - non-uniform/non-power-of-2 sizes */
 	atomic_t refcount;
 	struct qib_segarray *map[0];    /* the segments */
 };
@@ -435,7 +436,6 @@
 	spinlock_t r_lock;      /* used for APM */
 	spinlock_t s_lock;
 	atomic_t s_dma_busy;
-	unsigned processor_id;	/* Processor ID QP is bound to */
 	u32 s_flags;
 	u32 s_cur_size;         /* size of send packet in bytes */
 	u32 s_len;              /* total length of s_sge */
@@ -813,13 +813,8 @@
  */
 static inline void qib_schedule_send(struct qib_qp *qp)
 {
-	if (qib_send_ok(qp)) {
-		if (qp->processor_id == smp_processor_id())
-			queue_work(qib_wq, &qp->s_work);
-		else
-			queue_work_on(qp->processor_id,
-				      qib_wq, &qp->s_work);
-	}
+	if (qib_send_ok(qp))
+		queue_work(qib_wq, &qp->s_work);
 }
 
 static inline int qib_pkey_ok(u16 pkey1, u16 pkey2)
diff --git a/drivers/infiniband/ulp/ipoib/Kconfig b/drivers/infiniband/ulp/ipoib/Kconfig
index 9d9a9dc..55855ee 100644
--- a/drivers/infiniband/ulp/ipoib/Kconfig
+++ b/drivers/infiniband/ulp/ipoib/Kconfig
@@ -1,7 +1,6 @@
 config INFINIBAND_IPOIB
 	tristate "IP-over-InfiniBand"
 	depends on NETDEVICES && INET && (IPV6 || IPV6=n)
-	select INET_LRO
 	---help---
 	  Support for the IP-over-InfiniBand protocol (IPoIB). This
 	  transports IP packets over InfiniBand so you can use your IB
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index 753a983..ab97f92 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -50,7 +50,7 @@
 #include <rdma/ib_verbs.h>
 #include <rdma/ib_pack.h>
 #include <rdma/ib_sa.h>
-#include <linux/inet_lro.h>
+#include <linux/sched.h>
 
 /* constants */
 
@@ -100,9 +100,6 @@
 	IPOIB_MCAST_FLAG_BUSY	  = 2,	/* joining or already joined */
 	IPOIB_MCAST_FLAG_ATTACHED = 3,
 
-	IPOIB_MAX_LRO_DESCRIPTORS = 8,
-	IPOIB_LRO_MAX_AGGR 	  = 64,
-
 	MAX_SEND_CQE		  = 16,
 	IPOIB_CM_COPYBREAK	  = 256,
 };
@@ -262,11 +259,6 @@
 	u16     max_coalesced_frames;
 };
 
-struct ipoib_lro {
-	struct net_lro_mgr lro_mgr;
-	struct net_lro_desc lro_desc[IPOIB_MAX_LRO_DESCRIPTORS];
-};
-
 /*
  * Device private locking: network stack tx_lock protects members used
  * in TX fast path, lock protects everything else.  lock nests inside
@@ -352,8 +344,6 @@
 	int	hca_caps;
 	struct ipoib_ethtool_st ethtool;
 	struct timer_list poll_timer;
-
-	struct ipoib_lro lro;
 };
 
 struct ipoib_ah {
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index bb10041..c1c49f2 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -1480,6 +1480,7 @@
 
 		if (test_bit(IPOIB_FLAG_CSUM, &priv->flags)) {
 			dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
+			priv->dev->features |= NETIF_F_GRO;
 			if (priv->hca_caps & IB_DEVICE_UD_TSO)
 				dev->features |= NETIF_F_TSO;
 		}
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
index 1a1657c..19f7f52 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
@@ -106,63 +106,12 @@
 	return 0;
 }
 
-static const char ipoib_stats_keys[][ETH_GSTRING_LEN] = {
-	"LRO aggregated", "LRO flushed",
-	"LRO avg aggr", "LRO no desc"
-};
-
-static void ipoib_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
-{
-	switch (stringset) {
-	case ETH_SS_STATS:
-		memcpy(data, *ipoib_stats_keys,	sizeof(ipoib_stats_keys));
-		break;
-	}
-}
-
-static int ipoib_get_sset_count(struct net_device *dev, int sset)
-{
-	switch (sset) {
-	case ETH_SS_STATS:
-		return ARRAY_SIZE(ipoib_stats_keys);
-	default:
-		return -EOPNOTSUPP;
-	}
-}
-
-static void ipoib_get_ethtool_stats(struct net_device *dev,
-				struct ethtool_stats *stats, uint64_t *data)
-{
-	struct ipoib_dev_priv *priv = netdev_priv(dev);
-	int index = 0;
-
-	/* Get LRO statistics */
-	data[index++] = priv->lro.lro_mgr.stats.aggregated;
-	data[index++] = priv->lro.lro_mgr.stats.flushed;
-	if (priv->lro.lro_mgr.stats.flushed)
-		data[index++] = priv->lro.lro_mgr.stats.aggregated /
-				priv->lro.lro_mgr.stats.flushed;
-	else
-		data[index++] = 0;
-	data[index++] = priv->lro.lro_mgr.stats.no_desc;
-}
-
-static int ipoib_set_flags(struct net_device *dev, u32 flags)
-{
-	return ethtool_op_set_flags(dev, flags, ETH_FLAG_LRO);
-}
-
 static const struct ethtool_ops ipoib_ethtool_ops = {
 	.get_drvinfo		= ipoib_get_drvinfo,
 	.get_rx_csum		= ipoib_get_rx_csum,
 	.set_tso		= ipoib_set_tso,
 	.get_coalesce		= ipoib_get_coalesce,
 	.set_coalesce		= ipoib_set_coalesce,
-	.get_flags		= ethtool_op_get_flags,
-	.set_flags		= ipoib_set_flags,
-	.get_strings		= ipoib_get_strings,
-	.get_sset_count		= ipoib_get_sset_count,
-	.get_ethtool_stats	= ipoib_get_ethtool_stats,
 };
 
 void ipoib_set_ethtool_ops(struct net_device *dev)
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index dfa7190..806d029 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -295,10 +295,7 @@
 	if (test_bit(IPOIB_FLAG_CSUM, &priv->flags) && likely(wc->csum_ok))
 		skb->ip_summed = CHECKSUM_UNNECESSARY;
 
-	if (dev->features & NETIF_F_LRO)
-		lro_receive_skb(&priv->lro.lro_mgr, skb, NULL);
-	else
-		netif_receive_skb(skb);
+	napi_gro_receive(&priv->napi, skb);
 
 repost:
 	if (unlikely(ipoib_ib_post_receive(dev, wr_id)))
@@ -450,9 +447,6 @@
 	}
 
 	if (done < budget) {
-		if (dev->features & NETIF_F_LRO)
-			lro_flush_all(&priv->lro.lro_mgr);
-
 		napi_complete(napi);
 		if (unlikely(ib_req_notify_cq(priv->recv_cq,
 					      IB_CQ_NEXT_COMP |
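The two ipoib_ib.c hunks above are the core of the LRO-to-GRO conversion: completed receive skbs are handed to napi_gro_receive() instead of the private LRO manager, and the explicit lro_flush_all() can go because napi_complete() already flushes anything GRO is still holding. A minimal sketch of the same pattern in a generic NAPI poll routine (my_poll, my_priv and my_dequeue_rx are illustrative names, not part of this patch):

#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

struct my_priv {
	struct napi_struct napi;
	/* ... rx ring state ... */
};

/* hypothetical helper that returns the next completed rx skb, or NULL */
struct sk_buff *my_dequeue_rx(struct my_priv *priv);

static int my_poll(struct napi_struct *napi, int budget)
{
	struct my_priv *priv = container_of(napi, struct my_priv, napi);
	struct sk_buff *skb;
	int done = 0;

	while (done < budget && (skb = my_dequeue_rx(priv)) != NULL) {
		napi_gro_receive(napi, skb);	/* GRO aggregates eligible TCP segments */
		done++;
	}

	if (done < budget)
		napi_complete(napi);		/* also flushes packets GRO is holding */

	return done;
}
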
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 9ff7bc7..7a07a72 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -60,15 +60,6 @@
 module_param_named(recv_queue_size, ipoib_recvq_size, int, 0444);
 MODULE_PARM_DESC(recv_queue_size, "Number of descriptors in receive queue");
 
-static int lro;
-module_param(lro, bool, 0444);
-MODULE_PARM_DESC(lro,  "Enable LRO (Large Receive Offload)");
-
-static int lro_max_aggr = IPOIB_LRO_MAX_AGGR;
-module_param(lro_max_aggr, int, 0644);
-MODULE_PARM_DESC(lro_max_aggr, "LRO: Max packets to be aggregated "
-		"(default = 64)");
-
 #ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
 int ipoib_debug_level;
 
@@ -976,54 +967,6 @@
 	.create	= ipoib_hard_header,
 };
 
-static int get_skb_hdr(struct sk_buff *skb, void **iphdr,
-		       void **tcph, u64 *hdr_flags, void *priv)
-{
-	unsigned int ip_len;
-	struct iphdr *iph;
-
-	if (unlikely(skb->protocol != htons(ETH_P_IP)))
-		return -1;
-
-	/*
-	 * In the future we may add an else clause that verifies the
-	 * checksum and allows devices which do not calculate checksum
-	 * to use LRO.
-	 */
-	if (unlikely(skb->ip_summed != CHECKSUM_UNNECESSARY))
-		return -1;
-
-	/* Check for non-TCP packet */
-	skb_reset_network_header(skb);
-	iph = ip_hdr(skb);
-	if (iph->protocol != IPPROTO_TCP)
-		return -1;
-
-	ip_len = ip_hdrlen(skb);
-	skb_set_transport_header(skb, ip_len);
-	*tcph = tcp_hdr(skb);
-
-	/* check if IP header and TCP header are complete */
-	if (ntohs(iph->tot_len) < ip_len + tcp_hdrlen(skb))
-		return -1;
-
-	*hdr_flags = LRO_IPV4 | LRO_TCP;
-	*iphdr = iph;
-
-	return 0;
-}
-
-static void ipoib_lro_setup(struct ipoib_dev_priv *priv)
-{
-	priv->lro.lro_mgr.max_aggr	 = lro_max_aggr;
-	priv->lro.lro_mgr.max_desc	 = IPOIB_MAX_LRO_DESCRIPTORS;
-	priv->lro.lro_mgr.lro_arr	 = priv->lro.lro_desc;
-	priv->lro.lro_mgr.get_skb_header = get_skb_hdr;
-	priv->lro.lro_mgr.features	 = LRO_F_NAPI;
-	priv->lro.lro_mgr.dev		 = priv->dev;
-	priv->lro.lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY;
-}
-
 static const struct net_device_ops ipoib_netdev_ops = {
 	.ndo_open		 = ipoib_open,
 	.ndo_stop		 = ipoib_stop,
@@ -1067,8 +1010,6 @@
 
 	priv->dev = dev;
 
-	ipoib_lro_setup(priv);
-
 	spin_lock_init(&priv->lock);
 
 	mutex_init(&priv->vlan_mutex);
@@ -1218,8 +1159,7 @@
 		priv->dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
 	}
 
-	if (lro)
-		priv->dev->features |= NETIF_F_LRO;
+	priv->dev->features |= NETIF_F_GRO;
 
 	if (priv->dev->features & NETIF_F_SG && priv->hca_caps & IB_DEVICE_UD_TSO)
 		priv->dev->features |= NETIF_F_TSO;
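With NETIF_F_GRO now set unconditionally here, the old "lro" module parameter disappears and aggregation is toggled at run time through the generic ethtool path instead, e.g. (ib0 being the usual IPoIB interface name):

	ethtool -K ib0 gro off		# disable aggregation
	ethtool -K ib0 gro on		# re-enable it (the default)
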
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 1e1e347..4b62105 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -441,18 +441,28 @@
 	wait_for_completion(&target->done);
 }
 
+static bool srp_change_state(struct srp_target_port *target,
+			    enum srp_target_state old,
+			    enum srp_target_state new)
+{
+	bool changed = false;
+
+	spin_lock_irq(&target->lock);
+	if (target->state == old) {
+		target->state = new;
+		changed = true;
+	}
+	spin_unlock_irq(&target->lock);
+	return changed;
+}
+
 static void srp_remove_work(struct work_struct *work)
 {
 	struct srp_target_port *target =
 		container_of(work, struct srp_target_port, work);
 
-	spin_lock_irq(target->scsi_host->host_lock);
-	if (target->state != SRP_TARGET_DEAD) {
-		spin_unlock_irq(target->scsi_host->host_lock);
+	if (!srp_change_state(target, SRP_TARGET_DEAD, SRP_TARGET_REMOVED))
 		return;
-	}
-	target->state = SRP_TARGET_REMOVED;
-	spin_unlock_irq(target->scsi_host->host_lock);
 
 	spin_lock(&target->srp_host->target_lock);
 	list_del(&target->list);
@@ -539,33 +549,34 @@
 			scsi_sg_count(scmnd), scmnd->sc_data_direction);
 }
 
-static void srp_remove_req(struct srp_target_port *target, struct srp_request *req)
+static void srp_remove_req(struct srp_target_port *target,
+			   struct srp_request *req, s32 req_lim_delta)
 {
+	unsigned long flags;
+
 	srp_unmap_data(req->scmnd, target, req);
-	list_move_tail(&req->list, &target->free_reqs);
+	spin_lock_irqsave(&target->lock, flags);
+	target->req_lim += req_lim_delta;
+	req->scmnd = NULL;
+	list_add_tail(&req->list, &target->free_reqs);
+	spin_unlock_irqrestore(&target->lock, flags);
 }
 
 static void srp_reset_req(struct srp_target_port *target, struct srp_request *req)
 {
 	req->scmnd->result = DID_RESET << 16;
 	req->scmnd->scsi_done(req->scmnd);
-	srp_remove_req(target, req);
+	srp_remove_req(target, req, 0);
 }
 
 static int srp_reconnect_target(struct srp_target_port *target)
 {
 	struct ib_qp_attr qp_attr;
-	struct srp_request *req, *tmp;
 	struct ib_wc wc;
-	int ret;
+	int i, ret;
 
-	spin_lock_irq(target->scsi_host->host_lock);
-	if (target->state != SRP_TARGET_LIVE) {
-		spin_unlock_irq(target->scsi_host->host_lock);
+	if (!srp_change_state(target, SRP_TARGET_LIVE, SRP_TARGET_CONNECTING))
 		return -EAGAIN;
-	}
-	target->state = SRP_TARGET_CONNECTING;
-	spin_unlock_irq(target->scsi_host->host_lock);
 
 	srp_disconnect_target(target);
 	/*
@@ -590,27 +601,23 @@
 	while (ib_poll_cq(target->send_cq, 1, &wc) > 0)
 		; /* nothing */
 
-	spin_lock_irq(target->scsi_host->host_lock);
-	list_for_each_entry_safe(req, tmp, &target->req_queue, list)
-		srp_reset_req(target, req);
-	spin_unlock_irq(target->scsi_host->host_lock);
+	for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) {
+		struct srp_request *req = &target->req_ring[i];
+		if (req->scmnd)
+			srp_reset_req(target, req);
+	}
 
-	target->rx_head	 = 0;
-	target->tx_head	 = 0;
-	target->tx_tail  = 0;
+	INIT_LIST_HEAD(&target->free_tx);
+	for (i = 0; i < SRP_SQ_SIZE; ++i)
+		list_add(&target->tx_ring[i]->list, &target->free_tx);
 
 	target->qp_in_error = 0;
 	ret = srp_connect_target(target);
 	if (ret)
 		goto err;
 
-	spin_lock_irq(target->scsi_host->host_lock);
-	if (target->state == SRP_TARGET_CONNECTING) {
-		ret = 0;
-		target->state = SRP_TARGET_LIVE;
-	} else
+	if (!srp_change_state(target, SRP_TARGET_CONNECTING, SRP_TARGET_LIVE))
 		ret = -EAGAIN;
-	spin_unlock_irq(target->scsi_host->host_lock);
 
 	return ret;
 
@@ -620,17 +627,20 @@
 
 	/*
 	 * We couldn't reconnect, so kill our target port off.
-	 * However, we have to defer the real removal because we might
-	 * be in the context of the SCSI error handler now, which
-	 * would deadlock if we call scsi_remove_host().
+	 * However, we have to defer the real removal because we
+	 * are in the context of the SCSI error handler now, which
+	 * will deadlock if we call scsi_remove_host().
+	 *
+	 * Schedule our work inside the lock to avoid a race with
+	 * the flush_scheduled_work() in srp_remove_one().
 	 */
-	spin_lock_irq(target->scsi_host->host_lock);
+	spin_lock_irq(&target->lock);
 	if (target->state == SRP_TARGET_CONNECTING) {
 		target->state = SRP_TARGET_DEAD;
 		INIT_WORK(&target->work, srp_remove_work);
 		schedule_work(&target->work);
 	}
-	spin_unlock_irq(target->scsi_host->host_lock);
+	spin_unlock_irq(&target->lock);
 
 	return ret;
 }
@@ -758,7 +768,7 @@
 		struct srp_direct_buf *buf = (void *) cmd->add_data;
 
 		buf->va  = cpu_to_be64(ib_sg_dma_address(ibdev, scat));
-		buf->key = cpu_to_be32(dev->mr->rkey);
+		buf->key = cpu_to_be32(target->rkey);
 		buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat));
 	} else if (srp_map_fmr(target, scat, count, req,
 			       (void *) cmd->add_data)) {
@@ -783,7 +793,7 @@
 			buf->desc_list[i].va  =
 				cpu_to_be64(ib_sg_dma_address(ibdev, sg));
 			buf->desc_list[i].key =
-				cpu_to_be32(dev->mr->rkey);
+				cpu_to_be32(target->rkey);
 			buf->desc_list[i].len = cpu_to_be32(dma_len);
 			datalen += dma_len;
 		}
@@ -796,7 +806,7 @@
 		buf->table_desc.va  =
 			cpu_to_be64(req->cmd->dma + sizeof *cmd + sizeof *buf);
 		buf->table_desc.key =
-			cpu_to_be32(target->srp_host->srp_dev->mr->rkey);
+			cpu_to_be32(target->rkey);
 		buf->table_desc.len =
 			cpu_to_be32(count * sizeof (struct srp_direct_buf));
 
@@ -812,9 +822,23 @@
 }
 
 /*
- * Must be called with target->scsi_host->host_lock held to protect
- * req_lim and tx_head.  Lock cannot be dropped between call here and
- * call to __srp_post_send().
+ * Return an IU and possible credit to the free pool
+ */
+static void srp_put_tx_iu(struct srp_target_port *target, struct srp_iu *iu,
+			  enum srp_iu_type iu_type)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&target->lock, flags);
+	list_add(&iu->list, &target->free_tx);
+	if (iu_type != SRP_IU_RSP)
+		++target->req_lim;
+	spin_unlock_irqrestore(&target->lock, flags);
+}
+
+/*
+ * Must be called with target->lock held to protect req_lim and free_tx.
+ * If IU is not sent, it must be returned using srp_put_tx_iu().
  *
  * Note:
  * An upper limit for the number of allocated information units for each
@@ -833,83 +857,59 @@
 
 	srp_send_completion(target->send_cq, target);
 
-	if (target->tx_head - target->tx_tail >= SRP_SQ_SIZE)
+	if (list_empty(&target->free_tx))
 		return NULL;
 
 	/* Initiator responses to target requests do not consume credits */
-	if (target->req_lim <= rsv && iu_type != SRP_IU_RSP) {
-		++target->zero_req_lim;
-		return NULL;
+	if (iu_type != SRP_IU_RSP) {
+		if (target->req_lim <= rsv) {
+			++target->zero_req_lim;
+			return NULL;
+		}
+
+		--target->req_lim;
 	}
 
-	iu = target->tx_ring[target->tx_head & SRP_SQ_MASK];
-	iu->type = iu_type;
+	iu = list_first_entry(&target->free_tx, struct srp_iu, list);
+	list_del(&iu->list);
 	return iu;
 }
 
-/*
- * Must be called with target->scsi_host->host_lock held to protect
- * req_lim and tx_head.
- */
-static int __srp_post_send(struct srp_target_port *target,
-			   struct srp_iu *iu, int len)
+static int srp_post_send(struct srp_target_port *target,
+			 struct srp_iu *iu, int len)
 {
 	struct ib_sge list;
 	struct ib_send_wr wr, *bad_wr;
-	int ret = 0;
 
 	list.addr   = iu->dma;
 	list.length = len;
-	list.lkey   = target->srp_host->srp_dev->mr->lkey;
+	list.lkey   = target->lkey;
 
 	wr.next       = NULL;
-	wr.wr_id      = target->tx_head & SRP_SQ_MASK;
+	wr.wr_id      = (uintptr_t) iu;
 	wr.sg_list    = &list;
 	wr.num_sge    = 1;
 	wr.opcode     = IB_WR_SEND;
 	wr.send_flags = IB_SEND_SIGNALED;
 
-	ret = ib_post_send(target->qp, &wr, &bad_wr);
-
-	if (!ret) {
-		++target->tx_head;
-		if (iu->type != SRP_IU_RSP)
-			--target->req_lim;
-	}
-
-	return ret;
+	return ib_post_send(target->qp, &wr, &bad_wr);
 }
 
-static int srp_post_recv(struct srp_target_port *target)
+static int srp_post_recv(struct srp_target_port *target, struct srp_iu *iu)
 {
-	unsigned long flags;
-	struct srp_iu *iu;
-	struct ib_sge list;
 	struct ib_recv_wr wr, *bad_wr;
-	unsigned int next;
-	int ret;
-
-	spin_lock_irqsave(target->scsi_host->host_lock, flags);
-
-	next	 = target->rx_head & SRP_RQ_MASK;
-	wr.wr_id = next;
-	iu	 = target->rx_ring[next];
+	struct ib_sge list;
 
 	list.addr   = iu->dma;
 	list.length = iu->size;
-	list.lkey   = target->srp_host->srp_dev->mr->lkey;
+	list.lkey   = target->lkey;
 
 	wr.next     = NULL;
+	wr.wr_id    = (uintptr_t) iu;
 	wr.sg_list  = &list;
 	wr.num_sge  = 1;
 
-	ret = ib_post_recv(target->qp, &wr, &bad_wr);
-	if (!ret)
-		++target->rx_head;
-
-	spin_unlock_irqrestore(target->scsi_host->host_lock, flags);
-
-	return ret;
+	return ib_post_recv(target->qp, &wr, &bad_wr);
 }
 
 static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
@@ -917,23 +917,18 @@
 	struct srp_request *req;
 	struct scsi_cmnd *scmnd;
 	unsigned long flags;
-	s32 delta;
-
-	delta = (s32) be32_to_cpu(rsp->req_lim_delta);
-
-	spin_lock_irqsave(target->scsi_host->host_lock, flags);
-
-	target->req_lim += delta;
-
-	req = &target->req_ring[rsp->tag & ~SRP_TAG_TSK_MGMT];
 
 	if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
-		if (be32_to_cpu(rsp->resp_data_len) < 4)
-			req->tsk_status = -1;
-		else
-			req->tsk_status = rsp->data[3];
-		complete(&req->done);
+		spin_lock_irqsave(&target->lock, flags);
+		target->req_lim += be32_to_cpu(rsp->req_lim_delta);
+		spin_unlock_irqrestore(&target->lock, flags);
+
+		target->tsk_mgmt_status = -1;
+		if (be32_to_cpu(rsp->resp_data_len) >= 4)
+			target->tsk_mgmt_status = rsp->data[3];
+		complete(&target->tsk_mgmt_done);
 	} else {
+		req = &target->req_ring[rsp->tag];
 		scmnd = req->scmnd;
 		if (!scmnd)
 			shost_printk(KERN_ERR, target->scsi_host,
@@ -953,49 +948,42 @@
 		else if (rsp->flags & (SRP_RSP_FLAG_DIOVER | SRP_RSP_FLAG_DIUNDER))
 			scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt));
 
-		if (!req->tsk_mgmt) {
-			scmnd->host_scribble = (void *) -1L;
-			scmnd->scsi_done(scmnd);
-
-			srp_remove_req(target, req);
-		} else
-			req->cmd_done = 1;
+		srp_remove_req(target, req, be32_to_cpu(rsp->req_lim_delta));
+		scmnd->host_scribble = NULL;
+		scmnd->scsi_done(scmnd);
 	}
-
-	spin_unlock_irqrestore(target->scsi_host->host_lock, flags);
 }
 
 static int srp_response_common(struct srp_target_port *target, s32 req_delta,
 			       void *rsp, int len)
 {
-	struct ib_device *dev;
+	struct ib_device *dev = target->srp_host->srp_dev->dev;
 	unsigned long flags;
 	struct srp_iu *iu;
-	int err = 1;
+	int err;
 
-	dev = target->srp_host->srp_dev->dev;
-
-	spin_lock_irqsave(target->scsi_host->host_lock, flags);
+	spin_lock_irqsave(&target->lock, flags);
 	target->req_lim += req_delta;
-
 	iu = __srp_get_tx_iu(target, SRP_IU_RSP);
+	spin_unlock_irqrestore(&target->lock, flags);
+
 	if (!iu) {
 		shost_printk(KERN_ERR, target->scsi_host, PFX
 			     "no IU available to send response\n");
-		goto out;
+		return 1;
 	}
 
 	ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE);
 	memcpy(iu->buf, rsp, len);
 	ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE);
 
-	err = __srp_post_send(target, iu, len);
-	if (err)
+	err = srp_post_send(target, iu, len);
+	if (err) {
 		shost_printk(KERN_ERR, target->scsi_host, PFX
 			     "unable to post response: %d\n", err);
+		srp_put_tx_iu(target, iu, SRP_IU_RSP);
+	}
 
-out:
-	spin_unlock_irqrestore(target->scsi_host->host_lock, flags);
 	return err;
 }
 
@@ -1032,14 +1020,11 @@
 
 static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
 {
-	struct ib_device *dev;
-	struct srp_iu *iu;
+	struct ib_device *dev = target->srp_host->srp_dev->dev;
+	struct srp_iu *iu = (struct srp_iu *) wc->wr_id;
 	int res;
 	u8 opcode;
 
-	iu = target->rx_ring[wc->wr_id];
-
-	dev = target->srp_host->srp_dev->dev;
 	ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_ti_iu_len,
 				   DMA_FROM_DEVICE);
 
@@ -1080,7 +1065,7 @@
 	ib_dma_sync_single_for_device(dev, iu->dma, target->max_ti_iu_len,
 				      DMA_FROM_DEVICE);
 
-	res = srp_post_recv(target);
+	res = srp_post_recv(target, iu);
 	if (res != 0)
 		shost_printk(KERN_ERR, target->scsi_host,
 			     PFX "Recv failed with error code %d\n", res);
@@ -1109,6 +1094,7 @@
 {
 	struct srp_target_port *target = target_ptr;
 	struct ib_wc wc;
+	struct srp_iu *iu;
 
 	while (ib_poll_cq(cq, 1, &wc) > 0) {
 		if (wc.status) {
@@ -1119,18 +1105,19 @@
 			break;
 		}
 
-		++target->tx_tail;
+		iu = (struct srp_iu *) wc.wr_id;
+		list_add(&iu->list, &target->free_tx);
 	}
 }
 
-static int srp_queuecommand_lck(struct scsi_cmnd *scmnd,
-			    void (*done)(struct scsi_cmnd *))
+static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
 {
-	struct srp_target_port *target = host_to_target(scmnd->device->host);
+	struct srp_target_port *target = host_to_target(shost);
 	struct srp_request *req;
 	struct srp_iu *iu;
 	struct srp_cmd *cmd;
 	struct ib_device *dev;
+	unsigned long flags;
 	int len;
 
 	if (target->state == SRP_TARGET_CONNECTING)
@@ -1139,11 +1126,19 @@
 	if (target->state == SRP_TARGET_DEAD ||
 	    target->state == SRP_TARGET_REMOVED) {
 		scmnd->result = DID_BAD_TARGET << 16;
-		done(scmnd);
+		scmnd->scsi_done(scmnd);
 		return 0;
 	}
 
+	spin_lock_irqsave(&target->lock, flags);
 	iu = __srp_get_tx_iu(target, SRP_IU_CMD);
+	if (iu) {
+		req = list_first_entry(&target->free_reqs, struct srp_request,
+				      list);
+		list_del(&req->list);
+	}
+	spin_unlock_irqrestore(&target->lock, flags);
+
 	if (!iu)
 		goto err;
 
@@ -1151,11 +1146,8 @@
 	ib_dma_sync_single_for_cpu(dev, iu->dma, srp_max_iu_len,
 				   DMA_TO_DEVICE);
 
-	req = list_first_entry(&target->free_reqs, struct srp_request, list);
-
-	scmnd->scsi_done     = done;
 	scmnd->result        = 0;
-	scmnd->host_scribble = (void *) (long) req->index;
+	scmnd->host_scribble = (void *) req;
 
 	cmd = iu->buf;
 	memset(cmd, 0, sizeof *cmd);
@@ -1167,37 +1159,38 @@
 
 	req->scmnd    = scmnd;
 	req->cmd      = iu;
-	req->cmd_done = 0;
-	req->tsk_mgmt = NULL;
 
 	len = srp_map_data(scmnd, target, req);
 	if (len < 0) {
 		shost_printk(KERN_ERR, target->scsi_host,
 			     PFX "Failed to map data\n");
-		goto err;
+		goto err_iu;
 	}
 
 	ib_dma_sync_single_for_device(dev, iu->dma, srp_max_iu_len,
 				      DMA_TO_DEVICE);
 
-	if (__srp_post_send(target, iu, len)) {
+	if (srp_post_send(target, iu, len)) {
 		shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n");
 		goto err_unmap;
 	}
 
-	list_move_tail(&req->list, &target->req_queue);
-
 	return 0;
 
 err_unmap:
 	srp_unmap_data(scmnd, target, req);
 
+err_iu:
+	srp_put_tx_iu(target, iu, SRP_IU_CMD);
+
+	spin_lock_irqsave(&target->lock, flags);
+	list_add(&req->list, &target->free_reqs);
+	spin_unlock_irqrestore(&target->lock, flags);
+
 err:
 	return SCSI_MLQUEUE_HOST_BUSY;
 }
 
-static DEF_SCSI_QCMD(srp_queuecommand)
-
 static int srp_alloc_iu_bufs(struct srp_target_port *target)
 {
 	int i;
@@ -1216,6 +1209,8 @@
 						  GFP_KERNEL, DMA_TO_DEVICE);
 		if (!target->tx_ring[i])
 			goto err;
+
+		list_add(&target->tx_ring[i]->list, &target->free_tx);
 	}
 
 	return 0;
@@ -1377,7 +1372,8 @@
 			break;
 
 		for (i = 0; i < SRP_RQ_SIZE; i++) {
-			target->status = srp_post_recv(target);
+			struct srp_iu *iu = target->rx_ring[i];
+			target->status = srp_post_recv(target, iu);
 			if (target->status)
 				break;
 		}
@@ -1442,25 +1438,24 @@
 }
 
 static int srp_send_tsk_mgmt(struct srp_target_port *target,
-			     struct srp_request *req, u8 func)
+			     u64 req_tag, unsigned int lun, u8 func)
 {
 	struct ib_device *dev = target->srp_host->srp_dev->dev;
 	struct srp_iu *iu;
 	struct srp_tsk_mgmt *tsk_mgmt;
 
-	spin_lock_irq(target->scsi_host->host_lock);
-
 	if (target->state == SRP_TARGET_DEAD ||
-	    target->state == SRP_TARGET_REMOVED) {
-		req->scmnd->result = DID_BAD_TARGET << 16;
-		goto out;
-	}
+	    target->state == SRP_TARGET_REMOVED)
+		return -1;
 
-	init_completion(&req->done);
+	init_completion(&target->tsk_mgmt_done);
 
+	spin_lock_irq(&target->lock);
 	iu = __srp_get_tx_iu(target, SRP_IU_TSK_MGMT);
+	spin_unlock_irq(&target->lock);
+
 	if (!iu)
-		goto out;
+		return -1;
 
 	ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt,
 				   DMA_TO_DEVICE);
@@ -1468,70 +1463,46 @@
 	memset(tsk_mgmt, 0, sizeof *tsk_mgmt);
 
 	tsk_mgmt->opcode 	= SRP_TSK_MGMT;
-	tsk_mgmt->lun 		= cpu_to_be64((u64) req->scmnd->device->lun << 48);
-	tsk_mgmt->tag 		= req->index | SRP_TAG_TSK_MGMT;
+	tsk_mgmt->lun		= cpu_to_be64((u64) lun << 48);
+	tsk_mgmt->tag		= req_tag | SRP_TAG_TSK_MGMT;
 	tsk_mgmt->tsk_mgmt_func = func;
-	tsk_mgmt->task_tag 	= req->index;
+	tsk_mgmt->task_tag	= req_tag;
 
 	ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt,
 				      DMA_TO_DEVICE);
-	if (__srp_post_send(target, iu, sizeof *tsk_mgmt))
-		goto out;
+	if (srp_post_send(target, iu, sizeof *tsk_mgmt)) {
+		srp_put_tx_iu(target, iu, SRP_IU_TSK_MGMT);
+		return -1;
+	}
 
-	req->tsk_mgmt = iu;
-
-	spin_unlock_irq(target->scsi_host->host_lock);
-
-	if (!wait_for_completion_timeout(&req->done,
+	if (!wait_for_completion_timeout(&target->tsk_mgmt_done,
 					 msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS)))
 		return -1;
 
 	return 0;
-
-out:
-	spin_unlock_irq(target->scsi_host->host_lock);
-	return -1;
-}
-
-static int srp_find_req(struct srp_target_port *target,
-			struct scsi_cmnd *scmnd,
-			struct srp_request **req)
-{
-	if (scmnd->host_scribble == (void *) -1L)
-		return -1;
-
-	*req = &target->req_ring[(long) scmnd->host_scribble];
-
-	return 0;
 }
 
 static int srp_abort(struct scsi_cmnd *scmnd)
 {
 	struct srp_target_port *target = host_to_target(scmnd->device->host);
-	struct srp_request *req;
+	struct srp_request *req = (struct srp_request *) scmnd->host_scribble;
 	int ret = SUCCESS;
 
 	shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");
 
-	if (target->qp_in_error)
+	if (!req || target->qp_in_error)
 		return FAILED;
-	if (srp_find_req(target, scmnd, &req))
-		return FAILED;
-	if (srp_send_tsk_mgmt(target, req, SRP_TSK_ABORT_TASK))
+	if (srp_send_tsk_mgmt(target, req->index, scmnd->device->lun,
+			      SRP_TSK_ABORT_TASK))
 		return FAILED;
 
-	spin_lock_irq(target->scsi_host->host_lock);
-
-	if (req->cmd_done) {
-		srp_remove_req(target, req);
-		scmnd->scsi_done(scmnd);
-	} else if (!req->tsk_status) {
-		srp_remove_req(target, req);
-		scmnd->result = DID_ABORT << 16;
-	} else
-		ret = FAILED;
-
-	spin_unlock_irq(target->scsi_host->host_lock);
+	if (req->scmnd) {
+		if (!target->tsk_mgmt_status) {
+			srp_remove_req(target, req, 0);
+			scmnd->result = DID_ABORT << 16;
+		} else
+			ret = FAILED;
+	}
 
 	return ret;
 }
@@ -1539,26 +1510,23 @@
 static int srp_reset_device(struct scsi_cmnd *scmnd)
 {
 	struct srp_target_port *target = host_to_target(scmnd->device->host);
-	struct srp_request *req, *tmp;
+	int i;
 
 	shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");
 
 	if (target->qp_in_error)
 		return FAILED;
-	if (srp_find_req(target, scmnd, &req))
+	if (srp_send_tsk_mgmt(target, SRP_TAG_NO_REQ, scmnd->device->lun,
+			      SRP_TSK_LUN_RESET))
 		return FAILED;
-	if (srp_send_tsk_mgmt(target, req, SRP_TSK_LUN_RESET))
-		return FAILED;
-	if (req->tsk_status)
+	if (target->tsk_mgmt_status)
 		return FAILED;
 
-	spin_lock_irq(target->scsi_host->host_lock);
-
-	list_for_each_entry_safe(req, tmp, &target->req_queue, list)
-		if (req->scmnd->device == scmnd->device)
+	for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) {
+		struct srp_request *req = &target->req_ring[i];
+		if (req->scmnd && req->scmnd->device == scmnd->device)
 			srp_reset_req(target, req);
-
-	spin_unlock_irq(target->scsi_host->host_lock);
+	}
 
 	return SUCCESS;
 }
@@ -1987,9 +1955,12 @@
 	target->io_class   = SRP_REV16A_IB_IO_CLASS;
 	target->scsi_host  = target_host;
 	target->srp_host   = host;
+	target->lkey	   = host->srp_dev->mr->lkey;
+	target->rkey	   = host->srp_dev->mr->rkey;
 
+	spin_lock_init(&target->lock);
+	INIT_LIST_HEAD(&target->free_tx);
 	INIT_LIST_HEAD(&target->free_reqs);
-	INIT_LIST_HEAD(&target->req_queue);
 	for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) {
 		target->req_ring[i].index = i;
 		list_add_tail(&target->req_ring[i].list, &target->free_reqs);
@@ -2217,9 +2188,9 @@
 		 */
 		spin_lock(&host->target_lock);
 		list_for_each_entry(target, &host->target_list, list) {
-			spin_lock_irq(target->scsi_host->host_lock);
+			spin_lock_irq(&target->lock);
 			target->state = SRP_TARGET_REMOVED;
-			spin_unlock_irq(target->scsi_host->host_lock);
+			spin_unlock_irq(&target->lock);
 		}
 		spin_unlock(&host->target_lock);
 
@@ -2258,8 +2229,7 @@
 {
 	int ret;
 
-	BUILD_BUG_ON_NOT_POWER_OF_2(SRP_SQ_SIZE);
-	BUILD_BUG_ON_NOT_POWER_OF_2(SRP_RQ_SIZE);
+	BUILD_BUG_ON(FIELD_SIZEOF(struct ib_wc, wr_id) < sizeof(void *));
 
 	if (srp_sg_tablesize > 255) {
 		printk(KERN_WARNING PFX "Clamping srp_sg_tablesize to 255\n");
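A recurring pattern in the rewritten ib_srp.c above: __srp_get_tx_iu() must be called with target->lock held and consumes a req_lim credit for anything but SRP_IU_RSP, and an IU that ends up not being posted has to be handed back through srp_put_tx_iu() so the credit is restored. A condensed sketch of a caller following that protocol, distilled from the queuecommand path above (declarations, data mapping and error labels omitted):

	spin_lock_irqsave(&target->lock, flags);
	iu = __srp_get_tx_iu(target, SRP_IU_CMD);	/* may consume a credit */
	spin_unlock_irqrestore(&target->lock, flags);
	if (!iu)
		return SCSI_MLQUEUE_HOST_BUSY;

	/* ... build the SRP_CMD in iu->buf ... */

	if (srp_post_send(target, iu, len)) {
		srp_put_tx_iu(target, iu, SRP_IU_CMD);	/* return IU and credit */
		return SCSI_MLQUEUE_HOST_BUSY;
	}
	return 0;
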
diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h
index ed0dce9..9dc6fc3 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.h
+++ b/drivers/infiniband/ulp/srp/ib_srp.h
@@ -59,16 +59,15 @@
 
 	SRP_RQ_SHIFT    	= 6,
 	SRP_RQ_SIZE		= 1 << SRP_RQ_SHIFT,
-	SRP_RQ_MASK		= SRP_RQ_SIZE - 1,
 
 	SRP_SQ_SIZE		= SRP_RQ_SIZE,
-	SRP_SQ_MASK		= SRP_SQ_SIZE - 1,
 	SRP_RSP_SQ_SIZE		= 1,
 	SRP_REQ_SQ_SIZE		= SRP_SQ_SIZE - SRP_RSP_SQ_SIZE,
 	SRP_TSK_MGMT_SQ_SIZE	= 1,
 	SRP_CMD_SQ_SIZE		= SRP_REQ_SQ_SIZE - SRP_TSK_MGMT_SQ_SIZE,
 
-	SRP_TAG_TSK_MGMT	= 1 << (SRP_RQ_SHIFT + 1),
+	SRP_TAG_NO_REQ		= ~0U,
+	SRP_TAG_TSK_MGMT	= 1U << 31,
 
 	SRP_FMR_SIZE		= 256,
 	SRP_FMR_POOL_SIZE	= 1024,
@@ -113,15 +112,29 @@
 	struct list_head	list;
 	struct scsi_cmnd       *scmnd;
 	struct srp_iu	       *cmd;
-	struct srp_iu	       *tsk_mgmt;
 	struct ib_pool_fmr     *fmr;
-	struct completion	done;
 	short			index;
-	u8			cmd_done;
-	u8			tsk_status;
 };
 
 struct srp_target_port {
+	/* These are RW in the hot path, and commonly used together */
+	struct list_head	free_tx;
+	struct list_head	free_reqs;
+	spinlock_t		lock;
+	s32			req_lim;
+
+	/* These are read-only in the hot path */
+	struct ib_cq	       *send_cq ____cacheline_aligned_in_smp;
+	struct ib_cq	       *recv_cq;
+	struct ib_qp	       *qp;
+	u32			lkey;
+	u32			rkey;
+	enum srp_target_state	state;
+
+	/* Everything above this point is used in the hot path of
+	 * command processing. Try to keep them packed into cachelines.
+	 */
+
 	__be64			id_ext;
 	__be64			ioc_guid;
 	__be64			service_id;
@@ -138,24 +151,13 @@
 	int			path_query_id;
 
 	struct ib_cm_id	       *cm_id;
-	struct ib_cq	       *recv_cq;
-	struct ib_cq	       *send_cq;
-	struct ib_qp	       *qp;
 
 	int			max_ti_iu_len;
-	s32			req_lim;
 
 	int			zero_req_lim;
 
-	unsigned		rx_head;
-	struct srp_iu	       *rx_ring[SRP_RQ_SIZE];
-
-	unsigned		tx_head;
-	unsigned		tx_tail;
 	struct srp_iu	       *tx_ring[SRP_SQ_SIZE];
-
-	struct list_head	free_reqs;
-	struct list_head	req_queue;
+	struct srp_iu	       *rx_ring[SRP_RQ_SIZE];
 	struct srp_request	req_ring[SRP_CMD_SQ_SIZE];
 
 	struct work_struct	work;
@@ -163,16 +165,18 @@
 	struct list_head	list;
 	struct completion	done;
 	int			status;
-	enum srp_target_state	state;
 	int			qp_in_error;
+
+	struct completion	tsk_mgmt_done;
+	u8			tsk_mgmt_status;
 };
 
 struct srp_iu {
+	struct list_head	list;
 	u64			dma;
 	void		       *buf;
 	size_t			size;
 	enum dma_data_direction	direction;
-	enum srp_iu_type	type;
 };
 
 #endif /* IB_SRP_H */
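The header reshuffle above is a cacheline-layout exercise: the fields written on every request (free lists, lock, req_lim) are packed together at the top, and the read-mostly fields start on a fresh cacheline via ____cacheline_aligned_in_smp. The same idiom in isolation (struct and field names below are only illustrative):

#include <linux/cache.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct foo_queue;				/* opaque, illustrative */

struct foo_port {
	/* written on every I/O, kept together */
	spinlock_t		lock;
	struct list_head	free_tx;
	s32			credits;

	/* read-mostly fields start on their own cacheline */
	struct foo_queue       *qp ____cacheline_aligned_in_smp;
	u32			lkey;
	u32			rkey;
};
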
diff --git a/drivers/input/joystick/Kconfig b/drivers/input/joystick/Kconfig
index 5b59616..56eb471 100644
--- a/drivers/input/joystick/Kconfig
+++ b/drivers/input/joystick/Kconfig
@@ -255,6 +255,16 @@
 	  To compile this driver as a module, choose M here: the
 	  module will be called amijoy.
 
+config JOYSTICK_AS5011
+	tristate "Austria Microsystem AS5011 joystick"
+	depends on I2C
+	help
+	  Say Y here if you have an AS5011 digital joystick connected to your
+	  system.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called as5011.
+
 config JOYSTICK_JOYDUMP
 	tristate "Gameport data dumper"
 	select GAMEPORT
diff --git a/drivers/input/joystick/Makefile b/drivers/input/joystick/Makefile
index f3a8cbe..92dc0de 100644
--- a/drivers/input/joystick/Makefile
+++ b/drivers/input/joystick/Makefile
@@ -7,6 +7,7 @@
 obj-$(CONFIG_JOYSTICK_A3D)		+= a3d.o
 obj-$(CONFIG_JOYSTICK_ADI)		+= adi.o
 obj-$(CONFIG_JOYSTICK_AMIGA)		+= amijoy.o
+obj-$(CONFIG_JOYSTICK_AS5011)		+= as5011.o
 obj-$(CONFIG_JOYSTICK_ANALOG)		+= analog.o
 obj-$(CONFIG_JOYSTICK_COBRA)		+= cobra.o
 obj-$(CONFIG_JOYSTICK_DB9)		+= db9.o
diff --git a/drivers/input/joystick/as5011.c b/drivers/input/joystick/as5011.c
new file mode 100644
index 0000000..f6732b5
--- /dev/null
+++ b/drivers/input/joystick/as5011.c
@@ -0,0 +1,367 @@
+/*
+ * Copyright (c) 2010, 2011 Fabien Marteau <fabien.marteau@armadeus.com>
+ * Sponsored by ARMadeus Systems
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Driver for the Austria Microsystems AS5011 joystick
+ *
+ * TODO:
+ *	- Power on the chip when open() and power down when close()
+ *	- Manage power mode
+ */
+
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/input.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+#include <linux/input/as5011.h>
+#include <linux/slab.h>
+
+#define DRIVER_DESC "Driver for Austria Microsystems AS5011 joystick"
+#define MODULE_DEVICE_ALIAS "as5011"
+
+MODULE_AUTHOR("Fabien Marteau <fabien.marteau@armadeus.com>");
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
+
+/* registers */
+#define AS5011_CTRL1		0x76
+#define AS5011_CTRL2		0x75
+#define AS5011_XP		0x43
+#define AS5011_XN		0x44
+#define AS5011_YP		0x53
+#define AS5011_YN		0x54
+#define AS5011_X_REG		0x41
+#define AS5011_Y_REG		0x42
+#define AS5011_X_RES_INT	0x51
+#define AS5011_Y_RES_INT	0x52
+
+/* CTRL1 bits */
+#define AS5011_CTRL1_LP_PULSED		0x80
+#define AS5011_CTRL1_LP_ACTIVE		0x40
+#define AS5011_CTRL1_LP_CONTINUE	0x20
+#define AS5011_CTRL1_INT_WUP_EN		0x10
+#define AS5011_CTRL1_INT_ACT_EN		0x08
+#define AS5011_CTRL1_EXT_CLK_EN		0x04
+#define AS5011_CTRL1_SOFT_RST		0x02
+#define AS5011_CTRL1_DATA_VALID		0x01
+
+/* CTRL2 bits */
+#define AS5011_CTRL2_EXT_SAMPLE_EN	0x08
+#define AS5011_CTRL2_RC_BIAS_ON		0x04
+#define AS5011_CTRL2_INV_SPINNING	0x02
+
+#define AS5011_MAX_AXIS	80
+#define AS5011_MIN_AXIS	(-80)
+#define AS5011_FUZZ	8
+#define AS5011_FLAT	40
+
+struct as5011_device {
+	struct input_dev *input_dev;
+	struct i2c_client *i2c_client;
+	unsigned int button_gpio;
+	unsigned int button_irq;
+	unsigned int axis_irq;
+};
+
+static int as5011_i2c_write(struct i2c_client *client,
+			    uint8_t aregaddr,
+			    uint8_t avalue)
+{
+	uint8_t data[2] = { aregaddr, avalue };
+	struct i2c_msg msg = {
+		client->addr, I2C_M_IGNORE_NAK, 2, (uint8_t *)data
+	};
+	int error;
+
+	error = i2c_transfer(client->adapter, &msg, 1);
+	return error < 0 ? error : 0;
+}
+
+static int as5011_i2c_read(struct i2c_client *client,
+			   uint8_t aregaddr, signed char *value)
+{
+	uint8_t data[2] = { aregaddr };
+	struct i2c_msg msg_set[2] = {
+		{ client->addr, I2C_M_REV_DIR_ADDR, 1, (uint8_t *)data },
+		{ client->addr, I2C_M_RD | I2C_M_NOSTART, 1, (uint8_t *)data }
+	};
+	int error;
+
+	error = i2c_transfer(client->adapter, msg_set, 2);
+	if (error < 0)
+		return error;
+
+	*value = data[0] & 0x80 ? -1 * (1 + ~data[0]) : data[0];
+	return 0;
+}
+
+static irqreturn_t as5011_button_interrupt(int irq, void *dev_id)
+{
+	struct as5011_device *as5011 = dev_id;
+	int val = gpio_get_value_cansleep(as5011->button_gpio);
+
+	input_report_key(as5011->input_dev, BTN_JOYSTICK, !val);
+	input_sync(as5011->input_dev);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t as5011_axis_interrupt(int irq, void *dev_id)
+{
+	struct as5011_device *as5011 = dev_id;
+	int error;
+	signed char x, y;
+
+	error = as5011_i2c_read(as5011->i2c_client, AS5011_X_RES_INT, &x);
+	if (error < 0)
+		goto out;
+
+	error = as5011_i2c_read(as5011->i2c_client, AS5011_Y_RES_INT, &y);
+	if (error < 0)
+		goto out;
+
+	input_report_abs(as5011->input_dev, ABS_X, x);
+	input_report_abs(as5011->input_dev, ABS_Y, y);
+	input_sync(as5011->input_dev);
+
+out:
+	return IRQ_HANDLED;
+}
+
+static int __devinit as5011_configure_chip(struct as5011_device *as5011,
+				const struct as5011_platform_data *plat_dat)
+{
+	struct i2c_client *client = as5011->i2c_client;
+	int error;
+	signed char value;
+
+	/* chip soft reset */
+	error = as5011_i2c_write(client, AS5011_CTRL1,
+				 AS5011_CTRL1_SOFT_RST);
+	if (error < 0) {
+		dev_err(&client->dev, "Soft reset failed\n");
+		return error;
+	}
+
+	mdelay(10);
+
+	error = as5011_i2c_write(client, AS5011_CTRL1,
+				 AS5011_CTRL1_LP_PULSED |
+				 AS5011_CTRL1_LP_ACTIVE |
+				 AS5011_CTRL1_INT_ACT_EN);
+	if (error < 0) {
+		dev_err(&client->dev, "Power config failed\n");
+		return error;
+	}
+
+	error = as5011_i2c_write(client, AS5011_CTRL2,
+				 AS5011_CTRL2_INV_SPINNING);
+	if (error < 0) {
+		dev_err(&client->dev, "Can't invert spinning\n");
+		return error;
+	}
+
+	/* write threshold */
+	error = as5011_i2c_write(client, AS5011_XP, plat_dat->xp);
+	if (error < 0) {
+		dev_err(&client->dev, "Can't write threshold\n");
+		return error;
+	}
+
+	error = as5011_i2c_write(client, AS5011_XN, plat_dat->xn);
+	if (error < 0) {
+		dev_err(&client->dev, "Can't write threshold\n");
+		return error;
+	}
+
+	error = as5011_i2c_write(client, AS5011_YP, plat_dat->yp);
+	if (error < 0) {
+		dev_err(&client->dev, "Can't write threshold\n");
+		return error;
+	}
+
+	error = as5011_i2c_write(client, AS5011_YN, plat_dat->yn);
+	if (error < 0) {
+		dev_err(&client->dev, "Can't write threshold\n");
+		return error;
+	}
+
+	/* read once so the chip releases its interrupt (IRQ GPIO) line */
+	error = as5011_i2c_read(client, AS5011_X_RES_INT, &value);
+	if (error < 0) {
+		dev_err(&client->dev, "Can't read i2c X resolution value\n");
+		return error;
+	}
+
+	return 0;
+}
+
+static int __devinit as5011_probe(struct i2c_client *client,
+				const struct i2c_device_id *id)
+{
+	const struct as5011_platform_data *plat_data;
+	struct as5011_device *as5011;
+	struct input_dev *input_dev;
+	int irq;
+	int error;
+
+	plat_data = client->dev.platform_data;
+	if (!plat_data)
+		return -EINVAL;
+
+	if (!plat_data->axis_irq) {
+		dev_err(&client->dev, "No axis IRQ?\n");
+		return -EINVAL;
+	}
+
+	if (!i2c_check_functionality(client->adapter,
+				     I2C_FUNC_PROTOCOL_MANGLING)) {
+		dev_err(&client->dev,
+			"need i2c bus that supports protocol mangling\n");
+		return -ENODEV;
+	}
+
+	as5011 = kmalloc(sizeof(struct as5011_device), GFP_KERNEL);
+	input_dev = input_allocate_device();
+	if (!as5011 || !input_dev) {
+		dev_err(&client->dev,
+			"Can't allocate memory for device structure\n");
+		error = -ENOMEM;
+		goto err_free_mem;
+	}
+
+	as5011->i2c_client = client;
+	as5011->input_dev = input_dev;
+	as5011->button_gpio = plat_data->button_gpio;
+	as5011->axis_irq = plat_data->axis_irq;
+
+	input_dev->name = "Austria Microsystem as5011 joystick";
+	input_dev->id.bustype = BUS_I2C;
+	input_dev->dev.parent = &client->dev;
+
+	__set_bit(EV_KEY, input_dev->evbit);
+	__set_bit(EV_ABS, input_dev->evbit);
+	__set_bit(BTN_JOYSTICK, input_dev->keybit);
+
+	input_set_abs_params(input_dev, ABS_X,
+		AS5011_MIN_AXIS, AS5011_MAX_AXIS, AS5011_FUZZ, AS5011_FLAT);
+	input_set_abs_params(as5011->input_dev, ABS_Y,
+		AS5011_MIN_AXIS, AS5011_MAX_AXIS, AS5011_FUZZ, AS5011_FLAT);
+
+	error = gpio_request(as5011->button_gpio, "AS5011 button");
+	if (error < 0) {
+		dev_err(&client->dev, "Failed to request button gpio\n");
+		goto err_free_mem;
+	}
+
+	irq = gpio_to_irq(as5011->button_gpio);
+	if (irq < 0) {
+		dev_err(&client->dev,
+			"Failed to get irq number for button gpio\n");
+		goto err_free_button_gpio;
+	}
+
+	as5011->button_irq = irq;
+
+	error = request_threaded_irq(as5011->button_irq,
+				     NULL, as5011_button_interrupt,
+				     IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+				     "as5011_button", as5011);
+	if (error < 0) {
+		dev_err(&client->dev,
+			"Can't allocate button irq %d\n", as5011->button_irq);
+		goto err_free_button_gpio;
+	}
+
+	error = as5011_configure_chip(as5011, plat_data);
+	if (error)
+		goto err_free_button_irq;
+
+	error = request_threaded_irq(as5011->axis_irq, NULL,
+				     as5011_axis_interrupt,
+				     plat_data->axis_irqflags,
+				     "as5011_joystick", as5011);
+	if (error) {
+		dev_err(&client->dev,
+			"Can't allocate axis irq %d\n", plat_data->axis_irq);
+		goto err_free_button_irq;
+	}
+
+	error = input_register_device(as5011->input_dev);
+	if (error) {
+		dev_err(&client->dev, "Failed to register input device\n");
+		goto err_free_axis_irq;
+	}
+
+	i2c_set_clientdata(client, as5011);
+
+	return 0;
+
+err_free_axis_irq:
+	free_irq(as5011->axis_irq, as5011);
+err_free_button_irq:
+	free_irq(as5011->button_irq, as5011);
+err_free_button_gpio:
+	gpio_free(as5011->button_gpio);
+err_free_mem:
+	input_free_device(input_dev);
+	kfree(as5011);
+
+	return error;
+}
+
+static int __devexit as5011_remove(struct i2c_client *client)
+{
+	struct as5011_device *as5011 = i2c_get_clientdata(client);
+
+	free_irq(as5011->axis_irq, as5011);
+	free_irq(as5011->button_irq, as5011);
+	gpio_free(as5011->button_gpio);
+
+	input_unregister_device(as5011->input_dev);
+	kfree(as5011);
+
+	return 0;
+}
+
+static const struct i2c_device_id as5011_id[] = {
+	{ MODULE_DEVICE_ALIAS, 0 },
+	{ }
+};
+MODULE_DEVICE_TABLE(i2c, as5011_id);
+
+static struct i2c_driver as5011_driver = {
+	.driver = {
+		.name = "as5011",
+	},
+	.probe		= as5011_probe,
+	.remove		= __devexit_p(as5011_remove),
+	.id_table	= as5011_id,
+};
+
+static int __init as5011_init(void)
+{
+	return i2c_add_driver(&as5011_driver);
+}
+module_init(as5011_init);
+
+static void __exit as5011_exit(void)
+{
+	i2c_del_driver(&as5011_driver);
+}
+module_exit(as5011_exit);
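as5011_probe() above pulls everything it needs from client->dev.platform_data, so board code has to register the device with a filled-in struct as5011_platform_data (from <linux/input/as5011.h>). A sketch of such a registration; the I2C address, GPIO/IRQ numbers and threshold values below are made-up example values, not taken from the driver:

#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/input/as5011.h>

static struct as5011_platform_data board_as5011_pdata = {
	.button_gpio	= 42,			/* GPIO wired to the joystick button */
	.axis_irq	= 96,			/* IRQ raised on axis movement */
	.axis_irqflags	= IRQF_TRIGGER_FALLING,
	.xp		= 8,			/* thresholds written to */
	.xn		= -8,			/* AS5011_XP/XN/YP/YN above */
	.yp		= 8,
	.yn		= -8,
};

static struct i2c_board_info board_i2c_devices[] __initdata = {
	{
		I2C_BOARD_INFO("as5011", 0x40),	/* address depends on the board */
		.platform_data = &board_as5011_pdata,
	},
};

The table would then be passed to i2c_register_board_info() from the board's init code in the usual way.
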
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index f829998..7b3c0b8 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -12,18 +12,6 @@
 
 if INPUT_KEYBOARD
 
-config KEYBOARD_AAED2000
-	tristate "AAED-2000 keyboard"
-	depends on MACH_AAED2000
-	select INPUT_POLLDEV
-	default y
-	help
-	  Say Y here to enable the keyboard on the Agilent AAED-2000
-	  development board.
-
-	  To compile this driver as a module, choose M here: the
-	  module will be called aaed2000_kbd.
-
 config KEYBOARD_ADP5520
 	tristate "Keypad Support for ADP5520 PMIC"
 	depends on PMIC_ADP5520
diff --git a/drivers/input/keyboard/Makefile b/drivers/input/keyboard/Makefile
index 8933e9c..4e5571b 100644
--- a/drivers/input/keyboard/Makefile
+++ b/drivers/input/keyboard/Makefile
@@ -4,7 +4,6 @@
 
 # Each configuration option enables a list of files.
 
-obj-$(CONFIG_KEYBOARD_AAED2000)		+= aaed2000_kbd.o
 obj-$(CONFIG_KEYBOARD_ADP5520)		+= adp5520-keys.o
 obj-$(CONFIG_KEYBOARD_ADP5588)		+= adp5588-keys.o
 obj-$(CONFIG_KEYBOARD_AMIGA)		+= amikbd.o
diff --git a/drivers/input/keyboard/aaed2000_kbd.c b/drivers/input/keyboard/aaed2000_kbd.c
deleted file mode 100644
index 18222a6..0000000
--- a/drivers/input/keyboard/aaed2000_kbd.c
+++ /dev/null
@@ -1,186 +0,0 @@
-/*
- *  Keyboard driver for the AAED-2000 dev board
- *
- *  Copyright (c) 2006 Nicolas Bellido Y Ortega
- *
- *  Based on corgikbd.c
- *
- *  This program is free software; you can redistribute it and/or modify
- *  it under the terms of the GNU General Public License version 2 as
- *  published by the Free Software Foundation.
- *
- */
-
-#include <linux/delay.h>
-#include <linux/platform_device.h>
-#include <linux/init.h>
-#include <linux/input-polldev.h>
-#include <linux/interrupt.h>
-#include <linux/jiffies.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-
-#include <mach/hardware.h>
-#include <mach/aaed2000.h>
-
-#define KB_ROWS			12
-#define KB_COLS			8
-#define KB_ROWMASK(r)		(1 << (r))
-#define SCANCODE(r,c)		(((c) * KB_ROWS) + (r))
-#define NR_SCANCODES		(KB_COLS * KB_ROWS)
-
-#define SCAN_INTERVAL		(50) /* ms */
-#define KB_ACTIVATE_DELAY	(20) /* us */
-
-static unsigned char aaedkbd_keycode[NR_SCANCODES] = {
-	KEY_9, KEY_0, KEY_MINUS, KEY_EQUAL, KEY_BACKSPACE, 0, KEY_SPACE, KEY_KP6, 0, KEY_KPDOT, 0, 0,
-	KEY_K, KEY_M, KEY_O, KEY_DOT, KEY_SLASH, 0, KEY_F, 0, 0, 0, KEY_LEFTSHIFT, 0,
-	KEY_I, KEY_P, KEY_LEFTBRACE, KEY_RIGHTBRACE, KEY_BACKSLASH, 0, 0, 0, 0, 0, KEY_RIGHTSHIFT, 0,
-	KEY_8, KEY_L, KEY_SEMICOLON, KEY_APOSTROPHE, KEY_ENTER, 0, 0, 0, 0, 0, 0, 0,
-	KEY_J, KEY_H, KEY_B, KEY_KP8, KEY_KP4, 0, KEY_C, KEY_D, KEY_S, KEY_A, 0, KEY_CAPSLOCK,
-	KEY_Y, KEY_U, KEY_N, KEY_T, 0, 0, KEY_R, KEY_E, KEY_W, KEY_Q, 0, KEY_TAB,
-	KEY_7, KEY_6, KEY_G, 0, KEY_5, 0, KEY_4, KEY_3, KEY_2, KEY_1, 0, KEY_GRAVE,
-	0, 0, KEY_COMMA, 0, KEY_KP2, 0, KEY_V, KEY_LEFTALT, KEY_X, KEY_Z, 0, KEY_LEFTCTRL
-};
-
-struct aaedkbd {
-	unsigned char keycode[ARRAY_SIZE(aaedkbd_keycode)];
-	struct input_polled_dev *poll_dev;
-	int kbdscan_state[KB_COLS];
-	int kbdscan_count[KB_COLS];
-};
-
-#define KBDSCAN_STABLE_COUNT 2
-
-static void aaedkbd_report_col(struct aaedkbd *aaedkbd,
-				unsigned int col, unsigned int rowd)
-{
-	unsigned int scancode, pressed;
-	unsigned int row;
-
-	for (row = 0; row < KB_ROWS; row++) {
-		scancode = SCANCODE(row, col);
-		pressed = rowd & KB_ROWMASK(row);
-
-		input_report_key(aaedkbd->poll_dev->input,
-				 aaedkbd->keycode[scancode], pressed);
-	}
-}
-
-/* Scan the hardware keyboard and push any changes up through the input layer */
-static void aaedkbd_poll(struct input_polled_dev *dev)
-{
-	struct aaedkbd *aaedkbd = dev->private;
-	unsigned int col, rowd;
-
-	col = 0;
-	do {
-		AAEC_GPIO_KSCAN = col + 8;
-		udelay(KB_ACTIVATE_DELAY);
-		rowd = AAED_EXT_GPIO & AAED_EGPIO_KBD_SCAN;
-
-		if (rowd != aaedkbd->kbdscan_state[col]) {
-			aaedkbd->kbdscan_count[col] = 0;
-			aaedkbd->kbdscan_state[col] = rowd;
-		} else if (++aaedkbd->kbdscan_count[col] >= KBDSCAN_STABLE_COUNT) {
-			aaedkbd_report_col(aaedkbd, col, rowd);
-			col++;
-		}
-	} while (col < KB_COLS);
-
-	AAEC_GPIO_KSCAN = 0x07;
-	input_sync(dev->input);
-}
-
-static int __devinit aaedkbd_probe(struct platform_device *pdev)
-{
-	struct aaedkbd *aaedkbd;
-	struct input_polled_dev *poll_dev;
-	struct input_dev *input_dev;
-	int i;
-	int error;
-
-	aaedkbd = kzalloc(sizeof(struct aaedkbd), GFP_KERNEL);
-	poll_dev = input_allocate_polled_device();
-	if (!aaedkbd || !poll_dev) {
-		error = -ENOMEM;
-		goto fail;
-	}
-
-	platform_set_drvdata(pdev, aaedkbd);
-
-	aaedkbd->poll_dev = poll_dev;
-	memcpy(aaedkbd->keycode, aaedkbd_keycode, sizeof(aaedkbd->keycode));
-
-	poll_dev->private = aaedkbd;
-	poll_dev->poll = aaedkbd_poll;
-	poll_dev->poll_interval = SCAN_INTERVAL;
-
-	input_dev = poll_dev->input;
-	input_dev->name = "AAED-2000 Keyboard";
-	input_dev->phys = "aaedkbd/input0";
-	input_dev->id.bustype = BUS_HOST;
-	input_dev->id.vendor = 0x0001;
-	input_dev->id.product = 0x0001;
-	input_dev->id.version = 0x0100;
-	input_dev->dev.parent = &pdev->dev;
-
-	input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP);
-	input_dev->keycode = aaedkbd->keycode;
-	input_dev->keycodesize = sizeof(unsigned char);
-	input_dev->keycodemax = ARRAY_SIZE(aaedkbd_keycode);
-
-	for (i = 0; i < ARRAY_SIZE(aaedkbd_keycode); i++)
-		set_bit(aaedkbd->keycode[i], input_dev->keybit);
-	clear_bit(0, input_dev->keybit);
-
-	error = input_register_polled_device(aaedkbd->poll_dev);
-	if (error)
-		goto fail;
-
-	return 0;
-
- fail:	kfree(aaedkbd);
-	input_free_polled_device(poll_dev);
-	return error;
-}
-
-static int __devexit aaedkbd_remove(struct platform_device *pdev)
-{
-	struct aaedkbd *aaedkbd = platform_get_drvdata(pdev);
-
-	input_unregister_polled_device(aaedkbd->poll_dev);
-	input_free_polled_device(aaedkbd->poll_dev);
-	kfree(aaedkbd);
-
-	return 0;
-}
-
-/* work with hotplug and coldplug */
-MODULE_ALIAS("platform:aaed2000-keyboard");
-
-static struct platform_driver aaedkbd_driver = {
-	.probe		= aaedkbd_probe,
-	.remove		= __devexit_p(aaedkbd_remove),
-	.driver		= {
-		.name	= "aaed2000-keyboard",
-		.owner	= THIS_MODULE,
-	},
-};
-
-static int __init aaedkbd_init(void)
-{
-	return platform_driver_register(&aaedkbd_driver);
-}
-
-static void __exit aaedkbd_exit(void)
-{
-	platform_driver_unregister(&aaedkbd_driver);
-}
-
-module_init(aaedkbd_init);
-module_exit(aaedkbd_exit);
-
-MODULE_AUTHOR("Nicolas Bellido Y Ortega");
-MODULE_DESCRIPTION("AAED-2000 Keyboard Driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/serio/Kconfig b/drivers/input/serio/Kconfig
index bcb1fde..307eef7 100644
--- a/drivers/input/serio/Kconfig
+++ b/drivers/input/serio/Kconfig
@@ -229,7 +229,7 @@
 	tristate "TQC PS/2 multiplexer"
 	help
 	  Say Y here if you have the PS/2 line multiplexer like the one
-	  present on TQC boads.
+	  present on TQC boards.
 
 	  To compile this driver as a module, choose M here: the
 	  module will be called ps2mult.
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index 5ae0fc4..bb9f5d3 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -424,6 +424,13 @@
 			DMI_MATCH(DMI_PRODUCT_VERSION, "0100"),
 		},
 	},
+	{
+		/* Dell Vostro V13 */
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "Vostro V13"),
+		},
+	},
 	{ }
 };
 
@@ -545,6 +552,17 @@
 };
 #endif
 
+static const struct dmi_system_id __initconst i8042_dmi_notimeout_table[] = {
+	{
+		/* Dell Vostro V13 */
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "Vostro V13"),
+		},
+	},
+	{ }
+};
+
 /*
  * Some Wistron based laptops need us to explicitly enable the 'Dritek
  * keyboard extension' to make their extra keys start generating scancodes.
@@ -896,6 +914,9 @@
 	if (dmi_check_system(i8042_dmi_nomux_table))
 		i8042_nomux = true;
 
+	if (dmi_check_system(i8042_dmi_notimeout_table))
+		i8042_notimeout = true;
+
 	if (dmi_check_system(i8042_dmi_dritek_table))
 		i8042_dritek = true;
 #endif /* CONFIG_X86 */
diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
index c04ff00..ac4c936 100644
--- a/drivers/input/serio/i8042.c
+++ b/drivers/input/serio/i8042.c
@@ -63,6 +63,10 @@
 module_param_named(noloop, i8042_noloop, bool, 0);
 MODULE_PARM_DESC(noloop, "Disable the AUX Loopback command while probing for the AUX port");
 
+static bool i8042_notimeout;
+module_param_named(notimeout, i8042_notimeout, bool, 0);
+MODULE_PARM_DESC(notimeout, "Ignore timeouts signalled by i8042");
+
 #ifdef CONFIG_X86
 static bool i8042_dritek;
 module_param_named(dritek, i8042_dritek, bool, 0);
@@ -504,7 +508,7 @@
 	} else {
 
 		dfl = ((str & I8042_STR_PARITY) ? SERIO_PARITY : 0) |
-		      ((str & I8042_STR_TIMEOUT) ? SERIO_TIMEOUT : 0);
+		      ((str & I8042_STR_TIMEOUT && !i8042_notimeout) ? SERIO_TIMEOUT : 0);
 
 		port_no = (str & I8042_STR_AUXDATA) ?
 				I8042_AUX_PORT_NO : I8042_KBD_PORT_NO;
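The new flag is normally set by the DMI quirk added above for the Dell Vostro V13, but the module_param_named() also exposes it for manual use on machines with the same symptom that are not yet in the table:

	i8042.notimeout=1		# on the kernel command line (i8042 built in)
	modprobe i8042 notimeout=1	# when i8042 is built as a module
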
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index 07ac77d..0c9f4b1 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -610,7 +610,7 @@
 
 config TOUCHSCREEN_USB_ETT_TC45USB
 	default y
-	bool "ET&T USB series TC4UM/TC5UH touchscreen controler support" if EMBEDDED
+	bool "ET&T USB series TC4UM/TC5UH touchscreen controller support" if EMBEDDED
 	depends on TOUCHSCREEN_USB_COMPOSITE
 
 config TOUCHSCREEN_USB_NEXIO
diff --git a/drivers/input/touchscreen/ad7879-i2c.c b/drivers/input/touchscreen/ad7879-i2c.c
index d82a38e..4e4e58c 100644
--- a/drivers/input/touchscreen/ad7879-i2c.c
+++ b/drivers/input/touchscreen/ad7879-i2c.c
@@ -10,14 +10,16 @@
 #include <linux/i2c.h>
 #include <linux/module.h>
 #include <linux/types.h>
+#include <linux/pm.h>
 
 #include "ad7879.h"
 
 #define AD7879_DEVID		0x79	/* AD7879-1/AD7889-1 */
 
 #ifdef CONFIG_PM
-static int ad7879_i2c_suspend(struct i2c_client *client, pm_message_t message)
+static int ad7879_i2c_suspend(struct device *dev)
 {
+	struct i2c_client *client = to_i2c_client(dev);
 	struct ad7879 *ts = i2c_get_clientdata(client);
 
 	ad7879_suspend(ts);
@@ -25,17 +27,17 @@
 	return 0;
 }
 
-static int ad7879_i2c_resume(struct i2c_client *client)
+static int ad7879_i2c_resume(struct device *dev)
 {
+	struct i2c_client *client = to_i2c_client(dev);
 	struct ad7879 *ts = i2c_get_clientdata(client);
 
 	ad7879_resume(ts);
 
 	return 0;
 }
-#else
-# define ad7879_i2c_suspend NULL
-# define ad7879_i2c_resume  NULL
+
+static SIMPLE_DEV_PM_OPS(ad7879_i2c_pm, ad7879_i2c_suspend, ad7879_i2c_resume);
 #endif
 
 /* All registers are word-sized.
@@ -117,11 +119,12 @@
 	.driver = {
 		.name	= "ad7879",
 		.owner	= THIS_MODULE,
+#ifdef CONFIG_PM
+		.pm	= &ad7879_i2c_pm,
+#endif
 	},
 	.probe		= ad7879_i2c_probe,
 	.remove		= __devexit_p(ad7879_i2c_remove),
-	.suspend	= ad7879_i2c_suspend,
-	.resume		= ad7879_i2c_resume,
 	.id_table	= ad7879_id,
 };
 
diff --git a/drivers/input/touchscreen/cy8ctmg110_ts.c b/drivers/input/touchscreen/cy8ctmg110_ts.c
index d0c3a72..a93c5c2 100644
--- a/drivers/input/touchscreen/cy8ctmg110_ts.c
+++ b/drivers/input/touchscreen/cy8ctmg110_ts.c
@@ -280,8 +280,9 @@
 }
 
 #ifdef CONFIG_PM
-static int cy8ctmg110_suspend(struct i2c_client *client, pm_message_t mesg)
+static int cy8ctmg110_suspend(struct device *dev)
 {
+	struct i2c_client *client = to_i2c_client(dev);
 	struct cy8ctmg110 *ts = i2c_get_clientdata(client);
 
 	if (device_may_wakeup(&client->dev))
@@ -293,8 +294,9 @@
 	return 0;
 }
 
-static int cy8ctmg110_resume(struct i2c_client *client)
+static int cy8ctmg110_resume(struct device *dev)
 {
+	struct i2c_client *client = to_i2c_client(dev);
 	struct cy8ctmg110 *ts = i2c_get_clientdata(client);
 
 	if (device_may_wakeup(&client->dev))
@@ -305,6 +307,8 @@
 	}
 	return 0;
 }
+
+static SIMPLE_DEV_PM_OPS(cy8ctmg110_pm, cy8ctmg110_suspend, cy8ctmg110_resume);
 #endif
 
 static int __devexit cy8ctmg110_remove(struct i2c_client *client)
@@ -335,14 +339,13 @@
 	.driver		= {
 		.owner	= THIS_MODULE,
 		.name	= CY8CTMG110_DRIVER_NAME,
+#ifdef CONFIG_PM
+		.pm	= &cy8ctmg110_pm,
+#endif
 	},
 	.id_table	= cy8ctmg110_idtable,
 	.probe		= cy8ctmg110_probe,
 	.remove		= __devexit_p(cy8ctmg110_remove),
-#ifdef CONFIG_PM
-	.suspend	= cy8ctmg110_suspend,
-	.resume		= cy8ctmg110_resume,
-#endif
 };
 
 static int __init cy8ctmg110_init(void)
diff --git a/drivers/input/touchscreen/eeti_ts.c b/drivers/input/touchscreen/eeti_ts.c
index 7a3a916..7f8f538 100644
--- a/drivers/input/touchscreen/eeti_ts.c
+++ b/drivers/input/touchscreen/eeti_ts.c
@@ -261,8 +261,9 @@
 }
 
 #ifdef CONFIG_PM
-static int eeti_ts_suspend(struct i2c_client *client, pm_message_t mesg)
+static int eeti_ts_suspend(struct device *dev)
 {
+	struct i2c_client *client = to_i2c_client(dev);
 	struct eeti_ts_priv *priv = i2c_get_clientdata(client);
 	struct input_dev *input_dev = priv->input;
 
@@ -279,8 +280,9 @@
 	return 0;
 }
 
-static int eeti_ts_resume(struct i2c_client *client)
+static int eeti_ts_resume(struct device *dev)
 {
+	struct i2c_client *client = to_i2c_client(dev);
 	struct eeti_ts_priv *priv = i2c_get_clientdata(client);
 	struct input_dev *input_dev = priv->input;
 
@@ -296,9 +298,8 @@
 
 	return 0;
 }
-#else
-#define eeti_ts_suspend NULL
-#define eeti_ts_resume NULL
+
+static SIMPLE_DEV_PM_OPS(eeti_ts_pm, eeti_ts_suspend, eeti_ts_resume);
 #endif
 
 static const struct i2c_device_id eeti_ts_id[] = {
@@ -310,11 +311,12 @@
 static struct i2c_driver eeti_ts_driver = {
 	.driver = {
 		.name = "eeti_ts",
+#ifdef CONFIG_PM
+		.pm = &eeti_ts_pm,
+#endif
 	},
 	.probe = eeti_ts_probe,
 	.remove = __devexit_p(eeti_ts_remove),
-	.suspend = eeti_ts_suspend,
-	.resume = eeti_ts_resume,
 	.id_table = eeti_ts_id,
 };
 
diff --git a/drivers/input/touchscreen/mcs5000_ts.c b/drivers/input/touchscreen/mcs5000_ts.c
index 6ee9940..2d84c80 100644
--- a/drivers/input/touchscreen/mcs5000_ts.c
+++ b/drivers/input/touchscreen/mcs5000_ts.c
@@ -261,25 +261,27 @@
 }
 
 #ifdef CONFIG_PM
-static int mcs5000_ts_suspend(struct i2c_client *client, pm_message_t mesg)
+static int mcs5000_ts_suspend(struct device *dev)
 {
+	struct i2c_client *client = to_i2c_client(dev);
+
 	/* Touch sleep mode */
 	i2c_smbus_write_byte_data(client, MCS5000_TS_OP_MODE, OP_MODE_SLEEP);
 
 	return 0;
 }
 
-static int mcs5000_ts_resume(struct i2c_client *client)
+static int mcs5000_ts_resume(struct device *dev)
 {
+	struct i2c_client *client = to_i2c_client(dev);
 	struct mcs5000_ts_data *data = i2c_get_clientdata(client);
 
 	mcs5000_ts_phys_init(data);
 
 	return 0;
 }
-#else
-#define mcs5000_ts_suspend	NULL
-#define mcs5000_ts_resume	NULL
+
+static SIMPLE_DEV_PM_OPS(mcs5000_ts_pm, mcs5000_ts_suspend, mcs5000_ts_resume);
 #endif
 
 static const struct i2c_device_id mcs5000_ts_id[] = {
@@ -291,10 +293,11 @@
 static struct i2c_driver mcs5000_ts_driver = {
 	.probe		= mcs5000_ts_probe,
 	.remove		= __devexit_p(mcs5000_ts_remove),
-	.suspend	= mcs5000_ts_suspend,
-	.resume		= mcs5000_ts_resume,
 	.driver = {
 		.name = "mcs5000_ts",
+#ifdef CONFIG_PM
+		.pm   = &mcs5000_ts_pm,
+#endif
 	},
 	.id_table	= mcs5000_ts_id,
 };
diff --git a/drivers/input/touchscreen/migor_ts.c b/drivers/input/touchscreen/migor_ts.c
index defe5dd..5803bd0 100644
--- a/drivers/input/touchscreen/migor_ts.c
+++ b/drivers/input/touchscreen/migor_ts.c
@@ -23,6 +23,7 @@
 #include <linux/kernel.h>
 #include <linux/input.h>
 #include <linux/interrupt.h>
+#include <linux/pm.h>
 #include <linux/slab.h>
 #include <asm/io.h>
 #include <linux/i2c.h>
@@ -226,8 +227,9 @@
 	return 0;
 }
 
-static int migor_ts_suspend(struct i2c_client *client, pm_message_t mesg)
+static int migor_ts_suspend(struct device *dev)
 {
+	struct i2c_client *client = to_i2c_client(dev);
 	struct migor_ts_priv *priv = dev_get_drvdata(&client->dev);
 
 	if (device_may_wakeup(&client->dev))
@@ -236,8 +238,9 @@
 	return 0;
 }
 
-static int migor_ts_resume(struct i2c_client *client)
+static int migor_ts_resume(struct device *dev)
 {
+	struct i2c_client *client = to_i2c_client(dev);
 	struct migor_ts_priv *priv = dev_get_drvdata(&client->dev);
 
 	if (device_may_wakeup(&client->dev))
@@ -246,6 +249,8 @@
 	return 0;
 }
 
+static SIMPLE_DEV_PM_OPS(migor_ts_pm, migor_ts_suspend, migor_ts_resume);
+
 static const struct i2c_device_id migor_ts_id[] = {
 	{ "migor_ts", 0 },
 	{ }
@@ -255,11 +260,10 @@
 static struct i2c_driver migor_ts_driver = {
 	.driver = {
 		.name = "migor_ts",
+		.pm = &migor_ts_pm,
 	},
 	.probe = migor_ts_probe,
 	.remove = migor_ts_remove,
-	.suspend = migor_ts_suspend,
-	.resume = migor_ts_resume,
 	.id_table = migor_ts_id,
 };
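The five touchscreen conversions above (ad7879-i2c, cy8ctmg110, eeti_ts, mcs5000, migor_ts) all follow the same recipe: the legacy i2c_driver suspend/resume methods taking an i2c_client are replaced by dev_pm_ops callbacks taking a struct device, bundled with SIMPLE_DEV_PM_OPS() and hooked up through .driver.pm. The bare pattern with placeholder names (foo_*, and foo_hw_sleep/foo_hw_wake are hypothetical helpers, not real functions):

#include <linux/i2c.h>
#include <linux/pm.h>

/* hypothetical hardware helpers, assumed to exist elsewhere */
void foo_hw_sleep(void *drvdata);
void foo_hw_wake(void *drvdata);

#ifdef CONFIG_PM
static int foo_suspend(struct device *dev)
{
	struct i2c_client *client = to_i2c_client(dev);

	foo_hw_sleep(i2c_get_clientdata(client));
	return 0;
}

static int foo_resume(struct device *dev)
{
	struct i2c_client *client = to_i2c_client(dev);

	foo_hw_wake(i2c_get_clientdata(client));
	return 0;
}

static SIMPLE_DEV_PM_OPS(foo_pm, foo_suspend, foo_resume);
#endif

static struct i2c_driver foo_driver = {
	.driver = {
		.name	= "foo",
#ifdef CONFIG_PM
		.pm	= &foo_pm,
#endif
	},
	/* the old .suspend/.resume i2c_driver hooks are no longer set */
};
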
 
diff --git a/drivers/input/touchscreen/wacom_w8001.c b/drivers/input/touchscreen/wacom_w8001.c
index 8ed53ad..5cb8449 100644
--- a/drivers/input/touchscreen/wacom_w8001.c
+++ b/drivers/input/touchscreen/wacom_w8001.c
@@ -3,6 +3,7 @@
  *
  * Copyright (c) 2008 Jaya Kumar
  * Copyright (c) 2010 Red Hat, Inc.
+ * Copyright (c) 2010 - 2011 Ping Cheng, Wacom. <pingc@wacom.com>
  *
  * This file is subject to the terms and conditions of the GNU General Public
  * License. See the file COPYING in the main directory of this archive for
@@ -64,11 +65,11 @@
 
 /* touch query reply packet */
 struct w8001_touch_query {
+	u16 x;
+	u16 y;
 	u8 panel_res;
 	u8 capacity_res;
 	u8 sensor_id;
-	u16 x;
-	u16 y;
 };
 
 /*
@@ -87,9 +88,14 @@
 	char phys[32];
 	int type;
 	unsigned int pktlen;
+	u16 max_touch_x;
+	u16 max_touch_y;
+	u16 max_pen_x;
+	u16 max_pen_y;
+	char name[64];
 };
 
-static void parse_data(u8 *data, struct w8001_coord *coord)
+static void parse_pen_data(u8 *data, struct w8001_coord *coord)
 {
 	memset(coord, 0, sizeof(*coord));
 
@@ -113,11 +119,30 @@
 	coord->tilt_y = data[8] & 0x7F;
 }
 
-static void parse_touch(struct w8001 *w8001)
+static void parse_single_touch(u8 *data, struct w8001_coord *coord)
+{
+	coord->x = (data[1] << 7) | data[2];
+	coord->y = (data[3] << 7) | data[4];
+	coord->tsw = data[0] & 0x01;
+}
+
+static void scale_touch_coordinates(struct w8001 *w8001,
+				    unsigned int *x, unsigned int *y)
+{
+	if (w8001->max_pen_x && w8001->max_touch_x)
+		*x = *x * w8001->max_pen_x / w8001->max_touch_x;
+
+	if (w8001->max_pen_y && w8001->max_touch_y)
+		*y = *y * w8001->max_pen_y / w8001->max_touch_y;
+}
+
+static void parse_multi_touch(struct w8001 *w8001)
 {
 	struct input_dev *dev = w8001->dev;
 	unsigned char *data = w8001->data;
+	unsigned int x, y;
 	int i;
+	int count = 0;
 
 	for (i = 0; i < 2; i++) {
 		bool touch = data[0] & (1 << i);
@@ -125,15 +150,29 @@
 		input_mt_slot(dev, i);
 		input_mt_report_slot_state(dev, MT_TOOL_FINGER, touch);
 		if (touch) {
-			int x = (data[6 * i + 1] << 7) | (data[6 * i + 2]);
-			int y = (data[6 * i + 3] << 7) | (data[6 * i + 4]);
+			x = (data[6 * i + 1] << 7) | data[6 * i + 2];
+			y = (data[6 * i + 3] << 7) | data[6 * i + 4];
 			/* data[5,6] and [11,12] is finger capacity */
 
+			/* scale to pen maximum */
+			scale_touch_coordinates(w8001, &x, &y);
+
 			input_report_abs(dev, ABS_MT_POSITION_X, x);
 			input_report_abs(dev, ABS_MT_POSITION_Y, y);
+			count++;
 		}
 	}
 
+	/* Emulate single touch events when the stylus is out of proximity.
+	 * This keeps single touch backward compatibility consistent
+	 * across all Wacom single touch devices.
+	 */
+	if (w8001->type != BTN_TOOL_PEN &&
+			    w8001->type != BTN_TOOL_RUBBER) {
+		w8001->type = count == 1 ? BTN_TOOL_FINGER : KEY_RESERVED;
+		input_mt_report_pointer_emulation(dev, true);
+	}
+
 	input_sync(dev);
 }
 
@@ -152,6 +191,15 @@
 	query->y = data[5] << 9;
 	query->y |= data[6] << 2;
 	query->y |= (data[2] >> 3) & 0x3;
+
+	/* Early single-finger touch models need the following defaults */
+	if (!query->x && !query->y) {
+		query->x = 1024;
+		query->y = 1024;
+		if (query->panel_res)
+			query->x = query->y = (1 << query->panel_res);
+		query->panel_res = 10;
+	}
 }
 
 static void report_pen_events(struct w8001 *w8001, struct w8001_coord *coord)
@@ -161,16 +209,15 @@
 	/*
 	 * We have 1 bit for proximity (rdy) and 3 bits for tip, side,
 	 * side2/eraser. If rdy && f2 are set, this can be either pen + side2,
-	 * or eraser. assume
+	 * or eraser. Assume:
 	 * - if dev is already in proximity and f2 is toggled → pen + side2
 	 * - if dev comes into proximity with f2 set → eraser
 	 * If f2 disappears after assuming eraser, fake proximity out for
 	 * eraser and in for pen.
 	 */
 
-	if (!w8001->type) {
-		w8001->type = coord->f2 ? BTN_TOOL_RUBBER : BTN_TOOL_PEN;
-	} else if (w8001->type == BTN_TOOL_RUBBER) {
+	switch (w8001->type) {
+	case BTN_TOOL_RUBBER:
 		if (!coord->f2) {
 			input_report_abs(dev, ABS_PRESSURE, 0);
 			input_report_key(dev, BTN_TOUCH, 0);
@@ -180,8 +227,21 @@
 			input_sync(dev);
 			w8001->type = BTN_TOOL_PEN;
 		}
-	} else {
+		break;
+
+	case BTN_TOOL_FINGER:
+		input_report_key(dev, BTN_TOUCH, 0);
+		input_report_key(dev, BTN_TOOL_FINGER, 0);
+		input_sync(dev);
+		/* fall through */
+
+	case KEY_RESERVED:
+		w8001->type = coord->f2 ? BTN_TOOL_RUBBER : BTN_TOOL_PEN;
+		break;
+
+	default:
 		input_report_key(dev, BTN_STYLUS2, coord->f2);
+		break;
 	}
 
 	input_report_abs(dev, ABS_X, coord->x);
@@ -193,7 +253,26 @@
 	input_sync(dev);
 
 	if (!coord->rdy)
-		w8001->type = 0;
+		w8001->type = KEY_RESERVED;
+}
+
+static void report_single_touch(struct w8001 *w8001, struct w8001_coord *coord)
+{
+	struct input_dev *dev = w8001->dev;
+	unsigned int x = coord->x;
+	unsigned int y = coord->y;
+
+	/* scale to pen maximum */
+	scale_touch_coordinates(w8001, &x, &y);
+
+	input_report_abs(dev, ABS_X, x);
+	input_report_abs(dev, ABS_Y, y);
+	input_report_key(dev, BTN_TOUCH, coord->tsw);
+	input_report_key(dev, BTN_TOOL_FINGER, coord->tsw);
+
+	input_sync(dev);
+
+	w8001->type = coord->tsw ? BTN_TOOL_FINGER : KEY_RESERVED;
 }
 
 static irqreturn_t w8001_interrupt(struct serio *serio,
@@ -214,9 +293,18 @@
 
 	case W8001_PKTLEN_TOUCH93 - 1:
 	case W8001_PKTLEN_TOUCH9A - 1:
-		/* ignore one-finger touch packet. */
-		if (w8001->pktlen == w8001->idx)
+		tmp = w8001->data[0] & W8001_TOUCH_BYTE;
+		if (tmp != W8001_TOUCH_BYTE)
+			break;
+
+		if (w8001->pktlen == w8001->idx) {
 			w8001->idx = 0;
+			if (w8001->type != BTN_TOOL_PEN &&
+			    w8001->type != BTN_TOOL_RUBBER) {
+				parse_single_touch(w8001->data, &coord);
+				report_single_touch(w8001, &coord);
+			}
+		}
 		break;
 
 	/* Pen coordinates packet */
@@ -225,18 +313,18 @@
 		if (unlikely(tmp == W8001_TAB_BYTE))
 			break;
 
-		tmp = (w8001->data[0] & W8001_TOUCH_BYTE);
+		tmp = w8001->data[0] & W8001_TOUCH_BYTE;
 		if (tmp == W8001_TOUCH_BYTE)
 			break;
 
 		w8001->idx = 0;
-		parse_data(w8001->data, &coord);
+		parse_pen_data(w8001->data, &coord);
 		report_pen_events(w8001, &coord);
 		break;
 
 	/* control packet */
 	case W8001_PKTLEN_TPCCTL - 1:
-		tmp = (w8001->data[0] & W8001_TOUCH_MASK);
+		tmp = w8001->data[0] & W8001_TOUCH_MASK;
 		if (tmp == W8001_TOUCH_BYTE)
 			break;
 
@@ -249,7 +337,7 @@
 	/* 2 finger touch packet */
 	case W8001_PKTLEN_TOUCH2FG - 1:
 		w8001->idx = 0;
-		parse_touch(w8001);
+		parse_multi_touch(w8001);
 		break;
 	}
 
@@ -279,6 +367,7 @@
 {
 	struct input_dev *dev = w8001->dev;
 	struct w8001_coord coord;
+	struct w8001_touch_query touch;
 	int error;
 
 	error = w8001_command(w8001, W8001_CMD_STOP, false);
@@ -287,14 +376,21 @@
 
 	msleep(250);	/* wait 250ms before querying the device */
 
+	dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
+	strlcat(w8001->name, "Wacom Serial", sizeof(w8001->name));
+
 	/* penabled? */
 	error = w8001_command(w8001, W8001_CMD_QUERY, true);
 	if (!error) {
+		__set_bit(BTN_TOUCH, dev->keybit);
 		__set_bit(BTN_TOOL_PEN, dev->keybit);
 		__set_bit(BTN_TOOL_RUBBER, dev->keybit);
 		__set_bit(BTN_STYLUS, dev->keybit);
 		__set_bit(BTN_STYLUS2, dev->keybit);
-		parse_data(w8001->response, &coord);
+
+		parse_pen_data(w8001->response, &coord);
+		w8001->max_pen_x = coord.x;
+		w8001->max_pen_y = coord.y;
 
 		input_set_abs_params(dev, ABS_X, 0, coord.x, 0, 0);
 		input_set_abs_params(dev, ABS_Y, 0, coord.y, 0, 0);
@@ -303,6 +399,8 @@
 			input_set_abs_params(dev, ABS_TILT_X, 0, coord.tilt_x, 0, 0);
 			input_set_abs_params(dev, ABS_TILT_Y, 0, coord.tilt_y, 0, 0);
 		}
+		w8001->id = 0x90;
+		strlcat(w8001->name, " Penabled", sizeof(w8001->name));
 	}
 
 	/* Touch enabled? */
@@ -313,24 +411,38 @@
 	 * second byte is empty, which indicates touch is not supported.
 	 */
 	if (!error && w8001->response[1]) {
-		struct w8001_touch_query touch;
+		__set_bit(BTN_TOUCH, dev->keybit);
+		__set_bit(BTN_TOOL_FINGER, dev->keybit);
 
 		parse_touchquery(w8001->response, &touch);
+		w8001->max_touch_x = touch.x;
+		w8001->max_touch_y = touch.y;
+
+		/* scale to pen maximum */
+		if (w8001->max_pen_x && w8001->max_pen_y) {
+			touch.x = w8001->max_pen_x;
+			touch.y = w8001->max_pen_y;
+		}
 
 		input_set_abs_params(dev, ABS_X, 0, touch.x, 0, 0);
 		input_set_abs_params(dev, ABS_Y, 0, touch.y, 0, 0);
-		__set_bit(BTN_TOOL_FINGER, dev->keybit);
 
 		switch (touch.sensor_id) {
 		case 0:
 		case 2:
 			w8001->pktlen = W8001_PKTLEN_TOUCH93;
+			w8001->id = 0x93;
+			strlcat(w8001->name, " 1FG", sizeof(w8001->name));
 			break;
+
 		case 1:
 		case 3:
 		case 4:
 			w8001->pktlen = W8001_PKTLEN_TOUCH9A;
+			strlcat(w8001->name, " 1FG", sizeof(w8001->name));
+			w8001->id = 0x9a;
 			break;
+
 		case 5:
 			w8001->pktlen = W8001_PKTLEN_TOUCH2FG;
 
@@ -341,10 +453,18 @@
 						0, touch.y, 0, 0);
 			input_set_abs_params(dev, ABS_MT_TOOL_TYPE,
 						0, MT_TOOL_MAX, 0, 0);
+
+			strlcat(w8001->name, " 2FG", sizeof(w8001->name));
+			if (w8001->max_pen_x && w8001->max_pen_y)
+				w8001->id = 0xE3;
+			else
+				w8001->id = 0xE2;
 			break;
 		}
 	}
 
+	strlcat(w8001->name, " Touchscreen", sizeof(w8001->name));
+
 	return w8001_command(w8001, W8001_CMD_START, false);
 }
 
@@ -384,22 +504,10 @@
 	}
 
 	w8001->serio = serio;
-	w8001->id = serio->id.id;
 	w8001->dev = input_dev;
 	init_completion(&w8001->cmd_done);
 	snprintf(w8001->phys, sizeof(w8001->phys), "%s/input0", serio->phys);
 
-	input_dev->name = "Wacom W8001 Penabled Serial TouchScreen";
-	input_dev->phys = w8001->phys;
-	input_dev->id.bustype = BUS_RS232;
-	input_dev->id.vendor = SERIO_W8001;
-	input_dev->id.product = w8001->id;
-	input_dev->id.version = 0x0100;
-	input_dev->dev.parent = &serio->dev;
-
-	input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
-	__set_bit(BTN_TOUCH, input_dev->keybit);
-
 	serio_set_drvdata(serio, w8001);
 	err = serio_open(serio, drv);
 	if (err)
@@ -409,6 +517,14 @@
 	if (err)
 		goto fail3;
 
+	input_dev->name = w8001->name;
+	input_dev->phys = w8001->phys;
+	input_dev->id.product = w8001->id;
+	input_dev->id.bustype = BUS_RS232;
+	input_dev->id.vendor = 0x056a;
+	input_dev->id.version = 0x0100;
+	input_dev->dev.parent = &serio->dev;
+
 	err = input_register_device(w8001->dev);
 	if (err)
 		goto fail3;
diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c
index 178942a..8a3c5cf 100644
--- a/drivers/isdn/gigaset/bas-gigaset.c
+++ b/drivers/isdn/gigaset/bas-gigaset.c
@@ -2318,7 +2318,7 @@
 		 __func__, le16_to_cpu(udev->descriptor.idVendor),
 		 le16_to_cpu(udev->descriptor.idProduct));
 
-	/* allocate memory for our device state and intialize it */
+	/* allocate memory for our device state and initialize it */
 	cs = gigaset_initcs(driver, BAS_CHANNELS, 0, 0, cidmode,
 			    GIGASET_MODULENAME);
 	if (!cs)
@@ -2576,7 +2576,7 @@
 {
 	int result;
 
-	/* allocate memory for our driver state and intialize it */
+	/* allocate memory for our driver state and initialize it */
 	driver = gigaset_initdriver(GIGASET_MINOR, GIGASET_MINORS,
 				    GIGASET_MODULENAME, GIGASET_DEVNAME,
 				    &gigops, THIS_MODULE);
diff --git a/drivers/isdn/gigaset/ser-gigaset.c b/drivers/isdn/gigaset/ser-gigaset.c
index d151dcb..0ef09d0 100644
--- a/drivers/isdn/gigaset/ser-gigaset.c
+++ b/drivers/isdn/gigaset/ser-gigaset.c
@@ -513,7 +513,7 @@
 		return -ENODEV;
 	}
 
-	/* allocate memory for our device state and intialize it */
+	/* allocate memory for our device state and initialize it */
 	cs = gigaset_initcs(driver, 1, 1, 0, cidmode, GIGASET_MODULENAME);
 	if (!cs)
 		goto error;
@@ -771,7 +771,7 @@
 		return rc;
 	}
 
-	/* allocate memory for our driver state and intialize it */
+	/* allocate memory for our driver state and initialize it */
 	driver = gigaset_initdriver(GIGASET_MINOR, GIGASET_MINORS,
 					  GIGASET_MODULENAME, GIGASET_DEVNAME,
 					  &ops, THIS_MODULE);
diff --git a/drivers/isdn/gigaset/usb-gigaset.c b/drivers/isdn/gigaset/usb-gigaset.c
index 4a66338..5e3300d 100644
--- a/drivers/isdn/gigaset/usb-gigaset.c
+++ b/drivers/isdn/gigaset/usb-gigaset.c
@@ -695,7 +695,7 @@
 
 	dev_info(&udev->dev, "%s: Device matched ... !\n", __func__);
 
-	/* allocate memory for our device state and intialize it */
+	/* allocate memory for our device state and initialize it */
 	cs = gigaset_initcs(driver, 1, 1, 0, cidmode, GIGASET_MODULENAME);
 	if (!cs)
 		return -ENODEV;
@@ -894,7 +894,7 @@
 {
 	int result;
 
-	/* allocate memory for our driver state and intialize it */
+	/* allocate memory for our driver state and initialize it */
 	driver = gigaset_initdriver(GIGASET_MINOR, GIGASET_MINORS,
 				    GIGASET_MODULENAME, GIGASET_DEVNAME,
 				    &ops, THIS_MODULE);
diff --git a/drivers/isdn/hardware/mISDN/ipac.h b/drivers/isdn/hardware/mISDN/ipac.h
index 74a6ccf..8121e04 100644
--- a/drivers/isdn/hardware/mISDN/ipac.h
+++ b/drivers/isdn/hardware/mISDN/ipac.h
@@ -29,7 +29,7 @@
 	u32			type;
 	u32			off;		/* offset to isac regs */
 	char			*name;
-	spinlock_t		*hwlock;	/* lock HW acccess */
+	spinlock_t		*hwlock;	/* lock HW access */
 	read_reg_func		*read_reg;
 	write_reg_func		*write_reg;
 	fifo_func		*read_fifo;
@@ -70,7 +70,7 @@
 	struct hscx_hw		hscx[2];
 	char			*name;
 	void			*hw;
-	spinlock_t		*hwlock;	/* lock HW acccess */
+	spinlock_t		*hwlock;	/* lock HW access */
 	struct module		*owner;
 	u32			type;
 	read_reg_func		*read_reg;
diff --git a/drivers/isdn/hardware/mISDN/isar.h b/drivers/isdn/hardware/mISDN/isar.h
index 4a134ac..9962bdf 100644
--- a/drivers/isdn/hardware/mISDN/isar.h
+++ b/drivers/isdn/hardware/mISDN/isar.h
@@ -44,7 +44,7 @@
 struct isar_hw {
 	struct	isar_ch	ch[2];
 	void		*hw;
-	spinlock_t	*hwlock;	/* lock HW acccess */
+	spinlock_t	*hwlock;	/* lock HW access */
 	char		*name;
 	struct module	*owner;
 	read_reg_func	*read_reg;
diff --git a/drivers/isdn/mISDN/dsp_cmx.c b/drivers/isdn/mISDN/dsp_cmx.c
index 76d9e67..309bacf 100644
--- a/drivers/isdn/mISDN/dsp_cmx.c
+++ b/drivers/isdn/mISDN/dsp_cmx.c
@@ -112,7 +112,7 @@
  * Disable rx-data:
  * If cmx is realized in hardware, rx data will be disabled if requested by
  * the upper layer. If dtmf decoding is done by software and enabled, rx data
- * will not be diabled but blocked to the upper layer.
+ * will not be disabled but blocked to the upper layer.
  *
  * HFC conference engine:
  * If it is possible to realize all features using hardware, hardware will be
diff --git a/drivers/leds/leds-lp5521.c b/drivers/leds/leds-lp5521.c
index 33facd0..80a3ae3 100644
--- a/drivers/leds/leds-lp5521.c
+++ b/drivers/leds/leds-lp5521.c
@@ -98,7 +98,6 @@
 #define LP5521_EXT_CLK_USED		0x08
 
 struct lp5521_engine {
-	const struct attribute_group *attributes;
 	int		id;
 	u8		mode;
 	u8		prog_page;
@@ -225,25 +224,22 @@
 		    curr);
 }
 
-static void lp5521_init_engine(struct lp5521_chip *chip,
-			const struct attribute_group *attr_group)
+static void lp5521_init_engine(struct lp5521_chip *chip)
 {
 	int i;
 	for (i = 0; i < ARRAY_SIZE(chip->engines); i++) {
 		chip->engines[i].id = i + 1;
 		chip->engines[i].engine_mask = LP5521_ENG_MASK_BASE >> (i * 2);
 		chip->engines[i].prog_page = i;
-		chip->engines[i].attributes = &attr_group[i];
 	}
 }
 
-static int lp5521_configure(struct i2c_client *client,
-			const struct attribute_group *attr_group)
+static int lp5521_configure(struct i2c_client *client)
 {
 	struct lp5521_chip *chip = i2c_get_clientdata(client);
 	int ret;
 
-	lp5521_init_engine(chip, attr_group);
+	lp5521_init_engine(chip);
 
 	/* Set all PWMs to direct control mode */
 	ret = lp5521_write(client, LP5521_REG_OP_MODE, 0x3F);
@@ -329,9 +325,6 @@
 /* Set engine mode and create appropriate sysfs attributes, if required. */
 static int lp5521_set_mode(struct lp5521_engine *engine, u8 mode)
 {
-	struct lp5521_chip *chip = engine_to_lp5521(engine);
-	struct i2c_client *client = chip->client;
-	struct device *dev = &client->dev;
 	int ret = 0;
 
 	/* if in that mode already do nothing, except for run */
@@ -343,18 +336,10 @@
 	} else if (mode == LP5521_CMD_LOAD) {
 		lp5521_set_engine_mode(engine, LP5521_CMD_DISABLED);
 		lp5521_set_engine_mode(engine, LP5521_CMD_LOAD);
-
-		ret = sysfs_create_group(&dev->kobj, engine->attributes);
-		if (ret)
-			return ret;
 	} else if (mode == LP5521_CMD_DISABLED) {
 		lp5521_set_engine_mode(engine, LP5521_CMD_DISABLED);
 	}
 
-	/* remove load attribute from sysfs if not in load mode */
-	if (engine->mode == LP5521_CMD_LOAD && mode != LP5521_CMD_LOAD)
-		sysfs_remove_group(&dev->kobj, engine->attributes);
-
 	engine->mode = mode;
 
 	return ret;
@@ -373,6 +358,8 @@
 	while ((offset < len - 1) && (i < LP5521_PROGRAM_LENGTH)) {
 		/* separate sscanfs because length is working only for %s */
 		ret = sscanf(buf + offset, "%2s%n ", c, &nrchars);
+		if (ret != 2)
+			goto fail;
 		ret = sscanf(c, "%2x", &cmd);
 		if (ret != 1)
 			goto fail;
@@ -387,7 +374,10 @@
 		goto fail;
 
 	mutex_lock(&chip->lock);
-	ret = lp5521_load_program(engine, pattern);
+	if (engine->mode == LP5521_CMD_LOAD)
+		ret = lp5521_load_program(engine, pattern);
+	else
+		ret = -EINVAL;
 	mutex_unlock(&chip->lock);
 
 	if (ret) {
@@ -574,20 +564,8 @@
 	&dev_attr_engine2_mode.attr,
 	&dev_attr_engine3_mode.attr,
 	&dev_attr_selftest.attr,
-	NULL
-};
-
-static struct attribute *lp5521_engine1_attributes[] = {
 	&dev_attr_engine1_load.attr,
-	NULL
-};
-
-static struct attribute *lp5521_engine2_attributes[] = {
 	&dev_attr_engine2_load.attr,
-	NULL
-};
-
-static struct attribute *lp5521_engine3_attributes[] = {
 	&dev_attr_engine3_load.attr,
 	NULL
 };
@@ -596,12 +574,6 @@
 	.attrs = lp5521_attributes,
 };
 
-static const struct attribute_group lp5521_engine_group[] = {
-	{.attrs = lp5521_engine1_attributes },
-	{.attrs = lp5521_engine2_attributes },
-	{.attrs = lp5521_engine3_attributes },
-};
-
 static int lp5521_register_sysfs(struct i2c_client *client)
 {
 	struct device *dev = &client->dev;
@@ -616,12 +588,6 @@
 
 	sysfs_remove_group(&dev->kobj, &lp5521_group);
 
-	for (i = 0; i <  ARRAY_SIZE(chip->engines); i++) {
-		if (chip->engines[i].mode == LP5521_CMD_LOAD)
-			sysfs_remove_group(&dev->kobj,
-					chip->engines[i].attributes);
-	}
-
 	for (i = 0; i < chip->num_leds; i++)
 		sysfs_remove_group(&chip->leds[i].cdev.dev->kobj,
 				&lp5521_led_attribute_group);
@@ -651,7 +617,8 @@
 		return -EINVAL;
 	}
 
-	snprintf(name, sizeof(name), "%s:channel%d", client->name, chan);
+	snprintf(name, sizeof(name), "%s:channel%d",
+			pdata->label ?: client->name, chan);
 	led->cdev.brightness_set = lp5521_set_brightness;
 	led->cdev.name = name;
 	res = led_classdev_register(dev, &led->cdev);
@@ -723,7 +690,7 @@
 
 	dev_info(&client->dev, "%s programmable led chip found\n", id->name);
 
-	ret = lp5521_configure(client, lp5521_engine_group);
+	ret = lp5521_configure(client);
 	if (ret < 0) {
 		dev_err(&client->dev, "error configuring chip\n");
 		goto fail2;
diff --git a/drivers/leds/leds-lp5523.c b/drivers/leds/leds-lp5523.c
index 0cc4ead..d0c4068 100644
--- a/drivers/leds/leds-lp5523.c
+++ b/drivers/leds/leds-lp5523.c
@@ -105,7 +105,6 @@
 #define SHIFT_MASK(id)			(((id) - 1) * 2)
 
 struct lp5523_engine {
-	const struct attribute_group *attributes;
 	int		id;
 	u8		mode;
 	u8		prog_page;
@@ -403,14 +402,23 @@
 	struct i2c_client *client = to_i2c_client(dev);
 	struct lp5523_chip *chip = i2c_get_clientdata(client);
 	u16 mux = 0;
+	ssize_t ret;
 
 	if (lp5523_mux_parse(buf, &mux, len))
 		return -EINVAL;
 
-	if (lp5523_load_mux(&chip->engines[nr - 1], mux))
-		return -EINVAL;
+	mutex_lock(&chip->lock);
+	ret = -EINVAL;
+	if (chip->engines[nr - 1].mode != LP5523_CMD_LOAD)
+		goto leave;
 
-	return len;
+	if (lp5523_load_mux(&chip->engines[nr - 1], mux))
+		goto leave;
+
+	ret = len;
+leave:
+	mutex_unlock(&chip->lock);
+	return ret;
 }
 
 #define store_leds(nr)						\
@@ -556,7 +564,11 @@
 
 	mutex_lock(&chip->lock);
 
-	ret = lp5523_load_program(engine, pattern);
+	if (engine->mode == LP5523_CMD_LOAD)
+		ret = lp5523_load_program(engine, pattern);
+	else
+		ret = -EINVAL;
+
 	mutex_unlock(&chip->lock);
 
 	if (ret) {
@@ -737,37 +749,18 @@
 	&dev_attr_engine2_mode.attr,
 	&dev_attr_engine3_mode.attr,
 	&dev_attr_selftest.attr,
-	NULL
-};
-
-static struct attribute *lp5523_engine1_attributes[] = {
 	&dev_attr_engine1_load.attr,
 	&dev_attr_engine1_leds.attr,
-	NULL
-};
-
-static struct attribute *lp5523_engine2_attributes[] = {
 	&dev_attr_engine2_load.attr,
 	&dev_attr_engine2_leds.attr,
-	NULL
-};
-
-static struct attribute *lp5523_engine3_attributes[] = {
 	&dev_attr_engine3_load.attr,
 	&dev_attr_engine3_leds.attr,
-	NULL
 };
 
 static const struct attribute_group lp5523_group = {
 	.attrs = lp5523_attributes,
 };
 
-static const struct attribute_group lp5523_engine_group[] = {
-	{.attrs = lp5523_engine1_attributes },
-	{.attrs = lp5523_engine2_attributes },
-	{.attrs = lp5523_engine3_attributes },
-};
-
 static int lp5523_register_sysfs(struct i2c_client *client)
 {
 	struct device *dev = &client->dev;
@@ -788,10 +781,6 @@
 
 	sysfs_remove_group(&dev->kobj, &lp5523_group);
 
-	for (i = 0; i < ARRAY_SIZE(chip->engines); i++)
-		if (chip->engines[i].mode == LP5523_CMD_LOAD)
-			sysfs_remove_group(&dev->kobj, &lp5523_engine_group[i]);
-
 	for (i = 0; i < chip->num_leds; i++)
 		sysfs_remove_group(&chip->leds[i].cdev.dev->kobj,
 				&lp5523_led_attribute_group);
@@ -802,10 +791,6 @@
 /*--------------------------------------------------------------*/
 static int lp5523_set_mode(struct lp5523_engine *engine, u8 mode)
 {
-	/*  engine to chip */
-	struct lp5523_chip *chip = engine_to_lp5523(engine);
-	struct i2c_client *client = chip->client;
-	struct device *dev = &client->dev;
 	int ret = 0;
 
 	/* if in that mode already do nothing, except for run */
@@ -817,18 +802,10 @@
 	} else if (mode == LP5523_CMD_LOAD) {
 		lp5523_set_engine_mode(engine, LP5523_CMD_DISABLED);
 		lp5523_set_engine_mode(engine, LP5523_CMD_LOAD);
-
-		ret = sysfs_create_group(&dev->kobj, engine->attributes);
-		if (ret)
-			return ret;
 	} else if (mode == LP5523_CMD_DISABLED) {
 		lp5523_set_engine_mode(engine, LP5523_CMD_DISABLED);
 	}
 
-	/* remove load attribute from sysfs if not in load mode */
-	if (engine->mode == LP5523_CMD_LOAD && mode != LP5523_CMD_LOAD)
-		sysfs_remove_group(&dev->kobj, engine->attributes);
-
 	engine->mode = mode;
 
 	return ret;
@@ -845,7 +822,6 @@
 	engine->engine_mask = LP5523_ENG_MASK_BASE >> SHIFT_MASK(id);
 	engine->prog_page = id - 1;
 	engine->mux_page = id + 2;
-	engine->attributes = &lp5523_engine_group[id - 1];
 
 	return 0;
 }
@@ -870,7 +846,8 @@
 			return -EINVAL;
 		}
 
-		snprintf(name, 32, "lp5523:channel%d", chan);
+		snprintf(name, sizeof(name), "%s:channel%d",
+			pdata->label ?: "lp5523", chan);
 
 		led->cdev.name = name;
 		led->cdev.brightness_set = lp5523_set_brightness;
diff --git a/drivers/leds/leds-pca9532.c b/drivers/leds/leds-pca9532.c
index 43d0875..afac338 100644
--- a/drivers/leds/leds-pca9532.c
+++ b/drivers/leds/leds-pca9532.c
@@ -200,6 +200,32 @@
 	pca9532_setled(led);
 }
 
+static void pca9532_destroy_devices(struct pca9532_data *data, int n_devs)
+{
+	int i = n_devs;
+
+	if (!data)
+		return;
+
+	while (--i >= 0) {
+		switch (data->leds[i].type) {
+		case PCA9532_TYPE_NONE:
+			break;
+		case PCA9532_TYPE_LED:
+			led_classdev_unregister(&data->leds[i].ldev);
+			cancel_work_sync(&data->leds[i].work);
+			break;
+		case PCA9532_TYPE_N2100_BEEP:
+			if (data->idev != NULL) {
+				input_unregister_device(data->idev);
+				cancel_work_sync(&data->work);
+				data->idev = NULL;
+			}
+			break;
+		}
+	}
+}
+
 static int pca9532_configure(struct i2c_client *client,
 	struct pca9532_data *data, struct pca9532_platform_data *pdata)
 {
@@ -274,25 +300,7 @@
 	return 0;
 
 exit:
-	if (i > 0)
-		for (i = i - 1; i >= 0; i--)
-			switch (data->leds[i].type) {
-			case PCA9532_TYPE_NONE:
-				break;
-			case PCA9532_TYPE_LED:
-				led_classdev_unregister(&data->leds[i].ldev);
-				cancel_work_sync(&data->leds[i].work);
-				break;
-			case PCA9532_TYPE_N2100_BEEP:
-				if (data->idev != NULL) {
-					input_unregister_device(data->idev);
-					input_free_device(data->idev);
-					cancel_work_sync(&data->work);
-					data->idev = NULL;
-				}
-				break;
-			}
-
+	pca9532_destroy_devices(data, i);
 	return err;
 }
 
@@ -329,25 +337,7 @@
 static int pca9532_remove(struct i2c_client *client)
 {
 	struct pca9532_data *data = i2c_get_clientdata(client);
-	int i;
-	for (i = 0; i < 16; i++)
-		switch (data->leds[i].type) {
-		case PCA9532_TYPE_NONE:
-			break;
-		case PCA9532_TYPE_LED:
-			led_classdev_unregister(&data->leds[i].ldev);
-			cancel_work_sync(&data->leds[i].work);
-			break;
-		case PCA9532_TYPE_N2100_BEEP:
-			if (data->idev != NULL) {
-				input_unregister_device(data->idev);
-				input_free_device(data->idev);
-				cancel_work_sync(&data->work);
-				data->idev = NULL;
-			}
-			break;
-		}
-
+	pca9532_destroy_devices(data, 16);
 	kfree(data);
 	return 0;
 }
diff --git a/drivers/leds/ledtrig-backlight.c b/drivers/leds/ledtrig-backlight.c
index f948e57..2b513a2 100644
--- a/drivers/leds/ledtrig-backlight.c
+++ b/drivers/leds/ledtrig-backlight.c
@@ -26,6 +26,7 @@
 	int brightness;
 	int old_status;
 	struct notifier_block notifier;
+	unsigned invert;
 };
 
 static int fb_notifier_callback(struct notifier_block *p,
@@ -36,23 +37,64 @@
 	struct led_classdev *led = n->led;
 	struct fb_event *fb_event = data;
 	int *blank = fb_event->data;
+	int new_status = *blank ? BLANK : UNBLANK;
 
 	switch (event) {
 	case FB_EVENT_BLANK :
-		if (*blank && n->old_status == UNBLANK) {
+		if (new_status == n->old_status)
+			break;
+
+		if ((n->old_status == UNBLANK) ^ n->invert) {
 			n->brightness = led->brightness;
 			led_set_brightness(led, LED_OFF);
-			n->old_status = BLANK;
-		} else if (!*blank && n->old_status == BLANK) {
+		} else {
 			led_set_brightness(led, n->brightness);
-			n->old_status = UNBLANK;
 		}
+
+		n->old_status = new_status;
+
 		break;
 	}
 
 	return 0;
 }
 
+static ssize_t bl_trig_invert_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct led_classdev *led = dev_get_drvdata(dev);
+	struct bl_trig_notifier *n = led->trigger_data;
+
+	return sprintf(buf, "%u\n", n->invert);
+}
+
+static ssize_t bl_trig_invert_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t num)
+{
+	struct led_classdev *led = dev_get_drvdata(dev);
+	struct bl_trig_notifier *n = led->trigger_data;
+	unsigned long invert;
+	int ret;
+
+	ret = strict_strtoul(buf, 10, &invert);
+	if (ret < 0)
+		return ret;
+
+	if (invert > 1)
+		return -EINVAL;
+
+	n->invert = invert;
+
+	/* After inverting, we need to update the LED. */
+	if ((n->old_status == BLANK) ^ n->invert)
+		led_set_brightness(led, LED_OFF);
+	else
+		led_set_brightness(led, n->brightness);
+
+	return num;
+}
+static DEVICE_ATTR(inverted, 0644, bl_trig_invert_show, bl_trig_invert_store);
+
 static void bl_trig_activate(struct led_classdev *led)
 {
 	int ret;
@@ -66,6 +108,10 @@
 		return;
 	}
 
+	ret = device_create_file(led->dev, &dev_attr_inverted);
+	if (ret)
+		goto err_invert;
+
 	n->led = led;
 	n->brightness = led->brightness;
 	n->old_status = UNBLANK;
@@ -74,6 +120,12 @@
 	ret = fb_register_client(&n->notifier);
 	if (ret)
 		dev_err(led->dev, "unable to register backlight trigger\n");
+
+	return;
+
+err_invert:
+	led->trigger_data = NULL;
+	kfree(n);
 }
 
 static void bl_trig_deactivate(struct led_classdev *led)
@@ -82,6 +134,7 @@
 		(struct bl_trig_notifier *) led->trigger_data;
 
 	if (n) {
+		device_remove_file(led->dev, &dev_attr_inverted);
 		fb_unregister_client(&n->notifier);
 		kfree(n);
 	}
diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c
index b6e7ddc..4daf9e5 100644
--- a/drivers/macintosh/macio_asic.c
+++ b/drivers/macintosh/macio_asic.c
@@ -387,11 +387,10 @@
 	/* Set the DMA ops to the ones from the PCI device, this could be
 	 * fishy if we didn't know that on PowerMac it's always direct ops
 	 * or iommu ops that will work fine
+	 *
+	 * To get all the fields, copy all archdata
 	 */
-	dev->ofdev.dev.archdata.dma_ops =
-		chip->lbus.pdev->dev.archdata.dma_ops;
-	dev->ofdev.dev.archdata.dma_data =
-		chip->lbus.pdev->dev.archdata.dma_data;
+	dev->ofdev.dev.archdata = chip->lbus.pdev->dev.archdata;
 #endif /* CONFIG_PCI */
 
 #ifdef DEBUG
diff --git a/drivers/macintosh/therm_pm72.c b/drivers/macintosh/therm_pm72.c
index 4454927..2e041fd 100644
--- a/drivers/macintosh/therm_pm72.c
+++ b/drivers/macintosh/therm_pm72.c
@@ -2213,6 +2213,9 @@
 static int fcu_of_probe(struct platform_device* dev, const struct of_device_id *match)
 {
 	state = state_detached;
+	of_dev = dev;
+
+	dev_info(&dev->dev, "PowerMac G5 Thermal control driver %s\n", VERSION);
 
 	/* Lookup the fans in the device tree */
 	fcu_lookup_fans(dev->dev.of_node);
@@ -2235,6 +2238,7 @@
 	},
 	{},
 };
+MODULE_DEVICE_TABLE(of, fcu_match);
 
 static struct of_platform_driver fcu_of_platform_driver = 
 {
@@ -2252,8 +2256,6 @@
  */
 static int __init therm_pm72_init(void)
 {
-	struct device_node *np;
-
 	rackmac = of_machine_is_compatible("RackMac3,1");
 
 	if (!of_machine_is_compatible("PowerMac7,2") &&
@@ -2261,34 +2263,12 @@
 	    !rackmac)
 	    	return -ENODEV;
 
-	printk(KERN_INFO "PowerMac G5 Thermal control driver %s\n", VERSION);
-
-	np = of_find_node_by_type(NULL, "fcu");
-	if (np == NULL) {
-		/* Some machines have strangely broken device-tree */
-		np = of_find_node_by_path("/u3@0,f8000000/i2c@f8001000/fan@15e");
-		if (np == NULL) {
-			    printk(KERN_ERR "Can't find FCU in device-tree !\n");
-			    return -ENODEV;
-		}
-	}
-	of_dev = of_platform_device_create(np, "temperature", NULL);
-	if (of_dev == NULL) {
-		printk(KERN_ERR "Can't register FCU platform device !\n");
-		return -ENODEV;
-	}
-
-	of_register_platform_driver(&fcu_of_platform_driver);
-	
-	return 0;
+	return of_register_platform_driver(&fcu_of_platform_driver);
 }
 
 static void __exit therm_pm72_exit(void)
 {
 	of_unregister_platform_driver(&fcu_of_platform_driver);
-
-	if (of_dev)
-		of_device_unregister(of_dev);
 }
 
 module_init(therm_pm72_init);
diff --git a/drivers/macintosh/via-pmu-backlight.c b/drivers/macintosh/via-pmu-backlight.c
index 1cec02f..ade1e65 100644
--- a/drivers/macintosh/via-pmu-backlight.c
+++ b/drivers/macintosh/via-pmu-backlight.c
@@ -15,7 +15,7 @@
 
 #define MAX_PMU_LEVEL 0xFF
 
-static struct backlight_ops pmu_backlight_data;
+static const struct backlight_ops pmu_backlight_data;
 static DEFINE_SPINLOCK(pmu_backlight_lock);
 static int sleeping, uses_pmu_bl;
 static u8 bl_curve[FB_BACKLIGHT_LEVELS];
@@ -115,7 +115,7 @@
 	return bd->props.brightness;
 }
 
-static struct backlight_ops pmu_backlight_data = {
+static const struct backlight_ops pmu_backlight_data = {
 	.get_brightness	= pmu_backlight_get_brightness,
 	.update_status	= pmu_backlight_update_status,
 
diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c
index cd29c82..8b021eb 100644
--- a/drivers/macintosh/via-pmu.c
+++ b/drivers/macintosh/via-pmu.c
@@ -2257,7 +2257,7 @@
 		&& (pmac_call_feature(PMAC_FTR_SLEEP_STATE, NULL, 0, -1) >= 0);
 }
 
-static struct platform_suspend_ops pmu_pm_ops = {
+static const struct platform_suspend_ops pmu_pm_ops = {
 	.enter = powerbook_sleep,
 	.valid = pmu_sleep_valid,
 };
diff --git a/drivers/media/video/cx18/cx23418.h b/drivers/media/video/cx18/cx23418.h
index 2c00980..7e40035 100644
--- a/drivers/media/video/cx18/cx23418.h
+++ b/drivers/media/video/cx18/cx23418.h
@@ -177,7 +177,7 @@
    IN[0] - Task handle.
    IN[1] - luma type: 0 = disable, 1 = 1D horizontal only, 2 = 1D vertical only,
 		      3 = 2D H/V separable, 4 = 2D symmetric non-separable
-   IN[2] - chroma type: 0 - diable, 1 = 1D horizontal
+   IN[2] - chroma type: 0 = disable, 1 = 1D horizontal
    ReturnCode - One of the ERR_CAPTURE_... */
 #define CX18_CPU_SET_SPATIAL_FILTER_TYPE     	(CPU_CMD_MASK_CAPTURE | 0x000C)
 
diff --git a/drivers/media/video/cx25840/cx25840-ir.c b/drivers/media/video/cx25840/cx25840-ir.c
index 627926f..7eb79af 100644
--- a/drivers/media/video/cx25840/cx25840-ir.c
+++ b/drivers/media/video/cx25840/cx25840-ir.c
@@ -261,7 +261,7 @@
 	u32 rem;
 
 	/*
-	 * The 2 lsb's of the pulse width timer count are not accessable, hence
+	 * The 2 lsb's of the pulse width timer count are not accessible, hence
 	 * the (1 << 2)
 	 */
 	n = ((u64) ns) * CX25840_IR_REFCLK_FREQ / 1000000; /* millicycles */
diff --git a/drivers/media/video/davinci/vpif.h b/drivers/media/video/davinci/vpif.h
index 188841b..ebd5c43 100644
--- a/drivers/media/video/davinci/vpif.h
+++ b/drivers/media/video/davinci/vpif.h
@@ -33,7 +33,7 @@
 #define regr(reg)               readl((reg) + vpif_base)
 #define regw(value, reg)        writel(value, (reg + vpif_base))
 
-/* Register Addresss Offsets */
+/* Register Address Offsets */
 #define VPIF_PID			(0x0000)
 #define VPIF_CH0_CTRL			(0x0004)
 #define VPIF_CH1_CTRL			(0x0008)
diff --git a/drivers/media/video/davinci/vpss.c b/drivers/media/video/davinci/vpss.c
index 7918680..3e5cf27 100644
--- a/drivers/media/video/davinci/vpss.c
+++ b/drivers/media/video/davinci/vpss.c
@@ -85,7 +85,7 @@
 /*
  * vpss operations. Depends on platform. Not all functions are available
  * on all platforms. The api, first check if a functio is available before
- * invoking it. In the probe, the function ptrs are intialized based on
+ * invoking it. In the probe, the function ptrs are initialized based on
  * vpss name. vpss name can be "dm355_vpss", "dm644x_vpss" etc.
  */
 struct vpss_hw_ops {
diff --git a/drivers/media/video/omap/omap_vout.c b/drivers/media/video/omap/omap_vout.c
index 83de97a..029a4ba 100644
--- a/drivers/media/video/omap/omap_vout.c
+++ b/drivers/media/video/omap/omap_vout.c
@@ -1286,7 +1286,7 @@
 	videobuf_mmap_free(q);
 
 	/* Even if apply changes fails we should continue
-	   freeing allocated memeory */
+	   freeing allocated memory */
 	if (vout->streaming) {
 		u32 mask = 0;
 
diff --git a/drivers/media/video/saa7164/saa7164-core.c b/drivers/media/video/saa7164/saa7164-core.c
index d6bf3f8..58af67f 100644
--- a/drivers/media/video/saa7164/saa7164-core.c
+++ b/drivers/media/video/saa7164/saa7164-core.c
@@ -655,8 +655,8 @@
 		goto out;
 	}
 
-	/* Check that the hardware is accessable. If the status bytes are
-	 * 0xFF then the device is not accessable, the the IRQ belongs
+	/* Check that the hardware is accessible. If the status bytes are
+	 * 0xFF then the device is not accessible, then the IRQ belongs
 	 * to another driver.
 	 * 4 x u32 interrupt registers.
 	 */
diff --git a/drivers/media/video/sn9c102/sn9c102_sensor.h b/drivers/media/video/sn9c102/sn9c102_sensor.h
index 494957b..7f38549 100644
--- a/drivers/media/video/sn9c102/sn9c102_sensor.h
+++ b/drivers/media/video/sn9c102/sn9c102_sensor.h
@@ -147,7 +147,7 @@
 
 struct sn9c102_sensor {
 	char name[32], /* sensor name */
-	     maintainer[64]; /* name of the mantainer <email> */
+	     maintainer[64]; /* name of the maintainer <email> */
 
 	enum sn9c102_bridge supported_bridge; /* supported SN9C1xx bridges */
 
diff --git a/drivers/media/video/tvp7002.c b/drivers/media/video/tvp7002.c
index e63b40f..c799e4e 100644
--- a/drivers/media/video/tvp7002.c
+++ b/drivers/media/video/tvp7002.c
@@ -789,7 +789,7 @@
  * Get the value of a TVP7002 decoder device register.
  * Returns zero when successful, -EINVAL if register read fails or
  * access to I2C client fails, -EPERM if the call is not allowed
- * by diabled CAP_SYS_ADMIN.
+ * by disabled CAP_SYS_ADMIN.
  */
 static int tvp7002_g_register(struct v4l2_subdev *sd,
 						struct v4l2_dbg_register *reg)
diff --git a/drivers/media/video/via-camera.c b/drivers/media/video/via-camera.c
index e25aca5..2f973cd 100644
--- a/drivers/media/video/via-camera.c
+++ b/drivers/media/video/via-camera.c
@@ -13,14 +13,12 @@
 #include <linux/pci.h>
 #include <linux/gpio.h>
 #include <linux/interrupt.h>
-#include <linux/pci.h>
 #include <linux/platform_device.h>
 #include <linux/videodev2.h>
 #include <media/v4l2-device.h>
 #include <media/v4l2-ioctl.h>
 #include <media/v4l2-chip-ident.h>
 #include <media/videobuf-dma-sg.h>
-#include <linux/device.h>
 #include <linux/delay.h>
 #include <linux/dma-mapping.h>
 #include <linux/pm_qos_params.h>
diff --git a/drivers/memstick/core/memstick.c b/drivers/memstick/core/memstick.c
index c00fe82..e9a3eab 100644
--- a/drivers/memstick/core/memstick.c
+++ b/drivers/memstick/core/memstick.c
@@ -465,6 +465,7 @@
 		if (!host->card) {
 			host->card = card;
 			if (device_register(&card->dev)) {
+				put_device(&card->dev);
 				kfree(host->card);
 				host->card = NULL;
 			}
@@ -510,14 +511,18 @@
 {
 	int rc;
 
-	if (!idr_pre_get(&memstick_host_idr, GFP_KERNEL))
-		return -ENOMEM;
+	while (1) {
+		if (!idr_pre_get(&memstick_host_idr, GFP_KERNEL))
+			return -ENOMEM;
 
-	spin_lock(&memstick_host_lock);
-	rc = idr_get_new(&memstick_host_idr, host, &host->id);
-	spin_unlock(&memstick_host_lock);
-	if (rc)
-		return rc;
+		spin_lock(&memstick_host_lock);
+		rc = idr_get_new(&memstick_host_idr, host, &host->id);
+		spin_unlock(&memstick_host_lock);
+		if (!rc)
+			break;
+		else if (rc != -EAGAIN)
+			return rc;
+	}
 
 	dev_set_name(&host->dev, "memstick%u", host->id);
 
diff --git a/drivers/memstick/core/mspro_block.c b/drivers/memstick/core/mspro_block.c
index 02362ec..57b42bf 100644
--- a/drivers/memstick/core/mspro_block.c
+++ b/drivers/memstick/core/mspro_block.c
@@ -23,7 +23,6 @@
 
 #define DRIVER_NAME "mspro_block"
 
-static DEFINE_MUTEX(mspro_block_mutex);
 static int major;
 module_param(major, int, 0644);
 
@@ -160,6 +159,13 @@
 	int                   (*mrq_handler)(struct memstick_dev *card,
 					     struct memstick_request **mrq);
 
+
+	/* Default request setup function for data access method preferred by
+	 * this host instance.
+	 */
+	void                  (*setup_transfer)(struct memstick_dev *card,
+						u64 offset, size_t length);
+
 	struct attribute_group attr_group;
 
 	struct scatterlist    req_sg[MSPRO_BLOCK_MAX_SEGS];
@@ -181,7 +187,6 @@
 	struct mspro_block_data *msb = disk->private_data;
 	int rc = -ENXIO;
 
-	mutex_lock(&mspro_block_mutex);
 	mutex_lock(&mspro_block_disk_lock);
 
 	if (msb && msb->card) {
@@ -193,7 +198,6 @@
 	}
 
 	mutex_unlock(&mspro_block_disk_lock);
-	mutex_unlock(&mspro_block_mutex);
 
 	return rc;
 }
@@ -225,11 +229,7 @@
 
 static int mspro_block_bd_release(struct gendisk *disk, fmode_t mode)
 {
-	int ret;
-	mutex_lock(&mspro_block_mutex);
-	ret = mspro_block_disk_release(disk);
-	mutex_unlock(&mspro_block_mutex);
-	return ret;
+	return mspro_block_disk_release(disk);
 }
 
 static int mspro_block_bd_getgeo(struct block_device *bdev,
@@ -663,14 +663,43 @@
 	}
 }
 
+/*** Transfer setup functions for different access methods. ***/
+
+/** Setup data transfer request for SET_CMD TPC with arguments in card
+ *  registers.
+ *
+ *  @card    Current media instance
+ *  @offset  Target data offset in bytes
+ *  @length  Required transfer length in bytes.
+ */
+static void h_mspro_block_setup_cmd(struct memstick_dev *card, u64 offset,
+				    size_t length)
+{
+	struct mspro_block_data *msb = memstick_get_drvdata(card);
+	struct mspro_param_register param = {
+		.system = msb->system,
+		.data_count = cpu_to_be16((uint16_t)(length / msb->page_size)),
+		/* ISO C90 warning precludes direct initialization for now. */
+		.data_address = 0,
+		.tpc_param = 0
+	};
+
+	do_div(offset, msb->page_size);
+	param.data_address = cpu_to_be32((uint32_t)offset);
+
+	card->next_request = h_mspro_block_req_init;
+	msb->mrq_handler = h_mspro_block_transfer_data;
+	memstick_init_req(&card->current_mrq, MS_TPC_WRITE_REG,
+			  &param, sizeof(param));
+}
+
 /*** Data transfer ***/
 
 static int mspro_block_issue_req(struct memstick_dev *card, int chunk)
 {
 	struct mspro_block_data *msb = memstick_get_drvdata(card);
-	sector_t t_sec;
+	u64 t_off;
 	unsigned int count;
-	struct mspro_param_register param;
 
 try_again:
 	while (chunk) {
@@ -685,30 +714,17 @@
 			continue;
 		}
 
-		t_sec = blk_rq_pos(msb->block_req) << 9;
-		sector_div(t_sec, msb->page_size);
-
+		t_off = blk_rq_pos(msb->block_req);
+		t_off <<= 9;
 		count = blk_rq_bytes(msb->block_req);
-		count /= msb->page_size;
 
-		param.system = msb->system;
-		param.data_count = cpu_to_be16(count);
-		param.data_address = cpu_to_be32((uint32_t)t_sec);
-		param.tpc_param = 0;
+		msb->setup_transfer(card, t_off, count);
 
 		msb->data_dir = rq_data_dir(msb->block_req);
 		msb->transfer_cmd = msb->data_dir == READ
 				    ? MSPRO_CMD_READ_DATA
 				    : MSPRO_CMD_WRITE_DATA;
 
-		dev_dbg(&card->dev, "data transfer: cmd %x, "
-			"lba %x, count %x\n", msb->transfer_cmd,
-			be32_to_cpu(param.data_address), count);
-
-		card->next_request = h_mspro_block_req_init;
-		msb->mrq_handler = h_mspro_block_transfer_data;
-		memstick_init_req(&card->current_mrq, MS_TPC_WRITE_REG,
-				  &param, sizeof(param));
 		memstick_new_req(card->host);
 		return 0;
 	}
@@ -963,18 +979,16 @@
 static int mspro_block_read_attributes(struct memstick_dev *card)
 {
 	struct mspro_block_data *msb = memstick_get_drvdata(card);
-	struct mspro_param_register param = {
-		.system = msb->system,
-		.data_count = cpu_to_be16(1),
-		.data_address = 0,
-		.tpc_param = 0
-	};
 	struct mspro_attribute *attr = NULL;
 	struct mspro_sys_attr *s_attr = NULL;
 	unsigned char *buffer = NULL;
 	int cnt, rc, attr_count;
-	unsigned int addr;
-	unsigned short page_count;
+	/* While physical device offsets, represented here by attr_offset
+	 * and attr_len, would normally be of large numeric types, we can be
+	 * sure that the attributes are close enough to the beginning of the
+	 * device to save ourselves some trouble.
+	 */
+	unsigned int addr, attr_offset = 0, attr_len = msb->page_size;
 
 	attr = kmalloc(msb->page_size, GFP_KERNEL);
 	if (!attr)
@@ -987,10 +1001,8 @@
 	msb->data_dir = READ;
 	msb->transfer_cmd = MSPRO_CMD_READ_ATRB;
 
-	card->next_request = h_mspro_block_req_init;
-	msb->mrq_handler = h_mspro_block_transfer_data;
-	memstick_init_req(&card->current_mrq, MS_TPC_WRITE_REG, &param,
-			  sizeof(param));
+	msb->setup_transfer(card, attr_offset, attr_len);
+
 	memstick_new_req(card->host);
 	wait_for_completion(&card->mrq_complete);
 	if (card->current_mrq.error) {
@@ -1021,13 +1033,12 @@
 	}
 	msb->attr_group.name = "media_attributes";
 
-	buffer = kmalloc(msb->page_size, GFP_KERNEL);
+	buffer = kmalloc(attr_len, GFP_KERNEL);
 	if (!buffer) {
 		rc = -ENOMEM;
 		goto out_free_attr;
 	}
-	memcpy(buffer, (char *)attr, msb->page_size);
-	page_count = 1;
+	memcpy(buffer, (char *)attr, attr_len);
 
 	for (cnt = 0; cnt < attr_count; ++cnt) {
 		s_attr = kzalloc(sizeof(struct mspro_sys_attr), GFP_KERNEL);
@@ -1038,9 +1049,10 @@
 
 		msb->attr_group.attrs[cnt] = &s_attr->dev_attr.attr;
 		addr = be32_to_cpu(attr->entries[cnt].address);
-		rc = be32_to_cpu(attr->entries[cnt].size);
+		s_attr->size = be32_to_cpu(attr->entries[cnt].size);
 		dev_dbg(&card->dev, "adding attribute %d: id %x, address %x, "
-			"size %x\n", cnt, attr->entries[cnt].id, addr, rc);
+			"size %zx\n", cnt, attr->entries[cnt].id, addr,
+			s_attr->size);
 		s_attr->id = attr->entries[cnt].id;
 		if (mspro_block_attr_name(s_attr->id))
 			snprintf(s_attr->name, sizeof(s_attr->name), "%s",
@@ -1054,57 +1066,47 @@
 		s_attr->dev_attr.attr.mode = S_IRUGO;
 		s_attr->dev_attr.show = mspro_block_attr_show(s_attr->id);
 
-		if (!rc)
+		if (!s_attr->size)
 			continue;
 
-		s_attr->size = rc;
-		s_attr->data = kmalloc(rc, GFP_KERNEL);
+		s_attr->data = kmalloc(s_attr->size, GFP_KERNEL);
 		if (!s_attr->data) {
 			rc = -ENOMEM;
 			goto out_free_buffer;
 		}
 
-		if (((addr / msb->page_size)
-		     == be32_to_cpu(param.data_address))
-		    && (((addr + rc - 1) / msb->page_size)
-			== be32_to_cpu(param.data_address))) {
+		if (((addr / msb->page_size) == (attr_offset / msb->page_size))
+		    && (((addr + s_attr->size - 1) / msb->page_size)
+			== (attr_offset / msb->page_size))) {
 			memcpy(s_attr->data, buffer + addr % msb->page_size,
-			       rc);
+			       s_attr->size);
 			continue;
 		}
 
-		if (page_count <= (rc / msb->page_size)) {
+		attr_offset = (addr / msb->page_size) * msb->page_size;
+
+		if ((attr_offset + attr_len) < (addr + s_attr->size)) {
 			kfree(buffer);
-			page_count = (rc / msb->page_size) + 1;
-			buffer = kmalloc(page_count * msb->page_size,
-					 GFP_KERNEL);
+			attr_len = (((addr + s_attr->size) / msb->page_size)
+				    + 1 ) * msb->page_size - attr_offset;
+			buffer = kmalloc(attr_len, GFP_KERNEL);
 			if (!buffer) {
 				rc = -ENOMEM;
 				goto out_free_attr;
 			}
 		}
 
-		param.system = msb->system;
-		param.data_count = cpu_to_be16((rc / msb->page_size) + 1);
-		param.data_address = cpu_to_be32(addr / msb->page_size);
-		param.tpc_param = 0;
-
-		sg_init_one(&msb->req_sg[0], buffer,
-			    be16_to_cpu(param.data_count) * msb->page_size);
+		sg_init_one(&msb->req_sg[0], buffer, attr_len);
 		msb->seg_count = 1;
 		msb->current_seg = 0;
 		msb->current_page = 0;
 		msb->data_dir = READ;
 		msb->transfer_cmd = MSPRO_CMD_READ_ATRB;
 
-		dev_dbg(&card->dev, "reading attribute pages %x, %x\n",
-			be32_to_cpu(param.data_address),
-			be16_to_cpu(param.data_count));
+		dev_dbg(&card->dev, "reading attribute range %x, %x\n",
+			attr_offset, attr_len);
 
-		card->next_request = h_mspro_block_req_init;
-		msb->mrq_handler = h_mspro_block_transfer_data;
-		memstick_init_req(&card->current_mrq, MS_TPC_WRITE_REG,
-				  (char *)&param, sizeof(param));
+		msb->setup_transfer(card, attr_offset, attr_len);
 		memstick_new_req(card->host);
 		wait_for_completion(&card->mrq_complete);
 		if (card->current_mrq.error) {
@@ -1112,7 +1114,8 @@
 			goto out_free_buffer;
 		}
 
-		memcpy(s_attr->data, buffer + addr % msb->page_size, rc);
+		memcpy(s_attr->data, buffer + addr % msb->page_size,
+		       s_attr->size);
 	}
 
 	rc = 0;
@@ -1130,6 +1133,8 @@
 	int rc = 0;
 
 	msb->system = MEMSTICK_SYS_SERIAL;
+	msb->setup_transfer = h_mspro_block_setup_cmd;
+
 	card->reg_addr.r_offset = offsetof(struct mspro_register, status);
 	card->reg_addr.r_length = sizeof(struct ms_status_register);
 	card->reg_addr.w_offset = offsetof(struct mspro_register, param);
@@ -1206,10 +1211,12 @@
 
 	msb->page_size = be16_to_cpu(sys_info->unit_size);
 
-	if (!idr_pre_get(&mspro_block_disk_idr, GFP_KERNEL))
-		return -ENOMEM;
-
 	mutex_lock(&mspro_block_disk_lock);
+	if (!idr_pre_get(&mspro_block_disk_idr, GFP_KERNEL)) {
+		mutex_unlock(&mspro_block_disk_lock);
+		return -ENOMEM;
+	}
+
 	rc = idr_get_new(&mspro_block_disk_idr, card, &disk_id);
 	mutex_unlock(&mspro_block_disk_lock);
 
diff --git a/drivers/memstick/host/jmb38x_ms.c b/drivers/memstick/host/jmb38x_ms.c
index f2b894c..d89d925 100644
--- a/drivers/memstick/host/jmb38x_ms.c
+++ b/drivers/memstick/host/jmb38x_ms.c
@@ -61,6 +61,7 @@
 	struct memstick_request *req;
 	unsigned char           cmd_flags;
 	unsigned char           io_pos;
+	unsigned char           ifmode;
 	unsigned int            io_word[2];
 };
 
@@ -136,15 +137,14 @@
 #define PAD_PU_PD_ON_MS_SOCK0 0x5f8f0000
 #define PAD_PU_PD_ON_MS_SOCK1 0x0f0f0000
 
+#define CLOCK_CONTROL_BY_MMIO 0x00000008
 #define CLOCK_CONTROL_40MHZ   0x00000001
-#define CLOCK_CONTROL_50MHZ   0x0000000a
-#define CLOCK_CONTROL_60MHZ   0x00000008
-#define CLOCK_CONTROL_62_5MHZ 0x0000000c
+#define CLOCK_CONTROL_50MHZ   0x00000002
+#define CLOCK_CONTROL_60MHZ   0x00000010
+#define CLOCK_CONTROL_62_5MHZ 0x00000004
 #define CLOCK_CONTROL_OFF     0x00000000
 
 #define PCI_CTL_CLOCK_DLY_ADDR   0x000000b0
-#define PCI_CTL_CLOCK_DLY_MASK_A 0x00000f00
-#define PCI_CTL_CLOCK_DLY_MASK_B 0x0000f000
 
 enum {
 	CMD_READY    = 0x01,
@@ -390,8 +390,13 @@
 
 	if (host->req->data_dir == READ)
 		cmd |= TPC_DIR;
-	if (host->req->need_card_int)
-		cmd |= TPC_WAIT_INT;
+
+	if (host->req->need_card_int) {
+		if (host->ifmode == MEMSTICK_SERIAL)
+			cmd |= TPC_GET_INT;
+		else
+			cmd |= TPC_WAIT_INT;
+	}
 
 	data = host->req->data;
 
@@ -529,7 +534,10 @@
 		if (irq_status & INT_STATUS_ANY_ERR) {
 			if (irq_status & INT_STATUS_CRC_ERR)
 				host->req->error = -EILSEQ;
-			else
+			else if (irq_status & INT_STATUS_TPC_ERR) {
+				dev_dbg(&host->chip->pdev->dev, "TPC_ERR\n");
+				jmb38x_ms_complete_cmd(msh, 0);
+			} else
 				host->req->error = -ETIME;
 		} else {
 			if (host->cmd_flags & DMA_DATA) {
@@ -644,7 +652,6 @@
 		ndelay(20);
 	}
 	dev_dbg(&host->chip->pdev->dev, "reset_req timeout\n");
-	/* return -EIO; */
 
 reset_next:
 	writel(HOST_CONTROL_RESET | HOST_CONTROL_CLOCK_EN
@@ -675,7 +682,7 @@
 {
 	struct jmb38x_ms_host *host = memstick_priv(msh);
 	unsigned int host_ctl = readl(host->addr + HOST_CONTROL);
-	unsigned int clock_ctl = CLOCK_CONTROL_40MHZ, clock_delay = 0;
+	unsigned int clock_ctl = CLOCK_CONTROL_BY_MMIO, clock_delay = 0;
 	int rc = 0;
 
 	switch (param) {
@@ -687,9 +694,7 @@
 
 			host_ctl = 7;
 			host_ctl |= HOST_CONTROL_POWER_EN
-				    | HOST_CONTROL_CLOCK_EN
-				    | HOST_CONTROL_HW_OC_P
-				    | HOST_CONTROL_TDELAY_EN;
+				 | HOST_CONTROL_CLOCK_EN;
 			writel(host_ctl, host->addr + HOST_CONTROL);
 
 			writel(host->id ? PAD_PU_PD_ON_MS_SOCK1
@@ -712,46 +717,88 @@
 			return -EINVAL;
 		break;
 	case MEMSTICK_INTERFACE:
+		dev_dbg(&host->chip->pdev->dev,
+			"Set Host Interface Mode to %d\n", value);
+		host_ctl &= ~(HOST_CONTROL_FAST_CLK | HOST_CONTROL_REI |
+			      HOST_CONTROL_REO);
+		host_ctl |= HOST_CONTROL_TDELAY_EN | HOST_CONTROL_HW_OC_P;
 		host_ctl &= ~(3 << HOST_CONTROL_IF_SHIFT);
-		pci_read_config_dword(host->chip->pdev,
-				      PCI_CTL_CLOCK_DLY_ADDR,
-				      &clock_delay);
-		clock_delay &= host->id ? ~PCI_CTL_CLOCK_DLY_MASK_B
-					: ~PCI_CTL_CLOCK_DLY_MASK_A;
 
 		if (value == MEMSTICK_SERIAL) {
-			host_ctl &= ~HOST_CONTROL_FAST_CLK;
-			host_ctl &= ~HOST_CONTROL_REO;
 			host_ctl |= HOST_CONTROL_IF_SERIAL
 				    << HOST_CONTROL_IF_SHIFT;
 			host_ctl |= HOST_CONTROL_REI;
-			clock_ctl = CLOCK_CONTROL_40MHZ;
+			clock_ctl |= CLOCK_CONTROL_40MHZ;
+			clock_delay = 0;
 		} else if (value == MEMSTICK_PAR4) {
-			host_ctl |= HOST_CONTROL_FAST_CLK | HOST_CONTROL_REO;
+			host_ctl |= HOST_CONTROL_FAST_CLK;
 			host_ctl |= HOST_CONTROL_IF_PAR4
 				    << HOST_CONTROL_IF_SHIFT;
-			host_ctl &= ~HOST_CONTROL_REI;
-			clock_ctl = CLOCK_CONTROL_40MHZ;
-			clock_delay |= host->id ? (4 << 12) : (4 << 8);
+			host_ctl |= HOST_CONTROL_REO;
+			clock_ctl |= CLOCK_CONTROL_40MHZ;
+			clock_delay = 4;
 		} else if (value == MEMSTICK_PAR8) {
 			host_ctl |= HOST_CONTROL_FAST_CLK;
 			host_ctl |= HOST_CONTROL_IF_PAR8
 				    << HOST_CONTROL_IF_SHIFT;
-			host_ctl &= ~(HOST_CONTROL_REI | HOST_CONTROL_REO);
-			clock_ctl = CLOCK_CONTROL_50MHZ;
+			clock_ctl |= CLOCK_CONTROL_50MHZ;
+			clock_delay = 0;
 		} else
 			return -EINVAL;
 
 		writel(host_ctl, host->addr + HOST_CONTROL);
+		writel(CLOCK_CONTROL_OFF, host->addr + CLOCK_CONTROL);
 		writel(clock_ctl, host->addr + CLOCK_CONTROL);
-		pci_write_config_dword(host->chip->pdev,
-				       PCI_CTL_CLOCK_DLY_ADDR,
-				       clock_delay);
+		pci_write_config_byte(host->chip->pdev,
+				      PCI_CTL_CLOCK_DLY_ADDR + 1,
+				      clock_delay);
+		host->ifmode = value;
 		break;
 	};
 	return 0;
 }
 
+#define PCI_PMOS0_CONTROL		0xae
+#define  PMOS0_ENABLE			0x01
+#define  PMOS0_OVERCURRENT_LEVEL_2_4V	0x06
+#define  PMOS0_EN_OVERCURRENT_DEBOUNCE	0x40
+#define  PMOS0_SW_LED_POLARITY_ENABLE	0x80
+#define  PMOS0_ACTIVE_BITS (PMOS0_ENABLE | PMOS0_EN_OVERCURRENT_DEBOUNCE | \
+			    PMOS0_OVERCURRENT_LEVEL_2_4V)
+#define PCI_PMOS1_CONTROL		0xbd
+#define  PMOS1_ACTIVE_BITS		0x4a
+#define PCI_CLOCK_CTL			0xb9
+
+static int jmb38x_ms_pmos(struct pci_dev *pdev, int flag)
+{
+	unsigned char val;
+
+	pci_read_config_byte(pdev, PCI_PMOS0_CONTROL, &val);
+	if (flag)
+		val |= PMOS0_ACTIVE_BITS;
+	else
+		val &= ~PMOS0_ACTIVE_BITS;
+	pci_write_config_byte(pdev, PCI_PMOS0_CONTROL, val);
+	dev_dbg(&pdev->dev, "JMB38x: set PMOS0 val 0x%x\n", val);
+
+	if (pci_resource_flags(pdev, 1)) {
+		pci_read_config_byte(pdev, PCI_PMOS1_CONTROL, &val);
+		if (flag)
+			val |= PMOS1_ACTIVE_BITS;
+		else
+			val &= ~PMOS1_ACTIVE_BITS;
+		pci_write_config_byte(pdev, PCI_PMOS1_CONTROL, val);
+		dev_dbg(&pdev->dev, "JMB38x: set PMOS1 val 0x%x\n", val);
+	}
+
+	pci_read_config_byte(pdev, PCI_CLOCK_CTL, &val);
+	pci_write_config_byte(pdev, PCI_CLOCK_CTL, val & ~0x0f);
+	pci_write_config_byte(pdev, PCI_CLOCK_CTL, val | 0x01);
+	dev_dbg(&pdev->dev, "Clock Control by PCI config is disabled!\n");
+
+	return 0;
+}
+
 #ifdef CONFIG_PM
 
 static int jmb38x_ms_suspend(struct pci_dev *dev, pm_message_t state)
@@ -784,8 +831,7 @@
 		return rc;
 	pci_set_master(dev);
 
-	pci_read_config_dword(dev, 0xac, &rc);
-	pci_write_config_dword(dev, 0xac, rc | 0x00470000);
+	jmb38x_ms_pmos(dev, 1);
 
 	for (rc = 0; rc < jm->host_cnt; ++rc) {
 		if (!jm->hosts[rc])
@@ -894,8 +940,7 @@
 		goto err_out;
 	}
 
-	pci_read_config_dword(pdev, 0xac, &rc);
-	pci_write_config_dword(pdev, 0xac, rc | 0x00470000);
+	jmb38x_ms_pmos(pdev, 1);
 
 	cnt = jmb38x_ms_count_slots(pdev);
 	if (!cnt) {
@@ -976,6 +1021,8 @@
 		jmb38x_ms_free_host(jm->hosts[cnt]);
 	}
 
+	jmb38x_ms_pmos(dev, 0);
+
 	pci_set_drvdata(dev, NULL);
 	pci_release_regions(dev);
 	pci_disable_device(dev);
@@ -983,8 +1030,9 @@
 }
 
 static struct pci_device_id jmb38x_ms_id_tbl [] = {
-	{ PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB38X_MS, PCI_ANY_ID,
-	  PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VDEVICE(JMICRON, PCI_DEVICE_ID_JMICRON_JMB38X_MS) },
+	{ PCI_VDEVICE(JMICRON, PCI_DEVICE_ID_JMICRON_JMB385_MS) },
+	{ PCI_VDEVICE(JMICRON, PCI_DEVICE_ID_JMICRON_JMB390_MS) },
 	{ }
 };
 
diff --git a/drivers/message/fusion/lsi/mpi_log_sas.h b/drivers/message/fusion/lsi/mpi_log_sas.h
index 691620d..8b04810 100644
--- a/drivers/message/fusion/lsi/mpi_log_sas.h
+++ b/drivers/message/fusion/lsi/mpi_log_sas.h
@@ -268,7 +268,7 @@
 
 /* Compatibility Error : IR Disabled */
 #define IR_LOGINFO_COMPAT_ERROR_RAID_DISABLED                  (0x00010030)
-/* Compatibility Error : Inquiry Comand failed */
+/* Compatibility Error : Inquiry Command failed */
 #define IR_LOGINFO_COMPAT_ERROR_INQUIRY_FAILED                 (0x00010031)
 /* Compatibility Error : Device not direct access device */
 #define IR_LOGINFO_COMPAT_ERROR_NOT_DIRECT_ACCESS              (0x00010032)
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index 3e57b61..3358c0a 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -7977,7 +7977,7 @@
 		NULL,						/* 2Eh */
 		NULL,						/* 2Fh */
 		"Compatibility Error: IR Disabled",		/* 30h */
-		"Compatibility Error: Inquiry Comand Failed",	/* 31h */
+		"Compatibility Error: Inquiry Command Failed",	/* 31h */
 		"Compatibility Error: Device not Direct Access "
 		    "Device ",					/* 32h */
 		"Compatibility Error: Removable Device Found",	/* 33h */
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index d48c2c6..8aefb18 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -1146,7 +1146,7 @@
  *
  * This function will delete scheduled target reset from the list and
  * try to send next target reset. This will be called from completion
- * context of any Task managment command.
+ * context of any Task management command.
  */
 
 void
diff --git a/drivers/message/i2o/i2o_block.c b/drivers/message/i2o/i2o_block.c
index f87a9d4..ae7cad1 100644
--- a/drivers/message/i2o/i2o_block.c
+++ b/drivers/message/i2o/i2o_block.c
@@ -309,7 +309,7 @@
  *	@ireq: I2O block request
  *	@mptr: message body pointer
  *
- *	Builds the SG list and map it to be accessable by the controller.
+ *	Builds the SG list and maps it to be accessible by the controller.
  *
  *	Returns 0 on failure or 1 on success.
  */
diff --git a/drivers/mfd/sh_mobile_sdhi.c b/drivers/mfd/sh_mobile_sdhi.c
index f1714f9..0a7df44 100644
--- a/drivers/mfd/sh_mobile_sdhi.c
+++ b/drivers/mfd/sh_mobile_sdhi.c
@@ -131,11 +131,17 @@
 	 */
 	mmc_data->flags |= TMIO_MMC_BLKSZ_2BYTES;
 
+	/*
+	 * All SDHI blocks support SDIO IRQ signalling.
+	 */
+	mmc_data->flags |= TMIO_MMC_SDIO_IRQ;
+
 	if (p && p->dma_slave_tx >= 0 && p->dma_slave_rx >= 0) {
 		priv->param_tx.slave_id = p->dma_slave_tx;
 		priv->param_rx.slave_id = p->dma_slave_rx;
 		priv->dma_priv.chan_priv_tx = &priv->param_tx;
 		priv->dma_priv.chan_priv_rx = &priv->param_rx;
+		priv->dma_priv.alignment_shift = 1; /* 2-byte alignment */
 		mmc_data->dma = &priv->dma_priv;
 	}
 
diff --git a/drivers/mfd/wm8994-core.c b/drivers/mfd/wm8994-core.c
index b3b2aaf8..8d221ba 100644
--- a/drivers/mfd/wm8994-core.c
+++ b/drivers/mfd/wm8994-core.c
@@ -218,6 +218,18 @@
 	"SPKVDD2",
 };
 
+static const char *wm8958_main_supplies[] = {
+	"DBVDD1",
+	"DBVDD2",
+	"DBVDD3",
+	"DCVDD",
+	"AVDD1",
+	"AVDD2",
+	"CPVDD",
+	"SPKVDD1",
+	"SPKVDD2",
+};
+
 #ifdef CONFIG_PM
 static int wm8994_device_suspend(struct device *dev)
 {
@@ -239,7 +251,7 @@
 	if (ret < 0)
 		dev_err(dev, "Failed to save LDO registers: %d\n", ret);
 
-	ret = regulator_bulk_disable(ARRAY_SIZE(wm8994_main_supplies),
+	ret = regulator_bulk_disable(wm8994->num_supplies,
 				     wm8994->supplies);
 	if (ret != 0) {
 		dev_err(dev, "Failed to disable supplies: %d\n", ret);
@@ -254,7 +266,7 @@
 	struct wm8994 *wm8994 = dev_get_drvdata(dev);
 	int ret;
 
-	ret = regulator_bulk_enable(ARRAY_SIZE(wm8994_main_supplies),
+	ret = regulator_bulk_enable(wm8994->num_supplies,
 				    wm8994->supplies);
 	if (ret != 0) {
 		dev_err(dev, "Failed to enable supplies: %d\n", ret);
@@ -305,9 +317,10 @@
 /*
  * Instantiate the generic non-control parts of the device.
  */
-static int wm8994_device_init(struct wm8994 *wm8994, unsigned long id, int irq)
+static int wm8994_device_init(struct wm8994 *wm8994, int irq)
 {
 	struct wm8994_pdata *pdata = wm8994->dev->platform_data;
+	const char *devname;
 	int ret, i;
 
 	mutex_init(&wm8994->io_lock);
@@ -323,25 +336,48 @@
 		goto err;
 	}
 
+	switch (wm8994->type) {
+	case WM8994:
+		wm8994->num_supplies = ARRAY_SIZE(wm8994_main_supplies);
+		break;
+	case WM8958:
+		wm8994->num_supplies = ARRAY_SIZE(wm8958_main_supplies);
+		break;
+	default:
+		BUG();
+		return -EINVAL;
+	}
+
 	wm8994->supplies = kzalloc(sizeof(struct regulator_bulk_data) *
-				   ARRAY_SIZE(wm8994_main_supplies),
+				   wm8994->num_supplies,
 				   GFP_KERNEL);
 	if (!wm8994->supplies) {
 		ret = -ENOMEM;
 		goto err;
 	}
 
-	for (i = 0; i < ARRAY_SIZE(wm8994_main_supplies); i++)
-		wm8994->supplies[i].supply = wm8994_main_supplies[i];
-
-	ret = regulator_bulk_get(wm8994->dev, ARRAY_SIZE(wm8994_main_supplies),
+	switch (wm8994->type) {
+	case WM8994:
+		for (i = 0; i < ARRAY_SIZE(wm8994_main_supplies); i++)
+			wm8994->supplies[i].supply = wm8994_main_supplies[i];
+		break;
+	case WM8958:
+		for (i = 0; i < ARRAY_SIZE(wm8958_main_supplies); i++)
+			wm8994->supplies[i].supply = wm8958_main_supplies[i];
+		break;
+	default:
+		BUG();
+		return -EINVAL;
+	}
+
+	ret = regulator_bulk_get(wm8994->dev, wm8994->num_supplies,
 				 wm8994->supplies);
 	if (ret != 0) {
 		dev_err(wm8994->dev, "Failed to get supplies: %d\n", ret);
 		goto err_supplies;
 	}
 
-	ret = regulator_bulk_enable(ARRAY_SIZE(wm8994_main_supplies),
+	ret = regulator_bulk_enable(wm8994->num_supplies,
 				    wm8994->supplies);
 	if (ret != 0) {
 		dev_err(wm8994->dev, "Failed to enable supplies: %d\n", ret);
@@ -353,7 +389,22 @@
 		dev_err(wm8994->dev, "Failed to read ID register\n");
 		goto err_enable;
 	}
-	if (ret != 0x8994) {
+	switch (ret) {
+	case 0x8994:
+		devname = "WM8994";
+		if (wm8994->type != WM8994)
+			dev_warn(wm8994->dev, "Device registered as type %d\n",
+				 wm8994->type);
+		wm8994->type = WM8994;
+		break;
+	case 0x8958:
+		devname = "WM8958";
+		if (wm8994->type != WM8958)
+			dev_warn(wm8994->dev, "Device registered as type %d\n",
+				 wm8994->type);
+		wm8994->type = WM8958;
+		break;
+	default:
 		dev_err(wm8994->dev, "Device is not a WM8994, ID is %x\n",
 			ret);
 		ret = -EINVAL;
@@ -370,14 +421,16 @@
 	switch (ret) {
 	case 0:
 	case 1:
-		dev_warn(wm8994->dev, "revision %c not fully supported\n",
-			'A' + ret);
+		if (wm8994->type == WM8994)
+			dev_warn(wm8994->dev,
+				 "revision %c not fully supported\n",
+				 'A' + ret);
 		break;
 	default:
-		dev_info(wm8994->dev, "revision %c\n", 'A' + ret);
 		break;
 	}
 
+	dev_info(wm8994->dev, "%s revision %c\n", devname, 'A' + ret);
 
 	if (pdata) {
 		wm8994->irq_base = pdata->irq_base;
@@ -423,10 +476,10 @@
 err_irq:
 	wm8994_irq_exit(wm8994);
 err_enable:
-	regulator_bulk_disable(ARRAY_SIZE(wm8994_main_supplies),
+	regulator_bulk_disable(wm8994->num_supplies,
 			       wm8994->supplies);
 err_get:
-	regulator_bulk_free(ARRAY_SIZE(wm8994_main_supplies), wm8994->supplies);
+	regulator_bulk_free(wm8994->num_supplies, wm8994->supplies);
 err_supplies:
 	kfree(wm8994->supplies);
 err:
@@ -439,9 +492,9 @@
 {
 	mfd_remove_devices(wm8994->dev);
 	wm8994_irq_exit(wm8994);
-	regulator_bulk_disable(ARRAY_SIZE(wm8994_main_supplies),
+	regulator_bulk_disable(wm8994->num_supplies,
 			       wm8994->supplies);
-	regulator_bulk_free(ARRAY_SIZE(wm8994_main_supplies), wm8994->supplies);
+	regulator_bulk_free(wm8994->num_supplies, wm8994->supplies);
 	kfree(wm8994->supplies);
 	kfree(wm8994);
 }
@@ -506,8 +559,9 @@
 	wm8994->read_dev = wm8994_i2c_read_device;
 	wm8994->write_dev = wm8994_i2c_write_device;
 	wm8994->irq = i2c->irq;
+	wm8994->type = id->driver_data;
 
-	return wm8994_device_init(wm8994, id->driver_data, i2c->irq);
+	return wm8994_device_init(wm8994, i2c->irq);
 }
 
 static int wm8994_i2c_remove(struct i2c_client *i2c)
@@ -535,7 +589,8 @@
 #endif
 
 static const struct i2c_device_id wm8994_i2c_id[] = {
-	{ "wm8994", 0 },
+	{ "wm8994", WM8994 },
+	{ "wm8958", WM8958 },
 	{ }
 };
 MODULE_DEVICE_TABLE(i2c, wm8994_i2c_id);
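
The wm8994-core.c change above sizes every regulator_bulk_get/enable/disable/free call from the per-variant wm8994->num_supplies instead of a fixed ARRAY_SIZE(wm8994_main_supplies). A minimal sketch of that pattern, using hypothetical table and variable names rather than the driver's real ones and assuming the usual kernel headers:

	static const char * const a_names[] = { "DBVDD", "AVDD" };
	static const char * const b_names[] = { "DBVDD1", "DBVDD2", "AVDD" };

	/* Pick the table and count once at probe time, then reuse the
	 * count for every later regulator_bulk_*() call. */
	static int pick_supplies(struct device *dev, bool is_variant_b,
				 struct regulator_bulk_data **out, int *num)
	{
		const char * const *names = is_variant_b ? b_names : a_names;
		struct regulator_bulk_data *sup;
		int i, ret;

		*num = is_variant_b ? ARRAY_SIZE(b_names) : ARRAY_SIZE(a_names);
		sup = kcalloc(*num, sizeof(*sup), GFP_KERNEL);
		if (!sup)
			return -ENOMEM;
		for (i = 0; i < *num; i++)
			sup[i].supply = names[i];
		ret = regulator_bulk_get(dev, *num, sup);
		if (ret)
			kfree(sup);
		else
			*out = sup;
		return ret;	/* later disable/free must also use *num */
	}
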
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 4d073f1..1e1a4be 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -402,7 +402,7 @@
 	  DAC7512 16-bit digital-to-analog converter.
 
 	  This driver can also be built as a module. If so, the module
-	  will be calles ti_dac7512.
+	  will be called ti_dac7512.
 
 config VMWARE_BALLOON
 	tristate "VMware Balloon Driver"
diff --git a/drivers/misc/arm-charlcd.c b/drivers/misc/arm-charlcd.c
index 9e3879e..fe8616a 100644
--- a/drivers/misc/arm-charlcd.c
+++ b/drivers/misc/arm-charlcd.c
@@ -313,7 +313,7 @@
 	INIT_DELAYED_WORK(&lcd->init_work, charlcd_init_work);
 	schedule_delayed_work(&lcd->init_work, 0);
 
-	dev_info(&pdev->dev, "initalized ARM character LCD at %08x\n",
+	dev_info(&pdev->dev, "initialized ARM character LCD at %08x\n",
 		lcd->phybase);
 
 	return 0;
diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c
index 2a1e804..4d2ea8e 100644
--- a/drivers/misc/vmw_balloon.c
+++ b/drivers/misc/vmw_balloon.c
@@ -45,7 +45,7 @@
 
 MODULE_AUTHOR("VMware, Inc.");
 MODULE_DESCRIPTION("VMware Memory Control (Balloon) Driver");
-MODULE_VERSION("1.2.1.1-k");
+MODULE_VERSION("1.2.1.2-k");
 MODULE_ALIAS("dmi:*:svnVMware*:*");
 MODULE_ALIAS("vmware_vmmemctl");
 MODULE_LICENSE("GPL");
@@ -315,7 +315,8 @@
  * fear that guest will need it. Host may reject some pages, we need to
  * check the return value and maybe submit a different page.
  */
-static bool vmballoon_send_lock_page(struct vmballoon *b, unsigned long pfn)
+static bool vmballoon_send_lock_page(struct vmballoon *b, unsigned long pfn,
+				     unsigned int *hv_status)
 {
 	unsigned long status, dummy;
 	u32 pfn32;
@@ -326,7 +327,7 @@
 
 	STATS_INC(b->stats.lock);
 
-	status = VMWARE_BALLOON_CMD(LOCK, pfn, dummy);
+	*hv_status = status = VMWARE_BALLOON_CMD(LOCK, pfn, dummy);
 	if (vmballoon_check_status(b, status))
 		return true;
 
@@ -410,6 +411,7 @@
 {
 	struct page *page;
 	gfp_t flags;
+	unsigned int hv_status;
 	bool locked = false;
 
 	do {
@@ -429,11 +431,12 @@
 		}
 
 		/* inform monitor */
-		locked = vmballoon_send_lock_page(b, page_to_pfn(page));
+		locked = vmballoon_send_lock_page(b, page_to_pfn(page), &hv_status);
 		if (!locked) {
 			STATS_INC(b->stats.refused_alloc);
 
-			if (b->reset_required) {
+			if (hv_status == VMW_BALLOON_ERROR_RESET ||
+			    hv_status == VMW_BALLOON_ERROR_PPN_NOTNEEDED) {
 				__free_page(page);
 				return -EIO;
 			}
diff --git a/drivers/mmc/card/Kconfig b/drivers/mmc/card/Kconfig
index 57e4416..2a876c4 100644
--- a/drivers/mmc/card/Kconfig
+++ b/drivers/mmc/card/Kconfig
@@ -16,6 +16,7 @@
 
 config MMC_BLOCK_MINORS
 	int "Number of minors per block device"
+	depends on MMC_BLOCK
 	range 4 256
 	default 8
 	help
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 217f820..bfc8a8a 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -257,7 +257,7 @@
 	cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
 	err = mmc_wait_for_cmd(card->host, &cmd, 0);
 	if (err)
-		printk(KERN_ERR "%s: error %d sending status comand",
+		printk(KERN_ERR "%s: error %d sending status command",
 		       req->rq_disk->disk_name, err);
 	return cmd.resp[0];
 }
diff --git a/drivers/mmc/core/Kconfig b/drivers/mmc/core/Kconfig
index bb22ffd..ef10387 100644
--- a/drivers/mmc/core/Kconfig
+++ b/drivers/mmc/core/Kconfig
@@ -16,3 +16,14 @@
 
 	  This option sets a default which can be overridden by the
 	  module parameter "removable=0" or "removable=1".
+
+config MMC_CLKGATE
+	bool "MMC host clock gating (EXPERIMENTAL)"
+	depends on EXPERIMENTAL
+	help
+	  This will attempt to aggressively gate the clock to the MMC card.
+	  This is done to save power due to gating off the logic and bus
+	  noise when the MMC card is not in use. Your host driver has to
+	  support handling this in order for it to be of any use.
+
+	  If unsure, say N.
diff --git a/drivers/mmc/core/bus.c b/drivers/mmc/core/bus.c
index af8dc6a..63667a8 100644
--- a/drivers/mmc/core/bus.c
+++ b/drivers/mmc/core/bus.c
@@ -303,14 +303,14 @@
 			type, card->rca);
 	}
 
-	ret = device_add(&card->dev);
-	if (ret)
-		return ret;
-
 #ifdef CONFIG_DEBUG_FS
 	mmc_add_card_debugfs(card);
 #endif
 
+	ret = device_add(&card->dev);
+	if (ret)
+		return ret;
+
 	mmc_card_set_present(card);
 
 	return 0;
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index a3a780f..6625c05 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -22,6 +22,7 @@
 #include <linux/scatterlist.h>
 #include <linux/log2.h>
 #include <linux/regulator/consumer.h>
+#include <linux/pm_runtime.h>
 
 #include <linux/mmc/card.h>
 #include <linux/mmc/host.h>
@@ -130,6 +131,8 @@
 
 		if (mrq->done)
 			mrq->done(mrq);
+
+		mmc_host_clk_gate(host);
 	}
 }
 
@@ -190,6 +193,7 @@
 			mrq->stop->mrq = mrq;
 		}
 	}
+	mmc_host_clk_ungate(host);
 	host->ops->request(host, mrq);
 }
 
@@ -295,8 +299,9 @@
 		unsigned int timeout_us, limit_us;
 
 		timeout_us = data->timeout_ns / 1000;
-		timeout_us += data->timeout_clks * 1000 /
-			(card->host->ios.clock / 1000);
+		if (mmc_host_clk_rate(card->host))
+			timeout_us += data->timeout_clks * 1000 /
+				(mmc_host_clk_rate(card->host) / 1000);
 
 		if (data->flags & MMC_DATA_WRITE)
 			/*
@@ -614,6 +619,8 @@
 		 ios->power_mode, ios->chip_select, ios->vdd,
 		 ios->bus_width, ios->timing);
 
+	if (ios->clock > 0)
+		mmc_set_ungated(host);
 	host->ops->set_ios(host, ios);
 }
 
@@ -641,6 +648,61 @@
 	mmc_set_ios(host);
 }
 
+#ifdef CONFIG_MMC_CLKGATE
+/*
+ * This gates the clock by setting it to 0 Hz.
+ */
+void mmc_gate_clock(struct mmc_host *host)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&host->clk_lock, flags);
+	host->clk_old = host->ios.clock;
+	host->ios.clock = 0;
+	host->clk_gated = true;
+	spin_unlock_irqrestore(&host->clk_lock, flags);
+	mmc_set_ios(host);
+}
+
+/*
+ * This restores the clock from gating by using the cached
+ * clock value.
+ */
+void mmc_ungate_clock(struct mmc_host *host)
+{
+	/*
+	 * We should previously have gated the clock, so the clock shall
+	 * be 0 here! The clock may however be 0 during initialization,
+	 * when some request operations are performed before setting
+	 * the frequency. When ungate is requested in that situation
+	 * we just ignore the call.
+	 */
+	if (host->clk_old) {
+		BUG_ON(host->ios.clock);
+		/* This call will also set host->clk_gated to false */
+		mmc_set_clock(host, host->clk_old);
+	}
+}
+
+void mmc_set_ungated(struct mmc_host *host)
+{
+	unsigned long flags;
+
+	/*
+	 * We've been given a new frequency while the clock is gated,
+	 * so make sure we regard this as ungating it.
+	 */
+	spin_lock_irqsave(&host->clk_lock, flags);
+	host->clk_gated = false;
+	spin_unlock_irqrestore(&host->clk_lock, flags);
+}
+
+#else
+void mmc_set_ungated(struct mmc_host *host)
+{
+}
+#endif
+
 /*
  * Change the bus mode (open drain/push-pull) of a host.
  */
@@ -1424,35 +1486,57 @@
 }
 EXPORT_SYMBOL(mmc_set_blocklen);
 
+static int mmc_rescan_try_freq(struct mmc_host *host, unsigned freq)
+{
+	host->f_init = freq;
+
+#ifdef CONFIG_MMC_DEBUG
+	pr_info("%s: %s: trying to init card at %u Hz\n",
+		mmc_hostname(host), __func__, host->f_init);
+#endif
+	mmc_power_up(host);
+	sdio_reset(host);
+	mmc_go_idle(host);
+
+	mmc_send_if_cond(host, host->ocr_avail);
+
+	/* Order's important: probe SDIO, then SD, then MMC */
+	if (!mmc_attach_sdio(host))
+		return 0;
+	if (!mmc_attach_sd(host))
+		return 0;
+	if (!mmc_attach_mmc(host))
+		return 0;
+
+	mmc_power_off(host);
+	return -EIO;
+}
+
 void mmc_rescan(struct work_struct *work)
 {
+	static const unsigned freqs[] = { 400000, 300000, 200000, 100000 };
 	struct mmc_host *host =
 		container_of(work, struct mmc_host, detect.work);
-	u32 ocr;
-	int err;
-	unsigned long flags;
 	int i;
-	const unsigned freqs[] = { 400000, 300000, 200000, 100000 };
 
-	spin_lock_irqsave(&host->lock, flags);
-
-	if (host->rescan_disable) {
-		spin_unlock_irqrestore(&host->lock, flags);
+	if (host->rescan_disable)
 		return;
-	}
-
-	spin_unlock_irqrestore(&host->lock, flags);
-
 
 	mmc_bus_get(host);
 
-	/* if there is a card registered, check whether it is still present */
-	if ((host->bus_ops != NULL) && host->bus_ops->detect && !host->bus_dead)
+	/*
+	 * if there is a _removable_ card registered, check whether it is
+	 * still present
+	 */
+	if (host->bus_ops && host->bus_ops->detect && !host->bus_dead
+	    && mmc_card_is_removable(host))
 		host->bus_ops->detect(host);
 
+	/*
+	 * Let mmc_bus_put() free the bus/bus_ops if we've found that
+	 * the card is no longer present.
+	 */
 	mmc_bus_put(host);
-
-
 	mmc_bus_get(host);
 
 	/* if there still is a card present, stop here */
@@ -1461,8 +1545,6 @@
 		goto out;
 	}
 
-	/* detect a newly inserted card */
-
 	/*
 	 * Only we can add a new handler, so it's safe to
 	 * release the lock here.
@@ -1472,72 +1554,16 @@
 	if (host->ops->get_cd && host->ops->get_cd(host) == 0)
 		goto out;
 
+	mmc_claim_host(host);
 	for (i = 0; i < ARRAY_SIZE(freqs); i++) {
-		mmc_claim_host(host);
-
-		if (freqs[i] >= host->f_min)
-			host->f_init = freqs[i];
-		else if (!i || freqs[i-1] > host->f_min)
-			host->f_init = host->f_min;
-		else {
-			mmc_release_host(host);
-			goto out;
-		}
-#ifdef CONFIG_MMC_DEBUG
-		pr_info("%s: %s: trying to init card at %u Hz\n",
-			mmc_hostname(host), __func__, host->f_init);
-#endif
-		mmc_power_up(host);
-		sdio_reset(host);
-		mmc_go_idle(host);
-
-		mmc_send_if_cond(host, host->ocr_avail);
-
-		/*
-		 * First we search for SDIO...
-		 */
-		err = mmc_send_io_op_cond(host, 0, &ocr);
-		if (!err) {
-			if (mmc_attach_sdio(host, ocr)) {
-				mmc_claim_host(host);
-				/*
-				 * Try SDMEM (but not MMC) even if SDIO
-				 * is broken.
-				 */
-				if (mmc_send_app_op_cond(host, 0, &ocr))
-					goto out_fail;
-
-				if (mmc_attach_sd(host, ocr))
-					mmc_power_off(host);
-			}
-			goto out;
-		}
-
-		/*
-		 * ...then normal SD...
-		 */
-		err = mmc_send_app_op_cond(host, 0, &ocr);
-		if (!err) {
-			if (mmc_attach_sd(host, ocr))
-				mmc_power_off(host);
-			goto out;
-		}
-
-		/*
-		 * ...and finally MMC.
-		 */
-		err = mmc_send_op_cond(host, 0, &ocr);
-		if (!err) {
-			if (mmc_attach_mmc(host, ocr))
-				mmc_power_off(host);
-			goto out;
-		}
-
-out_fail:
-		mmc_release_host(host);
-		mmc_power_off(host);
+		if (!mmc_rescan_try_freq(host, max(freqs[i], host->f_min)))
+			break;
+		if (freqs[i] < host->f_min)
+			break;
 	}
-out:
+	mmc_release_host(host);
+
+ out:
 	if (host->caps & MMC_CAP_NEEDS_POLL)
 		mmc_schedule_delayed_work(&host->detect, HZ);
 }
@@ -1721,6 +1747,18 @@
 		if (!(host->pm_flags & MMC_PM_KEEP_POWER)) {
 			mmc_power_up(host);
 			mmc_select_voltage(host, host->ocr);
+			/*
+			 * Tell runtime PM core we just powered up the card,
+			 * since it still believes the card is powered off.
+			 * Note that currently runtime PM is only enabled
+			 * for SDIO cards that are MMC_CAP_POWER_OFF_CARD
+			 */
+			if (mmc_card_sdio(host->card) &&
+			    (host->caps & MMC_CAP_POWER_OFF_CARD)) {
+				pm_runtime_disable(&host->card->dev);
+				pm_runtime_set_active(&host->card->dev);
+				pm_runtime_enable(&host->card->dev);
+			}
 		}
 		BUG_ON(!host->bus_ops->resume);
 		err = host->bus_ops->resume(host);
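
One consequence of the mmc_rescan() rework above is that card init is now retried down a fixed frequency ladder, clamped to the host minimum. As a hypothetical example, with host->f_min at 250 kHz the attempts would run at 400 kHz, 300 kHz and then max(200000, 250000) = 250 kHz, after which the loop stops because 200000 is below f_min; each attempt probes SDIO first, then SD, then MMC, and powers the host back off if none of them attach.
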
diff --git a/drivers/mmc/core/core.h b/drivers/mmc/core/core.h
index 77240cd..ca1fdde 100644
--- a/drivers/mmc/core/core.h
+++ b/drivers/mmc/core/core.h
@@ -33,6 +33,9 @@
 
 void mmc_set_chip_select(struct mmc_host *host, int mode);
 void mmc_set_clock(struct mmc_host *host, unsigned int hz);
+void mmc_gate_clock(struct mmc_host *host);
+void mmc_ungate_clock(struct mmc_host *host);
+void mmc_set_ungated(struct mmc_host *host);
 void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode);
 void mmc_set_bus_width(struct mmc_host *host, unsigned int width);
 void mmc_set_bus_width_ddr(struct mmc_host *host, unsigned int width,
@@ -54,9 +57,9 @@
 void mmc_start_host(struct mmc_host *host);
 void mmc_stop_host(struct mmc_host *host);
 
-int mmc_attach_mmc(struct mmc_host *host, u32 ocr);
-int mmc_attach_sd(struct mmc_host *host, u32 ocr);
-int mmc_attach_sdio(struct mmc_host *host, u32 ocr);
+int mmc_attach_mmc(struct mmc_host *host);
+int mmc_attach_sd(struct mmc_host *host);
+int mmc_attach_sdio(struct mmc_host *host);
 
 /* Module parameters */
 extern int use_spi_crc;
diff --git a/drivers/mmc/core/debugfs.c b/drivers/mmc/core/debugfs.c
index eed1405f..998797e 100644
--- a/drivers/mmc/core/debugfs.c
+++ b/drivers/mmc/core/debugfs.c
@@ -183,6 +183,11 @@
 			&mmc_clock_fops))
 		goto err_node;
 
+#ifdef CONFIG_MMC_CLKGATE
+	if (!debugfs_create_u32("clk_delay", (S_IRUSR | S_IWUSR),
+				root, &host->clk_delay))
+		goto err_node;
+#endif
 	return;
 
 err_node:
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index 10b8af2..b3ac6c5 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -3,6 +3,7 @@
  *
  *  Copyright (C) 2003 Russell King, All Rights Reserved.
  *  Copyright (C) 2007-2008 Pierre Ossman
+ *  Copyright (C) 2010 Linus Walleij
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -20,6 +21,7 @@
 #include <linux/suspend.h>
 
 #include <linux/mmc/host.h>
+#include <linux/mmc/card.h>
 
 #include "core.h"
 #include "host.h"
@@ -50,6 +52,205 @@
 static DEFINE_IDR(mmc_host_idr);
 static DEFINE_SPINLOCK(mmc_host_lock);
 
+#ifdef CONFIG_MMC_CLKGATE
+
+/*
+ * Enabling clock gating will make the core call out to the host
+ * once up and once down when it performs a request or card operation
+ * intermingled in any fashion. The driver will see this through
+ * set_ios() operations with ios.clock field set to 0 to gate (disable)
+ * the block clock, and to the old frequency to enable it again.
+ */
+static void mmc_host_clk_gate_delayed(struct mmc_host *host)
+{
+	unsigned long tick_ns;
+	unsigned long freq = host->ios.clock;
+	unsigned long flags;
+
+	if (!freq) {
+		pr_debug("%s: frequency set to 0 in disable function, "
+			 "this means the clock is already disabled.\n",
+			 mmc_hostname(host));
+		return;
+	}
+	/*
+	 * New requests may have appeared while we were scheduling,
+	 * then there is no reason to delay the check before
+	 * clk_disable().
+	 */
+	spin_lock_irqsave(&host->clk_lock, flags);
+
+	/*
+	 * Delay n bus cycles (at least 8 from MMC spec) before attempting
+	 * to disable the MCI block clock. The reference count may have
+	 * gone up again after this delay due to rescheduling!
+	 */
+	if (!host->clk_requests) {
+		spin_unlock_irqrestore(&host->clk_lock, flags);
+		tick_ns = DIV_ROUND_UP(1000000000, freq);
+		ndelay(host->clk_delay * tick_ns);
+	} else {
+		/* New users appeared while waiting for this work */
+		spin_unlock_irqrestore(&host->clk_lock, flags);
+		return;
+	}
+	mutex_lock(&host->clk_gate_mutex);
+	spin_lock_irqsave(&host->clk_lock, flags);
+	if (!host->clk_requests) {
+		spin_unlock_irqrestore(&host->clk_lock, flags);
+		/* This will set host->ios.clock to 0 */
+		mmc_gate_clock(host);
+		spin_lock_irqsave(&host->clk_lock, flags);
+		pr_debug("%s: gated MCI clock\n", mmc_hostname(host));
+	}
+	spin_unlock_irqrestore(&host->clk_lock, flags);
+	mutex_unlock(&host->clk_gate_mutex);
+}
+
+/*
+ * Internal work. Work to disable the clock at some later point.
+ */
+static void mmc_host_clk_gate_work(struct work_struct *work)
+{
+	struct mmc_host *host = container_of(work, struct mmc_host,
+					      clk_gate_work);
+
+	mmc_host_clk_gate_delayed(host);
+}
+
+/**
+ *	mmc_host_clk_ungate - ungate hardware MCI clocks
+ *	@host: host to ungate.
+ *
+ *	Makes sure the host ios.clock is restored to a non-zero value
+ *	past this call.	Increase clock reference count and ungate clock
+ *	if we're the first user.
+ */
+void mmc_host_clk_ungate(struct mmc_host *host)
+{
+	unsigned long flags;
+
+	mutex_lock(&host->clk_gate_mutex);
+	spin_lock_irqsave(&host->clk_lock, flags);
+	if (host->clk_gated) {
+		spin_unlock_irqrestore(&host->clk_lock, flags);
+		mmc_ungate_clock(host);
+		spin_lock_irqsave(&host->clk_lock, flags);
+		pr_debug("%s: ungated MCI clock\n", mmc_hostname(host));
+	}
+	host->clk_requests++;
+	spin_unlock_irqrestore(&host->clk_lock, flags);
+	mutex_unlock(&host->clk_gate_mutex);
+}
+
+/**
+ *	mmc_host_may_gate_card - check if this card may be gated
+ *	@card: card to check.
+ */
+static bool mmc_host_may_gate_card(struct mmc_card *card)
+{
+	/* If there is no card we may gate it */
+	if (!card)
+		return true;
+	/*
+	 * Don't gate SDIO cards! These need to be clocked at all times
+	 * since they may be independent systems generating interrupts
+	 * and other events. The clock requests counter from the core will
+	 * go down to zero since the core does not need it, but we will not
+	 * gate the clock, because there is somebody out there that may still
+	 * be using it.
+	 */
+	if (mmc_card_sdio(card))
+		return false;
+
+	return true;
+}
+
+/**
+ *	mmc_host_clk_gate - gate off hardware MCI clocks
+ *	@host: host to gate.
+ *
+ *	Calls the host driver with ios.clock set to zero as often as possible
+ *	in order to gate off hardware MCI clocks. Decrease clock reference
+ *	count and schedule disabling of clock.
+ */
+void mmc_host_clk_gate(struct mmc_host *host)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&host->clk_lock, flags);
+	host->clk_requests--;
+	if (mmc_host_may_gate_card(host->card) &&
+	    !host->clk_requests)
+		schedule_work(&host->clk_gate_work);
+	spin_unlock_irqrestore(&host->clk_lock, flags);
+}
+
+/**
+ *	mmc_host_clk_rate - get current clock frequency setting
+ *	@host: host to get the clock frequency for.
+ *
+ *	Returns current clock frequency regardless of gating.
+ */
+unsigned int mmc_host_clk_rate(struct mmc_host *host)
+{
+	unsigned long freq;
+	unsigned long flags;
+
+	spin_lock_irqsave(&host->clk_lock, flags);
+	if (host->clk_gated)
+		freq = host->clk_old;
+	else
+		freq = host->ios.clock;
+	spin_unlock_irqrestore(&host->clk_lock, flags);
+	return freq;
+}
+
+/**
+ *	mmc_host_clk_init - set up clock gating code
+ *	@host: host with potential clock to control
+ */
+static inline void mmc_host_clk_init(struct mmc_host *host)
+{
+	host->clk_requests = 0;
+	/* Hold MCI clock for 8 cycles by default */
+	host->clk_delay = 8;
+	host->clk_gated = false;
+	INIT_WORK(&host->clk_gate_work, mmc_host_clk_gate_work);
+	spin_lock_init(&host->clk_lock);
+	mutex_init(&host->clk_gate_mutex);
+}
+
+/**
+ *	mmc_host_clk_exit - shut down clock gating code
+ *	@host: host with potential clock to control
+ */
+static inline void mmc_host_clk_exit(struct mmc_host *host)
+{
+	/*
+	 * Wait for any outstanding gate and then make sure we're
+	 * ungated before exiting.
+	 */
+	if (cancel_work_sync(&host->clk_gate_work))
+		mmc_host_clk_gate_delayed(host);
+	if (host->clk_gated)
+		mmc_host_clk_ungate(host);
+	/* There should be only one user now */
+	WARN_ON(host->clk_requests > 1);
+}
+
+#else
+
+static inline void mmc_host_clk_init(struct mmc_host *host)
+{
+}
+
+static inline void mmc_host_clk_exit(struct mmc_host *host)
+{
+}
+
+#endif
+
 /**
  *	mmc_alloc_host - initialise the per-host structure.
  *	@extra: sizeof private data structure
@@ -82,6 +283,8 @@
 	host->class_dev.class = &mmc_host_class;
 	device_initialize(&host->class_dev);
 
+	mmc_host_clk_init(host);
+
 	spin_lock_init(&host->lock);
 	init_waitqueue_head(&host->wq);
 	INIT_DELAYED_WORK(&host->detect, mmc_rescan);
@@ -163,6 +366,8 @@
 	device_del(&host->class_dev);
 
 	led_trigger_unregister_simple(host->led);
+
+	mmc_host_clk_exit(host);
 }
 
 EXPORT_SYMBOL(mmc_remove_host);
@@ -183,4 +388,3 @@
 }
 
 EXPORT_SYMBOL(mmc_free_host);
-
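
From a host driver's point of view the clock gating added above only ever shows up as set_ios() calls: ios->clock == 0 asks the driver to stop the card clock, and the next non-zero value restores it. A hypothetical set_ios() fragment (foo_host, clk_on and the clk handle are assumptions, not taken from any in-tree driver) handling just the clock part:

	static void foo_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
	{
		struct foo_host *host = mmc_priv(mmc);

		if (ios->clock == 0) {
			/* core gated the clock: stop the controller clock */
			if (host->clk_on) {
				clk_disable(host->clk);
				host->clk_on = false;
			}
		} else {
			if (!host->clk_on) {
				clk_enable(host->clk);
				host->clk_on = true;
			}
			clk_set_rate(host->clk, ios->clock);
		}
		/* power mode, bus width etc. handled as in any other driver */
	}
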
diff --git a/drivers/mmc/core/host.h b/drivers/mmc/core/host.h
index 8c87e11..de199f9 100644
--- a/drivers/mmc/core/host.h
+++ b/drivers/mmc/core/host.h
@@ -10,10 +10,31 @@
  */
 #ifndef _MMC_CORE_HOST_H
 #define _MMC_CORE_HOST_H
+#include <linux/mmc/host.h>
 
 int mmc_register_host_class(void);
 void mmc_unregister_host_class(void);
 
+#ifdef CONFIG_MMC_CLKGATE
+void mmc_host_clk_ungate(struct mmc_host *host);
+void mmc_host_clk_gate(struct mmc_host *host);
+unsigned int mmc_host_clk_rate(struct mmc_host *host);
+
+#else
+static inline void mmc_host_clk_ungate(struct mmc_host *host)
+{
+}
+
+static inline void mmc_host_clk_gate(struct mmc_host *host)
+{
+}
+
+static inline unsigned int mmc_host_clk_rate(struct mmc_host *host)
+{
+	return host->ios.clock;
+}
+#endif
+
 void mmc_host_deeper_disable(struct work_struct *work);
 
 #endif
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 77f93c3..16006ef 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -534,39 +534,57 @@
 	 */
 	if ((card->csd.mmca_vsn >= CSD_SPEC_VER_4) &&
 	    (host->caps & (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA))) {
-		unsigned ext_csd_bit, bus_width;
+		static unsigned ext_csd_bits[][2] = {
+			{ EXT_CSD_BUS_WIDTH_8, EXT_CSD_DDR_BUS_WIDTH_8 },
+			{ EXT_CSD_BUS_WIDTH_4, EXT_CSD_DDR_BUS_WIDTH_4 },
+			{ EXT_CSD_BUS_WIDTH_1, EXT_CSD_BUS_WIDTH_1 },
+		};
+		static unsigned bus_widths[] = {
+			MMC_BUS_WIDTH_8,
+			MMC_BUS_WIDTH_4,
+			MMC_BUS_WIDTH_1
+		};
+		unsigned idx, bus_width = 0;
 
-		if (host->caps & MMC_CAP_8_BIT_DATA) {
-			if (ddr)
-				ext_csd_bit = EXT_CSD_DDR_BUS_WIDTH_8;
-			else
-				ext_csd_bit = EXT_CSD_BUS_WIDTH_8;
-			bus_width = MMC_BUS_WIDTH_8;
-		} else {
-			if (ddr)
-				ext_csd_bit = EXT_CSD_DDR_BUS_WIDTH_4;
-			else
-				ext_csd_bit = EXT_CSD_BUS_WIDTH_4;
-			bus_width = MMC_BUS_WIDTH_4;
+		if (host->caps & MMC_CAP_8_BIT_DATA)
+			idx = 0;
+		else
+			idx = 1;
+		for (; idx < ARRAY_SIZE(bus_widths); idx++) {
+			bus_width = bus_widths[idx];
+			if (bus_width == MMC_BUS_WIDTH_1)
+				ddr = 0; /* no DDR for 1-bit width */
+			err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+					 EXT_CSD_BUS_WIDTH,
+					 ext_csd_bits[idx][0]);
+			if (!err) {
+				mmc_set_bus_width_ddr(card->host,
+						      bus_width, MMC_SDR_MODE);
+				/*
+				 * If controller can't handle bus width test,
+				 * use the highest bus width to maintain
+				 * compatibility with previous MMC behavior.
+				 */
+				if (!(host->caps & MMC_CAP_BUS_WIDTH_TEST))
+					break;
+				err = mmc_bus_test(card, bus_width);
+				if (!err)
+					break;
+			}
 		}
 
-		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
-				 EXT_CSD_BUS_WIDTH, ext_csd_bit);
-
-		if (err && err != -EBADMSG)
-			goto free_card;
-
+		if (!err && ddr) {
+			err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+					EXT_CSD_BUS_WIDTH,
+					ext_csd_bits[idx][1]);
+		}
 		if (err) {
 			printk(KERN_WARNING "%s: switch to bus width %d ddr %d "
-			       "failed\n", mmc_hostname(card->host),
-			       1 << bus_width, ddr);
-			err = 0;
-		} else {
-			if (ddr)
-				mmc_card_set_ddr_mode(card);
-			else
-				ddr = MMC_SDR_MODE;
-
+				"failed\n", mmc_hostname(card->host),
+				1 << bus_width, ddr);
+			goto free_card;
+		} else if (ddr) {
+			mmc_card_set_ddr_mode(card);
 			mmc_set_bus_width_ddr(card->host, bus_width, ddr);
 		}
 	}
@@ -737,14 +755,21 @@
 /*
  * Starting point for MMC card init.
  */
-int mmc_attach_mmc(struct mmc_host *host, u32 ocr)
+int mmc_attach_mmc(struct mmc_host *host)
 {
 	int err;
+	u32 ocr;
 
 	BUG_ON(!host);
 	WARN_ON(!host->claimed);
 
+	err = mmc_send_op_cond(host, 0, &ocr);
+	if (err)
+		return err;
+
 	mmc_attach_bus_ops(host);
+	if (host->ocr_avail_mmc)
+		host->ocr_avail = host->ocr_avail_mmc;
 
 	/*
 	 * We need to get OCR a different way for SPI.
@@ -784,20 +809,20 @@
 		goto err;
 
 	mmc_release_host(host);
-
 	err = mmc_add_card(host->card);
+	mmc_claim_host(host);
 	if (err)
 		goto remove_card;
 
 	return 0;
 
 remove_card:
+	mmc_release_host(host);
 	mmc_remove_card(host->card);
-	host->card = NULL;
 	mmc_claim_host(host);
+	host->card = NULL;
 err:
 	mmc_detach_bus(host);
-	mmc_release_host(host);
 
 	printk(KERN_ERR "%s: error %d whilst initialising MMC card\n",
 		mmc_hostname(host), err);
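
The bus-width selection rework above walks the widths from widest to narrowest: each EXT_CSD_BUS_WIDTH switch is confirmed with mmc_bus_test() when the host advertises MMC_CAP_BUS_WIDTH_TEST, and on failure the next narrower width is tried (with DDR dropped once the 1-bit entry is reached); hosts without that capability keep the previous behaviour of accepting the widest switch the card acknowledges.
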
diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
index 326447c..60842f8 100644
--- a/drivers/mmc/core/mmc_ops.c
+++ b/drivers/mmc/core/mmc_ops.c
@@ -462,3 +462,104 @@
 	return 0;
 }
 
+static int
+mmc_send_bus_test(struct mmc_card *card, struct mmc_host *host, u8 opcode,
+		  u8 len)
+{
+	struct mmc_request mrq;
+	struct mmc_command cmd;
+	struct mmc_data data;
+	struct scatterlist sg;
+	u8 *data_buf;
+	u8 *test_buf;
+	int i, err;
+	static u8 testdata_8bit[8] = { 0x55, 0xaa, 0, 0, 0, 0, 0, 0 };
+	static u8 testdata_4bit[4] = { 0x5a, 0, 0, 0 };
+
+	/* dma onto stack is unsafe/nonportable, but callers to this
+	 * routine normally provide temporary on-stack buffers ...
+	 */
+	data_buf = kmalloc(len, GFP_KERNEL);
+	if (!data_buf)
+		return -ENOMEM;
+
+	if (len == 8)
+		test_buf = testdata_8bit;
+	else if (len == 4)
+		test_buf = testdata_4bit;
+	else {
+		printk(KERN_ERR "%s: Invalid bus_width %d\n",
+		       mmc_hostname(host), len);
+		kfree(data_buf);
+		return -EINVAL;
+	}
+
+	if (opcode == MMC_BUS_TEST_W)
+		memcpy(data_buf, test_buf, len);
+
+	memset(&mrq, 0, sizeof(struct mmc_request));
+	memset(&cmd, 0, sizeof(struct mmc_command));
+	memset(&data, 0, sizeof(struct mmc_data));
+
+	mrq.cmd = &cmd;
+	mrq.data = &data;
+	cmd.opcode = opcode;
+	cmd.arg = 0;
+
+	/* NOTE HACK:  the MMC_RSP_SPI_R1 is always correct here, but we
+	 * rely on callers to never use this with "native" calls for reading
+	 * CSD or CID.  Native versions of those commands use the R2 type,
+	 * not R1 plus a data block.
+	 */
+	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
+
+	data.blksz = len;
+	data.blocks = 1;
+	if (opcode == MMC_BUS_TEST_R)
+		data.flags = MMC_DATA_READ;
+	else
+		data.flags = MMC_DATA_WRITE;
+
+	data.sg = &sg;
+	data.sg_len = 1;
+	sg_init_one(&sg, data_buf, len);
+	mmc_wait_for_req(host, &mrq);
+	err = 0;
+	if (opcode == MMC_BUS_TEST_R) {
+		for (i = 0; i < len / 4; i++)
+			if ((test_buf[i] ^ data_buf[i]) != 0xff) {
+				err = -EIO;
+				break;
+			}
+	}
+	kfree(data_buf);
+
+	if (cmd.error)
+		return cmd.error;
+	if (data.error)
+		return data.error;
+
+	return err;
+}
+
+int mmc_bus_test(struct mmc_card *card, u8 bus_width)
+{
+	int err, width;
+
+	if (bus_width == MMC_BUS_WIDTH_8)
+		width = 8;
+	else if (bus_width == MMC_BUS_WIDTH_4)
+		width = 4;
+	else if (bus_width == MMC_BUS_WIDTH_1)
+		return 0; /* no need for test */
+	else
+		return -EINVAL;
+
+	/*
+	 * Ignore errors from BUS_TEST_W.  BUS_TEST_R will fail if there
+	 * is a problem.  This improves chances that the test will work.
+	 */
+	mmc_send_bus_test(card, card->host, MMC_BUS_TEST_W, width);
+	err = mmc_send_bus_test(card, card->host, MMC_BUS_TEST_R, width);
+	return err;
+}
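
For reference, the bus test added above relies on the card driving back the complement of the written pattern on the data lines it actually uses, which is why mmc_send_bus_test() accepts a read-back byte only when (test_buf[i] ^ data_buf[i]) equals 0xff: writing 0x55 over a healthy bus should read back as 0xaa, and 0x55 ^ 0xaa = 0xff.
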
diff --git a/drivers/mmc/core/mmc_ops.h b/drivers/mmc/core/mmc_ops.h
index 653eb8e..e6d44b8 100644
--- a/drivers/mmc/core/mmc_ops.h
+++ b/drivers/mmc/core/mmc_ops.h
@@ -26,6 +26,7 @@
 int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp);
 int mmc_spi_set_crc(struct mmc_host *host, int use_crc);
 int mmc_card_sleepawake(struct mmc_host *host, int sleep);
+int mmc_bus_test(struct mmc_card *card, u8 bus_width);
 
 #endif
 
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index 49da4df..d18c32b 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -764,14 +764,21 @@
 /*
  * Starting point for SD card init.
  */
-int mmc_attach_sd(struct mmc_host *host, u32 ocr)
+int mmc_attach_sd(struct mmc_host *host)
 {
 	int err;
+	u32 ocr;
 
 	BUG_ON(!host);
 	WARN_ON(!host->claimed);
 
+	err = mmc_send_app_op_cond(host, 0, &ocr);
+	if (err)
+		return err;
+
 	mmc_sd_attach_bus_ops(host);
+	if (host->ocr_avail_sd)
+		host->ocr_avail = host->ocr_avail_sd;
 
 	/*
 	 * We need to get OCR a different way for SPI.
@@ -795,7 +802,8 @@
 		ocr &= ~0x7F;
 	}
 
-	if (ocr & MMC_VDD_165_195) {
+	if ((ocr & MMC_VDD_165_195) &&
+	    !(host->ocr_avail_sd & MMC_VDD_165_195)) {
 		printk(KERN_WARNING "%s: SD card claims to support the "
 		       "incompletely defined 'low voltage range'. This "
 		       "will be ignored.\n", mmc_hostname(host));
@@ -820,20 +828,20 @@
 		goto err;
 
 	mmc_release_host(host);
-
 	err = mmc_add_card(host->card);
+	mmc_claim_host(host);
 	if (err)
 		goto remove_card;
 
 	return 0;
 
 remove_card:
+	mmc_release_host(host);
 	mmc_remove_card(host->card);
 	host->card = NULL;
 	mmc_claim_host(host);
 err:
 	mmc_detach_bus(host);
-	mmc_release_host(host);
 
 	printk(KERN_ERR "%s: error %d whilst initialising SD card\n",
 		mmc_hostname(host), err);
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index efef5f9..5c4a54d 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -627,15 +627,27 @@
 
 static int mmc_sdio_resume(struct mmc_host *host)
 {
-	int i, err;
+	int i, err = 0;
 
 	BUG_ON(!host);
 	BUG_ON(!host->card);
 
 	/* Basic card reinitialization. */
 	mmc_claim_host(host);
-	err = mmc_sdio_init_card(host, host->ocr, host->card,
+
+	/* No need to reinitialize powered-resumed nonremovable cards */
+	if (mmc_card_is_removable(host) || !mmc_card_is_powered_resumed(host))
+		err = mmc_sdio_init_card(host, host->ocr, host->card,
 				 (host->pm_flags & MMC_PM_KEEP_POWER));
+	else if (mmc_card_is_powered_resumed(host)) {
+		/* We may have switched to 1-bit mode during suspend */
+		err = sdio_enable_4bit_bus(host->card);
+		if (err > 0) {
+			mmc_set_bus_width(host, MMC_BUS_WIDTH_4);
+			err = 0;
+		}
+	}
+
 	if (!err && host->sdio_irqs)
 		mmc_signal_sdio_irq(host);
 	mmc_release_host(host);
@@ -690,16 +702,22 @@
 /*
  * Starting point for SDIO card init.
  */
-int mmc_attach_sdio(struct mmc_host *host, u32 ocr)
+int mmc_attach_sdio(struct mmc_host *host)
 {
-	int err;
-	int i, funcs;
+	int err, i, funcs;
+	u32 ocr;
 	struct mmc_card *card;
 
 	BUG_ON(!host);
 	WARN_ON(!host->claimed);
 
+	err = mmc_send_io_op_cond(host, 0, &ocr);
+	if (err)
+		return err;
+
 	mmc_attach_bus(host, &mmc_sdio_ops);
+	if (host->ocr_avail_sdio)
+		host->ocr_avail = host->ocr_avail_sdio;
 
 	/*
 	 * Sanity check the voltages that the card claims to
@@ -769,12 +787,12 @@
 			pm_runtime_enable(&card->sdio_func[i]->dev);
 	}
 
-	mmc_release_host(host);
-
 	/*
 	 * First add the card to the driver model...
 	 */
+	mmc_release_host(host);
 	err = mmc_add_card(host->card);
+	mmc_claim_host(host);
 	if (err)
 		goto remove_added;
 
@@ -792,15 +810,17 @@
 
 remove_added:
 	/* Remove without lock if the device has been added. */
+	mmc_release_host(host);
 	mmc_sdio_remove(host);
 	mmc_claim_host(host);
 remove:
 	/* And with lock if it hasn't been added. */
+	mmc_release_host(host);
 	if (host->card)
 		mmc_sdio_remove(host);
+	mmc_claim_host(host);
 err:
 	mmc_detach_bus(host);
-	mmc_release_host(host);
 
 	printk(KERN_ERR "%s: error %d whilst initialising SDIO card\n",
 		mmc_hostname(host), err);
diff --git a/drivers/mmc/core/sdio_bus.c b/drivers/mmc/core/sdio_bus.c
index 203da44..d29b9c3 100644
--- a/drivers/mmc/core/sdio_bus.c
+++ b/drivers/mmc/core/sdio_bus.c
@@ -197,44 +197,12 @@
 
 #ifdef CONFIG_PM_RUNTIME
 
-static int sdio_bus_pm_prepare(struct device *dev)
-{
-	struct sdio_func *func = dev_to_sdio_func(dev);
-
-	/*
-	 * Resume an SDIO device which was suspended at run time at this
-	 * point, in order to allow standard SDIO suspend/resume paths
-	 * to keep working as usual.
-	 *
-	 * Ultimately, the SDIO driver itself will decide (in its
-	 * suspend handler, or lack thereof) whether the card should be
-	 * removed or kept, and if kept, at what power state.
-	 *
-	 * At this point, PM core have increased our use count, so it's
-	 * safe to directly resume the device. After system is resumed
-	 * again, PM core will drop back its runtime PM use count, and if
-	 * needed device will be suspended again.
-	 *
-	 * The end result is guaranteed to be a power state that is
-	 * coherent with the device's runtime PM use count.
-	 *
-	 * The return value of pm_runtime_resume is deliberately unchecked
-	 * since there is little point in failing system suspend if a
-	 * device can't be resumed.
-	 */
-	if (func->card->host->caps & MMC_CAP_POWER_OFF_CARD)
-		pm_runtime_resume(dev);
-
-	return 0;
-}
-
 static const struct dev_pm_ops sdio_bus_pm_ops = {
 	SET_RUNTIME_PM_OPS(
 		pm_generic_runtime_suspend,
 		pm_generic_runtime_resume,
 		pm_generic_runtime_idle
 	)
-	.prepare = sdio_bus_pm_prepare,
 };
 
 #define SDIO_PM_OPS_PTR	(&sdio_bus_pm_ops)
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index e960a93..afe8c6f 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -142,6 +142,27 @@
 
 	  If unsure, say N.
 
+config MMC_SDHCI_DOVE
+	bool "SDHCI support on Marvell's Dove SoC"
+	depends on ARCH_DOVE
+	depends on MMC_SDHCI_PLTFM
+	select MMC_SDHCI_IO_ACCESSORS
+	help
+	  This selects the Secure Digital Host Controller Interface in
+	  Marvell's Dove SoC.
+
+	  If unsure, say N.
+
+config MMC_SDHCI_TEGRA
+	tristate "SDHCI platform support for the Tegra SD/MMC Controller"
+	depends on MMC_SDHCI_PLTFM && ARCH_TEGRA
+	select MMC_SDHCI_IO_ACCESSORS
+	help
+	  This selects the Tegra SD/MMC controller. If you have a Tegra
+	  platform with SD or MMC devices, say Y or M here.
+
+	  If unsure, say N.
+
 config MMC_SDHCI_S3C
 	tristate "SDHCI support on Samsung S3C SoC"
 	depends on MMC_SDHCI && PLAT_SAMSUNG
@@ -460,11 +481,27 @@
 	help
 	  If you say yes here SD-Cards may work on the EZkit.
 
+config MMC_DW
+	tristate "Synopsys DesignWare Memory Card Interface"
+	depends on ARM
+	help
+	  This selects support for the Synopsys DesignWare Mobile Storage IP
+	  block; it provides host support for SD and MMC interfaces, in both
+	  PIO and external DMA modes.
+
+config MMC_DW_IDMAC
+	bool "Internal DMAC interface"
+	depends on MMC_DW
+	help
+	  This selects support for the internal DMAC block within the Synopsys
+	  DesignWare Mobile Storage IP block. This disables the external DMA
+	  interface.
+
 config MMC_SH_MMCIF
 	tristate "SuperH Internal MMCIF support"
 	depends on MMC_BLOCK && (SUPERH || ARCH_SHMOBILE)
 	help
-	  This selects the MMC Host Interface controler (MMCIF).
+	  This selects the MMC Host Interface controller (MMCIF).
 
 	  This driver supports MMCIF in sh7724/sh7757/sh7372.
 
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
index 7b645ff..e834fb2 100644
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -31,6 +31,7 @@
 obj-$(CONFIG_MMC_CB710)	+= cb710-mmc.o
 obj-$(CONFIG_MMC_VIA_SDMMC)	+= via-sdmmc.o
 obj-$(CONFIG_SDH_BFIN)		+= bfin_sdh.o
+obj-$(CONFIG_MMC_DW)		+= dw_mmc.o
 obj-$(CONFIG_MMC_SH_MMCIF)	+= sh_mmcif.o
 obj-$(CONFIG_MMC_JZ4740)	+= jz4740_mmc.o
 obj-$(CONFIG_MMC_USHC)		+= ushc.o
@@ -39,6 +40,8 @@
 sdhci-platform-y				:= sdhci-pltfm.o
 sdhci-platform-$(CONFIG_MMC_SDHCI_CNS3XXX)	+= sdhci-cns3xxx.o
 sdhci-platform-$(CONFIG_MMC_SDHCI_ESDHC_IMX)	+= sdhci-esdhc-imx.o
+sdhci-platform-$(CONFIG_MMC_SDHCI_DOVE)		+= sdhci-dove.o
+sdhci-platform-$(CONFIG_MMC_SDHCI_TEGRA)	+= sdhci-tegra.o
 
 obj-$(CONFIG_MMC_SDHCI_OF)	+= sdhci-of.o
 sdhci-of-y				:= sdhci-of-core.o
diff --git a/drivers/mmc/host/au1xmmc.c b/drivers/mmc/host/au1xmmc.c
index 41e5a60..ef72e87 100644
--- a/drivers/mmc/host/au1xmmc.c
+++ b/drivers/mmc/host/au1xmmc.c
@@ -192,7 +192,7 @@
 	au_writel(config2 | SD_CONFIG2_DF, HOST_CONFIG2(host));
 	au_sync();
 
-	/* Send the stop commmand */
+	/* Send the stop command */
 	au_writel(STOP_CMD, HOST_CMD(host));
 }
 
diff --git a/drivers/mmc/host/davinci_mmc.c b/drivers/mmc/host/davinci_mmc.c
index e15547c..0076c74 100644
--- a/drivers/mmc/host/davinci_mmc.c
+++ b/drivers/mmc/host/davinci_mmc.c
@@ -66,8 +66,8 @@
 #define DAVINCI_MMCBLNC      0x60
 #define DAVINCI_SDIOCTL      0x64
 #define DAVINCI_SDIOST0      0x68
-#define DAVINCI_SDIOEN       0x6C
-#define DAVINCI_SDIOST       0x70
+#define DAVINCI_SDIOIEN      0x6C
+#define DAVINCI_SDIOIST      0x70
 #define DAVINCI_MMCFIFOCTL   0x74 /* FIFO Control Register             */
 
 /* DAVINCI_MMCCTL definitions */
@@ -131,6 +131,14 @@
 #define MMCFIFOCTL_ACCWD_2    (2 << 3) /* access width of 2 bytes    */
 #define MMCFIFOCTL_ACCWD_1    (3 << 3) /* access width of 1 byte     */
 
+/* DAVINCI_SDIOST0 definitions */
+#define SDIOST0_DAT1_HI       BIT(0)
+
+/* DAVINCI_SDIOIEN definitions */
+#define SDIOIEN_IOINTEN       BIT(0)
+
+/* DAVINCI_SDIOIST definitions */
+#define SDIOIST_IOINT         BIT(0)
 
 /* MMCSD Init clock in Hz in opendrain mode */
 #define MMCSD_INIT_CLOCK		200000
@@ -164,7 +172,7 @@
 	unsigned int mmc_input_clk;
 	void __iomem *base;
 	struct resource *mem_res;
-	int irq;
+	int mmc_irq, sdio_irq;
 	unsigned char bus_mode;
 
 #define DAVINCI_MMC_DATADIR_NONE	0
@@ -184,6 +192,7 @@
 	u32 rxdma, txdma;
 	bool use_dma;
 	bool do_dma;
+	bool sdio_int;
 
 	/* Scatterlist DMA uses one or more parameter RAM entries:
 	 * the main one (associated with rxdma or txdma) plus zero or
@@ -480,7 +489,7 @@
 	struct scatterlist	*sg;
 	unsigned		sg_len;
 	unsigned		bytes_left = host->bytes_left;
-	const unsigned		shift = ffs(rw_threshold) - 1;;
+	const unsigned		shift = ffs(rw_threshold) - 1;
 
 	if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE) {
 		template = &host->tx_template;
@@ -866,6 +875,19 @@
 {
 	host->data = NULL;
 
+	if (host->mmc->caps & MMC_CAP_SDIO_IRQ) {
+		/*
+		 * SDIO Interrupt Detection work-around as suggested by
+		 * Davinci Errata (TMS320DM355 Silicon Revision 1.1 Errata
+		 * 2.1.6): Signal SDIO interrupt only if it is enabled by core
+		 */
+		if (host->sdio_int && !(readl(host->base + DAVINCI_SDIOST0) &
+					SDIOST0_DAT1_HI)) {
+			writel(SDIOIST_IOINT, host->base + DAVINCI_SDIOIST);
+			mmc_signal_sdio_irq(host->mmc);
+		}
+	}
+
 	if (host->do_dma) {
 		davinci_abort_dma(host);
 
@@ -932,6 +954,21 @@
 	mmc_davinci_reset_ctrl(host, 0);
 }
 
+static irqreturn_t mmc_davinci_sdio_irq(int irq, void *dev_id)
+{
+	struct mmc_davinci_host *host = dev_id;
+	unsigned int status;
+
+	status = readl(host->base + DAVINCI_SDIOIST);
+	if (status & SDIOIST_IOINT) {
+		dev_dbg(mmc_dev(host->mmc),
+			"SDIO interrupt status %x\n", status);
+		writel(status | SDIOIST_IOINT, host->base + DAVINCI_SDIOIST);
+		mmc_signal_sdio_irq(host->mmc);
+	}
+	return IRQ_HANDLED;
+}
+
 static irqreturn_t mmc_davinci_irq(int irq, void *dev_id)
 {
 	struct mmc_davinci_host *host = (struct mmc_davinci_host *)dev_id;
@@ -1076,11 +1113,32 @@
 	return config->get_ro(pdev->id);
 }
 
+static void mmc_davinci_enable_sdio_irq(struct mmc_host *mmc, int enable)
+{
+	struct mmc_davinci_host *host = mmc_priv(mmc);
+
+	if (enable) {
+		if (!(readl(host->base + DAVINCI_SDIOST0) & SDIOST0_DAT1_HI)) {
+			writel(SDIOIST_IOINT, host->base + DAVINCI_SDIOIST);
+			mmc_signal_sdio_irq(host->mmc);
+		} else {
+			host->sdio_int = true;
+			writel(readl(host->base + DAVINCI_SDIOIEN) |
+			       SDIOIEN_IOINTEN, host->base + DAVINCI_SDIOIEN);
+		}
+	} else {
+		host->sdio_int = false;
+		writel(readl(host->base + DAVINCI_SDIOIEN) & ~SDIOIEN_IOINTEN,
+		       host->base + DAVINCI_SDIOIEN);
+	}
+}
+
 static struct mmc_host_ops mmc_davinci_ops = {
 	.request	= mmc_davinci_request,
 	.set_ios	= mmc_davinci_set_ios,
 	.get_cd		= mmc_davinci_get_cd,
 	.get_ro		= mmc_davinci_get_ro,
+	.enable_sdio_irq = mmc_davinci_enable_sdio_irq,
 };
 
 /*----------------------------------------------------------------------*/
@@ -1209,7 +1267,8 @@
 		host->nr_sg = MAX_NR_SG;
 
 	host->use_dma = use_dma;
-	host->irq = irq;
+	host->mmc_irq = irq;
+	host->sdio_irq = platform_get_irq(pdev, 1);
 
 	if (host->use_dma && davinci_acquire_dma_channels(host) != 0)
 		host->use_dma = 0;
@@ -1270,6 +1329,13 @@
 	if (ret)
 		goto out;
 
+	if (host->sdio_irq >= 0) {
+		ret = request_irq(host->sdio_irq, mmc_davinci_sdio_irq, 0,
+				  mmc_hostname(mmc), host);
+		if (!ret)
+			mmc->caps |= MMC_CAP_SDIO_IRQ;
+	}
+
 	rename_region(mem, mmc_hostname(mmc));
 
 	dev_info(mmc_dev(host->mmc), "Using %s, %d-bit mode\n",
@@ -1313,7 +1379,9 @@
 		mmc_davinci_cpufreq_deregister(host);
 
 		mmc_remove_host(host->mmc);
-		free_irq(host->irq, host);
+		free_irq(host->mmc_irq, host);
+		if (host->mmc->caps & MMC_CAP_SDIO_IRQ)
+			free_irq(host->sdio_irq, host);
 
 		davinci_release_dma_channels(host);
 
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
new file mode 100644
index 0000000..2fcc825
--- /dev/null
+++ b/drivers/mmc/host/dw_mmc.c
@@ -0,0 +1,1796 @@
+/*
+ * Synopsys DesignWare Multimedia Card Interface driver
+ *  (Based on NXP driver for lpc 31xx)
+ *
+ * Copyright (C) 2009 NXP Semiconductors
+ * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/blkdev.h>
+#include <linux/clk.h>
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/scatterlist.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/stat.h>
+#include <linux/delay.h>
+#include <linux/irq.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/mmc.h>
+#include <linux/mmc/dw_mmc.h>
+#include <linux/bitops.h>
+
+#include "dw_mmc.h"
+
+/* Common flag combinations */
+#define DW_MCI_DATA_ERROR_FLAGS	(SDMMC_INT_DTO | SDMMC_INT_DCRC | \
+				 SDMMC_INT_HTO | SDMMC_INT_SBE  | \
+				 SDMMC_INT_EBE)
+#define DW_MCI_CMD_ERROR_FLAGS	(SDMMC_INT_RTO | SDMMC_INT_RCRC | \
+				 SDMMC_INT_RESP_ERR)
+#define DW_MCI_ERROR_FLAGS	(DW_MCI_DATA_ERROR_FLAGS | \
+				 DW_MCI_CMD_ERROR_FLAGS  | SDMMC_INT_HLE)
+#define DW_MCI_SEND_STATUS	1
+#define DW_MCI_RECV_STATUS	2
+#define DW_MCI_DMA_THRESHOLD	16
+
+#ifdef CONFIG_MMC_DW_IDMAC
+struct idmac_desc {
+	u32		des0;	/* Control Descriptor */
+#define IDMAC_DES0_DIC	BIT(1)
+#define IDMAC_DES0_LD	BIT(2)
+#define IDMAC_DES0_FD	BIT(3)
+#define IDMAC_DES0_CH	BIT(4)
+#define IDMAC_DES0_ER	BIT(5)
+#define IDMAC_DES0_CES	BIT(30)
+#define IDMAC_DES0_OWN	BIT(31)
+
+	u32		des1;	/* Buffer sizes */
+#define IDMAC_SET_BUFFER1_SIZE(d, s) \
+	((d)->des1 = ((d)->des1 & 0x03ffc000) | ((s) & 0x3fff))
+
+	u32		des2;	/* buffer 1 physical address */
+
+	u32		des3;	/* buffer 2 physical address */
+};
+#endif /* CONFIG_MMC_DW_IDMAC */
+
+/**
+ * struct dw_mci_slot - MMC slot state
+ * @mmc: The mmc_host representing this slot.
+ * @host: The MMC controller this slot is using.
+ * @ctype: Card type for this slot.
+ * @mrq: mmc_request currently being processed or waiting to be
+ *	processed, or NULL when the slot is idle.
+ * @queue_node: List node for placing this node in the @queue list of
+ *	&struct dw_mci.
+ * @clock: Clock rate configured by set_ios(). Protected by host->lock.
+ * @flags: Random state bits associated with the slot.
+ * @id: Number of this slot.
+ * @last_detect_state: Most recently observed card detect state.
+ */
+struct dw_mci_slot {
+	struct mmc_host		*mmc;
+	struct dw_mci		*host;
+
+	u32			ctype;
+
+	struct mmc_request	*mrq;
+	struct list_head	queue_node;
+
+	unsigned int		clock;
+	unsigned long		flags;
+#define DW_MMC_CARD_PRESENT	0
+#define DW_MMC_CARD_NEED_INIT	1
+	int			id;
+	int			last_detect_state;
+};
+
+#if defined(CONFIG_DEBUG_FS)
+static int dw_mci_req_show(struct seq_file *s, void *v)
+{
+	struct dw_mci_slot *slot = s->private;
+	struct mmc_request *mrq;
+	struct mmc_command *cmd;
+	struct mmc_command *stop;
+	struct mmc_data	*data;
+
+	/* Make sure we get a consistent snapshot */
+	spin_lock_bh(&slot->host->lock);
+	mrq = slot->mrq;
+
+	if (mrq) {
+		cmd = mrq->cmd;
+		data = mrq->data;
+		stop = mrq->stop;
+
+		if (cmd)
+			seq_printf(s,
+				   "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
+				   cmd->opcode, cmd->arg, cmd->flags,
+				   cmd->resp[0], cmd->resp[1], cmd->resp[2],
+				   cmd->resp[3], cmd->error);
+		if (data)
+			seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
+				   data->bytes_xfered, data->blocks,
+				   data->blksz, data->flags, data->error);
+		if (stop)
+			seq_printf(s,
+				   "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
+				   stop->opcode, stop->arg, stop->flags,
+				   stop->resp[0], stop->resp[1], stop->resp[2],
+				   stop->resp[3], stop->error);
+	}
+
+	spin_unlock_bh(&slot->host->lock);
+
+	return 0;
+}
+
+static int dw_mci_req_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, dw_mci_req_show, inode->i_private);
+}
+
+static const struct file_operations dw_mci_req_fops = {
+	.owner		= THIS_MODULE,
+	.open		= dw_mci_req_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+static int dw_mci_regs_show(struct seq_file *s, void *v)
+{
+	seq_printf(s, "STATUS:\t0x%08x\n", SDMMC_STATUS);
+	seq_printf(s, "RINTSTS:\t0x%08x\n", SDMMC_RINTSTS);
+	seq_printf(s, "CMD:\t0x%08x\n", SDMMC_CMD);
+	seq_printf(s, "CTRL:\t0x%08x\n", SDMMC_CTRL);
+	seq_printf(s, "INTMASK:\t0x%08x\n", SDMMC_INTMASK);
+	seq_printf(s, "CLKENA:\t0x%08x\n", SDMMC_CLKENA);
+
+	return 0;
+}
+
+static int dw_mci_regs_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, dw_mci_regs_show, inode->i_private);
+}
+
+static const struct file_operations dw_mci_regs_fops = {
+	.owner		= THIS_MODULE,
+	.open		= dw_mci_regs_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+static void dw_mci_init_debugfs(struct dw_mci_slot *slot)
+{
+	struct mmc_host	*mmc = slot->mmc;
+	struct dw_mci *host = slot->host;
+	struct dentry *root;
+	struct dentry *node;
+
+	root = mmc->debugfs_root;
+	if (!root)
+		return;
+
+	node = debugfs_create_file("regs", S_IRUSR, root, host,
+				   &dw_mci_regs_fops);
+	if (!node)
+		goto err;
+
+	node = debugfs_create_file("req", S_IRUSR, root, slot,
+				   &dw_mci_req_fops);
+	if (!node)
+		goto err;
+
+	node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state);
+	if (!node)
+		goto err;
+
+	node = debugfs_create_x32("pending_events", S_IRUSR, root,
+				  (u32 *)&host->pending_events);
+	if (!node)
+		goto err;
+
+	node = debugfs_create_x32("completed_events", S_IRUSR, root,
+				  (u32 *)&host->completed_events);
+	if (!node)
+		goto err;
+
+	return;
+
+err:
+	dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n");
+}
+#endif /* defined(CONFIG_DEBUG_FS) */
+
+static void dw_mci_set_timeout(struct dw_mci *host)
+{
+	/* timeout (maximum) */
+	mci_writel(host, TMOUT, 0xffffffff);
+}
+
+static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
+{
+	struct mmc_data	*data;
+	u32 cmdr;
+	cmd->error = -EINPROGRESS;
+
+	cmdr = cmd->opcode;
+
+	if (cmdr == MMC_STOP_TRANSMISSION)
+		cmdr |= SDMMC_CMD_STOP;
+	else
+		cmdr |= SDMMC_CMD_PRV_DAT_WAIT;
+
+	if (cmd->flags & MMC_RSP_PRESENT) {
+		/* We expect a response, so set this bit */
+		cmdr |= SDMMC_CMD_RESP_EXP;
+		if (cmd->flags & MMC_RSP_136)
+			cmdr |= SDMMC_CMD_RESP_LONG;
+	}
+
+	if (cmd->flags & MMC_RSP_CRC)
+		cmdr |= SDMMC_CMD_RESP_CRC;
+
+	data = cmd->data;
+	if (data) {
+		cmdr |= SDMMC_CMD_DAT_EXP;
+		if (data->flags & MMC_DATA_STREAM)
+			cmdr |= SDMMC_CMD_STRM_MODE;
+		if (data->flags & MMC_DATA_WRITE)
+			cmdr |= SDMMC_CMD_DAT_WR;
+	}
+
+	return cmdr;
+}
+
+static void dw_mci_start_command(struct dw_mci *host,
+				 struct mmc_command *cmd, u32 cmd_flags)
+{
+	host->cmd = cmd;
+	dev_vdbg(&host->pdev->dev,
+		 "start command: ARGR=0x%08x CMDR=0x%08x\n",
+		 cmd->arg, cmd_flags);
+
+	mci_writel(host, CMDARG, cmd->arg);
+	wmb();
+
+	mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
+}
+
+static void send_stop_cmd(struct dw_mci *host, struct mmc_data *data)
+{
+	dw_mci_start_command(host, data->stop, host->stop_cmdr);
+}
+
+/* DMA interface functions */
+static void dw_mci_stop_dma(struct dw_mci *host)
+{
+	if (host->use_dma) {
+		host->dma_ops->stop(host);
+		host->dma_ops->cleanup(host);
+	} else {
+		/* Data transfer was stopped by the interrupt handler */
+		set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
+	}
+}
+
+#ifdef CONFIG_MMC_DW_IDMAC
+static void dw_mci_dma_cleanup(struct dw_mci *host)
+{
+	struct mmc_data *data = host->data;
+
+	if (data)
+		dma_unmap_sg(&host->pdev->dev, data->sg, data->sg_len,
+			     ((data->flags & MMC_DATA_WRITE)
+			      ? DMA_TO_DEVICE : DMA_FROM_DEVICE));
+}
+
+static void dw_mci_idmac_stop_dma(struct dw_mci *host)
+{
+	u32 temp;
+
+	/* Disable and reset the IDMAC interface */
+	temp = mci_readl(host, CTRL);
+	temp &= ~SDMMC_CTRL_USE_IDMAC;
+	temp |= SDMMC_CTRL_DMA_RESET;
+	mci_writel(host, CTRL, temp);
+
+	/* Stop the IDMAC running */
+	temp = mci_readl(host, BMOD);
+	temp &= ~SDMMC_IDMAC_ENABLE;
+	mci_writel(host, BMOD, temp);
+}
+
+static void dw_mci_idmac_complete_dma(struct dw_mci *host)
+{
+	struct mmc_data *data = host->data;
+
+	dev_vdbg(&host->pdev->dev, "DMA complete\n");
+
+	host->dma_ops->cleanup(host);
+
+	/*
+	 * If the card was removed, data will be NULL. No point in trying to
+	 * send the stop command or waiting for NBUSY in this case.
+	 */
+	if (data) {
+		set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
+		tasklet_schedule(&host->tasklet);
+	}
+}
+
+static void dw_mci_translate_sglist(struct dw_mci *host, struct mmc_data *data,
+				    unsigned int sg_len)
+{
+	int i;
+	struct idmac_desc *desc = host->sg_cpu;
+
+	for (i = 0; i < sg_len; i++, desc++) {
+		unsigned int length = sg_dma_len(&data->sg[i]);
+		u32 mem_addr = sg_dma_address(&data->sg[i]);
+
+		/* Set the OWN bit and disable interrupts for this descriptor */
+		desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC | IDMAC_DES0_CH;
+
+		/* Buffer length */
+		IDMAC_SET_BUFFER1_SIZE(desc, length);
+
+		/* Physical address to DMA to/from */
+		desc->des2 = mem_addr;
+	}
+
+	/* Set first descriptor */
+	desc = host->sg_cpu;
+	desc->des0 |= IDMAC_DES0_FD;
+
+	/* Set last descriptor */
+	desc = host->sg_cpu + (i - 1) * sizeof(struct idmac_desc);
+	desc->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
+	desc->des0 |= IDMAC_DES0_LD;
+
+	wmb();
+}
+
+static void dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
+{
+	u32 temp;
+
+	dw_mci_translate_sglist(host, host->data, sg_len);
+
+	/* Select IDMAC interface */
+	temp = mci_readl(host, CTRL);
+	temp |= SDMMC_CTRL_USE_IDMAC;
+	mci_writel(host, CTRL, temp);
+
+	wmb();
+
+	/* Enable the IDMAC */
+	temp = mci_readl(host, BMOD);
+	temp |= SDMMC_IDMAC_ENABLE;
+	mci_writel(host, BMOD, temp);
+
+	/* Start it running */
+	mci_writel(host, PLDMND, 1);
+}
+
+static int dw_mci_idmac_init(struct dw_mci *host)
+{
+	struct idmac_desc *p;
+	int i;
+
+	/* Number of descriptors in the ring buffer */
+	host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc);
+
+	/* Forward link the descriptor list */
+	for (i = 0, p = host->sg_cpu; i < host->ring_size - 1; i++, p++)
+		p->des3 = host->sg_dma + (sizeof(struct idmac_desc) * (i + 1));
+
+	/* Set the last descriptor as the end-of-ring descriptor */
+	p->des3 = host->sg_dma;
+	p->des0 = IDMAC_DES0_ER;
+
+	/* Mask out interrupts - get Tx & Rx complete only */
+	mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI | SDMMC_IDMAC_INT_RI |
+		   SDMMC_IDMAC_INT_TI);
+
+	/* Set the descriptor base address */
+	mci_writel(host, DBADDR, host->sg_dma);
+	return 0;
+}
+
+static struct dw_mci_dma_ops dw_mci_idmac_ops = {
+	.init = dw_mci_idmac_init,
+	.start = dw_mci_idmac_start_dma,
+	.stop = dw_mci_idmac_stop_dma,
+	.complete = dw_mci_idmac_complete_dma,
+	.cleanup = dw_mci_dma_cleanup,
+};
+#endif /* CONFIG_MMC_DW_IDMAC */
+
+static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
+{
+	struct scatterlist *sg;
+	unsigned int i, direction, sg_len;
+	u32 temp;
+
+	/* If we don't have a channel, we can't do DMA */
+	if (!host->use_dma)
+		return -ENODEV;
+
+	/*
+	 * We don't do DMA on "complex" transfers, i.e. with
+	 * non-word-aligned buffers or lengths. Also, we don't bother
+	 * with all the DMA setup overhead for short transfers.
+	 */
+	if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD)
+		return -EINVAL;
+	if (data->blksz & 3)
+		return -EINVAL;
+
+	for_each_sg(data->sg, sg, data->sg_len, i) {
+		if (sg->offset & 3 || sg->length & 3)
+			return -EINVAL;
+	}
+
+	if (data->flags & MMC_DATA_READ)
+		direction = DMA_FROM_DEVICE;
+	else
+		direction = DMA_TO_DEVICE;
+
+	sg_len = dma_map_sg(&host->pdev->dev, data->sg, data->sg_len,
+			    direction);
+
+	dev_vdbg(&host->pdev->dev,
+		 "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
+		 (unsigned long)host->sg_cpu, (unsigned long)host->sg_dma,
+		 sg_len);
+
+	/* Enable the DMA interface */
+	temp = mci_readl(host, CTRL);
+	temp |= SDMMC_CTRL_DMA_ENABLE;
+	mci_writel(host, CTRL, temp);
+
+	/* Disable RX/TX IRQs, let DMA handle it */
+	temp = mci_readl(host, INTMASK);
+	temp  &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR);
+	mci_writel(host, INTMASK, temp);
+
+	host->dma_ops->start(host, sg_len);
+
+	return 0;
+}
+
+static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
+{
+	u32 temp;
+
+	data->error = -EINPROGRESS;
+
+	WARN_ON(host->data);
+	host->sg = NULL;
+	host->data = data;
+
+	if (dw_mci_submit_data_dma(host, data)) {
+		host->sg = data->sg;
+		host->pio_offset = 0;
+		if (data->flags & MMC_DATA_READ)
+			host->dir_status = DW_MCI_RECV_STATUS;
+		else
+			host->dir_status = DW_MCI_SEND_STATUS;
+
+		temp = mci_readl(host, INTMASK);
+		temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR;
+		mci_writel(host, INTMASK, temp);
+
+		temp = mci_readl(host, CTRL);
+		temp &= ~SDMMC_CTRL_DMA_ENABLE;
+		mci_writel(host, CTRL, temp);
+	}
+}
+
+static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg)
+{
+	struct dw_mci *host = slot->host;
+	unsigned long timeout = jiffies + msecs_to_jiffies(500);
+	unsigned int cmd_status = 0;
+
+	mci_writel(host, CMDARG, arg);
+	wmb();
+	mci_writel(host, CMD, SDMMC_CMD_START | cmd);
+
+	while (time_before(jiffies, timeout)) {
+		cmd_status = mci_readl(host, CMD);
+		if (!(cmd_status & SDMMC_CMD_START))
+			return;
+	}
+	dev_err(&slot->mmc->class_dev,
+		"Timeout sending command (cmd %#x arg %#x status %#x)\n",
+		cmd, arg, cmd_status);
+}
+
+static void dw_mci_setup_bus(struct dw_mci_slot *slot)
+{
+	struct dw_mci *host = slot->host;
+	u32 div;
+
+	if (slot->clock != host->current_speed) {
+		if (host->bus_hz % slot->clock)
+			/*
+			 * move the + 1 after the divide to prevent
+			 * over-clocking the card.
+			 */
+			div = ((host->bus_hz / slot->clock) >> 1) + 1;
+		else
+			div = (host->bus_hz  / slot->clock) >> 1;
+
+		dev_info(&slot->mmc->class_dev,
+			 "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHz"
+			 " div = %d)\n", slot->id, host->bus_hz, slot->clock,
+			 div ? ((host->bus_hz / div) >> 1) : host->bus_hz, div);
+
+		/* disable clock */
+		mci_writel(host, CLKENA, 0);
+		mci_writel(host, CLKSRC, 0);
+
+		/* inform CIU */
+		mci_send_cmd(slot,
+			     SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
+
+		/* set clock to desired speed */
+		mci_writel(host, CLKDIV, div);
+
+		/* inform CIU */
+		mci_send_cmd(slot,
+			     SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
+
+		/* enable clock */
+		mci_writel(host, CLKENA, SDMMC_CLKEN_ENABLE);
+
+		/* inform CIU */
+		mci_send_cmd(slot,
+			     SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
+
+		host->current_speed = slot->clock;
+	}
+
+	/* Set the current slot bus width */
+	mci_writel(host, CTYPE, slot->ctype);
+}
+
+static void dw_mci_start_request(struct dw_mci *host,
+				 struct dw_mci_slot *slot)
+{
+	struct mmc_request *mrq;
+	struct mmc_command *cmd;
+	struct mmc_data	*data;
+	u32 cmdflags;
+
+	mrq = slot->mrq;
+	if (host->pdata->select_slot)
+		host->pdata->select_slot(slot->id);
+
+	/* Slot specific timing and width adjustment */
+	dw_mci_setup_bus(slot);
+
+	host->cur_slot = slot;
+	host->mrq = mrq;
+
+	host->pending_events = 0;
+	host->completed_events = 0;
+	host->data_status = 0;
+
+	data = mrq->data;
+	if (data) {
+		dw_mci_set_timeout(host);
+		mci_writel(host, BYTCNT, data->blksz*data->blocks);
+		mci_writel(host, BLKSIZ, data->blksz);
+	}
+
+	cmd = mrq->cmd;
+	cmdflags = dw_mci_prepare_command(slot->mmc, cmd);
+
+	/* this is the first command, send the initialization clock */
+	if (test_and_clear_bit(DW_MMC_CARD_NEED_INIT, &slot->flags))
+		cmdflags |= SDMMC_CMD_INIT;
+
+	if (data) {
+		dw_mci_submit_data(host, data);
+		wmb();
+	}
+
+	dw_mci_start_command(host, cmd, cmdflags);
+
+	if (mrq->stop)
+		host->stop_cmdr = dw_mci_prepare_command(slot->mmc, mrq->stop);
+}
+
+static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot,
+				 struct mmc_request *mrq)
+{
+	dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
+		 host->state);
+
+	spin_lock_bh(&host->lock);
+	slot->mrq = mrq;
+
+	if (host->state == STATE_IDLE) {
+		host->state = STATE_SENDING_CMD;
+		dw_mci_start_request(host, slot);
+	} else {
+		list_add_tail(&slot->queue_node, &host->queue);
+	}
+
+	spin_unlock_bh(&host->lock);
+}
+
+static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
+{
+	struct dw_mci_slot *slot = mmc_priv(mmc);
+	struct dw_mci *host = slot->host;
+
+	WARN_ON(slot->mrq);
+
+	if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
+		mrq->cmd->error = -ENOMEDIUM;
+		mmc_request_done(mmc, mrq);
+		return;
+	}
+
+	/* We don't support multiple blocks of weird lengths. */
+	dw_mci_queue_request(host, slot, mrq);
+}
+
+static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+	struct dw_mci_slot *slot = mmc_priv(mmc);
+
+	/* set default 1 bit mode */
+	slot->ctype = SDMMC_CTYPE_1BIT;
+
+	switch (ios->bus_width) {
+	case MMC_BUS_WIDTH_1:
+		slot->ctype = SDMMC_CTYPE_1BIT;
+		break;
+	case MMC_BUS_WIDTH_4:
+		slot->ctype = SDMMC_CTYPE_4BIT;
+		break;
+	}
+
+	if (ios->clock) {
+		/*
+		 * Use mirror of ios->clock to prevent race with mmc
+		 * core ios update when finding the minimum.
+		 */
+		slot->clock = ios->clock;
+	}
+
+	switch (ios->power_mode) {
+	case MMC_POWER_UP:
+		set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags);
+		break;
+	default:
+		break;
+	}
+}
+
+static int dw_mci_get_ro(struct mmc_host *mmc)
+{
+	int read_only;
+	struct dw_mci_slot *slot = mmc_priv(mmc);
+	struct dw_mci_board *brd = slot->host->pdata;
+
+	/* Use platform get_ro function, else try on board write protect */
+	if (brd->get_ro)
+		read_only = brd->get_ro(slot->id);
+	else
+		read_only =
+			mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0;
+
+	dev_dbg(&mmc->class_dev, "card is %s\n",
+		read_only ? "read-only" : "read-write");
+
+	return read_only;
+}
+
+static int dw_mci_get_cd(struct mmc_host *mmc)
+{
+	int present;
+	struct dw_mci_slot *slot = mmc_priv(mmc);
+	struct dw_mci_board *brd = slot->host->pdata;
+
+	/* Use platform get_cd function, else try onboard card detect */
+	if (brd->get_cd)
+		present = !brd->get_cd(slot->id);
+	else
+		present = (mci_readl(slot->host, CDETECT) & (1 << slot->id))
+			== 0 ? 1 : 0;
+
+	if (present)
+		dev_dbg(&mmc->class_dev, "card is present\n");
+	else
+		dev_dbg(&mmc->class_dev, "card is not present\n");
+
+	return present;
+}
+
+static const struct mmc_host_ops dw_mci_ops = {
+	.request	= dw_mci_request,
+	.set_ios	= dw_mci_set_ios,
+	.get_ro		= dw_mci_get_ro,
+	.get_cd		= dw_mci_get_cd,
+};
+
+static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
+	__releases(&host->lock)
+	__acquires(&host->lock)
+{
+	struct dw_mci_slot *slot;
+	struct mmc_host	*prev_mmc = host->cur_slot->mmc;
+
+	WARN_ON(host->cmd || host->data);
+
+	host->cur_slot->mrq = NULL;
+	host->mrq = NULL;
+	if (!list_empty(&host->queue)) {
+		slot = list_entry(host->queue.next,
+				  struct dw_mci_slot, queue_node);
+		list_del(&slot->queue_node);
+		dev_vdbg(&host->pdev->dev, "list not empty: %s is next\n",
+			 mmc_hostname(slot->mmc));
+		host->state = STATE_SENDING_CMD;
+		dw_mci_start_request(host, slot);
+	} else {
+		dev_vdbg(&host->pdev->dev, "list empty\n");
+		host->state = STATE_IDLE;
+	}
+
+	spin_unlock(&host->lock);
+	mmc_request_done(prev_mmc, mrq);
+	spin_lock(&host->lock);
+}
+
+static void dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd)
+{
+	u32 status = host->cmd_status;
+
+	host->cmd_status = 0;
+
+	/* Read the response from the card (up to 16 bytes) */
+	if (cmd->flags & MMC_RSP_PRESENT) {
+		if (cmd->flags & MMC_RSP_136) {
+			cmd->resp[3] = mci_readl(host, RESP0);
+			cmd->resp[2] = mci_readl(host, RESP1);
+			cmd->resp[1] = mci_readl(host, RESP2);
+			cmd->resp[0] = mci_readl(host, RESP3);
+		} else {
+			cmd->resp[0] = mci_readl(host, RESP0);
+			cmd->resp[1] = 0;
+			cmd->resp[2] = 0;
+			cmd->resp[3] = 0;
+		}
+	}
+
+	if (status & SDMMC_INT_RTO)
+		cmd->error = -ETIMEDOUT;
+	else if ((cmd->flags & MMC_RSP_CRC) && (status & SDMMC_INT_RCRC))
+		cmd->error = -EILSEQ;
+	else if (status & SDMMC_INT_RESP_ERR)
+		cmd->error = -EIO;
+	else
+		cmd->error = 0;
+
+	if (cmd->error) {
+		/* newer ip versions need a delay between retries */
+		if (host->quirks & DW_MCI_QUIRK_RETRY_DELAY)
+			mdelay(20);
+
+		if (cmd->data) {
+			host->data = NULL;
+			dw_mci_stop_dma(host);
+		}
+	}
+}
+
+static void dw_mci_tasklet_func(unsigned long priv)
+{
+	struct dw_mci *host = (struct dw_mci *)priv;
+	struct mmc_data	*data;
+	struct mmc_command *cmd;
+	enum dw_mci_state state;
+	enum dw_mci_state prev_state;
+	u32 status;
+
+	spin_lock(&host->lock);
+
+	state = host->state;
+	data = host->data;
+
+	do {
+		prev_state = state;
+
+		switch (state) {
+		case STATE_IDLE:
+			break;
+
+		case STATE_SENDING_CMD:
+			if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
+						&host->pending_events))
+				break;
+
+			cmd = host->cmd;
+			host->cmd = NULL;
+			set_bit(EVENT_CMD_COMPLETE, &host->completed_events);
+			dw_mci_command_complete(host, host->mrq->cmd);
+			if (!host->mrq->data || cmd->error) {
+				dw_mci_request_end(host, host->mrq);
+				goto unlock;
+			}
+
+			prev_state = state = STATE_SENDING_DATA;
+			/* fall through */
+
+		case STATE_SENDING_DATA:
+			if (test_and_clear_bit(EVENT_DATA_ERROR,
+					       &host->pending_events)) {
+				dw_mci_stop_dma(host);
+				if (data->stop)
+					send_stop_cmd(host, data);
+				state = STATE_DATA_ERROR;
+				break;
+			}
+
+			if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
+						&host->pending_events))
+				break;
+
+			set_bit(EVENT_XFER_COMPLETE, &host->completed_events);
+			prev_state = state = STATE_DATA_BUSY;
+			/* fall through */
+
+		case STATE_DATA_BUSY:
+			if (!test_and_clear_bit(EVENT_DATA_COMPLETE,
+						&host->pending_events))
+				break;
+
+			host->data = NULL;
+			set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
+			status = host->data_status;
+
+			if (status & DW_MCI_DATA_ERROR_FLAGS) {
+				if (status & SDMMC_INT_DTO) {
+					dev_err(&host->pdev->dev,
+						"data timeout error\n");
+					data->error = -ETIMEDOUT;
+				} else if (status & SDMMC_INT_DCRC) {
+					dev_err(&host->pdev->dev,
+						"data CRC error\n");
+					data->error = -EILSEQ;
+				} else {
+					dev_err(&host->pdev->dev,
+						"data FIFO error "
+						"(status=%08x)\n",
+						status);
+					data->error = -EIO;
+				}
+			} else {
+				data->bytes_xfered = data->blocks * data->blksz;
+				data->error = 0;
+			}
+
+			if (!data->stop) {
+				dw_mci_request_end(host, host->mrq);
+				goto unlock;
+			}
+
+			prev_state = state = STATE_SENDING_STOP;
+			if (!data->error)
+				send_stop_cmd(host, data);
+			/* fall through */
+
+		case STATE_SENDING_STOP:
+			if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
+						&host->pending_events))
+				break;
+
+			host->cmd = NULL;
+			dw_mci_command_complete(host, host->mrq->stop);
+			dw_mci_request_end(host, host->mrq);
+			goto unlock;
+
+		case STATE_DATA_ERROR:
+			if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
+						&host->pending_events))
+				break;
+
+			state = STATE_DATA_BUSY;
+			break;
+		}
+	} while (state != prev_state);
+
+	host->state = state;
+unlock:
+	spin_unlock(&host->lock);
+
+}
+
+static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt)
+{
+	u16 *pdata = (u16 *)buf;
+
+	WARN_ON(cnt % 2 != 0);
+
+	cnt = cnt >> 1;
+	while (cnt > 0) {
+		mci_writew(host, DATA, *pdata++);
+		cnt--;
+	}
+}
+
+static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt)
+{
+	u16 *pdata = (u16 *)buf;
+
+	WARN_ON(cnt % 2 != 0);
+
+	cnt = cnt >> 1;
+	while (cnt > 0) {
+		*pdata++ = mci_readw(host, DATA);
+		cnt--;
+	}
+}
+
+static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt)
+{
+	u32 *pdata = (u32 *)buf;
+
+	WARN_ON(cnt % 4 != 0);
+	WARN_ON((unsigned long)pdata & 0x3);
+
+	cnt = cnt >> 2;
+	while (cnt > 0) {
+		mci_writel(host, DATA, *pdata++);
+		cnt--;
+	}
+}
+
+static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt)
+{
+	u32 *pdata = (u32 *)buf;
+
+	WARN_ON(cnt % 4 != 0);
+	WARN_ON((unsigned long)pdata & 0x3);
+
+	cnt = cnt >> 2;
+	while (cnt > 0) {
+		*pdata++ = mci_readl(host, DATA);
+		cnt--;
+	}
+}
+
+static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt)
+{
+	u64 *pdata = (u64 *)buf;
+
+	WARN_ON(cnt % 8 != 0);
+
+	cnt = cnt >> 3;
+	while (cnt > 0) {
+		mci_writeq(host, DATA, *pdata++);
+		cnt--;
+	}
+}
+
+static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt)
+{
+	u64 *pdata = (u64 *)buf;
+
+	WARN_ON(cnt % 8 != 0);
+
+	cnt = cnt >> 3;
+	while (cnt > 0) {
+		*pdata++ = mci_readq(host, DATA);
+		cnt--;
+	}
+}
+
+static void dw_mci_read_data_pio(struct dw_mci *host)
+{
+	struct scatterlist *sg = host->sg;
+	void *buf = sg_virt(sg);
+	unsigned int offset = host->pio_offset;
+	struct mmc_data	*data = host->data;
+	int shift = host->data_shift;
+	u32 status;
+	unsigned int nbytes = 0, len, old_len, count = 0;
+
+	do {
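+		/* Number of bytes currently available to read from the FIFO */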
+		len = SDMMC_GET_FCNT(mci_readl(host, STATUS)) << shift;
+		if (count == 0)
+			old_len = len;
+
+		if (offset + len <= sg->length) {
+			host->pull_data(host, (void *)(buf + offset), len);
+
+			offset += len;
+			nbytes += len;
+
+			if (offset == sg->length) {
+				flush_dcache_page(sg_page(sg));
+				host->sg = sg = sg_next(sg);
+				if (!sg)
+					goto done;
+
+				offset = 0;
+				buf = sg_virt(sg);
+			}
+		} else {
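+			/* The transfer crosses into the next sg entry: drain the rest of this one, then continue */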
+			unsigned int remaining = sg->length - offset;
+			host->pull_data(host, (void *)(buf + offset),
+					remaining);
+			nbytes += remaining;
+
+			flush_dcache_page(sg_page(sg));
+			host->sg = sg = sg_next(sg);
+			if (!sg)
+				goto done;
+
+			offset = len - remaining;
+			buf = sg_virt(sg);
+			host->pull_data(host, buf, offset);
+			nbytes += offset;
+		}
+
+		status = mci_readl(host, MINTSTS);
+		mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
+		if (status & DW_MCI_DATA_ERROR_FLAGS) {
+			host->data_status = status;
+			data->bytes_xfered += nbytes;
+			smp_wmb();
+
+			set_bit(EVENT_DATA_ERROR, &host->pending_events);
+
+			tasklet_schedule(&host->tasklet);
+			return;
+		}
+		count++;
+	} while (status & SDMMC_INT_RXDR); /* if RXDR is still set, read again */
+	len = SDMMC_GET_FCNT(mci_readl(host, STATUS));
+	host->pio_offset = offset;
+	data->bytes_xfered += nbytes;
+	return;
+
+done:
+	data->bytes_xfered += nbytes;
+	smp_wmb();
+	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
+}
+
+static void dw_mci_write_data_pio(struct dw_mci *host)
+{
+	struct scatterlist *sg = host->sg;
+	void *buf = sg_virt(sg);
+	unsigned int offset = host->pio_offset;
+	struct mmc_data	*data = host->data;
+	int shift = host->data_shift;
+	u32 status;
+	unsigned int nbytes = 0, len;
+
+	do {
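+		/* Work out how much room is left in the FIFO before pushing more data */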
+		len = SDMMC_FIFO_SZ -
+			(SDMMC_GET_FCNT(mci_readl(host, STATUS)) << shift);
+		if (offset + len <= sg->length) {
+			host->push_data(host, (void *)(buf + offset), len);
+
+			offset += len;
+			nbytes += len;
+			if (offset == sg->length) {
+				host->sg = sg = sg_next(sg);
+				if (!sg)
+					goto done;
+
+				offset = 0;
+				buf = sg_virt(sg);
+			}
+		} else {
+			unsigned int remaining = sg->length - offset;
+
+			host->push_data(host, (void *)(buf + offset),
+					remaining);
+			nbytes += remaining;
+
+			host->sg = sg = sg_next(sg);
+			if (!sg)
+				goto done;
+
+			offset = len - remaining;
+			buf = sg_virt(sg);
+			host->push_data(host, (void *)buf, offset);
+			nbytes += offset;
+		}
+
+		status = mci_readl(host, MINTSTS);
+		mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
+		if (status & DW_MCI_DATA_ERROR_FLAGS) {
+			host->data_status = status;
+			data->bytes_xfered += nbytes;
+
+			smp_wmb();
+
+			set_bit(EVENT_DATA_ERROR, &host->pending_events);
+
+			tasklet_schedule(&host->tasklet);
+			return;
+		}
+	} while (status & SDMMC_INT_TXDR); /* if TXDR is still set, write again */
+
+	host->pio_offset = offset;
+	data->bytes_xfered += nbytes;
+
+	return;
+
+done:
+	data->bytes_xfered += nbytes;
+	smp_wmb();
+	set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
+}
+
+static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
+{
+	if (!host->cmd_status)
+		host->cmd_status = status;
+
+	smp_wmb();
+
+	set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
+	tasklet_schedule(&host->tasklet);
+}
+
+static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
+{
+	struct dw_mci *host = dev_id;
+	u32 status, pending;
+	unsigned int pass_count = 0;
+
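+	/* Service pending interrupt sources in a loop, but give up after a bounded number of passes */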
+	do {
+		status = mci_readl(host, RINTSTS);
+		pending = mci_readl(host, MINTSTS); /* read-only mask reg */
+
+		/*
+		 * DTO fix - version 2.10a and below, and only if internal DMA
+		 * is configured.
+		 */
+		if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO) {
+			if (!pending &&
+			    ((mci_readl(host, STATUS) >> 17) & 0x1fff))
+				pending |= SDMMC_INT_DATA_OVER;
+		}
+
+		if (!pending)
+			break;
+
+		if (pending & DW_MCI_CMD_ERROR_FLAGS) {
+			mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
+			host->cmd_status = status;
+			smp_wmb();
+			set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
+			tasklet_schedule(&host->tasklet);
+		}
+
+		if (pending & DW_MCI_DATA_ERROR_FLAGS) {
+			/* if there is an error, report DATA_ERROR */
+			mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS);
+			host->data_status = status;
+			smp_wmb();
+			set_bit(EVENT_DATA_ERROR, &host->pending_events);
+			tasklet_schedule(&host->tasklet);
+		}
+
+		if (pending & SDMMC_INT_DATA_OVER) {
+			mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
+			if (!host->data_status)
+				host->data_status = status;
+			smp_wmb();
+			if (host->dir_status == DW_MCI_RECV_STATUS) {
+				if (host->sg != NULL)
+					dw_mci_read_data_pio(host);
+			}
+			set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
+			tasklet_schedule(&host->tasklet);
+		}
+
+		if (pending & SDMMC_INT_RXDR) {
+			mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
+			if (host->sg)
+				dw_mci_read_data_pio(host);
+		}
+
+		if (pending & SDMMC_INT_TXDR) {
+			mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
+			if (host->sg)
+				dw_mci_write_data_pio(host);
+		}
+
+		if (pending & SDMMC_INT_CMD_DONE) {
+			mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE);
+			dw_mci_cmd_interrupt(host, status);
+		}
+
+		if (pending & SDMMC_INT_CD) {
+			mci_writel(host, RINTSTS, SDMMC_INT_CD);
+			tasklet_schedule(&host->card_tasklet);
+		}
+
+	} while (pass_count++ < 5);
+
+#ifdef CONFIG_MMC_DW_IDMAC
+	/* Handle DMA interrupts */
+	pending = mci_readl(host, IDSTS);
+	if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
+		mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI);
+		mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI);
+		set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
+		host->dma_ops->complete(host);
+	}
+#endif
+
+	return IRQ_HANDLED;
+}
+
+static void dw_mci_tasklet_card(unsigned long data)
+{
+	struct dw_mci *host = (struct dw_mci *)data;
+	int i;
+
+	for (i = 0; i < host->num_slots; i++) {
+		struct dw_mci_slot *slot = host->slot[i];
+		struct mmc_host *mmc = slot->mmc;
+		struct mmc_request *mrq;
+		int present;
+		u32 ctrl;
+
+		present = dw_mci_get_cd(mmc);
+		while (present != slot->last_detect_state) {
+			spin_lock(&host->lock);
+
+			dev_dbg(&slot->mmc->class_dev, "card %s\n",
+				present ? "inserted" : "removed");
+
+			/* Card change detected */
+			slot->last_detect_state = present;
+
+			/* Power up slot */
+			if (present != 0) {
+				if (host->pdata->setpower)
+					host->pdata->setpower(slot->id,
+							      mmc->ocr_avail);
+
+				set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
+			}
+
+			/* Clean up queue if present */
+			mrq = slot->mrq;
+			if (mrq) {
+				if (mrq == host->mrq) {
+					host->data = NULL;
+					host->cmd = NULL;
+
+					switch (host->state) {
+					case STATE_IDLE:
+						break;
+					case STATE_SENDING_CMD:
+						mrq->cmd->error = -ENOMEDIUM;
+						if (!mrq->data)
+							break;
+						/* fall through */
+					case STATE_SENDING_DATA:
+						mrq->data->error = -ENOMEDIUM;
+						dw_mci_stop_dma(host);
+						break;
+					case STATE_DATA_BUSY:
+					case STATE_DATA_ERROR:
+						if (mrq->data->error == -EINPROGRESS)
+							mrq->data->error = -ENOMEDIUM;
+						if (!mrq->stop)
+							break;
+						/* fall through */
+					case STATE_SENDING_STOP:
+						mrq->stop->error = -ENOMEDIUM;
+						break;
+					}
+
+					dw_mci_request_end(host, mrq);
+				} else {
+					list_del(&slot->queue_node);
+					mrq->cmd->error = -ENOMEDIUM;
+					if (mrq->data)
+						mrq->data->error = -ENOMEDIUM;
+					if (mrq->stop)
+						mrq->stop->error = -ENOMEDIUM;
+
+					spin_unlock(&host->lock);
+					mmc_request_done(slot->mmc, mrq);
+					spin_lock(&host->lock);
+				}
+			}
+
+			/* Power down slot */
+			if (present == 0) {
+				if (host->pdata->setpower)
+					host->pdata->setpower(slot->id, 0);
+				clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
+
+				/*
+				 * Clear down the FIFO - doing so generates a
+				 * block interrupt, so the scatter-gather
+				 * pointer is set to NULL first.
+				 */
+				host->sg = NULL;
+
+				ctrl = mci_readl(host, CTRL);
+				ctrl |= SDMMC_CTRL_FIFO_RESET;
+				mci_writel(host, CTRL, ctrl);
+
+#ifdef CONFIG_MMC_DW_IDMAC
+				ctrl = mci_readl(host, BMOD);
+				ctrl |= 0x01; /* Software reset of DMA */
+				mci_writel(host, BMOD, ctrl);
+#endif
+
+			}
+
+			spin_unlock(&host->lock);
+			present = dw_mci_get_cd(mmc);
+		}
+
+		mmc_detect_change(slot->mmc,
+			msecs_to_jiffies(host->pdata->detect_delay_ms));
+	}
+}
+
+static int __init dw_mci_init_slot(struct dw_mci *host, unsigned int id)
+{
+	struct mmc_host *mmc;
+	struct dw_mci_slot *slot;
+
+	mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), &host->pdev->dev);
+	if (!mmc)
+		return -ENOMEM;
+
+	slot = mmc_priv(mmc);
+	slot->id = id;
+	slot->mmc = mmc;
+	slot->host = host;
+
+	mmc->ops = &dw_mci_ops;
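+	/* CLKDIV is 8 bits wide, so the largest divider (255) gives a minimum of bus_hz / 510 */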
+	mmc->f_min = DIV_ROUND_UP(host->bus_hz, 510);
+	mmc->f_max = host->bus_hz;
+
+	if (host->pdata->get_ocr)
+		mmc->ocr_avail = host->pdata->get_ocr(id);
+	else
+		mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
+
+	/*
+	 * Start with slot power disabled, it will be enabled when a card
+	 * is detected.
+	 */
+	if (host->pdata->setpower)
+		host->pdata->setpower(id, 0);
+
+	mmc->caps = 0;
+	if (host->pdata->get_bus_wd)
+		if (host->pdata->get_bus_wd(slot->id) >= 4)
+			mmc->caps |= MMC_CAP_4_BIT_DATA;
+
+	if (host->pdata->quirks & DW_MCI_QUIRK_HIGHSPEED)
+		mmc->caps |= MMC_CAP_SD_HIGHSPEED;
+
+#ifdef CONFIG_MMC_DW_IDMAC
+	mmc->max_segs = host->ring_size;
+	mmc->max_blk_size = 65536;
+	mmc->max_blk_count = host->ring_size;
+	mmc->max_seg_size = 0x1000;
+	mmc->max_req_size = mmc->max_seg_size * mmc->max_blk_count;
+#else
+	if (host->pdata->blk_settings) {
+		mmc->max_segs = host->pdata->blk_settings->max_segs;
+		mmc->max_blk_size = host->pdata->blk_settings->max_blk_size;
+		mmc->max_blk_count = host->pdata->blk_settings->max_blk_count;
+		mmc->max_req_size = host->pdata->blk_settings->max_req_size;
+		mmc->max_seg_size = host->pdata->blk_settings->max_seg_size;
+	} else {
+		/* Useful defaults if platform data is unset. */
+		mmc->max_segs = 64;
+		mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */
+		mmc->max_blk_count = 512;
+		mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
+		mmc->max_seg_size = mmc->max_req_size;
+	}
+#endif /* CONFIG_MMC_DW_IDMAC */
+
+	if (dw_mci_get_cd(mmc))
+		set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
+	else
+		clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
+
+	host->slot[id] = slot;
+	mmc_add_host(mmc);
+
+#if defined(CONFIG_DEBUG_FS)
+	dw_mci_init_debugfs(slot);
+#endif
+
+	/* Card initially undetected */
+	slot->last_detect_state = 0;
+
+	return 0;
+}
+
+static void dw_mci_cleanup_slot(struct dw_mci_slot *slot, unsigned int id)
+{
+	/* Shutdown detect IRQ */
+	if (slot->host->pdata->exit)
+		slot->host->pdata->exit(id);
+
+	/* Debugfs stuff is cleaned up by mmc core */
+	mmc_remove_host(slot->mmc);
+	slot->host->slot[id] = NULL;
+	mmc_free_host(slot->mmc);
+}
+
+static void dw_mci_init_dma(struct dw_mci *host)
+{
+	/* Alloc memory for sg translation */
+	host->sg_cpu = dma_alloc_coherent(&host->pdev->dev, PAGE_SIZE,
+					  &host->sg_dma, GFP_KERNEL);
+	if (!host->sg_cpu) {
+		dev_err(&host->pdev->dev, "%s: could not alloc DMA memory\n",
+			__func__);
+		goto no_dma;
+	}
+
+	/* Determine which DMA interface to use */
+#ifdef CONFIG_MMC_DW_IDMAC
+	host->dma_ops = &dw_mci_idmac_ops;
+	dev_info(&host->pdev->dev, "Using internal DMA controller.\n");
+#endif
+
+	if (!host->dma_ops)
+		goto no_dma;
+
+	if (host->dma_ops->init) {
+		if (host->dma_ops->init(host)) {
+			dev_err(&host->pdev->dev, "%s: Unable to initialize "
+				"DMA Controller.\n", __func__);
+			goto no_dma;
+		}
+	} else {
+		dev_err(&host->pdev->dev, "DMA initialization not found.\n");
+		goto no_dma;
+	}
+
+	host->use_dma = 1;
+	return;
+
+no_dma:
+	dev_info(&host->pdev->dev, "Using PIO mode.\n");
+	host->use_dma = 0;
+	return;
+}
+
+static bool mci_wait_reset(struct device *dev, struct dw_mci *host)
+{
+	unsigned long timeout = jiffies + msecs_to_jiffies(500);
+	unsigned int ctrl;
+
+	mci_writel(host, CTRL, (SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET |
+				SDMMC_CTRL_DMA_RESET));
+
+	/* wait till resets clear */
+	do {
+		ctrl = mci_readl(host, CTRL);
+		if (!(ctrl & (SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET |
+			      SDMMC_CTRL_DMA_RESET)))
+			return true;
+	} while (time_before(jiffies, timeout));
+
+	dev_err(dev, "Timeout resetting block (ctrl %#x)\n", ctrl);
+
+	return false;
+}
+
+static int dw_mci_probe(struct platform_device *pdev)
+{
+	struct dw_mci *host;
+	struct resource	*regs;
+	struct dw_mci_board *pdata;
+	int irq, ret, i, width;
+	u32 fifo_size;
+
+	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!regs)
+		return -ENXIO;
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0)
+		return irq;
+
+	host = kzalloc(sizeof(struct dw_mci), GFP_KERNEL);
+	if (!host)
+		return -ENOMEM;
+
+	host->pdev = pdev;
+	host->pdata = pdata = pdev->dev.platform_data;
+	if (!pdata || !pdata->init) {
+		dev_err(&pdev->dev,
+			"Platform data must supply init function\n");
+		ret = -ENODEV;
+		goto err_freehost;
+	}
+
+	if (!pdata->select_slot && pdata->num_slots > 1) {
+		dev_err(&pdev->dev,
+			"Platform data must supply select_slot function\n");
+		ret = -ENODEV;
+		goto err_freehost;
+	}
+
+	if (!pdata->bus_hz) {
+		dev_err(&pdev->dev,
+			"Platform data must supply bus speed\n");
+		ret = -ENODEV;
+		goto err_freehost;
+	}
+
+	host->bus_hz = pdata->bus_hz;
+	host->quirks = pdata->quirks;
+
+	spin_lock_init(&host->lock);
+	INIT_LIST_HEAD(&host->queue);
+
+	ret = -ENOMEM;
+	host->regs = ioremap(regs->start, regs->end - regs->start + 1);
+	if (!host->regs)
+		goto err_freehost;
+
+	host->dma_ops = pdata->dma_ops;
+	dw_mci_init_dma(host);
+
+	/*
+	 * Get the host data width - this assumes that HCON has been set with
+	 * the correct values.
+	 */
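+	/* Data width field: 0 = 16-bit, 1 = 32-bit, 2 = 64-bit FIFO access */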
+	i = (mci_readl(host, HCON) >> 7) & 0x7;
+	if (!i) {
+		host->push_data = dw_mci_push_data16;
+		host->pull_data = dw_mci_pull_data16;
+		width = 16;
+		host->data_shift = 1;
+	} else if (i == 2) {
+		host->push_data = dw_mci_push_data64;
+		host->pull_data = dw_mci_pull_data64;
+		width = 64;
+		host->data_shift = 3;
+	} else {
+		/* Check for a reserved value, and warn if one is found */
+		WARN((i != 1),
+		     "HCON reports a reserved host data width!\n"
+		     "Defaulting to 32-bit access.\n");
+		host->push_data = dw_mci_push_data32;
+		host->pull_data = dw_mci_pull_data32;
+		width = 32;
+		host->data_shift = 2;
+	}
+
+	/* Reset all blocks */
+	if (!mci_wait_reset(&pdev->dev, host)) {
+		ret = -ENODEV;
+		goto err_dmaunmap;
+	}
+
+	/* Clear the interrupts for the host controller */
+	mci_writel(host, RINTSTS, 0xFFFFFFFF);
+	mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
+
+	/* Put in max timeout */
+	mci_writel(host, TMOUT, 0xFFFFFFFF);
+
+	/*
+	 * FIFO threshold settings: RxMark = fifo_size / 2 - 1,
+	 * TxMark = fifo_size / 2, DMA Size = 8
+	 */
+	fifo_size = mci_readl(host, FIFOTH);
+	fifo_size = (fifo_size >> 16) & 0x7ff;
+	mci_writel(host, FIFOTH, ((0x2 << 28) | ((fifo_size/2 - 1) << 16) |
+				  ((fifo_size/2) << 0)));
+
+	/* disable clock to CIU */
+	mci_writel(host, CLKENA, 0);
+	mci_writel(host, CLKSRC, 0);
+
+	tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host);
+	tasklet_init(&host->card_tasklet,
+		     dw_mci_tasklet_card, (unsigned long)host);
+
+	ret = request_irq(irq, dw_mci_interrupt, 0, "dw-mci", host);
+	if (ret)
+		goto err_dmaunmap;
+
+	platform_set_drvdata(pdev, host);
+
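+	/* Slot count comes from platform data, or from the HCON register (stored as slots - 1) */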
+	if (host->pdata->num_slots)
+		host->num_slots = host->pdata->num_slots;
+	else
+		host->num_slots = ((mci_readl(host, HCON) >> 1) & 0x1F) + 1;
+
+	/* We need at least one slot to succeed */
+	for (i = 0; i < host->num_slots; i++) {
+		ret = dw_mci_init_slot(host, i);
+		if (ret) {
+			ret = -ENODEV;
+			goto err_init_slot;
+		}
+	}
+
+	/*
+	 * Enable interrupts for command done, data over, transmit/receive
+	 * data requests, card detect, and error conditions such as response
+	 * timeout and CRC errors.
+	 */
+	mci_writel(host, RINTSTS, 0xFFFFFFFF);
+	mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
+		   SDMMC_INT_TXDR | SDMMC_INT_RXDR |
+		   DW_MCI_ERROR_FLAGS | SDMMC_INT_CD);
+	mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); /* Enable mci interrupt */
+
+	dev_info(&pdev->dev, "DW MMC controller at irq %d, "
+		 "%d bit host data width\n", irq, width);
+	if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO)
+		dev_info(&pdev->dev, "Internal DMAC interrupt fix enabled.\n");
+
+	return 0;
+
+err_init_slot:
+	/* De-init any initialized slots */
+	while (i > 0) {
+		if (host->slot[i])
+			dw_mci_cleanup_slot(host->slot[i], i);
+		i--;
+	}
+	free_irq(irq, host);
+
+err_dmaunmap:
+	if (host->use_dma && host->dma_ops->exit)
+		host->dma_ops->exit(host);
+	dma_free_coherent(&host->pdev->dev, PAGE_SIZE,
+			  host->sg_cpu, host->sg_dma);
+	iounmap(host->regs);
+
+err_freehost:
+	kfree(host);
+	return ret;
+}
+
+static int __exit dw_mci_remove(struct platform_device *pdev)
+{
+	struct dw_mci *host = platform_get_drvdata(pdev);
+	int i;
+
+	mci_writel(host, RINTSTS, 0xFFFFFFFF);
+	mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
+
+	platform_set_drvdata(pdev, NULL);
+
+	for (i = 0; i < host->num_slots; i++) {
+		dev_dbg(&pdev->dev, "remove slot %d\n", i);
+		if (host->slot[i])
+			dw_mci_cleanup_slot(host->slot[i], i);
+	}
+
+	/* disable clock to CIU */
+	mci_writel(host, CLKENA, 0);
+	mci_writel(host, CLKSRC, 0);
+
+	free_irq(platform_get_irq(pdev, 0), host);
+	dma_free_coherent(&pdev->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma);
+
+	if (host->use_dma && host->dma_ops->exit)
+		host->dma_ops->exit(host);
+
+	iounmap(host->regs);
+
+	kfree(host);
+	return 0;
+}
+
+#ifdef CONFIG_PM
+/*
+ * TODO: we should probably disable the clock to the card in the suspend path.
+ */
+static int dw_mci_suspend(struct platform_device *pdev, pm_message_t mesg)
+{
+	int i, ret;
+	struct dw_mci *host = platform_get_drvdata(pdev);
+
+	for (i = 0; i < host->num_slots; i++) {
+		struct dw_mci_slot *slot = host->slot[i];
+		if (!slot)
+			continue;
+		ret = mmc_suspend_host(slot->mmc);
+		if (ret < 0) {
+			while (--i >= 0) {
+				slot = host->slot[i];
+				if (slot)
+					mmc_resume_host(host->slot[i]->mmc);
+			}
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static int dw_mci_resume(struct platform_device *pdev)
+{
+	int i, ret;
+	struct dw_mci *host = platform_get_drvdata(pdev);
+
+	for (i = 0; i < host->num_slots; i++) {
+		struct dw_mci_slot *slot = host->slot[i];
+		if (!slot)
+			continue;
+		ret = mmc_resume_host(host->slot[i]->mmc);
+		if (ret < 0)
+			return ret;
+	}
+
+	return 0;
+}
+#else
+#define dw_mci_suspend	NULL
+#define dw_mci_resume	NULL
+#endif /* CONFIG_PM */
+
+static struct platform_driver dw_mci_driver = {
+	.remove		= __exit_p(dw_mci_remove),
+	.suspend	= dw_mci_suspend,
+	.resume		= dw_mci_resume,
+	.driver		= {
+		.name		= "dw_mmc",
+	},
+};
+
+static int __init dw_mci_init(void)
+{
+	return platform_driver_probe(&dw_mci_driver, dw_mci_probe);
+}
+
+static void __exit dw_mci_exit(void)
+{
+	platform_driver_unregister(&dw_mci_driver);
+}
+
+module_init(dw_mci_init);
+module_exit(dw_mci_exit);
+
+MODULE_DESCRIPTION("DW Multimedia Card Interface driver");
+MODULE_AUTHOR("NXP Semiconductor VietNam");
+MODULE_AUTHOR("Imagination Technologies Ltd");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h
new file mode 100644
index 0000000..5dd55a7
--- /dev/null
+++ b/drivers/mmc/host/dw_mmc.h
@@ -0,0 +1,168 @@
+/*
+ * Synopsys DesignWare Multimedia Card Interface driver
+ *  (Based on NXP driver for lpc 31xx)
+ *
+ * Copyright (C) 2009 NXP Semiconductors
+ * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef _DW_MMC_H_
+#define _DW_MMC_H_
+
+#define SDMMC_CTRL		0x000
+#define SDMMC_PWREN		0x004
+#define SDMMC_CLKDIV		0x008
+#define SDMMC_CLKSRC		0x00c
+#define SDMMC_CLKENA		0x010
+#define SDMMC_TMOUT		0x014
+#define SDMMC_CTYPE		0x018
+#define SDMMC_BLKSIZ		0x01c
+#define SDMMC_BYTCNT		0x020
+#define SDMMC_INTMASK		0x024
+#define SDMMC_CMDARG		0x028
+#define SDMMC_CMD		0x02c
+#define SDMMC_RESP0		0x030
+#define SDMMC_RESP1		0x034
+#define SDMMC_RESP2		0x038
+#define SDMMC_RESP3		0x03c
+#define SDMMC_MINTSTS		0x040
+#define SDMMC_RINTSTS		0x044
+#define SDMMC_STATUS		0x048
+#define SDMMC_FIFOTH		0x04c
+#define SDMMC_CDETECT		0x050
+#define SDMMC_WRTPRT		0x054
+#define SDMMC_GPIO		0x058
+#define SDMMC_TCBCNT		0x05c
+#define SDMMC_TBBCNT		0x060
+#define SDMMC_DEBNCE		0x064
+#define SDMMC_USRID		0x068
+#define SDMMC_VERID		0x06c
+#define SDMMC_HCON		0x070
+#define SDMMC_BMOD		0x080
+#define SDMMC_PLDMND		0x084
+#define SDMMC_DBADDR		0x088
+#define SDMMC_IDSTS		0x08c
+#define SDMMC_IDINTEN		0x090
+#define SDMMC_DSCADDR		0x094
+#define SDMMC_BUFADDR		0x098
+#define SDMMC_DATA		0x100
+#define SDMMC_DATA_ADR		0x100
+
+/* shift bit field */
+#define _SBF(f, v)		((v) << (f))
+
+/* Control register defines */
+#define SDMMC_CTRL_USE_IDMAC		BIT(25)
+#define SDMMC_CTRL_CEATA_INT_EN		BIT(11)
+#define SDMMC_CTRL_SEND_AS_CCSD		BIT(10)
+#define SDMMC_CTRL_SEND_CCSD		BIT(9)
+#define SDMMC_CTRL_ABRT_READ_DATA	BIT(8)
+#define SDMMC_CTRL_SEND_IRQ_RESP	BIT(7)
+#define SDMMC_CTRL_READ_WAIT		BIT(6)
+#define SDMMC_CTRL_DMA_ENABLE		BIT(5)
+#define SDMMC_CTRL_INT_ENABLE		BIT(4)
+#define SDMMC_CTRL_DMA_RESET		BIT(2)
+#define SDMMC_CTRL_FIFO_RESET		BIT(1)
+#define SDMMC_CTRL_RESET		BIT(0)
+/* Clock Enable register defines */
+#define SDMMC_CLKEN_LOW_PWR		BIT(16)
+#define SDMMC_CLKEN_ENABLE		BIT(0)
+/* time-out register defines */
+#define SDMMC_TMOUT_DATA(n)		_SBF(8, (n))
+#define SDMMC_TMOUT_DATA_MSK		0xFFFFFF00
+#define SDMMC_TMOUT_RESP(n)		((n) & 0xFF)
+#define SDMMC_TMOUT_RESP_MSK		0xFF
+/* card-type register defines */
+#define SDMMC_CTYPE_8BIT		BIT(16)
+#define SDMMC_CTYPE_4BIT		BIT(0)
+#define SDMMC_CTYPE_1BIT		0
+/* Interrupt status & mask register defines */
+#define SDMMC_INT_SDIO			BIT(16)
+#define SDMMC_INT_EBE			BIT(15)
+#define SDMMC_INT_ACD			BIT(14)
+#define SDMMC_INT_SBE			BIT(13)
+#define SDMMC_INT_HLE			BIT(12)
+#define SDMMC_INT_FRUN			BIT(11)
+#define SDMMC_INT_HTO			BIT(10)
+#define SDMMC_INT_DTO			BIT(9)
+#define SDMMC_INT_RTO			BIT(8)
+#define SDMMC_INT_DCRC			BIT(7)
+#define SDMMC_INT_RCRC			BIT(6)
+#define SDMMC_INT_RXDR			BIT(5)
+#define SDMMC_INT_TXDR			BIT(4)
+#define SDMMC_INT_DATA_OVER		BIT(3)
+#define SDMMC_INT_CMD_DONE		BIT(2)
+#define SDMMC_INT_RESP_ERR		BIT(1)
+#define SDMMC_INT_CD			BIT(0)
+#define SDMMC_INT_ERROR			0xbfc2
+/* Command register defines */
+#define SDMMC_CMD_START			BIT(31)
+#define SDMMC_CMD_CCS_EXP		BIT(23)
+#define SDMMC_CMD_CEATA_RD		BIT(22)
+#define SDMMC_CMD_UPD_CLK		BIT(21)
+#define SDMMC_CMD_INIT			BIT(15)
+#define SDMMC_CMD_STOP			BIT(14)
+#define SDMMC_CMD_PRV_DAT_WAIT		BIT(13)
+#define SDMMC_CMD_SEND_STOP		BIT(12)
+#define SDMMC_CMD_STRM_MODE		BIT(11)
+#define SDMMC_CMD_DAT_WR		BIT(10)
+#define SDMMC_CMD_DAT_EXP		BIT(9)
+#define SDMMC_CMD_RESP_CRC		BIT(8)
+#define SDMMC_CMD_RESP_LONG		BIT(7)
+#define SDMMC_CMD_RESP_EXP		BIT(6)
+#define SDMMC_CMD_INDX(n)		((n) & 0x1F)
+/* Status register defines */
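+/* FIFO_COUNT lives in STATUS bits [25:17] */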
+#define SDMMC_GET_FCNT(x)		(((x)>>17) & 0x1FF)
+#define SDMMC_FIFO_SZ			32
+/* Internal DMAC interrupt defines */
+#define SDMMC_IDMAC_INT_AI		BIT(9)
+#define SDMMC_IDMAC_INT_NI		BIT(8)
+#define SDMMC_IDMAC_INT_CES		BIT(5)
+#define SDMMC_IDMAC_INT_DU		BIT(4)
+#define SDMMC_IDMAC_INT_FBE		BIT(2)
+#define SDMMC_IDMAC_INT_RI		BIT(1)
+#define SDMMC_IDMAC_INT_TI		BIT(0)
+/* Internal DMAC bus mode bits */
+#define SDMMC_IDMAC_ENABLE		BIT(7)
+#define SDMMC_IDMAC_FB			BIT(1)
+#define SDMMC_IDMAC_SWRESET		BIT(0)
+
+/* Register access macros */
+#define mci_readl(dev, reg)			\
+	__raw_readl(dev->regs + SDMMC_##reg)
+#define mci_writel(dev, reg, value)			\
+	__raw_writel((value), dev->regs + SDMMC_##reg)
+
+/* 16-bit FIFO access macros */
+#define mci_readw(dev, reg)			\
+	__raw_readw(dev->regs + SDMMC_##reg)
+#define mci_writew(dev, reg, value)			\
+	__raw_writew((value), dev->regs + SDMMC_##reg)
+
+/* 64-bit FIFO access macros */
+#ifdef readq
+#define mci_readq(dev, reg)			\
+	__raw_readq(dev->regs + SDMMC_##reg)
+#define mci_writeq(dev, reg, value)			\
+	__raw_writeq((value), dev->regs + SDMMC_##reg)
+#else
+/*
+ * Dummy readq implementation for architectures that don't define it.
+ *
+ * We assume that none of these architectures configures the IP block
+ * with a 64-bit FIFO width, so this code will never be executed on
+ * those machines. Defining these macros here keeps the rest of the
+ * code free from ifdefs.
+ */
+#define mci_readq(dev, reg)			\
+	(*(volatile u64 __force *)(dev->regs + SDMMC_##reg))
+#define mci_writeq(dev, reg, value)			\
+	(*(volatile u64 __force *)(dev->regs + SDMMC_##reg) = value)
+#endif
+
+#endif /* _DW_MMC_H_ */
diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c
index bdd2cbb..4428594 100644
--- a/drivers/mmc/host/mxcmmc.c
+++ b/drivers/mmc/host/mxcmmc.c
@@ -31,6 +31,7 @@
 #include <linux/clk.h>
 #include <linux/io.h>
 #include <linux/gpio.h>
+#include <linux/regulator/consumer.h>
 
 #include <asm/dma.h>
 #include <asm/irq.h>
@@ -141,10 +142,49 @@
 
 	struct work_struct	datawork;
 	spinlock_t		lock;
+
+	struct regulator	*vcc;
 };
 
 static void mxcmci_set_clk_rate(struct mxcmci_host *host, unsigned int clk_ios);
 
+static inline void mxcmci_init_ocr(struct mxcmci_host *host)
+{
+	host->vcc = regulator_get(mmc_dev(host->mmc), "vmmc");
+
+	if (IS_ERR(host->vcc)) {
+		host->vcc = NULL;
+	} else {
+		host->mmc->ocr_avail = mmc_regulator_get_ocrmask(host->vcc);
+		if (host->pdata && host->pdata->ocr_avail)
+			dev_warn(mmc_dev(host->mmc),
+				"pdata->ocr_avail will not be used\n");
+	}
+
+	if (host->vcc == NULL) {
+		/* fall-back to platform data */
+		if (host->pdata && host->pdata->ocr_avail)
+			host->mmc->ocr_avail = host->pdata->ocr_avail;
+		else
+			host->mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
+	}
+}
+
+static inline void mxcmci_set_power(struct mxcmci_host *host,
+				    unsigned char power_mode,
+				    unsigned int vdd)
+{
+	if (host->vcc) {
+		if (power_mode == MMC_POWER_UP)
+			mmc_regulator_set_ocr(host->mmc, host->vcc, vdd);
+		else if (power_mode == MMC_POWER_OFF)
+			mmc_regulator_set_ocr(host->mmc, host->vcc, 0);
+	}
+
+	if (host->pdata && host->pdata->setpower)
+		host->pdata->setpower(mmc_dev(host->mmc), vdd);
+}
+
 static inline int mxcmci_use_dma(struct mxcmci_host *host)
 {
 	return host->do_dma;
@@ -680,9 +720,9 @@
 		host->cmdat &= ~CMD_DAT_CONT_BUS_WIDTH_4;
 
 	if (host->power_mode != ios->power_mode) {
-		if (host->pdata && host->pdata->setpower)
-			host->pdata->setpower(mmc_dev(mmc), ios->vdd);
+		mxcmci_set_power(host, ios->power_mode, ios->vdd);
 		host->power_mode = ios->power_mode;
+
 		if (ios->power_mode == MMC_POWER_ON)
 			host->cmdat |= CMD_DAT_CONT_INIT;
 	}
@@ -807,10 +847,7 @@
 	host->pdata = pdev->dev.platform_data;
 	spin_lock_init(&host->lock);
 
-	if (host->pdata && host->pdata->ocr_avail)
-		mmc->ocr_avail = host->pdata->ocr_avail;
-	else
-		mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
+	mxcmci_init_ocr(host);
 
 	if (host->pdata && host->pdata->dat3_card_detect)
 		host->default_irq_mask =
@@ -915,6 +952,9 @@
 
 	mmc_remove_host(mmc);
 
+	if (host->vcc)
+		regulator_put(host->vcc);
+
 	if (host->pdata && host->pdata->exit)
 		host->pdata->exit(&pdev->dev, mmc);
 
@@ -927,7 +967,6 @@
 	clk_put(host->clk);
 
 	release_mem_region(host->res->start, resource_size(host->res));
-	release_resource(host->res);
 
 	mmc_free_host(mmc);
 
diff --git a/drivers/mmc/host/sdhci-dove.c b/drivers/mmc/host/sdhci-dove.c
new file mode 100644
index 0000000..2aeef4f
--- /dev/null
+++ b/drivers/mmc/host/sdhci-dove.c
@@ -0,0 +1,70 @@
+/*
+ * sdhci-dove.c Support for SDHCI on Marvell's Dove SoC
+ *
+ * Author: Saeed Bishara <saeed@marvell.com>
+ *	   Mike Rapoport <mike@compulab.co.il>
+ * Based on sdhci-cns3xxx.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/io.h>
+#include <linux/mmc/host.h>
+
+#include "sdhci.h"
+#include "sdhci-pltfm.h"
+
+static u16 sdhci_dove_readw(struct sdhci_host *host, int reg)
+{
+	u16 ret;
+
+	switch (reg) {
+	case SDHCI_HOST_VERSION:
+	case SDHCI_SLOT_INT_STATUS:
+		/* those registers don't exist */
+		return 0;
+	default:
+		ret = readw(host->ioaddr + reg);
+	}
+	return ret;
+}
+
+static u32 sdhci_dove_readl(struct sdhci_host *host, int reg)
+{
+	u32 ret;
+
+	switch (reg) {
+	case SDHCI_CAPABILITIES:
+		ret = readl(host->ioaddr + reg);
+		/* Mask the support for 3.0V */
+		ret &= ~SDHCI_CAN_VDD_300;
+		break;
+	default:
+		ret = readl(host->ioaddr + reg);
+	}
+	return ret;
+}
+
+static struct sdhci_ops sdhci_dove_ops = {
+	.read_w	= sdhci_dove_readw,
+	.read_l	= sdhci_dove_readl,
+};
+
+struct sdhci_pltfm_data sdhci_dove_pdata = {
+	.ops	= &sdhci_dove_ops,
+	.quirks	= SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER |
+		  SDHCI_QUIRK_NO_BUSY_IRQ |
+		  SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
+		  SDHCI_QUIRK_FORCE_DMA,
+};
diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
index 3d9c246..0dc905b 100644
--- a/drivers/mmc/host/sdhci-pci.c
+++ b/drivers/mmc/host/sdhci-pci.c
@@ -176,6 +176,74 @@
 	.quirks		= SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
 };
 
+/* O2Micro extra registers */
+#define O2_SD_LOCK_WP		0xD3
+#define O2_SD_MULTI_VCC3V	0xEE
+#define O2_SD_CLKREQ		0xEC
+#define O2_SD_CAPS		0xE0
+#define O2_SD_ADMA1		0xE2
+#define O2_SD_ADMA2		0xE7
+#define O2_SD_INF_MOD		0xF1
+
+static int o2_probe(struct sdhci_pci_chip *chip)
+{
+	int ret;
+	u8 scratch;
+
+	switch (chip->pdev->device) {
+	case PCI_DEVICE_ID_O2_8220:
+	case PCI_DEVICE_ID_O2_8221:
+	case PCI_DEVICE_ID_O2_8320:
+	case PCI_DEVICE_ID_O2_8321:
+		/* This extra setup is required due to broken ADMA. */
+		ret = pci_read_config_byte(chip->pdev, O2_SD_LOCK_WP, &scratch);
+		if (ret)
+			return ret;
+		scratch &= 0x7f;
+		pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch);
+
+		/* Set Multi 3 to VCC3V# */
+		pci_write_config_byte(chip->pdev, O2_SD_MULTI_VCC3V, 0x08);
+
+		/* Disable CLK_REQ# support after media DET */
+		ret = pci_read_config_byte(chip->pdev, O2_SD_CLKREQ, &scratch);
+		if (ret)
+			return ret;
+		scratch |= 0x20;
+		pci_write_config_byte(chip->pdev, O2_SD_CLKREQ, scratch);
+
+		/* Choose capabilities, enable SDMA.  We have to write 0x01
+		 * to the capabilities register first to unlock it.
+		 */
+		ret = pci_read_config_byte(chip->pdev, O2_SD_CAPS, &scratch);
+		if (ret)
+			return ret;
+		scratch |= 0x01;
+		pci_write_config_byte(chip->pdev, O2_SD_CAPS, scratch);
+		pci_write_config_byte(chip->pdev, O2_SD_CAPS, 0x73);
+
+		/* Disable ADMA1/2 */
+		pci_write_config_byte(chip->pdev, O2_SD_ADMA1, 0x39);
+		pci_write_config_byte(chip->pdev, O2_SD_ADMA2, 0x08);
+
+		/* Disable the infinite transfer mode */
+		ret = pci_read_config_byte(chip->pdev, O2_SD_INF_MOD, &scratch);
+		if (ret)
+			return ret;
+		scratch |= 0x08;
+		pci_write_config_byte(chip->pdev, O2_SD_INF_MOD, scratch);
+
+		/* Lock WP */
+		ret = pci_read_config_byte(chip->pdev, O2_SD_LOCK_WP, &scratch);
+		if (ret)
+			return ret;
+		scratch |= 0x80;
+		pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch);
+	}
+
+	return 0;
+}
+
 static int jmicron_pmos(struct sdhci_pci_chip *chip, int on)
 {
 	u8 scratch;
@@ -204,6 +272,7 @@
 static int jmicron_probe(struct sdhci_pci_chip *chip)
 {
 	int ret;
+	u16 mmcdev = 0;
 
 	if (chip->pdev->revision == 0) {
 		chip->quirks |= SDHCI_QUIRK_32BIT_DMA_ADDR |
@@ -225,12 +294,17 @@
 	 * 2. The MMC interface has a lower subfunction number
 	 *    than the SD interface.
 	 */
-	if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_SD) {
+	if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_SD)
+		mmcdev = PCI_DEVICE_ID_JMICRON_JMB38X_MMC;
+	else if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_SD)
+		mmcdev = PCI_DEVICE_ID_JMICRON_JMB388_ESD;
+
+	if (mmcdev) {
 		struct pci_dev *sd_dev;
 
 		sd_dev = NULL;
 		while ((sd_dev = pci_get_device(PCI_VENDOR_ID_JMICRON,
-			PCI_DEVICE_ID_JMICRON_JMB38X_MMC, sd_dev)) != NULL) {
+						mmcdev, sd_dev)) != NULL) {
 			if ((PCI_SLOT(chip->pdev->devfn) ==
 				PCI_SLOT(sd_dev->devfn)) &&
 				(chip->pdev->bus == sd_dev->bus))
@@ -290,13 +364,25 @@
 			slot->host->quirks |= SDHCI_QUIRK_BROKEN_ADMA;
 	}
 
+	/* JM388 MMC doesn't support 1.8V while SD supports it */
+	if (slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD) {
+		slot->host->ocr_avail_sd = MMC_VDD_32_33 | MMC_VDD_33_34 |
+			MMC_VDD_29_30 | MMC_VDD_30_31 |
+			MMC_VDD_165_195; /* allow 1.8V */
+		slot->host->ocr_avail_mmc = MMC_VDD_32_33 | MMC_VDD_33_34 |
+			MMC_VDD_29_30 | MMC_VDD_30_31; /* no 1.8V for MMC */
+	}
+
 	/*
 	 * The secondary interface requires a bit set to get the
 	 * interrupts.
 	 */
-	if (slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC)
+	if (slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC ||
+	    slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD)
 		jmicron_enable_mmc(slot->host, 1);
 
+	slot->host->mmc->caps |= MMC_CAP_BUS_WIDTH_TEST;
+
 	return 0;
 }
 
@@ -305,7 +391,8 @@
 	if (dead)
 		return;
 
-	if (slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC)
+	if (slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC ||
+	    slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD)
 		jmicron_enable_mmc(slot->host, 0);
 }
 
@@ -313,7 +400,8 @@
 {
 	int i;
 
-	if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC) {
+	if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC ||
+	    chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD) {
 		for (i = 0;i < chip->num_slots;i++)
 			jmicron_enable_mmc(chip->slots[i]->host, 0);
 	}
@@ -325,7 +413,8 @@
 {
 	int ret, i;
 
-	if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC) {
+	if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC ||
+	    chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD) {
 		for (i = 0;i < chip->num_slots;i++)
 			jmicron_enable_mmc(chip->slots[i]->host, 1);
 	}
@@ -339,6 +428,10 @@
 	return 0;
 }
 
+static const struct sdhci_pci_fixes sdhci_o2 = {
+	.probe		= o2_probe,
+};
+
 static const struct sdhci_pci_fixes sdhci_jmicron = {
 	.probe		= jmicron_probe,
 
@@ -510,6 +603,22 @@
 	},
 
 	{
+		.vendor		= PCI_VENDOR_ID_JMICRON,
+		.device		= PCI_DEVICE_ID_JMICRON_JMB388_SD,
+		.subvendor	= PCI_ANY_ID,
+		.subdevice	= PCI_ANY_ID,
+		.driver_data	= (kernel_ulong_t)&sdhci_jmicron,
+	},
+
+	{
+		.vendor		= PCI_VENDOR_ID_JMICRON,
+		.device		= PCI_DEVICE_ID_JMICRON_JMB388_ESD,
+		.subvendor	= PCI_ANY_ID,
+		.subdevice	= PCI_ANY_ID,
+		.driver_data	= (kernel_ulong_t)&sdhci_jmicron,
+	},
+
+	{
 		.vendor		= PCI_VENDOR_ID_SYSKONNECT,
 		.device		= 0x8000,
 		.subvendor	= PCI_ANY_ID,
@@ -589,6 +698,46 @@
 		.driver_data	= (kernel_ulong_t)&sdhci_intel_mfd_emmc_sdio,
 	},
 
+	{
+		.vendor		= PCI_VENDOR_ID_O2,
+		.device		= PCI_DEVICE_ID_O2_8120,
+		.subvendor	= PCI_ANY_ID,
+		.subdevice	= PCI_ANY_ID,
+		.driver_data	= (kernel_ulong_t)&sdhci_o2,
+	},
+
+	{
+		.vendor		= PCI_VENDOR_ID_O2,
+		.device		= PCI_DEVICE_ID_O2_8220,
+		.subvendor	= PCI_ANY_ID,
+		.subdevice	= PCI_ANY_ID,
+		.driver_data	= (kernel_ulong_t)&sdhci_o2,
+	},
+
+	{
+		.vendor		= PCI_VENDOR_ID_O2,
+		.device		= PCI_DEVICE_ID_O2_8221,
+		.subvendor	= PCI_ANY_ID,
+		.subdevice	= PCI_ANY_ID,
+		.driver_data	= (kernel_ulong_t)&sdhci_o2,
+	},
+
+	{
+		.vendor		= PCI_VENDOR_ID_O2,
+		.device		= PCI_DEVICE_ID_O2_8320,
+		.subvendor	= PCI_ANY_ID,
+		.subdevice	= PCI_ANY_ID,
+		.driver_data	= (kernel_ulong_t)&sdhci_o2,
+	},
+
+	{
+		.vendor		= PCI_VENDOR_ID_O2,
+		.device		= PCI_DEVICE_ID_O2_8321,
+		.subvendor	= PCI_ANY_ID,
+		.subdevice	= PCI_ANY_ID,
+		.driver_data	= (kernel_ulong_t)&sdhci_o2,
+	},
+
 	{	/* Generic SD host controller */
 		PCI_DEVICE_CLASS((PCI_CLASS_SYSTEM_SDHCI << 8), 0xFFFF00)
 	},
diff --git a/drivers/mmc/host/sdhci-pltfm.c b/drivers/mmc/host/sdhci-pltfm.c
index 0502f89..dbab040 100644
--- a/drivers/mmc/host/sdhci-pltfm.c
+++ b/drivers/mmc/host/sdhci-pltfm.c
@@ -170,6 +170,12 @@
 #ifdef CONFIG_MMC_SDHCI_ESDHC_IMX
 	{ "sdhci-esdhc-imx", (kernel_ulong_t)&sdhci_esdhc_imx_pdata },
 #endif
+#ifdef CONFIG_MMC_SDHCI_DOVE
+	{ "sdhci-dove", (kernel_ulong_t)&sdhci_dove_pdata },
+#endif
+#ifdef CONFIG_MMC_SDHCI_TEGRA
+	{ "sdhci-tegra", (kernel_ulong_t)&sdhci_tegra_pdata },
+#endif
 	{ },
 };
 MODULE_DEVICE_TABLE(platform, sdhci_pltfm_ids);
diff --git a/drivers/mmc/host/sdhci-pltfm.h b/drivers/mmc/host/sdhci-pltfm.h
index c1bfe48..ea2e44d 100644
--- a/drivers/mmc/host/sdhci-pltfm.h
+++ b/drivers/mmc/host/sdhci-pltfm.h
@@ -22,5 +22,7 @@
 
 extern struct sdhci_pltfm_data sdhci_cns3xxx_pdata;
 extern struct sdhci_pltfm_data sdhci_esdhc_imx_pdata;
+extern struct sdhci_pltfm_data sdhci_dove_pdata;
+extern struct sdhci_pltfm_data sdhci_tegra_pdata;
 
 #endif /* _DRIVERS_MMC_SDHCI_PLTFM_H */
diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
index aacb862..1720358 100644
--- a/drivers/mmc/host/sdhci-s3c.c
+++ b/drivers/mmc/host/sdhci-s3c.c
@@ -130,6 +130,15 @@
 	if (!clksrc)
 		return UINT_MAX;
 
+	/*
+	 * When 'clk_type' is S3C_SDHCI_CLK_DIV_EXTERNAL, the clock divider
+	 * steps in increments of 1, unlike the host controller's internal
+	 * divider, so query the clock framework directly.
+	 */
+	if (ourhost->pdata->clk_type) {
+		rate = clk_round_rate(clksrc, wanted);
+		return wanted - rate;
+	}
+
 	rate = clk_get_rate(clksrc);
 
 	for (div = 1; div < 256; div *= 2) {
@@ -232,6 +241,42 @@
 	return min;
 }
 
+/* sdhci_cmu_get_max_clock - callback to get maximum clock frequency. */
+static unsigned int sdhci_cmu_get_max_clock(struct sdhci_host *host)
+{
+	struct sdhci_s3c *ourhost = to_s3c(host);
+
+	return clk_round_rate(ourhost->clk_bus[ourhost->cur_clk], UINT_MAX);
+}
+
+/* sdhci_cmu_get_min_clock - callback to get minimal supported clock value. */
+static unsigned int sdhci_cmu_get_min_clock(struct sdhci_host *host)
+{
+	struct sdhci_s3c *ourhost = to_s3c(host);
+
+	/*
+	 * The initial clock can be anywhere in the 100 kHz - 400 kHz range,
+	 * so report the upper bound (400 kHz) as the minimum clock.
+	 */
+	return clk_round_rate(ourhost->clk_bus[ourhost->cur_clk], 400000);
+}
+
+/* sdhci_cmu_set_clock - callback on clock change. */
+static void sdhci_cmu_set_clock(struct sdhci_host *host, unsigned int clock)
+{
+	struct sdhci_s3c *ourhost = to_s3c(host);
+
+	/* don't bother if the clock is going off */
+	if (clock == 0)
+		return;
+
+	sdhci_s3c_set_clock(host, clock);
+
+	clk_set_rate(ourhost->clk_bus[ourhost->cur_clk], clock);
+
+	host->clock = clock;
+}
+
 static struct sdhci_ops sdhci_s3c_ops = {
 	.get_max_clock		= sdhci_s3c_get_max_clk,
 	.set_clock		= sdhci_s3c_set_clock,
@@ -361,6 +406,13 @@
 
 		clks++;
 		sc->clk_bus[ptr] = clk;
+
+		/*
+		 * Save the current clock index so the overriding callbacks
+		 * know which clock bus is in use.
+		 */
+		sc->cur_clk = ptr;
+
 		clk_enable(clk);
 
 		dev_info(dev, "clock source %d: %s (%ld Hz)\n",
@@ -427,6 +479,20 @@
 	/* HSMMC on Samsung SoCs uses SDCLK as timeout clock */
 	host->quirks |= SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK;
 
+	/*
+	 * If the controller does not have an internal clock divider,
+	 * override the default clock callbacks.
+	 */
+	if (pdata->clk_type) {
+		sdhci_s3c_ops.set_clock = sdhci_cmu_set_clock;
+		sdhci_s3c_ops.get_min_clock = sdhci_cmu_get_min_clock;
+		sdhci_s3c_ops.get_max_clock = sdhci_cmu_get_max_clock;
+	}
+
+	/* Apply any additional host capabilities supplied by platform data */
+	if (pdata->host_caps)
+		host->mmc->caps |= pdata->host_caps;
+
 	ret = sdhci_add_host(host);
 	if (ret) {
 		dev_err(dev, "sdhci_add_host() failed\n");
diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c
new file mode 100644
index 0000000..4823ee9
--- /dev/null
+++ b/drivers/mmc/host/sdhci-tegra.c
@@ -0,0 +1,257 @@
+/*
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/gpio.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/host.h>
+
+#include <mach/gpio.h>
+#include <mach/sdhci.h>
+
+#include "sdhci.h"
+#include "sdhci-pltfm.h"
+
+static u32 tegra_sdhci_readl(struct sdhci_host *host, int reg)
+{
+	u32 val;
+
+	if (unlikely(reg == SDHCI_PRESENT_STATE)) {
+		/* Use wp_gpio here instead? */
+		val = readl(host->ioaddr + reg);
+		return val | SDHCI_WRITE_PROTECT;
+	}
+
+	return readl(host->ioaddr + reg);
+}
+
+static u16 tegra_sdhci_readw(struct sdhci_host *host, int reg)
+{
+	if (unlikely(reg == SDHCI_HOST_VERSION)) {
+		/* Erratum: Version register is invalid in HW. */
+		return SDHCI_SPEC_200;
+	}
+
+	return readw(host->ioaddr + reg);
+}
+
+static void tegra_sdhci_writel(struct sdhci_host *host, u32 val, int reg)
+{
+	/* Seems like we're getting spurious timeout and crc errors, so
+	 * disable signalling of them. In case of real errors, software
+	 * timers should take care of eventually detecting them.
+	 */
+	if (unlikely(reg == SDHCI_SIGNAL_ENABLE))
+		val &= ~(SDHCI_INT_TIMEOUT|SDHCI_INT_CRC);
+
+	writel(val, host->ioaddr + reg);
+
+	if (unlikely(reg == SDHCI_INT_ENABLE)) {
+		/* Erratum: Must enable block gap interrupt detection */
+		u8 gap_ctrl = readb(host->ioaddr + SDHCI_BLOCK_GAP_CONTROL);
+		if (val & SDHCI_INT_CARD_INT)
+			gap_ctrl |= 0x8;
+		else
+			gap_ctrl &= ~0x8;
+		writeb(gap_ctrl, host->ioaddr + SDHCI_BLOCK_GAP_CONTROL);
+	}
+}
+
+static unsigned int tegra_sdhci_get_ro(struct sdhci_host *sdhci)
+{
+	struct platform_device *pdev = to_platform_device(mmc_dev(sdhci->mmc));
+	struct tegra_sdhci_platform_data *plat;
+
+	plat = pdev->dev.platform_data;
+
+	if (!gpio_is_valid(plat->wp_gpio))
+		return -1;
+
+	return gpio_get_value(plat->wp_gpio);
+}
+
+static irqreturn_t carddetect_irq(int irq, void *data)
+{
+	struct sdhci_host *sdhost = (struct sdhci_host *)data;
+
+	tasklet_schedule(&sdhost->card_tasklet);
+	return IRQ_HANDLED;
+};
+
+static int tegra_sdhci_8bit(struct sdhci_host *host, int bus_width)
+{
+	struct platform_device *pdev = to_platform_device(mmc_dev(host->mmc));
+	struct tegra_sdhci_platform_data *plat;
+	u32 ctrl;
+
+	plat = pdev->dev.platform_data;
+
+	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
+	if (plat->is_8bit && bus_width == MMC_BUS_WIDTH_8) {
+		ctrl &= ~SDHCI_CTRL_4BITBUS;
+		ctrl |= SDHCI_CTRL_8BITBUS;
+	} else {
+		ctrl &= ~SDHCI_CTRL_8BITBUS;
+		if (bus_width == MMC_BUS_WIDTH_4)
+			ctrl |= SDHCI_CTRL_4BITBUS;
+		else
+			ctrl &= ~SDHCI_CTRL_4BITBUS;
+	}
+	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
+	return 0;
+}
+
+
+static int tegra_sdhci_pltfm_init(struct sdhci_host *host,
+				  struct sdhci_pltfm_data *pdata)
+{
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct platform_device *pdev = to_platform_device(mmc_dev(host->mmc));
+	struct tegra_sdhci_platform_data *plat;
+	struct clk *clk;
+	int rc;
+
+	plat = pdev->dev.platform_data;
+	if (plat == NULL) {
+		dev_err(mmc_dev(host->mmc), "missing platform data\n");
+		return -ENXIO;
+	}
+
+	if (gpio_is_valid(plat->power_gpio)) {
+		rc = gpio_request(plat->power_gpio, "sdhci_power");
+		if (rc) {
+			dev_err(mmc_dev(host->mmc),
+				"failed to allocate power gpio\n");
+			goto out;
+		}
+		tegra_gpio_enable(plat->power_gpio);
+		gpio_direction_output(plat->power_gpio, 1);
+	}
+
+	if (gpio_is_valid(plat->cd_gpio)) {
+		rc = gpio_request(plat->cd_gpio, "sdhci_cd");
+		if (rc) {
+			dev_err(mmc_dev(host->mmc),
+				"failed to allocate cd gpio\n");
+			goto out_power;
+		}
+		tegra_gpio_enable(plat->cd_gpio);
+		gpio_direction_input(plat->cd_gpio);
+
+		rc = request_irq(gpio_to_irq(plat->cd_gpio), carddetect_irq,
+				 IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
+				 mmc_hostname(host->mmc), host);
+
+		if (rc)	{
+			dev_err(mmc_dev(host->mmc), "request irq error\n");
+			goto out_cd;
+		}
+
+	}
+
+	if (gpio_is_valid(plat->wp_gpio)) {
+		rc = gpio_request(plat->wp_gpio, "sdhci_wp");
+		if (rc) {
+			dev_err(mmc_dev(host->mmc),
+				"failed to allocate wp gpio\n");
+			goto out_cd;
+		}
+		tegra_gpio_enable(plat->wp_gpio);
+		gpio_direction_input(plat->wp_gpio);
+	}
+
+	clk = clk_get(mmc_dev(host->mmc), NULL);
+	if (IS_ERR(clk)) {
+		dev_err(mmc_dev(host->mmc), "clk err\n");
+		rc = PTR_ERR(clk);
+		goto out_wp;
+	}
+	clk_enable(clk);
+	pltfm_host->clk = clk;
+
+	if (plat->is_8bit)
+		host->mmc->caps |= MMC_CAP_8_BIT_DATA;
+
+	return 0;
+
+out_wp:
+	if (gpio_is_valid(plat->wp_gpio)) {
+		tegra_gpio_disable(plat->wp_gpio);
+		gpio_free(plat->wp_gpio);
+	}
+
+out_cd:
+	if (gpio_is_valid(plat->cd_gpio)) {
+		tegra_gpio_disable(plat->cd_gpio);
+		gpio_free(plat->cd_gpio);
+	}
+
+out_power:
+	if (gpio_is_valid(plat->power_gpio)) {
+		tegra_gpio_disable(plat->power_gpio);
+		gpio_free(plat->power_gpio);
+	}
+
+out:
+	return rc;
+}
+
+static void tegra_sdhci_pltfm_exit(struct sdhci_host *host)
+{
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct platform_device *pdev = to_platform_device(mmc_dev(host->mmc));
+	struct tegra_sdhci_platform_data *plat;
+
+	plat = pdev->dev.platform_data;
+
+	if (gpio_is_valid(plat->wp_gpio)) {
+		tegra_gpio_disable(plat->wp_gpio);
+		gpio_free(plat->wp_gpio);
+	}
+
+	if (gpio_is_valid(plat->cd_gpio)) {
+		tegra_gpio_disable(plat->cd_gpio);
+		gpio_free(plat->cd_gpio);
+	}
+
+	if (gpio_is_valid(plat->power_gpio)) {
+		tegra_gpio_disable(plat->power_gpio);
+		gpio_free(plat->power_gpio);
+	}
+
+	clk_disable(pltfm_host->clk);
+	clk_put(pltfm_host->clk);
+}
+
+static struct sdhci_ops tegra_sdhci_ops = {
+	.get_ro     = tegra_sdhci_get_ro,
+	.read_l     = tegra_sdhci_readl,
+	.read_w     = tegra_sdhci_readw,
+	.write_l    = tegra_sdhci_writel,
+	.platform_8bit_width = tegra_sdhci_8bit,
+};
+
+struct sdhci_pltfm_data sdhci_tegra_pdata = {
+	.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
+		  SDHCI_QUIRK_SINGLE_POWER_WRITE |
+		  SDHCI_QUIRK_NO_HISPD_BIT |
+		  SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC,
+	.ops  = &tegra_sdhci_ops,
+	.init = tegra_sdhci_pltfm_init,
+	.exit = tegra_sdhci_pltfm_exit,
+};
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index a25db42..9e15f41 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -23,6 +23,7 @@
 
 #include <linux/leds.h>
 
+#include <linux/mmc/mmc.h>
 #include <linux/mmc/host.h>
 
 #include "sdhci.h"
@@ -77,8 +78,11 @@
 	printk(KERN_DEBUG DRIVER_NAME ": AC12 err: 0x%08x | Slot int: 0x%08x\n",
 		sdhci_readw(host, SDHCI_ACMD12_ERR),
 		sdhci_readw(host, SDHCI_SLOT_INT_STATUS));
-	printk(KERN_DEBUG DRIVER_NAME ": Caps:     0x%08x | Max curr: 0x%08x\n",
+	printk(KERN_DEBUG DRIVER_NAME ": Caps:     0x%08x | Caps_1:   0x%08x\n",
 		sdhci_readl(host, SDHCI_CAPABILITIES),
+		sdhci_readl(host, SDHCI_CAPABILITIES_1));
+	printk(KERN_DEBUG DRIVER_NAME ": Cmd:      0x%08x | Max curr: 0x%08x\n",
+		sdhci_readw(host, SDHCI_COMMAND),
 		sdhci_readl(host, SDHCI_MAX_CURRENT));
 
 	if (host->flags & SDHCI_USE_ADMA)
@@ -1518,7 +1522,11 @@
 
 	if (intmask & SDHCI_INT_DATA_TIMEOUT)
 		host->data->error = -ETIMEDOUT;
-	else if (intmask & (SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_END_BIT))
+	else if (intmask & SDHCI_INT_DATA_END_BIT)
+		host->data->error = -EILSEQ;
+	else if ((intmask & SDHCI_INT_DATA_CRC) &&
+		SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))
+			!= MMC_BUS_TEST_R)
 		host->data->error = -EILSEQ;
 	else if (intmask & SDHCI_INT_ADMA_ERROR) {
 		printk(KERN_ERR "%s: ADMA error\n", mmc_hostname(host->mmc));
@@ -1736,7 +1744,7 @@
 int sdhci_add_host(struct sdhci_host *host)
 {
 	struct mmc_host *mmc;
-	unsigned int caps;
+	unsigned int caps, ocr_avail;
 	int ret;
 
 	WARN_ON(host == NULL);
@@ -1890,13 +1898,26 @@
 	    mmc_card_is_removable(mmc))
 		mmc->caps |= MMC_CAP_NEEDS_POLL;
 
-	mmc->ocr_avail = 0;
+	ocr_avail = 0;
 	if (caps & SDHCI_CAN_VDD_330)
-		mmc->ocr_avail |= MMC_VDD_32_33|MMC_VDD_33_34;
+		ocr_avail |= MMC_VDD_32_33 | MMC_VDD_33_34;
 	if (caps & SDHCI_CAN_VDD_300)
-		mmc->ocr_avail |= MMC_VDD_29_30|MMC_VDD_30_31;
+		ocr_avail |= MMC_VDD_29_30 | MMC_VDD_30_31;
 	if (caps & SDHCI_CAN_VDD_180)
-		mmc->ocr_avail |= MMC_VDD_165_195;
+		ocr_avail |= MMC_VDD_165_195;
+
+	mmc->ocr_avail = ocr_avail;
+	mmc->ocr_avail_sdio = ocr_avail;
+	if (host->ocr_avail_sdio)
+		mmc->ocr_avail_sdio &= host->ocr_avail_sdio;
+	mmc->ocr_avail_sd = ocr_avail;
+	if (host->ocr_avail_sd)
+		mmc->ocr_avail_sd &= host->ocr_avail_sd;
+	else /* normal SD controllers don't support 1.8V */
+		mmc->ocr_avail_sd &= ~MMC_VDD_165_195;
+	mmc->ocr_avail_mmc = ocr_avail;
+	if (host->ocr_avail_mmc)
+		mmc->ocr_avail_mmc &= host->ocr_avail_mmc;
 
 	if (mmc->ocr_avail == 0) {
 		printk(KERN_ERR "%s: Hardware doesn't report any "
@@ -1928,10 +1949,14 @@
 	 * of bytes. When doing hardware scatter/gather, each entry cannot
 	 * be larger than 64 KiB though.
 	 */
-	if (host->flags & SDHCI_USE_ADMA)
-		mmc->max_seg_size = 65536;
-	else
+	if (host->flags & SDHCI_USE_ADMA) {
+		if (host->quirks & SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC)
+			mmc->max_seg_size = 65535;
+		else
+			mmc->max_seg_size = 65536;
+	} else {
 		mmc->max_seg_size = mmc->max_req_size;
+	}
 
 	/*
 	 * Maximum block size. This varies from controller to controller and
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index e42d7f0..6e0969e 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -52,6 +52,7 @@
 #define  SDHCI_CMD_RESP_SHORT_BUSY 0x03
 
 #define SDHCI_MAKE_CMD(c, f) (((c & 0xff) << 8) | (f & 0xff))
+#define SDHCI_GET_CMD(c) ((c>>8) & 0x3f)
 
 #define SDHCI_RESPONSE		0x10
 
@@ -165,7 +166,7 @@
 #define  SDHCI_CAN_VDD_180	0x04000000
 #define  SDHCI_CAN_64BIT	0x10000000
 
-/* 44-47 reserved for more caps */
+#define SDHCI_CAPABILITIES_1	0x44
 
 #define SDHCI_MAX_CURRENT	0x48
 
diff --git a/drivers/mmc/host/sdricoh_cs.c b/drivers/mmc/host/sdricoh_cs.c
index f472c27..bbc298f 100644
--- a/drivers/mmc/host/sdricoh_cs.c
+++ b/drivers/mmc/host/sdricoh_cs.c
@@ -446,7 +446,7 @@
 	mmc->max_seg_size = 1024 * 512;
 	mmc->max_blk_size = 512;
 
-	/* reset the controler */
+	/* reset the controller */
 	if (sdricoh_reset(host)) {
 		dev_dbg(dev, "could not reset\n");
 		result = -EIO;
@@ -478,7 +478,7 @@
 	dev_info(&pcmcia_dev->dev, "Searching MMC controller for pcmcia device"
 		" %s %s ...\n", pcmcia_dev->prod_id[0], pcmcia_dev->prod_id[1]);
 
-	/* search pci cardbus bridge that contains the mmc controler */
+	/* search pci cardbus bridge that contains the mmc controller */
 	/* the io region is already claimed by yenta_socket... */
 	while ((pci_dev =
 		pci_get_device(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_RL5C476,
diff --git a/drivers/mmc/host/tmio_mmc.c b/drivers/mmc/host/tmio_mmc.c
index e7765a8..e3c6ef2 100644
--- a/drivers/mmc/host/tmio_mmc.c
+++ b/drivers/mmc/host/tmio_mmc.c
@@ -25,16 +25,261 @@
  *   double buffer support
  *
  */
-#include <linux/module.h>
-#include <linux/irq.h>
-#include <linux/device.h>
+
 #include <linux/delay.h>
+#include <linux/device.h>
 #include <linux/dmaengine.h>
-#include <linux/mmc/host.h>
+#include <linux/highmem.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
 #include <linux/mfd/core.h>
 #include <linux/mfd/tmio.h>
+#include <linux/mmc/host.h>
+#include <linux/module.h>
+#include <linux/pagemap.h>
+#include <linux/scatterlist.h>
+#include <linux/workqueue.h>
+#include <linux/spinlock.h>
 
-#include "tmio_mmc.h"
+#define CTL_SD_CMD 0x00
+#define CTL_ARG_REG 0x04
+#define CTL_STOP_INTERNAL_ACTION 0x08
+#define CTL_XFER_BLK_COUNT 0xa
+#define CTL_RESPONSE 0x0c
+#define CTL_STATUS 0x1c
+#define CTL_IRQ_MASK 0x20
+#define CTL_SD_CARD_CLK_CTL 0x24
+#define CTL_SD_XFER_LEN 0x26
+#define CTL_SD_MEM_CARD_OPT 0x28
+#define CTL_SD_ERROR_DETAIL_STATUS 0x2c
+#define CTL_SD_DATA_PORT 0x30
+#define CTL_TRANSACTION_CTL 0x34
+#define CTL_SDIO_STATUS 0x36
+#define CTL_SDIO_IRQ_MASK 0x38
+#define CTL_RESET_SD 0xe0
+#define CTL_SDIO_REGS 0x100
+#define CTL_CLK_AND_WAIT_CTL 0x138
+#define CTL_RESET_SDIO 0x1e0
+
+/* Definitions for values the CTRL_STATUS register can take. */
+#define TMIO_STAT_CMDRESPEND    0x00000001
+#define TMIO_STAT_DATAEND       0x00000004
+#define TMIO_STAT_CARD_REMOVE   0x00000008
+#define TMIO_STAT_CARD_INSERT   0x00000010
+#define TMIO_STAT_SIGSTATE      0x00000020
+#define TMIO_STAT_WRPROTECT     0x00000080
+#define TMIO_STAT_CARD_REMOVE_A 0x00000100
+#define TMIO_STAT_CARD_INSERT_A 0x00000200
+#define TMIO_STAT_SIGSTATE_A    0x00000400
+#define TMIO_STAT_CMD_IDX_ERR   0x00010000
+#define TMIO_STAT_CRCFAIL       0x00020000
+#define TMIO_STAT_STOPBIT_ERR   0x00040000
+#define TMIO_STAT_DATATIMEOUT   0x00080000
+#define TMIO_STAT_RXOVERFLOW    0x00100000
+#define TMIO_STAT_TXUNDERRUN    0x00200000
+#define TMIO_STAT_CMDTIMEOUT    0x00400000
+#define TMIO_STAT_RXRDY         0x01000000
+#define TMIO_STAT_TXRQ          0x02000000
+#define TMIO_STAT_ILL_FUNC      0x20000000
+#define TMIO_STAT_CMD_BUSY      0x40000000
+#define TMIO_STAT_ILL_ACCESS    0x80000000
+
+/* Definitions for values the CTRL_SDIO_STATUS register can take. */
+#define TMIO_SDIO_STAT_IOIRQ	0x0001
+#define TMIO_SDIO_STAT_EXPUB52	0x4000
+#define TMIO_SDIO_STAT_EXWT	0x8000
+#define TMIO_SDIO_MASK_ALL	0xc007
+
+/* Define some IRQ masks */
+/* This is the mask used at reset by the chip */
+#define TMIO_MASK_ALL           0x837f031d
+#define TMIO_MASK_READOP  (TMIO_STAT_RXRDY | TMIO_STAT_DATAEND)
+#define TMIO_MASK_WRITEOP (TMIO_STAT_TXRQ | TMIO_STAT_DATAEND)
+#define TMIO_MASK_CMD     (TMIO_STAT_CMDRESPEND | TMIO_STAT_CMDTIMEOUT | \
+		TMIO_STAT_CARD_REMOVE | TMIO_STAT_CARD_INSERT)
+#define TMIO_MASK_IRQ     (TMIO_MASK_READOP | TMIO_MASK_WRITEOP | TMIO_MASK_CMD)
+
+#define enable_mmc_irqs(host, i) \
+	do { \
+		u32 mask;\
+		mask  = sd_ctrl_read32((host), CTL_IRQ_MASK); \
+		mask &= ~((i) & TMIO_MASK_IRQ); \
+		sd_ctrl_write32((host), CTL_IRQ_MASK, mask); \
+	} while (0)
+
+#define disable_mmc_irqs(host, i) \
+	do { \
+		u32 mask;\
+		mask  = sd_ctrl_read32((host), CTL_IRQ_MASK); \
+		mask |= ((i) & TMIO_MASK_IRQ); \
+		sd_ctrl_write32((host), CTL_IRQ_MASK, mask); \
+	} while (0)
+
+#define ack_mmc_irqs(host, i) \
+	do { \
+		sd_ctrl_write32((host), CTL_STATUS, ~(i)); \
+	} while (0)
+
+/* This is arbitrary, just no one has needed any higher alignment yet */
+#define MAX_ALIGN 4
+
+struct tmio_mmc_host {
+	void __iomem *ctl;
+	unsigned long bus_shift;
+	struct mmc_command      *cmd;
+	struct mmc_request      *mrq;
+	struct mmc_data         *data;
+	struct mmc_host         *mmc;
+	int                     irq;
+	unsigned int		sdio_irq_enabled;
+
+	/* Callbacks for clock / power control */
+	void (*set_pwr)(struct platform_device *host, int state);
+	void (*set_clk_div)(struct platform_device *host, int state);
+
+	/* pio related stuff */
+	struct scatterlist      *sg_ptr;
+	struct scatterlist      *sg_orig;
+	unsigned int            sg_len;
+	unsigned int            sg_off;
+
+	struct platform_device *pdev;
+
+	/* DMA support */
+	struct dma_chan		*chan_rx;
+	struct dma_chan		*chan_tx;
+	struct tasklet_struct	dma_complete;
+	struct tasklet_struct	dma_issue;
+#ifdef CONFIG_TMIO_MMC_DMA
+	unsigned int            dma_sglen;
+	u8			bounce_buf[PAGE_CACHE_SIZE] __attribute__((aligned(MAX_ALIGN)));
+	struct scatterlist	bounce_sg;
+#endif
+
+	/* Track lost interrupts */
+	struct delayed_work	delayed_reset_work;
+	spinlock_t		lock;
+	unsigned long		last_req_ts;
+};
+
+static void tmio_check_bounce_buffer(struct tmio_mmc_host *host);
+
+static u16 sd_ctrl_read16(struct tmio_mmc_host *host, int addr)
+{
+	return readw(host->ctl + (addr << host->bus_shift));
+}
+
+static void sd_ctrl_read16_rep(struct tmio_mmc_host *host, int addr,
+		u16 *buf, int count)
+{
+	readsw(host->ctl + (addr << host->bus_shift), buf, count);
+}
+
+static u32 sd_ctrl_read32(struct tmio_mmc_host *host, int addr)
+{
+	return readw(host->ctl + (addr << host->bus_shift)) |
+	       readw(host->ctl + ((addr + 2) << host->bus_shift)) << 16;
+}
+
+static void sd_ctrl_write16(struct tmio_mmc_host *host, int addr, u16 val)
+{
+	writew(val, host->ctl + (addr << host->bus_shift));
+}
+
+static void sd_ctrl_write16_rep(struct tmio_mmc_host *host, int addr,
+		u16 *buf, int count)
+{
+	writesw(host->ctl + (addr << host->bus_shift), buf, count);
+}
+
+static void sd_ctrl_write32(struct tmio_mmc_host *host, int addr, u32 val)
+{
+	writew(val, host->ctl + (addr << host->bus_shift));
+	writew(val >> 16, host->ctl + ((addr + 2) << host->bus_shift));
+}
+
+static void tmio_mmc_init_sg(struct tmio_mmc_host *host, struct mmc_data *data)
+{
+	host->sg_len = data->sg_len;
+	host->sg_ptr = data->sg;
+	host->sg_orig = data->sg;
+	host->sg_off = 0;
+}
+
+static int tmio_mmc_next_sg(struct tmio_mmc_host *host)
+{
+	host->sg_ptr = sg_next(host->sg_ptr);
+	host->sg_off = 0;
+	return --host->sg_len;
+}
+
+static char *tmio_mmc_kmap_atomic(struct scatterlist *sg, unsigned long *flags)
+{
+	local_irq_save(*flags);
+	return kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset;
+}
+
+static void tmio_mmc_kunmap_atomic(void *virt, unsigned long *flags)
+{
+	kunmap_atomic(virt, KM_BIO_SRC_IRQ);
+	local_irq_restore(*flags);
+}
+
+#ifdef CONFIG_MMC_DEBUG
+
+#define STATUS_TO_TEXT(a) \
+	do { \
+		if (status & TMIO_STAT_##a) \
+			printk(#a); \
+	} while (0)
+
+void pr_debug_status(u32 status)
+{
+	printk(KERN_DEBUG "status: %08x = ", status);
+	STATUS_TO_TEXT(CARD_REMOVE);
+	STATUS_TO_TEXT(CARD_INSERT);
+	STATUS_TO_TEXT(SIGSTATE);
+	STATUS_TO_TEXT(WRPROTECT);
+	STATUS_TO_TEXT(CARD_REMOVE_A);
+	STATUS_TO_TEXT(CARD_INSERT_A);
+	STATUS_TO_TEXT(SIGSTATE_A);
+	STATUS_TO_TEXT(CMD_IDX_ERR);
+	STATUS_TO_TEXT(STOPBIT_ERR);
+	STATUS_TO_TEXT(ILL_FUNC);
+	STATUS_TO_TEXT(CMD_BUSY);
+	STATUS_TO_TEXT(CMDRESPEND);
+	STATUS_TO_TEXT(DATAEND);
+	STATUS_TO_TEXT(CRCFAIL);
+	STATUS_TO_TEXT(DATATIMEOUT);
+	STATUS_TO_TEXT(CMDTIMEOUT);
+	STATUS_TO_TEXT(RXOVERFLOW);
+	STATUS_TO_TEXT(TXUNDERRUN);
+	STATUS_TO_TEXT(RXRDY);
+	STATUS_TO_TEXT(TXRQ);
+	STATUS_TO_TEXT(ILL_ACCESS);
+	printk("\n");
+}
+
+#else
+#define pr_debug_status(s)  do { } while (0)
+#endif
+
+static void tmio_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
+{
+	struct tmio_mmc_host *host = mmc_priv(mmc);
+
+	if (enable) {
+		host->sdio_irq_enabled = 1;
+		sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0001);
+		sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK,
+			(TMIO_SDIO_MASK_ALL & ~TMIO_SDIO_STAT_IOIRQ));
+	} else {
+		sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, TMIO_SDIO_MASK_ALL);
+		sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0000);
+		host->sdio_irq_enabled = 0;
+	}
+}
 
 static void tmio_mmc_set_clock(struct tmio_mmc_host *host, int new_clock)
 {
@@ -55,8 +300,23 @@
 
 static void tmio_mmc_clk_stop(struct tmio_mmc_host *host)
 {
+	struct mfd_cell *cell = host->pdev->dev.platform_data;
+	struct tmio_mmc_data *pdata = cell->driver_data;
+
+	/*
+	 * Testing on sh-mobile showed that SDIO IRQs are unmasked when
+	 * CTL_CLK_AND_WAIT_CTL gets written, so we have to disable the
+	 * device IRQ here and restore the SDIO IRQ mask before
+	 * re-enabling the device IRQ.
+	 */
+	if (pdata->flags & TMIO_MMC_SDIO_IRQ)
+		disable_irq(host->irq);
 	sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0000);
 	msleep(10);
+	if (pdata->flags & TMIO_MMC_SDIO_IRQ) {
+		tmio_mmc_enable_sdio_irq(host->mmc, host->sdio_irq_enabled);
+		enable_irq(host->irq);
+	}
 	sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~0x0100 &
 		sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
 	msleep(10);
@@ -64,11 +324,21 @@
 
 static void tmio_mmc_clk_start(struct tmio_mmc_host *host)
 {
+	struct mfd_cell *cell = host->pdev->dev.platform_data;
+	struct tmio_mmc_data *pdata = cell->driver_data;
+
 	sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, 0x0100 |
 		sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
 	msleep(10);
+	/* see comment in tmio_mmc_clk_stop above */
+	if (pdata->flags & TMIO_MMC_SDIO_IRQ)
+		disable_irq(host->irq);
 	sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0100);
 	msleep(10);
+	if (pdata->flags & TMIO_MMC_SDIO_IRQ) {
+		tmio_mmc_enable_sdio_irq(host->mmc, host->sdio_irq_enabled);
+		enable_irq(host->irq);
+	}
 }
 
 static void reset(struct tmio_mmc_host *host)
@@ -82,15 +352,60 @@
 	msleep(10);
 }
 
+static void tmio_mmc_reset_work(struct work_struct *work)
+{
+	struct tmio_mmc_host *host = container_of(work, struct tmio_mmc_host,
+						  delayed_reset_work.work);
+	struct mmc_request *mrq;
+	unsigned long flags;
+
+	spin_lock_irqsave(&host->lock, flags);
+	mrq = host->mrq;
+
+	/* request already finished */
+	if (!mrq
+	    || time_is_after_jiffies(host->last_req_ts +
+		msecs_to_jiffies(2000))) {
+		spin_unlock_irqrestore(&host->lock, flags);
+		return;
+	}
+
+	dev_warn(&host->pdev->dev,
+		"timeout waiting for hardware interrupt (CMD%u)\n",
+		mrq->cmd->opcode);
+
+	if (host->data)
+		host->data->error = -ETIMEDOUT;
+	else if (host->cmd)
+		host->cmd->error = -ETIMEDOUT;
+	else
+		mrq->cmd->error = -ETIMEDOUT;
+
+	host->cmd = NULL;
+	host->data = NULL;
+	host->mrq = NULL;
+
+	spin_unlock_irqrestore(&host->lock, flags);
+
+	reset(host);
+
+	mmc_request_done(host->mmc, mrq);
+}
+
 static void
 tmio_mmc_finish_request(struct tmio_mmc_host *host)
 {
 	struct mmc_request *mrq = host->mrq;
 
+	if (!mrq)
+		return;
+
 	host->mrq = NULL;
 	host->cmd = NULL;
 	host->data = NULL;
 
+	cancel_delayed_work(&host->delayed_reset_work);
+
 	mmc_request_done(host->mmc, mrq);
 }
 
@@ -200,6 +515,7 @@
 	return;
 }
 
+/* needs to be called with host->lock held */
 static void tmio_mmc_do_data_irq(struct tmio_mmc_host *host)
 {
 	struct mmc_data *data = host->data;
@@ -233,6 +549,8 @@
 	if (data->flags & MMC_DATA_READ) {
 		if (!host->chan_rx)
 			disable_mmc_irqs(host, TMIO_MASK_READOP);
+		else
+			tmio_check_bounce_buffer(host);
 		dev_dbg(&host->pdev->dev, "Complete Rx request %p\n",
 			host->mrq);
 	} else {
@@ -254,10 +572,12 @@
 
 static void tmio_mmc_data_irq(struct tmio_mmc_host *host)
 {
-	struct mmc_data *data = host->data;
+	struct mmc_data *data;
+	spin_lock(&host->lock);
+	data = host->data;
 
 	if (!data)
-		return;
+		goto out;
 
 	if (host->chan_tx && (data->flags & MMC_DATA_WRITE)) {
 		/*
@@ -278,6 +598,8 @@
 	} else {
 		tmio_mmc_do_data_irq(host);
 	}
+out:
+	spin_unlock(&host->lock);
 }
 
 static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host,
@@ -286,9 +608,11 @@
 	struct mmc_command *cmd = host->cmd;
 	int i, addr;
 
+	spin_lock(&host->lock);
+
 	if (!host->cmd) {
 		pr_debug("Spurious CMD irq\n");
-		return;
+		goto out;
 	}
 
 	host->cmd = NULL;
@@ -324,8 +648,7 @@
 			if (!host->chan_rx)
 				enable_mmc_irqs(host, TMIO_MASK_READOP);
 		} else {
-			struct dma_chan *chan = host->chan_tx;
-			if (!chan)
+			if (!host->chan_tx)
 				enable_mmc_irqs(host, TMIO_MASK_WRITEOP);
 			else
 				tasklet_schedule(&host->dma_issue);
@@ -334,13 +657,19 @@
 		tmio_mmc_finish_request(host);
 	}
 
+out:
+	spin_unlock(&host->lock);
+
 	return;
 }
 
 static irqreturn_t tmio_mmc_irq(int irq, void *devid)
 {
 	struct tmio_mmc_host *host = devid;
+	struct mfd_cell	*cell = host->pdev->dev.platform_data;
+	struct tmio_mmc_data *pdata = cell->driver_data;
 	unsigned int ireg, irq_mask, status;
+	unsigned int sdio_ireg, sdio_irq_mask, sdio_status;
 
 	pr_debug("MMC IRQ begin\n");
 
@@ -348,6 +677,29 @@
 	irq_mask = sd_ctrl_read32(host, CTL_IRQ_MASK);
 	ireg = status & TMIO_MASK_IRQ & ~irq_mask;
 
+	sdio_ireg = 0;
+	if (!ireg && pdata->flags & TMIO_MMC_SDIO_IRQ) {
+		sdio_status = sd_ctrl_read16(host, CTL_SDIO_STATUS);
+		sdio_irq_mask = sd_ctrl_read16(host, CTL_SDIO_IRQ_MASK);
+		sdio_ireg = sdio_status & TMIO_SDIO_MASK_ALL & ~sdio_irq_mask;
+
+		sd_ctrl_write16(host, CTL_SDIO_STATUS, sdio_status & ~TMIO_SDIO_MASK_ALL);
+
+		if (sdio_ireg && !host->sdio_irq_enabled) {
+			pr_warning("tmio_mmc: Spurious SDIO IRQ, disabling! 0x%04x 0x%04x 0x%04x\n",
+				   sdio_status, sdio_irq_mask, sdio_ireg);
+			tmio_mmc_enable_sdio_irq(host->mmc, 0);
+			goto out;
+		}
+
+		if (host->mmc->caps & MMC_CAP_SDIO_IRQ &&
+			sdio_ireg & TMIO_SDIO_STAT_IOIRQ)
+			mmc_signal_sdio_irq(host->mmc);
+
+		if (sdio_ireg)
+			goto out;
+	}
+
 	pr_debug_status(status);
 	pr_debug_status(ireg);
 
@@ -375,8 +727,10 @@
  */
 
 		/* Command completion */
-		if (ireg & TMIO_MASK_CMD) {
-			ack_mmc_irqs(host, TMIO_MASK_CMD);
+		if (ireg & (TMIO_STAT_CMDRESPEND | TMIO_STAT_CMDTIMEOUT)) {
+			ack_mmc_irqs(host,
+				     TMIO_STAT_CMDRESPEND |
+				     TMIO_STAT_CMDTIMEOUT);
 			tmio_mmc_cmd_irq(host, status);
 		}
 
@@ -407,6 +761,16 @@
 }
 
 #ifdef CONFIG_TMIO_MMC_DMA
+static void tmio_check_bounce_buffer(struct tmio_mmc_host *host)
+{
+	if (host->sg_ptr == &host->bounce_sg) {
+		unsigned long flags;
+		void *sg_vaddr = tmio_mmc_kmap_atomic(host->sg_orig, &flags);
+		memcpy(sg_vaddr, host->bounce_buf, host->bounce_sg.length);
+		tmio_mmc_kunmap_atomic(sg_vaddr, &flags);
+	}
+}
+
 static void tmio_mmc_enable_dma(struct tmio_mmc_host *host, bool enable)
 {
 #if defined(CONFIG_SUPERH) || defined(CONFIG_ARCH_SHMOBILE)
@@ -427,12 +791,39 @@
 		enable_mmc_irqs(host, TMIO_STAT_DATAEND);
 }
 
-static int tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
+static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
 {
-	struct scatterlist *sg = host->sg_ptr;
+	struct scatterlist *sg = host->sg_ptr, *sg_tmp;
 	struct dma_async_tx_descriptor *desc = NULL;
 	struct dma_chan *chan = host->chan_rx;
-	int ret;
+	struct mfd_cell	*cell = host->pdev->dev.platform_data;
+	struct tmio_mmc_data *pdata = cell->driver_data;
+	dma_cookie_t cookie;
+	int ret, i;
+	bool aligned = true, multiple = true;
+	unsigned int align = (1 << pdata->dma->alignment_shift) - 1;
+
+	for_each_sg(sg, sg_tmp, host->sg_len, i) {
+		if (sg_tmp->offset & align)
+			aligned = false;
+		if (sg_tmp->length & align) {
+			multiple = false;
+			break;
+		}
+	}
+
+	if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_CACHE_SIZE ||
+			  align >= MAX_ALIGN)) || !multiple) {
+		ret = -EINVAL;
+		goto pio;
+	}
+
+	/* Only a single sg element may be unaligned; use our bounce buffer in that case */
+	if (!aligned) {
+		sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length);
+		host->sg_ptr = &host->bounce_sg;
+		sg = host->sg_ptr;
+	}
 
 	ret = dma_map_sg(&host->pdev->dev, sg, host->sg_len, DMA_FROM_DEVICE);
 	if (ret > 0) {
@@ -442,21 +833,21 @@
 	}
 
 	if (desc) {
-		host->desc = desc;
 		desc->callback = tmio_dma_complete;
 		desc->callback_param = host;
-		host->cookie = desc->tx_submit(desc);
-		if (host->cookie < 0) {
-			host->desc = NULL;
-			ret = host->cookie;
+		cookie = desc->tx_submit(desc);
+		if (cookie < 0) {
+			desc = NULL;
+			ret = cookie;
 		} else {
 			chan->device->device_issue_pending(chan);
 		}
 	}
 	dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n",
-		__func__, host->sg_len, ret, host->cookie, host->mrq);
+		__func__, host->sg_len, ret, cookie, host->mrq);
 
-	if (!host->desc) {
+pio:
+	if (!desc) {
 		/* DMA failed, fall back to PIO */
 		if (ret >= 0)
 			ret = -EIO;
@@ -471,24 +862,49 @@
 		dev_warn(&host->pdev->dev,
 			 "DMA failed: %d, falling back to PIO\n", ret);
 		tmio_mmc_enable_dma(host, false);
-		reset(host);
-		/* Fail this request, let above layers recover */
-		host->mrq->cmd->error = ret;
-		tmio_mmc_finish_request(host);
 	}
 
 	dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d, sg[%d]\n", __func__,
-		desc, host->cookie, host->sg_len);
-
-	return ret > 0 ? 0 : ret;
+		desc, cookie, host->sg_len);
 }
 
-static int tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
+static void tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
 {
-	struct scatterlist *sg = host->sg_ptr;
+	struct scatterlist *sg = host->sg_ptr, *sg_tmp;
 	struct dma_async_tx_descriptor *desc = NULL;
 	struct dma_chan *chan = host->chan_tx;
-	int ret;
+	struct mfd_cell	*cell = host->pdev->dev.platform_data;
+	struct tmio_mmc_data *pdata = cell->driver_data;
+	dma_cookie_t cookie;
+	int ret, i;
+	bool aligned = true, multiple = true;
+	unsigned int align = (1 << pdata->dma->alignment_shift) - 1;
+
+	for_each_sg(sg, sg_tmp, host->sg_len, i) {
+		if (sg_tmp->offset & align)
+			aligned = false;
+		if (sg_tmp->length & align) {
+			multiple = false;
+			break;
+		}
+	}
+
+	if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_CACHE_SIZE ||
+			  align >= MAX_ALIGN)) || !multiple) {
+		ret = -EINVAL;
+		goto pio;
+	}
+
+	/* Only a single sg element may be unaligned; use our bounce buffer in that case */
+	if (!aligned) {
+		unsigned long flags;
+		void *sg_vaddr = tmio_mmc_kmap_atomic(sg, &flags);
+		sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length);
+		memcpy(host->bounce_buf, sg_vaddr, host->bounce_sg.length);
+		tmio_mmc_kunmap_atomic(sg_vaddr, &flags);
+		host->sg_ptr = &host->bounce_sg;
+		sg = host->sg_ptr;
+	}
 
 	ret = dma_map_sg(&host->pdev->dev, sg, host->sg_len, DMA_TO_DEVICE);
 	if (ret > 0) {
@@ -498,19 +914,19 @@
 	}
 
 	if (desc) {
-		host->desc = desc;
 		desc->callback = tmio_dma_complete;
 		desc->callback_param = host;
-		host->cookie = desc->tx_submit(desc);
-		if (host->cookie < 0) {
-			host->desc = NULL;
-			ret = host->cookie;
+		cookie = desc->tx_submit(desc);
+		if (cookie < 0) {
+			desc = NULL;
+			ret = cookie;
 		}
 	}
 	dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n",
-		__func__, host->sg_len, ret, host->cookie, host->mrq);
+		__func__, host->sg_len, ret, cookie, host->mrq);
 
-	if (!host->desc) {
+pio:
+	if (!desc) {
 		/* DMA failed, fall back to PIO */
 		if (ret >= 0)
 			ret = -EIO;
@@ -525,30 +941,22 @@
 		dev_warn(&host->pdev->dev,
 			 "DMA failed: %d, falling back to PIO\n", ret);
 		tmio_mmc_enable_dma(host, false);
-		reset(host);
-		/* Fail this request, let above layers recover */
-		host->mrq->cmd->error = ret;
-		tmio_mmc_finish_request(host);
 	}
 
 	dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d\n", __func__,
-		desc, host->cookie);
-
-	return ret > 0 ? 0 : ret;
+		desc, cookie);
 }
 
-static int tmio_mmc_start_dma(struct tmio_mmc_host *host,
+static void tmio_mmc_start_dma(struct tmio_mmc_host *host,
 			       struct mmc_data *data)
 {
 	if (data->flags & MMC_DATA_READ) {
 		if (host->chan_rx)
-			return tmio_mmc_start_dma_rx(host);
+			tmio_mmc_start_dma_rx(host);
 	} else {
 		if (host->chan_tx)
-			return tmio_mmc_start_dma_tx(host);
+			tmio_mmc_start_dma_tx(host);
 	}
-
-	return 0;
 }
 
 static void tmio_issue_tasklet_fn(unsigned long priv)
@@ -562,6 +970,12 @@
 static void tmio_tasklet_fn(unsigned long arg)
 {
 	struct tmio_mmc_host *host = (struct tmio_mmc_host *)arg;
+	unsigned long flags;
+
+	spin_lock_irqsave(&host->lock, flags);
+
+	if (!host->data)
+		goto out;
 
 	if (host->data->flags & MMC_DATA_READ)
 		dma_unmap_sg(&host->pdev->dev, host->sg_ptr, host->dma_sglen,
@@ -571,6 +985,8 @@
 			     DMA_TO_DEVICE);
 
 	tmio_mmc_do_data_irq(host);
+out:
+	spin_unlock_irqrestore(&host->lock, flags);
 }
 
 /* It might be necessary to make filter MFD specific */
@@ -584,9 +1000,6 @@
 static void tmio_mmc_request_dma(struct tmio_mmc_host *host,
 				 struct tmio_mmc_data *pdata)
 {
-	host->cookie = -EINVAL;
-	host->desc = NULL;
-
 	/* We can only either use DMA for both Tx and Rx or not use it at all */
 	if (pdata->dma) {
 		dma_cap_mask_t mask;
@@ -632,15 +1045,15 @@
 		host->chan_rx = NULL;
 		dma_release_channel(chan);
 	}
-
-	host->cookie = -EINVAL;
-	host->desc = NULL;
 }
 #else
-static int tmio_mmc_start_dma(struct tmio_mmc_host *host,
+static void tmio_check_bounce_buffer(struct tmio_mmc_host *host)
+{
+}
+
+static void tmio_mmc_start_dma(struct tmio_mmc_host *host,
 			       struct mmc_data *data)
 {
-	return 0;
 }
 
 static void tmio_mmc_request_dma(struct tmio_mmc_host *host,
@@ -682,7 +1095,9 @@
 	sd_ctrl_write16(host, CTL_SD_XFER_LEN, data->blksz);
 	sd_ctrl_write16(host, CTL_XFER_BLK_COUNT, data->blocks);
 
-	return tmio_mmc_start_dma(host, data);
+	tmio_mmc_start_dma(host, data);
+
+	return 0;
 }
 
 /* Process requests from the MMC layer */
@@ -694,6 +1109,8 @@
 	if (host->mrq)
 		pr_debug("request not null\n");
 
+	host->last_req_ts = jiffies;
+	wmb();
 	host->mrq = mrq;
 
 	if (mrq->data) {
@@ -703,10 +1120,14 @@
 	}
 
 	ret = tmio_mmc_start_command(host, mrq->cmd);
-	if (!ret)
+	if (!ret) {
+		schedule_delayed_work(&host->delayed_reset_work,
+				      msecs_to_jiffies(2000));
 		return;
+	}
 
 fail:
+	host->mrq = NULL;
 	mrq->cmd->error = ret;
 	mmc_request_done(mmc, mrq);
 }
@@ -780,6 +1201,7 @@
 	.set_ios	= tmio_mmc_set_ios,
 	.get_ro         = tmio_mmc_get_ro,
 	.get_cd		= tmio_mmc_get_cd,
+	.enable_sdio_irq = tmio_mmc_enable_sdio_irq,
 };
 
 #ifdef CONFIG_PM
@@ -864,10 +1286,15 @@
 		goto host_free;
 
 	mmc->ops = &tmio_mmc_ops;
-	mmc->caps = MMC_CAP_4_BIT_DATA;
-	mmc->caps |= pdata->capabilities;
+	mmc->caps = MMC_CAP_4_BIT_DATA | pdata->capabilities;
 	mmc->f_max = pdata->hclk;
 	mmc->f_min = mmc->f_max / 512;
+	mmc->max_segs = 32;
+	mmc->max_blk_size = 512;
+	mmc->max_blk_count = (PAGE_CACHE_SIZE / mmc->max_blk_size) *
+		mmc->max_segs;
+	mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
+	mmc->max_seg_size = mmc->max_req_size;
 	if (pdata->ocr_mask)
 		mmc->ocr_avail = pdata->ocr_mask;
 	else
@@ -890,12 +1317,19 @@
 		goto cell_disable;
 
 	disable_mmc_irqs(host, TMIO_MASK_ALL);
+	if (pdata->flags & TMIO_MMC_SDIO_IRQ)
+		tmio_mmc_enable_sdio_irq(mmc, 0);
 
 	ret = request_irq(host->irq, tmio_mmc_irq, IRQF_DISABLED |
 		IRQF_TRIGGER_FALLING, dev_name(&dev->dev), host);
 	if (ret)
 		goto cell_disable;
 
+	spin_lock_init(&host->lock);
+
+	/* Init delayed work for request timeouts */
+	INIT_DELAYED_WORK(&host->delayed_reset_work, tmio_mmc_reset_work);
+
 	/* See if we also get DMA */
 	tmio_mmc_request_dma(host, pdata);
 
@@ -934,6 +1368,7 @@
 	if (mmc) {
 		struct tmio_mmc_host *host = mmc_priv(mmc);
 		mmc_remove_host(mmc);
+		cancel_delayed_work_sync(&host->delayed_reset_work);
 		tmio_mmc_release_dma(host);
 		free_irq(host->irq, host);
 		if (cell->disable)
diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h
deleted file mode 100644
index 0fedc78..0000000
--- a/drivers/mmc/host/tmio_mmc.h
+++ /dev/null
@@ -1,228 +0,0 @@
-/* Definitons for use with the tmio_mmc.c
- *
- * (c) 2004 Ian Molton <spyro@f2s.com>
- * (c) 2007 Ian Molton <spyro@f2s.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
-
-#include <linux/highmem.h>
-#include <linux/interrupt.h>
-#include <linux/dmaengine.h>
-
-#define CTL_SD_CMD 0x00
-#define CTL_ARG_REG 0x04
-#define CTL_STOP_INTERNAL_ACTION 0x08
-#define CTL_XFER_BLK_COUNT 0xa
-#define CTL_RESPONSE 0x0c
-#define CTL_STATUS 0x1c
-#define CTL_IRQ_MASK 0x20
-#define CTL_SD_CARD_CLK_CTL 0x24
-#define CTL_SD_XFER_LEN 0x26
-#define CTL_SD_MEM_CARD_OPT 0x28
-#define CTL_SD_ERROR_DETAIL_STATUS 0x2c
-#define CTL_SD_DATA_PORT 0x30
-#define CTL_TRANSACTION_CTL 0x34
-#define CTL_RESET_SD 0xe0
-#define CTL_SDIO_REGS 0x100
-#define CTL_CLK_AND_WAIT_CTL 0x138
-#define CTL_RESET_SDIO 0x1e0
-
-/* Definitions for values the CTRL_STATUS register can take. */
-#define TMIO_STAT_CMDRESPEND    0x00000001
-#define TMIO_STAT_DATAEND       0x00000004
-#define TMIO_STAT_CARD_REMOVE   0x00000008
-#define TMIO_STAT_CARD_INSERT   0x00000010
-#define TMIO_STAT_SIGSTATE      0x00000020
-#define TMIO_STAT_WRPROTECT     0x00000080
-#define TMIO_STAT_CARD_REMOVE_A 0x00000100
-#define TMIO_STAT_CARD_INSERT_A 0x00000200
-#define TMIO_STAT_SIGSTATE_A    0x00000400
-#define TMIO_STAT_CMD_IDX_ERR   0x00010000
-#define TMIO_STAT_CRCFAIL       0x00020000
-#define TMIO_STAT_STOPBIT_ERR   0x00040000
-#define TMIO_STAT_DATATIMEOUT   0x00080000
-#define TMIO_STAT_RXOVERFLOW    0x00100000
-#define TMIO_STAT_TXUNDERRUN    0x00200000
-#define TMIO_STAT_CMDTIMEOUT    0x00400000
-#define TMIO_STAT_RXRDY         0x01000000
-#define TMIO_STAT_TXRQ          0x02000000
-#define TMIO_STAT_ILL_FUNC      0x20000000
-#define TMIO_STAT_CMD_BUSY      0x40000000
-#define TMIO_STAT_ILL_ACCESS    0x80000000
-
-/* Define some IRQ masks */
-/* This is the mask used at reset by the chip */
-#define TMIO_MASK_ALL           0x837f031d
-#define TMIO_MASK_READOP  (TMIO_STAT_RXRDY | TMIO_STAT_DATAEND)
-#define TMIO_MASK_WRITEOP (TMIO_STAT_TXRQ | TMIO_STAT_DATAEND)
-#define TMIO_MASK_CMD     (TMIO_STAT_CMDRESPEND | TMIO_STAT_CMDTIMEOUT | \
-		TMIO_STAT_CARD_REMOVE | TMIO_STAT_CARD_INSERT)
-#define TMIO_MASK_IRQ     (TMIO_MASK_READOP | TMIO_MASK_WRITEOP | TMIO_MASK_CMD)
-
-
-#define enable_mmc_irqs(host, i) \
-	do { \
-		u32 mask;\
-		mask  = sd_ctrl_read32((host), CTL_IRQ_MASK); \
-		mask &= ~((i) & TMIO_MASK_IRQ); \
-		sd_ctrl_write32((host), CTL_IRQ_MASK, mask); \
-	} while (0)
-
-#define disable_mmc_irqs(host, i) \
-	do { \
-		u32 mask;\
-		mask  = sd_ctrl_read32((host), CTL_IRQ_MASK); \
-		mask |= ((i) & TMIO_MASK_IRQ); \
-		sd_ctrl_write32((host), CTL_IRQ_MASK, mask); \
-	} while (0)
-
-#define ack_mmc_irqs(host, i) \
-	do { \
-		sd_ctrl_write32((host), CTL_STATUS, ~(i)); \
-	} while (0)
-
-
-struct tmio_mmc_host {
-	void __iomem *ctl;
-	unsigned long bus_shift;
-	struct mmc_command      *cmd;
-	struct mmc_request      *mrq;
-	struct mmc_data         *data;
-	struct mmc_host         *mmc;
-	int                     irq;
-
-	/* Callbacks for clock / power control */
-	void (*set_pwr)(struct platform_device *host, int state);
-	void (*set_clk_div)(struct platform_device *host, int state);
-
-	/* pio related stuff */
-	struct scatterlist      *sg_ptr;
-	unsigned int            sg_len;
-	unsigned int            sg_off;
-
-	struct platform_device *pdev;
-
-	/* DMA support */
-	struct dma_chan		*chan_rx;
-	struct dma_chan		*chan_tx;
-	struct tasklet_struct	dma_complete;
-	struct tasklet_struct	dma_issue;
-#ifdef CONFIG_TMIO_MMC_DMA
-	struct dma_async_tx_descriptor *desc;
-	unsigned int            dma_sglen;
-	dma_cookie_t		cookie;
-#endif
-};
-
-#include <linux/io.h>
-
-static inline u16 sd_ctrl_read16(struct tmio_mmc_host *host, int addr)
-{
-	return readw(host->ctl + (addr << host->bus_shift));
-}
-
-static inline void sd_ctrl_read16_rep(struct tmio_mmc_host *host, int addr,
-		u16 *buf, int count)
-{
-	readsw(host->ctl + (addr << host->bus_shift), buf, count);
-}
-
-static inline u32 sd_ctrl_read32(struct tmio_mmc_host *host, int addr)
-{
-	return readw(host->ctl + (addr << host->bus_shift)) |
-	       readw(host->ctl + ((addr + 2) << host->bus_shift)) << 16;
-}
-
-static inline void sd_ctrl_write16(struct tmio_mmc_host *host, int addr,
-		u16 val)
-{
-	writew(val, host->ctl + (addr << host->bus_shift));
-}
-
-static inline void sd_ctrl_write16_rep(struct tmio_mmc_host *host, int addr,
-		u16 *buf, int count)
-{
-	writesw(host->ctl + (addr << host->bus_shift), buf, count);
-}
-
-static inline void sd_ctrl_write32(struct tmio_mmc_host *host, int addr,
-		u32 val)
-{
-	writew(val, host->ctl + (addr << host->bus_shift));
-	writew(val >> 16, host->ctl + ((addr + 2) << host->bus_shift));
-}
-
-#include <linux/scatterlist.h>
-#include <linux/blkdev.h>
-
-static inline void tmio_mmc_init_sg(struct tmio_mmc_host *host,
-	struct mmc_data *data)
-{
-	host->sg_len = data->sg_len;
-	host->sg_ptr = data->sg;
-	host->sg_off = 0;
-}
-
-static inline int tmio_mmc_next_sg(struct tmio_mmc_host *host)
-{
-	host->sg_ptr = sg_next(host->sg_ptr);
-	host->sg_off = 0;
-	return --host->sg_len;
-}
-
-static inline char *tmio_mmc_kmap_atomic(struct scatterlist *sg,
-	unsigned long *flags)
-{
-	local_irq_save(*flags);
-	return kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset;
-}
-
-static inline void tmio_mmc_kunmap_atomic(void *virt,
-	unsigned long *flags)
-{
-	kunmap_atomic(virt, KM_BIO_SRC_IRQ);
-	local_irq_restore(*flags);
-}
-
-#ifdef CONFIG_MMC_DEBUG
-
-#define STATUS_TO_TEXT(a) \
-	do { \
-		if (status & TMIO_STAT_##a) \
-			printk(#a); \
-	} while (0)
-
-void pr_debug_status(u32 status)
-{
-	printk(KERN_DEBUG "status: %08x = ", status);
-	STATUS_TO_TEXT(CARD_REMOVE);
-	STATUS_TO_TEXT(CARD_INSERT);
-	STATUS_TO_TEXT(SIGSTATE);
-	STATUS_TO_TEXT(WRPROTECT);
-	STATUS_TO_TEXT(CARD_REMOVE_A);
-	STATUS_TO_TEXT(CARD_INSERT_A);
-	STATUS_TO_TEXT(SIGSTATE_A);
-	STATUS_TO_TEXT(CMD_IDX_ERR);
-	STATUS_TO_TEXT(STOPBIT_ERR);
-	STATUS_TO_TEXT(ILL_FUNC);
-	STATUS_TO_TEXT(CMD_BUSY);
-	STATUS_TO_TEXT(CMDRESPEND);
-	STATUS_TO_TEXT(DATAEND);
-	STATUS_TO_TEXT(CRCFAIL);
-	STATUS_TO_TEXT(DATATIMEOUT);
-	STATUS_TO_TEXT(CMDTIMEOUT);
-	STATUS_TO_TEXT(RXOVERFLOW);
-	STATUS_TO_TEXT(TXUNDERRUN);
-	STATUS_TO_TEXT(RXRDY);
-	STATUS_TO_TEXT(TXRQ);
-	STATUS_TO_TEXT(ILL_ACCESS);
-	printk("\n");
-}
-
-#else
-#define pr_debug_status(s)  do { } while (0)
-#endif
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
index f511dd1..ee4bb33 100644
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -1134,7 +1134,7 @@
 static struct dentry *mtd_inodefs_mount(struct file_system_type *fs_type,
 				int flags, const char *dev_name, void *data)
 {
-	return mount_pseudo(fs_type, "mtd_inode:", NULL, MTD_INODE_FS_MAGIC);
+	return mount_pseudo(fs_type, "mtd_inode:", NULL, NULL, MTD_INODE_FS_MAGIC);
 }
 
 static struct file_system_type mtd_inodefs_type = {
diff --git a/drivers/mtd/mtdoops.c b/drivers/mtd/mtdoops.c
index 1ee72f3..c948150 100644
--- a/drivers/mtd/mtdoops.c
+++ b/drivers/mtd/mtdoops.c
@@ -307,6 +307,11 @@
 	unsigned long l1_cpy, l2_cpy;
 	char *dst;
 
+	if (reason != KMSG_DUMP_OOPS &&
+	    reason != KMSG_DUMP_PANIC &&
+	    reason != KMSG_DUMP_KEXEC)
+		return;
+
 	/* Only dump oopses if dump_oops is set */
 	if (reason == KMSG_DUMP_OOPS && !dump_oops)
 		return;
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index 1f75a1b..31bf376 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -821,7 +821,7 @@
  *
  * Wait for command done. This is a helper function for nand_wait used when
  * we are in interrupt context. May happen when in panic and trying to write
- * an oops trough mtdoops.
+ * an oops through mtdoops.
  */
 static void panic_nand_wait(struct mtd_info *mtd, struct nand_chip *chip,
 			    unsigned long timeo)
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 3fda24a..4c8bfc9 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -1944,19 +1944,12 @@
 config FEC
 	bool "FEC ethernet controller (of ColdFire and some i.MX CPUs)"
 	depends on M523x || M527x || M5272 || M528x || M520x || M532x || \
-		MACH_MX27 || ARCH_MX35 || ARCH_MX25 || ARCH_MX5
+		MACH_MX27 || ARCH_MX35 || ARCH_MX25 || ARCH_MX5 || SOC_IMX28
 	select PHYLIB
 	help
 	  Say Y here if you want to use the built-in 10/100 Fast ethernet
 	  controller on some Motorola ColdFire and Freescale i.MX processors.
 
-config FEC2
-	bool "Second FEC ethernet controller (on some ColdFire CPUs)"
-	depends on FEC
-	help
-	  Say Y here if you want to use the second built-in 10/100 Fast
-	  ethernet controller on some Motorola ColdFire processors.
-
 config FEC_MPC52xx
 	tristate "MPC52xx FEC driver"
 	depends on PPC_MPC52xx && PPC_BESTCOMM
@@ -2970,6 +2963,7 @@
 config XEN_NETDEV_FRONTEND
 	tristate "Xen network device frontend driver"
 	depends on XEN
+	select XEN_XENBUS_FRONTEND
 	default y
 	help
 	  The network device frontend driver allows the kernel to
diff --git a/drivers/net/bfin_mac.c b/drivers/net/bfin_mac.c
index ce1e5e9..0b9fc51 100644
--- a/drivers/net/bfin_mac.c
+++ b/drivers/net/bfin_mac.c
@@ -8,6 +8,11 @@
  * Licensed under the GPL-2 or later.
  */
 
+#define DRV_VERSION	"1.1"
+#define DRV_DESC	"Blackfin on-chip Ethernet MAC driver"
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/kernel.h>
@@ -41,12 +46,7 @@
 
 #include "bfin_mac.h"
 
-#define DRV_NAME	"bfin_mac"
-#define DRV_VERSION	"1.1"
-#define DRV_AUTHOR	"Bryan Wu, Luke Yang"
-#define DRV_DESC	"Blackfin on-chip Ethernet MAC driver"
-
-MODULE_AUTHOR(DRV_AUTHOR);
+MODULE_AUTHOR("Bryan Wu, Luke Yang");
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION(DRV_DESC);
 MODULE_ALIAS("platform:bfin_mac");
@@ -189,8 +189,7 @@
 		/* allocate a new skb for next time receive */
 		new_skb = dev_alloc_skb(PKT_BUF_SZ + NET_IP_ALIGN);
 		if (!new_skb) {
-			printk(KERN_NOTICE DRV_NAME
-			       ": init: low on mem - packet dropped\n");
+			pr_notice("init: low on mem - packet dropped\n");
 			goto init_error;
 		}
 		skb_reserve(new_skb, NET_IP_ALIGN);
@@ -240,7 +239,7 @@
 
 init_error:
 	desc_list_free();
-	printk(KERN_ERR DRV_NAME ": kmalloc failed\n");
+	pr_err("kmalloc failed\n");
 	return -ENOMEM;
 }
 
@@ -259,8 +258,7 @@
 	while ((bfin_read_EMAC_STAADD()) & STABUSY) {
 		udelay(1);
 		if (timeout_cnt-- < 0) {
-			printk(KERN_ERR DRV_NAME
-			": wait MDC/MDIO transaction to complete timeout\n");
+			pr_err("wait MDC/MDIO transaction to complete timeout\n");
 			return -ETIMEDOUT;
 		}
 	}
@@ -350,9 +348,9 @@
 					opmode &= ~RMII_10;
 					break;
 				default:
-					printk(KERN_WARNING
-						"%s: Ack!  Speed (%d) is not 10/100!\n",
-						DRV_NAME, phydev->speed);
+					netdev_warn(dev,
+						"Ack! Speed (%d) is not 10/100!\n",
+						phydev->speed);
 					break;
 				}
 				bfin_write_EMAC_OPMODE(opmode);
@@ -417,14 +415,13 @@
 
 	/* now we are supposed to have a proper phydev, to attach to... */
 	if (!phydev) {
-		printk(KERN_INFO "%s: Don't found any phy device at all\n",
-			dev->name);
+		netdev_err(dev, "no phy device found\n");
 		return -ENODEV;
 	}
 
 	if (phy_mode != PHY_INTERFACE_MODE_RMII &&
 		phy_mode != PHY_INTERFACE_MODE_MII) {
-		printk(KERN_INFO "%s: Invalid phy interface mode\n", dev->name);
+		netdev_err(dev, "invalid phy interface mode\n");
 		return -EINVAL;
 	}
 
@@ -432,7 +429,7 @@
 			0, phy_mode);
 
 	if (IS_ERR(phydev)) {
-		printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
+		netdev_err(dev, "could not attach PHY\n");
 		return PTR_ERR(phydev);
 	}
 
@@ -453,11 +450,10 @@
 	lp->old_duplex = -1;
 	lp->phydev = phydev;
 
-	printk(KERN_INFO "%s: attached PHY driver [%s] "
-	       "(mii_bus:phy_addr=%s, irq=%d, mdc_clk=%dHz(mdc_div=%d)"
-	       "@sclk=%dMHz)\n",
-	       DRV_NAME, phydev->drv->name, dev_name(&phydev->dev), phydev->irq,
-	       MDC_CLK, mdc_div, sclk/1000000);
+	pr_info("attached PHY driver [%s] "
+	        "(mii_bus:phy_addr=%s, irq=%d, mdc_clk=%dHz(mdc_div=%d)@sclk=%dMHz)\n",
+	        phydev->drv->name, dev_name(&phydev->dev), phydev->irq,
+	        MDC_CLK, mdc_div, sclk/1000000);
 
 	return 0;
 }
@@ -502,7 +498,7 @@
 static void bfin_mac_ethtool_getdrvinfo(struct net_device *dev,
 					struct ethtool_drvinfo *info)
 {
-	strcpy(info->driver, DRV_NAME);
+	strcpy(info->driver, KBUILD_MODNAME);
 	strcpy(info->version, DRV_VERSION);
 	strcpy(info->fw_version, "N/A");
 	strcpy(info->bus_info, dev_name(&dev->dev));
@@ -562,7 +558,7 @@
 };
 
 /**************************************************************************/
-void setup_system_regs(struct net_device *dev)
+static void setup_system_regs(struct net_device *dev)
 {
 	struct bfin_mac_local *lp = netdev_priv(dev);
 	int i;
@@ -592,6 +588,10 @@
 
 	bfin_write_EMAC_MMC_CTL(RSTC | CROLL);
 
+	/* Set VLAN regs to let 1522-byte packets pass through */
+	bfin_write_EMAC_VLAN1(lp->vlan1_mask);
+	bfin_write_EMAC_VLAN2(lp->vlan2_mask);
+
 	/* Initialize the TX DMA channel registers */
 	bfin_write_DMA2_X_COUNT(0);
 	bfin_write_DMA2_X_MODIFY(4);
@@ -827,8 +827,7 @@
 		while ((!(bfin_read_EMAC_PTP_ISTAT() & TXTL)) && (--timeout_cnt))
 			udelay(1);
 		if (timeout_cnt == 0)
-			printk(KERN_ERR DRV_NAME
-					": fails to timestamp the TX packet\n");
+			netdev_err(netdev, "timestamp the TX packet failed\n");
 		else {
 			struct skb_shared_hwtstamps shhwtstamps;
 			u64 ns;
@@ -1083,8 +1082,7 @@
 	 * we which case we simply drop the packet
 	 */
 	if (current_rx_ptr->status.status_word & RX_ERROR_MASK) {
-		printk(KERN_NOTICE DRV_NAME
-		       ": rx: receive error - packet dropped\n");
+		netdev_notice(dev, "rx: receive error - packet dropped\n");
 		dev->stats.rx_dropped++;
 		goto out;
 	}
@@ -1094,8 +1092,7 @@
 
 	new_skb = dev_alloc_skb(PKT_BUF_SZ + NET_IP_ALIGN);
 	if (!new_skb) {
-		printk(KERN_NOTICE DRV_NAME
-		       ": rx: low on mem - packet dropped\n");
+		netdev_notice(dev, "rx: low on mem - packet dropped\n");
 		dev->stats.rx_dropped++;
 		goto out;
 	}
@@ -1213,7 +1210,7 @@
 	int ret;
 	u32 opmode;
 
-	pr_debug("%s: %s\n", DRV_NAME, __func__);
+	pr_debug("%s\n", __func__);
 
 	/* Set RX DMA */
 	bfin_write_DMA1_NEXT_DESC_PTR(&(rx_list_head->desc_a));
@@ -1323,7 +1320,7 @@
 	u32 sysctl;
 
 	if (dev->flags & IFF_PROMISC) {
-		printk(KERN_INFO "%s: set to promisc mode\n", dev->name);
+		netdev_info(dev, "set promisc mode\n");
 		sysctl = bfin_read_EMAC_OPMODE();
 		sysctl |= PR;
 		bfin_write_EMAC_OPMODE(sysctl);
@@ -1393,7 +1390,7 @@
 	 * address using ifconfig eth0 hw ether xx:xx:xx:xx:xx:xx
 	 */
 	if (!is_valid_ether_addr(dev->dev_addr)) {
-		printk(KERN_WARNING DRV_NAME ": no valid ethernet hw addr\n");
+		netdev_warn(dev, "no valid ethernet hw addr\n");
 		return -EINVAL;
 	}
 
@@ -1527,6 +1524,9 @@
 		goto out_err_mii_probe;
 	}
 
+	lp->vlan1_mask = ETH_P_8021Q | mii_bus_data->vlan1_mask;
+	lp->vlan2_mask = ETH_P_8021Q | mii_bus_data->vlan2_mask;
+
 	/* Fill in the fields of the device structure with ethernet values. */
 	ether_setup(ndev);
 
@@ -1558,7 +1558,7 @@
 	bfin_mac_hwtstamp_init(ndev);
 
 	/* now, print out the card info, in a short format.. */
-	dev_info(&pdev->dev, "%s, Version %s\n", DRV_DESC, DRV_VERSION);
+	netdev_info(ndev, "%s, Version %s\n", DRV_DESC, DRV_VERSION);
 
 	return 0;
 
@@ -1650,7 +1650,7 @@
 	 * so set the GPIO pins to Ethernet mode
 	 */
 	pin_req = mii_bus_pd->mac_peripherals;
-	rc = peripheral_request_list(pin_req, DRV_NAME);
+	rc = peripheral_request_list(pin_req, KBUILD_MODNAME);
 	if (rc) {
 		dev_err(&pdev->dev, "Requesting peripherals failed!\n");
 		return rc;
@@ -1739,7 +1739,7 @@
 	.resume = bfin_mac_resume,
 	.suspend = bfin_mac_suspend,
 	.driver = {
-		.name = DRV_NAME,
+		.name = KBUILD_MODNAME,
 		.owner	= THIS_MODULE,
 	},
 };
diff --git a/drivers/net/bfin_mac.h b/drivers/net/bfin_mac.h
index aed68be..f8559ac 100644
--- a/drivers/net/bfin_mac.h
+++ b/drivers/net/bfin_mac.h
@@ -17,7 +17,14 @@
 #include <linux/etherdevice.h>
 #include <linux/bfin_mac.h>
 
+/*
+ * Disable hardware checksum for bug #5600 if writeback cache is
+ * enabled. Otherwise, corrupted RX packets will be sent up the
+ * stack without an error mark.
+ */
+#ifndef CONFIG_BFIN_EXTMEM_WRITEBACK
 #define BFIN_MAC_CSUM_OFFLOAD
+#endif
 
 #define TX_RECLAIM_JIFFIES (HZ / 5)
 
@@ -68,7 +75,6 @@
 	 */
 	struct net_device_stats stats;
 
-	unsigned char Mac[6];	/* MAC address of the board */
 	spinlock_t lock;
 
 	int wol;		/* Wake On Lan */
@@ -76,6 +82,9 @@
 	struct timer_list tx_reclaim_timer;
 	struct net_device *ndev;
 
+	/* Data for EMAC_VLAN1 regs */
+	u16 vlan1_mask, vlan2_mask;
+
 	/* MII and PHY stuffs */
 	int old_link;          /* used by bf537_adjust_link */
 	int old_speed;
diff --git a/drivers/net/bnx2x/bnx2x.h b/drivers/net/bnx2x/bnx2x.h
index 77d6c8d..a6cd335 100644
--- a/drivers/net/bnx2x/bnx2x.h
+++ b/drivers/net/bnx2x/bnx2x.h
@@ -636,6 +636,7 @@
 
 #define CHIP_METAL(bp)			(bp->common.chip_id & 0x00000ff0)
 #define CHIP_BOND_ID(bp)		(bp->common.chip_id & 0x0000000f)
+#define CHIP_PARITY_ENABLED(bp)	(CHIP_IS_E1(bp) || CHIP_IS_E1H(bp))
 
 	int			flash_size;
 #define NVRAM_1MB_SIZE			0x20000	/* 1M bit in bytes */
@@ -1414,12 +1415,12 @@
 		else
 
 /* skip rx queue
- * if FCOE l2 support is diabled and this is the fcoe L2 queue
+ * if FCOE l2 support is disabled and this is the fcoe L2 queue
  */
 #define skip_rx_queue(bp, idx)	(NO_FCOE(bp) && IS_FCOE_IDX(idx))
 
 /* skip tx queue
- * if FCOE l2 support is diabled and this is the fcoe L2 queue
+ * if FCOE l2 support is disabled and this is the fcoe L2 queue
  */
 #define skip_tx_queue(bp, idx)	(NO_FCOE(bp) && IS_FCOE_IDX(idx))
 
diff --git a/drivers/net/bnx2x/bnx2x_dump.h b/drivers/net/bnx2x/bnx2x_dump.h
index dc18c25..fb3ff7c 100644
--- a/drivers/net/bnx2x/bnx2x_dump.h
+++ b/drivers/net/bnx2x/bnx2x_dump.h
@@ -1,10 +1,16 @@
 /* bnx2x_dump.h: Broadcom Everest network driver.
  *
- * Copyright (c) 2009 Broadcom Corporation
+ * Copyright (c) 2011 Broadcom Corporation
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation.
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2, available
+ * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a
+ * license other than the GPL, without Broadcom's express prior written
+ * consent.
  */
 
 
@@ -17,53 +23,53 @@
 #define BNX2X_DUMP_H
 
 
+
+/* definitions */
+#define XSTORM_WAITP_ADDR    0x2b8a80
+#define TSTORM_WAITP_ADDR    0x1b8a80
+#define USTORM_WAITP_ADDR    0x338a80
+#define CSTORM_WAITP_ADDR    0x238a80
+#define TSTORM_CAM_MODE         0x1B1440
+
+#define MAX_TIMER_PENDING      200
+#define TIMER_SCAN_DONT_CARE   0xFF
+#define RI_E1			0x1
+#define RI_E1H			0x2
+#define RI_E2			0x4
+#define RI_ONLINE		0x100
+#define RI_PATH0_DUMP		0x200
+#define RI_PATH1_DUMP		0x400
+#define RI_E1_OFFLINE		(RI_E1)
+#define RI_E1_ONLINE		(RI_E1 | RI_ONLINE)
+#define RI_E1H_OFFLINE		(RI_E1H)
+#define RI_E1H_ONLINE		(RI_E1H | RI_ONLINE)
+#define RI_E2_OFFLINE		(RI_E2)
+#define RI_E2_ONLINE		(RI_E2 | RI_ONLINE)
+#define RI_E1E1H_OFFLINE	(RI_E1 | RI_E1H)
+#define RI_E1E1H_ONLINE		(RI_E1 | RI_E1H | RI_ONLINE)
+#define RI_E1HE2_OFFLINE	(RI_E2 | RI_E1H)
+#define RI_E1HE2_ONLINE		(RI_E2 | RI_E1H | RI_ONLINE)
+#define RI_E1E2_OFFLINE		(RI_E2 | RI_E1)
+#define RI_E1E2_ONLINE		(RI_E2 | RI_E1 | RI_ONLINE)
+#define RI_ALL_OFFLINE         (RI_E1 | RI_E1H | RI_E2)
+#define RI_ALL_ONLINE          (RI_E1 | RI_E1H | RI_E2 | RI_ONLINE)
+
 struct dump_sign {
 	u32 time_stamp;
 	u32 diag_ver;
 	u32 grc_dump_ver;
 };
 
-#define TSTORM_WAITP_ADDR		0x1b8a80
-#define CSTORM_WAITP_ADDR		0x238a80
-#define XSTORM_WAITP_ADDR		0x2b8a80
-#define USTORM_WAITP_ADDR		0x338a80
-#define TSTORM_CAM_MODE			0x1b1440
-
-#define RI_E1				0x1
-#define RI_E1H				0x2
-#define RI_E2			0x4
-#define RI_ONLINE			0x100
-#define RI_PATH0_DUMP		0x200
-#define RI_PATH1_DUMP		0x400
-#define RI_E1_OFFLINE			(RI_E1)
-#define RI_E1_ONLINE			(RI_E1 | RI_ONLINE)
-#define RI_E1H_OFFLINE			(RI_E1H)
-#define RI_E1H_ONLINE			(RI_E1H | RI_ONLINE)
-#define RI_E2_OFFLINE			(RI_E2)
-#define RI_E2_ONLINE			(RI_E2 | RI_ONLINE)
-#define RI_E1E1H_OFFLINE		(RI_E1 | RI_E1H)
-#define RI_E1E1H_ONLINE			(RI_E1 | RI_E1H | RI_ONLINE)
-#define RI_E1HE2_OFFLINE		(RI_E2 | RI_E1H)
-#define RI_E1HE2_ONLINE			(RI_E2 | RI_E1H | RI_ONLINE)
-#define RI_E1E2_OFFLINE			(RI_E2 | RI_E1)
-#define RI_E1E2_ONLINE			(RI_E2 | RI_E1 | RI_ONLINE)
-#define RI_ALL_OFFLINE			(RI_E1 | RI_E1H | RI_E2)
-#define RI_ALL_ONLINE			(RI_E1 | RI_E1H | RI_E2 | RI_ONLINE)
-
-#define MAX_TIMER_PENDING		200
-#define TIMER_SCAN_DONT_CARE		0xFF
-
-
 struct dump_hdr {
-	u32		 hdr_size;	/* in dwords, excluding this field */
-	struct dump_sign dump_sign;
-	u32		 xstorm_waitp;
-	u32		 tstorm_waitp;
-	u32		 ustorm_waitp;
-	u32		 cstorm_waitp;
-	u16		 info;
-	u8		 idle_chk;
-	u8		 reserved;
+	u32  hdr_size;	/* in dwords, excluding this field */
+	struct dump_sign	dump_sign;
+	u32  xstorm_waitp;
+	u32  tstorm_waitp;
+	u32  ustorm_waitp;
+	u32  cstorm_waitp;
+	u16  info;
+	u8   idle_chk;
+	u8   reserved;
 };
 
 struct reg_addr {
@@ -80,202 +86,185 @@
 	u16 info;
 };
 
-
-#define REGS_COUNT			558
+#define REGS_COUNT			834
 static const struct reg_addr reg_addrs[REGS_COUNT] = {
 	{ 0x2000, 341, RI_ALL_ONLINE }, { 0x2800, 103, RI_ALL_ONLINE },
 	{ 0x3000, 287, RI_ALL_ONLINE }, { 0x3800, 331, RI_ALL_ONLINE },
-	{ 0x8800, 6, RI_E1_ONLINE }, { 0xa000, 223, RI_ALL_ONLINE },
-	{ 0xa388, 1, RI_ALL_ONLINE }, { 0xa398, 1, RI_ALL_ONLINE },
-	{ 0xa39c, 7, RI_E1H_ONLINE }, { 0xa3c0, 3, RI_E1H_ONLINE },
-	{ 0xa3d0, 1, RI_E1H_ONLINE }, { 0xa3d8, 1, RI_E1H_ONLINE },
-	{ 0xa3e0, 1, RI_E1H_ONLINE }, { 0xa3e8, 1, RI_E1H_ONLINE },
-	{ 0xa3f0, 1, RI_E1H_ONLINE }, { 0xa3f8, 1, RI_E1H_ONLINE },
-	{ 0xa400, 69, RI_ALL_ONLINE }, { 0xa518, 1, RI_ALL_ONLINE },
-	{ 0xa520, 1, RI_ALL_ONLINE }, { 0xa528, 1, RI_ALL_ONLINE },
-	{ 0xa530, 1, RI_ALL_ONLINE }, { 0xa538, 1, RI_ALL_ONLINE },
-	{ 0xa540, 1, RI_ALL_ONLINE }, { 0xa548, 1, RI_ALL_ONLINE },
-	{ 0xa550, 1, RI_ALL_ONLINE }, { 0xa558, 1, RI_ALL_ONLINE },
-	{ 0xa560, 1, RI_ALL_ONLINE }, { 0xa568, 1, RI_ALL_ONLINE },
-	{ 0xa570, 1, RI_ALL_ONLINE }, { 0xa580, 1, RI_ALL_ONLINE },
-	{ 0xa590, 1, RI_ALL_ONLINE }, { 0xa5a0, 1, RI_ALL_ONLINE },
-	{ 0xa5c0, 1, RI_ALL_ONLINE }, { 0xa5e0, 1, RI_E1H_ONLINE },
-	{ 0xa5e8, 1, RI_E1H_ONLINE }, { 0xa5f0, 1, RI_E1H_ONLINE },
-	{ 0xa5f8, 10, RI_E1H_ONLINE }, { 0x10000, 236, RI_ALL_ONLINE },
-	{ 0x103bc, 1, RI_ALL_ONLINE }, { 0x103cc, 1, RI_ALL_ONLINE },
-	{ 0x103dc, 1, RI_ALL_ONLINE }, { 0x10400, 57, RI_ALL_ONLINE },
-	{ 0x104e8, 2, RI_ALL_ONLINE }, { 0x104f4, 2, RI_ALL_ONLINE },
-	{ 0x10500, 146, RI_ALL_ONLINE }, { 0x10750, 2, RI_ALL_ONLINE },
-	{ 0x10760, 2, RI_ALL_ONLINE }, { 0x10770, 2, RI_ALL_ONLINE },
-	{ 0x10780, 2, RI_ALL_ONLINE }, { 0x10790, 2, RI_ALL_ONLINE },
-	{ 0x107a0, 2, RI_ALL_ONLINE }, { 0x107b0, 2, RI_ALL_ONLINE },
-	{ 0x107c0, 2, RI_ALL_ONLINE }, { 0x107d0, 2, RI_ALL_ONLINE },
-	{ 0x107e0, 2, RI_ALL_ONLINE }, { 0x10880, 2, RI_ALL_ONLINE },
-	{ 0x10900, 2, RI_ALL_ONLINE }, { 0x12000, 1, RI_ALL_ONLINE },
-	{ 0x14000, 1, RI_ALL_ONLINE }, { 0x16000, 26, RI_E1H_ONLINE },
-	{ 0x16070, 18, RI_E1H_ONLINE }, { 0x160c0, 27, RI_E1H_ONLINE },
-	{ 0x16140, 1, RI_E1H_ONLINE }, { 0x16160, 1, RI_E1H_ONLINE },
-	{ 0x16180, 2, RI_E1H_ONLINE }, { 0x161c0, 2, RI_E1H_ONLINE },
-	{ 0x16204, 5, RI_E1H_ONLINE }, { 0x18000, 1, RI_E1H_ONLINE },
-	{ 0x18008, 1, RI_E1H_ONLINE }, { 0x20000, 24, RI_ALL_ONLINE },
-	{ 0x20060, 8, RI_ALL_ONLINE }, { 0x20080, 138, RI_ALL_ONLINE },
-	{ 0x202b4, 1, RI_ALL_ONLINE }, { 0x202c4, 1, RI_ALL_ONLINE },
-	{ 0x20400, 2, RI_ALL_ONLINE }, { 0x2040c, 8, RI_ALL_ONLINE },
-	{ 0x2042c, 18, RI_E1H_ONLINE }, { 0x20480, 1, RI_ALL_ONLINE },
-	{ 0x20500, 1, RI_ALL_ONLINE }, { 0x20600, 1, RI_ALL_ONLINE },
-	{ 0x28000, 1, RI_ALL_ONLINE }, { 0x28004, 8191, RI_ALL_OFFLINE },
-	{ 0x30000, 1, RI_ALL_ONLINE }, { 0x30004, 16383, RI_ALL_OFFLINE },
-	{ 0x40000, 98, RI_ALL_ONLINE }, { 0x40194, 1, RI_ALL_ONLINE },
-	{ 0x401a4, 1, RI_ALL_ONLINE }, { 0x401a8, 11, RI_E1H_ONLINE },
-	{ 0x40200, 4, RI_ALL_ONLINE }, { 0x40400, 43, RI_ALL_ONLINE },
-	{ 0x404b8, 1, RI_ALL_ONLINE }, { 0x404c8, 1, RI_ALL_ONLINE },
-	{ 0x404cc, 3, RI_E1H_ONLINE }, { 0x40500, 2, RI_ALL_ONLINE },
+	{ 0x8800, 6, RI_ALL_ONLINE }, { 0x8818, 1, RI_E1HE2_ONLINE },
+	{ 0x9000, 164, RI_E2_ONLINE }, { 0x9400, 33, RI_E2_ONLINE },
+	{ 0xa000, 27, RI_ALL_ONLINE }, { 0xa06c, 1, RI_E1E1H_ONLINE },
+	{ 0xa070, 71, RI_ALL_ONLINE }, { 0xa18c, 4, RI_E1E1H_ONLINE },
+	{ 0xa19c, 62, RI_ALL_ONLINE }, { 0xa294, 2, RI_E1E1H_ONLINE },
+	{ 0xa29c, 56, RI_ALL_ONLINE }, { 0xa39c, 7, RI_E1HE2_ONLINE },
+	{ 0xa3c0, 3, RI_E1HE2_ONLINE }, { 0xa3d0, 1, RI_E1HE2_ONLINE },
+	{ 0xa3d8, 1, RI_E1HE2_ONLINE }, { 0xa3e0, 1, RI_E1HE2_ONLINE },
+	{ 0xa3e8, 1, RI_E1HE2_ONLINE }, { 0xa3f0, 1, RI_E1HE2_ONLINE },
+	{ 0xa3f8, 1, RI_E1HE2_ONLINE }, { 0xa400, 43, RI_ALL_ONLINE },
+	{ 0xa4ac, 2, RI_E1E1H_ONLINE }, { 0xa4b4, 1, RI_ALL_ONLINE },
+	{ 0xa4b8, 2, RI_E1E1H_ONLINE }, { 0xa4c0, 3, RI_ALL_ONLINE },
+	{ 0xa4cc, 5, RI_E1E1H_ONLINE }, { 0xa4e0, 9, RI_ALL_ONLINE },
+	{ 0xa504, 1, RI_E1E1H_ONLINE }, { 0xa508, 3, RI_ALL_ONLINE },
+	{ 0xa518, 1, RI_ALL_ONLINE }, { 0xa520, 1, RI_ALL_ONLINE },
+	{ 0xa528, 1, RI_ALL_ONLINE }, { 0xa530, 1, RI_ALL_ONLINE },
+	{ 0xa538, 1, RI_ALL_ONLINE }, { 0xa540, 1, RI_ALL_ONLINE },
+	{ 0xa548, 1, RI_E1E1H_ONLINE }, { 0xa550, 1, RI_E1E1H_ONLINE },
+	{ 0xa558, 1, RI_E1E1H_ONLINE }, { 0xa560, 1, RI_E1E1H_ONLINE },
+	{ 0xa568, 1, RI_E1E1H_ONLINE }, { 0xa570, 1, RI_ALL_ONLINE },
+	{ 0xa580, 1, RI_ALL_ONLINE }, { 0xa590, 1, RI_ALL_ONLINE },
+	{ 0xa5a0, 1, RI_ALL_ONLINE }, { 0xa5c0, 1, RI_ALL_ONLINE },
+	{ 0xa5e0, 1, RI_E1HE2_ONLINE }, { 0xa5e8, 1, RI_E1HE2_ONLINE },
+	{ 0xa5f0, 1, RI_E1HE2_ONLINE }, { 0xa5f8, 10, RI_E1HE2_ONLINE },
+	{ 0xa620, 111, RI_E2_ONLINE }, { 0xa800, 51, RI_E2_ONLINE },
+	{ 0xa8d4, 4, RI_E2_ONLINE }, { 0xa8e8, 1, RI_E2_ONLINE },
+	{ 0xa8f0, 1, RI_E2_ONLINE }, { 0x10000, 236, RI_ALL_ONLINE },
+	{ 0x10400, 57, RI_ALL_ONLINE }, { 0x104e8, 2, RI_ALL_ONLINE },
+	{ 0x104f4, 2, RI_ALL_ONLINE }, { 0x10500, 146, RI_ALL_ONLINE },
+	{ 0x10750, 2, RI_ALL_ONLINE }, { 0x10760, 2, RI_ALL_ONLINE },
+	{ 0x10770, 2, RI_ALL_ONLINE }, { 0x10780, 2, RI_ALL_ONLINE },
+	{ 0x10790, 2, RI_ALL_ONLINE }, { 0x107a0, 2, RI_ALL_ONLINE },
+	{ 0x107b0, 2, RI_ALL_ONLINE }, { 0x107c0, 2, RI_ALL_ONLINE },
+	{ 0x107d0, 2, RI_ALL_ONLINE }, { 0x107e0, 2, RI_ALL_ONLINE },
+	{ 0x10880, 2, RI_ALL_ONLINE }, { 0x10900, 2, RI_ALL_ONLINE },
+	{ 0x16000, 26, RI_E1HE2_ONLINE }, { 0x16070, 18, RI_E1HE2_ONLINE },
+	{ 0x160c0, 27, RI_E1HE2_ONLINE }, { 0x16140, 1, RI_E1HE2_ONLINE },
+	{ 0x16160, 1, RI_E1HE2_ONLINE }, { 0x16180, 2, RI_E1HE2_ONLINE },
+	{ 0x161c0, 2, RI_E1HE2_ONLINE }, { 0x16204, 5, RI_E1HE2_ONLINE },
+	{ 0x18000, 1, RI_E1HE2_ONLINE }, { 0x18008, 1, RI_E1HE2_ONLINE },
+	{ 0x18010, 35, RI_E2_ONLINE }, { 0x180a4, 2, RI_E2_ONLINE },
+	{ 0x180c0, 191, RI_E2_ONLINE }, { 0x18440, 1, RI_E2_ONLINE },
+	{ 0x18460, 1, RI_E2_ONLINE }, { 0x18480, 2, RI_E2_ONLINE },
+	{ 0x184c0, 2, RI_E2_ONLINE }, { 0x18500, 15, RI_E2_ONLINE },
+	{ 0x20000, 24, RI_ALL_ONLINE }, { 0x20060, 8, RI_ALL_ONLINE },
+	{ 0x20080, 94, RI_ALL_ONLINE }, { 0x201f8, 1, RI_E1E1H_ONLINE },
+	{ 0x201fc, 1, RI_ALL_ONLINE }, { 0x20200, 1, RI_E1E1H_ONLINE },
+	{ 0x20204, 1, RI_ALL_ONLINE }, { 0x20208, 1, RI_E1E1H_ONLINE },
+	{ 0x2020c, 39, RI_ALL_ONLINE }, { 0x202c8, 1, RI_E2_ONLINE },
+	{ 0x202d8, 4, RI_E2_ONLINE }, { 0x20400, 2, RI_ALL_ONLINE },
+	{ 0x2040c, 8, RI_ALL_ONLINE }, { 0x2042c, 18, RI_E1HE2_ONLINE },
+	{ 0x20480, 1, RI_ALL_ONLINE }, { 0x20500, 1, RI_ALL_ONLINE },
+	{ 0x20600, 1, RI_ALL_ONLINE }, { 0x28000, 1, RI_ALL_ONLINE },
+	{ 0x28004, 8191, RI_ALL_OFFLINE }, { 0x30000, 1, RI_ALL_ONLINE },
+	{ 0x30004, 16383, RI_ALL_OFFLINE }, { 0x40000, 98, RI_ALL_ONLINE },
+	{ 0x401a8, 8, RI_E1HE2_ONLINE }, { 0x401c8, 1, RI_E1H_ONLINE },
+	{ 0x401cc, 2, RI_E1HE2_ONLINE }, { 0x401d4, 2, RI_E2_ONLINE },
+	{ 0x40200, 4, RI_ALL_ONLINE }, { 0x40220, 18, RI_E2_ONLINE },
+	{ 0x40400, 43, RI_ALL_ONLINE }, { 0x404cc, 3, RI_E1HE2_ONLINE },
+	{ 0x404e0, 1, RI_E2_ONLINE }, { 0x40500, 2, RI_ALL_ONLINE },
 	{ 0x40510, 2, RI_ALL_ONLINE }, { 0x40520, 2, RI_ALL_ONLINE },
 	{ 0x40530, 2, RI_ALL_ONLINE }, { 0x40540, 2, RI_ALL_ONLINE },
-	{ 0x42000, 164, RI_ALL_ONLINE }, { 0x4229c, 1, RI_ALL_ONLINE },
-	{ 0x422ac, 1, RI_ALL_ONLINE }, { 0x422bc, 1, RI_ALL_ONLINE },
-	{ 0x422d4, 5, RI_E1H_ONLINE }, { 0x42400, 49, RI_ALL_ONLINE },
-	{ 0x424c8, 38, RI_ALL_ONLINE }, { 0x42568, 2, RI_ALL_ONLINE },
-	{ 0x42800, 1, RI_ALL_ONLINE }, { 0x50000, 20, RI_ALL_ONLINE },
-	{ 0x50050, 8, RI_ALL_ONLINE }, { 0x50070, 88, RI_ALL_ONLINE },
-	{ 0x501dc, 1, RI_ALL_ONLINE }, { 0x501ec, 1, RI_ALL_ONLINE },
-	{ 0x501f0, 4, RI_E1H_ONLINE }, { 0x50200, 2, RI_ALL_ONLINE },
-	{ 0x5020c, 7, RI_ALL_ONLINE }, { 0x50228, 6, RI_E1H_ONLINE },
-	{ 0x50240, 1, RI_ALL_ONLINE }, { 0x50280, 1, RI_ALL_ONLINE },
+	{ 0x40550, 10, RI_E2_ONLINE }, { 0x40610, 2, RI_E2_ONLINE },
+	{ 0x42000, 164, RI_ALL_ONLINE }, { 0x422c0, 4, RI_E2_ONLINE },
+	{ 0x422d4, 5, RI_E1HE2_ONLINE }, { 0x422e8, 1, RI_E2_ONLINE },
+	{ 0x42400, 49, RI_ALL_ONLINE }, { 0x424c8, 38, RI_ALL_ONLINE },
+	{ 0x42568, 2, RI_ALL_ONLINE }, { 0x42640, 5, RI_E2_ONLINE },
+	{ 0x42800, 1, RI_ALL_ONLINE }, { 0x50000, 1, RI_ALL_ONLINE },
+	{ 0x50004, 19, RI_ALL_ONLINE }, { 0x50050, 8, RI_ALL_ONLINE },
+	{ 0x50070, 88, RI_ALL_ONLINE }, { 0x501f0, 4, RI_E1HE2_ONLINE },
+	{ 0x50200, 2, RI_ALL_ONLINE }, { 0x5020c, 7, RI_ALL_ONLINE },
+	{ 0x50228, 6, RI_E1HE2_ONLINE }, { 0x50240, 1, RI_ALL_ONLINE },
+	{ 0x50280, 1, RI_ALL_ONLINE }, { 0x50300, 1, RI_E2_ONLINE },
+	{ 0x5030c, 1, RI_E2_ONLINE }, { 0x50318, 1, RI_E2_ONLINE },
+	{ 0x5031c, 1, RI_E2_ONLINE }, { 0x50320, 2, RI_E2_ONLINE },
 	{ 0x52000, 1, RI_ALL_ONLINE }, { 0x54000, 1, RI_ALL_ONLINE },
 	{ 0x54004, 3327, RI_ALL_OFFLINE }, { 0x58000, 1, RI_ALL_ONLINE },
-	{ 0x58004, 8191, RI_ALL_OFFLINE }, { 0x60000, 71, RI_ALL_ONLINE },
-	{ 0x60128, 1, RI_ALL_ONLINE }, { 0x60138, 1, RI_ALL_ONLINE },
-	{ 0x6013c, 24, RI_E1H_ONLINE }, { 0x60200, 1, RI_ALL_ONLINE },
+	{ 0x58004, 8191, RI_E1E1H_OFFLINE }, { 0x60000, 26, RI_ALL_ONLINE },
+	{ 0x60068, 8, RI_E1E1H_ONLINE }, { 0x60088, 12, RI_ALL_ONLINE },
+	{ 0x600b8, 9, RI_E1E1H_ONLINE }, { 0x600dc, 1, RI_ALL_ONLINE },
+	{ 0x600e0, 5, RI_E1E1H_ONLINE }, { 0x600f4, 1, RI_ALL_ONLINE },
+	{ 0x600f8, 1, RI_E1E1H_ONLINE }, { 0x600fc, 8, RI_ALL_ONLINE },
+	{ 0x6013c, 24, RI_E1H_ONLINE }, { 0x6019c, 2, RI_E2_ONLINE },
+	{ 0x601ac, 18, RI_E2_ONLINE }, { 0x60200, 1, RI_ALL_ONLINE },
+	{ 0x60204, 2, RI_ALL_OFFLINE }, { 0x60210, 13, RI_E2_ONLINE },
 	{ 0x61000, 1, RI_ALL_ONLINE }, { 0x61004, 511, RI_ALL_OFFLINE },
-	{ 0x70000, 8, RI_ALL_ONLINE }, { 0x70020, 21496, RI_ALL_OFFLINE },
-	{ 0x85000, 3, RI_ALL_ONLINE }, { 0x8500c, 4, RI_ALL_OFFLINE },
-	{ 0x8501c, 7, RI_ALL_ONLINE }, { 0x85038, 4, RI_ALL_OFFLINE },
-	{ 0x85048, 1, RI_ALL_ONLINE }, { 0x8504c, 109, RI_ALL_OFFLINE },
-	{ 0x85200, 32, RI_ALL_ONLINE }, { 0x85280, 11104, RI_ALL_OFFLINE },
-	{ 0xa0000, 16384, RI_ALL_ONLINE }, { 0xb0000, 16384, RI_E1H_ONLINE },
-	{ 0xc1000, 7, RI_ALL_ONLINE }, { 0xc1028, 1, RI_ALL_ONLINE },
-	{ 0xc1038, 1, RI_ALL_ONLINE }, { 0xc1800, 2, RI_ALL_ONLINE },
-	{ 0xc2000, 164, RI_ALL_ONLINE }, { 0xc229c, 1, RI_ALL_ONLINE },
-	{ 0xc22ac, 1, RI_ALL_ONLINE }, { 0xc22bc, 1, RI_ALL_ONLINE },
+	{ 0x70000, 8, RI_ALL_ONLINE }, { 0x70020, 8184, RI_ALL_OFFLINE },
+	{ 0x85000, 3, RI_ALL_ONLINE }, { 0x8501c, 7, RI_ALL_ONLINE },
+	{ 0x85048, 1, RI_ALL_ONLINE }, { 0x85200, 32, RI_ALL_ONLINE },
+	{ 0xc1000, 7, RI_ALL_ONLINE }, { 0xc103c, 2, RI_E2_ONLINE },
+	{ 0xc1800, 2, RI_ALL_ONLINE }, { 0xc2000, 164, RI_ALL_ONLINE },
+	{ 0xc22c0, 5, RI_E2_ONLINE }, { 0xc22d8, 4, RI_E2_ONLINE },
 	{ 0xc2400, 49, RI_ALL_ONLINE }, { 0xc24c8, 38, RI_ALL_ONLINE },
 	{ 0xc2568, 2, RI_ALL_ONLINE }, { 0xc2600, 1, RI_ALL_ONLINE },
-	{ 0xc4000, 165, RI_ALL_ONLINE }, { 0xc42a0, 1, RI_ALL_ONLINE },
-	{ 0xc42b0, 1, RI_ALL_ONLINE }, { 0xc42c0, 1, RI_ALL_ONLINE },
-	{ 0xc42e0, 7, RI_E1H_ONLINE }, { 0xc4400, 51, RI_ALL_ONLINE },
-	{ 0xc44d0, 38, RI_ALL_ONLINE }, { 0xc4570, 2, RI_ALL_ONLINE },
+	{ 0xc4000, 165, RI_ALL_ONLINE }, { 0xc42d8, 2, RI_E2_ONLINE },
+	{ 0xc42e0, 7, RI_E1HE2_ONLINE }, { 0xc42fc, 1, RI_E2_ONLINE },
+	{ 0xc4400, 51, RI_ALL_ONLINE }, { 0xc44d0, 38, RI_ALL_ONLINE },
+	{ 0xc4570, 2, RI_ALL_ONLINE }, { 0xc4578, 5, RI_E2_ONLINE },
 	{ 0xc4600, 1, RI_ALL_ONLINE }, { 0xd0000, 19, RI_ALL_ONLINE },
 	{ 0xd004c, 8, RI_ALL_ONLINE }, { 0xd006c, 91, RI_ALL_ONLINE },
-	{ 0xd01e4, 1, RI_ALL_ONLINE }, { 0xd01f4, 1, RI_ALL_ONLINE },
-	{ 0xd0200, 2, RI_ALL_ONLINE }, { 0xd020c, 7, RI_ALL_ONLINE },
-	{ 0xd0228, 18, RI_E1H_ONLINE }, { 0xd0280, 1, RI_ALL_ONLINE },
-	{ 0xd0300, 1, RI_ALL_ONLINE }, { 0xd0400, 1, RI_ALL_ONLINE },
-	{ 0xd4000, 1, RI_ALL_ONLINE }, { 0xd4004, 2559, RI_ALL_OFFLINE },
-	{ 0xd8000, 1, RI_ALL_ONLINE }, { 0xd8004, 8191, RI_ALL_OFFLINE },
-	{ 0xe0000, 21, RI_ALL_ONLINE }, { 0xe0054, 8, RI_ALL_ONLINE },
-	{ 0xe0074, 85, RI_ALL_ONLINE }, { 0xe01d4, 1, RI_ALL_ONLINE },
-	{ 0xe01e4, 1, RI_ALL_ONLINE }, { 0xe0200, 2, RI_ALL_ONLINE },
-	{ 0xe020c, 8, RI_ALL_ONLINE }, { 0xe022c, 18, RI_E1H_ONLINE },
+	{ 0xd01fc, 1, RI_E2_ONLINE }, { 0xd0200, 2, RI_ALL_ONLINE },
+	{ 0xd020c, 7, RI_ALL_ONLINE }, { 0xd0228, 18, RI_E1HE2_ONLINE },
+	{ 0xd0280, 1, RI_ALL_ONLINE }, { 0xd0300, 1, RI_ALL_ONLINE },
+	{ 0xd0400, 1, RI_ALL_ONLINE }, { 0xd4000, 1, RI_ALL_ONLINE },
+	{ 0xd4004, 2559, RI_ALL_OFFLINE }, { 0xd8000, 1, RI_ALL_ONLINE },
+	{ 0xd8004, 8191, RI_ALL_OFFLINE }, { 0xe0000, 21, RI_ALL_ONLINE },
+	{ 0xe0054, 8, RI_ALL_ONLINE }, { 0xe0074, 49, RI_ALL_ONLINE },
+	{ 0xe0138, 1, RI_E1E1H_ONLINE }, { 0xe013c, 35, RI_ALL_ONLINE },
+	{ 0xe01f4, 2, RI_E2_ONLINE }, { 0xe0200, 2, RI_ALL_ONLINE },
+	{ 0xe020c, 8, RI_ALL_ONLINE }, { 0xe022c, 18, RI_E1HE2_ONLINE },
 	{ 0xe0280, 1, RI_ALL_ONLINE }, { 0xe0300, 1, RI_ALL_ONLINE },
 	{ 0xe1000, 1, RI_ALL_ONLINE }, { 0xe2000, 1, RI_ALL_ONLINE },
 	{ 0xe2004, 2047, RI_ALL_OFFLINE }, { 0xf0000, 1, RI_ALL_ONLINE },
 	{ 0xf0004, 16383, RI_ALL_OFFLINE }, { 0x101000, 12, RI_ALL_ONLINE },
-	{ 0x10103c, 1, RI_ALL_ONLINE }, { 0x10104c, 1, RI_ALL_ONLINE },
-	{ 0x101050, 1, RI_E1H_ONLINE }, { 0x101100, 1, RI_ALL_ONLINE },
-	{ 0x101800, 8, RI_ALL_ONLINE }, { 0x102000, 18, RI_ALL_ONLINE },
-	{ 0x102054, 1, RI_ALL_ONLINE }, { 0x102064, 1, RI_ALL_ONLINE },
+	{ 0x101050, 1, RI_E1HE2_ONLINE }, { 0x101054, 3, RI_E2_ONLINE },
+	{ 0x101100, 1, RI_ALL_ONLINE }, { 0x101800, 8, RI_ALL_ONLINE },
+	{ 0x102000, 18, RI_ALL_ONLINE }, { 0x102068, 6, RI_E2_ONLINE },
 	{ 0x102080, 17, RI_ALL_ONLINE }, { 0x1020c8, 8, RI_E1H_ONLINE },
-	{ 0x102400, 1, RI_ALL_ONLINE }, { 0x103000, 26, RI_ALL_ONLINE },
-	{ 0x103074, 1, RI_ALL_ONLINE }, { 0x103084, 1, RI_ALL_ONLINE },
-	{ 0x103094, 1, RI_ALL_ONLINE }, { 0x103098, 5, RI_E1H_ONLINE },
+	{ 0x1020e8, 9, RI_E2_ONLINE }, { 0x102400, 1, RI_ALL_ONLINE },
+	{ 0x103000, 26, RI_ALL_ONLINE }, { 0x103098, 5, RI_E1HE2_ONLINE },
+	{ 0x1030ac, 10, RI_E2_ONLINE }, { 0x1030d8, 8, RI_E2_ONLINE },
+	{ 0x103400, 1, RI_E2_ONLINE }, { 0x103404, 135, RI_E2_OFFLINE },
 	{ 0x103800, 8, RI_ALL_ONLINE }, { 0x104000, 63, RI_ALL_ONLINE },
-	{ 0x104108, 1, RI_ALL_ONLINE }, { 0x104118, 1, RI_ALL_ONLINE },
-	{ 0x104200, 17, RI_ALL_ONLINE }, { 0x104400, 64, RI_ALL_ONLINE },
-	{ 0x104500, 192, RI_ALL_OFFLINE }, { 0x104800, 64, RI_ALL_ONLINE },
-	{ 0x104900, 192, RI_ALL_OFFLINE }, { 0x105000, 7, RI_ALL_ONLINE },
-	{ 0x10501c, 1, RI_ALL_OFFLINE }, { 0x105020, 3, RI_ALL_ONLINE },
-	{ 0x10502c, 1, RI_ALL_OFFLINE }, { 0x105030, 3, RI_ALL_ONLINE },
-	{ 0x10503c, 1, RI_ALL_OFFLINE }, { 0x105040, 3, RI_ALL_ONLINE },
-	{ 0x10504c, 1, RI_ALL_OFFLINE }, { 0x105050, 3, RI_ALL_ONLINE },
-	{ 0x10505c, 1, RI_ALL_OFFLINE }, { 0x105060, 3, RI_ALL_ONLINE },
-	{ 0x10506c, 1, RI_ALL_OFFLINE }, { 0x105070, 3, RI_ALL_ONLINE },
-	{ 0x10507c, 1, RI_ALL_OFFLINE }, { 0x105080, 3, RI_ALL_ONLINE },
-	{ 0x10508c, 1, RI_ALL_OFFLINE }, { 0x105090, 3, RI_ALL_ONLINE },
-	{ 0x10509c, 1, RI_ALL_OFFLINE }, { 0x1050a0, 3, RI_ALL_ONLINE },
-	{ 0x1050ac, 1, RI_ALL_OFFLINE }, { 0x1050b0, 3, RI_ALL_ONLINE },
-	{ 0x1050bc, 1, RI_ALL_OFFLINE }, { 0x1050c0, 3, RI_ALL_ONLINE },
-	{ 0x1050cc, 1, RI_ALL_OFFLINE }, { 0x1050d0, 3, RI_ALL_ONLINE },
-	{ 0x1050dc, 1, RI_ALL_OFFLINE }, { 0x1050e0, 3, RI_ALL_ONLINE },
-	{ 0x1050ec, 1, RI_ALL_OFFLINE }, { 0x1050f0, 3, RI_ALL_ONLINE },
-	{ 0x1050fc, 1, RI_ALL_OFFLINE }, { 0x105100, 3, RI_ALL_ONLINE },
-	{ 0x10510c, 1, RI_ALL_OFFLINE }, { 0x105110, 3, RI_ALL_ONLINE },
-	{ 0x10511c, 1, RI_ALL_OFFLINE }, { 0x105120, 3, RI_ALL_ONLINE },
-	{ 0x10512c, 1, RI_ALL_OFFLINE }, { 0x105130, 3, RI_ALL_ONLINE },
-	{ 0x10513c, 1, RI_ALL_OFFLINE }, { 0x105140, 3, RI_ALL_ONLINE },
-	{ 0x10514c, 1, RI_ALL_OFFLINE }, { 0x105150, 3, RI_ALL_ONLINE },
-	{ 0x10515c, 1, RI_ALL_OFFLINE }, { 0x105160, 3, RI_ALL_ONLINE },
-	{ 0x10516c, 1, RI_ALL_OFFLINE }, { 0x105170, 3, RI_ALL_ONLINE },
-	{ 0x10517c, 1, RI_ALL_OFFLINE }, { 0x105180, 3, RI_ALL_ONLINE },
-	{ 0x10518c, 1, RI_ALL_OFFLINE }, { 0x105190, 3, RI_ALL_ONLINE },
-	{ 0x10519c, 1, RI_ALL_OFFLINE }, { 0x1051a0, 3, RI_ALL_ONLINE },
-	{ 0x1051ac, 1, RI_ALL_OFFLINE }, { 0x1051b0, 3, RI_ALL_ONLINE },
-	{ 0x1051bc, 1, RI_ALL_OFFLINE }, { 0x1051c0, 3, RI_ALL_ONLINE },
-	{ 0x1051cc, 1, RI_ALL_OFFLINE }, { 0x1051d0, 3, RI_ALL_ONLINE },
-	{ 0x1051dc, 1, RI_ALL_OFFLINE }, { 0x1051e0, 3, RI_ALL_ONLINE },
-	{ 0x1051ec, 1, RI_ALL_OFFLINE }, { 0x1051f0, 3, RI_ALL_ONLINE },
-	{ 0x1051fc, 1, RI_ALL_OFFLINE }, { 0x105200, 3, RI_ALL_ONLINE },
-	{ 0x10520c, 1, RI_ALL_OFFLINE }, { 0x105210, 3, RI_ALL_ONLINE },
-	{ 0x10521c, 1, RI_ALL_OFFLINE }, { 0x105220, 3, RI_ALL_ONLINE },
-	{ 0x10522c, 1, RI_ALL_OFFLINE }, { 0x105230, 3, RI_ALL_ONLINE },
-	{ 0x10523c, 1, RI_ALL_OFFLINE }, { 0x105240, 3, RI_ALL_ONLINE },
-	{ 0x10524c, 1, RI_ALL_OFFLINE }, { 0x105250, 3, RI_ALL_ONLINE },
-	{ 0x10525c, 1, RI_ALL_OFFLINE }, { 0x105260, 3, RI_ALL_ONLINE },
-	{ 0x10526c, 1, RI_ALL_OFFLINE }, { 0x105270, 3, RI_ALL_ONLINE },
-	{ 0x10527c, 1, RI_ALL_OFFLINE }, { 0x105280, 3, RI_ALL_ONLINE },
-	{ 0x10528c, 1, RI_ALL_OFFLINE }, { 0x105290, 3, RI_ALL_ONLINE },
-	{ 0x10529c, 1, RI_ALL_OFFLINE }, { 0x1052a0, 3, RI_ALL_ONLINE },
-	{ 0x1052ac, 1, RI_ALL_OFFLINE }, { 0x1052b0, 3, RI_ALL_ONLINE },
-	{ 0x1052bc, 1, RI_ALL_OFFLINE }, { 0x1052c0, 3, RI_ALL_ONLINE },
-	{ 0x1052cc, 1, RI_ALL_OFFLINE }, { 0x1052d0, 3, RI_ALL_ONLINE },
-	{ 0x1052dc, 1, RI_ALL_OFFLINE }, { 0x1052e0, 3, RI_ALL_ONLINE },
-	{ 0x1052ec, 1, RI_ALL_OFFLINE }, { 0x1052f0, 3, RI_ALL_ONLINE },
-	{ 0x1052fc, 1, RI_ALL_OFFLINE }, { 0x105300, 3, RI_ALL_ONLINE },
-	{ 0x10530c, 1, RI_ALL_OFFLINE }, { 0x105310, 3, RI_ALL_ONLINE },
-	{ 0x10531c, 1, RI_ALL_OFFLINE }, { 0x105320, 3, RI_ALL_ONLINE },
-	{ 0x10532c, 1, RI_ALL_OFFLINE }, { 0x105330, 3, RI_ALL_ONLINE },
-	{ 0x10533c, 1, RI_ALL_OFFLINE }, { 0x105340, 3, RI_ALL_ONLINE },
-	{ 0x10534c, 1, RI_ALL_OFFLINE }, { 0x105350, 3, RI_ALL_ONLINE },
-	{ 0x10535c, 1, RI_ALL_OFFLINE }, { 0x105360, 3, RI_ALL_ONLINE },
-	{ 0x10536c, 1, RI_ALL_OFFLINE }, { 0x105370, 3, RI_ALL_ONLINE },
-	{ 0x10537c, 1, RI_ALL_OFFLINE }, { 0x105380, 3, RI_ALL_ONLINE },
-	{ 0x10538c, 1, RI_ALL_OFFLINE }, { 0x105390, 3, RI_ALL_ONLINE },
-	{ 0x10539c, 1, RI_ALL_OFFLINE }, { 0x1053a0, 3, RI_ALL_ONLINE },
-	{ 0x1053ac, 1, RI_ALL_OFFLINE }, { 0x1053b0, 3, RI_ALL_ONLINE },
-	{ 0x1053bc, 1, RI_ALL_OFFLINE }, { 0x1053c0, 3, RI_ALL_ONLINE },
-	{ 0x1053cc, 1, RI_ALL_OFFLINE }, { 0x1053d0, 3, RI_ALL_ONLINE },
-	{ 0x1053dc, 1, RI_ALL_OFFLINE }, { 0x1053e0, 3, RI_ALL_ONLINE },
-	{ 0x1053ec, 1, RI_ALL_OFFLINE }, { 0x1053f0, 3, RI_ALL_ONLINE },
-	{ 0x1053fc, 769, RI_ALL_OFFLINE }, { 0x108000, 33, RI_ALL_ONLINE },
-	{ 0x108090, 1, RI_ALL_ONLINE }, { 0x1080a0, 1, RI_ALL_ONLINE },
-	{ 0x1080ac, 5, RI_E1H_ONLINE }, { 0x108100, 5, RI_ALL_ONLINE },
-	{ 0x108120, 5, RI_ALL_ONLINE }, { 0x108200, 74, RI_ALL_ONLINE },
-	{ 0x108400, 74, RI_ALL_ONLINE }, { 0x108800, 152, RI_ALL_ONLINE },
-	{ 0x109000, 1, RI_ALL_ONLINE }, { 0x120000, 347, RI_ALL_ONLINE },
-	{ 0x120578, 1, RI_ALL_ONLINE }, { 0x120588, 1, RI_ALL_ONLINE },
-	{ 0x120598, 1, RI_ALL_ONLINE }, { 0x12059c, 23, RI_E1H_ONLINE },
-	{ 0x120614, 1, RI_E1H_ONLINE }, { 0x12061c, 30, RI_E1H_ONLINE },
-	{ 0x12080c, 65, RI_ALL_ONLINE }, { 0x120a00, 2, RI_ALL_ONLINE },
-	{ 0x122000, 2, RI_ALL_ONLINE }, { 0x128000, 2, RI_E1H_ONLINE },
-	{ 0x140000, 114, RI_ALL_ONLINE }, { 0x1401d4, 1, RI_ALL_ONLINE },
-	{ 0x1401e4, 1, RI_ALL_ONLINE }, { 0x140200, 6, RI_ALL_ONLINE },
-	{ 0x144000, 4, RI_ALL_ONLINE }, { 0x148000, 4, RI_ALL_ONLINE },
-	{ 0x14c000, 4, RI_ALL_ONLINE }, { 0x150000, 4, RI_ALL_ONLINE },
-	{ 0x154000, 4, RI_ALL_ONLINE }, { 0x158000, 4, RI_ALL_ONLINE },
-	{ 0x15c000, 7, RI_E1H_ONLINE }, { 0x161000, 7, RI_ALL_ONLINE },
-	{ 0x161028, 1, RI_ALL_ONLINE }, { 0x161038, 1, RI_ALL_ONLINE },
-	{ 0x161800, 2, RI_ALL_ONLINE }, { 0x164000, 60, RI_ALL_ONLINE },
-	{ 0x1640fc, 1, RI_ALL_ONLINE }, { 0x16410c, 1, RI_ALL_ONLINE },
-	{ 0x164110, 2, RI_E1H_ONLINE }, { 0x164200, 1, RI_ALL_ONLINE },
+	{ 0x10411c, 16, RI_E2_ONLINE }, { 0x104200, 17, RI_ALL_ONLINE },
+	{ 0x104400, 64, RI_ALL_ONLINE }, { 0x104500, 192, RI_ALL_OFFLINE },
+	{ 0x104800, 64, RI_ALL_ONLINE }, { 0x104900, 192, RI_ALL_OFFLINE },
+	{ 0x105000, 256, RI_ALL_ONLINE }, { 0x105400, 768, RI_ALL_OFFLINE },
+	{ 0x107000, 7, RI_E2_ONLINE }, { 0x108000, 33, RI_E1E1H_ONLINE },
+	{ 0x1080ac, 5, RI_E1H_ONLINE }, { 0x108100, 5, RI_E1E1H_ONLINE },
+	{ 0x108120, 5, RI_E1E1H_ONLINE }, { 0x108200, 74, RI_E1E1H_ONLINE },
+	{ 0x108400, 74, RI_E1E1H_ONLINE }, { 0x108800, 152, RI_E1E1H_ONLINE },
+	{ 0x110000, 111, RI_E2_ONLINE }, { 0x110200, 4, RI_E2_ONLINE },
+	{ 0x120000, 2, RI_ALL_ONLINE }, { 0x120008, 4, RI_ALL_ONLINE },
+	{ 0x120018, 3, RI_ALL_ONLINE }, { 0x120024, 4, RI_ALL_ONLINE },
+	{ 0x120034, 3, RI_ALL_ONLINE }, { 0x120040, 4, RI_ALL_ONLINE },
+	{ 0x120050, 3, RI_ALL_ONLINE }, { 0x12005c, 4, RI_ALL_ONLINE },
+	{ 0x12006c, 3, RI_ALL_ONLINE }, { 0x120078, 4, RI_ALL_ONLINE },
+	{ 0x120088, 3, RI_ALL_ONLINE }, { 0x120094, 4, RI_ALL_ONLINE },
+	{ 0x1200a4, 3, RI_ALL_ONLINE }, { 0x1200b0, 4, RI_ALL_ONLINE },
+	{ 0x1200c0, 3, RI_ALL_ONLINE }, { 0x1200cc, 4, RI_ALL_ONLINE },
+	{ 0x1200dc, 3, RI_ALL_ONLINE }, { 0x1200e8, 4, RI_ALL_ONLINE },
+	{ 0x1200f8, 3, RI_ALL_ONLINE }, { 0x120104, 4, RI_ALL_ONLINE },
+	{ 0x120114, 1, RI_ALL_ONLINE }, { 0x120118, 22, RI_ALL_ONLINE },
+	{ 0x120170, 2, RI_E1E1H_ONLINE }, { 0x120178, 243, RI_ALL_ONLINE },
+	{ 0x120544, 4, RI_E1E1H_ONLINE }, { 0x120554, 7, RI_ALL_ONLINE },
+	{ 0x12059c, 6, RI_E1HE2_ONLINE }, { 0x1205b4, 1, RI_E1HE2_ONLINE },
+	{ 0x1205b8, 16, RI_E1HE2_ONLINE }, { 0x1205f8, 4, RI_E2_ONLINE },
+	{ 0x120618, 1, RI_E2_ONLINE }, { 0x12061c, 20, RI_E1HE2_ONLINE },
+	{ 0x12066c, 11, RI_E1HE2_ONLINE }, { 0x120698, 5, RI_E2_ONLINE },
+	{ 0x1206b0, 76, RI_E2_ONLINE }, { 0x1207fc, 1, RI_E2_ONLINE },
+	{ 0x120808, 66, RI_ALL_ONLINE }, { 0x120910, 7, RI_E2_ONLINE },
+	{ 0x120930, 9, RI_E2_ONLINE }, { 0x120a00, 2, RI_ALL_ONLINE },
+	{ 0x122000, 2, RI_ALL_ONLINE }, { 0x122008, 2046, RI_E1_OFFLINE },
+	{ 0x128000, 2, RI_E1HE2_ONLINE }, { 0x128008, 6142, RI_E1HE2_OFFLINE },
+	{ 0x130000, 35, RI_E2_ONLINE }, { 0x130100, 29, RI_E2_ONLINE },
+	{ 0x130180, 1, RI_E2_ONLINE }, { 0x130200, 1, RI_E2_ONLINE },
+	{ 0x130280, 1, RI_E2_ONLINE }, { 0x130300, 5, RI_E2_ONLINE },
+	{ 0x130380, 1, RI_E2_ONLINE }, { 0x130400, 1, RI_E2_ONLINE },
+	{ 0x130480, 5, RI_E2_ONLINE }, { 0x130800, 72, RI_E2_ONLINE },
+	{ 0x131000, 136, RI_E2_ONLINE }, { 0x132000, 148, RI_E2_ONLINE },
+	{ 0x134000, 544, RI_E2_ONLINE }, { 0x140000, 64, RI_ALL_ONLINE },
+	{ 0x140100, 5, RI_E1E1H_ONLINE }, { 0x140114, 45, RI_ALL_ONLINE },
+	{ 0x140200, 6, RI_ALL_ONLINE }, { 0x140220, 4, RI_E2_ONLINE },
+	{ 0x140240, 4, RI_E2_ONLINE }, { 0x140260, 4, RI_E2_ONLINE },
+	{ 0x140280, 4, RI_E2_ONLINE }, { 0x1402a0, 4, RI_E2_ONLINE },
+	{ 0x1402c0, 4, RI_E2_ONLINE }, { 0x1402e0, 13, RI_E2_ONLINE },
+	{ 0x144000, 4, RI_E1E1H_ONLINE }, { 0x148000, 4, RI_E1E1H_ONLINE },
+	{ 0x14c000, 4, RI_E1E1H_ONLINE }, { 0x150000, 4, RI_E1E1H_ONLINE },
+	{ 0x154000, 4, RI_E1E1H_ONLINE }, { 0x158000, 4, RI_E1E1H_ONLINE },
+	{ 0x15c000, 2, RI_E1HE2_ONLINE }, { 0x15c008, 5, RI_E1H_ONLINE },
+	{ 0x15c020, 27, RI_E2_ONLINE }, { 0x15c090, 13, RI_E2_ONLINE },
+	{ 0x15c0c8, 34, RI_E2_ONLINE }, { 0x161000, 7, RI_ALL_ONLINE },
+	{ 0x16103c, 2, RI_E2_ONLINE }, { 0x161800, 2, RI_ALL_ONLINE },
+	{ 0x164000, 60, RI_ALL_ONLINE }, { 0x164110, 2, RI_E1HE2_ONLINE },
+	{ 0x164118, 15, RI_E2_ONLINE }, { 0x164200, 1, RI_ALL_ONLINE },
 	{ 0x164208, 1, RI_ALL_ONLINE }, { 0x164210, 1, RI_ALL_ONLINE },
 	{ 0x164218, 1, RI_ALL_ONLINE }, { 0x164220, 1, RI_ALL_ONLINE },
 	{ 0x164228, 1, RI_ALL_ONLINE }, { 0x164230, 1, RI_ALL_ONLINE },
@@ -284,169 +273,298 @@
 	{ 0x164258, 1, RI_ALL_ONLINE }, { 0x164260, 1, RI_ALL_ONLINE },
 	{ 0x164270, 2, RI_ALL_ONLINE }, { 0x164280, 2, RI_ALL_ONLINE },
 	{ 0x164800, 2, RI_ALL_ONLINE }, { 0x165000, 2, RI_ALL_ONLINE },
-	{ 0x166000, 164, RI_ALL_ONLINE }, { 0x16629c, 1, RI_ALL_ONLINE },
-	{ 0x1662ac, 1, RI_ALL_ONLINE }, { 0x1662bc, 1, RI_ALL_ONLINE },
+	{ 0x166000, 164, RI_ALL_ONLINE }, { 0x1662cc, 7, RI_E2_ONLINE },
 	{ 0x166400, 49, RI_ALL_ONLINE }, { 0x1664c8, 38, RI_ALL_ONLINE },
-	{ 0x166568, 2, RI_ALL_ONLINE }, { 0x166800, 1, RI_ALL_ONLINE },
-	{ 0x168000, 270, RI_ALL_ONLINE }, { 0x168444, 1, RI_ALL_ONLINE },
-	{ 0x168454, 1, RI_ALL_ONLINE }, { 0x168800, 19, RI_ALL_ONLINE },
-	{ 0x168900, 1, RI_ALL_ONLINE }, { 0x168a00, 128, RI_ALL_ONLINE },
-	{ 0x16a000, 1, RI_ALL_ONLINE }, { 0x16a004, 1535, RI_ALL_OFFLINE },
-	{ 0x16c000, 1, RI_ALL_ONLINE }, { 0x16c004, 1535, RI_ALL_OFFLINE },
-	{ 0x16e000, 16, RI_E1H_ONLINE }, { 0x16e100, 1, RI_E1H_ONLINE },
-	{ 0x16e200, 2, RI_E1H_ONLINE }, { 0x16e400, 183, RI_E1H_ONLINE },
-	{ 0x170000, 93, RI_ALL_ONLINE }, { 0x170180, 1, RI_ALL_ONLINE },
-	{ 0x170190, 1, RI_ALL_ONLINE }, { 0x170200, 4, RI_ALL_ONLINE },
-	{ 0x170214, 1, RI_ALL_ONLINE }, { 0x178000, 1, RI_ALL_ONLINE },
-	{ 0x180000, 61, RI_ALL_ONLINE }, { 0x180100, 1, RI_ALL_ONLINE },
-	{ 0x180110, 1, RI_ALL_ONLINE }, { 0x180120, 1, RI_ALL_ONLINE },
-	{ 0x180130, 1, RI_ALL_ONLINE }, { 0x18013c, 2, RI_E1H_ONLINE },
-	{ 0x180200, 58, RI_ALL_ONLINE }, { 0x180340, 4, RI_ALL_ONLINE },
-	{ 0x180400, 1, RI_ALL_ONLINE }, { 0x180404, 255, RI_ALL_OFFLINE },
+	{ 0x166568, 2, RI_ALL_ONLINE }, { 0x166570, 5, RI_E2_ONLINE },
+	{ 0x166800, 1, RI_ALL_ONLINE }, { 0x168000, 137, RI_ALL_ONLINE },
+	{ 0x168224, 2, RI_E1E1H_ONLINE }, { 0x16822c, 29, RI_ALL_ONLINE },
+	{ 0x1682a0, 12, RI_E1E1H_ONLINE }, { 0x1682d0, 12, RI_ALL_ONLINE },
+	{ 0x168300, 2, RI_E1E1H_ONLINE }, { 0x168308, 68, RI_ALL_ONLINE },
+	{ 0x168418, 2, RI_E1E1H_ONLINE }, { 0x168420, 6, RI_ALL_ONLINE },
+	{ 0x168800, 19, RI_ALL_ONLINE }, { 0x168900, 1, RI_ALL_ONLINE },
+	{ 0x168a00, 128, RI_ALL_ONLINE }, { 0x16a000, 1, RI_ALL_ONLINE },
+	{ 0x16a004, 1535, RI_ALL_OFFLINE }, { 0x16c000, 1, RI_ALL_ONLINE },
+	{ 0x16c004, 1535, RI_ALL_OFFLINE }, { 0x16e000, 16, RI_E1H_ONLINE },
+	{ 0x16e040, 8, RI_E2_ONLINE }, { 0x16e100, 1, RI_E1H_ONLINE },
+	{ 0x16e200, 2, RI_E1H_ONLINE }, { 0x16e400, 161, RI_E1H_ONLINE },
+	{ 0x16e684, 2, RI_E1HE2_ONLINE }, { 0x16e68c, 12, RI_E1H_ONLINE },
+	{ 0x16e6bc, 4, RI_E1HE2_ONLINE }, { 0x16e6cc, 4, RI_E1H_ONLINE },
+	{ 0x16e6e0, 12, RI_E2_ONLINE }, { 0x16e768, 17, RI_E2_ONLINE },
+	{ 0x170000, 24, RI_ALL_ONLINE }, { 0x170060, 4, RI_E1E1H_ONLINE },
+	{ 0x170070, 65, RI_ALL_ONLINE }, { 0x170194, 11, RI_E2_ONLINE },
+	{ 0x1701c4, 1, RI_E2_ONLINE }, { 0x1701cc, 7, RI_E2_ONLINE },
+	{ 0x1701ec, 1, RI_E2_ONLINE }, { 0x1701f4, 1, RI_E2_ONLINE },
+	{ 0x170200, 4, RI_ALL_ONLINE }, { 0x170214, 1, RI_ALL_ONLINE },
+	{ 0x170218, 77, RI_E2_ONLINE }, { 0x170400, 64, RI_E2_ONLINE },
+	{ 0x178000, 1, RI_ALL_ONLINE }, { 0x180000, 61, RI_ALL_ONLINE },
+	{ 0x18013c, 2, RI_E1HE2_ONLINE }, { 0x180200, 58, RI_ALL_ONLINE },
+	{ 0x180340, 4, RI_ALL_ONLINE }, { 0x180380, 1, RI_E2_ONLINE },
+	{ 0x180388, 1, RI_E2_ONLINE }, { 0x180390, 1, RI_E2_ONLINE },
+	{ 0x180398, 1, RI_E2_ONLINE }, { 0x1803a0, 5, RI_E2_ONLINE },
+	{ 0x180400, 1, RI_ALL_ONLINE }, { 0x180404, 255, RI_E1E1H_OFFLINE },
 	{ 0x181000, 4, RI_ALL_ONLINE }, { 0x181010, 1020, RI_ALL_OFFLINE },
-	{ 0x1a0000, 1, RI_ALL_ONLINE }, { 0x1a0004, 1023, RI_ALL_OFFLINE },
-	{ 0x1a1000, 1, RI_ALL_ONLINE }, { 0x1a1004, 4607, RI_ALL_OFFLINE },
-	{ 0x1a5800, 2560, RI_E1H_OFFLINE }, { 0x1a8000, 64, RI_ALL_OFFLINE },
-	{ 0x1a8100, 1984, RI_E1H_OFFLINE }, { 0x1aa000, 1, RI_E1H_ONLINE },
-	{ 0x1aa004, 6655, RI_E1H_OFFLINE }, { 0x1b1800, 128, RI_ALL_OFFLINE },
-	{ 0x1b1c00, 128, RI_ALL_OFFLINE }, { 0x1b2000, 1, RI_ALL_OFFLINE },
-	{ 0x1b2400, 64, RI_E1H_OFFLINE }, { 0x1b8200, 1, RI_ALL_ONLINE },
+	{ 0x1a0000, 1, RI_ALL_ONLINE }, { 0x1a0004, 5631, RI_ALL_OFFLINE },
+	{ 0x1a5800, 2560, RI_E1HE2_OFFLINE }, { 0x1a8000, 1, RI_ALL_ONLINE },
+	{ 0x1a8004, 8191, RI_E1HE2_OFFLINE }, { 0x1b0000, 1, RI_ALL_ONLINE },
+	{ 0x1b0004, 15, RI_E1H_OFFLINE }, { 0x1b0040, 1, RI_E1HE2_ONLINE },
+	{ 0x1b0044, 239, RI_E1H_OFFLINE }, { 0x1b0400, 1, RI_ALL_ONLINE },
+	{ 0x1b0404, 255, RI_E1H_OFFLINE }, { 0x1b0800, 1, RI_ALL_ONLINE },
+	{ 0x1b0840, 1, RI_E1HE2_ONLINE }, { 0x1b0c00, 1, RI_ALL_ONLINE },
+	{ 0x1b1000, 1, RI_ALL_ONLINE }, { 0x1b1040, 1, RI_E1HE2_ONLINE },
+	{ 0x1b1400, 1, RI_ALL_ONLINE }, { 0x1b1440, 1, RI_E1HE2_ONLINE },
+	{ 0x1b1480, 1, RI_E1HE2_ONLINE }, { 0x1b14c0, 1, RI_E1HE2_ONLINE },
+	{ 0x1b1800, 128, RI_ALL_OFFLINE }, { 0x1b1c00, 128, RI_ALL_OFFLINE },
+	{ 0x1b2000, 1, RI_ALL_ONLINE }, { 0x1b2400, 1, RI_E1HE2_ONLINE },
+	{ 0x1b2404, 5631, RI_E2_OFFLINE }, { 0x1b8000, 1, RI_ALL_ONLINE },
+	{ 0x1b8040, 1, RI_ALL_ONLINE }, { 0x1b8080, 1, RI_ALL_ONLINE },
+	{ 0x1b80c0, 1, RI_ALL_ONLINE }, { 0x1b8100, 1, RI_ALL_ONLINE },
+	{ 0x1b8140, 1, RI_ALL_ONLINE }, { 0x1b8180, 1, RI_ALL_ONLINE },
+	{ 0x1b81c0, 1, RI_ALL_ONLINE }, { 0x1b8200, 1, RI_ALL_ONLINE },
 	{ 0x1b8240, 1, RI_ALL_ONLINE }, { 0x1b8280, 1, RI_ALL_ONLINE },
-	{ 0x1b82c0, 1, RI_ALL_ONLINE }, { 0x1b8a00, 1, RI_ALL_ONLINE },
-	{ 0x1b8a80, 1, RI_ALL_ONLINE }, { 0x1c0000, 2, RI_ALL_ONLINE },
-	{ 0x200000, 65, RI_ALL_ONLINE }, { 0x200110, 1, RI_ALL_ONLINE },
-	{ 0x200120, 1, RI_ALL_ONLINE }, { 0x200130, 1, RI_ALL_ONLINE },
-	{ 0x200140, 1, RI_ALL_ONLINE }, { 0x20014c, 2, RI_E1H_ONLINE },
-	{ 0x200200, 58, RI_ALL_ONLINE }, { 0x200340, 4, RI_ALL_ONLINE },
-	{ 0x200400, 1, RI_ALL_ONLINE }, { 0x200404, 255, RI_ALL_OFFLINE },
-	{ 0x202000, 4, RI_ALL_ONLINE }, { 0x202010, 2044, RI_ALL_OFFLINE },
-	{ 0x220000, 1, RI_ALL_ONLINE }, { 0x220004, 1023, RI_ALL_OFFLINE },
-	{ 0x221000, 1, RI_ALL_ONLINE }, { 0x221004, 4607, RI_ALL_OFFLINE },
-	{ 0x225800, 1536, RI_E1H_OFFLINE }, { 0x227000, 1, RI_E1H_ONLINE },
-	{ 0x227004, 1023, RI_E1H_OFFLINE }, { 0x228000, 64, RI_ALL_OFFLINE },
-	{ 0x228100, 8640, RI_E1H_OFFLINE }, { 0x231800, 128, RI_ALL_OFFLINE },
-	{ 0x231c00, 128, RI_ALL_OFFLINE }, { 0x232000, 1, RI_ALL_OFFLINE },
-	{ 0x232400, 64, RI_E1H_OFFLINE }, { 0x238200, 1, RI_ALL_ONLINE },
-	{ 0x238240, 1, RI_ALL_ONLINE }, { 0x238280, 1, RI_ALL_ONLINE },
-	{ 0x2382c0, 1, RI_ALL_ONLINE }, { 0x238a00, 1, RI_ALL_ONLINE },
-	{ 0x238a80, 1, RI_ALL_ONLINE }, { 0x240000, 2, RI_ALL_ONLINE },
-	{ 0x280000, 65, RI_ALL_ONLINE }, { 0x280110, 1, RI_ALL_ONLINE },
-	{ 0x280120, 1, RI_ALL_ONLINE }, { 0x280130, 1, RI_ALL_ONLINE },
-	{ 0x280140, 1, RI_ALL_ONLINE }, { 0x28014c, 2, RI_E1H_ONLINE },
-	{ 0x280200, 58, RI_ALL_ONLINE }, { 0x280340, 4, RI_ALL_ONLINE },
-	{ 0x280400, 1, RI_ALL_ONLINE }, { 0x280404, 255, RI_ALL_OFFLINE },
-	{ 0x282000, 4, RI_ALL_ONLINE }, { 0x282010, 2044, RI_ALL_OFFLINE },
-	{ 0x2a0000, 1, RI_ALL_ONLINE }, { 0x2a0004, 1023, RI_ALL_OFFLINE },
-	{ 0x2a1000, 1, RI_ALL_ONLINE }, { 0x2a1004, 4607, RI_ALL_OFFLINE },
-	{ 0x2a5800, 2560, RI_E1H_OFFLINE }, { 0x2a8000, 64, RI_ALL_OFFLINE },
-	{ 0x2a8100, 960, RI_E1H_OFFLINE }, { 0x2a9000, 1, RI_E1H_ONLINE },
-	{ 0x2a9004, 7679, RI_E1H_OFFLINE }, { 0x2b1800, 128, RI_ALL_OFFLINE },
-	{ 0x2b1c00, 128, RI_ALL_OFFLINE }, { 0x2b2000, 1, RI_ALL_OFFLINE },
-	{ 0x2b2400, 64, RI_E1H_OFFLINE }, { 0x2b8200, 1, RI_ALL_ONLINE },
-	{ 0x2b8240, 1, RI_ALL_ONLINE }, { 0x2b8280, 1, RI_ALL_ONLINE },
-	{ 0x2b82c0, 1, RI_ALL_ONLINE }, { 0x2b8a00, 1, RI_ALL_ONLINE },
-	{ 0x2b8a80, 1, RI_ALL_ONLINE }, { 0x2c0000, 2, RI_ALL_ONLINE },
-	{ 0x300000, 65, RI_ALL_ONLINE }, { 0x300110, 1, RI_ALL_ONLINE },
-	{ 0x300120, 1, RI_ALL_ONLINE }, { 0x300130, 1, RI_ALL_ONLINE },
-	{ 0x300140, 1, RI_ALL_ONLINE }, { 0x30014c, 2, RI_E1H_ONLINE },
+	{ 0x1b82c0, 1, RI_ALL_ONLINE }, { 0x1b8300, 1, RI_ALL_ONLINE },
+	{ 0x1b8340, 1, RI_ALL_ONLINE }, { 0x1b8380, 1, RI_ALL_ONLINE },
+	{ 0x1b83c0, 1, RI_ALL_ONLINE }, { 0x1b8400, 1, RI_ALL_ONLINE },
+	{ 0x1b8440, 1, RI_ALL_ONLINE }, { 0x1b8480, 1, RI_ALL_ONLINE },
+	{ 0x1b84c0, 1, RI_ALL_ONLINE }, { 0x1b8500, 1, RI_ALL_ONLINE },
+	{ 0x1b8540, 1, RI_ALL_ONLINE }, { 0x1b8580, 1, RI_ALL_ONLINE },
+	{ 0x1b85c0, 19, RI_E2_ONLINE }, { 0x1b8800, 1, RI_ALL_ONLINE },
+	{ 0x1b8840, 1, RI_ALL_ONLINE }, { 0x1b8880, 1, RI_ALL_ONLINE },
+	{ 0x1b88c0, 1, RI_ALL_ONLINE }, { 0x1b8900, 1, RI_ALL_ONLINE },
+	{ 0x1b8940, 1, RI_ALL_ONLINE }, { 0x1b8980, 1, RI_ALL_ONLINE },
+	{ 0x1b89c0, 1, RI_ALL_ONLINE }, { 0x1b8a00, 1, RI_ALL_ONLINE },
+	{ 0x1b8a40, 1, RI_ALL_ONLINE }, { 0x1b8a80, 1, RI_ALL_ONLINE },
+	{ 0x1b8ac0, 1, RI_ALL_ONLINE }, { 0x1b8b00, 1, RI_ALL_ONLINE },
+	{ 0x1b8b40, 1, RI_ALL_ONLINE }, { 0x1b8b80, 1, RI_ALL_ONLINE },
+	{ 0x1b8bc0, 1, RI_ALL_ONLINE }, { 0x1b8c00, 1, RI_ALL_ONLINE },
+	{ 0x1b8c40, 1, RI_ALL_ONLINE }, { 0x1b8c80, 1, RI_ALL_ONLINE },
+	{ 0x1b8cc0, 1, RI_ALL_ONLINE }, { 0x1b8cc4, 1, RI_E2_ONLINE },
+	{ 0x1b8d00, 1, RI_ALL_ONLINE }, { 0x1b8d40, 1, RI_ALL_ONLINE },
+	{ 0x1b8d80, 1, RI_ALL_ONLINE }, { 0x1b8dc0, 1, RI_ALL_ONLINE },
+	{ 0x1b8e00, 1, RI_ALL_ONLINE }, { 0x1b8e40, 1, RI_ALL_ONLINE },
+	{ 0x1b8e80, 1, RI_ALL_ONLINE }, { 0x1b8e84, 1, RI_E2_ONLINE },
+	{ 0x1b8ec0, 1, RI_E1HE2_ONLINE }, { 0x1b8f00, 1, RI_E1HE2_ONLINE },
+	{ 0x1b8f40, 1, RI_E1HE2_ONLINE }, { 0x1b8f80, 1, RI_E1HE2_ONLINE },
+	{ 0x1b8fc0, 1, RI_E1HE2_ONLINE }, { 0x1b8fc4, 2, RI_E2_ONLINE },
+	{ 0x1b8fd0, 6, RI_E2_ONLINE }, { 0x1b9000, 1, RI_E2_ONLINE },
+	{ 0x1b9040, 3, RI_E2_ONLINE }, { 0x1b9400, 14, RI_E2_ONLINE },
+	{ 0x1b943c, 19, RI_E2_ONLINE }, { 0x1b9490, 10, RI_E2_ONLINE },
+	{ 0x1c0000, 2, RI_ALL_ONLINE }, { 0x200000, 65, RI_ALL_ONLINE },
+	{ 0x20014c, 2, RI_E1HE2_ONLINE }, { 0x200200, 58, RI_ALL_ONLINE },
+	{ 0x200340, 4, RI_ALL_ONLINE }, { 0x200380, 1, RI_E2_ONLINE },
+	{ 0x200388, 1, RI_E2_ONLINE }, { 0x200390, 1, RI_E2_ONLINE },
+	{ 0x200398, 1, RI_E2_ONLINE }, { 0x2003a0, 1, RI_E2_ONLINE },
+	{ 0x2003a8, 2, RI_E2_ONLINE }, { 0x200400, 1, RI_ALL_ONLINE },
+	{ 0x200404, 255, RI_E1E1H_OFFLINE }, { 0x202000, 4, RI_ALL_ONLINE },
+	{ 0x202010, 2044, RI_ALL_OFFLINE }, { 0x220000, 1, RI_ALL_ONLINE },
+	{ 0x220004, 5631, RI_ALL_OFFLINE }, { 0x225800, 2560, RI_E1HE2_OFFLINE},
+	{ 0x228000, 1, RI_ALL_ONLINE }, { 0x228004, 8191, RI_E1HE2_OFFLINE },
+	{ 0x230000, 1, RI_ALL_ONLINE }, { 0x230004, 15, RI_E1H_OFFLINE },
+	{ 0x230040, 1, RI_E1HE2_ONLINE }, { 0x230044, 239, RI_E1H_OFFLINE },
+	{ 0x230400, 1, RI_ALL_ONLINE }, { 0x230404, 255, RI_E1H_OFFLINE },
+	{ 0x230800, 1, RI_ALL_ONLINE }, { 0x230840, 1, RI_E1HE2_ONLINE },
+	{ 0x230c00, 1, RI_ALL_ONLINE }, { 0x231000, 1, RI_ALL_ONLINE },
+	{ 0x231040, 1, RI_E1HE2_ONLINE }, { 0x231400, 1, RI_ALL_ONLINE },
+	{ 0x231440, 1, RI_E1HE2_ONLINE }, { 0x231480, 1, RI_E1HE2_ONLINE },
+	{ 0x2314c0, 1, RI_E1HE2_ONLINE }, { 0x231800, 128, RI_ALL_OFFLINE },
+	{ 0x231c00, 128, RI_ALL_OFFLINE }, { 0x232000, 1, RI_ALL_ONLINE },
+	{ 0x232400, 1, RI_E1HE2_ONLINE }, { 0x232404, 5631, RI_E2_OFFLINE },
+	{ 0x238000, 1, RI_ALL_ONLINE }, { 0x238040, 1, RI_ALL_ONLINE },
+	{ 0x238080, 1, RI_ALL_ONLINE }, { 0x2380c0, 1, RI_ALL_ONLINE },
+	{ 0x238100, 1, RI_ALL_ONLINE }, { 0x238140, 1, RI_ALL_ONLINE },
+	{ 0x238180, 1, RI_ALL_ONLINE }, { 0x2381c0, 1, RI_ALL_ONLINE },
+	{ 0x238200, 1, RI_ALL_ONLINE }, { 0x238240, 1, RI_ALL_ONLINE },
+	{ 0x238280, 1, RI_ALL_ONLINE }, { 0x2382c0, 1, RI_ALL_ONLINE },
+	{ 0x238300, 1, RI_ALL_ONLINE }, { 0x238340, 1, RI_ALL_ONLINE },
+	{ 0x238380, 1, RI_ALL_ONLINE }, { 0x2383c0, 1, RI_ALL_ONLINE },
+	{ 0x238400, 1, RI_ALL_ONLINE }, { 0x238440, 1, RI_ALL_ONLINE },
+	{ 0x238480, 1, RI_ALL_ONLINE }, { 0x2384c0, 1, RI_ALL_ONLINE },
+	{ 0x238500, 1, RI_ALL_ONLINE }, { 0x238540, 1, RI_ALL_ONLINE },
+	{ 0x238580, 1, RI_ALL_ONLINE }, { 0x2385c0, 19, RI_E2_ONLINE },
+	{ 0x238800, 1, RI_ALL_ONLINE }, { 0x238840, 1, RI_ALL_ONLINE },
+	{ 0x238880, 1, RI_ALL_ONLINE }, { 0x2388c0, 1, RI_ALL_ONLINE },
+	{ 0x238900, 1, RI_ALL_ONLINE }, { 0x238940, 1, RI_ALL_ONLINE },
+	{ 0x238980, 1, RI_ALL_ONLINE }, { 0x2389c0, 1, RI_ALL_ONLINE },
+	{ 0x238a00, 1, RI_ALL_ONLINE }, { 0x238a40, 1, RI_ALL_ONLINE },
+	{ 0x238a80, 1, RI_ALL_ONLINE }, { 0x238ac0, 1, RI_ALL_ONLINE },
+	{ 0x238b00, 1, RI_ALL_ONLINE }, { 0x238b40, 1, RI_ALL_ONLINE },
+	{ 0x238b80, 1, RI_ALL_ONLINE }, { 0x238bc0, 1, RI_ALL_ONLINE },
+	{ 0x238c00, 1, RI_ALL_ONLINE }, { 0x238c40, 1, RI_ALL_ONLINE },
+	{ 0x238c80, 1, RI_ALL_ONLINE }, { 0x238cc0, 1, RI_ALL_ONLINE },
+	{ 0x238cc4, 1, RI_E2_ONLINE }, { 0x238d00, 1, RI_ALL_ONLINE },
+	{ 0x238d40, 1, RI_ALL_ONLINE }, { 0x238d80, 1, RI_ALL_ONLINE },
+	{ 0x238dc0, 1, RI_ALL_ONLINE }, { 0x238e00, 1, RI_ALL_ONLINE },
+	{ 0x238e40, 1, RI_ALL_ONLINE }, { 0x238e80, 1, RI_ALL_ONLINE },
+	{ 0x238e84, 1, RI_E2_ONLINE }, { 0x238ec0, 1, RI_E1HE2_ONLINE },
+	{ 0x238f00, 1, RI_E1HE2_ONLINE }, { 0x238f40, 1, RI_E1HE2_ONLINE },
+	{ 0x238f80, 1, RI_E1HE2_ONLINE }, { 0x238fc0, 1, RI_E1HE2_ONLINE },
+	{ 0x238fc4, 2, RI_E2_ONLINE }, { 0x238fd0, 6, RI_E2_ONLINE },
+	{ 0x239000, 1, RI_E2_ONLINE }, { 0x239040, 3, RI_E2_ONLINE },
+	{ 0x240000, 2, RI_ALL_ONLINE }, { 0x280000, 65, RI_ALL_ONLINE },
+	{ 0x28014c, 2, RI_E1HE2_ONLINE }, { 0x280200, 58, RI_ALL_ONLINE },
+	{ 0x280340, 4, RI_ALL_ONLINE }, { 0x280380, 1, RI_E2_ONLINE },
+	{ 0x280388, 1, RI_E2_ONLINE }, { 0x280390, 1, RI_E2_ONLINE },
+	{ 0x280398, 1, RI_E2_ONLINE }, { 0x2803a0, 1, RI_E2_ONLINE },
+	{ 0x2803a8, 2, RI_E2_ONLINE }, { 0x280400, 1, RI_ALL_ONLINE },
+	{ 0x280404, 255, RI_E1E1H_OFFLINE }, { 0x282000, 4, RI_ALL_ONLINE },
+	{ 0x282010, 2044, RI_ALL_OFFLINE }, { 0x2a0000, 1, RI_ALL_ONLINE },
+	{ 0x2a0004, 5631, RI_ALL_OFFLINE }, { 0x2a5800, 2560, RI_E1HE2_OFFLINE},
+	{ 0x2a8000, 1, RI_ALL_ONLINE }, { 0x2a8004, 8191, RI_E1HE2_OFFLINE },
+	{ 0x2b0000, 1, RI_ALL_ONLINE }, { 0x2b0004, 15, RI_E1H_OFFLINE },
+	{ 0x2b0040, 1, RI_E1HE2_ONLINE }, { 0x2b0044, 239, RI_E1H_OFFLINE },
+	{ 0x2b0400, 1, RI_ALL_ONLINE }, { 0x2b0404, 255, RI_E1H_OFFLINE },
+	{ 0x2b0800, 1, RI_ALL_ONLINE }, { 0x2b0840, 1, RI_E1HE2_ONLINE },
+	{ 0x2b0c00, 1, RI_ALL_ONLINE }, { 0x2b1000, 1, RI_ALL_ONLINE },
+	{ 0x2b1040, 1, RI_E1HE2_ONLINE }, { 0x2b1400, 1, RI_ALL_ONLINE },
+	{ 0x2b1440, 1, RI_E1HE2_ONLINE }, { 0x2b1480, 1, RI_E1HE2_ONLINE },
+	{ 0x2b14c0, 1, RI_E1HE2_ONLINE }, { 0x2b1800, 128, RI_ALL_OFFLINE },
+	{ 0x2b1c00, 128, RI_ALL_OFFLINE }, { 0x2b2000, 1, RI_ALL_ONLINE },
+	{ 0x2b2400, 1, RI_E1HE2_ONLINE }, { 0x2b2404, 5631, RI_E2_OFFLINE },
+	{ 0x2b8000, 1, RI_ALL_ONLINE }, { 0x2b8040, 1, RI_ALL_ONLINE },
+	{ 0x2b8080, 1, RI_ALL_ONLINE }, { 0x2b80c0, 1, RI_ALL_ONLINE },
+	{ 0x2b8100, 1, RI_ALL_ONLINE }, { 0x2b8140, 1, RI_ALL_ONLINE },
+	{ 0x2b8180, 1, RI_ALL_ONLINE }, { 0x2b81c0, 1, RI_ALL_ONLINE },
+	{ 0x2b8200, 1, RI_ALL_ONLINE }, { 0x2b8240, 1, RI_ALL_ONLINE },
+	{ 0x2b8280, 1, RI_ALL_ONLINE }, { 0x2b82c0, 1, RI_ALL_ONLINE },
+	{ 0x2b8300, 1, RI_ALL_ONLINE }, { 0x2b8340, 1, RI_ALL_ONLINE },
+	{ 0x2b8380, 1, RI_ALL_ONLINE }, { 0x2b83c0, 1, RI_ALL_ONLINE },
+	{ 0x2b8400, 1, RI_ALL_ONLINE }, { 0x2b8440, 1, RI_ALL_ONLINE },
+	{ 0x2b8480, 1, RI_ALL_ONLINE }, { 0x2b84c0, 1, RI_ALL_ONLINE },
+	{ 0x2b8500, 1, RI_ALL_ONLINE }, { 0x2b8540, 1, RI_ALL_ONLINE },
+	{ 0x2b8580, 1, RI_ALL_ONLINE }, { 0x2b85c0, 19, RI_E2_ONLINE },
+	{ 0x2b8800, 1, RI_ALL_ONLINE }, { 0x2b8840, 1, RI_ALL_ONLINE },
+	{ 0x2b8880, 1, RI_ALL_ONLINE }, { 0x2b88c0, 1, RI_ALL_ONLINE },
+	{ 0x2b8900, 1, RI_ALL_ONLINE }, { 0x2b8940, 1, RI_ALL_ONLINE },
+	{ 0x2b8980, 1, RI_ALL_ONLINE }, { 0x2b89c0, 1, RI_ALL_ONLINE },
+	{ 0x2b8a00, 1, RI_ALL_ONLINE }, { 0x2b8a40, 1, RI_ALL_ONLINE },
+	{ 0x2b8a80, 1, RI_ALL_ONLINE }, { 0x2b8ac0, 1, RI_ALL_ONLINE },
+	{ 0x2b8b00, 1, RI_ALL_ONLINE }, { 0x2b8b40, 1, RI_ALL_ONLINE },
+	{ 0x2b8b80, 1, RI_ALL_ONLINE }, { 0x2b8bc0, 1, RI_ALL_ONLINE },
+	{ 0x2b8c00, 1, RI_ALL_ONLINE }, { 0x2b8c40, 1, RI_ALL_ONLINE },
+	{ 0x2b8c80, 1, RI_ALL_ONLINE }, { 0x2b8cc0, 1, RI_ALL_ONLINE },
+	{ 0x2b8cc4, 1, RI_E2_ONLINE }, { 0x2b8d00, 1, RI_ALL_ONLINE },
+	{ 0x2b8d40, 1, RI_ALL_ONLINE }, { 0x2b8d80, 1, RI_ALL_ONLINE },
+	{ 0x2b8dc0, 1, RI_ALL_ONLINE }, { 0x2b8e00, 1, RI_ALL_ONLINE },
+	{ 0x2b8e40, 1, RI_ALL_ONLINE }, { 0x2b8e80, 1, RI_ALL_ONLINE },
+	{ 0x2b8e84, 1, RI_E2_ONLINE }, { 0x2b8ec0, 1, RI_E1HE2_ONLINE },
+	{ 0x2b8f00, 1, RI_E1HE2_ONLINE }, { 0x2b8f40, 1, RI_E1HE2_ONLINE },
+	{ 0x2b8f80, 1, RI_E1HE2_ONLINE }, { 0x2b8fc0, 1, RI_E1HE2_ONLINE },
+	{ 0x2b8fc4, 2, RI_E2_ONLINE }, { 0x2b8fd0, 6, RI_E2_ONLINE },
+	{ 0x2b9000, 1, RI_E2_ONLINE }, { 0x2b9040, 3, RI_E2_ONLINE },
+	{ 0x2b9400, 14, RI_E2_ONLINE }, { 0x2b943c, 19, RI_E2_ONLINE },
+	{ 0x2b9490, 10, RI_E2_ONLINE }, { 0x2c0000, 2, RI_ALL_ONLINE },
+	{ 0x300000, 65, RI_ALL_ONLINE }, { 0x30014c, 2, RI_E1HE2_ONLINE },
 	{ 0x300200, 58, RI_ALL_ONLINE }, { 0x300340, 4, RI_ALL_ONLINE },
-	{ 0x300400, 1, RI_ALL_ONLINE }, { 0x300404, 255, RI_ALL_OFFLINE },
+	{ 0x300380, 1, RI_E2_ONLINE }, { 0x300388, 1, RI_E2_ONLINE },
+	{ 0x300390, 1, RI_E2_ONLINE }, { 0x300398, 1, RI_E2_ONLINE },
+	{ 0x3003a0, 1, RI_E2_ONLINE }, { 0x3003a8, 2, RI_E2_ONLINE },
+	{ 0x300400, 1, RI_ALL_ONLINE }, { 0x300404, 255, RI_E1E1H_OFFLINE },
 	{ 0x302000, 4, RI_ALL_ONLINE }, { 0x302010, 2044, RI_ALL_OFFLINE },
-	{ 0x320000, 1, RI_ALL_ONLINE }, { 0x320004, 1023, RI_ALL_OFFLINE },
-	{ 0x321000, 1, RI_ALL_ONLINE }, { 0x321004, 4607, RI_ALL_OFFLINE },
-	{ 0x325800, 2560, RI_E1H_OFFLINE }, { 0x328000, 64, RI_ALL_OFFLINE },
-	{ 0x328100, 536, RI_E1H_OFFLINE }, { 0x328960, 1, RI_E1H_ONLINE },
-	{ 0x328964, 8103, RI_E1H_OFFLINE }, { 0x331800, 128, RI_ALL_OFFLINE },
-	{ 0x331c00, 128, RI_ALL_OFFLINE }, { 0x332000, 1, RI_ALL_OFFLINE },
-	{ 0x332400, 64, RI_E1H_OFFLINE }, { 0x338200, 1, RI_ALL_ONLINE },
+	{ 0x320000, 1, RI_ALL_ONLINE }, { 0x320004, 5631, RI_ALL_OFFLINE },
+	{ 0x325800, 2560, RI_E1HE2_OFFLINE }, { 0x328000, 1, RI_ALL_ONLINE },
+	{ 0x328004, 8191, RI_E1HE2_OFFLINE }, { 0x330000, 1, RI_ALL_ONLINE },
+	{ 0x330004, 15, RI_E1H_OFFLINE }, { 0x330040, 1, RI_E1HE2_ONLINE },
+	{ 0x330044, 239, RI_E1H_OFFLINE }, { 0x330400, 1, RI_ALL_ONLINE },
+	{ 0x330404, 255, RI_E1H_OFFLINE }, { 0x330800, 1, RI_ALL_ONLINE },
+	{ 0x330840, 1, RI_E1HE2_ONLINE }, { 0x330c00, 1, RI_ALL_ONLINE },
+	{ 0x331000, 1, RI_ALL_ONLINE }, { 0x331040, 1, RI_E1HE2_ONLINE },
+	{ 0x331400, 1, RI_ALL_ONLINE }, { 0x331440, 1, RI_E1HE2_ONLINE },
+	{ 0x331480, 1, RI_E1HE2_ONLINE }, { 0x3314c0, 1, RI_E1HE2_ONLINE },
+	{ 0x331800, 128, RI_ALL_OFFLINE }, { 0x331c00, 128, RI_ALL_OFFLINE },
+	{ 0x332000, 1, RI_ALL_ONLINE }, { 0x332400, 1, RI_E1HE2_ONLINE },
+	{ 0x332404, 5631, RI_E2_OFFLINE }, { 0x338000, 1, RI_ALL_ONLINE },
+	{ 0x338040, 1, RI_ALL_ONLINE }, { 0x338080, 1, RI_ALL_ONLINE },
+	{ 0x3380c0, 1, RI_ALL_ONLINE }, { 0x338100, 1, RI_ALL_ONLINE },
+	{ 0x338140, 1, RI_ALL_ONLINE }, { 0x338180, 1, RI_ALL_ONLINE },
+	{ 0x3381c0, 1, RI_ALL_ONLINE }, { 0x338200, 1, RI_ALL_ONLINE },
 	{ 0x338240, 1, RI_ALL_ONLINE }, { 0x338280, 1, RI_ALL_ONLINE },
-	{ 0x3382c0, 1, RI_ALL_ONLINE }, { 0x338a00, 1, RI_ALL_ONLINE },
-	{ 0x338a80, 1, RI_ALL_ONLINE }, { 0x340000, 2, RI_ALL_ONLINE }
+	{ 0x3382c0, 1, RI_ALL_ONLINE }, { 0x338300, 1, RI_ALL_ONLINE },
+	{ 0x338340, 1, RI_ALL_ONLINE }, { 0x338380, 1, RI_ALL_ONLINE },
+	{ 0x3383c0, 1, RI_ALL_ONLINE }, { 0x338400, 1, RI_ALL_ONLINE },
+	{ 0x338440, 1, RI_ALL_ONLINE }, { 0x338480, 1, RI_ALL_ONLINE },
+	{ 0x3384c0, 1, RI_ALL_ONLINE }, { 0x338500, 1, RI_ALL_ONLINE },
+	{ 0x338540, 1, RI_ALL_ONLINE }, { 0x338580, 1, RI_ALL_ONLINE },
+	{ 0x3385c0, 19, RI_E2_ONLINE }, { 0x338800, 1, RI_ALL_ONLINE },
+	{ 0x338840, 1, RI_ALL_ONLINE }, { 0x338880, 1, RI_ALL_ONLINE },
+	{ 0x3388c0, 1, RI_ALL_ONLINE }, { 0x338900, 1, RI_ALL_ONLINE },
+	{ 0x338940, 1, RI_ALL_ONLINE }, { 0x338980, 1, RI_ALL_ONLINE },
+	{ 0x3389c0, 1, RI_ALL_ONLINE }, { 0x338a00, 1, RI_ALL_ONLINE },
+	{ 0x338a40, 1, RI_ALL_ONLINE }, { 0x338a80, 1, RI_ALL_ONLINE },
+	{ 0x338ac0, 1, RI_ALL_ONLINE }, { 0x338b00, 1, RI_ALL_ONLINE },
+	{ 0x338b40, 1, RI_ALL_ONLINE }, { 0x338b80, 1, RI_ALL_ONLINE },
+	{ 0x338bc0, 1, RI_ALL_ONLINE }, { 0x338c00, 1, RI_ALL_ONLINE },
+	{ 0x338c40, 1, RI_ALL_ONLINE }, { 0x338c80, 1, RI_ALL_ONLINE },
+	{ 0x338cc0, 1, RI_ALL_ONLINE }, { 0x338cc4, 1, RI_E2_ONLINE },
+	{ 0x338d00, 1, RI_ALL_ONLINE }, { 0x338d40, 1, RI_ALL_ONLINE },
+	{ 0x338d80, 1, RI_ALL_ONLINE }, { 0x338dc0, 1, RI_ALL_ONLINE },
+	{ 0x338e00, 1, RI_ALL_ONLINE }, { 0x338e40, 1, RI_ALL_ONLINE },
+	{ 0x338e80, 1, RI_ALL_ONLINE }, { 0x338e84, 1, RI_E2_ONLINE },
+	{ 0x338ec0, 1, RI_E1HE2_ONLINE }, { 0x338f00, 1, RI_E1HE2_ONLINE },
+	{ 0x338f40, 1, RI_E1HE2_ONLINE }, { 0x338f80, 1, RI_E1HE2_ONLINE },
+	{ 0x338fc0, 1, RI_E1HE2_ONLINE }, { 0x338fc4, 2, RI_E2_ONLINE },
+	{ 0x338fd0, 6, RI_E2_ONLINE }, { 0x339000, 1, RI_E2_ONLINE },
+	{ 0x339040, 3, RI_E2_ONLINE }, { 0x340000, 2, RI_ALL_ONLINE },
 };
 
-
-#define IDLE_REGS_COUNT			277
+#define IDLE_REGS_COUNT			237
 static const struct reg_addr idle_addrs[IDLE_REGS_COUNT] = {
-	{ 0x2114, 1, RI_ALL_ONLINE }, { 0x2120, 1, RI_ALL_ONLINE },
-	{ 0x212c, 4, RI_ALL_ONLINE }, { 0x2814, 1, RI_ALL_ONLINE },
-	{ 0x281c, 2, RI_ALL_ONLINE }, { 0xa38c, 1, RI_ALL_ONLINE },
+	{ 0x2104, 1, RI_ALL_ONLINE }, { 0x2110, 2, RI_ALL_ONLINE },
+	{ 0x211c, 8, RI_ALL_ONLINE }, { 0x2814, 1, RI_ALL_ONLINE },
+	{ 0x281c, 2, RI_ALL_ONLINE }, { 0x2854, 1, RI_ALL_ONLINE },
+	{ 0x285c, 1, RI_ALL_ONLINE }, { 0x9010, 7, RI_E2_ONLINE },
+	{ 0x9030, 1, RI_E2_ONLINE }, { 0x9068, 16, RI_E2_ONLINE },
+	{ 0x9230, 2, RI_E2_ONLINE }, { 0x9244, 1, RI_E2_ONLINE },
+	{ 0x9298, 1, RI_E2_ONLINE }, { 0x92a8, 1, RI_E2_ONLINE },
+	{ 0xa38c, 1, RI_ALL_ONLINE }, { 0xa3c4, 1, RI_E1HE2_ONLINE },
 	{ 0xa408, 1, RI_ALL_ONLINE }, { 0xa42c, 12, RI_ALL_ONLINE },
-	{ 0xa600, 5, RI_E1H_ONLINE }, { 0xa618, 1, RI_E1H_ONLINE },
-	{ 0xc09c, 1, RI_ALL_ONLINE }, { 0x103b0, 1, RI_ALL_ONLINE },
-	{ 0x103c0, 1, RI_ALL_ONLINE }, { 0x103d0, 1, RI_E1H_ONLINE },
-	{ 0x2021c, 11, RI_ALL_ONLINE }, { 0x202a8, 1, RI_ALL_ONLINE },
-	{ 0x202b8, 1, RI_ALL_ONLINE }, { 0x20404, 1, RI_ALL_ONLINE },
-	{ 0x2040c, 2, RI_ALL_ONLINE }, { 0x2041c, 2, RI_ALL_ONLINE },
-	{ 0x40154, 14, RI_ALL_ONLINE }, { 0x40198, 1, RI_ALL_ONLINE },
-	{ 0x404ac, 1, RI_ALL_ONLINE }, { 0x404bc, 1, RI_ALL_ONLINE },
-	{ 0x42290, 1, RI_ALL_ONLINE }, { 0x422a0, 1, RI_ALL_ONLINE },
-	{ 0x422b0, 1, RI_ALL_ONLINE }, { 0x42548, 1, RI_ALL_ONLINE },
-	{ 0x42550, 1, RI_ALL_ONLINE }, { 0x42558, 1, RI_ALL_ONLINE },
-	{ 0x50160, 8, RI_ALL_ONLINE }, { 0x501d0, 1, RI_ALL_ONLINE },
-	{ 0x501e0, 1, RI_ALL_ONLINE }, { 0x50204, 1, RI_ALL_ONLINE },
-	{ 0x5020c, 2, RI_ALL_ONLINE }, { 0x5021c, 1, RI_ALL_ONLINE },
-	{ 0x60090, 1, RI_ALL_ONLINE }, { 0x6011c, 1, RI_ALL_ONLINE },
-	{ 0x6012c, 1, RI_ALL_ONLINE }, { 0xc101c, 1, RI_ALL_ONLINE },
-	{ 0xc102c, 1, RI_ALL_ONLINE }, { 0xc2290, 1, RI_ALL_ONLINE },
-	{ 0xc22a0, 1, RI_ALL_ONLINE }, { 0xc22b0, 1, RI_ALL_ONLINE },
-	{ 0xc2548, 1, RI_ALL_ONLINE }, { 0xc2550, 1, RI_ALL_ONLINE },
-	{ 0xc2558, 1, RI_ALL_ONLINE }, { 0xc4294, 1, RI_ALL_ONLINE },
-	{ 0xc42a4, 1, RI_ALL_ONLINE }, { 0xc42b4, 1, RI_ALL_ONLINE },
-	{ 0xc4550, 1, RI_ALL_ONLINE }, { 0xc4558, 1, RI_ALL_ONLINE },
-	{ 0xc4560, 1, RI_ALL_ONLINE }, { 0xd016c, 8, RI_ALL_ONLINE },
-	{ 0xd01d8, 1, RI_ALL_ONLINE }, { 0xd01e8, 1, RI_ALL_ONLINE },
-	{ 0xd0204, 1, RI_ALL_ONLINE }, { 0xd020c, 3, RI_ALL_ONLINE },
-	{ 0xe0154, 8, RI_ALL_ONLINE }, { 0xe01c8, 1, RI_ALL_ONLINE },
-	{ 0xe01d8, 1, RI_ALL_ONLINE }, { 0xe0204, 1, RI_ALL_ONLINE },
-	{ 0xe020c, 2, RI_ALL_ONLINE }, { 0xe021c, 2, RI_ALL_ONLINE },
-	{ 0x101014, 1, RI_ALL_ONLINE }, { 0x101030, 1, RI_ALL_ONLINE },
-	{ 0x101040, 1, RI_ALL_ONLINE }, { 0x102058, 1, RI_ALL_ONLINE },
-	{ 0x102080, 16, RI_ALL_ONLINE }, { 0x103004, 2, RI_ALL_ONLINE },
-	{ 0x103068, 1, RI_ALL_ONLINE }, { 0x103078, 1, RI_ALL_ONLINE },
-	{ 0x103088, 1, RI_ALL_ONLINE }, { 0x10309c, 2, RI_E1H_ONLINE },
+	{ 0xa600, 5, RI_E1HE2_ONLINE }, { 0xa618, 1, RI_E1HE2_ONLINE },
+	{ 0xa714, 1, RI_E2_ONLINE }, { 0xa720, 1, RI_E2_ONLINE },
+	{ 0xa750, 1, RI_E2_ONLINE }, { 0xc09c, 1, RI_E1E1H_ONLINE },
+	{ 0x103b0, 1, RI_ALL_ONLINE }, { 0x103c0, 1, RI_ALL_ONLINE },
+	{ 0x103d0, 1, RI_E1H_ONLINE }, { 0x183bc, 1, RI_E2_ONLINE },
+	{ 0x183cc, 1, RI_E2_ONLINE }, { 0x2021c, 11, RI_ALL_ONLINE },
+	{ 0x202a8, 1, RI_ALL_ONLINE }, { 0x202b8, 1, RI_ALL_ONLINE },
+	{ 0x20404, 1, RI_ALL_ONLINE }, { 0x2040c, 2, RI_ALL_ONLINE },
+	{ 0x2041c, 2, RI_ALL_ONLINE }, { 0x40154, 14, RI_ALL_ONLINE },
+	{ 0x40198, 1, RI_ALL_ONLINE }, { 0x404ac, 1, RI_ALL_ONLINE },
+	{ 0x404bc, 1, RI_ALL_ONLINE }, { 0x42290, 1, RI_ALL_ONLINE },
+	{ 0x422a0, 1, RI_ALL_ONLINE }, { 0x422b0, 1, RI_ALL_ONLINE },
+	{ 0x42548, 1, RI_ALL_ONLINE }, { 0x42550, 1, RI_ALL_ONLINE },
+	{ 0x42558, 1, RI_ALL_ONLINE }, { 0x50160, 8, RI_ALL_ONLINE },
+	{ 0x501d0, 1, RI_ALL_ONLINE }, { 0x501e0, 1, RI_ALL_ONLINE },
+	{ 0x50204, 1, RI_ALL_ONLINE }, { 0x5020c, 2, RI_ALL_ONLINE },
+	{ 0x5021c, 1, RI_ALL_ONLINE }, { 0x60090, 1, RI_ALL_ONLINE },
+	{ 0x6011c, 1, RI_ALL_ONLINE }, { 0x6012c, 1, RI_ALL_ONLINE },
+	{ 0xc101c, 1, RI_ALL_ONLINE }, { 0xc102c, 1, RI_ALL_ONLINE },
+	{ 0xc2290, 1, RI_ALL_ONLINE }, { 0xc22a0, 1, RI_ALL_ONLINE },
+	{ 0xc22b0, 1, RI_ALL_ONLINE }, { 0xc2548, 1, RI_ALL_ONLINE },
+	{ 0xc2550, 1, RI_ALL_ONLINE }, { 0xc2558, 1, RI_ALL_ONLINE },
+	{ 0xc4294, 1, RI_ALL_ONLINE }, { 0xc42a4, 1, RI_ALL_ONLINE },
+	{ 0xc42b4, 1, RI_ALL_ONLINE }, { 0xc4550, 1, RI_ALL_ONLINE },
+	{ 0xc4558, 1, RI_ALL_ONLINE }, { 0xc4560, 1, RI_ALL_ONLINE },
+	{ 0xd016c, 8, RI_ALL_ONLINE }, { 0xd01d8, 1, RI_ALL_ONLINE },
+	{ 0xd01e8, 1, RI_ALL_ONLINE }, { 0xd0204, 1, RI_ALL_ONLINE },
+	{ 0xd020c, 3, RI_ALL_ONLINE }, { 0xe0154, 8, RI_ALL_ONLINE },
+	{ 0xe01c8, 1, RI_ALL_ONLINE }, { 0xe01d8, 1, RI_ALL_ONLINE },
+	{ 0xe0204, 1, RI_ALL_ONLINE }, { 0xe020c, 2, RI_ALL_ONLINE },
+	{ 0xe021c, 2, RI_ALL_ONLINE }, { 0x101014, 1, RI_ALL_ONLINE },
+	{ 0x101030, 1, RI_ALL_ONLINE }, { 0x101040, 1, RI_ALL_ONLINE },
+	{ 0x102058, 1, RI_ALL_ONLINE }, { 0x102080, 16, RI_ALL_ONLINE },
+	{ 0x103004, 2, RI_ALL_ONLINE }, { 0x103068, 1, RI_ALL_ONLINE },
+	{ 0x103078, 1, RI_ALL_ONLINE }, { 0x103088, 1, RI_ALL_ONLINE },
+	{ 0x10309c, 2, RI_E1HE2_ONLINE }, { 0x1030b8, 2, RI_E2_ONLINE },
+	{ 0x1030cc, 1, RI_E2_ONLINE }, { 0x1030e0, 1, RI_E2_ONLINE },
 	{ 0x104004, 1, RI_ALL_ONLINE }, { 0x104018, 1, RI_ALL_ONLINE },
 	{ 0x104020, 1, RI_ALL_ONLINE }, { 0x10403c, 1, RI_ALL_ONLINE },
 	{ 0x1040fc, 1, RI_ALL_ONLINE }, { 0x10410c, 1, RI_ALL_ONLINE },
 	{ 0x104400, 64, RI_ALL_ONLINE }, { 0x104800, 64, RI_ALL_ONLINE },
-	{ 0x105000, 3, RI_ALL_ONLINE }, { 0x105010, 3, RI_ALL_ONLINE },
-	{ 0x105020, 3, RI_ALL_ONLINE }, { 0x105030, 3, RI_ALL_ONLINE },
-	{ 0x105040, 3, RI_ALL_ONLINE }, { 0x105050, 3, RI_ALL_ONLINE },
-	{ 0x105060, 3, RI_ALL_ONLINE }, { 0x105070, 3, RI_ALL_ONLINE },
-	{ 0x105080, 3, RI_ALL_ONLINE }, { 0x105090, 3, RI_ALL_ONLINE },
-	{ 0x1050a0, 3, RI_ALL_ONLINE }, { 0x1050b0, 3, RI_ALL_ONLINE },
-	{ 0x1050c0, 3, RI_ALL_ONLINE }, { 0x1050d0, 3, RI_ALL_ONLINE },
-	{ 0x1050e0, 3, RI_ALL_ONLINE }, { 0x1050f0, 3, RI_ALL_ONLINE },
-	{ 0x105100, 3, RI_ALL_ONLINE }, { 0x105110, 3, RI_ALL_ONLINE },
-	{ 0x105120, 3, RI_ALL_ONLINE }, { 0x105130, 3, RI_ALL_ONLINE },
-	{ 0x105140, 3, RI_ALL_ONLINE }, { 0x105150, 3, RI_ALL_ONLINE },
-	{ 0x105160, 3, RI_ALL_ONLINE }, { 0x105170, 3, RI_ALL_ONLINE },
-	{ 0x105180, 3, RI_ALL_ONLINE }, { 0x105190, 3, RI_ALL_ONLINE },
-	{ 0x1051a0, 3, RI_ALL_ONLINE }, { 0x1051b0, 3, RI_ALL_ONLINE },
-	{ 0x1051c0, 3, RI_ALL_ONLINE }, { 0x1051d0, 3, RI_ALL_ONLINE },
-	{ 0x1051e0, 3, RI_ALL_ONLINE }, { 0x1051f0, 3, RI_ALL_ONLINE },
-	{ 0x105200, 3, RI_ALL_ONLINE }, { 0x105210, 3, RI_ALL_ONLINE },
-	{ 0x105220, 3, RI_ALL_ONLINE }, { 0x105230, 3, RI_ALL_ONLINE },
-	{ 0x105240, 3, RI_ALL_ONLINE }, { 0x105250, 3, RI_ALL_ONLINE },
-	{ 0x105260, 3, RI_ALL_ONLINE }, { 0x105270, 3, RI_ALL_ONLINE },
-	{ 0x105280, 3, RI_ALL_ONLINE }, { 0x105290, 3, RI_ALL_ONLINE },
-	{ 0x1052a0, 3, RI_ALL_ONLINE }, { 0x1052b0, 3, RI_ALL_ONLINE },
-	{ 0x1052c0, 3, RI_ALL_ONLINE }, { 0x1052d0, 3, RI_ALL_ONLINE },
-	{ 0x1052e0, 3, RI_ALL_ONLINE }, { 0x1052f0, 3, RI_ALL_ONLINE },
-	{ 0x105300, 3, RI_ALL_ONLINE }, { 0x105310, 3, RI_ALL_ONLINE },
-	{ 0x105320, 3, RI_ALL_ONLINE }, { 0x105330, 3, RI_ALL_ONLINE },
-	{ 0x105340, 3, RI_ALL_ONLINE }, { 0x105350, 3, RI_ALL_ONLINE },
-	{ 0x105360, 3, RI_ALL_ONLINE }, { 0x105370, 3, RI_ALL_ONLINE },
-	{ 0x105380, 3, RI_ALL_ONLINE }, { 0x105390, 3, RI_ALL_ONLINE },
-	{ 0x1053a0, 3, RI_ALL_ONLINE }, { 0x1053b0, 3, RI_ALL_ONLINE },
-	{ 0x1053c0, 3, RI_ALL_ONLINE }, { 0x1053d0, 3, RI_ALL_ONLINE },
-	{ 0x1053e0, 3, RI_ALL_ONLINE }, { 0x1053f0, 3, RI_ALL_ONLINE },
-	{ 0x108094, 1, RI_ALL_ONLINE }, { 0x1201b0, 2, RI_ALL_ONLINE },
-	{ 0x12032c, 1, RI_ALL_ONLINE }, { 0x12036c, 3, RI_ALL_ONLINE },
-	{ 0x120408, 2, RI_ALL_ONLINE }, { 0x120414, 15, RI_ALL_ONLINE },
-	{ 0x120478, 2, RI_ALL_ONLINE }, { 0x12052c, 1, RI_ALL_ONLINE },
-	{ 0x120564, 3, RI_ALL_ONLINE }, { 0x12057c, 1, RI_ALL_ONLINE },
-	{ 0x12058c, 1, RI_ALL_ONLINE }, { 0x120608, 1, RI_E1H_ONLINE },
-	{ 0x120808, 1, RI_E1_ONLINE }, { 0x12080c, 2, RI_ALL_ONLINE },
+	{ 0x105000, 256, RI_ALL_ONLINE }, { 0x108094, 1, RI_E1E1H_ONLINE },
+	{ 0x1201b0, 2, RI_ALL_ONLINE }, { 0x12032c, 1, RI_ALL_ONLINE },
+	{ 0x12036c, 3, RI_ALL_ONLINE }, { 0x120408, 2, RI_ALL_ONLINE },
+	{ 0x120414, 15, RI_ALL_ONLINE }, { 0x120478, 2, RI_ALL_ONLINE },
+	{ 0x12052c, 1, RI_ALL_ONLINE }, { 0x120564, 3, RI_ALL_ONLINE },
+	{ 0x12057c, 1, RI_ALL_ONLINE }, { 0x12058c, 1, RI_ALL_ONLINE },
+	{ 0x120608, 1, RI_E1HE2_ONLINE }, { 0x120738, 1, RI_E2_ONLINE },
+	{ 0x120778, 2, RI_E2_ONLINE }, { 0x120808, 3, RI_ALL_ONLINE },
 	{ 0x120818, 1, RI_ALL_ONLINE }, { 0x120820, 1, RI_ALL_ONLINE },
 	{ 0x120828, 1, RI_ALL_ONLINE }, { 0x120830, 1, RI_ALL_ONLINE },
 	{ 0x120838, 1, RI_ALL_ONLINE }, { 0x120840, 1, RI_ALL_ONLINE },
@@ -462,48 +580,50 @@
 	{ 0x1208d8, 1, RI_ALL_ONLINE }, { 0x1208e0, 1, RI_ALL_ONLINE },
 	{ 0x1208e8, 1, RI_ALL_ONLINE }, { 0x1208f0, 1, RI_ALL_ONLINE },
 	{ 0x1208f8, 1, RI_ALL_ONLINE }, { 0x120900, 1, RI_ALL_ONLINE },
-	{ 0x120908, 1, RI_ALL_ONLINE }, { 0x14005c, 2, RI_ALL_ONLINE },
-	{ 0x1400d0, 2, RI_ALL_ONLINE }, { 0x1400e0, 1, RI_ALL_ONLINE },
-	{ 0x1401c8, 1, RI_ALL_ONLINE }, { 0x140200, 6, RI_ALL_ONLINE },
-	{ 0x16101c, 1, RI_ALL_ONLINE }, { 0x16102c, 1, RI_ALL_ONLINE },
-	{ 0x164014, 2, RI_ALL_ONLINE }, { 0x1640f0, 1, RI_ALL_ONLINE },
-	{ 0x166290, 1, RI_ALL_ONLINE }, { 0x1662a0, 1, RI_ALL_ONLINE },
-	{ 0x1662b0, 1, RI_ALL_ONLINE }, { 0x166548, 1, RI_ALL_ONLINE },
-	{ 0x166550, 1, RI_ALL_ONLINE }, { 0x166558, 1, RI_ALL_ONLINE },
-	{ 0x168000, 1, RI_ALL_ONLINE }, { 0x168008, 1, RI_ALL_ONLINE },
-	{ 0x168010, 1, RI_ALL_ONLINE }, { 0x168018, 1, RI_ALL_ONLINE },
-	{ 0x168028, 2, RI_ALL_ONLINE }, { 0x168058, 4, RI_ALL_ONLINE },
-	{ 0x168070, 1, RI_ALL_ONLINE }, { 0x168238, 1, RI_ALL_ONLINE },
-	{ 0x1682d0, 2, RI_ALL_ONLINE }, { 0x1682e0, 1, RI_ALL_ONLINE },
-	{ 0x168300, 67, RI_ALL_ONLINE }, { 0x168410, 2, RI_ALL_ONLINE },
+	{ 0x120908, 1, RI_ALL_ONLINE }, { 0x120940, 5, RI_E2_ONLINE },
+	{ 0x130030, 1, RI_E2_ONLINE }, { 0x13004c, 3, RI_E2_ONLINE },
+	{ 0x130064, 2, RI_E2_ONLINE }, { 0x13009c, 1, RI_E2_ONLINE },
+	{ 0x130130, 1, RI_E2_ONLINE }, { 0x13016c, 1, RI_E2_ONLINE },
+	{ 0x130300, 1, RI_E2_ONLINE }, { 0x130480, 1, RI_E2_ONLINE },
+	{ 0x14005c, 2, RI_ALL_ONLINE }, { 0x1400d0, 2, RI_ALL_ONLINE },
+	{ 0x1400e0, 1, RI_ALL_ONLINE }, { 0x1401c8, 1, RI_ALL_ONLINE },
+	{ 0x140200, 6, RI_ALL_ONLINE }, { 0x16101c, 1, RI_ALL_ONLINE },
+	{ 0x16102c, 1, RI_ALL_ONLINE }, { 0x164014, 2, RI_ALL_ONLINE },
+	{ 0x1640f0, 1, RI_ALL_ONLINE }, { 0x166290, 1, RI_ALL_ONLINE },
+	{ 0x1662a0, 1, RI_ALL_ONLINE }, { 0x1662b0, 1, RI_ALL_ONLINE },
+	{ 0x166548, 1, RI_ALL_ONLINE }, { 0x166550, 1, RI_ALL_ONLINE },
+	{ 0x166558, 1, RI_ALL_ONLINE }, { 0x168000, 1, RI_ALL_ONLINE },
+	{ 0x168008, 1, RI_ALL_ONLINE }, { 0x168010, 1, RI_ALL_ONLINE },
+	{ 0x168018, 1, RI_ALL_ONLINE }, { 0x168028, 2, RI_ALL_ONLINE },
+	{ 0x168058, 4, RI_ALL_ONLINE }, { 0x168070, 1, RI_ALL_ONLINE },
+	{ 0x168238, 1, RI_ALL_ONLINE }, { 0x1682d0, 2, RI_ALL_ONLINE },
+	{ 0x1682e0, 1, RI_ALL_ONLINE }, { 0x168300, 2, RI_E1E1H_ONLINE },
+	{ 0x168308, 65, RI_ALL_ONLINE }, { 0x168410, 2, RI_ALL_ONLINE },
 	{ 0x168438, 1, RI_ALL_ONLINE }, { 0x168448, 1, RI_ALL_ONLINE },
 	{ 0x168a00, 128, RI_ALL_ONLINE }, { 0x16e200, 128, RI_E1H_ONLINE },
-	{ 0x16e404, 2, RI_E1H_ONLINE }, { 0x16e584, 70, RI_E1H_ONLINE },
-	{ 0x1700a4, 1, RI_ALL_ONLINE }, { 0x1700ac, 2, RI_ALL_ONLINE },
-	{ 0x1700c0, 1, RI_ALL_ONLINE }, { 0x170174, 1, RI_ALL_ONLINE },
-	{ 0x170184, 1, RI_ALL_ONLINE }, { 0x1800f4, 1, RI_ALL_ONLINE },
-	{ 0x180104, 1, RI_ALL_ONLINE }, { 0x180114, 1, RI_ALL_ONLINE },
-	{ 0x180124, 1, RI_ALL_ONLINE }, { 0x18026c, 1, RI_ALL_ONLINE },
-	{ 0x1802a0, 1, RI_ALL_ONLINE }, { 0x1a1000, 1, RI_ALL_ONLINE },
-	{ 0x1aa000, 1, RI_E1H_ONLINE }, { 0x1b8000, 1, RI_ALL_ONLINE },
-	{ 0x1b8040, 1, RI_ALL_ONLINE }, { 0x1b8080, 1, RI_ALL_ONLINE },
-	{ 0x1b80c0, 1, RI_ALL_ONLINE }, { 0x200104, 1, RI_ALL_ONLINE },
-	{ 0x200114, 1, RI_ALL_ONLINE }, { 0x200124, 1, RI_ALL_ONLINE },
-	{ 0x200134, 1, RI_ALL_ONLINE }, { 0x20026c, 1, RI_ALL_ONLINE },
-	{ 0x2002a0, 1, RI_ALL_ONLINE }, { 0x221000, 1, RI_ALL_ONLINE },
-	{ 0x227000, 1, RI_E1H_ONLINE }, { 0x238000, 1, RI_ALL_ONLINE },
-	{ 0x238040, 1, RI_ALL_ONLINE }, { 0x238080, 1, RI_ALL_ONLINE },
-	{ 0x2380c0, 1, RI_ALL_ONLINE }, { 0x280104, 1, RI_ALL_ONLINE },
-	{ 0x280114, 1, RI_ALL_ONLINE }, { 0x280124, 1, RI_ALL_ONLINE },
-	{ 0x280134, 1, RI_ALL_ONLINE }, { 0x28026c, 1, RI_ALL_ONLINE },
-	{ 0x2802a0, 1, RI_ALL_ONLINE }, { 0x2a1000, 1, RI_ALL_ONLINE },
-	{ 0x2a9000, 1, RI_E1H_ONLINE }, { 0x2b8000, 1, RI_ALL_ONLINE },
-	{ 0x2b8040, 1, RI_ALL_ONLINE }, { 0x2b8080, 1, RI_ALL_ONLINE },
-	{ 0x2b80c0, 1, RI_ALL_ONLINE }, { 0x300104, 1, RI_ALL_ONLINE },
+	{ 0x16e404, 2, RI_E1H_ONLINE }, { 0x16e584, 64, RI_E1H_ONLINE },
+	{ 0x16e684, 2, RI_E1HE2_ONLINE }, { 0x16e68c, 4, RI_E1H_ONLINE },
+	{ 0x16e6fc, 4, RI_E2_ONLINE }, { 0x1700a4, 1, RI_ALL_ONLINE },
+	{ 0x1700ac, 2, RI_ALL_ONLINE }, { 0x1700c0, 1, RI_ALL_ONLINE },
+	{ 0x170174, 1, RI_ALL_ONLINE }, { 0x170184, 1, RI_ALL_ONLINE },
+	{ 0x1800f4, 1, RI_ALL_ONLINE }, { 0x180104, 1, RI_ALL_ONLINE },
+	{ 0x180114, 1, RI_ALL_ONLINE }, { 0x180124, 1, RI_ALL_ONLINE },
+	{ 0x18026c, 1, RI_ALL_ONLINE }, { 0x1802a0, 1, RI_ALL_ONLINE },
+	{ 0x1b8000, 1, RI_ALL_ONLINE }, { 0x1b8040, 1, RI_ALL_ONLINE },
+	{ 0x1b8080, 1, RI_ALL_ONLINE }, { 0x1b80c0, 1, RI_ALL_ONLINE },
+	{ 0x200104, 1, RI_ALL_ONLINE }, { 0x200114, 1, RI_ALL_ONLINE },
+	{ 0x200124, 1, RI_ALL_ONLINE }, { 0x200134, 1, RI_ALL_ONLINE },
+	{ 0x20026c, 1, RI_ALL_ONLINE }, { 0x2002a0, 1, RI_ALL_ONLINE },
+	{ 0x238000, 1, RI_ALL_ONLINE }, { 0x238040, 1, RI_ALL_ONLINE },
+	{ 0x238080, 1, RI_ALL_ONLINE }, { 0x2380c0, 1, RI_ALL_ONLINE },
+	{ 0x280104, 1, RI_ALL_ONLINE }, { 0x280114, 1, RI_ALL_ONLINE },
+	{ 0x280124, 1, RI_ALL_ONLINE }, { 0x280134, 1, RI_ALL_ONLINE },
+	{ 0x28026c, 1, RI_ALL_ONLINE }, { 0x2802a0, 1, RI_ALL_ONLINE },
+	{ 0x2b8000, 1, RI_ALL_ONLINE }, { 0x2b8040, 1, RI_ALL_ONLINE },
+	{ 0x2b8080, 1, RI_ALL_ONLINE }, { 0x300104, 1, RI_ALL_ONLINE },
 	{ 0x300114, 1, RI_ALL_ONLINE }, { 0x300124, 1, RI_ALL_ONLINE },
 	{ 0x300134, 1, RI_ALL_ONLINE }, { 0x30026c, 1, RI_ALL_ONLINE },
-	{ 0x3002a0, 1, RI_ALL_ONLINE }, { 0x321000, 1, RI_ALL_ONLINE },
-	{ 0x328960, 1, RI_E1H_ONLINE }, { 0x338000, 1, RI_ALL_ONLINE },
+	{ 0x3002a0, 1, RI_ALL_ONLINE }, { 0x338000, 1, RI_ALL_ONLINE },
 	{ 0x338040, 1, RI_ALL_ONLINE }, { 0x338080, 1, RI_ALL_ONLINE },
 	{ 0x3380c0, 1, RI_ALL_ONLINE }
 };
@@ -515,7 +635,6 @@
 	{ 0x1b0c00, 192, 1, read_reg_e1_0, RI_E1_OFFLINE }
 };
 
-
 #define WREGS_COUNT_E1H			1
 static const u32 read_reg_e1h_0[] = { 0x1b1040, 0x1b1000 };
 
@@ -530,22 +649,53 @@
 	{ 0x1b0c00, 128, 2, read_reg_e2_0, RI_E2_OFFLINE }
 };
 
-static const struct dump_sign dump_sign_all = { 0x49aa93ee, 0x40835, 0x22 };
-
+static const struct dump_sign dump_sign_all = { 0x4d18b0a4, 0x60010, 0x3a };
 
 #define TIMER_REGS_COUNT_E1		2
-static const u32 timer_status_regs_e1[TIMER_REGS_COUNT_E1] =
-	{ 0x164014, 0x164018 };
-static const u32 timer_scan_regs_e1[TIMER_REGS_COUNT_E1] =
-	{ 0x1640d0, 0x1640d4 };
 
+static const u32 timer_status_regs_e1[TIMER_REGS_COUNT_E1] = {
+	0x164014, 0x164018 };
+static const u32 timer_scan_regs_e1[TIMER_REGS_COUNT_E1] = {
+	0x1640d0, 0x1640d4 };
 
 #define TIMER_REGS_COUNT_E1H		2
-static const u32 timer_status_regs_e1h[TIMER_REGS_COUNT_E1H] =
-	{ 0x164014, 0x164018 };
-static const u32 timer_scan_regs_e1h[TIMER_REGS_COUNT_E1H] =
-	{ 0x1640d0, 0x1640d4 };
 
+static const u32 timer_status_regs_e1h[TIMER_REGS_COUNT_E1H] = {
+	0x164014, 0x164018 };
+static const u32 timer_scan_regs_e1h[TIMER_REGS_COUNT_E1H] = {
+	0x1640d0, 0x1640d4 };
+
+#define TIMER_REGS_COUNT_E2		2
+
+static const u32 timer_status_regs_e2[TIMER_REGS_COUNT_E2] = {
+	0x164014, 0x164018 };
+static const u32 timer_scan_regs_e2[TIMER_REGS_COUNT_E2] = {
+	0x1640d0, 0x1640d4 };
+
+#define PAGE_MODE_VALUES_E1 0
+
+#define PAGE_READ_REGS_E1 0
+
+#define PAGE_WRITE_REGS_E1 0
+
+static const u32 page_vals_e1[] = { 0 };
+
+static const u32 page_write_regs_e1[] = { 0 };
+
+static const struct reg_addr page_read_regs_e1[] = { { 0x0, 0, RI_E1_ONLINE } };
+
+#define PAGE_MODE_VALUES_E1H 0
+
+#define PAGE_READ_REGS_E1H 0
+
+#define PAGE_WRITE_REGS_E1H 0
+
+static const u32 page_vals_e1h[] = { 0 };
+
+static const u32 page_write_regs_e1h[] = { 0 };
+
+static const struct reg_addr page_read_regs_e1h[] = {
+	{ 0x0, 0, RI_E1H_ONLINE } };
 
 #define PAGE_MODE_VALUES_E2 2
 
diff --git a/drivers/net/bnx2x/bnx2x_ethtool.c b/drivers/net/bnx2x/bnx2x_ethtool.c
index 99c672d..5b44a8b 100644
--- a/drivers/net/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/bnx2x/bnx2x_ethtool.c
@@ -24,6 +24,7 @@
 #include "bnx2x.h"
 #include "bnx2x_cmn.h"
 #include "bnx2x_dump.h"
+#include "bnx2x_init.h"
 
 /* Note: in the format strings below %s is replaced by the queue-name which is
  * either its index or 'fcoe' for the fcoe queue. Make sure the format string
@@ -472,7 +473,7 @@
 {
 	struct bnx2x *bp = netdev_priv(dev);
 	int regdump_len = 0;
-	int i;
+	int i, j, k;
 
 	if (CHIP_IS_E1(bp)) {
 		for (i = 0; i < REGS_COUNT; i++)
@@ -502,6 +503,15 @@
 			if (IS_E2_ONLINE(wreg_addrs_e2[i].info))
 				regdump_len += wreg_addrs_e2[i].size *
 					(1 + wreg_addrs_e2[i].read_regs_count);
+
+		for (i = 0; i < PAGE_MODE_VALUES_E2; i++)
+			for (j = 0; j < PAGE_WRITE_REGS_E2; j++) {
+				for (k = 0; k < PAGE_READ_REGS_E2; k++)
+					if (IS_E2_ONLINE(page_read_regs_e2[k].
+							 info))
+						regdump_len +=
+						page_read_regs_e2[k].size;
+			}
 	}
 	regdump_len *= 4;
 	regdump_len += sizeof(struct dump_hdr);
@@ -539,6 +549,12 @@
 	if (!netif_running(bp->dev))
 		return;
 
+	/* Disable parity attentions for the duration of the dump, since
+	 * reading registers that were never written may cause false
+	 * alarms. Parity attentions are re-enabled right after the dump.
+	 */
+	bnx2x_disable_blocks_parity(bp);
+
 	dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
 	dump_hdr.dump_sign = dump_sign_all;
 	dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
@@ -580,6 +596,10 @@
 
 		bnx2x_read_pages_regs_e2(bp, p);
 	}
+	/* Re-enable parity attentions */
+	bnx2x_clear_blocks_parity(bp);
+	if (CHIP_PARITY_ENABLED(bp))
+		bnx2x_enable_blocks_parity(bp);
 }
 
 #define PHY_FW_VER_LEN			20
diff --git a/drivers/net/bnx2x/bnx2x_init.h b/drivers/net/bnx2x/bnx2x_init.h
index a9d5487..5a268e9 100644
--- a/drivers/net/bnx2x/bnx2x_init.h
+++ b/drivers/net/bnx2x/bnx2x_init.h
@@ -192,5 +192,225 @@
 	u64 next;
 };
 
+/****************************************************************************
+* Parity configuration
+****************************************************************************/
+#define BLOCK_PRTY_INFO(block, en_mask, m1, m1h, m2) \
+{ \
+	block##_REG_##block##_PRTY_MASK, \
+	block##_REG_##block##_PRTY_STS_CLR, \
+	en_mask, {m1, m1h, m2}, #block \
+}
+
+#define BLOCK_PRTY_INFO_0(block, en_mask, m1, m1h, m2) \
+{ \
+	block##_REG_##block##_PRTY_MASK_0, \
+	block##_REG_##block##_PRTY_STS_CLR_0, \
+	en_mask, {m1, m1h, m2}, #block"_0" \
+}
+
+#define BLOCK_PRTY_INFO_1(block, en_mask, m1, m1h, m2) \
+{ \
+	block##_REG_##block##_PRTY_MASK_1, \
+	block##_REG_##block##_PRTY_STS_CLR_1, \
+	en_mask, {m1, m1h, m2}, #block"_1" \
+}
+
+static const struct {
+	u32 mask_addr;
+	u32 sts_clr_addr;
+	u32 en_mask;		/* Mask to enable parity attentions */
+	struct {
+		u32 e1;		/* 57710 */
+		u32 e1h;	/* 57711 */
+		u32 e2;		/* 57712 */
+	} reg_mask;		/* Register mask (all valid bits) */
+	char name[7];		/* Block's longest name is 6 characters long
+				 * (name + suffix)
+				 */
+} bnx2x_blocks_parity_data[] = {
+	/* bit 19 masked */
+	/* REG_WR(bp, PXP_REG_PXP_PRTY_MASK, 0x80000); */
+	/* bit 5,18,20-31 */
+	/* REG_WR(bp, PXP2_REG_PXP2_PRTY_MASK_0, 0xfff40020); */
+	/* bit 5 */
+	/* REG_WR(bp, PXP2_REG_PXP2_PRTY_MASK_1, 0x20);	*/
+	/* REG_WR(bp, HC_REG_HC_PRTY_MASK, 0x0); */
+	/* REG_WR(bp, MISC_REG_MISC_PRTY_MASK, 0x0); */
+
+	/* Block IGU, MISC, PXP and PXP2 parity errors, since we don't
+	 * want to handle the "system kill" flow at the moment.
+	 */
+	BLOCK_PRTY_INFO(PXP, 0x3ffffff, 0x3ffffff, 0x3ffffff, 0x3ffffff),
+	BLOCK_PRTY_INFO_0(PXP2,	0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff),
+	BLOCK_PRTY_INFO_1(PXP2,	0x7ff, 0x7f, 0x7f, 0x7ff),
+	BLOCK_PRTY_INFO(HC, 0x7, 0x7, 0x7, 0),
+	BLOCK_PRTY_INFO(IGU, 0x7ff, 0, 0, 0x7ff),
+	BLOCK_PRTY_INFO(MISC, 0x1, 0x1, 0x1, 0x1),
+	BLOCK_PRTY_INFO(QM, 0, 0x1ff, 0xfff, 0xfff),
+	BLOCK_PRTY_INFO(DORQ, 0, 0x3, 0x3, 0x3),
+	{GRCBASE_UPB + PB_REG_PB_PRTY_MASK,
+		GRCBASE_UPB + PB_REG_PB_PRTY_STS_CLR, 0,
+		{0xf, 0xf, 0xf}, "UPB"},
+	{GRCBASE_XPB + PB_REG_PB_PRTY_MASK,
+		GRCBASE_XPB + PB_REG_PB_PRTY_STS_CLR, 0,
+		{0xf, 0xf, 0xf}, "XPB"},
+	BLOCK_PRTY_INFO(SRC, 0x4, 0x7, 0x7, 0x7),
+	BLOCK_PRTY_INFO(CDU, 0, 0x1f, 0x1f, 0x1f),
+	BLOCK_PRTY_INFO(CFC, 0, 0xf, 0xf, 0xf),
+	BLOCK_PRTY_INFO(DBG, 0, 0x1, 0x1, 0x1),
+	BLOCK_PRTY_INFO(DMAE, 0, 0xf, 0xf, 0xf),
+	BLOCK_PRTY_INFO(BRB1, 0, 0xf, 0xf, 0xf),
+	BLOCK_PRTY_INFO(PRS, (1<<6), 0xff, 0xff, 0xff),
+	BLOCK_PRTY_INFO(TSDM, 0x18, 0x7ff, 0x7ff, 0x7ff),
+	BLOCK_PRTY_INFO(CSDM, 0x8, 0x7ff, 0x7ff, 0x7ff),
+	BLOCK_PRTY_INFO(USDM, 0x38, 0x7ff, 0x7ff, 0x7ff),
+	BLOCK_PRTY_INFO(XSDM, 0x8, 0x7ff, 0x7ff, 0x7ff),
+	BLOCK_PRTY_INFO_0(TSEM, 0, 0xffffffff, 0xffffffff, 0xffffffff),
+	BLOCK_PRTY_INFO_1(TSEM, 0, 0x3, 0x1f, 0x3f),
+	BLOCK_PRTY_INFO_0(USEM, 0, 0xffffffff, 0xffffffff, 0xffffffff),
+	BLOCK_PRTY_INFO_1(USEM, 0, 0x3, 0x1f, 0x1f),
+	BLOCK_PRTY_INFO_0(CSEM, 0, 0xffffffff, 0xffffffff, 0xffffffff),
+	BLOCK_PRTY_INFO_1(CSEM, 0, 0x3, 0x1f, 0x1f),
+	BLOCK_PRTY_INFO_0(XSEM, 0, 0xffffffff, 0xffffffff, 0xffffffff),
+	BLOCK_PRTY_INFO_1(XSEM, 0, 0x3, 0x1f, 0x3f),
+};
+
+
+/* [28] MCP Latched rom_parity
+ * [29] MCP Latched ump_rx_parity
+ * [30] MCP Latched ump_tx_parity
+ * [31] MCP Latched scpad_parity
+ */
+#define MISC_AEU_ENABLE_MCP_PRTY_BITS	\
+	(AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY | \
+	 AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY | \
+	 AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY | \
+	 AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY)
+
+/* The registers below control the MCP parity attention output. When
+ * MISC_AEU_ENABLE_MCP_PRTY_BITS are set, attentions are
+ * enabled; when cleared, they are disabled.
+ */
+static const u32 mcp_attn_ctl_regs[] = {
+	MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0,
+	MISC_REG_AEU_ENABLE4_NIG_0,
+	MISC_REG_AEU_ENABLE4_PXP_0,
+	MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0,
+	MISC_REG_AEU_ENABLE4_NIG_1,
+	MISC_REG_AEU_ENABLE4_PXP_1
+};
+
+static inline void bnx2x_set_mcp_parity(struct bnx2x *bp, u8 enable)
+{
+	int i;
+	u32 reg_val;
+
+	for (i = 0; i < ARRAY_SIZE(mcp_attn_ctl_regs); i++) {
+		reg_val = REG_RD(bp, mcp_attn_ctl_regs[i]);
+
+		if (enable)
+			reg_val |= MISC_AEU_ENABLE_MCP_PRTY_BITS;
+		else
+			reg_val &= ~MISC_AEU_ENABLE_MCP_PRTY_BITS;
+
+		REG_WR(bp, mcp_attn_ctl_regs[i], reg_val);
+	}
+}
+
+static inline u32 bnx2x_parity_reg_mask(struct bnx2x *bp, int idx)
+{
+	if (CHIP_IS_E1(bp))
+		return bnx2x_blocks_parity_data[idx].reg_mask.e1;
+	else if (CHIP_IS_E1H(bp))
+		return bnx2x_blocks_parity_data[idx].reg_mask.e1h;
+	else
+		return bnx2x_blocks_parity_data[idx].reg_mask.e2;
+}
+
+static inline void bnx2x_disable_blocks_parity(struct bnx2x *bp)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(bnx2x_blocks_parity_data); i++) {
+		u32 dis_mask = bnx2x_parity_reg_mask(bp, i);
+
+		if (dis_mask) {
+			REG_WR(bp, bnx2x_blocks_parity_data[i].mask_addr,
+			       dis_mask);
+			DP(NETIF_MSG_HW, "Setting parity mask "
+						 "for %s to\t\t0x%x\n",
+				    bnx2x_blocks_parity_data[i].name, dis_mask);
+		}
+	}
+
+	/* Disable MCP parity attentions */
+	bnx2x_set_mcp_parity(bp, false);
+}
+
+/**
+ * Clear the parity error status registers.
+ */
+static inline void bnx2x_clear_blocks_parity(struct bnx2x *bp)
+{
+	int i;
+	u32 reg_val, mcp_aeu_bits =
+		AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY |
+		AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY |
+		AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY |
+		AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY;
+
+	/* Clear SEM_FAST parities */
+	REG_WR(bp, XSEM_REG_FAST_MEMORY + SEM_FAST_REG_PARITY_RST, 0x1);
+	REG_WR(bp, TSEM_REG_FAST_MEMORY + SEM_FAST_REG_PARITY_RST, 0x1);
+	REG_WR(bp, USEM_REG_FAST_MEMORY + SEM_FAST_REG_PARITY_RST, 0x1);
+	REG_WR(bp, CSEM_REG_FAST_MEMORY + SEM_FAST_REG_PARITY_RST, 0x1);
+
+	for (i = 0; i < ARRAY_SIZE(bnx2x_blocks_parity_data); i++) {
+		u32 reg_mask = bnx2x_parity_reg_mask(bp, i);
+
+		if (reg_mask) {
+			reg_val = REG_RD(bp, bnx2x_blocks_parity_data[i].
+					 sts_clr_addr);
+			if (reg_val & reg_mask)
+				DP(NETIF_MSG_HW,
+					    "Parity errors in %s: 0x%x\n",
+					    bnx2x_blocks_parity_data[i].name,
+					    reg_val & reg_mask);
+		}
+	}
+
+	/* Check if there were parity attentions in MCP */
+	reg_val = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_MCP);
+	if (reg_val & mcp_aeu_bits)
+		DP(NETIF_MSG_HW, "Parity error in MCP: 0x%x\n",
+		   reg_val & mcp_aeu_bits);
+
+	/* Clear parity attentions in MCP:
+	 * [7]  clears Latched rom_parity
+	 * [8]  clears Latched ump_rx_parity
+	 * [9]  clears Latched ump_tx_parity
+	 * [10] clears Latched scpad_parity (both ports)
+	 */
+	REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x780);
+}
+
+static inline void bnx2x_enable_blocks_parity(struct bnx2x *bp)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(bnx2x_blocks_parity_data); i++) {
+		u32 reg_mask = bnx2x_parity_reg_mask(bp, i);
+
+		if (reg_mask)
+			REG_WR(bp, bnx2x_blocks_parity_data[i].mask_addr,
+				bnx2x_blocks_parity_data[i].en_mask & reg_mask);
+	}
+
+	/* Enable MCP parity attentions */
+	bnx2x_set_mcp_parity(bp, true);
+}
+
+
 #endif /* BNX2X_INIT_H */
 
diff --git a/drivers/net/bnx2x/bnx2x_main.c b/drivers/net/bnx2x/bnx2x_main.c
index 489a5512..8cdcf5b3 100644
--- a/drivers/net/bnx2x/bnx2x_main.c
+++ b/drivers/net/bnx2x/bnx2x_main.c
@@ -3152,7 +3152,6 @@
 #define LOAD_COUNTER_MASK	(((u32)0x1 << LOAD_COUNTER_BITS) - 1)
 #define RESET_DONE_FLAG_MASK	(~LOAD_COUNTER_MASK)
 #define RESET_DONE_FLAG_SHIFT	LOAD_COUNTER_BITS
-#define CHIP_PARITY_SUPPORTED(bp)   (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp))
 
 /*
  * should be run under rtnl lock
@@ -3527,7 +3526,7 @@
 	   try to handle this event */
 	bnx2x_acquire_alr(bp);
 
-	if (bnx2x_chk_parity_attn(bp)) {
+	if (CHIP_PARITY_ENABLED(bp) && bnx2x_chk_parity_attn(bp)) {
 		bp->recovery_state = BNX2X_RECOVERY_INIT;
 		bnx2x_set_reset_in_progress(bp);
 		schedule_delayed_work(&bp->reset_task, 0);
@@ -4754,7 +4753,7 @@
 	return 0; /* OK */
 }
 
-static void enable_blocks_attention(struct bnx2x *bp)
+static void bnx2x_enable_blocks_attention(struct bnx2x *bp)
 {
 	REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
 	if (CHIP_IS_E2(bp))
@@ -4808,53 +4807,9 @@
 	REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
 	REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
 /*	REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
-	REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18);		/* bit 3,4 masked */
+	REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);		/* bit 3,4 masked */
 }
 
-static const struct {
-	u32 addr;
-	u32 mask;
-} bnx2x_parity_mask[] = {
-	{PXP_REG_PXP_PRTY_MASK,		0x3ffffff},
-	{PXP2_REG_PXP2_PRTY_MASK_0,	0xffffffff},
-	{PXP2_REG_PXP2_PRTY_MASK_1,	0x7f},
-	{HC_REG_HC_PRTY_MASK,		0x7},
-	{MISC_REG_MISC_PRTY_MASK,	0x1},
-	{QM_REG_QM_PRTY_MASK,		0x0},
-	{DORQ_REG_DORQ_PRTY_MASK,	0x0},
-	{GRCBASE_UPB + PB_REG_PB_PRTY_MASK, 0x0},
-	{GRCBASE_XPB + PB_REG_PB_PRTY_MASK, 0x0},
-	{SRC_REG_SRC_PRTY_MASK,		0x4}, /* bit 2 */
-	{CDU_REG_CDU_PRTY_MASK,		0x0},
-	{CFC_REG_CFC_PRTY_MASK,		0x0},
-	{DBG_REG_DBG_PRTY_MASK,		0x0},
-	{DMAE_REG_DMAE_PRTY_MASK,	0x0},
-	{BRB1_REG_BRB1_PRTY_MASK,	0x0},
-	{PRS_REG_PRS_PRTY_MASK,		(1<<6)},/* bit 6 */
-	{TSDM_REG_TSDM_PRTY_MASK,	0x18},	/* bit 3,4 */
-	{CSDM_REG_CSDM_PRTY_MASK,	0x8},	/* bit 3 */
-	{USDM_REG_USDM_PRTY_MASK,	0x38},  /* bit 3,4,5 */
-	{XSDM_REG_XSDM_PRTY_MASK,	0x8},	/* bit 3 */
-	{TSEM_REG_TSEM_PRTY_MASK_0,	0x0},
-	{TSEM_REG_TSEM_PRTY_MASK_1,	0x0},
-	{USEM_REG_USEM_PRTY_MASK_0,	0x0},
-	{USEM_REG_USEM_PRTY_MASK_1,	0x0},
-	{CSEM_REG_CSEM_PRTY_MASK_0,	0x0},
-	{CSEM_REG_CSEM_PRTY_MASK_1,	0x0},
-	{XSEM_REG_XSEM_PRTY_MASK_0,	0x0},
-	{XSEM_REG_XSEM_PRTY_MASK_1,	0x0}
-};
-
-static void enable_blocks_parity(struct bnx2x *bp)
-{
-	int i;
-
-	for (i = 0; i < ARRAY_SIZE(bnx2x_parity_mask); i++)
-		REG_WR(bp, bnx2x_parity_mask[i].addr,
-			bnx2x_parity_mask[i].mask);
-}
-
-
 static void bnx2x_reset_common(struct bnx2x *bp)
 {
 	/* reset_common */
@@ -5082,7 +5037,7 @@
 		memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
 		memset(&ilt, 0, sizeof(struct bnx2x_ilt));
 
-		/* initalize dummy TM client */
+		/* initialize dummy TM client */
 		ilt_cli.start = 0;
 		ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
 		ilt_cli.client_num = ILT_CLIENT_TM;
@@ -5350,9 +5305,9 @@
 	/* clear PXP2 attentions */
 	REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
 
-	enable_blocks_attention(bp);
-	if (CHIP_PARITY_SUPPORTED(bp))
-		enable_blocks_parity(bp);
+	bnx2x_enable_blocks_attention(bp);
+	if (CHIP_PARITY_ENABLED(bp))
+		bnx2x_enable_blocks_parity(bp);
 
 	if (!BP_NOMCP(bp)) {
 		/* In E2 2-PORT mode, same ext phy is used for the two paths */
@@ -8751,13 +8706,6 @@
 		dev_err(&bp->pdev->dev, "MCP disabled, "
 					"must load devices in order!\n");
 
-	/* Set multi queue mode */
-	if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
-	    ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
-		dev_err(&bp->pdev->dev, "Multi disabled since int_mode "
-					"requested is not MSI-X\n");
-		multi_mode = ETH_RSS_MODE_DISABLED;
-	}
 	bp->multi_mode = multi_mode;
 	bp->int_mode = int_mode;
 
@@ -9560,9 +9508,15 @@
 	/* Delete all NAPI objects */
 	bnx2x_del_all_napi(bp);
 
+	/* Power on: we can't let PCI layer write to us while we are in D3 */
+	bnx2x_set_power_state(bp, PCI_D0);
+
 	/* Disable MSI/MSI-X */
 	bnx2x_disable_msi(bp);
 
+	/* Power off */
+	bnx2x_set_power_state(bp, PCI_D3hot);
+
 	/* Make sure RESET task is not scheduled before continuing */
 	cancel_delayed_work_sync(&bp->reset_task);
 
diff --git a/drivers/net/bnx2x/bnx2x_reg.h b/drivers/net/bnx2x/bnx2x_reg.h
index bfd875b..c939683 100644
--- a/drivers/net/bnx2x/bnx2x_reg.h
+++ b/drivers/net/bnx2x/bnx2x_reg.h
@@ -18,6 +18,8 @@
  * WR - Write Clear (write 1 to clear the bit)
  *
  */
+#ifndef BNX2X_REG_H
+#define BNX2X_REG_H
 
 #define ATC_ATC_INT_STS_REG_ADDRESS_ERROR			 (0x1<<0)
 #define ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS		 (0x1<<2)
@@ -39,6 +41,8 @@
 #define BRB1_REG_BRB1_PRTY_MASK 				 0x60138
 /* [R 4] Parity register #0 read */
 #define BRB1_REG_BRB1_PRTY_STS					 0x6012c
+/* [RC 4] Parity register #0 read clear */
+#define BRB1_REG_BRB1_PRTY_STS_CLR				 0x60130
 /* [RW 10] At address BRB1_IND_FREE_LIST_PRS_CRDT initialize free head. At
  * address BRB1_IND_FREE_LIST_PRS_CRDT+1 initialize free tail. At address
  * BRB1_IND_FREE_LIST_PRS_CRDT+2 initialize parser initial credit. Warning -
@@ -132,8 +136,12 @@
 #define CCM_REG_CCM_INT_MASK					 0xd01e4
 /* [R 11] Interrupt register #0 read */
 #define CCM_REG_CCM_INT_STS					 0xd01d8
+/* [RW 27] Parity mask register #0 read/write */
+#define CCM_REG_CCM_PRTY_MASK					 0xd01f4
 /* [R 27] Parity register #0 read */
 #define CCM_REG_CCM_PRTY_STS					 0xd01e8
+/* [RC 27] Parity register #0 read clear */
+#define CCM_REG_CCM_PRTY_STS_CLR				 0xd01ec
 /* [RW 3] The size of AG context region 0 in REG-pairs. Designates the MS
    REG-pair number (e.g. if region 0 is 6 REG-pairs; the value should be 5).
    Is used to determine the number of the AG context REG-pairs written back;
@@ -350,6 +358,8 @@
 #define CDU_REG_CDU_PRTY_MASK					 0x10104c
 /* [R 5] Parity register #0 read */
 #define CDU_REG_CDU_PRTY_STS					 0x101040
+/* [RC 5] Parity register #0 read clear */
+#define CDU_REG_CDU_PRTY_STS_CLR				 0x101044
 /* [RC 32] logging of error data in case of a CDU load error:
    {expected_cid[15:0]; xpected_type[2:0]; xpected_region[2:0]; ctive_error;
    ype_error; ctual_active; ctual_compressed_context}; */
@@ -381,6 +391,8 @@
 #define CFC_REG_CFC_PRTY_MASK					 0x104118
 /* [R 4] Parity register #0 read */
 #define CFC_REG_CFC_PRTY_STS					 0x10410c
+/* [RC 4] Parity register #0 read clear */
+#define CFC_REG_CFC_PRTY_STS_CLR				 0x104110
 /* [RW 21] CID cam access (21:1 - Data; alid - 0) */
 #define CFC_REG_CID_CAM 					 0x104800
 #define CFC_REG_CONTROL0					 0x104028
@@ -466,6 +478,8 @@
 #define CSDM_REG_CSDM_PRTY_MASK 				 0xc22bc
 /* [R 11] Parity register #0 read */
 #define CSDM_REG_CSDM_PRTY_STS					 0xc22b0
+/* [RC 11] Parity register #0 read clear */
+#define CSDM_REG_CSDM_PRTY_STS_CLR				 0xc22b4
 #define CSDM_REG_ENABLE_IN1					 0xc2238
 #define CSDM_REG_ENABLE_IN2					 0xc223c
 #define CSDM_REG_ENABLE_OUT1					 0xc2240
@@ -556,6 +570,9 @@
 /* [R 32] Parity register #0 read */
 #define CSEM_REG_CSEM_PRTY_STS_0				 0x200124
 #define CSEM_REG_CSEM_PRTY_STS_1				 0x200134
+/* [RC 32] Parity register #0 read clear */
+#define CSEM_REG_CSEM_PRTY_STS_CLR_0				 0x200128
+#define CSEM_REG_CSEM_PRTY_STS_CLR_1				 0x200138
 #define CSEM_REG_ENABLE_IN					 0x2000a4
 #define CSEM_REG_ENABLE_OUT					 0x2000a8
 /* [RW 32] This address space contains all registers and memories that are
@@ -648,6 +665,8 @@
 #define DBG_REG_DBG_PRTY_MASK					 0xc0a8
 /* [R 1] Parity register #0 read */
 #define DBG_REG_DBG_PRTY_STS					 0xc09c
+/* [RC 1] Parity register #0 read clear */
+#define DBG_REG_DBG_PRTY_STS_CLR				 0xc0a0
 /* [RW 1] When set the DMAE will process the commands as in E1.5. 1.The
  * function that is used is always SRC-PCI; 2.VF_Valid = 0; 3.VFID=0;
  * 4.Completion function=0; 5.Error handling=0 */
@@ -668,6 +687,8 @@
 #define DMAE_REG_DMAE_PRTY_MASK 				 0x102064
 /* [R 4] Parity register #0 read */
 #define DMAE_REG_DMAE_PRTY_STS					 0x102058
+/* [RC 4] Parity register #0 read clear */
+#define DMAE_REG_DMAE_PRTY_STS_CLR				 0x10205c
 /* [RW 1] Command 0 go. */
 #define DMAE_REG_GO_C0						 0x102080
 /* [RW 1] Command 1 go. */
@@ -734,6 +755,8 @@
 #define DORQ_REG_DORQ_PRTY_MASK 				 0x170190
 /* [R 2] Parity register #0 read */
 #define DORQ_REG_DORQ_PRTY_STS					 0x170184
+/* [RC 2] Parity register #0 read clear */
+#define DORQ_REG_DORQ_PRTY_STS_CLR				 0x170188
 /* [RW 8] The address to write the DPM CID to STORM. */
 #define DORQ_REG_DPM_CID_ADDR					 0x170044
 /* [RW 5] The DPM mode CID extraction offset. */
@@ -842,8 +865,12 @@
 /* [R 1] data availble for error memory. If this bit is clear do not red
  * from error_handling_memory. */
 #define IGU_REG_ERROR_HANDLING_DATA_VALID			 0x130130
+/* [RW 11] Parity mask register #0 read/write */
+#define IGU_REG_IGU_PRTY_MASK					 0x1300a8
 /* [R 11] Parity register #0 read */
 #define IGU_REG_IGU_PRTY_STS					 0x13009c
+/* [RC 11] Parity register #0 read clear */
+#define IGU_REG_IGU_PRTY_STS_CLR				 0x1300a0
 /* [R 4] Debug: int_handle_fsm */
 #define IGU_REG_INT_HANDLE_FSM					 0x130050
 #define IGU_REG_LEADING_EDGE_LATCH				 0x130134
@@ -1501,6 +1528,8 @@
 #define MISC_REG_MISC_PRTY_MASK 				 0xa398
 /* [R 1] Parity register #0 read */
 #define MISC_REG_MISC_PRTY_STS					 0xa38c
+/* [RC 1] Parity register #0 read clear */
+#define MISC_REG_MISC_PRTY_STS_CLR				 0xa390
 #define MISC_REG_NIG_WOL_P0					 0xa270
 #define MISC_REG_NIG_WOL_P1					 0xa274
 /* [R 1] If set indicate that the pcie_rst_b was asserted without perst
@@ -1604,7 +1633,7 @@
    (~misc_registers_sw_timer_cfg_4.sw_timer_cfg_4[1] ) is set */
 #define MISC_REG_SW_TIMER_RELOAD_VAL_4				 0xa2fc
 /* [RW 32] the value of the counter for sw timers1-8. there are 8 addresses
-   in this register. addres 0 - timer 1; address 1 - timer 2, ...  address 7 -
+   in this register. address 0 - timer 1; address 1 - timer 2, ...  address 7 -
    timer 8 */
 #define MISC_REG_SW_TIMER_VAL					 0xa5c0
 /* [RW 1] Set by the MCP to remember if one or more of the drivers is/are
@@ -2082,6 +2111,10 @@
 #define PBF_REG_PBF_INT_MASK					 0x1401d4
 /* [R 5] Interrupt register #0 read */
 #define PBF_REG_PBF_INT_STS					 0x1401c8
+/* [RW 20] Parity mask register #0 read/write */
+#define PBF_REG_PBF_PRTY_MASK					 0x1401e4
+/* [RC 20] Parity register #0 read clear */
+#define PBF_REG_PBF_PRTY_STS_CLR				 0x1401dc
 #define PB_REG_CONTROL						 0
 /* [RW 2] Interrupt mask register #0 read/write */
 #define PB_REG_PB_INT_MASK					 0x28
@@ -2091,6 +2124,8 @@
 #define PB_REG_PB_PRTY_MASK					 0x38
 /* [R 4] Parity register #0 read */
 #define PB_REG_PB_PRTY_STS					 0x2c
+/* [RC 4] Parity register #0 read clear */
+#define PB_REG_PB_PRTY_STS_CLR					 0x30
 #define PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR		 (0x1<<0)
 #define PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW	 (0x1<<8)
 #define PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR	 (0x1<<1)
@@ -2446,6 +2481,8 @@
 #define PRS_REG_PRS_PRTY_MASK					 0x401a4
 /* [R 8] Parity register #0 read */
 #define PRS_REG_PRS_PRTY_STS					 0x40198
+/* [RC 8] Parity register #0 read clear */
+#define PRS_REG_PRS_PRTY_STS_CLR				 0x4019c
 /* [RW 8] Context region for pure acknowledge packets. Used in CFC load
    request message */
 #define PRS_REG_PURE_REGIONS					 0x40024
@@ -2599,6 +2636,9 @@
 /* [R 32] Parity register #0 read */
 #define PXP2_REG_PXP2_PRTY_STS_0				 0x12057c
 #define PXP2_REG_PXP2_PRTY_STS_1				 0x12058c
+/* [RC 32] Parity register #0 read clear */
+#define PXP2_REG_PXP2_PRTY_STS_CLR_0				 0x120580
+#define PXP2_REG_PXP2_PRTY_STS_CLR_1				 0x120590
 /* [R 1] Debug only: The 'almost full' indication from each fifo (gives
    indication about backpressure) */
 #define PXP2_REG_RD_ALMOST_FULL_0				 0x120424
@@ -3001,6 +3041,8 @@
 #define PXP_REG_PXP_PRTY_MASK					 0x103094
 /* [R 26] Parity register #0 read */
 #define PXP_REG_PXP_PRTY_STS					 0x103088
+/* [RC 27] Parity register #0 read clear */
+#define PXP_REG_PXP_PRTY_STS_CLR				 0x10308c
 /* [RW 4] The activity counter initial increment value sent in the load
    request */
 #define QM_REG_ACTCTRINITVAL_0					 0x168040
@@ -3157,6 +3199,8 @@
 #define QM_REG_QM_PRTY_MASK					 0x168454
 /* [R 12] Parity register #0 read */
 #define QM_REG_QM_PRTY_STS					 0x168448
+/* [RC 12] Parity register #0 read clear */
+#define QM_REG_QM_PRTY_STS_CLR					 0x16844c
 /* [R 32] Current queues in pipeline: Queues from 32 to 63 */
 #define QM_REG_QSTATUS_HIGH					 0x16802c
 /* [R 32] Current queues in pipeline: Queues from 96 to 127 */
@@ -3442,6 +3486,8 @@
 #define QM_REG_WRRWEIGHTS_9					 0x168848
 /* [R 6] Keep the fill level of the fifo from write client 1 */
 #define QM_REG_XQM_WRC_FIFOLVL					 0x168000
+/* [W 1] reset to parity interrupt */
+#define SEM_FAST_REG_PARITY_RST					 0x18840
 #define SRC_REG_COUNTFREE0					 0x40500
 /* [RW 1] If clr the searcher is compatible to E1 A0 - support only two
    ports. If set the searcher support 8 functions. */
@@ -3470,6 +3516,8 @@
 #define SRC_REG_SRC_PRTY_MASK					 0x404c8
 /* [R 3] Parity register #0 read */
 #define SRC_REG_SRC_PRTY_STS					 0x404bc
+/* [RC 3] Parity register #0 read clear */
+#define SRC_REG_SRC_PRTY_STS_CLR				 0x404c0
 /* [R 4] Used to read the value of the XX protection CAM occupancy counter. */
 #define TCM_REG_CAM_OCCUP					 0x5017c
 /* [RW 1] CDU AG read Interface enable. If 0 - the request input is
@@ -3596,8 +3644,12 @@
 #define TCM_REG_TCM_INT_MASK					 0x501dc
 /* [R 11] Interrupt register #0 read */
 #define TCM_REG_TCM_INT_STS					 0x501d0
+/* [RW 27] Parity mask register #0 read/write */
+#define TCM_REG_TCM_PRTY_MASK					 0x501ec
 /* [R 27] Parity register #0 read */
 #define TCM_REG_TCM_PRTY_STS					 0x501e0
+/* [RC 27] Parity register #0 read clear */
+#define TCM_REG_TCM_PRTY_STS_CLR				 0x501e4
 /* [RW 3] The size of AG context region 0 in REG-pairs. Designates the MS
    REG-pair number (e.g. if region 0 is 6 REG-pairs; the value should be 5).
    Is used to determine the number of the AG context REG-pairs written back;
@@ -3755,6 +3807,10 @@
 #define TM_REG_TM_INT_MASK					 0x1640fc
 /* [R 1] Interrupt register #0 read */
 #define TM_REG_TM_INT_STS					 0x1640f0
+/* [RW 7] Parity mask register #0 read/write */
+#define TM_REG_TM_PRTY_MASK					 0x16410c
+/* [RC 7] Parity register #0 read clear */
+#define TM_REG_TM_PRTY_STS_CLR					 0x164104
 /* [RW 8] The event id for aggregated interrupt 0 */
 #define TSDM_REG_AGG_INT_EVENT_0				 0x42038
 #define TSDM_REG_AGG_INT_EVENT_1				 0x4203c
@@ -3835,6 +3891,8 @@
 #define TSDM_REG_TSDM_PRTY_MASK 				 0x422bc
 /* [R 11] Parity register #0 read */
 #define TSDM_REG_TSDM_PRTY_STS					 0x422b0
+/* [RC 11] Parity register #0 read clear */
+#define TSDM_REG_TSDM_PRTY_STS_CLR				 0x422b4
 /* [RW 5] The number of time_slots in the arbitration cycle */
 #define TSEM_REG_ARB_CYCLE_SIZE 				 0x180034
 /* [RW 3] The source that is associated with arbitration element 0. Source
@@ -3914,6 +3972,9 @@
 #define TSEM_REG_SLOW_EXT_STORE_EMPTY				 0x1802a0
 /* [RW 8] List of free threads . There is a bit per thread. */
 #define TSEM_REG_THREADS_LIST					 0x1802e4
+/* [RC 32] Parity register #0 read clear */
+#define TSEM_REG_TSEM_PRTY_STS_CLR_0				 0x180118
+#define TSEM_REG_TSEM_PRTY_STS_CLR_1				 0x180128
 /* [RW 3] The arbitration scheme of time_slot 0 */
 #define TSEM_REG_TS_0_AS					 0x180038
 /* [RW 3] The arbitration scheme of time_slot 10 */
@@ -4116,6 +4177,8 @@
 #define UCM_REG_UCM_INT_STS					 0xe01c8
 /* [R 27] Parity register #0 read */
 #define UCM_REG_UCM_PRTY_STS					 0xe01d8
+/* [RC 27] Parity register #0 read clear */
+#define UCM_REG_UCM_PRTY_STS_CLR				 0xe01dc
 /* [RW 2] The size of AG context region 0 in REG-pairs. Designates the MS
    REG-pair number (e.g. if region 0 is 6 REG-pairs; the value should be 5).
    Is used to determine the number of the AG context REG-pairs written back;
@@ -4292,6 +4355,8 @@
 #define USDM_REG_USDM_PRTY_MASK 				 0xc42c0
 /* [R 11] Parity register #0 read */
 #define USDM_REG_USDM_PRTY_STS					 0xc42b4
+/* [RC 11] Parity register #0 read clear */
+#define USDM_REG_USDM_PRTY_STS_CLR				 0xc42b8
 /* [RW 5] The number of time_slots in the arbitration cycle */
 #define USEM_REG_ARB_CYCLE_SIZE 				 0x300034
 /* [RW 3] The source that is associated with arbitration element 0. Source
@@ -4421,6 +4486,9 @@
 /* [R 32] Parity register #0 read */
 #define USEM_REG_USEM_PRTY_STS_0				 0x300124
 #define USEM_REG_USEM_PRTY_STS_1				 0x300134
+/* [RC 32] Parity register #0 read clear */
+#define USEM_REG_USEM_PRTY_STS_CLR_0				 0x300128
+#define USEM_REG_USEM_PRTY_STS_CLR_1				 0x300138
 /* [W 7] VF or PF ID for reset error bit. Values 0-63 reset error bit for 64
  * VF; values 64-67 reset error for 4 PF; values 68-127 are not valid. */
 #define USEM_REG_VFPF_ERR_NUM					 0x300380
@@ -4797,6 +4865,8 @@
 #define XSDM_REG_XSDM_PRTY_MASK 				 0x1662bc
 /* [R 11] Parity register #0 read */
 #define XSDM_REG_XSDM_PRTY_STS					 0x1662b0
+/* [RC 11] Parity register #0 read clear */
+#define XSDM_REG_XSDM_PRTY_STS_CLR				 0x1662b4
 /* [RW 5] The number of time_slots in the arbitration cycle */
 #define XSEM_REG_ARB_CYCLE_SIZE 				 0x280034
 /* [RW 3] The source that is associated with arbitration element 0. Source
@@ -4929,6 +4999,9 @@
 /* [R 32] Parity register #0 read */
 #define XSEM_REG_XSEM_PRTY_STS_0				 0x280124
 #define XSEM_REG_XSEM_PRTY_STS_1				 0x280134
+/* [RC 32] Parity register #0 read clear */
+#define XSEM_REG_XSEM_PRTY_STS_CLR_0				 0x280128
+#define XSEM_REG_XSEM_PRTY_STS_CLR_1				 0x280138
 #define MCPR_NVM_ACCESS_ENABLE_EN				 (1L<<0)
 #define MCPR_NVM_ACCESS_ENABLE_WR_EN				 (1L<<1)
 #define MCPR_NVM_ADDR_NVM_ADDR_VALUE				 (0xffffffL<<0)
@@ -6316,3 +6389,4 @@
 }
 
 
+#endif /* BNX2X_REG_H */
diff --git a/drivers/net/bnx2x/bnx2x_stats.c b/drivers/net/bnx2x/bnx2x_stats.c
index 6e4d9b1..bda60d5 100644
--- a/drivers/net/bnx2x/bnx2x_stats.c
+++ b/drivers/net/bnx2x/bnx2x_stats.c
@@ -158,6 +158,11 @@
 
 		spin_lock_bh(&bp->stats_lock);
 
+		if (bp->stats_pending) {
+			spin_unlock_bh(&bp->stats_lock);
+			return;
+		}
+
 		ramrod_data.drv_counter = bp->stats_counter++;
 		ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
 		for_each_eth_queue(bp, i)
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index 48cf24f..171782e 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -840,7 +840,7 @@
 	lacpdu_header = (struct lacpdu_header *)skb_put(skb, length);
 
 	memcpy(lacpdu_header->hdr.h_dest, lacpdu_mcast_addr, ETH_ALEN);
-	/* Note: source addres is set to be the member's PERMANENT address,
+	/* Note: source address is set to be the member's PERMANENT address,
 	   because we use it to identify loopback lacpdus in receive. */
 	memcpy(lacpdu_header->hdr.h_source, slave->perm_hwaddr, ETH_ALEN);
 	lacpdu_header->hdr.h_proto = PKT_TYPE_LACPDU;
@@ -881,7 +881,7 @@
 	marker_header = (struct bond_marker_header *)skb_put(skb, length);
 
 	memcpy(marker_header->hdr.h_dest, lacpdu_mcast_addr, ETH_ALEN);
-	/* Note: source addres is set to be the member's PERMANENT address,
+	/* Note: source address is set to be the member's PERMANENT address,
 	   because we use it to identify loopback MARKERs in receive. */
 	memcpy(marker_header->hdr.h_source, slave->perm_hwaddr, ETH_ALEN);
 	marker_header->hdr.h_proto = PKT_TYPE_LACPDU;
@@ -1916,7 +1916,7 @@
 		return -1;
 	}
 
-	//check that the slave has not been intialized yet.
+	//check that the slave has not been initialized yet.
 	if (SLAVE_AD_INFO(slave).port.slave != slave) {
 
 		// port initialization
diff --git a/drivers/net/chelsio/subr.c b/drivers/net/chelsio/subr.c
index 63ebf76..8a43c7e 100644
--- a/drivers/net/chelsio/subr.c
+++ b/drivers/net/chelsio/subr.c
@@ -556,7 +556,7 @@
 #define EEPROM_MAX_POLL   4
 
 /*
- * Read SEEPROM. A zero is written to the flag register when the addres is
+ * Read SEEPROM. A zero is written to the flag register when the address is
  * written to the Control register. The hardware device will set the flag to a
  * one when 4B have been transferred to the Data register.
  */
diff --git a/drivers/net/cxgb3/mc5.c b/drivers/net/cxgb3/mc5.c
index a8766fb..e13b7fe 100644
--- a/drivers/net/cxgb3/mc5.c
+++ b/drivers/net/cxgb3/mc5.c
@@ -318,7 +318,7 @@
 
 /*
  * Initialization that requires the OS and protocol layers to already
- * be intialized goes here.
+ * be initialized goes here.
  */
 int t3_mc5_init(struct mc5 *mc5, unsigned int nservers, unsigned int nfilters,
 		unsigned int nroutes)
diff --git a/drivers/net/cxgb3/t3_hw.c b/drivers/net/cxgb3/t3_hw.c
index ec8579a..d55db6b 100644
--- a/drivers/net/cxgb3/t3_hw.c
+++ b/drivers/net/cxgb3/t3_hw.c
@@ -607,7 +607,7 @@
  *
  *	Read a 32-bit word from a location in VPD EEPROM using the card's PCI
  *	VPD ROM capability.  A zero is written to the flag bit when the
- *	addres is written to the control register.  The hardware device will
+ *	address is written to the control register.  The hardware device will
  *	set the flag to 1 when 4 bytes have been read into the data register.
  */
 int t3_seeprom_read(struct adapter *adapter, u32 addr, __le32 *data)
diff --git a/drivers/net/cxgb4vf/cxgb4vf_main.c b/drivers/net/cxgb4vf/cxgb4vf_main.c
index 3c403f8..56166ae 100644
--- a/drivers/net/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/cxgb4vf/cxgb4vf_main.c
@@ -749,13 +749,19 @@
 	netif_set_real_num_tx_queues(dev, pi->nqsets);
 	err = netif_set_real_num_rx_queues(dev, pi->nqsets);
 	if (err)
-		return err;
-	set_bit(pi->port_id, &adapter->open_device_map);
+		goto err_unwind;
 	err = link_start(dev);
 	if (err)
-		return err;
+		goto err_unwind;
+
 	netif_tx_start_all_queues(dev);
+	set_bit(pi->port_id, &adapter->open_device_map);
 	return 0;
+
+err_unwind:
+	if (adapter->open_device_map == 0)
+		adapter_down(adapter);
+	return err;
 }
 
 /*
@@ -764,13 +770,12 @@
  */
 static int cxgb4vf_stop(struct net_device *dev)
 {
-	int ret;
 	struct port_info *pi = netdev_priv(dev);
 	struct adapter *adapter = pi->adapter;
 
 	netif_tx_stop_all_queues(dev);
 	netif_carrier_off(dev);
-	ret = t4vf_enable_vi(adapter, pi->viid, false, false);
+	t4vf_enable_vi(adapter, pi->viid, false, false);
 	pi->link_cfg.link_ok = 0;
 
 	clear_bit(pi->port_id, &adapter->open_device_map);
diff --git a/drivers/net/cxgb4vf/t4vf_hw.c b/drivers/net/cxgb4vf/t4vf_hw.c
index e4bec78..0f51c80 100644
--- a/drivers/net/cxgb4vf/t4vf_hw.c
+++ b/drivers/net/cxgb4vf/t4vf_hw.c
@@ -147,9 +147,20 @@
 	/*
 	 * Write the command array into the Mailbox Data register array and
 	 * transfer ownership of the mailbox to the firmware.
+	 *
+	 * For the VFs, the Mailbox Data "registers" are actually backed by
+	 * T4's "MA" interface rather than PL Registers (as is the case for
+	 * the PFs).  Because these are in different coherency domains, the
+	 * write to the VF's PL-register-backed Mailbox Control can race in
+	 * front of the writes to the MA-backed VF Mailbox Data "registers".
+	 * So we need to do a read-back on at least one byte of the VF Mailbox
+	 * Data registers before doing the write to the VF Mailbox Control
+	 * register.
 	 */
 	for (i = 0, p = cmd; i < size; i += 8)
 		t4_write_reg64(adapter, mbox_data + i, be64_to_cpu(*p++));
+	t4_read_reg(adapter, mbox_data);         /* flush write */
+
 	t4_write_reg(adapter, mbox_ctl,
 		     MBMSGVALID | MBOWNER(MBOX_OWNER_FW));
 	t4_read_reg(adapter, mbox_ctl);          /* flush write */
diff --git a/drivers/net/e1000/e1000_hw.c b/drivers/net/e1000/e1000_hw.c
index 77d08e6..aed223b 100644
--- a/drivers/net/e1000/e1000_hw.c
+++ b/drivers/net/e1000/e1000_hw.c
@@ -130,10 +130,15 @@
 		if (hw->mac_type == e1000_82541 ||
 		    hw->mac_type == e1000_82541_rev_2 ||
 		    hw->mac_type == e1000_82547 ||
-		    hw->mac_type == e1000_82547_rev_2) {
+		    hw->mac_type == e1000_82547_rev_2)
 			hw->phy_type = e1000_phy_igp;
-			break;
-		}
+		break;
+	case RTL8211B_PHY_ID:
+		hw->phy_type = e1000_phy_8211;
+		break;
+	case RTL8201N_PHY_ID:
+		hw->phy_type = e1000_phy_8201;
+		break;
 	default:
 		/* Should never have loaded on this device */
 		hw->phy_type = e1000_phy_undefined;
@@ -318,6 +323,9 @@
 	case E1000_DEV_ID_82547GI:
 		hw->mac_type = e1000_82547_rev_2;
 		break;
+	case E1000_DEV_ID_INTEL_CE4100_GBE:
+		hw->mac_type = e1000_ce4100;
+		break;
 	default:
 		/* Should never have loaded on this device */
 		return -E1000_ERR_MAC_TYPE;
@@ -372,6 +380,9 @@
 		case e1000_82542_rev2_1:
 			hw->media_type = e1000_media_type_fiber;
 			break;
+		case e1000_ce4100:
+			hw->media_type = e1000_media_type_copper;
+			break;
 		default:
 			status = er32(STATUS);
 			if (status & E1000_STATUS_TBIMODE) {
@@ -460,6 +471,7 @@
 		/* Reset is performed on a shadow of the control register */
 		ew32(CTRL_DUP, (ctrl | E1000_CTRL_RST));
 		break;
+	case e1000_ce4100:
 	default:
 		ew32(CTRL, (ctrl | E1000_CTRL_RST));
 		break;
@@ -952,6 +964,67 @@
 }
 
 /**
+ * e1000_copper_link_rtl_setup - Copper link setup for e1000_phy_rtl series.
+ * @hw: Struct containing variables accessed by shared code
+ *
+ * Commits changes to PHY configuration by calling e1000_phy_reset().
+ */
+static s32 e1000_copper_link_rtl_setup(struct e1000_hw *hw)
+{
+	s32 ret_val;
+
+	/* SW reset the PHY so all changes take effect */
+	ret_val = e1000_phy_reset(hw);
+	if (ret_val) {
+		e_dbg("Error Resetting the PHY\n");
+		return ret_val;
+	}
+
+	return E1000_SUCCESS;
+}
+
+static s32 gbe_dhg_phy_setup(struct e1000_hw *hw)
+{
+	s32 ret_val;
+	u32 ctrl_aux;
+
+	switch (hw->phy_type) {
+	case e1000_phy_8211:
+		ret_val = e1000_copper_link_rtl_setup(hw);
+		if (ret_val) {
+			e_dbg("e1000_copper_link_rtl_setup failed!\n");
+			return ret_val;
+		}
+		break;
+	case e1000_phy_8201:
+		/* Set RMII mode */
+		ctrl_aux = er32(CTL_AUX);
+		ctrl_aux |= E1000_CTL_AUX_RMII;
+		ew32(CTL_AUX, ctrl_aux);
+		E1000_WRITE_FLUSH();
+
+		/* Disable the J/K bits required for receive */
+		ctrl_aux = er32(CTL_AUX);
+		ctrl_aux |= 0x4;
+		ctrl_aux &= ~0x2;
+		ew32(CTL_AUX, ctrl_aux);
+		E1000_WRITE_FLUSH();
+		ret_val = e1000_copper_link_rtl_setup(hw);
+
+		if (ret_val) {
+			e_dbg("e1000_copper_link_rtl_setup failed!\n");
+			return ret_val;
+		}
+		break;
+	default:
+		e_dbg("Error Resetting the PHY\n");
+		return E1000_ERR_PHY_TYPE;
+	}
+
+	return E1000_SUCCESS;
+}
+
+/**
  * e1000_copper_link_preconfig - early configuration for copper
  * @hw: Struct containing variables accessed by shared code
  *
@@ -1286,6 +1359,10 @@
 	if (hw->autoneg_advertised == 0)
 		hw->autoneg_advertised = AUTONEG_ADVERTISE_SPEED_DEFAULT;
 
+	/* IFE/RTL8201N PHY only supports 10/100 */
+	if (hw->phy_type == e1000_phy_8201)
+		hw->autoneg_advertised &= AUTONEG_ADVERTISE_10_100_ALL;
+
 	e_dbg("Reconfiguring auto-neg advertisement params\n");
 	ret_val = e1000_phy_setup_autoneg(hw);
 	if (ret_val) {
@@ -1341,7 +1418,7 @@
 	s32 ret_val;
 	e_dbg("e1000_copper_link_postconfig");
 
-	if (hw->mac_type >= e1000_82544) {
+	if ((hw->mac_type >= e1000_82544) && (hw->mac_type != e1000_ce4100)) {
 		e1000_config_collision_dist(hw);
 	} else {
 		ret_val = e1000_config_mac_to_phy(hw);
@@ -1395,6 +1472,12 @@
 		ret_val = e1000_copper_link_mgp_setup(hw);
 		if (ret_val)
 			return ret_val;
+	} else {
+		ret_val = gbe_dhg_phy_setup(hw);
+		if (ret_val) {
+			e_dbg("gbe_dhg_phy_setup failed!\n");
+			return ret_val;
+		}
 	}
 
 	if (hw->autoneg) {
@@ -1461,10 +1544,11 @@
 		return ret_val;
 
 	/* Read the MII 1000Base-T Control Register (Address 9). */
-	ret_val =
-	    e1000_read_phy_reg(hw, PHY_1000T_CTRL, &mii_1000t_ctrl_reg);
+	ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &mii_1000t_ctrl_reg);
 	if (ret_val)
 		return ret_val;
+	else if (hw->phy_type == e1000_phy_8201)
+		mii_1000t_ctrl_reg &= ~REG9_SPEED_MASK;
 
 	/* Need to parse both autoneg_advertised and fc and set up
 	 * the appropriate PHY registers.  First we will parse for
@@ -1577,9 +1661,14 @@
 
 	e_dbg("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg);
 
-	ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, mii_1000t_ctrl_reg);
-	if (ret_val)
-		return ret_val;
+	if (hw->phy_type == e1000_phy_8201) {
+		mii_1000t_ctrl_reg = 0;
+	} else {
+		ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL,
+		                              mii_1000t_ctrl_reg);
+		if (ret_val)
+			return ret_val;
+	}
 
 	return E1000_SUCCESS;
 }
@@ -1860,7 +1949,7 @@
 
 	/* 82544 or newer MAC, Auto Speed Detection takes care of
 	 * MAC speed/duplex configuration.*/
-	if (hw->mac_type >= e1000_82544)
+	if ((hw->mac_type >= e1000_82544) && (hw->mac_type != e1000_ce4100))
 		return E1000_SUCCESS;
 
 	/* Read the Device Control Register and set the bits to Force Speed
@@ -1870,27 +1959,49 @@
 	ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
 	ctrl &= ~(E1000_CTRL_SPD_SEL | E1000_CTRL_ILOS);
 
-	/* Set up duplex in the Device Control and Transmit Control
-	 * registers depending on negotiated values.
-	 */
-	ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
-	if (ret_val)
-		return ret_val;
+	switch (hw->phy_type) {
+	case e1000_phy_8201:
+		ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &phy_data);
+		if (ret_val)
+			return ret_val;
 
-	if (phy_data & M88E1000_PSSR_DPLX)
-		ctrl |= E1000_CTRL_FD;
-	else
-		ctrl &= ~E1000_CTRL_FD;
+		if (phy_data & RTL_PHY_CTRL_FD)
+			ctrl |= E1000_CTRL_FD;
+		else
+			ctrl &= ~E1000_CTRL_FD;
 
-	e1000_config_collision_dist(hw);
+		if (phy_data & RTL_PHY_CTRL_SPD_100)
+			ctrl |= E1000_CTRL_SPD_100;
+		else
+			ctrl |= E1000_CTRL_SPD_10;
 
-	/* Set up speed in the Device Control register depending on
-	 * negotiated values.
-	 */
-	if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS)
-		ctrl |= E1000_CTRL_SPD_1000;
-	else if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_100MBS)
-		ctrl |= E1000_CTRL_SPD_100;
+		e1000_config_collision_dist(hw);
+		break;
+	default:
+		/* Set up duplex in the Device Control and Transmit Control
+		 * registers depending on negotiated values.
+		 */
+		ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS,
+		                             &phy_data);
+		if (ret_val)
+			return ret_val;
+
+		if (phy_data & M88E1000_PSSR_DPLX)
+			ctrl |= E1000_CTRL_FD;
+		else
+			ctrl &= ~E1000_CTRL_FD;
+
+		e1000_config_collision_dist(hw);
+
+		/* Set up speed in the Device Control register depending on
+		 * negotiated values.
+		 */
+		if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS)
+			ctrl |= E1000_CTRL_SPD_1000;
+		else if ((phy_data & M88E1000_PSSR_SPEED) ==
+		         M88E1000_PSSR_100MBS)
+			ctrl |= E1000_CTRL_SPD_100;
+	}
 
 	/* Write the configured values back to the Device Control Reg. */
 	ew32(CTRL, ctrl);
@@ -2401,7 +2512,8 @@
 		 * speed/duplex on the MAC to the current PHY speed/duplex
 		 * settings.
 		 */
-		if (hw->mac_type >= e1000_82544)
+		if ((hw->mac_type >= e1000_82544) &&
+		    (hw->mac_type != e1000_ce4100))
 			e1000_config_collision_dist(hw);
 		else {
 			ret_val = e1000_config_mac_to_phy(hw);
@@ -2738,7 +2850,7 @@
 {
 	u32 i;
 	u32 mdic = 0;
-	const u32 phy_addr = 1;
+	const u32 phy_addr = (hw->mac_type == e1000_ce4100) ? hw->phy_addr : 1;
 
 	e_dbg("e1000_read_phy_reg_ex");
 
@@ -2752,28 +2864,61 @@
 		 * Control register.  The MAC will take care of interfacing with the
 		 * PHY to retrieve the desired data.
 		 */
-		mdic = ((reg_addr << E1000_MDIC_REG_SHIFT) |
-			(phy_addr << E1000_MDIC_PHY_SHIFT) |
-			(E1000_MDIC_OP_READ));
+		if (hw->mac_type == e1000_ce4100) {
+			mdic = ((reg_addr << E1000_MDIC_REG_SHIFT) |
+				(phy_addr << E1000_MDIC_PHY_SHIFT) |
+				(INTEL_CE_GBE_MDIC_OP_READ) |
+				(INTEL_CE_GBE_MDIC_GO));
 
-		ew32(MDIC, mdic);
+			writel(mdic, E1000_MDIO_CMD);
 
-		/* Poll the ready bit to see if the MDI read completed */
-		for (i = 0; i < 64; i++) {
-			udelay(50);
-			mdic = er32(MDIC);
-			if (mdic & E1000_MDIC_READY)
-				break;
+			/* Poll the ready bit to see if the MDI read
+			 * completed
+			 */
+			for (i = 0; i < 64; i++) {
+				udelay(50);
+				mdic = readl(E1000_MDIO_CMD);
+				if (!(mdic & INTEL_CE_GBE_MDIC_GO))
+					break;
+			}
+
+			if (mdic & INTEL_CE_GBE_MDIC_GO) {
+				e_dbg("MDI Read did not complete\n");
+				return -E1000_ERR_PHY;
+			}
+
+			mdic = readl(E1000_MDIO_STS);
+			if (mdic & INTEL_CE_GBE_MDIC_READ_ERROR) {
+				e_dbg("MDI Read Error\n");
+				return -E1000_ERR_PHY;
+			}
+			*phy_data = (u16) mdic;
+		} else {
+			mdic = ((reg_addr << E1000_MDIC_REG_SHIFT) |
+				(phy_addr << E1000_MDIC_PHY_SHIFT) |
+				(E1000_MDIC_OP_READ));
+
+			ew32(MDIC, mdic);
+
+			/* Poll the ready bit to see if the MDI read
+			 * completed
+			 */
+			for (i = 0; i < 64; i++) {
+				udelay(50);
+				mdic = er32(MDIC);
+				if (mdic & E1000_MDIC_READY)
+					break;
+			}
+			if (!(mdic & E1000_MDIC_READY)) {
+				e_dbg("MDI Read did not complete\n");
+				return -E1000_ERR_PHY;
+			}
+			if (mdic & E1000_MDIC_ERROR) {
+				e_dbg("MDI Error\n");
+				return -E1000_ERR_PHY;
+			}
+			*phy_data = (u16) mdic;
 		}
-		if (!(mdic & E1000_MDIC_READY)) {
-			e_dbg("MDI Read did not complete\n");
-			return -E1000_ERR_PHY;
-		}
-		if (mdic & E1000_MDIC_ERROR) {
-			e_dbg("MDI Error\n");
-			return -E1000_ERR_PHY;
-		}
-		*phy_data = (u16) mdic;
 	} else {
 		/* We must first send a preamble through the MDIO pin to signal the
 		 * beginning of an MII instruction.  This is done by sending 32
@@ -2840,7 +2985,7 @@
 {
 	u32 i;
 	u32 mdic = 0;
-	const u32 phy_addr = 1;
+	const u32 phy_addr = (hw->mac_type == e1000_ce4100) ? hw->phy_addr : 1;
 
 	e_dbg("e1000_write_phy_reg_ex");
 
@@ -2850,27 +2995,54 @@
 	}
 
 	if (hw->mac_type > e1000_82543) {
-		/* Set up Op-code, Phy Address, register address, and data intended
-		 * for the PHY register in the MDI Control register.  The MAC will take
-		 * care of interfacing with the PHY to send the desired data.
+		/* Set up Op-code, Phy Address, register address, and data
+		 * intended for the PHY register in the MDI Control register.
+		 * The MAC will take care of interfacing with the PHY to send
+		 * the desired data.
 		 */
-		mdic = (((u32) phy_data) |
-			(reg_addr << E1000_MDIC_REG_SHIFT) |
-			(phy_addr << E1000_MDIC_PHY_SHIFT) |
-			(E1000_MDIC_OP_WRITE));
+		if (hw->mac_type == e1000_ce4100) {
+			mdic = (((u32) phy_data) |
+				(reg_addr << E1000_MDIC_REG_SHIFT) |
+				(phy_addr << E1000_MDIC_PHY_SHIFT) |
+				(INTEL_CE_GBE_MDIC_OP_WRITE) |
+				(INTEL_CE_GBE_MDIC_GO));
 
-		ew32(MDIC, mdic);
+			writel(mdic, E1000_MDIO_CMD);
 
-		/* Poll the ready bit to see if the MDI read completed */
-		for (i = 0; i < 641; i++) {
-			udelay(5);
-			mdic = er32(MDIC);
-			if (mdic & E1000_MDIC_READY)
-				break;
-		}
-		if (!(mdic & E1000_MDIC_READY)) {
-			e_dbg("MDI Write did not complete\n");
-			return -E1000_ERR_PHY;
+			/* Poll the ready bit to see if the MDI write
+			 * completed
+			 */
+			for (i = 0; i < 640; i++) {
+				udelay(5);
+				mdic = readl(E1000_MDIO_CMD);
+				if (!(mdic & INTEL_CE_GBE_MDIC_GO))
+					break;
+			}
+			if (mdic & INTEL_CE_GBE_MDIC_GO) {
+				e_dbg("MDI Write did not complete\n");
+				return -E1000_ERR_PHY;
+			}
+		} else {
+			mdic = (((u32) phy_data) |
+				(reg_addr << E1000_MDIC_REG_SHIFT) |
+				(phy_addr << E1000_MDIC_PHY_SHIFT) |
+				(E1000_MDIC_OP_WRITE));
+
+			ew32(MDIC, mdic);
+
+			/* Poll the ready bit to see if the MDI write
+			 * completed
+			 */
+			for (i = 0; i < 641; i++) {
+				udelay(5);
+				mdic = er32(MDIC);
+				if (mdic & E1000_MDIC_READY)
+					break;
+			}
+			if (!(mdic & E1000_MDIC_READY)) {
+				e_dbg("MDI Write did not complete\n");
+				return -E1000_ERR_PHY;
+			}
 		}
 	} else {
 		/* We'll need to use the SW defined pins to shift the write command
@@ -3048,6 +3220,11 @@
 		if (hw->phy_id == M88E1011_I_PHY_ID)
 			match = true;
 		break;
+	case e1000_ce4100:
+		if ((hw->phy_id == RTL8211B_PHY_ID) ||
+		    (hw->phy_id == RTL8201N_PHY_ID))
+			match = true;
+		break;
 	case e1000_82541:
 	case e1000_82541_rev_2:
 	case e1000_82547:
@@ -3291,6 +3468,9 @@
 
 	if (hw->phy_type == e1000_phy_igp)
 		return e1000_phy_igp_get_info(hw, phy_info);
+	else if ((hw->phy_type == e1000_phy_8211) ||
+	         (hw->phy_type == e1000_phy_8201))
+		return E1000_SUCCESS;
 	else
 		return e1000_phy_m88_get_info(hw, phy_info);
 }
@@ -3742,6 +3922,12 @@
 
 	e_dbg("e1000_read_eeprom");
 
+	if (hw->mac_type == e1000_ce4100) {
+		GBE_CONFIG_FLASH_READ(GBE_CONFIG_BASE_VIRT, offset, words,
+		                      data);
+		return E1000_SUCCESS;
+	}
+
 	/* If eeprom is not yet detected, do so now */
 	if (eeprom->word_size == 0)
 		e1000_init_eeprom_params(hw);
@@ -3904,6 +4090,12 @@
 
 	e_dbg("e1000_write_eeprom");
 
+	if (hw->mac_type == e1000_ce4100) {
+		GBE_CONFIG_FLASH_WRITE(GBE_CONFIG_BASE_VIRT, offset, words,
+		                       data);
+		return E1000_SUCCESS;
+	}
+
 	/* If eeprom is not yet detected, do so now */
 	if (eeprom->word_size == 0)
 		e1000_init_eeprom_params(hw);
diff --git a/drivers/net/e1000/e1000_hw.h b/drivers/net/e1000/e1000_hw.h
index ecd9f6c..196eeda 100644
--- a/drivers/net/e1000/e1000_hw.h
+++ b/drivers/net/e1000/e1000_hw.h
@@ -41,7 +41,7 @@
 struct e1000_hw_stats;
 
 /* Enumerated types specific to the e1000 hardware */
-/* Media Access Controlers */
+/* Media Access Controllers */
 typedef enum {
 	e1000_undefined = 0,
 	e1000_82542_rev2_0,
@@ -52,6 +52,7 @@
 	e1000_82545,
 	e1000_82545_rev_3,
 	e1000_82546,
+	e1000_ce4100,
 	e1000_82546_rev_3,
 	e1000_82541,
 	e1000_82541_rev_2,
@@ -209,9 +210,11 @@
 } e1000_1000t_rx_status;
 
 typedef enum {
-    e1000_phy_m88 = 0,
-    e1000_phy_igp,
-    e1000_phy_undefined = 0xFF
+	e1000_phy_m88 = 0,
+	e1000_phy_igp,
+	e1000_phy_8211,
+	e1000_phy_8201,
+	e1000_phy_undefined = 0xFF
 } e1000_phy_type;
 
 typedef enum {
@@ -442,6 +445,7 @@
 #define E1000_DEV_ID_82547EI             0x1019
 #define E1000_DEV_ID_82547EI_MOBILE      0x101A
 #define E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3 0x10B5
+#define E1000_DEV_ID_INTEL_CE4100_GBE    0x2E6E
 
 #define NODE_ADDRESS_SIZE 6
 #define ETH_LENGTH_OF_ADDRESS 6
@@ -808,6 +812,16 @@
 #define E1000_CTRL_EXT 0x00018	/* Extended Device Control - RW */
 #define E1000_FLA      0x0001C	/* Flash Access - RW */
 #define E1000_MDIC     0x00020	/* MDI Control - RW */
+
+extern void __iomem *ce4100_gbe_mdio_base_virt;
+#define INTEL_CE_GBE_MDIO_RCOMP_BASE    (ce4100_gbe_mdio_base_virt)
+#define E1000_MDIO_STS  (INTEL_CE_GBE_MDIO_RCOMP_BASE + 0)
+#define E1000_MDIO_CMD  (INTEL_CE_GBE_MDIO_RCOMP_BASE + 4)
+#define E1000_MDIO_DRV  (INTEL_CE_GBE_MDIO_RCOMP_BASE + 8)
+#define E1000_MDC_CMD   (INTEL_CE_GBE_MDIO_RCOMP_BASE + 0xC)
+#define E1000_RCOMP_CTL (INTEL_CE_GBE_MDIO_RCOMP_BASE + 0x20)
+#define E1000_RCOMP_STS (INTEL_CE_GBE_MDIO_RCOMP_BASE + 0x24)
+
 #define E1000_SCTL     0x00024	/* SerDes Control - RW */
 #define E1000_FEXTNVM  0x00028	/* Future Extended NVM register */
 #define E1000_FCAL     0x00028	/* Flow Control Address Low - RW */
@@ -820,6 +834,34 @@
 #define E1000_IMS      0x000D0	/* Interrupt Mask Set - RW */
 #define E1000_IMC      0x000D8	/* Interrupt Mask Clear - WO */
 #define E1000_IAM      0x000E0	/* Interrupt Acknowledge Auto Mask */
+
+/* Auxiliary Control Register. This register is CE4100 specific.
+ * The RMII/RGMII function is switched by this register - RW
+ * The following are the bit definitions of the Auxiliary Control Register.
+ */
+#define E1000_CTL_AUX  0x000E0
+#define E1000_CTL_AUX_END_SEL_SHIFT     10
+#define E1000_CTL_AUX_ENDIANESS_SHIFT   8
+#define E1000_CTL_AUX_RGMII_RMII_SHIFT  0
+
+/* descriptor and packet transfer use CTL_AUX.ENDIANESS */
+#define E1000_CTL_AUX_DES_PKT   (0x0 << E1000_CTL_AUX_END_SEL_SHIFT)
+/* descriptor use CTL_AUX.ENDIANESS, packet use default */
+#define E1000_CTL_AUX_DES       (0x1 << E1000_CTL_AUX_END_SEL_SHIFT)
+/* descriptor use default, packet use CTL_AUX.ENDIANESS */
+#define E1000_CTL_AUX_PKT       (0x2 << E1000_CTL_AUX_END_SEL_SHIFT)
+/* all use CTL_AUX.ENDIANESS */
+#define E1000_CTL_AUX_ALL       (0x3 << E1000_CTL_AUX_END_SEL_SHIFT)
+
+#define E1000_CTL_AUX_RGMII     (0x0 << E1000_CTL_AUX_RGMII_RMII_SHIFT)
+#define E1000_CTL_AUX_RMII      (0x1 << E1000_CTL_AUX_RGMII_RMII_SHIFT)
+
+/* LW little endian, Byte big endian */
+#define E1000_CTL_AUX_LWLE_BBE  (0x0 << E1000_CTL_AUX_ENDIANESS_SHIFT)
+#define E1000_CTL_AUX_LWLE_BLE  (0x1 << E1000_CTL_AUX_ENDIANESS_SHIFT)
+#define E1000_CTL_AUX_LWBE_BBE  (0x2 << E1000_CTL_AUX_ENDIANESS_SHIFT)
+#define E1000_CTL_AUX_LWBE_BLE  (0x3 << E1000_CTL_AUX_ENDIANESS_SHIFT)
+
 #define E1000_RCTL     0x00100	/* RX Control - RW */
 #define E1000_RDTR1    0x02820	/* RX Delay Timer (1) - RW */
 #define E1000_RDBAL1   0x02900	/* RX Descriptor Base Address Low (1) - RW */
@@ -1011,6 +1053,7 @@
  * in more current versions of the 8254x. Despite the difference in location,
  * the registers function in the same manner.
  */
+#define E1000_82542_CTL_AUX  E1000_CTL_AUX
 #define E1000_82542_CTRL     E1000_CTRL
 #define E1000_82542_CTRL_DUP E1000_CTRL_DUP
 #define E1000_82542_STATUS   E1000_STATUS
@@ -1571,6 +1614,11 @@
 #define E1000_MDIC_INT_EN    0x20000000
 #define E1000_MDIC_ERROR     0x40000000
 
+#define INTEL_CE_GBE_MDIC_OP_WRITE      0x04000000
+#define INTEL_CE_GBE_MDIC_OP_READ       0x00000000
+#define INTEL_CE_GBE_MDIC_GO            0x80000000
+#define INTEL_CE_GBE_MDIC_READ_ERROR    0x80000000
+
 #define E1000_KUMCTRLSTA_MASK           0x0000FFFF
 #define E1000_KUMCTRLSTA_OFFSET         0x001F0000
 #define E1000_KUMCTRLSTA_OFFSET_SHIFT   16
@@ -2871,6 +2919,11 @@
 #define M88E1111_I_PHY_ID  0x01410CC0
 #define L1LXT971A_PHY_ID   0x001378E0
 
+#define RTL8211B_PHY_ID    0x001CC910
+#define RTL8201N_PHY_ID    0x8200
+#define RTL_PHY_CTRL_FD    0x0100 /* Full duplex: 0=half; 1=full */
+#define RTL_PHY_CTRL_SPD_100    0x200000 /* Force 100Mb */
+
 /* Bits...
  * 15-5: page
  * 4-0: register offset
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 340e12d..de69c54 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -28,6 +28,12 @@
 
 #include "e1000.h"
 #include <net/ip6_checksum.h>
+#include <linux/io.h>
+
+/* Intel Media SOC GbE MDIO physical base address */
+static unsigned long ce4100_gbe_mdio_base_phy;
+/* Intel Media SOC GbE MDIO virtual base address */
+void __iomem *ce4100_gbe_mdio_base_virt;
 
 char e1000_driver_name[] = "e1000";
 static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
@@ -79,6 +85,7 @@
 	INTEL_E1000_ETHERNET_DEVICE(0x108A),
 	INTEL_E1000_ETHERNET_DEVICE(0x1099),
 	INTEL_E1000_ETHERNET_DEVICE(0x10B5),
+	INTEL_E1000_ETHERNET_DEVICE(0x2E6E),
 	/* required last entry */
 	{0,}
 };
@@ -459,6 +466,7 @@
 		case e1000_82545:
 		case e1000_82545_rev_3:
 		case e1000_82546:
+		case e1000_ce4100:
 		case e1000_82546_rev_3:
 		case e1000_82541:
 		case e1000_82541_rev_2:
@@ -573,6 +581,7 @@
 	case e1000_82545:
 	case e1000_82545_rev_3:
 	case e1000_82546:
+	case e1000_ce4100:
 	case e1000_82546_rev_3:
 		pba = E1000_PBA_48K;
 		break;
@@ -894,6 +903,7 @@
 	static int global_quad_port_a = 0; /* global ksp3 port a indication */
 	int i, err, pci_using_dac;
 	u16 eeprom_data = 0;
+	u16 tmp = 0;
 	u16 eeprom_apme_mask = E1000_EEPROM_APME;
 	int bars, need_ioport;
 
@@ -996,6 +1006,14 @@
 		goto err_sw_init;
 
 	err = -EIO;
+	if (hw->mac_type == e1000_ce4100) {
+		ce4100_gbe_mdio_base_phy = pci_resource_start(pdev, BAR_1);
+		ce4100_gbe_mdio_base_virt = ioremap(ce4100_gbe_mdio_base_phy,
+		                                pci_resource_len(pdev, BAR_1));
+
+		if (!ce4100_gbe_mdio_base_virt)
+			goto err_mdio_ioremap;
+	}
 
 	if (hw->mac_type >= e1000_82543) {
 		netdev->features = NETIF_F_SG |
@@ -1135,6 +1153,20 @@
 	adapter->wol = adapter->eeprom_wol;
 	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
 
+	/* Auto detect PHY address */
+	if (hw->mac_type == e1000_ce4100) {
+		for (i = 0; i < 32; i++) {
+			hw->phy_addr = i;
+			e1000_read_phy_reg(hw, PHY_ID2, &tmp);
+			if (tmp == 0 || tmp == 0xFF) {
+				if (i == 31)
+					goto err_eeprom;
+				continue;
+			} else
+				break;
+		}
+	}
+
 	/* reset the hardware with the new settings */
 	e1000_reset(adapter);
 
@@ -1171,6 +1203,8 @@
 	kfree(adapter->rx_ring);
 err_dma:
 err_sw_init:
+err_mdio_ioremap:
+	iounmap(ce4100_gbe_mdio_base_virt);
 	iounmap(hw->hw_addr);
 err_ioremap:
 	free_netdev(netdev);
@@ -1409,6 +1443,7 @@
 	/* First rev 82545 and 82546 need to not allow any memory
 	 * write location to cross 64k boundary due to errata 23 */
 	if (hw->mac_type == e1000_82545 ||
+	    hw->mac_type == e1000_ce4100 ||
 	    hw->mac_type == e1000_82546) {
 		return ((begin ^ (end - 1)) >> 16) != 0 ? false : true;
 	}
@@ -2198,7 +2233,7 @@
 	 * addresses take precedence to avoid disabling unicast filtering
 	 * when possible.
 	 *
-	 * RAR 0 is used for the station MAC adddress
+	 * RAR 0 is used for the station MAC address
 	 * if there are not 14 addresses, go ahead and clear the filters
 	 */
 	i = 1;
diff --git a/drivers/net/e1000/e1000_osdep.h b/drivers/net/e1000/e1000_osdep.h
index edd1c75..55c1711 100644
--- a/drivers/net/e1000/e1000_osdep.h
+++ b/drivers/net/e1000/e1000_osdep.h
@@ -34,12 +34,21 @@
 #ifndef _E1000_OSDEP_H_
 #define _E1000_OSDEP_H_
 
-#include <linux/types.h>
-#include <linux/pci.h>
-#include <linux/delay.h>
 #include <asm/io.h>
-#include <linux/interrupt.h>
-#include <linux/sched.h>
+
+#define CONFIG_RAM_BASE         0x60000
+#define GBE_CONFIG_OFFSET       0x0
+
+#define GBE_CONFIG_RAM_BASE \
+	((unsigned int)(CONFIG_RAM_BASE + GBE_CONFIG_OFFSET))
+
+#define GBE_CONFIG_BASE_VIRT    phys_to_virt(GBE_CONFIG_RAM_BASE)
+
+#define GBE_CONFIG_FLASH_WRITE(base, offset, count, data) \
+	(iowrite16_rep(base + offset, data, count))
+
+#define GBE_CONFIG_FLASH_READ(base, offset, count, data) \
+	(ioread16_rep(base + (offset << 1), data, count))
 
 #define er32(reg)							\
 	(readl(hw->hw_addr + ((hw->mac_type >= e1000_82543)		\
diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c
index e57e409..1397da1 100644
--- a/drivers/net/e1000e/82571.c
+++ b/drivers/net/e1000e/82571.c
@@ -78,6 +78,8 @@
 static void e1000_put_hw_semaphore_82573(struct e1000_hw *hw);
 static s32 e1000_get_hw_semaphore_82574(struct e1000_hw *hw);
 static void e1000_put_hw_semaphore_82574(struct e1000_hw *hw);
+static s32 e1000_set_d0_lplu_state_82574(struct e1000_hw *hw, bool active);
+static s32 e1000_set_d3_lplu_state_82574(struct e1000_hw *hw, bool active);
 
 /**
  *  e1000_init_phy_params_82571 - Init PHY func ptrs.
@@ -113,6 +115,8 @@
 		phy->type		 = e1000_phy_bm;
 		phy->ops.acquire = e1000_get_hw_semaphore_82574;
 		phy->ops.release = e1000_put_hw_semaphore_82574;
+		phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_82574;
+		phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_82574;
 		break;
 	default:
 		return -E1000_ERR_PHY;
@@ -121,29 +125,36 @@
 
 	/* This can only be done after all function pointers are setup. */
 	ret_val = e1000_get_phy_id_82571(hw);
+	if (ret_val) {
+		e_dbg("Error getting PHY ID\n");
+		return ret_val;
+	}
 
 	/* Verify phy id */
 	switch (hw->mac.type) {
 	case e1000_82571:
 	case e1000_82572:
 		if (phy->id != IGP01E1000_I_PHY_ID)
-			return -E1000_ERR_PHY;
+			ret_val = -E1000_ERR_PHY;
 		break;
 	case e1000_82573:
 		if (phy->id != M88E1111_I_PHY_ID)
-			return -E1000_ERR_PHY;
+			ret_val = -E1000_ERR_PHY;
 		break;
 	case e1000_82574:
 	case e1000_82583:
 		if (phy->id != BME1000_E_PHY_ID_R2)
-			return -E1000_ERR_PHY;
+			ret_val = -E1000_ERR_PHY;
 		break;
 	default:
-		return -E1000_ERR_PHY;
+		ret_val = -E1000_ERR_PHY;
 		break;
 	}
 
-	return 0;
+	if (ret_val)
+		e_dbg("PHY ID unknown: type = 0x%08x\n", phy->id);
+
+	return ret_val;
 }
 
 /**
@@ -317,7 +328,7 @@
 
 	/*
 	 * Ensure that the inter-port SWSM.SMBI lock bit is clear before
-	 * first NVM or PHY acess. This should be done for single-port
+	 * first NVM or PHY access. This should be done for single-port
 	 * devices, and for one port only on dual-port devices so that
 	 * for those devices we can still use the SMBI lock to synchronize
 	 * inter-port accesses to the PHY & NVM.
@@ -649,6 +660,58 @@
 }
 
 /**
+ *  e1000_set_d0_lplu_state_82574 - Set Low Power Linkup D0 state
+ *  @hw: pointer to the HW structure
+ *  @active: true to enable LPLU, false to disable
+ *
+ *  Sets the LPLU D0 state according to the active flag.
+ *  LPLU will not be activated unless the
+ *  device autonegotiation advertisement meets standards of
+ *  either 10 or 10/100 or 10/100/1000 at all duplexes.
+ *  This is a function pointer entry point only called by
+ *  PHY setup routines.
+ **/
+static s32 e1000_set_d0_lplu_state_82574(struct e1000_hw *hw, bool active)
+{
+	u16 data = er32(POEMB);
+
+	if (active)
+		data |= E1000_PHY_CTRL_D0A_LPLU;
+	else
+		data &= ~E1000_PHY_CTRL_D0A_LPLU;
+
+	ew32(POEMB, data);
+	return 0;
+}
+
+/**
+ *  e1000_set_d3_lplu_state_82574 - Sets low power link up state for D3
+ *  @hw: pointer to the HW structure
+ *  @active: boolean used to enable/disable lplu
+ *
+ *  The low power link up (lplu) state is set to the power management level D3
+ *  when active is true, else clear lplu for D3. LPLU
+ *  is used during Dx states where the power conservation is most important.
+ *  During driver activity, SmartSpeed should be enabled so performance is
+ *  maintained.
+ **/
+static s32 e1000_set_d3_lplu_state_82574(struct e1000_hw *hw, bool active)
+{
+	u16 data = er32(POEMB);
+
+	if (!active) {
+		data &= ~E1000_PHY_CTRL_NOND0A_LPLU;
+	} else if ((hw->phy.autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
+		   (hw->phy.autoneg_advertised == E1000_ALL_NOT_GIG) ||
+		   (hw->phy.autoneg_advertised == E1000_ALL_10_SPEED)) {
+		data |= E1000_PHY_CTRL_NOND0A_LPLU;
+	}
+
+	ew32(POEMB, data);
+	return 0;
+}
+
+/**
  *  e1000_acquire_nvm_82571 - Request for access to the EEPROM
  *  @hw: pointer to the HW structure
  *
@@ -956,7 +1019,7 @@
  **/
 static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
 {
-	u32 ctrl, ctrl_ext, icr;
+	u32 ctrl, ctrl_ext;
 	s32 ret_val;
 
 	/*
@@ -1040,7 +1103,7 @@
 
 	/* Clear any pending interrupt events. */
 	ew32(IMC, 0xffffffff);
-	icr = er32(ICR);
+	er32(ICR);
 
 	if (hw->mac.type == e1000_82571) {
 		/* Install any alternate MAC address into RAR0 */
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
index 2c913b8..5255be7 100644
--- a/drivers/net/e1000e/e1000.h
+++ b/drivers/net/e1000e/e1000.h
@@ -38,6 +38,7 @@
 #include <linux/netdevice.h>
 #include <linux/pci.h>
 #include <linux/pci-aspm.h>
+#include <linux/crc32.h>
 
 #include "hw.h"
 
@@ -496,6 +497,8 @@
 extern void e1000e_update_stats(struct e1000_adapter *adapter);
 extern void e1000e_set_interrupt_capability(struct e1000_adapter *adapter);
 extern void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter);
+extern void e1000e_get_hw_control(struct e1000_adapter *adapter);
+extern void e1000e_release_hw_control(struct e1000_adapter *adapter);
 extern void e1000e_disable_aspm(struct pci_dev *pdev, u16 state);
 
 extern unsigned int copybreak;
diff --git a/drivers/net/e1000e/es2lan.c b/drivers/net/e1000e/es2lan.c
index b18c644..e45a61c 100644
--- a/drivers/net/e1000e/es2lan.c
+++ b/drivers/net/e1000e/es2lan.c
@@ -784,7 +784,7 @@
  **/
 static s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw)
 {
-	u32 ctrl, icr;
+	u32 ctrl;
 	s32 ret_val;
 
 	/*
@@ -818,7 +818,7 @@
 
 	/* Clear any pending interrupt events. */
 	ew32(IMC, 0xffffffff);
-	icr = er32(ICR);
+	er32(ICR);
 
 	ret_val = e1000_check_alt_mac_addr_generic(hw);
 
diff --git a/drivers/net/e1000e/ethtool.c b/drivers/net/e1000e/ethtool.c
index affcacf..f8ed03d 100644
--- a/drivers/net/e1000e/ethtool.c
+++ b/drivers/net/e1000e/ethtool.c
@@ -624,20 +624,24 @@
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 	char firmware_version[32];
 
-	strncpy(drvinfo->driver,  e1000e_driver_name, 32);
-	strncpy(drvinfo->version, e1000e_driver_version, 32);
+	strncpy(drvinfo->driver,  e1000e_driver_name,
+		sizeof(drvinfo->driver) - 1);
+	strncpy(drvinfo->version, e1000e_driver_version,
+		sizeof(drvinfo->version) - 1);
 
 	/*
 	 * EEPROM image version # is reported as firmware version # for
 	 * PCI-E controllers
 	 */
-	sprintf(firmware_version, "%d.%d-%d",
+	snprintf(firmware_version, sizeof(firmware_version), "%d.%d-%d",
 		(adapter->eeprom_vers & 0xF000) >> 12,
 		(adapter->eeprom_vers & 0x0FF0) >> 4,
 		(adapter->eeprom_vers & 0x000F));
 
-	strncpy(drvinfo->fw_version, firmware_version, 32);
-	strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
+	strncpy(drvinfo->fw_version, firmware_version,
+		sizeof(drvinfo->fw_version) - 1);
+	strncpy(drvinfo->bus_info, pci_name(adapter->pdev),
+		sizeof(drvinfo->bus_info) - 1);
 	drvinfo->regdump_len = e1000_get_regs_len(netdev);
 	drvinfo->eedump_len = e1000_get_eeprom_len(netdev);
 }
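
The hunk above swaps the hard-coded 32-byte bounds and sprintf() for sizeof()-derived bounds and snprintf(). A stand-alone sketch of that pattern (user-space; the struct below is illustrative only, not the real struct ethtool_drvinfo layout):

#include <stdio.h>
#include <string.h>

/* Illustrative only -- not the real ethtool drvinfo layout. */
struct demo_drvinfo {
	char driver[32];
	char fw_version[32];
};

int main(void)
{
	struct demo_drvinfo info = { 0 };	/* zeroed, so a truncated strncpy() stays terminated */
	unsigned int vers = 0x1234;		/* sample EEPROM version word */

	/* bound follows the destination field, not a magic number */
	strncpy(info.driver, "e1000e", sizeof(info.driver) - 1);

	/* snprintf() always terminates and never overruns the field */
	snprintf(info.fw_version, sizeof(info.fw_version), "%d.%d-%d",
		 (vers & 0xF000) >> 12, (vers & 0x0FF0) >> 4, vers & 0x000F);

	printf("%s %s\n", info.driver, info.fw_version);	/* e1000e 1.35-4 */
	return 0;
}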
@@ -1704,6 +1708,19 @@
 	bool if_running = netif_running(netdev);
 
 	set_bit(__E1000_TESTING, &adapter->state);
+
+	if (!if_running) {
+		/* Get control of and reset hardware */
+		if (adapter->flags & FLAG_HAS_AMT)
+			e1000e_get_hw_control(adapter);
+
+		e1000e_power_up_phy(adapter);
+
+		adapter->hw.phy.autoneg_wait_to_complete = 1;
+		e1000e_reset(adapter);
+		adapter->hw.phy.autoneg_wait_to_complete = 0;
+	}
+
 	if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
 		/* Offline tests */
 
@@ -1717,8 +1734,6 @@
 		if (if_running)
 			/* indicate we're in test mode */
 			dev_close(netdev);
-		else
-			e1000e_reset(adapter);
 
 		if (e1000_reg_test(adapter, &data[0]))
 			eth_test->flags |= ETH_TEST_FL_FAILED;
@@ -1732,8 +1747,6 @@
 			eth_test->flags |= ETH_TEST_FL_FAILED;
 
 		e1000e_reset(adapter);
-		/* make sure the phy is powered up */
-		e1000e_power_up_phy(adapter);
 		if (e1000_loopback_test(adapter, &data[3]))
 			eth_test->flags |= ETH_TEST_FL_FAILED;
 
@@ -1755,28 +1768,29 @@
 		if (if_running)
 			dev_open(netdev);
 	} else {
-		if (!if_running && (adapter->flags & FLAG_HAS_AMT)) {
-			clear_bit(__E1000_TESTING, &adapter->state);
-			dev_open(netdev);
-			set_bit(__E1000_TESTING, &adapter->state);
-		}
+		/* Online tests */
 
 		e_info("online testing starting\n");
-		/* Online tests */
-		if (e1000_link_test(adapter, &data[4]))
-			eth_test->flags |= ETH_TEST_FL_FAILED;
 
-		/* Online tests aren't run; pass by default */
+		/* register, eeprom, intr and loopback tests not run online */
 		data[0] = 0;
 		data[1] = 0;
 		data[2] = 0;
 		data[3] = 0;
 
-		if (!if_running && (adapter->flags & FLAG_HAS_AMT))
-			dev_close(netdev);
+		if (e1000_link_test(adapter, &data[4]))
+			eth_test->flags |= ETH_TEST_FL_FAILED;
 
 		clear_bit(__E1000_TESTING, &adapter->state);
 	}
+
+	if (!if_running) {
+		e1000e_reset(adapter);
+
+		if (adapter->flags & FLAG_HAS_AMT)
+			e1000e_release_hw_control(adapter);
+	}
+
 	msleep_interruptible(4 * 1000);
 }
 
diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h
index ba302a5..e774380 100644
--- a/drivers/net/e1000e/hw.h
+++ b/drivers/net/e1000e/hw.h
@@ -83,6 +83,7 @@
 	E1000_EXTCNF_CTRL  = 0x00F00, /* Extended Configuration Control */
 	E1000_EXTCNF_SIZE  = 0x00F08, /* Extended Configuration Size */
 	E1000_PHY_CTRL     = 0x00F10, /* PHY Control Register in CSR */
+#define E1000_POEMB	E1000_PHY_CTRL	/* PHY OEM Bits */
 	E1000_PBA      = 0x01000, /* Packet Buffer Allocation - RW */
 	E1000_PBS      = 0x01008, /* Packet Buffer Size */
 	E1000_EEMNGCTL = 0x01010, /* MNG EEprom Control */
diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c
index d86cc08..5bb65b7 100644
--- a/drivers/net/e1000e/ich8lan.c
+++ b/drivers/net/e1000e/ich8lan.c
@@ -321,7 +321,7 @@
 	}
 
 	/*
-	 * Reset the PHY before any acccess to it.  Doing so, ensures that
+	 * Reset the PHY before any access to it.  Doing so, ensures that
 	 * the PHY is in a known good state before we read/write PHY registers.
 	 * The generic reset is sufficient here, because we haven't determined
 	 * the PHY type yet.
@@ -1395,22 +1395,6 @@
 	}
 }
 
-static u32 e1000_calc_rx_da_crc(u8 mac[])
-{
-	u32 poly = 0xEDB88320;	/* Polynomial for 802.3 CRC calculation */
-	u32 i, j, mask, crc;
-
-	crc = 0xffffffff;
-	for (i = 0; i < 6; i++) {
-		crc = crc ^ mac[i];
-		for (j = 8; j > 0; j--) {
-			mask = (crc & 1) * (-1);
-			crc = (crc >> 1) ^ (poly & mask);
-		}
-	}
-	return ~crc;
-}
-
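
The helper removed above is an open-coded reflected CRC-32 (polynomial 0xEDB88320, initial value 0xffffffff, result inverted) over the six address bytes; that matches the kernel's ether_crc_le() apart from the final inversion, hence the ~ether_crc_le(ETH_ALEN, mac_addr) replacement further down. A stand-alone user-space check of that equivalence (the CRC is re-implemented bit by bit here purely for illustration):

#include <stdint.h>
#include <stdio.h>

/* Bitwise little-endian CRC-32, no final inversion -- the value
 * ether_crc_le() returns for the same bytes. */
static uint32_t crc32_le_bitwise(const uint8_t *p, int len)
{
	uint32_t crc = 0xffffffff;
	int i, j;

	for (i = 0; i < len; i++) {
		crc ^= p[i];
		for (j = 0; j < 8; j++)
			crc = (crc >> 1) ^ ((crc & 1) ? 0xEDB88320 : 0);
	}
	return crc;
}

/* The loop removed from ich8lan.c, verbatim apart from the types. */
static uint32_t removed_rx_da_crc(const uint8_t mac[6])
{
	uint32_t poly = 0xEDB88320, crc = 0xffffffff, mask;
	int i, j;

	for (i = 0; i < 6; i++) {
		crc = crc ^ mac[i];
		for (j = 8; j > 0; j--) {
			mask = (crc & 1) * (-1);
			crc = (crc >> 1) ^ (poly & mask);
		}
	}
	return ~crc;
}

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x1b, 0x21, 0x3c, 0x4d, 0x5e };

	/* Both lines print the same value. */
	printf("%08x\n%08x\n", removed_rx_da_crc(mac),
	       ~crc32_le_bitwise(mac, 6));
	return 0;
}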
 /**
  *  e1000_lv_jumbo_workaround_ich8lan - required for jumbo frame operation
  *  with 82579 PHY
@@ -1453,8 +1437,7 @@
 			mac_addr[4] = (addr_high & 0xFF);
 			mac_addr[5] = ((addr_high >> 8) & 0xFF);
 
-			ew32(PCH_RAICC(i),
-					e1000_calc_rx_da_crc(mac_addr));
+			ew32(PCH_RAICC(i), ~ether_crc_le(ETH_ALEN, mac_addr));
 		}
 
 		/* Write Rx addresses to the PHY */
@@ -2977,7 +2960,7 @@
 {
 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
 	u16 reg;
-	u32 ctrl, icr, kab;
+	u32 ctrl, kab;
 	s32 ret_val;
 
 	/*
@@ -3067,7 +3050,7 @@
 		ew32(CRC_OFFSET, 0x65656565);
 
 	ew32(IMC, 0xffffffff);
-	icr = er32(ICR);
+	er32(ICR);
 
 	kab = er32(KABGTXD);
 	kab |= E1000_KABGTXD_BGSQLBIAS;
@@ -3118,7 +3101,7 @@
 	 * Reset the phy after disabling host wakeup to reset the Rx buffer.
 	 */
 	if (hw->phy.type == e1000_phy_82578) {
-		hw->phy.ops.read_reg(hw, BM_WUC, &i);
+		e1e_rphy(hw, BM_WUC, &i);
 		ret_val = e1000_phy_hw_reset_ich8lan(hw);
 		if (ret_val)
 			return ret_val;
@@ -3276,9 +3259,8 @@
 	    (hw->phy.type == e1000_phy_82577)) {
 		ew32(FCRTV_PCH, hw->fc.refresh_time);
 
-		ret_val = hw->phy.ops.write_reg(hw,
-		                             PHY_REG(BM_PORT_CTRL_PAGE, 27),
-		                             hw->fc.pause_time);
+		ret_val = e1e_wphy(hw, PHY_REG(BM_PORT_CTRL_PAGE, 27),
+				   hw->fc.pause_time);
 		if (ret_val)
 			return ret_val;
 	}
@@ -3342,8 +3324,7 @@
 			return ret_val;
 		break;
 	case e1000_phy_ife:
-		ret_val = hw->phy.ops.read_reg(hw, IFE_PHY_MDIX_CONTROL,
-		                               &reg_data);
+		ret_val = e1e_rphy(hw, IFE_PHY_MDIX_CONTROL, &reg_data);
 		if (ret_val)
 			return ret_val;
 
@@ -3361,8 +3342,7 @@
 			reg_data |= IFE_PMC_AUTO_MDIX;
 			break;
 		}
-		ret_val = hw->phy.ops.write_reg(hw, IFE_PHY_MDIX_CONTROL,
-		                                reg_data);
+		ret_val = e1e_wphy(hw, IFE_PHY_MDIX_CONTROL, reg_data);
 		if (ret_val)
 			return ret_val;
 		break;
@@ -3646,7 +3626,8 @@
 {
 	if (hw->phy.type == e1000_phy_ife)
 		return e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED,
-			       (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_OFF));
+				(IFE_PSCL_PROBE_MODE |
+				 IFE_PSCL_PROBE_LEDS_OFF));
 
 	ew32(LEDCTL, hw->mac.ledctl_mode1);
 	return 0;
@@ -3660,8 +3641,7 @@
  **/
 static s32 e1000_setup_led_pchlan(struct e1000_hw *hw)
 {
-	return hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
-					(u16)hw->mac.ledctl_mode1);
+	return e1e_wphy(hw, HV_LED_CONFIG, (u16)hw->mac.ledctl_mode1);
 }
 
 /**
@@ -3672,8 +3652,7 @@
  **/
 static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw)
 {
-	return hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
-					(u16)hw->mac.ledctl_default);
+	return e1e_wphy(hw, HV_LED_CONFIG, (u16)hw->mac.ledctl_default);
 }
 
 /**
@@ -3704,7 +3683,7 @@
 		}
 	}
 
-	return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data);
+	return e1e_wphy(hw, HV_LED_CONFIG, data);
 }
 
 /**
@@ -3735,7 +3714,7 @@
 		}
 	}
 
-	return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data);
+	return e1e_wphy(hw, HV_LED_CONFIG, data);
 }
 
 /**
@@ -3844,20 +3823,20 @@
 	if ((hw->phy.type == e1000_phy_82578) ||
 	    (hw->phy.type == e1000_phy_82579) ||
 	    (hw->phy.type == e1000_phy_82577)) {
-		hw->phy.ops.read_reg(hw, HV_SCC_UPPER, &phy_data);
-		hw->phy.ops.read_reg(hw, HV_SCC_LOWER, &phy_data);
-		hw->phy.ops.read_reg(hw, HV_ECOL_UPPER, &phy_data);
-		hw->phy.ops.read_reg(hw, HV_ECOL_LOWER, &phy_data);
-		hw->phy.ops.read_reg(hw, HV_MCC_UPPER, &phy_data);
-		hw->phy.ops.read_reg(hw, HV_MCC_LOWER, &phy_data);
-		hw->phy.ops.read_reg(hw, HV_LATECOL_UPPER, &phy_data);
-		hw->phy.ops.read_reg(hw, HV_LATECOL_LOWER, &phy_data);
-		hw->phy.ops.read_reg(hw, HV_COLC_UPPER, &phy_data);
-		hw->phy.ops.read_reg(hw, HV_COLC_LOWER, &phy_data);
-		hw->phy.ops.read_reg(hw, HV_DC_UPPER, &phy_data);
-		hw->phy.ops.read_reg(hw, HV_DC_LOWER, &phy_data);
-		hw->phy.ops.read_reg(hw, HV_TNCRS_UPPER, &phy_data);
-		hw->phy.ops.read_reg(hw, HV_TNCRS_LOWER, &phy_data);
+		e1e_rphy(hw, HV_SCC_UPPER, &phy_data);
+		e1e_rphy(hw, HV_SCC_LOWER, &phy_data);
+		e1e_rphy(hw, HV_ECOL_UPPER, &phy_data);
+		e1e_rphy(hw, HV_ECOL_LOWER, &phy_data);
+		e1e_rphy(hw, HV_MCC_UPPER, &phy_data);
+		e1e_rphy(hw, HV_MCC_LOWER, &phy_data);
+		e1e_rphy(hw, HV_LATECOL_UPPER, &phy_data);
+		e1e_rphy(hw, HV_LATECOL_LOWER, &phy_data);
+		e1e_rphy(hw, HV_COLC_UPPER, &phy_data);
+		e1e_rphy(hw, HV_COLC_LOWER, &phy_data);
+		e1e_rphy(hw, HV_DC_UPPER, &phy_data);
+		e1e_rphy(hw, HV_DC_LOWER, &phy_data);
+		e1e_rphy(hw, HV_TNCRS_UPPER, &phy_data);
+		e1e_rphy(hw, HV_TNCRS_LOWER, &phy_data);
 	}
 }
 
diff --git a/drivers/net/e1000e/lib.c b/drivers/net/e1000e/lib.c
index 7e55170..ff28721 100644
--- a/drivers/net/e1000e/lib.c
+++ b/drivers/net/e1000e/lib.c
@@ -1135,7 +1135,8 @@
 		ret_val = e1e_rphy(hw, PHY_AUTONEG_ADV, &mii_nway_adv_reg);
 		if (ret_val)
 			return ret_val;
-		ret_val = e1e_rphy(hw, PHY_LP_ABILITY, &mii_nway_lp_ability_reg);
+		ret_val =
+		    e1e_rphy(hw, PHY_LP_ABILITY, &mii_nway_lp_ability_reg);
 		if (ret_val)
 			return ret_val;
 
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index fe50242..fa5b604 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -1980,15 +1980,15 @@
 }
 
 /**
- * e1000_get_hw_control - get control of the h/w from f/w
+ * e1000e_get_hw_control - get control of the h/w from f/w
  * @adapter: address of board private structure
  *
- * e1000_get_hw_control sets {CTRL_EXT|SWSM}:DRV_LOAD bit.
+ * e1000e_get_hw_control sets {CTRL_EXT|SWSM}:DRV_LOAD bit.
  * For ASF and Pass Through versions of f/w this means that
  * the driver is loaded. For AMT version (only with 82573)
  * of the f/w this means that the network i/f is open.
  **/
-static void e1000_get_hw_control(struct e1000_adapter *adapter)
+void e1000e_get_hw_control(struct e1000_adapter *adapter)
 {
 	struct e1000_hw *hw = &adapter->hw;
 	u32 ctrl_ext;
@@ -2005,16 +2005,16 @@
 }
 
 /**
- * e1000_release_hw_control - release control of the h/w to f/w
+ * e1000e_release_hw_control - release control of the h/w to f/w
  * @adapter: address of board private structure
  *
- * e1000_release_hw_control resets {CTRL_EXT|SWSM}:DRV_LOAD bit.
+ * e1000e_release_hw_control resets {CTRL_EXT|SWSM}:DRV_LOAD bit.
  * For ASF and Pass Through versions of f/w this means that the
 * driver is no longer loaded. For AMT version (only with 82573)
  * of the f/w this means that the network i/f is closed.
  *
  **/
-static void e1000_release_hw_control(struct e1000_adapter *adapter)
+void e1000e_release_hw_control(struct e1000_adapter *adapter)
 {
 	struct e1000_hw *hw = &adapter->hw;
 	u32 ctrl_ext;
@@ -2445,7 +2445,7 @@
 	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
 	    (vid == adapter->mng_vlan_id)) {
 		/* release control to f/w */
-		e1000_release_hw_control(adapter);
+		e1000e_release_hw_control(adapter);
 		return;
 	}
 
@@ -2734,6 +2734,9 @@
 			ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, true);
 		else
 			ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, false);
+
+		if (ret_val)
+			e_dbg("failed to enable jumbo frame workaround mode\n");
 	}
 
 	/* Program MC offset vector base */
@@ -3184,7 +3187,6 @@
 		ew32(PBA, pba);
 	}
 
-
 	/*
 	 * flow control settings
 	 *
@@ -3272,7 +3274,7 @@
 	 * that the network interface is in control
 	 */
 	if (adapter->flags & FLAG_HAS_AMT)
-		e1000_get_hw_control(adapter);
+		e1000e_get_hw_control(adapter);
 
 	ew32(WUC, 0);
 
@@ -3285,6 +3287,13 @@
 	ew32(VET, ETH_P_8021Q);
 
 	e1000e_reset_adaptive(hw);
+
+	if (!netif_running(adapter->netdev) &&
+	    !test_bit(__E1000_TESTING, &adapter->state)) {
+		e1000_power_down_phy(adapter);
+		return;
+	}
+
 	e1000_get_phy_info(hw);
 
 	if ((adapter->flags & FLAG_HAS_SMART_POWER_DOWN) &&
@@ -3570,7 +3579,7 @@
 	 * interface is now open and reset the part to a known state.
 	 */
 	if (adapter->flags & FLAG_HAS_AMT) {
-		e1000_get_hw_control(adapter);
+		e1000e_get_hw_control(adapter);
 		e1000e_reset(adapter);
 	}
 
@@ -3634,7 +3643,7 @@
 	return 0;
 
 err_req_irq:
-	e1000_release_hw_control(adapter);
+	e1000e_release_hw_control(adapter);
 	e1000_power_down_phy(adapter);
 	e1000e_free_rx_resources(adapter);
 err_setup_rx:
@@ -3689,8 +3698,9 @@
 	 * If AMT is enabled, let the firmware know that the network
 	 * interface is now closed
 	 */
-	if (adapter->flags & FLAG_HAS_AMT)
-		e1000_release_hw_control(adapter);
+	if ((adapter->flags & FLAG_HAS_AMT) &&
+	    !test_bit(__E1000_TESTING, &adapter->state))
+		e1000e_release_hw_control(adapter);
 
 	if ((adapter->flags & FLAG_HAS_ERT) ||
 	    (adapter->hw.mac.type == e1000_pch2lan))
@@ -5209,7 +5219,7 @@
 	 * Release control of h/w to f/w.  If f/w is AMT enabled, this
 	 * would have already happened in close and is redundant.
 	 */
-	e1000_release_hw_control(adapter);
+	e1000e_release_hw_control(adapter);
 
 	pci_disable_device(pdev);
 
@@ -5366,7 +5376,7 @@
 	 * under the control of the driver.
 	 */
 	if (!(adapter->flags & FLAG_HAS_AMT))
-		e1000_get_hw_control(adapter);
+		e1000e_get_hw_control(adapter);
 
 	return 0;
 }
@@ -5613,7 +5623,7 @@
 	 * under the control of the driver.
 	 */
 	if (!(adapter->flags & FLAG_HAS_AMT))
-		e1000_get_hw_control(adapter);
+		e1000e_get_hw_control(adapter);
 
 }
 
@@ -5636,7 +5646,7 @@
 	ret_val = e1000_read_pba_string_generic(hw, pba_str,
 						E1000_PBANUM_LENGTH);
 	if (ret_val)
-		strcpy(pba_str, "Unknown");
+		strncpy((char *)pba_str, "Unknown", sizeof(pba_str) - 1);
 	e_info("MAC: %d, PHY: %d, PBA No: %s\n",
 	       hw->mac.type, hw->phy.type, pba_str);
 }
@@ -5963,9 +5973,9 @@
 	 * under the control of the driver.
 	 */
 	if (!(adapter->flags & FLAG_HAS_AMT))
-		e1000_get_hw_control(adapter);
+		e1000e_get_hw_control(adapter);
 
-	strcpy(netdev->name, "eth%d");
+	strncpy(netdev->name, "eth%d", sizeof(netdev->name) - 1);
 	err = register_netdev(netdev);
 	if (err)
 		goto err_register;
@@ -5982,12 +5992,11 @@
 
 err_register:
 	if (!(adapter->flags & FLAG_HAS_AMT))
-		e1000_release_hw_control(adapter);
+		e1000e_release_hw_control(adapter);
 err_eeprom:
 	if (!e1000_check_reset_block(&adapter->hw))
 		e1000_phy_hw_reset(&adapter->hw);
 err_hw_init:
-
 	kfree(adapter->tx_ring);
 	kfree(adapter->rx_ring);
 err_sw_init:
@@ -6053,7 +6062,7 @@
 	 * Release control of h/w to f/w.  If f/w is AMT enabled, this
 	 * would have already happened in close and is redundant.
 	 */
-	e1000_release_hw_control(adapter);
+	e1000e_release_hw_control(adapter);
 
 	e1000e_reset_interrupt_capability(adapter);
 	kfree(adapter->tx_ring);
diff --git a/drivers/net/e1000e/phy.c b/drivers/net/e1000e/phy.c
index 1781efe..00f89e8 100644
--- a/drivers/net/e1000e/phy.c
+++ b/drivers/net/e1000e/phy.c
@@ -637,12 +637,11 @@
  **/
 s32 e1000_copper_link_setup_82577(struct e1000_hw *hw)
 {
-	struct e1000_phy_info *phy = &hw->phy;
 	s32 ret_val;
 	u16 phy_data;
 
 	/* Enable CRS on TX. This must be set for half-duplex operation. */
-	ret_val = phy->ops.read_reg(hw, I82577_CFG_REG, &phy_data);
+	ret_val = e1e_rphy(hw, I82577_CFG_REG, &phy_data);
 	if (ret_val)
 		goto out;
 
@@ -651,7 +650,7 @@
 	/* Enable downshift */
 	phy_data |= I82577_CFG_ENABLE_DOWNSHIFT;
 
-	ret_val = phy->ops.write_reg(hw, I82577_CFG_REG, phy_data);
+	ret_val = e1e_wphy(hw, I82577_CFG_REG, phy_data);
 
 out:
 	return ret_val;
@@ -774,16 +773,14 @@
 	}
 
 	if (phy->type == e1000_phy_82578) {
-		ret_val = phy->ops.read_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL,
-		                            &phy_data);
+		ret_val = e1e_rphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data);
 		if (ret_val)
 			return ret_val;
 
 		/* 82578 PHY - set the downshift count to 1x. */
 		phy_data |= I82578_EPSCR_DOWNSHIFT_ENABLE;
 		phy_data &= ~I82578_EPSCR_DOWNSHIFT_COUNTER_MASK;
-		ret_val = phy->ops.write_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL,
-		                             phy_data);
+		ret_val = e1e_wphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_data);
 		if (ret_val)
 			return ret_val;
 	}
@@ -1319,9 +1316,8 @@
 				 * We didn't get link.
 				 * Reset the DSP and cross our fingers.
 				 */
-				ret_val = e1e_wphy(hw,
-						M88E1000_PHY_PAGE_SELECT,
-						0x001d);
+				ret_val = e1e_wphy(hw, M88E1000_PHY_PAGE_SELECT,
+						   0x001d);
 				if (ret_val)
 					return ret_val;
 				ret_val = e1000e_phy_reset_dsp(hw);
@@ -2990,7 +2986,7 @@
 }
 
 /**
- *  e1000_get_phy_addr_for_hv_page - Get PHY adrress based on page
+ *  e1000_get_phy_addr_for_hv_page - Get PHY address based on page
  *  @page: page to be accessed
  **/
 static u32 e1000_get_phy_addr_for_hv_page(u32 page)
@@ -3071,12 +3067,12 @@
 		goto out;
 
 	/* Do not apply workaround if in PHY loopback bit 14 set */
-	hw->phy.ops.read_reg(hw, PHY_CONTROL, &data);
+	e1e_rphy(hw, PHY_CONTROL, &data);
 	if (data & PHY_CONTROL_LB)
 		goto out;
 
 	/* check if link is up and at 1Gbps */
-	ret_val = hw->phy.ops.read_reg(hw, BM_CS_STATUS, &data);
+	ret_val = e1e_rphy(hw, BM_CS_STATUS, &data);
 	if (ret_val)
 		goto out;
 
@@ -3092,14 +3088,12 @@
 	mdelay(200);
 
 	/* flush the packets in the fifo buffer */
-	ret_val = hw->phy.ops.write_reg(hw, HV_MUX_DATA_CTRL,
-	                                HV_MUX_DATA_CTRL_GEN_TO_MAC |
-	                                HV_MUX_DATA_CTRL_FORCE_SPEED);
+	ret_val = e1e_wphy(hw, HV_MUX_DATA_CTRL, HV_MUX_DATA_CTRL_GEN_TO_MAC |
+			   HV_MUX_DATA_CTRL_FORCE_SPEED);
 	if (ret_val)
 		goto out;
 
-	ret_val = hw->phy.ops.write_reg(hw, HV_MUX_DATA_CTRL,
-	                                HV_MUX_DATA_CTRL_GEN_TO_MAC);
+	ret_val = e1e_wphy(hw, HV_MUX_DATA_CTRL, HV_MUX_DATA_CTRL_GEN_TO_MAC);
 
 out:
 	return ret_val;
@@ -3119,7 +3113,7 @@
 	s32 ret_val;
 	u16 data;
 
-	ret_val = phy->ops.read_reg(hw, I82577_PHY_STATUS_2, &data);
+	ret_val = e1e_rphy(hw, I82577_PHY_STATUS_2, &data);
 
 	if (!ret_val)
 		phy->cable_polarity = (data & I82577_PHY_STATUS2_REV_POLARITY)
@@ -3142,13 +3136,13 @@
 	u16 phy_data;
 	bool link;
 
-	ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data);
+	ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_data);
 	if (ret_val)
 		goto out;
 
 	e1000e_phy_force_speed_duplex_setup(hw, &phy_data);
 
-	ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data);
+	ret_val = e1e_wphy(hw, PHY_CONTROL, phy_data);
 	if (ret_val)
 		goto out;
 
@@ -3212,7 +3206,7 @@
 	if (ret_val)
 		goto out;
 
-	ret_val = phy->ops.read_reg(hw, I82577_PHY_STATUS_2, &data);
+	ret_val = e1e_rphy(hw, I82577_PHY_STATUS_2, &data);
 	if (ret_val)
 		goto out;
 
@@ -3224,7 +3218,7 @@
 		if (ret_val)
 			goto out;
 
-		ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &data);
+		ret_val = e1e_rphy(hw, PHY_1000T_STATUS, &data);
 		if (ret_val)
 			goto out;
 
@@ -3258,7 +3252,7 @@
 	s32 ret_val;
 	u16 phy_data, length;
 
-	ret_val = phy->ops.read_reg(hw, I82577_PHY_DIAG_STATUS, &phy_data);
+	ret_val = e1e_rphy(hw, I82577_PHY_DIAG_STATUS, &phy_data);
 	if (ret_val)
 		goto out;
 
diff --git a/drivers/net/eepro.c b/drivers/net/eepro.c
index 4fa8d2a..eb35951 100644
--- a/drivers/net/eepro.c
+++ b/drivers/net/eepro.c
@@ -1761,7 +1761,7 @@
 module_param_array(irq, int, NULL, 0);
 module_param_array(mem, int, NULL, 0);
 module_param(autodetect, int, 0);
-MODULE_PARM_DESC(io, "EtherExpress Pro/10 I/O base addres(es)");
+MODULE_PARM_DESC(io, "EtherExpress Pro/10 I/O base address(es)");
 MODULE_PARM_DESC(irq, "EtherExpress Pro/10 IRQ number(s)");
 MODULE_PARM_DESC(mem, "EtherExpress Pro/10 Rx buffer size(es) in kB (3-29)");
 MODULE_PARM_DESC(autodetect, "EtherExpress Pro/10 force board(s) detection (0-1)");
diff --git a/drivers/net/ehea/ehea.h b/drivers/net/ehea/ehea.h
index a724a2d..6c7257b 100644
--- a/drivers/net/ehea/ehea.h
+++ b/drivers/net/ehea/ehea.h
@@ -40,7 +40,7 @@
 #include <asm/io.h>
 
 #define DRV_NAME	"ehea"
-#define DRV_VERSION	"EHEA_0106"
+#define DRV_VERSION	"EHEA_0107"
 
 /* eHEA capability flags */
 #define DLPAR_PORT_ADD_REM 1
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index 1032b5b..f75d314 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -437,7 +437,7 @@
 		}
 	}
 	/* Ring doorbell */
-	ehea_update_rq1a(pr->qp, i);
+	ehea_update_rq1a(pr->qp, i - 1);
 }
 
 static int ehea_refill_rq_def(struct ehea_port_res *pr,
@@ -1329,9 +1329,7 @@
 	int ret;
 	struct ehea_qp_init_attr *init_attr = &pr->qp->init_attr;
 
-	ehea_init_fill_rq1(pr, init_attr->act_nr_rwqes_rq1
-			       - init_attr->act_nr_rwqes_rq2
-			       - init_attr->act_nr_rwqes_rq3 - 1);
+	ehea_init_fill_rq1(pr, pr->rq1_skba.len);
 
 	ret = ehea_refill_rq2(pr, init_attr->act_nr_rwqes_rq2 - 1);
 
diff --git a/drivers/net/fec.c b/drivers/net/fec.c
index cce32d4..2a71373 100644
--- a/drivers/net/fec.c
+++ b/drivers/net/fec.c
@@ -17,6 +17,8 @@
  *
  * Bug fixes and cleanup by Philippe De Muyter (phdm@macqel.be)
  * Copyright (c) 2004-2006 Macq Electronique SA.
+ *
+ * Copyright (C) 2010 Freescale Semiconductor, Inc.
  */
 
 #include <linux/module.h>
@@ -45,29 +47,41 @@
 
 #include <asm/cacheflush.h>
 
-#ifndef CONFIG_ARCH_MXC
+#ifndef CONFIG_ARM
 #include <asm/coldfire.h>
 #include <asm/mcfsim.h>
 #endif
 
 #include "fec.h"
 
-#ifdef CONFIG_ARCH_MXC
-#include <mach/hardware.h>
+#if defined(CONFIG_ARCH_MXC) || defined(CONFIG_SOC_IMX28)
 #define FEC_ALIGNMENT	0xf
 #else
 #define FEC_ALIGNMENT	0x3
 #endif
 
-/*
- * Define the fixed address of the FEC hardware.
- */
-#if defined(CONFIG_M5272)
+#define DRIVER_NAME	"fec"
 
-static unsigned char	fec_mac_default[] = {
-	0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+/* Controller is ENET-MAC */
+#define FEC_QUIRK_ENET_MAC		(1 << 0)
+/* Controller needs driver to swap frame */
+#define FEC_QUIRK_SWAP_FRAME		(1 << 1)
+
+static struct platform_device_id fec_devtype[] = {
+	{
+		.name = DRIVER_NAME,
+		.driver_data = 0,
+	}, {
+		.name = "imx28-fec",
+		.driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME,
+	}
 };
 
+static unsigned char macaddr[ETH_ALEN];
+module_param_array(macaddr, byte, NULL, 0);
+MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
+
+#if defined(CONFIG_M5272)
 /*
  * Some hardware gets its MAC address out of local flash memory.
  * if this is non-zero then assume it is the address to get MAC from.
@@ -133,7 +147,8 @@
  * account when setting it.
  */
 #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
-    defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARCH_MXC)
+    defined(CONFIG_M520x) || defined(CONFIG_M532x) || \
+    defined(CONFIG_ARCH_MXC) || defined(CONFIG_SOC_IMX28)
 #define	OPT_FRAME_SIZE	(PKT_MAXBUF_SIZE << 16)
 #else
 #define	OPT_FRAME_SIZE	0
@@ -186,7 +201,6 @@
 	int     mii_timeout;
 	uint    phy_speed;
 	phy_interface_t	phy_interface;
-	int	index;
 	int	link;
 	int	full_duplex;
 	struct	completion mdio_done;
@@ -213,10 +227,23 @@
 /* Transmitter timeout */
 #define TX_TIMEOUT (2 * HZ)
 
+static void *swap_buffer(void *bufaddr, int len)
+{
+	int i;
+	unsigned int *buf = bufaddr;
+
+	for (i = 0; i < (len + 3) / 4; i++, buf++)
+		*buf = cpu_to_be32(*buf);
+
+	return bufaddr;
+}
+
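
swap_buffer() above converts each 32-bit word of a frame to big-endian with cpu_to_be32(). A stand-alone illustration of the effect (user-space, with htonl() standing in for cpu_to_be32(); the byte values are arbitrary):

#include <arpa/inet.h>	/* htonl() */
#include <stdint.h>
#include <stdio.h>

/* Same shape as the driver helper: swap every 32-bit word, rounding
 * the length up to a multiple of four. */
static void *swap_buffer_demo(void *bufaddr, int len)
{
	uint32_t *buf = bufaddr;
	int i;

	for (i = 0; i < (len + 3) / 4; i++, buf++)
		*buf = htonl(*buf);

	return bufaddr;
}

int main(void)
{
	union {
		uint8_t b[8];
		uint32_t w[2];
	} frame = { .b = { 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88 } };
	int i;

	swap_buffer_demo(frame.w, sizeof(frame.b));
	for (i = 0; i < 8; i++)
		printf("%02x ", frame.b[i]);
	printf("\n");	/* 44 33 22 11 88 77 66 55 on a little-endian host */
	return 0;
}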
 static netdev_tx_t
 fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct fec_enet_private *fep = netdev_priv(dev);
+	const struct platform_device_id *id_entry =
+				platform_get_device_id(fep->pdev);
 	struct bufdesc *bdp;
 	void *bufaddr;
 	unsigned short	status;
@@ -261,6 +288,14 @@
 		bufaddr = fep->tx_bounce[index];
 	}
 
+	/*
+	 * The hardware design makes an incorrect assumption about the
+	 * endianness of the system it runs on. As a result, the driver
+	 * has to swap every frame going to and coming from the controller.
+	 */
+	if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
+		swap_buffer(bufaddr, skb->len);
+
 	/* Save skb pointer */
 	fep->tx_skbuff[fep->skb_cur] = skb;
 
@@ -429,6 +464,8 @@
 fec_enet_rx(struct net_device *dev)
 {
 	struct	fec_enet_private *fep = netdev_priv(dev);
+	const struct platform_device_id *id_entry =
+				platform_get_device_id(fep->pdev);
 	struct bufdesc *bdp;
 	unsigned short status;
 	struct	sk_buff	*skb;
@@ -492,6 +529,9 @@
 	        dma_unmap_single(NULL, bdp->cbd_bufaddr, bdp->cbd_datlen,
         			DMA_FROM_DEVICE);
 
+		if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
+			swap_buffer(data, pkt_len);
+
 		/* This does 16 byte alignment, exactly what we need.
 		 * The packet length includes FCS, but we don't want to
 		 * include that when passing upstream as it messes up
@@ -538,37 +578,50 @@
 }
 
 /* ------------------------------------------------------------------------- */
-#ifdef CONFIG_M5272
 static void __inline__ fec_get_mac(struct net_device *dev)
 {
 	struct fec_enet_private *fep = netdev_priv(dev);
+	struct fec_platform_data *pdata = fep->pdev->dev.platform_data;
 	unsigned char *iap, tmpaddr[ETH_ALEN];
 
-	if (FEC_FLASHMAC) {
-		/*
-		 * Get MAC address from FLASH.
-		 * If it is all 1's or 0's, use the default.
-		 */
-		iap = (unsigned char *)FEC_FLASHMAC;
-		if ((iap[0] == 0) && (iap[1] == 0) && (iap[2] == 0) &&
-		    (iap[3] == 0) && (iap[4] == 0) && (iap[5] == 0))
-			iap = fec_mac_default;
-		if ((iap[0] == 0xff) && (iap[1] == 0xff) && (iap[2] == 0xff) &&
-		    (iap[3] == 0xff) && (iap[4] == 0xff) && (iap[5] == 0xff))
-			iap = fec_mac_default;
-	} else {
-		*((unsigned long *) &tmpaddr[0]) = readl(fep->hwp + FEC_ADDR_LOW);
-		*((unsigned short *) &tmpaddr[4]) = (readl(fep->hwp + FEC_ADDR_HIGH) >> 16);
+	/*
+	 * try to get the MAC address in the following order:
+	 *
+	 * 1) module parameter via the kernel command line in the form
+	 *    fec.macaddr=0x00,0x04,0x9f,0x01,0x30,0xe0
+	 */
+	iap = macaddr;
+
+	/*
+	 * 2) from flash or fuse (via platform data)
+	 */
+	if (!is_valid_ether_addr(iap)) {
+#ifdef CONFIG_M5272
+		if (FEC_FLASHMAC)
+			iap = (unsigned char *)FEC_FLASHMAC;
+#else
+		if (pdata)
+			memcpy(iap, pdata->mac, ETH_ALEN);
+#endif
+	}
+
+	/*
+	 * 3) FEC mac registers set by bootloader
+	 */
+	if (!is_valid_ether_addr(iap)) {
+		*((unsigned long *) &tmpaddr[0]) =
+			be32_to_cpu(readl(fep->hwp + FEC_ADDR_LOW));
+		*((unsigned short *) &tmpaddr[4]) =
+			be16_to_cpu(readl(fep->hwp + FEC_ADDR_HIGH) >> 16);
 		iap = &tmpaddr[0];
 	}
 
 	memcpy(dev->dev_addr, iap, ETH_ALEN);
 
-	/* Adjust MAC if using default MAC address */
-	if (iap == fec_mac_default)
-		 dev->dev_addr[ETH_ALEN-1] = fec_mac_default[ETH_ALEN-1] + fep->index;
+	/* Adjust MAC if using macaddr */
+	if (iap == macaddr)
+		 dev->dev_addr[ETH_ALEN-1] = macaddr[ETH_ALEN-1] + fep->pdev->id;
 }
-#endif
 
 /* ------------------------------------------------------------------------- */
 
@@ -651,8 +704,8 @@
 	fep->mii_timeout = 0;
 	init_completion(&fep->mdio_done);
 
-	/* start a read op */
-	writel(FEC_MMFR_ST | FEC_MMFR_OP_READ |
+	/* start a write op */
+	writel(FEC_MMFR_ST | FEC_MMFR_OP_WRITE |
 		FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(regnum) |
 		FEC_MMFR_TA | FEC_MMFR_DATA(value),
 		fep->hwp + FEC_MII_DATA);
@@ -681,6 +734,7 @@
 	char mdio_bus_id[MII_BUS_ID_SIZE];
 	char phy_name[MII_BUS_ID_SIZE + 3];
 	int phy_id;
+	int dev_id = fep->pdev->id;
 
 	fep->phy_dev = NULL;
 
@@ -692,6 +746,8 @@
 			continue;
 		if (fep->mii_bus->phy_map[phy_id]->phy_id == 0)
 			continue;
+		if (dev_id--)
+			continue;
 		strncpy(mdio_bus_id, fep->mii_bus->id, MII_BUS_ID_SIZE);
 		break;
 	}
@@ -729,10 +785,35 @@
 
 static int fec_enet_mii_init(struct platform_device *pdev)
 {
+	static struct mii_bus *fec0_mii_bus;
 	struct net_device *dev = platform_get_drvdata(pdev);
 	struct fec_enet_private *fep = netdev_priv(dev);
+	const struct platform_device_id *id_entry =
+				platform_get_device_id(fep->pdev);
 	int err = -ENXIO, i;
 
+	/*
+	 * With enet-mac, the dual fec interfaces are not equivalent.
+	 * Here are the differences:
+	 *
+	 *  - fec0 supports MII & RMII modes while fec1 only supports RMII
+	 *  - fec0 acts as the 1588 time master while fec1 is slave
+	 *  - external phys can only be configured by fec0
+	 *
+	 * That is to say, fec1 cannot work independently. It only works
+	 * when fec0 is working. The reason behind this design is that the
+	 * second interface is added primarily for Switch mode.
+	 *
+	 * Because of the last point above, both phys are attached on fec0
+	 * mdio interface in board design, and need to be configured by
+	 * fec0 mii_bus.
+	 */
+	if ((id_entry->driver_data & FEC_QUIRK_ENET_MAC) && pdev->id) {
+		/* fec1 uses fec0 mii_bus */
+		fep->mii_bus = fec0_mii_bus;
+		return 0;
+	}
+
 	fep->mii_timeout = 0;
 
 	/*
@@ -769,6 +850,10 @@
 	if (mdiobus_register(fep->mii_bus))
 		goto err_out_free_mdio_irq;
 
+	/* save fec0 mii_bus */
+	if (id_entry->driver_data & FEC_QUIRK_ENET_MAC)
+		fec0_mii_bus = fep->mii_bus;
+
 	return 0;
 
 err_out_free_mdio_irq:
@@ -1067,9 +1152,8 @@
  /*
   * XXX:  We need to clean up on failure exits here.
   *
-  * index is only used in legacy code
   */
-static int fec_enet_init(struct net_device *dev, int index)
+static int fec_enet_init(struct net_device *dev)
 {
 	struct fec_enet_private *fep = netdev_priv(dev);
 	struct bufdesc *cbd_base;
@@ -1086,26 +1170,11 @@
 
 	spin_lock_init(&fep->hw_lock);
 
-	fep->index = index;
 	fep->hwp = (void __iomem *)dev->base_addr;
 	fep->netdev = dev;
 
-	/* Set the Ethernet address */
-#ifdef CONFIG_M5272
+	/* Get the Ethernet address */
 	fec_get_mac(dev);
-#else
-	{
-		unsigned long l;
-		l = readl(fep->hwp + FEC_ADDR_LOW);
-		dev->dev_addr[0] = (unsigned char)((l & 0xFF000000) >> 24);
-		dev->dev_addr[1] = (unsigned char)((l & 0x00FF0000) >> 16);
-		dev->dev_addr[2] = (unsigned char)((l & 0x0000FF00) >> 8);
-		dev->dev_addr[3] = (unsigned char)((l & 0x000000FF) >> 0);
-		l = readl(fep->hwp + FEC_ADDR_HIGH);
-		dev->dev_addr[4] = (unsigned char)((l & 0xFF000000) >> 24);
-		dev->dev_addr[5] = (unsigned char)((l & 0x00FF0000) >> 16);
-	}
-#endif
 
 	/* Set receive and transmit descriptor base. */
 	fep->rx_bd_base = cbd_base;
@@ -1156,12 +1225,25 @@
 fec_restart(struct net_device *dev, int duplex)
 {
 	struct fec_enet_private *fep = netdev_priv(dev);
+	const struct platform_device_id *id_entry =
+				platform_get_device_id(fep->pdev);
 	int i;
+	u32 val, temp_mac[2];
 
 	/* Whack a reset.  We should wait for this. */
 	writel(1, fep->hwp + FEC_ECNTRL);
 	udelay(10);
 
+	/*
+	 * enet-mac reset will reset mac address registers too,
+	 * so need to reconfigure it.
+	 */
+	if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) {
+		memcpy(&temp_mac, dev->dev_addr, ETH_ALEN);
+		writel(cpu_to_be32(temp_mac[0]), fep->hwp + FEC_ADDR_LOW);
+		writel(cpu_to_be32(temp_mac[1]), fep->hwp + FEC_ADDR_HIGH);
+	}
+
 	/* Clear any outstanding interrupt. */
 	writel(0xffc00000, fep->hwp + FEC_IEVENT);
 
@@ -1208,20 +1290,45 @@
 	/* Set MII speed */
 	writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
 
+	/*
+	 * The phy interface and speed need to get configured
+	 * differently on enet-mac.
+	 */
+	if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) {
+		val = readl(fep->hwp + FEC_R_CNTRL);
+
+		/* MII or RMII */
+		if (fep->phy_interface == PHY_INTERFACE_MODE_RMII)
+			val |= (1 << 8);
+		else
+			val &= ~(1 << 8);
+
+		/* 10M or 100M */
+		if (fep->phy_dev && fep->phy_dev->speed == SPEED_100)
+			val &= ~(1 << 9);
+		else
+			val |= (1 << 9);
+
+		writel(val, fep->hwp + FEC_R_CNTRL);
+	} else {
 #ifdef FEC_MIIGSK_ENR
-	if (fep->phy_interface == PHY_INTERFACE_MODE_RMII) {
-		/* disable the gasket and wait */
-		writel(0, fep->hwp + FEC_MIIGSK_ENR);
-		while (readl(fep->hwp + FEC_MIIGSK_ENR) & 4)
-			udelay(1);
+		if (fep->phy_interface == PHY_INTERFACE_MODE_RMII) {
+			/* disable the gasket and wait */
+			writel(0, fep->hwp + FEC_MIIGSK_ENR);
+			while (readl(fep->hwp + FEC_MIIGSK_ENR) & 4)
+				udelay(1);
 
-		/* configure the gasket: RMII, 50 MHz, no loopback, no echo */
-		writel(1, fep->hwp + FEC_MIIGSK_CFGR);
+			/*
+			 * configure the gasket:
+			 *   RMII, 50 MHz, no loopback, no echo
+			 */
+			writel(1, fep->hwp + FEC_MIIGSK_CFGR);
 
-		/* re-enable the gasket */
-		writel(2, fep->hwp + FEC_MIIGSK_ENR);
-	}
+			/* re-enable the gasket */
+			writel(2, fep->hwp + FEC_MIIGSK_ENR);
+		}
 #endif
+	}
 
 	/* And last, enable the transmit and receive processing */
 	writel(2, fep->hwp + FEC_ECNTRL);
@@ -1316,7 +1423,7 @@
 	}
 	clk_enable(fep->clk);
 
-	ret = fec_enet_init(ndev, 0);
+	ret = fec_enet_init(ndev);
 	if (ret)
 		goto failed_init;
 
@@ -1380,8 +1487,10 @@
 
 	if (ndev) {
 		fep = netdev_priv(ndev);
-		if (netif_running(ndev))
-			fec_enet_close(ndev);
+		if (netif_running(ndev)) {
+			fec_stop(ndev);
+			netif_device_detach(ndev);
+		}
 		clk_disable(fep->clk);
 	}
 	return 0;
@@ -1396,8 +1505,10 @@
 	if (ndev) {
 		fep = netdev_priv(ndev);
 		clk_enable(fep->clk);
-		if (netif_running(ndev))
-			fec_enet_open(ndev);
+		if (netif_running(ndev)) {
+			fec_restart(ndev, fep->full_duplex);
+			netif_device_attach(ndev);
+		}
 	}
 	return 0;
 }
@@ -1414,12 +1525,13 @@
 
 static struct platform_driver fec_driver = {
 	.driver	= {
-		.name	= "fec",
+		.name	= DRIVER_NAME,
 		.owner	= THIS_MODULE,
 #ifdef CONFIG_PM
 		.pm	= &fec_pm_ops,
 #endif
 	},
+	.id_table = fec_devtype,
 	.probe	= fec_probe,
 	.remove	= __devexit_p(fec_drv_remove),
 };
diff --git a/drivers/net/fec.h b/drivers/net/fec.h
index 2c48b25..ace318d 100644
--- a/drivers/net/fec.h
+++ b/drivers/net/fec.h
@@ -14,7 +14,8 @@
 /****************************************************************************/
 
 #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
-    defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARCH_MXC)
+    defined(CONFIG_M520x) || defined(CONFIG_M532x) || \
+    defined(CONFIG_ARCH_MXC) || defined(CONFIG_SOC_IMX28)
 /*
  *	Just figures, Motorola would have to change the offsets for
  *	registers in the same peripheral device on different models
@@ -78,7 +79,7 @@
 /*
  *	Define the buffer descriptor structure.
  */
-#ifdef CONFIG_ARCH_MXC
+#if defined(CONFIG_ARCH_MXC) || defined(CONFIG_SOC_IMX28)
 struct bufdesc {
 	unsigned short cbd_datlen;	/* Data length */
 	unsigned short cbd_sc;	/* Control and status info */
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index cd2d72d..af09296 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -3949,6 +3949,7 @@
 		writel(flags, base + NvRegWakeUpFlags);
 		spin_unlock_irq(&np->lock);
 	}
+	device_set_wakeup_enable(&np->pci_dev->dev, np->wolenabled);
 	return 0;
 }
 
@@ -5488,14 +5489,10 @@
 	/* set mac address */
 	nv_copy_mac_to_hw(dev);
 
-	/* Workaround current PCI init glitch:  wakeup bits aren't
-	 * being set from PCI PM capability.
-	 */
-	device_init_wakeup(&pci_dev->dev, 1);
-
 	/* disable WOL */
 	writel(0, base + NvRegWakeUpFlags);
 	np->wolenabled = 0;
+	device_set_wakeup_enable(&pci_dev->dev, false);
 
 	if (id->driver_data & DEV_HAS_POWER_CNTRL) {
 
@@ -5746,8 +5743,9 @@
 }
 
 #ifdef CONFIG_PM
-static int nv_suspend(struct pci_dev *pdev, pm_message_t state)
+static int nv_suspend(struct device *device)
 {
+	struct pci_dev *pdev = to_pci_dev(device);
 	struct net_device *dev = pci_get_drvdata(pdev);
 	struct fe_priv *np = netdev_priv(dev);
 	u8 __iomem *base = get_hwbase(dev);
@@ -5763,25 +5761,17 @@
 	for (i = 0; i <= np->register_size/sizeof(u32); i++)
 		np->saved_config_space[i] = readl(base + i*sizeof(u32));
 
-	pci_save_state(pdev);
-	pci_enable_wake(pdev, pci_choose_state(pdev, state), np->wolenabled);
-	pci_disable_device(pdev);
-	pci_set_power_state(pdev, pci_choose_state(pdev, state));
 	return 0;
 }
 
-static int nv_resume(struct pci_dev *pdev)
+static int nv_resume(struct device *device)
 {
+	struct pci_dev *pdev = to_pci_dev(device);
 	struct net_device *dev = pci_get_drvdata(pdev);
 	struct fe_priv *np = netdev_priv(dev);
 	u8 __iomem *base = get_hwbase(dev);
 	int i, rc = 0;
 
-	pci_set_power_state(pdev, PCI_D0);
-	pci_restore_state(pdev);
-	/* ack any pending wake events, disable PME */
-	pci_enable_wake(pdev, PCI_D0, 0);
-
 	/* restore non-pci configuration space */
 	for (i = 0; i <= np->register_size/sizeof(u32); i++)
 		writel(np->saved_config_space[i], base+i*sizeof(u32));
@@ -5800,6 +5790,9 @@
 	return rc;
 }
 
+static SIMPLE_DEV_PM_OPS(nv_pm_ops, nv_suspend, nv_resume);
+#define NV_PM_OPS (&nv_pm_ops)
+
 static void nv_shutdown(struct pci_dev *pdev)
 {
 	struct net_device *dev = pci_get_drvdata(pdev);
@@ -5822,15 +5815,13 @@
 	 * only put the device into D3 if we really go for poweroff.
 	 */
 	if (system_state == SYSTEM_POWER_OFF) {
-		if (pci_enable_wake(pdev, PCI_D3cold, np->wolenabled))
-			pci_enable_wake(pdev, PCI_D3hot, np->wolenabled);
+		pci_wake_from_d3(pdev, np->wolenabled);
 		pci_set_power_state(pdev, PCI_D3hot);
 	}
 }
 #else
-#define nv_suspend NULL
+#define NV_PM_OPS NULL
 #define nv_shutdown NULL
-#define nv_resume NULL
 #endif /* CONFIG_PM */
 
 static DEFINE_PCI_DEVICE_TABLE(pci_tbl) = {
@@ -6002,9 +5993,8 @@
 	.id_table	= pci_tbl,
 	.probe		= nv_probe,
 	.remove		= __devexit_p(nv_remove),
-	.suspend	= nv_suspend,
-	.resume		= nv_resume,
 	.shutdown	= nv_shutdown,
+	.driver.pm	= NV_PM_OPS,
 };
 
 static int __init init_nic(void)
diff --git a/drivers/net/hamradio/yam.c b/drivers/net/hamradio/yam.c
index 4e7d1d0..7d9ced0 100644
--- a/drivers/net/hamradio/yam.c
+++ b/drivers/net/hamradio/yam.c
@@ -396,7 +396,7 @@
 	while (p) {
 		if (p->bitrate == bitrate) {
 			memcpy(p->bits, bits, YAM_FPGA_SIZE);
-			return p->bits;
+			goto out;
 		}
 		p = p->next;
 	}
@@ -411,7 +411,7 @@
 	p->bitrate = bitrate;
 	p->next = yam_data;
 	yam_data = p;
-
+ out:
 	release_firmware(fw);
 	return p->bits;
 }
diff --git a/drivers/net/irda/bfin_sir.h b/drivers/net/irda/bfin_sir.h
index b54a6f0..e3b285a 100644
--- a/drivers/net/irda/bfin_sir.h
+++ b/drivers/net/irda/bfin_sir.h
@@ -26,6 +26,8 @@
 #include <asm/cacheflush.h>
 #include <asm/dma.h>
 #include <asm/portmux.h>
+#include <mach/bfin_serial_5xx.h>
+#undef DRIVER_NAME
 
 #ifdef CONFIG_SIR_BFIN_DMA
 struct dma_rx_buf {
diff --git a/drivers/net/irda/donauboe.h b/drivers/net/irda/donauboe.h
index 4dc39e5..77fcf44 100644
--- a/drivers/net/irda/donauboe.h
+++ b/drivers/net/irda/donauboe.h
@@ -30,7 +30,7 @@
  *     or the type-DO IR port.
  *
  * IrDA chip set list from Toshiba Computer Engineering Corp.
- * model			method	maker	controler		Version 
+ * model			method	maker	controller		Version 
  * Portege 320CT	FIR,SIR Toshiba Oboe(Triangle) 
  * Portege 3010CT	FIR,SIR Toshiba Oboe(Sydney) 
  * Portege 3015CT	FIR,SIR Toshiba Oboe(Sydney) 
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h
index 3ae30b8..3b8c924 100644
--- a/drivers/net/ixgbe/ixgbe.h
+++ b/drivers/net/ixgbe/ixgbe.h
@@ -508,6 +508,8 @@
 extern void ixgbe_free_tx_resources(struct ixgbe_ring *);
 extern void ixgbe_configure_rx_ring(struct ixgbe_adapter *,struct ixgbe_ring *);
 extern void ixgbe_configure_tx_ring(struct ixgbe_adapter *,struct ixgbe_ring *);
+extern void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter,
+				   struct ixgbe_ring *);
 extern void ixgbe_update_stats(struct ixgbe_adapter *adapter);
 extern int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter);
 extern void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter);
@@ -524,26 +526,13 @@
 extern s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 pballoc);
 extern s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc);
 extern s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
-                                                 struct ixgbe_atr_input *input,
+						 union ixgbe_atr_hash_dword input,
+						 union ixgbe_atr_hash_dword common,
                                                  u8 queue);
 extern s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
-                                      struct ixgbe_atr_input *input,
+                                      union ixgbe_atr_input *input,
                                       struct ixgbe_atr_input_masks *input_masks,
                                       u16 soft_id, u8 queue);
-extern s32 ixgbe_atr_set_vlan_id_82599(struct ixgbe_atr_input *input,
-                                       u16 vlan_id);
-extern s32 ixgbe_atr_set_src_ipv4_82599(struct ixgbe_atr_input *input,
-                                        u32 src_addr);
-extern s32 ixgbe_atr_set_dst_ipv4_82599(struct ixgbe_atr_input *input,
-                                        u32 dst_addr);
-extern s32 ixgbe_atr_set_src_port_82599(struct ixgbe_atr_input *input,
-                                        u16 src_port);
-extern s32 ixgbe_atr_set_dst_port_82599(struct ixgbe_atr_input *input,
-                                        u16 dst_port);
-extern s32 ixgbe_atr_set_flex_byte_82599(struct ixgbe_atr_input *input,
-                                         u16 flex_byte);
-extern s32 ixgbe_atr_set_l4type_82599(struct ixgbe_atr_input *input,
-                                      u8 l4type);
 extern void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
                                    struct ixgbe_ring *ring);
 extern void ixgbe_clear_rscctl(struct ixgbe_adapter *adapter,
diff --git a/drivers/net/ixgbe/ixgbe_82599.c b/drivers/net/ixgbe/ixgbe_82599.c
index bfd3c22..a21f581 100644
--- a/drivers/net/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ixgbe/ixgbe_82599.c
@@ -1003,7 +1003,7 @@
 		udelay(10);
 	}
 	if (i >= IXGBE_FDIRCMD_CMD_POLL) {
-		hw_dbg(hw ,"Flow Director previous command isn't complete, "
+		hw_dbg(hw, "Flow Director previous command isn't complete, "
 		       "aborting table re-initialization.\n");
 		return IXGBE_ERR_FDIR_REINIT_FAILED;
 	}
@@ -1079,7 +1079,7 @@
 
 	/*
 	 * The defaults in the HW for RX PB 1-7 are not zero and so should be
-	 * intialized to zero for non DCB mode otherwise actual total RX PB
+	 * initialized to zero for non DCB mode otherwise actual total RX PB
 	 * would be bigger than programmed and filter space would run into
 	 * the PB 0 region.
 	 */
@@ -1113,13 +1113,10 @@
 	/* Move the flexible bytes to use the ethertype - shift 6 words */
 	fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT);
 
-	fdirctrl |= IXGBE_FDIRCTRL_REPORT_STATUS;
 
 	/* Prime the keys for hashing */
-	IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY,
-	                htonl(IXGBE_ATR_BUCKET_HASH_KEY));
-	IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY,
-	                htonl(IXGBE_ATR_SIGNATURE_HASH_KEY));
+	IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY);
+	IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, IXGBE_ATR_SIGNATURE_HASH_KEY);
 
 	/*
 	 * Poll init-done after we write the register.  Estimated times:
@@ -1170,7 +1167,7 @@
 
 	/*
 	 * The defaults in the HW for RX PB 1-7 are not zero and so should be
-	 * intialized to zero for non DCB mode otherwise actual total RX PB
+	 * initialized to zero for non DCB mode otherwise actual total RX PB
 	 * would be bigger than programmed and filter space would run into
 	 * the PB 0 region.
 	 */
@@ -1209,10 +1206,8 @@
 	fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT);
 
 	/* Prime the keys for hashing */
-	IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY,
-	                htonl(IXGBE_ATR_BUCKET_HASH_KEY));
-	IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY,
-	                htonl(IXGBE_ATR_SIGNATURE_HASH_KEY));
+	IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY);
+	IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, IXGBE_ATR_SIGNATURE_HASH_KEY);
 
 	/*
 	 * Poll init-done after we write the register.  Estimated times:
@@ -1251,8 +1246,8 @@
  *  @stream: input bitstream to compute the hash on
  *  @key: 32-bit hash key
  **/
-static u16 ixgbe_atr_compute_hash_82599(struct ixgbe_atr_input *atr_input,
-                                        u32 key)
+static u32 ixgbe_atr_compute_hash_82599(union ixgbe_atr_input *atr_input,
+					u32 key)
 {
 	/*
 	 * The algorithm is as follows:
@@ -1272,410 +1267,250 @@
 	 *    To simplify for programming, the algorithm is implemented
 	 *    in software this way:
 	 *
-	 *    Key[31:0], Stream[335:0]
+	 *    key[31:0], hi_hash_dword[31:0], lo_hash_dword[31:0], hash[15:0]
 	 *
-	 *    tmp_key[11 * 32 - 1:0] = 11{Key[31:0] = key concatenated 11 times
-	 *    int_key[350:0] = tmp_key[351:1]
-	 *    int_stream[365:0] = Stream[14:0] | Stream[335:0] | Stream[335:321]
+	 *    for (i = 0; i < 352; i+=32)
+	 *        hi_hash_dword[31:0] ^= Stream[(i+31):i];
 	 *
-	 *    hash[15:0] = 0;
-	 *    for (i = 0; i < 351; i++) {
-	 *        if (int_key[i])
-	 *            hash ^= int_stream[(i + 15):i];
+	 *    lo_hash_dword[15:0]  ^= Stream[15:0];
+	 *    lo_hash_dword[15:0]  ^= hi_hash_dword[31:16];
+	 *    lo_hash_dword[31:16] ^= hi_hash_dword[15:0];
+	 *
+	 *    hi_hash_dword[31:0]  ^= Stream[351:320];
+	 *
+	 *    if(key[0])
+	 *        hash[15:0] ^= Stream[15:0];
+	 *
+	 *    for (i = 0; i < 16; i++) {
+	 *        if (key[i])
+	 *            hash[15:0] ^= lo_hash_dword[(i+15):i];
+	 *        if (key[i + 16])
+	 *            hash[15:0] ^= hi_hash_dword[(i+15):i];
 	 *    }
+	 *
 	 */
+	__be32 common_hash_dword = 0;
+	u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
+	u32 hash_result = 0;
+	u8 i;
 
-	union {
-		u64    fill[6];
-		u32    key[11];
-		u8     key_stream[44];
-	} tmp_key;
+	/* record the flow_vm_vlan bits as they are a key part to the hash */
+	flow_vm_vlan = ntohl(atr_input->dword_stream[0]);
 
-	u8   *stream = (u8 *)atr_input;
-	u8   int_key[44];      /* upper-most bit unused */
-	u8   hash_str[46];     /* upper-most 2 bits unused */
-	u16  hash_result = 0;
-	int  i, j, k, h;
+	/* generate common hash dword */
+	for (i = 10; i; i -= 2)
+		common_hash_dword ^= atr_input->dword_stream[i] ^
+				     atr_input->dword_stream[i - 1];
+
+	hi_hash_dword = ntohl(common_hash_dword);
+
+	/* low dword is word swapped version of common */
+	lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);
+
+	/* apply flow ID/VM pool/VLAN ID bits to hash words */
+	hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);
+
+	/* Process bits 0 and 16 */
+	if (key & 0x0001) hash_result ^= lo_hash_dword;
+	if (key & 0x00010000) hash_result ^= hi_hash_dword;
 
 	/*
-	 * Initialize the fill member to prevent warnings
-	 * on some compilers
+	 * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to
+	 * delay this because bit 0 of the stream should not be processed
+	 * so we do not add the vlan until after bit 0 was processed
 	 */
-	 tmp_key.fill[0] = 0;
+	lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);
 
-	/* First load the temporary key stream */
-	for (i = 0; i < 6; i++) {
-		u64 fillkey = ((u64)key << 32) | key;
-		tmp_key.fill[i] = fillkey;
+
+	/* process the remaining 30 bits in the key 2 bits at a time */
+	for (i = 15; i; i--) {
+		if (key & (0x0001 << i)) hash_result ^= lo_hash_dword >> i;
+		if (key & (0x00010000 << i)) hash_result ^= hi_hash_dword >> i;
 	}
 
-	/*
-	 * Set the interim key for the hashing.  Bit 352 is unused, so we must
-	 * shift and compensate when building the key.
-	 */
-
-	int_key[0] = tmp_key.key_stream[0] >> 1;
-	for (i = 1, j = 0; i < 44; i++) {
-		unsigned int this_key = tmp_key.key_stream[j] << 7;
-		j++;
-		int_key[i] = (u8)(this_key | (tmp_key.key_stream[j] >> 1));
-	}
-
-	/*
-	 * Set the interim bit string for the hashing.  Bits 368 and 367 are
-	 * unused, so shift and compensate when building the string.
-	 */
-	hash_str[0] = (stream[40] & 0x7f) >> 1;
-	for (i = 1, j = 40; i < 46; i++) {
-		unsigned int this_str = stream[j] << 7;
-		j++;
-		if (j > 41)
-			j = 0;
-		hash_str[i] = (u8)(this_str | (stream[j] >> 1));
-	}
-
-	/*
-	 * Now compute the hash.  i is the index into hash_str, j is into our
-	 * key stream, k is counting the number of bits, and h interates within
-	 * each byte.
-	 */
-	for (i = 45, j = 43, k = 0; k < 351 && i >= 2 && j >= 0; i--, j--) {
-		for (h = 0; h < 8 && k < 351; h++, k++) {
-			if (int_key[j] & (1 << h)) {
-				/*
-				 * Key bit is set, XOR in the current 16-bit
-				 * string.  Example of processing:
-				 *    h = 0,
-				 *      tmp = (hash_str[i - 2] & 0 << 16) |
-				 *            (hash_str[i - 1] & 0xff << 8) |
-				 *            (hash_str[i] & 0xff >> 0)
-				 *      So tmp = hash_str[15 + k:k], since the
-				 *      i + 2 clause rolls off the 16-bit value
-				 *    h = 7,
-				 *      tmp = (hash_str[i - 2] & 0x7f << 9) |
-				 *            (hash_str[i - 1] & 0xff << 1) |
-				 *            (hash_str[i] & 0x80 >> 7)
-				 */
-				int tmp = (hash_str[i] >> h);
-				tmp |= (hash_str[i - 1] << (8 - h));
-				tmp |= (int)(hash_str[i - 2] & ((1 << h) - 1))
-				             << (16 - h);
-				hash_result ^= (u16)tmp;
-			}
-		}
-	}
-
-	return hash_result;
+	return hash_result & IXGBE_ATR_HASH_MASK;
 }
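
The rewritten ixgbe_atr_compute_hash_82599() above folds the 11-dword input down to one "hi" dword plus a word-swapped "lo" copy and then XORs key-selected shifted copies of the pair, as the updated comment describes. A stand-alone rendering of that scheme (user-space, host byte order throughout; the key, stream values and 15-bit mask below are illustrative only):

#include <stdint.h>
#include <stdio.h>

#define DEMO_HASH_MASK 0x7fff	/* illustrative stand-in for IXGBE_ATR_HASH_MASK */

static uint32_t demo_atr_hash(const uint32_t stream[11], uint32_t key)
{
	uint32_t flow_vm_vlan = stream[0];
	uint32_t hi = 0, lo, hash = 0;
	int i;

	/* fold dwords 1..10 into a single common dword */
	for (i = 10; i; i -= 2)
		hi ^= stream[i] ^ stream[i - 1];

	lo = (hi >> 16) | (hi << 16);		/* word-swapped copy */
	hi ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);

	/* bit 0 of the key sees lo before the vlan/flow bits are mixed in */
	if (key & 0x0001u)
		hash ^= lo;
	if (key & 0x00010000u)
		hash ^= hi;

	lo ^= flow_vm_vlan ^ (flow_vm_vlan << 16);

	/* remaining 30 key bits, two per iteration */
	for (i = 15; i; i--) {
		if (key & (0x0001u << i))
			hash ^= lo >> i;
		if (key & (0x00010000u << i))
			hash ^= hi >> i;
	}

	return hash & DEMO_HASH_MASK;
}

int main(void)
{
	const uint32_t stream[11] = { 0x00010001, 0xc0a80001, 0xc0a80002,
				      0x00500050, 0, 0, 0, 0, 0, 0, 0 };

	printf("hash = 0x%04x\n", demo_atr_hash(stream, 0x1e2d3c4bu));
	return 0;
}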
 
-/**
- *  ixgbe_atr_set_vlan_id_82599 - Sets the VLAN id in the ATR input stream
- *  @input: input stream to modify
- *  @vlan: the VLAN id to load
- **/
-s32 ixgbe_atr_set_vlan_id_82599(struct ixgbe_atr_input *input, u16 vlan)
-{
-	input->byte_stream[IXGBE_ATR_VLAN_OFFSET + 1] = vlan >> 8;
-	input->byte_stream[IXGBE_ATR_VLAN_OFFSET] = vlan & 0xff;
-
-	return 0;
-}
+/*
+ * These defines allow us to quickly generate all of the necessary instructions
+ * in the function below by simply calling out IXGBE_COMPUTE_SIG_HASH_ITERATION
+ * for values 0 through 15
+ */
+#define IXGBE_ATR_COMMON_HASH_KEY \
+		(IXGBE_ATR_BUCKET_HASH_KEY & IXGBE_ATR_SIGNATURE_HASH_KEY)
+#define IXGBE_COMPUTE_SIG_HASH_ITERATION(_n) \
+do { \
+	u32 n = (_n); \
+	if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << n)) \
+		common_hash ^= lo_hash_dword >> n; \
+	else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \
+		bucket_hash ^= lo_hash_dword >> n; \
+	else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << n)) \
+		sig_hash ^= lo_hash_dword << (16 - n); \
+	if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << (n + 16))) \
+		common_hash ^= hi_hash_dword >> n; \
+	else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
+		bucket_hash ^= hi_hash_dword >> n; \
+	else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << (n + 16))) \
+		sig_hash ^= hi_hash_dword << (16 - n); \
+} while (0)
 
 /**
- *  ixgbe_atr_set_src_ipv4_82599 - Sets the source IPv4 address
- *  @input: input stream to modify
- *  @src_addr: the IP address to load
- **/
-s32 ixgbe_atr_set_src_ipv4_82599(struct ixgbe_atr_input *input, u32 src_addr)
-{
-	input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 3] = src_addr >> 24;
-	input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 2] =
-	                                               (src_addr >> 16) & 0xff;
-	input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 1] =
-	                                                (src_addr >> 8) & 0xff;
-	input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET] = src_addr & 0xff;
-
-	return 0;
-}
-
-/**
- *  ixgbe_atr_set_dst_ipv4_82599 - Sets the destination IPv4 address
- *  @input: input stream to modify
- *  @dst_addr: the IP address to load
- **/
-s32 ixgbe_atr_set_dst_ipv4_82599(struct ixgbe_atr_input *input, u32 dst_addr)
-{
-	input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 3] = dst_addr >> 24;
-	input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 2] =
-	                                               (dst_addr >> 16) & 0xff;
-	input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 1] =
-	                                                (dst_addr >> 8) & 0xff;
-	input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET] = dst_addr & 0xff;
-
-	return 0;
-}
-
-/**
- *  ixgbe_atr_set_src_port_82599 - Sets the source port
- *  @input: input stream to modify
- *  @src_port: the source port to load
- **/
-s32 ixgbe_atr_set_src_port_82599(struct ixgbe_atr_input *input, u16 src_port)
-{
-	input->byte_stream[IXGBE_ATR_SRC_PORT_OFFSET + 1] = src_port >> 8;
-	input->byte_stream[IXGBE_ATR_SRC_PORT_OFFSET] = src_port & 0xff;
-
-	return 0;
-}
-
-/**
- *  ixgbe_atr_set_dst_port_82599 - Sets the destination port
- *  @input: input stream to modify
- *  @dst_port: the destination port to load
- **/
-s32 ixgbe_atr_set_dst_port_82599(struct ixgbe_atr_input *input, u16 dst_port)
-{
-	input->byte_stream[IXGBE_ATR_DST_PORT_OFFSET + 1] = dst_port >> 8;
-	input->byte_stream[IXGBE_ATR_DST_PORT_OFFSET] = dst_port & 0xff;
-
-	return 0;
-}
-
-/**
- *  ixgbe_atr_set_flex_byte_82599 - Sets the flexible bytes
- *  @input: input stream to modify
- *  @flex_bytes: the flexible bytes to load
- **/
-s32 ixgbe_atr_set_flex_byte_82599(struct ixgbe_atr_input *input, u16 flex_byte)
-{
-	input->byte_stream[IXGBE_ATR_FLEX_BYTE_OFFSET + 1] = flex_byte >> 8;
-	input->byte_stream[IXGBE_ATR_FLEX_BYTE_OFFSET] = flex_byte & 0xff;
-
-	return 0;
-}
-
-/**
- *  ixgbe_atr_set_l4type_82599 - Sets the layer 4 packet type
- *  @input: input stream to modify
- *  @l4type: the layer 4 type value to load
- **/
-s32 ixgbe_atr_set_l4type_82599(struct ixgbe_atr_input *input, u8 l4type)
-{
-	input->byte_stream[IXGBE_ATR_L4TYPE_OFFSET] = l4type;
-
-	return 0;
-}
-
-/**
- *  ixgbe_atr_get_vlan_id_82599 - Gets the VLAN id from the ATR input stream
- *  @input: input stream to search
- *  @vlan: the VLAN id to load
- **/
-static s32 ixgbe_atr_get_vlan_id_82599(struct ixgbe_atr_input *input, u16 *vlan)
-{
-	*vlan = input->byte_stream[IXGBE_ATR_VLAN_OFFSET];
-	*vlan |= input->byte_stream[IXGBE_ATR_VLAN_OFFSET + 1] << 8;
-
-	return 0;
-}
-
-/**
- *  ixgbe_atr_get_src_ipv4_82599 - Gets the source IPv4 address
- *  @input: input stream to search
- *  @src_addr: the IP address to load
- **/
-static s32 ixgbe_atr_get_src_ipv4_82599(struct ixgbe_atr_input *input,
-                                        u32 *src_addr)
-{
-	*src_addr = input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET];
-	*src_addr |= input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 1] << 8;
-	*src_addr |= input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 2] << 16;
-	*src_addr |= input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 3] << 24;
-
-	return 0;
-}
-
-/**
- *  ixgbe_atr_get_dst_ipv4_82599 - Gets the destination IPv4 address
- *  @input: input stream to search
- *  @dst_addr: the IP address to load
- **/
-static s32 ixgbe_atr_get_dst_ipv4_82599(struct ixgbe_atr_input *input,
-                                        u32 *dst_addr)
-{
-	*dst_addr = input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET];
-	*dst_addr |= input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 1] << 8;
-	*dst_addr |= input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 2] << 16;
-	*dst_addr |= input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 3] << 24;
-
-	return 0;
-}
-
-/**
- *  ixgbe_atr_get_src_ipv6_82599 - Gets the source IPv6 address
- *  @input: input stream to search
- *  @src_addr_1: the first 4 bytes of the IP address to load
- *  @src_addr_2: the second 4 bytes of the IP address to load
- *  @src_addr_3: the third 4 bytes of the IP address to load
- *  @src_addr_4: the fourth 4 bytes of the IP address to load
- **/
-static s32 ixgbe_atr_get_src_ipv6_82599(struct ixgbe_atr_input *input,
-                                        u32 *src_addr_1, u32 *src_addr_2,
-                                        u32 *src_addr_3, u32 *src_addr_4)
-{
-	*src_addr_1 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 12];
-	*src_addr_1 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 13] << 8;
-	*src_addr_1 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 14] << 16;
-	*src_addr_1 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 15] << 24;
-
-	*src_addr_2 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 8];
-	*src_addr_2 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 9] << 8;
-	*src_addr_2 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 10] << 16;
-	*src_addr_2 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 11] << 24;
-
-	*src_addr_3 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 4];
-	*src_addr_3 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 5] << 8;
-	*src_addr_3 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 6] << 16;
-	*src_addr_3 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 7] << 24;
-
-	*src_addr_4 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET];
-	*src_addr_4 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 1] << 8;
-	*src_addr_4 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 2] << 16;
-	*src_addr_4 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 3] << 24;
-
-	return 0;
-}
-
-/**
- *  ixgbe_atr_get_src_port_82599 - Gets the source port
- *  @input: input stream to modify
- *  @src_port: the source port to load
+ *  ixgbe_atr_compute_sig_hash_82599 - Compute the signature hash
+ *  @input: unique input dword
+ *  @common: compressed common input dword
  *
- *  Even though the input is given in big-endian, the FDIRPORT registers
- *  expect the ports to be programmed in little-endian.  Hence the need to swap
- *  endianness when retrieving the data.  This can be confusing since the
- *  internal hash engine expects it to be big-endian.
+ *  This function is almost identical to the function above but contains
+ *  several optimizations such as unwinding all of the loops, letting the
+ *  compiler work out all of the conditional ifs since the keys are static
+ *  defines, and computing two keys at once since the hashed dword stream
+ *  will be the same for both keys.
  **/
-static s32 ixgbe_atr_get_src_port_82599(struct ixgbe_atr_input *input,
-                                        u16 *src_port)
+static u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input,
+					    union ixgbe_atr_hash_dword common)
 {
-	*src_port = input->byte_stream[IXGBE_ATR_SRC_PORT_OFFSET] << 8;
-	*src_port |= input->byte_stream[IXGBE_ATR_SRC_PORT_OFFSET + 1];
+	u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
+	u32 sig_hash = 0, bucket_hash = 0, common_hash = 0;
 
-	return 0;
-}
+	/* record the flow_vm_vlan bits as they are a key part to the hash */
+	flow_vm_vlan = ntohl(input.dword);
 
-/**
- *  ixgbe_atr_get_dst_port_82599 - Gets the destination port
- *  @input: input stream to modify
- *  @dst_port: the destination port to load
- *
- *  Even though the input is given in big-endian, the FDIRPORT registers
- *  expect the ports to be programmed in little-endian.  Hence the need to swap
- *  endianness when retrieving the data.  This can be confusing since the
- *  internal hash engine expects it to be big-endian.
- **/
-static s32 ixgbe_atr_get_dst_port_82599(struct ixgbe_atr_input *input,
-                                        u16 *dst_port)
-{
-	*dst_port = input->byte_stream[IXGBE_ATR_DST_PORT_OFFSET] << 8;
-	*dst_port |= input->byte_stream[IXGBE_ATR_DST_PORT_OFFSET + 1];
+	/* generate common hash dword */
+	hi_hash_dword = ntohl(common.dword);
 
-	return 0;
-}
+	/* low dword is word swapped version of common */
+	lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);
 
-/**
- *  ixgbe_atr_get_flex_byte_82599 - Gets the flexible bytes
- *  @input: input stream to modify
- *  @flex_bytes: the flexible bytes to load
- **/
-static s32 ixgbe_atr_get_flex_byte_82599(struct ixgbe_atr_input *input,
-                                         u16 *flex_byte)
-{
-	*flex_byte = input->byte_stream[IXGBE_ATR_FLEX_BYTE_OFFSET];
-	*flex_byte |= input->byte_stream[IXGBE_ATR_FLEX_BYTE_OFFSET + 1] << 8;
+	/* apply flow ID/VM pool/VLAN ID bits to hash words */
+	hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);
 
-	return 0;
-}
+	/* Process bits 0 and 16 */
+	IXGBE_COMPUTE_SIG_HASH_ITERATION(0);
 
-/**
- *  ixgbe_atr_get_l4type_82599 - Gets the layer 4 packet type
- *  @input: input stream to modify
- *  @l4type: the layer 4 type value to load
- **/
-static s32 ixgbe_atr_get_l4type_82599(struct ixgbe_atr_input *input,
-                                      u8 *l4type)
-{
-	*l4type = input->byte_stream[IXGBE_ATR_L4TYPE_OFFSET];
+	/*
+	 * apply flow ID/VM pool/VLAN ID bits to the lo hash dword; we delay
+	 * this because bit 0 of the stream should not be processed, so we do
+	 * not add the vlan until after bit 0 has been processed
+	 */
+	lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);
 
-	return 0;
+	/* Process the remaining 30 bits of the key */
+	IXGBE_COMPUTE_SIG_HASH_ITERATION(1);
+	IXGBE_COMPUTE_SIG_HASH_ITERATION(2);
+	IXGBE_COMPUTE_SIG_HASH_ITERATION(3);
+	IXGBE_COMPUTE_SIG_HASH_ITERATION(4);
+	IXGBE_COMPUTE_SIG_HASH_ITERATION(5);
+	IXGBE_COMPUTE_SIG_HASH_ITERATION(6);
+	IXGBE_COMPUTE_SIG_HASH_ITERATION(7);
+	IXGBE_COMPUTE_SIG_HASH_ITERATION(8);
+	IXGBE_COMPUTE_SIG_HASH_ITERATION(9);
+	IXGBE_COMPUTE_SIG_HASH_ITERATION(10);
+	IXGBE_COMPUTE_SIG_HASH_ITERATION(11);
+	IXGBE_COMPUTE_SIG_HASH_ITERATION(12);
+	IXGBE_COMPUTE_SIG_HASH_ITERATION(13);
+	IXGBE_COMPUTE_SIG_HASH_ITERATION(14);
+	IXGBE_COMPUTE_SIG_HASH_ITERATION(15);
+
+	/* combine common_hash result with signature and bucket hashes */
+	bucket_hash ^= common_hash;
+	bucket_hash &= IXGBE_ATR_HASH_MASK;
+
+	sig_hash ^= common_hash << 16;
+	sig_hash &= IXGBE_ATR_HASH_MASK << 16;
+
+	/* return completed signature hash */
+	return sig_hash ^ bucket_hash;
 }
 
 /**
  *  ixgbe_atr_add_signature_filter_82599 - Adds a signature hash filter
  *  @hw: pointer to hardware structure
- *  @stream: input bitstream
+ *  @input: unique input dword
+ *  @common: compressed common input dword
  *  @queue: queue index to direct traffic to
  **/
 s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
-                                          struct ixgbe_atr_input *input,
+                                          union ixgbe_atr_hash_dword input,
+                                          union ixgbe_atr_hash_dword common,
                                           u8 queue)
 {
 	u64  fdirhashcmd;
-	u64  fdircmd;
-	u32  fdirhash;
-	u16  bucket_hash, sig_hash;
-	u8   l4type;
+	u32  fdircmd;
 
-	bucket_hash = ixgbe_atr_compute_hash_82599(input,
-	                                           IXGBE_ATR_BUCKET_HASH_KEY);
+	/*
+	 * Get the flow_type in order to program FDIRCMD properly
+	 * lowest 2 bits are FDIRCMD.L4TYPE, third lowest bit is FDIRCMD.IPV6
+	 */
+	switch (input.formatted.flow_type) {
+	case IXGBE_ATR_FLOW_TYPE_TCPV4:
+	case IXGBE_ATR_FLOW_TYPE_UDPV4:
+	case IXGBE_ATR_FLOW_TYPE_SCTPV4:
+	case IXGBE_ATR_FLOW_TYPE_TCPV6:
+	case IXGBE_ATR_FLOW_TYPE_UDPV6:
+	case IXGBE_ATR_FLOW_TYPE_SCTPV6:
+		break;
+	default:
+		hw_dbg(hw, " Error on flow type input\n");
+		return IXGBE_ERR_CONFIG;
+	}
 
-	/* bucket_hash is only 15 bits */
-	bucket_hash &= IXGBE_ATR_HASH_MASK;
-
-	sig_hash = ixgbe_atr_compute_hash_82599(input,
-	                                        IXGBE_ATR_SIGNATURE_HASH_KEY);
-
-	/* Get the l4type in order to program FDIRCMD properly */
-	/* lowest 2 bits are FDIRCMD.L4TYPE, third lowest bit is FDIRCMD.IPV6 */
-	ixgbe_atr_get_l4type_82599(input, &l4type);
+	/* configure FDIRCMD register */
+	fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
+	          IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
+	fdircmd |= input.formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
+	fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
 
 	/*
 	 * The lower 32-bits of fdirhashcmd is for FDIRHASH, the upper 32-bits
 	 * is for FDIRCMD.  Then do a 64-bit register write from FDIRHASH.
 	 */
-	fdirhash = sig_hash << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT | bucket_hash;
-
-	fdircmd = (IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
-	           IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN);
-
-	switch (l4type & IXGBE_ATR_L4TYPE_MASK) {
-	case IXGBE_ATR_L4TYPE_TCP:
-		fdircmd |= IXGBE_FDIRCMD_L4TYPE_TCP;
-		break;
-	case IXGBE_ATR_L4TYPE_UDP:
-		fdircmd |= IXGBE_FDIRCMD_L4TYPE_UDP;
-		break;
-	case IXGBE_ATR_L4TYPE_SCTP:
-		fdircmd |= IXGBE_FDIRCMD_L4TYPE_SCTP;
-		break;
-	default:
-		hw_dbg(hw, "Error on l4type input\n");
-		return IXGBE_ERR_CONFIG;
-	}
-
-	if (l4type & IXGBE_ATR_L4TYPE_IPV6_MASK)
-		fdircmd |= IXGBE_FDIRCMD_IPV6;
-
-	fdircmd |= ((u64)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT);
-	fdirhashcmd = ((fdircmd << 32) | fdirhash);
+	fdirhashcmd = (u64)fdircmd << 32;
+	fdirhashcmd |= ixgbe_atr_compute_sig_hash_82599(input, common);
 
 	IXGBE_WRITE_REG64(hw, IXGBE_FDIRHASH, fdirhashcmd);
 
+	hw_dbg(hw, "Tx Queue=%x hash=%x\n", queue, (u32)fdirhashcmd);
+
 	return 0;
 }
 
 /**
+ *  ixgbe_get_fdirtcpm_82599 - generate a TCP port mask from atr_input_masks
+ *  @input_masks: masks to be bit swapped
+ *
+ *  The source and destination port masks for flow director are bit swapped
+ *  in that bit 15 affects bit 0, 14 affects 1, 13 affects 2, etc.  In order to
+ *  generate a correctly swapped value we need to bit swap the mask and that
+ *  is what is accomplished by this function.
+ **/
+static u32 ixgbe_get_fdirtcpm_82599(struct ixgbe_atr_input_masks *input_masks)
+{
+	u32 mask = ntohs(input_masks->dst_port_mask);
+	mask <<= IXGBE_FDIRTCPM_DPORTM_SHIFT;
+	mask |= ntohs(input_masks->src_port_mask);
+	mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1);
+	mask = ((mask & 0x33333333) << 2) | ((mask & 0xCCCCCCCC) >> 2);
+	mask = ((mask & 0x0F0F0F0F) << 4) | ((mask & 0xF0F0F0F0) >> 4);
+	return ((mask & 0x00FF00FF) << 8) | ((mask & 0xFF00FF00) >> 8);
+}
+
+/*
+ * These macros are meant to address the fact that we have registers
+ * that are either all or in part big-endian.  As a result, on big-endian
+ * systems we will end up byte swapping the value to little-endian before
+ * it is byte swapped again and written to the hardware in the original
+ * big-endian format.
+ */
+#define IXGBE_STORE_AS_BE32(_value) \
+	(((u32)(_value) >> 24) | (((u32)(_value) & 0x00FF0000) >> 8) | \
+	 (((u32)(_value) & 0x0000FF00) << 8) | ((u32)(_value) << 24))
+
+#define IXGBE_WRITE_REG_BE32(a, reg, value) \
+	IXGBE_WRITE_REG((a), (reg), IXGBE_STORE_AS_BE32(ntohl(value)))
+
+#define IXGBE_STORE_AS_BE16(_value) \
+	(((u16)(_value) >> 8) | ((u16)(_value) << 8))
+
+/**
  *  ixgbe_fdir_add_perfect_filter_82599 - Adds a perfect filter
  *  @hw: pointer to hardware structure
  *  @input: input bitstream
@@ -1687,135 +1522,139 @@
  *  hardware writes must be protected from one another.
  **/
 s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
-                                      struct ixgbe_atr_input *input,
+                                      union ixgbe_atr_input *input,
                                       struct ixgbe_atr_input_masks *input_masks,
                                       u16 soft_id, u8 queue)
 {
-	u32 fdircmd = 0;
 	u32 fdirhash;
-	u32 src_ipv4 = 0, dst_ipv4 = 0;
-	u32 src_ipv6_1, src_ipv6_2, src_ipv6_3, src_ipv6_4;
-	u16 src_port, dst_port, vlan_id, flex_bytes;
-	u16 bucket_hash;
-	u8  l4type;
-	u8  fdirm = 0;
-
-	/* Get our input values */
-	ixgbe_atr_get_l4type_82599(input, &l4type);
+	u32 fdircmd;
+	u32 fdirport, fdirtcpm;
+	u32 fdirvlan;
+	/* start with VLAN, flex bytes, VM pool, and IPv6 destination masked */
+	u32 fdirm = IXGBE_FDIRM_VLANID | IXGBE_FDIRM_VLANP | IXGBE_FDIRM_FLEX |
+		    IXGBE_FDIRM_POOL | IXGBE_FDIRM_DIPv6;
 
 	/*
-	 * Check l4type formatting, and bail out before we touch the hardware
+	 * Check flow_type formatting, and bail out before we touch the hardware
 	 * if there's a configuration issue
 	 */
-	switch (l4type & IXGBE_ATR_L4TYPE_MASK) {
-	case IXGBE_ATR_L4TYPE_TCP:
-		fdircmd |= IXGBE_FDIRCMD_L4TYPE_TCP;
-		break;
-	case IXGBE_ATR_L4TYPE_UDP:
-		fdircmd |= IXGBE_FDIRCMD_L4TYPE_UDP;
-		break;
-	case IXGBE_ATR_L4TYPE_SCTP:
-		fdircmd |= IXGBE_FDIRCMD_L4TYPE_SCTP;
+	switch (input->formatted.flow_type) {
+	case IXGBE_ATR_FLOW_TYPE_IPV4:
+		/* use the L4 protocol mask for raw IPv4/IPv6 traffic */
+		fdirm |= IXGBE_FDIRM_L4P;
+	case IXGBE_ATR_FLOW_TYPE_SCTPV4:
+		if (input_masks->dst_port_mask || input_masks->src_port_mask) {
+			hw_dbg(hw, " Error on src/dst port mask\n");
+			return IXGBE_ERR_CONFIG;
+		}
+	case IXGBE_ATR_FLOW_TYPE_TCPV4:
+	case IXGBE_ATR_FLOW_TYPE_UDPV4:
 		break;
 	default:
-		hw_dbg(hw, "Error on l4type input\n");
+		hw_dbg(hw, " Error on flow type input\n");
 		return IXGBE_ERR_CONFIG;
 	}
 
-	bucket_hash = ixgbe_atr_compute_hash_82599(input,
-	                                           IXGBE_ATR_BUCKET_HASH_KEY);
-
-	/* bucket_hash is only 15 bits */
-	bucket_hash &= IXGBE_ATR_HASH_MASK;
-
-	ixgbe_atr_get_vlan_id_82599(input, &vlan_id);
-	ixgbe_atr_get_src_port_82599(input, &src_port);
-	ixgbe_atr_get_dst_port_82599(input, &dst_port);
-	ixgbe_atr_get_flex_byte_82599(input, &flex_bytes);
-
-	fdirhash = soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT | bucket_hash;
-
-	/* Now figure out if we're IPv4 or IPv6 */
-	if (l4type & IXGBE_ATR_L4TYPE_IPV6_MASK) {
-		/* IPv6 */
-		ixgbe_atr_get_src_ipv6_82599(input, &src_ipv6_1, &src_ipv6_2,
-	                                     &src_ipv6_3, &src_ipv6_4);
-
-		IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(0), src_ipv6_1);
-		IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(1), src_ipv6_2);
-		IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(2), src_ipv6_3);
-		/* The last 4 bytes is the same register as IPv4 */
-		IXGBE_WRITE_REG(hw, IXGBE_FDIRIPSA, src_ipv6_4);
-
-		fdircmd |= IXGBE_FDIRCMD_IPV6;
-		fdircmd |= IXGBE_FDIRCMD_IPv6DMATCH;
-	} else {
-		/* IPv4 */
-		ixgbe_atr_get_src_ipv4_82599(input, &src_ipv4);
-		IXGBE_WRITE_REG(hw, IXGBE_FDIRIPSA, src_ipv4);
-	}
-
-	ixgbe_atr_get_dst_ipv4_82599(input, &dst_ipv4);
-	IXGBE_WRITE_REG(hw, IXGBE_FDIRIPDA, dst_ipv4);
-
-	IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, (vlan_id |
-	                            (flex_bytes << IXGBE_FDIRVLAN_FLEX_SHIFT)));
-	IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, (src_port |
-	              (dst_port << IXGBE_FDIRPORT_DESTINATION_SHIFT)));
-
 	/*
-	 * Program the relevant mask registers.  L4type cannot be
-	 * masked out in this implementation.
+	 * Program the relevant mask registers.  If src/dst_port or src/dst_addr
+	 * are zero, then assume a full mask for that field.  Also assume that
+	 * a VLAN of 0 is unspecified, so mask that out as well.  L4type
+	 * cannot be masked out in this implementation.
 	 *
 	 * This also assumes IPv4 only.  IPv6 masking isn't supported at this
 	 * point in time.
 	 */
-	IXGBE_WRITE_REG(hw, IXGBE_FDIRSIP4M, input_masks->src_ip_mask);
-	IXGBE_WRITE_REG(hw, IXGBE_FDIRDIP4M, input_masks->dst_ip_mask);
 
-	switch (l4type & IXGBE_ATR_L4TYPE_MASK) {
-	case IXGBE_ATR_L4TYPE_TCP:
-		IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, input_masks->src_port_mask);
-		IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM,
-				(IXGBE_READ_REG(hw, IXGBE_FDIRTCPM) |
-				 (input_masks->dst_port_mask << 16)));
+	/* Program FDIRM */
+	switch (ntohs(input_masks->vlan_id_mask) & 0xEFFF) {
+	case 0xEFFF:
+		/* Unmask VLAN ID - bit 0 and fall through to unmask prio */
+		fdirm &= ~IXGBE_FDIRM_VLANID;
+	case 0xE000:
+		/* Unmask VLAN prio - bit 1 */
+		fdirm &= ~IXGBE_FDIRM_VLANP;
 		break;
-	case IXGBE_ATR_L4TYPE_UDP:
-		IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, input_masks->src_port_mask);
-		IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM,
-				(IXGBE_READ_REG(hw, IXGBE_FDIRUDPM) |
-				 (input_masks->src_port_mask << 16)));
+	case 0x0FFF:
+		/* Unmask VLAN ID - bit 0 */
+		fdirm &= ~IXGBE_FDIRM_VLANID;
+		break;
+	case 0x0000:
+		/* do nothing, vlans already masked */
 		break;
 	default:
-		/* this already would have failed above */
-		break;
+		hw_dbg(hw, " Error on VLAN mask\n");
+		return IXGBE_ERR_CONFIG;
 	}
 
-	/* Program the last mask register, FDIRM */
-	if (input_masks->vlan_id_mask)
-		/* Mask both VLAN and VLANP - bits 0 and 1 */
-		fdirm |= 0x3;
-
-	if (input_masks->data_mask)
-		/* Flex bytes need masking, so mask the whole thing - bit 4 */
-		fdirm |= 0x10;
+	if (input_masks->flex_mask & 0xFFFF) {
+		if ((input_masks->flex_mask & 0xFFFF) != 0xFFFF) {
+			hw_dbg(hw, " Error on flexible byte mask\n");
+			return IXGBE_ERR_CONFIG;
+		}
+		/* Unmask Flex Bytes - bit 4 */
+		fdirm &= ~IXGBE_FDIRM_FLEX;
+	}
 
 	/* Now mask VM pool and destination IPv6 - bits 5 and 2 */
-	fdirm |= 0x24;
-
 	IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);
 
-	fdircmd |= IXGBE_FDIRCMD_CMD_ADD_FLOW;
-	fdircmd |= IXGBE_FDIRCMD_FILTER_UPDATE;
-	fdircmd |= IXGBE_FDIRCMD_LAST;
-	fdircmd |= IXGBE_FDIRCMD_QUEUE_EN;
-	fdircmd |= queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
+	/* store the TCP/UDP port masks, bit reversed from port layout */
+	fdirtcpm = ixgbe_get_fdirtcpm_82599(input_masks);
+
+	/* write both the same so that UDP and TCP use the same mask */
+	IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm);
+	IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ~fdirtcpm);
+
+	/* store source and destination IP masks (big-endian) */
+	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M,
+			     ~input_masks->src_ip_mask[0]);
+	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M,
+			     ~input_masks->dst_ip_mask[0]);
+
+	/* Apply masks to input data */
+	input->formatted.vlan_id &= input_masks->vlan_id_mask;
+	input->formatted.flex_bytes &= input_masks->flex_mask;
+	input->formatted.src_port &= input_masks->src_port_mask;
+	input->formatted.dst_port &= input_masks->dst_port_mask;
+	input->formatted.src_ip[0] &= input_masks->src_ip_mask[0];
+	input->formatted.dst_ip[0] &= input_masks->dst_ip_mask[0];
+
+	/* record vlan (little-endian) and flex_bytes (big-endian) */
+	fdirvlan =
+		IXGBE_STORE_AS_BE16(ntohs(input->formatted.flex_bytes));
+	fdirvlan <<= IXGBE_FDIRVLAN_FLEX_SHIFT;
+	fdirvlan |= ntohs(input->formatted.vlan_id);
+	IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, fdirvlan);
+
+	/* record source and destination port (little-endian) */
+	fdirport = ntohs(input->formatted.dst_port);
+	fdirport <<= IXGBE_FDIRPORT_DESTINATION_SHIFT;
+	fdirport |= ntohs(input->formatted.src_port);
+	IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport);
+
+	/* record the first 32 bits of the destination address (big-endian) */
+	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPDA, input->formatted.dst_ip[0]);
+
+	/* record the source address (big-endian) */
+	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPSA, input->formatted.src_ip[0]);
+
+	/* configure FDIRCMD register */
+	fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
+		  IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
+	fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
+	fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
+
+	/* we only want the bucket hash so drop the upper 16 bits */
+	fdirhash = ixgbe_atr_compute_hash_82599(input,
+						IXGBE_ATR_BUCKET_HASH_KEY);
+	fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
 
 	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
 	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd);
 
 	return 0;
 }
+
 /**
  *  ixgbe_read_analog_reg8_82599 - Reads 8 bit Omer analog register
  *  @hw: pointer to hardware structure
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index 23ff23e..2002ea8 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -1477,9 +1477,7 @@
 	reg_ctl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
 	reg_ctl &= ~IXGBE_RXCTRL_RXEN;
 	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg_ctl);
-	reg_ctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rx_ring->reg_idx));
-	reg_ctl &= ~IXGBE_RXDCTL_ENABLE;
-	IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rx_ring->reg_idx), reg_ctl);
+	ixgbe_disable_rx_queue(adapter, rx_ring);
 
 	/* now Tx */
 	reg_ctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx));
@@ -2279,10 +2277,11 @@
                                struct ethtool_rx_ntuple *cmd)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(dev);
-	struct ethtool_rx_ntuple_flow_spec fs = cmd->fs;
-	struct ixgbe_atr_input input_struct;
+	struct ethtool_rx_ntuple_flow_spec *fs = &cmd->fs;
+	union ixgbe_atr_input input_struct;
 	struct ixgbe_atr_input_masks input_masks;
 	int target_queue;
+	int err;
 
 	if (adapter->hw.mac.type == ixgbe_mac_82598EB)
 		return -EOPNOTSUPP;
@@ -2291,67 +2290,122 @@
 	 * Don't allow programming if the action is a queue greater than
 	 * the number of online Tx queues.
 	 */
-	if ((fs.action >= adapter->num_tx_queues) ||
-	    (fs.action < ETHTOOL_RXNTUPLE_ACTION_DROP))
+	if ((fs->action >= adapter->num_tx_queues) ||
+	    (fs->action < ETHTOOL_RXNTUPLE_ACTION_DROP))
 		return -EINVAL;
 
-	memset(&input_struct, 0, sizeof(struct ixgbe_atr_input));
+	memset(&input_struct, 0, sizeof(union ixgbe_atr_input));
 	memset(&input_masks, 0, sizeof(struct ixgbe_atr_input_masks));
 
-	input_masks.src_ip_mask = fs.m_u.tcp_ip4_spec.ip4src;
-	input_masks.dst_ip_mask = fs.m_u.tcp_ip4_spec.ip4dst;
-	input_masks.src_port_mask = fs.m_u.tcp_ip4_spec.psrc;
-	input_masks.dst_port_mask = fs.m_u.tcp_ip4_spec.pdst;
-	input_masks.vlan_id_mask = fs.vlan_tag_mask;
-	/* only use the lowest 2 bytes for flex bytes */
-	input_masks.data_mask = (fs.data_mask & 0xffff);
-
-	switch (fs.flow_type) {
+	/* record flow type */
+	switch (fs->flow_type) {
+	case IPV4_FLOW:
+		input_struct.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_IPV4;
+		break;
 	case TCP_V4_FLOW:
-		ixgbe_atr_set_l4type_82599(&input_struct, IXGBE_ATR_L4TYPE_TCP);
+		input_struct.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
 		break;
 	case UDP_V4_FLOW:
-		ixgbe_atr_set_l4type_82599(&input_struct, IXGBE_ATR_L4TYPE_UDP);
+		input_struct.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4;
 		break;
 	case SCTP_V4_FLOW:
-		ixgbe_atr_set_l4type_82599(&input_struct, IXGBE_ATR_L4TYPE_SCTP);
+		input_struct.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4;
 		break;
 	default:
 		return -1;
 	}
 
-	/* Mask bits from the inputs based on user-supplied mask */
-	ixgbe_atr_set_src_ipv4_82599(&input_struct,
-	            (fs.h_u.tcp_ip4_spec.ip4src & ~fs.m_u.tcp_ip4_spec.ip4src));
-	ixgbe_atr_set_dst_ipv4_82599(&input_struct,
-	            (fs.h_u.tcp_ip4_spec.ip4dst & ~fs.m_u.tcp_ip4_spec.ip4dst));
-	/* 82599 expects these to be byte-swapped for perfect filtering */
-	ixgbe_atr_set_src_port_82599(&input_struct,
-	       ((ntohs(fs.h_u.tcp_ip4_spec.psrc)) & ~fs.m_u.tcp_ip4_spec.psrc));
-	ixgbe_atr_set_dst_port_82599(&input_struct,
-	       ((ntohs(fs.h_u.tcp_ip4_spec.pdst)) & ~fs.m_u.tcp_ip4_spec.pdst));
+	/* copy vlan tag minus the CFI bit */
+	if ((fs->vlan_tag & 0xEFFF) || (~fs->vlan_tag_mask & 0xEFFF)) {
+		input_struct.formatted.vlan_id = htons(fs->vlan_tag & 0xEFFF);
+		if (!fs->vlan_tag_mask) {
+			input_masks.vlan_id_mask = htons(0xEFFF);
+		} else {
+			switch (~fs->vlan_tag_mask & 0xEFFF) {
+			/* all of these are valid vlan-mask values */
+			case 0xEFFF:
+			case 0xE000:
+			case 0x0FFF:
+			case 0x0000:
+				input_masks.vlan_id_mask =
+					htons(~fs->vlan_tag_mask);
+				break;
+			/* exit with error if vlan-mask is invalid */
+			default:
+				e_err(drv, "Partial VLAN ID or "
+				      "priority mask in vlan-mask is not "
+				      "supported by hardware\n");
+				return -1;
+			}
+		}
+	}
 
-	/* VLAN and Flex bytes are either completely masked or not */
-	if (!fs.vlan_tag_mask)
-		ixgbe_atr_set_vlan_id_82599(&input_struct, fs.vlan_tag);
+	/* make sure we only use the first 2 bytes of user data */
+	if ((fs->data & 0xFFFF) || (~fs->data_mask & 0xFFFF)) {
+		input_struct.formatted.flex_bytes = htons(fs->data & 0xFFFF);
+		if (!(fs->data_mask & 0xFFFF)) {
+			input_masks.flex_mask = 0xFFFF;
+		} else if (~fs->data_mask & 0xFFFF) {
+			e_err(drv, "Partial user-def-mask is not "
+			      "supported by hardware\n");
+			return -1;
+		}
+	}
 
-	if (!input_masks.data_mask)
-		/* make sure we only use the first 2 bytes of user data */
-		ixgbe_atr_set_flex_byte_82599(&input_struct,
-		                              (fs.data & 0xffff));
+	/*
+	 * Copy input into formatted structures
+	 *
+	 * These assignments are based on the following logic:
+	 * If neither input nor mask is set, assume the value is masked out.
+	 * If input is set but mask is not, the mask should default to accept all.
+	 * If input is not set but mask is set, the mask likely results in 0.
+	 * If both input and mask are set, assign both.
+	 */
+	if (fs->h_u.tcp_ip4_spec.ip4src || ~fs->m_u.tcp_ip4_spec.ip4src) {
+		input_struct.formatted.src_ip[0] = fs->h_u.tcp_ip4_spec.ip4src;
+		if (!fs->m_u.tcp_ip4_spec.ip4src)
+			input_masks.src_ip_mask[0] = 0xFFFFFFFF;
+		else
+			input_masks.src_ip_mask[0] =
+				~fs->m_u.tcp_ip4_spec.ip4src;
+	}
+	if (fs->h_u.tcp_ip4_spec.ip4dst || ~fs->m_u.tcp_ip4_spec.ip4dst) {
+		input_struct.formatted.dst_ip[0] = fs->h_u.tcp_ip4_spec.ip4dst;
+		if (!fs->m_u.tcp_ip4_spec.ip4dst)
+			input_masks.dst_ip_mask[0] = 0xFFFFFFFF;
+		else
+			input_masks.dst_ip_mask[0] =
+				~fs->m_u.tcp_ip4_spec.ip4dst;
+	}
+	if (fs->h_u.tcp_ip4_spec.psrc || ~fs->m_u.tcp_ip4_spec.psrc) {
+		input_struct.formatted.src_port = fs->h_u.tcp_ip4_spec.psrc;
+		if (!fs->m_u.tcp_ip4_spec.psrc)
+			input_masks.src_port_mask = 0xFFFF;
+		else
+			input_masks.src_port_mask = ~fs->m_u.tcp_ip4_spec.psrc;
+	}
+	if (fs->h_u.tcp_ip4_spec.pdst || ~fs->m_u.tcp_ip4_spec.pdst) {
+		input_struct.formatted.dst_port = fs->h_u.tcp_ip4_spec.pdst;
+		if (!fs->m_u.tcp_ip4_spec.pdst)
+			input_masks.dst_port_mask = 0xFFFF;
+		else
+			input_masks.dst_port_mask = ~fs->m_u.tcp_ip4_spec.pdst;
+	}
 
 	/* determine if we need to drop or route the packet */
-	if (fs.action == ETHTOOL_RXNTUPLE_ACTION_DROP)
+	if (fs->action == ETHTOOL_RXNTUPLE_ACTION_DROP)
 		target_queue = MAX_RX_QUEUES - 1;
 	else
-		target_queue = fs.action;
+		target_queue = fs->action;
 
 	spin_lock(&adapter->fdir_perfect_lock);
-	ixgbe_fdir_add_perfect_filter_82599(&adapter->hw, &input_struct,
-	                                    &input_masks, 0, target_queue);
+	err = ixgbe_fdir_add_perfect_filter_82599(&adapter->hw,
+						  &input_struct,
+						  &input_masks, 0,
+						  target_queue);
 	spin_unlock(&adapter->fdir_perfect_lock);
 
-	return 0;
+	return err ? -1 : 0;
 }
 
 static const struct ethtool_ops ixgbe_ethtool_ops = {
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 38ab4f3..a060610 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -3024,6 +3024,36 @@
 	}
 }
 
+void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter,
+			    struct ixgbe_ring *ring)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+	int wait_loop = IXGBE_MAX_RX_DESC_POLL;
+	u32 rxdctl;
+	u8 reg_idx = ring->reg_idx;
+
+	rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
+	rxdctl &= ~IXGBE_RXDCTL_ENABLE;
+
+	/* write value back with RXDCTL.ENABLE bit cleared */
+	IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
+
+	if (hw->mac.type == ixgbe_mac_82598EB &&
+	    !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
+		return;
+
+	/* the hardware may take up to 100us to really disable the rx queue */
+	do {
+		udelay(10);
+		rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
+	} while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE));
+
+	if (!wait_loop) {
+		e_err(drv, "RXDCTL.ENABLE on Rx queue %d not cleared within "
+		      "the polling period\n", reg_idx);
+	}
+}
+
 void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
 			     struct ixgbe_ring *ring)
 {
@@ -3034,9 +3064,7 @@
 
 	/* disable queue to avoid issues while updating state */
 	rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
-	IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx),
-			rxdctl & ~IXGBE_RXDCTL_ENABLE);
-	IXGBE_WRITE_FLUSH(hw);
+	ixgbe_disable_rx_queue(adapter, ring);
 
 	IXGBE_WRITE_REG(hw, IXGBE_RDBAL(reg_idx), (rdba & DMA_BIT_MASK(32)));
 	IXGBE_WRITE_REG(hw, IXGBE_RDBAH(reg_idx), (rdba >> 32));
@@ -4064,7 +4092,11 @@
 	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
 	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
 
-	IXGBE_WRITE_FLUSH(hw);
+	/* disable all enabled rx queues */
+	for (i = 0; i < adapter->num_rx_queues; i++)
+		/* this call also flushes the previous write */
+		ixgbe_disable_rx_queue(adapter, adapter->rx_ring[i]);
+
 	msleep(10);
 
 	netif_tx_stop_all_queues(netdev);
@@ -4789,6 +4821,12 @@
 
 	adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
 	adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
+	if (adapter->flags & (IXGBE_FLAG_FDIR_HASH_CAPABLE |
+			      IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) {
+		e_err(probe,
+		      "Flow Director is not supported while multiple "
+		      "queues are disabled.  Disabling Flow Director\n");
+	}
 	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
 	adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
 	adapter->atr_sample_rate = 0;
@@ -5094,16 +5132,11 @@
 		adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
 		if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM)
 			adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
-		if (dev->features & NETIF_F_NTUPLE) {
-			/* Flow Director perfect filter enabled */
-			adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
-			adapter->atr_sample_rate = 0;
-			spin_lock_init(&adapter->fdir_perfect_lock);
-		} else {
-			/* Flow Director hash filters enabled */
-			adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
-			adapter->atr_sample_rate = 20;
-		}
+		/* n-tuple support exists, always init our spinlock */
+		spin_lock_init(&adapter->fdir_perfect_lock);
+		/* Flow Director hash filters enabled */
+		adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
+		adapter->atr_sample_rate = 20;
 		adapter->ring_feature[RING_F_FDIR].indices =
 							 IXGBE_MAX_FDIR_INDICES;
 		adapter->fdir_pballoc = 0;
@@ -6474,38 +6507,92 @@
 	writel(i, tx_ring->tail);
 }
 
-static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb,
-		      u8 queue, u32 tx_flags, __be16 protocol)
+static void ixgbe_atr(struct ixgbe_ring *ring, struct sk_buff *skb,
+		      u32 tx_flags, __be16 protocol)
 {
-	struct ixgbe_atr_input atr_input;
-	struct iphdr *iph = ip_hdr(skb);
-	struct ethhdr *eth = (struct ethhdr *)skb->data;
+	struct ixgbe_q_vector *q_vector = ring->q_vector;
+	union ixgbe_atr_hash_dword input = { .dword = 0 };
+	union ixgbe_atr_hash_dword common = { .dword = 0 };
+	union {
+		unsigned char *network;
+		struct iphdr *ipv4;
+		struct ipv6hdr *ipv6;
+	} hdr;
 	struct tcphdr *th;
-	u16 vlan_id;
+	__be16 vlan_id;
 
-	/* Right now, we support IPv4 w/ TCP only */
-	if (protocol != htons(ETH_P_IP) ||
-	    iph->protocol != IPPROTO_TCP)
+	/* if ring doesn't have an interrupt vector, cannot perform ATR */
+	if (!q_vector)
 		return;
 
-	memset(&atr_input, 0, sizeof(struct ixgbe_atr_input));
+	/* do nothing if sampling is disabled */
+	if (!ring->atr_sample_rate)
+		return;
 
-	vlan_id = (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK) >>
-		   IXGBE_TX_FLAGS_VLAN_SHIFT;
+	ring->atr_count++;
+
+	/* snag network header to get L4 type and address */
+	hdr.network = skb_network_header(skb);
+
+	/* Currently only IPv4/IPv6 with TCP is supported */
+	if ((protocol != __constant_htons(ETH_P_IPV6) ||
+	     hdr.ipv6->nexthdr != IPPROTO_TCP) &&
+	    (protocol != __constant_htons(ETH_P_IP) ||
+	     hdr.ipv4->protocol != IPPROTO_TCP))
+		return;
 
 	th = tcp_hdr(skb);
 
-	ixgbe_atr_set_vlan_id_82599(&atr_input, vlan_id);
-	ixgbe_atr_set_src_port_82599(&atr_input, th->dest);
-	ixgbe_atr_set_dst_port_82599(&atr_input, th->source);
-	ixgbe_atr_set_flex_byte_82599(&atr_input, eth->h_proto);
-	ixgbe_atr_set_l4type_82599(&atr_input, IXGBE_ATR_L4TYPE_TCP);
-	/* src and dst are inverted, think how the receiver sees them */
-	ixgbe_atr_set_src_ipv4_82599(&atr_input, iph->daddr);
-	ixgbe_atr_set_dst_ipv4_82599(&atr_input, iph->saddr);
+	/* skip this packet since the socket is closing */
+	if (th->fin)
+		return;
+
+	/* sample on all syn packets or once every atr sample count */
+	if (!th->syn && (ring->atr_count < ring->atr_sample_rate))
+		return;
+
+	/* reset sample count */
+	ring->atr_count = 0;
+
+	vlan_id = htons(tx_flags >> IXGBE_TX_FLAGS_VLAN_SHIFT);
+
+	/*
+	 * src and dst are inverted, think how the receiver sees them
+	 *
+	 * The input is broken into two sections: a non-compressed section
+	 * containing vm_pool, vlan_id, and flow_type, and a compressed dword
+	 * in which the rest of the data is XORed together and stored.
+	 */
+	input.formatted.vlan_id = vlan_id;
+
+	/*
+	 * since src port and flex bytes occupy the same word, XOR them together
+	 * and write the value to the source port portion of the compressed dword
+	 */
+	if (vlan_id)
+		common.port.src ^= th->dest ^ __constant_htons(ETH_P_8021Q);
+	else
+		common.port.src ^= th->dest ^ protocol;
+	common.port.dst ^= th->source;
+
+	if (protocol == __constant_htons(ETH_P_IP)) {
+		input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
+		common.ip ^= hdr.ipv4->saddr ^ hdr.ipv4->daddr;
+	} else {
+		input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV6;
+		common.ip ^= hdr.ipv6->saddr.s6_addr32[0] ^
+			     hdr.ipv6->saddr.s6_addr32[1] ^
+			     hdr.ipv6->saddr.s6_addr32[2] ^
+			     hdr.ipv6->saddr.s6_addr32[3] ^
+			     hdr.ipv6->daddr.s6_addr32[0] ^
+			     hdr.ipv6->daddr.s6_addr32[1] ^
+			     hdr.ipv6->daddr.s6_addr32[2] ^
+			     hdr.ipv6->daddr.s6_addr32[3];
+	}
 
 	/* This assumes the Rx queue and Tx queue are bound to the same CPU */
-	ixgbe_fdir_add_signature_filter_82599(&adapter->hw, &atr_input, queue);
+	ixgbe_fdir_add_signature_filter_82599(&q_vector->adapter->hw,
+					      input, common, ring->queue_index);
 }
 
 static int __ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, int size)
@@ -6676,16 +6763,8 @@
 	count = ixgbe_tx_map(adapter, tx_ring, skb, tx_flags, first, hdr_len);
 	if (count) {
 		/* add the ATR filter if ATR is on */
-		if (tx_ring->atr_sample_rate) {
-			++tx_ring->atr_count;
-			if ((tx_ring->atr_count >= tx_ring->atr_sample_rate) &&
-			     test_bit(__IXGBE_TX_FDIR_INIT_DONE,
-				      &tx_ring->state)) {
-				ixgbe_atr(adapter, skb, tx_ring->queue_index,
-					  tx_flags, protocol);
-				tx_ring->atr_count = 0;
-			}
-		}
+		if (test_bit(__IXGBE_TX_FDIR_INIT_DONE, &tx_ring->state))
+			ixgbe_atr(tx_ring, skb, tx_flags, protocol);
 		txq = netdev_get_tx_queue(netdev, tx_ring->queue_index);
 		txq->tx_bytes += skb->len;
 		txq->tx_packets++;
diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h
index 446f3467..fd3358f 100644
--- a/drivers/net/ixgbe/ixgbe_type.h
+++ b/drivers/net/ixgbe/ixgbe_type.h
@@ -1947,10 +1947,9 @@
 #define IXGBE_FDIRM_VLANID                      0x00000001
 #define IXGBE_FDIRM_VLANP                       0x00000002
 #define IXGBE_FDIRM_POOL                        0x00000004
-#define IXGBE_FDIRM_L3P                         0x00000008
-#define IXGBE_FDIRM_L4P                         0x00000010
-#define IXGBE_FDIRM_FLEX                        0x00000020
-#define IXGBE_FDIRM_DIPv6                       0x00000040
+#define IXGBE_FDIRM_L4P                         0x00000008
+#define IXGBE_FDIRM_FLEX                        0x00000010
+#define IXGBE_FDIRM_DIPv6                       0x00000020
 
 #define IXGBE_FDIRFREE_FREE_MASK                0xFFFF
 #define IXGBE_FDIRFREE_FREE_SHIFT               0
@@ -1990,6 +1989,7 @@
 #define IXGBE_FDIRCMD_LAST                      0x00000800
 #define IXGBE_FDIRCMD_COLLISION                 0x00001000
 #define IXGBE_FDIRCMD_QUEUE_EN                  0x00008000
+#define IXGBE_FDIRCMD_FLOW_TYPE_SHIFT           5
 #define IXGBE_FDIRCMD_RX_QUEUE_SHIFT            16
 #define IXGBE_FDIRCMD_VT_POOL_SHIFT             24
 #define IXGBE_FDIR_INIT_DONE_POLL               10
@@ -2147,51 +2147,80 @@
 #define FC_LOW_WATER(MTU)  (2 * (2 * PAUSE_MTU(MTU) + PAUSE_RTT))
 
 /* Software ATR hash keys */
-#define IXGBE_ATR_BUCKET_HASH_KEY    0xE214AD3D
-#define IXGBE_ATR_SIGNATURE_HASH_KEY 0x14364D17
+#define IXGBE_ATR_BUCKET_HASH_KEY    0x3DAD14E2
+#define IXGBE_ATR_SIGNATURE_HASH_KEY 0x174D3614
 
-/* Software ATR input stream offsets and masks */
-#define IXGBE_ATR_VLAN_OFFSET       0
-#define IXGBE_ATR_SRC_IPV6_OFFSET   2
-#define IXGBE_ATR_SRC_IPV4_OFFSET  14
-#define IXGBE_ATR_DST_IPV6_OFFSET  18
-#define IXGBE_ATR_DST_IPV4_OFFSET  30
-#define IXGBE_ATR_SRC_PORT_OFFSET  34
-#define IXGBE_ATR_DST_PORT_OFFSET  36
-#define IXGBE_ATR_FLEX_BYTE_OFFSET 38
-#define IXGBE_ATR_VM_POOL_OFFSET   40
-#define IXGBE_ATR_L4TYPE_OFFSET    41
-
+/* Software ATR input stream values and masks */
+#define IXGBE_ATR_HASH_MASK     0x7fff
 #define IXGBE_ATR_L4TYPE_MASK      0x3
-#define IXGBE_ATR_L4TYPE_IPV6_MASK 0x4
 #define IXGBE_ATR_L4TYPE_UDP       0x1
 #define IXGBE_ATR_L4TYPE_TCP       0x2
 #define IXGBE_ATR_L4TYPE_SCTP      0x3
-#define IXGBE_ATR_HASH_MASK     0x7fff
+#define IXGBE_ATR_L4TYPE_IPV6_MASK 0x4
+enum ixgbe_atr_flow_type {
+	IXGBE_ATR_FLOW_TYPE_IPV4   = 0x0,
+	IXGBE_ATR_FLOW_TYPE_UDPV4  = 0x1,
+	IXGBE_ATR_FLOW_TYPE_TCPV4  = 0x2,
+	IXGBE_ATR_FLOW_TYPE_SCTPV4 = 0x3,
+	IXGBE_ATR_FLOW_TYPE_IPV6   = 0x4,
+	IXGBE_ATR_FLOW_TYPE_UDPV6  = 0x5,
+	IXGBE_ATR_FLOW_TYPE_TCPV6  = 0x6,
+	IXGBE_ATR_FLOW_TYPE_SCTPV6 = 0x7,
+};
 
 /* Flow Director ATR input struct. */
-struct ixgbe_atr_input {
-	/* Byte layout in order, all values with MSB first:
+union ixgbe_atr_input {
+	/*
+	 * Byte layout in order, all values with MSB first:
 	 *
+	 * vm_pool    - 1 byte
+	 * flow_type  - 1 byte
 	 * vlan_id    - 2 bytes
 	 * src_ip     - 16 bytes
 	 * dst_ip     - 16 bytes
 	 * src_port   - 2 bytes
 	 * dst_port   - 2 bytes
 	 * flex_bytes - 2 bytes
-	 * vm_pool    - 1 byte
-	 * l4type     - 1 byte
+	 * rsvd0      - 2 bytes - space reserved must be 0.
 	 */
-	u8 byte_stream[42];
+	struct {
+		u8     vm_pool;
+		u8     flow_type;
+		__be16 vlan_id;
+		__be32 dst_ip[4];
+		__be32 src_ip[4];
+		__be16 src_port;
+		__be16 dst_port;
+		__be16 flex_bytes;
+		__be16 rsvd0;
+	} formatted;
+	__be32 dword_stream[11];
+};
+
+/* Flow Director compressed ATR hash input struct */
+union ixgbe_atr_hash_dword {
+	struct {
+		u8 vm_pool;
+		u8 flow_type;
+		__be16 vlan_id;
+	} formatted;
+	__be32 ip;
+	struct {
+		__be16 src;
+		__be16 dst;
+	} port;
+	__be16 flex_bytes;
+	__be32 dword;
 };
 
 struct ixgbe_atr_input_masks {
-	u32 src_ip_mask;
-	u32 dst_ip_mask;
-	u16 src_port_mask;
-	u16 dst_port_mask;
-	u16 vlan_id_mask;
-	u16 data_mask;
+	__be16 rsvd0;
+	__be16 vlan_id_mask;
+	__be32 dst_ip_mask[4];
+	__be32 src_ip_mask[4];
+	__be16 src_port_mask;
+	__be16 dst_port_mask;
+	__be16 flex_mask;
 };
 
 enum ixgbe_eeprom_type {
diff --git a/drivers/net/ll_temac_main.c b/drivers/net/ll_temac_main.c
index 183765c..f35554d 100644
--- a/drivers/net/ll_temac_main.c
+++ b/drivers/net/ll_temac_main.c
@@ -238,7 +238,7 @@
 		goto out;
 	}
 	/* allocate the tx and rx ring buffer descriptors. */
-	/* returns a virtual addres and a physical address. */
+	/* returns a virtual address and a physical address. */
 	lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent,
 					 sizeof(*lp->tx_bd_v) * TX_BD_NUM,
 					 &lp->tx_bd_p, GFP_KERNEL);
diff --git a/drivers/net/mlx4/alloc.c b/drivers/net/mlx4/alloc.c
index 8f4bf1f..3a4277f 100644
--- a/drivers/net/mlx4/alloc.c
+++ b/drivers/net/mlx4/alloc.c
@@ -178,6 +178,7 @@
 	} else {
 		int i;
 
+		buf->direct.buf  = NULL;
 		buf->nbufs       = (size + PAGE_SIZE - 1) / PAGE_SIZE;
 		buf->npages      = buf->nbufs;
 		buf->page_shift  = PAGE_SHIFT;
@@ -229,7 +230,7 @@
 		dma_free_coherent(&dev->pdev->dev, size, buf->direct.buf,
 				  buf->direct.map);
 	else {
-		if (BITS_PER_LONG == 64)
+		if (BITS_PER_LONG == 64 && buf->direct.buf)
 			vunmap(buf->direct.buf);
 
 		for (i = 0; i < buf->nbufs; ++i)
diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c
index 6d6806b..897f576 100644
--- a/drivers/net/mlx4/en_netdev.c
+++ b/drivers/net/mlx4/en_netdev.c
@@ -972,7 +972,8 @@
 	int i;
 	int err;
 
-	dev = alloc_etherdev_mq(sizeof(struct mlx4_en_priv), prof->tx_ring_num);
+	dev = alloc_etherdev_mqs(sizeof(struct mlx4_en_priv),
+	    prof->tx_ring_num, prof->rx_ring_num);
 	if (dev == NULL) {
 		mlx4_err(mdev, "Net device allocation failed\n");
 		return -ENOMEM;
diff --git a/drivers/net/mlx4/fw.c b/drivers/net/mlx4/fw.c
index 7a7e18b..5de1db8 100644
--- a/drivers/net/mlx4/fw.c
+++ b/drivers/net/mlx4/fw.c
@@ -289,10 +289,8 @@
 		MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_BF_REG_SZ_OFFSET);
 		dev_cap->bf_reg_size = 1 << (field & 0x1f);
 		MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_MAX_BF_REGS_PER_PAGE_OFFSET);
-		if ((1 << (field & 0x3f)) > (PAGE_SIZE / dev_cap->bf_reg_size)) {
-			mlx4_warn(dev, "firmware bug: log2 # of blue flame regs is invalid (%d), forcing 3\n", field & 0x1f);
+		if ((1 << (field & 0x3f)) > (PAGE_SIZE / dev_cap->bf_reg_size))
 			field = 3;
-		}
 		dev_cap->bf_regs_per_page = 1 << (field & 0x3f);
 		mlx4_dbg(dev, "BlueFlame available (reg size %d, regs/page %d)\n",
 			 dev_cap->bf_reg_size, dev_cap->bf_regs_per_page);
diff --git a/drivers/net/pcmcia/pcnet_cs.c b/drivers/net/pcmcia/pcnet_cs.c
index 2c15891..e953793 100644
--- a/drivers/net/pcmcia/pcnet_cs.c
+++ b/drivers/net/pcmcia/pcnet_cs.c
@@ -1536,6 +1536,7 @@
 	PCMCIA_DEVICE_PROD_ID12("CONTEC", "C-NET(PC)C-10L", 0x21cab552, 0xf6f90722),
 	PCMCIA_DEVICE_PROD_ID12("corega", "FEther PCC-TXF", 0x0a21501a, 0xa51564a2),
 	PCMCIA_DEVICE_PROD_ID12("corega", "Ether CF-TD", 0x0a21501a, 0x6589340a),
+	PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega Ether CF-TD LAN Card", 0x5261440f, 0x8797663b),
 	PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega EtherII PCC-T", 0x5261440f, 0xfa9d85bd),
 	PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega EtherII PCC-TD", 0x5261440f, 0xc49bd73d),
 	PCMCIA_DEVICE_PROD_ID12("Corega K.K.", "corega EtherII PCC-TD", 0xd4fdcbd8, 0xc49bd73d),
diff --git a/drivers/net/ppp_async.c b/drivers/net/ppp_async.c
index 78d70a6..a1b82c9 100644
--- a/drivers/net/ppp_async.c
+++ b/drivers/net/ppp_async.c
@@ -32,6 +32,7 @@
 #include <linux/init.h>
 #include <linux/jiffies.h>
 #include <linux/slab.h>
+#include <asm/unaligned.h>
 #include <asm/uaccess.h>
 #include <asm/string.h>
 
@@ -542,7 +543,7 @@
 	data = ap->tpkt->data;
 	count = ap->tpkt->len;
 	fcs = ap->tfcs;
-	proto = (data[0] << 8) + data[1];
+	proto = get_unaligned_be16(data);
 
 	/*
 	 * LCP packets with code values between 1 (configure-request)
@@ -963,7 +964,7 @@
 	code = data[0];
 	if (code != CONFACK && code != CONFREQ)
 		return;
-	dlen = (data[2] << 8) + data[3];
+	dlen = get_unaligned_be16(data + 2);
 	if (len < dlen)
 		return;		/* packet got truncated or length is bogus */
 
@@ -997,15 +998,14 @@
 	while (dlen >= 2 && dlen >= data[1] && data[1] >= 2) {
 		switch (data[0]) {
 		case LCP_MRU:
-			val = (data[2] << 8) + data[3];
+			val = get_unaligned_be16(data + 2);
 			if (inbound)
 				ap->mru = val;
 			else
 				ap->chan.mtu = val;
 			break;
 		case LCP_ASYNCMAP:
-			val = (data[2] << 24) + (data[3] << 16)
-				+ (data[4] << 8) + data[5];
+			val = get_unaligned_be32(data + 2);
 			if (inbound)
 				ap->raccm = val;
 			else
diff --git a/drivers/net/ppp_deflate.c b/drivers/net/ppp_deflate.c
index 695bc83..4358330 100644
--- a/drivers/net/ppp_deflate.c
+++ b/drivers/net/ppp_deflate.c
@@ -41,6 +41,7 @@
 #include <linux/ppp-comp.h>
 
 #include <linux/zlib.h>
+#include <asm/unaligned.h>
 
 /*
  * State for a Deflate (de)compressor.
@@ -232,11 +233,9 @@
 	 */
 	wptr[0] = PPP_ADDRESS(rptr);
 	wptr[1] = PPP_CONTROL(rptr);
-	wptr[2] = PPP_COMP >> 8;
-	wptr[3] = PPP_COMP;
+	put_unaligned_be16(PPP_COMP, wptr + 2);
 	wptr += PPP_HDRLEN;
-	wptr[0] = state->seqno >> 8;
-	wptr[1] = state->seqno;
+	put_unaligned_be16(state->seqno, wptr);
 	wptr += DEFLATE_OVHD;
 	olen = PPP_HDRLEN + DEFLATE_OVHD;
 	state->strm.next_out = wptr;
@@ -451,7 +450,7 @@
 	}
 
 	/* Check the sequence number. */
-	seq = (ibuf[PPP_HDRLEN] << 8) + ibuf[PPP_HDRLEN+1];
+	seq = get_unaligned_be16(ibuf + PPP_HDRLEN);
 	if (seq != (state->seqno & 0xffff)) {
 		if (state->debug)
 			printk(KERN_DEBUG "z_decompress%d: bad seq # %d, expected %d\n",
diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c
index 6456484..c7a6c44 100644
--- a/drivers/net/ppp_generic.c
+++ b/drivers/net/ppp_generic.c
@@ -46,6 +46,7 @@
 #include <linux/device.h>
 #include <linux/mutex.h>
 #include <linux/slab.h>
+#include <asm/unaligned.h>
 #include <net/slhc_vj.h>
 #include <asm/atomic.h>
 
@@ -210,7 +211,7 @@
 };
 
 /* Get the PPP protocol number from a skb */
-#define PPP_PROTO(skb)	(((skb)->data[0] << 8) + (skb)->data[1])
+#define PPP_PROTO(skb)	get_unaligned_be16((skb)->data)
 
 /* We limit the length of ppp->file.rq to this (arbitrary) value */
 #define PPP_MAX_RQLEN	32
@@ -964,8 +965,7 @@
 
 	pp = skb_push(skb, 2);
 	proto = npindex_to_proto[npi];
-	pp[0] = proto >> 8;
-	pp[1] = proto;
+	put_unaligned_be16(proto, pp);
 
 	netif_stop_queue(dev);
 	skb_queue_tail(&ppp->file.xq, skb);
@@ -1473,8 +1473,7 @@
 		q = skb_put(frag, flen + hdrlen);
 
 		/* make the MP header */
-		q[0] = PPP_MP >> 8;
-		q[1] = PPP_MP;
+		put_unaligned_be16(PPP_MP, q);
 		if (ppp->flags & SC_MP_XSHORTSEQ) {
 			q[2] = bits + ((ppp->nxseq >> 8) & 0xf);
 			q[3] = ppp->nxseq;
diff --git a/drivers/net/ppp_mppe.c b/drivers/net/ppp_mppe.c
index 6d1a1b8..9a1849a 100644
--- a/drivers/net/ppp_mppe.c
+++ b/drivers/net/ppp_mppe.c
@@ -55,6 +55,7 @@
 #include <linux/ppp_defs.h>
 #include <linux/ppp-comp.h>
 #include <linux/scatterlist.h>
+#include <asm/unaligned.h>
 
 #include "ppp_mppe.h"
 
@@ -395,16 +396,14 @@
 	 */
 	obuf[0] = PPP_ADDRESS(ibuf);
 	obuf[1] = PPP_CONTROL(ibuf);
-	obuf[2] = PPP_COMP >> 8;	/* isize + MPPE_OVHD + 1 */
-	obuf[3] = PPP_COMP;	/* isize + MPPE_OVHD + 2 */
+	put_unaligned_be16(PPP_COMP, obuf + 2);
 	obuf += PPP_HDRLEN;
 
 	state->ccount = (state->ccount + 1) % MPPE_CCOUNT_SPACE;
 	if (state->debug >= 7)
 		printk(KERN_DEBUG "mppe_compress[%d]: ccount %d\n", state->unit,
 		       state->ccount);
-	obuf[0] = state->ccount >> 8;
-	obuf[1] = state->ccount & 0xff;
+	put_unaligned_be16(state->ccount, obuf);
 
 	if (!state->stateful ||	/* stateless mode     */
 	    ((state->ccount & 0xff) == 0xff) ||	/* "flag" packet      */
diff --git a/drivers/net/ppp_synctty.c b/drivers/net/ppp_synctty.c
index 4c95ec3..4e6b72f 100644
--- a/drivers/net/ppp_synctty.c
+++ b/drivers/net/ppp_synctty.c
@@ -45,6 +45,7 @@
 #include <linux/completion.h>
 #include <linux/init.h>
 #include <linux/slab.h>
+#include <asm/unaligned.h>
 #include <asm/uaccess.h>
 
 #define PPP_VERSION	"2.4.2"
@@ -563,7 +564,7 @@
 	int islcp;
 
 	data  = skb->data;
-	proto = (data[0] << 8) + data[1];
+	proto = get_unaligned_be16(data);
 
 	/* LCP packets with codes between 1 (configure-request)
 	 * and 7 (code-reject) must be sent as though no options
diff --git a/drivers/net/qlcnic/qlcnic.h b/drivers/net/qlcnic/qlcnic.h
index 9c2a02d..44e316f 100644
--- a/drivers/net/qlcnic/qlcnic.h
+++ b/drivers/net/qlcnic/qlcnic.h
@@ -34,8 +34,8 @@
 
 #define _QLCNIC_LINUX_MAJOR 5
 #define _QLCNIC_LINUX_MINOR 0
-#define _QLCNIC_LINUX_SUBVERSION 14
-#define QLCNIC_LINUX_VERSIONID  "5.0.14"
+#define _QLCNIC_LINUX_SUBVERSION 15
+#define QLCNIC_LINUX_VERSIONID  "5.0.15"
 #define QLCNIC_DRV_IDC_VER  0x01
 #define QLCNIC_DRIVER_VERSION  ((_QLCNIC_LINUX_MAJOR << 16) |\
 		 (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION))
@@ -289,6 +289,26 @@
 	u32	reserved[5];
 };
 
+/* Flash Defines and Structures */
+#define QLCNIC_FLT_LOCATION	0x3F1000
+#define QLCNIC_FW_IMAGE_REGION	0x74
+struct qlcnic_flt_header {
+	u16 version;
+	u16 len;
+	u16 checksum;
+	u16 reserved;
+};
+
+struct qlcnic_flt_entry {
+	u8 region;
+	u8 reserved0;
+	u8 attrib;
+	u8 reserved1;
+	u32 size;
+	u32 start_addr;
+	u32 end_add;
+};
+
 /* Magic number to let user know flash is programmed */
 #define	QLCNIC_BDINFO_MAGIC 0x12345678
 
diff --git a/drivers/net/qlcnic/qlcnic_ethtool.c b/drivers/net/qlcnic/qlcnic_ethtool.c
index 1e7af70..4c14510 100644
--- a/drivers/net/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/qlcnic/qlcnic_ethtool.c
@@ -672,7 +672,7 @@
 	if (data[1])
 		eth_test->flags |= ETH_TEST_FL_FAILED;
 
-	if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
+	if (eth_test->flags & ETH_TEST_FL_OFFLINE) {
 		data[2] = qlcnic_irq_test(dev);
 		if (data[2])
 			eth_test->flags |= ETH_TEST_FL_FAILED;
diff --git a/drivers/net/qlcnic/qlcnic_init.c b/drivers/net/qlcnic/qlcnic_init.c
index 9b9c7c3..a7f1d5b 100644
--- a/drivers/net/qlcnic/qlcnic_init.c
+++ b/drivers/net/qlcnic/qlcnic_init.c
@@ -627,12 +627,73 @@
 	return 0;
 }
 
+static int qlcnic_get_flt_entry(struct qlcnic_adapter *adapter, u8 region,
+				struct qlcnic_flt_entry *region_entry)
+{
+	struct qlcnic_flt_header flt_hdr;
+	struct qlcnic_flt_entry *flt_entry;
+	int i = 0, ret;
+	u32 entry_size;
+
+	memset(region_entry, 0, sizeof(struct qlcnic_flt_entry));
+	ret = qlcnic_rom_fast_read_words(adapter, QLCNIC_FLT_LOCATION,
+					 (u8 *)&flt_hdr,
+					 sizeof(struct qlcnic_flt_header));
+	if (ret) {
+		dev_warn(&adapter->pdev->dev,
+			 "error reading flash layout header\n");
+		return -EIO;
+	}
+
+	entry_size = flt_hdr.len - sizeof(struct qlcnic_flt_header);
+	flt_entry = (struct qlcnic_flt_entry *)vzalloc(entry_size);
+	if (flt_entry == NULL) {
+		dev_warn(&adapter->pdev->dev, "error allocating memory\n");
+		return -EIO;
+	}
+
+	ret = qlcnic_rom_fast_read_words(adapter, QLCNIC_FLT_LOCATION +
+					 sizeof(struct qlcnic_flt_header),
+					 (u8 *)flt_entry, entry_size);
+	if (ret) {
+		dev_warn(&adapter->pdev->dev,
+			 "error reading flash layout entries\n");
+		goto err_out;
+	}
+
+	while (i < (entry_size/sizeof(struct qlcnic_flt_entry))) {
+		if (flt_entry[i].region == region)
+			break;
+		i++;
+	}
+	if (i >= (entry_size/sizeof(struct qlcnic_flt_entry))) {
+		dev_warn(&adapter->pdev->dev,
+			 "region=%x not found in %d regions\n", region, i);
+		ret = -EIO;
+		goto err_out;
+	}
+	memcpy(region_entry, &flt_entry[i], sizeof(struct qlcnic_flt_entry));
+
+err_out:
+	vfree(flt_entry);
+	return ret;
+}
+
 int
 qlcnic_check_flash_fw_ver(struct qlcnic_adapter *adapter)
 {
+	struct qlcnic_flt_entry fw_entry;
 	u32 ver = -1, min_ver;
+	int ret;
 
-	qlcnic_rom_fast_read(adapter, QLCNIC_FW_VERSION_OFFSET, (int *)&ver);
+	ret = qlcnic_get_flt_entry(adapter, QLCNIC_FW_IMAGE_REGION, &fw_entry);
+	if (!ret)
+		/* 0-4: signature, 4-8: fw version */
+		qlcnic_rom_fast_read(adapter, fw_entry.start_addr + 4,
+				     (int *)&ver);
+	else
+		qlcnic_rom_fast_read(adapter, QLCNIC_FW_VERSION_OFFSET,
+				     (int *)&ver);
 
 	ver = QLCNIC_DECODE_VERSION(ver);
 	min_ver = QLCNIC_MIN_FW_VERSION;
diff --git a/drivers/net/qlcnic/qlcnic_main.c b/drivers/net/qlcnic/qlcnic_main.c
index 11e3a46..37c04b4 100644
--- a/drivers/net/qlcnic/qlcnic_main.c
+++ b/drivers/net/qlcnic/qlcnic_main.c
@@ -31,15 +31,15 @@
 
 static struct workqueue_struct *qlcnic_wq;
 static int qlcnic_mac_learn;
-module_param(qlcnic_mac_learn, int, 0644);
+module_param(qlcnic_mac_learn, int, 0444);
 MODULE_PARM_DESC(qlcnic_mac_learn, "Mac Filter (0=disabled, 1=enabled)");
 
 static int use_msi = 1;
-module_param(use_msi, int, 0644);
+module_param(use_msi, int, 0444);
 MODULE_PARM_DESC(use_msi, "MSI interrupt (0=disabled, 1=enabled");
 
 static int use_msi_x = 1;
-module_param(use_msi_x, int, 0644);
+module_param(use_msi_x, int, 0444);
 MODULE_PARM_DESC(use_msi_x, "MSI-X interrupt (0=disabled, 1=enabled");
 
 static int auto_fw_reset = AUTO_FW_RESET_ENABLED;
@@ -47,11 +47,11 @@
 MODULE_PARM_DESC(auto_fw_reset, "Auto firmware reset (0=disabled, 1=enabled");
 
 static int load_fw_file;
-module_param(load_fw_file, int, 0644);
+module_param(load_fw_file, int, 0444);
 MODULE_PARM_DESC(load_fw_file, "Load firmware from (0=flash, 1=file");
 
 static int qlcnic_config_npars;
-module_param(qlcnic_config_npars, int, 0644);
+module_param(qlcnic_config_npars, int, 0444);
 MODULE_PARM_DESC(qlcnic_config_npars, "Configure NPARs (0=disabled, 1=enabled");
 
 static int __devinit qlcnic_probe(struct pci_dev *pdev,
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 27a7c20..bb8645a 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -1632,36 +1632,134 @@
 {
 	__le32 *phytable = (__le32 *)fw->data;
 	struct net_device *dev = tp->dev;
-	size_t i;
+	size_t index, fw_size = fw->size / sizeof(*phytable);
+	u32 predata, count;
 
 	if (fw->size % sizeof(*phytable)) {
 		netif_err(tp, probe, dev, "odd sized firmware %zd\n", fw->size);
 		return;
 	}
 
-	for (i = 0; i < fw->size / sizeof(*phytable); i++) {
-		u32 action = le32_to_cpu(phytable[i]);
+	for (index = 0; index < fw_size; index++) {
+		u32 action = le32_to_cpu(phytable[index]);
+		u32 regno = (action & 0x0fff0000) >> 16;
 
-		if (!action)
+		switch(action & 0xf0000000) {
+		case PHY_READ:
+		case PHY_DATA_OR:
+		case PHY_DATA_AND:
+		case PHY_READ_EFUSE:
+		case PHY_CLEAR_READCOUNT:
+		case PHY_WRITE:
+		case PHY_WRITE_PREVIOUS:
+		case PHY_DELAY_MS:
 			break;
 
-		if ((action & 0xf0000000) != PHY_WRITE) {
-			netif_err(tp, probe, dev,
-				  "unknown action 0x%08x\n", action);
+		case PHY_BJMPN:
+			if (regno > index) {
+				netif_err(tp, probe, tp->dev,
+					"Out of range of firmware\n");
+				return;
+			}
+			break;
+		case PHY_READCOUNT_EQ_SKIP:
+			if (index + 2 >= fw_size) {
+				netif_err(tp, probe, tp->dev,
+					"Out of range of firmware\n");
+				return;
+			}
+			break;
+		case PHY_COMP_EQ_SKIPN:
+		case PHY_COMP_NEQ_SKIPN:
+		case PHY_SKIPN:
+			if (index + 1 + regno >= fw_size) {
+				netif_err(tp, probe, tp->dev,
+					"Out of range of firmware\n");
+				return;
+			}
+			break;
+
+		case PHY_READ_MAC_BYTE:
+		case PHY_WRITE_MAC_BYTE:
+		case PHY_WRITE_ERI_WORD:
+		default:
+			netif_err(tp, probe, tp->dev,
+				  "Invalid action 0x%08x\n", action);
 			return;
 		}
 	}
 
-	while (i-- != 0) {
-		u32 action = le32_to_cpu(*phytable);
+	predata = 0;
+	count = 0;
+
+	for (index = 0; index < fw_size; ) {
+		u32 action = le32_to_cpu(phytable[index]);
 		u32 data = action & 0x0000ffff;
-		u32 reg = (action & 0x0fff0000) >> 16;
+		u32 regno = (action & 0x0fff0000) >> 16;
+
+		if (!action)
+			break;
 
 		switch(action & 0xf0000000) {
-		case PHY_WRITE:
-			rtl_writephy(tp, reg, data);
-			phytable++;
+		case PHY_READ:
+			predata = rtl_readphy(tp, regno);
+			count++;
+			index++;
 			break;
+		case PHY_DATA_OR:
+			predata |= data;
+			index++;
+			break;
+		case PHY_DATA_AND:
+			predata &= data;
+			index++;
+			break;
+		case PHY_BJMPN:
+			index -= regno;
+			break;
+		case PHY_READ_EFUSE:
+			predata = rtl8168d_efuse_read(tp->mmio_addr, regno);
+			index++;
+			break;
+		case PHY_CLEAR_READCOUNT:
+			count = 0;
+			index++;
+			break;
+		case PHY_WRITE:
+			rtl_writephy(tp, regno, data);
+			index++;
+			break;
+		case PHY_READCOUNT_EQ_SKIP:
+			if (count == data)
+				index += 2;
+			else
+				index += 1;
+			break;
+		case PHY_COMP_EQ_SKIPN:
+			if (predata == data)
+				index += regno;
+			index++;
+			break;
+		case PHY_COMP_NEQ_SKIPN:
+			if (predata != data)
+				index += regno;
+			index++;
+			break;
+		case PHY_WRITE_PREVIOUS:
+			rtl_writephy(tp, regno, predata);
+			index++;
+			break;
+		case PHY_SKIPN:
+			index += regno + 1;
+			break;
+		case PHY_DELAY_MS:
+			mdelay(data);
+			index++;
+			break;
+
+		case PHY_READ_MAC_BYTE:
+		case PHY_WRITE_MAC_BYTE:
+		case PHY_WRITE_ERI_WORD:
 		default:
 			BUG();
 		}
@@ -3069,15 +3167,6 @@
 		rtl8168_driver_start(tp);
 	}
 
-	rtl8169_init_phy(dev, tp);
-
-	/*
-	 * Pretend we are using VLANs; This bypasses a nasty bug where
-	 * Interrupts stop flowing on high load on 8110SCd controllers.
-	 */
-	if (tp->mac_version == RTL_GIGA_MAC_VER_05)
-		RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) | RxVlan);
-
 	device_set_wakeup_enable(&pdev->dev, tp->features & RTL_FEATURE_WOL);
 
 	if (pci_dev_run_wake(pdev))
@@ -3127,6 +3216,7 @@
 static int rtl8169_open(struct net_device *dev)
 {
 	struct rtl8169_private *tp = netdev_priv(dev);
+	void __iomem *ioaddr = tp->mmio_addr;
 	struct pci_dev *pdev = tp->pci_dev;
 	int retval = -ENOMEM;
 
@@ -3162,6 +3252,15 @@
 
 	napi_enable(&tp->napi);
 
+	rtl8169_init_phy(dev, tp);
+
+	/*
+	 * Pretend we are using VLANs; this bypasses a nasty bug where
+	 * interrupts stop flowing on high load on 8110SCd controllers.
+	 */
+	if (tp->mac_version == RTL_GIGA_MAC_VER_05)
+		RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) | RxVlan);
+
 	rtl_pll_power_up(tp);
 
 	rtl_hw_start(dev);
@@ -3171,7 +3270,7 @@
 	tp->saved_wolopts = 0;
 	pm_runtime_put_noidle(&pdev->dev);
 
-	rtl8169_check_link_status(dev, tp, tp->mmio_addr);
+	rtl8169_check_link_status(dev, tp, ioaddr);
 out:
 	return retval;
 
diff --git a/drivers/net/sis900.c b/drivers/net/sis900.c
index 5818368..5976d1d 100644
--- a/drivers/net/sis900.c
+++ b/drivers/net/sis900.c
@@ -36,7 +36,7 @@
    Rev 1.07.06 Nov.  7 2000 Jeff Garzik <jgarzik@pobox.com> some bug fix and cleaning
    Rev 1.07.05 Nov.  6 2000 metapirat<metapirat@gmx.de> contribute media type select by ifconfig
    Rev 1.07.04 Sep.  6 2000 Lei-Chun Chang added ICS1893 PHY support
-   Rev 1.07.03 Aug. 24 2000 Lei-Chun Chang (lcchang@sis.com.tw) modified 630E eqaulizer workaround rule
+   Rev 1.07.03 Aug. 24 2000 Lei-Chun Chang (lcchang@sis.com.tw) modified 630E equalizer workaround rule
    Rev 1.07.01 Aug. 08 2000 Ollie Lho minor update for SiS 630E and SiS 630E A1
    Rev 1.07    Mar. 07 2000 Ollie Lho bug fix in Rx buffer ring
    Rev 1.06.04 Feb. 11 2000 Jeff Garzik <jgarzik@pobox.com> softnet and init for kernel 2.4
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index 39996bf..7d85a38 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -46,10 +46,6 @@
 
 #include <asm/irq.h>
 
-#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
-#define SKY2_VLAN_TAG_USED 1
-#endif
-
 #include "sky2.h"
 
 #define DRV_NAME		"sky2"
@@ -1326,39 +1322,34 @@
 	return err;
 }
 
-#ifdef SKY2_VLAN_TAG_USED
-static void sky2_set_vlan_mode(struct sky2_hw *hw, u16 port, bool onoff)
-{
-	if (onoff) {
-		sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T),
-			     RX_VLAN_STRIP_ON);
-		sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T),
-			     TX_VLAN_TAG_ON);
-	} else {
-		sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T),
-			     RX_VLAN_STRIP_OFF);
-		sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T),
-			     TX_VLAN_TAG_OFF);
-	}
-}
+#define NETIF_F_ALL_VLAN (NETIF_F_HW_VLAN_TX|NETIF_F_HW_VLAN_RX)
 
-static void sky2_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
+static void sky2_vlan_mode(struct net_device *dev)
 {
 	struct sky2_port *sky2 = netdev_priv(dev);
 	struct sky2_hw *hw = sky2->hw;
 	u16 port = sky2->port;
 
-	netif_tx_lock_bh(dev);
-	napi_disable(&hw->napi);
+	if (dev->features & NETIF_F_HW_VLAN_RX)
+		sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T),
+			     RX_VLAN_STRIP_ON);
+	else
+		sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T),
+			     RX_VLAN_STRIP_OFF);
 
-	sky2->vlgrp = grp;
-	sky2_set_vlan_mode(hw, port, grp != NULL);
+	dev->vlan_features = dev->features & ~NETIF_F_ALL_VLAN;
+	if (dev->features & NETIF_F_HW_VLAN_TX)
+		sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T),
+			     TX_VLAN_TAG_ON);
+	else {
+		sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T),
+			     TX_VLAN_TAG_OFF);
 
-	sky2_read32(hw, B0_Y2_SP_LISR);
-	napi_enable(&hw->napi);
-	netif_tx_unlock_bh(dev);
+		/* Can't do transmit offload of vlan without hw vlan */
+		dev->vlan_features &= ~(NETIF_F_TSO | NETIF_F_SG
+					| NETIF_F_ALL_CSUM);
+	}
 }
-#endif
 
 /* Amount of required worst case padding in rx buffer */
 static inline unsigned sky2_rx_pad(const struct sky2_hw *hw)
@@ -1635,9 +1626,7 @@
 	sky2_prefetch_init(hw, txqaddr[port], sky2->tx_le_map,
 			   sky2->tx_ring_size - 1);
 
-#ifdef SKY2_VLAN_TAG_USED
-	sky2_set_vlan_mode(hw, port, sky2->vlgrp != NULL);
-#endif
+	sky2_vlan_mode(sky2->netdev);
 
 	sky2_rx_start(sky2);
 }
@@ -1780,7 +1769,7 @@
 	}
 
 	ctrl = 0;
-#ifdef SKY2_VLAN_TAG_USED
+
 	/* Add VLAN tag, can piggyback on LRGLEN or ADDR64 */
 	if (vlan_tx_tag_present(skb)) {
 		if (!le) {
@@ -1792,7 +1781,6 @@
 		le->length = cpu_to_be16(vlan_tx_tag_get(skb));
 		ctrl |= INS_VLAN;
 	}
-#endif
 
 	/* Handle TCP checksum offload */
 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
@@ -2432,11 +2420,8 @@
 	struct sk_buff *skb = NULL;
 	u16 count = (status & GMR_FS_LEN) >> 16;
 
-#ifdef SKY2_VLAN_TAG_USED
-	/* Account for vlan tag */
-	if (sky2->vlgrp && (status & GMR_FS_VLAN))
-		count -= VLAN_HLEN;
-#endif
+	if (status & GMR_FS_VLAN)
+		count -= VLAN_HLEN;	/* Account for vlan tag */
 
 	netif_printk(sky2, rx_status, KERN_DEBUG, dev,
 		     "rx slot %u status 0x%x len %d\n",
@@ -2504,17 +2489,9 @@
 static inline void sky2_skb_rx(const struct sky2_port *sky2,
 			       u32 status, struct sk_buff *skb)
 {
-#ifdef SKY2_VLAN_TAG_USED
-	u16 vlan_tag = be16_to_cpu(sky2->rx_tag);
-	if (sky2->vlgrp && (status & GMR_FS_VLAN)) {
-		if (skb->ip_summed == CHECKSUM_NONE)
-			vlan_hwaccel_receive_skb(skb, sky2->vlgrp, vlan_tag);
-		else
-			vlan_gro_receive(&sky2->hw->napi, sky2->vlgrp,
-					 vlan_tag, skb);
-		return;
-	}
-#endif
+	if (status & GMR_FS_VLAN)
+		__vlan_hwaccel_put_tag(skb, be16_to_cpu(sky2->rx_tag));
+
 	if (skb->ip_summed == CHECKSUM_NONE)
 		netif_receive_skb(skb);
 	else
@@ -2631,7 +2608,6 @@
 				goto exit_loop;
 			break;
 
-#ifdef SKY2_VLAN_TAG_USED
 		case OP_RXVLAN:
 			sky2->rx_tag = length;
 			break;
@@ -2639,7 +2615,6 @@
 		case OP_RXCHKSVLAN:
 			sky2->rx_tag = length;
 			/* fall through */
-#endif
 		case OP_RXCHKS:
 			if (likely(sky2->flags & SKY2_FLAG_RX_CHECKSUM))
 				sky2_rx_checksum(sky2, status);
@@ -3042,6 +3017,10 @@
 			| SKY2_HW_NEW_LE
 			| SKY2_HW_AUTO_TX_SUM
 			| SKY2_HW_ADV_POWER_CTL;
+
+		/* The workaround for status conflicts with VLAN tag detection. */
+		if (hw->chip_rev == CHIP_REV_YU_FE2_A0)
+			hw->flags |= SKY2_HW_VLAN_BROKEN;
 		break;
 
 	case CHIP_ID_YUKON_SUPR:
@@ -3411,18 +3390,15 @@
 		u32 modes = SUPPORTED_10baseT_Half
 			| SUPPORTED_10baseT_Full
 			| SUPPORTED_100baseT_Half
-			| SUPPORTED_100baseT_Full
-			| SUPPORTED_Autoneg | SUPPORTED_TP;
+			| SUPPORTED_100baseT_Full;
 
 		if (hw->flags & SKY2_HW_GIGABIT)
 			modes |= SUPPORTED_1000baseT_Half
 				| SUPPORTED_1000baseT_Full;
 		return modes;
 	} else
-		return  SUPPORTED_1000baseT_Half
-			| SUPPORTED_1000baseT_Full
-			| SUPPORTED_Autoneg
-			| SUPPORTED_FIBRE;
+		return SUPPORTED_1000baseT_Half
+			| SUPPORTED_1000baseT_Full;
 }
 
 static int sky2_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
@@ -3436,9 +3412,11 @@
 	if (sky2_is_copper(hw)) {
 		ecmd->port = PORT_TP;
 		ecmd->speed = sky2->speed;
+		ecmd->supported |= SUPPORTED_Autoneg | SUPPORTED_TP;
 	} else {
 		ecmd->speed = SPEED_1000;
 		ecmd->port = PORT_FIBRE;
+		ecmd->supported |= SUPPORTED_Autoneg | SUPPORTED_FIBRE;
 	}
 
 	ecmd->advertising = sky2->advertising;
@@ -3455,8 +3433,19 @@
 	u32 supported = sky2_supported_modes(hw);
 
 	if (ecmd->autoneg == AUTONEG_ENABLE) {
+		if (ecmd->advertising & ~supported)
+			return -EINVAL;
+
+		if (sky2_is_copper(hw))
+			sky2->advertising = ecmd->advertising |
+					    ADVERTISED_TP |
+					    ADVERTISED_Autoneg;
+		else
+			sky2->advertising = ecmd->advertising |
+					    ADVERTISED_FIBRE |
+					    ADVERTISED_Autoneg;
+
 		sky2->flags |= SKY2_FLAG_AUTO_SPEED;
-		ecmd->advertising = supported;
 		sky2->duplex = -1;
 		sky2->speed = -1;
 	} else {
@@ -3500,8 +3489,6 @@
 		sky2->flags &= ~SKY2_FLAG_AUTO_SPEED;
 	}
 
-	sky2->advertising = ecmd->advertising;
-
 	if (netif_running(dev)) {
 		sky2_phy_reinit(sky2);
 		sky2_set_multicast(dev);
@@ -4229,15 +4216,28 @@
 static int sky2_set_flags(struct net_device *dev, u32 data)
 {
 	struct sky2_port *sky2 = netdev_priv(dev);
-	u32 supported =
-		(sky2->hw->flags & SKY2_HW_RSS_BROKEN) ? 0 : ETH_FLAG_RXHASH;
+	unsigned long old_feat = dev->features;
+	u32 supported = 0;
 	int rc;
 
+	if (!(sky2->hw->flags & SKY2_HW_RSS_BROKEN))
+		supported |= ETH_FLAG_RXHASH;
+
+	if (!(sky2->hw->flags & SKY2_HW_VLAN_BROKEN))
+		supported |= ETH_FLAG_RXVLAN | ETH_FLAG_TXVLAN;
+
+	printk(KERN_DEBUG "sky2 set_flags: supported %x data %x\n",
+	       supported, data);
+
 	rc = ethtool_op_set_flags(dev, data, supported);
 	if (rc)
 		return rc;
 
-	rx_set_rss(dev);
+	if ((old_feat ^ dev->features) & NETIF_F_RXHASH)
+		rx_set_rss(dev);
+
+	if ((old_feat ^ dev->features) & NETIF_F_ALL_VLAN)
+		sky2_vlan_mode(dev);
 
 	return 0;
 }
@@ -4273,6 +4273,7 @@
 	.get_sset_count = sky2_get_sset_count,
 	.get_ethtool_stats = sky2_get_ethtool_stats,
 	.set_flags	= sky2_set_flags,
+	.get_flags	= ethtool_op_get_flags,
 };
 
 #ifdef CONFIG_SKY2_DEBUG
@@ -4554,9 +4555,6 @@
 	.ndo_change_mtu		= sky2_change_mtu,
 	.ndo_tx_timeout		= sky2_tx_timeout,
 	.ndo_get_stats64	= sky2_get_stats,
-#ifdef SKY2_VLAN_TAG_USED
-	.ndo_vlan_rx_register	= sky2_vlan_rx_register,
-#endif
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller	= sky2_netpoll,
 #endif
@@ -4572,9 +4570,6 @@
 	.ndo_change_mtu		= sky2_change_mtu,
 	.ndo_tx_timeout		= sky2_tx_timeout,
 	.ndo_get_stats64	= sky2_get_stats,
-#ifdef SKY2_VLAN_TAG_USED
-	.ndo_vlan_rx_register	= sky2_vlan_rx_register,
-#endif
   },
 };
 
@@ -4625,7 +4620,8 @@
 	sky2->port = port;
 
 	dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG
-		| NETIF_F_TSO  | NETIF_F_GRO;
+		| NETIF_F_TSO | NETIF_F_GRO;
+
 	if (highmem)
 		dev->features |= NETIF_F_HIGHDMA;
 
@@ -4633,13 +4629,8 @@
 	if (!(hw->flags & SKY2_HW_RSS_BROKEN))
 		dev->features |= NETIF_F_RXHASH;
 
-#ifdef SKY2_VLAN_TAG_USED
-	/* The workaround for FE+ status conflicts with VLAN tag detection. */
-	if (!(sky2->hw->chip_id == CHIP_ID_YUKON_FE_P &&
-	      sky2->hw->chip_rev == CHIP_REV_YU_FE2_A0)) {
+	if (!(hw->flags & SKY2_HW_VLAN_BROKEN))
 		dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
-	}
-#endif
 
 	/* read the mac address */
 	memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port * 8, ETH_ALEN);
diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h
index 80bdc40..6861b0e 100644
--- a/drivers/net/sky2.h
+++ b/drivers/net/sky2.h
@@ -2236,11 +2236,8 @@
 	u16		     rx_pending;
 	u16		     rx_data_size;
 	u16		     rx_nfrags;
-
-#ifdef SKY2_VLAN_TAG_USED
 	u16		     rx_tag;
-	struct vlan_group    *vlgrp;
-#endif
+
 	struct {
 		unsigned long last;
 		u32	mac_rp;
@@ -2284,6 +2281,7 @@
 #define SKY2_HW_AUTO_TX_SUM	0x00000040	/* new IP decode for Tx */
 #define SKY2_HW_ADV_POWER_CTL	0x00000080	/* additional PHY power regs */
 #define SKY2_HW_RSS_BROKEN	0x00000100
+#define SKY2_HW_VLAN_BROKEN     0x00000200
 
 	u8	     	     chip_id;
 	u8		     chip_rev;
diff --git a/drivers/net/tehuti.c b/drivers/net/tehuti.c
index 296000b..3397618 100644
--- a/drivers/net/tehuti.c
+++ b/drivers/net/tehuti.c
@@ -12,7 +12,7 @@
 /*
  * RX HW/SW interaction overview
  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- * There are 2 types of RX communication channels betwean driver and NIC.
+ * There are 2 types of RX communication channels between driver and NIC.
  * 1) RX Free Fifo - RXF - holds descriptors of empty buffers to accept incoming
  * traffic. This Fifo is filled by SW and is readen by HW. Each descriptor holds
  * info about buffer's location, size and ID. An ID field is used to identify a
@@ -821,7 +821,7 @@
 		}
 
 		/* use PMF to accept first MAC_MCST_NUM (15) addresses */
-		/* TBD: sort addreses and write them in ascending order
+		/* TBD: sort addresses and write them in ascending order
 		 * into RX_MAC_MCST regs. we skip this phase now and accept ALL
 		 * multicast frames throu IMF */
 		/* accept the rest of addresses throu IMF */
@@ -1346,7 +1346,7 @@
 /*
  * TX HW/SW interaction overview
  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- * There are 2 types of TX communication channels betwean driver and NIC.
+ * There are 2 types of TX communication channels between driver and NIC.
  * 1) TX Free Fifo - TXF - holds ack descriptors for sent packets
  * 2) TX Data Fifo - TXD - holds descriptors of full buffers.
  *
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 7599c45..b100bd5 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1309,7 +1309,7 @@
 		break;
 
 	case SIOCGIFHWADDR:
-		/* Get hw addres */
+		/* Get hw address */
 		memcpy(ifr.ifr_hwaddr.sa_data, tun->dev->dev_addr, ETH_ALEN);
 		ifr.ifr_hwaddr.sa_family = tun->dev->type;
 		if (copy_to_user(argp, &ifr, ifreq_len))
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
index cab96ad..09cac70 100644
--- a/drivers/net/via-velocity.c
+++ b/drivers/net/via-velocity.c
@@ -898,7 +898,7 @@
 	set_mii_flow_control(vptr);
 
 	/*
-	   Check if new status is consisent with current status
+	   Check if new status is consistent with current status
 	   if (((mii_status & curr_status) & VELOCITY_AUTONEG_ENABLE) ||
 	       (mii_status==curr_status)) {
 	   vptr->mii_status=mii_check_media_mode(vptr->mac_regs);
diff --git a/drivers/net/vxge/vxge-traffic.h b/drivers/net/vxge/vxge-traffic.h
index 8c3103f..d48486d 100644
--- a/drivers/net/vxge/vxge-traffic.h
+++ b/drivers/net/vxge/vxge-traffic.h
@@ -1695,7 +1695,7 @@
  * struct vxge_hw_device_stats - Contains HW per-device statistics,
  * including hw.
  * @devh: HW device handle.
- * @dma_addr: DMA addres of the %hw_info. Given to device to fill-in the stats.
+ * @dma_addr: DMA address of the %hw_info. Given to device to fill-in the stats.
  * @hw_info_dmah: DMA handle used to map hw statistics onto the device memory
  *                space.
  * @hw_info_dma_acch: One more DMA handle used subsequently to free the
diff --git a/drivers/net/wan/dscc4.c b/drivers/net/wan/dscc4.c
index 34cff6c..4578e5b 100644
--- a/drivers/net/wan/dscc4.c
+++ b/drivers/net/wan/dscc4.c
@@ -125,7 +125,7 @@
 /* Module parameters */
 
 MODULE_AUTHOR("Maintainer: Francois Romieu <romieu@cogenit.fr>");
-MODULE_DESCRIPTION("Siemens PEB20534 PCI Controler");
+MODULE_DESCRIPTION("Siemens PEB20534 PCI Controller");
 MODULE_LICENSE("GPL");
 module_param(debug, int, 0);
 MODULE_PARM_DESC(debug,"Enable/disable extra messages");
diff --git a/drivers/net/wimax/i2400m/driver.c b/drivers/net/wimax/i2400m/driver.c
index f060332..65bc334 100644
--- a/drivers/net/wimax/i2400m/driver.c
+++ b/drivers/net/wimax/i2400m/driver.c
@@ -232,7 +232,7 @@
 			result);
 		goto error;
 	}
-	/* Extract MAC addresss */
+	/* Extract MAC address */
 	ddi = (void *) skb->data;
 	BUILD_BUG_ON(ETH_ALEN != sizeof(ddi->mac_address));
 	d_printf(2, dev, "GET DEVICE INFO: mac addr %pM\n",
diff --git a/drivers/net/wimax/i2400m/i2400m.h b/drivers/net/wimax/i2400m/i2400m.h
index 17ecaa4..030cbfd 100644
--- a/drivers/net/wimax/i2400m/i2400m.h
+++ b/drivers/net/wimax/i2400m/i2400m.h
@@ -186,7 +186,7 @@
  * struct i2400m_poke_table - Hardware poke table for the Intel 2400m
  *
  * This structure will be used to create a device specific poke table
- * to put the device in a consistant state at boot time.
+ * to put the device in a consistent state at boot time.
  *
  * @address: The device address to poke
  *
@@ -703,7 +703,7 @@
  * @I2400M_BRI_MAC_REINIT: We need to reinitialize the boot
  *     rom after reading the MAC address. This is quite a dirty hack,
  *     if you ask me -- the device requires the bootrom to be
- *     intialized after reading the MAC address.
+ *     initialized after reading the MAC address.
  */
 enum i2400m_bri {
 	I2400M_BRI_SOFT       = 1 << 1,
diff --git a/drivers/net/wireless/ath/ath5k/reg.h b/drivers/net/wireless/ath/ath5k/reg.h
index 7ad05d4..fd14b91 100644
--- a/drivers/net/wireless/ath/ath5k/reg.h
+++ b/drivers/net/wireless/ath/ath5k/reg.h
@@ -1064,7 +1064,7 @@
 /*
  * EEPROM command register
  */
-#define AR5K_EEPROM_CMD		0x6008			/* Register Addres */
+#define AR5K_EEPROM_CMD		0x6008			/* Register Address */
 #define AR5K_EEPROM_CMD_READ	0x00000001	/* EEPROM read */
 #define AR5K_EEPROM_CMD_WRITE	0x00000002	/* EEPROM write */
 #define AR5K_EEPROM_CMD_RESET	0x00000004	/* EEPROM reset */
@@ -1084,7 +1084,7 @@
 /*
  * EEPROM config register
  */
-#define AR5K_EEPROM_CFG			0x6010			/* Register Addres */
+#define AR5K_EEPROM_CFG			0x6010			/* Register Address */
 #define AR5K_EEPROM_CFG_SIZE		0x00000003		/* Size determination override */
 #define AR5K_EEPROM_CFG_SIZE_AUTO	0
 #define AR5K_EEPROM_CFG_SIZE_4KBIT	1
@@ -1126,7 +1126,7 @@
  * Second station id register (Upper 16 bits of MAC address + PCU settings)
  */
 #define AR5K_STA_ID1			0x8004			/* Register Address */
-#define	AR5K_STA_ID1_ADDR_U16		0x0000ffff	/* Upper 16 bits of MAC addres */
+#define	AR5K_STA_ID1_ADDR_U16		0x0000ffff	/* Upper 16 bits of MAC address */
 #define AR5K_STA_ID1_AP			0x00010000	/* Set AP mode */
 #define AR5K_STA_ID1_ADHOC		0x00020000	/* Set Ad-Hoc mode */
 #define AR5K_STA_ID1_PWR_SV		0x00040000	/* Power save reporting */
diff --git a/drivers/net/wireless/b43/phy_g.c b/drivers/net/wireless/b43/phy_g.c
index 0dc33b6..be48281 100644
--- a/drivers/net/wireless/b43/phy_g.c
+++ b/drivers/net/wireless/b43/phy_g.c
@@ -1919,7 +1919,7 @@
 	b43_hf_write(dev, b43_hf_read(dev) | B43_HF_HWPCTL);
 }
 
-/* Intialize B/G PHY power control */
+/* Initialize B/G PHY power control */
 static void b43_phy_init_pctl(struct b43_wldev *dev)
 {
 	struct ssb_bus *bus = dev->dev->bus;
diff --git a/drivers/net/wireless/b43legacy/phy.c b/drivers/net/wireless/b43legacy/phy.c
index 35033dd..28e477d 100644
--- a/drivers/net/wireless/b43legacy/phy.c
+++ b/drivers/net/wireless/b43legacy/phy.c
@@ -153,7 +153,7 @@
 	phy->calibrated = 1;
 }
 
-/* intialize B PHY power control
+/* initialize B PHY power control
  * as described in http://bcm-specs.sipsolutions.net/InitPowerControl
  */
 static void b43legacy_phy_init_pctl(struct b43legacy_wldev *dev)
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-ict.c b/drivers/net/wireless/iwlwifi/iwl-agn-ict.c
index a5dbfea..b5cb3be 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-ict.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-ict.c
@@ -197,7 +197,7 @@
 
  none:
 	/* re-enable interrupts here since we don't have anything to service. */
-	/* only Re-enable if diabled by irq  and no schedules tasklet. */
+	/* only Re-enable if disabled by irq  and no schedules tasklet. */
 	if (test_bit(STATUS_INT_ENABLED, &priv->status) && !priv->_agn.inta)
 		iwl_enable_interrupts(priv);
 
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index f13a83a..36335b1 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -1154,7 +1154,7 @@
 	}
 
 	/* Re-enable all interrupts */
-	/* only Re-enable if diabled by irq */
+	/* only Re-enable if disabled by irq */
 	if (test_bit(STATUS_INT_ENABLED, &priv->status))
 		iwl_enable_interrupts(priv);
 
@@ -1368,7 +1368,7 @@
 	}
 
 	/* Re-enable all interrupts */
-	/* only Re-enable if diabled by irq */
+	/* only Re-enable if disabled by irq */
 	if (test_bit(STATUS_INT_ENABLED, &priv->status))
 		iwl_enable_interrupts(priv);
 }
diff --git a/drivers/net/wireless/iwlwifi/iwl-legacy.c b/drivers/net/wireless/iwlwifi/iwl-legacy.c
index a08b4e5..bb1a742 100644
--- a/drivers/net/wireless/iwlwifi/iwl-legacy.c
+++ b/drivers/net/wireless/iwlwifi/iwl-legacy.c
@@ -619,7 +619,7 @@
 
 none:
 	/* re-enable interrupts here since we don't have anything to service. */
-	/* only Re-enable if diabled by irq */
+	/* only Re-enable if disabled by irq */
 	if (test_bit(STATUS_INT_ENABLED, &priv->status))
 		iwl_enable_interrupts(priv);
 	spin_unlock_irqrestore(&priv->lock, flags);
diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.c b/drivers/net/wireless/iwlwifi/iwl-sta.c
index 4776323..49493d1 100644
--- a/drivers/net/wireless/iwlwifi/iwl-sta.c
+++ b/drivers/net/wireless/iwlwifi/iwl-sta.c
@@ -107,7 +107,7 @@
 	/*
 	 * XXX: The MAC address in the command buffer is often changed from
 	 * the original sent to the device. That is, the MAC address
-	 * written to the command buffer often is not the same MAC adress
+	 * written to the command buffer often is not the same MAC address
 	 * read from the command buffer when the command returns. This
 	 * issue has not yet been resolved and this debugging is left to
 	 * observe the problem.
diff --git a/drivers/net/wireless/prism54/islpci_dev.c b/drivers/net/wireless/prism54/islpci_dev.c
index 2c8cc95..ec2c75d 100644
--- a/drivers/net/wireless/prism54/islpci_dev.c
+++ b/drivers/net/wireless/prism54/islpci_dev.c
@@ -630,7 +630,7 @@
 	printk(KERN_DEBUG "islpci_alloc_memory\n");
 #endif
 
-	/* remap the PCI device base address to accessable */
+	/* remap the PCI device base address to accessible */
 	if (!(priv->device_base =
 	      ioremap(pci_resource_start(priv->pdev, 0),
 		      ISL38XX_PCI_MEM_SIZE))) {
@@ -709,7 +709,7 @@
 				   PCI_DMA_FROMDEVICE);
 		if (!priv->pci_map_rx_address[counter]) {
 			/* error mapping the buffer to device
-			   accessable memory address */
+			   accessible memory address */
 			printk(KERN_ERR "failed to map skb DMA'able\n");
 			goto out_free;
 		}
@@ -773,7 +773,7 @@
 		priv->data_low_rx[counter] = NULL;
 	}
 
-	/* Free the acces control list and the WPA list */
+	/* Free the access control list and the WPA list */
 	prism54_acl_clean(&priv->acl);
 	prism54_wpa_bss_ie_clean(priv);
 	mgt_clean(priv);
diff --git a/drivers/net/wireless/prism54/islpci_eth.c b/drivers/net/wireless/prism54/islpci_eth.c
index 2fc52bc..d44f8e2 100644
--- a/drivers/net/wireless/prism54/islpci_eth.c
+++ b/drivers/net/wireless/prism54/islpci_eth.c
@@ -450,7 +450,7 @@
 				   MAX_FRAGMENT_SIZE_RX + 2,
 				   PCI_DMA_FROMDEVICE);
 		if (unlikely(!priv->pci_map_rx_address[index])) {
-			/* error mapping the buffer to device accessable memory address */
+			/* error mapping the buffer to device accessible memory address */
 			DEBUG(SHOW_ERROR_MESSAGES,
 			      "Error mapping DMA address\n");
 
diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c
index 658542d..f3da051 100644
--- a/drivers/net/wireless/rt2x00/rt2x00mac.c
+++ b/drivers/net/wireless/rt2x00/rt2x00mac.c
@@ -273,7 +273,7 @@
 	intf->beacon = entry;
 
 	/*
-	 * The MAC adddress must be configured after the device
+	 * The MAC address must be configured after the device
 	 * has been initialized. Otherwise the device can reset
 	 * the MAC registers.
 	 * The BSSID address must only be configured in AP mode,
diff --git a/drivers/net/wireless/wl1251/acx.h b/drivers/net/wireless/wl1251/acx.h
index e54b21a..efcc3aa 100644
--- a/drivers/net/wireless/wl1251/acx.h
+++ b/drivers/net/wireless/wl1251/acx.h
@@ -1272,10 +1272,10 @@
 /* OBSOLETE */
 #define WL1251_ACX_INTR_WAKE_ON_HOST	BIT(6)
 
-/* Trace meassge on MBOX #A */
+/* Trace message on MBOX #A */
 #define WL1251_ACX_INTR_TRACE_A		BIT(7)
 
-/* Trace meassge on MBOX #B */
+/* Trace message on MBOX #B */
 #define WL1251_ACX_INTR_TRACE_B		BIT(8)
 
 /* Command processing completion */
diff --git a/drivers/net/wireless/wl1251/wl1251.h b/drivers/net/wireless/wl1251/wl1251.h
index 13fbeec..c0ce2c8 100644
--- a/drivers/net/wireless/wl1251/wl1251.h
+++ b/drivers/net/wireless/wl1251/wl1251.h
@@ -419,7 +419,7 @@
 #define WL1251_FW_NAME "wl1251-fw.bin"
 #define WL1251_NVS_NAME "wl1251-nvs.bin"
 
-#define WL1251_POWER_ON_SLEEP 10 /* in miliseconds */
+#define WL1251_POWER_ON_SLEEP 10 /* in milliseconds */
 
 #define WL1251_PART_DOWN_MEM_START	0x0
 #define WL1251_PART_DOWN_MEM_SIZE	0x16800
diff --git a/drivers/net/wireless/wl12xx/acx.h b/drivers/net/wireless/wl12xx/acx.h
index 9cbc3f4..7bd8e4d 100644
--- a/drivers/net/wireless/wl12xx/acx.h
+++ b/drivers/net/wireless/wl12xx/acx.h
@@ -47,9 +47,9 @@
 #define WL1271_ACX_INTR_HW_AVAILABLE       BIT(5)
 /* The MISC bit is used for aggregation of RX, TxComplete and TX rate update */
 #define WL1271_ACX_INTR_DATA               BIT(6)
-/* Trace meassge on MBOX #A */
+/* Trace message on MBOX #A */
 #define WL1271_ACX_INTR_TRACE_A            BIT(7)
-/* Trace meassge on MBOX #B */
+/* Trace message on MBOX #B */
 #define WL1271_ACX_INTR_TRACE_B            BIT(8)
 
 #define WL1271_ACX_INTR_ALL		   0xFFFFFFFF
diff --git a/drivers/net/wireless/wl12xx/wl12xx.h b/drivers/net/wireless/wl12xx/wl12xx.h
index ce3d31f..9050dd9 100644
--- a/drivers/net/wireless/wl12xx/wl12xx.h
+++ b/drivers/net/wireless/wl12xx/wl12xx.h
@@ -416,8 +416,8 @@
 
 /* WL1271 needs a 200ms sleep after power on, and a 20ms sleep before power
    on in case is has been shut down shortly before */
-#define WL1271_PRE_POWER_ON_SLEEP 20 /* in miliseconds */
-#define WL1271_POWER_ON_SLEEP 200 /* in miliseconds */
+#define WL1271_PRE_POWER_ON_SLEEP 20 /* in milliseconds */
+#define WL1271_POWER_ON_SLEEP 200 /* in milliseconds */
 
 /* Macros to handle wl1271.sta_rate_set */
 #define HW_BG_RATES_MASK	0xffff
diff --git a/drivers/net/wireless/wl3501_cs.c b/drivers/net/wireless/wl3501_cs.c
index ee82df6..3e5befe4 100644
--- a/drivers/net/wireless/wl3501_cs.c
+++ b/drivers/net/wireless/wl3501_cs.c
@@ -192,7 +192,7 @@
 }
 
 /*
- * Get Ethernet MAC addresss.
+ * Get Ethernet MAC address.
  *
  * WARNING: We switch to FPAGE0 and switc back again.
  *          Making sure there is no other WL function beening called by ISR.
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index cdbeec9..546de57 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -488,7 +488,7 @@
 
 	if (unlikely(!netif_carrier_ok(dev) ||
 		     (frags > 1 && !xennet_can_sg(dev)) ||
-		     netif_needs_gso(dev, skb))) {
+		     netif_needs_gso(skb, netif_skb_features(skb)))) {
 		spin_unlock_irq(&np->tx_lock);
 		goto drop;
 	}
diff --git a/drivers/nfc/Kconfig b/drivers/nfc/Kconfig
new file mode 100644
index 0000000..ffedfd4
--- /dev/null
+++ b/drivers/nfc/Kconfig
@@ -0,0 +1,30 @@
+#
+# Near Field Communication (NFC) devices
+#
+
+menuconfig NFC_DEVICES
+	bool "NFC devices"
+	default n
+	---help---
+	  You'll have to say Y if your computer contains an NFC device that
+	  you want to use under Linux.
+
+	  You can say N here if you don't have any Near Field Communication
+	  devices connected to your computer.
+
+if NFC_DEVICES
+
+config PN544_NFC
+	tristate "PN544 NFC driver"
+	depends on I2C
+	select CRC_CCITT
+	default n
+	---help---
+	  Say yes if you want the PN544 Near Field Communication driver.
+	  This is for the I2C connected version. If unsure, say N here.
+
+	  To compile this driver as a module, choose m here. The module will
+	  be called pn544.
+
+
+endif # NFC_DEVICES
diff --git a/drivers/nfc/Makefile b/drivers/nfc/Makefile
new file mode 100644
index 0000000..a4efb16
--- /dev/null
+++ b/drivers/nfc/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for nfc devices
+#
+
+obj-$(CONFIG_PN544_NFC)		+= pn544.o
diff --git a/drivers/nfc/pn544.c b/drivers/nfc/pn544.c
new file mode 100644
index 0000000..401c44b
--- /dev/null
+++ b/drivers/nfc/pn544.c
@@ -0,0 +1,891 @@
+/*
+ * Driver for the PN544 NFC chip.
+ *
+ * Copyright (C) Nokia Corporation
+ *
+ * Author: Jari Vanhala <ext-jari.vanhala@nokia.com>
+ * Contact: Matti Aaltonen <matti.j.aaltonen@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/completion.h>
+#include <linux/crc-ccitt.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/nfc/pn544.h>
+#include <linux/poll.h>
+#include <linux/regulator/consumer.h>
+#include <linux/serial_core.h> /* for TCGETS */
+#include <linux/slab.h>
+
+#define DRIVER_CARD	"PN544 NFC"
+#define DRIVER_DESC	"NFC driver for PN544"
+
+static struct i2c_device_id pn544_id_table[] = {
+	{ PN544_DRIVER_NAME, 0 },
+	{ }
+};
+MODULE_DEVICE_TABLE(i2c, pn544_id_table);
+
+#define HCI_MODE	0
+#define FW_MODE		1
+
+enum pn544_state {
+	PN544_ST_COLD,
+	PN544_ST_FW_READY,
+	PN544_ST_READY,
+};
+
+enum pn544_irq {
+	PN544_NONE,
+	PN544_INT,
+};
+
+struct pn544_info {
+	struct miscdevice miscdev;
+	struct i2c_client *i2c_dev;
+	struct regulator_bulk_data regs[2];
+
+	enum pn544_state state;
+	wait_queue_head_t read_wait;
+	loff_t read_offset;
+	enum pn544_irq read_irq;
+	struct mutex read_mutex; /* Serialize read_irq access */
+	struct mutex mutex; /* Serialize info struct access */
+	u8 *buf;
+	unsigned int buflen;
+};
+
+static const char reg_vdd_io[]	= "Vdd_IO";
+static const char reg_vbat[]	= "VBat";
+
+/* sysfs interface */
+static ssize_t pn544_test(struct device *dev,
+			  struct device_attribute *attr, char *buf)
+{
+	struct pn544_info *info = dev_get_drvdata(dev);
+	struct i2c_client *client = info->i2c_dev;
+	struct pn544_nfc_platform_data *pdata = client->dev.platform_data;
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", pdata->test());
+}
+
+static int pn544_enable(struct pn544_info *info, int mode)
+{
+	struct pn544_nfc_platform_data *pdata;
+	struct i2c_client *client = info->i2c_dev;
+
+	int r;
+
+	r = regulator_bulk_enable(ARRAY_SIZE(info->regs), info->regs);
+	if (r < 0)
+		return r;
+
+	pdata = client->dev.platform_data;
+	info->read_irq = PN544_NONE;
+	if (pdata->enable)
+		pdata->enable(mode);
+
+	if (mode) {
+		info->state = PN544_ST_FW_READY;
+		dev_dbg(&client->dev, "now in FW-mode\n");
+	} else {
+		info->state = PN544_ST_READY;
+		dev_dbg(&client->dev, "now in HCI-mode\n");
+	}
+
+	usleep_range(10000, 15000);
+
+	return 0;
+}
+
+static void pn544_disable(struct pn544_info *info)
+{
+	struct pn544_nfc_platform_data *pdata;
+	struct i2c_client *client = info->i2c_dev;
+
+	pdata = client->dev.platform_data;
+	if (pdata->disable)
+		pdata->disable();
+
+	info->state = PN544_ST_COLD;
+
+	dev_dbg(&client->dev, "Now in OFF-mode\n");
+
+	msleep(PN544_RESETVEN_TIME);
+
+	info->read_irq = PN544_NONE;
+	regulator_bulk_disable(ARRAY_SIZE(info->regs), info->regs);
+}
+
+static int check_crc(u8 *buf, int buflen)
+{
+	u8 len;
+	u16 crc;
+
+	len = buf[0] + 1;
+	if (len < 4 || len != buflen || len > PN544_MSG_MAX_SIZE) {
+		pr_err(PN544_DRIVER_NAME
+		       ": CRC; corrupt packet len %u (%d)\n", len, buflen);
+		print_hex_dump(KERN_DEBUG, "crc: ", DUMP_PREFIX_NONE,
+			       16, 2, buf, buflen, false);
+		return -EPERM;
+	}
+	crc = crc_ccitt(0xffff, buf, len - 2);
+	crc = ~crc;
+
+	if (buf[len-2] != (crc & 0xff) || buf[len-1] != (crc >> 8)) {
+		pr_err(PN544_DRIVER_NAME ": CRC error 0x%x != 0x%x 0x%x\n",
+		       crc, buf[len-1], buf[len-2]);
+
+		print_hex_dump(KERN_DEBUG, "crc: ", DUMP_PREFIX_NONE,
+			       16, 2, buf, buflen, false);
+		return -EPERM;
+	}
+	return 0;
+}
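
check_crc() above implies the on-wire LLC frame layout: a length byte counting everything that follows it, the payload, and a bit-inverted CRC-CCITT over the length byte and payload, stored low byte first. A hypothetical framing helper that produces buffers check_crc() will accept (not part of the patch; shown only to make the layout explicit):

	/* Hypothetical helper, not in the patch: build a frame check_crc() accepts.
	 * Returns the total frame length, or -EINVAL if it would not fit. */
	static int pn544_frame_msg(u8 *buf, size_t buflen,
				   const u8 *payload, u8 payload_len)
	{
		size_t total = 1 + payload_len + 2;	/* len byte + payload + CRC16 */
		u16 crc;

		if (total < 4 || total > PN544_MSG_MAX_SIZE || total > buflen)
			return -EINVAL;

		buf[0] = total - 1;			/* check_crc(): len = buf[0] + 1 */
		memcpy(buf + 1, payload, payload_len);

		crc = ~crc_ccitt(0xffff, buf, total - 2);
		buf[total - 2] = crc & 0xff;		/* low byte first, as verified above */
		buf[total - 1] = crc >> 8;

		return total;
	}
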
+
+static int pn544_i2c_write(struct i2c_client *client, u8 *buf, int len)
+{
+	int r;
+
+	if (len < 4 || len != (buf[0] + 1)) {
+		dev_err(&client->dev, "%s: Illegal message length: %d\n",
+			__func__, len);
+		return -EINVAL;
+	}
+
+	if (check_crc(buf, len))
+		return -EINVAL;
+
+	usleep_range(3000, 6000);
+
+	r = i2c_master_send(client, buf, len);
+	dev_dbg(&client->dev, "send: %d\n", r);
+
+	if (r == -EREMOTEIO) { /* Retry, chip was in standby */
+		usleep_range(6000, 10000);
+		r = i2c_master_send(client, buf, len);
+		dev_dbg(&client->dev, "send2: %d\n", r);
+	}
+
+	if (r != len)
+		return -EREMOTEIO;
+
+	return r;
+}
+
+static int pn544_i2c_read(struct i2c_client *client, u8 *buf, int buflen)
+{
+	int r;
+	u8 len;
+
+	/*
+	 * You could read a packet in one go, but then you'd need to read
+	 * the max size and the rest would be 0xff fill, so we do split reads.
+	 */
+	r = i2c_master_recv(client, &len, 1);
+	dev_dbg(&client->dev, "recv1: %d\n", r);
+
+	if (r != 1)
+		return -EREMOTEIO;
+
+	if (len < PN544_LLC_HCI_OVERHEAD)
+		len = PN544_LLC_HCI_OVERHEAD;
+	else if (len > (PN544_MSG_MAX_SIZE - 1))
+		len = PN544_MSG_MAX_SIZE - 1;
+
+	if (1 + len > buflen) /* len+(data+crc16) */
+		return -EMSGSIZE;
+
+	buf[0] = len;
+
+	r = i2c_master_recv(client, buf + 1, len);
+	dev_dbg(&client->dev, "recv2: %d\n", r);
+
+	if (r != len)
+		return -EREMOTEIO;
+
+	usleep_range(3000, 6000);
+
+	return r + 1;
+}
+
+static int pn544_fw_write(struct i2c_client *client, u8 *buf, int len)
+{
+	int r;
+
+	dev_dbg(&client->dev, "%s\n", __func__);
+
+	if (len < PN544_FW_HEADER_SIZE ||
+	    (PN544_FW_HEADER_SIZE + (buf[1] << 8) + buf[2]) != len)
+		return -EINVAL;
+
+	r = i2c_master_send(client, buf, len);
+	dev_dbg(&client->dev, "fw send: %d\n", r);
+
+	if (r == -EREMOTEIO) { /* Retry, chip was in standby */
+		usleep_range(6000, 10000);
+		r = i2c_master_send(client, buf, len);
+		dev_dbg(&client->dev, "fw send2: %d\n", r);
+	}
+
+	if (r != len)
+		return -EREMOTEIO;
+
+	return r;
+}
+
+static int pn544_fw_read(struct i2c_client *client, u8 *buf, int buflen)
+{
+	int r, len;
+
+	if (buflen < PN544_FW_HEADER_SIZE)
+		return -EINVAL;
+
+	r = i2c_master_recv(client, buf, PN544_FW_HEADER_SIZE);
+	dev_dbg(&client->dev, "FW recv1: %d\n", r);
+
+	if (r < 0)
+		return r;
+
+	if (r < PN544_FW_HEADER_SIZE)
+		return -EINVAL;
+
+	len = (buf[1] << 8) + buf[2];
+	if (len == 0) /* just header, no additional data */
+		return r;
+
+	if (len > buflen - PN544_FW_HEADER_SIZE)
+		return -EMSGSIZE;
+
+	r = i2c_master_recv(client, buf + PN544_FW_HEADER_SIZE, len);
+	dev_dbg(&client->dev, "fw recv2: %d\n", r);
+
+	if (r != len)
+		return -EINVAL;
+
+	return r + PN544_FW_HEADER_SIZE;
+}
+
+static irqreturn_t pn544_irq_thread_fn(int irq, void *dev_id)
+{
+	struct pn544_info *info = dev_id;
+	struct i2c_client *client = info->i2c_dev;
+
+	BUG_ON(!info);
+	BUG_ON(irq != info->i2c_dev->irq);
+
+	dev_dbg(&client->dev, "IRQ\n");
+
+	mutex_lock(&info->read_mutex);
+	info->read_irq = PN544_INT;
+	mutex_unlock(&info->read_mutex);
+
+	wake_up_interruptible(&info->read_wait);
+
+	return IRQ_HANDLED;
+}
+
+static enum pn544_irq pn544_irq_state(struct pn544_info *info)
+{
+	enum pn544_irq irq;
+
+	mutex_lock(&info->read_mutex);
+	irq = info->read_irq;
+	mutex_unlock(&info->read_mutex);
+	/*
+	 * XXX: should we check GPIO-line status directly?
+	 * return pdata->irq_status() ? PN544_INT : PN544_NONE;
+	 */
+
+	return irq;
+}
+
+static ssize_t pn544_read(struct file *file, char __user *buf,
+			  size_t count, loff_t *offset)
+{
+	struct pn544_info *info = container_of(file->private_data,
+					       struct pn544_info, miscdev);
+	struct i2c_client *client = info->i2c_dev;
+	enum pn544_irq irq;
+	size_t len;
+	int r = 0;
+
+	dev_dbg(&client->dev, "%s: info: %p, count: %zu\n", __func__,
+		info, count);
+
+	mutex_lock(&info->mutex);
+
+	if (info->state == PN544_ST_COLD) {
+		r = -ENODEV;
+		goto out;
+	}
+
+	irq = pn544_irq_state(info);
+	if (irq == PN544_NONE) {
+		if (file->f_flags & O_NONBLOCK) {
+			r = -EAGAIN;
+			goto out;
+		}
+
+		if (wait_event_interruptible(info->read_wait,
+					     (info->read_irq == PN544_INT))) {
+			r = -ERESTARTSYS;
+			goto out;
+		}
+	}
+
+	if (info->state == PN544_ST_FW_READY) {
+		len = min(count, info->buflen);
+
+		mutex_lock(&info->read_mutex);
+		r = pn544_fw_read(info->i2c_dev, info->buf, len);
+		info->read_irq = PN544_NONE;
+		mutex_unlock(&info->read_mutex);
+
+		if (r < 0) {
+			dev_err(&info->i2c_dev->dev, "FW read failed: %d\n", r);
+			goto out;
+		}
+
+		print_hex_dump(KERN_DEBUG, "FW read: ", DUMP_PREFIX_NONE,
+			       16, 2, info->buf, r, false);
+
+		*offset += r;
+		if (copy_to_user(buf, info->buf, r)) {
+			r = -EFAULT;
+			goto out;
+		}
+	} else {
+		len = min(count, info->buflen);
+
+		mutex_lock(&info->read_mutex);
+		r = pn544_i2c_read(info->i2c_dev, info->buf, len);
+		info->read_irq = PN544_NONE;
+		mutex_unlock(&info->read_mutex);
+
+		if (r < 0) {
+			dev_err(&info->i2c_dev->dev, "read failed (%d)\n", r);
+			goto out;
+		}
+		print_hex_dump(KERN_DEBUG, "read: ", DUMP_PREFIX_NONE,
+			       16, 2, info->buf, r, false);
+
+		*offset += r;
+		if (copy_to_user(buf, info->buf, r)) {
+			r = -EFAULT;
+			goto out;
+		}
+	}
+
+out:
+	mutex_unlock(&info->mutex);
+
+	return r;
+}
+
+static unsigned int pn544_poll(struct file *file, poll_table *wait)
+{
+	struct pn544_info *info = container_of(file->private_data,
+					       struct pn544_info, miscdev);
+	struct i2c_client *client = info->i2c_dev;
+	int r = 0;
+
+	dev_dbg(&client->dev, "%s: info: %p\n", __func__, info);
+
+	mutex_lock(&info->mutex);
+
+	if (info->state == PN544_ST_COLD) {
+		r = -ENODEV;
+		goto out;
+	}
+
+	poll_wait(file, &info->read_wait, wait);
+
+	if (pn544_irq_state(info) == PN544_INT) {
+		r = POLLIN | POLLRDNORM;
+		goto out;
+	}
+out:
+	mutex_unlock(&info->mutex);
+
+	return r;
+}
+
+static ssize_t pn544_write(struct file *file, const char __user *buf,
+			   size_t count, loff_t *ppos)
+{
+	struct pn544_info *info = container_of(file->private_data,
+					       struct pn544_info, miscdev);
+	struct i2c_client *client = info->i2c_dev;
+	ssize_t	len;
+	int r;
+
+	dev_dbg(&client->dev, "%s: info: %p, count %zu\n", __func__,
+		info, count);
+
+	mutex_lock(&info->mutex);
+
+	if (info->state == PN544_ST_COLD) {
+		r = -ENODEV;
+		goto out;
+	}
+
+	/*
+	 * XXX: should we detect reset writes and clean up possible
+	 * read_irq state?
+	 */
+	if (info->state == PN544_ST_FW_READY) {
+		size_t fw_len;
+
+		if (count < PN544_FW_HEADER_SIZE) {
+			r = -EINVAL;
+			goto out;
+		}
+
+		len = min(count, info->buflen);
+		if (copy_from_user(info->buf, buf, len)) {
+			r = -EFAULT;
+			goto out;
+		}
+
+		print_hex_dump(KERN_DEBUG, "FW write: ", DUMP_PREFIX_NONE,
+			       16, 2, info->buf, len, false);
+
+		fw_len = PN544_FW_HEADER_SIZE + (info->buf[1] << 8) +
+			info->buf[2];
+
+		if (len > fw_len) /* 1 msg at a time */
+			len = fw_len;
+
+		r = pn544_fw_write(info->i2c_dev, info->buf, len);
+	} else {
+		if (count < PN544_LLC_MIN_SIZE) {
+			r = -EINVAL;
+			goto out;
+		}
+
+		len = min(count, info->buflen);
+		if (copy_from_user(info->buf, buf, len)) {
+			r = -EFAULT;
+			goto out;
+		}
+
+		print_hex_dump(KERN_DEBUG, "write: ", DUMP_PREFIX_NONE,
+			       16, 2, info->buf, len, false);
+
+		if (len > (info->buf[0] + 1)) /* 1 msg at a time */
+			len  = info->buf[0] + 1;
+
+		r = pn544_i2c_write(info->i2c_dev, info->buf, len);
+	}
+out:
+	mutex_unlock(&info->mutex);
+
+	return r;
+
+}
+
+static long pn544_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	struct pn544_info *info = container_of(file->private_data,
+					       struct pn544_info, miscdev);
+	struct i2c_client *client = info->i2c_dev;
+	struct pn544_nfc_platform_data *pdata;
+	unsigned int val;
+	int r = 0;
+
+	dev_dbg(&client->dev, "%s: info: %p, cmd: 0x%x\n", __func__, info, cmd);
+
+	mutex_lock(&info->mutex);
+
+	if (info->state == PN544_ST_COLD) {
+		r = -ENODEV;
+		goto out;
+	}
+
+	pdata = info->i2c_dev->dev.platform_data;
+	switch (cmd) {
+	case PN544_GET_FW_MODE:
+		dev_dbg(&client->dev, "%s:  PN544_GET_FW_MODE\n", __func__);
+
+		val = (info->state == PN544_ST_FW_READY);
+		if (copy_to_user((void __user *)arg, &val, sizeof(val))) {
+			r = -EFAULT;
+			goto out;
+		}
+
+		break;
+
+	case PN544_SET_FW_MODE:
+		dev_dbg(&client->dev, "%s:  PN544_SET_FW_MODE\n", __func__);
+
+		if (copy_from_user(&val, (void __user *)arg, sizeof(val))) {
+			r = -EFAULT;
+			goto out;
+		}
+
+		if (val) {
+			if (info->state == PN544_ST_FW_READY)
+				break;
+
+			pn544_disable(info);
+			r = pn544_enable(info, FW_MODE);
+			if (r < 0)
+				goto out;
+		} else {
+			if (info->state == PN544_ST_READY)
+				break;
+			pn544_disable(info);
+			r = pn544_enable(info, HCI_MODE);
+			if (r < 0)
+				goto out;
+		}
+		file->f_pos = info->read_offset;
+		break;
+
+	case TCGETS:
+		dev_dbg(&client->dev, "%s:  TCGETS\n", __func__);
+
+		r = -ENOIOCTLCMD;
+		break;
+
+	default:
+		dev_err(&client->dev, "Unknown ioctl 0x%x\n", cmd);
+		r = -ENOIOCTLCMD;
+		break;
+	}
+
+out:
+	mutex_unlock(&info->mutex);
+
+	return r;
+}
+
+static int pn544_open(struct inode *inode, struct file *file)
+{
+	struct pn544_info *info = container_of(file->private_data,
+					       struct pn544_info, miscdev);
+	struct i2c_client *client = info->i2c_dev;
+	int r = 0;
+
+	dev_dbg(&client->dev, "%s: info: %p, client %p\n", __func__,
+		info, info->i2c_dev);
+
+	mutex_lock(&info->mutex);
+
+	/*
+	 * Only 1 at a time.
+	 * XXX: maybe a user counter would work better
+	 */
+	if (info->state != PN544_ST_COLD) {
+		r = -EBUSY;
+		goto out;
+	}
+
+	file->f_pos = info->read_offset;
+	r = pn544_enable(info, HCI_MODE);
+
+out:
+	mutex_unlock(&info->mutex);
+	return r;
+}
+
+static int pn544_close(struct inode *inode, struct file *file)
+{
+	struct pn544_info *info = container_of(file->private_data,
+					       struct pn544_info, miscdev);
+	struct i2c_client *client = info->i2c_dev;
+
+	dev_dbg(&client->dev, "%s: info: %p, client %p\n",
+		__func__, info, info->i2c_dev);
+
+	mutex_lock(&info->mutex);
+	pn544_disable(info);
+	mutex_unlock(&info->mutex);
+
+	return 0;
+}
+
+static const struct file_operations pn544_fops = {
+	.owner		= THIS_MODULE,
+	.llseek		= no_llseek,
+	.read		= pn544_read,
+	.write		= pn544_write,
+	.poll		= pn544_poll,
+	.open		= pn544_open,
+	.release	= pn544_close,
+	.unlocked_ioctl	= pn544_ioctl,
+};
+
+#ifdef CONFIG_PM
+static int pn544_suspend(struct device *dev)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+	struct pn544_info *info;
+	int r = 0;
+
+	dev_info(&client->dev, "***\n%s: client %p\n***\n", __func__, client);
+
+	info = i2c_get_clientdata(client);
+	dev_info(&client->dev, "%s: info: %p, client %p\n", __func__,
+		 info, client);
+
+	mutex_lock(&info->mutex);
+
+	switch (info->state) {
+	case PN544_ST_FW_READY:
+		/* Do not suspend while upgrading FW, please! */
+		r = -EPERM;
+		break;
+
+	case PN544_ST_READY:
+		/*
+		 * CHECK: Device should be in standby-mode. No way to check?
+		 * Allowing low power mode for the regulator is potentially
+		 * dangerous if pn544 does not go to suspension.
+		 * dangerous if the pn544 does not actually suspend.
+		break;
+
+	case PN544_ST_COLD:
+		break;
+	}
+
+	mutex_unlock(&info->mutex);
+	return r;
+}
+
+static int pn544_resume(struct device *dev)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+	struct pn544_info *info = i2c_get_clientdata(client);
+	int r = 0;
+
+	dev_dbg(&client->dev, "%s: info: %p, client %p\n", __func__,
+		info, client);
+
+	mutex_lock(&info->mutex);
+
+	switch (info->state) {
+	case PN544_ST_READY:
+		/*
+		 * CHECK: If regulator low power mode is allowed in
+		 * pn544_suspend, we should go back to normal mode
+		 * here.
+		 */
+		break;
+
+	case PN544_ST_COLD:
+		break;
+
+	case PN544_ST_FW_READY:
+		break;
+	}
+
+	mutex_unlock(&info->mutex);
+
+	return r;
+}
+
+static SIMPLE_DEV_PM_OPS(pn544_pm_ops, pn544_suspend, pn544_resume);
+#endif
+
+static struct device_attribute pn544_attr =
+	__ATTR(nfc_test, S_IRUGO, pn544_test, NULL);
+
+static int __devinit pn544_probe(struct i2c_client *client,
+				 const struct i2c_device_id *id)
+{
+	struct pn544_info *info;
+	struct pn544_nfc_platform_data *pdata;
+	int r = 0;
+
+	dev_dbg(&client->dev, "%s\n", __func__);
+	dev_dbg(&client->dev, "IRQ: %d\n", client->irq);
+
+	/* private data allocation */
+	info = kzalloc(sizeof(struct pn544_info), GFP_KERNEL);
+	if (!info) {
+		dev_err(&client->dev,
+			"Cannot allocate memory for pn544_info.\n");
+		r = -ENOMEM;
+		goto err_info_alloc;
+	}
+
+	info->buflen = max(PN544_MSG_MAX_SIZE, PN544_MAX_I2C_TRANSFER);
+	info->buf = kzalloc(info->buflen, GFP_KERNEL);
+	if (!info->buf) {
+		dev_err(&client->dev,
+			"Cannot allocate memory for pn544_info->buf.\n");
+		r = -ENOMEM;
+		goto err_buf_alloc;
+	}
+
+	info->regs[0].supply = reg_vdd_io;
+	info->regs[1].supply = reg_vbat;
+	r = regulator_bulk_get(&client->dev, ARRAY_SIZE(info->regs),
+				 info->regs);
+	if (r < 0)
+		goto err_kmalloc;
+
+	info->i2c_dev = client;
+	info->state = PN544_ST_COLD;
+	info->read_irq = PN544_NONE;
+	mutex_init(&info->read_mutex);
+	mutex_init(&info->mutex);
+	init_waitqueue_head(&info->read_wait);
+	i2c_set_clientdata(client, info);
+	pdata = client->dev.platform_data;
+	if (!pdata) {
+		dev_err(&client->dev, "No platform data\n");
+		r = -EINVAL;
+		goto err_reg;
+	}
+
+	if (!pdata->request_resources) {
+		dev_err(&client->dev, "request_resources() missing\n");
+		r = -EINVAL;
+		goto err_reg;
+	}
+
+	r = pdata->request_resources(client);
+	if (r) {
+		dev_err(&client->dev, "Cannot get platform resources\n");
+		goto err_reg;
+	}
+
+	r = request_threaded_irq(client->irq, NULL, pn544_irq_thread_fn,
+				 IRQF_TRIGGER_RISING, PN544_DRIVER_NAME,
+				 info);
+	if (r < 0) {
+		dev_err(&client->dev, "Unable to register IRQ handler\n");
+		goto err_res;
+	}
+
+	/* If we don't have the test we don't need the sysfs file */
+	if (pdata->test) {
+		r = device_create_file(&client->dev, &pn544_attr);
+		if (r) {
+			dev_err(&client->dev,
+				"sysfs registration failed, error %d\n", r);
+			goto err_irq;
+		}
+	}
+
+	info->miscdev.minor = MISC_DYNAMIC_MINOR;
+	info->miscdev.name = PN544_DRIVER_NAME;
+	info->miscdev.fops = &pn544_fops;
+	info->miscdev.parent = &client->dev;
+	r = misc_register(&info->miscdev);
+	if (r < 0) {
+		dev_err(&client->dev, "Device registration failed\n");
+		goto err_sysfs;
+	}
+
+	dev_dbg(&client->dev, "%s: info: %p, pdata %p, client %p\n",
+		__func__, info, pdata, client);
+
+	return 0;
+
+err_sysfs:
+	if (pdata->test)
+		device_remove_file(&client->dev, &pn544_attr);
+err_irq:
+	free_irq(client->irq, info);
+err_res:
+	if (pdata->free_resources)
+		pdata->free_resources();
+err_reg:
+	regulator_bulk_free(ARRAY_SIZE(info->regs), info->regs);
+err_kmalloc:
+	kfree(info->buf);
+err_buf_alloc:
+	kfree(info);
+err_info_alloc:
+	return r;
+}
+
+static __devexit int pn544_remove(struct i2c_client *client)
+{
+	struct pn544_info *info = i2c_get_clientdata(client);
+	struct pn544_nfc_platform_data *pdata = client->dev.platform_data;
+
+	dev_dbg(&client->dev, "%s\n", __func__);
+
+	misc_deregister(&info->miscdev);
+	if (pdata->test)
+		device_remove_file(&client->dev, &pn544_attr);
+
+	if (info->state != PN544_ST_COLD) {
+		if (pdata->disable)
+			pdata->disable();
+
+		info->read_irq = PN544_NONE;
+	}
+
+	free_irq(client->irq, info);
+	if (pdata->free_resources)
+		pdata->free_resources();
+
+	regulator_bulk_free(ARRAY_SIZE(info->regs), info->regs);
+	kfree(info->buf);
+	kfree(info);
+
+	return 0;
+}
+
+static struct i2c_driver pn544_driver = {
+	.driver = {
+		.name = PN544_DRIVER_NAME,
+#ifdef CONFIG_PM
+		.pm = &pn544_pm_ops,
+#endif
+	},
+	.probe = pn544_probe,
+	.id_table = pn544_id_table,
+	.remove = __devexit_p(pn544_remove),
+};
+
+static int __init pn544_init(void)
+{
+	int r;
+
+	pr_debug(DRIVER_DESC ": %s\n", __func__);
+
+	r = i2c_add_driver(&pn544_driver);
+	if (r) {
+		pr_err(PN544_DRIVER_NAME ": driver registration failed\n");
+		return r;
+	}
+
+	return 0;
+}
+
+static void __exit pn544_exit(void)
+{
+	i2c_del_driver(&pn544_driver);
+	pr_info(DRIVER_DESC ", Exiting.\n");
+}
+
+module_init(pn544_init);
+module_exit(pn544_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION(DRIVER_DESC);
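
The driver exposes a single misc character device with read/write/poll plus the PN544_GET_FW_MODE/PN544_SET_FW_MODE ioctls from <linux/nfc/pn544.h>. A rough user-space sketch of switching the chip into firmware-update mode follows; the /dev/pn544 node name is an assumption based on PN544_DRIVER_NAME and the local udev setup, and the ioctl header is assumed to be visible to user space.

	/* Hypothetical user-space sketch; device path assumed, error handling trimmed. */
	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <linux/nfc/pn544.h>	/* PN544_SET_FW_MODE / PN544_GET_FW_MODE */

	int main(void)
	{
		unsigned int fw = 1;
		int fd = open("/dev/pn544", O_RDWR);	/* assumed node name */

		if (fd < 0) {
			perror("open");
			return 1;
		}

		/* pn544_ioctl() power-cycles the chip into FW mode and resets f_pos. */
		if (ioctl(fd, PN544_SET_FW_MODE, &fw) < 0)
			perror("PN544_SET_FW_MODE");

		/* read()/write() now carry FW-mode frames, handled by
		 * pn544_fw_read()/pn544_fw_write() in the driver above. */

		close(fd);
		return 0;
	}
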
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index 5b1630e..a9523fd 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -45,6 +45,7 @@
         depends on PCI && X86 && XEN
         select HOTPLUG
         select PCI_XEN
+	select XEN_XENBUS_FRONTEND
         default y
         help
           The PCI device frontend driver allows the kernel to import arbitrary
diff --git a/drivers/pcmcia/m32r_cfc.h b/drivers/pcmcia/m32r_cfc.h
index 8146e3b..f558e1a 100644
--- a/drivers/pcmcia/m32r_cfc.h
+++ b/drivers/pcmcia/m32r_cfc.h
@@ -9,7 +9,7 @@
 #endif
 
 /*
- * M32R PC Card Controler
+ * M32R PC Card Controller
  */
 #define M32R_PCC0_BASE        0x00ef7000
 #define M32R_PCC1_BASE        0x00ef7020
diff --git a/drivers/pcmcia/m32r_pcc.h b/drivers/pcmcia/m32r_pcc.h
index e4fffe4..f95c585 100644
--- a/drivers/pcmcia/m32r_pcc.h
+++ b/drivers/pcmcia/m32r_pcc.h
@@ -5,7 +5,7 @@
 #define M32R_MAX_PCC	2
 
 /*
- * M32R PC Card Controler
+ * M32R PC Card Controller
  */
 #define M32R_PCC0_BASE        0x00ef7000
 #define M32R_PCC1_BASE        0x00ef7020
diff --git a/drivers/pcmcia/m8xx_pcmcia.c b/drivers/pcmcia/m8xx_pcmcia.c
index 99d4f23..0db4827 100644
--- a/drivers/pcmcia/m8xx_pcmcia.c
+++ b/drivers/pcmcia/m8xx_pcmcia.c
@@ -1198,7 +1198,7 @@
 	out_be32(M8XX_PGCRX(1),
 		 M8XX_PGCRX_CXOE | (mk_int_int_mask(hwirq) << 16));
 
-	/* intialize the fixed memory windows */
+	/* initialize the fixed memory windows */
 
 	for (i = 0; i < PCMCIA_SOCKETS_NO; i++) {
 		for (m = 0; m < PCMCIA_MEM_WIN_NO; m++) {
diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
index ee40d68..c5c4b8c 100644
--- a/drivers/platform/x86/acer-wmi.c
+++ b/drivers/platform/x86/acer-wmi.c
@@ -1021,7 +1021,7 @@
 	return 0;
 }
 
-static struct backlight_ops acer_bl_ops = {
+static const struct backlight_ops acer_bl_ops = {
 	.get_brightness = read_brightness,
 	.update_status = update_bl_status,
 };
diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c
index d235f44..f3aa6a7 100644
--- a/drivers/platform/x86/asus-laptop.c
+++ b/drivers/platform/x86/asus-laptop.c
@@ -640,7 +640,7 @@
 	return asus_lcd_set(asus, value);
 }
 
-static struct backlight_ops asusbl_ops = {
+static const struct backlight_ops asusbl_ops = {
 	.get_brightness = asus_read_brightness,
 	.update_status = update_bl_status,
 };
diff --git a/drivers/platform/x86/asus_acpi.c b/drivers/platform/x86/asus_acpi.c
index ca05aef..4633fd8 100644
--- a/drivers/platform/x86/asus_acpi.c
+++ b/drivers/platform/x86/asus_acpi.c
@@ -1467,7 +1467,7 @@
 	return 0;
 }
 
-static struct backlight_ops asus_backlight_data = {
+static const struct backlight_ops asus_backlight_data = {
 	.get_brightness = read_brightness,
 	.update_status  = set_brightness_status,
 };
diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
index cf8a89a..34657f9 100644
--- a/drivers/platform/x86/dell-laptop.c
+++ b/drivers/platform/x86/dell-laptop.c
@@ -546,7 +546,7 @@
 	return buffer->output[1];
 }
 
-static struct backlight_ops dell_ops = {
+static const struct backlight_ops dell_ops = {
 	.get_brightness = dell_get_intensity,
 	.update_status  = dell_send_intensity,
 };
diff --git a/drivers/platform/x86/eeepc-laptop.c b/drivers/platform/x86/eeepc-laptop.c
index e9fc530..49d9ad7 100644
--- a/drivers/platform/x86/eeepc-laptop.c
+++ b/drivers/platform/x86/eeepc-laptop.c
@@ -1126,7 +1126,7 @@
 	return set_brightness(bd, bd->props.brightness);
 }
 
-static struct backlight_ops eeepcbl_ops = {
+static const struct backlight_ops eeepcbl_ops = {
 	.get_brightness = read_brightness,
 	.update_status = update_bl_status,
 };
diff --git a/drivers/platform/x86/fujitsu-laptop.c b/drivers/platform/x86/fujitsu-laptop.c
index ad88b2e..19e92b2 100644
--- a/drivers/platform/x86/fujitsu-laptop.c
+++ b/drivers/platform/x86/fujitsu-laptop.c
@@ -437,7 +437,7 @@
 	return ret;
 }
 
-static struct backlight_ops fujitsubl_ops = {
+static const struct backlight_ops fujitsubl_ops = {
 	.get_brightness = bl_get_brightness,
 	.update_status = bl_update_status,
 };
diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
index b4a95bb..5e83370 100644
--- a/drivers/platform/x86/sony-laptop.c
+++ b/drivers/platform/x86/sony-laptop.c
@@ -858,7 +858,7 @@
 }
 
 static struct backlight_device *sony_backlight_device;
-static struct backlight_ops sony_backlight_ops = {
+static const struct backlight_ops sony_backlight_ops = {
 	.update_status = sony_backlight_update_status,
 	.get_brightness = sony_backlight_get_brightness,
 };
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index a974ca3..dd59958 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -6110,7 +6110,7 @@
 			       BACKLIGHT_UPDATE_HOTKEY);
 }
 
-static struct backlight_ops ibm_backlight_data = {
+static const struct backlight_ops ibm_backlight_data = {
 	.get_brightness = brightness_get,
 	.update_status  = brightness_update_status,
 };
@@ -7194,7 +7194,7 @@
  * 		TPACPI_FAN_WR_ACPI_FANS (X31/X40/X41)
  *
  *	FIRMWARE BUG: on some models, EC 0x2f might not be initialized at
- *	boot. Apparently the EC does not intialize it, so unless ACPI DSDT
+ *	boot. Apparently the EC does not initialize it, so unless ACPI DSDT
  *	does so, its initial value is meaningless (0x07).
  *
  *	For firmware bugs, refer to:
diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
index 4276da7..209cced 100644
--- a/drivers/platform/x86/toshiba_acpi.c
+++ b/drivers/platform/x86/toshiba_acpi.c
@@ -841,7 +841,7 @@
 	remove_proc_entry("version", toshiba_proc_dir);
 }
 
-static struct backlight_ops toshiba_backlight_data = {
+static const struct backlight_ops toshiba_backlight_data = {
         .get_brightness = get_lcd,
         .update_status  = set_lcd_status,
 };
diff --git a/drivers/power/s3c_adc_battery.c b/drivers/power/s3c_adc_battery.c
index fe16b48..4a8ae39 100644
--- a/drivers/power/s3c_adc_battery.c
+++ b/drivers/power/s3c_adc_battery.c
@@ -1,5 +1,5 @@
 /*
- *	iPAQ h1930/h1940/rx1950 battery controler driver
+ *	iPAQ h1930/h1940/rx1950 battery controller driver
  *	Copyright (c) Vasily Khoruzhick
  *	Based on h1940_battery.c by Arnaud Patard
  *
@@ -427,5 +427,5 @@
 module_exit(s3c_adc_bat_exit);
 
 MODULE_AUTHOR("Vasily Khoruzhick <anarsoul@gmail.com>");
-MODULE_DESCRIPTION("iPAQ H1930/H1940/RX1950 battery controler driver");
+MODULE_DESCRIPTION("iPAQ H1930/H1940/RX1950 battery controller driver");
 MODULE_LICENSE("GPL");
diff --git a/drivers/pps/Kconfig b/drivers/pps/Kconfig
index 1afe4e0..f0d3376 100644
--- a/drivers/pps/Kconfig
+++ b/drivers/pps/Kconfig
@@ -30,6 +30,17 @@
 	  messages to the system log.  Select this if you are having a
 	  problem with PPS support and want to see more of what is going on.
 
+config NTP_PPS
+	bool "PPS kernel consumer support"
+	depends on PPS && !NO_HZ
+	help
+	  This option adds support for direct in-kernel time
+	  synchronization using an external PPS signal.
+
+	  It doesn't work on tickless systems at the moment.
+
 source drivers/pps/clients/Kconfig
 
+source drivers/pps/generators/Kconfig
+
 endmenu
diff --git a/drivers/pps/Makefile b/drivers/pps/Makefile
index 98960dd..4483eaa 100644
--- a/drivers/pps/Makefile
+++ b/drivers/pps/Makefile
@@ -3,7 +3,8 @@
 #
 
 pps_core-y			:= pps.o kapi.o sysfs.o
+pps_core-$(CONFIG_NTP_PPS)	+= kc.o
 obj-$(CONFIG_PPS)		:= pps_core.o
-obj-y				+= clients/
+obj-y				+= clients/ generators/
 
 ccflags-$(CONFIG_PPS_DEBUG) := -DDEBUG
diff --git a/drivers/pps/clients/Kconfig b/drivers/pps/clients/Kconfig
index 4e801bd7..8520a7f 100644
--- a/drivers/pps/clients/Kconfig
+++ b/drivers/pps/clients/Kconfig
@@ -22,4 +22,11 @@
 	  If you say yes here you get support for a PPS source connected
 	  with the CD (Carrier Detect) pin of your serial port.
 
+config PPS_CLIENT_PARPORT
+	tristate "Parallel port PPS client"
+	depends on PPS && PARPORT
+	help
+	  If you say yes here you get support for a PPS source connected
+	  with the interrupt pin of your parallel port.
+
 endif
diff --git a/drivers/pps/clients/Makefile b/drivers/pps/clients/Makefile
index 812c9b1..42517da 100644
--- a/drivers/pps/clients/Makefile
+++ b/drivers/pps/clients/Makefile
@@ -4,6 +4,7 @@
 
 obj-$(CONFIG_PPS_CLIENT_KTIMER)	+= pps-ktimer.o
 obj-$(CONFIG_PPS_CLIENT_LDISC)	+= pps-ldisc.o
+obj-$(CONFIG_PPS_CLIENT_PARPORT) += pps_parport.o
 
 ifeq ($(CONFIG_PPS_DEBUG),y)
 EXTRA_CFLAGS += -DDEBUG
diff --git a/drivers/pps/clients/pps-ktimer.c b/drivers/pps/clients/pps-ktimer.c
index e7ef5b8..2728469 100644
--- a/drivers/pps/clients/pps-ktimer.c
+++ b/drivers/pps/clients/pps-ktimer.c
@@ -19,6 +19,7 @@
  *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
 #include <linux/kernel.h>
 #include <linux/module.h>
@@ -31,7 +32,7 @@
  * Global variables
  */
 
-static int source;
+static struct pps_device *pps;
 static struct timer_list ktimer;
 
 /*
@@ -40,19 +41,14 @@
 
 static void pps_ktimer_event(unsigned long ptr)
 {
-	struct timespec __ts;
-	struct pps_ktime ts;
+	struct pps_event_time ts;
 
 	/* First of all we get the time stamp... */
-	getnstimeofday(&__ts);
+	pps_get_ts(&ts);
 
-	pr_info("PPS event at %lu\n", jiffies);
+	dev_info(pps->dev, "PPS event at %lu\n", jiffies);
 
-	/* ... and translate it to PPS time data struct */
-	ts.sec = __ts.tv_sec;
-	ts.nsec = __ts.tv_nsec;
-
-	pps_event(source, &ts, PPS_CAPTUREASSERT, NULL);
+	pps_event(pps, &ts, PPS_CAPTUREASSERT, NULL);
 
 	mod_timer(&ktimer, jiffies + HZ);
 }
@@ -61,12 +57,11 @@
  * The echo function
  */
 
-static void pps_ktimer_echo(int source, int event, void *data)
+static void pps_ktimer_echo(struct pps_device *pps, int event, void *data)
 {
-	pr_info("echo %s %s for source %d\n",
+	dev_info(pps->dev, "echo %s %s\n",
 		event & PPS_CAPTUREASSERT ? "assert" : "",
-		event & PPS_CAPTURECLEAR ? "clear" : "",
-		source);
+		event & PPS_CAPTURECLEAR ? "clear" : "");
 }
 
 /*
@@ -89,30 +84,27 @@
 
 static void __exit pps_ktimer_exit(void)
 {
-	del_timer_sync(&ktimer);
-	pps_unregister_source(source);
+	dev_info(pps->dev, "ktimer PPS source unregistered\n");
 
-	pr_info("ktimer PPS source unregistered\n");
+	del_timer_sync(&ktimer);
+	pps_unregister_source(pps);
 }
 
 static int __init pps_ktimer_init(void)
 {
-	int ret;
-
-	ret = pps_register_source(&pps_ktimer_info,
+	pps = pps_register_source(&pps_ktimer_info,
 				PPS_CAPTUREASSERT | PPS_OFFSETASSERT);
-	if (ret < 0) {
-		printk(KERN_ERR "cannot register ktimer source\n");
-		return ret;
+	if (pps == NULL) {
+		pr_err("cannot register PPS source\n");
+		return -ENOMEM;
 	}
-	source = ret;
 
 	setup_timer(&ktimer, pps_ktimer_event, 0);
 	mod_timer(&ktimer, jiffies + HZ);
 
-	pr_info("ktimer PPS source registered at %d\n", source);
+	dev_info(pps->dev, "ktimer PPS source registered\n");
 
-	return  0;
+	return 0;
 }
 
 module_init(pps_ktimer_init);
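
The ktimer conversion above shows the reworked PPS client API: pps_register_source() now returns a struct pps_device * (NULL on failure) instead of an integer source id, and pps_event() takes that device together with a struct pps_event_time filled in by pps_get_ts(). A minimal registration/report sketch following the same pattern; everything other than the API calls themselves (names, chosen mode bits) is illustrative.

	#include <linux/pps_kernel.h>

	/* Illustrative client skeleton modelled on pps-ktimer above. */
	static struct pps_device *my_pps;

	static struct pps_source_info my_pps_info = {
		.name	= "my-pps",		/* illustrative name */
		.path	= "",
		.mode	= PPS_CAPTUREASSERT | PPS_OFFSETASSERT |
			  PPS_CANWAIT | PPS_TSFMT_TSPEC,
		.owner	= THIS_MODULE,
	};

	static int my_pps_register(void)
	{
		my_pps = pps_register_source(&my_pps_info,
					     PPS_CAPTUREASSERT | PPS_OFFSETASSERT);
		if (my_pps == NULL)		/* no integer source ids any more */
			return -ENOMEM;
		return 0;
	}

	/* Called from whatever interrupt or timer observes the pulse. */
	static void my_pps_report(void)
	{
		struct pps_event_time ts;

		pps_get_ts(&ts);		/* grab the timestamp first */
		pps_event(my_pps, &ts, PPS_CAPTUREASSERT, NULL);
	}
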
diff --git a/drivers/pps/clients/pps-ldisc.c b/drivers/pps/clients/pps-ldisc.c
index 8e1932d..79451f2 100644
--- a/drivers/pps/clients/pps-ldisc.c
+++ b/drivers/pps/clients/pps-ldisc.c
@@ -19,6 +19,8 @@
  *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/serial_core.h>
 #include <linux/tty.h>
@@ -27,30 +29,18 @@
 #define PPS_TTY_MAGIC		0x0001
 
 static void pps_tty_dcd_change(struct tty_struct *tty, unsigned int status,
-				struct timespec *ts)
+				struct pps_event_time *ts)
 {
-	int id = (long)tty->disc_data;
-	struct timespec __ts;
-	struct pps_ktime pps_ts;
+	struct pps_device *pps = (struct pps_device *)tty->disc_data;
 
-	/* First of all we get the time stamp... */
-	getnstimeofday(&__ts);
-
-	/* Does caller give us a timestamp? */
-	if (ts) {	/* Yes. Let's use it! */
-		pps_ts.sec = ts->tv_sec;
-		pps_ts.nsec = ts->tv_nsec;
-	} else {	/* No. Do it ourself! */
-		pps_ts.sec = __ts.tv_sec;
-		pps_ts.nsec = __ts.tv_nsec;
-	}
+	BUG_ON(pps == NULL);
 
 	/* Now do the PPS event report */
-	pps_event(id, &pps_ts, status ? PPS_CAPTUREASSERT : PPS_CAPTURECLEAR,
-			NULL);
+	pps_event(pps, ts, status ? PPS_CAPTUREASSERT :
+			PPS_CAPTURECLEAR, NULL);
 
-	pr_debug("PPS %s at %lu on source #%d\n",
-			status ? "assert" : "clear", jiffies, id);
+	dev_dbg(pps->dev, "PPS %s at %lu\n",
+			status ? "assert" : "clear", jiffies);
 }
 
 static int (*alias_n_tty_open)(struct tty_struct *tty);
@@ -60,6 +50,7 @@
 	struct pps_source_info info;
 	struct tty_driver *drv = tty->driver;
 	int index = tty->index + drv->name_base;
+	struct pps_device *pps;
 	int ret;
 
 	info.owner = THIS_MODULE;
@@ -70,34 +61,42 @@
 			PPS_OFFSETASSERT | PPS_OFFSETCLEAR | \
 			PPS_CANWAIT | PPS_TSFMT_TSPEC;
 
-	ret = pps_register_source(&info, PPS_CAPTUREBOTH | \
+	pps = pps_register_source(&info, PPS_CAPTUREBOTH | \
 				PPS_OFFSETASSERT | PPS_OFFSETCLEAR);
-	if (ret < 0) {
+	if (pps == NULL) {
 		pr_err("cannot register PPS source \"%s\"\n", info.path);
-		return ret;
+		return -ENOMEM;
 	}
-	tty->disc_data = (void *)(long)ret;
+	tty->disc_data = pps;
 
 	/* Should open N_TTY ldisc too */
 	ret = alias_n_tty_open(tty);
-	if (ret < 0)
-		pps_unregister_source((long)tty->disc_data);
+	if (ret < 0) {
+		pr_err("cannot open tty ldisc \"%s\"\n", info.path);
+		goto err_unregister;
+	}
 
-	pr_info("PPS source #%d \"%s\" added\n", ret, info.path);
+	dev_info(pps->dev, "source \"%s\" added\n", info.path);
 
 	return 0;
+
+err_unregister:
+	tty->disc_data = NULL;
+	pps_unregister_source(pps);
+	return ret;
 }
 
 static void (*alias_n_tty_close)(struct tty_struct *tty);
 
 static void pps_tty_close(struct tty_struct *tty)
 {
-	int id = (long)tty->disc_data;
+	struct pps_device *pps = (struct pps_device *)tty->disc_data;
 
-	pps_unregister_source(id);
 	alias_n_tty_close(tty);
 
-	pr_info("PPS source #%d removed\n", id);
+	tty->disc_data = NULL;
+	dev_info(pps->dev, "removed\n");
+	pps_unregister_source(pps);
 }
 
 static struct tty_ldisc_ops pps_ldisc_ops;
diff --git a/drivers/pps/clients/pps_parport.c b/drivers/pps/clients/pps_parport.c
new file mode 100644
index 0000000..32221ef
--- /dev/null
+++ b/drivers/pps/clients/pps_parport.c
@@ -0,0 +1,258 @@
+/*
+ * pps_parport.c -- kernel parallel port PPS client
+ *
+ *
+ * Copyright (C) 2009   Alexander Gordeev <lasaine@lvk.cs.msu.su>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+
+/*
+ * TODO:
+ * implement echo over SEL pin
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/irqnr.h>
+#include <linux/time.h>
+#include <linux/parport.h>
+#include <linux/pps_kernel.h>
+
+#define DRVDESC "parallel port PPS client"
+
+/* module parameters */
+
+#define CLEAR_WAIT_MAX		100
+#define CLEAR_WAIT_MAX_ERRORS	5
+
+static unsigned int clear_wait = 100;
+MODULE_PARM_DESC(clear_wait,
+	"Maximum number of port reads when polling for signal clear,"
+	" zero turns clear edge capture off entirely");
+module_param(clear_wait, uint, 0);
+
+
+/* internal per port structure */
+struct pps_client_pp {
+	struct pardevice *pardev;	/* parport device */
+	struct pps_device *pps;		/* PPS device */
+	unsigned int cw;		/* port clear timeout */
+	unsigned int cw_err;		/* number of timeouts */
+};
+
+static inline int signal_is_set(struct parport *port)
+{
+	return (port->ops->read_status(port) & PARPORT_STATUS_ACK) != 0;
+}
+
+/* parport interrupt handler */
+static void parport_irq(void *handle)
+{
+	struct pps_event_time ts_assert, ts_clear;
+	struct pps_client_pp *dev = handle;
+	struct parport *port = dev->pardev->port;
+	unsigned int i;
+	unsigned long flags;
+
+	/* first of all we get the time stamp... */
+	pps_get_ts(&ts_assert);
+
+	if (dev->cw == 0)
+		/* clear edge capture disabled */
+		goto out_assert;
+
+	/* try to capture the clear edge */
+
+	/* We have to disable interrupts here. The idea is to prevent
+	 * other interrupts on the same processor from introducing
+	 * random lags while polling the port. Reading from an I/O port
+	 * is known to take approximately 1us, while other interrupt
+	 * handlers can potentially take much longer.
+	 *
+	 * Interrupts won't be disabled for long because the number of
+	 * polls is limited by the clear_wait parameter, which is kept
+	 * rather low. So it should never be an issue.
+	 */
+	local_irq_save(flags);
+	/* check the signal (no signal means the pulse is lost this time) */
+	if (!signal_is_set(port)) {
+		local_irq_restore(flags);
+		dev_err(dev->pps->dev, "lost the signal\n");
+		goto out_assert;
+	}
+
+	/* poll the port until the signal is unset */
+	for (i = dev->cw; i; i--)
+		if (!signal_is_set(port)) {
+			pps_get_ts(&ts_clear);
+			local_irq_restore(flags);
+			dev->cw_err = 0;
+			goto out_both;
+		}
+	local_irq_restore(flags);
+
+	/* timeout */
+	dev->cw_err++;
+	if (dev->cw_err >= CLEAR_WAIT_MAX_ERRORS) {
+		dev_err(dev->pps->dev, "disabled clear edge capture after %d"
+				" timeouts\n", dev->cw_err);
+		dev->cw = 0;
+		dev->cw_err = 0;
+	}
+
+out_assert:
+	/* fire assert event */
+	pps_event(dev->pps, &ts_assert,
+			PPS_CAPTUREASSERT, NULL);
+	return;
+
+out_both:
+	/* fire assert event */
+	pps_event(dev->pps, &ts_assert,
+			PPS_CAPTUREASSERT, NULL);
+	/* fire clear event */
+	pps_event(dev->pps, &ts_clear,
+			PPS_CAPTURECLEAR, NULL);
+	return;
+}
+
+/* the PPS echo function */
+static void pps_echo(struct pps_device *pps, int event, void *data)
+{
+	dev_info(pps->dev, "echo %s %s\n",
+		event & PPS_CAPTUREASSERT ? "assert" : "",
+		event & PPS_CAPTURECLEAR ? "clear" : "");
+}
+
+static void parport_attach(struct parport *port)
+{
+	struct pps_client_pp *device;
+	struct pps_source_info info = {
+		.name		= KBUILD_MODNAME,
+		.path		= "",
+		.mode		= PPS_CAPTUREBOTH | \
+				  PPS_OFFSETASSERT | PPS_OFFSETCLEAR | \
+				  PPS_ECHOASSERT | PPS_ECHOCLEAR | \
+				  PPS_CANWAIT | PPS_TSFMT_TSPEC,
+		.echo		= pps_echo,
+		.owner		= THIS_MODULE,
+		.dev		= NULL
+	};
+
+	device = kzalloc(sizeof(struct pps_client_pp), GFP_KERNEL);
+	if (!device) {
+		pr_err("memory allocation failed, not attaching\n");
+		return;
+	}
+
+	device->pardev = parport_register_device(port, KBUILD_MODNAME,
+			NULL, NULL, parport_irq, 0, device);
+	if (!device->pardev) {
+		pr_err("couldn't register with %s\n", port->name);
+		goto err_free;
+	}
+
+	if (parport_claim_or_block(device->pardev) < 0) {
+		pr_err("couldn't claim %s\n", port->name);
+		goto err_unregister_dev;
+	}
+
+	device->pps = pps_register_source(&info,
+			PPS_CAPTUREBOTH | PPS_OFFSETASSERT | PPS_OFFSETCLEAR);
+	if (device->pps == NULL) {
+		pr_err("couldn't register PPS source\n");
+		goto err_release_dev;
+	}
+
+	device->cw = clear_wait;
+
+	port->ops->enable_irq(port);
+
+	pr_info("attached to %s\n", port->name);
+
+	return;
+
+err_release_dev:
+	parport_release(device->pardev);
+err_unregister_dev:
+	parport_unregister_device(device->pardev);
+err_free:
+	kfree(device);
+}
+
+static void parport_detach(struct parport *port)
+{
+	struct pardevice *pardev = port->cad;
+	struct pps_client_pp *device;
+
+	/* FIXME: oooh, this is ugly! */
+	if (strcmp(pardev->name, KBUILD_MODNAME))
+		/* not our port */
+		return;
+
+	device = pardev->private;
+
+	port->ops->disable_irq(port);
+	pps_unregister_source(device->pps);
+	parport_release(pardev);
+	parport_unregister_device(pardev);
+	kfree(device);
+}
+
+static struct parport_driver pps_parport_driver = {
+	.name = KBUILD_MODNAME,
+	.attach = parport_attach,
+	.detach = parport_detach,
+};
+
+/* module stuff */
+
+static int __init pps_parport_init(void)
+{
+	int ret;
+
+	pr_info(DRVDESC "\n");
+
+	if (clear_wait > CLEAR_WAIT_MAX) {
+		pr_err("clear_wait value should not be greater"
+				" than %d\n", CLEAR_WAIT_MAX);
+		return -EINVAL;
+	}
+
+	ret = parport_register_driver(&pps_parport_driver);
+	if (ret) {
+		pr_err("unable to register with parport\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+static void __exit pps_parport_exit(void)
+{
+	parport_unregister_driver(&pps_parport_driver);
+}
+
+module_init(pps_parport_init);
+module_exit(pps_parport_exit);
+
+MODULE_AUTHOR("Alexander Gordeev <lasaine@lvk.cs.msu.su>");
+MODULE_DESCRIPTION(DRVDESC);
+MODULE_LICENSE("GPL");
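To put numbers on the interrupts-off window described in parport_irq(): at roughly 1 us per status read, the default clear_wait of 100 bounds the polling loop to about 100 us per pulse, and after CLEAR_WAIT_MAX_ERRORS (5) consecutive timeouts the driver permanently falls back to assert-only capture by zeroing dev->cw. Loading the module with clear_wait=0 disables clear-edge capture from the start.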
diff --git a/drivers/pps/generators/Kconfig b/drivers/pps/generators/Kconfig
new file mode 100644
index 0000000..f3a73dd
--- /dev/null
+++ b/drivers/pps/generators/Kconfig
@@ -0,0 +1,13 @@
+#
+# PPS generators configuration
+#
+
+comment "PPS generators support"
+
+config PPS_GENERATOR_PARPORT
+	tristate "Parallel port PPS signal generator"
+	depends on PARPORT
+	help
+	  If you say yes here you get support for a PPS signal generator which
+	  utilizes the STROBE pin of a parallel port to send PPS signals. It uses
+	  the parport abstraction layer and hrtimers to precisely control the signal.
diff --git a/drivers/pps/generators/Makefile b/drivers/pps/generators/Makefile
new file mode 100644
index 0000000..303304a6
--- /dev/null
+++ b/drivers/pps/generators/Makefile
@@ -0,0 +1,9 @@
+#
+# Makefile for PPS generators.
+#
+
+obj-$(CONFIG_PPS_GENERATOR_PARPORT) += pps_gen_parport.o
+
+ifeq ($(CONFIG_PPS_DEBUG),y)
+EXTRA_CFLAGS += -DDEBUG
+endif
diff --git a/drivers/pps/generators/pps_gen_parport.c b/drivers/pps/generators/pps_gen_parport.c
new file mode 100644
index 0000000..5c32f8d
--- /dev/null
+++ b/drivers/pps/generators/pps_gen_parport.c
@@ -0,0 +1,282 @@
+/*
+ * pps_gen_parport.c -- kernel parallel port PPS signal generator
+ *
+ *
+ * Copyright (C) 2009   Alexander Gordeev <lasaine@lvk.cs.msu.su>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+
+/*
+ * TODO:
+ * fix issues when the realtime clock is adjusted in a leap
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/time.h>
+#include <linux/hrtimer.h>
+#include <linux/parport.h>
+
+#define DRVDESC "parallel port PPS signal generator"
+
+#define SIGNAL		0
+#define NO_SIGNAL	PARPORT_CONTROL_STROBE
+
+/* module parameters */
+
+#define SEND_DELAY_MAX		100000
+
+static unsigned int send_delay = 30000;
+MODULE_PARM_DESC(delay,
+	"Delay between setting and dropping the signal (ns)");
+module_param_named(delay, send_delay, uint, 0);
+
+
+#define SAFETY_INTERVAL	3000	/* set the hrtimer earlier for safety (ns) */
+
+/* internal per port structure */
+struct pps_generator_pp {
+	struct pardevice *pardev;	/* parport device */
+	struct hrtimer timer;
+	long port_write_time;		/* calibrated port write time (ns) */
+};
+
+static struct pps_generator_pp device = {
+	.pardev = NULL,
+};
+
+static int attached;
+
+/* calibrated time between a hrtimer event and the reaction */
+static long hrtimer_error = SAFETY_INTERVAL;
+
+/* the kernel hrtimer event */
+static enum hrtimer_restart hrtimer_event(struct hrtimer *timer)
+{
+	struct timespec expire_time, ts1, ts2, ts3, dts;
+	struct pps_generator_pp *dev;
+	struct parport *port;
+	long lim, delta;
+	unsigned long flags;
+
+	/* We have to disable interrupts here. The idea is to prevent
+	 * other interrupts on the same processor from introducing
+	 * random lags while polling the clock. getnstimeofday() takes
+	 * <1us on most machines, while other interrupt handlers can
+	 * potentially take much longer.
+	 *
+	 * NB: approx time with blocked interrupts =
+	 * send_delay + 3 * SAFETY_INTERVAL
+	 */
+	local_irq_save(flags);
+
+	/* first of all we get the time stamp... */
+	getnstimeofday(&ts1);
+	expire_time = ktime_to_timespec(hrtimer_get_softexpires(timer));
+	dev = container_of(timer, struct pps_generator_pp, timer);
+	lim = NSEC_PER_SEC - send_delay - dev->port_write_time;
+
+	/* check if we are late */
+	if (expire_time.tv_sec != ts1.tv_sec || ts1.tv_nsec > lim) {
+		local_irq_restore(flags);
+		pr_err("we are late this time %ld.%09ld\n",
+				ts1.tv_sec, ts1.tv_nsec);
+		goto done;
+	}
+
+	/* busy loop until the time is right for an assert edge */
+	do {
+		getnstimeofday(&ts2);
+	} while (expire_time.tv_sec == ts2.tv_sec && ts2.tv_nsec < lim);
+
+	/* set the signal */
+	port = dev->pardev->port;
+	port->ops->write_control(port, SIGNAL);
+
+	/* busy loop until the time is right for a clear edge */
+	lim = NSEC_PER_SEC - dev->port_write_time;
+	do {
+		getnstimeofday(&ts2);
+	} while (expire_time.tv_sec == ts2.tv_sec && ts2.tv_nsec < lim);
+
+	/* unset the signal */
+	port->ops->write_control(port, NO_SIGNAL);
+
+	getnstimeofday(&ts3);
+
+	local_irq_restore(flags);
+
+	/* update calibrated port write time */
+	dts = timespec_sub(ts3, ts2);
+	dev->port_write_time =
+		(dev->port_write_time + timespec_to_ns(&dts)) >> 1;
+
+done:
+	/* update calibrated hrtimer error */
+	dts = timespec_sub(ts1, expire_time);
+	delta = timespec_to_ns(&dts);
+	/* If the new error value is bigger than the old one, use the
+	 * new value; otherwise slowly move towards it. This way it
+	 * should be safe in bad conditions and efficient in good
+	 * conditions.
+	 */
+	if (delta >= hrtimer_error)
+		hrtimer_error = delta;
+	else
+		hrtimer_error = (3 * hrtimer_error + delta) >> 2;
+
+	/* update the hrtimer expire time */
+	hrtimer_set_expires(timer,
+			ktime_set(expire_time.tv_sec + 1,
+				NSEC_PER_SEC - (send_delay +
+				dev->port_write_time + SAFETY_INTERVAL +
+				2 * hrtimer_error)));
+
+	return HRTIMER_RESTART;
+}
+
+/* calibrate port write time */
+#define PORT_NTESTS_SHIFT	5
+static void calibrate_port(struct pps_generator_pp *dev)
+{
+	struct parport *port = dev->pardev->port;
+	int i;
+	long acc = 0;
+
+	for (i = 0; i < (1 << PORT_NTESTS_SHIFT); i++) {
+		struct timespec a, b;
+		unsigned long irq_flags;
+
+		local_irq_save(irq_flags);
+		getnstimeofday(&a);
+		port->ops->write_control(port, NO_SIGNAL);
+		getnstimeofday(&b);
+		local_irq_restore(irq_flags);
+
+		b = timespec_sub(b, a);
+		acc += timespec_to_ns(&b);
+	}
+
+	dev->port_write_time = acc >> PORT_NTESTS_SHIFT;
+	pr_info("port write takes %ldns\n", dev->port_write_time);
+}
+
+static inline ktime_t next_intr_time(struct pps_generator_pp *dev)
+{
+	struct timespec ts;
+
+	getnstimeofday(&ts);
+
+	return ktime_set(ts.tv_sec +
+			((ts.tv_nsec > 990 * NSEC_PER_MSEC) ? 1 : 0),
+			NSEC_PER_SEC - (send_delay +
+			dev->port_write_time + 3 * SAFETY_INTERVAL));
+}
+
+static void parport_attach(struct parport *port)
+{
+	if (attached) {
+		/* we already have a port */
+		return;
+	}
+
+	device.pardev = parport_register_device(port, KBUILD_MODNAME,
+			NULL, NULL, NULL, 0, &device);
+	if (!device.pardev) {
+		pr_err("couldn't register with %s\n", port->name);
+		return;
+	}
+
+	if (parport_claim_or_block(device.pardev) < 0) {
+		pr_err("couldn't claim %s\n", port->name);
+		goto err_unregister_dev;
+	}
+
+	pr_info("attached to %s\n", port->name);
+	attached = 1;
+
+	calibrate_port(&device);
+
+	hrtimer_init(&device.timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
+	device.timer.function = hrtimer_event;
+#ifdef CONFIG_PREEMPT_RT
+	/* hrtimer interrupt will run in the interrupt context with this */
+	device.timer.irqsafe = 1;
+#endif
+
+	hrtimer_start(&device.timer, next_intr_time(&device), HRTIMER_MODE_ABS);
+
+	return;
+
+err_unregister_dev:
+	parport_unregister_device(device.pardev);
+}
+
+static void parport_detach(struct parport *port)
+{
+	if (port->cad != device.pardev)
+		return;	/* not our port */
+
+	hrtimer_cancel(&device.timer);
+	parport_release(device.pardev);
+	parport_unregister_device(device.pardev);
+}
+
+static struct parport_driver pps_gen_parport_driver = {
+	.name = KBUILD_MODNAME,
+	.attach = parport_attach,
+	.detach = parport_detach,
+};
+
+/* module stuff */
+
+static int __init pps_gen_parport_init(void)
+{
+	int ret;
+
+	pr_info(DRVDESC "\n");
+
+	if (send_delay > SEND_DELAY_MAX) {
+		pr_err("delay value should not be greater"
+				" than %d\n", SEND_DELAY_MAX);
+		return -EINVAL;
+	}
+
+	ret = parport_register_driver(&pps_gen_parport_driver);
+	if (ret) {
+		pr_err("unable to register with parport\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+static void __exit pps_gen_parport_exit(void)
+{
+	parport_unregister_driver(&pps_gen_parport_driver);
+	pr_info("hrtimer avg error is %ldns\n", hrtimer_error);
+}
+
+module_init(pps_gen_parport_init);
+module_exit(pps_gen_parport_exit);
+
+MODULE_AUTHOR("Alexander Gordeev <lasaine@lvk.cs.msu.su>");
+MODULE_DESCRIPTION(DRVDESC);
+MODULE_LICENSE("GPL");
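To put concrete numbers on the timing budget above: with the default send_delay of 30000 ns, SAFETY_INTERVAL of 3000 ns and an illustrative calibrated port_write_time of about 1000 ns, next_intr_time() arms the hrtimer roughly 40 us before each second boundary (NSEC_PER_SEC - (30000 + 1000 + 3 * 3000)). hrtimer_event() then busy-waits with interrupts disabled until send_delay + port_write_time before the boundary, writes the assert level to the control port, busy-waits again until port_write_time before the boundary and writes the de-assert level, so the trailing (clear) edge of the pulse lands on the second boundary itself. The interrupts-off window is therefore approximately send_delay + 3 * SAFETY_INTERVAL, about 39 us, as the NB comment in hrtimer_event() states.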
diff --git a/drivers/pps/kapi.c b/drivers/pps/kapi.c
index 1aa02db..cba1b43 100644
--- a/drivers/pps/kapi.c
+++ b/drivers/pps/kapi.c
@@ -19,24 +19,20 @@
  *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/sched.h>
 #include <linux/time.h>
+#include <linux/timex.h>
 #include <linux/spinlock.h>
-#include <linux/idr.h>
 #include <linux/fs.h>
 #include <linux/pps_kernel.h>
 #include <linux/slab.h>
 
-/*
- * Global variables
- */
-
-DEFINE_SPINLOCK(pps_idr_lock);
-DEFINE_IDR(pps_idr);
+#include "kc.h"
 
 /*
  * Local functions
@@ -60,60 +56,6 @@
  * Exported functions
  */
 
-/* pps_get_source - find a PPS source
- * @source: the PPS source ID.
- *
- * This function is used to find an already registered PPS source into the
- * system.
- *
- * The function returns NULL if found nothing, otherwise it returns a pointer
- * to the PPS source data struct (the refcounter is incremented by 1).
- */
-
-struct pps_device *pps_get_source(int source)
-{
-	struct pps_device *pps;
-	unsigned long flags;
-
-	spin_lock_irqsave(&pps_idr_lock, flags);
-
-	pps = idr_find(&pps_idr, source);
-	if (pps != NULL)
-		atomic_inc(&pps->usage);
-
-	spin_unlock_irqrestore(&pps_idr_lock, flags);
-
-	return pps;
-}
-
-/* pps_put_source - free the PPS source data
- * @pps: a pointer to the PPS source.
- *
- * This function is used to free a PPS data struct if its refcount is 0.
- */
-
-void pps_put_source(struct pps_device *pps)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&pps_idr_lock, flags);
-	BUG_ON(atomic_read(&pps->usage) == 0);
-
-	if (!atomic_dec_and_test(&pps->usage)) {
-		pps = NULL;
-		goto exit;
-	}
-
-	/* No more reference to the PPS source. We can safely remove the
-	 * PPS data struct.
-	 */
-	idr_remove(&pps_idr, pps->id);
-
-exit:
-	spin_unlock_irqrestore(&pps_idr_lock, flags);
-	kfree(pps);
-}
-
 /* pps_register_source - add a PPS source in the system
  * @info: the PPS info struct
  * @default_params: the default PPS parameters of the new source
@@ -122,31 +64,31 @@
  * source is described by info's fields and it will have, as default PPS
  * parameters, the ones specified into default_params.
  *
- * The function returns, in case of success, the PPS source ID.
+ * The function returns, in case of success, the PPS device. Otherwise NULL.
  */
 
-int pps_register_source(struct pps_source_info *info, int default_params)
+struct pps_device *pps_register_source(struct pps_source_info *info,
+		int default_params)
 {
 	struct pps_device *pps;
-	int id;
 	int err;
 
 	/* Sanity checks */
 	if ((info->mode & default_params) != default_params) {
-		printk(KERN_ERR "pps: %s: unsupported default parameters\n",
+		pr_err("%s: unsupported default parameters\n",
 					info->name);
 		err = -EINVAL;
 		goto pps_register_source_exit;
 	}
 	if ((info->mode & (PPS_ECHOASSERT | PPS_ECHOCLEAR)) != 0 &&
 			info->echo == NULL) {
-		printk(KERN_ERR "pps: %s: echo function is not defined\n",
+		pr_err("%s: echo function is not defined\n",
 					info->name);
 		err = -EINVAL;
 		goto pps_register_source_exit;
 	}
 	if ((info->mode & (PPS_TSFMT_TSPEC | PPS_TSFMT_NTPFP)) == 0) {
-		printk(KERN_ERR "pps: %s: unspecified time format\n",
+		pr_err("%s: unspecified time format\n",
 					info->name);
 		err = -EINVAL;
 		goto pps_register_source_exit;
@@ -168,94 +110,48 @@
 
 	init_waitqueue_head(&pps->queue);
 	spin_lock_init(&pps->lock);
-	atomic_set(&pps->usage, 1);
-
-	/* Get new ID for the new PPS source */
-	if (idr_pre_get(&pps_idr, GFP_KERNEL) == 0) {
-		err = -ENOMEM;
-		goto kfree_pps;
-	}
-
-	spin_lock_irq(&pps_idr_lock);
-
-	/* Now really allocate the PPS source.
-	 * After idr_get_new() calling the new source will be freely available
-	 * into the kernel.
-	 */
-	err = idr_get_new(&pps_idr, pps, &id);
-	if (err < 0) {
-		spin_unlock_irq(&pps_idr_lock);
-		goto kfree_pps;
-	}
-
-	id = id & MAX_ID_MASK;
-	if (id >= PPS_MAX_SOURCES) {
-		spin_unlock_irq(&pps_idr_lock);
-
-		printk(KERN_ERR "pps: %s: too many PPS sources in the system\n",
-					info->name);
-		err = -EBUSY;
-		goto free_idr;
-	}
-	pps->id = id;
-
-	spin_unlock_irq(&pps_idr_lock);
 
 	/* Create the char device */
 	err = pps_register_cdev(pps);
 	if (err < 0) {
-		printk(KERN_ERR "pps: %s: unable to create char device\n",
+		pr_err("%s: unable to create char device\n",
 					info->name);
-		goto free_idr;
+		goto kfree_pps;
 	}
 
-	pr_info("new PPS source %s at ID %d\n", info->name, id);
+	dev_info(pps->dev, "new PPS source %s\n", info->name);
 
-	return id;
-
-free_idr:
-	spin_lock_irq(&pps_idr_lock);
-	idr_remove(&pps_idr, id);
-	spin_unlock_irq(&pps_idr_lock);
+	return pps;
 
 kfree_pps:
 	kfree(pps);
 
 pps_register_source_exit:
-	printk(KERN_ERR "pps: %s: unable to register source\n", info->name);
+	pr_err("%s: unable to register source\n", info->name);
 
-	return err;
+	return NULL;
 }
 EXPORT_SYMBOL(pps_register_source);
 
 /* pps_unregister_source - remove a PPS source from the system
- * @source: the PPS source ID
+ * @pps: the PPS source
  *
  * This function is used to remove a previously registered PPS source from
  * the system.
  */
 
-void pps_unregister_source(int source)
+void pps_unregister_source(struct pps_device *pps)
 {
-	struct pps_device *pps;
-
-	spin_lock_irq(&pps_idr_lock);
-	pps = idr_find(&pps_idr, source);
-
-	if (!pps) {
-		BUG();
-		spin_unlock_irq(&pps_idr_lock);
-		return;
-	}
-	spin_unlock_irq(&pps_idr_lock);
-
+	pps_kc_remove(pps);
 	pps_unregister_cdev(pps);
-	pps_put_source(pps);
+
+	/* don't have to kfree(pps) here because it will be done on
+	 * device destruction */
 }
 EXPORT_SYMBOL(pps_unregister_source);
 
 /* pps_event - register a PPS event into the system
- * @source: the PPS source ID
+ * @pps: the PPS device
  * @ts: the event timestamp
  * @event: the event type
  * @data: userdef pointer
@@ -263,78 +159,72 @@
  * This function is used by each PPS client in order to register a new
  * PPS event into the system (it's usually called inside an IRQ handler).
  *
- * If an echo function is associated with the PPS source it will be called
+ * If an echo function is associated with the PPS device it will be called
  * as:
- *	pps->info.echo(source, event, data);
+ *	pps->info.echo(pps, event, data);
  */
-
-void pps_event(int source, struct pps_ktime *ts, int event, void *data)
+void pps_event(struct pps_device *pps, struct pps_event_time *ts, int event,
+		void *data)
 {
-	struct pps_device *pps;
 	unsigned long flags;
 	int captured = 0;
+	struct pps_ktime ts_real;
 
-	if ((event & (PPS_CAPTUREASSERT | PPS_CAPTURECLEAR)) == 0) {
-		printk(KERN_ERR "pps: unknown event (%x) for source %d\n",
-			event, source);
-		return;
-	}
+	/* check event type */
+	BUG_ON((event & (PPS_CAPTUREASSERT | PPS_CAPTURECLEAR)) == 0);
 
-	pps = pps_get_source(source);
-	if (!pps)
-		return;
+	dev_dbg(pps->dev, "PPS event at %ld.%09ld\n",
+			ts->ts_real.tv_sec, ts->ts_real.tv_nsec);
 
-	pr_debug("PPS event on source %d at %llu.%06u\n",
-			pps->id, (unsigned long long) ts->sec, ts->nsec);
+	timespec_to_pps_ktime(&ts_real, ts->ts_real);
 
 	spin_lock_irqsave(&pps->lock, flags);
 
 	/* Must call the echo function? */
 	if ((pps->params.mode & (PPS_ECHOASSERT | PPS_ECHOCLEAR)))
-		pps->info.echo(source, event, data);
+		pps->info.echo(pps, event, data);
 
 	/* Check the event */
 	pps->current_mode = pps->params.mode;
-	if ((event & PPS_CAPTUREASSERT) &
-			(pps->params.mode & PPS_CAPTUREASSERT)) {
+	if (event & pps->params.mode & PPS_CAPTUREASSERT) {
 		/* We have to add an offset? */
 		if (pps->params.mode & PPS_OFFSETASSERT)
-			pps_add_offset(ts, &pps->params.assert_off_tu);
+			pps_add_offset(&ts_real,
+					&pps->params.assert_off_tu);
 
 		/* Save the time stamp */
-		pps->assert_tu = *ts;
+		pps->assert_tu = ts_real;
 		pps->assert_sequence++;
-		pr_debug("capture assert seq #%u for source %d\n",
-			pps->assert_sequence, source);
+		dev_dbg(pps->dev, "capture assert seq #%u\n",
+			pps->assert_sequence);
 
 		captured = ~0;
 	}
-	if ((event & PPS_CAPTURECLEAR) &
-			(pps->params.mode & PPS_CAPTURECLEAR)) {
+	if (event & pps->params.mode & PPS_CAPTURECLEAR) {
 		/* We have to add an offset? */
 		if (pps->params.mode & PPS_OFFSETCLEAR)
-			pps_add_offset(ts, &pps->params.clear_off_tu);
+			pps_add_offset(&ts_real,
+					&pps->params.clear_off_tu);
 
 		/* Save the time stamp */
-		pps->clear_tu = *ts;
+		pps->clear_tu = ts_real;
 		pps->clear_sequence++;
-		pr_debug("capture clear seq #%u for source %d\n",
-			pps->clear_sequence, source);
+		dev_dbg(pps->dev, "capture clear seq #%u\n",
+			pps->clear_sequence);
 
 		captured = ~0;
 	}
 
-	/* Wake up iif captured somthing */
+	pps_kc_event(pps, ts, event);
+
+	/* Wake up if captured something */
 	if (captured) {
-		pps->go = ~0;
-		wake_up_interruptible(&pps->queue);
+		pps->last_ev++;
+		wake_up_interruptible_all(&pps->queue);
 
 		kill_fasync(&pps->async_queue, SIGIO, POLL_IN);
 	}
 
 	spin_unlock_irqrestore(&pps->lock, flags);
-
-	/* Now we can release the PPS source for (possible) deregistration */
-	pps_put_source(pps);
 }
 EXPORT_SYMBOL(pps_event);
diff --git a/drivers/pps/kc.c b/drivers/pps/kc.c
new file mode 100644
index 0000000..079e930
--- /dev/null
+++ b/drivers/pps/kc.c
@@ -0,0 +1,122 @@
+/*
+ * PPS kernel consumer API
+ *
+ * Copyright (C) 2009-2010   Alexander Gordeev <lasaine@lvk.cs.msu.su>
+ *
+ *   This program is free software; you can redistribute it and/or modify
+ *   it under the terms of the GNU General Public License as published by
+ *   the Free Software Foundation; either version 2 of the License, or
+ *   (at your option) any later version.
+ *
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *   GNU General Public License for more details.
+ *
+ *   You should have received a copy of the GNU General Public License
+ *   along with this program; if not, write to the Free Software
+ *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/pps_kernel.h>
+
+#include "kc.h"
+
+/*
+ * Global variables
+ */
+
+/* state variables to bind kernel consumer */
+DEFINE_SPINLOCK(pps_kc_hardpps_lock);
+/* PPS API (RFC 2783): current source and mode for kernel consumer */
+struct pps_device *pps_kc_hardpps_dev;	/* unique pointer to device */
+int pps_kc_hardpps_mode;		/* mode bits for kernel consumer */
+
+/* pps_kc_bind - control PPS kernel consumer binding
+ * @pps: the PPS source
+ * @bind_args: kernel consumer bind parameters
+ *
+ * This function is used to bind or unbind the PPS kernel consumer according
+ * to the supplied parameters. Should not be called in interrupt context.
+ */
+int pps_kc_bind(struct pps_device *pps, struct pps_bind_args *bind_args)
+{
+	/* Check if another consumer is already bound */
+	spin_lock_irq(&pps_kc_hardpps_lock);
+
+	if (bind_args->edge == 0)
+		if (pps_kc_hardpps_dev == pps) {
+			pps_kc_hardpps_mode = 0;
+			pps_kc_hardpps_dev = NULL;
+			spin_unlock_irq(&pps_kc_hardpps_lock);
+			dev_info(pps->dev, "unbound kernel"
+					" consumer\n");
+		} else {
+			spin_unlock_irq(&pps_kc_hardpps_lock);
+			dev_err(pps->dev, "selected kernel consumer"
+					" is not bound\n");
+			return -EINVAL;
+		}
+	else
+		if (pps_kc_hardpps_dev == NULL ||
+				pps_kc_hardpps_dev == pps) {
+			pps_kc_hardpps_mode = bind_args->edge;
+			pps_kc_hardpps_dev = pps;
+			spin_unlock_irq(&pps_kc_hardpps_lock);
+			dev_info(pps->dev, "bound kernel consumer: "
+				"edge=0x%x\n", bind_args->edge);
+		} else {
+			spin_unlock_irq(&pps_kc_hardpps_lock);
+			dev_err(pps->dev, "another kernel consumer"
+					" is already bound\n");
+			return -EINVAL;
+		}
+
+	return 0;
+}
+
+/* pps_kc_remove - unbind kernel consumer on PPS source removal
+ * @pps: the PPS source
+ *
+ * This function is used to disable the kernel consumer on PPS source
+ * removal if this source was bound to the PPS kernel consumer. It can be
+ * called safely on any source. Should not be called in interrupt context.
+ */
+void pps_kc_remove(struct pps_device *pps)
+{
+	spin_lock_irq(&pps_kc_hardpps_lock);
+	if (pps == pps_kc_hardpps_dev) {
+		pps_kc_hardpps_mode = 0;
+		pps_kc_hardpps_dev = NULL;
+		spin_unlock_irq(&pps_kc_hardpps_lock);
+		dev_info(pps->dev, "unbound kernel consumer"
+				" on device removal\n");
+	} else
+		spin_unlock_irq(&pps_kc_hardpps_lock);
+}
+
+/* pps_kc_event - call hardpps() on PPS event
+ * @pps: the PPS source
+ * @ts: PPS event timestamp
+ * @event: PPS event edge
+ *
+ * This function calls hardpps() when an event from the bound PPS source occurs.
+ */
+void pps_kc_event(struct pps_device *pps, struct pps_event_time *ts,
+		int event)
+{
+	unsigned long flags;
+
+	/* Pass some events to kernel consumer if activated */
+	spin_lock_irqsave(&pps_kc_hardpps_lock, flags);
+	if (pps == pps_kc_hardpps_dev && event & pps_kc_hardpps_mode)
+		hardpps(&ts->ts_real, &ts->ts_raw);
+	spin_unlock_irqrestore(&pps_kc_hardpps_lock, flags);
+}
diff --git a/drivers/pps/kc.h b/drivers/pps/kc.h
new file mode 100644
index 0000000..d296fcd
--- /dev/null
+++ b/drivers/pps/kc.h
@@ -0,0 +1,46 @@
+/*
+ * PPS kernel consumer API header
+ *
+ * Copyright (C) 2009-2010   Alexander Gordeev <lasaine@lvk.cs.msu.su>
+ *
+ *   This program is free software; you can redistribute it and/or modify
+ *   it under the terms of the GNU General Public License as published by
+ *   the Free Software Foundation; either version 2 of the License, or
+ *   (at your option) any later version.
+ *
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *   GNU General Public License for more details.
+ *
+ *   You should have received a copy of the GNU General Public License
+ *   along with this program; if not, write to the Free Software
+ *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef LINUX_PPS_KC_H
+#define LINUX_PPS_KC_H
+
+#include <linux/errno.h>
+#include <linux/pps_kernel.h>
+
+#ifdef CONFIG_NTP_PPS
+
+extern int pps_kc_bind(struct pps_device *pps,
+		struct pps_bind_args *bind_args);
+extern void pps_kc_remove(struct pps_device *pps);
+extern void pps_kc_event(struct pps_device *pps,
+		struct pps_event_time *ts, int event);
+
+
+#else /* CONFIG_NTP_PPS */
+
+static inline int pps_kc_bind(struct pps_device *pps,
+		struct pps_bind_args *bind_args) { return -EOPNOTSUPP; }
+static inline void pps_kc_remove(struct pps_device *pps) {}
+static inline void pps_kc_event(struct pps_device *pps,
+		struct pps_event_time *ts, int event) {}
+
+#endif /* CONFIG_NTP_PPS */
+
+#endif /* LINUX_PPS_KC_H */
diff --git a/drivers/pps/pps.c b/drivers/pps/pps.c
index ca5183b..2baadd2 100644
--- a/drivers/pps/pps.c
+++ b/drivers/pps/pps.c
@@ -19,6 +19,7 @@
  *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
 #include <linux/kernel.h>
 #include <linux/module.h>
@@ -26,9 +27,13 @@
 #include <linux/sched.h>
 #include <linux/uaccess.h>
 #include <linux/idr.h>
+#include <linux/mutex.h>
 #include <linux/cdev.h>
 #include <linux/poll.h>
 #include <linux/pps_kernel.h>
+#include <linux/slab.h>
+
+#include "kc.h"
 
 /*
  * Local variables
@@ -37,6 +42,9 @@
 static dev_t pps_devt;
 static struct class *pps_class;
 
+static DEFINE_MUTEX(pps_idr_lock);
+static DEFINE_IDR(pps_idr);
+
 /*
  * Char device methods
  */
@@ -61,15 +69,13 @@
 {
 	struct pps_device *pps = file->private_data;
 	struct pps_kparams params;
-	struct pps_fdata fdata;
-	unsigned long ticks;
 	void __user *uarg = (void __user *) arg;
 	int __user *iuarg = (int __user *) arg;
 	int err;
 
 	switch (cmd) {
 	case PPS_GETPARAMS:
-		pr_debug("PPS_GETPARAMS: source %d\n", pps->id);
+		dev_dbg(pps->dev, "PPS_GETPARAMS\n");
 
 		spin_lock_irq(&pps->lock);
 
@@ -85,7 +91,7 @@
 		break;
 
 	case PPS_SETPARAMS:
-		pr_debug("PPS_SETPARAMS: source %d\n", pps->id);
+		dev_dbg(pps->dev, "PPS_SETPARAMS\n");
 
 		/* Check the capabilities */
 		if (!capable(CAP_SYS_TIME))
@@ -95,14 +101,14 @@
 		if (err)
 			return -EFAULT;
 		if (!(params.mode & (PPS_CAPTUREASSERT | PPS_CAPTURECLEAR))) {
-			pr_debug("capture mode unspecified (%x)\n",
+			dev_dbg(pps->dev, "capture mode unspecified (%x)\n",
 								params.mode);
 			return -EINVAL;
 		}
 
 		/* Check for supported capabilities */
 		if ((params.mode & ~pps->info.mode) != 0) {
-			pr_debug("unsupported capabilities (%x)\n",
+			dev_dbg(pps->dev, "unsupported capabilities (%x)\n",
 								params.mode);
 			return -EINVAL;
 		}
@@ -115,7 +121,7 @@
 		/* Restore the read only parameters */
 		if ((params.mode & (PPS_TSFMT_TSPEC | PPS_TSFMT_NTPFP)) == 0) {
 			/* section 3.3 of RFC 2783 interpreted */
-			pr_debug("time format unspecified (%x)\n",
+			dev_dbg(pps->dev, "time format unspecified (%x)\n",
 								params.mode);
 			pps->params.mode |= PPS_TSFMT_TSPEC;
 		}
@@ -128,7 +134,7 @@
 		break;
 
 	case PPS_GETCAP:
-		pr_debug("PPS_GETCAP: source %d\n", pps->id);
+		dev_dbg(pps->dev, "PPS_GETCAP\n");
 
 		err = put_user(pps->info.mode, iuarg);
 		if (err)
@@ -136,20 +142,26 @@
 
 		break;
 
-	case PPS_FETCH:
-		pr_debug("PPS_FETCH: source %d\n", pps->id);
+	case PPS_FETCH: {
+		struct pps_fdata fdata;
+		unsigned int ev;
+
+		dev_dbg(pps->dev, "PPS_FETCH\n");
 
 		err = copy_from_user(&fdata, uarg, sizeof(struct pps_fdata));
 		if (err)
 			return -EFAULT;
 
-		pps->go = 0;
+		ev = pps->last_ev;
 
 		/* Manage the timeout */
 		if (fdata.timeout.flags & PPS_TIME_INVALID)
-			err = wait_event_interruptible(pps->queue, pps->go);
+			err = wait_event_interruptible(pps->queue,
+					ev != pps->last_ev);
 		else {
-			pr_debug("timeout %lld.%09d\n",
+			unsigned long ticks;
+
+			dev_dbg(pps->dev, "timeout %lld.%09d\n",
 					(long long) fdata.timeout.sec,
 					fdata.timeout.nsec);
 			ticks = fdata.timeout.sec * HZ;
@@ -157,7 +169,9 @@
 
 			if (ticks != 0) {
 				err = wait_event_interruptible_timeout(
-						pps->queue, pps->go, ticks);
+						pps->queue,
+						ev != pps->last_ev,
+						ticks);
 				if (err == 0)
 					return -ETIMEDOUT;
 			}
@@ -165,7 +179,7 @@
 
 		/* Check for pending signals */
 		if (err == -ERESTARTSYS) {
-			pr_debug("pending signal caught\n");
+			dev_dbg(pps->dev, "pending signal caught\n");
 			return -EINTR;
 		}
 
@@ -185,10 +199,44 @@
 			return -EFAULT;
 
 		break;
+	}
+	case PPS_KC_BIND: {
+		struct pps_bind_args bind_args;
 
+		dev_dbg(pps->dev, "PPS_KC_BIND\n");
+
+		/* Check the capabilities */
+		if (!capable(CAP_SYS_TIME))
+			return -EPERM;
+
+		if (copy_from_user(&bind_args, uarg,
+					sizeof(struct pps_bind_args)))
+			return -EFAULT;
+
+		/* Check for supported capabilities */
+		if ((bind_args.edge & ~pps->info.mode) != 0) {
+			dev_err(pps->dev, "unsupported capabilities (%x)\n",
+					bind_args.edge);
+			return -EINVAL;
+		}
+
+		/* Validate parameters roughly */
+		if (bind_args.tsformat != PPS_TSFMT_TSPEC ||
+				(bind_args.edge & ~PPS_CAPTUREBOTH) != 0 ||
+				bind_args.consumer != PPS_KC_HARDPPS) {
+			dev_err(pps->dev, "invalid kernel consumer bind"
+					" parameters (%x)\n", bind_args.edge);
+			return -EINVAL;
+		}
+
+		err = pps_kc_bind(pps, &bind_args);
+		if (err < 0)
+			return err;
+
+		break;
+	}
 	default:
 		return -ENOTTY;
-		break;
 	}
 
 	return 0;
@@ -198,12 +246,6 @@
 {
 	struct pps_device *pps = container_of(inode->i_cdev,
 						struct pps_device, cdev);
-	int found;
-
-	found = pps_get_source(pps->id) != 0;
-	if (!found)
-		return -ENODEV;
-
 	file->private_data = pps;
 
 	return 0;
@@ -211,11 +253,6 @@
 
 static int pps_cdev_release(struct inode *inode, struct file *file)
 {
-	struct pps_device *pps = file->private_data;
-
-	/* Free the PPS source and wake up (possible) deregistration */
-	pps_put_source(pps);
-
 	return 0;
 }
 
@@ -233,25 +270,67 @@
 	.release	= pps_cdev_release,
 };
 
+static void pps_device_destruct(struct device *dev)
+{
+	struct pps_device *pps = dev_get_drvdata(dev);
+
+	/* release the id only here so it cannot be reused while the
+	 * device is still in use */
+	mutex_lock(&pps_idr_lock);
+	idr_remove(&pps_idr, pps->id);
+	mutex_unlock(&pps_idr_lock);
+
+	kfree(dev);
+	kfree(pps);
+}
+
 int pps_register_cdev(struct pps_device *pps)
 {
 	int err;
+	dev_t devt;
 
-	pps->devno = MKDEV(MAJOR(pps_devt), pps->id);
+	mutex_lock(&pps_idr_lock);
+	/* Get new ID for the new PPS source */
+	if (idr_pre_get(&pps_idr, GFP_KERNEL) == 0) {
+		mutex_unlock(&pps_idr_lock);
+		return -ENOMEM;
+	}
+
+	/* Now really allocate the PPS source.
+	 * After calling idr_get_new() the new source will be freely
+	 * available inside the kernel.
+	 */
+	err = idr_get_new(&pps_idr, pps, &pps->id);
+	mutex_unlock(&pps_idr_lock);
+
+	if (err < 0)
+		return err;
+
+	pps->id &= MAX_ID_MASK;
+	if (pps->id >= PPS_MAX_SOURCES) {
+		pr_err("%s: too many PPS sources in the system\n",
+					pps->info.name);
+		err = -EBUSY;
+		goto free_idr;
+	}
+
+	devt = MKDEV(MAJOR(pps_devt), pps->id);
+
 	cdev_init(&pps->cdev, &pps_cdev_fops);
 	pps->cdev.owner = pps->info.owner;
 
-	err = cdev_add(&pps->cdev, pps->devno, 1);
+	err = cdev_add(&pps->cdev, devt, 1);
 	if (err) {
-		printk(KERN_ERR "pps: %s: failed to add char device %d:%d\n",
+		pr_err("%s: failed to add char device %d:%d\n",
 				pps->info.name, MAJOR(pps_devt), pps->id);
-		return err;
+		goto free_idr;
 	}
-	pps->dev = device_create(pps_class, pps->info.dev, pps->devno, NULL,
+	pps->dev = device_create(pps_class, pps->info.dev, devt, pps,
 							"pps%d", pps->id);
 	if (IS_ERR(pps->dev))
 		goto del_cdev;
-	dev_set_drvdata(pps->dev, pps);
+
+	pps->dev->release = pps_device_destruct;
 
 	pr_debug("source %s got cdev (%d:%d)\n", pps->info.name,
 			MAJOR(pps_devt), pps->id);
@@ -261,12 +340,17 @@
 del_cdev:
 	cdev_del(&pps->cdev);
 
+free_idr:
+	mutex_lock(&pps_idr_lock);
+	idr_remove(&pps_idr, pps->id);
+	mutex_unlock(&pps_idr_lock);
+
 	return err;
 }
 
 void pps_unregister_cdev(struct pps_device *pps)
 {
-	device_destroy(pps_class, pps->devno);
+	device_destroy(pps_class, pps->dev->devt);
 	cdev_del(&pps->cdev);
 }
 
@@ -286,14 +370,14 @@
 
 	pps_class = class_create(THIS_MODULE, "pps");
 	if (!pps_class) {
-		printk(KERN_ERR "pps: failed to allocate class\n");
+		pr_err("failed to allocate class\n");
 		return -ENOMEM;
 	}
 	pps_class->dev_attrs = pps_attrs;
 
 	err = alloc_chrdev_region(&pps_devt, 0, PPS_MAX_SOURCES, "pps");
 	if (err < 0) {
-		printk(KERN_ERR "pps: failed to allocate char device region\n");
+		pr_err("failed to allocate char device region\n");
 		goto remove_class;
 	}
 
diff --git a/drivers/ps3/Makefile b/drivers/ps3/Makefile
index ccea15c..50cb1e1 100644
--- a/drivers/ps3/Makefile
+++ b/drivers/ps3/Makefile
@@ -1,6 +1,6 @@
 obj-$(CONFIG_PS3_VUART) += ps3-vuart.o
 obj-$(CONFIG_PS3_PS3AV) += ps3av_mod.o
-ps3av_mod-objs		+= ps3av.o ps3av_cmd.o
+ps3av_mod-y		:= ps3av.o ps3av_cmd.o
 obj-$(CONFIG_PPC_PS3) += sys-manager-core.o
 obj-$(CONFIG_PS3_SYS_MANAGER) += ps3-sys-manager.o
 obj-$(CONFIG_PS3_STORAGE) += ps3stor_lib.o
diff --git a/drivers/rapidio/rio-scan.c b/drivers/rapidio/rio-scan.c
index 1eb82c4..467e82b 100644
--- a/drivers/rapidio/rio-scan.c
+++ b/drivers/rapidio/rio-scan.c
@@ -46,7 +46,6 @@
 DEFINE_SPINLOCK(rio_global_list_lock);
 
 static int next_destid = 0;
-static int next_switchid = 0;
 static int next_net = 0;
 static int next_comptag = 1;
 
@@ -378,12 +377,30 @@
 	struct rio_dev *rdev;
 	struct rio_switch *rswitch = NULL;
 	int result, rdid;
+	size_t size;
+	u32 swpinfo = 0;
 
-	rdev = kzalloc(sizeof(struct rio_dev), GFP_KERNEL);
+	size = sizeof(struct rio_dev);
+	if (rio_mport_read_config_32(port, destid, hopcount,
+				     RIO_PEF_CAR, &result))
+		return NULL;
+
+	if (result & (RIO_PEF_SWITCH | RIO_PEF_MULTIPORT)) {
+		rio_mport_read_config_32(port, destid, hopcount,
+					 RIO_SWP_INFO_CAR, &swpinfo);
+		if (result & RIO_PEF_SWITCH) {
+			size += (RIO_GET_TOTAL_PORTS(swpinfo) *
+				sizeof(rswitch->nextdev[0])) + sizeof(*rswitch);
+		}
+	}
+
+	rdev = kzalloc(size, GFP_KERNEL);
 	if (!rdev)
 		return NULL;
 
 	rdev->net = net;
+	rdev->pef = result;
+	rdev->swpinfo = swpinfo;
 	rio_mport_read_config_32(port, destid, hopcount, RIO_DEV_ID_CAR,
 				 &result);
 	rdev->did = result >> 16;
@@ -397,8 +414,6 @@
 	rio_mport_read_config_32(port, destid, hopcount, RIO_ASM_INFO_CAR,
 				 &result);
 	rdev->asm_rev = result >> 16;
-	rio_mport_read_config_32(port, destid, hopcount, RIO_PEF_CAR,
-				 &rdev->pef);
 	if (rdev->pef & RIO_PEF_EXT_FEATURES) {
 		rdev->efptr = result & 0xffff;
 		rdev->phys_efptr = rio_mport_get_physefb(port, 0, destid,
@@ -408,11 +423,6 @@
 						hopcount, RIO_EFB_ERR_MGMNT);
 	}
 
-	if (rdev->pef & (RIO_PEF_SWITCH | RIO_PEF_MULTIPORT)) {
-		rio_mport_read_config_32(port, destid, hopcount,
-					 RIO_SWP_INFO_CAR, &rdev->swpinfo);
-	}
-
 	rio_mport_read_config_32(port, destid, hopcount, RIO_SRC_OPS_CAR,
 				 &rdev->src_ops);
 	rio_mport_read_config_32(port, destid, hopcount, RIO_DST_OPS_CAR,
@@ -427,6 +437,10 @@
 		rio_mport_write_config_32(port, destid, hopcount,
 					  RIO_COMPONENT_TAG_CSR, next_comptag);
 		rdev->comp_tag = next_comptag++;
+	} else {
+		rio_mport_read_config_32(port, destid, hopcount,
+					 RIO_COMPONENT_TAG_CSR,
+					 &rdev->comp_tag);
 	}
 
 	if (rio_device_has_destid(port, rdev->src_ops, rdev->dst_ops)) {
@@ -437,21 +451,20 @@
 				next_destid++;
 		} else
 			rdev->destid = rio_get_device_id(port, destid, hopcount);
-	} else
-		/* Switch device has an associated destID */
-		rdev->destid = RIO_INVALID_DESTID;
+
+		rdev->hopcount = 0xff;
+	} else {
+		/* Switch device has an associated destID which
+		 * will be adjusted later
+		 */
+		rdev->destid = destid;
+		rdev->hopcount = hopcount;
+	}
 
 	/* If a PE has both switch and other functions, show it as a switch */
 	if (rio_is_switch(rdev)) {
-		rswitch = kzalloc(sizeof(*rswitch) +
-				  RIO_GET_TOTAL_PORTS(rdev->swpinfo) *
-				  sizeof(rswitch->nextdev[0]),
-				  GFP_KERNEL);
-		if (!rswitch)
-			goto cleanup;
-		rswitch->switchid = next_switchid;
-		rswitch->hopcount = hopcount;
-		rswitch->destid = destid;
+		rswitch = rdev->rswitch;
+		rswitch->switchid = rdev->comp_tag & RIO_CTAG_UDEVID;
 		rswitch->port_ok = 0;
 		rswitch->route_table = kzalloc(sizeof(u8)*
 					RIO_MAX_ROUTE_ENTRIES(port->sys_size),
@@ -462,15 +475,13 @@
 		for (rdid = 0; rdid < RIO_MAX_ROUTE_ENTRIES(port->sys_size);
 				rdid++)
 			rswitch->route_table[rdid] = RIO_INVALID_ROUTE;
-		rdev->rswitch = rswitch;
-		rswitch->rdev = rdev;
 		dev_set_name(&rdev->dev, "%02x:s:%04x", rdev->net->id,
-			     rdev->rswitch->switchid);
+			     rswitch->switchid);
 		rio_switch_init(rdev, do_enum);
 
-		if (do_enum && rdev->rswitch->clr_table)
-			rdev->rswitch->clr_table(port, destid, hopcount,
-						 RIO_GLOBAL_TABLE);
+		if (do_enum && rswitch->clr_table)
+			rswitch->clr_table(port, destid, hopcount,
+					   RIO_GLOBAL_TABLE);
 
 		list_add_tail(&rswitch->node, &rio_switches);
 
@@ -506,10 +517,9 @@
 	return rdev;
 
 cleanup:
-	if (rswitch) {
+	if (rswitch->route_table)
 		kfree(rswitch->route_table);
-		kfree(rswitch);
-	}
+
 	kfree(rdev);
 	return NULL;
 }
@@ -632,8 +642,7 @@
 
 /**
  * rio_route_add_entry- Add a route entry to a switch routing table
- * @mport: Master port to send transaction
- * @rswitch: Switch device
+ * @rdev: RIO device
  * @table: Routing table ID
  * @route_destid: Destination ID to be routed
  * @route_port: Port number to be routed
@@ -647,31 +656,31 @@
  * on failure.
  */
 static int
-rio_route_add_entry(struct rio_mport *mport, struct rio_switch *rswitch,
+rio_route_add_entry(struct rio_dev *rdev,
 		    u16 table, u16 route_destid, u8 route_port, int lock)
 {
 	int rc;
 
 	if (lock) {
-		rc = rio_lock_device(mport, rswitch->destid,
-				     rswitch->hopcount, 1000);
+		rc = rio_lock_device(rdev->net->hport, rdev->destid,
+				     rdev->hopcount, 1000);
 		if (rc)
 			return rc;
 	}
 
-	rc = rswitch->add_entry(mport, rswitch->destid,
-					rswitch->hopcount, table,
-					route_destid, route_port);
+	rc = rdev->rswitch->add_entry(rdev->net->hport, rdev->destid,
+				      rdev->hopcount, table,
+				      route_destid, route_port);
 	if (lock)
-		rio_unlock_device(mport, rswitch->destid, rswitch->hopcount);
+		rio_unlock_device(rdev->net->hport, rdev->destid,
+				  rdev->hopcount);
 
 	return rc;
 }
 
 /**
  * rio_route_get_entry- Read a route entry in a switch routing table
- * @mport: Master port to send transaction
- * @rswitch: Switch device
+ * @rdev: RIO device
  * @table: Routing table ID
  * @route_destid: Destination ID to be routed
  * @route_port: Pointer to read port number into
@@ -685,23 +694,24 @@
  * on failure.
  */
 static int
-rio_route_get_entry(struct rio_mport *mport, struct rio_switch *rswitch, u16 table,
+rio_route_get_entry(struct rio_dev *rdev, u16 table,
 		    u16 route_destid, u8 *route_port, int lock)
 {
 	int rc;
 
 	if (lock) {
-		rc = rio_lock_device(mport, rswitch->destid,
-				     rswitch->hopcount, 1000);
+		rc = rio_lock_device(rdev->net->hport, rdev->destid,
+				     rdev->hopcount, 1000);
 		if (rc)
 			return rc;
 	}
 
-	rc = rswitch->get_entry(mport, rswitch->destid,
-					rswitch->hopcount, table,
-					route_destid, route_port);
+	rc = rdev->rswitch->get_entry(rdev->net->hport, rdev->destid,
+				      rdev->hopcount, table,
+				      route_destid, route_port);
 	if (lock)
-		rio_unlock_device(mport, rswitch->destid, rswitch->hopcount);
+		rio_unlock_device(rdev->net->hport, rdev->destid,
+				  rdev->hopcount);
 
 	return rc;
 }
@@ -809,16 +819,15 @@
 		return -1;
 
 	if (rio_is_switch(rdev)) {
-		next_switchid++;
 		sw_inport = RIO_GET_PORT_NUM(rdev->swpinfo);
-		rio_route_add_entry(port, rdev->rswitch, RIO_GLOBAL_TABLE,
+		rio_route_add_entry(rdev, RIO_GLOBAL_TABLE,
 				    port->host_deviceid, sw_inport, 0);
 		rdev->rswitch->route_table[port->host_deviceid] = sw_inport;
 
 		for (destid = 0; destid < next_destid; destid++) {
 			if (destid == port->host_deviceid)
 				continue;
-			rio_route_add_entry(port, rdev->rswitch, RIO_GLOBAL_TABLE,
+			rio_route_add_entry(rdev, RIO_GLOBAL_TABLE,
 					    destid, sw_inport, 0);
 			rdev->rswitch->route_table[destid] = sw_inport;
 		}
@@ -850,8 +859,7 @@
 				    "RIO: scanning device on port %d\n",
 				    port_num);
 				rdev->rswitch->port_ok |= (1 << port_num);
-				rio_route_add_entry(port, rdev->rswitch,
-						RIO_GLOBAL_TABLE,
+				rio_route_add_entry(rdev, RIO_GLOBAL_TABLE,
 						RIO_ANY_DESTID(port->sys_size),
 						port_num, 0);
 
@@ -865,7 +873,7 @@
 					     destid < next_destid; destid++) {
 						if (destid == port->host_deviceid)
 							continue;
-						rio_route_add_entry(port, rdev->rswitch,
+						rio_route_add_entry(rdev,
 								    RIO_GLOBAL_TABLE,
 								    destid,
 								    port_num,
@@ -904,7 +912,7 @@
 				next_destid++;
 		}
 
-		rdev->rswitch->destid = sw_destid;
+		rdev->destid = sw_destid;
 	} else
 		pr_debug("RIO: found %s (vid %4.4x did %4.4x)\n",
 		    rio_name(rdev), rdev->vid, rdev->did);
@@ -941,7 +949,7 @@
  */
 static int __devinit
 rio_disc_peer(struct rio_net *net, struct rio_mport *port, u16 destid,
-	      u8 hopcount)
+	      u8 hopcount, struct rio_dev *prev, int prev_port)
 {
 	u8 port_num, route_port;
 	struct rio_dev *rdev;
@@ -951,14 +959,15 @@
 	if ((rdev = rio_setup_device(net, port, destid, hopcount, 0))) {
 		/* Add device to the global and bus/net specific list. */
 		list_add_tail(&rdev->net_list, &net->devices);
+		rdev->prev = prev;
+		if (prev && rio_is_switch(prev))
+			prev->rswitch->nextdev[prev_port] = rdev;
 	} else
 		return -1;
 
 	if (rio_is_switch(rdev)) {
-		next_switchid++;
-
 		/* Associated destid is how we accessed this switch */
-		rdev->rswitch->destid = destid;
+		rdev->destid = destid;
 
 		pr_debug(
 		    "RIO: found %s (vid %4.4x did %4.4x) with %d ports\n",
@@ -981,7 +990,7 @@
 				for (ndestid = 0;
 				     ndestid < RIO_ANY_DESTID(port->sys_size);
 				     ndestid++) {
-					rio_route_get_entry(port, rdev->rswitch,
+					rio_route_get_entry(rdev,
 							    RIO_GLOBAL_TABLE,
 							    ndestid,
 							    &route_port, 0);
@@ -992,8 +1001,8 @@
 				if (ndestid == RIO_ANY_DESTID(port->sys_size))
 					continue;
 				rio_unlock_device(port, destid, hopcount);
-				if (rio_disc_peer
-				    (net, port, ndestid, hopcount + 1) < 0)
+				if (rio_disc_peer(net, port, ndestid,
+					hopcount + 1, rdev, port_num) < 0)
 					return -1;
 			}
 		}
@@ -1069,14 +1078,14 @@
  */
 static void rio_update_route_tables(struct rio_mport *port)
 {
-	struct rio_dev *rdev;
+	struct rio_dev *rdev, *swrdev;
 	struct rio_switch *rswitch;
 	u8 sport;
 	u16 destid;
 
 	list_for_each_entry(rdev, &rio_devices, global_list) {
 
-		destid = (rio_is_switch(rdev))?rdev->rswitch->destid:rdev->destid;
+		destid = rdev->destid;
 
 		list_for_each_entry(rswitch, &rio_switches, node) {
 
@@ -1084,14 +1093,16 @@
 				continue;
 
 			if (RIO_INVALID_ROUTE == rswitch->route_table[destid]) {
+				swrdev = sw_to_rio_dev(rswitch);
+
 				/* Skip if destid ends in empty switch*/
-				if (rswitch->destid == destid)
+				if (swrdev->destid == destid)
 					continue;
 
-				sport = RIO_GET_PORT_NUM(rswitch->rdev->swpinfo);
+				sport = RIO_GET_PORT_NUM(swrdev->swpinfo);
 
 				if (rswitch->add_entry)	{
-					rio_route_add_entry(port, rswitch,
+					rio_route_add_entry(swrdev,
 						RIO_GLOBAL_TABLE, destid,
 						sport, 0);
 					rswitch->route_table[destid] = sport;
@@ -1203,21 +1214,20 @@
 
 	list_for_each_entry(rdev, &rio_devices, global_list)
 		if (rio_is_switch(rdev)) {
-			rio_lock_device(rdev->net->hport, rdev->rswitch->destid,
-					rdev->rswitch->hopcount, 1000);
+			rio_lock_device(rdev->net->hport, rdev->destid,
+					rdev->hopcount, 1000);
 			for (i = 0;
 			     i < RIO_MAX_ROUTE_ENTRIES(rdev->net->hport->sys_size);
 			     i++) {
-				if (rio_route_get_entry
-				    (rdev->net->hport, rdev->rswitch,
-				     RIO_GLOBAL_TABLE, i, &sport, 0) < 0)
+				if (rio_route_get_entry(rdev,
+					RIO_GLOBAL_TABLE, i, &sport, 0) < 0)
 					continue;
 				rdev->rswitch->route_table[i] = sport;
 			}
 
 			rio_unlock_device(rdev->net->hport,
-					  rdev->rswitch->destid,
-					  rdev->rswitch->hopcount);
+					  rdev->destid,
+					  rdev->hopcount);
 		}
 }
 
@@ -1284,7 +1294,7 @@
 						   mport->host_deviceid);
 
 		if (rio_disc_peer(net, mport, RIO_ANY_DESTID(mport->sys_size),
-					0) < 0) {
+					0, NULL, 0) < 0) {
 			printk(KERN_INFO
 			       "RIO: master port %d device has failed discovery\n",
 			       mport->id);
diff --git a/drivers/rapidio/rio-sysfs.c b/drivers/rapidio/rio-sysfs.c
index 137ed93..76b4185 100644
--- a/drivers/rapidio/rio-sysfs.c
+++ b/drivers/rapidio/rio-sysfs.c
@@ -217,7 +217,7 @@
 
 	err = device_create_bin_file(&rdev->dev, &rio_config_attr);
 
-	if (!err && rdev->rswitch) {
+	if (!err && (rdev->pef & RIO_PEF_SWITCH)) {
 		err = device_create_file(&rdev->dev, &dev_attr_routes);
 		if (!err && rdev->rswitch->sw_sysfs)
 			err = rdev->rswitch->sw_sysfs(rdev, RIO_SW_SYSFS_CREATE);
@@ -239,7 +239,7 @@
 void rio_remove_sysfs_dev_files(struct rio_dev *rdev)
 {
 	device_remove_bin_file(&rdev->dev, &rio_config_attr);
-	if (rdev->rswitch) {
+	if (rdev->pef & RIO_PEF_SWITCH) {
 		device_remove_file(&rdev->dev, &dev_attr_routes);
 		if (rdev->rswitch->sw_sysfs)
 			rdev->rswitch->sw_sysfs(rdev, RIO_SW_SYSFS_REMOVE);
diff --git a/drivers/rapidio/rio.c b/drivers/rapidio/rio.c
index 7b5080c..cc2a3b7 100644
--- a/drivers/rapidio/rio.c
+++ b/drivers/rapidio/rio.c
@@ -471,16 +471,9 @@
  */
 int rio_set_port_lockout(struct rio_dev *rdev, u32 pnum, int lock)
 {
-	u8 hopcount = 0xff;
-	u16 destid = rdev->destid;
 	u32 regval;
 
-	if (rdev->rswitch) {
-		destid = rdev->rswitch->destid;
-		hopcount = rdev->rswitch->hopcount;
-	}
-
-	rio_mport_read_config_32(rdev->net->hport, destid, hopcount,
+	rio_read_config_32(rdev,
 				 rdev->phys_efptr + RIO_PORT_N_CTL_CSR(pnum),
 				 &regval);
 	if (lock)
@@ -488,7 +481,7 @@
 	else
 		regval &= ~RIO_PORT_N_CTL_LOCKOUT;
 
-	rio_mport_write_config_32(rdev->net->hport, destid, hopcount,
+	rio_write_config_32(rdev,
 				  rdev->phys_efptr + RIO_PORT_N_CTL_CSR(pnum),
 				  regval);
 	return 0;
@@ -507,7 +500,7 @@
 rio_chk_dev_route(struct rio_dev *rdev, struct rio_dev **nrdev, int *npnum)
 {
 	u32 result;
-	int p_port, dstid, rc = -EIO;
+	int p_port, rc = -EIO;
 	struct rio_dev *prev = NULL;
 
 	/* Find switch with failed RIO link */
@@ -522,9 +515,7 @@
 	if (prev == NULL)
 		goto err_out;
 
-	dstid = (rdev->pef & RIO_PEF_SWITCH) ?
-			rdev->rswitch->destid : rdev->destid;
-	p_port = prev->rswitch->route_table[dstid];
+	p_port = prev->rswitch->route_table[rdev->destid];
 
 	if (p_port != RIO_INVALID_ROUTE) {
 		pr_debug("RIO: link failed on [%s]-P%d\n",
@@ -567,15 +558,8 @@
  */
 static int rio_chk_dev_access(struct rio_dev *rdev)
 {
-	u8 hopcount = 0xff;
-	u16 destid = rdev->destid;
-
-	if (rdev->rswitch) {
-		destid = rdev->rswitch->destid;
-		hopcount = rdev->rswitch->hopcount;
-	}
-
-	return rio_mport_chk_dev_access(rdev->net->hport, destid, hopcount);
+	return rio_mport_chk_dev_access(rdev->net->hport,
+					rdev->destid, rdev->hopcount);
 }
 
 /**
@@ -588,23 +572,20 @@
 static int
 rio_get_input_status(struct rio_dev *rdev, int pnum, u32 *lnkresp)
 {
-	struct rio_mport *mport = rdev->net->hport;
-	u16 destid = rdev->rswitch->destid;
-	u8 hopcount = rdev->rswitch->hopcount;
 	u32 regval;
 	int checkcount;
 
 	if (lnkresp) {
 		/* Read from link maintenance response register
 		 * to clear valid bit */
-		rio_mport_read_config_32(mport, destid, hopcount,
+		rio_read_config_32(rdev,
 			rdev->phys_efptr + RIO_PORT_N_MNT_RSP_CSR(pnum),
 			&regval);
 		udelay(50);
 	}
 
 	/* Issue Input-status command */
-	rio_mport_write_config_32(mport, destid, hopcount,
+	rio_write_config_32(rdev,
 		rdev->phys_efptr + RIO_PORT_N_MNT_REQ_CSR(pnum),
 		RIO_MNT_REQ_CMD_IS);
 
@@ -615,7 +596,7 @@
 	checkcount = 3;
 	while (checkcount--) {
 		udelay(50);
-		rio_mport_read_config_32(mport, destid, hopcount,
+		rio_read_config_32(rdev,
 			rdev->phys_efptr + RIO_PORT_N_MNT_RSP_CSR(pnum),
 			&regval);
 		if (regval & RIO_PORT_N_MNT_RSP_RVAL) {
@@ -635,15 +616,12 @@
  */
 static int rio_clr_err_stopped(struct rio_dev *rdev, u32 pnum, u32 err_status)
 {
-	struct rio_mport *mport = rdev->net->hport;
-	u16 destid = rdev->rswitch->destid;
-	u8 hopcount = rdev->rswitch->hopcount;
 	struct rio_dev *nextdev = rdev->rswitch->nextdev[pnum];
 	u32 regval;
 	u32 far_ackid, far_linkstat, near_ackid;
 
 	if (err_status == 0)
-		rio_mport_read_config_32(mport, destid, hopcount,
+		rio_read_config_32(rdev,
 			rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(pnum),
 			&err_status);
 
@@ -661,7 +639,7 @@
 			 pnum, regval);
 		far_ackid = (regval & RIO_PORT_N_MNT_RSP_ASTAT) >> 5;
 		far_linkstat = regval & RIO_PORT_N_MNT_RSP_LSTAT;
-		rio_mport_read_config_32(mport, destid, hopcount,
+		rio_read_config_32(rdev,
 			rdev->phys_efptr + RIO_PORT_N_ACK_STS_CSR(pnum),
 			&regval);
 		pr_debug("RIO_EM: SP%d_ACK_STS_CSR=0x%08x\n", pnum, regval);
@@ -679,9 +657,8 @@
 			/* Align near outstanding/outbound ackIDs with
 			 * far inbound.
 			 */
-			rio_mport_write_config_32(mport, destid,
-				hopcount, rdev->phys_efptr +
-					RIO_PORT_N_ACK_STS_CSR(pnum),
+			rio_write_config_32(rdev,
+				rdev->phys_efptr + RIO_PORT_N_ACK_STS_CSR(pnum),
 				(near_ackid << 24) |
 					(far_ackid << 8) | far_ackid);
 			/* Align far outstanding/outbound ackIDs with
@@ -698,7 +675,7 @@
 				pr_debug("RIO_EM: Invalid nextdev pointer (NULL)\n");
 		}
 rd_err:
-		rio_mport_read_config_32(mport, destid, hopcount,
+		rio_read_config_32(rdev,
 			rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(pnum),
 			&err_status);
 		pr_debug("RIO_EM: SP%d_ERR_STS_CSR=0x%08x\n", pnum, err_status);
@@ -710,7 +687,7 @@
 				     RIO_GET_PORT_NUM(nextdev->swpinfo), NULL);
 		udelay(50);
 
-		rio_mport_read_config_32(mport, destid, hopcount,
+		rio_read_config_32(rdev,
 			rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(pnum),
 			&err_status);
 		pr_debug("RIO_EM: SP%d_ERR_STS_CSR=0x%08x\n", pnum, err_status);
@@ -730,13 +707,10 @@
 int rio_inb_pwrite_handler(union rio_pw_msg *pw_msg)
 {
 	struct rio_dev *rdev;
-	struct rio_mport *mport;
-	u8 hopcount;
-	u16 destid;
 	u32 err_status, em_perrdet, em_ltlerrdet;
 	int rc, portnum;
 
-	rdev = rio_get_comptag(pw_msg->em.comptag, NULL);
+	rdev = rio_get_comptag((pw_msg->em.comptag & RIO_CTAG_UDEVID), NULL);
 	if (rdev == NULL) {
 		/* Device removed or enumeration error */
 		pr_debug("RIO: %s No matching device for CTag 0x%08x\n",
@@ -800,17 +774,13 @@
 		return 0;
 	}
 
-	mport = rdev->net->hport;
-	destid = rdev->rswitch->destid;
-	hopcount = rdev->rswitch->hopcount;
-
 	/*
 	 * Process the port-write notification from switch
 	 */
 	if (rdev->rswitch->em_handle)
 		rdev->rswitch->em_handle(rdev, portnum);
 
-	rio_mport_read_config_32(mport, destid, hopcount,
+	rio_read_config_32(rdev,
 			rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(portnum),
 			&err_status);
 	pr_debug("RIO_PW: SP%d_ERR_STS_CSR=0x%08x\n", portnum, err_status);
@@ -840,7 +810,7 @@
 			rdev->rswitch->port_ok &= ~(1 << portnum);
 			rio_set_port_lockout(rdev, portnum, 1);
 
-			rio_mport_write_config_32(mport, destid, hopcount,
+			rio_write_config_32(rdev,
 				rdev->phys_efptr +
 					RIO_PORT_N_ACK_STS_CSR(portnum),
 				RIO_PORT_N_ACK_CLEAR);
@@ -851,28 +821,28 @@
 		}
 	}
 
-	rio_mport_read_config_32(mport, destid, hopcount,
+	rio_read_config_32(rdev,
 		rdev->em_efptr + RIO_EM_PN_ERR_DETECT(portnum), &em_perrdet);
 	if (em_perrdet) {
 		pr_debug("RIO_PW: RIO_EM_P%d_ERR_DETECT=0x%08x\n",
 			 portnum, em_perrdet);
 		/* Clear EM Port N Error Detect CSR */
-		rio_mport_write_config_32(mport, destid, hopcount,
+		rio_write_config_32(rdev,
 			rdev->em_efptr + RIO_EM_PN_ERR_DETECT(portnum), 0);
 	}
 
-	rio_mport_read_config_32(mport, destid, hopcount,
+	rio_read_config_32(rdev,
 		rdev->em_efptr + RIO_EM_LTL_ERR_DETECT, &em_ltlerrdet);
 	if (em_ltlerrdet) {
 		pr_debug("RIO_PW: RIO_EM_LTL_ERR_DETECT=0x%08x\n",
 			 em_ltlerrdet);
 		/* Clear EM L/T Layer Error Detect CSR */
-		rio_mport_write_config_32(mport, destid, hopcount,
+		rio_write_config_32(rdev,
 			rdev->em_efptr + RIO_EM_LTL_ERR_DETECT, 0);
 	}
 
 	/* Clear remaining error bits and Port-Write Pending bit */
-	rio_mport_write_config_32(mport, destid, hopcount,
+	rio_write_config_32(rdev,
 			rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(portnum),
 			err_status);
 
diff --git a/drivers/rapidio/switches/idt_gen2.c b/drivers/rapidio/switches/idt_gen2.c
index 0bb871c..095016a 100644
--- a/drivers/rapidio/switches/idt_gen2.c
+++ b/drivers/rapidio/switches/idt_gen2.c
@@ -209,9 +209,6 @@
 static int
 idtg2_em_init(struct rio_dev *rdev)
 {
-	struct rio_mport *mport = rdev->net->hport;
-	u16 destid = rdev->rswitch->destid;
-	u8 hopcount = rdev->rswitch->hopcount;
 	u32 regval;
 	int i, tmp;
 
@@ -220,29 +217,25 @@
 	 * All standard EM configuration should be performed at upper level.
 	 */
 
-	pr_debug("RIO: %s [%d:%d]\n", __func__, destid, hopcount);
+	pr_debug("RIO: %s [%d:%d]\n", __func__, rdev->destid, rdev->hopcount);
 
 	/* Set Port-Write info CSR: PRIO=3 and CRF=1 */
-	rio_mport_write_config_32(mport, destid, hopcount,
-			IDT_PW_INFO_CSR, 0x0000e000);
+	rio_write_config_32(rdev, IDT_PW_INFO_CSR, 0x0000e000);
 
 	/*
 	 * Configure LT LAYER error reporting.
 	 */
 
 	/* Enable standard (RIO.p8) error reporting */
-	rio_mport_write_config_32(mport, destid, hopcount,
-			IDT_LT_ERR_REPORT_EN,
+	rio_write_config_32(rdev, IDT_LT_ERR_REPORT_EN,
 			REM_LTL_ERR_ILLTRAN | REM_LTL_ERR_UNSOLR |
 			REM_LTL_ERR_UNSUPTR);
 
 	/* Use Port-Writes for LT layer error reporting.
 	 * Enable per-port reset
 	 */
-	rio_mport_read_config_32(mport, destid, hopcount,
-			IDT_DEV_CTRL_1, &regval);
-	rio_mport_write_config_32(mport, destid, hopcount,
-			IDT_DEV_CTRL_1,
+	rio_read_config_32(rdev, IDT_DEV_CTRL_1, &regval);
+	rio_write_config_32(rdev, IDT_DEV_CTRL_1,
 			regval | IDT_DEV_CTRL_1_GENPW | IDT_DEV_CTRL_1_PRSTBEH);
 
 	/*
@@ -250,45 +243,40 @@
 	 */
 
 	/* Report all RIO.p8 errors supported by device */
-	rio_mport_write_config_32(mport, destid, hopcount,
-			IDT_PORT_ERR_REPORT_EN_BC, 0x807e8037);
+	rio_write_config_32(rdev, IDT_PORT_ERR_REPORT_EN_BC, 0x807e8037);
 
 	/* Configure reporting of implementation specific errors/events */
-	rio_mport_write_config_32(mport, destid, hopcount,
-			IDT_PORT_ISERR_REPORT_EN_BC, IDT_PORT_INIT_TX_ACQUIRED);
+	rio_write_config_32(rdev, IDT_PORT_ISERR_REPORT_EN_BC,
+			    IDT_PORT_INIT_TX_ACQUIRED);
 
 	/* Use Port-Writes for port error reporting and enable error logging */
 	tmp = RIO_GET_TOTAL_PORTS(rdev->swpinfo);
 	for (i = 0; i < tmp; i++) {
-		rio_mport_read_config_32(mport, destid, hopcount,
-				IDT_PORT_OPS(i), &regval);
-		rio_mport_write_config_32(mport, destid, hopcount,
+		rio_read_config_32(rdev, IDT_PORT_OPS(i), &regval);
+		rio_write_config_32(rdev,
 				IDT_PORT_OPS(i), regval | IDT_PORT_OPS_GENPW |
 				IDT_PORT_OPS_PL_ELOG |
 				IDT_PORT_OPS_LL_ELOG |
 				IDT_PORT_OPS_LT_ELOG);
 	}
 	/* Overwrite error log if full */
-	rio_mport_write_config_32(mport, destid, hopcount,
-			IDT_ERR_CAP, IDT_ERR_CAP_LOG_OVERWR);
+	rio_write_config_32(rdev, IDT_ERR_CAP, IDT_ERR_CAP_LOG_OVERWR);
 
 	/*
 	 * Configure LANE error reporting.
 	 */
 
 	/* Disable line error reporting */
-	rio_mport_write_config_32(mport, destid, hopcount,
-			IDT_LANE_ERR_REPORT_EN_BC, 0);
+	rio_write_config_32(rdev, IDT_LANE_ERR_REPORT_EN_BC, 0);
 
 	/* Use Port-Writes for lane error reporting (when enabled)
 	 * (do per-lane update because lanes may have different configuration)
 	 */
 	tmp = (rdev->did == RIO_DID_IDTCPS1848) ? 48 : 16;
 	for (i = 0; i < tmp; i++) {
-		rio_mport_read_config_32(mport, destid, hopcount,
-				IDT_LANE_CTRL(i), &regval);
-		rio_mport_write_config_32(mport, destid, hopcount,
-				IDT_LANE_CTRL(i), regval | IDT_LANE_CTRL_GENPW);
+		rio_read_config_32(rdev, IDT_LANE_CTRL(i), &regval);
+		rio_write_config_32(rdev, IDT_LANE_CTRL(i),
+				    regval | IDT_LANE_CTRL_GENPW);
 	}
 
 	/*
@@ -296,41 +284,32 @@
 	 */
 
 	/* Disable JTAG and I2C Error capture */
-	rio_mport_write_config_32(mport, destid, hopcount,
-			IDT_AUX_PORT_ERR_CAP_EN, 0);
+	rio_write_config_32(rdev, IDT_AUX_PORT_ERR_CAP_EN, 0);
 
 	/* Disable JTAG and I2C Error reporting/logging */
-	rio_mport_write_config_32(mport, destid, hopcount,
-			IDT_AUX_ERR_REPORT_EN, 0);
+	rio_write_config_32(rdev, IDT_AUX_ERR_REPORT_EN, 0);
 
 	/* Disable Port-Write notification from JTAG */
-	rio_mport_write_config_32(mport, destid, hopcount,
-			IDT_JTAG_CTRL, 0);
+	rio_write_config_32(rdev, IDT_JTAG_CTRL, 0);
 
 	/* Disable Port-Write notification from I2C */
-	rio_mport_read_config_32(mport, destid, hopcount,
-			IDT_I2C_MCTRL, &regval);
-	rio_mport_write_config_32(mport, destid, hopcount,
-			IDT_I2C_MCTRL,
-			regval & ~IDT_I2C_MCTRL_GENPW);
+	rio_read_config_32(rdev, IDT_I2C_MCTRL, &regval);
+	rio_write_config_32(rdev, IDT_I2C_MCTRL, regval & ~IDT_I2C_MCTRL_GENPW);
 
 	/*
 	 * Configure CFG_BLK error reporting.
 	 */
 
 	/* Disable Configuration Block error capture */
-	rio_mport_write_config_32(mport, destid, hopcount,
-			IDT_CFGBLK_ERR_CAPTURE_EN, 0);
+	rio_write_config_32(rdev, IDT_CFGBLK_ERR_CAPTURE_EN, 0);
 
 	/* Disable Port-Writes for Configuration Block error reporting */
-	rio_mport_read_config_32(mport, destid, hopcount,
-			IDT_CFGBLK_ERR_REPORT, &regval);
-	rio_mport_write_config_32(mport, destid, hopcount,
-			IDT_CFGBLK_ERR_REPORT,
-			regval & ~IDT_CFGBLK_ERR_REPORT_GENPW);
+	rio_read_config_32(rdev, IDT_CFGBLK_ERR_REPORT, &regval);
+	rio_write_config_32(rdev, IDT_CFGBLK_ERR_REPORT,
+			    regval & ~IDT_CFGBLK_ERR_REPORT_GENPW);
 
 	/* set TVAL = ~50us */
-	rio_mport_write_config_32(mport, destid, hopcount,
+	rio_write_config_32(rdev,
 		rdev->phys_efptr + RIO_PORT_LINKTO_CTL_CSR, 0x8e << 8);
 
 	return 0;
@@ -339,18 +318,15 @@
 static int
 idtg2_em_handler(struct rio_dev *rdev, u8 portnum)
 {
-	struct rio_mport *mport = rdev->net->hport;
-	u16 destid = rdev->rswitch->destid;
-	u8 hopcount = rdev->rswitch->hopcount;
 	u32 regval, em_perrdet, em_ltlerrdet;
 
-	rio_mport_read_config_32(mport, destid, hopcount,
+	rio_read_config_32(rdev,
 		rdev->em_efptr + RIO_EM_LTL_ERR_DETECT, &em_ltlerrdet);
 	if (em_ltlerrdet) {
 		/* Service Logical/Transport Layer Error(s) */
 		if (em_ltlerrdet & REM_LTL_ERR_IMPSPEC) {
 			/* Implementation specific error reported */
-			rio_mport_read_config_32(mport, destid, hopcount,
+			rio_read_config_32(rdev,
 					IDT_ISLTL_ADDRESS_CAP, &regval);
 
 			pr_debug("RIO: %s Implementation Specific LTL errors" \
@@ -358,13 +334,12 @@
 				 rio_name(rdev), em_ltlerrdet, regval);
 
 			/* Clear implementation specific address capture CSR */
-			rio_mport_write_config_32(mport, destid, hopcount,
-					IDT_ISLTL_ADDRESS_CAP, 0);
+			rio_write_config_32(rdev, IDT_ISLTL_ADDRESS_CAP, 0);
 
 		}
 	}
 
-	rio_mport_read_config_32(mport, destid, hopcount,
+	rio_read_config_32(rdev,
 		rdev->em_efptr + RIO_EM_PN_ERR_DETECT(portnum), &em_perrdet);
 	if (em_perrdet) {
 		/* Service Port-Level Error(s) */
@@ -372,14 +347,14 @@
 			/* Implementation Specific port error reported */
 
 			/* Get IS errors reported */
-			rio_mport_read_config_32(mport, destid, hopcount,
+			rio_read_config_32(rdev,
 					IDT_PORT_ISERR_DET(portnum), &regval);
 
 			pr_debug("RIO: %s Implementation Specific Port" \
 				 " errors 0x%x\n", rio_name(rdev), regval);
 
 			/* Clear all implementation specific events */
-			rio_mport_write_config_32(mport, destid, hopcount,
+			rio_write_config_32(rdev,
 					IDT_PORT_ISERR_DET(portnum), 0);
 		}
 	}
@@ -391,14 +366,10 @@
 idtg2_show_errlog(struct device *dev, struct device_attribute *attr, char *buf)
 {
 	struct rio_dev *rdev = to_rio_dev(dev);
-	struct rio_mport *mport = rdev->net->hport;
-	u16 destid = rdev->rswitch->destid;
-	u8 hopcount = rdev->rswitch->hopcount;
 	ssize_t len = 0;
 	u32 regval;
 
-	while (!rio_mport_read_config_32(mport, destid, hopcount,
-					 IDT_ERR_RD, &regval)) {
+	while (!rio_read_config_32(rdev, IDT_ERR_RD, &regval)) {
 		if (!regval)    /* 0 = end of log */
 			break;
 		len += snprintf(buf + len, PAGE_SIZE - len,
@@ -445,3 +416,5 @@
 
 DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDTCPS1848, idtg2_switch_init);
 DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDTCPS1616, idtg2_switch_init);
+DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDTVPS1616, idtg2_switch_init);
+DECLARE_RIO_SWITCH_INIT(RIO_VID_IDT, RIO_DID_IDTSPS1616, idtg2_switch_init);
diff --git a/drivers/rapidio/switches/idtcps.c b/drivers/rapidio/switches/idtcps.c
index fc9f637..3a97107 100644
--- a/drivers/rapidio/switches/idtcps.c
+++ b/drivers/rapidio/switches/idtcps.c
@@ -117,10 +117,6 @@
 
 static int idtcps_switch_init(struct rio_dev *rdev, int do_enum)
 {
-	struct rio_mport *mport = rdev->net->hport;
-	u16 destid = rdev->rswitch->destid;
-	u8 hopcount = rdev->rswitch->hopcount;
-
 	pr_debug("RIO: %s for %s\n", __func__, rio_name(rdev));
 	rdev->rswitch->add_entry = idtcps_route_add_entry;
 	rdev->rswitch->get_entry = idtcps_route_get_entry;
@@ -132,7 +128,7 @@
 
 	if (do_enum) {
 		/* set TVAL = ~50us */
-		rio_mport_write_config_32(mport, destid, hopcount,
+		rio_write_config_32(rdev,
 			rdev->phys_efptr + RIO_PORT_LINKTO_CTL_CSR, 0x8e << 8);
 	}
 
diff --git a/drivers/rapidio/switches/tsi568.c b/drivers/rapidio/switches/tsi568.c
index b9a389b..3994c00 100644
--- a/drivers/rapidio/switches/tsi568.c
+++ b/drivers/rapidio/switches/tsi568.c
@@ -113,22 +113,17 @@
 static int
 tsi568_em_init(struct rio_dev *rdev)
 {
-	struct rio_mport *mport = rdev->net->hport;
-	u16 destid = rdev->rswitch->destid;
-	u8 hopcount = rdev->rswitch->hopcount;
 	u32 regval;
 	int portnum;
 
-	pr_debug("TSI568 %s [%d:%d]\n", __func__, destid, hopcount);
+	pr_debug("TSI568 %s [%d:%d]\n", __func__, rdev->destid, rdev->hopcount);
 
 	/* Make sure that Port-Writes are disabled (for all ports) */
 	for (portnum = 0;
 	     portnum < RIO_GET_TOTAL_PORTS(rdev->swpinfo); portnum++) {
-		rio_mport_read_config_32(mport, destid, hopcount,
-				TSI568_SP_MODE(portnum), &regval);
-		rio_mport_write_config_32(mport, destid, hopcount,
-				TSI568_SP_MODE(portnum),
-				regval | TSI568_SP_MODE_PW_DIS);
+		rio_read_config_32(rdev, TSI568_SP_MODE(portnum), &regval);
+		rio_write_config_32(rdev, TSI568_SP_MODE(portnum),
+				    regval | TSI568_SP_MODE_PW_DIS);
 	}
 
 	return 0;
diff --git a/drivers/rapidio/switches/tsi57x.c b/drivers/rapidio/switches/tsi57x.c
index 2003fb6..1a62934 100644
--- a/drivers/rapidio/switches/tsi57x.c
+++ b/drivers/rapidio/switches/tsi57x.c
@@ -158,48 +158,45 @@
 static int
 tsi57x_em_init(struct rio_dev *rdev)
 {
-	struct rio_mport *mport = rdev->net->hport;
-	u16 destid = rdev->rswitch->destid;
-	u8 hopcount = rdev->rswitch->hopcount;
 	u32 regval;
 	int portnum;
 
-	pr_debug("TSI578 %s [%d:%d]\n", __func__, destid, hopcount);
+	pr_debug("TSI578 %s [%d:%d]\n", __func__, rdev->destid, rdev->hopcount);
 
 	for (portnum = 0;
 	     portnum < RIO_GET_TOTAL_PORTS(rdev->swpinfo); portnum++) {
 		/* Make sure that Port-Writes are enabled (for all ports) */
-		rio_mport_read_config_32(mport, destid, hopcount,
+		rio_read_config_32(rdev,
 				TSI578_SP_MODE(portnum), &regval);
-		rio_mport_write_config_32(mport, destid, hopcount,
+		rio_write_config_32(rdev,
 				TSI578_SP_MODE(portnum),
 				regval & ~TSI578_SP_MODE_PW_DIS);
 
 		/* Clear all pending interrupts */
-		rio_mport_read_config_32(mport, destid, hopcount,
+		rio_read_config_32(rdev,
 				rdev->phys_efptr +
 					RIO_PORT_N_ERR_STS_CSR(portnum),
 				&regval);
-		rio_mport_write_config_32(mport, destid, hopcount,
+		rio_write_config_32(rdev,
 				rdev->phys_efptr +
 					RIO_PORT_N_ERR_STS_CSR(portnum),
 				regval & 0x07120214);
 
-		rio_mport_read_config_32(mport, destid, hopcount,
+		rio_read_config_32(rdev,
 				TSI578_SP_INT_STATUS(portnum), &regval);
-		rio_mport_write_config_32(mport, destid, hopcount,
+		rio_write_config_32(rdev,
 				TSI578_SP_INT_STATUS(portnum),
 				regval & 0x000700bd);
 
 		/* Enable all interrupts to allow ports to send a port-write */
-		rio_mport_read_config_32(mport, destid, hopcount,
+		rio_read_config_32(rdev,
 				TSI578_SP_CTL_INDEP(portnum), &regval);
-		rio_mport_write_config_32(mport, destid, hopcount,
+		rio_write_config_32(rdev,
 				TSI578_SP_CTL_INDEP(portnum),
 				regval | 0x000b0000);
 
 		/* Skip next (odd) port if the current port is in x4 mode */
-		rio_mport_read_config_32(mport, destid, hopcount,
+		rio_read_config_32(rdev,
 				rdev->phys_efptr + RIO_PORT_N_CTL_CSR(portnum),
 				&regval);
 		if ((regval & RIO_PORT_N_CTL_PWIDTH) == RIO_PORT_N_CTL_PWIDTH_4)
@@ -207,7 +204,7 @@
 	}
 
 	/* set TVAL = ~50us */
-	rio_mport_write_config_32(mport, destid, hopcount,
+	rio_write_config_32(rdev,
 		rdev->phys_efptr + RIO_PORT_LINKTO_CTL_CSR, 0x9a << 8);
 
 	return 0;
@@ -217,14 +214,12 @@
 tsi57x_em_handler(struct rio_dev *rdev, u8 portnum)
 {
 	struct rio_mport *mport = rdev->net->hport;
-	u16 destid = rdev->rswitch->destid;
-	u8 hopcount = rdev->rswitch->hopcount;
 	u32 intstat, err_status;
 	int sendcount, checkcount;
 	u8 route_port;
 	u32 regval;
 
-	rio_mport_read_config_32(mport, destid, hopcount,
+	rio_read_config_32(rdev,
 			rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(portnum),
 			&err_status);
 
@@ -232,15 +227,15 @@
 	    (err_status & (RIO_PORT_N_ERR_STS_PW_OUT_ES |
 			  RIO_PORT_N_ERR_STS_PW_INP_ES))) {
 		/* Remove any queued packets by locking/unlocking port */
-		rio_mport_read_config_32(mport, destid, hopcount,
+		rio_read_config_32(rdev,
 			rdev->phys_efptr + RIO_PORT_N_CTL_CSR(portnum),
 			&regval);
 		if (!(regval & RIO_PORT_N_CTL_LOCKOUT)) {
-			rio_mport_write_config_32(mport, destid, hopcount,
+			rio_write_config_32(rdev,
 				rdev->phys_efptr + RIO_PORT_N_CTL_CSR(portnum),
 				regval | RIO_PORT_N_CTL_LOCKOUT);
 			udelay(50);
-			rio_mport_write_config_32(mport, destid, hopcount,
+			rio_write_config_32(rdev,
 				rdev->phys_efptr + RIO_PORT_N_CTL_CSR(portnum),
 				regval);
 		}
@@ -248,7 +243,7 @@
 		/* Read from link maintenance response register to clear
 		 * valid bit
 		 */
-		rio_mport_read_config_32(mport, destid, hopcount,
+		rio_read_config_32(rdev,
 			rdev->phys_efptr + RIO_PORT_N_MNT_RSP_CSR(portnum),
 			&regval);
 
@@ -257,13 +252,12 @@
 		 */
 		sendcount = 3;
 		while (sendcount) {
-			rio_mport_write_config_32(mport, destid, hopcount,
+			rio_write_config_32(rdev,
 					  TSI578_SP_CS_TX(portnum), 0x40fc8000);
 			checkcount = 3;
 			while (checkcount--) {
 				udelay(50);
-				rio_mport_read_config_32(
-					mport, destid, hopcount,
+				rio_read_config_32(rdev,
 					rdev->phys_efptr +
 						RIO_PORT_N_MNT_RSP_CSR(portnum),
 					&regval);
@@ -277,25 +271,23 @@
 
 exit_es:
 	/* Clear implementation specific error status bits */
-	rio_mport_read_config_32(mport, destid, hopcount,
-				 TSI578_SP_INT_STATUS(portnum), &intstat);
+	rio_read_config_32(rdev, TSI578_SP_INT_STATUS(portnum), &intstat);
 	pr_debug("TSI578[%x:%x] SP%d_INT_STATUS=0x%08x\n",
-		 destid, hopcount, portnum, intstat);
+		 rdev->destid, rdev->hopcount, portnum, intstat);
 
 	if (intstat & 0x10000) {
-		rio_mport_read_config_32(mport, destid, hopcount,
+		rio_read_config_32(rdev,
 				TSI578_SP_LUT_PEINF(portnum), &regval);
 		regval = (mport->sys_size) ? (regval >> 16) : (regval >> 24);
 		route_port = rdev->rswitch->route_table[regval];
 		pr_debug("RIO: TSI578[%s] P%d LUT Parity Error (destID=%d)\n",
 			rio_name(rdev), portnum, regval);
-		tsi57x_route_add_entry(mport, destid, hopcount,
+		tsi57x_route_add_entry(mport, rdev->destid, rdev->hopcount,
 				RIO_GLOBAL_TABLE, regval, route_port);
 	}
 
-	rio_mport_write_config_32(mport, destid, hopcount,
-				  TSI578_SP_INT_STATUS(portnum),
-				  intstat & 0x000700bd);
+	rio_write_config_32(rdev, TSI578_SP_INT_STATUS(portnum),
+			    intstat & 0x000700bd);
 
 	return 0;
 }
diff --git a/drivers/regulator/88pm8607.c b/drivers/regulator/88pm8607.c
index 2ce2eb7..dd63084 100644
--- a/drivers/regulator/88pm8607.c
+++ b/drivers/regulator/88pm8607.c
@@ -249,7 +249,7 @@
 }
 
 static int pm8607_set_voltage(struct regulator_dev *rdev,
-			      int min_uV, int max_uV)
+			      int min_uV, int max_uV, unsigned *selector)
 {
 	struct pm8607_regulator_info *info = rdev_get_drvdata(rdev);
 	uint8_t val, mask;
@@ -263,6 +263,7 @@
 	ret = choose_voltage(rdev, min_uV, max_uV);
 	if (ret < 0)
 		return -EINVAL;
+	*selector = ret;
 	val = (uint8_t)(ret << info->vol_shift);
 	mask = ((1 << info->vol_nbits) - 1)  << info->vol_shift;
 
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index dd30e88..e1d9436 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -186,13 +186,25 @@
 	 This driver provides support for the voltage regulators of the
 	 PCAP2 PMIC.
 
+config REGULATOR_MC13XXX_CORE
+	tristate
+
 config REGULATOR_MC13783
 	tristate "Support regulators on Freescale MC13783 PMIC"
 	depends on MFD_MC13783
+	select REGULATOR_MC13XXX_CORE
 	help
 	  Say y here to support the regulators found on the Freescale MC13783
 	  PMIC.
 
+config REGULATOR_MC13892
+	tristate "Support regulators on Freescale MC13892 PMIC"
+	depends on MFD_MC13XXX
+	select REGULATOR_MC13XXX_CORE
+	help
+	  Say y here to support the regulators found on the Freescale MC13892
+	  PMIC.
+
 config REGULATOR_AB3100
 	tristate "ST-Ericsson AB3100 Regulator functions"
 	depends on AB3100_CORE
@@ -250,5 +262,15 @@
 	help
 	  This driver supports TPS6586X voltage regulator chips.
 
+config REGULATOR_TPS6524X
+	tristate "TI TPS6524X Power regulators"
+	depends on SPI
+	help
+	  This driver supports TPS6524X voltage regulator chips. TPS6524X
+	  provides three step-down converters and two general-purpose LDO
+	  voltage regulators.  This device is interfaced using a customized
+	  serial interface currently supported on the sequencer serial
+	  port controller.
+
 endif
 
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index bff8157..0b5e88c 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -30,10 +30,13 @@
 obj-$(CONFIG_REGULATOR_PCF50633) += pcf50633-regulator.o
 obj-$(CONFIG_REGULATOR_PCAP) += pcap-regulator.o
 obj-$(CONFIG_REGULATOR_MC13783) += mc13783-regulator.o
+obj-$(CONFIG_REGULATOR_MC13892) += mc13892-regulator.o
+obj-$(CONFIG_REGULATOR_MC13XXX_CORE) += mc13xxx-regulator-core.o
 obj-$(CONFIG_REGULATOR_AB3100) += ab3100.o
 
 obj-$(CONFIG_REGULATOR_TPS65023) += tps65023-regulator.o
 obj-$(CONFIG_REGULATOR_TPS6507X) += tps6507x-regulator.o
+obj-$(CONFIG_REGULATOR_TPS6524X) += tps6524x-regulator.o
 obj-$(CONFIG_REGULATOR_88PM8607) += 88pm8607.o
 obj-$(CONFIG_REGULATOR_ISL6271A) += isl6271a-regulator.o
 obj-$(CONFIG_REGULATOR_AB8500)	+= ab8500.o
diff --git a/drivers/regulator/ab3100.c b/drivers/regulator/ab3100.c
index b349266..ed6feaf 100644
--- a/drivers/regulator/ab3100.c
+++ b/drivers/regulator/ab3100.c
@@ -362,7 +362,8 @@
 }
 
 static int ab3100_set_voltage_regulator(struct regulator_dev *reg,
-					int min_uV, int max_uV)
+					int min_uV, int max_uV,
+					unsigned *selector)
 {
 	struct ab3100_regulator *abreg = reg->reg_data;
 	u8 regval;
@@ -373,6 +374,8 @@
 	if (bestindex < 0)
 		return bestindex;
 
+	*selector = bestindex;
+
 	err = abx500_get_register_interruptible(abreg->dev, 0,
 						abreg->regreg, &regval);
 	if (err) {
diff --git a/drivers/regulator/ab8500.c b/drivers/regulator/ab8500.c
index db6b70f..d9a052c 100644
--- a/drivers/regulator/ab8500.c
+++ b/drivers/regulator/ab8500.c
@@ -3,18 +3,13 @@
  *
  * License Terms: GNU General Public License v2
  *
- * Author: Sundar Iyer <sundar.iyer@stericsson.com> for ST-Ericsson
+ * Authors: Sundar Iyer <sundar.iyer@stericsson.com> for ST-Ericsson
+ *          Bengt Jonsson <bengt.g.jonsson@stericsson.com> for ST-Ericsson
  *
  * AB8500 peripheral regulators
  *
- * AB8500 supports the following regulators,
- * LDOs - VAUDIO, VANAMIC2/2, VDIGMIC, VINTCORE12, VTVOUT,
- *        VAUX1/2/3, VANA
- *
- * for DB8500 cut 1.0 and previous versions of the silicon, all accesses
- * to registers are through the DB8500 SPI. In cut 1.1 onwards, these
- * accesses are through the DB8500 PRCMU I2C
- *
+ * AB8500 supports the following regulators:
+ *   VAUX1/2/3, VINTCORE, VTVOUT, VAUDIO, VAMIC1/2, VDMIC, VANA
  */
 #include <linux/init.h>
 #include <linux/kernel.h>
@@ -28,38 +23,37 @@
 
 /**
  * struct ab8500_regulator_info - ab8500 regulator information
+ * @dev: device pointer
  * @desc: regulator description
- * @ab8500: ab8500 parent
  * @regulator_dev: regulator device
  * @max_uV: maximum voltage (for variable voltage supplies)
  * @min_uV: minimum voltage (for variable voltage supplies)
  * @fixed_uV: typical voltage (for fixed voltage supplies)
  * @update_bank: bank to control on/off
  * @update_reg: register to control on/off
- * @mask: mask to enable/disable regulator
- * @enable: bits to enable the regulator in normal(high power) mode
+ * @update_mask: mask to enable/disable regulator
+ * @update_val_enable: bits to enable the regulator in normal (high power) mode
  * @voltage_bank: bank to control regulator voltage
  * @voltage_reg: register to control regulator voltage
  * @voltage_mask: mask to control regulator voltage
- * @supported_voltages: supported voltage table
+ * @voltages: supported voltage table
  * @voltages_len: number of supported voltages for the regulator
  */
 struct ab8500_regulator_info {
 	struct device		*dev;
 	struct regulator_desc	desc;
-	struct ab8500		*ab8500;
 	struct regulator_dev	*regulator;
 	int max_uV;
 	int min_uV;
 	int fixed_uV;
 	u8 update_bank;
 	u8 update_reg;
-	u8 mask;
-	u8 enable;
+	u8 update_mask;
+	u8 update_val_enable;
 	u8 voltage_bank;
 	u8 voltage_reg;
 	u8 voltage_mask;
-	int const *supported_voltages;
+	int const *voltages;
 	int voltages_len;
 };
 
@@ -83,6 +77,17 @@
 	3300000,
 };
 
+static const int ldo_vaux3_voltages[] = {
+	1200000,
+	1500000,
+	1800000,
+	2100000,
+	2500000,
+	2750000,
+	2790000,
+	2910000,
+};
+
 static const int ldo_vintcore_voltages[] = {
 	1200000,
 	1225000,
@@ -95,57 +100,80 @@
 
 static int ab8500_regulator_enable(struct regulator_dev *rdev)
 {
-	int regulator_id, ret;
+	int ret;
 	struct ab8500_regulator_info *info = rdev_get_drvdata(rdev);
 
-	regulator_id = rdev_get_id(rdev);
-	if (regulator_id >= AB8500_NUM_REGULATORS)
+	if (info == NULL) {
+		dev_err(rdev_get_dev(rdev), "regulator info null pointer\n");
 		return -EINVAL;
+	}
 
 	ret = abx500_mask_and_set_register_interruptible(info->dev,
-		info->update_bank, info->update_reg, info->mask, info->enable);
+		info->update_bank, info->update_reg,
+		info->update_mask, info->update_val_enable);
 	if (ret < 0)
 		dev_err(rdev_get_dev(rdev),
 			"couldn't set enable bits for regulator\n");
+
+	dev_vdbg(rdev_get_dev(rdev),
+		"%s-enable (bank, reg, mask, value): 0x%x, 0x%x, 0x%x, 0x%x\n",
+		info->desc.name, info->update_bank, info->update_reg,
+		info->update_mask, info->update_val_enable);
+
 	return ret;
 }
 
 static int ab8500_regulator_disable(struct regulator_dev *rdev)
 {
-	int regulator_id, ret;
+	int ret;
 	struct ab8500_regulator_info *info = rdev_get_drvdata(rdev);
 
-	regulator_id = rdev_get_id(rdev);
-	if (regulator_id >= AB8500_NUM_REGULATORS)
+	if (info == NULL) {
+		dev_err(rdev_get_dev(rdev), "regulator info null pointer\n");
 		return -EINVAL;
+	}
 
 	ret = abx500_mask_and_set_register_interruptible(info->dev,
-		info->update_bank, info->update_reg, info->mask, 0x0);
+		info->update_bank, info->update_reg,
+		info->update_mask, 0x0);
 	if (ret < 0)
 		dev_err(rdev_get_dev(rdev),
 			"couldn't set disable bits for regulator\n");
+
+	dev_vdbg(rdev_get_dev(rdev),
+		"%s-disable (bank, reg, mask, value): 0x%x, 0x%x, 0x%x, 0x%x\n",
+		info->desc.name, info->update_bank, info->update_reg,
+		info->update_mask, 0x0);
+
 	return ret;
 }
 
 static int ab8500_regulator_is_enabled(struct regulator_dev *rdev)
 {
-	int regulator_id, ret;
+	int ret;
 	struct ab8500_regulator_info *info = rdev_get_drvdata(rdev);
-	u8 value;
+	u8 regval;
 
-	regulator_id = rdev_get_id(rdev);
-	if (regulator_id >= AB8500_NUM_REGULATORS)
+	if (info == NULL) {
+		dev_err(rdev_get_dev(rdev), "regulator info null pointer\n");
 		return -EINVAL;
+	}
 
 	ret = abx500_get_register_interruptible(info->dev,
-		info->update_bank, info->update_reg, &value);
+		info->update_bank, info->update_reg, &regval);
 	if (ret < 0) {
 		dev_err(rdev_get_dev(rdev),
 			"couldn't read 0x%x register\n", info->update_reg);
 		return ret;
 	}
 
-	if (value & info->mask)
+	dev_vdbg(rdev_get_dev(rdev),
+		"%s-is_enabled (bank, reg, mask, value): 0x%x, 0x%x, 0x%x,"
+		" 0x%x\n",
+		info->desc.name, info->update_bank, info->update_reg,
+		info->update_mask, regval);
+
+	if (regval & info->update_mask)
 		return true;
 	else
 		return false;
@@ -153,12 +181,12 @@
 
 static int ab8500_list_voltage(struct regulator_dev *rdev, unsigned selector)
 {
-	int regulator_id;
 	struct ab8500_regulator_info *info = rdev_get_drvdata(rdev);
 
-	regulator_id = rdev_get_id(rdev);
-	if (regulator_id >= AB8500_NUM_REGULATORS)
+	if (info == NULL) {
+		dev_err(rdev_get_dev(rdev), "regulator info null pointer\n");
 		return -EINVAL;
+	}
 
 	/* return the uV for the fixed regulators */
 	if (info->fixed_uV)
@@ -167,33 +195,40 @@
 	if (selector >= info->voltages_len)
 		return -EINVAL;
 
-	return info->supported_voltages[selector];
+	return info->voltages[selector];
 }
 
 static int ab8500_regulator_get_voltage(struct regulator_dev *rdev)
 {
-	int regulator_id, ret;
+	int ret, val;
 	struct ab8500_regulator_info *info = rdev_get_drvdata(rdev);
-	u8 value;
+	u8 regval;
 
-	regulator_id = rdev_get_id(rdev);
-	if (regulator_id >= AB8500_NUM_REGULATORS)
+	if (info == NULL) {
+		dev_err(rdev_get_dev(rdev), "regulator info null pointer\n");
 		return -EINVAL;
+	}
 
-	ret = abx500_get_register_interruptible(info->dev, info->voltage_bank,
-		info->voltage_reg, &value);
+	ret = abx500_get_register_interruptible(info->dev,
+			info->voltage_bank, info->voltage_reg, &regval);
 	if (ret < 0) {
 		dev_err(rdev_get_dev(rdev),
 			"couldn't read voltage reg for regulator\n");
 		return ret;
 	}
 
+	dev_vdbg(rdev_get_dev(rdev),
+		"%s-get_voltage (bank, reg, mask, value): 0x%x, 0x%x, 0x%x,"
+		" 0x%x\n",
+		info->desc.name, info->voltage_bank, info->voltage_reg,
+		info->voltage_mask, regval);
+
 	/* vintcore has a different layout */
-	value &= info->voltage_mask;
-	if (regulator_id == AB8500_LDO_INTCORE)
-		ret = info->supported_voltages[value >> 0x3];
+	val = regval & info->voltage_mask;
+	if (info->desc.id == AB8500_LDO_INTCORE)
+		ret = info->voltages[val >> 0x3];
 	else
-		ret = info->supported_voltages[value];
+		ret = info->voltages[val];
 
 	return ret;
 }
@@ -206,8 +241,8 @@
 
 	/* check the supported voltage */
 	for (i = 0; i < info->voltages_len; i++) {
-		if ((info->supported_voltages[i] >= min_uV) &&
-		    (info->supported_voltages[i] <= max_uV))
+		if ((info->voltages[i] >= min_uV) &&
+		    (info->voltages[i] <= max_uV))
 			return i;
 	}
 
@@ -215,14 +250,17 @@
 }
 
 static int ab8500_regulator_set_voltage(struct regulator_dev *rdev,
-		int min_uV, int max_uV)
+					int min_uV, int max_uV,
+					unsigned *selector)
 {
-	int regulator_id, ret;
+	int ret;
 	struct ab8500_regulator_info *info = rdev_get_drvdata(rdev);
+	u8 regval;
 
-	regulator_id = rdev_get_id(rdev);
-	if (regulator_id >= AB8500_NUM_REGULATORS)
+	if (info == NULL) {
+		dev_err(rdev_get_dev(rdev), "regulator info null pointer\n");
 		return -EINVAL;
+	}
 
 	/* get the appropriate voltages within the range */
 	ret = ab8500_get_best_voltage_index(rdev, min_uV, max_uV);
@@ -232,14 +270,23 @@
 		return ret;
 	}
 
+	*selector = ret;
+
 	/* set the registers for the request */
+	regval = (u8)ret;
 	ret = abx500_mask_and_set_register_interruptible(info->dev,
-		info->voltage_bank, info->voltage_reg,
-		info->voltage_mask, (u8)ret);
+			info->voltage_bank, info->voltage_reg,
+			info->voltage_mask, regval);
 	if (ret < 0)
 		dev_err(rdev_get_dev(rdev),
 		"couldn't set voltage reg for regulator\n");
 
+	dev_vdbg(rdev_get_dev(rdev),
+		"%s-set_voltage (bank, reg, mask, value): 0x%x, 0x%x, 0x%x,"
+		" 0x%x\n",
+		info->desc.name, info->voltage_bank, info->voltage_reg,
+		info->voltage_mask, regval);
+
 	return ret;
 }
 
@@ -254,17 +301,17 @@
 
 static int ab8500_fixed_get_voltage(struct regulator_dev *rdev)
 {
-	int regulator_id;
 	struct ab8500_regulator_info *info = rdev_get_drvdata(rdev);
 
-	regulator_id = rdev_get_id(rdev);
-	if (regulator_id >= AB8500_NUM_REGULATORS)
+	if (info == NULL) {
+		dev_err(rdev_get_dev(rdev), "regulator info null pointer\n");
 		return -EINVAL;
+	}
 
 	return info->fixed_uV;
 }
 
-static struct regulator_ops ab8500_ldo_fixed_ops = {
+static struct regulator_ops ab8500_regulator_fixed_ops = {
 	.enable		= ab8500_regulator_enable,
 	.disable	= ab8500_regulator_disable,
 	.is_enabled	= ab8500_regulator_is_enabled,
@@ -272,89 +319,198 @@
 	.list_voltage	= ab8500_list_voltage,
 };
 
-#define AB8500_LDO(_id, min, max, bank, reg, reg_mask,		\
-		reg_enable, volt_bank, volt_reg, volt_mask,	\
-		voltages, len_volts)				\
-{								\
-	.desc	= {						\
-		.name	= "LDO-" #_id,				\
-		.ops	= &ab8500_regulator_ops,		\
-		.type	= REGULATOR_VOLTAGE,			\
-		.id	= AB8500_LDO_##_id,			\
-		.owner	= THIS_MODULE,				\
-	},							\
-	.min_uV		= (min) * 1000,				\
-	.max_uV		= (max) * 1000,				\
-	.update_bank	= bank,					\
-	.update_reg	= reg,					\
-	.mask		= reg_mask,				\
-	.enable		= reg_enable,				\
-	.voltage_bank	= volt_bank,				\
-	.voltage_reg	= volt_reg,				\
-	.voltage_mask	= volt_mask,				\
-	.supported_voltages = voltages,				\
-	.voltages_len	= len_volts,				\
-	.fixed_uV	= 0,					\
-}
-
-#define AB8500_FIXED_LDO(_id, fixed, bank, reg,		\
-			reg_mask, reg_enable)		\
-{							\
-	.desc	= {					\
-		.name	= "LDO-" #_id,			\
-		.ops	= &ab8500_ldo_fixed_ops,	\
-		.type	= REGULATOR_VOLTAGE,		\
-		.id	= AB8500_LDO_##_id,		\
-		.owner	= THIS_MODULE,			\
-	},						\
-	.fixed_uV	= fixed * 1000,			\
-	.update_bank	= bank,				\
-	.update_reg	= reg,				\
-	.mask		= reg_mask,			\
-	.enable		= reg_enable,			\
-}
-
-static struct ab8500_regulator_info ab8500_regulator_info[] = {
+static struct ab8500_regulator_info
+		ab8500_regulator_info[AB8500_NUM_REGULATORS] = {
 	/*
-	 * Variable Voltage LDOs
-	 * name, min uV, max uV, ctrl bank, ctrl reg, reg mask, enable mask,
-	 *      volt ctrl bank, volt ctrl reg, volt ctrl mask, volt table,
-	 *      num supported volts
+	 * Variable Voltage Regulators
+	 *   name, min mV, max mV,
+	 *   update bank, reg, mask, enable val
+	 *   volt bank, reg, mask, table, table length
 	 */
-	AB8500_LDO(AUX1, 1100, 3300, 0x04, 0x09, 0x3, 0x1, 0x04, 0x1f, 0xf,
-			ldo_vauxn_voltages, ARRAY_SIZE(ldo_vauxn_voltages)),
-	AB8500_LDO(AUX2, 1100, 3300, 0x04, 0x09, 0xc, 0x4, 0x04, 0x20, 0xf,
-			ldo_vauxn_voltages, ARRAY_SIZE(ldo_vauxn_voltages)),
-	AB8500_LDO(AUX3, 1100, 3300, 0x04, 0x0a, 0x3, 0x1, 0x04, 0x21, 0xf,
-			ldo_vauxn_voltages, ARRAY_SIZE(ldo_vauxn_voltages)),
-	AB8500_LDO(INTCORE, 1100, 3300, 0x03, 0x80, 0x4, 0x4, 0x03, 0x80, 0x38,
-		ldo_vintcore_voltages, ARRAY_SIZE(ldo_vintcore_voltages)),
+	[AB8500_LDO_AUX1] = {
+		.desc = {
+			.name		= "LDO-AUX1",
+			.ops		= &ab8500_regulator_ops,
+			.type		= REGULATOR_VOLTAGE,
+			.id		= AB8500_LDO_AUX1,
+			.owner		= THIS_MODULE,
+			.n_voltages	= ARRAY_SIZE(ldo_vauxn_voltages),
+		},
+		.min_uV			= 1100000,
+		.max_uV			= 3300000,
+		.update_bank		= 0x04,
+		.update_reg		= 0x09,
+		.update_mask		= 0x03,
+		.update_val_enable	= 0x01,
+		.voltage_bank		= 0x04,
+		.voltage_reg		= 0x1f,
+		.voltage_mask		= 0x0f,
+		.voltages		= ldo_vauxn_voltages,
+		.voltages_len		= ARRAY_SIZE(ldo_vauxn_voltages),
+	},
+	[AB8500_LDO_AUX2] = {
+		.desc = {
+			.name		= "LDO-AUX2",
+			.ops		= &ab8500_regulator_ops,
+			.type		= REGULATOR_VOLTAGE,
+			.id		= AB8500_LDO_AUX2,
+			.owner		= THIS_MODULE,
+			.n_voltages	= ARRAY_SIZE(ldo_vauxn_voltages),
+		},
+		.min_uV			= 1100000,
+		.max_uV			= 3300000,
+		.update_bank		= 0x04,
+		.update_reg		= 0x09,
+		.update_mask		= 0x0c,
+		.update_val_enable	= 0x04,
+		.voltage_bank		= 0x04,
+		.voltage_reg		= 0x20,
+		.voltage_mask		= 0x0f,
+		.voltages		= ldo_vauxn_voltages,
+		.voltages_len		= ARRAY_SIZE(ldo_vauxn_voltages),
+	},
+	[AB8500_LDO_AUX3] = {
+		.desc = {
+			.name		= "LDO-AUX3",
+			.ops		= &ab8500_regulator_ops,
+			.type		= REGULATOR_VOLTAGE,
+			.id		= AB8500_LDO_AUX3,
+			.owner		= THIS_MODULE,
+			.n_voltages	= ARRAY_SIZE(ldo_vaux3_voltages),
+		},
+		.min_uV			= 1100000,
+		.max_uV			= 3300000,
+		.update_bank		= 0x04,
+		.update_reg		= 0x0a,
+		.update_mask		= 0x03,
+		.update_val_enable	= 0x01,
+		.voltage_bank		= 0x04,
+		.voltage_reg		= 0x21,
+		.voltage_mask		= 0x07,
+		.voltages		= ldo_vaux3_voltages,
+		.voltages_len		= ARRAY_SIZE(ldo_vaux3_voltages),
+	},
+	[AB8500_LDO_INTCORE] = {
+		.desc = {
+			.name		= "LDO-INTCORE",
+			.ops		= &ab8500_regulator_ops,
+			.type		= REGULATOR_VOLTAGE,
+			.id		= AB8500_LDO_INTCORE,
+			.owner		= THIS_MODULE,
+			.n_voltages	= ARRAY_SIZE(ldo_vintcore_voltages),
+		},
+		.min_uV			= 1100000,
+		.max_uV			= 3300000,
+		.update_bank		= 0x03,
+		.update_reg		= 0x80,
+		.update_mask		= 0x44,
+		.update_val_enable	= 0x04,
+		.voltage_bank		= 0x03,
+		.voltage_reg		= 0x80,
+		.voltage_mask		= 0x38,
+		.voltages		= ldo_vintcore_voltages,
+		.voltages_len		= ARRAY_SIZE(ldo_vintcore_voltages),
+	},
 
 	/*
-	 * Fixed Voltage LDOs
-	 *		 name,	o/p uV, ctrl bank, ctrl reg, enable, disable
+	 * Fixed Voltage Regulators
+	 *   name, fixed mV,
+	 *   update bank, reg, mask, enable val
 	 */
-	AB8500_FIXED_LDO(TVOUT,	  2000, 0x03,      0x80,     0x2,    0x2),
-	AB8500_FIXED_LDO(AUDIO,   2000, 0x03,      0x83,     0x2,    0x2),
-	AB8500_FIXED_LDO(ANAMIC1, 2050, 0x03,      0x83,     0x4,    0x4),
-	AB8500_FIXED_LDO(ANAMIC2, 2050, 0x03,      0x83,     0x8,    0x8),
-	AB8500_FIXED_LDO(DMIC,    1800, 0x03,      0x83,     0x10,   0x10),
-	AB8500_FIXED_LDO(ANA,     1200, 0x03,      0x83,     0xc,    0x4),
+	[AB8500_LDO_TVOUT] = {
+		.desc = {
+			.name		= "LDO-TVOUT",
+			.ops		= &ab8500_regulator_fixed_ops,
+			.type		= REGULATOR_VOLTAGE,
+			.id		= AB8500_LDO_TVOUT,
+			.owner		= THIS_MODULE,
+			.n_voltages	= 1,
+		},
+		.fixed_uV		= 2000000,
+		.update_bank		= 0x03,
+		.update_reg		= 0x80,
+		.update_mask		= 0x82,
+		.update_val_enable	= 0x02,
+	},
+	[AB8500_LDO_AUDIO] = {
+		.desc = {
+			.name		= "LDO-AUDIO",
+			.ops		= &ab8500_regulator_fixed_ops,
+			.type		= REGULATOR_VOLTAGE,
+			.id		= AB8500_LDO_AUDIO,
+			.owner		= THIS_MODULE,
+			.n_voltages	= 1,
+		},
+		.fixed_uV		= 2000000,
+		.update_bank		= 0x03,
+		.update_reg		= 0x83,
+		.update_mask		= 0x02,
+		.update_val_enable	= 0x02,
+	},
+	[AB8500_LDO_ANAMIC1] = {
+		.desc = {
+			.name		= "LDO-ANAMIC1",
+			.ops		= &ab8500_regulator_fixed_ops,
+			.type		= REGULATOR_VOLTAGE,
+			.id		= AB8500_LDO_ANAMIC1,
+			.owner		= THIS_MODULE,
+			.n_voltages	= 1,
+		},
+		.fixed_uV		= 2050000,
+		.update_bank		= 0x03,
+		.update_reg		= 0x83,
+		.update_mask		= 0x08,
+		.update_val_enable	= 0x08,
+	},
+	[AB8500_LDO_ANAMIC2] = {
+		.desc = {
+			.name		= "LDO-ANAMIC2",
+			.ops		= &ab8500_regulator_fixed_ops,
+			.type		= REGULATOR_VOLTAGE,
+			.id		= AB8500_LDO_ANAMIC2,
+			.owner		= THIS_MODULE,
+			.n_voltages	= 1,
+		},
+		.fixed_uV		= 2050000,
+		.update_bank		= 0x03,
+		.update_reg		= 0x83,
+		.update_mask		= 0x10,
+		.update_val_enable	= 0x10,
+	},
+	[AB8500_LDO_DMIC] = {
+		.desc = {
+			.name		= "LDO-DMIC",
+			.ops		= &ab8500_regulator_fixed_ops,
+			.type		= REGULATOR_VOLTAGE,
+			.id		= AB8500_LDO_DMIC,
+			.owner		= THIS_MODULE,
+			.n_voltages	= 1,
+		},
+		.fixed_uV		= 1800000,
+		.update_bank		= 0x03,
+		.update_reg		= 0x83,
+		.update_mask		= 0x04,
+		.update_val_enable	= 0x04,
+	},
+	[AB8500_LDO_ANA] = {
+		.desc = {
+			.name		= "LDO-ANA",
+			.ops		= &ab8500_regulator_fixed_ops,
+			.type		= REGULATOR_VOLTAGE,
+			.id		= AB8500_LDO_ANA,
+			.owner		= THIS_MODULE,
+			.n_voltages	= 1,
+		},
+		.fixed_uV		= 1200000,
+		.update_bank		= 0x04,
+		.update_reg		= 0x06,
+		.update_mask		= 0x0c,
+		.update_val_enable	= 0x04,
+	},
+
+
 };
 
-static inline struct ab8500_regulator_info *find_regulator_info(int id)
-{
-	struct ab8500_regulator_info *info;
-	int i;
-
-	for (i = 0; i < ARRAY_SIZE(ab8500_regulator_info); i++) {
-		info = &ab8500_regulator_info[i];
-		if (info->desc.id == id)
-			return info;
-	}
-	return NULL;
-}
-
 static __devinit int ab8500_regulator_probe(struct platform_device *pdev)
 {
 	struct ab8500 *ab8500 = dev_get_drvdata(pdev->dev.parent);
@@ -366,6 +522,16 @@
 		return -EINVAL;
 	}
 	pdata = dev_get_platdata(ab8500->dev);
+	if (!pdata) {
+		dev_err(&pdev->dev, "null pdata\n");
+		return -EINVAL;
+	}
+
+	/* make sure the platform data has the correct size */
+	if (pdata->num_regulator != ARRAY_SIZE(ab8500_regulator_info)) {
+		dev_err(&pdev->dev, "platform configuration error\n");
+		return -EINVAL;
+	}
 
 	/* register all regulators */
 	for (i = 0; i < ARRAY_SIZE(ab8500_regulator_info); i++) {
@@ -374,10 +540,22 @@
 		/* assign per-regulator data */
 		info = &ab8500_regulator_info[i];
 		info->dev = &pdev->dev;
-		info->ab8500 = ab8500;
 
+		/* fix for hardware before ab8500v2.0 */
+		if (abx500_get_chip_id(info->dev) < 0x20) {
+			if (info->desc.id == AB8500_LDO_AUX3) {
+				info->desc.n_voltages =
+					ARRAY_SIZE(ldo_vauxn_voltages);
+				info->voltages = ldo_vauxn_voltages;
+				info->voltages_len =
+					ARRAY_SIZE(ldo_vauxn_voltages);
+				info->voltage_mask = 0xf;
+			}
+		}
+
+		/* register regulator with framework */
 		info->regulator = regulator_register(&info->desc, &pdev->dev,
-				pdata->regulator[i], info);
+				&pdata->regulator[i], info);
 		if (IS_ERR(info->regulator)) {
 			err = PTR_ERR(info->regulator);
 			dev_err(&pdev->dev, "failed to register regulator %s\n",
@@ -389,6 +567,9 @@
 			}
 			return err;
 		}
+
+		dev_vdbg(rdev_get_dev(info->regulator),
+			"%s-probed\n", info->desc.name);
 	}
 
 	return 0;
@@ -401,6 +582,10 @@
 	for (i = 0; i < ARRAY_SIZE(ab8500_regulator_info); i++) {
 		struct ab8500_regulator_info *info = NULL;
 		info = &ab8500_regulator_info[i];
+
+		dev_vdbg(rdev_get_dev(info->regulator),
+			"%s-remove\n", info->desc.name);
+
 		regulator_unregister(info->regulator);
 	}
 
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index ba521f0..9fa2095 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -13,8 +13,11 @@
  *
  */
 
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
 #include <linux/kernel.h>
 #include <linux/init.h>
+#include <linux/debugfs.h>
 #include <linux/device.h>
 #include <linux/slab.h>
 #include <linux/err.h>
@@ -25,16 +28,30 @@
 #include <linux/regulator/driver.h>
 #include <linux/regulator/machine.h>
 
+#define CREATE_TRACE_POINTS
+#include <trace/events/regulator.h>
+
 #include "dummy.h"
 
-#define REGULATOR_VERSION "0.5"
+#define rdev_err(rdev, fmt, ...)					\
+	pr_err("%s: " fmt, rdev_get_name(rdev), ##__VA_ARGS__)
+#define rdev_warn(rdev, fmt, ...)					\
+	pr_warn("%s: " fmt, rdev_get_name(rdev), ##__VA_ARGS__)
+#define rdev_info(rdev, fmt, ...)					\
+	pr_info("%s: " fmt, rdev_get_name(rdev), ##__VA_ARGS__)
+#define rdev_dbg(rdev, fmt, ...)					\
+	pr_debug("%s: " fmt, rdev_get_name(rdev), ##__VA_ARGS__)
 
 static DEFINE_MUTEX(regulator_list_mutex);
 static LIST_HEAD(regulator_list);
 static LIST_HEAD(regulator_map_list);
-static int has_full_constraints;
+static bool has_full_constraints;
 static bool board_wants_dummy_regulator;
 
+#ifdef CONFIG_DEBUG_FS
+static struct dentry *debugfs_root;
+#endif
+
 /*
  * struct regulator_map
  *
@@ -71,6 +88,8 @@
 static unsigned int _regulator_get_mode(struct regulator_dev *rdev);
 static void _notifier_call_chain(struct regulator_dev *rdev,
 				  unsigned long event, void *data);
+static int _regulator_do_set_voltage(struct regulator_dev *rdev,
+				     int min_uV, int max_uV);
 
 static const char *rdev_get_name(struct regulator_dev *rdev)
 {
@@ -111,13 +130,11 @@
 	BUG_ON(*min_uV > *max_uV);
 
 	if (!rdev->constraints) {
-		printk(KERN_ERR "%s: no constraints for %s\n", __func__,
-		       rdev_get_name(rdev));
+		rdev_err(rdev, "no constraints\n");
 		return -ENODEV;
 	}
 	if (!(rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_VOLTAGE)) {
-		printk(KERN_ERR "%s: operation not allowed for %s\n",
-		       __func__, rdev_get_name(rdev));
+		rdev_err(rdev, "operation not allowed\n");
 		return -EPERM;
 	}
 
@@ -132,6 +149,27 @@
 	return 0;
 }
 
+/* Make sure we select a voltage that suits the needs of all
+ * regulator consumers
+ */
+static int regulator_check_consumers(struct regulator_dev *rdev,
+				     int *min_uV, int *max_uV)
+{
+	struct regulator *regulator;
+
+	list_for_each_entry(regulator, &rdev->consumer_list, list) {
+		if (*max_uV > regulator->max_uV)
+			*max_uV = regulator->max_uV;
+		if (*min_uV < regulator->min_uV)
+			*min_uV = regulator->min_uV;
+	}
+
+	if (*min_uV > *max_uV)
+		return -EINVAL;
+
+	return 0;
+}
+
 /* current constraint check */
 static int regulator_check_current_limit(struct regulator_dev *rdev,
 					int *min_uA, int *max_uA)
@@ -139,13 +177,11 @@
 	BUG_ON(*min_uA > *max_uA);
 
 	if (!rdev->constraints) {
-		printk(KERN_ERR "%s: no constraints for %s\n", __func__,
-		       rdev_get_name(rdev));
+		rdev_err(rdev, "no constraints\n");
 		return -ENODEV;
 	}
 	if (!(rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_CURRENT)) {
-		printk(KERN_ERR "%s: operation not allowed for %s\n",
-		       __func__, rdev_get_name(rdev));
+		rdev_err(rdev, "operation not allowed\n");
 		return -EPERM;
 	}
 
@@ -174,18 +210,15 @@
 	}
 
 	if (!rdev->constraints) {
-		printk(KERN_ERR "%s: no constraints for %s\n", __func__,
-		       rdev_get_name(rdev));
+		rdev_err(rdev, "no constraints\n");
 		return -ENODEV;
 	}
 	if (!(rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_MODE)) {
-		printk(KERN_ERR "%s: operation not allowed for %s\n",
-		       __func__, rdev_get_name(rdev));
+		rdev_err(rdev, "operation not allowed\n");
 		return -EPERM;
 	}
 	if (!(rdev->constraints->valid_modes_mask & mode)) {
-		printk(KERN_ERR "%s: invalid mode %x for %s\n",
-		       __func__, mode, rdev_get_name(rdev));
+		rdev_err(rdev, "invalid mode %x\n", mode);
 		return -EINVAL;
 	}
 	return 0;
@@ -195,13 +228,11 @@
 static int regulator_check_drms(struct regulator_dev *rdev)
 {
 	if (!rdev->constraints) {
-		printk(KERN_ERR "%s: no constraints for %s\n", __func__,
-		       rdev_get_name(rdev));
+		rdev_err(rdev, "no constraints\n");
 		return -ENODEV;
 	}
 	if (!(rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_DRMS)) {
-		printk(KERN_ERR "%s: operation not allowed for %s\n",
-		       __func__, rdev_get_name(rdev));
+		rdev_err(rdev, "operation not allowed\n");
 		return -EPERM;
 	}
 	return 0;
@@ -553,18 +584,21 @@
 
 	err = regulator_check_drms(rdev);
 	if (err < 0 || !rdev->desc->ops->get_optimum_mode ||
-	    !rdev->desc->ops->get_voltage || !rdev->desc->ops->set_mode)
+	    (!rdev->desc->ops->get_voltage &&
+	     !rdev->desc->ops->get_voltage_sel) ||
+	    !rdev->desc->ops->set_mode)
 		return;
 
 	/* get output voltage */
-	output_uV = rdev->desc->ops->get_voltage(rdev);
+	output_uV = _regulator_get_voltage(rdev);
 	if (output_uV <= 0)
 		return;
 
 	/* get input voltage */
-	if (rdev->supply && rdev->supply->desc->ops->get_voltage)
-		input_uV = rdev->supply->desc->ops->get_voltage(rdev->supply);
-	else
+	input_uV = 0;
+	if (rdev->supply)
+		input_uV = _regulator_get_voltage(rdev);
+	if (input_uV <= 0)
 		input_uV = rdev->constraints->input_uV;
 	if (input_uV <= 0)
 		return;
@@ -598,20 +632,17 @@
 	 */
 	if (!rstate->enabled && !rstate->disabled) {
 		if (can_set_state)
-			printk(KERN_WARNING "%s: No configuration for %s\n",
-			       __func__, rdev_get_name(rdev));
+			rdev_warn(rdev, "No configuration\n");
 		return 0;
 	}
 
 	if (rstate->enabled && rstate->disabled) {
-		printk(KERN_ERR "%s: invalid configuration for %s\n",
-		       __func__, rdev_get_name(rdev));
+		rdev_err(rdev, "invalid configuration\n");
 		return -EINVAL;
 	}
 
 	if (!can_set_state) {
-		printk(KERN_ERR "%s: no way to set suspend state\n",
-			__func__);
+		rdev_err(rdev, "no way to set suspend state\n");
 		return -EINVAL;
 	}
 
@@ -620,15 +651,14 @@
 	else
 		ret = rdev->desc->ops->set_suspend_disable(rdev);
 	if (ret < 0) {
-		printk(KERN_ERR "%s: failed to enabled/disable\n", __func__);
+		rdev_err(rdev, "failed to enabled/disable\n");
 		return ret;
 	}
 
 	if (rdev->desc->ops->set_suspend_voltage && rstate->uV > 0) {
 		ret = rdev->desc->ops->set_suspend_voltage(rdev, rstate->uV);
 		if (ret < 0) {
-			printk(KERN_ERR "%s: failed to set voltage\n",
-				__func__);
+			rdev_err(rdev, "failed to set voltage\n");
 			return ret;
 		}
 	}
@@ -636,7 +666,7 @@
 	if (rdev->desc->ops->set_suspend_mode && rstate->mode > 0) {
 		ret = rdev->desc->ops->set_suspend_mode(rdev, rstate->mode);
 		if (ret < 0) {
-			printk(KERN_ERR "%s: failed to set mode\n", __func__);
+			rdev_err(rdev, "failed to set mode\n");
 			return ret;
 		}
 	}
@@ -714,29 +744,27 @@
 	if (constraints->valid_modes_mask & REGULATOR_MODE_STANDBY)
 		count += sprintf(buf + count, "standby");
 
-	printk(KERN_INFO "regulator: %s: %s\n", rdev_get_name(rdev), buf);
+	rdev_info(rdev, "%s\n", buf);
 }
 
 static int machine_constraints_voltage(struct regulator_dev *rdev,
 	struct regulation_constraints *constraints)
 {
 	struct regulator_ops *ops = rdev->desc->ops;
-	const char *name = rdev_get_name(rdev);
 	int ret;
 
 	/* do we need to apply the constraint voltage */
 	if (rdev->constraints->apply_uV &&
-		rdev->constraints->min_uV == rdev->constraints->max_uV &&
-		ops->set_voltage) {
-		ret = ops->set_voltage(rdev,
-			rdev->constraints->min_uV, rdev->constraints->max_uV);
-			if (ret < 0) {
-				printk(KERN_ERR "%s: failed to apply %duV constraint to %s\n",
-				       __func__,
-				       rdev->constraints->min_uV, name);
-				rdev->constraints = NULL;
-				return ret;
-			}
+	    rdev->constraints->min_uV == rdev->constraints->max_uV) {
+		ret = _regulator_do_set_voltage(rdev,
+						rdev->constraints->min_uV,
+						rdev->constraints->max_uV);
+		if (ret < 0) {
+			rdev_err(rdev, "failed to apply %duV constraint\n",
+				 rdev->constraints->min_uV);
+			rdev->constraints = NULL;
+			return ret;
+		}
 	}
 
 	/* constrain machine-level voltage specs to fit
@@ -765,8 +793,7 @@
 
 		/* else require explicit machine-level constraints */
 		if (cmin <= 0 || cmax <= 0 || cmax < cmin) {
-			pr_err("%s: %s '%s' voltage constraints\n",
-				       __func__, "invalid", name);
+			rdev_err(rdev, "invalid voltage constraints\n");
 			return -EINVAL;
 		}
 
@@ -787,22 +814,19 @@
 
 		/* final: [min_uV..max_uV] valid iff constraints valid */
 		if (max_uV < min_uV) {
-			pr_err("%s: %s '%s' voltage constraints\n",
-				       __func__, "unsupportable", name);
+			rdev_err(rdev, "unsupportable voltage constraints\n");
 			return -EINVAL;
 		}
 
 		/* use regulator's subset of machine constraints */
 		if (constraints->min_uV < min_uV) {
-			pr_debug("%s: override '%s' %s, %d -> %d\n",
-				       __func__, name, "min_uV",
-					constraints->min_uV, min_uV);
+			rdev_dbg(rdev, "override min_uV, %d -> %d\n",
+				 constraints->min_uV, min_uV);
 			constraints->min_uV = min_uV;
 		}
 		if (constraints->max_uV > max_uV) {
-			pr_debug("%s: override '%s' %s, %d -> %d\n",
-				       __func__, name, "max_uV",
-					constraints->max_uV, max_uV);
+			rdev_dbg(rdev, "override max_uV, %d -> %d\n",
+				 constraints->max_uV, max_uV);
 			constraints->max_uV = max_uV;
 		}
 	}
@@ -822,26 +846,25 @@
  * set_mode.
  */
 static int set_machine_constraints(struct regulator_dev *rdev,
-	struct regulation_constraints *constraints)
+	const struct regulation_constraints *constraints)
 {
 	int ret = 0;
-	const char *name;
 	struct regulator_ops *ops = rdev->desc->ops;
 
-	rdev->constraints = constraints;
+	rdev->constraints = kmemdup(constraints, sizeof(*constraints),
+				    GFP_KERNEL);
+	if (!rdev->constraints)
+		return -ENOMEM;
 
-	name = rdev_get_name(rdev);
-
-	ret = machine_constraints_voltage(rdev, constraints);
+	ret = machine_constraints_voltage(rdev, rdev->constraints);
 	if (ret != 0)
 		goto out;
 
 	/* do we need to setup our suspend state */
 	if (constraints->initial_state) {
-		ret = suspend_prepare(rdev, constraints->initial_state);
+		ret = suspend_prepare(rdev, rdev->constraints->initial_state);
 		if (ret < 0) {
-			printk(KERN_ERR "%s: failed to set suspend state for %s\n",
-			       __func__, name);
+			rdev_err(rdev, "failed to set suspend state\n");
 			rdev->constraints = NULL;
 			goto out;
 		}
@@ -849,17 +872,14 @@
 
 	if (constraints->initial_mode) {
 		if (!ops->set_mode) {
-			printk(KERN_ERR "%s: no set_mode operation for %s\n",
-			       __func__, name);
+			rdev_err(rdev, "no set_mode operation\n");
 			ret = -EINVAL;
 			goto out;
 		}
 
-		ret = ops->set_mode(rdev, constraints->initial_mode);
+		ret = ops->set_mode(rdev, rdev->constraints->initial_mode);
 		if (ret < 0) {
-			printk(KERN_ERR
-			       "%s: failed to set initial mode for %s: %d\n",
-			       __func__, name, ret);
+			rdev_err(rdev, "failed to set initial mode: %d\n", ret);
 			goto out;
 		}
 	}
@@ -867,11 +887,11 @@
 	/* If the constraints say the regulator should be on at this point
 	 * and we have control then make sure it is enabled.
 	 */
-	if ((constraints->always_on || constraints->boot_on) && ops->enable) {
+	if ((rdev->constraints->always_on || rdev->constraints->boot_on) &&
+	    ops->enable) {
 		ret = ops->enable(rdev);
 		if (ret < 0) {
-			printk(KERN_ERR "%s: failed to enable %s\n",
-			       __func__, name);
+			rdev_err(rdev, "failed to enable\n");
 			rdev->constraints = NULL;
 			goto out;
 		}
@@ -899,9 +919,8 @@
 	err = sysfs_create_link(&rdev->dev.kobj, &supply_rdev->dev.kobj,
 				"supply");
 	if (err) {
-		printk(KERN_ERR
-		       "%s: could not add device link %s err %d\n",
-		       __func__, supply_rdev->dev.kobj.name, err);
+		rdev_err(rdev, "could not add device link %s err %d\n",
+			 supply_rdev->dev.kobj.name, err);
 		       goto out;
 	}
 	rdev->supply = supply_rdev;
@@ -957,10 +976,10 @@
 			continue;
 
 		dev_dbg(consumer_dev, "%s/%s is '%s' supply; fail %s/%s\n",
-				dev_name(&node->regulator->dev),
-				node->regulator->desc->name,
-				supply,
-				dev_name(&rdev->dev), rdev_get_name(rdev));
+			dev_name(&node->regulator->dev),
+			node->regulator->desc->name,
+			supply,
+			dev_name(&rdev->dev), rdev_get_name(rdev));
 		return -EBUSY;
 	}
 
@@ -1031,8 +1050,7 @@
 		regulator->dev_attr.show = device_requested_uA_show;
 		err = device_create_file(dev, &regulator->dev_attr);
 		if (err < 0) {
-			printk(KERN_WARNING "%s: could not add regulator_dev"
-				" load sysfs\n", __func__);
+			rdev_warn(rdev, "could not add regulator_dev requested microamps sysfs entry\n");
 			goto attr_name_err;
 		}
 
@@ -1049,9 +1067,8 @@
 		err = sysfs_create_link(&rdev->dev.kobj, &dev->kobj,
 					buf);
 		if (err) {
-			printk(KERN_WARNING
-			       "%s: could not add device link %s err %d\n",
-			       __func__, dev->kobj.name, err);
+			rdev_warn(rdev, "could not add device link %s err %d\n",
+				  dev->kobj.name, err);
 			goto link_name_err;
 		}
 	}
@@ -1088,7 +1105,7 @@
 	int ret;
 
 	if (id == NULL) {
-		printk(KERN_ERR "regulator: get() with no identifier\n");
+		pr_err("get() with no identifier\n");
 		return regulator;
 	}
 
@@ -1122,8 +1139,8 @@
 	 * substitute in a dummy regulator so consumers can continue.
 	 */
 	if (!has_full_constraints) {
-		pr_warning("%s supply %s not found, using dummy regulator\n",
-			   devname, id);
+		pr_warn("%s supply %s not found, using dummy regulator\n",
+			devname, id);
 		rdev = dummy_regulator_rdev;
 		goto found;
 	}
@@ -1274,8 +1291,7 @@
 			ret = _regulator_enable(rdev->supply);
 			mutex_unlock(&rdev->supply->mutex);
 			if (ret < 0) {
-				printk(KERN_ERR "%s: failed to enable %s: %d\n",
-				       __func__, rdev_get_name(rdev), ret);
+				rdev_err(rdev, "failed to enable: %d\n", ret);
 				return ret;
 			}
 		}
@@ -1302,13 +1318,13 @@
 			if (ret >= 0) {
 				delay = ret;
 			} else {
-				printk(KERN_WARNING
-					"%s: enable_time() failed for %s: %d\n",
-					__func__, rdev_get_name(rdev),
-					ret);
+				rdev_warn(rdev, "enable_time() failed: %d\n",
+					   ret);
 				delay = 0;
 			}
 
+			trace_regulator_enable(rdev_get_name(rdev));
+
 			/* Allow the regulator to ramp; it would be useful
 			 * to extend this for bulk operations so that the
 			 * regulators can ramp together.  */
@@ -1316,6 +1332,8 @@
 			if (ret < 0)
 				return ret;
 
+			trace_regulator_enable_delay(rdev_get_name(rdev));
+
 			if (delay >= 1000) {
 				mdelay(delay / 1000);
 				udelay(delay % 1000);
@@ -1323,9 +1341,10 @@
 				udelay(delay);
 			}
 
+			trace_regulator_enable_complete(rdev_get_name(rdev));
+
 		} else if (ret < 0) {
-			printk(KERN_ERR "%s: is_enabled() failed for %s: %d\n",
-			       __func__, rdev_get_name(rdev), ret);
+			rdev_err(rdev, "is_enabled() failed: %d\n", ret);
 			return ret;
 		}
 		/* Fallthrough on positive return values - already enabled */
@@ -1367,8 +1386,7 @@
 	*supply_rdev_ptr = NULL;
 
 	if (WARN(rdev->use_count <= 0,
-			"unbalanced disables for %s\n",
-			rdev_get_name(rdev)))
+		 "unbalanced disables for %s\n", rdev_get_name(rdev)))
 		return -EIO;
 
 	/* are we the last user and permitted to disable ? */
@@ -1378,13 +1396,16 @@
 		/* we are last user */
 		if (_regulator_can_change_status(rdev) &&
 		    rdev->desc->ops->disable) {
+			trace_regulator_disable(rdev_get_name(rdev));
+
 			ret = rdev->desc->ops->disable(rdev);
 			if (ret < 0) {
-				printk(KERN_ERR "%s: failed to disable %s\n",
-				       __func__, rdev_get_name(rdev));
+				rdev_err(rdev, "failed to disable\n");
 				return ret;
 			}
 
+			trace_regulator_disable_complete(rdev_get_name(rdev));
+
 			_notifier_call_chain(rdev, REGULATOR_EVENT_DISABLE,
 					     NULL);
 		}
@@ -1451,8 +1472,7 @@
 		/* ah well, who wants to live forever... */
 		ret = rdev->desc->ops->disable(rdev);
 		if (ret < 0) {
-			printk(KERN_ERR "%s: failed to force disable %s\n",
-			       __func__, rdev_get_name(rdev));
+			rdev_err(rdev, "failed to force disable\n");
 			return ret;
 		}
 		/* notify other consumers that power has been forced off */
@@ -1605,6 +1625,62 @@
 	return 0;
 }
 
+static int _regulator_do_set_voltage(struct regulator_dev *rdev,
+				     int min_uV, int max_uV)
+{
+	int ret;
+	unsigned int selector;
+
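+	/* On success both branches below leave 'selector' holding the
+	 * voltage actually chosen (mapped through list_voltage()), or -1
+	 * where it cannot be determined; it is only consumed by the
+	 * completion trace event.
+	 */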
+	trace_regulator_set_voltage(rdev_get_name(rdev), min_uV, max_uV);
+
+	if (rdev->desc->ops->set_voltage) {
+		ret = rdev->desc->ops->set_voltage(rdev, min_uV, max_uV,
+						   &selector);
+
+		if (rdev->desc->ops->list_voltage)
+			selector = rdev->desc->ops->list_voltage(rdev,
+								 selector);
+		else
+			selector = -1;
+	} else if (rdev->desc->ops->set_voltage_sel) {
+		int best_val = INT_MAX;
+		int i;
+
+		selector = 0;
+
+		/* Find the smallest voltage that falls within the specified
+		 * range.
+		 */
+		for (i = 0; i < rdev->desc->n_voltages; i++) {
+			ret = rdev->desc->ops->list_voltage(rdev, i);
+			if (ret < 0)
+				continue;
+
+			if (ret < best_val && ret >= min_uV && ret <= max_uV) {
+				best_val = ret;
+				selector = i;
+			}
+		}
+
+		if (best_val != INT_MAX) {
+			ret = rdev->desc->ops->set_voltage_sel(rdev, selector);
+			selector = best_val;
+		} else {
+			ret = -EINVAL;
+		}
+	} else {
+		ret = -EINVAL;
+	}
+
+	if (ret == 0)
+		_notifier_call_chain(rdev, REGULATOR_EVENT_VOLTAGE_CHANGE,
+				     NULL);
+
+	trace_regulator_set_voltage_complete(rdev_get_name(rdev), selector);
+
+	return ret;
+}
+
 /**
  * regulator_set_voltage - set regulator output voltage
  * @regulator: regulator source
@@ -1626,12 +1702,20 @@
 int regulator_set_voltage(struct regulator *regulator, int min_uV, int max_uV)
 {
 	struct regulator_dev *rdev = regulator->rdev;
-	int ret;
+	int ret = 0;
 
 	mutex_lock(&rdev->mutex);
 
+	/* If we're setting the same range as last time, the change
+	 * should be a no-op (some cpufreq implementations use the same
+	 * voltage for multiple frequencies, for example).
+	 */
+	if (regulator->min_uV == min_uV && regulator->max_uV == max_uV)
+		goto out;
+
 	/* sanity check */
-	if (!rdev->desc->ops->set_voltage) {
+	if (!rdev->desc->ops->set_voltage &&
+	    !rdev->desc->ops->set_voltage_sel) {
 		ret = -EINVAL;
 		goto out;
 	}
@@ -1642,18 +1726,76 @@
 		goto out;
 	regulator->min_uV = min_uV;
 	regulator->max_uV = max_uV;
-	ret = rdev->desc->ops->set_voltage(rdev, min_uV, max_uV);
+
+	ret = regulator_check_consumers(rdev, &min_uV, &max_uV);
+	if (ret < 0)
+		goto out;
+
+	ret = _regulator_do_set_voltage(rdev, min_uV, max_uV);
 
 out:
-	_notifier_call_chain(rdev, REGULATOR_EVENT_VOLTAGE_CHANGE, NULL);
 	mutex_unlock(&rdev->mutex);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(regulator_set_voltage);
 
+/**
+ * regulator_sync_voltage - re-apply last regulator output voltage
+ * @regulator: regulator source
+ *
+ * Re-apply the last configured voltage.  This is intended to be used
+ * where an external control source that the consumer is cooperating
+ * with has caused the configured voltage to change.
+ */
+int regulator_sync_voltage(struct regulator *regulator)
+{
+	struct regulator_dev *rdev = regulator->rdev;
+	int ret, min_uV, max_uV;
+
+	mutex_lock(&rdev->mutex);
+
+	if (!rdev->desc->ops->set_voltage &&
+	    !rdev->desc->ops->set_voltage_sel) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	/* This is only going to work if we've had a voltage configured. */
+	if (!regulator->min_uV && !regulator->max_uV) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	min_uV = regulator->min_uV;
+	max_uV = regulator->max_uV;
+
+	/* This should be a paranoia check... */
+	ret = regulator_check_voltage(rdev, &min_uV, &max_uV);
+	if (ret < 0)
+		goto out;
+
+	ret = regulator_check_consumers(rdev, &min_uV, &max_uV);
+	if (ret < 0)
+		goto out;
+
+	ret = _regulator_do_set_voltage(rdev, min_uV, max_uV);
+
+out:
+	mutex_unlock(&rdev->mutex);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(regulator_sync_voltage);
+
 static int _regulator_get_voltage(struct regulator_dev *rdev)
 {
-	/* sanity check */
+	int sel;
+
+	if (rdev->desc->ops->get_voltage_sel) {
+		sel = rdev->desc->ops->get_voltage_sel(rdev);
+		if (sel < 0)
+			return sel;
+		return rdev->desc->ops->list_voltage(rdev, sel);
+	}
 	if (rdev->desc->ops->get_voltage)
 		return rdev->desc->ops->get_voltage(rdev);
 	else
@@ -1880,21 +2022,20 @@
 		goto out;
 
 	/* get output voltage */
-	output_uV = rdev->desc->ops->get_voltage(rdev);
+	output_uV = _regulator_get_voltage(rdev);
 	if (output_uV <= 0) {
-		printk(KERN_ERR "%s: invalid output voltage found for %s\n",
-			__func__, rdev_get_name(rdev));
+		rdev_err(rdev, "invalid output voltage found\n");
 		goto out;
 	}
 
 	/* get input voltage */
-	if (rdev->supply && rdev->supply->desc->ops->get_voltage)
-		input_uV = rdev->supply->desc->ops->get_voltage(rdev->supply);
-	else
+	input_uV = 0;
+	if (rdev->supply)
+		input_uV = _regulator_get_voltage(rdev->supply);
+	if (input_uV <= 0)
 		input_uV = rdev->constraints->input_uV;
 	if (input_uV <= 0) {
-		printk(KERN_ERR "%s: invalid input voltage found for %s\n",
-			__func__, rdev_get_name(rdev));
+		rdev_err(rdev, "invalid input voltage found\n");
 		goto out;
 	}
 
@@ -1907,16 +2048,14 @@
 						 total_uA_load);
 	ret = regulator_check_mode(rdev, mode);
 	if (ret < 0) {
-		printk(KERN_ERR "%s: failed to get optimum mode for %s @"
-			" %d uA %d -> %d uV\n", __func__, rdev_get_name(rdev),
-			total_uA_load, input_uV, output_uV);
+		rdev_err(rdev, "failed to get optimum mode @ %d uA %d -> %d uV\n",
+			 total_uA_load, input_uV, output_uV);
 		goto out;
 	}
 
 	ret = rdev->desc->ops->set_mode(rdev, mode);
 	if (ret < 0) {
-		printk(KERN_ERR "%s: failed to set optimum mode %x for %s\n",
-			__func__, mode, rdev_get_name(rdev));
+		rdev_err(rdev, "failed to set optimum mode %x\n", mode);
 		goto out;
 	}
 	ret = mode;
@@ -2047,7 +2186,7 @@
 	return 0;
 
 err:
-	printk(KERN_ERR "Failed to enable %s: %d\n", consumers[i].supply, ret);
+	pr_err("Failed to enable %s: %d\n", consumers[i].supply, ret);
 	for (--i; i >= 0; --i)
 		regulator_disable(consumers[i].consumer);
 
@@ -2082,8 +2221,7 @@
 	return 0;
 
 err:
-	printk(KERN_ERR "Failed to disable %s: %d\n", consumers[i].supply,
-	       ret);
+	pr_err("Failed to disable %s: %d\n", consumers[i].supply, ret);
 	for (--i; i >= 0; --i)
 		regulator_enable(consumers[i].consumer);
 
@@ -2166,7 +2304,7 @@
 	int			status = 0;
 
 	/* some attributes need specific methods to be displayed */
-	if (ops->get_voltage) {
+	if (ops->get_voltage || ops->get_voltage_sel) {
 		status = device_create_file(dev, &dev_attr_microvolts);
 		if (status < 0)
 			return status;
@@ -2207,7 +2345,7 @@
 		return status;
 
 	/* constraints need specific supporting methods */
-	if (ops->set_voltage) {
+	if (ops->set_voltage || ops->set_voltage_sel) {
 		status = device_create_file(dev, &dev_attr_min_microvolts);
 		if (status < 0)
 			return status;
@@ -2271,6 +2409,23 @@
 	return status;
 }
 
+static void rdev_init_debugfs(struct regulator_dev *rdev)
+{
+#ifdef CONFIG_DEBUG_FS
+	rdev->debugfs = debugfs_create_dir(rdev_get_name(rdev), debugfs_root);
+	if (IS_ERR(rdev->debugfs) || !rdev->debugfs) {
+		rdev_warn(rdev, "Failed to create debugfs directory\n");
+		rdev->debugfs = NULL;
+		return;
+	}
+
+	debugfs_create_u32("use_count", 0444, rdev->debugfs,
+			   &rdev->use_count);
+	debugfs_create_u32("open_count", 0444, rdev->debugfs,
+			   &rdev->open_count);
+#endif
+}
+
 /**
  * regulator_register - register regulator
  * @regulator_desc: regulator to register
@@ -2282,7 +2437,7 @@
  * Returns 0 on success.
  */
 struct regulator_dev *regulator_register(struct regulator_desc *regulator_desc,
-	struct device *dev, struct regulator_init_data *init_data,
+	struct device *dev, const struct regulator_init_data *init_data,
 	void *driver_data)
 {
 	static atomic_t regulator_no = ATOMIC_INIT(0);
@@ -2302,6 +2457,22 @@
 	if (!init_data)
 		return ERR_PTR(-EINVAL);
 
+	/* Only one of each should be implemented */
+	WARN_ON(regulator_desc->ops->get_voltage &&
+		regulator_desc->ops->get_voltage_sel);
+	WARN_ON(regulator_desc->ops->set_voltage &&
+		regulator_desc->ops->set_voltage_sel);
+
+	/* If we're using selectors we must implement list_voltage. */
+	if (regulator_desc->ops->get_voltage_sel &&
+	    !regulator_desc->ops->list_voltage) {
+		return ERR_PTR(-EINVAL);
+	}
+	if (regulator_desc->ops->set_voltage_sel &&
+	    !regulator_desc->ops->list_voltage) {
+		return ERR_PTR(-EINVAL);
+	}
+
 	rdev = kzalloc(sizeof(struct regulator_dev), GFP_KERNEL);
 	if (rdev == NULL)
 		return ERR_PTR(-ENOMEM);
@@ -2399,6 +2570,8 @@
 	}
 
 	list_add(&rdev->list, &regulator_list);
+
+	rdev_init_debugfs(rdev);
 out:
 	mutex_unlock(&regulator_list_mutex);
 	return rdev;
@@ -2431,12 +2604,16 @@
 		return;
 
 	mutex_lock(&regulator_list_mutex);
+#ifdef CONFIG_DEBUG_FS
+	debugfs_remove_recursive(rdev->debugfs);
+#endif
 	WARN_ON(rdev->open_count);
 	unset_regulator_supplies(rdev);
 	list_del(&rdev->list);
 	if (rdev->supply)
 		sysfs_remove_link(&rdev->dev.kobj, "supply");
 	device_unregister(&rdev->dev);
+	kfree(rdev->constraints);
 	mutex_unlock(&regulator_list_mutex);
 }
 EXPORT_SYMBOL_GPL(regulator_unregister);
@@ -2465,8 +2642,7 @@
 		mutex_unlock(&rdev->mutex);
 
 		if (ret < 0) {
-			printk(KERN_ERR "%s: failed to prepare %s\n",
-				__func__, rdev_get_name(rdev));
+			rdev_err(rdev, "failed to prepare\n");
 			goto out;
 		}
 	}
@@ -2572,10 +2748,16 @@
 {
 	int ret;
 
-	printk(KERN_INFO "regulator: core version %s\n", REGULATOR_VERSION);
-
 	ret = class_register(&regulator_class);
 
+#ifdef CONFIG_DEBUG_FS
+	debugfs_root = debugfs_create_dir("regulator", NULL);
+	if (IS_ERR(debugfs_root) || !debugfs_root) {
+		pr_warn("regulator: Failed to create debugfs directory\n");
+		debugfs_root = NULL;
+	}
+#endif
+
 	regulator_dummy_init();
 
 	return ret;
@@ -2590,7 +2772,6 @@
 	struct regulator_ops *ops;
 	struct regulation_constraints *c;
 	int enabled, ret;
-	const char *name;
 
 	mutex_lock(&regulator_list_mutex);
 
@@ -2602,8 +2783,6 @@
 		ops = rdev->desc->ops;
 		c = rdev->constraints;
 
-		name = rdev_get_name(rdev);
-
 		if (!ops->disable || (c && c->always_on))
 			continue;
 
@@ -2624,13 +2803,10 @@
 		if (has_full_constraints) {
 			/* We log since this may kill the system if it
 			 * goes wrong. */
-			printk(KERN_INFO "%s: disabling %s\n",
-			       __func__, name);
+			rdev_info(rdev, "disabling\n");
 			ret = ops->disable(rdev);
 			if (ret != 0) {
-				printk(KERN_ERR
-				       "%s: couldn't disable %s: %d\n",
-				       __func__, name, ret);
+				rdev_err(rdev, "couldn't disable: %d\n", ret);
 			}
 		} else {
 			/* The intention is that in future we will
@@ -2638,9 +2814,7 @@
 			 * so warn even if we aren't going to do
 			 * anything here.
 			 */
-			printk(KERN_WARNING
-			       "%s: incomplete constraints, leaving %s on\n",
-			       __func__, name);
+			rdev_warn(rdev, "incomplete constraints, leaving on\n");
 		}
 
 unlock:
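
A rough consumer-side sketch of how the regulator_set_voltage()/regulator_sync_voltage() pair reworked above is meant to be used (this is not part of the patch; the device, the "vcore" supply name and the 1.2 V request are illustrative). regulator_set_voltage() now records the requested range and returns early when the same range is requested again, and regulator_sync_voltage() re-applies the recorded range after an external control source has moved the output:

#include <linux/err.h>
#include <linux/device.h>
#include <linux/regulator/consumer.h>

static int example_restore_vcore(struct device *dev)
{
	struct regulator *vcore;
	int ret;

	vcore = regulator_get(dev, "vcore");	/* illustrative supply name */
	if (IS_ERR(vcore))
		return PTR_ERR(vcore);

	/* Request 1.2 V; repeating the same range later is now a no-op. */
	ret = regulator_set_voltage(vcore, 1200000, 1200000);
	if (ret == 0)
		/* e.g. after firmware changed the rail behind our back */
		ret = regulator_sync_voltage(vcore);

	regulator_put(vcore);
	return ret;
}
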
diff --git a/drivers/regulator/da903x.c b/drivers/regulator/da903x.c
index f8c4661..362e082 100644
--- a/drivers/regulator/da903x.c
+++ b/drivers/regulator/da903x.c
@@ -107,7 +107,7 @@
 
 /* DA9030/DA9034 common operations */
 static int da903x_set_ldo_voltage(struct regulator_dev *rdev,
-				  int min_uV, int max_uV)
+				  int min_uV, int max_uV, unsigned *selector)
 {
 	struct da903x_regulator_info *info = rdev_get_drvdata(rdev);
 	struct device *da9034_dev = to_da903x_dev(rdev);
@@ -119,6 +119,7 @@
 	}
 
 	val = (min_uV - info->min_uV + info->step_uV - 1) / info->step_uV;
+	*selector = val;
 	val <<= info->vol_shift;
 	mask = ((1 << info->vol_nbits) - 1)  << info->vol_shift;
 
@@ -187,7 +188,8 @@
 
 /* DA9030 specific operations */
 static int da9030_set_ldo1_15_voltage(struct regulator_dev *rdev,
-				       int min_uV, int max_uV)
+				      int min_uV, int max_uV,
+				      unsigned *selector)
 {
 	struct da903x_regulator_info *info = rdev_get_drvdata(rdev);
 	struct device *da903x_dev = to_da903x_dev(rdev);
@@ -200,6 +202,7 @@
 	}
 
 	val = (min_uV - info->min_uV + info->step_uV - 1) / info->step_uV;
+	*selector = val;
 	val <<= info->vol_shift;
 	mask = ((1 << info->vol_nbits) - 1)  << info->vol_shift;
 	val |= DA9030_LDO_UNLOCK; /* have to set UNLOCK bits */
@@ -214,7 +217,8 @@
 }
 
 static int da9030_set_ldo14_voltage(struct regulator_dev *rdev,
-				  int min_uV, int max_uV)
+				    int min_uV, int max_uV,
+				    unsigned *selector)
 {
 	struct da903x_regulator_info *info = rdev_get_drvdata(rdev);
 	struct device *da903x_dev = to_da903x_dev(rdev);
@@ -234,6 +238,7 @@
 		val = (min_uV - thresh + info->step_uV - 1) / info->step_uV;
 	}
 
+	*selector = val;
 	val <<= info->vol_shift;
 	mask = ((1 << info->vol_nbits) - 1)  << info->vol_shift;
 
@@ -263,7 +268,7 @@
 
 /* DA9034 specific operations */
 static int da9034_set_dvc_voltage(struct regulator_dev *rdev,
-				  int min_uV, int max_uV)
+				  int min_uV, int max_uV, unsigned *selector)
 {
 	struct da903x_regulator_info *info = rdev_get_drvdata(rdev);
 	struct device *da9034_dev = to_da903x_dev(rdev);
@@ -276,6 +281,7 @@
 	}
 
 	val = (min_uV - info->min_uV + info->step_uV - 1) / info->step_uV;
+	*selector = val;
 	val <<= info->vol_shift;
 	mask = ((1 << info->vol_nbits) - 1)  << info->vol_shift;
 
@@ -289,7 +295,7 @@
 }
 
 static int da9034_set_ldo12_voltage(struct regulator_dev *rdev,
-				    int min_uV, int max_uV)
+				    int min_uV, int max_uV, unsigned *selector)
 {
 	struct da903x_regulator_info *info = rdev_get_drvdata(rdev);
 	struct device *da9034_dev = to_da903x_dev(rdev);
@@ -302,6 +308,7 @@
 
 	val = (min_uV - info->min_uV + info->step_uV - 1) / info->step_uV;
 	val = (val >= 20) ? val - 12 : ((val > 7) ? 8 : val);
+	*selector = val;
 	val <<= info->vol_shift;
 	mask = ((1 << info->vol_nbits) - 1)  << info->vol_shift;
 
diff --git a/drivers/regulator/isl6271a-regulator.c b/drivers/regulator/isl6271a-regulator.c
index b8cc638..e4b3592 100644
--- a/drivers/regulator/isl6271a-regulator.c
+++ b/drivers/regulator/isl6271a-regulator.c
@@ -58,7 +58,9 @@
 	return data;
 }
 
-static int isl6271a_set_voltage(struct regulator_dev *dev, int minuV, int maxuV)
+static int isl6271a_set_voltage(struct regulator_dev *dev,
+				int minuV, int maxuV,
+				unsigned *selector)
 {
 	struct isl_pmic *pmic = rdev_get_drvdata(dev);
 	int vsel, err, data;
@@ -78,6 +80,8 @@
 	/* Convert the microvolts to data for the chip */
 	data = (vsel - ISL6271A_VOLTAGE_MIN) / ISL6271A_VOLTAGE_STEP;
 
+	*selector = data;
+
 	mutex_lock(&pmic->mtx);
 
 	err = i2c_smbus_write_byte(pmic->client, data);
@@ -169,7 +173,7 @@
 						init_data, pmic);
 		if (IS_ERR(pmic->rdev[i])) {
 			dev_err(&i2c->dev, "failed to register %s\n", id->name);
-			err = PTR_ERR(pmic->rdev);
+			err = PTR_ERR(pmic->rdev[i]);
 			goto error;
 		}
 	}
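
The driver updates above and below all make the same mechanical change: the set_voltage() callback gains an unsigned *selector out-parameter through which the driver reports the register step it picked, so the core can translate it via list_voltage() for the new trace events. A minimal sketch of the converted callback shape, under assumed values (hypothetical driver with a linear 600000 uV + sel * 12500 uV range; not taken from any driver in this series):

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/regulator/driver.h>

static int example_set_voltage(struct regulator_dev *rdev,
			       int min_uV, int max_uV, unsigned *selector)
{
	unsigned sel;

	if (min_uV < 600000)
		min_uV = 600000;
	sel = DIV_ROUND_UP(min_uV - 600000, 12500);
	if (sel > 79 || 600000 + sel * 12500 > max_uV)
		return -EINVAL;

	*selector = sel;	/* report the chosen step back to the core */
	/* ... write 'sel' to the chip's voltage-select register here ... */
	return 0;
}
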
diff --git a/drivers/regulator/lp3971.c b/drivers/regulator/lp3971.c
index 3bb82b6..0f22ef1 100644
--- a/drivers/regulator/lp3971.c
+++ b/drivers/regulator/lp3971.c
@@ -168,7 +168,8 @@
 }
 
 static int lp3971_ldo_set_voltage(struct regulator_dev *dev,
-				  int min_uV, int max_uV)
+				  int min_uV, int max_uV,
+				  unsigned int *selector)
 {
 	struct lp3971 *lp3971 = rdev_get_drvdata(dev);
 	int ldo = rdev_get_id(dev) - LP3971_LDO1;
@@ -187,6 +188,8 @@
 	if (val > LDO_VOL_MAX_IDX || vol_map[val] > max_vol)
 		return -EINVAL;
 
+	*selector = val;
+
 	return lp3971_set_bits(lp3971, LP3971_LDO_VOL_CONTR_REG(ldo),
 			LDO_VOL_CONTR_MASK << LDO_VOL_CONTR_SHIFT(ldo),
 			val << LDO_VOL_CONTR_SHIFT(ldo));
@@ -256,7 +259,8 @@
 }
 
 static int lp3971_dcdc_set_voltage(struct regulator_dev *dev,
-				  int min_uV, int max_uV)
+				   int min_uV, int max_uV,
+				   unsigned int *selector)
 {
 	struct lp3971 *lp3971 = rdev_get_drvdata(dev);
 	int buck = rdev_get_id(dev) - LP3971_DCDC1;
@@ -277,6 +281,8 @@
 	if (val > BUCK_TARGET_VOL_MAX_IDX || vol_map[val] > max_vol)
 		return -EINVAL;
 
+	*selector = val;
+
 	ret = lp3971_set_bits(lp3971, LP3971_BUCK_TARGET_VOL1_REG(buck),
 	       BUCK_TARGET_VOL_MASK, val);
 	if (ret)
diff --git a/drivers/regulator/lp3972.c b/drivers/regulator/lp3972.c
index e07062f..6aa1b50 100644
--- a/drivers/regulator/lp3972.c
+++ b/drivers/regulator/lp3972.c
@@ -292,7 +292,8 @@
 }
 
 static int lp3972_ldo_set_voltage(struct regulator_dev *dev,
-				  int min_uV, int max_uV)
+				  int min_uV, int max_uV,
+				  unsigned int *selector)
 {
 	struct lp3972 *lp3972 = rdev_get_drvdata(dev);
 	int ldo = rdev_get_id(dev) - LP3972_LDO1;
@@ -313,6 +314,8 @@
 	if (val > LP3972_LDO_VOL_MAX_IDX(ldo) || vol_map[val] > max_vol)
 		return -EINVAL;
 
+	*selector = val;
+
 	shift = LP3972_LDO_VOL_CONTR_SHIFT(ldo);
 	ret = lp3972_set_bits(lp3972, LP3972_LDO_VOL_CONTR_REG(ldo),
 		LP3972_LDO_VOL_MASK(ldo) << shift, val << shift);
@@ -416,7 +419,8 @@
 }
 
 static int lp3972_dcdc_set_voltage(struct regulator_dev *dev,
-				  int min_uV, int max_uV)
+				   int min_uV, int max_uV,
+				   unsigned int *selector)
 {
 	struct lp3972 *lp3972 = rdev_get_drvdata(dev);
 	int buck = rdev_get_id(dev) - LP3972_DCDC1;
@@ -438,6 +442,8 @@
 	    vol_map[val] > max_vol)
 		return -EINVAL;
 
+	*selector = val;
+
 	ret = lp3972_set_bits(lp3972, LP3972_BUCK_VOL1_REG(buck),
 				LP3972_BUCK_VOL_MASK, val);
 	if (ret)
diff --git a/drivers/regulator/max1586.c b/drivers/regulator/max1586.c
index 559cfa2..3f49512 100644
--- a/drivers/regulator/max1586.c
+++ b/drivers/regulator/max1586.c
@@ -63,12 +63,12 @@
 	return max1586->min_uV + (selector * range_uV / MAX1586_V3_MAX_VSEL);
 }
 
-static int max1586_v3_set(struct regulator_dev *rdev, int min_uV, int max_uV)
+static int max1586_v3_set(struct regulator_dev *rdev, int min_uV, int max_uV,
+			  unsigned *selector)
 {
 	struct max1586_data *max1586 = rdev_get_drvdata(rdev);
 	struct i2c_client *client = max1586->client;
 	unsigned range_uV = max1586->max_uV - max1586->min_uV;
-	unsigned selector;
 	u8 v3_prog;
 
 	if (min_uV > max1586->max_uV || max_uV < max1586->min_uV)
@@ -76,15 +76,15 @@
 	if (min_uV < max1586->min_uV)
 		min_uV = max1586->min_uV;
 
-	selector = ((min_uV - max1586->min_uV) * MAX1586_V3_MAX_VSEL +
+	*selector = ((min_uV - max1586->min_uV) * MAX1586_V3_MAX_VSEL +
 			range_uV - 1) / range_uV;
-	if (max1586_v3_calc_voltage(max1586, selector) > max_uV)
+	if (max1586_v3_calc_voltage(max1586, *selector) > max_uV)
 		return -EINVAL;
 
 	dev_dbg(&client->dev, "changing voltage v3 to %dmv\n",
-		max1586_v3_calc_voltage(max1586, selector) / 1000);
+		max1586_v3_calc_voltage(max1586, *selector) / 1000);
 
-	v3_prog = I2C_V3_SELECT | (u8) selector;
+	v3_prog = I2C_V3_SELECT | (u8) *selector;
 	return i2c_smbus_write_byte(client, v3_prog);
 }
 
@@ -110,10 +110,10 @@
 	return voltages_uv[selector];
 }
 
-static int max1586_v6_set(struct regulator_dev *rdev, int min_uV, int max_uV)
+static int max1586_v6_set(struct regulator_dev *rdev, int min_uV, int max_uV,
+			  unsigned int *selector)
 {
 	struct i2c_client *client = rdev_get_drvdata(rdev);
-	unsigned selector;
 	u8 v6_prog;
 
 	if (min_uV < MAX1586_V6_MIN_UV || min_uV > MAX1586_V6_MAX_UV)
@@ -122,21 +122,21 @@
 		return -EINVAL;
 
 	if (min_uV < 1800000)
-		selector = 0;
+		*selector = 0;
 	else if (min_uV < 2500000)
-		selector = 1;
+		*selector = 1;
 	else if (min_uV < 3000000)
-		selector = 2;
+		*selector = 2;
 	else if (min_uV >= 3000000)
-		selector = 3;
+		*selector = 3;
 
-	if (max1586_v6_calc_voltage(selector) > max_uV)
+	if (max1586_v6_calc_voltage(*selector) > max_uV)
 		return -EINVAL;
 
 	dev_dbg(&client->dev, "changing voltage v6 to %dmv\n",
-		max1586_v6_calc_voltage(selector) / 1000);
+		max1586_v6_calc_voltage(*selector) / 1000);
 
-	v6_prog = I2C_V6_SELECT | (u8) selector;
+	v6_prog = I2C_V6_SELECT | (u8) *selector;
 	return i2c_smbus_write_byte(client, v6_prog);
 }
 
diff --git a/drivers/regulator/max8649.c b/drivers/regulator/max8649.c
index 6b60a9c..30eb9e5 100644
--- a/drivers/regulator/max8649.c
+++ b/drivers/regulator/max8649.c
@@ -155,7 +155,7 @@
 }
 
 static int max8649_set_voltage(struct regulator_dev *rdev,
-			       int min_uV, int max_uV)
+			       int min_uV, int max_uV, unsigned *selector)
 {
 	struct max8649_regulator_info *info = rdev_get_drvdata(rdev);
 	unsigned char data, mask;
@@ -168,6 +168,7 @@
 	data = (min_uV - MAX8649_DCDC_VMIN + MAX8649_DCDC_STEP - 1)
 		/ MAX8649_DCDC_STEP;
 	mask = MAX8649_VOL_MASK;
+	*selector = data & mask;
 
 	return max8649_set_bits(info->i2c, info->vol_reg, mask, data);
 }
diff --git a/drivers/regulator/max8660.c b/drivers/regulator/max8660.c
index c570e6e..33f5d9a 100644
--- a/drivers/regulator/max8660.c
+++ b/drivers/regulator/max8660.c
@@ -141,7 +141,8 @@
 	return MAX8660_DCDC_MIN_UV + selector * MAX8660_DCDC_STEP;
 }
 
-static int max8660_dcdc_set(struct regulator_dev *rdev, int min_uV, int max_uV)
+static int max8660_dcdc_set(struct regulator_dev *rdev, int min_uV, int max_uV,
+			    unsigned int *s)
 {
 	struct max8660 *max8660 = rdev_get_drvdata(rdev);
 	u8 reg, selector, bits;
@@ -154,6 +155,7 @@
 
 	selector = (min_uV - (MAX8660_DCDC_MIN_UV - MAX8660_DCDC_STEP + 1))
 			/ MAX8660_DCDC_STEP;
+	*s = selector;
 
 	ret = max8660_dcdc_list(rdev, selector);
 	if (ret < 0 || ret > max_uV)
@@ -196,7 +198,8 @@
 	return MAX8660_LDO5_MIN_UV + selector * MAX8660_LDO5_STEP;
 }
 
-static int max8660_ldo5_set(struct regulator_dev *rdev, int min_uV, int max_uV)
+static int max8660_ldo5_set(struct regulator_dev *rdev, int min_uV, int max_uV,
+			    unsigned int *s)
 {
 	struct max8660 *max8660 = rdev_get_drvdata(rdev);
 	u8 selector;
@@ -213,6 +216,8 @@
 	if (ret < 0 || ret > max_uV)
 		return -EINVAL;
 
+	*s = selector;
+
 	ret = max8660_write(max8660, MAX8660_MDTV2, 0, selector);
 	if (ret)
 		return ret;
@@ -270,7 +275,8 @@
 	return MAX8660_LDO67_MIN_UV + selector * MAX8660_LDO67_STEP;
 }
 
-static int max8660_ldo67_set(struct regulator_dev *rdev, int min_uV, int max_uV)
+static int max8660_ldo67_set(struct regulator_dev *rdev, int min_uV,
+			     int max_uV, unsigned int *s)
 {
 	struct max8660 *max8660 = rdev_get_drvdata(rdev);
 	u8 selector;
@@ -288,6 +294,8 @@
 	if (ret < 0 || ret > max_uV)
 		return -EINVAL;
 
+	*s = selector;
+
 	if (rdev_get_id(rdev) == MAX8660_V6)
 		return max8660_write(max8660, MAX8660_L12VCR, 0xf0, selector);
 	else
diff --git a/drivers/regulator/max8925-regulator.c b/drivers/regulator/max8925-regulator.c
index 552cad8..8ae1475 100644
--- a/drivers/regulator/max8925-regulator.c
+++ b/drivers/regulator/max8925-regulator.c
@@ -55,7 +55,7 @@
 }
 
 static int max8925_set_voltage(struct regulator_dev *rdev,
-			       int min_uV, int max_uV)
+			       int min_uV, int max_uV, unsigned int *selector)
 {
 	struct max8925_regulator_info *info = rdev_get_drvdata(rdev);
 	unsigned char data, mask;
@@ -66,6 +66,7 @@
 		return -EINVAL;
 	}
 	data = (min_uV - info->min_uV + info->step_uV - 1) / info->step_uV;
+	*selector = data;
 	data <<= info->vol_shift;
 	mask = ((1 << info->vol_nbits) - 1) << info->vol_shift;
 
diff --git a/drivers/regulator/max8952.c b/drivers/regulator/max8952.c
index 0d5dda4..a8f4ecf 100644
--- a/drivers/regulator/max8952.c
+++ b/drivers/regulator/max8952.c
@@ -133,7 +133,7 @@
 }
 
 static int max8952_set_voltage(struct regulator_dev *rdev,
-				int min_uV, int max_uV)
+			       int min_uV, int max_uV, unsigned *selector)
 {
 	struct max8952_data *max8952 = rdev_get_drvdata(rdev);
 	s8 vid = -1, i;
@@ -156,6 +156,7 @@
 	if (vid >= 0 && vid < MAX8952_NUM_DVS_MODE) {
 		max8952->vid0 = (vid % 2 == 1);
 		max8952->vid1 = (((vid >> 1) % 2) == 1);
+		*selector = vid;
 		gpio_set_value(max8952->pdata->gpio_vid0, max8952->vid0);
 		gpio_set_value(max8952->pdata->gpio_vid1, max8952->vid1);
 	} else
diff --git a/drivers/regulator/max8998.c b/drivers/regulator/max8998.c
index 5c20756..7568df6 100644
--- a/drivers/regulator/max8998.c
+++ b/drivers/regulator/max8998.c
@@ -304,7 +304,7 @@
 }
 
 static int max8998_set_voltage_ldo(struct regulator_dev *rdev,
-				int min_uV, int max_uV)
+				   int min_uV, int max_uV, unsigned *selector)
 {
 	struct max8998_data *max8998 = rdev_get_drvdata(rdev);
 	struct i2c_client *i2c = max8998->iodev->i2c;
@@ -331,6 +331,8 @@
 	if (desc->min + desc->step*i > max_vol)
 		return -EINVAL;
 
+	*selector = i;
+
 	ret = max8998_get_voltage_register(rdev, &reg, &shift, &mask);
 	if (ret)
 		return ret;
@@ -352,7 +354,7 @@
 }
 
 static int max8998_set_voltage_buck(struct regulator_dev *rdev,
-				    int min_uV, int max_uV)
+				    int min_uV, int max_uV, unsigned *selector)
 {
 	struct max8998_data *max8998 = rdev_get_drvdata(rdev);
 	struct max8998_platform_data *pdata =
@@ -384,6 +386,8 @@
 	if (desc->min + desc->step*i > max_vol)
 		return -EINVAL;
 
+	*selector = i;
+
 	ret = max8998_get_voltage_register(rdev, &reg, &shift, &mask);
 	if (ret)
 		return ret;
diff --git a/drivers/regulator/mc13783-regulator.c b/drivers/regulator/mc13783-regulator.c
index ecd99f5..3e5d0c3 100644
--- a/drivers/regulator/mc13783-regulator.c
+++ b/drivers/regulator/mc13783-regulator.c
@@ -1,6 +1,7 @@
 /*
  * Regulator Driver for Freescale MC13783 PMIC
  *
+ * Copyright 2010 Yong Shen <yong.shen@linaro.org>
  * Copyright (C) 2008 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
  * Copyright 2009 Alberto Panizzo <maramaopercheseimorto@gmail.com>
  *
@@ -17,6 +18,7 @@
 #include <linux/slab.h>
 #include <linux/init.h>
 #include <linux/err.h>
+#include "mc13xxx.h"
 
 #define MC13783_REG_SWITCHERS5			29
 #define MC13783_REG_SWITCHERS5_SW3EN			(1 << 20)
@@ -89,154 +91,106 @@
 #define MC13783_REG_POWERMISC_PWGTSPI_M			(3 << 15)
 
 
-struct mc13783_regulator {
-	struct regulator_desc desc;
-	int reg;
-	int enable_bit;
-	int vsel_reg;
-	int vsel_shift;
-	int vsel_mask;
-	int const *voltages;
-};
-
 /* Voltage Values */
-static const int const mc13783_sw3_val[] = {
+static const int mc13783_sw3_val[] = {
 	5000000, 5000000, 5000000, 5500000,
 };
 
-static const int const mc13783_vaudio_val[] = {
+static const int mc13783_vaudio_val[] = {
 	2775000,
 };
 
-static const int const mc13783_viohi_val[] = {
+static const int mc13783_viohi_val[] = {
 	2775000,
 };
 
-static const int const mc13783_violo_val[] = {
+static const int mc13783_violo_val[] = {
 	1200000, 1300000, 1500000, 1800000,
 };
 
-static const int const mc13783_vdig_val[] = {
+static const int mc13783_vdig_val[] = {
 	1200000, 1300000, 1500000, 1800000,
 };
 
-static const int const mc13783_vgen_val[] = {
+static const int mc13783_vgen_val[] = {
 	1200000, 1300000, 1500000, 1800000,
 	1100000, 2000000, 2775000, 2400000,
 };
 
-static const int const mc13783_vrfdig_val[] = {
+static const int mc13783_vrfdig_val[] = {
 	1200000, 1500000, 1800000, 1875000,
 };
 
-static const int const mc13783_vrfref_val[] = {
+static const int mc13783_vrfref_val[] = {
 	2475000, 2600000, 2700000, 2775000,
 };
 
-static const int const mc13783_vrfcp_val[] = {
+static const int mc13783_vrfcp_val[] = {
 	2700000, 2775000,
 };
 
-static const int const mc13783_vsim_val[] = {
+static const int mc13783_vsim_val[] = {
 	1800000, 2900000, 3000000,
 };
 
-static const int const mc13783_vesim_val[] = {
+static const int mc13783_vesim_val[] = {
 	1800000, 2900000,
 };
 
-static const int const mc13783_vcam_val[] = {
+static const int mc13783_vcam_val[] = {
 	1500000, 1800000, 2500000, 2550000,
 	2600000, 2750000, 2800000, 3000000,
 };
 
-static const int const mc13783_vrfbg_val[] = {
+static const int mc13783_vrfbg_val[] = {
 	1250000,
 };
 
-static const int const mc13783_vvib_val[] = {
+static const int mc13783_vvib_val[] = {
 	1300000, 1800000, 2000000, 3000000,
 };
 
-static const int const mc13783_vmmc_val[] = {
+static const int mc13783_vmmc_val[] = {
 	1600000, 1800000, 2000000, 2600000,
 	2700000, 2800000, 2900000, 3000000,
 };
 
-static const int const mc13783_vrf_val[] = {
+static const int mc13783_vrf_val[] = {
 	1500000, 1875000, 2700000, 2775000,
 };
 
-static const int const mc13783_gpo_val[] = {
+static const int mc13783_gpo_val[] = {
 	3100000,
 };
 
-static const int const mc13783_pwgtdrv_val[] = {
+static const int mc13783_pwgtdrv_val[] = {
 	5500000,
 };
 
-static struct regulator_ops mc13783_regulator_ops;
-static struct regulator_ops mc13783_fixed_regulator_ops;
 static struct regulator_ops mc13783_gpo_regulator_ops;
 
-#define MC13783_DEFINE(prefix, _name, _reg, _vsel_reg, _voltages)	\
-	[MC13783_ ## prefix ## _ ## _name] = {				\
-		.desc = {						\
-			.name = #prefix "_" #_name,			\
-			.n_voltages = ARRAY_SIZE(_voltages),		\
-			.ops = &mc13783_regulator_ops,			\
-			.type = REGULATOR_VOLTAGE,			\
-			.id = MC13783_ ## prefix ## _ ## _name,		\
-			.owner = THIS_MODULE,				\
-		},							\
-		.reg = MC13783_REG_ ## _reg,				\
-		.enable_bit = MC13783_REG_ ## _reg ## _ ## _name ## EN,	\
-		.vsel_reg = MC13783_REG_ ## _vsel_reg,			\
-		.vsel_shift = MC13783_REG_ ## _vsel_reg ## _ ## _name ## VSEL,\
-		.vsel_mask = MC13783_REG_ ## _vsel_reg ## _ ## _name ## VSEL_M,\
-		.voltages =  _voltages,					\
-	}
+#define MC13783_DEFINE(prefix, name, reg, vsel_reg, voltages)	\
+	MC13xxx_DEFINE(MC13783_REG_, name, reg, vsel_reg, voltages, \
+			mc13xxx_regulator_ops)
 
-#define MC13783_FIXED_DEFINE(prefix, _name, _reg, _voltages)		\
-	[MC13783_ ## prefix ## _ ## _name] = {				\
-		.desc = {						\
-			.name = #prefix "_" #_name,			\
-			.n_voltages = ARRAY_SIZE(_voltages),		\
-			.ops = &mc13783_fixed_regulator_ops,		\
-			.type = REGULATOR_VOLTAGE,			\
-			.id = MC13783_ ## prefix ## _ ## _name,		\
-			.owner = THIS_MODULE,				\
-		},							\
-		.reg = MC13783_REG_ ## _reg,				\
-		.enable_bit = MC13783_REG_ ## _reg ## _ ## _name ## EN,	\
-		.voltages =  _voltages,					\
-	}
+#define MC13783_FIXED_DEFINE(prefix, name, reg, voltages)		\
+	MC13xxx_FIXED_DEFINE(MC13783_REG_, name, reg, voltages, \
+			mc13xxx_fixed_regulator_ops)
 
-#define MC13783_GPO_DEFINE(prefix, _name, _reg,  _voltages)		\
-	[MC13783_ ## prefix ## _ ## _name] = {				\
-		.desc = {						\
-			.name = #prefix "_" #_name,			\
-			.n_voltages = ARRAY_SIZE(_voltages),		\
-			.ops = &mc13783_gpo_regulator_ops,		\
-			.type = REGULATOR_VOLTAGE,			\
-			.id = MC13783_ ## prefix ## _ ## _name,		\
-			.owner = THIS_MODULE,				\
-		},							\
-		.reg = MC13783_REG_ ## _reg,				\
-		.enable_bit = MC13783_REG_ ## _reg ## _ ## _name ## EN,	\
-		.voltages =  _voltages,					\
-	}
+#define MC13783_GPO_DEFINE(prefix, name, reg, voltages)		\
+	MC13xxx_GPO_DEFINE(MC13783_REG_, name, reg, voltages, \
+			mc13783_gpo_regulator_ops)
 
 #define MC13783_DEFINE_SW(_name, _reg, _vsel_reg, _voltages)		\
-	MC13783_DEFINE(SW, _name, _reg, _vsel_reg, _voltages)
+	MC13783_DEFINE(REG, _name, _reg, _vsel_reg, _voltages)
 #define MC13783_DEFINE_REGU(_name, _reg, _vsel_reg, _voltages)		\
-	MC13783_DEFINE(REGU, _name, _reg, _vsel_reg, _voltages)
+	MC13783_DEFINE(REG, _name, _reg, _vsel_reg, _voltages)
 
-static struct mc13783_regulator mc13783_regulators[] = {
+static struct mc13xxx_regulator mc13783_regulators[] = {
 	MC13783_DEFINE_SW(SW3, SWITCHERS5, SWITCHERS5, mc13783_sw3_val),
 
-	MC13783_FIXED_DEFINE(REGU, VAUDIO, REGULATORMODE0, mc13783_vaudio_val),
-	MC13783_FIXED_DEFINE(REGU, VIOHI, REGULATORMODE0, mc13783_viohi_val),
+	MC13783_FIXED_DEFINE(REG, VAUDIO, REGULATORMODE0, mc13783_vaudio_val),
+	MC13783_FIXED_DEFINE(REG, VIOHI, REGULATORMODE0, mc13783_viohi_val),
 	MC13783_DEFINE_REGU(VIOLO, REGULATORMODE0, REGULATORSETTING0,	\
 			    mc13783_violo_val),
 	MC13783_DEFINE_REGU(VDIG, REGULATORMODE0, REGULATORSETTING0,	\
@@ -255,7 +209,7 @@
 			    mc13783_vesim_val),
 	MC13783_DEFINE_REGU(VCAM, REGULATORMODE1, REGULATORSETTING0,	\
 			    mc13783_vcam_val),
-	MC13783_FIXED_DEFINE(REGU, VRFBG, REGULATORMODE1, mc13783_vrfbg_val),
+	MC13783_FIXED_DEFINE(REG, VRFBG, REGULATORMODE1, mc13783_vrfbg_val),
 	MC13783_DEFINE_REGU(VVIB, REGULATORMODE1, REGULATORSETTING1,	\
 			    mc13783_vvib_val),
 	MC13783_DEFINE_REGU(VRF1, REGULATORMODE1, REGULATORSETTING1,	\
@@ -266,215 +220,24 @@
 			    mc13783_vmmc_val),
 	MC13783_DEFINE_REGU(VMMC2, REGULATORMODE1, REGULATORSETTING1,	\
 			    mc13783_vmmc_val),
-	MC13783_GPO_DEFINE(REGU, GPO1, POWERMISC, mc13783_gpo_val),
-	MC13783_GPO_DEFINE(REGU, GPO2, POWERMISC, mc13783_gpo_val),
-	MC13783_GPO_DEFINE(REGU, GPO3, POWERMISC, mc13783_gpo_val),
-	MC13783_GPO_DEFINE(REGU, GPO4, POWERMISC, mc13783_gpo_val),
-	MC13783_GPO_DEFINE(REGU, PWGT1SPI, POWERMISC, mc13783_pwgtdrv_val),
-	MC13783_GPO_DEFINE(REGU, PWGT2SPI, POWERMISC, mc13783_pwgtdrv_val),
+	MC13783_GPO_DEFINE(REG, GPO1, POWERMISC, mc13783_gpo_val),
+	MC13783_GPO_DEFINE(REG, GPO2, POWERMISC, mc13783_gpo_val),
+	MC13783_GPO_DEFINE(REG, GPO3, POWERMISC, mc13783_gpo_val),
+	MC13783_GPO_DEFINE(REG, GPO4, POWERMISC, mc13783_gpo_val),
+	MC13783_GPO_DEFINE(REG, PWGT1SPI, POWERMISC, mc13783_pwgtdrv_val),
+	MC13783_GPO_DEFINE(REG, PWGT2SPI, POWERMISC, mc13783_pwgtdrv_val),
 };
 
-struct mc13783_regulator_priv {
-	struct mc13783 *mc13783;
-	u32 powermisc_pwgt_state;
-	struct regulator_dev *regulators[];
-};
-
-static int mc13783_regulator_enable(struct regulator_dev *rdev)
+static int mc13783_powermisc_rmw(struct mc13xxx_regulator_priv *priv, u32 mask,
+		u32 val)
 {
-	struct mc13783_regulator_priv *priv = rdev_get_drvdata(rdev);
-	int id = rdev_get_id(rdev);
-	int ret;
-
-	dev_dbg(rdev_get_dev(rdev), "%s id: %d\n", __func__, id);
-
-	mc13783_lock(priv->mc13783);
-	ret = mc13783_reg_rmw(priv->mc13783, mc13783_regulators[id].reg,
-			mc13783_regulators[id].enable_bit,
-			mc13783_regulators[id].enable_bit);
-	mc13783_unlock(priv->mc13783);
-
-	return ret;
-}
-
-static int mc13783_regulator_disable(struct regulator_dev *rdev)
-{
-	struct mc13783_regulator_priv *priv = rdev_get_drvdata(rdev);
-	int id = rdev_get_id(rdev);
-	int ret;
-
-	dev_dbg(rdev_get_dev(rdev), "%s id: %d\n", __func__, id);
-
-	mc13783_lock(priv->mc13783);
-	ret = mc13783_reg_rmw(priv->mc13783, mc13783_regulators[id].reg,
-			mc13783_regulators[id].enable_bit, 0);
-	mc13783_unlock(priv->mc13783);
-
-	return ret;
-}
-
-static int mc13783_regulator_is_enabled(struct regulator_dev *rdev)
-{
-	struct mc13783_regulator_priv *priv = rdev_get_drvdata(rdev);
-	int ret, id = rdev_get_id(rdev);
-	unsigned int val;
-
-	mc13783_lock(priv->mc13783);
-	ret = mc13783_reg_read(priv->mc13783, mc13783_regulators[id].reg, &val);
-	mc13783_unlock(priv->mc13783);
-
-	if (ret)
-		return ret;
-
-	return (val & mc13783_regulators[id].enable_bit) != 0;
-}
-
-static int mc13783_regulator_list_voltage(struct regulator_dev *rdev,
-						unsigned selector)
-{
-	int id = rdev_get_id(rdev);
-
-	if (selector >= mc13783_regulators[id].desc.n_voltages)
-		return -EINVAL;
-
-	return mc13783_regulators[id].voltages[selector];
-}
-
-static int mc13783_get_best_voltage_index(struct regulator_dev *rdev,
-						int min_uV, int max_uV)
-{
-	int reg_id = rdev_get_id(rdev);
-	int i;
-	int bestmatch;
-	int bestindex;
-
-	/*
-	 * Locate the minimum voltage fitting the criteria on
-	 * this regulator. The switchable voltages are not
-	 * in strict falling order so we need to check them
-	 * all for the best match.
-	 */
-	bestmatch = INT_MAX;
-	bestindex = -1;
-	for (i = 0; i < mc13783_regulators[reg_id].desc.n_voltages; i++) {
-		if (mc13783_regulators[reg_id].voltages[i] >= min_uV &&
-		    mc13783_regulators[reg_id].voltages[i] < bestmatch) {
-			bestmatch = mc13783_regulators[reg_id].voltages[i];
-			bestindex = i;
-		}
-	}
-
-	if (bestindex < 0 || bestmatch > max_uV) {
-		dev_warn(&rdev->dev, "no possible value for %d<=x<=%d uV\n",
-				min_uV, max_uV);
-		return -EINVAL;
-	}
-	return bestindex;
-}
-
-static int mc13783_regulator_set_voltage(struct regulator_dev *rdev,
-						int min_uV, int max_uV)
-{
-	struct mc13783_regulator_priv *priv = rdev_get_drvdata(rdev);
-	int value, id = rdev_get_id(rdev);
-	int ret;
-
-	dev_dbg(rdev_get_dev(rdev), "%s id: %d min_uV: %d max_uV: %d\n",
-		__func__, id, min_uV, max_uV);
-
-	/* Find the best index */
-	value = mc13783_get_best_voltage_index(rdev, min_uV, max_uV);
-	dev_dbg(rdev_get_dev(rdev), "%s best value: %d \n", __func__, value);
-	if (value < 0)
-		return value;
-
-	mc13783_lock(priv->mc13783);
-	ret = mc13783_reg_rmw(priv->mc13783, mc13783_regulators[id].vsel_reg,
-			mc13783_regulators[id].vsel_mask,
-			value << mc13783_regulators[id].vsel_shift);
-	mc13783_unlock(priv->mc13783);
-
-	return ret;
-}
-
-static int mc13783_regulator_get_voltage(struct regulator_dev *rdev)
-{
-	struct mc13783_regulator_priv *priv = rdev_get_drvdata(rdev);
-	int ret, id = rdev_get_id(rdev);
-	unsigned int val;
-
-	dev_dbg(rdev_get_dev(rdev), "%s id: %d\n", __func__, id);
-
-	mc13783_lock(priv->mc13783);
-	ret = mc13783_reg_read(priv->mc13783,
-				mc13783_regulators[id].vsel_reg, &val);
-	mc13783_unlock(priv->mc13783);
-
-	if (ret)
-		return ret;
-
-	val = (val & mc13783_regulators[id].vsel_mask)
-		>> mc13783_regulators[id].vsel_shift;
-
-	dev_dbg(rdev_get_dev(rdev), "%s id: %d val: %d\n", __func__, id, val);
-
-	BUG_ON(val < 0 || val > mc13783_regulators[id].desc.n_voltages);
-
-	return mc13783_regulators[id].voltages[val];
-}
-
-static struct regulator_ops mc13783_regulator_ops = {
-	.enable = mc13783_regulator_enable,
-	.disable = mc13783_regulator_disable,
-	.is_enabled = mc13783_regulator_is_enabled,
-	.list_voltage = mc13783_regulator_list_voltage,
-	.set_voltage = mc13783_regulator_set_voltage,
-	.get_voltage = mc13783_regulator_get_voltage,
-};
-
-static int mc13783_fixed_regulator_set_voltage(struct regulator_dev *rdev,
-						int min_uV, int max_uV)
-{
-	int id = rdev_get_id(rdev);
-
-	dev_dbg(rdev_get_dev(rdev), "%s id: %d min_uV: %d max_uV: %d\n",
-		__func__, id, min_uV, max_uV);
-
-	if (min_uV >= mc13783_regulators[id].voltages[0] &&
-	    max_uV <= mc13783_regulators[id].voltages[0])
-		return 0;
-	else
-		return -EINVAL;
-}
-
-static int mc13783_fixed_regulator_get_voltage(struct regulator_dev *rdev)
-{
-	int id = rdev_get_id(rdev);
-
-	dev_dbg(rdev_get_dev(rdev), "%s id: %d\n", __func__, id);
-
-	return mc13783_regulators[id].voltages[0];
-}
-
-static struct regulator_ops mc13783_fixed_regulator_ops = {
-	.enable = mc13783_regulator_enable,
-	.disable = mc13783_regulator_disable,
-	.is_enabled = mc13783_regulator_is_enabled,
-	.list_voltage = mc13783_regulator_list_voltage,
-	.set_voltage = mc13783_fixed_regulator_set_voltage,
-	.get_voltage = mc13783_fixed_regulator_get_voltage,
-};
-
-static int mc13783_powermisc_rmw(struct mc13783_regulator_priv *priv, u32 mask,
-				 u32 val)
-{
-	struct mc13783 *mc13783 = priv->mc13783;
+	struct mc13xxx *mc13783 = priv->mc13xxx;
 	int ret;
 	u32 valread;
 
 	BUG_ON(val & ~mask);
 
-	ret = mc13783_reg_read(mc13783, MC13783_REG_POWERMISC, &valread);
+	ret = mc13xxx_reg_read(mc13783, MC13783_REG_POWERMISC, &valread);
 	if (ret)
 		return ret;
 
@@ -489,34 +252,36 @@
 	valread = (valread & ~MC13783_REG_POWERMISC_PWGTSPI_M) |
 						priv->powermisc_pwgt_state;
 
-	return mc13783_reg_write(mc13783, MC13783_REG_POWERMISC, valread);
+	return mc13xxx_reg_write(mc13783, MC13783_REG_POWERMISC, valread);
 }
 
 static int mc13783_gpo_regulator_enable(struct regulator_dev *rdev)
 {
-	struct mc13783_regulator_priv *priv = rdev_get_drvdata(rdev);
+	struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev);
+	struct mc13xxx_regulator *mc13xxx_regulators = priv->mc13xxx_regulators;
 	int id = rdev_get_id(rdev);
 	int ret;
-	u32 en_val = mc13783_regulators[id].enable_bit;
+	u32 en_val = mc13xxx_regulators[id].enable_bit;
 
 	dev_dbg(rdev_get_dev(rdev), "%s id: %d\n", __func__, id);
 
 	/* Power Gate enable value is 0 */
-	if (id == MC13783_REGU_PWGT1SPI ||
-	    id == MC13783_REGU_PWGT2SPI)
+	if (id == MC13783_REG_PWGT1SPI ||
+	    id == MC13783_REG_PWGT2SPI)
 		en_val = 0;
 
-	mc13783_lock(priv->mc13783);
-	ret = mc13783_powermisc_rmw(priv, mc13783_regulators[id].enable_bit,
+	mc13xxx_lock(priv->mc13xxx);
+	ret = mc13783_powermisc_rmw(priv, mc13xxx_regulators[id].enable_bit,
 					en_val);
-	mc13783_unlock(priv->mc13783);
+	mc13xxx_unlock(priv->mc13xxx);
 
 	return ret;
 }
 
 static int mc13783_gpo_regulator_disable(struct regulator_dev *rdev)
 {
-	struct mc13783_regulator_priv *priv = rdev_get_drvdata(rdev);
+	struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev);
+	struct mc13xxx_regulator *mc13xxx_regulators = priv->mc13xxx_regulators;
 	int id = rdev_get_id(rdev);
 	int ret;
 	u32 dis_val = 0;
@@ -524,27 +289,28 @@
 	dev_dbg(rdev_get_dev(rdev), "%s id: %d\n", __func__, id);
 
 	/* Power Gate disable value is 1 */
-	if (id == MC13783_REGU_PWGT1SPI ||
-	    id == MC13783_REGU_PWGT2SPI)
-		dis_val = mc13783_regulators[id].enable_bit;
+	if (id == MC13783_REG_PWGT1SPI ||
+	    id == MC13783_REG_PWGT2SPI)
+		dis_val = mc13xxx_regulators[id].enable_bit;
 
-	mc13783_lock(priv->mc13783);
-	ret = mc13783_powermisc_rmw(priv, mc13783_regulators[id].enable_bit,
+	mc13xxx_lock(priv->mc13xxx);
+	ret = mc13783_powermisc_rmw(priv, mc13xxx_regulators[id].enable_bit,
 					dis_val);
-	mc13783_unlock(priv->mc13783);
+	mc13xxx_unlock(priv->mc13xxx);
 
 	return ret;
 }
 
 static int mc13783_gpo_regulator_is_enabled(struct regulator_dev *rdev)
 {
-	struct mc13783_regulator_priv *priv = rdev_get_drvdata(rdev);
+	struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev);
+	struct mc13xxx_regulator *mc13xxx_regulators = priv->mc13xxx_regulators;
 	int ret, id = rdev_get_id(rdev);
 	unsigned int val;
 
-	mc13783_lock(priv->mc13783);
-	ret = mc13783_reg_read(priv->mc13783, mc13783_regulators[id].reg, &val);
-	mc13783_unlock(priv->mc13783);
+	mc13xxx_lock(priv->mc13xxx);
+	ret = mc13xxx_reg_read(priv->mc13xxx, mc13xxx_regulators[id].reg, &val);
+	mc13xxx_unlock(priv->mc13xxx);
 
 	if (ret)
 		return ret;
@@ -554,22 +320,22 @@
 	val = (val & ~MC13783_REG_POWERMISC_PWGTSPI_M) |
 	      (priv->powermisc_pwgt_state ^ MC13783_REG_POWERMISC_PWGTSPI_M);
 
-	return (val & mc13783_regulators[id].enable_bit) != 0;
+	return (val & mc13xxx_regulators[id].enable_bit) != 0;
 }
 
 static struct regulator_ops mc13783_gpo_regulator_ops = {
 	.enable = mc13783_gpo_regulator_enable,
 	.disable = mc13783_gpo_regulator_disable,
 	.is_enabled = mc13783_gpo_regulator_is_enabled,
-	.list_voltage = mc13783_regulator_list_voltage,
-	.set_voltage = mc13783_fixed_regulator_set_voltage,
-	.get_voltage = mc13783_fixed_regulator_get_voltage,
+	.list_voltage = mc13xxx_regulator_list_voltage,
+	.set_voltage = mc13xxx_fixed_regulator_set_voltage,
+	.get_voltage = mc13xxx_fixed_regulator_get_voltage,
 };
 
 static int __devinit mc13783_regulator_probe(struct platform_device *pdev)
 {
-	struct mc13783_regulator_priv *priv;
-	struct mc13783 *mc13783 = dev_get_drvdata(pdev->dev.parent);
+	struct mc13xxx_regulator_priv *priv;
+	struct mc13xxx *mc13783 = dev_get_drvdata(pdev->dev.parent);
 	struct mc13783_regulator_platform_data *pdata =
 		dev_get_platdata(&pdev->dev);
 	struct mc13783_regulator_init_data *init_data;
@@ -583,7 +349,8 @@
 	if (!priv)
 		return -ENOMEM;
 
-	priv->mc13783 = mc13783;
+	priv->mc13xxx_regulators = mc13783_regulators;
+	priv->mc13xxx = mc13783;
 
 	for (i = 0; i < pdata->num_regulators; i++) {
 		init_data = &pdata->regulators[i];
@@ -613,7 +380,7 @@
 
 static int __devexit mc13783_regulator_remove(struct platform_device *pdev)
 {
-	struct mc13783_regulator_priv *priv = platform_get_drvdata(pdev);
+	struct mc13xxx_regulator_priv *priv = platform_get_drvdata(pdev);
 	struct mc13783_regulator_platform_data *pdata =
 		dev_get_platdata(&pdev->dev);
 	int i;
diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c
new file mode 100644
index 0000000..1b8f739
--- /dev/null
+++ b/drivers/regulator/mc13892-regulator.c
@@ -0,0 +1,635 @@
+/*
+ * Regulator Driver for Freescale MC13892 PMIC
+ *
+ * Copyright 2010 Yong Shen <yong.shen@linaro.org>
+ *
+ * Based on draft driver from Arnaud Patard <arnaud.patard@rtp-net.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/mfd/mc13892.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/driver.h>
+#include <linux/platform_device.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include "mc13xxx.h"
+
+#define MC13892_REVISION			7
+
+#define MC13892_POWERCTL0			13
+#define MC13892_POWERCTL0_USEROFFSPI		3
+#define MC13892_POWERCTL0_VCOINCELLVSEL		20
+#define MC13892_POWERCTL0_VCOINCELLVSEL_M	(7<<20)
+#define MC13892_POWERCTL0_VCOINCELLEN		(1<<23)
+
+#define MC13892_SWITCHERS0_SWxHI		(1<<23)
+
+#define MC13892_SWITCHERS0			24
+#define MC13892_SWITCHERS0_SW1VSEL		0
+#define MC13892_SWITCHERS0_SW1VSEL_M		(0x1f<<0)
+#define MC13892_SWITCHERS0_SW1HI		(1<<23)
+#define MC13892_SWITCHERS0_SW1EN		0
+
+#define MC13892_SWITCHERS1			25
+#define MC13892_SWITCHERS1_SW2VSEL		0
+#define MC13892_SWITCHERS1_SW2VSEL_M		(0x1f<<0)
+#define MC13892_SWITCHERS1_SW2HI		(1<<23)
+#define MC13892_SWITCHERS1_SW2EN		0
+
+#define MC13892_SWITCHERS2			26
+#define MC13892_SWITCHERS2_SW3VSEL		0
+#define MC13892_SWITCHERS2_SW3VSEL_M		(0x1f<<0)
+#define MC13892_SWITCHERS2_SW3HI		(1<<23)
+#define MC13892_SWITCHERS2_SW3EN		0
+
+#define MC13892_SWITCHERS3			27
+#define MC13892_SWITCHERS3_SW4VSEL		0
+#define MC13892_SWITCHERS3_SW4VSEL_M		(0x1f<<0)
+#define MC13892_SWITCHERS3_SW4HI		(1<<23)
+#define MC13892_SWITCHERS3_SW4EN		0
+
+#define MC13892_SWITCHERS4			28
+#define MC13892_SWITCHERS4_SW1MODE		0
+#define MC13892_SWITCHERS4_SW1MODE_AUTO		(8<<0)
+#define MC13892_SWITCHERS4_SW1MODE_M		(0xf<<0)
+#define MC13892_SWITCHERS4_SW2MODE		10
+#define MC13892_SWITCHERS4_SW2MODE_AUTO		(8<<10)
+#define MC13892_SWITCHERS4_SW2MODE_M		(0xf<<10)
+
+#define MC13892_SWITCHERS5			29
+#define MC13892_SWITCHERS5_SW3MODE		0
+#define MC13892_SWITCHERS5_SW3MODE_AUTO		(8<<0)
+#define MC13892_SWITCHERS5_SW3MODE_M		(0xf<<0)
+#define MC13892_SWITCHERS5_SW4MODE		8
+#define MC13892_SWITCHERS5_SW4MODE_AUTO		(8<<8)
+#define MC13892_SWITCHERS5_SW4MODE_M		(0xf<<8)
+#define MC13892_SWITCHERS5_SWBSTEN		(1<<20)
+
+#define MC13892_REGULATORSETTING0		30
+#define MC13892_REGULATORSETTING0_VGEN1VSEL	0
+#define MC13892_REGULATORSETTING0_VDIGVSEL	4
+#define MC13892_REGULATORSETTING0_VGEN2VSEL	6
+#define MC13892_REGULATORSETTING0_VPLLVSEL	9
+#define MC13892_REGULATORSETTING0_VUSB2VSEL	11
+#define MC13892_REGULATORSETTING0_VGEN3VSEL	14
+#define MC13892_REGULATORSETTING0_VCAMVSEL	16
+
+#define MC13892_REGULATORSETTING0_VGEN1VSEL_M	(3<<0)
+#define MC13892_REGULATORSETTING0_VDIGVSEL_M	(3<<4)
+#define MC13892_REGULATORSETTING0_VGEN2VSEL_M	(7<<6)
+#define MC13892_REGULATORSETTING0_VPLLVSEL_M	(3<<9)
+#define MC13892_REGULATORSETTING0_VUSB2VSEL_M	(3<<11)
+#define MC13892_REGULATORSETTING0_VGEN3VSEL_M	(1<<14)
+#define MC13892_REGULATORSETTING0_VCAMVSEL_M	(3<<16)
+
+#define MC13892_REGULATORSETTING1		31
+#define MC13892_REGULATORSETTING1_VVIDEOVSEL	2
+#define MC13892_REGULATORSETTING1_VAUDIOVSEL	4
+#define MC13892_REGULATORSETTING1_VSDVSEL	6
+
+#define MC13892_REGULATORSETTING1_VVIDEOVSEL_M	(3<<2)
+#define MC13892_REGULATORSETTING1_VAUDIOVSEL_M	(3<<4)
+#define MC13892_REGULATORSETTING1_VSDVSEL_M	(7<<6)
+
+#define MC13892_REGULATORMODE0			32
+#define MC13892_REGULATORMODE0_VGEN1EN		(1<<0)
+#define MC13892_REGULATORMODE0_VGEN1STDBY	(1<<1)
+#define MC13892_REGULATORMODE0_VGEN1MODE	(1<<2)
+#define MC13892_REGULATORMODE0_VIOHIEN		(1<<3)
+#define MC13892_REGULATORMODE0_VIOHISTDBY	(1<<4)
+#define MC13892_REGULATORMODE0_VIOHIMODE	(1<<5)
+#define MC13892_REGULATORMODE0_VDIGEN		(1<<9)
+#define MC13892_REGULATORMODE0_VDIGSTDBY	(1<<10)
+#define MC13892_REGULATORMODE0_VDIGMODE		(1<<11)
+#define MC13892_REGULATORMODE0_VGEN2EN		(1<<12)
+#define MC13892_REGULATORMODE0_VGEN2STDBY	(1<<13)
+#define MC13892_REGULATORMODE0_VGEN2MODE	(1<<14)
+#define MC13892_REGULATORMODE0_VPLLEN		(1<<15)
+#define MC13892_REGULATORMODE0_VPLLSTDBY	(1<<16)
+#define MC13892_REGULATORMODE0_VPLLMODE		(1<<17)
+#define MC13892_REGULATORMODE0_VUSB2EN		(1<<18)
+#define MC13892_REGULATORMODE0_VUSB2STDBY	(1<<19)
+#define MC13892_REGULATORMODE0_VUSB2MODE	(1<<20)
+
+#define MC13892_REGULATORMODE1			33
+#define MC13892_REGULATORMODE1_VGEN3EN		(1<<0)
+#define MC13892_REGULATORMODE1_VGEN3STDBY	(1<<1)
+#define MC13892_REGULATORMODE1_VGEN3MODE	(1<<2)
+#define MC13892_REGULATORMODE1_VCAMEN		(1<<6)
+#define MC13892_REGULATORMODE1_VCAMSTDBY	(1<<7)
+#define MC13892_REGULATORMODE1_VCAMMODE		(1<<8)
+#define MC13892_REGULATORMODE1_VCAMCONFIGEN	(1<<9)
+#define MC13892_REGULATORMODE1_VVIDEOEN		(1<<12)
+#define MC13892_REGULATORMODE1_VVIDEOSTDBY	(1<<13)
+#define MC13892_REGULATORMODE1_VVIDEOMODE	(1<<14)
+#define MC13892_REGULATORMODE1_VAUDIOEN		(1<<15)
+#define MC13892_REGULATORMODE1_VAUDIOSTDBY	(1<<16)
+#define MC13892_REGULATORMODE1_VAUDIOMODE	(1<<17)
+#define MC13892_REGULATORMODE1_VSDEN		(1<<18)
+#define MC13892_REGULATORMODE1_VSDSTDBY		(1<<19)
+#define MC13892_REGULATORMODE1_VSDMODE		(1<<20)
+
+#define MC13892_POWERMISC			34
+#define MC13892_POWERMISC_GPO1EN		(1<<6)
+#define MC13892_POWERMISC_GPO2EN		(1<<8)
+#define MC13892_POWERMISC_GPO3EN		(1<<10)
+#define MC13892_POWERMISC_GPO4EN		(1<<12)
+#define MC13892_POWERMISC_PWGT1SPIEN		(1<<15)
+#define MC13892_POWERMISC_PWGT2SPIEN		(1<<16)
+#define MC13892_POWERMISC_GPO4ADINEN		(1<<21)
+
+#define MC13892_POWERMISC_PWGTSPI_M		(3 << 15)
+
+#define MC13892_USB1				50
+#define MC13892_USB1_VUSBEN			(1<<3)
+
+static const int mc13892_vcoincell[] = {
+	2500000, 2700000, 2800000, 2900000, 3000000, 3100000,
+	3200000, 3300000,
+};
+
+static const int mc13892_sw1[] = {
+	600000,   625000,  650000,  675000,  700000,  725000,
+	750000,   775000,  800000,  825000,  850000,  875000,
+	900000,   925000,  950000,  975000, 1000000, 1025000,
+	1050000, 1075000, 1100000, 1125000, 1150000, 1175000,
+	1200000, 1225000, 1250000, 1275000, 1300000, 1325000,
+	1350000, 1375000
+};
+
+static const int mc13892_sw[] = {
+	600000,   625000,  650000,  675000,  700000,  725000,
+	750000,   775000,  800000,  825000,  850000,  875000,
+	900000,   925000,  950000,  975000, 1000000, 1025000,
+	1050000, 1075000, 1100000, 1125000, 1150000, 1175000,
+	1200000, 1225000, 1250000, 1275000, 1300000, 1325000,
+	1350000, 1375000, 1400000, 1425000, 1450000, 1475000,
+	1500000, 1525000, 1550000, 1575000, 1600000, 1625000,
+	1650000, 1675000, 1700000, 1725000, 1750000, 1775000,
+	1800000, 1825000, 1850000, 1875000
+};
+
+static const int mc13892_swbst[] = {
+	5000000,
+};
+
+static const int mc13892_viohi[] = {
+	2775000,
+};
+
+static const int mc13892_vpll[] = {
+	1050000, 1250000, 1650000, 1800000,
+};
+
+static const int mc13892_vdig[] = {
+	1050000, 1250000, 1650000, 1800000,
+};
+
+static const int mc13892_vsd[] = {
+	1800000, 2000000, 2600000, 2700000,
+	2800000, 2900000, 3000000, 3150000,
+};
+
+static const int mc13892_vusb2[] = {
+	2400000, 2600000, 2700000, 2775000,
+};
+
+static const int mc13892_vvideo[] = {
+	2700000, 2775000, 2500000, 2600000,
+};
+
+static const int mc13892_vaudio[] = {
+	2300000, 2500000, 2775000, 3000000,
+};
+
+static const int mc13892_vcam[] = {
+	2500000, 2600000, 2750000, 3000000,
+};
+
+static const int mc13892_vgen1[] = {
+	1200000, 1500000, 2775000, 3150000,
+};
+
+static const int mc13892_vgen2[] = {
+	1200000, 1500000, 1600000, 1800000,
+	2700000, 2800000, 3000000, 3150000,
+};
+
+static const int mc13892_vgen3[] = {
+	1800000, 2900000,
+};
+
+static const int mc13892_vusb[] = {
+	3300000,
+};
+
+static const int mc13892_gpo[] = {
+	2750000,
+};
+
+static const int mc13892_pwgtdrv[] = {
+	5000000,
+};
+
+static struct regulator_ops mc13892_gpo_regulator_ops;
+/* sw regulators need special care due to the "hi bit" */
+static struct regulator_ops mc13892_sw_regulator_ops;
+
+
+#define MC13892_FIXED_DEFINE(name, reg, voltages)		\
+	MC13xxx_FIXED_DEFINE(MC13892_, name, reg, voltages,	\
+			mc13xxx_fixed_regulator_ops)
+
+#define MC13892_GPO_DEFINE(name, reg, voltages)			\
+	MC13xxx_GPO_DEFINE(MC13892_, name, reg, voltages,	\
+			mc13892_gpo_regulator_ops)
+
+#define MC13892_SW_DEFINE(name, reg, vsel_reg, voltages)	\
+	MC13xxx_DEFINE(MC13892_, name, reg, vsel_reg, voltages, \
+			mc13892_sw_regulator_ops)
+
+#define MC13892_DEFINE_REGU(name, reg, vsel_reg, voltages)	\
+	MC13xxx_DEFINE(MC13892_, name, reg, vsel_reg, voltages, \
+			mc13xxx_regulator_ops)
+
+static struct mc13xxx_regulator mc13892_regulators[] = {
+	MC13892_DEFINE_REGU(VCOINCELL, POWERCTL0, POWERCTL0, mc13892_vcoincell),
+	MC13892_SW_DEFINE(SW1, SWITCHERS0, SWITCHERS0, mc13892_sw1),
+	MC13892_SW_DEFINE(SW2, SWITCHERS1, SWITCHERS1, mc13892_sw),
+	MC13892_SW_DEFINE(SW3, SWITCHERS2, SWITCHERS2, mc13892_sw),
+	MC13892_SW_DEFINE(SW4, SWITCHERS3, SWITCHERS3, mc13892_sw),
+	MC13892_FIXED_DEFINE(SWBST, SWITCHERS5, mc13892_swbst),
+	MC13892_FIXED_DEFINE(VIOHI, REGULATORMODE0, mc13892_viohi),
+	MC13892_DEFINE_REGU(VPLL, REGULATORMODE0, REGULATORSETTING0,	\
+		mc13892_vpll),
+	MC13892_DEFINE_REGU(VDIG, REGULATORMODE0, REGULATORSETTING0,	\
+		mc13892_vdig),
+	MC13892_DEFINE_REGU(VSD, REGULATORMODE1, REGULATORSETTING1,	\
+		mc13892_vsd),
+	MC13892_DEFINE_REGU(VUSB2, REGULATORMODE0, REGULATORSETTING0,	\
+		mc13892_vusb2),
+	MC13892_DEFINE_REGU(VVIDEO, REGULATORMODE1, REGULATORSETTING1,	\
+		mc13892_vvideo),
+	MC13892_DEFINE_REGU(VAUDIO, REGULATORMODE1, REGULATORSETTING1,	\
+		mc13892_vaudio),
+	MC13892_DEFINE_REGU(VCAM, REGULATORMODE1, REGULATORSETTING0,	\
+		mc13892_vcam),
+	MC13892_DEFINE_REGU(VGEN1, REGULATORMODE0, REGULATORSETTING0,	\
+		mc13892_vgen1),
+	MC13892_DEFINE_REGU(VGEN2, REGULATORMODE0, REGULATORSETTING0,	\
+		mc13892_vgen2),
+	MC13892_DEFINE_REGU(VGEN3, REGULATORMODE1, REGULATORSETTING0,	\
+		mc13892_vgen3),
+	MC13892_FIXED_DEFINE(VUSB, USB1, mc13892_vusb),
+	MC13892_GPO_DEFINE(GPO1, POWERMISC, mc13892_gpo),
+	MC13892_GPO_DEFINE(GPO2, POWERMISC, mc13892_gpo),
+	MC13892_GPO_DEFINE(GPO3, POWERMISC, mc13892_gpo),
+	MC13892_GPO_DEFINE(GPO4, POWERMISC, mc13892_gpo),
+	MC13892_GPO_DEFINE(PWGT1SPI, POWERMISC, mc13892_pwgtdrv),
+	MC13892_GPO_DEFINE(PWGT2SPI, POWERMISC, mc13892_pwgtdrv),
+};
+
+static int mc13892_powermisc_rmw(struct mc13xxx_regulator_priv *priv, u32 mask,
+				 u32 val)
+{
+	struct mc13xxx *mc13892 = priv->mc13xxx;
+	int ret;
+	u32 valread;
+
+	BUG_ON(val & ~mask);
+
+	ret = mc13xxx_reg_read(mc13892, MC13892_POWERMISC, &valread);
+	if (ret)
+		return ret;
+
+	/* Update the stored state for Power Gates. */
+	priv->powermisc_pwgt_state =
+		(priv->powermisc_pwgt_state & ~mask) | val;
+	priv->powermisc_pwgt_state &= MC13892_POWERMISC_PWGTSPI_M;
+
+	/* Construct the new register value */
+	valread = (valread & ~mask) | val;
+	/* Overwrite the PWGTxEN with the stored version */
+	valread = (valread & ~MC13892_POWERMISC_PWGTSPI_M) |
+		priv->powermisc_pwgt_state;
+
+	return mc13xxx_reg_write(mc13892, MC13892_POWERMISC, valread);
+}
+
+static int mc13892_gpo_regulator_enable(struct regulator_dev *rdev)
+{
+	struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev);
+	int id = rdev_get_id(rdev);
+	int ret;
+	u32 en_val = mc13892_regulators[id].enable_bit;
+	u32 mask = mc13892_regulators[id].enable_bit;
+
+	dev_dbg(rdev_get_dev(rdev), "%s id: %d\n", __func__, id);
+
+	/* Power Gate enable value is 0 */
+	if (id == MC13892_PWGT1SPI || id == MC13892_PWGT2SPI)
+		en_val = 0;
+
+	if (id == MC13892_GPO4)
+		mask |= MC13892_POWERMISC_GPO4ADINEN;
+
+	mc13xxx_lock(priv->mc13xxx);
+	ret = mc13892_powermisc_rmw(priv, mask, en_val);
+	mc13xxx_unlock(priv->mc13xxx);
+
+	return ret;
+}
+
+static int mc13892_gpo_regulator_disable(struct regulator_dev *rdev)
+{
+	struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev);
+	int id = rdev_get_id(rdev);
+	int ret;
+	u32 dis_val = 0;
+
+	dev_dbg(rdev_get_dev(rdev), "%s id: %d\n", __func__, id);
+
+	/* Power Gate disable value is 1 */
+	if (id == MC13892_PWGT1SPI || id == MC13892_PWGT2SPI)
+		dis_val = mc13892_regulators[id].enable_bit;
+
+	mc13xxx_lock(priv->mc13xxx);
+	ret = mc13892_powermisc_rmw(priv, mc13892_regulators[id].enable_bit,
+		dis_val);
+	mc13xxx_unlock(priv->mc13xxx);
+
+	return ret;
+}
+
+static int mc13892_gpo_regulator_is_enabled(struct regulator_dev *rdev)
+{
+	struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev);
+	int ret, id = rdev_get_id(rdev);
+	unsigned int val;
+
+	mc13xxx_lock(priv->mc13xxx);
+	ret = mc13xxx_reg_read(priv->mc13xxx, mc13892_regulators[id].reg, &val);
+	mc13xxx_unlock(priv->mc13xxx);
+
+	if (ret)
+		return ret;
+
+	/* The Power Gate state is stored in powermisc_pwgt_state,
+	 * where the meaning of the bits is inverted */
+	val = (val & ~MC13892_POWERMISC_PWGTSPI_M) |
+		(priv->powermisc_pwgt_state ^ MC13892_POWERMISC_PWGTSPI_M);
+
+	return (val & mc13892_regulators[id].enable_bit) != 0;
+}
+
+
+static struct regulator_ops mc13892_gpo_regulator_ops = {
+	.enable = mc13892_gpo_regulator_enable,
+	.disable = mc13892_gpo_regulator_disable,
+	.is_enabled = mc13892_gpo_regulator_is_enabled,
+	.list_voltage = mc13xxx_regulator_list_voltage,
+	.set_voltage = mc13xxx_fixed_regulator_set_voltage,
+	.get_voltage = mc13xxx_fixed_regulator_get_voltage,
+};
+
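+/*
+ * SW1-SW4 select between two 25mV/step output ranges via the SWxHI bit:
+ * a 600mV-based range when the bit is clear and a 1100mV-based range
+ * when it is set.
+ */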
+static int mc13892_sw_regulator_get_voltage(struct regulator_dev *rdev)
+{
+	struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev);
+	int ret, id = rdev_get_id(rdev);
+	unsigned int val, hi;
+
+	dev_dbg(rdev_get_dev(rdev), "%s id: %d\n", __func__, id);
+
+	mc13xxx_lock(priv->mc13xxx);
+	ret = mc13xxx_reg_read(priv->mc13xxx,
+		mc13892_regulators[id].vsel_reg, &val);
+	mc13xxx_unlock(priv->mc13xxx);
+	if (ret)
+		return ret;
+
+	hi  = val & MC13892_SWITCHERS0_SWxHI;
+	val = (val & mc13892_regulators[id].vsel_mask)
+		>> mc13892_regulators[id].vsel_shift;
+
+	dev_dbg(rdev_get_dev(rdev), "%s id: %d val: %d\n", __func__, id, val);
+
+	if (hi)
+		val = (25000 * val) + 1100000;
+	else
+		val = (25000 * val) + 600000;
+
+	return val;
+}
+
+static int mc13892_sw_regulator_set_voltage(struct regulator_dev *rdev,
+		int min_uV, int max_uV, unsigned *selector)
+{
+	struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev);
+	int hi, value, val, mask, id = rdev_get_id(rdev);
+	int ret;
+
+	dev_dbg(rdev_get_dev(rdev), "%s id: %d min_uV: %d max_uV: %d\n",
+		__func__, id, min_uV, max_uV);
+
+	/* Find the best index */
+	value = mc13xxx_get_best_voltage_index(rdev, min_uV, max_uV);
+	dev_dbg(rdev_get_dev(rdev), "%s best value: %d\n", __func__, value);
+	if (value < 0)
+		return value;
+
+	value = mc13892_regulators[id].voltages[value];
+
+	mc13xxx_lock(priv->mc13xxx);
+	ret = mc13xxx_reg_read(priv->mc13xxx,
+		mc13892_regulators[id].vsel_reg, &val);
+	if (ret)
+		goto err;
+
+	hi  = val & MC13892_SWITCHERS0_SWxHI;
+	if (value > 1375000)
+		hi = 1;
+	if (value < 1100000)
+		hi = 0;
+
+	if (hi) {
+		value = (value - 1100000) / 25000;
+		value |= MC13892_SWITCHERS0_SWxHI;
+	} else
+		value = (value - 600000) / 25000;
+
+	mask = mc13892_regulators[id].vsel_mask | MC13892_SWITCHERS0_SWxHI;
+	ret = mc13xxx_reg_rmw(priv->mc13xxx, mc13892_regulators[id].vsel_reg,
+			mask, value << mc13892_regulators[id].vsel_shift);
+err:
+	mc13xxx_unlock(priv->mc13xxx);
+
+	return ret;
+}
+
+static struct regulator_ops mc13892_sw_regulator_ops = {
+	.is_enabled = mc13xxx_sw_regulator_is_enabled,
+	.list_voltage = mc13xxx_regulator_list_voltage,
+	.set_voltage = mc13892_sw_regulator_set_voltage,
+	.get_voltage = mc13892_sw_regulator_get_voltage,
+};
+
+static int mc13892_vcam_set_mode(struct regulator_dev *rdev, unsigned int mode)
+{
+	unsigned int en_val = 0;
+	struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev);
+	int ret, id = rdev_get_id(rdev);
+
+	if (mode == REGULATOR_MODE_FAST)
+		en_val = MC13892_REGULATORMODE1_VCAMCONFIGEN;
+
+	mc13xxx_lock(priv->mc13xxx);
+	ret = mc13xxx_reg_rmw(priv->mc13xxx, mc13892_regulators[id].reg,
+		MC13892_REGULATORMODE1_VCAMCONFIGEN, en_val);
+	mc13xxx_unlock(priv->mc13xxx);
+
+	return ret;
+}
+
+static unsigned int mc13892_vcam_get_mode(struct regulator_dev *rdev)
+{
+	struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev);
+	int ret, id = rdev_get_id(rdev);
+	unsigned int val;
+
+	mc13xxx_lock(priv->mc13xxx);
+	ret = mc13xxx_reg_read(priv->mc13xxx, mc13892_regulators[id].reg, &val);
+	mc13xxx_unlock(priv->mc13xxx);
+
+	if (ret)
+		return ret;
+
+	if (val & MC13892_REGULATORMODE1_VCAMCONFIGEN)
+		return REGULATOR_MODE_FAST;
+
+	return REGULATOR_MODE_NORMAL;
+}
+
+
+static int __devinit mc13892_regulator_probe(struct platform_device *pdev)
+{
+	struct mc13xxx_regulator_priv *priv;
+	struct mc13xxx *mc13892 = dev_get_drvdata(pdev->dev.parent);
+	struct mc13xxx_regulator_platform_data *pdata =
+		dev_get_platdata(&pdev->dev);
+	struct mc13xxx_regulator_init_data *init_data;
+	int i, ret;
+	u32 val;
+
+	priv = kzalloc(sizeof(*priv) +
+		pdata->num_regulators * sizeof(priv->regulators[0]),
+		GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	priv->mc13xxx_regulators = mc13892_regulators;
+	priv->mc13xxx = mc13892;
+
+	mc13xxx_lock(mc13892);
+	ret = mc13xxx_reg_read(mc13892, MC13892_REVISION, &val);
+	if (ret)
+		goto err_free;
+
+	/* enable switch auto mode */
+	if ((val & 0x0000FFFF) == 0x45d0) {
+		ret = mc13xxx_reg_rmw(mc13892, MC13892_SWITCHERS4,
+			MC13892_SWITCHERS4_SW1MODE_M |
+			MC13892_SWITCHERS4_SW2MODE_M,
+			MC13892_SWITCHERS4_SW1MODE_AUTO |
+			MC13892_SWITCHERS4_SW2MODE_AUTO);
+		if (ret)
+			goto err_free;
+
+		ret = mc13xxx_reg_rmw(mc13892, MC13892_SWITCHERS5,
+			MC13892_SWITCHERS5_SW3MODE_M |
+			MC13892_SWITCHERS5_SW4MODE_M,
+			MC13892_SWITCHERS5_SW3MODE_AUTO |
+			MC13892_SWITCHERS5_SW4MODE_AUTO);
+		if (ret)
+			goto err_free;
+	}
+	mc13xxx_unlock(mc13892);
+
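+	/* VCAM is the only supply with a configurable mode; hook up its ops */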
+	mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
+		= mc13892_vcam_set_mode;
+	mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
+		= mc13892_vcam_get_mode;
+	for (i = 0; i < pdata->num_regulators; i++) {
+		init_data = &pdata->regulators[i];
+		priv->regulators[i] = regulator_register(
+			&mc13892_regulators[init_data->id].desc,
+			&pdev->dev, init_data->init_data, priv);
+
+		if (IS_ERR(priv->regulators[i])) {
+			dev_err(&pdev->dev, "failed to register regulator %s\n",
+				mc13892_regulators[init_data->id].desc.name);
+			ret = PTR_ERR(priv->regulators[i]);
+			goto err;
+		}
+	}
+
+	platform_set_drvdata(pdev, priv);
+
+	return 0;
+err:
+	while (--i >= 0)
+		regulator_unregister(priv->regulators[i]);
+
+err_free:
+	mc13xxx_unlock(mc13892);
+	kfree(priv);
+
+	return ret;
+}
+
+static int __devexit mc13892_regulator_remove(struct platform_device *pdev)
+{
+	struct mc13xxx_regulator_priv *priv = platform_get_drvdata(pdev);
+	struct mc13xxx_regulator_platform_data *pdata =
+		dev_get_platdata(&pdev->dev);
+	int i;
+
+	platform_set_drvdata(pdev, NULL);
+
+	for (i = 0; i < pdata->num_regulators; i++)
+		regulator_unregister(priv->regulators[i]);
+
+	kfree(priv);
+	return 0;
+}
+
+static struct platform_driver mc13892_regulator_driver = {
+	.driver	= {
+		.name	= "mc13892-regulator",
+		.owner	= THIS_MODULE,
+	},
+	.remove	= __devexit_p(mc13892_regulator_remove),
+	.probe	= mc13892_regulator_probe,
+};
+
+static int __init mc13892_regulator_init(void)
+{
+	return platform_driver_register(&mc13892_regulator_driver);
+}
+subsys_initcall(mc13892_regulator_init);
+
+static void __exit mc13892_regulator_exit(void)
+{
+	platform_driver_unregister(&mc13892_regulator_driver);
+}
+module_exit(mc13892_regulator_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Yong Shen <yong.shen@linaro.org>");
+MODULE_DESCRIPTION("Regulator Driver for Freescale MC13892 PMIC");
+MODULE_ALIAS("platform:mc13892-regulator");
diff --git a/drivers/regulator/mc13xxx-regulator-core.c b/drivers/regulator/mc13xxx-regulator-core.c
new file mode 100644
index 0000000..f53d31b
--- /dev/null
+++ b/drivers/regulator/mc13xxx-regulator-core.c
@@ -0,0 +1,241 @@
+/*
+ * Regulator Driver for Freescale MC13xxx PMIC
+ *
+ * Copyright 2010 Yong Shen <yong.shen@linaro.org>
+ *
+ * Based on mc13783 regulator driver :
+ * Copyright (C) 2008 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
+ * Copyright 2009 Alberto Panizzo <maramaopercheseimorto@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Register information taken from the Freescale mc13xxx drivers and the
+ * mc13xxx.pdf datasheet from Freescale.
+ */
+
+#include <linux/mfd/mc13xxx.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/driver.h>
+#include <linux/platform_device.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include "mc13xxx.h"
+
+static int mc13xxx_regulator_enable(struct regulator_dev *rdev)
+{
+	struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev);
+	struct mc13xxx_regulator *mc13xxx_regulators = priv->mc13xxx_regulators;
+	int id = rdev_get_id(rdev);
+	int ret;
+
+	dev_dbg(rdev_get_dev(rdev), "%s id: %d\n", __func__, id);
+
+	mc13xxx_lock(priv->mc13xxx);
+	ret = mc13xxx_reg_rmw(priv->mc13xxx, mc13xxx_regulators[id].reg,
+			mc13xxx_regulators[id].enable_bit,
+			mc13xxx_regulators[id].enable_bit);
+	mc13xxx_unlock(priv->mc13xxx);
+
+	return ret;
+}
+
+static int mc13xxx_regulator_disable(struct regulator_dev *rdev)
+{
+	struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev);
+	struct mc13xxx_regulator *mc13xxx_regulators = priv->mc13xxx_regulators;
+	int id = rdev_get_id(rdev);
+	int ret;
+
+	dev_dbg(rdev_get_dev(rdev), "%s id: %d\n", __func__, id);
+
+	mc13xxx_lock(priv->mc13xxx);
+	ret = mc13xxx_reg_rmw(priv->mc13xxx, mc13xxx_regulators[id].reg,
+			mc13xxx_regulators[id].enable_bit, 0);
+	mc13xxx_unlock(priv->mc13xxx);
+
+	return ret;
+}
+
+static int mc13xxx_regulator_is_enabled(struct regulator_dev *rdev)
+{
+	struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev);
+	struct mc13xxx_regulator *mc13xxx_regulators = priv->mc13xxx_regulators;
+	int ret, id = rdev_get_id(rdev);
+	unsigned int val;
+
+	mc13xxx_lock(priv->mc13xxx);
+	ret = mc13xxx_reg_read(priv->mc13xxx, mc13xxx_regulators[id].reg, &val);
+	mc13xxx_unlock(priv->mc13xxx);
+
+	if (ret)
+		return ret;
+
+	return (val & mc13xxx_regulators[id].enable_bit) != 0;
+}
+
+int mc13xxx_regulator_list_voltage(struct regulator_dev *rdev,
+						unsigned selector)
+{
+	int id = rdev_get_id(rdev);
+	struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev);
+	struct mc13xxx_regulator *mc13xxx_regulators = priv->mc13xxx_regulators;
+
+	if (selector >= mc13xxx_regulators[id].desc.n_voltages)
+		return -EINVAL;
+
+	return mc13xxx_regulators[id].voltages[selector];
+}
+EXPORT_SYMBOL_GPL(mc13xxx_regulator_list_voltage);
+
+int mc13xxx_get_best_voltage_index(struct regulator_dev *rdev,
+						int min_uV, int max_uV)
+{
+	struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev);
+	struct mc13xxx_regulator *mc13xxx_regulators = priv->mc13xxx_regulators;
+	int reg_id = rdev_get_id(rdev);
+	int i;
+	int bestmatch;
+	int bestindex;
+
+	/*
+	 * Locate the minimum voltage fitting the criteria on
+	 * this regulator. The switchable voltages are not
+	 * necessarily sorted, so we need to check them
+	 * all for the best match.
+	 */
+	bestmatch = INT_MAX;
+	bestindex = -1;
+	for (i = 0; i < mc13xxx_regulators[reg_id].desc.n_voltages; i++) {
+		if (mc13xxx_regulators[reg_id].voltages[i] >= min_uV &&
+		    mc13xxx_regulators[reg_id].voltages[i] < bestmatch) {
+			bestmatch = mc13xxx_regulators[reg_id].voltages[i];
+			bestindex = i;
+		}
+	}
+
+	if (bestindex < 0 || bestmatch > max_uV) {
+		dev_warn(&rdev->dev, "no possible value for %d<=x<=%d uV\n",
+				min_uV, max_uV);
+		return -EINVAL;
+	}
+	return bestindex;
+}
+EXPORT_SYMBOL_GPL(mc13xxx_get_best_voltage_index);
+
+static int mc13xxx_regulator_set_voltage(struct regulator_dev *rdev, int min_uV,
+		int max_uV, unsigned *selector)
+{
+	struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev);
+	struct mc13xxx_regulator *mc13xxx_regulators = priv->mc13xxx_regulators;
+	int value, id = rdev_get_id(rdev);
+	int ret;
+
+	dev_dbg(rdev_get_dev(rdev), "%s id: %d min_uV: %d max_uV: %d\n",
+		__func__, id, min_uV, max_uV);
+
+	/* Find the best index */
+	value = mc13xxx_get_best_voltage_index(rdev, min_uV, max_uV);
+	dev_dbg(rdev_get_dev(rdev), "%s best value: %d\n", __func__, value);
+	if (value < 0)
+		return value;
+
+	mc13xxx_lock(priv->mc13xxx);
+	ret = mc13xxx_reg_rmw(priv->mc13xxx, mc13xxx_regulators[id].vsel_reg,
+			mc13xxx_regulators[id].vsel_mask,
+			value << mc13xxx_regulators[id].vsel_shift);
+	mc13xxx_unlock(priv->mc13xxx);
+
+	return ret;
+}
+
+static int mc13xxx_regulator_get_voltage(struct regulator_dev *rdev)
+{
+	struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev);
+	struct mc13xxx_regulator *mc13xxx_regulators = priv->mc13xxx_regulators;
+	int ret, id = rdev_get_id(rdev);
+	unsigned int val;
+
+	dev_dbg(rdev_get_dev(rdev), "%s id: %d\n", __func__, id);
+
+	mc13xxx_lock(priv->mc13xxx);
+	ret = mc13xxx_reg_read(priv->mc13xxx,
+				mc13xxx_regulators[id].vsel_reg, &val);
+	mc13xxx_unlock(priv->mc13xxx);
+
+	if (ret)
+		return ret;
+
+	val = (val & mc13xxx_regulators[id].vsel_mask)
+		>> mc13xxx_regulators[id].vsel_shift;
+
+	dev_dbg(rdev_get_dev(rdev), "%s id: %d val: %d\n", __func__, id, val);
+
+	BUG_ON(val >= mc13xxx_regulators[id].desc.n_voltages);
+
+	return mc13xxx_regulators[id].voltages[val];
+}
+
+struct regulator_ops mc13xxx_regulator_ops = {
+	.enable = mc13xxx_regulator_enable,
+	.disable = mc13xxx_regulator_disable,
+	.is_enabled = mc13xxx_regulator_is_enabled,
+	.list_voltage = mc13xxx_regulator_list_voltage,
+	.set_voltage = mc13xxx_regulator_set_voltage,
+	.get_voltage = mc13xxx_regulator_get_voltage,
+};
+EXPORT_SYMBOL_GPL(mc13xxx_regulator_ops);
+
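+/*
+ * Fixed regulators carry a single entry in their voltage table; the two
+ * helpers below validate requests against and report voltages[0].
+ */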
+int mc13xxx_fixed_regulator_set_voltage(struct regulator_dev *rdev, int min_uV,
+	       int max_uV, unsigned *selector)
+{
+	struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev);
+	struct mc13xxx_regulator *mc13xxx_regulators = priv->mc13xxx_regulators;
+	int id = rdev_get_id(rdev);
+
+	dev_dbg(rdev_get_dev(rdev), "%s id: %d min_uV: %d max_uV: %d\n",
+		__func__, id, min_uV, max_uV);
+
+	if (min_uV >= mc13xxx_regulators[id].voltages[0] &&
+	    max_uV <= mc13xxx_regulators[id].voltages[0])
+		return 0;
+	else
+		return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(mc13xxx_fixed_regulator_set_voltage);
+
+int mc13xxx_fixed_regulator_get_voltage(struct regulator_dev *rdev)
+{
+	struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev);
+	struct mc13xxx_regulator *mc13xxx_regulators = priv->mc13xxx_regulators;
+	int id = rdev_get_id(rdev);
+
+	dev_dbg(rdev_get_dev(rdev), "%s id: %d\n", __func__, id);
+
+	return mc13xxx_regulators[id].voltages[0];
+}
+EXPORT_SYMBOL_GPL(mc13xxx_fixed_regulator_get_voltage);
+
+struct regulator_ops mc13xxx_fixed_regulator_ops = {
+	.enable = mc13xxx_regulator_enable,
+	.disable = mc13xxx_regulator_disable,
+	.is_enabled = mc13xxx_regulator_is_enabled,
+	.list_voltage = mc13xxx_regulator_list_voltage,
+	.set_voltage = mc13xxx_fixed_regulator_set_voltage,
+	.get_voltage = mc13xxx_fixed_regulator_get_voltage,
+};
+EXPORT_SYMBOL_GPL(mc13xxx_fixed_regulator_ops);
+
+int mc13xxx_sw_regulator_is_enabled(struct regulator_dev *rdev)
+{
+	return 1;
+}
+EXPORT_SYMBOL_GPL(mc13xxx_sw_regulator_is_enabled);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Yong Shen <yong.shen@linaro.org>");
+MODULE_DESCRIPTION("Regulator Driver for Freescale MC13xxx PMIC");
+MODULE_ALIAS("mc13xxx-regulator-core");
diff --git a/drivers/regulator/mc13xxx.h b/drivers/regulator/mc13xxx.h
new file mode 100644
index 0000000..2775826
--- /dev/null
+++ b/drivers/regulator/mc13xxx.h
@@ -0,0 +1,101 @@
+/*
+ * mc13xxx.h - regulators for the Freescale mc13xxx PMIC
+ *
+ *  Copyright (C) 2010 Yong Shen <yong.shen@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __LINUX_REGULATOR_MC13XXX_H
+#define __LINUX_REGULATOR_MC13XXX_H
+
+#include <linux/regulator/driver.h>
+
+struct mc13xxx_regulator {
+	struct regulator_desc desc;
+	int reg;
+	int enable_bit;
+	int vsel_reg;
+	int vsel_shift;
+	int vsel_mask;
+	int hi_bit;
+	int const *voltages;
+};
+
+struct mc13xxx_regulator_priv {
+	struct mc13xxx *mc13xxx;
+	u32 powermisc_pwgt_state;
+	struct mc13xxx_regulator *mc13xxx_regulators;
+	struct regulator_dev *regulators[];
+};
+
+extern int mc13xxx_sw_regulator(struct regulator_dev *rdev);
+extern int mc13xxx_sw_regulator_is_enabled(struct regulator_dev *rdev);
+extern int mc13xxx_get_best_voltage_index(struct regulator_dev *rdev,
+						int min_uV, int max_uV);
+extern int mc13xxx_regulator_list_voltage(struct regulator_dev *rdev,
+						unsigned selector);
+extern int mc13xxx_fixed_regulator_set_voltage(struct regulator_dev *rdev,
+		int min_uV, int max_uV, unsigned *selector);
+extern int mc13xxx_fixed_regulator_get_voltage(struct regulator_dev *rdev);
+
+extern struct regulator_ops mc13xxx_regulator_ops;
+extern struct regulator_ops mc13xxx_fixed_regulator_ops;
+
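+/*
+ * These helpers build regulator descriptors by token-pasting the chip
+ * prefix onto the register name and the derived EN/VSEL/VSEL_M bitfield
+ * names.
+ */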
+#define MC13xxx_DEFINE(prefix, _name, _reg, _vsel_reg, _voltages, _ops)	\
+	[prefix ## _name] = {				\
+		.desc = {						\
+			.name = #prefix "_" #_name,			\
+			.n_voltages = ARRAY_SIZE(_voltages),		\
+			.ops = &_ops,			\
+			.type = REGULATOR_VOLTAGE,			\
+			.id = prefix ## _name,		\
+			.owner = THIS_MODULE,				\
+		},							\
+		.reg = prefix ## _reg,				\
+		.enable_bit = prefix ## _reg ## _ ## _name ## EN,	\
+		.vsel_reg = prefix ## _vsel_reg,			\
+		.vsel_shift = prefix ## _vsel_reg ## _ ## _name ## VSEL,\
+		.vsel_mask = prefix ## _vsel_reg ## _ ## _name ## VSEL_M,\
+		.voltages =  _voltages,					\
+	}
+
+#define MC13xxx_FIXED_DEFINE(prefix, _name, _reg, _voltages, _ops)	\
+	[prefix ## _name] = {				\
+		.desc = {						\
+			.name = #prefix "_" #_name,			\
+			.n_voltages = ARRAY_SIZE(_voltages),		\
+			.ops = &_ops,		\
+			.type = REGULATOR_VOLTAGE,			\
+			.id = prefix ## _name,		\
+			.owner = THIS_MODULE,				\
+		},							\
+		.reg = prefix ## _reg,				\
+		.enable_bit = prefix ## _reg ## _ ## _name ## EN,	\
+		.voltages =  _voltages,					\
+	}
+
+#define MC13xxx_GPO_DEFINE(prefix, _name, _reg,  _voltages, _ops)	\
+	[prefix ## _name] = {				\
+		.desc = {						\
+			.name = #prefix "_" #_name,			\
+			.n_voltages = ARRAY_SIZE(_voltages),		\
+			.ops = &_ops,		\
+			.type = REGULATOR_VOLTAGE,			\
+			.id = prefix ## _name,		\
+			.owner = THIS_MODULE,				\
+		},							\
+		.reg = prefix ## _reg,				\
+		.enable_bit = prefix ## _reg ## _ ## _name ## EN,	\
+		.voltages =  _voltages,					\
+	}
+
+#define MC13xxx_DEFINE_SW(_name, _reg, _vsel_reg, _voltages, ops)	\
+	MC13xxx_DEFINE(SW, _name, _reg, _vsel_reg, _voltages, ops)
+#define MC13xxx_DEFINE_REGU(_name, _reg, _vsel_reg, _voltages, ops)	\
+	MC13xxx_DEFINE(REGU, _name, _reg, _vsel_reg, _voltages, ops)
+
+#endif
diff --git a/drivers/regulator/pcap-regulator.c b/drivers/regulator/pcap-regulator.c
index 29d0566..31f6e11 100644
--- a/drivers/regulator/pcap-regulator.c
+++ b/drivers/regulator/pcap-regulator.c
@@ -151,7 +151,8 @@
 };
 
 static int pcap_regulator_set_voltage(struct regulator_dev *rdev,
-						int min_uV, int max_uV)
+				      int min_uV, int max_uV,
+				      unsigned *selector)
 {
 	struct pcap_regulator *vreg = &vreg_table[rdev_get_id(rdev)];
 	void *pcap = rdev_get_drvdata(rdev);
@@ -170,10 +171,12 @@
 			i = 0;
 
 		uV = vreg->voltage_table[i] * 1000;
-		if (min_uV <= uV && uV <= max_uV)
+		if (min_uV <= uV && uV <= max_uV) {
+			*selector = i;
 			return ezx_pcap_set_bits(pcap, vreg->reg,
 					(vreg->n_voltages - 1) << vreg->index,
 					i << vreg->index);
+		}
 
 		if (i == 0 && rdev_get_id(rdev) == V1)
 			i = vreg->n_voltages - 1;
diff --git a/drivers/regulator/pcf50633-regulator.c b/drivers/regulator/pcf50633-regulator.c
index c8f41dc..69a11d9 100644
--- a/drivers/regulator/pcf50633-regulator.c
+++ b/drivers/regulator/pcf50633-regulator.c
@@ -108,7 +108,8 @@
 }
 
 static int pcf50633_regulator_set_voltage(struct regulator_dev *rdev,
-						int min_uV, int max_uV)
+					  int min_uV, int max_uV,
+					  unsigned *selector)
 {
 	struct pcf50633 *pcf;
 	int regulator_id, millivolts;
@@ -147,6 +148,8 @@
 		return -EINVAL;
 	}
 
+	*selector = volt_bits;
+
 	return pcf50633_reg_write(pcf, regnr, volt_bits);
 }
 
diff --git a/drivers/regulator/tps65023-regulator.c b/drivers/regulator/tps65023-regulator.c
index cd6d4fc..60a7ca5 100644
--- a/drivers/regulator/tps65023-regulator.c
+++ b/drivers/regulator/tps65023-regulator.c
@@ -321,7 +321,8 @@
 }
 
 static int tps65023_dcdc_set_voltage(struct regulator_dev *dev,
-				int min_uV, int max_uV)
+				     int min_uV, int max_uV,
+				     unsigned *selector)
 {
 	struct tps_pmic *tps = rdev_get_drvdata(dev);
 	int dcdc = rdev_get_id(dev);
@@ -346,6 +347,8 @@
 			break;
 	}
 
+	*selector = vsel;
+
 	/* write to the register in case we found a match */
 	if (vsel == tps->info[dcdc]->table_len)
 		return -EINVAL;
@@ -371,7 +374,7 @@
 }
 
 static int tps65023_ldo_set_voltage(struct regulator_dev *dev,
-				int min_uV, int max_uV)
+				    int min_uV, int max_uV, unsigned *selector)
 {
 	struct tps_pmic *tps = rdev_get_drvdata(dev);
 	int data, vsel, ldo = rdev_get_id(dev);
@@ -396,6 +399,8 @@
 	if (vsel == tps->info[ldo]->table_len)
 		return -EINVAL;
 
+	*selector = vsel;
+
 	data = tps_65023_reg_read(tps, TPS65023_REG_LDO_CTRL);
 	if (data < 0)
 		return data;
diff --git a/drivers/regulator/tps6507x-regulator.c b/drivers/regulator/tps6507x-regulator.c
index 020f587..0647552 100644
--- a/drivers/regulator/tps6507x-regulator.c
+++ b/drivers/regulator/tps6507x-regulator.c
@@ -369,7 +369,8 @@
 }
 
 static int tps6507x_pmic_dcdc_set_voltage(struct regulator_dev *dev,
-				int min_uV, int max_uV)
+					  int min_uV, int max_uV,
+					  unsigned *selector)
 {
 	struct tps6507x_pmic *tps = rdev_get_drvdata(dev);
 	int data, vsel, dcdc = rdev_get_id(dev);
@@ -415,6 +416,8 @@
 	if (vsel == tps->info[dcdc]->table_len)
 		return -EINVAL;
 
+	*selector = vsel;
+
 	data = tps6507x_pmic_reg_read(tps, reg);
 	if (data < 0)
 		return data;
@@ -450,7 +453,8 @@
 }
 
 static int tps6507x_pmic_ldo_set_voltage(struct regulator_dev *dev,
-				int min_uV, int max_uV)
+					 int min_uV, int max_uV,
+					 unsigned *selector)
 {
 	struct tps6507x_pmic *tps = rdev_get_drvdata(dev);
 	int data, vsel, ldo = rdev_get_id(dev);
@@ -483,6 +487,8 @@
 	if (vsel == tps->info[ldo]->table_len)
 		return -EINVAL;
 
+	*selector = vsel;
+
 	data = tps6507x_pmic_reg_read(tps, reg);
 	if (data < 0)
 		return data;
diff --git a/drivers/regulator/tps6524x-regulator.c b/drivers/regulator/tps6524x-regulator.c
new file mode 100644
index 0000000..176a6be
--- /dev/null
+++ b/drivers/regulator/tps6524x-regulator.c
@@ -0,0 +1,693 @@
+/*
+ * Regulator driver for TPS6524x PMIC
+ *
+ * Copyright (C) 2010 Texas Instruments
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any kind,
+ * whether express or implied; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/spi/spi.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+
+#define REG_LDO_SET		0x0
+#define LDO_ILIM_MASK		1	/* 0 = 400-800 mA, 1 = 900-1500 mA */
+#define LDO_VSEL_MASK		0x0f
+#define LDO2_ILIM_SHIFT		12
+#define LDO2_VSEL_SHIFT		4
+#define LDO1_ILIM_SHIFT		8
+#define LDO1_VSEL_SHIFT		0
+
+#define REG_BLOCK_EN		0x1
+#define BLOCK_MASK		1
+#define BLOCK_LDO1_SHIFT	0
+#define BLOCK_LDO2_SHIFT	1
+#define BLOCK_LCD_SHIFT		2
+#define BLOCK_USB_SHIFT		3
+
+#define REG_DCDC_SET		0x2
+#define DCDC_VDCDC_MASK		0x1f
+#define DCDC_VDCDC1_SHIFT	0
+#define DCDC_VDCDC2_SHIFT	5
+#define DCDC_VDCDC3_SHIFT	10
+
+#define REG_DCDC_EN		0x3
+#define DCDCDCDC_EN_MASK	0x1
+#define DCDCDCDC1_EN_SHIFT	0
+#define DCDCDCDC1_PG_MSK	BIT(1)
+#define DCDCDCDC2_EN_SHIFT	2
+#define DCDCDCDC2_PG_MSK	BIT(3)
+#define DCDCDCDC3_EN_SHIFT	4
+#define DCDCDCDC3_PG_MSK	BIT(5)
+
+#define REG_USB			0x4
+#define USB_ILIM_SHIFT		0
+#define USB_ILIM_MASK		0x3
+#define USB_TSD_SHIFT		2
+#define USB_TSD_MASK		0x3
+#define USB_TWARN_SHIFT		4
+#define USB_TWARN_MASK		0x3
+#define USB_IWARN_SD		BIT(6)
+#define USB_FAST_LOOP		BIT(7)
+
+#define REG_ALARM		0x5
+#define ALARM_LDO1		BIT(0)
+#define ALARM_DCDC1		BIT(1)
+#define ALARM_DCDC2		BIT(2)
+#define ALARM_DCDC3		BIT(3)
+#define ALARM_LDO2		BIT(4)
+#define ALARM_USB_WARN		BIT(5)
+#define ALARM_USB_ALARM		BIT(6)
+#define ALARM_LCD		BIT(9)
+#define ALARM_TEMP_WARM		BIT(10)
+#define ALARM_TEMP_HOT		BIT(11)
+#define ALARM_NRST		BIT(14)
+#define ALARM_POWERUP		BIT(15)
+
+#define REG_INT_ENABLE		0x6
+#define INT_LDO1		BIT(0)
+#define INT_DCDC1		BIT(1)
+#define INT_DCDC2		BIT(2)
+#define INT_DCDC3		BIT(3)
+#define INT_LDO2		BIT(4)
+#define INT_USB_WARN		BIT(5)
+#define INT_USB_ALARM		BIT(6)
+#define INT_LCD			BIT(9)
+#define INT_TEMP_WARM		BIT(10)
+#define INT_TEMP_HOT		BIT(11)
+#define INT_GLOBAL_EN		BIT(15)
+
+#define REG_INT_STATUS		0x7
+#define STATUS_LDO1		BIT(0)
+#define STATUS_DCDC1		BIT(1)
+#define STATUS_DCDC2		BIT(2)
+#define STATUS_DCDC3		BIT(3)
+#define STATUS_LDO2		BIT(4)
+#define STATUS_USB_WARN		BIT(5)
+#define STATUS_USB_ALARM	BIT(6)
+#define STATUS_LCD		BIT(9)
+#define STATUS_TEMP_WARM	BIT(10)
+#define STATUS_TEMP_HOT		BIT(11)
+
+#define REG_SOFTWARE_RESET	0xb
+#define REG_WRITE_ENABLE	0xd
+#define REG_REV_ID		0xf
+
+#define N_DCDC			3
+#define N_LDO			2
+#define N_SWITCH		2
+#define N_REGULATORS		(3 /* DCDC */ + \
+				 2 /* LDO */  + \
+				 2 /* switch */)
+
+#define FIXED_ILIMSEL		BIT(0)
+#define FIXED_VOLTAGE		BIT(1)
+
+#define CMD_READ(reg)		((reg) << 6)
+#define CMD_WRITE(reg)		(BIT(5) | (reg) << 6)
+#define STAT_CLK		BIT(3)
+#define STAT_WRITE		BIT(2)
+#define STAT_INVALID		BIT(1)
+#define STAT_WP			BIT(0)
+
+struct field {
+	int		reg;
+	int		shift;
+	int		mask;
+};
+
+struct supply_info {
+	const char	*name;
+	int		n_voltages;
+	const int	*voltages;
+	int		fixed_voltage;
+	int		n_ilimsels;
+	const int	*ilimsels;
+	int		fixed_ilimsel;
+	int		flags;
+	struct field	enable, voltage, ilimsel;
+};
+
+struct tps6524x {
+	struct device		*dev;
+	struct spi_device	*spi;
+	struct mutex		lock;
+	struct regulator_desc	desc[N_REGULATORS];
+	struct regulator_dev	*rdev[N_REGULATORS];
+};
+
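+/*
+ * Each register access is one SPI message of three transfers: a 12-bit
+ * command word, a 16-bit data word and a 4-bit status nibble returned
+ * by the chip.
+ */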
+static int __read_reg(struct tps6524x *hw, int reg)
+{
+	int error = 0;
+	u16 cmd = CMD_READ(reg), in;
+	u8 status;
+	struct spi_message m;
+	struct spi_transfer t[3];
+
+	spi_message_init(&m);
+	memset(t, 0, sizeof(t));
+
+	t[0].tx_buf = &cmd;
+	t[0].len = 2;
+	t[0].bits_per_word = 12;
+	spi_message_add_tail(&t[0], &m);
+
+	t[1].rx_buf = &in;
+	t[1].len = 2;
+	t[1].bits_per_word = 16;
+	spi_message_add_tail(&t[1], &m);
+
+	t[2].rx_buf = &status;
+	t[2].len = 1;
+	t[2].bits_per_word = 4;
+	spi_message_add_tail(&t[2], &m);
+
+	error = spi_sync(hw->spi, &m);
+	if (error < 0)
+		return error;
+
+	dev_dbg(hw->dev, "read reg %d, data %x, status %x\n",
+		reg, in, status);
+
+	if (!(status & STAT_CLK) || (status & STAT_WRITE))
+		return -EIO;
+
+	if (status & STAT_INVALID)
+		return -EINVAL;
+
+	return in;
+}
+
+static int read_reg(struct tps6524x *hw, int reg)
+{
+	int ret;
+
+	mutex_lock(&hw->lock);
+	ret = __read_reg(hw, reg);
+	mutex_unlock(&hw->lock);
+
+	return ret;
+}
+
+static int __write_reg(struct tps6524x *hw, int reg, int val)
+{
+	int error = 0;
+	u16 cmd = CMD_WRITE(reg), out = val;
+	u8 status;
+	struct spi_message m;
+	struct spi_transfer t[3];
+
+	spi_message_init(&m);
+	memset(t, 0, sizeof(t));
+
+	t[0].tx_buf = &cmd;
+	t[0].len = 2;
+	t[0].bits_per_word = 12;
+	spi_message_add_tail(&t[0], &m);
+
+	t[1].tx_buf = &out;
+	t[1].len = 2;
+	t[1].bits_per_word = 16;
+	spi_message_add_tail(&t[1], &m);
+
+	t[2].rx_buf = &status;
+	t[2].len = 1;
+	t[2].bits_per_word = 4;
+	spi_message_add_tail(&t[2], &m);
+
+	error = spi_sync(hw->spi, &m);
+	if (error < 0)
+		return error;
+
+	dev_dbg(hw->dev, "wrote reg %d, data %x, status %x\n",
+		reg, out, status);
+
+	if (!(status & STAT_CLK) || !(status & STAT_WRITE))
+		return -EIO;
+
+	if (status & (STAT_INVALID | STAT_WP))
+		return -EINVAL;
+
+	return error;
+}
+
+static int __rmw_reg(struct tps6524x *hw, int reg, int mask, int val)
+{
+	int ret;
+
+	ret = __read_reg(hw, reg);
+	if (ret < 0)
+		return ret;
+
+	ret &= ~mask;
+	ret |= val;
+
+	ret = __write_reg(hw, reg, ret);
+
+	return (ret < 0) ? ret : 0;
+}
+
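+/*
+ * Register writes must be bracketed by setting and clearing
+ * REG_WRITE_ENABLE; rmw_protect handles that sequencing under the
+ * device lock.
+ */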
+static int rmw_protect(struct tps6524x *hw, int reg, int mask, int val)
+{
+	int ret;
+
+	mutex_lock(&hw->lock);
+
+	ret = __write_reg(hw, REG_WRITE_ENABLE, 1);
+	if (ret) {
+		dev_err(hw->dev, "failed to set write enable\n");
+		goto error;
+	}
+
+	ret = __rmw_reg(hw, reg, mask, val);
+	if (ret)
+		dev_err(hw->dev, "failed to rmw register %d\n", reg);
+
+	ret = __write_reg(hw, REG_WRITE_ENABLE, 0);
+	if (ret) {
+		dev_err(hw->dev, "failed to clear write enable\n");
+		goto error;
+	}
+
+error:
+	mutex_unlock(&hw->lock);
+
+	return ret;
+}
+
+static int read_field(struct tps6524x *hw, const struct field *field)
+{
+	int tmp;
+
+	tmp = read_reg(hw, field->reg);
+	if (tmp < 0)
+		return tmp;
+
+	return (tmp >> field->shift) & field->mask;
+}
+
+static int write_field(struct tps6524x *hw, const struct field *field,
+		       int val)
+{
+	if (val & ~field->mask)
+		return -EOVERFLOW;
+
+	return rmw_protect(hw, field->reg,
+				    field->mask << field->shift,
+				    val << field->shift);
+}
+
+static const int dcdc1_voltages[] = {
+	 800000,  825000,  850000,  875000,
+	 900000,  925000,  950000,  975000,
+	1000000, 1025000, 1050000, 1075000,
+	1100000, 1125000, 1150000, 1175000,
+	1200000, 1225000, 1250000, 1275000,
+	1300000, 1325000, 1350000, 1375000,
+	1400000, 1425000, 1450000, 1475000,
+	1500000, 1525000, 1550000, 1575000,
+};
+
+static const int dcdc2_voltages[] = {
+	1400000, 1450000, 1500000, 1550000,
+	1600000, 1650000, 1700000, 1750000,
+	1800000, 1850000, 1900000, 1950000,
+	2000000, 2050000, 2100000, 2150000,
+	2200000, 2250000, 2300000, 2350000,
+	2400000, 2450000, 2500000, 2550000,
+	2600000, 2650000, 2700000, 2750000,
+	2800000, 2850000, 2900000, 2950000,
+};
+
+static const int dcdc3_voltages[] = {
+	2400000, 2450000, 2500000, 2550000, 2600000,
+	2650000, 2700000, 2750000, 2800000, 2850000,
+	2900000, 2950000, 3000000, 3050000, 3100000,
+	3150000, 3200000, 3250000, 3300000, 3350000,
+	3400000, 3450000, 3500000, 3550000, 3600000,
+};
+
+static const int ldo1_voltages[] = {
+	4300000, 4350000, 4400000, 4450000,
+	4500000, 4550000, 4600000, 4650000,
+	4700000, 4750000, 4800000, 4850000,
+	4900000, 4950000, 5000000, 5050000,
+};
+
+static const int ldo2_voltages[] = {
+	1100000, 1150000, 1200000, 1250000,
+	1300000, 1700000, 1750000, 1800000,
+	1850000, 1900000, 3150000, 3200000,
+	3250000, 3300000, 3350000, 3400000,
+};
+
+static const int ldo_ilimsel[] = {
+	400000, 1500000
+};
+
+static const int usb_ilimsel[] = {
+	200000, 400000, 800000, 1000000
+};
+
+#define __MK_FIELD(_reg, _mask, _shift) \
+	{ .reg = (_reg), .mask = (_mask), .shift = (_shift), }
+
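+/*
+ * Static description of each supply: entries flagged FIXED_VOLTAGE or
+ * FIXED_ILIMSEL report a constant value instead of reading a register
+ * field.
+ */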
+static const struct supply_info supply_info[N_REGULATORS] = {
+	{
+		.name		= "DCDC1",
+		.flags		= FIXED_ILIMSEL,
+		.n_voltages	= ARRAY_SIZE(dcdc1_voltages),
+		.voltages	= dcdc1_voltages,
+		.fixed_ilimsel	= 2400000,
+		.enable		= __MK_FIELD(REG_DCDC_EN, DCDCDCDC_EN_MASK,
+					     DCDCDCDC1_EN_SHIFT),
+		.voltage	= __MK_FIELD(REG_DCDC_SET, DCDC_VDCDC_MASK,
+					     DCDC_VDCDC1_SHIFT),
+	},
+	{
+		.name		= "DCDC2",
+		.flags		= FIXED_ILIMSEL,
+		.n_voltages	= ARRAY_SIZE(dcdc2_voltages),
+		.voltages	= dcdc2_voltages,
+		.fixed_ilimsel	= 1200000,
+		.enable		= __MK_FIELD(REG_DCDC_EN, DCDCDCDC_EN_MASK,
+					     DCDCDCDC2_EN_SHIFT),
+		.voltage	= __MK_FIELD(REG_DCDC_SET, DCDC_VDCDC_MASK,
+					     DCDC_VDCDC2_SHIFT),
+	},
+	{
+		.name		= "DCDC3",
+		.flags		= FIXED_ILIMSEL,
+		.n_voltages	= ARRAY_SIZE(dcdc3_voltages),
+		.voltages	= dcdc3_voltages,
+		.fixed_ilimsel	= 1200000,
+		.enable		= __MK_FIELD(REG_DCDC_EN, DCDCDCDC_EN_MASK,
+					DCDCDCDC3_EN_SHIFT),
+		.voltage	= __MK_FIELD(REG_DCDC_SET, DCDC_VDCDC_MASK,
+					     DCDC_VDCDC3_SHIFT),
+	},
+	{
+		.name		= "LDO1",
+		.n_voltages	= ARRAY_SIZE(ldo1_voltages),
+		.voltages	= ldo1_voltages,
+		.n_ilimsels	= ARRAY_SIZE(ldo_ilimsel),
+		.ilimsels	= ldo_ilimsel,
+		.enable		= __MK_FIELD(REG_BLOCK_EN, BLOCK_MASK,
+					     BLOCK_LDO1_SHIFT),
+		.voltage	= __MK_FIELD(REG_LDO_SET, LDO_VSEL_MASK,
+					     LDO1_VSEL_SHIFT),
+		.ilimsel	= __MK_FIELD(REG_LDO_SET, LDO_ILIM_MASK,
+					     LDO1_ILIM_SHIFT),
+	},
+	{
+		.name		= "LDO2",
+		.n_voltages	= ARRAY_SIZE(ldo2_voltages),
+		.voltages	= ldo2_voltages,
+		.n_ilimsels	= ARRAY_SIZE(ldo_ilimsel),
+		.ilimsels	= ldo_ilimsel,
+		.enable		= __MK_FIELD(REG_BLOCK_EN, BLOCK_MASK,
+					     BLOCK_LDO2_SHIFT),
+		.voltage	= __MK_FIELD(REG_LDO_SET, LDO_VSEL_MASK,
+					     LDO2_VSEL_SHIFT),
+		.ilimsel	= __MK_FIELD(REG_LDO_SET, LDO_ILIM_MASK,
+					     LDO2_ILIM_SHIFT),
+	},
+	{
+		.name		= "USB",
+		.flags		= FIXED_VOLTAGE,
+		.fixed_voltage	= 5000000,
+		.n_ilimsels	= ARRAY_SIZE(usb_ilimsel),
+		.ilimsels	= usb_ilimsel,
+		.enable		= __MK_FIELD(REG_BLOCK_EN, BLOCK_MASK,
+					     BLOCK_USB_SHIFT),
+		.ilimsel	= __MK_FIELD(REG_USB, USB_ILIM_MASK,
+					     USB_ILIM_SHIFT),
+	},
+	{
+		.name		= "LCD",
+		.flags		= FIXED_VOLTAGE | FIXED_ILIMSEL,
+		.fixed_voltage	= 5000000,
+		.fixed_ilimsel	=  400000,
+		.enable		= __MK_FIELD(REG_BLOCK_EN, BLOCK_MASK,
+					     BLOCK_LCD_SHIFT),
+	},
+};
+
+static int list_voltage(struct regulator_dev *rdev, unsigned selector)
+{
+	const struct supply_info *info;
+	struct tps6524x *hw;
+
+	hw	= rdev_get_drvdata(rdev);
+	info	= &supply_info[rdev_get_id(rdev)];
+
+	if (info->flags & FIXED_VOLTAGE)
+		return selector ? -EINVAL : info->fixed_voltage;
+
+	return ((selector < info->n_voltages) ?
+		info->voltages[selector] : -EINVAL);
+}
+
+static int set_voltage(struct regulator_dev *rdev, int min_uV, int max_uV,
+		       unsigned *selector)
+{
+	const struct supply_info *info;
+	struct tps6524x *hw;
+	unsigned i;
+
+	hw	= rdev_get_drvdata(rdev);
+	info	= &supply_info[rdev_get_id(rdev)];
+
+	if (info->flags & FIXED_VOLTAGE)
+		return -EINVAL;
+
+	for (i = 0; i < info->n_voltages; i++)
+		if (min_uV <= info->voltages[i] &&
+		    max_uV >= info->voltages[i])
+			break;
+
+	if (i >= info->n_voltages)
+		i = info->n_voltages - 1;
+
+	*selector = i;
+
+	return write_field(hw, &info->voltage, i);
+}
+
+static int get_voltage(struct regulator_dev *rdev)
+{
+	const struct supply_info *info;
+	struct tps6524x *hw;
+	int ret;
+
+	hw	= rdev_get_drvdata(rdev);
+	info	= &supply_info[rdev_get_id(rdev)];
+
+	if (info->flags & FIXED_VOLTAGE)
+		return info->fixed_voltage;
+
+	ret = read_field(hw, &info->voltage);
+	if (ret < 0)
+		return ret;
+	if (WARN_ON(ret >= info->n_voltages))
+		return -EIO;
+
+	return info->voltages[ret];
+}
+
+static int set_current_limit(struct regulator_dev *rdev, int min_uA,
+			     int max_uA)
+{
+	const struct supply_info *info;
+	struct tps6524x *hw;
+	int i;
+
+	hw	= rdev_get_drvdata(rdev);
+	info	= &supply_info[rdev_get_id(rdev)];
+
+	if (info->flags & FIXED_ILIMSEL)
+		return -EINVAL;
+
+	for (i = 0; i < info->n_ilimsels; i++)
+		if (min_uA <= info->ilimsels[i] &&
+		    max_uA >= info->ilimsels[i])
+			break;
+
+	if (i >= info->n_ilimsels)
+		return -EINVAL;
+
+	return write_field(hw, &info->ilimsel, i);
+}
+
+static int get_current_limit(struct regulator_dev *rdev)
+{
+	const struct supply_info *info;
+	struct tps6524x *hw;
+	int ret;
+
+	hw	= rdev_get_drvdata(rdev);
+	info	= &supply_info[rdev_get_id(rdev)];
+
+	if (info->flags & FIXED_ILIMSEL)
+		return info->fixed_ilimsel;
+
+	ret = read_field(hw, &info->ilimsel);
+	if (ret < 0)
+		return ret;
+	if (WARN_ON(ret >= info->n_ilimsels))
+		return -EIO;
+
+	return info->ilimsels[ret];
+}
+
+static int enable_supply(struct regulator_dev *rdev)
+{
+	const struct supply_info *info;
+	struct tps6524x *hw;
+
+	hw	= rdev_get_drvdata(rdev);
+	info	= &supply_info[rdev_get_id(rdev)];
+
+	return write_field(hw, &info->enable, 1);
+}
+
+static int disable_supply(struct regulator_dev *rdev)
+{
+	const struct supply_info *info;
+	struct tps6524x *hw;
+
+	hw	= rdev_get_drvdata(rdev);
+	info	= &supply_info[rdev_get_id(rdev)];
+
+	return write_field(hw, &info->enable, 0);
+}
+
+static int is_supply_enabled(struct regulator_dev *rdev)
+{
+	const struct supply_info *info;
+	struct tps6524x *hw;
+
+	hw	= rdev_get_drvdata(rdev);
+	info	= &supply_info[rdev_get_id(rdev)];
+
+	return read_field(hw, &info->enable);
+}
+
+static struct regulator_ops regulator_ops = {
+	.is_enabled		= is_supply_enabled,
+	.enable			= enable_supply,
+	.disable		= disable_supply,
+	.get_voltage		= get_voltage,
+	.set_voltage		= set_voltage,
+	.list_voltage		= list_voltage,
+	.set_current_limit	= set_current_limit,
+	.get_current_limit	= get_current_limit,
+};
+
+static int __devexit pmic_remove(struct spi_device *spi)
+{
+	struct tps6524x *hw = spi_get_drvdata(spi);
+	int i;
+
+	if (!hw)
+		return 0;
+	for (i = 0; i < N_REGULATORS; i++) {
+		if (hw->rdev[i])
+			regulator_unregister(hw->rdev[i]);
+		hw->rdev[i] = NULL;
+	}
+	spi_set_drvdata(spi, NULL);
+	kfree(hw);
+	return 0;
+}
+
+static int __devinit pmic_probe(struct spi_device *spi)
+{
+	struct tps6524x *hw;
+	struct device *dev = &spi->dev;
+	const struct supply_info *info = supply_info;
+	struct regulator_init_data *init_data;
+	int ret = 0, i;
+
+	init_data = dev->platform_data;
+	if (!init_data) {
+		dev_err(dev, "could not find regulator platform data\n");
+		return -EINVAL;
+	}
+
+	hw = kzalloc(sizeof(struct tps6524x), GFP_KERNEL);
+	if (!hw) {
+		dev_err(dev, "cannot allocate regulator private data\n");
+		return -ENOMEM;
+	}
+	spi_set_drvdata(spi, hw);
+
+	memset(hw, 0, sizeof(struct tps6524x));
+	hw->dev = dev;
+	hw->spi = spi_dev_get(spi);
+	mutex_init(&hw->lock);
+
+	for (i = 0; i < N_REGULATORS; i++, info++, init_data++) {
+		hw->desc[i].name	= info->name;
+		hw->desc[i].id		= i;
+		hw->desc[i].n_voltages	= info->n_voltages;
+		hw->desc[i].ops		= &regulator_ops;
+		hw->desc[i].type	= REGULATOR_VOLTAGE;
+		hw->desc[i].owner	= THIS_MODULE;
+
+		if (info->flags & FIXED_VOLTAGE)
+			hw->desc[i].n_voltages = 1;
+
+		hw->rdev[i] = regulator_register(&hw->desc[i], dev,
+						 init_data, hw);
+		if (IS_ERR(hw->rdev[i])) {
+			ret = PTR_ERR(hw->rdev[i]);
+			hw->rdev[i] = NULL;
+			goto fail;
+		}
+	}
+
+	return 0;
+
+fail:
+	pmic_remove(spi);
+	return ret;
+}
+
+static struct spi_driver pmic_driver = {
+	.probe		= pmic_probe,
+	.remove		= __devexit_p(pmic_remove),
+	.driver		= {
+		.name	= "tps6524x",
+		.owner	= THIS_MODULE,
+	},
+};
+
+static int __init pmic_driver_init(void)
+{
+	return spi_register_driver(&pmic_driver);
+}
+module_init(pmic_driver_init);
+
+static void __exit pmic_driver_exit(void)
+{
+	spi_unregister_driver(&pmic_driver);
+}
+module_exit(pmic_driver_exit);
+
+MODULE_DESCRIPTION("TPS6524X PMIC Driver");
+MODULE_AUTHOR("Cyril Chemparathy");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("spi:tps6524x");
diff --git a/drivers/regulator/tps6586x-regulator.c b/drivers/regulator/tps6586x-regulator.c
index 6d20b04..bb04a75 100644
--- a/drivers/regulator/tps6586x-regulator.c
+++ b/drivers/regulator/tps6586x-regulator.c
@@ -85,7 +85,8 @@
 
 static int __tps6586x_ldo_set_voltage(struct device *parent,
 				      struct tps6586x_regulator *ri,
-				      int min_uV, int max_uV)
+				      int min_uV, int max_uV,
+				      unsigned *selector)
 {
 	int val, uV;
 	uint8_t mask;
@@ -100,6 +101,8 @@
 		/* use the first in-range value */
 		if (min_uV <= uV && uV <= max_uV) {
 
+			*selector = val;
+
 			val <<= ri->volt_shift;
 			mask = ((1 << ri->volt_nbits) - 1) << ri->volt_shift;
 
@@ -111,12 +114,13 @@
 }
 
 static int tps6586x_ldo_set_voltage(struct regulator_dev *rdev,
-				    int min_uV, int max_uV)
+				    int min_uV, int max_uV, unsigned *selector)
 {
 	struct tps6586x_regulator *ri = rdev_get_drvdata(rdev);
 	struct device *parent = to_tps6586x_dev(rdev);
 
-	return __tps6586x_ldo_set_voltage(parent, ri, min_uV, max_uV);
+	return __tps6586x_ldo_set_voltage(parent, ri, min_uV, max_uV,
+					  selector);
 }
 
 static int tps6586x_ldo_get_voltage(struct regulator_dev *rdev)
@@ -140,13 +144,14 @@
 }
 
 static int tps6586x_dvm_set_voltage(struct regulator_dev *rdev,
-				    int min_uV, int max_uV)
+				    int min_uV, int max_uV, unsigned *selector)
 {
 	struct tps6586x_regulator *ri = rdev_get_drvdata(rdev);
 	struct device *parent = to_tps6586x_dev(rdev);
 	int ret;
 
-	ret = __tps6586x_ldo_set_voltage(parent, ri, min_uV, max_uV);
+	ret = __tps6586x_ldo_set_voltage(parent, ri, min_uV, max_uV,
+					 selector);
 	if (ret)
 		return ret;
 
diff --git a/drivers/regulator/twl-regulator.c b/drivers/regulator/twl-regulator.c
index a57262a..bd332cf 100644
--- a/drivers/regulator/twl-regulator.c
+++ b/drivers/regulator/twl-regulator.c
@@ -329,7 +329,8 @@
 }
 
 static int
-twl4030ldo_set_voltage(struct regulator_dev *rdev, int min_uV, int max_uV)
+twl4030ldo_set_voltage(struct regulator_dev *rdev, int min_uV, int max_uV,
+		       unsigned *selector)
 {
 	struct twlreg_info	*info = rdev_get_drvdata(rdev);
 	int			vsel;
@@ -345,9 +346,11 @@
 		/* REVISIT for VAUX2, first match may not be best/lowest */
 
 		/* use the first in-range value */
-		if (min_uV <= uV && uV <= max_uV)
+		if (min_uV <= uV && uV <= max_uV) {
+			*selector = vsel;
 			return twlreg_write(info, TWL_MODULE_PM_RECEIVER,
 							VREG_VOLTAGE, vsel);
+		}
 	}
 
 	return -EDOM;
@@ -389,7 +392,8 @@
 }
 
 static int
-twl6030ldo_set_voltage(struct regulator_dev *rdev, int min_uV, int max_uV)
+twl6030ldo_set_voltage(struct regulator_dev *rdev, int min_uV, int max_uV,
+		       unsigned *selector)
 {
 	struct twlreg_info	*info = rdev_get_drvdata(rdev);
 	int			vsel;
@@ -402,6 +406,7 @@
 	 * mV = 1000mv + 100mv * (vsel - 1)
 	 */
 	vsel = (min_uV/1000 - 1000)/100 + 1;
+	*selector = vsel;
 	return twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_VOLTAGE, vsel);
 
 }
diff --git a/drivers/regulator/wm831x-dcdc.c b/drivers/regulator/wm831x-dcdc.c
index dbfaf59..8b0d2c4 100644
--- a/drivers/regulator/wm831x-dcdc.c
+++ b/drivers/regulator/wm831x-dcdc.c
@@ -302,7 +302,7 @@
 }
 
 static int wm831x_buckv_set_voltage(struct regulator_dev *rdev,
-				    int min_uV, int max_uV)
+				    int min_uV, int max_uV, unsigned *selector)
 {
 	struct wm831x_dcdc *dcdc = rdev_get_drvdata(rdev);
 	struct wm831x *wm831x = dcdc->wm831x;
@@ -314,6 +314,8 @@
 	if (vsel < 0)
 		return vsel;
 
+	*selector = vsel;
+
 	/* If this value is already set then do a GPIO update if we can */
 	if (dcdc->dvs_gpio && dcdc->on_vsel == vsel)
 		return wm831x_buckv_set_dvs(rdev, 0);
@@ -375,14 +377,14 @@
 	return wm831x_set_bits(wm831x, reg, WM831X_DC1_SLP_VSEL_MASK, vsel);
 }
 
-static int wm831x_buckv_get_voltage(struct regulator_dev *rdev)
+static int wm831x_buckv_get_voltage_sel(struct regulator_dev *rdev)
 {
 	struct wm831x_dcdc *dcdc = rdev_get_drvdata(rdev);
 
 	if (dcdc->dvs_gpio && dcdc->dvs_gpio_state)
-		return wm831x_buckv_list_voltage(rdev, dcdc->dvs_vsel);
+		return dcdc->dvs_vsel;
 	else
-		return wm831x_buckv_list_voltage(rdev, dcdc->on_vsel);
+		return dcdc->on_vsel;
 }
 
 /* Current limit options */
@@ -424,7 +426,7 @@
 
 static struct regulator_ops wm831x_buckv_ops = {
 	.set_voltage = wm831x_buckv_set_voltage,
-	.get_voltage = wm831x_buckv_get_voltage,
+	.get_voltage_sel = wm831x_buckv_get_voltage_sel,
 	.list_voltage = wm831x_buckv_list_voltage,
 	.set_suspend_voltage = wm831x_buckv_set_suspend_voltage,
 	.set_current_limit = wm831x_buckv_set_current_limit,
@@ -636,7 +638,7 @@
 }
 
 static int wm831x_buckp_set_voltage_int(struct regulator_dev *rdev, int reg,
-					int min_uV, int max_uV)
+					int min_uV, int max_uV, int *selector)
 {
 	struct wm831x_dcdc *dcdc = rdev_get_drvdata(rdev);
 	struct wm831x *wm831x = dcdc->wm831x;
@@ -650,16 +652,20 @@
 	if (wm831x_buckp_list_voltage(rdev, vsel) > max_uV)
 		return -EINVAL;
 
+	*selector = vsel;
+
 	return wm831x_set_bits(wm831x, reg, WM831X_DC3_ON_VSEL_MASK, vsel);
 }
 
 static int wm831x_buckp_set_voltage(struct regulator_dev *rdev,
-				    int min_uV, int max_uV)
+				    int min_uV, int max_uV,
+				    unsigned *selector)
 {
 	struct wm831x_dcdc *dcdc = rdev_get_drvdata(rdev);
 	u16 reg = dcdc->base + WM831X_DCDC_ON_CONFIG;
 
-	return wm831x_buckp_set_voltage_int(rdev, reg, min_uV, max_uV);
+	return wm831x_buckp_set_voltage_int(rdev, reg, min_uV, max_uV,
+					    selector);
 }
 
 static int wm831x_buckp_set_suspend_voltage(struct regulator_dev *rdev,
@@ -667,11 +673,12 @@
 {
 	struct wm831x_dcdc *dcdc = rdev_get_drvdata(rdev);
 	u16 reg = dcdc->base + WM831X_DCDC_SLEEP_CONTROL;
+	unsigned selector;
 
-	return wm831x_buckp_set_voltage_int(rdev, reg, uV, uV);
+	return wm831x_buckp_set_voltage_int(rdev, reg, uV, uV, &selector);
 }
 
-static int wm831x_buckp_get_voltage(struct regulator_dev *rdev)
+static int wm831x_buckp_get_voltage_sel(struct regulator_dev *rdev)
 {
 	struct wm831x_dcdc *dcdc = rdev_get_drvdata(rdev);
 	struct wm831x *wm831x = dcdc->wm831x;
@@ -682,12 +689,12 @@
 	if (val < 0)
 		return val;
 
-	return wm831x_buckp_list_voltage(rdev, val & WM831X_DC3_ON_VSEL_MASK);
+	return val & WM831X_DC3_ON_VSEL_MASK;
 }
 
 static struct regulator_ops wm831x_buckp_ops = {
 	.set_voltage = wm831x_buckp_set_voltage,
-	.get_voltage = wm831x_buckp_get_voltage,
+	.get_voltage_sel = wm831x_buckp_get_voltage_sel,
 	.list_voltage = wm831x_buckp_list_voltage,
 	.set_suspend_voltage = wm831x_buckp_set_suspend_voltage,
 
diff --git a/drivers/regulator/wm831x-ldo.c b/drivers/regulator/wm831x-ldo.c
index 9edf8f6..c94fc5b 100644
--- a/drivers/regulator/wm831x-ldo.c
+++ b/drivers/regulator/wm831x-ldo.c
@@ -113,7 +113,8 @@
 }
 
 static int wm831x_gp_ldo_set_voltage_int(struct regulator_dev *rdev, int reg,
-					 int min_uV, int max_uV)
+					 int min_uV, int max_uV,
+					 unsigned *selector)
 {
 	struct wm831x_ldo *ldo = rdev_get_drvdata(rdev);
 	struct wm831x *wm831x = ldo->wm831x;
@@ -133,16 +134,20 @@
 	if (ret < min_uV || ret > max_uV)
 		return -EINVAL;
 
+	*selector = vsel;
+
 	return wm831x_set_bits(wm831x, reg, WM831X_LDO1_ON_VSEL_MASK, vsel);
 }
 
 static int wm831x_gp_ldo_set_voltage(struct regulator_dev *rdev,
-				     int min_uV, int max_uV)
+				     int min_uV, int max_uV,
+				     unsigned *selector)
 {
 	struct wm831x_ldo *ldo = rdev_get_drvdata(rdev);
 	int reg = ldo->base + WM831X_LDO_ON_CONTROL;
 
-	return wm831x_gp_ldo_set_voltage_int(rdev, reg, min_uV, max_uV);
+	return wm831x_gp_ldo_set_voltage_int(rdev, reg, min_uV, max_uV,
+					     selector);
 }
 
 static int wm831x_gp_ldo_set_suspend_voltage(struct regulator_dev *rdev,
@@ -150,11 +155,12 @@
 {
 	struct wm831x_ldo *ldo = rdev_get_drvdata(rdev);
 	int reg = ldo->base + WM831X_LDO_SLEEP_CONTROL;
+	unsigned int selector;
 
-	return wm831x_gp_ldo_set_voltage_int(rdev, reg, uV, uV);
+	return wm831x_gp_ldo_set_voltage_int(rdev, reg, uV, uV, &selector);
 }
 
-static int wm831x_gp_ldo_get_voltage(struct regulator_dev *rdev)
+static int wm831x_gp_ldo_get_voltage_sel(struct regulator_dev *rdev)
 {
 	struct wm831x_ldo *ldo = rdev_get_drvdata(rdev);
 	struct wm831x *wm831x = ldo->wm831x;
@@ -167,7 +173,7 @@
 
 	ret &= WM831X_LDO1_ON_VSEL_MASK;
 
-	return wm831x_gp_ldo_list_voltage(rdev, ret);
+	return ret;
 }
 
 static unsigned int wm831x_gp_ldo_get_mode(struct regulator_dev *rdev)
@@ -287,7 +293,7 @@
 
 static struct regulator_ops wm831x_gp_ldo_ops = {
 	.list_voltage = wm831x_gp_ldo_list_voltage,
-	.get_voltage = wm831x_gp_ldo_get_voltage,
+	.get_voltage_sel = wm831x_gp_ldo_get_voltage_sel,
 	.set_voltage = wm831x_gp_ldo_set_voltage,
 	.set_suspend_voltage = wm831x_gp_ldo_set_suspend_voltage,
 	.get_mode = wm831x_gp_ldo_get_mode,
@@ -413,7 +419,8 @@
 }
 
 static int wm831x_aldo_set_voltage_int(struct regulator_dev *rdev, int reg,
-					 int min_uV, int max_uV)
+				       int min_uV, int max_uV,
+				       unsigned *selector)
 {
 	struct wm831x_ldo *ldo = rdev_get_drvdata(rdev);
 	struct wm831x *wm831x = ldo->wm831x;
@@ -433,16 +440,19 @@
 	if (ret < min_uV || ret > max_uV)
 		return -EINVAL;
 
+	*selector = vsel;
+
 	return wm831x_set_bits(wm831x, reg, WM831X_LDO7_ON_VSEL_MASK, vsel);
 }
 
 static int wm831x_aldo_set_voltage(struct regulator_dev *rdev,
-				     int min_uV, int max_uV)
+				   int min_uV, int max_uV, unsigned *selector)
 {
 	struct wm831x_ldo *ldo = rdev_get_drvdata(rdev);
 	int reg = ldo->base + WM831X_LDO_ON_CONTROL;
 
-	return wm831x_aldo_set_voltage_int(rdev, reg, min_uV, max_uV);
+	return wm831x_aldo_set_voltage_int(rdev, reg, min_uV, max_uV,
+					   selector);
 }
 
 static int wm831x_aldo_set_suspend_voltage(struct regulator_dev *rdev,
@@ -450,11 +460,12 @@
 {
 	struct wm831x_ldo *ldo = rdev_get_drvdata(rdev);
 	int reg = ldo->base + WM831X_LDO_SLEEP_CONTROL;
+	unsigned int selector;
 
-	return wm831x_aldo_set_voltage_int(rdev, reg, uV, uV);
+	return wm831x_aldo_set_voltage_int(rdev, reg, uV, uV, &selector);
 }
 
-static int wm831x_aldo_get_voltage(struct regulator_dev *rdev)
+static int wm831x_aldo_get_voltage_sel(struct regulator_dev *rdev)
 {
 	struct wm831x_ldo *ldo = rdev_get_drvdata(rdev);
 	struct wm831x *wm831x = ldo->wm831x;
@@ -467,7 +478,7 @@
 
 	ret &= WM831X_LDO7_ON_VSEL_MASK;
 
-	return wm831x_aldo_list_voltage(rdev, ret);
+	return ret;
 }
 
 static unsigned int wm831x_aldo_get_mode(struct regulator_dev *rdev)
@@ -548,7 +559,7 @@
 
 static struct regulator_ops wm831x_aldo_ops = {
 	.list_voltage = wm831x_aldo_list_voltage,
-	.get_voltage = wm831x_aldo_get_voltage,
+	.get_voltage_sel = wm831x_aldo_get_voltage_sel,
 	.set_voltage = wm831x_aldo_set_voltage,
 	.set_suspend_voltage = wm831x_aldo_set_suspend_voltage,
 	.get_mode = wm831x_aldo_get_mode,
@@ -666,7 +677,8 @@
 
 static int wm831x_alive_ldo_set_voltage_int(struct regulator_dev *rdev,
 					    int reg,
-					    int min_uV, int max_uV)
+					    int min_uV, int max_uV,
+					    unsigned *selector)
 {
 	struct wm831x_ldo *ldo = rdev_get_drvdata(rdev);
 	struct wm831x *wm831x = ldo->wm831x;
@@ -680,16 +692,20 @@
 	if (ret < min_uV || ret > max_uV)
 		return -EINVAL;
 
+	*selector = vsel;
+
 	return wm831x_set_bits(wm831x, reg, WM831X_LDO11_ON_VSEL_MASK, vsel);
 }
 
 static int wm831x_alive_ldo_set_voltage(struct regulator_dev *rdev,
-				     int min_uV, int max_uV)
+					int min_uV, int max_uV,
+					unsigned *selector)
 {
 	struct wm831x_ldo *ldo = rdev_get_drvdata(rdev);
 	int reg = ldo->base + WM831X_ALIVE_LDO_ON_CONTROL;
 
-	return wm831x_alive_ldo_set_voltage_int(rdev, reg, min_uV, max_uV);
+	return wm831x_alive_ldo_set_voltage_int(rdev, reg, min_uV, max_uV,
+						selector);
 }
 
 static int wm831x_alive_ldo_set_suspend_voltage(struct regulator_dev *rdev,
@@ -697,11 +713,12 @@
 {
 	struct wm831x_ldo *ldo = rdev_get_drvdata(rdev);
 	int reg = ldo->base + WM831X_ALIVE_LDO_SLEEP_CONTROL;
+	unsigned selector;
 
-	return wm831x_alive_ldo_set_voltage_int(rdev, reg, uV, uV);
+	return wm831x_alive_ldo_set_voltage_int(rdev, reg, uV, uV, &selector);
 }
 
-static int wm831x_alive_ldo_get_voltage(struct regulator_dev *rdev)
+static int wm831x_alive_ldo_get_voltage_sel(struct regulator_dev *rdev)
 {
 	struct wm831x_ldo *ldo = rdev_get_drvdata(rdev);
 	struct wm831x *wm831x = ldo->wm831x;
@@ -714,7 +731,7 @@
 
 	ret &= WM831X_LDO11_ON_VSEL_MASK;
 
-	return wm831x_alive_ldo_list_voltage(rdev, ret);
+	return ret;
 }
 
 static int wm831x_alive_ldo_get_status(struct regulator_dev *rdev)
@@ -736,7 +753,7 @@
 
 static struct regulator_ops wm831x_alive_ldo_ops = {
 	.list_voltage = wm831x_alive_ldo_list_voltage,
-	.get_voltage = wm831x_alive_ldo_get_voltage,
+	.get_voltage_sel = wm831x_alive_ldo_get_voltage_sel,
 	.set_voltage = wm831x_alive_ldo_set_voltage,
 	.set_suspend_voltage = wm831x_alive_ldo_set_suspend_voltage,
 	.get_status = wm831x_alive_ldo_get_status,
diff --git a/drivers/regulator/wm8350-regulator.c b/drivers/regulator/wm8350-regulator.c
index fe4b8a8..1bcb22c 100644
--- a/drivers/regulator/wm8350-regulator.c
+++ b/drivers/regulator/wm8350-regulator.c
@@ -360,7 +360,7 @@
 EXPORT_SYMBOL_GPL(wm8350_isink_set_flash);
 
 static int wm8350_dcdc_set_voltage(struct regulator_dev *rdev, int min_uV,
-	int max_uV)
+				   int max_uV, unsigned *selector)
 {
 	struct wm8350 *wm8350 = rdev_get_drvdata(rdev);
 	int volt_reg, dcdc = rdev_get_id(rdev), mV,
@@ -397,17 +397,18 @@
 		return -EINVAL;
 	}
 
+	*selector = mV;
+
 	/* all DCDCs have same mV bits */
 	val = wm8350_reg_read(wm8350, volt_reg) & ~WM8350_DC1_VSEL_MASK;
 	wm8350_reg_write(wm8350, volt_reg, val | mV);
 	return 0;
 }
 
-static int wm8350_dcdc_get_voltage(struct regulator_dev *rdev)
+static int wm8350_dcdc_get_voltage_sel(struct regulator_dev *rdev)
 {
 	struct wm8350 *wm8350 = rdev_get_drvdata(rdev);
 	int volt_reg, dcdc = rdev_get_id(rdev);
-	u16 val;
 
 	switch (dcdc) {
 	case WM8350_DCDC_1:
@@ -429,8 +430,7 @@
 	}
 
 	/* all DCDCs have same mV bits */
-	val = wm8350_reg_read(wm8350, volt_reg) & WM8350_DC1_VSEL_MASK;
-	return wm8350_dcdc_val_to_mvolts(val) * 1000;
+	return wm8350_reg_read(wm8350, volt_reg) & WM8350_DC1_VSEL_MASK;
 }
 
 static int wm8350_dcdc_list_voltage(struct regulator_dev *rdev,
@@ -754,7 +754,7 @@
 }
 
 static int wm8350_ldo_set_voltage(struct regulator_dev *rdev, int min_uV,
-	int max_uV)
+				  int max_uV, unsigned *selector)
 {
 	struct wm8350 *wm8350 = rdev_get_drvdata(rdev);
 	int volt_reg, ldo = rdev_get_id(rdev), mV, min_mV = min_uV / 1000,
@@ -797,17 +797,18 @@
 		return -EINVAL;
 	}
 
+	*selector = mV;
+
 	/* all LDOs have same mV bits */
 	val = wm8350_reg_read(wm8350, volt_reg) & ~WM8350_LDO1_VSEL_MASK;
 	wm8350_reg_write(wm8350, volt_reg, val | mV);
 	return 0;
 }
 
-static int wm8350_ldo_get_voltage(struct regulator_dev *rdev)
+static int wm8350_ldo_get_voltage_sel(struct regulator_dev *rdev)
 {
 	struct wm8350 *wm8350 = rdev_get_drvdata(rdev);
 	int volt_reg, ldo = rdev_get_id(rdev);
-	u16 val;
 
 	switch (ldo) {
 	case WM8350_LDO_1:
@@ -827,8 +828,7 @@
 	}
 
 	/* all LDOs have same mV bits */
-	val = wm8350_reg_read(wm8350, volt_reg) & WM8350_LDO1_VSEL_MASK;
-	return wm8350_ldo_val_to_mvolts(val) * 1000;
+	return wm8350_reg_read(wm8350, volt_reg) & WM8350_LDO1_VSEL_MASK;
 }
 
 static int wm8350_ldo_list_voltage(struct regulator_dev *rdev,
@@ -1225,7 +1225,7 @@
 
 static struct regulator_ops wm8350_dcdc_ops = {
 	.set_voltage = wm8350_dcdc_set_voltage,
-	.get_voltage = wm8350_dcdc_get_voltage,
+	.get_voltage_sel = wm8350_dcdc_get_voltage_sel,
 	.list_voltage = wm8350_dcdc_list_voltage,
 	.enable = wm8350_dcdc_enable,
 	.disable = wm8350_dcdc_disable,
@@ -1249,7 +1249,7 @@
 
 static struct regulator_ops wm8350_ldo_ops = {
 	.set_voltage = wm8350_ldo_set_voltage,
-	.get_voltage = wm8350_ldo_get_voltage,
+	.get_voltage_sel = wm8350_ldo_get_voltage_sel,
 	.list_voltage = wm8350_ldo_list_voltage,
 	.enable = wm8350_ldo_enable,
 	.disable = wm8350_ldo_disable,
diff --git a/drivers/regulator/wm8400-regulator.c b/drivers/regulator/wm8400-regulator.c
index 924c7eb..b42d01c 100644
--- a/drivers/regulator/wm8400-regulator.c
+++ b/drivers/regulator/wm8400-regulator.c
@@ -67,7 +67,7 @@
 }
 
 static int wm8400_ldo_set_voltage(struct regulator_dev *dev,
-				  int min_uV, int max_uV)
+				  int min_uV, int max_uV, unsigned *selector)
 {
 	struct wm8400 *wm8400 = rdev_get_drvdata(dev);
 	u16 val;
@@ -93,6 +93,8 @@
 		val += 0xf;
 	}
 
+	*selector = val;
+
 	return wm8400_set_bits(wm8400, WM8400_LDO1_CONTROL + rdev_get_id(dev),
 			       WM8400_LDO1_VSEL_MASK, val);
 }
@@ -156,7 +158,7 @@
 }
 
 static int wm8400_dcdc_set_voltage(struct regulator_dev *dev,
-				  int min_uV, int max_uV)
+				   int min_uV, int max_uV, unsigned *selector)
 {
 	struct wm8400 *wm8400 = rdev_get_drvdata(dev);
 	u16 val;
@@ -171,6 +173,8 @@
 		return -EINVAL;
 	BUG_ON(850000 + (25000 * val) < min_uV);
 
+	*selector = val;
+
 	return wm8400_set_bits(wm8400, WM8400_DCDC1_CONTROL_1 + offset,
 			       WM8400_DC1_VSEL_MASK, val);
 }
diff --git a/drivers/regulator/wm8994-regulator.c b/drivers/regulator/wm8994-regulator.c
index 03713bc..35b2958 100644
--- a/drivers/regulator/wm8994-regulator.c
+++ b/drivers/regulator/wm8994-regulator.c
@@ -86,7 +86,7 @@
 	return (selector * 100000) + 2400000;
 }
 
-static int wm8994_ldo1_get_voltage(struct regulator_dev *rdev)
+static int wm8994_ldo1_get_voltage_sel(struct regulator_dev *rdev)
 {
 	struct wm8994_ldo *ldo = rdev_get_drvdata(rdev);
 	int val;
@@ -95,13 +95,11 @@
 	if (val < 0)
 		return val;
 
-	val = (val & WM8994_LDO1_VSEL_MASK) >> WM8994_LDO1_VSEL_SHIFT;
-
-	return wm8994_ldo1_list_voltage(rdev, val);
+	return (val & WM8994_LDO1_VSEL_MASK) >> WM8994_LDO1_VSEL_SHIFT;
 }
 
 static int wm8994_ldo1_set_voltage(struct regulator_dev *rdev,
-				   int min_uV, int max_uV)
+				   int min_uV, int max_uV, unsigned *s)
 {
 	struct wm8994_ldo *ldo = rdev_get_drvdata(rdev);
 	int selector, v;
@@ -111,6 +109,7 @@
 	if (v < 0 || v > max_uV)
 		return -EINVAL;
 
+	*s = selector;
 	selector <<= WM8994_LDO1_VSEL_SHIFT;
 
 	return wm8994_set_bits(ldo->wm8994, WM8994_LDO_1,
@@ -124,20 +123,29 @@
 	.enable_time = wm8994_ldo_enable_time,
 
 	.list_voltage = wm8994_ldo1_list_voltage,
-	.get_voltage = wm8994_ldo1_get_voltage,
+	.get_voltage_sel = wm8994_ldo1_get_voltage_sel,
 	.set_voltage = wm8994_ldo1_set_voltage,
 };
 
 static int wm8994_ldo2_list_voltage(struct regulator_dev *rdev,
 				    unsigned int selector)
 {
+	struct wm8994_ldo *ldo = rdev_get_drvdata(rdev);
+
 	if (selector > WM8994_LDO2_MAX_SELECTOR)
 		return -EINVAL;
 
-	return (selector * 100000) + 900000;
+	switch (ldo->wm8994->type) {
+	case WM8994:
+		return (selector * 100000) + 900000;
+	case WM8958:
+		return (selector * 100000) + 1000000;
+	default:
+		return -EINVAL;
+	}
 }
 
-static int wm8994_ldo2_get_voltage(struct regulator_dev *rdev)
+static int wm8994_ldo2_get_voltage_sel(struct regulator_dev *rdev)
 {
 	struct wm8994_ldo *ldo = rdev_get_drvdata(rdev);
 	int val;
@@ -146,22 +154,31 @@
 	if (val < 0)
 		return val;
 
-	val = (val & WM8994_LDO2_VSEL_MASK) >> WM8994_LDO2_VSEL_SHIFT;
-
-	return wm8994_ldo2_list_voltage(rdev, val);
+	return (val & WM8994_LDO2_VSEL_MASK) >> WM8994_LDO2_VSEL_SHIFT;
 }
 
 static int wm8994_ldo2_set_voltage(struct regulator_dev *rdev,
-				   int min_uV, int max_uV)
+				   int min_uV, int max_uV, unsigned *s)
 {
 	struct wm8994_ldo *ldo = rdev_get_drvdata(rdev);
 	int selector, v;
 
-	selector = (min_uV - 900000) / 100000;
+	switch (ldo->wm8994->type) {
+	case WM8994:
+		selector = (min_uV - 900000) / 100000;
+		break;
+	case WM8958:
+		selector = (min_uV - 1000000) / 100000;
+		break;
+	default:
+		return -EINVAL;
+	}
+
 	v = wm8994_ldo2_list_voltage(rdev, selector);
 	if (v < 0 || v > max_uV)
 		return -EINVAL;
 
+	*s = selector;
 	selector <<= WM8994_LDO2_VSEL_SHIFT;
 
 	return wm8994_set_bits(ldo->wm8994, WM8994_LDO_2,
@@ -175,7 +192,7 @@
 	.enable_time = wm8994_ldo_enable_time,
 
 	.list_voltage = wm8994_ldo2_list_voltage,
-	.get_voltage = wm8994_ldo2_get_voltage,
+	.get_voltage_sel = wm8994_ldo2_get_voltage_sel,
 	.set_voltage = wm8994_ldo2_set_voltage,
 };
 
diff --git a/drivers/rtc/class.c b/drivers/rtc/class.c
index e6539cb..9583cbc 100644
--- a/drivers/rtc/class.c
+++ b/drivers/rtc/class.c
@@ -16,6 +16,7 @@
 #include <linux/kdev_t.h>
 #include <linux/idr.h>
 #include <linux/slab.h>
+#include <linux/workqueue.h>
 
 #include "rtc-core.h"
 
@@ -152,6 +153,18 @@
 	spin_lock_init(&rtc->irq_task_lock);
 	init_waitqueue_head(&rtc->irq_queue);
 
+	/* Init timerqueue */
+	timerqueue_init_head(&rtc->timerqueue);
+	INIT_WORK(&rtc->irqwork, rtc_timer_do_work);
+	/* Init aie timer */
+	rtc_timer_init(&rtc->aie_timer, rtc_aie_update_irq, (void *)rtc);
+	/* Init uie timer */
+	rtc_timer_init(&rtc->uie_rtctimer, rtc_uie_update_irq, (void *)rtc);
+	/* Init pie timer */
+	hrtimer_init(&rtc->pie_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	rtc->pie_timer.function = rtc_pie_update_irq;
+	rtc->pie_enabled = 0;
+
 	strlcpy(rtc->name, name, RTC_DEVICE_NAME_SIZE);
 	dev_set_name(&rtc->dev, "rtc%d", id);
 
diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
index a0c8162..90384b9 100644
--- a/drivers/rtc/interface.c
+++ b/drivers/rtc/interface.c
@@ -14,6 +14,21 @@
 #include <linux/rtc.h>
 #include <linux/sched.h>
 #include <linux/log2.h>
+#include <linux/workqueue.h>
+
+static int __rtc_read_time(struct rtc_device *rtc, struct rtc_time *tm)
+{
+	int err;
+	if (!rtc->ops)
+		err = -ENODEV;
+	else if (!rtc->ops->read_time)
+		err = -EINVAL;
+	else {
+		memset(tm, 0, sizeof(struct rtc_time));
+		err = rtc->ops->read_time(rtc->dev.parent, tm);
+	}
+	return err;
+}
 
 int rtc_read_time(struct rtc_device *rtc, struct rtc_time *tm)
 {
@@ -23,15 +38,7 @@
 	if (err)
 		return err;
 
-	if (!rtc->ops)
-		err = -ENODEV;
-	else if (!rtc->ops->read_time)
-		err = -EINVAL;
-	else {
-		memset(tm, 0, sizeof(struct rtc_time));
-		err = rtc->ops->read_time(rtc->dev.parent, tm);
-	}
-
+	err = __rtc_read_time(rtc, tm);
 	mutex_unlock(&rtc->ops_lock);
 	return err;
 }
@@ -106,189 +113,55 @@
 }
 EXPORT_SYMBOL_GPL(rtc_set_mmss);
 
-static int rtc_read_alarm_internal(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
+int rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
 {
 	int err;
 
 	err = mutex_lock_interruptible(&rtc->ops_lock);
 	if (err)
 		return err;
-
-	if (rtc->ops == NULL)
-		err = -ENODEV;
-	else if (!rtc->ops->read_alarm)
-		err = -EINVAL;
-	else {
-		memset(alarm, 0, sizeof(struct rtc_wkalrm));
-		err = rtc->ops->read_alarm(rtc->dev.parent, alarm);
-	}
-
+	alarm->enabled = rtc->aie_timer.enabled;
+	if (alarm->enabled)
+		alarm->time = rtc_ktime_to_tm(rtc->aie_timer.node.expires);
 	mutex_unlock(&rtc->ops_lock);
-	return err;
-}
 
-int rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
-{
-	int err;
-	struct rtc_time before, now;
-	int first_time = 1;
-	unsigned long t_now, t_alm;
-	enum { none, day, month, year } missing = none;
-	unsigned days;
-
-	/* The lower level RTC driver may return -1 in some fields,
-	 * creating invalid alarm->time values, for reasons like:
-	 *
-	 *   - The hardware may not be capable of filling them in;
-	 *     many alarms match only on time-of-day fields, not
-	 *     day/month/year calendar data.
-	 *
-	 *   - Some hardware uses illegal values as "wildcard" match
-	 *     values, which non-Linux firmware (like a BIOS) may try
-	 *     to set up as e.g. "alarm 15 minutes after each hour".
-	 *     Linux uses only oneshot alarms.
-	 *
-	 * When we see that here, we deal with it by using values from
-	 * a current RTC timestamp for any missing (-1) values.  The
-	 * RTC driver prevents "periodic alarm" modes.
-	 *
-	 * But this can be racey, because some fields of the RTC timestamp
-	 * may have wrapped in the interval since we read the RTC alarm,
-	 * which would lead to us inserting inconsistent values in place
-	 * of the -1 fields.
-	 *
-	 * Reading the alarm and timestamp in the reverse sequence
-	 * would have the same race condition, and not solve the issue.
-	 *
-	 * So, we must first read the RTC timestamp,
-	 * then read the RTC alarm value,
-	 * and then read a second RTC timestamp.
-	 *
-	 * If any fields of the second timestamp have changed
-	 * when compared with the first timestamp, then we know
-	 * our timestamp may be inconsistent with that used by
-	 * the low-level rtc_read_alarm_internal() function.
-	 *
-	 * So, when the two timestamps disagree, we just loop and do
-	 * the process again to get a fully consistent set of values.
-	 *
-	 * This could all instead be done in the lower level driver,
-	 * but since more than one lower level RTC implementation needs it,
-	 * then it's probably best best to do it here instead of there..
-	 */
-
-	/* Get the "before" timestamp */
-	err = rtc_read_time(rtc, &before);
-	if (err < 0)
-		return err;
-	do {
-		if (!first_time)
-			memcpy(&before, &now, sizeof(struct rtc_time));
-		first_time = 0;
-
-		/* get the RTC alarm values, which may be incomplete */
-		err = rtc_read_alarm_internal(rtc, alarm);
-		if (err)
-			return err;
-		if (!alarm->enabled)
-			return 0;
-
-		/* full-function RTCs won't have such missing fields */
-		if (rtc_valid_tm(&alarm->time) == 0)
-			return 0;
-
-		/* get the "after" timestamp, to detect wrapped fields */
-		err = rtc_read_time(rtc, &now);
-		if (err < 0)
-			return err;
-
-		/* note that tm_sec is a "don't care" value here: */
-	} while (   before.tm_min   != now.tm_min
-		 || before.tm_hour  != now.tm_hour
-		 || before.tm_mon   != now.tm_mon
-		 || before.tm_year  != now.tm_year);
-
-	/* Fill in the missing alarm fields using the timestamp; we
-	 * know there's at least one since alarm->time is invalid.
-	 */
-	if (alarm->time.tm_sec == -1)
-		alarm->time.tm_sec = now.tm_sec;
-	if (alarm->time.tm_min == -1)
-		alarm->time.tm_min = now.tm_min;
-	if (alarm->time.tm_hour == -1)
-		alarm->time.tm_hour = now.tm_hour;
-
-	/* For simplicity, only support date rollover for now */
-	if (alarm->time.tm_mday == -1) {
-		alarm->time.tm_mday = now.tm_mday;
-		missing = day;
-	}
-	if (alarm->time.tm_mon == -1) {
-		alarm->time.tm_mon = now.tm_mon;
-		if (missing == none)
-			missing = month;
-	}
-	if (alarm->time.tm_year == -1) {
-		alarm->time.tm_year = now.tm_year;
-		if (missing == none)
-			missing = year;
-	}
-
-	/* with luck, no rollover is needed */
-	rtc_tm_to_time(&now, &t_now);
-	rtc_tm_to_time(&alarm->time, &t_alm);
-	if (t_now < t_alm)
-		goto done;
-
-	switch (missing) {
-
-	/* 24 hour rollover ... if it's now 10am Monday, an alarm that
-	 * that will trigger at 5am will do so at 5am Tuesday, which
-	 * could also be in the next month or year.  This is a common
-	 * case, especially for PCs.
-	 */
-	case day:
-		dev_dbg(&rtc->dev, "alarm rollover: %s\n", "day");
-		t_alm += 24 * 60 * 60;
-		rtc_time_to_tm(t_alm, &alarm->time);
-		break;
-
-	/* Month rollover ... if it's the 31th, an alarm on the 3rd will
-	 * be next month.  An alarm matching on the 30th, 29th, or 28th
-	 * may end up in the month after that!  Many newer PCs support
-	 * this type of alarm.
-	 */
-	case month:
-		dev_dbg(&rtc->dev, "alarm rollover: %s\n", "month");
-		do {
-			if (alarm->time.tm_mon < 11)
-				alarm->time.tm_mon++;
-			else {
-				alarm->time.tm_mon = 0;
-				alarm->time.tm_year++;
-			}
-			days = rtc_month_days(alarm->time.tm_mon,
-					alarm->time.tm_year);
-		} while (days < alarm->time.tm_mday);
-		break;
-
-	/* Year rollover ... easy except for leap years! */
-	case year:
-		dev_dbg(&rtc->dev, "alarm rollover: %s\n", "year");
-		do {
-			alarm->time.tm_year++;
-		} while (rtc_valid_tm(&alarm->time) != 0);
-		break;
-
-	default:
-		dev_warn(&rtc->dev, "alarm rollover not handled\n");
-	}
-
-done:
 	return 0;
 }
 EXPORT_SYMBOL_GPL(rtc_read_alarm);
 
+int __rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
+{
+	struct rtc_time tm;
+	long now, scheduled;
+	int err;
+
+	err = rtc_valid_tm(&alarm->time);
+	if (err)
+		return err;
+	rtc_tm_to_time(&alarm->time, &scheduled);
+
+	/* Make sure we're not setting alarms in the past */
+	err = __rtc_read_time(rtc, &tm);
+	rtc_tm_to_time(&tm, &now);
+	if (scheduled <= now)
+		return -ETIME;
+	/*
+	 * XXX - We just checked to make sure the alarm time is not
+	 * in the past, but there is still a race window: the alarm
+	 * may be set for the next second and the second could tick
+	 * over right here, before we set the alarm.
+	 */
+
+	if (!rtc->ops)
+		err = -ENODEV;
+	else if (!rtc->ops->set_alarm)
+		err = -EINVAL;
+	else
+		err = rtc->ops->set_alarm(rtc->dev.parent, alarm);
+
+	return err;
+}
+
 int rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
 {
 	int err;
@@ -300,16 +173,18 @@
 	err = mutex_lock_interruptible(&rtc->ops_lock);
 	if (err)
 		return err;
-
-	if (!rtc->ops)
-		err = -ENODEV;
-	else if (!rtc->ops->set_alarm)
-		err = -EINVAL;
-	else
-		err = rtc->ops->set_alarm(rtc->dev.parent, alarm);
-
+	if (rtc->aie_timer.enabled) {
+		rtc_timer_remove(rtc, &rtc->aie_timer);
+		rtc->aie_timer.enabled = 0;
+	}
+	rtc->aie_timer.node.expires = rtc_tm_to_ktime(alarm->time);
+	rtc->aie_timer.period = ktime_set(0, 0);
+	if (alarm->enabled) {
+		rtc->aie_timer.enabled = 1;
+		rtc_timer_enqueue(rtc, &rtc->aie_timer);
+	}
 	mutex_unlock(&rtc->ops_lock);
-	return err;
+	return 0;
 }
 EXPORT_SYMBOL_GPL(rtc_set_alarm);
 
@@ -319,6 +194,16 @@
 	if (err)
 		return err;
 
+	if (rtc->aie_timer.enabled != enabled) {
+		if (enabled) {
+			rtc->aie_timer.enabled = 1;
+			rtc_timer_enqueue(rtc, &rtc->aie_timer);
+		} else {
+			rtc_timer_remove(rtc, &rtc->aie_timer);
+			rtc->aie_timer.enabled = 0;
+		}
+	}
+
 	if (!rtc->ops)
 		err = -ENODEV;
 	else if (!rtc->ops->alarm_irq_enable)
@@ -337,38 +222,114 @@
 	if (err)
 		return err;
 
-#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
-	if (enabled == 0 && rtc->uie_irq_active) {
-		mutex_unlock(&rtc->ops_lock);
-		return rtc_dev_update_irq_enable_emul(rtc, enabled);
+	/* make sure we're changing state */
+	if (rtc->uie_rtctimer.enabled == enabled)
+		goto out;
+
+	if (enabled) {
+		struct rtc_time tm;
+		ktime_t now, onesec;
+
+		__rtc_read_time(rtc, &tm);
+		onesec = ktime_set(1, 0);
+		now = rtc_tm_to_ktime(tm);
+		rtc->uie_rtctimer.node.expires = ktime_add(now, onesec);
+		rtc->uie_rtctimer.period = ktime_set(1, 0);
+		rtc->uie_rtctimer.enabled = 1;
+		rtc_timer_enqueue(rtc, &rtc->uie_rtctimer);
+	} else {
+		rtc_timer_remove(rtc, &rtc->uie_rtctimer);
+		rtc->uie_rtctimer.enabled = 0;
 	}
-#endif
 
-	if (!rtc->ops)
-		err = -ENODEV;
-	else if (!rtc->ops->update_irq_enable)
-		err = -EINVAL;
-	else
-		err = rtc->ops->update_irq_enable(rtc->dev.parent, enabled);
-
+out:
 	mutex_unlock(&rtc->ops_lock);
-
-#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
-	/*
-	 * Enable emulation if the driver did not provide
-	 * the update_irq_enable function pointer or if returned
-	 * -EINVAL to signal that it has been configured without
-	 * interrupts or that are not available at the moment.
-	 */
-	if (err == -EINVAL)
-		err = rtc_dev_update_irq_enable_emul(rtc, enabled);
-#endif
 	return err;
+
 }
 EXPORT_SYMBOL_GPL(rtc_update_irq_enable);
 
+
 /**
- * rtc_update_irq - report RTC periodic, alarm, and/or update irqs
+ * rtc_handle_legacy_irq - AIE, UIE and PIE event hook
+ * @rtc: pointer to the rtc device
+ *
+ * This function is called when an AIE, UIE or PIE mode interrupt
+ * has occurred (or been emulated).
+ *
+ * Triggers the registered irq_task function callback.
+ */
+static void rtc_handle_legacy_irq(struct rtc_device *rtc, int num, int mode)
+{
+	unsigned long flags;
+
+	/* mark one irq of the appropriate mode */
+	spin_lock_irqsave(&rtc->irq_lock, flags);
+	rtc->irq_data = (rtc->irq_data + (num << 8)) | (RTC_IRQF|mode);
+	spin_unlock_irqrestore(&rtc->irq_lock, flags);
+
+	/* call the task func */
+	spin_lock_irqsave(&rtc->irq_task_lock, flags);
+	if (rtc->irq_task)
+		rtc->irq_task->func(rtc->irq_task->private_data);
+	spin_unlock_irqrestore(&rtc->irq_task_lock, flags);
+
+	wake_up_interruptible(&rtc->irq_queue);
+	kill_fasync(&rtc->async_queue, SIGIO, POLL_IN);
+}
+
+
+/**
+ * rtc_aie_update_irq - AIE mode rtctimer hook
+ * @private: pointer to the rtc_device
+ *
+ * This function is called when the aie_timer expires.
+ */
+void rtc_aie_update_irq(void *private)
+{
+	struct rtc_device *rtc = (struct rtc_device *)private;
+	rtc_handle_legacy_irq(rtc, 1, RTC_AF);
+}
+
+
+/**
+ * rtc_uie_update_irq - UIE mode rtctimer hook
+ * @private: pointer to the rtc_device
+ *
+ * This function is called when the uie_rtctimer expires.
+ */
+void rtc_uie_update_irq(void *private)
+{
+	struct rtc_device *rtc = (struct rtc_device *)private;
+	rtc_handle_legacy_irq(rtc, 1,  RTC_UF);
+}
+
+
+/**
+ * rtc_pie_update_irq - PIE mode hrtimer hook
+ * @timer: pointer to the pie mode hrtimer
+ *
+ * This function is used to emulate PIE mode interrupts
+ * using an hrtimer. This function is called when the periodic
+ * hrtimer expires.
+ */
+enum hrtimer_restart rtc_pie_update_irq(struct hrtimer *timer)
+{
+	struct rtc_device *rtc;
+	ktime_t period;
+	int count;
+	rtc = container_of(timer, struct rtc_device, pie_timer);
+
+	period = ktime_set(0, NSEC_PER_SEC/rtc->irq_freq);
+	count = hrtimer_forward_now(timer, period);
+
+	rtc_handle_legacy_irq(rtc, count, RTC_PF);
+
+	return HRTIMER_RESTART;
+}
+
+/**
+ * rtc_update_irq - Triggered when an RTC interrupt occurs.
  * @rtc: the rtc device
  * @num: how many irqs are being reported (usually one)
  * @events: mask of RTC_IRQF with one or more of RTC_PF, RTC_AF, RTC_UF
@@ -377,19 +338,7 @@
 void rtc_update_irq(struct rtc_device *rtc,
 		unsigned long num, unsigned long events)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(&rtc->irq_lock, flags);
-	rtc->irq_data = (rtc->irq_data + (num << 8)) | events;
-	spin_unlock_irqrestore(&rtc->irq_lock, flags);
-
-	spin_lock_irqsave(&rtc->irq_task_lock, flags);
-	if (rtc->irq_task)
-		rtc->irq_task->func(rtc->irq_task->private_data);
-	spin_unlock_irqrestore(&rtc->irq_task_lock, flags);
-
-	wake_up_interruptible(&rtc->irq_queue);
-	kill_fasync(&rtc->async_queue, SIGIO, POLL_IN);
+	schedule_work(&rtc->irqwork);
 }
 EXPORT_SYMBOL_GPL(rtc_update_irq);
 
@@ -477,18 +426,20 @@
 	int err = 0;
 	unsigned long flags;
 
-	if (rtc->ops->irq_set_state == NULL)
-		return -ENXIO;
-
 	spin_lock_irqsave(&rtc->irq_task_lock, flags);
 	if (rtc->irq_task != NULL && task == NULL)
 		err = -EBUSY;
 	if (rtc->irq_task != task)
 		err = -EACCES;
-	spin_unlock_irqrestore(&rtc->irq_task_lock, flags);
 
-	if (err == 0)
-		err = rtc->ops->irq_set_state(rtc->dev.parent, enabled);
+	if (enabled) {
+		ktime_t period = ktime_set(0, NSEC_PER_SEC/rtc->irq_freq);
+		hrtimer_start(&rtc->pie_timer, period, HRTIMER_MODE_REL);
+	} else {
+		hrtimer_cancel(&rtc->pie_timer);
+	}
+	rtc->pie_enabled = enabled;
+	spin_unlock_irqrestore(&rtc->irq_task_lock, flags);
 
 	return err;
 }
@@ -509,21 +460,194 @@
 	int err = 0;
 	unsigned long flags;
 
-	if (rtc->ops->irq_set_freq == NULL)
-		return -ENXIO;
-
 	spin_lock_irqsave(&rtc->irq_task_lock, flags);
 	if (rtc->irq_task != NULL && task == NULL)
 		err = -EBUSY;
 	if (rtc->irq_task != task)
 		err = -EACCES;
-	spin_unlock_irqrestore(&rtc->irq_task_lock, flags);
-
 	if (err == 0) {
-		err = rtc->ops->irq_set_freq(rtc->dev.parent, freq);
-		if (err == 0)
-			rtc->irq_freq = freq;
+		rtc->irq_freq = freq;
+		if (rtc->pie_enabled) {
+			ktime_t period;
+			hrtimer_cancel(&rtc->pie_timer);
+			period = ktime_set(0, NSEC_PER_SEC/rtc->irq_freq);
+			hrtimer_start(&rtc->pie_timer, period,
+					HRTIMER_MODE_REL);
+		}
 	}
+	spin_unlock_irqrestore(&rtc->irq_task_lock, flags);
 	return err;
 }
 EXPORT_SYMBOL_GPL(rtc_irq_set_freq);
+
+/**
+ * rtc_timer_enqueue - Adds an rtc_timer to the rtc_device timerqueue
+ * @rtc: rtc device
+ * @timer: timer being added.
+ *
+ * Enqueues a timer onto the rtc device's timerqueue and sets
+ * the next alarm event appropriately.
+ *
+ * Must hold ops_lock for proper serialization of timerqueue
+ */
+void rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer)
+{
+	timerqueue_add(&rtc->timerqueue, &timer->node);
+	if (&timer->node == timerqueue_getnext(&rtc->timerqueue)) {
+		struct rtc_wkalrm alarm;
+		int err;
+		alarm.time = rtc_ktime_to_tm(timer->node.expires);
+		alarm.enabled = 1;
+		err = __rtc_set_alarm(rtc, &alarm);
+		if (err == -ETIME)
+			schedule_work(&rtc->irqwork);
+	}
+}
+
+/**
+ * rtc_timer_remove - Removes an rtc_timer from the rtc_device timerqueue
+ * @rtc: rtc device
+ * @timer: timer being removed.
+ *
+ * Removes a timer from the rtc device's timerqueue and sets
+ * the next alarm event appropriately.
+ *
+ * Must hold ops_lock for proper serialization of timerqueue
+ */
+void rtc_timer_remove(struct rtc_device *rtc, struct rtc_timer *timer)
+{
+	struct timerqueue_node *next = timerqueue_getnext(&rtc->timerqueue);
+	timerqueue_del(&rtc->timerqueue, &timer->node);
+
+	if (next == &timer->node) {
+		struct rtc_wkalrm alarm;
+		int err;
+		next = timerqueue_getnext(&rtc->timerqueue);
+		if (!next)
+			return;
+		alarm.time = rtc_ktime_to_tm(next->expires);
+		alarm.enabled = 1;
+		err = __rtc_set_alarm(rtc, &alarm);
+		if (err == -ETIME)
+			schedule_work(&rtc->irqwork);
+	}
+}
+
+/**
+ * rtc_timer_do_work - Expires rtc timers
+ * @work: work_struct embedded in the rtc device whose timers
+ *        are being expired
+ *
+ * Expires rtc timers. Reprograms the next alarm event if needed.
+ * Called via the irqwork work item.
+ *
+ * Serializes access to timerqueue via ops_lock mutex
+ */
+void rtc_timer_do_work(struct work_struct *work)
+{
+	struct rtc_timer *timer;
+	struct timerqueue_node *next;
+	ktime_t now;
+	struct rtc_time tm;
+
+	struct rtc_device *rtc =
+		container_of(work, struct rtc_device, irqwork);
+
+	mutex_lock(&rtc->ops_lock);
+again:
+	__rtc_read_time(rtc, &tm);
+	now = rtc_tm_to_ktime(tm);
+	while ((next = timerqueue_getnext(&rtc->timerqueue))) {
+		if (next->expires.tv64 > now.tv64)
+			break;
+
+		/* expire timer */
+		timer = container_of(next, struct rtc_timer, node);
+		timerqueue_del(&rtc->timerqueue, &timer->node);
+		timer->enabled = 0;
+		if (timer->task.func)
+			timer->task.func(timer->task.private_data);
+
+		/* Re-add/fwd periodic timers */
+		if (ktime_to_ns(timer->period)) {
+			timer->node.expires = ktime_add(timer->node.expires,
+							timer->period);
+			timer->enabled = 1;
+			timerqueue_add(&rtc->timerqueue, &timer->node);
+		}
+	}
+
+	/* Set next alarm */
+	if (next) {
+		struct rtc_wkalrm alarm;
+		int err;
+		alarm.time = rtc_ktime_to_tm(next->expires);
+		alarm.enabled = 1;
+		err = __rtc_set_alarm(rtc, &alarm);
+		if (err == -ETIME)
+			goto again;
+	}
+
+	mutex_unlock(&rtc->ops_lock);
+}
+
+
+/* rtc_timer_init - Initializes an rtc_timer
+ * @timer: timer to be initialized
+ * @f: function pointer to be called when timer fires
+ * @data: private data passed to function pointer
+ *
+ * Kernel interface to initializing an rtc_timer.
+ */
+void rtc_timer_init(struct rtc_timer *timer, void (*f)(void* p), void* data)
+{
+	timerqueue_init(&timer->node);
+	timer->enabled = 0;
+	timer->task.func = f;
+	timer->task.private_data = data;
+}
+
+/* rtc_timer_start - Sets an rtc_timer to fire in the future
+ * @rtc: rtc device to be used
+ * @timer: timer being set
+ * @expires: time at which to expire the timer
+ * @period: period at which the timer will recur
+ *
+ * Kernel interface to set an rtc_timer
+ */
+int rtc_timer_start(struct rtc_device *rtc, struct rtc_timer* timer,
+			ktime_t expires, ktime_t period)
+{
+	int ret = 0;
+	mutex_lock(&rtc->ops_lock);
+	if (timer->enabled)
+		rtc_timer_remove(rtc, timer);
+
+	timer->node.expires = expires;
+	timer->period = period;
+
+	timer->enabled = 1;
+	rtc_timer_enqueue(rtc, timer);
+
+	mutex_unlock(&rtc->ops_lock);
+	return ret;
+}
+
+/* rtc_timer_cancel - Stops an rtc_timer
+ * @rtc: rtc device to be used
+ * @timer: timer being cancelled
+ *
+ * Kernel interface to cancel an rtc_timer
+ */
+int rtc_timer_cancel(struct rtc_device *rtc, struct rtc_timer* timer)
+{
+	int ret = 0;
+	mutex_lock(&rtc->ops_lock);
+	if (timer->enabled)
+		rtc_timer_remove(rtc, timer);
+	timer->enabled = 0;
+	mutex_unlock(&rtc->ops_lock);
+	return ret;
+}
+
+
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
index 5856167..c7ff8df 100644
--- a/drivers/rtc/rtc-cmos.c
+++ b/drivers/rtc/rtc-cmos.c
@@ -36,6 +36,7 @@
 #include <linux/platform_device.h>
 #include <linux/mod_devicetable.h>
 #include <linux/log2.h>
+#include <linux/pm.h>
 
 /* this is for "generic access to PC-style RTC" using CMOS_READ/CMOS_WRITE */
 #include <asm-generic/rtc.h>
@@ -687,7 +688,8 @@
 #if	defined(CONFIG_ATARI)
 	address_space = 64;
 #elif defined(__i386__) || defined(__x86_64__) || defined(__arm__) \
-			|| defined(__sparc__) || defined(__mips__)
+			|| defined(__sparc__) || defined(__mips__) \
+			|| defined(__powerpc__)
 	address_space = 128;
 #else
 #warning Assuming 128 bytes of RTC+NVRAM address space, not 64 bytes.
@@ -850,7 +852,7 @@
 
 #ifdef	CONFIG_PM
 
-static int cmos_suspend(struct device *dev, pm_message_t mesg)
+static int cmos_suspend(struct device *dev)
 {
 	struct cmos_rtc	*cmos = dev_get_drvdata(dev);
 	unsigned char	tmp;
@@ -898,7 +900,7 @@
  */
 static inline int cmos_poweroff(struct device *dev)
 {
-	return cmos_suspend(dev, PMSG_HIBERNATE);
+	return cmos_suspend(dev);
 }
 
 static int cmos_resume(struct device *dev)
@@ -945,9 +947,9 @@
 	return 0;
 }
 
+static SIMPLE_DEV_PM_OPS(cmos_pm_ops, cmos_suspend, cmos_resume);
+
 #else
-#define	cmos_suspend	NULL
-#define	cmos_resume	NULL
 
 static inline int cmos_poweroff(struct device *dev)
 {
@@ -1077,7 +1079,7 @@
 
 static int cmos_pnp_suspend(struct pnp_dev *pnp, pm_message_t mesg)
 {
-	return cmos_suspend(&pnp->dev, mesg);
+	return cmos_suspend(&pnp->dev);
 }
 
 static int cmos_pnp_resume(struct pnp_dev *pnp)
@@ -1157,8 +1159,9 @@
 	.shutdown	= cmos_platform_shutdown,
 	.driver = {
 		.name		= (char *) driver_name,
-		.suspend	= cmos_suspend,
-		.resume		= cmos_resume,
+#ifdef CONFIG_PM
+		.pm		= &cmos_pm_ops,
+#endif
 	}
 };
 
diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
index 0cc0984..212b16e 100644
--- a/drivers/rtc/rtc-dev.c
+++ b/drivers/rtc/rtc-dev.c
@@ -46,105 +46,6 @@
 	return err;
 }
 
-#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
-/*
- * Routine to poll RTC seconds field for change as often as possible,
- * after first RTC_UIE use timer to reduce polling
- */
-static void rtc_uie_task(struct work_struct *work)
-{
-	struct rtc_device *rtc =
-		container_of(work, struct rtc_device, uie_task);
-	struct rtc_time tm;
-	int num = 0;
-	int err;
-
-	err = rtc_read_time(rtc, &tm);
-
-	spin_lock_irq(&rtc->irq_lock);
-	if (rtc->stop_uie_polling || err) {
-		rtc->uie_task_active = 0;
-	} else if (rtc->oldsecs != tm.tm_sec) {
-		num = (tm.tm_sec + 60 - rtc->oldsecs) % 60;
-		rtc->oldsecs = tm.tm_sec;
-		rtc->uie_timer.expires = jiffies + HZ - (HZ/10);
-		rtc->uie_timer_active = 1;
-		rtc->uie_task_active = 0;
-		add_timer(&rtc->uie_timer);
-	} else if (schedule_work(&rtc->uie_task) == 0) {
-		rtc->uie_task_active = 0;
-	}
-	spin_unlock_irq(&rtc->irq_lock);
-	if (num)
-		rtc_update_irq(rtc, num, RTC_UF | RTC_IRQF);
-}
-static void rtc_uie_timer(unsigned long data)
-{
-	struct rtc_device *rtc = (struct rtc_device *)data;
-	unsigned long flags;
-
-	spin_lock_irqsave(&rtc->irq_lock, flags);
-	rtc->uie_timer_active = 0;
-	rtc->uie_task_active = 1;
-	if ((schedule_work(&rtc->uie_task) == 0))
-		rtc->uie_task_active = 0;
-	spin_unlock_irqrestore(&rtc->irq_lock, flags);
-}
-
-static int clear_uie(struct rtc_device *rtc)
-{
-	spin_lock_irq(&rtc->irq_lock);
-	if (rtc->uie_irq_active) {
-		rtc->stop_uie_polling = 1;
-		if (rtc->uie_timer_active) {
-			spin_unlock_irq(&rtc->irq_lock);
-			del_timer_sync(&rtc->uie_timer);
-			spin_lock_irq(&rtc->irq_lock);
-			rtc->uie_timer_active = 0;
-		}
-		if (rtc->uie_task_active) {
-			spin_unlock_irq(&rtc->irq_lock);
-			flush_work_sync(&rtc->uie_task);
-			spin_lock_irq(&rtc->irq_lock);
-		}
-		rtc->uie_irq_active = 0;
-	}
-	spin_unlock_irq(&rtc->irq_lock);
-	return 0;
-}
-
-static int set_uie(struct rtc_device *rtc)
-{
-	struct rtc_time tm;
-	int err;
-
-	err = rtc_read_time(rtc, &tm);
-	if (err)
-		return err;
-	spin_lock_irq(&rtc->irq_lock);
-	if (!rtc->uie_irq_active) {
-		rtc->uie_irq_active = 1;
-		rtc->stop_uie_polling = 0;
-		rtc->oldsecs = tm.tm_sec;
-		rtc->uie_task_active = 1;
-		if (schedule_work(&rtc->uie_task) == 0)
-			rtc->uie_task_active = 0;
-	}
-	rtc->irq_data = 0;
-	spin_unlock_irq(&rtc->irq_lock);
-	return 0;
-}
-
-int rtc_dev_update_irq_enable_emul(struct rtc_device *rtc, unsigned int enabled)
-{
-	if (enabled)
-		return set_uie(rtc);
-	else
-		return clear_uie(rtc);
-}
-EXPORT_SYMBOL(rtc_dev_update_irq_enable_emul);
-
-#endif /* CONFIG_RTC_INTF_DEV_UIE_EMUL */
 
 static ssize_t
 rtc_dev_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
@@ -493,11 +394,6 @@
 
 	rtc->dev.devt = MKDEV(MAJOR(rtc_devt), rtc->id);
 
-#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
-	INIT_WORK(&rtc->uie_task, rtc_uie_task);
-	setup_timer(&rtc->uie_timer, rtc_uie_timer, (unsigned long)rtc);
-#endif
-
 	cdev_init(&rtc->char_dev, &rtc_dev_fops);
 	rtc->char_dev.owner = rtc->owner;
 }
diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
index d827ce5..0d559b6 100644
--- a/drivers/rtc/rtc-ds1307.c
+++ b/drivers/rtc/rtc-ds1307.c
@@ -106,9 +106,9 @@
 	struct i2c_client	*client;
 	struct rtc_device	*rtc;
 	struct work_struct	work;
-	s32 (*read_block_data)(struct i2c_client *client, u8 command,
+	s32 (*read_block_data)(const struct i2c_client *client, u8 command,
 			       u8 length, u8 *values);
-	s32 (*write_block_data)(struct i2c_client *client, u8 command,
+	s32 (*write_block_data)(const struct i2c_client *client, u8 command,
 				u8 length, const u8 *values);
 };
 
@@ -158,8 +158,8 @@
 
 #define BLOCK_DATA_MAX_TRIES 10
 
-static s32 ds1307_read_block_data_once(struct i2c_client *client, u8 command,
-				  u8 length, u8 *values)
+static s32 ds1307_read_block_data_once(const struct i2c_client *client,
+				       u8 command, u8 length, u8 *values)
 {
 	s32 i, data;
 
@@ -172,7 +172,7 @@
 	return i;
 }
 
-static s32 ds1307_read_block_data(struct i2c_client *client, u8 command,
+static s32 ds1307_read_block_data(const struct i2c_client *client, u8 command,
 				  u8 length, u8 *values)
 {
 	u8 oldvalues[I2C_SMBUS_BLOCK_MAX];
@@ -198,7 +198,7 @@
 	return length;
 }
 
-static s32 ds1307_write_block_data(struct i2c_client *client, u8 command,
+static s32 ds1307_write_block_data(const struct i2c_client *client, u8 command,
 				   u8 length, const u8 *values)
 {
 	u8 currvalues[I2C_SMBUS_BLOCK_MAX];
diff --git a/drivers/rtc/rtc-lib.c b/drivers/rtc/rtc-lib.c
index 773851f..075f170 100644
--- a/drivers/rtc/rtc-lib.c
+++ b/drivers/rtc/rtc-lib.c
@@ -117,4 +117,32 @@
 }
 EXPORT_SYMBOL(rtc_tm_to_time);
 
+/*
+ * Convert rtc_time to ktime
+ */
+ktime_t rtc_tm_to_ktime(struct rtc_time tm)
+{
+	time_t time;
+	rtc_tm_to_time(&tm, &time);
+	return ktime_set(time, 0);
+}
+EXPORT_SYMBOL_GPL(rtc_tm_to_ktime);
+
+/*
+ * Convert ktime to rtc_time
+ */
+struct rtc_time rtc_ktime_to_tm(ktime_t kt)
+{
+	struct timespec ts;
+	struct rtc_time ret;
+
+	ts = ktime_to_timespec(kt);
+	/* Round up any ns */
+	if (ts.tv_nsec)
+		ts.tv_sec++;
+	rtc_time_to_tm(ts.tv_sec, &ret);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(rtc_ktime_to_tm);
+
 MODULE_LICENSE("GPL");
diff --git a/drivers/rtc/rtc-max6902.c b/drivers/rtc/rtc-max6902.c
index 657403e..0ec3f588 100644
--- a/drivers/rtc/rtc-max6902.c
+++ b/drivers/rtc/rtc-max6902.c
@@ -139,12 +139,13 @@
 	if (IS_ERR(rtc))
 		return PTR_ERR(rtc);
 
+	dev_set_drvdata(&spi->dev, rtc);
 	return 0;
 }
 
 static int __devexit max6902_remove(struct spi_device *spi)
 {
-	struct rtc_device *rtc = platform_get_drvdata(spi);
+	struct rtc_device *rtc = dev_get_drvdata(&spi->dev);
 
 	rtc_device_unregister(rtc);
 	return 0;
diff --git a/drivers/rtc/rtc-omap.c b/drivers/rtc/rtc-omap.c
index 73377b0..e72b523 100644
--- a/drivers/rtc/rtc-omap.c
+++ b/drivers/rtc/rtc-omap.c
@@ -429,13 +429,14 @@
 fail0:
 	iounmap(rtc_base);
 fail:
-	release_resource(mem);
+	release_mem_region(mem->start, resource_size(mem));
 	return -EIO;
 }
 
 static int __exit omap_rtc_remove(struct platform_device *pdev)
 {
 	struct rtc_device	*rtc = platform_get_drvdata(pdev);
+	struct resource		*mem = dev_get_drvdata(&rtc->dev);
 
 	device_init_wakeup(&pdev->dev, 0);
 
@@ -447,8 +448,9 @@
 	if (omap_rtc_timer != omap_rtc_alarm)
 		free_irq(omap_rtc_alarm, rtc);
 
-	release_resource(dev_get_drvdata(&rtc->dev));
 	rtc_device_unregister(rtc);
+	iounmap(rtc_base);
+	release_mem_region(mem->start, resource_size(mem));
 	return 0;
 }
 
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c
index 09e7a05..30b2a82 100644
--- a/drivers/s390/net/lcs.c
+++ b/drivers/s390/net/lcs.c
@@ -841,7 +841,7 @@
 }
 
 /**
- * Emit buffer of a lan comand.
+ * Emit buffer of a lan command.
  */
 static void
 lcs_lancmd_timeout(unsigned long data)
diff --git a/drivers/s390/scsi/zfcp_cfdc.c b/drivers/s390/scsi/zfcp_cfdc.c
index 46342fe..303dde0 100644
--- a/drivers/s390/scsi/zfcp_cfdc.c
+++ b/drivers/s390/scsi/zfcp_cfdc.c
@@ -317,7 +317,7 @@
 
 /**
  * zfcp_cfdc_port_denied - Process "access denied" for port
- * @port: The port where the acces has been denied
+ * @port: The port where the access has been denied
  * @qual: The FSF status qualifier for the access denied FSF status
  */
 void zfcp_cfdc_port_denied(struct zfcp_port *port,
diff --git a/drivers/scsi/a100u2w.c b/drivers/scsi/a100u2w.c
index dc5ac6e..a391090 100644
--- a/drivers/scsi/a100u2w.c
+++ b/drivers/scsi/a100u2w.c
@@ -416,7 +416,7 @@
 	/* Go back and check they match */
 
 	outb(PRGMRST | DOWNLOAD, host->base + ORC_RISCCTL);	/* Reset program count 0 */
-	bios_addr -= 0x1000;	/* Reset the BIOS adddress      */
+	bios_addr -= 0x1000;	/* Reset the BIOS address */
 	for (i = 0, data32_ptr = (u8 *) & data32;	/* Check the code       */
 	     i < 0x1000;	/* Firmware code size = 4K      */
 	     i++, bios_addr++) {
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index afc9aeb..060ac4b 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -91,7 +91,7 @@
  *	aac_fib_setup	-	setup the fibs
  *	@dev: Adapter to set up
  *
- *	Allocate the PCI space for the fibs, map it and then intialise the
+ *	Allocate the PCI space for the fibs, map it and then initialise the
  *	fib area, the unmapped fib data and also the free list
  */
 
diff --git a/drivers/scsi/aic7xxx_old/aic7xxx.seq b/drivers/scsi/aic7xxx_old/aic7xxx.seq
index 5997e7c..1565be9 100644
--- a/drivers/scsi/aic7xxx_old/aic7xxx.seq
+++ b/drivers/scsi/aic7xxx_old/aic7xxx.seq
@@ -1178,7 +1178,7 @@
 /*
  * Retrieve an SCB by SCBID first searching the disconnected list falling
  * back to DMA'ing the SCB down from the host.  This routine assumes that
- * ARG_1 is the SCBID of interrest and that SINDEX is the position in the
+ * ARG_1 is the SCBID of interest and that SINDEX is the position in the
  * disconnected list to start the search from.  If SINDEX is SCB_LIST_NULL,
  * we go directly to the host for the SCB.
  */
diff --git a/drivers/scsi/aic94xx/aic94xx_reg_def.h b/drivers/scsi/aic94xx/aic94xx_reg_def.h
index 28aaf349..40273a7 100644
--- a/drivers/scsi/aic94xx/aic94xx_reg_def.h
+++ b/drivers/scsi/aic94xx/aic94xx_reg_def.h
@@ -1689,7 +1689,7 @@
 #define		PHY_START_CAL		0x01
 
 /*
- * HST_PCIX2 Registers, Addresss Range: (0x00-0xFC)
+ * HST_PCIX2 Registers, Address Range: (0x00-0xFC)
  */
 #define PCIX_REG_BASE_ADR		0xB8040000
 
@@ -1802,7 +1802,7 @@
 #define PCIC_TP_CTRL	0xFC
 
 /*
- * EXSI Registers, Addresss Range: (0x00-0xFC)
+ * EXSI Registers, Address Range: (0x00-0xFC)
  */
 #define EXSI_REG_BASE_ADR		REG_BASE_ADDR_EXSI
 
diff --git a/drivers/scsi/aic94xx/aic94xx_scb.c b/drivers/scsi/aic94xx/aic94xx_scb.c
index c43698b..2959327 100644
--- a/drivers/scsi/aic94xx/aic94xx_scb.c
+++ b/drivers/scsi/aic94xx/aic94xx_scb.c
@@ -867,7 +867,7 @@
  * resources they have with this SCB, and then call this one at the
  * end of their timeout function.  To do this, one should initialize
  * the ascb->timer.{function, data, expires} prior to calling the post
- * funcion.  The timer is started by the post function.
+ * function. The timer is started by the post function.
  */
 void asd_ascb_timedout(unsigned long data)
 {
diff --git a/drivers/scsi/aic94xx/aic94xx_seq.c b/drivers/scsi/aic94xx/aic94xx_seq.c
index 7437461..390168f 100644
--- a/drivers/scsi/aic94xx/aic94xx_seq.c
+++ b/drivers/scsi/aic94xx/aic94xx_seq.c
@@ -797,7 +797,7 @@
 		int j;
 		/* Start from Page 1 of Mode 0 and 1. */
 		moffs = LSEQ_PAGE_SIZE + i*LSEQ_MODE_SCRATCH_SIZE;
-		/* All the fields of page 1 can be intialized to 0. */
+		/* All the fields of page 1 can be initialized to 0. */
 		for (j = 0; j < LSEQ_PAGE_SIZE; j += 4)
 			asd_write_reg_dword(asd_ha, LmSCRATCH(lseq)+moffs+j,0);
 	}
@@ -938,7 +938,7 @@
 	asd_write_reg_dword(asd_ha, SCBPRO, 0);
 	asd_write_reg_dword(asd_ha, CSEQCON, 0);
 
-	/* Intialize CSEQ Mode 11 Interrupt Vectors.
+	/* Initialize CSEQ Mode 11 Interrupt Vectors.
 	 * The addresses are 16 bit wide and in dword units.
 	 * The values of their macros are in byte units.
 	 * Thus we have to divide by 4. */
@@ -961,7 +961,7 @@
 	asd_write_reg_word(asd_ha, CPRGMCNT, cseq_idle_loop);
 
 	for (i = 0; i < 8; i++) {
-		/* Intialize Mode n Link m Interrupt Enable. */
+		/* Initialize Mode n Link m Interrupt Enable. */
 		asd_write_reg_dword(asd_ha, CMnINTEN(i), EN_CMnRSPMBXF);
 		/* Initialize Mode n Request Mailbox. */
 		asd_write_reg_dword(asd_ha, CMnREQMBX(i), 0);
diff --git a/drivers/scsi/bfa/bfa_fcpim.c b/drivers/scsi/bfa/bfa_fcpim.c
index 9c410b2..c0353cd 100644
--- a/drivers/scsi/bfa/bfa_fcpim.c
+++ b/drivers/scsi/bfa/bfa_fcpim.c
@@ -1838,7 +1838,7 @@
 
 	case BFA_IOIM_SM_ABORT:
 		/*
-		 * IO is alraedy being cleaned up implicitly
+		 * IO is already being cleaned up implicitly
 		 */
 		ioim->io_cbfn = __bfa_cb_ioim_abort;
 		break;
diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c b/drivers/scsi/bfa/bfa_fcs_lport.c
index 4e2eb92..43fa986b 100644
--- a/drivers/scsi/bfa/bfa_fcs_lport.c
+++ b/drivers/scsi/bfa/bfa_fcs_lport.c
@@ -5646,7 +5646,7 @@
 	switch (status) {
 	case BFA_STATUS_OK:
 		/*
-		 * Initialiaze the V-Port fields
+		 * Initialize the V-Port fields
 		 */
 		__vport_fcid(vport) = vport->lps->lp_pid;
 		vport->vport_stats.fdisc_accepts++;
diff --git a/drivers/scsi/dc395x.c b/drivers/scsi/dc395x.c
index 8f1b5c8..b0f8523 100644
--- a/drivers/scsi/dc395x.c
+++ b/drivers/scsi/dc395x.c
@@ -3796,7 +3796,7 @@
  * adapter_add_device - Adds the device instance to the adaptor instance.
  *
  * @acb: The adapter device to be updated
- * @dcb: A newly created and intialised device instance to add.
+ * @dcb: A newly created and initialised device instance to add.
  **/
 static void adapter_add_device(struct AdapterCtlBlk *acb,
 		struct DeviceCtlBlk *dcb)
@@ -4498,7 +4498,7 @@
  * init_adapter - Grab the resource for the card, setup the adapter
  * information, set the card into a known state, create the various
  * tables etc etc. This basically gets all adapter information all up
- * to date, intialised and gets the chip in sync with it.
+ * to date, initialised and gets the chip in sync with it.
  *
  * @host:	This hosts adapter structure
  * @io_port:	The base I/O port
@@ -4789,7 +4789,7 @@
  * that it finds in the system. The pci_dev strcuture indicates which
  * instance we are being called from.
  * 
- * @dev: The PCI device to intialize.
+ * @dev: The PCI device to initialize.
  * @id: Looks like a pointer to the entry in our pci device table
  * that was actually matched by the PCI subsystem.
  *
@@ -4860,7 +4860,7 @@
  * dc395x_remove_one - Called to remove a single instance of the
  * adapter.
  *
- * @dev: The PCI device to intialize.
+ * @dev: The PCI device to initialize.
  **/
 static void __devexit dc395x_remove_one(struct pci_dev *dev)
 {
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
index cdc06cd..5962d1a 100644
--- a/drivers/scsi/libfc/fc_fcp.c
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -1250,7 +1250,7 @@
 /**
  * fc_lun_reset() - Send a LUN RESET command to a device
  *		    and wait for the reply
- * @lport: The local port to sent the comand on
+ * @lport: The local port to sent the command on
  * @fsp:   The FCP packet that identifies the LUN to be reset
  * @id:	   The SCSI command ID
  * @lun:   The LUN ID to be reset
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index c06491b..3512abb 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -1335,7 +1335,7 @@
 }
 
 /**
- * lpfc_param_init - Intializes a cfg attribute
+ * lpfc_param_init - Initializes a cfg attribute
  *
  * Description:
  * Macro that given an attr e.g. hba_queue_depth expands
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index f9f160a..bb01596 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -2852,7 +2852,7 @@
 			if (unlikely(!fcf_record)) {
 				lpfc_printf_log(phba, KERN_ERR,
 					LOG_MBOX | LOG_SLI,
-					"2554 Could not allocate memmory for "
+					"2554 Could not allocate memory for "
 					"fcf record\n");
 				rc = -ENODEV;
 				goto out;
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 462242d..6d0b36a 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -8071,7 +8071,7 @@
 	 * the HBA.
 	 */
 
-	/* HBA interrupt will be diabled after this call */
+	/* HBA interrupt will be disabled after this call */
 	lpfc_sli_hba_down(phba);
 	/* Stop kthread signal shall trigger work_done one more time */
 	kthread_stop(phba->worker_thread);
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 634b2fe..a359d2b 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -10172,7 +10172,7 @@
  * lpfc_sli4_queue_free - free a queue structure and associated memory
  * @queue: The queue structure to free.
  *
- * This function frees a queue structure and the DMAable memeory used for
+ * This function frees a queue structure and the DMAable memory used for
  * the host resident queue. This function must be called after destroying the
  * queue on the HBA.
  **/
diff --git a/drivers/scsi/megaraid.h b/drivers/scsi/megaraid.h
index f564474..8534119 100644
--- a/drivers/scsi/megaraid.h
+++ b/drivers/scsi/megaraid.h
@@ -13,7 +13,7 @@
  */
 
 /*
- * Comand coalescing - This feature allows the driver to be able to combine
+ * Command coalescing - This feature allows the driver to be able to combine
  * two or more commands and issue as one command in order to boost I/O
  * performance. Useful if the nature of the I/O is sequential. It is not very
  * useful for random natured I/Os.
diff --git a/drivers/scsi/megaraid/megaraid_mm.c b/drivers/scsi/megaraid/megaraid_mm.c
index a7008c0..25506c7 100644
--- a/drivers/scsi/megaraid/megaraid_mm.c
+++ b/drivers/scsi/megaraid/megaraid_mm.c
@@ -224,7 +224,7 @@
 {
 	int err;
 
-	/* inconsistant: mraid_mm_compat_ioctl doesn't take the BKL */
+	/* inconsistent: mraid_mm_compat_ioctl doesn't take the BKL */
 	mutex_lock(&mraid_mm_mutex);
 	err = mraid_mm_ioctl(filep, cmd, arg);
 	mutex_unlock(&mraid_mm_mutex);
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
index f8c86b2..b95285f3 100644
--- a/drivers/scsi/pm8001/pm8001_init.c
+++ b/drivers/scsi/pm8001/pm8001_init.c
@@ -603,7 +603,7 @@
 #endif
 
 intx:
-	/* intialize the INT-X interrupt */
+	/* initialize the INT-X interrupt */
 	rc = request_irq(pdev->irq, irq_handler, IRQF_SHARED, DRV_NAME,
 		SHOST_TO_SAS_HA(pm8001_ha->shost));
 	return rc;
diff --git a/drivers/scsi/scsi_netlink.c b/drivers/scsi/scsi_netlink.c
index d53e650..a2ed201 100644
--- a/drivers/scsi/scsi_netlink.c
+++ b/drivers/scsi/scsi_netlink.c
@@ -477,7 +477,7 @@
 
 
 /**
- * scsi_netlink_init - Called by SCSI subsystem to intialize
+ * scsi_netlink_init - Called by SCSI subsystem to initialize
  * 	the SCSI transport netlink interface
  *
  **/
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 4c68d36..490ce21 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -864,13 +864,15 @@
 
 	error = device_add(&sdev->sdev_gendev);
 	if (error) {
-		printk(KERN_INFO "error 1\n");
+		sdev_printk(KERN_INFO, sdev,
+				"failed to add device: %d\n", error);
 		return error;
 	}
 	device_enable_async_suspend(&sdev->sdev_dev);
 	error = device_add(&sdev->sdev_dev);
 	if (error) {
-		printk(KERN_INFO "error 2\n");
+		sdev_printk(KERN_INFO, sdev,
+				"failed to add class device: %d\n", error);
 		device_del(&sdev->sdev_gendev);
 		return error;
 	}
diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c
index 6b97ded..b4543f5 100644
--- a/drivers/scsi/sym53c8xx_2/sym_glue.c
+++ b/drivers/scsi/sym53c8xx_2/sym_glue.c
@@ -1866,7 +1866,7 @@
  *
  * This routine is similar to sym_set_workarounds(), except
  * that, at this point, we already know that the device was
- * successfully intialized at least once before, and so most
+ * successfully initialized at least once before, and so most
  * of the steps taken there are un-needed here.
  */
 static void sym2_reset_workarounds(struct pci_dev *pdev)
diff --git a/drivers/serial/Kconfig b/drivers/serial/Kconfig
index 188aff6..c1df767 100644
--- a/drivers/serial/Kconfig
+++ b/drivers/serial/Kconfig
@@ -776,24 +776,7 @@
 	bool "Enable UART0 hardware flow control"
 	depends on SERIAL_BFIN_UART0
 	help
-	  Enable hardware flow control in the driver. Using GPIO emulate the CTS/RTS
-	  signal.
-
-config UART0_CTS_PIN
-	int "UART0 CTS pin"
-	depends on BFIN_UART0_CTSRTS && !BF548
-	default 23
-	help
-	  The default pin is GPIO_GP7.
-	  Refer to arch/blackfin/mach-*/include/mach/gpio.h to see the GPIO map.
-
-config UART0_RTS_PIN
-	int "UART0 RTS pin"
-	depends on BFIN_UART0_CTSRTS && !BF548
-	default 22
-	help
-	  The default pin is GPIO_GP6.
-	  Refer to arch/blackfin/mach-*/include/mach/gpio.h to see the GPIO map.
+	  Enable hardware flow control in the driver.
 
 config SERIAL_BFIN_UART1
 	bool "Enable UART1"
@@ -805,22 +788,7 @@
 	bool "Enable UART1 hardware flow control"
 	depends on SERIAL_BFIN_UART1
 	help
-	  Enable hardware flow control in the driver. Using GPIO emulate the CTS/RTS
-	  signal.
-
-config UART1_CTS_PIN
-	int "UART1 CTS pin"
-	depends on BFIN_UART1_CTSRTS && !BF548
-	default -1
-	help
-	  Refer to arch/blackfin/mach-*/include/mach/gpio.h to see the GPIO map.
-
-config UART1_RTS_PIN
-	int "UART1 RTS pin"
-	depends on BFIN_UART1_CTSRTS && !BF548
-	default -1
-	help
-	  Refer to arch/blackfin/mach-*/include/mach/gpio.h to see the GPIO map.
+	  Enable hardware flow control in the driver.
 
 config SERIAL_BFIN_UART2
 	bool "Enable UART2"
@@ -832,22 +800,7 @@
 	bool "Enable UART2 hardware flow control"
 	depends on SERIAL_BFIN_UART2
 	help
-	  Enable hardware flow control in the driver. Using GPIO emulate the CTS/RTS
-	  signal.
-
-config UART2_CTS_PIN
-	int "UART2 CTS pin"
-	depends on BFIN_UART2_CTSRTS && !BF548
-	default -1
-	help
-	  Refer to arch/blackfin/mach-*/include/mach/gpio.h to see the GPIO map.
-
-config UART2_RTS_PIN
-	int "UART2 RTS pin"
-	depends on BFIN_UART2_CTSRTS && !BF548
-	default -1
-	help
-	  Refer to arch/blackfin/mach-*/include/mach/gpio.h to see the GPIO map.
+	  Enable hardware flow control in the driver.
 
 config SERIAL_BFIN_UART3
 	bool "Enable UART3"
@@ -859,22 +812,7 @@
 	bool "Enable UART3 hardware flow control"
 	depends on SERIAL_BFIN_UART3
 	help
-	  Enable hardware flow control in the driver. Using GPIO emulate the CTS/RTS
-	  signal.
-
-config UART3_CTS_PIN
-	int "UART3 CTS pin"
-	depends on BFIN_UART3_CTSRTS && !BF548
-	default -1
-	help
-	  Refer to arch/blackfin/mach-*/include/mach/gpio.h to see the GPIO map.
-
-config UART3_RTS_PIN
-	int "UART3 RTS pin"
-	depends on BFIN_UART3_CTSRTS && !BF548
-	default -1
-	help
-	  Refer to arch/blackfin/mach-*/include/mach/gpio.h to see the GPIO map.
+	  Enable hardware flow control in the driver.
 
 config SERIAL_IMX
 	bool "IMX serial port support"
diff --git a/drivers/serial/bfin_5xx.c b/drivers/serial/bfin_5xx.c
index 19cac9f..e381b89 100644
--- a/drivers/serial/bfin_5xx.c
+++ b/drivers/serial/bfin_5xx.c
@@ -1,7 +1,7 @@
 /*
  * Blackfin On-Chip Serial Driver
  *
- * Copyright 2006-2008 Analog Devices Inc.
+ * Copyright 2006-2010 Analog Devices Inc.
  *
  * Enter bugs at http://blackfin.uclinux.org/
  *
@@ -12,6 +12,9 @@
 #define SUPPORT_SYSRQ
 #endif
 
+#define DRIVER_NAME "bfin-uart"
+#define pr_fmt(fmt) DRIVER_NAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/ioport.h>
 #include <linux/gfp.h>
@@ -23,21 +26,20 @@
 #include <linux/tty.h>
 #include <linux/tty_flip.h>
 #include <linux/serial_core.h>
+#include <linux/gpio.h>
+#include <linux/irq.h>
+#include <linux/kgdb.h>
+#include <linux/slab.h>
 #include <linux/dma-mapping.h>
 
-#if defined(CONFIG_KGDB_SERIAL_CONSOLE) || \
-	defined(CONFIG_KGDB_SERIAL_CONSOLE_MODULE)
-#include <linux/kgdb.h>
-#include <asm/irq_regs.h>
-#endif
-
-#include <asm/gpio.h>
-#include <mach/bfin_serial_5xx.h>
-
-#include <asm/dma.h>
-#include <asm/io.h>
-#include <asm/irq.h>
+#include <asm/portmux.h>
 #include <asm/cacheflush.h>
+#include <asm/dma.h>
+
+#define port_membase(uart)     (((struct bfin_serial_port *)(uart))->port.membase)
+#define get_lsr_cache(uart)    (((struct bfin_serial_port *)(uart))->lsr)
+#define put_lsr_cache(uart, v) (((struct bfin_serial_port *)(uart))->lsr = (v))
+#include <asm/bfin_serial.h>
 
 #ifdef CONFIG_SERIAL_BFIN_MODULE
 # undef CONFIG_EARLY_PRINTK
@@ -48,12 +50,11 @@
 #endif
 
 /* UART name and device definitions */
-#define BFIN_SERIAL_NAME	"ttyBF"
+#define BFIN_SERIAL_DEV_NAME	"ttyBF"
 #define BFIN_SERIAL_MAJOR	204
 #define BFIN_SERIAL_MINOR	64
 
-static struct bfin_serial_port bfin_serial_ports[BFIN_UART_NR_PORTS];
-static int nr_active_ports = ARRAY_SIZE(bfin_serial_resource);
+static struct bfin_serial_port *bfin_serial_ports[BFIN_UART_NR_PORTS];
 
 #if defined(CONFIG_KGDB_SERIAL_CONSOLE) || \
 	defined(CONFIG_KGDB_SERIAL_CONSOLE_MODULE)
@@ -743,14 +744,14 @@
 		}
 	}
 	if (uart->rts_pin >= 0) {
-		gpio_request(uart->rts_pin, DRIVER_NAME);
 		gpio_direction_output(uart->rts_pin, 0);
 	}
 #endif
 #ifdef CONFIG_SERIAL_BFIN_HARD_CTSRTS
-	if (request_irq(uart->status_irq,
+	if (uart->cts_pin >= 0 && request_irq(uart->status_irq,
 		bfin_serial_mctrl_cts_int,
 		IRQF_DISABLED, "BFIN_UART_MODEM_STATUS", uart)) {
+		uart->cts_pin = -1;
 		pr_info("Unable to attach BlackFin UART Modem Status interrupt.\n");
 	}
 
@@ -796,11 +797,9 @@
 #ifdef CONFIG_SERIAL_BFIN_CTSRTS
 	if (uart->cts_pin >= 0)
 		free_irq(gpio_to_irq(uart->cts_pin), uart);
-	if (uart->rts_pin >= 0)
-		gpio_free(uart->rts_pin);
 #endif
 #ifdef CONFIG_SERIAL_BFIN_HARD_CTSRTS
-	if (UART_GET_IER(uart) & EDSSI)
+	if (uart->cts_pin >= 0)
 		free_irq(uart->status_irq, uart);
 #endif
 }
@@ -962,33 +961,33 @@
  */
 static void bfin_serial_set_ldisc(struct uart_port *port, int ld)
 {
-	int line = port->line;
+	struct bfin_serial_port *uart = (struct bfin_serial_port *)port;
 	unsigned short val;
 
 	switch (ld) {
 	case N_IRDA:
-		val = UART_GET_GCTL(&bfin_serial_ports[line]);
+		val = UART_GET_GCTL(uart);
 		val |= (IREN | RPOLC);
-		UART_PUT_GCTL(&bfin_serial_ports[line], val);
+		UART_PUT_GCTL(uart, val);
 		break;
 	default:
-		val = UART_GET_GCTL(&bfin_serial_ports[line]);
+		val = UART_GET_GCTL(uart);
 		val &= ~(IREN | RPOLC);
-		UART_PUT_GCTL(&bfin_serial_ports[line], val);
+		UART_PUT_GCTL(uart, val);
 	}
 }
 
 static void bfin_serial_reset_irda(struct uart_port *port)
 {
-	int line = port->line;
+	struct bfin_serial_port *uart = (struct bfin_serial_port *)port;
 	unsigned short val;
 
-	val = UART_GET_GCTL(&bfin_serial_ports[line]);
+	val = UART_GET_GCTL(uart);
 	val &= ~(IREN | RPOLC);
-	UART_PUT_GCTL(&bfin_serial_ports[line], val);
+	UART_PUT_GCTL(uart, val);
 	SSYNC();
 	val |= (IREN | RPOLC);
-	UART_PUT_GCTL(&bfin_serial_ports[line], val);
+	UART_PUT_GCTL(uart, val);
 	SSYNC();
 }
 
@@ -1070,85 +1069,6 @@
 #endif
 };
 
-static void __init bfin_serial_hw_init(void)
-{
-#ifdef CONFIG_SERIAL_BFIN_UART0
-	peripheral_request(P_UART0_TX, DRIVER_NAME);
-	peripheral_request(P_UART0_RX, DRIVER_NAME);
-#endif
-
-#ifdef CONFIG_SERIAL_BFIN_UART1
-	peripheral_request(P_UART1_TX, DRIVER_NAME);
-	peripheral_request(P_UART1_RX, DRIVER_NAME);
-
-# if defined(CONFIG_BFIN_UART1_CTSRTS) && defined(CONFIG_BF54x)
-	peripheral_request(P_UART1_RTS, DRIVER_NAME);
-	peripheral_request(P_UART1_CTS, DRIVER_NAME);
-# endif
-#endif
-
-#ifdef CONFIG_SERIAL_BFIN_UART2
-	peripheral_request(P_UART2_TX, DRIVER_NAME);
-	peripheral_request(P_UART2_RX, DRIVER_NAME);
-#endif
-
-#ifdef CONFIG_SERIAL_BFIN_UART3
-	peripheral_request(P_UART3_TX, DRIVER_NAME);
-	peripheral_request(P_UART3_RX, DRIVER_NAME);
-
-# if defined(CONFIG_BFIN_UART3_CTSRTS) && defined(CONFIG_BF54x)
-	peripheral_request(P_UART3_RTS, DRIVER_NAME);
-	peripheral_request(P_UART3_CTS, DRIVER_NAME);
-# endif
-#endif
-}
-
-static void __init bfin_serial_init_ports(void)
-{
-	static int first = 1;
-	int i;
-
-	if (!first)
-		return;
-	first = 0;
-
-	bfin_serial_hw_init();
-
-	for (i = 0; i < nr_active_ports; i++) {
-		spin_lock_init(&bfin_serial_ports[i].port.lock);
-		bfin_serial_ports[i].port.uartclk   = get_sclk();
-		bfin_serial_ports[i].port.fifosize  = BFIN_UART_TX_FIFO_SIZE;
-		bfin_serial_ports[i].port.ops       = &bfin_serial_pops;
-		bfin_serial_ports[i].port.line      = i;
-		bfin_serial_ports[i].port.iotype    = UPIO_MEM;
-		bfin_serial_ports[i].port.membase   =
-			(void __iomem *)bfin_serial_resource[i].uart_base_addr;
-		bfin_serial_ports[i].port.mapbase   =
-			bfin_serial_resource[i].uart_base_addr;
-		bfin_serial_ports[i].port.irq       =
-			bfin_serial_resource[i].uart_irq;
-		bfin_serial_ports[i].status_irq	    =
-			bfin_serial_resource[i].uart_status_irq;
-		bfin_serial_ports[i].port.flags     = UPF_BOOT_AUTOCONF;
-#ifdef CONFIG_SERIAL_BFIN_DMA
-		bfin_serial_ports[i].tx_done	    = 1;
-		bfin_serial_ports[i].tx_count	    = 0;
-		bfin_serial_ports[i].tx_dma_channel =
-			bfin_serial_resource[i].uart_tx_dma_channel;
-		bfin_serial_ports[i].rx_dma_channel =
-			bfin_serial_resource[i].uart_rx_dma_channel;
-		init_timer(&(bfin_serial_ports[i].rx_dma_timer));
-#endif
-#if defined(CONFIG_SERIAL_BFIN_CTSRTS) || \
-	defined(CONFIG_SERIAL_BFIN_HARD_CTSRTS)
-		bfin_serial_ports[i].cts_pin	    =
-			bfin_serial_resource[i].uart_cts_pin;
-		bfin_serial_ports[i].rts_pin	    =
-			bfin_serial_resource[i].uart_rts_pin;
-#endif
-	}
-}
-
 #if defined(CONFIG_SERIAL_BFIN_CONSOLE) || defined(CONFIG_EARLY_PRINTK)
 /*
  * If the port was already initialised (eg, by a boot loader),
@@ -1196,6 +1116,34 @@
 
 static struct uart_driver bfin_serial_reg;
 
+static void bfin_serial_console_putchar(struct uart_port *port, int ch)
+{
+	struct bfin_serial_port *uart = (struct bfin_serial_port *)port;
+	while (!(UART_GET_LSR(uart) & THRE))
+		barrier();
+	UART_PUT_CHAR(uart, ch);
+}
+
+#endif /* defined (CONFIG_SERIAL_BFIN_CONSOLE) ||
+		 defined (CONFIG_EARLY_PRINTK) */
+
+#ifdef CONFIG_SERIAL_BFIN_CONSOLE
+#define CLASS_BFIN_CONSOLE	"bfin-console"
+/*
+ * Interrupts are disabled on entering
+ */
+static void
+bfin_serial_console_write(struct console *co, const char *s, unsigned int count)
+{
+	struct bfin_serial_port *uart = bfin_serial_ports[co->index];
+	unsigned long flags;
+
+	spin_lock_irqsave(&uart->port.lock, flags);
+	uart_console_write(&uart->port, s, count, bfin_serial_console_putchar);
+	spin_unlock_irqrestore(&uart->port.lock, flags);
+
+}
+
 static int __init
 bfin_serial_console_setup(struct console *co, char *options)
 {
@@ -1215,9 +1163,12 @@
 	 * if so, search for the first available port that does have
 	 * console support.
 	 */
-	if (co->index == -1 || co->index >= nr_active_ports)
-		co->index = 0;
-	uart = &bfin_serial_ports[co->index];
+	if (co->index < 0 || co->index >= BFIN_UART_NR_PORTS)
+		return -ENODEV;
+
+	uart = bfin_serial_ports[co->index];
+	if (!uart)
+		return -ENODEV;
 
 	if (options)
 		uart_parse_options(options, &baud, &parity, &bits, &flow);
@@ -1226,36 +1177,9 @@
 
 	return uart_set_options(&uart->port, co, baud, parity, bits, flow);
 }
-#endif /* defined (CONFIG_SERIAL_BFIN_CONSOLE) ||
-				 defined (CONFIG_EARLY_PRINTK) */
-
-#ifdef CONFIG_SERIAL_BFIN_CONSOLE
-static void bfin_serial_console_putchar(struct uart_port *port, int ch)
-{
-	struct bfin_serial_port *uart = (struct bfin_serial_port *)port;
-	while (!(UART_GET_LSR(uart) & THRE))
-		barrier();
-	UART_PUT_CHAR(uart, ch);
-	SSYNC();
-}
-
-/*
- * Interrupts are disabled on entering
- */
-static void
-bfin_serial_console_write(struct console *co, const char *s, unsigned int count)
-{
-	struct bfin_serial_port *uart = &bfin_serial_ports[co->index];
-	unsigned long flags;
-
-	spin_lock_irqsave(&uart->port.lock, flags);
-	uart_console_write(&uart->port, s, count, bfin_serial_console_putchar);
-	spin_unlock_irqrestore(&uart->port.lock, flags);
-
-}
 
 static struct console bfin_serial_console = {
-	.name		= BFIN_SERIAL_NAME,
+	.name		= BFIN_SERIAL_DEV_NAME,
 	.write		= bfin_serial_console_write,
 	.device		= uart_console_device,
 	.setup		= bfin_serial_console_setup,
@@ -1263,44 +1187,30 @@
 	.index		= -1,
 	.data		= &bfin_serial_reg,
 };
-
-static int __init bfin_serial_rs_console_init(void)
-{
-	bfin_serial_init_ports();
-	register_console(&bfin_serial_console);
-
-	return 0;
-}
-console_initcall(bfin_serial_rs_console_init);
-
 #define BFIN_SERIAL_CONSOLE	&bfin_serial_console
 #else
 #define BFIN_SERIAL_CONSOLE	NULL
 #endif /* CONFIG_SERIAL_BFIN_CONSOLE */
 
+#ifdef	CONFIG_EARLY_PRINTK
+static struct bfin_serial_port bfin_earlyprintk_port;
+#define CLASS_BFIN_EARLYPRINTK	"bfin-earlyprintk"
 
-#ifdef CONFIG_EARLY_PRINTK
-static __init void early_serial_putc(struct uart_port *port, int ch)
+/*
+ * Interrupts are disabled on entering
+ */
+static void
+bfin_earlyprintk_console_write(struct console *co, const char *s, unsigned int count)
 {
-	unsigned timeout = 0xffff;
-	struct bfin_serial_port *uart = (struct bfin_serial_port *)port;
+	unsigned long flags;
 
-	while ((!(UART_GET_LSR(uart) & THRE)) && --timeout)
-		cpu_relax();
-	UART_PUT_CHAR(uart, ch);
-}
+	if (bfin_earlyprintk_port.port.line != co->index)
+		return;
 
-static __init void early_serial_write(struct console *con, const char *s,
-					unsigned int n)
-{
-	struct bfin_serial_port *uart = &bfin_serial_ports[con->index];
-	unsigned int i;
-
-	for (i = 0; i < n; i++, s++) {
-		if (*s == '\n')
-			early_serial_putc(&uart->port, '\r');
-		early_serial_putc(&uart->port, *s);
-	}
+	spin_lock_irqsave(&bfin_earlyprintk_port.port.lock, flags);
+	uart_console_write(&bfin_earlyprintk_port.port, s, count,
+		bfin_serial_console_putchar);
+	spin_unlock_irqrestore(&bfin_earlyprintk_port.port.lock, flags);
 }
 
 /*
@@ -1311,18 +1221,326 @@
  */
 static struct __initdata console bfin_early_serial_console = {
 	.name = "early_BFuart",
-	.write = early_serial_write,
+	.write = bfin_earlyprintk_console_write,
 	.device = uart_console_device,
 	.flags = CON_PRINTBUFFER,
 	.index = -1,
 	.data  = &bfin_serial_reg,
 };
+#endif
+
+static struct uart_driver bfin_serial_reg = {
+	.owner			= THIS_MODULE,
+	.driver_name		= DRIVER_NAME,
+	.dev_name		= BFIN_SERIAL_DEV_NAME,
+	.major			= BFIN_SERIAL_MAJOR,
+	.minor			= BFIN_SERIAL_MINOR,
+	.nr			= BFIN_UART_NR_PORTS,
+	.cons			= BFIN_SERIAL_CONSOLE,
+};
+
+static int bfin_serial_suspend(struct platform_device *pdev, pm_message_t state)
+{
+	struct bfin_serial_port *uart = platform_get_drvdata(pdev);
+
+	return uart_suspend_port(&bfin_serial_reg, &uart->port);
+}
+
+static int bfin_serial_resume(struct platform_device *pdev)
+{
+	struct bfin_serial_port *uart = platform_get_drvdata(pdev);
+
+	return uart_resume_port(&bfin_serial_reg, &uart->port);
+}
+
+static int bfin_serial_probe(struct platform_device *pdev)
+{
+	struct resource *res;
+	struct bfin_serial_port *uart = NULL;
+	int ret = 0;
+
+	if (pdev->id < 0 || pdev->id >= BFIN_UART_NR_PORTS) {
+		dev_err(&pdev->dev, "Wrong bfin uart platform device id.\n");
+		return -ENOENT;
+	}
+
+	if (bfin_serial_ports[pdev->id] == NULL) {
+
+		uart = kzalloc(sizeof(*uart), GFP_KERNEL);
+		if (!uart) {
+			dev_err(&pdev->dev,
+				"fail to malloc bfin_serial_port\n");
+			return -ENOMEM;
+		}
+		bfin_serial_ports[pdev->id] = uart;
+
+#ifdef CONFIG_EARLY_PRINTK
+		if (!(bfin_earlyprintk_port.port.membase
+			&& bfin_earlyprintk_port.port.line == pdev->id)) {
+			/*
+			 * If the peripheral PINs of current port is allocated
+			 * in earlyprintk probe stage, don't do it again.
+			 */
+#endif
+		ret = peripheral_request_list(
+			(unsigned short *)pdev->dev.platform_data, DRIVER_NAME);
+		if (ret) {
+			dev_err(&pdev->dev,
+				"fail to request bfin serial peripherals\n");
+			goto out_error_free_mem;
+		}
+#ifdef CONFIG_EARLY_PRINTK
+		}
+#endif
+
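+		/* First-time setup: fill in the uart_port fields for this port */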
+		spin_lock_init(&uart->port.lock);
+		uart->port.uartclk   = get_sclk();
+		uart->port.fifosize  = BFIN_UART_TX_FIFO_SIZE;
+		uart->port.ops       = &bfin_serial_pops;
+		uart->port.line      = pdev->id;
+		uart->port.iotype    = UPIO_MEM;
+		uart->port.flags     = UPF_BOOT_AUTOCONF;
+
+		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+		if (res == NULL) {
+			dev_err(&pdev->dev, "Cannot get IORESOURCE_MEM\n");
+			ret = -ENOENT;
+			goto out_error_free_peripherals;
+		}
+
+		uart->port.membase = ioremap(res->start,
+			res->end - res->start);
+		if (!uart->port.membase) {
+			dev_err(&pdev->dev, "Cannot map uart IO\n");
+			ret = -ENXIO;
+			goto out_error_free_peripherals;
+		}
+		uart->port.mapbase = res->start;
+
+		uart->port.irq = platform_get_irq(pdev, 0);
+		if (uart->port.irq < 0) {
+			dev_err(&pdev->dev, "No uart RX/TX IRQ specified\n");
+			ret = -ENOENT;
+			goto out_error_unmap;
+		}
+
+		uart->status_irq = platform_get_irq(pdev, 1);
+		if (uart->status_irq < 0) {
+			dev_err(&pdev->dev, "No uart status IRQ specified\n");
+			ret = -ENOENT;
+			goto out_error_unmap;
+		}
+
+#ifdef CONFIG_SERIAL_BFIN_DMA
+		uart->tx_done	    = 1;
+		uart->tx_count	    = 0;
+
+		res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
+		if (res == NULL) {
+			dev_err(&pdev->dev, "No uart TX DMA channel specified\n");
+			ret = -ENOENT;
+			goto out_error_unmap;
+		}
+		uart->tx_dma_channel = res->start;
+
+		res = platform_get_resource(pdev, IORESOURCE_DMA, 1);
+		if (res == NULL) {
+			dev_err(&pdev->dev, "No uart RX DMA channel specified\n");
+			ret = -ENOENT;
+			goto out_error_unmap;
+		}
+		uart->rx_dma_channel = res->start;
+
+		init_timer(&(uart->rx_dma_timer));
+#endif
+
+#if defined(CONFIG_SERIAL_BFIN_CTSRTS) || \
+	defined(CONFIG_SERIAL_BFIN_HARD_CTSRTS)
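+		/* CTS/RTS GPIOs are optional IORESOURCE_IO 0 and 1; -1 means not wired */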
+		res = platform_get_resource(pdev, IORESOURCE_IO, 0);
+		if (res == NULL)
+			uart->cts_pin = -1;
+		else
+			uart->cts_pin = res->start;
+
+		res = platform_get_resource(pdev, IORESOURCE_IO, 1);
+		if (res == NULL)
+			uart->rts_pin = -1;
+		else
+			uart->rts_pin = res->start;
+# if defined(CONFIG_SERIAL_BFIN_CTSRTS)
+		if (uart->rts_pin >= 0)
+			gpio_request(uart->rts_pin, DRIVER_NAME);
+# endif
+#endif
+	}
+
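+	/* Add the port to the serial core only on the normal (non-early) probe */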
+#ifdef CONFIG_SERIAL_BFIN_CONSOLE
+	if (!is_early_platform_device(pdev)) {
+#endif
+		uart = bfin_serial_ports[pdev->id];
+		uart->port.dev = &pdev->dev;
+		dev_set_drvdata(&pdev->dev, uart);
+		ret = uart_add_one_port(&bfin_serial_reg, &uart->port);
+#ifdef CONFIG_SERIAL_BFIN_CONSOLE
+	}
+#endif
+
+	if (!ret)
+		return 0;
+
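+	/* Error unwind for first-time setup and uart_add_one_port() failures */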
+	if (uart) {
+out_error_unmap:
+		iounmap(uart->port.membase);
+out_error_free_peripherals:
+		peripheral_free_list(
+			(unsigned short *)pdev->dev.platform_data);
+out_error_free_mem:
+		kfree(uart);
+		bfin_serial_ports[pdev->id] = NULL;
+	}
+
+	return ret;
+}
+
+static int __devexit bfin_serial_remove(struct platform_device *pdev)
+{
+	struct bfin_serial_port *uart = platform_get_drvdata(pdev);
+
+	dev_set_drvdata(&pdev->dev, NULL);
+
+	if (uart) {
+		uart_remove_one_port(&bfin_serial_reg, &uart->port);
+#ifdef CONFIG_SERIAL_BFIN_CTSRTS
+		if (uart->rts_pin >= 0)
+			gpio_free(uart->rts_pin);
+#endif
+		iounmap(uart->port.membase);
+		peripheral_free_list(
+			(unsigned short *)pdev->dev.platform_data);
+		kfree(uart);
+		bfin_serial_ports[pdev->id] = NULL;
+	}
+
+	return 0;
+}
+
+static struct platform_driver bfin_serial_driver = {
+	.probe		= bfin_serial_probe,
+	.remove		= __devexit_p(bfin_serial_remove),
+	.suspend	= bfin_serial_suspend,
+	.resume		= bfin_serial_resume,
+	.driver		= {
+		.name	= DRIVER_NAME,
+		.owner	= THIS_MODULE,
+	},
+};
+
+#if defined(CONFIG_SERIAL_BFIN_CONSOLE)
+static __initdata struct early_platform_driver early_bfin_serial_driver = {
+	.class_str = CLASS_BFIN_CONSOLE,
+	.pdrv = &bfin_serial_driver,
+	.requested_id = EARLY_PLATFORM_ID_UNSET,
+};
+
+static int __init bfin_serial_rs_console_init(void)
+{
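+	/* Probe the console UART early, before the normal driver model is up */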
+	early_platform_driver_register(&early_bfin_serial_driver, DRIVER_NAME);
+
+	early_platform_driver_probe(CLASS_BFIN_CONSOLE, BFIN_UART_NR_PORTS, 0);
+
+	register_console(&bfin_serial_console);
+
+	return 0;
+}
+console_initcall(bfin_serial_rs_console_init);
+#endif
+
+#ifdef CONFIG_EARLY_PRINTK
+/*
+ * Memory can't be allocated dynamically during the earlyprintk init stage.
+ * So, do an individual probe for earlyprintk with a static uart port variable.
+ */
+static int bfin_earlyprintk_probe(struct platform_device *pdev)
+{
+	struct resource *res;
+	int ret;
+
+	if (pdev->id < 0 || pdev->id >= BFIN_UART_NR_PORTS) {
+		dev_err(&pdev->dev, "Wrong earlyprintk platform device id.\n");
+		return -ENOENT;
+	}
+
+	ret = peripheral_request_list(
+		(unsigned short *)pdev->dev.platform_data, DRIVER_NAME);
+	if (ret) {
+		dev_err(&pdev->dev,
+				"fail to request bfin serial peripherals\n");
+		return ret;
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (res == NULL) {
+		dev_err(&pdev->dev, "Cannot get IORESOURCE_MEM\n");
+		ret = -ENOENT;
+		goto out_error_free_peripherals;
+	}
+
+	bfin_earlyprintk_port.port.membase = ioremap(res->start,
+			res->end - res->start);
+	if (!bfin_earlyprintk_port.port.membase) {
+		dev_err(&pdev->dev, "Cannot map uart IO\n");
+		ret = -ENXIO;
+		goto out_error_free_peripherals;
+	}
+	bfin_earlyprintk_port.port.mapbase = res->start;
+	bfin_earlyprintk_port.port.line = pdev->id;
+	bfin_earlyprintk_port.port.uartclk = get_sclk();
+	bfin_earlyprintk_port.port.fifosize  = BFIN_UART_TX_FIFO_SIZE;
+	spin_lock_init(&bfin_earlyprintk_port.port.lock);
+
+	return 0;
+
+out_error_free_peripherals:
+	peripheral_free_list(
+		(unsigned short *)pdev->dev.platform_data);
+
+	return ret;
+}
+
+static struct platform_driver bfin_earlyprintk_driver = {
+	.probe		= bfin_earlyprintk_probe,
+	.driver		= {
+		.name	= DRIVER_NAME,
+		.owner	= THIS_MODULE,
+	},
+};
+
+static __initdata struct early_platform_driver early_bfin_earlyprintk_driver = {
+	.class_str = CLASS_BFIN_EARLYPRINTK,
+	.pdrv = &bfin_earlyprintk_driver,
+	.requested_id = EARLY_PLATFORM_ID_UNSET,
+};
 
 struct console __init *bfin_earlyserial_init(unsigned int port,
 						unsigned int cflag)
 {
-	struct bfin_serial_port *uart;
 	struct ktermios t;
+	char port_name[20];
+
+	if (port < 0 || port >= BFIN_UART_NR_PORTS)
+		return NULL;
+
+	/*
+	 * Only probe the resource of the port given in the earlyprintk boot arg.
+	 * The expected port id is indicated in the port name string.
+	 */
+	snprintf(port_name, 20, DRIVER_NAME ".%d", port);
+	early_platform_driver_register(&early_bfin_earlyprintk_driver,
+		port_name);
+	early_platform_driver_probe(CLASS_BFIN_EARLYPRINTK, 1, 0);
+
+	if (!bfin_earlyprintk_port.port.membase)
+		return NULL;
 
 #ifdef CONFIG_SERIAL_BFIN_CONSOLE
 	/*
@@ -1332,124 +1550,36 @@
 	bfin_serial_console.flags &= ~CON_PRINTBUFFER;
 #endif
 
-	if (port == -1 || port >= nr_active_ports)
-		port = 0;
-	bfin_serial_init_ports();
 	bfin_early_serial_console.index = port;
-	uart = &bfin_serial_ports[port];
 	t.c_cflag = cflag;
 	t.c_iflag = 0;
 	t.c_oflag = 0;
 	t.c_lflag = ICANON;
 	t.c_line = port;
-	bfin_serial_set_termios(&uart->port, &t, &t);
+	bfin_serial_set_termios(&bfin_earlyprintk_port.port, &t, &t);
+
 	return &bfin_early_serial_console;
 }
-
 #endif /* CONFIG_EARLY_PRINTK */
 
-static struct uart_driver bfin_serial_reg = {
-	.owner			= THIS_MODULE,
-	.driver_name		= "bfin-uart",
-	.dev_name		= BFIN_SERIAL_NAME,
-	.major			= BFIN_SERIAL_MAJOR,
-	.minor			= BFIN_SERIAL_MINOR,
-	.nr			= BFIN_UART_NR_PORTS,
-	.cons			= BFIN_SERIAL_CONSOLE,
-};
-
-static int bfin_serial_suspend(struct platform_device *dev, pm_message_t state)
-{
-	int i;
-
-	for (i = 0; i < nr_active_ports; i++) {
-		if (bfin_serial_ports[i].port.dev != &dev->dev)
-			continue;
-		uart_suspend_port(&bfin_serial_reg, &bfin_serial_ports[i].port);
-	}
-
-	return 0;
-}
-
-static int bfin_serial_resume(struct platform_device *dev)
-{
-	int i;
-
-	for (i = 0; i < nr_active_ports; i++) {
-		if (bfin_serial_ports[i].port.dev != &dev->dev)
-			continue;
-		uart_resume_port(&bfin_serial_reg, &bfin_serial_ports[i].port);
-	}
-
-	return 0;
-}
-
-static int bfin_serial_probe(struct platform_device *dev)
-{
-	struct resource *res = dev->resource;
-	int i;
-
-	for (i = 0; i < dev->num_resources; i++, res++)
-		if (res->flags & IORESOURCE_MEM)
-			break;
-
-	if (i < dev->num_resources) {
-		for (i = 0; i < nr_active_ports; i++, res++) {
-			if (bfin_serial_ports[i].port.mapbase != res->start)
-				continue;
-			bfin_serial_ports[i].port.dev = &dev->dev;
-			uart_add_one_port(&bfin_serial_reg, &bfin_serial_ports[i].port);
-		}
-	}
-
-	return 0;
-}
-
-static int bfin_serial_remove(struct platform_device *dev)
-{
-	int i;
-
-	for (i = 0; i < nr_active_ports; i++) {
-		if (bfin_serial_ports[i].port.dev != &dev->dev)
-			continue;
-		uart_remove_one_port(&bfin_serial_reg, &bfin_serial_ports[i].port);
-		bfin_serial_ports[i].port.dev = NULL;
-#if defined(CONFIG_SERIAL_BFIN_CTSRTS)
-		gpio_free(bfin_serial_ports[i].cts_pin);
-		gpio_free(bfin_serial_ports[i].rts_pin);
-#endif
-	}
-
-	return 0;
-}
-
-static struct platform_driver bfin_serial_driver = {
-	.probe		= bfin_serial_probe,
-	.remove		= bfin_serial_remove,
-	.suspend	= bfin_serial_suspend,
-	.resume		= bfin_serial_resume,
-	.driver		= {
-		.name	= "bfin-uart",
-		.owner	= THIS_MODULE,
-	},
-};
-
 static int __init bfin_serial_init(void)
 {
 	int ret;
 
-	pr_info("Serial: Blackfin serial driver\n");
-
-	bfin_serial_init_ports();
+	pr_info("Blackfin serial driver\n");
 
 	ret = uart_register_driver(&bfin_serial_reg);
-	if (ret == 0) {
-		ret = platform_driver_register(&bfin_serial_driver);
-		if (ret) {
-			pr_debug("uart register failed\n");
-			uart_unregister_driver(&bfin_serial_reg);
-		}
+	if (ret) {
+		pr_err("failed to register %s:%d\n",
+			bfin_serial_reg.driver_name, ret);
 	}
+
+	ret = platform_driver_register(&bfin_serial_driver);
+	if (ret) {
+		pr_err("fail to register bfin uart\n");
+		uart_unregister_driver(&bfin_serial_reg);
+	}
+
 	return ret;
 }
 
@@ -1463,7 +1593,7 @@
 module_init(bfin_serial_init);
 module_exit(bfin_serial_exit);
 
-MODULE_AUTHOR("Aubrey.Li <aubrey.li@analog.com>");
+MODULE_AUTHOR("Sonic Zhang, Aubrey Li");
 MODULE_DESCRIPTION("Blackfin generic serial port driver");
 MODULE_LICENSE("GPL");
 MODULE_ALIAS_CHARDEV_MAJOR(BFIN_SERIAL_MAJOR);
diff --git a/drivers/spi/atmel_spi.c b/drivers/spi/atmel_spi.c
index a067046..1a478bf 100644
--- a/drivers/spi/atmel_spi.c
+++ b/drivers/spi/atmel_spi.c
@@ -341,9 +341,9 @@
 /*
  * For DMA, tx_buf/tx_dma have the same relationship as rx_buf/rx_dma:
  *  - The buffer is either valid for CPU access, else NULL
- *  - If the buffer is valid, so is its DMA addresss
+ *  - If the buffer is valid, so is its DMA address
  *
- * This driver manages the dma addresss unless message->is_dma_mapped.
+ * This driver manages the dma address unless message->is_dma_mapped.
  */
 static int
 atmel_spi_dma_map_xfer(struct atmel_spi *as, struct spi_transfer *xfer)
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index 4e6245e..6034282 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -38,7 +38,7 @@
 
 
 /*
- * This supports acccess to SPI devices using normal userspace I/O calls.
+ * This supports access to SPI devices using normal userspace I/O calls.
  * Note that while traditional UNIX/POSIX I/O semantics are half duplex,
  * and often mask message boundaries, full SPI support requires full duplex
  * transfers.  There are several kinds of internal message boundaries to
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index e2d5869..5c8fcfc 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -123,6 +123,8 @@
 
 source "drivers/staging/iio/Kconfig"
 
+source "drivers/staging/cs5535_gpio/Kconfig"
+
 source "drivers/staging/zram/Kconfig"
 
 source "drivers/staging/wlags49_h2/Kconfig"
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index c7d2224..d538863 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -44,6 +44,7 @@
 obj-$(CONFIG_MRST_RAR_HANDLER)	+= memrar/
 obj-$(CONFIG_DX_SEP)            += sep/
 obj-$(CONFIG_IIO)		+= iio/
+obj-$(CONFIG_CS5535_GPIO)	+= cs5535_gpio/
 obj-$(CONFIG_ZRAM)		+= zram/
 obj-$(CONFIG_WLAGS49_H2)	+= wlags49_h2/
 obj-$(CONFIG_WLAGS49_H25)	+= wlags49_h25/
diff --git a/drivers/staging/cs5535_gpio/Kconfig b/drivers/staging/cs5535_gpio/Kconfig
new file mode 100644
index 0000000..a1b3a8d
--- /dev/null
+++ b/drivers/staging/cs5535_gpio/Kconfig
@@ -0,0 +1,11 @@
+config CS5535_GPIO
+	tristate "AMD CS5535/CS5536 GPIO (Geode Companion Device)"
+	depends on X86_32
+	help
+	  Note: this driver is DEPRECATED.  Please use the cs5535-gpio module
+	  in the GPIO section instead (CONFIG_GPIO_CS5535).
+
+	  Give userspace access to the GPIO pins on the AMD CS5535 and
+	  CS5536 Geode companion devices.
+
+	  If compiled as a module, it will be called cs5535_gpio.
diff --git a/drivers/staging/cs5535_gpio/Makefile b/drivers/staging/cs5535_gpio/Makefile
new file mode 100644
index 0000000..d67c4b8
--- /dev/null
+++ b/drivers/staging/cs5535_gpio/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_CS5535_GPIO)	+= cs5535_gpio.o
diff --git a/drivers/staging/cs5535_gpio/TODO b/drivers/staging/cs5535_gpio/TODO
new file mode 100644
index 0000000..98d1cd1
--- /dev/null
+++ b/drivers/staging/cs5535_gpio/TODO
@@ -0,0 +1,6 @@
+This is an obsolete driver for some of the CS5535 and CS5536 southbridge GPIOs.
+It has been replaced by a driver that makes use of the Linux GPIO subsystem.
+Please switch to that driver, and let dilinger@queued.net know if there's
+anything missing from the new driver.
+
+This driver is scheduled for removal in 2.6.40.
diff --git a/drivers/char/cs5535_gpio.c b/drivers/staging/cs5535_gpio/cs5535_gpio.c
similarity index 100%
rename from drivers/char/cs5535_gpio.c
rename to drivers/staging/cs5535_gpio/cs5535_gpio.c
diff --git a/drivers/staging/msm/msm_fb_bl.c b/drivers/staging/msm/msm_fb_bl.c
index 033fc94..2a80775 100644
--- a/drivers/staging/msm/msm_fb_bl.c
+++ b/drivers/staging/msm/msm_fb_bl.c
@@ -42,7 +42,7 @@
 	return 0;
 }
 
-static struct backlight_ops msm_fb_bl_ops = {
+static const struct backlight_ops msm_fb_bl_ops = {
 	.get_brightness = msm_fb_bl_get_brightness,
 	.update_status = msm_fb_bl_update_status,
 };
diff --git a/drivers/staging/olpc_dcon/TODO b/drivers/staging/olpc_dcon/TODO
index ac2d3d0..35f9cda 100644
--- a/drivers/staging/olpc_dcon/TODO
+++ b/drivers/staging/olpc_dcon/TODO
@@ -1,6 +1,5 @@
 TODO:
 	- checkpatch.pl cleanups
-	- port geode gpio calls to newer cs5535 API
 	- see if vx855 gpio API can be made similar enough to cs5535 so we can
 	  share more code
 	- allow simultaneous XO-1 and XO-1.5 support
diff --git a/drivers/staging/olpc_dcon/olpc_dcon.c b/drivers/staging/olpc_dcon/olpc_dcon.c
index 4ca45ec..9f26dc9 100644
--- a/drivers/staging/olpc_dcon/olpc_dcon.c
+++ b/drivers/staging/olpc_dcon/olpc_dcon.c
@@ -27,7 +27,6 @@
 #include <asm/uaccess.h>
 #include <linux/ctype.h>
 #include <linux/reboot.h>
-#include <linux/gpio.h>
 #include <asm/tsc.h>
 #include <asm/olpc.h>
 
@@ -49,7 +48,7 @@
 	int (*init)(void);
 	void (*bus_stabilize_wiggle)(void);
 	void (*set_dconload)(int);
-	int (*read_status)(void);
+	u8 (*read_status)(void);
 };
 
 static struct dcon_platform_data *pdata;
@@ -615,7 +614,7 @@
 	__ATTR(resumeline, 0644, dcon_resumeline_show, dcon_resumeline_store),
 };
 
-static struct backlight_ops dcon_bl_ops = {
+static const struct backlight_ops dcon_bl_ops = {
 	.get_brightness = dconbl_get,
 	.update_status = dconbl_set
 };
diff --git a/drivers/staging/olpc_dcon/olpc_dcon.h b/drivers/staging/olpc_dcon/olpc_dcon.h
index 6453ca4..e566d21 100644
--- a/drivers/staging/olpc_dcon/olpc_dcon.h
+++ b/drivers/staging/olpc_dcon/olpc_dcon.h
@@ -29,26 +29,6 @@
 #define DCON_REG_SCAN_INT	9
 #define DCON_REG_BRIGHT		10
 
-/* GPIO registers (CS5536) */
-
-#define MSR_LBAR_GPIO		0x5140000C
-
-#define GPIOx_OUT_VAL     0x00
-#define GPIOx_OUT_EN      0x04
-#define GPIOx_IN_EN       0x20
-#define GPIOx_INV_EN      0x24
-#define GPIOx_IN_FLTR_EN  0x28
-#define GPIOx_EVNTCNT_EN  0x2C
-#define GPIOx_READ_BACK   0x30
-#define GPIOx_EVNT_EN     0x38
-#define GPIOx_NEGEDGE_EN  0x44
-#define GPIOx_NEGEDGE_STS 0x4C
-#define GPIO_FLT7_AMNT    0xD8
-#define GPIO_MAP_X        0xE0
-#define GPIO_MAP_Y        0xE4
-#define GPIO_FE7_SEL      0xF7
-
-
 /* Status values */
 
 #define DCONSTAT_SCANINT	0
diff --git a/drivers/staging/olpc_dcon/olpc_dcon_xo_1.c b/drivers/staging/olpc_dcon/olpc_dcon_xo_1.c
index 779fb7d..043198d 100644
--- a/drivers/staging/olpc_dcon/olpc_dcon_xo_1.c
+++ b/drivers/staging/olpc_dcon/olpc_dcon_xo_1.c
@@ -10,54 +10,70 @@
  * modify it under the terms of version 2 of the GNU General Public
  * License as published by the Free Software Foundation.
  */
-
+#include <linux/cs5535.h>
+#include <linux/gpio.h>
 #include <asm/olpc.h>
 
 #include "olpc_dcon.h"
 
-/* Base address of the GPIO registers */
-static unsigned long gpio_base;
-
-/*
- * List of GPIOs that we care about:
- * (in)  GPIO12   -- DCONBLANK
- * (in)  GPIO[56] -- DCONSTAT[01]
- * (out) GPIO11   -- DCONLOAD
- */
-
-#define IN_GPIOS ((1<<5) | (1<<6) | (1<<7) | (1<<12))
-#define OUT_GPIOS (1<<11)
-
 static int dcon_init_xo_1(void)
 {
-	unsigned long lo, hi;
 	unsigned char lob;
 
-	rdmsr(MSR_LBAR_GPIO, lo, hi);
-
-	/* Check the mask and whether GPIO is enabled (sanity check) */
-	if (hi != 0x0000f001) {
-		printk(KERN_ERR "GPIO not enabled -- cannot use DCON\n");
-		return -ENODEV;
+	if (gpio_request(OLPC_GPIO_DCON_STAT0, "OLPC-DCON")) {
+		printk(KERN_ERR "olpc-dcon: failed to request STAT0 GPIO\n");
+		return -EIO;
+	}
+	if (gpio_request(OLPC_GPIO_DCON_STAT1, "OLPC-DCON")) {
+		printk(KERN_ERR "olpc-dcon: failed to request STAT1 GPIO\n");
+		goto err_gp_stat1;
+	}
+	if (gpio_request(OLPC_GPIO_DCON_IRQ, "OLPC-DCON")) {
+		printk(KERN_ERR "olpc-dcon: failed to request IRQ GPIO\n");
+		goto err_gp_irq;
+	}
+	if (gpio_request(OLPC_GPIO_DCON_LOAD, "OLPC-DCON")) {
+		printk(KERN_ERR "olpc-dcon: failed to request LOAD GPIO\n");
+		goto err_gp_load;
+	}
+	if (gpio_request(OLPC_GPIO_DCON_BLANK, "OLPC-DCON")) {
+		printk(KERN_ERR "olpc-dcon: failed to request BLANK GPIO\n");
+		goto err_gp_blank;
 	}
 
-	/* Mask off the IO base address */
-	gpio_base = lo & 0x0000ff00;
-
 	/* Turn off the event enable for GPIO7 just to be safe */
-	outl(1 << (16+7), gpio_base + GPIOx_EVNT_EN);
+	cs5535_gpio_clear(OLPC_GPIO_DCON_IRQ, GPIO_EVENTS_ENABLE);
+
+	/*
+	 * Determine the current state by reading the GPIO bit; earlier
+	 * stages of the boot process have established the state.
+	 *
+	 * Note that we read GPIO_OUTPUT_VAL rather than GPIO_READ_BACK here;
+	 * this is because OFW will disable input for the pin and set a value.
+	 * READ_BACK will only contain a valid value if input is enabled and
+	 * then a value is set.  So, future readings of the pin can use
+	 * READ_BACK, but the first one cannot.  Awesome, huh?
+	 */
+	dcon_source = cs5535_gpio_isset(OLPC_GPIO_DCON_LOAD, GPIO_OUTPUT_VAL)
+		? DCON_SOURCE_CPU
+		: DCON_SOURCE_DCON;
+	dcon_pending = dcon_source;
 
 	/* Set the directions for the GPIO pins */
-	outl(OUT_GPIOS | (IN_GPIOS << 16), gpio_base + GPIOx_OUT_EN);
-	outl(IN_GPIOS | (OUT_GPIOS << 16), gpio_base + GPIOx_IN_EN);
+	gpio_direction_input(OLPC_GPIO_DCON_STAT0);
+	gpio_direction_input(OLPC_GPIO_DCON_STAT1);
+	gpio_direction_input(OLPC_GPIO_DCON_IRQ);
+	gpio_direction_input(OLPC_GPIO_DCON_BLANK);
+	gpio_direction_output(OLPC_GPIO_DCON_LOAD,
+			dcon_source == DCON_SOURCE_CPU);
 
 	/* Set up the interrupt mappings */
 
 	/* Set the IRQ to pair 2 */
-	geode_gpio_event_irq(OLPC_GPIO_DCON_IRQ, 2);
+	cs5535_gpio_setup_event(OLPC_GPIO_DCON_IRQ, 2, 0);
 
 	/* Enable group 2 to trigger the DCON interrupt */
-	geode_gpio_set_irq(2, DCON_IRQ);
+	cs5535_gpio_set_irq(2, DCON_IRQ);
 
 	/* Select edge level for interrupt (in PIC) */
 	lob = inb(0x4d0);
@@ -65,52 +81,61 @@
 	outb(lob, 0x4d0);
 
 	/* Register the interrupt handler */
-	if (request_irq(DCON_IRQ, &dcon_interrupt, 0, "DCON", &dcon_driver))
-		return -EIO;
+	if (request_irq(DCON_IRQ, &dcon_interrupt, 0, "DCON", &dcon_driver)) {
+		printk(KERN_ERR "olpc-dcon: failed to request DCON's irq\n");
+		goto err_req_irq;
+	}
 
 	/* Clear INV_EN for GPIO7 (DCONIRQ) */
-	outl((1<<(16+7)), gpio_base + GPIOx_INV_EN);
+	cs5535_gpio_clear(OLPC_GPIO_DCON_IRQ, GPIO_INPUT_INVERT);
 
 	/* Enable filter for GPIO12 (DCONBLANK) */
-	outl(1<<(12), gpio_base + GPIOx_IN_FLTR_EN);
+	cs5535_gpio_set(OLPC_GPIO_DCON_BLANK, GPIO_INPUT_FILTER);
 
 	/* Disable filter for GPIO7 */
-	outl(1<<(16+7), gpio_base + GPIOx_IN_FLTR_EN);
+	cs5535_gpio_clear(OLPC_GPIO_DCON_IRQ, GPIO_INPUT_FILTER);
 
 	/* Disable event counter for GPIO7 (DCONIRQ) and GPIO12 (DCONBLANK) */
-
-	outl(1<<(16+7), gpio_base + GPIOx_EVNTCNT_EN);
-	outl(1<<(16+12), gpio_base + GPIOx_EVNTCNT_EN);
+	cs5535_gpio_clear(OLPC_GPIO_DCON_IRQ, GPIO_INPUT_EVENT_COUNT);
+	cs5535_gpio_clear(OLPC_GPIO_DCON_BLANK, GPIO_INPUT_EVENT_COUNT);
 
 	/* Add GPIO12 to the Filter Event Pair #7 */
-	outb(12, gpio_base + GPIO_FE7_SEL);
+	cs5535_gpio_set(OLPC_GPIO_DCON_BLANK, GPIO_FE7_SEL);
 
 	/* Turn off negative Edge Enable for GPIO12 */
-	outl(1<<(16+12), gpio_base + GPIOx_NEGEDGE_EN);
+	cs5535_gpio_clear(OLPC_GPIO_DCON_BLANK, GPIO_NEGATIVE_EDGE_EN);
 
 	/* Enable negative Edge Enable for GPIO7 */
-	outl(1<<7, gpio_base + GPIOx_NEGEDGE_EN);
+	cs5535_gpio_set(OLPC_GPIO_DCON_IRQ, GPIO_NEGATIVE_EDGE_EN);
 
 	/* Zero the filter amount for Filter Event Pair #7 */
-	outw(0, gpio_base + GPIO_FLT7_AMNT);
+	cs5535_gpio_set(0, GPIO_FLTR7_AMOUNT);
 
 	/* Clear the negative edge status for GPIO7 and GPIO12 */
-	outl((1<<7) | (1<<12), gpio_base+0x4c);
+	cs5535_gpio_set(OLPC_GPIO_DCON_IRQ, GPIO_NEGATIVE_EDGE_STS);
+	cs5535_gpio_set(OLPC_GPIO_DCON_BLANK, GPIO_NEGATIVE_EDGE_STS);
 
 	/* FIXME:  Clear the positive status as well, just to be sure */
-	outl((1<<7) | (1<<12), gpio_base+0x48);
+	cs5535_gpio_set(OLPC_GPIO_DCON_IRQ, GPIO_POSITIVE_EDGE_STS);
+	cs5535_gpio_set(OLPC_GPIO_DCON_BLANK, GPIO_POSITIVE_EDGE_STS);
 
 	/* Enable events for GPIO7 (DCONIRQ) and GPIO12 (DCONBLANK) */
-	outl((1<<(7))|(1<<12), gpio_base + GPIOx_EVNT_EN);
-
-	/* Determine the current state by reading the GPIO bit */
-	/* Earlier stages of the boot process have established the state */
-	dcon_source = inl(gpio_base + GPIOx_OUT_VAL) & (1<<11)
-		? DCON_SOURCE_CPU
-		: DCON_SOURCE_DCON;
-	dcon_pending = dcon_source;
+	cs5535_gpio_set(OLPC_GPIO_DCON_IRQ, GPIO_EVENTS_ENABLE);
+	cs5535_gpio_set(OLPC_GPIO_DCON_BLANK, GPIO_EVENTS_ENABLE);
 
 	return 0;
+
+err_req_irq:
+	gpio_free(OLPC_GPIO_DCON_BLANK);
+err_gp_blank:
+	gpio_free(OLPC_GPIO_DCON_LOAD);
+err_gp_load:
+	gpio_free(OLPC_GPIO_DCON_IRQ);
+err_gp_irq:
+	gpio_free(OLPC_GPIO_DCON_STAT1);
+err_gp_stat1:
+	gpio_free(OLPC_GPIO_DCON_STAT0);
+	return -EIO;
 }
 
 static void dcon_wiggle_xo_1(void)
@@ -128,37 +153,44 @@
 	 * simultaneously set AUX1 IN/OUT to GPIO14; ditto for SMB_DATA and
 	 * GPIO15.
  	 */
-	geode_gpio_set(OLPC_GPIO_SMB_CLK|OLPC_GPIO_SMB_DATA, GPIO_OUTPUT_VAL);
-	geode_gpio_set(OLPC_GPIO_SMB_CLK|OLPC_GPIO_SMB_DATA, GPIO_OUTPUT_ENABLE);
-	geode_gpio_clear(OLPC_GPIO_SMB_CLK|OLPC_GPIO_SMB_DATA, GPIO_OUTPUT_AUX1);
-	geode_gpio_clear(OLPC_GPIO_SMB_CLK|OLPC_GPIO_SMB_DATA, GPIO_OUTPUT_AUX2);
-	geode_gpio_clear(OLPC_GPIO_SMB_CLK|OLPC_GPIO_SMB_DATA, GPIO_INPUT_AUX1);
+	cs5535_gpio_set(OLPC_GPIO_SMB_CLK, GPIO_OUTPUT_VAL);
+	cs5535_gpio_set(OLPC_GPIO_SMB_DATA, GPIO_OUTPUT_VAL);
+	cs5535_gpio_set(OLPC_GPIO_SMB_CLK, GPIO_OUTPUT_ENABLE);
+	cs5535_gpio_set(OLPC_GPIO_SMB_DATA, GPIO_OUTPUT_ENABLE);
+	cs5535_gpio_clear(OLPC_GPIO_SMB_CLK, GPIO_OUTPUT_AUX1);
+	cs5535_gpio_clear(OLPC_GPIO_SMB_DATA, GPIO_OUTPUT_AUX1);
+	cs5535_gpio_clear(OLPC_GPIO_SMB_CLK, GPIO_OUTPUT_AUX2);
+	cs5535_gpio_clear(OLPC_GPIO_SMB_DATA, GPIO_OUTPUT_AUX2);
+	cs5535_gpio_clear(OLPC_GPIO_SMB_CLK, GPIO_INPUT_AUX1);
+	cs5535_gpio_clear(OLPC_GPIO_SMB_DATA, GPIO_INPUT_AUX1);
 
 	for (x = 0; x < 16; x++) {
 		udelay(5);
-		geode_gpio_clear(OLPC_GPIO_SMB_CLK, GPIO_OUTPUT_VAL);
+		cs5535_gpio_clear(OLPC_GPIO_SMB_CLK, GPIO_OUTPUT_VAL);
 		udelay(5);
-		geode_gpio_set(OLPC_GPIO_SMB_CLK, GPIO_OUTPUT_VAL);
+		cs5535_gpio_set(OLPC_GPIO_SMB_CLK, GPIO_OUTPUT_VAL);
 	}
 	udelay(5);
-	geode_gpio_set(OLPC_GPIO_SMB_CLK|OLPC_GPIO_SMB_DATA, GPIO_OUTPUT_AUX1);
-	geode_gpio_set(OLPC_GPIO_SMB_CLK|OLPC_GPIO_SMB_DATA, GPIO_INPUT_AUX1);
+	cs5535_gpio_set(OLPC_GPIO_SMB_CLK, GPIO_OUTPUT_AUX1);
+	cs5535_gpio_set(OLPC_GPIO_SMB_DATA, GPIO_OUTPUT_AUX1);
+	cs5535_gpio_set(OLPC_GPIO_SMB_CLK, GPIO_INPUT_AUX1);
+	cs5535_gpio_set(OLPC_GPIO_SMB_DATA, GPIO_INPUT_AUX1);
 }
 
 static void dcon_set_dconload_1(int val)
 {
-	if (val)	
-		outl(1<<11, gpio_base + GPIOx_OUT_VAL);
-	else
-		outl(1<<(11 + 16), gpio_base + GPIOx_OUT_VAL);
+	gpio_set_value(OLPC_GPIO_DCON_LOAD, val);
 }
 
-static int dcon_read_status_xo_1(void)
+static u8 dcon_read_status_xo_1(void)
 {
-	int status = inl(gpio_base + GPIOx_READ_BACK) >> 5;
-	
+	u8 status;
+
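+	/* STAT0 and STAT1 form the two-bit DCON status value */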
+	status = gpio_get_value(OLPC_GPIO_DCON_STAT0);
+	status |= gpio_get_value(OLPC_GPIO_DCON_STAT1) << 1;
+
 	/* Clear the negative edge status for GPIO7 */
-	outl(1 << 7, gpio_base + GPIOx_NEGEDGE_STS);
+	cs5535_gpio_set(OLPC_GPIO_DCON_IRQ, GPIO_NEGATIVE_EDGE_STS);
 
 	return status;
 }
diff --git a/drivers/staging/olpc_dcon/olpc_dcon_xo_1_5.c b/drivers/staging/olpc_dcon/olpc_dcon_xo_1_5.c
index cca6a23..4f56098 100644
--- a/drivers/staging/olpc_dcon/olpc_dcon_xo_1_5.c
+++ b/drivers/staging/olpc_dcon/olpc_dcon_xo_1_5.c
@@ -195,9 +195,9 @@
 	}
 }
 
-static int dcon_read_status_xo_1_5(void) 
+static u8 dcon_read_status_xo_1_5(void)
 {
-	int status;
+	u8 status;
 	
 	if (!dcon_was_irq())
 		return -1;
diff --git a/drivers/staging/pohmelfs/net.c b/drivers/staging/pohmelfs/net.c
index 9279897..b2e9186 100644
--- a/drivers/staging/pohmelfs/net.c
+++ b/drivers/staging/pohmelfs/net.c
@@ -413,7 +413,7 @@
 				if (dentry) {
 					alias = d_materialise_unique(dentry, &npi->vfs_inode);
 					if (alias)
-						dput(dentry);
+						dput(alias);
 				}
 
 				dput(dentry);
diff --git a/drivers/staging/samsung-laptop/samsung-laptop.c b/drivers/staging/samsung-laptop/samsung-laptop.c
index ac2bf11..701e8d5 100644
--- a/drivers/staging/samsung-laptop/samsung-laptop.c
+++ b/drivers/staging/samsung-laptop/samsung-laptop.c
@@ -269,7 +269,7 @@
 	return 0;
 }
 
-static struct backlight_ops backlight_ops = {
+static const struct backlight_ops backlight_ops = {
 	.get_brightness	= get_brightness,
 	.update_status	= update_status,
 };
diff --git a/drivers/staging/smbfs/dir.c b/drivers/staging/smbfs/dir.c
index dd612f5..87a3a9b 100644
--- a/drivers/staging/smbfs/dir.c
+++ b/drivers/staging/smbfs/dir.c
@@ -403,12 +403,6 @@
 void
 smb_new_dentry(struct dentry *dentry)
 {
-	struct smb_sb_info *server = server_from_dentry(dentry);
-
-	if (server->mnt->flags & SMB_MOUNT_CASE)
-		d_set_d_op(dentry, &smbfs_dentry_operations_case);
-	else
-		d_set_d_op(dentry, &smbfs_dentry_operations);
 	dentry->d_time = jiffies;
 }
 
@@ -440,7 +434,6 @@
 	struct smb_fattr finfo;
 	struct inode *inode;
 	int error;
-	struct smb_sb_info *server;
 
 	error = -ENAMETOOLONG;
 	if (dentry->d_name.len > SMB_MAXNAMELEN)
@@ -468,12 +461,6 @@
 		inode = smb_iget(dir->i_sb, &finfo);
 		if (inode) {
 	add_entry:
-			server = server_from_dentry(dentry);
-			if (server->mnt->flags & SMB_MOUNT_CASE)
-				d_set_d_op(dentry, &smbfs_dentry_operations_case);
-			else
-				d_set_d_op(dentry, &smbfs_dentry_operations);
-
 			d_add(dentry, inode);
 			smb_renew_times(dentry);
 			error = 0;
diff --git a/drivers/staging/smbfs/inode.c b/drivers/staging/smbfs/inode.c
index 244319d..0778589 100644
--- a/drivers/staging/smbfs/inode.c
+++ b/drivers/staging/smbfs/inode.c
@@ -614,6 +614,10 @@
 		printk(KERN_ERR "smbfs: failed to start smbiod\n");
 		goto out_no_smbiod;
 	}
+	if (server->mnt->flags & SMB_MOUNT_CASE)
+		sb->s_d_op = &smbfs_dentry_operations_case;
+	else
+		sb->s_d_op = &smbfs_dentry_operations;
 
 	/*
 	 * Keep the super block locked while we get the root inode.
diff --git a/drivers/staging/smbfs/proto.h b/drivers/staging/smbfs/proto.h
index 05939a6..3883cb1 100644
--- a/drivers/staging/smbfs/proto.h
+++ b/drivers/staging/smbfs/proto.h
@@ -38,6 +38,8 @@
 extern const struct file_operations smb_dir_operations;
 extern const struct inode_operations smb_dir_inode_operations;
 extern const struct inode_operations smb_dir_inode_operations_unix;
+extern const struct dentry_operations smbfs_dentry_operations_case;
+extern const struct dentry_operations smbfs_dentry_operations;
 extern void smb_new_dentry(struct dentry *dentry);
 extern void smb_renew_times(struct dentry *dentry);
 /* cache.c */
diff --git a/drivers/telephony/ixj.c b/drivers/telephony/ixj.c
index 0d236f4..b001019 100644
--- a/drivers/telephony/ixj.c
+++ b/drivers/telephony/ixj.c
@@ -284,12 +284,11 @@
 
 module_param(ixjdebug, int, 0);
 
-static struct pci_device_id ixj_pci_tbl[] __devinitdata = {
+static DEFINE_PCI_DEVICE_TABLE(ixj_pci_tbl) = {
 	{ PCI_VENDOR_ID_QUICKNET, PCI_DEVICE_ID_QUICKNET_XJ,
 	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
 	{ }
 };
-
 MODULE_DEVICE_TABLE(pci, ixj_pci_tbl);
 
 /************************************************************************
@@ -6581,7 +6580,8 @@
 	case IXJCTL_SET_FILTER:
 		if (copy_from_user(&jf, argp, sizeof(jf))) 
 			retval = -EFAULT;
-		retval = ixj_init_filter(j, &jf);
+		else
+			retval = ixj_init_filter(j, &jf);
 		break;
 	case IXJCTL_SET_FILTER_RAW:
 		if (copy_from_user(&jfr, argp, sizeof(jfr))) 
diff --git a/drivers/usb/gadget/imx_udc.c b/drivers/usb/gadget/imx_udc.c
index 1210534..5408186 100644
--- a/drivers/usb/gadget/imx_udc.c
+++ b/drivers/usb/gadget/imx_udc.c
@@ -1320,7 +1320,7 @@
 };
 
 /*******************************************************************************
- * USB gadged driver functions
+ * USB gadget driver functions
  *******************************************************************************
  */
 int usb_gadget_probe_driver(struct usb_gadget_driver *driver,
diff --git a/drivers/usb/gadget/langwell_udc.c b/drivers/usb/gadget/langwell_udc.c
index 7779724..1eca8b4 100644
--- a/drivers/usb/gadget/langwell_udc.c
+++ b/drivers/usb/gadget/langwell_udc.c
@@ -3086,7 +3086,7 @@
 
 	kfree(dev->ep);
 
-	/* diable IRQ handler */
+	/* disable IRQ handler */
 	if (dev->got_irq)
 		free_irq(pdev->irq, dev);
 
@@ -3406,7 +3406,7 @@
 	/* disable interrupt and set controller to stop state */
 	langwell_udc_stop(dev);
 
-	/* diable IRQ handler */
+	/* disable IRQ handler */
 	if (dev->got_irq)
 		free_irq(pdev->irq, dev);
 	dev->got_irq = 0;
diff --git a/drivers/usb/host/fhci-hcd.c b/drivers/usb/host/fhci-hcd.c
index 20092a2..12fd184 100644
--- a/drivers/usb/host/fhci-hcd.c
+++ b/drivers/usb/host/fhci-hcd.c
@@ -98,13 +98,13 @@
 	usb->intr_nesting_cnt--;
 }
 
-/* diable the usb interrupt */
+/* disable the usb interrupt */
 void fhci_usb_disable_interrupt(struct fhci_usb *usb)
 {
 	struct fhci_hcd *fhci = usb->fhci;
 
 	if (usb->intr_nesting_cnt == 0) {
-		/* diable the timer interrupt */
+		/* disable the timer interrupt */
 		disable_irq_nosync(fhci->timer->irq);
 
 		/* disable the usb interrupt */
diff --git a/drivers/usb/host/fhci-tds.c b/drivers/usb/host/fhci-tds.c
index 7be548c..38fe058 100644
--- a/drivers/usb/host/fhci-tds.c
+++ b/drivers/usb/host/fhci-tds.c
@@ -271,8 +271,8 @@
 
 /*
  * Collect the submitted frames and inform the application about them
- * It is also prepearing the TDs for new frames. If the Tx interrupts
- * are diabled, the application should call that routine to get
+ * It is also preparing the TDs for new frames. If the Tx interrupts
+ * are disabled, the application should call that routine to get
  * confirmation about the submitted frames. Otherwise, the routine is
  * called from the interrupt service routine during the Tx interrupt.
  * In that case the application is informed by calling the application
diff --git a/drivers/usb/host/imx21-hcd.c b/drivers/usb/host/imx21-hcd.c
index e49b75a..f90d003 100644
--- a/drivers/usb/host/imx21-hcd.c
+++ b/drivers/usb/host/imx21-hcd.c
@@ -1658,7 +1658,7 @@
 
 	spin_lock_irqsave(&imx21->lock, flags);
 
-	/* Reset the Host controler modules */
+	/* Reset the Host controller modules */
 	writel(USBOTG_RST_RSTCTRL | USBOTG_RST_RSTRH |
 		USBOTG_RST_RSTHSIE | USBOTG_RST_RSTHC,
 		imx21->regs + USBOTG_RST_CTRL);
diff --git a/drivers/usb/host/oxu210hp-hcd.c b/drivers/usb/host/oxu210hp-hcd.c
index 32149be..e0cb12b 100644
--- a/drivers/usb/host/oxu210hp-hcd.c
+++ b/drivers/usb/host/oxu210hp-hcd.c
@@ -3094,7 +3094,7 @@
 
 	/* Some boards (mostly VIA?) report bogus overcurrent indications,
 	 * causing massive log spam unless we completely ignore them.  It
-	 * may be relevant that VIA VT8235 controlers, where PORT_POWER is
+	 * may be relevant that VIA VT8235 controllers, where PORT_POWER is
 	 * always set, seem to clear PORT_OCC and PORT_CSC when writing to
 	 * PORT_POWER; that's surprising, but maybe within-spec.
 	 */
diff --git a/drivers/usb/misc/adutux.c b/drivers/usb/misc/adutux.c
index 44f8b922..a6afd15 100644
--- a/drivers/usb/misc/adutux.c
+++ b/drivers/usb/misc/adutux.c
@@ -717,7 +717,7 @@
 		goto exit;
 	}
 
-	/* allocate memory for our device state and intialize it */
+	/* allocate memory for our device state and initialize it */
 	dev = kzalloc(sizeof(struct adu_device), GFP_KERNEL);
 	if (dev == NULL) {
 		dev_err(&interface->dev, "Out of memory\n");
diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c
index c9078e4..e573e4704 100644
--- a/drivers/usb/misc/iowarrior.c
+++ b/drivers/usb/misc/iowarrior.c
@@ -769,7 +769,7 @@
 	int i;
 	int retval = -ENOMEM;
 
-	/* allocate memory for our device state and intialize it */
+	/* allocate memory for our device state and initialize it */
 	dev = kzalloc(sizeof(struct iowarrior), GFP_KERNEL);
 	if (dev == NULL) {
 		dev_err(&interface->dev, "Out of memory\n");
diff --git a/drivers/usb/misc/ldusb.c b/drivers/usb/misc/ldusb.c
index edffef6..eefb827 100644
--- a/drivers/usb/misc/ldusb.c
+++ b/drivers/usb/misc/ldusb.c
@@ -642,7 +642,7 @@
 	int i;
 	int retval = -ENOMEM;
 
-	/* allocate memory for our device state and intialize it */
+	/* allocate memory for our device state and initialize it */
 
 	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
 	if (dev == NULL) {
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index 9b162df..ed58c6c 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -1684,7 +1684,7 @@
 	struct musb_hw_ep	*hw_ep;
 	unsigned		count = 0;
 
-	/* intialize endpoint list just once */
+	/* initialize endpoint list just once */
 	INIT_LIST_HEAD(&(musb->g.ep_list));
 
 	for (epnum = 0, hw_ep = musb->endpoints;
@@ -1765,7 +1765,7 @@
  *
  * -EINVAL something went wrong (not driver)
  * -EBUSY another gadget is already using the controller
- * -ENOMEM no memeory to perform the operation
+ * -ENOMEM no memory to perform the operation
  *
  * @param driver the gadget driver
  * @param bind the driver's bind function
diff --git a/drivers/usb/wusbcore/wa-rpipe.c b/drivers/usb/wusbcore/wa-rpipe.c
index c7b1d81..8cb9d80 100644
--- a/drivers/usb/wusbcore/wa-rpipe.c
+++ b/drivers/usb/wusbcore/wa-rpipe.c
@@ -49,7 +49,7 @@
  *
  *  USB Stack port number    4 (1 based)
  *  WUSB code port index     3 (0 based)
- *  USB Addresss             5 (2 based -- 0 is for default, 1 for root hub)
+ *  USB Address              5 (2 based -- 0 is for default, 1 for root hub)
  *
  *  Now, because we don't use the concept as default address exactly
  *  like the (wired) USB code does, we need to kind of skip it. So we
diff --git a/drivers/video/atmel_lcdfb.c b/drivers/video/atmel_lcdfb.c
index 8dce251..bac16345 100644
--- a/drivers/video/atmel_lcdfb.c
+++ b/drivers/video/atmel_lcdfb.c
@@ -111,7 +111,7 @@
 	return lcdc_readl(sinfo, ATMEL_LCDC_CONTRAST_VAL);
 }
 
-static struct backlight_ops atmel_lcdc_bl_ops = {
+static const struct backlight_ops atmel_lcdc_bl_ops = {
 	.update_status = atmel_bl_update_status,
 	.get_brightness = atmel_bl_get_brightness,
 };
diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c
index 34a0851..dd9de2e 100644
--- a/drivers/video/aty/aty128fb.c
+++ b/drivers/video/aty/aty128fb.c
@@ -1786,7 +1786,7 @@
 	return bd->props.brightness;
 }
 
-static struct backlight_ops aty128_bl_data = {
+static const struct backlight_ops aty128_bl_data = {
 	.get_brightness	= aty128_bl_get_brightness,
 	.update_status	= aty128_bl_update_status,
 };
diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c
index 5a3ce3a..767ab4f 100644
--- a/drivers/video/aty/atyfb_base.c
+++ b/drivers/video/aty/atyfb_base.c
@@ -2221,7 +2221,7 @@
 	return bd->props.brightness;
 }
 
-static struct backlight_ops aty_bl_data = {
+static const struct backlight_ops aty_bl_data = {
 	.get_brightness = aty_bl_get_brightness,
 	.update_status	= aty_bl_update_status,
 };
diff --git a/drivers/video/aty/radeon_backlight.c b/drivers/video/aty/radeon_backlight.c
index 256966e..9b811dd 100644
--- a/drivers/video/aty/radeon_backlight.c
+++ b/drivers/video/aty/radeon_backlight.c
@@ -128,7 +128,7 @@
 	return bd->props.brightness;
 }
 
-static struct backlight_ops radeon_bl_data = {
+static const struct backlight_ops radeon_bl_data = {
 	.get_brightness = radeon_bl_get_brightness,
 	.update_status	= radeon_bl_update_status,
 };
diff --git a/drivers/video/backlight/88pm860x_bl.c b/drivers/video/backlight/88pm860x_bl.c
index 38ffc3f..c789c46 100644
--- a/drivers/video/backlight/88pm860x_bl.c
+++ b/drivers/video/backlight/88pm860x_bl.c
@@ -155,7 +155,7 @@
 	return -EINVAL;
 }
 
-static struct backlight_ops pm860x_backlight_ops = {
+static const struct backlight_ops pm860x_backlight_ops = {
 	.options	= BL_CORE_SUSPENDRESUME,
 	.update_status	= pm860x_backlight_update_status,
 	.get_brightness	= pm860x_backlight_get_brightness,
diff --git a/drivers/video/backlight/l4f00242t03.c b/drivers/video/backlight/l4f00242t03.c
index c67801e..98ad3e5 100644
--- a/drivers/video/backlight/l4f00242t03.c
+++ b/drivers/video/backlight/l4f00242t03.c
@@ -25,7 +25,7 @@
 struct l4f00242t03_priv {
 	struct spi_device	*spi;
 	struct lcd_device	*ld;
-	int lcd_on:1;
+	int lcd_state;
 	struct regulator *io_reg;
 	struct regulator *core_reg;
 };
@@ -62,11 +62,36 @@
 		regulator_enable(priv->core_reg);
 	}
 
+	l4f00242t03_reset(pdata->reset_gpio);
+
 	gpio_set_value(pdata->data_enable_gpio, 1);
 	msleep(60);
 	spi_write(spi, (const u8 *)cmd, ARRAY_SIZE(cmd) * sizeof(u16));
 }
 
+static void l4f00242t03_lcd_powerdown(struct spi_device *spi)
+{
+	struct l4f00242t03_pdata *pdata = spi->dev.platform_data;
+	struct l4f00242t03_priv *priv = dev_get_drvdata(&spi->dev);
+
+	dev_dbg(&spi->dev, "Powering down LCD\n");
+
+	gpio_set_value(pdata->data_enable_gpio, 0);
+
+	if (priv->io_reg)
+		regulator_disable(priv->io_reg);
+
+	if (priv->core_reg)
+		regulator_disable(priv->core_reg);
+}
+
+static int l4f00242t03_lcd_power_get(struct lcd_device *ld)
+{
+	struct l4f00242t03_priv *priv = lcd_get_data(ld);
+
+	return priv->lcd_state;
+}
+
 static int l4f00242t03_lcd_power_set(struct lcd_device *ld, int power)
 {
 	struct l4f00242t03_priv *priv = lcd_get_data(ld);
@@ -79,35 +104,54 @@
 	const u16 disoff = 0x28;
 
 	if (power <= FB_BLANK_NORMAL) {
-		if (priv->lcd_on)
-			return 0;
+		if (priv->lcd_state <= FB_BLANK_NORMAL) {
+			/* Do nothing, the LCD is running */
+		} else if (priv->lcd_state < FB_BLANK_POWERDOWN) {
+			dev_dbg(&spi->dev, "Resuming LCD\n");
 
-		dev_dbg(&spi->dev, "turning on LCD\n");
+			spi_write(spi, (const u8 *)&slpout, sizeof(u16));
+			msleep(60);
+			spi_write(spi, (const u8 *)&dison, sizeof(u16));
+		} else {
+			/* priv->lcd_state == FB_BLANK_POWERDOWN */
+			l4f00242t03_lcd_init(spi);
+			priv->lcd_state = FB_BLANK_VSYNC_SUSPEND;
+			l4f00242t03_lcd_power_set(priv->ld, power);
+		}
+	} else if (power < FB_BLANK_POWERDOWN) {
+		if (priv->lcd_state <= FB_BLANK_NORMAL) {
+			/* Put the display into standby */
+			dev_dbg(&spi->dev, "Standby the LCD\n");
 
-		spi_write(spi, (const u8 *)&slpout, sizeof(u16));
-		msleep(60);
-		spi_write(spi, (const u8 *)&dison, sizeof(u16));
-
-		priv->lcd_on = 1;
+			spi_write(spi, (const u8 *)&disoff, sizeof(u16));
+			msleep(60);
+			spi_write(spi, (const u8 *)&slpin, sizeof(u16));
+		} else if (priv->lcd_state < FB_BLANK_POWERDOWN) {
+			/* Do nothing, the LCD is already in standby */
+		} else {
+			/* priv->lcd_state == FB_BLANK_POWERDOWN */
+			l4f00242t03_lcd_init(spi);
+			priv->lcd_state = FB_BLANK_UNBLANK;
+			l4f00242t03_lcd_power_set(ld, power);
+		}
 	} else {
-		if (!priv->lcd_on)
-			return 0;
-
-		dev_dbg(&spi->dev, "turning off LCD\n");
-
-		spi_write(spi, (const u8 *)&disoff, sizeof(u16));
-		msleep(60);
-		spi_write(spi, (const u8 *)&slpin, sizeof(u16));
-
-		priv->lcd_on = 0;
+		/* power == FB_BLANK_POWERDOWN */
+		if (priv->lcd_state != FB_BLANK_POWERDOWN) {
+			/* Clear the screen before shutting down */
+			spi_write(spi, (const u8 *)&disoff, sizeof(u16));
+			msleep(60);
+			l4f00242t03_lcd_powerdown(spi);
+		}
 	}
 
+	priv->lcd_state = power;
+
 	return 0;
 }
 
 static struct lcd_ops l4f_ops = {
 	.set_power	= l4f00242t03_lcd_power_set,
-	.get_power	= NULL,
+	.get_power	= l4f00242t03_lcd_power_get,
 };
 
 static int __devinit l4f00242t03_probe(struct spi_device *spi)
@@ -185,9 +229,9 @@
 	}
 
 	/* Init the LCD */
-	l4f00242t03_reset(pdata->reset_gpio);
 	l4f00242t03_lcd_init(spi);
-	l4f00242t03_lcd_power_set(priv->ld, 1);
+	priv->lcd_state = FB_BLANK_VSYNC_SUSPEND;
+	l4f00242t03_lcd_power_set(priv->ld, FB_BLANK_UNBLANK);
 
 	dev_info(&spi->dev, "Epson l4f00242t03 lcd probed.\n");
 
@@ -214,9 +258,11 @@
 	struct l4f00242t03_priv *priv = dev_get_drvdata(&spi->dev);
 	struct l4f00242t03_pdata *pdata = priv->spi->dev.platform_data;
 
-	l4f00242t03_lcd_power_set(priv->ld, 0);
+	l4f00242t03_lcd_power_set(priv->ld, FB_BLANK_POWERDOWN);
 	lcd_device_unregister(priv->ld);
 
+	dev_set_drvdata(&spi->dev, NULL);
+
 	gpio_free(pdata->data_enable_gpio);
 	gpio_free(pdata->reset_gpio);
 
@@ -230,6 +276,15 @@
 	return 0;
 }
 
+static void l4f00242t03_shutdown(struct spi_device *spi)
+{
+	struct l4f00242t03_priv *priv = dev_get_drvdata(&spi->dev);
+
+	if (priv)
+		l4f00242t03_lcd_power_set(priv->ld, FB_BLANK_POWERDOWN);
+
+}
+
 static struct spi_driver l4f00242t03_driver = {
 	.driver = {
 		.name	= "l4f00242t03",
@@ -237,6 +292,7 @@
 	},
 	.probe		= l4f00242t03_probe,
 	.remove		= __devexit_p(l4f00242t03_remove),
+	.shutdown	= l4f00242t03_shutdown,
 };
 
 static __init int l4f00242t03_init(void)
diff --git a/drivers/video/backlight/max8925_bl.c b/drivers/video/backlight/max8925_bl.c
index b2b2c7b..209acc1 100644
--- a/drivers/video/backlight/max8925_bl.c
+++ b/drivers/video/backlight/max8925_bl.c
@@ -92,7 +92,7 @@
 	return ret;
 }
 
-static struct backlight_ops max8925_backlight_ops = {
+static const struct backlight_ops max8925_backlight_ops = {
 	.options	= BL_CORE_SUSPENDRESUME,
 	.update_status	= max8925_backlight_update_status,
 	.get_brightness	= max8925_backlight_get_brightness,
diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c
index 915448e..c97491b 100644
--- a/drivers/video/console/vgacon.c
+++ b/drivers/video/console/vgacon.c
@@ -375,7 +375,8 @@
 	u16 saved1, saved2;
 	volatile u16 *p;
 
-	if (screen_info.orig_video_isVGA == VIDEO_TYPE_VLFB) {
+	if (screen_info.orig_video_isVGA == VIDEO_TYPE_VLFB ||
+	    screen_info.orig_video_isVGA == VIDEO_TYPE_EFI) {
 	      no_vga:
 #ifdef CONFIG_DUMMY_CONSOLE
 		conswitchp = &dummy_con;
diff --git a/drivers/video/nvidia/nv_backlight.c b/drivers/video/nvidia/nv_backlight.c
index 2fb552a..6aac6d1 100644
--- a/drivers/video/nvidia/nv_backlight.c
+++ b/drivers/video/nvidia/nv_backlight.c
@@ -87,7 +87,7 @@
 	return bd->props.brightness;
 }
 
-static struct backlight_ops nvidia_bl_ops = {
+static const struct backlight_ops nvidia_bl_ops = {
 	.get_brightness = nvidia_bl_get_brightness,
 	.update_status	= nvidia_bl_update_status,
 };
diff --git a/drivers/video/omap2/displays/panel-taal.c b/drivers/video/omap2/displays/panel-taal.c
index e1c765d..61026f9 100644
--- a/drivers/video/omap2/displays/panel-taal.c
+++ b/drivers/video/omap2/displays/panel-taal.c
@@ -465,7 +465,7 @@
 	return 0;
 }
 
-static struct backlight_ops taal_bl_ops = {
+static const struct backlight_ops taal_bl_ops = {
 	.get_brightness = taal_bl_get_intensity,
 	.update_status  = taal_bl_update_status,
 };
diff --git a/drivers/video/riva/fbdev.c b/drivers/video/riva/fbdev.c
index 618f36b..da38818 100644
--- a/drivers/video/riva/fbdev.c
+++ b/drivers/video/riva/fbdev.c
@@ -331,7 +331,7 @@
 	return bd->props.brightness;
 }
 
-static struct backlight_ops riva_bl_ops = {
+static const struct backlight_ops riva_bl_ops = {
 	.get_brightness = riva_bl_get_brightness,
 	.update_status	= riva_bl_update_status,
 };
diff --git a/drivers/video/sh_mobile_hdmi.c b/drivers/video/sh_mobile_hdmi.c
index e3eb77d..74d9f54 100644
--- a/drivers/video/sh_mobile_hdmi.c
+++ b/drivers/video/sh_mobile_hdmi.c
@@ -22,6 +22,7 @@
 #include <linux/slab.h>
 #include <linux/types.h>
 #include <linux/workqueue.h>
+#include <sound/soc.h>
 #include <sound/soc-dapm.h>
 #include <sound/initval.h>
 
diff --git a/drivers/video/sstfb.c b/drivers/video/sstfb.c
index dee64c3..2ab7041 100644
--- a/drivers/video/sstfb.c
+++ b/drivers/video/sstfb.c
@@ -536,7 +536,7 @@
 	fbiinit2 = sst_read(FBIINIT2);
 	fbiinit3 = sst_read(FBIINIT3);
 
-	/* everything is reset. we enable fbiinit2/3 remap : dac acces ok */
+	/* everything is reset. we enable fbiinit2/3 remap : dac access ok */
 	pci_write_config_dword(sst_dev, PCI_INIT_ENABLE,
 	                       PCI_EN_INIT_WR | PCI_REMAP_DAC );
 
diff --git a/drivers/w1/slaves/Kconfig b/drivers/w1/slaves/Kconfig
index 1f51366..f0c9096 100644
--- a/drivers/w1/slaves/Kconfig
+++ b/drivers/w1/slaves/Kconfig
@@ -16,6 +16,17 @@
 	  Say Y here if you want to connect 1-wire
 	  simple 64bit memory rom(ds2401/ds2411/ds1990*) to your wire.
 
+config W1_SLAVE_DS2423
+	tristate "Counter 1-wire device (DS2423)"
+	select CRC16
+	help
+	  If you enable this you can read the counter values available
+	  in the DS2423 chip from the w1_slave file under the
+	  sysfs file system.
+
+	  Say Y here if you want to use a 1-wire
+	  counter family device (DS2423).
+
 config W1_SLAVE_DS2431
 	tristate "1kb EEPROM family support (DS2431)"
 	help
diff --git a/drivers/w1/slaves/Makefile b/drivers/w1/slaves/Makefile
index f1f51f1..3c76350 100644
--- a/drivers/w1/slaves/Makefile
+++ b/drivers/w1/slaves/Makefile
@@ -4,6 +4,7 @@
 
 obj-$(CONFIG_W1_SLAVE_THERM)	+= w1_therm.o
 obj-$(CONFIG_W1_SLAVE_SMEM)	+= w1_smem.o
+obj-$(CONFIG_W1_SLAVE_DS2423)	+= w1_ds2423.o
 obj-$(CONFIG_W1_SLAVE_DS2431)	+= w1_ds2431.o
 obj-$(CONFIG_W1_SLAVE_DS2433)	+= w1_ds2433.o
 obj-$(CONFIG_W1_SLAVE_DS2760)	+= w1_ds2760.o
diff --git a/drivers/w1/slaves/w1_ds2423.c b/drivers/w1/slaves/w1_ds2423.c
new file mode 100644
index 0000000..7a7dbe50
--- /dev/null
+++ b/drivers/w1/slaves/w1_ds2423.c
@@ -0,0 +1,166 @@
+/*
+ *	w1_ds2423.c
+ *
+ * Copyright (c) 2010 Mika Laitio <lamikr@pilppa.org>
+ *
+ * This driver reads the value of the 4 counters and reports them via the
+ * w1_slave file in the sys filesystem.
+ * Inspired by the w1_therm and w1_ds2431 drivers.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/device.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/crc16.h>
+
+#include "../w1.h"
+#include "../w1_int.h"
+#include "../w1_family.h"
+
+#define CRC16_VALID	0xb001
+#define CRC16_INIT	0
+
+#define COUNTER_COUNT 4
+#define READ_BYTE_COUNT 42
+
+static ssize_t w1_counter_read(struct device *device,
+	struct device_attribute *attr, char *buf);
+
+static struct device_attribute w1_counter_attr =
+	__ATTR(w1_slave, S_IRUGO, w1_counter_read, NULL);
+
+static ssize_t w1_counter_read(struct device *device,
+	struct device_attribute *attr, char *out_buf)
+{
+	struct w1_slave *sl = dev_to_w1_slave(device);
+	struct w1_master *dev = sl->master;
+	u8 rbuf[COUNTER_COUNT * READ_BYTE_COUNT];
+	u8 wrbuf[3];
+	int rom_addr;
+	int read_byte_count;
+	int result;
+	ssize_t c;
+	int ii;
+	int p;
+	int crc;
+
+	c		= PAGE_SIZE;
+	rom_addr	= (12 << 5) + 31;
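+	/* READ MEMORY + COUNTER (0xA5), starting at the last byte of page 12 */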
+	wrbuf[0]	= 0xA5;
+	wrbuf[1]	= rom_addr & 0xFF;
+	wrbuf[2]	= rom_addr >> 8;
+	mutex_lock(&dev->mutex);
+	if (!w1_reset_select_slave(sl)) {
+		w1_write_block(dev, wrbuf, 3);
+		read_byte_count = 0;
+		for (p = 0; p < 4; p++) {
+			/*
+			 * 1 byte for first bytes in ram page read
+			 * 4 bytes for counter
+			 * 4 bytes for zero bits
+			 * 2 bytes for crc
+			 * 31 remaining bytes from the ram page
+			 */
+			read_byte_count += w1_read_block(dev,
+				rbuf + (p * READ_BYTE_COUNT), READ_BYTE_COUNT);
+			for (ii = 0; ii < READ_BYTE_COUNT; ++ii)
+				c -= snprintf(out_buf + PAGE_SIZE - c,
+					c, "%02x ",
+					rbuf[(p * READ_BYTE_COUNT) + ii]);
+			if (read_byte_count != (p + 1) * READ_BYTE_COUNT) {
+				dev_warn(device,
+					"w1_counter_read() returned %u bytes "
+					"instead of %d bytes wanted.\n",
+					read_byte_count,
+					READ_BYTE_COUNT);
+				c -= snprintf(out_buf + PAGE_SIZE - c,
+					c, "crc=NO\n");
+			} else {
+				if (p == 0) {
+					crc = crc16(CRC16_INIT, wrbuf, 3);
+					crc = crc16(crc, rbuf, 11);
+				} else {
+					/*
+					 * DS2423 calculates crc from all bytes
+					 * read after the previous crc bytes.
+					 */
+					crc = crc16(CRC16_INIT,
+						(rbuf + 11) +
+						((p - 1) * READ_BYTE_COUNT),
+						READ_BYTE_COUNT);
+				}
+				if (crc == CRC16_VALID) {
+					result = 0;
+					for (ii = 4; ii > 0; ii--) {
+						result <<= 8;
+						result |= rbuf[(p *
+							READ_BYTE_COUNT) + ii];
+					}
+					c -= snprintf(out_buf + PAGE_SIZE - c,
+						c, "crc=YES c=%d\n", result);
+				} else {
+					c -= snprintf(out_buf + PAGE_SIZE - c,
+						c, "crc=NO\n");
+				}
+			}
+		}
+	} else {
+		c -= snprintf(out_buf + PAGE_SIZE - c, c, "Connection error");
+	}
+	mutex_unlock(&dev->mutex);
+	return PAGE_SIZE - c;
+}
+
+static int w1_f1d_add_slave(struct w1_slave *sl)
+{
+	return device_create_file(&sl->dev, &w1_counter_attr);
+}
+
+static void w1_f1d_remove_slave(struct w1_slave *sl)
+{
+	device_remove_file(&sl->dev, &w1_counter_attr);
+}
+
+static struct w1_family_ops w1_f1d_fops = {
+	.add_slave      = w1_f1d_add_slave,
+	.remove_slave   = w1_f1d_remove_slave,
+};
+
+static struct w1_family w1_family_1d = {
+	.fid = W1_COUNTER_DS2423,
+	.fops = &w1_f1d_fops,
+};
+
+static int __init w1_f1d_init(void)
+{
+	return w1_register_family(&w1_family_1d);
+}
+
+static void __exit w1_f1d_exit(void)
+{
+	w1_unregister_family(&w1_family_1d);
+}
+
+module_init(w1_f1d_init);
+module_exit(w1_f1d_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mika Laitio <lamikr@pilppa.org>");
+MODULE_DESCRIPTION("w1 family 1d driver for DS2423, 4 counters and 4kb ram");
diff --git a/drivers/w1/w1_family.h b/drivers/w1/w1_family.h
index 3ca1b92..f3b636d 100644
--- a/drivers/w1/w1_family.h
+++ b/drivers/w1/w1_family.h
@@ -30,6 +30,7 @@
 #define W1_FAMILY_SMEM_01	0x01
 #define W1_FAMILY_SMEM_81	0x81
 #define W1_THERM_DS18S20 	0x10
+#define W1_COUNTER_DS2423	0x1D
 #define W1_THERM_DS1822  	0x22
 #define W1_EEPROM_DS2433  	0x23
 #define W1_THERM_DS18B20 	0x28
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index a5ad77e..2e2400e 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -409,15 +409,26 @@
 	  Most people will say N.
 
 config F71808E_WDT
-	tristate "Fintek F71808E, F71882FG and F71889FG Watchdog"
+	tristate "Fintek F71808E, F71862FG, F71869, F71882FG and F71889FG Watchdog"
 	depends on X86 && EXPERIMENTAL
 	help
 	  This is the driver for the hardware watchdog on the Fintek
-	  F71808E, F71882FG and F71889FG Super I/O controllers.
+	  F71808E, F71862FG, F71869, F71882FG and F71889FG Super I/O controllers.
 
 	  You can compile this driver directly into the kernel, or use
 	  it as a module.  The module will be called f71808e_wdt.
 
+config SP5100_TCO
+	tristate "AMD/ATI SP5100 TCO Timer/Watchdog"
+	depends on X86 && PCI
+	---help---
+	  Hardware watchdog driver for the AMD/ATI SP5100 chipset. The TCO
+	  (Total Cost of Ownership) timer is a watchdog timer that will reboot
+	  the machine after its expiration. The expiration time can be
+	  configured with the "heartbeat" parameter.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called sp5100_tco.
 
 config GEODE_WDT
 	tristate "AMD Geode CS5535/CS5536 Watchdog"
@@ -631,6 +642,24 @@
 
 	  Most people will say N.
 
+config NV_TCO
+	tristate "nVidia TCO Timer/Watchdog"
+	depends on X86 && PCI
+	---help---
+	  Hardware driver for the TCO timer built into the nVidia Hub family
+	  (such as the MCP51).  The TCO (Total Cost of Ownership) timer is a
+	  watchdog timer that will reboot the machine after its second
+	  expiration. The expiration time can be configured with the
+	  "heartbeat" parameter.
+
+	  On some motherboards the driver may fail to reset the chipset's
+	  NO_REBOOT flag which prevents the watchdog from rebooting the
+	  machine. If this is the case you will get a kernel message like
+	  "failed to reset NO_REBOOT flag, reboot disabled by hardware".
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called nv_tco.
+
 config RDC321X_WDT
 	tristate "RDC R-321x SoC watchdog"
 	depends on X86_RDC321X
@@ -722,14 +751,15 @@
 	  Most people will say N.
 
 config W83627HF_WDT
-	tristate "W83627HF Watchdog Timer"
+	tristate "W83627HF/W83627DHG Watchdog Timer"
 	depends on X86
 	---help---
 	  This is the driver for the hardware watchdog on the W83627HF chipset
 	  as used in Advantech PC-9578 and Tyan S2721-533 motherboards
-	  (and likely others).  This watchdog simply watches your kernel to
-	  make sure it doesn't freeze, and if it does, it reboots your computer
-	  after a certain amount of time.
+	  (and likely others). The driver also supports the W83627DHG chip.
+	  This watchdog simply watches your kernel to make sure it doesn't
+	  freeze, and if it does, it reboots your computer after a certain
+	  amount of time.
 
 	  To compile this driver as a module, choose M here: the
 	  module will be called w83627hf_wdt.
@@ -832,10 +862,22 @@
 
 # M68K Architecture
 
-# M68KNOMMU Architecture
+config M548x_WATCHDOG
+	tristate "MCF548x watchdog support"
+	depends on M548x
+	help
+	  To compile this driver as a module, choose M here: the
+	  module will be called m548x_wdt.
 
 # MIPS Architecture
 
+config ATH79_WDT
+	tristate "Atheros AR71XX/AR724X/AR913X hardware watchdog"
+	depends on ATH79
+	help
+	  Hardware driver for the built-in watchdog timer on the Atheros
+	  AR71XX/AR724X/AR913X SoCs.
+
 config BCM47XX_WDT
 	tristate "Broadcom BCM47xx Watchdog Timer"
 	depends on BCM47XX
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index 4b0ef38..dd77665 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -68,6 +68,7 @@
 obj-$(CONFIG_ALIM1535_WDT) += alim1535_wdt.o
 obj-$(CONFIG_ALIM7101_WDT) += alim7101_wdt.o
 obj-$(CONFIG_F71808E_WDT) += f71808e_wdt.o
+obj-$(CONFIG_SP5100_TCO) += sp5100_tco.o
 obj-$(CONFIG_GEODE_WDT) += geodewdt.o
 obj-$(CONFIG_SC520_WDT) += sc520_wdt.o
 obj-$(CONFIG_SBC_FITPC2_WATCHDOG) += sbc_fitpc2_wdt.o
@@ -86,6 +87,7 @@
 obj-$(CONFIG_SC1200_WDT) += sc1200wdt.o
 obj-$(CONFIG_SCx200_WDT) += scx200_wdt.o
 obj-$(CONFIG_PC87413_WDT) += pc87413_wdt.o
+obj-$(CONFIG_NV_TCO) += nv_tco.o
 obj-$(CONFIG_RDC321X_WDT) += rdc321x_wdt.o
 obj-$(CONFIG_60XX_WDT) += sbc60xxwdt.o
 obj-$(CONFIG_SBC8360_WDT) += sbc8360.o
@@ -104,10 +106,10 @@
 # M32R Architecture
 
 # M68K Architecture
-
-# M68KNOMMU Architecture
+obj-$(CONFIG_M548x_WATCHDOG) += m548x_wdt.o
 
 # MIPS Architecture
+obj-$(CONFIG_ATH79_WDT) += ath79_wdt.o
 obj-$(CONFIG_BCM47XX_WDT) += bcm47xx_wdt.o
 obj-$(CONFIG_BCM63XX_WDT) += bcm63xx_wdt.o
 obj-$(CONFIG_RC32434_WDT) += rc32434_wdt.o
diff --git a/drivers/watchdog/alim1535_wdt.c b/drivers/watchdog/alim1535_wdt.c
index 1e9caea..fa4d360 100644
--- a/drivers/watchdog/alim1535_wdt.c
+++ b/drivers/watchdog/alim1535_wdt.c
@@ -301,7 +301,7 @@
  *	want to register another driver on the same PCI id.
  */
 
-static struct pci_device_id ali_pci_tbl[] = {
+static struct pci_device_id ali_pci_tbl[] __used = {
 	{ PCI_VENDOR_ID_AL, 0x1533, PCI_ANY_ID, PCI_ANY_ID,},
 	{ PCI_VENDOR_ID_AL, 0x1535, PCI_ANY_ID, PCI_ANY_ID,},
 	{ 0, },
diff --git a/drivers/watchdog/alim7101_wdt.c b/drivers/watchdog/alim7101_wdt.c
index d8d4da9..4b7a2b4 100644
--- a/drivers/watchdog/alim7101_wdt.c
+++ b/drivers/watchdog/alim7101_wdt.c
@@ -430,7 +430,7 @@
 module_init(alim7101_wdt_init);
 module_exit(alim7101_wdt_unload);
 
-static struct pci_device_id alim7101_pci_tbl[] __devinitdata = {
+static struct pci_device_id alim7101_pci_tbl[] __devinitdata __used = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M7101) },
 	{ }
diff --git a/drivers/watchdog/ath79_wdt.c b/drivers/watchdog/ath79_wdt.c
new file mode 100644
index 0000000..725c84b
--- /dev/null
+++ b/drivers/watchdog/ath79_wdt.c
@@ -0,0 +1,305 @@
+/*
+ * Atheros AR71XX/AR724X/AR913X built-in hardware watchdog timer.
+ *
+ * Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+ *
+ * This driver was based on: drivers/watchdog/ixp4xx_wdt.c
+ *	Author: Deepak Saxena <dsaxena@plexity.net>
+ *	Copyright 2004 (c) MontaVista, Software, Inc.
+ *
+ * which again was based on sa1100 driver,
+ *	Copyright (C) 2000 Oleg Drokin <green@crimea.edu>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ */
+
+#include <linux/bitops.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+#include <linux/watchdog.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+
+#include <asm/mach-ath79/ath79.h>
+#include <asm/mach-ath79/ar71xx_regs.h>
+
+#define DRIVER_NAME	"ath79-wdt"
+
+#define WDT_TIMEOUT	15	/* seconds */
+
+#define WDOG_CTRL_LAST_RESET	BIT(31)
+#define WDOG_CTRL_ACTION_MASK	3
+#define WDOG_CTRL_ACTION_NONE	0	/* no action */
+#define WDOG_CTRL_ACTION_GPI	1	/* general purpose interrupt */
+#define WDOG_CTRL_ACTION_NMI	2	/* NMI */
+#define WDOG_CTRL_ACTION_FCR	3	/* full chip reset */
+
+static int nowayout = WATCHDOG_NOWAYOUT;
+module_param(nowayout, int, 0);
+MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started "
+			   "(default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
+
+static int timeout = WDT_TIMEOUT;
+module_param(timeout, int, 0);
+MODULE_PARM_DESC(timeout, "Watchdog timeout in seconds "
+			  "(default=" __MODULE_STRING(WDT_TIMEOUT) "s)");
+
+static unsigned long wdt_flags;
+
+#define WDT_FLAGS_BUSY		0
+#define WDT_FLAGS_EXPECT_CLOSE	1
+
+static struct clk *wdt_clk;
+static unsigned long wdt_freq;
+static int boot_status;
+static int max_timeout;
+
+static inline void ath79_wdt_keepalive(void)
+{
+	ath79_reset_wr(AR71XX_RESET_REG_WDOG, wdt_freq * timeout);
+}
+
+static inline void ath79_wdt_enable(void)
+{
+	ath79_wdt_keepalive();
+	ath79_reset_wr(AR71XX_RESET_REG_WDOG_CTRL, WDOG_CTRL_ACTION_FCR);
+}
+
+static inline void ath79_wdt_disable(void)
+{
+	ath79_reset_wr(AR71XX_RESET_REG_WDOG_CTRL, WDOG_CTRL_ACTION_NONE);
+}
+
+static int ath79_wdt_set_timeout(int val)
+{
+	if (val < 1 || val > max_timeout)
+		return -EINVAL;
+
+	timeout = val;
+	ath79_wdt_keepalive();
+
+	return 0;
+}
+
+static int ath79_wdt_open(struct inode *inode, struct file *file)
+{
+	if (test_and_set_bit(WDT_FLAGS_BUSY, &wdt_flags))
+		return -EBUSY;
+
+	clear_bit(WDT_FLAGS_EXPECT_CLOSE, &wdt_flags);
+	ath79_wdt_enable();
+
+	return nonseekable_open(inode, file);
+}
+
+static int ath79_wdt_release(struct inode *inode, struct file *file)
+{
+	if (test_bit(WDT_FLAGS_EXPECT_CLOSE, &wdt_flags))
+		ath79_wdt_disable();
+	else {
+		pr_crit(DRIVER_NAME ": device closed unexpectedly, "
+			"watchdog timer will not stop!\n");
+		ath79_wdt_keepalive();
+	}
+
+	clear_bit(WDT_FLAGS_BUSY, &wdt_flags);
+	clear_bit(WDT_FLAGS_EXPECT_CLOSE, &wdt_flags);
+
+	return 0;
+}
+
+static ssize_t ath79_wdt_write(struct file *file, const char *data,
+				size_t len, loff_t *ppos)
+{
+	if (len) {
+		if (!nowayout) {
+			size_t i;
+
+			clear_bit(WDT_FLAGS_EXPECT_CLOSE, &wdt_flags);
+
+			for (i = 0; i != len; i++) {
+				char c;
+
+				if (get_user(c, data + i))
+					return -EFAULT;
+
+				if (c == 'V')
+					set_bit(WDT_FLAGS_EXPECT_CLOSE,
+						&wdt_flags);
+			}
+		}
+
+		ath79_wdt_keepalive();
+	}
+
+	return len;
+}
+
+static const struct watchdog_info ath79_wdt_info = {
+	.options		= WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING |
+				  WDIOF_MAGICCLOSE | WDIOF_CARDRESET,
+	.firmware_version	= 0,
+	.identity		= "ATH79 watchdog",
+};
+
+static long ath79_wdt_ioctl(struct file *file, unsigned int cmd,
+			    unsigned long arg)
+{
+	void __user *argp = (void __user *)arg;
+	int __user *p = argp;
+	int err;
+	int t;
+
+	switch (cmd) {
+	case WDIOC_GETSUPPORT:
+		err = copy_to_user(argp, &ath79_wdt_info,
+				   sizeof(ath79_wdt_info)) ? -EFAULT : 0;
+		break;
+
+	case WDIOC_GETSTATUS:
+		err = put_user(0, p);
+		break;
+
+	case WDIOC_GETBOOTSTATUS:
+		err = put_user(boot_status, p);
+		break;
+
+	case WDIOC_KEEPALIVE:
+		ath79_wdt_keepalive();
+		err = 0;
+		break;
+
+	case WDIOC_SETTIMEOUT:
+		err = get_user(t, p);
+		if (err)
+			break;
+
+		err = ath79_wdt_set_timeout(t);
+		if (err)
+			break;
+
+		/* fallthrough */
+	case WDIOC_GETTIMEOUT:
+		err = put_user(timeout, p);
+		break;
+
+	default:
+		err = -ENOTTY;
+		break;
+	}
+
+	return err;
+}
+
+static const struct file_operations ath79_wdt_fops = {
+	.owner		= THIS_MODULE,
+	.llseek		= no_llseek,
+	.write		= ath79_wdt_write,
+	.unlocked_ioctl	= ath79_wdt_ioctl,
+	.open		= ath79_wdt_open,
+	.release	= ath79_wdt_release,
+};
+
+static struct miscdevice ath79_wdt_miscdev = {
+	.minor = WATCHDOG_MINOR,
+	.name = "watchdog",
+	.fops = &ath79_wdt_fops,
+};
+
+static int __devinit ath79_wdt_probe(struct platform_device *pdev)
+{
+	u32 ctrl;
+	int err;
+
+	wdt_clk = clk_get(&pdev->dev, "wdt");
+	if (IS_ERR(wdt_clk))
+		return PTR_ERR(wdt_clk);
+
+	err = clk_enable(wdt_clk);
+	if (err)
+		goto err_clk_put;
+
+	wdt_freq = clk_get_rate(wdt_clk);
+	if (!wdt_freq) {
+		err = -EINVAL;
+		goto err_clk_disable;
+	}
+
+	max_timeout = (0xfffffffful / wdt_freq);
+	if (timeout < 1 || timeout > max_timeout) {
+		timeout = max_timeout;
+		dev_info(&pdev->dev,
+			"timeout value must be 0 < timeout < %d, using %d\n",
+			max_timeout, timeout);
+	}
+
+	ctrl = ath79_reset_rr(AR71XX_RESET_REG_WDOG_CTRL);
+	boot_status = (ctrl & WDOG_CTRL_LAST_RESET) ? WDIOF_CARDRESET : 0;
+
+	err = misc_register(&ath79_wdt_miscdev);
+	if (err) {
+		dev_err(&pdev->dev,
+			"unable to register misc device, err=%d\n", err);
+		goto err_clk_disable;
+	}
+
+	return 0;
+
+err_clk_disable:
+	clk_disable(wdt_clk);
+err_clk_put:
+	clk_put(wdt_clk);
+	return err;
+}
+
+static int __devexit ath79_wdt_remove(struct platform_device *pdev)
+{
+	misc_deregister(&ath79_wdt_miscdev);
+	clk_disable(wdt_clk);
+	clk_put(wdt_clk);
+	return 0;
+}
+
+static void ath79_wdt_shutdown(struct platform_device *pdev)
+{
+	ath79_wdt_disable();
+}
+
+static struct platform_driver ath79_wdt_driver = {
+	.remove		= __devexit_p(ath79_wdt_remove),
+	.shutdown	= ath79_wdt_shutdown,
+	.driver		= {
+		.name	= DRIVER_NAME,
+		.owner	= THIS_MODULE,
+	},
+};
+
+static int __init ath79_wdt_init(void)
+{
+	return platform_driver_probe(&ath79_wdt_driver, ath79_wdt_probe);
+}
+module_init(ath79_wdt_init);
+
+static void __exit ath79_wdt_exit(void)
+{
+	platform_driver_unregister(&ath79_wdt_driver);
+}
+module_exit(ath79_wdt_exit);
+
+MODULE_DESCRIPTION("Atheros AR71XX/AR724X/AR913X hardware watchdog driver");
+MODULE_AUTHOR("Gabor Juhos <juhosg@openwrt.org");
+MODULE_AUTHOR("Imre Kaloz <kaloz@openwrt.org");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:" DRIVER_NAME);
+MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
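
For reference, a minimal userspace sketch of the /dev/watchdog protocol that ath79_wdt (and the other drivers in this series) implements: set a timeout, ping for a while, then write the magic-close character so release() is allowed to stop the timer. The 10-second timeout and the five-iteration loop are illustrative values, not anything required by the driver.

/* Illustrative client: exercises WDIOC_SETTIMEOUT, WDIOC_KEEPALIVE and the
 * magic-close 'V' handled by ath79_wdt_write()/ath79_wdt_release(). */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/watchdog.h>

int main(void)
{
	int timeout = 10;			/* illustrative value */
	int i;
	int fd = open("/dev/watchdog", O_WRONLY);

	if (fd < 0) {
		perror("open /dev/watchdog");
		return 1;
	}
	ioctl(fd, WDIOC_SETTIMEOUT, &timeout);	/* ath79_wdt_set_timeout() */
	for (i = 0; i < 5; i++) {
		ioctl(fd, WDIOC_KEEPALIVE, 0);	/* ath79_wdt_keepalive() */
		sleep(timeout / 2);
	}
	write(fd, "V", 1);	/* expect-close; ignored if nowayout is set */
	close(fd);		/* release() can now disable the timer */
	return 0;
}
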
diff --git a/drivers/watchdog/booke_wdt.c b/drivers/watchdog/booke_wdt.c
index d11ffb0..7e7ec9c 100644
--- a/drivers/watchdog/booke_wdt.c
+++ b/drivers/watchdog/booke_wdt.c
@@ -85,6 +85,22 @@
 	return 0;
 }
 
+static void __booke_wdt_set(void *data)
+{
+	u32 val;
+
+	val = mfspr(SPRN_TCR);
+	val &= ~WDTP_MASK;
+	val |= WDTP(booke_wdt_period);
+
+	mtspr(SPRN_TCR, val);
+}
+
+static void booke_wdt_set(void)
+{
+	on_each_cpu(__booke_wdt_set, NULL, 0);
+}
+
 static void __booke_wdt_ping(void *data)
 {
 	mtspr(SPRN_TSR, TSR_ENW|TSR_WIS);
@@ -181,8 +197,7 @@
 #else
 		booke_wdt_period = tmp;
 #endif
-		mtspr(SPRN_TCR, (mfspr(SPRN_TCR) & ~WDTP_MASK) |
-						WDTP(booke_wdt_period));
+		booke_wdt_set();
 		return 0;
 	case WDIOC_GETTIMEOUT:
 		return put_user(booke_wdt_period, p);
@@ -193,8 +208,15 @@
 	return 0;
 }
 
+/* wdt_is_active stores whether or not the /dev/watchdog device is open */
+static unsigned long wdt_is_active;
+
 static int booke_wdt_open(struct inode *inode, struct file *file)
 {
+	/* /dev/watchdog can only be opened once */
+	if (test_and_set_bit(0, &wdt_is_active))
+		return -EBUSY;
+
 	spin_lock(&booke_wdt_lock);
 	if (booke_wdt_enabled == 0) {
 		booke_wdt_enabled = 1;
@@ -210,8 +232,17 @@
 
 static int booke_wdt_release(struct inode *inode, struct file *file)
 {
+#ifndef CONFIG_WATCHDOG_NOWAYOUT
+	/* Normally, the watchdog is disabled when /dev/watchdog is closed, but
+	 * if CONFIG_WATCHDOG_NOWAYOUT is defined, then it means that the
+	 * watchdog should remain enabled.  So we disable it only if
+	 * CONFIG_WATCHDOG_NOWAYOUT is not defined.
+	 */
 	on_each_cpu(__booke_wdt_disable, NULL, 0);
 	booke_wdt_enabled = 0;
+#endif
+
+	clear_bit(0, &wdt_is_active);
 
 	return 0;
 }
diff --git a/drivers/watchdog/f71808e_wdt.c b/drivers/watchdog/f71808e_wdt.c
index 65e5796..d4d8d1f 100644
--- a/drivers/watchdog/f71808e_wdt.c
+++ b/drivers/watchdog/f71808e_wdt.c
@@ -42,18 +42,21 @@
 #define SIO_REG_DEVID		0x20	/* Device ID (2 bytes) */
 #define SIO_REG_DEVREV		0x22	/* Device revision */
 #define SIO_REG_MANID		0x23	/* Fintek ID (2 bytes) */
+#define SIO_REG_ROM_ADDR_SEL	0x27	/* ROM address select */
+#define SIO_REG_MFUNCT1		0x29	/* Multi function select 1 */
+#define SIO_REG_MFUNCT2		0x2a	/* Multi function select 2 */
+#define SIO_REG_MFUNCT3		0x2b	/* Multi function select 3 */
 #define SIO_REG_ENABLE		0x30	/* Logical device enable */
 #define SIO_REG_ADDR		0x60	/* Logical device address (2 bytes) */
 
 #define SIO_FINTEK_ID		0x1934	/* Manufacturers ID */
-#define SIO_F71808_ID		0x0901  /* Chipset ID */
-#define SIO_F71858_ID		0x0507  /* Chipset ID */
+#define SIO_F71808_ID		0x0901	/* Chipset ID */
+#define SIO_F71858_ID		0x0507	/* Chipset ID */
 #define SIO_F71862_ID		0x0601	/* Chipset ID */
+#define SIO_F71869_ID		0x0814	/* Chipset ID */
 #define SIO_F71882_ID		0x0541	/* Chipset ID */
 #define SIO_F71889_ID		0x0723	/* Chipset ID */
 
-#define	F71882FG_REG_START		0x01
-
 #define F71808FG_REG_WDO_CONF		0xf0
 #define F71808FG_REG_WDT_CONF		0xf5
 #define F71808FG_REG_WD_TIME		0xf6
@@ -70,13 +73,15 @@
 #define WATCHDOG_MAX_TIMEOUT	(60 * 255)
 #define WATCHDOG_PULSE_WIDTH	125	/* 125 ms, default pulse width for
 					   watchdog signal */
+#define WATCHDOG_F71862FG_PIN	63	/* default watchdog reset output
+					   pin number 63 */
 
 static unsigned short force_id;
 module_param(force_id, ushort, 0);
 MODULE_PARM_DESC(force_id, "Override the detected device ID");
 
 static const int max_timeout = WATCHDOG_MAX_TIMEOUT;
-static int timeout = 60;	/* default timeout in seconds */
+static int timeout = WATCHDOG_TIMEOUT;	/* default timeout in seconds */
 module_param(timeout, int, 0);
 MODULE_PARM_DESC(timeout,
 	"Watchdog timeout in seconds. 1<= timeout <="
@@ -89,6 +94,12 @@
 	"Watchdog signal pulse width. 0(=level), 1 ms, 25 ms, 125 ms or 5000 ms"
 			" (default=" __MODULE_STRING(WATCHDOG_PULSE_WIDTH) ")");
 
+static unsigned int f71862fg_pin = WATCHDOG_F71862FG_PIN;
+module_param(f71862fg_pin, uint, 0);
+MODULE_PARM_DESC(f71862fg_pin,
+	"Watchdog f71862fg reset output pin configuration. Choose pin 56 or 63"
+			" (default=" __MODULE_STRING(WATCHDOG_F71862FG_PIN)")");
+
 static int nowayout = WATCHDOG_NOWAYOUT;
 module_param(nowayout, bool, 0444);
 MODULE_PARM_DESC(nowayout, "Disable watchdog shutdown on close");
@@ -98,12 +109,13 @@
 MODULE_PARM_DESC(start_withtimeout, "Start watchdog timer on module load with"
 	" given initial timeout. Zero (default) disables this feature.");
 
-enum chips { f71808fg, f71858fg, f71862fg, f71882fg, f71889fg };
+enum chips { f71808fg, f71858fg, f71862fg, f71869, f71882fg, f71889fg };
 
 static const char *f71808e_names[] = {
 	"f71808fg",
 	"f71858fg",
 	"f71862fg",
+	"f71869",
 	"f71882fg",
 	"f71889fg",
 };
@@ -282,6 +294,28 @@
 	return err;
 }
 
+static int f71862fg_pin_configure(unsigned short ioaddr)
+{
+	/* When ioaddr is non-zero, the calling function has to take care of
+	   mutex handling and superio preparation! */
+
+	if (f71862fg_pin == 63) {
+		if (ioaddr) {
+			/* SPI must be disabled first to use this pin! */
+			superio_clear_bit(ioaddr, SIO_REG_ROM_ADDR_SEL, 6);
+			superio_set_bit(ioaddr, SIO_REG_MFUNCT3, 4);
+		}
+	} else if (f71862fg_pin == 56) {
+		if (ioaddr)
+			superio_set_bit(ioaddr, SIO_REG_MFUNCT1, 1);
+	} else {
+		printk(KERN_ERR DRVNAME ": Invalid argument f71862fg_pin=%d\n",
+				f71862fg_pin);
+		return -EINVAL;
+	}
+	return 0;
+}
+
 static int watchdog_start(void)
 {
 	/* Make sure we don't die as soon as the watchdog is enabled below */
@@ -299,19 +333,30 @@
 	switch (watchdog.type) {
 	case f71808fg:
 		/* Set pin 21 to GPIO23/WDTRST#, then to WDTRST# */
-		superio_clear_bit(watchdog.sioaddr, 0x2a, 3);
-		superio_clear_bit(watchdog.sioaddr, 0x2b, 3);
+		superio_clear_bit(watchdog.sioaddr, SIO_REG_MFUNCT2, 3);
+		superio_clear_bit(watchdog.sioaddr, SIO_REG_MFUNCT3, 3);
+		break;
+
+	case f71862fg:
+		err = f71862fg_pin_configure(watchdog.sioaddr);
+		if (err)
+			goto exit_superio;
+		break;
+
+	case f71869:
+		/* GPIO14 --> WDTRST# */
+		superio_clear_bit(watchdog.sioaddr, SIO_REG_MFUNCT1, 4);
 		break;
 
 	case f71882fg:
 		/* Set pin 56 to WDTRST# */
-		superio_set_bit(watchdog.sioaddr, 0x29, 1);
+		superio_set_bit(watchdog.sioaddr, SIO_REG_MFUNCT1, 1);
 		break;
 
 	case f71889fg:
 		/* set pin 40 to WDTRST# */
-		superio_outb(watchdog.sioaddr, 0x2b,
-				superio_inb(watchdog.sioaddr, 0x2b) & 0xcf);
+		superio_outb(watchdog.sioaddr, SIO_REG_MFUNCT3,
+			superio_inb(watchdog.sioaddr, SIO_REG_MFUNCT3) & 0xcf);
 		break;
 
 	default:
@@ -711,16 +756,19 @@
 	case SIO_F71808_ID:
 		watchdog.type = f71808fg;
 		break;
+	case SIO_F71862_ID:
+		watchdog.type = f71862fg;
+		err = f71862fg_pin_configure(0); /* validate module parameter */
+		break;
+	case SIO_F71869_ID:
+		watchdog.type = f71869;
+		break;
 	case SIO_F71882_ID:
 		watchdog.type = f71882fg;
 		break;
 	case SIO_F71889_ID:
 		watchdog.type = f71889fg;
 		break;
-	case SIO_F71862_ID:
-		/* These have a watchdog, though it isn't implemented (yet). */
-		err = -ENOSYS;
-		goto exit;
 	case SIO_F71858_ID:
 		/* Confirmed (by datasheet) not to have a watchdog. */
 		err = -ENODEV;
diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c
index dea7b5b..24b966d 100644
--- a/drivers/watchdog/hpwdt.c
+++ b/drivers/watchdog/hpwdt.c
@@ -469,7 +469,7 @@
 	unsigned long rom_pl;
 	static int die_nmi_called;
 
-	if (ulReason != DIE_NMI && ulReason != DIE_NMI_IPI)
+	if (ulReason != DIE_NMIUNKNOWN)
 		goto out;
 
 	if (!hpwdt_nmi_decoding)
diff --git a/drivers/watchdog/iTCO_wdt.c b/drivers/watchdog/iTCO_wdt.c
index b8838d2..2c6c2b4 100644
--- a/drivers/watchdog/iTCO_wdt.c
+++ b/drivers/watchdog/iTCO_wdt.c
@@ -1,7 +1,7 @@
 /*
  *	intel TCO Watchdog Driver
  *
- *	(c) Copyright 2006-2009 Wim Van Sebroeck <wim@iguana.be>.
+ *	(c) Copyright 2006-2010 Wim Van Sebroeck <wim@iguana.be>.
  *
  *	This program is free software; you can redistribute it and/or
  *	modify it under the terms of the GNU General Public License
@@ -26,13 +26,15 @@
  *	document number 301473-002, 301474-026: 82801F (ICH6)
  *	document number 313082-001, 313075-006: 631xESB, 632xESB
  *	document number 307013-003, 307014-024: 82801G (ICH7)
+ *	document number 322896-001, 322897-001: NM10
  *	document number 313056-003, 313057-017: 82801H (ICH8)
  *	document number 316972-004, 316973-012: 82801I (ICH9)
  *	document number 319973-002, 319974-002: 82801J (ICH10)
  *	document number 322169-001, 322170-003: 5 Series, 3400 Series (PCH)
  *	document number 320066-003, 320257-008: EP80597 (IICH)
- *	document number TBD                   : Cougar Point (CPT)
+ *	document number 324645-001, 324646-001: Cougar Point (CPT)
  *	document number TBD                   : Patsburg (PBG)
+ *	document number TBD                   : DH89xxCC
  */
 
 /*
@@ -85,6 +87,7 @@
 	TCO_ICH7DH,	/* ICH7DH */
 	TCO_ICH7M,	/* ICH7-M & ICH7-U */
 	TCO_ICH7MDH,	/* ICH7-M DH */
+	TCO_NM10,	/* NM10 */
 	TCO_ICH8,	/* ICH8 & ICH8R */
 	TCO_ICH8DH,	/* ICH8DH */
 	TCO_ICH8DO,	/* ICH8DO */
@@ -149,6 +152,7 @@
 	TCO_CPT31,	/* Cougar Point */
 	TCO_PBG1,	/* Patsburg */
 	TCO_PBG2,	/* Patsburg */
+	TCO_DH89XXCC,	/* DH89xxCC */
 };
 
 static struct {
@@ -174,6 +178,7 @@
 	{"ICH7DH", 2},
 	{"ICH7-M or ICH7-U", 2},
 	{"ICH7-M DH", 2},
+	{"NM10", 2},
 	{"ICH8 or ICH8R", 2},
 	{"ICH8DH", 2},
 	{"ICH8DO", 2},
@@ -238,6 +243,7 @@
 	{"Cougar Point", 2},
 	{"Patsburg", 2},
 	{"Patsburg", 2},
+	{"DH89xxCC", 2},
 	{NULL, 0}
 };
 
@@ -291,6 +297,7 @@
 	{ ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH7_30,		TCO_ICH7DH)},
 	{ ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH7_1,		TCO_ICH7M)},
 	{ ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH7_31,		TCO_ICH7MDH)},
+	{ ITCO_PCI_DEVICE(0x27bc,				TCO_NM10)},
 	{ ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH8_0,		TCO_ICH8)},
 	{ ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH8_2,		TCO_ICH8DH)},
 	{ ITCO_PCI_DEVICE(PCI_DEVICE_ID_INTEL_ICH8_3,		TCO_ICH8DO)},
@@ -355,6 +362,7 @@
 	{ ITCO_PCI_DEVICE(0x1c5f,				TCO_CPT31)},
 	{ ITCO_PCI_DEVICE(0x1d40,				TCO_PBG1)},
 	{ ITCO_PCI_DEVICE(0x1d41,				TCO_PBG2)},
+	{ ITCO_PCI_DEVICE(0x2310,				TCO_DH89XXCC)},
 	{ 0, },			/* End of list */
 };
 MODULE_DEVICE_TABLE(pci, iTCO_wdt_pci_tbl);
diff --git a/drivers/watchdog/ks8695_wdt.c b/drivers/watchdog/ks8695_wdt.c
index 2852bb2..8114719 100644
--- a/drivers/watchdog/ks8695_wdt.c
+++ b/drivers/watchdog/ks8695_wdt.c
@@ -21,7 +21,7 @@
 #include <linux/watchdog.h>
 #include <linux/io.h>
 #include <linux/uaccess.h>
-#include <mach/timex.h>
+#include <mach/hardware.h>
 #include <mach/regs-timer.h>
 
 #define WDT_DEFAULT_TIME	5	/* seconds */
diff --git a/drivers/watchdog/m548x_wdt.c b/drivers/watchdog/m548x_wdt.c
new file mode 100644
index 0000000..cabbcfe
--- /dev/null
+++ b/drivers/watchdog/m548x_wdt.c
@@ -0,0 +1,227 @@
+/*
+ * drivers/watchdog/m548x_wdt.c
+ *
+ * Watchdog driver for ColdFire MCF548x processors
+ * Copyright 2010 (c) Philippe De Muyter <phdm@macqel.be>
+ *
+ * Adapted from the IXP4xx watchdog driver, which carries these notices:
+ *
+ *  Author: Deepak Saxena <dsaxena@plexity.net>
+ *
+ *  Copyright 2004 (c) MontaVista, Software, Inc.
+ *  Based on sa1100 driver, Copyright (C) 2000 Oleg Drokin <green@crimea.edu>
+ *
+ * This file is licensed under  the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/watchdog.h>
+#include <linux/init.h>
+#include <linux/bitops.h>
+#include <linux/ioport.h>
+#include <linux/uaccess.h>
+
+#include <asm/coldfire.h>
+#include <asm/m548xsim.h>
+#include <asm/m548xgpt.h>
+
+static int nowayout = WATCHDOG_NOWAYOUT;
+static unsigned int heartbeat = 30;	/* (secs) Default is 0.5 minute */
+static unsigned long wdt_status;
+
+#define	WDT_IN_USE		0
+#define	WDT_OK_TO_CLOSE		1
+
+static void wdt_enable(void)
+{
+	unsigned int gms0;
+
+	/* preserve GPIO usage, if any */
+	gms0 = __raw_readl(MCF_MBAR + MCF_GPT_GMS0);
+	if (gms0 & MCF_GPT_GMS_TMS_GPIO)
+		gms0 &= (MCF_GPT_GMS_TMS_GPIO | MCF_GPT_GMS_GPIO_MASK
+							| MCF_GPT_GMS_OD);
+	else
+		gms0 = MCF_GPT_GMS_TMS_GPIO | MCF_GPT_GMS_OD;
+	__raw_writel(gms0, MCF_MBAR + MCF_GPT_GMS0);
+	__raw_writel(MCF_GPT_GCIR_PRE(heartbeat*(MCF_BUSCLK/0xffff)) |
+			MCF_GPT_GCIR_CNT(0xffff), MCF_MBAR + MCF_GPT_GCIR0);
+	gms0 |= MCF_GPT_GMS_OCPW(0xA5) | MCF_GPT_GMS_WDEN | MCF_GPT_GMS_CE;
+	__raw_writel(gms0, MCF_MBAR + MCF_GPT_GMS0);
+}
+
+static void wdt_disable(void)
+{
+	unsigned int gms0;
+
+	/* disable watchdog */
+	gms0 = __raw_readl(MCF_MBAR + MCF_GPT_GMS0);
+	gms0 &= ~(MCF_GPT_GMS_WDEN | MCF_GPT_GMS_CE);
+	__raw_writel(gms0, MCF_MBAR + MCF_GPT_GMS0);
+}
+
+static void wdt_keepalive(void)
+{
+	unsigned int gms0;
+
+	gms0 = __raw_readl(MCF_MBAR + MCF_GPT_GMS0);
+	gms0 |= MCF_GPT_GMS_OCPW(0xA5);
+	__raw_writel(gms0, MCF_MBAR + MCF_GPT_GMS0);
+}
+
+static int m548x_wdt_open(struct inode *inode, struct file *file)
+{
+	if (test_and_set_bit(WDT_IN_USE, &wdt_status))
+		return -EBUSY;
+
+	clear_bit(WDT_OK_TO_CLOSE, &wdt_status);
+	wdt_enable();
+	return nonseekable_open(inode, file);
+}
+
+static ssize_t m548x_wdt_write(struct file *file, const char *data,
+						size_t len, loff_t *ppos)
+{
+	if (len) {
+		if (!nowayout) {
+			size_t i;
+
+			clear_bit(WDT_OK_TO_CLOSE, &wdt_status);
+
+			for (i = 0; i != len; i++) {
+				char c;
+
+				if (get_user(c, data + i))
+					return -EFAULT;
+				if (c == 'V')
+					set_bit(WDT_OK_TO_CLOSE, &wdt_status);
+			}
+		}
+		wdt_keepalive();
+	}
+	return len;
+}
+
+static const struct watchdog_info ident = {
+	.options	= WDIOF_MAGICCLOSE | WDIOF_SETTIMEOUT |
+				WDIOF_KEEPALIVEPING,
+	.identity	= "Coldfire M548x Watchdog",
+};
+
+static long m548x_wdt_ioctl(struct file *file, unsigned int cmd,
+							 unsigned long arg)
+{
+	int ret = -ENOTTY;
+	int time;
+
+	switch (cmd) {
+	case WDIOC_GETSUPPORT:
+		ret = copy_to_user((struct watchdog_info *)arg, &ident,
+				   sizeof(ident)) ? -EFAULT : 0;
+		break;
+
+	case WDIOC_GETSTATUS:
+		ret = put_user(0, (int *)arg);
+		break;
+
+	case WDIOC_GETBOOTSTATUS:
+		ret = put_user(0, (int *)arg);
+		break;
+
+	case WDIOC_KEEPALIVE:
+		wdt_keepalive();
+		ret = 0;
+		break;
+
+	case WDIOC_SETTIMEOUT:
+		ret = get_user(time, (int *)arg);
+		if (ret)
+			break;
+
+		if (time <= 0 || time > 30) {
+			ret = -EINVAL;
+			break;
+		}
+
+		heartbeat = time;
+		wdt_enable();
+		/* Fall through */
+
+	case WDIOC_GETTIMEOUT:
+		ret = put_user(heartbeat, (int *)arg);
+		break;
+	}
+	return ret;
+}
+
+static int m548x_wdt_release(struct inode *inode, struct file *file)
+{
+	if (test_bit(WDT_OK_TO_CLOSE, &wdt_status))
+		wdt_disable();
+	else {
+		printk(KERN_CRIT "WATCHDOG: Device closed unexpectedly - "
+					"timer will not stop\n");
+		wdt_keepalive();
+	}
+	clear_bit(WDT_IN_USE, &wdt_status);
+	clear_bit(WDT_OK_TO_CLOSE, &wdt_status);
+
+	return 0;
+}
+
+
+static const struct file_operations m548x_wdt_fops = {
+	.owner		= THIS_MODULE,
+	.llseek		= no_llseek,
+	.write		= m548x_wdt_write,
+	.unlocked_ioctl	= m548x_wdt_ioctl,
+	.open		= m548x_wdt_open,
+	.release	= m548x_wdt_release,
+};
+
+static struct miscdevice m548x_wdt_miscdev = {
+	.minor		= WATCHDOG_MINOR,
+	.name		= "watchdog",
+	.fops		= &m548x_wdt_fops,
+};
+
+static int __init m548x_wdt_init(void)
+{
+	if (!request_mem_region(MCF_MBAR + MCF_GPT_GCIR0, 4,
+						"Coldfire M548x Watchdog")) {
+		printk(KERN_WARNING
+				"Coldfire M548x Watchdog : I/O region busy\n");
+		return -EBUSY;
+	}
+	printk(KERN_INFO "ColdFire watchdog driver is loaded.\n");
+
+	return misc_register(&m548x_wdt_miscdev);
+}
+
+static void __exit m548x_wdt_exit(void)
+{
+	misc_deregister(&m548x_wdt_miscdev);
+	release_mem_region(MCF_MBAR + MCF_GPT_GCIR0, 4);
+}
+
+module_init(m548x_wdt_init);
+module_exit(m548x_wdt_exit);
+
+MODULE_AUTHOR("Philippe De Muyter <phdm@macqel.be>");
+MODULE_DESCRIPTION("Coldfire M548x Watchdog");
+
+module_param(heartbeat, int, 0);
+MODULE_PARM_DESC(heartbeat, "Watchdog heartbeat in seconds (default 30s)");
+
+module_param(nowayout, int, 0);
+MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started");
+
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
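
A note on the timeout arithmetic in wdt_enable() above, written out under the assumption that MCF_BUSCLK is the GPT input clock in Hz and that the timer expires after counting down the programmed count at busclk/prescaler (neither detail is spelled out in this diff):

/*
 * Sketch of the m548x expiry derivation (assumptions as stated above):
 *
 *   prescaler = heartbeat * (MCF_BUSCLK / 0xffff)
 *   count     = 0xffff
 *   expiry    = prescaler * count / MCF_BUSCLK
 *             = heartbeat * (MCF_BUSCLK / 0xffff) * 0xffff / MCF_BUSCLK
 *             ~ heartbeat seconds (up to integer-division rounding)
 *
 * The ioctl handler separately limits WDIOC_SETTIMEOUT to 1..30 seconds.
 */
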
diff --git a/drivers/watchdog/nv_tco.c b/drivers/watchdog/nv_tco.c
new file mode 100644
index 0000000..1a50aa7
--- /dev/null
+++ b/drivers/watchdog/nv_tco.c
@@ -0,0 +1,512 @@
+/*
+ *	nv_tco 0.01:	TCO timer driver for NV chipsets
+ *
+ *	(c) Copyright 2005 Google Inc., All Rights Reserved.
+ *
+ *	Based off i8xx_tco.c:
+ *	(c) Copyright 2000 kernel concepts <nils@kernelconcepts.de>, All Rights
+ *	Reserved.
+ *				http://www.kernelconcepts.de
+ *
+ *	This program is free software; you can redistribute it and/or
+ *	modify it under the terms of the GNU General Public License
+ *	as published by the Free Software Foundation; either version
+ *	2 of the License, or (at your option) any later version.
+ *
+ *	TCO timer driver for NV chipsets
+ *	based on softdog.c by Alan Cox <alan@redhat.com>
+ */
+
+/*
+ *	Includes, defines, variables, module parameters, ...
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/types.h>
+#include <linux/miscdevice.h>
+#include <linux/watchdog.h>
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/pci.h>
+#include <linux/ioport.h>
+#include <linux/jiffies.h>
+#include <linux/platform_device.h>
+#include <linux/uaccess.h>
+#include <linux/io.h>
+
+#include "nv_tco.h"
+
+/* Module and version information */
+#define TCO_VERSION "0.01"
+#define TCO_MODULE_NAME "NV_TCO"
+#define TCO_DRIVER_NAME   TCO_MODULE_NAME ", v" TCO_VERSION
+#define PFX TCO_MODULE_NAME ": "
+
+/* internal variables */
+static unsigned int tcobase;
+static DEFINE_SPINLOCK(tco_lock);	/* Guards the hardware */
+static unsigned long timer_alive;
+static char tco_expect_close;
+static struct pci_dev *tco_pci;
+
+/* the watchdog platform device */
+static struct platform_device *nv_tco_platform_device;
+
+/* module parameters */
+#define WATCHDOG_HEARTBEAT 30	/* 30 sec default heartbeat (2<heartbeat<39) */
+static int heartbeat = WATCHDOG_HEARTBEAT;  /* in seconds */
+module_param(heartbeat, int, 0);
+MODULE_PARM_DESC(heartbeat, "Watchdog heartbeat in seconds. (2<heartbeat<39, "
+			    "default=" __MODULE_STRING(WATCHDOG_HEARTBEAT) ")");
+
+static int nowayout = WATCHDOG_NOWAYOUT;
+module_param(nowayout, int, 0);
+MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started"
+		" (default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
+
+/*
+ * Some TCO specific functions
+ */
+static inline unsigned char seconds_to_ticks(int seconds)
+{
+	/* the internal timer is stored as ticks which decrement
+	 * every 0.6 seconds */
+	return (seconds * 10) / 6;
+}
+
+static void tco_timer_start(void)
+{
+	u32 val;
+	unsigned long flags;
+
+	spin_lock_irqsave(&tco_lock, flags);
+	val = inl(TCO_CNT(tcobase));
+	val &= ~TCO_CNT_TCOHALT;
+	outl(val, TCO_CNT(tcobase));
+	spin_unlock_irqrestore(&tco_lock, flags);
+}
+
+static void tco_timer_stop(void)
+{
+	u32 val;
+	unsigned long flags;
+
+	spin_lock_irqsave(&tco_lock, flags);
+	val = inl(TCO_CNT(tcobase));
+	val |= TCO_CNT_TCOHALT;
+	outl(val, TCO_CNT(tcobase));
+	spin_unlock_irqrestore(&tco_lock, flags);
+}
+
+static void tco_timer_keepalive(void)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&tco_lock, flags);
+	outb(0x01, TCO_RLD(tcobase));
+	spin_unlock_irqrestore(&tco_lock, flags);
+}
+
+static int tco_timer_set_heartbeat(int t)
+{
+	int ret = 0;
+	unsigned char tmrval;
+	unsigned long flags;
+	u8 val;
+
+	/*
+	 * note that seconds_to_ticks(t) > t, so if t > 0x3f, then so is
+	 * tmrval = seconds_to_ticks(t).  Check that the count in seconds
+	 * isn't out of range on its own (to avoid overflow in tmrval).
+	 */
+	if (t < 0 || t > 0x3f)
+		return -EINVAL;
+	tmrval = seconds_to_ticks(t);
+
+	/* "Values of 0h-3h are ignored and should not be attempted" */
+	if (tmrval > 0x3f || tmrval < 0x04)
+		return -EINVAL;
+
+	/* Write new heartbeat to watchdog */
+	spin_lock_irqsave(&tco_lock, flags);
+	val = inb(TCO_TMR(tcobase));
+	val &= 0xc0;
+	val |= tmrval;
+	outb(val, TCO_TMR(tcobase));
+	val = inb(TCO_TMR(tcobase));
+
+	if ((val & 0x3f) != tmrval)
+		ret = -EINVAL;
+	spin_unlock_irqrestore(&tco_lock, flags);
+
+	if (ret)
+		return ret;
+
+	heartbeat = t;
+	return 0;
+}
+
+/*
+ *	/dev/watchdog handling
+ */
+
+static int nv_tco_open(struct inode *inode, struct file *file)
+{
+	/* /dev/watchdog can only be opened once */
+	if (test_and_set_bit(0, &timer_alive))
+		return -EBUSY;
+
+	/* Reload and activate timer */
+	tco_timer_keepalive();
+	tco_timer_start();
+	return nonseekable_open(inode, file);
+}
+
+static int nv_tco_release(struct inode *inode, struct file *file)
+{
+	/* Shut off the timer */
+	if (tco_expect_close == 42) {
+		tco_timer_stop();
+	} else {
+		printk(KERN_CRIT PFX "Unexpected close, not stopping "
+		       "watchdog!\n");
+		tco_timer_keepalive();
+	}
+	clear_bit(0, &timer_alive);
+	tco_expect_close = 0;
+	return 0;
+}
+
+static ssize_t nv_tco_write(struct file *file, const char __user *data,
+			    size_t len, loff_t *ppos)
+{
+	/* See if we got the magic character 'V' and reload the timer */
+	if (len) {
+		if (!nowayout) {
+			size_t i;
+
+			/*
+			 * note: just in case someone wrote the magic character
+			 * five months ago...
+			 */
+			tco_expect_close = 0;
+
+			/*
+			 * scan to see whether or not we got the magic
+			 * character
+			 */
+			for (i = 0; i != len; i++) {
+				char c;
+				if (get_user(c, data + i))
+					return -EFAULT;
+				if (c == 'V')
+					tco_expect_close = 42;
+			}
+		}
+
+		/* someone wrote to us, we should reload the timer */
+		tco_timer_keepalive();
+	}
+	return len;
+}
+
+static long nv_tco_ioctl(struct file *file, unsigned int cmd,
+			 unsigned long arg)
+{
+	int new_options, retval = -EINVAL;
+	int new_heartbeat;
+	void __user *argp = (void __user *)arg;
+	int __user *p = argp;
+	static const struct watchdog_info ident = {
+		.options =		WDIOF_SETTIMEOUT |
+					WDIOF_KEEPALIVEPING |
+					WDIOF_MAGICCLOSE,
+		.firmware_version =	0,
+		.identity =		TCO_MODULE_NAME,
+	};
+
+	switch (cmd) {
+	case WDIOC_GETSUPPORT:
+		return copy_to_user(argp, &ident, sizeof(ident)) ? -EFAULT : 0;
+	case WDIOC_GETSTATUS:
+	case WDIOC_GETBOOTSTATUS:
+		return put_user(0, p);
+	case WDIOC_SETOPTIONS:
+		if (get_user(new_options, p))
+			return -EFAULT;
+		if (new_options & WDIOS_DISABLECARD) {
+			tco_timer_stop();
+			retval = 0;
+		}
+		if (new_options & WDIOS_ENABLECARD) {
+			tco_timer_keepalive();
+			tco_timer_start();
+			retval = 0;
+		}
+		return retval;
+	case WDIOC_KEEPALIVE:
+		tco_timer_keepalive();
+		return 0;
+	case WDIOC_SETTIMEOUT:
+		if (get_user(new_heartbeat, p))
+			return -EFAULT;
+		if (tco_timer_set_heartbeat(new_heartbeat))
+			return -EINVAL;
+		tco_timer_keepalive();
+		/* Fall through */
+	case WDIOC_GETTIMEOUT:
+		return put_user(heartbeat, p);
+	default:
+		return -ENOTTY;
+	}
+}
+
+/*
+ *	Kernel Interfaces
+ */
+
+static const struct file_operations nv_tco_fops = {
+	.owner =		THIS_MODULE,
+	.llseek =		no_llseek,
+	.write =		nv_tco_write,
+	.unlocked_ioctl =	nv_tco_ioctl,
+	.open =			nv_tco_open,
+	.release =		nv_tco_release,
+};
+
+static struct miscdevice nv_tco_miscdev = {
+	.minor =	WATCHDOG_MINOR,
+	.name =		"watchdog",
+	.fops =		&nv_tco_fops,
+};
+
+/*
+ * Data for PCI driver interface
+ *
+ * This data only exists for exporting the supported
+ * PCI ids via MODULE_DEVICE_TABLE.  We do not actually
+ * register a pci_driver, because someone else might one day
+ * want to register another driver on the same PCI id.
+ */
+static struct pci_device_id tco_pci_tbl[] = {
+	{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SMBUS,
+	  PCI_ANY_ID, PCI_ANY_ID, },
+	{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SMBUS,
+	  PCI_ANY_ID, PCI_ANY_ID, },
+	{ 0, },			/* End of list */
+};
+MODULE_DEVICE_TABLE(pci, tco_pci_tbl);
+
+/*
+ *	Init & exit routines
+ */
+
+static unsigned char __init nv_tco_getdevice(void)
+{
+	struct pci_dev *dev = NULL;
+	u32 val;
+
+	/* Find the PCI device */
+	for_each_pci_dev(dev) {
+		if (pci_match_id(tco_pci_tbl, dev) != NULL) {
+			tco_pci = dev;
+			break;
+		}
+	}
+
+	if (!tco_pci)
+		return 0;
+
+	/* Find the base io port */
+	pci_read_config_dword(tco_pci, 0x64, &val);
+	val &= 0xffff;
+	if (val == 0x0001 || val == 0x0000) {
+		/* Something is wrong here, the BAR isn't set up */
+		printk(KERN_ERR PFX "failed to get tcobase address\n");
+		return 0;
+	}
+	val &= 0xff00;
+	tcobase = val + 0x40;
+
+	if (!request_region(tcobase, 0x10, "NV TCO")) {
+		printk(KERN_ERR PFX "I/O address 0x%04x already in use\n",
+		       tcobase);
+		return 0;
+	}
+
+	/* Set a reasonable heartbeat before we stop the timer */
+	tco_timer_set_heartbeat(30);
+
+	/*
+	 * Stop the TCO before we change anything so we don't race with
+	 * a zeroed timer.
+	 */
+	tco_timer_keepalive();
+	tco_timer_stop();
+
+	/* Disable SMI caused by TCO */
+	if (!request_region(MCP51_SMI_EN(tcobase), 4, "NV TCO")) {
+		printk(KERN_ERR PFX "I/O address 0x%04x already in use\n",
+		       MCP51_SMI_EN(tcobase));
+		goto out;
+	}
+	val = inl(MCP51_SMI_EN(tcobase));
+	val &= ~MCP51_SMI_EN_TCO;
+	outl(val, MCP51_SMI_EN(tcobase));
+	val = inl(MCP51_SMI_EN(tcobase));
+	release_region(MCP51_SMI_EN(tcobase), 4);
+	if (val & MCP51_SMI_EN_TCO) {
+		printk(KERN_ERR PFX "Could not disable SMI caused by TCO\n");
+		goto out;
+	}
+
+	/* Check chipset's NO_REBOOT bit */
+	pci_read_config_dword(tco_pci, MCP51_SMBUS_SETUP_B, &val);
+	val |= MCP51_SMBUS_SETUP_B_TCO_REBOOT;
+	pci_write_config_dword(tco_pci, MCP51_SMBUS_SETUP_B, val);
+	pci_read_config_dword(tco_pci, MCP51_SMBUS_SETUP_B, &val);
+	if (!(val & MCP51_SMBUS_SETUP_B_TCO_REBOOT)) {
+		printk(KERN_ERR PFX "failed to reset NO_REBOOT flag, reboot "
+		       "disabled by hardware\n");
+		goto out;
+	}
+
+	return 1;
+out:
+	release_region(tcobase, 0x10);
+	return 0;
+}
+
+static int __devinit nv_tco_init(struct platform_device *dev)
+{
+	int ret;
+
+	/* Check whether or not the hardware watchdog is there */
+	if (!nv_tco_getdevice())
+		return -ENODEV;
+
+	/* Check to see if last reboot was due to watchdog timeout */
+	printk(KERN_INFO PFX "Watchdog reboot %sdetected.\n",
+	       inl(TCO_STS(tcobase)) & TCO_STS_TCO2TO_STS ? "" : "not ");
+
+	/* Clear out the old status */
+	outl(TCO_STS_RESET, TCO_STS(tcobase));
+
+	/*
+	 * Check that the heartbeat value is within its range.
+	 * If not, reset to the default.
+	 */
+	if (tco_timer_set_heartbeat(heartbeat)) {
+		heartbeat = WATCHDOG_HEARTBEAT;
+		tco_timer_set_heartbeat(heartbeat);
+		printk(KERN_INFO PFX "heartbeat value must be 2<heartbeat<39, "
+		       "using %d\n", heartbeat);
+	}
+
+	ret = misc_register(&nv_tco_miscdev);
+	if (ret != 0) {
+		printk(KERN_ERR PFX "cannot register miscdev on minor=%d "
+		       "(err=%d)\n", WATCHDOG_MINOR, ret);
+		goto unreg_region;
+	}
+
+	clear_bit(0, &timer_alive);
+
+	tco_timer_stop();
+
+	printk(KERN_INFO PFX "initialized (0x%04x). heartbeat=%d sec "
+	       "(nowayout=%d)\n", tcobase, heartbeat, nowayout);
+
+	return 0;
+
+unreg_region:
+	release_region(tcobase, 0x10);
+	return ret;
+}
+
+static void __devexit nv_tco_cleanup(void)
+{
+	u32 val;
+
+	/* Stop the timer before we leave */
+	if (!nowayout)
+		tco_timer_stop();
+
+	/* Set the NO_REBOOT bit to prevent later reboots, just to be sure */
+	pci_read_config_dword(tco_pci, MCP51_SMBUS_SETUP_B, &val);
+	val &= ~MCP51_SMBUS_SETUP_B_TCO_REBOOT;
+	pci_write_config_dword(tco_pci, MCP51_SMBUS_SETUP_B, val);
+	pci_read_config_dword(tco_pci, MCP51_SMBUS_SETUP_B, &val);
+	if (val & MCP51_SMBUS_SETUP_B_TCO_REBOOT) {
+		printk(KERN_CRIT PFX "Couldn't unset REBOOT bit.  Machine may "
+		       "soon reset\n");
+	}
+
+	/* Deregister */
+	misc_deregister(&nv_tco_miscdev);
+	release_region(tcobase, 0x10);
+}
+
+static int __devexit nv_tco_remove(struct platform_device *dev)
+{
+	if (tcobase)
+		nv_tco_cleanup();
+
+	return 0;
+}
+
+static void nv_tco_shutdown(struct platform_device *dev)
+{
+	tco_timer_stop();
+}
+
+static struct platform_driver nv_tco_driver = {
+	.probe		= nv_tco_init,
+	.remove		= __devexit_p(nv_tco_remove),
+	.shutdown	= nv_tco_shutdown,
+	.driver		= {
+		.owner	= THIS_MODULE,
+		.name	= TCO_MODULE_NAME,
+	},
+};
+
+static int __init nv_tco_init_module(void)
+{
+	int err;
+
+	printk(KERN_INFO PFX "NV TCO WatchDog Timer Driver v%s\n",
+	       TCO_VERSION);
+
+	err = platform_driver_register(&nv_tco_driver);
+	if (err)
+		return err;
+
+	nv_tco_platform_device = platform_device_register_simple(
+					TCO_MODULE_NAME, -1, NULL, 0);
+	if (IS_ERR(nv_tco_platform_device)) {
+		err = PTR_ERR(nv_tco_platform_device);
+		goto unreg_platform_driver;
+	}
+
+	return 0;
+
+unreg_platform_driver:
+	platform_driver_unregister(&nv_tco_driver);
+	return err;
+}
+
+static void __exit nv_tco_cleanup_module(void)
+{
+	platform_device_unregister(nv_tco_platform_device);
+	platform_driver_unregister(&nv_tco_driver);
+	printk(KERN_INFO PFX "NV TCO Watchdog Module Unloaded.\n");
+}
+
+module_init(nv_tco_init_module);
+module_exit(nv_tco_cleanup_module);
+
+MODULE_AUTHOR("Mike Waychison");
+MODULE_DESCRIPTION("TCO timer driver for NV chipsets");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
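
The "2<heartbeat<39" range quoted in the nv_tco module parameter help follows directly from seconds_to_ticks() and the 0x04..0x3f tick window enforced by tco_timer_set_heartbeat(); a quick check of the boundary values (plain arithmetic, nothing assumed beyond the code above):

/*
 * One tick is 0.6 s and seconds_to_ticks(s) = s * 10 / 6 (integer division):
 *
 *   s = 2  ->  3 ticks -> below 0x04, rejected
 *   s = 3  ->  5 ticks -> accepted, actual timeout 3.0 s
 *   s = 38 -> 63 ticks -> accepted, actual timeout 37.8 s (0x3f is the max)
 *   s = 39 -> 65 ticks -> above 0x3f, rejected
 *
 * hence the advertised 2 < heartbeat < 39 window.
 */
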
diff --git a/drivers/watchdog/nv_tco.h b/drivers/watchdog/nv_tco.h
new file mode 100644
index 0000000..c2d1d04
--- /dev/null
+++ b/drivers/watchdog/nv_tco.h
@@ -0,0 +1,64 @@
+/*
+ *	nv_tco:	TCO timer driver for nVidia chipsets.
+ *
+ *	(c) Copyright 2005 Google Inc., All Rights Reserved.
+ *
+ *	Supported Chipsets:
+ *		- MCP51/MCP55
+ *
+ *	(c) Copyright 2000 kernel concepts <nils@kernelconcepts.de>, All Rights
+ *	Reserved.
+ *				http://www.kernelconcepts.de
+ *
+ *	This program is free software; you can redistribute it and/or
+ *	modify it under the terms of the GNU General Public License
+ *	as published by the Free Software Foundation; either version
+ *	2 of the License, or (at your option) any later version.
+ *
+ *	Neither kernel concepts nor Nils Faerber admit liability nor provide
+ *	warranty for any of this software. This material is provided
+ *	"AS-IS" and at no charge.
+ *
+ *	(c) Copyright 2000	kernel concepts <nils@kernelconcepts.de>
+ *				developed for
+ *                              Jentro AG, Haar/Munich (Germany)
+ *
+ *	TCO timer driver for NV chipsets
+ *	based on softdog.c by Alan Cox <alan@redhat.com>
+ */
+
+/*
+ * Some address definitions for the TCO
+ */
+
+#define TCO_RLD(base)	((base) + 0x00)	/* TCO Timer Reload and Current Value */
+#define TCO_TMR(base)	((base) + 0x01)	/* TCO Timer Initial Value	*/
+
+#define TCO_STS(base)	((base) + 0x04)	/* TCO Status Register		*/
+/*
+ * TCO Boot Status bit: set on TCO reset, reset by software or standby
+ * power-good (survives reboots), unfortunately this bit is never
+ * set.
+ */
+#  define TCO_STS_BOOT_STS	(1 << 9)
+/*
+ * first and 2nd timeout status bits, these also survive a warm boot,
+ * and they work, so we use them.
+ */
+#  define TCO_STS_TCO_INT_STS	(1 << 1)
+#  define TCO_STS_TCO2TO_STS	(1 << 10)
+#  define TCO_STS_RESET		(TCO_STS_BOOT_STS | TCO_STS_TCO2TO_STS | \
+				 TCO_STS_TCO_INT_STS)
+
+#define TCO_CNT(base)	((base) + 0x08)	/* TCO Control Register	*/
+#  define TCO_CNT_TCOHALT	(1 << 12)
+
+#define MCP51_SMBUS_SETUP_B 0xe8
+#  define MCP51_SMBUS_SETUP_B_TCO_REBOOT (1 << 25)
+
+/*
+ * The SMI_EN register is at the base io address + 0x04,
+ * while TCOBASE is + 0x40.
+ */
+#define MCP51_SMI_EN(base)	((base) - 0x40 + 0x04)
+#  define MCP51_SMI_EN_TCO	((1 << 4) | (1 << 5))
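
To make the MCP51_SMI_EN() arithmetic concrete: nv_tco_getdevice() takes the low word of PCI config offset 0x64, keeps the upper byte, and places the TCO block 0x40 above it, so SMI_EN ends up at base + 0x04 exactly as the comment says. The 0x0c00 base below is a made-up value used only for illustration.

/*
 *   config 0x64 & 0xff00    = 0x0c00   start of the SMBus/PM I/O block
 *   tcobase = 0x0c00 + 0x40 = 0x0c40   TCO_RLD/TCO_TMR/TCO_STS/TCO_CNT
 *   MCP51_SMI_EN(tcobase)   = 0x0c40 - 0x40 + 0x04 = 0x0c04 = base + 0x04
 */
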
diff --git a/drivers/watchdog/sp5100_tco.c b/drivers/watchdog/sp5100_tco.c
new file mode 100644
index 0000000..8083728
--- /dev/null
+++ b/drivers/watchdog/sp5100_tco.c
@@ -0,0 +1,480 @@
+/*
+ *	sp5100_tco :	TCO timer driver for sp5100 chipsets
+ *
+ *	(c) Copyright 2009 Google Inc., All Rights Reserved.
+ *
+ *	Based on i8xx_tco.c:
+ *	(c) Copyright 2000 kernel concepts <nils@kernelconcepts.de>, All Rights
+ *	Reserved.
+ *				http://www.kernelconcepts.de
+ *
+ *	This program is free software; you can redistribute it and/or
+ *	modify it under the terms of the GNU General Public License
+ *	as published by the Free Software Foundation; either version
+ *	2 of the License, or (at your option) any later version.
+ *
+ *	See AMD Publication 43009 "AMD SB700/710/750 Register Reference Guide"
+ */
+
+/*
+ *	Includes, defines, variables, module parameters, ...
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/types.h>
+#include <linux/miscdevice.h>
+#include <linux/watchdog.h>
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/pci.h>
+#include <linux/ioport.h>
+#include <linux/platform_device.h>
+#include <linux/uaccess.h>
+#include <linux/io.h>
+
+#include "sp5100_tco.h"
+
+/* Module and version information */
+#define TCO_VERSION "0.01"
+#define TCO_MODULE_NAME "SP5100 TCO timer"
+#define TCO_DRIVER_NAME   TCO_MODULE_NAME ", v" TCO_VERSION
+#define PFX TCO_MODULE_NAME ": "
+
+/* internal variables */
+static void __iomem *tcobase;
+static unsigned int pm_iobase;
+static DEFINE_SPINLOCK(tco_lock);	/* Guards the hardware */
+static unsigned long timer_alive;
+static char tco_expect_close;
+static struct pci_dev *sp5100_tco_pci;
+
+/* the watchdog platform device */
+static struct platform_device *sp5100_tco_platform_device;
+
+/* module parameters */
+
+#define WATCHDOG_HEARTBEAT 60	/* 60 sec default heartbeat. */
+static int heartbeat = WATCHDOG_HEARTBEAT;  /* in seconds */
+module_param(heartbeat, int, 0);
+MODULE_PARM_DESC(heartbeat, "Watchdog heartbeat in seconds. (default="
+		 __MODULE_STRING(WATCHDOG_HEARTBEAT) ")");
+
+static int nowayout = WATCHDOG_NOWAYOUT;
+module_param(nowayout, int, 0);
+MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started"
+		" (default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
+
+/*
+ * Some TCO specific functions
+ */
+static void tco_timer_start(void)
+{
+	u32 val;
+	unsigned long flags;
+
+	spin_lock_irqsave(&tco_lock, flags);
+	val = readl(SP5100_WDT_CONTROL(tcobase));
+	val |= SP5100_WDT_START_STOP_BIT;
+	writel(val, SP5100_WDT_CONTROL(tcobase));
+	spin_unlock_irqrestore(&tco_lock, flags);
+}
+
+static void tco_timer_stop(void)
+{
+	u32 val;
+	unsigned long flags;
+
+	spin_lock_irqsave(&tco_lock, flags);
+	val = readl(SP5100_WDT_CONTROL(tcobase));
+	val &= ~SP5100_WDT_START_STOP_BIT;
+	writel(val, SP5100_WDT_CONTROL(tcobase));
+	spin_unlock_irqrestore(&tco_lock, flags);
+}
+
+static void tco_timer_keepalive(void)
+{
+	u32 val;
+	unsigned long flags;
+
+	spin_lock_irqsave(&tco_lock, flags);
+	val = readl(SP5100_WDT_CONTROL(tcobase));
+	val |= SP5100_WDT_TRIGGER_BIT;
+	writel(val, SP5100_WDT_CONTROL(tcobase));
+	spin_unlock_irqrestore(&tco_lock, flags);
+}
+
+static int tco_timer_set_heartbeat(int t)
+{
+	unsigned long flags;
+
+	if (t < 0 || t > 0xffff)
+		return -EINVAL;
+
+	/* Write new heartbeat to watchdog */
+	spin_lock_irqsave(&tco_lock, flags);
+	writel(t, SP5100_WDT_COUNT(tcobase));
+	spin_unlock_irqrestore(&tco_lock, flags);
+
+	heartbeat = t;
+	return 0;
+}
+
+/*
+ *	/dev/watchdog handling
+ */
+
+static int sp5100_tco_open(struct inode *inode, struct file *file)
+{
+	/* /dev/watchdog can only be opened once */
+	if (test_and_set_bit(0, &timer_alive))
+		return -EBUSY;
+
+	/* Reload and activate timer */
+	tco_timer_start();
+	tco_timer_keepalive();
+	return nonseekable_open(inode, file);
+}
+
+static int sp5100_tco_release(struct inode *inode, struct file *file)
+{
+	/* Shut off the timer. */
+	if (tco_expect_close == 42) {
+		tco_timer_stop();
+	} else {
+		printk(KERN_CRIT PFX
+			"Unexpected close, not stopping watchdog!\n");
+		tco_timer_keepalive();
+	}
+	clear_bit(0, &timer_alive);
+	tco_expect_close = 0;
+	return 0;
+}
+
+static ssize_t sp5100_tco_write(struct file *file, const char __user *data,
+				size_t len, loff_t *ppos)
+{
+	/* See if we got the magic character 'V' and reload the timer */
+	if (len) {
+		if (!nowayout) {
+			size_t i;
+
+			/* note: just in case someone wrote the magic character
+			 * five months ago... */
+			tco_expect_close = 0;
+
+			/* scan to see whether or not we got the
+			 * magic character */
+			for (i = 0; i != len; i++) {
+				char c;
+				if (get_user(c, data + i))
+					return -EFAULT;
+				if (c == 'V')
+					tco_expect_close = 42;
+			}
+		}
+
+		/* someone wrote to us, we should reload the timer */
+		tco_timer_keepalive();
+	}
+	return len;
+}
+
+static long sp5100_tco_ioctl(struct file *file, unsigned int cmd,
+			     unsigned long arg)
+{
+	int new_options, retval = -EINVAL;
+	int new_heartbeat;
+	void __user *argp = (void __user *)arg;
+	int __user *p = argp;
+	static const struct watchdog_info ident = {
+		.options =		WDIOF_SETTIMEOUT |
+					WDIOF_KEEPALIVEPING |
+					WDIOF_MAGICCLOSE,
+		.firmware_version =	0,
+		.identity =		TCO_MODULE_NAME,
+	};
+
+	switch (cmd) {
+	case WDIOC_GETSUPPORT:
+		return copy_to_user(argp, &ident,
+			sizeof(ident)) ? -EFAULT : 0;
+	case WDIOC_GETSTATUS:
+	case WDIOC_GETBOOTSTATUS:
+		return put_user(0, p);
+	case WDIOC_SETOPTIONS:
+		if (get_user(new_options, p))
+			return -EFAULT;
+		if (new_options & WDIOS_DISABLECARD) {
+			tco_timer_stop();
+			retval = 0;
+		}
+		if (new_options & WDIOS_ENABLECARD) {
+			tco_timer_start();
+			tco_timer_keepalive();
+			retval = 0;
+		}
+		return retval;
+	case WDIOC_KEEPALIVE:
+		tco_timer_keepalive();
+		return 0;
+	case WDIOC_SETTIMEOUT:
+		if (get_user(new_heartbeat, p))
+			return -EFAULT;
+		if (tco_timer_set_heartbeat(new_heartbeat))
+			return -EINVAL;
+		tco_timer_keepalive();
+		/* Fall through */
+	case WDIOC_GETTIMEOUT:
+		return put_user(heartbeat, p);
+	default:
+		return -ENOTTY;
+	}
+}
+
+/*
+ * Kernel Interfaces
+ */
+
+static const struct file_operations sp5100_tco_fops = {
+	.owner =		THIS_MODULE,
+	.llseek =		no_llseek,
+	.write =		sp5100_tco_write,
+	.unlocked_ioctl =	sp5100_tco_ioctl,
+	.open =			sp5100_tco_open,
+	.release =		sp5100_tco_release,
+};
+
+static struct miscdevice sp5100_tco_miscdev = {
+	.minor =	WATCHDOG_MINOR,
+	.name =		"watchdog",
+	.fops =		&sp5100_tco_fops,
+};
+
+/*
+ * Data for PCI driver interface
+ *
+ * This data only exists for exporting the supported
+ * PCI ids via MODULE_DEVICE_TABLE.  We do not actually
+ * register a pci_driver, because someone else might
+ * want to register another driver on the same PCI id.
+ */
+static struct pci_device_id sp5100_tco_pci_tbl[] = {
+	{ PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_SBX00_SMBUS, PCI_ANY_ID,
+	  PCI_ANY_ID, },
+	{ 0, },			/* End of list */
+};
+MODULE_DEVICE_TABLE(pci, sp5100_tco_pci_tbl);
+
+/*
+ * Init & exit routines
+ */
+
+static unsigned char __devinit sp5100_tco_setupdevice(void)
+{
+	struct pci_dev *dev = NULL;
+	u32 val;
+
+	/* Match the PCI device */
+	for_each_pci_dev(dev) {
+		if (pci_match_id(sp5100_tco_pci_tbl, dev) != NULL) {
+			sp5100_tco_pci = dev;
+			break;
+		}
+	}
+
+	if (!sp5100_tco_pci)
+		return 0;
+
+	/* Request the IO ports used by this driver */
+	pm_iobase = SP5100_IO_PM_INDEX_REG;
+	if (!request_region(pm_iobase, SP5100_PM_IOPORTS_SIZE, "SP5100 TCO")) {
+		printk(KERN_ERR PFX "I/O address 0x%04x already in use\n",
+			pm_iobase);
+		goto exit;
+	}
+
+	/* Find the watchdog base address. */
+	outb(SP5100_PM_WATCHDOG_BASE3, SP5100_IO_PM_INDEX_REG);
+	val = inb(SP5100_IO_PM_DATA_REG);
+	outb(SP5100_PM_WATCHDOG_BASE2, SP5100_IO_PM_INDEX_REG);
+	val = val << 8 | inb(SP5100_IO_PM_DATA_REG);
+	outb(SP5100_PM_WATCHDOG_BASE1, SP5100_IO_PM_INDEX_REG);
+	val = val << 8 | inb(SP5100_IO_PM_DATA_REG);
+	outb(SP5100_PM_WATCHDOG_BASE0, SP5100_IO_PM_INDEX_REG);
+	/* Low three bits of BASE0 are reserved. */
+	val = val << 8 | (inb(SP5100_IO_PM_DATA_REG) & 0xf8);
+
+	tcobase = ioremap(val, SP5100_WDT_MEM_MAP_SIZE);
+	if (tcobase == 0) {
+		printk(KERN_ERR PFX "failed to get tcobase address\n");
+		goto unreg_region;
+	}
+
+	/* Enable watchdog decode bit */
+	pci_read_config_dword(sp5100_tco_pci,
+			      SP5100_PCI_WATCHDOG_MISC_REG,
+			      &val);
+
+	val |= SP5100_PCI_WATCHDOG_DECODE_EN;
+
+	pci_write_config_dword(sp5100_tco_pci,
+			       SP5100_PCI_WATCHDOG_MISC_REG,
+			       val);
+
+	/* Enable Watchdog timer and set the resolution to 1 sec. */
+	outb(SP5100_PM_WATCHDOG_CONTROL, SP5100_IO_PM_INDEX_REG);
+	val = inb(SP5100_IO_PM_DATA_REG);
+	val |= SP5100_PM_WATCHDOG_SECOND_RES;
+	val &= ~SP5100_PM_WATCHDOG_DISABLE;
+	outb(val, SP5100_IO_PM_DATA_REG);
+
+	/* Make sure the watchdog action is set to reset the system. */
+	val = readl(SP5100_WDT_CONTROL(tcobase));
+	val &= ~SP5100_PM_WATCHDOG_ACTION_RESET;
+	writel(val, SP5100_WDT_CONTROL(tcobase));
+
+	/* Set a reasonable heartbeat before we stop the timer */
+	tco_timer_set_heartbeat(heartbeat);
+
+	/*
+	 * Stop the TCO before we change anything so we don't race with
+	 * a zeroed timer.
+	 */
+	tco_timer_stop();
+
+	/* Done */
+	return 1;
+
+	iounmap(tcobase);
+unreg_region:
+	release_region(pm_iobase, SP5100_PM_IOPORTS_SIZE);
+exit:
+	return 0;
+}
+
+static int __devinit sp5100_tco_init(struct platform_device *dev)
+{
+	int ret;
+	u32 val;
+
+	/* Check whether or not the hardware watchdog is there. If found, then
+	 * set it up.
+	 */
+	if (!sp5100_tco_setupdevice())
+		return -ENODEV;
+
+	/* Check to see if last reboot was due to watchdog timeout */
+	printk(KERN_INFO PFX "Watchdog reboot %sdetected.\n",
+	       readl(SP5100_WDT_CONTROL(tcobase)) & SP5100_PM_WATCHDOG_FIRED ?
+		      "" : "not ");
+
+	/* Clear out the old status */
+	val = readl(SP5100_WDT_CONTROL(tcobase));
+	val &= ~SP5100_PM_WATCHDOG_FIRED;
+	writel(val, SP5100_WDT_CONTROL(tcobase));
+
+	/*
+	 * Check that the heartbeat value is within its range.
+	 * If not, reset to the default.
+	 */
+	if (tco_timer_set_heartbeat(heartbeat)) {
+		heartbeat = WATCHDOG_HEARTBEAT;
+		tco_timer_set_heartbeat(heartbeat);
+	}
+
+	ret = misc_register(&sp5100_tco_miscdev);
+	if (ret != 0) {
+		printk(KERN_ERR PFX "cannot register miscdev on minor="
+		       "%d (err=%d)\n",
+		       WATCHDOG_MINOR, ret);
+		goto exit;
+	}
+
+	clear_bit(0, &timer_alive);
+
+	printk(KERN_INFO PFX "initialized (0x%p). heartbeat=%d sec"
+		" (nowayout=%d)\n",
+		tcobase, heartbeat, nowayout);
+
+	return 0;
+
+exit:
+	iounmap(tcobase);
+	release_region(pm_iobase, SP5100_PM_IOPORTS_SIZE);
+	return ret;
+}
+
+static void __devexit sp5100_tco_cleanup(void)
+{
+	/* Stop the timer before we leave */
+	if (!nowayout)
+		tco_timer_stop();
+
+	/* Deregister */
+	misc_deregister(&sp5100_tco_miscdev);
+	iounmap(tcobase);
+	release_region(pm_iobase, SP5100_PM_IOPORTS_SIZE);
+}
+
+static int __devexit sp5100_tco_remove(struct platform_device *dev)
+{
+	if (tcobase)
+		sp5100_tco_cleanup();
+	return 0;
+}
+
+static void sp5100_tco_shutdown(struct platform_device *dev)
+{
+	tco_timer_stop();
+}
+
+static struct platform_driver sp5100_tco_driver = {
+	.probe		= sp5100_tco_init,
+	.remove		= __devexit_p(sp5100_tco_remove),
+	.shutdown	= sp5100_tco_shutdown,
+	.driver		= {
+		.owner	= THIS_MODULE,
+		.name	= TCO_MODULE_NAME,
+	},
+};
+
+static int __init sp5100_tco_init_module(void)
+{
+	int err;
+
+	printk(KERN_INFO PFX "SP5100 TCO WatchDog Timer Driver v%s\n",
+	       TCO_VERSION);
+
+	err = platform_driver_register(&sp5100_tco_driver);
+	if (err)
+		return err;
+
+	sp5100_tco_platform_device = platform_device_register_simple(
+					TCO_MODULE_NAME, -1, NULL, 0);
+	if (IS_ERR(sp5100_tco_platform_device)) {
+		err = PTR_ERR(sp5100_tco_platform_device);
+		goto unreg_platform_driver;
+	}
+
+	return 0;
+
+unreg_platform_driver:
+	platform_driver_unregister(&sp5100_tco_driver);
+	return err;
+}
+
+static void __exit sp5100_tco_cleanup_module(void)
+{
+	platform_device_unregister(sp5100_tco_platform_device);
+	platform_driver_unregister(&sp5100_tco_driver);
+	printk(KERN_INFO PFX "SP5100 TCO Watchdog Module Unloaded.\n");
+}
+
+module_init(sp5100_tco_init_module);
+module_exit(sp5100_tco_cleanup_module);
+
+MODULE_AUTHOR("Priyanka Gupta");
+MODULE_DESCRIPTION("TCO timer driver for SP5100 chipset");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
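
Unlike the nv_tco timer, the SP5100 count register is programmed directly in seconds (the setup code selects the 1-second resolution via SP5100_PM_WATCHDOG_SECOND_RES), so the accepted heartbeat range works out as follows; a small sanity check of tco_timer_set_heartbeat(), using nothing beyond the code above:

/*
 *   resolution : 1 second per count (SP5100_PM_WATCHDOG_SECOND_RES)
 *   range      : 0 <= t <= 0xffff counts, i.e. up to 65535 s (~18.2 h)
 *   default    : WATCHDOG_HEARTBEAT = 60 counts = 60 s
 */
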
diff --git a/drivers/watchdog/sp5100_tco.h b/drivers/watchdog/sp5100_tco.h
new file mode 100644
index 0000000..a5a16cc
--- /dev/null
+++ b/drivers/watchdog/sp5100_tco.h
@@ -0,0 +1,41 @@
+/*
+ *	sp5100_tco:	TCO timer driver for sp5100 chipsets.
+ *
+ *	(c) Copyright 2009 Google Inc., All Rights Reserved.
+ *
+ *	TCO timer driver for sp5100 chipsets
+ */
+
+/*
+ * Some address definitions for the Watchdog
+ */
+
+#define SP5100_WDT_MEM_MAP_SIZE		0x08
+#define SP5100_WDT_CONTROL(base)	((base) + 0x00) /* Watchdog Control */
+#define SP5100_WDT_COUNT(base)		((base) + 0x04) /* Watchdog Count */
+
+#define SP5100_WDT_START_STOP_BIT	1
+#define SP5100_WDT_TRIGGER_BIT		(1 << 7)
+
+#define SP5100_PCI_WATCHDOG_MISC_REG	0x41
+#define SP5100_PCI_WATCHDOG_DECODE_EN	(1 << 3)
+
+#define SP5100_PM_IOPORTS_SIZE		0x02
+
+/* These two IO registers are hardcoded and there doesn't seem to be a way to
+ * read them from a register.
+ */
+#define SP5100_IO_PM_INDEX_REG		0xCD6
+#define SP5100_IO_PM_DATA_REG		0xCD7
+
+#define SP5100_PM_WATCHDOG_CONTROL	0x69
+#define SP5100_PM_WATCHDOG_BASE0	0x6C
+#define SP5100_PM_WATCHDOG_BASE1	0x6D
+#define SP5100_PM_WATCHDOG_BASE2	0x6E
+#define SP5100_PM_WATCHDOG_BASE3	0x6F
+
+#define SP5100_PM_WATCHDOG_FIRED	(1 << 1)
+#define SP5100_PM_WATCHDOG_ACTION_RESET	(1 << 2)
+
+#define SP5100_PM_WATCHDOG_DISABLE	1
+#define SP5100_PM_WATCHDOG_SECOND_RES	(3 << 1)
diff --git a/drivers/watchdog/w83627hf_wdt.c b/drivers/watchdog/w83627hf_wdt.c
index 0f5288d..e5c91d4 100644
--- a/drivers/watchdog/w83627hf_wdt.c
+++ b/drivers/watchdog/w83627hf_wdt.c
@@ -42,7 +42,7 @@
 
 #include <asm/system.h>
 
-#define WATCHDOG_NAME "w83627hf/thf/hg WDT"
+#define WATCHDOG_NAME "w83627hf/thf/hg/dhg WDT"
 #define PFX WATCHDOG_NAME ": "
 #define WATCHDOG_TIMEOUT 60		/* 60 sec default timeout */
 
@@ -89,7 +89,7 @@
 		c = ((inb_p(WDT_EFDR) & 0xf7) | 0x04); /* select WDT0 */
 		outb_p(0x2b, WDT_EFER);
 		outb_p(c, WDT_EFDR);	/* set GPIO3 to WDT0 */
-	} else if (c == 0x88) {	/* W83627EHF */
+	} else if (c == 0x88 || c == 0xa0) {	/* W83627EHF / W83627DHG */
 		outb_p(0x2d, WDT_EFER); /* select GPIO5 */
 		c = inb_p(WDT_EFDR) & ~0x01; /* PIN77 -> WDT0# */
 		outb_p(0x2d, WDT_EFER);
@@ -129,6 +129,8 @@
 	t = inb_p(WDT_EFDR);      /* read CRF5 */
 	t &= ~0x0C;               /* set second mode & disable keyboard
 				    turning off watchdog */
+	t |= 0x02;		  /* enable the WDTO# output low pulse
+				    to the KBRST# pin (PIN60) */
 	outb_p(t, WDT_EFDR);    /* Write back to CRF5 */
 
 	outb_p(0xF7, WDT_EFER); /* Select CRF7 */
@@ -321,7 +323,7 @@
 {
 	int ret;
 
-	printk(KERN_INFO "WDT driver for the Winbond(TM) W83627HF/THF/HG Super I/O chip initialising.\n");
+	printk(KERN_INFO "WDT driver for the Winbond(TM) W83627HF/THF/HG/DHG Super I/O chip initialising.\n");
 
 	if (wdt_set_heartbeat(timeout)) {
 		wdt_set_heartbeat(WATCHDOG_TIMEOUT);
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
index 6e6180c..5a48ce9 100644
--- a/drivers/xen/Kconfig
+++ b/drivers/xen/Kconfig
@@ -29,6 +29,14 @@
 	  firing.
 	  If in doubt, say yes.
 
+config XEN_BACKEND
+	bool "Backend driver support"
+	depends on XEN_DOM0
+	default y
+	help
+	  Support for backend device drivers that provide I/O services
+	  to other virtual machines.
+
 config XENFS
 	tristate "Xen filesystem"
 	default y
@@ -62,6 +70,9 @@
 	 virtual environment, /sys/hypervisor will still be present,
 	 but will have no xen contents.
 
+config XEN_XENBUS_FRONTEND
+	tristate
+
 config XEN_PLATFORM_PCI
 	tristate "xen platform pci device driver"
 	depends on XEN_PVHVM
diff --git a/drivers/xen/xenbus/Makefile b/drivers/xen/xenbus/Makefile
index 5571f5b..8dca685 100644
--- a/drivers/xen/xenbus/Makefile
+++ b/drivers/xen/xenbus/Makefile
@@ -5,3 +5,8 @@
 xenbus-objs += xenbus_comms.o
 xenbus-objs += xenbus_xs.o
 xenbus-objs += xenbus_probe.o
+
+xenbus-be-objs-$(CONFIG_XEN_BACKEND) += xenbus_probe_backend.o
+xenbus-objs += $(xenbus-be-objs-y)
+
+obj-$(CONFIG_XEN_XENBUS_FRONTEND) += xenbus_probe_frontend.o
diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c
index deb9c4b..baa65e7 100644
--- a/drivers/xen/xenbus/xenbus_probe.c
+++ b/drivers/xen/xenbus/xenbus_probe.c
@@ -56,7 +56,6 @@
 #include <xen/events.h>
 #include <xen/page.h>
 
-#include <xen/platform_pci.h>
 #include <xen/hvm.h>
 
 #include "xenbus_comms.h"
@@ -73,15 +72,6 @@
 
 static BLOCKING_NOTIFIER_HEAD(xenstore_chain);
 
-static void wait_for_devices(struct xenbus_driver *xendrv);
-
-static int xenbus_probe_frontend(const char *type, const char *name);
-
-static void xenbus_dev_shutdown(struct device *_dev);
-
-static int xenbus_dev_suspend(struct device *dev, pm_message_t state);
-static int xenbus_dev_resume(struct device *dev);
-
 /* If something in array of ids matches this device, return it. */
 static const struct xenbus_device_id *
 match_device(const struct xenbus_device_id *arr, struct xenbus_device *dev)
@@ -102,34 +92,7 @@
 
 	return match_device(drv->ids, to_xenbus_device(_dev)) != NULL;
 }
-
-static int xenbus_uevent(struct device *_dev, struct kobj_uevent_env *env)
-{
-	struct xenbus_device *dev = to_xenbus_device(_dev);
-
-	if (add_uevent_var(env, "MODALIAS=xen:%s", dev->devicetype))
-		return -ENOMEM;
-
-	return 0;
-}
-
-/* device/<type>/<id> => <type>-<id> */
-static int frontend_bus_id(char bus_id[XEN_BUS_ID_SIZE], const char *nodename)
-{
-	nodename = strchr(nodename, '/');
-	if (!nodename || strlen(nodename + 1) >= XEN_BUS_ID_SIZE) {
-		printk(KERN_WARNING "XENBUS: bad frontend %s\n", nodename);
-		return -EINVAL;
-	}
-
-	strlcpy(bus_id, nodename + 1, XEN_BUS_ID_SIZE);
-	if (!strchr(bus_id, '/')) {
-		printk(KERN_WARNING "XENBUS: bus_id %s no slash\n", bus_id);
-		return -EINVAL;
-	}
-	*strchr(bus_id, '/') = '-';
-	return 0;
-}
+EXPORT_SYMBOL_GPL(xenbus_match);
 
 
 static void free_otherend_details(struct xenbus_device *dev)
@@ -149,7 +112,30 @@
 }
 
 
-int read_otherend_details(struct xenbus_device *xendev,
+static int talk_to_otherend(struct xenbus_device *dev)
+{
+	struct xenbus_driver *drv = to_xenbus_driver(dev->dev.driver);
+
+	free_otherend_watch(dev);
+	free_otherend_details(dev);
+
+	return drv->read_otherend_details(dev);
+}
+
+
+
+static int watch_otherend(struct xenbus_device *dev)
+{
+	struct xen_bus_type *bus =
+		container_of(dev->dev.bus, struct xen_bus_type, bus);
+
+	return xenbus_watch_pathfmt(dev, &dev->otherend_watch,
+				    bus->otherend_changed,
+				    "%s/%s", dev->otherend, "state");
+}
+
+
+int xenbus_read_otherend_details(struct xenbus_device *xendev,
 				 char *id_node, char *path_node)
 {
 	int err = xenbus_gather(XBT_NIL, xendev->nodename,
@@ -174,39 +160,11 @@
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(xenbus_read_otherend_details);
 
-
-static int read_backend_details(struct xenbus_device *xendev)
-{
-	return read_otherend_details(xendev, "backend-id", "backend");
-}
-
-static struct device_attribute xenbus_dev_attrs[] = {
-	__ATTR_NULL
-};
-
-/* Bus type for frontend drivers. */
-static struct xen_bus_type xenbus_frontend = {
-	.root = "device",
-	.levels = 2, 		/* device/type/<id> */
-	.get_bus_id = frontend_bus_id,
-	.probe = xenbus_probe_frontend,
-	.bus = {
-		.name      = "xen",
-		.match     = xenbus_match,
-		.uevent    = xenbus_uevent,
-		.probe     = xenbus_dev_probe,
-		.remove    = xenbus_dev_remove,
-		.shutdown  = xenbus_dev_shutdown,
-		.dev_attrs = xenbus_dev_attrs,
-
-		.suspend   = xenbus_dev_suspend,
-		.resume    = xenbus_dev_resume,
-	},
-};
-
-static void otherend_changed(struct xenbus_watch *watch,
-			     const char **vec, unsigned int len)
+void xenbus_otherend_changed(struct xenbus_watch *watch,
+			     const char **vec, unsigned int len,
+			     int ignore_on_shutdown)
 {
 	struct xenbus_device *dev =
 		container_of(watch, struct xenbus_device, otherend_watch);
@@ -234,11 +192,7 @@
 	 * work that can fail e.g., when the rootfs is gone.
 	 */
 	if (system_state > SYSTEM_RUNNING) {
-		struct xen_bus_type *bus = bus;
-		bus = container_of(dev->dev.bus, struct xen_bus_type, bus);
-		/* If we're frontend, drive the state machine to Closed. */
-		/* This should cause the backend to release our resources. */
-		if ((bus == &xenbus_frontend) && (state == XenbusStateClosing))
+		if (ignore_on_shutdown && (state == XenbusStateClosing))
 			xenbus_frontend_closed(dev);
 		return;
 	}
@@ -246,25 +200,7 @@
 	if (drv->otherend_changed)
 		drv->otherend_changed(dev, state);
 }
-
-
-static int talk_to_otherend(struct xenbus_device *dev)
-{
-	struct xenbus_driver *drv = to_xenbus_driver(dev->dev.driver);
-
-	free_otherend_watch(dev);
-	free_otherend_details(dev);
-
-	return drv->read_otherend_details(dev);
-}
-
-
-static int watch_otherend(struct xenbus_device *dev)
-{
-	return xenbus_watch_pathfmt(dev, &dev->otherend_watch, otherend_changed,
-				    "%s/%s", dev->otherend, "state");
-}
-
+EXPORT_SYMBOL_GPL(xenbus_otherend_changed);
 
 int xenbus_dev_probe(struct device *_dev)
 {
@@ -308,8 +244,9 @@
 fail:
 	xenbus_dev_error(dev, err, "xenbus_dev_probe on %s", dev->nodename);
 	xenbus_switch_state(dev, XenbusStateClosed);
-	return -ENODEV;
+	return err;
 }
+EXPORT_SYMBOL_GPL(xenbus_dev_probe);
 
 int xenbus_dev_remove(struct device *_dev)
 {
@@ -327,8 +264,9 @@
 	xenbus_switch_state(dev, XenbusStateClosed);
 	return 0;
 }
+EXPORT_SYMBOL_GPL(xenbus_dev_remove);
 
-static void xenbus_dev_shutdown(struct device *_dev)
+void xenbus_dev_shutdown(struct device *_dev)
 {
 	struct xenbus_device *dev = to_xenbus_device(_dev);
 	unsigned long timeout = 5*HZ;
@@ -349,6 +287,7 @@
  out:
 	put_device(&dev->dev);
 }
+EXPORT_SYMBOL_GPL(xenbus_dev_shutdown);
 
 int xenbus_register_driver_common(struct xenbus_driver *drv,
 				  struct xen_bus_type *bus,
@@ -362,25 +301,7 @@
 
 	return driver_register(&drv->driver);
 }
-
-int __xenbus_register_frontend(struct xenbus_driver *drv,
-			       struct module *owner, const char *mod_name)
-{
-	int ret;
-
-	drv->read_otherend_details = read_backend_details;
-
-	ret = xenbus_register_driver_common(drv, &xenbus_frontend,
-					    owner, mod_name);
-	if (ret)
-		return ret;
-
-	/* If this driver is loaded as a module wait for devices to attach. */
-	wait_for_devices(drv);
-
-	return 0;
-}
-EXPORT_SYMBOL_GPL(__xenbus_register_frontend);
+EXPORT_SYMBOL_GPL(xenbus_register_driver_common);
 
 void xenbus_unregister_driver(struct xenbus_driver *drv)
 {
@@ -551,24 +472,7 @@
 	kfree(xendev);
 	return err;
 }
-
-/* device/<typename>/<name> */
-static int xenbus_probe_frontend(const char *type, const char *name)
-{
-	char *nodename;
-	int err;
-
-	nodename = kasprintf(GFP_KERNEL, "%s/%s/%s",
-			     xenbus_frontend.root, type, name);
-	if (!nodename)
-		return -ENOMEM;
-
-	DPRINTK("%s", nodename);
-
-	err = xenbus_probe_node(&xenbus_frontend, type, nodename);
-	kfree(nodename);
-	return err;
-}
+EXPORT_SYMBOL_GPL(xenbus_probe_node);
 
 static int xenbus_probe_device_type(struct xen_bus_type *bus, const char *type)
 {
@@ -582,10 +486,11 @@
 		return PTR_ERR(dir);
 
 	for (i = 0; i < dir_n; i++) {
-		err = bus->probe(type, dir[i]);
+		err = bus->probe(bus, type, dir[i]);
 		if (err)
 			break;
 	}
+
 	kfree(dir);
 	return err;
 }
@@ -605,9 +510,11 @@
 		if (err)
 			break;
 	}
+
 	kfree(dir);
 	return err;
 }
+EXPORT_SYMBOL_GPL(xenbus_probe_devices);
 
 static unsigned int char_count(const char *str, char c)
 {
@@ -670,32 +577,18 @@
 }
 EXPORT_SYMBOL_GPL(xenbus_dev_changed);
 
-static void frontend_changed(struct xenbus_watch *watch,
-			     const char **vec, unsigned int len)
-{
-	DPRINTK("");
-
-	xenbus_dev_changed(vec[XS_WATCH_PATH], &xenbus_frontend);
-}
-
-/* We watch for devices appearing and vanishing. */
-static struct xenbus_watch fe_watch = {
-	.node = "device",
-	.callback = frontend_changed,
-};
-
-static int xenbus_dev_suspend(struct device *dev, pm_message_t state)
+int xenbus_dev_suspend(struct device *dev, pm_message_t state)
 {
 	int err = 0;
 	struct xenbus_driver *drv;
-	struct xenbus_device *xdev;
+	struct xenbus_device *xdev
+		= container_of(dev, struct xenbus_device, dev);
 
-	DPRINTK("");
+	DPRINTK("%s", xdev->nodename);
 
 	if (dev->driver == NULL)
 		return 0;
 	drv = to_xenbus_driver(dev->driver);
-	xdev = container_of(dev, struct xenbus_device, dev);
 	if (drv->suspend)
 		err = drv->suspend(xdev, state);
 	if (err)
@@ -703,21 +596,20 @@
 		       "xenbus: suspend %s failed: %i\n", dev_name(dev), err);
 	return 0;
 }
+EXPORT_SYMBOL_GPL(xenbus_dev_suspend);
 
-static int xenbus_dev_resume(struct device *dev)
+int xenbus_dev_resume(struct device *dev)
 {
 	int err;
 	struct xenbus_driver *drv;
-	struct xenbus_device *xdev;
+	struct xenbus_device *xdev
+		= container_of(dev, struct xenbus_device, dev);
 
-	DPRINTK("");
+	DPRINTK("%s", xdev->nodename);
 
 	if (dev->driver == NULL)
 		return 0;
-
 	drv = to_xenbus_driver(dev->driver);
-	xdev = container_of(dev, struct xenbus_device, dev);
-
 	err = talk_to_otherend(xdev);
 	if (err) {
 		printk(KERN_WARNING
@@ -748,6 +640,7 @@
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(xenbus_dev_resume);
 
 /* A flag to determine if xenstored is 'ready' (i.e. has started) */
 int xenstored_ready = 0;
@@ -776,11 +669,6 @@
 {
 	xenstored_ready = 1;
 
-	/* Enumerate devices in xenstore and watch for changes. */
-	xenbus_probe_devices(&xenbus_frontend);
-	register_xenbus_watch(&fe_watch);
-	xenbus_backend_probe_and_watch();
-
 	/* Notify others that xenstore is up */
 	blocking_notifier_call_chain(&xenstore_chain, 0, NULL);
 }
@@ -809,16 +697,7 @@
 
 	err = -ENODEV;
 	if (!xen_domain())
-		goto out_error;
-
-	/* Register ourselves with the kernel bus subsystem */
-	err = bus_register(&xenbus_frontend.bus);
-	if (err)
-		goto out_error;
-
-	err = xenbus_backend_bus_register();
-	if (err)
-		goto out_unreg_front;
+		return err;
 
 	/*
 	 * Domain0 doesn't have a store_evtchn or store_mfn yet.
@@ -874,7 +753,7 @@
 	if (err) {
 		printk(KERN_WARNING
 		       "XENBUS: Error initializing xenstore comms: %i\n", err);
-		goto out_unreg_back;
+		goto out_error;
 	}
 
 #ifdef CONFIG_XEN_COMPAT_XENFS
@@ -887,133 +766,13 @@
 
 	return 0;
 
-  out_unreg_back:
-	xenbus_backend_bus_unregister();
-
-  out_unreg_front:
-	bus_unregister(&xenbus_frontend.bus);
-
   out_error:
 	if (page != 0)
 		free_page(page);
+
 	return err;
 }
 
 postcore_initcall(xenbus_init);
 
 MODULE_LICENSE("GPL");
-
-static int is_device_connecting(struct device *dev, void *data)
-{
-	struct xenbus_device *xendev = to_xenbus_device(dev);
-	struct device_driver *drv = data;
-	struct xenbus_driver *xendrv;
-
-	/*
-	 * A device with no driver will never connect. We care only about
-	 * devices which should currently be in the process of connecting.
-	 */
-	if (!dev->driver)
-		return 0;
-
-	/* Is this search limited to a particular driver? */
-	if (drv && (dev->driver != drv))
-		return 0;
-
-	xendrv = to_xenbus_driver(dev->driver);
-	return (xendev->state < XenbusStateConnected ||
-		(xendev->state == XenbusStateConnected &&
-		 xendrv->is_ready && !xendrv->is_ready(xendev)));
-}
-
-static int exists_connecting_device(struct device_driver *drv)
-{
-	return bus_for_each_dev(&xenbus_frontend.bus, NULL, drv,
-				is_device_connecting);
-}
-
-static int print_device_status(struct device *dev, void *data)
-{
-	struct xenbus_device *xendev = to_xenbus_device(dev);
-	struct device_driver *drv = data;
-
-	/* Is this operation limited to a particular driver? */
-	if (drv && (dev->driver != drv))
-		return 0;
-
-	if (!dev->driver) {
-		/* Information only: is this too noisy? */
-		printk(KERN_INFO "XENBUS: Device with no driver: %s\n",
-		       xendev->nodename);
-	} else if (xendev->state < XenbusStateConnected) {
-		enum xenbus_state rstate = XenbusStateUnknown;
-		if (xendev->otherend)
-			rstate = xenbus_read_driver_state(xendev->otherend);
-		printk(KERN_WARNING "XENBUS: Timeout connecting "
-		       "to device: %s (local state %d, remote state %d)\n",
-		       xendev->nodename, xendev->state, rstate);
-	}
-
-	return 0;
-}
-
-/* We only wait for device setup after most initcalls have run. */
-static int ready_to_wait_for_devices;
-
-/*
- * On a 5-minute timeout, wait for all devices currently configured.  We need
- * to do this to guarantee that the filesystems and / or network devices
- * needed for boot are available, before we can allow the boot to proceed.
- *
- * This needs to be on a late_initcall, to happen after the frontend device
- * drivers have been initialised, but before the root fs is mounted.
- *
- * A possible improvement here would be to have the tools add a per-device
- * flag to the store entry, indicating whether it is needed at boot time.
- * This would allow people who knew what they were doing to accelerate their
- * boot slightly, but of course needs tools or manual intervention to set up
- * those flags correctly.
- */
-static void wait_for_devices(struct xenbus_driver *xendrv)
-{
-	unsigned long start = jiffies;
-	struct device_driver *drv = xendrv ? &xendrv->driver : NULL;
-	unsigned int seconds_waited = 0;
-
-	if (!ready_to_wait_for_devices || !xen_domain())
-		return;
-
-	while (exists_connecting_device(drv)) {
-		if (time_after(jiffies, start + (seconds_waited+5)*HZ)) {
-			if (!seconds_waited)
-				printk(KERN_WARNING "XENBUS: Waiting for "
-				       "devices to initialise: ");
-			seconds_waited += 5;
-			printk("%us...", 300 - seconds_waited);
-			if (seconds_waited == 300)
-				break;
-		}
-
-		schedule_timeout_interruptible(HZ/10);
-	}
-
-	if (seconds_waited)
-		printk("\n");
-
-	bus_for_each_dev(&xenbus_frontend.bus, NULL, drv,
-			 print_device_status);
-}
-
-#ifndef MODULE
-static int __init boot_wait_for_devices(void)
-{
-	if (xen_hvm_domain() && !xen_platform_pci_unplug)
-		return -ENODEV;
-
-	ready_to_wait_for_devices = 1;
-	wait_for_devices(NULL);
-	return 0;
-}
-
-late_initcall(boot_wait_for_devices);
-#endif
diff --git a/drivers/xen/xenbus/xenbus_probe.h b/drivers/xen/xenbus/xenbus_probe.h
index 6c5e318..2466581 100644
--- a/drivers/xen/xenbus/xenbus_probe.h
+++ b/drivers/xen/xenbus/xenbus_probe.h
@@ -36,26 +36,15 @@
 
 #define XEN_BUS_ID_SIZE			20
 
-#ifdef CONFIG_XEN_BACKEND
-extern void xenbus_backend_suspend(int (*fn)(struct device *, void *));
-extern void xenbus_backend_resume(int (*fn)(struct device *, void *));
-extern void xenbus_backend_probe_and_watch(void);
-extern int xenbus_backend_bus_register(void);
-extern void xenbus_backend_bus_unregister(void);
-#else
-static inline void xenbus_backend_suspend(int (*fn)(struct device *, void *)) {}
-static inline void xenbus_backend_resume(int (*fn)(struct device *, void *)) {}
-static inline void xenbus_backend_probe_and_watch(void) {}
-static inline int xenbus_backend_bus_register(void) { return 0; }
-static inline void xenbus_backend_bus_unregister(void) {}
-#endif
-
 struct xen_bus_type
 {
 	char *root;
 	unsigned int levels;
 	int (*get_bus_id)(char bus_id[XEN_BUS_ID_SIZE], const char *nodename);
-	int (*probe)(const char *type, const char *dir);
+	int (*probe)(struct xen_bus_type *bus, const char *type,
+		     const char *dir);
+	void (*otherend_changed)(struct xenbus_watch *watch, const char **vec,
+				 unsigned int len);
 	struct bus_type bus;
 };
 
@@ -73,4 +62,16 @@
 
 extern void xenbus_dev_changed(const char *node, struct xen_bus_type *bus);
 
+extern void xenbus_dev_shutdown(struct device *_dev);
+
+extern int xenbus_dev_suspend(struct device *dev, pm_message_t state);
+extern int xenbus_dev_resume(struct device *dev);
+
+extern void xenbus_otherend_changed(struct xenbus_watch *watch,
+				    const char **vec, unsigned int len,
+				    int ignore_on_shutdown);
+
+extern int xenbus_read_otherend_details(struct xenbus_device *xendev,
+					char *id_node, char *path_node);
+
 #endif
diff --git a/drivers/xen/xenbus/xenbus_probe_backend.c b/drivers/xen/xenbus/xenbus_probe_backend.c
new file mode 100644
index 0000000..6cf467b
--- /dev/null
+++ b/drivers/xen/xenbus/xenbus_probe_backend.c
@@ -0,0 +1,276 @@
+/******************************************************************************
+ * Talks to Xen Store to figure out what devices we have (backend half).
+ *
+ * Copyright (C) 2005 Rusty Russell, IBM Corporation
+ * Copyright (C) 2005 Mike Wray, Hewlett-Packard
+ * Copyright (C) 2005, 2006 XenSource Ltd
+ * Copyright (C) 2007 Solarflare Communications, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation; or, when distributed
+ * separately from the Linux kernel or incorporated into other
+ * software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#define DPRINTK(fmt, args...)				\
+	pr_debug("xenbus_probe (%s:%d) " fmt ".\n",	\
+		 __func__, __LINE__, ##args)
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/string.h>
+#include <linux/ctype.h>
+#include <linux/fcntl.h>
+#include <linux/mm.h>
+#include <linux/notifier.h>
+
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/xen/hypervisor.h>
+#include <asm/hypervisor.h>
+#include <xen/xenbus.h>
+#include <xen/features.h>
+
+#include "xenbus_comms.h"
+#include "xenbus_probe.h"
+
+/* backend/<type>/<fe-uuid>/<id> => <type>-<fe-domid>-<id> */
+static int backend_bus_id(char bus_id[XEN_BUS_ID_SIZE], const char *nodename)
+{
+	int domid, err;
+	const char *devid, *type, *frontend;
+	unsigned int typelen;
+
+	type = strchr(nodename, '/');
+	if (!type)
+		return -EINVAL;
+	type++;
+	typelen = strcspn(type, "/");
+	if (!typelen || type[typelen] != '/')
+		return -EINVAL;
+
+	devid = strrchr(nodename, '/') + 1;
+
+	err = xenbus_gather(XBT_NIL, nodename, "frontend-id", "%i", &domid,
+			    "frontend", NULL, &frontend,
+			    NULL);
+	if (err)
+		return err;
+	if (strlen(frontend) == 0)
+		err = -ERANGE;
+	if (!err && !xenbus_exists(XBT_NIL, frontend, ""))
+		err = -ENOENT;
+	kfree(frontend);
+
+	if (err)
+		return err;
+
+	if (snprintf(bus_id, XEN_BUS_ID_SIZE, "%.*s-%i-%s",
+		     typelen, type, domid, devid) >= XEN_BUS_ID_SIZE)
+		return -ENOSPC;
+	return 0;
+}
+
+static int xenbus_uevent_backend(struct device *dev,
+				 struct kobj_uevent_env *env)
+{
+	struct xenbus_device *xdev;
+	struct xenbus_driver *drv;
+	struct xen_bus_type *bus;
+
+	DPRINTK("");
+
+	if (dev == NULL)
+		return -ENODEV;
+
+	xdev = to_xenbus_device(dev);
+	if (xdev == NULL)
+		return -ENODEV;
+	bus = container_of(xdev->dev.bus, struct xen_bus_type, bus);
+
+	/* stuff we want to pass to /sbin/hotplug */
+	if (add_uevent_var(env, "XENBUS_TYPE=%s", xdev->devicetype))
+		return -ENOMEM;
+
+	if (add_uevent_var(env, "XENBUS_PATH=%s", xdev->nodename))
+		return -ENOMEM;
+
+	if (add_uevent_var(env, "XENBUS_BASE_PATH=%s", bus->root))
+		return -ENOMEM;
+
+	if (dev->driver) {
+		drv = to_xenbus_driver(dev->driver);
+		if (drv && drv->uevent)
+			return drv->uevent(xdev, env);
+	}
+
+	return 0;
+}
+
+/* backend/<typename>/<frontend-uuid>/<name> */
+static int xenbus_probe_backend_unit(struct xen_bus_type *bus,
+				     const char *dir,
+				     const char *type,
+				     const char *name)
+{
+	char *nodename;
+	int err;
+
+	nodename = kasprintf(GFP_KERNEL, "%s/%s", dir, name);
+	if (!nodename)
+		return -ENOMEM;
+
+	DPRINTK("%s\n", nodename);
+
+	err = xenbus_probe_node(bus, type, nodename);
+	kfree(nodename);
+	return err;
+}
+
+/* backend/<typename>/<frontend-domid> */
+static int xenbus_probe_backend(struct xen_bus_type *bus, const char *type,
+				const char *domid)
+{
+	char *nodename;
+	int err = 0;
+	char **dir;
+	unsigned int i, dir_n = 0;
+
+	DPRINTK("");
+
+	nodename = kasprintf(GFP_KERNEL, "%s/%s/%s", bus->root, type, domid);
+	if (!nodename)
+		return -ENOMEM;
+
+	dir = xenbus_directory(XBT_NIL, nodename, "", &dir_n);
+	if (IS_ERR(dir)) {
+		kfree(nodename);
+		return PTR_ERR(dir);
+	}
+
+	for (i = 0; i < dir_n; i++) {
+		err = xenbus_probe_backend_unit(bus, nodename, type, dir[i]);
+		if (err)
+			break;
+	}
+	kfree(dir);
+	kfree(nodename);
+	return err;
+}
+
+static void frontend_changed(struct xenbus_watch *watch,
+			    const char **vec, unsigned int len)
+{
+	xenbus_otherend_changed(watch, vec, len, 0);
+}
+
+static struct device_attribute xenbus_backend_dev_attrs[] = {
+	__ATTR_NULL
+};
+
+static struct xen_bus_type xenbus_backend = {
+	.root = "backend",
+	.levels = 3,		/* backend/type/<frontend>/<id> */
+	.get_bus_id = backend_bus_id,
+	.probe = xenbus_probe_backend,
+	.otherend_changed = frontend_changed,
+	.bus = {
+		.name		= "xen-backend",
+		.match		= xenbus_match,
+		.uevent		= xenbus_uevent_backend,
+		.probe		= xenbus_dev_probe,
+		.remove		= xenbus_dev_remove,
+		.shutdown	= xenbus_dev_shutdown,
+		.dev_attrs	= xenbus_backend_dev_attrs,
+	},
+};
+
+static void backend_changed(struct xenbus_watch *watch,
+			    const char **vec, unsigned int len)
+{
+	DPRINTK("");
+
+	xenbus_dev_changed(vec[XS_WATCH_PATH], &xenbus_backend);
+}
+
+static struct xenbus_watch be_watch = {
+	.node = "backend",
+	.callback = backend_changed,
+};
+
+static int read_frontend_details(struct xenbus_device *xendev)
+{
+	return xenbus_read_otherend_details(xendev, "frontend-id", "frontend");
+}
+
+int xenbus_dev_is_online(struct xenbus_device *dev)
+{
+	int rc, val;
+
+	rc = xenbus_scanf(XBT_NIL, dev->nodename, "online", "%d", &val);
+	if (rc != 1)
+		val = 0; /* no online node present */
+
+	return val;
+}
+EXPORT_SYMBOL_GPL(xenbus_dev_is_online);
+
+int __xenbus_register_backend(struct xenbus_driver *drv,
+			      struct module *owner, const char *mod_name)
+{
+	drv->read_otherend_details = read_frontend_details;
+
+	return xenbus_register_driver_common(drv, &xenbus_backend,
+					     owner, mod_name);
+}
+EXPORT_SYMBOL_GPL(__xenbus_register_backend);
+
+static int backend_probe_and_watch(struct notifier_block *notifier,
+				   unsigned long event,
+				   void *data)
+{
+	/* Enumerate devices in xenstore and watch for changes. */
+	xenbus_probe_devices(&xenbus_backend);
+	register_xenbus_watch(&be_watch);
+
+	return NOTIFY_DONE;
+}
+
+static int __init xenbus_probe_backend_init(void)
+{
+	static struct notifier_block xenstore_notifier = {
+		.notifier_call = backend_probe_and_watch
+	};
+	int err;
+
+	DPRINTK("");
+
+	/* Register ourselves with the kernel bus subsystem */
+	err = bus_register(&xenbus_backend.bus);
+	if (err)
+		return err;
+
+	register_xenstore_notifier(&xenstore_notifier);
+
+	return 0;
+}
+subsys_initcall(xenbus_probe_backend_init);
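
With backend_bus_id(), read_frontend_details() and __xenbus_register_backend()
in place, a backend driver only has to fill in a struct xenbus_driver and
register it. A hypothetical "demoback" driver, sketched purely for
illustration (device type, callback body and module name are made up and not
part of this patch):

#include <linux/module.h>
#include <xen/xenbus.h>

static const struct xenbus_device_id demoback_ids[] = {
	{ "demo" },	/* would match backend/demo/<frontend-domid>/<id> */
	{ "" }
};

static int demoback_probe(struct xenbus_device *dev,
			  const struct xenbus_device_id *id)
{
	/* Allocate per-device state here, then announce readiness. */
	return xenbus_switch_state(dev, XenbusStateInitWait);
}

static struct xenbus_driver demoback_driver = {
	.name  = "demoback",
	.ids   = demoback_ids,
	.probe = demoback_probe,
};

static int __init demoback_init(void)
{
	/* Fills in read_frontend_details and registers on the xen-backend bus. */
	return __xenbus_register_backend(&demoback_driver, THIS_MODULE,
					 KBUILD_MODNAME);
}
module_init(demoback_init);

MODULE_LICENSE("GPL");
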
diff --git a/drivers/xen/xenbus/xenbus_probe_frontend.c b/drivers/xen/xenbus/xenbus_probe_frontend.c
new file mode 100644
index 0000000..5bcc2d6
--- /dev/null
+++ b/drivers/xen/xenbus/xenbus_probe_frontend.c
@@ -0,0 +1,294 @@
+#define DPRINTK(fmt, args...)				\
+	pr_debug("xenbus_probe (%s:%d) " fmt ".\n",	\
+		 __func__, __LINE__, ##args)
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/string.h>
+#include <linux/ctype.h>
+#include <linux/fcntl.h>
+#include <linux/mm.h>
+#include <linux/proc_fs.h>
+#include <linux/notifier.h>
+#include <linux/kthread.h>
+#include <linux/mutex.h>
+#include <linux/io.h>
+
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/xen/hypervisor.h>
+#include <xen/xenbus.h>
+#include <xen/events.h>
+#include <xen/page.h>
+
+#include <xen/platform_pci.h>
+
+#include "xenbus_comms.h"
+#include "xenbus_probe.h"
+
+
+/* device/<type>/<id> => <type>-<id> */
+static int frontend_bus_id(char bus_id[XEN_BUS_ID_SIZE], const char *nodename)
+{
+	nodename = strchr(nodename, '/');
+	if (!nodename || strlen(nodename + 1) >= XEN_BUS_ID_SIZE) {
+		printk(KERN_WARNING "XENBUS: bad frontend %s\n", nodename);
+		return -EINVAL;
+	}
+
+	strlcpy(bus_id, nodename + 1, XEN_BUS_ID_SIZE);
+	if (!strchr(bus_id, '/')) {
+		printk(KERN_WARNING "XENBUS: bus_id %s no slash\n", bus_id);
+		return -EINVAL;
+	}
+	*strchr(bus_id, '/') = '-';
+	return 0;
+}
+
+/* device/<typename>/<name> */
+static int xenbus_probe_frontend(struct xen_bus_type *bus, const char *type,
+				 const char *name)
+{
+	char *nodename;
+	int err;
+
+	nodename = kasprintf(GFP_KERNEL, "%s/%s/%s", bus->root, type, name);
+	if (!nodename)
+		return -ENOMEM;
+
+	DPRINTK("%s", nodename);
+
+	err = xenbus_probe_node(bus, type, nodename);
+	kfree(nodename);
+	return err;
+}
+
+static int xenbus_uevent_frontend(struct device *_dev,
+				  struct kobj_uevent_env *env)
+{
+	struct xenbus_device *dev = to_xenbus_device(_dev);
+
+	if (add_uevent_var(env, "MODALIAS=xen:%s", dev->devicetype))
+		return -ENOMEM;
+
+	return 0;
+}
+
+
+static void backend_changed(struct xenbus_watch *watch,
+			    const char **vec, unsigned int len)
+{
+	xenbus_otherend_changed(watch, vec, len, 1);
+}
+
+static struct device_attribute xenbus_frontend_dev_attrs[] = {
+	__ATTR_NULL
+};
+
+static struct xen_bus_type xenbus_frontend = {
+	.root = "device",
+	.levels = 2,		/* device/type/<id> */
+	.get_bus_id = frontend_bus_id,
+	.probe = xenbus_probe_frontend,
+	.otherend_changed = backend_changed,
+	.bus = {
+		.name		= "xen",
+		.match		= xenbus_match,
+		.uevent		= xenbus_uevent_frontend,
+		.probe		= xenbus_dev_probe,
+		.remove		= xenbus_dev_remove,
+		.shutdown	= xenbus_dev_shutdown,
+		.dev_attrs	= xenbus_frontend_dev_attrs,
+
+		.suspend	= xenbus_dev_suspend,
+		.resume		= xenbus_dev_resume,
+	},
+};
+
+static void frontend_changed(struct xenbus_watch *watch,
+			     const char **vec, unsigned int len)
+{
+	DPRINTK("");
+
+	xenbus_dev_changed(vec[XS_WATCH_PATH], &xenbus_frontend);
+}
+
+
+/* We watch for devices appearing and vanishing. */
+static struct xenbus_watch fe_watch = {
+	.node = "device",
+	.callback = frontend_changed,
+};
+
+static int read_backend_details(struct xenbus_device *xendev)
+{
+	return xenbus_read_otherend_details(xendev, "backend-id", "backend");
+}
+
+static int is_device_connecting(struct device *dev, void *data)
+{
+	struct xenbus_device *xendev = to_xenbus_device(dev);
+	struct device_driver *drv = data;
+	struct xenbus_driver *xendrv;
+
+	/*
+	 * A device with no driver will never connect. We care only about
+	 * devices which should currently be in the process of connecting.
+	 */
+	if (!dev->driver)
+		return 0;
+
+	/* Is this search limited to a particular driver? */
+	if (drv && (dev->driver != drv))
+		return 0;
+
+	xendrv = to_xenbus_driver(dev->driver);
+	return (xendev->state < XenbusStateConnected ||
+		(xendev->state == XenbusStateConnected &&
+		 xendrv->is_ready && !xendrv->is_ready(xendev)));
+}
+
+static int exists_connecting_device(struct device_driver *drv)
+{
+	return bus_for_each_dev(&xenbus_frontend.bus, NULL, drv,
+				is_device_connecting);
+}
+
+static int print_device_status(struct device *dev, void *data)
+{
+	struct xenbus_device *xendev = to_xenbus_device(dev);
+	struct device_driver *drv = data;
+
+	/* Is this operation limited to a particular driver? */
+	if (drv && (dev->driver != drv))
+		return 0;
+
+	if (!dev->driver) {
+		/* Information only: is this too noisy? */
+		printk(KERN_INFO "XENBUS: Device with no driver: %s\n",
+		       xendev->nodename);
+	} else if (xendev->state < XenbusStateConnected) {
+		enum xenbus_state rstate = XenbusStateUnknown;
+		if (xendev->otherend)
+			rstate = xenbus_read_driver_state(xendev->otherend);
+		printk(KERN_WARNING "XENBUS: Timeout connecting "
+		       "to device: %s (local state %d, remote state %d)\n",
+		       xendev->nodename, xendev->state, rstate);
+	}
+
+	return 0;
+}
+
+/* We only wait for device setup after most initcalls have run. */
+static int ready_to_wait_for_devices;
+
+/*
+ * On a 5-minute timeout, wait for all devices currently configured.  We need
+ * to do this to guarantee that the filesystems and/or network devices
+ * needed for boot are available, before we can allow the boot to proceed.
+ *
+ * This needs to be on a late_initcall, to happen after the frontend device
+ * drivers have been initialised, but before the root fs is mounted.
+ *
+ * A possible improvement here would be to have the tools add a per-device
+ * flag to the store entry, indicating whether it is needed at boot time.
+ * This would allow people who knew what they were doing to accelerate their
+ * boot slightly, but of course needs tools or manual intervention to set up
+ * those flags correctly.
+ */
+static void wait_for_devices(struct xenbus_driver *xendrv)
+{
+	unsigned long start = jiffies;
+	struct device_driver *drv = xendrv ? &xendrv->driver : NULL;
+	unsigned int seconds_waited = 0;
+
+	if (!ready_to_wait_for_devices || !xen_domain())
+		return;
+
+	while (exists_connecting_device(drv)) {
+		if (time_after(jiffies, start + (seconds_waited+5)*HZ)) {
+			if (!seconds_waited)
+				printk(KERN_WARNING "XENBUS: Waiting for "
+				       "devices to initialise: ");
+			seconds_waited += 5;
+			printk("%us...", 300 - seconds_waited);
+			if (seconds_waited == 300)
+				break;
+		}
+
+		schedule_timeout_interruptible(HZ/10);
+	}
+
+	if (seconds_waited)
+		printk("\n");
+
+	bus_for_each_dev(&xenbus_frontend.bus, NULL, drv,
+			 print_device_status);
+}
+
+int __xenbus_register_frontend(struct xenbus_driver *drv,
+			       struct module *owner, const char *mod_name)
+{
+	int ret;
+
+	drv->read_otherend_details = read_backend_details;
+
+	ret = xenbus_register_driver_common(drv, &xenbus_frontend,
+					    owner, mod_name);
+	if (ret)
+		return ret;
+
+	/* If this driver is loaded as a module wait for devices to attach. */
+	wait_for_devices(drv);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(__xenbus_register_frontend);
+
+static int frontend_probe_and_watch(struct notifier_block *notifier,
+				   unsigned long event,
+				   void *data)
+{
+	/* Enumerate devices in xenstore and watch for changes. */
+	xenbus_probe_devices(&xenbus_frontend);
+	register_xenbus_watch(&fe_watch);
+
+	return NOTIFY_DONE;
+}
+
+
+static int __init xenbus_probe_frontend_init(void)
+{
+	static struct notifier_block xenstore_notifier = {
+		.notifier_call = frontend_probe_and_watch
+	};
+	int err;
+
+	DPRINTK("");
+
+	/* Register ourselves with the kernel bus subsystem */
+	err = bus_register(&xenbus_frontend.bus);
+	if (err)
+		return err;
+
+	register_xenstore_notifier(&xenstore_notifier);
+
+	return 0;
+}
+subsys_initcall(xenbus_probe_frontend_init);
+
+#ifndef MODULE
+static int __init boot_wait_for_devices(void)
+{
+	if (xen_hvm_domain() && !xen_platform_pci_unplug)
+		return -ENODEV;
+
+	ready_to_wait_for_devices = 1;
+	wait_for_devices(NULL);
+	return 0;
+}
+
+late_initcall(boot_wait_for_devices);
+#endif
+
+MODULE_LICENSE("GPL");
diff --git a/firmware/ihex2fw.c b/firmware/ihex2fw.c
index ba0cf0b..cf38e15 100644
--- a/firmware/ihex2fw.c
+++ b/firmware/ihex2fw.c
@@ -124,8 +124,7 @@
 	if (process_ihex(data, st.st_size))
 		return 1;
 
-	output_records(outfd);
-	return 0;
+	return output_records(outfd);
 }
 
 static int process_ihex(uint8_t *data, ssize_t size)
@@ -269,11 +268,13 @@
 
 		p->addr = htonl(p->addr);
 		p->len = htons(p->len);
-		write(outfd, &p->addr, writelen);
+		if (write(outfd, &p->addr, writelen) != writelen)
+			return 1;
 		p = p->next;
 	}
 	/* EOF record is zero length, since we don't bother to represent
 	   the type field in the binary version */
-	write(outfd, zeroes, 6);
+	if (write(outfd, zeroes, 6) != 6)
+		return 1;
 	return 0;
 }
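
The error checks above treat any short write as fatal, which is reasonable for
a build-time tool writing a small firmware image to a regular file. For
comparison, a more general pattern (sketched here, not part of this patch)
would retry on short writes and EINTR:

#include <errno.h>
#include <stdint.h>
#include <unistd.h>

static int write_all(int fd, const void *buf, size_t len)
{
	const uint8_t *p = buf;

	while (len > 0) {
		ssize_t n = write(fd, p, len);

		if (n < 0) {
			if (errno == EINTR)
				continue;	/* interrupted, try again */
			return 1;		/* real error */
		}
		p += n;
		len -= (size_t)n;
	}
	return 0;
}

output_records() could then call write_all(outfd, &p->addr, writelen) and keep
the same non-zero-on-failure convention.
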
diff --git a/fs/9p/Kconfig b/fs/9p/Kconfig
index 7e05114..814ac4e 100644
--- a/fs/9p/Kconfig
+++ b/fs/9p/Kconfig
@@ -9,6 +9,8 @@
 
 	  If unsure, say N.
 
+if 9P_FS
+
 config 9P_FSCACHE
 	bool "Enable 9P client caching support (EXPERIMENTAL)"
 	depends on EXPERIMENTAL
@@ -20,7 +22,6 @@
 
 config 9P_FS_POSIX_ACL
 	bool "9P POSIX Access Control Lists"
-	depends on 9P_FS
 	select FS_POSIX_ACL
 	help
 	  POSIX Access Control Lists (ACLs) support permissions for users and
@@ -30,3 +31,5 @@
 	  Linux website <http://acl.bestbits.at/>.
 
 	  If you don't know what Access Control Lists are, say N
+
+endif
diff --git a/fs/9p/Makefile b/fs/9p/Makefile
index f8ba37e..ab8c127 100644
--- a/fs/9p/Makefile
+++ b/fs/9p/Makefile
@@ -3,6 +3,7 @@
 9p-objs := \
 	vfs_super.o \
 	vfs_inode.o \
+	vfs_inode_dotl.o \
 	vfs_addr.o \
 	vfs_file.o \
 	vfs_dir.o \
diff --git a/fs/9p/acl.c b/fs/9p/acl.c
index 6e58c4c..02a2cf6 100644
--- a/fs/9p/acl.c
+++ b/fs/9p/acl.c
@@ -28,7 +28,7 @@
 {
 	ssize_t size;
 	void *value = NULL;
-	struct posix_acl *acl = NULL;;
+	struct posix_acl *acl = NULL;
 
 	size = v9fs_fid_xattr_get(fid, name, NULL, 0);
 	if (size > 0) {
@@ -365,7 +365,7 @@
 	case ACL_TYPE_DEFAULT:
 		name = POSIX_ACL_XATTR_DEFAULT;
 		if (!S_ISDIR(inode->i_mode)) {
-			retval = -EINVAL;
+			retval = acl ? -EINVAL : 0;
 			goto err_out;
 		}
 		break;
diff --git a/fs/9p/v9fs.h b/fs/9p/v9fs.h
index cb63968..c4b5d88 100644
--- a/fs/9p/v9fs.h
+++ b/fs/9p/v9fs.h
@@ -113,9 +113,27 @@
 
 struct p9_fid *v9fs_session_init(struct v9fs_session_info *, const char *,
 									char *);
-void v9fs_session_close(struct v9fs_session_info *v9ses);
-void v9fs_session_cancel(struct v9fs_session_info *v9ses);
-void v9fs_session_begin_cancel(struct v9fs_session_info *v9ses);
+extern void v9fs_session_close(struct v9fs_session_info *v9ses);
+extern void v9fs_session_cancel(struct v9fs_session_info *v9ses);
+extern void v9fs_session_begin_cancel(struct v9fs_session_info *v9ses);
+extern struct dentry *v9fs_vfs_lookup(struct inode *dir, struct dentry *dentry,
+			struct nameidata *nameidata);
+extern int v9fs_vfs_unlink(struct inode *i, struct dentry *d);
+extern int v9fs_vfs_rmdir(struct inode *i, struct dentry *d);
+extern int v9fs_vfs_rename(struct inode *old_dir, struct dentry *old_dentry,
+			struct inode *new_dir, struct dentry *new_dentry);
+extern void v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd,
+			void *p);
+extern struct inode *v9fs_inode(struct v9fs_session_info *v9ses,
+			struct p9_fid *fid,
+			struct super_block *sb);
+
+extern const struct inode_operations v9fs_dir_inode_operations_dotl;
+extern const struct inode_operations v9fs_file_inode_operations_dotl;
+extern const struct inode_operations v9fs_symlink_inode_operations_dotl;
+extern struct inode *v9fs_inode_dotl(struct v9fs_session_info *v9ses,
+			struct p9_fid *fid,
+			struct super_block *sb);
 
 /* other default globals */
 #define V9FS_PORT	564
@@ -138,3 +156,21 @@
 {
 	return v9ses->flags & V9FS_PROTO_2000L;
 }
+
+/**
+ * v9fs_inode_from_fid - Helper routine to populate an inode by
+ * issuing an attribute request
+ * @v9ses: session information
+ * @fid: fid to issue attribute request for
+ * @sb: superblock on which to create inode
+ *
+ */
+static inline struct inode *
+v9fs_inode_from_fid(struct v9fs_session_info *v9ses, struct p9_fid *fid,
+				struct super_block *sb)
+{
+	if (v9fs_proto_dotl(v9ses))
+		return v9fs_inode_dotl(v9ses, fid, sb);
+	else
+		return v9fs_inode(v9ses, fid, sb);
+}
diff --git a/fs/9p/v9fs_vfs.h b/fs/9p/v9fs_vfs.h
index bab0eac..b789f8e 100644
--- a/fs/9p/v9fs_vfs.h
+++ b/fs/9p/v9fs_vfs.h
@@ -59,7 +59,6 @@
 int v9fs_dir_release(struct inode *inode, struct file *filp);
 int v9fs_file_open(struct inode *inode, struct file *file);
 void v9fs_inode2stat(struct inode *inode, struct p9_wstat *stat);
-void v9fs_dentry_release(struct dentry *);
 int v9fs_uflags2omode(int uflags, int extended);
 
 ssize_t v9fs_file_readn(struct file *, char *, char __user *, u32, u64);
diff --git a/fs/9p/vfs_dentry.c b/fs/9p/vfs_dentry.c
index 466d2a4..233b7d4 100644
--- a/fs/9p/vfs_dentry.c
+++ b/fs/9p/vfs_dentry.c
@@ -86,7 +86,7 @@
  *
  */
 
-void v9fs_dentry_release(struct dentry *dentry)
+static void v9fs_dentry_release(struct dentry *dentry)
 {
 	struct v9fs_dentry *dent;
 	struct p9_fid *temp, *current_fid;
diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
index 5978298..b76a40b 100644
--- a/fs/9p/vfs_inode.c
+++ b/fs/9p/vfs_inode.c
@@ -49,15 +49,8 @@
 
 static const struct inode_operations v9fs_dir_inode_operations;
 static const struct inode_operations v9fs_dir_inode_operations_dotu;
-static const struct inode_operations v9fs_dir_inode_operations_dotl;
 static const struct inode_operations v9fs_file_inode_operations;
-static const struct inode_operations v9fs_file_inode_operations_dotl;
 static const struct inode_operations v9fs_symlink_inode_operations;
-static const struct inode_operations v9fs_symlink_inode_operations_dotl;
-
-static int
-v9fs_vfs_mknod_dotl(struct inode *dir, struct dentry *dentry, int omode,
-		    dev_t rdev);
 
 /**
  * unixmode2p9mode - convert unix mode bits to plan 9
@@ -251,41 +244,6 @@
 #endif
 
 /**
- * v9fs_get_fsgid_for_create - Helper function to get the gid for creating a
- * new file system object. This checks the S_ISGID to determine the owning
- * group of the new file system object.
- */
-
-static gid_t v9fs_get_fsgid_for_create(struct inode *dir_inode)
-{
-	BUG_ON(dir_inode == NULL);
-
-	if (dir_inode->i_mode & S_ISGID) {
-		/* set_gid bit is set.*/
-		return dir_inode->i_gid;
-	}
-	return current_fsgid();
-}
-
-/**
- * v9fs_dentry_from_dir_inode - helper function to get the dentry from
- * dir inode.
- *
- */
-
-static struct dentry *v9fs_dentry_from_dir_inode(struct inode *inode)
-{
-	struct dentry *dentry;
-
-	spin_lock(&inode->i_lock);
-	/* Directory should have only one entry. */
-	BUG_ON(S_ISDIR(inode->i_mode) && !list_is_singular(&inode->i_dentry));
-	dentry = list_entry(inode->i_dentry.next, struct dentry, d_alias);
-	spin_unlock(&inode->i_lock);
-	return dentry;
-}
-
-/**
  * v9fs_get_inode - helper function to setup an inode
  * @sb: superblock
  * @mode: mode to setup inode with
@@ -454,7 +412,7 @@
 #endif
 }
 
-static struct inode *
+struct inode *
 v9fs_inode(struct v9fs_session_info *v9ses, struct p9_fid *fid,
 	struct super_block *sb)
 {
@@ -489,60 +447,6 @@
 	return ERR_PTR(err);
 }
 
-static struct inode *
-v9fs_inode_dotl(struct v9fs_session_info *v9ses, struct p9_fid *fid,
-	struct super_block *sb)
-{
-	struct inode *ret = NULL;
-	int err;
-	struct p9_stat_dotl *st;
-
-	st = p9_client_getattr_dotl(fid, P9_STATS_BASIC);
-	if (IS_ERR(st))
-		return ERR_CAST(st);
-
-	ret = v9fs_get_inode(sb, st->st_mode);
-	if (IS_ERR(ret)) {
-		err = PTR_ERR(ret);
-		goto error;
-	}
-
-	v9fs_stat2inode_dotl(st, ret);
-	ret->i_ino = v9fs_qid2ino(&st->qid);
-#ifdef CONFIG_9P_FSCACHE
-	v9fs_vcookie_set_qid(ret, &st->qid);
-	v9fs_cache_inode_get_cookie(ret);
-#endif
-	err = v9fs_get_acl(ret, fid);
-	if (err) {
-		iput(ret);
-		goto error;
-	}
-	kfree(st);
-	return ret;
-error:
-	kfree(st);
-	return ERR_PTR(err);
-}
-
-/**
- * v9fs_inode_from_fid - Helper routine to populate an inode by
- * issuing a attribute request
- * @v9ses: session information
- * @fid: fid to issue attribute request for
- * @sb: superblock on which to create inode
- *
- */
-static inline struct inode *
-v9fs_inode_from_fid(struct v9fs_session_info *v9ses, struct p9_fid *fid,
-			struct super_block *sb)
-{
-	if (v9fs_proto_dotl(v9ses))
-		return v9fs_inode_dotl(v9ses, fid, sb);
-	else
-		return v9fs_inode(v9ses, fid, sb);
-}
-
 /**
  * v9fs_remove - helper function to remove files and directories
  * @dir: directory inode that is being deleted
@@ -633,12 +537,6 @@
 		P9_DPRINTK(P9_DEBUG_VFS, "inode creation failed %d\n", err);
 		goto error;
 	}
-
-	if (v9ses->cache)
-		d_set_d_op(dentry, &v9fs_cached_dentry_operations);
-	else
-		d_set_d_op(dentry, &v9fs_dentry_operations);
-
 	d_instantiate(dentry, inode);
 	err = v9fs_fid_add(dentry, fid);
 	if (err < 0)
@@ -657,144 +555,6 @@
 }
 
 /**
- * v9fs_vfs_create_dotl - VFS hook to create files for 9P2000.L protocol.
- * @dir: directory inode that is being created
- * @dentry:  dentry that is being deleted
- * @mode: create permissions
- * @nd: path information
- *
- */
-
-static int
-v9fs_vfs_create_dotl(struct inode *dir, struct dentry *dentry, int omode,
-		struct nameidata *nd)
-{
-	int err = 0;
-	char *name = NULL;
-	gid_t gid;
-	int flags;
-	mode_t mode;
-	struct v9fs_session_info *v9ses;
-	struct p9_fid *fid = NULL;
-	struct p9_fid *dfid, *ofid;
-	struct file *filp;
-	struct p9_qid qid;
-	struct inode *inode;
-	struct posix_acl *pacl = NULL, *dacl = NULL;
-
-	v9ses = v9fs_inode2v9ses(dir);
-	if (nd && nd->flags & LOOKUP_OPEN)
-		flags = nd->intent.open.flags - 1;
-	else {
-		/*
-		 * create call without LOOKUP_OPEN is due
-		 * to mknod of regular files. So use mknod
-		 * operation.
-		 */
-		return v9fs_vfs_mknod_dotl(dir, dentry, omode, 0);
-	}
-
-	name = (char *) dentry->d_name.name;
-	P9_DPRINTK(P9_DEBUG_VFS, "v9fs_vfs_create_dotl: name:%s flags:0x%x "
-			"mode:0x%x\n", name, flags, omode);
-
-	dfid = v9fs_fid_lookup(dentry->d_parent);
-	if (IS_ERR(dfid)) {
-		err = PTR_ERR(dfid);
-		P9_DPRINTK(P9_DEBUG_VFS, "fid lookup failed %d\n", err);
-		return err;
-	}
-
-	/* clone a fid to use for creation */
-	ofid = p9_client_walk(dfid, 0, NULL, 1);
-	if (IS_ERR(ofid)) {
-		err = PTR_ERR(ofid);
-		P9_DPRINTK(P9_DEBUG_VFS, "p9_client_walk failed %d\n", err);
-		return err;
-	}
-
-	gid = v9fs_get_fsgid_for_create(dir);
-
-	mode = omode;
-	/* Update mode based on ACL value */
-	err = v9fs_acl_mode(dir, &mode, &dacl, &pacl);
-	if (err) {
-		P9_DPRINTK(P9_DEBUG_VFS,
-			   "Failed to get acl values in creat %d\n", err);
-		goto error;
-	}
-	err = p9_client_create_dotl(ofid, name, flags, mode, gid, &qid);
-	if (err < 0) {
-		P9_DPRINTK(P9_DEBUG_VFS,
-				"p9_client_open_dotl failed in creat %d\n",
-				err);
-		goto error;
-	}
-	/* instantiate inode and assign the unopened fid to the dentry */
-	if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE ||
-	    (nd && nd->flags & LOOKUP_OPEN)) {
-		fid = p9_client_walk(dfid, 1, &name, 1);
-		if (IS_ERR(fid)) {
-			err = PTR_ERR(fid);
-			P9_DPRINTK(P9_DEBUG_VFS, "p9_client_walk failed %d\n",
-				err);
-			fid = NULL;
-			goto error;
-		}
-
-		inode = v9fs_inode_from_fid(v9ses, fid, dir->i_sb);
-		if (IS_ERR(inode)) {
-			err = PTR_ERR(inode);
-			P9_DPRINTK(P9_DEBUG_VFS, "inode creation failed %d\n",
-				err);
-			goto error;
-		}
-		d_set_d_op(dentry, &v9fs_cached_dentry_operations);
-		d_instantiate(dentry, inode);
-		err = v9fs_fid_add(dentry, fid);
-		if (err < 0)
-			goto error;
-		/* The fid would get clunked via a dput */
-		fid = NULL;
-	} else {
-		/*
-		 * Not in cached mode. No need to populate
-		 * inode with stat. We need to get an inode
-		 * so that we can set the acl with dentry
-		 */
-		inode = v9fs_get_inode(dir->i_sb, mode);
-		if (IS_ERR(inode)) {
-			err = PTR_ERR(inode);
-			goto error;
-		}
-		d_set_d_op(dentry, &v9fs_dentry_operations);
-		d_instantiate(dentry, inode);
-	}
-	/* Now set the ACL based on the default value */
-	v9fs_set_create_acl(dentry, dacl, pacl);
-
-	/* if we are opening a file, assign the open fid to the file */
-	if (nd && nd->flags & LOOKUP_OPEN) {
-		filp = lookup_instantiate_filp(nd, dentry, generic_file_open);
-		if (IS_ERR(filp)) {
-			p9_client_clunk(ofid);
-			return PTR_ERR(filp);
-		}
-		filp->private_data = ofid;
-	} else
-		p9_client_clunk(ofid);
-
-	return 0;
-
-error:
-	if (ofid)
-		p9_client_clunk(ofid);
-	if (fid)
-		p9_client_clunk(fid);
-	return err;
-}
-
-/**
  * v9fs_vfs_create - VFS hook to create files
  * @dir: directory inode that is being created
  * @dentry:  dentry that is being deleted
@@ -884,107 +644,6 @@
 	return err;
 }
 
-
-/**
- * v9fs_vfs_mkdir_dotl - VFS mkdir hook to create a directory
- * @dir:  inode that is being unlinked
- * @dentry: dentry that is being unlinked
- * @mode: mode for new directory
- *
- */
-
-static int v9fs_vfs_mkdir_dotl(struct inode *dir,
-			       struct dentry *dentry, int omode)
-{
-	int err;
-	struct v9fs_session_info *v9ses;
-	struct p9_fid *fid = NULL, *dfid = NULL;
-	gid_t gid;
-	char *name;
-	mode_t mode;
-	struct inode *inode;
-	struct p9_qid qid;
-	struct dentry *dir_dentry;
-	struct posix_acl *dacl = NULL, *pacl = NULL;
-
-	P9_DPRINTK(P9_DEBUG_VFS, "name %s\n", dentry->d_name.name);
-	err = 0;
-	v9ses = v9fs_inode2v9ses(dir);
-
-	omode |= S_IFDIR;
-	if (dir->i_mode & S_ISGID)
-		omode |= S_ISGID;
-
-	dir_dentry = v9fs_dentry_from_dir_inode(dir);
-	dfid = v9fs_fid_lookup(dir_dentry);
-	if (IS_ERR(dfid)) {
-		err = PTR_ERR(dfid);
-		P9_DPRINTK(P9_DEBUG_VFS, "fid lookup failed %d\n", err);
-		dfid = NULL;
-		goto error;
-	}
-
-	gid = v9fs_get_fsgid_for_create(dir);
-	mode = omode;
-	/* Update mode based on ACL value */
-	err = v9fs_acl_mode(dir, &mode, &dacl, &pacl);
-	if (err) {
-		P9_DPRINTK(P9_DEBUG_VFS,
-			   "Failed to get acl values in mkdir %d\n", err);
-		goto error;
-	}
-	name = (char *) dentry->d_name.name;
-	err = p9_client_mkdir_dotl(dfid, name, mode, gid, &qid);
-	if (err < 0)
-		goto error;
-
-	/* instantiate inode and assign the unopened fid to the dentry */
-	if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) {
-		fid = p9_client_walk(dfid, 1, &name, 1);
-		if (IS_ERR(fid)) {
-			err = PTR_ERR(fid);
-			P9_DPRINTK(P9_DEBUG_VFS, "p9_client_walk failed %d\n",
-				err);
-			fid = NULL;
-			goto error;
-		}
-
-		inode = v9fs_inode_from_fid(v9ses, fid, dir->i_sb);
-		if (IS_ERR(inode)) {
-			err = PTR_ERR(inode);
-			P9_DPRINTK(P9_DEBUG_VFS, "inode creation failed %d\n",
-				err);
-			goto error;
-		}
-		d_set_d_op(dentry, &v9fs_cached_dentry_operations);
-		d_instantiate(dentry, inode);
-		err = v9fs_fid_add(dentry, fid);
-		if (err < 0)
-			goto error;
-		fid = NULL;
-	} else {
-		/*
-		 * Not in cached mode. No need to populate
-		 * inode with stat. We need to get an inode
-		 * so that we can set the acl with dentry
-		 */
-		inode = v9fs_get_inode(dir->i_sb, mode);
-		if (IS_ERR(inode)) {
-			err = PTR_ERR(inode);
-			goto error;
-		}
-		d_set_d_op(dentry, &v9fs_dentry_operations);
-		d_instantiate(dentry, inode);
-	}
-	/* Now set the ACL based on the default value */
-	v9fs_set_create_acl(dentry, dacl, pacl);
-
-error:
-	if (fid)
-		p9_client_clunk(fid);
-	return err;
-}
-
 /**
  * v9fs_vfs_lookup - VFS lookup hook to "walk" to a new inode
  * @dir:  inode that is being walked from
@@ -993,7 +652,7 @@
  *
  */
 
-static struct dentry *v9fs_vfs_lookup(struct inode *dir, struct dentry *dentry,
+struct dentry *v9fs_vfs_lookup(struct inode *dir, struct dentry *dentry,
 				      struct nameidata *nameidata)
 {
 	struct super_block *sb;
@@ -1040,11 +699,6 @@
 		goto error_iput;
 
 inst_out:
-	if (v9ses->cache)
-		d_set_d_op(dentry, &v9fs_cached_dentry_operations);
-	else
-		d_set_d_op(dentry, &v9fs_dentry_operations);
-
 	d_add(dentry, inode);
 	return NULL;
 
@@ -1063,7 +717,7 @@
  *
  */
 
-static int v9fs_vfs_unlink(struct inode *i, struct dentry *d)
+int v9fs_vfs_unlink(struct inode *i, struct dentry *d)
 {
 	return v9fs_remove(i, d, 0);
 }
@@ -1075,7 +729,7 @@
  *
  */
 
-static int v9fs_vfs_rmdir(struct inode *i, struct dentry *d)
+int v9fs_vfs_rmdir(struct inode *i, struct dentry *d)
 {
 	return v9fs_remove(i, d, 1);
 }
@@ -1089,7 +743,7 @@
  *
  */
 
-static int
+int
 v9fs_vfs_rename(struct inode *old_dir, struct dentry *old_dentry,
 		struct inode *new_dir, struct dentry *new_dentry)
 {
@@ -1196,42 +850,6 @@
 	return 0;
 }
 
-static int
-v9fs_vfs_getattr_dotl(struct vfsmount *mnt, struct dentry *dentry,
-		 struct kstat *stat)
-{
-	int err;
-	struct v9fs_session_info *v9ses;
-	struct p9_fid *fid;
-	struct p9_stat_dotl *st;
-
-	P9_DPRINTK(P9_DEBUG_VFS, "dentry: %p\n", dentry);
-	err = -EPERM;
-	v9ses = v9fs_inode2v9ses(dentry->d_inode);
-	if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE)
-		return simple_getattr(mnt, dentry, stat);
-
-	fid = v9fs_fid_lookup(dentry);
-	if (IS_ERR(fid))
-		return PTR_ERR(fid);
-
-	/* Ask for all the fields in stat structure. Server will return
-	 * whatever it supports
-	 */
-
-	st = p9_client_getattr_dotl(fid, P9_STATS_ALL);
-	if (IS_ERR(st))
-		return PTR_ERR(st);
-
-	v9fs_stat2inode_dotl(st, dentry->d_inode);
-	generic_fillattr(dentry->d_inode, stat);
-	/* Change block size to what the server returned */
-	stat->blksize = st->st_blksize;
-
-	kfree(st);
-	return 0;
-}
-
 /**
  * v9fs_vfs_setattr - set file metadata
  * @dentry: file whose metadata to set
@@ -1291,64 +909,6 @@
 }
 
 /**
- * v9fs_vfs_setattr_dotl - set file metadata
- * @dentry: file whose metadata to set
- * @iattr: metadata assignment structure
- *
- */
-
-int v9fs_vfs_setattr_dotl(struct dentry *dentry, struct iattr *iattr)
-{
-	int retval;
-	struct v9fs_session_info *v9ses;
-	struct p9_fid *fid;
-	struct p9_iattr_dotl p9attr;
-
-	P9_DPRINTK(P9_DEBUG_VFS, "\n");
-
-	retval = inode_change_ok(dentry->d_inode, iattr);
-	if (retval)
-		return retval;
-
-	p9attr.valid = iattr->ia_valid;
-	p9attr.mode = iattr->ia_mode;
-	p9attr.uid = iattr->ia_uid;
-	p9attr.gid = iattr->ia_gid;
-	p9attr.size = iattr->ia_size;
-	p9attr.atime_sec = iattr->ia_atime.tv_sec;
-	p9attr.atime_nsec = iattr->ia_atime.tv_nsec;
-	p9attr.mtime_sec = iattr->ia_mtime.tv_sec;
-	p9attr.mtime_nsec = iattr->ia_mtime.tv_nsec;
-
-	retval = -EPERM;
-	v9ses = v9fs_inode2v9ses(dentry->d_inode);
-	fid = v9fs_fid_lookup(dentry);
-	if (IS_ERR(fid))
-		return PTR_ERR(fid);
-
-	retval = p9_client_setattr(fid, &p9attr);
-	if (retval < 0)
-		return retval;
-
-	if ((iattr->ia_valid & ATTR_SIZE) &&
-	    iattr->ia_size != i_size_read(dentry->d_inode)) {
-		retval = vmtruncate(dentry->d_inode, iattr->ia_size);
-		if (retval)
-			return retval;
-	}
-
-	setattr_copy(dentry->d_inode, iattr);
-	mark_inode_dirty(dentry->d_inode);
-	if (iattr->ia_valid & ATTR_MODE) {
-		/* We also want to update ACL when we update mode bits */
-		retval = v9fs_acl_chmod(dentry);
-		if (retval < 0)
-			return retval;
-	}
-	return 0;
-}
-
-/**
  * v9fs_stat2inode - populate an inode structure with mistat info
  * @stat: Plan 9 metadata (mistat) structure
  * @inode: inode to populate
@@ -1426,77 +986,6 @@
 }
 
 /**
- * v9fs_stat2inode_dotl - populate an inode structure with stat info
- * @stat: stat structure
- * @inode: inode to populate
- * @sb: superblock of filesystem
- *
- */
-
-void
-v9fs_stat2inode_dotl(struct p9_stat_dotl *stat, struct inode *inode)
-{
-
-	if ((stat->st_result_mask & P9_STATS_BASIC) == P9_STATS_BASIC) {
-		inode->i_atime.tv_sec = stat->st_atime_sec;
-		inode->i_atime.tv_nsec = stat->st_atime_nsec;
-		inode->i_mtime.tv_sec = stat->st_mtime_sec;
-		inode->i_mtime.tv_nsec = stat->st_mtime_nsec;
-		inode->i_ctime.tv_sec = stat->st_ctime_sec;
-		inode->i_ctime.tv_nsec = stat->st_ctime_nsec;
-		inode->i_uid = stat->st_uid;
-		inode->i_gid = stat->st_gid;
-		inode->i_nlink = stat->st_nlink;
-		inode->i_mode = stat->st_mode;
-		inode->i_rdev = new_decode_dev(stat->st_rdev);
-
-		if ((S_ISBLK(inode->i_mode)) || (S_ISCHR(inode->i_mode)))
-			init_special_inode(inode, inode->i_mode, inode->i_rdev);
-
-		i_size_write(inode, stat->st_size);
-		inode->i_blocks = stat->st_blocks;
-	} else {
-		if (stat->st_result_mask & P9_STATS_ATIME) {
-			inode->i_atime.tv_sec = stat->st_atime_sec;
-			inode->i_atime.tv_nsec = stat->st_atime_nsec;
-		}
-		if (stat->st_result_mask & P9_STATS_MTIME) {
-			inode->i_mtime.tv_sec = stat->st_mtime_sec;
-			inode->i_mtime.tv_nsec = stat->st_mtime_nsec;
-		}
-		if (stat->st_result_mask & P9_STATS_CTIME) {
-			inode->i_ctime.tv_sec = stat->st_ctime_sec;
-			inode->i_ctime.tv_nsec = stat->st_ctime_nsec;
-		}
-		if (stat->st_result_mask & P9_STATS_UID)
-			inode->i_uid = stat->st_uid;
-		if (stat->st_result_mask & P9_STATS_GID)
-			inode->i_gid = stat->st_gid;
-		if (stat->st_result_mask & P9_STATS_NLINK)
-			inode->i_nlink = stat->st_nlink;
-		if (stat->st_result_mask & P9_STATS_MODE) {
-			inode->i_mode = stat->st_mode;
-			if ((S_ISBLK(inode->i_mode)) ||
-						(S_ISCHR(inode->i_mode)))
-				init_special_inode(inode, inode->i_mode,
-								inode->i_rdev);
-		}
-		if (stat->st_result_mask & P9_STATS_RDEV)
-			inode->i_rdev = new_decode_dev(stat->st_rdev);
-		if (stat->st_result_mask & P9_STATS_SIZE)
-			i_size_write(inode, stat->st_size);
-		if (stat->st_result_mask & P9_STATS_BLOCKS)
-			inode->i_blocks = stat->st_blocks;
-	}
-	if (stat->st_result_mask & P9_STATS_GEN)
-			inode->i_generation = stat->st_gen;
-
-	/* Currently we don't support P9_STATS_BTIME and P9_STATS_DATA_VERSION
-	 * because the inode structure does not have fields for them.
-	 */
-}
-
-/**
  * v9fs_qid2ino - convert qid into inode number
  * @qid: qid to hash
  *
@@ -1602,7 +1091,7 @@
  *
  */
 
-static void
+void
 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
 {
 	char *s = nd_get_link(nd);
@@ -1646,94 +1135,6 @@
 }
 
 /**
- * v9fs_vfs_symlink_dotl - helper function to create symlinks
- * @dir: directory inode containing symlink
- * @dentry: dentry for symlink
- * @symname: symlink data
- *
- * See Also: 9P2000.L RFC for more information
- *
- */
-
-static int
-v9fs_vfs_symlink_dotl(struct inode *dir, struct dentry *dentry,
-		const char *symname)
-{
-	struct v9fs_session_info *v9ses;
-	struct p9_fid *dfid;
-	struct p9_fid *fid = NULL;
-	struct inode *inode;
-	struct p9_qid qid;
-	char *name;
-	int err;
-	gid_t gid;
-
-	name = (char *) dentry->d_name.name;
-	P9_DPRINTK(P9_DEBUG_VFS, "v9fs_vfs_symlink_dotl : %lu,%s,%s\n",
-			dir->i_ino, name, symname);
-	v9ses = v9fs_inode2v9ses(dir);
-
-	dfid = v9fs_fid_lookup(dentry->d_parent);
-	if (IS_ERR(dfid)) {
-		err = PTR_ERR(dfid);
-		P9_DPRINTK(P9_DEBUG_VFS, "fid lookup failed %d\n", err);
-		return err;
-	}
-
-	gid = v9fs_get_fsgid_for_create(dir);
-
-	/* Server doesn't alter fid on TSYMLINK. Hence no need to clone it. */
-	err = p9_client_symlink(dfid, name, (char *)symname, gid, &qid);
-
-	if (err < 0) {
-		P9_DPRINTK(P9_DEBUG_VFS, "p9_client_symlink failed %d\n", err);
-		goto error;
-	}
-
-	if (v9ses->cache) {
-		/* Now walk from the parent so we can get an unopened fid. */
-		fid = p9_client_walk(dfid, 1, &name, 1);
-		if (IS_ERR(fid)) {
-			err = PTR_ERR(fid);
-			P9_DPRINTK(P9_DEBUG_VFS, "p9_client_walk failed %d\n",
-					err);
-			fid = NULL;
-			goto error;
-		}
-
-		/* instantiate inode and assign the unopened fid to dentry */
-		inode = v9fs_inode_from_fid(v9ses, fid, dir->i_sb);
-		if (IS_ERR(inode)) {
-			err = PTR_ERR(inode);
-			P9_DPRINTK(P9_DEBUG_VFS, "inode creation failed %d\n",
-					err);
-			goto error;
-		}
-		d_set_d_op(dentry, &v9fs_cached_dentry_operations);
-		d_instantiate(dentry, inode);
-		err = v9fs_fid_add(dentry, fid);
-		if (err < 0)
-			goto error;
-		fid = NULL;
-	} else {
-		/* Not in cached mode. No need to populate inode with stat */
-		inode = v9fs_get_inode(dir->i_sb, S_IFLNK);
-		if (IS_ERR(inode)) {
-			err = PTR_ERR(inode);
-			goto error;
-		}
-		d_set_d_op(dentry, &v9fs_dentry_operations);
-		d_instantiate(dentry, inode);
-	}
-
-error:
-	if (fid)
-		p9_client_clunk(fid);
-
-	return err;
-}
-
-/**
  * v9fs_vfs_symlink - helper function to create symlinks
  * @dir: directory inode containing symlink
  * @dentry: dentry for symlink
@@ -1792,77 +1193,6 @@
 }
 
 /**
- * v9fs_vfs_link_dotl - create a hardlink for dotl
- * @old_dentry: dentry for file to link to
- * @dir: inode destination for new link
- * @dentry: dentry for link
- *
- */
-
-static int
-v9fs_vfs_link_dotl(struct dentry *old_dentry, struct inode *dir,
-		struct dentry *dentry)
-{
-	int err;
-	struct p9_fid *dfid, *oldfid;
-	char *name;
-	struct v9fs_session_info *v9ses;
-	struct dentry *dir_dentry;
-
-	P9_DPRINTK(P9_DEBUG_VFS, "dir ino: %lu, old_name: %s, new_name: %s\n",
-			dir->i_ino, old_dentry->d_name.name,
-			dentry->d_name.name);
-
-	v9ses = v9fs_inode2v9ses(dir);
-	dir_dentry = v9fs_dentry_from_dir_inode(dir);
-	dfid = v9fs_fid_lookup(dir_dentry);
-	if (IS_ERR(dfid))
-		return PTR_ERR(dfid);
-
-	oldfid = v9fs_fid_lookup(old_dentry);
-	if (IS_ERR(oldfid))
-		return PTR_ERR(oldfid);
-
-	name = (char *) dentry->d_name.name;
-
-	err = p9_client_link(dfid, oldfid, (char *)dentry->d_name.name);
-
-	if (err < 0) {
-		P9_DPRINTK(P9_DEBUG_VFS, "p9_client_link failed %d\n", err);
-		return err;
-	}
-
-	if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) {
-		/* Get the latest stat info from server. */
-		struct p9_fid *fid;
-		struct p9_stat_dotl *st;
-
-		fid = v9fs_fid_lookup(old_dentry);
-		if (IS_ERR(fid))
-			return PTR_ERR(fid);
-
-		st = p9_client_getattr_dotl(fid, P9_STATS_BASIC);
-		if (IS_ERR(st))
-			return PTR_ERR(st);
-
-		v9fs_stat2inode_dotl(st, old_dentry->d_inode);
-
-		kfree(st);
-	} else {
-		/* Caching disabled. No need to get upto date stat info.
-		 * This dentry will be released immediately. So, just hold the
-		 * inode
-		 */
-		ihold(old_dentry->d_inode);
-	}
-
-	d_set_d_op(dentry, old_dentry->d_op);
-	d_instantiate(dentry, old_dentry->d_inode);
-
-	return err;
-}
-
-/**
  * v9fs_vfs_mknod - create a special file
  * @dir: inode destination for new link
  * @dentry: dentry for file
@@ -1907,160 +1237,6 @@
 	return retval;
 }
 
-/**
- * v9fs_vfs_mknod_dotl - create a special file
- * @dir: inode destination for new link
- * @dentry: dentry for file
- * @mode: mode for creation
- * @rdev: device associated with special file
- *
- */
-static int
-v9fs_vfs_mknod_dotl(struct inode *dir, struct dentry *dentry, int omode,
-		dev_t rdev)
-{
-	int err;
-	char *name;
-	mode_t mode;
-	struct v9fs_session_info *v9ses;
-	struct p9_fid *fid = NULL, *dfid = NULL;
-	struct inode *inode;
-	gid_t gid;
-	struct p9_qid qid;
-	struct dentry *dir_dentry;
-	struct posix_acl *dacl = NULL, *pacl = NULL;
-
-	P9_DPRINTK(P9_DEBUG_VFS,
-		" %lu,%s mode: %x MAJOR: %u MINOR: %u\n", dir->i_ino,
-		dentry->d_name.name, omode, MAJOR(rdev), MINOR(rdev));
-
-	if (!new_valid_dev(rdev))
-		return -EINVAL;
-
-	v9ses = v9fs_inode2v9ses(dir);
-	dir_dentry = v9fs_dentry_from_dir_inode(dir);
-	dfid = v9fs_fid_lookup(dir_dentry);
-	if (IS_ERR(dfid)) {
-		err = PTR_ERR(dfid);
-		P9_DPRINTK(P9_DEBUG_VFS, "fid lookup failed %d\n", err);
-		dfid = NULL;
-		goto error;
-	}
-
-	gid = v9fs_get_fsgid_for_create(dir);
-	mode = omode;
-	/* Update mode based on ACL value */
-	err = v9fs_acl_mode(dir, &mode, &dacl, &pacl);
-	if (err) {
-		P9_DPRINTK(P9_DEBUG_VFS,
-			   "Failed to get acl values in mknod %d\n", err);
-		goto error;
-	}
-	name = (char *) dentry->d_name.name;
-
-	err = p9_client_mknod_dotl(dfid, name, mode, rdev, gid, &qid);
-	if (err < 0)
-		goto error;
-
-	/* instantiate inode and assign the unopened fid to the dentry */
-	if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) {
-		fid = p9_client_walk(dfid, 1, &name, 1);
-		if (IS_ERR(fid)) {
-			err = PTR_ERR(fid);
-			P9_DPRINTK(P9_DEBUG_VFS, "p9_client_walk failed %d\n",
-				err);
-			fid = NULL;
-			goto error;
-		}
-
-		inode = v9fs_inode_from_fid(v9ses, fid, dir->i_sb);
-		if (IS_ERR(inode)) {
-			err = PTR_ERR(inode);
-			P9_DPRINTK(P9_DEBUG_VFS, "inode creation failed %d\n",
-				err);
-			goto error;
-		}
-		d_set_d_op(dentry, &v9fs_cached_dentry_operations);
-		d_instantiate(dentry, inode);
-		err = v9fs_fid_add(dentry, fid);
-		if (err < 0)
-			goto error;
-		fid = NULL;
-	} else {
-		/*
-		 * Not in cached mode. No need to populate inode with stat.
-		 * socket syscall returns a fd, so we need instantiate
-		 */
-		inode = v9fs_get_inode(dir->i_sb, mode);
-		if (IS_ERR(inode)) {
-			err = PTR_ERR(inode);
-			goto error;
-		}
-		d_set_d_op(dentry, &v9fs_dentry_operations);
-		d_instantiate(dentry, inode);
-	}
-	/* Now set the ACL based on the default value */
-	v9fs_set_create_acl(dentry, dacl, pacl);
-error:
-	if (fid)
-		p9_client_clunk(fid);
-	return err;
-}
-
-static int
-v9fs_vfs_readlink_dotl(struct dentry *dentry, char *buffer, int buflen)
-{
-	int retval;
-	struct p9_fid *fid;
-	char *target = NULL;
-
-	P9_DPRINTK(P9_DEBUG_VFS, " %s\n", dentry->d_name.name);
-	retval = -EPERM;
-	fid = v9fs_fid_lookup(dentry);
-	if (IS_ERR(fid))
-		return PTR_ERR(fid);
-
-	retval = p9_client_readlink(fid, &target);
-	if (retval < 0)
-		return retval;
-
-	strncpy(buffer, target, buflen);
-	P9_DPRINTK(P9_DEBUG_VFS, "%s -> %s\n", dentry->d_name.name, buffer);
-
-	retval = strnlen(buffer, buflen);
-	return retval;
-}
-
-/**
- * v9fs_vfs_follow_link_dotl - follow a symlink path
- * @dentry: dentry for symlink
- * @nd: nameidata
- *
- */
-
-static void *
-v9fs_vfs_follow_link_dotl(struct dentry *dentry, struct nameidata *nd)
-{
-	int len = 0;
-	char *link = __getname();
-
-	P9_DPRINTK(P9_DEBUG_VFS, "%s n", dentry->d_name.name);
-
-	if (!link)
-		link = ERR_PTR(-ENOMEM);
-	else {
-		len = v9fs_vfs_readlink_dotl(dentry, link, PATH_MAX);
-		if (len < 0) {
-			__putname(link);
-			link = ERR_PTR(len);
-		} else
-			link[min(len, PATH_MAX-1)] = 0;
-	}
-	nd_set_link(nd, link);
-
-	return NULL;
-}
-
 static const struct inode_operations v9fs_dir_inode_operations_dotu = {
 	.create = v9fs_vfs_create,
 	.lookup = v9fs_vfs_lookup,
@@ -2075,25 +1251,6 @@
 	.setattr = v9fs_vfs_setattr,
 };
 
-static const struct inode_operations v9fs_dir_inode_operations_dotl = {
-	.create = v9fs_vfs_create_dotl,
-	.lookup = v9fs_vfs_lookup,
-	.link = v9fs_vfs_link_dotl,
-	.symlink = v9fs_vfs_symlink_dotl,
-	.unlink = v9fs_vfs_unlink,
-	.mkdir = v9fs_vfs_mkdir_dotl,
-	.rmdir = v9fs_vfs_rmdir,
-	.mknod = v9fs_vfs_mknod_dotl,
-	.rename = v9fs_vfs_rename,
-	.getattr = v9fs_vfs_getattr_dotl,
-	.setattr = v9fs_vfs_setattr_dotl,
-	.setxattr = generic_setxattr,
-	.getxattr = generic_getxattr,
-	.removexattr = generic_removexattr,
-	.listxattr = v9fs_listxattr,
-	.check_acl = v9fs_check_acl,
-};
-
 static const struct inode_operations v9fs_dir_inode_operations = {
 	.create = v9fs_vfs_create,
 	.lookup = v9fs_vfs_lookup,
@@ -2111,16 +1268,6 @@
 	.setattr = v9fs_vfs_setattr,
 };
 
-static const struct inode_operations v9fs_file_inode_operations_dotl = {
-	.getattr = v9fs_vfs_getattr_dotl,
-	.setattr = v9fs_vfs_setattr_dotl,
-	.setxattr = generic_setxattr,
-	.getxattr = generic_getxattr,
-	.removexattr = generic_removexattr,
-	.listxattr = v9fs_listxattr,
-	.check_acl = v9fs_check_acl,
-};
-
 static const struct inode_operations v9fs_symlink_inode_operations = {
 	.readlink = generic_readlink,
 	.follow_link = v9fs_vfs_follow_link,
@@ -2129,14 +1276,3 @@
 	.setattr = v9fs_vfs_setattr,
 };
 
-static const struct inode_operations v9fs_symlink_inode_operations_dotl = {
-	.readlink = v9fs_vfs_readlink_dotl,
-	.follow_link = v9fs_vfs_follow_link_dotl,
-	.put_link = v9fs_vfs_put_link,
-	.getattr = v9fs_vfs_getattr_dotl,
-	.setattr = v9fs_vfs_setattr_dotl,
-	.setxattr = generic_setxattr,
-	.getxattr = generic_getxattr,
-	.removexattr = generic_removexattr,
-	.listxattr = v9fs_listxattr,
-};
diff --git a/fs/9p/vfs_inode_dotl.c b/fs/9p/vfs_inode_dotl.c
new file mode 100644
index 0000000..fe3ffa9
--- /dev/null
+++ b/fs/9p/vfs_inode_dotl.c
@@ -0,0 +1,824 @@
+/*
+ *  linux/fs/9p/vfs_inode_dotl.c
+ *
+ * This file contains vfs inode ops for the 9P2000.L protocol.
+ *
+ *  Copyright (C) 2004 by Eric Van Hensbergen <ericvh@gmail.com>
+ *  Copyright (C) 2002 by Ron Minnich <rminnich@lanl.gov>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License version 2
+ *  as published by the Free Software Foundation.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to:
+ *  Free Software Foundation
+ *  51 Franklin Street, Fifth Floor
+ *  Boston, MA  02111-1301  USA
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/pagemap.h>
+#include <linux/stat.h>
+#include <linux/string.h>
+#include <linux/inet.h>
+#include <linux/namei.h>
+#include <linux/idr.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/xattr.h>
+#include <linux/posix_acl.h>
+#include <net/9p/9p.h>
+#include <net/9p/client.h>
+
+#include "v9fs.h"
+#include "v9fs_vfs.h"
+#include "fid.h"
+#include "cache.h"
+#include "xattr.h"
+#include "acl.h"
+
+static int
+v9fs_vfs_mknod_dotl(struct inode *dir, struct dentry *dentry, int omode,
+		    dev_t rdev);
+
+/**
+ * v9fs_get_fsgid_for_create - Helper function to get the gid for creating a
+ * new file system object. This checks the S_ISGID bit on the parent directory
+ * to determine the owning group of the new file system object.
+ */
+
+static gid_t v9fs_get_fsgid_for_create(struct inode *dir_inode)
+{
+	BUG_ON(dir_inode == NULL);
+
+	if (dir_inode->i_mode & S_ISGID) {
+		/* set_gid bit is set.*/
+		return dir_inode->i_gid;
+	}
+	return current_fsgid();
+}
+
+/**
+ * v9fs_dentry_from_dir_inode - helper function to get the dentry from
+ * dir inode.
+ *
+ */
+
+static struct dentry *v9fs_dentry_from_dir_inode(struct inode *inode)
+{
+	struct dentry *dentry;
+
+	spin_lock(&inode->i_lock);
+	/* Directory should have only one entry. */
+	BUG_ON(S_ISDIR(inode->i_mode) && !list_is_singular(&inode->i_dentry));
+	dentry = list_entry(inode->i_dentry.next, struct dentry, d_alias);
+	spin_unlock(&inode->i_lock);
+	return dentry;
+}
+
+struct inode *
+v9fs_inode_dotl(struct v9fs_session_info *v9ses, struct p9_fid *fid,
+	struct super_block *sb)
+{
+	struct inode *ret = NULL;
+	int err;
+	struct p9_stat_dotl *st;
+
+	st = p9_client_getattr_dotl(fid, P9_STATS_BASIC);
+	if (IS_ERR(st))
+		return ERR_CAST(st);
+
+	ret = v9fs_get_inode(sb, st->st_mode);
+	if (IS_ERR(ret)) {
+		err = PTR_ERR(ret);
+		goto error;
+	}
+
+	v9fs_stat2inode_dotl(st, ret);
+	ret->i_ino = v9fs_qid2ino(&st->qid);
+#ifdef CONFIG_9P_FSCACHE
+	v9fs_vcookie_set_qid(ret, &st->qid);
+	v9fs_cache_inode_get_cookie(ret);
+#endif
+	err = v9fs_get_acl(ret, fid);
+	if (err) {
+		iput(ret);
+		goto error;
+	}
+	kfree(st);
+	return ret;
+error:
+	kfree(st);
+	return ERR_PTR(err);
+}
+
+/**
+ * v9fs_vfs_create_dotl - VFS hook to create files for 9P2000.L protocol.
+ * @dir: directory inode that is being created
+ * @dentry:  dentry that is being deleted
+ * @mode: create permissions
+ * @nd: path information
+ *
+ */
+
+static int
+v9fs_vfs_create_dotl(struct inode *dir, struct dentry *dentry, int omode,
+		struct nameidata *nd)
+{
+	int err = 0;
+	char *name = NULL;
+	gid_t gid;
+	int flags;
+	mode_t mode;
+	struct v9fs_session_info *v9ses;
+	struct p9_fid *fid = NULL;
+	struct p9_fid *dfid, *ofid;
+	struct file *filp;
+	struct p9_qid qid;
+	struct inode *inode;
+	struct posix_acl *pacl = NULL, *dacl = NULL;
+
+	v9ses = v9fs_inode2v9ses(dir);
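+	/*
+	 * The open intent carries the O_* flags incremented by one
+	 * (see open_to_namei_flags()), so subtract one to recover the
+	 * original open flags.
+	 */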
+	if (nd && nd->flags & LOOKUP_OPEN)
+		flags = nd->intent.open.flags - 1;
+	else {
+		/*
+		 * A create call without LOOKUP_OPEN comes from
+		 * mknod of regular files, so use the mknod
+		 * operation instead.
+		 */
+		return v9fs_vfs_mknod_dotl(dir, dentry, omode, 0);
+	}
+
+	name = (char *) dentry->d_name.name;
+	P9_DPRINTK(P9_DEBUG_VFS, "v9fs_vfs_create_dotl: name:%s flags:0x%x "
+			"mode:0x%x\n", name, flags, omode);
+
+	dfid = v9fs_fid_lookup(dentry->d_parent);
+	if (IS_ERR(dfid)) {
+		err = PTR_ERR(dfid);
+		P9_DPRINTK(P9_DEBUG_VFS, "fid lookup failed %d\n", err);
+		return err;
+	}
+
+	/* clone a fid to use for creation */
+	ofid = p9_client_walk(dfid, 0, NULL, 1);
+	if (IS_ERR(ofid)) {
+		err = PTR_ERR(ofid);
+		P9_DPRINTK(P9_DEBUG_VFS, "p9_client_walk failed %d\n", err);
+		return err;
+	}
+
+	gid = v9fs_get_fsgid_for_create(dir);
+
+	mode = omode;
+	/* Update mode based on ACL value */
+	err = v9fs_acl_mode(dir, &mode, &dacl, &pacl);
+	if (err) {
+		P9_DPRINTK(P9_DEBUG_VFS,
+			   "Failed to get acl values in creat %d\n", err);
+		goto error;
+	}
+	err = p9_client_create_dotl(ofid, name, flags, mode, gid, &qid);
+	if (err < 0) {
+		P9_DPRINTK(P9_DEBUG_VFS,
+				"p9_client_create_dotl failed in creat %d\n",
+				err);
+		goto error;
+	}
+
+	/* instantiate inode and assign the unopened fid to the dentry */
+	fid = p9_client_walk(dfid, 1, &name, 1);
+	if (IS_ERR(fid)) {
+		err = PTR_ERR(fid);
+		P9_DPRINTK(P9_DEBUG_VFS, "p9_client_walk failed %d\n", err);
+		fid = NULL;
+		goto error;
+	}
+	inode = v9fs_inode_from_fid(v9ses, fid, dir->i_sb);
+	if (IS_ERR(inode)) {
+		err = PTR_ERR(inode);
+		P9_DPRINTK(P9_DEBUG_VFS, "inode creation failed %d\n", err);
+		goto error;
+	}
+	d_instantiate(dentry, inode);
+	err = v9fs_fid_add(dentry, fid);
+	if (err < 0)
+		goto error;
+
+	/* Now set the ACL based on the default value */
+	v9fs_set_create_acl(dentry, dacl, pacl);
+
+	/* Since we are opening a file, assign the open fid to the file */
+	filp = lookup_instantiate_filp(nd, dentry, generic_file_open);
+	if (IS_ERR(filp)) {
+		p9_client_clunk(ofid);
+		return PTR_ERR(filp);
+	}
+	filp->private_data = ofid;
+	return 0;
+
+error:
+	if (ofid)
+		p9_client_clunk(ofid);
+	if (fid)
+		p9_client_clunk(fid);
+	return err;
+}
+
+/**
+ * v9fs_vfs_mkdir_dotl - VFS mkdir hook to create a directory
+ * @dir:  inode that is being unlinked
+ * @dentry: dentry that is being unlinked
+ * @mode: mode for new directory
+ *
+ */
+
+static int v9fs_vfs_mkdir_dotl(struct inode *dir,
+			       struct dentry *dentry, int omode)
+{
+	int err;
+	struct v9fs_session_info *v9ses;
+	struct p9_fid *fid = NULL, *dfid = NULL;
+	gid_t gid;
+	char *name;
+	mode_t mode;
+	struct inode *inode;
+	struct p9_qid qid;
+	struct dentry *dir_dentry;
+	struct posix_acl *dacl = NULL, *pacl = NULL;
+
+	P9_DPRINTK(P9_DEBUG_VFS, "name %s\n", dentry->d_name.name);
+	err = 0;
+	v9ses = v9fs_inode2v9ses(dir);
+
+	omode |= S_IFDIR;
+	if (dir->i_mode & S_ISGID)
+		omode |= S_ISGID;
+
+	dir_dentry = v9fs_dentry_from_dir_inode(dir);
+	dfid = v9fs_fid_lookup(dir_dentry);
+	if (IS_ERR(dfid)) {
+		err = PTR_ERR(dfid);
+		P9_DPRINTK(P9_DEBUG_VFS, "fid lookup failed %d\n", err);
+		dfid = NULL;
+		goto error;
+	}
+
+	gid = v9fs_get_fsgid_for_create(dir);
+	mode = omode;
+	/* Update mode based on ACL value */
+	err = v9fs_acl_mode(dir, &mode, &dacl, &pacl);
+	if (err) {
+		P9_DPRINTK(P9_DEBUG_VFS,
+			   "Failed to get acl values in mkdir %d\n", err);
+		goto error;
+	}
+	name = (char *) dentry->d_name.name;
+	err = p9_client_mkdir_dotl(dfid, name, mode, gid, &qid);
+	if (err < 0)
+		goto error;
+
+	/* instantiate inode and assign the unopened fid to the dentry */
+	if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) {
+		fid = p9_client_walk(dfid, 1, &name, 1);
+		if (IS_ERR(fid)) {
+			err = PTR_ERR(fid);
+			P9_DPRINTK(P9_DEBUG_VFS, "p9_client_walk failed %d\n",
+				err);
+			fid = NULL;
+			goto error;
+		}
+
+		inode = v9fs_inode_from_fid(v9ses, fid, dir->i_sb);
+		if (IS_ERR(inode)) {
+			err = PTR_ERR(inode);
+			P9_DPRINTK(P9_DEBUG_VFS, "inode creation failed %d\n",
+				err);
+			goto error;
+		}
+		d_instantiate(dentry, inode);
+		err = v9fs_fid_add(dentry, fid);
+		if (err < 0)
+			goto error;
+		fid = NULL;
+	} else {
+		/*
+		 * Not in cached mode. No need to populate
+		 * Not in cached mode. No need to populate the
+		 * inode with stat. We need an inode so that
+		 * we can set the ACL on the dentry.
+		inode = v9fs_get_inode(dir->i_sb, mode);
+		if (IS_ERR(inode)) {
+			err = PTR_ERR(inode);
+			goto error;
+		}
+		d_instantiate(dentry, inode);
+	}
+	/* Now set the ACL based on the default value */
+	v9fs_set_create_acl(dentry, dacl, pacl);
+
+error:
+	if (fid)
+		p9_client_clunk(fid);
+	return err;
+}
+
+static int
+v9fs_vfs_getattr_dotl(struct vfsmount *mnt, struct dentry *dentry,
+		 struct kstat *stat)
+{
+	int err;
+	struct v9fs_session_info *v9ses;
+	struct p9_fid *fid;
+	struct p9_stat_dotl *st;
+
+	P9_DPRINTK(P9_DEBUG_VFS, "dentry: %p\n", dentry);
+	err = -EPERM;
+	v9ses = v9fs_inode2v9ses(dentry->d_inode);
+	if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE)
+		return simple_getattr(mnt, dentry, stat);
+
+	fid = v9fs_fid_lookup(dentry);
+	if (IS_ERR(fid))
+		return PTR_ERR(fid);
+
+	/* Ask for all the fields in the stat structure. The server will
+	 * return whatever it supports.
+	 */
+
+	st = p9_client_getattr_dotl(fid, P9_STATS_ALL);
+	if (IS_ERR(st))
+		return PTR_ERR(st);
+
+	v9fs_stat2inode_dotl(st, dentry->d_inode);
+	generic_fillattr(dentry->d_inode, stat);
+	/* Change block size to what the server returned */
+	stat->blksize = st->st_blksize;
+
+	kfree(st);
+	return 0;
+}
+
+/**
+ * v9fs_vfs_setattr_dotl - set file metadata
+ * @dentry: file whose metadata to set
+ * @iattr: metadata assignment structure
+ *
+ */
+
+int v9fs_vfs_setattr_dotl(struct dentry *dentry, struct iattr *iattr)
+{
+	int retval;
+	struct v9fs_session_info *v9ses;
+	struct p9_fid *fid;
+	struct p9_iattr_dotl p9attr;
+
+	P9_DPRINTK(P9_DEBUG_VFS, "\n");
+
+	retval = inode_change_ok(dentry->d_inode, iattr);
+	if (retval)
+		return retval;
+
+	p9attr.valid = iattr->ia_valid;
+	p9attr.mode = iattr->ia_mode;
+	p9attr.uid = iattr->ia_uid;
+	p9attr.gid = iattr->ia_gid;
+	p9attr.size = iattr->ia_size;
+	p9attr.atime_sec = iattr->ia_atime.tv_sec;
+	p9attr.atime_nsec = iattr->ia_atime.tv_nsec;
+	p9attr.mtime_sec = iattr->ia_mtime.tv_sec;
+	p9attr.mtime_nsec = iattr->ia_mtime.tv_nsec;
+
+	retval = -EPERM;
+	v9ses = v9fs_inode2v9ses(dentry->d_inode);
+	fid = v9fs_fid_lookup(dentry);
+	if (IS_ERR(fid))
+		return PTR_ERR(fid);
+
+	retval = p9_client_setattr(fid, &p9attr);
+	if (retval < 0)
+		return retval;
+
+	if ((iattr->ia_valid & ATTR_SIZE) &&
+	    iattr->ia_size != i_size_read(dentry->d_inode)) {
+		retval = vmtruncate(dentry->d_inode, iattr->ia_size);
+		if (retval)
+			return retval;
+	}
+
+	setattr_copy(dentry->d_inode, iattr);
+	mark_inode_dirty(dentry->d_inode);
+	if (iattr->ia_valid & ATTR_MODE) {
+		/* We also want to update ACL when we update mode bits */
+		retval = v9fs_acl_chmod(dentry);
+		if (retval < 0)
+			return retval;
+	}
+	return 0;
+}
+
+/**
+ * v9fs_stat2inode_dotl - populate an inode structure with stat info
+ * @stat: stat structure
+ * @inode: inode to populate
+ *
+ */
+
+void
+v9fs_stat2inode_dotl(struct p9_stat_dotl *stat, struct inode *inode)
+{
+
+	if ((stat->st_result_mask & P9_STATS_BASIC) == P9_STATS_BASIC) {
+		inode->i_atime.tv_sec = stat->st_atime_sec;
+		inode->i_atime.tv_nsec = stat->st_atime_nsec;
+		inode->i_mtime.tv_sec = stat->st_mtime_sec;
+		inode->i_mtime.tv_nsec = stat->st_mtime_nsec;
+		inode->i_ctime.tv_sec = stat->st_ctime_sec;
+		inode->i_ctime.tv_nsec = stat->st_ctime_nsec;
+		inode->i_uid = stat->st_uid;
+		inode->i_gid = stat->st_gid;
+		inode->i_nlink = stat->st_nlink;
+		inode->i_mode = stat->st_mode;
+		inode->i_rdev = new_decode_dev(stat->st_rdev);
+
+		if ((S_ISBLK(inode->i_mode)) || (S_ISCHR(inode->i_mode)))
+			init_special_inode(inode, inode->i_mode, inode->i_rdev);
+
+		i_size_write(inode, stat->st_size);
+		inode->i_blocks = stat->st_blocks;
+	} else {
+		if (stat->st_result_mask & P9_STATS_ATIME) {
+			inode->i_atime.tv_sec = stat->st_atime_sec;
+			inode->i_atime.tv_nsec = stat->st_atime_nsec;
+		}
+		if (stat->st_result_mask & P9_STATS_MTIME) {
+			inode->i_mtime.tv_sec = stat->st_mtime_sec;
+			inode->i_mtime.tv_nsec = stat->st_mtime_nsec;
+		}
+		if (stat->st_result_mask & P9_STATS_CTIME) {
+			inode->i_ctime.tv_sec = stat->st_ctime_sec;
+			inode->i_ctime.tv_nsec = stat->st_ctime_nsec;
+		}
+		if (stat->st_result_mask & P9_STATS_UID)
+			inode->i_uid = stat->st_uid;
+		if (stat->st_result_mask & P9_STATS_GID)
+			inode->i_gid = stat->st_gid;
+		if (stat->st_result_mask & P9_STATS_NLINK)
+			inode->i_nlink = stat->st_nlink;
+		if (stat->st_result_mask & P9_STATS_MODE) {
+			inode->i_mode = stat->st_mode;
+			if ((S_ISBLK(inode->i_mode)) ||
+						(S_ISCHR(inode->i_mode)))
+				init_special_inode(inode, inode->i_mode,
+								inode->i_rdev);
+		}
+		if (stat->st_result_mask & P9_STATS_RDEV)
+			inode->i_rdev = new_decode_dev(stat->st_rdev);
+		if (stat->st_result_mask & P9_STATS_SIZE)
+			i_size_write(inode, stat->st_size);
+		if (stat->st_result_mask & P9_STATS_BLOCKS)
+			inode->i_blocks = stat->st_blocks;
+	}
+	if (stat->st_result_mask & P9_STATS_GEN)
+		inode->i_generation = stat->st_gen;
+
+	/* Currently we don't support P9_STATS_BTIME and P9_STATS_DATA_VERSION
+	 * because the inode structure does not have fields for them.
+	 */
+}
+
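+/**
+ * v9fs_vfs_symlink_dotl - helper function to create symlinks
+ * @dir: directory inode containing symlink
+ * @dentry: dentry for symlink
+ * @symname: symlink data
+ *
+ * See Also: 9P2000.L RFC for more information
+ *
+ */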
+static int
+v9fs_vfs_symlink_dotl(struct inode *dir, struct dentry *dentry,
+		const char *symname)
+{
+	struct v9fs_session_info *v9ses;
+	struct p9_fid *dfid;
+	struct p9_fid *fid = NULL;
+	struct inode *inode;
+	struct p9_qid qid;
+	char *name;
+	int err;
+	gid_t gid;
+
+	name = (char *) dentry->d_name.name;
+	P9_DPRINTK(P9_DEBUG_VFS, "v9fs_vfs_symlink_dotl : %lu,%s,%s\n",
+			dir->i_ino, name, symname);
+	v9ses = v9fs_inode2v9ses(dir);
+
+	dfid = v9fs_fid_lookup(dentry->d_parent);
+	if (IS_ERR(dfid)) {
+		err = PTR_ERR(dfid);
+		P9_DPRINTK(P9_DEBUG_VFS, "fid lookup failed %d\n", err);
+		return err;
+	}
+
+	gid = v9fs_get_fsgid_for_create(dir);
+
+	/* Server doesn't alter fid on TSYMLINK. Hence no need to clone it. */
+	err = p9_client_symlink(dfid, name, (char *)symname, gid, &qid);
+
+	if (err < 0) {
+		P9_DPRINTK(P9_DEBUG_VFS, "p9_client_symlink failed %d\n", err);
+		goto error;
+	}
+
+	if (v9ses->cache) {
+		/* Now walk from the parent so we can get an unopened fid. */
+		fid = p9_client_walk(dfid, 1, &name, 1);
+		if (IS_ERR(fid)) {
+			err = PTR_ERR(fid);
+			P9_DPRINTK(P9_DEBUG_VFS, "p9_client_walk failed %d\n",
+					err);
+			fid = NULL;
+			goto error;
+		}
+
+		/* instantiate inode and assign the unopened fid to dentry */
+		inode = v9fs_inode_from_fid(v9ses, fid, dir->i_sb);
+		if (IS_ERR(inode)) {
+			err = PTR_ERR(inode);
+			P9_DPRINTK(P9_DEBUG_VFS, "inode creation failed %d\n",
+					err);
+			goto error;
+		}
+		d_instantiate(dentry, inode);
+		err = v9fs_fid_add(dentry, fid);
+		if (err < 0)
+			goto error;
+		fid = NULL;
+	} else {
+		/* Not in cached mode. No need to populate inode with stat */
+		inode = v9fs_get_inode(dir->i_sb, S_IFLNK);
+		if (IS_ERR(inode)) {
+			err = PTR_ERR(inode);
+			goto error;
+		}
+		d_instantiate(dentry, inode);
+	}
+
+error:
+	if (fid)
+		p9_client_clunk(fid);
+
+	return err;
+}
+
+/**
+ * v9fs_vfs_link_dotl - create a hardlink for dotl
+ * @old_dentry: dentry for file to link to
+ * @dir: inode destination for new link
+ * @dentry: dentry for link
+ *
+ */
+
+static int
+v9fs_vfs_link_dotl(struct dentry *old_dentry, struct inode *dir,
+		struct dentry *dentry)
+{
+	int err;
+	struct p9_fid *dfid, *oldfid;
+	char *name;
+	struct v9fs_session_info *v9ses;
+	struct dentry *dir_dentry;
+
+	P9_DPRINTK(P9_DEBUG_VFS, "dir ino: %lu, old_name: %s, new_name: %s\n",
+			dir->i_ino, old_dentry->d_name.name,
+			dentry->d_name.name);
+
+	v9ses = v9fs_inode2v9ses(dir);
+	dir_dentry = v9fs_dentry_from_dir_inode(dir);
+	dfid = v9fs_fid_lookup(dir_dentry);
+	if (IS_ERR(dfid))
+		return PTR_ERR(dfid);
+
+	oldfid = v9fs_fid_lookup(old_dentry);
+	if (IS_ERR(oldfid))
+		return PTR_ERR(oldfid);
+
+	name = (char *) dentry->d_name.name;
+
+	err = p9_client_link(dfid, oldfid, (char *)dentry->d_name.name);
+
+	if (err < 0) {
+		P9_DPRINTK(P9_DEBUG_VFS, "p9_client_link failed %d\n", err);
+		return err;
+	}
+
+	if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) {
+		/* Get the latest stat info from server. */
+		struct p9_fid *fid;
+		struct p9_stat_dotl *st;
+
+		fid = v9fs_fid_lookup(old_dentry);
+		if (IS_ERR(fid))
+			return PTR_ERR(fid);
+
+		st = p9_client_getattr_dotl(fid, P9_STATS_BASIC);
+		if (IS_ERR(st))
+			return PTR_ERR(st);
+
+		v9fs_stat2inode_dotl(st, old_dentry->d_inode);
+
+		kfree(st);
+	} else {
+		/* Caching disabled. No need to get up-to-date stat info.
+		 * This dentry will be released immediately, so just hold
+		 * the inode.
+		 */
+		ihold(old_dentry->d_inode);
+	}
+	d_instantiate(dentry, old_dentry->d_inode);
+
+	return err;
+}
+
+/**
+ * v9fs_vfs_mknod_dotl - create a special file
+ * @dir: directory inode in which the special file is created
+ * @dentry: dentry for the new special file
+ * @omode: mode for creation
+ * @rdev: device associated with special file
+ *
+ */
+static int
+v9fs_vfs_mknod_dotl(struct inode *dir, struct dentry *dentry, int omode,
+		dev_t rdev)
+{
+	int err;
+	char *name;
+	mode_t mode;
+	struct v9fs_session_info *v9ses;
+	struct p9_fid *fid = NULL, *dfid = NULL;
+	struct inode *inode;
+	gid_t gid;
+	struct p9_qid qid;
+	struct dentry *dir_dentry;
+	struct posix_acl *dacl = NULL, *pacl = NULL;
+
+	P9_DPRINTK(P9_DEBUG_VFS,
+		" %lu,%s mode: %x MAJOR: %u MINOR: %u\n", dir->i_ino,
+		dentry->d_name.name, omode, MAJOR(rdev), MINOR(rdev));
+
+	if (!new_valid_dev(rdev))
+		return -EINVAL;
+
+	v9ses = v9fs_inode2v9ses(dir);
+	dir_dentry = v9fs_dentry_from_dir_inode(dir);
+	dfid = v9fs_fid_lookup(dir_dentry);
+	if (IS_ERR(dfid)) {
+		err = PTR_ERR(dfid);
+		P9_DPRINTK(P9_DEBUG_VFS, "fid lookup failed %d\n", err);
+		dfid = NULL;
+		goto error;
+	}
+
+	gid = v9fs_get_fsgid_for_create(dir);
+	mode = omode;
+	/* Update mode based on ACL value */
+	err = v9fs_acl_mode(dir, &mode, &dacl, &pacl);
+	if (err) {
+		P9_DPRINTK(P9_DEBUG_VFS,
+			   "Failed to get acl values in mknod %d\n", err);
+		goto error;
+	}
+	name = (char *) dentry->d_name.name;
+
+	err = p9_client_mknod_dotl(dfid, name, mode, rdev, gid, &qid);
+	if (err < 0)
+		goto error;
+
+	/* instantiate inode and assign the unopened fid to the dentry */
+	if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) {
+		fid = p9_client_walk(dfid, 1, &name, 1);
+		if (IS_ERR(fid)) {
+			err = PTR_ERR(fid);
+			P9_DPRINTK(P9_DEBUG_VFS, "p9_client_walk failed %d\n",
+				err);
+			fid = NULL;
+			goto error;
+		}
+
+		inode = v9fs_inode_from_fid(v9ses, fid, dir->i_sb);
+		if (IS_ERR(inode)) {
+			err = PTR_ERR(inode);
+			P9_DPRINTK(P9_DEBUG_VFS, "inode creation failed %d\n",
+				err);
+			goto error;
+		}
+		d_instantiate(dentry, inode);
+		err = v9fs_fid_add(dentry, fid);
+		if (err < 0)
+			goto error;
+		fid = NULL;
+	} else {
+		/*
+		 * Not in cached mode. No need to populate the inode with stat.
+		 * The socket syscall returns an fd, so we still need to instantiate.
+		 */
+		inode = v9fs_get_inode(dir->i_sb, mode);
+		if (IS_ERR(inode)) {
+			err = PTR_ERR(inode);
+			goto error;
+		}
+		d_instantiate(dentry, inode);
+	}
+	/* Now set the ACL based on the default value */
+	v9fs_set_create_acl(dentry, dacl, pacl);
+error:
+	if (fid)
+		p9_client_clunk(fid);
+	return err;
+}
+
+/**
+ * v9fs_vfs_follow_link_dotl - follow a symlink path
+ * @dentry: dentry for symlink
+ * @nd: nameidata
+ *
+ */
+
+static void *
+v9fs_vfs_follow_link_dotl(struct dentry *dentry, struct nameidata *nd)
+{
+	int retval;
+	struct p9_fid *fid;
+	char *link = __getname();
+	char *target;
+
+	P9_DPRINTK(P9_DEBUG_VFS, "%s\n", dentry->d_name.name);
+
+	if (!link) {
+		link = ERR_PTR(-ENOMEM);
+		goto ndset;
+	}
+	fid = v9fs_fid_lookup(dentry);
+	if (IS_ERR(fid)) {
+		__putname(link);
+		link = ERR_PTR(PTR_ERR(fid));
+		goto ndset;
+	}
+	retval = p9_client_readlink(fid, &target);
+	if (!retval) {
+		strcpy(link, target);
+		kfree(target);
+		goto ndset;
+	}
+	__putname(link);
+	link = ERR_PTR(retval);
+ndset:
+	nd_set_link(nd, link);
+	return NULL;
+}
+
+const struct inode_operations v9fs_dir_inode_operations_dotl = {
+	.create = v9fs_vfs_create_dotl,
+	.lookup = v9fs_vfs_lookup,
+	.link = v9fs_vfs_link_dotl,
+	.symlink = v9fs_vfs_symlink_dotl,
+	.unlink = v9fs_vfs_unlink,
+	.mkdir = v9fs_vfs_mkdir_dotl,
+	.rmdir = v9fs_vfs_rmdir,
+	.mknod = v9fs_vfs_mknod_dotl,
+	.rename = v9fs_vfs_rename,
+	.getattr = v9fs_vfs_getattr_dotl,
+	.setattr = v9fs_vfs_setattr_dotl,
+	.setxattr = generic_setxattr,
+	.getxattr = generic_getxattr,
+	.removexattr = generic_removexattr,
+	.listxattr = v9fs_listxattr,
+	.check_acl = v9fs_check_acl,
+};
+
+const struct inode_operations v9fs_file_inode_operations_dotl = {
+	.getattr = v9fs_vfs_getattr_dotl,
+	.setattr = v9fs_vfs_setattr_dotl,
+	.setxattr = generic_setxattr,
+	.getxattr = generic_getxattr,
+	.removexattr = generic_removexattr,
+	.listxattr = v9fs_listxattr,
+	.check_acl = v9fs_check_acl,
+};
+
+const struct inode_operations v9fs_symlink_inode_operations_dotl = {
+	.readlink = generic_readlink,
+	.follow_link = v9fs_vfs_follow_link_dotl,
+	.put_link = v9fs_vfs_put_link,
+	.getattr = v9fs_vfs_getattr_dotl,
+	.setattr = v9fs_vfs_setattr_dotl,
+	.setxattr = generic_setxattr,
+	.getxattr = generic_getxattr,
+	.removexattr = generic_removexattr,
+	.listxattr = v9fs_listxattr,
+};
diff --git a/fs/9p/vfs_super.c b/fs/9p/vfs_super.c
index c55c614..dbaabe3 100644
--- a/fs/9p/vfs_super.c
+++ b/fs/9p/vfs_super.c
@@ -141,6 +141,11 @@
 	}
 	v9fs_fill_super(sb, v9ses, flags, data);
 
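+	/*
+	 * sb->s_d_op is picked up by d_alloc() for every dentry on this
+	 * superblock, replacing the per-dentry d_set_d_op() calls that
+	 * used to be made at lookup time.
+	 */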
+	if (v9ses->cache)
+		sb->s_d_op = &v9fs_cached_dentry_operations;
+	else
+		sb->s_d_op = &v9fs_dentry_operations;
+
 	inode = v9fs_get_inode(sb, S_IFDIR | mode);
 	if (IS_ERR(inode)) {
 		retval = PTR_ERR(inode);
@@ -217,9 +222,6 @@
 
 	P9_DPRINTK(P9_DEBUG_VFS, " %p\n", s);
 
-	if (s->s_root)
-		v9fs_dentry_release(s->s_root);	/* clunk root */
-
 	kill_anon_super(s);
 
 	v9fs_session_cancel(v9ses);
diff --git a/fs/9p/xattr.c b/fs/9p/xattr.c
index 43ec7df..d288773 100644
--- a/fs/9p/xattr.c
+++ b/fs/9p/xattr.c
@@ -133,7 +133,7 @@
 			"p9_client_xattrcreate failed %d\n", retval);
 		goto error;
 	}
-	msize = fid->clnt->msize;;
+	msize = fid->clnt->msize;
 	while (value_len) {
 		if (value_len > (msize - P9_IOHDRSZ))
 			write_count = msize - P9_IOHDRSZ;
diff --git a/fs/adfs/dir.c b/fs/adfs/dir.c
index bf7693c..3b4a764 100644
--- a/fs/adfs/dir.c
+++ b/fs/adfs/dir.c
@@ -276,7 +276,6 @@
 	struct object_info obj;
 	int error;
 
-	d_set_d_op(dentry, &adfs_dentry_operations);
 	lock_kernel();
 	error = adfs_dir_lookup_byname(dir, &dentry->d_name, &obj);
 	if (error == 0) {
diff --git a/fs/adfs/super.c b/fs/adfs/super.c
index a4041b5..2d79540 100644
--- a/fs/adfs/super.c
+++ b/fs/adfs/super.c
@@ -473,6 +473,7 @@
 		asb->s_namelen = ADFS_F_NAME_LEN;
 	}
 
+	sb->s_d_op = &adfs_dentry_operations;
 	root = adfs_iget(sb, &root_obj);
 	sb->s_root = d_alloc_root(root);
 	if (!sb->s_root) {
@@ -483,8 +484,7 @@
 		kfree(asb->s_map);
 		adfs_error(sb, "get root inode failed\n");
 		goto error;
-	} else
-		d_set_d_op(sb->s_root, &adfs_dentry_operations);
+	}
 	unlock_kernel();
 	return 0;
 
diff --git a/fs/affs/affs.h b/fs/affs/affs.h
index a8cbdeb..0e95f73 100644
--- a/fs/affs/affs.h
+++ b/fs/affs/affs.h
@@ -201,6 +201,7 @@
 extern const struct address_space_operations	 affs_aops_ofs;
 
 extern const struct dentry_operations	 affs_dentry_operations;
+extern const struct dentry_operations	 affs_intl_dentry_operations;
 
 static inline void
 affs_set_blocksize(struct super_block *sb, int size)
diff --git a/fs/affs/namei.c b/fs/affs/namei.c
index 944a404..e3e9efc 100644
--- a/fs/affs/namei.c
+++ b/fs/affs/namei.c
@@ -32,7 +32,7 @@
 	.d_compare	= affs_compare_dentry,
 };
 
-static const struct dentry_operations affs_intl_dentry_operations = {
+const struct dentry_operations affs_intl_dentry_operations = {
 	.d_hash		= affs_intl_hash_dentry,
 	.d_compare	= affs_intl_compare_dentry,
 };
@@ -240,7 +240,6 @@
 		if (IS_ERR(inode))
 			return ERR_CAST(inode);
 	}
-	d_set_d_op(dentry, AFFS_SB(sb)->s_flags & SF_INTL ? &affs_intl_dentry_operations : &affs_dentry_operations);
 	d_add(dentry, inode);
 	return NULL;
 }
diff --git a/fs/affs/super.c b/fs/affs/super.c
index d39081b..b31507d 100644
--- a/fs/affs/super.c
+++ b/fs/affs/super.c
@@ -477,12 +477,16 @@
 		goto out_error_noinode;
 	}
 
+	if (AFFS_SB(sb)->s_flags & SF_INTL)
+		sb->s_d_op = &affs_intl_dentry_operations;
+	else
+		sb->s_d_op = &affs_dentry_operations;
+
 	sb->s_root = d_alloc_root(root_inode);
 	if (!sb->s_root) {
 		printk(KERN_ERR "AFFS: Get root inode failed\n");
 		goto out_error;
 	}
-	d_set_d_op(sb->s_root, &affs_dentry_operations);
 
 	pr_debug("AFFS: s_flags=%lX\n",sb->s_flags);
 	return 0;
diff --git a/fs/afs/dir.c b/fs/afs/dir.c
index 34a3263..e6a4ab9 100644
--- a/fs/afs/dir.c
+++ b/fs/afs/dir.c
@@ -62,7 +62,7 @@
 	.setattr	= afs_setattr,
 };
 
-static const struct dentry_operations afs_fs_dentry_operations = {
+const struct dentry_operations afs_fs_dentry_operations = {
 	.d_revalidate	= afs_d_revalidate,
 	.d_delete	= afs_d_delete,
 	.d_release	= afs_d_release,
@@ -582,8 +582,6 @@
 	}
 
 success:
-	d_set_d_op(dentry, &afs_fs_dentry_operations);
-
 	d_add(dentry, inode);
 	_leave(" = 0 { vn=%u u=%u } -> { ino=%lu v=%llu }",
 	       fid.vnode,
diff --git a/fs/afs/internal.h b/fs/afs/internal.h
index 6d4bc1c..ab6db5a 100644
--- a/fs/afs/internal.h
+++ b/fs/afs/internal.h
@@ -486,6 +486,7 @@
  * dir.c
  */
 extern const struct inode_operations afs_dir_inode_operations;
+extern const struct dentry_operations afs_fs_dentry_operations;
 extern const struct file_operations afs_dir_file_operations;
 
 /*
diff --git a/fs/afs/super.c b/fs/afs/super.c
index f901a9d..fb240e8 100644
--- a/fs/afs/super.c
+++ b/fs/afs/super.c
@@ -336,6 +336,7 @@
 	if (!root)
 		goto error;
 
+	sb->s_d_op = &afs_fs_dentry_operations;
 	sb->s_root = root;
 
 	_leave(" = 0");
diff --git a/fs/aio.c b/fs/aio.c
index 8c8f6c5..5e00f15 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -798,29 +798,12 @@
 	queue_delayed_work(aio_wq, &ctx->wq, timeout);
 }
 
-
 /*
- * aio_run_iocbs:
- * 	Process all pending retries queued on the ioctx
- * 	run list.
- * Assumes it is operating within the aio issuer's mm
- * context.
- */
-static inline void aio_run_iocbs(struct kioctx *ctx)
-{
-	int requeue;
-
-	spin_lock_irq(&ctx->ctx_lock);
-
-	requeue = __aio_run_iocbs(ctx);
-	spin_unlock_irq(&ctx->ctx_lock);
-	if (requeue)
-		aio_queue_work(ctx);
-}
-
-/*
- * just like aio_run_iocbs, but keeps running them until
- * the list stays empty
+ * aio_run_all_iocbs:
+ *	Process all pending retries queued on the ioctx
+ *	run list, and keep running them until the list
+ *	stays empty.
+ * Assumes it is operating within the aio issuer's mm context.
  */
 static inline void aio_run_all_iocbs(struct kioctx *ctx)
 {
@@ -1839,7 +1822,7 @@
 	long ret = -EINVAL;
 
 	if (likely(ioctx)) {
-		if (likely(min_nr <= nr && min_nr >= 0 && nr >= 0))
+		if (likely(min_nr <= nr && min_nr >= 0))
 			ret = read_events(ioctx, min_nr, nr, events, timeout);
 		put_ioctx(ioctx);
 	}
diff --git a/fs/anon_inodes.c b/fs/anon_inodes.c
index 5fd38112a..cbe57f3 100644
--- a/fs/anon_inodes.c
+++ b/fs/anon_inodes.c
@@ -26,12 +26,6 @@
 static struct inode *anon_inode_inode;
 static const struct file_operations anon_inode_fops;
 
-static struct dentry *anon_inodefs_mount(struct file_system_type *fs_type,
-				int flags, const char *dev_name, void *data)
-{
-	return mount_pseudo(fs_type, "anon_inode:", NULL, ANON_INODE_FS_MAGIC);
-}
-
 /*
  * anon_inodefs_dname() is called from d_path().
  */
@@ -41,14 +35,22 @@
 				dentry->d_name.name);
 }
 
+static const struct dentry_operations anon_inodefs_dentry_operations = {
+	.d_dname	= anon_inodefs_dname,
+};
+
+static struct dentry *anon_inodefs_mount(struct file_system_type *fs_type,
+				int flags, const char *dev_name, void *data)
+{
+	return mount_pseudo(fs_type, "anon_inode:", NULL,
+			&anon_inodefs_dentry_operations, ANON_INODE_FS_MAGIC);
+}
+
 static struct file_system_type anon_inode_fs_type = {
 	.name		= "anon_inodefs",
 	.mount		= anon_inodefs_mount,
 	.kill_sb	= kill_anon_super,
 };
-static const struct dentry_operations anon_inodefs_dentry_operations = {
-	.d_dname	= anon_inodefs_dname,
-};
 
 /*
  * nop .set_page_dirty method so that people can use .page_mkwrite on
@@ -64,9 +66,9 @@
 };
 
 /**
- * anon_inode_getfd - creates a new file instance by hooking it up to an
- *                    anonymous inode, and a dentry that describe the "class"
- *                    of the file
+ * anon_inode_getfile - creates a new file instance by hooking it up to an
+ *                      anonymous inode, and a dentry that describes the "class"
+ *                      of the file
  *
  * @name:    [in]    name of the "class" of the new file
  * @fops:    [in]    file operations for the new file
@@ -113,7 +115,6 @@
 	 */
 	ihold(anon_inode_inode);
 
-	d_set_d_op(path.dentry, &anon_inodefs_dentry_operations);
 	d_instantiate(path.dentry, anon_inode_inode);
 
 	error = -ENFILE;
diff --git a/fs/befs/endian.h b/fs/befs/endian.h
index 6cb84d8..2722387 100644
--- a/fs/befs/endian.h
+++ b/fs/befs/endian.h
@@ -102,22 +102,22 @@
 }
 
 static inline befs_data_stream
-fsds_to_cpu(const struct super_block *sb, befs_disk_data_stream n)
+fsds_to_cpu(const struct super_block *sb, const befs_disk_data_stream *n)
 {
 	befs_data_stream data;
 	int i;
 
 	for (i = 0; i < BEFS_NUM_DIRECT_BLOCKS; ++i)
-		data.direct[i] = fsrun_to_cpu(sb, n.direct[i]);
+		data.direct[i] = fsrun_to_cpu(sb, n->direct[i]);
 
-	data.max_direct_range = fs64_to_cpu(sb, n.max_direct_range);
-	data.indirect = fsrun_to_cpu(sb, n.indirect);
-	data.max_indirect_range = fs64_to_cpu(sb, n.max_indirect_range);
-	data.double_indirect = fsrun_to_cpu(sb, n.double_indirect);
+	data.max_direct_range = fs64_to_cpu(sb, n->max_direct_range);
+	data.indirect = fsrun_to_cpu(sb, n->indirect);
+	data.max_indirect_range = fs64_to_cpu(sb, n->max_indirect_range);
+	data.double_indirect = fsrun_to_cpu(sb, n->double_indirect);
 	data.max_double_indirect_range = fs64_to_cpu(sb,
-						     n.
+						     n->
 						     max_double_indirect_range);
-	data.size = fs64_to_cpu(sb, n.size);
+	data.size = fs64_to_cpu(sb, n->size);
 
 	return data;
 }
diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
index de93581..b1d0c79 100644
--- a/fs/befs/linuxvfs.c
+++ b/fs/befs/linuxvfs.c
@@ -390,7 +390,7 @@
 		int num_blks;
 
 		befs_ino->i_data.ds =
-		    fsds_to_cpu(sb, raw_inode->data.datastream);
+		    fsds_to_cpu(sb, &raw_inode->data.datastream);
 
 		num_blks = befs_count_blocks(sb, &befs_ino->i_data.ds);
 		inode->i_blocks =
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index 6884e19..d5b640b 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -66,12 +66,11 @@
 #define ELF_PAGEALIGN(_v) (((_v) + ELF_MIN_ALIGN - 1) & ~(ELF_MIN_ALIGN - 1))
 
 static struct linux_binfmt elf_format = {
-		.module		= THIS_MODULE,
-		.load_binary	= load_elf_binary,
-		.load_shlib	= load_elf_library,
-		.core_dump	= elf_core_dump,
-		.min_coredump	= ELF_EXEC_PAGESIZE,
-		.hasvdso	= 1
+	.module		= THIS_MODULE,
+	.load_binary	= load_elf_binary,
+	.load_shlib	= load_elf_library,
+	.core_dump	= elf_core_dump,
+	.min_coredump	= ELF_EXEC_PAGESIZE,
 };
 
 #define BAD_ADDR(x) ((unsigned long)(x) >= TASK_SIZE)
@@ -316,8 +315,6 @@
 	return 0;
 }
 
-#ifndef elf_map
-
 static unsigned long elf_map(struct file *filep, unsigned long addr,
 		struct elf_phdr *eppnt, int prot, int type,
 		unsigned long total_size)
@@ -354,8 +351,6 @@
 	return(map_addr);
 }
 
-#endif /* !elf_map */
-
 static unsigned long total_mapping_size(struct elf_phdr *cmds, int nr)
 {
 	int i, first_idx = -1, last_idx = -1;
@@ -421,7 +416,7 @@
 		goto out;
 
 	retval = kernel_read(interpreter, interp_elf_ex->e_phoff,
-			     (char *)elf_phdata,size);
+			     (char *)elf_phdata, size);
 	error = -EIO;
 	if (retval != size) {
 		if (retval < 0)
@@ -601,7 +596,7 @@
 		goto out;
 	if (!elf_check_arch(&loc->elf_ex))
 		goto out;
-	if (!bprm->file->f_op||!bprm->file->f_op->mmap)
+	if (!bprm->file->f_op || !bprm->file->f_op->mmap)
 		goto out;
 
 	/* Now read in all of the header information */
@@ -761,8 +756,8 @@
 			/* There was a PT_LOAD segment with p_memsz > p_filesz
 			   before this one. Map anonymous pages, if needed,
 			   and clear the area.  */
-			retval = set_brk (elf_bss + load_bias,
-					  elf_brk + load_bias);
+			retval = set_brk(elf_bss + load_bias,
+					 elf_brk + load_bias);
 			if (retval) {
 				send_sig(SIGKILL, current, 0);
 				goto out_free_dentry;
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 771f235..88da703 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -473,7 +473,7 @@
 static struct dentry *bd_mount(struct file_system_type *fs_type,
 	int flags, const char *dev_name, void *data)
 {
-	return mount_pseudo(fs_type, "bdev:", &bdev_sops, 0x62646576);
+	return mount_pseudo(fs_type, "bdev:", &bdev_sops, NULL, 0x62646576);
 }
 
 static struct file_system_type bd_type = {
diff --git a/fs/btrfs/export.c b/fs/btrfs/export.c
index 0ccf9a8..9786963 100644
--- a/fs/btrfs/export.c
+++ b/fs/btrfs/export.c
@@ -65,7 +65,6 @@
 {
 	struct btrfs_fs_info *fs_info = btrfs_sb(sb)->fs_info;
 	struct btrfs_root *root;
-	struct dentry *dentry;
 	struct inode *inode;
 	struct btrfs_key key;
 	int index;
@@ -108,10 +107,7 @@
 		return ERR_PTR(-ESTALE);
 	}
 
-	dentry = d_obtain_alias(inode);
-	if (!IS_ERR(dentry))
-		d_set_d_op(dentry, &btrfs_dentry_operations);
-	return dentry;
+	return d_obtain_alias(inode);
 fail:
 	srcu_read_unlock(&fs_info->subvol_srcu, index);
 	return ERR_PTR(err);
@@ -166,7 +162,6 @@
 static struct dentry *btrfs_get_parent(struct dentry *child)
 {
 	struct inode *dir = child->d_inode;
-	struct dentry *dentry;
 	struct btrfs_root *root = BTRFS_I(dir)->root;
 	struct btrfs_path *path;
 	struct extent_buffer *leaf;
@@ -223,10 +218,7 @@
 
 	key.type = BTRFS_INODE_ITEM_KEY;
 	key.offset = 0;
-	dentry = d_obtain_alias(btrfs_iget(root->fs_info->sb, &key, root, NULL));
-	if (!IS_ERR(dentry))
-		d_set_d_op(dentry, &btrfs_dentry_operations);
-	return dentry;
+	return d_obtain_alias(btrfs_iget(root->fs_info->sb, &key, root, NULL));
 fail:
 	btrfs_free_path(path);
 	return ERR_PTR(ret);
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index a0ff46a..a3798a3 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -4084,8 +4084,6 @@
 	int index;
 	int ret;
 
-	d_set_d_op(dentry, &btrfs_dentry_operations);
-
 	if (dentry->d_name.len > BTRFS_NAME_LEN)
 		return ERR_PTR(-ENAMETOOLONG);
 
@@ -7117,6 +7115,10 @@
 	alloc_start = offset & ~mask;
 	alloc_end =  (offset + len + mask) & ~mask;
 
+	/* We only support the FALLOC_FL_KEEP_SIZE mode */
+	if (mode && (mode != FALLOC_FL_KEEP_SIZE))
+		return -EOPNOTSUPP;
+
 	/*
 	 * wait for ordered IO before we have any locks.  We'll loop again
 	 * below with the locks held.
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 883c6fa..22acdaa 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -460,6 +460,7 @@
 	sb->s_maxbytes = MAX_LFS_FILESIZE;
 	sb->s_magic = BTRFS_SUPER_MAGIC;
 	sb->s_op = &btrfs_super_ops;
+	sb->s_d_op = &btrfs_dentry_operations;
 	sb->s_export_op = &btrfs_export_ops;
 	sb->s_xattr = btrfs_xattr_handlers;
 	sb->s_time_gran = 1;
diff --git a/fs/ceph/Makefile b/fs/ceph/Makefile
index 9e6c4f2..bd35212 100644
--- a/fs/ceph/Makefile
+++ b/fs/ceph/Makefile
@@ -2,31 +2,10 @@
 # Makefile for CEPH filesystem.
 #
 
-ifneq ($(KERNELRELEASE),)
-
 obj-$(CONFIG_CEPH_FS) += ceph.o
 
-ceph-objs := super.o inode.o dir.o file.o locks.o addr.o ioctl.o \
+ceph-y := super.o inode.o dir.o file.o locks.o addr.o ioctl.o \
 	export.o caps.o snap.o xattr.o \
 	mds_client.o mdsmap.o strings.o ceph_frag.o \
 	debugfs.o
 
-else
-#Otherwise we were called directly from the command
-# line; invoke the kernel build system.
-
-KERNELDIR ?= /lib/modules/$(shell uname -r)/build
-PWD := $(shell pwd)
-
-default: all
-
-all:
-	$(MAKE) -C $(KERNELDIR) M=$(PWD) CONFIG_CEPH_FS=m modules
-
-modules_install:
-	$(MAKE) -C $(KERNELDIR) M=$(PWD) CONFIG_CEPH_FS=m modules_install
-
-clean:
-	$(MAKE) -C $(KERNELDIR) M=$(PWD) clean
-
-endif
diff --git a/fs/ceph/debugfs.c b/fs/ceph/debugfs.c
index 7ae1b3d..08f65fa 100644
--- a/fs/ceph/debugfs.c
+++ b/fs/ceph/debugfs.c
@@ -60,10 +60,13 @@
 	for (rp = rb_first(&mdsc->request_tree); rp; rp = rb_next(rp)) {
 		req = rb_entry(rp, struct ceph_mds_request, r_node);
 
-		if (req->r_request)
-			seq_printf(s, "%lld\tmds%d\t", req->r_tid, req->r_mds);
-		else
+		if (req->r_request && req->r_session)
+			seq_printf(s, "%lld\tmds%d\t", req->r_tid,
+				   req->r_session->s_mds);
+		else if (!req->r_request)
 			seq_printf(s, "%lld\t(no request)\t", req->r_tid);
+		else
+			seq_printf(s, "%lld\t(no session)\t", req->r_tid);
 
 		seq_printf(s, "%s", ceph_mds_op_name(req->r_op));
 
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
index fa7ca04..0bc68de 100644
--- a/fs/ceph/dir.c
+++ b/fs/ceph/dir.c
@@ -1224,6 +1224,26 @@
 	}
 }
 
+/*
+ * Return name hash for a given dentry.  This is dependent on
+ * the parent directory's hash function.
+ */
+unsigned ceph_dentry_hash(struct dentry *dn)
+{
+	struct inode *dir = dn->d_parent->d_inode;
+	struct ceph_inode_info *dci = ceph_inode(dir);
+
+	switch (dci->i_dir_layout.dl_dir_hash) {
+	case 0:	/* for backward compat */
+	case CEPH_STR_HASH_LINUX:
+		return dn->d_name.hash;
+
+	default:
+		return ceph_str_hash(dci->i_dir_layout.dl_dir_hash,
+				     dn->d_name.name, dn->d_name.len);
+	}
+}
+
 const struct file_operations ceph_dir_fops = {
 	.read = ceph_read_dir,
 	.readdir = ceph_readdir,
diff --git a/fs/ceph/export.c b/fs/ceph/export.c
index 2297d94..e410561 100644
--- a/fs/ceph/export.c
+++ b/fs/ceph/export.c
@@ -59,7 +59,7 @@
 		dout("encode_fh %p connectable\n", dentry);
 		cfh->ino = ceph_ino(dentry->d_inode);
 		cfh->parent_ino = ceph_ino(parent->d_inode);
-		cfh->parent_name_hash = parent->d_name.hash;
+		cfh->parent_name_hash = ceph_dentry_hash(parent);
 		*max_len = connected_handle_length;
 		type = 2;
 	} else if (*max_len >= handle_length) {
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index e61de4f..e835eff 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -297,6 +297,8 @@
 	ci->i_release_count = 0;
 	ci->i_symlink = NULL;
 
+	memset(&ci->i_dir_layout, 0, sizeof(ci->i_dir_layout));
+
 	ci->i_fragtree = RB_ROOT;
 	mutex_init(&ci->i_fragtree_mutex);
 
@@ -689,6 +691,8 @@
 		inode->i_op = &ceph_dir_iops;
 		inode->i_fop = &ceph_dir_fops;
 
+		ci->i_dir_layout = iinfo->dir_layout;
+
 		ci->i_files = le64_to_cpu(info->files);
 		ci->i_subdirs = le64_to_cpu(info->subdirs);
 		ci->i_rbytes = le64_to_cpu(info->rbytes);
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index a50fca1..1e30d19 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -60,7 +60,8 @@
  * parse individual inode info
  */
 static int parse_reply_info_in(void **p, void *end,
-			       struct ceph_mds_reply_info_in *info)
+			       struct ceph_mds_reply_info_in *info,
+			       int features)
 {
 	int err = -EIO;
 
@@ -74,6 +75,12 @@
 	info->symlink = *p;
 	*p += info->symlink_len;
 
+	if (features & CEPH_FEATURE_DIRLAYOUTHASH)
+		ceph_decode_copy_safe(p, end, &info->dir_layout,
+				      sizeof(info->dir_layout), bad);
+	else
+		memset(&info->dir_layout, 0, sizeof(info->dir_layout));
+
 	ceph_decode_32_safe(p, end, info->xattr_len, bad);
 	ceph_decode_need(p, end, info->xattr_len, bad);
 	info->xattr_data = *p;
@@ -88,12 +95,13 @@
  * target inode.
  */
 static int parse_reply_info_trace(void **p, void *end,
-				  struct ceph_mds_reply_info_parsed *info)
+				  struct ceph_mds_reply_info_parsed *info,
+				  int features)
 {
 	int err;
 
 	if (info->head->is_dentry) {
-		err = parse_reply_info_in(p, end, &info->diri);
+		err = parse_reply_info_in(p, end, &info->diri, features);
 		if (err < 0)
 			goto out_bad;
 
@@ -114,7 +122,7 @@
 	}
 
 	if (info->head->is_target) {
-		err = parse_reply_info_in(p, end, &info->targeti);
+		err = parse_reply_info_in(p, end, &info->targeti, features);
 		if (err < 0)
 			goto out_bad;
 	}
@@ -134,7 +142,8 @@
  * parse readdir results
  */
 static int parse_reply_info_dir(void **p, void *end,
-				struct ceph_mds_reply_info_parsed *info)
+				struct ceph_mds_reply_info_parsed *info,
+				int features)
 {
 	u32 num, i = 0;
 	int err;
@@ -182,7 +191,7 @@
 		*p += sizeof(struct ceph_mds_reply_lease);
 
 		/* inode */
-		err = parse_reply_info_in(p, end, &info->dir_in[i]);
+		err = parse_reply_info_in(p, end, &info->dir_in[i], features);
 		if (err < 0)
 			goto out_bad;
 		i++;
@@ -205,7 +214,8 @@
  * parse fcntl F_GETLK results
  */
 static int parse_reply_info_filelock(void **p, void *end,
-                struct ceph_mds_reply_info_parsed *info)
+				     struct ceph_mds_reply_info_parsed *info,
+				     int features)
 {
 	if (*p + sizeof(*info->filelock_reply) > end)
 		goto bad;
@@ -225,19 +235,21 @@
  * parse extra results
  */
 static int parse_reply_info_extra(void **p, void *end,
-                struct ceph_mds_reply_info_parsed *info)
+				  struct ceph_mds_reply_info_parsed *info,
+				  int features)
 {
 	if (info->head->op == CEPH_MDS_OP_GETFILELOCK)
-		return parse_reply_info_filelock(p, end, info);
+		return parse_reply_info_filelock(p, end, info, features);
 	else
-		return parse_reply_info_dir(p, end, info);
+		return parse_reply_info_dir(p, end, info, features);
 }
 
 /*
  * parse entire mds reply
  */
 static int parse_reply_info(struct ceph_msg *msg,
-			    struct ceph_mds_reply_info_parsed *info)
+			    struct ceph_mds_reply_info_parsed *info,
+			    int features)
 {
 	void *p, *end;
 	u32 len;
@@ -250,7 +262,7 @@
 	/* trace */
 	ceph_decode_32_safe(&p, end, len, bad);
 	if (len > 0) {
-		err = parse_reply_info_trace(&p, p+len, info);
+		err = parse_reply_info_trace(&p, p+len, info, features);
 		if (err < 0)
 			goto out_bad;
 	}
@@ -258,7 +270,7 @@
 	/* extra */
 	ceph_decode_32_safe(&p, end, len, bad);
 	if (len > 0) {
-		err = parse_reply_info_extra(&p, p+len, info);
+		err = parse_reply_info_extra(&p, p+len, info, features);
 		if (err < 0)
 			goto out_bad;
 	}
@@ -654,7 +666,7 @@
 		} else {
 			/* dir + name */
 			inode = dir;
-			hash = req->r_dentry->d_name.hash;
+			hash = ceph_dentry_hash(req->r_dentry);
 			is_hash = true;
 		}
 	}
@@ -1693,7 +1705,6 @@
 	struct ceph_msg *msg;
 	int flags = 0;
 
-	req->r_mds = mds;
 	req->r_attempts++;
 	if (req->r_inode) {
 		struct ceph_cap *cap =
@@ -1780,6 +1791,8 @@
 		goto finish;
 	}
 
+	put_request_session(req);
+
 	mds = __choose_mds(mdsc, req);
 	if (mds < 0 ||
 	    ceph_mdsmap_get_state(mdsc->mdsmap, mds) < CEPH_MDS_STATE_ACTIVE) {
@@ -1797,6 +1810,8 @@
 			goto finish;
 		}
 	}
+	req->r_session = get_session(session);
+
 	dout("do_request mds%d session %p state %s\n", mds, session,
 	     session_state_name(session->s_state));
 	if (session->s_state != CEPH_MDS_SESSION_OPEN &&
@@ -1809,7 +1824,6 @@
 	}
 
 	/* send request */
-	req->r_session = get_session(session);
 	req->r_resend_mds = -1;   /* forget any previous mds hint */
 
 	if (req->r_request_started == 0)   /* note request start time */
@@ -1863,7 +1877,6 @@
 		if (req->r_session &&
 		    req->r_session->s_mds == mds) {
 			dout(" kicking tid %llu\n", req->r_tid);
-			put_request_session(req);
 			__do_request(mdsc, req);
 		}
 	}
@@ -2056,8 +2069,11 @@
 			goto out;
 		} else  {
 			struct ceph_inode_info *ci = ceph_inode(req->r_inode);
-			struct ceph_cap *cap =
-				ceph_get_cap_for_mds(ci, req->r_mds);;
+			struct ceph_cap *cap = NULL;
+
+			if (req->r_session)
+				cap = ceph_get_cap_for_mds(ci,
+						   req->r_session->s_mds);
 
 			dout("already using auth");
 			if ((!cap || cap != ci->i_auth_cap) ||
@@ -2101,7 +2117,7 @@
 
 	dout("handle_reply tid %lld result %d\n", tid, result);
 	rinfo = &req->r_reply_info;
-	err = parse_reply_info(msg, rinfo);
+	err = parse_reply_info(msg, rinfo, session->s_con.peer_features);
 	mutex_unlock(&mdsc->mutex);
 
 	mutex_lock(&session->s_mutex);
diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h
index aabe563..4e3a9cc 100644
--- a/fs/ceph/mds_client.h
+++ b/fs/ceph/mds_client.h
@@ -35,6 +35,7 @@
  */
 struct ceph_mds_reply_info_in {
 	struct ceph_mds_reply_inode *in;
+	struct ceph_dir_layout dir_layout;
 	u32 symlink_len;
 	char *symlink;
 	u32 xattr_len;
@@ -165,7 +166,6 @@
 	struct ceph_mds_client *r_mdsc;
 
 	int r_op;                    /* mds op code */
-	int r_mds;
 
 	/* operation on what? */
 	struct inode *r_inode;              /* arg1 */
diff --git a/fs/ceph/super.c b/fs/ceph/super.c
index 08b460a..bf6f0f3 100644
--- a/fs/ceph/super.c
+++ b/fs/ceph/super.c
@@ -428,7 +428,8 @@
 		goto fail;
 	}
 	fsc->client->extra_mon_dispatch = extra_mon_dispatch;
-	fsc->client->supported_features |= CEPH_FEATURE_FLOCK;
+	fsc->client->supported_features |= CEPH_FEATURE_FLOCK |
+		CEPH_FEATURE_DIRLAYOUTHASH;
 	fsc->client->monc.want_mdsmap = 1;
 
 	fsc->mount_options = fsopt;
@@ -443,13 +444,17 @@
 		goto fail_client;
 
 	err = -ENOMEM;
-	fsc->wb_wq = create_workqueue("ceph-writeback");
+	/*
+	 * The number of concurrent work items can be high but they don't
+	 * need to be processed in parallel, so limit concurrency.
+	 */
+	fsc->wb_wq = alloc_workqueue("ceph-writeback", 0, 1);
 	if (fsc->wb_wq == NULL)
 		goto fail_bdi;
-	fsc->pg_inv_wq = create_singlethread_workqueue("ceph-pg-invalid");
+	fsc->pg_inv_wq = alloc_workqueue("ceph-pg-invalid", 0, 1);
 	if (fsc->pg_inv_wq == NULL)
 		goto fail_wb_wq;
-	fsc->trunc_wq = create_singlethread_workqueue("ceph-trunc");
+	fsc->trunc_wq = alloc_workqueue("ceph-trunc", 0, 1);
 	if (fsc->trunc_wq == NULL)
 		goto fail_pg_inv_wq;
 
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
index 4553d88..20b907d7 100644
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -239,6 +239,7 @@
 	unsigned i_ceph_flags;
 	unsigned long i_release_count;
 
+	struct ceph_dir_layout i_dir_layout;
 	struct ceph_file_layout i_layout;
 	char *i_symlink;
 
@@ -768,6 +769,7 @@
 extern void ceph_dentry_lru_touch(struct dentry *dn);
 extern void ceph_dentry_lru_del(struct dentry *dn);
 extern void ceph_invalidate_dentry_lease(struct dentry *dentry);
+extern unsigned ceph_dentry_hash(struct dentry *dn);
 
 /*
  * our d_ops vary depending on whether the inode is live,
diff --git a/fs/char_dev.c b/fs/char_dev.c
index e5b9df9..6e99b9d 100644
--- a/fs/char_dev.c
+++ b/fs/char_dev.c
@@ -417,18 +417,6 @@
 	return ret;
 }
 
-int cdev_index(struct inode *inode)
-{
-	int idx;
-	struct kobject *kobj;
-
-	kobj = kobj_lookup(cdev_map, inode->i_rdev, &idx);
-	if (!kobj)
-		return -1;
-	kobject_put(kobj);
-	return idx;
-}
-
 void cd_forget(struct inode *inode)
 {
 	spin_lock(&cdev_lock);
@@ -582,7 +570,6 @@
 EXPORT_SYMBOL(cdev_alloc);
 EXPORT_SYMBOL(cdev_del);
 EXPORT_SYMBOL(cdev_add);
-EXPORT_SYMBOL(cdev_index);
 EXPORT_SYMBOL(__register_chrdev);
 EXPORT_SYMBOL(__unregister_chrdev);
 EXPORT_SYMBOL(directly_mappable_cdev_bdi);
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 5e7075d..d9f652a 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -174,6 +174,12 @@
 		goto out_no_root;
 	}
 
+	/* do that *after* d_alloc_root() - we want NULL ->d_op for root here */
+	if (cifs_sb_master_tcon(cifs_sb)->nocase)
+		sb->s_d_op = &cifs_ci_dentry_ops;
+	else
+		sb->s_d_op = &cifs_dentry_ops;
+
 #ifdef CONFIG_CIFS_EXPERIMENTAL
 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
 		cFYI(1, "export ops supported");
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index 2e77382..1e95dd6 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -130,17 +130,6 @@
 	return full_path;
 }
 
-static void setup_cifs_dentry(struct cifsTconInfo *tcon,
-			      struct dentry *direntry,
-			      struct inode *newinode)
-{
-	if (tcon->nocase)
-		d_set_d_op(direntry, &cifs_ci_dentry_ops);
-	else
-		d_set_d_op(direntry, &cifs_dentry_ops);
-	d_instantiate(direntry, newinode);
-}
-
 /* Inode operations in similar order to how they appear in Linux file fs.h */
 
 int
@@ -327,7 +316,7 @@
 
 cifs_create_set_dentry:
 	if (rc == 0)
-		setup_cifs_dentry(tcon, direntry, newinode);
+		d_instantiate(direntry, newinode);
 	else
 		cFYI(1, "Create worked, get_inode_info failed rc = %d", rc);
 
@@ -418,10 +407,6 @@
 
 		rc = cifs_get_inode_info_unix(&newinode, full_path,
 						inode->i_sb, xid);
-		if (pTcon->nocase)
-			d_set_d_op(direntry, &cifs_ci_dentry_ops);
-		else
-			d_set_d_op(direntry, &cifs_dentry_ops);
 
 		if (rc == 0)
 			d_instantiate(direntry, newinode);
@@ -601,10 +586,6 @@
 				parent_dir_inode->i_sb, xid, NULL);
 
 	if ((rc == 0) && (newInode != NULL)) {
-		if (pTcon->nocase)
-			d_set_d_op(direntry, &cifs_ci_dentry_ops);
-		else
-			d_set_d_op(direntry, &cifs_dentry_ops);
 		d_add(direntry, newInode);
 		if (posix_open) {
 			filp = lookup_instantiate_filp(nd, direntry,
@@ -631,10 +612,6 @@
 	} else if (rc == -ENOENT) {
 		rc = 0;
 		direntry->d_time = jiffies;
-		if (pTcon->nocase)
-			d_set_d_op(direntry, &cifs_ci_dentry_ops);
-		else
-			d_set_d_op(direntry, &cifs_dentry_ops);
 		d_add(direntry, NULL);
 	/*	if it was once a directory (but how can we tell?) we could do
 		shrink_dcache_parent(direntry); */
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index 0c7e369..b06b606 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -1324,10 +1324,6 @@
 /*BB check (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID ) to see if need
 	to set uid/gid */
 			inc_nlink(inode);
-			if (pTcon->nocase)
-				d_set_d_op(direntry, &cifs_ci_dentry_ops);
-			else
-				d_set_d_op(direntry, &cifs_dentry_ops);
 
 			cifs_unix_basic_to_fattr(&fattr, pInfo, cifs_sb);
 			cifs_fill_uniqueid(inode->i_sb, &fattr);
@@ -1368,10 +1364,6 @@
 			rc = cifs_get_inode_info(&newinode, full_path, NULL,
 						 inode->i_sb, xid, NULL);
 
-		if (pTcon->nocase)
-			d_set_d_op(direntry, &cifs_ci_dentry_ops);
-		else
-			d_set_d_op(direntry, &cifs_dentry_ops);
 		d_instantiate(direntry, newinode);
 		 /* setting nlink not necessary except in cases where we
 		  * failed to get it from the server or was set bogus */
diff --git a/fs/cifs/link.c b/fs/cifs/link.c
index fe2f6a9..306769d 100644
--- a/fs/cifs/link.c
+++ b/fs/cifs/link.c
@@ -524,10 +524,6 @@
 			cFYI(1, "Create symlink ok, getinodeinfo fail rc = %d",
 			      rc);
 		} else {
-			if (pTcon->nocase)
-				d_set_d_op(direntry, &cifs_ci_dentry_ops);
-			else
-				d_set_d_op(direntry, &cifs_dentry_ops);
 			d_instantiate(direntry, newinode);
 		}
 	}
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
index 76b1b37..7f25cc3 100644
--- a/fs/cifs/readdir.c
+++ b/fs/cifs/readdir.c
@@ -102,11 +102,6 @@
 		return NULL;
 	}
 
-	if (cifs_sb_master_tcon(CIFS_SB(sb))->nocase)
-		d_set_d_op(dentry, &cifs_ci_dentry_ops);
-	else
-		d_set_d_op(dentry, &cifs_dentry_ops);
-
 	alias = d_materialise_unique(dentry, inode);
 	if (alias != NULL) {
 		dput(dentry);
diff --git a/fs/coda/cache.c b/fs/coda/cache.c
index 5525e1c..6901578 100644
--- a/fs/coda/cache.c
+++ b/fs/coda/cache.c
@@ -20,10 +20,9 @@
 #include <linux/spinlock.h>
 
 #include <linux/coda.h>
-#include <linux/coda_linux.h>
 #include <linux/coda_psdev.h>
-#include <linux/coda_fs_i.h>
-#include <linux/coda_cache.h>
+#include "coda_linux.h"
+#include "coda_cache.h"
 
 static atomic_t permission_epoch = ATOMIC_INIT(0);
 
diff --git a/fs/coda/cnode.c b/fs/coda/cnode.c
index 6022405..6475877 100644
--- a/fs/coda/cnode.c
+++ b/fs/coda/cnode.c
@@ -7,9 +7,8 @@
 #include <linux/time.h>
 
 #include <linux/coda.h>
-#include <linux/coda_linux.h>
-#include <linux/coda_fs_i.h>
 #include <linux/coda_psdev.h>
+#include "coda_linux.h"
 
 static inline int coda_fideq(struct CodaFid *fid1, struct CodaFid *fid2)
 {
diff --git a/include/linux/coda_cache.h b/fs/coda/coda_cache.h
similarity index 100%
rename from include/linux/coda_cache.h
rename to fs/coda/coda_cache.h
diff --git a/include/linux/coda_fs_i.h b/fs/coda/coda_fs_i.h
similarity index 100%
rename from include/linux/coda_fs_i.h
rename to fs/coda/coda_fs_i.h
diff --git a/fs/coda/coda_linux.c b/fs/coda/coda_linux.c
index bf4a3fd..2bdbcc1 100644
--- a/fs/coda/coda_linux.c
+++ b/fs/coda/coda_linux.c
@@ -17,9 +17,8 @@
 #include <linux/string.h>
 
 #include <linux/coda.h>
-#include <linux/coda_linux.h>
 #include <linux/coda_psdev.h>
-#include <linux/coda_fs_i.h>
+#include "coda_linux.h"
 
 /* initialize the debugging variables */
 int coda_fake_statfs;
diff --git a/include/linux/coda_linux.h b/fs/coda/coda_linux.h
similarity index 96%
rename from include/linux/coda_linux.h
rename to fs/coda/coda_linux.h
index 4ccc59c..9b0c532 100644
--- a/include/linux/coda_linux.h
+++ b/fs/coda/coda_linux.h
@@ -20,13 +20,15 @@
 #include <linux/wait.h>		
 #include <linux/types.h>
 #include <linux/fs.h>
-#include <linux/coda_fs_i.h>
+#include "coda_fs_i.h"
 
 /* operations */
 extern const struct inode_operations coda_dir_inode_operations;
 extern const struct inode_operations coda_file_inode_operations;
 extern const struct inode_operations coda_ioctl_inode_operations;
 
+extern const struct dentry_operations coda_dentry_operations;
+
 extern const struct address_space_operations coda_file_aops;
 extern const struct address_space_operations coda_symlink_aops;
 
diff --git a/fs/coda/dir.c b/fs/coda/dir.c
index 29badd9..2b8dae4 100644
--- a/fs/coda/dir.c
+++ b/fs/coda/dir.c
@@ -23,10 +23,9 @@
 #include <asm/uaccess.h>
 
 #include <linux/coda.h>
-#include <linux/coda_linux.h>
 #include <linux/coda_psdev.h>
-#include <linux/coda_fs_i.h>
-#include <linux/coda_cache.h>
+#include "coda_linux.h"
+#include "coda_cache.h"
 
 #include "coda_int.h"
 
@@ -61,7 +60,7 @@
 }
 #define CODA_EIO_ERROR ((void *) (coda_return_EIO))
 
-static const struct dentry_operations coda_dentry_operations =
+const struct dentry_operations coda_dentry_operations =
 {
 	.d_revalidate	= coda_dentry_revalidate,
 	.d_delete	= coda_dentry_delete,
@@ -126,8 +125,6 @@
 		return ERR_PTR(error);
 
 exit:
-	d_set_d_op(entry, &coda_dentry_operations);
-
 	if (inode && (type & CODA_NOCACHE))
 		coda_flag_inode(inode, C_VATTR | C_PURGE);
 
diff --git a/fs/coda/file.c b/fs/coda/file.c
index c8b50ba..0433057 100644
--- a/fs/coda/file.c
+++ b/fs/coda/file.c
@@ -21,10 +21,9 @@
 #include <asm/uaccess.h>
 
 #include <linux/coda.h>
-#include <linux/coda_linux.h>
-#include <linux/coda_fs_i.h>
 #include <linux/coda_psdev.h>
 
+#include "coda_linux.h"
 #include "coda_int.h"
 
 static ssize_t
diff --git a/fs/coda/inode.c b/fs/coda/inode.c
index 50dc7d1..871b277 100644
--- a/fs/coda/inode.c
+++ b/fs/coda/inode.c
@@ -28,10 +28,9 @@
 #include <linux/vmalloc.h>
 
 #include <linux/coda.h>
-#include <linux/coda_linux.h>
 #include <linux/coda_psdev.h>
-#include <linux/coda_fs_i.h>
-#include <linux/coda_cache.h>
+#include "coda_linux.h"
+#include "coda_cache.h"
 
 #include "coda_int.h"
 
@@ -45,7 +44,7 @@
 static struct inode *coda_alloc_inode(struct super_block *sb)
 {
 	struct coda_inode_info *ei;
-	ei = (struct coda_inode_info *)kmem_cache_alloc(coda_inode_cachep, GFP_KERNEL);
+	ei = kmem_cache_alloc(coda_inode_cachep, GFP_KERNEL);
 	if (!ei)
 		return NULL;
 	memset(&ei->c_fid, 0, sizeof(struct CodaFid));
@@ -193,6 +192,7 @@
 	sb->s_blocksize_bits = 12;
 	sb->s_magic = CODA_SUPER_MAGIC;
 	sb->s_op = &coda_super_operations;
+	sb->s_d_op = &coda_dentry_operations;
 	sb->s_bdi = &vc->bdi;
 
 	/* get root fid from Venus: this needs the root inode */
diff --git a/fs/coda/pioctl.c b/fs/coda/pioctl.c
index 741f0bd..6cbb3af 100644
--- a/fs/coda/pioctl.c
+++ b/fs/coda/pioctl.c
@@ -19,10 +19,10 @@
 #include <asm/uaccess.h>
 
 #include <linux/coda.h>
-#include <linux/coda_linux.h>
-#include <linux/coda_fs_i.h>
 #include <linux/coda_psdev.h>
 
+#include "coda_linux.h"
+
 /* pioctl ops */
 static int coda_ioctl_permission(struct inode *inode, int mask, unsigned int flags);
 static long coda_pioctl(struct file *filp, unsigned int cmd,
diff --git a/fs/coda/psdev.c b/fs/coda/psdev.c
index 62647a8..8f616e0 100644
--- a/fs/coda/psdev.c
+++ b/fs/coda/psdev.c
@@ -43,10 +43,10 @@
 #include <asm/uaccess.h>
 
 #include <linux/coda.h>
-#include <linux/coda_linux.h>
-#include <linux/coda_fs_i.h>
 #include <linux/coda_psdev.h>
 
+#include "coda_linux.h"
+
 #include "coda_int.h"
 
 /* statistics */
diff --git a/fs/coda/symlink.c b/fs/coda/symlink.c
index af78f00..ab94ef6 100644
--- a/fs/coda/symlink.c
+++ b/fs/coda/symlink.c
@@ -16,9 +16,9 @@
 #include <linux/pagemap.h>
 
 #include <linux/coda.h>
-#include <linux/coda_linux.h>
 #include <linux/coda_psdev.h>
-#include <linux/coda_fs_i.h>
+
+#include "coda_linux.h"
 
 static int coda_symlink_filler(struct file *file, struct page *page)
 {
diff --git a/fs/coda/upcall.c b/fs/coda/upcall.c
index c3563ca..9727e0c 100644
--- a/fs/coda/upcall.c
+++ b/fs/coda/upcall.c
@@ -33,10 +33,9 @@
 #include <linux/vfs.h>
 
 #include <linux/coda.h>
-#include <linux/coda_linux.h>
 #include <linux/coda_psdev.h>
-#include <linux/coda_fs_i.h>
-#include <linux/coda_cache.h>
+#include "coda_linux.h"
+#include "coda_cache.h"
 
 #include "coda_int.h"
 
diff --git a/fs/configfs/configfs_internal.h b/fs/configfs/configfs_internal.h
index 026cf68..82bda8f 100644
--- a/fs/configfs/configfs_internal.h
+++ b/fs/configfs/configfs_internal.h
@@ -90,6 +90,7 @@
 extern const struct file_operations bin_fops;
 extern const struct inode_operations configfs_dir_inode_operations;
 extern const struct inode_operations configfs_symlink_inode_operations;
+extern const struct dentry_operations configfs_dentry_ops;
 
 extern int configfs_symlink(struct inode *dir, struct dentry *dentry,
 			    const char *symname);
diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
index 36637a8..90ff3cb 100644
--- a/fs/configfs/dir.c
+++ b/fs/configfs/dir.c
@@ -72,7 +72,7 @@
 	return 1;
 }
 
-static const struct dentry_operations configfs_dentry_ops = {
+const struct dentry_operations configfs_dentry_ops = {
 	.d_iput		= configfs_d_iput,
 	/* simple_delete_dentry() isn't exported */
 	.d_delete	= configfs_d_delete,
@@ -442,7 +442,6 @@
 		return error;
 	}
 
-	d_set_d_op(dentry, &configfs_dentry_ops);
 	d_rehash(dentry);
 
 	return 0;
@@ -489,7 +488,6 @@
 		 */
 		if (dentry->d_name.len > NAME_MAX)
 			return ERR_PTR(-ENAMETOOLONG);
-		d_set_d_op(dentry, &configfs_dentry_ops);
 		d_add(dentry, NULL);
 		return NULL;
 	}
@@ -683,7 +681,6 @@
 	ret = -ENOMEM;
 	child = d_alloc(parent, &name);
 	if (child) {
-		d_set_d_op(child, &configfs_dentry_ops);
 		d_add(child, NULL);
 
 		ret = configfs_attach_group(&parent_group->cg_item,
@@ -1681,7 +1678,6 @@
 	err = -ENOMEM;
 	dentry = d_alloc(configfs_sb->s_root, &name);
 	if (dentry) {
-		d_set_d_op(dentry, &configfs_dentry_ops);
 		d_add(dentry, NULL);
 
 		err = configfs_attach_group(sd->s_element, &group->cg_item,
diff --git a/fs/configfs/mount.c b/fs/configfs/mount.c
index 7d3607f..ecc6217 100644
--- a/fs/configfs/mount.c
+++ b/fs/configfs/mount.c
@@ -101,6 +101,7 @@
 	configfs_root_group.cg_item.ci_dentry = root;
 	root->d_fsdata = &configfs_root;
 	sb->s_root = root;
+	sb->s_d_op = &configfs_dentry_ops; /* the rest get that */
 	return 0;
 }
 
diff --git a/fs/cramfs/inode.c b/fs/cramfs/inode.c
index 32fd5fe..e141939 100644
--- a/fs/cramfs/inode.c
+++ b/fs/cramfs/inode.c
@@ -34,57 +34,81 @@
 static DEFINE_MUTEX(read_mutex);
 
 
-/* These two macros may change in future, to provide better st_ino
-   semantics. */
-#define CRAMINO(x)	(((x)->offset && (x)->size)?(x)->offset<<2:1)
+/* These macros may change in future, to provide better st_ino semantics. */
 #define OFFSET(x)	((x)->i_ino)
 
-static void setup_inode(struct inode *inode, struct cramfs_inode * cramfs_inode)
+static unsigned long cramino(struct cramfs_inode *cino, unsigned int offset)
 {
+	if (!cino->offset)
+		return offset + 1;
+	if (!cino->size)
+		return offset + 1;
+
+	/*
+	 * The file mode test fixes buggy mkcramfs implementations where
+	 * cramfs_inode->offset is set to a non-zero value for entries
+	 * which do not contain data, like device nodes and fifos.
+	 */
+	switch (cino->mode & S_IFMT) {
+	case S_IFREG:
+	case S_IFDIR:
+	case S_IFLNK:
+		return cino->offset << 2;
+	default:
+		break;
+	}
+	return offset + 1;
+}
+
+static struct inode *get_cramfs_inode(struct super_block *sb,
+	struct cramfs_inode *cramfs_inode, unsigned int offset)
+{
+	struct inode *inode;
 	static struct timespec zerotime;
+
+	inode = iget_locked(sb, cramino(cramfs_inode, offset));
+	if (!inode)
+		return ERR_PTR(-ENOMEM);
+	if (!(inode->i_state & I_NEW))
+		return inode;
+
+	switch (cramfs_inode->mode & S_IFMT) {
+	case S_IFREG:
+		inode->i_fop = &generic_ro_fops;
+		inode->i_data.a_ops = &cramfs_aops;
+		break;
+	case S_IFDIR:
+		inode->i_op = &cramfs_dir_inode_operations;
+		inode->i_fop = &cramfs_directory_operations;
+		break;
+	case S_IFLNK:
+		inode->i_op = &page_symlink_inode_operations;
+		inode->i_data.a_ops = &cramfs_aops;
+		break;
+	default:
+		init_special_inode(inode, cramfs_inode->mode,
+				old_decode_dev(cramfs_inode->size));
+	}
+
 	inode->i_mode = cramfs_inode->mode;
 	inode->i_uid = cramfs_inode->uid;
-	inode->i_size = cramfs_inode->size;
-	inode->i_blocks = (cramfs_inode->size - 1) / 512 + 1;
 	inode->i_gid = cramfs_inode->gid;
+
+	/* if the lower 2 bits are zero, the inode contains data */
+	if (!(inode->i_ino & 3)) {
+		inode->i_size = cramfs_inode->size;
+		inode->i_blocks = (cramfs_inode->size - 1) / 512 + 1;
+	}
+
 	/* Struct copy intentional */
 	inode->i_mtime = inode->i_atime = inode->i_ctime = zerotime;
 	/* inode->i_nlink is left 1 - arguably wrong for directories,
 	   but it's the best we can do without reading the directory
 	   contents.  1 yields the right result in GNU find, even
 	   without -noleaf option. */
-	if (S_ISREG(inode->i_mode)) {
-		inode->i_fop = &generic_ro_fops;
-		inode->i_data.a_ops = &cramfs_aops;
-	} else if (S_ISDIR(inode->i_mode)) {
-		inode->i_op = &cramfs_dir_inode_operations;
-		inode->i_fop = &cramfs_directory_operations;
-	} else if (S_ISLNK(inode->i_mode)) {
-		inode->i_op = &page_symlink_inode_operations;
-		inode->i_data.a_ops = &cramfs_aops;
-	} else {
-		init_special_inode(inode, inode->i_mode,
-			old_decode_dev(cramfs_inode->size));
-	}
-}
 
-static struct inode *get_cramfs_inode(struct super_block *sb,
-				struct cramfs_inode * cramfs_inode)
-{
-	struct inode *inode;
-	if (CRAMINO(cramfs_inode) == 1) {
-		inode = new_inode(sb);
-		if (inode) {
-			inode->i_ino = 1;
-			setup_inode(inode, cramfs_inode);
-		}
-	} else {
-		inode = iget_locked(sb, CRAMINO(cramfs_inode));
-		if (inode && (inode->i_state & I_NEW)) {
-			setup_inode(inode, cramfs_inode);
-			unlock_new_inode(inode);
-		}
-	}
+	unlock_new_inode(inode);
+
 	return inode;
 }
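As a concrete reading of cramino() above: a regular file, directory or symlink whose on-disk cramfs_inode.offset is, say, 0x40 gets st_ino 0x40 << 2 = 0x100, while a device node or fifo (which carries no data, and whose offset field may hold junk written by a buggy mkcramfs) falls through to the offset + 1 case, so an entry found at directory offset 123 gets st_ino 124. Only the shifted offsets have their low two bits clear, which is what the !(inode->i_ino & 3) test in get_cramfs_inode() relies on when deciding whether to trust the size field.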
 
@@ -265,6 +289,9 @@
 		printk(KERN_ERR "cramfs: root is not a directory\n");
 		goto out;
 	}
+	/* correct strange, hard-coded permissions of mkcramfs */
+	super.root.mode |= (S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP | S_IROTH | S_IXOTH);
+
 	root_offset = super.root.offset << 2;
 	if (super.flags & CRAMFS_FLAG_FSID_VERSION_2) {
 		sbi->size=super.size;
@@ -289,7 +316,7 @@
 
 	/* Set it all up.. */
 	sb->s_op = &cramfs_ops;
-	root = get_cramfs_inode(sb, &super.root);
+	root = get_cramfs_inode(sb, &super.root, 0);
 	if (!root)
 		goto out;
 	sb->s_root = d_alloc_root(root);
@@ -365,7 +392,7 @@
 		 */
 		namelen = de->namelen << 2;
 		memcpy(buf, name, namelen);
-		ino = CRAMINO(de);
+		ino = cramino(de, OFFSET(inode) + offset);
 		mode = de->mode;
 		mutex_unlock(&read_mutex);
 		nextoffset = offset + sizeof(*de) + namelen;
@@ -404,8 +431,9 @@
 		struct cramfs_inode *de;
 		char *name;
 		int namelen, retval;
+		int dir_off = OFFSET(dir) + offset;
 
-		de = cramfs_read(dir->i_sb, OFFSET(dir) + offset, sizeof(*de)+CRAMFS_MAXPATHLEN);
+		de = cramfs_read(dir->i_sb, dir_off, sizeof(*de)+CRAMFS_MAXPATHLEN);
 		name = (char *)(de+1);
 
 		/* Try to take advantage of sorted directories */
@@ -436,7 +464,7 @@
 		if (!retval) {
 			struct cramfs_inode entry = *de;
 			mutex_unlock(&read_mutex);
-			d_add(dentry, get_cramfs_inode(dir->i_sb, &entry));
+			d_add(dentry, get_cramfs_inode(dir->i_sb, &entry, dir_off));
 			return NULL;
 		}
 		/* else (retval < 0) */
diff --git a/fs/dcache.c b/fs/dcache.c
index 5699d4c..0c6d5c5 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -1320,6 +1320,7 @@
 		__dget_dlock(parent);
 		dentry->d_parent = parent;
 		dentry->d_sb = parent->d_sb;
+		d_set_d_op(dentry, dentry->d_sb->s_d_op);
 		list_add(&dentry->d_u.d_child, &parent->d_subdirs);
 		spin_unlock(&parent->d_lock);
 	}
@@ -1335,6 +1336,7 @@
 	struct dentry *dentry = d_alloc(NULL, name);
 	if (dentry) {
 		dentry->d_sb = sb;
+		d_set_d_op(dentry, dentry->d_sb->s_d_op);
 		dentry->d_parent = dentry;
 		dentry->d_flags |= DCACHE_DISCONNECTED;
 	}
@@ -1507,6 +1509,7 @@
 		res = d_alloc(NULL, &name);
 		if (res) {
 			res->d_sb = root_inode->i_sb;
+			d_set_d_op(res, res->d_sb->s_d_op);
 			res->d_parent = res;
 			d_instantiate(res, root_inode);
 		}
@@ -1567,6 +1570,7 @@
 	/* attach a disconnected dentry */
 	spin_lock(&tmp->d_lock);
 	tmp->d_sb = inode->i_sb;
+	d_set_d_op(tmp, tmp->d_sb->s_d_op);
 	tmp->d_inode = inode;
 	tmp->d_flags |= DCACHE_DISCONNECTED;
 	list_add(&tmp->d_alias, &inode->i_dentry);
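The d_set_d_op() calls added to fs/dcache.c above are the mechanism behind the cifs, coda, configfs and ecryptfs hunks in this merge: a filesystem now stores its dentry_operations once in sb->s_d_op, and every dentry created through d_alloc(), d_alloc_root() or d_obtain_alias() inherits it automatically, so the per-dentry d_set_d_op() calls in the individual filesystems can go away. A hedged sketch of how a filesystem opts in (all examplefs_* names are hypothetical):

	#include <linux/dcache.h>
	#include <linux/fs.h>
	#include <linux/namei.h>

	static int examplefs_d_revalidate(struct dentry *dentry,
					  struct nameidata *nd)
	{
		return 1;		/* always valid in this sketch */
	}

	static const struct dentry_operations examplefs_dentry_ops = {
		.d_revalidate	= examplefs_d_revalidate,
	};

	static int examplefs_fill_super(struct super_block *sb, void *data,
					int silent)
	{
		/* set once; d_alloc() and friends copy it into every dentry */
		sb->s_d_op = &examplefs_dentry_ops;
		/* ... the rest of the usual fill_super work ... */
		return 0;
	}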
@@ -1966,7 +1970,7 @@
 /**
  * d_validate - verify dentry provided from insecure source (deprecated)
  * @dentry: The dentry alleged to be valid child of @dparent
- * @dparent: The parent dentry (known to be valid)
+ * @parent: The parent dentry (known to be valid)
  *
  * An insecure source has sent us a dentry, here we verify it and dget() it.
  * This is used by ncpfs in its readdir implementation.
@@ -2449,8 +2453,7 @@
 }
 
 /**
- * Prepend path string to a buffer
- *
+ * prepend_path - Prepend path string to a buffer
  * @path: the dentry/vfsmount to report
  * @root: root vfsmnt/dentry (may be modified by this function)
  * @buffer: pointer to the end of the buffer
diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
index 337352a..64ff023 100644
--- a/fs/ecryptfs/inode.c
+++ b/fs/ecryptfs/inode.c
@@ -441,7 +441,6 @@
 	struct qstr lower_name;
 	int rc = 0;
 
-	d_set_d_op(ecryptfs_dentry, &ecryptfs_dops);
 	if ((ecryptfs_dentry->d_name.len == 1
 	     && !strcmp(ecryptfs_dentry->d_name.name, "."))
 	    || (ecryptfs_dentry->d_name.len == 2
diff --git a/fs/ecryptfs/main.c b/fs/ecryptfs/main.c
index 3510386..9ed4769 100644
--- a/fs/ecryptfs/main.c
+++ b/fs/ecryptfs/main.c
@@ -141,21 +141,9 @@
 	return rc;
 }
 
-/**
- * ecryptfs_interpose
- * @lower_dentry: Existing dentry in the lower filesystem
- * @dentry: ecryptfs' dentry
- * @sb: ecryptfs's super_block
- * @flags: flags to govern behavior of interpose procedure
- *
- * Interposes upper and lower dentries.
- *
- * Returns zero on success; non-zero otherwise
- */
-int ecryptfs_interpose(struct dentry *lower_dentry, struct dentry *dentry,
-		       struct super_block *sb, u32 flags)
+static struct inode *ecryptfs_get_inode(struct inode *lower_inode,
+		       struct super_block *sb)
 {
-	struct inode *lower_inode;
 	struct inode *inode;
 	int rc = 0;
 
@@ -189,17 +177,38 @@
 	if (special_file(lower_inode->i_mode))
 		init_special_inode(inode, lower_inode->i_mode,
 				   lower_inode->i_rdev);
-	d_set_d_op(dentry, &ecryptfs_dops);
 	fsstack_copy_attr_all(inode, lower_inode);
 	/* This size will be overwritten for real files w/ headers and
 	 * other metadata */
 	fsstack_copy_inode_size(inode, lower_inode);
+	return inode;
+out:
+	return ERR_PTR(rc);
+}
+
+/**
+ * ecryptfs_interpose
+ * @lower_dentry: Existing dentry in the lower filesystem
+ * @dentry: ecryptfs' dentry
+ * @sb: ecryptfs's super_block
+ * @flags: flags to govern behavior of interpose procedure
+ *
+ * Interposes upper and lower dentries.
+ *
+ * Returns zero on success; non-zero otherwise
+ */
+int ecryptfs_interpose(struct dentry *lower_dentry, struct dentry *dentry,
+		       struct super_block *sb, u32 flags)
+{
+	struct inode *lower_inode = lower_dentry->d_inode;
+	struct inode *inode = ecryptfs_get_inode(lower_inode, sb);
+	if (IS_ERR(inode))
+		return PTR_ERR(inode);
 	if (flags & ECRYPTFS_INTERPOSE_FLAG_D_ADD)
 		d_add(dentry, inode);
 	else
 		d_instantiate(dentry, inode);
-out:
-	return rc;
+	return 0;
 }
 
 enum { ecryptfs_opt_sig, ecryptfs_opt_ecryptfs_sig,
@@ -492,59 +501,11 @@
 static struct file_system_type ecryptfs_fs_type;
 
 /**
- * ecryptfs_read_super
- * @sb: The ecryptfs super block
- * @dev_name: The path to mount over
- *
- * Read the super block of the lower filesystem, and use
- * ecryptfs_interpose to create our initial inode and super block
- * struct.
- */
-static int ecryptfs_read_super(struct super_block *sb, const char *dev_name)
-{
-	struct path path;
-	int rc;
-
-	rc = kern_path(dev_name, LOOKUP_FOLLOW | LOOKUP_DIRECTORY, &path);
-	if (rc) {
-		ecryptfs_printk(KERN_WARNING, "path_lookup() failed\n");
-		goto out;
-	}
-	if (path.dentry->d_sb->s_type == &ecryptfs_fs_type) {
-		rc = -EINVAL;
-		printk(KERN_ERR "Mount on filesystem of type "
-			"eCryptfs explicitly disallowed due to "
-			"known incompatibilities\n");
-		goto out_free;
-	}
-	ecryptfs_set_superblock_lower(sb, path.dentry->d_sb);
-	sb->s_maxbytes = path.dentry->d_sb->s_maxbytes;
-	sb->s_blocksize = path.dentry->d_sb->s_blocksize;
-	ecryptfs_set_dentry_lower(sb->s_root, path.dentry);
-	ecryptfs_set_dentry_lower_mnt(sb->s_root, path.mnt);
-	rc = ecryptfs_interpose(path.dentry, sb->s_root, sb, 0);
-	if (rc)
-		goto out_free;
-	rc = 0;
-	goto out;
-out_free:
-	path_put(&path);
-out:
-	return rc;
-}
-
-/**
  * ecryptfs_get_sb
  * @fs_type
  * @flags
  * @dev_name: The path to mount over
  * @raw_data: The options passed into the kernel
- *
- * The whole ecryptfs_get_sb process is broken into 3 functions:
- * ecryptfs_parse_options(): handle options passed to ecryptfs, if any
- * ecryptfs_read_super(): this accesses the lower filesystem and uses
- *                        ecryptfs_interpose to perform most of the linking
- * ecryptfs_interpose(): links the lower filesystem into ecryptfs (inode.c)
  */
 static struct dentry *ecryptfs_mount(struct file_system_type *fs_type, int flags,
 			const char *dev_name, void *raw_data)
@@ -553,6 +514,8 @@
 	struct ecryptfs_sb_info *sbi;
 	struct ecryptfs_dentry_info *root_info;
 	const char *err = "Getting sb failed";
+	struct inode *inode;
+	struct path path;
 	int rc;
 
 	sbi = kmem_cache_zalloc(ecryptfs_sb_info_cache, GFP_KERNEL);
@@ -575,10 +538,8 @@
 
 	s->s_flags = flags;
 	rc = bdi_setup_and_register(&sbi->bdi, "ecryptfs", BDI_CAP_MAP_COPY);
-	if (rc) {
-		deactivate_locked_super(s);
-		goto out;
-	}
+	if (rc)
+		goto out1;
 
 	ecryptfs_set_superblock_private(s, sbi);
 	s->s_bdi = &sbi->bdi;
@@ -586,34 +547,54 @@
 	/* ->kill_sb() will take care of sbi after that point */
 	sbi = NULL;
 	s->s_op = &ecryptfs_sops;
+	s->s_d_op = &ecryptfs_dops;
+
+	err = "Reading sb failed";
+	rc = kern_path(dev_name, LOOKUP_FOLLOW | LOOKUP_DIRECTORY, &path);
+	if (rc) {
+		ecryptfs_printk(KERN_WARNING, "kern_path() failed\n");
+		goto out1;
+	}
+	if (path.dentry->d_sb->s_type == &ecryptfs_fs_type) {
+		rc = -EINVAL;
+		printk(KERN_ERR "Mount on filesystem of type "
+			"eCryptfs explicitly disallowed due to "
+			"known incompatibilities\n");
+		goto out_free;
+	}
+	ecryptfs_set_superblock_lower(s, path.dentry->d_sb);
+	s->s_maxbytes = path.dentry->d_sb->s_maxbytes;
+	s->s_blocksize = path.dentry->d_sb->s_blocksize;
+
+	inode = ecryptfs_get_inode(path.dentry->d_inode, s);
+	rc = PTR_ERR(inode);
+	if (IS_ERR(inode))
+		goto out_free;
+
+	s->s_root = d_alloc_root(inode);
+	if (!s->s_root) {
+		iput(inode);
+		rc = -ENOMEM;
+		goto out_free;
+	}
 
 	rc = -ENOMEM;
-	s->s_root = d_alloc(NULL, &(const struct qstr) {
-			     .hash = 0,.name = "/",.len = 1});
-	if (!s->s_root) {
-		deactivate_locked_super(s);
-		goto out;
-	}
-	d_set_d_op(s->s_root, &ecryptfs_dops);
-	s->s_root->d_sb = s;
-	s->s_root->d_parent = s->s_root;
-
 	root_info = kmem_cache_zalloc(ecryptfs_dentry_info_cache, GFP_KERNEL);
-	if (!root_info) {
-		deactivate_locked_super(s);
-		goto out;
-	}
+	if (!root_info)
+		goto out_free;
+
 	/* ->kill_sb() will take care of root_info */
 	ecryptfs_set_dentry_private(s->s_root, root_info);
+	ecryptfs_set_dentry_lower(s->s_root, path.dentry);
+	ecryptfs_set_dentry_lower_mnt(s->s_root, path.mnt);
+
 	s->s_flags |= MS_ACTIVE;
-	rc = ecryptfs_read_super(s, dev_name);
-	if (rc) {
-		deactivate_locked_super(s);
-		err = "Reading sb failed";
-		goto out;
-	}
 	return dget(s->s_root);
 
+out_free:
+	path_put(&path);
+out1:
+	deactivate_locked_super(s);
 out:
 	if (sbi) {
 		ecryptfs_destroy_mount_crypt_stat(&sbi->mount_crypt_stat);
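The net effect of the ecryptfs changes above is that ecryptfs_mount() now performs the lower-path lookup and builds the root inode itself, via the new ecryptfs_get_inode() helper and d_alloc_root(), instead of handing a half-built superblock to ecryptfs_read_super(); the dentry operations move into s->s_d_op, and every failure path funnels through out_free/out1 so that deactivate_locked_super() is called exactly once.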
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index 8cf0724..cc8a9b7 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -217,7 +217,7 @@
  * Configuration options available inside /proc/sys/fs/epoll/
  */
 /* Maximum number of epoll watched descriptors, per user */
-static int max_user_watches __read_mostly;
+static long max_user_watches __read_mostly;
 
 /*
  * This mutex is used to serialize ep_free() and eventpoll_release_file().
@@ -240,16 +240,18 @@
 
 #include <linux/sysctl.h>
 
-static int zero;
+static long zero;
+static long long_max = LONG_MAX;
 
 ctl_table epoll_table[] = {
 	{
 		.procname	= "max_user_watches",
 		.data		= &max_user_watches,
-		.maxlen		= sizeof(int),
+		.maxlen		= sizeof(max_user_watches),
 		.mode		= 0644,
-		.proc_handler	= proc_dointvec_minmax,
+		.proc_handler	= proc_doulongvec_minmax,
 		.extra1		= &zero,
+		.extra2		= &long_max,
 	},
 	{ }
 };
@@ -561,7 +563,7 @@
 	/* At this point it is safe to free the eventpoll item */
 	kmem_cache_free(epi_cache, epi);
 
-	atomic_dec(&ep->user->epoll_watches);
+	atomic_long_dec(&ep->user->epoll_watches);
 
 	return 0;
 }
@@ -898,11 +900,12 @@
 {
 	int error, revents, pwake = 0;
 	unsigned long flags;
+	long user_watches;
 	struct epitem *epi;
 	struct ep_pqueue epq;
 
-	if (unlikely(atomic_read(&ep->user->epoll_watches) >=
-		     max_user_watches))
+	user_watches = atomic_long_read(&ep->user->epoll_watches);
+	if (unlikely(user_watches >= max_user_watches))
 		return -ENOSPC;
 	if (!(epi = kmem_cache_alloc(epi_cache, GFP_KERNEL)))
 		return -ENOMEM;
@@ -966,7 +969,7 @@
 
 	spin_unlock_irqrestore(&ep->lock, flags);
 
-	atomic_inc(&ep->user->epoll_watches);
+	atomic_long_inc(&ep->user->epoll_watches);
 
 	/* We have to call this outside the lock */
 	if (pwake)
@@ -1426,6 +1429,7 @@
 	 */
 	max_user_watches = (((si.totalram - si.totalhigh) / 25) << PAGE_SHIFT) /
 		EP_ITEM_COST;
+	BUG_ON(max_user_watches < 0);
 
 	/* Initialize the structure used to perform safe poll wait head wake ups */
 	ep_nested_calls_init(&poll_safewake_ncalls);
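A rough sense of why the int-to-long switch above matters, under the assumption that EP_ITEM_COST is on the order of one to two hundred bytes per watch: max_user_watches is initialised to roughly 1/25 of low memory divided by EP_ITEM_COST, so once low memory reaches somewhere in the region of 10 TB the result exceeds INT_MAX and would wrap negative in an int. Storing the limit in a long, counting epoll_watches with the atomic_long_* helpers, and switching the sysctl handler to proc_doulongvec_minmax (bounded by zero and LONG_MAX) keeps the arithmetic consistent, and the added BUG_ON documents that the computed value is expected to stay non-negative.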
diff --git a/fs/ext2/dir.c b/fs/ext2/dir.c
index 2709b34..47cda41 100644
--- a/fs/ext2/dir.c
+++ b/fs/ext2/dir.c
@@ -28,21 +28,30 @@
 
 typedef struct ext2_dir_entry_2 ext2_dirent;
 
+/*
+ * Tests against MAX_REC_LEN etc were put in place for 64k block
+ * sizes; if that is not possible on this arch, we can skip
+ * those tests and speed things up.
+ */
 static inline unsigned ext2_rec_len_from_disk(__le16 dlen)
 {
 	unsigned len = le16_to_cpu(dlen);
 
+#if (PAGE_CACHE_SIZE >= 65536)
 	if (len == EXT2_MAX_REC_LEN)
 		return 1 << 16;
+#endif
 	return len;
 }
 
 static inline __le16 ext2_rec_len_to_disk(unsigned len)
 {
+#if (PAGE_CACHE_SIZE >= 65536)
 	if (len == (1 << 16))
 		return cpu_to_le16(EXT2_MAX_REC_LEN);
 	else
 		BUG_ON(len > (1 << 16));
+#endif
 	return cpu_to_le16(len);
 }
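For example, with 64 KiB directory blocks a single entry can cover the whole block, so its rec_len is 65536, which does not fit in the on-disk __le16 field; it is stored as EXT2_MAX_REC_LEN and expanded back to 1 << 16 on read. On architectures where PAGE_CACHE_SIZE is below 64 KiB such a block size is not usable in the first place, so the added #if lets those configurations skip the special-case checks, as the new comment explains.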
 
@@ -129,15 +138,15 @@
 		p = (ext2_dirent *)(kaddr + offs);
 		rec_len = ext2_rec_len_from_disk(p->rec_len);
 
-		if (rec_len < EXT2_DIR_REC_LEN(1))
+		if (unlikely(rec_len < EXT2_DIR_REC_LEN(1)))
 			goto Eshort;
-		if (rec_len & 3)
+		if (unlikely(rec_len & 3))
 			goto Ealign;
-		if (rec_len < EXT2_DIR_REC_LEN(p->name_len))
+		if (unlikely(rec_len < EXT2_DIR_REC_LEN(p->name_len)))
 			goto Enamelen;
-		if (((offs + rec_len - 1) ^ offs) & ~(chunk_size-1))
+		if (unlikely(((offs + rec_len - 1) ^ offs) & ~(chunk_size-1)))
 			goto Espan;
-		if (le32_to_cpu(p->inode) > max_inumber)
+		if (unlikely(le32_to_cpu(p->inode) > max_inumber))
 			goto Einumber;
 	}
 	if (offs != limit)
diff --git a/fs/ext2/namei.c b/fs/ext2/namei.c
index f8aecd2..2e1d834 100644
--- a/fs/ext2/namei.c
+++ b/fs/ext2/namei.c
@@ -67,7 +67,7 @@
 	inode = NULL;
 	if (ino) {
 		inode = ext2_iget(dir->i_sb, ino);
-		if (unlikely(IS_ERR(inode))) {
+		if (IS_ERR(inode)) {
 			if (PTR_ERR(inode) == -ESTALE) {
 				ext2_error(dir->i_sb, __func__,
 						"deleted inode referenced: %lu",
diff --git a/fs/ext2/super.c b/fs/ext2/super.c
index e0c6380..7731695 100644
--- a/fs/ext2/super.c
+++ b/fs/ext2/super.c
@@ -43,9 +43,10 @@
 static int ext2_statfs (struct dentry * dentry, struct kstatfs * buf);
 static int ext2_sync_fs(struct super_block *sb, int wait);
 
-void ext2_error (struct super_block * sb, const char * function,
-		 const char * fmt, ...)
+void ext2_error(struct super_block *sb, const char *function,
+		const char *fmt, ...)
 {
+	struct va_format vaf;
 	va_list args;
 	struct ext2_sb_info *sbi = EXT2_SB(sb);
 	struct ext2_super_block *es = sbi->s_es;
@@ -59,9 +60,13 @@
 	}
 
 	va_start(args, fmt);
-	printk(KERN_CRIT "EXT2-fs (%s): error: %s: ", sb->s_id, function);
-	vprintk(fmt, args);
-	printk("\n");
+
+	vaf.fmt = fmt;
+	vaf.va = &args;
+
+	printk(KERN_CRIT "EXT2-fs (%s): error: %s: %pV\n",
+	       sb->s_id, function, &vaf);
+
 	va_end(args);
 
 	if (test_opt(sb, ERRORS_PANIC))
@@ -76,12 +81,16 @@
 void ext2_msg(struct super_block *sb, const char *prefix,
 		const char *fmt, ...)
 {
+	struct va_format vaf;
 	va_list args;
 
 	va_start(args, fmt);
-	printk("%sEXT2-fs (%s): ", prefix, sb->s_id);
-	vprintk(fmt, args);
-	printk("\n");
+
+	vaf.fmt = fmt;
+	vaf.va = &args;
+
+	printk("%sEXT2-fs (%s): %pV\n", prefix, sb->s_id, &vaf);
+
 	va_end(args);
 }
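All of the ext2 (and, later in this merge, ext3) message helpers converge on the same %pV idiom: rather than emitting the prefix, the formatted message and the trailing newline as three separate printk() calls that can interleave with output from other CPUs, the va_list is wrapped in a struct va_format and printed through a single format string. A minimal sketch of the pattern, with a hypothetical helper name:

	#include <linux/kernel.h>

	static void example_msg(const char *prefix, const char *fmt, ...)
	{
		struct va_format vaf;
		va_list args;

		va_start(args, fmt);
		vaf.fmt = fmt;
		vaf.va = &args;
		/* one printk call, so the line cannot be split in the log */
		printk("%sexample-fs: %pV\n", prefix, &vaf);
		va_end(args);
	}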
 
diff --git a/fs/ext2/xattr.c b/fs/ext2/xattr.c
index f84700b..c2e4dce 100644
--- a/fs/ext2/xattr.c
+++ b/fs/ext2/xattr.c
@@ -199,14 +199,6 @@
 			goto found;
 		entry = next;
 	}
-	/* Check the remaining name entries */
-	while (!IS_LAST_ENTRY(entry)) {
-		struct ext2_xattr_entry *next =
-			EXT2_XATTR_NEXT(entry);
-		if ((char *)next >= end)
-			goto bad_block;
-		entry = next;
-	}
 	if (ext2_xattr_cache_insert(bh))
 		ea_idebug(inode, "cache insert failed");
 	error = -ENODATA;
@@ -355,7 +347,7 @@
 /*
  * ext2_xattr_set()
  *
- * Create, replace or remove an extended attribute for this inode. Buffer
+ * Create, replace or remove an extended attribute for this inode.  Value
  * is NULL to remove an existing extended attribute, and non-NULL to
  * either replace an existing extended attribute, or create a new extended
  * attribute. The flags XATTR_REPLACE and XATTR_CREATE
diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
index b3db226..045995c 100644
--- a/fs/ext3/balloc.c
+++ b/fs/ext3/balloc.c
@@ -20,6 +20,7 @@
 #include <linux/ext3_jbd.h>
 #include <linux/quotaops.h>
 #include <linux/buffer_head.h>
+#include <linux/blkdev.h>
 
 /*
  * balloc.c contains the blocks allocation and deallocation routines
@@ -39,6 +40,21 @@
 
 #define in_range(b, first, len)	((b) >= (first) && (b) <= (first) + (len) - 1)
 
+/*
+ * Calculate the block group number and offset, given a block number
+ */
+static void ext3_get_group_no_and_offset(struct super_block *sb,
+	ext3_fsblk_t blocknr, unsigned long *blockgrpp, ext3_grpblk_t *offsetp)
+{
+	struct ext3_super_block *es = EXT3_SB(sb)->s_es;
+
+	blocknr = blocknr - le32_to_cpu(es->s_first_data_block);
+	if (offsetp)
+		*offsetp = blocknr % EXT3_BLOCKS_PER_GROUP(sb);
+	if (blockgrpp)
+		*blockgrpp = blocknr / EXT3_BLOCKS_PER_GROUP(sb);
+}
+
 /**
  * ext3_get_group_desc() -- load group descriptor from disk
  * @sb:			super block
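As a quick worked example of the ext3_get_group_no_and_offset() helper added above, assume 4 KiB blocks (so EXT3_BLOCKS_PER_GROUP(sb) is 32768) and s_first_data_block == 0: block 50000 falls in group 50000 / 32768 = 1, at offset 50000 % 32768 = 17232 within that group. On a 1 KiB-block filesystem s_first_data_block is 1, so the initial subtraction shifts the block number down by one before the division.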
@@ -1885,3 +1901,253 @@
 	return ext3_bg_num_gdb_meta(sb,group);
 
 }
+
+/**
+ * ext3_trim_all_free -- trim all free space in an allocation group
+ * @sb:			super block for file system
+ * @group:		allocation group to trim
+ * @start:		first group block to examine
+ * @max:		last group block to examine
+ * @minblocks:		minimum extent block count
+ *
+ * ext3_trim_all_free walks through the group's block bitmap searching for free
+ * blocks. When a free block is found, it claims this block and the following
+ * free blocks to build the biggest free extent possible, until it reaches a
+ * used block. It then issues a TRIM command on this extent and frees the
+ * extent in the block bitmap. This is repeated until the whole group is scanned.
+ */
+ext3_grpblk_t ext3_trim_all_free(struct super_block *sb, unsigned int group,
+				ext3_grpblk_t start, ext3_grpblk_t max,
+				ext3_grpblk_t minblocks)
+{
+	handle_t *handle;
+	ext3_grpblk_t next, free_blocks, bit, freed, count = 0;
+	ext3_fsblk_t discard_block;
+	struct ext3_sb_info *sbi;
+	struct buffer_head *gdp_bh, *bitmap_bh = NULL;
+	struct ext3_group_desc *gdp;
+	int err = 0, ret = 0;
+
+	/*
+	 * We will update one block bitmap, and one group descriptor
+	 */
+	handle = ext3_journal_start_sb(sb, 2);
+	if (IS_ERR(handle))
+		return PTR_ERR(handle);
+
+	bitmap_bh = read_block_bitmap(sb, group);
+	if (!bitmap_bh) {
+		err = -EIO;
+		goto err_out;
+	}
+
+	BUFFER_TRACE(bitmap_bh, "getting undo access");
+	err = ext3_journal_get_undo_access(handle, bitmap_bh);
+	if (err)
+		goto err_out;
+
+	gdp = ext3_get_group_desc(sb, group, &gdp_bh);
+	if (!gdp) {
+		err = -EIO;
+		goto err_out;
+	}
+
+	BUFFER_TRACE(gdp_bh, "get_write_access");
+	err = ext3_journal_get_write_access(handle, gdp_bh);
+	if (err)
+		goto err_out;
+
+	free_blocks = le16_to_cpu(gdp->bg_free_blocks_count);
+	sbi = EXT3_SB(sb);
+
+	 /* Walk through the whole group */
+	while (start < max) {
+		start = bitmap_search_next_usable_block(start, bitmap_bh, max);
+		if (start < 0)
+			break;
+		next = start;
+
+		/*
+		 * Allocate contiguous free extents by setting bits in the
+		 * block bitmap
+		 */
+		while (next < max
+			&& claim_block(sb_bgl_lock(sbi, group),
+					next, bitmap_bh)) {
+			next++;
+		}
+
+		 /* We did not claim any blocks */
+		if (next == start)
+			continue;
+
+		discard_block = (ext3_fsblk_t)start +
+				ext3_group_first_block_no(sb, group);
+
+		/* Update counters */
+		spin_lock(sb_bgl_lock(sbi, group));
+		le16_add_cpu(&gdp->bg_free_blocks_count, start - next);
+		spin_unlock(sb_bgl_lock(sbi, group));
+		percpu_counter_sub(&sbi->s_freeblocks_counter, next - start);
+
+		/* Do not issue a TRIM on extents smaller than minblocks */
+		if ((next - start) < minblocks)
+			goto free_extent;
+
+		 /* Send the TRIM command down to the device */
+		err = sb_issue_discard(sb, discard_block, next - start,
+				       GFP_NOFS, 0);
+		count += (next - start);
+free_extent:
+		freed = 0;
+
+		/*
+		 * Clear bits in the bitmap
+		 */
+		for (bit = start; bit < next; bit++) {
+			BUFFER_TRACE(bitmap_bh, "clear bit");
+			if (!ext3_clear_bit_atomic(sb_bgl_lock(sbi, group),
+						bit, bitmap_bh->b_data)) {
+				ext3_error(sb, __func__,
+					"bit already cleared for block "E3FSBLK,
+					 (unsigned long)bit);
+				BUFFER_TRACE(bitmap_bh, "bit already cleared");
+			} else {
+				freed++;
+			}
+		}
+
+		/* Update counters */
+		spin_lock(sb_bgl_lock(sbi, group));
+		le16_add_cpu(&gdp->bg_free_blocks_count, freed);
+		spin_unlock(sb_bgl_lock(sbi, group));
+		percpu_counter_add(&sbi->s_freeblocks_counter, freed);
+
+		start = next;
+		if (err < 0) {
+			if (err != -EOPNOTSUPP)
+				ext3_warning(sb, __func__, "Discard command "
+					     "returned error %d\n", err);
+			break;
+		}
+
+		if (fatal_signal_pending(current)) {
+			err = -ERESTARTSYS;
+			break;
+		}
+
+		cond_resched();
+
+		/* No more suitable extents */
+		if ((free_blocks - count) < minblocks)
+			break;
+	}
+
+	/* We dirtied the bitmap block */
+	BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
+	ret = ext3_journal_dirty_metadata(handle, bitmap_bh);
+	if (!err)
+		err = ret;
+
+	/* And the group descriptor block */
+	BUFFER_TRACE(gdp_bh, "dirtied group descriptor block");
+	ret = ext3_journal_dirty_metadata(handle, gdp_bh);
+	if (!err)
+		err = ret;
+
+	ext3_debug("trimmed %d blocks in the group %d\n",
+		count, group);
+
+err_out:
+	if (err)
+		count = err;
+	ext3_journal_stop(handle);
+	brelse(bitmap_bh);
+
+	return count;
+}
+
+/**
+ * ext3_trim_fs() -- trim ioctl handler function
+ * @sb:			superblock for the filesystem
+ * @range:		fstrim_range describing the range to trim; start is the
+ *			first byte to trim, len the number of bytes to trim from
+ *			start, and minlen the minimum extent length, all in bytes
+ *
+ * ext3_trim_fs goes through all allocation groups containing bytes from
+ * start to start+len. For each such group the ext3_trim_all_free function
+ * is invoked to trim all free space.
+ */
+int ext3_trim_fs(struct super_block *sb, struct fstrim_range *range)
+{
+	ext3_grpblk_t last_block, first_block, free_blocks;
+	unsigned long first_group, last_group;
+	unsigned long group, ngroups;
+	struct ext3_group_desc *gdp;
+	struct ext3_super_block *es = EXT3_SB(sb)->s_es;
+	uint64_t start, len, minlen, trimmed;
+	ext3_fsblk_t max_blks = le32_to_cpu(es->s_blocks_count);
+	int ret = 0;
+
+	start = range->start >> sb->s_blocksize_bits;
+	len = range->len >> sb->s_blocksize_bits;
+	minlen = range->minlen >> sb->s_blocksize_bits;
+	trimmed = 0;
+
+	if (unlikely(minlen > EXT3_BLOCKS_PER_GROUP(sb)))
+		return -EINVAL;
+	if (start >= max_blks)
+		goto out;
+	if (start < le32_to_cpu(es->s_first_data_block)) {
+		len -= le32_to_cpu(es->s_first_data_block) - start;
+		start = le32_to_cpu(es->s_first_data_block);
+	}
+	if (start + len > max_blks)
+		len = max_blks - start;
+
+	ngroups = EXT3_SB(sb)->s_groups_count;
+	smp_rmb();
+
+	/* Determine first and last group to examine based on start and len */
+	ext3_get_group_no_and_offset(sb, (ext3_fsblk_t) start,
+				     &first_group, &first_block);
+	ext3_get_group_no_and_offset(sb, (ext3_fsblk_t) (start + len),
+				     &last_group, &last_block);
+	last_group = (last_group > ngroups - 1) ? ngroups - 1 : last_group;
+	last_block = EXT3_BLOCKS_PER_GROUP(sb);
+
+	if (first_group > last_group)
+		return -EINVAL;
+
+	for (group = first_group; group <= last_group; group++) {
+		gdp = ext3_get_group_desc(sb, group, NULL);
+		if (!gdp)
+			break;
+
+		free_blocks = le16_to_cpu(gdp->bg_free_blocks_count);
+		if (free_blocks < minlen)
+			continue;
+
+		if (len >= EXT3_BLOCKS_PER_GROUP(sb))
+			len -= (EXT3_BLOCKS_PER_GROUP(sb) - first_block);
+		else
+			last_block = first_block + len;
+
+		ret = ext3_trim_all_free(sb, group, first_block,
+					last_block, minlen);
+		if (ret < 0)
+			break;
+
+		trimmed += ret;
+		first_block = 0;
+	}
+
+	if (ret >= 0)
+		ret = 0;
+
+out:
+	range->len = trimmed * sb->s_blocksize;
+
+	return ret;
+}
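The <linux/blkdev.h> include added at the top of balloc.c is what provides sb_issue_discard(), which ext3_trim_all_free() uses above to turn each claimed extent into a discard (TRIM) request against the underlying device; everything else in the new code is ordinary bitmap and journal bookkeeping.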
diff --git a/fs/ext3/dir.c b/fs/ext3/dir.c
index e2e72c3..34f0a07 100644
--- a/fs/ext3/dir.c
+++ b/fs/ext3/dir.c
@@ -69,25 +69,26 @@
 	const char * error_msg = NULL;
 	const int rlen = ext3_rec_len_from_disk(de->rec_len);
 
-	if (rlen < EXT3_DIR_REC_LEN(1))
+	if (unlikely(rlen < EXT3_DIR_REC_LEN(1)))
 		error_msg = "rec_len is smaller than minimal";
-	else if (rlen % 4 != 0)
+	else if (unlikely(rlen % 4 != 0))
 		error_msg = "rec_len % 4 != 0";
-	else if (rlen < EXT3_DIR_REC_LEN(de->name_len))
+	else if (unlikely(rlen < EXT3_DIR_REC_LEN(de->name_len)))
 		error_msg = "rec_len is too small for name_len";
-	else if (((char *) de - bh->b_data) + rlen > dir->i_sb->s_blocksize)
+	else if (unlikely((((char *) de - bh->b_data) + rlen > dir->i_sb->s_blocksize)))
 		error_msg = "directory entry across blocks";
-	else if (le32_to_cpu(de->inode) >
-			le32_to_cpu(EXT3_SB(dir->i_sb)->s_es->s_inodes_count))
+	else if (unlikely(le32_to_cpu(de->inode) >
+			le32_to_cpu(EXT3_SB(dir->i_sb)->s_es->s_inodes_count)))
 		error_msg = "inode out of bounds";
 
-	if (error_msg != NULL)
+	if (unlikely(error_msg != NULL))
 		ext3_error (dir->i_sb, function,
 			"bad entry in directory #%lu: %s - "
 			"offset=%lu, inode=%lu, rec_len=%d, name_len=%d",
 			dir->i_ino, error_msg, offset,
 			(unsigned long) le32_to_cpu(de->inode),
 			rlen, de->name_len);
+
 	return error_msg == NULL ? 1 : 0;
 }
 
diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c
index a958061..ae94f6d 100644
--- a/fs/ext3/inode.c
+++ b/fs/ext3/inode.c
@@ -2145,13 +2145,15 @@
 	if (try_to_extend_transaction(handle, inode)) {
 		if (bh) {
 			BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
-			ext3_journal_dirty_metadata(handle, bh);
+			if (ext3_journal_dirty_metadata(handle, bh))
+				return;
 		}
 		ext3_mark_inode_dirty(handle, inode);
 		truncate_restart_transaction(handle, inode);
 		if (bh) {
 			BUFFER_TRACE(bh, "retaking write access");
-			ext3_journal_get_write_access(handle, bh);
+			if (ext3_journal_get_write_access(handle, bh))
+				return;
 		}
 	}
 
diff --git a/fs/ext3/ioctl.c b/fs/ext3/ioctl.c
index 8897481..fc080dd 100644
--- a/fs/ext3/ioctl.c
+++ b/fs/ext3/ioctl.c
@@ -276,7 +276,29 @@
 		mnt_drop_write(filp->f_path.mnt);
 		return err;
 	}
+	case FITRIM: {
 
+		struct super_block *sb = inode->i_sb;
+		struct fstrim_range range;
+		int ret = 0;
+
+		if (!capable(CAP_SYS_ADMIN))
+			return -EPERM;
+
+		if (copy_from_user(&range, (struct fstrim_range *)arg,
+				   sizeof(range)))
+			return -EFAULT;
+
+		ret = ext3_trim_fs(sb, &range);
+		if (ret < 0)
+			return ret;
+
+		if (copy_to_user((struct fstrim_range *)arg, &range,
+				 sizeof(range)))
+			return -EFAULT;
+
+		return 0;
+	}
 
 	default:
 		return -ENOTTY;
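The new FITRIM case wires ext3_trim_fs() up to the generic discard ioctl, which takes a struct fstrim_range with start, len and minlen expressed in bytes and returns the number of bytes actually trimmed back in len. A rough sketch of a user-space caller, assuming a hypothetical mount point of /mnt:

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/fs.h>		/* FITRIM, struct fstrim_range */

	int main(void)
	{
		struct fstrim_range range = {
			.start	= 0,
			.len	= ~0ULL,	/* whole filesystem */
			.minlen	= 64 * 1024,	/* ignore tiny extents */
		};
		int fd = open("/mnt", O_RDONLY);

		if (fd < 0) {
			perror("open");
			return 1;
		}
		if (ioctl(fd, FITRIM, &range) < 0) {
			perror("FITRIM");
			return 1;
		}
		close(fd);
		printf("trimmed %llu bytes\n",
		       (unsigned long long)range.len);
		return 0;
	}

Since the handler above checks CAP_SYS_ADMIN, such a caller needs root (or that capability) to succeed.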
diff --git a/fs/ext3/namei.c b/fs/ext3/namei.c
index bce9dce..b27ba71 100644
--- a/fs/ext3/namei.c
+++ b/fs/ext3/namei.c
@@ -858,6 +858,7 @@
 	struct buffer_head * bh_use[NAMEI_RA_SIZE];
 	struct buffer_head * bh, *ret = NULL;
 	unsigned long start, block, b;
+	const u8 *name = entry->name;
 	int ra_max = 0;		/* Number of bh's in the readahead
 				   buffer, bh_use[] */
 	int ra_ptr = 0;		/* Current index into readahead
@@ -871,6 +872,16 @@
 	namelen = entry->len;
 	if (namelen > EXT3_NAME_LEN)
 		return NULL;
+	if ((namelen <= 2) && (name[0] == '.') &&
+	    (name[1] == '.' || name[1] == 0)) {
+		/*
+		 * "." or ".." will only be in the first block
+		 * NFS may look up ".."; "." should be handled by the VFS
+		 */
+		block = start = 0;
+		nblocks = 1;
+		goto restart;
+	}
 	if (is_dx(dir)) {
 		bh = ext3_dx_find_entry(dir, entry, res_dir, &err);
 		/*
@@ -961,55 +972,35 @@
 			struct qstr *entry, struct ext3_dir_entry_2 **res_dir,
 			int *err)
 {
-	struct super_block * sb;
+	struct super_block *sb = dir->i_sb;
 	struct dx_hash_info	hinfo;
-	u32 hash;
 	struct dx_frame frames[2], *frame;
-	struct ext3_dir_entry_2 *de, *top;
 	struct buffer_head *bh;
 	unsigned long block;
 	int retval;
-	int namelen = entry->len;
-	const u8 *name = entry->name;
 
-	sb = dir->i_sb;
-	/* NFS may look up ".." - look at dx_root directory block */
-	if (namelen > 2 || name[0] != '.'|| (namelen == 2 && name[1] != '.')) {
-		if (!(frame = dx_probe(entry, dir, &hinfo, frames, err)))
-			return NULL;
-	} else {
-		frame = frames;
-		frame->bh = NULL;			/* for dx_release() */
-		frame->at = (struct dx_entry *)frames;	/* hack for zero entry*/
-		dx_set_block(frame->at, 0);		/* dx_root block is 0 */
-	}
-	hash = hinfo.hash;
+	if (!(frame = dx_probe(entry, dir, &hinfo, frames, err)))
+		return NULL;
 	do {
 		block = dx_get_block(frame->at);
 		if (!(bh = ext3_bread (NULL,dir, block, 0, err)))
 			goto errout;
-		de = (struct ext3_dir_entry_2 *) bh->b_data;
-		top = (struct ext3_dir_entry_2 *) ((char *) de + sb->s_blocksize -
-				       EXT3_DIR_REC_LEN(0));
-		for (; de < top; de = ext3_next_entry(de)) {
-			int off = (block << EXT3_BLOCK_SIZE_BITS(sb))
-				  + ((char *) de - bh->b_data);
 
-			if (!ext3_check_dir_entry(__func__, dir, de, bh, off)) {
-				brelse(bh);
-				*err = ERR_BAD_DX_DIR;
-				goto errout;
-			}
-
-			if (ext3_match(namelen, name, de)) {
-				*res_dir = de;
-				dx_release(frames);
-				return bh;
-			}
+		retval = search_dirblock(bh, dir, entry,
+					 block << EXT3_BLOCK_SIZE_BITS(sb),
+					 res_dir);
+		if (retval == 1) {
+			dx_release(frames);
+			return bh;
 		}
-		brelse (bh);
+		brelse(bh);
+		if (retval == -1) {
+			*err = ERR_BAD_DX_DIR;
+			goto errout;
+		}
+
 		/* Check to see if we should continue to search */
-		retval = ext3_htree_next_block(dir, hash, frame,
+		retval = ext3_htree_next_block(dir, hinfo.hash, frame,
 					       frames, NULL);
 		if (retval < 0) {
 			ext3_warning(sb, __func__,
@@ -1047,7 +1038,7 @@
 			return ERR_PTR(-EIO);
 		}
 		inode = ext3_iget(dir->i_sb, ino);
-		if (unlikely(IS_ERR(inode))) {
+		if (IS_ERR(inode)) {
 			if (PTR_ERR(inode) == -ESTALE) {
 				ext3_error(dir->i_sb, __func__,
 						"deleted inode referenced: %lu",
@@ -1607,7 +1598,9 @@
 			if (err)
 				goto journal_error;
 		}
-		ext3_journal_dirty_metadata(handle, frames[0].bh);
+		err = ext3_journal_dirty_metadata(handle, frames[0].bh);
+		if (err)
+			goto journal_error;
 	}
 	de = do_split(handle, dir, &bh, frame, &hinfo, &err);
 	if (!de)
@@ -1644,8 +1637,13 @@
 		if (!ext3_check_dir_entry("ext3_delete_entry", dir, de, bh, i))
 			return -EIO;
 		if (de == de_del)  {
+			int err;
+
 			BUFFER_TRACE(bh, "get_write_access");
-			ext3_journal_get_write_access(handle, bh);
+			err = ext3_journal_get_write_access(handle, bh);
+			if (err)
+				goto journal_error;
+
 			if (pde)
 				pde->rec_len = ext3_rec_len_to_disk(
 					ext3_rec_len_from_disk(pde->rec_len) +
@@ -1654,7 +1652,12 @@
 				de->inode = 0;
 			dir->i_version++;
 			BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
-			ext3_journal_dirty_metadata(handle, bh);
+			err = ext3_journal_dirty_metadata(handle, bh);
+			if (err) {
+journal_error:
+				ext3_std_error(dir->i_sb, err);
+				return err;
+			}
 			return 0;
 		}
 		i += ext3_rec_len_from_disk(de->rec_len);
@@ -1762,7 +1765,7 @@
 {
 	handle_t *handle;
 	struct inode * inode;
-	struct buffer_head * dir_block;
+	struct buffer_head * dir_block = NULL;
 	struct ext3_dir_entry_2 * de;
 	int err, retries = 0;
 
@@ -1790,15 +1793,14 @@
 	inode->i_fop = &ext3_dir_operations;
 	inode->i_size = EXT3_I(inode)->i_disksize = inode->i_sb->s_blocksize;
 	dir_block = ext3_bread (handle, inode, 0, 1, &err);
-	if (!dir_block) {
-		drop_nlink(inode); /* is this nlink == 0? */
-		unlock_new_inode(inode);
-		ext3_mark_inode_dirty(handle, inode);
-		iput (inode);
-		goto out_stop;
-	}
+	if (!dir_block)
+		goto out_clear_inode;
+
 	BUFFER_TRACE(dir_block, "get_write_access");
-	ext3_journal_get_write_access(handle, dir_block);
+	err = ext3_journal_get_write_access(handle, dir_block);
+	if (err)
+		goto out_clear_inode;
+
 	de = (struct ext3_dir_entry_2 *) dir_block->b_data;
 	de->inode = cpu_to_le32(inode->i_ino);
 	de->name_len = 1;
@@ -1814,11 +1816,16 @@
 	ext3_set_de_type(dir->i_sb, de, S_IFDIR);
 	inode->i_nlink = 2;
 	BUFFER_TRACE(dir_block, "call ext3_journal_dirty_metadata");
-	ext3_journal_dirty_metadata(handle, dir_block);
-	brelse (dir_block);
-	ext3_mark_inode_dirty(handle, inode);
-	err = ext3_add_entry (handle, dentry, inode);
+	err = ext3_journal_dirty_metadata(handle, dir_block);
+	if (err)
+		goto out_clear_inode;
+
+	err = ext3_mark_inode_dirty(handle, inode);
+	if (!err)
+		err = ext3_add_entry (handle, dentry, inode);
+
 	if (err) {
+out_clear_inode:
 		inode->i_nlink = 0;
 		unlock_new_inode(inode);
 		ext3_mark_inode_dirty(handle, inode);
@@ -1827,10 +1834,14 @@
 	}
 	inc_nlink(dir);
 	ext3_update_dx_flag(dir);
-	ext3_mark_inode_dirty(handle, dir);
+	err = ext3_mark_inode_dirty(handle, dir);
+	if (err)
+		goto out_clear_inode;
+
 	d_instantiate(dentry, inode);
 	unlock_new_inode(inode);
 out_stop:
+	brelse(dir_block);
 	ext3_journal_stop(handle);
 	if (err == -ENOSPC && ext3_should_retry_alloc(dir->i_sb, &retries))
 		goto retry;
@@ -2353,7 +2364,9 @@
 			goto end_rename;
 	} else {
 		BUFFER_TRACE(new_bh, "get write access");
-		ext3_journal_get_write_access(handle, new_bh);
+		retval = ext3_journal_get_write_access(handle, new_bh);
+		if (retval)
+			goto journal_error;
 		new_de->inode = cpu_to_le32(old_inode->i_ino);
 		if (EXT3_HAS_INCOMPAT_FEATURE(new_dir->i_sb,
 					      EXT3_FEATURE_INCOMPAT_FILETYPE))
@@ -2362,7 +2375,9 @@
 		new_dir->i_ctime = new_dir->i_mtime = CURRENT_TIME_SEC;
 		ext3_mark_inode_dirty(handle, new_dir);
 		BUFFER_TRACE(new_bh, "call ext3_journal_dirty_metadata");
-		ext3_journal_dirty_metadata(handle, new_bh);
+		retval = ext3_journal_dirty_metadata(handle, new_bh);
+		if (retval)
+			goto journal_error;
 		brelse(new_bh);
 		new_bh = NULL;
 	}
@@ -2411,10 +2426,17 @@
 	ext3_update_dx_flag(old_dir);
 	if (dir_bh) {
 		BUFFER_TRACE(dir_bh, "get_write_access");
-		ext3_journal_get_write_access(handle, dir_bh);
+		retval = ext3_journal_get_write_access(handle, dir_bh);
+		if (retval)
+			goto journal_error;
 		PARENT_INO(dir_bh->b_data) = cpu_to_le32(new_dir->i_ino);
 		BUFFER_TRACE(dir_bh, "call ext3_journal_dirty_metadata");
-		ext3_journal_dirty_metadata(handle, dir_bh);
+		retval = ext3_journal_dirty_metadata(handle, dir_bh);
+		if (retval) {
+journal_error:
+			ext3_std_error(new_dir->i_sb, retval);
+			goto end_rename;
+		}
 		drop_nlink(old_dir);
 		if (new_inode) {
 			drop_nlink(new_inode);
diff --git a/fs/ext3/resize.c b/fs/ext3/resize.c
index e746d30..108b142 100644
--- a/fs/ext3/resize.c
+++ b/fs/ext3/resize.c
@@ -249,7 +249,11 @@
 		memcpy(gdb->b_data, sbi->s_group_desc[i]->b_data, gdb->b_size);
 		set_buffer_uptodate(gdb);
 		unlock_buffer(gdb);
-		ext3_journal_dirty_metadata(handle, gdb);
+		err = ext3_journal_dirty_metadata(handle, gdb);
+		if (err) {
+			brelse(gdb);
+			goto exit_bh;
+		}
 		ext3_set_bit(bit, bh->b_data);
 		brelse(gdb);
 	}
@@ -269,7 +273,11 @@
 			err = PTR_ERR(gdb);
 			goto exit_bh;
 		}
-		ext3_journal_dirty_metadata(handle, gdb);
+		err = ext3_journal_dirty_metadata(handle, gdb);
+		if (err) {
+			brelse(gdb);
+			goto exit_bh;
+		}
 		ext3_set_bit(bit, bh->b_data);
 		brelse(gdb);
 	}
@@ -295,7 +303,11 @@
 			err = PTR_ERR(it);
 			goto exit_bh;
 		}
-		ext3_journal_dirty_metadata(handle, it);
+		err = ext3_journal_dirty_metadata(handle, it);
+		if (err) {
+			brelse(it);
+			goto exit_bh;
+		}
 		brelse(it);
 		ext3_set_bit(bit, bh->b_data);
 	}
@@ -306,7 +318,9 @@
 
 	mark_bitmap_end(input->blocks_count, EXT3_BLOCKS_PER_GROUP(sb),
 			bh->b_data);
-	ext3_journal_dirty_metadata(handle, bh);
+	err = ext3_journal_dirty_metadata(handle, bh);
+	if (err)
+		goto exit_bh;
 	brelse(bh);
 
 	/* Mark unused entries in inode bitmap used */
@@ -319,7 +333,7 @@
 
 	mark_bitmap_end(EXT3_INODES_PER_GROUP(sb), EXT3_BLOCKS_PER_GROUP(sb),
 			bh->b_data);
-	ext3_journal_dirty_metadata(handle, bh);
+	err = ext3_journal_dirty_metadata(handle, bh);
 exit_bh:
 	brelse(bh);
 
@@ -503,12 +517,19 @@
 	 * reserved inode, and will become GDT blocks (primary and backup).
 	 */
 	data[gdb_num % EXT3_ADDR_PER_BLOCK(sb)] = 0;
-	ext3_journal_dirty_metadata(handle, dind);
+	err = ext3_journal_dirty_metadata(handle, dind);
+	if (err)
+		goto exit_group_desc;
 	brelse(dind);
+	dind = NULL;
 	inode->i_blocks -= (gdbackups + 1) * sb->s_blocksize >> 9;
-	ext3_mark_iloc_dirty(handle, inode, &iloc);
+	err = ext3_mark_iloc_dirty(handle, inode, &iloc);
+	if (err)
+		goto exit_group_desc;
 	memset((*primary)->b_data, 0, sb->s_blocksize);
-	ext3_journal_dirty_metadata(handle, *primary);
+	err = ext3_journal_dirty_metadata(handle, *primary);
+	if (err)
+		goto exit_group_desc;
 
 	o_group_desc = EXT3_SB(sb)->s_group_desc;
 	memcpy(n_group_desc, o_group_desc,
@@ -519,10 +540,14 @@
 	kfree(o_group_desc);
 
 	le16_add_cpu(&es->s_reserved_gdt_blocks, -1);
-	ext3_journal_dirty_metadata(handle, EXT3_SB(sb)->s_sbh);
+	err = ext3_journal_dirty_metadata(handle, EXT3_SB(sb)->s_sbh);
+	if (err)
+		goto exit_inode;
 
 	return 0;
 
+exit_group_desc:
+	kfree(n_group_desc);
 exit_inode:
 	//ext3_journal_release_buffer(handle, iloc.bh);
 	brelse(iloc.bh);
@@ -706,16 +731,20 @@
 		}
 		ext3_debug("update metadata backup %#04lx\n",
 			  (unsigned long)bh->b_blocknr);
-		if ((err = ext3_journal_get_write_access(handle, bh)))
+		if ((err = ext3_journal_get_write_access(handle, bh))) {
+			brelse(bh);
 			break;
+		}
 		lock_buffer(bh);
 		memcpy(bh->b_data, data, size);
 		if (rest)
 			memset(bh->b_data + size, 0, rest);
 		set_buffer_uptodate(bh);
 		unlock_buffer(bh);
-		ext3_journal_dirty_metadata(handle, bh);
+		err = ext3_journal_dirty_metadata(handle, bh);
 		brelse(bh);
+		if (err)
+			break;
 	}
 	if ((err2 = ext3_journal_stop(handle)) && !err)
 		err = err2;
@@ -922,7 +951,9 @@
 	/* Update the global fs size fields */
 	sbi->s_groups_count++;
 
-	ext3_journal_dirty_metadata(handle, primary);
+	err = ext3_journal_dirty_metadata(handle, primary);
+	if (err)
+		goto exit_journal;
 
 	/* Update the reserved block counts only once the new group is
 	 * active. */
@@ -934,7 +965,7 @@
 	percpu_counter_add(&sbi->s_freeinodes_counter,
 			   EXT3_INODES_PER_GROUP(sb));
 
-	ext3_journal_dirty_metadata(handle, sbi->s_sbh);
+	err = ext3_journal_dirty_metadata(handle, sbi->s_sbh);
 
 exit_journal:
 	mutex_unlock(&sbi->s_resize_lock);
@@ -1064,8 +1095,14 @@
 		goto exit_put;
 	}
 	es->s_blocks_count = cpu_to_le32(o_blocks_count + add);
-	ext3_journal_dirty_metadata(handle, EXT3_SB(sb)->s_sbh);
+	err = ext3_journal_dirty_metadata(handle, EXT3_SB(sb)->s_sbh);
 	mutex_unlock(&EXT3_SB(sb)->s_resize_lock);
+	if (err) {
+		ext3_warning(sb, __func__,
+			     "error %d on journal dirty metadata", err);
+		ext3_journal_stop(handle);
+		goto exit_put;
+	}
 	ext3_debug("freeing blocks "E3FSBLK" through "E3FSBLK"\n",
 		   o_blocks_count, o_blocks_count + add);
 	ext3_free_blocks_sb(handle, sb, o_blocks_count, add, &freed_blocks);
diff --git a/fs/ext3/super.c b/fs/ext3/super.c
index 77ce161..b7d0554 100644
--- a/fs/ext3/super.c
+++ b/fs/ext3/super.c
@@ -143,12 +143,16 @@
 void ext3_msg(struct super_block *sb, const char *prefix,
 		const char *fmt, ...)
 {
+	struct va_format vaf;
 	va_list args;
 
 	va_start(args, fmt);
-	printk("%sEXT3-fs (%s): ", prefix, sb->s_id);
-	vprintk(fmt, args);
-	printk("\n");
+
+	vaf.fmt = fmt;
+	vaf.va = &args;
+
+	printk("%sEXT3-fs (%s): %pV\n", prefix, sb->s_id, &vaf);
+
 	va_end(args);
 }
 
@@ -195,15 +199,20 @@
 			sb->s_id);
 }
 
-void ext3_error (struct super_block * sb, const char * function,
-		 const char * fmt, ...)
+void ext3_error(struct super_block *sb, const char *function,
+		const char *fmt, ...)
 {
+	struct va_format vaf;
 	va_list args;
 
 	va_start(args, fmt);
-	printk(KERN_CRIT "EXT3-fs error (device %s): %s: ",sb->s_id, function);
-	vprintk(fmt, args);
-	printk("\n");
+
+	vaf.fmt = fmt;
+	vaf.va = &args;
+
+	printk(KERN_CRIT "EXT3-fs error (device %s): %s: %pV\n",
+	       sb->s_id, function, &vaf);
+
 	va_end(args);
 
 	ext3_handle_error(sb);
@@ -274,15 +283,20 @@
  * case we take the easy way out and panic immediately.
  */
 
-void ext3_abort (struct super_block * sb, const char * function,
-		 const char * fmt, ...)
+void ext3_abort(struct super_block *sb, const char *function,
+		 const char *fmt, ...)
 {
+	struct va_format vaf;
 	va_list args;
 
 	va_start(args, fmt);
-	printk(KERN_CRIT "EXT3-fs (%s): error: %s: ", sb->s_id, function);
-	vprintk(fmt, args);
-	printk("\n");
+
+	vaf.fmt = fmt;
+	vaf.va = &args;
+
+	printk(KERN_CRIT "EXT3-fs (%s): error: %s: %pV\n",
+	       sb->s_id, function, &vaf);
+
 	va_end(args);
 
 	if (test_opt(sb, ERRORS_PANIC))
@@ -300,16 +314,20 @@
 		journal_abort(EXT3_SB(sb)->s_journal, -EIO);
 }
 
-void ext3_warning (struct super_block * sb, const char * function,
-		   const char * fmt, ...)
+void ext3_warning(struct super_block *sb, const char *function,
+		  const char *fmt, ...)
 {
+	struct va_format vaf;
 	va_list args;
 
 	va_start(args, fmt);
-	printk(KERN_WARNING "EXT3-fs (%s): warning: %s: ",
-	       sb->s_id, function);
-	vprintk(fmt, args);
-	printk("\n");
+
+	vaf.fmt = fmt;
+	vaf.va = &args;
+
+	printk(KERN_WARNING "EXT3-fs (%s): warning: %s: %pV\n",
+	       sb->s_id, function, &vaf);
+
 	va_end(args);
 }
 
@@ -1848,13 +1866,15 @@
 		goto failed_mount;
 	}
 
-	if (generic_check_addressable(sb->s_blocksize_bits,
-				      le32_to_cpu(es->s_blocks_count))) {
+	err = generic_check_addressable(sb->s_blocksize_bits,
+					le32_to_cpu(es->s_blocks_count));
+	if (err) {
 		ext3_msg(sb, KERN_ERR,
 			"error: filesystem is too large to mount safely");
 		if (sizeof(sector_t) < 8)
 			ext3_msg(sb, KERN_ERR,
 				"error: CONFIG_LBDAF not enabled");
+		ret = err;
 		goto failed_mount;
 	}
 
@@ -2297,7 +2317,7 @@
 	EXT3_SB(sb)->s_journal = journal;
 	ext3_clear_journal_err(sb, es);
 
-	if (journal_devnum &&
+	if (!really_read_only && journal_devnum &&
 	    journal_devnum != le32_to_cpu(es->s_journal_dev)) {
 		es->s_journal_dev = cpu_to_le32(journal_devnum);
 
diff --git a/fs/ext3/xattr.c b/fs/ext3/xattr.c
index e69dc6d..32e6cc2 100644
--- a/fs/ext3/xattr.c
+++ b/fs/ext3/xattr.c
@@ -925,7 +925,7 @@
 /*
  * ext3_xattr_set_handle()
  *
- * Create, replace or remove an extended attribute for this inode. Buffer
+ * Create, replace or remove an extended attribute for this inode.  Value
  * is NULL to remove an existing extended attribute, and non-NULL to
  * either replace an existing extended attribute, or create a new extended
  * attribute. The flags XATTR_REPLACE and XATTR_CREATE
diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
index 14c3af2..adf96b8 100644
--- a/fs/ext4/balloc.c
+++ b/fs/ext4/balloc.c
@@ -592,7 +592,8 @@
 	 * Account for the allocated meta blocks.  We will never
 	 * fail EDQUOT for metdata, but we do account for it.
 	 */
-	if (!(*errp) && EXT4_I(inode)->i_delalloc_reserved_flag) {
+	if (!(*errp) &&
+	    ext4_test_inode_state(inode, EXT4_STATE_DELALLOC_RESERVED)) {
 		spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
 		EXT4_I(inode)->i_allocated_meta_blocks += ar.len;
 		spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c
index ece76fb..164c560 100644
--- a/fs/ext4/dir.c
+++ b/fs/ext4/dir.c
@@ -60,9 +60,13 @@
 	return (ext4_filetype_table[filetype]);
 }
 
-
+/*
+ * Return 0 if the directory entry is OK, and 1 if there is a problem
+ *
+ * Note: this is the opposite of what ext2 and ext3 historically returned...
+ */
 int __ext4_check_dir_entry(const char *function, unsigned int line,
-			   struct inode *dir,
+			   struct inode *dir, struct file *filp,
 			   struct ext4_dir_entry_2 *de,
 			   struct buffer_head *bh,
 			   unsigned int offset)
@@ -71,26 +75,37 @@
 	const int rlen = ext4_rec_len_from_disk(de->rec_len,
 						dir->i_sb->s_blocksize);
 
-	if (rlen < EXT4_DIR_REC_LEN(1))
+	if (unlikely(rlen < EXT4_DIR_REC_LEN(1)))
 		error_msg = "rec_len is smaller than minimal";
-	else if (rlen % 4 != 0)
+	else if (unlikely(rlen % 4 != 0))
 		error_msg = "rec_len % 4 != 0";
-	else if (rlen < EXT4_DIR_REC_LEN(de->name_len))
+	else if (unlikely(rlen < EXT4_DIR_REC_LEN(de->name_len)))
 		error_msg = "rec_len is too small for name_len";
-	else if (((char *) de - bh->b_data) + rlen > dir->i_sb->s_blocksize)
+	else if (unlikely(((char *) de - bh->b_data) + rlen >
+			  dir->i_sb->s_blocksize))
 		error_msg = "directory entry across blocks";
-	else if (le32_to_cpu(de->inode) >
-			le32_to_cpu(EXT4_SB(dir->i_sb)->s_es->s_inodes_count))
+	else if (unlikely(le32_to_cpu(de->inode) >
+			le32_to_cpu(EXT4_SB(dir->i_sb)->s_es->s_inodes_count)))
 		error_msg = "inode out of bounds";
+	else
+		return 0;
 
-	if (error_msg != NULL)
-		ext4_error_inode(dir, function, line, bh->b_blocknr,
-			"bad entry in directory: %s - "
-			"offset=%u(%u), inode=%u, rec_len=%d, name_len=%d",
-			error_msg, (unsigned) (offset%bh->b_size), offset,
-			le32_to_cpu(de->inode),
-			rlen, de->name_len);
-	return error_msg == NULL ? 1 : 0;
+	if (filp)
+		ext4_error_file(filp, function, line, bh ? bh->b_blocknr : 0,
+				"bad entry in directory: %s - offset=%u(%u), "
+				"inode=%u, rec_len=%d, name_len=%d",
+				error_msg, (unsigned) (offset%bh->b_size),
+				offset, le32_to_cpu(de->inode),
+				rlen, de->name_len);
+	else
+		ext4_error_inode(dir, function, line, bh ? bh->b_blocknr : 0,
+				"bad entry in directory: %s - offset=%u(%u), "
+				"inode=%u, rec_len=%d, name_len=%d",
+				error_msg, (unsigned) (offset%bh->b_size),
+				offset, le32_to_cpu(de->inode),
+				rlen, de->name_len);
+
+	return 1;
 }
 
 static int ext4_readdir(struct file *filp,
@@ -152,8 +167,9 @@
 		 */
 		if (!bh) {
 			if (!dir_has_error) {
-				EXT4_ERROR_INODE(inode, "directory "
-					   "contains a hole at offset %Lu",
+				EXT4_ERROR_FILE(filp, 0,
+						"directory contains a "
+						"hole at offset %llu",
 					   (unsigned long long) filp->f_pos);
 				dir_has_error = 1;
 			}
@@ -194,8 +210,8 @@
 		while (!error && filp->f_pos < inode->i_size
 		       && offset < sb->s_blocksize) {
 			de = (struct ext4_dir_entry_2 *) (bh->b_data + offset);
-			if (!ext4_check_dir_entry(inode, de,
-						  bh, offset)) {
+			if (ext4_check_dir_entry(inode, filp, de,
+						 bh, offset)) {
 				/*
 				 * On error, skip the f_pos to the next block
 				 */
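
The rewritten __ext4_check_dir_entry() above also flips the historical return convention: 0 now means the entry is sane and 1 means it is corrupt, and the report goes through either the file-based or the inode-based error helper depending on what the caller has. Below is a rough userspace sketch of the same rec_len sanity checks; the struct layout, block size and minimum-length macro are simplified stand-ins, not the on-disk ext4 format.

    #include <stdio.h>
    #include <stdint.h>

    /* Simplified stand-ins for the on-disk directory entry layout. */
    struct dir_entry {
        uint32_t inode;     /* 0 means the entry is unused */
        uint16_t rec_len;   /* distance, in bytes, to the next entry */
        uint8_t  name_len;
    };

    #define BLOCK_SIZE            4096u
    #define MIN_REC_LEN(name_len) (8u + (((name_len) + 3u) & ~3u))

    /* Return 0 if the entry looks sane, 1 if it is corrupt,
     * the same convention as the new __ext4_check_dir_entry(). */
    static int check_dir_entry(const struct dir_entry *de, unsigned int offset)
    {
        const char *msg = NULL;
        unsigned int rlen = de->rec_len;

        if (rlen < MIN_REC_LEN(1))
            msg = "rec_len is smaller than minimal";
        else if (rlen % 4 != 0)
            msg = "rec_len % 4 != 0";
        else if (rlen < MIN_REC_LEN(de->name_len))
            msg = "rec_len is too small for name_len";
        else if (offset + rlen > BLOCK_SIZE)
            msg = "directory entry across blocks";
        else
            return 0;

        fprintf(stderr, "bad entry at offset %u: %s\n", offset, msg);
        return 1;
    }

    int main(void)
    {
        struct dir_entry good = { 2, 16, 1 };
        struct dir_entry bad  = { 3, 20, 40 };  /* rec_len too small for the name */

        printf("good -> %d, bad -> %d\n",
               check_dir_entry(&good, 0), check_dir_entry(&bad, 16));
        return 0;
    }
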
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 94ce3d7..1de65f5 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -62,8 +62,8 @@
 #define EXT4_ERROR_INODE_BLOCK(inode, block, fmt, a...)			\
 	ext4_error_inode((inode), __func__, __LINE__, (block), (fmt), ## a)
 
-#define EXT4_ERROR_FILE(file, fmt, a...)	\
-	ext4_error_file(__func__, __LINE__, (file), (fmt), ## a)
+#define EXT4_ERROR_FILE(file, block, fmt, a...)				\
+	ext4_error_file((file), __func__, __LINE__, (block), (fmt), ## a)
 
 /* data type for block offset of block group */
 typedef int ext4_grpblk_t;
@@ -561,23 +561,7 @@
 #define EXT4_IOC32_SETVERSION_OLD	FS_IOC32_SETVERSION
 #endif
 
-
-/*
- *  Mount options
- */
-struct ext4_mount_options {
-	unsigned long s_mount_opt;
-	uid_t s_resuid;
-	gid_t s_resgid;
-	unsigned long s_commit_interval;
-	u32 s_min_batch_time, s_max_batch_time;
-#ifdef CONFIG_QUOTA
-	int s_jquota_fmt;
-	char *s_qf_names[MAXQUOTAS];
-#endif
-};
-
-/* Max physical block we can addres w/o extents */
+/* Max physical block we can address w/o extents */
 #define EXT4_MAX_BLOCK_FILE_PHYS	0xFFFFFFFF
 
 /*
@@ -709,6 +693,8 @@
 	if (EXT4_FITS_IN_INODE(raw_inode, EXT4_I(inode), xtime ## _extra))     \
 		ext4_decode_extra_time(&(inode)->xtime,			       \
 				       raw_inode->xtime ## _extra);	       \
+	else								       \
+		(inode)->xtime.tv_nsec = 0;				       \
 } while (0)
 
 #define EXT4_EINODE_GET_XTIME(xtime, einode, raw_inode)			       \
@@ -719,6 +705,8 @@
 	if (EXT4_FITS_IN_INODE(raw_inode, einode, xtime ## _extra))	       \
 		ext4_decode_extra_time(&(einode)->xtime,		       \
 				       raw_inode->xtime ## _extra);	       \
+	else								       \
+		(einode)->xtime.tv_nsec = 0;				       \
 } while (0)
 
 #define i_disk_version osd1.linux1.l_i_version
@@ -750,12 +738,13 @@
 
 /*
  * storage for cached extent
+ * If ec_len == 0, then the cache is invalid.
+ * If ec_start == 0, then the cache represents a gap (null mapping)
  */
 struct ext4_ext_cache {
 	ext4_fsblk_t	ec_start;
 	ext4_lblk_t	ec_block;
 	__u32		ec_len; /* must be 32bit to return holes */
-	__u32		ec_type;
 };
 
 /*
@@ -774,10 +763,12 @@
 	 * near to their parent directory's inode.
 	 */
 	ext4_group_t	i_block_group;
+	ext4_lblk_t	i_dir_start_lookup;
+#if (BITS_PER_LONG < 64)
 	unsigned long	i_state_flags;		/* Dynamic state flags */
+#endif
 	unsigned long	i_flags;
 
-	ext4_lblk_t		i_dir_start_lookup;
 #ifdef CONFIG_EXT4_FS_XATTR
 	/*
 	 * Extended attributes can be read independently of the main file
@@ -820,7 +811,7 @@
 	 */
 	struct rw_semaphore i_data_sem;
 	struct inode vfs_inode;
-	struct jbd2_inode jinode;
+	struct jbd2_inode *jinode;
 
 	struct ext4_ext_cache i_cached_extent;
 	/*
@@ -840,14 +831,12 @@
 	unsigned int i_reserved_data_blocks;
 	unsigned int i_reserved_meta_blocks;
 	unsigned int i_allocated_meta_blocks;
-	unsigned short i_delalloc_reserved_flag;
-	sector_t i_da_metadata_calc_last_lblock;
+	ext4_lblk_t i_da_metadata_calc_last_lblock;
 	int i_da_metadata_calc_len;
 
 	/* on-disk additional length */
 	__u16 i_extra_isize;
 
-	spinlock_t i_block_reservation_lock;
 #ifdef CONFIG_QUOTA
 	/* quota space reservation, managed internally by quota code */
 	qsize_t i_reserved_quota;
@@ -856,9 +845,11 @@
 	/* completed IOs that might need unwritten extents handling */
 	struct list_head i_completed_io_list;
 	spinlock_t i_completed_io_lock;
+	atomic_t i_ioend_count;	/* Number of outstanding io_end structs */
 	/* current io_end structure for async DIO write*/
 	ext4_io_end_t *cur_aio_dio;
-	atomic_t i_ioend_count;	/* Number of outstanding io_end structs */
+
+	spinlock_t i_block_reservation_lock;
 
 	/*
 	 * Transactions that contain inode's metadata needed to complete
@@ -917,11 +908,20 @@
 #define EXT4_MOUNT_DISCARD		0x40000000 /* Issue DISCARD requests */
 #define EXT4_MOUNT_INIT_INODE_TABLE	0x80000000 /* Initialize uninitialized itables */
 
-#define clear_opt(o, opt)		o &= ~EXT4_MOUNT_##opt
-#define set_opt(o, opt)			o |= EXT4_MOUNT_##opt
+#define clear_opt(sb, opt)		EXT4_SB(sb)->s_mount_opt &= \
+						~EXT4_MOUNT_##opt
+#define set_opt(sb, opt)		EXT4_SB(sb)->s_mount_opt |= \
+						EXT4_MOUNT_##opt
 #define test_opt(sb, opt)		(EXT4_SB(sb)->s_mount_opt & \
 					 EXT4_MOUNT_##opt)
 
+#define clear_opt2(sb, opt)		EXT4_SB(sb)->s_mount_opt2 &= \
+						~EXT4_MOUNT2_##opt
+#define set_opt2(sb, opt)		EXT4_SB(sb)->s_mount_opt2 |= \
+						EXT4_MOUNT2_##opt
+#define test_opt2(sb, opt)		(EXT4_SB(sb)->s_mount_opt2 & \
+					 EXT4_MOUNT2_##opt)
+
 #define ext4_set_bit			ext2_set_bit
 #define ext4_set_bit_atomic		ext2_set_bit_atomic
 #define ext4_clear_bit			ext2_clear_bit
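
The redefined set_opt()/clear_opt() above now take the superblock itself, and a parallel *_opt2 family is added for a second flag word (s_mount_opt2). A standalone sketch of the same token-pasting idiom, using made-up flag names and a plain struct in place of ext4_sb_info:

    #include <stdio.h>

    /* Illustrative flag values; not the real ext4 mount-option bits. */
    #define MOUNT_BARRIER       0x0001
    #define MOUNT_DISCARD       0x0002
    #define MOUNT2_INIT_ITABLE  0x0001

    struct sb_info {
        unsigned int mount_opt;
        unsigned int mount_opt2;
    };

    /* The option name is pasted onto the prefix at compile time,
     * so set_opt(sbi, DISCARD) expands to use MOUNT_DISCARD. */
    #define set_opt(sbi, opt)    ((sbi)->mount_opt |= MOUNT_##opt)
    #define clear_opt(sbi, opt)  ((sbi)->mount_opt &= ~MOUNT_##opt)
    #define test_opt(sbi, opt)   ((sbi)->mount_opt & MOUNT_##opt)

    #define set_opt2(sbi, opt)   ((sbi)->mount_opt2 |= MOUNT2_##opt)
    #define test_opt2(sbi, opt)  ((sbi)->mount_opt2 & MOUNT2_##opt)

    int main(void)
    {
        struct sb_info sbi = { 0, 0 };

        set_opt(&sbi, BARRIER);
        set_opt(&sbi, DISCARD);
        clear_opt(&sbi, BARRIER);
        set_opt2(&sbi, INIT_ITABLE);

        printf("barrier=%d discard=%d init_itable=%d\n",
               !!test_opt(&sbi, BARRIER),
               !!test_opt(&sbi, DISCARD),
               !!test_opt2(&sbi, INIT_ITABLE));
        return 0;
    }
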
@@ -1087,6 +1087,7 @@
 	struct ext4_super_block *s_es;	/* Pointer to the super block in the buffer */
 	struct buffer_head **s_group_desc;
 	unsigned int s_mount_opt;
+	unsigned int s_mount_opt2;
 	unsigned int s_mount_flags;
 	ext4_fsblk_t s_sb_block;
 	uid_t s_resuid;
@@ -1237,24 +1238,39 @@
 	EXT4_STATE_EXT_MIGRATE,		/* Inode is migrating */
 	EXT4_STATE_DIO_UNWRITTEN,	/* need convert on dio done*/
 	EXT4_STATE_NEWENTRY,		/* File just added to dir */
+	EXT4_STATE_DELALLOC_RESERVED,	/* blks already reserved for delalloc */
 };
 
-#define EXT4_INODE_BIT_FNS(name, field)					\
+#define EXT4_INODE_BIT_FNS(name, field, offset)				\
 static inline int ext4_test_inode_##name(struct inode *inode, int bit)	\
 {									\
-	return test_bit(bit, &EXT4_I(inode)->i_##field);		\
+	return test_bit(bit + (offset), &EXT4_I(inode)->i_##field);	\
 }									\
 static inline void ext4_set_inode_##name(struct inode *inode, int bit)	\
 {									\
-	set_bit(bit, &EXT4_I(inode)->i_##field);			\
+	set_bit(bit + (offset), &EXT4_I(inode)->i_##field);		\
 }									\
 static inline void ext4_clear_inode_##name(struct inode *inode, int bit) \
 {									\
-	clear_bit(bit, &EXT4_I(inode)->i_##field);			\
+	clear_bit(bit + (offset), &EXT4_I(inode)->i_##field);		\
 }
 
-EXT4_INODE_BIT_FNS(flag, flags)
-EXT4_INODE_BIT_FNS(state, state_flags)
+EXT4_INODE_BIT_FNS(flag, flags, 0)
+#if (BITS_PER_LONG < 64)
+EXT4_INODE_BIT_FNS(state, state_flags, 0)
+
+static inline void ext4_clear_state_flags(struct ext4_inode_info *ei)
+{
+	(ei)->i_state_flags = 0;
+}
+#else
+EXT4_INODE_BIT_FNS(state, flags, 32)
+
+static inline void ext4_clear_state_flags(struct ext4_inode_info *ei)
+{
+	/* We depend on the fact that callers will set i_flags */
+}
+#endif
 #else
 /* Assume that user mode programs are passing in an ext4fs superblock, not
  * a kernel struct super_block.  This will allow us to call the feature-test
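
Here the dynamic state bits move into the upper 32 bits of the 64-bit i_flags word when BITS_PER_LONG >= 64, and keep their own i_state_flags word otherwise; ext4_clear_state_flags() papers over the difference. A minimal userspace sketch of that layout trick, assuming non-atomic bit helpers, illustrative bit numbers, and the word size probed via ULONG_MAX:

    #include <stdio.h>
    #include <limits.h>

    struct inode_info {
    #if (ULONG_MAX == 0xffffffffUL)     /* 32-bit long: keep a separate word */
        unsigned long state_flags;
    #endif
        unsigned long flags;
    };

    /* Non-atomic stand-ins for the kernel's set_bit()/test_bit(). */
    #define BIT_SET(word, nr)   ((word) |= 1UL << (nr))
    #define BIT_TEST(word, nr)  (!!((word) & (1UL << (nr))))

    #define INODE_BIT_FNS(name, field, offset)                          \
    static int test_inode_##name(struct inode_info *ei, int bit)        \
    { return BIT_TEST(ei->field, bit + (offset)); }                     \
    static void set_inode_##name(struct inode_info *ei, int bit)        \
    { BIT_SET(ei->field, bit + (offset)); }

    INODE_BIT_FNS(flag, flags, 0)
    #if (ULONG_MAX == 0xffffffffUL)
    INODE_BIT_FNS(state, state_flags, 0)    /* 32-bit: own word, no offset */
    #else
    INODE_BIT_FNS(state, flags, 32)         /* 64-bit: upper half of flags */
    #endif

    int main(void)
    {
        struct inode_info ei = { 0 };

        set_inode_flag(&ei, 3);     /* persistent flag bit 3 */
        set_inode_state(&ei, 3);    /* dynamic state bit 3 */
        printf("flag3=%d state3=%d flags=%#lx\n",
               test_inode_flag(&ei, 3), test_inode_state(&ei, 3), ei.flags);
        return 0;
    }
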
@@ -1642,10 +1658,12 @@
 
 /* dir.c */
 extern int __ext4_check_dir_entry(const char *, unsigned int, struct inode *,
+				  struct file *,
 				  struct ext4_dir_entry_2 *,
 				  struct buffer_head *, unsigned int);
-#define ext4_check_dir_entry(dir, de, bh, offset) \
-	__ext4_check_dir_entry(__func__, __LINE__, (dir), (de), (bh), (offset))
+#define ext4_check_dir_entry(dir, filp, de, bh, offset)			\
+	unlikely(__ext4_check_dir_entry(__func__, __LINE__, (dir), (filp), \
+					(de), (bh), (offset)))
 extern int ext4_htree_store_dirent(struct file *dir_file, __u32 hash,
 				    __u32 minor_hash,
 				    struct ext4_dir_entry_2 *dirent);
@@ -1653,6 +1671,7 @@
 
 /* fsync.c */
 extern int ext4_sync_file(struct file *, int);
+extern int ext4_flush_completed_IO(struct inode *);
 
 /* hash.c */
 extern int ext4fs_dirhash(const char *name, int len, struct
@@ -1752,8 +1771,8 @@
 			     ext4_fsblk_t, const char *, ...)
 	__attribute__ ((format (printf, 5, 6)));
 extern void ext4_error_file(struct file *, const char *, unsigned int,
-			    const char *, ...)
-	__attribute__ ((format (printf, 4, 5)));
+			    ext4_fsblk_t, const char *, ...)
+	__attribute__ ((format (printf, 5, 6)));
 extern void __ext4_std_error(struct super_block *, const char *,
 			     unsigned int, int);
 extern void __ext4_abort(struct super_block *, const char *, unsigned int,
diff --git a/fs/ext4/ext4_extents.h b/fs/ext4/ext4_extents.h
index 28ce70f..2e29abb 100644
--- a/fs/ext4/ext4_extents.h
+++ b/fs/ext4/ext4_extents.h
@@ -119,10 +119,6 @@
  * structure for external API
  */
 
-#define EXT4_EXT_CACHE_NO	0
-#define EXT4_EXT_CACHE_GAP	1
-#define EXT4_EXT_CACHE_EXTENT	2
-
 /*
  * to be called by ext4_ext_walk_space()
  * negative retcode - error
@@ -197,7 +193,7 @@
 static inline void
 ext4_ext_invalidate_cache(struct inode *inode)
 {
-	EXT4_I(inode)->i_cached_extent.ec_type = EXT4_EXT_CACHE_NO;
+	EXT4_I(inode)->i_cached_extent.ec_len = 0;
 }
 
 static inline void ext4_ext_mark_uninitialized(struct ext4_extent *ext)
@@ -278,7 +274,7 @@
 }
 
 extern int ext4_ext_calc_metadata_amount(struct inode *inode,
-					 sector_t lblocks);
+					 ext4_lblk_t lblocks);
 extern int ext4_extent_tree_init(handle_t *, struct inode *);
 extern int ext4_ext_calc_credits_for_single_extent(struct inode *inode,
 						   int num,
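
With the EXT4_EXT_CACHE_* constants gone, the one-entry extent cache encodes its state implicitly: ec_len == 0 marks the slot invalid (which is what ext4_ext_invalidate_cache() above now sets), and a valid slot whose ec_start is 0 describes a gap rather than a mapped extent. A small userspace sketch of that encoding, with simplified field types and no locking:

    #include <stdio.h>
    #include <stdint.h>

    struct ext_cache {
        uint64_t start;  /* physical start; 0 in a valid entry means a hole */
        uint32_t block;  /* first logical block covered */
        uint32_t len;    /* 0 means the cache slot is invalid */
    };

    static void cache_invalidate(struct ext_cache *c)
    {
        c->len = 0;
    }

    static void cache_store(struct ext_cache *c, uint32_t block,
                            uint32_t len, uint64_t start)
    {
        c->block = block;
        c->len = len;
        c->start = start;
    }

    /* Return 1 and fill *pblk (0 for a hole) if 'block' is covered, else 0. */
    static int cache_lookup(const struct ext_cache *c, uint32_t block,
                            uint64_t *pblk)
    {
        if (c->len == 0)
            return 0;                       /* invalid slot */
        if (block < c->block || block >= c->block + c->len)
            return 0;                       /* not covered */
        *pblk = c->start ? c->start + (block - c->block) : 0;
        return 1;
    }

    int main(void)
    {
        struct ext_cache c;
        uint64_t pblk;

        cache_invalidate(&c);
        cache_store(&c, 100, 8, 5000);      /* logical 100..107 -> 5000.. */
        if (cache_lookup(&c, 103, &pblk))
            printf("block 103 -> %llu\n", (unsigned long long)pblk);

        cache_store(&c, 200, 16, 0);        /* a cached hole */
        if (cache_lookup(&c, 210, &pblk))
            printf("block 210 is a hole (pblk=%llu)\n",
                   (unsigned long long)pblk);
        return 0;
    }
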
diff --git a/fs/ext4/ext4_jbd2.h b/fs/ext4/ext4_jbd2.h
index b0bd792..d8b992e 100644
--- a/fs/ext4/ext4_jbd2.h
+++ b/fs/ext4/ext4_jbd2.h
@@ -253,7 +253,7 @@
 static inline int ext4_jbd2_file_inode(handle_t *handle, struct inode *inode)
 {
 	if (ext4_handle_valid(handle))
-		return jbd2_journal_file_inode(handle, &EXT4_I(inode)->jinode);
+		return jbd2_journal_file_inode(handle, EXT4_I(inode)->jinode);
 	return 0;
 }
 
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 0554c48..c4068f6 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -117,11 +117,33 @@
 		struct ext4_extent *ex;
 		depth = path->p_depth;
 
-		/* try to predict block placement */
+		/*
+		 * Try to predict block placement assuming that we are
+		 * filling in a file which will eventually be
+		 * non-sparse --- i.e., in the case of libbfd writing
+		 * an ELF object's sections out-of-order but in a way
+		 * that eventually results in a contiguous object or
+		 * executable file, or some database extending a table
+		 * space file.  However, this is actually somewhat
+		 * non-ideal if we are writing a sparse file such as
+		 * qemu or KVM writing a raw image file that is going
+		 * to stay fairly sparse, since it will end up
+		 * fragmenting the file system's free space.  Maybe we
+		 * should have some heuristics or some way to allow
+		 * userspace to pass a hint to the file system,
+		 * especially if the latter case turns out to be
+		 * common.
+		 */
 		ex = path[depth].p_ext;
-		if (ex)
-			return (ext4_ext_pblock(ex) +
-				(block - le32_to_cpu(ex->ee_block)));
+		if (ex) {
+			ext4_fsblk_t ext_pblk = ext4_ext_pblock(ex);
+			ext4_lblk_t ext_block = le32_to_cpu(ex->ee_block);
+
+			if (block > ext_block)
+				return ext_pblk + (block - ext_block);
+			else
+				return ext_pblk - (ext_block - block);
+		}
 
 		/* it looks like index is empty;
 		 * try to find starting block from index itself */
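
The branch added above matters because block numbers are unsigned: when the target block lies before the extent found in the path, the old ext4_ext_pblock(ex) + (block - le32_to_cpu(ex->ee_block)) wrapped around instead of stepping backwards. A tiny standalone demonstration of the wrap and of the corrected arithmetic, using plain uint32_t/uint64_t stand-ins for the kernel types:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t ext_pblk = 10000;   /* physical start of the found extent */
        uint32_t ext_block = 500;    /* its logical start */
        uint32_t block = 200;        /* we want a goal for this earlier block */

        /* Old expression: (block - ext_block) wraps to a huge unsigned value. */
        uint64_t wrong = ext_pblk + (uint64_t)(uint32_t)(block - ext_block);

        /* New expression: branch on which side of the extent we are. */
        uint64_t right = (block > ext_block)
            ? ext_pblk + (block - ext_block)
            : ext_pblk - (ext_block - block);

        printf("wrong goal: %llu\n", (unsigned long long)wrong);
        printf("right goal: %llu\n", (unsigned long long)right);
        return 0;
    }
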
@@ -244,7 +266,7 @@
  * to allocate @blocks
  * Worst case is one block per extent
  */
-int ext4_ext_calc_metadata_amount(struct inode *inode, sector_t lblock)
+int ext4_ext_calc_metadata_amount(struct inode *inode, ext4_lblk_t lblock)
 {
 	struct ext4_inode_info *ei = EXT4_I(inode);
 	int idxs, num = 0;
@@ -1872,12 +1894,10 @@
 			cbex.ec_block = start;
 			cbex.ec_len = end - start;
 			cbex.ec_start = 0;
-			cbex.ec_type = EXT4_EXT_CACHE_GAP;
 		} else {
 			cbex.ec_block = le32_to_cpu(ex->ee_block);
 			cbex.ec_len = ext4_ext_get_actual_len(ex);
 			cbex.ec_start = ext4_ext_pblock(ex);
-			cbex.ec_type = EXT4_EXT_CACHE_EXTENT;
 		}
 
 		if (unlikely(cbex.ec_len == 0)) {
@@ -1917,13 +1937,12 @@
 
 static void
 ext4_ext_put_in_cache(struct inode *inode, ext4_lblk_t block,
-			__u32 len, ext4_fsblk_t start, int type)
+			__u32 len, ext4_fsblk_t start)
 {
 	struct ext4_ext_cache *cex;
 	BUG_ON(len == 0);
 	spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
 	cex = &EXT4_I(inode)->i_cached_extent;
-	cex->ec_type = type;
 	cex->ec_block = block;
 	cex->ec_len = len;
 	cex->ec_start = start;
@@ -1976,15 +1995,18 @@
 	}
 
 	ext_debug(" -> %u:%lu\n", lblock, len);
-	ext4_ext_put_in_cache(inode, lblock, len, 0, EXT4_EXT_CACHE_GAP);
+	ext4_ext_put_in_cache(inode, lblock, len, 0);
 }
 
+/*
+ * Return 0 if cache is invalid; 1 if the cache is valid
+ */
 static int
 ext4_ext_in_cache(struct inode *inode, ext4_lblk_t block,
 			struct ext4_extent *ex)
 {
 	struct ext4_ext_cache *cex;
-	int ret = EXT4_EXT_CACHE_NO;
+	int ret = 0;
 
 	/*
 	 * We borrow i_block_reservation_lock to protect i_cached_extent
@@ -1993,11 +2015,9 @@
 	cex = &EXT4_I(inode)->i_cached_extent;
 
 	/* has cache valid data? */
-	if (cex->ec_type == EXT4_EXT_CACHE_NO)
+	if (cex->ec_len == 0)
 		goto errout;
 
-	BUG_ON(cex->ec_type != EXT4_EXT_CACHE_GAP &&
-			cex->ec_type != EXT4_EXT_CACHE_EXTENT);
 	if (in_range(block, cex->ec_block, cex->ec_len)) {
 		ex->ee_block = cpu_to_le32(cex->ec_block);
 		ext4_ext_store_pblock(ex, cex->ec_start);
@@ -2005,7 +2025,7 @@
 		ext_debug("%u cached by %u:%u:%llu\n",
 				block,
 				cex->ec_block, cex->ec_len, cex->ec_start);
-		ret = cex->ec_type;
+		ret = 1;
 	}
 errout:
 	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
@@ -2825,14 +2845,14 @@
  * to an uninitialized extent.
  *
  * Writing to an uninitialized extent may result in splitting the uninitialized
- * extent into multiple /intialized unintialized extents (up to three)
+ * extent into multiple /initialized uninitialized extents (up to three)
  * There are three possibilities:
  *   a> There is no split required: Entire extent should be uninitialized
  *   b> Splits in two extents: Write is happening at either end of the extent
  *   c> Splits in three extents: Someone is writing in the middle of the extent
  *
  * One or more index blocks may be needed if the extent tree grows after
- * the unintialized extent split. To prevent ENOSPC occur at the IO
+ * the uninitialized extent split. To prevent ENOSPC occur at the IO
  * complete, we need to split the uninitialized extent before DIO submit
  * the IO. The uninitialized extent called at this time will be split
  * into three uninitialized extent(at most). After IO complete, the part
@@ -3082,7 +3102,7 @@
  * Handle EOFBLOCKS_FL flag, clearing it if necessary
  */
 static int check_eofblocks_fl(handle_t *handle, struct inode *inode,
-			      struct ext4_map_blocks *map,
+			      ext4_lblk_t lblk,
 			      struct ext4_ext_path *path,
 			      unsigned int len)
 {
@@ -3112,7 +3132,7 @@
 	 * this turns out to be false, we can bail out from this
 	 * function immediately.
 	 */
-	if (map->m_lblk + len < le32_to_cpu(last_ex->ee_block) +
+	if (lblk + len < le32_to_cpu(last_ex->ee_block) +
 	    ext4_ext_get_actual_len(last_ex))
 		return 0;
 	/*
@@ -3168,8 +3188,8 @@
 							path);
 		if (ret >= 0) {
 			ext4_update_inode_fsync_trans(handle, inode, 1);
-			err = check_eofblocks_fl(handle, inode, map, path,
-						 map->m_len);
+			err = check_eofblocks_fl(handle, inode, map->m_lblk,
+						 path, map->m_len);
 		} else
 			err = ret;
 		goto out2;
@@ -3199,7 +3219,8 @@
 	ret = ext4_ext_convert_to_initialized(handle, inode, map, path);
 	if (ret >= 0) {
 		ext4_update_inode_fsync_trans(handle, inode, 1);
-		err = check_eofblocks_fl(handle, inode, map, path, map->m_len);
+		err = check_eofblocks_fl(handle, inode, map->m_lblk, path,
+					 map->m_len);
 		if (err < 0)
 			goto out2;
 	}
@@ -3276,7 +3297,7 @@
 	struct ext4_extent_header *eh;
 	struct ext4_extent newex, *ex;
 	ext4_fsblk_t newblock;
-	int err = 0, depth, ret, cache_type;
+	int err = 0, depth, ret;
 	unsigned int allocated = 0;
 	struct ext4_allocation_request ar;
 	ext4_io_end_t *io = EXT4_I(inode)->cur_aio_dio;
@@ -3285,9 +3306,8 @@
 		  map->m_lblk, map->m_len, inode->i_ino);
 
 	/* check in cache */
-	cache_type = ext4_ext_in_cache(inode, map->m_lblk, &newex);
-	if (cache_type) {
-		if (cache_type == EXT4_EXT_CACHE_GAP) {
+	if (ext4_ext_in_cache(inode, map->m_lblk, &newex)) {
+		if (!newex.ee_start_lo && !newex.ee_start_hi) {
 			if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
 				/*
 				 * block isn't allocated yet and
@@ -3296,7 +3316,7 @@
 				goto out2;
 			}
 			/* we should allocate requested block */
-		} else if (cache_type == EXT4_EXT_CACHE_EXTENT) {
+		} else {
 			/* block is already allocated */
 			newblock = map->m_lblk
 				   - le32_to_cpu(newex.ee_block)
@@ -3305,8 +3325,6 @@
 			allocated = ext4_ext_get_actual_len(&newex) -
 				(map->m_lblk - le32_to_cpu(newex.ee_block));
 			goto out;
-		} else {
-			BUG();
 		}
 	}
 
@@ -3357,8 +3375,7 @@
 			/* Do not put uninitialized extent in the cache */
 			if (!ext4_ext_is_uninitialized(ex)) {
 				ext4_ext_put_in_cache(inode, ee_block,
-							ee_len, ee_start,
-							EXT4_EXT_CACHE_EXTENT);
+							ee_len, ee_start);
 				goto out;
 			}
 			ret = ext4_ext_handle_uninitialized_extents(handle,
@@ -3456,7 +3473,7 @@
 			map->m_flags |= EXT4_MAP_UNINIT;
 	}
 
-	err = check_eofblocks_fl(handle, inode, map, path, ar.len);
+	err = check_eofblocks_fl(handle, inode, map->m_lblk, path, ar.len);
 	if (err)
 		goto out2;
 
@@ -3490,8 +3507,7 @@
 	 * when it is _not_ an uninitialized extent.
 	 */
 	if ((flags & EXT4_GET_BLOCKS_UNINIT_EXT) == 0) {
-		ext4_ext_put_in_cache(inode, map->m_lblk, allocated, newblock,
-						EXT4_EXT_CACHE_EXTENT);
+		ext4_ext_put_in_cache(inode, map->m_lblk, allocated, newblock);
 		ext4_update_inode_fsync_trans(handle, inode, 1);
 	} else
 		ext4_update_inode_fsync_trans(handle, inode, 0);
@@ -3519,6 +3535,12 @@
 	int err = 0;
 
 	/*
+	 * finish any pending end_io work so we won't run the risk of
+	 * converting any truncated blocks to initialized later
+	 */
+	ext4_flush_completed_IO(inode);
+
+	/*
 	 * probably first extent we're gonna free will be last in block
 	 */
 	err = ext4_writepage_trans_blocks(inode);
@@ -3622,6 +3644,10 @@
 	struct ext4_map_blocks map;
 	unsigned int credits, blkbits = inode->i_blkbits;
 
+	/* We only support the FALLOC_FL_KEEP_SIZE mode */
+	if (mode && (mode != FALLOC_FL_KEEP_SIZE))
+		return -EOPNOTSUPP;
+
 	/*
 	 * currently supporting (pre)allocate mode for extent-based
 	 * files _only_
@@ -3767,7 +3793,7 @@
 
 	logical =  (__u64)newex->ec_block << blksize_bits;
 
-	if (newex->ec_type == EXT4_EXT_CACHE_GAP) {
+	if (newex->ec_start == 0) {
 		pgoff_t offset;
 		struct page *page;
 		struct buffer_head *bh = NULL;
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index 5a5c55d..bb003dc 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -104,6 +104,7 @@
 {
 	struct super_block *sb = inode->i_sb;
 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+	struct ext4_inode_info *ei = EXT4_I(inode);
 	struct vfsmount *mnt = filp->f_path.mnt;
 	struct path path;
 	char buf[64], *cp;
@@ -127,6 +128,27 @@
 			ext4_mark_super_dirty(sb);
 		}
 	}
+	/*
+	 * Set up the jbd2_inode if we are opening the inode for
+	 * writing and the journal is present
+	 */
+	if (sbi->s_journal && !ei->jinode && (filp->f_mode & FMODE_WRITE)) {
+		struct jbd2_inode *jinode = jbd2_alloc_inode(GFP_KERNEL);
+
+		spin_lock(&inode->i_lock);
+		if (!ei->jinode) {
+			if (!jinode) {
+				spin_unlock(&inode->i_lock);
+				return -ENOMEM;
+			}
+			ei->jinode = jinode;
+			jbd2_journal_init_jbd_inode(ei->jinode, inode);
+			jinode = NULL;
+		}
+		spin_unlock(&inode->i_lock);
+		if (unlikely(jinode != NULL))
+			jbd2_free_inode(jinode);
+	}
 	return dquot_file_open(inode, filp);
 }
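
ext4_file_open() now creates the jbd2_inode lazily for the first writer: allocate outside the lock, re-check the pointer under i_lock, publish it if still unset, and free the allocation if another opener got there first. The same shape in a standalone pthreads sketch; the names and the mutex are generic stand-ins, none of this is the jbd2 API:

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct journal_inode { int dummy; };

    struct inode {
        pthread_mutex_t lock;           /* stands in for inode->i_lock */
        struct journal_inode *jinode;   /* NULL until the first writer shows up */
    };

    /* Allocate outside the lock, re-check and publish under the lock,
     * and free the copy if another opener won the race. */
    static int open_for_write(struct inode *inode)
    {
        struct journal_inode *jinode = NULL;

        if (inode->jinode == NULL)
            jinode = malloc(sizeof(*jinode));

        pthread_mutex_lock(&inode->lock);
        if (inode->jinode == NULL) {
            if (jinode == NULL) {
                pthread_mutex_unlock(&inode->lock);
                return -1;              /* -ENOMEM in the kernel version */
            }
            inode->jinode = jinode;
            jinode = NULL;              /* ownership transferred */
        }
        pthread_mutex_unlock(&inode->lock);

        free(jinode);                   /* non-NULL only if we lost the race */
        return 0;
    }

    int main(void)
    {
        struct inode inode = { PTHREAD_MUTEX_INITIALIZER, NULL };

        printf("first open: %d, second open: %d, jinode=%p\n",
               open_for_write(&inode), open_for_write(&inode),
               (void *)inode.jinode);
        free(inode.jinode);
        return 0;
    }
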
 
diff --git a/fs/ext4/fsync.c b/fs/ext4/fsync.c
index c1a7bc9..7829b28 100644
--- a/fs/ext4/fsync.c
+++ b/fs/ext4/fsync.c
@@ -75,7 +75,7 @@
  * to written.
  * The function returns the number of pending IOs on success.
  */
-static int flush_completed_IO(struct inode *inode)
+extern int ext4_flush_completed_IO(struct inode *inode)
 {
 	ext4_io_end_t *io;
 	struct ext4_inode_info *ei = EXT4_I(inode);
@@ -169,7 +169,7 @@
 	if (inode->i_sb->s_flags & MS_RDONLY)
 		return 0;
 
-	ret = flush_completed_IO(inode);
+	ret = ext4_flush_completed_IO(inode);
 	if (ret < 0)
 		return ret;
 
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index 1ce240a..eb9097a 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -1027,7 +1027,7 @@
 	inode->i_generation = sbi->s_next_generation++;
 	spin_unlock(&sbi->s_next_gen_lock);
 
-	ei->i_state_flags = 0;
+	ext4_clear_state_flags(ei); /* Only relevant on 32-bit archs */
 	ext4_set_inode_state(inode, EXT4_STATE_NEW);
 
 	ei->i_extra_isize = EXT4_SB(sb)->s_want_extra_isize;
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index e659597..9f7f9e4 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -39,7 +39,9 @@
 #include <linux/bio.h>
 #include <linux/workqueue.h>
 #include <linux/kernel.h>
+#include <linux/printk.h>
 #include <linux/slab.h>
+#include <linux/ratelimit.h>
 
 #include "ext4_jbd2.h"
 #include "xattr.h"
@@ -54,10 +56,17 @@
 					      loff_t new_size)
 {
 	trace_ext4_begin_ordered_truncate(inode, new_size);
-	return jbd2_journal_begin_ordered_truncate(
-					EXT4_SB(inode->i_sb)->s_journal,
-					&EXT4_I(inode)->jinode,
-					new_size);
+	/*
+	 * If jinode is zero, then we never opened the file for
+	 * writing, so there's no need to call
+	 * jbd2_journal_begin_ordered_truncate() since there's no
+	 * outstanding writes we need to flush.
+	 */
+	if (!EXT4_I(inode)->jinode)
+		return 0;
+	return jbd2_journal_begin_ordered_truncate(EXT4_JOURNAL(inode),
+						   EXT4_I(inode)->jinode,
+						   new_size);
 }
 
 static void ext4_invalidatepage(struct page *page, unsigned long offset);
@@ -552,7 +561,7 @@
 }
 
 /**
- *	ext4_blks_to_allocate: Look up the block map and count the number
+ *	ext4_blks_to_allocate - Look up the block map and count the number
  *	of direct blocks need to be allocated for the given branch.
  *
  *	@branch: chain of indirect blocks
@@ -591,13 +600,19 @@
 
 /**
  *	ext4_alloc_blocks: multiple allocate blocks needed for a branch
+ *	@handle: handle for this transaction
+ *	@inode: inode which needs allocated blocks
+ *	@iblock: the logical block at which to start allocating
+ *	@goal: preferred physical block of allocation
  *	@indirect_blks: the number of blocks need to allocate for indirect
  *			blocks
- *
+ *	@blks: number of desired blocks
  *	@new_blocks: on return it will store the new block numbers for
  *	the indirect blocks(if needed) and the first direct block,
- *	@blks:	on return it will store the total number of allocated
- *		direct blocks
+ *	@err: on return it will store the error code
+ *
+ *	This function will return the number of blocks allocated as
+ *	requested by the passed-in parameters.
  */
 static int ext4_alloc_blocks(handle_t *handle, struct inode *inode,
 			     ext4_lblk_t iblock, ext4_fsblk_t goal,
@@ -711,9 +726,11 @@
 
 /**
  *	ext4_alloc_branch - allocate and set up a chain of blocks.
+ *	@handle: handle for this transaction
  *	@inode: owner
  *	@indirect_blks: number of allocated indirect blocks
  *	@blks: number of allocated direct blocks
+ *	@goal: preferred place for allocation
  *	@offsets: offsets (in the blocks) to store the pointers to next.
  *	@branch: place to store the chain in.
  *
@@ -826,6 +843,7 @@
 
 /**
  * ext4_splice_branch - splice the allocated branch onto inode.
+ * @handle: handle for this transaction
  * @inode: owner
  * @block: (logical) number of block we are adding
  * @chain: chain of indirect blocks (with a missing link - see
@@ -1081,7 +1099,7 @@
  * Calculate the number of metadata blocks need to reserve
  * to allocate a block located at @lblock
  */
-static int ext4_calc_metadata_amount(struct inode *inode, sector_t lblock)
+static int ext4_calc_metadata_amount(struct inode *inode, ext4_lblk_t lblock)
 {
 	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
 		return ext4_ext_calc_metadata_amount(inode, lblock);
@@ -1320,7 +1338,7 @@
 	 * avoid double accounting
 	 */
 	if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
-		EXT4_I(inode)->i_delalloc_reserved_flag = 1;
+		ext4_set_inode_state(inode, EXT4_STATE_DELALLOC_RESERVED);
 	/*
 	 * We need to check for EXT4 here because migrate
 	 * could have changed the inode type in between
@@ -1350,7 +1368,7 @@
 			ext4_da_update_reserve_space(inode, retval, 1);
 	}
 	if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
-		EXT4_I(inode)->i_delalloc_reserved_flag = 0;
+		ext4_clear_inode_state(inode, EXT4_STATE_DELALLOC_RESERVED);
 
 	up_write((&EXT4_I(inode)->i_data_sem));
 	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
@@ -1878,7 +1896,7 @@
 /*
  * Reserve a single block located at lblock
  */
-static int ext4_da_reserve_space(struct inode *inode, sector_t lblock)
+static int ext4_da_reserve_space(struct inode *inode, ext4_lblk_t lblock)
 {
 	int retries = 0;
 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
@@ -2239,7 +2257,7 @@
 	 * affects functions in many different parts of the allocation
 	 * call path.  This flag exists primarily because we don't
 	 * want to change *many* call functions, so ext4_map_blocks()
-	 * will set the magic i_delalloc_reserved_flag once the
+	 * will set the EXT4_STATE_DELALLOC_RESERVED flag once the
 	 * inode's allocation semaphore is taken.
 	 *
 	 * If the blocks in questions were delalloc blocks, set
@@ -3362,7 +3380,7 @@
 	 * doing I/O at all.
 	 *
 	 * We could call write_cache_pages(), and then redirty all of
-	 * the pages by calling redirty_page_for_writeback() but that
+	 * the pages by calling redirty_page_for_writepage() but that
 	 * would be ugly in the extreme.  So instead we would need to
 	 * replicate parts of the code in the above functions,
 	 * simplifying them because we wouldn't actually intend to
@@ -3720,8 +3738,7 @@
 retry:
 	io_end = ext4_init_io_end(inode, GFP_ATOMIC);
 	if (!io_end) {
-		if (printk_ratelimit())
-			printk(KERN_WARNING "%s: allocation fail\n", __func__);
+		pr_warn_ratelimited("%s: allocation fail\n", __func__);
 		schedule();
 		goto retry;
 	}
@@ -3745,9 +3762,9 @@
  * preallocated extents, and those write extend the file, no need to
  * fall back to buffered IO.
  *
- * For holes, we fallocate those blocks, mark them as unintialized
+ * For holes, we fallocate those blocks, mark them as uninitialized
  * If those blocks were preallocated, we make sure they are split, but
- * still keep the range to write as unintialized.
+ * still keep the range to write as uninitialized.
  *
  * The unwritten extents will be converted to written when DIO is completed.
  * For async direct IO, since the IO may still be pending when we return, we
@@ -4045,7 +4062,7 @@
 	if (ext4_should_journal_data(inode)) {
 		err = ext4_handle_dirty_metadata(handle, inode, bh);
 	} else {
-		if (ext4_should_order_data(inode))
+		if (ext4_should_order_data(inode) && EXT4_I(inode)->jinode)
 			err = ext4_jbd2_file_inode(handle, inode);
 		mark_buffer_dirty(bh);
 	}
@@ -4169,6 +4186,7 @@
 {
 	__le32 *p;
 	int	flags = EXT4_FREE_BLOCKS_FORGET | EXT4_FREE_BLOCKS_VALIDATED;
+	int	err;
 
 	if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
 		flags |= EXT4_FREE_BLOCKS_METADATA;
@@ -4184,11 +4202,23 @@
 	if (try_to_extend_transaction(handle, inode)) {
 		if (bh) {
 			BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
-			ext4_handle_dirty_metadata(handle, inode, bh);
+			err = ext4_handle_dirty_metadata(handle, inode, bh);
+			if (unlikely(err)) {
+				ext4_std_error(inode->i_sb, err);
+				return 1;
+			}
 		}
-		ext4_mark_inode_dirty(handle, inode);
-		ext4_truncate_restart_trans(handle, inode,
-					    blocks_for_truncate(inode));
+		err = ext4_mark_inode_dirty(handle, inode);
+		if (unlikely(err)) {
+			ext4_std_error(inode->i_sb, err);
+			return 1;
+		}
+		err = ext4_truncate_restart_trans(handle, inode,
+						  blocks_for_truncate(inode));
+		if (unlikely(err)) {
+			ext4_std_error(inode->i_sb, err);
+			return 1;
+		}
 		if (bh) {
 			BUFFER_TRACE(bh, "retaking write access");
 			ext4_journal_get_write_access(handle, bh);
@@ -4349,6 +4379,7 @@
 					(__le32 *) bh->b_data,
 					(__le32 *) bh->b_data + addr_per_block,
 					depth);
+			brelse(bh);
 
 			/*
 			 * Everything below this this pointer has been
@@ -4859,7 +4890,7 @@
 	}
 	inode->i_nlink = le16_to_cpu(raw_inode->i_links_count);
 
-	ei->i_state_flags = 0;
+	ext4_clear_state_flags(ei);	/* Only relevant on 32-bit archs */
 	ei->i_dir_start_lookup = 0;
 	ei->i_dtime = le32_to_cpu(raw_inode->i_dtime);
 	/* We now have enough fields to check if the inode was active or not.
@@ -5118,7 +5149,7 @@
 	if (ext4_inode_blocks_set(handle, raw_inode, ei))
 		goto out_brelse;
 	raw_inode->i_dtime = cpu_to_le32(ei->i_dtime);
-	raw_inode->i_flags = cpu_to_le32(ei->i_flags);
+	raw_inode->i_flags = cpu_to_le32(ei->i_flags & 0xFFFFFFFF);
 	if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
 	    cpu_to_le32(EXT4_OS_HURD))
 		raw_inode->i_file_acl_high =
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index 5b4d4e3..851f49b 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -2608,18 +2608,12 @@
 static inline int ext4_issue_discard(struct super_block *sb,
 		ext4_group_t block_group, ext4_grpblk_t block, int count)
 {
-	int ret;
 	ext4_fsblk_t discard_block;
 
 	discard_block = block + ext4_group_first_block_no(sb, block_group);
 	trace_ext4_discard_blocks(sb,
 			(unsigned long long) discard_block, count);
-	ret = sb_issue_discard(sb, discard_block, count, GFP_NOFS, 0);
-	if (ret == -EOPNOTSUPP) {
-		ext4_warning(sb, "discard not supported, disabling");
-		clear_opt(EXT4_SB(sb)->s_mount_opt, DISCARD);
-	}
-	return ret;
+	return sb_issue_discard(sb, discard_block, count, GFP_NOFS, 0);
 }
 
 /*
@@ -2631,7 +2625,7 @@
 	struct super_block *sb = journal->j_private;
 	struct ext4_buddy e4b;
 	struct ext4_group_info *db;
-	int err, count = 0, count2 = 0;
+	int err, ret, count = 0, count2 = 0;
 	struct ext4_free_data *entry;
 	struct list_head *l, *ltmp;
 
@@ -2641,9 +2635,15 @@
 		mb_debug(1, "gonna free %u blocks in group %u (0x%p):",
 			 entry->count, entry->group, entry);
 
-		if (test_opt(sb, DISCARD))
-			ext4_issue_discard(sb, entry->group,
+		if (test_opt(sb, DISCARD)) {
+			ret = ext4_issue_discard(sb, entry->group,
 					entry->start_blk, entry->count);
+			if (unlikely(ret == -EOPNOTSUPP)) {
+				ext4_warning(sb, "discard not supported, "
+						 "disabling");
+				clear_opt(sb, DISCARD);
+			}
+		}
 
 		err = ext4_mb_load_buddy(sb, entry->group, &e4b);
 		/* we expect to find existing buddy because it's pinned */
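
Since ext4_issue_discard() no longer swallows -EOPNOTSUPP, the commit callback above inspects the return value itself, warns once, and clears the DISCARD mount option so later frees skip the request. The "probe once, then degrade" pattern in a self-contained sketch; issue_discard() here is a stub that pretends the device lacks discard, not sb_issue_discard():

    #include <errno.h>
    #include <stdio.h>

    static int discard_supported;       /* sticky feature flag */

    /* Stub for the device discard request; pretend the device lacks it. */
    static int issue_discard(unsigned long long block, unsigned int count)
    {
        (void)block;
        (void)count;
        return -EOPNOTSUPP;
    }

    static void free_blocks(unsigned long long block, unsigned int count)
    {
        if (discard_supported) {
            int ret = issue_discard(block, count);

            if (ret == -EOPNOTSUPP) {
                fprintf(stderr, "discard not supported, disabling\n");
                discard_supported = 0;  /* like clear_opt(sb, DISCARD) */
            }
        }
        /* ...actually return the blocks to the allocator here... */
    }

    int main(void)
    {
        discard_supported = 1;
        free_blocks(1000, 8);   /* warns and disables */
        free_blocks(2000, 8);   /* silently skips the discard */
        return 0;
    }
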
@@ -3881,19 +3881,6 @@
 	}
 }
 
-/*
- * finds all preallocated spaces and return blocks being freed to them
- * if preallocated space becomes full (no block is used from the space)
- * then the function frees space in buddy
- * XXX: at the moment, truncate (which is the only way to free blocks)
- * discards all preallocations
- */
-static void ext4_mb_return_to_preallocation(struct inode *inode,
-					struct ext4_buddy *e4b,
-					sector_t block, int count)
-{
-	BUG_ON(!list_empty(&EXT4_I(inode)->i_prealloc_list));
-}
 #ifdef CONFIG_EXT4_DEBUG
 static void ext4_mb_show_ac(struct ext4_allocation_context *ac)
 {
@@ -4283,7 +4270,7 @@
 	 * EDQUOT check, as blocks and quotas have been already
 	 * reserved when data being copied into pagecache.
 	 */
-	if (EXT4_I(ar->inode)->i_delalloc_reserved_flag)
+	if (ext4_test_inode_state(ar->inode, EXT4_STATE_DELALLOC_RESERVED))
 		ar->flags |= EXT4_MB_DELALLOC_RESERVED;
 	else {
 		/* Without delayed allocation we need to verify
@@ -4380,7 +4367,8 @@
 	if (inquota && ar->len < inquota)
 		dquot_free_block(ar->inode, inquota - ar->len);
 	if (!ar->len) {
-		if (!EXT4_I(ar->inode)->i_delalloc_reserved_flag)
+		if (!ext4_test_inode_state(ar->inode,
+					   EXT4_STATE_DELALLOC_RESERVED))
 			/* release all the reserved blocks if non delalloc */
 			percpu_counter_sub(&sbi->s_dirtyblocks_counter,
 						reserv_blks);
@@ -4626,7 +4614,11 @@
 		 * blocks being freed are metadata. these blocks shouldn't
 		 * be used until this transaction is committed
 		 */
-		new_entry  = kmem_cache_alloc(ext4_free_ext_cachep, GFP_NOFS);
+		new_entry = kmem_cache_alloc(ext4_free_ext_cachep, GFP_NOFS);
+		if (!new_entry) {
+			err = -ENOMEM;
+			goto error_return;
+		}
 		new_entry->start_blk = bit;
 		new_entry->group  = block_group;
 		new_entry->count = count;
@@ -4643,7 +4635,6 @@
 		ext4_lock_group(sb, block_group);
 		mb_clear_bits(bitmap_bh->b_data, bit, count);
 		mb_free_blocks(inode, &e4b, bit, count);
-		ext4_mb_return_to_preallocation(inode, &e4b, block, count);
 	}
 
 	ret = ext4_free_blks_count(sb, gdp) + count;
@@ -4718,8 +4709,6 @@
 	ext4_unlock_group(sb, group);
 
 	ret = ext4_issue_discard(sb, group, start, count);
-	if (ret)
-		ext4_std_error(sb, ret);
 
 	ext4_lock_group(sb, group);
 	mb_free_blocks(NULL, e4b, start, ex.fe_len);
@@ -4819,6 +4808,8 @@
 	ext4_group_t group, ngroups = ext4_get_groups_count(sb);
 	ext4_grpblk_t cnt = 0, first_block, last_block;
 	uint64_t start, len, minlen, trimmed;
+	ext4_fsblk_t first_data_blk =
+			le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block);
 	int ret = 0;
 
 	start = range->start >> sb->s_blocksize_bits;
@@ -4828,6 +4819,10 @@
 
 	if (unlikely(minlen > EXT4_BLOCKS_PER_GROUP(sb)))
 		return -EINVAL;
+	if (start < first_data_blk) {
+		len -= first_data_blk - start;
+		start = first_data_blk;
+	}
 
 	/* Determine first and last group to examine based on start and len */
 	ext4_get_group_no_and_offset(sb, (ext4_fsblk_t) start,
@@ -4851,7 +4846,7 @@
 		if (len >= EXT4_BLOCKS_PER_GROUP(sb))
 			len -= (EXT4_BLOCKS_PER_GROUP(sb) - first_block);
 		else
-			last_block = len;
+			last_block = first_block + len;
 
 		if (e4b.bd_info->bb_free >= minlen) {
 			cnt = ext4_trim_all_free(sb, &e4b, first_block,
diff --git a/fs/ext4/migrate.c b/fs/ext4/migrate.c
index 25f3a97..b0a126f 100644
--- a/fs/ext4/migrate.c
+++ b/fs/ext4/migrate.c
@@ -496,7 +496,7 @@
 	goal = (((inode->i_ino - 1) / EXT4_INODES_PER_GROUP(inode->i_sb)) *
 		EXT4_INODES_PER_GROUP(inode->i_sb)) + 1;
 	tmp_inode = ext4_new_inode(handle, inode->i_sb->s_root->d_inode,
-				   S_IFREG, 0, goal);
+				   S_IFREG, NULL, goal);
 	if (IS_ERR(tmp_inode)) {
 		retval = -ENOMEM;
 		ext4_journal_stop(handle);
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index dc40e75..5485390 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -581,9 +581,9 @@
 					   dir->i_sb->s_blocksize -
 					   EXT4_DIR_REC_LEN(0));
 	for (; de < top; de = ext4_next_entry(de, dir->i_sb->s_blocksize)) {
-		if (!ext4_check_dir_entry(dir, de, bh,
-					(block<<EXT4_BLOCK_SIZE_BITS(dir->i_sb))
-						+((char *)de - bh->b_data))) {
+		if (ext4_check_dir_entry(dir, NULL, de, bh,
+				(block<<EXT4_BLOCK_SIZE_BITS(dir->i_sb))
+					 + ((char *)de - bh->b_data))) {
 			/* On error, skip the f_pos to the next block. */
 			dir_file->f_pos = (dir_file->f_pos |
 					(dir->i_sb->s_blocksize - 1)) + 1;
@@ -820,7 +820,7 @@
 		if ((char *) de + namelen <= dlimit &&
 		    ext4_match (namelen, name, de)) {
 			/* found a match - just to be sure, do a full check */
-			if (!ext4_check_dir_entry(dir, de, bh, offset))
+			if (ext4_check_dir_entry(dir, NULL, de, bh, offset))
 				return -1;
 			*res_dir = de;
 			return 1;
@@ -1036,7 +1036,7 @@
 			return ERR_PTR(-EIO);
 		}
 		inode = ext4_iget(dir->i_sb, ino);
-		if (unlikely(IS_ERR(inode))) {
+		if (IS_ERR(inode)) {
 			if (PTR_ERR(inode) == -ESTALE) {
 				EXT4_ERROR_INODE(dir,
 						 "deleted inode referenced: %u",
@@ -1269,7 +1269,7 @@
 		de = (struct ext4_dir_entry_2 *)bh->b_data;
 		top = bh->b_data + blocksize - reclen;
 		while ((char *) de <= top) {
-			if (!ext4_check_dir_entry(dir, de, bh, offset))
+			if (ext4_check_dir_entry(dir, NULL, de, bh, offset))
 				return -EIO;
 			if (ext4_match(namelen, name, de))
 				return -EEXIST;
@@ -1602,7 +1602,11 @@
 			if (err)
 				goto journal_error;
 		}
-		ext4_handle_dirty_metadata(handle, inode, frames[0].bh);
+		err = ext4_handle_dirty_metadata(handle, inode, frames[0].bh);
+		if (err) {
+			ext4_std_error(inode->i_sb, err);
+			goto cleanup;
+		}
 	}
 	de = do_split(handle, dir, &bh, frame, &hinfo, &err);
 	if (!de)
@@ -1630,17 +1634,21 @@
 {
 	struct ext4_dir_entry_2 *de, *pde;
 	unsigned int blocksize = dir->i_sb->s_blocksize;
-	int i;
+	int i, err;
 
 	i = 0;
 	pde = NULL;
 	de = (struct ext4_dir_entry_2 *) bh->b_data;
 	while (i < bh->b_size) {
-		if (!ext4_check_dir_entry(dir, de, bh, i))
+		if (ext4_check_dir_entry(dir, NULL, de, bh, i))
 			return -EIO;
 		if (de == de_del)  {
 			BUFFER_TRACE(bh, "get_write_access");
-			ext4_journal_get_write_access(handle, bh);
+			err = ext4_journal_get_write_access(handle, bh);
+			if (unlikely(err)) {
+				ext4_std_error(dir->i_sb, err);
+				return err;
+			}
 			if (pde)
 				pde->rec_len = ext4_rec_len_to_disk(
 					ext4_rec_len_from_disk(pde->rec_len,
@@ -1652,7 +1660,11 @@
 				de->inode = 0;
 			dir->i_version++;
 			BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
-			ext4_handle_dirty_metadata(handle, dir, bh);
+			err = ext4_handle_dirty_metadata(handle, dir, bh);
+			if (unlikely(err)) {
+				ext4_std_error(dir->i_sb, err);
+				return err;
+			}
 			return 0;
 		}
 		i += ext4_rec_len_from_disk(de->rec_len, blocksize);
@@ -1789,7 +1801,7 @@
 {
 	handle_t *handle;
 	struct inode *inode;
-	struct buffer_head *dir_block;
+	struct buffer_head *dir_block = NULL;
 	struct ext4_dir_entry_2 *de;
 	unsigned int blocksize = dir->i_sb->s_blocksize;
 	int err, retries = 0;
@@ -1822,7 +1834,9 @@
 	if (!dir_block)
 		goto out_clear_inode;
 	BUFFER_TRACE(dir_block, "get_write_access");
-	ext4_journal_get_write_access(handle, dir_block);
+	err = ext4_journal_get_write_access(handle, dir_block);
+	if (err)
+		goto out_clear_inode;
 	de = (struct ext4_dir_entry_2 *) dir_block->b_data;
 	de->inode = cpu_to_le32(inode->i_ino);
 	de->name_len = 1;
@@ -1839,10 +1853,12 @@
 	ext4_set_de_type(dir->i_sb, de, S_IFDIR);
 	inode->i_nlink = 2;
 	BUFFER_TRACE(dir_block, "call ext4_handle_dirty_metadata");
-	ext4_handle_dirty_metadata(handle, dir, dir_block);
-	brelse(dir_block);
-	ext4_mark_inode_dirty(handle, inode);
-	err = ext4_add_entry(handle, dentry, inode);
+	err = ext4_handle_dirty_metadata(handle, dir, dir_block);
+	if (err)
+		goto out_clear_inode;
+	err = ext4_mark_inode_dirty(handle, inode);
+	if (!err)
+		err = ext4_add_entry(handle, dentry, inode);
 	if (err) {
 out_clear_inode:
 		clear_nlink(inode);
@@ -1853,10 +1869,13 @@
 	}
 	ext4_inc_count(handle, dir);
 	ext4_update_dx_flag(dir);
-	ext4_mark_inode_dirty(handle, dir);
+	err = ext4_mark_inode_dirty(handle, dir);
+	if (err)
+		goto out_clear_inode;
 	d_instantiate(dentry, inode);
 	unlock_new_inode(inode);
 out_stop:
+	brelse(dir_block);
 	ext4_journal_stop(handle);
 	if (err == -ENOSPC && ext4_should_retry_alloc(dir->i_sb, &retries))
 		goto retry;
@@ -1919,7 +1938,7 @@
 			}
 			de = (struct ext4_dir_entry_2 *) bh->b_data;
 		}
-		if (!ext4_check_dir_entry(inode, de, bh, offset)) {
+		if (ext4_check_dir_entry(inode, NULL, de, bh, offset)) {
 			de = (struct ext4_dir_entry_2 *)(bh->b_data +
 							 sb->s_blocksize);
 			offset = (offset | (sb->s_blocksize - 1)) + 1;
@@ -2407,7 +2426,11 @@
 					ext4_current_time(new_dir);
 		ext4_mark_inode_dirty(handle, new_dir);
 		BUFFER_TRACE(new_bh, "call ext4_handle_dirty_metadata");
-		ext4_handle_dirty_metadata(handle, new_dir, new_bh);
+		retval = ext4_handle_dirty_metadata(handle, new_dir, new_bh);
+		if (unlikely(retval)) {
+			ext4_std_error(new_dir->i_sb, retval);
+			goto end_rename;
+		}
 		brelse(new_bh);
 		new_bh = NULL;
 	}
@@ -2459,7 +2482,11 @@
 		PARENT_INO(dir_bh->b_data, new_dir->i_sb->s_blocksize) =
 						cpu_to_le32(new_dir->i_ino);
 		BUFFER_TRACE(dir_bh, "call ext4_handle_dirty_metadata");
-		ext4_handle_dirty_metadata(handle, old_dir, dir_bh);
+		retval = ext4_handle_dirty_metadata(handle, old_dir, dir_bh);
+		if (retval) {
+			ext4_std_error(old_dir->i_sb, retval);
+			goto end_rename;
+		}
 		ext4_dec_count(handle, old_dir);
 		if (new_inode) {
 			/* checked empty_dir above, can't have another parent,
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
index beacce1..7270dcf 100644
--- a/fs/ext4/page-io.c
+++ b/fs/ext4/page-io.c
@@ -44,7 +44,7 @@
 	if (io_page_cachep == NULL)
 		return -ENOMEM;
 	io_end_cachep = KMEM_CACHE(ext4_io_end, SLAB_RECLAIM_ACCOUNT);
-	if (io_page_cachep == NULL) {
+	if (io_end_cachep == NULL) {
 		kmem_cache_destroy(io_page_cachep);
 		return -ENOMEM;
 	}
@@ -158,11 +158,8 @@
 
 ext4_io_end_t *ext4_init_io_end(struct inode *inode, gfp_t flags)
 {
-	ext4_io_end_t *io = NULL;
-
-	io = kmem_cache_alloc(io_end_cachep, flags);
+	ext4_io_end_t *io = kmem_cache_zalloc(io_end_cachep, flags);
 	if (io) {
-		memset(io, 0, sizeof(*io));
 		atomic_inc(&EXT4_I(inode)->i_ioend_count);
 		io->inode = inode;
 		INIT_WORK(&io->work, ext4_end_io_work);
diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
index 981c847..3ecc6e4 100644
--- a/fs/ext4/resize.c
+++ b/fs/ext4/resize.c
@@ -220,7 +220,11 @@
 		memcpy(gdb->b_data, sbi->s_group_desc[i]->b_data, gdb->b_size);
 		set_buffer_uptodate(gdb);
 		unlock_buffer(gdb);
-		ext4_handle_dirty_metadata(handle, NULL, gdb);
+		err = ext4_handle_dirty_metadata(handle, NULL, gdb);
+		if (unlikely(err)) {
+			brelse(gdb);
+			goto exit_bh;
+		}
 		ext4_set_bit(bit, bh->b_data);
 		brelse(gdb);
 	}
@@ -258,7 +262,11 @@
 
 	ext4_mark_bitmap_end(input->blocks_count, sb->s_blocksize * 8,
 			     bh->b_data);
-	ext4_handle_dirty_metadata(handle, NULL, bh);
+	err = ext4_handle_dirty_metadata(handle, NULL, bh);
+	if (unlikely(err)) {
+		ext4_std_error(sb, err);
+		goto exit_bh;
+	}
 	brelse(bh);
 	/* Mark unused entries in inode bitmap used */
 	ext4_debug("clear inode bitmap %#04llx (+%llu)\n",
@@ -270,7 +278,9 @@
 
 	ext4_mark_bitmap_end(EXT4_INODES_PER_GROUP(sb), sb->s_blocksize * 8,
 			     bh->b_data);
-	ext4_handle_dirty_metadata(handle, NULL, bh);
+	err = ext4_handle_dirty_metadata(handle, NULL, bh);
+	if (unlikely(err))
+		ext4_std_error(sb, err);
 exit_bh:
 	brelse(bh);
 
@@ -422,17 +432,21 @@
 		goto exit_dind;
 	}
 
-	if ((err = ext4_journal_get_write_access(handle, EXT4_SB(sb)->s_sbh)))
+	err = ext4_journal_get_write_access(handle, EXT4_SB(sb)->s_sbh);
+	if (unlikely(err))
 		goto exit_dind;
 
-	if ((err = ext4_journal_get_write_access(handle, *primary)))
+	err = ext4_journal_get_write_access(handle, *primary);
+	if (unlikely(err))
 		goto exit_sbh;
 
-	if ((err = ext4_journal_get_write_access(handle, dind)))
-		goto exit_primary;
+	err = ext4_journal_get_write_access(handle, dind);
+	if (unlikely(err))
+		ext4_std_error(sb, err);
 
 	/* ext4_reserve_inode_write() gets a reference on the iloc */
-	if ((err = ext4_reserve_inode_write(handle, inode, &iloc)))
+	err = ext4_reserve_inode_write(handle, inode, &iloc);
+	if (unlikely(err))
 		goto exit_dindj;
 
 	n_group_desc = kmalloc((gdb_num + 1) * sizeof(struct buffer_head *),
@@ -454,12 +468,20 @@
 	 * reserved inode, and will become GDT blocks (primary and backup).
 	 */
 	data[gdb_num % EXT4_ADDR_PER_BLOCK(sb)] = 0;
-	ext4_handle_dirty_metadata(handle, NULL, dind);
-	brelse(dind);
+	err = ext4_handle_dirty_metadata(handle, NULL, dind);
+	if (unlikely(err)) {
+		ext4_std_error(sb, err);
+		goto exit_inode;
+	}
 	inode->i_blocks -= (gdbackups + 1) * sb->s_blocksize >> 9;
 	ext4_mark_iloc_dirty(handle, inode, &iloc);
 	memset((*primary)->b_data, 0, sb->s_blocksize);
-	ext4_handle_dirty_metadata(handle, NULL, *primary);
+	err = ext4_handle_dirty_metadata(handle, NULL, *primary);
+	if (unlikely(err)) {
+		ext4_std_error(sb, err);
+		goto exit_inode;
+	}
+	brelse(dind);
 
 	o_group_desc = EXT4_SB(sb)->s_group_desc;
 	memcpy(n_group_desc, o_group_desc,
@@ -470,19 +492,19 @@
 	kfree(o_group_desc);
 
 	le16_add_cpu(&es->s_reserved_gdt_blocks, -1);
-	ext4_handle_dirty_metadata(handle, NULL, EXT4_SB(sb)->s_sbh);
+	err = ext4_handle_dirty_metadata(handle, NULL, EXT4_SB(sb)->s_sbh);
+	if (err)
+		ext4_std_error(sb, err);
 
-	return 0;
+	return err;
 
 exit_inode:
 	/* ext4_journal_release_buffer(handle, iloc.bh); */
 	brelse(iloc.bh);
 exit_dindj:
 	/* ext4_journal_release_buffer(handle, dind); */
-exit_primary:
-	/* ext4_journal_release_buffer(handle, *primary); */
 exit_sbh:
-	/* ext4_journal_release_buffer(handle, *primary); */
+	/* ext4_journal_release_buffer(handle, EXT4_SB(sb)->s_sbh); */
 exit_dind:
 	brelse(dind);
 exit_bh:
@@ -665,7 +687,9 @@
 			memset(bh->b_data + size, 0, rest);
 		set_buffer_uptodate(bh);
 		unlock_buffer(bh);
-		ext4_handle_dirty_metadata(handle, NULL, bh);
+		err = ext4_handle_dirty_metadata(handle, NULL, bh);
+		if (unlikely(err))
+			ext4_std_error(sb, err);
 		brelse(bh);
 	}
 	if ((err2 = ext4_journal_stop(handle)) && !err)
@@ -883,7 +907,11 @@
 	/* Update the global fs size fields */
 	sbi->s_groups_count++;
 
-	ext4_handle_dirty_metadata(handle, NULL, primary);
+	err = ext4_handle_dirty_metadata(handle, NULL, primary);
+	if (unlikely(err)) {
+		ext4_std_error(sb, err);
+		goto exit_journal;
+	}
 
 	/* Update the reserved block counts only once the new group is
 	 * active. */
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index cd37f9d..29c80f6 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -388,13 +388,14 @@
 void __ext4_error(struct super_block *sb, const char *function,
 		  unsigned int line, const char *fmt, ...)
 {
+	struct va_format vaf;
 	va_list args;
 
 	va_start(args, fmt);
-	printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: comm %s: ",
-	       sb->s_id, function, line, current->comm);
-	vprintk(fmt, args);
-	printk("\n");
+	vaf.fmt = fmt;
+	vaf.va = &args;
+	printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: comm %s: %pV\n",
+	       sb->s_id, function, line, current->comm, &vaf);
 	va_end(args);
 
 	ext4_handle_error(sb);
@@ -405,28 +406,31 @@
 		      const char *fmt, ...)
 {
 	va_list args;
+	struct va_format vaf;
 	struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es;
 
 	es->s_last_error_ino = cpu_to_le32(inode->i_ino);
 	es->s_last_error_block = cpu_to_le64(block);
 	save_error_info(inode->i_sb, function, line);
 	va_start(args, fmt);
+	vaf.fmt = fmt;
+	vaf.va = &args;
 	printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: inode #%lu: ",
 	       inode->i_sb->s_id, function, line, inode->i_ino);
 	if (block)
-		printk("block %llu: ", block);
-	printk("comm %s: ", current->comm);
-	vprintk(fmt, args);
-	printk("\n");
+		printk(KERN_CONT "block %llu: ", block);
+	printk(KERN_CONT "comm %s: %pV\n", current->comm, &vaf);
 	va_end(args);
 
 	ext4_handle_error(inode->i_sb);
 }
 
 void ext4_error_file(struct file *file, const char *function,
-		     unsigned int line, const char *fmt, ...)
+		     unsigned int line, ext4_fsblk_t block,
+		     const char *fmt, ...)
 {
 	va_list args;
+	struct va_format vaf;
 	struct ext4_super_block *es;
 	struct inode *inode = file->f_dentry->d_inode;
 	char pathname[80], *path;
@@ -434,17 +438,18 @@
 	es = EXT4_SB(inode->i_sb)->s_es;
 	es->s_last_error_ino = cpu_to_le32(inode->i_ino);
 	save_error_info(inode->i_sb, function, line);
-	va_start(args, fmt);
 	path = d_path(&(file->f_path), pathname, sizeof(pathname));
-	if (!path)
+	if (IS_ERR(path))
 		path = "(unknown)";
 	printk(KERN_CRIT
-	       "EXT4-fs error (device %s): %s:%d: inode #%lu "
-	       "(comm %s path %s): ",
-	       inode->i_sb->s_id, function, line, inode->i_ino,
-	       current->comm, path);
-	vprintk(fmt, args);
-	printk("\n");
+	       "EXT4-fs error (device %s): %s:%d: inode #%lu: ",
+	       inode->i_sb->s_id, function, line, inode->i_ino);
+	if (block)
+		printk(KERN_CONT "block %llu: ", block);
+	va_start(args, fmt);
+	vaf.fmt = fmt;
+	vaf.va = &args;
+	printk(KERN_CONT "comm %s: path %s: %pV\n", current->comm, path, &vaf);
 	va_end(args);
 
 	ext4_handle_error(inode->i_sb);
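
These helpers now hand the caller's format string and va_list to a single printk() through struct va_format and the %pV specifier, so the "EXT4-fs error ..." prefix and the message can no longer be interleaved with other printks. %pV is a kernel-only printk extension; a rough userspace approximation of the same single-emission idea is to render the variadic part into a buffer first and print one line:

    #include <stdarg.h>
    #include <stdio.h>

    /* Emit prefix + message as one fprintf() call, roughly what the
     * kernel gets for free from "%pV" and struct va_format. */
    static void fs_error(const char *dev, const char *function,
                         const char *fmt, ...)
    {
        char msg[256];
        va_list args;

        va_start(args, fmt);
        vsnprintf(msg, sizeof(msg), fmt, args);
        va_end(args);

        fprintf(stderr, "EXT4-fs error (device %s): %s: %s\n",
                dev, function, msg);
    }

    int main(void)
    {
        fs_error("sda1", "ext4_lookup", "deleted inode referenced: %u", 1234u);
        return 0;
    }
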
@@ -543,28 +548,29 @@
 		panic("EXT4-fs panic from previous error\n");
 }
 
-void ext4_msg (struct super_block * sb, const char *prefix,
-		   const char *fmt, ...)
+void ext4_msg(struct super_block *sb, const char *prefix, const char *fmt, ...)
 {
+	struct va_format vaf;
 	va_list args;
 
 	va_start(args, fmt);
-	printk("%sEXT4-fs (%s): ", prefix, sb->s_id);
-	vprintk(fmt, args);
-	printk("\n");
+	vaf.fmt = fmt;
+	vaf.va = &args;
+	printk("%sEXT4-fs (%s): %pV\n", prefix, sb->s_id, &vaf);
 	va_end(args);
 }
 
 void __ext4_warning(struct super_block *sb, const char *function,
 		    unsigned int line, const char *fmt, ...)
 {
+	struct va_format vaf;
 	va_list args;
 
 	va_start(args, fmt);
-	printk(KERN_WARNING "EXT4-fs warning (device %s): %s:%d: ",
-	       sb->s_id, function, line);
-	vprintk(fmt, args);
-	printk("\n");
+	vaf.fmt = fmt;
+	vaf.va = &args;
+	printk(KERN_WARNING "EXT4-fs warning (device %s): %s:%d: %pV\n",
+	       sb->s_id, function, line, &vaf);
 	va_end(args);
 }
 
@@ -575,21 +581,25 @@
 __releases(bitlock)
 __acquires(bitlock)
 {
+	struct va_format vaf;
 	va_list args;
 	struct ext4_super_block *es = EXT4_SB(sb)->s_es;
 
 	es->s_last_error_ino = cpu_to_le32(ino);
 	es->s_last_error_block = cpu_to_le64(block);
 	__save_error_info(sb, function, line);
+
 	va_start(args, fmt);
+
+	vaf.fmt = fmt;
+	vaf.va = &args;
 	printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: group %u",
 	       sb->s_id, function, line, grp);
 	if (ino)
-		printk("inode %lu: ", ino);
+		printk(KERN_CONT "inode %lu: ", ino);
 	if (block)
-		printk("block %llu:", (unsigned long long) block);
-	vprintk(fmt, args);
-	printk("\n");
+		printk(KERN_CONT "block %llu:", (unsigned long long) block);
+	printk(KERN_CONT "%pV\n", &vaf);
 	va_end(args);
 
 	if (test_opt(sb, ERRORS_CONT)) {
@@ -808,21 +818,15 @@
 	memset(&ei->i_cached_extent, 0, sizeof(struct ext4_ext_cache));
 	INIT_LIST_HEAD(&ei->i_prealloc_list);
 	spin_lock_init(&ei->i_prealloc_lock);
-	/*
-	 * Note:  We can be called before EXT4_SB(sb)->s_journal is set,
-	 * therefore it can be null here.  Don't check it, just initialize
-	 * jinode.
-	 */
-	jbd2_journal_init_jbd_inode(&ei->jinode, &ei->vfs_inode);
 	ei->i_reserved_data_blocks = 0;
 	ei->i_reserved_meta_blocks = 0;
 	ei->i_allocated_meta_blocks = 0;
 	ei->i_da_metadata_calc_len = 0;
-	ei->i_delalloc_reserved_flag = 0;
 	spin_lock_init(&(ei->i_block_reservation_lock));
 #ifdef CONFIG_QUOTA
 	ei->i_reserved_quota = 0;
 #endif
+	ei->jinode = NULL;
 	INIT_LIST_HEAD(&ei->i_completed_io_list);
 	spin_lock_init(&ei->i_completed_io_lock);
 	ei->cur_aio_dio = NULL;
@@ -898,9 +902,12 @@
 	end_writeback(inode);
 	dquot_drop(inode);
 	ext4_discard_preallocations(inode);
-	if (EXT4_JOURNAL(inode))
-		jbd2_journal_release_jbd_inode(EXT4_SB(inode->i_sb)->s_journal,
-				       &EXT4_I(inode)->jinode);
+	if (EXT4_I(inode)->jinode) {
+		jbd2_journal_release_jbd_inode(EXT4_JOURNAL(inode),
+					       EXT4_I(inode)->jinode);
+		jbd2_free_inode(EXT4_I(inode)->jinode);
+		EXT4_I(inode)->jinode = NULL;
+	}
 }
 
 static inline void ext4_show_quota_options(struct seq_file *seq,
@@ -1393,7 +1400,7 @@
 		sbi->s_qf_names[qtype] = NULL;
 		return 0;
 	}
-	set_opt(sbi->s_mount_opt, QUOTA);
+	set_opt(sb, QUOTA);
 	return 1;
 }
 
@@ -1448,21 +1455,21 @@
 		switch (token) {
 		case Opt_bsd_df:
 			ext4_msg(sb, KERN_WARNING, deprecated_msg, p, "2.6.38");
-			clear_opt(sbi->s_mount_opt, MINIX_DF);
+			clear_opt(sb, MINIX_DF);
 			break;
 		case Opt_minix_df:
 			ext4_msg(sb, KERN_WARNING, deprecated_msg, p, "2.6.38");
-			set_opt(sbi->s_mount_opt, MINIX_DF);
+			set_opt(sb, MINIX_DF);
 
 			break;
 		case Opt_grpid:
 			ext4_msg(sb, KERN_WARNING, deprecated_msg, p, "2.6.38");
-			set_opt(sbi->s_mount_opt, GRPID);
+			set_opt(sb, GRPID);
 
 			break;
 		case Opt_nogrpid:
 			ext4_msg(sb, KERN_WARNING, deprecated_msg, p, "2.6.38");
-			clear_opt(sbi->s_mount_opt, GRPID);
+			clear_opt(sb, GRPID);
 
 			break;
 		case Opt_resuid:
@@ -1480,38 +1487,38 @@
 			/* *sb_block = match_int(&args[0]); */
 			break;
 		case Opt_err_panic:
-			clear_opt(sbi->s_mount_opt, ERRORS_CONT);
-			clear_opt(sbi->s_mount_opt, ERRORS_RO);
-			set_opt(sbi->s_mount_opt, ERRORS_PANIC);
+			clear_opt(sb, ERRORS_CONT);
+			clear_opt(sb, ERRORS_RO);
+			set_opt(sb, ERRORS_PANIC);
 			break;
 		case Opt_err_ro:
-			clear_opt(sbi->s_mount_opt, ERRORS_CONT);
-			clear_opt(sbi->s_mount_opt, ERRORS_PANIC);
-			set_opt(sbi->s_mount_opt, ERRORS_RO);
+			clear_opt(sb, ERRORS_CONT);
+			clear_opt(sb, ERRORS_PANIC);
+			set_opt(sb, ERRORS_RO);
 			break;
 		case Opt_err_cont:
-			clear_opt(sbi->s_mount_opt, ERRORS_RO);
-			clear_opt(sbi->s_mount_opt, ERRORS_PANIC);
-			set_opt(sbi->s_mount_opt, ERRORS_CONT);
+			clear_opt(sb, ERRORS_RO);
+			clear_opt(sb, ERRORS_PANIC);
+			set_opt(sb, ERRORS_CONT);
 			break;
 		case Opt_nouid32:
-			set_opt(sbi->s_mount_opt, NO_UID32);
+			set_opt(sb, NO_UID32);
 			break;
 		case Opt_debug:
-			set_opt(sbi->s_mount_opt, DEBUG);
+			set_opt(sb, DEBUG);
 			break;
 		case Opt_oldalloc:
-			set_opt(sbi->s_mount_opt, OLDALLOC);
+			set_opt(sb, OLDALLOC);
 			break;
 		case Opt_orlov:
-			clear_opt(sbi->s_mount_opt, OLDALLOC);
+			clear_opt(sb, OLDALLOC);
 			break;
 #ifdef CONFIG_EXT4_FS_XATTR
 		case Opt_user_xattr:
-			set_opt(sbi->s_mount_opt, XATTR_USER);
+			set_opt(sb, XATTR_USER);
 			break;
 		case Opt_nouser_xattr:
-			clear_opt(sbi->s_mount_opt, XATTR_USER);
+			clear_opt(sb, XATTR_USER);
 			break;
 #else
 		case Opt_user_xattr:
@@ -1521,10 +1528,10 @@
 #endif
 #ifdef CONFIG_EXT4_FS_POSIX_ACL
 		case Opt_acl:
-			set_opt(sbi->s_mount_opt, POSIX_ACL);
+			set_opt(sb, POSIX_ACL);
 			break;
 		case Opt_noacl:
-			clear_opt(sbi->s_mount_opt, POSIX_ACL);
+			clear_opt(sb, POSIX_ACL);
 			break;
 #else
 		case Opt_acl:
@@ -1543,7 +1550,7 @@
 					 "Cannot specify journal on remount");
 				return 0;
 			}
-			set_opt(sbi->s_mount_opt, UPDATE_JOURNAL);
+			set_opt(sb, UPDATE_JOURNAL);
 			break;
 		case Opt_journal_dev:
 			if (is_remount) {
@@ -1556,14 +1563,14 @@
 			*journal_devnum = option;
 			break;
 		case Opt_journal_checksum:
-			set_opt(sbi->s_mount_opt, JOURNAL_CHECKSUM);
+			set_opt(sb, JOURNAL_CHECKSUM);
 			break;
 		case Opt_journal_async_commit:
-			set_opt(sbi->s_mount_opt, JOURNAL_ASYNC_COMMIT);
-			set_opt(sbi->s_mount_opt, JOURNAL_CHECKSUM);
+			set_opt(sb, JOURNAL_ASYNC_COMMIT);
+			set_opt(sb, JOURNAL_CHECKSUM);
 			break;
 		case Opt_noload:
-			set_opt(sbi->s_mount_opt, NOLOAD);
+			set_opt(sb, NOLOAD);
 			break;
 		case Opt_commit:
 			if (match_int(&args[0], &option))
@@ -1606,15 +1613,15 @@
 					return 0;
 				}
 			} else {
-				clear_opt(sbi->s_mount_opt, DATA_FLAGS);
+				clear_opt(sb, DATA_FLAGS);
 				sbi->s_mount_opt |= data_opt;
 			}
 			break;
 		case Opt_data_err_abort:
-			set_opt(sbi->s_mount_opt, DATA_ERR_ABORT);
+			set_opt(sb, DATA_ERR_ABORT);
 			break;
 		case Opt_data_err_ignore:
-			clear_opt(sbi->s_mount_opt, DATA_ERR_ABORT);
+			clear_opt(sb, DATA_ERR_ABORT);
 			break;
 #ifdef CONFIG_QUOTA
 		case Opt_usrjquota:
@@ -1654,12 +1661,12 @@
 			break;
 		case Opt_quota:
 		case Opt_usrquota:
-			set_opt(sbi->s_mount_opt, QUOTA);
-			set_opt(sbi->s_mount_opt, USRQUOTA);
+			set_opt(sb, QUOTA);
+			set_opt(sb, USRQUOTA);
 			break;
 		case Opt_grpquota:
-			set_opt(sbi->s_mount_opt, QUOTA);
-			set_opt(sbi->s_mount_opt, GRPQUOTA);
+			set_opt(sb, QUOTA);
+			set_opt(sb, GRPQUOTA);
 			break;
 		case Opt_noquota:
 			if (sb_any_quota_loaded(sb)) {
@@ -1667,9 +1674,9 @@
 					"options when quota turned on");
 				return 0;
 			}
-			clear_opt(sbi->s_mount_opt, QUOTA);
-			clear_opt(sbi->s_mount_opt, USRQUOTA);
-			clear_opt(sbi->s_mount_opt, GRPQUOTA);
+			clear_opt(sb, QUOTA);
+			clear_opt(sb, USRQUOTA);
+			clear_opt(sb, GRPQUOTA);
 			break;
 #else
 		case Opt_quota:
@@ -1695,7 +1702,7 @@
 			sbi->s_mount_flags |= EXT4_MF_FS_ABORTED;
 			break;
 		case Opt_nobarrier:
-			clear_opt(sbi->s_mount_opt, BARRIER);
+			clear_opt(sb, BARRIER);
 			break;
 		case Opt_barrier:
 			if (args[0].from) {
@@ -1704,9 +1711,9 @@
 			} else
 				option = 1;	/* No argument, default to 1 */
 			if (option)
-				set_opt(sbi->s_mount_opt, BARRIER);
+				set_opt(sb, BARRIER);
 			else
-				clear_opt(sbi->s_mount_opt, BARRIER);
+				clear_opt(sb, BARRIER);
 			break;
 		case Opt_ignore:
 			break;
@@ -1730,17 +1737,17 @@
 				 "Ignoring deprecated bh option");
 			break;
 		case Opt_i_version:
-			set_opt(sbi->s_mount_opt, I_VERSION);
+			set_opt(sb, I_VERSION);
 			sb->s_flags |= MS_I_VERSION;
 			break;
 		case Opt_nodelalloc:
-			clear_opt(sbi->s_mount_opt, DELALLOC);
+			clear_opt(sb, DELALLOC);
 			break;
 		case Opt_mblk_io_submit:
-			set_opt(sbi->s_mount_opt, MBLK_IO_SUBMIT);
+			set_opt(sb, MBLK_IO_SUBMIT);
 			break;
 		case Opt_nomblk_io_submit:
-			clear_opt(sbi->s_mount_opt, MBLK_IO_SUBMIT);
+			clear_opt(sb, MBLK_IO_SUBMIT);
 			break;
 		case Opt_stripe:
 			if (match_int(&args[0], &option))
@@ -1750,13 +1757,13 @@
 			sbi->s_stripe = option;
 			break;
 		case Opt_delalloc:
-			set_opt(sbi->s_mount_opt, DELALLOC);
+			set_opt(sb, DELALLOC);
 			break;
 		case Opt_block_validity:
-			set_opt(sbi->s_mount_opt, BLOCK_VALIDITY);
+			set_opt(sb, BLOCK_VALIDITY);
 			break;
 		case Opt_noblock_validity:
-			clear_opt(sbi->s_mount_opt, BLOCK_VALIDITY);
+			clear_opt(sb, BLOCK_VALIDITY);
 			break;
 		case Opt_inode_readahead_blks:
 			if (match_int(&args[0], &option))
@@ -1780,7 +1787,7 @@
 							    option);
 			break;
 		case Opt_noauto_da_alloc:
-			set_opt(sbi->s_mount_opt,NO_AUTO_DA_ALLOC);
+			set_opt(sb, NO_AUTO_DA_ALLOC);
 			break;
 		case Opt_auto_da_alloc:
 			if (args[0].from) {
@@ -1789,24 +1796,24 @@
 			} else
 				option = 1;	/* No argument, default to 1 */
 			if (option)
-				clear_opt(sbi->s_mount_opt, NO_AUTO_DA_ALLOC);
+				clear_opt(sb, NO_AUTO_DA_ALLOC);
 			else
-				set_opt(sbi->s_mount_opt,NO_AUTO_DA_ALLOC);
+				set_opt(sb, NO_AUTO_DA_ALLOC);
 			break;
 		case Opt_discard:
-			set_opt(sbi->s_mount_opt, DISCARD);
+			set_opt(sb, DISCARD);
 			break;
 		case Opt_nodiscard:
-			clear_opt(sbi->s_mount_opt, DISCARD);
+			clear_opt(sb, DISCARD);
 			break;
 		case Opt_dioread_nolock:
-			set_opt(sbi->s_mount_opt, DIOREAD_NOLOCK);
+			set_opt(sb, DIOREAD_NOLOCK);
 			break;
 		case Opt_dioread_lock:
-			clear_opt(sbi->s_mount_opt, DIOREAD_NOLOCK);
+			clear_opt(sb, DIOREAD_NOLOCK);
 			break;
 		case Opt_init_inode_table:
-			set_opt(sbi->s_mount_opt, INIT_INODE_TABLE);
+			set_opt(sb, INIT_INODE_TABLE);
 			if (args[0].from) {
 				if (match_int(&args[0], &option))
 					return 0;
@@ -1817,7 +1824,7 @@
 			sbi->s_li_wait_mult = option;
 			break;
 		case Opt_noinit_inode_table:
-			clear_opt(sbi->s_mount_opt, INIT_INODE_TABLE);
+			clear_opt(sb, INIT_INODE_TABLE);
 			break;
 		default:
 			ext4_msg(sb, KERN_ERR,
@@ -1829,10 +1836,10 @@
 #ifdef CONFIG_QUOTA
 	if (sbi->s_qf_names[USRQUOTA] || sbi->s_qf_names[GRPQUOTA]) {
 		if (test_opt(sb, USRQUOTA) && sbi->s_qf_names[USRQUOTA])
-			clear_opt(sbi->s_mount_opt, USRQUOTA);
+			clear_opt(sb, USRQUOTA);
 
 		if (test_opt(sb, GRPQUOTA) && sbi->s_qf_names[GRPQUOTA])
-			clear_opt(sbi->s_mount_opt, GRPQUOTA);
+			clear_opt(sb, GRPQUOTA);
 
 		if (test_opt(sb, GRPQUOTA) || test_opt(sb, USRQUOTA)) {
 			ext4_msg(sb, KERN_ERR, "old and new quota "
@@ -1902,12 +1909,12 @@
 	ext4_commit_super(sb, 1);
 	if (test_opt(sb, DEBUG))
 		printk(KERN_INFO "[EXT4 FS bs=%lu, gc=%u, "
-				"bpg=%lu, ipg=%lu, mo=%04x]\n",
+				"bpg=%lu, ipg=%lu, mo=%04x, mo2=%04x]\n",
 			sb->s_blocksize,
 			sbi->s_groups_count,
 			EXT4_BLOCKS_PER_GROUP(sb),
 			EXT4_INODES_PER_GROUP(sb),
-			sbi->s_mount_opt);
+			sbi->s_mount_opt, sbi->s_mount_opt2);
 
 	return res;
 }
@@ -1937,14 +1944,13 @@
 	size = flex_group_count * sizeof(struct flex_groups);
 	sbi->s_flex_groups = kzalloc(size, GFP_KERNEL);
 	if (sbi->s_flex_groups == NULL) {
-		sbi->s_flex_groups = vmalloc(size);
-		if (sbi->s_flex_groups)
-			memset(sbi->s_flex_groups, 0, size);
-	}
-	if (sbi->s_flex_groups == NULL) {
-		ext4_msg(sb, KERN_ERR, "not enough memory for "
-				"%u flex groups", flex_group_count);
-		goto failed;
+		sbi->s_flex_groups = vzalloc(size);
+		if (sbi->s_flex_groups == NULL) {
+			ext4_msg(sb, KERN_ERR,
+				 "not enough memory for %u flex groups",
+				 flex_group_count);
+			goto failed;
+		}
 	}
 
 	for (i = 0; i < sbi->s_groups_count; i++) {
@@ -2923,7 +2929,7 @@
 	struct ext4_sb_info *sbi = EXT4_SB(sb);
 	struct ext4_li_request *elr;
 	ext4_group_t ngroups = EXT4_SB(sb)->s_groups_count;
-	int ret;
+	int ret = 0;
 
 	if (sbi->s_li_request != NULL)
 		return 0;
@@ -3078,41 +3084,41 @@
 
 	/* Set defaults before we parse the mount options */
 	def_mount_opts = le32_to_cpu(es->s_default_mount_opts);
-	set_opt(sbi->s_mount_opt, INIT_INODE_TABLE);
+	set_opt(sb, INIT_INODE_TABLE);
 	if (def_mount_opts & EXT4_DEFM_DEBUG)
-		set_opt(sbi->s_mount_opt, DEBUG);
+		set_opt(sb, DEBUG);
 	if (def_mount_opts & EXT4_DEFM_BSDGROUPS) {
 		ext4_msg(sb, KERN_WARNING, deprecated_msg, "bsdgroups",
 			"2.6.38");
-		set_opt(sbi->s_mount_opt, GRPID);
+		set_opt(sb, GRPID);
 	}
 	if (def_mount_opts & EXT4_DEFM_UID16)
-		set_opt(sbi->s_mount_opt, NO_UID32);
+		set_opt(sb, NO_UID32);
 #ifdef CONFIG_EXT4_FS_XATTR
 	if (def_mount_opts & EXT4_DEFM_XATTR_USER)
-		set_opt(sbi->s_mount_opt, XATTR_USER);
+		set_opt(sb, XATTR_USER);
 #endif
 #ifdef CONFIG_EXT4_FS_POSIX_ACL
 	if (def_mount_opts & EXT4_DEFM_ACL)
-		set_opt(sbi->s_mount_opt, POSIX_ACL);
+		set_opt(sb, POSIX_ACL);
 #endif
 	if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_DATA)
-		set_opt(sbi->s_mount_opt, JOURNAL_DATA);
+		set_opt(sb, JOURNAL_DATA);
 	else if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_ORDERED)
-		set_opt(sbi->s_mount_opt, ORDERED_DATA);
+		set_opt(sb, ORDERED_DATA);
 	else if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_WBACK)
-		set_opt(sbi->s_mount_opt, WRITEBACK_DATA);
+		set_opt(sb, WRITEBACK_DATA);
 
 	if (le16_to_cpu(sbi->s_es->s_errors) == EXT4_ERRORS_PANIC)
-		set_opt(sbi->s_mount_opt, ERRORS_PANIC);
+		set_opt(sb, ERRORS_PANIC);
 	else if (le16_to_cpu(sbi->s_es->s_errors) == EXT4_ERRORS_CONTINUE)
-		set_opt(sbi->s_mount_opt, ERRORS_CONT);
+		set_opt(sb, ERRORS_CONT);
 	else
-		set_opt(sbi->s_mount_opt, ERRORS_RO);
+		set_opt(sb, ERRORS_RO);
 	if (def_mount_opts & EXT4_DEFM_BLOCK_VALIDITY)
-		set_opt(sbi->s_mount_opt, BLOCK_VALIDITY);
+		set_opt(sb, BLOCK_VALIDITY);
 	if (def_mount_opts & EXT4_DEFM_DISCARD)
-		set_opt(sbi->s_mount_opt, DISCARD);
+		set_opt(sb, DISCARD);
 
 	sbi->s_resuid = le16_to_cpu(es->s_def_resuid);
 	sbi->s_resgid = le16_to_cpu(es->s_def_resgid);
@@ -3121,7 +3127,7 @@
 	sbi->s_max_batch_time = EXT4_DEF_MAX_BATCH_TIME;
 
 	if ((def_mount_opts & EXT4_DEFM_NOBARRIER) == 0)
-		set_opt(sbi->s_mount_opt, BARRIER);
+		set_opt(sb, BARRIER);
 
 	/*
 	 * enable delayed allocation by default
@@ -3129,7 +3135,7 @@
 	 */
 	if (!IS_EXT3_SB(sb) &&
 	    ((def_mount_opts & EXT4_DEFM_NODELALLOC) == 0))
-		set_opt(sbi->s_mount_opt, DELALLOC);
+		set_opt(sb, DELALLOC);
 
 	if (!parse_options((char *) sbi->s_es->s_mount_opts, sb,
 			   &journal_devnum, &journal_ioprio, NULL, 0)) {
@@ -3432,8 +3438,8 @@
 		       "suppressed and not mounted read-only");
 		goto failed_mount_wq;
 	} else {
-		clear_opt(sbi->s_mount_opt, DATA_FLAGS);
-		set_opt(sbi->s_mount_opt, WRITEBACK_DATA);
+		clear_opt(sb, DATA_FLAGS);
+		set_opt(sb, WRITEBACK_DATA);
 		sbi->s_journal = NULL;
 		needs_recovery = 0;
 		goto no_journal;
@@ -3471,9 +3477,9 @@
 		 */
 		if (jbd2_journal_check_available_features
 		    (sbi->s_journal, 0, 0, JBD2_FEATURE_INCOMPAT_REVOKE))
-			set_opt(sbi->s_mount_opt, ORDERED_DATA);
+			set_opt(sb, ORDERED_DATA);
 		else
-			set_opt(sbi->s_mount_opt, JOURNAL_DATA);
+			set_opt(sb, JOURNAL_DATA);
 		break;
 
 	case EXT4_MOUNT_ORDERED_DATA:
@@ -3563,18 +3569,18 @@
 	    (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA)) {
 		ext4_msg(sb, KERN_WARNING, "Ignoring delalloc option - "
 			 "requested data journaling mode");
-		clear_opt(sbi->s_mount_opt, DELALLOC);
+		clear_opt(sb, DELALLOC);
 	}
 	if (test_opt(sb, DIOREAD_NOLOCK)) {
 		if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) {
 			ext4_msg(sb, KERN_WARNING, "Ignoring dioread_nolock "
 				"option - requested data journaling mode");
-			clear_opt(sbi->s_mount_opt, DIOREAD_NOLOCK);
+			clear_opt(sb, DIOREAD_NOLOCK);
 		}
 		if (sb->s_blocksize < PAGE_SIZE) {
 			ext4_msg(sb, KERN_WARNING, "Ignoring dioread_nolock "
 				"option - block size is too small");
-			clear_opt(sbi->s_mount_opt, DIOREAD_NOLOCK);
+			clear_opt(sb, DIOREAD_NOLOCK);
 		}
 	}
 
@@ -4173,6 +4179,22 @@
 	return 0;
 }
 
+/*
+ * Structure to save mount options for ext4_remount's benefit
+ */
+struct ext4_mount_options {
+	unsigned long s_mount_opt;
+	unsigned long s_mount_opt2;
+	uid_t s_resuid;
+	gid_t s_resgid;
+	unsigned long s_commit_interval;
+	u32 s_min_batch_time, s_max_batch_time;
+#ifdef CONFIG_QUOTA
+	int s_jquota_fmt;
+	char *s_qf_names[MAXQUOTAS];
+#endif
+};
+
 static int ext4_remount(struct super_block *sb, int *flags, char *data)
 {
 	struct ext4_super_block *es;
@@ -4193,6 +4215,7 @@
 	lock_super(sb);
 	old_sb_flags = sb->s_flags;
 	old_opts.s_mount_opt = sbi->s_mount_opt;
+	old_opts.s_mount_opt2 = sbi->s_mount_opt2;
 	old_opts.s_resuid = sbi->s_resuid;
 	old_opts.s_resgid = sbi->s_resgid;
 	old_opts.s_commit_interval = sbi->s_commit_interval;
@@ -4346,6 +4369,7 @@
 restore_opts:
 	sb->s_flags = old_sb_flags;
 	sbi->s_mount_opt = old_opts.s_mount_opt;
+	sbi->s_mount_opt2 = old_opts.s_mount_opt2;
 	sbi->s_resuid = old_opts.s_resuid;
 	sbi->s_resgid = old_opts.s_resgid;
 	sbi->s_commit_interval = old_opts.s_commit_interval;
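
The ext4 hunks above convert every mount-option update from poking sbi->s_mount_opt
directly into set_opt(sb, ...)/clear_opt(sb, ...) calls keyed off the superblock, and
thread a second flag word, s_mount_opt2, through the debug printout and the remount
save/restore path. A minimal sketch of what such superblock-keyed helpers look like,
assuming the EXT4_SB() accessor and the EXT4_MOUNT_*/EXT4_MOUNT2_* flag names (the
authoritative definitions live in fs/ext4/ext4.h):

	#define set_opt(sb, opt)	(EXT4_SB(sb)->s_mount_opt |= EXT4_MOUNT_##opt)
	#define clear_opt(sb, opt)	(EXT4_SB(sb)->s_mount_opt &= ~EXT4_MOUNT_##opt)
	#define test_opt(sb, opt)	(EXT4_SB(sb)->s_mount_opt & EXT4_MOUNT_##opt)

	/* a second word of flags gets matching helpers, e.g.: */
	#define set_opt2(sb, opt)	(EXT4_SB(sb)->s_mount_opt2 |= EXT4_MOUNT2_##opt)
	#define clear_opt2(sb, opt)	(EXT4_SB(sb)->s_mount_opt2 &= ~EXT4_MOUNT2_##opt)
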
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index fa4b899..fc32176 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -427,23 +427,23 @@
 static int
 ext4_xattr_list(struct dentry *dentry, char *buffer, size_t buffer_size)
 {
-	int i_error, b_error;
+	int ret, ret2;
 
 	down_read(&EXT4_I(dentry->d_inode)->xattr_sem);
-	i_error = ext4_xattr_ibody_list(dentry, buffer, buffer_size);
-	if (i_error < 0) {
-		b_error = 0;
-	} else {
-		if (buffer) {
-			buffer += i_error;
-			buffer_size -= i_error;
-		}
-		b_error = ext4_xattr_block_list(dentry, buffer, buffer_size);
-		if (b_error < 0)
-			i_error = 0;
+	ret = ret2 = ext4_xattr_ibody_list(dentry, buffer, buffer_size);
+	if (ret < 0)
+		goto errout;
+	if (buffer) {
+		buffer += ret;
+		buffer_size -= ret;
 	}
+	ret = ext4_xattr_block_list(dentry, buffer, buffer_size);
+	if (ret < 0)
+		goto errout;
+	ret += ret2;
+errout:
 	up_read(&EXT4_I(dentry->d_inode)->xattr_sem);
-	return i_error + b_error;
+	return ret;
 }
 
 /*
@@ -947,7 +947,7 @@
 /*
  * ext4_xattr_set_handle()
  *
- * Create, replace or remove an extended attribute for this inode. Buffer
+ * Create, replace or remove an extended attribute for this inode.  Value
  * is NULL to remove an existing extended attribute, and non-NULL to
  * either replace an existing extended attribute, or create a new extended
  * attribute. The flags XATTR_REPLACE and XATTR_CREATE
diff --git a/fs/fat/fat.h b/fs/fat/fat.h
index d75a77f..f504089 100644
--- a/fs/fat/fat.h
+++ b/fs/fat/fat.h
@@ -319,7 +319,8 @@
 			struct msdos_dir_entry *de, loff_t i_pos);
 extern int fat_sync_inode(struct inode *inode);
 extern int fat_fill_super(struct super_block *sb, void *data, int silent,
-			const struct inode_operations *fs_dir_inode_ops, int isvfat);
+			const struct inode_operations *fs_dir_inode_ops,
+			int isvfat, void (*setup)(struct super_block *));
 
 extern int fat_flush_inodes(struct super_block *sb, struct inode *i1,
 		            struct inode *i2);
diff --git a/fs/fat/inode.c b/fs/fat/inode.c
index 206351a..86753fe 100644
--- a/fs/fat/inode.c
+++ b/fs/fat/inode.c
@@ -703,7 +703,6 @@
 		struct fid *fid, int fh_len, int fh_type)
 {
 	struct inode *inode = NULL;
-	struct dentry *result;
 	u32 *fh = fid->raw;
 
 	if (fh_len < 5 || fh_type != 3)
@@ -748,10 +747,7 @@
 	 * the fat_iget lookup again.  If that fails, then we are totally out
 	 * of luck.  But all that is for another day
 	 */
-	result = d_obtain_alias(inode);
-	if (!IS_ERR(result))
-		d_set_d_op(result, sb->s_root->d_op);
-	return result;
+	return d_obtain_alias(inode);
 }
 
 static int
@@ -799,8 +795,6 @@
 	brelse(bh);
 
 	parent = d_obtain_alias(inode);
-	if (!IS_ERR(parent))
-		d_set_d_op(parent, sb->s_root->d_op);
 out:
 	unlock_super(sb);
 
@@ -1244,7 +1238,8 @@
  * Read the super block of an MS-DOS FS.
  */
 int fat_fill_super(struct super_block *sb, void *data, int silent,
-		   const struct inode_operations *fs_dir_inode_ops, int isvfat)
+		   const struct inode_operations *fs_dir_inode_ops, int isvfat,
+		   void (*setup)(struct super_block *))
 {
 	struct inode *root_inode = NULL, *fat_inode = NULL;
 	struct buffer_head *bh;
@@ -1280,6 +1275,8 @@
 	if (error)
 		goto out_fail;
 
+	setup(sb); /* flavour-specific stuff that needs options */
+
 	error = -EIO;
 	sb_min_blocksize(sb, 512);
 	bh = sb_bread(sb, 0);
diff --git a/fs/fat/namei_msdos.c b/fs/fat/namei_msdos.c
index 35ffe43..7114990 100644
--- a/fs/fat/namei_msdos.c
+++ b/fs/fat/namei_msdos.c
@@ -227,11 +227,7 @@
 	}
 out:
 	unlock_super(sb);
-	d_set_d_op(dentry, &msdos_dentry_operations);
-	dentry = d_splice_alias(inode, dentry);
-	if (dentry)
-		d_set_d_op(dentry, &msdos_dentry_operations);
-	return dentry;
+	return d_splice_alias(inode, dentry);
 
 error:
 	unlock_super(sb);
@@ -661,21 +657,16 @@
 	.getattr	= fat_getattr,
 };
 
+static void setup(struct super_block *sb)
+{
+	sb->s_d_op = &msdos_dentry_operations;
+	sb->s_flags |= MS_NOATIME;
+}
+
 static int msdos_fill_super(struct super_block *sb, void *data, int silent)
 {
-	int res;
-
-	lock_super(sb);
-	res = fat_fill_super(sb, data, silent, &msdos_dir_inode_operations, 0);
-	if (res) {
-		unlock_super(sb);
-		return res;
-	}
-
-	sb->s_flags |= MS_NOATIME;
-	d_set_d_op(sb->s_root, &msdos_dentry_operations);
-	unlock_super(sb);
-	return 0;
+	return fat_fill_super(sb, data, silent, &msdos_dir_inode_operations,
+			     0, setup);
 }
 
 static struct dentry *msdos_mount(struct file_system_type *fs_type,
diff --git a/fs/fat/namei_vfat.c b/fs/fat/namei_vfat.c
index e3ffc5e..f88f752 100644
--- a/fs/fat/namei_vfat.c
+++ b/fs/fat/namei_vfat.c
@@ -772,13 +772,10 @@
 
 out:
 	unlock_super(sb);
-	d_set_d_op(dentry, sb->s_root->d_op);
 	dentry->d_time = dentry->d_parent->d_inode->i_version;
 	dentry = d_splice_alias(inode, dentry);
-	if (dentry) {
-		d_set_d_op(dentry, sb->s_root->d_op);
+	if (dentry)
 		dentry->d_time = dentry->d_parent->d_inode->i_version;
-	}
 	return dentry;
 
 error:
@@ -1066,24 +1063,18 @@
 	.getattr	= fat_getattr,
 };
 
+static void setup(struct super_block *sb)
+{
+	if (MSDOS_SB(sb)->options.name_check != 's')
+		sb->s_d_op = &vfat_ci_dentry_ops;
+	else
+		sb->s_d_op = &vfat_dentry_ops;
+}
+
 static int vfat_fill_super(struct super_block *sb, void *data, int silent)
 {
-	int res;
-
-	lock_super(sb);
-	res = fat_fill_super(sb, data, silent, &vfat_dir_inode_operations, 1);
-	if (res) {
-		unlock_super(sb);
-		return res;
-	}
-
-	if (MSDOS_SB(sb)->options.name_check != 's')
-		d_set_d_op(sb->s_root, &vfat_ci_dentry_ops);
-	else
-		d_set_d_op(sb->s_root, &vfat_dentry_ops);
-
-	unlock_super(sb);
-	return 0;
+	return fat_fill_super(sb, data, silent, &vfat_dir_inode_operations,
+			     1, setup);
 }
 
 static struct dentry *vfat_mount(struct file_system_type *fs_type,
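
The FAT changes above replace per-dentry d_set_d_op() calls with a per-flavour setup()
callback that fat_fill_super() invokes once mount options have been parsed, so the
callback can pick sb->s_d_op (and flags such as MS_NOATIME) based on those options.
A hypothetical third flavour would plug into the new interface as sketched below; the
examplefat_* names are invented, the rest mirrors the msdos variant:

	static void examplefat_setup(struct super_block *sb)
	{
		/* runs inside fat_fill_super(), after option parsing */
		sb->s_d_op = &msdos_dentry_operations;
		sb->s_flags |= MS_NOATIME;
	}

	static int examplefat_fill_super(struct super_block *sb, void *data, int silent)
	{
		return fat_fill_super(sb, data, silent,
				      &msdos_dir_inode_operations, 0, examplefat_setup);
	}
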
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index 042af73..bfed844 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -350,7 +350,6 @@
 	}
 
 	entry = newent ? newent : entry;
-	d_set_d_op(entry, &fuse_dentry_operations);
 	if (outarg_valid)
 		fuse_change_entry_timeout(entry, &outarg);
 	else
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index f62b32c..9e3f68c 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -617,10 +617,8 @@
 		goto out_iput;
 
 	entry = d_obtain_alias(inode);
-	if (!IS_ERR(entry) && get_node_id(inode) != FUSE_ROOT_ID) {
-		d_set_d_op(entry, &fuse_dentry_operations);
+	if (!IS_ERR(entry) && get_node_id(inode) != FUSE_ROOT_ID)
 		fuse_invalidate_entry_cache(entry);
-	}
 
 	return entry;
 
@@ -719,10 +717,8 @@
 	}
 
 	parent = d_obtain_alias(inode);
-	if (!IS_ERR(parent) && get_node_id(inode) != FUSE_ROOT_ID) {
-		d_set_d_op(parent, &fuse_dentry_operations);
+	if (!IS_ERR(parent) && get_node_id(inode) != FUSE_ROOT_ID)
 		fuse_invalidate_entry_cache(parent);
-	}
 
 	return parent;
 }
@@ -989,6 +985,8 @@
 		iput(root);
 		goto err_put_conn;
 	}
+	/* only now - we want root dentry with NULL ->d_op */
+	sb->s_d_op = &fuse_dentry_operations;
 
 	init_req = fuse_request_alloc();
 	if (!init_req)
diff --git a/fs/gfs2/export.c b/fs/gfs2/export.c
index 97012ec..9023db8 100644
--- a/fs/gfs2/export.c
+++ b/fs/gfs2/export.c
@@ -126,12 +126,7 @@
 
 static struct dentry *gfs2_get_parent(struct dentry *child)
 {
-	struct dentry *dentry;
-
-	dentry = d_obtain_alias(gfs2_lookupi(child->d_inode, &gfs2_qdotdot, 1));
-	if (!IS_ERR(dentry))
-		d_set_d_op(dentry, &gfs2_dops);
-	return dentry;
+	return d_obtain_alias(gfs2_lookupi(child->d_inode, &gfs2_qdotdot, 1));
 }
 
 static struct dentry *gfs2_get_dentry(struct super_block *sb,
@@ -139,7 +134,6 @@
 {
 	struct gfs2_sbd *sdp = sb->s_fs_info;
 	struct inode *inode;
-	struct dentry *dentry;
 
 	inode = gfs2_ilookup(sb, inum->no_addr);
 	if (inode) {
@@ -156,10 +150,7 @@
 		return ERR_CAST(inode);
 
 out_inode:
-	dentry = d_obtain_alias(inode);
-	if (!IS_ERR(dentry))
-		d_set_d_op(dentry, &gfs2_dops);
-	return dentry;
+	return d_obtain_alias(inode);
 }
 
 static struct dentry *gfs2_fh_to_dentry(struct super_block *sb, struct fid *fid,
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index 2aeabd4..693f447 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -440,7 +440,6 @@
 		iput(inode);
 		return -ENOMEM;
 	}
-	d_set_d_op(dentry, &gfs2_dops);
 	*dptr = dentry;
 	return 0;
 }
@@ -1106,6 +1105,7 @@
 
 	sb->s_magic = GFS2_MAGIC;
 	sb->s_op = &gfs2_super_ops;
+	sb->s_d_op = &gfs2_dops;
 	sb->s_export_op = &gfs2_export_ops;
 	sb->s_xattr = gfs2_xattr_handlers;
 	sb->s_qcop = &gfs2_quotactl_ops;
diff --git a/fs/gfs2/ops_inode.c b/fs/gfs2/ops_inode.c
index 1501db4..040b5a2 100644
--- a/fs/gfs2/ops_inode.c
+++ b/fs/gfs2/ops_inode.c
@@ -106,8 +106,6 @@
 {
 	struct inode *inode = NULL;
 
-	d_set_d_op(dentry, &gfs2_dops);
-
 	inode = gfs2_lookupi(dir, &dentry->d_name, 0);
 	if (inode && IS_ERR(inode))
 		return ERR_CAST(inode);
@@ -1427,6 +1425,10 @@
 	loff_t next = (offset + len - 1) >> sdp->sd_sb.sb_bsize_shift;
 	next = (next + 1) << sdp->sd_sb.sb_bsize_shift;
 
+	/* We only support the FALLOC_FL_KEEP_SIZE mode */
+	if (mode && (mode != FALLOC_FL_KEEP_SIZE))
+		return -EOPNOTSUPP;
+
 	offset = (offset >> sdp->sd_sb.sb_bsize_shift) <<
 		 sdp->sd_sb.sb_bsize_shift;
 
diff --git a/fs/hfs/dir.c b/fs/hfs/dir.c
index ea4aefe..afa66aa 100644
--- a/fs/hfs/dir.c
+++ b/fs/hfs/dir.c
@@ -25,8 +25,6 @@
 	struct inode *inode = NULL;
 	int res;
 
-	d_set_d_op(dentry, &hfs_dentry_operations);
-
 	hfs_find_init(HFS_SB(dir->i_sb)->cat_tree, &fd);
 	hfs_cat_build_key(dir->i_sb, fd.search_key, dir->i_ino, &dentry->d_name);
 	res = hfs_brec_read(&fd, &rec, sizeof(rec));
diff --git a/fs/hfs/super.c b/fs/hfs/super.c
index 0bef62a..1b55f70 100644
--- a/fs/hfs/super.c
+++ b/fs/hfs/super.c
@@ -429,13 +429,12 @@
 	if (!root_inode)
 		goto bail_no_root;
 
+	sb->s_d_op = &hfs_dentry_operations;
 	res = -ENOMEM;
 	sb->s_root = d_alloc_root(root_inode);
 	if (!sb->s_root)
 		goto bail_iput;
 
-	d_set_d_op(sb->s_root, &hfs_dentry_operations);
-
 	/* everything's okay */
 	return 0;
 
diff --git a/fs/hfsplus/dir.c b/fs/hfsplus/dir.c
index f896dc84..4df5059 100644
--- a/fs/hfsplus/dir.c
+++ b/fs/hfsplus/dir.c
@@ -37,7 +37,6 @@
 
 	sb = dir->i_sb;
 
-	d_set_d_op(dentry, &hfsplus_dentry_operations);
 	dentry->d_fsdata = NULL;
 	hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd);
 	hfsplus_cat_build_key(sb, fd.search_key, dir->i_ino, &dentry->d_name);
diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c
index 6ee6ad2..9a3b479 100644
--- a/fs/hfsplus/super.c
+++ b/fs/hfsplus/super.c
@@ -444,13 +444,13 @@
 		err = PTR_ERR(root);
 		goto cleanup;
 	}
+	sb->s_d_op = &hfsplus_dentry_operations;
 	sb->s_root = d_alloc_root(root);
 	if (!sb->s_root) {
 		iput(root);
 		err = -ENOMEM;
 		goto cleanup;
 	}
-	d_set_d_op(sb->s_root, &hfsplus_dentry_operations);
 
 	str.len = sizeof(HFSP_HIDDENDIR_NAME) - 1;
 	str.name = HFSP_HIDDENDIR_NAME;
diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
index d3244d9..2638c834e 100644
--- a/fs/hostfs/hostfs_kern.c
+++ b/fs/hostfs/hostfs_kern.c
@@ -612,7 +612,6 @@
 		goto out_put;
 
 	d_add(dentry, inode);
-	d_set_d_op(dentry, &hostfs_dentry_ops);
 	return NULL;
 
  out_put:
@@ -922,6 +921,7 @@
 	sb->s_blocksize_bits = 10;
 	sb->s_magic = HOSTFS_SUPER_MAGIC;
 	sb->s_op = &hostfs_sbops;
+	sb->s_d_op = &hostfs_dentry_ops;
 	sb->s_maxbytes = MAX_LFS_FILESIZE;
 
 	/* NULL is printed as <NULL> by sprintf: avoid that. */
diff --git a/fs/hpfs/dentry.c b/fs/hpfs/dentry.c
index 32c13a9..05d4816 100644
--- a/fs/hpfs/dentry.c
+++ b/fs/hpfs/dentry.c
@@ -58,12 +58,7 @@
 	return 0;
 }
 
-static const struct dentry_operations hpfs_dentry_operations = {
+const struct dentry_operations hpfs_dentry_operations = {
 	.d_hash		= hpfs_hash_dentry,
 	.d_compare	= hpfs_compare_dentry,
 };
-
-void hpfs_set_dentry_operations(struct dentry *dentry)
-{
-	d_set_d_op(dentry, &hpfs_dentry_operations);
-}
diff --git a/fs/hpfs/dir.c b/fs/hpfs/dir.c
index 2338130..d32f63a 100644
--- a/fs/hpfs/dir.c
+++ b/fs/hpfs/dir.c
@@ -298,7 +298,6 @@
 
 	end:
 	end_add:
-	hpfs_set_dentry_operations(dentry);
 	unlock_kernel();
 	d_add(dentry, result);
 	return NULL;
diff --git a/fs/hpfs/hpfs_fn.h b/fs/hpfs/hpfs_fn.h
index 2fee17d..1c43dbe 100644
--- a/fs/hpfs/hpfs_fn.h
+++ b/fs/hpfs/hpfs_fn.h
@@ -233,7 +233,7 @@
 
 /* dentry.c */
 
-void hpfs_set_dentry_operations(struct dentry *);
+extern const struct dentry_operations hpfs_dentry_operations;
 
 /* dir.c */
 
diff --git a/fs/hpfs/super.c b/fs/hpfs/super.c
index 49935ba..b30426b 100644
--- a/fs/hpfs/super.c
+++ b/fs/hpfs/super.c
@@ -550,6 +550,7 @@
 	/* Fill superblock stuff */
 	s->s_magic = HPFS_SUPER_MAGIC;
 	s->s_op = &hpfs_sops;
+	s->s_d_op = &hpfs_dentry_operations;
 
 	sbi->sb_root = superblock->root;
 	sbi->sb_fs_size = superblock->n_sectors;
@@ -651,7 +652,6 @@
 		iput(root);
 		goto bail0;
 	}
-	hpfs_set_dentry_operations(s->s_root);
 
 	/*
 	 * find the root directory's . pointer & finish filling in the inode
diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c
index 844a790..a0f3833 100644
--- a/fs/isofs/inode.c
+++ b/fs/isofs/inode.c
@@ -939,17 +939,18 @@
 		goto out_iput;
 	}
 
-	/* get the root dentry */
-	s->s_root = d_alloc_root(inode);
-	if (!(s->s_root))
-		goto out_no_root;
-
 	table = 0;
 	if (joliet_level)
 		table += 2;
 	if (opt.check == 'r')
 		table++;
-	d_set_d_op(s->s_root, &isofs_dentry_ops[table]);
+
+	s->s_d_op = &isofs_dentry_ops[table];
+
+	/* get the root dentry */
+	s->s_root = d_alloc_root(inode);
+	if (!(s->s_root))
+		goto out_no_root;
 
 	kfree(opt.iocharset);
 
diff --git a/fs/isofs/namei.c b/fs/isofs/namei.c
index 679a849..4fb3e80 100644
--- a/fs/isofs/namei.c
+++ b/fs/isofs/namei.c
@@ -172,8 +172,6 @@
 	struct inode *inode;
 	struct page *page;
 
-	d_set_d_op(dentry, dir->i_sb->s_root->d_op);
-
 	page = alloc_page(GFP_USER);
 	if (!page)
 		return ERR_PTR(-ENOMEM);
diff --git a/fs/jbd/transaction.c b/fs/jbd/transaction.c
index 846a3f3..5b2e4c3 100644
--- a/fs/jbd/transaction.c
+++ b/fs/jbd/transaction.c
@@ -207,7 +207,7 @@
 	 * the committing transaction.  Really, we only need to give it
 	 * committing_transaction->t_outstanding_credits plus "enough" for
 	 * the log control blocks.
-	 * Also, this test is inconsitent with the matching one in
+	 * Also, this test is inconsistent with the matching one in
 	 * journal_extend().
 	 */
 	if (__log_space_left(journal) < jbd_space_needed(journal)) {
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index f837ba9..9e46869 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -43,6 +43,7 @@
 #include <linux/vmalloc.h>
 #include <linux/backing-dev.h>
 #include <linux/bitops.h>
+#include <linux/ratelimit.h>
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/jbd2.h>
@@ -93,6 +94,7 @@
 EXPORT_SYMBOL(jbd2_journal_init_jbd_inode);
 EXPORT_SYMBOL(jbd2_journal_release_jbd_inode);
 EXPORT_SYMBOL(jbd2_journal_begin_ordered_truncate);
+EXPORT_SYMBOL(jbd2_inode_cache);
 
 static int journal_convert_superblock_v1(journal_t *, journal_superblock_t *);
 static void __journal_abort_soft (journal_t *journal, int errno);
@@ -827,7 +829,7 @@
 
 	journal = kzalloc(sizeof(*journal), GFP_KERNEL);
 	if (!journal)
-		goto fail;
+		return NULL;
 
 	init_waitqueue_head(&journal->j_wait_transaction_locked);
 	init_waitqueue_head(&journal->j_wait_logspace);
@@ -852,14 +854,12 @@
 	err = jbd2_journal_init_revoke(journal, JOURNAL_REVOKE_DEFAULT_HASH);
 	if (err) {
 		kfree(journal);
-		goto fail;
+		return NULL;
 	}
 
 	spin_lock_init(&journal->j_history_lock);
 
 	return journal;
-fail:
-	return NULL;
 }
 
 /* jbd2_journal_init_dev and jbd2_journal_init_inode:
@@ -1982,7 +1982,6 @@
 static struct journal_head *journal_alloc_journal_head(void)
 {
 	struct journal_head *ret;
-	static unsigned long last_warning;
 
 #ifdef CONFIG_JBD2_DEBUG
 	atomic_inc(&nr_journal_heads);
@@ -1990,11 +1989,7 @@
 	ret = kmem_cache_alloc(jbd2_journal_head_cache, GFP_NOFS);
 	if (!ret) {
 		jbd_debug(1, "out of memory for journal_head\n");
-		if (time_after(jiffies, last_warning + 5*HZ)) {
-			printk(KERN_NOTICE "ENOMEM in %s, retrying.\n",
-			       __func__);
-			last_warning = jiffies;
-		}
+		pr_notice_ratelimited("ENOMEM in %s, retrying.\n", __func__);
 		while (!ret) {
 			yield();
 			ret = kmem_cache_alloc(jbd2_journal_head_cache, GFP_NOFS);
@@ -2292,17 +2287,19 @@
 
 #endif
 
-struct kmem_cache *jbd2_handle_cache;
+struct kmem_cache *jbd2_handle_cache, *jbd2_inode_cache;
 
 static int __init journal_init_handle_cache(void)
 {
-	jbd2_handle_cache = kmem_cache_create("jbd2_journal_handle",
-				sizeof(handle_t),
-				0,		/* offset */
-				SLAB_TEMPORARY,	/* flags */
-				NULL);		/* ctor */
+	jbd2_handle_cache = KMEM_CACHE(jbd2_journal_handle, SLAB_TEMPORARY);
 	if (jbd2_handle_cache == NULL) {
-		printk(KERN_EMERG "JBD: failed to create handle cache\n");
+		printk(KERN_EMERG "JBD2: failed to create handle cache\n");
+		return -ENOMEM;
+	}
+	jbd2_inode_cache = KMEM_CACHE(jbd2_inode, 0);
+	if (jbd2_inode_cache == NULL) {
+		printk(KERN_EMERG "JBD2: failed to create inode cache\n");
+		kmem_cache_destroy(jbd2_handle_cache);
 		return -ENOMEM;
 	}
 	return 0;
@@ -2312,6 +2309,9 @@
 {
 	if (jbd2_handle_cache)
 		kmem_cache_destroy(jbd2_handle_cache);
+	if (jbd2_inode_cache)
+		kmem_cache_destroy(jbd2_inode_cache);
+
 }
 
 /*
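
Besides exporting the new jbd2_inode_cache and dropping the redundant fail: label in
the journal allocation helper, the jbd2/journal.c hunks switch to two stock helpers:
KMEM_CACHE(), which derives the cache name and object size from the structure type,
and pr_notice_ratelimited(), which replaces the hand-rolled "last_warning + 5*HZ"
throttling. A self-contained sketch of both, with a made-up structure and message:

	#include <linux/init.h>
	#include <linux/slab.h>
	#include <linux/ratelimit.h>

	struct example_obj {
		int payload;
	};

	static struct kmem_cache *example_cache;

	static int __init example_cache_init(void)
	{
		/* same as kmem_cache_create("example_obj", sizeof(struct example_obj),
		 * __alignof__(struct example_obj), 0, NULL) */
		example_cache = KMEM_CACHE(example_obj, 0);
		if (!example_cache) {
			pr_notice_ratelimited("example: cache creation failed\n");
			return -ENOMEM;
		}
		return 0;
	}
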
diff --git a/fs/jbd2/recovery.c b/fs/jbd2/recovery.c
index 2bc4d5f..1cad869 100644
--- a/fs/jbd2/recovery.c
+++ b/fs/jbd2/recovery.c
@@ -299,10 +299,10 @@
 #ifdef CONFIG_JBD2_DEBUG
 		int dropped = info.end_transaction - 
 			be32_to_cpu(journal->j_superblock->s_sequence);
-#endif
 		jbd_debug(1,
 			  "JBD: ignoring %d transaction%s from the journal.\n",
 			  dropped, (dropped == 1) ? "" : "s");
+#endif
 		journal->j_transaction_sequence = ++info.end_transaction;
 	}
 
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index 6bf0a24..faad2bd 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -251,7 +251,7 @@
 	 * the committing transaction.  Really, we only need to give it
 	 * committing_transaction->t_outstanding_credits plus "enough" for
 	 * the log control blocks.
-	 * Also, this test is inconsitent with the matching one in
+	 * Also, this test is inconsistent with the matching one in
 	 * jbd2_journal_extend().
 	 */
 	if (__jbd2_log_space_left(journal) < jbd_space_needed(journal)) {
@@ -340,9 +340,7 @@
 		jbd2_free_handle(handle);
 		current->journal_info = NULL;
 		handle = ERR_PTR(err);
-		goto out;
 	}
-out:
 	return handle;
 }
 EXPORT_SYMBOL(jbd2__journal_start);
@@ -589,7 +587,7 @@
 	transaction = handle->h_transaction;
 	journal = transaction->t_journal;
 
-	jbd_debug(5, "buffer_head %p, force_copy %d\n", jh, force_copy);
+	jbd_debug(5, "journal_head %p, force_copy %d\n", jh, force_copy);
 
 	JBUFFER_TRACE(jh, "entry");
 repeat:
@@ -774,7 +772,7 @@
 		J_EXPECT_JH(jh, buffer_uptodate(jh2bh(jh)),
 			    "Possible IO failure.\n");
 		page = jh2bh(jh)->b_page;
-		offset = ((unsigned long) jh2bh(jh)->b_data) & ~PAGE_MASK;
+		offset = offset_in_page(jh2bh(jh)->b_data);
 		source = kmap_atomic(page, KM_USER0);
 		/* Fire data frozen trigger just before we copy the data */
 		jbd2_buffer_frozen_trigger(jh, source + offset,
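
The last transaction.c hunk swaps the open-coded "(unsigned long) b_data & ~PAGE_MASK"
for offset_in_page(), which yields the page-relative byte offset of an address. A
one-line illustration (the wrapper name is made up):

	#include <linux/mm.h>

	static unsigned long page_offset_of(const void *addr)
	{
		/* equivalent to ((unsigned long)addr & ~PAGE_MASK) */
		return offset_in_page(addr);
	}
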
diff --git a/fs/jfs/namei.c b/fs/jfs/namei.c
index 4414e3a..81ead85 100644
--- a/fs/jfs/namei.c
+++ b/fs/jfs/namei.c
@@ -1465,9 +1465,6 @@
 
 	jfs_info("jfs_lookup: name = %s", name);
 
-	if (JFS_SBI(dip->i_sb)->mntflag & JFS_OS2)
-		d_set_d_op(dentry, &jfs_ci_dentry_operations);
-
 	if ((name[0] == '.') && (len == 1))
 		inum = dip->i_ino;
 	else if (strcmp(name, "..") == 0)
@@ -1492,12 +1489,7 @@
 		return ERR_CAST(ip);
 	}
 
-	dentry = d_splice_alias(ip, dentry);
-
-	if (dentry && (JFS_SBI(dip->i_sb)->mntflag & JFS_OS2))
-		d_set_d_op(dentry, &jfs_ci_dentry_operations);
-
-	return dentry;
+	return d_splice_alias(ip, dentry);
 }
 
 static struct inode *jfs_nfs_get_inode(struct super_block *sb,
diff --git a/fs/jfs/super.c b/fs/jfs/super.c
index 3150d76..eeca48a 100644
--- a/fs/jfs/super.c
+++ b/fs/jfs/super.c
@@ -515,6 +515,9 @@
 
 	sb->s_magic = JFS_SUPER_MAGIC;
 
+	if (sbi->mntflag & JFS_OS2)
+		sb->s_d_op = &jfs_ci_dentry_operations;
+
 	inode = jfs_iget(sb, ROOT_I);
 	if (IS_ERR(inode)) {
 		ret = PTR_ERR(inode);
@@ -524,9 +527,6 @@
 	if (!sb->s_root)
 		goto out_no_root;
 
-	if (sbi->mntflag & JFS_OS2)
-		d_set_d_op(sb->s_root, &jfs_ci_dentry_operations);
-
 	/* logical blocks are represented by 40 bits in pxd_t, etc. */
 	sb->s_maxbytes = ((u64) sb->s_blocksize) << 40;
 #if BITS_PER_LONG == 32
diff --git a/fs/libfs.c b/fs/libfs.c
index 889311e..c88eab5 100644
--- a/fs/libfs.c
+++ b/fs/libfs.c
@@ -217,7 +217,8 @@
  * will never be mountable)
  */
 struct dentry *mount_pseudo(struct file_system_type *fs_type, char *name,
-	const struct super_operations *ops, unsigned long magic)
+	const struct super_operations *ops,
+	const struct dentry_operations *dops, unsigned long magic)
 {
 	struct super_block *s = sget(fs_type, NULL, set_anon_super, NULL);
 	struct dentry *dentry;
@@ -254,6 +255,7 @@
 	dentry->d_parent = dentry;
 	d_instantiate(dentry, root);
 	s->s_root = dentry;
+	s->s_d_op = dops;
 	s->s_flags |= MS_ACTIVE;
 	return dget(s->s_root);
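
mount_pseudo() now also takes the default dentry_operations for the pseudo filesystem
and records them in s_d_op, matching the s_d_op conversions elsewhere in this merge.
A hypothetical caller would pass them at mount time; examplefs_ops, examplefs_dentry_ops
and EXAMPLEFS_MAGIC below are invented names:

	static struct dentry *examplefs_mount(struct file_system_type *fs_type,
					      int flags, const char *dev_name,
					      void *data)
	{
		return mount_pseudo(fs_type, "examplefs:", &examplefs_ops,
				    &examplefs_dentry_ops, EXAMPLEFS_MAGIC);
	}
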
 
diff --git a/fs/lockd/Makefile b/fs/lockd/Makefile
index 97f6073..ca58d64 100644
--- a/fs/lockd/Makefile
+++ b/fs/lockd/Makefile
@@ -4,7 +4,7 @@
 
 obj-$(CONFIG_LOCKD) += lockd.o
 
-lockd-objs-y := clntlock.o clntproc.o host.o svc.o svclock.o svcshare.o \
-	        svcproc.o svcsubs.o mon.o xdr.o grace.o
-lockd-objs-$(CONFIG_LOCKD_V4) += xdr4.o svc4proc.o
+lockd-objs-y := clntlock.o clntproc.o clntxdr.o host.o svc.o svclock.o \
+	        svcshare.o svcproc.o svcsubs.o mon.o xdr.o grace.o
+lockd-objs-$(CONFIG_LOCKD_V4) += clnt4xdr.o xdr4.o svc4proc.o
 lockd-objs		      := $(lockd-objs-y)
diff --git a/fs/lockd/clnt4xdr.c b/fs/lockd/clnt4xdr.c
new file mode 100644
index 0000000..f848b52
--- /dev/null
+++ b/fs/lockd/clnt4xdr.c
@@ -0,0 +1,605 @@
+/*
+ * linux/fs/lockd/clnt4xdr.c
+ *
+ * XDR functions to encode/decode NLM version 4 RPC arguments and results.
+ *
+ * NLM client-side only.
+ *
+ * Copyright (C) 2010, Oracle.  All rights reserved.
+ */
+
+#include <linux/types.h>
+#include <linux/sunrpc/xdr.h>
+#include <linux/sunrpc/clnt.h>
+#include <linux/sunrpc/stats.h>
+#include <linux/lockd/lockd.h>
+
+#define NLMDBG_FACILITY		NLMDBG_XDR
+
+#if (NLMCLNT_OHSIZE > XDR_MAX_NETOBJ)
+#  error "NLM host name cannot be larger than XDR_MAX_NETOBJ!"
+#endif
+
+#if (NLMCLNT_OHSIZE > NLM_MAXSTRLEN)
+#  error "NLM host name cannot be larger than NLM's maximum string length!"
+#endif
+
+/*
+ * Declare the space requirements for NLM arguments and replies as
+ * number of 32bit-words
+ */
+#define NLM4_void_sz		(0)
+#define NLM4_cookie_sz		(1+(NLM_MAXCOOKIELEN>>2))
+#define NLM4_caller_sz		(1+(NLMCLNT_OHSIZE>>2))
+#define NLM4_owner_sz		(1+(NLMCLNT_OHSIZE>>2))
+#define NLM4_fhandle_sz		(1+(NFS3_FHSIZE>>2))
+#define NLM4_lock_sz		(5+NLM4_caller_sz+NLM4_owner_sz+NLM4_fhandle_sz)
+#define NLM4_holder_sz		(6+NLM4_owner_sz)
+
+#define NLM4_testargs_sz	(NLM4_cookie_sz+1+NLM4_lock_sz)
+#define NLM4_lockargs_sz	(NLM4_cookie_sz+4+NLM4_lock_sz)
+#define NLM4_cancargs_sz	(NLM4_cookie_sz+2+NLM4_lock_sz)
+#define NLM4_unlockargs_sz	(NLM4_cookie_sz+NLM4_lock_sz)
+
+#define NLM4_testres_sz		(NLM4_cookie_sz+1+NLM4_holder_sz)
+#define NLM4_res_sz		(NLM4_cookie_sz+1)
+#define NLM4_norep_sz		(0)
+
+
+static s64 loff_t_to_s64(loff_t offset)
+{
+	s64 res;
+
+	if (offset >= NLM4_OFFSET_MAX)
+		res = NLM4_OFFSET_MAX;
+	else if (offset <= -NLM4_OFFSET_MAX)
+		res = -NLM4_OFFSET_MAX;
+	else
+		res = offset;
+	return res;
+}
+
+static void nlm4_compute_offsets(const struct nlm_lock *lock,
+				 u64 *l_offset, u64 *l_len)
+{
+	const struct file_lock *fl = &lock->fl;
+
+	BUG_ON(fl->fl_start > NLM4_OFFSET_MAX);
+	BUG_ON(fl->fl_end > NLM4_OFFSET_MAX &&
+				fl->fl_end != OFFSET_MAX);
+
+	*l_offset = loff_t_to_s64(fl->fl_start);
+	if (fl->fl_end == OFFSET_MAX)
+		*l_len = 0;
+	else
+		*l_len = loff_t_to_s64(fl->fl_end - fl->fl_start + 1);
+}
+
+/*
+ * Handle decode buffer overflows out-of-line.
+ */
+static void print_overflow_msg(const char *func, const struct xdr_stream *xdr)
+{
+	dprintk("lockd: %s prematurely hit the end of our receive buffer. "
+		"Remaining buffer length is %tu words.\n",
+		func, xdr->end - xdr->p);
+}
+
+
+/*
+ * Encode/decode NLMv4 basic data types
+ *
+ * Basic NLMv4 data types are defined in Appendix II, section 6.1.4
+ * of RFC 1813: "NFS Version 3 Protocol Specification" and in Chapter
+ * 10 of X/Open's "Protocols for Interworking: XNFS, Version 3W".
+ *
+ * Not all basic data types have their own encoding and decoding
+ * functions.  For run-time efficiency, some data types are encoded
+ * or decoded inline.
+ */
+
+static void encode_bool(struct xdr_stream *xdr, const int value)
+{
+	__be32 *p;
+
+	p = xdr_reserve_space(xdr, 4);
+	*p = value ? xdr_one : xdr_zero;
+}
+
+static void encode_int32(struct xdr_stream *xdr, const s32 value)
+{
+	__be32 *p;
+
+	p = xdr_reserve_space(xdr, 4);
+	*p = cpu_to_be32(value);
+}
+
+/*
+ *	typedef opaque netobj<MAXNETOBJ_SZ>
+ */
+static void encode_netobj(struct xdr_stream *xdr,
+			  const u8 *data, const unsigned int length)
+{
+	__be32 *p;
+
+	BUG_ON(length > XDR_MAX_NETOBJ);
+	p = xdr_reserve_space(xdr, 4 + length);
+	xdr_encode_opaque(p, data, length);
+}
+
+static int decode_netobj(struct xdr_stream *xdr,
+			 struct xdr_netobj *obj)
+{
+	u32 length;
+	__be32 *p;
+
+	p = xdr_inline_decode(xdr, 4);
+	if (unlikely(p == NULL))
+		goto out_overflow;
+	length = be32_to_cpup(p++);
+	if (unlikely(length > XDR_MAX_NETOBJ))
+		goto out_size;
+	obj->len = length;
+	obj->data = (u8 *)p;
+	return 0;
+out_size:
+	dprintk("NFS: returned netobj was too long: %u\n", length);
+	return -EIO;
+out_overflow:
+	print_overflow_msg(__func__, xdr);
+	return -EIO;
+}
+
+/*
+ *	netobj cookie;
+ */
+static void encode_cookie(struct xdr_stream *xdr,
+			  const struct nlm_cookie *cookie)
+{
+	BUG_ON(cookie->len > NLM_MAXCOOKIELEN);
+	encode_netobj(xdr, (u8 *)&cookie->data, cookie->len);
+}
+
+static int decode_cookie(struct xdr_stream *xdr,
+			     struct nlm_cookie *cookie)
+{
+	u32 length;
+	__be32 *p;
+
+	p = xdr_inline_decode(xdr, 4);
+	if (unlikely(p == NULL))
+		goto out_overflow;
+	length = be32_to_cpup(p++);
+	/* apparently HPUX can return empty cookies */
+	if (length == 0)
+		goto out_hpux;
+	if (length > NLM_MAXCOOKIELEN)
+		goto out_size;
+	p = xdr_inline_decode(xdr, length);
+	if (unlikely(p == NULL))
+		goto out_overflow;
+	cookie->len = length;
+	memcpy(cookie->data, p, length);
+	return 0;
+out_hpux:
+	cookie->len = 4;
+	memset(cookie->data, 0, 4);
+	return 0;
+out_size:
+	dprintk("NFS: returned cookie was too long: %u\n", length);
+	return -EIO;
+out_overflow:
+	print_overflow_msg(__func__, xdr);
+	return -EIO;
+}
+
+/*
+ *	netobj fh;
+ */
+static void encode_fh(struct xdr_stream *xdr, const struct nfs_fh *fh)
+{
+	BUG_ON(fh->size > NFS3_FHSIZE);
+	encode_netobj(xdr, (u8 *)&fh->data, fh->size);
+}
+
+/*
+ *	enum nlm4_stats {
+ *		NLM4_GRANTED = 0,
+ *		NLM4_DENIED = 1,
+ *		NLM4_DENIED_NOLOCKS = 2,
+ *		NLM4_BLOCKED = 3,
+ *		NLM4_DENIED_GRACE_PERIOD = 4,
+ *		NLM4_DEADLCK = 5,
+ *		NLM4_ROFS = 6,
+ *		NLM4_STALE_FH = 7,
+ *		NLM4_FBIG = 8,
+ *		NLM4_FAILED = 9
+ *	};
+ *
+ *	struct nlm4_stat {
+ *		nlm4_stats stat;
+ *	};
+ *
+ * NB: we don't swap bytes for the NLM status values.  The upper
+ * layers deal directly with the status value in network byte
+ * order.
+ */
+static void encode_nlm4_stat(struct xdr_stream *xdr,
+			     const __be32 stat)
+{
+	__be32 *p;
+
+	BUG_ON(be32_to_cpu(stat) > NLM_FAILED);
+	p = xdr_reserve_space(xdr, 4);
+	*p = stat;
+}
+
+static int decode_nlm4_stat(struct xdr_stream *xdr, __be32 *stat)
+{
+	__be32 *p;
+
+	p = xdr_inline_decode(xdr, 4);
+	if (unlikely(p == NULL))
+		goto out_overflow;
+	if (unlikely(*p > nlm4_failed))
+		goto out_bad_xdr;
+	*stat = *p;
+	return 0;
+out_bad_xdr:
+	dprintk("%s: server returned invalid nlm4_stats value: %u\n",
+			__func__, be32_to_cpup(p));
+	return -EIO;
+out_overflow:
+	print_overflow_msg(__func__, xdr);
+	return -EIO;
+}
+
+/*
+ *	struct nlm4_holder {
+ *		bool	exclusive;
+ *		int32	svid;
+ *		netobj	oh;
+ *		uint64	l_offset;
+ *		uint64	l_len;
+ *	};
+ */
+static void encode_nlm4_holder(struct xdr_stream *xdr,
+			       const struct nlm_res *result)
+{
+	const struct nlm_lock *lock = &result->lock;
+	u64 l_offset, l_len;
+	__be32 *p;
+
+	encode_bool(xdr, lock->fl.fl_type == F_RDLCK);
+	encode_int32(xdr, lock->svid);
+	encode_netobj(xdr, lock->oh.data, lock->oh.len);
+
+	p = xdr_reserve_space(xdr, 4 + 4);
+	nlm4_compute_offsets(lock, &l_offset, &l_len);
+	p = xdr_encode_hyper(p, l_offset);
+	xdr_encode_hyper(p, l_len);
+}
+
+static int decode_nlm4_holder(struct xdr_stream *xdr, struct nlm_res *result)
+{
+	struct nlm_lock *lock = &result->lock;
+	struct file_lock *fl = &lock->fl;
+	u64 l_offset, l_len;
+	u32 exclusive;
+	int error;
+	__be32 *p;
+	s32 end;
+
+	memset(lock, 0, sizeof(*lock));
+	locks_init_lock(fl);
+
+	p = xdr_inline_decode(xdr, 4 + 4);
+	if (unlikely(p == NULL))
+		goto out_overflow;
+	exclusive = be32_to_cpup(p++);
+	lock->svid = be32_to_cpup(p);
+	fl->fl_pid = (pid_t)lock->svid;
+
+	error = decode_netobj(xdr, &lock->oh);
+	if (unlikely(error))
+		goto out;
+
+	p = xdr_inline_decode(xdr, 8 + 8);
+	if (unlikely(p == NULL))
+		goto out_overflow;
+
+	fl->fl_flags = FL_POSIX;
+	fl->fl_type  = exclusive != 0 ? F_WRLCK : F_RDLCK;
+	p = xdr_decode_hyper(p, &l_offset);
+	xdr_decode_hyper(p, &l_len);
+	end = l_offset + l_len - 1;
+
+	fl->fl_start = (loff_t)l_offset;
+	if (l_len == 0 || end < 0)
+		fl->fl_end = OFFSET_MAX;
+	else
+		fl->fl_end = (loff_t)end;
+	error = 0;
+out:
+	return error;
+out_overflow:
+	print_overflow_msg(__func__, xdr);
+	return -EIO;
+}
+
+/*
+ *	string caller_name<LM_MAXSTRLEN>;
+ */
+static void encode_caller_name(struct xdr_stream *xdr, const char *name)
+{
+	/* NB: client-side does not set lock->len */
+	u32 length = strlen(name);
+	__be32 *p;
+
+	BUG_ON(length > NLM_MAXSTRLEN);
+	p = xdr_reserve_space(xdr, 4 + length);
+	xdr_encode_opaque(p, name, length);
+}
+
+/*
+ *	struct nlm4_lock {
+ *		string	caller_name<LM_MAXSTRLEN>;
+ *		netobj	fh;
+ *		netobj	oh;
+ *		int32	svid;
+ *		uint64	l_offset;
+ *		uint64	l_len;
+ *	};
+ */
+static void encode_nlm4_lock(struct xdr_stream *xdr,
+			     const struct nlm_lock *lock)
+{
+	u64 l_offset, l_len;
+	__be32 *p;
+
+	encode_caller_name(xdr, lock->caller);
+	encode_fh(xdr, &lock->fh);
+	encode_netobj(xdr, lock->oh.data, lock->oh.len);
+
+	p = xdr_reserve_space(xdr, 4 + 8 + 8);
+	*p++ = cpu_to_be32(lock->svid);
+
+	nlm4_compute_offsets(lock, &l_offset, &l_len);
+	p = xdr_encode_hyper(p, l_offset);
+	xdr_encode_hyper(p, l_len);
+}
+
+
+/*
+ * NLMv4 XDR encode functions
+ *
+ * NLMv4 argument types are defined in Appendix II of RFC 1813:
+ * "NFS Version 3 Protocol Specification" and Chapter 10 of X/Open's
+ * "Protocols for Interworking: XNFS, Version 3W".
+ */
+
+/*
+ *	struct nlm4_testargs {
+ *		netobj cookie;
+ *		bool exclusive;
+ *		struct nlm4_lock alock;
+ *	};
+ */
+static void nlm4_xdr_enc_testargs(struct rpc_rqst *req,
+				  struct xdr_stream *xdr,
+				  const struct nlm_args *args)
+{
+	const struct nlm_lock *lock = &args->lock;
+
+	encode_cookie(xdr, &args->cookie);
+	encode_bool(xdr, lock->fl.fl_type == F_WRLCK);
+	encode_nlm4_lock(xdr, lock);
+}
+
+/*
+ *	struct nlm4_lockargs {
+ *		netobj cookie;
+ *		bool block;
+ *		bool exclusive;
+ *		struct nlm4_lock alock;
+ *		bool reclaim;
+ *		int state;
+ *	};
+ */
+static void nlm4_xdr_enc_lockargs(struct rpc_rqst *req,
+				  struct xdr_stream *xdr,
+				  const struct nlm_args *args)
+{
+	const struct nlm_lock *lock = &args->lock;
+
+	encode_cookie(xdr, &args->cookie);
+	encode_bool(xdr, args->block);
+	encode_bool(xdr, lock->fl.fl_type == F_WRLCK);
+	encode_nlm4_lock(xdr, lock);
+	encode_bool(xdr, args->reclaim);
+	encode_int32(xdr, args->state);
+}
+
+/*
+ *	struct nlm4_cancargs {
+ *		netobj cookie;
+ *		bool block;
+ *		bool exclusive;
+ *		struct nlm4_lock alock;
+ *	};
+ */
+static void nlm4_xdr_enc_cancargs(struct rpc_rqst *req,
+				  struct xdr_stream *xdr,
+				  const struct nlm_args *args)
+{
+	const struct nlm_lock *lock = &args->lock;
+
+	encode_cookie(xdr, &args->cookie);
+	encode_bool(xdr, args->block);
+	encode_bool(xdr, lock->fl.fl_type == F_WRLCK);
+	encode_nlm4_lock(xdr, lock);
+}
+
+/*
+ *	struct nlm4_unlockargs {
+ *		netobj cookie;
+ *		struct nlm4_lock alock;
+ *	};
+ */
+static void nlm4_xdr_enc_unlockargs(struct rpc_rqst *req,
+				    struct xdr_stream *xdr,
+				    const struct nlm_args *args)
+{
+	const struct nlm_lock *lock = &args->lock;
+
+	encode_cookie(xdr, &args->cookie);
+	encode_nlm4_lock(xdr, lock);
+}
+
+/*
+ *	struct nlm4_res {
+ *		netobj cookie;
+ *		nlm4_stat stat;
+ *	};
+ */
+static void nlm4_xdr_enc_res(struct rpc_rqst *req,
+			     struct xdr_stream *xdr,
+			     const struct nlm_res *result)
+{
+	encode_cookie(xdr, &result->cookie);
+	encode_nlm4_stat(xdr, result->status);
+}
+
+/*
+ *	union nlm4_testrply switch (nlm4_stats stat) {
+ *	case NLM4_DENIED:
+ *		struct nlm4_holder holder;
+ *	default:
+ *		void;
+ *	};
+ *
+ *	struct nlm4_testres {
+ *		netobj cookie;
+ *		nlm4_testrply test_stat;
+ *	};
+ */
+static void nlm4_xdr_enc_testres(struct rpc_rqst *req,
+				 struct xdr_stream *xdr,
+				 const struct nlm_res *result)
+{
+	encode_cookie(xdr, &result->cookie);
+	encode_nlm4_stat(xdr, result->status);
+	if (result->status == nlm_lck_denied)
+		encode_nlm4_holder(xdr, result);
+}
+
+
+/*
+ * NLMv4 XDR decode functions
+ *
+ * NLMv4 argument types are defined in Appendix II of RFC 1813:
+ * "NFS Version 3 Protocol Specification" and Chapter 10 of X/Open's
+ * "Protocols for Interworking: XNFS, Version 3W".
+ */
+
+/*
+ *	union nlm4_testrply switch (nlm4_stats stat) {
+ *	case NLM4_DENIED:
+ *		struct nlm4_holder holder;
+ *	default:
+ *		void;
+ *	};
+ *
+ *	struct nlm4_testres {
+ *		netobj cookie;
+ *		nlm4_testrply test_stat;
+ *	};
+ */
+static int decode_nlm4_testrply(struct xdr_stream *xdr,
+				struct nlm_res *result)
+{
+	int error;
+
+	error = decode_nlm4_stat(xdr, &result->status);
+	if (unlikely(error))
+		goto out;
+	if (result->status == nlm_lck_denied)
+		error = decode_nlm4_holder(xdr, result);
+out:
+	return error;
+}
+
+static int nlm4_xdr_dec_testres(struct rpc_rqst *req,
+				struct xdr_stream *xdr,
+				struct nlm_res *result)
+{
+	int error;
+
+	error = decode_cookie(xdr, &result->cookie);
+	if (unlikely(error))
+		goto out;
+	error = decode_nlm4_testrply(xdr, result);
+out:
+	return error;
+}
+
+/*
+ *	struct nlm4_res {
+ *		netobj cookie;
+ *		nlm4_stat stat;
+ *	};
+ */
+static int nlm4_xdr_dec_res(struct rpc_rqst *req,
+			    struct xdr_stream *xdr,
+			    struct nlm_res *result)
+{
+	int error;
+
+	error = decode_cookie(xdr, &result->cookie);
+	if (unlikely(error))
+		goto out;
+	error = decode_nlm4_stat(xdr, &result->status);
+out:
+	return error;
+}
+
+
+/*
+ * For NLM, a void procedure really returns nothing
+ */
+#define nlm4_xdr_dec_norep	NULL
+
+#define PROC(proc, argtype, restype)					\
+[NLMPROC_##proc] = {							\
+	.p_proc      = NLMPROC_##proc,					\
+	.p_encode    = (kxdreproc_t)nlm4_xdr_enc_##argtype,		\
+	.p_decode    = (kxdrdproc_t)nlm4_xdr_dec_##restype,		\
+	.p_arglen    = NLM4_##argtype##_sz,				\
+	.p_replen    = NLM4_##restype##_sz,				\
+	.p_statidx   = NLMPROC_##proc,					\
+	.p_name      = #proc,						\
+	}
+
+static struct rpc_procinfo	nlm4_procedures[] = {
+	PROC(TEST,		testargs,	testres),
+	PROC(LOCK,		lockargs,	res),
+	PROC(CANCEL,		cancargs,	res),
+	PROC(UNLOCK,		unlockargs,	res),
+	PROC(GRANTED,		testargs,	res),
+	PROC(TEST_MSG,		testargs,	norep),
+	PROC(LOCK_MSG,		lockargs,	norep),
+	PROC(CANCEL_MSG,	cancargs,	norep),
+	PROC(UNLOCK_MSG,	unlockargs,	norep),
+	PROC(GRANTED_MSG,	testargs,	norep),
+	PROC(TEST_RES,		testres,	norep),
+	PROC(LOCK_RES,		res,		norep),
+	PROC(CANCEL_RES,	res,		norep),
+	PROC(UNLOCK_RES,	res,		norep),
+	PROC(GRANTED_RES,	res,		norep),
+};
+
+struct rpc_version	nlm_version4 = {
+	.number		= 4,
+	.nrprocs	= ARRAY_SIZE(nlm4_procedures),
+	.procs		= nlm4_procedures,
+};
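
The PROC() macro at the end of clnt4xdr.c fills one rpc_procinfo slot per NLM
procedure from the encode/decode helpers and the NLM4_*_sz constants defined earlier
in the file. For reference, the PROC(TEST, testargs, testres) entry in the table
expands to roughly:

	[NLMPROC_TEST] = {
		.p_proc      = NLMPROC_TEST,
		.p_encode    = (kxdreproc_t)nlm4_xdr_enc_testargs,
		.p_decode    = (kxdrdproc_t)nlm4_xdr_dec_testres,
		.p_arglen    = NLM4_testargs_sz,
		.p_replen    = NLM4_testres_sz,
		.p_statidx   = NLMPROC_TEST,
		.p_name      = "TEST",
	},
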
diff --git a/fs/lockd/clntlock.c b/fs/lockd/clntlock.c
index 25509eb..8d4ea83 100644
--- a/fs/lockd/clntlock.c
+++ b/fs/lockd/clntlock.c
@@ -79,7 +79,7 @@
  */
 void nlmclnt_done(struct nlm_host *host)
 {
-	nlm_release_host(host);
+	nlmclnt_release_host(host);
 	lockd_down();
 }
 EXPORT_SYMBOL_GPL(nlmclnt_done);
@@ -273,7 +273,7 @@
 	spin_unlock(&nlm_blocked_lock);
 
 	/* Release host handle after use */
-	nlm_release_host(host);
+	nlmclnt_release_host(host);
 	lockd_down();
 	return 0;
 }
diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
index 332c54c..adb45ec 100644
--- a/fs/lockd/clntproc.c
+++ b/fs/lockd/clntproc.c
@@ -58,7 +58,7 @@
 		return;
 	list_del(&lockowner->list);
 	spin_unlock(&lockowner->host->h_lock);
-	nlm_release_host(lockowner->host);
+	nlmclnt_release_host(lockowner->host);
 	kfree(lockowner);
 }
 
@@ -207,22 +207,22 @@
 		printk("nlm_alloc_call: failed, waiting for memory\n");
 		schedule_timeout_interruptible(5*HZ);
 	}
-	nlm_release_host(host);
+	nlmclnt_release_host(host);
 	return NULL;
 }
 
-void nlm_release_call(struct nlm_rqst *call)
+void nlmclnt_release_call(struct nlm_rqst *call)
 {
 	if (!atomic_dec_and_test(&call->a_count))
 		return;
-	nlm_release_host(call->a_host);
+	nlmclnt_release_host(call->a_host);
 	nlmclnt_release_lockargs(call);
 	kfree(call);
 }
 
 static void nlmclnt_rpc_release(void *data)
 {
-	nlm_release_call(data);
+	nlmclnt_release_call(data);
 }
 
 static int nlm_wait_on_grace(wait_queue_head_t *queue)
@@ -436,7 +436,7 @@
 			status = nlm_stat_to_errno(req->a_res.status);
 	}
 out:
-	nlm_release_call(req);
+	nlmclnt_release_call(req);
 	return status;
 }
 
@@ -593,7 +593,7 @@
 out_unblock:
 	nlmclnt_finish_block(block);
 out:
-	nlm_release_call(req);
+	nlmclnt_release_call(req);
 	return status;
 out_unlock:
 	/* Fatal error: ensure that we remove the lock altogether */
@@ -694,7 +694,7 @@
 	/* What to do now? I'm out of my depth... */
 	status = -ENOLCK;
 out:
-	nlm_release_call(req);
+	nlmclnt_release_call(req);
 	return status;
 }
 
@@ -755,7 +755,7 @@
 			NLMPROC_CANCEL, &nlmclnt_cancel_ops);
 	if (status == 0 && req->a_res.status == nlm_lck_denied)
 		status = -ENOLCK;
-	nlm_release_call(req);
+	nlmclnt_release_call(req);
 	return status;
 }
 
diff --git a/fs/lockd/clntxdr.c b/fs/lockd/clntxdr.c
new file mode 100644
index 0000000..180ac34
--- /dev/null
+++ b/fs/lockd/clntxdr.c
@@ -0,0 +1,627 @@
+/*
+ * linux/fs/lockd/clntxdr.c
+ *
+ * XDR functions to encode/decode NLM version 3 RPC arguments and results.
+ * NLM version 3 is backwards compatible with NLM versions 1 and 2.
+ *
+ * NLM client-side only.
+ *
+ * Copyright (C) 2010, Oracle.  All rights reserved.
+ */
+
+#include <linux/types.h>
+#include <linux/sunrpc/xdr.h>
+#include <linux/sunrpc/clnt.h>
+#include <linux/sunrpc/stats.h>
+#include <linux/lockd/lockd.h>
+
+#define NLMDBG_FACILITY		NLMDBG_XDR
+
+#if (NLMCLNT_OHSIZE > XDR_MAX_NETOBJ)
+#  error "NLM host name cannot be larger than XDR_MAX_NETOBJ!"
+#endif
+
+/*
+ * Declare the space requirements for NLM arguments and replies as
+ * number of 32bit-words
+ */
+#define NLM_cookie_sz		(1+(NLM_MAXCOOKIELEN>>2))
+#define NLM_caller_sz		(1+(NLMCLNT_OHSIZE>>2))
+#define NLM_owner_sz		(1+(NLMCLNT_OHSIZE>>2))
+#define NLM_fhandle_sz		(1+(NFS2_FHSIZE>>2))
+#define NLM_lock_sz		(3+NLM_caller_sz+NLM_owner_sz+NLM_fhandle_sz)
+#define NLM_holder_sz		(4+NLM_owner_sz)
+
+#define NLM_testargs_sz		(NLM_cookie_sz+1+NLM_lock_sz)
+#define NLM_lockargs_sz		(NLM_cookie_sz+4+NLM_lock_sz)
+#define NLM_cancargs_sz		(NLM_cookie_sz+2+NLM_lock_sz)
+#define NLM_unlockargs_sz	(NLM_cookie_sz+NLM_lock_sz)
+
+#define NLM_testres_sz		(NLM_cookie_sz+1+NLM_holder_sz)
+#define NLM_res_sz		(NLM_cookie_sz+1)
+#define NLM_norep_sz		(0)
+
+
+static s32 loff_t_to_s32(loff_t offset)
+{
+	s32 res;
+
+	if (offset >= NLM_OFFSET_MAX)
+		res = NLM_OFFSET_MAX;
+	else if (offset <= -NLM_OFFSET_MAX)
+		res = -NLM_OFFSET_MAX;
+	else
+		res = offset;
+	return res;
+}
+
+static void nlm_compute_offsets(const struct nlm_lock *lock,
+				u32 *l_offset, u32 *l_len)
+{
+	const struct file_lock *fl = &lock->fl;
+
+	BUG_ON(fl->fl_start > NLM_OFFSET_MAX);
+	BUG_ON(fl->fl_end > NLM_OFFSET_MAX &&
+				fl->fl_end != OFFSET_MAX);
+
+	*l_offset = loff_t_to_s32(fl->fl_start);
+	if (fl->fl_end == OFFSET_MAX)
+		*l_len = 0;
+	else
+		*l_len = loff_t_to_s32(fl->fl_end - fl->fl_start + 1);
+}
+
+/*
+ * Handle decode buffer overflows out-of-line.
+ */
+static void print_overflow_msg(const char *func, const struct xdr_stream *xdr)
+{
+	dprintk("lockd: %s prematurely hit the end of our receive buffer. "
+		"Remaining buffer length is %tu words.\n",
+		func, xdr->end - xdr->p);
+}
+
+
+/*
+ * Encode/decode NLMv3 basic data types
+ *
+ * Basic NLMv3 data types are not defined in an IETF standards
+ * document.  X/Open has a description of these data types that
+ * is useful.  See Chapter 10 of "Protocols for Interworking:
+ * XNFS, Version 3W".
+ *
+ * Not all basic data types have their own encoding and decoding
+ * functions.  For run-time efficiency, some data types are encoded
+ * or decoded inline.
+ */
+
+static void encode_bool(struct xdr_stream *xdr, const int value)
+{
+	__be32 *p;
+
+	p = xdr_reserve_space(xdr, 4);
+	*p = value ? xdr_one : xdr_zero;
+}
+
+static void encode_int32(struct xdr_stream *xdr, const s32 value)
+{
+	__be32 *p;
+
+	p = xdr_reserve_space(xdr, 4);
+	*p = cpu_to_be32(value);
+}
+
+/*
+ *	typedef opaque netobj<MAXNETOBJ_SZ>
+ */
+static void encode_netobj(struct xdr_stream *xdr,
+			  const u8 *data, const unsigned int length)
+{
+	__be32 *p;
+
+	BUG_ON(length > XDR_MAX_NETOBJ);
+	p = xdr_reserve_space(xdr, 4 + length);
+	xdr_encode_opaque(p, data, length);
+}
+
+static int decode_netobj(struct xdr_stream *xdr,
+			 struct xdr_netobj *obj)
+{
+	u32 length;
+	__be32 *p;
+
+	p = xdr_inline_decode(xdr, 4);
+	if (unlikely(p == NULL))
+		goto out_overflow;
+	length = be32_to_cpup(p++);
+	if (unlikely(length > XDR_MAX_NETOBJ))
+		goto out_size;
+	obj->len = length;
+	obj->data = (u8 *)p;
+	return 0;
+out_size:
+	dprintk("NFS: returned netobj was too long: %u\n", length);
+	return -EIO;
+out_overflow:
+	print_overflow_msg(__func__, xdr);
+	return -EIO;
+}
+
+/*
+ *	netobj cookie;
+ */
+static void encode_cookie(struct xdr_stream *xdr,
+			  const struct nlm_cookie *cookie)
+{
+	BUG_ON(cookie->len > NLM_MAXCOOKIELEN);
+	encode_netobj(xdr, (u8 *)&cookie->data, cookie->len);
+}
+
+static int decode_cookie(struct xdr_stream *xdr,
+			 struct nlm_cookie *cookie)
+{
+	u32 length;
+	__be32 *p;
+
+	p = xdr_inline_decode(xdr, 4);
+	if (unlikely(p == NULL))
+		goto out_overflow;
+	length = be32_to_cpup(p++);
+	/* apparently HPUX can return empty cookies */
+	if (length == 0)
+		goto out_hpux;
+	if (length > NLM_MAXCOOKIELEN)
+		goto out_size;
+	p = xdr_inline_decode(xdr, length);
+	if (unlikely(p == NULL))
+		goto out_overflow;
+	cookie->len = length;
+	memcpy(cookie->data, p, length);
+	return 0;
+out_hpux:
+	cookie->len = 4;
+	memset(cookie->data, 0, 4);
+	return 0;
+out_size:
+	dprintk("NFS: returned cookie was too long: %u\n", length);
+	return -EIO;
+out_overflow:
+	print_overflow_msg(__func__, xdr);
+	return -EIO;
+}
+
+/*
+ *	netobj fh;
+ */
+static void encode_fh(struct xdr_stream *xdr, const struct nfs_fh *fh)
+{
+	BUG_ON(fh->size != NFS2_FHSIZE);
+	encode_netobj(xdr, (u8 *)&fh->data, NFS2_FHSIZE);
+}
+
+/*
+ *	enum nlm_stats {
+ *		LCK_GRANTED = 0,
+ *		LCK_DENIED = 1,
+ *		LCK_DENIED_NOLOCKS = 2,
+ *		LCK_BLOCKED = 3,
+ *		LCK_DENIED_GRACE_PERIOD = 4
+ *	};
+ *
+ *
+ *	struct nlm_stat {
+ *		nlm_stats stat;
+ *	};
+ *
+ * NB: we don't swap bytes for the NLM status values.  The upper
+ * layers deal directly with the status value in network byte
+ * order.
+ */
+
+static void encode_nlm_stat(struct xdr_stream *xdr,
+			    const __be32 stat)
+{
+	__be32 *p;
+
+	BUG_ON(be32_to_cpu(stat) > NLM_LCK_DENIED_GRACE_PERIOD);
+	p = xdr_reserve_space(xdr, 4);
+	*p = stat;
+}
+
+static int decode_nlm_stat(struct xdr_stream *xdr,
+			   __be32 *stat)
+{
+	__be32 *p;
+
+	p = xdr_inline_decode(xdr, 4);
+	if (unlikely(p == NULL))
+		goto out_overflow;
+	if (unlikely(*p > nlm_lck_denied_grace_period))
+		goto out_enum;
+	*stat = *p;
+	return 0;
+out_enum:
+	dprintk("%s: server returned invalid nlm_stats value: %u\n",
+		__func__, be32_to_cpup(p));
+	return -EIO;
+out_overflow:
+	print_overflow_msg(__func__, xdr);
+	return -EIO;
+}
+
+/*
+ *	struct nlm_holder {
+ *		bool exclusive;
+ *		int uppid;
+ *		netobj oh;
+ *		unsigned l_offset;
+ *		unsigned l_len;
+ *	};
+ */
+static void encode_nlm_holder(struct xdr_stream *xdr,
+			      const struct nlm_res *result)
+{
+	const struct nlm_lock *lock = &result->lock;
+	u32 l_offset, l_len;
+	__be32 *p;
+
+	encode_bool(xdr, lock->fl.fl_type == F_RDLCK);
+	encode_int32(xdr, lock->svid);
+	encode_netobj(xdr, lock->oh.data, lock->oh.len);
+
+	p = xdr_reserve_space(xdr, 4 + 4);
+	nlm_compute_offsets(lock, &l_offset, &l_len);
+	*p++ = cpu_to_be32(l_offset);
+	*p   = cpu_to_be32(l_len);
+}
+
+static int decode_nlm_holder(struct xdr_stream *xdr, struct nlm_res *result)
+{
+	struct nlm_lock *lock = &result->lock;
+	struct file_lock *fl = &lock->fl;
+	u32 exclusive, l_offset, l_len;
+	int error;
+	__be32 *p;
+	s32 end;
+
+	memset(lock, 0, sizeof(*lock));
+	locks_init_lock(fl);
+
+	p = xdr_inline_decode(xdr, 4 + 4);
+	if (unlikely(p == NULL))
+		goto out_overflow;
+	exclusive = be32_to_cpup(p++);
+	lock->svid = be32_to_cpup(p);
+	fl->fl_pid = (pid_t)lock->svid;
+
+	error = decode_netobj(xdr, &lock->oh);
+	if (unlikely(error))
+		goto out;
+
+	p = xdr_inline_decode(xdr, 4 + 4);
+	if (unlikely(p == NULL))
+		goto out_overflow;
+
+	fl->fl_flags = FL_POSIX;
+	fl->fl_type  = exclusive != 0 ? F_WRLCK : F_RDLCK;
+	l_offset = be32_to_cpup(p++);
+	l_len = be32_to_cpup(p);
+	end = l_offset + l_len - 1;
+
+	fl->fl_start = (loff_t)l_offset;
+	if (l_len == 0 || end < 0)
+		fl->fl_end = OFFSET_MAX;
+	else
+		fl->fl_end = (loff_t)end;
+	error = 0;
+out:
+	return error;
+out_overflow:
+	print_overflow_msg(__func__, xdr);
+	return -EIO;
+}
+
+/*
+ *	string caller_name<LM_MAXSTRLEN>;
+ */
+static void encode_caller_name(struct xdr_stream *xdr, const char *name)
+{
+	/* NB: client-side does not set lock->len */
+	u32 length = strlen(name);
+	__be32 *p;
+
+	BUG_ON(length > NLM_MAXSTRLEN);
+	p = xdr_reserve_space(xdr, 4 + length);
+	xdr_encode_opaque(p, name, length);
+}
+
+/*
+ *	struct nlm_lock {
+ *		string caller_name<LM_MAXSTRLEN>;
+ *		netobj fh;
+ *		netobj oh;
+ *		int uppid;
+ *		unsigned l_offset;
+ *		unsigned l_len;
+ *	};
+ */
+static void encode_nlm_lock(struct xdr_stream *xdr,
+			    const struct nlm_lock *lock)
+{
+	u32 l_offset, l_len;
+	__be32 *p;
+
+	encode_caller_name(xdr, lock->caller);
+	encode_fh(xdr, &lock->fh);
+	encode_netobj(xdr, lock->oh.data, lock->oh.len);
+
+	p = xdr_reserve_space(xdr, 4 + 4 + 4);
+	*p++ = cpu_to_be32(lock->svid);
+
+	nlm_compute_offsets(lock, &l_offset, &l_len);
+	*p++ = cpu_to_be32(l_offset);
+	*p   = cpu_to_be32(l_len);
+}
+
+
+/*
+ * NLMv3 XDR encode functions
+ *
+ * NLMv3 argument types are defined in Chapter 10 of The Open Group's
+ * "Protocols for Interworking: XNFS, Version 3W".
+ */
+
+/*
+ *	struct nlm_testargs {
+ *		netobj cookie;
+ *		bool exclusive;
+ *		struct nlm_lock alock;
+ *	};
+ */
+static void nlm_xdr_enc_testargs(struct rpc_rqst *req,
+				 struct xdr_stream *xdr,
+				 const struct nlm_args *args)
+{
+	const struct nlm_lock *lock = &args->lock;
+
+	encode_cookie(xdr, &args->cookie);
+	encode_bool(xdr, lock->fl.fl_type == F_WRLCK);
+	encode_nlm_lock(xdr, lock);
+}
+
+/*
+ *	struct nlm_lockargs {
+ *		netobj cookie;
+ *		bool block;
+ *		bool exclusive;
+ *		struct nlm_lock alock;
+ *		bool reclaim;
+ *		int state;
+ *	};
+ */
+static void nlm_xdr_enc_lockargs(struct rpc_rqst *req,
+				 struct xdr_stream *xdr,
+				 const struct nlm_args *args)
+{
+	const struct nlm_lock *lock = &args->lock;
+
+	encode_cookie(xdr, &args->cookie);
+	encode_bool(xdr, args->block);
+	encode_bool(xdr, lock->fl.fl_type == F_WRLCK);
+	encode_nlm_lock(xdr, lock);
+	encode_bool(xdr, args->reclaim);
+	encode_int32(xdr, args->state);
+}
+
+/*
+ *	struct nlm_cancargs {
+ *		netobj cookie;
+ *		bool block;
+ *		bool exclusive;
+ *		struct nlm_lock alock;
+ *	};
+ */
+static void nlm_xdr_enc_cancargs(struct rpc_rqst *req,
+				 struct xdr_stream *xdr,
+				 const struct nlm_args *args)
+{
+	const struct nlm_lock *lock = &args->lock;
+
+	encode_cookie(xdr, &args->cookie);
+	encode_bool(xdr, args->block);
+	encode_bool(xdr, lock->fl.fl_type == F_WRLCK);
+	encode_nlm_lock(xdr, lock);
+}
+
+/*
+ *	struct nlm_unlockargs {
+ *		netobj cookie;
+ *		struct nlm_lock alock;
+ *	};
+ */
+static void nlm_xdr_enc_unlockargs(struct rpc_rqst *req,
+				   struct xdr_stream *xdr,
+				   const struct nlm_args *args)
+{
+	const struct nlm_lock *lock = &args->lock;
+
+	encode_cookie(xdr, &args->cookie);
+	encode_nlm_lock(xdr, lock);
+}
+
+/*
+ *	struct nlm_res {
+ *		netobj cookie;
+ *		nlm_stat stat;
+ *	};
+ */
+static void nlm_xdr_enc_res(struct rpc_rqst *req,
+			    struct xdr_stream *xdr,
+			    const struct nlm_res *result)
+{
+	encode_cookie(xdr, &result->cookie);
+	encode_nlm_stat(xdr, result->status);
+}
+
+/*
+ *	union nlm_testrply switch (nlm_stats stat) {
+ *	case LCK_DENIED:
+ *		struct nlm_holder holder;
+ *	default:
+ *		void;
+ *	};
+ *
+ *	struct nlm_testres {
+ *		netobj cookie;
+ *		nlm_testrply test_stat;
+ *	};
+ */
+static void encode_nlm_testrply(struct xdr_stream *xdr,
+				const struct nlm_res *result)
+{
+	if (result->status == nlm_lck_denied)
+		encode_nlm_holder(xdr, result);
+}
+
+static void nlm_xdr_enc_testres(struct rpc_rqst *req,
+				struct xdr_stream *xdr,
+				const struct nlm_res *result)
+{
+	encode_cookie(xdr, &result->cookie);
+	encode_nlm_stat(xdr, result->status);
+	encode_nlm_testrply(xdr, result);
+}
+
+
+/*
+ * NLMv3 XDR decode functions
+ *
+ * NLMv3 result types are defined in Chapter 10 of The Open Group's
+ * "Protocols for Interworking: XNFS, Version 3W".
+ */
+
+/*
+ *	union nlm_testrply switch (nlm_stats stat) {
+ *	case LCK_DENIED:
+ *		struct nlm_holder holder;
+ *	default:
+ *		void;
+ *	};
+ *
+ *	struct nlm_testres {
+ *		netobj cookie;
+ *		nlm_testrply test_stat;
+ *	};
+ */
+static int decode_nlm_testrply(struct xdr_stream *xdr,
+			       struct nlm_res *result)
+{
+	int error;
+
+	error = decode_nlm_stat(xdr, &result->status);
+	if (unlikely(error))
+		goto out;
+	if (result->status == nlm_lck_denied)
+		error = decode_nlm_holder(xdr, result);
+out:
+	return error;
+}
+
+static int nlm_xdr_dec_testres(struct rpc_rqst *req,
+			       struct xdr_stream *xdr,
+			       struct nlm_res *result)
+{
+	int error;
+
+	error = decode_cookie(xdr, &result->cookie);
+	if (unlikely(error))
+		goto out;
+	error = decode_nlm_testrply(xdr, result);
+out:
+	return error;
+}
+
+/*
+ *	struct nlm_res {
+ *		netobj cookie;
+ *		nlm_stat stat;
+ *	};
+ */
+static int nlm_xdr_dec_res(struct rpc_rqst *req,
+			   struct xdr_stream *xdr,
+			   struct nlm_res *result)
+{
+	int error;
+
+	error = decode_cookie(xdr, &result->cookie);
+	if (unlikely(error))
+		goto out;
+	error = decode_nlm_stat(xdr, &result->status);
+out:
+	return error;
+}
+
+
+/*
+ * For NLM, a void procedure really returns nothing
+ */
+#define nlm_xdr_dec_norep	NULL
+
+#define PROC(proc, argtype, restype)	\
+[NLMPROC_##proc] = {							\
+	.p_proc      = NLMPROC_##proc,					\
+	.p_encode    = (kxdreproc_t)nlm_xdr_enc_##argtype,		\
+	.p_decode    = (kxdrdproc_t)nlm_xdr_dec_##restype,		\
+	.p_arglen    = NLM_##argtype##_sz,				\
+	.p_replen    = NLM_##restype##_sz,				\
+	.p_statidx   = NLMPROC_##proc,					\
+	.p_name      = #proc,						\
+	}
+
+static struct rpc_procinfo	nlm_procedures[] = {
+	PROC(TEST,		testargs,	testres),
+	PROC(LOCK,		lockargs,	res),
+	PROC(CANCEL,		cancargs,	res),
+	PROC(UNLOCK,		unlockargs,	res),
+	PROC(GRANTED,		testargs,	res),
+	PROC(TEST_MSG,		testargs,	norep),
+	PROC(LOCK_MSG,		lockargs,	norep),
+	PROC(CANCEL_MSG,	cancargs,	norep),
+	PROC(UNLOCK_MSG,	unlockargs,	norep),
+	PROC(GRANTED_MSG,	testargs,	norep),
+	PROC(TEST_RES,		testres,	norep),
+	PROC(LOCK_RES,		res,		norep),
+	PROC(CANCEL_RES,	res,		norep),
+	PROC(UNLOCK_RES,	res,		norep),
+	PROC(GRANTED_RES,	res,		norep),
+};
+
+static struct rpc_version	nlm_version1 = {
+		.number		= 1,
+		.nrprocs	= ARRAY_SIZE(nlm_procedures),
+		.procs		= nlm_procedures,
+};
+
+static struct rpc_version	nlm_version3 = {
+		.number		= 3,
+		.nrprocs	= ARRAY_SIZE(nlm_procedures),
+		.procs		= nlm_procedures,
+};
+
+static struct rpc_version	*nlm_versions[] = {
+	[1] = &nlm_version1,
+	[3] = &nlm_version3,
+#ifdef CONFIG_LOCKD_V4
+	[4] = &nlm_version4,
+#endif
+};
+
+static struct rpc_stat		nlm_rpc_stats;
+
+struct rpc_program		nlm_program = {
+		.name		= "lockd",
+		.number		= NLM_PROGRAM,
+		.nrvers		= ARRAY_SIZE(nlm_versions),
+		.version	= nlm_versions,
+		.stats		= &nlm_rpc_stats,
+};
diff --git a/fs/lockd/host.c b/fs/lockd/host.c
index ed0c59f..5f1bcb2 100644
--- a/fs/lockd/host.c
+++ b/fs/lockd/host.c
@@ -25,9 +25,22 @@
 #define NLM_HOST_EXPIRE		(300 * HZ)
 #define NLM_HOST_COLLECT	(120 * HZ)
 
-static struct hlist_head	nlm_hosts[NLM_HOST_NRHASH];
+static struct hlist_head	nlm_server_hosts[NLM_HOST_NRHASH];
+static struct hlist_head	nlm_client_hosts[NLM_HOST_NRHASH];
+
+#define for_each_host(host, pos, chain, table) \
+	for ((chain) = (table); \
+	     (chain) < (table) + NLM_HOST_NRHASH; ++(chain)) \
+		hlist_for_each_entry((host), (pos), (chain), h_hash)
+
+#define for_each_host_safe(host, pos, next, chain, table) \
+	for ((chain) = (table); \
+	     (chain) < (table) + NLM_HOST_NRHASH; ++(chain)) \
+		hlist_for_each_entry_safe((host), (pos), (next), \
+						(chain), h_hash)
+
 static unsigned long		next_gc;
-static int			nrhosts;
+static unsigned long		nrhosts;
 static DEFINE_MUTEX(nlm_host_mutex);
 
 static void			nlm_gc_hosts(void);
@@ -40,8 +53,6 @@
 	const u32		version;	/* NLM version to search for */
 	const char		*hostname;	/* remote's hostname */
 	const size_t		hostname_len;	/* it's length */
-	const struct sockaddr	*src_sap;	/* our address (optional) */
-	const size_t		src_len;	/* it's length */
 	const int		noresvport;	/* use non-priv port */
 };
 
@@ -88,127 +99,83 @@
 }
 
 /*
- * Common host lookup routine for server & client
+ * Allocate and initialize an nlm_host.  Common to both client and server.
  */
-static struct nlm_host *nlm_lookup_host(struct nlm_lookup_host_info *ni)
+static struct nlm_host *nlm_alloc_host(struct nlm_lookup_host_info *ni,
+				       struct nsm_handle *nsm)
 {
-	struct hlist_head *chain;
-	struct hlist_node *pos;
-	struct nlm_host	*host;
-	struct nsm_handle *nsm = NULL;
+	struct nlm_host *host = NULL;
+	unsigned long now = jiffies;
 
-	mutex_lock(&nlm_host_mutex);
-
-	if (time_after_eq(jiffies, next_gc))
-		nlm_gc_hosts();
-
-	/* We may keep several nlm_host objects for a peer, because each
-	 * nlm_host is identified by
-	 * (address, protocol, version, server/client)
-	 * We could probably simplify this a little by putting all those
-	 * different NLM rpc_clients into one single nlm_host object.
-	 * This would allow us to have one nlm_host per address.
-	 */
-	chain = &nlm_hosts[nlm_hash_address(ni->sap)];
-	hlist_for_each_entry(host, pos, chain, h_hash) {
-		if (!rpc_cmp_addr(nlm_addr(host), ni->sap))
-			continue;
-
-		/* See if we have an NSM handle for this client */
-		if (!nsm)
-			nsm = host->h_nsmhandle;
-
-		if (host->h_proto != ni->protocol)
-			continue;
-		if (host->h_version != ni->version)
-			continue;
-		if (host->h_server != ni->server)
-			continue;
-		if (ni->server && ni->src_len != 0 &&
-		    !rpc_cmp_addr(nlm_srcaddr(host), ni->src_sap))
-			continue;
-
-		/* Move to head of hash chain. */
-		hlist_del(&host->h_hash);
-		hlist_add_head(&host->h_hash, chain);
-
-		nlm_get_host(host);
-		dprintk("lockd: nlm_lookup_host found host %s (%s)\n",
-				host->h_name, host->h_addrbuf);
-		goto out;
-	}
-
-	/*
-	 * The host wasn't in our hash table.  If we don't
-	 * have an NSM handle for it yet, create one.
-	 */
-	if (nsm)
+	if (nsm != NULL)
 		atomic_inc(&nsm->sm_count);
 	else {
 		host = NULL;
 		nsm = nsm_get_handle(ni->sap, ni->salen,
 					ni->hostname, ni->hostname_len);
-		if (!nsm) {
-			dprintk("lockd: nlm_lookup_host failed; "
-				"no nsm handle\n");
+		if (unlikely(nsm == NULL)) {
+			dprintk("lockd: %s failed; no nsm handle\n",
+				__func__);
 			goto out;
 		}
 	}
 
-	host = kzalloc(sizeof(*host), GFP_KERNEL);
-	if (!host) {
+	host = kmalloc(sizeof(*host), GFP_KERNEL);
+	if (unlikely(host == NULL)) {
+		dprintk("lockd: %s failed; no memory\n", __func__);
 		nsm_release(nsm);
-		dprintk("lockd: nlm_lookup_host failed; no memory\n");
 		goto out;
 	}
-	host->h_name	   = nsm->sm_name;
-	host->h_addrbuf    = nsm->sm_addrbuf;
+
 	memcpy(nlm_addr(host), ni->sap, ni->salen);
-	host->h_addrlen = ni->salen;
+	host->h_addrlen    = ni->salen;
 	rpc_set_port(nlm_addr(host), 0);
-	memcpy(nlm_srcaddr(host), ni->src_sap, ni->src_len);
-	host->h_srcaddrlen = ni->src_len;
+	host->h_srcaddrlen = 0;
+
+	host->h_rpcclnt    = NULL;
+	host->h_name	   = nsm->sm_name;
 	host->h_version    = ni->version;
 	host->h_proto      = ni->protocol;
-	host->h_rpcclnt    = NULL;
-	mutex_init(&host->h_mutex);
-	host->h_nextrebind = jiffies + NLM_HOST_REBIND;
-	host->h_expires    = jiffies + NLM_HOST_EXPIRE;
-	atomic_set(&host->h_count, 1);
+	host->h_reclaiming = 0;
+	host->h_server     = ni->server;
+	host->h_noresvport = ni->noresvport;
+	host->h_inuse      = 0;
 	init_waitqueue_head(&host->h_gracewait);
 	init_rwsem(&host->h_rwsem);
-	host->h_state      = 0;			/* pseudo NSM state */
-	host->h_nsmstate   = 0;			/* real NSM state */
-	host->h_nsmhandle  = nsm;
-	host->h_server	   = ni->server;
-	host->h_noresvport = ni->noresvport;
-	hlist_add_head(&host->h_hash, chain);
+	host->h_state      = 0;
+	host->h_nsmstate   = 0;
+	host->h_pidcount   = 0;
+	atomic_set(&host->h_count, 1);
+	mutex_init(&host->h_mutex);
+	host->h_nextrebind = now + NLM_HOST_REBIND;
+	host->h_expires    = now + NLM_HOST_EXPIRE;
 	INIT_LIST_HEAD(&host->h_lockowners);
 	spin_lock_init(&host->h_lock);
 	INIT_LIST_HEAD(&host->h_granted);
 	INIT_LIST_HEAD(&host->h_reclaim);
-
-	nrhosts++;
-
-	dprintk("lockd: nlm_lookup_host created host %s\n",
-			host->h_name);
+	host->h_nsmhandle  = nsm;
+	host->h_addrbuf    = nsm->sm_addrbuf;
 
 out:
-	mutex_unlock(&nlm_host_mutex);
 	return host;
 }
 
 /*
- * Destroy a host
+ * Destroy an nlm_host and free associated resources
+ *
+ * Caller must hold nlm_host_mutex.
  */
-static void
-nlm_destroy_host(struct nlm_host *host)
+static void nlm_destroy_host_locked(struct nlm_host *host)
 {
 	struct rpc_clnt	*clnt;
 
+	dprintk("lockd: destroy host %s\n", host->h_name);
+
 	BUG_ON(!list_empty(&host->h_lockowners));
 	BUG_ON(atomic_read(&host->h_count));
 
+	hlist_del_init(&host->h_hash);
+
 	nsm_unmonitor(host);
 	nsm_release(host->h_nsmhandle);
 
@@ -216,6 +183,8 @@
 	if (clnt != NULL)
 		rpc_shutdown_client(clnt);
 	kfree(host);
+
+	nrhosts--;
 }
 
 /**
@@ -249,12 +218,76 @@
 		.hostname_len	= strlen(hostname),
 		.noresvport	= noresvport,
 	};
+	struct hlist_head *chain;
+	struct hlist_node *pos;
+	struct nlm_host	*host;
+	struct nsm_handle *nsm = NULL;
 
 	dprintk("lockd: %s(host='%s', vers=%u, proto=%s)\n", __func__,
 			(hostname ? hostname : "<none>"), version,
 			(protocol == IPPROTO_UDP ? "udp" : "tcp"));
 
-	return nlm_lookup_host(&ni);
+	mutex_lock(&nlm_host_mutex);
+
+	chain = &nlm_client_hosts[nlm_hash_address(sap)];
+	hlist_for_each_entry(host, pos, chain, h_hash) {
+		if (!rpc_cmp_addr(nlm_addr(host), sap))
+			continue;
+
+		/* Same address. Share an NSM handle if we already have one */
+		if (nsm == NULL)
+			nsm = host->h_nsmhandle;
+
+		if (host->h_proto != protocol)
+			continue;
+		if (host->h_version != version)
+			continue;
+
+		nlm_get_host(host);
+		dprintk("lockd: %s found host %s (%s)\n", __func__,
+			host->h_name, host->h_addrbuf);
+		goto out;
+	}
+
+	host = nlm_alloc_host(&ni, nsm);
+	if (unlikely(host == NULL))
+		goto out;
+
+	hlist_add_head(&host->h_hash, chain);
+	nrhosts++;
+
+	dprintk("lockd: %s created host %s (%s)\n", __func__,
+		host->h_name, host->h_addrbuf);
+
+out:
+	mutex_unlock(&nlm_host_mutex);
+	return host;
+}
+
+/**
+ * nlmclnt_release_host - release client nlm_host
+ * @host: nlm_host to release
+ *
+ */
+void nlmclnt_release_host(struct nlm_host *host)
+{
+	if (host == NULL)
+		return;
+
+	dprintk("lockd: release client host %s\n", host->h_name);
+
+	BUG_ON(atomic_read(&host->h_count) < 0);
+	BUG_ON(host->h_server);
+
+	if (atomic_dec_and_test(&host->h_count)) {
+		BUG_ON(!list_empty(&host->h_lockowners));
+		BUG_ON(!list_empty(&host->h_granted));
+		BUG_ON(!list_empty(&host->h_reclaim));
+
+		mutex_lock(&nlm_host_mutex);
+		nlm_destroy_host_locked(host);
+		mutex_unlock(&nlm_host_mutex);
+	}
 }
 
 /**
@@ -279,12 +312,18 @@
 				    const char *hostname,
 				    const size_t hostname_len)
 {
+	struct hlist_head *chain;
+	struct hlist_node *pos;
+	struct nlm_host	*host = NULL;
+	struct nsm_handle *nsm = NULL;
 	struct sockaddr_in sin = {
 		.sin_family	= AF_INET,
 	};
 	struct sockaddr_in6 sin6 = {
 		.sin6_family	= AF_INET6,
 	};
+	struct sockaddr *src_sap;
+	size_t src_len = rqstp->rq_addrlen;
 	struct nlm_lookup_host_info ni = {
 		.server		= 1,
 		.sap		= svc_addr(rqstp),
@@ -293,27 +332,91 @@
 		.version	= rqstp->rq_vers,
 		.hostname	= hostname,
 		.hostname_len	= hostname_len,
-		.src_len	= rqstp->rq_addrlen,
 	};
 
 	dprintk("lockd: %s(host='%*s', vers=%u, proto=%s)\n", __func__,
 			(int)hostname_len, hostname, rqstp->rq_vers,
 			(rqstp->rq_prot == IPPROTO_UDP ? "udp" : "tcp"));
 
+	mutex_lock(&nlm_host_mutex);
+
 	switch (ni.sap->sa_family) {
 	case AF_INET:
 		sin.sin_addr.s_addr = rqstp->rq_daddr.addr.s_addr;
-		ni.src_sap = (struct sockaddr *)&sin;
+		src_sap = (struct sockaddr *)&sin;
 		break;
 	case AF_INET6:
 		ipv6_addr_copy(&sin6.sin6_addr, &rqstp->rq_daddr.addr6);
-		ni.src_sap = (struct sockaddr *)&sin6;
+		src_sap = (struct sockaddr *)&sin6;
 		break;
 	default:
-		return NULL;
+		dprintk("lockd: %s failed; unrecognized address family\n",
+			__func__);
+		goto out;
 	}
 
-	return nlm_lookup_host(&ni);
+	if (time_after_eq(jiffies, next_gc))
+		nlm_gc_hosts();
+
+	chain = &nlm_server_hosts[nlm_hash_address(ni.sap)];
+	hlist_for_each_entry(host, pos, chain, h_hash) {
+		if (!rpc_cmp_addr(nlm_addr(host), ni.sap))
+			continue;
+
+		/* Same address. Share an NSM handle if we already have one */
+		if (nsm == NULL)
+			nsm = host->h_nsmhandle;
+
+		if (host->h_proto != ni.protocol)
+			continue;
+		if (host->h_version != ni.version)
+			continue;
+		if (!rpc_cmp_addr(nlm_srcaddr(host), src_sap))
+			continue;
+
+		/* Move to head of hash chain. */
+		hlist_del(&host->h_hash);
+		hlist_add_head(&host->h_hash, chain);
+
+		nlm_get_host(host);
+		dprintk("lockd: %s found host %s (%s)\n",
+			__func__, host->h_name, host->h_addrbuf);
+		goto out;
+	}
+
+	host = nlm_alloc_host(&ni, nsm);
+	if (unlikely(host == NULL))
+		goto out;
+
+	memcpy(nlm_srcaddr(host), src_sap, src_len);
+	host->h_srcaddrlen = src_len;
+	hlist_add_head(&host->h_hash, chain);
+	nrhosts++;
+
+	dprintk("lockd: %s created host %s (%s)\n",
+		__func__, host->h_name, host->h_addrbuf);
+
+out:
+	mutex_unlock(&nlm_host_mutex);
+	return host;
+}
+
+/**
+ * nlmsvc_release_host - release server nlm_host
+ * @host: nlm_host to release
+ *
+ * Host is destroyed later in nlm_gc_hosts().
+ */
+void nlmsvc_release_host(struct nlm_host *host)
+{
+	if (host == NULL)
+		return;
+
+	dprintk("lockd: release server host %s\n", host->h_name);
+
+	BUG_ON(atomic_read(&host->h_count) < 0);
+	BUG_ON(!host->h_server);
+	atomic_dec(&host->h_count);
 }
 
 /*
@@ -413,20 +516,28 @@
 	return host;
 }
 
-/*
- * Release NLM host after use
- */
-void nlm_release_host(struct nlm_host *host)
+static struct nlm_host *next_host_state(struct hlist_head *cache,
+					struct nsm_handle *nsm,
+					const struct nlm_reboot *info)
 {
-	if (host != NULL) {
-		dprintk("lockd: release host %s\n", host->h_name);
-		BUG_ON(atomic_read(&host->h_count) < 0);
-		if (atomic_dec_and_test(&host->h_count)) {
-			BUG_ON(!list_empty(&host->h_lockowners));
-			BUG_ON(!list_empty(&host->h_granted));
-			BUG_ON(!list_empty(&host->h_reclaim));
+	struct nlm_host *host = NULL;
+	struct hlist_head *chain;
+	struct hlist_node *pos;
+
+	mutex_lock(&nlm_host_mutex);
+	for_each_host(host, pos, chain, cache) {
+		if (host->h_nsmhandle == nsm
+		    && host->h_nsmstate != info->state) {
+			host->h_nsmstate = info->state;
+			host->h_state++;
+
+			nlm_get_host(host);
+			goto out;
 		}
 	}
+out:
+	mutex_unlock(&nlm_host_mutex);
+	return host;
 }
 
 /**
@@ -438,8 +549,6 @@
  */
 void nlm_host_rebooted(const struct nlm_reboot *info)
 {
-	struct hlist_head *chain;
-	struct hlist_node *pos;
 	struct nsm_handle *nsm;
 	struct nlm_host	*host;
 
@@ -452,32 +561,15 @@
 	 * lock for this.
 	 * To avoid processing a host several times, we match the nsmstate.
 	 */
-again:	mutex_lock(&nlm_host_mutex);
-	for (chain = nlm_hosts; chain < nlm_hosts + NLM_HOST_NRHASH; ++chain) {
-		hlist_for_each_entry(host, pos, chain, h_hash) {
-			if (host->h_nsmhandle == nsm
-			 && host->h_nsmstate != info->state) {
-				host->h_nsmstate = info->state;
-				host->h_state++;
-
-				nlm_get_host(host);
-				mutex_unlock(&nlm_host_mutex);
-
-				if (host->h_server) {
-					/* We're server for this guy, just ditch
-					 * all the locks he held. */
-					nlmsvc_free_host_resources(host);
-				} else {
-					/* He's the server, initiate lock recovery. */
-					nlmclnt_recovery(host);
-				}
-
-				nlm_release_host(host);
-				goto again;
-			}
-		}
+	while ((host = next_host_state(nlm_server_hosts, nsm, info)) != NULL) {
+		nlmsvc_free_host_resources(host);
+		nlmsvc_release_host(host);
 	}
-	mutex_unlock(&nlm_host_mutex);
+	while ((host = next_host_state(nlm_client_hosts, nsm, info)) != NULL) {
+		nlmclnt_recovery(host);
+		nlmclnt_release_host(host);
+	}
+
 	nsm_release(nsm);
 }
 
@@ -497,13 +589,11 @@
 
 	/* First, make all hosts eligible for gc */
 	dprintk("lockd: nuking all hosts...\n");
-	for (chain = nlm_hosts; chain < nlm_hosts + NLM_HOST_NRHASH; ++chain) {
-		hlist_for_each_entry(host, pos, chain, h_hash) {
-			host->h_expires = jiffies - 1;
-			if (host->h_rpcclnt) {
-				rpc_shutdown_client(host->h_rpcclnt);
-				host->h_rpcclnt = NULL;
-			}
+	for_each_host(host, pos, chain, nlm_server_hosts) {
+		host->h_expires = jiffies - 1;
+		if (host->h_rpcclnt) {
+			rpc_shutdown_client(host->h_rpcclnt);
+			host->h_rpcclnt = NULL;
 		}
 	}
 
@@ -512,15 +602,13 @@
 	mutex_unlock(&nlm_host_mutex);
 
 	/* complain if any hosts are left */
-	if (nrhosts) {
+	if (nrhosts != 0) {
 		printk(KERN_WARNING "lockd: couldn't shutdown host module!\n");
-		dprintk("lockd: %d hosts left:\n", nrhosts);
-		for (chain = nlm_hosts; chain < nlm_hosts + NLM_HOST_NRHASH; ++chain) {
-			hlist_for_each_entry(host, pos, chain, h_hash) {
-				dprintk("       %s (cnt %d use %d exp %ld)\n",
-					host->h_name, atomic_read(&host->h_count),
-					host->h_inuse, host->h_expires);
-			}
+		dprintk("lockd: %lu hosts left:\n", nrhosts);
+		for_each_host(host, pos, chain, nlm_server_hosts) {
+			dprintk("       %s (cnt %d use %d exp %ld)\n",
+				host->h_name, atomic_read(&host->h_count),
+				host->h_inuse, host->h_expires);
 		}
 	}
 }
@@ -538,29 +626,22 @@
 	struct nlm_host	*host;
 
 	dprintk("lockd: host garbage collection\n");
-	for (chain = nlm_hosts; chain < nlm_hosts + NLM_HOST_NRHASH; ++chain) {
-		hlist_for_each_entry(host, pos, chain, h_hash)
-			host->h_inuse = 0;
-	}
+	for_each_host(host, pos, chain, nlm_server_hosts)
+		host->h_inuse = 0;
 
 	/* Mark all hosts that hold locks, blocks or shares */
 	nlmsvc_mark_resources();
 
-	for (chain = nlm_hosts; chain < nlm_hosts + NLM_HOST_NRHASH; ++chain) {
-		hlist_for_each_entry_safe(host, pos, next, chain, h_hash) {
-			if (atomic_read(&host->h_count) || host->h_inuse
-			 || time_before(jiffies, host->h_expires)) {
-				dprintk("nlm_gc_hosts skipping %s (cnt %d use %d exp %ld)\n",
-					host->h_name, atomic_read(&host->h_count),
-					host->h_inuse, host->h_expires);
-				continue;
-			}
-			dprintk("lockd: delete host %s\n", host->h_name);
-			hlist_del_init(&host->h_hash);
-
-			nlm_destroy_host(host);
-			nrhosts--;
+	for_each_host_safe(host, pos, next, chain, nlm_server_hosts) {
+		if (atomic_read(&host->h_count) || host->h_inuse
+		 || time_before(jiffies, host->h_expires)) {
+			dprintk("nlm_gc_hosts skipping %s "
+				"(cnt %d use %d exp %ld)\n",
+				host->h_name, atomic_read(&host->h_count),
+				host->h_inuse, host->h_expires);
+			continue;
 		}
+		nlm_destroy_host_locked(host);
 	}
 
 	next_gc = jiffies + NLM_HOST_COLLECT;
diff --git a/fs/lockd/mon.c b/fs/lockd/mon.c
index e0c9189..23d7451 100644
--- a/fs/lockd/mon.c
+++ b/fs/lockd/mon.c
@@ -401,26 +401,22 @@
  * Status Monitor wire protocol.
  */
 
-static int encode_nsm_string(struct xdr_stream *xdr, const char *string)
+static void encode_nsm_string(struct xdr_stream *xdr, const char *string)
 {
 	const u32 len = strlen(string);
 	__be32 *p;
 
-	if (unlikely(len > SM_MAXSTRLEN))
-		return -EIO;
-	p = xdr_reserve_space(xdr, sizeof(u32) + len);
-	if (unlikely(p == NULL))
-		return -EIO;
+	BUG_ON(len > SM_MAXSTRLEN);
+	p = xdr_reserve_space(xdr, 4 + len);
 	xdr_encode_opaque(p, string, len);
-	return 0;
 }
 
 /*
  * "mon_name" specifies the host to be monitored.
  */
-static int encode_mon_name(struct xdr_stream *xdr, const struct nsm_args *argp)
+static void encode_mon_name(struct xdr_stream *xdr, const struct nsm_args *argp)
 {
-	return encode_nsm_string(xdr, argp->mon_name);
+	encode_nsm_string(xdr, argp->mon_name);
 }
 
 /*
@@ -429,35 +425,25 @@
  * (via the NLMPROC_SM_NOTIFY call) that the state of host "mon_name"
  * has changed.
  */
-static int encode_my_id(struct xdr_stream *xdr, const struct nsm_args *argp)
+static void encode_my_id(struct xdr_stream *xdr, const struct nsm_args *argp)
 {
-	int status;
 	__be32 *p;
 
-	status = encode_nsm_string(xdr, utsname()->nodename);
-	if (unlikely(status != 0))
-		return status;
-	p = xdr_reserve_space(xdr, 3 * sizeof(u32));
-	if (unlikely(p == NULL))
-		return -EIO;
-	*p++ = htonl(argp->prog);
-	*p++ = htonl(argp->vers);
-	*p++ = htonl(argp->proc);
-	return 0;
+	encode_nsm_string(xdr, utsname()->nodename);
+	p = xdr_reserve_space(xdr, 4 + 4 + 4);
+	*p++ = cpu_to_be32(argp->prog);
+	*p++ = cpu_to_be32(argp->vers);
+	*p = cpu_to_be32(argp->proc);
 }
 
 /*
  * The "mon_id" argument specifies the non-private arguments
  * of an NSMPROC_MON or NSMPROC_UNMON call.
  */
-static int encode_mon_id(struct xdr_stream *xdr, const struct nsm_args *argp)
+static void encode_mon_id(struct xdr_stream *xdr, const struct nsm_args *argp)
 {
-	int status;
-
-	status = encode_mon_name(xdr, argp);
-	if (unlikely(status != 0))
-		return status;
-	return encode_my_id(xdr, argp);
+	encode_mon_name(xdr, argp);
+	encode_my_id(xdr, argp);
 }
 
 /*
@@ -465,68 +451,56 @@
  * by the NSMPROC_MON call. This information will be supplied in the
  * NLMPROC_SM_NOTIFY call.
  */
-static int encode_priv(struct xdr_stream *xdr, const struct nsm_args *argp)
+static void encode_priv(struct xdr_stream *xdr, const struct nsm_args *argp)
 {
 	__be32 *p;
 
 	p = xdr_reserve_space(xdr, SM_PRIV_SIZE);
+	xdr_encode_opaque_fixed(p, argp->priv->data, SM_PRIV_SIZE);
+}
+
+static void nsm_xdr_enc_mon(struct rpc_rqst *req, struct xdr_stream *xdr,
+			    const struct nsm_args *argp)
+{
+	encode_mon_id(xdr, argp);
+	encode_priv(xdr, argp);
+}
+
+static void nsm_xdr_enc_unmon(struct rpc_rqst *req, struct xdr_stream *xdr,
+			      const struct nsm_args *argp)
+{
+	encode_mon_id(xdr, argp);
+}
+
+static int nsm_xdr_dec_stat_res(struct rpc_rqst *rqstp,
+				struct xdr_stream *xdr,
+				struct nsm_res *resp)
+{
+	__be32 *p;
+
+	p = xdr_inline_decode(xdr, 4 + 4);
 	if (unlikely(p == NULL))
 		return -EIO;
-	xdr_encode_opaque_fixed(p, argp->priv->data, SM_PRIV_SIZE);
+	resp->status = be32_to_cpup(p++);
+	resp->state = be32_to_cpup(p);
+
+	dprintk("lockd: %s status %d state %d\n",
+		__func__, resp->status, resp->state);
 	return 0;
 }
 
-static int xdr_enc_mon(struct rpc_rqst *req, __be32 *p,
-		       const struct nsm_args *argp)
-{
-	struct xdr_stream xdr;
-	int status;
-
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-	status = encode_mon_id(&xdr, argp);
-	if (unlikely(status))
-		return status;
-	return encode_priv(&xdr, argp);
-}
-
-static int xdr_enc_unmon(struct rpc_rqst *req, __be32 *p,
-			 const struct nsm_args *argp)
-{
-	struct xdr_stream xdr;
-
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-	return encode_mon_id(&xdr, argp);
-}
-
-static int xdr_dec_stat_res(struct rpc_rqst *rqstp, __be32 *p,
+static int nsm_xdr_dec_stat(struct rpc_rqst *rqstp,
+			    struct xdr_stream *xdr,
 			    struct nsm_res *resp)
 {
-	struct xdr_stream xdr;
+	__be32 *p;
 
-	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
-	p = xdr_inline_decode(&xdr, 2 * sizeof(u32));
+	p = xdr_inline_decode(xdr, 4);
 	if (unlikely(p == NULL))
 		return -EIO;
-	resp->status = ntohl(*p++);
-	resp->state = ntohl(*p);
+	resp->state = be32_to_cpup(p);
 
-	dprintk("lockd: xdr_dec_stat_res status %d state %d\n",
-			resp->status, resp->state);
-	return 0;
-}
-
-static int xdr_dec_stat(struct rpc_rqst *rqstp, __be32 *p,
-			struct nsm_res *resp)
-{
-	struct xdr_stream xdr;
-
-	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
-	p = xdr_inline_decode(&xdr, sizeof(u32));
-	if (unlikely(p == NULL))
-		return -EIO;
-	resp->state = ntohl(*p);
-
-	dprintk("lockd: xdr_dec_stat state %d\n", resp->state);
+	dprintk("lockd: %s state %d\n", __func__, resp->state);
 	return 0;
 }
 
@@ -542,8 +516,8 @@
 static struct rpc_procinfo	nsm_procedures[] = {
 [NSMPROC_MON] = {
 		.p_proc		= NSMPROC_MON,
-		.p_encode	= (kxdrproc_t)xdr_enc_mon,
-		.p_decode	= (kxdrproc_t)xdr_dec_stat_res,
+		.p_encode	= (kxdreproc_t)nsm_xdr_enc_mon,
+		.p_decode	= (kxdrdproc_t)nsm_xdr_dec_stat_res,
 		.p_arglen	= SM_mon_sz,
 		.p_replen	= SM_monres_sz,
 		.p_statidx	= NSMPROC_MON,
@@ -551,8 +525,8 @@
 	},
 [NSMPROC_UNMON] = {
 		.p_proc		= NSMPROC_UNMON,
-		.p_encode	= (kxdrproc_t)xdr_enc_unmon,
-		.p_decode	= (kxdrproc_t)xdr_dec_stat,
+		.p_encode	= (kxdreproc_t)nsm_xdr_enc_unmon,
+		.p_decode	= (kxdrdproc_t)nsm_xdr_dec_stat,
 		.p_arglen	= SM_mon_id_sz,
 		.p_replen	= SM_unmonres_sz,
 		.p_statidx	= NSMPROC_UNMON,
diff --git a/fs/lockd/svc4proc.c b/fs/lockd/svc4proc.c
index 38d2611..9a41fdc 100644
--- a/fs/lockd/svc4proc.c
+++ b/fs/lockd/svc4proc.c
@@ -51,7 +51,7 @@
 	return 0;
 
 no_locks:
-	nlm_release_host(host);
+	nlmsvc_release_host(host);
  	if (error)
 		return error;	
 	return nlm_lck_denied_nolocks;
@@ -92,7 +92,7 @@
 	else
 		dprintk("lockd: TEST4        status %d\n", ntohl(resp->status));
 
-	nlm_release_host(host);
+	nlmsvc_release_host(host);
 	nlm_release_file(file);
 	return rc;
 }
@@ -134,7 +134,7 @@
 	else
 		dprintk("lockd: LOCK         status %d\n", ntohl(resp->status));
 
-	nlm_release_host(host);
+	nlmsvc_release_host(host);
 	nlm_release_file(file);
 	return rc;
 }
@@ -164,7 +164,7 @@
 	resp->status = nlmsvc_cancel_blocked(file, &argp->lock);
 
 	dprintk("lockd: CANCEL        status %d\n", ntohl(resp->status));
-	nlm_release_host(host);
+	nlmsvc_release_host(host);
 	nlm_release_file(file);
 	return rpc_success;
 }
@@ -197,7 +197,7 @@
 	resp->status = nlmsvc_unlock(file, &argp->lock);
 
 	dprintk("lockd: UNLOCK        status %d\n", ntohl(resp->status));
-	nlm_release_host(host);
+	nlmsvc_release_host(host);
 	nlm_release_file(file);
 	return rpc_success;
 }
@@ -229,7 +229,7 @@
 
 static void nlm4svc_callback_release(void *data)
 {
-	nlm_release_call(data);
+	nlmsvc_release_call(data);
 }
 
 static const struct rpc_call_ops nlm4svc_callback_ops = {
@@ -261,7 +261,7 @@
 
 	stat = func(rqstp, argp, &call->a_res);
 	if (stat != 0) {
-		nlm_release_call(call);
+		nlmsvc_release_call(call);
 		return stat;
 	}
 
@@ -334,7 +334,7 @@
 	resp->status = nlmsvc_share_file(host, file, argp);
 
 	dprintk("lockd: SHARE         status %d\n", ntohl(resp->status));
-	nlm_release_host(host);
+	nlmsvc_release_host(host);
 	nlm_release_file(file);
 	return rpc_success;
 }
@@ -367,7 +367,7 @@
 	resp->status = nlmsvc_unshare_file(host, file, argp);
 
 	dprintk("lockd: UNSHARE       status %d\n", ntohl(resp->status));
-	nlm_release_host(host);
+	nlmsvc_release_host(host);
 	nlm_release_file(file);
 	return rpc_success;
 }
@@ -399,7 +399,7 @@
 		return rpc_success;
 
 	nlmsvc_free_host_resources(host);
-	nlm_release_host(host);
+	nlmsvc_release_host(host);
 	return rpc_success;
 }
 
diff --git a/fs/lockd/svclock.c b/fs/lockd/svclock.c
index ef5659b..6e31695 100644
--- a/fs/lockd/svclock.c
+++ b/fs/lockd/svclock.c
@@ -46,6 +46,7 @@
 static int nlmsvc_setgrantargs(struct nlm_rqst *call, struct nlm_lock *lock);
 static void nlmsvc_freegrantargs(struct nlm_rqst *call);
 static const struct rpc_call_ops nlmsvc_grant_ops;
+static const char *nlmdbg_cookie2a(const struct nlm_cookie *cookie);
 
 /*
  * The list of blocked locks to retry
@@ -233,7 +234,7 @@
 failed_free:
 	kfree(block);
 failed:
-	nlm_release_call(call);
+	nlmsvc_release_call(call);
 	return NULL;
 }
 
@@ -266,7 +267,7 @@
 	mutex_unlock(&file->f_mutex);
 
 	nlmsvc_freegrantargs(block->b_call);
-	nlm_release_call(block->b_call);
+	nlmsvc_release_call(block->b_call);
 	nlm_release_file(block->b_file);
 	kfree(block->b_fl);
 	kfree(block);
@@ -934,3 +935,32 @@
 
 	return timeout;
 }
+
+#ifdef RPC_DEBUG
+static const char *nlmdbg_cookie2a(const struct nlm_cookie *cookie)
+{
+	/*
+	 * We can get away with a static buffer because we're only
+	 * called with BKL held.
+	 */
+	static char buf[2*NLM_MAXCOOKIELEN+1];
+	unsigned int i, len = sizeof(buf);
+	char *p = buf;
+
+	len--;	/* allow for trailing \0 */
+	if (len < 3)
+		return "???";
+	for (i = 0 ; i < cookie->len ; i++) {
+		if (len < 2) {
+			strcpy(p-3, "...");
+			break;
+		}
+		sprintf(p, "%02x", cookie->data[i]);
+		p += 2;
+		len -= 2;
+	}
+	*p = '\0';
+
+	return buf;
+}
+#endif
diff --git a/fs/lockd/svcproc.c b/fs/lockd/svcproc.c
index 0caea53..d27aab1 100644
--- a/fs/lockd/svcproc.c
+++ b/fs/lockd/svcproc.c
@@ -80,7 +80,7 @@
 	return 0;
 
 no_locks:
-	nlm_release_host(host);
+	nlmsvc_release_host(host);
 	if (error)
 		return error;
 	return nlm_lck_denied_nolocks;
@@ -122,7 +122,7 @@
 		dprintk("lockd: TEST          status %d vers %d\n",
 			ntohl(resp->status), rqstp->rq_vers);
 
-	nlm_release_host(host);
+	nlmsvc_release_host(host);
 	nlm_release_file(file);
 	return rc;
 }
@@ -164,7 +164,7 @@
 	else
 		dprintk("lockd: LOCK         status %d\n", ntohl(resp->status));
 
-	nlm_release_host(host);
+	nlmsvc_release_host(host);
 	nlm_release_file(file);
 	return rc;
 }
@@ -194,7 +194,7 @@
 	resp->status = cast_status(nlmsvc_cancel_blocked(file, &argp->lock));
 
 	dprintk("lockd: CANCEL        status %d\n", ntohl(resp->status));
-	nlm_release_host(host);
+	nlmsvc_release_host(host);
 	nlm_release_file(file);
 	return rpc_success;
 }
@@ -227,7 +227,7 @@
 	resp->status = cast_status(nlmsvc_unlock(file, &argp->lock));
 
 	dprintk("lockd: UNLOCK        status %d\n", ntohl(resp->status));
-	nlm_release_host(host);
+	nlmsvc_release_host(host);
 	nlm_release_file(file);
 	return rpc_success;
 }
@@ -257,9 +257,17 @@
 			-task->tk_status);
 }
 
+void nlmsvc_release_call(struct nlm_rqst *call)
+{
+	if (!atomic_dec_and_test(&call->a_count))
+		return;
+	nlmsvc_release_host(call->a_host);
+	kfree(call);
+}
+
 static void nlmsvc_callback_release(void *data)
 {
-	nlm_release_call(data);
+	nlmsvc_release_call(data);
 }
 
 static const struct rpc_call_ops nlmsvc_callback_ops = {
@@ -291,7 +299,7 @@
 
 	stat = func(rqstp, argp, &call->a_res);
 	if (stat != 0) {
-		nlm_release_call(call);
+		nlmsvc_release_call(call);
 		return stat;
 	}
 
@@ -366,7 +374,7 @@
 	resp->status = cast_status(nlmsvc_share_file(host, file, argp));
 
 	dprintk("lockd: SHARE         status %d\n", ntohl(resp->status));
-	nlm_release_host(host);
+	nlmsvc_release_host(host);
 	nlm_release_file(file);
 	return rpc_success;
 }
@@ -399,7 +407,7 @@
 	resp->status = cast_status(nlmsvc_unshare_file(host, file, argp));
 
 	dprintk("lockd: UNSHARE       status %d\n", ntohl(resp->status));
-	nlm_release_host(host);
+	nlmsvc_release_host(host);
 	nlm_release_file(file);
 	return rpc_success;
 }
@@ -431,7 +439,7 @@
 		return rpc_success;
 
 	nlmsvc_free_host_resources(host);
-	nlm_release_host(host);
+	nlmsvc_release_host(host);
 	return rpc_success;
 }
 
diff --git a/fs/lockd/xdr.c b/fs/lockd/xdr.c
index b583ab0..964666c 100644
--- a/fs/lockd/xdr.c
+++ b/fs/lockd/xdr.c
@@ -149,37 +149,6 @@
 }
 
 /*
- * Encode a lock as part of an NLM call
- */
-static __be32 *
-nlm_encode_lock(__be32 *p, struct nlm_lock *lock)
-{
-	struct file_lock	*fl = &lock->fl;
-	__s32			start, len;
-
-	if (!(p = xdr_encode_string(p, lock->caller))
-	 || !(p = nlm_encode_fh(p, &lock->fh))
-	 || !(p = nlm_encode_oh(p, &lock->oh)))
-		return NULL;
-
-	if (fl->fl_start > NLM_OFFSET_MAX
-	 || (fl->fl_end > NLM_OFFSET_MAX && fl->fl_end != OFFSET_MAX))
-		return NULL;
-
-	start = loff_t_to_s32(fl->fl_start);
-	if (fl->fl_end == OFFSET_MAX)
-		len = 0;
-	else
-		len = loff_t_to_s32(fl->fl_end - fl->fl_start + 1);
-
-	*p++ = htonl(lock->svid);
-	*p++ = htonl(start);
-	*p++ = htonl(len);
-
-	return p;
-}
-
-/*
  * Encode result of a TEST/TEST_MSG call
  */
 static __be32 *
@@ -372,259 +341,3 @@
 {
 	return xdr_ressize_check(rqstp, p);
 }
-
-/*
- * Now, the client side XDR functions
- */
-#ifdef NLMCLNT_SUPPORT_SHARES
-static int
-nlmclt_decode_void(struct rpc_rqst *req, u32 *p, void *ptr)
-{
-	return 0;
-}
-#endif
-
-static int
-nlmclt_encode_testargs(struct rpc_rqst *req, __be32 *p, nlm_args *argp)
-{
-	struct nlm_lock	*lock = &argp->lock;
-
-	if (!(p = nlm_encode_cookie(p, &argp->cookie)))
-		return -EIO;
-	*p++ = (lock->fl.fl_type == F_WRLCK)? xdr_one : xdr_zero;
-	if (!(p = nlm_encode_lock(p, lock)))
-		return -EIO;
-	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
-	return 0;
-}
-
-static int
-nlmclt_decode_testres(struct rpc_rqst *req, __be32 *p, struct nlm_res *resp)
-{
-	if (!(p = nlm_decode_cookie(p, &resp->cookie)))
-		return -EIO;
-	resp->status = *p++;
-	if (resp->status == nlm_lck_denied) {
-		struct file_lock	*fl = &resp->lock.fl;
-		u32			excl;
-		s32			start, len, end;
-
-		memset(&resp->lock, 0, sizeof(resp->lock));
-		locks_init_lock(fl);
-		excl = ntohl(*p++);
-		resp->lock.svid = ntohl(*p++);
-		fl->fl_pid = (pid_t)resp->lock.svid;
-		if (!(p = nlm_decode_oh(p, &resp->lock.oh)))
-			return -EIO;
-
-		fl->fl_flags = FL_POSIX;
-		fl->fl_type  = excl? F_WRLCK : F_RDLCK;
-		start = ntohl(*p++);
-		len = ntohl(*p++);
-		end = start + len - 1;
-
-		fl->fl_start = s32_to_loff_t(start);
-		if (len == 0 || end < 0)
-			fl->fl_end = OFFSET_MAX;
-		else
-			fl->fl_end = s32_to_loff_t(end);
-	}
-	return 0;
-}
-
-
-static int
-nlmclt_encode_lockargs(struct rpc_rqst *req, __be32 *p, nlm_args *argp)
-{
-	struct nlm_lock	*lock = &argp->lock;
-
-	if (!(p = nlm_encode_cookie(p, &argp->cookie)))
-		return -EIO;
-	*p++ = argp->block? xdr_one : xdr_zero;
-	*p++ = (lock->fl.fl_type == F_WRLCK)? xdr_one : xdr_zero;
-	if (!(p = nlm_encode_lock(p, lock)))
-		return -EIO;
-	*p++ = argp->reclaim? xdr_one : xdr_zero;
-	*p++ = htonl(argp->state);
-	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
-	return 0;
-}
-
-static int
-nlmclt_encode_cancargs(struct rpc_rqst *req, __be32 *p, nlm_args *argp)
-{
-	struct nlm_lock	*lock = &argp->lock;
-
-	if (!(p = nlm_encode_cookie(p, &argp->cookie)))
-		return -EIO;
-	*p++ = argp->block? xdr_one : xdr_zero;
-	*p++ = (lock->fl.fl_type == F_WRLCK)? xdr_one : xdr_zero;
-	if (!(p = nlm_encode_lock(p, lock)))
-		return -EIO;
-	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
-	return 0;
-}
-
-static int
-nlmclt_encode_unlockargs(struct rpc_rqst *req, __be32 *p, nlm_args *argp)
-{
-	struct nlm_lock	*lock = &argp->lock;
-
-	if (!(p = nlm_encode_cookie(p, &argp->cookie)))
-		return -EIO;
-	if (!(p = nlm_encode_lock(p, lock)))
-		return -EIO;
-	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
-	return 0;
-}
-
-static int
-nlmclt_encode_res(struct rpc_rqst *req, __be32 *p, struct nlm_res *resp)
-{
-	if (!(p = nlm_encode_cookie(p, &resp->cookie)))
-		return -EIO;
-	*p++ = resp->status;
-	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
-	return 0;
-}
-
-static int
-nlmclt_encode_testres(struct rpc_rqst *req, __be32 *p, struct nlm_res *resp)
-{
-	if (!(p = nlm_encode_testres(p, resp)))
-		return -EIO;
-	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
-	return 0;
-}
-
-static int
-nlmclt_decode_res(struct rpc_rqst *req, __be32 *p, struct nlm_res *resp)
-{
-	if (!(p = nlm_decode_cookie(p, &resp->cookie)))
-		return -EIO;
-	resp->status = *p++;
-	return 0;
-}
-
-#if (NLMCLNT_OHSIZE > XDR_MAX_NETOBJ)
-#  error "NLM host name cannot be larger than XDR_MAX_NETOBJ!"
-#endif
-
-/*
- * Buffer requirements for NLM
- */
-#define NLM_void_sz		0
-#define NLM_cookie_sz		1+XDR_QUADLEN(NLM_MAXCOOKIELEN)
-#define NLM_caller_sz		1+XDR_QUADLEN(NLMCLNT_OHSIZE)
-#define NLM_owner_sz		1+XDR_QUADLEN(NLMCLNT_OHSIZE)
-#define NLM_fhandle_sz		1+XDR_QUADLEN(NFS2_FHSIZE)
-#define NLM_lock_sz		3+NLM_caller_sz+NLM_owner_sz+NLM_fhandle_sz
-#define NLM_holder_sz		4+NLM_owner_sz
-
-#define NLM_testargs_sz		NLM_cookie_sz+1+NLM_lock_sz
-#define NLM_lockargs_sz		NLM_cookie_sz+4+NLM_lock_sz
-#define NLM_cancargs_sz		NLM_cookie_sz+2+NLM_lock_sz
-#define NLM_unlockargs_sz	NLM_cookie_sz+NLM_lock_sz
-
-#define NLM_testres_sz		NLM_cookie_sz+1+NLM_holder_sz
-#define NLM_res_sz		NLM_cookie_sz+1
-#define NLM_norep_sz		0
-
-/*
- * For NLM, a void procedure really returns nothing
- */
-#define nlmclt_decode_norep	NULL
-
-#define PROC(proc, argtype, restype)	\
-[NLMPROC_##proc] = {							\
-	.p_proc      = NLMPROC_##proc,					\
-	.p_encode    = (kxdrproc_t) nlmclt_encode_##argtype,		\
-	.p_decode    = (kxdrproc_t) nlmclt_decode_##restype,		\
-	.p_arglen    = NLM_##argtype##_sz,				\
-	.p_replen    = NLM_##restype##_sz,				\
-	.p_statidx   = NLMPROC_##proc,					\
-	.p_name      = #proc,						\
-	}
-
-static struct rpc_procinfo	nlm_procedures[] = {
-    PROC(TEST,		testargs,	testres),
-    PROC(LOCK,		lockargs,	res),
-    PROC(CANCEL,	cancargs,	res),
-    PROC(UNLOCK,	unlockargs,	res),
-    PROC(GRANTED,	testargs,	res),
-    PROC(TEST_MSG,	testargs,	norep),
-    PROC(LOCK_MSG,	lockargs,	norep),
-    PROC(CANCEL_MSG,	cancargs,	norep),
-    PROC(UNLOCK_MSG,	unlockargs,	norep),
-    PROC(GRANTED_MSG,	testargs,	norep),
-    PROC(TEST_RES,	testres,	norep),
-    PROC(LOCK_RES,	res,		norep),
-    PROC(CANCEL_RES,	res,		norep),
-    PROC(UNLOCK_RES,	res,		norep),
-    PROC(GRANTED_RES,	res,		norep),
-#ifdef NLMCLNT_SUPPORT_SHARES
-    PROC(SHARE,		shareargs,	shareres),
-    PROC(UNSHARE,	shareargs,	shareres),
-    PROC(NM_LOCK,	lockargs,	res),
-    PROC(FREE_ALL,	notify,		void),
-#endif
-};
-
-static struct rpc_version	nlm_version1 = {
-		.number		= 1,
-		.nrprocs	= 16,
-		.procs		= nlm_procedures,
-};
-
-static struct rpc_version	nlm_version3 = {
-		.number		= 3,
-		.nrprocs	= 24,
-		.procs		= nlm_procedures,
-};
-
-static struct rpc_version *	nlm_versions[] = {
-	[1] = &nlm_version1,
-	[3] = &nlm_version3,
-#ifdef 	CONFIG_LOCKD_V4
-	[4] = &nlm_version4,
-#endif
-};
-
-static struct rpc_stat		nlm_stats;
-
-struct rpc_program		nlm_program = {
-		.name		= "lockd",
-		.number		= NLM_PROGRAM,
-		.nrvers		= ARRAY_SIZE(nlm_versions),
-		.version	= nlm_versions,
-		.stats		= &nlm_stats,
-};
-
-#ifdef RPC_DEBUG
-const char *nlmdbg_cookie2a(const struct nlm_cookie *cookie)
-{
-	/*
-	 * We can get away with a static buffer because we're only
-	 * called with BKL held.
-	 */
-	static char buf[2*NLM_MAXCOOKIELEN+1];
-	unsigned int i, len = sizeof(buf);
-	char *p = buf;
-
-	len--;	/* allow for trailing \0 */
-	if (len < 3)
-		return "???";
-	for (i = 0 ; i < cookie->len ; i++) {
-		if (len < 2) {
-			strcpy(p-3, "...");
-			break;
-		}
-		sprintf(p, "%02x", cookie->data[i]);
-		p += 2;
-		len -= 2;
-	}
-	*p = '\0';
-
-	return buf;
-}
-#endif
diff --git a/fs/lockd/xdr4.c b/fs/lockd/xdr4.c
index ad9dbbc..dfa4789 100644
--- a/fs/lockd/xdr4.c
+++ b/fs/lockd/xdr4.c
@@ -93,15 +93,6 @@
 	return p + XDR_QUADLEN(f->size);
 }
 
-static __be32 *
-nlm4_encode_fh(__be32 *p, struct nfs_fh *f)
-{
-	*p++ = htonl(f->size);
-	if (f->size) p[XDR_QUADLEN(f->size)-1] = 0; /* don't leak anything */
-	memcpy(p, f->data, f->size);
-	return p + XDR_QUADLEN(f->size);
-}
-
 /*
  * Encode and decode owner handle
  */
@@ -112,12 +103,6 @@
 }
 
 static __be32 *
-nlm4_encode_oh(__be32 *p, struct xdr_netobj *oh)
-{
-	return xdr_encode_netobj(p, oh);
-}
-
-static __be32 *
 nlm4_decode_lock(__be32 *p, struct nlm_lock *lock)
 {
 	struct file_lock	*fl = &lock->fl;
@@ -150,38 +135,6 @@
 }
 
 /*
- * Encode a lock as part of an NLM call
- */
-static __be32 *
-nlm4_encode_lock(__be32 *p, struct nlm_lock *lock)
-{
-	struct file_lock	*fl = &lock->fl;
-	__s64			start, len;
-
-	if (!(p = xdr_encode_string(p, lock->caller))
-	 || !(p = nlm4_encode_fh(p, &lock->fh))
-	 || !(p = nlm4_encode_oh(p, &lock->oh)))
-		return NULL;
-
-	if (fl->fl_start > NLM4_OFFSET_MAX
-	 || (fl->fl_end > NLM4_OFFSET_MAX && fl->fl_end != OFFSET_MAX))
-		return NULL;
-
-	*p++ = htonl(lock->svid);
-
-	start = loff_t_to_s64(fl->fl_start);
-	if (fl->fl_end == OFFSET_MAX)
-		len = 0;
-	else
-		len = loff_t_to_s64(fl->fl_end - fl->fl_start + 1);
-
-	p = xdr_encode_hyper(p, start);
-	p = xdr_encode_hyper(p, len);
-
-	return p;
-}
-
-/*
  * Encode result of a TEST/TEST_MSG call
  */
 static __be32 *
@@ -379,211 +332,3 @@
 {
 	return xdr_ressize_check(rqstp, p);
 }
-
-/*
- * Now, the client side XDR functions
- */
-#ifdef NLMCLNT_SUPPORT_SHARES
-static int
-nlm4clt_decode_void(struct rpc_rqst *req, __be32 *p, void *ptr)
-{
-	return 0;
-}
-#endif
-
-static int
-nlm4clt_encode_testargs(struct rpc_rqst *req, __be32 *p, nlm_args *argp)
-{
-	struct nlm_lock	*lock = &argp->lock;
-
-	if (!(p = nlm4_encode_cookie(p, &argp->cookie)))
-		return -EIO;
-	*p++ = (lock->fl.fl_type == F_WRLCK)? xdr_one : xdr_zero;
-	if (!(p = nlm4_encode_lock(p, lock)))
-		return -EIO;
-	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
-	return 0;
-}
-
-static int
-nlm4clt_decode_testres(struct rpc_rqst *req, __be32 *p, struct nlm_res *resp)
-{
-	if (!(p = nlm4_decode_cookie(p, &resp->cookie)))
-		return -EIO;
-	resp->status = *p++;
-	if (resp->status == nlm_lck_denied) {
-		struct file_lock	*fl = &resp->lock.fl;
-		u32			excl;
-		__u64			start, len;
-		__s64			end;
-
-		memset(&resp->lock, 0, sizeof(resp->lock));
-		locks_init_lock(fl);
-		excl = ntohl(*p++);
-		resp->lock.svid = ntohl(*p++);
-		fl->fl_pid = (pid_t)resp->lock.svid;
-		if (!(p = nlm4_decode_oh(p, &resp->lock.oh)))
-			return -EIO;
-
-		fl->fl_flags = FL_POSIX;
-		fl->fl_type  = excl? F_WRLCK : F_RDLCK;
-		p = xdr_decode_hyper(p, &start);
-		p = xdr_decode_hyper(p, &len);
-		end = start + len - 1;
-
-		fl->fl_start = s64_to_loff_t(start);
-		if (len == 0 || end < 0)
-			fl->fl_end = OFFSET_MAX;
-		else
-			fl->fl_end = s64_to_loff_t(end);
-	}
-	return 0;
-}
-
-
-static int
-nlm4clt_encode_lockargs(struct rpc_rqst *req, __be32 *p, nlm_args *argp)
-{
-	struct nlm_lock	*lock = &argp->lock;
-
-	if (!(p = nlm4_encode_cookie(p, &argp->cookie)))
-		return -EIO;
-	*p++ = argp->block? xdr_one : xdr_zero;
-	*p++ = (lock->fl.fl_type == F_WRLCK)? xdr_one : xdr_zero;
-	if (!(p = nlm4_encode_lock(p, lock)))
-		return -EIO;
-	*p++ = argp->reclaim? xdr_one : xdr_zero;
-	*p++ = htonl(argp->state);
-	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
-	return 0;
-}
-
-static int
-nlm4clt_encode_cancargs(struct rpc_rqst *req, __be32 *p, nlm_args *argp)
-{
-	struct nlm_lock	*lock = &argp->lock;
-
-	if (!(p = nlm4_encode_cookie(p, &argp->cookie)))
-		return -EIO;
-	*p++ = argp->block? xdr_one : xdr_zero;
-	*p++ = (lock->fl.fl_type == F_WRLCK)? xdr_one : xdr_zero;
-	if (!(p = nlm4_encode_lock(p, lock)))
-		return -EIO;
-	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
-	return 0;
-}
-
-static int
-nlm4clt_encode_unlockargs(struct rpc_rqst *req, __be32 *p, nlm_args *argp)
-{
-	struct nlm_lock	*lock = &argp->lock;
-
-	if (!(p = nlm4_encode_cookie(p, &argp->cookie)))
-		return -EIO;
-	if (!(p = nlm4_encode_lock(p, lock)))
-		return -EIO;
-	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
-	return 0;
-}
-
-static int
-nlm4clt_encode_res(struct rpc_rqst *req, __be32 *p, struct nlm_res *resp)
-{
-	if (!(p = nlm4_encode_cookie(p, &resp->cookie)))
-		return -EIO;
-	*p++ = resp->status;
-	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
-	return 0;
-}
-
-static int
-nlm4clt_encode_testres(struct rpc_rqst *req, __be32 *p, struct nlm_res *resp)
-{
-	if (!(p = nlm4_encode_testres(p, resp)))
-		return -EIO;
-	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
-	return 0;
-}
-
-static int
-nlm4clt_decode_res(struct rpc_rqst *req, __be32 *p, struct nlm_res *resp)
-{
-	if (!(p = nlm4_decode_cookie(p, &resp->cookie)))
-		return -EIO;
-	resp->status = *p++;
-	return 0;
-}
-
-#if (NLMCLNT_OHSIZE > XDR_MAX_NETOBJ)
-#  error "NLM host name cannot be larger than XDR_MAX_NETOBJ!"
-#endif
-
-#if (NLMCLNT_OHSIZE > NLM_MAXSTRLEN)
-#  error "NLM host name cannot be larger than NLM's maximum string length!"
-#endif
-
-/*
- * Buffer requirements for NLM
- */
-#define NLM4_void_sz		0
-#define NLM4_cookie_sz		1+XDR_QUADLEN(NLM_MAXCOOKIELEN)
-#define NLM4_caller_sz		1+XDR_QUADLEN(NLMCLNT_OHSIZE)
-#define NLM4_owner_sz		1+XDR_QUADLEN(NLMCLNT_OHSIZE)
-#define NLM4_fhandle_sz		1+XDR_QUADLEN(NFS3_FHSIZE)
-#define NLM4_lock_sz		5+NLM4_caller_sz+NLM4_owner_sz+NLM4_fhandle_sz
-#define NLM4_holder_sz		6+NLM4_owner_sz
-
-#define NLM4_testargs_sz	NLM4_cookie_sz+1+NLM4_lock_sz
-#define NLM4_lockargs_sz	NLM4_cookie_sz+4+NLM4_lock_sz
-#define NLM4_cancargs_sz	NLM4_cookie_sz+2+NLM4_lock_sz
-#define NLM4_unlockargs_sz	NLM4_cookie_sz+NLM4_lock_sz
-
-#define NLM4_testres_sz		NLM4_cookie_sz+1+NLM4_holder_sz
-#define NLM4_res_sz		NLM4_cookie_sz+1
-#define NLM4_norep_sz		0
-
-/*
- * For NLM, a void procedure really returns nothing
- */
-#define nlm4clt_decode_norep	NULL
-
-#define PROC(proc, argtype, restype)					\
-[NLMPROC_##proc] = {							\
-	.p_proc      = NLMPROC_##proc,					\
-	.p_encode    = (kxdrproc_t) nlm4clt_encode_##argtype,		\
-	.p_decode    = (kxdrproc_t) nlm4clt_decode_##restype,		\
-	.p_arglen    = NLM4_##argtype##_sz,				\
-	.p_replen    = NLM4_##restype##_sz,				\
-	.p_statidx   = NLMPROC_##proc,					\
-	.p_name      = #proc,						\
-	}
-
-static struct rpc_procinfo	nlm4_procedures[] = {
-    PROC(TEST,		testargs,	testres),
-    PROC(LOCK,		lockargs,	res),
-    PROC(CANCEL,	cancargs,	res),
-    PROC(UNLOCK,	unlockargs,	res),
-    PROC(GRANTED,	testargs,	res),
-    PROC(TEST_MSG,	testargs,	norep),
-    PROC(LOCK_MSG,	lockargs,	norep),
-    PROC(CANCEL_MSG,	cancargs,	norep),
-    PROC(UNLOCK_MSG,	unlockargs,	norep),
-    PROC(GRANTED_MSG,	testargs,	norep),
-    PROC(TEST_RES,	testres,	norep),
-    PROC(LOCK_RES,	res,		norep),
-    PROC(CANCEL_RES,	res,		norep),
-    PROC(UNLOCK_RES,	res,		norep),
-    PROC(GRANTED_RES,	res,		norep),
-#ifdef NLMCLNT_SUPPORT_SHARES
-    PROC(SHARE,		shareargs,	shareres),
-    PROC(UNSHARE,	shareargs,	shareres),
-    PROC(NM_LOCK,	lockargs,	res),
-    PROC(FREE_ALL,	notify,		void),
-#endif
-};
-
-struct rpc_version	nlm_version4 = {
-	.number		= 4,
-	.nrprocs	= 24,
-	.procs		= nlm4_procedures,
-};
diff --git a/fs/mbcache.c b/fs/mbcache.c
index 9344474..a25444ab 100644
--- a/fs/mbcache.c
+++ b/fs/mbcache.c
@@ -76,18 +76,6 @@
 EXPORT_SYMBOL(mb_cache_entry_find_next);
 #endif
 
-struct mb_cache {
-	struct list_head		c_cache_list;
-	const char			*c_name;
-	atomic_t			c_entry_count;
-	int				c_max_entries;
-	int				c_bucket_bits;
-	struct kmem_cache		*c_entry_cache;
-	struct list_head		*c_block_hash;
-	struct list_head		*c_index_hash;
-};
-
-
 /*
  * Global data: list of all mbcache's, lru list, and a spinlock for
  * accessing cache data structures on SMP machines. The lru list is
diff --git a/fs/minix/namei.c b/fs/minix/namei.c
index 1b9e077..ce7337d 100644
--- a/fs/minix/namei.c
+++ b/fs/minix/namei.c
@@ -23,8 +23,6 @@
 	struct inode * inode = NULL;
 	ino_t ino;
 
-	d_set_d_op(dentry, dir->i_sb->s_root->d_op);
-
 	if (dentry->d_name.len > minix_sb(dir->i_sb)->s_namelen)
 		return ERR_PTR(-ENAMETOOLONG);
 
diff --git a/fs/namei.c b/fs/namei.c
index 24ece10..0b14f69 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -1950,8 +1950,9 @@
 	return break_lease(inode, flag);
 }
 
-static int handle_truncate(struct path *path)
+static int handle_truncate(struct file *filp)
 {
+	struct path *path = &filp->f_path;
 	struct inode *inode = path->dentry->d_inode;
 	int error = get_write_access(inode);
 	if (error)
@@ -1965,7 +1966,7 @@
 	if (!error) {
 		error = do_truncate(path->dentry, 0,
 				    ATTR_MTIME|ATTR_CTIME|ATTR_OPEN,
-				    NULL);
+				    filp);
 	}
 	put_write_access(inode);
 	return error;
@@ -2063,7 +2064,7 @@
 	}
 	if (!IS_ERR(filp)) {
 		if (will_truncate) {
-			error = handle_truncate(&nd->path);
+			error = handle_truncate(filp);
 			if (error) {
 				fput(filp);
 				filp = ERR_PTR(error);
diff --git a/fs/ncpfs/dir.c b/fs/ncpfs/dir.c
index 28f136d..f6946bb 100644
--- a/fs/ncpfs/dir.c
+++ b/fs/ncpfs/dir.c
@@ -21,9 +21,7 @@
 #include <asm/uaccess.h>
 #include <asm/byteorder.h>
 
-#include <linux/ncp_fs.h>
-
-#include "ncplib_kernel.h"
+#include "ncp_fs.h"
 
 static void ncp_read_volume_list(struct file *, void *, filldir_t,
 				struct ncp_cache_control *);
@@ -82,7 +80,7 @@
 		unsigned int, const char *, const struct qstr *);
 static int ncp_delete_dentry(const struct dentry *);
 
-static const struct dentry_operations ncp_dentry_operations =
+const struct dentry_operations ncp_dentry_operations =
 {
 	.d_revalidate	= ncp_lookup_validate,
 	.d_hash		= ncp_hash_dentry,
@@ -90,14 +88,6 @@
 	.d_delete	= ncp_delete_dentry,
 };
 
-const struct dentry_operations ncp_root_dentry_operations =
-{
-	.d_hash		= ncp_hash_dentry,
-	.d_compare	= ncp_compare_dentry,
-	.d_delete	= ncp_delete_dentry,
-};
-
-
 #define ncp_namespace(i)	(NCP_SERVER(i)->name_space[NCP_FINFO(i)->volNumber])
 
 static inline int ncp_preserve_entry_case(struct inode *i, __u32 nscreator)
@@ -309,6 +299,9 @@
 	int res, val = 0, len;
 	__u8 __name[NCP_MAXPATHLEN + 1];
 
+	if (dentry == dentry->d_sb->s_root)
+		return 1;
+
 	if (nd->flags & LOOKUP_RCU)
 		return -ECHILD;
 
@@ -637,7 +630,6 @@
 		entry->ino = iunique(dir->i_sb, 2);
 		inode = ncp_iget(dir->i_sb, entry);
 		if (inode) {
-			d_set_d_op(newdent, &ncp_dentry_operations);
 			d_instantiate(newdent, inode);
 			if (!hashed)
 				d_rehash(newdent);
@@ -893,7 +885,6 @@
 	if (inode) {
 		ncp_new_dentry(dentry);
 add_entry:
-		d_set_d_op(dentry, &ncp_dentry_operations);
 		d_add(dentry, inode);
 		error = 0;
 	}
diff --git a/fs/ncpfs/file.c b/fs/ncpfs/file.c
index cb50aaf..0ed65e0 100644
--- a/fs/ncpfs/file.c
+++ b/fs/ncpfs/file.c
@@ -18,8 +18,7 @@
 #include <linux/vmalloc.h>
 #include <linux/sched.h>
 
-#include <linux/ncp_fs.h>
-#include "ncplib_kernel.h"
+#include "ncp_fs.h"
 
 static int ncp_fsync(struct file *file, int datasync)
 {
diff --git a/fs/ncpfs/inode.c b/fs/ncpfs/inode.c
index 9b39a5d..00a1d1c 100644
--- a/fs/ncpfs/inode.c
+++ b/fs/ncpfs/inode.c
@@ -31,11 +31,9 @@
 #include <linux/seq_file.h>
 #include <linux/namei.h>
 
-#include <linux/ncp_fs.h>
-
 #include <net/sock.h>
 
-#include "ncplib_kernel.h"
+#include "ncp_fs.h"
 #include "getopt.h"
 
 #define NCP_DEFAULT_FILE_MODE 0600
@@ -544,6 +542,7 @@
 	sb->s_blocksize_bits = 10;
 	sb->s_magic = NCP_SUPER_MAGIC;
 	sb->s_op = &ncp_sops;
+	sb->s_d_op = &ncp_dentry_operations;
 	sb->s_bdi = &server->bdi;
 
 	server = NCP_SBP(sb);
@@ -723,7 +722,6 @@
 	sb->s_root = d_alloc_root(root_inode);
         if (!sb->s_root)
 		goto out_no_root;
-	d_set_d_op(sb->s_root, &ncp_root_dentry_operations);
 	return 0;
 
 out_no_root:
diff --git a/fs/ncpfs/ioctl.c b/fs/ncpfs/ioctl.c
index d40a547..790e92a 100644
--- a/fs/ncpfs/ioctl.c
+++ b/fs/ncpfs/ioctl.c
@@ -20,11 +20,9 @@
 #include <linux/vmalloc.h>
 #include <linux/sched.h>
 
-#include <linux/ncp_fs.h>
-
 #include <asm/uaccess.h>
 
-#include "ncplib_kernel.h"
+#include "ncp_fs.h"
 
 /* maximum limit for ncp_objectname_ioctl */
 #define NCP_OBJECT_NAME_MAX_LEN	4096
diff --git a/fs/ncpfs/mmap.c b/fs/ncpfs/mmap.c
index 56f5b3a..a7c07b4 100644
--- a/fs/ncpfs/mmap.c
+++ b/fs/ncpfs/mmap.c
@@ -16,12 +16,12 @@
 #include <linux/mman.h>
 #include <linux/string.h>
 #include <linux/fcntl.h>
-#include <linux/ncp_fs.h>
 
-#include "ncplib_kernel.h"
 #include <asm/uaccess.h>
 #include <asm/system.h>
 
+#include "ncp_fs.h"
+
 /*
  * Fill in the supplied page for mmap
  * XXX: how are we excluding truncate/invalidate here? Maybe need to lock
diff --git a/fs/ncpfs/ncp_fs.h b/fs/ncpfs/ncp_fs.h
new file mode 100644
index 0000000..31831af
--- /dev/null
+++ b/fs/ncpfs/ncp_fs.h
@@ -0,0 +1,98 @@
+#include <linux/ncp_fs.h>
+#include "ncp_fs_i.h"
+#include "ncp_fs_sb.h"
+
+/* define because it is easy to change PRINTK to {*}PRINTK */
+#define PRINTK(format, args...) printk(KERN_DEBUG format , ## args)
+
+#undef NCPFS_PARANOIA
+#ifdef NCPFS_PARANOIA
+#define PPRINTK(format, args...) PRINTK(format , ## args)
+#else
+#define PPRINTK(format, args...)
+#endif
+
+#ifndef DEBUG_NCP
+#define DEBUG_NCP 0
+#endif
+#if DEBUG_NCP > 0
+#define DPRINTK(format, args...) PRINTK(format , ## args)
+#else
+#define DPRINTK(format, args...)
+#endif
+#if DEBUG_NCP > 1
+#define DDPRINTK(format, args...) PRINTK(format , ## args)
+#else
+#define DDPRINTK(format, args...)
+#endif
+
+#define NCP_MAX_RPC_TIMEOUT (6*HZ)
+
+
+struct ncp_entry_info {
+	struct nw_info_struct	i;
+	ino_t			ino;
+	int			opened;
+	int			access;
+	unsigned int		volume;
+	__u8			file_handle[6];
+};
+
+static inline struct ncp_server *NCP_SBP(const struct super_block *sb)
+{
+	return sb->s_fs_info;
+}
+
+#define NCP_SERVER(inode)	NCP_SBP((inode)->i_sb)
+static inline struct ncp_inode_info *NCP_FINFO(const struct inode *inode)
+{
+	return container_of(inode, struct ncp_inode_info, vfs_inode);
+}
+
+/* linux/fs/ncpfs/inode.c */
+int ncp_notify_change(struct dentry *, struct iattr *);
+struct inode *ncp_iget(struct super_block *, struct ncp_entry_info *);
+void ncp_update_inode(struct inode *, struct ncp_entry_info *);
+void ncp_update_inode2(struct inode *, struct ncp_entry_info *);
+
+/* linux/fs/ncpfs/dir.c */
+extern const struct inode_operations ncp_dir_inode_operations;
+extern const struct file_operations ncp_dir_operations;
+extern const struct dentry_operations ncp_dentry_operations;
+int ncp_conn_logged_in(struct super_block *);
+int ncp_date_dos2unix(__le16 time, __le16 date);
+void ncp_date_unix2dos(int unix_date, __le16 * time, __le16 * date);
+
+/* linux/fs/ncpfs/ioctl.c */
+long ncp_ioctl(struct file *, unsigned int, unsigned long);
+long ncp_compat_ioctl(struct file *, unsigned int, unsigned long);
+
+/* linux/fs/ncpfs/sock.c */
+int ncp_request2(struct ncp_server *server, int function,
+	void* reply, int max_reply_size);
+static inline int ncp_request(struct ncp_server *server, int function) {
+	return ncp_request2(server, function, server->packet, server->packet_size);
+}
+int ncp_connect(struct ncp_server *server);
+int ncp_disconnect(struct ncp_server *server);
+void ncp_lock_server(struct ncp_server *server);
+void ncp_unlock_server(struct ncp_server *server);
+
+/* linux/fs/ncpfs/symlink.c */
+#if defined(CONFIG_NCPFS_EXTRAS) || defined(CONFIG_NCPFS_NFS_NS)
+extern const struct address_space_operations ncp_symlink_aops;
+int ncp_symlink(struct inode*, struct dentry*, const char*);
+#endif
+
+/* linux/fs/ncpfs/file.c */
+extern const struct inode_operations ncp_file_inode_operations;
+extern const struct file_operations ncp_file_operations;
+int ncp_make_open(struct inode *, int);
+
+/* linux/fs/ncpfs/mmap.c */
+int ncp_mmap(struct file *, struct vm_area_struct *);
+
+/* linux/fs/ncpfs/ncplib_kernel.c */
+int ncp_make_closed(struct inode *);
+
+#include "ncplib_kernel.h"
diff --git a/include/linux/ncp_fs_i.h b/fs/ncpfs/ncp_fs_i.h
similarity index 100%
rename from include/linux/ncp_fs_i.h
rename to fs/ncpfs/ncp_fs_i.h
diff --git a/include/linux/ncp_fs_sb.h b/fs/ncpfs/ncp_fs_sb.h
similarity index 86%
rename from include/linux/ncp_fs_sb.h
rename to fs/ncpfs/ncp_fs_sb.h
index d64b0e8..4af803f 100644
--- a/include/linux/ncp_fs_sb.h
+++ b/fs/ncpfs/ncp_fs_sb.h
@@ -13,15 +13,30 @@
 #include <linux/net.h>
 #include <linux/mutex.h>
 #include <linux/backing-dev.h>
-
-#ifdef __KERNEL__
-
 #include <linux/workqueue.h>
 
 #define NCP_DEFAULT_OPTIONS 0		/* 2 for packet signatures */
 
 struct sock;
 
+struct ncp_mount_data_kernel {
+	unsigned long    flags;		/* NCP_MOUNT_* flags */
+	unsigned int	 int_flags;	/* internal flags */
+#define NCP_IMOUNT_LOGGEDIN_POSSIBLE	0x0001
+	__kernel_uid32_t mounted_uid;	/* Who may umount() this filesystem? */
+	struct pid      *wdog_pid;	/* Who cares for our watchdog packets? */
+	unsigned int     ncp_fd;	/* The socket to the ncp port */
+	unsigned int     time_out;	/* How long should I wait after
+					   sending an NCP request? */
+	unsigned int     retry_count;	/* And how often should I retry? */
+	unsigned char	 mounted_vol[NCP_VOLNAME_LEN + 1];
+	__kernel_uid32_t uid;
+	__kernel_gid32_t gid;
+	__kernel_mode_t  file_mode;
+	__kernel_mode_t  dir_mode;
+	int		 info_fd;
+};
+
 struct ncp_server {
 
 	struct ncp_mount_data_kernel m;	/* Nearly all of the mount data is of
@@ -158,7 +173,4 @@
 	server->conn_status |= 0x01;
 }
 
-#endif				/* __KERNEL__ */
-
 #endif
- 
diff --git a/fs/ncpfs/ncplib_kernel.c b/fs/ncpfs/ncplib_kernel.c
index a95615a..981a956 100644
--- a/fs/ncpfs/ncplib_kernel.c
+++ b/fs/ncpfs/ncplib_kernel.c
@@ -11,7 +11,7 @@
 
 
 
-#include "ncplib_kernel.h"
+#include "ncp_fs.h"
 
 static inline void assert_server_locked(struct ncp_server *server)
 {
diff --git a/fs/ncpfs/ncplib_kernel.h b/fs/ncpfs/ncplib_kernel.h
index 1220df7..09881e6 100644
--- a/fs/ncpfs/ncplib_kernel.h
+++ b/fs/ncpfs/ncplib_kernel.h
@@ -32,8 +32,6 @@
 #include <linux/ctype.h>
 #endif /* CONFIG_NCPFS_NLS */
 
-#include <linux/ncp_fs.h>
-
 #define NCP_MIN_SYMLINK_SIZE	8
 #define NCP_MAX_SYMLINK_SIZE	512
 
diff --git a/fs/ncpfs/ncpsign_kernel.c b/fs/ncpfs/ncpsign_kernel.c
index d8b2d7e..0890759 100644
--- a/fs/ncpfs/ncpsign_kernel.c
+++ b/fs/ncpfs/ncpsign_kernel.c
@@ -11,6 +11,7 @@
 #include <linux/string.h>
 #include <linux/ncp.h>
 #include <linux/bitops.h>
+#include "ncp_fs.h"
 #include "ncpsign_kernel.h"
 
 /* i386: 32-bit, little endian, handles mis-alignment */
diff --git a/fs/ncpfs/ncpsign_kernel.h b/fs/ncpfs/ncpsign_kernel.h
index 6451a68..d9a1438 100644
--- a/fs/ncpfs/ncpsign_kernel.h
+++ b/fs/ncpfs/ncpsign_kernel.h
@@ -8,8 +8,6 @@
 #ifndef _NCPSIGN_KERNEL_H
 #define _NCPSIGN_KERNEL_H
 
-#include <linux/ncp_fs.h>
-
 #ifdef CONFIG_NCPFS_PACKET_SIGNING
 void __sign_packet(struct ncp_server *server, const char *data, size_t size, __u32 totalsize, void *sign_buff);
 int sign_verify_reply(struct ncp_server *server, const char *data, size_t size, __u32 totalsize, const void *sign_buff);
diff --git a/fs/ncpfs/sock.c b/fs/ncpfs/sock.c
index 668bd26..3a15872 100644
--- a/fs/ncpfs/sock.c
+++ b/fs/ncpfs/sock.c
@@ -28,7 +28,7 @@
 #include <linux/poll.h>
 #include <linux/file.h>
 
-#include <linux/ncp_fs.h>
+#include "ncp_fs.h"
 
 #include "ncpsign_kernel.h"
 
diff --git a/fs/ncpfs/symlink.c b/fs/ncpfs/symlink.c
index c634fd1..661f861 100644
--- a/fs/ncpfs/symlink.c
+++ b/fs/ncpfs/symlink.c
@@ -25,13 +25,11 @@
 
 #include <linux/errno.h>
 #include <linux/fs.h>
-#include <linux/ncp_fs.h>
 #include <linux/time.h>
 #include <linux/slab.h>
 #include <linux/mm.h>
 #include <linux/stat.h>
-#include "ncplib_kernel.h"
-
+#include "ncp_fs.h"
 
 /* these magic numbers must appear in the symlink file -- this makes it a bit
    more resilient against the magic attributes being set on random files. */
diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c
index 93a8b3b..1990165 100644
--- a/fs/nfs/callback.c
+++ b/fs/nfs/callback.c
@@ -16,9 +16,7 @@
 #include <linux/freezer.h>
 #include <linux/kthread.h>
 #include <linux/sunrpc/svcauth_gss.h>
-#if defined(CONFIG_NFS_V4_1)
 #include <linux/sunrpc/bc_xprt.h>
-#endif
 
 #include <net/inet_sock.h>
 
@@ -137,6 +135,33 @@
 
 #if defined(CONFIG_NFS_V4_1)
 /*
+ * CB_SEQUENCE operations will fail until the callback sessionid is set.
+ */
+int nfs4_set_callback_sessionid(struct nfs_client *clp)
+{
+	struct svc_serv *serv = clp->cl_rpcclient->cl_xprt->bc_serv;
+	struct nfs4_sessionid *bc_sid;
+
+	if (!serv->sv_bc_xprt)
+		return -EINVAL;
+
+	/* on success freed in xprt_free */
+	bc_sid = kmalloc(sizeof(struct nfs4_sessionid), GFP_KERNEL);
+	if (!bc_sid)
+		return -ENOMEM;
+	memcpy(bc_sid->data, &clp->cl_session->sess_id.data,
+		NFS4_MAX_SESSIONID_LEN);
+	spin_lock_bh(&serv->sv_cb_lock);
+	serv->sv_bc_xprt->xpt_bc_sid = bc_sid;
+	spin_unlock_bh(&serv->sv_cb_lock);
+	dprintk("%s set xpt_bc_sid=%u:%u:%u:%u for sv_bc_xprt %p\n", __func__,
+		((u32 *)bc_sid->data)[0], ((u32 *)bc_sid->data)[1],
+		((u32 *)bc_sid->data)[2], ((u32 *)bc_sid->data)[3],
+		serv->sv_bc_xprt);
+	return 0;
+}
+
+/*
  * The callback service for NFSv4.1 callbacks
  */
 static int
@@ -177,30 +202,38 @@
 struct svc_rqst *
 nfs41_callback_up(struct svc_serv *serv, struct rpc_xprt *xprt)
 {
-	struct svc_xprt *bc_xprt;
-	struct svc_rqst *rqstp = ERR_PTR(-ENOMEM);
+	struct svc_rqst *rqstp;
+	int ret;
 
-	dprintk("--> %s\n", __func__);
-	/* Create a svc_sock for the service */
-	bc_xprt = svc_sock_create(serv, xprt->prot);
-	if (!bc_xprt)
+	/*
+	 * Create an svc_sock for the back channel service that shares the
+	 * fore channel connection.
+	 * Returns the input port (0) and sets the svc_serv bc_xprt on success
+	 */
+	ret = svc_create_xprt(serv, "tcp-bc", &init_net, PF_INET, 0,
+			      SVC_SOCK_ANONYMOUS);
+	if (ret < 0) {
+		rqstp = ERR_PTR(ret);
 		goto out;
+	}
 
 	/*
 	 * Save the svc_serv in the transport so that it can
 	 * be referenced when the session backchannel is initialized
 	 */
-	serv->bc_xprt = bc_xprt;
 	xprt->bc_serv = serv;
 
 	INIT_LIST_HEAD(&serv->sv_cb_list);
 	spin_lock_init(&serv->sv_cb_lock);
 	init_waitqueue_head(&serv->sv_cb_waitq);
 	rqstp = svc_prepare_thread(serv, &serv->sv_pools[0]);
-	if (IS_ERR(rqstp))
-		svc_sock_destroy(bc_xprt);
+	if (IS_ERR(rqstp)) {
+		svc_xprt_put(serv->sv_bc_xprt);
+		serv->sv_bc_xprt = NULL;
+	}
 out:
-	dprintk("--> %s return %p\n", __func__, rqstp);
+	dprintk("--> %s return %ld\n", __func__,
+		IS_ERR(rqstp) ? PTR_ERR(rqstp) : 0);
 	return rqstp;
 }
 
@@ -233,6 +266,10 @@
 		struct nfs_callback_data *cb_info)
 {
 }
+int nfs4_set_callback_sessionid(struct nfs_client *clp)
+{
+	return 0;
+}
 #endif /* CONFIG_NFS_V4_1 */
 
 /*
@@ -328,6 +365,9 @@
 	struct rpc_clnt *r = clp->cl_rpcclient;
 	char *p = svc_gss_principal(rqstp);
 
+	/* No RPC_AUTH_GSS on NFSv4.1 back channel yet */
+	if (clp->cl_minorversion != 0)
+		return SVC_DROP;
 	/*
 	 * It might just be a normal user principal, in which case
 	 * userspace won't bother to tell us the name at all.
@@ -345,6 +385,23 @@
 	return SVC_OK;
 }
 
+/* pg_authenticate method helper */
+static struct nfs_client *nfs_cb_find_client(struct svc_rqst *rqstp)
+{
+	struct nfs4_sessionid *sessionid = bc_xprt_sid(rqstp);
+	int is_cb_compound = rqstp->rq_proc == CB_COMPOUND ? 1 : 0;
+
+	dprintk("--> %s rq_proc %d\n", __func__, rqstp->rq_proc);
+	if (svc_is_backchannel(rqstp))
+		/* Sessionid (usually) set after CB_NULL ping */
+		return nfs4_find_client_sessionid(svc_addr(rqstp), sessionid,
+						  is_cb_compound);
+	else
+		/* No callback identifier in pg_authenticate */
+		return nfs4_find_client_no_ident(svc_addr(rqstp));
+}
+
+/* pg_authenticate method for nfsv4 callback threads. */
 static int nfs_callback_authenticate(struct svc_rqst *rqstp)
 {
 	struct nfs_client *clp;
@@ -352,7 +409,7 @@
 	int ret = SVC_OK;
 
 	/* Don't talk to strangers */
-	clp = nfs_find_client(svc_addr(rqstp), 4);
+	clp = nfs_cb_find_client(rqstp);
 	if (clp == NULL)
 		return SVC_DROP;
 
diff --git a/fs/nfs/callback.h b/fs/nfs/callback.h
index 85a7cfd..d3b44f9 100644
--- a/fs/nfs/callback.h
+++ b/fs/nfs/callback.h
@@ -34,10 +34,17 @@
 	OP_CB_ILLEGAL = 10044,
 };
 
+struct cb_process_state {
+	__be32			drc_status;
+	struct nfs_client	*clp;
+	struct nfs4_sessionid	*svc_sid; /* v4.1 callback service sessionid */
+};
+
 struct cb_compound_hdr_arg {
 	unsigned int taglen;
 	const char *tag;
 	unsigned int minorversion;
+	unsigned int cb_ident; /* v4.0 callback identifier */
 	unsigned nops;
 };
 
@@ -103,14 +110,23 @@
 	uint32_t			csr_target_highestslotid;
 };
 
-extern unsigned nfs4_callback_sequence(struct cb_sequenceargs *args,
-				       struct cb_sequenceres *res);
+extern __be32 nfs4_callback_sequence(struct cb_sequenceargs *args,
+				       struct cb_sequenceres *res,
+				       struct cb_process_state *cps);
 
 extern int nfs41_validate_delegation_stateid(struct nfs_delegation *delegation,
 					     const nfs4_stateid *stateid);
 
 #define RCA4_TYPE_MASK_RDATA_DLG	0
 #define RCA4_TYPE_MASK_WDATA_DLG	1
+#define RCA4_TYPE_MASK_DIR_DLG         2
+#define RCA4_TYPE_MASK_FILE_LAYOUT     3
+#define RCA4_TYPE_MASK_BLK_LAYOUT      4
+#define RCA4_TYPE_MASK_OBJ_LAYOUT_MIN  8
+#define RCA4_TYPE_MASK_OBJ_LAYOUT_MAX  9
+#define RCA4_TYPE_MASK_OTHER_LAYOUT_MIN 12
+#define RCA4_TYPE_MASK_OTHER_LAYOUT_MAX 15
+#define RCA4_TYPE_MASK_ALL 0xf31f
 
 struct cb_recallanyargs {
 	struct sockaddr	*craa_addr;
@@ -118,25 +134,52 @@
 	uint32_t	craa_type_mask;
 };
 
-extern unsigned nfs4_callback_recallany(struct cb_recallanyargs *args, void *dummy);
+extern __be32 nfs4_callback_recallany(struct cb_recallanyargs *args,
+					void *dummy,
+					struct cb_process_state *cps);
 
 struct cb_recallslotargs {
 	struct sockaddr	*crsa_addr;
 	uint32_t	crsa_target_max_slots;
 };
-extern unsigned nfs4_callback_recallslot(struct cb_recallslotargs *args,
-					  void *dummy);
+extern __be32 nfs4_callback_recallslot(struct cb_recallslotargs *args,
+					 void *dummy,
+					 struct cb_process_state *cps);
 
+struct cb_layoutrecallargs {
+	struct sockaddr		*cbl_addr;
+	uint32_t		cbl_recall_type;
+	uint32_t		cbl_layout_type;
+	uint32_t		cbl_layoutchanged;
+	union {
+		struct {
+			struct nfs_fh		cbl_fh;
+			struct pnfs_layout_range cbl_range;
+			nfs4_stateid		cbl_stateid;
+		};
+		struct nfs_fsid		cbl_fsid;
+	};
+};
+
+extern unsigned nfs4_callback_layoutrecall(
+	struct cb_layoutrecallargs *args,
+	void *dummy, struct cb_process_state *cps);
+
+extern void nfs4_check_drain_bc_complete(struct nfs4_session *ses);
+extern void nfs4_cb_take_slot(struct nfs_client *clp);
 #endif /* CONFIG_NFS_V4_1 */
 
-extern __be32 nfs4_callback_getattr(struct cb_getattrargs *args, struct cb_getattrres *res);
-extern __be32 nfs4_callback_recall(struct cb_recallargs *args, void *dummy);
-
+extern __be32 nfs4_callback_getattr(struct cb_getattrargs *args,
+				    struct cb_getattrres *res,
+				    struct cb_process_state *cps);
+extern __be32 nfs4_callback_recall(struct cb_recallargs *args, void *dummy,
+				   struct cb_process_state *cps);
 #ifdef CONFIG_NFS_V4
 extern int nfs_callback_up(u32 minorversion, struct rpc_xprt *xprt);
 extern void nfs_callback_down(int minorversion);
 extern int nfs4_validate_delegation_stateid(struct nfs_delegation *delegation,
 					    const nfs4_stateid *stateid);
+extern int nfs4_set_callback_sessionid(struct nfs_client *clp);
 #endif /* CONFIG_NFS_V4 */
 /*
  * nfs41: Callbacks are expected to not cause substantial latency,
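
[Editor's note: RCA4_TYPE_MASK_ALL added in callback.h above is simply the OR of the individual recallable-object bits (0-4, 8-9 and 12-15); validate_bitmap_values() in callback_proc.c later checks a craa_type_mask against it. A quick sketch of the arithmetic, with the sample mask value purely hypothetical:]

    #include <stdio.h>

    int main(void)
    {
            unsigned long mask = 0;
            int bit;

            /* bits 0..4: RDATA/WDATA/DIR delegations, FILE and BLK layouts */
            for (bit = 0; bit <= 4; bit++)
                    mask |= 1UL << bit;
            /* bits 8..9: object layouts */
            for (bit = 8; bit <= 9; bit++)
                    mask |= 1UL << bit;
            /* bits 12..15: "other" layouts */
            for (bit = 12; bit <= 15; bit++)
                    mask |= 1UL << bit;

            printf("RCA4_TYPE_MASK_ALL = 0x%lx\n", mask);  /* prints 0xf31f */

            /* A request mask is valid iff it sets no bit outside this set. */
            unsigned long craa_type_mask = 0x3;             /* hypothetical value */
            printf("valid=%d\n", (craa_type_mask & ~mask) == 0);
            return 0;
    }
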
diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c
index 2950fca..4bb91cb 100644
--- a/fs/nfs/callback_proc.c
+++ b/fs/nfs/callback_proc.c
@@ -12,30 +12,33 @@
 #include "callback.h"
 #include "delegation.h"
 #include "internal.h"
+#include "pnfs.h"
 
 #ifdef NFS_DEBUG
 #define NFSDBG_FACILITY NFSDBG_CALLBACK
 #endif
- 
-__be32 nfs4_callback_getattr(struct cb_getattrargs *args, struct cb_getattrres *res)
+
+__be32 nfs4_callback_getattr(struct cb_getattrargs *args,
+			     struct cb_getattrres *res,
+			     struct cb_process_state *cps)
 {
-	struct nfs_client *clp;
 	struct nfs_delegation *delegation;
 	struct nfs_inode *nfsi;
 	struct inode *inode;
 
-	res->bitmap[0] = res->bitmap[1] = 0;
-	res->status = htonl(NFS4ERR_BADHANDLE);
-	clp = nfs_find_client(args->addr, 4);
-	if (clp == NULL)
+	res->status = htonl(NFS4ERR_OP_NOT_IN_SESSION);
+	if (!cps->clp) /* Always set for v4.0. Set in cb_sequence for v4.1 */
 		goto out;
 
-	dprintk("NFS: GETATTR callback request from %s\n",
-		rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_ADDR));
+	res->bitmap[0] = res->bitmap[1] = 0;
+	res->status = htonl(NFS4ERR_BADHANDLE);
 
-	inode = nfs_delegation_find_inode(clp, &args->fh);
+	dprintk("NFS: GETATTR callback request from %s\n",
+		rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR));
+
+	inode = nfs_delegation_find_inode(cps->clp, &args->fh);
 	if (inode == NULL)
-		goto out_putclient;
+		goto out;
 	nfsi = NFS_I(inode);
 	rcu_read_lock();
 	delegation = rcu_dereference(nfsi->delegation);
@@ -55,49 +58,41 @@
 out_iput:
 	rcu_read_unlock();
 	iput(inode);
-out_putclient:
-	nfs_put_client(clp);
 out:
 	dprintk("%s: exit with status = %d\n", __func__, ntohl(res->status));
 	return res->status;
 }
 
-__be32 nfs4_callback_recall(struct cb_recallargs *args, void *dummy)
+__be32 nfs4_callback_recall(struct cb_recallargs *args, void *dummy,
+			    struct cb_process_state *cps)
 {
-	struct nfs_client *clp;
 	struct inode *inode;
 	__be32 res;
 	
-	res = htonl(NFS4ERR_BADHANDLE);
-	clp = nfs_find_client(args->addr, 4);
-	if (clp == NULL)
+	res = htonl(NFS4ERR_OP_NOT_IN_SESSION);
+	if (!cps->clp) /* Always set for v4.0. Set in cb_sequence for v4.1 */
 		goto out;
 
 	dprintk("NFS: RECALL callback request from %s\n",
-		rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_ADDR));
+		rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR));
 
-	do {
-		struct nfs_client *prev = clp;
-
-		inode = nfs_delegation_find_inode(clp, &args->fh);
-		if (inode != NULL) {
-			/* Set up a helper thread to actually return the delegation */
-			switch (nfs_async_inode_return_delegation(inode, &args->stateid)) {
-				case 0:
-					res = 0;
-					break;
-				case -ENOENT:
-					if (res != 0)
-						res = htonl(NFS4ERR_BAD_STATEID);
-					break;
-				default:
-					res = htonl(NFS4ERR_RESOURCE);
-			}
-			iput(inode);
-		}
-		clp = nfs_find_client_next(prev);
-		nfs_put_client(prev);
-	} while (clp != NULL);
+	res = htonl(NFS4ERR_BADHANDLE);
+	inode = nfs_delegation_find_inode(cps->clp, &args->fh);
+	if (inode == NULL)
+		goto out;
+	/* Set up a helper thread to actually return the delegation */
+	switch (nfs_async_inode_return_delegation(inode, &args->stateid)) {
+	case 0:
+		res = 0;
+		break;
+	case -ENOENT:
+		if (res != 0)
+			res = htonl(NFS4ERR_BAD_STATEID);
+		break;
+	default:
+		res = htonl(NFS4ERR_RESOURCE);
+	}
+	iput(inode);
 out:
 	dprintk("%s: exit with status = %d\n", __func__, ntohl(res));
 	return res;
@@ -113,6 +108,139 @@
 
 #if defined(CONFIG_NFS_V4_1)
 
+static u32 initiate_file_draining(struct nfs_client *clp,
+				  struct cb_layoutrecallargs *args)
+{
+	struct pnfs_layout_hdr *lo;
+	struct inode *ino;
+	bool found = false;
+	u32 rv = NFS4ERR_NOMATCHING_LAYOUT;
+	LIST_HEAD(free_me_list);
+
+	spin_lock(&clp->cl_lock);
+	list_for_each_entry(lo, &clp->cl_layouts, plh_layouts) {
+		if (nfs_compare_fh(&args->cbl_fh,
+				   &NFS_I(lo->plh_inode)->fh))
+			continue;
+		ino = igrab(lo->plh_inode);
+		if (!ino)
+			continue;
+		found = true;
+		/* Without this, layout can be freed as soon
+		 * as we release cl_lock.
+		 */
+		get_layout_hdr(lo);
+		break;
+	}
+	spin_unlock(&clp->cl_lock);
+	if (!found)
+		return NFS4ERR_NOMATCHING_LAYOUT;
+
+	spin_lock(&ino->i_lock);
+	if (test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags) ||
+	    mark_matching_lsegs_invalid(lo, &free_me_list,
+					args->cbl_range.iomode))
+		rv = NFS4ERR_DELAY;
+	else
+		rv = NFS4ERR_NOMATCHING_LAYOUT;
+	pnfs_set_layout_stateid(lo, &args->cbl_stateid, true);
+	spin_unlock(&ino->i_lock);
+	pnfs_free_lseg_list(&free_me_list);
+	put_layout_hdr(lo);
+	iput(ino);
+	return rv;
+}
+
+static u32 initiate_bulk_draining(struct nfs_client *clp,
+				  struct cb_layoutrecallargs *args)
+{
+	struct pnfs_layout_hdr *lo;
+	struct inode *ino;
+	u32 rv = NFS4ERR_NOMATCHING_LAYOUT;
+	struct pnfs_layout_hdr *tmp;
+	LIST_HEAD(recall_list);
+	LIST_HEAD(free_me_list);
+	struct pnfs_layout_range range = {
+		.iomode = IOMODE_ANY,
+		.offset = 0,
+		.length = NFS4_MAX_UINT64,
+	};
+
+	spin_lock(&clp->cl_lock);
+	list_for_each_entry(lo, &clp->cl_layouts, plh_layouts) {
+		if ((args->cbl_recall_type == RETURN_FSID) &&
+		    memcmp(&NFS_SERVER(lo->plh_inode)->fsid,
+			   &args->cbl_fsid, sizeof(struct nfs_fsid)))
+			continue;
+		if (!igrab(lo->plh_inode))
+			continue;
+		get_layout_hdr(lo);
+		BUG_ON(!list_empty(&lo->plh_bulk_recall));
+		list_add(&lo->plh_bulk_recall, &recall_list);
+	}
+	spin_unlock(&clp->cl_lock);
+	list_for_each_entry_safe(lo, tmp,
+				 &recall_list, plh_bulk_recall) {
+		ino = lo->plh_inode;
+		spin_lock(&ino->i_lock);
+		set_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags);
+		if (mark_matching_lsegs_invalid(lo, &free_me_list, range.iomode))
+			rv = NFS4ERR_DELAY;
+		list_del_init(&lo->plh_bulk_recall);
+		spin_unlock(&ino->i_lock);
+		put_layout_hdr(lo);
+		iput(ino);
+	}
+	pnfs_free_lseg_list(&free_me_list);
+	return rv;
+}
+
+static u32 do_callback_layoutrecall(struct nfs_client *clp,
+				    struct cb_layoutrecallargs *args)
+{
+	u32 res = NFS4ERR_DELAY;
+
+	dprintk("%s enter, type=%i\n", __func__, args->cbl_recall_type);
+	if (test_and_set_bit(NFS4CLNT_LAYOUTRECALL, &clp->cl_state))
+		goto out;
+	if (args->cbl_recall_type == RETURN_FILE)
+		res = initiate_file_draining(clp, args);
+	else
+		res = initiate_bulk_draining(clp, args);
+	clear_bit(NFS4CLNT_LAYOUTRECALL, &clp->cl_state);
+out:
+	dprintk("%s returning %i\n", __func__, res);
+	return res;
+
+}
+
+__be32 nfs4_callback_layoutrecall(struct cb_layoutrecallargs *args,
+				  void *dummy, struct cb_process_state *cps)
+{
+	u32 res;
+
+	dprintk("%s: -->\n", __func__);
+
+	if (cps->clp)
+		res = do_callback_layoutrecall(cps->clp, args);
+	else
+		res = NFS4ERR_OP_NOT_IN_SESSION;
+
+	dprintk("%s: exit with status = %d\n", __func__, res);
+	return cpu_to_be32(res);
+}
+
+static void pnfs_recall_all_layouts(struct nfs_client *clp)
+{
+	struct cb_layoutrecallargs args;
+
+	/* Pretend we got a CB_LAYOUTRECALL(ALL) */
+	memset(&args, 0, sizeof(args));
+	args.cbl_recall_type = RETURN_ALL;
+	/* FIXME we ignore errors, what should we do? */
+	do_callback_layoutrecall(clp, &args);
+}
+
 int nfs41_validate_delegation_stateid(struct nfs_delegation *delegation, const nfs4_stateid *stateid)
 {
 	if (delegation == NULL)
@@ -185,42 +313,6 @@
 }
 
 /*
- * Returns a pointer to a held 'struct nfs_client' that matches the server's
- * address, major version number, and session ID.  It is the caller's
- * responsibility to release the returned reference.
- *
- * Returns NULL if there are no connections with sessions, or if no session
- * matches the one of interest.
- */
- static struct nfs_client *find_client_with_session(
-	const struct sockaddr *addr, u32 nfsversion,
-	struct nfs4_sessionid *sessionid)
-{
-	struct nfs_client *clp;
-
-	clp = nfs_find_client(addr, 4);
-	if (clp == NULL)
-		return NULL;
-
-	do {
-		struct nfs_client *prev = clp;
-
-		if (clp->cl_session != NULL) {
-			if (memcmp(clp->cl_session->sess_id.data,
-					sessionid->data,
-					NFS4_MAX_SESSIONID_LEN) == 0) {
-				/* Returns a held reference to clp */
-				return clp;
-			}
-		}
-		clp = nfs_find_client_next(prev);
-		nfs_put_client(prev);
-	} while (clp != NULL);
-
-	return NULL;
-}
-
-/*
  * For each referring call triple, check the session's slot table for
  * a match.  If the slot is in use and the sequence numbers match, the
  * client is still waiting for a response to the original request.
@@ -276,20 +368,34 @@
 }
 
 __be32 nfs4_callback_sequence(struct cb_sequenceargs *args,
-				struct cb_sequenceres *res)
+			      struct cb_sequenceres *res,
+			      struct cb_process_state *cps)
 {
 	struct nfs_client *clp;
 	int i;
 	__be32 status;
 
+	cps->clp = NULL;
+
 	status = htonl(NFS4ERR_BADSESSION);
-	clp = find_client_with_session(args->csa_addr, 4, &args->csa_sessionid);
+	/* Incoming session must match the callback session */
+	if (memcmp(&args->csa_sessionid, cps->svc_sid, NFS4_MAX_SESSIONID_LEN))
+		goto out;
+
+	clp = nfs4_find_client_sessionid(args->csa_addr,
+					 &args->csa_sessionid, 1);
 	if (clp == NULL)
 		goto out;
 
+	/* state manager is resetting the session */
+	if (test_bit(NFS4_SESSION_DRAINING, &clp->cl_session->session_state)) {
+		status = NFS4ERR_DELAY;
+		goto out;
+	}
+
 	status = validate_seqid(&clp->cl_session->bc_slot_table, args);
 	if (status)
-		goto out_putclient;
+		goto out;
 
 	/*
 	 * Check for pending referring calls.  If a match is found, a
@@ -298,7 +404,7 @@
 	 */
 	if (referring_call_exists(clp, args->csa_nrclists, args->csa_rclists)) {
 		status = htonl(NFS4ERR_DELAY);
-		goto out_putclient;
+		goto out;
 	}
 
 	memcpy(&res->csr_sessionid, &args->csa_sessionid,
@@ -307,83 +413,93 @@
 	res->csr_slotid = args->csa_slotid;
 	res->csr_highestslotid = NFS41_BC_MAX_CALLBACKS - 1;
 	res->csr_target_highestslotid = NFS41_BC_MAX_CALLBACKS - 1;
+	nfs4_cb_take_slot(clp);
+	cps->clp = clp; /* put in nfs4_callback_compound */
 
-out_putclient:
-	nfs_put_client(clp);
 out:
 	for (i = 0; i < args->csa_nrclists; i++)
 		kfree(args->csa_rclists[i].rcl_refcalls);
 	kfree(args->csa_rclists);
 
-	if (status == htonl(NFS4ERR_RETRY_UNCACHED_REP))
-		res->csr_status = 0;
-	else
+	if (status == htonl(NFS4ERR_RETRY_UNCACHED_REP)) {
+		cps->drc_status = status;
+		status = 0;
+	} else
 		res->csr_status = status;
+
 	dprintk("%s: exit with status = %d res->csr_status %d\n", __func__,
 		ntohl(status), ntohl(res->csr_status));
 	return status;
 }
 
-__be32 nfs4_callback_recallany(struct cb_recallanyargs *args, void *dummy)
+static bool
+validate_bitmap_values(unsigned long mask)
 {
-	struct nfs_client *clp;
+	return (mask & ~RCA4_TYPE_MASK_ALL) == 0;
+}
+
+__be32 nfs4_callback_recallany(struct cb_recallanyargs *args, void *dummy,
+			       struct cb_process_state *cps)
+{
 	__be32 status;
 	fmode_t flags = 0;
 
-	status = htonl(NFS4ERR_OP_NOT_IN_SESSION);
-	clp = nfs_find_client(args->craa_addr, 4);
-	if (clp == NULL)
+	status = cpu_to_be32(NFS4ERR_OP_NOT_IN_SESSION);
+	if (!cps->clp) /* set in cb_sequence */
 		goto out;
 
 	dprintk("NFS: RECALL_ANY callback request from %s\n",
-		rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_ADDR));
+		rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR));
 
+	status = cpu_to_be32(NFS4ERR_INVAL);
+	if (!validate_bitmap_values(args->craa_type_mask))
+		goto out;
+
+	status = cpu_to_be32(NFS4_OK);
 	if (test_bit(RCA4_TYPE_MASK_RDATA_DLG, (const unsigned long *)
 		     &args->craa_type_mask))
 		flags = FMODE_READ;
 	if (test_bit(RCA4_TYPE_MASK_WDATA_DLG, (const unsigned long *)
 		     &args->craa_type_mask))
 		flags |= FMODE_WRITE;
-
+	if (test_bit(RCA4_TYPE_MASK_FILE_LAYOUT, (const unsigned long *)
+		     &args->craa_type_mask))
+		pnfs_recall_all_layouts(cps->clp);
 	if (flags)
-		nfs_expire_all_delegation_types(clp, flags);
-	status = htonl(NFS4_OK);
+		nfs_expire_all_delegation_types(cps->clp, flags);
 out:
 	dprintk("%s: exit with status = %d\n", __func__, ntohl(status));
 	return status;
 }
 
 /* Reduce the fore channel's max_slots to the target value */
-__be32 nfs4_callback_recallslot(struct cb_recallslotargs *args, void *dummy)
+__be32 nfs4_callback_recallslot(struct cb_recallslotargs *args, void *dummy,
+				struct cb_process_state *cps)
 {
-	struct nfs_client *clp;
 	struct nfs4_slot_table *fc_tbl;
 	__be32 status;
 
 	status = htonl(NFS4ERR_OP_NOT_IN_SESSION);
-	clp = nfs_find_client(args->crsa_addr, 4);
-	if (clp == NULL)
+	if (!cps->clp) /* set in cb_sequence */
 		goto out;
 
 	dprintk("NFS: CB_RECALL_SLOT request from %s target max slots %d\n",
-		rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_ADDR),
+		rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR),
 		args->crsa_target_max_slots);
 
-	fc_tbl = &clp->cl_session->fc_slot_table;
+	fc_tbl = &cps->clp->cl_session->fc_slot_table;
 
 	status = htonl(NFS4ERR_BAD_HIGH_SLOT);
 	if (args->crsa_target_max_slots > fc_tbl->max_slots ||
 	    args->crsa_target_max_slots < 1)
-		goto out_putclient;
+		goto out;
 
 	status = htonl(NFS4_OK);
 	if (args->crsa_target_max_slots == fc_tbl->max_slots)
-		goto out_putclient;
+		goto out;
 
 	fc_tbl->target_max_slots = args->crsa_target_max_slots;
-	nfs41_handle_recall_slot(clp);
-out_putclient:
-	nfs_put_client(clp);	/* balance nfs_find_client */
+	nfs41_handle_recall_slot(cps->clp);
 out:
 	dprintk("%s: exit with status = %d\n", __func__, ntohl(status));
 	return status;
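
[Editor's note: initiate_bulk_draining() above follows a common two-phase pattern: collect matching layouts onto a private list while holding cl_lock (taking references so they cannot disappear), then drop the lock and do the heavier per-entry work. Below is a hedged userspace sketch of the same shape, with a plain mutex and singly linked lists standing in for the kernel primitives.]

    #include <pthread.h>
    #include <stdio.h>

    struct layout {
            int id;
            int refcount;
            struct layout *next;         /* link on the global list */
            struct layout *recall_next;  /* link on the private recall list */
    };

    static pthread_mutex_t cl_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct layout a = { .id = 1, .refcount = 1 };
    static struct layout b = { .id = 2, .refcount = 1 };
    static struct layout *layouts = &a;

    int main(void)
    {
            struct layout *lo, *recall_list = NULL;

            a.next = &b;

            /* Phase 1: under the lock, take a reference and move entries onto
             * a private list -- no blocking work is done while locked. */
            pthread_mutex_lock(&cl_lock);
            for (lo = layouts; lo; lo = lo->next) {
                    lo->refcount++;
                    lo->recall_next = recall_list;
                    recall_list = lo;
            }
            pthread_mutex_unlock(&cl_lock);

            /* Phase 2: outside the lock, process each entry and drop the
             * reference taken above. */
            for (lo = recall_list; lo; lo = lo->recall_next) {
                    printf("draining layout %d\n", lo->id);
                    lo->refcount--;
            }
            return 0;
    }
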
diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c
index 05af212..23112c2 100644
--- a/fs/nfs/callback_xdr.c
+++ b/fs/nfs/callback_xdr.c
@@ -10,8 +10,10 @@
 #include <linux/nfs4.h>
 #include <linux/nfs_fs.h>
 #include <linux/slab.h>
+#include <linux/sunrpc/bc_xprt.h>
 #include "nfs4_fs.h"
 #include "callback.h"
+#include "internal.h"
 
 #define CB_OP_TAGLEN_MAXSZ	(512)
 #define CB_OP_HDR_RES_MAXSZ	(2 + CB_OP_TAGLEN_MAXSZ)
@@ -22,6 +24,7 @@
 #define CB_OP_RECALL_RES_MAXSZ	(CB_OP_HDR_RES_MAXSZ)
 
 #if defined(CONFIG_NFS_V4_1)
+#define CB_OP_LAYOUTRECALL_RES_MAXSZ	(CB_OP_HDR_RES_MAXSZ)
 #define CB_OP_SEQUENCE_RES_MAXSZ	(CB_OP_HDR_RES_MAXSZ + \
 					4 + 1 + 3)
 #define CB_OP_RECALLANY_RES_MAXSZ	(CB_OP_HDR_RES_MAXSZ)
@@ -33,7 +36,8 @@
 /* Internal error code */
 #define NFS4ERR_RESOURCE_HDR	11050
 
-typedef __be32 (*callback_process_op_t)(void *, void *);
+typedef __be32 (*callback_process_op_t)(void *, void *,
+					struct cb_process_state *);
 typedef __be32 (*callback_decode_arg_t)(struct svc_rqst *, struct xdr_stream *, void *);
 typedef __be32 (*callback_encode_res_t)(struct svc_rqst *, struct xdr_stream *, void *);
 
@@ -160,7 +164,7 @@
 	hdr->minorversion = ntohl(*p++);
 	/* Check minor version is zero or one. */
 	if (hdr->minorversion <= 1) {
-		p++;	/* skip callback_ident */
+		hdr->cb_ident = ntohl(*p++); /* ignored by v4.1 */
 	} else {
 		printk(KERN_WARNING "%s: NFSv4 server callback with "
 			"illegal minor version %u!\n",
@@ -220,6 +224,66 @@
 
 #if defined(CONFIG_NFS_V4_1)
 
+static __be32 decode_layoutrecall_args(struct svc_rqst *rqstp,
+				       struct xdr_stream *xdr,
+				       struct cb_layoutrecallargs *args)
+{
+	__be32 *p;
+	__be32 status = 0;
+	uint32_t iomode;
+
+	args->cbl_addr = svc_addr(rqstp);
+	p = read_buf(xdr, 4 * sizeof(uint32_t));
+	if (unlikely(p == NULL)) {
+		status = htonl(NFS4ERR_BADXDR);
+		goto out;
+	}
+
+	args->cbl_layout_type = ntohl(*p++);
+	/* Despite the spec's xdr, iomode really belongs in the FILE switch,
+	 * as it is unusable and ignored with the other types.
+	 */
+	iomode = ntohl(*p++);
+	args->cbl_layoutchanged = ntohl(*p++);
+	args->cbl_recall_type = ntohl(*p++);
+
+	if (args->cbl_recall_type == RETURN_FILE) {
+		args->cbl_range.iomode = iomode;
+		status = decode_fh(xdr, &args->cbl_fh);
+		if (unlikely(status != 0))
+			goto out;
+
+		p = read_buf(xdr, 2 * sizeof(uint64_t));
+		if (unlikely(p == NULL)) {
+			status = htonl(NFS4ERR_BADXDR);
+			goto out;
+		}
+		p = xdr_decode_hyper(p, &args->cbl_range.offset);
+		p = xdr_decode_hyper(p, &args->cbl_range.length);
+		status = decode_stateid(xdr, &args->cbl_stateid);
+		if (unlikely(status != 0))
+			goto out;
+	} else if (args->cbl_recall_type == RETURN_FSID) {
+		p = read_buf(xdr, 2 * sizeof(uint64_t));
+		if (unlikely(p == NULL)) {
+			status = htonl(NFS4ERR_BADXDR);
+			goto out;
+		}
+		p = xdr_decode_hyper(p, &args->cbl_fsid.major);
+		p = xdr_decode_hyper(p, &args->cbl_fsid.minor);
+	} else if (args->cbl_recall_type != RETURN_ALL) {
+		status = htonl(NFS4ERR_BADXDR);
+		goto out;
+	}
+	dprintk("%s: ltype 0x%x iomode %d changed %d recall_type %d\n",
+		__func__,
+		args->cbl_layout_type, iomode,
+		args->cbl_layoutchanged, args->cbl_recall_type);
+out:
+	dprintk("%s: exit with status = %d\n", __func__, ntohl(status));
+	return status;
+}
+
 static __be32 decode_sessionid(struct xdr_stream *xdr,
 				 struct nfs4_sessionid *sid)
 {
@@ -574,10 +638,10 @@
 	case OP_CB_SEQUENCE:
 	case OP_CB_RECALL_ANY:
 	case OP_CB_RECALL_SLOT:
+	case OP_CB_LAYOUTRECALL:
 		*op = &callback_ops[op_nr];
 		break;
 
-	case OP_CB_LAYOUTRECALL:
 	case OP_CB_NOTIFY_DEVICEID:
 	case OP_CB_NOTIFY:
 	case OP_CB_PUSH_DELEG:
@@ -593,6 +657,37 @@
 	return htonl(NFS_OK);
 }
 
+static void nfs4_callback_free_slot(struct nfs4_session *session)
+{
+	struct nfs4_slot_table *tbl = &session->bc_slot_table;
+
+	spin_lock(&tbl->slot_tbl_lock);
+	/*
+	 * Let the state manager know callback processing done.
+	 * A single slot, so highest used slotid is either 0 or -1
+	 */
+	tbl->highest_used_slotid--;
+	nfs4_check_drain_bc_complete(session);
+	spin_unlock(&tbl->slot_tbl_lock);
+}
+
+static void nfs4_cb_free_slot(struct nfs_client *clp)
+{
+	if (clp && clp->cl_session)
+		nfs4_callback_free_slot(clp->cl_session);
+}
+
+/* A single slot, so highest used slotid is either 0 or -1 */
+void nfs4_cb_take_slot(struct nfs_client *clp)
+{
+	struct nfs4_slot_table *tbl = &clp->cl_session->bc_slot_table;
+
+	spin_lock(&tbl->slot_tbl_lock);
+	tbl->highest_used_slotid++;
+	BUG_ON(tbl->highest_used_slotid != 0);
+	spin_unlock(&tbl->slot_tbl_lock);
+}
+
 #else /* CONFIG_NFS_V4_1 */
 
 static __be32
@@ -601,6 +696,9 @@
 	return htonl(NFS4ERR_MINOR_VERS_MISMATCH);
 }
 
+static void nfs4_cb_free_slot(struct nfs_client *clp)
+{
+}
 #endif /* CONFIG_NFS_V4_1 */
 
 static __be32
@@ -621,7 +719,8 @@
 static __be32 process_op(uint32_t minorversion, int nop,
 		struct svc_rqst *rqstp,
 		struct xdr_stream *xdr_in, void *argp,
-		struct xdr_stream *xdr_out, void *resp, int* drc_status)
+		struct xdr_stream *xdr_out, void *resp,
+		struct cb_process_state *cps)
 {
 	struct callback_op *op = &callback_ops[0];
 	unsigned int op_nr;
@@ -644,8 +743,8 @@
 	if (status)
 		goto encode_hdr;
 
-	if (*drc_status) {
-		status = *drc_status;
+	if (cps->drc_status) {
+		status = cps->drc_status;
 		goto encode_hdr;
 	}
 
@@ -653,16 +752,10 @@
 	if (maxlen > 0 && maxlen < PAGE_SIZE) {
 		status = op->decode_args(rqstp, xdr_in, argp);
 		if (likely(status == 0))
-			status = op->process_op(argp, resp);
+			status = op->process_op(argp, resp, cps);
 	} else
 		status = htonl(NFS4ERR_RESOURCE);
 
-	/* Only set by OP_CB_SEQUENCE processing */
-	if (status == htonl(NFS4ERR_RETRY_UNCACHED_REP)) {
-		*drc_status = status;
-		status = 0;
-	}
-
 encode_hdr:
 	res = encode_op_hdr(xdr_out, op_nr, status);
 	if (unlikely(res))
@@ -681,8 +774,11 @@
 	struct cb_compound_hdr_arg hdr_arg = { 0 };
 	struct cb_compound_hdr_res hdr_res = { NULL };
 	struct xdr_stream xdr_in, xdr_out;
-	__be32 *p;
-	__be32 status, drc_status = 0;
+	__be32 *p, status;
+	struct cb_process_state cps = {
+		.drc_status = 0,
+		.clp = NULL,
+	};
 	unsigned int nops = 0;
 
 	dprintk("%s: start\n", __func__);
@@ -696,6 +792,13 @@
 	if (status == __constant_htonl(NFS4ERR_RESOURCE))
 		return rpc_garbage_args;
 
+	if (hdr_arg.minorversion == 0) {
+		cps.clp = nfs4_find_client_ident(hdr_arg.cb_ident);
+		if (!cps.clp)
+			return rpc_drop_reply;
+	} else
+		cps.svc_sid = bc_xprt_sid(rqstp);
+
 	hdr_res.taglen = hdr_arg.taglen;
 	hdr_res.tag = hdr_arg.tag;
 	if (encode_compound_hdr_res(&xdr_out, &hdr_res) != 0)
@@ -703,7 +806,7 @@
 
 	while (status == 0 && nops != hdr_arg.nops) {
 		status = process_op(hdr_arg.minorversion, nops, rqstp,
-				    &xdr_in, argp, &xdr_out, resp, &drc_status);
+				    &xdr_in, argp, &xdr_out, resp, &cps);
 		nops++;
 	}
 
@@ -716,6 +819,8 @@
 
 	*hdr_res.status = status;
 	*hdr_res.nops = htonl(nops);
+	nfs4_cb_free_slot(cps.clp);
+	nfs_put_client(cps.clp);
 	dprintk("%s: done, status = %u\n", __func__, ntohl(status));
 	return rpc_success;
 }
@@ -739,6 +844,12 @@
 		.res_maxsize = CB_OP_RECALL_RES_MAXSZ,
 	},
 #if defined(CONFIG_NFS_V4_1)
+	[OP_CB_LAYOUTRECALL] = {
+		.process_op = (callback_process_op_t)nfs4_callback_layoutrecall,
+		.decode_args =
+			(callback_decode_arg_t)decode_layoutrecall_args,
+		.res_maxsize = CB_OP_LAYOUTRECALL_RES_MAXSZ,
+	},
 	[OP_CB_SEQUENCE] = {
 		.process_op = (callback_process_op_t)nfs4_callback_sequence,
 		.decode_args = (callback_decode_arg_t)decode_cb_sequence_args,
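
[Editor's note: the callback_ops[] table and process_op() above dispatch each decoded operation through a function pointer that now also receives the shared cb_process_state, so state established by CB_SEQUENCE (the nfs_client, the DRC status) is visible to the ops that follow. The sketch below mirrors that dispatch shape only; all names in it are illustrative, not the kernel's.]

    #include <stdio.h>

    struct process_state {              /* stands in for struct cb_process_state */
            int drc_status;
            void *clp;
    };

    typedef int (*process_op_t)(void *argp, void *resp, struct process_state *ps);

    static int op_sequence(void *argp, void *resp, struct process_state *ps)
    {
            ps->clp = argp;             /* "found" client; later ops rely on it */
            return 0;
    }

    static int op_recall(void *argp, void *resp, struct process_state *ps)
    {
            return ps->clp ? 0 : -1;    /* fail unless CB_SEQUENCE ran first */
    }

    static process_op_t ops[] = { op_sequence, op_recall };

    int main(void)
    {
            struct process_state ps = { 0, NULL };
            int dummy;
            unsigned i;

            /* Walk the compound: each op sees the same per-request state. */
            for (i = 0; i < sizeof(ops) / sizeof(ops[0]); i++)
                    printf("op %u -> status %d\n", i, ops[i](&dummy, &dummy, &ps));
            return 0;
    }
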
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index 0870d0d..192f2f8 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -56,6 +56,30 @@
 static LIST_HEAD(nfs_client_list);
 static LIST_HEAD(nfs_volume_list);
 static DECLARE_WAIT_QUEUE_HEAD(nfs_client_active_wq);
+#ifdef CONFIG_NFS_V4
+static DEFINE_IDR(cb_ident_idr); /* Protected by nfs_client_lock */
+
+/*
+ * Get a unique NFSv4.0 callback identifier which will be used
+ * by the V4.0 callback service to lookup the nfs_client struct
+ */
+static int nfs_get_cb_ident_idr(struct nfs_client *clp, int minorversion)
+{
+	int ret = 0;
+
+	if (clp->rpc_ops->version != 4 || minorversion != 0)
+		return ret;
+retry:
+	if (!idr_pre_get(&cb_ident_idr, GFP_KERNEL))
+		return -ENOMEM;
+	spin_lock(&nfs_client_lock);
+	ret = idr_get_new(&cb_ident_idr, clp, &clp->cl_cb_ident);
+	spin_unlock(&nfs_client_lock);
+	if (ret == -EAGAIN)
+		goto retry;
+	return ret;
+}
+#endif /* CONFIG_NFS_V4 */
 
 /*
  * RPC cruft for NFS
@@ -144,7 +168,10 @@
 	clp->cl_proto = cl_init->proto;
 
 #ifdef CONFIG_NFS_V4
-	INIT_LIST_HEAD(&clp->cl_delegations);
+	err = nfs_get_cb_ident_idr(clp, cl_init->minorversion);
+	if (err)
+		goto error_cleanup;
+
 	spin_lock_init(&clp->cl_lock);
 	INIT_DELAYED_WORK(&clp->cl_renewd, nfs4_renew_state);
 	rpc_init_wait_queue(&clp->cl_rpcwaitq, "NFS client");
@@ -170,21 +197,17 @@
 }
 
 #ifdef CONFIG_NFS_V4
-/*
- * Clears/puts all minor version specific parts from an nfs_client struct
- * reverting it to minorversion 0.
- */
-static void nfs4_clear_client_minor_version(struct nfs_client *clp)
-{
 #ifdef CONFIG_NFS_V4_1
-	if (nfs4_has_session(clp)) {
+static void nfs4_shutdown_session(struct nfs_client *clp)
+{
+	if (nfs4_has_session(clp))
 		nfs4_destroy_session(clp->cl_session);
-		clp->cl_session = NULL;
-	}
-
-	clp->cl_mvops = nfs_v4_minor_ops[0];
-#endif /* CONFIG_NFS_V4_1 */
 }
+#else /* CONFIG_NFS_V4_1 */
+static void nfs4_shutdown_session(struct nfs_client *clp)
+{
+}
+#endif /* CONFIG_NFS_V4_1 */
 
 /*
  * Destroy the NFS4 callback service
@@ -199,17 +222,49 @@
 {
 	if (__test_and_clear_bit(NFS_CS_RENEWD, &clp->cl_res_state))
 		nfs4_kill_renewd(clp);
-	nfs4_clear_client_minor_version(clp);
+	nfs4_shutdown_session(clp);
 	nfs4_destroy_callback(clp);
 	if (__test_and_clear_bit(NFS_CS_IDMAP, &clp->cl_res_state))
 		nfs_idmap_delete(clp);
 
 	rpc_destroy_wait_queue(&clp->cl_rpcwaitq);
 }
+
+/* idr_remove_all is not needed as all id's are removed by nfs_put_client */
+void nfs_cleanup_cb_ident_idr(void)
+{
+	idr_destroy(&cb_ident_idr);
+}
+
+/* nfs_client_lock held */
+static void nfs_cb_idr_remove_locked(struct nfs_client *clp)
+{
+	if (clp->cl_cb_ident)
+		idr_remove(&cb_ident_idr, clp->cl_cb_ident);
+}
+
+static void pnfs_init_server(struct nfs_server *server)
+{
+	rpc_init_wait_queue(&server->roc_rpcwaitq, "pNFS ROC");
+}
+
 #else
 static void nfs4_shutdown_client(struct nfs_client *clp)
 {
 }
+
+void nfs_cleanup_cb_ident_idr(void)
+{
+}
+
+static void nfs_cb_idr_remove_locked(struct nfs_client *clp)
+{
+}
+
+static void pnfs_init_server(struct nfs_server *server)
+{
+}
+
 #endif /* CONFIG_NFS_V4 */
 
 /*
@@ -248,6 +303,7 @@
 
 	if (atomic_dec_and_lock(&clp->cl_count, &nfs_client_lock)) {
 		list_del(&clp->cl_share_link);
+		nfs_cb_idr_remove_locked(clp);
 		spin_unlock(&nfs_client_lock);
 
 		BUG_ON(!list_empty(&clp->cl_superblocks));
@@ -363,70 +419,28 @@
 	return 0;
 }
 
-/*
- * Find a client by IP address and protocol version
- * - returns NULL if no such client
- */
-struct nfs_client *nfs_find_client(const struct sockaddr *addr, u32 nfsversion)
+/* Common match routine for v4.0 and v4.1 callback services */
+bool
+nfs4_cb_match_client(const struct sockaddr *addr, struct nfs_client *clp,
+		     u32 minorversion)
 {
-	struct nfs_client *clp;
+	struct sockaddr *clap = (struct sockaddr *)&clp->cl_addr;
 
-	spin_lock(&nfs_client_lock);
-	list_for_each_entry(clp, &nfs_client_list, cl_share_link) {
-		struct sockaddr *clap = (struct sockaddr *)&clp->cl_addr;
+	/* Don't match clients that failed to initialise */
+	if (!(clp->cl_cons_state == NFS_CS_READY ||
+	    clp->cl_cons_state == NFS_CS_SESSION_INITING))
+		return false;
 
-		/* Don't match clients that failed to initialise properly */
-		if (!(clp->cl_cons_state == NFS_CS_READY ||
-		      clp->cl_cons_state == NFS_CS_SESSION_INITING))
-			continue;
+	/* Match the version and minorversion */
+	if (clp->rpc_ops->version != 4 ||
+	    clp->cl_minorversion != minorversion)
+		return false;
 
-		/* Different NFS versions cannot share the same nfs_client */
-		if (clp->rpc_ops->version != nfsversion)
-			continue;
+	/* Match only the IP address, not the port number */
+	if (!nfs_sockaddr_match_ipaddr(addr, clap))
+		return false;
 
-		/* Match only the IP address, not the port number */
-		if (!nfs_sockaddr_match_ipaddr(addr, clap))
-			continue;
-
-		atomic_inc(&clp->cl_count);
-		spin_unlock(&nfs_client_lock);
-		return clp;
-	}
-	spin_unlock(&nfs_client_lock);
-	return NULL;
-}
-
-/*
- * Find a client by IP address and protocol version
- * - returns NULL if no such client
- */
-struct nfs_client *nfs_find_client_next(struct nfs_client *clp)
-{
-	struct sockaddr *sap = (struct sockaddr *)&clp->cl_addr;
-	u32 nfsvers = clp->rpc_ops->version;
-
-	spin_lock(&nfs_client_lock);
-	list_for_each_entry_continue(clp, &nfs_client_list, cl_share_link) {
-		struct sockaddr *clap = (struct sockaddr *)&clp->cl_addr;
-
-		/* Don't match clients that failed to initialise properly */
-		if (clp->cl_cons_state != NFS_CS_READY)
-			continue;
-
-		/* Different NFS versions cannot share the same nfs_client */
-		if (clp->rpc_ops->version != nfsvers)
-			continue;
-
-		/* Match only the IP address, not the port number */
-		if (!nfs_sockaddr_match_ipaddr(sap, clap))
-			continue;
-
-		atomic_inc(&clp->cl_count);
-		spin_unlock(&nfs_client_lock);
-		return clp;
-	}
-	spin_unlock(&nfs_client_lock);
-	return NULL;
+	return true;
 }
 
 /*
@@ -988,6 +1002,27 @@
 	target->options = source->options;
 }
 
+static void nfs_server_insert_lists(struct nfs_server *server)
+{
+	struct nfs_client *clp = server->nfs_client;
+
+	spin_lock(&nfs_client_lock);
+	list_add_tail_rcu(&server->client_link, &clp->cl_superblocks);
+	list_add_tail(&server->master_link, &nfs_volume_list);
+	spin_unlock(&nfs_client_lock);
+
+}
+
+static void nfs_server_remove_lists(struct nfs_server *server)
+{
+	spin_lock(&nfs_client_lock);
+	list_del_rcu(&server->client_link);
+	list_del(&server->master_link);
+	spin_unlock(&nfs_client_lock);
+
+	synchronize_rcu();
+}
+
 /*
  * Allocate and initialise a server record
  */
@@ -1004,6 +1039,7 @@
 	/* Zero out the NFS state stuff */
 	INIT_LIST_HEAD(&server->client_link);
 	INIT_LIST_HEAD(&server->master_link);
+	INIT_LIST_HEAD(&server->delegations);
 
 	atomic_set(&server->active, 0);
 
@@ -1019,6 +1055,8 @@
 		return NULL;
 	}
 
+	pnfs_init_server(server);
+
 	return server;
 }
 
@@ -1029,11 +1067,8 @@
 {
 	dprintk("--> nfs_free_server()\n");
 
+	nfs_server_remove_lists(server);
 	unset_pnfs_layoutdriver(server);
-	spin_lock(&nfs_client_lock);
-	list_del(&server->client_link);
-	list_del(&server->master_link);
-	spin_unlock(&nfs_client_lock);
 
 	if (server->destroy != NULL)
 		server->destroy(server);
@@ -1108,11 +1143,7 @@
 		(unsigned long long) server->fsid.major,
 		(unsigned long long) server->fsid.minor);
 
-	spin_lock(&nfs_client_lock);
-	list_add_tail(&server->client_link, &server->nfs_client->cl_superblocks);
-	list_add_tail(&server->master_link, &nfs_volume_list);
-	spin_unlock(&nfs_client_lock);
-
+	nfs_server_insert_lists(server);
 	server->mount_time = jiffies;
 	nfs_free_fattr(fattr);
 	return server;
@@ -1125,6 +1156,101 @@
 
 #ifdef CONFIG_NFS_V4
 /*
+ * NFSv4.0 callback thread helper
+ *
+ * Find a client by IP address, protocol version, and minorversion
+ *
+ * Called from the pg_authenticate method. The callback identifier
+ * is not used as it has not been decoded.
+ *
+ * Returns NULL if no such client
+ */
+struct nfs_client *
+nfs4_find_client_no_ident(const struct sockaddr *addr)
+{
+	struct nfs_client *clp;
+
+	spin_lock(&nfs_client_lock);
+	list_for_each_entry(clp, &nfs_client_list, cl_share_link) {
+		if (nfs4_cb_match_client(addr, clp, 0) == false)
+			continue;
+		atomic_inc(&clp->cl_count);
+		spin_unlock(&nfs_client_lock);
+		return clp;
+	}
+	spin_unlock(&nfs_client_lock);
+	return NULL;
+}
+
+/*
+ * NFSv4.0 callback thread helper
+ *
+ * Find a client by callback identifier
+ */
+struct nfs_client *
+nfs4_find_client_ident(int cb_ident)
+{
+	struct nfs_client *clp;
+
+	spin_lock(&nfs_client_lock);
+	clp = idr_find(&cb_ident_idr, cb_ident);
+	if (clp)
+		atomic_inc(&clp->cl_count);
+	spin_unlock(&nfs_client_lock);
+	return clp;
+}
+
+#if defined(CONFIG_NFS_V4_1)
+/*
+ * NFSv4.1 callback thread helper
+ * For CB_COMPOUND calls, find a client by IP address, protocol version,
+ * minorversion, and sessionID
+ *
+ * CREATE_SESSION triggers a CB_NULL ping from servers. The callback service
+ * sessionid can only be set after the CREATE_SESSION return, so a CB_NULL
+ * can arrive before the callback sessionid is set. For CB_NULL calls,
+ * find a client by IP address, protocol version, and minorversion.
+ *
+ * Returns NULL if no such client
+ */
+struct nfs_client *
+nfs4_find_client_sessionid(const struct sockaddr *addr,
+			   struct nfs4_sessionid *sid, int is_cb_compound)
+{
+	struct nfs_client *clp;
+
+	spin_lock(&nfs_client_lock);
+	list_for_each_entry(clp, &nfs_client_list, cl_share_link) {
+		if (nfs4_cb_match_client(addr, clp, 1) == false)
+			continue;
+
+		if (!nfs4_has_session(clp))
+			continue;
+
+		/* Match sessionid unless cb_null call */
+		if (is_cb_compound && (memcmp(clp->cl_session->sess_id.data,
+		    sid->data, NFS4_MAX_SESSIONID_LEN) != 0))
+			continue;
+
+		atomic_inc(&clp->cl_count);
+		spin_unlock(&nfs_client_lock);
+		return clp;
+	}
+	spin_unlock(&nfs_client_lock);
+	return NULL;
+}
+
+#else /* CONFIG_NFS_V4_1 */
+
+struct nfs_client *
+nfs4_find_client_sessionid(const struct sockaddr *addr,
+			   struct nfs4_sessionid *sid, int is_cb_compound)
+{
+	return NULL;
+}
+#endif /* CONFIG_NFS_V4_1 */
+
+/*
  * Initialize the NFS4 callback service
  */
 static int nfs4_init_callback(struct nfs_client *clp)
@@ -1342,11 +1468,7 @@
 	if (server->namelen == 0 || server->namelen > NFS4_MAXNAMLEN)
 		server->namelen = NFS4_MAXNAMLEN;
 
-	spin_lock(&nfs_client_lock);
-	list_add_tail(&server->client_link, &server->nfs_client->cl_superblocks);
-	list_add_tail(&server->master_link, &nfs_volume_list);
-	spin_unlock(&nfs_client_lock);
-
+	nfs_server_insert_lists(server);
 	server->mount_time = jiffies;
 out:
 	nfs_free_fattr(fattr);
@@ -1551,11 +1673,7 @@
 	if (error < 0)
 		goto out_free_server;
 
-	spin_lock(&nfs_client_lock);
-	list_add_tail(&server->client_link, &server->nfs_client->cl_superblocks);
-	list_add_tail(&server->master_link, &nfs_volume_list);
-	spin_unlock(&nfs_client_lock);
-
+	nfs_server_insert_lists(server);
 	server->mount_time = jiffies;
 
 	nfs_free_fattr(fattr_fsinfo);
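
[Editor's note: nfs4_cb_match_client() added to client.c above matches only the IP address of an incoming callback, never the port. The snippet below is a small IPv4-only illustration of that intent using plain sockets; it is not the kernel's nfs_sockaddr_match_ipaddr() implementation.]

    #include <arpa/inet.h>
    #include <stdio.h>
    #include <string.h>

    /* Compare two IPv4 socket addresses by address only, ignoring the port. */
    static int ipaddr_match(const struct sockaddr_in *a, const struct sockaddr_in *b)
    {
            return a->sin_family == b->sin_family &&
                   memcmp(&a->sin_addr, &b->sin_addr, sizeof(a->sin_addr)) == 0;
    }

    int main(void)
    {
            struct sockaddr_in srv = { .sin_family = AF_INET, .sin_port = htons(2049) };
            struct sockaddr_in cb  = { .sin_family = AF_INET, .sin_port = htons(40001) };

            inet_pton(AF_INET, "192.0.2.10", &srv.sin_addr);
            inet_pton(AF_INET, "192.0.2.10", &cb.sin_addr);

            /* Same host, different ports: still a match for callback purposes. */
            printf("match=%d\n", ipaddr_match(&srv, &cb));
            return 0;
    }
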
diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
index 1fd62fc..364e432 100644
--- a/fs/nfs/delegation.c
+++ b/fs/nfs/delegation.c
@@ -40,11 +40,23 @@
 	call_rcu(&delegation->rcu, nfs_free_delegation_callback);
 }
 
+/**
+ * nfs_mark_delegation_referenced - set delegation's REFERENCED flag
+ * @delegation: delegation to process
+ *
+ */
 void nfs_mark_delegation_referenced(struct nfs_delegation *delegation)
 {
 	set_bit(NFS_DELEGATION_REFERENCED, &delegation->flags);
 }
 
+/**
+ * nfs_have_delegation - check if inode has a delegation
+ * @inode: inode to check
+ * @flags: delegation types to check for
+ *
+ * Returns one if inode has the indicated delegation, otherwise zero.
+ */
 int nfs_have_delegation(struct inode *inode, fmode_t flags)
 {
 	struct nfs_delegation *delegation;
@@ -119,10 +131,15 @@
 	return 0;
 }
 
-/*
- * Set up a delegation on an inode
+/**
+ * nfs_inode_reclaim_delegation - process a delegation reclaim request
+ * @inode: inode to process
+ * @cred: credential to use for request
+ * @res: new delegation state from server
+ *
  */
-void nfs_inode_reclaim_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res)
+void nfs_inode_reclaim_delegation(struct inode *inode, struct rpc_cred *cred,
+				  struct nfs_openres *res)
 {
 	struct nfs_delegation *delegation;
 	struct rpc_cred *oldcred = NULL;
@@ -175,38 +192,52 @@
 	return inode;
 }
 
-static struct nfs_delegation *nfs_detach_delegation_locked(struct nfs_inode *nfsi,
-							   const nfs4_stateid *stateid,
-							   struct nfs_client *clp)
+static struct nfs_delegation *
+nfs_detach_delegation_locked(struct nfs_inode *nfsi,
+			     struct nfs_server *server)
 {
 	struct nfs_delegation *delegation =
 		rcu_dereference_protected(nfsi->delegation,
-					  lockdep_is_held(&clp->cl_lock));
+				lockdep_is_held(&server->nfs_client->cl_lock));
 
 	if (delegation == NULL)
 		goto nomatch;
+
 	spin_lock(&delegation->lock);
-	if (stateid != NULL && memcmp(delegation->stateid.data, stateid->data,
-				sizeof(delegation->stateid.data)) != 0)
-		goto nomatch_unlock;
 	list_del_rcu(&delegation->super_list);
 	delegation->inode = NULL;
 	nfsi->delegation_state = 0;
 	rcu_assign_pointer(nfsi->delegation, NULL);
 	spin_unlock(&delegation->lock);
 	return delegation;
-nomatch_unlock:
-	spin_unlock(&delegation->lock);
 nomatch:
 	return NULL;
 }
 
-/*
- * Set up a delegation on an inode
+static struct nfs_delegation *nfs_detach_delegation(struct nfs_inode *nfsi,
+						    struct nfs_server *server)
+{
+	struct nfs_client *clp = server->nfs_client;
+	struct nfs_delegation *delegation;
+
+	spin_lock(&clp->cl_lock);
+	delegation = nfs_detach_delegation_locked(nfsi, server);
+	spin_unlock(&clp->cl_lock);
+	return delegation;
+}
+
+/**
+ * nfs_inode_set_delegation - set up a delegation on an inode
+ * @inode: inode to which delegation applies
+ * @cred: cred to use for subsequent delegation processing
+ * @res: new delegation state from server
+ *
+ * Returns zero on success, or a negative errno value.
  */
 int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res)
 {
-	struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
+	struct nfs_server *server = NFS_SERVER(inode);
+	struct nfs_client *clp = server->nfs_client;
 	struct nfs_inode *nfsi = NFS_I(inode);
 	struct nfs_delegation *delegation, *old_delegation;
 	struct nfs_delegation *freeme = NULL;
@@ -227,7 +258,7 @@
 
 	spin_lock(&clp->cl_lock);
 	old_delegation = rcu_dereference_protected(nfsi->delegation,
-						   lockdep_is_held(&clp->cl_lock));
+					lockdep_is_held(&clp->cl_lock));
 	if (old_delegation != NULL) {
 		if (memcmp(&delegation->stateid, &old_delegation->stateid,
 					sizeof(old_delegation->stateid)) == 0 &&
@@ -246,9 +277,9 @@
 			delegation = NULL;
 			goto out;
 		}
-		freeme = nfs_detach_delegation_locked(nfsi, NULL, clp);
+		freeme = nfs_detach_delegation_locked(nfsi, server);
 	}
-	list_add_rcu(&delegation->super_list, &clp->cl_delegations);
+	list_add_rcu(&delegation->super_list, &server->delegations);
 	nfsi->delegation_state = delegation->type;
 	rcu_assign_pointer(nfsi->delegation, delegation);
 	delegation = NULL;
@@ -290,73 +321,85 @@
 	return err;
 }
 
-/*
- * Return all delegations that have been marked for return
+/**
+ * nfs_client_return_marked_delegations - return previously marked delegations
+ * @clp: nfs_client to process
+ *
+ * Returns zero on success, or a negative errno value.
  */
 int nfs_client_return_marked_delegations(struct nfs_client *clp)
 {
 	struct nfs_delegation *delegation;
+	struct nfs_server *server;
 	struct inode *inode;
 	int err = 0;
 
 restart:
 	rcu_read_lock();
-	list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) {
-		if (!test_and_clear_bit(NFS_DELEGATION_RETURN, &delegation->flags))
-			continue;
-		inode = nfs_delegation_grab_inode(delegation);
-		if (inode == NULL)
-			continue;
-		spin_lock(&clp->cl_lock);
-		delegation = nfs_detach_delegation_locked(NFS_I(inode), NULL, clp);
-		spin_unlock(&clp->cl_lock);
-		rcu_read_unlock();
-		if (delegation != NULL) {
-			filemap_flush(inode->i_mapping);
-			err = __nfs_inode_return_delegation(inode, delegation, 0);
+	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
+		list_for_each_entry_rcu(delegation, &server->delegations,
+								super_list) {
+			if (!test_and_clear_bit(NFS_DELEGATION_RETURN,
+							&delegation->flags))
+				continue;
+			inode = nfs_delegation_grab_inode(delegation);
+			if (inode == NULL)
+				continue;
+			delegation = nfs_detach_delegation(NFS_I(inode),
+								server);
+			rcu_read_unlock();
+
+			if (delegation != NULL) {
+				filemap_flush(inode->i_mapping);
+				err = __nfs_inode_return_delegation(inode,
+								delegation, 0);
+			}
+			iput(inode);
+			if (!err)
+				goto restart;
+			set_bit(NFS4CLNT_DELEGRETURN, &clp->cl_state);
+			return err;
 		}
-		iput(inode);
-		if (!err)
-			goto restart;
-		set_bit(NFS4CLNT_DELEGRETURN, &clp->cl_state);
-		return err;
 	}
 	rcu_read_unlock();
 	return 0;
 }
 
-/*
- * This function returns the delegation without reclaiming opens
- * or protecting against delegation reclaims.
- * It is therefore really only safe to be called from
- * nfs4_clear_inode()
+/**
+ * nfs_inode_return_delegation_noreclaim - return delegation, don't reclaim opens
+ * @inode: inode to process
+ *
+ * Does not protect against delegation reclaims, therefore really only safe
+ * to be called from nfs4_clear_inode().
  */
 void nfs_inode_return_delegation_noreclaim(struct inode *inode)
 {
-	struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
+	struct nfs_server *server = NFS_SERVER(inode);
 	struct nfs_inode *nfsi = NFS_I(inode);
 	struct nfs_delegation *delegation;
 
 	if (rcu_access_pointer(nfsi->delegation) != NULL) {
-		spin_lock(&clp->cl_lock);
-		delegation = nfs_detach_delegation_locked(nfsi, NULL, clp);
-		spin_unlock(&clp->cl_lock);
+		delegation = nfs_detach_delegation(nfsi, server);
 		if (delegation != NULL)
 			nfs_do_return_delegation(inode, delegation, 0);
 	}
 }
 
+/**
+ * nfs_inode_return_delegation - synchronously return a delegation
+ * @inode: inode to process
+ *
+ * Returns zero on success, or a negative errno value.
+ */
 int nfs_inode_return_delegation(struct inode *inode)
 {
-	struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
+	struct nfs_server *server = NFS_SERVER(inode);
 	struct nfs_inode *nfsi = NFS_I(inode);
 	struct nfs_delegation *delegation;
 	int err = 0;
 
 	if (rcu_access_pointer(nfsi->delegation) != NULL) {
-		spin_lock(&clp->cl_lock);
-		delegation = nfs_detach_delegation_locked(nfsi, NULL, clp);
-		spin_unlock(&clp->cl_lock);
+		delegation = nfs_detach_delegation(nfsi, server);
 		if (delegation != NULL) {
 			nfs_wb_all(inode);
 			err = __nfs_inode_return_delegation(inode, delegation, 1);
@@ -365,46 +408,61 @@
 	return err;
 }
 
-static void nfs_mark_return_delegation(struct nfs_client *clp, struct nfs_delegation *delegation)
+static void nfs_mark_return_delegation(struct nfs_delegation *delegation)
 {
+	struct nfs_client *clp = NFS_SERVER(delegation->inode)->nfs_client;
+
 	set_bit(NFS_DELEGATION_RETURN, &delegation->flags);
 	set_bit(NFS4CLNT_DELEGRETURN, &clp->cl_state);
 }
 
-/*
- * Return all delegations associated to a super block
+/**
+ * nfs_super_return_all_delegations - return delegations for one superblock
+ * @sb: sb to process
+ *
  */
 void nfs_super_return_all_delegations(struct super_block *sb)
 {
-	struct nfs_client *clp = NFS_SB(sb)->nfs_client;
+	struct nfs_server *server = NFS_SB(sb);
+	struct nfs_client *clp = server->nfs_client;
 	struct nfs_delegation *delegation;
 
 	if (clp == NULL)
 		return;
+
 	rcu_read_lock();
-	list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) {
+	list_for_each_entry_rcu(delegation, &server->delegations, super_list) {
 		spin_lock(&delegation->lock);
-		if (delegation->inode != NULL && delegation->inode->i_sb == sb)
-			set_bit(NFS_DELEGATION_RETURN, &delegation->flags);
+		set_bit(NFS_DELEGATION_RETURN, &delegation->flags);
 		spin_unlock(&delegation->lock);
 	}
 	rcu_read_unlock();
+
 	if (nfs_client_return_marked_delegations(clp) != 0)
 		nfs4_schedule_state_manager(clp);
 }
 
-static
-void nfs_client_mark_return_all_delegation_types(struct nfs_client *clp, fmode_t flags)
+static void nfs_mark_return_all_delegation_types(struct nfs_server *server,
+						 fmode_t flags)
 {
 	struct nfs_delegation *delegation;
 
-	rcu_read_lock();
-	list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) {
+	list_for_each_entry_rcu(delegation, &server->delegations, super_list) {
 		if ((delegation->type == (FMODE_READ|FMODE_WRITE)) && !(flags & FMODE_WRITE))
 			continue;
 		if (delegation->type & flags)
-			nfs_mark_return_delegation(clp, delegation);
+			nfs_mark_return_delegation(delegation);
 	}
+}
+
+static void nfs_client_mark_return_all_delegation_types(struct nfs_client *clp,
+							fmode_t flags)
+{
+	struct nfs_server *server;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
+		nfs_mark_return_all_delegation_types(server, flags);
 	rcu_read_unlock();
 }
 
@@ -419,19 +477,32 @@
 		nfs4_schedule_state_manager(clp);
 }
 
+/**
+ * nfs_expire_all_delegation_types
+ * @clp: client to process
+ * @flags: delegation types to expire
+ *
+ */
 void nfs_expire_all_delegation_types(struct nfs_client *clp, fmode_t flags)
 {
 	nfs_client_mark_return_all_delegation_types(clp, flags);
 	nfs_delegation_run_state_manager(clp);
 }
 
+/**
+ * nfs_expire_all_delegations
+ * @clp: client to process
+ *
+ */
 void nfs_expire_all_delegations(struct nfs_client *clp)
 {
 	nfs_expire_all_delegation_types(clp, FMODE_READ|FMODE_WRITE);
 }
 
-/*
- * Return all delegations following an NFS4ERR_CB_PATH_DOWN error.
+/**
+ * nfs_handle_cb_pathdown - return all delegations after NFS4ERR_CB_PATH_DOWN
+ * @clp: client to process
+ *
  */
 void nfs_handle_cb_pathdown(struct nfs_client *clp)
 {
@@ -440,29 +511,43 @@
 	nfs_client_mark_return_all_delegations(clp);
 }
 
-static void nfs_client_mark_return_unreferenced_delegations(struct nfs_client *clp)
+static void nfs_mark_return_unreferenced_delegations(struct nfs_server *server)
 {
 	struct nfs_delegation *delegation;
 
-	rcu_read_lock();
-	list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) {
+	list_for_each_entry_rcu(delegation, &server->delegations, super_list) {
 		if (test_and_clear_bit(NFS_DELEGATION_REFERENCED, &delegation->flags))
 			continue;
-		nfs_mark_return_delegation(clp, delegation);
+		nfs_mark_return_delegation(delegation);
 	}
-	rcu_read_unlock();
 }
 
+/**
+ * nfs_expire_unreferenced_delegations - Eliminate unused delegations
+ * @clp: nfs_client to process
+ *
+ */
 void nfs_expire_unreferenced_delegations(struct nfs_client *clp)
 {
-	nfs_client_mark_return_unreferenced_delegations(clp);
+	struct nfs_server *server;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
+		nfs_mark_return_unreferenced_delegations(server);
+	rcu_read_unlock();
+
 	nfs_delegation_run_state_manager(clp);
 }
 
-/*
- * Asynchronous delegation recall!
+/**
+ * nfs_async_inode_return_delegation - asynchronously return a delegation
+ * @inode: inode to process
+ * @stateid: state ID information from CB_RECALL arguments
+ *
+ * Returns zero on success, or a negative errno value.
  */
-int nfs_async_inode_return_delegation(struct inode *inode, const nfs4_stateid *stateid)
+int nfs_async_inode_return_delegation(struct inode *inode,
+				      const nfs4_stateid *stateid)
 {
 	struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
 	struct nfs_delegation *delegation;
@@ -474,22 +559,21 @@
 		rcu_read_unlock();
 		return -ENOENT;
 	}
-
-	nfs_mark_return_delegation(clp, delegation);
+	nfs_mark_return_delegation(delegation);
 	rcu_read_unlock();
+
 	nfs_delegation_run_state_manager(clp);
 	return 0;
 }
 
-/*
- * Retrieve the inode associated with a delegation
- */
-struct inode *nfs_delegation_find_inode(struct nfs_client *clp, const struct nfs_fh *fhandle)
+static struct inode *
+nfs_delegation_find_inode_server(struct nfs_server *server,
+				 const struct nfs_fh *fhandle)
 {
 	struct nfs_delegation *delegation;
 	struct inode *res = NULL;
-	rcu_read_lock();
-	list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) {
+
+	list_for_each_entry_rcu(delegation, &server->delegations, super_list) {
 		spin_lock(&delegation->lock);
 		if (delegation->inode != NULL &&
 		    nfs_compare_fh(fhandle, &NFS_I(delegation->inode)->fh) == 0) {
@@ -499,49 +583,121 @@
 		if (res != NULL)
 			break;
 	}
+	return res;
+}
+
+/**
+ * nfs_delegation_find_inode - retrieve the inode associated with a delegation
+ * @clp: client state handle
+ * @fhandle: filehandle from a delegation recall
+ *
+ * Returns pointer to inode matching "fhandle," or NULL if a matching inode
+ * cannot be found.
+ */
+struct inode *nfs_delegation_find_inode(struct nfs_client *clp,
+					const struct nfs_fh *fhandle)
+{
+	struct nfs_server *server;
+	struct inode *res = NULL;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
+		res = nfs_delegation_find_inode_server(server, fhandle);
+		if (res != NULL)
+			break;
+	}
 	rcu_read_unlock();
 	return res;
 }
 
-/*
- * Mark all delegations as needing to be reclaimed
+static void nfs_delegation_mark_reclaim_server(struct nfs_server *server)
+{
+	struct nfs_delegation *delegation;
+
+	list_for_each_entry_rcu(delegation, &server->delegations, super_list)
+		set_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags);
+}
+
+/**
+ * nfs_delegation_mark_reclaim - mark all delegations as needing to be reclaimed
+ * @clp: nfs_client to process
+ *
  */
 void nfs_delegation_mark_reclaim(struct nfs_client *clp)
 {
-	struct nfs_delegation *delegation;
+	struct nfs_server *server;
+
 	rcu_read_lock();
-	list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list)
-		set_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags);
+	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
+		nfs_delegation_mark_reclaim_server(server);
 	rcu_read_unlock();
 }
 
-/*
- * Reap all unclaimed delegations after reboot recovery is done
+/**
+ * nfs_delegation_reap_unclaimed - reap unclaimed delegations after reboot recovery is done
+ * @clp: nfs_client to process
+ *
  */
 void nfs_delegation_reap_unclaimed(struct nfs_client *clp)
 {
 	struct nfs_delegation *delegation;
+	struct nfs_server *server;
 	struct inode *inode;
+
 restart:
 	rcu_read_lock();
-	list_for_each_entry_rcu(delegation, &clp->cl_delegations, super_list) {
-		if (test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags) == 0)
-			continue;
-		inode = nfs_delegation_grab_inode(delegation);
-		if (inode == NULL)
-			continue;
-		spin_lock(&clp->cl_lock);
-		delegation = nfs_detach_delegation_locked(NFS_I(inode), NULL, clp);
-		spin_unlock(&clp->cl_lock);
-		rcu_read_unlock();
-		if (delegation != NULL)
-			nfs_free_delegation(delegation);
-		iput(inode);
-		goto restart;
+	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
+		list_for_each_entry_rcu(delegation, &server->delegations,
+								super_list) {
+			if (test_bit(NFS_DELEGATION_NEED_RECLAIM,
+						&delegation->flags) == 0)
+				continue;
+			inode = nfs_delegation_grab_inode(delegation);
+			if (inode == NULL)
+				continue;
+			delegation = nfs_detach_delegation(NFS_I(inode),
+								server);
+			rcu_read_unlock();
+
+			if (delegation != NULL)
+				nfs_free_delegation(delegation);
+			iput(inode);
+			goto restart;
+		}
 	}
 	rcu_read_unlock();
 }
 
+/**
+ * nfs_delegations_present - check for existence of delegations
+ * @clp: client state handle
+ *
+ * Returns one if there are any nfs_delegation structures attached
+ * to this nfs_client.
+ */
+int nfs_delegations_present(struct nfs_client *clp)
+{
+	struct nfs_server *server;
+	int ret = 0;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
+		if (!list_empty(&server->delegations)) {
+			ret = 1;
+			break;
+		}
+	rcu_read_unlock();
+	return ret;
+}
+
+/**
+ * nfs4_copy_delegation_stateid - Copy inode's state ID information
+ * @dst: stateid data structure to fill in
+ * @inode: inode to check
+ *
+ * Returns one and fills in "dst->data" if inode had a delegation,
+ * otherwise zero is returned.
+ */
 int nfs4_copy_delegation_stateid(nfs4_stateid *dst, struct inode *inode)
 {
 	struct nfs_inode *nfsi = NFS_I(inode);
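
The delegation.c hunks above move the delegation list off the nfs_client
(cl_delegations) and onto each nfs_server, so every per-client operation now
nests an RCU walk of clp->cl_superblocks around a walk of each server's
->delegations. A minimal sketch of that shape, assuming the in-tree NFS
structures; the helper name and callback below are illustrative only and not
part of the patch:

/*
 * Sketch only (hypothetical helper): the nested walk used by
 * nfs_delegation_mark_reclaim(), nfs_delegations_present() and friends
 * in the hunks above.  Assumes the in-tree definitions of struct
 * nfs_client, struct nfs_server and struct nfs_delegation.
 */
static void nfs_for_each_delegation_sketch(struct nfs_client *clp,
				void (*fn)(struct nfs_delegation *))
{
	struct nfs_server *server;
	struct nfs_delegation *delegation;

	rcu_read_lock();
	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
		list_for_each_entry_rcu(delegation, &server->delegations,
					super_list)
			fn(delegation);	/* e.g. nfs_mark_return_delegation() */
	}
	rcu_read_unlock();
}
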
diff --git a/fs/nfs/delegation.h b/fs/nfs/delegation.h
index 2026304..d9322e4 100644
--- a/fs/nfs/delegation.h
+++ b/fs/nfs/delegation.h
@@ -44,6 +44,7 @@
 void nfs_expire_unreferenced_delegations(struct nfs_client *clp);
 void nfs_handle_cb_pathdown(struct nfs_client *clp);
 int nfs_client_return_marked_delegations(struct nfs_client *clp);
+int nfs_delegations_present(struct nfs_client *clp);
 
 void nfs_delegation_mark_reclaim(struct nfs_client *clp);
 void nfs_delegation_reap_unclaimed(struct nfs_client *clp);
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index d33da53..95b081b 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -33,8 +33,8 @@
 #include <linux/namei.h>
 #include <linux/mount.h>
 #include <linux/sched.h>
-#include <linux/vmalloc.h>
 #include <linux/kmemleak.h>
+#include <linux/xattr.h>
 
 #include "delegation.h"
 #include "iostat.h"
@@ -125,9 +125,10 @@
 	.permission	= nfs_permission,
 	.getattr	= nfs_getattr,
 	.setattr	= nfs_setattr,
-	.getxattr       = nfs4_getxattr,
-	.setxattr       = nfs4_setxattr,
-	.listxattr      = nfs4_listxattr,
+	.getxattr	= generic_getxattr,
+	.setxattr	= generic_setxattr,
+	.listxattr	= generic_listxattr,
+	.removexattr	= generic_removexattr,
 };
 
 #endif /* CONFIG_NFS_V4 */
@@ -172,7 +173,7 @@
 	struct nfs_cache_array_entry array[0];
 };
 
-typedef __be32 * (*decode_dirent_t)(struct xdr_stream *, struct nfs_entry *, struct nfs_server *, int);
+typedef int (*decode_dirent_t)(struct xdr_stream *, struct nfs_entry *, int);
 typedef struct {
 	struct file	*file;
 	struct page	*page;
@@ -378,14 +379,14 @@
 	return error;
 }
 
-/* Fill in an entry based on the xdr code stored in desc->page */
-static
-int xdr_decode(nfs_readdir_descriptor_t *desc, struct nfs_entry *entry, struct xdr_stream *stream)
+static int xdr_decode(nfs_readdir_descriptor_t *desc,
+		      struct nfs_entry *entry, struct xdr_stream *xdr)
 {
-	__be32 *p = desc->decode(stream, entry, NFS_SERVER(desc->file->f_path.dentry->d_inode), desc->plus);
-	if (IS_ERR(p))
-		return PTR_ERR(p);
+	int error;
 
+	error = desc->decode(xdr, entry, desc->plus);
+	if (error)
+		return error;
 	entry->fattr->time_start = desc->timestamp;
 	entry->fattr->gencount = desc->gencount;
 	return 0;
@@ -438,7 +439,6 @@
 	if (dentry == NULL)
 		return;
 
-	d_set_d_op(dentry, NFS_PROTO(dir)->dentry_ops);
 	inode = nfs_fhget(dentry->d_sb, entry->fh, entry->fattr);
 	if (IS_ERR(inode))
 		goto out;
@@ -459,25 +459,26 @@
 /* Perform conversion from xdr to cache array */
 static
 int nfs_readdir_page_filler(nfs_readdir_descriptor_t *desc, struct nfs_entry *entry,
-				void *xdr_page, struct page *page, unsigned int buflen)
+				struct page **xdr_pages, struct page *page, unsigned int buflen)
 {
 	struct xdr_stream stream;
-	struct xdr_buf buf;
-	__be32 *ptr = xdr_page;
+	struct xdr_buf buf = {
+		.pages = xdr_pages,
+		.page_len = buflen,
+		.buflen = buflen,
+		.len = buflen,
+	};
+	struct page *scratch;
 	struct nfs_cache_array *array;
 	unsigned int count = 0;
 	int status;
 
-	buf.head->iov_base = xdr_page;
-	buf.head->iov_len = buflen;
-	buf.tail->iov_len = 0;
-	buf.page_base = 0;
-	buf.page_len = 0;
-	buf.buflen = buf.head->iov_len;
-	buf.len = buf.head->iov_len;
+	scratch = alloc_page(GFP_KERNEL);
+	if (scratch == NULL)
+		return -ENOMEM;
 
-	xdr_init_decode(&stream, &buf, ptr);
-
+	xdr_init_decode(&stream, &buf, NULL);
+	xdr_set_scratch_buffer(&stream, page_address(scratch), PAGE_SIZE);
 
 	do {
 		status = xdr_decode(desc, entry, &stream);
@@ -506,6 +507,8 @@
 		} else
 			status = PTR_ERR(array);
 	}
+
+	put_page(scratch);
 	return status;
 }
 
@@ -521,7 +524,6 @@
 void nfs_readdir_free_large_page(void *ptr, struct page **pages,
 		unsigned int npages)
 {
-	vm_unmap_ram(ptr, npages);
 	nfs_readdir_free_pagearray(pages, npages);
 }
 
@@ -530,9 +532,8 @@
  * to nfs_readdir_free_large_page
  */
 static
-void *nfs_readdir_large_page(struct page **pages, unsigned int npages)
+int nfs_readdir_large_page(struct page **pages, unsigned int npages)
 {
-	void *ptr;
 	unsigned int i;
 
 	for (i = 0; i < npages; i++) {
@@ -541,13 +542,11 @@
 			goto out_freepages;
 		pages[i] = page;
 	}
+	return 0;
 
-	ptr = vm_map_ram(pages, npages, 0, PAGE_KERNEL);
-	if (!IS_ERR_OR_NULL(ptr))
-		return ptr;
 out_freepages:
 	nfs_readdir_free_pagearray(pages, i);
-	return NULL;
+	return -ENOMEM;
 }
 
 static
@@ -566,6 +565,7 @@
 	entry.eof = 0;
 	entry.fh = nfs_alloc_fhandle();
 	entry.fattr = nfs_alloc_fattr();
+	entry.server = NFS_SERVER(inode);
 	if (entry.fh == NULL || entry.fattr == NULL)
 		goto out;
 
@@ -577,8 +577,8 @@
 	memset(array, 0, sizeof(struct nfs_cache_array));
 	array->eof_index = -1;
 
-	pages_ptr = nfs_readdir_large_page(pages, array_size);
-	if (!pages_ptr)
+	status = nfs_readdir_large_page(pages, array_size);
+	if (status < 0)
 		goto out_release_array;
 	do {
 		unsigned int pglen;
@@ -587,7 +587,7 @@
 		if (status < 0)
 			break;
 		pglen = status;
-		status = nfs_readdir_page_filler(desc, &entry, pages_ptr, page, pglen);
+		status = nfs_readdir_page_filler(desc, &entry, pages, page, pglen);
 		if (status < 0) {
 			if (status == -ENOSPC)
 				status = 0;
@@ -1192,8 +1192,6 @@
 	if (dentry->d_name.len > NFS_SERVER(dir)->namelen)
 		goto out;
 
-	d_set_d_op(dentry, NFS_PROTO(dir)->dentry_ops);
-
 	/*
 	 * If we're doing an exclusive create, optimize away the lookup
 	 * but don't hash the dentry.
@@ -1221,7 +1219,7 @@
 		goto out_unblock_sillyrename;
 	}
 	inode = nfs_fhget(dentry->d_sb, fhandle, fattr);
-	res = (struct dentry *)inode;
+	res = ERR_CAST(inode);
 	if (IS_ERR(res))
 		goto out_unblock_sillyrename;
 
@@ -1337,7 +1335,6 @@
 		res = ERR_PTR(-ENAMETOOLONG);
 		goto out;
 	}
-	d_set_d_op(dentry, NFS_PROTO(dir)->dentry_ops);
 
 	/* Let vfs_create() deal with O_EXCL. Instantiate, but don't hash
 	 * the dentry. */
@@ -1355,8 +1352,7 @@
 	if (nd->flags & LOOKUP_CREATE) {
 		attr.ia_mode = nd->intent.open.create_mode;
 		attr.ia_valid = ATTR_MODE;
-		if (!IS_POSIXACL(dir))
-			attr.ia_mode &= ~current_umask();
+		attr.ia_mode &= ~current_umask();
 	} else {
 		open_flags &= ~(O_EXCL | O_CREAT);
 		attr.ia_valid = 0;
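
The dir.c hunks above drop the vm_map_ram() mapping of the readdir reply
pages and instead hand the page array straight to the XDR layer, with one
scratch page so the decoder can linearize entries that straddle a page
boundary. A hedged sketch of that setup, using only calls visible in the
hunk above; the wrapper function itself is hypothetical:

/*
 * Sketch only: how nfs_readdir_page_filler() now feeds a page array to
 * the XDR decoder.  The scratch page lets xdr_inline_decode() copy and
 * linearize items that cross a page boundary, which is what previously
 * required a contiguous vm_map_ram() mapping.  Assumes the usual
 * sunrpc/xdr and mm headers; per-entry decoding and error handling are
 * trimmed for brevity.
 */
static int nfs_readdir_decode_pages_sketch(struct page **xdr_pages,
					   unsigned int buflen)
{
	struct xdr_buf buf = {
		.pages		= xdr_pages,
		.page_len	= buflen,
		.buflen		= buflen,
		.len		= buflen,
	};
	struct xdr_stream stream;
	struct page *scratch;

	scratch = alloc_page(GFP_KERNEL);
	if (scratch == NULL)
		return -ENOMEM;

	xdr_init_decode(&stream, &buf, NULL);
	xdr_set_scratch_buffer(&stream, page_address(scratch), PAGE_SIZE);

	/* ... decode one entry at a time from &stream here ... */

	put_page(scratch);
	return 0;
}
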
diff --git a/fs/nfs/getroot.c b/fs/nfs/getroot.c
index 5596c6a..b5ffe8fa 100644
--- a/fs/nfs/getroot.c
+++ b/fs/nfs/getroot.c
@@ -119,9 +119,6 @@
 	}
 
 	security_d_instantiate(ret, inode);
-
-	if (ret->d_op == NULL)
-		d_set_d_op(ret, server->nfs_client->rpc_ops->dentry_ops);
 out:
 	nfs_free_fattr(fsinfo.fattr);
 	return ret;
@@ -227,9 +224,6 @@
 
 	security_d_instantiate(ret, inode);
 
-	if (ret->d_op == NULL)
-		d_set_d_op(ret, server->nfs_client->rpc_ops->dentry_ops);
-
 out:
 	nfs_free_fattr(fattr);
 	dprintk("<-- nfs4_get_root()\n");
diff --git a/fs/nfs/idmap.c b/fs/nfs/idmap.c
index 4e2d9b6..1869688 100644
--- a/fs/nfs/idmap.c
+++ b/fs/nfs/idmap.c
@@ -238,7 +238,7 @@
 	return nfs_idmap_lookup_name(gid, "group", buf, buflen);
 }
 
-#else  /* CONFIG_NFS_USE_IDMAPPER not defined */
+#else  /* CONFIG_NFS_USE_NEW_IDMAPPER not defined */
 
 #include <linux/module.h>
 #include <linux/mutex.h>
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index 017daa3..ce00b70 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -1410,9 +1410,9 @@
  */
 void nfs4_evict_inode(struct inode *inode)
 {
+	pnfs_destroy_layout(NFS_I(inode));
 	truncate_inode_pages(&inode->i_data, 0);
 	end_writeback(inode);
-	pnfs_destroy_layout(NFS_I(inode));
 	/* If we are holding a delegation, return it! */
 	nfs_inode_return_delegation_noreclaim(inode);
 	/* First call standard NFS clear_inode() code */
@@ -1619,6 +1619,7 @@
 #ifdef CONFIG_PROC_FS
 	rpc_proc_unregister("nfs");
 #endif
+	nfs_cleanup_cb_ident_idr();
 	unregister_nfs_fs();
 	nfs_fs_proc_exit();
 	nfsiod_stop();
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index e6356b7..bfa3a34 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -128,9 +128,13 @@
 /* client.c */
 extern struct rpc_program nfs_program;
 
+extern void nfs_cleanup_cb_ident_idr(void);
 extern void nfs_put_client(struct nfs_client *);
-extern struct nfs_client *nfs_find_client(const struct sockaddr *, u32);
-extern struct nfs_client *nfs_find_client_next(struct nfs_client *);
+extern struct nfs_client *nfs4_find_client_no_ident(const struct sockaddr *);
+extern struct nfs_client *nfs4_find_client_ident(int);
+extern struct nfs_client *
+nfs4_find_client_sessionid(const struct sockaddr *, struct nfs4_sessionid *,
+			   int);
 extern struct nfs_server *nfs_create_server(
 					const struct nfs_parsed_mount_data *,
 					struct nfs_fh *);
@@ -185,17 +189,20 @@
 extern void nfs_destroy_directcache(void);
 
 /* nfs2xdr.c */
-extern int nfs_stat_to_errno(int);
+extern int nfs_stat_to_errno(enum nfs_stat);
 extern struct rpc_procinfo nfs_procedures[];
-extern __be32 *nfs_decode_dirent(struct xdr_stream *, struct nfs_entry *, struct nfs_server *, int);
+extern int nfs2_decode_dirent(struct xdr_stream *,
+				struct nfs_entry *, int);
 
 /* nfs3xdr.c */
 extern struct rpc_procinfo nfs3_procedures[];
-extern __be32 *nfs3_decode_dirent(struct xdr_stream *, struct nfs_entry *, struct nfs_server *, int);
+extern int nfs3_decode_dirent(struct xdr_stream *,
+				struct nfs_entry *, int);
 
 /* nfs4xdr.c */
 #ifdef CONFIG_NFS_V4
-extern __be32 *nfs4_decode_dirent(struct xdr_stream *, struct nfs_entry *, struct nfs_server *, int);
+extern int nfs4_decode_dirent(struct xdr_stream *,
+				struct nfs_entry *, int);
 #endif
 #ifdef CONFIG_NFS_V4_1
 extern const u32 nfs41_maxread_overhead;
diff --git a/fs/nfs/mount_clnt.c b/fs/nfs/mount_clnt.c
index 4f981f1..d4c2d6b 100644
--- a/fs/nfs/mount_clnt.c
+++ b/fs/nfs/mount_clnt.c
@@ -236,10 +236,8 @@
 		.authflavor	= RPC_AUTH_UNIX,
 		.flags		= RPC_CLNT_CREATE_NOPING,
 	};
-	struct mountres	result;
 	struct rpc_message msg	= {
 		.rpc_argp	= info->dirpath,
-		.rpc_resp	= &result,
 	};
 	struct rpc_clnt *clnt;
 	int status;
@@ -248,7 +246,7 @@
 		args.flags |= RPC_CLNT_CREATE_NONPRIVPORT;
 
 	clnt = rpc_create(&args);
-	if (unlikely(IS_ERR(clnt)))
+	if (IS_ERR(clnt))
 		goto out_clnt_err;
 
 	dprintk("NFS: sending UMNT request for %s:%s\n",
@@ -280,29 +278,20 @@
  * XDR encode/decode functions for MOUNT
  */
 
-static int encode_mntdirpath(struct xdr_stream *xdr, const char *pathname)
+static void encode_mntdirpath(struct xdr_stream *xdr, const char *pathname)
 {
 	const u32 pathname_len = strlen(pathname);
 	__be32 *p;
 
-	if (unlikely(pathname_len > MNTPATHLEN))
-		return -EIO;
-
-	p = xdr_reserve_space(xdr, sizeof(u32) + pathname_len);
-	if (unlikely(p == NULL))
-		return -EIO;
+	BUG_ON(pathname_len > MNTPATHLEN);
+	p = xdr_reserve_space(xdr, 4 + pathname_len);
 	xdr_encode_opaque(p, pathname, pathname_len);
-
-	return 0;
 }
 
-static int mnt_enc_dirpath(struct rpc_rqst *req, __be32 *p,
-			   const char *dirpath)
+static void mnt_xdr_enc_dirpath(struct rpc_rqst *req, struct xdr_stream *xdr,
+				const char *dirpath)
 {
-	struct xdr_stream xdr;
-
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-	return encode_mntdirpath(&xdr, dirpath);
+	encode_mntdirpath(xdr, dirpath);
 }
 
 /*
@@ -320,10 +309,10 @@
 	u32 status;
 	__be32 *p;
 
-	p = xdr_inline_decode(xdr, sizeof(status));
+	p = xdr_inline_decode(xdr, 4);
 	if (unlikely(p == NULL))
 		return -EIO;
-	status = ntohl(*p);
+	status = be32_to_cpup(p);
 
 	for (i = 0; i < ARRAY_SIZE(mnt_errtbl); i++) {
 		if (mnt_errtbl[i].status == status) {
@@ -351,18 +340,16 @@
 	return 0;
 }
 
-static int mnt_dec_mountres(struct rpc_rqst *req, __be32 *p,
-			    struct mountres *res)
+static int mnt_xdr_dec_mountres(struct rpc_rqst *req,
+				struct xdr_stream *xdr,
+				struct mountres *res)
 {
-	struct xdr_stream xdr;
 	int status;
 
-	xdr_init_decode(&xdr, &req->rq_rcv_buf, p);
-
-	status = decode_status(&xdr, res);
+	status = decode_status(xdr, res);
 	if (unlikely(status != 0 || res->errno != 0))
 		return status;
-	return decode_fhandle(&xdr, res);
+	return decode_fhandle(xdr, res);
 }
 
 static int decode_fhs_status(struct xdr_stream *xdr, struct mountres *res)
@@ -371,10 +358,10 @@
 	u32 status;
 	__be32 *p;
 
-	p = xdr_inline_decode(xdr, sizeof(status));
+	p = xdr_inline_decode(xdr, 4);
 	if (unlikely(p == NULL))
 		return -EIO;
-	status = ntohl(*p);
+	status = be32_to_cpup(p);
 
 	for (i = 0; i < ARRAY_SIZE(mnt3_errtbl); i++) {
 		if (mnt3_errtbl[i].status == status) {
@@ -394,11 +381,11 @@
 	u32 size;
 	__be32 *p;
 
-	p = xdr_inline_decode(xdr, sizeof(size));
+	p = xdr_inline_decode(xdr, 4);
 	if (unlikely(p == NULL))
 		return -EIO;
 
-	size = ntohl(*p++);
+	size = be32_to_cpup(p);
 	if (size > NFS3_FHSIZE || size == 0)
 		return -EIO;
 
@@ -421,15 +408,15 @@
 	if (*count == 0)
 		return 0;
 
-	p = xdr_inline_decode(xdr, sizeof(entries));
+	p = xdr_inline_decode(xdr, 4);
 	if (unlikely(p == NULL))
 		return -EIO;
-	entries = ntohl(*p);
+	entries = be32_to_cpup(p);
 	dprintk("NFS: received %u auth flavors\n", entries);
 	if (entries > NFS_MAX_SECFLAVORS)
 		entries = NFS_MAX_SECFLAVORS;
 
-	p = xdr_inline_decode(xdr, sizeof(u32) * entries);
+	p = xdr_inline_decode(xdr, 4 * entries);
 	if (unlikely(p == NULL))
 		return -EIO;
 
@@ -437,7 +424,7 @@
 		entries = *count;
 
 	for (i = 0; i < entries; i++) {
-		flavors[i] = ntohl(*p++);
+		flavors[i] = be32_to_cpup(p++);
 		dprintk("NFS:   auth flavor[%u]: %d\n", i, flavors[i]);
 	}
 	*count = i;
@@ -445,30 +432,28 @@
 	return 0;
 }
 
-static int mnt_dec_mountres3(struct rpc_rqst *req, __be32 *p,
-			     struct mountres *res)
+static int mnt_xdr_dec_mountres3(struct rpc_rqst *req,
+				 struct xdr_stream *xdr,
+				 struct mountres *res)
 {
-	struct xdr_stream xdr;
 	int status;
 
-	xdr_init_decode(&xdr, &req->rq_rcv_buf, p);
-
-	status = decode_fhs_status(&xdr, res);
+	status = decode_fhs_status(xdr, res);
 	if (unlikely(status != 0 || res->errno != 0))
 		return status;
-	status = decode_fhandle3(&xdr, res);
+	status = decode_fhandle3(xdr, res);
 	if (unlikely(status != 0)) {
 		res->errno = -EBADHANDLE;
 		return 0;
 	}
-	return decode_auth_flavors(&xdr, res);
+	return decode_auth_flavors(xdr, res);
 }
 
 static struct rpc_procinfo mnt_procedures[] = {
 	[MOUNTPROC_MNT] = {
 		.p_proc		= MOUNTPROC_MNT,
-		.p_encode	= (kxdrproc_t)mnt_enc_dirpath,
-		.p_decode	= (kxdrproc_t)mnt_dec_mountres,
+		.p_encode	= (kxdreproc_t)mnt_xdr_enc_dirpath,
+		.p_decode	= (kxdrdproc_t)mnt_xdr_dec_mountres,
 		.p_arglen	= MNT_enc_dirpath_sz,
 		.p_replen	= MNT_dec_mountres_sz,
 		.p_statidx	= MOUNTPROC_MNT,
@@ -476,7 +461,7 @@
 	},
 	[MOUNTPROC_UMNT] = {
 		.p_proc		= MOUNTPROC_UMNT,
-		.p_encode	= (kxdrproc_t)mnt_enc_dirpath,
+		.p_encode	= (kxdreproc_t)mnt_xdr_enc_dirpath,
 		.p_arglen	= MNT_enc_dirpath_sz,
 		.p_statidx	= MOUNTPROC_UMNT,
 		.p_name		= "UMOUNT",
@@ -486,8 +471,8 @@
 static struct rpc_procinfo mnt3_procedures[] = {
 	[MOUNTPROC3_MNT] = {
 		.p_proc		= MOUNTPROC3_MNT,
-		.p_encode	= (kxdrproc_t)mnt_enc_dirpath,
-		.p_decode	= (kxdrproc_t)mnt_dec_mountres3,
+		.p_encode	= (kxdreproc_t)mnt_xdr_enc_dirpath,
+		.p_decode	= (kxdrdproc_t)mnt_xdr_dec_mountres3,
 		.p_arglen	= MNT_enc_dirpath_sz,
 		.p_replen	= MNT_dec_mountres3_sz,
 		.p_statidx	= MOUNTPROC3_MNT,
@@ -495,7 +480,7 @@
 	},
 	[MOUNTPROC3_UMNT] = {
 		.p_proc		= MOUNTPROC3_UMNT,
-		.p_encode	= (kxdrproc_t)mnt_enc_dirpath,
+		.p_encode	= (kxdreproc_t)mnt_xdr_enc_dirpath,
 		.p_arglen	= MNT_enc_dirpath_sz,
 		.p_statidx	= MOUNTPROC3_UMNT,
 		.p_name		= "UMOUNT",
diff --git a/fs/nfs/nfs2xdr.c b/fs/nfs/nfs2xdr.c
index 5914a19..792cb13 100644
--- a/fs/nfs/nfs2xdr.c
+++ b/fs/nfs/nfs2xdr.c
@@ -61,584 +61,1008 @@
 #define NFS_readdirres_sz	(1)
 #define NFS_statfsres_sz	(1+NFS_info_sz)
 
-/*
- * Common NFS XDR functions as inlines
- */
-static inline __be32 *
-xdr_encode_fhandle(__be32 *p, const struct nfs_fh *fhandle)
-{
-	memcpy(p, fhandle->data, NFS2_FHSIZE);
-	return p + XDR_QUADLEN(NFS2_FHSIZE);
-}
-
-static inline __be32 *
-xdr_decode_fhandle(__be32 *p, struct nfs_fh *fhandle)
-{
-	/* NFSv2 handles have a fixed length */
-	fhandle->size = NFS2_FHSIZE;
-	memcpy(fhandle->data, p, NFS2_FHSIZE);
-	return p + XDR_QUADLEN(NFS2_FHSIZE);
-}
-
-static inline __be32*
-xdr_encode_time(__be32 *p, struct timespec *timep)
-{
-	*p++ = htonl(timep->tv_sec);
-	/* Convert nanoseconds into microseconds */
-	*p++ = htonl(timep->tv_nsec ? timep->tv_nsec / 1000 : 0);
-	return p;
-}
-
-static inline __be32*
-xdr_encode_current_server_time(__be32 *p, struct timespec *timep)
-{
-	/*
-	 * Passing the invalid value useconds=1000000 is a
-	 * Sun convention for "set to current server time".
-	 * It's needed to make permissions checks for the
-	 * "touch" program across v2 mounts to Solaris and
-	 * Irix boxes work correctly. See description of
-	 * sattr in section 6.1 of "NFS Illustrated" by
-	 * Brent Callaghan, Addison-Wesley, ISBN 0-201-32750-5
-	 */
-	*p++ = htonl(timep->tv_sec);
-	*p++ = htonl(1000000);
-	return p;
-}
-
-static inline __be32*
-xdr_decode_time(__be32 *p, struct timespec *timep)
-{
-	timep->tv_sec = ntohl(*p++);
-	/* Convert microseconds into nanoseconds */
-	timep->tv_nsec = ntohl(*p++) * 1000;
-	return p;
-}
-
-static __be32 *
-xdr_decode_fattr(__be32 *p, struct nfs_fattr *fattr)
-{
-	u32 rdev, type;
-	type = ntohl(*p++);
-	fattr->mode = ntohl(*p++);
-	fattr->nlink = ntohl(*p++);
-	fattr->uid = ntohl(*p++);
-	fattr->gid = ntohl(*p++);
-	fattr->size = ntohl(*p++);
-	fattr->du.nfs2.blocksize = ntohl(*p++);
-	rdev = ntohl(*p++);
-	fattr->du.nfs2.blocks = ntohl(*p++);
-	fattr->fsid.major = ntohl(*p++);
-	fattr->fsid.minor = 0;
-	fattr->fileid = ntohl(*p++);
-	p = xdr_decode_time(p, &fattr->atime);
-	p = xdr_decode_time(p, &fattr->mtime);
-	p = xdr_decode_time(p, &fattr->ctime);
-	fattr->valid |= NFS_ATTR_FATTR_V2;
-	fattr->rdev = new_decode_dev(rdev);
-	if (type == NFCHR && rdev == NFS2_FIFO_DEV) {
-		fattr->mode = (fattr->mode & ~S_IFMT) | S_IFIFO;
-		fattr->rdev = 0;
-	}
-	return p;
-}
-
-static inline __be32 *
-xdr_encode_sattr(__be32 *p, struct iattr *attr)
-{
-	const __be32 not_set = __constant_htonl(0xFFFFFFFF);
-
-	*p++ = (attr->ia_valid & ATTR_MODE) ? htonl(attr->ia_mode) : not_set;
-	*p++ = (attr->ia_valid & ATTR_UID) ? htonl(attr->ia_uid) : not_set;
-	*p++ = (attr->ia_valid & ATTR_GID) ? htonl(attr->ia_gid) : not_set;
-	*p++ = (attr->ia_valid & ATTR_SIZE) ? htonl(attr->ia_size) : not_set;
-
-	if (attr->ia_valid & ATTR_ATIME_SET) {
-		p = xdr_encode_time(p, &attr->ia_atime);
-	} else if (attr->ia_valid & ATTR_ATIME) {
-		p = xdr_encode_current_server_time(p, &attr->ia_atime);
-	} else {
-		*p++ = not_set;
-		*p++ = not_set;
-	}
-
-	if (attr->ia_valid & ATTR_MTIME_SET) {
-		p = xdr_encode_time(p, &attr->ia_mtime);
-	} else if (attr->ia_valid & ATTR_MTIME) {
-		p = xdr_encode_current_server_time(p, &attr->ia_mtime);
-	} else {
-		*p++ = not_set;	
-		*p++ = not_set;
-	}
-  	return p;
-}
 
 /*
- * NFS encode functions
+ * While encoding arguments, set up the reply buffer in advance to
+ * receive reply data directly into the page cache.
  */
-/*
- * Encode file handle argument
- * GETATTR, READLINK, STATFS
- */
-static int
-nfs_xdr_fhandle(struct rpc_rqst *req, __be32 *p, struct nfs_fh *fh)
-{
-	p = xdr_encode_fhandle(p, fh);
-	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
-	return 0;
-}
-
-/*
- * Encode SETATTR arguments
- */
-static int
-nfs_xdr_sattrargs(struct rpc_rqst *req, __be32 *p, struct nfs_sattrargs *args)
-{
-	p = xdr_encode_fhandle(p, args->fh);
-	p = xdr_encode_sattr(p, args->sattr);
-	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
-	return 0;
-}
-
-/*
- * Encode directory ops argument
- * LOOKUP, RMDIR
- */
-static int
-nfs_xdr_diropargs(struct rpc_rqst *req, __be32 *p, struct nfs_diropargs *args)
-{
-	p = xdr_encode_fhandle(p, args->fh);
-	p = xdr_encode_array(p, args->name, args->len);
-	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
-	return 0;
-}
-
-/*
- * Encode REMOVE argument
- */
-static int
-nfs_xdr_removeargs(struct rpc_rqst *req, __be32 *p, const struct nfs_removeargs *args)
-{
-	p = xdr_encode_fhandle(p, args->fh);
-	p = xdr_encode_array(p, args->name.name, args->name.len);
-	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
-	return 0;
-}
-
-/*
- * Arguments to a READ call. Since we read data directly into the page
- * cache, we also set up the reply iovec here so that iov[1] points
- * exactly to the page we want to fetch.
- */
-static int
-nfs_xdr_readargs(struct rpc_rqst *req, __be32 *p, struct nfs_readargs *args)
+static void prepare_reply_buffer(struct rpc_rqst *req, struct page **pages,
+				 unsigned int base, unsigned int len,
+				 unsigned int bufsize)
 {
 	struct rpc_auth	*auth = req->rq_cred->cr_auth;
 	unsigned int replen;
-	u32 offset = (u32)args->offset;
-	u32 count = args->count;
 
-	p = xdr_encode_fhandle(p, args->fh);
-	*p++ = htonl(offset);
-	*p++ = htonl(count);
-	*p++ = htonl(count);
-	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
-
-	/* Inline the page array */
-	replen = (RPC_REPHDRSIZE + auth->au_rslack + NFS_readres_sz) << 2;
-	xdr_inline_pages(&req->rq_rcv_buf, replen,
-			 args->pages, args->pgbase, count);
-	req->rq_rcv_buf.flags |= XDRBUF_READ;
-	return 0;
+	replen = RPC_REPHDRSIZE + auth->au_rslack + bufsize;
+	xdr_inline_pages(&req->rq_rcv_buf, replen << 2, pages, base, len);
 }
 
 /*
- * Decode READ reply
+ * Handle decode buffer overflows out-of-line.
  */
-static int
-nfs_xdr_readres(struct rpc_rqst *req, __be32 *p, struct nfs_readres *res)
-{
-	struct kvec *iov = req->rq_rcv_buf.head;
-	size_t hdrlen;
-	u32 count, recvd;
-	int status;
-
-	if ((status = ntohl(*p++)))
-		return nfs_stat_to_errno(status);
-	p = xdr_decode_fattr(p, res->fattr);
-
-	count = ntohl(*p++);
-	res->eof = 0;
-	hdrlen = (u8 *) p - (u8 *) iov->iov_base;
-	if (iov->iov_len < hdrlen) {
-		dprintk("NFS: READ reply header overflowed:"
-				"length %Zu > %Zu\n", hdrlen, iov->iov_len);
-		return -errno_NFSERR_IO;
-	} else if (iov->iov_len != hdrlen) {
-		dprintk("NFS: READ header is short. iovec will be shifted.\n");
-		xdr_shift_buf(&req->rq_rcv_buf, iov->iov_len - hdrlen);
-	}
-
-	recvd = req->rq_rcv_buf.len - hdrlen;
-	if (count > recvd) {
-		dprintk("NFS: server cheating in read reply: "
-			"count %u > recvd %u\n", count, recvd);
-		count = recvd;
-	}
-
-	dprintk("RPC:      readres OK count %u\n", count);
-	if (count < res->count)
-		res->count = count;
-
-	return count;
-}
-
-
-/*
- * Write arguments. Splice the buffer to be written into the iovec.
- */
-static int
-nfs_xdr_writeargs(struct rpc_rqst *req, __be32 *p, struct nfs_writeargs *args)
-{
-	struct xdr_buf *sndbuf = &req->rq_snd_buf;
-	u32 offset = (u32)args->offset;
-	u32 count = args->count;
-
-	p = xdr_encode_fhandle(p, args->fh);
-	*p++ = htonl(offset);
-	*p++ = htonl(offset);
-	*p++ = htonl(count);
-	*p++ = htonl(count);
-	sndbuf->len = xdr_adjust_iovec(sndbuf->head, p);
-
-	/* Copy the page array */
-	xdr_encode_pages(sndbuf, args->pages, args->pgbase, count);
-	sndbuf->flags |= XDRBUF_WRITE;
-	return 0;
-}
-
-/*
- * Encode create arguments
- * CREATE, MKDIR
- */
-static int
-nfs_xdr_createargs(struct rpc_rqst *req, __be32 *p, struct nfs_createargs *args)
-{
-	p = xdr_encode_fhandle(p, args->fh);
-	p = xdr_encode_array(p, args->name, args->len);
-	p = xdr_encode_sattr(p, args->sattr);
-	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
-	return 0;
-}
-
-/*
- * Encode RENAME arguments
- */
-static int
-nfs_xdr_renameargs(struct rpc_rqst *req, __be32 *p, struct nfs_renameargs *args)
-{
-	p = xdr_encode_fhandle(p, args->old_dir);
-	p = xdr_encode_array(p, args->old_name->name, args->old_name->len);
-	p = xdr_encode_fhandle(p, args->new_dir);
-	p = xdr_encode_array(p, args->new_name->name, args->new_name->len);
-	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
-	return 0;
-}
-
-/*
- * Encode LINK arguments
- */
-static int
-nfs_xdr_linkargs(struct rpc_rqst *req, __be32 *p, struct nfs_linkargs *args)
-{
-	p = xdr_encode_fhandle(p, args->fromfh);
-	p = xdr_encode_fhandle(p, args->tofh);
-	p = xdr_encode_array(p, args->toname, args->tolen);
-	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
-	return 0;
-}
-
-/*
- * Encode SYMLINK arguments
- */
-static int
-nfs_xdr_symlinkargs(struct rpc_rqst *req, __be32 *p, struct nfs_symlinkargs *args)
-{
-	struct xdr_buf *sndbuf = &req->rq_snd_buf;
-	size_t pad;
-
-	p = xdr_encode_fhandle(p, args->fromfh);
-	p = xdr_encode_array(p, args->fromname, args->fromlen);
-	*p++ = htonl(args->pathlen);
-	sndbuf->len = xdr_adjust_iovec(sndbuf->head, p);
-
-	xdr_encode_pages(sndbuf, args->pages, 0, args->pathlen);
-
-	/*
-	 * xdr_encode_pages may have added a few bytes to ensure the
-	 * pathname ends on a 4-byte boundary.  Start encoding the
-	 * attributes after the pad bytes.
-	 */
-	pad = sndbuf->tail->iov_len;
-	if (pad > 0)
-		p++;
-	p = xdr_encode_sattr(p, args->sattr);
-	sndbuf->len += xdr_adjust_iovec(sndbuf->tail, p) - pad;
-	return 0;
-}
-
-/*
- * Encode arguments to readdir call
- */
-static int
-nfs_xdr_readdirargs(struct rpc_rqst *req, __be32 *p, struct nfs_readdirargs *args)
-{
-	struct rpc_auth	*auth = req->rq_cred->cr_auth;
-	unsigned int replen;
-	u32 count = args->count;
-
-	p = xdr_encode_fhandle(p, args->fh);
-	*p++ = htonl(args->cookie);
-	*p++ = htonl(count); /* see above */
-	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
-
-	/* Inline the page array */
-	replen = (RPC_REPHDRSIZE + auth->au_rslack + NFS_readdirres_sz) << 2;
-	xdr_inline_pages(&req->rq_rcv_buf, replen, args->pages, 0, count);
-	return 0;
-}
-
-/*
- * Decode the result of a readdir call.
- * We're not really decoding anymore, we just leave the buffer untouched
- * and only check that it is syntactically correct.
- * The real decoding happens in nfs_decode_entry below, called directly
- * from nfs_readdir for each entry.
- */
-static int
-nfs_xdr_readdirres(struct rpc_rqst *req, __be32 *p, void *dummy)
-{
-	struct xdr_buf *rcvbuf = &req->rq_rcv_buf;
-	struct kvec *iov = rcvbuf->head;
-	struct page **page;
-	size_t hdrlen;
-	unsigned int pglen, recvd;
-	int status;
-
-	if ((status = ntohl(*p++)))
-		return nfs_stat_to_errno(status);
-
-	hdrlen = (u8 *) p - (u8 *) iov->iov_base;
-	if (iov->iov_len < hdrlen) {
-		dprintk("NFS: READDIR reply header overflowed:"
-				"length %Zu > %Zu\n", hdrlen, iov->iov_len);
-		return -errno_NFSERR_IO;
-	} else if (iov->iov_len != hdrlen) {
-		dprintk("NFS: READDIR header is short. iovec will be shifted.\n");
-		xdr_shift_buf(rcvbuf, iov->iov_len - hdrlen);
-	}
-
-	pglen = rcvbuf->page_len;
-	recvd = rcvbuf->len - hdrlen;
-	if (pglen > recvd)
-		pglen = recvd;
-	page = rcvbuf->pages;
-	return pglen;
-}
-
 static void print_overflow_msg(const char *func, const struct xdr_stream *xdr)
 {
-	dprintk("nfs: %s: prematurely hit end of receive buffer. "
+	dprintk("NFS: %s prematurely hit the end of our receive buffer. "
 		"Remaining buffer length is %tu words.\n",
 		func, xdr->end - xdr->p);
 }
 
-__be32 *
-nfs_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry, struct nfs_server *server, int plus)
+
+/*
+ * Encode/decode NFSv2 basic data types
+ *
+ * Basic NFSv2 data types are defined in section 2.3 of RFC 1094:
+ * "NFS: Network File System Protocol Specification".
+ *
+ * Not all basic data types have their own encoding and decoding
+ * functions.  For run-time efficiency, some data types are encoded
+ * or decoded inline.
+ */
+
+/*
+ *	typedef opaque	nfsdata<>;
+ */
+static int decode_nfsdata(struct xdr_stream *xdr, struct nfs_readres *result)
+{
+	u32 recvd, count;
+	size_t hdrlen;
+	__be32 *p;
+
+	p = xdr_inline_decode(xdr, 4);
+	if (unlikely(p == NULL))
+		goto out_overflow;
+	count = be32_to_cpup(p);
+	hdrlen = (u8 *)xdr->p - (u8 *)xdr->iov->iov_base;
+	recvd = xdr->buf->len - hdrlen;
+	if (unlikely(count > recvd))
+		goto out_cheating;
+out:
+	xdr_read_pages(xdr, count);
+	result->eof = 0;	/* NFSv2 does not pass EOF flag on the wire. */
+	result->count = count;
+	return count;
+out_cheating:
+	dprintk("NFS: server cheating in read result: "
+		"count %u > recvd %u\n", count, recvd);
+	count = recvd;
+	goto out;
+out_overflow:
+	print_overflow_msg(__func__, xdr);
+	return -EIO;
+}
+
+/*
+ *	enum stat {
+ *		NFS_OK = 0,
+ *		NFSERR_PERM = 1,
+ *		NFSERR_NOENT = 2,
+ *		NFSERR_IO = 5,
+ *		NFSERR_NXIO = 6,
+ *		NFSERR_ACCES = 13,
+ *		NFSERR_EXIST = 17,
+ *		NFSERR_NODEV = 19,
+ *		NFSERR_NOTDIR = 20,
+ *		NFSERR_ISDIR = 21,
+ *		NFSERR_FBIG = 27,
+ *		NFSERR_NOSPC = 28,
+ *		NFSERR_ROFS = 30,
+ *		NFSERR_NAMETOOLONG = 63,
+ *		NFSERR_NOTEMPTY = 66,
+ *		NFSERR_DQUOT = 69,
+ *		NFSERR_STALE = 70,
+ *		NFSERR_WFLUSH = 99
+ *	};
+ */
+static int decode_stat(struct xdr_stream *xdr, enum nfs_stat *status)
 {
 	__be32 *p;
+
 	p = xdr_inline_decode(xdr, 4);
-	if (unlikely(!p))
+	if (unlikely(p == NULL))
 		goto out_overflow;
-	if (!ntohl(*p++)) {
-		p = xdr_inline_decode(xdr, 4);
-		if (unlikely(!p))
-			goto out_overflow;
-		if (!ntohl(*p++))
-			return ERR_PTR(-EAGAIN);
-		entry->eof = 1;
-		return ERR_PTR(-EBADCOOKIE);
+	*status = be32_to_cpup(p);
+	return 0;
+out_overflow:
+	print_overflow_msg(__func__, xdr);
+	return -EIO;
+}
+
+/*
+ * 2.3.2.  ftype
+ *
+ *	enum ftype {
+ *		NFNON = 0,
+ *		NFREG = 1,
+ *		NFDIR = 2,
+ *		NFBLK = 3,
+ *		NFCHR = 4,
+ *		NFLNK = 5
+ *	};
+ *
+ */
+static __be32 *xdr_decode_ftype(__be32 *p, u32 *type)
+{
+	*type = be32_to_cpup(p++);
+	if (unlikely(*type > NF2FIFO))
+		*type = NFBAD;
+	return p;
+}
+
+/*
+ * 2.3.3.  fhandle
+ *
+ *	typedef opaque fhandle[FHSIZE];
+ */
+static void encode_fhandle(struct xdr_stream *xdr, const struct nfs_fh *fh)
+{
+	__be32 *p;
+
+	BUG_ON(fh->size != NFS2_FHSIZE);
+	p = xdr_reserve_space(xdr, NFS2_FHSIZE);
+	memcpy(p, fh->data, NFS2_FHSIZE);
+}
+
+static int decode_fhandle(struct xdr_stream *xdr, struct nfs_fh *fh)
+{
+	__be32 *p;
+
+	p = xdr_inline_decode(xdr, NFS2_FHSIZE);
+	if (unlikely(p == NULL))
+		goto out_overflow;
+	fh->size = NFS2_FHSIZE;
+	memcpy(fh->data, p, NFS2_FHSIZE);
+	return 0;
+out_overflow:
+	print_overflow_msg(__func__, xdr);
+	return -EIO;
+}
+
+/*
+ * 2.3.4.  timeval
+ *
+ *	struct timeval {
+ *		unsigned int seconds;
+ *		unsigned int useconds;
+ *	};
+ */
+static __be32 *xdr_encode_time(__be32 *p, const struct timespec *timep)
+{
+	*p++ = cpu_to_be32(timep->tv_sec);
+	if (timep->tv_nsec != 0)
+		*p++ = cpu_to_be32(timep->tv_nsec / NSEC_PER_USEC);
+	else
+		*p++ = cpu_to_be32(0);
+	return p;
+}
+
+/*
+ * Passing the invalid value useconds=1000000 is a Sun convention for
+ * "set to current server time".  It's needed to make permissions checks
+ * for the "touch" program across v2 mounts to Solaris and Irix servers
+ * work correctly.  See description of sattr in section 6.1 of "NFS
+ * Illustrated" by Brent Callaghan, Addison-Wesley, ISBN 0-201-32750-5.
+ */
+static __be32 *xdr_encode_current_server_time(__be32 *p,
+					      const struct timespec *timep)
+{
+	*p++ = cpu_to_be32(timep->tv_sec);
+	*p++ = cpu_to_be32(1000000);
+	return p;
+}
+
+static __be32 *xdr_decode_time(__be32 *p, struct timespec *timep)
+{
+	timep->tv_sec = be32_to_cpup(p++);
+	timep->tv_nsec = be32_to_cpup(p++) * NSEC_PER_USEC;
+	return p;
+}
+
+/*
+ * 2.3.5.  fattr
+ *
+ *	struct fattr {
+ *		ftype		type;
+ *		unsigned int	mode;
+ *		unsigned int	nlink;
+ *		unsigned int	uid;
+ *		unsigned int	gid;
+ *		unsigned int	size;
+ *		unsigned int	blocksize;
+ *		unsigned int	rdev;
+ *		unsigned int	blocks;
+ *		unsigned int	fsid;
+ *		unsigned int	fileid;
+ *		timeval		atime;
+ *		timeval		mtime;
+ *		timeval		ctime;
+ *	};
+ *
+ */
+static int decode_fattr(struct xdr_stream *xdr, struct nfs_fattr *fattr)
+{
+	u32 rdev, type;
+	__be32 *p;
+
+	p = xdr_inline_decode(xdr, NFS_fattr_sz << 2);
+	if (unlikely(p == NULL))
+		goto out_overflow;
+
+	fattr->valid |= NFS_ATTR_FATTR_V2;
+
+	p = xdr_decode_ftype(p, &type);
+
+	fattr->mode = be32_to_cpup(p++);
+	fattr->nlink = be32_to_cpup(p++);
+	fattr->uid = be32_to_cpup(p++);
+	fattr->gid = be32_to_cpup(p++);
+	fattr->size = be32_to_cpup(p++);
+	fattr->du.nfs2.blocksize = be32_to_cpup(p++);
+
+	rdev = be32_to_cpup(p++);
+	fattr->rdev = new_decode_dev(rdev);
+	if (type == (u32)NFCHR && rdev == (u32)NFS2_FIFO_DEV) {
+		fattr->mode = (fattr->mode & ~S_IFMT) | S_IFIFO;
+		fattr->rdev = 0;
 	}
 
-	p = xdr_inline_decode(xdr, 8);
-	if (unlikely(!p))
-		goto out_overflow;
+	fattr->du.nfs2.blocks = be32_to_cpup(p++);
+	fattr->fsid.major = be32_to_cpup(p++);
+	fattr->fsid.minor = 0;
+	fattr->fileid = be32_to_cpup(p++);
 
-	entry->ino	  = ntohl(*p++);
-	entry->len	  = ntohl(*p++);
+	p = xdr_decode_time(p, &fattr->atime);
+	p = xdr_decode_time(p, &fattr->mtime);
+	xdr_decode_time(p, &fattr->ctime);
+	return 0;
+out_overflow:
+	print_overflow_msg(__func__, xdr);
+	return -EIO;
+}
 
-	p = xdr_inline_decode(xdr, entry->len + 4);
-	if (unlikely(!p))
+/*
+ * 2.3.6.  sattr
+ *
+ *	struct sattr {
+ *		unsigned int	mode;
+ *		unsigned int	uid;
+ *		unsigned int	gid;
+ *		unsigned int	size;
+ *		timeval		atime;
+ *		timeval		mtime;
+ *	};
+ */
+
+#define NFS2_SATTR_NOT_SET	(0xffffffff)
+
+static __be32 *xdr_time_not_set(__be32 *p)
+{
+	*p++ = cpu_to_be32(NFS2_SATTR_NOT_SET);
+	*p++ = cpu_to_be32(NFS2_SATTR_NOT_SET);
+	return p;
+}
+
+static void encode_sattr(struct xdr_stream *xdr, const struct iattr *attr)
+{
+	__be32 *p;
+
+	p = xdr_reserve_space(xdr, NFS_sattr_sz << 2);
+
+	if (attr->ia_valid & ATTR_MODE)
+		*p++ = cpu_to_be32(attr->ia_mode);
+	else
+		*p++ = cpu_to_be32(NFS2_SATTR_NOT_SET);
+	if (attr->ia_valid & ATTR_UID)
+		*p++ = cpu_to_be32(attr->ia_uid);
+	else
+		*p++ = cpu_to_be32(NFS2_SATTR_NOT_SET);
+	if (attr->ia_valid & ATTR_GID)
+		*p++ = cpu_to_be32(attr->ia_gid);
+	else
+		*p++ = cpu_to_be32(NFS2_SATTR_NOT_SET);
+	if (attr->ia_valid & ATTR_SIZE)
+		*p++ = cpu_to_be32((u32)attr->ia_size);
+	else
+		*p++ = cpu_to_be32(NFS2_SATTR_NOT_SET);
+
+	if (attr->ia_valid & ATTR_ATIME_SET)
+		p = xdr_encode_time(p, &attr->ia_atime);
+	else if (attr->ia_valid & ATTR_ATIME)
+		p = xdr_encode_current_server_time(p, &attr->ia_atime);
+	else
+		p = xdr_time_not_set(p);
+	if (attr->ia_valid & ATTR_MTIME_SET)
+		xdr_encode_time(p, &attr->ia_mtime);
+	else if (attr->ia_valid & ATTR_MTIME)
+		xdr_encode_current_server_time(p, &attr->ia_mtime);
+	else
+		xdr_time_not_set(p);
+}
+
+/*
+ * 2.3.7.  filename
+ *
+ *	typedef string filename<MAXNAMLEN>;
+ */
+static void encode_filename(struct xdr_stream *xdr,
+			    const char *name, u32 length)
+{
+	__be32 *p;
+
+	BUG_ON(length > NFS2_MAXNAMLEN);
+	p = xdr_reserve_space(xdr, 4 + length);
+	xdr_encode_opaque(p, name, length);
+}
+
+static int decode_filename_inline(struct xdr_stream *xdr,
+				  const char **name, u32 *length)
+{
+	__be32 *p;
+	u32 count;
+
+	p = xdr_inline_decode(xdr, 4);
+	if (unlikely(p == NULL))
 		goto out_overflow;
-	entry->name	  = (const char *) p;
-	p		 += XDR_QUADLEN(entry->len);
-	entry->prev_cookie	  = entry->cookie;
-	entry->cookie	  = ntohl(*p++);
+	count = be32_to_cpup(p);
+	if (count > NFS3_MAXNAMLEN)
+		goto out_nametoolong;
+	p = xdr_inline_decode(xdr, count);
+	if (unlikely(p == NULL))
+		goto out_overflow;
+	*name = (const char *)p;
+	*length = count;
+	return 0;
+out_nametoolong:
+	dprintk("NFS: returned filename too long: %u\n", count);
+	return -ENAMETOOLONG;
+out_overflow:
+	print_overflow_msg(__func__, xdr);
+	return -EIO;
+}
+
+/*
+ * 2.3.8.  path
+ *
+ *	typedef string path<MAXPATHLEN>;
+ */
+static void encode_path(struct xdr_stream *xdr, struct page **pages, u32 length)
+{
+	__be32 *p;
+
+	BUG_ON(length > NFS2_MAXPATHLEN);
+	p = xdr_reserve_space(xdr, 4);
+	*p = cpu_to_be32(length);
+	xdr_write_pages(xdr, pages, 0, length);
+}
+
+static int decode_path(struct xdr_stream *xdr)
+{
+	u32 length, recvd;
+	size_t hdrlen;
+	__be32 *p;
+
+	p = xdr_inline_decode(xdr, 4);
+	if (unlikely(p == NULL))
+		goto out_overflow;
+	length = be32_to_cpup(p);
+	if (unlikely(length >= xdr->buf->page_len || length > NFS_MAXPATHLEN))
+		goto out_size;
+	hdrlen = (u8 *)xdr->p - (u8 *)xdr->iov->iov_base;
+	recvd = xdr->buf->len - hdrlen;
+	if (unlikely(length > recvd))
+		goto out_cheating;
+
+	xdr_read_pages(xdr, length);
+	xdr_terminate_string(xdr->buf, length);
+	return 0;
+out_size:
+	dprintk("NFS: returned pathname too long: %u\n", length);
+	return -ENAMETOOLONG;
+out_cheating:
+	dprintk("NFS: server cheating in pathname result: "
+		"length %u > received %u\n", length, recvd);
+	return -EIO;
+out_overflow:
+	print_overflow_msg(__func__, xdr);
+	return -EIO;
+}
+
+/*
+ * 2.3.9.  attrstat
+ *
+ *	union attrstat switch (stat status) {
+ *	case NFS_OK:
+ *		fattr attributes;
+ *	default:
+ *		void;
+ *	};
+ */
+static int decode_attrstat(struct xdr_stream *xdr, struct nfs_fattr *result)
+{
+	enum nfs_stat status;
+	int error;
+
+	error = decode_stat(xdr, &status);
+	if (unlikely(error))
+		goto out;
+	if (status != NFS_OK)
+		goto out_default;
+	error = decode_fattr(xdr, result);
+out:
+	return error;
+out_default:
+	return nfs_stat_to_errno(status);
+}
+
+/*
+ * 2.3.10.  diropargs
+ *
+ *	struct diropargs {
+ *		fhandle  dir;
+ *		filename name;
+ *	};
+ */
+static void encode_diropargs(struct xdr_stream *xdr, const struct nfs_fh *fh,
+			     const char *name, u32 length)
+{
+	encode_fhandle(xdr, fh);
+	encode_filename(xdr, name, length);
+}
+
+/*
+ * 2.3.11.  diropres
+ *
+ *	union diropres switch (stat status) {
+ *	case NFS_OK:
+ *		struct {
+ *			fhandle file;
+ *			fattr   attributes;
+ *		} diropok;
+ *	default:
+ *		void;
+ *	};
+ */
+static int decode_diropok(struct xdr_stream *xdr, struct nfs_diropok *result)
+{
+	int error;
+
+	error = decode_fhandle(xdr, result->fh);
+	if (unlikely(error))
+		goto out;
+	error = decode_fattr(xdr, result->fattr);
+out:
+	return error;
+}
+
+static int decode_diropres(struct xdr_stream *xdr, struct nfs_diropok *result)
+{
+	enum nfs_stat status;
+	int error;
+
+	error = decode_stat(xdr, &status);
+	if (unlikely(error))
+		goto out;
+	if (status != NFS_OK)
+		goto out_default;
+	error = decode_diropok(xdr, result);
+out:
+	return error;
+out_default:
+	return nfs_stat_to_errno(status);
+}
+
+
+/*
+ * NFSv2 XDR encode functions
+ *
+ * NFSv2 argument types are defined in section 2.2 of RFC 1094:
+ * "NFS: Network File System Protocol Specification".
+ */
+
+static void nfs2_xdr_enc_fhandle(struct rpc_rqst *req,
+				 struct xdr_stream *xdr,
+				 const struct nfs_fh *fh)
+{
+	encode_fhandle(xdr, fh);
+}
+
+/*
+ * 2.2.3.  sattrargs
+ *
+ *	struct sattrargs {
+ *		fhandle file;
+ *		sattr attributes;
+ *	};
+ */
+static void nfs2_xdr_enc_sattrargs(struct rpc_rqst *req,
+				   struct xdr_stream *xdr,
+				   const struct nfs_sattrargs *args)
+{
+	encode_fhandle(xdr, args->fh);
+	encode_sattr(xdr, args->sattr);
+}
+
+static void nfs2_xdr_enc_diropargs(struct rpc_rqst *req,
+				   struct xdr_stream *xdr,
+				   const struct nfs_diropargs *args)
+{
+	encode_diropargs(xdr, args->fh, args->name, args->len);
+}
+
+static void nfs2_xdr_enc_readlinkargs(struct rpc_rqst *req,
+				      struct xdr_stream *xdr,
+				      const struct nfs_readlinkargs *args)
+{
+	encode_fhandle(xdr, args->fh);
+	prepare_reply_buffer(req, args->pages, args->pgbase,
+					args->pglen, NFS_readlinkres_sz);
+}
+
+/*
+ * 2.2.7.  readargs
+ *
+ *	struct readargs {
+ *		fhandle file;
+ *		unsigned offset;
+ *		unsigned count;
+ *		unsigned totalcount;
+ *	};
+ */
+static void encode_readargs(struct xdr_stream *xdr,
+			    const struct nfs_readargs *args)
+{
+	u32 offset = args->offset;
+	u32 count = args->count;
+	__be32 *p;
+
+	encode_fhandle(xdr, args->fh);
+
+	p = xdr_reserve_space(xdr, 4 + 4 + 4);
+	*p++ = cpu_to_be32(offset);
+	*p++ = cpu_to_be32(count);
+	*p = cpu_to_be32(count);
+}
+
+static void nfs2_xdr_enc_readargs(struct rpc_rqst *req,
+				  struct xdr_stream *xdr,
+				  const struct nfs_readargs *args)
+{
+	encode_readargs(xdr, args);
+	prepare_reply_buffer(req, args->pages, args->pgbase,
+					args->count, NFS_readres_sz);
+	req->rq_rcv_buf.flags |= XDRBUF_READ;
+}
+
+/*
+ * 2.2.9.  writeargs
+ *
+ *	struct writeargs {
+ *		fhandle file;
+ *		unsigned beginoffset;
+ *		unsigned offset;
+ *		unsigned totalcount;
+ *		nfsdata data;
+ *	};
+ */
+static void encode_writeargs(struct xdr_stream *xdr,
+			     const struct nfs_writeargs *args)
+{
+	u32 offset = args->offset;
+	u32 count = args->count;
+	__be32 *p;
+
+	encode_fhandle(xdr, args->fh);
+
+	p = xdr_reserve_space(xdr, 4 + 4 + 4 + 4);
+	*p++ = cpu_to_be32(offset);
+	*p++ = cpu_to_be32(offset);
+	*p++ = cpu_to_be32(count);
+
+	/* nfsdata */
+	*p = cpu_to_be32(count);
+	xdr_write_pages(xdr, args->pages, args->pgbase, count);
+}
+
+static void nfs2_xdr_enc_writeargs(struct rpc_rqst *req,
+				   struct xdr_stream *xdr,
+				   const struct nfs_writeargs *args)
+{
+	encode_writeargs(xdr, args);
+	xdr->buf->flags |= XDRBUF_WRITE;
+}
+
+/*
+ * 2.2.10.  createargs
+ *
+ *	struct createargs {
+ *		diropargs where;
+ *		sattr attributes;
+ *	};
+ */
+static void nfs2_xdr_enc_createargs(struct rpc_rqst *req,
+				    struct xdr_stream *xdr,
+				    const struct nfs_createargs *args)
+{
+	encode_diropargs(xdr, args->fh, args->name, args->len);
+	encode_sattr(xdr, args->sattr);
+}
+
+static void nfs2_xdr_enc_removeargs(struct rpc_rqst *req,
+				    struct xdr_stream *xdr,
+				    const struct nfs_removeargs *args)
+{
+	encode_diropargs(xdr, args->fh, args->name.name, args->name.len);
+}
+
+/*
+ * 2.2.12.  renameargs
+ *
+ *	struct renameargs {
+ *		diropargs from;
+ *		diropargs to;
+ *	};
+ */
+static void nfs2_xdr_enc_renameargs(struct rpc_rqst *req,
+				    struct xdr_stream *xdr,
+				    const struct nfs_renameargs *args)
+{
+	const struct qstr *old = args->old_name;
+	const struct qstr *new = args->new_name;
+
+	encode_diropargs(xdr, args->old_dir, old->name, old->len);
+	encode_diropargs(xdr, args->new_dir, new->name, new->len);
+}
+
+/*
+ * 2.2.13.  linkargs
+ *
+ *	struct linkargs {
+ *		fhandle from;
+ *		diropargs to;
+ *	};
+ */
+static void nfs2_xdr_enc_linkargs(struct rpc_rqst *req,
+				  struct xdr_stream *xdr,
+				  const struct nfs_linkargs *args)
+{
+	encode_fhandle(xdr, args->fromfh);
+	encode_diropargs(xdr, args->tofh, args->toname, args->tolen);
+}
+
+/*
+ * 2.2.14.  symlinkargs
+ *
+ *	struct symlinkargs {
+ *		diropargs from;
+ *		path to;
+ *		sattr attributes;
+ *	};
+ */
+static void nfs2_xdr_enc_symlinkargs(struct rpc_rqst *req,
+				     struct xdr_stream *xdr,
+				     const struct nfs_symlinkargs *args)
+{
+	encode_diropargs(xdr, args->fromfh, args->fromname, args->fromlen);
+	encode_path(xdr, args->pages, args->pathlen);
+	encode_sattr(xdr, args->sattr);
+}
+
+/*
+ * 2.2.17.  readdirargs
+ *
+ *	struct readdirargs {
+ *		fhandle dir;
+ *		nfscookie cookie;
+ *		unsigned count;
+ *	};
+ */
+static void encode_readdirargs(struct xdr_stream *xdr,
+			       const struct nfs_readdirargs *args)
+{
+	__be32 *p;
+
+	encode_fhandle(xdr, args->fh);
+
+	p = xdr_reserve_space(xdr, 4 + 4);
+	*p++ = cpu_to_be32(args->cookie);
+	*p = cpu_to_be32(args->count);
+}
+
+static void nfs2_xdr_enc_readdirargs(struct rpc_rqst *req,
+				     struct xdr_stream *xdr,
+				     const struct nfs_readdirargs *args)
+{
+	encode_readdirargs(xdr, args);
+	prepare_reply_buffer(req, args->pages, 0,
+					args->count, NFS_readdirres_sz);
+}
+
+/*
+ * NFSv2 XDR decode functions
+ *
+ * NFSv2 result types are defined in section 2.2 of RFC 1094:
+ * "NFS: Network File System Protocol Specification".
+ */
+
+static int nfs2_xdr_dec_stat(struct rpc_rqst *req, struct xdr_stream *xdr,
+			     void *__unused)
+{
+	enum nfs_stat status;
+	int error;
+
+	error = decode_stat(xdr, &status);
+	if (unlikely(error))
+		goto out;
+	if (status != NFS_OK)
+		goto out_default;
+out:
+	return error;
+out_default:
+	return nfs_stat_to_errno(status);
+}
+
+static int nfs2_xdr_dec_attrstat(struct rpc_rqst *req, struct xdr_stream *xdr,
+				 struct nfs_fattr *result)
+{
+	return decode_attrstat(xdr, result);
+}
+
+static int nfs2_xdr_dec_diropres(struct rpc_rqst *req, struct xdr_stream *xdr,
+				 struct nfs_diropok *result)
+{
+	return decode_diropres(xdr, result);
+}
+
+/*
+ * 2.2.6.  readlinkres
+ *
+ *	union readlinkres switch (stat status) {
+ *	case NFS_OK:
+ *		path data;
+ *	default:
+ *		void;
+ *	};
+ */
+static int nfs2_xdr_dec_readlinkres(struct rpc_rqst *req,
+				    struct xdr_stream *xdr, void *__unused)
+{
+	enum nfs_stat status;
+	int error;
+
+	error = decode_stat(xdr, &status);
+	if (unlikely(error))
+		goto out;
+	if (status != NFS_OK)
+		goto out_default;
+	error = decode_path(xdr);
+out:
+	return error;
+out_default:
+	return nfs_stat_to_errno(status);
+}
+
+/*
+ * 2.2.7.  readres
+ *
+ *	union readres switch (stat status) {
+ *	case NFS_OK:
+ *		fattr attributes;
+ *		nfsdata data;
+ *	default:
+ *		void;
+ *	};
+ */
+static int nfs2_xdr_dec_readres(struct rpc_rqst *req, struct xdr_stream *xdr,
+				struct nfs_readres *result)
+{
+	enum nfs_stat status;
+	int error;
+
+	error = decode_stat(xdr, &status);
+	if (unlikely(error))
+		goto out;
+	if (status != NFS_OK)
+		goto out_default;
+	error = decode_fattr(xdr, result->fattr);
+	if (unlikely(error))
+		goto out;
+	error = decode_nfsdata(xdr, result);
+out:
+	return error;
+out_default:
+	return nfs_stat_to_errno(status);
+}
+
+static int nfs2_xdr_dec_writeres(struct rpc_rqst *req, struct xdr_stream *xdr,
+				 struct nfs_writeres *result)
+{
+	/* All NFSv2 writes are "file sync" writes */
+	result->verf->committed = NFS_FILE_SYNC;
+	return decode_attrstat(xdr, result->fattr);
+}
+
+/**
+ * nfs2_decode_dirent - Decode a single NFSv2 directory entry stored in
+ *                      the local page cache.
+ * @xdr: XDR stream where entry resides
+ * @entry: buffer to fill in with entry data
+ * @plus: boolean indicating whether this should be a readdirplus entry
+ *
+ * Returns zero if successful, otherwise a negative errno value.
+ *
+ * This function is not invoked during READDIR reply decoding, but
+ * rather whenever an application invokes the getdents(2) system call
+ * on a directory already in our cache.
+ *
+ * 2.2.17.  entry
+ *
+ *	struct entry {
+ *		unsigned	fileid;
+ *		filename	name;
+ *		nfscookie	cookie;
+ *		entry		*nextentry;
+ *	};
+ */
+int nfs2_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry,
+		       int plus)
+{
+	__be32 *p;
+	int error;
+
+	p = xdr_inline_decode(xdr, 4);
+	if (unlikely(p == NULL))
+		goto out_overflow;
+	if (*p++ == xdr_zero) {
+		p = xdr_inline_decode(xdr, 4);
+		if (unlikely(p == NULL))
+			goto out_overflow;
+		if (*p++ == xdr_zero)
+			return -EAGAIN;
+		entry->eof = 1;
+		return -EBADCOOKIE;
+	}
+
+	p = xdr_inline_decode(xdr, 4);
+	if (unlikely(p == NULL))
+		goto out_overflow;
+	entry->ino = be32_to_cpup(p);
+
+	error = decode_filename_inline(xdr, &entry->name, &entry->len);
+	if (unlikely(error))
+		return error;
+
+	/*
+	 * The type (size and byte order) of nfscookie isn't defined in
+	 * RFC 1094.  This implementation assumes that it's an XDR uint32.
+	 */
+	entry->prev_cookie = entry->cookie;
+	p = xdr_inline_decode(xdr, 4);
+	if (unlikely(p == NULL))
+		goto out_overflow;
+	entry->cookie = be32_to_cpup(p);
 
 	entry->d_type = DT_UNKNOWN;
 
-	p = xdr_inline_peek(xdr, 8);
-	if (p != NULL)
-		entry->eof = !p[0] && p[1];
-	else
-		entry->eof = 0;
-
-	return p;
+	return 0;
 
 out_overflow:
 	print_overflow_msg(__func__, xdr);
-	return ERR_PTR(-EAGAIN);
+	return -EAGAIN;
 }
 
 /*
- * NFS XDR decode functions
+ * 2.2.17.  readdirres
+ *
+ *	union readdirres switch (stat status) {
+ *	case NFS_OK:
+ *		struct {
+ *			entry *entries;
+ *			bool eof;
+ *		} readdirok;
+ *	default:
+ *		void;
+ *	};
+ *
+ * Read the directory contents into the page cache, but don't
+ * touch them.  The actual decoding is done by nfs2_decode_dirent()
+ * during subsequent nfs_readdir() calls.
  */
-/*
- * Decode simple status reply
- */
-static int
-nfs_xdr_stat(struct rpc_rqst *req, __be32 *p, void *dummy)
+static int decode_readdirok(struct xdr_stream *xdr)
 {
-	int	status;
-
-	if ((status = ntohl(*p++)) != 0)
-		status = nfs_stat_to_errno(status);
-	return status;
-}
-
-/*
- * Decode attrstat reply
- * GETATTR, SETATTR, WRITE
- */
-static int
-nfs_xdr_attrstat(struct rpc_rqst *req, __be32 *p, struct nfs_fattr *fattr)
-{
-	int	status;
-
-	if ((status = ntohl(*p++)))
-		return nfs_stat_to_errno(status);
-	xdr_decode_fattr(p, fattr);
-	return 0;
-}
-
-/*
- * Decode diropres reply
- * LOOKUP, CREATE, MKDIR
- */
-static int
-nfs_xdr_diropres(struct rpc_rqst *req, __be32 *p, struct nfs_diropok *res)
-{
-	int	status;
-
-	if ((status = ntohl(*p++)))
-		return nfs_stat_to_errno(status);
-	p = xdr_decode_fhandle(p, res->fh);
-	xdr_decode_fattr(p, res->fattr);
-	return 0;
-}
-
-/*
- * Encode READLINK args
- */
-static int
-nfs_xdr_readlinkargs(struct rpc_rqst *req, __be32 *p, struct nfs_readlinkargs *args)
-{
-	struct rpc_auth	*auth = req->rq_cred->cr_auth;
-	unsigned int replen;
-
-	p = xdr_encode_fhandle(p, args->fh);
-	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
-
-	/* Inline the page array */
-	replen = (RPC_REPHDRSIZE + auth->au_rslack + NFS_readlinkres_sz) << 2;
-	xdr_inline_pages(&req->rq_rcv_buf, replen, args->pages, args->pgbase, args->pglen);
-	return 0;
-}
-
-/*
- * Decode READLINK reply
- */
-static int
-nfs_xdr_readlinkres(struct rpc_rqst *req, __be32 *p, void *dummy)
-{
-	struct xdr_buf *rcvbuf = &req->rq_rcv_buf;
-	struct kvec *iov = rcvbuf->head;
+	u32 recvd, pglen;
 	size_t hdrlen;
-	u32 len, recvd;
-	int	status;
 
-	if ((status = ntohl(*p++)))
-		return nfs_stat_to_errno(status);
-	/* Convert length of symlink */
-	len = ntohl(*p++);
-	if (len >= rcvbuf->page_len) {
-		dprintk("nfs: server returned giant symlink!\n");
-		return -ENAMETOOLONG;
-	}
-	hdrlen = (u8 *) p - (u8 *) iov->iov_base;
-	if (iov->iov_len < hdrlen) {
-		dprintk("NFS: READLINK reply header overflowed:"
-				"length %Zu > %Zu\n", hdrlen, iov->iov_len);
-		return -errno_NFSERR_IO;
-	} else if (iov->iov_len != hdrlen) {
-		dprintk("NFS: READLINK header is short. iovec will be shifted.\n");
-		xdr_shift_buf(rcvbuf, iov->iov_len - hdrlen);
-	}
-	recvd = req->rq_rcv_buf.len - hdrlen;
-	if (recvd < len) {
-		dprintk("NFS: server cheating in readlink reply: "
-				"count %u > recvd %u\n", len, recvd);
-		return -EIO;
-	}
+	pglen = xdr->buf->page_len;
+	hdrlen = (u8 *)xdr->p - (u8 *)xdr->iov->iov_base;
+	recvd = xdr->buf->len - hdrlen;
+	if (unlikely(pglen > recvd))
+		goto out_cheating;
+out:
+	xdr_read_pages(xdr, pglen);
+	return pglen;
+out_cheating:
+	dprintk("NFS: server cheating in readdir result: "
+		"pglen %u > recvd %u\n", pglen, recvd);
+	pglen = recvd;
+	goto out;
+}
 
-	xdr_terminate_string(rcvbuf, len);
-	return 0;
+static int nfs2_xdr_dec_readdirres(struct rpc_rqst *req,
+				   struct xdr_stream *xdr, void *__unused)
+{
+	enum nfs_stat status;
+	int error;
+
+	error = decode_stat(xdr, &status);
+	if (unlikely(error))
+		goto out;
+	if (status != NFS_OK)
+		goto out_default;
+	error = decode_readdirok(xdr);
+out:
+	return error;
+out_default:
+	return nfs_stat_to_errno(status);
 }
 
 /*
- * Decode WRITE reply
+ * 2.2.18.  statfsres
+ *
+ *	union statfsres (stat status) {
+ *	case NFS_OK:
+ *		struct {
+ *			unsigned tsize;
+ *			unsigned bsize;
+ *			unsigned blocks;
+ *			unsigned bfree;
+ *			unsigned bavail;
+ *		} info;
+ *	default:
+ *		void;
+ *	};
  */
-static int
-nfs_xdr_writeres(struct rpc_rqst *req, __be32 *p, struct nfs_writeres *res)
+static int decode_info(struct xdr_stream *xdr, struct nfs2_fsstat *result)
 {
-	res->verf->committed = NFS_FILE_SYNC;
-	return nfs_xdr_attrstat(req, p, res->fattr);
-}
+	__be32 *p;
 
-/*
- * Decode STATFS reply
- */
-static int
-nfs_xdr_statfsres(struct rpc_rqst *req, __be32 *p, struct nfs2_fsstat *res)
-{
-	int	status;
-
-	if ((status = ntohl(*p++)))
-		return nfs_stat_to_errno(status);
-
-	res->tsize  = ntohl(*p++);
-	res->bsize  = ntohl(*p++);
-	res->blocks = ntohl(*p++);
-	res->bfree  = ntohl(*p++);
-	res->bavail = ntohl(*p++);
+	p = xdr_inline_decode(xdr, NFS_info_sz << 2);
+	if (unlikely(p == NULL))
+		goto out_overflow;
+	result->tsize  = be32_to_cpup(p++);
+	result->bsize  = be32_to_cpup(p++);
+	result->blocks = be32_to_cpup(p++);
+	result->bfree  = be32_to_cpup(p++);
+	result->bavail = be32_to_cpup(p);
 	return 0;
+out_overflow:
+	print_overflow_msg(__func__, xdr);
+	return -EIO;
 }
 
+static int nfs2_xdr_dec_statfsres(struct rpc_rqst *req, struct xdr_stream *xdr,
+				  struct nfs2_fsstat *result)
+{
+	enum nfs_stat status;
+	int error;
+
+	error = decode_stat(xdr, &status);
+	if (unlikely(error))
+		goto out;
+	if (status != NFS_OK)
+		goto out_default;
+	error = decode_info(xdr, result);
+out:
+	return error;
+out_default:
+	return nfs_stat_to_errno(status);
+}
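Every one of the new nfs2_xdr_dec_*res() helpers above follows the same shape: decode the status word, decode the body only on NFS_OK, and otherwise map the status to a local errno. A minimal userspace sketch of that shape, using a flat buffer and ntohl() in place of the kernel's xdr_stream and be32_to_cpup(); all sketch_* names are hypothetical, and the real code maps the status through nfs_stat_to_errno() instead of returning -EIO directly:

	#include <errno.h>
	#include <stddef.h>
	#include <stdint.h>
	#include <arpa/inet.h>		/* ntohl() stands in for be32_to_cpup() */

	struct sketch_fsstat { uint32_t tsize, bsize, blocks, bfree, bavail; };

	/* Decode a STATFS-style reply: status word first, then five
	 * 32-bit counters only when the status is NFS_OK (0). */
	static int sketch_dec_statfsres(const uint32_t *buf, size_t words,
					struct sketch_fsstat *result)
	{
		if (words < 1)
			return -EIO;		/* ran off the end of the reply */
		if (ntohl(buf[0]) != 0)
			return -EIO;		/* real code: nfs_stat_to_errno() */
		if (words < 6)
			return -EIO;		/* the body must fit as well */
		result->tsize  = ntohl(buf[1]);
		result->bsize  = ntohl(buf[2]);
		result->blocks = ntohl(buf[3]);
		result->bfree  = ntohl(buf[4]);
		result->bavail = ntohl(buf[5]);
		return 0;
	}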
+
+
 /*
  * We need to translate between nfs status return values and
  * the local errno values which may not be the same.
  */
-static struct {
+static const struct {
 	int stat;
 	int errno;
 } nfs_errtbl[] = {
@@ -678,28 +1102,30 @@
 	{ -1,			-EIO		}
 };
 
-/*
- * Convert an NFS error code to a local one.
- * This one is used jointly by NFSv2 and NFSv3.
+/**
+ * nfs_stat_to_errno - convert an NFS status code to a local errno
+ * @status: NFS status code to convert
+ *
+ * Returns a local errno value, or -EIO if the NFS status code is
+ * not recognized.  This function is used jointly by NFSv2 and NFSv3.
  */
-int
-nfs_stat_to_errno(int stat)
+int nfs_stat_to_errno(enum nfs_stat status)
 {
 	int i;
 
 	for (i = 0; nfs_errtbl[i].stat != -1; i++) {
-		if (nfs_errtbl[i].stat == stat)
+		if (nfs_errtbl[i].stat == (int)status)
 			return nfs_errtbl[i].errno;
 	}
-	dprintk("nfs_stat_to_errno: bad nfs status return value: %d\n", stat);
+	dprintk("NFS: Unrecognized nfs status value: %u\n", status);
 	return nfs_errtbl[i].errno;
 }
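The table above is terminated by a { -1, -EIO } sentinel rather than an explicit size, so the lookup loop can fall through to the last entry for any status it does not recognize. A compressed userspace sketch of the same pattern; only a few entries are shown and the sketch_* names are hypothetical:

	#include <errno.h>

	static const struct { int stat; int errno_val; } sketch_errtbl[] = {
		{ 0,	0	},	/* NFS_OK */
		{ 2,	-ENOENT	},	/* NFSERR_NOENT */
		{ 13,	-EACCES	},	/* NFSERR_ACCES */
		{ -1,	-EIO	},	/* sentinel: unknown status -> -EIO */
	};

	static int sketch_stat_to_errno(int status)
	{
		int i;

		for (i = 0; sketch_errtbl[i].stat != -1; i++) {
			if (sketch_errtbl[i].stat == status)
				return sketch_errtbl[i].errno_val;
		}
		return sketch_errtbl[i].errno_val;	/* loop stopped on the sentinel */
	}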
 
 #define PROC(proc, argtype, restype, timer)				\
 [NFSPROC_##proc] = {							\
 	.p_proc	    =  NFSPROC_##proc,					\
-	.p_encode   =  (kxdrproc_t) nfs_xdr_##argtype,			\
-	.p_decode   =  (kxdrproc_t) nfs_xdr_##restype,			\
+	.p_encode   =  (kxdreproc_t)nfs2_xdr_enc_##argtype,		\
+	.p_decode   =  (kxdrdproc_t)nfs2_xdr_dec_##restype,		\
 	.p_arglen   =  NFS_##argtype##_sz,				\
 	.p_replen   =  NFS_##restype##_sz,				\
 	.p_timer    =  timer,						\
@@ -707,21 +1133,21 @@
 	.p_name     =  #proc,						\
 	}
 struct rpc_procinfo	nfs_procedures[] = {
-    PROC(GETATTR,	fhandle,	attrstat, 1),
-    PROC(SETATTR,	sattrargs,	attrstat, 0),
-    PROC(LOOKUP,	diropargs,	diropres, 2),
-    PROC(READLINK,	readlinkargs,	readlinkres, 3),
-    PROC(READ,		readargs,	readres, 3),
-    PROC(WRITE,		writeargs,	writeres, 4),
-    PROC(CREATE,	createargs,	diropres, 0),
-    PROC(REMOVE,	removeargs,	stat, 0),
-    PROC(RENAME,	renameargs,	stat, 0),
-    PROC(LINK,		linkargs,	stat, 0),
-    PROC(SYMLINK,	symlinkargs,	stat, 0),
-    PROC(MKDIR,		createargs,	diropres, 0),
-    PROC(RMDIR,		diropargs,	stat, 0),
-    PROC(READDIR,	readdirargs,	readdirres, 3),
-    PROC(STATFS,	fhandle,	statfsres, 0),
+	PROC(GETATTR,	fhandle,	attrstat,	1),
+	PROC(SETATTR,	sattrargs,	attrstat,	0),
+	PROC(LOOKUP,	diropargs,	diropres,	2),
+	PROC(READLINK,	readlinkargs,	readlinkres,	3),
+	PROC(READ,	readargs,	readres,	3),
+	PROC(WRITE,	writeargs,	writeres,	4),
+	PROC(CREATE,	createargs,	diropres,	0),
+	PROC(REMOVE,	removeargs,	stat,		0),
+	PROC(RENAME,	renameargs,	stat,		0),
+	PROC(LINK,	linkargs,	stat,		0),
+	PROC(SYMLINK,	symlinkargs,	stat,		0),
+	PROC(MKDIR,	createargs,	diropres,	0),
+	PROC(RMDIR,	diropargs,	stat,		0),
+	PROC(READDIR,	readdirargs,	readdirres,	3),
+	PROC(STATFS,	fhandle,	statfsres,	0),
 };
 
 struct rpc_version		nfs_version2 = {
diff --git a/fs/nfs/nfs3xdr.c b/fs/nfs/nfs3xdr.c
index f6cc60f..01c5e8b 100644
--- a/fs/nfs/nfs3xdr.c
+++ b/fs/nfs/nfs3xdr.c
@@ -37,18 +37,16 @@
 #define NFS3_filename_sz	(1+(NFS3_MAXNAMLEN>>2))
 #define NFS3_path_sz		(1+(NFS3_MAXPATHLEN>>2))
 #define NFS3_fattr_sz		(21)
-#define NFS3_wcc_attr_sz		(6)
+#define NFS3_cookieverf_sz	(NFS3_COOKIEVERFSIZE>>2)
+#define NFS3_wcc_attr_sz	(6)
 #define NFS3_pre_op_attr_sz	(1+NFS3_wcc_attr_sz)
 #define NFS3_post_op_attr_sz	(1+NFS3_fattr_sz)
-#define NFS3_wcc_data_sz		(NFS3_pre_op_attr_sz+NFS3_post_op_attr_sz)
-#define NFS3_fsstat_sz		
-#define NFS3_fsinfo_sz		
-#define NFS3_pathconf_sz		
-#define NFS3_entry_sz		(NFS3_filename_sz+3)
-
-#define NFS3_sattrargs_sz	(NFS3_fh_sz+NFS3_sattr_sz+3)
+#define NFS3_wcc_data_sz	(NFS3_pre_op_attr_sz+NFS3_post_op_attr_sz)
 #define NFS3_diropargs_sz	(NFS3_fh_sz+NFS3_filename_sz)
-#define NFS3_removeargs_sz	(NFS3_fh_sz+NFS3_filename_sz)
+
+#define NFS3_getattrargs_sz	(NFS3_fh_sz)
+#define NFS3_setattrargs_sz	(NFS3_fh_sz+NFS3_sattr_sz+3)
+#define NFS3_lookupargs_sz	(NFS3_fh_sz+NFS3_filename_sz)
 #define NFS3_accessargs_sz	(NFS3_fh_sz+1)
 #define NFS3_readlinkargs_sz	(NFS3_fh_sz)
 #define NFS3_readargs_sz	(NFS3_fh_sz+3)
@@ -57,14 +55,16 @@
 #define NFS3_mkdirargs_sz	(NFS3_diropargs_sz+NFS3_sattr_sz)
 #define NFS3_symlinkargs_sz	(NFS3_diropargs_sz+1+NFS3_sattr_sz)
 #define NFS3_mknodargs_sz	(NFS3_diropargs_sz+2+NFS3_sattr_sz)
+#define NFS3_removeargs_sz	(NFS3_fh_sz+NFS3_filename_sz)
 #define NFS3_renameargs_sz	(NFS3_diropargs_sz+NFS3_diropargs_sz)
 #define NFS3_linkargs_sz		(NFS3_fh_sz+NFS3_diropargs_sz)
-#define NFS3_readdirargs_sz	(NFS3_fh_sz+2)
+#define NFS3_readdirargs_sz	(NFS3_fh_sz+NFS3_cookieverf_sz+3)
+#define NFS3_readdirplusargs_sz	(NFS3_fh_sz+NFS3_cookieverf_sz+4)
 #define NFS3_commitargs_sz	(NFS3_fh_sz+3)
 
-#define NFS3_attrstat_sz	(1+NFS3_fattr_sz)
-#define NFS3_wccstat_sz		(1+NFS3_wcc_data_sz)
-#define NFS3_removeres_sz	(NFS3_wccstat_sz)
+#define NFS3_getattrres_sz	(1+NFS3_fattr_sz)
+#define NFS3_setattrres_sz	(1+NFS3_wcc_data_sz)
+#define NFS3_removeres_sz	(NFS3_setattrres_sz)
 #define NFS3_lookupres_sz	(1+NFS3_fh_sz+(2 * NFS3_post_op_attr_sz))
 #define NFS3_accessres_sz	(1+NFS3_post_op_attr_sz+1)
 #define NFS3_readlinkres_sz	(1+NFS3_post_op_attr_sz+1)
@@ -100,1079 +100,2362 @@
 	[NF3FIFO] = S_IFIFO,
 };
 
+/*
+ * While encoding arguments, set up the reply buffer in advance to
+ * receive reply data directly into the page cache.
+ */
+static void prepare_reply_buffer(struct rpc_rqst *req, struct page **pages,
+				 unsigned int base, unsigned int len,
+				 unsigned int bufsize)
+{
+	struct rpc_auth	*auth = req->rq_cred->cr_auth;
+	unsigned int replen;
+
+	replen = RPC_REPHDRSIZE + auth->au_rslack + bufsize;
+	xdr_inline_pages(&req->rq_rcv_buf, replen << 2, pages, base, len);
+}
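All of the *_sz constants used here count 32-bit XDR words, which is why the total is shifted left by two before being handed to xdr_inline_pages(). A worked example of that arithmetic for a READLINK reply; the two RPC numbers are illustrative stand-ins, not the real RPC_REPHDRSIZE or au_rslack values:

	#include <stdio.h>

	int main(void)
	{
		unsigned int rpc_rephdrsize = 2;	/* RPC reply header, in words (example) */
		unsigned int au_rslack = 2;		/* per-flavor slack, in words (example) */
		unsigned int readlinkres_sz = 1 + 22 + 1; /* status + post_op_attr + length */
		unsigned int replen_words, replen_bytes;

		replen_words = rpc_rephdrsize + au_rslack + readlinkres_sz;
		replen_bytes = replen_words << 2;	/* 4 bytes per XDR word */
		printf("fixed reply area: %u words = %u bytes\n",
		       replen_words, replen_bytes);
		return 0;
	}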
+
+/*
+ * Handle decode buffer overflows out-of-line.
+ */
 static void print_overflow_msg(const char *func, const struct xdr_stream *xdr)
 {
-	dprintk("nfs: %s: prematurely hit end of receive buffer. "
+	dprintk("NFS: %s prematurely hit the end of our receive buffer. "
 		"Remaining buffer length is %tu words.\n",
 		func, xdr->end - xdr->p);
 }
 
+
 /*
- * Common NFS XDR functions as inlines
+ * Encode/decode NFSv3 basic data types
+ *
+ * Basic NFSv3 data types are defined in section 2.5 of RFC 1813:
+ * "NFS Version 3 Protocol Specification".
+ *
+ * Not all basic data types have their own encoding and decoding
+ * functions.  For run-time efficiency, some data types are encoded
+ * or decoded inline.
  */
-static inline __be32 *
-xdr_encode_fhandle(__be32 *p, const struct nfs_fh *fh)
+
+static void encode_uint32(struct xdr_stream *xdr, u32 value)
 {
-	return xdr_encode_array(p, fh->data, fh->size);
+	__be32 *p = xdr_reserve_space(xdr, 4);
+	*p = cpu_to_be32(value);
 }
 
-static inline __be32 *
-xdr_decode_fhandle(__be32 *p, struct nfs_fh *fh)
-{
-	if ((fh->size = ntohl(*p++)) <= NFS3_FHSIZE) {
-		memcpy(fh->data, p, fh->size);
-		return p + XDR_QUADLEN(fh->size);
-	}
-	return NULL;
-}
-
-static inline __be32 *
-xdr_decode_fhandle_stream(struct xdr_stream *xdr, struct nfs_fh *fh)
+static int decode_uint32(struct xdr_stream *xdr, u32 *value)
 {
 	__be32 *p;
+
 	p = xdr_inline_decode(xdr, 4);
-	if (unlikely(!p))
+	if (unlikely(p == NULL))
 		goto out_overflow;
-	fh->size = ntohl(*p++);
-
-	if (fh->size <= NFS3_FHSIZE) {
-		p = xdr_inline_decode(xdr, fh->size);
-		if (unlikely(!p))
-			goto out_overflow;
-		memcpy(fh->data, p, fh->size);
-		return p + XDR_QUADLEN(fh->size);
-	}
-	return NULL;
-
+	*value = be32_to_cpup(p);
+	return 0;
 out_overflow:
 	print_overflow_msg(__func__, xdr);
-	return ERR_PTR(-EIO);
+	return -EIO;
+}
+
+static int decode_uint64(struct xdr_stream *xdr, u64 *value)
+{
+	__be32 *p;
+
+	p = xdr_inline_decode(xdr, 8);
+	if (unlikely(p == NULL))
+		goto out_overflow;
+	xdr_decode_hyper(p, value);
+	return 0;
+out_overflow:
+	print_overflow_msg(__func__, xdr);
+	return -EIO;
 }
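decode_uint64() leans on xdr_decode_hyper() to rebuild a 64-bit value from two big-endian words, most significant word first. A one-line userspace equivalent, assuming ntohl() as the byte swap:

	#include <stdint.h>
	#include <arpa/inet.h>

	/* XDR "hyper": two 32-bit big-endian words, high word first. */
	static uint64_t sketch_decode_hyper(const uint32_t *p)
	{
		return ((uint64_t)ntohl(p[0]) << 32) | ntohl(p[1]);
	}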
 
 /*
- * Encode/decode time.
+ * fileid3
+ *
+ *	typedef uint64 fileid3;
  */
-static inline __be32 *
-xdr_encode_time3(__be32 *p, struct timespec *timep)
+static __be32 *xdr_decode_fileid3(__be32 *p, u64 *fileid)
 {
-	*p++ = htonl(timep->tv_sec);
-	*p++ = htonl(timep->tv_nsec);
-	return p;
+	return xdr_decode_hyper(p, fileid);
 }
 
-static inline __be32 *
-xdr_decode_time3(__be32 *p, struct timespec *timep)
+static int decode_fileid3(struct xdr_stream *xdr, u64 *fileid)
 {
-	timep->tv_sec = ntohl(*p++);
-	timep->tv_nsec = ntohl(*p++);
-	return p;
+	return decode_uint64(xdr, fileid);
 }
 
-static __be32 *
-xdr_decode_fattr(__be32 *p, struct nfs_fattr *fattr)
+/*
+ * filename3
+ *
+ *	typedef string filename3<>;
+ */
+static void encode_filename3(struct xdr_stream *xdr,
+			     const char *name, u32 length)
 {
-	unsigned int	type, major, minor;
-	umode_t		fmode;
+	__be32 *p;
 
-	type = ntohl(*p++);
+	BUG_ON(length > NFS3_MAXNAMLEN);
+	p = xdr_reserve_space(xdr, 4 + length);
+	xdr_encode_opaque(p, name, length);
+}
+
+static int decode_inline_filename3(struct xdr_stream *xdr,
+				   const char **name, u32 *length)
+{
+	__be32 *p;
+	u32 count;
+
+	p = xdr_inline_decode(xdr, 4);
+	if (unlikely(p == NULL))
+		goto out_overflow;
+	count = be32_to_cpup(p);
+	if (count > NFS3_MAXNAMLEN)
+		goto out_nametoolong;
+	p = xdr_inline_decode(xdr, count);
+	if (unlikely(p == NULL))
+		goto out_overflow;
+	*name = (const char *)p;
+	*length = count;
+	return 0;
+
+out_nametoolong:
+	dprintk("NFS: returned filename too long: %u\n", count);
+	return -ENAMETOOLONG;
+out_overflow:
+	print_overflow_msg(__func__, xdr);
+	return -EIO;
+}
+
+/*
+ * nfspath3
+ *
+ *	typedef string nfspath3<>;
+ */
+static void encode_nfspath3(struct xdr_stream *xdr, struct page **pages,
+			    const u32 length)
+{
+	BUG_ON(length > NFS3_MAXPATHLEN);
+	encode_uint32(xdr, length);
+	xdr_write_pages(xdr, pages, 0, length);
+}
+
+static int decode_nfspath3(struct xdr_stream *xdr)
+{
+	u32 recvd, count;
+	size_t hdrlen;
+	__be32 *p;
+
+	p = xdr_inline_decode(xdr, 4);
+	if (unlikely(p == NULL))
+		goto out_overflow;
+	count = be32_to_cpup(p);
+	if (unlikely(count >= xdr->buf->page_len || count > NFS3_MAXPATHLEN))
+		goto out_nametoolong;
+	hdrlen = (u8 *)xdr->p - (u8 *)xdr->iov->iov_base;
+	recvd = xdr->buf->len - hdrlen;
+	if (unlikely(count > recvd))
+		goto out_cheating;
+
+	xdr_read_pages(xdr, count);
+	xdr_terminate_string(xdr->buf, count);
+	return 0;
+
+out_nametoolong:
+	dprintk("NFS: returned pathname too long: %u\n", count);
+	return -ENAMETOOLONG;
+out_cheating:
+	dprintk("NFS: server cheating in pathname result: "
+		"count %u > recvd %u\n", count, recvd);
+	return -EIO;
+out_overflow:
+	print_overflow_msg(__func__, xdr);
+	return -EIO;
+}
+
+/*
+ * cookie3
+ *
+ *	typedef uint64 cookie3
+ */
+static __be32 *xdr_encode_cookie3(__be32 *p, u64 cookie)
+{
+	return xdr_encode_hyper(p, cookie);
+}
+
+static int decode_cookie3(struct xdr_stream *xdr, u64 *cookie)
+{
+	return decode_uint64(xdr, cookie);
+}
+
+/*
+ * cookieverf3
+ *
+ *	typedef opaque cookieverf3[NFS3_COOKIEVERFSIZE];
+ */
+static __be32 *xdr_encode_cookieverf3(__be32 *p, const __be32 *verifier)
+{
+	memcpy(p, verifier, NFS3_COOKIEVERFSIZE);
+	return p + XDR_QUADLEN(NFS3_COOKIEVERFSIZE);
+}
+
+static int decode_cookieverf3(struct xdr_stream *xdr, __be32 *verifier)
+{
+	__be32 *p;
+
+	p = xdr_inline_decode(xdr, NFS3_COOKIEVERFSIZE);
+	if (unlikely(p == NULL))
+		goto out_overflow;
+	memcpy(verifier, p, NFS3_COOKIEVERFSIZE);
+	return 0;
+out_overflow:
+	print_overflow_msg(__func__, xdr);
+	return -EIO;
+}
+
+/*
+ * createverf3
+ *
+ *	typedef opaque createverf3[NFS3_CREATEVERFSIZE];
+ */
+static void encode_createverf3(struct xdr_stream *xdr, const __be32 *verifier)
+{
+	__be32 *p;
+
+	p = xdr_reserve_space(xdr, NFS3_CREATEVERFSIZE);
+	memcpy(p, verifier, NFS3_CREATEVERFSIZE);
+}
+
+static int decode_writeverf3(struct xdr_stream *xdr, __be32 *verifier)
+{
+	__be32 *p;
+
+	p = xdr_inline_decode(xdr, NFS3_WRITEVERFSIZE);
+	if (unlikely(p == NULL))
+		goto out_overflow;
+	memcpy(verifier, p, NFS3_WRITEVERFSIZE);
+	return 0;
+out_overflow:
+	print_overflow_msg(__func__, xdr);
+	return -EIO;
+}
+
+/*
+ * size3
+ *
+ *	typedef uint64 size3;
+ */
+static __be32 *xdr_decode_size3(__be32 *p, u64 *size)
+{
+	return xdr_decode_hyper(p, size);
+}
+
+/*
+ * nfsstat3
+ *
+ *	enum nfsstat3 {
+ *		NFS3_OK = 0,
+ *		...
+ *	}
+ */
+#define NFS3_OK		NFS_OK
+
+static int decode_nfsstat3(struct xdr_stream *xdr, enum nfs_stat *status)
+{
+	__be32 *p;
+
+	p = xdr_inline_decode(xdr, 4);
+	if (unlikely(p == NULL))
+		goto out_overflow;
+	*status = be32_to_cpup(p);
+	return 0;
+out_overflow:
+	print_overflow_msg(__func__, xdr);
+	return -EIO;
+}
+
+/*
+ * ftype3
+ *
+ *	enum ftype3 {
+ *		NF3REG	= 1,
+ *		NF3DIR	= 2,
+ *		NF3BLK	= 3,
+ *		NF3CHR	= 4,
+ *		NF3LNK	= 5,
+ *		NF3SOCK	= 6,
+ *		NF3FIFO	= 7
+ *	};
+ */
+static void encode_ftype3(struct xdr_stream *xdr, const u32 type)
+{
+	BUG_ON(type > NF3FIFO);
+	encode_uint32(xdr, type);
+}
+
+static __be32 *xdr_decode_ftype3(__be32 *p, umode_t *mode)
+{
+	u32 type;
+
+	type = be32_to_cpup(p++);
 	if (type > NF3FIFO)
 		type = NF3NON;
-	fmode = nfs_type2fmt[type];
-	fattr->mode = (ntohl(*p++) & ~S_IFMT) | fmode;
-	fattr->nlink = ntohl(*p++);
-	fattr->uid = ntohl(*p++);
-	fattr->gid = ntohl(*p++);
-	p = xdr_decode_hyper(p, &fattr->size);
-	p = xdr_decode_hyper(p, &fattr->du.nfs3.used);
+	*mode = nfs_type2fmt[type];
+	return p;
+}
 
-	/* Turn remote device info into Linux-specific dev_t */
-	major = ntohl(*p++);
-	minor = ntohl(*p++);
-	fattr->rdev = MKDEV(major, minor);
-	if (MAJOR(fattr->rdev) != major || MINOR(fattr->rdev) != minor)
-		fattr->rdev = 0;
+/*
+ * specdata3
+ *
+ *     struct specdata3 {
+ *             uint32  specdata1;
+ *             uint32  specdata2;
+ *     };
+ */
+static void encode_specdata3(struct xdr_stream *xdr, const dev_t rdev)
+{
+	__be32 *p;
+
+	p = xdr_reserve_space(xdr, 8);
+	*p++ = cpu_to_be32(MAJOR(rdev));
+	*p = cpu_to_be32(MINOR(rdev));
+}
+
+static __be32 *xdr_decode_specdata3(__be32 *p, dev_t *rdev)
+{
+	unsigned int major, minor;
+
+	major = be32_to_cpup(p++);
+	minor = be32_to_cpup(p++);
+	*rdev = MKDEV(major, minor);
+	if (MAJOR(*rdev) != major || MINOR(*rdev) != minor)
+		*rdev = 0;
+	return p;
+}
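The round-trip check at the end of xdr_decode_specdata3() catches device numbers the local dev_t cannot represent: if packing and then unpacking does not give back the server's major/minor pair, rdev is zeroed rather than silently truncated. A userspace sketch with a simplified 20-bit-minor packing; the real MKDEV()/MAJOR()/MINOR() macros come from the kernel headers:

	#include <stdint.h>

	#define SKETCH_MINORBITS	20
	#define SKETCH_MKDEV(ma, mi)	(((uint32_t)(ma) << SKETCH_MINORBITS) | (mi))
	#define SKETCH_MAJOR(dev)	((dev) >> SKETCH_MINORBITS)
	#define SKETCH_MINOR(dev)	((dev) & ((1u << SKETCH_MINORBITS) - 1))

	static uint32_t sketch_decode_specdata(uint32_t major, uint32_t minor)
	{
		uint32_t rdev = SKETCH_MKDEV(major, minor);

		/* Numbers that do not survive the round trip cannot be
		 * represented locally: report 0 instead of a wrong device. */
		if (SKETCH_MAJOR(rdev) != major || SKETCH_MINOR(rdev) != minor)
			rdev = 0;
		return rdev;
	}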
+
+/*
+ * nfs_fh3
+ *
+ *	struct nfs_fh3 {
+ *		opaque       data<NFS3_FHSIZE>;
+ *	};
+ */
+static void encode_nfs_fh3(struct xdr_stream *xdr, const struct nfs_fh *fh)
+{
+	__be32 *p;
+
+	BUG_ON(fh->size > NFS3_FHSIZE);
+	p = xdr_reserve_space(xdr, 4 + fh->size);
+	xdr_encode_opaque(p, fh->data, fh->size);
+}
+
+static int decode_nfs_fh3(struct xdr_stream *xdr, struct nfs_fh *fh)
+{
+	u32 length;
+	__be32 *p;
+
+	p = xdr_inline_decode(xdr, 4);
+	if (unlikely(p == NULL))
+		goto out_overflow;
+	length = be32_to_cpup(p++);
+	if (unlikely(length > NFS3_FHSIZE))
+		goto out_toobig;
+	p = xdr_inline_decode(xdr, length);
+	if (unlikely(p == NULL))
+		goto out_overflow;
+	fh->size = length;
+	memcpy(fh->data, p, length);
+	return 0;
+out_toobig:
+	dprintk("NFS: file handle size (%u) too big\n", length);
+	return -E2BIG;
+out_overflow:
+	print_overflow_msg(__func__, xdr);
+	return -EIO;
+}
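decode_nfs_fh3() is the usual counted-opaque dance: read the length, refuse anything larger than the fixed-size handle buffer, and only then copy. A flat-buffer sketch of the same checks; the sketch_* names and the 64-byte cap mirror NFS3_FHSIZE but are purely illustrative:

	#include <errno.h>
	#include <stddef.h>
	#include <stdint.h>
	#include <string.h>
	#include <arpa/inet.h>

	#define SKETCH_FHSIZE	64	/* NFS3_FHSIZE in the real code */

	struct sketch_fh { unsigned int size; unsigned char data[SKETCH_FHSIZE]; };

	static int sketch_decode_fh(const unsigned char *buf, size_t buflen,
				    struct sketch_fh *fh)
	{
		uint32_t length;

		if (buflen < 4)
			return -EIO;		/* ran past the end of the reply */
		memcpy(&length, buf, 4);
		length = ntohl(length);
		if (length > SKETCH_FHSIZE)
			return -E2BIG;		/* server handed back a bogus handle */
		if (buflen - 4 < length)
			return -EIO;		/* the handle body must have arrived */
		fh->size = length;
		memcpy(fh->data, buf + 4, length);
		return 0;
	}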
+
+static void zero_nfs_fh3(struct nfs_fh *fh)
+{
+	memset(fh, 0, sizeof(*fh));
+}
+
+/*
+ * nfstime3
+ *
+ *	struct nfstime3 {
+ *		uint32	seconds;
+ *		uint32	nseconds;
+ *	};
+ */
+static __be32 *xdr_encode_nfstime3(__be32 *p, const struct timespec *timep)
+{
+	*p++ = cpu_to_be32(timep->tv_sec);
+	*p++ = cpu_to_be32(timep->tv_nsec);
+	return p;
+}
+
+static __be32 *xdr_decode_nfstime3(__be32 *p, struct timespec *timep)
+{
+	timep->tv_sec = be32_to_cpup(p++);
+	timep->tv_nsec = be32_to_cpup(p++);
+	return p;
+}
+
+/*
+ * sattr3
+ *
+ *	enum time_how {
+ *		DONT_CHANGE		= 0,
+ *		SET_TO_SERVER_TIME	= 1,
+ *		SET_TO_CLIENT_TIME	= 2
+ *	};
+ *
+ *	union set_mode3 switch (bool set_it) {
+ *	case TRUE:
+ *		mode3	mode;
+ *	default:
+ *		void;
+ *	};
+ *
+ *	union set_uid3 switch (bool set_it) {
+ *	case TRUE:
+ *		uid3	uid;
+ *	default:
+ *		void;
+ *	};
+ *
+ *	union set_gid3 switch (bool set_it) {
+ *	case TRUE:
+ *		gid3	gid;
+ *	default:
+ *		void;
+ *	};
+ *
+ *	union set_size3 switch (bool set_it) {
+ *	case TRUE:
+ *		size3	size;
+ *	default:
+ *		void;
+ *	};
+ *
+ *	union set_atime switch (time_how set_it) {
+ *	case SET_TO_CLIENT_TIME:
+ *		nfstime3	atime;
+ *	default:
+ *		void;
+ *	};
+ *
+ *	union set_mtime switch (time_how set_it) {
+ *	case SET_TO_CLIENT_TIME:
+ *		nfstime3  mtime;
+ *	default:
+ *		void;
+ *	};
+ *
+ *	struct sattr3 {
+ *		set_mode3	mode;
+ *		set_uid3	uid;
+ *		set_gid3	gid;
+ *		set_size3	size;
+ *		set_atime	atime;
+ *		set_mtime	mtime;
+ *	};
+ */
+static void encode_sattr3(struct xdr_stream *xdr, const struct iattr *attr)
+{
+	u32 nbytes;
+	__be32 *p;
+
+	/*
+	 * In order to make only a single xdr_reserve_space() call,
+	 * pre-compute the total number of bytes to be reserved.
+	 * Six boolean values, one for each set_foo field, are always
+	 * present in the encoded result, so start there.
+	 */
+	nbytes = 6 * 4;
+	if (attr->ia_valid & ATTR_MODE)
+		nbytes += 4;
+	if (attr->ia_valid & ATTR_UID)
+		nbytes += 4;
+	if (attr->ia_valid & ATTR_GID)
+		nbytes += 4;
+	if (attr->ia_valid & ATTR_SIZE)
+		nbytes += 8;
+	if (attr->ia_valid & ATTR_ATIME_SET)
+		nbytes += 8;
+	if (attr->ia_valid & ATTR_MTIME_SET)
+		nbytes += 8;
+	p = xdr_reserve_space(xdr, nbytes);
+
+	if (attr->ia_valid & ATTR_MODE) {
+		*p++ = xdr_one;
+		*p++ = cpu_to_be32(attr->ia_mode & S_IALLUGO);
+	} else
+		*p++ = xdr_zero;
+
+	if (attr->ia_valid & ATTR_UID) {
+		*p++ = xdr_one;
+		*p++ = cpu_to_be32(attr->ia_uid);
+	} else
+		*p++ = xdr_zero;
+
+	if (attr->ia_valid & ATTR_GID) {
+		*p++ = xdr_one;
+		*p++ = cpu_to_be32(attr->ia_gid);
+	} else
+		*p++ = xdr_zero;
+
+	if (attr->ia_valid & ATTR_SIZE) {
+		*p++ = xdr_one;
+		p = xdr_encode_hyper(p, (u64)attr->ia_size);
+	} else
+		*p++ = xdr_zero;
+
+	if (attr->ia_valid & ATTR_ATIME_SET) {
+		*p++ = xdr_two;
+		p = xdr_encode_nfstime3(p, &attr->ia_atime);
+	} else if (attr->ia_valid & ATTR_ATIME) {
+		*p++ = xdr_one;
+	} else
+		*p++ = xdr_zero;
+
+	if (attr->ia_valid & ATTR_MTIME_SET) {
+		*p++ = xdr_two;
+		xdr_encode_nfstime3(p, &attr->ia_mtime);
+	} else if (attr->ia_valid & ATTR_MTIME) {
+		*p = xdr_one;
+	} else
+		*p = xdr_zero;
+}
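As the comment above notes, the reservation is pre-computed so that xdr_reserve_space() is called exactly once. For a chmod()-plus-truncate style SETATTR (ATTR_MODE and ATTR_SIZE set, times left alone) the arithmetic works out as below; the numbers are just a worked example of the rules in encode_sattr3():

	#include <stdio.h>

	int main(void)
	{
		unsigned int nbytes = 6 * 4;	/* the six set_* discriminants */

		nbytes += 4;			/* mode3 value */
		nbytes += 8;			/* size3 (64-bit hyper) value */
		printf("xdr_reserve_space() needs %u bytes\n", nbytes);	/* 36 */
		return 0;
	}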
+
+/*
+ * fattr3
+ *
+ *	struct fattr3 {
+ *		ftype3		type;
+ *		mode3		mode;
+ *		uint32		nlink;
+ *		uid3		uid;
+ *		gid3		gid;
+ *		size3		size;
+ *		size3		used;
+ *		specdata3	rdev;
+ *		uint64		fsid;
+ *		fileid3		fileid;
+ *		nfstime3	atime;
+ *		nfstime3	mtime;
+ *		nfstime3	ctime;
+ *	};
+ */
+static int decode_fattr3(struct xdr_stream *xdr, struct nfs_fattr *fattr)
+{
+	umode_t fmode;
+	__be32 *p;
+
+	p = xdr_inline_decode(xdr, NFS3_fattr_sz << 2);
+	if (unlikely(p == NULL))
+		goto out_overflow;
+
+	p = xdr_decode_ftype3(p, &fmode);
+
+	fattr->mode = (be32_to_cpup(p++) & ~S_IFMT) | fmode;
+	fattr->nlink = be32_to_cpup(p++);
+	fattr->uid = be32_to_cpup(p++);
+	fattr->gid = be32_to_cpup(p++);
+
+	p = xdr_decode_size3(p, &fattr->size);
+	p = xdr_decode_size3(p, &fattr->du.nfs3.used);
+	p = xdr_decode_specdata3(p, &fattr->rdev);
 
 	p = xdr_decode_hyper(p, &fattr->fsid.major);
 	fattr->fsid.minor = 0;
-	p = xdr_decode_hyper(p, &fattr->fileid);
-	p = xdr_decode_time3(p, &fattr->atime);
-	p = xdr_decode_time3(p, &fattr->mtime);
-	p = xdr_decode_time3(p, &fattr->ctime);
 
-	/* Update the mode bits */
+	p = xdr_decode_fileid3(p, &fattr->fileid);
+	p = xdr_decode_nfstime3(p, &fattr->atime);
+	p = xdr_decode_nfstime3(p, &fattr->mtime);
+	xdr_decode_nfstime3(p, &fattr->ctime);
+
 	fattr->valid |= NFS_ATTR_FATTR_V3;
-	return p;
+	return 0;
+out_overflow:
+	print_overflow_msg(__func__, xdr);
+	return -EIO;
 }
 
-static inline __be32 *
-xdr_encode_sattr(__be32 *p, struct iattr *attr)
+/*
+ * post_op_attr
+ *
+ *	union post_op_attr switch (bool attributes_follow) {
+ *	case TRUE:
+ *		fattr3	attributes;
+ *	case FALSE:
+ *		void;
+ *	};
+ */
+static int decode_post_op_attr(struct xdr_stream *xdr, struct nfs_fattr *fattr)
 {
-	if (attr->ia_valid & ATTR_MODE) {
-		*p++ = xdr_one;
-		*p++ = htonl(attr->ia_mode & S_IALLUGO);
-	} else {
-		*p++ = xdr_zero;
-	}
-	if (attr->ia_valid & ATTR_UID) {
-		*p++ = xdr_one;
-		*p++ = htonl(attr->ia_uid);
-	} else {
-		*p++ = xdr_zero;
-	}
-	if (attr->ia_valid & ATTR_GID) {
-		*p++ = xdr_one;
-		*p++ = htonl(attr->ia_gid);
-	} else {
-		*p++ = xdr_zero;
-	}
-	if (attr->ia_valid & ATTR_SIZE) {
-		*p++ = xdr_one;
-		p = xdr_encode_hyper(p, (__u64) attr->ia_size);
-	} else {
-		*p++ = xdr_zero;
-	}
-	if (attr->ia_valid & ATTR_ATIME_SET) {
-		*p++ = xdr_two;
-		p = xdr_encode_time3(p, &attr->ia_atime);
-	} else if (attr->ia_valid & ATTR_ATIME) {
-		*p++ = xdr_one;
-	} else {
-		*p++ = xdr_zero;
-	}
-	if (attr->ia_valid & ATTR_MTIME_SET) {
-		*p++ = xdr_two;
-		p = xdr_encode_time3(p, &attr->ia_mtime);
-	} else if (attr->ia_valid & ATTR_MTIME) {
-		*p++ = xdr_one;
-	} else {
-		*p++ = xdr_zero;
-	}
-	return p;
+	__be32 *p;
+
+	p = xdr_inline_decode(xdr, 4);
+	if (unlikely(p == NULL))
+		goto out_overflow;
+	if (*p != xdr_zero)
+		return decode_fattr3(xdr, fattr);
+	return 0;
+out_overflow:
+	print_overflow_msg(__func__, xdr);
+	return -EIO;
 }
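post_op_attr, pre_op_attr and post_op_fh3 all decode the same way: one boolean word decides whether a body follows at all. A self-contained sketch of that optional-body pattern, with the attribute body shrunk to a single word; sketch_* names are hypothetical and decode_fattr3() handles the real body:

	#include <errno.h>
	#include <stddef.h>
	#include <stdint.h>
	#include <arpa/inet.h>

	static int sketch_decode_optional(const uint32_t *buf, size_t words,
					  uint32_t *attr, int *present)
	{
		if (words < 1)
			return -EIO;		/* discriminant must be there */
		*present = (ntohl(buf[0]) != 0);
		if (!*present)
			return 0;		/* FALSE arm: nothing follows */
		if (words < 2)
			return -EIO;		/* TRUE arm: the body must fit */
		*attr = ntohl(buf[1]);
		return 0;
	}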
 
-static inline __be32 *
-xdr_decode_wcc_attr(__be32 *p, struct nfs_fattr *fattr)
+/*
+ * wcc_attr
+ *	struct wcc_attr {
+ *		size3		size;
+ *		nfstime3	mtime;
+ *		nfstime3	ctime;
+ *	};
+ */
+static int decode_wcc_attr(struct xdr_stream *xdr, struct nfs_fattr *fattr)
 {
-	p = xdr_decode_hyper(p, &fattr->pre_size);
-	p = xdr_decode_time3(p, &fattr->pre_mtime);
-	p = xdr_decode_time3(p, &fattr->pre_ctime);
+	__be32 *p;
+
+	p = xdr_inline_decode(xdr, NFS3_wcc_attr_sz << 2);
+	if (unlikely(p == NULL))
+		goto out_overflow;
+
 	fattr->valid |= NFS_ATTR_FATTR_PRESIZE
 		| NFS_ATTR_FATTR_PREMTIME
 		| NFS_ATTR_FATTR_PRECTIME;
-	return p;
+
+	p = xdr_decode_size3(p, &fattr->pre_size);
+	p = xdr_decode_nfstime3(p, &fattr->pre_mtime);
+	xdr_decode_nfstime3(p, &fattr->pre_ctime);
+
+	return 0;
+out_overflow:
+	print_overflow_msg(__func__, xdr);
+	return -EIO;
 }
 
-static inline __be32 *
-xdr_decode_post_op_attr(__be32 *p, struct nfs_fattr *fattr)
-{
-	if (*p++)
-		p = xdr_decode_fattr(p, fattr);
-	return p;
-}
-
-static inline __be32 *
-xdr_decode_post_op_attr_stream(struct xdr_stream *xdr, struct nfs_fattr *fattr)
+/*
+ * pre_op_attr
+ *	union pre_op_attr switch (bool attributes_follow) {
+ *	case TRUE:
+ *		wcc_attr	attributes;
+ *	case FALSE:
+ *		void;
+ *	};
+ *
+ * wcc_data
+ *
+ *	struct wcc_data {
+ *		pre_op_attr	before;
+ *		post_op_attr	after;
+ *	};
+ */
+static int decode_pre_op_attr(struct xdr_stream *xdr, struct nfs_fattr *fattr)
 {
 	__be32 *p;
 
 	p = xdr_inline_decode(xdr, 4);
-	if (unlikely(!p))
+	if (unlikely(p == NULL))
 		goto out_overflow;
-	if (ntohl(*p++)) {
-		p = xdr_inline_decode(xdr, 84);
-		if (unlikely(!p))
-			goto out_overflow;
-		p = xdr_decode_fattr(p, fattr);
-	}
-	return p;
+	if (*p != xdr_zero)
+		return decode_wcc_attr(xdr, fattr);
+	return 0;
 out_overflow:
 	print_overflow_msg(__func__, xdr);
-	return ERR_PTR(-EIO);
+	return -EIO;
 }
 
-static inline __be32 *
-xdr_decode_pre_op_attr(__be32 *p, struct nfs_fattr *fattr)
+static int decode_wcc_data(struct xdr_stream *xdr, struct nfs_fattr *fattr)
 {
-	if (*p++)
-		return xdr_decode_wcc_attr(p, fattr);
-	return p;
-}
+	int error;
 
-
-static inline __be32 *
-xdr_decode_wcc_data(__be32 *p, struct nfs_fattr *fattr)
-{
-	p = xdr_decode_pre_op_attr(p, fattr);
-	return xdr_decode_post_op_attr(p, fattr);
+	error = decode_pre_op_attr(xdr, fattr);
+	if (unlikely(error))
+		goto out;
+	error = decode_post_op_attr(xdr, fattr);
+out:
+	return error;
 }
 
 /*
- * NFS encode functions
+ * post_op_fh3
+ *
+ *	union post_op_fh3 switch (bool handle_follows) {
+ *	case TRUE:
+ *		nfs_fh3  handle;
+ *	case FALSE:
+ *		void;
+ *	};
  */
-
-/*
- * Encode file handle argument
- */
-static int
-nfs3_xdr_fhandle(struct rpc_rqst *req, __be32 *p, struct nfs_fh *fh)
+static int decode_post_op_fh3(struct xdr_stream *xdr, struct nfs_fh *fh)
 {
-	p = xdr_encode_fhandle(p, fh);
-	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
+	__be32 *p = xdr_inline_decode(xdr, 4);
+	if (unlikely(p == NULL))
+		goto out_overflow;
+	if (*p != xdr_zero)
+		return decode_nfs_fh3(xdr, fh);
+	zero_nfs_fh3(fh);
 	return 0;
+out_overflow:
+	print_overflow_msg(__func__, xdr);
+	return -EIO;
 }
 
 /*
- * Encode SETATTR arguments
+ * diropargs3
+ *
+ *	struct diropargs3 {
+ *		nfs_fh3		dir;
+ *		filename3	name;
+ *	};
  */
-static int
-nfs3_xdr_sattrargs(struct rpc_rqst *req, __be32 *p, struct nfs3_sattrargs *args)
+static void encode_diropargs3(struct xdr_stream *xdr, const struct nfs_fh *fh,
+			      const char *name, u32 length)
 {
-	p = xdr_encode_fhandle(p, args->fh);
-	p = xdr_encode_sattr(p, args->sattr);
-	*p++ = htonl(args->guard);
-	if (args->guard)
-		p = xdr_encode_time3(p, &args->guardtime);
-	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
-	return 0;
+	encode_nfs_fh3(xdr, fh);
+	encode_filename3(xdr, name, length);
+}
+
+
+/*
+ * NFSv3 XDR encode functions
+ *
+ * NFSv3 argument types are defined in section 3.3 of RFC 1813:
+ * "NFS Version 3 Protocol Specification".
+ */
+
+/*
+ * 3.3.1  GETATTR3args
+ *
+ *	struct GETATTR3args {
+ *		nfs_fh3  object;
+ *	};
+ */
+static void nfs3_xdr_enc_getattr3args(struct rpc_rqst *req,
+				      struct xdr_stream *xdr,
+				      const struct nfs_fh *fh)
+{
+	encode_nfs_fh3(xdr, fh);
 }
 
 /*
- * Encode directory ops argument
+ * 3.3.2  SETATTR3args
+ *
+ *	union sattrguard3 switch (bool check) {
+ *	case TRUE:
+ *		nfstime3  obj_ctime;
+ *	case FALSE:
+ *		void;
+ *	};
+ *
+ *	struct SETATTR3args {
+ *		nfs_fh3		object;
+ *		sattr3		new_attributes;
+ *		sattrguard3	guard;
+ *	};
  */
-static int
-nfs3_xdr_diropargs(struct rpc_rqst *req, __be32 *p, struct nfs3_diropargs *args)
+static void encode_sattrguard3(struct xdr_stream *xdr,
+			       const struct nfs3_sattrargs *args)
 {
-	p = xdr_encode_fhandle(p, args->fh);
-	p = xdr_encode_array(p, args->name, args->len);
-	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
-	return 0;
-}
+	__be32 *p;
 
-/*
- * Encode REMOVE argument
- */
-static int
-nfs3_xdr_removeargs(struct rpc_rqst *req, __be32 *p, const struct nfs_removeargs *args)
-{
-	p = xdr_encode_fhandle(p, args->fh);
-	p = xdr_encode_array(p, args->name.name, args->name.len);
-	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
-	return 0;
-}
-
-/*
- * Encode access() argument
- */
-static int
-nfs3_xdr_accessargs(struct rpc_rqst *req, __be32 *p, struct nfs3_accessargs *args)
-{
-	p = xdr_encode_fhandle(p, args->fh);
-	*p++ = htonl(args->access);
-	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
-	return 0;
-}
-
-/*
- * Arguments to a READ call. Since we read data directly into the page
- * cache, we also set up the reply iovec here so that iov[1] points
- * exactly to the page we want to fetch.
- */
-static int
-nfs3_xdr_readargs(struct rpc_rqst *req, __be32 *p, struct nfs_readargs *args)
-{
-	struct rpc_auth	*auth = req->rq_cred->cr_auth;
-	unsigned int replen;
-	u32 count = args->count;
-
-	p = xdr_encode_fhandle(p, args->fh);
-	p = xdr_encode_hyper(p, args->offset);
-	*p++ = htonl(count);
-	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
-
-	/* Inline the page array */
-	replen = (RPC_REPHDRSIZE + auth->au_rslack + NFS3_readres_sz) << 2;
-	xdr_inline_pages(&req->rq_rcv_buf, replen,
-			 args->pages, args->pgbase, count);
-	req->rq_rcv_buf.flags |= XDRBUF_READ;
-	return 0;
-}
-
-/*
- * Write arguments. Splice the buffer to be written into the iovec.
- */
-static int
-nfs3_xdr_writeargs(struct rpc_rqst *req, __be32 *p, struct nfs_writeargs *args)
-{
-	struct xdr_buf *sndbuf = &req->rq_snd_buf;
-	u32 count = args->count;
-
-	p = xdr_encode_fhandle(p, args->fh);
-	p = xdr_encode_hyper(p, args->offset);
-	*p++ = htonl(count);
-	*p++ = htonl(args->stable);
-	*p++ = htonl(count);
-	sndbuf->len = xdr_adjust_iovec(sndbuf->head, p);
-
-	/* Copy the page array */
-	xdr_encode_pages(sndbuf, args->pages, args->pgbase, count);
-	sndbuf->flags |= XDRBUF_WRITE;
-	return 0;
-}
-
-/*
- * Encode CREATE arguments
- */
-static int
-nfs3_xdr_createargs(struct rpc_rqst *req, __be32 *p, struct nfs3_createargs *args)
-{
-	p = xdr_encode_fhandle(p, args->fh);
-	p = xdr_encode_array(p, args->name, args->len);
-
-	*p++ = htonl(args->createmode);
-	if (args->createmode == NFS3_CREATE_EXCLUSIVE) {
-		*p++ = args->verifier[0];
-		*p++ = args->verifier[1];
-	} else
-		p = xdr_encode_sattr(p, args->sattr);
-
-	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
-	return 0;
-}
-
-/*
- * Encode MKDIR arguments
- */
-static int
-nfs3_xdr_mkdirargs(struct rpc_rqst *req, __be32 *p, struct nfs3_mkdirargs *args)
-{
-	p = xdr_encode_fhandle(p, args->fh);
-	p = xdr_encode_array(p, args->name, args->len);
-	p = xdr_encode_sattr(p, args->sattr);
-	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
-	return 0;
-}
-
-/*
- * Encode SYMLINK arguments
- */
-static int
-nfs3_xdr_symlinkargs(struct rpc_rqst *req, __be32 *p, struct nfs3_symlinkargs *args)
-{
-	p = xdr_encode_fhandle(p, args->fromfh);
-	p = xdr_encode_array(p, args->fromname, args->fromlen);
-	p = xdr_encode_sattr(p, args->sattr);
-	*p++ = htonl(args->pathlen);
-	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
-
-	/* Copy the page */
-	xdr_encode_pages(&req->rq_snd_buf, args->pages, 0, args->pathlen);
-	return 0;
-}
-
-/*
- * Encode MKNOD arguments
- */
-static int
-nfs3_xdr_mknodargs(struct rpc_rqst *req, __be32 *p, struct nfs3_mknodargs *args)
-{
-	p = xdr_encode_fhandle(p, args->fh);
-	p = xdr_encode_array(p, args->name, args->len);
-	*p++ = htonl(args->type);
-	p = xdr_encode_sattr(p, args->sattr);
-	if (args->type == NF3CHR || args->type == NF3BLK) {
-		*p++ = htonl(MAJOR(args->rdev));
-		*p++ = htonl(MINOR(args->rdev));
-	}
-
-	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
-	return 0;
-}
-
-/*
- * Encode RENAME arguments
- */
-static int
-nfs3_xdr_renameargs(struct rpc_rqst *req, __be32 *p, struct nfs_renameargs *args)
-{
-	p = xdr_encode_fhandle(p, args->old_dir);
-	p = xdr_encode_array(p, args->old_name->name, args->old_name->len);
-	p = xdr_encode_fhandle(p, args->new_dir);
-	p = xdr_encode_array(p, args->new_name->name, args->new_name->len);
-	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
-	return 0;
-}
-
-/*
- * Encode LINK arguments
- */
-static int
-nfs3_xdr_linkargs(struct rpc_rqst *req, __be32 *p, struct nfs3_linkargs *args)
-{
-	p = xdr_encode_fhandle(p, args->fromfh);
-	p = xdr_encode_fhandle(p, args->tofh);
-	p = xdr_encode_array(p, args->toname, args->tolen);
-	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
-	return 0;
-}
-
-/*
- * Encode arguments to readdir call
- */
-static int
-nfs3_xdr_readdirargs(struct rpc_rqst *req, __be32 *p, struct nfs3_readdirargs *args)
-{
-	struct rpc_auth	*auth = req->rq_cred->cr_auth;
-	unsigned int replen;
-	u32 count = args->count;
-
-	p = xdr_encode_fhandle(p, args->fh);
-	p = xdr_encode_hyper(p, args->cookie);
-	*p++ = args->verf[0];
-	*p++ = args->verf[1];
-	if (args->plus) {
-		/* readdirplus: need dircount + buffer size.
-		 * We just make sure we make dircount big enough */
-		*p++ = htonl(count >> 3);
-	}
-	*p++ = htonl(count);
-	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
-
-	/* Inline the page array */
-	replen = (RPC_REPHDRSIZE + auth->au_rslack + NFS3_readdirres_sz) << 2;
-	xdr_inline_pages(&req->rq_rcv_buf, replen, args->pages, 0, count);
-	return 0;
-}
-
-/*
- * Decode the result of a readdir call.
- * We just check for syntactical correctness.
- */
-static int
-nfs3_xdr_readdirres(struct rpc_rqst *req, __be32 *p, struct nfs3_readdirres *res)
-{
-	struct xdr_buf *rcvbuf = &req->rq_rcv_buf;
-	struct kvec *iov = rcvbuf->head;
-	struct page **page;
-	size_t hdrlen;
-	u32 recvd, pglen;
-	int status;
-
-	status = ntohl(*p++);
-	/* Decode post_op_attrs */
-	p = xdr_decode_post_op_attr(p, res->dir_attr);
-	if (status)
-		return nfs_stat_to_errno(status);
-	/* Decode verifier cookie */
-	if (res->verf) {
-		res->verf[0] = *p++;
-		res->verf[1] = *p++;
+	if (args->guard) {
+		p = xdr_reserve_space(xdr, 4 + 8);
+		*p++ = xdr_one;
+		xdr_encode_nfstime3(p, &args->guardtime);
 	} else {
-		p += 2;
+		p = xdr_reserve_space(xdr, 4);
+		*p = xdr_zero;
 	}
-
-	hdrlen = (u8 *) p - (u8 *) iov->iov_base;
-	if (iov->iov_len < hdrlen) {
-		dprintk("NFS: READDIR reply header overflowed:"
-				"length %Zu > %Zu\n", hdrlen, iov->iov_len);
-		return -errno_NFSERR_IO;
-	} else if (iov->iov_len != hdrlen) {
-		dprintk("NFS: READDIR header is short. iovec will be shifted.\n");
-		xdr_shift_buf(rcvbuf, iov->iov_len - hdrlen);
-	}
-
-	pglen = rcvbuf->page_len;
-	recvd = rcvbuf->len - hdrlen;
-	if (pglen > recvd)
-		pglen = recvd;
-	page = rcvbuf->pages;
-
-	return pglen;
 }
 
-__be32 *
-nfs3_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry, struct nfs_server *server, int plus)
+static void nfs3_xdr_enc_setattr3args(struct rpc_rqst *req,
+				      struct xdr_stream *xdr,
+				      const struct nfs3_sattrargs *args)
 {
-	__be32 *p;
-	struct nfs_entry old = *entry;
-
-	p = xdr_inline_decode(xdr, 4);
-	if (unlikely(!p))
-		goto out_overflow;
-	if (!ntohl(*p++)) {
-		p = xdr_inline_decode(xdr, 4);
-		if (unlikely(!p))
-			goto out_overflow;
-		if (!ntohl(*p++))
-			return ERR_PTR(-EAGAIN);
-		entry->eof = 1;
-		return ERR_PTR(-EBADCOOKIE);
-	}
-
-	p = xdr_inline_decode(xdr, 12);
-	if (unlikely(!p))
-		goto out_overflow;
-	p = xdr_decode_hyper(p, &entry->ino);
-	entry->len  = ntohl(*p++);
-
-	p = xdr_inline_decode(xdr, entry->len + 8);
-	if (unlikely(!p))
-		goto out_overflow;
-	entry->name = (const char *) p;
-	p += XDR_QUADLEN(entry->len);
-	entry->prev_cookie = entry->cookie;
-	p = xdr_decode_hyper(p, &entry->cookie);
-
-	entry->d_type = DT_UNKNOWN;
-	if (plus) {
-		entry->fattr->valid = 0;
-		p = xdr_decode_post_op_attr_stream(xdr, entry->fattr);
-		if (IS_ERR(p))
-			goto out_overflow_exit;
-		entry->d_type = nfs_umode_to_dtype(entry->fattr->mode);
-		/* In fact, a post_op_fh3: */
-		p = xdr_inline_decode(xdr, 4);
-		if (unlikely(!p))
-			goto out_overflow;
-		if (*p++) {
-			p = xdr_decode_fhandle_stream(xdr, entry->fh);
-			if (IS_ERR(p))
-				goto out_overflow_exit;
-			/* Ugh -- server reply was truncated */
-			if (p == NULL) {
-				dprintk("NFS: FH truncated\n");
-				*entry = old;
-				return ERR_PTR(-EAGAIN);
-			}
-		} else
-			memset((u8*)(entry->fh), 0, sizeof(*entry->fh));
-	}
-
-	p = xdr_inline_peek(xdr, 8);
-	if (p != NULL)
-		entry->eof = !p[0] && p[1];
-	else
-		entry->eof = 0;
-
-	return p;
-
-out_overflow:
-	print_overflow_msg(__func__, xdr);
-out_overflow_exit:
-	return ERR_PTR(-EAGAIN);
+	encode_nfs_fh3(xdr, args->fh);
+	encode_sattr3(xdr, args->sattr);
+	encode_sattrguard3(xdr, args);
 }
 
 /*
- * Encode COMMIT arguments
+ * 3.3.3  LOOKUP3args
+ *
+ *	struct LOOKUP3args {
+ *		diropargs3  what;
+ *	};
  */
-static int
-nfs3_xdr_commitargs(struct rpc_rqst *req, __be32 *p, struct nfs_writeargs *args)
+static void nfs3_xdr_enc_lookup3args(struct rpc_rqst *req,
+				     struct xdr_stream *xdr,
+				     const struct nfs3_diropargs *args)
 {
-	p = xdr_encode_fhandle(p, args->fh);
+	encode_diropargs3(xdr, args->fh, args->name, args->len);
+}
+
+/*
+ * 3.3.4  ACCESS3args
+ *
+ *	struct ACCESS3args {
+ *		nfs_fh3		object;
+ *		uint32		access;
+ *	};
+ */
+static void encode_access3args(struct xdr_stream *xdr,
+			       const struct nfs3_accessargs *args)
+{
+	encode_nfs_fh3(xdr, args->fh);
+	encode_uint32(xdr, args->access);
+}
+
+static void nfs3_xdr_enc_access3args(struct rpc_rqst *req,
+				     struct xdr_stream *xdr,
+				     const struct nfs3_accessargs *args)
+{
+	encode_access3args(xdr, args);
+}
+
+/*
+ * 3.3.5  READLINK3args
+ *
+ *	struct READLINK3args {
+ *		nfs_fh3	symlink;
+ *	};
+ */
+static void nfs3_xdr_enc_readlink3args(struct rpc_rqst *req,
+				       struct xdr_stream *xdr,
+				       const struct nfs3_readlinkargs *args)
+{
+	encode_nfs_fh3(xdr, args->fh);
+	prepare_reply_buffer(req, args->pages, args->pgbase,
+					args->pglen, NFS3_readlinkres_sz);
+}
+
+/*
+ * 3.3.6  READ3args
+ *
+ *	struct READ3args {
+ *		nfs_fh3		file;
+ *		offset3		offset;
+ *		count3		count;
+ *	};
+ */
+static void encode_read3args(struct xdr_stream *xdr,
+			     const struct nfs_readargs *args)
+{
+	__be32 *p;
+
+	encode_nfs_fh3(xdr, args->fh);
+
+	p = xdr_reserve_space(xdr, 8 + 4);
 	p = xdr_encode_hyper(p, args->offset);
-	*p++ = htonl(args->count);
-	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
-	return 0;
+	*p = cpu_to_be32(args->count);
+}
+
+static void nfs3_xdr_enc_read3args(struct rpc_rqst *req,
+				   struct xdr_stream *xdr,
+				   const struct nfs_readargs *args)
+{
+	encode_read3args(xdr, args);
+	prepare_reply_buffer(req, args->pages, args->pgbase,
+					args->count, NFS3_readres_sz);
+	req->rq_rcv_buf.flags |= XDRBUF_READ;
+}
+
+/*
+ * 3.3.7  WRITE3args
+ *
+ *	enum stable_how {
+ *		UNSTABLE  = 0,
+ *		DATA_SYNC = 1,
+ *		FILE_SYNC = 2
+ *	};
+ *
+ *	struct WRITE3args {
+ *		nfs_fh3		file;
+ *		offset3		offset;
+ *		count3		count;
+ *		stable_how	stable;
+ *		opaque		data<>;
+ *	};
+ */
+static void encode_write3args(struct xdr_stream *xdr,
+			      const struct nfs_writeargs *args)
+{
+	__be32 *p;
+
+	encode_nfs_fh3(xdr, args->fh);
+
+	p = xdr_reserve_space(xdr, 8 + 4 + 4 + 4);
+	p = xdr_encode_hyper(p, args->offset);
+	*p++ = cpu_to_be32(args->count);
+	*p++ = cpu_to_be32(args->stable);
+	*p = cpu_to_be32(args->count);
+	xdr_write_pages(xdr, args->pages, args->pgbase, args->count);
+}
+
+static void nfs3_xdr_enc_write3args(struct rpc_rqst *req,
+				    struct xdr_stream *xdr,
+				    const struct nfs_writeargs *args)
+{
+	encode_write3args(xdr, args);
+	xdr->buf->flags |= XDRBUF_WRITE;
+}
+
+/*
+ * 3.3.8  CREATE3args
+ *
+ *	enum createmode3 {
+ *		UNCHECKED = 0,
+ *		GUARDED   = 1,
+ *		EXCLUSIVE = 2
+ *	};
+ *
+ *	union createhow3 switch (createmode3 mode) {
+ *	case UNCHECKED:
+ *	case GUARDED:
+ *		sattr3       obj_attributes;
+ *	case EXCLUSIVE:
+ *		createverf3  verf;
+ *	};
+ *
+ *	struct CREATE3args {
+ *		diropargs3	where;
+ *		createhow3	how;
+ *	};
+ */
+static void encode_createhow3(struct xdr_stream *xdr,
+			      const struct nfs3_createargs *args)
+{
+	encode_uint32(xdr, args->createmode);
+	switch (args->createmode) {
+	case NFS3_CREATE_UNCHECKED:
+	case NFS3_CREATE_GUARDED:
+		encode_sattr3(xdr, args->sattr);
+		break;
+	case NFS3_CREATE_EXCLUSIVE:
+		encode_createverf3(xdr, args->verifier);
+		break;
+	default:
+		BUG();
+	}
+}
+
+static void nfs3_xdr_enc_create3args(struct rpc_rqst *req,
+				     struct xdr_stream *xdr,
+				     const struct nfs3_createargs *args)
+{
+	encode_diropargs3(xdr, args->fh, args->name, args->len);
+	encode_createhow3(xdr, args);
+}
+
+/*
+ * 3.3.9  MKDIR3args
+ *
+ *	struct MKDIR3args {
+ *		diropargs3	where;
+ *		sattr3		attributes;
+ *	};
+ */
+static void nfs3_xdr_enc_mkdir3args(struct rpc_rqst *req,
+				    struct xdr_stream *xdr,
+				    const struct nfs3_mkdirargs *args)
+{
+	encode_diropargs3(xdr, args->fh, args->name, args->len);
+	encode_sattr3(xdr, args->sattr);
+}
+
+/*
+ * 3.3.10  SYMLINK3args
+ *
+ *	struct symlinkdata3 {
+ *		sattr3		symlink_attributes;
+ *		nfspath3	symlink_data;
+ *	};
+ *
+ *	struct SYMLINK3args {
+ *		diropargs3	where;
+ *		symlinkdata3	symlink;
+ *	};
+ */
+static void encode_symlinkdata3(struct xdr_stream *xdr,
+				const struct nfs3_symlinkargs *args)
+{
+	encode_sattr3(xdr, args->sattr);
+	encode_nfspath3(xdr, args->pages, args->pathlen);
+}
+
+static void nfs3_xdr_enc_symlink3args(struct rpc_rqst *req,
+				      struct xdr_stream *xdr,
+				      const struct nfs3_symlinkargs *args)
+{
+	encode_diropargs3(xdr, args->fromfh, args->fromname, args->fromlen);
+	encode_symlinkdata3(xdr, args);
+}
+
+/*
+ * 3.3.11  MKNOD3args
+ *
+ *	struct devicedata3 {
+ *		sattr3		dev_attributes;
+ *		specdata3	spec;
+ *	};
+ *
+ *	union mknoddata3 switch (ftype3 type) {
+ *	case NF3CHR:
+ *	case NF3BLK:
+ *		devicedata3	device;
+ *	case NF3SOCK:
+ *	case NF3FIFO:
+ *		sattr3		pipe_attributes;
+ *	default:
+ *		void;
+ *	};
+ *
+ *	struct MKNOD3args {
+ *		diropargs3	where;
+ *		mknoddata3	what;
+ *	};
+ */
+static void encode_devicedata3(struct xdr_stream *xdr,
+			       const struct nfs3_mknodargs *args)
+{
+	encode_sattr3(xdr, args->sattr);
+	encode_specdata3(xdr, args->rdev);
+}
+
+static void encode_mknoddata3(struct xdr_stream *xdr,
+			      const struct nfs3_mknodargs *args)
+{
+	encode_ftype3(xdr, args->type);
+	switch (args->type) {
+	case NF3CHR:
+	case NF3BLK:
+		encode_devicedata3(xdr, args);
+		break;
+	case NF3SOCK:
+	case NF3FIFO:
+		encode_sattr3(xdr, args->sattr);
+		break;
+	case NF3REG:
+	case NF3DIR:
+		break;
+	default:
+		BUG();
+	}
+}
+
+static void nfs3_xdr_enc_mknod3args(struct rpc_rqst *req,
+				    struct xdr_stream *xdr,
+				    const struct nfs3_mknodargs *args)
+{
+	encode_diropargs3(xdr, args->fh, args->name, args->len);
+	encode_mknoddata3(xdr, args);
+}
+
+/*
+ * 3.3.12  REMOVE3args
+ *
+ *	struct REMOVE3args {
+ *		diropargs3  object;
+ *	};
+ */
+static void nfs3_xdr_enc_remove3args(struct rpc_rqst *req,
+				     struct xdr_stream *xdr,
+				     const struct nfs_removeargs *args)
+{
+	encode_diropargs3(xdr, args->fh, args->name.name, args->name.len);
+}
+
+/*
+ * 3.3.14  RENAME3args
+ *
+ *	struct RENAME3args {
+ *		diropargs3	from;
+ *		diropargs3	to;
+ *	};
+ */
+static void nfs3_xdr_enc_rename3args(struct rpc_rqst *req,
+				     struct xdr_stream *xdr,
+				     const struct nfs_renameargs *args)
+{
+	const struct qstr *old = args->old_name;
+	const struct qstr *new = args->new_name;
+
+	encode_diropargs3(xdr, args->old_dir, old->name, old->len);
+	encode_diropargs3(xdr, args->new_dir, new->name, new->len);
+}
+
+/*
+ * 3.3.15  LINK3args
+ *
+ *	struct LINK3args {
+ *		nfs_fh3		file;
+ *		diropargs3	link;
+ *	};
+ */
+static void nfs3_xdr_enc_link3args(struct rpc_rqst *req,
+				   struct xdr_stream *xdr,
+				   const struct nfs3_linkargs *args)
+{
+	encode_nfs_fh3(xdr, args->fromfh);
+	encode_diropargs3(xdr, args->tofh, args->toname, args->tolen);
+}
+
+/*
+ * 3.3.16  READDIR3args
+ *
+ *	struct READDIR3args {
+ *		nfs_fh3		dir;
+ *		cookie3		cookie;
+ *		cookieverf3	cookieverf;
+ *		count3		count;
+ *	};
+ */
+static void encode_readdir3args(struct xdr_stream *xdr,
+				const struct nfs3_readdirargs *args)
+{
+	__be32 *p;
+
+	encode_nfs_fh3(xdr, args->fh);
+
+	p = xdr_reserve_space(xdr, 8 + NFS3_COOKIEVERFSIZE + 4);
+	p = xdr_encode_cookie3(p, args->cookie);
+	p = xdr_encode_cookieverf3(p, args->verf);
+	*p = cpu_to_be32(args->count);
+}
+
+static void nfs3_xdr_enc_readdir3args(struct rpc_rqst *req,
+				      struct xdr_stream *xdr,
+				      const struct nfs3_readdirargs *args)
+{
+	encode_readdir3args(xdr, args);
+	prepare_reply_buffer(req, args->pages, 0,
+				args->count, NFS3_readdirres_sz);
+}
+
+/*
+ * 3.3.17  READDIRPLUS3args
+ *
+ *	struct READDIRPLUS3args {
+ *		nfs_fh3		dir;
+ *		cookie3		cookie;
+ *		cookieverf3	cookieverf;
+ *		count3		dircount;
+ *		count3		maxcount;
+ *	};
+ */
+static void encode_readdirplus3args(struct xdr_stream *xdr,
+				    const struct nfs3_readdirargs *args)
+{
+	__be32 *p;
+
+	encode_nfs_fh3(xdr, args->fh);
+
+	p = xdr_reserve_space(xdr, 8 + NFS3_COOKIEVERFSIZE + 4 + 4);
+	p = xdr_encode_cookie3(p, args->cookie);
+	p = xdr_encode_cookieverf3(p, args->verf);
+
+	/*
+	 * readdirplus needs both a dircount and a buffer size;
+	 * we just make sure dircount is large enough.
+	 */
+	*p++ = cpu_to_be32(args->count >> 3);
+
+	*p = cpu_to_be32(args->count);
+}
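The dircount heuristic kept here (see the comment above) derives the name-only budget from the single buffer size the client tracks; dividing by eight is simply a generous guess carried over from the old encoder. A tiny worked example with an assumed 32 KB maxcount:

	#include <stdio.h>

	int main(void)
	{
		unsigned int count = 32768;		/* maxcount in bytes (example) */
		unsigned int dircount = count >> 3;	/* 4096: room for names + cookies */

		printf("maxcount=%u dircount=%u\n", count, dircount);
		return 0;
	}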
+
+static void nfs3_xdr_enc_readdirplus3args(struct rpc_rqst *req,
+					  struct xdr_stream *xdr,
+					  const struct nfs3_readdirargs *args)
+{
+	encode_readdirplus3args(xdr, args);
+	prepare_reply_buffer(req, args->pages, 0,
+				args->count, NFS3_readdirres_sz);
+}
+
+/*
+ * 3.3.21  COMMIT3args
+ *
+ *	struct COMMIT3args {
+ *		nfs_fh3		file;
+ *		offset3		offset;
+ *		count3		count;
+ *	};
+ */
+static void encode_commit3args(struct xdr_stream *xdr,
+			       const struct nfs_writeargs *args)
+{
+	__be32 *p;
+
+	encode_nfs_fh3(xdr, args->fh);
+
+	p = xdr_reserve_space(xdr, 8 + 4);
+	p = xdr_encode_hyper(p, args->offset);
+	*p = cpu_to_be32(args->count);
+}
+
+static void nfs3_xdr_enc_commit3args(struct rpc_rqst *req,
+				     struct xdr_stream *xdr,
+				     const struct nfs_writeargs *args)
+{
+	encode_commit3args(xdr, args);
 }
 
 #ifdef CONFIG_NFS_V3_ACL
-/*
- * Encode GETACL arguments
- */
-static int
-nfs3_xdr_getaclargs(struct rpc_rqst *req, __be32 *p,
-		    struct nfs3_getaclargs *args)
+
+static void nfs3_xdr_enc_getacl3args(struct rpc_rqst *req,
+				     struct xdr_stream *xdr,
+				     const struct nfs3_getaclargs *args)
 {
-	struct rpc_auth	*auth = req->rq_cred->cr_auth;
-	unsigned int replen;
-
-	p = xdr_encode_fhandle(p, args->fh);
-	*p++ = htonl(args->mask);
-	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
-
-	if (args->mask & (NFS_ACL | NFS_DFACL)) {
-		/* Inline the page array */
-		replen = (RPC_REPHDRSIZE + auth->au_rslack +
-			  ACL3_getaclres_sz) << 2;
-		xdr_inline_pages(&req->rq_rcv_buf, replen, args->pages, 0,
-				 NFSACL_MAXPAGES << PAGE_SHIFT);
-	}
-	return 0;
+	encode_nfs_fh3(xdr, args->fh);
+	encode_uint32(xdr, args->mask);
+	if (args->mask & (NFS_ACL | NFS_DFACL))
+		prepare_reply_buffer(req, args->pages, 0,
+					NFSACL_MAXPAGES << PAGE_SHIFT,
+					ACL3_getaclres_sz);
 }
 
-/*
- * Encode SETACL arguments
- */
-static int
-nfs3_xdr_setaclargs(struct rpc_rqst *req, __be32 *p,
-                   struct nfs3_setaclargs *args)
+static void nfs3_xdr_enc_setacl3args(struct rpc_rqst *req,
+				     struct xdr_stream *xdr,
+				     const struct nfs3_setaclargs *args)
 {
-	struct xdr_buf *buf = &req->rq_snd_buf;
 	unsigned int base;
-	int err;
+	int error;
 
-	p = xdr_encode_fhandle(p, NFS_FH(args->inode));
-	*p++ = htonl(args->mask);
-	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
-	base = req->rq_slen;
-
+	encode_nfs_fh3(xdr, NFS_FH(args->inode));
+	encode_uint32(xdr, args->mask);
 	if (args->npages != 0)
-		xdr_encode_pages(buf, args->pages, 0, args->len);
-	else
-		req->rq_slen = xdr_adjust_iovec(req->rq_svec,
-				p + XDR_QUADLEN(args->len));
+		xdr_write_pages(xdr, args->pages, 0, args->len);
 
-	err = nfsacl_encode(buf, base, args->inode,
+	base = req->rq_slen;
+	error = nfsacl_encode(xdr->buf, base, args->inode,
 			    (args->mask & NFS_ACL) ?
 			    args->acl_access : NULL, 1, 0);
-	if (err > 0)
-		err = nfsacl_encode(buf, base + err, args->inode,
-				    (args->mask & NFS_DFACL) ?
-				    args->acl_default : NULL, 1,
-				    NFS_ACL_DEFAULT);
-	return (err > 0) ? 0 : err;
+	BUG_ON(error < 0);
+	error = nfsacl_encode(xdr->buf, base + error, args->inode,
+			    (args->mask & NFS_DFACL) ?
+			    args->acl_default : NULL, 1,
+			    NFS_ACL_DEFAULT);
+	BUG_ON(error < 0);
 }
+
 #endif  /* CONFIG_NFS_V3_ACL */
 
 /*
- * NFS XDR decode functions
+ * NFSv3 XDR decode functions
+ *
+ * NFSv3 result types are defined in section 3.3 of RFC 1813:
+ * "NFS Version 3 Protocol Specification".
  */
 
 /*
- * Decode attrstat reply.
+ * 3.3.1  GETATTR3res
+ *
+ *	struct GETATTR3resok {
+ *		fattr3		obj_attributes;
+ *	};
+ *
+ *	union GETATTR3res switch (nfsstat3 status) {
+ *	case NFS3_OK:
+ *		GETATTR3resok  resok;
+ *	default:
+ *		void;
+ *	};
  */
-static int
-nfs3_xdr_attrstat(struct rpc_rqst *req, __be32 *p, struct nfs_fattr *fattr)
+static int nfs3_xdr_dec_getattr3res(struct rpc_rqst *req,
+				    struct xdr_stream *xdr,
+				    struct nfs_fattr *result)
 {
-	int	status;
+	enum nfs_stat status;
+	int error;
 
-	if ((status = ntohl(*p++)))
-		return nfs_stat_to_errno(status);
-	xdr_decode_fattr(p, fattr);
-	return 0;
+	error = decode_nfsstat3(xdr, &status);
+	if (unlikely(error))
+		goto out;
+	if (status != NFS3_OK)
+		goto out_default;
+	error = decode_fattr3(xdr, result);
+out:
+	return error;
+out_default:
+	return nfs_stat_to_errno(status);
 }
 
 /*
- * Decode status+wcc_data reply
- * SATTR, REMOVE, RMDIR
+ * 3.3.2  SETATTR3res
+ *
+ *	struct SETATTR3resok {
+ *		wcc_data  obj_wcc;
+ *	};
+ *
+ *	struct SETATTR3resfail {
+ *		wcc_data  obj_wcc;
+ *	};
+ *
+ *	union SETATTR3res switch (nfsstat3 status) {
+ *	case NFS3_OK:
+ *		SETATTR3resok   resok;
+ *	default:
+ *		SETATTR3resfail resfail;
+ *	};
  */
-static int
-nfs3_xdr_wccstat(struct rpc_rqst *req, __be32 *p, struct nfs_fattr *fattr)
+static int nfs3_xdr_dec_setattr3res(struct rpc_rqst *req,
+				    struct xdr_stream *xdr,
+				    struct nfs_fattr *result)
 {
-	int	status;
+	enum nfs_stat status;
+	int error;
 
-	if ((status = ntohl(*p++)))
-		status = nfs_stat_to_errno(status);
-	xdr_decode_wcc_data(p, fattr);
-	return status;
-}
-
-static int
-nfs3_xdr_removeres(struct rpc_rqst *req, __be32 *p, struct nfs_removeres *res)
-{
-	return nfs3_xdr_wccstat(req, p, res->dir_attr);
+	error = decode_nfsstat3(xdr, &status);
+	if (unlikely(error))
+		goto out;
+	error = decode_wcc_data(xdr, result);
+	if (unlikely(error))
+		goto out;
+	if (status != NFS3_OK)
+		goto out_status;
+out:
+	return error;
+out_status:
+	return nfs_stat_to_errno(status);
 }
 
 /*
- * Decode LOOKUP reply
+ * 3.3.3  LOOKUP3res
+ *
+ *	struct LOOKUP3resok {
+ *		nfs_fh3		object;
+ *		post_op_attr	obj_attributes;
+ *		post_op_attr	dir_attributes;
+ *	};
+ *
+ *	struct LOOKUP3resfail {
+ *		post_op_attr	dir_attributes;
+ *	};
+ *
+ *	union LOOKUP3res switch (nfsstat3 status) {
+ *	case NFS3_OK:
+ *		LOOKUP3resok	resok;
+ *	default:
+ *		LOOKUP3resfail	resfail;
+ *	};
  */
-static int
-nfs3_xdr_lookupres(struct rpc_rqst *req, __be32 *p, struct nfs3_diropres *res)
+static int nfs3_xdr_dec_lookup3res(struct rpc_rqst *req,
+				   struct xdr_stream *xdr,
+				   struct nfs3_diropres *result)
 {
-	int	status;
+	enum nfs_stat status;
+	int error;
 
-	if ((status = ntohl(*p++))) {
-		status = nfs_stat_to_errno(status);
-	} else {
-		if (!(p = xdr_decode_fhandle(p, res->fh)))
-			return -errno_NFSERR_IO;
-		p = xdr_decode_post_op_attr(p, res->fattr);
-	}
-	xdr_decode_post_op_attr(p, res->dir_attr);
-	return status;
+	error = decode_nfsstat3(xdr, &status);
+	if (unlikely(error))
+		goto out;
+	if (status != NFS3_OK)
+		goto out_default;
+	error = decode_nfs_fh3(xdr, result->fh);
+	if (unlikely(error))
+		goto out;
+	error = decode_post_op_attr(xdr, result->fattr);
+	if (unlikely(error))
+		goto out;
+	error = decode_post_op_attr(xdr, result->dir_attr);
+out:
+	return error;
+out_default:
+	error = decode_post_op_attr(xdr, result->dir_attr);
+	if (unlikely(error))
+		goto out;
+	return nfs_stat_to_errno(status);
 }
 
 /*
- * Decode ACCESS reply
+ * 3.3.4  ACCESS3res
+ *
+ *	struct ACCESS3resok {
+ *		post_op_attr	obj_attributes;
+ *		uint32		access;
+ *	};
+ *
+ *	struct ACCESS3resfail {
+ *		post_op_attr	obj_attributes;
+ *	};
+ *
+ *	union ACCESS3res switch (nfsstat3 status) {
+ *	case NFS3_OK:
+ *		ACCESS3resok	resok;
+ *	default:
+ *		ACCESS3resfail	resfail;
+ *	};
  */
-static int
-nfs3_xdr_accessres(struct rpc_rqst *req, __be32 *p, struct nfs3_accessres *res)
+static int nfs3_xdr_dec_access3res(struct rpc_rqst *req,
+				   struct xdr_stream *xdr,
+				   struct nfs3_accessres *result)
 {
-	int	status = ntohl(*p++);
+	enum nfs_stat status;
+	int error;
 
-	p = xdr_decode_post_op_attr(p, res->fattr);
-	if (status)
-		return nfs_stat_to_errno(status);
-	res->access = ntohl(*p++);
-	return 0;
-}
-
-static int
-nfs3_xdr_readlinkargs(struct rpc_rqst *req, __be32 *p, struct nfs3_readlinkargs *args)
-{
-	struct rpc_auth	*auth = req->rq_cred->cr_auth;
-	unsigned int replen;
-
-	p = xdr_encode_fhandle(p, args->fh);
-	req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
-
-	/* Inline the page array */
-	replen = (RPC_REPHDRSIZE + auth->au_rslack + NFS3_readlinkres_sz) << 2;
-	xdr_inline_pages(&req->rq_rcv_buf, replen, args->pages, args->pgbase, args->pglen);
-	return 0;
+	error = decode_nfsstat3(xdr, &status);
+	if (unlikely(error))
+		goto out;
+	error = decode_post_op_attr(xdr, result->fattr);
+	if (unlikely(error))
+		goto out;
+	if (status != NFS3_OK)
+		goto out_default;
+	error = decode_uint32(xdr, &result->access);
+out:
+	return error;
+out_default:
+	return nfs_stat_to_errno(status);
 }
 
 /*
- * Decode READLINK reply
+ * 3.3.5  READLINK3res
+ *
+ *	struct READLINK3resok {
+ *		post_op_attr	symlink_attributes;
+ *		nfspath3	data;
+ *	};
+ *
+ *	struct READLINK3resfail {
+ *		post_op_attr	symlink_attributes;
+ *	};
+ *
+ *	union READLINK3res switch (nfsstat3 status) {
+ *	case NFS3_OK:
+ *		READLINK3resok	resok;
+ *	default:
+ *		READLINK3resfail resfail;
+ *	};
  */
-static int
-nfs3_xdr_readlinkres(struct rpc_rqst *req, __be32 *p, struct nfs_fattr *fattr)
+static int nfs3_xdr_dec_readlink3res(struct rpc_rqst *req,
+				     struct xdr_stream *xdr,
+				     struct nfs_fattr *result)
 {
-	struct xdr_buf *rcvbuf = &req->rq_rcv_buf;
-	struct kvec *iov = rcvbuf->head;
+	enum nfs_stat status;
+	int error;
+
+	error = decode_nfsstat3(xdr, &status);
+	if (unlikely(error))
+		goto out;
+	error = decode_post_op_attr(xdr, result);
+	if (unlikely(error))
+		goto out;
+	if (status != NFS3_OK)
+		goto out_default;
+	error = decode_nfspath3(xdr);
+out:
+	return error;
+out_default:
+	return nfs_stat_to_errno(status);
+}
+
+/*
+ * 3.3.6  READ3res
+ *
+ *	struct READ3resok {
+ *		post_op_attr	file_attributes;
+ *		count3		count;
+ *		bool		eof;
+ *		opaque		data<>;
+ *	};
+ *
+ *	struct READ3resfail {
+ *		post_op_attr	file_attributes;
+ *	};
+ *
+ *	union READ3res switch (nfsstat3 status) {
+ *	case NFS3_OK:
+ *		READ3resok	resok;
+ *	default:
+ *		READ3resfail	resfail;
+ *	};
+ */
+static int decode_read3resok(struct xdr_stream *xdr,
+			     struct nfs_readres *result)
+{
+	u32 eof, count, ocount, recvd;
 	size_t hdrlen;
-	u32 len, recvd;
-	int	status;
+	__be32 *p;
 
-	status = ntohl(*p++);
-	p = xdr_decode_post_op_attr(p, fattr);
+	p = xdr_inline_decode(xdr, 4 + 4 + 4);
+	if (unlikely(p == NULL))
+		goto out_overflow;
+	count = be32_to_cpup(p++);
+	eof = be32_to_cpup(p++);
+	ocount = be32_to_cpup(p++);
+	if (unlikely(ocount != count))
+		goto out_mismatch;
+	hdrlen = (u8 *)xdr->p - (u8 *)xdr->iov->iov_base;
+	recvd = xdr->buf->len - hdrlen;
+	if (unlikely(count > recvd))
+		goto out_cheating;
 
-	if (status != 0)
-		return nfs_stat_to_errno(status);
-
-	/* Convert length of symlink */
-	len = ntohl(*p++);
-	if (len >= rcvbuf->page_len) {
-		dprintk("nfs: server returned giant symlink!\n");
-		return -ENAMETOOLONG;
-	}
-
-	hdrlen = (u8 *) p - (u8 *) iov->iov_base;
-	if (iov->iov_len < hdrlen) {
-		dprintk("NFS: READLINK reply header overflowed:"
-				"length %Zu > %Zu\n", hdrlen, iov->iov_len);
-		return -errno_NFSERR_IO;
-	} else if (iov->iov_len != hdrlen) {
-		dprintk("NFS: READLINK header is short. "
-			"iovec will be shifted.\n");
-		xdr_shift_buf(rcvbuf, iov->iov_len - hdrlen);
-	}
-	recvd = req->rq_rcv_buf.len - hdrlen;
-	if (recvd < len) {
-		dprintk("NFS: server cheating in readlink reply: "
-				"count %u > recvd %u\n", len, recvd);
-		return -EIO;
-	}
-
-	xdr_terminate_string(rcvbuf, len);
-	return 0;
-}
-
-/*
- * Decode READ reply
- */
-static int
-nfs3_xdr_readres(struct rpc_rqst *req, __be32 *p, struct nfs_readres *res)
-{
-	struct kvec *iov = req->rq_rcv_buf.head;
-	size_t hdrlen;
-	u32 count, ocount, recvd;
-	int status;
-
-	status = ntohl(*p++);
-	p = xdr_decode_post_op_attr(p, res->fattr);
-
-	if (status != 0)
-		return nfs_stat_to_errno(status);
-
-	/* Decode reply count and EOF flag. NFSv3 is somewhat redundant
-	 * in that it puts the count both in the res struct and in the
-	 * opaque data count. */
-	count    = ntohl(*p++);
-	res->eof = ntohl(*p++);
-	ocount   = ntohl(*p++);
-
-	if (ocount != count) {
-		dprintk("NFS: READ count doesn't match RPC opaque count.\n");
-		return -errno_NFSERR_IO;
-	}
-
-	hdrlen = (u8 *) p - (u8 *) iov->iov_base;
-	if (iov->iov_len < hdrlen) {
-		dprintk("NFS: READ reply header overflowed:"
-				"length %Zu > %Zu\n", hdrlen, iov->iov_len);
-       		return -errno_NFSERR_IO;
-	} else if (iov->iov_len != hdrlen) {
-		dprintk("NFS: READ header is short. iovec will be shifted.\n");
-		xdr_shift_buf(&req->rq_rcv_buf, iov->iov_len - hdrlen);
-	}
-
-	recvd = req->rq_rcv_buf.len - hdrlen;
-	if (count > recvd) {
-		dprintk("NFS: server cheating in read reply: "
-			"count %u > recvd %u\n", count, recvd);
-		count = recvd;
-		res->eof = 0;
-	}
-
-	if (count < res->count)
-		res->count = count;
-
+out:
+	xdr_read_pages(xdr, count);
+	result->eof = eof;
+	result->count = count;
 	return count;
+out_mismatch:
+	dprintk("NFS: READ count doesn't match length of opaque: "
+		"count %u != ocount %u\n", count, ocount);
+	return -EIO;
+out_cheating:
+	dprintk("NFS: server cheating in read result: "
+		"count %u > recvd %u\n", count, recvd);
+	count = recvd;
+	eof = 0;
+	goto out;
+out_overflow:
+	print_overflow_msg(__func__, xdr);
+	return -EIO;
+}
+
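/*
 * Self-contained sketch of the READ reply sanity checks performed above by
 * decode_read3resok(): the RPC-level count must match the opaque data
 * length, and neither may exceed the bytes actually received; when the
 * server advertises more data than it sent, the count is clamped and EOF is
 * cleared.  Purely illustrative; the types and names are local to this
 * sketch.
 */
#include <stdio.h>

struct sk_read_result {
	unsigned int count;
	unsigned int eof;
};

static int sk_check_read_reply(unsigned int count, unsigned int ocount,
			       unsigned int eof, unsigned int recvd,
			       struct sk_read_result *res)
{
	if (ocount != count) {
		fprintf(stderr, "count %u != opaque length %u\n",
			count, ocount);
		return -1;			/* corrupt reply */
	}
	if (count > recvd) {
		fprintf(stderr, "short reply: count %u > recvd %u\n",
			count, recvd);
		count = recvd;			/* trust only what arrived */
		eof = 0;			/* more data may still follow */
	}
	res->count = count;
	res->eof = eof;
	return 0;
}

int main(void)
{
	struct sk_read_result res;

	/* server claims 8192 bytes but only 4096 made it into the reply */
	if (sk_check_read_reply(8192, 8192, 1, 4096, &res))
		return 1;
	printf("count=%u eof=%u\n", res.count, res.eof);	/* 4096, 0 */
	return 0;
}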
+static int nfs3_xdr_dec_read3res(struct rpc_rqst *req, struct xdr_stream *xdr,
+				 struct nfs_readres *result)
+{
+	enum nfs_stat status;
+	int error;
+
+	error = decode_nfsstat3(xdr, &status);
+	if (unlikely(error))
+		goto out;
+	error = decode_post_op_attr(xdr, result->fattr);
+	if (unlikely(error))
+		goto out;
+	if (status != NFS3_OK)
+		goto out_status;
+	error = decode_read3resok(xdr, result);
+out:
+	return error;
+out_status:
+	return nfs_stat_to_errno(status);
 }
 
 /*
- * Decode WRITE response
+ * 3.3.7  WRITE3res
+ *
+ *	enum stable_how {
+ *		UNSTABLE  = 0,
+ *		DATA_SYNC = 1,
+ *		FILE_SYNC = 2
+ *	};
+ *
+ *	struct WRITE3resok {
+ *		wcc_data	file_wcc;
+ *		count3		count;
+ *		stable_how	committed;
+ *		writeverf3	verf;
+ *	};
+ *
+ *	struct WRITE3resfail {
+ *		wcc_data	file_wcc;
+ *	};
+ *
+ *	union WRITE3res switch (nfsstat3 status) {
+ *	case NFS3_OK:
+ *		WRITE3resok	resok;
+ *	default:
+ *		WRITE3resfail	resfail;
+ *	};
  */
-static int
-nfs3_xdr_writeres(struct rpc_rqst *req, __be32 *p, struct nfs_writeres *res)
+static int decode_write3resok(struct xdr_stream *xdr,
+			      struct nfs_writeres *result)
 {
-	int	status;
+	__be32 *p;
 
-	status = ntohl(*p++);
-	p = xdr_decode_wcc_data(p, res->fattr);
+	p = xdr_inline_decode(xdr, 4 + 4 + NFS3_WRITEVERFSIZE);
+	if (unlikely(p == NULL))
+		goto out_overflow;
+	result->count = be32_to_cpup(p++);
+	result->verf->committed = be32_to_cpup(p++);
+	if (unlikely(result->verf->committed > NFS_FILE_SYNC))
+		goto out_badvalue;
+	memcpy(result->verf->verifier, p, NFS3_WRITEVERFSIZE);
+	return result->count;
+out_badvalue:
+	dprintk("NFS: bad stable_how value: %u\n", result->verf->committed);
+	return -EIO;
+out_overflow:
+	print_overflow_msg(__func__, xdr);
+	return -EIO;
+}
 
-	if (status != 0)
-		return nfs_stat_to_errno(status);
+static int nfs3_xdr_dec_write3res(struct rpc_rqst *req, struct xdr_stream *xdr,
+				  struct nfs_writeres *result)
+{
+	enum nfs_stat status;
+	int error;
 
-	res->count = ntohl(*p++);
-	res->verf->committed = (enum nfs3_stable_how)ntohl(*p++);
-	res->verf->verifier[0] = *p++;
-	res->verf->verifier[1] = *p++;
-
-	return res->count;
+	error = decode_nfsstat3(xdr, &status);
+	if (unlikely(error))
+		goto out;
+	error = decode_wcc_data(xdr, result->fattr);
+	if (unlikely(error))
+		goto out;
+	if (status != NFS3_OK)
+		goto out_status;
+	error = decode_write3resok(xdr, result);
+out:
+	return error;
+out_status:
+	return nfs_stat_to_errno(status);
 }
 
 /*
- * Decode a CREATE response
+ * 3.3.8  CREATE3res
+ *
+ *	struct CREATE3resok {
+ *		post_op_fh3	obj;
+ *		post_op_attr	obj_attributes;
+ *		wcc_data	dir_wcc;
+ *	};
+ *
+ *	struct CREATE3resfail {
+ *		wcc_data	dir_wcc;
+ *	};
+ *
+ *	union CREATE3res switch (nfsstat3 status) {
+ *	case NFS3_OK:
+ *		CREATE3resok	resok;
+ *	default:
+ *		CREATE3resfail	resfail;
+ *	};
  */
-static int
-nfs3_xdr_createres(struct rpc_rqst *req, __be32 *p, struct nfs3_diropres *res)
+static int decode_create3resok(struct xdr_stream *xdr,
+			       struct nfs3_diropres *result)
 {
-	int	status;
+	int error;
 
-	status = ntohl(*p++);
-	if (status == 0) {
-		if (*p++) {
-			if (!(p = xdr_decode_fhandle(p, res->fh)))
-				return -errno_NFSERR_IO;
-			p = xdr_decode_post_op_attr(p, res->fattr);
-		} else {
-			memset(res->fh, 0, sizeof(*res->fh));
-			/* Do decode post_op_attr but set it to NULL */
-			p = xdr_decode_post_op_attr(p, res->fattr);
-			res->fattr->valid = 0;
-		}
-	} else {
-		status = nfs_stat_to_errno(status);
+	error = decode_post_op_fh3(xdr, result->fh);
+	if (unlikely(error))
+		goto out;
+	error = decode_post_op_attr(xdr, result->fattr);
+	if (unlikely(error))
+		goto out;
+	/* The server isn't required to return a file handle.
+	 * If it didn't, force the client to perform a LOOKUP
+	 * to determine the correct file handle and attribute
+	 * values for the new object. */
+	if (result->fh->size == 0)
+		result->fattr->valid = 0;
+	error = decode_wcc_data(xdr, result->dir_attr);
+out:
+	return error;
+}
+
+static int nfs3_xdr_dec_create3res(struct rpc_rqst *req,
+				   struct xdr_stream *xdr,
+				   struct nfs3_diropres *result)
+{
+	enum nfs_stat status;
+	int error;
+
+	error = decode_nfsstat3(xdr, &status);
+	if (unlikely(error))
+		goto out;
+	if (status != NFS3_OK)
+		goto out_default;
+	error = decode_create3resok(xdr, result);
+out:
+	return error;
+out_default:
+	error = decode_wcc_data(xdr, result->dir_attr);
+	if (unlikely(error))
+		goto out;
+	return nfs_stat_to_errno(status);
+}
+
+/*
+ * 3.3.12  REMOVE3res
+ *
+ *	struct REMOVE3resok {
+ *		wcc_data    dir_wcc;
+ *	};
+ *
+ *	struct REMOVE3resfail {
+ *		wcc_data    dir_wcc;
+ *	};
+ *
+ *	union REMOVE3res switch (nfsstat3 status) {
+ *	case NFS3_OK:
+ *		REMOVE3resok   resok;
+ *	default:
+ *		REMOVE3resfail resfail;
+ *	};
+ */
+static int nfs3_xdr_dec_remove3res(struct rpc_rqst *req,
+				   struct xdr_stream *xdr,
+				   struct nfs_removeres *result)
+{
+	enum nfs_stat status;
+	int error;
+
+	error = decode_nfsstat3(xdr, &status);
+	if (unlikely(error))
+		goto out;
+	error = decode_wcc_data(xdr, result->dir_attr);
+	if (unlikely(error))
+		goto out;
+	if (status != NFS3_OK)
+		goto out_status;
+out:
+	return error;
+out_status:
+	return nfs_stat_to_errno(status);
+}
+
+/*
+ * 3.3.14  RENAME3res
+ *
+ *	struct RENAME3resok {
+ *		wcc_data	fromdir_wcc;
+ *		wcc_data	todir_wcc;
+ *	};
+ *
+ *	struct RENAME3resfail {
+ *		wcc_data	fromdir_wcc;
+ *		wcc_data	todir_wcc;
+ *	};
+ *
+ *	union RENAME3res switch (nfsstat3 status) {
+ *	case NFS3_OK:
+ *		RENAME3resok   resok;
+ *	default:
+ *		RENAME3resfail resfail;
+ *	};
+ */
+static int nfs3_xdr_dec_rename3res(struct rpc_rqst *req,
+				   struct xdr_stream *xdr,
+				   struct nfs_renameres *result)
+{
+	enum nfs_stat status;
+	int error;
+
+	error = decode_nfsstat3(xdr, &status);
+	if (unlikely(error))
+		goto out;
+	error = decode_wcc_data(xdr, result->old_fattr);
+	if (unlikely(error))
+		goto out;
+	error = decode_wcc_data(xdr, result->new_fattr);
+	if (unlikely(error))
+		goto out;
+	if (status != NFS3_OK)
+		goto out_status;
+out:
+	return error;
+out_status:
+	return nfs_stat_to_errno(status);
+}
+
+/*
+ * 3.3.15  LINK3res
+ *
+ *	struct LINK3resok {
+ *		post_op_attr	file_attributes;
+ *		wcc_data	linkdir_wcc;
+ *	};
+ *
+ *	struct LINK3resfail {
+ *		post_op_attr	file_attributes;
+ *		wcc_data	linkdir_wcc;
+ *	};
+ *
+ *	union LINK3res switch (nfsstat3 status) {
+ *	case NFS3_OK:
+ *		LINK3resok	resok;
+ *	default:
+ *		LINK3resfail	resfail;
+ *	};
+ */
+static int nfs3_xdr_dec_link3res(struct rpc_rqst *req, struct xdr_stream *xdr,
+				 struct nfs3_linkres *result)
+{
+	enum nfs_stat status;
+	int error;
+
+	error = decode_nfsstat3(xdr, &status);
+	if (unlikely(error))
+		goto out;
+	error = decode_post_op_attr(xdr, result->fattr);
+	if (unlikely(error))
+		goto out;
+	error = decode_wcc_data(xdr, result->dir_attr);
+	if (unlikely(error))
+		goto out;
+	if (status != NFS3_OK)
+		goto out_status;
+out:
+	return error;
+out_status:
+	return nfs_stat_to_errno(status);
+}
+
+/**
+ * nfs3_decode_dirent - Decode a single NFSv3 directory entry stored in
+ *			the local page cache
+ * @xdr: XDR stream where entry resides
+ * @entry: buffer to fill in with entry data
+ * @plus: boolean indicating whether this should be a readdirplus entry
+ *
+ * Returns zero if successful, otherwise a negative errno value.
+ *
+ * This function is not invoked during READDIR reply decoding, but
+ * rather whenever an application invokes the getdents(2) system call
+ * on a directory already in our cache.
+ *
+ * 3.3.16  entry3
+ *
+ *	struct entry3 {
+ *		fileid3		fileid;
+ *		filename3	name;
+ *		cookie3		cookie;
+ *		entry3		*nextentry;
+ *	};
+ *
+ * 3.3.17  entryplus3
+ *	struct entryplus3 {
+ *		fileid3		fileid;
+ *		filename3	name;
+ *		cookie3		cookie;
+ *		post_op_attr	name_attributes;
+ *		post_op_fh3	name_handle;
+ *		entryplus3	*nextentry;
+ *	};
+ */
+int nfs3_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry,
+		       int plus)
+{
+	struct nfs_entry old = *entry;
+	__be32 *p;
+	int error;
+
+	p = xdr_inline_decode(xdr, 4);
+	if (unlikely(p == NULL))
+		goto out_overflow;
+	if (*p == xdr_zero) {
+		p = xdr_inline_decode(xdr, 4);
+		if (unlikely(p == NULL))
+			goto out_overflow;
+		if (*p == xdr_zero)
+			return -EAGAIN;
+		entry->eof = 1;
+		return -EBADCOOKIE;
 	}
-	p = xdr_decode_wcc_data(p, res->dir_attr);
-	return status;
+
+	error = decode_fileid3(xdr, &entry->ino);
+	if (unlikely(error))
+		return error;
+
+	error = decode_inline_filename3(xdr, &entry->name, &entry->len);
+	if (unlikely(error))
+		return error;
+
+	entry->prev_cookie = entry->cookie;
+	error = decode_cookie3(xdr, &entry->cookie);
+	if (unlikely(error))
+		return error;
+
+	entry->d_type = DT_UNKNOWN;
+
+	if (plus) {
+		entry->fattr->valid = 0;
+		error = decode_post_op_attr(xdr, entry->fattr);
+		if (unlikely(error))
+			return error;
+		if (entry->fattr->valid & NFS_ATTR_FATTR_V3)
+			entry->d_type = nfs_umode_to_dtype(entry->fattr->mode);
+
+		/* In fact, a post_op_fh3: */
+		p = xdr_inline_decode(xdr, 4);
+		if (unlikely(p == NULL))
+			goto out_overflow;
+		if (*p != xdr_zero) {
+			error = decode_nfs_fh3(xdr, entry->fh);
+			if (unlikely(error)) {
+				if (error == -E2BIG)
+					goto out_truncated;
+				return error;
+			}
+		} else
+			zero_nfs_fh3(entry->fh);
+	}
+
+	return 0;
+
+out_overflow:
+	print_overflow_msg(__func__, xdr);
+	return -EAGAIN;
+out_truncated:
+	dprintk("NFS: directory entry contains invalid file handle\n");
+	*entry = old;
+	return -EAGAIN;
 }
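/*
 * Sketch of the list framing nfs3_decode_dirent() walks above: every entry
 * is preceded by a 32-bit "value follows" boolean, and a zero boolean is
 * followed by the directory eof flag.  Words are shown already in host
 * order and each entry's payload is collapsed to a single word (the real
 * entries carry fileid, name, cookie and, for readdirplus, attributes and a
 * file handle).  Purely illustrative.
 */
#include <stdio.h>

#define SK_EAGAIN	1	/* no more entries in this reply */
#define SK_BADCOOKIE	2	/* list terminated with eof set */

static int sk_next_entry(const unsigned int *words, unsigned int nwords,
			 unsigned int *pos, unsigned int *payload)
{
	if (*pos >= nwords)
		return -SK_EAGAIN;
	if (words[(*pos)++] == 0) {		/* no further entries ... */
		if (*pos < nwords && words[(*pos)++] != 0)
			return -SK_BADCOOKIE;	/* ... and eof was set */
		return -SK_EAGAIN;
	}
	if (*pos >= nwords)
		return -SK_EAGAIN;
	*payload = words[(*pos)++];		/* fileid/name/cookie live here */
	return 0;
}

int main(void)
{
	/* two entries (7 and 9), then a terminator with eof = 1 */
	const unsigned int wire[] = { 1, 7, 1, 9, 0, 1 };
	unsigned int pos = 0, payload = 0;
	int err;

	while ((err = sk_next_entry(wire, sizeof(wire) / sizeof(wire[0]),
				    &pos, &payload)) == 0)
		printf("entry payload %u\n", payload);
	printf("stopped with %d\n", -err);	/* prints 2 (SK_BADCOOKIE) */
	return 0;
}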
 
 /*
- * Decode RENAME reply
+ * 3.3.16  READDIR3res
+ *
+ *	struct dirlist3 {
+ *		entry3		*entries;
+ *		bool		eof;
+ *	};
+ *
+ *	struct READDIR3resok {
+ *		post_op_attr	dir_attributes;
+ *		cookieverf3	cookieverf;
+ *		dirlist3	reply;
+ *	};
+ *
+ *	struct READDIR3resfail {
+ *		post_op_attr	dir_attributes;
+ *	};
+ *
+ *	union READDIR3res switch (nfsstat3 status) {
+ *	case NFS3_OK:
+ *		READDIR3resok	resok;
+ *	default:
+ *		READDIR3resfail	resfail;
+ *	};
+ *
+ * Read the directory contents into the page cache, but otherwise
+ * don't touch them.  The actual decoding is done by nfs3_decode_dirent()
+ * during subsequent nfs_readdir() calls.
  */
-static int
-nfs3_xdr_renameres(struct rpc_rqst *req, __be32 *p, struct nfs_renameres *res)
+static int decode_dirlist3(struct xdr_stream *xdr)
 {
-	int	status;
+	u32 recvd, pglen;
+	size_t hdrlen;
 
-	if ((status = ntohl(*p++)) != 0)
-		status = nfs_stat_to_errno(status);
-	p = xdr_decode_wcc_data(p, res->old_fattr);
-	p = xdr_decode_wcc_data(p, res->new_fattr);
-	return status;
+	pglen = xdr->buf->page_len;
+	hdrlen = (u8 *)xdr->p - (u8 *)xdr->iov->iov_base;
+	recvd = xdr->buf->len - hdrlen;
+	if (unlikely(pglen > recvd))
+		goto out_cheating;
+out:
+	xdr_read_pages(xdr, pglen);
+	return pglen;
+out_cheating:
+	dprintk("NFS: server cheating in readdir result: "
+		"pglen %u > recvd %u\n", pglen, recvd);
+	pglen = recvd;
+	goto out;
+}
+
+static int decode_readdir3resok(struct xdr_stream *xdr,
+				struct nfs3_readdirres *result)
+{
+	int error;
+
+	error = decode_post_op_attr(xdr, result->dir_attr);
+	if (unlikely(error))
+		goto out;
+	/* XXX: do we need to check if result->verf != NULL ? */
+	error = decode_cookieverf3(xdr, result->verf);
+	if (unlikely(error))
+		goto out;
+	error = decode_dirlist3(xdr);
+out:
+	return error;
+}
+
+static int nfs3_xdr_dec_readdir3res(struct rpc_rqst *req,
+				    struct xdr_stream *xdr,
+				    struct nfs3_readdirres *result)
+{
+	enum nfs_stat status;
+	int error;
+
+	error = decode_nfsstat3(xdr, &status);
+	if (unlikely(error))
+		goto out;
+	if (status != NFS3_OK)
+		goto out_default;
+	error = decode_readdir3resok(xdr, result);
+out:
+	return error;
+out_default:
+	error = decode_post_op_attr(xdr, result->dir_attr);
+	if (unlikely(error))
+		goto out;
+	return nfs_stat_to_errno(status);
 }
 
 /*
- * Decode LINK reply
+ * 3.3.18  FSSTAT3res
+ *
+ *	struct FSSTAT3resok {
+ *		post_op_attr	obj_attributes;
+ *		size3		tbytes;
+ *		size3		fbytes;
+ *		size3		abytes;
+ *		size3		tfiles;
+ *		size3		ffiles;
+ *		size3		afiles;
+ *		uint32		invarsec;
+ *	};
+ *
+ *	struct FSSTAT3resfail {
+ *		post_op_attr	obj_attributes;
+ *	};
+ *
+ *	union FSSTAT3res switch (nfsstat3 status) {
+ *	case NFS3_OK:
+ *		FSSTAT3resok	resok;
+ *	default:
+ *		FSSTAT3resfail	resfail;
+ *	};
  */
-static int
-nfs3_xdr_linkres(struct rpc_rqst *req, __be32 *p, struct nfs3_linkres *res)
+static int decode_fsstat3resok(struct xdr_stream *xdr,
+			       struct nfs_fsstat *result)
 {
-	int	status;
+	__be32 *p;
 
-	if ((status = ntohl(*p++)) != 0)
-		status = nfs_stat_to_errno(status);
-	p = xdr_decode_post_op_attr(p, res->fattr);
-	p = xdr_decode_wcc_data(p, res->dir_attr);
-	return status;
-}
-
-/*
- * Decode FSSTAT reply
- */
-static int
-nfs3_xdr_fsstatres(struct rpc_rqst *req, __be32 *p, struct nfs_fsstat *res)
-{
-	int		status;
-
-	status = ntohl(*p++);
-
-	p = xdr_decode_post_op_attr(p, res->fattr);
-	if (status != 0)
-		return nfs_stat_to_errno(status);
-
-	p = xdr_decode_hyper(p, &res->tbytes);
-	p = xdr_decode_hyper(p, &res->fbytes);
-	p = xdr_decode_hyper(p, &res->abytes);
-	p = xdr_decode_hyper(p, &res->tfiles);
-	p = xdr_decode_hyper(p, &res->ffiles);
-	p = xdr_decode_hyper(p, &res->afiles);
-
+	p = xdr_inline_decode(xdr, 8 * 6 + 4);
+	if (unlikely(p == NULL))
+		goto out_overflow;
+	p = xdr_decode_size3(p, &result->tbytes);
+	p = xdr_decode_size3(p, &result->fbytes);
+	p = xdr_decode_size3(p, &result->abytes);
+	p = xdr_decode_size3(p, &result->tfiles);
+	p = xdr_decode_size3(p, &result->ffiles);
+	xdr_decode_size3(p, &result->afiles);
 	/* ignore invarsec */
 	return 0;
+out_overflow:
+	print_overflow_msg(__func__, xdr);
+	return -EIO;
+}
+
+static int nfs3_xdr_dec_fsstat3res(struct rpc_rqst *req,
+				   struct xdr_stream *xdr,
+				   struct nfs_fsstat *result)
+{
+	enum nfs_stat status;
+	int error;
+
+	error = decode_nfsstat3(xdr, &status);
+	if (unlikely(error))
+		goto out;
+	error = decode_post_op_attr(xdr, result->fattr);
+	if (unlikely(error))
+		goto out;
+	if (status != NFS3_OK)
+		goto out_status;
+	error = decode_fsstat3resok(xdr, result);
+out:
+	return error;
+out_status:
+	return nfs_stat_to_errno(status);
 }
 
 /*
- * Decode FSINFO reply
+ * 3.3.19  FSINFO3res
+ *
+ *	struct FSINFO3resok {
+ *		post_op_attr	obj_attributes;
+ *		uint32		rtmax;
+ *		uint32		rtpref;
+ *		uint32		rtmult;
+ *		uint32		wtmax;
+ *		uint32		wtpref;
+ *		uint32		wtmult;
+ *		uint32		dtpref;
+ *		size3		maxfilesize;
+ *		nfstime3	time_delta;
+ *		uint32		properties;
+ *	};
+ *
+ *	struct FSINFO3resfail {
+ *		post_op_attr	obj_attributes;
+ *	};
+ *
+ *	union FSINFO3res switch (nfsstat3 status) {
+ *	case NFS3_OK:
+ *		FSINFO3resok	resok;
+ *	default:
+ *		FSINFO3resfail	resfail;
+ *	};
  */
-static int
-nfs3_xdr_fsinfores(struct rpc_rqst *req, __be32 *p, struct nfs_fsinfo *res)
+static int decode_fsinfo3resok(struct xdr_stream *xdr,
+			       struct nfs_fsinfo *result)
 {
-	int		status;
+	__be32 *p;
 
-	status = ntohl(*p++);
-
-	p = xdr_decode_post_op_attr(p, res->fattr);
-	if (status != 0)
-		return nfs_stat_to_errno(status);
-
-	res->rtmax  = ntohl(*p++);
-	res->rtpref = ntohl(*p++);
-	res->rtmult = ntohl(*p++);
-	res->wtmax  = ntohl(*p++);
-	res->wtpref = ntohl(*p++);
-	res->wtmult = ntohl(*p++);
-	res->dtpref = ntohl(*p++);
-	p = xdr_decode_hyper(p, &res->maxfilesize);
-	p = xdr_decode_time3(p, &res->time_delta);
+	p = xdr_inline_decode(xdr, 4 * 7 + 8 + 8 + 4);
+	if (unlikely(p == NULL))
+		goto out_overflow;
+	result->rtmax  = be32_to_cpup(p++);
+	result->rtpref = be32_to_cpup(p++);
+	result->rtmult = be32_to_cpup(p++);
+	result->wtmax  = be32_to_cpup(p++);
+	result->wtpref = be32_to_cpup(p++);
+	result->wtmult = be32_to_cpup(p++);
+	result->dtpref = be32_to_cpup(p++);
+	p = xdr_decode_size3(p, &result->maxfilesize);
+	xdr_decode_nfstime3(p, &result->time_delta);
 
 	/* ignore properties */
-	res->lease_time = 0;
+	result->lease_time = 0;
 	return 0;
+out_overflow:
+	print_overflow_msg(__func__, xdr);
+	return -EIO;
+}
+
+static int nfs3_xdr_dec_fsinfo3res(struct rpc_rqst *req,
+				   struct xdr_stream *xdr,
+				   struct nfs_fsinfo *result)
+{
+	enum nfs_stat status;
+	int error;
+
+	error = decode_nfsstat3(xdr, &status);
+	if (unlikely(error))
+		goto out;
+	error = decode_post_op_attr(xdr, result->fattr);
+	if (unlikely(error))
+		goto out;
+	if (status != NFS3_OK)
+		goto out_status;
+	error = decode_fsinfo3resok(xdr, result);
+out:
+	return error;
+out_status:
+	return nfs_stat_to_errno(status);
 }
 
 /*
- * Decode PATHCONF reply
+ * 3.3.20  PATHCONF3res
+ *
+ *	struct PATHCONF3resok {
+ *		post_op_attr	obj_attributes;
+ *		uint32		linkmax;
+ *		uint32		name_max;
+ *		bool		no_trunc;
+ *		bool		chown_restricted;
+ *		bool		case_insensitive;
+ *		bool		case_preserving;
+ *	};
+ *
+ *	struct PATHCONF3resfail {
+ *		post_op_attr	obj_attributes;
+ *	};
+ *
+ *	union PATHCONF3res switch (nfsstat3 status) {
+ *	case NFS3_OK:
+ *		PATHCONF3resok	resok;
+ *	default:
+ *		PATHCONF3resfail resfail;
+ *	};
  */
-static int
-nfs3_xdr_pathconfres(struct rpc_rqst *req, __be32 *p, struct nfs_pathconf *res)
+static int decode_pathconf3resok(struct xdr_stream *xdr,
+				 struct nfs_pathconf *result)
 {
-	int		status;
+	__be32 *p;
 
-	status = ntohl(*p++);
-
-	p = xdr_decode_post_op_attr(p, res->fattr);
-	if (status != 0)
-		return nfs_stat_to_errno(status);
-	res->max_link = ntohl(*p++);
-	res->max_namelen = ntohl(*p++);
-
+	p = xdr_inline_decode(xdr, 4 * 6);
+	if (unlikely(p == NULL))
+		goto out_overflow;
+	result->max_link = be32_to_cpup(p++);
+	result->max_namelen = be32_to_cpup(p);
 	/* ignore remaining fields */
 	return 0;
+out_overflow:
+	print_overflow_msg(__func__, xdr);
+	return -EIO;
+}
+
+static int nfs3_xdr_dec_pathconf3res(struct rpc_rqst *req,
+				     struct xdr_stream *xdr,
+				     struct nfs_pathconf *result)
+{
+	enum nfs_stat status;
+	int error;
+
+	error = decode_nfsstat3(xdr, &status);
+	if (unlikely(error))
+		goto out;
+	error = decode_post_op_attr(xdr, result->fattr);
+	if (unlikely(error))
+		goto out;
+	if (status != NFS3_OK)
+		goto out_status;
+	error = decode_pathconf3resok(xdr, result);
+out:
+	return error;
+out_status:
+	return nfs_stat_to_errno(status);
 }
 
 /*
- * Decode COMMIT reply
+ * 3.3.21  COMMIT3res
+ *
+ *	struct COMMIT3resok {
+ *		wcc_data	file_wcc;
+ *		writeverf3	verf;
+ *	};
+ *
+ *	struct COMMIT3resfail {
+ *		wcc_data	file_wcc;
+ *	};
+ *
+ *	union COMMIT3res switch (nfsstat3 status) {
+ *	case NFS3_OK:
+ *		COMMIT3resok	resok;
+ *	default:
+ *		COMMIT3resfail	resfail;
+ *	};
  */
-static int
-nfs3_xdr_commitres(struct rpc_rqst *req, __be32 *p, struct nfs_writeres *res)
+static int nfs3_xdr_dec_commit3res(struct rpc_rqst *req,
+				   struct xdr_stream *xdr,
+				   struct nfs_writeres *result)
 {
-	int		status;
+	enum nfs_stat status;
+	int error;
 
-	status = ntohl(*p++);
-	p = xdr_decode_wcc_data(p, res->fattr);
-	if (status != 0)
-		return nfs_stat_to_errno(status);
-
-	res->verf->verifier[0] = *p++;
-	res->verf->verifier[1] = *p++;
-	return 0;
+	error = decode_nfsstat3(xdr, &status);
+	if (unlikely(error))
+		goto out;
+	error = decode_wcc_data(xdr, result->fattr);
+	if (unlikely(error))
+		goto out;
+	if (status != NFS3_OK)
+		goto out_status;
+	error = decode_writeverf3(xdr, result->verf->verifier);
+out:
+	return error;
+out_status:
+	return nfs_stat_to_errno(status);
 }
 
 #ifdef CONFIG_NFS_V3_ACL
-/*
- * Decode GETACL reply
- */
-static int
-nfs3_xdr_getaclres(struct rpc_rqst *req, __be32 *p,
-		   struct nfs3_getaclres *res)
+
+static inline int decode_getacl3resok(struct xdr_stream *xdr,
+				      struct nfs3_getaclres *result)
 {
-	struct xdr_buf *buf = &req->rq_rcv_buf;
-	int status = ntohl(*p++);
 	struct posix_acl **acl;
 	unsigned int *aclcnt;
-	int err, base;
+	size_t hdrlen;
+	int error;
 
-	if (status != 0)
-		return nfs_stat_to_errno(status);
-	p = xdr_decode_post_op_attr(p, res->fattr);
-	res->mask = ntohl(*p++);
-	if (res->mask & ~(NFS_ACL|NFS_ACLCNT|NFS_DFACL|NFS_DFACLCNT))
-		return -EINVAL;
-	base = (char *)p - (char *)req->rq_rcv_buf.head->iov_base;
+	error = decode_post_op_attr(xdr, result->fattr);
+	if (unlikely(error))
+		goto out;
+	error = decode_uint32(xdr, &result->mask);
+	if (unlikely(error))
+		goto out;
+	error = -EINVAL;
+	if (result->mask & ~(NFS_ACL|NFS_ACLCNT|NFS_DFACL|NFS_DFACLCNT))
+		goto out;
 
-	acl = (res->mask & NFS_ACL) ? &res->acl_access : NULL;
-	aclcnt = (res->mask & NFS_ACLCNT) ? &res->acl_access_count : NULL;
-	err = nfsacl_decode(buf, base, aclcnt, acl);
+	hdrlen = (u8 *)xdr->p - (u8 *)xdr->iov->iov_base;
 
-	acl = (res->mask & NFS_DFACL) ? &res->acl_default : NULL;
-	aclcnt = (res->mask & NFS_DFACLCNT) ? &res->acl_default_count : NULL;
-	if (err > 0)
-		err = nfsacl_decode(buf, base + err, aclcnt, acl);
-	return (err > 0) ? 0 : err;
+	acl = NULL;
+	if (result->mask & NFS_ACL)
+		acl = &result->acl_access;
+	aclcnt = NULL;
+	if (result->mask & NFS_ACLCNT)
+		aclcnt = &result->acl_access_count;
+	error = nfsacl_decode(xdr->buf, hdrlen, aclcnt, acl);
+	if (unlikely(error <= 0))
+		goto out;
+
+	acl = NULL;
+	if (result->mask & NFS_DFACL)
+		acl = &result->acl_default;
+	aclcnt = NULL;
+	if (result->mask & NFS_DFACLCNT)
+		aclcnt = &result->acl_default_count;
+	error = nfsacl_decode(xdr->buf, hdrlen + error, aclcnt, acl);
+	if (unlikely(error <= 0))
+		return error;
+	error = 0;
+out:
+	return error;
 }
 
-/*
- * Decode setacl reply.
- */
-static int
-nfs3_xdr_setaclres(struct rpc_rqst *req, __be32 *p, struct nfs_fattr *fattr)
+static int nfs3_xdr_dec_getacl3res(struct rpc_rqst *req,
+				   struct xdr_stream *xdr,
+				   struct nfs3_getaclres *result)
 {
-	int status = ntohl(*p++);
+	enum nfs_stat status;
+	int error;
 
-	if (status)
-		return nfs_stat_to_errno(status);
-	xdr_decode_post_op_attr(p, fattr);
-	return 0;
+	error = decode_nfsstat3(xdr, &status);
+	if (unlikely(error))
+		goto out;
+	if (status != NFS3_OK)
+		goto out_default;
+	error = decode_getacl3resok(xdr, result);
+out:
+	return error;
+out_default:
+	return nfs_stat_to_errno(status);
 }
+
+static int nfs3_xdr_dec_setacl3res(struct rpc_rqst *req,
+				   struct xdr_stream *xdr,
+				   struct nfs_fattr *result)
+{
+	enum nfs_stat status;
+	int error;
+
+	error = decode_nfsstat3(xdr, &status);
+	if (unlikely(error))
+		goto out;
+	if (status != NFS3_OK)
+		goto out_default;
+	error = decode_post_op_attr(xdr, result);
+out:
+	return error;
+out_default:
+	return nfs_stat_to_errno(status);
+}
+
 #endif  /* CONFIG_NFS_V3_ACL */
 
 #define PROC(proc, argtype, restype, timer)				\
 [NFS3PROC_##proc] = {							\
 	.p_proc      = NFS3PROC_##proc,					\
-	.p_encode    = (kxdrproc_t) nfs3_xdr_##argtype,			\
-	.p_decode    = (kxdrproc_t) nfs3_xdr_##restype,			\
-	.p_arglen    = NFS3_##argtype##_sz,				\
-	.p_replen    = NFS3_##restype##_sz,				\
+	.p_encode    = (kxdreproc_t)nfs3_xdr_enc_##argtype##3args,	\
+	.p_decode    = (kxdrdproc_t)nfs3_xdr_dec_##restype##3res,	\
+	.p_arglen    = NFS3_##argtype##args_sz,				\
+	.p_replen    = NFS3_##restype##res_sz,				\
 	.p_timer     = timer,						\
 	.p_statidx   = NFS3PROC_##proc,					\
 	.p_name      = #proc,						\
 	}
 
 struct rpc_procinfo	nfs3_procedures[] = {
-  PROC(GETATTR,		fhandle,	attrstat, 1),
-  PROC(SETATTR, 	sattrargs,	wccstat, 0),
-  PROC(LOOKUP,		diropargs,	lookupres, 2),
-  PROC(ACCESS,		accessargs,	accessres, 1),
-  PROC(READLINK,	readlinkargs,	readlinkres, 3),
-  PROC(READ,		readargs,	readres, 3),
-  PROC(WRITE,		writeargs,	writeres, 4),
-  PROC(CREATE,		createargs,	createres, 0),
-  PROC(MKDIR,		mkdirargs,	createres, 0),
-  PROC(SYMLINK,		symlinkargs,	createres, 0),
-  PROC(MKNOD,		mknodargs,	createres, 0),
-  PROC(REMOVE,		removeargs,	removeres, 0),
-  PROC(RMDIR,		diropargs,	wccstat, 0),
-  PROC(RENAME,		renameargs,	renameres, 0),
-  PROC(LINK,		linkargs,	linkres, 0),
-  PROC(READDIR,		readdirargs,	readdirres, 3),
-  PROC(READDIRPLUS,	readdirargs,	readdirres, 3),
-  PROC(FSSTAT,		fhandle,	fsstatres, 0),
-  PROC(FSINFO,  	fhandle,	fsinfores, 0),
-  PROC(PATHCONF,	fhandle,	pathconfres, 0),
-  PROC(COMMIT,		commitargs,	commitres, 5),
+	PROC(GETATTR,		getattr,	getattr,	1),
+	PROC(SETATTR,		setattr,	setattr,	0),
+	PROC(LOOKUP,		lookup,		lookup,		2),
+	PROC(ACCESS,		access,		access,		1),
+	PROC(READLINK,		readlink,	readlink,	3),
+	PROC(READ,		read,		read,		3),
+	PROC(WRITE,		write,		write,		4),
+	PROC(CREATE,		create,		create,		0),
+	PROC(MKDIR,		mkdir,		create,		0),
+	PROC(SYMLINK,		symlink,	create,		0),
+	PROC(MKNOD,		mknod,		create,		0),
+	PROC(REMOVE,		remove,		remove,		0),
+	PROC(RMDIR,		lookup,		setattr,	0),
+	PROC(RENAME,		rename,		rename,		0),
+	PROC(LINK,		link,		link,		0),
+	PROC(READDIR,		readdir,	readdir,	3),
+	PROC(READDIRPLUS,	readdirplus,	readdir,	3),
+	PROC(FSSTAT,		getattr,	fsstat,		0),
+	PROC(FSINFO,		getattr,	fsinfo,		0),
+	PROC(PATHCONF,		getattr,	pathconf,	0),
+	PROC(COMMIT,		commit,		commit,		5),
 };
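/*
 * Sketch of the token-pasting pattern the PROC() macro above relies on: a
 * single argument is pasted (##) into the procedure number and the
 * encode/decode function names, and stringified (#) for the printable name.
 * Stand-alone demo with made-up procedure numbers and stub handlers, not
 * the kernel's rpc_procinfo.
 */
#include <stdio.h>

#define DEMO_GETATTR	1
#define DEMO_READ	6

static void enc_getattr(void) { }
static void dec_getattr(void) { }
static void enc_read(void)    { }
static void dec_read(void)    { }

struct demo_proc {
	int		number;
	void		(*encode)(void);
	void		(*decode)(void);
	const char	*name;
};

#define DEMO_PROC(proc, lower)			\
[DEMO_##proc] = {				\
	.number  = DEMO_##proc,			\
	.encode  = enc_##lower,			\
	.decode  = dec_##lower,			\
	.name    = #proc,			\
}

static const struct demo_proc demo_procs[] = {
	DEMO_PROC(GETATTR,	getattr),
	DEMO_PROC(READ,		read),
};

int main(void)
{
	printf("%s -> %d\n", demo_procs[DEMO_READ].name,
	       demo_procs[DEMO_READ].number);
	return 0;
}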
 
 struct rpc_version		nfs_version3 = {
@@ -1185,8 +2468,8 @@
 static struct rpc_procinfo	nfs3_acl_procedures[] = {
 	[ACLPROC3_GETACL] = {
 		.p_proc = ACLPROC3_GETACL,
-		.p_encode = (kxdrproc_t) nfs3_xdr_getaclargs,
-		.p_decode = (kxdrproc_t) nfs3_xdr_getaclres,
+		.p_encode = (kxdreproc_t)nfs3_xdr_enc_getacl3args,
+		.p_decode = (kxdrdproc_t)nfs3_xdr_dec_getacl3res,
 		.p_arglen = ACL3_getaclargs_sz,
 		.p_replen = ACL3_getaclres_sz,
 		.p_timer = 1,
@@ -1194,8 +2477,8 @@
 	},
 	[ACLPROC3_SETACL] = {
 		.p_proc = ACLPROC3_SETACL,
-		.p_encode = (kxdrproc_t) nfs3_xdr_setaclargs,
-		.p_decode = (kxdrproc_t) nfs3_xdr_setaclres,
+		.p_encode = (kxdreproc_t)nfs3_xdr_enc_setacl3args,
+		.p_decode = (kxdrdproc_t)nfs3_xdr_dec_setacl3res,
 		.p_arglen = ACL3_setaclargs_sz,
 		.p_replen = ACL3_setaclres_sz,
 		.p_timer = 0,
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index 9fa4963..7a747407 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -44,6 +44,7 @@
 	NFS4CLNT_RECLAIM_REBOOT,
 	NFS4CLNT_RECLAIM_NOGRACE,
 	NFS4CLNT_DELEGRETURN,
+	NFS4CLNT_LAYOUTRECALL,
 	NFS4CLNT_SESSION_RESET,
 	NFS4CLNT_RECALL_SLOT,
 };
@@ -109,7 +110,7 @@
 struct nfs4_state_owner {
 	struct nfs_unique_id so_owner_id;
 	struct nfs_server    *so_server;
-	struct rb_node	     so_client_node;
+	struct rb_node	     so_server_node;
 
 	struct rpc_cred	     *so_cred;	 /* Associated cred */
 
@@ -227,12 +228,6 @@
 extern const struct dentry_operations nfs4_dentry_operations;
 extern const struct inode_operations nfs4_dir_inode_operations;
 
-/* inode.c */
-extern ssize_t nfs4_getxattr(struct dentry *, const char *, void *, size_t);
-extern int nfs4_setxattr(struct dentry *, const char *, const void *, size_t, int);
-extern ssize_t nfs4_listxattr(struct dentry *, char *, size_t);
-
-
 /* nfs4proc.c */
 extern int nfs4_proc_setclientid(struct nfs_client *, u32, unsigned short, struct rpc_cred *, struct nfs4_setclientid_res *);
 extern int nfs4_proc_setclientid_confirm(struct nfs_client *, struct nfs4_setclientid_res *arg, struct rpc_cred *);
@@ -241,11 +236,12 @@
 extern int nfs4_proc_renew(struct nfs_client *, struct rpc_cred *);
 extern int nfs4_init_clientid(struct nfs_client *, struct rpc_cred *);
 extern int nfs41_init_clientid(struct nfs_client *, struct rpc_cred *);
-extern int nfs4_do_close(struct path *path, struct nfs4_state *state, gfp_t gfp_mask, int wait);
+extern int nfs4_do_close(struct path *path, struct nfs4_state *state, gfp_t gfp_mask, int wait, bool roc);
 extern int nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle);
 extern int nfs4_proc_fs_locations(struct inode *dir, const struct qstr *name,
 		struct nfs4_fs_locations *fs_locations, struct page *page);
 extern void nfs4_release_lockowner(const struct nfs4_lock_state *);
+extern const struct xattr_handler *nfs4_xattr_handlers[];
 
 #if defined(CONFIG_NFS_V4_1)
 static inline struct nfs4_session *nfs4_get_session(const struct nfs_server *server)
@@ -331,7 +327,6 @@
 extern const nfs4_stateid zero_stateid;
 
 /* nfs4xdr.c */
-extern __be32 *nfs4_decode_dirent(struct xdr_stream *, struct nfs_entry *, struct nfs_server *, int);
 extern struct rpc_procinfo nfs4_procedures[];
 
 struct nfs4_mount_data;
diff --git a/fs/nfs/nfs4filelayout.c b/fs/nfs/nfs4filelayout.c
index 2e92f0d..23f930c 100644
--- a/fs/nfs/nfs4filelayout.c
+++ b/fs/nfs/nfs4filelayout.c
@@ -82,7 +82,7 @@
 {
 	struct nfs4_file_layout_dsaddr *dsaddr;
 	int status = -EINVAL;
-	struct nfs_server *nfss = NFS_SERVER(lo->inode);
+	struct nfs_server *nfss = NFS_SERVER(lo->plh_inode);
 
 	dprintk("--> %s\n", __func__);
 
@@ -101,7 +101,7 @@
 	/* find and reference the deviceid */
 	dsaddr = nfs4_fl_find_get_deviceid(nfss->nfs_client, id);
 	if (dsaddr == NULL) {
-		dsaddr = get_device_info(lo->inode, id);
+		dsaddr = get_device_info(lo->plh_inode, id);
 		if (dsaddr == NULL)
 			goto out;
 	}
@@ -243,7 +243,7 @@
 static void
 filelayout_free_lseg(struct pnfs_layout_segment *lseg)
 {
-	struct nfs_server *nfss = NFS_SERVER(lseg->layout->inode);
+	struct nfs_server *nfss = NFS_SERVER(lseg->pls_layout->plh_inode);
 	struct nfs4_filelayout_segment *fl = FILELAYOUT_LSEG(lseg);
 
 	dprintk("--> %s\n", __func__);
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 4435e5e..9d992b0 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -49,6 +49,7 @@
 #include <linux/mount.h>
 #include <linux/module.h>
 #include <linux/sunrpc/bc_xprt.h>
+#include <linux/xattr.h>
 
 #include "nfs4_fs.h"
 #include "delegation.h"
@@ -355,9 +356,9 @@
 }
 
 /*
- * Signal state manager thread if session is drained
+ * Signal state manager thread if session fore channel is drained
  */
-static void nfs41_check_drain_session_complete(struct nfs4_session *ses)
+static void nfs4_check_drain_fc_complete(struct nfs4_session *ses)
 {
 	struct rpc_task *task;
 
@@ -371,8 +372,20 @@
 	if (ses->fc_slot_table.highest_used_slotid != -1)
 		return;
 
-	dprintk("%s COMPLETE: Session Drained\n", __func__);
-	complete(&ses->complete);
+	dprintk("%s COMPLETE: Session Fore Channel Drained\n", __func__);
+	complete(&ses->fc_slot_table.complete);
+}
+
+/*
+ * Signal state manager thread if session back channel is drained
+ */
+void nfs4_check_drain_bc_complete(struct nfs4_session *ses)
+{
+	if (!test_bit(NFS4_SESSION_DRAINING, &ses->session_state) ||
+	    ses->bc_slot_table.highest_used_slotid != -1)
+		return;
+	dprintk("%s COMPLETE: Session Back Channel Drained\n", __func__);
+	complete(&ses->bc_slot_table.complete);
 }
 
 static void nfs41_sequence_free_slot(struct nfs4_sequence_res *res)
@@ -389,7 +402,7 @@
 
 	spin_lock(&tbl->slot_tbl_lock);
 	nfs4_free_slot(tbl, res->sr_slot);
-	nfs41_check_drain_session_complete(res->sr_session);
+	nfs4_check_drain_fc_complete(res->sr_session);
 	spin_unlock(&tbl->slot_tbl_lock);
 	res->sr_slot = NULL;
 }
@@ -1826,6 +1839,8 @@
 	struct nfs_closeres res;
 	struct nfs_fattr fattr;
 	unsigned long timestamp;
+	bool roc;
+	u32 roc_barrier;
 };
 
 static void nfs4_free_closedata(void *data)
@@ -1833,6 +1848,8 @@
 	struct nfs4_closedata *calldata = data;
 	struct nfs4_state_owner *sp = calldata->state->owner;
 
+	if (calldata->roc)
+		pnfs_roc_release(calldata->state->inode);
 	nfs4_put_open_state(calldata->state);
 	nfs_free_seqid(calldata->arg.seqid);
 	nfs4_put_state_owner(sp);
@@ -1865,6 +1882,9 @@
 	 */
 	switch (task->tk_status) {
 		case 0:
+			if (calldata->roc)
+				pnfs_roc_set_barrier(state->inode,
+						     calldata->roc_barrier);
 			nfs_set_open_stateid(state, &calldata->res.stateid, 0);
 			renew_lease(server, calldata->timestamp);
 			nfs4_close_clear_stateid_flags(state,
@@ -1917,8 +1937,15 @@
 		return;
 	}
 
-	if (calldata->arg.fmode == 0)
+	if (calldata->arg.fmode == 0) {
 		task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE];
+		if (calldata->roc &&
+		    pnfs_roc_drain(calldata->inode, &calldata->roc_barrier)) {
+			rpc_sleep_on(&NFS_SERVER(calldata->inode)->roc_rpcwaitq,
+				     task, NULL);
+			return;
+		}
+	}
 
 	nfs_fattr_init(calldata->res.fattr);
 	calldata->timestamp = jiffies;
@@ -1946,7 +1973,7 @@
  *
  * NOTE: Caller must be holding the sp->so_owner semaphore!
  */
-int nfs4_do_close(struct path *path, struct nfs4_state *state, gfp_t gfp_mask, int wait)
+int nfs4_do_close(struct path *path, struct nfs4_state *state, gfp_t gfp_mask, int wait, bool roc)
 {
 	struct nfs_server *server = NFS_SERVER(state->inode);
 	struct nfs4_closedata *calldata;
@@ -1981,11 +2008,12 @@
 	calldata->res.fattr = &calldata->fattr;
 	calldata->res.seqid = calldata->arg.seqid;
 	calldata->res.server = server;
+	calldata->roc = roc;
 	path_get(path);
 	calldata->path = *path;
 
-	msg.rpc_argp = &calldata->arg,
-	msg.rpc_resp = &calldata->res,
+	msg.rpc_argp = &calldata->arg;
+	msg.rpc_resp = &calldata->res;
 	task_setup_data.callback_data = calldata;
 	task = rpc_run_task(&task_setup_data);
 	if (IS_ERR(task))
@@ -1998,6 +2026,8 @@
 out_free_calldata:
 	kfree(calldata);
 out:
+	if (roc)
+		pnfs_roc_release(state->inode);
 	nfs4_put_open_state(state);
 	nfs4_put_state_owner(sp);
 	return status;
@@ -2486,6 +2516,7 @@
 		path = &ctx->path;
 		fmode = ctx->mode;
 	}
+	sattr->ia_mode &= ~current_umask();
 	state = nfs4_do_open(dir, path, fmode, flags, sattr, cred);
 	d_drop(dentry);
 	if (IS_ERR(state)) {
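/*
 * Worked example of the mode arithmetic added above: before the open/create
 * request is sent, the requested mode is masked with the process umask on
 * the client (sattr->ia_mode &= ~current_umask()).  Plain user-space sketch
 * of the same arithmetic.
 */
#include <stdio.h>

int main(void)
{
	unsigned int requested = 0666;	/* mode asked for at create time */
	unsigned int umask_bits = 022;	/* a typical process umask */

	printf("effective mode: %o\n", requested & ~umask_bits);	/* 644 */
	return 0;
}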
@@ -2816,6 +2847,8 @@
 {
 	struct nfs4_exception exception = { };
 	int err;
+
+	sattr->ia_mode &= ~current_umask();
 	do {
 		err = nfs4_handle_exception(NFS_SERVER(dir),
 				_nfs4_proc_mkdir(dir, dentry, sattr),
@@ -2916,6 +2949,8 @@
 {
 	struct nfs4_exception exception = { };
 	int err;
+
+	sattr->ia_mode &= ~current_umask();
 	do {
 		err = nfs4_handle_exception(NFS_SERVER(dir),
 				_nfs4_proc_mknod(dir, dentry, sattr, rdev),
@@ -3478,6 +3513,7 @@
 	struct nfs4_setclientid setclientid = {
 		.sc_verifier = &sc_verifier,
 		.sc_prog = program,
+		.sc_cb_ident = clp->cl_cb_ident,
 	};
 	struct rpc_message msg = {
 		.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID],
@@ -3517,7 +3553,7 @@
 		if (signalled())
 			break;
 		if (loop++ & 1)
-			ssleep(clp->cl_lease_time + 1);
+			ssleep(clp->cl_lease_time / HZ + 1);
 		else
 			if (++clp->cl_id_uniquifier == 0)
 				break;
@@ -3663,8 +3699,8 @@
 	data->rpc_status = 0;
 
 	task_setup_data.callback_data = data;
-	msg.rpc_argp = &data->args,
-	msg.rpc_resp = &data->res,
+	msg.rpc_argp = &data->args;
+	msg.rpc_resp = &data->res;
 	task = rpc_run_task(&task_setup_data);
 	if (IS_ERR(task))
 		return PTR_ERR(task);
@@ -3743,6 +3779,7 @@
 		goto out;
 	lsp = request->fl_u.nfs4_fl.owner;
 	arg.lock_owner.id = lsp->ls_id.id;
+	arg.lock_owner.s_dev = server->s_dev;
 	status = nfs4_call_sync(server, &msg, &arg, &res, 1);
 	switch (status) {
 		case 0:
@@ -3908,8 +3945,8 @@
 		return ERR_PTR(-ENOMEM);
 	}
 
-	msg.rpc_argp = &data->arg,
-	msg.rpc_resp = &data->res,
+	msg.rpc_argp = &data->arg;
+	msg.rpc_resp = &data->res;
 	task_setup_data.callback_data = data;
 	return rpc_run_task(&task_setup_data);
 }
@@ -3988,6 +4025,7 @@
 	p->arg.lock_stateid = &lsp->ls_stateid;
 	p->arg.lock_owner.clientid = server->nfs_client->cl_clientid;
 	p->arg.lock_owner.id = lsp->ls_id.id;
+	p->arg.lock_owner.s_dev = server->s_dev;
 	p->res.lock_seqid = p->arg.lock_seqid;
 	p->lsp = lsp;
 	p->server = server;
@@ -4145,8 +4183,8 @@
 			data->arg.reclaim = NFS_LOCK_RECLAIM;
 		task_setup_data.callback_ops = &nfs4_recover_lock_ops;
 	}
-	msg.rpc_argp = &data->arg,
-	msg.rpc_resp = &data->res,
+	msg.rpc_argp = &data->arg;
+	msg.rpc_resp = &data->res;
 	task_setup_data.callback_data = data;
 	task = rpc_run_task(&task_setup_data);
 	if (IS_ERR(task))
@@ -4392,48 +4430,43 @@
 		return;
 	args->lock_owner.clientid = server->nfs_client->cl_clientid;
 	args->lock_owner.id = lsp->ls_id.id;
+	args->lock_owner.s_dev = server->s_dev;
 	msg.rpc_argp = args;
 	rpc_call_async(server->client, &msg, 0, &nfs4_release_lockowner_ops, args);
 }
 
 #define XATTR_NAME_NFSV4_ACL "system.nfs4_acl"
 
-int nfs4_setxattr(struct dentry *dentry, const char *key, const void *buf,
-		size_t buflen, int flags)
+static int nfs4_xattr_set_nfs4_acl(struct dentry *dentry, const char *key,
+				   const void *buf, size_t buflen,
+				   int flags, int type)
 {
-	struct inode *inode = dentry->d_inode;
+	if (strcmp(key, "") != 0)
+		return -EINVAL;
 
-	if (strcmp(key, XATTR_NAME_NFSV4_ACL) != 0)
-		return -EOPNOTSUPP;
-
-	return nfs4_proc_set_acl(inode, buf, buflen);
+	return nfs4_proc_set_acl(dentry->d_inode, buf, buflen);
 }
 
-/* The getxattr man page suggests returning -ENODATA for unknown attributes,
- * and that's what we'll do for e.g. user attributes that haven't been set.
- * But we'll follow ext2/ext3's lead by returning -EOPNOTSUPP for unsupported
- * attributes in kernel-managed attribute namespaces. */
-ssize_t nfs4_getxattr(struct dentry *dentry, const char *key, void *buf,
-		size_t buflen)
+static int nfs4_xattr_get_nfs4_acl(struct dentry *dentry, const char *key,
+				   void *buf, size_t buflen, int type)
 {
-	struct inode *inode = dentry->d_inode;
+	if (strcmp(key, "") != 0)
+		return -EINVAL;
 
-	if (strcmp(key, XATTR_NAME_NFSV4_ACL) != 0)
-		return -EOPNOTSUPP;
-
-	return nfs4_proc_get_acl(inode, buf, buflen);
+	return nfs4_proc_get_acl(dentry->d_inode, buf, buflen);
 }
 
-ssize_t nfs4_listxattr(struct dentry *dentry, char *buf, size_t buflen)
+static size_t nfs4_xattr_list_nfs4_acl(struct dentry *dentry, char *list,
+				       size_t list_len, const char *name,
+				       size_t name_len, int type)
 {
-	size_t len = strlen(XATTR_NAME_NFSV4_ACL) + 1;
+	size_t len = sizeof(XATTR_NAME_NFSV4_ACL);
 
 	if (!nfs4_server_supports_acls(NFS_SERVER(dentry->d_inode)))
 		return 0;
-	if (buf && buflen < len)
-		return -ERANGE;
-	if (buf)
-		memcpy(buf, XATTR_NAME_NFSV4_ACL, len);
+
+	if (list && len <= list_len)
+		memcpy(list, XATTR_NAME_NFSV4_ACL, len);
 	return len;
 }
 
@@ -4486,6 +4519,25 @@
 
 #ifdef CONFIG_NFS_V4_1
 /*
+ * Check the exchange flags returned by the server: reject flag bits outside
+ * the allowed reply mask, the mutually exclusive combination of PNFS and
+ * NON_PNFS, and replies that set none of the NON_PNFS, PNFS, or DS flags.
+ */
+static int nfs4_check_cl_exchange_flags(u32 flags)
+{
+	if (flags & ~EXCHGID4_FLAG_MASK_R)
+		goto out_inval;
+	if ((flags & EXCHGID4_FLAG_USE_PNFS_MDS) &&
+	    (flags & EXCHGID4_FLAG_USE_NON_PNFS))
+		goto out_inval;
+	if (!(flags & (EXCHGID4_FLAG_MASK_PNFS)))
+		goto out_inval;
+	return NFS_OK;
+out_inval:
+	return -NFS4ERR_INVAL;
+}
+
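/*
 * Sketch of the EXCHANGE_ID flag validation introduced above in
 * nfs4_check_cl_exchange_flags(): reject bits outside the allowed mask, the
 * contradictory PNFS_MDS + NON_PNFS combination, and replies that claim no
 * pNFS role at all.  The flag values below are placeholders invented for
 * the sketch, not the protocol's assigned bits.
 */
#include <stdio.h>

#define SK_FLAG_NON_PNFS	0x1u
#define SK_FLAG_PNFS_MDS	0x2u
#define SK_FLAG_PNFS_DS		0x4u
#define SK_FLAG_ROLE_MASK	(SK_FLAG_NON_PNFS | SK_FLAG_PNFS_MDS | SK_FLAG_PNFS_DS)
#define SK_FLAG_VALID_MASK	(SK_FLAG_ROLE_MASK | 0x8u)	/* 0x8: some other defined bit */

static int sk_check_exchange_flags(unsigned int flags)
{
	if (flags & ~SK_FLAG_VALID_MASK)
		return -1;			/* undefined bit set */
	if ((flags & SK_FLAG_PNFS_MDS) && (flags & SK_FLAG_NON_PNFS))
		return -1;			/* mutually exclusive roles */
	if (!(flags & SK_FLAG_ROLE_MASK))
		return -1;			/* server must pick a role */
	return 0;
}

int main(void)
{
	printf("%d %d %d\n",
	       sk_check_exchange_flags(SK_FLAG_PNFS_MDS),			/* 0 */
	       sk_check_exchange_flags(SK_FLAG_PNFS_MDS | SK_FLAG_NON_PNFS),	/* -1 */
	       sk_check_exchange_flags(0x8u));					/* -1 */
	return 0;
}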
+/*
  * nfs4_proc_exchange_id()
  *
  * Since the clientid has expired, all compounds using sessions
@@ -4498,7 +4550,7 @@
 	nfs4_verifier verifier;
 	struct nfs41_exchange_id_args args = {
 		.client = clp,
-		.flags = clp->cl_exchange_flags,
+		.flags = EXCHGID4_FLAG_SUPP_MOVED_REFER,
 	};
 	struct nfs41_exchange_id_res res = {
 		.client = clp,
@@ -4515,9 +4567,6 @@
 	dprintk("--> %s\n", __func__);
 	BUG_ON(clp == NULL);
 
-	/* Remove server-only flags */
-	args.flags &= ~EXCHGID4_FLAG_CONFIRMED_R;
-
 	p = (u32 *)verifier.data;
 	*p++ = htonl((u32)clp->cl_boot_time.tv_sec);
 	*p = htonl((u32)clp->cl_boot_time.tv_nsec);
@@ -4543,6 +4592,7 @@
 			break;
 	}
 
+	status = nfs4_check_cl_exchange_flags(clp->cl_exchange_flags);
 	dprintk("<-- %s status= %d\n", __func__, status);
 	return status;
 }
@@ -4776,17 +4826,17 @@
 	if (!session)
 		return NULL;
 
-	init_completion(&session->complete);
-
 	tbl = &session->fc_slot_table;
 	tbl->highest_used_slotid = -1;
 	spin_lock_init(&tbl->slot_tbl_lock);
 	rpc_init_priority_wait_queue(&tbl->slot_tbl_waitq, "ForeChannel Slot table");
+	init_completion(&tbl->complete);
 
 	tbl = &session->bc_slot_table;
 	tbl->highest_used_slotid = -1;
 	spin_lock_init(&tbl->slot_tbl_lock);
 	rpc_init_wait_queue(&tbl->slot_tbl_waitq, "BackChannel Slot table");
+	init_completion(&tbl->complete);
 
 	session->session_state = 1<<NFS4_SESSION_INITING;
 
@@ -5280,13 +5330,23 @@
 nfs4_layoutget_prepare(struct rpc_task *task, void *calldata)
 {
 	struct nfs4_layoutget *lgp = calldata;
-	struct inode *ino = lgp->args.inode;
-	struct nfs_server *server = NFS_SERVER(ino);
+	struct nfs_server *server = NFS_SERVER(lgp->args.inode);
 
 	dprintk("--> %s\n", __func__);
+	/* Note there is a race here, where a CB_LAYOUTRECALL can come in
+	 * right now covering the LAYOUTGET we are about to send.
+	 * However, that is not so catastrophic, and there seems
+	 * to be no way to prevent it completely.
+	 */
 	if (nfs4_setup_sequence(server, &lgp->args.seq_args,
 				&lgp->res.seq_res, 0, task))
 		return;
+	if (pnfs_choose_layoutget_stateid(&lgp->args.stateid,
+					  NFS_I(lgp->args.inode)->layout,
+					  lgp->args.ctx->state)) {
+		rpc_exit(task, NFS4_OK);
+		return;
+	}
 	rpc_call_start(task);
 }
 
@@ -5313,7 +5373,6 @@
 			return;
 		}
 	}
-	lgp->status = task->tk_status;
 	dprintk("<-- %s\n", __func__);
 }
 
@@ -5322,7 +5381,6 @@
 	struct nfs4_layoutget *lgp = calldata;
 
 	dprintk("--> %s\n", __func__);
-	put_layout_hdr(lgp->args.inode);
 	if (lgp->res.layout.buf != NULL)
 		free_page((unsigned long) lgp->res.layout.buf);
 	put_nfs_open_context(lgp->args.ctx);
@@ -5367,13 +5425,10 @@
 	if (IS_ERR(task))
 		return PTR_ERR(task);
 	status = nfs4_wait_for_completion_rpc_task(task);
-	if (status != 0)
-		goto out;
-	status = lgp->status;
-	if (status != 0)
-		goto out;
-	status = pnfs_layout_process(lgp);
-out:
+	if (status == 0)
+		status = task->tk_status;
+	if (status == 0)
+		status = pnfs_layout_process(lgp);
 	rpc_put_task(task);
 	dprintk("<-- %s status=%d\n", __func__, status);
 	return status;
@@ -5504,9 +5559,10 @@
 	.permission	= nfs_permission,
 	.getattr	= nfs_getattr,
 	.setattr	= nfs_setattr,
-	.getxattr	= nfs4_getxattr,
-	.setxattr	= nfs4_setxattr,
-	.listxattr	= nfs4_listxattr,
+	.getxattr	= generic_getxattr,
+	.setxattr	= generic_setxattr,
+	.listxattr	= generic_listxattr,
+	.removexattr	= generic_removexattr,
 };
 
 const struct nfs_rpc_ops nfs_v4_clientops = {
@@ -5551,6 +5607,18 @@
 	.open_context	= nfs4_atomic_open,
 };
 
+static const struct xattr_handler nfs4_xattr_nfs4_acl_handler = {
+	.prefix	= XATTR_NAME_NFSV4_ACL,
+	.list	= nfs4_xattr_list_nfs4_acl,
+	.get	= nfs4_xattr_get_nfs4_acl,
+	.set	= nfs4_xattr_set_nfs4_acl,
+};
+
+const struct xattr_handler *nfs4_xattr_handlers[] = {
+	&nfs4_xattr_nfs4_acl_handler,
+	NULL
+};
+
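/*
 * Sketch of the handler-table dispatch that the generic_*xattr() entry
 * points perform for the nfs4_xattr_handlers[] table registered above: the
 * attribute name is matched against each handler's prefix and the remainder
 * is handed to that handler.  Minimal user-space model; the kernel's
 * struct xattr_handler and lookup path carry considerably more state.
 */
#include <stdio.h>
#include <string.h>

struct sk_xattr_handler {
	const char *prefix;
	int (*get)(const char *rest, void *buf, size_t len);
};

static int sk_get_acl(const char *rest, void *buf, size_t len)
{
	(void)buf; (void)len;
	return *rest == '\0' ? 42 : -1;	/* only the bare name is accepted */
}

static const struct sk_xattr_handler sk_acl_handler = {
	.prefix	= "system.nfs4_acl",
	.get	= sk_get_acl,
};

static const struct sk_xattr_handler *sk_handlers[] = {
	&sk_acl_handler,
	NULL,
};

static int sk_getxattr(const char *name, void *buf, size_t len)
{
	const struct sk_xattr_handler *const *h;

	for (h = sk_handlers; *h != NULL; h++) {
		size_t plen = strlen((*h)->prefix);

		if (strncmp(name, (*h)->prefix, plen) == 0)
			return (*h)->get(name + plen, buf, len);
	}
	return -1;			/* no handler for this namespace */
}

int main(void)
{
	printf("%d\n", sk_getxattr("system.nfs4_acl", NULL, 0));	/* 42 */
	return 0;
}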
 /*
  * Local variables:
  *  c-basic-offset: 8
diff --git a/fs/nfs/nfs4renewd.c b/fs/nfs/nfs4renewd.c
index 72b6c58..402143d 100644
--- a/fs/nfs/nfs4renewd.c
+++ b/fs/nfs/nfs4renewd.c
@@ -63,9 +63,14 @@
 
 	ops = clp->cl_mvops->state_renewal_ops;
 	dprintk("%s: start\n", __func__);
-	/* Are there any active superblocks? */
-	if (list_empty(&clp->cl_superblocks))
+
+	rcu_read_lock();
+	if (list_empty(&clp->cl_superblocks)) {
+		rcu_read_unlock();
 		goto out;
+	}
+	rcu_read_unlock();
+
 	spin_lock(&clp->cl_lock);
 	lease = clp->cl_lease_time;
 	last = clp->cl_last_renewal;
@@ -75,7 +80,7 @@
 		cred = ops->get_state_renewal_cred_locked(clp);
 		spin_unlock(&clp->cl_lock);
 		if (cred == NULL) {
-			if (list_empty(&clp->cl_delegations)) {
+			if (!nfs_delegations_present(clp)) {
 				set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
 				goto out;
 			}
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index f575a31..2336d53 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -105,14 +105,17 @@
 		put_rpccred(cred);
 }
 
-struct rpc_cred *nfs4_get_renew_cred_locked(struct nfs_client *clp)
+static struct rpc_cred *
+nfs4_get_renew_cred_server_locked(struct nfs_server *server)
 {
+	struct rpc_cred *cred = NULL;
 	struct nfs4_state_owner *sp;
 	struct rb_node *pos;
-	struct rpc_cred *cred = NULL;
 
-	for (pos = rb_first(&clp->cl_state_owners); pos != NULL; pos = rb_next(pos)) {
-		sp = rb_entry(pos, struct nfs4_state_owner, so_client_node);
+	for (pos = rb_first(&server->state_owners);
+	     pos != NULL;
+	     pos = rb_next(pos)) {
+		sp = rb_entry(pos, struct nfs4_state_owner, so_server_node);
 		if (list_empty(&sp->so_states))
 			continue;
 		cred = get_rpccred(sp->so_cred);
@@ -121,6 +124,28 @@
 	return cred;
 }
 
+/**
+ * nfs4_get_renew_cred_locked - Acquire credential for a renew operation
+ * @clp: client state handle
+ *
+ * Returns an rpc_cred with reference count bumped, or NULL.
+ * Caller must hold clp->cl_lock.
+ */
+struct rpc_cred *nfs4_get_renew_cred_locked(struct nfs_client *clp)
+{
+	struct rpc_cred *cred = NULL;
+	struct nfs_server *server;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
+		cred = nfs4_get_renew_cred_server_locked(server);
+		if (cred != NULL)
+			break;
+	}
+	rcu_read_unlock();
+	return cred;
+}
+
 #if defined(CONFIG_NFS_V4_1)
 
 static int nfs41_setup_state_renewal(struct nfs_client *clp)
@@ -142,6 +167,11 @@
 	return status;
 }
 
+/*
+ * Back channel returns NFS4ERR_DELAY for new requests when
+ * NFS4_SESSION_DRAINING is set, so there is no work to be done when
+ * draining is ended.
+ */
 static void nfs4_end_drain_session(struct nfs_client *clp)
 {
 	struct nfs4_session *ses = clp->cl_session;
@@ -165,22 +195,32 @@
 	}
 }
 
-static int nfs4_begin_drain_session(struct nfs_client *clp)
+static int nfs4_wait_on_slot_tbl(struct nfs4_slot_table *tbl)
 {
-	struct nfs4_session *ses = clp->cl_session;
-	struct nfs4_slot_table *tbl = &ses->fc_slot_table;
-
 	spin_lock(&tbl->slot_tbl_lock);
-	set_bit(NFS4_SESSION_DRAINING, &ses->session_state);
 	if (tbl->highest_used_slotid != -1) {
-		INIT_COMPLETION(ses->complete);
+		INIT_COMPLETION(tbl->complete);
 		spin_unlock(&tbl->slot_tbl_lock);
-		return wait_for_completion_interruptible(&ses->complete);
+		return wait_for_completion_interruptible(&tbl->complete);
 	}
 	spin_unlock(&tbl->slot_tbl_lock);
 	return 0;
 }
 
+static int nfs4_begin_drain_session(struct nfs_client *clp)
+{
+	struct nfs4_session *ses = clp->cl_session;
+	int ret = 0;
+
+	set_bit(NFS4_SESSION_DRAINING, &ses->session_state);
+	/* back channel */
+	ret = nfs4_wait_on_slot_tbl(&ses->bc_slot_table);
+	if (ret)
+		return ret;
+	/* fore channel */
+	return nfs4_wait_on_slot_tbl(&ses->fc_slot_table);
+}
+
 int nfs41_init_clientid(struct nfs_client *clp, struct rpc_cred *cred)
 {
 	int status;
@@ -192,6 +232,12 @@
 	status = nfs4_proc_create_session(clp);
 	if (status != 0)
 		goto out;
+	status = nfs4_set_callback_sessionid(clp);
+	if (status != 0) {
+		printk(KERN_WARNING "Sessionid not set. No callback service\n");
+		nfs_callback_down(1);
+		status = 0;
+	}
 	nfs41_setup_state_renewal(clp);
 	nfs_mark_client_ready(clp, NFS_CS_READY);
 out:
@@ -210,28 +256,56 @@
 
 #endif /* CONFIG_NFS_V4_1 */
 
-struct rpc_cred *nfs4_get_setclientid_cred(struct nfs_client *clp)
+static struct rpc_cred *
+nfs4_get_setclientid_cred_server(struct nfs_server *server)
 {
+	struct nfs_client *clp = server->nfs_client;
+	struct rpc_cred *cred = NULL;
 	struct nfs4_state_owner *sp;
 	struct rb_node *pos;
-	struct rpc_cred *cred;
 
 	spin_lock(&clp->cl_lock);
-	cred = nfs4_get_machine_cred_locked(clp);
-	if (cred != NULL)
-		goto out;
-	pos = rb_first(&clp->cl_state_owners);
+	pos = rb_first(&server->state_owners);
 	if (pos != NULL) {
-		sp = rb_entry(pos, struct nfs4_state_owner, so_client_node);
+		sp = rb_entry(pos, struct nfs4_state_owner, so_server_node);
 		cred = get_rpccred(sp->so_cred);
 	}
-out:
 	spin_unlock(&clp->cl_lock);
 	return cred;
 }
 
-static void nfs_alloc_unique_id(struct rb_root *root, struct nfs_unique_id *new,
-		__u64 minval, int maxbits)
+/**
+ * nfs4_get_setclientid_cred - Acquire credential for a setclientid operation
+ * @clp: client state handle
+ *
+ * Returns an rpc_cred with reference count bumped, or NULL.
+ */
+struct rpc_cred *nfs4_get_setclientid_cred(struct nfs_client *clp)
+{
+	struct nfs_server *server;
+	struct rpc_cred *cred;
+
+	spin_lock(&clp->cl_lock);
+	cred = nfs4_get_machine_cred_locked(clp);
+	spin_unlock(&clp->cl_lock);
+	if (cred != NULL)
+		goto out;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
+		cred = nfs4_get_setclientid_cred_server(server);
+		if (cred != NULL)
+			break;
+	}
+	rcu_read_unlock();
+
+out:
+	return cred;
+}
+
+static void nfs_alloc_unique_id_locked(struct rb_root *root,
+				       struct nfs_unique_id *new,
+				       __u64 minval, int maxbits)
 {
 	struct rb_node **p, *parent;
 	struct nfs_unique_id *pos;
@@ -286,16 +360,15 @@
 }
 
 static struct nfs4_state_owner *
-nfs4_find_state_owner(struct nfs_server *server, struct rpc_cred *cred)
+nfs4_find_state_owner_locked(struct nfs_server *server, struct rpc_cred *cred)
 {
-	struct nfs_client *clp = server->nfs_client;
-	struct rb_node **p = &clp->cl_state_owners.rb_node,
+	struct rb_node **p = &server->state_owners.rb_node,
 		       *parent = NULL;
 	struct nfs4_state_owner *sp, *res = NULL;
 
 	while (*p != NULL) {
 		parent = *p;
-		sp = rb_entry(parent, struct nfs4_state_owner, so_client_node);
+		sp = rb_entry(parent, struct nfs4_state_owner, so_server_node);
 
 		if (server < sp->so_server) {
 			p = &parent->rb_left;
@@ -319,24 +392,17 @@
 }
 
 static struct nfs4_state_owner *
-nfs4_insert_state_owner(struct nfs_client *clp, struct nfs4_state_owner *new)
+nfs4_insert_state_owner_locked(struct nfs4_state_owner *new)
 {
-	struct rb_node **p = &clp->cl_state_owners.rb_node,
+	struct nfs_server *server = new->so_server;
+	struct rb_node **p = &server->state_owners.rb_node,
 		       *parent = NULL;
 	struct nfs4_state_owner *sp;
 
 	while (*p != NULL) {
 		parent = *p;
-		sp = rb_entry(parent, struct nfs4_state_owner, so_client_node);
+		sp = rb_entry(parent, struct nfs4_state_owner, so_server_node);
 
-		if (new->so_server < sp->so_server) {
-			p = &parent->rb_left;
-			continue;
-		}
-		if (new->so_server > sp->so_server) {
-			p = &parent->rb_right;
-			continue;
-		}
 		if (new->so_cred < sp->so_cred)
 			p = &parent->rb_left;
 		else if (new->so_cred > sp->so_cred)
@@ -346,18 +412,21 @@
 			return sp;
 		}
 	}
-	nfs_alloc_unique_id(&clp->cl_openowner_id, &new->so_owner_id, 1, 64);
-	rb_link_node(&new->so_client_node, parent, p);
-	rb_insert_color(&new->so_client_node, &clp->cl_state_owners);
+	nfs_alloc_unique_id_locked(&server->openowner_id,
+					&new->so_owner_id, 1, 64);
+	rb_link_node(&new->so_server_node, parent, p);
+	rb_insert_color(&new->so_server_node, &server->state_owners);
 	return new;
 }
 
 static void
-nfs4_remove_state_owner(struct nfs_client *clp, struct nfs4_state_owner *sp)
+nfs4_remove_state_owner_locked(struct nfs4_state_owner *sp)
 {
-	if (!RB_EMPTY_NODE(&sp->so_client_node))
-		rb_erase(&sp->so_client_node, &clp->cl_state_owners);
-	nfs_free_unique_id(&clp->cl_openowner_id, &sp->so_owner_id);
+	struct nfs_server *server = sp->so_server;
+
+	if (!RB_EMPTY_NODE(&sp->so_server_node))
+		rb_erase(&sp->so_server_node, &server->state_owners);
+	nfs_free_unique_id(&server->openowner_id, &sp->so_owner_id);
 }
 
 /*
@@ -386,23 +455,32 @@
 static void
 nfs4_drop_state_owner(struct nfs4_state_owner *sp)
 {
-	if (!RB_EMPTY_NODE(&sp->so_client_node)) {
-		struct nfs_client *clp = sp->so_server->nfs_client;
+	if (!RB_EMPTY_NODE(&sp->so_server_node)) {
+		struct nfs_server *server = sp->so_server;
+		struct nfs_client *clp = server->nfs_client;
 
 		spin_lock(&clp->cl_lock);
-		rb_erase(&sp->so_client_node, &clp->cl_state_owners);
-		RB_CLEAR_NODE(&sp->so_client_node);
+		rb_erase(&sp->so_server_node, &server->state_owners);
+		RB_CLEAR_NODE(&sp->so_server_node);
 		spin_unlock(&clp->cl_lock);
 	}
 }
 
-struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *server, struct rpc_cred *cred)
+/**
+ * nfs4_get_state_owner - Look up a state owner given a credential
+ * @server: nfs_server to search
+ * @cred: RPC credential to match
+ *
+ * Returns a pointer to an instantiated nfs4_state_owner struct, or NULL.
+ */
+struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *server,
+					      struct rpc_cred *cred)
 {
 	struct nfs_client *clp = server->nfs_client;
 	struct nfs4_state_owner *sp, *new;
 
 	spin_lock(&clp->cl_lock);
-	sp = nfs4_find_state_owner(server, cred);
+	sp = nfs4_find_state_owner_locked(server, cred);
 	spin_unlock(&clp->cl_lock);
 	if (sp != NULL)
 		return sp;
@@ -412,7 +490,7 @@
 	new->so_server = server;
 	new->so_cred = cred;
 	spin_lock(&clp->cl_lock);
-	sp = nfs4_insert_state_owner(clp, new);
+	sp = nfs4_insert_state_owner_locked(new);
 	spin_unlock(&clp->cl_lock);
 	if (sp == new)
 		get_rpccred(cred);
@@ -423,6 +501,11 @@
 	return sp;
 }
 
+/**
+ * nfs4_put_state_owner - Release a nfs4_state_owner
+ * @sp: state owner data to release
+ *
+ */
 void nfs4_put_state_owner(struct nfs4_state_owner *sp)
 {
 	struct nfs_client *clp = sp->so_server->nfs_client;
@@ -430,7 +513,7 @@
 
 	if (!atomic_dec_and_lock(&sp->so_count, &clp->cl_lock))
 		return;
-	nfs4_remove_state_owner(clp, sp);
+	nfs4_remove_state_owner_locked(sp);
 	spin_unlock(&clp->cl_lock);
 	rpc_destroy_wait_queue(&sp->so_sequence.wait);
 	put_rpccred(cred);
@@ -585,8 +668,11 @@
 	if (!call_close) {
 		nfs4_put_open_state(state);
 		nfs4_put_state_owner(owner);
-	} else
-		nfs4_do_close(path, state, gfp_mask, wait);
+	} else {
+		bool roc = pnfs_roc(state->inode);
+
+		nfs4_do_close(path, state, gfp_mask, wait, roc);
+	}
 }
 
 void nfs4_close_state(struct path *path, struct nfs4_state *state, fmode_t fmode)
@@ -633,7 +719,8 @@
 static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, fl_owner_t fl_owner, pid_t fl_pid, unsigned int type)
 {
 	struct nfs4_lock_state *lsp;
-	struct nfs_client *clp = state->owner->so_server->nfs_client;
+	struct nfs_server *server = state->owner->so_server;
+	struct nfs_client *clp = server->nfs_client;
 
 	lsp = kzalloc(sizeof(*lsp), GFP_NOFS);
 	if (lsp == NULL)
@@ -657,7 +744,7 @@
 		return NULL;
 	}
 	spin_lock(&clp->cl_lock);
-	nfs_alloc_unique_id(&clp->cl_lockowner_id, &lsp->ls_id, 1, 64);
+	nfs_alloc_unique_id_locked(&server->lockowner_id, &lsp->ls_id, 1, 64);
 	spin_unlock(&clp->cl_lock);
 	INIT_LIST_HEAD(&lsp->ls_locks);
 	return lsp;
@@ -665,10 +752,11 @@
 
 static void nfs4_free_lock_state(struct nfs4_lock_state *lsp)
 {
-	struct nfs_client *clp = lsp->ls_state->owner->so_server->nfs_client;
+	struct nfs_server *server = lsp->ls_state->owner->so_server;
+	struct nfs_client *clp = server->nfs_client;
 
 	spin_lock(&clp->cl_lock);
-	nfs_free_unique_id(&clp->cl_lockowner_id, &lsp->ls_id);
+	nfs_free_unique_id(&server->lockowner_id, &lsp->ls_id);
 	spin_unlock(&clp->cl_lock);
 	rpc_destroy_wait_queue(&lsp->ls_sequence.wait);
 	kfree(lsp);
@@ -1114,15 +1202,19 @@
 	}
 }
 
-static void nfs4_state_mark_reclaim_helper(struct nfs_client *clp, int (*mark_reclaim)(struct nfs_client *clp, struct nfs4_state *state))
+static void nfs4_reset_seqids(struct nfs_server *server,
+	int (*mark_reclaim)(struct nfs_client *clp, struct nfs4_state *state))
 {
+	struct nfs_client *clp = server->nfs_client;
 	struct nfs4_state_owner *sp;
 	struct rb_node *pos;
 	struct nfs4_state *state;
 
-	/* Reset all sequence ids to zero */
-	for (pos = rb_first(&clp->cl_state_owners); pos != NULL; pos = rb_next(pos)) {
-		sp = rb_entry(pos, struct nfs4_state_owner, so_client_node);
+	spin_lock(&clp->cl_lock);
+	for (pos = rb_first(&server->state_owners);
+	     pos != NULL;
+	     pos = rb_next(pos)) {
+		sp = rb_entry(pos, struct nfs4_state_owner, so_server_node);
 		sp->so_seqid.flags = 0;
 		spin_lock(&sp->so_lock);
 		list_for_each_entry(state, &sp->so_states, open_states) {
@@ -1131,6 +1223,18 @@
 		}
 		spin_unlock(&sp->so_lock);
 	}
+	spin_unlock(&clp->cl_lock);
+}
+
+static void nfs4_state_mark_reclaim_helper(struct nfs_client *clp,
+	int (*mark_reclaim)(struct nfs_client *clp, struct nfs4_state *state))
+{
+	struct nfs_server *server;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
+		nfs4_reset_seqids(server, mark_reclaim);
+	rcu_read_unlock();
 }
 
 static void nfs4_state_start_reclaim_reboot(struct nfs_client *clp)
@@ -1148,25 +1252,41 @@
 		(void)ops->reclaim_complete(clp);
 }
 
-static int nfs4_state_clear_reclaim_reboot(struct nfs_client *clp)
+static void nfs4_clear_reclaim_server(struct nfs_server *server)
 {
+	struct nfs_client *clp = server->nfs_client;
 	struct nfs4_state_owner *sp;
 	struct rb_node *pos;
 	struct nfs4_state *state;
 
-	if (!test_and_clear_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state))
-		return 0;
-
-	for (pos = rb_first(&clp->cl_state_owners); pos != NULL; pos = rb_next(pos)) {
-		sp = rb_entry(pos, struct nfs4_state_owner, so_client_node);
+	spin_lock(&clp->cl_lock);
+	for (pos = rb_first(&server->state_owners);
+	     pos != NULL;
+	     pos = rb_next(pos)) {
+		sp = rb_entry(pos, struct nfs4_state_owner, so_server_node);
 		spin_lock(&sp->so_lock);
 		list_for_each_entry(state, &sp->so_states, open_states) {
-			if (!test_and_clear_bit(NFS_STATE_RECLAIM_REBOOT, &state->flags))
+			if (!test_and_clear_bit(NFS_STATE_RECLAIM_REBOOT,
+						&state->flags))
 				continue;
 			nfs4_state_mark_reclaim_nograce(clp, state);
 		}
 		spin_unlock(&sp->so_lock);
 	}
+	spin_unlock(&clp->cl_lock);
+}
+
+static int nfs4_state_clear_reclaim_reboot(struct nfs_client *clp)
+{
+	struct nfs_server *server;
+
+	if (!test_and_clear_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state))
+		return 0;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
+		nfs4_clear_reclaim_server(server);
+	rcu_read_unlock();
 
 	nfs_delegation_reap_unclaimed(clp);
 	return 1;
@@ -1238,27 +1358,40 @@
 
 static int nfs4_do_reclaim(struct nfs_client *clp, const struct nfs4_state_recovery_ops *ops)
 {
+	struct nfs4_state_owner *sp;
+	struct nfs_server *server;
 	struct rb_node *pos;
 	int status = 0;
 
 restart:
-	spin_lock(&clp->cl_lock);
-	for (pos = rb_first(&clp->cl_state_owners); pos != NULL; pos = rb_next(pos)) {
-		struct nfs4_state_owner *sp = rb_entry(pos, struct nfs4_state_owner, so_client_node);
-		if (!test_and_clear_bit(ops->owner_flag_bit, &sp->so_flags))
-			continue;
-		atomic_inc(&sp->so_count);
-		spin_unlock(&clp->cl_lock);
-		status = nfs4_reclaim_open_state(sp, ops);
-		if (status < 0) {
-			set_bit(ops->owner_flag_bit, &sp->so_flags);
+	rcu_read_lock();
+	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
+		spin_lock(&clp->cl_lock);
+		for (pos = rb_first(&server->state_owners);
+		     pos != NULL;
+		     pos = rb_next(pos)) {
+			sp = rb_entry(pos,
+				struct nfs4_state_owner, so_server_node);
+			if (!test_and_clear_bit(ops->owner_flag_bit,
+							&sp->so_flags))
+				continue;
+			atomic_inc(&sp->so_count);
+			spin_unlock(&clp->cl_lock);
+			rcu_read_unlock();
+
+			status = nfs4_reclaim_open_state(sp, ops);
+			if (status < 0) {
+				set_bit(ops->owner_flag_bit, &sp->so_flags);
+				nfs4_put_state_owner(sp);
+				return nfs4_recovery_handle_error(clp, status);
+			}
+
 			nfs4_put_state_owner(sp);
-			return nfs4_recovery_handle_error(clp, status);
+			goto restart;
 		}
-		nfs4_put_state_owner(sp);
-		goto restart;
+		spin_unlock(&clp->cl_lock);
 	}
-	spin_unlock(&clp->cl_lock);
+	rcu_read_unlock();
 	return status;
 }
 
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index 9f1826b..2ab8e5c 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -71,8 +71,8 @@
 /* lock,open owner id:
  * we currently use size 2 (u64) out of (NFS4_OPAQUE_LIMIT  >> 2)
  */
-#define open_owner_id_maxsz	(1 + 4)
-#define lock_owner_id_maxsz	(1 + 4)
+#define open_owner_id_maxsz	(1 + 1 + 4)
+#define lock_owner_id_maxsz	(1 + 1 + 4)
 #define decode_lockowner_maxsz	(1 + XDR_QUADLEN(IDMAP_NAMESZ))
 #define compound_encode_hdr_maxsz	(3 + (NFS4_MAXTAGLEN >> 2))
 #define compound_decode_hdr_maxsz	(3 + (NFS4_MAXTAGLEN >> 2))
@@ -1088,10 +1088,11 @@
 {
 	__be32 *p;
 
-	p = reserve_space(xdr, 28);
+	p = reserve_space(xdr, 32);
 	p = xdr_encode_hyper(p, lowner->clientid);
-	*p++ = cpu_to_be32(16);
+	*p++ = cpu_to_be32(20);
 	p = xdr_encode_opaque_fixed(p, "lock id:", 8);
+	*p++ = cpu_to_be32(lowner->s_dev);
 	xdr_encode_hyper(p, lowner->id);
 }
 
@@ -1210,10 +1211,11 @@
 	*p++ = cpu_to_be32(OP_OPEN);
 	*p = cpu_to_be32(arg->seqid->sequence->counter);
 	encode_share_access(xdr, arg->fmode);
-	p = reserve_space(xdr, 28);
+	p = reserve_space(xdr, 32);
 	p = xdr_encode_hyper(p, arg->clientid);
-	*p++ = cpu_to_be32(16);
+	*p++ = cpu_to_be32(20);
 	p = xdr_encode_opaque_fixed(p, "open id:", 8);
+	*p++ = cpu_to_be32(arg->server->s_dev);
 	xdr_encode_hyper(p, arg->id);
 }
 
@@ -1510,7 +1512,7 @@
 	hdr->replen += decode_restorefh_maxsz;
 }
 
-static int
+static void
 encode_setacl(struct xdr_stream *xdr, struct nfs_setaclargs *arg, struct compound_hdr *hdr)
 {
 	__be32 *p;
@@ -1521,14 +1523,12 @@
 	p = reserve_space(xdr, 2*4);
 	*p++ = cpu_to_be32(1);
 	*p = cpu_to_be32(FATTR4_WORD0_ACL);
-	if (arg->acl_len % 4)
-		return -EINVAL;
+	BUG_ON(arg->acl_len % 4);
 	p = reserve_space(xdr, 4);
 	*p = cpu_to_be32(arg->acl_len);
 	xdr_write_pages(xdr, arg->acl_pages, arg->acl_pgbase, arg->acl_len);
 	hdr->nops++;
 	hdr->replen += decode_setacl_maxsz;
-	return 0;
 }
 
 static void
@@ -1789,7 +1789,6 @@
 		      const struct nfs4_layoutget_args *args,
 		      struct compound_hdr *hdr)
 {
-	nfs4_stateid stateid;
 	__be32 *p;
 
 	p = reserve_space(xdr, 44 + NFS4_STATEID_SIZE);
@@ -1800,9 +1799,7 @@
 	p = xdr_encode_hyper(p, args->range.offset);
 	p = xdr_encode_hyper(p, args->range.length);
 	p = xdr_encode_hyper(p, args->minlength);
-	pnfs_get_layout_stateid(&stateid, NFS_I(args->inode)->layout,
-				args->ctx->state);
-	p = xdr_encode_opaque_fixed(p, &stateid.data, NFS4_STATEID_SIZE);
+	p = xdr_encode_opaque_fixed(p, &args->stateid.data, NFS4_STATEID_SIZE);
 	*p = cpu_to_be32(args->maxcount);
 
 	dprintk("%s: 1st type:0x%x iomode:%d off:%lu len:%lu mc:%d\n",
@@ -1833,393 +1830,362 @@
 /*
  * Encode an ACCESS request
  */
-static int nfs4_xdr_enc_access(struct rpc_rqst *req, __be32 *p, const struct nfs4_accessargs *args)
+static void nfs4_xdr_enc_access(struct rpc_rqst *req, struct xdr_stream *xdr,
+				const struct nfs4_accessargs *args)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr = {
 		.minorversion = nfs4_xdr_minorversion(&args->seq_args),
 	};
 
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-	encode_compound_hdr(&xdr, req, &hdr);
-	encode_sequence(&xdr, &args->seq_args, &hdr);
-	encode_putfh(&xdr, args->fh, &hdr);
-	encode_access(&xdr, args->access, &hdr);
-	encode_getfattr(&xdr, args->bitmask, &hdr);
+	encode_compound_hdr(xdr, req, &hdr);
+	encode_sequence(xdr, &args->seq_args, &hdr);
+	encode_putfh(xdr, args->fh, &hdr);
+	encode_access(xdr, args->access, &hdr);
+	encode_getfattr(xdr, args->bitmask, &hdr);
 	encode_nops(&hdr);
-	return 0;
 }
 
 /*
  * Encode LOOKUP request
  */
-static int nfs4_xdr_enc_lookup(struct rpc_rqst *req, __be32 *p, const struct nfs4_lookup_arg *args)
+static void nfs4_xdr_enc_lookup(struct rpc_rqst *req, struct xdr_stream *xdr,
+				const struct nfs4_lookup_arg *args)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr = {
 		.minorversion = nfs4_xdr_minorversion(&args->seq_args),
 	};
 
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-	encode_compound_hdr(&xdr, req, &hdr);
-	encode_sequence(&xdr, &args->seq_args, &hdr);
-	encode_putfh(&xdr, args->dir_fh, &hdr);
-	encode_lookup(&xdr, args->name, &hdr);
-	encode_getfh(&xdr, &hdr);
-	encode_getfattr(&xdr, args->bitmask, &hdr);
+	encode_compound_hdr(xdr, req, &hdr);
+	encode_sequence(xdr, &args->seq_args, &hdr);
+	encode_putfh(xdr, args->dir_fh, &hdr);
+	encode_lookup(xdr, args->name, &hdr);
+	encode_getfh(xdr, &hdr);
+	encode_getfattr(xdr, args->bitmask, &hdr);
 	encode_nops(&hdr);
-	return 0;
 }
 
 /*
  * Encode LOOKUP_ROOT request
  */
-static int nfs4_xdr_enc_lookup_root(struct rpc_rqst *req, __be32 *p, const struct nfs4_lookup_root_arg *args)
+static void nfs4_xdr_enc_lookup_root(struct rpc_rqst *req,
+				     struct xdr_stream *xdr,
+				     const struct nfs4_lookup_root_arg *args)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr = {
 		.minorversion = nfs4_xdr_minorversion(&args->seq_args),
 	};
 
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-	encode_compound_hdr(&xdr, req, &hdr);
-	encode_sequence(&xdr, &args->seq_args, &hdr);
-	encode_putrootfh(&xdr, &hdr);
-	encode_getfh(&xdr, &hdr);
-	encode_getfattr(&xdr, args->bitmask, &hdr);
+	encode_compound_hdr(xdr, req, &hdr);
+	encode_sequence(xdr, &args->seq_args, &hdr);
+	encode_putrootfh(xdr, &hdr);
+	encode_getfh(xdr, &hdr);
+	encode_getfattr(xdr, args->bitmask, &hdr);
 	encode_nops(&hdr);
-	return 0;
 }
 
 /*
  * Encode REMOVE request
  */
-static int nfs4_xdr_enc_remove(struct rpc_rqst *req, __be32 *p, const struct nfs_removeargs *args)
+static void nfs4_xdr_enc_remove(struct rpc_rqst *req, struct xdr_stream *xdr,
+				const struct nfs_removeargs *args)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr = {
 		.minorversion = nfs4_xdr_minorversion(&args->seq_args),
 	};
 
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-	encode_compound_hdr(&xdr, req, &hdr);
-	encode_sequence(&xdr, &args->seq_args, &hdr);
-	encode_putfh(&xdr, args->fh, &hdr);
-	encode_remove(&xdr, &args->name, &hdr);
-	encode_getfattr(&xdr, args->bitmask, &hdr);
+	encode_compound_hdr(xdr, req, &hdr);
+	encode_sequence(xdr, &args->seq_args, &hdr);
+	encode_putfh(xdr, args->fh, &hdr);
+	encode_remove(xdr, &args->name, &hdr);
+	encode_getfattr(xdr, args->bitmask, &hdr);
 	encode_nops(&hdr);
-	return 0;
 }
 
 /*
  * Encode RENAME request
  */
-static int nfs4_xdr_enc_rename(struct rpc_rqst *req, __be32 *p, const struct nfs_renameargs *args)
+static void nfs4_xdr_enc_rename(struct rpc_rqst *req, struct xdr_stream *xdr,
+				const struct nfs_renameargs *args)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr = {
 		.minorversion = nfs4_xdr_minorversion(&args->seq_args),
 	};
 
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-	encode_compound_hdr(&xdr, req, &hdr);
-	encode_sequence(&xdr, &args->seq_args, &hdr);
-	encode_putfh(&xdr, args->old_dir, &hdr);
-	encode_savefh(&xdr, &hdr);
-	encode_putfh(&xdr, args->new_dir, &hdr);
-	encode_rename(&xdr, args->old_name, args->new_name, &hdr);
-	encode_getfattr(&xdr, args->bitmask, &hdr);
-	encode_restorefh(&xdr, &hdr);
-	encode_getfattr(&xdr, args->bitmask, &hdr);
+	encode_compound_hdr(xdr, req, &hdr);
+	encode_sequence(xdr, &args->seq_args, &hdr);
+	encode_putfh(xdr, args->old_dir, &hdr);
+	encode_savefh(xdr, &hdr);
+	encode_putfh(xdr, args->new_dir, &hdr);
+	encode_rename(xdr, args->old_name, args->new_name, &hdr);
+	encode_getfattr(xdr, args->bitmask, &hdr);
+	encode_restorefh(xdr, &hdr);
+	encode_getfattr(xdr, args->bitmask, &hdr);
 	encode_nops(&hdr);
-	return 0;
 }
 
 /*
  * Encode LINK request
  */
-static int nfs4_xdr_enc_link(struct rpc_rqst *req, __be32 *p, const struct nfs4_link_arg *args)
+static void nfs4_xdr_enc_link(struct rpc_rqst *req, struct xdr_stream *xdr,
+			     const struct nfs4_link_arg *args)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr = {
 		.minorversion = nfs4_xdr_minorversion(&args->seq_args),
 	};
 
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-	encode_compound_hdr(&xdr, req, &hdr);
-	encode_sequence(&xdr, &args->seq_args, &hdr);
-	encode_putfh(&xdr, args->fh, &hdr);
-	encode_savefh(&xdr, &hdr);
-	encode_putfh(&xdr, args->dir_fh, &hdr);
-	encode_link(&xdr, args->name, &hdr);
-	encode_getfattr(&xdr, args->bitmask, &hdr);
-	encode_restorefh(&xdr, &hdr);
-	encode_getfattr(&xdr, args->bitmask, &hdr);
+	encode_compound_hdr(xdr, req, &hdr);
+	encode_sequence(xdr, &args->seq_args, &hdr);
+	encode_putfh(xdr, args->fh, &hdr);
+	encode_savefh(xdr, &hdr);
+	encode_putfh(xdr, args->dir_fh, &hdr);
+	encode_link(xdr, args->name, &hdr);
+	encode_getfattr(xdr, args->bitmask, &hdr);
+	encode_restorefh(xdr, &hdr);
+	encode_getfattr(xdr, args->bitmask, &hdr);
 	encode_nops(&hdr);
-	return 0;
 }
 
 /*
  * Encode CREATE request
  */
-static int nfs4_xdr_enc_create(struct rpc_rqst *req, __be32 *p, const struct nfs4_create_arg *args)
+static void nfs4_xdr_enc_create(struct rpc_rqst *req, struct xdr_stream *xdr,
+				const struct nfs4_create_arg *args)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr = {
 		.minorversion = nfs4_xdr_minorversion(&args->seq_args),
 	};
 
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-	encode_compound_hdr(&xdr, req, &hdr);
-	encode_sequence(&xdr, &args->seq_args, &hdr);
-	encode_putfh(&xdr, args->dir_fh, &hdr);
-	encode_savefh(&xdr, &hdr);
-	encode_create(&xdr, args, &hdr);
-	encode_getfh(&xdr, &hdr);
-	encode_getfattr(&xdr, args->bitmask, &hdr);
-	encode_restorefh(&xdr, &hdr);
-	encode_getfattr(&xdr, args->bitmask, &hdr);
+	encode_compound_hdr(xdr, req, &hdr);
+	encode_sequence(xdr, &args->seq_args, &hdr);
+	encode_putfh(xdr, args->dir_fh, &hdr);
+	encode_savefh(xdr, &hdr);
+	encode_create(xdr, args, &hdr);
+	encode_getfh(xdr, &hdr);
+	encode_getfattr(xdr, args->bitmask, &hdr);
+	encode_restorefh(xdr, &hdr);
+	encode_getfattr(xdr, args->bitmask, &hdr);
 	encode_nops(&hdr);
-	return 0;
 }
 
 /*
  * Encode SYMLINK request
  */
-static int nfs4_xdr_enc_symlink(struct rpc_rqst *req, __be32 *p, const struct nfs4_create_arg *args)
+static void nfs4_xdr_enc_symlink(struct rpc_rqst *req, struct xdr_stream *xdr,
+				 const struct nfs4_create_arg *args)
 {
-	return nfs4_xdr_enc_create(req, p, args);
+	nfs4_xdr_enc_create(req, xdr, args);
 }
 
 /*
  * Encode GETATTR request
  */
-static int nfs4_xdr_enc_getattr(struct rpc_rqst *req, __be32 *p, const struct nfs4_getattr_arg *args)
+static void nfs4_xdr_enc_getattr(struct rpc_rqst *req, struct xdr_stream *xdr,
+				 const struct nfs4_getattr_arg *args)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr = {
 		.minorversion = nfs4_xdr_minorversion(&args->seq_args),
 	};
 
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-	encode_compound_hdr(&xdr, req, &hdr);
-	encode_sequence(&xdr, &args->seq_args, &hdr);
-	encode_putfh(&xdr, args->fh, &hdr);
-	encode_getfattr(&xdr, args->bitmask, &hdr);
+	encode_compound_hdr(xdr, req, &hdr);
+	encode_sequence(xdr, &args->seq_args, &hdr);
+	encode_putfh(xdr, args->fh, &hdr);
+	encode_getfattr(xdr, args->bitmask, &hdr);
 	encode_nops(&hdr);
-	return 0;
 }
 
 /*
  * Encode a CLOSE request
  */
-static int nfs4_xdr_enc_close(struct rpc_rqst *req, __be32 *p, struct nfs_closeargs *args)
+static void nfs4_xdr_enc_close(struct rpc_rqst *req, struct xdr_stream *xdr,
+			       struct nfs_closeargs *args)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr = {
 		.minorversion = nfs4_xdr_minorversion(&args->seq_args),
 	};
 
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-	encode_compound_hdr(&xdr, req, &hdr);
-	encode_sequence(&xdr, &args->seq_args, &hdr);
-	encode_putfh(&xdr, args->fh, &hdr);
-	encode_close(&xdr, args, &hdr);
-	encode_getfattr(&xdr, args->bitmask, &hdr);
+	encode_compound_hdr(xdr, req, &hdr);
+	encode_sequence(xdr, &args->seq_args, &hdr);
+	encode_putfh(xdr, args->fh, &hdr);
+	encode_close(xdr, args, &hdr);
+	encode_getfattr(xdr, args->bitmask, &hdr);
 	encode_nops(&hdr);
-	return 0;
 }
 
 /*
  * Encode an OPEN request
  */
-static int nfs4_xdr_enc_open(struct rpc_rqst *req, __be32 *p, struct nfs_openargs *args)
+static void nfs4_xdr_enc_open(struct rpc_rqst *req, struct xdr_stream *xdr,
+			      struct nfs_openargs *args)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr = {
 		.minorversion = nfs4_xdr_minorversion(&args->seq_args),
 	};
 
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-	encode_compound_hdr(&xdr, req, &hdr);
-	encode_sequence(&xdr, &args->seq_args, &hdr);
-	encode_putfh(&xdr, args->fh, &hdr);
-	encode_savefh(&xdr, &hdr);
-	encode_open(&xdr, args, &hdr);
-	encode_getfh(&xdr, &hdr);
-	encode_getfattr(&xdr, args->bitmask, &hdr);
-	encode_restorefh(&xdr, &hdr);
-	encode_getfattr(&xdr, args->bitmask, &hdr);
+	encode_compound_hdr(xdr, req, &hdr);
+	encode_sequence(xdr, &args->seq_args, &hdr);
+	encode_putfh(xdr, args->fh, &hdr);
+	encode_savefh(xdr, &hdr);
+	encode_open(xdr, args, &hdr);
+	encode_getfh(xdr, &hdr);
+	encode_getfattr(xdr, args->bitmask, &hdr);
+	encode_restorefh(xdr, &hdr);
+	encode_getfattr(xdr, args->bitmask, &hdr);
 	encode_nops(&hdr);
-	return 0;
 }
 
 /*
  * Encode an OPEN_CONFIRM request
  */
-static int nfs4_xdr_enc_open_confirm(struct rpc_rqst *req, __be32 *p, struct nfs_open_confirmargs *args)
+static void nfs4_xdr_enc_open_confirm(struct rpc_rqst *req,
+				      struct xdr_stream *xdr,
+				      struct nfs_open_confirmargs *args)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr = {
 		.nops   = 0,
 	};
 
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-	encode_compound_hdr(&xdr, req, &hdr);
-	encode_putfh(&xdr, args->fh, &hdr);
-	encode_open_confirm(&xdr, args, &hdr);
+	encode_compound_hdr(xdr, req, &hdr);
+	encode_putfh(xdr, args->fh, &hdr);
+	encode_open_confirm(xdr, args, &hdr);
 	encode_nops(&hdr);
-	return 0;
 }
 
 /*
  * Encode an OPEN request with no attributes.
  */
-static int nfs4_xdr_enc_open_noattr(struct rpc_rqst *req, __be32 *p, struct nfs_openargs *args)
+static void nfs4_xdr_enc_open_noattr(struct rpc_rqst *req,
+				     struct xdr_stream *xdr,
+				     struct nfs_openargs *args)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr = {
 		.minorversion = nfs4_xdr_minorversion(&args->seq_args),
 	};
 
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-	encode_compound_hdr(&xdr, req, &hdr);
-	encode_sequence(&xdr, &args->seq_args, &hdr);
-	encode_putfh(&xdr, args->fh, &hdr);
-	encode_open(&xdr, args, &hdr);
-	encode_getfattr(&xdr, args->bitmask, &hdr);
+	encode_compound_hdr(xdr, req, &hdr);
+	encode_sequence(xdr, &args->seq_args, &hdr);
+	encode_putfh(xdr, args->fh, &hdr);
+	encode_open(xdr, args, &hdr);
+	encode_getfattr(xdr, args->bitmask, &hdr);
 	encode_nops(&hdr);
-	return 0;
 }
 
 /*
  * Encode an OPEN_DOWNGRADE request
  */
-static int nfs4_xdr_enc_open_downgrade(struct rpc_rqst *req, __be32 *p, struct nfs_closeargs *args)
+static void nfs4_xdr_enc_open_downgrade(struct rpc_rqst *req,
+					struct xdr_stream *xdr,
+					struct nfs_closeargs *args)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr = {
 		.minorversion = nfs4_xdr_minorversion(&args->seq_args),
 	};
 
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-	encode_compound_hdr(&xdr, req, &hdr);
-	encode_sequence(&xdr, &args->seq_args, &hdr);
-	encode_putfh(&xdr, args->fh, &hdr);
-	encode_open_downgrade(&xdr, args, &hdr);
-	encode_getfattr(&xdr, args->bitmask, &hdr);
+	encode_compound_hdr(xdr, req, &hdr);
+	encode_sequence(xdr, &args->seq_args, &hdr);
+	encode_putfh(xdr, args->fh, &hdr);
+	encode_open_downgrade(xdr, args, &hdr);
+	encode_getfattr(xdr, args->bitmask, &hdr);
 	encode_nops(&hdr);
-	return 0;
 }
 
 /*
  * Encode a LOCK request
  */
-static int nfs4_xdr_enc_lock(struct rpc_rqst *req, __be32 *p, struct nfs_lock_args *args)
+static void nfs4_xdr_enc_lock(struct rpc_rqst *req, struct xdr_stream *xdr,
+			      struct nfs_lock_args *args)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr = {
 		.minorversion = nfs4_xdr_minorversion(&args->seq_args),
 	};
 
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-	encode_compound_hdr(&xdr, req, &hdr);
-	encode_sequence(&xdr, &args->seq_args, &hdr);
-	encode_putfh(&xdr, args->fh, &hdr);
-	encode_lock(&xdr, args, &hdr);
+	encode_compound_hdr(xdr, req, &hdr);
+	encode_sequence(xdr, &args->seq_args, &hdr);
+	encode_putfh(xdr, args->fh, &hdr);
+	encode_lock(xdr, args, &hdr);
 	encode_nops(&hdr);
-	return 0;
 }
 
 /*
  * Encode a LOCKT request
  */
-static int nfs4_xdr_enc_lockt(struct rpc_rqst *req, __be32 *p, struct nfs_lockt_args *args)
+static void nfs4_xdr_enc_lockt(struct rpc_rqst *req, struct xdr_stream *xdr,
+			       struct nfs_lockt_args *args)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr = {
 		.minorversion = nfs4_xdr_minorversion(&args->seq_args),
 	};
 
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-	encode_compound_hdr(&xdr, req, &hdr);
-	encode_sequence(&xdr, &args->seq_args, &hdr);
-	encode_putfh(&xdr, args->fh, &hdr);
-	encode_lockt(&xdr, args, &hdr);
+	encode_compound_hdr(xdr, req, &hdr);
+	encode_sequence(xdr, &args->seq_args, &hdr);
+	encode_putfh(xdr, args->fh, &hdr);
+	encode_lockt(xdr, args, &hdr);
 	encode_nops(&hdr);
-	return 0;
 }
 
 /*
  * Encode a LOCKU request
  */
-static int nfs4_xdr_enc_locku(struct rpc_rqst *req, __be32 *p, struct nfs_locku_args *args)
+static void nfs4_xdr_enc_locku(struct rpc_rqst *req, struct xdr_stream *xdr,
+			       struct nfs_locku_args *args)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr = {
 		.minorversion = nfs4_xdr_minorversion(&args->seq_args),
 	};
 
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-	encode_compound_hdr(&xdr, req, &hdr);
-	encode_sequence(&xdr, &args->seq_args, &hdr);
-	encode_putfh(&xdr, args->fh, &hdr);
-	encode_locku(&xdr, args, &hdr);
+	encode_compound_hdr(xdr, req, &hdr);
+	encode_sequence(xdr, &args->seq_args, &hdr);
+	encode_putfh(xdr, args->fh, &hdr);
+	encode_locku(xdr, args, &hdr);
 	encode_nops(&hdr);
-	return 0;
 }
 
-static int nfs4_xdr_enc_release_lockowner(struct rpc_rqst *req, __be32 *p, struct nfs_release_lockowner_args *args)
+static void nfs4_xdr_enc_release_lockowner(struct rpc_rqst *req,
+					   struct xdr_stream *xdr,
+					struct nfs_release_lockowner_args *args)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr = {
 		.minorversion = 0,
 	};
 
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-	encode_compound_hdr(&xdr, req, &hdr);
-	encode_release_lockowner(&xdr, &args->lock_owner, &hdr);
+	encode_compound_hdr(xdr, req, &hdr);
+	encode_release_lockowner(xdr, &args->lock_owner, &hdr);
 	encode_nops(&hdr);
-	return 0;
 }
 
 /*
  * Encode a READLINK request
  */
-static int nfs4_xdr_enc_readlink(struct rpc_rqst *req, __be32 *p, const struct nfs4_readlink *args)
+static void nfs4_xdr_enc_readlink(struct rpc_rqst *req, struct xdr_stream *xdr,
+				  const struct nfs4_readlink *args)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr = {
 		.minorversion = nfs4_xdr_minorversion(&args->seq_args),
 	};
 
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-	encode_compound_hdr(&xdr, req, &hdr);
-	encode_sequence(&xdr, &args->seq_args, &hdr);
-	encode_putfh(&xdr, args->fh, &hdr);
-	encode_readlink(&xdr, args, req, &hdr);
+	encode_compound_hdr(xdr, req, &hdr);
+	encode_sequence(xdr, &args->seq_args, &hdr);
+	encode_putfh(xdr, args->fh, &hdr);
+	encode_readlink(xdr, args, req, &hdr);
 
 	xdr_inline_pages(&req->rq_rcv_buf, hdr.replen << 2, args->pages,
 			args->pgbase, args->pglen);
 	encode_nops(&hdr);
-	return 0;
 }
 
 /*
  * Encode a READDIR request
  */
-static int nfs4_xdr_enc_readdir(struct rpc_rqst *req, __be32 *p, const struct nfs4_readdir_arg *args)
+static void nfs4_xdr_enc_readdir(struct rpc_rqst *req, struct xdr_stream *xdr,
+				 const struct nfs4_readdir_arg *args)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr = {
 		.minorversion = nfs4_xdr_minorversion(&args->seq_args),
 	};
 
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-	encode_compound_hdr(&xdr, req, &hdr);
-	encode_sequence(&xdr, &args->seq_args, &hdr);
-	encode_putfh(&xdr, args->fh, &hdr);
-	encode_readdir(&xdr, args, req, &hdr);
+	encode_compound_hdr(xdr, req, &hdr);
+	encode_sequence(xdr, &args->seq_args, &hdr);
+	encode_putfh(xdr, args->fh, &hdr);
+	encode_readdir(xdr, args, req, &hdr);
 
 	xdr_inline_pages(&req->rq_rcv_buf, hdr.replen << 2, args->pages,
 			 args->pgbase, args->count);
@@ -2227,428 +2193,387 @@
 			__func__, hdr.replen << 2, args->pages,
 			args->pgbase, args->count);
 	encode_nops(&hdr);
-	return 0;
 }
 
 /*
  * Encode a READ request
  */
-static int nfs4_xdr_enc_read(struct rpc_rqst *req, __be32 *p, struct nfs_readargs *args)
+static void nfs4_xdr_enc_read(struct rpc_rqst *req, struct xdr_stream *xdr,
+			      struct nfs_readargs *args)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr = {
 		.minorversion = nfs4_xdr_minorversion(&args->seq_args),
 	};
 
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-	encode_compound_hdr(&xdr, req, &hdr);
-	encode_sequence(&xdr, &args->seq_args, &hdr);
-	encode_putfh(&xdr, args->fh, &hdr);
-	encode_read(&xdr, args, &hdr);
+	encode_compound_hdr(xdr, req, &hdr);
+	encode_sequence(xdr, &args->seq_args, &hdr);
+	encode_putfh(xdr, args->fh, &hdr);
+	encode_read(xdr, args, &hdr);
 
 	xdr_inline_pages(&req->rq_rcv_buf, hdr.replen << 2,
 			 args->pages, args->pgbase, args->count);
 	req->rq_rcv_buf.flags |= XDRBUF_READ;
 	encode_nops(&hdr);
-	return 0;
 }
 
 /*
  * Encode an SETATTR request
  */
-static int nfs4_xdr_enc_setattr(struct rpc_rqst *req, __be32 *p, struct nfs_setattrargs *args)
+static void nfs4_xdr_enc_setattr(struct rpc_rqst *req, struct xdr_stream *xdr,
+				 struct nfs_setattrargs *args)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr = {
 		.minorversion = nfs4_xdr_minorversion(&args->seq_args),
 	};
 
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-	encode_compound_hdr(&xdr, req, &hdr);
-	encode_sequence(&xdr, &args->seq_args, &hdr);
-	encode_putfh(&xdr, args->fh, &hdr);
-	encode_setattr(&xdr, args, args->server, &hdr);
-	encode_getfattr(&xdr, args->bitmask, &hdr);
+	encode_compound_hdr(xdr, req, &hdr);
+	encode_sequence(xdr, &args->seq_args, &hdr);
+	encode_putfh(xdr, args->fh, &hdr);
+	encode_setattr(xdr, args, args->server, &hdr);
+	encode_getfattr(xdr, args->bitmask, &hdr);
 	encode_nops(&hdr);
-	return 0;
 }
 
 /*
  * Encode a GETACL request
  */
-static int
-nfs4_xdr_enc_getacl(struct rpc_rqst *req, __be32 *p,
-		struct nfs_getaclargs *args)
+static void nfs4_xdr_enc_getacl(struct rpc_rqst *req, struct xdr_stream *xdr,
+				struct nfs_getaclargs *args)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr = {
 		.minorversion = nfs4_xdr_minorversion(&args->seq_args),
 	};
 	uint32_t replen;
 
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-	encode_compound_hdr(&xdr, req, &hdr);
-	encode_sequence(&xdr, &args->seq_args, &hdr);
-	encode_putfh(&xdr, args->fh, &hdr);
+	encode_compound_hdr(xdr, req, &hdr);
+	encode_sequence(xdr, &args->seq_args, &hdr);
+	encode_putfh(xdr, args->fh, &hdr);
 	replen = hdr.replen + op_decode_hdr_maxsz + nfs4_fattr_bitmap_maxsz + 1;
-	encode_getattr_two(&xdr, FATTR4_WORD0_ACL, 0, &hdr);
+	encode_getattr_two(xdr, FATTR4_WORD0_ACL, 0, &hdr);
 
 	xdr_inline_pages(&req->rq_rcv_buf, replen << 2,
 		args->acl_pages, args->acl_pgbase, args->acl_len);
 	encode_nops(&hdr);
-	return 0;
 }
 
 /*
  * Encode a WRITE request
  */
-static int nfs4_xdr_enc_write(struct rpc_rqst *req, __be32 *p, struct nfs_writeargs *args)
+static void nfs4_xdr_enc_write(struct rpc_rqst *req, struct xdr_stream *xdr,
+			       struct nfs_writeargs *args)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr = {
 		.minorversion = nfs4_xdr_minorversion(&args->seq_args),
 	};
 
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-	encode_compound_hdr(&xdr, req, &hdr);
-	encode_sequence(&xdr, &args->seq_args, &hdr);
-	encode_putfh(&xdr, args->fh, &hdr);
-	encode_write(&xdr, args, &hdr);
+	encode_compound_hdr(xdr, req, &hdr);
+	encode_sequence(xdr, &args->seq_args, &hdr);
+	encode_putfh(xdr, args->fh, &hdr);
+	encode_write(xdr, args, &hdr);
 	req->rq_snd_buf.flags |= XDRBUF_WRITE;
-	encode_getfattr(&xdr, args->bitmask, &hdr);
+	encode_getfattr(xdr, args->bitmask, &hdr);
 	encode_nops(&hdr);
-	return 0;
 }
 
 /*
  *  a COMMIT request
  */
-static int nfs4_xdr_enc_commit(struct rpc_rqst *req, __be32 *p, struct nfs_writeargs *args)
+static void nfs4_xdr_enc_commit(struct rpc_rqst *req, struct xdr_stream *xdr,
+				struct nfs_writeargs *args)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr = {
 		.minorversion = nfs4_xdr_minorversion(&args->seq_args),
 	};
 
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-	encode_compound_hdr(&xdr, req, &hdr);
-	encode_sequence(&xdr, &args->seq_args, &hdr);
-	encode_putfh(&xdr, args->fh, &hdr);
-	encode_commit(&xdr, args, &hdr);
-	encode_getfattr(&xdr, args->bitmask, &hdr);
+	encode_compound_hdr(xdr, req, &hdr);
+	encode_sequence(xdr, &args->seq_args, &hdr);
+	encode_putfh(xdr, args->fh, &hdr);
+	encode_commit(xdr, args, &hdr);
+	encode_getfattr(xdr, args->bitmask, &hdr);
 	encode_nops(&hdr);
-	return 0;
 }
 
 /*
  * FSINFO request
  */
-static int nfs4_xdr_enc_fsinfo(struct rpc_rqst *req, __be32 *p, struct nfs4_fsinfo_arg *args)
+static void nfs4_xdr_enc_fsinfo(struct rpc_rqst *req, struct xdr_stream *xdr,
+				struct nfs4_fsinfo_arg *args)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr = {
 		.minorversion = nfs4_xdr_minorversion(&args->seq_args),
 	};
 
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-	encode_compound_hdr(&xdr, req, &hdr);
-	encode_sequence(&xdr, &args->seq_args, &hdr);
-	encode_putfh(&xdr, args->fh, &hdr);
-	encode_fsinfo(&xdr, args->bitmask, &hdr);
+	encode_compound_hdr(xdr, req, &hdr);
+	encode_sequence(xdr, &args->seq_args, &hdr);
+	encode_putfh(xdr, args->fh, &hdr);
+	encode_fsinfo(xdr, args->bitmask, &hdr);
 	encode_nops(&hdr);
-	return 0;
 }
 
 /*
  * a PATHCONF request
  */
-static int nfs4_xdr_enc_pathconf(struct rpc_rqst *req, __be32 *p, const struct nfs4_pathconf_arg *args)
+static void nfs4_xdr_enc_pathconf(struct rpc_rqst *req, struct xdr_stream *xdr,
+				  const struct nfs4_pathconf_arg *args)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr = {
 		.minorversion = nfs4_xdr_minorversion(&args->seq_args),
 	};
 
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-	encode_compound_hdr(&xdr, req, &hdr);
-	encode_sequence(&xdr, &args->seq_args, &hdr);
-	encode_putfh(&xdr, args->fh, &hdr);
-	encode_getattr_one(&xdr, args->bitmask[0] & nfs4_pathconf_bitmap[0],
+	encode_compound_hdr(xdr, req, &hdr);
+	encode_sequence(xdr, &args->seq_args, &hdr);
+	encode_putfh(xdr, args->fh, &hdr);
+	encode_getattr_one(xdr, args->bitmask[0] & nfs4_pathconf_bitmap[0],
 			   &hdr);
 	encode_nops(&hdr);
-	return 0;
 }
 
 /*
  * a STATFS request
  */
-static int nfs4_xdr_enc_statfs(struct rpc_rqst *req, __be32 *p, const struct nfs4_statfs_arg *args)
+static void nfs4_xdr_enc_statfs(struct rpc_rqst *req, struct xdr_stream *xdr,
+				const struct nfs4_statfs_arg *args)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr = {
 		.minorversion = nfs4_xdr_minorversion(&args->seq_args),
 	};
 
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-	encode_compound_hdr(&xdr, req, &hdr);
-	encode_sequence(&xdr, &args->seq_args, &hdr);
-	encode_putfh(&xdr, args->fh, &hdr);
-	encode_getattr_two(&xdr, args->bitmask[0] & nfs4_statfs_bitmap[0],
+	encode_compound_hdr(xdr, req, &hdr);
+	encode_sequence(xdr, &args->seq_args, &hdr);
+	encode_putfh(xdr, args->fh, &hdr);
+	encode_getattr_two(xdr, args->bitmask[0] & nfs4_statfs_bitmap[0],
 			   args->bitmask[1] & nfs4_statfs_bitmap[1], &hdr);
 	encode_nops(&hdr);
-	return 0;
 }
 
 /*
  * GETATTR_BITMAP request
  */
-static int nfs4_xdr_enc_server_caps(struct rpc_rqst *req, __be32 *p,
-				    struct nfs4_server_caps_arg *args)
+static void nfs4_xdr_enc_server_caps(struct rpc_rqst *req,
+				     struct xdr_stream *xdr,
+				     struct nfs4_server_caps_arg *args)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr = {
 		.minorversion = nfs4_xdr_minorversion(&args->seq_args),
 	};
 
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-	encode_compound_hdr(&xdr, req, &hdr);
-	encode_sequence(&xdr, &args->seq_args, &hdr);
-	encode_putfh(&xdr, args->fhandle, &hdr);
-	encode_getattr_one(&xdr, FATTR4_WORD0_SUPPORTED_ATTRS|
+	encode_compound_hdr(xdr, req, &hdr);
+	encode_sequence(xdr, &args->seq_args, &hdr);
+	encode_putfh(xdr, args->fhandle, &hdr);
+	encode_getattr_one(xdr, FATTR4_WORD0_SUPPORTED_ATTRS|
 			   FATTR4_WORD0_LINK_SUPPORT|
 			   FATTR4_WORD0_SYMLINK_SUPPORT|
 			   FATTR4_WORD0_ACLSUPPORT, &hdr);
 	encode_nops(&hdr);
-	return 0;
 }
 
 /*
  * a RENEW request
  */
-static int nfs4_xdr_enc_renew(struct rpc_rqst *req, __be32 *p, struct nfs_client *clp)
+static void nfs4_xdr_enc_renew(struct rpc_rqst *req, struct xdr_stream *xdr,
+			       struct nfs_client *clp)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr = {
 		.nops	= 0,
 	};
 
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-	encode_compound_hdr(&xdr, req, &hdr);
-	encode_renew(&xdr, clp, &hdr);
+	encode_compound_hdr(xdr, req, &hdr);
+	encode_renew(xdr, clp, &hdr);
 	encode_nops(&hdr);
-	return 0;
 }
 
 /*
  * a SETCLIENTID request
  */
-static int nfs4_xdr_enc_setclientid(struct rpc_rqst *req, __be32 *p, struct nfs4_setclientid *sc)
+static void nfs4_xdr_enc_setclientid(struct rpc_rqst *req,
+				     struct xdr_stream *xdr,
+				     struct nfs4_setclientid *sc)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr = {
 		.nops	= 0,
 	};
 
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-	encode_compound_hdr(&xdr, req, &hdr);
-	encode_setclientid(&xdr, sc, &hdr);
+	encode_compound_hdr(xdr, req, &hdr);
+	encode_setclientid(xdr, sc, &hdr);
 	encode_nops(&hdr);
-	return 0;
 }
 
 /*
  * a SETCLIENTID_CONFIRM request
  */
-static int nfs4_xdr_enc_setclientid_confirm(struct rpc_rqst *req, __be32 *p, struct nfs4_setclientid_res *arg)
+static void nfs4_xdr_enc_setclientid_confirm(struct rpc_rqst *req,
+					     struct xdr_stream *xdr,
+					     struct nfs4_setclientid_res *arg)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr = {
 		.nops	= 0,
 	};
 	const u32 lease_bitmap[2] = { FATTR4_WORD0_LEASE_TIME, 0 };
 
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-	encode_compound_hdr(&xdr, req, &hdr);
-	encode_setclientid_confirm(&xdr, arg, &hdr);
-	encode_putrootfh(&xdr, &hdr);
-	encode_fsinfo(&xdr, lease_bitmap, &hdr);
+	encode_compound_hdr(xdr, req, &hdr);
+	encode_setclientid_confirm(xdr, arg, &hdr);
+	encode_putrootfh(xdr, &hdr);
+	encode_fsinfo(xdr, lease_bitmap, &hdr);
 	encode_nops(&hdr);
-	return 0;
 }
 
 /*
  * DELEGRETURN request
  */
-static int nfs4_xdr_enc_delegreturn(struct rpc_rqst *req, __be32 *p, const struct nfs4_delegreturnargs *args)
+static void nfs4_xdr_enc_delegreturn(struct rpc_rqst *req,
+				     struct xdr_stream *xdr,
+				     const struct nfs4_delegreturnargs *args)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr = {
 		.minorversion = nfs4_xdr_minorversion(&args->seq_args),
 	};
 
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-	encode_compound_hdr(&xdr, req, &hdr);
-	encode_sequence(&xdr, &args->seq_args, &hdr);
-	encode_putfh(&xdr, args->fhandle, &hdr);
-	encode_delegreturn(&xdr, args->stateid, &hdr);
-	encode_getfattr(&xdr, args->bitmask, &hdr);
+	encode_compound_hdr(xdr, req, &hdr);
+	encode_sequence(xdr, &args->seq_args, &hdr);
+	encode_putfh(xdr, args->fhandle, &hdr);
+	encode_delegreturn(xdr, args->stateid, &hdr);
+	encode_getfattr(xdr, args->bitmask, &hdr);
 	encode_nops(&hdr);
-	return 0;
 }
 
 /*
  * Encode FS_LOCATIONS request
  */
-static int nfs4_xdr_enc_fs_locations(struct rpc_rqst *req, __be32 *p, struct nfs4_fs_locations_arg *args)
+static void nfs4_xdr_enc_fs_locations(struct rpc_rqst *req,
+				      struct xdr_stream *xdr,
+				      struct nfs4_fs_locations_arg *args)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr = {
 		.minorversion = nfs4_xdr_minorversion(&args->seq_args),
 	};
 	uint32_t replen;
 
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-	encode_compound_hdr(&xdr, req, &hdr);
-	encode_sequence(&xdr, &args->seq_args, &hdr);
-	encode_putfh(&xdr, args->dir_fh, &hdr);
-	encode_lookup(&xdr, args->name, &hdr);
+	encode_compound_hdr(xdr, req, &hdr);
+	encode_sequence(xdr, &args->seq_args, &hdr);
+	encode_putfh(xdr, args->dir_fh, &hdr);
+	encode_lookup(xdr, args->name, &hdr);
 	replen = hdr.replen;	/* get the attribute into args->page */
-	encode_fs_locations(&xdr, args->bitmask, &hdr);
+	encode_fs_locations(xdr, args->bitmask, &hdr);
 
 	xdr_inline_pages(&req->rq_rcv_buf, replen << 2, &args->page,
 			0, PAGE_SIZE);
 	encode_nops(&hdr);
-	return 0;
 }
 
 #if defined(CONFIG_NFS_V4_1)
 /*
  * EXCHANGE_ID request
  */
-static int nfs4_xdr_enc_exchange_id(struct rpc_rqst *req, uint32_t *p,
-				    struct nfs41_exchange_id_args *args)
+static void nfs4_xdr_enc_exchange_id(struct rpc_rqst *req,
+				     struct xdr_stream *xdr,
+				     struct nfs41_exchange_id_args *args)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr = {
 		.minorversion = args->client->cl_mvops->minor_version,
 	};
 
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-	encode_compound_hdr(&xdr, req, &hdr);
-	encode_exchange_id(&xdr, args, &hdr);
+	encode_compound_hdr(xdr, req, &hdr);
+	encode_exchange_id(xdr, args, &hdr);
 	encode_nops(&hdr);
-	return 0;
 }
 
 /*
  * a CREATE_SESSION request
  */
-static int nfs4_xdr_enc_create_session(struct rpc_rqst *req, uint32_t *p,
-				       struct nfs41_create_session_args *args)
+static void nfs4_xdr_enc_create_session(struct rpc_rqst *req,
+					struct xdr_stream *xdr,
+					struct nfs41_create_session_args *args)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr = {
 		.minorversion = args->client->cl_mvops->minor_version,
 	};
 
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-	encode_compound_hdr(&xdr, req, &hdr);
-	encode_create_session(&xdr, args, &hdr);
+	encode_compound_hdr(xdr, req, &hdr);
+	encode_create_session(xdr, args, &hdr);
 	encode_nops(&hdr);
-	return 0;
 }
 
 /*
  * a DESTROY_SESSION request
  */
-static int nfs4_xdr_enc_destroy_session(struct rpc_rqst *req, uint32_t *p,
-					struct nfs4_session *session)
+static void nfs4_xdr_enc_destroy_session(struct rpc_rqst *req,
+					 struct xdr_stream *xdr,
+					 struct nfs4_session *session)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr = {
 		.minorversion = session->clp->cl_mvops->minor_version,
 	};
 
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-	encode_compound_hdr(&xdr, req, &hdr);
-	encode_destroy_session(&xdr, session, &hdr);
+	encode_compound_hdr(xdr, req, &hdr);
+	encode_destroy_session(xdr, session, &hdr);
 	encode_nops(&hdr);
-	return 0;
 }
 
 /*
  * a SEQUENCE request
  */
-static int nfs4_xdr_enc_sequence(struct rpc_rqst *req, uint32_t *p,
-				 struct nfs4_sequence_args *args)
+static void nfs4_xdr_enc_sequence(struct rpc_rqst *req, struct xdr_stream *xdr,
+				  struct nfs4_sequence_args *args)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr = {
 		.minorversion = nfs4_xdr_minorversion(args),
 	};
 
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-	encode_compound_hdr(&xdr, req, &hdr);
-	encode_sequence(&xdr, args, &hdr);
+	encode_compound_hdr(xdr, req, &hdr);
+	encode_sequence(xdr, args, &hdr);
 	encode_nops(&hdr);
-	return 0;
 }
 
 /*
  * a GET_LEASE_TIME request
  */
-static int nfs4_xdr_enc_get_lease_time(struct rpc_rqst *req, uint32_t *p,
-				       struct nfs4_get_lease_time_args *args)
+static void nfs4_xdr_enc_get_lease_time(struct rpc_rqst *req,
+					struct xdr_stream *xdr,
+					struct nfs4_get_lease_time_args *args)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr = {
 		.minorversion = nfs4_xdr_minorversion(&args->la_seq_args),
 	};
 	const u32 lease_bitmap[2] = { FATTR4_WORD0_LEASE_TIME, 0 };
 
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-	encode_compound_hdr(&xdr, req, &hdr);
-	encode_sequence(&xdr, &args->la_seq_args, &hdr);
-	encode_putrootfh(&xdr, &hdr);
-	encode_fsinfo(&xdr, lease_bitmap, &hdr);
+	encode_compound_hdr(xdr, req, &hdr);
+	encode_sequence(xdr, &args->la_seq_args, &hdr);
+	encode_putrootfh(xdr, &hdr);
+	encode_fsinfo(xdr, lease_bitmap, &hdr);
 	encode_nops(&hdr);
-	return 0;
 }
 
 /*
  * a RECLAIM_COMPLETE request
  */
-static int nfs4_xdr_enc_reclaim_complete(struct rpc_rqst *req, uint32_t *p,
-				     struct nfs41_reclaim_complete_args *args)
+static void nfs4_xdr_enc_reclaim_complete(struct rpc_rqst *req,
+					  struct xdr_stream *xdr,
+				struct nfs41_reclaim_complete_args *args)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr = {
 		.minorversion = nfs4_xdr_minorversion(&args->seq_args)
 	};
 
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-	encode_compound_hdr(&xdr, req, &hdr);
-	encode_sequence(&xdr, &args->seq_args, &hdr);
-	encode_reclaim_complete(&xdr, args, &hdr);
+	encode_compound_hdr(xdr, req, &hdr);
+	encode_sequence(xdr, &args->seq_args, &hdr);
+	encode_reclaim_complete(xdr, args, &hdr);
 	encode_nops(&hdr);
-	return 0;
 }
 
 /*
  * Encode GETDEVICEINFO request
  */
-static int nfs4_xdr_enc_getdeviceinfo(struct rpc_rqst *req, uint32_t *p,
-				      struct nfs4_getdeviceinfo_args *args)
+static void nfs4_xdr_enc_getdeviceinfo(struct rpc_rqst *req,
+				       struct xdr_stream *xdr,
+				       struct nfs4_getdeviceinfo_args *args)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr = {
 		.minorversion = nfs4_xdr_minorversion(&args->seq_args),
 	};
 
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-	encode_compound_hdr(&xdr, req, &hdr);
-	encode_sequence(&xdr, &args->seq_args, &hdr);
-	encode_getdeviceinfo(&xdr, args, &hdr);
+	encode_compound_hdr(xdr, req, &hdr);
+	encode_sequence(xdr, &args->seq_args, &hdr);
+	encode_getdeviceinfo(xdr, args, &hdr);
 
 	/* set up reply kvec. Subtract notification bitmap max size (2)
 	 * so that notification bitmap is put in xdr_buf tail */
@@ -2657,27 +2582,24 @@
 			 args->pdev->pglen);
 
 	encode_nops(&hdr);
-	return 0;
 }
 
 /*
  *  Encode LAYOUTGET request
  */
-static int nfs4_xdr_enc_layoutget(struct rpc_rqst *req, uint32_t *p,
-				  struct nfs4_layoutget_args *args)
+static void nfs4_xdr_enc_layoutget(struct rpc_rqst *req,
+				   struct xdr_stream *xdr,
+				   struct nfs4_layoutget_args *args)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr = {
 		.minorversion = nfs4_xdr_minorversion(&args->seq_args),
 	};
 
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-	encode_compound_hdr(&xdr, req, &hdr);
-	encode_sequence(&xdr, &args->seq_args, &hdr);
-	encode_putfh(&xdr, NFS_FH(args->inode), &hdr);
-	encode_layoutget(&xdr, args, &hdr);
+	encode_compound_hdr(xdr, req, &hdr);
+	encode_sequence(xdr, &args->seq_args, &hdr);
+	encode_putfh(xdr, NFS_FH(args->inode), &hdr);
+	encode_layoutget(xdr, args, &hdr);
 	encode_nops(&hdr);
-	return 0;
 }
 #endif /* CONFIG_NFS_V4_1 */
 
@@ -4475,7 +4397,7 @@
 		goto out_overflow;
 	eof = be32_to_cpup(p++);
 	count = be32_to_cpup(p);
-	hdrlen = (u8 *) p - (u8 *) iov->iov_base;
+	hdrlen = (u8 *) xdr->p - (u8 *) iov->iov_base;
 	recvd = req->rq_rcv_buf.len - hdrlen;
 	if (count > recvd) {
 		dprintk("NFS: server cheating in read reply: "
@@ -5000,7 +4922,7 @@
 		goto out_overflow;
 	len = be32_to_cpup(p);
 	if (len) {
-		int i;
+		uint32_t i;
 
 		p = xdr_inline_decode(xdr, 4 * len);
 		if (unlikely(!p))
@@ -5090,26 +5012,26 @@
 /*
  * Decode OPEN_DOWNGRADE response
  */
-static int nfs4_xdr_dec_open_downgrade(struct rpc_rqst *rqstp, __be32 *p, struct nfs_closeres *res)
+static int nfs4_xdr_dec_open_downgrade(struct rpc_rqst *rqstp,
+				       struct xdr_stream *xdr,
+				       struct nfs_closeres *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (status)
 		goto out;
-	status = decode_sequence(&xdr, &res->seq_res, rqstp);
+	status = decode_sequence(xdr, &res->seq_res, rqstp);
 	if (status)
 		goto out;
-	status = decode_putfh(&xdr);
+	status = decode_putfh(xdr);
 	if (status)
 		goto out;
-	status = decode_open_downgrade(&xdr, res);
+	status = decode_open_downgrade(xdr, res);
 	if (status != 0)
 		goto out;
-	decode_getfattr(&xdr, res->fattr, res->server,
+	decode_getfattr(xdr, res->fattr, res->server,
 			!RPC_IS_ASYNC(rqstp->rq_task));
 out:
 	return status;
@@ -5118,26 +5040,25 @@
 /*
  * Decode ACCESS response
  */
-static int nfs4_xdr_dec_access(struct rpc_rqst *rqstp, __be32 *p, struct nfs4_accessres *res)
+static int nfs4_xdr_dec_access(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
+			       struct nfs4_accessres *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (status)
 		goto out;
-	status = decode_sequence(&xdr, &res->seq_res, rqstp);
+	status = decode_sequence(xdr, &res->seq_res, rqstp);
 	if (status)
 		goto out;
-	status = decode_putfh(&xdr);
+	status = decode_putfh(xdr);
 	if (status != 0)
 		goto out;
-	status = decode_access(&xdr, res);
+	status = decode_access(xdr, res);
 	if (status != 0)
 		goto out;
-	decode_getfattr(&xdr, res->fattr, res->server,
+	decode_getfattr(xdr, res->fattr, res->server,
 			!RPC_IS_ASYNC(rqstp->rq_task));
 out:
 	return status;
@@ -5146,26 +5067,28 @@
 /*
  * Decode LOOKUP response
  */
-static int nfs4_xdr_dec_lookup(struct rpc_rqst *rqstp, __be32 *p, struct nfs4_lookup_res *res)
+static int nfs4_xdr_dec_lookup(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
+			       struct nfs4_lookup_res *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (status)
 		goto out;
-	status = decode_sequence(&xdr, &res->seq_res, rqstp);
+	status = decode_sequence(xdr, &res->seq_res, rqstp);
 	if (status)
 		goto out;
-	if ((status = decode_putfh(&xdr)) != 0)
+	status = decode_putfh(xdr);
+	if (status)
 		goto out;
-	if ((status = decode_lookup(&xdr)) != 0)
+	status = decode_lookup(xdr);
+	if (status)
 		goto out;
-	if ((status = decode_getfh(&xdr, res->fh)) != 0)
+	status = decode_getfh(xdr, res->fh);
+	if (status)
 		goto out;
-	status = decode_getfattr(&xdr, res->fattr, res->server
+	status = decode_getfattr(xdr, res->fattr, res->server
 			,!RPC_IS_ASYNC(rqstp->rq_task));
 out:
 	return status;
@@ -5174,23 +5097,25 @@
 /*
  * Decode LOOKUP_ROOT response
  */
-static int nfs4_xdr_dec_lookup_root(struct rpc_rqst *rqstp, __be32 *p, struct nfs4_lookup_res *res)
+static int nfs4_xdr_dec_lookup_root(struct rpc_rqst *rqstp,
+				    struct xdr_stream *xdr,
+				    struct nfs4_lookup_res *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (status)
 		goto out;
-	status = decode_sequence(&xdr, &res->seq_res, rqstp);
+	status = decode_sequence(xdr, &res->seq_res, rqstp);
 	if (status)
 		goto out;
-	if ((status = decode_putrootfh(&xdr)) != 0)
+	status = decode_putrootfh(xdr);
+	if (status)
 		goto out;
-	if ((status = decode_getfh(&xdr, res->fh)) == 0)
-		status = decode_getfattr(&xdr, res->fattr, res->server,
+	status = decode_getfh(xdr, res->fh);
+	if (status == 0)
+		status = decode_getfattr(xdr, res->fattr, res->server,
 				!RPC_IS_ASYNC(rqstp->rq_task));
 out:
 	return status;
@@ -5199,24 +5124,25 @@
 /*
  * Decode REMOVE response
  */
-static int nfs4_xdr_dec_remove(struct rpc_rqst *rqstp, __be32 *p, struct nfs_removeres *res)
+static int nfs4_xdr_dec_remove(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
+			       struct nfs_removeres *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (status)
 		goto out;
-	status = decode_sequence(&xdr, &res->seq_res, rqstp);
+	status = decode_sequence(xdr, &res->seq_res, rqstp);
 	if (status)
 		goto out;
-	if ((status = decode_putfh(&xdr)) != 0)
+	status = decode_putfh(xdr);
+	if (status)
 		goto out;
-	if ((status = decode_remove(&xdr, &res->cinfo)) != 0)
+	status = decode_remove(xdr, &res->cinfo);
+	if (status)
 		goto out;
-	decode_getfattr(&xdr, res->dir_attr, res->server,
+	decode_getfattr(xdr, res->dir_attr, res->server,
 			!RPC_IS_ASYNC(rqstp->rq_task));
 out:
 	return status;
@@ -5225,34 +5151,38 @@
 /*
  * Decode RENAME response
  */
-static int nfs4_xdr_dec_rename(struct rpc_rqst *rqstp, __be32 *p, struct nfs_renameres *res)
+static int nfs4_xdr_dec_rename(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
+			       struct nfs_renameres *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (status)
 		goto out;
-	status = decode_sequence(&xdr, &res->seq_res, rqstp);
+	status = decode_sequence(xdr, &res->seq_res, rqstp);
 	if (status)
 		goto out;
-	if ((status = decode_putfh(&xdr)) != 0)
+	status = decode_putfh(xdr);
+	if (status)
 		goto out;
-	if ((status = decode_savefh(&xdr)) != 0)
+	status = decode_savefh(xdr);
+	if (status)
 		goto out;
-	if ((status = decode_putfh(&xdr)) != 0)
+	status = decode_putfh(xdr);
+	if (status)
 		goto out;
-	if ((status = decode_rename(&xdr, &res->old_cinfo, &res->new_cinfo)) != 0)
+	status = decode_rename(xdr, &res->old_cinfo, &res->new_cinfo);
+	if (status)
 		goto out;
 	/* Current FH is target directory */
-	if (decode_getfattr(&xdr, res->new_fattr, res->server,
+	if (decode_getfattr(xdr, res->new_fattr, res->server,
 				!RPC_IS_ASYNC(rqstp->rq_task)) != 0)
 		goto out;
-	if ((status = decode_restorefh(&xdr)) != 0)
+	status = decode_restorefh(xdr);
+	if (status)
 		goto out;
-	decode_getfattr(&xdr, res->old_fattr, res->server,
+	decode_getfattr(xdr, res->old_fattr, res->server,
 			!RPC_IS_ASYNC(rqstp->rq_task));
 out:
 	return status;
@@ -5261,37 +5191,41 @@
 /*
  * Decode LINK response
  */
-static int nfs4_xdr_dec_link(struct rpc_rqst *rqstp, __be32 *p, struct nfs4_link_res *res)
+static int nfs4_xdr_dec_link(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
+			     struct nfs4_link_res *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (status)
 		goto out;
-	status = decode_sequence(&xdr, &res->seq_res, rqstp);
+	status = decode_sequence(xdr, &res->seq_res, rqstp);
 	if (status)
 		goto out;
-	if ((status = decode_putfh(&xdr)) != 0)
+	status = decode_putfh(xdr);
+	if (status)
 		goto out;
-	if ((status = decode_savefh(&xdr)) != 0)
+	status = decode_savefh(xdr);
+	if (status)
 		goto out;
-	if ((status = decode_putfh(&xdr)) != 0)
+	status = decode_putfh(xdr);
+	if (status)
 		goto out;
-	if ((status = decode_link(&xdr, &res->cinfo)) != 0)
+	status = decode_link(xdr, &res->cinfo);
+	if (status)
 		goto out;
 	/*
 	 * Note order: OP_LINK leaves the directory as the current
 	 *             filehandle.
 	 */
-	if (decode_getfattr(&xdr, res->dir_attr, res->server,
+	if (decode_getfattr(xdr, res->dir_attr, res->server,
 				!RPC_IS_ASYNC(rqstp->rq_task)) != 0)
 		goto out;
-	if ((status = decode_restorefh(&xdr)) != 0)
+	status = decode_restorefh(xdr);
+	if (status)
 		goto out;
-	decode_getfattr(&xdr, res->fattr, res->server,
+	decode_getfattr(xdr, res->fattr, res->server,
 			!RPC_IS_ASYNC(rqstp->rq_task));
 out:
 	return status;
@@ -5300,33 +5234,37 @@
 /*
  * Decode CREATE response
  */
-static int nfs4_xdr_dec_create(struct rpc_rqst *rqstp, __be32 *p, struct nfs4_create_res *res)
+static int nfs4_xdr_dec_create(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
+			       struct nfs4_create_res *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (status)
 		goto out;
-	status = decode_sequence(&xdr, &res->seq_res, rqstp);
+	status = decode_sequence(xdr, &res->seq_res, rqstp);
 	if (status)
 		goto out;
-	if ((status = decode_putfh(&xdr)) != 0)
+	status = decode_putfh(xdr);
+	if (status)
 		goto out;
-	if ((status = decode_savefh(&xdr)) != 0)
+	status = decode_savefh(xdr);
+	if (status)
 		goto out;
-	if ((status = decode_create(&xdr,&res->dir_cinfo)) != 0)
+	status = decode_create(xdr, &res->dir_cinfo);
+	if (status)
 		goto out;
-	if ((status = decode_getfh(&xdr, res->fh)) != 0)
+	status = decode_getfh(xdr, res->fh);
+	if (status)
 		goto out;
-	if (decode_getfattr(&xdr, res->fattr, res->server,
+	if (decode_getfattr(xdr, res->fattr, res->server,
 				!RPC_IS_ASYNC(rqstp->rq_task)) != 0)
 		goto out;
-	if ((status = decode_restorefh(&xdr)) != 0)
+	status = decode_restorefh(xdr);
+	if (status)
 		goto out;
-	decode_getfattr(&xdr, res->dir_fattr, res->server,
+	decode_getfattr(xdr, res->dir_fattr, res->server,
 			!RPC_IS_ASYNC(rqstp->rq_task));
 out:
 	return status;
@@ -5335,31 +5273,31 @@
 /*
  * Decode SYMLINK response
  */
-static int nfs4_xdr_dec_symlink(struct rpc_rqst *rqstp, __be32 *p, struct nfs4_create_res *res)
+static int nfs4_xdr_dec_symlink(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
+				struct nfs4_create_res *res)
 {
-	return nfs4_xdr_dec_create(rqstp, p, res);
+	return nfs4_xdr_dec_create(rqstp, xdr, res);
 }
 
 /*
  * Decode GETATTR response
  */
-static int nfs4_xdr_dec_getattr(struct rpc_rqst *rqstp, __be32 *p, struct nfs4_getattr_res *res)
+static int nfs4_xdr_dec_getattr(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
+				struct nfs4_getattr_res *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (status)
 		goto out;
-	status = decode_sequence(&xdr, &res->seq_res, rqstp);
+	status = decode_sequence(xdr, &res->seq_res, rqstp);
 	if (status)
 		goto out;
-	status = decode_putfh(&xdr);
+	status = decode_putfh(xdr);
 	if (status)
 		goto out;
-	status = decode_getfattr(&xdr, res->fattr, res->server,
+	status = decode_getfattr(xdr, res->fattr, res->server,
 			!RPC_IS_ASYNC(rqstp->rq_task));
 out:
 	return status;
@@ -5368,46 +5306,40 @@
 /*
  * Encode an SETACL request
  */
-static int
-nfs4_xdr_enc_setacl(struct rpc_rqst *req, __be32 *p, struct nfs_setaclargs *args)
+static void nfs4_xdr_enc_setacl(struct rpc_rqst *req, struct xdr_stream *xdr,
+				struct nfs_setaclargs *args)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr = {
 		.minorversion = nfs4_xdr_minorversion(&args->seq_args),
 	};
-	int status;
 
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-	encode_compound_hdr(&xdr, req, &hdr);
-	encode_sequence(&xdr, &args->seq_args, &hdr);
-	encode_putfh(&xdr, args->fh, &hdr);
-	status = encode_setacl(&xdr, args, &hdr);
+	encode_compound_hdr(xdr, req, &hdr);
+	encode_sequence(xdr, &args->seq_args, &hdr);
+	encode_putfh(xdr, args->fh, &hdr);
+	encode_setacl(xdr, args, &hdr);
 	encode_nops(&hdr);
-	return status;
 }
 
 /*
  * Decode SETACL response
  */
 static int
-nfs4_xdr_dec_setacl(struct rpc_rqst *rqstp, __be32 *p,
+nfs4_xdr_dec_setacl(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
 		    struct nfs_setaclres *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (status)
 		goto out;
-	status = decode_sequence(&xdr, &res->seq_res, rqstp);
+	status = decode_sequence(xdr, &res->seq_res, rqstp);
 	if (status)
 		goto out;
-	status = decode_putfh(&xdr);
+	status = decode_putfh(xdr);
 	if (status)
 		goto out;
-	status = decode_setattr(&xdr);
+	status = decode_setattr(xdr);
 out:
 	return status;
 }
@@ -5416,24 +5348,22 @@
  * Decode GETACL response
  */
 static int
-nfs4_xdr_dec_getacl(struct rpc_rqst *rqstp, __be32 *p,
+nfs4_xdr_dec_getacl(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
 		    struct nfs_getaclres *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (status)
 		goto out;
-	status = decode_sequence(&xdr, &res->seq_res, rqstp);
+	status = decode_sequence(xdr, &res->seq_res, rqstp);
 	if (status)
 		goto out;
-	status = decode_putfh(&xdr);
+	status = decode_putfh(xdr);
 	if (status)
 		goto out;
-	status = decode_getacl(&xdr, rqstp, &res->acl_len);
+	status = decode_getacl(xdr, rqstp, &res->acl_len);
 
 out:
 	return status;
@@ -5442,23 +5372,22 @@
 /*
  * Decode CLOSE response
  */
-static int nfs4_xdr_dec_close(struct rpc_rqst *rqstp, __be32 *p, struct nfs_closeres *res)
+static int nfs4_xdr_dec_close(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
+			      struct nfs_closeres *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (status)
 		goto out;
-	status = decode_sequence(&xdr, &res->seq_res, rqstp);
+	status = decode_sequence(xdr, &res->seq_res, rqstp);
 	if (status)
 		goto out;
-	status = decode_putfh(&xdr);
+	status = decode_putfh(xdr);
 	if (status)
 		goto out;
-	status = decode_close(&xdr, res);
+	status = decode_close(xdr, res);
 	if (status != 0)
 		goto out;
 	/*
@@ -5467,7 +5396,7 @@
 	 * 	an ESTALE error. Shouldn't be a problem,
 	 * 	though, since fattr->valid will remain unset.
 	 */
-	decode_getfattr(&xdr, res->fattr, res->server,
+	decode_getfattr(xdr, res->fattr, res->server,
 			!RPC_IS_ASYNC(rqstp->rq_task));
 out:
 	return status;
@@ -5476,36 +5405,35 @@
 /*
  * Decode OPEN response
  */
-static int nfs4_xdr_dec_open(struct rpc_rqst *rqstp, __be32 *p, struct nfs_openres *res)
+static int nfs4_xdr_dec_open(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
+			     struct nfs_openres *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (status)
 		goto out;
-	status = decode_sequence(&xdr, &res->seq_res, rqstp);
+	status = decode_sequence(xdr, &res->seq_res, rqstp);
 	if (status)
 		goto out;
-	status = decode_putfh(&xdr);
+	status = decode_putfh(xdr);
 	if (status)
 		goto out;
-	status = decode_savefh(&xdr);
+	status = decode_savefh(xdr);
 	if (status)
 		goto out;
-	status = decode_open(&xdr, res);
+	status = decode_open(xdr, res);
 	if (status)
 		goto out;
-	if (decode_getfh(&xdr, &res->fh) != 0)
+	if (decode_getfh(xdr, &res->fh) != 0)
 		goto out;
-	if (decode_getfattr(&xdr, res->f_attr, res->server,
+	if (decode_getfattr(xdr, res->f_attr, res->server,
 				!RPC_IS_ASYNC(rqstp->rq_task)) != 0)
 		goto out;
-	if (decode_restorefh(&xdr) != 0)
+	if (decode_restorefh(xdr) != 0)
 		goto out;
-	decode_getfattr(&xdr, res->dir_attr, res->server,
+	decode_getfattr(xdr, res->dir_attr, res->server,
 			!RPC_IS_ASYNC(rqstp->rq_task));
 out:
 	return status;
@@ -5514,20 +5442,20 @@
 /*
  * Decode OPEN_CONFIRM response
  */
-static int nfs4_xdr_dec_open_confirm(struct rpc_rqst *rqstp, __be32 *p, struct nfs_open_confirmres *res)
+static int nfs4_xdr_dec_open_confirm(struct rpc_rqst *rqstp,
+				     struct xdr_stream *xdr,
+				     struct nfs_open_confirmres *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (status)
 		goto out;
-	status = decode_putfh(&xdr);
+	status = decode_putfh(xdr);
 	if (status)
 		goto out;
-	status = decode_open_confirm(&xdr, res);
+	status = decode_open_confirm(xdr, res);
 out:
 	return status;
 }
@@ -5535,26 +5463,26 @@
 /*
  * Decode OPEN response
  */
-static int nfs4_xdr_dec_open_noattr(struct rpc_rqst *rqstp, __be32 *p, struct nfs_openres *res)
+static int nfs4_xdr_dec_open_noattr(struct rpc_rqst *rqstp,
+				    struct xdr_stream *xdr,
+				    struct nfs_openres *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (status)
 		goto out;
-	status = decode_sequence(&xdr, &res->seq_res, rqstp);
+	status = decode_sequence(xdr, &res->seq_res, rqstp);
 	if (status)
 		goto out;
-	status = decode_putfh(&xdr);
+	status = decode_putfh(xdr);
 	if (status)
 		goto out;
-	status = decode_open(&xdr, res);
+	status = decode_open(xdr, res);
 	if (status)
 		goto out;
-	decode_getfattr(&xdr, res->f_attr, res->server,
+	decode_getfattr(xdr, res->f_attr, res->server,
 			!RPC_IS_ASYNC(rqstp->rq_task));
 out:
 	return status;
@@ -5563,26 +5491,26 @@
 /*
  * Decode SETATTR response
  */
-static int nfs4_xdr_dec_setattr(struct rpc_rqst *rqstp, __be32 *p, struct nfs_setattrres *res)
+static int nfs4_xdr_dec_setattr(struct rpc_rqst *rqstp,
+				struct xdr_stream *xdr,
+				struct nfs_setattrres *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (status)
 		goto out;
-	status = decode_sequence(&xdr, &res->seq_res, rqstp);
+	status = decode_sequence(xdr, &res->seq_res, rqstp);
 	if (status)
 		goto out;
-	status = decode_putfh(&xdr);
+	status = decode_putfh(xdr);
 	if (status)
 		goto out;
-	status = decode_setattr(&xdr);
+	status = decode_setattr(xdr);
 	if (status)
 		goto out;
-	decode_getfattr(&xdr, res->fattr, res->server,
+	decode_getfattr(xdr, res->fattr, res->server,
 			!RPC_IS_ASYNC(rqstp->rq_task));
 out:
 	return status;
@@ -5591,23 +5519,22 @@
 /*
  * Decode LOCK response
  */
-static int nfs4_xdr_dec_lock(struct rpc_rqst *rqstp, __be32 *p, struct nfs_lock_res *res)
+static int nfs4_xdr_dec_lock(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
+			     struct nfs_lock_res *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (status)
 		goto out;
-	status = decode_sequence(&xdr, &res->seq_res, rqstp);
+	status = decode_sequence(xdr, &res->seq_res, rqstp);
 	if (status)
 		goto out;
-	status = decode_putfh(&xdr);
+	status = decode_putfh(xdr);
 	if (status)
 		goto out;
-	status = decode_lock(&xdr, res);
+	status = decode_lock(xdr, res);
 out:
 	return status;
 }
@@ -5615,23 +5542,22 @@
 /*
  * Decode LOCKT response
  */
-static int nfs4_xdr_dec_lockt(struct rpc_rqst *rqstp, __be32 *p, struct nfs_lockt_res *res)
+static int nfs4_xdr_dec_lockt(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
+			      struct nfs_lockt_res *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (status)
 		goto out;
-	status = decode_sequence(&xdr, &res->seq_res, rqstp);
+	status = decode_sequence(xdr, &res->seq_res, rqstp);
 	if (status)
 		goto out;
-	status = decode_putfh(&xdr);
+	status = decode_putfh(xdr);
 	if (status)
 		goto out;
-	status = decode_lockt(&xdr, res);
+	status = decode_lockt(xdr, res);
 out:
 	return status;
 }
@@ -5639,61 +5565,58 @@
 /*
  * Decode LOCKU response
  */
-static int nfs4_xdr_dec_locku(struct rpc_rqst *rqstp, __be32 *p, struct nfs_locku_res *res)
+static int nfs4_xdr_dec_locku(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
+			      struct nfs_locku_res *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (status)
 		goto out;
-	status = decode_sequence(&xdr, &res->seq_res, rqstp);
+	status = decode_sequence(xdr, &res->seq_res, rqstp);
 	if (status)
 		goto out;
-	status = decode_putfh(&xdr);
+	status = decode_putfh(xdr);
 	if (status)
 		goto out;
-	status = decode_locku(&xdr, res);
+	status = decode_locku(xdr, res);
 out:
 	return status;
 }
 
-static int nfs4_xdr_dec_release_lockowner(struct rpc_rqst *rqstp, __be32 *p, void *dummy)
+static int nfs4_xdr_dec_release_lockowner(struct rpc_rqst *rqstp,
+					  struct xdr_stream *xdr, void *dummy)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (!status)
-		status = decode_release_lockowner(&xdr);
+		status = decode_release_lockowner(xdr);
 	return status;
 }
 
 /*
  * Decode READLINK response
  */
-static int nfs4_xdr_dec_readlink(struct rpc_rqst *rqstp, __be32 *p,
+static int nfs4_xdr_dec_readlink(struct rpc_rqst *rqstp,
+				 struct xdr_stream *xdr,
 				 struct nfs4_readlink_res *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (status)
 		goto out;
-	status = decode_sequence(&xdr, &res->seq_res, rqstp);
+	status = decode_sequence(xdr, &res->seq_res, rqstp);
 	if (status)
 		goto out;
-	status = decode_putfh(&xdr);
+	status = decode_putfh(xdr);
 	if (status)
 		goto out;
-	status = decode_readlink(&xdr, rqstp);
+	status = decode_readlink(xdr, rqstp);
 out:
 	return status;
 }
@@ -5701,23 +5624,22 @@
 /*
  * Decode READDIR response
  */
-static int nfs4_xdr_dec_readdir(struct rpc_rqst *rqstp, __be32 *p, struct nfs4_readdir_res *res)
+static int nfs4_xdr_dec_readdir(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
+				struct nfs4_readdir_res *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (status)
 		goto out;
-	status = decode_sequence(&xdr, &res->seq_res, rqstp);
+	status = decode_sequence(xdr, &res->seq_res, rqstp);
 	if (status)
 		goto out;
-	status = decode_putfh(&xdr);
+	status = decode_putfh(xdr);
 	if (status)
 		goto out;
-	status = decode_readdir(&xdr, rqstp, res);
+	status = decode_readdir(xdr, rqstp, res);
 out:
 	return status;
 }
@@ -5725,23 +5647,22 @@
 /*
  * Decode Read response
  */
-static int nfs4_xdr_dec_read(struct rpc_rqst *rqstp, __be32 *p, struct nfs_readres *res)
+static int nfs4_xdr_dec_read(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
+			     struct nfs_readres *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (status)
 		goto out;
-	status = decode_sequence(&xdr, &res->seq_res, rqstp);
+	status = decode_sequence(xdr, &res->seq_res, rqstp);
 	if (status)
 		goto out;
-	status = decode_putfh(&xdr);
+	status = decode_putfh(xdr);
 	if (status)
 		goto out;
-	status = decode_read(&xdr, rqstp, res);
+	status = decode_read(xdr, rqstp, res);
 	if (!status)
 		status = res->count;
 out:
@@ -5751,26 +5672,25 @@
 /*
  * Decode WRITE response
  */
-static int nfs4_xdr_dec_write(struct rpc_rqst *rqstp, __be32 *p, struct nfs_writeres *res)
+static int nfs4_xdr_dec_write(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
+			      struct nfs_writeres *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (status)
 		goto out;
-	status = decode_sequence(&xdr, &res->seq_res, rqstp);
+	status = decode_sequence(xdr, &res->seq_res, rqstp);
 	if (status)
 		goto out;
-	status = decode_putfh(&xdr);
+	status = decode_putfh(xdr);
 	if (status)
 		goto out;
-	status = decode_write(&xdr, res);
+	status = decode_write(xdr, res);
 	if (status)
 		goto out;
-	decode_getfattr(&xdr, res->fattr, res->server,
+	decode_getfattr(xdr, res->fattr, res->server,
 			!RPC_IS_ASYNC(rqstp->rq_task));
 	if (!status)
 		status = res->count;
@@ -5781,26 +5701,25 @@
 /*
  * Decode COMMIT response
  */
-static int nfs4_xdr_dec_commit(struct rpc_rqst *rqstp, __be32 *p, struct nfs_writeres *res)
+static int nfs4_xdr_dec_commit(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
+			       struct nfs_writeres *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (status)
 		goto out;
-	status = decode_sequence(&xdr, &res->seq_res, rqstp);
+	status = decode_sequence(xdr, &res->seq_res, rqstp);
 	if (status)
 		goto out;
-	status = decode_putfh(&xdr);
+	status = decode_putfh(xdr);
 	if (status)
 		goto out;
-	status = decode_commit(&xdr, res);
+	status = decode_commit(xdr, res);
 	if (status)
 		goto out;
-	decode_getfattr(&xdr, res->fattr, res->server,
+	decode_getfattr(xdr, res->fattr, res->server,
 			!RPC_IS_ASYNC(rqstp->rq_task));
 out:
 	return status;
@@ -5809,85 +5728,80 @@
 /*
  * Decode FSINFO response
  */
-static int nfs4_xdr_dec_fsinfo(struct rpc_rqst *req, __be32 *p,
+static int nfs4_xdr_dec_fsinfo(struct rpc_rqst *req, struct xdr_stream *xdr,
 			       struct nfs4_fsinfo_res *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &req->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (!status)
-		status = decode_sequence(&xdr, &res->seq_res, req);
+		status = decode_sequence(xdr, &res->seq_res, req);
 	if (!status)
-		status = decode_putfh(&xdr);
+		status = decode_putfh(xdr);
 	if (!status)
-		status = decode_fsinfo(&xdr, res->fsinfo);
+		status = decode_fsinfo(xdr, res->fsinfo);
 	return status;
 }
 
 /*
  * Decode PATHCONF response
  */
-static int nfs4_xdr_dec_pathconf(struct rpc_rqst *req, __be32 *p,
+static int nfs4_xdr_dec_pathconf(struct rpc_rqst *req, struct xdr_stream *xdr,
 				 struct nfs4_pathconf_res *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &req->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (!status)
-		status = decode_sequence(&xdr, &res->seq_res, req);
+		status = decode_sequence(xdr, &res->seq_res, req);
 	if (!status)
-		status = decode_putfh(&xdr);
+		status = decode_putfh(xdr);
 	if (!status)
-		status = decode_pathconf(&xdr, res->pathconf);
+		status = decode_pathconf(xdr, res->pathconf);
 	return status;
 }
 
 /*
  * Decode STATFS response
  */
-static int nfs4_xdr_dec_statfs(struct rpc_rqst *req, __be32 *p,
+static int nfs4_xdr_dec_statfs(struct rpc_rqst *req, struct xdr_stream *xdr,
 			       struct nfs4_statfs_res *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &req->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (!status)
-		status = decode_sequence(&xdr, &res->seq_res, req);
+		status = decode_sequence(xdr, &res->seq_res, req);
 	if (!status)
-		status = decode_putfh(&xdr);
+		status = decode_putfh(xdr);
 	if (!status)
-		status = decode_statfs(&xdr, res->fsstat);
+		status = decode_statfs(xdr, res->fsstat);
 	return status;
 }
 
 /*
  * Decode GETATTR_BITMAP response
  */
-static int nfs4_xdr_dec_server_caps(struct rpc_rqst *req, __be32 *p, struct nfs4_server_caps_res *res)
+static int nfs4_xdr_dec_server_caps(struct rpc_rqst *req,
+				    struct xdr_stream *xdr,
+				    struct nfs4_server_caps_res *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &req->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (status)
 		goto out;
-	status = decode_sequence(&xdr, &res->seq_res, req);
+	status = decode_sequence(xdr, &res->seq_res, req);
 	if (status)
 		goto out;
-	if ((status = decode_putfh(&xdr)) != 0)
+	status = decode_putfh(xdr);
+	if (status)
 		goto out;
-	status = decode_server_caps(&xdr, res);
+	status = decode_server_caps(xdr, res);
 out:
 	return status;
 }
@@ -5895,79 +5809,77 @@
 /*
  * Decode RENEW response
  */
-static int nfs4_xdr_dec_renew(struct rpc_rqst *rqstp, __be32 *p, void *dummy)
+static int nfs4_xdr_dec_renew(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
+			      void *__unused)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (!status)
-		status = decode_renew(&xdr);
+		status = decode_renew(xdr);
 	return status;
 }
 
 /*
  * Decode SETCLIENTID response
  */
-static int nfs4_xdr_dec_setclientid(struct rpc_rqst *req, __be32 *p,
-		struct nfs4_setclientid_res *res)
+static int nfs4_xdr_dec_setclientid(struct rpc_rqst *req,
+				    struct xdr_stream *xdr,
+				    struct nfs4_setclientid_res *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &req->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (!status)
-		status = decode_setclientid(&xdr, res);
+		status = decode_setclientid(xdr, res);
 	return status;
 }
 
 /*
  * Decode SETCLIENTID_CONFIRM response
  */
-static int nfs4_xdr_dec_setclientid_confirm(struct rpc_rqst *req, __be32 *p, struct nfs_fsinfo *fsinfo)
+static int nfs4_xdr_dec_setclientid_confirm(struct rpc_rqst *req,
+					    struct xdr_stream *xdr,
+					    struct nfs_fsinfo *fsinfo)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &req->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (!status)
-		status = decode_setclientid_confirm(&xdr);
+		status = decode_setclientid_confirm(xdr);
 	if (!status)
-		status = decode_putrootfh(&xdr);
+		status = decode_putrootfh(xdr);
 	if (!status)
-		status = decode_fsinfo(&xdr, fsinfo);
+		status = decode_fsinfo(xdr, fsinfo);
 	return status;
 }
 
 /*
  * Decode DELEGRETURN response
  */
-static int nfs4_xdr_dec_delegreturn(struct rpc_rqst *rqstp, __be32 *p, struct nfs4_delegreturnres *res)
+static int nfs4_xdr_dec_delegreturn(struct rpc_rqst *rqstp,
+				    struct xdr_stream *xdr,
+				    struct nfs4_delegreturnres *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (status)
 		goto out;
-	status = decode_sequence(&xdr, &res->seq_res, rqstp);
+	status = decode_sequence(xdr, &res->seq_res, rqstp);
 	if (status)
 		goto out;
-	status = decode_putfh(&xdr);
+	status = decode_putfh(xdr);
 	if (status != 0)
 		goto out;
-	status = decode_delegreturn(&xdr);
+	status = decode_delegreturn(xdr);
 	if (status != 0)
 		goto out;
-	decode_getfattr(&xdr, res->fattr, res->server,
+	decode_getfattr(xdr, res->fattr, res->server,
 			!RPC_IS_ASYNC(rqstp->rq_task));
 out:
 	return status;
@@ -5976,26 +5888,27 @@
 /*
  * Decode FS_LOCATIONS response
  */
-static int nfs4_xdr_dec_fs_locations(struct rpc_rqst *req, __be32 *p,
+static int nfs4_xdr_dec_fs_locations(struct rpc_rqst *req,
+				     struct xdr_stream *xdr,
 				     struct nfs4_fs_locations_res *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &req->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (status)
 		goto out;
-	status = decode_sequence(&xdr, &res->seq_res, req);
+	status = decode_sequence(xdr, &res->seq_res, req);
 	if (status)
 		goto out;
-	if ((status = decode_putfh(&xdr)) != 0)
+	status = decode_putfh(xdr);
+	if (status)
 		goto out;
-	if ((status = decode_lookup(&xdr)) != 0)
+	status = decode_lookup(xdr);
+	if (status)
 		goto out;
-	xdr_enter_page(&xdr, PAGE_SIZE);
-	status = decode_getfattr(&xdr, &res->fs_locations->fattr,
+	xdr_enter_page(xdr, PAGE_SIZE);
+	status = decode_getfattr(xdr, &res->fs_locations->fattr,
 				 res->fs_locations->server,
 				 !RPC_IS_ASYNC(req->rq_task));
 out:
@@ -6006,129 +5919,122 @@
 /*
  * Decode EXCHANGE_ID response
  */
-static int nfs4_xdr_dec_exchange_id(struct rpc_rqst *rqstp, uint32_t *p,
+static int nfs4_xdr_dec_exchange_id(struct rpc_rqst *rqstp,
+				    struct xdr_stream *xdr,
 				    void *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (!status)
-		status = decode_exchange_id(&xdr, res);
+		status = decode_exchange_id(xdr, res);
 	return status;
 }
 
 /*
  * Decode CREATE_SESSION response
  */
-static int nfs4_xdr_dec_create_session(struct rpc_rqst *rqstp, uint32_t *p,
+static int nfs4_xdr_dec_create_session(struct rpc_rqst *rqstp,
+				       struct xdr_stream *xdr,
 				       struct nfs41_create_session_res *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (!status)
-		status = decode_create_session(&xdr, res);
+		status = decode_create_session(xdr, res);
 	return status;
 }
 
 /*
  * Decode DESTROY_SESSION response
  */
-static int nfs4_xdr_dec_destroy_session(struct rpc_rqst *rqstp, uint32_t *p,
-					void *dummy)
+static int nfs4_xdr_dec_destroy_session(struct rpc_rqst *rqstp,
+					struct xdr_stream *xdr,
+					void *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (!status)
-		status = decode_destroy_session(&xdr, dummy);
+		status = decode_destroy_session(xdr, res);
 	return status;
 }
 
 /*
  * Decode SEQUENCE response
  */
-static int nfs4_xdr_dec_sequence(struct rpc_rqst *rqstp, uint32_t *p,
+static int nfs4_xdr_dec_sequence(struct rpc_rqst *rqstp,
+				 struct xdr_stream *xdr,
 				 struct nfs4_sequence_res *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (!status)
-		status = decode_sequence(&xdr, res, rqstp);
+		status = decode_sequence(xdr, res, rqstp);
 	return status;
 }
 
 /*
  * Decode GET_LEASE_TIME response
  */
-static int nfs4_xdr_dec_get_lease_time(struct rpc_rqst *rqstp, uint32_t *p,
+static int nfs4_xdr_dec_get_lease_time(struct rpc_rqst *rqstp,
+				       struct xdr_stream *xdr,
 				       struct nfs4_get_lease_time_res *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (!status)
-		status = decode_sequence(&xdr, &res->lr_seq_res, rqstp);
+		status = decode_sequence(xdr, &res->lr_seq_res, rqstp);
 	if (!status)
-		status = decode_putrootfh(&xdr);
+		status = decode_putrootfh(xdr);
 	if (!status)
-		status = decode_fsinfo(&xdr, res->lr_fsinfo);
+		status = decode_fsinfo(xdr, res->lr_fsinfo);
 	return status;
 }
 
 /*
  * Decode RECLAIM_COMPLETE response
  */
-static int nfs4_xdr_dec_reclaim_complete(struct rpc_rqst *rqstp, uint32_t *p,
+static int nfs4_xdr_dec_reclaim_complete(struct rpc_rqst *rqstp,
+					 struct xdr_stream *xdr,
 					 struct nfs41_reclaim_complete_res *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (!status)
-		status = decode_sequence(&xdr, &res->seq_res, rqstp);
+		status = decode_sequence(xdr, &res->seq_res, rqstp);
 	if (!status)
-		status = decode_reclaim_complete(&xdr, (void *)NULL);
+		status = decode_reclaim_complete(xdr, (void *)NULL);
 	return status;
 }
 
 /*
  * Decode GETDEVINFO response
  */
-static int nfs4_xdr_dec_getdeviceinfo(struct rpc_rqst *rqstp, uint32_t *p,
+static int nfs4_xdr_dec_getdeviceinfo(struct rpc_rqst *rqstp,
+				      struct xdr_stream *xdr,
 				      struct nfs4_getdeviceinfo_res *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (status != 0)
 		goto out;
-	status = decode_sequence(&xdr, &res->seq_res, rqstp);
+	status = decode_sequence(xdr, &res->seq_res, rqstp);
 	if (status != 0)
 		goto out;
-	status = decode_getdeviceinfo(&xdr, res->pdev);
+	status = decode_getdeviceinfo(xdr, res->pdev);
 out:
 	return status;
 }
@@ -6136,31 +6042,44 @@
 /*
  * Decode LAYOUTGET response
  */
-static int nfs4_xdr_dec_layoutget(struct rpc_rqst *rqstp, uint32_t *p,
+static int nfs4_xdr_dec_layoutget(struct rpc_rqst *rqstp,
+				  struct xdr_stream *xdr,
 				  struct nfs4_layoutget_res *res)
 {
-	struct xdr_stream xdr;
 	struct compound_hdr hdr;
 	int status;
 
-	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
-	status = decode_compound_hdr(&xdr, &hdr);
+	status = decode_compound_hdr(xdr, &hdr);
 	if (status)
 		goto out;
-	status = decode_sequence(&xdr, &res->seq_res, rqstp);
+	status = decode_sequence(xdr, &res->seq_res, rqstp);
 	if (status)
 		goto out;
-	status = decode_putfh(&xdr);
+	status = decode_putfh(xdr);
 	if (status)
 		goto out;
-	status = decode_layoutget(&xdr, rqstp, res);
+	status = decode_layoutget(xdr, rqstp, res);
 out:
 	return status;
 }
 #endif /* CONFIG_NFS_V4_1 */
 
-__be32 *nfs4_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry,
-			   struct nfs_server *server, int plus)
+/**
+ * nfs4_decode_dirent - Decode a single NFSv4 directory entry stored in
+ *                      the local page cache.
+ * @xdr: XDR stream where entry resides
+ * @entry: buffer to fill in with entry data
+ * @plus: boolean indicating whether this should be a readdirplus entry
+ *
+ * Returns zero if successful, otherwise a negative errno value is
+ * returned.
+ *
+ * This function is not invoked during READDIR reply decoding, but
+ * rather whenever an application invokes the getdents(2) system call
+ * on a directory already in our cache.
+ */
+int nfs4_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry,
+		       int plus)
 {
 	uint32_t bitmap[2] = {0};
 	uint32_t len;
@@ -6172,9 +6091,9 @@
 		if (unlikely(!p))
 			goto out_overflow;
 		if (!ntohl(*p++))
-			return ERR_PTR(-EAGAIN);
+			return -EAGAIN;
 		entry->eof = 1;
-		return ERR_PTR(-EBADCOOKIE);
+		return -EBADCOOKIE;
 	}
 
 	p = xdr_inline_decode(xdr, 12);
@@ -6203,7 +6122,8 @@
 	if (decode_attr_length(xdr, &len, &p) < 0)
 		goto out_overflow;
 
-	if (decode_getfattr_attrs(xdr, bitmap, entry->fattr, entry->fh, server, 1) < 0)
+	if (decode_getfattr_attrs(xdr, bitmap, entry->fattr, entry->fh,
+					entry->server, 1) < 0)
 		goto out_overflow;
 	if (entry->fattr->valid & NFS_ATTR_FATTR_FILEID)
 		entry->ino = entry->fattr->fileid;
@@ -6215,17 +6135,11 @@
 	if (verify_attr_len(xdr, p, len) < 0)
 		goto out_overflow;
 
-	p = xdr_inline_peek(xdr, 8);
-	if (p != NULL)
-		entry->eof = !p[0] && p[1];
-	else
-		entry->eof = 0;
-
-	return p;
+	return 0;
 
 out_overflow:
 	print_overflow_msg(__func__, xdr);
-	return ERR_PTR(-EAGAIN);
+	return -EAGAIN;
 }
 
 /*
@@ -6301,8 +6215,8 @@
 #define PROC(proc, argtype, restype)				\
 [NFSPROC4_CLNT_##proc] = {					\
 	.p_proc   = NFSPROC4_COMPOUND,				\
-	.p_encode = (kxdrproc_t) nfs4_xdr_##argtype,		\
-	.p_decode = (kxdrproc_t) nfs4_xdr_##restype,		\
+	.p_encode = (kxdreproc_t)nfs4_xdr_##argtype,		\
+	.p_decode = (kxdrdproc_t)nfs4_xdr_##restype,		\
 	.p_arglen = NFS4_##argtype##_sz,			\
 	.p_replen = NFS4_##restype##_sz,			\
 	.p_statidx = NFSPROC4_CLNT_##proc,			\
@@ -6310,50 +6224,50 @@
 }
 
 struct rpc_procinfo	nfs4_procedures[] = {
-  PROC(READ,		enc_read,	dec_read),
-  PROC(WRITE,		enc_write,	dec_write),
-  PROC(COMMIT,		enc_commit,	dec_commit),
-  PROC(OPEN,		enc_open,	dec_open),
-  PROC(OPEN_CONFIRM,	enc_open_confirm,	dec_open_confirm),
-  PROC(OPEN_NOATTR,	enc_open_noattr,	dec_open_noattr),
-  PROC(OPEN_DOWNGRADE,	enc_open_downgrade,	dec_open_downgrade),
-  PROC(CLOSE,		enc_close,	dec_close),
-  PROC(SETATTR,		enc_setattr,	dec_setattr),
-  PROC(FSINFO,		enc_fsinfo,	dec_fsinfo),
-  PROC(RENEW,		enc_renew,	dec_renew),
-  PROC(SETCLIENTID,	enc_setclientid,	dec_setclientid),
-  PROC(SETCLIENTID_CONFIRM,	enc_setclientid_confirm,	dec_setclientid_confirm),
-  PROC(LOCK,            enc_lock,       dec_lock),
-  PROC(LOCKT,           enc_lockt,      dec_lockt),
-  PROC(LOCKU,           enc_locku,      dec_locku),
-  PROC(ACCESS,		enc_access,	dec_access),
-  PROC(GETATTR,		enc_getattr,	dec_getattr),
-  PROC(LOOKUP,		enc_lookup,	dec_lookup),
-  PROC(LOOKUP_ROOT,	enc_lookup_root,	dec_lookup_root),
-  PROC(REMOVE,		enc_remove,	dec_remove),
-  PROC(RENAME,		enc_rename,	dec_rename),
-  PROC(LINK,		enc_link,	dec_link),
-  PROC(SYMLINK,		enc_symlink,	dec_symlink),
-  PROC(CREATE,		enc_create,	dec_create),
-  PROC(PATHCONF,	enc_pathconf,	dec_pathconf),
-  PROC(STATFS,		enc_statfs,	dec_statfs),
-  PROC(READLINK,	enc_readlink,	dec_readlink),
-  PROC(READDIR,		enc_readdir,	dec_readdir),
-  PROC(SERVER_CAPS,	enc_server_caps, dec_server_caps),
-  PROC(DELEGRETURN,	enc_delegreturn, dec_delegreturn),
-  PROC(GETACL,		enc_getacl,	dec_getacl),
-  PROC(SETACL,		enc_setacl,	dec_setacl),
-  PROC(FS_LOCATIONS,	enc_fs_locations, dec_fs_locations),
-  PROC(RELEASE_LOCKOWNER, enc_release_lockowner, dec_release_lockowner),
+	PROC(READ,		enc_read,		dec_read),
+	PROC(WRITE,		enc_write,		dec_write),
+	PROC(COMMIT,		enc_commit,		dec_commit),
+	PROC(OPEN,		enc_open,		dec_open),
+	PROC(OPEN_CONFIRM,	enc_open_confirm,	dec_open_confirm),
+	PROC(OPEN_NOATTR,	enc_open_noattr,	dec_open_noattr),
+	PROC(OPEN_DOWNGRADE,	enc_open_downgrade,	dec_open_downgrade),
+	PROC(CLOSE,		enc_close,		dec_close),
+	PROC(SETATTR,		enc_setattr,		dec_setattr),
+	PROC(FSINFO,		enc_fsinfo,		dec_fsinfo),
+	PROC(RENEW,		enc_renew,		dec_renew),
+	PROC(SETCLIENTID,	enc_setclientid,	dec_setclientid),
+	PROC(SETCLIENTID_CONFIRM, enc_setclientid_confirm, dec_setclientid_confirm),
+	PROC(LOCK,		enc_lock,		dec_lock),
+	PROC(LOCKT,		enc_lockt,		dec_lockt),
+	PROC(LOCKU,		enc_locku,		dec_locku),
+	PROC(ACCESS,		enc_access,		dec_access),
+	PROC(GETATTR,		enc_getattr,		dec_getattr),
+	PROC(LOOKUP,		enc_lookup,		dec_lookup),
+	PROC(LOOKUP_ROOT,	enc_lookup_root,	dec_lookup_root),
+	PROC(REMOVE,		enc_remove,		dec_remove),
+	PROC(RENAME,		enc_rename,		dec_rename),
+	PROC(LINK,		enc_link,		dec_link),
+	PROC(SYMLINK,		enc_symlink,		dec_symlink),
+	PROC(CREATE,		enc_create,		dec_create),
+	PROC(PATHCONF,		enc_pathconf,		dec_pathconf),
+	PROC(STATFS,		enc_statfs,		dec_statfs),
+	PROC(READLINK,		enc_readlink,		dec_readlink),
+	PROC(READDIR,		enc_readdir,		dec_readdir),
+	PROC(SERVER_CAPS,	enc_server_caps,	dec_server_caps),
+	PROC(DELEGRETURN,	enc_delegreturn,	dec_delegreturn),
+	PROC(GETACL,		enc_getacl,		dec_getacl),
+	PROC(SETACL,		enc_setacl,		dec_setacl),
+	PROC(FS_LOCATIONS,	enc_fs_locations,	dec_fs_locations),
+	PROC(RELEASE_LOCKOWNER,	enc_release_lockowner,	dec_release_lockowner),
 #if defined(CONFIG_NFS_V4_1)
-  PROC(EXCHANGE_ID,	enc_exchange_id,	dec_exchange_id),
-  PROC(CREATE_SESSION,	enc_create_session,	dec_create_session),
-  PROC(DESTROY_SESSION,	enc_destroy_session,	dec_destroy_session),
-  PROC(SEQUENCE,	enc_sequence,	dec_sequence),
-  PROC(GET_LEASE_TIME,	enc_get_lease_time,	dec_get_lease_time),
-  PROC(RECLAIM_COMPLETE, enc_reclaim_complete,  dec_reclaim_complete),
-  PROC(GETDEVICEINFO, enc_getdeviceinfo, dec_getdeviceinfo),
-  PROC(LAYOUTGET,  enc_layoutget,     dec_layoutget),
+	PROC(EXCHANGE_ID,	enc_exchange_id,	dec_exchange_id),
+	PROC(CREATE_SESSION,	enc_create_session,	dec_create_session),
+	PROC(DESTROY_SESSION,	enc_destroy_session,	dec_destroy_session),
+	PROC(SEQUENCE,		enc_sequence,		dec_sequence),
+	PROC(GET_LEASE_TIME,	enc_get_lease_time,	dec_get_lease_time),
+	PROC(RECLAIM_COMPLETE,	enc_reclaim_complete,	dec_reclaim_complete),
+	PROC(GETDEVICEINFO,	enc_getdeviceinfo,	dec_getdeviceinfo),
+	PROC(LAYOUTGET,		enc_layoutget,		dec_layoutget),
 #endif /* CONFIG_NFS_V4_1 */
 };
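The fs/nfs/nfs4xdr.c hunks above all follow one pattern: every per-procedure encode/decode routine used to receive a raw __be32 pointer and call xdr_init_encode()/xdr_init_decode() itself, and now the generic RPC code owns the xdr_stream and passes it in, which is also why p_encode/p_decode are cast to the split kxdreproc_t/kxdrdproc_t types. A rough sketch of the caller side, with a hypothetical helper name and simplified error handling (not the actual net/sunrpc code):

	/* Hypothetical caller-side sketch: initialize the stream once,
	 * then let the per-procedure decoder only consume it. */
	static int example_decode_reply(struct rpc_rqst *rqstp,
					kxdrdproc_t decode, void *res)
	{
		struct xdr_stream xdr;
		__be32 *p = rqstp->rq_rcv_buf.head[0].iov_base;

		xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
		return decode(rqstp, &xdr, res);
	}

The decoders then look exactly like the post-patch nfs4_xdr_dec_*() functions above: no local xdr_stream and no init call, and on the encode side no status to return, since encoding can no longer fail at that layer.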
 
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index b68536c..e1164e3 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -26,12 +26,9 @@
 static inline struct nfs_page *
 nfs_page_alloc(void)
 {
-	struct nfs_page	*p;
-	p = kmem_cache_alloc(nfs_page_cachep, GFP_KERNEL);
-	if (p) {
-		memset(p, 0, sizeof(*p));
+	struct nfs_page	*p = kmem_cache_zalloc(nfs_page_cachep, GFP_KERNEL);
+	if (p)
 		INIT_LIST_HEAD(&p->wb_list);
-	}
 	return p;
 }
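The nfs_page_alloc() change is a straight substitution: kmem_cache_zalloc() is kmem_cache_alloc() followed by zeroing the object, so the explicit memset() goes away while the list-head initialization still runs afterwards. A minimal before/after illustration (error handling as in the code above):

	/* before: allocate, then zero by hand */
	p = kmem_cache_alloc(nfs_page_cachep, GFP_KERNEL);
	if (p)
		memset(p, 0, sizeof(*p));

	/* after: zeroed on successful allocation */
	p = kmem_cache_zalloc(nfs_page_cachep, GFP_KERNEL);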
 
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index db77342..bc40897 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -177,105 +177,149 @@
  * pNFS client layout cache
  */
 
-static void
-get_layout_hdr_locked(struct pnfs_layout_hdr *lo)
+/* Need to hold i_lock if caller does not already hold reference */
+void
+get_layout_hdr(struct pnfs_layout_hdr *lo)
 {
-	assert_spin_locked(&lo->inode->i_lock);
-	lo->refcount++;
+	atomic_inc(&lo->plh_refcount);
+}
+
+static void
+destroy_layout_hdr(struct pnfs_layout_hdr *lo)
+{
+	dprintk("%s: freeing layout cache %p\n", __func__, lo);
+	BUG_ON(!list_empty(&lo->plh_layouts));
+	NFS_I(lo->plh_inode)->layout = NULL;
+	kfree(lo);
 }
 
 static void
 put_layout_hdr_locked(struct pnfs_layout_hdr *lo)
 {
-	assert_spin_locked(&lo->inode->i_lock);
-	BUG_ON(lo->refcount == 0);
-
-	lo->refcount--;
-	if (!lo->refcount) {
-		dprintk("%s: freeing layout cache %p\n", __func__, lo);
-		BUG_ON(!list_empty(&lo->layouts));
-		NFS_I(lo->inode)->layout = NULL;
-		kfree(lo);
-	}
+	if (atomic_dec_and_test(&lo->plh_refcount))
+		destroy_layout_hdr(lo);
 }
 
 void
-put_layout_hdr(struct inode *inode)
+put_layout_hdr(struct pnfs_layout_hdr *lo)
 {
-	spin_lock(&inode->i_lock);
-	put_layout_hdr_locked(NFS_I(inode)->layout);
-	spin_unlock(&inode->i_lock);
+	struct inode *inode = lo->plh_inode;
+
+	if (atomic_dec_and_lock(&lo->plh_refcount, &inode->i_lock)) {
+		destroy_layout_hdr(lo);
+		spin_unlock(&inode->i_lock);
+	}
 }
 
 static void
 init_lseg(struct pnfs_layout_hdr *lo, struct pnfs_layout_segment *lseg)
 {
-	INIT_LIST_HEAD(&lseg->fi_list);
-	kref_init(&lseg->kref);
-	lseg->layout = lo;
+	INIT_LIST_HEAD(&lseg->pls_list);
+	atomic_set(&lseg->pls_refcount, 1);
+	smp_mb();
+	set_bit(NFS_LSEG_VALID, &lseg->pls_flags);
+	lseg->pls_layout = lo;
 }
 
-/* Called without i_lock held, as the free_lseg call may sleep */
-static void
-destroy_lseg(struct kref *kref)
+static void free_lseg(struct pnfs_layout_segment *lseg)
 {
-	struct pnfs_layout_segment *lseg =
-		container_of(kref, struct pnfs_layout_segment, kref);
-	struct inode *ino = lseg->layout->inode;
+	struct inode *ino = lseg->pls_layout->plh_inode;
 
-	dprintk("--> %s\n", __func__);
 	NFS_SERVER(ino)->pnfs_curr_ld->free_lseg(lseg);
-	/* Matched by get_layout_hdr_locked in pnfs_insert_layout */
-	put_layout_hdr(ino);
+	/* Matched by get_layout_hdr in pnfs_insert_layout */
+	put_layout_hdr(NFS_I(ino)->layout);
 }
 
-static void
-put_lseg(struct pnfs_layout_segment *lseg)
+/* The use of tmp_list is necessary because pnfs_curr_ld->free_lseg
+ * could sleep, so must be called outside of the lock.
+ * Returns 1 if object was removed, otherwise return 0.
+ */
+static int
+put_lseg_locked(struct pnfs_layout_segment *lseg,
+		struct list_head *tmp_list)
 {
-	if (!lseg)
-		return;
+	dprintk("%s: lseg %p ref %d valid %d\n", __func__, lseg,
+		atomic_read(&lseg->pls_refcount),
+		test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
+	if (atomic_dec_and_test(&lseg->pls_refcount)) {
+		struct inode *ino = lseg->pls_layout->plh_inode;
 
-	dprintk("%s: lseg %p ref %d\n", __func__, lseg,
-		atomic_read(&lseg->kref.refcount));
-	kref_put(&lseg->kref, destroy_lseg);
+		BUG_ON(test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
+		list_del(&lseg->pls_list);
+		if (list_empty(&lseg->pls_layout->plh_segs)) {
+			struct nfs_client *clp;
+
+			clp = NFS_SERVER(ino)->nfs_client;
+			spin_lock(&clp->cl_lock);
+			/* List does not take a reference, so no need for put here */
+			list_del_init(&lseg->pls_layout->plh_layouts);
+			spin_unlock(&clp->cl_lock);
+			clear_bit(NFS_LAYOUT_BULK_RECALL, &lseg->pls_layout->plh_flags);
+		}
+		rpc_wake_up(&NFS_SERVER(ino)->roc_rpcwaitq);
+		list_add(&lseg->pls_list, tmp_list);
+		return 1;
+	}
+	return 0;
 }
 
-static void
-pnfs_clear_lseg_list(struct pnfs_layout_hdr *lo, struct list_head *tmp_list)
+static bool
+should_free_lseg(u32 lseg_iomode, u32 recall_iomode)
+{
+	return (recall_iomode == IOMODE_ANY ||
+		lseg_iomode == recall_iomode);
+}
+
+/* Returns 1 if lseg is removed from list, 0 otherwise */
+static int mark_lseg_invalid(struct pnfs_layout_segment *lseg,
+			     struct list_head *tmp_list)
+{
+	int rv = 0;
+
+	if (test_and_clear_bit(NFS_LSEG_VALID, &lseg->pls_flags)) {
+		/* Remove the reference keeping the lseg in the
+		 * list.  It will now be removed when all
+		 * outstanding io is finished.
+		 */
+		rv = put_lseg_locked(lseg, tmp_list);
+	}
+	return rv;
+}
+
+/* Returns count of number of matching invalid lsegs remaining in list
+ * after call.
+ */
+int
+mark_matching_lsegs_invalid(struct pnfs_layout_hdr *lo,
+			    struct list_head *tmp_list,
+			    u32 iomode)
 {
 	struct pnfs_layout_segment *lseg, *next;
-	struct nfs_client *clp;
+	int invalid = 0, removed = 0;
 
 	dprintk("%s:Begin lo %p\n", __func__, lo);
 
-	assert_spin_locked(&lo->inode->i_lock);
-	list_for_each_entry_safe(lseg, next, &lo->segs, fi_list) {
-		dprintk("%s: freeing lseg %p\n", __func__, lseg);
-		list_move(&lseg->fi_list, tmp_list);
-	}
-	clp = NFS_SERVER(lo->inode)->nfs_client;
-	spin_lock(&clp->cl_lock);
-	/* List does not take a reference, so no need for put here */
-	list_del_init(&lo->layouts);
-	spin_unlock(&clp->cl_lock);
-	write_seqlock(&lo->seqlock);
-	clear_bit(NFS_LAYOUT_STATEID_SET, &lo->state);
-	write_sequnlock(&lo->seqlock);
-
-	dprintk("%s:Return\n", __func__);
+	list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list)
+		if (should_free_lseg(lseg->pls_range.iomode, iomode)) {
+			dprintk("%s: freeing lseg %p iomode %d "
+				"offset %llu length %llu\n", __func__,
+				lseg, lseg->pls_range.iomode, lseg->pls_range.offset,
+				lseg->pls_range.length);
+			invalid++;
+			removed += mark_lseg_invalid(lseg, tmp_list);
+		}
+	dprintk("%s:Return %i\n", __func__, invalid - removed);
+	return invalid - removed;
 }
 
-static void
-pnfs_free_lseg_list(struct list_head *tmp_list)
+void
+pnfs_free_lseg_list(struct list_head *free_me)
 {
-	struct pnfs_layout_segment *lseg;
+	struct pnfs_layout_segment *lseg, *tmp;
 
-	while (!list_empty(tmp_list)) {
-		lseg = list_entry(tmp_list->next, struct pnfs_layout_segment,
-				fi_list);
-		dprintk("%s calling put_lseg on %p\n", __func__, lseg);
-		list_del(&lseg->fi_list);
-		put_lseg(lseg);
+	list_for_each_entry_safe(lseg, tmp, free_me, pls_list) {
+		list_del(&lseg->pls_list);
+		free_lseg(lseg);
 	}
 }
 
@@ -288,7 +332,8 @@
 	spin_lock(&nfsi->vfs_inode.i_lock);
 	lo = nfsi->layout;
 	if (lo) {
-		pnfs_clear_lseg_list(lo, &tmp_list);
+		set_bit(NFS_LAYOUT_DESTROYED, &nfsi->layout->plh_flags);
+		mark_matching_lsegs_invalid(lo, &tmp_list, IOMODE_ANY);
 		/* Matched by refcount set to 1 in alloc_init_layout_hdr */
 		put_layout_hdr_locked(lo);
 	}
@@ -312,76 +357,80 @@
 
 	while (!list_empty(&tmp_list)) {
 		lo = list_entry(tmp_list.next, struct pnfs_layout_hdr,
-				layouts);
+				plh_layouts);
 		dprintk("%s freeing layout for inode %lu\n", __func__,
-			lo->inode->i_ino);
-		pnfs_destroy_layout(NFS_I(lo->inode));
+			lo->plh_inode->i_ino);
+		pnfs_destroy_layout(NFS_I(lo->plh_inode));
 	}
 }
 
-/* update lo->stateid with new if is more recent
- *
- * lo->stateid could be the open stateid, in which case we just use what given.
- */
-static void
-pnfs_set_layout_stateid(struct pnfs_layout_hdr *lo,
-			const nfs4_stateid *new)
-{
-	nfs4_stateid *old = &lo->stateid;
-	bool overwrite = false;
-
-	write_seqlock(&lo->seqlock);
-	if (!test_bit(NFS_LAYOUT_STATEID_SET, &lo->state) ||
-	    memcmp(old->stateid.other, new->stateid.other, sizeof(new->stateid.other)))
-		overwrite = true;
-	else {
-		u32 oldseq, newseq;
-
-		oldseq = be32_to_cpu(old->stateid.seqid);
-		newseq = be32_to_cpu(new->stateid.seqid);
-		if ((int)(newseq - oldseq) > 0)
-			overwrite = true;
-	}
-	if (overwrite)
-		memcpy(&old->stateid, &new->stateid, sizeof(new->stateid));
-	write_sequnlock(&lo->seqlock);
-}
-
-static void
-pnfs_layout_from_open_stateid(struct pnfs_layout_hdr *lo,
-			      struct nfs4_state *state)
-{
-	int seq;
-
-	dprintk("--> %s\n", __func__);
-	write_seqlock(&lo->seqlock);
-	do {
-		seq = read_seqbegin(&state->seqlock);
-		memcpy(lo->stateid.data, state->stateid.data,
-		       sizeof(state->stateid.data));
-	} while (read_seqretry(&state->seqlock, seq));
-	set_bit(NFS_LAYOUT_STATEID_SET, &lo->state);
-	write_sequnlock(&lo->seqlock);
-	dprintk("<-- %s\n", __func__);
-}
-
+/* update lo->plh_stateid with new if is more recent */
 void
-pnfs_get_layout_stateid(nfs4_stateid *dst, struct pnfs_layout_hdr *lo,
-			struct nfs4_state *open_state)
+pnfs_set_layout_stateid(struct pnfs_layout_hdr *lo, const nfs4_stateid *new,
+			bool update_barrier)
 {
-	int seq;
+	u32 oldseq, newseq;
+
+	oldseq = be32_to_cpu(lo->plh_stateid.stateid.seqid);
+	newseq = be32_to_cpu(new->stateid.seqid);
+	if ((int)(newseq - oldseq) > 0) {
+		memcpy(&lo->plh_stateid, &new->stateid, sizeof(new->stateid));
+		if (update_barrier) {
+			u32 new_barrier = be32_to_cpu(new->stateid.seqid);
+
+			if ((int)(new_barrier - lo->plh_barrier))
+				lo->plh_barrier = new_barrier;
+		} else {
+			/* Because of wraparound, we want to keep the barrier
+			 * "close" to the current seqids.  It needs to be
+			 * within 2**31 to count as "behind", so if it
+			 * gets too near that limit, give us a litle leeway
+			 * gets too near that limit, give us a little leeway
+			 * NOTE - and yes, this is all unsigned arithmetic.
+			 */
+			if (unlikely((newseq - lo->plh_barrier) > (3 << 29)))
+				lo->plh_barrier = newseq - (1 << 30);
+		}
+	}
+}
+
+/* lget is set to 1 if called from inside send_layoutget call chain */
+static bool
+pnfs_layoutgets_blocked(struct pnfs_layout_hdr *lo, nfs4_stateid *stateid,
+			int lget)
+{
+	if ((stateid) &&
+	    (int)(lo->plh_barrier - be32_to_cpu(stateid->stateid.seqid)) >= 0)
+		return true;
+	return lo->plh_block_lgets ||
+		test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags) ||
+		(list_empty(&lo->plh_segs) &&
+		 (atomic_read(&lo->plh_outstanding) > lget));
+}
+
+int
+pnfs_choose_layoutget_stateid(nfs4_stateid *dst, struct pnfs_layout_hdr *lo,
+			      struct nfs4_state *open_state)
+{
+	int status = 0;
 
 	dprintk("--> %s\n", __func__);
-	do {
-		seq = read_seqbegin(&lo->seqlock);
-		if (!test_bit(NFS_LAYOUT_STATEID_SET, &lo->state)) {
-			/* This will trigger retry of the read */
-			pnfs_layout_from_open_stateid(lo, open_state);
-		} else
-			memcpy(dst->data, lo->stateid.data,
-			       sizeof(lo->stateid.data));
-	} while (read_seqretry(&lo->seqlock, seq));
+	spin_lock(&lo->plh_inode->i_lock);
+	if (pnfs_layoutgets_blocked(lo, NULL, 1)) {
+		status = -EAGAIN;
+	} else if (list_empty(&lo->plh_segs)) {
+		int seq;
+
+		do {
+			seq = read_seqbegin(&open_state->seqlock);
+			memcpy(dst->data, open_state->stateid.data,
+			       sizeof(open_state->stateid.data));
+		} while (read_seqretry(&open_state->seqlock, seq));
+	} else
+		memcpy(dst->data, lo->plh_stateid.data, sizeof(lo->plh_stateid.data));
+	spin_unlock(&lo->plh_inode->i_lock);
 	dprintk("<-- %s\n", __func__);
+	return status;
 }
 
 /*
@@ -395,7 +444,7 @@
 	   struct nfs_open_context *ctx,
 	   u32 iomode)
 {
-	struct inode *ino = lo->inode;
+	struct inode *ino = lo->plh_inode;
 	struct nfs_server *server = NFS_SERVER(ino);
 	struct nfs4_layoutget *lgp;
 	struct pnfs_layout_segment *lseg = NULL;
@@ -404,10 +453,8 @@
 
 	BUG_ON(ctx == NULL);
 	lgp = kzalloc(sizeof(*lgp), GFP_KERNEL);
-	if (lgp == NULL) {
-		put_layout_hdr(lo->inode);
+	if (lgp == NULL)
 		return NULL;
-	}
 	lgp->args.minlength = NFS4_MAX_UINT64;
 	lgp->args.maxcount = PNFS_LAYOUT_MAXSIZE;
 	lgp->args.range.iomode = iomode;
@@ -424,11 +471,88 @@
 	nfs4_proc_layoutget(lgp);
 	if (!lseg) {
 		/* remember that LAYOUTGET failed and suspend trying */
-		set_bit(lo_fail_bit(iomode), &lo->state);
+		set_bit(lo_fail_bit(iomode), &lo->plh_flags);
 	}
 	return lseg;
 }
 
+bool pnfs_roc(struct inode *ino)
+{
+	struct pnfs_layout_hdr *lo;
+	struct pnfs_layout_segment *lseg, *tmp;
+	LIST_HEAD(tmp_list);
+	bool found = false;
+
+	spin_lock(&ino->i_lock);
+	lo = NFS_I(ino)->layout;
+	if (!lo || !test_and_clear_bit(NFS_LAYOUT_ROC, &lo->plh_flags) ||
+	    test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags))
+		goto out_nolayout;
+	list_for_each_entry_safe(lseg, tmp, &lo->plh_segs, pls_list)
+		if (test_bit(NFS_LSEG_ROC, &lseg->pls_flags)) {
+			mark_lseg_invalid(lseg, &tmp_list);
+			found = true;
+		}
+	if (!found)
+		goto out_nolayout;
+	lo->plh_block_lgets++;
+	get_layout_hdr(lo); /* matched in pnfs_roc_release */
+	spin_unlock(&ino->i_lock);
+	pnfs_free_lseg_list(&tmp_list);
+	return true;
+
+out_nolayout:
+	spin_unlock(&ino->i_lock);
+	return false;
+}
+
+void pnfs_roc_release(struct inode *ino)
+{
+	struct pnfs_layout_hdr *lo;
+
+	spin_lock(&ino->i_lock);
+	lo = NFS_I(ino)->layout;
+	lo->plh_block_lgets--;
+	put_layout_hdr_locked(lo);
+	spin_unlock(&ino->i_lock);
+}
+
+void pnfs_roc_set_barrier(struct inode *ino, u32 barrier)
+{
+	struct pnfs_layout_hdr *lo;
+
+	spin_lock(&ino->i_lock);
+	lo = NFS_I(ino)->layout;
+	if ((int)(barrier - lo->plh_barrier) > 0)
+		lo->plh_barrier = barrier;
+	spin_unlock(&ino->i_lock);
+}
+
+bool pnfs_roc_drain(struct inode *ino, u32 *barrier)
+{
+	struct nfs_inode *nfsi = NFS_I(ino);
+	struct pnfs_layout_segment *lseg;
+	bool found = false;
+
+	spin_lock(&ino->i_lock);
+	list_for_each_entry(lseg, &nfsi->layout->plh_segs, pls_list)
+		if (test_bit(NFS_LSEG_ROC, &lseg->pls_flags)) {
+			found = true;
+			break;
+		}
+	if (!found) {
+		struct pnfs_layout_hdr *lo = nfsi->layout;
+		u32 current_seqid = be32_to_cpu(lo->plh_stateid.stateid.seqid);
+
+		/* Since close does not return a layout stateid for use as
+		 * a barrier, we choose the worst-case barrier.
+		 */
+		*barrier = current_seqid + atomic_read(&lo->plh_outstanding);
+	}
+	spin_unlock(&ino->i_lock);
+	return found;
+}
+
 /*
  * Compare two layout segments for sorting into layout cache.
  * We want to preferentially return RW over RO layouts, so ensure those
@@ -450,37 +574,29 @@
 
 	dprintk("%s:Begin\n", __func__);
 
-	assert_spin_locked(&lo->inode->i_lock);
-	if (list_empty(&lo->segs)) {
-		struct nfs_client *clp = NFS_SERVER(lo->inode)->nfs_client;
-
-		spin_lock(&clp->cl_lock);
-		BUG_ON(!list_empty(&lo->layouts));
-		list_add_tail(&lo->layouts, &clp->cl_layouts);
-		spin_unlock(&clp->cl_lock);
-	}
-	list_for_each_entry(lp, &lo->segs, fi_list) {
-		if (cmp_layout(lp->range.iomode, lseg->range.iomode) > 0)
+	assert_spin_locked(&lo->plh_inode->i_lock);
+	list_for_each_entry(lp, &lo->plh_segs, pls_list) {
+		if (cmp_layout(lp->pls_range.iomode, lseg->pls_range.iomode) > 0)
 			continue;
-		list_add_tail(&lseg->fi_list, &lp->fi_list);
+		list_add_tail(&lseg->pls_list, &lp->pls_list);
 		dprintk("%s: inserted lseg %p "
 			"iomode %d offset %llu length %llu before "
 			"lp %p iomode %d offset %llu length %llu\n",
-			__func__, lseg, lseg->range.iomode,
-			lseg->range.offset, lseg->range.length,
-			lp, lp->range.iomode, lp->range.offset,
-			lp->range.length);
+			__func__, lseg, lseg->pls_range.iomode,
+			lseg->pls_range.offset, lseg->pls_range.length,
+			lp, lp->pls_range.iomode, lp->pls_range.offset,
+			lp->pls_range.length);
 		found = 1;
 		break;
 	}
 	if (!found) {
-		list_add_tail(&lseg->fi_list, &lo->segs);
+		list_add_tail(&lseg->pls_list, &lo->plh_segs);
 		dprintk("%s: inserted lseg %p "
 			"iomode %d offset %llu length %llu at tail\n",
-			__func__, lseg, lseg->range.iomode,
-			lseg->range.offset, lseg->range.length);
+			__func__, lseg, lseg->pls_range.iomode,
+			lseg->pls_range.offset, lseg->pls_range.length);
 	}
-	get_layout_hdr_locked(lo);
+	get_layout_hdr(lo);
 
 	dprintk("%s:Return\n", __func__);
 }
@@ -493,11 +609,11 @@
 	lo = kzalloc(sizeof(struct pnfs_layout_hdr), GFP_KERNEL);
 	if (!lo)
 		return NULL;
-	lo->refcount = 1;
-	INIT_LIST_HEAD(&lo->layouts);
-	INIT_LIST_HEAD(&lo->segs);
-	seqlock_init(&lo->seqlock);
-	lo->inode = ino;
+	atomic_set(&lo->plh_refcount, 1);
+	INIT_LIST_HEAD(&lo->plh_layouts);
+	INIT_LIST_HEAD(&lo->plh_segs);
+	INIT_LIST_HEAD(&lo->plh_bulk_recall);
+	lo->plh_inode = ino;
 	return lo;
 }
 
@@ -510,9 +626,12 @@
 	dprintk("%s Begin ino=%p layout=%p\n", __func__, ino, nfsi->layout);
 
 	assert_spin_locked(&ino->i_lock);
-	if (nfsi->layout)
-		return nfsi->layout;
-
+	if (nfsi->layout) {
+		if (test_bit(NFS_LAYOUT_DESTROYED, &nfsi->layout->plh_flags))
+			return NULL;
+		else
+			return nfsi->layout;
+	}
 	spin_unlock(&ino->i_lock);
 	new = alloc_init_layout_hdr(ino);
 	spin_lock(&ino->i_lock);
@@ -538,31 +657,32 @@
 static int
 is_matching_lseg(struct pnfs_layout_segment *lseg, u32 iomode)
 {
-	return (iomode != IOMODE_RW || lseg->range.iomode == IOMODE_RW);
+	return (iomode != IOMODE_RW || lseg->pls_range.iomode == IOMODE_RW);
 }
 
 /*
  * lookup range in layout
  */
 static struct pnfs_layout_segment *
-pnfs_has_layout(struct pnfs_layout_hdr *lo, u32 iomode)
+pnfs_find_lseg(struct pnfs_layout_hdr *lo, u32 iomode)
 {
 	struct pnfs_layout_segment *lseg, *ret = NULL;
 
 	dprintk("%s:Begin\n", __func__);
 
-	assert_spin_locked(&lo->inode->i_lock);
-	list_for_each_entry(lseg, &lo->segs, fi_list) {
-		if (is_matching_lseg(lseg, iomode)) {
+	assert_spin_locked(&lo->plh_inode->i_lock);
+	list_for_each_entry(lseg, &lo->plh_segs, pls_list) {
+		if (test_bit(NFS_LSEG_VALID, &lseg->pls_flags) &&
+		    is_matching_lseg(lseg, iomode)) {
 			ret = lseg;
 			break;
 		}
-		if (cmp_layout(iomode, lseg->range.iomode) > 0)
+		if (cmp_layout(iomode, lseg->pls_range.iomode) > 0)
 			break;
 	}
 
 	dprintk("%s:Return lseg %p ref %d\n",
-		__func__, ret, ret ? atomic_read(&ret->kref.refcount) : 0);
+		__func__, ret, ret ? atomic_read(&ret->pls_refcount) : 0);
 	return ret;
 }
 
@@ -576,6 +696,7 @@
 		   enum pnfs_iomode iomode)
 {
 	struct nfs_inode *nfsi = NFS_I(ino);
+	struct nfs_client *clp = NFS_SERVER(ino)->nfs_client;
 	struct pnfs_layout_hdr *lo;
 	struct pnfs_layout_segment *lseg = NULL;
 
@@ -588,25 +709,53 @@
 		goto out_unlock;
 	}
 
-	/* Check to see if the layout for the given range already exists */
-	lseg = pnfs_has_layout(lo, iomode);
-	if (lseg) {
-		dprintk("%s: Using cached lseg %p for iomode %d)\n",
-			__func__, lseg, iomode);
+	/* Do we even need to bother with this? */
+	if (test_bit(NFS4CLNT_LAYOUTRECALL, &clp->cl_state) ||
+	    test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) {
+		dprintk("%s matches recall, use MDS\n", __func__);
 		goto out_unlock;
 	}
-
-	/* if LAYOUTGET already failed once we don't try again */
-	if (test_bit(lo_fail_bit(iomode), &nfsi->layout->state))
+	/* Check to see if the layout for the given range already exists */
+	lseg = pnfs_find_lseg(lo, iomode);
+	if (lseg)
 		goto out_unlock;
 
-	get_layout_hdr_locked(lo); /* Matched in nfs4_layoutget_release */
+	/* if LAYOUTGET already failed once we don't try again */
+	if (test_bit(lo_fail_bit(iomode), &nfsi->layout->plh_flags))
+		goto out_unlock;
+
+	if (pnfs_layoutgets_blocked(lo, NULL, 0))
+		goto out_unlock;
+	atomic_inc(&lo->plh_outstanding);
+
+	get_layout_hdr(lo);
+	if (list_empty(&lo->plh_segs)) {
+		/* The lo must be on the clp list if there is any
+		 * chance of a CB_LAYOUTRECALL(FILE) coming in.
+		 */
+		spin_lock(&clp->cl_lock);
+		BUG_ON(!list_empty(&lo->plh_layouts));
+		list_add_tail(&lo->plh_layouts, &clp->cl_layouts);
+		spin_unlock(&clp->cl_lock);
+	}
 	spin_unlock(&ino->i_lock);
 
 	lseg = send_layoutget(lo, ctx, iomode);
+	if (!lseg) {
+		spin_lock(&ino->i_lock);
+		if (list_empty(&lo->plh_segs)) {
+			spin_lock(&clp->cl_lock);
+			list_del_init(&lo->plh_layouts);
+			spin_unlock(&clp->cl_lock);
+			clear_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags);
+		}
+		spin_unlock(&ino->i_lock);
+	}
+	atomic_dec(&lo->plh_outstanding);
+	put_layout_hdr(lo);
 out:
 	dprintk("%s end, state 0x%lx lseg %p\n", __func__,
-		nfsi->layout->state, lseg);
+		nfsi->layout->plh_flags, lseg);
 	return lseg;
 out_unlock:
 	spin_unlock(&ino->i_lock);
@@ -619,9 +768,21 @@
 	struct pnfs_layout_hdr *lo = NFS_I(lgp->args.inode)->layout;
 	struct nfs4_layoutget_res *res = &lgp->res;
 	struct pnfs_layout_segment *lseg;
-	struct inode *ino = lo->inode;
+	struct inode *ino = lo->plh_inode;
+	struct nfs_client *clp = NFS_SERVER(ino)->nfs_client;
 	int status = 0;
 
+	/* Verify we got what we asked for.
+	 * Note that because the xdr parsing only accepts a single
+	 * element array, this can fail even if the server is behaving
+	 * correctly.
+	 */
+	if (lgp->args.range.iomode > res->range.iomode ||
+	    res->range.offset != 0 ||
+	    res->range.length != NFS4_MAX_UINT64) {
+		status = -EINVAL;
+		goto out;
+	}
 	/* Inject layout blob into I/O device driver */
 	lseg = NFS_SERVER(ino)->pnfs_curr_ld->alloc_lseg(lo, res);
 	if (!lseg || IS_ERR(lseg)) {
@@ -635,16 +796,37 @@
 	}
 
 	spin_lock(&ino->i_lock);
+	if (test_bit(NFS4CLNT_LAYOUTRECALL, &clp->cl_state) ||
+	    test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) {
+		dprintk("%s forget reply due to recall\n", __func__);
+		goto out_forget_reply;
+	}
+
+	if (pnfs_layoutgets_blocked(lo, &res->stateid, 1)) {
+		dprintk("%s forget reply due to state\n", __func__);
+		goto out_forget_reply;
+	}
 	init_lseg(lo, lseg);
-	lseg->range = res->range;
+	lseg->pls_range = res->range;
 	*lgp->lsegpp = lseg;
 	pnfs_insert_layout(lo, lseg);
 
+	if (res->return_on_close) {
+		set_bit(NFS_LSEG_ROC, &lseg->pls_flags);
+		set_bit(NFS_LAYOUT_ROC, &lo->plh_flags);
+	}
+
 	/* Done processing layoutget. Set the layout stateid */
-	pnfs_set_layout_stateid(lo, &res->stateid);
+	pnfs_set_layout_stateid(lo, &res->stateid, false);
 	spin_unlock(&ino->i_lock);
 out:
 	return status;
+
+out_forget_reply:
+	spin_unlock(&ino->i_lock);
+	lseg->pls_layout = lo;
+	NFS_SERVER(ino)->pnfs_curr_ld->free_lseg(lseg);
+	goto out;
 }
 
 /*
diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h
index e12367d..e2612ea 100644
--- a/fs/nfs/pnfs.h
+++ b/fs/nfs/pnfs.h
@@ -30,11 +30,17 @@
 #ifndef FS_NFS_PNFS_H
 #define FS_NFS_PNFS_H
 
+enum {
+	NFS_LSEG_VALID = 0,	/* cleared when lseg is recalled/returned */
+	NFS_LSEG_ROC,		/* roc bit received from server */
+};
+
 struct pnfs_layout_segment {
-	struct list_head fi_list;
-	struct pnfs_layout_range range;
-	struct kref kref;
-	struct pnfs_layout_hdr *layout;
+	struct list_head pls_list;
+	struct pnfs_layout_range pls_range;
+	atomic_t pls_refcount;
+	unsigned long pls_flags;
+	struct pnfs_layout_hdr *pls_layout;
 };
 
 #ifdef CONFIG_NFS_V4_1
@@ -44,7 +50,9 @@
 enum {
 	NFS_LAYOUT_RO_FAILED = 0,	/* get ro layout failed stop trying */
 	NFS_LAYOUT_RW_FAILED,		/* get rw layout failed stop trying */
-	NFS_LAYOUT_STATEID_SET,		/* have a valid layout stateid */
+	NFS_LAYOUT_BULK_RECALL,		/* bulk recall affecting layout */
+	NFS_LAYOUT_ROC,			/* some lseg had roc bit set */
+	NFS_LAYOUT_DESTROYED,		/* no new use of layout allowed */
 };
 
 /* Per-layout driver specific registration structure */
@@ -60,13 +68,16 @@
 };
 
 struct pnfs_layout_hdr {
-	unsigned long		refcount;
-	struct list_head	layouts;   /* other client layouts */
-	struct list_head	segs;      /* layout segments list */
-	seqlock_t		seqlock;   /* Protects the stateid */
-	nfs4_stateid		stateid;
-	unsigned long		state;
-	struct inode		*inode;
+	atomic_t		plh_refcount;
+	struct list_head	plh_layouts;   /* other client layouts */
+	struct list_head	plh_bulk_recall; /* clnt list of bulk recalls */
+	struct list_head	plh_segs;      /* layout segments list */
+	nfs4_stateid		plh_stateid;
+	atomic_t		plh_outstanding; /* number of RPCs out */
+	unsigned long		plh_block_lgets; /* block LAYOUTGET if >0 */
+	u32			plh_barrier; /* ignore lower seqids */
+	unsigned long		plh_flags;
+	struct inode		*plh_inode;
 };
 
 struct pnfs_device {
@@ -134,17 +145,30 @@
 extern int nfs4_proc_layoutget(struct nfs4_layoutget *lgp);
 
 /* pnfs.c */
+void get_layout_hdr(struct pnfs_layout_hdr *lo);
 struct pnfs_layout_segment *
 pnfs_update_layout(struct inode *ino, struct nfs_open_context *ctx,
 		   enum pnfs_iomode access_type);
 void set_pnfs_layoutdriver(struct nfs_server *, u32 id);
 void unset_pnfs_layoutdriver(struct nfs_server *);
 int pnfs_layout_process(struct nfs4_layoutget *lgp);
+void pnfs_free_lseg_list(struct list_head *tmp_list);
 void pnfs_destroy_layout(struct nfs_inode *);
 void pnfs_destroy_all_layouts(struct nfs_client *);
-void put_layout_hdr(struct inode *inode);
-void pnfs_get_layout_stateid(nfs4_stateid *dst, struct pnfs_layout_hdr *lo,
-			     struct nfs4_state *open_state);
+void put_layout_hdr(struct pnfs_layout_hdr *lo);
+void pnfs_set_layout_stateid(struct pnfs_layout_hdr *lo,
+			     const nfs4_stateid *new,
+			     bool update_barrier);
+int pnfs_choose_layoutget_stateid(nfs4_stateid *dst,
+				  struct pnfs_layout_hdr *lo,
+				  struct nfs4_state *open_state);
+int mark_matching_lsegs_invalid(struct pnfs_layout_hdr *lo,
+				struct list_head *tmp_list,
+				u32 iomode);
+bool pnfs_roc(struct inode *ino);
+void pnfs_roc_release(struct inode *ino);
+void pnfs_roc_set_barrier(struct inode *ino, u32 barrier);
+bool pnfs_roc_drain(struct inode *ino, u32 *barrier);
 
 
 static inline int lo_fail_bit(u32 iomode)
@@ -176,6 +200,28 @@
 	return NULL;
 }
 
+static inline bool
+pnfs_roc(struct inode *ino)
+{
+	return false;
+}
+
+static inline void
+pnfs_roc_release(struct inode *ino)
+{
+}
+
+static inline void
+pnfs_roc_set_barrier(struct inode *ino, u32 barrier)
+{
+}
+
+static inline bool
+pnfs_roc_drain(struct inode *ino, u32 *barrier)
+{
+	return false;
+}
+
 static inline void set_pnfs_layoutdriver(struct nfs_server *s, u32 id)
 {
 }
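
The pnfs header changes above convert the layout header and segment reference counts to atomic_t and make put_layout_hdr() take the header rather than the inode. The actual get/put bodies live in fs/nfs/pnfs.c and are not part of this hunk; the following is only a minimal userspace sketch of such a get/put pattern, using C11 atomics and made-up names:

#include <stdatomic.h>
#include <stdlib.h>

struct layout_hdr {
	atomic_int refcount;
	/* ... layout state would live here ... */
};

static struct layout_hdr *alloc_hdr(void)
{
	struct layout_hdr *lo = calloc(1, sizeof(*lo));

	if (lo)
		atomic_init(&lo->refcount, 1);	/* caller holds the first reference */
	return lo;
}

static void get_hdr(struct layout_hdr *lo)
{
	atomic_fetch_add(&lo->refcount, 1);
}

static void put_hdr(struct layout_hdr *lo)
{
	/* Free only when the last reference goes away. */
	if (atomic_fetch_sub(&lo->refcount, 1) == 1)
		free(lo);
}

int main(void)
{
	struct layout_hdr *lo = alloc_hdr();

	if (!lo)
		return 1;
	get_hdr(lo);	/* e.g. a segment takes a reference on its layout */
	put_hdr(lo);
	put_hdr(lo);	/* last put frees the header */
	return 0;
}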
diff --git a/fs/nfs/proc.c b/fs/nfs/proc.c
index 58e7f84..77d5e21 100644
--- a/fs/nfs/proc.c
+++ b/fs/nfs/proc.c
@@ -458,7 +458,7 @@
 	fattr = nfs_alloc_fattr();
 	status = -ENOMEM;
 	if (fh == NULL || fattr == NULL)
-		goto out;
+		goto out_free;
 
 	status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0);
 	nfs_mark_for_revalidate(dir);
@@ -471,6 +471,7 @@
 	if (status == 0)
 		status = nfs_instantiate(dentry, fh, fattr);
 
+out_free:
 	nfs_free_fattr(fattr);
 	nfs_free_fhandle(fh);
 out:
@@ -731,7 +732,7 @@
 	.statfs		= nfs_proc_statfs,
 	.fsinfo		= nfs_proc_fsinfo,
 	.pathconf	= nfs_proc_pathconf,
-	.decode_dirent	= nfs_decode_dirent,
+	.decode_dirent	= nfs2_decode_dirent,
 	.read_setup	= nfs_proc_read_setup,
 	.read_done	= nfs_read_done,
 	.write_setup	= nfs_proc_write_setup,
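
The proc.c hunk above repoints the allocation-failure jump from out to a new out_free label, so whichever of the two allocations did succeed is released instead of leaked. A tiny userspace sketch of that goto-cleanup idiom, assuming free routines that tolerate NULL:

#include <stdlib.h>

static int make_pair(void)
{
	char *a = malloc(16);
	char *b = malloc(16);
	int status = -1;

	if (a == NULL || b == NULL)
		goto out_free;	/* release whichever allocation succeeded */

	/* ... use a and b ... */
	status = 0;

out_free:
	free(b);	/* free(NULL) is a no-op, so this is safe */
	free(a);
	return status;
}

int main(void)
{
	return make_pair() ? 1 : 0;
}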
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index 4100630..b68c860 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -598,7 +598,9 @@
 
 	if (nfss->mountd_version || showdefaults)
 		seq_printf(m, ",mountvers=%u", nfss->mountd_version);
-	if (nfss->mountd_port || showdefaults)
+	if ((nfss->mountd_port &&
+		nfss->mountd_port != (unsigned short)NFS_UNSPEC_PORT) ||
+		showdefaults)
 		seq_printf(m, ",mountport=%u", nfss->mountd_port);
 
 	nfs_show_mountd_netid(m, nfss, showdefaults);
@@ -2200,6 +2202,7 @@
 
 	s->s_flags = sb_mntdata->mntflags;
 	s->s_fs_info = server;
+	s->s_d_op = server->nfs_client->rpc_ops->dentry_ops;
 	ret = set_anon_super(s, server);
 	if (ret == 0)
 		server->s_dev = s->s_dev;
@@ -2494,7 +2497,13 @@
 	sb->s_maxbytes = old_sb->s_maxbytes;
 	sb->s_time_gran = 1;
 	sb->s_op = old_sb->s_op;
- 	nfs_initialise_sb(sb);
+	/*
+	 * The VFS shouldn't apply the umask to mode bits. We will do
+	 * so ourselves when necessary.
+	 */
+	sb->s_flags  |= MS_POSIXACL;
+	sb->s_xattr  = old_sb->s_xattr;
+	nfs_initialise_sb(sb);
 }
 
 /*
@@ -2504,6 +2513,12 @@
 {
 	sb->s_time_gran = 1;
 	sb->s_op = &nfs4_sops;
+	/*
+	 * The VFS shouldn't apply the umask to mode bits. We will do
+	 * so ourselves when necessary.
+	 */
+	sb->s_flags  |= MS_POSIXACL;
+	sb->s_xattr = nfs4_xattr_handlers;
 	nfs_initialise_sb(sb);
 }
 
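Both super.c hunks above set MS_POSIXACL so the VFS stops masking create modes with the process umask, leaving that decision to the filesystem, as the added comment states. A small userspace illustration of what that flag changes, with assumed names and values standing in for the real mount path:

#include <stdio.h>
#include <sys/stat.h>

/* Illustration only: when a filesystem does not advertise POSIX ACL
 * support, the VFS masks the requested mode with the process umask
 * before the filesystem ever sees it. MS_POSIXACL tells the VFS to
 * skip this step so the server/ACL code can apply its own policy. */
static mode_t effective_mode(mode_t requested, mode_t umask_bits, int fs_handles_acls)
{
	return fs_handles_acls ? requested : (requested & ~umask_bits);
}

int main(void)
{
	printf("%04o\n", (unsigned)effective_mode(0666, 022, 0)); /* 0644: VFS applied umask */
	printf("%04o\n", (unsigned)effective_mode(0666, 022, 1)); /* 0666: left to the fs    */
	return 0;
}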
diff --git a/fs/nfs/unlink.c b/fs/nfs/unlink.c
index 8fe9eb4..e313a51 100644
--- a/fs/nfs/unlink.c
+++ b/fs/nfs/unlink.c
@@ -429,7 +429,7 @@
 	data = kzalloc(sizeof(*data), GFP_KERNEL);
 	if (data == NULL)
 		return ERR_PTR(-ENOMEM);
-	task_setup_data.callback_data = data,
+	task_setup_data.callback_data = data;
 
 	data->cred = rpc_lookup_cred();
 	if (IS_ERR(data->cred)) {
diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
index 143da2e..21a63da 100644
--- a/fs/nfsd/nfs4callback.c
+++ b/fs/nfsd/nfs4callback.c
@@ -50,11 +50,6 @@
 	NFSPROC4_CLNT_CB_SEQUENCE,
 };
 
-enum nfs_cb_opnum4 {
-	OP_CB_RECALL            = 4,
-	OP_CB_SEQUENCE          = 11,
-};
-
 #define NFS4_MAXTAGLEN		20
 
 #define NFS4_enc_cb_null_sz		0
@@ -79,61 +74,6 @@
 					cb_sequence_dec_sz +            \
 					op_dec_sz)
 
-/*
-* Generic encode routines from fs/nfs/nfs4xdr.c
-*/
-static inline __be32 *
-xdr_writemem(__be32 *p, const void *ptr, int nbytes)
-{
-	int tmp = XDR_QUADLEN(nbytes);
-	if (!tmp)
-		return p;
-	p[tmp-1] = 0;
-	memcpy(p, ptr, nbytes);
-	return p + tmp;
-}
-
-#define WRITE32(n)               *p++ = htonl(n)
-#define WRITEMEM(ptr,nbytes)     do {                           \
-	p = xdr_writemem(p, ptr, nbytes);                       \
-} while (0)
-#define RESERVE_SPACE(nbytes)   do {                            \
-	p = xdr_reserve_space(xdr, nbytes);                     \
-	if (!p) dprintk("NFSD: RESERVE_SPACE(%d) failed in function %s\n", (int) (nbytes), __func__); \
-	BUG_ON(!p);                                             \
-} while (0)
-
-/*
- * Generic decode routines from fs/nfs/nfs4xdr.c
- */
-#define DECODE_TAIL                             \
-	status = 0;                             \
-out:                                            \
-	return status;                          \
-xdr_error:                                      \
-	dprintk("NFSD: xdr error! (%s:%d)\n", __FILE__, __LINE__); \
-	status = -EIO;                          \
-	goto out
-
-#define READ32(x)         (x) = ntohl(*p++)
-#define READ64(x)         do {                  \
-	(x) = (u64)ntohl(*p++) << 32;           \
-	(x) |= ntohl(*p++);                     \
-} while (0)
-#define READTIME(x)       do {                  \
-	p++;                                    \
-	(x.tv_sec) = ntohl(*p++);               \
-	(x.tv_nsec) = ntohl(*p++);              \
-} while (0)
-#define READ_BUF(nbytes)  do { \
-	p = xdr_inline_decode(xdr, nbytes); \
-	if (!p) { \
-		dprintk("NFSD: %s: reply buffer overflowed in line %d.\n", \
-			__func__, __LINE__); \
-		return -EIO; \
-	} \
-} while (0)
-
 struct nfs4_cb_compound_hdr {
 	/* args */
 	u32		ident;	/* minorversion 0 only */
@@ -144,295 +84,513 @@
 	int		status;
 };
 
-static struct {
-int stat;
-int errno;
-} nfs_cb_errtbl[] = {
-	{ NFS4_OK,		0               },
-	{ NFS4ERR_PERM,		EPERM           },
-	{ NFS4ERR_NOENT,	ENOENT          },
-	{ NFS4ERR_IO,		EIO             },
-	{ NFS4ERR_NXIO,		ENXIO           },
-	{ NFS4ERR_ACCESS,	EACCES          },
-	{ NFS4ERR_EXIST,	EEXIST          },
-	{ NFS4ERR_XDEV,		EXDEV           },
-	{ NFS4ERR_NOTDIR,	ENOTDIR         },
-	{ NFS4ERR_ISDIR,	EISDIR          },
-	{ NFS4ERR_INVAL,	EINVAL          },
-	{ NFS4ERR_FBIG,		EFBIG           },
-	{ NFS4ERR_NOSPC,	ENOSPC          },
-	{ NFS4ERR_ROFS,		EROFS           },
-	{ NFS4ERR_MLINK,	EMLINK          },
-	{ NFS4ERR_NAMETOOLONG,	ENAMETOOLONG    },
-	{ NFS4ERR_NOTEMPTY,	ENOTEMPTY       },
-	{ NFS4ERR_DQUOT,	EDQUOT          },
-	{ NFS4ERR_STALE,	ESTALE          },
-	{ NFS4ERR_BADHANDLE,	EBADHANDLE      },
-	{ NFS4ERR_BAD_COOKIE,	EBADCOOKIE      },
-	{ NFS4ERR_NOTSUPP,	ENOTSUPP        },
-	{ NFS4ERR_TOOSMALL,	ETOOSMALL       },
-	{ NFS4ERR_SERVERFAULT,	ESERVERFAULT    },
-	{ NFS4ERR_BADTYPE,	EBADTYPE        },
-	{ NFS4ERR_LOCKED,	EAGAIN          },
-	{ NFS4ERR_RESOURCE,	EREMOTEIO       },
-	{ NFS4ERR_SYMLINK,	ELOOP           },
-	{ NFS4ERR_OP_ILLEGAL,	EOPNOTSUPP      },
-	{ NFS4ERR_DEADLOCK,	EDEADLK         },
-	{ -1,                   EIO             }
-};
-
-static int
-nfs_cb_stat_to_errno(int stat)
+/*
+ * Handle decode buffer overflows out-of-line.
+ */
+static void print_overflow_msg(const char *func, const struct xdr_stream *xdr)
 {
-	int i;
-	for (i = 0; nfs_cb_errtbl[i].stat != -1; i++) {
-		if (nfs_cb_errtbl[i].stat == stat)
-			return nfs_cb_errtbl[i].errno;
-	}
-	/* If we cannot translate the error, the recovery routines should
-	* handle it.
-	* Note: remaining NFSv4 error codes have values > 10000, so should
-	* not conflict with native Linux error codes.
-	*/
-	return stat;
+	dprintk("NFS: %s prematurely hit the end of our receive buffer. "
+		"Remaining buffer length is %tu words.\n",
+		func, xdr->end - xdr->p);
+}
+
+static __be32 *xdr_encode_empty_array(__be32 *p)
+{
+	*p++ = xdr_zero;
+	return p;
 }
 
 /*
- * XDR encode
+ * Encode/decode NFSv4 CB basic data types
+ *
+ * Basic NFSv4 callback data types are defined in section 15 of RFC
+ * 3530: "Network File System (NFS) version 4 Protocol" and section
+ * 20 of RFC 5661: "Network File System (NFS) Version 4 Minor Version
+ * 1 Protocol"
  */
 
-static void
-encode_stateid(struct xdr_stream *xdr, stateid_t *sid)
+/*
+ *	nfs_cb_opnum4
+ *
+ *	enum nfs_cb_opnum4 {
+ *		OP_CB_GETATTR		= 3,
+ *		  ...
+ *	};
+ */
+enum nfs_cb_opnum4 {
+	OP_CB_GETATTR			= 3,
+	OP_CB_RECALL			= 4,
+	OP_CB_LAYOUTRECALL		= 5,
+	OP_CB_NOTIFY			= 6,
+	OP_CB_PUSH_DELEG		= 7,
+	OP_CB_RECALL_ANY		= 8,
+	OP_CB_RECALLABLE_OBJ_AVAIL	= 9,
+	OP_CB_RECALL_SLOT		= 10,
+	OP_CB_SEQUENCE			= 11,
+	OP_CB_WANTS_CANCELLED		= 12,
+	OP_CB_NOTIFY_LOCK		= 13,
+	OP_CB_NOTIFY_DEVICEID		= 14,
+	OP_CB_ILLEGAL			= 10044
+};
+
+static void encode_nfs_cb_opnum4(struct xdr_stream *xdr, enum nfs_cb_opnum4 op)
 {
 	__be32 *p;
 
-	RESERVE_SPACE(sizeof(stateid_t));
-	WRITE32(sid->si_generation);
-	WRITEMEM(&sid->si_opaque, sizeof(stateid_opaque_t));
+	p = xdr_reserve_space(xdr, 4);
+	*p = cpu_to_be32(op);
 }
 
-static void
-encode_cb_compound_hdr(struct xdr_stream *xdr, struct nfs4_cb_compound_hdr *hdr)
+/*
+ * nfs_fh4
+ *
+ *	typedef opaque nfs_fh4<NFS4_FHSIZE>;
+ */
+static void encode_nfs_fh4(struct xdr_stream *xdr, const struct knfsd_fh *fh)
+{
+	u32 length = fh->fh_size;
+	__be32 *p;
+
+	BUG_ON(length > NFS4_FHSIZE);
+	p = xdr_reserve_space(xdr, 4 + length);
+	xdr_encode_opaque(p, &fh->fh_base, length);
+}
+
+/*
+ * stateid4
+ *
+ *	struct stateid4 {
+ *		uint32_t	seqid;
+ *		opaque		other[12];
+ *	};
+ */
+static void encode_stateid4(struct xdr_stream *xdr, const stateid_t *sid)
+{
+	__be32 *p;
+
+	p = xdr_reserve_space(xdr, NFS4_STATEID_SIZE);
+	*p++ = cpu_to_be32(sid->si_generation);
+	xdr_encode_opaque_fixed(p, &sid->si_opaque, NFS4_STATEID_OTHER_SIZE);
+}
+
+/*
+ * sessionid4
+ *
+ *	typedef opaque sessionid4[NFS4_SESSIONID_SIZE];
+ */
+static void encode_sessionid4(struct xdr_stream *xdr,
+			      const struct nfsd4_session *session)
+{
+	__be32 *p;
+
+	p = xdr_reserve_space(xdr, NFS4_MAX_SESSIONID_LEN);
+	xdr_encode_opaque_fixed(p, session->se_sessionid.data,
+					NFS4_MAX_SESSIONID_LEN);
+}
+
+/*
+ * nfsstat4
+ */
+static const struct {
+	int stat;
+	int errno;
+} nfs_cb_errtbl[] = {
+	{ NFS4_OK,		0		},
+	{ NFS4ERR_PERM,		-EPERM		},
+	{ NFS4ERR_NOENT,	-ENOENT		},
+	{ NFS4ERR_IO,		-EIO		},
+	{ NFS4ERR_NXIO,		-ENXIO		},
+	{ NFS4ERR_ACCESS,	-EACCES		},
+	{ NFS4ERR_EXIST,	-EEXIST		},
+	{ NFS4ERR_XDEV,		-EXDEV		},
+	{ NFS4ERR_NOTDIR,	-ENOTDIR	},
+	{ NFS4ERR_ISDIR,	-EISDIR		},
+	{ NFS4ERR_INVAL,	-EINVAL		},
+	{ NFS4ERR_FBIG,		-EFBIG		},
+	{ NFS4ERR_NOSPC,	-ENOSPC		},
+	{ NFS4ERR_ROFS,		-EROFS		},
+	{ NFS4ERR_MLINK,	-EMLINK		},
+	{ NFS4ERR_NAMETOOLONG,	-ENAMETOOLONG	},
+	{ NFS4ERR_NOTEMPTY,	-ENOTEMPTY	},
+	{ NFS4ERR_DQUOT,	-EDQUOT		},
+	{ NFS4ERR_STALE,	-ESTALE		},
+	{ NFS4ERR_BADHANDLE,	-EBADHANDLE	},
+	{ NFS4ERR_BAD_COOKIE,	-EBADCOOKIE	},
+	{ NFS4ERR_NOTSUPP,	-ENOTSUPP	},
+	{ NFS4ERR_TOOSMALL,	-ETOOSMALL	},
+	{ NFS4ERR_SERVERFAULT,	-ESERVERFAULT	},
+	{ NFS4ERR_BADTYPE,	-EBADTYPE	},
+	{ NFS4ERR_LOCKED,	-EAGAIN		},
+	{ NFS4ERR_RESOURCE,	-EREMOTEIO	},
+	{ NFS4ERR_SYMLINK,	-ELOOP		},
+	{ NFS4ERR_OP_ILLEGAL,	-EOPNOTSUPP	},
+	{ NFS4ERR_DEADLOCK,	-EDEADLK	},
+	{ -1,			-EIO		}
+};
+
+/*
+ * If we cannot translate the error, the recovery routines should
+ * handle it.
+ *
+ * Note: remaining NFSv4 error codes have values > 10000, so should
+ * not conflict with native Linux error codes.
+ */
+static int nfs_cb_stat_to_errno(int status)
+{
+	int i;
+
+	for (i = 0; nfs_cb_errtbl[i].stat != -1; i++) {
+		if (nfs_cb_errtbl[i].stat == status)
+			return nfs_cb_errtbl[i].errno;
+	}
+
+	dprintk("NFSD: Unrecognized NFS CB status value: %u\n", status);
+	return -status;
+}
+
+static int decode_cb_op_status(struct xdr_stream *xdr, enum nfs_opnum4 expected,
+			       enum nfsstat4 *status)
+{
+	__be32 *p;
+	u32 op;
+
+	p = xdr_inline_decode(xdr, 4 + 4);
+	if (unlikely(p == NULL))
+		goto out_overflow;
+	op = be32_to_cpup(p++);
+	if (unlikely(op != expected))
+		goto out_unexpected;
+	*status = be32_to_cpup(p);
+	return 0;
+out_overflow:
+	print_overflow_msg(__func__, xdr);
+	return -EIO;
+out_unexpected:
+	dprintk("NFSD: Callback server returned operation %d but "
+		"we issued a request for %d\n", op, expected);
+	return -EIO;
+}
+
+/*
+ * CB_COMPOUND4args
+ *
+ *	struct CB_COMPOUND4args {
+ *		utf8str_cs	tag;
+ *		uint32_t	minorversion;
+ *		uint32_t	callback_ident;
+ *		nfs_cb_argop4	argarray<>;
+ *	};
+*/
+static void encode_cb_compound4args(struct xdr_stream *xdr,
+				    struct nfs4_cb_compound_hdr *hdr)
 {
 	__be32 * p;
 
-	RESERVE_SPACE(16);
-	WRITE32(0);            /* tag length is always 0 */
-	WRITE32(hdr->minorversion);
-	WRITE32(hdr->ident);
+	p = xdr_reserve_space(xdr, 4 + 4 + 4 + 4);
+	p = xdr_encode_empty_array(p);		/* empty tag */
+	*p++ = cpu_to_be32(hdr->minorversion);
+	*p++ = cpu_to_be32(hdr->ident);
+
 	hdr->nops_p = p;
-	WRITE32(hdr->nops);
+	*p = cpu_to_be32(hdr->nops);		/* argarray element count */
 }
 
+/*
+ * Update argarray element count
+ */
 static void encode_cb_nops(struct nfs4_cb_compound_hdr *hdr)
 {
-	*hdr->nops_p = htonl(hdr->nops);
+	BUG_ON(hdr->nops > NFS4_MAX_BACK_CHANNEL_OPS);
+	*hdr->nops_p = cpu_to_be32(hdr->nops);
 }
 
-static void
-encode_cb_recall(struct xdr_stream *xdr, struct nfs4_delegation *dp,
-		struct nfs4_cb_compound_hdr *hdr)
+/*
+ * CB_COMPOUND4res
+ *
+ *	struct CB_COMPOUND4res {
+ *		nfsstat4	status;
+ *		utf8str_cs	tag;
+ *		nfs_cb_resop4	resarray<>;
+ *	};
+ */
+static int decode_cb_compound4res(struct xdr_stream *xdr,
+				  struct nfs4_cb_compound_hdr *hdr)
+{
+	u32 length;
+	__be32 *p;
+
+	p = xdr_inline_decode(xdr, 4 + 4);
+	if (unlikely(p == NULL))
+		goto out_overflow;
+	hdr->status = be32_to_cpup(p++);
+	/* Ignore the tag */
+	length = be32_to_cpup(p++);
+	p = xdr_inline_decode(xdr, length + 4);
+	if (unlikely(p == NULL))
+		goto out_overflow;
+	hdr->nops = be32_to_cpup(p);
+	return 0;
+out_overflow:
+	print_overflow_msg(__func__, xdr);
+	return -EIO;
+}
+
+/*
+ * CB_RECALL4args
+ *
+ *	struct CB_RECALL4args {
+ *		stateid4	stateid;
+ *		bool		truncate;
+ *		nfs_fh4		fh;
+ *	};
+ */
+static void encode_cb_recall4args(struct xdr_stream *xdr,
+				  const struct nfs4_delegation *dp,
+				  struct nfs4_cb_compound_hdr *hdr)
 {
 	__be32 *p;
-	int len = dp->dl_fh.fh_size;
 
-	RESERVE_SPACE(4);
-	WRITE32(OP_CB_RECALL);
-	encode_stateid(xdr, &dp->dl_stateid);
-	RESERVE_SPACE(8 + (XDR_QUADLEN(len) << 2));
-	WRITE32(0); /* truncate optimization not implemented */
-	WRITE32(len);
-	WRITEMEM(&dp->dl_fh.fh_base, len);
+	encode_nfs_cb_opnum4(xdr, OP_CB_RECALL);
+	encode_stateid4(xdr, &dp->dl_stateid);
+
+	p = xdr_reserve_space(xdr, 4);
+	*p++ = xdr_zero;			/* truncate */
+
+	encode_nfs_fh4(xdr, &dp->dl_fh);
+
 	hdr->nops++;
 }
 
-static void
-encode_cb_sequence(struct xdr_stream *xdr, struct nfsd4_callback *cb,
-		   struct nfs4_cb_compound_hdr *hdr)
+/*
+ * CB_SEQUENCE4args
+ *
+ *	struct CB_SEQUENCE4args {
+ *		sessionid4		csa_sessionid;
+ *		sequenceid4		csa_sequenceid;
+ *		slotid4			csa_slotid;
+ *		slotid4			csa_highest_slotid;
+ *		bool			csa_cachethis;
+ *		referring_call_list4	csa_referring_call_lists<>;
+ *	};
+ */
+static void encode_cb_sequence4args(struct xdr_stream *xdr,
+				    const struct nfsd4_callback *cb,
+				    struct nfs4_cb_compound_hdr *hdr)
 {
+	struct nfsd4_session *session = cb->cb_clp->cl_cb_session;
 	__be32 *p;
-	struct nfsd4_session *ses = cb->cb_clp->cl_cb_session;
 
 	if (hdr->minorversion == 0)
 		return;
 
-	RESERVE_SPACE(1 + NFS4_MAX_SESSIONID_LEN + 20);
+	encode_nfs_cb_opnum4(xdr, OP_CB_SEQUENCE);
+	encode_sessionid4(xdr, session);
 
-	WRITE32(OP_CB_SEQUENCE);
-	WRITEMEM(ses->se_sessionid.data, NFS4_MAX_SESSIONID_LEN);
-	WRITE32(ses->se_cb_seq_nr);
-	WRITE32(0);		/* slotid, always 0 */
-	WRITE32(0);		/* highest slotid always 0 */
-	WRITE32(0);		/* cachethis always 0 */
-	WRITE32(0); /* FIXME: support referring_call_lists */
+	p = xdr_reserve_space(xdr, 4 + 4 + 4 + 4 + 4);
+	*p++ = cpu_to_be32(session->se_cb_seq_nr);	/* csa_sequenceid */
+	*p++ = xdr_zero;			/* csa_slotid */
+	*p++ = xdr_zero;			/* csa_highest_slotid */
+	*p++ = xdr_zero;			/* csa_cachethis */
+	xdr_encode_empty_array(p);		/* csa_referring_call_lists */
+
 	hdr->nops++;
 }
 
-static int
-nfs4_xdr_enc_cb_null(struct rpc_rqst *req, __be32 *p)
-{
-	struct xdr_stream xdrs, *xdr = &xdrs;
-
-	xdr_init_encode(&xdrs, &req->rq_snd_buf, p);
-        RESERVE_SPACE(0);
-	return 0;
-}
-
-static int
-nfs4_xdr_enc_cb_recall(struct rpc_rqst *req, __be32 *p,
-		struct nfsd4_callback *cb)
-{
-	struct xdr_stream xdr;
-	struct nfs4_delegation *args = cb->cb_op;
-	struct nfs4_cb_compound_hdr hdr = {
-		.ident = cb->cb_clp->cl_cb_ident,
-		.minorversion = cb->cb_minorversion,
-	};
-
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-	encode_cb_compound_hdr(&xdr, &hdr);
-	encode_cb_sequence(&xdr, cb, &hdr);
-	encode_cb_recall(&xdr, args, &hdr);
-	encode_cb_nops(&hdr);
-	return 0;
-}
-
-
-static int
-decode_cb_compound_hdr(struct xdr_stream *xdr, struct nfs4_cb_compound_hdr *hdr){
-        __be32 *p;
-	u32 taglen;
-
-        READ_BUF(8);
-        READ32(hdr->status);
-	/* We've got no use for the tag; ignore it: */
-        READ32(taglen);
-        READ_BUF(taglen + 4);
-        p += XDR_QUADLEN(taglen);
-        READ32(hdr->nops);
-        return 0;
-}
-
-static int
-decode_cb_op_hdr(struct xdr_stream *xdr, enum nfs_opnum4 expected)
-{
-	__be32 *p;
-	u32 op;
-	int32_t nfserr;
-
-	READ_BUF(8);
-	READ32(op);
-	if (op != expected) {
-		dprintk("NFSD: decode_cb_op_hdr: Callback server returned "
-		         " operation %d but we issued a request for %d\n",
-		         op, expected);
-		return -EIO;
-	}
-	READ32(nfserr);
-	if (nfserr != NFS_OK)
-		return -nfs_cb_stat_to_errno(nfserr);
-	return 0;
-}
-
 /*
+ * CB_SEQUENCE4resok
+ *
+ *	struct CB_SEQUENCE4resok {
+ *		sessionid4	csr_sessionid;
+ *		sequenceid4	csr_sequenceid;
+ *		slotid4		csr_slotid;
+ *		slotid4		csr_highest_slotid;
+ *		slotid4		csr_target_highest_slotid;
+ *	};
+ *
+ *	union CB_SEQUENCE4res switch (nfsstat4 csr_status) {
+ *	case NFS4_OK:
+ *		CB_SEQUENCE4resok	csr_resok4;
+ *	default:
+ *		void;
+ *	};
+ *
  * Our current back channel implementation supports a single backchannel
  * with a single slot.
  */
-static int
-decode_cb_sequence(struct xdr_stream *xdr, struct nfsd4_callback *cb,
-		   struct rpc_rqst *rqstp)
+static int decode_cb_sequence4resok(struct xdr_stream *xdr,
+				    struct nfsd4_callback *cb)
 {
-	struct nfsd4_session *ses = cb->cb_clp->cl_cb_session;
+	struct nfsd4_session *session = cb->cb_clp->cl_cb_session;
 	struct nfs4_sessionid id;
 	int status;
-	u32 dummy;
 	__be32 *p;
+	u32 dummy;
 
-	if (cb->cb_minorversion == 0)
-		return 0;
-
-	status = decode_cb_op_hdr(xdr, OP_CB_SEQUENCE);
-	if (status)
-		return status;
+	status = -ESERVERFAULT;
 
 	/*
 	 * If the server returns different values for sessionID, slotID or
 	 * sequence number, the server is looney tunes.
 	 */
-	status = -ESERVERFAULT;
-
-	READ_BUF(NFS4_MAX_SESSIONID_LEN + 16);
+	p = xdr_inline_decode(xdr, NFS4_MAX_SESSIONID_LEN + 4 + 4);
+	if (unlikely(p == NULL))
+		goto out_overflow;
 	memcpy(id.data, p, NFS4_MAX_SESSIONID_LEN);
+	if (memcmp(id.data, session->se_sessionid.data,
+					NFS4_MAX_SESSIONID_LEN) != 0) {
+		dprintk("NFS: %s Invalid session id\n", __func__);
+		goto out;
+	}
 	p += XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN);
-	if (memcmp(id.data, ses->se_sessionid.data, NFS4_MAX_SESSIONID_LEN)) {
-		dprintk("%s Invalid session id\n", __func__);
+
+	dummy = be32_to_cpup(p++);
+	if (dummy != session->se_cb_seq_nr) {
+		dprintk("NFS: %s Invalid sequence number\n", __func__);
 		goto out;
 	}
-	READ32(dummy);
-	if (dummy != ses->se_cb_seq_nr) {
-		dprintk("%s Invalid sequence number\n", __func__);
-		goto out;
-	}
-	READ32(dummy); 	/* slotid must be 0 */
+
+	dummy = be32_to_cpup(p++);
 	if (dummy != 0) {
-		dprintk("%s Invalid slotid\n", __func__);
+		dprintk("NFS: %s Invalid slotid\n", __func__);
 		goto out;
 	}
-	/* FIXME: process highest slotid and target highest slotid */
+
+	/*
+	 * FIXME: process highest slotid and target highest slotid
+	 */
 	status = 0;
 out:
 	return status;
+out_overflow:
+	print_overflow_msg(__func__, xdr);
+	return -EIO;
+}
+
+static int decode_cb_sequence4res(struct xdr_stream *xdr,
+				  struct nfsd4_callback *cb)
+{
+	enum nfsstat4 nfserr;
+	int status;
+
+	if (cb->cb_minorversion == 0)
+		return 0;
+
+	status = decode_cb_op_status(xdr, OP_CB_SEQUENCE, &nfserr);
+	if (unlikely(status))
+		goto out;
+	if (unlikely(nfserr != NFS4_OK))
+		goto out_default;
+	status = decode_cb_sequence4resok(xdr, cb);
+out:
+	return status;
+out_default:
+	return nfs_cb_stat_to_errno(status);
+}
+
+/*
+ * NFSv4.0 and NFSv4.1 XDR encode functions
+ *
+ * NFSv4.0 callback argument types are defined in section 15 of RFC
+ * 3530: "Network File System (NFS) version 4 Protocol" and section 20
+ * of RFC 5661:  "Network File System (NFS) Version 4 Minor Version 1
+ * Protocol".
+ */
+
+/*
+ * NB: Without this zero space reservation, callbacks over krb5p fail
+ */
+static void nfs4_xdr_enc_cb_null(struct rpc_rqst *req, struct xdr_stream *xdr,
+				 void *__unused)
+{
+	xdr_reserve_space(xdr, 0);
+}
+
+/*
+ * 20.2. Operation 4: CB_RECALL - Recall a Delegation
+ */
+static void nfs4_xdr_enc_cb_recall(struct rpc_rqst *req, struct xdr_stream *xdr,
+				   const struct nfsd4_callback *cb)
+{
+	const struct nfs4_delegation *args = cb->cb_op;
+	struct nfs4_cb_compound_hdr hdr = {
+		.ident = cb->cb_clp->cl_cb_ident,
+		.minorversion = cb->cb_minorversion,
+	};
+
+	encode_cb_compound4args(xdr, &hdr);
+	encode_cb_sequence4args(xdr, cb, &hdr);
+	encode_cb_recall4args(xdr, args, &hdr);
+	encode_cb_nops(&hdr);
 }
 
 
-static int
-nfs4_xdr_dec_cb_null(struct rpc_rqst *req, __be32 *p)
+/*
+ * NFSv4.0 and NFSv4.1 XDR decode functions
+ *
+ * NFSv4.0 callback result types are defined in section 15 of RFC
+ * 3530: "Network File System (NFS) version 4 Protocol" and section 20
+ * of RFC 5661:  "Network File System (NFS) Version 4 Minor Version 1
+ * Protocol".
+ */
+
+static int nfs4_xdr_dec_cb_null(struct rpc_rqst *req, struct xdr_stream *xdr,
+				void *__unused)
 {
 	return 0;
 }
 
-static int
-nfs4_xdr_dec_cb_recall(struct rpc_rqst *rqstp, __be32 *p,
-		struct nfsd4_callback *cb)
+/*
+ * 20.2. Operation 4: CB_RECALL - Recall a Delegation
+ */
+static int nfs4_xdr_dec_cb_recall(struct rpc_rqst *rqstp,
+				  struct xdr_stream *xdr,
+				  struct nfsd4_callback *cb)
 {
-	struct xdr_stream xdr;
 	struct nfs4_cb_compound_hdr hdr;
+	enum nfsstat4 nfserr;
 	int status;
 
-	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
-	status = decode_cb_compound_hdr(&xdr, &hdr);
-	if (status)
+	status = decode_cb_compound4res(xdr, &hdr);
+	if (unlikely(status))
 		goto out;
-	if (cb) {
-		status = decode_cb_sequence(&xdr, cb, rqstp);
-		if (status)
+
+	if (cb != NULL) {
+		status = decode_cb_sequence4res(xdr, cb);
+		if (unlikely(status))
 			goto out;
 	}
-	status = decode_cb_op_hdr(&xdr, OP_CB_RECALL);
+
+	status = decode_cb_op_status(xdr, OP_CB_RECALL, &nfserr);
+	if (unlikely(status))
+		goto out;
+	if (unlikely(nfserr != NFS4_OK))
+		goto out_default;
 out:
 	return status;
+out_default:
+	return nfs_cb_stat_to_errno(status);
 }
 
 /*
  * RPC procedure tables
  */
-#define PROC(proc, call, argtype, restype)                              \
-[NFSPROC4_CLNT_##proc] = {                                      	\
-        .p_proc   = NFSPROC4_CB_##call,					\
-        .p_encode = (kxdrproc_t) nfs4_xdr_##argtype,                    \
-        .p_decode = (kxdrproc_t) nfs4_xdr_##restype,                    \
-        .p_arglen = NFS4_##argtype##_sz,                                \
-        .p_replen = NFS4_##restype##_sz,                                \
-        .p_statidx = NFSPROC4_CB_##call,				\
-	.p_name   = #proc,                                              \
+#define PROC(proc, call, argtype, restype)				\
+[NFSPROC4_CLNT_##proc] = {						\
+	.p_proc    = NFSPROC4_CB_##call,				\
+	.p_encode  = (kxdreproc_t)nfs4_xdr_enc_##argtype,		\
+	.p_decode  = (kxdrdproc_t)nfs4_xdr_dec_##restype,		\
+	.p_arglen  = NFS4_enc_##argtype##_sz,				\
+	.p_replen  = NFS4_dec_##restype##_sz,				\
+	.p_statidx = NFSPROC4_CB_##call,				\
+	.p_name    = #proc,						\
 }
 
-static struct rpc_procinfo     nfs4_cb_procedures[] = {
-    PROC(CB_NULL,      NULL,     enc_cb_null,     dec_cb_null),
-    PROC(CB_RECALL,    COMPOUND,   enc_cb_recall,      dec_cb_recall),
+static struct rpc_procinfo nfs4_cb_procedures[] = {
+	PROC(CB_NULL,	NULL,		cb_null,	cb_null),
+	PROC(CB_RECALL,	COMPOUND,	cb_recall,	cb_recall),
 };
 
-static struct rpc_version       nfs_cb_version4 = {
+static struct rpc_version nfs_cb_version4 = {
 /*
  * Note on the callback rpc program version number: despite language in rfc
  * 5661 section 18.36.3 requiring servers to use 4 in this field, the
@@ -440,29 +598,29 @@
  * in practice that appears to be what implementations use.  The section
  * 18.36.3 language is expected to be fixed in an erratum.
  */
-        .number                 = 1,
-        .nrprocs                = ARRAY_SIZE(nfs4_cb_procedures),
-        .procs                  = nfs4_cb_procedures
+	.number			= 1,
+	.nrprocs		= ARRAY_SIZE(nfs4_cb_procedures),
+	.procs			= nfs4_cb_procedures
 };
 
-static struct rpc_version *	nfs_cb_version[] = {
+static struct rpc_version *nfs_cb_version[] = {
 	&nfs_cb_version4,
 };
 
 static struct rpc_program cb_program;
 
 static struct rpc_stat cb_stats = {
-		.program	= &cb_program
+	.program		= &cb_program
 };
 
 #define NFS4_CALLBACK 0x40000000
 static struct rpc_program cb_program = {
-		.name 		= "nfs4_cb",
-		.number		= NFS4_CALLBACK,
-		.nrvers		= ARRAY_SIZE(nfs_cb_version),
-		.version	= nfs_cb_version,
-		.stats		= &cb_stats,
-		.pipe_dir_name  = "/nfsd4_cb",
+	.name			= "nfs4_cb",
+	.number			= NFS4_CALLBACK,
+	.nrvers			= ARRAY_SIZE(nfs_cb_version),
+	.version		= nfs_cb_version,
+	.stats			= &cb_stats,
+	.pipe_dir_name		= "/nfsd4_cb",
 };
 
 static int max_cb_time(void)
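
The nfs4callback.c rewrite above replaces the old WRITE32/READ32 macros with xdr_stream helpers, moves the full opnum list into the file, and makes nfs_cb_stat_to_errno() return negative errnos. As a standalone illustration of the two underlying ideas, big-endian on-the-wire integers and a table-driven status translation, here is a hedged userspace sketch (htonl() stands in for cpu_to_be32(), and the table contents are made up):

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* XDR renders every 32-bit quantity in big-endian ("network") order,
 * which is what cpu_to_be32()/be32_to_cpup() do in the patch above. */
static uint8_t *encode_u32(uint8_t *p, uint32_t v)
{
	uint32_t be = htonl(v);

	memcpy(p, &be, 4);
	return p + 4;
}

/* Status-to-errno translation in the style of nfs_cb_stat_to_errno():
 * walk a small sentinel-terminated table, fall back to a generic value. */
static const struct { int stat; int err; } tbl[] = {
	{ 0, 0 }, { 1, -1 }, { 2, -2 }, { -1, -5 },
};

static int stat_to_err(int stat)
{
	for (int i = 0; tbl[i].stat != -1; i++)
		if (tbl[i].stat == stat)
			return tbl[i].err;
	return -stat;
}

int main(void)
{
	uint8_t buf[8], *p = buf;

	p = encode_u32(p, 4);	/* e.g. an operation number */
	p = encode_u32(p, 0);	/* e.g. a boolean, false */
	printf("encoded %zu bytes, stat 2 -> %d\n", (size_t)(p - buf), stat_to_err(2));
	return 0;
}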
diff --git a/fs/notify/fanotify/Kconfig b/fs/notify/fanotify/Kconfig
index 3ac36b7b..7dceff0 100644
--- a/fs/notify/fanotify/Kconfig
+++ b/fs/notify/fanotify/Kconfig
@@ -6,7 +6,7 @@
 	---help---
 	   Say Y here to enable fanotify support.  fanotify is a file access
 	   notification system which differs from inotify in that it sends
-	   and open file descriptor to the userspace listener along with
+	   an open file descriptor to the userspace listener along with
 	   the event.
 
 	   If unsure, say Y.
diff --git a/fs/ntfs/Makefile b/fs/ntfs/Makefile
index 58b6be9..4ff028f 100644
--- a/fs/ntfs/Makefile
+++ b/fs/ntfs/Makefile
@@ -6,7 +6,7 @@
 	     index.o inode.o mft.o mst.o namei.o runlist.o super.o sysctl.o \
 	     unistr.o upcase.o
 
-EXTRA_CFLAGS = -DNTFS_VERSION=\"2.1.29\"
+EXTRA_CFLAGS = -DNTFS_VERSION=\"2.1.30\"
 
 ifeq ($(CONFIG_NTFS_DEBUG),y)
 EXTRA_CFLAGS += -DDEBUG
diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
index 113ebd9..f4b1057 100644
--- a/fs/ntfs/file.c
+++ b/fs/ntfs/file.c
@@ -1,7 +1,7 @@
 /*
  * file.c - NTFS kernel file operations.  Part of the Linux-NTFS project.
  *
- * Copyright (c) 2001-2007 Anton Altaparmakov
+ * Copyright (c) 2001-2011 Anton Altaparmakov and Tuxera Inc.
  *
  * This program/include file is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License as published
@@ -1380,15 +1380,14 @@
  * pages (out to offset + bytes), to emulate ntfs_copy_from_user()'s
  * single-segment behaviour.
  *
- * We call the same helper (__ntfs_copy_from_user_iovec_inatomic()) both
- * when atomic and when not atomic.  This is ok because
- * __ntfs_copy_from_user_iovec_inatomic() calls __copy_from_user_inatomic()
- * and it is ok to call this when non-atomic.
- * Infact, the only difference between __copy_from_user_inatomic() and
+ * We call the same helper (__ntfs_copy_from_user_iovec_inatomic()) both when
+ * atomic and when not atomic.  This is ok because it calls
+ * __copy_from_user_inatomic() and it is ok to call this when non-atomic.  In
+ * fact, the only difference between __copy_from_user_inatomic() and
  * __copy_from_user() is that the latter calls might_sleep() and the former
- * should not zero the tail of the buffer on error.  And on many
- * architectures __copy_from_user_inatomic() is just defined to
- * __copy_from_user() so it makes no difference at all on those architectures.
+ * should not zero the tail of the buffer on error.  And on many architectures
+ * __copy_from_user_inatomic() is just defined to __copy_from_user() so it
+ * makes no difference at all on those architectures.
  */
 static inline size_t ntfs_copy_from_user_iovec(struct page **pages,
 		unsigned nr_pages, unsigned ofs, const struct iovec **iov,
@@ -1409,28 +1408,28 @@
 		if (unlikely(copied != len)) {
 			/* Do it the slow way. */
 			addr = kmap(*pages);
-			copied = __ntfs_copy_from_user_iovec_inatomic(addr + ofs,
-					*iov, *iov_ofs, len);
-			/*
-			 * Zero the rest of the target like __copy_from_user().
-			 */
-			memset(addr + ofs + copied, 0, len - copied);
-			kunmap(*pages);
+			copied = __ntfs_copy_from_user_iovec_inatomic(addr +
+					ofs, *iov, *iov_ofs, len);
 			if (unlikely(copied != len))
 				goto err_out;
+			kunmap(*pages);
 		}
 		total += len;
+		ntfs_set_next_iovec(iov, iov_ofs, len);
 		bytes -= len;
 		if (!bytes)
 			break;
-		ntfs_set_next_iovec(iov, iov_ofs, len);
 		ofs = 0;
 	} while (++pages < last_page);
 out:
 	return total;
 err_out:
-	total += copied;
+	BUG_ON(copied > len);
 	/* Zero the rest of the target like __copy_from_user(). */
+	memset(addr + ofs + copied, 0, len - copied);
+	kunmap(*pages);
+	total += copied;
+	ntfs_set_next_iovec(iov, iov_ofs, copied);
 	while (++pages < last_page) {
 		bytes -= len;
 		if (!bytes)
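
The ntfs_copy_from_user_iovec() fix above moves the tail-zeroing into the error path, so the destination is only zeroed when the slow copy actually comes up short, and advances the iovec by the amount copied. A minimal userspace illustration of the zero-the-tail-on-short-copy convention (standalone code, not the ntfs helpers):

#include <stdio.h>
#include <string.h>

/* Userspace illustration of the error-path semantics the hunk above
 * restores: when a copy comes up short, the untouched tail of the
 * destination is zeroed, mirroring __copy_from_user() behaviour. */
static size_t copy_and_zero_tail(char *dst, const char *src, size_t copied, size_t len)
{
	memcpy(dst, src, copied);		/* the part that succeeded */
	memset(dst + copied, 0, len - copied);	/* zero the rest */
	return copied;
}

int main(void)
{
	char dst[8];
	size_t n = copy_and_zero_tail(dst, "abc", 3, sizeof(dst));

	printf("copied %zu bytes, tail zeroed: %d\n", n, dst[7] == 0);
	return 0;
}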
diff --git a/fs/ntfs/super.c b/fs/ntfs/super.c
index a30ecacc..29099a0 100644
--- a/fs/ntfs/super.c
+++ b/fs/ntfs/super.c
@@ -1,7 +1,7 @@
 /*
  * super.c - NTFS kernel super block handling. Part of the Linux-NTFS project.
  *
- * Copyright (c) 2001-2007 Anton Altaparmakov
+ * Copyright (c) 2001-2011 Anton Altaparmakov and Tuxera Inc.
  * Copyright (c) 2001,2002 Richard Russon
  *
  * This program/include file is free software; you can redistribute it and/or
@@ -3193,8 +3193,8 @@
 	ntfs_sysctl(0);
 }
 
-MODULE_AUTHOR("Anton Altaparmakov <aia21@cantab.net>");
-MODULE_DESCRIPTION("NTFS 1.2/3.x driver - Copyright (c) 2001-2007 Anton Altaparmakov");
+MODULE_AUTHOR("Anton Altaparmakov <anton@tuxera.com>");
+MODULE_DESCRIPTION("NTFS 1.2/3.x driver - Copyright (c) 2001-2011 Anton Altaparmakov and Tuxera Inc.");
 MODULE_VERSION(NTFS_VERSION);
 MODULE_LICENSE("GPL");
 #ifdef DEBUG
diff --git a/fs/ocfs2/Kconfig b/fs/ocfs2/Kconfig
index 0d84066..ab152c0 100644
--- a/fs/ocfs2/Kconfig
+++ b/fs/ocfs2/Kconfig
@@ -51,7 +51,7 @@
 
 config OCFS2_FS_STATS
 	bool "OCFS2 statistics"
-	depends on OCFS2_FS
+	depends on OCFS2_FS && DEBUG_FS
 	default y
 	help
 	  This option allows some fs statistics to be captured. Enabling
diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c
index 592fae5..e4984e2 100644
--- a/fs/ocfs2/alloc.c
+++ b/fs/ocfs2/alloc.c
@@ -565,7 +565,6 @@
 	return ret;
 }
 
-static void ocfs2_free_truncate_context(struct ocfs2_truncate_context *tc);
 static int ocfs2_cache_extent_block_free(struct ocfs2_cached_dealloc_ctxt *ctxt,
 					 struct ocfs2_extent_block *eb);
 static void ocfs2_adjust_rightmost_records(handle_t *handle,
@@ -5858,6 +5857,7 @@
 
 	ocfs2_journal_dirty(handle, tl_bh);
 
+	osb->truncated_clusters += num_clusters;
 bail:
 	mlog_exit(status);
 	return status;
@@ -5929,6 +5929,8 @@
 		i--;
 	}
 
+	osb->truncated_clusters = 0;
+
 bail:
 	mlog_exit(status);
 	return status;
@@ -7139,64 +7141,6 @@
 }
 
 /*
- * Expects the inode to already be locked.
- */
-int ocfs2_prepare_truncate(struct ocfs2_super *osb,
-			   struct inode *inode,
-			   struct buffer_head *fe_bh,
-			   struct ocfs2_truncate_context **tc)
-{
-	int status;
-	unsigned int new_i_clusters;
-	struct ocfs2_dinode *fe;
-	struct ocfs2_extent_block *eb;
-	struct buffer_head *last_eb_bh = NULL;
-
-	mlog_entry_void();
-
-	*tc = NULL;
-
-	new_i_clusters = ocfs2_clusters_for_bytes(osb->sb,
-						  i_size_read(inode));
-	fe = (struct ocfs2_dinode *) fe_bh->b_data;
-
-	mlog(0, "fe->i_clusters = %u, new_i_clusters = %u, fe->i_size ="
-	     "%llu\n", le32_to_cpu(fe->i_clusters), new_i_clusters,
-	     (unsigned long long)le64_to_cpu(fe->i_size));
-
-	*tc = kzalloc(sizeof(struct ocfs2_truncate_context), GFP_KERNEL);
-	if (!(*tc)) {
-		status = -ENOMEM;
-		mlog_errno(status);
-		goto bail;
-	}
-	ocfs2_init_dealloc_ctxt(&(*tc)->tc_dealloc);
-
-	if (fe->id2.i_list.l_tree_depth) {
-		status = ocfs2_read_extent_block(INODE_CACHE(inode),
-						 le64_to_cpu(fe->i_last_eb_blk),
-						 &last_eb_bh);
-		if (status < 0) {
-			mlog_errno(status);
-			goto bail;
-		}
-		eb = (struct ocfs2_extent_block *) last_eb_bh->b_data;
-	}
-
-	(*tc)->tc_last_eb_bh = last_eb_bh;
-
-	status = 0;
-bail:
-	if (status < 0) {
-		if (*tc)
-			ocfs2_free_truncate_context(*tc);
-		*tc = NULL;
-	}
-	mlog_exit_void();
-	return status;
-}
-
-/*
  * 'start' is inclusive, 'end' is not.
  */
 int ocfs2_truncate_inline(struct inode *inode, struct buffer_head *di_bh,
@@ -7270,18 +7214,3 @@
 out:
 	return ret;
 }
-
-static void ocfs2_free_truncate_context(struct ocfs2_truncate_context *tc)
-{
-	/*
-	 * The caller is responsible for completing deallocation
-	 * before freeing the context.
-	 */
-	if (tc->tc_dealloc.c_first_suballocator != NULL)
-		mlog(ML_NOTICE,
-		     "Truncate completion has non-empty dealloc context\n");
-
-	brelse(tc->tc_last_eb_bh);
-
-	kfree(tc);
-}
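
The alloc.c hunks above add osb->truncated_clusters, a running count of clusters queued in the truncate log: it grows as entries are logged and resets when the log is replayed. A minimal userspace sketch of that bookkeeping (a plain counter guarded by a mutex; the names are illustrative, not the ocfs2 API):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t tl_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned truncated_clusters;

/* Clusters logged for truncation are accumulated under the lock. */
static void log_truncate(unsigned num_clusters)
{
	pthread_mutex_lock(&tl_lock);
	truncated_clusters += num_clusters;
	pthread_mutex_unlock(&tl_lock);
}

/* Replaying the log frees the clusters, so the counter goes back to zero. */
static void flush_truncate_log(void)
{
	pthread_mutex_lock(&tl_lock);
	/* ... replay and free the logged clusters ... */
	truncated_clusters = 0;
	pthread_mutex_unlock(&tl_lock);
}

int main(void)
{
	log_truncate(8);
	printf("%u\n", truncated_clusters);
	flush_truncate_log();
	printf("%u\n", truncated_clusters);
	return 0;
}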
diff --git a/fs/ocfs2/alloc.h b/fs/ocfs2/alloc.h
index 55762b5..3bd08a0 100644
--- a/fs/ocfs2/alloc.h
+++ b/fs/ocfs2/alloc.h
@@ -228,10 +228,6 @@
 
 int ocfs2_zero_range_for_truncate(struct inode *inode, handle_t *handle,
 				  u64 range_start, u64 range_end);
-int ocfs2_prepare_truncate(struct ocfs2_super *osb,
-			   struct inode *inode,
-			   struct buffer_head *fe_bh,
-			   struct ocfs2_truncate_context **tc);
 int ocfs2_commit_truncate(struct ocfs2_super *osb,
 			  struct inode *inode,
 			  struct buffer_head *di_bh);
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index 0d7c554..1fbb0e2 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -1630,6 +1630,43 @@
 	return ret;
 }
 
+/*
+ * Try to flush the truncate log if we can free enough clusters from it.
+ * As for the return value, "< 0" means error, "0" means no space and "1"
+ * means we have freed enough space and the caller may try to allocate again.
+ */
+static int ocfs2_try_to_free_truncate_log(struct ocfs2_super *osb,
+					  unsigned int needed)
+{
+	tid_t target;
+	int ret = 0;
+	unsigned int truncated_clusters;
+
+	mutex_lock(&osb->osb_tl_inode->i_mutex);
+	truncated_clusters = osb->truncated_clusters;
+	mutex_unlock(&osb->osb_tl_inode->i_mutex);
+
+	/*
+	 * Check whether we can succeed in allocating if we free
+	 * the truncate log.
+	 */
+	if (truncated_clusters < needed)
+		goto out;
+
+	ret = ocfs2_flush_truncate_log(osb);
+	if (ret) {
+		mlog_errno(ret);
+		goto out;
+	}
+
+	if (jbd2_journal_start_commit(osb->journal->j_journal, &target)) {
+		jbd2_log_wait_commit(osb->journal->j_journal, target);
+		ret = 1;
+	}
+out:
+	return ret;
+}
+
 int ocfs2_write_begin_nolock(struct file *filp,
 			     struct address_space *mapping,
 			     loff_t pos, unsigned len, unsigned flags,
@@ -1637,7 +1674,7 @@
 			     struct buffer_head *di_bh, struct page *mmap_page)
 {
 	int ret, cluster_of_pages, credits = OCFS2_INODE_UPDATE_CREDITS;
-	unsigned int clusters_to_alloc, extents_to_split;
+	unsigned int clusters_to_alloc, extents_to_split, clusters_need = 0;
 	struct ocfs2_write_ctxt *wc;
 	struct inode *inode = mapping->host;
 	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
@@ -1646,7 +1683,9 @@
 	struct ocfs2_alloc_context *meta_ac = NULL;
 	handle_t *handle;
 	struct ocfs2_extent_tree et;
+	int try_free = 1, ret1;
 
+try_again:
 	ret = ocfs2_alloc_write_ctxt(&wc, osb, pos, len, di_bh);
 	if (ret) {
 		mlog_errno(ret);
@@ -1681,6 +1720,7 @@
 		mlog_errno(ret);
 		goto out;
 	} else if (ret == 1) {
+		clusters_need = wc->w_clen;
 		ret = ocfs2_refcount_cow(inode, filp, di_bh,
 					 wc->w_cpos, wc->w_clen, UINT_MAX);
 		if (ret) {
@@ -1695,6 +1735,7 @@
 		mlog_errno(ret);
 		goto out;
 	}
+	clusters_need += clusters_to_alloc;
 
 	di = (struct ocfs2_dinode *)wc->w_di_bh->b_data;
 
@@ -1817,6 +1858,22 @@
 		ocfs2_free_alloc_context(data_ac);
 	if (meta_ac)
 		ocfs2_free_alloc_context(meta_ac);
+
+	if (ret == -ENOSPC && try_free) {
+		/*
+		 * Try to free some truncate log so that we can have enough
+		 * clusters to allocate.
+		 */
+		try_free = 0;
+
+		ret1 = ocfs2_try_to_free_truncate_log(osb, clusters_need);
+		if (ret1 == 1)
+			goto try_again;
+
+		if (ret1 < 0)
+			mlog_errno(ret1);
+	}
+
 	return ret;
 }
 
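The write_begin path above now remembers how many clusters the write needs and, on ENOSPC, flushes the truncate log once and retries the whole allocation. A hedged userspace sketch of that retry-once-after-reclaim shape (everything here is stand-in logic, not ocfs2 calls):

#include <errno.h>
#include <stdio.h>

/* Pretend reclaim succeeds only for small requests. */
static int reclaim(unsigned needed) { return needed <= 4 ? 1 : 0; }

static int allocate(unsigned needed, int space_left)
{
	return needed <= (unsigned)space_left ? 0 : -ENOSPC;
}

static int alloc_with_retry(unsigned needed)
{
	int try_free = 1, ret;
	int space_left = 0;

try_again:
	ret = allocate(needed, space_left);
	if (ret == -ENOSPC && try_free) {
		try_free = 0;			/* retry at most once */
		if (reclaim(needed)) {
			space_left += needed;	/* the flush freed enough space */
			goto try_again;
		}
	}
	return ret;
}

int main(void)
{
	printf("%d %d\n", alloc_with_retry(3), alloc_with_retry(8));
	return 0;
}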
diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c
index 9e3d45b..a6cc053 100644
--- a/fs/ocfs2/cluster/heartbeat.c
+++ b/fs/ocfs2/cluster/heartbeat.c
@@ -82,6 +82,7 @@
 #define O2HB_DB_TYPE_REGION_LIVENODES	4
 #define O2HB_DB_TYPE_REGION_NUMBER	5
 #define O2HB_DB_TYPE_REGION_ELAPSED_TIME	6
+#define O2HB_DB_TYPE_REGION_PINNED	7
 struct o2hb_debug_buf {
 	int db_type;
 	int db_size;
@@ -101,6 +102,7 @@
 #define O2HB_DEBUG_FAILEDREGIONS	"failed_regions"
 #define O2HB_DEBUG_REGION_NUMBER	"num"
 #define O2HB_DEBUG_REGION_ELAPSED_TIME	"elapsed_time_in_ms"
+#define O2HB_DEBUG_REGION_PINNED	"pinned"
 
 static struct dentry *o2hb_debug_dir;
 static struct dentry *o2hb_debug_livenodes;
@@ -132,6 +134,33 @@
 unsigned int o2hb_dead_threshold = O2HB_DEFAULT_DEAD_THRESHOLD;
 unsigned int o2hb_heartbeat_mode = O2HB_HEARTBEAT_LOCAL;
 
+/*
+ * o2hb_dependent_users tracks the number of registered callbacks that depend
+ * on heartbeat. o2net and o2dlm are two entities that register this callback.
+ * However, only o2dlm depends on the heartbeat. It does not want the heartbeat
+ * to stop while a dlm domain is still active.
+ */
+unsigned int o2hb_dependent_users;
+
+/*
+ * In global heartbeat mode, all regions are pinned if there are one or more
+ * dependent users and the quorum region count is <= O2HB_PIN_CUT_OFF. All
+ * regions are unpinned if the region count exceeds the cut off or the number
+ * of dependent users falls to zero.
+ */
+#define O2HB_PIN_CUT_OFF		3
+
+/*
+ * In local heartbeat mode, we assume the dlm domain name to be the same as
+ * region uuid. This is true for domains created for the file system but not
+ * necessarily true for userdlm domains. This is a known limitation.
+ *
+ * In global heartbeat mode, we pin/unpin all o2hb regions. This solution
+ * works for both file system and userdlm domains.
+ */
+static int o2hb_region_pin(const char *region_uuid);
+static void o2hb_region_unpin(const char *region_uuid);
+
 /* Only sets a new threshold if there are no active regions.
  *
  * No locking or otherwise interesting code is required for reading
@@ -186,7 +215,9 @@
 	struct config_item	hr_item;
 
 	struct list_head	hr_all_item;
-	unsigned		hr_unclean_stop:1;
+	unsigned		hr_unclean_stop:1,
+				hr_item_pinned:1,
+				hr_item_dropped:1;
 
 	/* protected by the hr_callback_sem */
 	struct task_struct 	*hr_task;
@@ -212,9 +243,11 @@
 	struct dentry		*hr_debug_livenodes;
 	struct dentry		*hr_debug_regnum;
 	struct dentry		*hr_debug_elapsed_time;
+	struct dentry		*hr_debug_pinned;
 	struct o2hb_debug_buf	*hr_db_livenodes;
 	struct o2hb_debug_buf	*hr_db_regnum;
 	struct o2hb_debug_buf	*hr_db_elapsed_time;
+	struct o2hb_debug_buf	*hr_db_pinned;
 
 	/* let the person setting up hb wait for it to return until it
 	 * has reached a 'steady' state.  This will be fixed when we have
@@ -701,6 +734,14 @@
 	       config_item_name(&reg->hr_item));
 
 	set_bit(reg->hr_region_num, o2hb_quorum_region_bitmap);
+
+	/*
+	 * If global heartbeat active, unpin all regions if the
+	 * region count > CUT_OFF
+	 */
+	if (o2hb_pop_count(&o2hb_quorum_region_bitmap,
+			   O2NM_MAX_REGIONS) > O2HB_PIN_CUT_OFF)
+		o2hb_region_unpin(NULL);
 }
 
 static int o2hb_check_slot(struct o2hb_region *reg,
@@ -1041,6 +1082,9 @@
 
 	set_user_nice(current, -20);
 
+	/* Pin node */
+	o2nm_depend_this_node();
+
 	while (!kthread_should_stop() && !reg->hr_unclean_stop) {
 		/* We track the time spent inside
 		 * o2hb_do_disk_heartbeat so that we avoid more than
@@ -1090,6 +1134,9 @@
 		mlog_errno(ret);
 	}
 
+	/* Unpin node */
+	o2nm_undepend_this_node();
+
 	mlog(ML_HEARTBEAT|ML_KTHREAD, "hb thread exiting\n");
 
 	return 0;
@@ -1142,6 +1189,12 @@
 						 reg->hr_last_timeout_start));
 		goto done;
 
+	case O2HB_DB_TYPE_REGION_PINNED:
+		reg = (struct o2hb_region *)db->db_data;
+		out += snprintf(buf + out, PAGE_SIZE - out, "%u\n",
+				!!reg->hr_item_pinned);
+		goto done;
+
 	default:
 		goto done;
 	}
@@ -1315,6 +1368,8 @@
 	memset(o2hb_quorum_region_bitmap, 0, sizeof(o2hb_quorum_region_bitmap));
 	memset(o2hb_failed_region_bitmap, 0, sizeof(o2hb_failed_region_bitmap));
 
+	o2hb_dependent_users = 0;
+
 	return o2hb_debug_init();
 }
 
@@ -1384,6 +1439,7 @@
 	debugfs_remove(reg->hr_debug_livenodes);
 	debugfs_remove(reg->hr_debug_regnum);
 	debugfs_remove(reg->hr_debug_elapsed_time);
+	debugfs_remove(reg->hr_debug_pinned);
 	debugfs_remove(reg->hr_debug_dir);
 
 	spin_lock(&o2hb_live_lock);
@@ -1948,6 +2004,18 @@
 		goto bail;
 	}
 
+	reg->hr_debug_pinned =
+			o2hb_debug_create(O2HB_DEBUG_REGION_PINNED,
+					  reg->hr_debug_dir,
+					  &(reg->hr_db_pinned),
+					  sizeof(*(reg->hr_db_pinned)),
+					  O2HB_DB_TYPE_REGION_PINNED,
+					  0, 0, reg);
+	if (!reg->hr_debug_pinned) {
+		mlog_errno(ret);
+		goto bail;
+	}
+
 	ret = 0;
 bail:
 	return ret;
@@ -2002,15 +2070,20 @@
 {
 	struct task_struct *hb_task;
 	struct o2hb_region *reg = to_o2hb_region(item);
+	int quorum_region = 0;
 
 	/* stop the thread when the user removes the region dir */
 	spin_lock(&o2hb_live_lock);
 	if (o2hb_global_heartbeat_active()) {
 		clear_bit(reg->hr_region_num, o2hb_region_bitmap);
 		clear_bit(reg->hr_region_num, o2hb_live_region_bitmap);
+		if (test_bit(reg->hr_region_num, o2hb_quorum_region_bitmap))
+			quorum_region = 1;
+		clear_bit(reg->hr_region_num, o2hb_quorum_region_bitmap);
 	}
 	hb_task = reg->hr_task;
 	reg->hr_task = NULL;
+	reg->hr_item_dropped = 1;
 	spin_unlock(&o2hb_live_lock);
 
 	if (hb_task)
@@ -2028,7 +2101,27 @@
 	if (o2hb_global_heartbeat_active())
 		printk(KERN_NOTICE "o2hb: Heartbeat stopped on region %s\n",
 		       config_item_name(&reg->hr_item));
+
 	config_item_put(item);
+
+	if (!o2hb_global_heartbeat_active() || !quorum_region)
+		return;
+
+	/*
+	 * If global heartbeat active and there are dependent users,
+	 * pin all regions if quorum region count <= CUT_OFF
+	 */
+	spin_lock(&o2hb_live_lock);
+
+	if (!o2hb_dependent_users)
+		goto unlock;
+
+	if (o2hb_pop_count(&o2hb_quorum_region_bitmap,
+			   O2NM_MAX_REGIONS) <= O2HB_PIN_CUT_OFF)
+		o2hb_region_pin(NULL);
+
+unlock:
+	spin_unlock(&o2hb_live_lock);
 }
 
 struct o2hb_heartbeat_group_attribute {
@@ -2214,63 +2307,138 @@
 }
 EXPORT_SYMBOL_GPL(o2hb_setup_callback);
 
-static struct o2hb_region *o2hb_find_region(const char *region_uuid)
+/*
+ * In local heartbeat mode, region_uuid passed matches the dlm domain name.
+ * In global heartbeat mode, region_uuid passed is NULL.
+ *
+ * In local, we only pin the matching region. In global we pin all the active
+ * regions.
+ */
+static int o2hb_region_pin(const char *region_uuid)
 {
-	struct o2hb_region *p, *reg = NULL;
+	int ret = 0, found = 0;
+	struct o2hb_region *reg;
+	char *uuid;
 
 	assert_spin_locked(&o2hb_live_lock);
 
-	list_for_each_entry(p, &o2hb_all_regions, hr_all_item) {
-		if (!strcmp(region_uuid, config_item_name(&p->hr_item))) {
-			reg = p;
-			break;
+	list_for_each_entry(reg, &o2hb_all_regions, hr_all_item) {
+		uuid = config_item_name(&reg->hr_item);
+
+		/* local heartbeat */
+		if (region_uuid) {
+			if (strcmp(region_uuid, uuid))
+				continue;
+			found = 1;
 		}
+
+		if (reg->hr_item_pinned || reg->hr_item_dropped)
+			goto skip_pin;
+
+		/* Ignore ENOENT only for local hb (userdlm domain) */
+		ret = o2nm_depend_item(&reg->hr_item);
+		if (!ret) {
+			mlog(ML_CLUSTER, "Pin region %s\n", uuid);
+			reg->hr_item_pinned = 1;
+		} else {
+			if (ret == -ENOENT && found)
+				ret = 0;
+			else {
+				mlog(ML_ERROR, "Pin region %s fails with %d\n",
+				     uuid, ret);
+				break;
+			}
+		}
+skip_pin:
+		if (found)
+			break;
 	}
 
-	return reg;
-}
-
-static int o2hb_region_get(const char *region_uuid)
-{
-	int ret = 0;
-	struct o2hb_region *reg;
-
-	spin_lock(&o2hb_live_lock);
-
-	reg = o2hb_find_region(region_uuid);
-	if (!reg)
-		ret = -ENOENT;
-	spin_unlock(&o2hb_live_lock);
-
-	if (ret)
-		goto out;
-
-	ret = o2nm_depend_this_node();
-	if (ret)
-		goto out;
-
-	ret = o2nm_depend_item(&reg->hr_item);
-	if (ret)
-		o2nm_undepend_this_node();
-
-out:
 	return ret;
 }
 
-static void o2hb_region_put(const char *region_uuid)
+/*
+ * In local heartbeat mode, region_uuid passed matches the dlm domain name.
+ * In global heartbeat mode, region_uuid passed is NULL.
+ *
+ * In local, we only unpin the matching region. In global we unpin all the
+ * active regions.
+ */
+static void o2hb_region_unpin(const char *region_uuid)
 {
 	struct o2hb_region *reg;
+	char *uuid;
+	int found = 0;
+
+	assert_spin_locked(&o2hb_live_lock);
+
+	list_for_each_entry(reg, &o2hb_all_regions, hr_all_item) {
+		uuid = config_item_name(&reg->hr_item);
+		if (region_uuid) {
+			if (strcmp(region_uuid, uuid))
+				continue;
+			found = 1;
+		}
+
+		if (reg->hr_item_pinned) {
+			mlog(ML_CLUSTER, "Unpin region %s\n", uuid);
+			o2nm_undepend_item(&reg->hr_item);
+			reg->hr_item_pinned = 0;
+		}
+		if (found)
+			break;
+	}
+}
+
+static int o2hb_region_inc_user(const char *region_uuid)
+{
+	int ret = 0;
 
 	spin_lock(&o2hb_live_lock);
 
-	reg = o2hb_find_region(region_uuid);
-
-	spin_unlock(&o2hb_live_lock);
-
-	if (reg) {
-		o2nm_undepend_item(&reg->hr_item);
-		o2nm_undepend_this_node();
+	/* local heartbeat */
+	if (!o2hb_global_heartbeat_active()) {
+	    ret = o2hb_region_pin(region_uuid);
+	    goto unlock;
 	}
+
+	/*
+	 * if global heartbeat active and this is the first dependent user,
+	 * pin all regions if quorum region count <= CUT_OFF
+	 */
+	o2hb_dependent_users++;
+	if (o2hb_dependent_users > 1)
+		goto unlock;
+
+	if (o2hb_pop_count(&o2hb_quorum_region_bitmap,
+			   O2NM_MAX_REGIONS) <= O2HB_PIN_CUT_OFF)
+		ret = o2hb_region_pin(NULL);
+
+unlock:
+	spin_unlock(&o2hb_live_lock);
+	return ret;
+}
+
+void o2hb_region_dec_user(const char *region_uuid)
+{
+	spin_lock(&o2hb_live_lock);
+
+	/* local heartbeat */
+	if (!o2hb_global_heartbeat_active()) {
+	    o2hb_region_unpin(region_uuid);
+	    goto unlock;
+	}
+
+	/*
+	 * if global heartbeat active and there are no dependent users,
+	 * unpin all quorum regions
+	 */
+	o2hb_dependent_users--;
+	if (!o2hb_dependent_users)
+		o2hb_region_unpin(NULL);
+
+unlock:
+	spin_unlock(&o2hb_live_lock);
 }
 
 int o2hb_register_callback(const char *region_uuid,
@@ -2291,9 +2459,11 @@
 	}
 
 	if (region_uuid) {
-		ret = o2hb_region_get(region_uuid);
-		if (ret)
+		ret = o2hb_region_inc_user(region_uuid);
+		if (ret) {
+			mlog_errno(ret);
 			goto out;
+		}
 	}
 
 	down_write(&o2hb_callback_sem);
@@ -2311,7 +2481,7 @@
 	up_write(&o2hb_callback_sem);
 	ret = 0;
 out:
-	mlog(ML_HEARTBEAT, "returning %d on behalf of %p for funcs %p\n",
+	mlog(ML_CLUSTER, "returning %d on behalf of %p for funcs %p\n",
 	     ret, __builtin_return_address(0), hc);
 	return ret;
 }
@@ -2322,7 +2492,7 @@
 {
 	BUG_ON(hc->hc_magic != O2HB_CB_MAGIC);
 
-	mlog(ML_HEARTBEAT, "on behalf of %p for funcs %p\n",
+	mlog(ML_CLUSTER, "on behalf of %p for funcs %p\n",
 	     __builtin_return_address(0), hc);
 
 	/* XXX Can this happen _with_ a region reference? */
@@ -2330,7 +2500,7 @@
 		return;
 
 	if (region_uuid)
-		o2hb_region_put(region_uuid);
+		o2hb_region_dec_user(region_uuid);
 
 	down_write(&o2hb_callback_sem);
 
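The heartbeat changes above replace the per-registration region get/put with a pin policy: regions stay pinned while at least one dependent user exists and the quorum region count is at or below O2HB_PIN_CUT_OFF. A small userspace sketch of that counting policy (illustrative only; the real code also walks the region list under o2hb_live_lock):

#include <stdio.h>

#define PIN_CUT_OFF 3

struct hb_state {
	unsigned dependent_users;
	unsigned quorum_regions;
	int pinned;
};

static void inc_user(struct hb_state *s)
{
	/* First dependent user: pin all regions if below the cut-off. */
	if (++s->dependent_users == 1 && s->quorum_regions <= PIN_CUT_OFF)
		s->pinned = 1;
}

static void dec_user(struct hb_state *s)
{
	/* Last dependent user gone: unpin all regions. */
	if (--s->dependent_users == 0)
		s->pinned = 0;
}

int main(void)
{
	struct hb_state s = { 0, 2, 0 };

	inc_user(&s);
	printf("pinned=%d\n", s.pinned);	/* 1 */
	dec_user(&s);
	printf("pinned=%d\n", s.pinned);	/* 0 */
	return 0;
}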
diff --git a/fs/ocfs2/cluster/netdebug.c b/fs/ocfs2/cluster/netdebug.c
index a3f150e..3a583590 100644
--- a/fs/ocfs2/cluster/netdebug.c
+++ b/fs/ocfs2/cluster/netdebug.c
@@ -46,10 +46,15 @@
 #define O2NET_DEBUG_DIR		"o2net"
 #define SC_DEBUG_NAME		"sock_containers"
 #define NST_DEBUG_NAME		"send_tracking"
+#define STATS_DEBUG_NAME	"stats"
+
+#define SHOW_SOCK_CONTAINERS	0
+#define SHOW_SOCK_STATS		1
 
 static struct dentry *o2net_dentry;
 static struct dentry *sc_dentry;
 static struct dentry *nst_dentry;
+static struct dentry *stats_dentry;
 
 static DEFINE_SPINLOCK(o2net_debug_lock);
 
@@ -123,37 +128,42 @@
 static int nst_seq_show(struct seq_file *seq, void *v)
 {
 	struct o2net_send_tracking *nst, *dummy_nst = seq->private;
+	ktime_t now;
+	s64 sock, send, status;
 
 	spin_lock(&o2net_debug_lock);
 	nst = next_nst(dummy_nst);
+	if (!nst)
+		goto out;
 
-	if (nst != NULL) {
-		/* get_task_comm isn't exported.  oh well. */
-		seq_printf(seq, "%p:\n"
-			   "  pid:          %lu\n"
-			   "  tgid:         %lu\n"
-			   "  process name: %s\n"
-			   "  node:         %u\n"
-			   "  sc:           %p\n"
-			   "  message id:   %d\n"
-			   "  message type: %u\n"
-			   "  message key:  0x%08x\n"
-			   "  sock acquiry: %lu.%ld\n"
-			   "  send start:   %lu.%ld\n"
-			   "  wait start:   %lu.%ld\n",
-			   nst, (unsigned long)nst->st_task->pid,
-			   (unsigned long)nst->st_task->tgid,
-			   nst->st_task->comm, nst->st_node,
-			   nst->st_sc, nst->st_id, nst->st_msg_type,
-			   nst->st_msg_key,
-			   nst->st_sock_time.tv_sec,
-			   (long)nst->st_sock_time.tv_usec,
-			   nst->st_send_time.tv_sec,
-			   (long)nst->st_send_time.tv_usec,
-			   nst->st_status_time.tv_sec,
-			   (long)nst->st_status_time.tv_usec);
-	}
+	now = ktime_get();
+	sock = ktime_to_us(ktime_sub(now, nst->st_sock_time));
+	send = ktime_to_us(ktime_sub(now, nst->st_send_time));
+	status = ktime_to_us(ktime_sub(now, nst->st_status_time));
 
+	/* get_task_comm isn't exported.  oh well. */
+	seq_printf(seq, "%p:\n"
+		   "  pid:          %lu\n"
+		   "  tgid:         %lu\n"
+		   "  process name: %s\n"
+		   "  node:         %u\n"
+		   "  sc:           %p\n"
+		   "  message id:   %d\n"
+		   "  message type: %u\n"
+		   "  message key:  0x%08x\n"
+		   "  sock acquiry: %lld usecs ago\n"
+		   "  send start:   %lld usecs ago\n"
+		   "  wait start:   %lld usecs ago\n",
+		   nst, (unsigned long)task_pid_nr(nst->st_task),
+		   (unsigned long)nst->st_task->tgid,
+		   nst->st_task->comm, nst->st_node,
+		   nst->st_sc, nst->st_id, nst->st_msg_type,
+		   nst->st_msg_key,
+		   (long long)sock,
+		   (long long)send,
+		   (long long)status);
+
+out:
 	spin_unlock(&o2net_debug_lock);
 
 	return 0;
@@ -228,6 +238,11 @@
 	spin_unlock(&o2net_debug_lock);
 }
 
+struct o2net_sock_debug {
+	int dbg_ctxt;
+	struct o2net_sock_container *dbg_sock;
+};
+
 static struct o2net_sock_container
 			*next_sc(struct o2net_sock_container *sc_start)
 {
@@ -253,7 +268,8 @@
 
 static void *sc_seq_start(struct seq_file *seq, loff_t *pos)
 {
-	struct o2net_sock_container *sc, *dummy_sc = seq->private;
+	struct o2net_sock_debug *sd = seq->private;
+	struct o2net_sock_container *sc, *dummy_sc = sd->dbg_sock;
 
 	spin_lock(&o2net_debug_lock);
 	sc = next_sc(dummy_sc);
@@ -264,7 +280,8 @@
 
 static void *sc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 {
-	struct o2net_sock_container *sc, *dummy_sc = seq->private;
+	struct o2net_sock_debug *sd = seq->private;
+	struct o2net_sock_container *sc, *dummy_sc = sd->dbg_sock;
 
 	spin_lock(&o2net_debug_lock);
 	sc = next_sc(dummy_sc);
@@ -276,65 +293,107 @@
 	return sc; /* unused, just needs to be null when done */
 }
 
-#define TV_SEC_USEC(TV) TV.tv_sec, (long)TV.tv_usec
+#ifdef CONFIG_OCFS2_FS_STATS
+# define sc_send_count(_s)		((_s)->sc_send_count)
+# define sc_recv_count(_s)		((_s)->sc_recv_count)
+# define sc_tv_acquiry_total_ns(_s)	(ktime_to_ns((_s)->sc_tv_acquiry_total))
+# define sc_tv_send_total_ns(_s)	(ktime_to_ns((_s)->sc_tv_send_total))
+# define sc_tv_status_total_ns(_s)	(ktime_to_ns((_s)->sc_tv_status_total))
+# define sc_tv_process_total_ns(_s)	(ktime_to_ns((_s)->sc_tv_process_total))
+#else
+# define sc_send_count(_s)		(0U)
+# define sc_recv_count(_s)		(0U)
+# define sc_tv_acquiry_total_ns(_s)	(0LL)
+# define sc_tv_send_total_ns(_s)	(0LL)
+# define sc_tv_status_total_ns(_s)	(0LL)
+# define sc_tv_process_total_ns(_s)	(0LL)
+#endif
+
+/* So that debugfs.ocfs2 can determine which format is being used */
+#define O2NET_STATS_STR_VERSION		1
+static void sc_show_sock_stats(struct seq_file *seq,
+			       struct o2net_sock_container *sc)
+{
+	if (!sc)
+		return;
+
+	seq_printf(seq, "%d,%u,%lu,%lld,%lld,%lld,%lu,%lld\n", O2NET_STATS_STR_VERSION,
+		   sc->sc_node->nd_num, (unsigned long)sc_send_count(sc),
+		   (long long)sc_tv_acquiry_total_ns(sc),
+		   (long long)sc_tv_send_total_ns(sc),
+		   (long long)sc_tv_status_total_ns(sc),
+		   (unsigned long)sc_recv_count(sc),
+		   (long long)sc_tv_process_total_ns(sc));
+}
+
+static void sc_show_sock_container(struct seq_file *seq,
+				   struct o2net_sock_container *sc)
+{
+	struct inet_sock *inet = NULL;
+	__be32 saddr = 0, daddr = 0;
+	__be16 sport = 0, dport = 0;
+
+	if (!sc)
+		return;
+
+	if (sc->sc_sock) {
+		inet = inet_sk(sc->sc_sock->sk);
+		/* the stack's structs aren't sparse endian clean */
+		saddr = (__force __be32)inet->inet_saddr;
+		daddr = (__force __be32)inet->inet_daddr;
+		sport = (__force __be16)inet->inet_sport;
+		dport = (__force __be16)inet->inet_dport;
+	}
+
+	/* XXX sigh, inet-> doesn't have sparse annotation so any
+	 * use of it here generates a warning with -Wbitwise */
+	seq_printf(seq, "%p:\n"
+		   "  krefs:           %d\n"
+		   "  sock:            %pI4:%u -> "
+				      "%pI4:%u\n"
+		   "  remote node:     %s\n"
+		   "  page off:        %zu\n"
+		   "  handshake ok:    %u\n"
+		   "  timer:           %lld usecs\n"
+		   "  data ready:      %lld usecs\n"
+		   "  advance start:   %lld usecs\n"
+		   "  advance stop:    %lld usecs\n"
+		   "  func start:      %lld usecs\n"
+		   "  func stop:       %lld usecs\n"
+		   "  func key:        0x%08x\n"
+		   "  func type:       %u\n",
+		   sc,
+		   atomic_read(&sc->sc_kref.refcount),
+		   &saddr, inet ? ntohs(sport) : 0,
+		   &daddr, inet ? ntohs(dport) : 0,
+		   sc->sc_node->nd_name,
+		   sc->sc_page_off,
+		   sc->sc_handshake_ok,
+		   (long long)ktime_to_us(sc->sc_tv_timer),
+		   (long long)ktime_to_us(sc->sc_tv_data_ready),
+		   (long long)ktime_to_us(sc->sc_tv_advance_start),
+		   (long long)ktime_to_us(sc->sc_tv_advance_stop),
+		   (long long)ktime_to_us(sc->sc_tv_func_start),
+		   (long long)ktime_to_us(sc->sc_tv_func_stop),
+		   sc->sc_msg_key,
+		   sc->sc_msg_type);
+}
 
 static int sc_seq_show(struct seq_file *seq, void *v)
 {
-	struct o2net_sock_container *sc, *dummy_sc = seq->private;
+	struct o2net_sock_debug *sd = seq->private;
+	struct o2net_sock_container *sc, *dummy_sc = sd->dbg_sock;
 
 	spin_lock(&o2net_debug_lock);
 	sc = next_sc(dummy_sc);
 
-	if (sc != NULL) {
-		struct inet_sock *inet = NULL;
-
-		__be32 saddr = 0, daddr = 0;
-		__be16 sport = 0, dport = 0;
-
-		if (sc->sc_sock) {
-			inet = inet_sk(sc->sc_sock->sk);
-			/* the stack's structs aren't sparse endian clean */
-			saddr = (__force __be32)inet->inet_saddr;
-			daddr = (__force __be32)inet->inet_daddr;
-			sport = (__force __be16)inet->inet_sport;
-			dport = (__force __be16)inet->inet_dport;
-		}
-
-		/* XXX sigh, inet-> doesn't have sparse annotation so any
-		 * use of it here generates a warning with -Wbitwise */
-		seq_printf(seq, "%p:\n"
-			   "  krefs:           %d\n"
-			   "  sock:            %pI4:%u -> "
-					      "%pI4:%u\n"
-			   "  remote node:     %s\n"
-			   "  page off:        %zu\n"
-			   "  handshake ok:    %u\n"
-			   "  timer:           %lu.%ld\n"
-			   "  data ready:      %lu.%ld\n"
-			   "  advance start:   %lu.%ld\n"
-			   "  advance stop:    %lu.%ld\n"
-			   "  func start:      %lu.%ld\n"
-			   "  func stop:       %lu.%ld\n"
-			   "  func key:        %u\n"
-			   "  func type:       %u\n",
-			   sc,
-			   atomic_read(&sc->sc_kref.refcount),
-			   &saddr, inet ? ntohs(sport) : 0,
-			   &daddr, inet ? ntohs(dport) : 0,
-			   sc->sc_node->nd_name,
-			   sc->sc_page_off,
-			   sc->sc_handshake_ok,
-			   TV_SEC_USEC(sc->sc_tv_timer),
-			   TV_SEC_USEC(sc->sc_tv_data_ready),
-			   TV_SEC_USEC(sc->sc_tv_advance_start),
-			   TV_SEC_USEC(sc->sc_tv_advance_stop),
-			   TV_SEC_USEC(sc->sc_tv_func_start),
-			   TV_SEC_USEC(sc->sc_tv_func_stop),
-			   sc->sc_msg_key,
-			   sc->sc_msg_type);
+	if (sc) {
+		if (sd->dbg_ctxt == SHOW_SOCK_CONTAINERS)
+			sc_show_sock_container(seq, sc);
+		else
+			sc_show_sock_stats(seq, sc);
 	}
 
-
 	spin_unlock(&o2net_debug_lock);
 
 	return 0;
@@ -351,7 +410,7 @@
 	.show = sc_seq_show,
 };
 
-static int sc_fop_open(struct inode *inode, struct file *file)
+static int sc_common_open(struct file *file, struct o2net_sock_debug *sd)
 {
 	struct o2net_sock_container *dummy_sc;
 	struct seq_file *seq;
@@ -369,7 +428,8 @@
 		goto out;
 
 	seq = file->private_data;
-	seq->private = dummy_sc;
+	seq->private = sd;
+	sd->dbg_sock = dummy_sc;
 	o2net_debug_add_sc(dummy_sc);
 
 	dummy_sc = NULL;
@@ -382,12 +442,48 @@
 static int sc_fop_release(struct inode *inode, struct file *file)
 {
 	struct seq_file *seq = file->private_data;
-	struct o2net_sock_container *dummy_sc = seq->private;
+	struct o2net_sock_debug *sd = seq->private;
+	struct o2net_sock_container *dummy_sc = sd->dbg_sock;
 
 	o2net_debug_del_sc(dummy_sc);
 	return seq_release_private(inode, file);
 }
 
+static int stats_fop_open(struct inode *inode, struct file *file)
+{
+	struct o2net_sock_debug *sd;
+
+	sd = kmalloc(sizeof(struct o2net_sock_debug), GFP_KERNEL);
+	if (sd == NULL)
+		return -ENOMEM;
+
+	sd->dbg_ctxt = SHOW_SOCK_STATS;
+	sd->dbg_sock = NULL;
+
+	return sc_common_open(file, sd);
+}
+
+static const struct file_operations stats_seq_fops = {
+	.open = stats_fop_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = sc_fop_release,
+};
+
+static int sc_fop_open(struct inode *inode, struct file *file)
+{
+	struct o2net_sock_debug *sd;
+
+	sd = kmalloc(sizeof(struct o2net_sock_debug), GFP_KERNEL);
+	if (sd == NULL)
+		return -ENOMEM;
+
+	sd->dbg_ctxt = SHOW_SOCK_CONTAINERS;
+	sd->dbg_sock = NULL;
+
+	return sc_common_open(file, sd);
+}
+
 static const struct file_operations sc_seq_fops = {
 	.open = sc_fop_open,
 	.read = seq_read,
@@ -419,25 +515,29 @@
 		goto bail;
 	}
 
+	stats_dentry = debugfs_create_file(STATS_DEBUG_NAME, S_IFREG|S_IRUSR,
+					   o2net_dentry, NULL,
+					   &stats_seq_fops);
+	if (!stats_dentry) {
+		mlog_errno(-ENOMEM);
+		goto bail;
+	}
+
 	return 0;
 bail:
-	if (sc_dentry)
-		debugfs_remove(sc_dentry);
-	if (nst_dentry)
-		debugfs_remove(nst_dentry);
-	if (o2net_dentry)
-		debugfs_remove(o2net_dentry);
+	debugfs_remove(stats_dentry);
+	debugfs_remove(sc_dentry);
+	debugfs_remove(nst_dentry);
+	debugfs_remove(o2net_dentry);
 	return -ENOMEM;
 }
 
 void o2net_debugfs_exit(void)
 {
-	if (sc_dentry)
-		debugfs_remove(sc_dentry);
-	if (nst_dentry)
-		debugfs_remove(nst_dentry);
-	if (o2net_dentry)
-		debugfs_remove(o2net_dentry);
+	debugfs_remove(stats_dentry);
+	debugfs_remove(sc_dentry);
+	debugfs_remove(nst_dentry);
+	debugfs_remove(o2net_dentry);
 }
 
 #endif	/* CONFIG_DEBUG_FS */
diff --git a/fs/ocfs2/cluster/tcp.c b/fs/ocfs2/cluster/tcp.c
index 9aa426e..3b11cb1 100644
--- a/fs/ocfs2/cluster/tcp.c
+++ b/fs/ocfs2/cluster/tcp.c
@@ -153,63 +153,114 @@
 	nst->st_node = node;
 }
 
-static void o2net_set_nst_sock_time(struct o2net_send_tracking *nst)
-{
-	do_gettimeofday(&nst->st_sock_time);
-}
-
-static void o2net_set_nst_send_time(struct o2net_send_tracking *nst)
-{
-	do_gettimeofday(&nst->st_send_time);
-}
-
-static void o2net_set_nst_status_time(struct o2net_send_tracking *nst)
-{
-	do_gettimeofday(&nst->st_status_time);
-}
-
-static void o2net_set_nst_sock_container(struct o2net_send_tracking *nst,
-					 struct o2net_sock_container *sc)
-{
-	nst->st_sc = sc;
-}
-
-static void o2net_set_nst_msg_id(struct o2net_send_tracking *nst, u32 msg_id)
-{
-	nst->st_id = msg_id;
-}
-
-#else  /* CONFIG_DEBUG_FS */
-
-static inline void o2net_init_nst(struct o2net_send_tracking *nst, u32 msgtype,
-				  u32 msgkey, struct task_struct *task, u8 node)
-{
-}
-
 static inline void o2net_set_nst_sock_time(struct o2net_send_tracking *nst)
 {
+	nst->st_sock_time = ktime_get();
 }
 
 static inline void o2net_set_nst_send_time(struct o2net_send_tracking *nst)
 {
+	nst->st_send_time = ktime_get();
 }
 
 static inline void o2net_set_nst_status_time(struct o2net_send_tracking *nst)
 {
+	nst->st_status_time = ktime_get();
 }
 
 static inline void o2net_set_nst_sock_container(struct o2net_send_tracking *nst,
 						struct o2net_sock_container *sc)
 {
+	nst->st_sc = sc;
 }
 
 static inline void o2net_set_nst_msg_id(struct o2net_send_tracking *nst,
 					u32 msg_id)
 {
+	nst->st_id = msg_id;
 }
 
+static inline void o2net_set_sock_timer(struct o2net_sock_container *sc)
+{
+	sc->sc_tv_timer = ktime_get();
+}
+
+static inline void o2net_set_data_ready_time(struct o2net_sock_container *sc)
+{
+	sc->sc_tv_data_ready = ktime_get();
+}
+
+static inline void o2net_set_advance_start_time(struct o2net_sock_container *sc)
+{
+	sc->sc_tv_advance_start = ktime_get();
+}
+
+static inline void o2net_set_advance_stop_time(struct o2net_sock_container *sc)
+{
+	sc->sc_tv_advance_stop = ktime_get();
+}
+
+static inline void o2net_set_func_start_time(struct o2net_sock_container *sc)
+{
+	sc->sc_tv_func_start = ktime_get();
+}
+
+static inline void o2net_set_func_stop_time(struct o2net_sock_container *sc)
+{
+	sc->sc_tv_func_stop = ktime_get();
+}
+
+static ktime_t o2net_get_func_run_time(struct o2net_sock_container *sc)
+{
+	return ktime_sub(sc->sc_tv_func_stop, sc->sc_tv_func_start);
+}
+#else  /* CONFIG_DEBUG_FS */
+# define o2net_init_nst(a, b, c, d, e)
+# define o2net_set_nst_sock_time(a)
+# define o2net_set_nst_send_time(a)
+# define o2net_set_nst_status_time(a)
+# define o2net_set_nst_sock_container(a, b)
+# define o2net_set_nst_msg_id(a, b)
+# define o2net_set_sock_timer(a)
+# define o2net_set_data_ready_time(a)
+# define o2net_set_advance_start_time(a)
+# define o2net_set_advance_stop_time(a)
+# define o2net_set_func_start_time(a)
+# define o2net_set_func_stop_time(a)
+# define o2net_get_func_run_time(a)		(ktime_t)0
 #endif /* CONFIG_DEBUG_FS */
 
+#ifdef CONFIG_OCFS2_FS_STATS
+static void o2net_update_send_stats(struct o2net_send_tracking *nst,
+				    struct o2net_sock_container *sc)
+{
+	sc->sc_tv_status_total = ktime_add(sc->sc_tv_status_total,
+					   ktime_sub(ktime_get(),
+						     nst->st_status_time));
+	sc->sc_tv_send_total = ktime_add(sc->sc_tv_send_total,
+					 ktime_sub(nst->st_status_time,
+						   nst->st_send_time));
+	sc->sc_tv_acquiry_total = ktime_add(sc->sc_tv_acquiry_total,
+					    ktime_sub(nst->st_send_time,
+						      nst->st_sock_time));
+	sc->sc_send_count++;
+}
+
+static void o2net_update_recv_stats(struct o2net_sock_container *sc)
+{
+	sc->sc_tv_process_total = ktime_add(sc->sc_tv_process_total,
+					    o2net_get_func_run_time(sc));
+	sc->sc_recv_count++;
+}
+
+#else
+
+# define o2net_update_send_stats(a, b)
+
+# define o2net_update_recv_stats(sc)
+
+#endif /* CONFIG_OCFS2_FS_STATS */
+
 static inline int o2net_reconnect_delay(void)
 {
 	return o2nm_single_cluster->cl_reconnect_delay_ms;
@@ -355,6 +406,7 @@
 		sc->sc_sock = NULL;
 	}
 
+	o2nm_undepend_item(&sc->sc_node->nd_item);
 	o2nm_node_put(sc->sc_node);
 	sc->sc_node = NULL;
 
@@ -376,6 +428,7 @@
 {
 	struct o2net_sock_container *sc, *ret = NULL;
 	struct page *page = NULL;
+	int status = 0;
 
 	page = alloc_page(GFP_NOFS);
 	sc = kzalloc(sizeof(*sc), GFP_NOFS);
@@ -386,6 +439,13 @@
 	o2nm_node_get(node);
 	sc->sc_node = node;
 
+	/* pin the node item of the remote node */
+	status = o2nm_depend_item(&node->nd_item);
+	if (status) {
+		mlog_errno(status);
+		o2nm_node_put(node);
+		goto out;
+	}
 	INIT_WORK(&sc->sc_connect_work, o2net_sc_connect_completed);
 	INIT_WORK(&sc->sc_rx_work, o2net_rx_until_empty);
 	INIT_WORK(&sc->sc_shutdown_work, o2net_shutdown_sc);
@@ -546,7 +606,7 @@
 	if (sk->sk_user_data) {
 		struct o2net_sock_container *sc = sk->sk_user_data;
 		sclog(sc, "data_ready hit\n");
-		do_gettimeofday(&sc->sc_tv_data_ready);
+		o2net_set_data_ready_time(sc);
 		o2net_sc_queue_work(sc, &sc->sc_rx_work);
 		ready = sc->sc_data_ready;
 	} else {
@@ -1070,6 +1130,8 @@
 	o2net_set_nst_status_time(&nst);
 	wait_event(nsw.ns_wq, o2net_nsw_completed(nn, &nsw));
 
+	o2net_update_send_stats(&nst, sc);
+
 	/* Note that we avoid overwriting the callers status return
 	 * variable if a system error was reported on the other
 	 * side. Callers beware. */
@@ -1183,13 +1245,15 @@
 	if (syserr != O2NET_ERR_NONE)
 		goto out_respond;
 
-	do_gettimeofday(&sc->sc_tv_func_start);
+	o2net_set_func_start_time(sc);
 	sc->sc_msg_key = be32_to_cpu(hdr->key);
 	sc->sc_msg_type = be16_to_cpu(hdr->msg_type);
 	handler_status = (nmh->nh_func)(hdr, sizeof(struct o2net_msg) +
 					     be16_to_cpu(hdr->data_len),
 					nmh->nh_func_data, &ret_data);
-	do_gettimeofday(&sc->sc_tv_func_stop);
+	o2net_set_func_stop_time(sc);
+
+	o2net_update_recv_stats(sc);
 
 out_respond:
 	/* this destroys the hdr, so don't use it after this */
@@ -1300,7 +1364,7 @@
 	size_t datalen;
 
 	sclog(sc, "receiving\n");
-	do_gettimeofday(&sc->sc_tv_advance_start);
+	o2net_set_advance_start_time(sc);
 
 	if (unlikely(sc->sc_handshake_ok == 0)) {
 		if(sc->sc_page_off < sizeof(struct o2net_handshake)) {
@@ -1375,7 +1439,7 @@
 
 out:
 	sclog(sc, "ret = %d\n", ret);
-	do_gettimeofday(&sc->sc_tv_advance_stop);
+	o2net_set_advance_stop_time(sc);
 	return ret;
 }
 
@@ -1475,27 +1539,28 @@
 {
 	struct o2net_sock_container *sc = (struct o2net_sock_container *)data;
 	struct o2net_node *nn = o2net_nn_from_num(sc->sc_node->nd_num);
-	struct timeval now;
 
-	do_gettimeofday(&now);
+#ifdef CONFIG_DEBUG_FS
+	ktime_t now = ktime_get();
+#endif
 
 	printk(KERN_NOTICE "o2net: connection to " SC_NODEF_FMT " has been idle for %u.%u "
 	     "seconds, shutting it down.\n", SC_NODEF_ARGS(sc),
 		     o2net_idle_timeout() / 1000,
 		     o2net_idle_timeout() % 1000);
-	mlog(ML_NOTICE, "here are some times that might help debug the "
-	     "situation: (tmr %ld.%ld now %ld.%ld dr %ld.%ld adv "
-	     "%ld.%ld:%ld.%ld func (%08x:%u) %ld.%ld:%ld.%ld)\n",
-	     sc->sc_tv_timer.tv_sec, (long) sc->sc_tv_timer.tv_usec,
-	     now.tv_sec, (long) now.tv_usec,
-	     sc->sc_tv_data_ready.tv_sec, (long) sc->sc_tv_data_ready.tv_usec,
-	     sc->sc_tv_advance_start.tv_sec,
-	     (long) sc->sc_tv_advance_start.tv_usec,
-	     sc->sc_tv_advance_stop.tv_sec,
-	     (long) sc->sc_tv_advance_stop.tv_usec,
+
+#ifdef CONFIG_DEBUG_FS
+	mlog(ML_NOTICE, "Here are some times that might help debug the "
+	     "situation: (Timer: %lld, Now %lld, DataReady %lld, Advance %lld-%lld, "
+	     "Key 0x%08x, Func %u, FuncTime %lld-%lld)\n",
+	     (long long)ktime_to_us(sc->sc_tv_timer), (long long)ktime_to_us(now),
+	     (long long)ktime_to_us(sc->sc_tv_data_ready),
+	     (long long)ktime_to_us(sc->sc_tv_advance_start),
+	     (long long)ktime_to_us(sc->sc_tv_advance_stop),
 	     sc->sc_msg_key, sc->sc_msg_type,
-	     sc->sc_tv_func_start.tv_sec, (long) sc->sc_tv_func_start.tv_usec,
-	     sc->sc_tv_func_stop.tv_sec, (long) sc->sc_tv_func_stop.tv_usec);
+	     (long long)ktime_to_us(sc->sc_tv_func_start),
+	     (long long)ktime_to_us(sc->sc_tv_func_stop));
+#endif
 
 	/*
 	 * Initialize the nn_timeout so that the next connection attempt
@@ -1511,7 +1576,7 @@
 	o2net_sc_cancel_delayed_work(sc, &sc->sc_keepalive_work);
 	o2net_sc_queue_delayed_work(sc, &sc->sc_keepalive_work,
 		      msecs_to_jiffies(o2net_keepalive_delay()));
-	do_gettimeofday(&sc->sc_tv_timer);
+	o2net_set_sock_timer(sc);
 	mod_timer(&sc->sc_idle_timeout,
 	       jiffies + msecs_to_jiffies(o2net_idle_timeout()));
 }
diff --git a/fs/ocfs2/cluster/tcp_internal.h b/fs/ocfs2/cluster/tcp_internal.h
index 15fdbdf..4cbcb65 100644
--- a/fs/ocfs2/cluster/tcp_internal.h
+++ b/fs/ocfs2/cluster/tcp_internal.h
@@ -166,18 +166,27 @@
 	/* original handlers for the sockets */
 	void			(*sc_state_change)(struct sock *sk);
 	void			(*sc_data_ready)(struct sock *sk, int bytes);
-#ifdef CONFIG_DEBUG_FS
-	struct list_head        sc_net_debug_item;
-#endif
-	struct timeval 		sc_tv_timer;
-	struct timeval 		sc_tv_data_ready;
-	struct timeval 		sc_tv_advance_start;
-	struct timeval 		sc_tv_advance_stop;
-	struct timeval 		sc_tv_func_start;
-	struct timeval 		sc_tv_func_stop;
+
 	u32			sc_msg_key;
 	u16			sc_msg_type;
 
+#ifdef CONFIG_DEBUG_FS
+	struct list_head        sc_net_debug_item;
+	ktime_t			sc_tv_timer;
+	ktime_t			sc_tv_data_ready;
+	ktime_t			sc_tv_advance_start;
+	ktime_t			sc_tv_advance_stop;
+	ktime_t			sc_tv_func_start;
+	ktime_t			sc_tv_func_stop;
+#endif
+#ifdef CONFIG_OCFS2_FS_STATS
+	ktime_t			sc_tv_acquiry_total;
+	ktime_t			sc_tv_send_total;
+	ktime_t			sc_tv_status_total;
+	u32			sc_send_count;
+	u32			sc_recv_count;
+	ktime_t			sc_tv_process_total;
+#endif
 	struct mutex		sc_send_lock;
 };
 
@@ -220,9 +229,9 @@
 	u32				st_msg_type;
 	u32				st_msg_key;
 	u8				st_node;
-	struct timeval			st_sock_time;
-	struct timeval			st_send_time;
-	struct timeval			st_status_time;
+	ktime_t				st_sock_time;
+	ktime_t				st_send_time;
+	ktime_t				st_status_time;
 };
 #else
 struct o2net_send_tracking {
diff --git a/fs/ocfs2/dlm/dlmast.c b/fs/ocfs2/dlm/dlmast.c
index f449991..3a3ed4b 100644
--- a/fs/ocfs2/dlm/dlmast.c
+++ b/fs/ocfs2/dlm/dlmast.c
@@ -90,19 +90,29 @@
 
 void __dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
 {
-	mlog_entry_void();
+	struct dlm_lock_resource *res;
 
 	BUG_ON(!dlm);
 	BUG_ON(!lock);
 
+	res = lock->lockres;
+
 	assert_spin_locked(&dlm->ast_lock);
+
 	if (!list_empty(&lock->ast_list)) {
-		mlog(ML_ERROR, "ast list not empty!!  pending=%d, newlevel=%d\n",
+		mlog(ML_ERROR, "%s: res %.*s, lock %u:%llu, "
+		     "AST list not empty, pending %d, newlevel %d\n",
+		     dlm->name, res->lockname.len, res->lockname.name,
+		     dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
+		     dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)),
 		     lock->ast_pending, lock->ml.type);
 		BUG();
 	}
 	if (lock->ast_pending)
-		mlog(0, "lock has an ast getting flushed right now\n");
+		mlog(0, "%s: res %.*s, lock %u:%llu, AST getting flushed\n",
+		     dlm->name, res->lockname.len, res->lockname.name,
+		     dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
+		     dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)));
 
 	/* putting lock on list, add a ref */
 	dlm_lock_get(lock);
@@ -110,9 +120,10 @@
 
 	/* check to see if this ast obsoletes the bast */
 	if (dlm_should_cancel_bast(dlm, lock)) {
-		struct dlm_lock_resource *res = lock->lockres;
-		mlog(0, "%s: cancelling bast for %.*s\n",
-		     dlm->name, res->lockname.len, res->lockname.name);
+		mlog(0, "%s: res %.*s, lock %u:%llu, Cancelling BAST\n",
+		     dlm->name, res->lockname.len, res->lockname.name,
+		     dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
+		     dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)));
 		lock->bast_pending = 0;
 		list_del_init(&lock->bast_list);
 		lock->ml.highest_blocked = LKM_IVMODE;
@@ -134,8 +145,6 @@
 
 void dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
 {
-	mlog_entry_void();
-
 	BUG_ON(!dlm);
 	BUG_ON(!lock);
 
@@ -147,15 +156,21 @@
 
 void __dlm_queue_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
 {
-	mlog_entry_void();
+	struct dlm_lock_resource *res;
 
 	BUG_ON(!dlm);
 	BUG_ON(!lock);
+
 	assert_spin_locked(&dlm->ast_lock);
 
+	res = lock->lockres;
+
 	BUG_ON(!list_empty(&lock->bast_list));
 	if (lock->bast_pending)
-		mlog(0, "lock has a bast getting flushed right now\n");
+		mlog(0, "%s: res %.*s, lock %u:%llu, BAST getting flushed\n",
+		     dlm->name, res->lockname.len, res->lockname.name,
+		     dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
+		     dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)));
 
 	/* putting lock on list, add a ref */
 	dlm_lock_get(lock);
@@ -167,8 +182,6 @@
 
 void dlm_queue_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
 {
-	mlog_entry_void();
-
 	BUG_ON(!dlm);
 	BUG_ON(!lock);
 
@@ -213,7 +226,10 @@
 	dlm_astlockfunc_t *fn;
 	struct dlm_lockstatus *lksb;
 
-	mlog_entry_void();
+	mlog(0, "%s: res %.*s, lock %u:%llu, Local AST\n", dlm->name,
+	     res->lockname.len, res->lockname.name,
+	     dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
+	     dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)));
 
 	lksb = lock->lksb;
 	fn = lock->ast;
@@ -231,7 +247,10 @@
 	struct dlm_lockstatus *lksb;
 	int lksbflags;
 
-	mlog_entry_void();
+	mlog(0, "%s: res %.*s, lock %u:%llu, Remote AST\n", dlm->name,
+	     res->lockname.len, res->lockname.name,
+	     dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
+	     dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)));
 
 	lksb = lock->lksb;
 	BUG_ON(lock->ml.node == dlm->node_num);
@@ -250,9 +269,14 @@
 {
 	dlm_bastlockfunc_t *fn = lock->bast;
 
-	mlog_entry_void();
 	BUG_ON(lock->ml.node != dlm->node_num);
 
+	mlog(0, "%s: res %.*s, lock %u:%llu, Local BAST, blocked %d\n",
+	     dlm->name, res->lockname.len, res->lockname.name,
+	     dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
+	     dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)),
+	     blocked_type);
+
 	(*fn)(lock->astdata, blocked_type);
 }
 
@@ -332,7 +356,8 @@
 	/* cannot get a proxy ast message if this node owns it */
 	BUG_ON(res->owner == dlm->node_num);
 
-	mlog(0, "lockres %.*s\n", res->lockname.len, res->lockname.name);
+	mlog(0, "%s: res %.*s\n", dlm->name, res->lockname.len,
+	     res->lockname.name);
 
 	spin_lock(&res->spinlock);
 	if (res->state & DLM_LOCK_RES_RECOVERING) {
@@ -382,8 +407,12 @@
 	if (past->type == DLM_AST) {
 		/* do not alter lock refcount.  switching lists. */
 		list_move_tail(&lock->list, &res->granted);
-		mlog(0, "ast: Adding to granted list... type=%d, "
-		     "convert_type=%d\n", lock->ml.type, lock->ml.convert_type);
+		mlog(0, "%s: res %.*s, lock %u:%llu, Granted type %d => %d\n",
+		     dlm->name, res->lockname.len, res->lockname.name,
+		     dlm_get_lock_cookie_node(be64_to_cpu(cookie)),
+		     dlm_get_lock_cookie_seq(be64_to_cpu(cookie)),
+		     lock->ml.type, lock->ml.convert_type);
+
 		if (lock->ml.convert_type != LKM_IVMODE) {
 			lock->ml.type = lock->ml.convert_type;
 			lock->ml.convert_type = LKM_IVMODE;
@@ -426,9 +455,9 @@
 	size_t veclen = 1;
 	int status;
 
-	mlog_entry("res %.*s, to=%u, type=%d, blocked_type=%d\n",
-		   res->lockname.len, res->lockname.name, lock->ml.node,
-		   msg_type, blocked_type);
+	mlog(0, "%s: res %.*s, to %u, type %d, blocked_type %d\n", dlm->name,
+	     res->lockname.len, res->lockname.name, lock->ml.node, msg_type,
+	     blocked_type);
 
 	memset(&past, 0, sizeof(struct dlm_proxy_ast));
 	past.node_idx = dlm->node_num;
@@ -441,7 +470,6 @@
 	vec[0].iov_len = sizeof(struct dlm_proxy_ast);
 	vec[0].iov_base = &past;
 	if (flags & DLM_LKSB_GET_LVB) {
-		mlog(0, "returning requested LVB data\n");
 		be32_add_cpu(&past.flags, LKM_GET_LVB);
 		vec[1].iov_len = DLM_LVB_LEN;
 		vec[1].iov_base = lock->lksb->lvb;
@@ -451,8 +479,8 @@
 	ret = o2net_send_message_vec(DLM_PROXY_AST_MSG, dlm->key, vec, veclen,
 				     lock->ml.node, &status);
 	if (ret < 0)
-		mlog(ML_ERROR, "Error %d when sending message %u (key 0x%x) to "
-		     "node %u\n", ret, DLM_PROXY_AST_MSG, dlm->key,
+		mlog(ML_ERROR, "%s: res %.*s, error %d send AST to node %u\n",
+		     dlm->name, res->lockname.len, res->lockname.name, ret,
 		     lock->ml.node);
 	else {
 		if (status == DLM_RECOVERING) {
diff --git a/fs/ocfs2/dlm/dlmcommon.h b/fs/ocfs2/dlm/dlmcommon.h
index b36d0bf..4bdf7ba 100644
--- a/fs/ocfs2/dlm/dlmcommon.h
+++ b/fs/ocfs2/dlm/dlmcommon.h
@@ -50,10 +50,10 @@
 #define dlm_lockid_hash(_n, _l) full_name_hash(_n, _l)
 
 enum dlm_mle_type {
-	DLM_MLE_BLOCK,
-	DLM_MLE_MASTER,
-	DLM_MLE_MIGRATION,
-	DLM_MLE_NUM_TYPES
+	DLM_MLE_BLOCK = 0,
+	DLM_MLE_MASTER = 1,
+	DLM_MLE_MIGRATION = 2,
+	DLM_MLE_NUM_TYPES = 3,
 };
 
 struct dlm_master_list_entry {
@@ -82,8 +82,8 @@
 
 enum dlm_ast_type {
 	DLM_AST = 0,
-	DLM_BAST,
-	DLM_ASTUNLOCK
+	DLM_BAST = 1,
+	DLM_ASTUNLOCK = 2,
 };
 
 
@@ -119,9 +119,9 @@
 
 enum dlm_ctxt_state {
 	DLM_CTXT_NEW = 0,
-	DLM_CTXT_JOINED,
-	DLM_CTXT_IN_SHUTDOWN,
-	DLM_CTXT_LEAVING,
+	DLM_CTXT_JOINED = 1,
+	DLM_CTXT_IN_SHUTDOWN = 2,
+	DLM_CTXT_LEAVING = 3,
 };
 
 struct dlm_ctxt
@@ -388,8 +388,8 @@
 
 enum dlm_lockres_list {
 	DLM_GRANTED_LIST = 0,
-	DLM_CONVERTING_LIST,
-	DLM_BLOCKED_LIST
+	DLM_CONVERTING_LIST = 1,
+	DLM_BLOCKED_LIST = 2,
 };
 
 static inline int dlm_lvb_is_empty(char *lvb)
@@ -427,27 +427,27 @@
 
 
 enum {
-	DLM_MASTER_REQUEST_MSG    = 500,
-	DLM_UNUSED_MSG1,         /* 501 */
-	DLM_ASSERT_MASTER_MSG,	 /* 502 */
-	DLM_CREATE_LOCK_MSG,	 /* 503 */
-	DLM_CONVERT_LOCK_MSG,	 /* 504 */
-	DLM_PROXY_AST_MSG,	 /* 505 */
-	DLM_UNLOCK_LOCK_MSG,	 /* 506 */
-	DLM_DEREF_LOCKRES_MSG,	 /* 507 */
-	DLM_MIGRATE_REQUEST_MSG, /* 508 */
-	DLM_MIG_LOCKRES_MSG, 	 /* 509 */
-	DLM_QUERY_JOIN_MSG,	 /* 510 */
-	DLM_ASSERT_JOINED_MSG,	 /* 511 */
-	DLM_CANCEL_JOIN_MSG,	 /* 512 */
-	DLM_EXIT_DOMAIN_MSG,	 /* 513 */
-	DLM_MASTER_REQUERY_MSG,	 /* 514 */
-	DLM_LOCK_REQUEST_MSG,	 /* 515 */
-	DLM_RECO_DATA_DONE_MSG,	 /* 516 */
-	DLM_BEGIN_RECO_MSG,	 /* 517 */
-	DLM_FINALIZE_RECO_MSG,	 /* 518 */
-	DLM_QUERY_REGION,	 /* 519 */
-	DLM_QUERY_NODEINFO,	 /* 520 */
+	DLM_MASTER_REQUEST_MSG		= 500,
+	DLM_UNUSED_MSG1			= 501,
+	DLM_ASSERT_MASTER_MSG		= 502,
+	DLM_CREATE_LOCK_MSG		= 503,
+	DLM_CONVERT_LOCK_MSG		= 504,
+	DLM_PROXY_AST_MSG		= 505,
+	DLM_UNLOCK_LOCK_MSG		= 506,
+	DLM_DEREF_LOCKRES_MSG		= 507,
+	DLM_MIGRATE_REQUEST_MSG		= 508,
+	DLM_MIG_LOCKRES_MSG		= 509,
+	DLM_QUERY_JOIN_MSG		= 510,
+	DLM_ASSERT_JOINED_MSG		= 511,
+	DLM_CANCEL_JOIN_MSG		= 512,
+	DLM_EXIT_DOMAIN_MSG		= 513,
+	DLM_MASTER_REQUERY_MSG		= 514,
+	DLM_LOCK_REQUEST_MSG		= 515,
+	DLM_RECO_DATA_DONE_MSG		= 516,
+	DLM_BEGIN_RECO_MSG		= 517,
+	DLM_FINALIZE_RECO_MSG		= 518,
+	DLM_QUERY_REGION		= 519,
+	DLM_QUERY_NODEINFO		= 520,
 };
 
 struct dlm_reco_node_data
@@ -460,19 +460,19 @@
 enum {
 	DLM_RECO_NODE_DATA_DEAD = -1,
 	DLM_RECO_NODE_DATA_INIT = 0,
-	DLM_RECO_NODE_DATA_REQUESTING,
-	DLM_RECO_NODE_DATA_REQUESTED,
-	DLM_RECO_NODE_DATA_RECEIVING,
-	DLM_RECO_NODE_DATA_DONE,
-	DLM_RECO_NODE_DATA_FINALIZE_SENT,
+	DLM_RECO_NODE_DATA_REQUESTING = 1,
+	DLM_RECO_NODE_DATA_REQUESTED = 2,
+	DLM_RECO_NODE_DATA_RECEIVING = 3,
+	DLM_RECO_NODE_DATA_DONE = 4,
+	DLM_RECO_NODE_DATA_FINALIZE_SENT = 5,
 };
 
 
 enum {
 	DLM_MASTER_RESP_NO = 0,
-	DLM_MASTER_RESP_YES,
-	DLM_MASTER_RESP_MAYBE,
-	DLM_MASTER_RESP_ERROR
+	DLM_MASTER_RESP_YES = 1,
+	DLM_MASTER_RESP_MAYBE = 2,
+	DLM_MASTER_RESP_ERROR = 3,
 };
 
 
@@ -649,9 +649,9 @@
 #define DLM_MOD_KEY (0x666c6172)
 enum dlm_query_join_response_code {
 	JOIN_DISALLOW = 0,
-	JOIN_OK,
-	JOIN_OK_NO_MAP,
-	JOIN_PROTOCOL_MISMATCH,
+	JOIN_OK = 1,
+	JOIN_OK_NO_MAP = 2,
+	JOIN_PROTOCOL_MISMATCH = 3,
 };
 
 struct dlm_query_join_packet {
diff --git a/fs/ocfs2/dlm/dlmdebug.c b/fs/ocfs2/dlm/dlmdebug.c
index 272ec86..04a32be 100644
--- a/fs/ocfs2/dlm/dlmdebug.c
+++ b/fs/ocfs2/dlm/dlmdebug.c
@@ -370,92 +370,46 @@
 	kref_get(&dc->debug_refcnt);
 }
 
-static struct debug_buffer *debug_buffer_allocate(void)
+static int debug_release(struct inode *inode, struct file *file)
 {
-	struct debug_buffer *db = NULL;
-
-	db = kzalloc(sizeof(struct debug_buffer), GFP_KERNEL);
-	if (!db)
-		goto bail;
-
-	db->len = PAGE_SIZE;
-	db->buf = kmalloc(db->len, GFP_KERNEL);
-	if (!db->buf)
-		goto bail;
-
-	return db;
-bail:
-	kfree(db);
-	return NULL;
-}
-
-static ssize_t debug_buffer_read(struct file *file, char __user *buf,
-				 size_t nbytes, loff_t *ppos)
-{
-	struct debug_buffer *db = file->private_data;
-
-	return simple_read_from_buffer(buf, nbytes, ppos, db->buf, db->len);
-}
-
-static loff_t debug_buffer_llseek(struct file *file, loff_t off, int whence)
-{
-	struct debug_buffer *db = file->private_data;
-	loff_t new = -1;
-
-	switch (whence) {
-	case 0:
-		new = off;
-		break;
-	case 1:
-		new = file->f_pos + off;
-		break;
-	}
-
-	if (new < 0 || new > db->len)
-		return -EINVAL;
-
-	return (file->f_pos = new);
-}
-
-static int debug_buffer_release(struct inode *inode, struct file *file)
-{
-	struct debug_buffer *db = file->private_data;
-
-	if (db)
-		kfree(db->buf);
-	kfree(db);
-
+	free_page((unsigned long)file->private_data);
 	return 0;
 }
+
+static ssize_t debug_read(struct file *file, char __user *buf,
+			  size_t nbytes, loff_t *ppos)
+{
+	return simple_read_from_buffer(buf, nbytes, ppos, file->private_data,
+				       i_size_read(file->f_mapping->host));
+}
 /* end - util funcs */
 
 /* begin - purge list funcs */
-static int debug_purgelist_print(struct dlm_ctxt *dlm, struct debug_buffer *db)
+static int debug_purgelist_print(struct dlm_ctxt *dlm, char *buf, int len)
 {
 	struct dlm_lock_resource *res;
 	int out = 0;
 	unsigned long total = 0;
 
-	out += snprintf(db->buf + out, db->len - out,
+	out += snprintf(buf + out, len - out,
 			"Dumping Purgelist for Domain: %s\n", dlm->name);
 
 	spin_lock(&dlm->spinlock);
 	list_for_each_entry(res, &dlm->purge_list, purge) {
 		++total;
-		if (db->len - out < 100)
+		if (len - out < 100)
 			continue;
 		spin_lock(&res->spinlock);
 		out += stringify_lockname(res->lockname.name,
 					  res->lockname.len,
-					  db->buf + out, db->len - out);
-		out += snprintf(db->buf + out, db->len - out, "\t%ld\n",
+					  buf + out, len - out);
+		out += snprintf(buf + out, len - out, "\t%ld\n",
 				(jiffies - res->last_used)/HZ);
 		spin_unlock(&res->spinlock);
 	}
 	spin_unlock(&dlm->spinlock);
 
-	out += snprintf(db->buf + out, db->len - out,
-			"Total on list: %ld\n", total);
+	out += snprintf(buf + out, len - out, "Total on list: %ld\n", total);
 
 	return out;
 }
@@ -463,15 +417,15 @@
 static int debug_purgelist_open(struct inode *inode, struct file *file)
 {
 	struct dlm_ctxt *dlm = inode->i_private;
-	struct debug_buffer *db;
+	char *buf = NULL;
 
-	db = debug_buffer_allocate();
-	if (!db)
+	buf = (char *) get_zeroed_page(GFP_NOFS);
+	if (!buf)
 		goto bail;
 
-	db->len = debug_purgelist_print(dlm, db);
+	i_size_write(inode, debug_purgelist_print(dlm, buf, PAGE_SIZE - 1));
 
-	file->private_data = db;
+	file->private_data = buf;
 
 	return 0;
 bail:
@@ -480,14 +434,14 @@
 
 static const struct file_operations debug_purgelist_fops = {
 	.open =		debug_purgelist_open,
-	.release =	debug_buffer_release,
-	.read =		debug_buffer_read,
-	.llseek =	debug_buffer_llseek,
+	.release =	debug_release,
+	.read =		debug_read,
+	.llseek =	generic_file_llseek,
 };
 /* end - purge list funcs */
 
 /* begin - debug mle funcs */
-static int debug_mle_print(struct dlm_ctxt *dlm, struct debug_buffer *db)
+static int debug_mle_print(struct dlm_ctxt *dlm, char *buf, int len)
 {
 	struct dlm_master_list_entry *mle;
 	struct hlist_head *bucket;
@@ -495,7 +449,7 @@
 	int i, out = 0;
 	unsigned long total = 0, longest = 0, bucket_count = 0;
 
-	out += snprintf(db->buf + out, db->len - out,
+	out += snprintf(buf + out, len - out,
 			"Dumping MLEs for Domain: %s\n", dlm->name);
 
 	spin_lock(&dlm->master_lock);
@@ -506,16 +460,16 @@
 					  master_hash_node);
 			++total;
 			++bucket_count;
-			if (db->len - out < 200)
+			if (len - out < 200)
 				continue;
-			out += dump_mle(mle, db->buf + out, db->len - out);
+			out += dump_mle(mle, buf + out, len - out);
 		}
 		longest = max(longest, bucket_count);
 		bucket_count = 0;
 	}
 	spin_unlock(&dlm->master_lock);
 
-	out += snprintf(db->buf + out, db->len - out,
+	out += snprintf(buf + out, len - out,
 			"Total: %ld, Longest: %ld\n", total, longest);
 	return out;
 }
@@ -523,15 +477,15 @@
 static int debug_mle_open(struct inode *inode, struct file *file)
 {
 	struct dlm_ctxt *dlm = inode->i_private;
-	struct debug_buffer *db;
+	char *buf = NULL;
 
-	db = debug_buffer_allocate();
-	if (!db)
+	buf = (char *) get_zeroed_page(GFP_NOFS);
+	if (!buf)
 		goto bail;
 
-	db->len = debug_mle_print(dlm, db);
+	i_size_write(inode, debug_mle_print(dlm, buf, PAGE_SIZE - 1));
 
-	file->private_data = db;
+	file->private_data = buf;
 
 	return 0;
 bail:
@@ -540,9 +494,9 @@
 
 static const struct file_operations debug_mle_fops = {
 	.open =		debug_mle_open,
-	.release =	debug_buffer_release,
-	.read =		debug_buffer_read,
-	.llseek =	debug_buffer_llseek,
+	.release =	debug_release,
+	.read =		debug_read,
+	.llseek =	generic_file_llseek,
 };
 
 /* end - debug mle funcs */
@@ -757,7 +711,7 @@
 /* end - debug lockres funcs */
 
 /* begin - debug state funcs */
-static int debug_state_print(struct dlm_ctxt *dlm, struct debug_buffer *db)
+static int debug_state_print(struct dlm_ctxt *dlm, char *buf, int len)
 {
 	int out = 0;
 	struct dlm_reco_node_data *node;
@@ -781,35 +735,35 @@
 	}
 
 	/* Domain: xxxxxxxxxx  Key: 0xdfbac769 */
-	out += snprintf(db->buf + out, db->len - out,
+	out += snprintf(buf + out, len - out,
 			"Domain: %s  Key: 0x%08x  Protocol: %d.%d\n",
 			dlm->name, dlm->key, dlm->dlm_locking_proto.pv_major,
 			dlm->dlm_locking_proto.pv_minor);
 
 	/* Thread Pid: xxx  Node: xxx  State: xxxxx */
-	out += snprintf(db->buf + out, db->len - out,
+	out += snprintf(buf + out, len - out,
 			"Thread Pid: %d  Node: %d  State: %s\n",
-			dlm->dlm_thread_task->pid, dlm->node_num, state);
+			task_pid_nr(dlm->dlm_thread_task), dlm->node_num, state);
 
 	/* Number of Joins: xxx  Joining Node: xxx */
-	out += snprintf(db->buf + out, db->len - out,
+	out += snprintf(buf + out, len - out,
 			"Number of Joins: %d  Joining Node: %d\n",
 			dlm->num_joins, dlm->joining_node);
 
 	/* Domain Map: xx xx xx */
-	out += snprintf(db->buf + out, db->len - out, "Domain Map: ");
+	out += snprintf(buf + out, len - out, "Domain Map: ");
 	out += stringify_nodemap(dlm->domain_map, O2NM_MAX_NODES,
-				 db->buf + out, db->len - out);
-	out += snprintf(db->buf + out, db->len - out, "\n");
+				 buf + out, len - out);
+	out += snprintf(buf + out, len - out, "\n");
 
 	/* Live Map: xx xx xx */
-	out += snprintf(db->buf + out, db->len - out, "Live Map: ");
+	out += snprintf(buf + out, len - out, "Live Map: ");
 	out += stringify_nodemap(dlm->live_nodes_map, O2NM_MAX_NODES,
-				 db->buf + out, db->len - out);
-	out += snprintf(db->buf + out, db->len - out, "\n");
+				 buf + out, len - out);
+	out += snprintf(buf + out, len - out, "\n");
 
 	/* Lock Resources: xxx (xxx) */
-	out += snprintf(db->buf + out, db->len - out,
+	out += snprintf(buf + out, len - out,
 			"Lock Resources: %d (%d)\n",
 			atomic_read(&dlm->res_cur_count),
 			atomic_read(&dlm->res_tot_count));
@@ -821,29 +775,29 @@
 		cur_mles += atomic_read(&dlm->mle_cur_count[i]);
 
 	/* MLEs: xxx (xxx) */
-	out += snprintf(db->buf + out, db->len - out,
+	out += snprintf(buf + out, len - out,
 			"MLEs: %d (%d)\n", cur_mles, tot_mles);
 
 	/*  Blocking: xxx (xxx) */
-	out += snprintf(db->buf + out, db->len - out,
+	out += snprintf(buf + out, len - out,
 			"  Blocking: %d (%d)\n",
 			atomic_read(&dlm->mle_cur_count[DLM_MLE_BLOCK]),
 			atomic_read(&dlm->mle_tot_count[DLM_MLE_BLOCK]));
 
 	/*  Mastery: xxx (xxx) */
-	out += snprintf(db->buf + out, db->len - out,
+	out += snprintf(buf + out, len - out,
 			"  Mastery: %d (%d)\n",
 			atomic_read(&dlm->mle_cur_count[DLM_MLE_MASTER]),
 			atomic_read(&dlm->mle_tot_count[DLM_MLE_MASTER]));
 
 	/*  Migration: xxx (xxx) */
-	out += snprintf(db->buf + out, db->len - out,
+	out += snprintf(buf + out, len - out,
 			"  Migration: %d (%d)\n",
 			atomic_read(&dlm->mle_cur_count[DLM_MLE_MIGRATION]),
 			atomic_read(&dlm->mle_tot_count[DLM_MLE_MIGRATION]));
 
 	/* Lists: Dirty=Empty  Purge=InUse  PendingASTs=Empty  ... */
-	out += snprintf(db->buf + out, db->len - out,
+	out += snprintf(buf + out, len - out,
 			"Lists: Dirty=%s  Purge=%s  PendingASTs=%s  "
 			"PendingBASTs=%s\n",
 			(list_empty(&dlm->dirty_list) ? "Empty" : "InUse"),
@@ -852,12 +806,12 @@
 			(list_empty(&dlm->pending_basts) ? "Empty" : "InUse"));
 
 	/* Purge Count: xxx  Refs: xxx */
-	out += snprintf(db->buf + out, db->len - out,
+	out += snprintf(buf + out, len - out,
 			"Purge Count: %d  Refs: %d\n", dlm->purge_count,
 			atomic_read(&dlm->dlm_refs.refcount));
 
 	/* Dead Node: xxx */
-	out += snprintf(db->buf + out, db->len - out,
+	out += snprintf(buf + out, len - out,
 			"Dead Node: %d\n", dlm->reco.dead_node);
 
 	/* What about DLM_RECO_STATE_FINALIZE? */
@@ -867,19 +821,19 @@
 		state = "INACTIVE";
 
 	/* Recovery Pid: xxxx  Master: xxx  State: xxxx */
-	out += snprintf(db->buf + out, db->len - out,
+	out += snprintf(buf + out, len - out,
 			"Recovery Pid: %d  Master: %d  State: %s\n",
-			dlm->dlm_reco_thread_task->pid,
+			task_pid_nr(dlm->dlm_reco_thread_task),
 			dlm->reco.new_master, state);
 
 	/* Recovery Map: xx xx */
-	out += snprintf(db->buf + out, db->len - out, "Recovery Map: ");
+	out += snprintf(buf + out, len - out, "Recovery Map: ");
 	out += stringify_nodemap(dlm->recovery_map, O2NM_MAX_NODES,
-				 db->buf + out, db->len - out);
-	out += snprintf(db->buf + out, db->len - out, "\n");
+				 buf + out, len - out);
+	out += snprintf(buf + out, len - out, "\n");
 
 	/* Recovery Node State: */
-	out += snprintf(db->buf + out, db->len - out, "Recovery Node State:\n");
+	out += snprintf(buf + out, len - out, "Recovery Node State:\n");
 	list_for_each_entry(node, &dlm->reco.node_data, list) {
 		switch (node->state) {
 		case DLM_RECO_NODE_DATA_INIT:
@@ -907,7 +861,7 @@
 			state = "BAD";
 			break;
 		}
-		out += snprintf(db->buf + out, db->len - out, "\t%u - %s\n",
+		out += snprintf(buf + out, len - out, "\t%u - %s\n",
 				node->node_num, state);
 	}
 
@@ -919,15 +873,15 @@
 static int debug_state_open(struct inode *inode, struct file *file)
 {
 	struct dlm_ctxt *dlm = inode->i_private;
-	struct debug_buffer *db = NULL;
+	char *buf = NULL;
 
-	db = debug_buffer_allocate();
-	if (!db)
+	buf = (char *) get_zeroed_page(GFP_NOFS);
+	if (!buf)
 		goto bail;
 
-	db->len = debug_state_print(dlm, db);
+	i_size_write(inode, debug_state_print(dlm, buf, PAGE_SIZE - 1));
 
-	file->private_data = db;
+	file->private_data = buf;
 
 	return 0;
 bail:
@@ -936,9 +890,9 @@
 
 static const struct file_operations debug_state_fops = {
 	.open =		debug_state_open,
-	.release =	debug_buffer_release,
-	.read =		debug_buffer_read,
-	.llseek =	debug_buffer_llseek,
+	.release =	debug_release,
+	.read =		debug_read,
+	.llseek =	generic_file_llseek,
 };
 /* end  - debug state funcs */
 
@@ -1002,14 +956,10 @@
 	struct dlm_debug_ctxt *dc = dlm->dlm_debug_ctxt;
 
 	if (dc) {
-		if (dc->debug_purgelist_dentry)
-			debugfs_remove(dc->debug_purgelist_dentry);
-		if (dc->debug_mle_dentry)
-			debugfs_remove(dc->debug_mle_dentry);
-		if (dc->debug_lockres_dentry)
-			debugfs_remove(dc->debug_lockres_dentry);
-		if (dc->debug_state_dentry)
-			debugfs_remove(dc->debug_state_dentry);
+		debugfs_remove(dc->debug_purgelist_dentry);
+		debugfs_remove(dc->debug_mle_dentry);
+		debugfs_remove(dc->debug_lockres_dentry);
+		debugfs_remove(dc->debug_state_dentry);
 		dlm_debug_put(dc);
 	}
 }
@@ -1040,8 +990,7 @@
 
 void dlm_destroy_debugfs_subroot(struct dlm_ctxt *dlm)
 {
-	if (dlm->dlm_debugfs_subroot)
-		debugfs_remove(dlm->dlm_debugfs_subroot);
+	debugfs_remove(dlm->dlm_debugfs_subroot);
 }
 
 /* debugfs root */
@@ -1057,7 +1006,6 @@
 
 void dlm_destroy_debugfs_root(void)
 {
-	if (dlm_debugfs_root)
-		debugfs_remove(dlm_debugfs_root);
+	debugfs_remove(dlm_debugfs_root);
 }
 #endif	/* CONFIG_DEBUG_FS */
diff --git a/fs/ocfs2/dlm/dlmdebug.h b/fs/ocfs2/dlm/dlmdebug.h
index 8c686d2..1f27c48 100644
--- a/fs/ocfs2/dlm/dlmdebug.h
+++ b/fs/ocfs2/dlm/dlmdebug.h
@@ -37,11 +37,6 @@
 	struct dentry *debug_purgelist_dentry;
 };
 
-struct debug_buffer {
-	int len;
-	char *buf;
-};
-
 struct debug_lockres {
 	int dl_len;
 	char *dl_buf;
diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c
index cc2aaa9..7e38a07 100644
--- a/fs/ocfs2/dlm/dlmdomain.c
+++ b/fs/ocfs2/dlm/dlmdomain.c
@@ -460,8 +460,6 @@
 		}
 		cond_resched_lock(&dlm->spinlock);
 		num += n;
-		mlog(0, "%s: touched %d lockreses in bucket %d "
-		     "(tot=%d)\n", dlm->name, n, i, num);
 	}
 	spin_unlock(&dlm->spinlock);
 	wake_up(&dlm->dlm_thread_wq);
@@ -1661,8 +1659,8 @@
 
 static void dlm_unregister_domain_handlers(struct dlm_ctxt *dlm)
 {
-	o2hb_unregister_callback(NULL, &dlm->dlm_hb_up);
-	o2hb_unregister_callback(NULL, &dlm->dlm_hb_down);
+	o2hb_unregister_callback(dlm->name, &dlm->dlm_hb_up);
+	o2hb_unregister_callback(dlm->name, &dlm->dlm_hb_down);
 	o2net_unregister_handler_list(&dlm->dlm_domain_handlers);
 }
 
@@ -1674,13 +1672,13 @@
 
 	o2hb_setup_callback(&dlm->dlm_hb_down, O2HB_NODE_DOWN_CB,
 			    dlm_hb_node_down_cb, dlm, DLM_HB_NODE_DOWN_PRI);
-	status = o2hb_register_callback(NULL, &dlm->dlm_hb_down);
+	status = o2hb_register_callback(dlm->name, &dlm->dlm_hb_down);
 	if (status)
 		goto bail;
 
 	o2hb_setup_callback(&dlm->dlm_hb_up, O2HB_NODE_UP_CB,
 			    dlm_hb_node_up_cb, dlm, DLM_HB_NODE_UP_PRI);
-	status = o2hb_register_callback(NULL, &dlm->dlm_hb_up);
+	status = o2hb_register_callback(dlm->name, &dlm->dlm_hb_up);
 	if (status)
 		goto bail;
 
diff --git a/fs/ocfs2/dlm/dlmlock.c b/fs/ocfs2/dlm/dlmlock.c
index 69cf369..7009292 100644
--- a/fs/ocfs2/dlm/dlmlock.c
+++ b/fs/ocfs2/dlm/dlmlock.c
@@ -106,6 +106,9 @@
 
 		if (!dlm_lock_compatible(tmplock->ml.type, lock->ml.type))
 			return 0;
+		if (!dlm_lock_compatible(tmplock->ml.convert_type,
+					 lock->ml.type))
+			return 0;
 	}
 
 	return 1;
diff --git a/fs/ocfs2/dlm/dlmthread.c b/fs/ocfs2/dlm/dlmthread.c
index 2211acf..1d6d1d2 100644
--- a/fs/ocfs2/dlm/dlmthread.c
+++ b/fs/ocfs2/dlm/dlmthread.c
@@ -122,15 +122,13 @@
 void __dlm_lockres_calc_usage(struct dlm_ctxt *dlm,
 			      struct dlm_lock_resource *res)
 {
-	mlog_entry("%.*s\n", res->lockname.len, res->lockname.name);
-
 	assert_spin_locked(&dlm->spinlock);
 	assert_spin_locked(&res->spinlock);
 
 	if (__dlm_lockres_unused(res)){
 		if (list_empty(&res->purge)) {
-			mlog(0, "putting lockres %.*s:%p onto purge list\n",
-			     res->lockname.len, res->lockname.name, res);
+			mlog(0, "%s: Adding res %.*s to purge list\n",
+			     dlm->name, res->lockname.len, res->lockname.name);
 
 			res->last_used = jiffies;
 			dlm_lockres_get(res);
@@ -138,8 +136,8 @@
 			dlm->purge_count++;
 		}
 	} else if (!list_empty(&res->purge)) {
-		mlog(0, "removing lockres %.*s:%p from purge list, owner=%u\n",
-		     res->lockname.len, res->lockname.name, res, res->owner);
+		mlog(0, "%s: Removing res %.*s from purge list\n",
+		     dlm->name, res->lockname.len, res->lockname.name);
 
 		list_del_init(&res->purge);
 		dlm_lockres_put(res);
@@ -150,7 +148,6 @@
 void dlm_lockres_calc_usage(struct dlm_ctxt *dlm,
 			    struct dlm_lock_resource *res)
 {
-	mlog_entry("%.*s\n", res->lockname.len, res->lockname.name);
 	spin_lock(&dlm->spinlock);
 	spin_lock(&res->spinlock);
 
@@ -171,9 +168,8 @@
 
 	master = (res->owner == dlm->node_num);
 
-
-	mlog(0, "purging lockres %.*s, master = %d\n", res->lockname.len,
-	     res->lockname.name, master);
+	mlog(0, "%s: Purging res %.*s, master %d\n", dlm->name,
+	     res->lockname.len, res->lockname.name, master);
 
 	if (!master) {
 		res->state |= DLM_LOCK_RES_DROPPING_REF;
@@ -189,27 +185,25 @@
 		/* clear our bit from the master's refmap, ignore errors */
 		ret = dlm_drop_lockres_ref(dlm, res);
 		if (ret < 0) {
-			mlog_errno(ret);
+			mlog(ML_ERROR, "%s: deref %.*s failed %d\n", dlm->name,
+			     res->lockname.len, res->lockname.name, ret);
 			if (!dlm_is_host_down(ret))
 				BUG();
 		}
-		mlog(0, "%s:%.*s: dlm_deref_lockres returned %d\n",
-		     dlm->name, res->lockname.len, res->lockname.name, ret);
 		spin_lock(&dlm->spinlock);
 		spin_lock(&res->spinlock);
 	}
 
 	if (!list_empty(&res->purge)) {
-		mlog(0, "removing lockres %.*s:%p from purgelist, "
-		     "master = %d\n", res->lockname.len, res->lockname.name,
-		     res, master);
+		mlog(0, "%s: Removing res %.*s from purgelist, master %d\n",
+		     dlm->name, res->lockname.len, res->lockname.name, master);
 		list_del_init(&res->purge);
 		dlm_lockres_put(res);
 		dlm->purge_count--;
 	}
 
 	if (!__dlm_lockres_unused(res)) {
-		mlog(ML_ERROR, "found lockres %s:%.*s: in use after deref\n",
+		mlog(ML_ERROR, "%s: res %.*s in use after deref\n",
 		     dlm->name, res->lockname.len, res->lockname.name);
 		__dlm_print_one_lock_resource(res);
 		BUG();
@@ -266,10 +260,10 @@
 		unused = __dlm_lockres_unused(lockres);
 		if (!unused ||
 		    (lockres->state & DLM_LOCK_RES_MIGRATING)) {
-			mlog(0, "lockres %s:%.*s: is in use or "
-			     "being remastered, used %d, state %d\n",
-			     dlm->name, lockres->lockname.len,
-			     lockres->lockname.name, !unused, lockres->state);
+			mlog(0, "%s: res %.*s is in use or being remastered, "
+			     "used %d, state %d\n", dlm->name,
+			     lockres->lockname.len, lockres->lockname.name,
+			     !unused, lockres->state);
 			list_move_tail(&dlm->purge_list, &lockres->purge);
 			spin_unlock(&lockres->spinlock);
 			continue;
@@ -296,15 +290,12 @@
 	struct list_head *head;
 	int can_grant = 1;
 
-	//mlog(0, "res->lockname.len=%d\n", res->lockname.len);
-	//mlog(0, "res->lockname.name=%p\n", res->lockname.name);
-	//mlog(0, "shuffle res %.*s\n", res->lockname.len,
-	//	  res->lockname.name);
-
-	/* because this function is called with the lockres
+	/*
+	 * Because this function is called with the lockres
 	 * spinlock, and because we know that it is not migrating/
 	 * recovering/in-progress, it is fine to reserve asts and
-	 * basts right before queueing them all throughout */
+	 * basts right before queueing them all throughout
+	 */
 	assert_spin_locked(&dlm->ast_lock);
 	assert_spin_locked(&res->spinlock);
 	BUG_ON((res->state & (DLM_LOCK_RES_MIGRATING|
@@ -314,13 +305,13 @@
 converting:
 	if (list_empty(&res->converting))
 		goto blocked;
-	mlog(0, "res %.*s has locks on a convert queue\n", res->lockname.len,
-	     res->lockname.name);
+	mlog(0, "%s: res %.*s has locks on the convert queue\n", dlm->name,
+	     res->lockname.len, res->lockname.name);
 
 	target = list_entry(res->converting.next, struct dlm_lock, list);
 	if (target->ml.convert_type == LKM_IVMODE) {
-		mlog(ML_ERROR, "%.*s: converting a lock with no "
-		     "convert_type!\n", res->lockname.len, res->lockname.name);
+		mlog(ML_ERROR, "%s: res %.*s converting lock to invalid mode\n",
+		     dlm->name, res->lockname.len, res->lockname.name);
 		BUG();
 	}
 	head = &res->granted;
@@ -365,9 +356,12 @@
 		spin_lock(&target->spinlock);
 		BUG_ON(target->ml.highest_blocked != LKM_IVMODE);
 
-		mlog(0, "calling ast for converting lock: %.*s, have: %d, "
-		     "granting: %d, node: %u\n", res->lockname.len,
-		     res->lockname.name, target->ml.type,
+		mlog(0, "%s: res %.*s, AST for Converting lock %u:%llu, type "
+		     "%d => %d, node %u\n", dlm->name, res->lockname.len,
+		     res->lockname.name,
+		     dlm_get_lock_cookie_node(be64_to_cpu(target->ml.cookie)),
+		     dlm_get_lock_cookie_seq(be64_to_cpu(target->ml.cookie)),
+		     target->ml.type,
 		     target->ml.convert_type, target->ml.node);
 
 		target->ml.type = target->ml.convert_type;
@@ -428,11 +422,14 @@
 		spin_lock(&target->spinlock);
 		BUG_ON(target->ml.highest_blocked != LKM_IVMODE);
 
-		mlog(0, "calling ast for blocked lock: %.*s, granting: %d, "
-		     "node: %u\n", res->lockname.len, res->lockname.name,
+		mlog(0, "%s: res %.*s, AST for Blocked lock %u:%llu, type %d, "
+		     "node %u\n", dlm->name, res->lockname.len,
+		     res->lockname.name,
+		     dlm_get_lock_cookie_node(be64_to_cpu(target->ml.cookie)),
+		     dlm_get_lock_cookie_seq(be64_to_cpu(target->ml.cookie)),
 		     target->ml.type, target->ml.node);
 
-		// target->ml.type is already correct
+		/* target->ml.type is already correct */
 		list_move_tail(&target->list, &res->granted);
 
 		BUG_ON(!target->lksb);
@@ -453,7 +450,6 @@
 /* must have NO locks when calling this with res !=NULL * */
 void dlm_kick_thread(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
 {
-	mlog_entry("dlm=%p, res=%p\n", dlm, res);
 	if (res) {
 		spin_lock(&dlm->spinlock);
 		spin_lock(&res->spinlock);
@@ -466,8 +462,6 @@
 
 void __dlm_dirty_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
 {
-	mlog_entry("dlm=%p, res=%p\n", dlm, res);
-
 	assert_spin_locked(&dlm->spinlock);
 	assert_spin_locked(&res->spinlock);
 
@@ -484,13 +478,16 @@
 			res->state |= DLM_LOCK_RES_DIRTY;
 		}
 	}
+
+	mlog(0, "%s: res %.*s\n", dlm->name, res->lockname.len,
+	     res->lockname.name);
 }
 
 
 /* Launch the NM thread for the mounted volume */
 int dlm_launch_thread(struct dlm_ctxt *dlm)
 {
-	mlog(0, "starting dlm thread...\n");
+	mlog(0, "Starting dlm_thread...\n");
 
 	dlm->dlm_thread_task = kthread_run(dlm_thread, dlm, "dlm_thread");
 	if (IS_ERR(dlm->dlm_thread_task)) {
@@ -505,7 +502,7 @@
 void dlm_complete_thread(struct dlm_ctxt *dlm)
 {
 	if (dlm->dlm_thread_task) {
-		mlog(ML_KTHREAD, "waiting for dlm thread to exit\n");
+		mlog(ML_KTHREAD, "Waiting for dlm thread to exit\n");
 		kthread_stop(dlm->dlm_thread_task);
 		dlm->dlm_thread_task = NULL;
 	}
@@ -536,7 +533,12 @@
 		/* get an extra ref on lock */
 		dlm_lock_get(lock);
 		res = lock->lockres;
-		mlog(0, "delivering an ast for this lockres\n");
+		mlog(0, "%s: res %.*s, Flush AST for lock %u:%llu, type %d, "
+		     "node %u\n", dlm->name, res->lockname.len,
+		     res->lockname.name,
+		     dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
+		     dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)),
+		     lock->ml.type, lock->ml.node);
 
 		BUG_ON(!lock->ast_pending);
 
@@ -557,9 +559,9 @@
 		/* possible that another ast was queued while
 		 * we were delivering the last one */
 		if (!list_empty(&lock->ast_list)) {
-			mlog(0, "aha another ast got queued while "
-			     "we were finishing the last one.  will "
-			     "keep the ast_pending flag set.\n");
+			mlog(0, "%s: res %.*s, AST queued while flushing last "
+			     "one\n", dlm->name, res->lockname.len,
+			     res->lockname.name);
 		} else
 			lock->ast_pending = 0;
 
@@ -590,8 +592,12 @@
 		dlm_lock_put(lock);
 		spin_unlock(&dlm->ast_lock);
 
-		mlog(0, "delivering a bast for this lockres "
-		     "(blocked = %d\n", hi);
+		mlog(0, "%s: res %.*s, Flush BAST for lock %u:%llu, "
+		     "blocked %d, node %u\n",
+		     dlm->name, res->lockname.len, res->lockname.name,
+		     dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
+		     dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)),
+		     hi, lock->ml.node);
 
 		if (lock->ml.node != dlm->node_num) {
 			ret = dlm_send_proxy_bast(dlm, res, lock, hi);
@@ -605,9 +611,9 @@
 		/* possible that another bast was queued while
 		 * we were delivering the last one */
 		if (!list_empty(&lock->bast_list)) {
-			mlog(0, "aha another bast got queued while "
-			     "we were finishing the last one.  will "
-			     "keep the bast_pending flag set.\n");
+			mlog(0, "%s: res %.*s, BAST queued while flushing last "
+			     "one\n", dlm->name, res->lockname.len,
+			     res->lockname.name);
 		} else
 			lock->bast_pending = 0;
 
@@ -675,11 +681,12 @@
 			spin_lock(&res->spinlock);
 			if (res->owner != dlm->node_num) {
 				__dlm_print_one_lock_resource(res);
-				mlog(ML_ERROR, "inprog:%s, mig:%s, reco:%s, dirty:%s\n",
-				     res->state & DLM_LOCK_RES_IN_PROGRESS ? "yes" : "no",
-				     res->state & DLM_LOCK_RES_MIGRATING ? "yes" : "no",
-				     res->state & DLM_LOCK_RES_RECOVERING ? "yes" : "no",
-				     res->state & DLM_LOCK_RES_DIRTY ? "yes" : "no");
+				mlog(ML_ERROR, "%s: inprog %d, mig %d, reco %d,"
+				     " dirty %d\n", dlm->name,
+				     !!(res->state & DLM_LOCK_RES_IN_PROGRESS),
+				     !!(res->state & DLM_LOCK_RES_MIGRATING),
+				     !!(res->state & DLM_LOCK_RES_RECOVERING),
+				     !!(res->state & DLM_LOCK_RES_DIRTY));
 			}
 			BUG_ON(res->owner != dlm->node_num);
 
@@ -693,8 +700,8 @@
 				res->state &= ~DLM_LOCK_RES_DIRTY;
 				spin_unlock(&res->spinlock);
 				spin_unlock(&dlm->ast_lock);
-				mlog(0, "delaying list shuffling for in-"
-				     "progress lockres %.*s, state=%d\n",
+				mlog(0, "%s: res %.*s, inprogress, delay list "
+				     "shuffle, state %d\n", dlm->name,
 				     res->lockname.len, res->lockname.name,
 				     res->state);
 				delay = 1;
@@ -706,10 +713,6 @@
 			 * spinlock and do NOT have the dlm lock.
 			 * safe to reserve/queue asts and run the lists. */
 
-			mlog(0, "calling dlm_shuffle_lists with dlm=%s, "
-			     "res=%.*s\n", dlm->name,
-			     res->lockname.len, res->lockname.name);
-
 			/* called while holding lockres lock */
 			dlm_shuffle_lists(dlm, res);
 			res->state &= ~DLM_LOCK_RES_DIRTY;
@@ -733,7 +736,8 @@
 			/* unlikely, but we may need to give time to
 			 * other tasks */
 			if (!--n) {
-				mlog(0, "throttling dlm_thread\n");
+				mlog(0, "%s: Throttling dlm thread\n",
+				     dlm->name);
 				break;
 			}
 		}
diff --git a/fs/ocfs2/export.c b/fs/ocfs2/export.c
index 6adafa5..5dbc306 100644
--- a/fs/ocfs2/export.c
+++ b/fs/ocfs2/export.c
@@ -137,9 +137,7 @@
 	}
 
 	result = d_obtain_alias(inode);
-	if (!IS_ERR(result))
-		d_set_d_op(result, &ocfs2_dentry_ops);
-	else
+	if (IS_ERR(result))
 		mlog_errno(PTR_ERR(result));
 
 bail:
@@ -175,8 +173,6 @@
 	}
 
 	parent = d_obtain_alias(ocfs2_iget(OCFS2_SB(dir->i_sb), blkno, 0, 0));
-	if (!IS_ERR(parent))
-		d_set_d_op(parent, &ocfs2_dentry_ops);
 
 bail_unlock:
 	ocfs2_inode_unlock(dir, 0);
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index bdadbae0..63e3fca 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -1995,6 +1995,7 @@
 	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
 	struct ocfs2_space_resv sr;
 	int change_size = 1;
+	int cmd = OCFS2_IOC_RESVSP64;
 
 	if (!ocfs2_writes_unwritten_extents(osb))
 		return -EOPNOTSUPP;
@@ -2005,12 +2006,15 @@
 	if (mode & FALLOC_FL_KEEP_SIZE)
 		change_size = 0;
 
+	if (mode & FALLOC_FL_PUNCH_HOLE)
+		cmd = OCFS2_IOC_UNRESVSP64;
+
 	sr.l_whence = 0;
 	sr.l_start = (s64)offset;
 	sr.l_len = (s64)len;
 
-	return __ocfs2_change_file_space(NULL, inode, offset,
-					 OCFS2_IOC_RESVSP64, &sr, change_size);
+	return __ocfs2_change_file_space(NULL, inode, offset, cmd, &sr,
+					 change_size);
 }
 
 int ocfs2_check_range_for_refcount(struct inode *inode, loff_t pos,
diff --git a/fs/ocfs2/inode.c b/fs/ocfs2/inode.c
index f935fd6..4068c6c 100644
--- a/fs/ocfs2/inode.c
+++ b/fs/ocfs2/inode.c
@@ -434,7 +434,7 @@
 	 * #1 and #2 can be simply solved by never taking the lock
 	 * here for system files (which are the only type we read
 	 * during mount). It's a heavier approach, but our main
-	 * concern is user-accesible files anyway.
+	 * concern is user-accessible files anyway.
 	 *
 	 * #3 works itself out because we'll eventually take the
 	 * cluster lock before trusting anything anyway.
diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
index d14cad6..849fb4a 100644
--- a/fs/ocfs2/namei.c
+++ b/fs/ocfs2/namei.c
@@ -147,7 +147,6 @@
 	spin_unlock(&oi->ip_lock);
 
 bail_add:
-	d_set_d_op(dentry, &ocfs2_dentry_ops);
 	ret = d_splice_alias(inode, dentry);
 
 	if (inode) {
@@ -415,7 +414,6 @@
 		mlog_errno(status);
 		goto leave;
 	}
-	d_set_d_op(dentry, &ocfs2_dentry_ops);
 
 	status = ocfs2_add_entry(handle, dentry, inode,
 				 OCFS2_I(inode)->ip_blkno, parent_fe_bh,
@@ -743,7 +741,6 @@
 	}
 
 	ihold(inode);
-	d_set_d_op(dentry, &ocfs2_dentry_ops);
 	d_instantiate(dentry, inode);
 
 out_commit:
@@ -1017,8 +1014,11 @@
 		 * An error return must mean that no cluster locks
 		 * were held on function exit.
 		 */
-		if (oi1->ip_blkno != oi2->ip_blkno)
+		if (oi1->ip_blkno != oi2->ip_blkno) {
 			ocfs2_inode_unlock(inode2, 1);
+			brelse(*bh2);
+			*bh2 = NULL;
+		}
 
 		if (status != -ENOENT)
 			mlog_errno(status);
@@ -1794,7 +1794,6 @@
 		mlog_errno(status);
 		goto bail;
 	}
-	d_set_d_op(dentry, &ocfs2_dentry_ops);
 
 	status = ocfs2_add_entry(handle, dentry, inode,
 				 le64_to_cpu(fe->i_blkno), parent_fe_bh,
@@ -2459,7 +2458,6 @@
 		goto out_commit;
 	}
 
-	d_set_d_op(dentry, &ocfs2_dentry_ops);
 	d_instantiate(dentry, inode);
 	status = 0;
 out_commit:
diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
index 70dd3b1..51cd689 100644
--- a/fs/ocfs2/ocfs2.h
+++ b/fs/ocfs2/ocfs2.h
@@ -420,6 +420,11 @@
 	struct inode			*osb_tl_inode;
 	struct buffer_head		*osb_tl_bh;
 	struct delayed_work		osb_truncate_log_wq;
+	/*
+	 * How many clusters in our truncate log.
+	 * It must be protected by osb_tl_inode->i_mutex.
+	 */
+	unsigned int truncated_clusters;
 
 	struct ocfs2_node_map		osb_recovering_orphan_dirs;
 	unsigned int			*osb_orphan_wipes;
diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
index 5fed60d..71998d4 100644
--- a/fs/ocfs2/suballoc.c
+++ b/fs/ocfs2/suballoc.c
@@ -1916,7 +1916,7 @@
 	if (res->sr_bg_blkno) {
 		/* Attempt to short-circuit the usual search mechanism
 		 * by jumping straight to the most recently used
-		 * allocation group. This helps us mantain some
+		 * allocation group. This helps us maintain some
 		 * contiguousness across allocations. */
 		status = ocfs2_search_one_group(ac, handle, bits_wanted,
 						min_bits, res, &bits_left);
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index 17ff46f..06d1f74 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -2097,6 +2097,7 @@
 
 	sb->s_fs_info = osb;
 	sb->s_op = &ocfs2_sops;
+	sb->s_d_op = &ocfs2_dentry_ops;
 	sb->s_export_op = &ocfs2_export_ops;
 	sb->s_qcop = &ocfs2_quotactl_ops;
 	sb->dq_op = &ocfs2_quota_operations;
diff --git a/fs/open.c b/fs/open.c
index 4197b9e..5b6ef7e 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -223,7 +223,12 @@
 		return -EINVAL;
 
 	/* Return error if mode is not supported */
-	if (mode && !(mode & FALLOC_FL_KEEP_SIZE))
+	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
+		return -EOPNOTSUPP;
+
+	/* Punch hole must have keep size set */
+	if ((mode & FALLOC_FL_PUNCH_HOLE) &&
+	    !(mode & FALLOC_FL_KEEP_SIZE))
 		return -EOPNOTSUPP;
 
 	if (!(file->f_mode & FMODE_WRITE))
diff --git a/fs/pipe.c b/fs/pipe.c
index 68f1f8e..e2e95fb 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -441,7 +441,7 @@
 			break;
 		}
 		if (do_wakeup) {
-			wake_up_interruptible_sync(&pipe->wait);
+			wake_up_interruptible_sync_poll(&pipe->wait, POLLOUT);
  			kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
 		}
 		pipe_wait(pipe);
@@ -450,7 +450,7 @@
 
 	/* Signal writers asynchronously that there is more room. */
 	if (do_wakeup) {
-		wake_up_interruptible_sync(&pipe->wait);
+		wake_up_interruptible_sync_poll(&pipe->wait, POLLOUT);
 		kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
 	}
 	if (ret > 0)
@@ -612,7 +612,7 @@
 			break;
 		}
 		if (do_wakeup) {
-			wake_up_interruptible_sync(&pipe->wait);
+			wake_up_interruptible_sync_poll(&pipe->wait, POLLIN);
 			kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
 			do_wakeup = 0;
 		}
@@ -623,7 +623,7 @@
 out:
 	mutex_unlock(&inode->i_mutex);
 	if (do_wakeup) {
-		wake_up_interruptible_sync(&pipe->wait);
+		wake_up_interruptible_sync_poll(&pipe->wait, POLLIN);
 		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
 	}
 	if (ret > 0)
@@ -715,7 +715,7 @@
 	if (!pipe->readers && !pipe->writers) {
 		free_pipe_info(inode);
 	} else {
-		wake_up_interruptible_sync(&pipe->wait);
+		wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT);
 		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
 		kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
 	}
@@ -1004,7 +1004,6 @@
 		goto err_inode;
 	path.mnt = mntget(pipe_mnt);
 
-	d_set_d_op(path.dentry, &pipefs_dentry_operations);
 	d_instantiate(path.dentry, inode);
 
 	err = -ENFILE;
@@ -1266,7 +1265,8 @@
 static struct dentry *pipefs_mount(struct file_system_type *fs_type,
 			 int flags, const char *dev_name, void *data)
 {
-	return mount_pseudo(fs_type, "pipe:", &pipefs_ops, PIPEFS_MAGIC);
+	return mount_pseudo(fs_type, "pipe:", &pipefs_ops,
+			&pipefs_dentry_operations, PIPEFS_MAGIC);
 }
 
 static struct file_system_type pipe_fs_type = {
diff --git a/fs/proc/Makefile b/fs/proc/Makefile
index 288a49e0..df434c5 100644
--- a/fs/proc/Makefile
+++ b/fs/proc/Makefile
@@ -10,12 +10,12 @@
 proc-y       += inode.o root.o base.o generic.o array.o \
 		proc_tty.o
 proc-y	+= cmdline.o
+proc-y	+= consoles.o
 proc-y	+= cpuinfo.o
 proc-y	+= devices.o
 proc-y	+= interrupts.o
 proc-y	+= loadavg.o
 proc-y	+= meminfo.o
-proc-y	+= proc_console.o
 proc-y	+= stat.o
 proc-y	+= uptime.o
 proc-y	+= version.o
diff --git a/fs/proc/array.c b/fs/proc/array.c
index fff6572..df2b703 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -95,7 +95,7 @@
 
 	get_task_comm(tcomm, p);
 
-	seq_printf(m, "Name:\t");
+	seq_puts(m, "Name:\t");
 	end = m->buf + m->size;
 	buf = m->buf + m->count;
 	name = tcomm;
@@ -122,7 +122,7 @@
 		buf++;
 	}
 	m->count = buf - m->buf;
-	seq_printf(m, "\n");
+	seq_putc(m, '\n');
 }
 
 /*
@@ -208,7 +208,7 @@
 		seq_printf(m, "%d ", GROUP_AT(group_info, g));
 	put_cred(cred);
 
-	seq_printf(m, "\n");
+	seq_putc(m, '\n');
 }
 
 static void render_sigset_t(struct seq_file *m, const char *header,
@@ -216,7 +216,7 @@
 {
 	int i;
 
-	seq_printf(m, "%s", header);
+	seq_puts(m, header);
 
 	i = _NSIG;
 	do {
@@ -230,7 +230,7 @@
 		seq_printf(m, "%x", x);
 	} while (i >= 4);
 
-	seq_printf(m, "\n");
+	seq_putc(m, '\n');
 }
 
 static void collect_sigign_sigcatch(struct task_struct *p, sigset_t *ign,
@@ -291,12 +291,12 @@
 {
 	unsigned __capi;
 
-	seq_printf(m, "%s", header);
+	seq_puts(m, header);
 	CAP_FOR_EACH_U32(__capi) {
 		seq_printf(m, "%08x",
 			   a->cap[(_KERNEL_CAPABILITY_U32S-1) - __capi]);
 	}
-	seq_printf(m, "\n");
+	seq_putc(m, '\n');
 }
 
 static inline void task_cap(struct seq_file *m, struct task_struct *p)
@@ -329,12 +329,12 @@
 
 static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
 {
-	seq_printf(m, "Cpus_allowed:\t");
+	seq_puts(m, "Cpus_allowed:\t");
 	seq_cpumask(m, &task->cpus_allowed);
-	seq_printf(m, "\n");
-	seq_printf(m, "Cpus_allowed_list:\t");
+	seq_putc(m, '\n');
+	seq_puts(m, "Cpus_allowed_list:\t");
 	seq_cpumask_list(m, &task->cpus_allowed);
-	seq_printf(m, "\n");
+	seq_putc(m, '\n');
 }
 
 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
@@ -535,15 +535,15 @@
 int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
 			struct pid *pid, struct task_struct *task)
 {
-	int size = 0, resident = 0, shared = 0, text = 0, lib = 0, data = 0;
+	unsigned long size = 0, resident = 0, shared = 0, text = 0, data = 0;
 	struct mm_struct *mm = get_task_mm(task);
 
 	if (mm) {
 		size = task_statm(mm, &shared, &text, &data, &resident);
 		mmput(mm);
 	}
-	seq_printf(m, "%d %d %d %d %d %d %d\n",
-			size, resident, shared, text, lib, data, 0);
+	seq_printf(m, "%lu %lu %lu %lu 0 %lu 0\n",
+			size, resident, shared, text, data);
 
 	return 0;
 }
diff --git a/fs/proc/base.c b/fs/proc/base.c
index b20962c..93f1cdd 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -373,26 +373,20 @@
 		return -ESRCH;
 	seq_puts(m, "Latency Top version : v0.1\n");
 	for (i = 0; i < 32; i++) {
-		if (task->latency_record[i].backtrace[0]) {
+		struct latency_record *lr = &task->latency_record[i];
+		if (lr->backtrace[0]) {
 			int q;
-			seq_printf(m, "%i %li %li ",
-				task->latency_record[i].count,
-				task->latency_record[i].time,
-				task->latency_record[i].max);
+			seq_printf(m, "%i %li %li",
+				   lr->count, lr->time, lr->max);
 			for (q = 0; q < LT_BACKTRACEDEPTH; q++) {
-				char sym[KSYM_SYMBOL_LEN];
-				char *c;
-				if (!task->latency_record[i].backtrace[q])
+				unsigned long bt = lr->backtrace[q];
+				if (!bt)
 					break;
-				if (task->latency_record[i].backtrace[q] == ULONG_MAX)
+				if (bt == ULONG_MAX)
 					break;
-				sprint_symbol(sym, task->latency_record[i].backtrace[q]);
-				c = strchr(sym, '+');
-				if (c)
-					*c = 0;
-				seq_printf(m, "%s ", sym);
+				seq_printf(m, " %ps", (void *)bt);
 			}
-			seq_printf(m, "\n");
+			seq_putc(m, '\n');
 		}
 
 	}
@@ -751,14 +745,7 @@
 
 static int proc_single_open(struct inode *inode, struct file *filp)
 {
-	int ret;
-	ret = single_open(filp, proc_single_show, NULL);
-	if (!ret) {
-		struct seq_file *m = filp->private_data;
-
-		m->private = inode;
-	}
-	return ret;
+	return single_open(filp, proc_single_show, inode);
 }
 
 static const struct file_operations proc_single_file_operations = {
@@ -1386,15 +1373,7 @@
 
 static int sched_open(struct inode *inode, struct file *filp)
 {
-	int ret;
-
-	ret = single_open(filp, sched_show, NULL);
-	if (!ret) {
-		struct seq_file *m = filp->private_data;
-
-		m->private = inode;
-	}
-	return ret;
+	return single_open(filp, sched_show, inode);
 }
 
 static const struct file_operations proc_pid_sched_operations = {
@@ -1530,15 +1509,7 @@
 
 static int comm_open(struct inode *inode, struct file *filp)
 {
-	int ret;
-
-	ret = single_open(filp, comm_show, NULL);
-	if (!ret) {
-		struct seq_file *m = filp->private_data;
-
-		m->private = inode;
-	}
-	return ret;
+	return single_open(filp, comm_show, inode);
 }
 
 static const struct file_operations proc_pid_set_comm_operations = {
diff --git a/fs/proc/proc_console.c b/fs/proc/consoles.c
similarity index 96%
rename from fs/proc/proc_console.c
rename to fs/proc/consoles.c
index 8a70760..eafc22a 100644
--- a/fs/proc/proc_console.c
+++ b/fs/proc/consoles.c
@@ -106,9 +106,9 @@
 	.release	= seq_release,
 };
 
-static int register_proc_consoles(void)
+static int __init proc_consoles_init(void)
 {
 	proc_create("consoles", 0, NULL, &proc_consoles_operations);
 	return 0;
 }
-module_init(register_proc_consoles);
+module_init(proc_consoles_init);
diff --git a/fs/proc/devices.c b/fs/proc/devices.c
index 59ee7da..b143471 100644
--- a/fs/proc/devices.c
+++ b/fs/proc/devices.c
@@ -9,14 +9,14 @@
 
 	if (i < CHRDEV_MAJOR_HASH_SIZE) {
 		if (i == 0)
-			seq_printf(f, "Character devices:\n");
+			seq_puts(f, "Character devices:\n");
 		chrdev_show(f, i);
 	}
 #ifdef CONFIG_BLOCK
 	else {
 		i -= CHRDEV_MAJOR_HASH_SIZE;
 		if (i == 0)
-			seq_printf(f, "\nBlock devices:\n");
+			seq_puts(f, "\nBlock devices:\n");
 		blkdev_show(f, i);
 	}
 #endif
diff --git a/fs/proc/generic.c b/fs/proc/generic.c
index f766be2..01e07f2 100644
--- a/fs/proc/generic.c
+++ b/fs/proc/generic.c
@@ -425,13 +425,10 @@
 		if (de->namelen != dentry->d_name.len)
 			continue;
 		if (!memcmp(dentry->d_name.name, de->name, de->namelen)) {
-			unsigned int ino;
-
-			ino = de->low_ino;
 			pde_get(de);
 			spin_unlock(&proc_subdir_lock);
 			error = -EINVAL;
-			inode = proc_get_inode(dir->i_sb, ino, de);
+			inode = proc_get_inode(dir->i_sb, de);
 			goto out_unlock;
 		}
 	}
@@ -768,12 +765,7 @@
 
 static void free_proc_entry(struct proc_dir_entry *de)
 {
-	unsigned int ino = de->low_ino;
-
-	if (ino < PROC_DYNAMIC_FIRST)
-		return;
-
-	release_inode_number(ino);
+	release_inode_number(de->low_ino);
 
 	if (S_ISLNK(de->mode))
 		kfree(de->data);
@@ -834,12 +826,9 @@
 
 		wait_for_completion(de->pde_unload_completion);
 
-		goto continue_removing;
+		spin_lock(&de->pde_unload_lock);
 	}
-	spin_unlock(&de->pde_unload_lock);
 
-continue_removing:
-	spin_lock(&de->pde_unload_lock);
 	while (!list_empty(&de->pde_openers)) {
 		struct pde_opener *pdeo;
 
diff --git a/fs/proc/inode.c b/fs/proc/inode.c
index 6bcb926..176ce4c 100644
--- a/fs/proc/inode.c
+++ b/fs/proc/inode.c
@@ -416,12 +416,11 @@
 };
 #endif
 
-struct inode *proc_get_inode(struct super_block *sb, unsigned int ino,
-				struct proc_dir_entry *de)
+struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
 {
 	struct inode * inode;
 
-	inode = iget_locked(sb, ino);
+	inode = iget_locked(sb, de->low_ino);
 	if (!inode)
 		return NULL;
 	if (inode->i_state & I_NEW) {
@@ -471,7 +470,7 @@
 	s->s_time_gran = 1;
 	
 	pde_get(&proc_root);
-	root_inode = proc_get_inode(s, PROC_ROOT_INO, &proc_root);
+	root_inode = proc_get_inode(s, &proc_root);
 	if (!root_inode)
 		goto out_no_root;
 	root_inode->i_uid = 0;
diff --git a/fs/proc/internal.h b/fs/proc/internal.h
index 1f24a3e..9ad561d 100644
--- a/fs/proc/internal.h
+++ b/fs/proc/internal.h
@@ -96,7 +96,8 @@
 struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, struct nameidata *);
 int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir);
 unsigned long task_vsize(struct mm_struct *);
-int task_statm(struct mm_struct *, int *, int *, int *, int *);
+unsigned long task_statm(struct mm_struct *,
+	unsigned long *, unsigned long *, unsigned long *, unsigned long *);
 void task_mem(struct seq_file *, struct mm_struct *);
 
 static inline struct proc_dir_entry *pde_get(struct proc_dir_entry *pde)
@@ -108,7 +109,7 @@
 
 extern struct vfsmount *proc_mnt;
 int proc_fill_super(struct super_block *);
-struct inode *proc_get_inode(struct super_block *, unsigned int, struct proc_dir_entry *);
+struct inode *proc_get_inode(struct super_block *, struct proc_dir_entry *);
 
 /*
  * These are generic /proc routines that use the internal
diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
index 6f37c39..d245cb2 100644
--- a/fs/proc/kcore.c
+++ b/fs/proc/kcore.c
@@ -558,7 +558,7 @@
 static const struct file_operations proc_kcore_operations = {
 	.read		= read_kcore,
 	.open		= open_kcore,
-	.llseek		= generic_file_llseek,
+	.llseek		= default_llseek,
 };
 
 #ifdef CONFIG_MEMORY_HOTPLUG
diff --git a/fs/proc/page.c b/fs/proc/page.c
index 3b8b4566..b06c674 100644
--- a/fs/proc/page.c
+++ b/fs/proc/page.c
@@ -40,7 +40,7 @@
 			ppage = pfn_to_page(pfn);
 		else
 			ppage = NULL;
-		if (!ppage)
+		if (!ppage || PageSlab(ppage))
 			pcount = 0;
 		else
 			pcount = page_mapcount(ppage);
diff --git a/fs/proc/proc_tty.c b/fs/proc/proc_tty.c
index 83adcc8..cb761f0 100644
--- a/fs/proc/proc_tty.c
+++ b/fs/proc/proc_tty.c
@@ -36,27 +36,27 @@
 	}
 	switch (p->type) {
 	case TTY_DRIVER_TYPE_SYSTEM:
-		seq_printf(m, "system");
+		seq_puts(m, "system");
 		if (p->subtype == SYSTEM_TYPE_TTY)
-			seq_printf(m, ":/dev/tty");
+			seq_puts(m, ":/dev/tty");
 		else if (p->subtype == SYSTEM_TYPE_SYSCONS)
-			seq_printf(m, ":console");
+			seq_puts(m, ":console");
 		else if (p->subtype == SYSTEM_TYPE_CONSOLE)
-			seq_printf(m, ":vtmaster");
+			seq_puts(m, ":vtmaster");
 		break;
 	case TTY_DRIVER_TYPE_CONSOLE:
-		seq_printf(m, "console");
+		seq_puts(m, "console");
 		break;
 	case TTY_DRIVER_TYPE_SERIAL:
-		seq_printf(m, "serial");
+		seq_puts(m, "serial");
 		break;
 	case TTY_DRIVER_TYPE_PTY:
 		if (p->subtype == PTY_TYPE_MASTER)
-			seq_printf(m, "pty:master");
+			seq_puts(m, "pty:master");
 		else if (p->subtype == PTY_TYPE_SLAVE)
-			seq_printf(m, "pty:slave");
+			seq_puts(m, "pty:slave");
 		else
-			seq_printf(m, "pty");
+			seq_puts(m, "pty");
 		break;
 	default:
 		seq_printf(m, "type:%d.%d", p->type, p->subtype);
@@ -74,19 +74,19 @@
 		/* pseudo-drivers first */
 		seq_printf(m, "%-20s /dev/%-8s ", "/dev/tty", "tty");
 		seq_printf(m, "%3d %7d ", TTYAUX_MAJOR, 0);
-		seq_printf(m, "system:/dev/tty\n");
+		seq_puts(m, "system:/dev/tty\n");
 		seq_printf(m, "%-20s /dev/%-8s ", "/dev/console", "console");
 		seq_printf(m, "%3d %7d ", TTYAUX_MAJOR, 1);
-		seq_printf(m, "system:console\n");
+		seq_puts(m, "system:console\n");
 #ifdef CONFIG_UNIX98_PTYS
 		seq_printf(m, "%-20s /dev/%-8s ", "/dev/ptmx", "ptmx");
 		seq_printf(m, "%3d %7d ", TTYAUX_MAJOR, 2);
-		seq_printf(m, "system\n");
+		seq_puts(m, "system\n");
 #endif
 #ifdef CONFIG_VT
 		seq_printf(m, "%-20s /dev/%-8s ", "/dev/vc/0", "vc/0");
 		seq_printf(m, "%3d %7d ", TTY_MAJOR, 0);
-		seq_printf(m, "system:vtmaster\n");
+		seq_puts(m, "system:vtmaster\n");
 #endif
 	}
 
diff --git a/fs/proc/softirqs.c b/fs/proc/softirqs.c
index 3799473..62604be 100644
--- a/fs/proc/softirqs.c
+++ b/fs/proc/softirqs.c
@@ -10,16 +10,16 @@
 {
 	int i, j;
 
-	seq_printf(p, "                    ");
+	seq_puts(p, "                    ");
 	for_each_possible_cpu(i)
 		seq_printf(p, "CPU%-8d", i);
-	seq_printf(p, "\n");
+	seq_putc(p, '\n');
 
 	for (i = 0; i < NR_SOFTIRQS; i++) {
 		seq_printf(p, "%12s:", softirq_to_name[i]);
 		for_each_possible_cpu(j)
 			seq_printf(p, " %10u", kstat_softirqs_cpu(i, j));
-		seq_printf(p, "\n");
+		seq_putc(p, '\n');
 	}
 	return 0;
 }
diff --git a/fs/proc/stat.c b/fs/proc/stat.c
index e15a19c..1cffa2b 100644
--- a/fs/proc/stat.c
+++ b/fs/proc/stat.c
@@ -126,7 +126,7 @@
 
 	for (i = 0; i < NR_SOFTIRQS; i++)
 		seq_printf(p, " %u", per_softirq_sums[i]);
-	seq_printf(p, "\n");
+	seq_putc(p, '\n');
 
 	return 0;
 }
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index c126c83..c3755bd 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -66,8 +66,9 @@
 	return PAGE_SIZE * mm->total_vm;
 }
 
-int task_statm(struct mm_struct *mm, int *shared, int *text,
-	       int *data, int *resident)
+unsigned long task_statm(struct mm_struct *mm,
+			 unsigned long *shared, unsigned long *text,
+			 unsigned long *data, unsigned long *resident)
 {
 	*shared = get_mm_counter(mm, MM_FILEPAGES);
 	*text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
index cb6306e..b535d3e 100644
--- a/fs/proc/task_nommu.c
+++ b/fs/proc/task_nommu.c
@@ -92,13 +92,14 @@
 	return vsize;
 }
 
-int task_statm(struct mm_struct *mm, int *shared, int *text,
-	       int *data, int *resident)
+unsigned long task_statm(struct mm_struct *mm,
+			 unsigned long *shared, unsigned long *text,
+			 unsigned long *data, unsigned long *resident)
 {
 	struct vm_area_struct *vma;
 	struct vm_region *region;
 	struct rb_node *p;
-	int size = kobjsize(mm);
+	unsigned long size = kobjsize(mm);
 
 	down_read(&mm->mmap_sem);
 	for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) {
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index 0fed41e..84becd3 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -133,16 +133,20 @@
 EXPORT_SYMBOL(dq_data_lock);
 
 void __quota_error(struct super_block *sb, const char *func,
-		  const char *fmt, ...)
+		   const char *fmt, ...)
 {
-	va_list args;
-
 	if (printk_ratelimit()) {
+		va_list args;
+		struct va_format vaf;
+
 		va_start(args, fmt);
-		printk(KERN_ERR "Quota error (device %s): %s: ",
-		       sb->s_id, func);
-		vprintk(fmt, args);
-		printk("\n");
+
+		vaf.fmt = fmt;
+		vaf.va = &args;
+
+		printk(KERN_ERR "Quota error (device %s): %s: %pV\n",
+		       sb->s_id, func, &vaf);
+
 		va_end(args);
 	}
 }
diff --git a/fs/quota/quota_tree.c b/fs/quota/quota_tree.c
index 9e48874..e41c1becf 100644
--- a/fs/quota/quota_tree.c
+++ b/fs/quota/quota_tree.c
@@ -468,8 +468,8 @@
 		return -ENOMEM;
 	ret = read_blk(info, *blk, buf);
 	if (ret < 0) {
-		quota_error(dquot->dq_sb, "Can't read quota data "
-			    "block %u", blk);
+		quota_error(dquot->dq_sb, "Can't read quota data block %u",
+			    *blk);
 		goto out_buf;
 	}
 	newblk = le32_to_cpu(ref[get_index(info, dquot->dq_id, depth)]);
@@ -493,8 +493,9 @@
 		} else {
 			ret = write_blk(info, *blk, buf);
 			if (ret < 0)
-				quota_error(dquot->dq_sb, "Can't write quota "
-					    "tree block %u", blk);
+				quota_error(dquot->dq_sb,
+					    "Can't write quota tree block %u",
+					    *blk);
 		}
 	}
 out_buf:
diff --git a/fs/read_write.c b/fs/read_write.c
index 5d431ba..5520f8a 100644
--- a/fs/read_write.c
+++ b/fs/read_write.c
@@ -30,18 +30,9 @@
 
 EXPORT_SYMBOL(generic_ro_fops);
 
-static int
-__negative_fpos_check(struct file *file, loff_t pos, size_t count)
+static inline int unsigned_offsets(struct file *file)
 {
-	/*
-	 * pos or pos+count is negative here, check overflow.
-	 * too big "count" will be caught in rw_verify_area().
-	 */
-	if ((pos < 0) && (pos + count < pos))
-		return -EOVERFLOW;
-	if (file->f_mode & FMODE_UNSIGNED_OFFSET)
-		return 0;
-	return -EINVAL;
+	return file->f_mode & FMODE_UNSIGNED_OFFSET;
 }
 
 /**
@@ -75,7 +66,7 @@
 		break;
 	}
 
-	if (offset < 0 && __negative_fpos_check(file, offset, 0))
+	if (offset < 0 && !unsigned_offsets(file))
 		return -EINVAL;
 	if (offset > inode->i_sb->s_maxbytes)
 		return -EINVAL;
@@ -152,7 +143,7 @@
 			offset += file->f_pos;
 	}
 	retval = -EINVAL;
-	if (offset >= 0 || !__negative_fpos_check(file, offset, 0)) {
+	if (offset >= 0 || unsigned_offsets(file)) {
 		if (offset != file->f_pos) {
 			file->f_pos = offset;
 			file->f_version = 0;
@@ -252,9 +243,13 @@
 	if (unlikely((ssize_t) count < 0))
 		return retval;
 	pos = *ppos;
-	if (unlikely((pos < 0) || (loff_t) (pos + count) < 0)) {
-		retval = __negative_fpos_check(file, pos, count);
-		if (retval)
+	if (unlikely(pos < 0)) {
+		if (!unsigned_offsets(file))
+			return retval;
+		if (count >= -pos) /* both values are in 0..LLONG_MAX */
+			return -EOVERFLOW;
+	} else if (unlikely((loff_t) (pos + count) < 0)) {
+		if (!unsigned_offsets(file))
 			return retval;
 	}
 
diff --git a/fs/reiserfs/prints.c b/fs/reiserfs/prints.c
index adbc6f5..45de98b 100644
--- a/fs/reiserfs/prints.c
+++ b/fs/reiserfs/prints.c
@@ -586,13 +586,13 @@
 	va_list args;
 	int mode, first, last;
 
-	va_start(args, bh);
-
 	if (!bh) {
 		printk("print_block: buffer is NULL\n");
 		return;
 	}
 
+	va_start(args, bh);
+
 	mode = va_arg(args, int);
 	first = va_arg(args, int);
 	last = va_arg(args, int);
diff --git a/fs/select.c b/fs/select.c
index b7b10aa..e56560d 100644
--- a/fs/select.c
+++ b/fs/select.c
@@ -306,6 +306,8 @@
 		rts.tv_sec = rts.tv_nsec = 0;
 
 	if (timeval) {
+		if (sizeof(rtv) > sizeof(rtv.tv_sec) + sizeof(rtv.tv_usec))
+			memset(&rtv, 0, sizeof(rtv));
 		rtv.tv_sec = rts.tv_sec;
 		rtv.tv_usec = rts.tv_nsec / NSEC_PER_USEC;
 
diff --git a/fs/sysv/namei.c b/fs/sysv/namei.c
index b5e68da..b427b12 100644
--- a/fs/sysv/namei.c
+++ b/fs/sysv/namei.c
@@ -48,7 +48,6 @@
 	struct inode * inode = NULL;
 	ino_t ino;
 
-	d_set_d_op(dentry, dir->i_sb->s_root->d_op);
 	if (dentry->d_name.len > SYSV_NAMELEN)
 		return ERR_PTR(-ENAMETOOLONG);
 	ino = sysv_inode_by_name(dentry);
diff --git a/fs/sysv/super.c b/fs/sysv/super.c
index 76712ae..f60c196 100644
--- a/fs/sysv/super.c
+++ b/fs/sysv/super.c
@@ -332,6 +332,10 @@
 	sb->s_magic = SYSV_MAGIC_BASE + sbi->s_type;
 	/* set up enough so that it can read an inode */
 	sb->s_op = &sysv_sops;
+	if (sbi->s_forced_ro)
+		sb->s_flags |= MS_RDONLY;
+	if (sbi->s_truncate)
+		sb->s_d_op = &sysv_dentry_operations;
 	root_inode = sysv_iget(sb, SYSV_ROOT_INO);
 	if (IS_ERR(root_inode)) {
 		printk("SysV FS: get root inode failed\n");
@@ -343,10 +347,6 @@
 		printk("SysV FS: get root dentry failed\n");
 		return 0;
 	}
-	if (sbi->s_forced_ro)
-		sb->s_flags |= MS_RDONLY;
-	if (sbi->s_truncate)
-		d_set_d_op(sb->s_root, &sysv_dentry_operations);
 	return 1;
 }
 
diff --git a/fs/udf/Kconfig b/fs/udf/Kconfig
index f8def3c..0e0e99b 100644
--- a/fs/udf/Kconfig
+++ b/fs/udf/Kconfig
@@ -1,6 +1,5 @@
 config UDF_FS
 	tristate "UDF file system support"
-	depends on BKL # needs serious work to remove
 	select CRC_ITU_T
 	help
 	  This is the new file system used on some CD-ROMs and DVDs. Say Y if
diff --git a/fs/udf/balloc.c b/fs/udf/balloc.c
index b608efa..306ee39 100644
--- a/fs/udf/balloc.c
+++ b/fs/udf/balloc.c
@@ -157,10 +157,9 @@
 				udf_debug("bit %ld already set\n", bit + i);
 				udf_debug("byte=%2x\n",
 					((char *)bh->b_data)[(bit + i) >> 3]);
-			} else {
-				udf_add_free_space(sb, sbi->s_partition, 1);
 			}
 		}
+		udf_add_free_space(sb, sbi->s_partition, count);
 		mark_buffer_dirty(bh);
 		if (overflow) {
 			block += count;
diff --git a/fs/udf/dir.c b/fs/udf/dir.c
index 51552bf..eb8bfe2 100644
--- a/fs/udf/dir.c
+++ b/fs/udf/dir.c
@@ -30,7 +30,6 @@
 #include <linux/errno.h>
 #include <linux/mm.h>
 #include <linux/slab.h>
-#include <linux/smp_lock.h>
 #include <linux/buffer_head.h>
 
 #include "udf_i.h"
@@ -190,18 +189,14 @@
 	struct inode *dir = filp->f_path.dentry->d_inode;
 	int result;
 
-	lock_kernel();
-
 	if (filp->f_pos == 0) {
 		if (filldir(dirent, ".", 1, filp->f_pos, dir->i_ino, DT_DIR) < 0) {
-			unlock_kernel();
 			return 0;
 		}
 		filp->f_pos++;
 	}
 
 	result = do_udf_readdir(dir, filp, filldir, dirent);
-	unlock_kernel();
  	return result;
 }
 
diff --git a/fs/udf/file.c b/fs/udf/file.c
index 66b9e7e..89c7848 100644
--- a/fs/udf/file.c
+++ b/fs/udf/file.c
@@ -32,7 +32,6 @@
 #include <linux/string.h> /* memset */
 #include <linux/capability.h>
 #include <linux/errno.h>
-#include <linux/smp_lock.h>
 #include <linux/pagemap.h>
 #include <linux/buffer_head.h>
 #include <linux/aio.h>
@@ -114,6 +113,7 @@
 	size_t count = iocb->ki_left;
 	struct udf_inode_info *iinfo = UDF_I(inode);
 
+	down_write(&iinfo->i_data_sem);
 	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
 		if (file->f_flags & O_APPEND)
 			pos = inode->i_size;
@@ -126,6 +126,7 @@
 			udf_expand_file_adinicb(inode, pos + count, &err);
 			if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
 				udf_debug("udf_expand_adinicb: err=%d\n", err);
+				up_write(&iinfo->i_data_sem);
 				return err;
 			}
 		} else {
@@ -135,6 +136,7 @@
 				iinfo->i_lenAlloc = inode->i_size;
 		}
 	}
+	up_write(&iinfo->i_data_sem);
 
 	retval = generic_file_aio_write(iocb, iov, nr_segs, ppos);
 	if (retval > 0)
@@ -149,8 +151,6 @@
 	long old_block, new_block;
 	int result = -EINVAL;
 
-	lock_kernel();
-
 	if (file_permission(filp, MAY_READ) != 0) {
 		udf_debug("no permission to access inode %lu\n", inode->i_ino);
 		result = -EPERM;
@@ -196,7 +196,6 @@
 	}
 
 out:
-	unlock_kernel();
 	return result;
 }
 
@@ -204,10 +203,10 @@
 {
 	if (filp->f_mode & FMODE_WRITE) {
 		mutex_lock(&inode->i_mutex);
-		lock_kernel();
+		down_write(&UDF_I(inode)->i_data_sem);
 		udf_discard_prealloc(inode);
 		udf_truncate_tail_extent(inode);
-		unlock_kernel();
+		up_write(&UDF_I(inode)->i_data_sem);
 		mutex_unlock(&inode->i_mutex);
 	}
 	return 0;
diff --git a/fs/udf/ialloc.c b/fs/udf/ialloc.c
index 75d9304..6fb7e0a 100644
--- a/fs/udf/ialloc.c
+++ b/fs/udf/ialloc.c
@@ -92,28 +92,19 @@
 		return NULL;
 	}
 
-	mutex_lock(&sbi->s_alloc_mutex);
 	if (sbi->s_lvid_bh) {
-		struct logicalVolIntegrityDesc *lvid =
-			(struct logicalVolIntegrityDesc *)
-			sbi->s_lvid_bh->b_data;
-		struct logicalVolIntegrityDescImpUse *lvidiu =
-							udf_sb_lvidiu(sbi);
-		struct logicalVolHeaderDesc *lvhd;
-		uint64_t uniqueID;
-		lvhd = (struct logicalVolHeaderDesc *)
-				(lvid->logicalVolContentsUse);
+		struct logicalVolIntegrityDescImpUse *lvidiu;
+
+		iinfo->i_unique = lvid_get_unique_id(sb);
+		mutex_lock(&sbi->s_alloc_mutex);
+		lvidiu = udf_sb_lvidiu(sbi);
 		if (S_ISDIR(mode))
 			le32_add_cpu(&lvidiu->numDirs, 1);
 		else
 			le32_add_cpu(&lvidiu->numFiles, 1);
-		iinfo->i_unique = uniqueID = le64_to_cpu(lvhd->uniqueID);
-		if (!(++uniqueID & 0x00000000FFFFFFFFUL))
-			uniqueID += 16;
-		lvhd->uniqueID = cpu_to_le64(uniqueID);
 		udf_updated_lvid(sb);
+		mutex_unlock(&sbi->s_alloc_mutex);
 	}
-	mutex_unlock(&sbi->s_alloc_mutex);
 
 	inode_init_owner(inode, dir, mode);
 
diff --git a/fs/udf/inode.c b/fs/udf/inode.c
index fc48f37..c6a2e78 100644
--- a/fs/udf/inode.c
+++ b/fs/udf/inode.c
@@ -31,7 +31,6 @@
 
 #include "udfdecl.h"
 #include <linux/mm.h>
-#include <linux/smp_lock.h>
 #include <linux/module.h>
 #include <linux/pagemap.h>
 #include <linux/buffer_head.h>
@@ -51,6 +50,7 @@
 static mode_t udf_convert_permissions(struct fileEntry *);
 static int udf_update_inode(struct inode *, int);
 static void udf_fill_inode(struct inode *, struct buffer_head *);
+static int udf_sync_inode(struct inode *inode);
 static int udf_alloc_i_data(struct inode *inode, size_t size);
 static struct buffer_head *inode_getblk(struct inode *, sector_t, int *,
 					sector_t *, int *);
@@ -79,9 +79,7 @@
 		want_delete = 1;
 		inode->i_size = 0;
 		udf_truncate(inode);
-		lock_kernel();
 		udf_update_inode(inode, IS_SYNC(inode));
-		unlock_kernel();
 	}
 	invalidate_inode_buffers(inode);
 	end_writeback(inode);
@@ -97,9 +95,7 @@
 	kfree(iinfo->i_ext.i_data);
 	iinfo->i_ext.i_data = NULL;
 	if (want_delete) {
-		lock_kernel();
 		udf_free_inode(inode);
-		unlock_kernel();
 	}
 }
 
@@ -302,10 +298,9 @@
 	err = -EIO;
 	new = 0;
 	bh = NULL;
-
-	lock_kernel();
-
 	iinfo = UDF_I(inode);
+
+	down_write(&iinfo->i_data_sem);
 	if (block == iinfo->i_next_alloc_block + 1) {
 		iinfo->i_next_alloc_block++;
 		iinfo->i_next_alloc_goal++;
@@ -324,7 +319,7 @@
 	map_bh(bh_result, inode->i_sb, phys);
 
 abort:
-	unlock_kernel();
+	up_write(&iinfo->i_data_sem);
 	return err;
 }
 
@@ -1022,16 +1017,16 @@
 	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
 		return;
 
-	lock_kernel();
 	iinfo = UDF_I(inode);
 	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
+		down_write(&iinfo->i_data_sem);
 		if (inode->i_sb->s_blocksize <
 				(udf_file_entry_alloc_offset(inode) +
 				 inode->i_size)) {
 			udf_expand_file_adinicb(inode, inode->i_size, &err);
 			if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
 				inode->i_size = iinfo->i_lenAlloc;
-				unlock_kernel();
+				up_write(&iinfo->i_data_sem);
 				return;
 			} else
 				udf_truncate_extents(inode);
@@ -1042,10 +1037,13 @@
 				offset - udf_file_entry_alloc_offset(inode));
 			iinfo->i_lenAlloc = inode->i_size;
 		}
+		up_write(&iinfo->i_data_sem);
 	} else {
 		block_truncate_page(inode->i_mapping, inode->i_size,
 				    udf_get_block);
+		down_write(&iinfo->i_data_sem);
 		udf_truncate_extents(inode);
+		up_write(&iinfo->i_data_sem);
 	}
 
 	inode->i_mtime = inode->i_ctime = current_fs_time(inode->i_sb);
@@ -1053,7 +1051,6 @@
 		udf_sync_inode(inode);
 	else
 		mark_inode_dirty(inode);
-	unlock_kernel();
 }
 
 static void __udf_read_inode(struct inode *inode)
@@ -1202,6 +1199,7 @@
 		return;
 	}
 
+	read_lock(&sbi->s_cred_lock);
 	inode->i_uid = le32_to_cpu(fe->uid);
 	if (inode->i_uid == -1 ||
 	    UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_UID_IGNORE) ||
@@ -1214,13 +1212,6 @@
 	    UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_GID_SET))
 		inode->i_gid = UDF_SB(inode->i_sb)->s_gid;
 
-	inode->i_nlink = le16_to_cpu(fe->fileLinkCount);
-	if (!inode->i_nlink)
-		inode->i_nlink = 1;
-
-	inode->i_size = le64_to_cpu(fe->informationLength);
-	iinfo->i_lenExtents = inode->i_size;
-
 	if (fe->icbTag.fileType != ICBTAG_FILE_TYPE_DIRECTORY &&
 			sbi->s_fmode != UDF_INVALID_MODE)
 		inode->i_mode = sbi->s_fmode;
@@ -1230,6 +1221,14 @@
 	else
 		inode->i_mode = udf_convert_permissions(fe);
 	inode->i_mode &= ~sbi->s_umask;
+	read_unlock(&sbi->s_cred_lock);
+
+	inode->i_nlink = le16_to_cpu(fe->fileLinkCount);
+	if (!inode->i_nlink)
+		inode->i_nlink = 1;
+
+	inode->i_size = le64_to_cpu(fe->informationLength);
+	iinfo->i_lenExtents = inode->i_size;
 
 	if (iinfo->i_efe == 0) {
 		inode->i_blocks = le64_to_cpu(fe->logicalBlocksRecorded) <<
@@ -1373,16 +1372,10 @@
 
 int udf_write_inode(struct inode *inode, struct writeback_control *wbc)
 {
-	int ret;
-
-	lock_kernel();
-	ret = udf_update_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
-	unlock_kernel();
-
-	return ret;
+	return udf_update_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
 }
 
-int udf_sync_inode(struct inode *inode)
+static int udf_sync_inode(struct inode *inode)
 {
 	return udf_update_inode(inode, 1);
 }
@@ -2048,7 +2041,7 @@
 	struct extent_position epos = {};
 	int ret;
 
-	lock_kernel();
+	down_read(&UDF_I(inode)->i_data_sem);
 
 	if (inode_bmap(inode, block, &epos, &eloc, &elen, &offset) ==
 						(EXT_RECORDED_ALLOCATED >> 30))
@@ -2056,7 +2049,7 @@
 	else
 		ret = 0;
 
-	unlock_kernel();
+	up_read(&UDF_I(inode)->i_data_sem);
 	brelse(epos.bh);
 
 	if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_VARCONV))
diff --git a/fs/udf/namei.c b/fs/udf/namei.c
index 6d8dc02..2be0f9e 100644
--- a/fs/udf/namei.c
+++ b/fs/udf/namei.c
@@ -27,7 +27,6 @@
 #include <linux/errno.h>
 #include <linux/mm.h>
 #include <linux/slab.h>
-#include <linux/smp_lock.h>
 #include <linux/buffer_head.h>
 #include <linux/sched.h>
 #include <linux/crc-itu-t.h>
@@ -228,10 +227,8 @@
 		}
 
 		if ((cfi->fileCharacteristics & FID_FILE_CHAR_PARENT) &&
-		    isdotdot) {
-			brelse(epos.bh);
-			return fi;
-		}
+		    isdotdot)
+			goto out_ok;
 
 		if (!lfi)
 			continue;
@@ -263,7 +260,6 @@
 	if (dentry->d_name.len > UDF_NAME_LEN - 2)
 		return ERR_PTR(-ENAMETOOLONG);
 
-	lock_kernel();
 #ifdef UDF_RECOVERY
 	/* temporary shorthand for specifying files by inode number */
 	if (!strncmp(dentry->d_name.name, ".B=", 3)) {
@@ -275,7 +271,6 @@
 		};
 		inode = udf_iget(dir->i_sb, lb);
 		if (!inode) {
-			unlock_kernel();
 			return ERR_PTR(-EACCES);
 		}
 	} else
@@ -291,11 +286,9 @@
 		loc = lelb_to_cpu(cfi.icb.extLocation);
 		inode = udf_iget(dir->i_sb, &loc);
 		if (!inode) {
-			unlock_kernel();
 			return ERR_PTR(-EACCES);
 		}
 	}
-	unlock_kernel();
 
 	return d_splice_alias(inode, dentry);
 }
@@ -476,15 +469,19 @@
 				f_pos >> dir->i_sb->s_blocksize_bits, 1, err);
 		if (!fibh->ebh)
 			goto out_err;
+		/* Extents could have been merged, invalidate our position */
+		brelse(epos.bh);
+		epos.bh = NULL;
+		epos.block = dinfo->i_location;
+		epos.offset = udf_file_entry_alloc_offset(dir);
 
 		if (!fibh->soffset) {
-			if (udf_next_aext(dir, &epos, &eloc, &elen, 1) ==
-			    (EXT_RECORDED_ALLOCATED >> 30)) {
-				block = eloc.logicalBlockNum + ((elen - 1) >>
+			/* Find the freshly allocated block */
+			while (udf_next_aext(dir, &epos, &eloc, &elen, 1) ==
+				(EXT_RECORDED_ALLOCATED >> 30))
+				;
+			block = eloc.logicalBlockNum + ((elen - 1) >>
 					dir->i_sb->s_blocksize_bits);
-			} else
-				block++;
-
 			brelse(fibh->sbh);
 			fibh->sbh = fibh->ebh;
 			fi = (struct fileIdentDesc *)(fibh->sbh->b_data);
@@ -562,10 +559,8 @@
 	int err;
 	struct udf_inode_info *iinfo;
 
-	lock_kernel();
 	inode = udf_new_inode(dir, mode, &err);
 	if (!inode) {
-		unlock_kernel();
 		return err;
 	}
 
@@ -583,7 +578,6 @@
 		inode->i_nlink--;
 		mark_inode_dirty(inode);
 		iput(inode);
-		unlock_kernel();
 		return err;
 	}
 	cfi.icb.extLength = cpu_to_le32(inode->i_sb->s_blocksize);
@@ -596,7 +590,6 @@
 	if (fibh.sbh != fibh.ebh)
 		brelse(fibh.ebh);
 	brelse(fibh.sbh);
-	unlock_kernel();
 	d_instantiate(dentry, inode);
 
 	return 0;
@@ -614,7 +607,6 @@
 	if (!old_valid_dev(rdev))
 		return -EINVAL;
 
-	lock_kernel();
 	err = -EIO;
 	inode = udf_new_inode(dir, mode, &err);
 	if (!inode)
@@ -627,7 +619,6 @@
 		inode->i_nlink--;
 		mark_inode_dirty(inode);
 		iput(inode);
-		unlock_kernel();
 		return err;
 	}
 	cfi.icb.extLength = cpu_to_le32(inode->i_sb->s_blocksize);
@@ -646,7 +637,6 @@
 	err = 0;
 
 out:
-	unlock_kernel();
 	return err;
 }
 
@@ -659,7 +649,6 @@
 	struct udf_inode_info *dinfo = UDF_I(dir);
 	struct udf_inode_info *iinfo;
 
-	lock_kernel();
 	err = -EMLINK;
 	if (dir->i_nlink >= (256 << sizeof(dir->i_nlink)) - 1)
 		goto out;
@@ -712,7 +701,6 @@
 	err = 0;
 
 out:
-	unlock_kernel();
 	return err;
 }
 
@@ -794,7 +782,6 @@
 	struct kernel_lb_addr tloc;
 
 	retval = -ENOENT;
-	lock_kernel();
 	fi = udf_find_entry(dir, &dentry->d_name, &fibh, &cfi);
 	if (!fi)
 		goto out;
@@ -826,7 +813,6 @@
 	brelse(fibh.sbh);
 
 out:
-	unlock_kernel();
 	return retval;
 }
 
@@ -840,7 +826,6 @@
 	struct kernel_lb_addr tloc;
 
 	retval = -ENOENT;
-	lock_kernel();
 	fi = udf_find_entry(dir, &dentry->d_name, &fibh, &cfi);
 	if (!fi)
 		goto out;
@@ -870,7 +855,6 @@
 	brelse(fibh.sbh);
 
 out:
-	unlock_kernel();
 	return retval;
 }
 
@@ -890,21 +874,21 @@
 	int block;
 	unsigned char *name = NULL;
 	int namelen;
-	struct buffer_head *bh;
 	struct udf_inode_info *iinfo;
+	struct super_block *sb = dir->i_sb;
 
-	lock_kernel();
 	inode = udf_new_inode(dir, S_IFLNK | S_IRWXUGO, &err);
 	if (!inode)
 		goto out;
 
+	iinfo = UDF_I(inode);
+	down_write(&iinfo->i_data_sem);
 	name = kmalloc(UDF_NAME_LEN, GFP_NOFS);
 	if (!name) {
 		err = -ENOMEM;
 		goto out_no_entry;
 	}
 
-	iinfo = UDF_I(inode);
 	inode->i_data.a_ops = &udf_symlink_aops;
 	inode->i_op = &udf_symlink_inode_operations;
 
@@ -912,7 +896,7 @@
 		struct kernel_lb_addr eloc;
 		uint32_t bsize;
 
-		block = udf_new_block(inode->i_sb, inode,
+		block = udf_new_block(sb, inode,
 				iinfo->i_location.partitionReferenceNum,
 				iinfo->i_location.logicalBlockNum, &err);
 		if (!block)
@@ -923,17 +907,17 @@
 		eloc.logicalBlockNum = block;
 		eloc.partitionReferenceNum =
 				iinfo->i_location.partitionReferenceNum;
-		bsize = inode->i_sb->s_blocksize;
+		bsize = sb->s_blocksize;
 		iinfo->i_lenExtents = bsize;
 		udf_add_aext(inode, &epos, &eloc, bsize, 0);
 		brelse(epos.bh);
 
-		block = udf_get_pblock(inode->i_sb, block,
+		block = udf_get_pblock(sb, block,
 				iinfo->i_location.partitionReferenceNum,
 				0);
-		epos.bh = udf_tgetblk(inode->i_sb, block);
+		epos.bh = udf_tgetblk(sb, block);
 		lock_buffer(epos.bh);
-		memset(epos.bh->b_data, 0x00, inode->i_sb->s_blocksize);
+		memset(epos.bh->b_data, 0x00, bsize);
 		set_buffer_uptodate(epos.bh);
 		unlock_buffer(epos.bh);
 		mark_buffer_dirty_inode(epos.bh, inode);
@@ -941,7 +925,7 @@
 	} else
 		ea = iinfo->i_ext.i_data + iinfo->i_lenEAttr;
 
-	eoffset = inode->i_sb->s_blocksize - udf_ext0_offset(inode);
+	eoffset = sb->s_blocksize - udf_ext0_offset(inode);
 	pc = (struct pathComponent *)ea;
 
 	if (*symname == '/') {
@@ -981,7 +965,7 @@
 		}
 
 		if (pc->componentType == 5) {
-			namelen = udf_put_filename(inode->i_sb, compstart, name,
+			namelen = udf_put_filename(sb, compstart, name,
 						   symname - compstart);
 			if (!namelen)
 				goto out_no_entry;
@@ -1015,27 +999,16 @@
 	fi = udf_add_entry(dir, dentry, &fibh, &cfi, &err);
 	if (!fi)
 		goto out_no_entry;
-	cfi.icb.extLength = cpu_to_le32(inode->i_sb->s_blocksize);
+	cfi.icb.extLength = cpu_to_le32(sb->s_blocksize);
 	cfi.icb.extLocation = cpu_to_lelb(iinfo->i_location);
-	bh = UDF_SB(inode->i_sb)->s_lvid_bh;
-	if (bh) {
-		struct logicalVolIntegrityDesc *lvid =
-				(struct logicalVolIntegrityDesc *)bh->b_data;
-		struct logicalVolHeaderDesc *lvhd;
-		uint64_t uniqueID;
-		lvhd = (struct logicalVolHeaderDesc *)
-				lvid->logicalVolContentsUse;
-		uniqueID = le64_to_cpu(lvhd->uniqueID);
+	if (UDF_SB(inode->i_sb)->s_lvid_bh) {
 		*(__le32 *)((struct allocDescImpUse *)cfi.icb.impUse)->impUse =
-			cpu_to_le32(uniqueID & 0x00000000FFFFFFFFUL);
-		if (!(++uniqueID & 0x00000000FFFFFFFFUL))
-			uniqueID += 16;
-		lvhd->uniqueID = cpu_to_le64(uniqueID);
-		mark_buffer_dirty(bh);
+			cpu_to_le32(lvid_get_unique_id(sb));
 	}
 	udf_write_fi(dir, &cfi, fi, &fibh, NULL, NULL);
 	if (UDF_I(dir)->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
 		mark_inode_dirty(dir);
+	up_write(&iinfo->i_data_sem);
 	if (fibh.sbh != fibh.ebh)
 		brelse(fibh.ebh);
 	brelse(fibh.sbh);
@@ -1044,10 +1017,10 @@
 
 out:
 	kfree(name);
-	unlock_kernel();
 	return err;
 
 out_no_entry:
+	up_write(&iinfo->i_data_sem);
 	inode_dec_link_count(inode);
 	iput(inode);
 	goto out;
@@ -1060,36 +1033,20 @@
 	struct udf_fileident_bh fibh;
 	struct fileIdentDesc cfi, *fi;
 	int err;
-	struct buffer_head *bh;
 
-	lock_kernel();
 	if (inode->i_nlink >= (256 << sizeof(inode->i_nlink)) - 1) {
-		unlock_kernel();
 		return -EMLINK;
 	}
 
 	fi = udf_add_entry(dir, dentry, &fibh, &cfi, &err);
 	if (!fi) {
-		unlock_kernel();
 		return err;
 	}
 	cfi.icb.extLength = cpu_to_le32(inode->i_sb->s_blocksize);
 	cfi.icb.extLocation = cpu_to_lelb(UDF_I(inode)->i_location);
-	bh = UDF_SB(inode->i_sb)->s_lvid_bh;
-	if (bh) {
-		struct logicalVolIntegrityDesc *lvid =
-				(struct logicalVolIntegrityDesc *)bh->b_data;
-		struct logicalVolHeaderDesc *lvhd;
-		uint64_t uniqueID;
-		lvhd = (struct logicalVolHeaderDesc *)
-				(lvid->logicalVolContentsUse);
-		uniqueID = le64_to_cpu(lvhd->uniqueID);
+	if (UDF_SB(inode->i_sb)->s_lvid_bh) {
 		*(__le32 *)((struct allocDescImpUse *)cfi.icb.impUse)->impUse =
-			cpu_to_le32(uniqueID & 0x00000000FFFFFFFFUL);
-		if (!(++uniqueID & 0x00000000FFFFFFFFUL))
-			uniqueID += 16;
-		lvhd->uniqueID = cpu_to_le64(uniqueID);
-		mark_buffer_dirty(bh);
+			cpu_to_le32(lvid_get_unique_id(inode->i_sb));
 	}
 	udf_write_fi(dir, &cfi, fi, &fibh, NULL, NULL);
 	if (UDF_I(dir)->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
@@ -1103,7 +1060,6 @@
 	mark_inode_dirty(inode);
 	ihold(inode);
 	d_instantiate(dentry, inode);
-	unlock_kernel();
 
 	return 0;
 }
@@ -1124,7 +1080,6 @@
 	struct kernel_lb_addr tloc;
 	struct udf_inode_info *old_iinfo = UDF_I(old_inode);
 
-	lock_kernel();
 	ofi = udf_find_entry(old_dir, &old_dentry->d_name, &ofibh, &ocfi);
 	if (ofi) {
 		if (ofibh.sbh != ofibh.ebh)
@@ -1248,7 +1203,6 @@
 			brelse(nfibh.ebh);
 		brelse(nfibh.sbh);
 	}
-	unlock_kernel();
 
 	return retval;
 }
@@ -1261,7 +1215,6 @@
 	struct fileIdentDesc cfi;
 	struct udf_fileident_bh fibh;
 
-	lock_kernel();
 	if (!udf_find_entry(child->d_inode, &dotdot, &fibh, &cfi))
 		goto out_unlock;
 
@@ -1273,11 +1226,9 @@
 	inode = udf_iget(child->d_inode->i_sb, &tloc);
 	if (!inode)
 		goto out_unlock;
-	unlock_kernel();
 
 	return d_obtain_alias(inode);
 out_unlock:
-	unlock_kernel();
 	return ERR_PTR(-EACCES);
 }
 
diff --git a/fs/udf/partition.c b/fs/udf/partition.c
index 745eb20..a71090e 100644
--- a/fs/udf/partition.c
+++ b/fs/udf/partition.c
@@ -25,6 +25,7 @@
 #include <linux/fs.h>
 #include <linux/string.h>
 #include <linux/buffer_head.h>
+#include <linux/mutex.h>
 
 uint32_t udf_get_pblock(struct super_block *sb, uint32_t block,
 			uint16_t partition, uint32_t offset)
@@ -159,7 +160,9 @@
 	struct udf_sb_info *sbi = UDF_SB(sb);
 	u16 reallocationTableLen;
 	struct buffer_head *bh;
+	int ret = 0;
 
+	mutex_lock(&sbi->s_alloc_mutex);
 	for (i = 0; i < sbi->s_partitions; i++) {
 		struct udf_part_map *map = &sbi->s_partmaps[i];
 		if (old_block > map->s_partition_root &&
@@ -175,8 +178,10 @@
 					break;
 				}
 
-			if (!st)
-				return 1;
+			if (!st) {
+				ret = 1;
+				goto out;
+			}
 
 			reallocationTableLen =
 					le16_to_cpu(st->reallocationTableLen);
@@ -207,14 +212,16 @@
 						     ((old_block -
 							map->s_partition_root) &
 						     (sdata->s_packet_len - 1));
-					return 0;
+					ret = 0;
+					goto out;
 				} else if (origLoc == packet) {
 					*new_block = le32_to_cpu(
 							entry->mappedLocation) +
 						     ((old_block -
 							map->s_partition_root) &
 						     (sdata->s_packet_len - 1));
-					return 0;
+					ret = 0;
+					goto out;
 				} else if (origLoc > packet)
 					break;
 			}
@@ -251,20 +258,24 @@
 					      st->mapEntry[k].mappedLocation) +
 					((old_block - map->s_partition_root) &
 					 (sdata->s_packet_len - 1));
-				return 0;
+				ret = 0;
+				goto out;
 			}
 
-			return 1;
+			ret = 1;
+			goto out;
 		} /* if old_block */
 	}
 
 	if (i == sbi->s_partitions) {
 		/* outside of partitions */
 		/* for now, fail =) */
-		return 1;
+		ret = 1;
 	}
 
-	return 0;
+out:
+	mutex_unlock(&sbi->s_alloc_mutex);
+	return ret;
 }
 
 static uint32_t udf_try_read_meta(struct inode *inode, uint32_t block,
diff --git a/fs/udf/super.c b/fs/udf/super.c
index b539d53..7b27b06 100644
--- a/fs/udf/super.c
+++ b/fs/udf/super.c
@@ -48,7 +48,6 @@
 #include <linux/stat.h>
 #include <linux/cdrom.h>
 #include <linux/nls.h>
-#include <linux/smp_lock.h>
 #include <linux/buffer_head.h>
 #include <linux/vfs.h>
 #include <linux/vmalloc.h>
@@ -135,6 +134,7 @@
 	ei->i_next_alloc_block = 0;
 	ei->i_next_alloc_goal = 0;
 	ei->i_strat4096 = 0;
+	init_rwsem(&ei->i_data_sem);
 
 	return &ei->vfs_inode;
 }
@@ -574,13 +574,14 @@
 	if (!udf_parse_options(options, &uopt, true))
 		return -EINVAL;
 
-	lock_kernel();
+	write_lock(&sbi->s_cred_lock);
 	sbi->s_flags = uopt.flags;
 	sbi->s_uid   = uopt.uid;
 	sbi->s_gid   = uopt.gid;
 	sbi->s_umask = uopt.umask;
 	sbi->s_fmode = uopt.fmode;
 	sbi->s_dmode = uopt.dmode;
+	write_unlock(&sbi->s_cred_lock);
 
 	if (sbi->s_lvid_bh) {
 		int write_rev = le16_to_cpu(udf_sb_lvidiu(sbi)->minUDFWriteRev);
@@ -597,7 +598,6 @@
 		udf_open_lvid(sb);
 
 out_unlock:
-	unlock_kernel();
 	return error;
 }
 
@@ -966,9 +966,9 @@
 		(sizeof(struct buffer_head *) * nr_groups);
 
 	if (size <= PAGE_SIZE)
-		bitmap = kmalloc(size, GFP_KERNEL);
+		bitmap = kzalloc(size, GFP_KERNEL);
 	else
-		bitmap = vmalloc(size); /* TODO: get rid of vmalloc */
+		bitmap = vzalloc(size); /* TODO: get rid of vzalloc */
 
 	if (bitmap == NULL) {
 		udf_error(sb, __func__,
@@ -977,7 +977,6 @@
 		return NULL;
 	}
 
-	memset(bitmap, 0x00, size);
 	bitmap->s_block_bitmap = (struct buffer_head **)(bitmap + 1);
 	bitmap->s_nr_groups = nr_groups;
 	return bitmap;
@@ -1781,6 +1780,8 @@
 
 	if (!bh)
 		return;
+
+	mutex_lock(&sbi->s_alloc_mutex);
 	lvid = (struct logicalVolIntegrityDesc *)bh->b_data;
 	lvidiu = udf_sb_lvidiu(sbi);
 
@@ -1797,6 +1798,7 @@
 	lvid->descTag.tagChecksum = udf_tag_checksum(&lvid->descTag);
 	mark_buffer_dirty(bh);
 	sbi->s_lvid_dirty = 0;
+	mutex_unlock(&sbi->s_alloc_mutex);
 }
 
 static void udf_close_lvid(struct super_block *sb)
@@ -1809,6 +1811,7 @@
 	if (!bh)
 		return;
 
+	mutex_lock(&sbi->s_alloc_mutex);
 	lvid = (struct logicalVolIntegrityDesc *)bh->b_data;
 	lvidiu = udf_sb_lvidiu(sbi);
 	lvidiu->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
@@ -1829,6 +1832,34 @@
 	lvid->descTag.tagChecksum = udf_tag_checksum(&lvid->descTag);
 	mark_buffer_dirty(bh);
 	sbi->s_lvid_dirty = 0;
+	mutex_unlock(&sbi->s_alloc_mutex);
+}
+
+u64 lvid_get_unique_id(struct super_block *sb)
+{
+	struct buffer_head *bh;
+	struct udf_sb_info *sbi = UDF_SB(sb);
+	struct logicalVolIntegrityDesc *lvid;
+	struct logicalVolHeaderDesc *lvhd;
+	u64 uniqueID;
+	u64 ret;
+
+	bh = sbi->s_lvid_bh;
+	if (!bh)
+		return 0;
+
+	lvid = (struct logicalVolIntegrityDesc *)bh->b_data;
+	lvhd = (struct logicalVolHeaderDesc *)lvid->logicalVolContentsUse;
+
+	mutex_lock(&sbi->s_alloc_mutex);
+	ret = uniqueID = le64_to_cpu(lvhd->uniqueID);
+	if (!(++uniqueID & 0xFFFFFFFF))
+		uniqueID += 16;
+	lvhd->uniqueID = cpu_to_le64(uniqueID);
+	mutex_unlock(&sbi->s_alloc_mutex);
+	mark_buffer_dirty(bh);
+
+	return ret;
 }
 
 static void udf_sb_free_bitmap(struct udf_bitmap *bitmap)
@@ -1886,8 +1917,6 @@
 	struct kernel_lb_addr rootdir, fileset;
 	struct udf_sb_info *sbi;
 
-	lock_kernel();
-
 	uopt.flags = (1 << UDF_FLAG_USE_AD_IN_ICB) | (1 << UDF_FLAG_STRICT);
 	uopt.uid = -1;
 	uopt.gid = -1;
@@ -1896,10 +1925,8 @@
 	uopt.dmode = UDF_INVALID_MODE;
 
 	sbi = kzalloc(sizeof(struct udf_sb_info), GFP_KERNEL);
-	if (!sbi) {
-		unlock_kernel();
+	if (!sbi)
 		return -ENOMEM;
-	}
 
 	sb->s_fs_info = sbi;
 
@@ -1936,6 +1963,7 @@
 	sbi->s_fmode = uopt.fmode;
 	sbi->s_dmode = uopt.dmode;
 	sbi->s_nls_map = uopt.nls_map;
+	rwlock_init(&sbi->s_cred_lock);
 
 	if (uopt.session == 0xFFFFFFFF)
 		sbi->s_session = udf_get_last_session(sb);
@@ -2045,7 +2073,6 @@
 		goto error_out;
 	}
 	sb->s_maxbytes = MAX_LFS_FILESIZE;
-	unlock_kernel();
 	return 0;
 
 error_out:
@@ -2066,7 +2093,6 @@
 	kfree(sbi);
 	sb->s_fs_info = NULL;
 
-	unlock_kernel();
 	return -EINVAL;
 }
 
@@ -2105,8 +2131,6 @@
 
 	sbi = UDF_SB(sb);
 
-	lock_kernel();
-
 	if (sbi->s_vat_inode)
 		iput(sbi->s_vat_inode);
 	if (sbi->s_partitions)
@@ -2122,8 +2146,6 @@
 	kfree(sbi->s_partmaps);
 	kfree(sb->s_fs_info);
 	sb->s_fs_info = NULL;
-
-	unlock_kernel();
 }
 
 static int udf_sync_fs(struct super_block *sb, int wait)
@@ -2186,8 +2208,6 @@
 	uint16_t ident;
 	struct spaceBitmapDesc *bm;
 
-	lock_kernel();
-
 	loc.logicalBlockNum = bitmap->s_extPosition;
 	loc.partitionReferenceNum = UDF_SB(sb)->s_partition;
 	bh = udf_read_ptagged(sb, &loc, 0, &ident);
@@ -2224,10 +2244,7 @@
 		}
 	}
 	brelse(bh);
-
 out:
-	unlock_kernel();
-
 	return accum;
 }
 
@@ -2240,8 +2257,7 @@
 	int8_t etype;
 	struct extent_position epos;
 
-	lock_kernel();
-
+	mutex_lock(&UDF_SB(sb)->s_alloc_mutex);
 	epos.block = UDF_I(table)->i_location;
 	epos.offset = sizeof(struct unallocSpaceEntry);
 	epos.bh = NULL;
@@ -2250,8 +2266,7 @@
 		accum += (elen >> table->i_sb->s_blocksize_bits);
 
 	brelse(epos.bh);
-
-	unlock_kernel();
+	mutex_unlock(&UDF_SB(sb)->s_alloc_mutex);
 
 	return accum;
 }
diff --git a/fs/udf/symlink.c b/fs/udf/symlink.c
index 1606478..b1d4488 100644
--- a/fs/udf/symlink.c
+++ b/fs/udf/symlink.c
@@ -27,7 +27,6 @@
 #include <linux/mm.h>
 #include <linux/stat.h>
 #include <linux/pagemap.h>
-#include <linux/smp_lock.h>
 #include <linux/buffer_head.h>
 #include "udf_i.h"
 
@@ -78,13 +77,16 @@
 	int err = -EIO;
 	unsigned char *p = kmap(page);
 	struct udf_inode_info *iinfo;
+	uint32_t pos;
 
-	lock_kernel();
 	iinfo = UDF_I(inode);
+	pos = udf_block_map(inode, 0);
+
+	down_read(&iinfo->i_data_sem);
 	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
 		symlink = iinfo->i_ext.i_data + iinfo->i_lenEAttr;
 	} else {
-		bh = sb_bread(inode->i_sb, udf_block_map(inode, 0));
+		bh = sb_bread(inode->i_sb, pos);
 
 		if (!bh)
 			goto out;
@@ -95,14 +97,14 @@
 	udf_pc_to_char(inode->i_sb, symlink, inode->i_size, p);
 	brelse(bh);
 
-	unlock_kernel();
+	up_read(&iinfo->i_data_sem);
 	SetPageUptodate(page);
 	kunmap(page);
 	unlock_page(page);
 	return 0;
 
 out:
-	unlock_kernel();
+	up_read(&iinfo->i_data_sem);
 	SetPageError(page);
 	kunmap(page);
 	unlock_page(page);
diff --git a/fs/udf/udf_i.h b/fs/udf/udf_i.h
index e58d1de..d1bd31e 100644
--- a/fs/udf/udf_i.h
+++ b/fs/udf/udf_i.h
@@ -1,6 +1,18 @@
 #ifndef _UDF_I_H
 #define _UDF_I_H
 
+/*
+ * The i_data_sem and i_mutex serve for protection of allocation information
+ * of regular files and symlinks. This includes all extents belonging to
+ * the file/symlink, whether the data is stored in-inode or in external
+ * data blocks, preallocation, goal block information... When extents are
+ * read, i_mutex or i_data_sem must be held (holding i_data_sem for reading
+ * is enough). When extents are changed, i_data_sem must be held for writing
+ * and also i_mutex must be held.
+ *
+ * For directories i_mutex is used for all the necessary protection.
+ */
+
 struct udf_inode_info {
 	struct timespec		i_crtime;
 	/* Physical address of inode */
@@ -21,6 +33,7 @@
 		struct long_ad		*i_lad;
 		__u8		*i_data;
 	} i_ext;
+	struct rw_semaphore	i_data_sem;
 	struct inode vfs_inode;
 };
 
diff --git a/fs/udf/udf_sb.h b/fs/udf/udf_sb.h
index d113b72..4858c19 100644
--- a/fs/udf/udf_sb.h
+++ b/fs/udf/udf_sb.h
@@ -2,6 +2,7 @@
 #define __LINUX_UDF_SB_H
 
 #include <linux/mutex.h>
+#include <linux/bitops.h>
 
 /* Since UDF 2.01 is ISO 13346 based... */
 #define UDF_SUPER_MAGIC			0x15013346
@@ -128,6 +129,8 @@
 	uid_t			s_uid;
 	mode_t			s_fmode;
 	mode_t			s_dmode;
+	/* Lock protecting consistency of above permission settings */
+	rwlock_t		s_cred_lock;
 
 	/* Root Info */
 	struct timespec		s_record_time;
@@ -139,7 +142,7 @@
 	__u16			s_udfrev;
 
 	/* Miscellaneous flags */
-	__u32			s_flags;
+	unsigned long		s_flags;
 
 	/* Encoding info */
 	struct nls_table	*s_nls_map;
@@ -161,8 +164,19 @@
 
 int udf_compute_nr_groups(struct super_block *sb, u32 partition);
 
-#define UDF_QUERY_FLAG(X,Y)			( UDF_SB(X)->s_flags & ( 1 << (Y) ) )
-#define UDF_SET_FLAG(X,Y)			( UDF_SB(X)->s_flags |= ( 1 << (Y) ) )
-#define UDF_CLEAR_FLAG(X,Y)			( UDF_SB(X)->s_flags &= ~( 1 << (Y) ) )
+static inline int UDF_QUERY_FLAG(struct super_block *sb, int flag)
+{
+	return test_bit(flag, &UDF_SB(sb)->s_flags);
+}
+
+static inline void UDF_SET_FLAG(struct super_block *sb, int flag)
+{
+	set_bit(flag, &UDF_SB(sb)->s_flags);
+}
+
+static inline void UDF_CLEAR_FLAG(struct super_block *sb, int flag)
+{
+	clear_bit(flag, &UDF_SB(sb)->s_flags);
+}
 
 #endif /* __LINUX_UDF_SB_H */
diff --git a/fs/udf/udfdecl.h b/fs/udf/udfdecl.h
index 6995ab1..eba4820 100644
--- a/fs/udf/udfdecl.h
+++ b/fs/udf/udfdecl.h
@@ -111,6 +111,8 @@
 };
 
 /* super.c */
+
+__attribute__((format(printf, 3, 4)))
 extern void udf_warning(struct super_block *, const char *, const char *, ...);
 static inline void udf_updated_lvid(struct super_block *sb)
 {
@@ -123,6 +125,7 @@
 	sb->s_dirt = 1;
 	UDF_SB(sb)->s_lvid_dirty = 1;
 }
+extern u64 lvid_get_unique_id(struct super_block *sb);
 
 /* namei.c */
 extern int udf_write_fi(struct inode *inode, struct fileIdentDesc *,
@@ -133,7 +136,6 @@
 extern long udf_ioctl(struct file *, unsigned int, unsigned long);
 /* inode.c */
 extern struct inode *udf_iget(struct super_block *, struct kernel_lb_addr *);
-extern int udf_sync_inode(struct inode *);
 extern void udf_expand_file_adinicb(struct inode *, int, int *);
 extern struct buffer_head *udf_expand_dir_adinicb(struct inode *, int *, int *);
 extern struct buffer_head *udf_bread(struct inode *, int, int, int *);
diff --git a/fs/xfs/linux-2.6/sv.h b/fs/xfs/linux-2.6/sv.h
deleted file mode 100644
index 4dfc7c3..0000000
--- a/fs/xfs/linux-2.6/sv.h
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
- * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it would be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write the Free Software Foundation,
- * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- */
-#ifndef __XFS_SUPPORT_SV_H__
-#define __XFS_SUPPORT_SV_H__
-
-#include <linux/wait.h>
-#include <linux/sched.h>
-#include <linux/spinlock.h>
-
-/*
- * Synchronisation variables.
- *
- * (Parameters "pri", "svf" and "rts" are not implemented)
- */
-
-typedef struct sv_s {
-	wait_queue_head_t waiters;
-} sv_t;
-
-static inline void _sv_wait(sv_t *sv, spinlock_t *lock)
-{
-	DECLARE_WAITQUEUE(wait, current);
-
-	add_wait_queue_exclusive(&sv->waiters, &wait);
-	__set_current_state(TASK_UNINTERRUPTIBLE);
-	spin_unlock(lock);
-
-	schedule();
-
-	remove_wait_queue(&sv->waiters, &wait);
-}
-
-#define sv_init(sv,flag,name) \
-	init_waitqueue_head(&(sv)->waiters)
-#define sv_destroy(sv) \
-	/*NOTHING*/
-#define sv_wait(sv, pri, lock, s) \
-	_sv_wait(sv, lock)
-#define sv_signal(sv) \
-	wake_up(&(sv)->waiters)
-#define sv_broadcast(sv) \
-	wake_up_all(&(sv)->waiters)
-
-#endif /* __XFS_SUPPORT_SV_H__ */
diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c
index 691f612..ec7bbb5 100644
--- a/fs/xfs/linux-2.6/xfs_aops.c
+++ b/fs/xfs/linux-2.6/xfs_aops.c
@@ -38,15 +38,6 @@
 #include <linux/pagevec.h>
 #include <linux/writeback.h>
 
-/*
- * Types of I/O for bmap clustering and I/O completion tracking.
- */
-enum {
-	IO_READ,	/* mapping for a read */
-	IO_DELAY,	/* mapping covers delalloc region */
-	IO_UNWRITTEN,	/* mapping covers allocated but uninitialized data */
-	IO_NEW		/* just allocated */
-};
 
 /*
  * Prime number of hash buckets since address is used as the key.
@@ -182,9 +173,6 @@
 	xfs_inode_t		*ip = XFS_I(ioend->io_inode);
 	xfs_fsize_t		isize;
 
-	ASSERT((ip->i_d.di_mode & S_IFMT) == S_IFREG);
-	ASSERT(ioend->io_type != IO_READ);
-
 	if (unlikely(ioend->io_error))
 		return 0;
 
@@ -244,10 +232,8 @@
 	 * We might have to update the on-disk file size after extending
 	 * writes.
 	 */
-	if (ioend->io_type != IO_READ) {
-		error = xfs_setfilesize(ioend);
-		ASSERT(!error || error == EAGAIN);
-	}
+	error = xfs_setfilesize(ioend);
+	ASSERT(!error || error == EAGAIN);
 
 	/*
 	 * If we didn't complete processing of the ioend, requeue it to the
@@ -318,14 +304,63 @@
 xfs_map_blocks(
 	struct inode		*inode,
 	loff_t			offset,
-	ssize_t			count,
 	struct xfs_bmbt_irec	*imap,
-	int			flags)
+	int			type,
+	int			nonblocking)
 {
-	int			nmaps = 1;
-	int			new = 0;
+	struct xfs_inode	*ip = XFS_I(inode);
+	struct xfs_mount	*mp = ip->i_mount;
+	ssize_t			count = 1 << inode->i_blkbits;
+	xfs_fileoff_t		offset_fsb, end_fsb;
+	int			error = 0;
+	int			bmapi_flags = XFS_BMAPI_ENTIRE;
+	int			nimaps = 1;
 
-	return -xfs_iomap(XFS_I(inode), offset, count, flags, imap, &nmaps, &new);
+	if (XFS_FORCED_SHUTDOWN(mp))
+		return -XFS_ERROR(EIO);
+
+	if (type == IO_UNWRITTEN)
+		bmapi_flags |= XFS_BMAPI_IGSTATE;
+
+	if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED)) {
+		if (nonblocking)
+			return -XFS_ERROR(EAGAIN);
+		xfs_ilock(ip, XFS_ILOCK_SHARED);
+	}
+
+	ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
+	       (ip->i_df.if_flags & XFS_IFEXTENTS));
+	ASSERT(offset <= mp->m_maxioffset);
+
+	if (offset + count > mp->m_maxioffset)
+		count = mp->m_maxioffset - offset;
+	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
+	offset_fsb = XFS_B_TO_FSBT(mp, offset);
+	error = xfs_bmapi(NULL, ip, offset_fsb, end_fsb - offset_fsb,
+			  bmapi_flags,  NULL, 0, imap, &nimaps, NULL);
+	xfs_iunlock(ip, XFS_ILOCK_SHARED);
+
+	if (error)
+		return -XFS_ERROR(error);
+
+	if (type == IO_DELALLOC &&
+	    (!nimaps || isnullstartblock(imap->br_startblock))) {
+		error = xfs_iomap_write_allocate(ip, offset, count, imap);
+		if (!error)
+			trace_xfs_map_blocks_alloc(ip, offset, count, type, imap);
+		return -XFS_ERROR(error);
+	}
+
+#ifdef DEBUG
+	if (type == IO_UNWRITTEN) {
+		ASSERT(nimaps);
+		ASSERT(imap->br_startblock != HOLESTARTBLOCK);
+		ASSERT(imap->br_startblock != DELAYSTARTBLOCK);
+	}
+#endif
+	if (nimaps)
+		trace_xfs_map_blocks_found(ip, offset, count, type, imap);
+	return 0;
 }
 
 STATIC int
@@ -380,26 +415,18 @@
 
 	submit_bio(wbc->sync_mode == WB_SYNC_ALL ?
 		   WRITE_SYNC_PLUG : WRITE, bio);
-	ASSERT(!bio_flagged(bio, BIO_EOPNOTSUPP));
-	bio_put(bio);
 }
 
 STATIC struct bio *
 xfs_alloc_ioend_bio(
 	struct buffer_head	*bh)
 {
-	struct bio		*bio;
 	int			nvecs = bio_get_nr_vecs(bh->b_bdev);
-
-	do {
-		bio = bio_alloc(GFP_NOIO, nvecs);
-		nvecs >>= 1;
-	} while (!bio);
+	struct bio		*bio = bio_alloc(GFP_NOIO, nvecs);
 
 	ASSERT(bio->bi_private == NULL);
 	bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
 	bio->bi_bdev = bh->b_bdev;
-	bio_get(bio);
 	return bio;
 }
 
@@ -470,9 +497,8 @@
 	/* Pass 1 - start writeback */
 	do {
 		next = ioend->io_list;
-		for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) {
+		for (bh = ioend->io_buffer_head; bh; bh = bh->b_private)
 			xfs_start_buffer_writeback(bh);
-		}
 	} while ((ioend = next) != NULL);
 
 	/* Pass 2 - submit I/O */
@@ -600,117 +626,13 @@
 	ASSERT(imap->br_startblock != HOLESTARTBLOCK);
 	ASSERT(imap->br_startblock != DELAYSTARTBLOCK);
 
-	lock_buffer(bh);
 	xfs_map_buffer(inode, bh, imap, offset);
-	bh->b_bdev = xfs_find_bdev_for_inode(inode);
 	set_buffer_mapped(bh);
 	clear_buffer_delay(bh);
 	clear_buffer_unwritten(bh);
 }
 
 /*
- * Look for a page at index that is suitable for clustering.
- */
-STATIC unsigned int
-xfs_probe_page(
-	struct page		*page,
-	unsigned int		pg_offset)
-{
-	struct buffer_head	*bh, *head;
-	int			ret = 0;
-
-	if (PageWriteback(page))
-		return 0;
-	if (!PageDirty(page))
-		return 0;
-	if (!page->mapping)
-		return 0;
-	if (!page_has_buffers(page))
-		return 0;
-
-	bh = head = page_buffers(page);
-	do {
-		if (!buffer_uptodate(bh))
-			break;
-		if (!buffer_mapped(bh))
-			break;
-		ret += bh->b_size;
-		if (ret >= pg_offset)
-			break;
-	} while ((bh = bh->b_this_page) != head);
-
-	return ret;
-}
-
-STATIC size_t
-xfs_probe_cluster(
-	struct inode		*inode,
-	struct page		*startpage,
-	struct buffer_head	*bh,
-	struct buffer_head	*head)
-{
-	struct pagevec		pvec;
-	pgoff_t			tindex, tlast, tloff;
-	size_t			total = 0;
-	int			done = 0, i;
-
-	/* First sum forwards in this page */
-	do {
-		if (!buffer_uptodate(bh) || !buffer_mapped(bh))
-			return total;
-		total += bh->b_size;
-	} while ((bh = bh->b_this_page) != head);
-
-	/* if we reached the end of the page, sum forwards in following pages */
-	tlast = i_size_read(inode) >> PAGE_CACHE_SHIFT;
-	tindex = startpage->index + 1;
-
-	/* Prune this back to avoid pathological behavior */
-	tloff = min(tlast, startpage->index + 64);
-
-	pagevec_init(&pvec, 0);
-	while (!done && tindex <= tloff) {
-		unsigned len = min_t(pgoff_t, PAGEVEC_SIZE, tlast - tindex + 1);
-
-		if (!pagevec_lookup(&pvec, inode->i_mapping, tindex, len))
-			break;
-
-		for (i = 0; i < pagevec_count(&pvec); i++) {
-			struct page *page = pvec.pages[i];
-			size_t pg_offset, pg_len = 0;
-
-			if (tindex == tlast) {
-				pg_offset =
-				    i_size_read(inode) & (PAGE_CACHE_SIZE - 1);
-				if (!pg_offset) {
-					done = 1;
-					break;
-				}
-			} else
-				pg_offset = PAGE_CACHE_SIZE;
-
-			if (page->index == tindex && trylock_page(page)) {
-				pg_len = xfs_probe_page(page, pg_offset);
-				unlock_page(page);
-			}
-
-			if (!pg_len) {
-				done = 1;
-				break;
-			}
-
-			total += pg_len;
-			tindex++;
-		}
-
-		pagevec_release(&pvec);
-		cond_resched();
-	}
-
-	return total;
-}
-
-/*
  * Test if a given page is suitable for writing as part of an unwritten
  * or delayed allocate extent.
  */
@@ -731,9 +653,9 @@
 			if (buffer_unwritten(bh))
 				acceptable = (type == IO_UNWRITTEN);
 			else if (buffer_delay(bh))
-				acceptable = (type == IO_DELAY);
+				acceptable = (type == IO_DELALLOC);
 			else if (buffer_dirty(bh) && buffer_mapped(bh))
-				acceptable = (type == IO_NEW);
+				acceptable = (type == IO_OVERWRITE);
 			else
 				break;
 		} while ((bh = bh->b_this_page) != head);
@@ -758,8 +680,7 @@
 	loff_t			tindex,
 	struct xfs_bmbt_irec	*imap,
 	xfs_ioend_t		**ioendp,
-	struct writeback_control *wbc,
-	int			all_bh)
+	struct writeback_control *wbc)
 {
 	struct buffer_head	*bh, *head;
 	xfs_off_t		end_offset;
@@ -814,37 +735,30 @@
 			continue;
 		}
 
-		if (buffer_unwritten(bh) || buffer_delay(bh)) {
+		if (buffer_unwritten(bh) || buffer_delay(bh) ||
+		    buffer_mapped(bh)) {
 			if (buffer_unwritten(bh))
 				type = IO_UNWRITTEN;
+			else if (buffer_delay(bh))
+				type = IO_DELALLOC;
 			else
-				type = IO_DELAY;
+				type = IO_OVERWRITE;
 
 			if (!xfs_imap_valid(inode, imap, offset)) {
 				done = 1;
 				continue;
 			}
 
-			ASSERT(imap->br_startblock != HOLESTARTBLOCK);
-			ASSERT(imap->br_startblock != DELAYSTARTBLOCK);
-
-			xfs_map_at_offset(inode, bh, imap, offset);
+			lock_buffer(bh);
+			if (type != IO_OVERWRITE)
+				xfs_map_at_offset(inode, bh, imap, offset);
 			xfs_add_to_ioend(inode, bh, offset, type,
 					 ioendp, done);
 
 			page_dirty--;
 			count++;
 		} else {
-			type = IO_NEW;
-			if (buffer_mapped(bh) && all_bh) {
-				lock_buffer(bh);
-				xfs_add_to_ioend(inode, bh, offset,
-						type, ioendp, done);
-				count++;
-				page_dirty--;
-			} else {
-				done = 1;
-			}
+			done = 1;
 		}
 	} while (offset += len, (bh = bh->b_this_page) != head);
 
@@ -876,7 +790,6 @@
 	struct xfs_bmbt_irec	*imap,
 	xfs_ioend_t		**ioendp,
 	struct writeback_control *wbc,
-	int			all_bh,
 	pgoff_t			tlast)
 {
 	struct pagevec		pvec;
@@ -891,7 +804,7 @@
 
 		for (i = 0; i < pagevec_count(&pvec); i++) {
 			done = xfs_convert_page(inode, pvec.pages[i], tindex++,
-					imap, ioendp, wbc, all_bh);
+					imap, ioendp, wbc);
 			if (done)
 				break;
 		}
@@ -935,7 +848,7 @@
 	struct buffer_head	*bh, *head;
 	loff_t			offset = page_offset(page);
 
-	if (!xfs_is_delayed_page(page, IO_DELAY))
+	if (!xfs_is_delayed_page(page, IO_DELALLOC))
 		goto out_invalidate;
 
 	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
@@ -1002,10 +915,10 @@
 	unsigned int		type;
 	__uint64_t              end_offset;
 	pgoff_t                 end_index, last_index;
-	ssize_t			size, len;
-	int			flags, err, imap_valid = 0, uptodate = 1;
+	ssize_t			len;
+	int			err, imap_valid = 0, uptodate = 1;
 	int			count = 0;
-	int			all_bh = 0;
+	int			nonblocking = 0;
 
 	trace_xfs_writepage(inode, page, 0);
 
@@ -1056,10 +969,14 @@
 
 	bh = head = page_buffers(page);
 	offset = page_offset(page);
-	flags = BMAPI_READ;
-	type = IO_NEW;
+	type = IO_OVERWRITE;
+
+	if (wbc->sync_mode == WB_SYNC_NONE && wbc->nonblocking)
+		nonblocking = 1;
 
 	do {
+		int new_ioend = 0;
+
 		if (offset >= end_offset)
 			break;
 		if (!buffer_uptodate(bh))
@@ -1076,90 +993,54 @@
 			continue;
 		}
 
-		if (imap_valid)
-			imap_valid = xfs_imap_valid(inode, &imap, offset);
-
-		if (buffer_unwritten(bh) || buffer_delay(bh)) {
-			int new_ioend = 0;
-
-			/*
-			 * Make sure we don't use a read-only iomap
-			 */
-			if (flags == BMAPI_READ)
-				imap_valid = 0;
-
-			if (buffer_unwritten(bh)) {
+		if (buffer_unwritten(bh)) {
+			if (type != IO_UNWRITTEN) {
 				type = IO_UNWRITTEN;
-				flags = BMAPI_WRITE | BMAPI_IGNSTATE;
-			} else if (buffer_delay(bh)) {
-				type = IO_DELAY;
-				flags = BMAPI_ALLOCATE;
-
-				if (wbc->sync_mode == WB_SYNC_NONE)
-					flags |= BMAPI_TRYLOCK;
+				imap_valid = 0;
 			}
-
-			if (!imap_valid) {
-				/*
-				 * If we didn't have a valid mapping then we
-				 * need to ensure that we put the new mapping
-				 * in a new ioend structure. This needs to be
-				 * done to ensure that the ioends correctly
-				 * reflect the block mappings at io completion
-				 * for unwritten extent conversion.
-				 */
-				new_ioend = 1;
-				err = xfs_map_blocks(inode, offset, len,
-						&imap, flags);
-				if (err)
-					goto error;
-				imap_valid = xfs_imap_valid(inode, &imap,
-							    offset);
-			}
-			if (imap_valid) {
-				xfs_map_at_offset(inode, bh, &imap, offset);
-				xfs_add_to_ioend(inode, bh, offset, type,
-						 &ioend, new_ioend);
-				count++;
+		} else if (buffer_delay(bh)) {
+			if (type != IO_DELALLOC) {
+				type = IO_DELALLOC;
+				imap_valid = 0;
 			}
 		} else if (buffer_uptodate(bh)) {
-			/*
-			 * we got here because the buffer is already mapped.
-			 * That means it must already have extents allocated
-			 * underneath it. Map the extent by reading it.
-			 */
-			if (!imap_valid || flags != BMAPI_READ) {
-				flags = BMAPI_READ;
-				size = xfs_probe_cluster(inode, page, bh, head);
-				err = xfs_map_blocks(inode, offset, size,
-						&imap, flags);
-				if (err)
-					goto error;
-				imap_valid = xfs_imap_valid(inode, &imap,
-							    offset);
-			}
-
-			/*
-			 * We set the type to IO_NEW in case we are doing a
-			 * small write at EOF that is extending the file but
-			 * without needing an allocation. We need to update the
-			 * file size on I/O completion in this case so it is
-			 * the same case as having just allocated a new extent
-			 * that we are writing into for the first time.
-			 */
-			type = IO_NEW;
-			if (trylock_buffer(bh)) {
-				if (imap_valid)
-					all_bh = 1;
-				xfs_add_to_ioend(inode, bh, offset, type,
-						&ioend, !imap_valid);
-				count++;
-			} else {
+			if (type != IO_OVERWRITE) {
+				type = IO_OVERWRITE;
 				imap_valid = 0;
 			}
-		} else if (PageUptodate(page)) {
-			ASSERT(buffer_mapped(bh));
-			imap_valid = 0;
+		} else {
+			if (PageUptodate(page)) {
+				ASSERT(buffer_mapped(bh));
+				imap_valid = 0;
+			}
+			continue;
+		}
+
+		if (imap_valid)
+			imap_valid = xfs_imap_valid(inode, &imap, offset);
+		if (!imap_valid) {
+			/*
+			 * If we didn't have a valid mapping then we need to
+			 * put the new mapping into a separate ioend structure.
+			 * This ensures non-contiguous extents always have
+			 * separate ioends, which is particularly important
+			 * for unwritten extent conversion at I/O completion
+			 * time.
+			 */
+			new_ioend = 1;
+			err = xfs_map_blocks(inode, offset, &imap, type,
+					     nonblocking);
+			if (err)
+				goto error;
+			imap_valid = xfs_imap_valid(inode, &imap, offset);
+		}
+		if (imap_valid) {
+			lock_buffer(bh);
+			if (type != IO_OVERWRITE)
+				xfs_map_at_offset(inode, bh, &imap, offset);
+			xfs_add_to_ioend(inode, bh, offset, type, &ioend,
+					 new_ioend);
+			count++;
 		}
 
 		if (!iohead)
@@ -1188,7 +1069,7 @@
 			end_index = last_index;
 
 		xfs_cluster_write(inode, page->index + 1, &imap, &ioend,
-					wbc, all_bh, end_index);
+				  wbc, end_index);
 	}
 
 	if (iohead)
@@ -1257,13 +1138,19 @@
 	int			create,
 	int			direct)
 {
-	int			flags = create ? BMAPI_WRITE : BMAPI_READ;
+	struct xfs_inode	*ip = XFS_I(inode);
+	struct xfs_mount	*mp = ip->i_mount;
+	xfs_fileoff_t		offset_fsb, end_fsb;
+	int			error = 0;
+	int			lockmode = 0;
 	struct xfs_bmbt_irec	imap;
+	int			nimaps = 1;
 	xfs_off_t		offset;
 	ssize_t			size;
-	int			nimap = 1;
 	int			new = 0;
-	int			error;
+
+	if (XFS_FORCED_SHUTDOWN(mp))
+		return -XFS_ERROR(EIO);
 
 	offset = (xfs_off_t)iblock << inode->i_blkbits;
 	ASSERT(bh_result->b_size >= (1 << inode->i_blkbits));
@@ -1272,15 +1159,45 @@
 	if (!create && direct && offset >= i_size_read(inode))
 		return 0;
 
-	if (direct && create)
-		flags |= BMAPI_DIRECT;
+	if (create) {
+		lockmode = XFS_ILOCK_EXCL;
+		xfs_ilock(ip, lockmode);
+	} else {
+		lockmode = xfs_ilock_map_shared(ip);
+	}
 
-	error = xfs_iomap(XFS_I(inode), offset, size, flags, &imap, &nimap,
-			  &new);
+	ASSERT(offset <= mp->m_maxioffset);
+	if (offset + size > mp->m_maxioffset)
+		size = mp->m_maxioffset - offset;
+	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + size);
+	offset_fsb = XFS_B_TO_FSBT(mp, offset);
+
+	error = xfs_bmapi(NULL, ip, offset_fsb, end_fsb - offset_fsb,
+			  XFS_BMAPI_ENTIRE,  NULL, 0, &imap, &nimaps, NULL);
 	if (error)
-		return -error;
-	if (nimap == 0)
-		return 0;
+		goto out_unlock;
+
+	if (create &&
+	    (!nimaps ||
+	     (imap.br_startblock == HOLESTARTBLOCK ||
+	      imap.br_startblock == DELAYSTARTBLOCK))) {
+		if (direct) {
+			error = xfs_iomap_write_direct(ip, offset, size,
+						       &imap, nimaps);
+		} else {
+			error = xfs_iomap_write_delay(ip, offset, size, &imap);
+		}
+		if (error)
+			goto out_unlock;
+
+		trace_xfs_get_blocks_alloc(ip, offset, size, 0, &imap);
+	} else if (nimaps) {
+		trace_xfs_get_blocks_found(ip, offset, size, 0, &imap);
+	} else {
+		trace_xfs_get_blocks_notfound(ip, offset, size);
+		goto out_unlock;
+	}
+	xfs_iunlock(ip, lockmode);
 
 	if (imap.br_startblock != HOLESTARTBLOCK &&
 	    imap.br_startblock != DELAYSTARTBLOCK) {
@@ -1347,6 +1264,10 @@
 	}
 
 	return 0;
+
+out_unlock:
+	xfs_iunlock(ip, lockmode);
+	return -error;
 }
 
 int
@@ -1434,7 +1355,7 @@
 	ssize_t			ret;
 
 	if (rw & WRITE) {
-		iocb->private = xfs_alloc_ioend(inode, IO_NEW);
+		iocb->private = xfs_alloc_ioend(inode, IO_DIRECT);
 
 		ret = __blockdev_direct_IO(rw, iocb, inode, bdev, iov,
 					    offset, nr_segs,
diff --git a/fs/xfs/linux-2.6/xfs_aops.h b/fs/xfs/linux-2.6/xfs_aops.h
index c5057fb..71f721e 100644
--- a/fs/xfs/linux-2.6/xfs_aops.h
+++ b/fs/xfs/linux-2.6/xfs_aops.h
@@ -23,6 +23,22 @@
 extern mempool_t *xfs_ioend_pool;
 
 /*
+ * Types of I/O for bmap clustering and I/O completion tracking.
+ */
+enum {
+	IO_DIRECT = 0,	/* special case for direct I/O ioends */
+	IO_DELALLOC,	/* mapping covers delalloc region */
+	IO_UNWRITTEN,	/* mapping covers allocated but uninitialized data */
+	IO_OVERWRITE,	/* mapping covers already allocated extent */
+};
+
+#define XFS_IO_TYPES \
+	{ 0,			"" }, \
+	{ IO_DELALLOC,		"delalloc" }, \
+	{ IO_UNWRITTEN,		"unwritten" }, \
+	{ IO_OVERWRITE,		"overwrite" }
+
+/*
  * xfs_ioend struct manages large extent writes for XFS.
  * It can manage several multi-page bio's at once.
  */
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index 4c5deb6..92f1f2a 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -44,12 +44,7 @@
 
 static kmem_zone_t *xfs_buf_zone;
 STATIC int xfsbufd(void *);
-STATIC int xfsbufd_wakeup(struct shrinker *, int, gfp_t);
 STATIC void xfs_buf_delwri_queue(xfs_buf_t *, int);
-static struct shrinker xfs_buf_shake = {
-	.shrink = xfsbufd_wakeup,
-	.seeks = DEFAULT_SEEKS,
-};
 
 static struct workqueue_struct *xfslogd_workqueue;
 struct workqueue_struct *xfsdatad_workqueue;
@@ -168,8 +163,79 @@
 }
 
 /*
- *	Internal xfs_buf_t object manipulation
+ * xfs_buf_lru_add - add a buffer to the LRU.
+ *
+ * The LRU takes a new reference to the buffer so that it will only be freed
+ * once the shrinker takes the buffer off the LRU.
  */
+STATIC void
+xfs_buf_lru_add(
+	struct xfs_buf	*bp)
+{
+	struct xfs_buftarg *btp = bp->b_target;
+
+	spin_lock(&btp->bt_lru_lock);
+	if (list_empty(&bp->b_lru)) {
+		atomic_inc(&bp->b_hold);
+		list_add_tail(&bp->b_lru, &btp->bt_lru);
+		btp->bt_lru_nr++;
+	}
+	spin_unlock(&btp->bt_lru_lock);
+}
+
+/*
+ * xfs_buf_lru_del - remove a buffer from the LRU
+ *
+ * The unlocked check is safe here because it only occurs when there are no
+ * b_lru_ref counts left on the inode under the pag->pag_buf_lock. It is there
+ * to optimise the shrinker removing the buffer from the LRU and calling
+ * xfs_buf_free(). i.e. it removes an unnecessary round trip on the
+ * bt_lru_lock.
+ */
+STATIC void
+xfs_buf_lru_del(
+	struct xfs_buf	*bp)
+{
+	struct xfs_buftarg *btp = bp->b_target;
+
+	if (list_empty(&bp->b_lru))
+		return;
+
+	spin_lock(&btp->bt_lru_lock);
+	if (!list_empty(&bp->b_lru)) {
+		list_del_init(&bp->b_lru);
+		btp->bt_lru_nr--;
+	}
+	spin_unlock(&btp->bt_lru_lock);
+}
+
+/*
+ * When we mark a buffer stale, we remove the buffer from the LRU and clear the
+ * b_lru_ref count so that the buffer is freed immediately when the buffer
+ * reference count falls to zero. If the buffer is already on the LRU, we need
+ * to remove the reference that LRU holds on the buffer.
+ *
+ * This prevents build-up of stale buffers on the LRU.
+ */
+void
+xfs_buf_stale(
+	struct xfs_buf	*bp)
+{
+	bp->b_flags |= XBF_STALE;
+	atomic_set(&(bp)->b_lru_ref, 0);
+	if (!list_empty(&bp->b_lru)) {
+		struct xfs_buftarg *btp = bp->b_target;
+
+		spin_lock(&btp->bt_lru_lock);
+		if (!list_empty(&bp->b_lru)) {
+			list_del_init(&bp->b_lru);
+			btp->bt_lru_nr--;
+			atomic_dec(&bp->b_hold);
+		}
+		spin_unlock(&btp->bt_lru_lock);
+	}
+	ASSERT(atomic_read(&bp->b_hold) >= 1);
+}
 
 STATIC void
 _xfs_buf_initialize(
@@ -186,7 +252,9 @@
 
 	memset(bp, 0, sizeof(xfs_buf_t));
 	atomic_set(&bp->b_hold, 1);
+	atomic_set(&bp->b_lru_ref, 1);
 	init_completion(&bp->b_iowait);
+	INIT_LIST_HEAD(&bp->b_lru);
 	INIT_LIST_HEAD(&bp->b_list);
 	RB_CLEAR_NODE(&bp->b_rbnode);
 	sema_init(&bp->b_sema, 0); /* held, no waiters */
@@ -262,6 +330,8 @@
 {
 	trace_xfs_buf_free(bp, _RET_IP_);
 
+	ASSERT(list_empty(&bp->b_lru));
+
 	if (bp->b_flags & (_XBF_PAGE_CACHE|_XBF_PAGES)) {
 		uint		i;
 
@@ -337,7 +407,6 @@
 					__func__, gfp_mask);
 
 			XFS_STATS_INC(xb_page_retries);
-			xfsbufd_wakeup(NULL, 0, gfp_mask);
 			congestion_wait(BLK_RW_ASYNC, HZ/50);
 			goto retry;
 		}
@@ -828,6 +897,7 @@
 
 	if (!pag) {
 		ASSERT(!bp->b_relse);
+		ASSERT(list_empty(&bp->b_lru));
 		ASSERT(RB_EMPTY_NODE(&bp->b_rbnode));
 		if (atomic_dec_and_test(&bp->b_hold))
 			xfs_buf_free(bp);
@@ -835,13 +905,19 @@
 	}
 
 	ASSERT(!RB_EMPTY_NODE(&bp->b_rbnode));
+
 	ASSERT(atomic_read(&bp->b_hold) > 0);
 	if (atomic_dec_and_lock(&bp->b_hold, &pag->pag_buf_lock)) {
 		if (bp->b_relse) {
 			atomic_inc(&bp->b_hold);
 			spin_unlock(&pag->pag_buf_lock);
 			bp->b_relse(bp);
+		} else if (!(bp->b_flags & XBF_STALE) &&
+			   atomic_read(&bp->b_lru_ref)) {
+			xfs_buf_lru_add(bp);
+			spin_unlock(&pag->pag_buf_lock);
 		} else {
+			xfs_buf_lru_del(bp);
 			ASSERT(!(bp->b_flags & (XBF_DELWRI|_XBF_DELWRI_Q)));
 			rb_erase(&bp->b_rbnode, &pag->pag_buf_tree);
 			spin_unlock(&pag->pag_buf_lock);
@@ -1438,51 +1514,84 @@
  */
 
 /*
- *	Wait for any bufs with callbacks that have been submitted but
- *	have not yet returned... walk the hash list for the target.
+ * Wait for any bufs with callbacks that have been submitted but have not yet
+ * returned. These buffers will have an elevated hold count, so wait on those
+ * while freeing all the buffers only held by the LRU.
  */
 void
 xfs_wait_buftarg(
 	struct xfs_buftarg	*btp)
 {
-	struct xfs_perag	*pag;
-	uint			i;
+	struct xfs_buf		*bp;
 
-	for (i = 0; i < btp->bt_mount->m_sb.sb_agcount; i++) {
-		pag = xfs_perag_get(btp->bt_mount, i);
-		spin_lock(&pag->pag_buf_lock);
-		while (rb_first(&pag->pag_buf_tree)) {
-			spin_unlock(&pag->pag_buf_lock);
+restart:
+	spin_lock(&btp->bt_lru_lock);
+	while (!list_empty(&btp->bt_lru)) {
+		bp = list_first_entry(&btp->bt_lru, struct xfs_buf, b_lru);
+		if (atomic_read(&bp->b_hold) > 1) {
+			spin_unlock(&btp->bt_lru_lock);
 			delay(100);
-			spin_lock(&pag->pag_buf_lock);
+			goto restart;
 		}
-		spin_unlock(&pag->pag_buf_lock);
-		xfs_perag_put(pag);
+		/*
+		 * clear the LRU reference count so the buffer doesn't get
+		 * ignored in xfs_buf_rele().
+		 */
+		atomic_set(&bp->b_lru_ref, 0);
+		spin_unlock(&btp->bt_lru_lock);
+		xfs_buf_rele(bp);
+		spin_lock(&btp->bt_lru_lock);
 	}
+	spin_unlock(&btp->bt_lru_lock);
 }
 
-/*
- *	buftarg list for delwrite queue processing
- */
-static LIST_HEAD(xfs_buftarg_list);
-static DEFINE_SPINLOCK(xfs_buftarg_lock);
-
-STATIC void
-xfs_register_buftarg(
-	xfs_buftarg_t           *btp)
+int
+xfs_buftarg_shrink(
+	struct shrinker		*shrink,
+	int			nr_to_scan,
+	gfp_t			mask)
 {
-	spin_lock(&xfs_buftarg_lock);
-	list_add(&btp->bt_list, &xfs_buftarg_list);
-	spin_unlock(&xfs_buftarg_lock);
-}
+	struct xfs_buftarg	*btp = container_of(shrink,
+					struct xfs_buftarg, bt_shrinker);
+	struct xfs_buf		*bp;
+	LIST_HEAD(dispose);
 
-STATIC void
-xfs_unregister_buftarg(
-	xfs_buftarg_t           *btp)
-{
-	spin_lock(&xfs_buftarg_lock);
-	list_del(&btp->bt_list);
-	spin_unlock(&xfs_buftarg_lock);
+	if (!nr_to_scan)
+		return btp->bt_lru_nr;
+
+	spin_lock(&btp->bt_lru_lock);
+	while (!list_empty(&btp->bt_lru)) {
+		if (nr_to_scan-- <= 0)
+			break;
+
+		bp = list_first_entry(&btp->bt_lru, struct xfs_buf, b_lru);
+
+		/*
+		 * Decrement the b_lru_ref count unless the value is already
+		 * zero. If the value is already zero, we need to reclaim the
+		 * buffer, otherwise it gets another trip through the LRU.
+		 */
+		if (!atomic_add_unless(&bp->b_lru_ref, -1, 0)) {
+			list_move_tail(&bp->b_lru, &btp->bt_lru);
+			continue;
+		}
+
+		/*
+		 * remove the buffer from the LRU now to avoid needing another
+		 * lock round trip inside xfs_buf_rele().
+		 */
+		list_move(&bp->b_lru, &dispose);
+		btp->bt_lru_nr--;
+	}
+	spin_unlock(&btp->bt_lru_lock);
+
+	while (!list_empty(&dispose)) {
+		bp = list_first_entry(&dispose, struct xfs_buf, b_lru);
+		list_del_init(&bp->b_lru);
+		xfs_buf_rele(bp);
+	}
+
+	return btp->bt_lru_nr;
 }
 
 void
@@ -1490,17 +1599,14 @@
 	struct xfs_mount	*mp,
 	struct xfs_buftarg	*btp)
 {
+	unregister_shrinker(&btp->bt_shrinker);
+
 	xfs_flush_buftarg(btp, 1);
 	if (mp->m_flags & XFS_MOUNT_BARRIER)
 		xfs_blkdev_issue_flush(btp);
 	iput(btp->bt_mapping->host);
 
-	/* Unregister the buftarg first so that we don't get a
-	 * wakeup finding a non-existent task
-	 */
-	xfs_unregister_buftarg(btp);
 	kthread_stop(btp->bt_task);
-
 	kmem_free(btp);
 }
 
@@ -1597,20 +1703,13 @@
 	xfs_buftarg_t		*btp,
 	const char		*fsname)
 {
-	int	error = 0;
-
-	INIT_LIST_HEAD(&btp->bt_list);
 	INIT_LIST_HEAD(&btp->bt_delwrite_queue);
 	spin_lock_init(&btp->bt_delwrite_lock);
 	btp->bt_flags = 0;
 	btp->bt_task = kthread_run(xfsbufd, btp, "xfsbufd/%s", fsname);
-	if (IS_ERR(btp->bt_task)) {
-		error = PTR_ERR(btp->bt_task);
-		goto out_error;
-	}
-	xfs_register_buftarg(btp);
-out_error:
-	return error;
+	if (IS_ERR(btp->bt_task))
+		return PTR_ERR(btp->bt_task);
+	return 0;
 }
 
 xfs_buftarg_t *
@@ -1627,12 +1726,17 @@
 	btp->bt_mount = mp;
 	btp->bt_dev =  bdev->bd_dev;
 	btp->bt_bdev = bdev;
+	INIT_LIST_HEAD(&btp->bt_lru);
+	spin_lock_init(&btp->bt_lru_lock);
 	if (xfs_setsize_buftarg_early(btp, bdev))
 		goto error;
 	if (xfs_mapping_buftarg(btp, bdev))
 		goto error;
 	if (xfs_alloc_delwrite_queue(btp, fsname))
 		goto error;
+	btp->bt_shrinker.shrink = xfs_buftarg_shrink;
+	btp->bt_shrinker.seeks = DEFAULT_SEEKS;
+	register_shrinker(&btp->bt_shrinker);
 	return btp;
 
 error:
@@ -1737,27 +1841,6 @@
 	flush_workqueue(queue);
 }
 
-STATIC int
-xfsbufd_wakeup(
-	struct shrinker		*shrink,
-	int			priority,
-	gfp_t			mask)
-{
-	xfs_buftarg_t		*btp;
-
-	spin_lock(&xfs_buftarg_lock);
-	list_for_each_entry(btp, &xfs_buftarg_list, bt_list) {
-		if (test_bit(XBT_FORCE_SLEEP, &btp->bt_flags))
-			continue;
-		if (list_empty(&btp->bt_delwrite_queue))
-			continue;
-		set_bit(XBT_FORCE_FLUSH, &btp->bt_flags);
-		wake_up_process(btp->bt_task);
-	}
-	spin_unlock(&xfs_buftarg_lock);
-	return 0;
-}
-
 /*
  * Move as many buffers as specified to the supplied list
  * indicating if we skipped any buffers to prevent deadlocks.
@@ -1952,7 +2035,6 @@
 	if (!xfsconvertd_workqueue)
 		goto out_destroy_xfsdatad_workqueue;
 
-	register_shrinker(&xfs_buf_shake);
 	return 0;
 
  out_destroy_xfsdatad_workqueue:
@@ -1968,7 +2050,6 @@
 void
 xfs_buf_terminate(void)
 {
-	unregister_shrinker(&xfs_buf_shake);
 	destroy_workqueue(xfsconvertd_workqueue);
 	destroy_workqueue(xfsdatad_workqueue);
 	destroy_workqueue(xfslogd_workqueue);
diff --git a/fs/xfs/linux-2.6/xfs_buf.h b/fs/xfs/linux-2.6/xfs_buf.h
index 383a3f3..a76c242 100644
--- a/fs/xfs/linux-2.6/xfs_buf.h
+++ b/fs/xfs/linux-2.6/xfs_buf.h
@@ -128,10 +128,15 @@
 
 	/* per device delwri queue */
 	struct task_struct	*bt_task;
-	struct list_head	bt_list;
 	struct list_head	bt_delwrite_queue;
 	spinlock_t		bt_delwrite_lock;
 	unsigned long		bt_flags;
+
+	/* LRU control structures */
+	struct shrinker		bt_shrinker;
+	struct list_head	bt_lru;
+	spinlock_t		bt_lru_lock;
+	unsigned int		bt_lru_nr;
 } xfs_buftarg_t;
 
 /*
@@ -164,9 +169,11 @@
 	xfs_off_t		b_file_offset;	/* offset in file */
 	size_t			b_buffer_length;/* size of buffer in bytes */
 	atomic_t		b_hold;		/* reference count */
+	atomic_t		b_lru_ref;	/* lru reclaim ref count */
 	xfs_buf_flags_t		b_flags;	/* status flags */
 	struct semaphore	b_sema;		/* semaphore for lockables */
 
+	struct list_head	b_lru;		/* lru list */
 	wait_queue_head_t	b_waiters;	/* unpin waiters */
 	struct list_head	b_list;
 	struct xfs_perag	*b_pag;		/* contains rbtree root */
@@ -264,7 +271,8 @@
 #define XFS_BUF_ZEROFLAGS(bp)	((bp)->b_flags &= \
 		~(XBF_READ|XBF_WRITE|XBF_ASYNC|XBF_DELWRI|XBF_ORDERED))
 
-#define XFS_BUF_STALE(bp)	((bp)->b_flags |= XBF_STALE)
+void xfs_buf_stale(struct xfs_buf *bp);
+#define XFS_BUF_STALE(bp)	xfs_buf_stale(bp);
 #define XFS_BUF_UNSTALE(bp)	((bp)->b_flags &= ~XBF_STALE)
 #define XFS_BUF_ISSTALE(bp)	((bp)->b_flags & XBF_STALE)
 #define XFS_BUF_SUPER_STALE(bp)	do {				\
@@ -328,9 +336,15 @@
 #define XFS_BUF_SIZE(bp)		((bp)->b_buffer_length)
 #define XFS_BUF_SET_SIZE(bp, cnt)	((bp)->b_buffer_length = (cnt))
 
-#define XFS_BUF_SET_VTYPE_REF(bp, type, ref)	do { } while (0)
+static inline void
+xfs_buf_set_ref(
+	struct xfs_buf	*bp,
+	int		lru_ref)
+{
+	atomic_set(&bp->b_lru_ref, lru_ref);
+}
+#define XFS_BUF_SET_VTYPE_REF(bp, type, ref)	xfs_buf_set_ref(bp, ref)
 #define XFS_BUF_SET_VTYPE(bp, type)		do { } while (0)
-#define XFS_BUF_SET_REF(bp, ref)		do { } while (0)
 
 #define XFS_BUF_ISPINNED(bp)	atomic_read(&((bp)->b_pin_count))
 
diff --git a/fs/xfs/linux-2.6/xfs_export.c b/fs/xfs/linux-2.6/xfs_export.c
index 3764d74..fc0114d 100644
--- a/fs/xfs/linux-2.6/xfs_export.c
+++ b/fs/xfs/linux-2.6/xfs_export.c
@@ -70,8 +70,16 @@
 	else
 		fileid_type = FILEID_INO32_GEN_PARENT;
 
-	/* filesystem may contain 64bit inode numbers */
-	if (!(XFS_M(inode->i_sb)->m_flags & XFS_MOUNT_SMALL_INUMS))
+	/*
+	 * If the filesystem may contain 64bit inode numbers, we need
+	 * to use larger file handles that can represent them.
+	 *
+	 * While we only allocate inodes that do not fit into 32 bits any
+	 * large enough filesystem may contain them, thus the slightly
+	 * confusing looking conditional below.
+	 */
+	if (!(XFS_M(inode->i_sb)->m_flags & XFS_MOUNT_SMALL_INUMS) ||
+	    (XFS_M(inode->i_sb)->m_flags & XFS_MOUNT_32BITINODES))
 		fileid_type |= XFS_FILEID_TYPE_64FLAG;
 
 	/*
diff --git a/fs/xfs/linux-2.6/xfs_iops.c b/fs/xfs/linux-2.6/xfs_iops.c
index 94d5fd6..da54403 100644
--- a/fs/xfs/linux-2.6/xfs_iops.c
+++ b/fs/xfs/linux-2.6/xfs_iops.c
@@ -516,6 +516,7 @@
 	loff_t		new_size = 0;
 	xfs_flock64_t	bf;
 	xfs_inode_t	*ip = XFS_I(inode);
+	int		cmd = XFS_IOC_RESVSP;
 
 	/* preallocation on directories not yet supported */
 	error = -ENODEV;
@@ -528,6 +529,9 @@
 
 	xfs_ilock(ip, XFS_IOLOCK_EXCL);
 
+	if (mode & FALLOC_FL_PUNCH_HOLE)
+		cmd = XFS_IOC_UNRESVSP;
+
 	/* check the new inode size is valid before allocating */
 	if (!(mode & FALLOC_FL_KEEP_SIZE) &&
 	    offset + len > i_size_read(inode)) {
@@ -537,8 +541,7 @@
 			goto out_unlock;
 	}
 
-	error = -xfs_change_file_space(ip, XFS_IOC_RESVSP, &bf,
-				       0, XFS_ATTR_NOLOCK);
+	error = -xfs_change_file_space(ip, cmd, &bf, 0, XFS_ATTR_NOLOCK);
 	if (error)
 		goto out_unlock;
 
diff --git a/fs/xfs/linux-2.6/xfs_linux.h b/fs/xfs/linux-2.6/xfs_linux.h
index 214ddd7..0964949 100644
--- a/fs/xfs/linux-2.6/xfs_linux.h
+++ b/fs/xfs/linux-2.6/xfs_linux.h
@@ -37,7 +37,6 @@
 
 #include <kmem.h>
 #include <mrlock.h>
-#include <sv.h>
 #include <time.h>
 
 #include <support/debug.h>
diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c
index 064f964..a10f641 100644
--- a/fs/xfs/linux-2.6/xfs_super.c
+++ b/fs/xfs/linux-2.6/xfs_super.c
@@ -834,8 +834,11 @@
 	struct xfs_ail		*ailp,
 	xfs_lsn_t		threshold_lsn)
 {
-	ailp->xa_target = threshold_lsn;
-	wake_up_process(ailp->xa_task);
+	/* only ever move the target forwards */
+	if (XFS_LSN_CMP(threshold_lsn, ailp->xa_target) > 0) {
+		ailp->xa_target = threshold_lsn;
+		wake_up_process(ailp->xa_task);
+	}
 }
 
 STATIC int
@@ -847,8 +850,17 @@
 	long		tout = 0; /* milliseconds */
 
 	while (!kthread_should_stop()) {
-		schedule_timeout_interruptible(tout ?
-				msecs_to_jiffies(tout) : MAX_SCHEDULE_TIMEOUT);
+		/*
+		 * for short sleeps indicating congestion, don't allow us to
+		 * get woken early. Otherwise all we do is bang on the AIL lock
+		 * without making progress.
+		 */
+		if (tout && tout <= 20)
+			__set_current_state(TASK_KILLABLE);
+		else
+			__set_current_state(TASK_INTERRUPTIBLE);
+		schedule_timeout(tout ?
+				 msecs_to_jiffies(tout) : MAX_SCHEDULE_TIMEOUT);
 
 		/* swsusp */
 		try_to_freeze();
@@ -935,7 +947,7 @@
  * Slab object creation initialisation for the XFS inode.
  * This covers only the idempotent fields in the XFS inode;
  * all other fields need to be initialised on allocation
- * from the slab. This avoids the need to repeatedly intialise
+ * from the slab. This avoids the need to repeatedly initialise
  * fields in the xfs inode that left in the initialise state
  * when freeing the inode.
  */
@@ -1118,6 +1130,8 @@
 	 */
 	ASSERT(!rwsem_is_locked(&ip->i_iolock.mr_lock));
 	mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino);
+	lockdep_set_class_and_name(&ip->i_iolock.mr_lock,
+			&xfs_iolock_reclaimable, "xfs_iolock_reclaimable");
 
 	xfs_inactive(ip);
 }
diff --git a/fs/xfs/linux-2.6/xfs_sync.c b/fs/xfs/linux-2.6/xfs_sync.c
index afb0d7c..a02480d 100644
--- a/fs/xfs/linux-2.6/xfs_sync.c
+++ b/fs/xfs/linux-2.6/xfs_sync.c
@@ -53,14 +53,30 @@
 {
 	struct inode		*inode = VFS_I(ip);
 
+	ASSERT(rcu_read_lock_held());
+
+	/*
+	 * check for stale RCU freed inode
+	 *
+	 * If the inode has been reallocated, it doesn't matter if it's not in
+	 * the AG we are walking - we are walking for writeback, so if it
+	 * passes all the "valid inode" checks and is dirty, then we'll write
+	 * it back anyway.  If it has been reallocated and is still being
+	 * initialised, the XFS_INEW check below will catch it.
+	 */
+	spin_lock(&ip->i_flags_lock);
+	if (!ip->i_ino)
+		goto out_unlock_noent;
+
+	/* avoid new or reclaimable inodes. Leave for reclaim code to flush */
+	if (__xfs_iflags_test(ip, XFS_INEW | XFS_IRECLAIMABLE | XFS_IRECLAIM))
+		goto out_unlock_noent;
+	spin_unlock(&ip->i_flags_lock);
+
 	/* nothing to sync during shutdown */
 	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
 		return EFSCORRUPTED;
 
-	/* avoid new or reclaimable inodes. Leave for reclaim code to flush */
-	if (xfs_iflags_test(ip, XFS_INEW | XFS_IRECLAIMABLE | XFS_IRECLAIM))
-		return ENOENT;
-
 	/* If we can't grab the inode, it must be on its way to reclaim. */
 	if (!igrab(inode))
 		return ENOENT;
@@ -72,6 +88,10 @@
 
 	/* inode is valid */
 	return 0;
+
+out_unlock_noent:
+	spin_unlock(&ip->i_flags_lock);
+	return ENOENT;
 }
 
 STATIC int
@@ -98,12 +118,12 @@
 		int		error = 0;
 		int		i;
 
-		read_lock(&pag->pag_ici_lock);
+		rcu_read_lock();
 		nr_found = radix_tree_gang_lookup(&pag->pag_ici_root,
 					(void **)batch, first_index,
 					XFS_LOOKUP_BATCH);
 		if (!nr_found) {
-			read_unlock(&pag->pag_ici_lock);
+			rcu_read_unlock();
 			break;
 		}
 
@@ -118,18 +138,26 @@
 				batch[i] = NULL;
 
 			/*
-			 * Update the index for the next lookup. Catch overflows
-			 * into the next AG range which can occur if we have inodes
-			 * in the last block of the AG and we are currently
-			 * pointing to the last inode.
+			 * Update the index for the next lookup. Catch
+			 * overflows into the next AG range which can occur if
+			 * we have inodes in the last block of the AG and we
+			 * are currently pointing to the last inode.
+			 *
+			 * Because we may see inodes that are from the wrong AG
+			 * due to RCU freeing and reallocation, only update the
+			 * index if it lies in this AG. It was a race that led
+			 * us to see this inode, so another lookup from the
+			 * same index will not find it again.
 			 */
+			if (XFS_INO_TO_AGNO(mp, ip->i_ino) != pag->pag_agno)
+				continue;
 			first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
 			if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
 				done = 1;
 		}
 
 		/* unlock now we've grabbed the inodes. */
-		read_unlock(&pag->pag_ici_lock);
+		rcu_read_unlock();
 
 		for (i = 0; i < nr_found; i++) {
 			if (!batch[i])
@@ -592,12 +620,12 @@
 	struct xfs_perag *pag;
 
 	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
-	write_lock(&pag->pag_ici_lock);
+	spin_lock(&pag->pag_ici_lock);
 	spin_lock(&ip->i_flags_lock);
 	__xfs_inode_set_reclaim_tag(pag, ip);
 	__xfs_iflags_set(ip, XFS_IRECLAIMABLE);
 	spin_unlock(&ip->i_flags_lock);
-	write_unlock(&pag->pag_ici_lock);
+	spin_unlock(&pag->pag_ici_lock);
 	xfs_perag_put(pag);
 }
 
@@ -639,9 +667,14 @@
 	struct xfs_inode	*ip,
 	int			flags)
 {
+	ASSERT(rcu_read_lock_held());
+
+	/* quick check for stale RCU freed inode */
+	if (!ip->i_ino)
+		return 1;
 
 	/*
-	 * do some unlocked checks first to avoid unnecceary lock traffic.
+	 * do some unlocked checks first to avoid unnecessary lock traffic.
 	 * The first is a flush lock check, the second is an already in reclaim
 	 * check. Only do these checks if we are not going to block on locks.
 	 */
@@ -654,11 +687,16 @@
 	 * The radix tree lock here protects a thread in xfs_iget from racing
 	 * with us starting reclaim on the inode.  Once we have the
 	 * XFS_IRECLAIM flag set it will not touch us.
+	 *
+	 * Due to RCU lookup, we may find inodes that have been freed and only
+	 * have XFS_IRECLAIM set.  Indeed, we may see reallocated inodes that
+	 * aren't candidates for reclaim at all, so we must check that
+	 * XFS_IRECLAIMABLE is set first before proceeding to reclaim.
 	 */
 	spin_lock(&ip->i_flags_lock);
-	ASSERT_ALWAYS(__xfs_iflags_test(ip, XFS_IRECLAIMABLE));
-	if (__xfs_iflags_test(ip, XFS_IRECLAIM)) {
-		/* ignore as it is already under reclaim */
+	if (!__xfs_iflags_test(ip, XFS_IRECLAIMABLE) ||
+	    __xfs_iflags_test(ip, XFS_IRECLAIM)) {
+		/* not a reclaim candidate. */
 		spin_unlock(&ip->i_flags_lock);
 		return 1;
 	}
@@ -795,12 +833,12 @@
 	 * added to the tree assert that it's been there before to catch
 	 * problems with the inode life time early on.
 	 */
-	write_lock(&pag->pag_ici_lock);
+	spin_lock(&pag->pag_ici_lock);
 	if (!radix_tree_delete(&pag->pag_ici_root,
 				XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino)))
 		ASSERT(0);
 	__xfs_inode_clear_reclaim(pag, ip);
-	write_unlock(&pag->pag_ici_lock);
+	spin_unlock(&pag->pag_ici_lock);
 
 	/*
 	 * Here we do an (almost) spurious inode lock in order to coordinate
@@ -864,14 +902,14 @@
 			struct xfs_inode *batch[XFS_LOOKUP_BATCH];
 			int	i;
 
-			write_lock(&pag->pag_ici_lock);
+			rcu_read_lock();
 			nr_found = radix_tree_gang_lookup_tag(
 					&pag->pag_ici_root,
 					(void **)batch, first_index,
 					XFS_LOOKUP_BATCH,
 					XFS_ICI_RECLAIM_TAG);
 			if (!nr_found) {
-				write_unlock(&pag->pag_ici_lock);
+				rcu_read_unlock();
 				break;
 			}
 
@@ -891,14 +929,24 @@
 				 * occur if we have inodes in the last block of
 				 * the AG and we are currently pointing to the
 				 * last inode.
+				 *
+				 * Because we may see inodes that are from the
+				 * wrong AG due to RCU freeing and
+				 * reallocation, only update the index if it
+				 * lies in this AG. It was a race that led us
+				 * to see this inode, so another lookup from
+				 * the same index will not find it again.
 				 */
+				if (XFS_INO_TO_AGNO(mp, ip->i_ino) !=
+								pag->pag_agno)
+					continue;
 				first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
 				if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
 					done = 1;
 			}
 
 			/* unlock now we've grabbed the inodes. */
-			write_unlock(&pag->pag_ici_lock);
+			rcu_read_unlock();
 
 			for (i = 0; i < nr_found; i++) {
 				if (!batch[i])
diff --git a/fs/xfs/linux-2.6/xfs_trace.h b/fs/xfs/linux-2.6/xfs_trace.h
index acef2e9..647af2a 100644
--- a/fs/xfs/linux-2.6/xfs_trace.h
+++ b/fs/xfs/linux-2.6/xfs_trace.h
@@ -766,8 +766,8 @@
 		__field(int, curr_res)
 		__field(int, unit_res)
 		__field(unsigned int, flags)
-		__field(void *, reserve_headq)
-		__field(void *, write_headq)
+		__field(int, reserveq)
+		__field(int, writeq)
 		__field(int, grant_reserve_cycle)
 		__field(int, grant_reserve_bytes)
 		__field(int, grant_write_cycle)
@@ -784,19 +784,21 @@
 		__entry->curr_res = tic->t_curr_res;
 		__entry->unit_res = tic->t_unit_res;
 		__entry->flags = tic->t_flags;
-		__entry->reserve_headq = log->l_reserve_headq;
-		__entry->write_headq = log->l_write_headq;
-		__entry->grant_reserve_cycle = log->l_grant_reserve_cycle;
-		__entry->grant_reserve_bytes = log->l_grant_reserve_bytes;
-		__entry->grant_write_cycle = log->l_grant_write_cycle;
-		__entry->grant_write_bytes = log->l_grant_write_bytes;
+		__entry->reserveq = list_empty(&log->l_reserveq);
+		__entry->writeq = list_empty(&log->l_writeq);
+		xlog_crack_grant_head(&log->l_grant_reserve_head,
+				&__entry->grant_reserve_cycle,
+				&__entry->grant_reserve_bytes);
+		xlog_crack_grant_head(&log->l_grant_write_head,
+				&__entry->grant_write_cycle,
+				&__entry->grant_write_bytes);
 		__entry->curr_cycle = log->l_curr_cycle;
 		__entry->curr_block = log->l_curr_block;
-		__entry->tail_lsn = log->l_tail_lsn;
+		__entry->tail_lsn = atomic64_read(&log->l_tail_lsn);
 	),
 	TP_printk("dev %d:%d type %s t_ocnt %u t_cnt %u t_curr_res %u "
-		  "t_unit_res %u t_flags %s reserve_headq 0x%p "
-		  "write_headq 0x%p grant_reserve_cycle %d "
+		  "t_unit_res %u t_flags %s reserveq %s "
+		  "writeq %s grant_reserve_cycle %d "
 		  "grant_reserve_bytes %d grant_write_cycle %d "
 		  "grant_write_bytes %d curr_cycle %d curr_block %d "
 		  "tail_cycle %d tail_block %d",
@@ -807,8 +809,8 @@
 		  __entry->curr_res,
 		  __entry->unit_res,
 		  __print_flags(__entry->flags, "|", XLOG_TIC_FLAGS),
-		  __entry->reserve_headq,
-		  __entry->write_headq,
+		  __entry->reserveq ? "empty" : "active",
+		  __entry->writeq ? "empty" : "active",
 		  __entry->grant_reserve_cycle,
 		  __entry->grant_reserve_bytes,
 		  __entry->grant_write_cycle,
@@ -835,6 +837,7 @@
 DEFINE_LOGGRANT_EVENT(xfs_log_grant_wake1);
 DEFINE_LOGGRANT_EVENT(xfs_log_grant_sleep2);
 DEFINE_LOGGRANT_EVENT(xfs_log_grant_wake2);
+DEFINE_LOGGRANT_EVENT(xfs_log_grant_wake_up);
 DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_enter);
 DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_exit);
 DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_error);
@@ -842,6 +845,7 @@
 DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_wake1);
 DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_sleep2);
 DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_wake2);
+DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_wake_up);
 DEFINE_LOGGRANT_EVENT(xfs_log_regrant_reserve_enter);
 DEFINE_LOGGRANT_EVENT(xfs_log_regrant_reserve_exit);
 DEFINE_LOGGRANT_EVENT(xfs_log_regrant_reserve_sub);
@@ -935,10 +939,10 @@
 DEFINE_PAGE_EVENT(xfs_releasepage);
 DEFINE_PAGE_EVENT(xfs_invalidatepage);
 
-DECLARE_EVENT_CLASS(xfs_iomap_class,
+DECLARE_EVENT_CLASS(xfs_imap_class,
 	TP_PROTO(struct xfs_inode *ip, xfs_off_t offset, ssize_t count,
-		 int flags, struct xfs_bmbt_irec *irec),
-	TP_ARGS(ip, offset, count, flags, irec),
+		 int type, struct xfs_bmbt_irec *irec),
+	TP_ARGS(ip, offset, count, type, irec),
 	TP_STRUCT__entry(
 		__field(dev_t, dev)
 		__field(xfs_ino_t, ino)
@@ -946,7 +950,7 @@
 		__field(loff_t, new_size)
 		__field(loff_t, offset)
 		__field(size_t, count)
-		__field(int, flags)
+		__field(int, type)
 		__field(xfs_fileoff_t, startoff)
 		__field(xfs_fsblock_t, startblock)
 		__field(xfs_filblks_t, blockcount)
@@ -958,13 +962,13 @@
 		__entry->new_size = ip->i_new_size;
 		__entry->offset = offset;
 		__entry->count = count;
-		__entry->flags = flags;
+		__entry->type = type;
 		__entry->startoff = irec ? irec->br_startoff : 0;
 		__entry->startblock = irec ? irec->br_startblock : 0;
 		__entry->blockcount = irec ? irec->br_blockcount : 0;
 	),
 	TP_printk("dev %d:%d ino 0x%llx size 0x%llx new_size 0x%llx "
-		  "offset 0x%llx count %zd flags %s "
+		  "offset 0x%llx count %zd type %s "
 		  "startoff 0x%llx startblock %lld blockcount 0x%llx",
 		  MAJOR(__entry->dev), MINOR(__entry->dev),
 		  __entry->ino,
@@ -972,20 +976,21 @@
 		  __entry->new_size,
 		  __entry->offset,
 		  __entry->count,
-		  __print_flags(__entry->flags, "|", BMAPI_FLAGS),
+		  __print_symbolic(__entry->type, XFS_IO_TYPES),
 		  __entry->startoff,
 		  (__int64_t)__entry->startblock,
 		  __entry->blockcount)
 )
 
 #define DEFINE_IOMAP_EVENT(name)	\
-DEFINE_EVENT(xfs_iomap_class, name,	\
+DEFINE_EVENT(xfs_imap_class, name,	\
 	TP_PROTO(struct xfs_inode *ip, xfs_off_t offset, ssize_t count,	\
-		 int flags, struct xfs_bmbt_irec *irec),		\
-	TP_ARGS(ip, offset, count, flags, irec))
-DEFINE_IOMAP_EVENT(xfs_iomap_enter);
-DEFINE_IOMAP_EVENT(xfs_iomap_found);
-DEFINE_IOMAP_EVENT(xfs_iomap_alloc);
+		 int type, struct xfs_bmbt_irec *irec),		\
+	TP_ARGS(ip, offset, count, type, irec))
+DEFINE_IOMAP_EVENT(xfs_map_blocks_found);
+DEFINE_IOMAP_EVENT(xfs_map_blocks_alloc);
+DEFINE_IOMAP_EVENT(xfs_get_blocks_found);
+DEFINE_IOMAP_EVENT(xfs_get_blocks_alloc);
 
 DECLARE_EVENT_CLASS(xfs_simple_io_class,
 	TP_PROTO(struct xfs_inode *ip, xfs_off_t offset, ssize_t count),
@@ -1022,6 +1027,7 @@
 	TP_ARGS(ip, offset, count))
 DEFINE_SIMPLE_IO_EVENT(xfs_delalloc_enospc);
 DEFINE_SIMPLE_IO_EVENT(xfs_unwritten_convert);
+DEFINE_SIMPLE_IO_EVENT(xfs_get_blocks_notfound);
 
 
 TRACE_EVENT(xfs_itruncate_start,
@@ -1420,6 +1426,7 @@
 	TP_PROTO(struct xfs_alloc_arg *args), \
 	TP_ARGS(args))
 DEFINE_ALLOC_EVENT(xfs_alloc_exact_done);
+DEFINE_ALLOC_EVENT(xfs_alloc_exact_notfound);
 DEFINE_ALLOC_EVENT(xfs_alloc_exact_error);
 DEFINE_ALLOC_EVENT(xfs_alloc_near_nominleft);
 DEFINE_ALLOC_EVENT(xfs_alloc_near_first);
diff --git a/fs/xfs/quota/xfs_dquot.c b/fs/xfs/quota/xfs_dquot.c
index faf8e1a..d22aa31 100644
--- a/fs/xfs/quota/xfs_dquot.c
+++ b/fs/xfs/quota/xfs_dquot.c
@@ -149,7 +149,6 @@
 	ASSERT(list_empty(&dqp->q_freelist));
 
 	mutex_destroy(&dqp->q_qlock);
-	sv_destroy(&dqp->q_pinwait);
 	kmem_zone_free(xfs_Gqm->qm_dqzone, dqp);
 
 	atomic_dec(&xfs_Gqm->qm_totaldquots);
diff --git a/fs/xfs/xfs_ag.h b/fs/xfs/xfs_ag.h
index 63c7a1a..58632cc 100644
--- a/fs/xfs/xfs_ag.h
+++ b/fs/xfs/xfs_ag.h
@@ -227,7 +227,7 @@
 
 	atomic_t        pagf_fstrms;    /* # of filestreams active in this AG */
 
-	rwlock_t	pag_ici_lock;	/* incore inode lock */
+	spinlock_t	pag_ici_lock;	/* incore inode cache lock */
 	struct radix_tree_root pag_ici_root;	/* incore inode cache root */
 	int		pag_ici_reclaimable;	/* reclaimable inodes */
 	struct mutex	pag_ici_reclaim_lock;	/* serialisation point */
diff --git a/fs/xfs/xfs_alloc.c b/fs/xfs/xfs_alloc.c
index 112abc4..fa8723f 100644
--- a/fs/xfs/xfs_alloc.c
+++ b/fs/xfs/xfs_alloc.c
@@ -577,61 +577,58 @@
 	xfs_extlen_t	rlen;	/* length of returned extent */
 
 	ASSERT(args->alignment == 1);
+
 	/*
 	 * Allocate/initialize a cursor for the by-number freespace btree.
 	 */
 	bno_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
-		args->agno, XFS_BTNUM_BNO);
+					  args->agno, XFS_BTNUM_BNO);
+
 	/*
 	 * Lookup bno and minlen in the btree (minlen is irrelevant, really).
 	 * Look for the closest free block <= bno, it must contain bno
 	 * if any free block does.
 	 */
-	if ((error = xfs_alloc_lookup_le(bno_cur, args->agbno, args->minlen, &i)))
+	error = xfs_alloc_lookup_le(bno_cur, args->agbno, args->minlen, &i);
+	if (error)
 		goto error0;
-	if (!i) {
-		/*
-		 * Didn't find it, return null.
-		 */
-		xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
-		args->agbno = NULLAGBLOCK;
-		return 0;
-	}
+	if (!i)
+		goto not_found;
+
 	/*
 	 * Grab the freespace record.
 	 */
-	if ((error = xfs_alloc_get_rec(bno_cur, &fbno, &flen, &i)))
+	error = xfs_alloc_get_rec(bno_cur, &fbno, &flen, &i);
+	if (error)
 		goto error0;
 	XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
 	ASSERT(fbno <= args->agbno);
 	minend = args->agbno + args->minlen;
 	maxend = args->agbno + args->maxlen;
 	fend = fbno + flen;
+
 	/*
 	 * Give up if the freespace isn't long enough for the minimum request.
 	 */
-	if (fend < minend) {
-		xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
-		args->agbno = NULLAGBLOCK;
-		return 0;
-	}
+	if (fend < minend)
+		goto not_found;
+
 	/*
 	 * End of extent will be smaller of the freespace end and the
 	 * maximal requested end.
-	 */
-	end = XFS_AGBLOCK_MIN(fend, maxend);
-	/*
+	 *
 	 * Fix the length according to mod and prod if given.
 	 */
+	end = XFS_AGBLOCK_MIN(fend, maxend);
 	args->len = end - args->agbno;
 	xfs_alloc_fix_len(args);
-	if (!xfs_alloc_fix_minleft(args)) {
-		xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
-		return 0;
-	}
+	if (!xfs_alloc_fix_minleft(args))
+		goto not_found;
+
 	rlen = args->len;
 	ASSERT(args->agbno + rlen <= fend);
 	end = args->agbno + rlen;
+
 	/*
 	 * We are allocating agbno for rlen [agbno .. end]
 	 * Allocate/initialize a cursor for the by-size btree.
@@ -640,16 +637,25 @@
 		args->agno, XFS_BTNUM_CNT);
 	ASSERT(args->agbno + args->len <=
 		be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length));
-	if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur, fbno, flen,
-			args->agbno, args->len, XFSA_FIXUP_BNO_OK))) {
+	error = xfs_alloc_fixup_trees(cnt_cur, bno_cur, fbno, flen, args->agbno,
+				      args->len, XFSA_FIXUP_BNO_OK);
+	if (error) {
 		xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
 		goto error0;
 	}
+
 	xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
 	xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
 
-	trace_xfs_alloc_exact_done(args);
 	args->wasfromfl = 0;
+	trace_xfs_alloc_exact_done(args);
+	return 0;
+
+not_found:
+	/* Didn't find it, return null. */
+	xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
+	args->agbno = NULLAGBLOCK;
+	trace_xfs_alloc_exact_notfound(args);
 	return 0;
 
 error0:
@@ -659,6 +665,95 @@
 }
 
 /*
+ * Search the btree in a given direction via the search cursor and compare
+ * the records found against the good extent we've already found.
+ */
+STATIC int
+xfs_alloc_find_best_extent(
+	struct xfs_alloc_arg	*args,	/* allocation argument structure */
+	struct xfs_btree_cur	**gcur,	/* good cursor */
+	struct xfs_btree_cur	**scur,	/* searching cursor */
+	xfs_agblock_t		gdiff,	/* difference for search comparison */
+	xfs_agblock_t		*sbno,	/* extent found by search */
+	xfs_extlen_t		*slen,
+	xfs_extlen_t		*slena,	/* aligned length */
+	int			dir)	/* 0 = search right, 1 = search left */
+{
+	xfs_agblock_t		bno;
+	xfs_agblock_t		new;
+	xfs_agblock_t		sdiff;
+	int			error;
+	int			i;
+
+	/* The good extent is perfect, no need to search. */
+	if (!gdiff)
+		goto out_use_good;
+
+	/*
+	 * Look until we find a better one, run out of space or run off the end.
+	 */
+	do {
+		error = xfs_alloc_get_rec(*scur, sbno, slen, &i);
+		if (error)
+			goto error0;
+		XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
+		xfs_alloc_compute_aligned(*sbno, *slen, args->alignment,
+					  args->minlen, &bno, slena);
+
+		/*
+		 * The good extent is closer than this one.
+		 */
+		if (!dir) {
+			if (bno >= args->agbno + gdiff)
+				goto out_use_good;
+		} else {
+			if (bno <= args->agbno - gdiff)
+				goto out_use_good;
+		}
+
+		/*
+		 * Same distance, compare length and pick the best.
+		 */
+		if (*slena >= args->minlen) {
+			args->len = XFS_EXTLEN_MIN(*slena, args->maxlen);
+			xfs_alloc_fix_len(args);
+
+			sdiff = xfs_alloc_compute_diff(args->agbno, args->len,
+						       args->alignment, *sbno,
+						       *slen, &new);
+
+			/*
+			 * Choose closer size and invalidate other cursor.
+			 */
+			if (sdiff < gdiff)
+				goto out_use_search;
+			goto out_use_good;
+		}
+
+		if (!dir)
+			error = xfs_btree_increment(*scur, 0, &i);
+		else
+			error = xfs_btree_decrement(*scur, 0, &i);
+		if (error)
+			goto error0;
+	} while (i);
+
+out_use_good:
+	xfs_btree_del_cursor(*scur, XFS_BTREE_NOERROR);
+	*scur = NULL;
+	return 0;
+
+out_use_search:
+	xfs_btree_del_cursor(*gcur, XFS_BTREE_NOERROR);
+	*gcur = NULL;
+	return 0;
+
+error0:
+	/* caller invalidates cursors */
+	return error;
+}
+
+/*
  * Allocate a variable extent near bno in the allocation group agno.
  * Extent's length (returned in len) will be between minlen and maxlen,
  * and of the form k * prod + mod unless there's nothing that large.
@@ -925,203 +1020,45 @@
 			}
 		}
 	} while (bno_cur_lt || bno_cur_gt);
+
 	/*
 	 * Got both cursors still active, need to find better entry.
 	 */
 	if (bno_cur_lt && bno_cur_gt) {
-		/*
-		 * Left side is long enough, look for a right side entry.
-		 */
 		if (ltlena >= args->minlen) {
 			/*
-			 * Fix up the length.
+			 * Left side is good, look for a right side entry.
 			 */
 			args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen);
 			xfs_alloc_fix_len(args);
-			rlen = args->len;
-			ltdiff = xfs_alloc_compute_diff(args->agbno, rlen,
+			ltdiff = xfs_alloc_compute_diff(args->agbno, args->len,
 				args->alignment, ltbno, ltlen, &ltnew);
+
+			error = xfs_alloc_find_best_extent(args,
+						&bno_cur_lt, &bno_cur_gt,
+						ltdiff, &gtbno, &gtlen, &gtlena,
+						0 /* search right */);
+		} else {
+			ASSERT(gtlena >= args->minlen);
+
 			/*
-			 * Not perfect.
-			 */
-			if (ltdiff) {
-				/*
-				 * Look until we find a better one, run out of
-				 * space, or run off the end.
-				 */
-				while (bno_cur_lt && bno_cur_gt) {
-					if ((error = xfs_alloc_get_rec(
-							bno_cur_gt, &gtbno,
-							&gtlen, &i)))
-						goto error0;
-					XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
-					xfs_alloc_compute_aligned(gtbno, gtlen,
-						args->alignment, args->minlen,
-						&gtbnoa, &gtlena);
-					/*
-					 * The left one is clearly better.
-					 */
-					if (gtbnoa >= args->agbno + ltdiff) {
-						xfs_btree_del_cursor(
-							bno_cur_gt,
-							XFS_BTREE_NOERROR);
-						bno_cur_gt = NULL;
-						break;
-					}
-					/*
-					 * If we reach a big enough entry,
-					 * compare the two and pick the best.
-					 */
-					if (gtlena >= args->minlen) {
-						args->len =
-							XFS_EXTLEN_MIN(gtlena,
-								args->maxlen);
-						xfs_alloc_fix_len(args);
-						rlen = args->len;
-						gtdiff = xfs_alloc_compute_diff(
-							args->agbno, rlen,
-							args->alignment,
-							gtbno, gtlen, &gtnew);
-						/*
-						 * Right side is better.
-						 */
-						if (gtdiff < ltdiff) {
-							xfs_btree_del_cursor(
-								bno_cur_lt,
-								XFS_BTREE_NOERROR);
-							bno_cur_lt = NULL;
-						}
-						/*
-						 * Left side is better.
-						 */
-						else {
-							xfs_btree_del_cursor(
-								bno_cur_gt,
-								XFS_BTREE_NOERROR);
-							bno_cur_gt = NULL;
-						}
-						break;
-					}
-					/*
-					 * Fell off the right end.
-					 */
-					if ((error = xfs_btree_increment(
-							bno_cur_gt, 0, &i)))
-						goto error0;
-					if (!i) {
-						xfs_btree_del_cursor(
-							bno_cur_gt,
-							XFS_BTREE_NOERROR);
-						bno_cur_gt = NULL;
-						break;
-					}
-				}
-			}
-			/*
-			 * The left side is perfect, trash the right side.
-			 */
-			else {
-				xfs_btree_del_cursor(bno_cur_gt,
-						     XFS_BTREE_NOERROR);
-				bno_cur_gt = NULL;
-			}
-		}
-		/*
-		 * It's the right side that was found first, look left.
-		 */
-		else {
-			/*
-			 * Fix up the length.
+			 * Right side is good, look for a left side entry.
 			 */
 			args->len = XFS_EXTLEN_MIN(gtlena, args->maxlen);
 			xfs_alloc_fix_len(args);
-			rlen = args->len;
-			gtdiff = xfs_alloc_compute_diff(args->agbno, rlen,
+			gtdiff = xfs_alloc_compute_diff(args->agbno, args->len,
 				args->alignment, gtbno, gtlen, &gtnew);
-			/*
-			 * Right side entry isn't perfect.
-			 */
-			if (gtdiff) {
-				/*
-				 * Look until we find a better one, run out of
-				 * space, or run off the end.
-				 */
-				while (bno_cur_lt && bno_cur_gt) {
-					if ((error = xfs_alloc_get_rec(
-							bno_cur_lt, &ltbno,
-							&ltlen, &i)))
-						goto error0;
-					XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
-					xfs_alloc_compute_aligned(ltbno, ltlen,
-						args->alignment, args->minlen,
-						&ltbnoa, &ltlena);
-					/*
-					 * The right one is clearly better.
-					 */
-					if (ltbnoa <= args->agbno - gtdiff) {
-						xfs_btree_del_cursor(
-							bno_cur_lt,
-							XFS_BTREE_NOERROR);
-						bno_cur_lt = NULL;
-						break;
-					}
-					/*
-					 * If we reach a big enough entry,
-					 * compare the two and pick the best.
-					 */
-					if (ltlena >= args->minlen) {
-						args->len = XFS_EXTLEN_MIN(
-							ltlena, args->maxlen);
-						xfs_alloc_fix_len(args);
-						rlen = args->len;
-						ltdiff = xfs_alloc_compute_diff(
-							args->agbno, rlen,
-							args->alignment,
-							ltbno, ltlen, &ltnew);
-						/*
-						 * Left side is better.
-						 */
-						if (ltdiff < gtdiff) {
-							xfs_btree_del_cursor(
-								bno_cur_gt,
-								XFS_BTREE_NOERROR);
-							bno_cur_gt = NULL;
-						}
-						/*
-						 * Right side is better.
-						 */
-						else {
-							xfs_btree_del_cursor(
-								bno_cur_lt,
-								XFS_BTREE_NOERROR);
-							bno_cur_lt = NULL;
-						}
-						break;
-					}
-					/*
-					 * Fell off the left end.
-					 */
-					if ((error = xfs_btree_decrement(
-							bno_cur_lt, 0, &i)))
-						goto error0;
-					if (!i) {
-						xfs_btree_del_cursor(bno_cur_lt,
-							XFS_BTREE_NOERROR);
-						bno_cur_lt = NULL;
-						break;
-					}
-				}
-			}
-			/*
-			 * The right side is perfect, trash the left side.
-			 */
-			else {
-				xfs_btree_del_cursor(bno_cur_lt,
-					XFS_BTREE_NOERROR);
-				bno_cur_lt = NULL;
-			}
+
+			error = xfs_alloc_find_best_extent(args,
+						&bno_cur_gt, &bno_cur_lt,
+						gtdiff, &ltbno, &ltlen, &ltlena,
+						1 /* search left */);
 		}
+
+		if (error)
+			goto error0;
 	}
+
 	/*
 	 * If we couldn't get anything, give up.
 	 */
@@ -1130,6 +1067,7 @@
 		args->agbno = NULLAGBLOCK;
 		return 0;
 	}
+
 	/*
 	 * At this point we have selected a freespace entry, either to the
 	 * left or to the right.  If it's on the right, copy all the
@@ -1146,6 +1084,7 @@
 		j = 1;
 	} else
 		j = 0;
+
 	/*
 	 * Fix up the length and compute the useful address.
 	 */
diff --git a/fs/xfs/xfs_attr_leaf.c b/fs/xfs/xfs_attr_leaf.c
index a6cff8e..71e90dc2 100644
--- a/fs/xfs/xfs_attr_leaf.c
+++ b/fs/xfs/xfs_attr_leaf.c
@@ -637,7 +637,7 @@
 	 * It didn't all fit, so we have to sort everything on hashval.
 	 */
 	sbsize = sf->hdr.count * sizeof(*sbuf);
-	sbp = sbuf = kmem_alloc(sbsize, KM_SLEEP);
+	sbp = sbuf = kmem_alloc(sbsize, KM_SLEEP | KM_NOFS);
 
 	/*
 	 * Scan the attribute list for the rest of the entries, storing
@@ -2386,7 +2386,7 @@
 				args.dp = context->dp;
 				args.whichfork = XFS_ATTR_FORK;
 				args.valuelen = valuelen;
-				args.value = kmem_alloc(valuelen, KM_SLEEP);
+				args.value = kmem_alloc(valuelen, KM_SLEEP | KM_NOFS);
 				args.rmtblkno = be32_to_cpu(name_rmt->valueblk);
 				args.rmtblkcnt = XFS_B_TO_FSB(args.dp->i_mount, valuelen);
 				retval = xfs_attr_rmtval_get(&args);
diff --git a/fs/xfs/xfs_btree.c b/fs/xfs/xfs_btree.c
index 04f9cca..2f9e97c 100644
--- a/fs/xfs/xfs_btree.c
+++ b/fs/xfs/xfs_btree.c
@@ -634,9 +634,8 @@
 		return error;
 	}
 	ASSERT(!bp || !XFS_BUF_GETERROR(bp));
-	if (bp != NULL) {
+	if (bp)
 		XFS_BUF_SET_VTYPE_REF(bp, B_FS_MAP, refval);
-	}
 	*bpp = bp;
 	return 0;
 }
@@ -944,13 +943,13 @@
 	switch (cur->bc_btnum) {
 	case XFS_BTNUM_BNO:
 	case XFS_BTNUM_CNT:
-		XFS_BUF_SET_VTYPE_REF(*bpp, B_FS_MAP, XFS_ALLOC_BTREE_REF);
+		XFS_BUF_SET_VTYPE_REF(bp, B_FS_MAP, XFS_ALLOC_BTREE_REF);
 		break;
 	case XFS_BTNUM_INO:
-		XFS_BUF_SET_VTYPE_REF(*bpp, B_FS_INOMAP, XFS_INO_BTREE_REF);
+		XFS_BUF_SET_VTYPE_REF(bp, B_FS_INOMAP, XFS_INO_BTREE_REF);
 		break;
 	case XFS_BTNUM_BMAP:
-		XFS_BUF_SET_VTYPE_REF(*bpp, B_FS_MAP, XFS_BMAP_BTREE_REF);
+		XFS_BUF_SET_VTYPE_REF(bp, B_FS_MAP, XFS_BMAP_BTREE_REF);
 		break;
 	default:
 		ASSERT(0);
diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c
index 2686d0d..ed2b65f 100644
--- a/fs/xfs/xfs_buf_item.c
+++ b/fs/xfs/xfs_buf_item.c
@@ -142,7 +142,7 @@
 #endif
 
 STATIC void	xfs_buf_error_relse(xfs_buf_t *bp);
-STATIC void	xfs_buf_do_callbacks(xfs_buf_t *bp, xfs_log_item_t *lip);
+STATIC void	xfs_buf_do_callbacks(struct xfs_buf *bp);
 
 /*
  * This returns the number of log iovecs needed to log the
@@ -450,7 +450,7 @@
 		 * xfs_trans_ail_delete() drops the AIL lock.
 		 */
 		if (bip->bli_flags & XFS_BLI_STALE_INODE) {
-			xfs_buf_do_callbacks(bp, (xfs_log_item_t *)bip);
+			xfs_buf_do_callbacks(bp);
 			XFS_BUF_SET_FSPRIVATE(bp, NULL);
 			XFS_BUF_CLR_IODONE_FUNC(bp);
 		} else {
@@ -918,15 +918,26 @@
 	XFS_BUF_SET_IODONE_FUNC(bp, xfs_buf_iodone_callbacks);
 }
 
+/*
+ * We can have many callbacks on a buffer. Running the callbacks individually
+ * can cause a lot of contention on the AIL lock, so we allow for a single
+ * callback to be able to scan the remaining lip->li_bio_list for other items
+ * of the same type and callback to be processed in the first call.
+ *
+ * As a result, the loop walking the callback list below will also modify the
+ * list. It removes the first item from the list and then runs the callback.
+ * The loop then restarts from the new head of the list. This allows the
+ * callback to scan and modify the list attached to the buffer and we don't
+ * have to care about maintaining a next item pointer.
+ */
 STATIC void
 xfs_buf_do_callbacks(
-	xfs_buf_t	*bp,
-	xfs_log_item_t	*lip)
+	struct xfs_buf		*bp)
 {
-	xfs_log_item_t	*nlip;
+	struct xfs_log_item	*lip;
 
-	while (lip != NULL) {
-		nlip = lip->li_bio_list;
+	while ((lip = XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *)) != NULL) {
+		XFS_BUF_SET_FSPRIVATE(bp, lip->li_bio_list);
 		ASSERT(lip->li_cb != NULL);
 		/*
 		 * Clear the next pointer so we don't have any
@@ -936,7 +947,6 @@
 		 */
 		lip->li_bio_list = NULL;
 		lip->li_cb(bp, lip);
-		lip = nlip;
 	}
 }
 
@@ -970,7 +980,7 @@
 			ASSERT(XFS_BUF_TARGET(bp) == mp->m_ddev_targp);
 			XFS_BUF_SUPER_STALE(bp);
 			trace_xfs_buf_item_iodone(bp, _RET_IP_);
-			xfs_buf_do_callbacks(bp, lip);
+			xfs_buf_do_callbacks(bp);
 			XFS_BUF_SET_FSPRIVATE(bp, NULL);
 			XFS_BUF_CLR_IODONE_FUNC(bp);
 			xfs_buf_ioend(bp, 0);
@@ -1029,7 +1039,7 @@
 		return;
 	}
 
-	xfs_buf_do_callbacks(bp, lip);
+	xfs_buf_do_callbacks(bp);
 	XFS_BUF_SET_FSPRIVATE(bp, NULL);
 	XFS_BUF_CLR_IODONE_FUNC(bp);
 	xfs_buf_ioend(bp, 0);
@@ -1063,7 +1073,7 @@
 	 * We have to unpin the pinned buffers so do the
 	 * callbacks.
 	 */
-	xfs_buf_do_callbacks(bp, lip);
+	xfs_buf_do_callbacks(bp);
 	XFS_BUF_SET_FSPRIVATE(bp, NULL);
 	XFS_BUF_CLR_IODONE_FUNC(bp);
 	XFS_BUF_SET_BRELSE_FUNC(bp,NULL);
diff --git a/fs/xfs/xfs_buf_item.h b/fs/xfs/xfs_buf_item.h
index 0e2ed43..b6ecd20 100644
--- a/fs/xfs/xfs_buf_item.h
+++ b/fs/xfs/xfs_buf_item.h
@@ -105,17 +105,6 @@
 	xfs_buf_log_format_t	bli_format;	/* in-log header */
 } xfs_buf_log_item_t;
 
-/*
- * This structure is used during recovery to record the buf log
- * items which have been canceled and should not be replayed.
- */
-typedef struct xfs_buf_cancel {
-	xfs_daddr_t		bc_blkno;
-	uint			bc_len;
-	int			bc_refcount;
-	struct xfs_buf_cancel	*bc_next;
-} xfs_buf_cancel_t;
-
 void	xfs_buf_item_init(struct xfs_buf *, struct xfs_mount *);
 void	xfs_buf_item_relse(struct xfs_buf *);
 void	xfs_buf_item_log(xfs_buf_log_item_t *, uint, uint);
diff --git a/fs/xfs/xfs_extfree_item.c b/fs/xfs/xfs_extfree_item.c
index a55e687..75f2ef6 100644
--- a/fs/xfs/xfs_extfree_item.c
+++ b/fs/xfs/xfs_extfree_item.c
@@ -48,6 +48,28 @@
 }
 
 /*
+ * Freeing the efi requires that we remove it from the AIL if it has already
+ * been placed there. However, the EFI may not yet have been placed in the AIL
+ * when called by xfs_efi_release() from EFD processing due to the ordering of
+ * committed vs unpin operations in bulk insert operations. Hence the
+ * test_and_clear_bit(XFS_EFI_COMMITTED) to ensure only the last caller frees
+ * the EFI.
+ */
+STATIC void
+__xfs_efi_release(
+	struct xfs_efi_log_item	*efip)
+{
+	struct xfs_ail		*ailp = efip->efi_item.li_ailp;
+
+	if (!test_and_clear_bit(XFS_EFI_COMMITTED, &efip->efi_flags)) {
+		spin_lock(&ailp->xa_lock);
+		/* xfs_trans_ail_delete() drops the AIL lock. */
+		xfs_trans_ail_delete(ailp, &efip->efi_item);
+		xfs_efi_item_free(efip);
+	}
+}
+
+/*
  * This returns the number of iovecs needed to log the given efi item.
  * We only need 1 iovec for an efi item.  It just logs the efi_log_format
  * structure.
@@ -74,7 +96,8 @@
 	struct xfs_efi_log_item	*efip = EFI_ITEM(lip);
 	uint			size;
 
-	ASSERT(efip->efi_next_extent == efip->efi_format.efi_nextents);
+	ASSERT(atomic_read(&efip->efi_next_extent) ==
+				efip->efi_format.efi_nextents);
 
 	efip->efi_format.efi_type = XFS_LI_EFI;
 
@@ -99,10 +122,12 @@
 }
 
 /*
- * While EFIs cannot really be pinned, the unpin operation is the
- * last place at which the EFI is manipulated during a transaction.
- * Here we coordinate with xfs_efi_cancel() to determine who gets to
- * free the EFI.
+ * While EFIs cannot really be pinned, the unpin operation is the last place at
+ * which the EFI is manipulated during a transaction.  If we are being asked to
+ * remove the EFI it's because the transaction has been cancelled and by
+ * definition that means the EFI cannot be in the AIL so remove it from the
+ * transaction and free it.  Otherwise coordinate with xfs_efi_release() (via
+ * XFS_EFI_COMMITTED) to determine who gets to free the EFI.
  */
 STATIC void
 xfs_efi_item_unpin(
@@ -110,20 +135,14 @@
 	int			remove)
 {
 	struct xfs_efi_log_item	*efip = EFI_ITEM(lip);
-	struct xfs_ail		*ailp = lip->li_ailp;
 
-	spin_lock(&ailp->xa_lock);
-	if (efip->efi_flags & XFS_EFI_CANCELED) {
-		if (remove)
-			xfs_trans_del_item(lip);
-
-		/* xfs_trans_ail_delete() drops the AIL lock. */
-		xfs_trans_ail_delete(ailp, lip);
+	if (remove) {
+		ASSERT(!(lip->li_flags & XFS_LI_IN_AIL));
+		xfs_trans_del_item(lip);
 		xfs_efi_item_free(efip);
-	} else {
-		efip->efi_flags |= XFS_EFI_COMMITTED;
-		spin_unlock(&ailp->xa_lock);
+		return;
 	}
+	__xfs_efi_release(efip);
 }
 
 /*
@@ -152,16 +171,20 @@
 }
 
 /*
- * The EFI is logged only once and cannot be moved in the log, so
- * simply return the lsn at which it's been logged.  The canceled
- * flag is not paid any attention here.  Checking for that is delayed
- * until the EFI is unpinned.
+ * The EFI is logged only once and cannot be moved in the log, so simply return
+ * the lsn at which it's been logged.  For bulk transaction committed
+ * processing, the EFI may be processed but not yet unpinned prior to the EFD
+ * being processed. Set the XFS_EFI_COMMITTED flag so this case can be detected
+ * when processing the EFD.
  */
 STATIC xfs_lsn_t
 xfs_efi_item_committed(
 	struct xfs_log_item	*lip,
 	xfs_lsn_t		lsn)
 {
+	struct xfs_efi_log_item	*efip = EFI_ITEM(lip);
+
+	set_bit(XFS_EFI_COMMITTED, &efip->efi_flags);
 	return lsn;
 }
 
@@ -230,6 +253,7 @@
 	xfs_log_item_init(mp, &efip->efi_item, XFS_LI_EFI, &xfs_efi_item_ops);
 	efip->efi_format.efi_nextents = nextents;
 	efip->efi_format.efi_id = (__psint_t)(void*)efip;
+	atomic_set(&efip->efi_next_extent, 0);
 
 	return efip;
 }
@@ -289,37 +313,18 @@
 }
 
 /*
- * This is called by the efd item code below to release references to
- * the given efi item.  Each efd calls this with the number of
- * extents that it has logged, and when the sum of these reaches
- * the total number of extents logged by this efi item we can free
- * the efi item.
- *
- * Freeing the efi item requires that we remove it from the AIL.
- * We'll use the AIL lock to protect our counters as well as
- * the removal from the AIL.
+ * This is called by the efd item code below to release references to the given
+ * efi item.  Each efd calls this with the number of extents that it has
+ * logged, and when the sum of these reaches the total number of extents logged
+ * by this efi item we can free the efi item.
  */
 void
 xfs_efi_release(xfs_efi_log_item_t	*efip,
 		uint			nextents)
 {
-	struct xfs_ail		*ailp = efip->efi_item.li_ailp;
-	int			extents_left;
-
-	ASSERT(efip->efi_next_extent > 0);
-	ASSERT(efip->efi_flags & XFS_EFI_COMMITTED);
-
-	spin_lock(&ailp->xa_lock);
-	ASSERT(efip->efi_next_extent >= nextents);
-	efip->efi_next_extent -= nextents;
-	extents_left = efip->efi_next_extent;
-	if (extents_left == 0) {
-		/* xfs_trans_ail_delete() drops the AIL lock. */
-		xfs_trans_ail_delete(ailp, (xfs_log_item_t *)efip);
-		xfs_efi_item_free(efip);
-	} else {
-		spin_unlock(&ailp->xa_lock);
-	}
+	ASSERT(atomic_read(&efip->efi_next_extent) >= nextents);
+	if (atomic_sub_and_test(nextents, &efip->efi_next_extent))
+		__xfs_efi_release(efip);
 }
 
 static inline struct xfs_efd_log_item *EFD_ITEM(struct xfs_log_item *lip)
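A note on the pattern above: the EFI/EFD rework drops the AIL-lock-protected extent counter in favour of an atomic counter, where the xfs_efi_release() caller that drops the count to zero is the one that frees the item. Below is a minimal standalone sketch of that last-reference idiom using C11 atomics in place of the kernel atomic_t API; all names are illustrative, not kernel symbols.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct efi_like {
	atomic_int	next_extent;	/* outstanding extents still to be released */
};

/* Called once per "EFD" with the number of extents it completed. */
static void efi_like_release(struct efi_like *efi, int nextents)
{
	/*
	 * atomic_fetch_sub() returns the old value; the caller that takes the
	 * count to zero is the single caller allowed to free the structure,
	 * so no external lock is needed around the counter.
	 */
	if (atomic_fetch_sub(&efi->next_extent, nextents) == nextents)
		free(efi);
}

int main(void)
{
	struct efi_like *efi = malloc(sizeof(*efi));

	atomic_init(&efi->next_extent, 3);
	efi_like_release(efi, 1);	/* two extents still outstanding */
	efi_like_release(efi, 2);	/* last release frees the item */
	puts("done");
	return 0;
}
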
diff --git a/fs/xfs/xfs_extfree_item.h b/fs/xfs/xfs_extfree_item.h
index 0d22c56..375f68e 100644
--- a/fs/xfs/xfs_extfree_item.h
+++ b/fs/xfs/xfs_extfree_item.h
@@ -111,11 +111,10 @@
 #define	XFS_EFI_MAX_FAST_EXTENTS	16
 
 /*
- * Define EFI flags.
+ * Define EFI flag bits. Manipulated by set/clear/test_bit operators.
  */
-#define	XFS_EFI_RECOVERED	0x1
-#define	XFS_EFI_COMMITTED	0x2
-#define	XFS_EFI_CANCELED	0x4
+#define	XFS_EFI_RECOVERED	1
+#define	XFS_EFI_COMMITTED	2
 
 /*
  * This is the "extent free intention" log item.  It is used
@@ -125,8 +124,8 @@
  */
 typedef struct xfs_efi_log_item {
 	xfs_log_item_t		efi_item;
-	uint			efi_flags;	/* misc flags */
-	uint			efi_next_extent;
+	atomic_t		efi_next_extent;
+	unsigned long		efi_flags;	/* misc flags */
 	xfs_efi_log_format_t	efi_format;
 } xfs_efi_log_item_t;
 
diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c
index a7c116e..f56d30e 100644
--- a/fs/xfs/xfs_fsops.c
+++ b/fs/xfs/xfs_fsops.c
@@ -374,6 +374,7 @@
 		mp->m_maxicount = icount << mp->m_sb.sb_inopblog;
 	} else
 		mp->m_maxicount = 0;
+	xfs_set_low_space_thresholds(mp);
 
 	/* update secondary superblocks. */
 	for (agno = 1; agno < nagcount; agno++) {
diff --git a/fs/xfs/xfs_iget.c b/fs/xfs/xfs_iget.c
index d7de5a3..cb9b6d1 100644
--- a/fs/xfs/xfs_iget.c
+++ b/fs/xfs/xfs_iget.c
@@ -43,6 +43,17 @@
 
 
 /*
+ * Define xfs inode iolock lockdep classes. We need to ensure that all active
+ * inodes are considered the same for lockdep purposes, including inodes that
+ * are recycled through the XFS_IRECLAIMABLE state. This is the only way to
+ * guarantee the locks are considered the same when there are multiple lock
+ * initialisation sites. Also, define a reclaimable inode class so it is
+ * obvious in lockdep reports which class the report is against.
+ */
+static struct lock_class_key xfs_iolock_active;
+struct lock_class_key xfs_iolock_reclaimable;
+
+/*
  * Allocate and initialise an xfs_inode.
  */
 STATIC struct xfs_inode *
@@ -69,8 +80,11 @@
 	ASSERT(atomic_read(&ip->i_pincount) == 0);
 	ASSERT(!spin_is_locked(&ip->i_flags_lock));
 	ASSERT(completion_done(&ip->i_flush));
+	ASSERT(ip->i_ino == 0);
 
 	mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino);
+	lockdep_set_class_and_name(&ip->i_iolock.mr_lock,
+			&xfs_iolock_active, "xfs_iolock_active");
 
 	/* initialise the xfs inode */
 	ip->i_ino = ino;
@@ -85,9 +99,6 @@
 	ip->i_size = 0;
 	ip->i_new_size = 0;
 
-	/* prevent anyone from using this yet */
-	VFS_I(ip)->i_state = I_NEW;
-
 	return ip;
 }
 
@@ -145,7 +156,18 @@
 	ASSERT(!spin_is_locked(&ip->i_flags_lock));
 	ASSERT(completion_done(&ip->i_flush));
 
-	call_rcu(&ip->i_vnode.i_rcu, xfs_inode_free_callback);
+	/*
+	 * Because we use RCU freeing we need to ensure the inode always
+	 * appears to be reclaimed with an invalid inode number when in the
+	 * free state. The ip->i_flags_lock provides the barrier against lookup
+	 * races.
+	 */
+	spin_lock(&ip->i_flags_lock);
+	ip->i_flags = XFS_IRECLAIM;
+	ip->i_ino = 0;
+	spin_unlock(&ip->i_flags_lock);
+
+	call_rcu(&VFS_I(ip)->i_rcu, xfs_inode_free_callback);
 }
 
 /*
@@ -155,14 +177,29 @@
 xfs_iget_cache_hit(
 	struct xfs_perag	*pag,
 	struct xfs_inode	*ip,
+	xfs_ino_t		ino,
 	int			flags,
-	int			lock_flags) __releases(pag->pag_ici_lock)
+	int			lock_flags) __releases(RCU)
 {
 	struct inode		*inode = VFS_I(ip);
 	struct xfs_mount	*mp = ip->i_mount;
 	int			error;
 
+	/*
+	 * check for re-use of an inode within an RCU grace period due to the
+	 * radix tree nodes not being updated yet. We monitor for this by
+	 * setting the inode number to zero before freeing the inode structure.
+	 * If the inode has been reallocated and set up, then the inode number
+	 * will not match, so check for that, too.
+	 */
 	spin_lock(&ip->i_flags_lock);
+	if (ip->i_ino != ino) {
+		trace_xfs_iget_skip(ip);
+		XFS_STATS_INC(xs_ig_frecycle);
+		error = EAGAIN;
+		goto out_error;
+	}
+
 
 	/*
 	 * If we are racing with another cache hit that is currently
@@ -205,7 +242,7 @@
 		ip->i_flags |= XFS_IRECLAIM;
 
 		spin_unlock(&ip->i_flags_lock);
-		read_unlock(&pag->pag_ici_lock);
+		rcu_read_unlock();
 
 		error = -inode_init_always(mp->m_super, inode);
 		if (error) {
@@ -213,7 +250,7 @@
 			 * Re-initializing the inode failed, and we are in deep
 			 * trouble.  Try to re-add it to the reclaim list.
 			 */
-			read_lock(&pag->pag_ici_lock);
+			rcu_read_lock();
 			spin_lock(&ip->i_flags_lock);
 
 			ip->i_flags &= ~XFS_INEW;
@@ -223,14 +260,20 @@
 			goto out_error;
 		}
 
-		write_lock(&pag->pag_ici_lock);
+		spin_lock(&pag->pag_ici_lock);
 		spin_lock(&ip->i_flags_lock);
 		ip->i_flags &= ~(XFS_IRECLAIMABLE | XFS_IRECLAIM);
 		ip->i_flags |= XFS_INEW;
 		__xfs_inode_clear_reclaim_tag(mp, pag, ip);
 		inode->i_state = I_NEW;
+
+		ASSERT(!rwsem_is_locked(&ip->i_iolock.mr_lock));
+		mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino);
+		lockdep_set_class_and_name(&ip->i_iolock.mr_lock,
+				&xfs_iolock_active, "xfs_iolock_active");
+
 		spin_unlock(&ip->i_flags_lock);
-		write_unlock(&pag->pag_ici_lock);
+		spin_unlock(&pag->pag_ici_lock);
 	} else {
 		/* If the VFS inode is being torn down, pause and try again. */
 		if (!igrab(inode)) {
@@ -241,7 +284,7 @@
 
 		/* We've got a live one. */
 		spin_unlock(&ip->i_flags_lock);
-		read_unlock(&pag->pag_ici_lock);
+		rcu_read_unlock();
 		trace_xfs_iget_hit(ip);
 	}
 
@@ -255,7 +298,7 @@
 
 out_error:
 	spin_unlock(&ip->i_flags_lock);
-	read_unlock(&pag->pag_ici_lock);
+	rcu_read_unlock();
 	return error;
 }
 
@@ -308,7 +351,7 @@
 			BUG();
 	}
 
-	write_lock(&pag->pag_ici_lock);
+	spin_lock(&pag->pag_ici_lock);
 
 	/* insert the new inode */
 	error = radix_tree_insert(&pag->pag_ici_root, agino, ip);
@@ -323,14 +366,14 @@
 	ip->i_udquot = ip->i_gdquot = NULL;
 	xfs_iflags_set(ip, XFS_INEW);
 
-	write_unlock(&pag->pag_ici_lock);
+	spin_unlock(&pag->pag_ici_lock);
 	radix_tree_preload_end();
 
 	*ipp = ip;
 	return 0;
 
 out_preload_end:
-	write_unlock(&pag->pag_ici_lock);
+	spin_unlock(&pag->pag_ici_lock);
 	radix_tree_preload_end();
 	if (lock_flags)
 		xfs_iunlock(ip, lock_flags);
@@ -377,7 +420,7 @@
 	xfs_agino_t	agino;
 
 	/* reject inode numbers outside existing AGs */
-	if (XFS_INO_TO_AGNO(mp, ino) >= mp->m_sb.sb_agcount)
+	if (!ino || XFS_INO_TO_AGNO(mp, ino) >= mp->m_sb.sb_agcount)
 		return EINVAL;
 
 	/* get the perag structure and ensure that it's inode capable */
@@ -386,15 +429,15 @@
 
 again:
 	error = 0;
-	read_lock(&pag->pag_ici_lock);
+	rcu_read_lock();
 	ip = radix_tree_lookup(&pag->pag_ici_root, agino);
 
 	if (ip) {
-		error = xfs_iget_cache_hit(pag, ip, flags, lock_flags);
+		error = xfs_iget_cache_hit(pag, ip, ino, flags, lock_flags);
 		if (error)
 			goto out_error_or_again;
 	} else {
-		read_unlock(&pag->pag_ici_lock);
+		rcu_read_unlock();
 		XFS_STATS_INC(xs_ig_missed);
 
 		error = xfs_iget_cache_miss(mp, pag, tp, ino, &ip,
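The xfs_iget.c hunks above convert the per-AG inode cache lookup from a rwlock to rcu_read_lock(), so a lookup can now return an inode that was freed (and possibly reused) inside the current grace period; the hit path therefore rechecks the inode number under ip->i_flags_lock and backs off with EAGAIN on a mismatch. A standalone sketch of that revalidate-under-the-object-lock pattern follows; the rcu_read_lock()/unlock() stubs merely stand in for the kernel API and the structure is illustrative, not kernel code.

#include <errno.h>
#include <pthread.h>

/* Stubs for the kernel RCU read-side primitives used by the real code. */
#define rcu_read_lock()		do { } while (0)
#define rcu_read_unlock()	do { } while (0)

struct cached_inode {
	pthread_spinlock_t	i_flags_lock;
	unsigned long		i_ino;		/* zeroed when the object is freed */
};

/*
 * Returns 0 on a valid hit, EAGAIN if the object was recycled between the
 * radix-tree lookup and this check; the caller drops RCU and retries.
 */
int cache_hit(struct cached_inode *ip, unsigned long want_ino)
{
	int error = 0;

	pthread_spin_lock(&ip->i_flags_lock);
	if (ip->i_ino != want_ino) {
		error = EAGAIN;
		goto out_unlock;
	}
	/* ... the real code takes references / clears reclaim state here ... */
out_unlock:
	pthread_spin_unlock(&ip->i_flags_lock);
	rcu_read_unlock();
	return error;
}
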
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index 108c7a0..be7cf62 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -887,7 +887,7 @@
 	 * around for a while.  This helps to keep recently accessed
 	 * meta-data in-core longer.
 	 */
-	XFS_BUF_SET_REF(bp, XFS_INO_REF);
+	xfs_buf_set_ref(bp, XFS_INO_REF);
 
 	/*
 	 * Use xfs_trans_brelse() to release the buffer containing the
@@ -2000,17 +2000,33 @@
 		 */
 		for (i = 0; i < ninodes; i++) {
 retry:
-			read_lock(&pag->pag_ici_lock);
+			rcu_read_lock();
 			ip = radix_tree_lookup(&pag->pag_ici_root,
 					XFS_INO_TO_AGINO(mp, (inum + i)));
 
-			/* Inode not in memory or stale, nothing to do */
-			if (!ip || xfs_iflags_test(ip, XFS_ISTALE)) {
-				read_unlock(&pag->pag_ici_lock);
+			/* Inode not in memory, nothing to do */
+			if (!ip) {
+				rcu_read_unlock();
 				continue;
 			}
 
 			/*
+			 * because this is an RCU protected lookup, we could
+			 * find a recently freed or even reallocated inode
+			 * during the lookup. We need to check under the
+			 * i_flags_lock for a valid inode here. Skip it if it
+			 * is not valid, the wrong inode or stale.
+			 */
+			spin_lock(&ip->i_flags_lock);
+			if (ip->i_ino != inum + i ||
+			    __xfs_iflags_test(ip, XFS_ISTALE)) {
+				spin_unlock(&ip->i_flags_lock);
+				rcu_read_unlock();
+				continue;
+			}
+			spin_unlock(&ip->i_flags_lock);
+
+			/*
 			 * Don't try to lock/unlock the current inode, but we
 			 * _cannot_ skip the other inodes that we did not find
 			 * in the list attached to the buffer and are not
@@ -2019,11 +2035,11 @@
 			 */
 			if (ip != free_ip &&
 			    !xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) {
-				read_unlock(&pag->pag_ici_lock);
+				rcu_read_unlock();
 				delay(1);
 				goto retry;
 			}
-			read_unlock(&pag->pag_ici_lock);
+			rcu_read_unlock();
 
 			xfs_iflock(ip);
 			xfs_iflags_set(ip, XFS_ISTALE);
@@ -2629,7 +2645,7 @@
 
 	mask = ~(((XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog)) - 1);
 	first_index = XFS_INO_TO_AGINO(mp, ip->i_ino) & mask;
-	read_lock(&pag->pag_ici_lock);
+	rcu_read_lock();
 	/* really need a gang lookup range call here */
 	nr_found = radix_tree_gang_lookup(&pag->pag_ici_root, (void**)ilist,
 					first_index, inodes_per_cluster);
@@ -2640,9 +2656,21 @@
 		iq = ilist[i];
 		if (iq == ip)
 			continue;
-		/* if the inode lies outside this cluster, we're done. */
-		if ((XFS_INO_TO_AGINO(mp, iq->i_ino) & mask) != first_index)
-			break;
+
+		/*
+		 * because this is an RCU protected lookup, we could find a
+		 * recently freed or even reallocated inode during the lookup.
+		 * We need to check under the i_flags_lock for a valid inode
+		 * here. Skip it if it is not valid or the wrong inode.
+		 */
+		spin_lock(&iq->i_flags_lock);
+		if (!iq->i_ino ||
+		    (XFS_INO_TO_AGINO(mp, iq->i_ino) & mask) != first_index) {
+			spin_unlock(&iq->i_flags_lock);
+			continue;
+		}
+		spin_unlock(&iq->i_flags_lock);
+
 		/*
 		 * Do an un-protected check to see if the inode is dirty and
 		 * is a candidate for flushing.  These checks will be repeated
@@ -2692,7 +2720,7 @@
 	}
 
 out_free:
-	read_unlock(&pag->pag_ici_lock);
+	rcu_read_unlock();
 	kmem_free(ilist);
 out_put:
 	xfs_perag_put(pag);
@@ -2704,7 +2732,7 @@
 	 * Corruption detected in the clustering loop.  Invalidate the
 	 * inode buffer and shut down the filesystem.
 	 */
-	read_unlock(&pag->pag_ici_lock);
+	rcu_read_unlock();
 	/*
 	 * Clean up the buffer.  If it was B_DELWRI, just release it --
 	 * brelse can handle it with no problems.  If not, shut down the
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h
index fb2ca2e..5c95fa8 100644
--- a/fs/xfs/xfs_inode.h
+++ b/fs/xfs/xfs_inode.h
@@ -376,12 +376,13 @@
 /*
  * In-core inode flags.
  */
-#define XFS_IRECLAIM    0x0001  /* we have started reclaiming this inode    */
-#define XFS_ISTALE	0x0002	/* inode has been staled */
-#define XFS_IRECLAIMABLE 0x0004 /* inode can be reclaimed */
-#define XFS_INEW	0x0008	/* inode has just been allocated */
-#define XFS_IFILESTREAM	0x0010	/* inode is in a filestream directory */
-#define XFS_ITRUNCATED	0x0020	/* truncated down so flush-on-close */
+#define XFS_IRECLAIM		0x0001  /* started reclaiming this inode */
+#define XFS_ISTALE		0x0002	/* inode has been staled */
+#define XFS_IRECLAIMABLE	0x0004	/* inode can be reclaimed */
+#define XFS_INEW		0x0008	/* inode has just been allocated */
+#define XFS_IFILESTREAM		0x0010	/* inode is in a filestream directory */
+#define XFS_ITRUNCATED		0x0020	/* truncated down so flush-on-close */
+#define XFS_IDIRTY_RELEASE	0x0040	/* dirty release already seen */
 
 /*
  * Flags for inode locking.
@@ -438,6 +439,8 @@
 #define XFS_IOLOCK_DEP(flags)	(((flags) & XFS_IOLOCK_DEP_MASK) >> XFS_IOLOCK_SHIFT)
 #define XFS_ILOCK_DEP(flags)	(((flags) & XFS_ILOCK_DEP_MASK) >> XFS_ILOCK_SHIFT)
 
+extern struct lock_class_key xfs_iolock_reclaimable;
+
 /*
  * Flags for xfs_itruncate_start().
  */
diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c
index 7c8d30c..fd4f398 100644
--- a/fs/xfs/xfs_inode_item.c
+++ b/fs/xfs/xfs_inode_item.c
@@ -842,15 +842,64 @@
  * flushed to disk.  It is responsible for removing the inode item
  * from the AIL if it has not been re-logged, and unlocking the inode's
  * flush lock.
+ *
+ * To reduce AIL lock traffic as much as possible, we scan the buffer log item
+ * list for other inodes that will run this function. We remove them from the
+ * buffer list so we can process all the inode IO completions in one AIL lock
+ * traversal.
  */
 void
 xfs_iflush_done(
 	struct xfs_buf		*bp,
 	struct xfs_log_item	*lip)
 {
-	struct xfs_inode_log_item *iip = INODE_ITEM(lip);
-	xfs_inode_t		*ip = iip->ili_inode;
+	struct xfs_inode_log_item *iip;
+	struct xfs_log_item	*blip;
+	struct xfs_log_item	*next;
+	struct xfs_log_item	*prev;
 	struct xfs_ail		*ailp = lip->li_ailp;
+	int			need_ail = 0;
+
+	/*
+	 * Scan the buffer IO completions for other inodes being completed and
+	 * attach them to the current inode log item.
+	 */
+	blip = XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *);
+	prev = NULL;
+	while (blip != NULL) {
+		if (blip->li_cb != xfs_iflush_done) {
+			prev = blip;
+			blip = blip->li_bio_list;
+			continue;
+		}
+
+		/* remove from list */
+		next = blip->li_bio_list;
+		if (!prev) {
+			XFS_BUF_SET_FSPRIVATE(bp, next);
+		} else {
+			prev->li_bio_list = next;
+		}
+
+		/* add to current list */
+		blip->li_bio_list = lip->li_bio_list;
+		lip->li_bio_list = blip;
+
+		/*
+		 * while we have the item, do the unlocked check for needing
+		 * the AIL lock.
+		 */
+		iip = INODE_ITEM(blip);
+		if (iip->ili_logged && blip->li_lsn == iip->ili_flush_lsn)
+			need_ail++;
+
+		blip = next;
+	}
+
+	/* make sure we capture the state of the initial inode. */
+	iip = INODE_ITEM(lip);
+	if (iip->ili_logged && lip->li_lsn == iip->ili_flush_lsn)
+		need_ail++;
 
 	/*
 	 * We only want to pull the item from the AIL if it is
@@ -861,28 +910,37 @@
 	 * the lock since it's cheaper, and then we recheck while
 	 * holding the lock before removing the inode from the AIL.
 	 */
-	if (iip->ili_logged && lip->li_lsn == iip->ili_flush_lsn) {
+	if (need_ail) {
+		struct xfs_log_item *log_items[need_ail];
+		int i = 0;
 		spin_lock(&ailp->xa_lock);
-		if (lip->li_lsn == iip->ili_flush_lsn) {
-			/* xfs_trans_ail_delete() drops the AIL lock. */
-			xfs_trans_ail_delete(ailp, lip);
-		} else {
-			spin_unlock(&ailp->xa_lock);
+		for (blip = lip; blip; blip = blip->li_bio_list) {
+			iip = INODE_ITEM(blip);
+			if (iip->ili_logged &&
+			    blip->li_lsn == iip->ili_flush_lsn) {
+				log_items[i++] = blip;
+			}
+			ASSERT(i <= need_ail);
 		}
+		/* xfs_trans_ail_delete_bulk() drops the AIL lock. */
+		xfs_trans_ail_delete_bulk(ailp, log_items, i);
 	}
 
-	iip->ili_logged = 0;
 
 	/*
-	 * Clear the ili_last_fields bits now that we know that the
-	 * data corresponding to them is safely on disk.
+	 * clean up and unlock the flush lock now we are done. We can clear the
+	 * ili_last_fields bits now that we know that the data corresponding to
+	 * them is safely on disk.
 	 */
-	iip->ili_last_fields = 0;
+	for (blip = lip; blip; blip = next) {
+		next = blip->li_bio_list;
+		blip->li_bio_list = NULL;
 
-	/*
-	 * Release the inode's flush lock since we're done with it.
-	 */
-	xfs_ifunlock(ip);
+		iip = INODE_ITEM(blip);
+		iip->ili_logged = 0;
+		iip->ili_last_fields = 0;
+		xfs_ifunlock(iip->ili_inode);
+	}
 }
 
 /*
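The xfs_iflush_done() rework above gathers every inode completion attached to the buffer onto one list with an unlocked pre-check, then takes the AIL lock once to delete the batch, and finishes with per-item cleanup outside the lock. A standalone sketch of that gather-then-single-locked-pass shape is below; the singly linked list and pthread mutex are illustrative stand-ins for the buffer item list and the AIL lock.

#include <pthread.h>
#include <stddef.h>

struct item {
	struct item	*next;		/* stands in for li_bio_list */
	int		needs_removal;	/* unlocked hint, rechecked under the lock */
};

static pthread_mutex_t ail_lock = PTHREAD_MUTEX_INITIALIZER;

static void remove_from_ail_locked(struct item *it) { (void)it; /* ... */ }

void complete_items(struct item *head)
{
	struct item *it;
	int need_lock = 0;

	/* Pass 1: unlocked scan decides whether the expensive lock is needed at all. */
	for (it = head; it; it = it->next)
		if (it->needs_removal)
			need_lock++;

	/* Pass 2: one lock round-trip covers every item that needs it. */
	if (need_lock) {
		pthread_mutex_lock(&ail_lock);
		for (it = head; it; it = it->next)
			if (it->needs_removal)
				remove_from_ail_locked(it);
		pthread_mutex_unlock(&ail_lock);
	}

	/* Pass 3: per-item cleanup that does not need the shared lock. */
	for (it = head; it; it = it->next)
		it->needs_removal = 0;
}
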
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index 2057614..55582bd 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -47,127 +47,8 @@
 
 #define XFS_WRITEIO_ALIGN(mp,off)	(((off) >> mp->m_writeio_log) \
 						<< mp->m_writeio_log)
-#define XFS_STRAT_WRITE_IMAPS	2
 #define XFS_WRITE_IMAPS		XFS_BMAP_MAX_NMAP
 
-STATIC int xfs_iomap_write_direct(struct xfs_inode *, xfs_off_t, size_t,
-				  int, struct xfs_bmbt_irec *, int *);
-STATIC int xfs_iomap_write_delay(struct xfs_inode *, xfs_off_t, size_t, int,
-				 struct xfs_bmbt_irec *, int *);
-STATIC int xfs_iomap_write_allocate(struct xfs_inode *, xfs_off_t, size_t,
-				struct xfs_bmbt_irec *, int *);
-
-int
-xfs_iomap(
-	struct xfs_inode	*ip,
-	xfs_off_t		offset,
-	ssize_t			count,
-	int			flags,
-	struct xfs_bmbt_irec	*imap,
-	int			*nimaps,
-	int			*new)
-{
-	struct xfs_mount	*mp = ip->i_mount;
-	xfs_fileoff_t		offset_fsb, end_fsb;
-	int			error = 0;
-	int			lockmode = 0;
-	int			bmapi_flags = 0;
-
-	ASSERT((ip->i_d.di_mode & S_IFMT) == S_IFREG);
-
-	*new = 0;
-
-	if (XFS_FORCED_SHUTDOWN(mp))
-		return XFS_ERROR(EIO);
-
-	trace_xfs_iomap_enter(ip, offset, count, flags, NULL);
-
-	switch (flags & (BMAPI_READ | BMAPI_WRITE | BMAPI_ALLOCATE)) {
-	case BMAPI_READ:
-		lockmode = xfs_ilock_map_shared(ip);
-		bmapi_flags = XFS_BMAPI_ENTIRE;
-		break;
-	case BMAPI_WRITE:
-		lockmode = XFS_ILOCK_EXCL;
-		if (flags & BMAPI_IGNSTATE)
-			bmapi_flags |= XFS_BMAPI_IGSTATE|XFS_BMAPI_ENTIRE;
-		xfs_ilock(ip, lockmode);
-		break;
-	case BMAPI_ALLOCATE:
-		lockmode = XFS_ILOCK_SHARED;
-		bmapi_flags = XFS_BMAPI_ENTIRE;
-
-		/* Attempt non-blocking lock */
-		if (flags & BMAPI_TRYLOCK) {
-			if (!xfs_ilock_nowait(ip, lockmode))
-				return XFS_ERROR(EAGAIN);
-		} else {
-			xfs_ilock(ip, lockmode);
-		}
-		break;
-	default:
-		BUG();
-	}
-
-	ASSERT(offset <= mp->m_maxioffset);
-	if ((xfs_fsize_t)offset + count > mp->m_maxioffset)
-		count = mp->m_maxioffset - offset;
-	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
-	offset_fsb = XFS_B_TO_FSBT(mp, offset);
-
-	error = xfs_bmapi(NULL, ip, offset_fsb,
-			(xfs_filblks_t)(end_fsb - offset_fsb),
-			bmapi_flags,  NULL, 0, imap,
-			nimaps, NULL);
-
-	if (error)
-		goto out;
-
-	switch (flags & (BMAPI_WRITE|BMAPI_ALLOCATE)) {
-	case BMAPI_WRITE:
-		/* If we found an extent, return it */
-		if (*nimaps &&
-		    (imap->br_startblock != HOLESTARTBLOCK) &&
-		    (imap->br_startblock != DELAYSTARTBLOCK)) {
-			trace_xfs_iomap_found(ip, offset, count, flags, imap);
-			break;
-		}
-
-		if (flags & BMAPI_DIRECT) {
-			error = xfs_iomap_write_direct(ip, offset, count, flags,
-						       imap, nimaps);
-		} else {
-			error = xfs_iomap_write_delay(ip, offset, count, flags,
-						      imap, nimaps);
-		}
-		if (!error) {
-			trace_xfs_iomap_alloc(ip, offset, count, flags, imap);
-		}
-		*new = 1;
-		break;
-	case BMAPI_ALLOCATE:
-		/* If we found an extent, return it */
-		xfs_iunlock(ip, lockmode);
-		lockmode = 0;
-
-		if (*nimaps && !isnullstartblock(imap->br_startblock)) {
-			trace_xfs_iomap_found(ip, offset, count, flags, imap);
-			break;
-		}
-
-		error = xfs_iomap_write_allocate(ip, offset, count,
-						 imap, nimaps);
-		break;
-	}
-
-	ASSERT(*nimaps <= 1);
-
-out:
-	if (lockmode)
-		xfs_iunlock(ip, lockmode);
-	return XFS_ERROR(error);
-}
-
 STATIC int
 xfs_iomap_eof_align_last_fsb(
 	xfs_mount_t	*mp,
@@ -236,14 +117,13 @@
 	return EFSCORRUPTED;
 }
 
-STATIC int
+int
 xfs_iomap_write_direct(
 	xfs_inode_t	*ip,
 	xfs_off_t	offset,
 	size_t		count,
-	int		flags,
 	xfs_bmbt_irec_t *imap,
-	int		*nmaps)
+	int		nmaps)
 {
 	xfs_mount_t	*mp = ip->i_mount;
 	xfs_fileoff_t	offset_fsb;
@@ -279,7 +159,7 @@
 		if (error)
 			goto error_out;
 	} else {
-		if (*nmaps && (imap->br_startblock == HOLESTARTBLOCK))
+		if (nmaps && (imap->br_startblock == HOLESTARTBLOCK))
 			last_fsb = MIN(last_fsb, (xfs_fileoff_t)
 					imap->br_blockcount +
 					imap->br_startoff);
@@ -331,7 +211,7 @@
 	xfs_trans_ijoin(tp, ip);
 
 	bmapi_flag = XFS_BMAPI_WRITE;
-	if ((flags & BMAPI_DIRECT) && (offset < ip->i_size || extsz))
+	if (offset < ip->i_size || extsz)
 		bmapi_flag |= XFS_BMAPI_PREALLOC;
 
 	/*
@@ -370,7 +250,6 @@
 		goto error_out;
 	}
 
-	*nmaps = 1;
 	return 0;
 
 error0:	/* Cancel bmap, unlock inode, unreserve quota blocks, cancel trans */
@@ -379,7 +258,6 @@
 
 error1:	/* Just cancel transaction */
 	xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
-	*nmaps = 0;	/* nothing set-up here */
 
 error_out:
 	return XFS_ERROR(error);
@@ -389,6 +267,9 @@
  * If the caller is doing a write at the end of the file, then extend the
  * allocation out to the file system's write iosize.  We clean up any extra
  * space left over when the file is closed in xfs_inactive().
+ *
+ * If we find we already have delalloc preallocation beyond EOF, don't do more
+ * preallocation as it is not needed.
  */
 STATIC int
 xfs_iomap_eof_want_preallocate(
@@ -396,7 +277,6 @@
 	xfs_inode_t	*ip,
 	xfs_off_t	offset,
 	size_t		count,
-	int		ioflag,
 	xfs_bmbt_irec_t *imap,
 	int		nimaps,
 	int		*prealloc)
@@ -405,6 +285,7 @@
 	xfs_filblks_t   count_fsb;
 	xfs_fsblock_t	firstblock;
 	int		n, error, imaps;
+	int		found_delalloc = 0;
 
 	*prealloc = 0;
 	if ((offset + count) <= ip->i_size)
@@ -429,20 +310,66 @@
 				return 0;
 			start_fsb += imap[n].br_blockcount;
 			count_fsb -= imap[n].br_blockcount;
+
+			if (imap[n].br_startblock == DELAYSTARTBLOCK)
+				found_delalloc = 1;
 		}
 	}
-	*prealloc = 1;
+	if (!found_delalloc)
+		*prealloc = 1;
 	return 0;
 }
 
-STATIC int
+/*
+ * If we don't have a user specified preallocation size, dynamically increase
+ * the preallocation size as the size of the file grows. Cap the maximum size
+ * at a single extent or less if the filesystem is near full. The closer the
+ * filesystem is to full, the smaller the maximum preallocation.
+ */
+STATIC xfs_fsblock_t
+xfs_iomap_prealloc_size(
+	struct xfs_mount	*mp,
+	struct xfs_inode	*ip)
+{
+	xfs_fsblock_t		alloc_blocks = 0;
+
+	if (!(mp->m_flags & XFS_MOUNT_DFLT_IOSIZE)) {
+		int shift = 0;
+		int64_t freesp;
+
+		alloc_blocks = XFS_B_TO_FSB(mp, ip->i_size);
+		alloc_blocks = XFS_FILEOFF_MIN(MAXEXTLEN,
+					rounddown_pow_of_two(alloc_blocks));
+
+		xfs_icsb_sync_counters(mp, XFS_ICSB_LAZY_COUNT);
+		freesp = mp->m_sb.sb_fdblocks;
+		if (freesp < mp->m_low_space[XFS_LOWSP_5_PCNT]) {
+			shift = 2;
+			if (freesp < mp->m_low_space[XFS_LOWSP_4_PCNT])
+				shift++;
+			if (freesp < mp->m_low_space[XFS_LOWSP_3_PCNT])
+				shift++;
+			if (freesp < mp->m_low_space[XFS_LOWSP_2_PCNT])
+				shift++;
+			if (freesp < mp->m_low_space[XFS_LOWSP_1_PCNT])
+				shift++;
+		}
+		if (shift)
+			alloc_blocks >>= shift;
+	}
+
+	if (alloc_blocks < mp->m_writeio_blocks)
+		alloc_blocks = mp->m_writeio_blocks;
+
+	return alloc_blocks;
+}
+
+int
 xfs_iomap_write_delay(
 	xfs_inode_t	*ip,
 	xfs_off_t	offset,
 	size_t		count,
-	int		ioflag,
-	xfs_bmbt_irec_t *ret_imap,
-	int		*nmaps)
+	xfs_bmbt_irec_t *ret_imap)
 {
 	xfs_mount_t	*mp = ip->i_mount;
 	xfs_fileoff_t	offset_fsb;
@@ -469,16 +396,19 @@
 	extsz = xfs_get_extsz_hint(ip);
 	offset_fsb = XFS_B_TO_FSBT(mp, offset);
 
+
 	error = xfs_iomap_eof_want_preallocate(mp, ip, offset, count,
-				ioflag, imap, XFS_WRITE_IMAPS, &prealloc);
+				imap, XFS_WRITE_IMAPS, &prealloc);
 	if (error)
 		return error;
 
 retry:
 	if (prealloc) {
+		xfs_fsblock_t	alloc_blocks = xfs_iomap_prealloc_size(mp, ip);
+
 		aligned_offset = XFS_WRITEIO_ALIGN(mp, (offset + count - 1));
 		ioalign = XFS_B_TO_FSBT(mp, aligned_offset);
-		last_fsb = ioalign + mp->m_writeio_blocks;
+		last_fsb = ioalign + alloc_blocks;
 	} else {
 		last_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)(offset + count)));
 	}
@@ -496,22 +426,31 @@
 			  XFS_BMAPI_DELAY | XFS_BMAPI_WRITE |
 			  XFS_BMAPI_ENTIRE, &firstblock, 1, imap,
 			  &nimaps, NULL);
-	if (error && (error != ENOSPC))
+	switch (error) {
+	case 0:
+	case ENOSPC:
+	case EDQUOT:
+		break;
+	default:
 		return XFS_ERROR(error);
+	}
 
 	/*
-	 * If bmapi returned us nothing, and if we didn't get back EDQUOT,
-	 * then we must have run out of space - flush all other inodes with
-	 * delalloc blocks and retry without EOF preallocation.
+	 * If bmapi returned us nothing, we got either ENOSPC or EDQUOT.  For
+	 * ENOSPC, flush all other inodes with delalloc blocks to free up
+	 * some of the excess reserved metadata space. For both cases, retry
+	 * without EOF preallocation.
 	 */
 	if (nimaps == 0) {
 		trace_xfs_delalloc_enospc(ip, offset, count);
 		if (flushed)
-			return XFS_ERROR(ENOSPC);
+			return XFS_ERROR(error ? error : ENOSPC);
 
-		xfs_iunlock(ip, XFS_ILOCK_EXCL);
-		xfs_flush_inodes(ip);
-		xfs_ilock(ip, XFS_ILOCK_EXCL);
+		if (error == ENOSPC) {
+			xfs_iunlock(ip, XFS_ILOCK_EXCL);
+			xfs_flush_inodes(ip);
+			xfs_ilock(ip, XFS_ILOCK_EXCL);
+		}
 
 		flushed = 1;
 		error = 0;
@@ -523,8 +462,6 @@
 		return xfs_cmn_err_fsblock_zero(ip, &imap[0]);
 
 	*ret_imap = imap[0];
-	*nmaps = 1;
-
 	return 0;
 }
 
@@ -538,13 +475,12 @@
  * We no longer bother to look at the incoming map - all we have to
  * guarantee is that whatever we allocate fills the required range.
  */
-STATIC int
+int
 xfs_iomap_write_allocate(
 	xfs_inode_t	*ip,
 	xfs_off_t	offset,
 	size_t		count,
-	xfs_bmbt_irec_t *imap,
-	int		*retmap)
+	xfs_bmbt_irec_t *imap)
 {
 	xfs_mount_t	*mp = ip->i_mount;
 	xfs_fileoff_t	offset_fsb, last_block;
@@ -557,8 +493,6 @@
 	int		error = 0;
 	int		nres;
 
-	*retmap = 0;
-
 	/*
 	 * Make sure that the dquots are there.
 	 */
@@ -680,7 +614,6 @@
 		if ((offset_fsb >= imap->br_startoff) &&
 		    (offset_fsb < (imap->br_startoff +
 				   imap->br_blockcount))) {
-			*retmap = 1;
 			XFS_STATS_INC(xs_xstrat_quick);
 			return 0;
 		}
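The xfs_iomap_prealloc_size() logic added above starts from the file size rounded down to a power of two, caps it at MAXEXTLEN, and then halves it once per low-space threshold crossed (with an extra halving at the first threshold). A runnable sketch of just that arithmetic follows; the constants standing in for MAXEXTLEN, the writeio floor and the m_low_space[] thresholds are made up for illustration.

#include <stdint.h>
#include <stdio.h>

#define MAX_EXTENT_BLOCKS	(2097151ULL)	/* stand-in for MAXEXTLEN */
#define WRITEIO_BLOCKS		(16ULL)		/* stand-in for mp->m_writeio_blocks */

static uint64_t rounddown_pow_of_two(uint64_t n)
{
	while (n & (n - 1))
		n &= n - 1;
	return n;
}

/* thresholds[] holds the 5%..1% free-space marks, largest first, in blocks. */
uint64_t prealloc_size(uint64_t isize_blocks, uint64_t free_blocks,
		       const uint64_t thresholds[5])
{
	uint64_t alloc = rounddown_pow_of_two(isize_blocks);
	int shift = 0, i;

	if (alloc > MAX_EXTENT_BLOCKS)
		alloc = MAX_EXTENT_BLOCKS;

	if (free_blocks < thresholds[0]) {
		shift = 2;			/* crossing the first mark costs two */
		for (i = 1; i < 5; i++)
			if (free_blocks < thresholds[i])
				shift++;
	}
	alloc >>= shift;

	return alloc < WRITEIO_BLOCKS ? WRITEIO_BLOCKS : alloc;
}

int main(void)
{
	const uint64_t t[5] = { 5000, 4000, 3000, 2000, 1000 };

	printf("plenty of space: %llu blocks\n",
	       (unsigned long long)prealloc_size(1 << 20, 100000, t));
	printf("nearly full:     %llu blocks\n",
	       (unsigned long long)prealloc_size(1 << 20, 500, t));
	return 0;
}
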
diff --git a/fs/xfs/xfs_iomap.h b/fs/xfs/xfs_iomap.h
index 7748a43..8061576 100644
--- a/fs/xfs/xfs_iomap.h
+++ b/fs/xfs/xfs_iomap.h
@@ -18,30 +18,15 @@
 #ifndef __XFS_IOMAP_H__
 #define __XFS_IOMAP_H__
 
-/* base extent manipulation calls */
-#define BMAPI_READ	(1 << 0)	/* read extents */
-#define BMAPI_WRITE	(1 << 1)	/* create extents */
-#define BMAPI_ALLOCATE	(1 << 2)	/* delayed allocate to real extents */
-
-/* modifiers */
-#define BMAPI_IGNSTATE	(1 << 4)	/* ignore unwritten state on read */
-#define BMAPI_DIRECT	(1 << 5)	/* direct instead of buffered write */
-#define BMAPI_MMA	(1 << 6)	/* allocate for mmap write */
-#define BMAPI_TRYLOCK	(1 << 7)	/* non-blocking request */
-
-#define BMAPI_FLAGS \
-	{ BMAPI_READ,		"READ" }, \
-	{ BMAPI_WRITE,		"WRITE" }, \
-	{ BMAPI_ALLOCATE,	"ALLOCATE" }, \
-	{ BMAPI_IGNSTATE,	"IGNSTATE" }, \
-	{ BMAPI_DIRECT,		"DIRECT" }, \
-	{ BMAPI_TRYLOCK,	"TRYLOCK" }
-
 struct xfs_inode;
 struct xfs_bmbt_irec;
 
-extern int xfs_iomap(struct xfs_inode *, xfs_off_t, ssize_t, int,
-		     struct xfs_bmbt_irec *, int *, int *);
+extern int xfs_iomap_write_direct(struct xfs_inode *, xfs_off_t, size_t,
+			struct xfs_bmbt_irec *, int);
+extern int xfs_iomap_write_delay(struct xfs_inode *, xfs_off_t, size_t,
+			struct xfs_bmbt_irec *);
+extern int xfs_iomap_write_allocate(struct xfs_inode *, xfs_off_t, size_t,
+			struct xfs_bmbt_irec *);
 extern int xfs_iomap_write_unwritten(struct xfs_inode *, xfs_off_t, size_t);
 
 #endif /* __XFS_IOMAP_H__*/
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index cee4ab9..0bf24b1 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -47,7 +47,7 @@
 				xfs_buftarg_t	*log_target,
 				xfs_daddr_t	blk_offset,
 				int		num_bblks);
-STATIC int	 xlog_space_left(xlog_t *log, int cycle, int bytes);
+STATIC int	 xlog_space_left(struct log *log, atomic64_t *head);
 STATIC int	 xlog_sync(xlog_t *log, xlog_in_core_t *iclog);
 STATIC void	 xlog_dealloc_log(xlog_t *log);
 
@@ -70,7 +70,7 @@
 /* local functions to manipulate grant head */
 STATIC int  xlog_grant_log_space(xlog_t		*log,
 				 xlog_ticket_t	*xtic);
-STATIC void xlog_grant_push_ail(xfs_mount_t	*mp,
+STATIC void xlog_grant_push_ail(struct log	*log,
 				int		need_bytes);
 STATIC void xlog_regrant_reserve_log_space(xlog_t	 *log,
 					   xlog_ticket_t *ticket);
@@ -81,98 +81,73 @@
 
 #if defined(DEBUG)
 STATIC void	xlog_verify_dest_ptr(xlog_t *log, char *ptr);
-STATIC void	xlog_verify_grant_head(xlog_t *log, int equals);
+STATIC void	xlog_verify_grant_tail(struct log *log);
 STATIC void	xlog_verify_iclog(xlog_t *log, xlog_in_core_t *iclog,
 				  int count, boolean_t syncing);
 STATIC void	xlog_verify_tail_lsn(xlog_t *log, xlog_in_core_t *iclog,
 				     xfs_lsn_t tail_lsn);
 #else
 #define xlog_verify_dest_ptr(a,b)
-#define xlog_verify_grant_head(a,b)
+#define xlog_verify_grant_tail(a)
 #define xlog_verify_iclog(a,b,c,d)
 #define xlog_verify_tail_lsn(a,b,c)
 #endif
 
 STATIC int	xlog_iclogs_empty(xlog_t *log);
 
-
 static void
-xlog_ins_ticketq(struct xlog_ticket **qp, struct xlog_ticket *tic)
+xlog_grant_sub_space(
+	struct log	*log,
+	atomic64_t	*head,
+	int		bytes)
 {
-	if (*qp) {
-		tic->t_next	    = (*qp);
-		tic->t_prev	    = (*qp)->t_prev;
-		(*qp)->t_prev->t_next = tic;
-		(*qp)->t_prev	    = tic;
-	} else {
-		tic->t_prev = tic->t_next = tic;
-		*qp = tic;
-	}
+	int64_t	head_val = atomic64_read(head);
+	int64_t new, old;
 
-	tic->t_flags |= XLOG_TIC_IN_Q;
+	do {
+		int	cycle, space;
+
+		xlog_crack_grant_head_val(head_val, &cycle, &space);
+
+		space -= bytes;
+		if (space < 0) {
+			space += log->l_logsize;
+			cycle--;
+		}
+
+		old = head_val;
+		new = xlog_assign_grant_head_val(cycle, space);
+		head_val = atomic64_cmpxchg(head, old, new);
+	} while (head_val != old);
 }
 
 static void
-xlog_del_ticketq(struct xlog_ticket **qp, struct xlog_ticket *tic)
+xlog_grant_add_space(
+	struct log	*log,
+	atomic64_t	*head,
+	int		bytes)
 {
-	if (tic == tic->t_next) {
-		*qp = NULL;
-	} else {
-		*qp = tic->t_next;
-		tic->t_next->t_prev = tic->t_prev;
-		tic->t_prev->t_next = tic->t_next;
-	}
+	int64_t	head_val = atomic64_read(head);
+	int64_t new, old;
 
-	tic->t_next = tic->t_prev = NULL;
-	tic->t_flags &= ~XLOG_TIC_IN_Q;
-}
+	do {
+		int		tmp;
+		int		cycle, space;
 
-static void
-xlog_grant_sub_space(struct log *log, int bytes)
-{
-	log->l_grant_write_bytes -= bytes;
-	if (log->l_grant_write_bytes < 0) {
-		log->l_grant_write_bytes += log->l_logsize;
-		log->l_grant_write_cycle--;
-	}
+		xlog_crack_grant_head_val(head_val, &cycle, &space);
 
-	log->l_grant_reserve_bytes -= bytes;
-	if ((log)->l_grant_reserve_bytes < 0) {
-		log->l_grant_reserve_bytes += log->l_logsize;
-		log->l_grant_reserve_cycle--;
-	}
+		tmp = log->l_logsize - space;
+		if (tmp > bytes)
+			space += bytes;
+		else {
+			space = bytes - tmp;
+			cycle++;
+		}
 
-}
-
-static void
-xlog_grant_add_space_write(struct log *log, int bytes)
-{
-	int tmp = log->l_logsize - log->l_grant_write_bytes;
-	if (tmp > bytes)
-		log->l_grant_write_bytes += bytes;
-	else {
-		log->l_grant_write_cycle++;
-		log->l_grant_write_bytes = bytes - tmp;
-	}
-}
-
-static void
-xlog_grant_add_space_reserve(struct log *log, int bytes)
-{
-	int tmp = log->l_logsize - log->l_grant_reserve_bytes;
-	if (tmp > bytes)
-		log->l_grant_reserve_bytes += bytes;
-	else {
-		log->l_grant_reserve_cycle++;
-		log->l_grant_reserve_bytes = bytes - tmp;
-	}
-}
-
-static inline void
-xlog_grant_add_space(struct log *log, int bytes)
-{
-	xlog_grant_add_space_write(log, bytes);
-	xlog_grant_add_space_reserve(log, bytes);
+		old = head_val;
+		new = xlog_assign_grant_head_val(cycle, space);
+		head_val = atomic64_cmpxchg(head, old, new);
+	} while (head_val != old);
 }
 
 static void
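The xlog_grant_sub_space()/xlog_grant_add_space() rewrite above packs the grant head's (cycle, bytes) pair into one 64-bit word so it can be updated with an atomic compare-and-exchange loop instead of under l_grant_lock. A standalone C11 sketch of that crack/assign/cmpxchg pattern is below; the 32/32 split and the log-size wrap mirror the code above, but the names and log size are illustrative.

#include <stdatomic.h>
#include <stdint.h>

#define LOG_SIZE	(1 << 20)		/* illustrative log size in bytes */

static void crack_head(int64_t val, int *cycle, int *space)
{
	*cycle = (int)(val >> 32);
	*space = (int)(val & 0xffffffff);
}

static int64_t assign_head(int cycle, int space)
{
	return ((int64_t)cycle << 32) | (uint32_t)space;
}

/* Subtract bytes from the packed head, wrapping back a cycle on underflow. */
void grant_sub_space(_Atomic int64_t *head, int bytes)
{
	int64_t old = atomic_load(head);
	int64_t new;

	do {
		int cycle, space;

		crack_head(old, &cycle, &space);
		space -= bytes;
		if (space < 0) {
			space += LOG_SIZE;
			cycle--;
		}
		new = assign_head(cycle, space);
		/* On failure 'old' is reloaded with the current value and we retry. */
	} while (!atomic_compare_exchange_weak(head, &old, new));
}
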
@@ -355,7 +330,7 @@
 
 		trace_xfs_log_reserve(log, internal_ticket);
 
-		xlog_grant_push_ail(mp, internal_ticket->t_unit_res);
+		xlog_grant_push_ail(log, internal_ticket->t_unit_res);
 		retval = xlog_regrant_write_log_space(log, internal_ticket);
 	} else {
 		/* may sleep if need to allocate more tickets */
@@ -369,7 +344,7 @@
 
 		trace_xfs_log_reserve(log, internal_ticket);
 
-		xlog_grant_push_ail(mp,
+		xlog_grant_push_ail(log,
 				    (internal_ticket->t_unit_res *
 				     internal_ticket->t_cnt));
 		retval = xlog_grant_log_space(log, internal_ticket);
@@ -584,8 +559,8 @@
 		if (!(iclog->ic_state == XLOG_STATE_ACTIVE ||
 		      iclog->ic_state == XLOG_STATE_DIRTY)) {
 			if (!XLOG_FORCED_SHUTDOWN(log)) {
-				sv_wait(&iclog->ic_force_wait, PMEM,
-					&log->l_icloglock, s);
+				xlog_wait(&iclog->ic_force_wait,
+							&log->l_icloglock);
 			} else {
 				spin_unlock(&log->l_icloglock);
 			}
@@ -625,8 +600,8 @@
 			|| iclog->ic_state == XLOG_STATE_DIRTY
 			|| iclog->ic_state == XLOG_STATE_IOERROR) ) {
 
-				sv_wait(&iclog->ic_force_wait, PMEM,
-					&log->l_icloglock, s);
+				xlog_wait(&iclog->ic_force_wait,
+							&log->l_icloglock);
 		} else {
 			spin_unlock(&log->l_icloglock);
 		}
@@ -703,55 +678,46 @@
 {
 	xlog_ticket_t	*tic;
 	xlog_t		*log = mp->m_log;
-	int		need_bytes, free_bytes, cycle, bytes;
+	int		need_bytes, free_bytes;
 
 	if (XLOG_FORCED_SHUTDOWN(log))
 		return;
 
-	if (tail_lsn == 0) {
-		/* needed since sync_lsn is 64 bits */
-		spin_lock(&log->l_icloglock);
-		tail_lsn = log->l_last_sync_lsn;
-		spin_unlock(&log->l_icloglock);
-	}
+	if (tail_lsn == 0)
+		tail_lsn = atomic64_read(&log->l_last_sync_lsn);
 
-	spin_lock(&log->l_grant_lock);
+	/* tail_lsn == 1 implies that we weren't passed a valid value.  */
+	if (tail_lsn != 1)
+		atomic64_set(&log->l_tail_lsn, tail_lsn);
 
-	/* Also an invalid lsn.  1 implies that we aren't passing in a valid
-	 * tail_lsn.
-	 */
-	if (tail_lsn != 1) {
-		log->l_tail_lsn = tail_lsn;
-	}
-
-	if ((tic = log->l_write_headq)) {
+	if (!list_empty_careful(&log->l_writeq)) {
 #ifdef DEBUG
 		if (log->l_flags & XLOG_ACTIVE_RECOVERY)
 			panic("Recovery problem");
 #endif
-		cycle = log->l_grant_write_cycle;
-		bytes = log->l_grant_write_bytes;
-		free_bytes = xlog_space_left(log, cycle, bytes);
-		do {
+		spin_lock(&log->l_grant_write_lock);
+		free_bytes = xlog_space_left(log, &log->l_grant_write_head);
+		list_for_each_entry(tic, &log->l_writeq, t_queue) {
 			ASSERT(tic->t_flags & XLOG_TIC_PERM_RESERV);
 
 			if (free_bytes < tic->t_unit_res && tail_lsn != 1)
 				break;
 			tail_lsn = 0;
 			free_bytes -= tic->t_unit_res;
-			sv_signal(&tic->t_wait);
-			tic = tic->t_next;
-		} while (tic != log->l_write_headq);
+			trace_xfs_log_regrant_write_wake_up(log, tic);
+			wake_up(&tic->t_wait);
+		}
+		spin_unlock(&log->l_grant_write_lock);
 	}
-	if ((tic = log->l_reserve_headq)) {
+
+	if (!list_empty_careful(&log->l_reserveq)) {
 #ifdef DEBUG
 		if (log->l_flags & XLOG_ACTIVE_RECOVERY)
 			panic("Recovery problem");
 #endif
-		cycle = log->l_grant_reserve_cycle;
-		bytes = log->l_grant_reserve_bytes;
-		free_bytes = xlog_space_left(log, cycle, bytes);
-		do {
+		spin_lock(&log->l_grant_reserve_lock);
+		free_bytes = xlog_space_left(log, &log->l_grant_reserve_head);
+		list_for_each_entry(tic, &log->l_reserveq, t_queue) {
 			if (tic->t_flags & XLOG_TIC_PERM_RESERV)
 				need_bytes = tic->t_unit_res*tic->t_cnt;
 			else
@@ -760,12 +726,12 @@
 				break;
 			tail_lsn = 0;
 			free_bytes -= need_bytes;
-			sv_signal(&tic->t_wait);
-			tic = tic->t_next;
-		} while (tic != log->l_reserve_headq);
+			trace_xfs_log_grant_wake_up(log, tic);
+			wake_up(&tic->t_wait);
+		}
+		spin_unlock(&log->l_grant_reserve_lock);
 	}
-	spin_unlock(&log->l_grant_lock);
-}	/* xfs_log_move_tail */
+}
 
 /*
  * Determine if we have a transaction that has gone to disk
@@ -831,23 +797,19 @@
  * We may be holding the log iclog lock upon entering this routine.
  */
 xfs_lsn_t
-xlog_assign_tail_lsn(xfs_mount_t *mp)
+xlog_assign_tail_lsn(
+	struct xfs_mount	*mp)
 {
-	xfs_lsn_t tail_lsn;
-	xlog_t	  *log = mp->m_log;
+	xfs_lsn_t		tail_lsn;
+	struct log		*log = mp->m_log;
 
 	tail_lsn = xfs_trans_ail_tail(mp->m_ail);
-	spin_lock(&log->l_grant_lock);
-	if (tail_lsn != 0) {
-		log->l_tail_lsn = tail_lsn;
-	} else {
-		tail_lsn = log->l_tail_lsn = log->l_last_sync_lsn;
-	}
-	spin_unlock(&log->l_grant_lock);
+	if (!tail_lsn)
+		tail_lsn = atomic64_read(&log->l_last_sync_lsn);
 
+	atomic64_set(&log->l_tail_lsn, tail_lsn);
 	return tail_lsn;
-}	/* xlog_assign_tail_lsn */
-
+}
 
 /*
  * Return the space in the log between the tail and the head.  The head
@@ -864,21 +826,26 @@
  * result is that we return the size of the log as the amount of space left.
  */
 STATIC int
-xlog_space_left(xlog_t *log, int cycle, int bytes)
+xlog_space_left(
+	struct log	*log,
+	atomic64_t	*head)
 {
-	int free_bytes;
-	int tail_bytes;
-	int tail_cycle;
+	int		free_bytes;
+	int		tail_bytes;
+	int		tail_cycle;
+	int		head_cycle;
+	int		head_bytes;
 
-	tail_bytes = BBTOB(BLOCK_LSN(log->l_tail_lsn));
-	tail_cycle = CYCLE_LSN(log->l_tail_lsn);
-	if ((tail_cycle == cycle) && (bytes >= tail_bytes)) {
-		free_bytes = log->l_logsize - (bytes - tail_bytes);
-	} else if ((tail_cycle + 1) < cycle) {
+	xlog_crack_grant_head(head, &head_cycle, &head_bytes);
+	xlog_crack_atomic_lsn(&log->l_tail_lsn, &tail_cycle, &tail_bytes);
+	tail_bytes = BBTOB(tail_bytes);
+	if (tail_cycle == head_cycle && head_bytes >= tail_bytes)
+		free_bytes = log->l_logsize - (head_bytes - tail_bytes);
+	else if (tail_cycle + 1 < head_cycle)
 		return 0;
-	} else if (tail_cycle < cycle) {
-		ASSERT(tail_cycle == (cycle - 1));
-		free_bytes = tail_bytes - bytes;
+	else if (tail_cycle < head_cycle) {
+		ASSERT(tail_cycle == (head_cycle - 1));
+		free_bytes = tail_bytes - head_bytes;
 	} else {
 		/*
 		 * The reservation head is behind the tail.
@@ -889,12 +856,12 @@
 			"xlog_space_left: head behind tail\n"
 			"  tail_cycle = %d, tail_bytes = %d\n"
 			"  GH   cycle = %d, GH   bytes = %d",
-			tail_cycle, tail_bytes, cycle, bytes);
+			tail_cycle, tail_bytes, head_cycle, head_bytes);
 		ASSERT(0);
 		free_bytes = log->l_logsize;
 	}
 	return free_bytes;
-}	/* xlog_space_left */
+}
 
 
 /*
@@ -1047,12 +1014,16 @@
 	log->l_flags	   |= XLOG_ACTIVE_RECOVERY;
 
 	log->l_prev_block  = -1;
-	log->l_tail_lsn	   = xlog_assign_lsn(1, 0);
 	/* log->l_tail_lsn = 0x100000000LL; cycle = 1; current block = 0 */
-	log->l_last_sync_lsn = log->l_tail_lsn;
+	xlog_assign_atomic_lsn(&log->l_tail_lsn, 1, 0);
+	xlog_assign_atomic_lsn(&log->l_last_sync_lsn, 1, 0);
 	log->l_curr_cycle  = 1;	    /* 0 is bad since this is initial value */
-	log->l_grant_reserve_cycle = 1;
-	log->l_grant_write_cycle = 1;
+	xlog_assign_grant_head(&log->l_grant_reserve_head, 1, 0);
+	xlog_assign_grant_head(&log->l_grant_write_head, 1, 0);
+	INIT_LIST_HEAD(&log->l_reserveq);
+	INIT_LIST_HEAD(&log->l_writeq);
+	spin_lock_init(&log->l_grant_reserve_lock);
+	spin_lock_init(&log->l_grant_write_lock);
 
 	error = EFSCORRUPTED;
 	if (xfs_sb_version_hassector(&mp->m_sb)) {
@@ -1094,8 +1065,7 @@
 	log->l_xbuf = bp;
 
 	spin_lock_init(&log->l_icloglock);
-	spin_lock_init(&log->l_grant_lock);
-	sv_init(&log->l_flush_wait, 0, "flush_wait");
+	init_waitqueue_head(&log->l_flush_wait);
 
 	/* log record size must be multiple of BBSIZE; see xlog_rec_header_t */
 	ASSERT((XFS_BUF_SIZE(bp) & BBMASK) == 0);
@@ -1151,8 +1121,8 @@
 
 		ASSERT(XFS_BUF_ISBUSY(iclog->ic_bp));
 		ASSERT(XFS_BUF_VALUSEMA(iclog->ic_bp) <= 0);
-		sv_init(&iclog->ic_force_wait, SV_DEFAULT, "iclog-force");
-		sv_init(&iclog->ic_write_wait, SV_DEFAULT, "iclog-write");
+		init_waitqueue_head(&iclog->ic_force_wait);
+		init_waitqueue_head(&iclog->ic_write_wait);
 
 		iclogp = &iclog->ic_next;
 	}
@@ -1167,15 +1137,11 @@
 out_free_iclog:
 	for (iclog = log->l_iclog; iclog; iclog = prev_iclog) {
 		prev_iclog = iclog->ic_next;
-		if (iclog->ic_bp) {
-			sv_destroy(&iclog->ic_force_wait);
-			sv_destroy(&iclog->ic_write_wait);
+		if (iclog->ic_bp)
 			xfs_buf_free(iclog->ic_bp);
-		}
 		kmem_free(iclog);
 	}
 	spinlock_destroy(&log->l_icloglock);
-	spinlock_destroy(&log->l_grant_lock);
 	xfs_buf_free(log->l_xbuf);
 out_free_log:
 	kmem_free(log);
@@ -1223,61 +1189,60 @@
  * water mark.  In this manner, we would be creating a low water mark.
  */
 STATIC void
-xlog_grant_push_ail(xfs_mount_t	*mp,
-		    int		need_bytes)
+xlog_grant_push_ail(
+	struct log	*log,
+	int		need_bytes)
 {
-    xlog_t	*log = mp->m_log;	/* pointer to the log */
-    xfs_lsn_t	tail_lsn;		/* lsn of the log tail */
-    xfs_lsn_t	threshold_lsn = 0;	/* lsn we'd like to be at */
-    int		free_blocks;		/* free blocks left to write to */
-    int		free_bytes;		/* free bytes left to write to */
-    int		threshold_block;	/* block in lsn we'd like to be at */
-    int		threshold_cycle;	/* lsn cycle we'd like to be at */
-    int		free_threshold;
+	xfs_lsn_t	threshold_lsn = 0;
+	xfs_lsn_t	last_sync_lsn;
+	int		free_blocks;
+	int		free_bytes;
+	int		threshold_block;
+	int		threshold_cycle;
+	int		free_threshold;
 
-    ASSERT(BTOBB(need_bytes) < log->l_logBBsize);
+	ASSERT(BTOBB(need_bytes) < log->l_logBBsize);
 
-    spin_lock(&log->l_grant_lock);
-    free_bytes = xlog_space_left(log,
-				 log->l_grant_reserve_cycle,
-				 log->l_grant_reserve_bytes);
-    tail_lsn = log->l_tail_lsn;
-    free_blocks = BTOBBT(free_bytes);
+	free_bytes = xlog_space_left(log, &log->l_grant_reserve_head);
+	free_blocks = BTOBBT(free_bytes);
 
-    /*
-     * Set the threshold for the minimum number of free blocks in the
-     * log to the maximum of what the caller needs, one quarter of the
-     * log, and 256 blocks.
-     */
-    free_threshold = BTOBB(need_bytes);
-    free_threshold = MAX(free_threshold, (log->l_logBBsize >> 2));
-    free_threshold = MAX(free_threshold, 256);
-    if (free_blocks < free_threshold) {
-	threshold_block = BLOCK_LSN(tail_lsn) + free_threshold;
-	threshold_cycle = CYCLE_LSN(tail_lsn);
-	if (threshold_block >= log->l_logBBsize) {
-	    threshold_block -= log->l_logBBsize;
-	    threshold_cycle += 1;
-	}
-	threshold_lsn = xlog_assign_lsn(threshold_cycle, threshold_block);
-
-	/* Don't pass in an lsn greater than the lsn of the last
-	 * log record known to be on disk.
+	/*
+	 * Set the threshold for the minimum number of free blocks in the
+	 * log to the maximum of what the caller needs, one quarter of the
+	 * log, and 256 blocks.
 	 */
-	if (XFS_LSN_CMP(threshold_lsn, log->l_last_sync_lsn) > 0)
-	    threshold_lsn = log->l_last_sync_lsn;
-    }
-    spin_unlock(&log->l_grant_lock);
+	free_threshold = BTOBB(need_bytes);
+	free_threshold = MAX(free_threshold, (log->l_logBBsize >> 2));
+	free_threshold = MAX(free_threshold, 256);
+	if (free_blocks >= free_threshold)
+		return;
 
-    /*
-     * Get the transaction layer to kick the dirty buffers out to
-     * disk asynchronously. No point in trying to do this if
-     * the filesystem is shutting down.
-     */
-    if (threshold_lsn &&
-	!XLOG_FORCED_SHUTDOWN(log))
-	    xfs_trans_ail_push(log->l_ailp, threshold_lsn);
-}	/* xlog_grant_push_ail */
+	xlog_crack_atomic_lsn(&log->l_tail_lsn, &threshold_cycle,
+						&threshold_block);
+	threshold_block += free_threshold;
+	if (threshold_block >= log->l_logBBsize) {
+		threshold_block -= log->l_logBBsize;
+		threshold_cycle += 1;
+	}
+	threshold_lsn = xlog_assign_lsn(threshold_cycle,
+					threshold_block);
+	/*
+	 * Don't pass in an lsn greater than the lsn of the last
+	 * log record known to be on disk. Use a snapshot of the last sync lsn
+	 * so that it doesn't change between the compare and the set.
+	 */
+	last_sync_lsn = atomic64_read(&log->l_last_sync_lsn);
+	if (XFS_LSN_CMP(threshold_lsn, last_sync_lsn) > 0)
+		threshold_lsn = last_sync_lsn;
+
+	/*
+	 * Get the transaction layer to kick the dirty buffers out to
+	 * disk asynchronously. No point in trying to do this if
+	 * the filesystem is shutting down.
+	 */
+	if (!XLOG_FORCED_SHUTDOWN(log))
+		xfs_trans_ail_push(log->l_ailp, threshold_lsn);
+}
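The reworked xlog_grant_push_ail() above derives its free-space target as the largest of the caller's requirement, a quarter of the log, and 256 basic blocks, then adds that to the current tail block (wrapping into the next cycle) to get the LSN to push to, capped at the last-synced LSN. A small sketch of just the threshold computation; the BTOBB-style conversion and constants here are illustrative.

#include <stdint.h>

#define BBSHIFT		9		/* 512-byte basic blocks, as in XFS */
#define BTOBB(bytes)	(((bytes) + (1 << BBSHIFT) - 1) >> BBSHIFT)

static int max3(int a, int b, int c)
{
	int m = a > b ? a : b;
	return m > c ? m : c;
}

/*
 * Returns the target block within the log that the tail should reach,
 * bumping *cycle if the target wraps past the end of the log.
 */
int push_threshold_block(int need_bytes, int log_bb_size,
			 int tail_block, int *cycle)
{
	int free_threshold = max3(BTOBB(need_bytes), log_bb_size >> 2, 256);
	int threshold_block = tail_block + free_threshold;

	if (threshold_block >= log_bb_size) {
		threshold_block -= log_bb_size;
		(*cycle)++;
	}
	return threshold_block;
}
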
 
 /*
  * The bdstrat callback function for log bufs. This gives us a central
@@ -1372,9 +1337,8 @@
 		 roundoff < BBTOB(1)));
 
 	/* move grant heads by roundoff in sync */
-	spin_lock(&log->l_grant_lock);
-	xlog_grant_add_space(log, roundoff);
-	spin_unlock(&log->l_grant_lock);
+	xlog_grant_add_space(log, &log->l_grant_reserve_head, roundoff);
+	xlog_grant_add_space(log, &log->l_grant_write_head, roundoff);
 
 	/* put cycle number in every block */
 	xlog_pack_data(log, iclog, roundoff); 
@@ -1489,15 +1453,12 @@
 
 	iclog = log->l_iclog;
 	for (i=0; i<log->l_iclog_bufs; i++) {
-		sv_destroy(&iclog->ic_force_wait);
-		sv_destroy(&iclog->ic_write_wait);
 		xfs_buf_free(iclog->ic_bp);
 		next_iclog = iclog->ic_next;
 		kmem_free(iclog);
 		iclog = next_iclog;
 	}
 	spinlock_destroy(&log->l_icloglock);
-	spinlock_destroy(&log->l_grant_lock);
 
 	xfs_buf_free(log->l_xbuf);
 	log->l_mp->m_log = NULL;
@@ -2232,7 +2193,7 @@
 				lowest_lsn = xlog_get_lowest_lsn(log);
 				if (lowest_lsn &&
 				    XFS_LSN_CMP(lowest_lsn,
-				    		be64_to_cpu(iclog->ic_header.h_lsn)) < 0) {
+						be64_to_cpu(iclog->ic_header.h_lsn)) < 0) {
 					iclog = iclog->ic_next;
 					continue; /* Leave this iclog for
 						   * another thread */
@@ -2240,23 +2201,21 @@
 
 				iclog->ic_state = XLOG_STATE_CALLBACK;
 
-				spin_unlock(&log->l_icloglock);
 
-				/* l_last_sync_lsn field protected by
-				 * l_grant_lock. Don't worry about iclog's lsn.
-				 * No one else can be here except us.
+				/*
+				 * update the last_sync_lsn before we drop the
+				 * icloglock to ensure we are the only one that
+				 * can update it.
 				 */
-				spin_lock(&log->l_grant_lock);
-				ASSERT(XFS_LSN_CMP(log->l_last_sync_lsn,
-				       be64_to_cpu(iclog->ic_header.h_lsn)) <= 0);
-				log->l_last_sync_lsn =
-					be64_to_cpu(iclog->ic_header.h_lsn);
-				spin_unlock(&log->l_grant_lock);
+				ASSERT(XFS_LSN_CMP(atomic64_read(&log->l_last_sync_lsn),
+					be64_to_cpu(iclog->ic_header.h_lsn)) <= 0);
+				atomic64_set(&log->l_last_sync_lsn,
+					be64_to_cpu(iclog->ic_header.h_lsn));
 
-			} else {
-				spin_unlock(&log->l_icloglock);
+			} else
 				ioerrors++;
-			}
+
+			spin_unlock(&log->l_icloglock);
 
 			/*
 			 * Keep processing entries in the callback list until
@@ -2297,7 +2256,7 @@
 			xlog_state_clean_log(log);
 
 			/* wake up threads waiting in xfs_log_force() */
-			sv_broadcast(&iclog->ic_force_wait);
+			wake_up_all(&iclog->ic_force_wait);
 
 			iclog = iclog->ic_next;
 		} while (first_iclog != iclog);
@@ -2344,7 +2303,7 @@
 	spin_unlock(&log->l_icloglock);
 
 	if (wake)
-		sv_broadcast(&log->l_flush_wait);
+		wake_up_all(&log->l_flush_wait);
 }
 
 
@@ -2395,7 +2354,7 @@
 	 * iclog buffer, we wake them all, one will get to do the
 	 * I/O, the others get to wait for the result.
 	 */
-	sv_broadcast(&iclog->ic_write_wait);
+	wake_up_all(&iclog->ic_write_wait);
 	spin_unlock(&log->l_icloglock);
 	xlog_state_do_callback(log, aborted, iclog);	/* also cleans log */
 }	/* xlog_state_done_syncing */
@@ -2444,7 +2403,7 @@
 		XFS_STATS_INC(xs_log_noiclogs);
 
 		/* Wait for log writes to have flushed */
-		sv_wait(&log->l_flush_wait, 0, &log->l_icloglock, 0);
+		xlog_wait(&log->l_flush_wait, &log->l_icloglock);
 		goto restart;
 	}
 
@@ -2527,6 +2486,18 @@
  *
  * Once a ticket gets put onto the reserveq, it will only return after
  * the needed reservation is satisfied.
+ *
+ * This function is structured so that it has a lock free fast path. This is
+ * necessary because every new transaction reservation will come through this
+ * path. Hence any lock will be globally hot if we take it unconditionally on
+ * every pass.
+ *
+ * As tickets are only ever moved on and off the reserveq under the
+ * l_grant_reserve_lock, we only need to take that lock if we are going
+ * to add the ticket to the queue and sleep. We can avoid taking the lock if the
+ * ticket was never added to the reserveq because the t_queue list head will be
+ * empty and we hold the only reference to it so it can safely be checked
+ * unlocked.
  */
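The comment above describes the lockless fast path used by xlog_grant_log_space(): the reserve queue is first tested outside the lock (list_empty_careful() in the real code), and l_grant_reserve_lock is only taken when the ticket actually has to be queued, with the emptiness re-checked once the lock is held. A standalone sketch of that check-then-lock-then-recheck shape follows; a pthread mutex and a plain list stand in for the lock and the ticket queue, and a production version would need an atomic or acquire load for the unlocked peek, which list_empty_careful() provides in the kernel.

#include <pthread.h>
#include <stdbool.h>

struct waiter {
	struct waiter	*next;
};

static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
static struct waiter *queue_head;	/* NULL means no one is waiting */

static void enqueue_locked(struct waiter *w)
{
	w->next = queue_head;
	queue_head = w;
}

/* Returns true if the caller queued itself and must sleep, false for the fast path. */
bool maybe_queue(struct waiter *w)
{
	/* Unlocked peek: the common case has no waiters, so skip the lock entirely. */
	if (queue_head == NULL)
		return false;

	pthread_mutex_lock(&queue_lock);
	/* Recheck under the lock: the queue may have drained since the peek. */
	if (queue_head == NULL) {
		pthread_mutex_unlock(&queue_lock);
		return false;
	}
	enqueue_locked(w);
	pthread_mutex_unlock(&queue_lock);
	return true;
}
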
 STATIC int
 xlog_grant_log_space(xlog_t	   *log,
@@ -2534,24 +2505,27 @@
 {
 	int		 free_bytes;
 	int		 need_bytes;
-#ifdef DEBUG
-	xfs_lsn_t	 tail_lsn;
-#endif
-
 
 #ifdef DEBUG
 	if (log->l_flags & XLOG_ACTIVE_RECOVERY)
 		panic("grant Recovery problem");
 #endif
 
-	/* Is there space or do we need to sleep? */
-	spin_lock(&log->l_grant_lock);
-
 	trace_xfs_log_grant_enter(log, tic);
 
+	need_bytes = tic->t_unit_res;
+	if (tic->t_flags & XFS_LOG_PERM_RESERV)
+		need_bytes *= tic->t_ocnt;
+
 	/* something is already sleeping; insert new transaction at end */
-	if (log->l_reserve_headq) {
-		xlog_ins_ticketq(&log->l_reserve_headq, tic);
+	if (!list_empty_careful(&log->l_reserveq)) {
+		spin_lock(&log->l_grant_reserve_lock);
+		/* recheck the queue now we are locked */
+		if (list_empty(&log->l_reserveq)) {
+			spin_unlock(&log->l_grant_reserve_lock);
+			goto redo;
+		}
+		list_add_tail(&tic->t_queue, &log->l_reserveq);
 
 		trace_xfs_log_grant_sleep1(log, tic);
 
@@ -2563,72 +2537,57 @@
 			goto error_return;
 
 		XFS_STATS_INC(xs_sleep_logspace);
-		sv_wait(&tic->t_wait, PINOD|PLTWAIT, &log->l_grant_lock, s);
+		xlog_wait(&tic->t_wait, &log->l_grant_reserve_lock);
+
 		/*
 		 * If we got an error, and the filesystem is shutting down,
 		 * we'll catch it down below. So just continue...
 		 */
 		trace_xfs_log_grant_wake1(log, tic);
-		spin_lock(&log->l_grant_lock);
 	}
-	if (tic->t_flags & XFS_LOG_PERM_RESERV)
-		need_bytes = tic->t_unit_res*tic->t_ocnt;
-	else
-		need_bytes = tic->t_unit_res;
 
 redo:
 	if (XLOG_FORCED_SHUTDOWN(log))
-		goto error_return;
+		goto error_return_unlocked;
 
-	free_bytes = xlog_space_left(log, log->l_grant_reserve_cycle,
-				     log->l_grant_reserve_bytes);
+	free_bytes = xlog_space_left(log, &log->l_grant_reserve_head);
 	if (free_bytes < need_bytes) {
-		if ((tic->t_flags & XLOG_TIC_IN_Q) == 0)
-			xlog_ins_ticketq(&log->l_reserve_headq, tic);
+		spin_lock(&log->l_grant_reserve_lock);
+		if (list_empty(&tic->t_queue))
+			list_add_tail(&tic->t_queue, &log->l_reserveq);
 
 		trace_xfs_log_grant_sleep2(log, tic);
 
-		spin_unlock(&log->l_grant_lock);
-		xlog_grant_push_ail(log->l_mp, need_bytes);
-		spin_lock(&log->l_grant_lock);
-
-		XFS_STATS_INC(xs_sleep_logspace);
-		sv_wait(&tic->t_wait, PINOD|PLTWAIT, &log->l_grant_lock, s);
-
-		spin_lock(&log->l_grant_lock);
 		if (XLOG_FORCED_SHUTDOWN(log))
 			goto error_return;
 
-		trace_xfs_log_grant_wake2(log, tic);
+		xlog_grant_push_ail(log, need_bytes);
 
+		XFS_STATS_INC(xs_sleep_logspace);
+		xlog_wait(&tic->t_wait, &log->l_grant_reserve_lock);
+
+		trace_xfs_log_grant_wake2(log, tic);
 		goto redo;
-	} else if (tic->t_flags & XLOG_TIC_IN_Q)
-		xlog_del_ticketq(&log->l_reserve_headq, tic);
+	}
+
+	if (!list_empty(&tic->t_queue)) {
+		spin_lock(&log->l_grant_reserve_lock);
+		list_del_init(&tic->t_queue);
+		spin_unlock(&log->l_grant_reserve_lock);
+	}
 
 	/* we've got enough space */
-	xlog_grant_add_space(log, need_bytes);
-#ifdef DEBUG
-	tail_lsn = log->l_tail_lsn;
-	/*
-	 * Check to make sure the grant write head didn't just over lap the
-	 * tail.  If the cycles are the same, we can't be overlapping.
-	 * Otherwise, make sure that the cycles differ by exactly one and
-	 * check the byte count.
-	 */
-	if (CYCLE_LSN(tail_lsn) != log->l_grant_write_cycle) {
-		ASSERT(log->l_grant_write_cycle-1 == CYCLE_LSN(tail_lsn));
-		ASSERT(log->l_grant_write_bytes <= BBTOB(BLOCK_LSN(tail_lsn)));
-	}
-#endif
+	xlog_grant_add_space(log, &log->l_grant_reserve_head, need_bytes);
+	xlog_grant_add_space(log, &log->l_grant_write_head, need_bytes);
 	trace_xfs_log_grant_exit(log, tic);
-	xlog_verify_grant_head(log, 1);
-	spin_unlock(&log->l_grant_lock);
+	xlog_verify_grant_tail(log);
 	return 0;
 
- error_return:
-	if (tic->t_flags & XLOG_TIC_IN_Q)
-		xlog_del_ticketq(&log->l_reserve_headq, tic);
-
+error_return_unlocked:
+	spin_lock(&log->l_grant_reserve_lock);
+error_return:
+	list_del_init(&tic->t_queue);
+	spin_unlock(&log->l_grant_reserve_lock);
 	trace_xfs_log_grant_error(log, tic);
 
 	/*
@@ -2638,7 +2597,6 @@
 	 */
 	tic->t_curr_res = 0;
 	tic->t_cnt = 0; /* ungrant will give back unit_res * t_cnt. */
-	spin_unlock(&log->l_grant_lock);
 	return XFS_ERROR(EIO);
 }	/* xlog_grant_log_space */
 
@@ -2646,17 +2604,14 @@
 /*
  * Replenish the byte reservation required by moving the grant write head.
  *
- *
+ * Similar to xlog_grant_log_space, the function is structured to have a lock
+ * free fast path.
  */
 STATIC int
 xlog_regrant_write_log_space(xlog_t	   *log,
 			     xlog_ticket_t *tic)
 {
 	int		free_bytes, need_bytes;
-	xlog_ticket_t	*ntic;
-#ifdef DEBUG
-	xfs_lsn_t	tail_lsn;
-#endif
 
 	tic->t_curr_res = tic->t_unit_res;
 	xlog_tic_reset_res(tic);
@@ -2669,12 +2624,9 @@
 		panic("regrant Recovery problem");
 #endif
 
-	spin_lock(&log->l_grant_lock);
-
 	trace_xfs_log_regrant_write_enter(log, tic);
-
 	if (XLOG_FORCED_SHUTDOWN(log))
-		goto error_return;
+		goto error_return_unlocked;
 
 	/* If there are other waiters on the queue then give them a
 	 * chance at logspace before us. Wake up the first waiters,
@@ -2683,92 +2635,76 @@
 	 * this transaction.
 	 */
 	need_bytes = tic->t_unit_res;
-	if ((ntic = log->l_write_headq)) {
-		free_bytes = xlog_space_left(log, log->l_grant_write_cycle,
-					     log->l_grant_write_bytes);
-		do {
+	if (!list_empty_careful(&log->l_writeq)) {
+		struct xlog_ticket *ntic;
+
+		spin_lock(&log->l_grant_write_lock);
+		free_bytes = xlog_space_left(log, &log->l_grant_write_head);
+		list_for_each_entry(ntic, &log->l_writeq, t_queue) {
 			ASSERT(ntic->t_flags & XLOG_TIC_PERM_RESERV);
 
 			if (free_bytes < ntic->t_unit_res)
 				break;
 			free_bytes -= ntic->t_unit_res;
-			sv_signal(&ntic->t_wait);
-			ntic = ntic->t_next;
-		} while (ntic != log->l_write_headq);
+			wake_up(&ntic->t_wait);
+		}
 
-		if (ntic != log->l_write_headq) {
-			if ((tic->t_flags & XLOG_TIC_IN_Q) == 0)
-				xlog_ins_ticketq(&log->l_write_headq, tic);
-
+		if (ntic != list_first_entry(&log->l_writeq,
+						struct xlog_ticket, t_queue)) {
+			if (list_empty(&tic->t_queue))
+				list_add_tail(&tic->t_queue, &log->l_writeq);
 			trace_xfs_log_regrant_write_sleep1(log, tic);
 
-			spin_unlock(&log->l_grant_lock);
-			xlog_grant_push_ail(log->l_mp, need_bytes);
-			spin_lock(&log->l_grant_lock);
+			xlog_grant_push_ail(log, need_bytes);
 
 			XFS_STATS_INC(xs_sleep_logspace);
-			sv_wait(&tic->t_wait, PINOD|PLTWAIT,
-				&log->l_grant_lock, s);
-
-			/* If we're shutting down, this tic is already
-			 * off the queue */
-			spin_lock(&log->l_grant_lock);
-			if (XLOG_FORCED_SHUTDOWN(log))
-				goto error_return;
-
+			xlog_wait(&tic->t_wait, &log->l_grant_write_lock);
 			trace_xfs_log_regrant_write_wake1(log, tic);
-		}
+		} else
+			spin_unlock(&log->l_grant_write_lock);
 	}
 
 redo:
 	if (XLOG_FORCED_SHUTDOWN(log))
-		goto error_return;
+		goto error_return_unlocked;
 
-	free_bytes = xlog_space_left(log, log->l_grant_write_cycle,
-				     log->l_grant_write_bytes);
+	free_bytes = xlog_space_left(log, &log->l_grant_write_head);
 	if (free_bytes < need_bytes) {
-		if ((tic->t_flags & XLOG_TIC_IN_Q) == 0)
-			xlog_ins_ticketq(&log->l_write_headq, tic);
-		spin_unlock(&log->l_grant_lock);
-		xlog_grant_push_ail(log->l_mp, need_bytes);
-		spin_lock(&log->l_grant_lock);
+		spin_lock(&log->l_grant_write_lock);
+		if (list_empty(&tic->t_queue))
+			list_add_tail(&tic->t_queue, &log->l_writeq);
 
-		XFS_STATS_INC(xs_sleep_logspace);
-		trace_xfs_log_regrant_write_sleep2(log, tic);
-
-		sv_wait(&tic->t_wait, PINOD|PLTWAIT, &log->l_grant_lock, s);
-
-		/* If we're shutting down, this tic is already off the queue */
-		spin_lock(&log->l_grant_lock);
 		if (XLOG_FORCED_SHUTDOWN(log))
 			goto error_return;
 
+		xlog_grant_push_ail(log, need_bytes);
+
+		XFS_STATS_INC(xs_sleep_logspace);
+		trace_xfs_log_regrant_write_sleep2(log, tic);
+		xlog_wait(&tic->t_wait, &log->l_grant_write_lock);
+
 		trace_xfs_log_regrant_write_wake2(log, tic);
 		goto redo;
-	} else if (tic->t_flags & XLOG_TIC_IN_Q)
-		xlog_del_ticketq(&log->l_write_headq, tic);
+	}
+
+	if (!list_empty(&tic->t_queue)) {
+		spin_lock(&log->l_grant_write_lock);
+		list_del_init(&tic->t_queue);
+		spin_unlock(&log->l_grant_write_lock);
+	}
 
 	/* we've got enough space */
-	xlog_grant_add_space_write(log, need_bytes);
-#ifdef DEBUG
-	tail_lsn = log->l_tail_lsn;
-	if (CYCLE_LSN(tail_lsn) != log->l_grant_write_cycle) {
-		ASSERT(log->l_grant_write_cycle-1 == CYCLE_LSN(tail_lsn));
-		ASSERT(log->l_grant_write_bytes <= BBTOB(BLOCK_LSN(tail_lsn)));
-	}
-#endif
-
+	xlog_grant_add_space(log, &log->l_grant_write_head, need_bytes);
 	trace_xfs_log_regrant_write_exit(log, tic);
-
-	xlog_verify_grant_head(log, 1);
-	spin_unlock(&log->l_grant_lock);
+	xlog_verify_grant_tail(log);
 	return 0;
 
 
+ error_return_unlocked:
+	spin_lock(&log->l_grant_write_lock);
  error_return:
-	if (tic->t_flags & XLOG_TIC_IN_Q)
-		xlog_del_ticketq(&log->l_reserve_headq, tic);
-
+	list_del_init(&tic->t_queue);
+	spin_unlock(&log->l_grant_write_lock);
 	trace_xfs_log_regrant_write_error(log, tic);
 
 	/*
@@ -2778,7 +2714,6 @@
 	 */
 	tic->t_curr_res = 0;
 	tic->t_cnt = 0; /* ungrant will give back unit_res * t_cnt. */
-	spin_unlock(&log->l_grant_lock);
 	return XFS_ERROR(EIO);
 }	/* xlog_regrant_write_log_space */
 
@@ -2799,27 +2734,24 @@
 	if (ticket->t_cnt > 0)
 		ticket->t_cnt--;
 
-	spin_lock(&log->l_grant_lock);
-	xlog_grant_sub_space(log, ticket->t_curr_res);
+	xlog_grant_sub_space(log, &log->l_grant_reserve_head,
+					ticket->t_curr_res);
+	xlog_grant_sub_space(log, &log->l_grant_write_head,
+					ticket->t_curr_res);
 	ticket->t_curr_res = ticket->t_unit_res;
 	xlog_tic_reset_res(ticket);
 
 	trace_xfs_log_regrant_reserve_sub(log, ticket);
 
-	xlog_verify_grant_head(log, 1);
-
 	/* just return if we still have some of the pre-reserved space */
-	if (ticket->t_cnt > 0) {
-		spin_unlock(&log->l_grant_lock);
+	if (ticket->t_cnt > 0)
 		return;
-	}
 
-	xlog_grant_add_space_reserve(log, ticket->t_unit_res);
+	xlog_grant_add_space(log, &log->l_grant_reserve_head,
+					ticket->t_unit_res);
 
 	trace_xfs_log_regrant_reserve_exit(log, ticket);
 
-	xlog_verify_grant_head(log, 0);
-	spin_unlock(&log->l_grant_lock);
 	ticket->t_curr_res = ticket->t_unit_res;
 	xlog_tic_reset_res(ticket);
 }	/* xlog_regrant_reserve_log_space */
@@ -2843,28 +2775,29 @@
 xlog_ungrant_log_space(xlog_t	     *log,
 		       xlog_ticket_t *ticket)
 {
+	int	bytes;
+
 	if (ticket->t_cnt > 0)
 		ticket->t_cnt--;
 
-	spin_lock(&log->l_grant_lock);
 	trace_xfs_log_ungrant_enter(log, ticket);
-
-	xlog_grant_sub_space(log, ticket->t_curr_res);
-
 	trace_xfs_log_ungrant_sub(log, ticket);
 
-	/* If this is a permanent reservation ticket, we may be able to free
+	/*
+	 * If this is a permanent reservation ticket, we may be able to free
 	 * up more space based on the remaining count.
 	 */
+	bytes = ticket->t_curr_res;
 	if (ticket->t_cnt > 0) {
 		ASSERT(ticket->t_flags & XLOG_TIC_PERM_RESERV);
-		xlog_grant_sub_space(log, ticket->t_unit_res*ticket->t_cnt);
+		bytes += ticket->t_unit_res*ticket->t_cnt;
 	}
 
+	xlog_grant_sub_space(log, &log->l_grant_reserve_head, bytes);
+	xlog_grant_sub_space(log, &log->l_grant_write_head, bytes);
+
 	trace_xfs_log_ungrant_exit(log, ticket);
 
-	xlog_verify_grant_head(log, 1);
-	spin_unlock(&log->l_grant_lock);
 	xfs_log_move_tail(log->l_mp, 1);
 }	/* xlog_ungrant_log_space */
 
@@ -2901,11 +2834,11 @@
 
 	if (iclog->ic_state == XLOG_STATE_WANT_SYNC) {
 		/* update tail before writing to iclog */
-		xlog_assign_tail_lsn(log->l_mp);
+		xfs_lsn_t tail_lsn = xlog_assign_tail_lsn(log->l_mp);
 		sync++;
 		iclog->ic_state = XLOG_STATE_SYNCING;
-		iclog->ic_header.h_tail_lsn = cpu_to_be64(log->l_tail_lsn);
-		xlog_verify_tail_lsn(log, iclog, log->l_tail_lsn);
+		iclog->ic_header.h_tail_lsn = cpu_to_be64(tail_lsn);
+		xlog_verify_tail_lsn(log, iclog, tail_lsn);
 		/* cycle incremented when incrementing curr_block */
 	}
 	spin_unlock(&log->l_icloglock);
@@ -3088,7 +3021,7 @@
 			return XFS_ERROR(EIO);
 		}
 		XFS_STATS_INC(xs_log_force_sleep);
-		sv_wait(&iclog->ic_force_wait, PINOD, &log->l_icloglock, s);
+		xlog_wait(&iclog->ic_force_wait, &log->l_icloglock);
 		/*
 		 * No need to grab the log lock here since we're
 		 * only deciding whether or not to return EIO
@@ -3206,8 +3139,8 @@
 
 				XFS_STATS_INC(xs_log_force_sleep);
 
-				sv_wait(&iclog->ic_prev->ic_write_wait,
-					PSWP, &log->l_icloglock, s);
+				xlog_wait(&iclog->ic_prev->ic_write_wait,
+							&log->l_icloglock);
 				if (log_flushed)
 					*log_flushed = 1;
 				already_slept = 1;
@@ -3235,7 +3168,7 @@
 				return XFS_ERROR(EIO);
 			}
 			XFS_STATS_INC(xs_log_force_sleep);
-			sv_wait(&iclog->ic_force_wait, PSWP, &log->l_icloglock, s);
+			xlog_wait(&iclog->ic_force_wait, &log->l_icloglock);
 			/*
 			 * No need to grab the log lock here since we're
 			 * only deciding whether or not to return EIO
@@ -3310,10 +3243,8 @@
 	xlog_ticket_t	*ticket)
 {
 	ASSERT(atomic_read(&ticket->t_ref) > 0);
-	if (atomic_dec_and_test(&ticket->t_ref)) {
-		sv_destroy(&ticket->t_wait);
+	if (atomic_dec_and_test(&ticket->t_ref))
 		kmem_zone_free(xfs_log_ticket_zone, ticket);
-	}
 }
 
 xlog_ticket_t *
@@ -3435,6 +3366,7 @@
         }
 
 	atomic_set(&tic->t_ref, 1);
+	INIT_LIST_HEAD(&tic->t_queue);
 	tic->t_unit_res		= unit_bytes;
 	tic->t_curr_res		= unit_bytes;
 	tic->t_cnt		= cnt;
@@ -3445,7 +3377,7 @@
 	tic->t_trans_type	= 0;
 	if (xflags & XFS_LOG_PERM_RESERV)
 		tic->t_flags |= XLOG_TIC_PERM_RESERV;
-	sv_init(&tic->t_wait, SV_DEFAULT, "logtick");
+	init_waitqueue_head(&tic->t_wait);
 
 	xlog_tic_reset_res(tic);
 
@@ -3484,18 +3416,25 @@
 }
 
 STATIC void
-xlog_verify_grant_head(xlog_t *log, int equals)
+xlog_verify_grant_tail(
+	struct log	*log)
 {
-    if (log->l_grant_reserve_cycle == log->l_grant_write_cycle) {
-	if (equals)
-	    ASSERT(log->l_grant_reserve_bytes >= log->l_grant_write_bytes);
-	else
-	    ASSERT(log->l_grant_reserve_bytes > log->l_grant_write_bytes);
-    } else {
-	ASSERT(log->l_grant_reserve_cycle-1 == log->l_grant_write_cycle);
-	ASSERT(log->l_grant_write_bytes >= log->l_grant_reserve_bytes);
-    }
-}	/* xlog_verify_grant_head */
+	int		tail_cycle, tail_blocks;
+	int		cycle, space;
+
+	/*
+	 * Check to make sure the grant write head didn't just overlap the
+	 * tail.  If the cycles are the same, we can't be overlapping.
+	 * Otherwise, make sure that the cycles differ by exactly one and
+	 * check the byte count.
+	 */
+	xlog_crack_grant_head(&log->l_grant_write_head, &cycle, &space);
+	xlog_crack_atomic_lsn(&log->l_tail_lsn, &tail_cycle, &tail_blocks);
+	if (tail_cycle != cycle) {
+		ASSERT(cycle - 1 == tail_cycle);
+		ASSERT(space <= BBTOB(tail_blocks));
+	}
+}
 
 /* check if it will fit */
 STATIC void
@@ -3716,12 +3655,10 @@
 		xlog_cil_force(log);
 
 	/*
-	 * We must hold both the GRANT lock and the LOG lock,
-	 * before we mark the filesystem SHUTDOWN and wake
-	 * everybody up to tell the bad news.
+	 * mark the filesystem and the log as being in a shutdown state and wake
+	 * everybody up to tell them the bad news.
 	 */
 	spin_lock(&log->l_icloglock);
-	spin_lock(&log->l_grant_lock);
 	mp->m_flags |= XFS_MOUNT_FS_SHUTDOWN;
 	if (mp->m_sb_bp)
 		XFS_BUF_DONE(mp->m_sb_bp);
@@ -3742,27 +3679,21 @@
 	spin_unlock(&log->l_icloglock);
 
 	/*
-	 * We don't want anybody waiting for log reservations
-	 * after this. That means we have to wake up everybody
-	 * queued up on reserve_headq as well as write_headq.
-	 * In addition, we make sure in xlog_{re}grant_log_space
-	 * that we don't enqueue anything once the SHUTDOWN flag
-	 * is set, and this action is protected by the GRANTLOCK.
+	 * We don't want anybody waiting for log reservations after this. That
+	 * means we have to wake up everybody queued up on reserveq as well as
+	 * writeq.  In addition, we make sure in xlog_{re}grant_log_space that
+	 * we don't enqueue anything once the SHUTDOWN flag is set, and this
+	 * action is protected by the grant locks.
 	 */
-	if ((tic = log->l_reserve_headq)) {
-		do {
-			sv_signal(&tic->t_wait);
-			tic = tic->t_next;
-		} while (tic != log->l_reserve_headq);
-	}
+	spin_lock(&log->l_grant_reserve_lock);
+	list_for_each_entry(tic, &log->l_reserveq, t_queue)
+		wake_up(&tic->t_wait);
+	spin_unlock(&log->l_grant_reserve_lock);
 
-	if ((tic = log->l_write_headq)) {
-		do {
-			sv_signal(&tic->t_wait);
-			tic = tic->t_next;
-		} while (tic != log->l_write_headq);
-	}
-	spin_unlock(&log->l_grant_lock);
+	spin_lock(&log->l_grant_write_lock);
+	list_for_each_entry(tic, &log->l_writeq, t_queue)
+		wake_up(&tic->t_wait);
+	spin_unlock(&log->l_grant_write_lock);
 
 	if (!(log->l_iclog->ic_state & XLOG_STATE_IOERROR)) {
 		ASSERT(!logerror);
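
The grant-queue conversions in xfs_log.c above all follow one locking contract: a waiter enters xlog_wait() holding the per-queue grant lock (the helper drops it before sleeping), and a waker holds that same lock around wake_up() so a wakeup cannot be lost between queueing the ticket and going to sleep. A minimal sketch of the pairing, restating the patterns already visible above rather than adding new patch content:

	/* waiter side (cf. xlog_regrant_write_log_space) */
	spin_lock(&log->l_grant_write_lock);
	if (list_empty(&tic->t_queue))
		list_add_tail(&tic->t_queue, &log->l_writeq);
	xlog_wait(&tic->t_wait, &log->l_grant_write_lock);	/* returns with the lock dropped */

	/* waker side (cf. xfs_log_force_umount) */
	spin_lock(&log->l_grant_write_lock);
	list_for_each_entry(tic, &log->l_writeq, t_queue)
		wake_up(&tic->t_wait);
	spin_unlock(&log->l_grant_write_lock);
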
diff --git a/fs/xfs/xfs_log_cil.c b/fs/xfs/xfs_log_cil.c
index 23d6ceb..9dc8125 100644
--- a/fs/xfs/xfs_log_cil.c
+++ b/fs/xfs/xfs_log_cil.c
@@ -61,7 +61,7 @@
 	INIT_LIST_HEAD(&cil->xc_committing);
 	spin_lock_init(&cil->xc_cil_lock);
 	init_rwsem(&cil->xc_ctx_lock);
-	sv_init(&cil->xc_commit_wait, SV_DEFAULT, "cilwait");
+	init_waitqueue_head(&cil->xc_commit_wait);
 
 	INIT_LIST_HEAD(&ctx->committing);
 	INIT_LIST_HEAD(&ctx->busy_extents);
@@ -361,15 +361,10 @@
 	int	abort)
 {
 	struct xfs_cil_ctx	*ctx = args;
-	struct xfs_log_vec	*lv;
-	int			abortflag = abort ? XFS_LI_ABORTED : 0;
 	struct xfs_busy_extent	*busyp, *n;
 
-	/* unpin all the log items */
-	for (lv = ctx->lv_chain; lv; lv = lv->lv_next ) {
-		xfs_trans_item_committed(lv->lv_item, ctx->start_lsn,
-							abortflag);
-	}
+	xfs_trans_committed_bulk(ctx->cil->xc_log->l_ailp, ctx->lv_chain,
+					ctx->start_lsn, abort);
 
 	list_for_each_entry_safe(busyp, n, &ctx->busy_extents, list)
 		xfs_alloc_busy_clear(ctx->cil->xc_log->l_mp, busyp);
@@ -568,7 +563,7 @@
 			 * It is still being pushed! Wait for the push to
 			 * complete, then start again from the beginning.
 			 */
-			sv_wait(&cil->xc_commit_wait, 0, &cil->xc_cil_lock, 0);
+			xlog_wait(&cil->xc_commit_wait, &cil->xc_cil_lock);
 			goto restart;
 		}
 	}
@@ -592,7 +587,7 @@
 	 */
 	spin_lock(&cil->xc_cil_lock);
 	ctx->commit_lsn = commit_lsn;
-	sv_broadcast(&cil->xc_commit_wait);
+	wake_up_all(&cil->xc_commit_wait);
 	spin_unlock(&cil->xc_cil_lock);
 
 	/* release the hounds! */
@@ -757,7 +752,7 @@
 			 * It is still being pushed! Wait for the push to
 			 * complete, then start again from the beginning.
 			 */
-			sv_wait(&cil->xc_commit_wait, 0, &cil->xc_cil_lock, 0);
+			xlog_wait(&cil->xc_commit_wait, &cil->xc_cil_lock);
 			goto restart;
 		}
 		if (ctx->sequence != sequence)
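
Both xlog_wait() call sites in the CIL code sit inside retry loops: because the helper drops xc_cil_lock before sleeping, the caller has to retake the lock and rescan the committing list once it wakes. Roughly, as a sketch built from the fields used in this file (the empty commit_lsn test is an assumption about the surrounding loop, which is not part of this hunk):

restart:
	spin_lock(&cil->xc_cil_lock);
	list_for_each_entry(ctx, &cil->xc_committing, committing) {
		if (!ctx->commit_lsn) {
			/* this context is still being pushed: sleep, then rescan */
			xlog_wait(&cil->xc_commit_wait, &cil->xc_cil_lock);
			goto restart;
		}
	}
	spin_unlock(&cil->xc_cil_lock);
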
diff --git a/fs/xfs/xfs_log_priv.h b/fs/xfs/xfs_log_priv.h
index edcdfe0..d5f8be8 100644
--- a/fs/xfs/xfs_log_priv.h
+++ b/fs/xfs/xfs_log_priv.h
@@ -21,7 +21,6 @@
 struct xfs_buf;
 struct log;
 struct xlog_ticket;
-struct xfs_buf_cancel;
 struct xfs_mount;
 
 /*
@@ -54,7 +53,6 @@
 	BTOBB(XLOG_MAX_ICLOGS << (xfs_sb_version_haslogv2(&log->l_mp->m_sb) ? \
 	 XLOG_MAX_RECORD_BSHIFT : XLOG_BIG_RECORD_BSHIFT))
 
-
 static inline xfs_lsn_t xlog_assign_lsn(uint cycle, uint block)
 {
 	return ((xfs_lsn_t)cycle << 32) | block;
@@ -133,12 +131,10 @@
  */
 #define XLOG_TIC_INITED		0x1	/* has been initialized */
 #define XLOG_TIC_PERM_RESERV	0x2	/* permanent reservation */
-#define XLOG_TIC_IN_Q		0x4
 
 #define XLOG_TIC_FLAGS \
 	{ XLOG_TIC_INITED,	"XLOG_TIC_INITED" }, \
-	{ XLOG_TIC_PERM_RESERV,	"XLOG_TIC_PERM_RESERV" }, \
-	{ XLOG_TIC_IN_Q,	"XLOG_TIC_IN_Q" }
+	{ XLOG_TIC_PERM_RESERV,	"XLOG_TIC_PERM_RESERV" }
 
 #endif	/* __KERNEL__ */
 
@@ -244,9 +240,8 @@
 } xlog_res_t;
 
 typedef struct xlog_ticket {
-	sv_t		   t_wait;	 /* ticket wait queue            : 20 */
-	struct xlog_ticket *t_next;	 /*			         :4|8 */
-	struct xlog_ticket *t_prev;	 /*				 :4|8 */
+	wait_queue_head_t  t_wait;	 /* ticket wait queue */
+	struct list_head   t_queue;	 /* reserve/write queue */
 	xlog_tid_t	   t_tid;	 /* transaction identifier	 : 4  */
 	atomic_t	   t_ref;	 /* ticket reference count       : 4  */
 	int		   t_curr_res;	 /* current reservation in bytes : 4  */
@@ -353,8 +348,8 @@
  * and move everything else out to subsequent cachelines.
  */
 typedef struct xlog_in_core {
-	sv_t			ic_force_wait;
-	sv_t			ic_write_wait;
+	wait_queue_head_t	ic_force_wait;
+	wait_queue_head_t	ic_write_wait;
 	struct xlog_in_core	*ic_next;
 	struct xlog_in_core	*ic_prev;
 	struct xfs_buf		*ic_bp;
@@ -421,7 +416,7 @@
 	struct xfs_cil_ctx	*xc_ctx;
 	struct rw_semaphore	xc_ctx_lock;
 	struct list_head	xc_committing;
-	sv_t			xc_commit_wait;
+	wait_queue_head_t	xc_commit_wait;
 	xfs_lsn_t		xc_current_sequence;
 };
 
@@ -491,7 +486,7 @@
 	struct xfs_buftarg	*l_targ;        /* buftarg of log */
 	uint			l_flags;
 	uint			l_quotaoffs_flag; /* XFS_DQ_*, for QUOTAOFFs */
-	struct xfs_buf_cancel	**l_buf_cancel_table;
+	struct list_head	*l_buf_cancel_table;
 	int			l_iclog_hsize;  /* size of iclog header */
 	int			l_iclog_heads;  /* # of iclog header sectors */
 	uint			l_sectBBsize;   /* sector size in BBs (2^n) */
@@ -503,29 +498,40 @@
 	int			l_logBBsize;    /* size of log in BB chunks */
 
 	/* The following block of fields are changed while holding icloglock */
-	sv_t			l_flush_wait ____cacheline_aligned_in_smp;
+	wait_queue_head_t	l_flush_wait ____cacheline_aligned_in_smp;
 						/* waiting for iclog flush */
 	int			l_covered_state;/* state of "covering disk
 						 * log entries" */
 	xlog_in_core_t		*l_iclog;       /* head log queue	*/
 	spinlock_t		l_icloglock;    /* grab to change iclog state */
-	xfs_lsn_t		l_tail_lsn;     /* lsn of 1st LR with unflushed
-						 * buffers */
-	xfs_lsn_t		l_last_sync_lsn;/* lsn of last LR on disk */
 	int			l_curr_cycle;   /* Cycle number of log writes */
 	int			l_prev_cycle;   /* Cycle number before last
 						 * block increment */
 	int			l_curr_block;   /* current logical log block */
 	int			l_prev_block;   /* previous logical log block */
 
-	/* The following block of fields are changed while holding grant_lock */
-	spinlock_t		l_grant_lock ____cacheline_aligned_in_smp;
-	xlog_ticket_t		*l_reserve_headq;
-	xlog_ticket_t		*l_write_headq;
-	int			l_grant_reserve_cycle;
-	int			l_grant_reserve_bytes;
-	int			l_grant_write_cycle;
-	int			l_grant_write_bytes;
+	/*
+	 * l_last_sync_lsn and l_tail_lsn are atomics so they can be set and
+	 * read without needing to hold specific locks. To avoid operations
+	 * contending with other hot objects, place each of them on a separate
+	 * cacheline.
+	 */
+	/* lsn of last LR on disk */
+	atomic64_t		l_last_sync_lsn ____cacheline_aligned_in_smp;
+	/* lsn of 1st LR with unflushed buffers */
+	atomic64_t		l_tail_lsn ____cacheline_aligned_in_smp;
+
+	/*
+	 * ticket grant locks, queues and accounting have their own cachelines
+	 * as these are quite hot and can be operated on concurrently.
+	 */
+	spinlock_t		l_grant_reserve_lock ____cacheline_aligned_in_smp;
+	struct list_head	l_reserveq;
+	atomic64_t		l_grant_reserve_head;
+
+	spinlock_t		l_grant_write_lock ____cacheline_aligned_in_smp;
+	struct list_head	l_writeq;
+	atomic64_t		l_grant_write_head;
 
 	/* The following field are used for debugging; need to hold icloglock */
 #ifdef DEBUG
@@ -534,6 +540,9 @@
 
 } xlog_t;
 
+#define XLOG_BUF_CANCEL_BUCKET(log, blkno) \
+	((log)->l_buf_cancel_table + ((__uint64_t)blkno % XLOG_BC_TABLE_SIZE))
+
 #define XLOG_FORCED_SHUTDOWN(log)	((log)->l_flags & XLOG_IO_ERROR)
 
 /* common routines */
@@ -562,6 +571,61 @@
 				xlog_in_core_t **commit_iclog, uint flags);
 
 /*
+ * When we crack an atomic LSN, we sample it first so that the value will not
+ * change while we are cracking it into the component values. This means we
+ * will always get consistent component values to work from. This should always
+ * be used to sample and crack LSNs that are stored and updated in atomic
+ * variables.
+ */
+static inline void
+xlog_crack_atomic_lsn(atomic64_t *lsn, uint *cycle, uint *block)
+{
+	xfs_lsn_t val = atomic64_read(lsn);
+
+	*cycle = CYCLE_LSN(val);
+	*block = BLOCK_LSN(val);
+}
+
+/*
+ * Calculate and assign a value to an atomic LSN variable from component pieces.
+ */
+static inline void
+xlog_assign_atomic_lsn(atomic64_t *lsn, uint cycle, uint block)
+{
+	atomic64_set(lsn, xlog_assign_lsn(cycle, block));
+}
+
+/*
+ * When we crack the grant head, we sample it first so that the value will not
+ * change while we are cracking it into the component values. This means we
+ * will always get consistent component values to work from.
+ */
+static inline void
+xlog_crack_grant_head_val(int64_t val, int *cycle, int *space)
+{
+	*cycle = val >> 32;
+	*space = val & 0xffffffff;
+}
+
+static inline void
+xlog_crack_grant_head(atomic64_t *head, int *cycle, int *space)
+{
+	xlog_crack_grant_head_val(atomic64_read(head), cycle, space);
+}
+
+static inline int64_t
+xlog_assign_grant_head_val(int cycle, int space)
+{
+	return ((int64_t)cycle << 32) | space;
+}
+
+static inline void
+xlog_assign_grant_head(atomic64_t *head, int cycle, int space)
+{
+	atomic64_set(head, xlog_assign_grant_head_val(cycle, space));
+}
+
+/*
  * Committed Item List interfaces
  */
 int	xlog_cil_init(struct log *log);
@@ -585,6 +649,21 @@
  */
 #define XLOG_UNMOUNT_REC_TYPE	(-1U)
 
+/*
+ * Wrapper function for waiting on a wait queue serialised against wakeups
+ * by a spinlock. This matches the semantics of all the wait queues used in the
+ * log code.
+ */
+static inline void xlog_wait(wait_queue_head_t *wq, spinlock_t *lock)
+{
+	DECLARE_WAITQUEUE(wait, current);
+
+	add_wait_queue_exclusive(wq, &wait);
+	__set_current_state(TASK_UNINTERRUPTIBLE);
+	spin_unlock(lock);
+	schedule();
+	remove_wait_queue(wq, &wait);
+}
 #endif	/* __KERNEL__ */
 
 #endif	/* __XFS_LOG_PRIV_H__ */
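
The helpers above pack a grant head into a single atomic64_t: the cycle number in the high 32 bits and the byte count in the low 32 bits, so, for example, cycle 2 at byte 4096 packs to (2 << 32) | 4096. Updating such a head without a lock is then a read/modify/compare-and-exchange loop. The sketch below only illustrates that technique; the helper name is hypothetical and l_logsize (the log size in bytes) is assumed, so the real helpers used elsewhere in this series may differ in detail:

/* Sketch only: lockless grant head update via cmpxchg. */
static void
xlog_grant_head_add(struct log *log, atomic64_t *head, int bytes)
{
	int64_t	head_val = atomic64_read(head);
	int64_t	new, old;

	do {
		int	cycle, space;

		xlog_crack_grant_head_val(head_val, &cycle, &space);
		space += bytes;
		if (space > log->l_logsize) {
			/* wrapped past the physical end of the log */
			space -= log->l_logsize;
			cycle++;
		}

		old = head_val;
		new = xlog_assign_grant_head_val(cycle, space);
		head_val = atomic64_cmpxchg(head, old, new);
	} while (head_val != old);
}
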
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index 966d3f9..204d8e5 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -53,6 +53,17 @@
 #endif
 
 /*
+ * This structure is used during recovery to record the buf log items which
+ * have been canceled and should not be replayed.
+ */
+struct xfs_buf_cancel {
+	xfs_daddr_t		bc_blkno;
+	uint			bc_len;
+	int			bc_refcount;
+	struct list_head	bc_list;
+};
+
+/*
  * Sector aligned buffer routines for buffer create/read/write/access
  */
 
@@ -925,12 +936,12 @@
 	log->l_curr_cycle = be32_to_cpu(rhead->h_cycle);
 	if (found == 2)
 		log->l_curr_cycle++;
-	log->l_tail_lsn = be64_to_cpu(rhead->h_tail_lsn);
-	log->l_last_sync_lsn = be64_to_cpu(rhead->h_lsn);
-	log->l_grant_reserve_cycle = log->l_curr_cycle;
-	log->l_grant_reserve_bytes = BBTOB(log->l_curr_block);
-	log->l_grant_write_cycle = log->l_curr_cycle;
-	log->l_grant_write_bytes = BBTOB(log->l_curr_block);
+	atomic64_set(&log->l_tail_lsn, be64_to_cpu(rhead->h_tail_lsn));
+	atomic64_set(&log->l_last_sync_lsn, be64_to_cpu(rhead->h_lsn));
+	xlog_assign_grant_head(&log->l_grant_reserve_head, log->l_curr_cycle,
+					BBTOB(log->l_curr_block));
+	xlog_assign_grant_head(&log->l_grant_write_head, log->l_curr_cycle,
+					BBTOB(log->l_curr_block));
 
 	/*
 	 * Look for unmount record.  If we find it, then we know there
@@ -960,7 +971,7 @@
 	}
 	after_umount_blk = (i + hblks + (int)
 		BTOBB(be32_to_cpu(rhead->h_len))) % log->l_logBBsize;
-	tail_lsn = log->l_tail_lsn;
+	tail_lsn = atomic64_read(&log->l_tail_lsn);
 	if (*head_blk == after_umount_blk &&
 	    be32_to_cpu(rhead->h_num_logops) == 1) {
 		umount_data_blk = (i + hblks) % log->l_logBBsize;
@@ -975,12 +986,10 @@
 			 * log records will point recovery to after the
 			 * current unmount record.
 			 */
-			log->l_tail_lsn =
-				xlog_assign_lsn(log->l_curr_cycle,
-						after_umount_blk);
-			log->l_last_sync_lsn =
-				xlog_assign_lsn(log->l_curr_cycle,
-						after_umount_blk);
+			xlog_assign_atomic_lsn(&log->l_tail_lsn,
+					log->l_curr_cycle, after_umount_blk);
+			xlog_assign_atomic_lsn(&log->l_last_sync_lsn,
+					log->l_curr_cycle, after_umount_blk);
 			*tail_blk = after_umount_blk;
 
 			/*
@@ -1605,82 +1614,45 @@
  * record in the table to tell us how many times we expect to see this
  * record during the second pass.
  */
-STATIC void
-xlog_recover_do_buffer_pass1(
-	xlog_t			*log,
-	xfs_buf_log_format_t	*buf_f)
+STATIC int
+xlog_recover_buffer_pass1(
+	struct log		*log,
+	xlog_recover_item_t	*item)
 {
-	xfs_buf_cancel_t	*bcp;
-	xfs_buf_cancel_t	*nextp;
-	xfs_buf_cancel_t	*prevp;
-	xfs_buf_cancel_t	**bucket;
-	xfs_daddr_t		blkno = 0;
-	uint			len = 0;
-	ushort			flags = 0;
-
-	switch (buf_f->blf_type) {
-	case XFS_LI_BUF:
-		blkno = buf_f->blf_blkno;
-		len = buf_f->blf_len;
-		flags = buf_f->blf_flags;
-		break;
-	}
+	xfs_buf_log_format_t	*buf_f = item->ri_buf[0].i_addr;
+	struct list_head	*bucket;
+	struct xfs_buf_cancel	*bcp;
 
 	/*
 	 * If this isn't a cancel buffer item, then just return.
 	 */
-	if (!(flags & XFS_BLF_CANCEL)) {
+	if (!(buf_f->blf_flags & XFS_BLF_CANCEL)) {
 		trace_xfs_log_recover_buf_not_cancel(log, buf_f);
-		return;
+		return 0;
 	}
 
 	/*
-	 * Insert an xfs_buf_cancel record into the hash table of
-	 * them.  If there is already an identical record, bump
-	 * its reference count.
+	 * Insert an xfs_buf_cancel record into the hash table of them.
+	 * If there is already an identical record, bump its reference count.
 	 */
-	bucket = &log->l_buf_cancel_table[(__uint64_t)blkno %
-					  XLOG_BC_TABLE_SIZE];
-	/*
-	 * If the hash bucket is empty then just insert a new record into
-	 * the bucket.
-	 */
-	if (*bucket == NULL) {
-		bcp = (xfs_buf_cancel_t *)kmem_alloc(sizeof(xfs_buf_cancel_t),
-						     KM_SLEEP);
-		bcp->bc_blkno = blkno;
-		bcp->bc_len = len;
-		bcp->bc_refcount = 1;
-		bcp->bc_next = NULL;
-		*bucket = bcp;
-		return;
-	}
-
-	/*
-	 * The hash bucket is not empty, so search for duplicates of our
-	 * record.  If we find one them just bump its refcount.  If not
-	 * then add us at the end of the list.
-	 */
-	prevp = NULL;
-	nextp = *bucket;
-	while (nextp != NULL) {
-		if (nextp->bc_blkno == blkno && nextp->bc_len == len) {
-			nextp->bc_refcount++;
+	bucket = XLOG_BUF_CANCEL_BUCKET(log, buf_f->blf_blkno);
+	list_for_each_entry(bcp, bucket, bc_list) {
+		if (bcp->bc_blkno == buf_f->blf_blkno &&
+		    bcp->bc_len == buf_f->blf_len) {
+			bcp->bc_refcount++;
 			trace_xfs_log_recover_buf_cancel_ref_inc(log, buf_f);
-			return;
+			return 0;
 		}
-		prevp = nextp;
-		nextp = nextp->bc_next;
 	}
-	ASSERT(prevp != NULL);
-	bcp = (xfs_buf_cancel_t *)kmem_alloc(sizeof(xfs_buf_cancel_t),
-					     KM_SLEEP);
-	bcp->bc_blkno = blkno;
-	bcp->bc_len = len;
+
+	bcp = kmem_alloc(sizeof(struct xfs_buf_cancel), KM_SLEEP);
+	bcp->bc_blkno = buf_f->blf_blkno;
+	bcp->bc_len = buf_f->blf_len;
 	bcp->bc_refcount = 1;
-	bcp->bc_next = NULL;
-	prevp->bc_next = bcp;
+	list_add_tail(&bcp->bc_list, bucket);
+
 	trace_xfs_log_recover_buf_cancel_add(log, buf_f);
+	return 0;
 }
 
 /*
@@ -1698,14 +1670,13 @@
  */
 STATIC int
 xlog_check_buffer_cancelled(
-	xlog_t			*log,
+	struct log		*log,
 	xfs_daddr_t		blkno,
 	uint			len,
 	ushort			flags)
 {
-	xfs_buf_cancel_t	*bcp;
-	xfs_buf_cancel_t	*prevp;
-	xfs_buf_cancel_t	**bucket;
+	struct list_head	*bucket;
+	struct xfs_buf_cancel	*bcp;
 
 	if (log->l_buf_cancel_table == NULL) {
 		/*
@@ -1716,128 +1687,70 @@
 		return 0;
 	}
 
-	bucket = &log->l_buf_cancel_table[(__uint64_t)blkno %
-					  XLOG_BC_TABLE_SIZE];
-	bcp = *bucket;
-	if (bcp == NULL) {
-		/*
-		 * There is no corresponding entry in the table built
-		 * in pass one, so this buffer has not been cancelled.
-		 */
-		ASSERT(!(flags & XFS_BLF_CANCEL));
-		return 0;
+	/*
+	 * Search for an entry in the buffer cancel table that matches our buffer.
+	 */
+	bucket = XLOG_BUF_CANCEL_BUCKET(log, blkno);
+	list_for_each_entry(bcp, bucket, bc_list) {
+		if (bcp->bc_blkno == blkno && bcp->bc_len == len)
+			goto found;
 	}
 
 	/*
-	 * Search for an entry in the buffer cancel table that
-	 * matches our buffer.
-	 */
-	prevp = NULL;
-	while (bcp != NULL) {
-		if (bcp->bc_blkno == blkno && bcp->bc_len == len) {
-			/*
-			 * We've go a match, so return 1 so that the
-			 * recovery of this buffer is cancelled.
-			 * If this buffer is actually a buffer cancel
-			 * log item, then decrement the refcount on the
-			 * one in the table and remove it if this is the
-			 * last reference.
-			 */
-			if (flags & XFS_BLF_CANCEL) {
-				bcp->bc_refcount--;
-				if (bcp->bc_refcount == 0) {
-					if (prevp == NULL) {
-						*bucket = bcp->bc_next;
-					} else {
-						prevp->bc_next = bcp->bc_next;
-					}
-					kmem_free(bcp);
-				}
-			}
-			return 1;
-		}
-		prevp = bcp;
-		bcp = bcp->bc_next;
-	}
-	/*
-	 * We didn't find a corresponding entry in the table, so
-	 * return 0 so that the buffer is NOT cancelled.
+	 * We didn't find a corresponding entry in the table, so return 0 so
+	 * that the buffer is NOT cancelled.
 	 */
 	ASSERT(!(flags & XFS_BLF_CANCEL));
 	return 0;
-}
 
-STATIC int
-xlog_recover_do_buffer_pass2(
-	xlog_t			*log,
-	xfs_buf_log_format_t	*buf_f)
-{
-	xfs_daddr_t		blkno = 0;
-	ushort			flags = 0;
-	uint			len = 0;
-
-	switch (buf_f->blf_type) {
-	case XFS_LI_BUF:
-		blkno = buf_f->blf_blkno;
-		flags = buf_f->blf_flags;
-		len = buf_f->blf_len;
-		break;
+found:
+	/*
+	 * We've got a match, so return 1 so that the recovery of this buffer
+	 * is cancelled.  If this buffer is actually a buffer cancel log
+	 * item, then decrement the refcount on the one in the table and
+	 * remove it if this is the last reference.
+	 */
+	if (flags & XFS_BLF_CANCEL) {
+		if (--bcp->bc_refcount == 0) {
+			list_del(&bcp->bc_list);
+			kmem_free(bcp);
+		}
 	}
-
-	return xlog_check_buffer_cancelled(log, blkno, len, flags);
+	return 1;
 }
 
 /*
- * Perform recovery for a buffer full of inodes.  In these buffers,
- * the only data which should be recovered is that which corresponds
- * to the di_next_unlinked pointers in the on disk inode structures.
- * The rest of the data for the inodes is always logged through the
- * inodes themselves rather than the inode buffer and is recovered
- * in xlog_recover_do_inode_trans().
+ * Perform recovery for a buffer full of inodes.  In these buffers, the only
+ * data which should be recovered is that which corresponds to the
+ * di_next_unlinked pointers in the on disk inode structures.  The rest of the
+ * data for the inodes is always logged through the inodes themselves rather
+ * than the inode buffer and is recovered in xlog_recover_inode_pass2().
  *
- * The only time when buffers full of inodes are fully recovered is
- * when the buffer is full of newly allocated inodes.  In this case
- * the buffer will not be marked as an inode buffer and so will be
- * sent to xlog_recover_do_reg_buffer() below during recovery.
+ * The only time when buffers full of inodes are fully recovered is when the
+ * buffer is full of newly allocated inodes.  In this case the buffer will
+ * not be marked as an inode buffer and so will be sent to
+ * xlog_recover_do_reg_buffer() below during recovery.
  */
 STATIC int
 xlog_recover_do_inode_buffer(
-	xfs_mount_t		*mp,
+	struct xfs_mount	*mp,
 	xlog_recover_item_t	*item,
-	xfs_buf_t		*bp,
+	struct xfs_buf		*bp,
 	xfs_buf_log_format_t	*buf_f)
 {
 	int			i;
-	int			item_index;
-	int			bit;
-	int			nbits;
-	int			reg_buf_offset;
-	int			reg_buf_bytes;
+	int			item_index = 0;
+	int			bit = 0;
+	int			nbits = 0;
+	int			reg_buf_offset = 0;
+	int			reg_buf_bytes = 0;
 	int			next_unlinked_offset;
 	int			inodes_per_buf;
 	xfs_agino_t		*logged_nextp;
 	xfs_agino_t		*buffer_nextp;
-	unsigned int		*data_map = NULL;
-	unsigned int		map_size = 0;
 
 	trace_xfs_log_recover_buf_inode_buf(mp->m_log, buf_f);
 
-	switch (buf_f->blf_type) {
-	case XFS_LI_BUF:
-		data_map = buf_f->blf_data_map;
-		map_size = buf_f->blf_map_size;
-		break;
-	}
-	/*
-	 * Set the variables corresponding to the current region to
-	 * 0 so that we'll initialize them on the first pass through
-	 * the loop.
-	 */
-	reg_buf_offset = 0;
-	reg_buf_bytes = 0;
-	bit = 0;
-	nbits = 0;
-	item_index = 0;
 	inodes_per_buf = XFS_BUF_COUNT(bp) >> mp->m_sb.sb_inodelog;
 	for (i = 0; i < inodes_per_buf; i++) {
 		next_unlinked_offset = (i * mp->m_sb.sb_inodesize) +
@@ -1852,18 +1765,18 @@
 			 * the current di_next_unlinked field.
 			 */
 			bit += nbits;
-			bit = xfs_next_bit(data_map, map_size, bit);
+			bit = xfs_next_bit(buf_f->blf_data_map,
+					   buf_f->blf_map_size, bit);
 
 			/*
 			 * If there are no more logged regions in the
 			 * buffer, then we're done.
 			 */
-			if (bit == -1) {
+			if (bit == -1)
 				return 0;
-			}
 
-			nbits = xfs_contig_bits(data_map, map_size,
-							 bit);
+			nbits = xfs_contig_bits(buf_f->blf_data_map,
+						buf_f->blf_map_size, bit);
 			ASSERT(nbits > 0);
 			reg_buf_offset = bit << XFS_BLF_SHIFT;
 			reg_buf_bytes = nbits << XFS_BLF_SHIFT;
@@ -1875,9 +1788,8 @@
 		 * di_next_unlinked field, then move on to the next
 		 * di_next_unlinked field.
 		 */
-		if (next_unlinked_offset < reg_buf_offset) {
+		if (next_unlinked_offset < reg_buf_offset)
 			continue;
-		}
 
 		ASSERT(item->ri_buf[item_index].i_addr != NULL);
 		ASSERT((item->ri_buf[item_index].i_len % XFS_BLF_CHUNK) == 0);
@@ -1913,36 +1825,29 @@
  * given buffer.  The bitmap in the buf log format structure indicates
  * where to place the logged data.
  */
-/*ARGSUSED*/
 STATIC void
 xlog_recover_do_reg_buffer(
 	struct xfs_mount	*mp,
 	xlog_recover_item_t	*item,
-	xfs_buf_t		*bp,
+	struct xfs_buf		*bp,
 	xfs_buf_log_format_t	*buf_f)
 {
 	int			i;
 	int			bit;
 	int			nbits;
-	unsigned int		*data_map = NULL;
-	unsigned int		map_size = 0;
 	int                     error;
 
 	trace_xfs_log_recover_buf_reg_buf(mp->m_log, buf_f);
 
-	switch (buf_f->blf_type) {
-	case XFS_LI_BUF:
-		data_map = buf_f->blf_data_map;
-		map_size = buf_f->blf_map_size;
-		break;
-	}
 	bit = 0;
 	i = 1;  /* 0 is the buf format structure */
 	while (1) {
-		bit = xfs_next_bit(data_map, map_size, bit);
+		bit = xfs_next_bit(buf_f->blf_data_map,
+				   buf_f->blf_map_size, bit);
 		if (bit == -1)
 			break;
-		nbits = xfs_contig_bits(data_map, map_size, bit);
+		nbits = xfs_contig_bits(buf_f->blf_data_map,
+					buf_f->blf_map_size, bit);
 		ASSERT(nbits > 0);
 		ASSERT(item->ri_buf[i].i_addr != NULL);
 		ASSERT(item->ri_buf[i].i_len % XFS_BLF_CHUNK == 0);
@@ -2176,77 +2081,46 @@
  * for more details on the implementation of the table of cancel records.
  */
 STATIC int
-xlog_recover_do_buffer_trans(
+xlog_recover_buffer_pass2(
 	xlog_t			*log,
-	xlog_recover_item_t	*item,
-	int			pass)
+	xlog_recover_item_t	*item)
 {
 	xfs_buf_log_format_t	*buf_f = item->ri_buf[0].i_addr;
-	xfs_mount_t		*mp;
+	xfs_mount_t		*mp = log->l_mp;
 	xfs_buf_t		*bp;
 	int			error;
-	int			cancel;
-	xfs_daddr_t		blkno;
-	int			len;
-	ushort			flags;
 	uint			buf_flags;
 
-	if (pass == XLOG_RECOVER_PASS1) {
-		/*
-		 * In this pass we're only looking for buf items
-		 * with the XFS_BLF_CANCEL bit set.
-		 */
-		xlog_recover_do_buffer_pass1(log, buf_f);
+	/*
+	 * In this pass we only want to recover all the buffers which have
+	 * not been cancelled and are not cancellation buffers themselves.
+	 */
+	if (xlog_check_buffer_cancelled(log, buf_f->blf_blkno,
+			buf_f->blf_len, buf_f->blf_flags)) {
+		trace_xfs_log_recover_buf_cancel(log, buf_f);
 		return 0;
-	} else {
-		/*
-		 * In this pass we want to recover all the buffers
-		 * which have not been cancelled and are not
-		 * cancellation buffers themselves.  The routine
-		 * we call here will tell us whether or not to
-		 * continue with the replay of this buffer.
-		 */
-		cancel = xlog_recover_do_buffer_pass2(log, buf_f);
-		if (cancel) {
-			trace_xfs_log_recover_buf_cancel(log, buf_f);
-			return 0;
-		}
-	}
-	trace_xfs_log_recover_buf_recover(log, buf_f);
-	switch (buf_f->blf_type) {
-	case XFS_LI_BUF:
-		blkno = buf_f->blf_blkno;
-		len = buf_f->blf_len;
-		flags = buf_f->blf_flags;
-		break;
-	default:
-		xfs_fs_cmn_err(CE_ALERT, log->l_mp,
-			"xfs_log_recover: unknown buffer type 0x%x, logdev %s",
-			buf_f->blf_type, log->l_mp->m_logname ?
-			log->l_mp->m_logname : "internal");
-		XFS_ERROR_REPORT("xlog_recover_do_buffer_trans",
-				 XFS_ERRLEVEL_LOW, log->l_mp);
-		return XFS_ERROR(EFSCORRUPTED);
 	}
 
-	mp = log->l_mp;
+	trace_xfs_log_recover_buf_recover(log, buf_f);
+
 	buf_flags = XBF_LOCK;
-	if (!(flags & XFS_BLF_INODE_BUF))
+	if (!(buf_f->blf_flags & XFS_BLF_INODE_BUF))
 		buf_flags |= XBF_MAPPED;
 
-	bp = xfs_buf_read(mp->m_ddev_targp, blkno, len, buf_flags);
+	bp = xfs_buf_read(mp->m_ddev_targp, buf_f->blf_blkno, buf_f->blf_len,
+			  buf_flags);
 	if (XFS_BUF_ISERROR(bp)) {
-		xfs_ioerror_alert("xlog_recover_do..(read#1)", log->l_mp,
-				  bp, blkno);
+		xfs_ioerror_alert("xlog_recover_do..(read#1)", mp,
+				  bp, buf_f->blf_blkno);
 		error = XFS_BUF_GETERROR(bp);
 		xfs_buf_relse(bp);
 		return error;
 	}
 
 	error = 0;
-	if (flags & XFS_BLF_INODE_BUF) {
+	if (buf_f->blf_flags & XFS_BLF_INODE_BUF) {
 		error = xlog_recover_do_inode_buffer(mp, item, bp, buf_f);
-	} else if (flags &
+	} else if (buf_f->blf_flags &
 		  (XFS_BLF_UDQUOT_BUF|XFS_BLF_PDQUOT_BUF|XFS_BLF_GDQUOT_BUF)) {
 		xlog_recover_do_dquot_buffer(mp, log, item, bp, buf_f);
 	} else {
@@ -2286,16 +2160,14 @@
 }
 
 STATIC int
-xlog_recover_do_inode_trans(
+xlog_recover_inode_pass2(
 	xlog_t			*log,
-	xlog_recover_item_t	*item,
-	int			pass)
+	xlog_recover_item_t	*item)
 {
 	xfs_inode_log_format_t	*in_f;
-	xfs_mount_t		*mp;
+	xfs_mount_t		*mp = log->l_mp;
 	xfs_buf_t		*bp;
 	xfs_dinode_t		*dip;
-	xfs_ino_t		ino;
 	int			len;
 	xfs_caddr_t		src;
 	xfs_caddr_t		dest;
@@ -2305,10 +2177,6 @@
 	xfs_icdinode_t		*dicp;
 	int			need_free = 0;
 
-	if (pass == XLOG_RECOVER_PASS1) {
-		return 0;
-	}
-
 	if (item->ri_buf[0].i_len == sizeof(xfs_inode_log_format_t)) {
 		in_f = item->ri_buf[0].i_addr;
 	} else {
@@ -2318,8 +2186,6 @@
 		if (error)
 			goto error;
 	}
-	ino = in_f->ilf_ino;
-	mp = log->l_mp;
 
 	/*
 	 * Inode buffers can be freed, look out for it,
@@ -2354,8 +2220,8 @@
 		xfs_buf_relse(bp);
 		xfs_fs_cmn_err(CE_ALERT, mp,
 			"xfs_inode_recover: Bad inode magic number, dino ptr = 0x%p, dino bp = 0x%p, ino = %Ld",
-			dip, bp, ino);
-		XFS_ERROR_REPORT("xlog_recover_do_inode_trans(1)",
+			dip, bp, in_f->ilf_ino);
+		XFS_ERROR_REPORT("xlog_recover_inode_pass2(1)",
 				 XFS_ERRLEVEL_LOW, mp);
 		error = EFSCORRUPTED;
 		goto error;
@@ -2365,8 +2231,8 @@
 		xfs_buf_relse(bp);
 		xfs_fs_cmn_err(CE_ALERT, mp,
 			"xfs_inode_recover: Bad inode log record, rec ptr 0x%p, ino %Ld",
-			item, ino);
-		XFS_ERROR_REPORT("xlog_recover_do_inode_trans(2)",
+			item, in_f->ilf_ino);
+		XFS_ERROR_REPORT("xlog_recover_inode_pass2(2)",
 				 XFS_ERRLEVEL_LOW, mp);
 		error = EFSCORRUPTED;
 		goto error;
@@ -2394,12 +2260,12 @@
 	if (unlikely((dicp->di_mode & S_IFMT) == S_IFREG)) {
 		if ((dicp->di_format != XFS_DINODE_FMT_EXTENTS) &&
 		    (dicp->di_format != XFS_DINODE_FMT_BTREE)) {
-			XFS_CORRUPTION_ERROR("xlog_recover_do_inode_trans(3)",
+			XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(3)",
 					 XFS_ERRLEVEL_LOW, mp, dicp);
 			xfs_buf_relse(bp);
 			xfs_fs_cmn_err(CE_ALERT, mp,
 				"xfs_inode_recover: Bad regular inode log record, rec ptr 0x%p, ino ptr = 0x%p, ino bp = 0x%p, ino %Ld",
-				item, dip, bp, ino);
+				item, dip, bp, in_f->ilf_ino);
 			error = EFSCORRUPTED;
 			goto error;
 		}
@@ -2407,40 +2273,40 @@
 		if ((dicp->di_format != XFS_DINODE_FMT_EXTENTS) &&
 		    (dicp->di_format != XFS_DINODE_FMT_BTREE) &&
 		    (dicp->di_format != XFS_DINODE_FMT_LOCAL)) {
-			XFS_CORRUPTION_ERROR("xlog_recover_do_inode_trans(4)",
+			XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(4)",
 					     XFS_ERRLEVEL_LOW, mp, dicp);
 			xfs_buf_relse(bp);
 			xfs_fs_cmn_err(CE_ALERT, mp,
 				"xfs_inode_recover: Bad dir inode log record, rec ptr 0x%p, ino ptr = 0x%p, ino bp = 0x%p, ino %Ld",
-				item, dip, bp, ino);
+				item, dip, bp, in_f->ilf_ino);
 			error = EFSCORRUPTED;
 			goto error;
 		}
 	}
 	if (unlikely(dicp->di_nextents + dicp->di_anextents > dicp->di_nblocks)){
-		XFS_CORRUPTION_ERROR("xlog_recover_do_inode_trans(5)",
+		XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(5)",
 				     XFS_ERRLEVEL_LOW, mp, dicp);
 		xfs_buf_relse(bp);
 		xfs_fs_cmn_err(CE_ALERT, mp,
 			"xfs_inode_recover: Bad inode log record, rec ptr 0x%p, dino ptr 0x%p, dino bp 0x%p, ino %Ld, total extents = %d, nblocks = %Ld",
-			item, dip, bp, ino,
+			item, dip, bp, in_f->ilf_ino,
 			dicp->di_nextents + dicp->di_anextents,
 			dicp->di_nblocks);
 		error = EFSCORRUPTED;
 		goto error;
 	}
 	if (unlikely(dicp->di_forkoff > mp->m_sb.sb_inodesize)) {
-		XFS_CORRUPTION_ERROR("xlog_recover_do_inode_trans(6)",
+		XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(6)",
 				     XFS_ERRLEVEL_LOW, mp, dicp);
 		xfs_buf_relse(bp);
 		xfs_fs_cmn_err(CE_ALERT, mp,
 			"xfs_inode_recover: Bad inode log rec ptr 0x%p, dino ptr 0x%p, dino bp 0x%p, ino %Ld, forkoff 0x%x",
-			item, dip, bp, ino, dicp->di_forkoff);
+			item, dip, bp, in_f->ilf_ino, dicp->di_forkoff);
 		error = EFSCORRUPTED;
 		goto error;
 	}
 	if (unlikely(item->ri_buf[1].i_len > sizeof(struct xfs_icdinode))) {
-		XFS_CORRUPTION_ERROR("xlog_recover_do_inode_trans(7)",
+		XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(7)",
 				     XFS_ERRLEVEL_LOW, mp, dicp);
 		xfs_buf_relse(bp);
 		xfs_fs_cmn_err(CE_ALERT, mp,
@@ -2532,7 +2398,7 @@
 			break;
 
 		default:
-			xlog_warn("XFS: xlog_recover_do_inode_trans: Invalid flag");
+			xlog_warn("XFS: xlog_recover_inode_pass2: Invalid flag");
 			ASSERT(0);
 			xfs_buf_relse(bp);
 			error = EIO;
@@ -2556,18 +2422,11 @@
  * of that type.
  */
 STATIC int
-xlog_recover_do_quotaoff_trans(
+xlog_recover_quotaoff_pass1(
 	xlog_t			*log,
-	xlog_recover_item_t	*item,
-	int			pass)
+	xlog_recover_item_t	*item)
 {
-	xfs_qoff_logformat_t	*qoff_f;
-
-	if (pass == XLOG_RECOVER_PASS2) {
-		return (0);
-	}
-
-	qoff_f = item->ri_buf[0].i_addr;
+	xfs_qoff_logformat_t	*qoff_f = item->ri_buf[0].i_addr;
 	ASSERT(qoff_f);
 
 	/*
@@ -2588,22 +2447,17 @@
  * Recover a dquot record
  */
 STATIC int
-xlog_recover_do_dquot_trans(
+xlog_recover_dquot_pass2(
 	xlog_t			*log,
-	xlog_recover_item_t	*item,
-	int			pass)
+	xlog_recover_item_t	*item)
 {
-	xfs_mount_t		*mp;
+	xfs_mount_t		*mp = log->l_mp;
 	xfs_buf_t		*bp;
 	struct xfs_disk_dquot	*ddq, *recddq;
 	int			error;
 	xfs_dq_logformat_t	*dq_f;
 	uint			type;
 
-	if (pass == XLOG_RECOVER_PASS1) {
-		return 0;
-	}
-	mp = log->l_mp;
 
 	/*
 	 * Filesystems are required to send in quota flags at mount time.
@@ -2647,7 +2501,7 @@
 	if ((error = xfs_qm_dqcheck(recddq,
 			   dq_f->qlf_id,
 			   0, XFS_QMOPT_DOWARN,
-			   "xlog_recover_do_dquot_trans (log copy)"))) {
+			   "xlog_recover_dquot_pass2 (log copy)"))) {
 		return XFS_ERROR(EIO);
 	}
 	ASSERT(dq_f->qlf_len == 1);
@@ -2670,7 +2524,7 @@
 	 * minimal initialization then.
 	 */
 	if (xfs_qm_dqcheck(ddq, dq_f->qlf_id, 0, XFS_QMOPT_DOWARN,
-			   "xlog_recover_do_dquot_trans")) {
+			   "xlog_recover_dquot_pass2")) {
 		xfs_buf_relse(bp);
 		return XFS_ERROR(EIO);
 	}
@@ -2693,38 +2547,31 @@
  * LSN.
  */
 STATIC int
-xlog_recover_do_efi_trans(
+xlog_recover_efi_pass2(
 	xlog_t			*log,
 	xlog_recover_item_t	*item,
-	xfs_lsn_t		lsn,
-	int			pass)
+	xfs_lsn_t		lsn)
 {
 	int			error;
-	xfs_mount_t		*mp;
+	xfs_mount_t		*mp = log->l_mp;
 	xfs_efi_log_item_t	*efip;
 	xfs_efi_log_format_t	*efi_formatp;
 
-	if (pass == XLOG_RECOVER_PASS1) {
-		return 0;
-	}
-
 	efi_formatp = item->ri_buf[0].i_addr;
 
-	mp = log->l_mp;
 	efip = xfs_efi_init(mp, efi_formatp->efi_nextents);
 	if ((error = xfs_efi_copy_format(&(item->ri_buf[0]),
 					 &(efip->efi_format)))) {
 		xfs_efi_item_free(efip);
 		return error;
 	}
-	efip->efi_next_extent = efi_formatp->efi_nextents;
-	efip->efi_flags |= XFS_EFI_COMMITTED;
+	atomic_set(&efip->efi_next_extent, efi_formatp->efi_nextents);
 
 	spin_lock(&log->l_ailp->xa_lock);
 	/*
 	 * xfs_trans_ail_update() drops the AIL lock.
 	 */
-	xfs_trans_ail_update(log->l_ailp, (xfs_log_item_t *)efip, lsn);
+	xfs_trans_ail_update(log->l_ailp, &efip->efi_item, lsn);
 	return 0;
 }
 
@@ -2737,11 +2584,10 @@
  * efd format structure.  If we find it, we remove the efi from the
  * AIL and free it.
  */
-STATIC void
-xlog_recover_do_efd_trans(
+STATIC int
+xlog_recover_efd_pass2(
 	xlog_t			*log,
-	xlog_recover_item_t	*item,
-	int			pass)
+	xlog_recover_item_t	*item)
 {
 	xfs_efd_log_format_t	*efd_formatp;
 	xfs_efi_log_item_t	*efip = NULL;
@@ -2750,10 +2596,6 @@
 	struct xfs_ail_cursor	cur;
 	struct xfs_ail		*ailp = log->l_ailp;
 
-	if (pass == XLOG_RECOVER_PASS1) {
-		return;
-	}
-
 	efd_formatp = item->ri_buf[0].i_addr;
 	ASSERT((item->ri_buf[0].i_len == (sizeof(xfs_efd_log_format_32_t) +
 		((efd_formatp->efd_nextents - 1) * sizeof(xfs_extent_32_t)))) ||
@@ -2785,62 +2627,6 @@
 	}
 	xfs_trans_ail_cursor_done(ailp, &cur);
 	spin_unlock(&ailp->xa_lock);
-}
-
-/*
- * Perform the transaction
- *
- * If the transaction modifies a buffer or inode, do it now.  Otherwise,
- * EFIs and EFDs get queued up by adding entries into the AIL for them.
- */
-STATIC int
-xlog_recover_do_trans(
-	xlog_t			*log,
-	xlog_recover_t		*trans,
-	int			pass)
-{
-	int			error = 0;
-	xlog_recover_item_t	*item;
-
-	error = xlog_recover_reorder_trans(log, trans, pass);
-	if (error)
-		return error;
-
-	list_for_each_entry(item, &trans->r_itemq, ri_list) {
-		trace_xfs_log_recover_item_recover(log, trans, item, pass);
-		switch (ITEM_TYPE(item)) {
-		case XFS_LI_BUF:
-			error = xlog_recover_do_buffer_trans(log, item, pass);
-			break;
-		case XFS_LI_INODE:
-			error = xlog_recover_do_inode_trans(log, item, pass);
-			break;
-		case XFS_LI_EFI:
-			error = xlog_recover_do_efi_trans(log, item,
-							  trans->r_lsn, pass);
-			break;
-		case XFS_LI_EFD:
-			xlog_recover_do_efd_trans(log, item, pass);
-			error = 0;
-			break;
-		case XFS_LI_DQUOT:
-			error = xlog_recover_do_dquot_trans(log, item, pass);
-			break;
-		case XFS_LI_QUOTAOFF:
-			error = xlog_recover_do_quotaoff_trans(log, item,
-							       pass);
-			break;
-		default:
-			xlog_warn(
-	"XFS: invalid item type (%d) xlog_recover_do_trans", ITEM_TYPE(item));
-			ASSERT(0);
-			error = XFS_ERROR(EIO);
-			break;
-		}
-
-		if (error)
-			return error;
-	}
 
 	return 0;
 }
@@ -2852,7 +2638,7 @@
  */
 STATIC void
 xlog_recover_free_trans(
-	xlog_recover_t		*trans)
+	struct xlog_recover	*trans)
 {
 	xlog_recover_item_t	*item, *n;
 	int			i;
@@ -2871,17 +2657,95 @@
 }
 
 STATIC int
+xlog_recover_commit_pass1(
+	struct log		*log,
+	struct xlog_recover	*trans,
+	xlog_recover_item_t	*item)
+{
+	trace_xfs_log_recover_item_recover(log, trans, item, XLOG_RECOVER_PASS1);
+
+	switch (ITEM_TYPE(item)) {
+	case XFS_LI_BUF:
+		return xlog_recover_buffer_pass1(log, item);
+	case XFS_LI_QUOTAOFF:
+		return xlog_recover_quotaoff_pass1(log, item);
+	case XFS_LI_INODE:
+	case XFS_LI_EFI:
+	case XFS_LI_EFD:
+	case XFS_LI_DQUOT:
+		/* nothing to do in pass 1 */
+		return 0;
+	default:
+		xlog_warn(
+	"XFS: invalid item type (%d) xlog_recover_commit_pass1",
+			ITEM_TYPE(item));
+		ASSERT(0);
+		return XFS_ERROR(EIO);
+	}
+}
+
+STATIC int
+xlog_recover_commit_pass2(
+	struct log		*log,
+	struct xlog_recover	*trans,
+	xlog_recover_item_t	*item)
+{
+	trace_xfs_log_recover_item_recover(log, trans, item, XLOG_RECOVER_PASS2);
+
+	switch (ITEM_TYPE(item)) {
+	case XFS_LI_BUF:
+		return xlog_recover_buffer_pass2(log, item);
+	case XFS_LI_INODE:
+		return xlog_recover_inode_pass2(log, item);
+	case XFS_LI_EFI:
+		return xlog_recover_efi_pass2(log, item, trans->r_lsn);
+	case XFS_LI_EFD:
+		return xlog_recover_efd_pass2(log, item);
+	case XFS_LI_DQUOT:
+		return xlog_recover_dquot_pass2(log, item);
+	case XFS_LI_QUOTAOFF:
+		/* nothing to do in pass 2 */
+		return 0;
+	default:
+		xlog_warn(
+	"XFS: invalid item type (%d) xlog_recover_commit_pass2",
+			ITEM_TYPE(item));
+		ASSERT(0);
+		return XFS_ERROR(EIO);
+	}
+}
+
+/*
+ * Perform the transaction.
+ *
+ * If the transaction modifies a buffer or inode, do it now.  Otherwise,
+ * EFIs and EFDs get queued up by adding entries into the AIL for them.
+ */
+STATIC int
 xlog_recover_commit_trans(
-	xlog_t			*log,
-	xlog_recover_t		*trans,
+	struct log		*log,
+	struct xlog_recover	*trans,
 	int			pass)
 {
-	int			error;
+	int			error = 0;
+	xlog_recover_item_t	*item;
 
 	hlist_del(&trans->r_list);
-	if ((error = xlog_recover_do_trans(log, trans, pass)))
+
+	error = xlog_recover_reorder_trans(log, trans, pass);
+	if (error)
 		return error;
-	xlog_recover_free_trans(trans);			/* no error */
+
+	list_for_each_entry(item, &trans->r_itemq, ri_list) {
+		if (pass == XLOG_RECOVER_PASS1)
+			error = xlog_recover_commit_pass1(log, trans, item);
+		else
+			error = xlog_recover_commit_pass2(log, trans, item);
+		if (error)
+			return error;
+	}
+
+	xlog_recover_free_trans(trans);
 	return 0;
 }
 
@@ -3011,7 +2875,7 @@
 	xfs_extent_t		*extp;
 	xfs_fsblock_t		startblock_fsb;
 
-	ASSERT(!(efip->efi_flags & XFS_EFI_RECOVERED));
+	ASSERT(!test_bit(XFS_EFI_RECOVERED, &efip->efi_flags));
 
 	/*
 	 * First check the validity of the extents described by the
@@ -3050,7 +2914,7 @@
 					 extp->ext_len);
 	}
 
-	efip->efi_flags |= XFS_EFI_RECOVERED;
+	set_bit(XFS_EFI_RECOVERED, &efip->efi_flags);
 	error = xfs_trans_commit(tp, 0);
 	return error;
 
@@ -3107,7 +2971,7 @@
 		 * Skip EFIs that we've already processed.
 		 */
 		efip = (xfs_efi_log_item_t *)lip;
-		if (efip->efi_flags & XFS_EFI_RECOVERED) {
+		if (test_bit(XFS_EFI_RECOVERED, &efip->efi_flags)) {
 			lip = xfs_trans_ail_cursor_next(ailp, &cur);
 			continue;
 		}
@@ -3724,7 +3588,7 @@
 	xfs_daddr_t	head_blk,
 	xfs_daddr_t	tail_blk)
 {
-	int		error;
+	int		error, i;
 
 	ASSERT(head_blk != tail_blk);
 
@@ -3732,10 +3596,12 @@
 	 * First do a pass to find all of the cancelled buf log items.
 	 * Store them in the buf_cancel_table for use in the second pass.
 	 */
-	log->l_buf_cancel_table =
-		(xfs_buf_cancel_t **)kmem_zalloc(XLOG_BC_TABLE_SIZE *
-						 sizeof(xfs_buf_cancel_t*),
+	log->l_buf_cancel_table = kmem_zalloc(XLOG_BC_TABLE_SIZE *
+						 sizeof(struct list_head),
 						 KM_SLEEP);
+	for (i = 0; i < XLOG_BC_TABLE_SIZE; i++)
+		INIT_LIST_HEAD(&log->l_buf_cancel_table[i]);
+
 	error = xlog_do_recovery_pass(log, head_blk, tail_blk,
 				      XLOG_RECOVER_PASS1);
 	if (error != 0) {
@@ -3754,7 +3620,7 @@
 		int	i;
 
 		for (i = 0; i < XLOG_BC_TABLE_SIZE; i++)
-			ASSERT(log->l_buf_cancel_table[i] == NULL);
+			ASSERT(list_empty(&log->l_buf_cancel_table[i]));
 	}
 #endif	/* DEBUG */
 
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index 19e9dfa..d447aef 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -472,7 +472,7 @@
 			goto out_unwind;
 		pag->pag_agno = index;
 		pag->pag_mount = mp;
-		rwlock_init(&pag->pag_ici_lock);
+		spin_lock_init(&pag->pag_ici_lock);
 		mutex_init(&pag->pag_ici_reclaim_lock);
 		INIT_RADIX_TREE(&pag->pag_ici_root, GFP_ATOMIC);
 		spin_lock_init(&pag->pag_buf_lock);
@@ -975,6 +975,24 @@
 }
 
 /*
+ * Precalculate the low space thresholds for dynamic speculative preallocation.
+ */
+void
+xfs_set_low_space_thresholds(
+	struct xfs_mount	*mp)
+{
+	int i;
+
+	for (i = 0; i < XFS_LOWSP_MAX; i++) {
+		__uint64_t space = mp->m_sb.sb_dblocks;
+
+		do_div(space, 100);
+		mp->m_low_space[i] = space * (i + 1);
+	}
+}
+
+
+/*
  * Set whether we're using inode alignment.
  */
 STATIC void
@@ -1196,6 +1214,9 @@
 	 */
 	xfs_set_rw_sizes(mp);
 
+	/* set the low space thresholds for dynamic preallocation */
+	xfs_set_low_space_thresholds(mp);
+
 	/*
 	 * Set the inode cluster size.
 	 * This may still be overridden by the file system
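
For a sense of scale (illustrative numbers only, not taken from the patch): with sb_dblocks = 268435456 — a 1 TiB data volume of 4 KiB blocks — do_div(space, 100) leaves 2684354, so m_low_space[] ends up as {2684354, 5368708, 8053062, 10737416, 13421770} blocks, i.e. the 1% through 5% free-space marks, computed once at mount time rather than on every allocation.
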
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index 5861b49..a62e897 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -103,6 +103,16 @@
 	xfs_mod_incore_sb(mp, field, delta, rsvd)
 #endif
 
+/* dynamic preallocation free space thresholds, 5% down to 1% */
+enum {
+	XFS_LOWSP_1_PCNT = 0,
+	XFS_LOWSP_2_PCNT,
+	XFS_LOWSP_3_PCNT,
+	XFS_LOWSP_4_PCNT,
+	XFS_LOWSP_5_PCNT,
+	XFS_LOWSP_MAX,
+};
+
 typedef struct xfs_mount {
 	struct super_block	*m_super;
 	xfs_tid_t		m_tid;		/* next unused tid for fs */
@@ -202,6 +212,8 @@
 	__int64_t		m_update_flags;	/* sb flags we need to update
 						   on the next remount,rw */
 	struct shrinker		m_inode_shrink;	/* inode reclaim shrinker */
+	int64_t			m_low_space[XFS_LOWSP_MAX];
+						/* low free space thresholds */
 } xfs_mount_t;
 
 /*
@@ -379,6 +391,8 @@
 
 extern int	xfs_dev_is_read_only(struct xfs_mount *, char *);
 
+extern void	xfs_set_low_space_thresholds(struct xfs_mount *);
+
 #endif	/* __KERNEL__ */
 
 extern void	xfs_mod_sb(struct xfs_trans *, __int64_t);
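
The enum and m_low_space[] array only define the thresholds; a consumer compares remaining free space against them. The sketch below is purely hypothetical — the function, its arguments and the halving policy are illustrative assumptions, not code from this series — but it shows the intended shape of such a check:

/*
 * Hypothetical consumer: scale back a speculative preallocation,
 * halving it for every 1% threshold the free space has dropped below.
 */
static xfs_fsblock_t
xfs_throttle_prealloc(struct xfs_mount *mp, __uint64_t freesp,
		      xfs_fsblock_t prealloc_blocks)
{
	int	i;

	for (i = XFS_LOWSP_MAX - 1; i >= XFS_LOWSP_1_PCNT; i--) {
		if (freesp < mp->m_low_space[i])
			prealloc_blocks >>= 1;
	}
	return prealloc_blocks;
}
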
diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c
index f6d956b..f80a067 100644
--- a/fs/xfs/xfs_trans.c
+++ b/fs/xfs/xfs_trans.c
@@ -1350,7 +1350,7 @@
  * they could be immediately flushed and we'd have to race with the flusher
  * trying to pull the item from the AIL as we add it.
  */
-void
+static void
 xfs_trans_item_committed(
 	struct xfs_log_item	*lip,
 	xfs_lsn_t		commit_lsn,
@@ -1425,6 +1425,83 @@
 	xfs_trans_free(tp);
 }
 
+static inline void
+xfs_log_item_batch_insert(
+	struct xfs_ail		*ailp,
+	struct xfs_log_item	**log_items,
+	int			nr_items,
+	xfs_lsn_t		commit_lsn)
+{
+	int	i;
+
+	spin_lock(&ailp->xa_lock);
+	/* xfs_trans_ail_update_bulk drops ailp->xa_lock */
+	xfs_trans_ail_update_bulk(ailp, log_items, nr_items, commit_lsn);
+
+	for (i = 0; i < nr_items; i++)
+		IOP_UNPIN(log_items[i], 0);
+}
+
+/*
+ * Bulk operation version of xfs_trans_committed that takes a log vector of
+ * items to insert into the AIL. This uses bulk AIL insertion techniques to
+ * minimise lock traffic.
+ */
+void
+xfs_trans_committed_bulk(
+	struct xfs_ail		*ailp,
+	struct xfs_log_vec	*log_vector,
+	xfs_lsn_t		commit_lsn,
+	int			aborted)
+{
+#define LOG_ITEM_BATCH_SIZE	32
+	struct xfs_log_item	*log_items[LOG_ITEM_BATCH_SIZE];
+	struct xfs_log_vec	*lv;
+	int			i = 0;
+
+	/* unpin all the log items */
+	for (lv = log_vector; lv; lv = lv->lv_next ) {
+		struct xfs_log_item	*lip = lv->lv_item;
+		xfs_lsn_t		item_lsn;
+
+		if (aborted)
+			lip->li_flags |= XFS_LI_ABORTED;
+		item_lsn = IOP_COMMITTED(lip, commit_lsn);
+
+		/* item_lsn of -1 means the item was freed */
+		if (XFS_LSN_CMP(item_lsn, (xfs_lsn_t)-1) == 0)
+			continue;
+
+		if (item_lsn != commit_lsn) {
+
+			/*
+			 * Not a bulk update option due to unusual item_lsn.
+			 * Push into AIL immediately, rechecking the lsn once
+			 * we have the ail lock. Then unpin the item.
+			 */
+			spin_lock(&ailp->xa_lock);
+			if (XFS_LSN_CMP(item_lsn, lip->li_lsn) > 0)
+				xfs_trans_ail_update(ailp, lip, item_lsn);
+			else
+				spin_unlock(&ailp->xa_lock);
+			IOP_UNPIN(lip, 0);
+			continue;
+		}
+
+		/* Item is a candidate for bulk AIL insert.  */
+		log_items[i++] = lv->lv_item;
+		if (i >= LOG_ITEM_BATCH_SIZE) {
+			xfs_log_item_batch_insert(ailp, log_items,
+					LOG_ITEM_BATCH_SIZE, commit_lsn);
+			i = 0;
+		}
+	}
+
+	/* make sure we insert the remainder! */
+	if (i)
+		xfs_log_item_batch_insert(ailp, log_items, i, commit_lsn);
+}
+
 /*
  * Called from the trans_commit code when we notice that
  * the filesystem is in the middle of a forced shutdown.
diff --git a/fs/xfs/xfs_trans.h b/fs/xfs/xfs_trans.h
index 246286b..c2042b7 100644
--- a/fs/xfs/xfs_trans.h
+++ b/fs/xfs/xfs_trans.h
@@ -294,8 +294,8 @@
 #define	XFS_ALLOC_BTREE_REF	2
 #define	XFS_BMAP_BTREE_REF	2
 #define	XFS_DIR_BTREE_REF	2
+#define	XFS_INO_REF		2
 #define	XFS_ATTR_BTREE_REF	1
-#define	XFS_INO_REF		1
 #define	XFS_DQUOT_REF		1
 
 #ifdef __KERNEL__
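
Call sites elsewhere in this diff (for example the EFI recovery path) still use the single-item xfs_trans_ail_update(); with the bulk primitive below it can be kept as a thin wrapper. A sketch of such a wrapper — where it would live (e.g. xfs_trans_priv.h) is an assumption, since that hunk is not shown here:

/* Sketch: retain the single-item interface on top of the bulk one. */
static inline void
xfs_trans_ail_update(
	struct xfs_ail		*ailp,
	struct xfs_log_item	*lip,
	xfs_lsn_t		lsn) __releases(ailp->xa_lock)
{
	xfs_trans_ail_update_bulk(ailp, &lip, 1, lsn);
}
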
diff --git a/fs/xfs/xfs_trans_ail.c b/fs/xfs/xfs_trans_ail.c
index dc90695..c5bbbc4 100644
--- a/fs/xfs/xfs_trans_ail.c
+++ b/fs/xfs/xfs_trans_ail.c
@@ -28,8 +28,8 @@
 #include "xfs_trans_priv.h"
 #include "xfs_error.h"
 
-STATIC void xfs_ail_insert(struct xfs_ail *, xfs_log_item_t *);
-STATIC xfs_log_item_t * xfs_ail_delete(struct xfs_ail *, xfs_log_item_t *);
+STATIC void xfs_ail_splice(struct xfs_ail *, struct list_head *, xfs_lsn_t);
+STATIC void xfs_ail_delete(struct xfs_ail *, xfs_log_item_t *);
 STATIC xfs_log_item_t * xfs_ail_min(struct xfs_ail *);
 STATIC xfs_log_item_t * xfs_ail_next(struct xfs_ail *, xfs_log_item_t *);
 
@@ -449,129 +449,152 @@
 		xfs_log_move_tail(ailp->xa_mount, 1);
 }	/* xfs_trans_unlocked_item */
 
-
 /*
- * Update the position of the item in the AIL with the new
- * lsn.  If it is not yet in the AIL, add it.  Otherwise, move
- * it to its new position by removing it and re-adding it.
+ * xfs_trans_ail_update_bulk - bulk AIL insertion operation.
  *
- * Wakeup anyone with an lsn less than the item's lsn.  If the item
- * we move in the AIL is the minimum one, update the tail lsn in the
- * log manager.
+ * @xfs_trans_ail_update_bulk takes an array of log items that all need to be
+ * positioned at the same LSN in the AIL. If an item is not in the AIL, it will
+ * be added.  Otherwise, it will be repositioned by removing it and re-adding
+ * it to the AIL. If we move the first item in the AIL, update the log tail to
+ * match the new minimum LSN in the AIL.
  *
- * This function must be called with the AIL lock held.  The lock
- * is dropped before returning.
+ * This function executes the update operations on all the items in the array
+ * under a single hold of the AIL lock, rather than taking and dropping the
+ * lock once per item. Once we have the AIL lock, we check each log item's LSN
+ * to confirm it actually needs to be moved forward in the AIL.
+ *
+ * To optimise the insert operation, we delete all the items from the AIL in
+ * the first pass, moving them into a temporary list, then splice the temporary
+ * list into the correct position in the AIL. This avoids needing to do an
+ * insert operation on every item.
+ *
+ * This function must be called with the AIL lock held.  The lock is dropped
+ * before returning.
  */
 void
-xfs_trans_ail_update(
-	struct xfs_ail	*ailp,
-	xfs_log_item_t	*lip,
-	xfs_lsn_t	lsn) __releases(ailp->xa_lock)
+xfs_trans_ail_update_bulk(
+	struct xfs_ail		*ailp,
+	struct xfs_log_item	**log_items,
+	int			nr_items,
+	xfs_lsn_t		lsn) __releases(ailp->xa_lock)
 {
-	xfs_log_item_t		*dlip = NULL;
-	xfs_log_item_t		*mlip;	/* ptr to minimum lip */
+	xfs_log_item_t		*mlip;
 	xfs_lsn_t		tail_lsn;
+	int			mlip_changed = 0;
+	int			i;
+	LIST_HEAD(tmp);
 
 	mlip = xfs_ail_min(ailp);
 
-	if (lip->li_flags & XFS_LI_IN_AIL) {
-		dlip = xfs_ail_delete(ailp, lip);
-		ASSERT(dlip == lip);
-		xfs_trans_ail_cursor_clear(ailp, dlip);
-	} else {
-		lip->li_flags |= XFS_LI_IN_AIL;
-	}
+	for (i = 0; i < nr_items; i++) {
+		struct xfs_log_item *lip = log_items[i];
+		if (lip->li_flags & XFS_LI_IN_AIL) {
+			/* check if we really need to move the item */
+			if (XFS_LSN_CMP(lsn, lip->li_lsn) <= 0)
+				continue;
 
-	lip->li_lsn = lsn;
-	xfs_ail_insert(ailp, lip);
-
-	if (mlip == dlip) {
-		mlip = xfs_ail_min(ailp);
-		/*
-		 * It is not safe to access mlip after the AIL lock is
-		 * dropped, so we must get a copy of li_lsn before we do
-		 * so.  This is especially important on 32-bit platforms
-		 * where accessing and updating 64-bit values like li_lsn
-		 * is not atomic.
-		 */
-		tail_lsn = mlip->li_lsn;
-		spin_unlock(&ailp->xa_lock);
-		xfs_log_move_tail(ailp->xa_mount, tail_lsn);
-	} else {
-		spin_unlock(&ailp->xa_lock);
-	}
-
-
-}	/* xfs_trans_update_ail */
-
-/*
- * Delete the given item from the AIL.  It must already be in
- * the AIL.
- *
- * Wakeup anyone with an lsn less than item's lsn.    If the item
- * we delete in the AIL is the minimum one, update the tail lsn in the
- * log manager.
- *
- * Clear the IN_AIL flag from the item, reset its lsn to 0, and
- * bump the AIL's generation count to indicate that the tree
- * has changed.
- *
- * This function must be called with the AIL lock held.  The lock
- * is dropped before returning.
- */
-void
-xfs_trans_ail_delete(
-	struct xfs_ail	*ailp,
-	xfs_log_item_t	*lip) __releases(ailp->xa_lock)
-{
-	xfs_log_item_t		*dlip;
-	xfs_log_item_t		*mlip;
-	xfs_lsn_t		tail_lsn;
-
-	if (lip->li_flags & XFS_LI_IN_AIL) {
-		mlip = xfs_ail_min(ailp);
-		dlip = xfs_ail_delete(ailp, lip);
-		ASSERT(dlip == lip);
-		xfs_trans_ail_cursor_clear(ailp, dlip);
-
-
-		lip->li_flags &= ~XFS_LI_IN_AIL;
-		lip->li_lsn = 0;
-
-		if (mlip == dlip) {
-			mlip = xfs_ail_min(ailp);
-			/*
-			 * It is not safe to access mlip after the AIL lock
-			 * is dropped, so we must get a copy of li_lsn
-			 * before we do so.  This is especially important
-			 * on 32-bit platforms where accessing and updating
-			 * 64-bit values like li_lsn is not atomic.
-			 */
-			tail_lsn = mlip ? mlip->li_lsn : 0;
-			spin_unlock(&ailp->xa_lock);
-			xfs_log_move_tail(ailp->xa_mount, tail_lsn);
+			xfs_ail_delete(ailp, lip);
+			if (mlip == lip)
+				mlip_changed = 1;
 		} else {
-			spin_unlock(&ailp->xa_lock);
+			lip->li_flags |= XFS_LI_IN_AIL;
 		}
+		lip->li_lsn = lsn;
+		list_add(&lip->li_ail, &tmp);
 	}
-	else {
-		/*
-		 * If the file system is not being shutdown, we are in
-		 * serious trouble if we get to this stage.
-		 */
-		struct xfs_mount	*mp = ailp->xa_mount;
 
+	xfs_ail_splice(ailp, &tmp, lsn);
+
+	if (!mlip_changed) {
 		spin_unlock(&ailp->xa_lock);
-		if (!XFS_FORCED_SHUTDOWN(mp)) {
-			xfs_cmn_err(XFS_PTAG_AILDELETE, CE_ALERT, mp,
-		"%s: attempting to delete a log item that is not in the AIL",
-					__func__);
-			xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
-		}
+		return;
 	}
+
+	/*
+	 * It is not safe to access mlip after the AIL lock is dropped, so we
+	 * must get a copy of li_lsn before we do so.  This is especially
+	 * important on 32-bit platforms where accessing and updating 64-bit
+	 * values like li_lsn is not atomic.
+	 */
+	mlip = xfs_ail_min(ailp);
+	tail_lsn = mlip->li_lsn;
+	spin_unlock(&ailp->xa_lock);
+	xfs_log_move_tail(ailp->xa_mount, tail_lsn);
 }
 
+/*
+ * xfs_trans_ail_delete_bulk - remove multiple log items from the AIL
+ *
+ * @xfs_trans_ail_delete_bulk takes an array of log items that all need to be
+ * removed from the AIL. The caller is already holding the AIL lock, and has
+ * done all the checks necessary to ensure the items passed in via @log_items
+ * are ready for deletion. This includes checking that the items are in the
+ * AIL.
+ *
+ * For each log item to be removed, unlink it from the AIL, clear the IN_AIL
+ * flag from the item and reset the item's lsn to 0. If we remove the first
+ * item in the AIL, update the log tail to match the new minimum LSN in the
+ * AIL.
+ *
+ * This function will not drop the AIL lock until all items are removed from
+ * the AIL to minimise the amount of lock traffic on the AIL. This does not
+ * greatly increase the AIL hold time, but does significantly reduce the amount
+ * of traffic on the lock, especially during IO completion.
+ *
+ * This function must be called with the AIL lock held.  The lock is dropped
+ * before returning.
+ */
+void
+xfs_trans_ail_delete_bulk(
+	struct xfs_ail		*ailp,
+	struct xfs_log_item	**log_items,
+	int			nr_items) __releases(ailp->xa_lock)
+{
+	xfs_log_item_t		*mlip;
+	xfs_lsn_t		tail_lsn;
+	int			mlip_changed = 0;
+	int			i;
 
+	mlip = xfs_ail_min(ailp);
+
+	for (i = 0; i < nr_items; i++) {
+		struct xfs_log_item *lip = log_items[i];
+		if (!(lip->li_flags & XFS_LI_IN_AIL)) {
+			struct xfs_mount	*mp = ailp->xa_mount;
+
+			spin_unlock(&ailp->xa_lock);
+			if (!XFS_FORCED_SHUTDOWN(mp)) {
+				xfs_cmn_err(XFS_PTAG_AILDELETE, CE_ALERT, mp,
+		"%s: attempting to delete a log item that is not in the AIL",
+						__func__);
+				xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
+			}
+			return;
+		}
+
+		xfs_ail_delete(ailp, lip);
+		lip->li_flags &= ~XFS_LI_IN_AIL;
+		lip->li_lsn = 0;
+		if (mlip == lip)
+			mlip_changed = 1;
+	}
+
+	if (!mlip_changed) {
+		spin_unlock(&ailp->xa_lock);
+		return;
+	}
+
+	/*
+	 * It is not safe to access mlip after the AIL lock is dropped, so we
+	 * must get a copy of li_lsn before we do so.  This is especially
+	 * important on 32-bit platforms where accessing and updating 64-bit
+	 * values like li_lsn is not atomic. It is possible we've emptied the
+	 * AIL here, so if that is the case, pass an LSN of 0 to the tail move.
+	 */
+	mlip = xfs_ail_min(ailp);
+	tail_lsn = mlip ? mlip->li_lsn : 0;
+	spin_unlock(&ailp->xa_lock);
+	xfs_log_move_tail(ailp->xa_mount, tail_lsn);
+}
 
 /*
  * The active item list (AIL) is a doubly linked list of log
@@ -623,16 +646,13 @@
 }
 
 /*
- * Insert the given log item into the AIL.
- * We almost always insert at the end of the list, so on inserts
- * we search from the end of the list to find where the
- * new item belongs.
+ * Splice the log item list into the AIL at the given LSN.
  */
 STATIC void
-xfs_ail_insert(
+xfs_ail_splice(
 	struct xfs_ail	*ailp,
-	xfs_log_item_t	*lip)
-/* ARGSUSED */
+	struct list_head *list,
+	xfs_lsn_t	lsn)
 {
 	xfs_log_item_t	*next_lip;
 
@@ -640,39 +660,33 @@
 	 * If the list is empty, just insert the item.
 	 */
 	if (list_empty(&ailp->xa_ail)) {
-		list_add(&lip->li_ail, &ailp->xa_ail);
+		list_splice(list, &ailp->xa_ail);
 		return;
 	}
 
 	list_for_each_entry_reverse(next_lip, &ailp->xa_ail, li_ail) {
-		if (XFS_LSN_CMP(next_lip->li_lsn, lip->li_lsn) <= 0)
+		if (XFS_LSN_CMP(next_lip->li_lsn, lsn) <= 0)
 			break;
 	}
 
 	ASSERT((&next_lip->li_ail == &ailp->xa_ail) ||
-	       (XFS_LSN_CMP(next_lip->li_lsn, lip->li_lsn) <= 0));
+	       (XFS_LSN_CMP(next_lip->li_lsn, lsn) <= 0));
 
-	list_add(&lip->li_ail, &next_lip->li_ail);
-
-	xfs_ail_check(ailp, lip);
+	list_splice_init(list, &next_lip->li_ail);
 	return;
 }
 
 /*
  * Delete the given item from the AIL.  Return a pointer to the item.
  */
-/*ARGSUSED*/
-STATIC xfs_log_item_t *
+STATIC void
 xfs_ail_delete(
 	struct xfs_ail	*ailp,
 	xfs_log_item_t	*lip)
-/* ARGSUSED */
 {
 	xfs_ail_check(ailp, lip);
-
 	list_del(&lip->li_ail);
-
-	return lip;
+	xfs_trans_ail_cursor_clear(ailp, lip);
 }
 
 /*
@@ -682,7 +696,6 @@
 STATIC xfs_log_item_t *
 xfs_ail_min(
 	struct xfs_ail	*ailp)
-/* ARGSUSED */
 {
 	if (list_empty(&ailp->xa_ail))
 		return NULL;
@@ -699,7 +712,6 @@
 xfs_ail_next(
 	struct xfs_ail	*ailp,
 	xfs_log_item_t	*lip)
-/* ARGSUSED */
 {
 	if (lip->li_ail.next == &ailp->xa_ail)
 		return NULL;
diff --git a/fs/xfs/xfs_trans_extfree.c b/fs/xfs/xfs_trans_extfree.c
index f783d5e..f7590f5 100644
--- a/fs/xfs/xfs_trans_extfree.c
+++ b/fs/xfs/xfs_trans_extfree.c
@@ -69,12 +69,16 @@
 	tp->t_flags |= XFS_TRANS_DIRTY;
 	efip->efi_item.li_desc->lid_flags |= XFS_LID_DIRTY;
 
-	next_extent = efip->efi_next_extent;
+	/*
+	 * atomic_inc_return gives us the value after the increment;
+	 * we want to use it as an array index so we need to subtract 1 from
+	 * it.
+	 */
+	next_extent = atomic_inc_return(&efip->efi_next_extent) - 1;
 	ASSERT(next_extent < efip->efi_format.efi_nextents);
 	extp = &(efip->efi_format.efi_extents[next_extent]);
 	extp->ext_start = start_block;
 	extp->ext_len = ext_len;
-	efip->efi_next_extent++;
 }
 
 
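The atomic_inc_return() comment above describes a lock-free "claim a slot, then fill it" idiom. A minimal illustrative sketch of the same pattern, with made-up names and not part of the patch:

#include <linux/atomic.h>
#include <linux/errno.h>
#include <linux/kernel.h>

/* Hypothetical log with a fixed number of slots. */
struct ext_log {
	atomic_t	next_slot;	/* counts claimed slots */
	int		entries[16];
};

/* Claim a slot first, then fill it; atomic_inc_return() gives the
 * post-increment value, so subtract 1 to get the index we own.
 */
static int ext_log_add(struct ext_log *log, int value)
{
	int slot = atomic_inc_return(&log->next_slot) - 1;

	if (slot >= ARRAY_SIZE(log->entries))
		return -ENOSPC;
	log->entries[slot] = value;
	return slot;
}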
diff --git a/fs/xfs/xfs_trans_priv.h b/fs/xfs/xfs_trans_priv.h
index 62da86c..35162c2 100644
--- a/fs/xfs/xfs_trans_priv.h
+++ b/fs/xfs/xfs_trans_priv.h
@@ -22,15 +22,17 @@
 struct xfs_log_item_desc;
 struct xfs_mount;
 struct xfs_trans;
+struct xfs_ail;
+struct xfs_log_vec;
 
 void	xfs_trans_add_item(struct xfs_trans *, struct xfs_log_item *);
 void	xfs_trans_del_item(struct xfs_log_item *);
 void	xfs_trans_free_items(struct xfs_trans *tp, xfs_lsn_t commit_lsn,
 				int flags);
-void	xfs_trans_item_committed(struct xfs_log_item *lip,
-				xfs_lsn_t commit_lsn, int aborted);
 void	xfs_trans_unreserve_and_mod_sb(struct xfs_trans *tp);
 
+void	xfs_trans_committed_bulk(struct xfs_ail *ailp, struct xfs_log_vec *lv,
+				xfs_lsn_t commit_lsn, int aborted);
 /*
  * AIL traversal cursor.
  *
@@ -73,12 +75,29 @@
 /*
  * From xfs_trans_ail.c
  */
-void			xfs_trans_ail_update(struct xfs_ail *ailp,
-					struct xfs_log_item *lip, xfs_lsn_t lsn)
-					__releases(ailp->xa_lock);
-void			xfs_trans_ail_delete(struct xfs_ail *ailp,
-					struct xfs_log_item *lip)
-					__releases(ailp->xa_lock);
+void	xfs_trans_ail_update_bulk(struct xfs_ail *ailp,
+				struct xfs_log_item **log_items, int nr_items,
+				xfs_lsn_t lsn) __releases(ailp->xa_lock);
+static inline void
+xfs_trans_ail_update(
+	struct xfs_ail		*ailp,
+	struct xfs_log_item	*lip,
+	xfs_lsn_t		lsn) __releases(ailp->xa_lock)
+{
+	xfs_trans_ail_update_bulk(ailp, &lip, 1, lsn);
+}
+
+void	xfs_trans_ail_delete_bulk(struct xfs_ail *ailp,
+				struct xfs_log_item **log_items, int nr_items)
+				__releases(ailp->xa_lock);
+static inline void
+xfs_trans_ail_delete(
+	struct xfs_ail	*ailp,
+	xfs_log_item_t	*lip) __releases(ailp->xa_lock)
+{
+	xfs_trans_ail_delete_bulk(ailp, &lip, 1);
+}
+
 void			xfs_trans_ail_push(struct xfs_ail *, xfs_lsn_t);
 void			xfs_trans_unlocked_item(struct xfs_ail *,
 					xfs_log_item_t *);
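As a rough sketch of how a caller is expected to use the new bulk interface (hypothetical helper, not taken from this patch): the AIL lock is taken once and released inside the bulk call.

/* Sketch only: remove a batch of log items in one AIL lock round trip.
 * The caller is assumed to have verified the items are AIL residents;
 * xfs_trans_ail_delete_bulk() drops ailp->xa_lock before returning.
 */
static void example_ail_remove_batch(struct xfs_ail *ailp,
				     struct xfs_log_item **items,
				     int count)
{
	spin_lock(&ailp->xa_lock);
	xfs_trans_ail_delete_bulk(ailp, items, count);
	/* xa_lock has already been released here */
}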
diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c
index 8e4a63c4..d8e6f8c 100644
--- a/fs/xfs/xfs_vnodeops.c
+++ b/fs/xfs/xfs_vnodeops.c
@@ -964,29 +964,48 @@
 			xfs_flush_pages(ip, 0, -1, XBF_ASYNC, FI_NONE);
 	}
 
-	if (ip->i_d.di_nlink != 0) {
-		if ((((ip->i_d.di_mode & S_IFMT) == S_IFREG) &&
-		     ((ip->i_size > 0) || (VN_CACHED(VFS_I(ip)) > 0 ||
-		       ip->i_delayed_blks > 0)) &&
-		     (ip->i_df.if_flags & XFS_IFEXTENTS))  &&
-		    (!(ip->i_d.di_flags &
-				(XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND)))) {
+	if (ip->i_d.di_nlink == 0)
+		return 0;
 
-			/*
-			 * If we can't get the iolock just skip truncating
-			 * the blocks past EOF because we could deadlock
-			 * with the mmap_sem otherwise.  We'll get another
-			 * chance to drop them once the last reference to
-			 * the inode is dropped, so we'll never leak blocks
-			 * permanently.
-			 */
-			error = xfs_free_eofblocks(mp, ip,
-						   XFS_FREE_EOF_TRYLOCK);
-			if (error)
-				return error;
-		}
+	if ((((ip->i_d.di_mode & S_IFMT) == S_IFREG) &&
+	     ((ip->i_size > 0) || (VN_CACHED(VFS_I(ip)) > 0 ||
+	       ip->i_delayed_blks > 0)) &&
+	     (ip->i_df.if_flags & XFS_IFEXTENTS))  &&
+	    (!(ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND)))) {
+
+		/*
+		 * If we can't get the iolock just skip truncating the blocks
+		 * past EOF because we could deadlock with the mmap_sem
+		 * otherwise.  We'll get another chance to drop them once the
+		 * last reference to the inode is dropped, so we'll never leak
+		 * blocks permanently.
+		 *
+		 * Further, if the inode is being opened, written and closed
+		 * frequently and we have delayed allocation blocks
+		 * outstanding (e.g. streaming writes from the NFS server),
+		 * truncating the blocks past EOF will cause fragmentation to
+		 * occur.
+		 *
+		 * In this case don't do the truncation, either, but we have to
+		 * be careful how we detect this case. Blocks beyond EOF show
+		 * up as i_delayed_blks even when the inode is clean, so we
+		 * need to truncate them away first before checking for a dirty
+		 * release. Hence on the first dirty close we will still remove
+		 * the speculative allocation, but after that we will leave it
+		 * in place.
+		 */
+		if (xfs_iflags_test(ip, XFS_IDIRTY_RELEASE))
+			return 0;
+
+		error = xfs_free_eofblocks(mp, ip,
+					   XFS_FREE_EOF_TRYLOCK);
+		if (error)
+			return error;
+
+		/* delalloc blocks after truncation means it really is dirty */
+		if (ip->i_delayed_blks)
+			xfs_iflags_set(ip, XFS_IDIRTY_RELEASE);
 	}
-
 	return 0;
 }
 
diff --git a/include/acpi/actbl1.h b/include/acpi/actbl1.h
index c637b75..cd77aa7 100644
--- a/include/acpi/actbl1.h
+++ b/include/acpi/actbl1.h
@@ -119,7 +119,7 @@
 struct acpi_table_bert {
 	struct acpi_table_header header;	/* Common ACPI table header */
 	u32 region_length;	/* Length of the boot error region */
-	u64 address;		/* Physical addresss of the error region */
+	u64 address;		/* Physical address of the error region */
 };
 
 /* Boot Error Region (not a subtable, pointed to by Address field above) */
diff --git a/include/asm-generic/gpio.h b/include/asm-generic/gpio.h
index ff5c660..6098cae 100644
--- a/include/asm-generic/gpio.h
+++ b/include/asm-generic/gpio.h
@@ -147,11 +147,11 @@
 /* Always use the library code for GPIO management calls,
  * or when sleeping may be involved.
  */
-extern int gpio_request(unsigned gpio, const char *label);
+extern int __must_check gpio_request(unsigned gpio, const char *label);
 extern void gpio_free(unsigned gpio);
 
-extern int gpio_direction_input(unsigned gpio);
-extern int gpio_direction_output(unsigned gpio, int value);
+extern int __must_check gpio_direction_input(unsigned gpio);
+extern int __must_check gpio_direction_output(unsigned gpio, int value);
 
 extern int gpio_set_debounce(unsigned gpio, unsigned debounce);
 
@@ -192,8 +192,8 @@
 	const char	*label;
 };
 
-extern int gpio_request_one(unsigned gpio, unsigned long flags, const char *label);
-extern int gpio_request_array(struct gpio *array, size_t num);
+extern int __must_check gpio_request_one(unsigned gpio, unsigned long flags, const char *label);
+extern int __must_check gpio_request_array(struct gpio *array, size_t num);
 extern void gpio_free_array(struct gpio *array, size_t num);
 
 #ifdef CONFIG_GPIO_SYSFS
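With __must_check applied, ignoring the return value of these calls now produces a compiler warning; a minimal sketch of the expected usage (GPIO number and label are hypothetical):

#include <linux/gpio.h>

/* Sketch: request and configure one GPIO, propagating errors. */
static int example_claim_led(void)
{
	int err;

	err = gpio_request_one(42, GPIOF_OUT_INIT_LOW, "example-led");
	if (err)
		return err;

	/* ... use the GPIO ... */
	gpio_free(42);
	return 0;
}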
diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h
index 3577ca1..4644c9a 100644
--- a/include/asm-generic/io.h
+++ b/include/asm-generic/io.h
@@ -211,6 +211,36 @@
 }
 #endif
 
+static inline void readsl(const void __iomem *addr, void *buf, int len)
+{
+	insl((unsigned long)addr, buf, len);
+}
+
+static inline void readsw(const void __iomem *addr, void *buf, int len)
+{
+	insw((unsigned long)addr, buf, len);
+}
+
+static inline void readsb(const void __iomem *addr, void *buf, int len)
+{
+	insb((unsigned long)addr, buf, len);
+}
+
+static inline void writesl(const void __iomem *addr, const void *buf, int len)
+{
+	outsl((unsigned long)addr, buf, len);
+}
+
+static inline void writesw(const void __iomem *addr, const void *buf, int len)
+{
+	outsw((unsigned long)addr, buf, len);
+}
+
+static inline void writesb(const void __iomem *addr, const void *buf, int len)
+{
+	outsb((unsigned long)addr, buf, len);
+}
+
 #ifndef CONFIG_GENERIC_IOMAP
 #define ioread8(addr)		readb(addr)
 #define ioread16(addr)		readw(addr)
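For architectures that pick up these generic string-I/O helpers, a driver draining a 16-bit FIFO would look roughly like the sketch below; the register offset is invented for illustration.

#include <linux/io.h>

#define EXAMPLE_RX_FIFO	0x20	/* hypothetical register offset */

/* Sketch: read 'words' 16-bit values from an RX FIFO into buf. */
static void example_drain_fifo(void __iomem *base, u16 *buf, int words)
{
	readsw(base + EXAMPLE_RX_FIFO, buf, words);
}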
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 05cbad0..6864933 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -200,7 +200,8 @@
 
 #define READ_MOSTLY_DATA(align)						\
 	. = ALIGN(align);						\
-	*(.data..read_mostly)
+	*(.data..read_mostly)						\
+	. = ALIGN(align);
 
 #define CACHELINE_ALIGNED_DATA(align)					\
 	. = ALIGN(align);						\
diff --git a/include/crypto/if_alg.h b/include/crypto/if_alg.h
new file mode 100644
index 0000000..c5813c8
--- /dev/null
+++ b/include/crypto/if_alg.h
@@ -0,0 +1,92 @@
+/*
+ * if_alg: User-space algorithm interface
+ *
+ * Copyright (c) 2010 Herbert Xu <herbert@gondor.apana.org.au>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+
+#ifndef _CRYPTO_IF_ALG_H
+#define _CRYPTO_IF_ALG_H
+
+#include <linux/compiler.h>
+#include <linux/completion.h>
+#include <linux/if_alg.h>
+#include <linux/types.h>
+#include <net/sock.h>
+
+#define ALG_MAX_PAGES			16
+
+struct crypto_async_request;
+
+struct alg_sock {
+	/* struct sock must be the first member of struct alg_sock */
+	struct sock sk;
+
+	struct sock *parent;
+
+	const struct af_alg_type *type;
+	void *private;
+};
+
+struct af_alg_completion {
+	struct completion completion;
+	int err;
+};
+
+struct af_alg_control {
+	struct af_alg_iv *iv;
+	int op;
+};
+
+struct af_alg_type {
+	void *(*bind)(const char *name, u32 type, u32 mask);
+	void (*release)(void *private);
+	int (*setkey)(void *private, const u8 *key, unsigned int keylen);
+	int (*accept)(void *private, struct sock *sk);
+
+	struct proto_ops *ops;
+	struct module *owner;
+	char name[14];
+};
+
+struct af_alg_sgl {
+	struct scatterlist sg[ALG_MAX_PAGES];
+	struct page *pages[ALG_MAX_PAGES];
+};
+
+int af_alg_register_type(const struct af_alg_type *type);
+int af_alg_unregister_type(const struct af_alg_type *type);
+
+int af_alg_release(struct socket *sock);
+int af_alg_accept(struct sock *sk, struct socket *newsock);
+
+int af_alg_make_sg(struct af_alg_sgl *sgl, void __user *addr, int len,
+		   int write);
+void af_alg_free_sg(struct af_alg_sgl *sgl);
+
+int af_alg_cmsg_send(struct msghdr *msg, struct af_alg_control *con);
+
+int af_alg_wait_for_completion(int err, struct af_alg_completion *completion);
+void af_alg_complete(struct crypto_async_request *req, int err);
+
+static inline struct alg_sock *alg_sk(struct sock *sk)
+{
+	return (struct alg_sock *)sk;
+}
+
+static inline void af_alg_release_parent(struct sock *sk)
+{
+	sock_put(alg_sk(sk)->parent);
+}
+
+static inline void af_alg_init_completion(struct af_alg_completion *completion)
+{
+	init_completion(&completion->completion);
+}
+
+#endif	/* _CRYPTO_IF_ALG_H */
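From user space, the new interface is driven through AF_ALG sockets (the uapi half, <linux/if_alg.h>, is exported further down in this merge). A hedged sketch of the intended usage, assuming a libc that already defines AF_ALG:

#include <linux/if_alg.h>
#include <sys/socket.h>
#include <unistd.h>

/* Sketch: bind a transform socket to the "sha1" hash and return an
 * operation fd obtained via accept().  Error handling is minimal.
 */
int example_alg_open_sha1(void)
{
	struct sockaddr_alg sa = {
		.salg_family = AF_ALG,
		.salg_type   = "hash",
		.salg_name   = "sha1",
	};
	int tfm = socket(AF_ALG, SOCK_SEQPACKET, 0);

	if (tfm < 0)
		return -1;
	if (bind(tfm, (struct sockaddr *)&sa, sizeof(sa)) < 0) {
		close(tfm);
		return -1;
	}
	return accept(tfm, NULL, 0);
}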
diff --git a/drivers/crypto/padlock.h b/include/crypto/padlock.h
similarity index 82%
rename from drivers/crypto/padlock.h
rename to include/crypto/padlock.h
index b728e45..d2cfa2e 100644
--- a/drivers/crypto/padlock.h
+++ b/include/crypto/padlock.h
@@ -15,9 +15,15 @@
 
 #define PADLOCK_ALIGNMENT 16
 
-#define PFX	"padlock: "
+#define PFX	KBUILD_MODNAME ": "
 
 #define PADLOCK_CRA_PRIORITY	300
 #define PADLOCK_COMPOSITE_PRIORITY 400
 
+#ifdef CONFIG_64BIT
+#define STACK_ALIGN 16
+#else
+#define STACK_ALIGN 4
+#endif
+
 #endif	/* _CRYPTO_PADLOCK_H */
diff --git a/include/crypto/scatterwalk.h b/include/crypto/scatterwalk.h
index 833d208..4fd95a3 100644
--- a/include/crypto/scatterwalk.h
+++ b/include/crypto/scatterwalk.h
@@ -68,6 +68,21 @@
 	return (++sg)->length ? sg : (void *)sg_page(sg);
 }
 
+static inline void scatterwalk_crypto_chain(struct scatterlist *head,
+					    struct scatterlist *sg,
+					    int chain, int num)
+{
+	if (chain) {
+		head->length += sg->length;
+		sg = scatterwalk_sg_next(sg);
+	}
+
+	if (sg)
+		scatterwalk_sg_chain(head, num, sg);
+	else
+		sg_mark_end(head);
+}
+
 static inline unsigned long scatterwalk_samebuf(struct scatter_walk *walk_in,
 						struct scatter_walk *walk_out)
 {
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index 274eaaa..a4694c6 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -683,6 +683,21 @@
 	void *driver_priv; /**< Private structure for driver to use */
 };
 
+/* Size of ringbuffer for vblank timestamps. Just double-buffer
+ * in initial implementation.
+ */
+#define DRM_VBLANKTIME_RBSIZE 2
+
+/* Flags and return codes for get_vblank_timestamp() driver function. */
+#define DRM_CALLED_FROM_VBLIRQ 1
+#define DRM_VBLANKTIME_SCANOUTPOS_METHOD (1 << 0)
+#define DRM_VBLANKTIME_INVBL             (1 << 1)
+
+/* get_scanout_position() return flags */
+#define DRM_SCANOUTPOS_VALID        (1 << 0)
+#define DRM_SCANOUTPOS_INVBL        (1 << 1)
+#define DRM_SCANOUTPOS_ACCURATE     (1 << 2)
+
 /**
  * DRM driver structure. This structure represent the common code for
  * a family of cards. There will one drm_device for each card present
@@ -760,6 +775,68 @@
 	 */
 	int (*device_is_agp) (struct drm_device *dev);
 
+	/**
+	 * Called by vblank timestamping code.
+	 *
+	 * Return the current display scanout position from a crtc.
+	 *
+	 * \param dev  DRM device.
+	 * \param crtc Id of the crtc to query.
+	 * \param *vpos Target location for current vertical scanout position.
+	 * \param *hpos Target location for current horizontal scanout position.
+	 *
+	 * Returns vpos as a positive number while in active scanout area.
+	 * Returns vpos as a negative number inside vblank, counting the number
+	 * of scanlines to go until end of vblank, e.g., -1 means "one scanline
+	 * until start of active scanout / end of vblank."
+	 *
+	 * \return Flags, or'ed together as follows:
+	 *
+	 * DRM_SCANOUTPOS_VALID = Query successful.
+	 * DRM_SCANOUTPOS_INVBL = Inside vblank.
+	 * DRM_SCANOUTPOS_ACCURATE = Returned position is accurate. A lack of
+	 * this flag means that returned position may be offset by a constant
+	 * but unknown small number of scanlines wrt. real scanout position.
+	 *
+	 */
+	int (*get_scanout_position) (struct drm_device *dev, int crtc,
+				     int *vpos, int *hpos);
+
+	/**
+	 * Called by \c drm_get_last_vbltimestamp. Should return a precise
+	 * timestamp when the most recent VBLANK interval ended or will end.
+	 *
+	 * Specifically, the timestamp in @vblank_time should correspond as
+	 * closely as possible to the time when the first video scanline of
+	 * the video frame after the end of VBLANK will start scanning out,
+	 * the time immediately after the end of the VBLANK interval. If the
+	 * @crtc is currently inside VBLANK, this will be a time in the future.
+	 * If the @crtc is currently scanning out a frame, this will be the
+	 * past start time of the current scanout. This is meant to adhere
+	 * to the OpenML OML_sync_control extension specification.
+	 *
+	 * \param dev DRM device handle.
+	 * \param crtc crtc for which timestamp should be returned.
+	 * \param *max_error Maximum allowable timestamp error in nanoseconds.
+	 *                   Implementation should strive to provide timestamp
+	 *                   with an error of at most *max_error nanoseconds.
+	 *                   Returns true upper bound on error for timestamp.
+	 * \param *vblank_time Target location for returned vblank timestamp.
+	 * \param flags 0 = Defaults, no special treatment needed.
+	 * \param       DRM_CALLED_FROM_VBLIRQ = Function is called from vblank
+	 *	        irq handler. Some drivers need to apply some workarounds
+	 *              for gpu-specific vblank irq quirks if flag is set.
+	 *
+	 * \returns
+	 * Zero if timestamping isn't supported in current display mode or a
+	 * negative number on failure. A positive status code on success,
+	 * which describes how the vblank_time timestamp was computed.
+	 */
+	int (*get_vblank_timestamp) (struct drm_device *dev, int crtc,
+				     int *max_error,
+				     struct timeval *vblank_time,
+				     unsigned flags);
+
 	/* these have to be filled in */
 
 	irqreturn_t(*irq_handler) (DRM_IRQ_ARGS);
@@ -983,6 +1060,8 @@
 
 	wait_queue_head_t *vbl_queue;   /**< VBLANK wait queue */
 	atomic_t *_vblank_count;        /**< number of VBLANK interrupts (driver must alloc the right number of counters) */
+	struct timeval *_vblank_time;   /**< timestamp of current vblank_count (drivers must alloc right number of fields) */
+	spinlock_t vblank_time_lock;    /**< Protects vblank count and time updates during vblank enable/disable */
 	spinlock_t vbl_lock;
 	atomic_t *vblank_refcount;      /* number of users of vblank interruptsper crtc */
 	u32 *last_vblank;               /* protected by dev->vbl_lock, used */
@@ -1041,12 +1120,14 @@
 	/*@{ */
 	spinlock_t object_name_lock;
 	struct idr object_name_idr;
-	uint32_t invalidate_domains;    /* domains pending invalidation */
-	uint32_t flush_domains;         /* domains pending flush */
 	/*@} */
-
+	int switch_power_state;
 };
 
+#define DRM_SWITCH_POWER_ON 0
+#define DRM_SWITCH_POWER_OFF 1
+#define DRM_SWITCH_POWER_CHANGING 2
+
 static __inline__ int drm_core_check_feature(struct drm_device *dev,
 					     int feature)
 {
@@ -1284,11 +1365,22 @@
 			   struct drm_file *filp);
 extern int drm_vblank_wait(struct drm_device *dev, unsigned int *vbl_seq);
 extern u32 drm_vblank_count(struct drm_device *dev, int crtc);
+extern u32 drm_vblank_count_and_time(struct drm_device *dev, int crtc,
+				     struct timeval *vblanktime);
 extern void drm_handle_vblank(struct drm_device *dev, int crtc);
 extern int drm_vblank_get(struct drm_device *dev, int crtc);
 extern void drm_vblank_put(struct drm_device *dev, int crtc);
 extern void drm_vblank_off(struct drm_device *dev, int crtc);
 extern void drm_vblank_cleanup(struct drm_device *dev);
+extern u32 drm_get_last_vbltimestamp(struct drm_device *dev, int crtc,
+				     struct timeval *tvblank, unsigned flags);
+extern int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev,
+						 int crtc, int *max_error,
+						 struct timeval *vblank_time,
+						 unsigned flags,
+						 struct drm_crtc *refcrtc);
+extern void drm_calc_timestamping_constants(struct drm_crtc *crtc);
+
 /* Modesetting support */
 extern void drm_vblank_pre_modeset(struct drm_device *dev, int crtc);
 extern void drm_vblank_post_modeset(struct drm_device *dev, int crtc);
@@ -1321,7 +1413,6 @@
 extern int drm_agp_bind(struct drm_device *dev, struct drm_agp_binding *request);
 extern int drm_agp_bind_ioctl(struct drm_device *dev, void *data,
 			struct drm_file *file_priv);
-extern void drm_agp_chipset_flush(struct drm_device *dev);
 
 				/* Stub support (drm_stub.h) */
 extern int drm_setmaster_ioctl(struct drm_device *dev, void *data,
@@ -1340,6 +1431,9 @@
 extern int drm_put_minor(struct drm_minor **minor);
 extern unsigned int drm_debug;
 
+extern unsigned int drm_vblank_offdelay;
+extern unsigned int drm_timestamp_precision;
+
 extern struct class *drm_class;
 extern struct proc_dir_entry *drm_proc_root;
 extern struct dentry *drm_debugfs_root;
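The exports added above are meant to let a driver build its get_vblank_timestamp() hook on top of the scanout-position helper. A rough sketch of that shape (the crtc lookup is a hypothetical, driver-specific helper):

/* hypothetical driver helper mapping a pipe index to its drm_crtc */
struct drm_crtc *example_lookup_crtc(struct drm_device *dev, int crtc);

/* Sketch: defer timestamping to the new DRM helper.  The helper needs
 * the drm_crtc so it can use the timing constants computed by
 * drm_calc_timestamping_constants() for that crtc.
 */
static int example_get_vblank_timestamp(struct drm_device *dev, int crtc,
					int *max_error,
					struct timeval *vblank_time,
					unsigned flags)
{
	struct drm_crtc *refcrtc = example_lookup_crtc(dev, crtc);

	if (!refcrtc)
		return -EINVAL;

	return drm_calc_vbltimestamp_from_scanoutpos(dev, crtc, max_error,
						     vblank_time, flags,
						     refcrtc);
}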
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index 029aa68..acd7fad 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -351,8 +351,14 @@
 
 	bool enabled;
 
+	/* Requested mode from modesetting. */
 	struct drm_display_mode mode;
 
+	/* Programmed mode in hw, after adjustments for encoders,
+	 * crtc, panel scaling etc. Needed for timestamping etc.
+	 */
+	struct drm_display_mode hwmode;
+
 	int x, y;
 	const struct drm_crtc_funcs *funcs;
 
@@ -360,6 +366,9 @@
 	uint32_t gamma_size;
 	uint16_t *gamma_store;
 
+	/* Constants needed for precise vblank and swap timestamping. */
+	s64 framedur_ns, linedur_ns, pixeldur_ns;
+
 	/* if you are using the helper */
 	void *helper_private;
 };
diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h
index f22e7fe..aac27bd 100644
--- a/include/drm/drm_fb_helper.h
+++ b/include/drm/drm_fb_helper.h
@@ -121,9 +121,6 @@
 void drm_fb_helper_restore(void);
 void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helper,
 			    uint32_t fb_width, uint32_t fb_height);
-void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch,
-			    uint32_t depth);
-
 int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info);
 
 bool drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper);
diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h
index bf01531..e391777 100644
--- a/include/drm/drm_mm.h
+++ b/include/drm/drm_mm.h
@@ -62,11 +62,14 @@
 	struct list_head unused_nodes;
 	int num_unused;
 	spinlock_t unused_lock;
+	unsigned int scan_check_range : 1;
 	unsigned scan_alignment;
 	unsigned long scan_size;
 	unsigned long scan_hit_start;
 	unsigned scan_hit_size;
 	unsigned scanned_blocks;
+	unsigned long scan_start;
+	unsigned long scan_end;
 };
 
 /*
@@ -145,6 +148,10 @@
 
 void drm_mm_init_scan(struct drm_mm *mm, unsigned long size,
 		      unsigned alignment);
+void drm_mm_init_scan_with_range(struct drm_mm *mm, unsigned long size,
+				 unsigned alignment,
+				 unsigned long start,
+				 unsigned long end);
 int drm_mm_scan_add_block(struct drm_mm_node *node);
 int drm_mm_scan_remove_block(struct drm_mm_node *node);
 
diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
index 883c1d4..fe29ae3 100644
--- a/include/drm/drm_pciids.h
+++ b/include/drm/drm_pciids.h
@@ -142,6 +142,42 @@
 	{0x1002, 0x5e4c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x5e4d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x5e4f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x6720, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x6721, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x6722, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x6723, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x6724, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x6725, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x6726, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x6727, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x6728, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x6729, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x6738, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x6739, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x6740, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x6741, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x6742, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x6743, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x6744, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x6745, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x6746, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x6747, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x6748, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x6749, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x6750, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x6758, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x6759, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x6760, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x6761, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x6762, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x6763, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x6764, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x6765, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x6766, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x6767, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x6768, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x6770, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x6779, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x6880, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x6888, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x6889, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \
@@ -419,6 +455,10 @@
 	{0x1002, 0x9713, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
 	{0x1002, 0x9714, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
 	{0x1002, 0x9715, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+	{0x1002, 0x9802, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+	{0x1002, 0x9803, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+	{0x1002, 0x9804, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+	{0x1002, 0x9805, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
 	{0, 0, 0}
 
 #define r128_PCI_IDS \
diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h
index a2776e2..0039f1f 100644
--- a/include/drm/i915_drm.h
+++ b/include/drm/i915_drm.h
@@ -289,6 +289,7 @@
 #define I915_PARAM_HAS_BLT		 11
 #define I915_PARAM_HAS_RELAXED_FENCING	 12
 #define I915_PARAM_HAS_COHERENT_RINGS	 13
+#define I915_PARAM_HAS_EXEC_CONSTANTS	 14
 
 typedef struct drm_i915_getparam {
 	int param;
@@ -635,6 +636,17 @@
 #define I915_EXEC_RENDER                 (1<<0)
 #define I915_EXEC_BSD                    (2<<0)
 #define I915_EXEC_BLT                    (3<<0)
+
+/* Used for switching the constants addressing mode on gen4+ RENDER ring.
+ * Gen6+ only supports relative addressing to dynamic state (default) and
+ * absolute addressing.
+ *
+ * These flags are ignored for the BSD and BLT rings.
+ */
+#define I915_EXEC_CONSTANTS_MASK 	(3<<6)
+#define I915_EXEC_CONSTANTS_REL_GENERAL (0<<6) /* default */
+#define I915_EXEC_CONSTANTS_ABSOLUTE 	(1<<6)
+#define I915_EXEC_CONSTANTS_REL_SURFACE (2<<6) /* gen4/5 only */
 	__u64 flags;
 	__u64 rsvd1;
 	__u64 rsvd2;
diff --git a/include/drm/intel-gtt.h b/include/drm/intel-gtt.h
index d3c81946..9e343c0 100644
--- a/include/drm/intel-gtt.h
+++ b/include/drm/intel-gtt.h
@@ -2,17 +2,40 @@
 
 #ifndef _DRM_INTEL_GTT_H
 #define	_DRM_INTEL_GTT_H
-struct intel_gtt {
-	/* Number of stolen gtt entries at the beginning. */
-	unsigned int gtt_stolen_entries;
+
+const struct intel_gtt {
+	/* Size of memory reserved for graphics by the BIOS */
+	unsigned int stolen_size;
 	/* Total number of gtt entries. */
 	unsigned int gtt_total_entries;
 	/* Part of the gtt that is mappable by the cpu, for those chips where
 	 * this is not the full gtt. */
 	unsigned int gtt_mappable_entries;
-};
+	/* Whether i915 needs to use the dmar apis or not. */
+	unsigned int needs_dmar : 1;
+} *intel_gtt_get(void);
 
-struct intel_gtt *intel_gtt_get(void);
+void intel_gtt_chipset_flush(void);
+void intel_gtt_unmap_memory(struct scatterlist *sg_list, int num_sg);
+void intel_gtt_clear_range(unsigned int first_entry, unsigned int num_entries);
+int intel_gtt_map_memory(struct page **pages, unsigned int num_entries,
+			 struct scatterlist **sg_list, int *num_sg);
+void intel_gtt_insert_sg_entries(struct scatterlist *sg_list,
+				 unsigned int sg_len,
+				 unsigned int pg_start,
+				 unsigned int flags);
+void intel_gtt_insert_pages(unsigned int first_entry, unsigned int num_entries,
+			    struct page **pages, unsigned int flags);
+
+/* Special gtt memory types */
+#define AGP_DCACHE_MEMORY	1
+#define AGP_PHYS_MEMORY		2
+
+/* New caching attributes for gen6/sandybridge */
+#define AGP_USER_CACHED_MEMORY_LLC_MLC (AGP_USER_TYPES + 2)
+#define AGP_USER_UNCACHED_MEMORY (AGP_USER_TYPES + 4)
+
+/* flag for GFDT type */
+#define AGP_USER_CACHED_MEMORY_GFDT (1 << 3)
 
 #endif
-
diff --git a/include/drm/nouveau_drm.h b/include/drm/nouveau_drm.h
index bc5590b..e2cfe80 100644
--- a/include/drm/nouveau_drm.h
+++ b/include/drm/nouveau_drm.h
@@ -71,16 +71,14 @@
 #define NOUVEAU_GETPARAM_PCI_VENDOR      3
 #define NOUVEAU_GETPARAM_PCI_DEVICE      4
 #define NOUVEAU_GETPARAM_BUS_TYPE        5
-#define NOUVEAU_GETPARAM_FB_PHYSICAL     6
-#define NOUVEAU_GETPARAM_AGP_PHYSICAL    7
 #define NOUVEAU_GETPARAM_FB_SIZE         8
 #define NOUVEAU_GETPARAM_AGP_SIZE        9
-#define NOUVEAU_GETPARAM_PCI_PHYSICAL    10
 #define NOUVEAU_GETPARAM_CHIPSET_ID      11
 #define NOUVEAU_GETPARAM_VM_VRAM_BASE    12
 #define NOUVEAU_GETPARAM_GRAPH_UNITS     13
 #define NOUVEAU_GETPARAM_PTIMER_TIME     14
 #define NOUVEAU_GETPARAM_HAS_BO_USAGE    15
+#define NOUVEAU_GETPARAM_HAS_PAGEFLIP    16
 struct drm_nouveau_getparam {
 	uint64_t param;
 	uint64_t value;
@@ -171,7 +169,6 @@
 };
 
 #define NOUVEAU_GEM_CPU_PREP_NOWAIT                                  0x00000001
-#define NOUVEAU_GEM_CPU_PREP_NOBLOCK                                 0x00000002
 #define NOUVEAU_GEM_CPU_PREP_WRITE                                   0x00000004
 struct drm_nouveau_gem_cpu_prep {
 	uint32_t handle;
diff --git a/include/drm/radeon_drm.h b/include/drm/radeon_drm.h
index 10f8b53..e95a86b 100644
--- a/include/drm/radeon_drm.h
+++ b/include/drm/radeon_drm.h
@@ -906,6 +906,7 @@
 #define RADEON_INFO_ACCEL_WORKING2	0x05
 #define RADEON_INFO_TILING_CONFIG	0x06
 #define RADEON_INFO_WANT_HYPERZ		0x07
+#define RADEON_INFO_WANT_CMASK		0x08 /* get access to CMASK on r300 */
 
 struct drm_radeon_info {
 	uint32_t		request;
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index beafc15..50852aa 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -74,6 +74,8 @@
  * @is_iomem:		is this io memory ?
  * @size:		size in byte
  * @offset:		offset from the base address
+ * @io_reserved_vm:     The VM system has a refcount in @io_reserved_count
+ * @io_reserved_count:  Refcounting the number of callers to ttm_mem_io_reserve
  *
  * Structure indicating the bus placement of an object.
  */
@@ -83,7 +85,8 @@
 	unsigned long	size;
 	unsigned long	offset;
 	bool		is_iomem;
-	bool		io_reserved;
+	bool		io_reserved_vm;
+	uint64_t        io_reserved_count;
 };
 
 
@@ -154,7 +157,6 @@
  * keeps one refcount. When this refcount reaches zero,
  * the object is destroyed.
  * @event_queue: Queue for processes waiting on buffer object status change.
- * @lock: spinlock protecting mostly synchronization members.
  * @mem: structure describing current placement.
  * @persistant_swap_storage: Usually the swap storage is deleted for buffers
  * pinned in physical memory. If this behaviour is not desired, this member
@@ -213,7 +215,6 @@
 	struct kref kref;
 	struct kref list_kref;
 	wait_queue_head_t event_queue;
-	spinlock_t lock;
 
 	/**
 	 * Members protected by the bo::reserved lock.
@@ -237,6 +238,7 @@
 	struct list_head lru;
 	struct list_head ddestroy;
 	struct list_head swap;
+	struct list_head io_reserve_lru;
 	uint32_t val_seq;
 	bool seq_valid;
 
@@ -248,10 +250,10 @@
 	atomic_t reserved;
 
 	/**
-	 * Members protected by the bo::lock
+	 * Members protected by struct buffer_object_device::fence_lock
 	 * In addition, setting sync_obj to anything else
 	 * than NULL requires bo::reserved to be held. This allows for
-	 * checking NULL while reserved but not holding bo::lock.
+	 * checking NULL while reserved but not holding the mentioned lock.
 	 */
 
 	void *sync_obj_arg;
@@ -364,6 +366,44 @@
  */
 extern void ttm_bo_unref(struct ttm_buffer_object **bo);
 
+
+/**
+ * ttm_bo_list_ref_sub
+ *
+ * @bo: The buffer object.
+ * @count: The number of references with which to decrease @bo::list_kref;
+ * @never_free: The refcount should not reach zero with this operation.
+ *
+ * Release @count lru list references to this buffer object.
+ */
+extern void ttm_bo_list_ref_sub(struct ttm_buffer_object *bo, int count,
+				bool never_free);
+
+/**
+ * ttm_bo_add_to_lru
+ *
+ * @bo: The buffer object.
+ *
+ * Add this bo to the relevant mem type lru and, if it's backed by
+ * system pages (ttms), to the swap list.
+ * This function must be called with struct ttm_bo_global::lru_lock held, and
+ * is typically called immediately prior to unreserving a bo.
+ */
+extern void ttm_bo_add_to_lru(struct ttm_buffer_object *bo);
+
+/**
+ * ttm_bo_del_from_lru
+ *
+ * @bo: The buffer object.
+ *
+ * Remove this bo from all lru lists used to lookup and reserve an object.
+ * This function must be called with struct ttm_bo_global::lru_lock held,
+ * and is usually called immediately after the bo has been reserved to
+ * avoid recursive reservation from lru lists.
+ */
+extern int ttm_bo_del_from_lru(struct ttm_buffer_object *bo);
+
+
 /**
  * ttm_bo_lock_delayed_workqueue
  *
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index 8e0c848..1da8af6 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -179,30 +179,6 @@
 #define TTM_MEMTYPE_FLAG_MAPPABLE      (1 << 1)	/* Memory mappable */
 #define TTM_MEMTYPE_FLAG_CMA           (1 << 3)	/* Can't map aperture */
 
-/**
- * struct ttm_mem_type_manager
- *
- * @has_type: The memory type has been initialized.
- * @use_type: The memory type is enabled.
- * @flags: TTM_MEMTYPE_XX flags identifying the traits of the memory
- * managed by this memory type.
- * @gpu_offset: If used, the GPU offset of the first managed page of
- * fixed memory or the first managed location in an aperture.
- * @size: Size of the managed region.
- * @available_caching: A mask of available caching types, TTM_PL_FLAG_XX,
- * as defined in ttm_placement_common.h
- * @default_caching: The default caching policy used for a buffer object
- * placed in this memory type if the user doesn't provide one.
- * @manager: The range manager used for this memory type. FIXME: If the aperture
- * has a page size different from the underlying system, the granularity
- * of this manager should take care of this. But the range allocating code
- * in ttm_bo.c needs to be modified for this.
- * @lru: The lru list for this memory type.
- *
- * This structure is used to identify and manage memory types for a device.
- * It's set up by the ttm_bo_driver::init_mem_type method.
- */
-
 struct ttm_mem_type_manager;
 
 struct ttm_mem_type_manager_func {
@@ -287,6 +263,36 @@
 	void (*debug)(struct ttm_mem_type_manager *man, const char *prefix);
 };
 
+/**
+ * struct ttm_mem_type_manager
+ *
+ * @has_type: The memory type has been initialized.
+ * @use_type: The memory type is enabled.
+ * @flags: TTM_MEMTYPE_XX flags identifying the traits of the memory
+ * managed by this memory type.
+ * @gpu_offset: If used, the GPU offset of the first managed page of
+ * fixed memory or the first managed location in an aperture.
+ * @size: Size of the managed region.
+ * @available_caching: A mask of available caching types, TTM_PL_FLAG_XX,
+ * as defined in ttm_placement_common.h
+ * @default_caching: The default caching policy used for a buffer object
+ * placed in this memory type if the user doesn't provide one.
+ * @func: structure pointer implementing the range manager. See above
+ * @priv: Driver private closure for @func.
+ * @io_reserve_mutex: Mutex optionally protecting shared io_reserve structures
+ * @use_io_reserve_lru: Use an lru list to try to unreserve io_mem_regions
+ * reserved by the TTM vm system.
+ * @io_reserve_lru: Optional lru list for unreserving io mem regions.
+ * @io_reserve_fastpath: Only use bdev::driver::io_mem_reserve to obtain
+ * static information. bdev::driver::io_mem_free is never used.
+ * @lru: The lru list for this memory type.
+ *
+ * This structure is used to identify and manage memory types for a device.
+ * It's set up by the ttm_bo_driver::init_mem_type method.
+ */
+
+
+
 struct ttm_mem_type_manager {
 	struct ttm_bo_device *bdev;
 
@@ -303,6 +309,15 @@
 	uint32_t default_caching;
 	const struct ttm_mem_type_manager_func *func;
 	void *priv;
+	struct mutex io_reserve_mutex;
+	bool use_io_reserve_lru;
+	bool io_reserve_fastpath;
+
+	/*
+	 * Protected by @io_reserve_mutex:
+	 */
+
+	struct list_head io_reserve_lru;
 
 	/*
 	 * Protected by the global->lru_lock.
@@ -510,9 +525,12 @@
  *
  * @driver: Pointer to a struct ttm_bo_driver struct setup by the driver.
  * @man: An array of mem_type_managers.
+ * @fence_lock: Protects the synchronizing members on *all* bos belonging
+ * to this device.
  * @addr_space_mm: Range manager for the device address space.
  * lru_lock: Spinlock that protects the buffer+device lru lists and
  * ddestroy lists.
+ * @val_seq: Current validation sequence.
  * @nice_mode: Try nicely to wait for buffer idle when cleaning a manager.
  * If a GPU lockup has been detected, this is forced to 0.
  * @dev_mapping: A pointer to the struct address_space representing the
@@ -531,6 +549,7 @@
 	struct ttm_bo_driver *driver;
 	rwlock_t vm_lock;
 	struct ttm_mem_type_manager man[TTM_NUM_MEM_TYPES];
+	spinlock_t fence_lock;
 	/*
 	 * Protected by the vm lock.
 	 */
@@ -541,6 +560,7 @@
 	 * Protected by the global:lru lock.
 	 */
 	struct list_head ddestroy;
+	uint32_t val_seq;
 
 	/*
 	 * Protected by load / firstopen / lastclose /unload sync.
@@ -753,31 +773,6 @@
 
 extern int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait);
 
-/**
- * ttm_bo_pci_offset - Get the PCI offset for the buffer object memory.
- *
- * @bo Pointer to a struct ttm_buffer_object.
- * @bus_base On return the base of the PCI region
- * @bus_offset On return the byte offset into the PCI region
- * @bus_size On return the byte size of the buffer object or zero if
- * the buffer object memory is not accessible through a PCI region.
- *
- * Returns:
- * -EINVAL if the buffer object is currently not mappable.
- * 0 otherwise.
- */
-
-extern int ttm_bo_pci_offset(struct ttm_bo_device *bdev,
-			     struct ttm_mem_reg *mem,
-			     unsigned long *bus_base,
-			     unsigned long *bus_offset,
-			     unsigned long *bus_size);
-
-extern int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
-				struct ttm_mem_reg *mem);
-extern void ttm_mem_io_free(struct ttm_bo_device *bdev,
-				struct ttm_mem_reg *mem);
-
 extern void ttm_bo_global_release(struct drm_global_reference *ref);
 extern int ttm_bo_global_init(struct drm_global_reference *ref);
 
@@ -810,6 +805,22 @@
 extern void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo);
 
 /**
+ * ttm_bo_unmap_virtual
+ *
+ * @bo: tear down the virtual mappings for this BO
+ *
+ * The caller must take ttm_mem_io_lock before calling this function.
+ */
+extern void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo);
+
+extern int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo);
+extern void ttm_mem_io_free_vm(struct ttm_buffer_object *bo);
+extern int ttm_mem_io_lock(struct ttm_mem_type_manager *man,
+			   bool interruptible);
+extern void ttm_mem_io_unlock(struct ttm_mem_type_manager *man);
+
+
+/**
  * ttm_bo_reserve:
  *
  * @bo: A pointer to a struct ttm_buffer_object.
@@ -859,11 +870,44 @@
  * try again. (only if use_sequence == 1).
  * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
  * a signal. Release all buffer reservations and return to user-space.
+ * -EBUSY: The function needed to sleep, but @no_wait was true
+ * -EDEADLK: Bo already reserved using @sequence. This error code will only
+ * be returned if @use_sequence is set to true.
  */
 extern int ttm_bo_reserve(struct ttm_buffer_object *bo,
 			  bool interruptible,
 			  bool no_wait, bool use_sequence, uint32_t sequence);
 
+
+/**
+ * ttm_bo_reserve_locked:
+ *
+ * @bo: A pointer to a struct ttm_buffer_object.
+ * @interruptible: Sleep interruptible if waiting.
+ * @no_wait: Don't sleep while trying to reserve, rather return -EBUSY.
+ * @use_sequence: If @bo is already reserved, only sleep waiting for
+ * it to become unreserved if @sequence < (@bo)->sequence.
+ *
+ * Must be called with struct ttm_bo_global::lru_lock held,
+ * and will not remove reserved buffers from the lru lists.
+ * The function may release the LRU spinlock if it needs to sleep.
+ * Otherwise identical to ttm_bo_reserve.
+ *
+ * Returns:
+ * -EAGAIN: The reservation may cause a deadlock.
+ * Release all buffer reservations, wait for @bo to become unreserved and
+ * try again. (only if use_sequence == 1).
+ * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
+ * a signal. Release all buffer reservations and return to user-space.
+ * -EBUSY: The function needed to sleep, but @no_wait was true
+ * -EDEADLK: Bo already reserved using @sequence. This error code will only
+ * be returned if @use_sequence is set to true.
+ */
+extern int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
+				 bool interruptible,
+				 bool no_wait, bool use_sequence,
+				 uint32_t sequence);
+
 /**
  * ttm_bo_unreserve
  *
@@ -874,6 +918,16 @@
 extern void ttm_bo_unreserve(struct ttm_buffer_object *bo);
 
 /**
+ * ttm_bo_unreserve_locked
+ *
+ * @bo: A pointer to a struct ttm_buffer_object.
+ *
+ * Unreserve a previous reservation of @bo.
+ * Needs to be called with struct ttm_bo_global::lru_lock held.
+ */
+extern void ttm_bo_unreserve_locked(struct ttm_buffer_object *bo);
+
+/**
  * ttm_bo_wait_unreserved
  *
  * @bo: A pointer to a struct ttm_buffer_object.
diff --git a/include/drm/ttm/ttm_execbuf_util.h b/include/drm/ttm/ttm_execbuf_util.h
index cd2c475..26cc7f9 100644
--- a/include/drm/ttm/ttm_execbuf_util.h
+++ b/include/drm/ttm/ttm_execbuf_util.h
@@ -41,7 +41,10 @@
  * @bo:             refcounted buffer object pointer.
  * @new_sync_obj_arg: New sync_obj_arg for @bo, to be used once
  * adding a new sync object.
- * @reservied:      Indicates whether @bo has been reserved for validation.
+ * @reserved:       Indicates whether @bo has been reserved for validation.
+ * @removed:        Indicates whether @bo has been removed from lru lists.
+ * @put_count:      Number of outstanding references on bo::list_kref.
+ * @old_sync_obj:   Pointer to a sync object about to be unreferenced
  */
 
 struct ttm_validate_buffer {
@@ -49,6 +52,9 @@
 	struct ttm_buffer_object *bo;
 	void *new_sync_obj_arg;
 	bool reserved;
+	bool removed;
+	int put_count;
+	void *old_sync_obj;
 };
 
 /**
@@ -66,7 +72,6 @@
  * function ttm_eu_reserve_buffers
  *
  * @list:    thread private list of ttm_validate_buffer structs.
- * @val_seq: A unique sequence number.
  *
  * Tries to reserve bos pointed to by the list entries for validation.
  * If the function returns 0, all buffers are marked as "unfenced",
@@ -88,7 +93,7 @@
  * has failed.
  */
 
-extern int ttm_eu_reserve_buffers(struct list_head *list, uint32_t val_seq);
+extern int ttm_eu_reserve_buffers(struct list_head *list);
 
 /**
  * function ttm_eu_fence_buffer_objects.
diff --git a/include/linux/Kbuild b/include/linux/Kbuild
index d1580c1..2296d8b 100644
--- a/include/linux/Kbuild
+++ b/include/linux/Kbuild
@@ -158,6 +158,7 @@
 header-y += if.h
 header-y += if_addr.h
 header-y += if_addrlabel.h
+header-y += if_alg.h
 header-y += if_arcnet.h
 header-y += if_arp.h
 header-y += if_bonding.h
diff --git a/include/linux/agp_backend.h b/include/linux/agp_backend.h
index 09ea4a1..eaf6cd7 100644
--- a/include/linux/agp_backend.h
+++ b/include/linux/agp_backend.h
@@ -102,10 +102,8 @@
 extern int agp_copy_info(struct agp_bridge_data *, struct agp_kern_info *);
 extern int agp_bind_memory(struct agp_memory *, off_t);
 extern int agp_unbind_memory(struct agp_memory *);
-extern int agp_rebind_memory(void);
 extern void agp_enable(struct agp_bridge_data *, u32);
 extern struct agp_bridge_data *agp_backend_acquire(struct pci_dev *);
 extern void agp_backend_release(struct agp_bridge_data *);
-extern void agp_flush_chipset(struct agp_bridge_data *);
 
 #endif				/* _AGP_BACKEND_H */
diff --git a/include/linux/bfin_mac.h b/include/linux/bfin_mac.h
index 904dec7..a69554e 100644
--- a/include/linux/bfin_mac.h
+++ b/include/linux/bfin_mac.h
@@ -24,6 +24,7 @@
 	const unsigned short *mac_peripherals;
 	int phy_mode;
 	unsigned int phy_mask;
+	unsigned short vlan1_mask, vlan2_mask;
 };
 
 #endif
diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
index 64a7114..c3d6512 100644
--- a/include/linux/binfmts.h
+++ b/include/linux/binfmts.h
@@ -25,7 +25,7 @@
 /*
  * This structure is used to hold the arguments that are used when loading binaries.
  */
-struct linux_binprm{
+struct linux_binprm {
 	char buf[BINPRM_BUF_SIZE];
 #ifdef CONFIG_MMU
 	struct vm_area_struct *vma;
@@ -93,7 +93,6 @@
 	int (*load_shlib)(struct file *);
 	int (*core_dump)(struct coredump_params *cprm);
 	unsigned long min_coredump;	/* minimal dump size */
-	int hasvdso;
 };
 
 extern int __register_binfmt(struct linux_binfmt *fmt, int insert);
@@ -113,7 +112,7 @@
 
 extern int prepare_binprm(struct linux_binprm *);
 extern int __must_check remove_arg_zero(struct linux_binprm *);
-extern int search_binary_handler(struct linux_binprm *,struct pt_regs *);
+extern int search_binary_handler(struct linux_binprm *, struct pt_regs *);
 extern int flush_old_exec(struct linux_binprm * bprm);
 extern void setup_new_exec(struct linux_binprm * bprm);
 
diff --git a/include/linux/cdev.h b/include/linux/cdev.h
index f389e31..fb45919 100644
--- a/include/linux/cdev.h
+++ b/include/linux/cdev.h
@@ -28,8 +28,6 @@
 
 void cdev_del(struct cdev *);
 
-int cdev_index(struct inode *inode);
-
 void cd_forget(struct inode *);
 
 extern struct backing_dev_info directly_mappable_cdev_bdi;
diff --git a/include/linux/ceph/ceph_fs.h b/include/linux/ceph/ceph_fs.h
index c3c74ae..09dcc0c 100644
--- a/include/linux/ceph/ceph_fs.h
+++ b/include/linux/ceph/ceph_fs.h
@@ -43,6 +43,10 @@
 #define CEPH_FEATURE_NOSRCADDR      (1<<1)
 #define CEPH_FEATURE_MONCLOCKCHECK  (1<<2)
 #define CEPH_FEATURE_FLOCK          (1<<3)
+#define CEPH_FEATURE_SUBSCRIBE2     (1<<4)
+#define CEPH_FEATURE_MONNAMES       (1<<5)
+#define CEPH_FEATURE_RECONNECT_SEQ  (1<<6)
+#define CEPH_FEATURE_DIRLAYOUTHASH  (1<<7)
 
 
 /*
@@ -55,10 +59,10 @@
 	__le32 fl_stripe_count;    /* over this many objects */
 	__le32 fl_object_size;     /* until objects are this big, then move to
 				      new objects */
-	__le32 fl_cas_hash;        /* 0 = none; 1 = sha256 */
+	__le32 fl_cas_hash;        /* UNUSED.  0 = none; 1 = sha256 */
 
 	/* pg -> disk layout */
-	__le32 fl_object_stripe_unit;  /* for per-object parity, if any */
+	__le32 fl_object_stripe_unit;  /* UNUSED.  for per-object parity, if any */
 
 	/* object -> pg layout */
 	__le32 fl_pg_preferred; /* preferred primary for pg (-1 for none) */
@@ -69,6 +73,12 @@
 
 int ceph_file_layout_is_valid(const struct ceph_file_layout *layout);
 
+struct ceph_dir_layout {
+	__u8   dl_dir_hash;   /* see ceph_hash.h for ids */
+	__u8   dl_unused1;
+	__u16  dl_unused2;
+	__u32  dl_unused3;
+} __attribute__ ((packed));
 
 /* crypto algorithms */
 #define CEPH_CRYPTO_NONE 0x0
@@ -457,7 +467,7 @@
 	struct ceph_timespec rctime;
 	struct ceph_frag_tree_head fragtree;  /* (must be at end of struct) */
 } __attribute__ ((packed));
-/* followed by frag array, then symlink string, then xattr blob */
+/* followed by frag array, symlink string, dir layout, xattr blob */
 
 /* reply_lease follows dname, and reply_inode */
 struct ceph_mds_reply_lease {
diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h
index a108b42..c3011be 100644
--- a/include/linux/ceph/messenger.h
+++ b/include/linux/ceph/messenger.h
@@ -110,17 +110,12 @@
 
 /*
  * ceph_connection state bit flags
- *
- * QUEUED and BUSY are used together to ensure that only a single
- * thread is currently opening, reading or writing data to the socket.
  */
 #define LOSSYTX         0  /* we can close channel or drop messages on errors */
 #define CONNECTING	1
 #define NEGOTIATING	2
 #define KEEPALIVE_PENDING      3
 #define WRITE_PENDING	4  /* we have data ready to send */
-#define QUEUED          5  /* there is work queued on this connection */
-#define BUSY            6  /* work is being done */
 #define STANDBY		8  /* no outgoing messages, socket closed.  we keep
 			    * the ceph_connection around to maintain shared
 			    * state with the peer. */
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index ed4ba11..ce104e3 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -564,7 +564,7 @@
 /*
  * To iterate across the tasks in a cgroup:
  *
- * 1) call cgroup_iter_start to intialize an iterator
+ * 1) call cgroup_iter_start to initialize an iterator
  *
  * 2) call cgroup_iter_next() to retrieve member tasks until it
  *    returns NULL or until you want to end the iteration
diff --git a/include/linux/cramfs_fs.h b/include/linux/cramfs_fs.h
index 6fc2bed..0e7bf27 100644
--- a/include/linux/cramfs_fs.h
+++ b/include/linux/cramfs_fs.h
@@ -84,9 +84,11 @@
 				| CRAMFS_FLAG_WRONG_SIGNATURE \
 				| CRAMFS_FLAG_SHIFTED_ROOT_OFFSET )
 
+#ifdef __KERNEL__
 /* Uncompression interfaces to the underlying zlib */
 int cramfs_uncompress_block(void *dst, int dstlen, void *src, int srclen);
 int cramfs_uncompress_init(void);
 void cramfs_uncompress_exit(void);
+#endif /* __KERNEL__ */
 
 #endif
diff --git a/include/linux/cs5535.h b/include/linux/cs5535.h
index d5a1d48..6fe2114 100644
--- a/include/linux/cs5535.h
+++ b/include/linux/cs5535.h
@@ -103,14 +103,20 @@
 #define GPIO_POSITIVE_EDGE_STS	0x48
 #define GPIO_NEGATIVE_EDGE_STS	0x4C
 
+#define GPIO_FLTR7_AMOUNT	0xD8
+
 #define GPIO_MAP_X		0xE0
 #define GPIO_MAP_Y		0xE4
 #define GPIO_MAP_Z		0xE8
 #define GPIO_MAP_W		0xEC
 
+#define GPIO_FE7_SEL		0xF7
+
 void cs5535_gpio_set(unsigned offset, unsigned int reg);
 void cs5535_gpio_clear(unsigned offset, unsigned int reg);
 int cs5535_gpio_isset(unsigned offset, unsigned int reg);
+int cs5535_gpio_set_irq(unsigned group, unsigned irq);
+void cs5535_gpio_setup_event(unsigned offset, int pair, int pme);
 
 /* MFGPTs */
 
diff --git a/include/linux/decompress/inflate.h b/include/linux/decompress/inflate.h
index f9b06cc..8c0aef1 100644
--- a/include/linux/decompress/inflate.h
+++ b/include/linux/decompress/inflate.h
@@ -1,9 +1,6 @@
 #ifndef INFLATE_H
 #define INFLATE_H
 
-/* Other housekeeping constants */
-#define INBUFSIZ 4096
-
 int gunzip(unsigned char *inbuf, int len,
 	   int(*fill)(void*, unsigned int),
 	   int(*flush)(void*, unsigned int),
diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
index ad5ec1d..4cb72b9 100644
--- a/include/linux/decompress/mm.h
+++ b/include/linux/decompress/mm.h
@@ -61,8 +61,6 @@
 #define large_malloc(a) malloc(a)
 #define large_free(a) free(a)
 
-#define set_error_fn(x)
-
 #define INIT
 
 #else /* STATIC */
@@ -72,6 +70,7 @@
 #include <linux/kernel.h>
 #include <linux/fs.h>
 #include <linux/string.h>
+#include <linux/slab.h>
 #include <linux/vmalloc.h>
 
 /* Use defines rather than static inline in order to avoid spurious
@@ -84,9 +83,6 @@
 #define large_malloc(a) vmalloc(a)
 #define large_free(a) vfree(a)
 
-static void(*error)(char *m);
-#define set_error_fn(x) error = x;
-
 #define INIT __init
 #define STATIC
 
diff --git a/include/linux/decompress/unxz.h b/include/linux/decompress/unxz.h
new file mode 100644
index 0000000..41728fc
--- /dev/null
+++ b/include/linux/decompress/unxz.h
@@ -0,0 +1,19 @@
+/*
+ * Wrapper for decompressing XZ-compressed kernel, initramfs, and initrd
+ *
+ * Author: Lasse Collin <lasse.collin@tukaani.org>
+ *
+ * This file has been put into the public domain.
+ * You can do whatever you want with this file.
+ */
+
+#ifndef DECOMPRESS_UNXZ_H
+#define DECOMPRESS_UNXZ_H
+
+int unxz(unsigned char *in, int in_size,
+	 int (*fill)(void *dest, unsigned int size),
+	 int (*flush)(void *src, unsigned int size),
+	 unsigned char *out, int *in_used,
+	 void (*error)(char *x));
+
+#endif
diff --git a/include/linux/dynamic_debug.h b/include/linux/dynamic_debug.h
index a90b389..1c70028 100644
--- a/include/linux/dynamic_debug.h
+++ b/include/linux/dynamic_debug.h
@@ -44,34 +44,24 @@
 extern int ddebug_remove_module(const char *mod_name);
 
 #define dynamic_pr_debug(fmt, ...) do {					\
-	__label__ do_printk;						\
-	__label__ out;							\
 	static struct _ddebug descriptor				\
 	__used								\
 	__attribute__((section("__verbose"), aligned(8))) =		\
 	{ KBUILD_MODNAME, __func__, __FILE__, fmt, __LINE__,		\
 		_DPRINTK_FLAGS_DEFAULT };				\
-	JUMP_LABEL(&descriptor.enabled, do_printk);			\
-	goto out;							\
-do_printk:								\
-	printk(KERN_DEBUG pr_fmt(fmt),	##__VA_ARGS__);			\
-out:	;								\
+	if (unlikely(descriptor.enabled))				\
+		printk(KERN_DEBUG pr_fmt(fmt),	##__VA_ARGS__);		\
 	} while (0)
 
 
 #define dynamic_dev_dbg(dev, fmt, ...) do {				\
-	__label__ do_printk;						\
-	__label__ out;							\
 	static struct _ddebug descriptor				\
 	__used								\
 	__attribute__((section("__verbose"), aligned(8))) =		\
 	{ KBUILD_MODNAME, __func__, __FILE__, fmt, __LINE__,		\
 		_DPRINTK_FLAGS_DEFAULT };				\
-	JUMP_LABEL(&descriptor.enabled, do_printk);			\
-	goto out;							\
-do_printk:								\
-	dev_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__);		\
-out:	;								\
+	if (unlikely(descriptor.enabled))				\
+		dev_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__);	\
 	} while (0)
 
 #else
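
Call-site sketch: with CONFIG_DYNAMIC_DEBUG the reworked macros simply test descriptor.enabled, and individual sites are still switched at run time through the dynamic_debug control file (debugfs path assumed):

    /* in a driver; pr_debug() expands to dynamic_pr_debug() */
    pr_debug("resetting %s, retries=%d\n", name, retries);

    /* from user space, enable just this file's sites:
     *   echo 'file mydriver.c +p' > /sys/kernel/debug/dynamic_debug/control
     */
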
diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h
index f16a010..bec8b82 100644
--- a/include/linux/etherdevice.h
+++ b/include/linux/etherdevice.h
@@ -48,8 +48,10 @@
 
 
 
-extern struct net_device *alloc_etherdev_mq(int sizeof_priv, unsigned int queue_count);
+extern struct net_device *alloc_etherdev_mqs(int sizeof_priv, unsigned int txqs,
+					    unsigned int rxqs);
 #define alloc_etherdev(sizeof_priv) alloc_etherdev_mq(sizeof_priv, 1)
+#define alloc_etherdev_mq(sizeof_priv, count) alloc_etherdev_mqs(sizeof_priv, count, count)
 
 /**
  * is_zero_ether_addr - Determine if given Ethernet address is all zeros.
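
Driver-side sketch of the new allocator; the old helpers become the symmetric special cases (struct mydrv_priv is a placeholder):

    struct net_device *dev;

    dev = alloc_etherdev_mqs(sizeof(struct mydrv_priv),
                             4 /* txqs */, 2 /* rxqs */);
    if (!dev)
            return -ENOMEM;
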
diff --git a/include/linux/ext3_fs.h b/include/linux/ext3_fs.h
index 6ce1bca..65990ef 100644
--- a/include/linux/ext3_fs.h
+++ b/include/linux/ext3_fs.h
@@ -724,21 +724,30 @@
 					 ~EXT3_DIR_ROUND)
 #define EXT3_MAX_REC_LEN		((1<<16)-1)
 
+/*
+ * Tests against MAX_REC_LEN etc were put in place for 64k block
+ * sizes; if that is not possible on this arch, we can skip
+ * those tests and speed things up.
+ */
 static inline unsigned ext3_rec_len_from_disk(__le16 dlen)
 {
 	unsigned len = le16_to_cpu(dlen);
 
+#if (PAGE_CACHE_SIZE >= 65536)
 	if (len == EXT3_MAX_REC_LEN)
 		return 1 << 16;
+#endif
 	return len;
 }
 
 static inline __le16 ext3_rec_len_to_disk(unsigned len)
 {
+#if (PAGE_CACHE_SIZE >= 65536)
 	if (len == (1 << 16))
 		return cpu_to_le16(EXT3_MAX_REC_LEN);
 	else if (len > (1 << 16))
 		BUG();
+#endif
 	return cpu_to_le16(len);
 }
 
@@ -856,6 +865,7 @@
 extern int ext3_should_retry_alloc(struct super_block *sb, int *retries);
 extern void ext3_init_block_alloc_info(struct inode *);
 extern void ext3_rsv_window_add(struct super_block *sb, struct ext3_reserve_window_node *rsv);
+extern int ext3_trim_fs(struct super_block *sb, struct fstrim_range *range);
 
 /* dir.c */
 extern int ext3_check_dir_entry(const char *, struct inode *,
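
A worked example of the encoding handled by the rec_len helpers above: on configurations with PAGE_CACHE_SIZE >= 64k, ext3_rec_len_to_disk(1 << 16) stores EXT3_MAX_REC_LEN (0xffff) and ext3_rec_len_from_disk(0xffff) maps it back to 1 << 16; on smaller page sizes both paths compile down to plain le16 conversions, which is the speed-up the new #if buys.
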
diff --git a/include/linux/falloc.h b/include/linux/falloc.h
index 3c15510..73e0b62 100644
--- a/include/linux/falloc.h
+++ b/include/linux/falloc.h
@@ -2,6 +2,7 @@
 #define _FALLOC_H_
 
 #define FALLOC_FL_KEEP_SIZE	0x01 /* default is extend size */
+#define FALLOC_FL_PUNCH_HOLE	0x02 /* de-allocates range */
 
 #ifdef __KERNEL__
 
diff --git a/include/linux/fec.h b/include/linux/fec.h
index 5d3523d..bcff455 100644
--- a/include/linux/fec.h
+++ b/include/linux/fec.h
@@ -3,6 +3,8 @@
  * Copyright (c) 2009 Orex Computed Radiography
  *   Baruch Siach <baruch@tkos.co.il>
  *
+ * Copyright (C) 2010 Freescale Semiconductor, Inc.
+ *
  * Header file for the FEC platform data
  *
  * This program is free software; you can redistribute it and/or modify
@@ -16,6 +18,7 @@
 
 struct fec_platform_data {
 	phy_interface_t phy;
+	unsigned char mac[ETH_ALEN];
 };
 
 #endif
diff --git a/include/linux/firewire-cdev.h b/include/linux/firewire-cdev.h
index 68c642d..59ea406 100644
--- a/include/linux/firewire-cdev.h
+++ b/include/linux/firewire-cdev.h
@@ -273,7 +273,7 @@
  * @closure:	See &fw_cdev_event_common;
  *		set by %FW_CDEV_CREATE_ISO_CONTEXT ioctl
  * @type:	%FW_CDEV_EVENT_ISO_INTERRUPT_MULTICHANNEL
- * @completed:	Offset into the receive buffer; data before this offest is valid
+ * @completed:	Offset into the receive buffer; data before this offset is valid
  *
  * This event is sent in multichannel contexts (context type
  * %FW_CDEV_ISO_CONTEXT_RECEIVE_MULTICHANNEL) for &fw_cdev_iso_packet buffer
diff --git a/include/linux/fs.h b/include/linux/fs.h
index f84d992..c070128 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1423,6 +1423,7 @@
 	 * generic_show_options()
 	 */
 	char __rcu *s_options;
+	const struct dentry_operations *s_d_op; /* default d_op for dentries */
 };
 
 extern struct timespec current_fs_time(struct super_block *sb);
@@ -1834,7 +1835,9 @@
 			int (*set)(struct super_block *,void *),
 			void *data);
 extern struct dentry *mount_pseudo(struct file_system_type *, char *,
-	const struct super_operations *ops, unsigned long);
+	const struct super_operations *ops,
+	const struct dentry_operations *dops,
+	unsigned long);
 extern void simple_set_mnt(struct vfsmount *mnt, struct super_block *sb);
 
 static inline void sb_mark_dirty(struct super_block *sb)
diff --git a/include/linux/gpio-i2cmux.h b/include/linux/gpio-i2cmux.h
new file mode 100644
index 0000000..4a333bb
--- /dev/null
+++ b/include/linux/gpio-i2cmux.h
@@ -0,0 +1,38 @@
+/*
+ * gpio-i2cmux interface to platform code
+ *
+ * Peter Korsgaard <peter.korsgaard@barco.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _LINUX_GPIO_I2CMUX_H
+#define _LINUX_GPIO_I2CMUX_H
+
+/* MUX has no specific idle mode */
+#define GPIO_I2CMUX_NO_IDLE	((unsigned)-1)
+
+/**
+ * struct gpio_i2cmux_platform_data - Platform-dependent data for gpio-i2cmux
+ * @parent: Parent I2C bus adapter number
+ * @base_nr: Base I2C bus number to number adapters from or zero for dynamic
+ * @values: Array of bitmasks of GPIO settings (low/high) for each
+ *	position
+ * @n_values: Number of multiplexer positions (busses to instantiate)
+ * @gpios: Array of GPIO numbers used to control MUX
+ * @n_gpios: Number of GPIOs used to control MUX
+ * @idle: Bitmask to write to MUX when idle or GPIO_I2CMUX_NO_IDLE if not used
+ */
+struct gpio_i2cmux_platform_data {
+	int parent;
+	int base_nr;
+	const unsigned *values;
+	int n_values;
+	const unsigned *gpios;
+	int n_gpios;
+	unsigned idle;
+};
+
+#endif /* _LINUX_GPIO_I2CMUX_H */
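
Board-code sketch of the platform data above, describing a 4-way mux driven by two GPIOs (GPIO and bus numbers are placeholders):

    static const unsigned mymux_gpios[]  = { 58, 59 };
    static const unsigned mymux_values[] = { 0, 1, 2, 3 };

    static struct gpio_i2cmux_platform_data mymux_pdata = {
            .parent    = 1,                 /* mux hangs off i2c-1 */
            .base_nr   = 0,                 /* dynamic child bus numbers */
            .values    = mymux_values,
            .n_values  = ARRAY_SIZE(mymux_values),
            .gpios     = mymux_gpios,
            .n_gpios   = ARRAY_SIZE(mymux_gpios),
            .idle      = GPIO_I2CMUX_NO_IDLE,
    };
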
diff --git a/include/linux/gpio.h b/include/linux/gpio.h
index e41f7dd..f79d67f 100644
--- a/include/linux/gpio.h
+++ b/include/linux/gpio.h
@@ -13,6 +13,7 @@
 #include <linux/errno.h>
 
 struct device;
+struct gpio;
 struct gpio_chip;
 
 /*
@@ -29,7 +30,18 @@
 	return 0;
 }
 
-static inline int gpio_request(unsigned gpio, const char *label)
+static inline int __must_check gpio_request(unsigned gpio, const char *label)
+{
+	return -ENOSYS;
+}
+
+static inline int __must_check gpio_request_one(unsigned gpio,
+					unsigned long flags, const char *label)
+{
+	return -ENOSYS;
+}
+
+static inline int __must_check gpio_request_array(struct gpio *array, size_t num)
 {
 	return -ENOSYS;
 }
@@ -42,12 +54,20 @@
 	WARN_ON(1);
 }
 
-static inline int gpio_direction_input(unsigned gpio)
+static inline void gpio_free_array(struct gpio *array, size_t num)
+{
+	might_sleep();
+
+	/* GPIO can never have been requested */
+	WARN_ON(1);
+}
+
+static inline int __must_check gpio_direction_input(unsigned gpio)
 {
 	return -ENOSYS;
 }
 
-static inline int gpio_direction_output(unsigned gpio, int value)
+static inline int __must_check gpio_direction_output(unsigned gpio, int value)
 {
 	return -ENOSYS;
 }
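
Sketch of the bulk helpers whose stubs are added above, assuming struct gpio carries gpio/flags/label fields and the GPIOF_* initial-direction flags from the real (non-stub) implementation:

    static struct gpio led_gpios[] = {
            { 32, GPIOF_OUT_INIT_LOW,  "led-red"   },
            { 33, GPIOF_OUT_INIT_HIGH, "led-green" },
    };

    err = gpio_request_array(led_gpios, ARRAY_SIZE(led_gpios));
    if (err)                        /* __must_check: failure must be handled */
            return err;
    /* drive the LEDs, then release them on teardown */
    gpio_free_array(led_gpios, ARRAY_SIZE(led_gpios));
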
diff --git a/include/linux/hid.h b/include/linux/hid.h
index 20b9801..d91c25e 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -402,7 +402,7 @@
 	__u16 dpad;			/* dpad input code */
 };
 
-#define HID_MAX_FIELDS 64
+#define HID_MAX_FIELDS 128
 
 struct hid_report {
 	struct list_head list;
@@ -593,6 +593,7 @@
  * @report_fixup: called before report descriptor parsing (NULL means nop)
  * @input_mapping: invoked on input registering before mapping an usage
  * @input_mapped: invoked on input registering after mapping an usage
+ * @feature_mapping: invoked on feature registering
  * @suspend: invoked on suspend (NULL means nop)
  * @resume: invoked on resume if device was not reset (NULL means nop)
  * @reset_resume: invoked on resume if device was reset (NULL means nop)
@@ -636,6 +637,9 @@
 	int (*input_mapped)(struct hid_device *hdev,
 			struct hid_input *hidinput, struct hid_field *field,
 			struct hid_usage *usage, unsigned long **bit, int *max);
+	void (*feature_mapping)(struct hid_device *hdev,
+			struct hid_input *hidinput, struct hid_field *field,
+			struct hid_usage *usage);
 #ifdef CONFIG_PM
 	int (*suspend)(struct hid_device *hdev, pm_message_t message);
 	int (*resume)(struct hid_device *hdev);
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index 56cfe23..903576d 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -57,9 +57,10 @@
  * transmit an arbitrary number of messages without interruption.
  * @count must be less than 64k since msg.len is u16.
  */
-extern int i2c_master_send(struct i2c_client *client, const char *buf,
+extern int i2c_master_send(const struct i2c_client *client, const char *buf,
 			   int count);
-extern int i2c_master_recv(struct i2c_client *client, char *buf, int count);
+extern int i2c_master_recv(const struct i2c_client *client, char *buf,
+			   int count);
 
 /* Transfer num messages.
  */
@@ -78,23 +79,25 @@
 /* Now follow the 'nice' access routines. These also document the calling
    conventions of i2c_smbus_xfer. */
 
-extern s32 i2c_smbus_read_byte(struct i2c_client *client);
-extern s32 i2c_smbus_write_byte(struct i2c_client *client, u8 value);
-extern s32 i2c_smbus_read_byte_data(struct i2c_client *client, u8 command);
-extern s32 i2c_smbus_write_byte_data(struct i2c_client *client,
+extern s32 i2c_smbus_read_byte(const struct i2c_client *client);
+extern s32 i2c_smbus_write_byte(const struct i2c_client *client, u8 value);
+extern s32 i2c_smbus_read_byte_data(const struct i2c_client *client,
+				    u8 command);
+extern s32 i2c_smbus_write_byte_data(const struct i2c_client *client,
 				     u8 command, u8 value);
-extern s32 i2c_smbus_read_word_data(struct i2c_client *client, u8 command);
-extern s32 i2c_smbus_write_word_data(struct i2c_client *client,
+extern s32 i2c_smbus_read_word_data(const struct i2c_client *client,
+				    u8 command);
+extern s32 i2c_smbus_write_word_data(const struct i2c_client *client,
 				     u8 command, u16 value);
 /* Returns the number of read bytes */
-extern s32 i2c_smbus_read_block_data(struct i2c_client *client,
+extern s32 i2c_smbus_read_block_data(const struct i2c_client *client,
 				     u8 command, u8 *values);
-extern s32 i2c_smbus_write_block_data(struct i2c_client *client,
+extern s32 i2c_smbus_write_block_data(const struct i2c_client *client,
 				      u8 command, u8 length, const u8 *values);
 /* Returns the number of read bytes */
-extern s32 i2c_smbus_read_i2c_block_data(struct i2c_client *client,
+extern s32 i2c_smbus_read_i2c_block_data(const struct i2c_client *client,
 					 u8 command, u8 length, u8 *values);
-extern s32 i2c_smbus_write_i2c_block_data(struct i2c_client *client,
+extern s32 i2c_smbus_write_i2c_block_data(const struct i2c_client *client,
 					  u8 command, u8 length,
 					  const u8 *values);
 #endif /* I2C */
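
With the constified prototypes, read-only helpers can take a const client pointer directly; a sketch (register 0x01 is made up):

    static int mydrv_read_status(const struct i2c_client *client)
    {
            return i2c_smbus_read_byte_data(client, 0x01);
    }
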
diff --git a/include/linux/if_alg.h b/include/linux/if_alg.h
new file mode 100644
index 0000000..0f9acce
--- /dev/null
+++ b/include/linux/if_alg.h
@@ -0,0 +1,40 @@
+/*
+ * if_alg: User-space algorithm interface
+ *
+ * Copyright (c) 2010 Herbert Xu <herbert@gondor.apana.org.au>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+
+#ifndef _LINUX_IF_ALG_H
+#define _LINUX_IF_ALG_H
+
+#include <linux/types.h>
+
+struct sockaddr_alg {
+	__u16	salg_family;
+	__u8	salg_type[14];
+	__u32	salg_feat;
+	__u32	salg_mask;
+	__u8	salg_name[64];
+};
+
+struct af_alg_iv {
+	__u32	ivlen;
+	__u8	iv[0];
+};
+
+/* Socket options */
+#define ALG_SET_KEY			1
+#define ALG_SET_IV			2
+#define ALG_SET_OP			3
+
+/* Operations */
+#define ALG_OP_DECRYPT			0
+#define ALG_OP_ENCRYPT			1
+
+#endif	/* _LINUX_IF_ALG_H */
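
User-space sketch of the interface; the AF_ALG family constant, the SOCK_SEQPACKET flow and the accept()-per-operation model come from the socket-layer side of this series, so treat them as assumptions here (error handling omitted):

    struct sockaddr_alg sa = {
            .salg_family = AF_ALG,
            .salg_type   = "hash",
            .salg_name   = "sha256",
    };
    int tfm = socket(AF_ALG, SOCK_SEQPACKET, 0);
    int op;

    bind(tfm, (struct sockaddr *)&sa, sizeof(sa));
    op = accept(tfm, NULL, 0);
    write(op, buf, len);            /* feed the data */
    read(op, digest, 32);           /* 32-byte SHA-256 digest */
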
diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h
index f7e73c3..dd3f201 100644
--- a/include/linux/if_bridge.h
+++ b/include/linux/if_bridge.h
@@ -103,7 +103,7 @@
 
 extern void brioctl_set(int (*ioctl_hook)(struct net *, unsigned int, void __user *));
 
-typedef int (*br_should_route_hook_t)(struct sk_buff *skb);
+typedef int br_should_route_hook_t(struct sk_buff *skb);
 extern br_should_route_hook_t __rcu *br_should_route_hook;
 
 #endif
diff --git a/include/linux/input.h b/include/linux/input.h
index c4e9d91..e428382 100644
--- a/include/linux/input.h
+++ b/include/linux/input.h
@@ -802,6 +802,7 @@
 #define SW_CAMERA_LENS_COVER	0x09  /* set = lens covered */
 #define SW_KEYPAD_SLIDE		0x0a  /* set = keypad slide out */
 #define SW_FRONT_PROXIMITY	0x0b  /* set = front proximity sensor active */
+#define SW_ROTATE_LOCK		0x0c  /* set = rotate locked/disabled */
 #define SW_MAX			0x0f
 #define SW_CNT			(SW_MAX+1)
 
diff --git a/include/linux/input/as5011.h b/include/linux/input/as5011.h
new file mode 100644
index 0000000..1affd0d
--- /dev/null
+++ b/include/linux/input/as5011.h
@@ -0,0 +1,20 @@
+#ifndef _AS5011_H
+#define _AS5011_H
+
+/*
+ * Copyright (c) 2010, 2011 Fabien Marteau <fabien.marteau@armadeus.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+struct as5011_platform_data {
+	unsigned int button_gpio;
+	unsigned int axis_irq; /* irq number */
+	unsigned long axis_irqflags;
+	char xp, xn; /* threshold for x axis */
+	char yp, yn; /* threshold for y axis */
+};
+
+#endif /* _AS5011_H */
diff --git a/include/linux/intel-gtt.h b/include/linux/intel-gtt.h
deleted file mode 100644
index 1d19ab2..0000000
--- a/include/linux/intel-gtt.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Common Intel AGPGART and GTT definitions.
- */
-#ifndef _INTEL_GTT_H
-#define _INTEL_GTT_H
-
-#include <linux/agp_backend.h>
-
-/* This is for Intel only GTT controls.
- *
- * Sandybridge: AGP_USER_CACHED_MEMORY default to LLC only
- */
-
-#define AGP_USER_CACHED_MEMORY_LLC_MLC (AGP_USER_TYPES + 2)
-#define AGP_USER_UNCACHED_MEMORY (AGP_USER_TYPES + 4)
-
-/* flag for GFDT type */
-#define AGP_USER_CACHED_MEMORY_GFDT (1 << 3)
-
-#endif
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index 2ae86aa..27e79c2 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -94,7 +94,7 @@
  *
  * This is an opaque datatype.
  **/
-typedef struct handle_s		handle_t;	/* Atomic operation type */
+typedef struct jbd2_journal_handle handle_t;	/* Atomic operation type */
 
 
 /**
@@ -416,7 +416,7 @@
  * in so it can be fixed later.
  */
 
-struct handle_s
+struct jbd2_journal_handle
 {
 	/* Which compound transaction is this update a part of? */
 	transaction_t		*h_transaction;
@@ -1158,6 +1158,22 @@
 	kmem_cache_free(jbd2_handle_cache, handle);
 }
 
+/*
+ * jbd2_inode management (optional, for those file systems that want to use
+ * dynamically allocated jbd2_inode structures)
+ */
+extern struct kmem_cache *jbd2_inode_cache;
+
+static inline struct jbd2_inode *jbd2_alloc_inode(gfp_t gfp_flags)
+{
+	return kmem_cache_alloc(jbd2_inode_cache, gfp_flags);
+}
+
+static inline void jbd2_free_inode(struct jbd2_inode *jinode)
+{
+	kmem_cache_free(jbd2_inode_cache, jinode);
+}
+
 /* Primary revoke support */
 #define JOURNAL_REVOKE_DEFAULT_HASH 256
 extern int	   jbd2_journal_init_revoke(journal_t *, int);
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index d0fbc04..57dac70 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -143,9 +143,22 @@
 
 #define might_sleep_if(cond) do { if (cond) might_sleep(); } while (0)
 
-#define abs(x) ({				\
-		long __x = (x);			\
-		(__x < 0) ? -__x : __x;		\
+/*
+ * abs() handles unsigned and signed longs, ints, shorts and chars.  For all
+ * input types abs() returns a signed long.
+ * abs() should not be used for 64-bit types (s64, u64, long long) - use abs64()
+ * for those.
+ */
+#define abs(x) ({						\
+		long ret;					\
+		if (sizeof(x) == sizeof(long)) {		\
+			long __x = (x);				\
+			ret = (__x < 0) ? -__x : __x;		\
+		} else {					\
+			int __x = (x);				\
+			ret = (__x < 0) ? -__x : __x;		\
+		}						\
+		ret;						\
 	})
 
 #define abs64(x) ({				\
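
A short illustration of the reworked macro: both branches yield a positive value in a signed type, while 64-bit quantities still go through abs64():

    long a = abs(-5);               /* int-sized input, a == 5 */
    long b = abs(-123456789L);      /* long-sized input takes the long branch */
    s64  c = abs64(-(1LL << 40));   /* 64-bit values keep using abs64() */
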
diff --git a/include/linux/kmsg_dump.h b/include/linux/kmsg_dump.h
index 24b4414..2a0d7d6 100644
--- a/include/linux/kmsg_dump.h
+++ b/include/linux/kmsg_dump.h
@@ -18,6 +18,10 @@
 	KMSG_DUMP_OOPS,
 	KMSG_DUMP_PANIC,
 	KMSG_DUMP_KEXEC,
+	KMSG_DUMP_RESTART,
+	KMSG_DUMP_HALT,
+	KMSG_DUMP_POWEROFF,
+	KMSG_DUMP_EMERG,
 };
 
 /**
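
Dumper-side sketch of how the new reasons would typically be filtered inside a struct kmsg_dumper ->dump() callback (callback signature abbreviated):

    switch (reason) {
    case KMSG_DUMP_RESTART:
    case KMSG_DUMP_HALT:
    case KMSG_DUMP_POWEROFF:
            /* persist the final messages across a clean shutdown */
            break;
    default:
            return;         /* oops/panic/kexec handled elsewhere */
    }
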
diff --git a/include/linux/kref.h b/include/linux/kref.h
index 6cc38fc..d4a62ab 100644
--- a/include/linux/kref.h
+++ b/include/linux/kref.h
@@ -24,5 +24,7 @@
 void kref_init(struct kref *kref);
 void kref_get(struct kref *kref);
 int kref_put(struct kref *kref, void (*release) (struct kref *kref));
+int kref_sub(struct kref *kref, unsigned int count,
+	     void (*release) (struct kref *kref));
 
 #endif /* _KREF_H_ */
diff --git a/include/linux/kvm.h b/include/linux/kvm.h
index 919ae53..ea2dc1a 100644
--- a/include/linux/kvm.h
+++ b/include/linux/kvm.h
@@ -540,6 +540,7 @@
 #endif
 #define KVM_CAP_PPC_GET_PVINFO 57
 #define KVM_CAP_PPC_IRQ_LEVEL 58
+#define KVM_CAP_ASYNC_PF 59
 
 #ifdef KVM_CAP_IRQ_ROUTING
 
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index a055742..b5021db 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -16,6 +16,8 @@
 #include <linux/mm.h>
 #include <linux/preempt.h>
 #include <linux/msi.h>
+#include <linux/slab.h>
+#include <linux/rcupdate.h>
 #include <asm/signal.h>
 
 #include <linux/kvm.h>
@@ -40,6 +42,7 @@
 #define KVM_REQ_KICK               9
 #define KVM_REQ_DEACTIVATE_FPU    10
 #define KVM_REQ_EVENT             11
+#define KVM_REQ_APF_HALT          12
 
 #define KVM_USERSPACE_IRQ_SOURCE_ID	0
 
@@ -74,6 +77,27 @@
 int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
 			      struct kvm_io_device *dev);
 
+#ifdef CONFIG_KVM_ASYNC_PF
+struct kvm_async_pf {
+	struct work_struct work;
+	struct list_head link;
+	struct list_head queue;
+	struct kvm_vcpu *vcpu;
+	struct mm_struct *mm;
+	gva_t gva;
+	unsigned long addr;
+	struct kvm_arch_async_pf arch;
+	struct page *page;
+	bool done;
+};
+
+void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu);
+void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu);
+int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
+		       struct kvm_arch_async_pf *arch);
+int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu);
+#endif
+
 struct kvm_vcpu {
 	struct kvm *kvm;
 #ifdef CONFIG_PREEMPT_NOTIFIERS
@@ -104,6 +128,15 @@
 	gpa_t mmio_phys_addr;
 #endif
 
+#ifdef CONFIG_KVM_ASYNC_PF
+	struct {
+		u32 queued;
+		struct list_head queue;
+		struct list_head done;
+		spinlock_t lock;
+	} async_pf;
+#endif
+
 	struct kvm_vcpu_arch arch;
 };
 
@@ -113,16 +146,19 @@
  */
 #define KVM_MEM_MAX_NR_PAGES ((1UL << 31) - 1)
 
+struct kvm_lpage_info {
+	unsigned long rmap_pde;
+	int write_count;
+};
+
 struct kvm_memory_slot {
 	gfn_t base_gfn;
 	unsigned long npages;
 	unsigned long flags;
 	unsigned long *rmap;
 	unsigned long *dirty_bitmap;
-	struct {
-		unsigned long rmap_pde;
-		int write_count;
-	} *lpage_info[KVM_NR_PAGE_SIZES - 1];
+	unsigned long *dirty_bitmap_head;
+	struct kvm_lpage_info *lpage_info[KVM_NR_PAGE_SIZES - 1];
 	unsigned long userspace_addr;
 	int user_alloc;
 	int id;
@@ -169,6 +205,7 @@
 
 struct kvm_memslots {
 	int nmemslots;
+	u64 generation;
 	struct kvm_memory_slot memslots[KVM_MEMORY_SLOTS +
 					KVM_PRIVATE_MEM_SLOTS];
 };
@@ -206,6 +243,10 @@
 
 	struct mutex irq_lock;
 #ifdef CONFIG_HAVE_KVM_IRQCHIP
+	/*
+	 * Update side is protected by irq_lock and,
+	 * if configured, irqfds.lock.
+	 */
 	struct kvm_irq_routing_table __rcu *irq_routing;
 	struct hlist_head mask_notifier_list;
 	struct hlist_head irq_ack_notifier_list;
@@ -216,6 +257,7 @@
 	unsigned long mmu_notifier_seq;
 	long mmu_notifier_count;
 #endif
+	long tlbs_dirty;
 };
 
 /* The guest did something we don't support. */
@@ -302,7 +344,11 @@
 
 pfn_t hva_to_pfn_atomic(struct kvm *kvm, unsigned long addr);
 pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn);
+pfn_t gfn_to_pfn_async(struct kvm *kvm, gfn_t gfn, bool *async,
+		       bool write_fault, bool *writable);
 pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
+pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
+		      bool *writable);
 pfn_t gfn_to_pfn_memslot(struct kvm *kvm,
 			 struct kvm_memory_slot *slot, gfn_t gfn);
 int memslot_id(struct kvm *kvm, gfn_t gfn);
@@ -321,18 +367,25 @@
 			 int offset, int len);
 int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
 		    unsigned long len);
+int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
+			   void *data, unsigned long len);
+int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
+			      gpa_t gpa);
 int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
 int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
 struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
 int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn);
 unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn);
 void mark_page_dirty(struct kvm *kvm, gfn_t gfn);
+void mark_page_dirty_in_slot(struct kvm *kvm, struct kvm_memory_slot *memslot,
+			     gfn_t gfn);
 
 void kvm_vcpu_block(struct kvm_vcpu *vcpu);
 void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu);
 void kvm_resched(struct kvm_vcpu *vcpu);
 void kvm_load_guest_fpu(struct kvm_vcpu *vcpu);
 void kvm_put_guest_fpu(struct kvm_vcpu *vcpu);
+
 void kvm_flush_remote_tlbs(struct kvm *kvm);
 void kvm_reload_remote_mmus(struct kvm *kvm);
 
@@ -398,7 +451,19 @@
 
 void kvm_free_physmem(struct kvm *kvm);
 
-struct  kvm *kvm_arch_create_vm(void);
+#ifndef __KVM_HAVE_ARCH_VM_ALLOC
+static inline struct kvm *kvm_arch_alloc_vm(void)
+{
+	return kzalloc(sizeof(struct kvm), GFP_KERNEL);
+}
+
+static inline void kvm_arch_free_vm(struct kvm *kvm)
+{
+	kfree(kvm);
+}
+#endif
+
+int kvm_arch_init_vm(struct kvm *kvm);
 void kvm_arch_destroy_vm(struct kvm *kvm);
 void kvm_free_all_assigned_devices(struct kvm *kvm);
 void kvm_arch_sync_events(struct kvm *kvm);
@@ -414,16 +479,8 @@
 	void (*irq_acked)(struct kvm_irq_ack_notifier *kian);
 };
 
-#define KVM_ASSIGNED_MSIX_PENDING		0x1
-struct kvm_guest_msix_entry {
-	u32 vector;
-	u16 entry;
-	u16 flags;
-};
-
 struct kvm_assigned_dev_kernel {
 	struct kvm_irq_ack_notifier ack_notifier;
-	struct work_struct interrupt_work;
 	struct list_head list;
 	int assigned_dev_id;
 	int host_segnr;
@@ -434,13 +491,14 @@
 	bool host_irq_disabled;
 	struct msix_entry *host_msix_entries;
 	int guest_irq;
-	struct kvm_guest_msix_entry *guest_msix_entries;
+	struct msix_entry *guest_msix_entries;
 	unsigned long irq_requested_type;
 	int irq_source_id;
 	int flags;
 	struct pci_dev *dev;
 	struct kvm *kvm;
-	spinlock_t assigned_dev_lock;
+	spinlock_t intx_lock;
+	char irq_name[32];
 };
 
 struct kvm_irq_mask_notifier {
@@ -462,6 +520,8 @@
 				   unsigned long *deliver_bitmask);
 #endif
 int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level);
+int kvm_set_msi(struct kvm_kernel_irq_routing_entry *irq_entry, struct kvm *kvm,
+		int irq_source_id, int level);
 void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin);
 void kvm_register_irq_ack_notifier(struct kvm *kvm,
 				   struct kvm_irq_ack_notifier *kian);
@@ -603,17 +663,28 @@
 void kvm_eventfd_init(struct kvm *kvm);
 int kvm_irqfd(struct kvm *kvm, int fd, int gsi, int flags);
 void kvm_irqfd_release(struct kvm *kvm);
+void kvm_irq_routing_update(struct kvm *, struct kvm_irq_routing_table *);
 int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args);
 
 #else
 
 static inline void kvm_eventfd_init(struct kvm *kvm) {}
+
 static inline int kvm_irqfd(struct kvm *kvm, int fd, int gsi, int flags)
 {
 	return -EINVAL;
 }
 
 static inline void kvm_irqfd_release(struct kvm *kvm) {}
+
+#ifdef CONFIG_HAVE_KVM_IRQCHIP
+static inline void kvm_irq_routing_update(struct kvm *kvm,
+					  struct kvm_irq_routing_table *irq_rt)
+{
+	rcu_assign_pointer(kvm->irq_routing, irq_rt);
+}
+#endif
+
 static inline int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
 {
 	return -ENOSYS;
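
Sketch of the new cached guest-write path: the cache avoids a memslot lookup per write and is revalidated against the memslots generation counter added above (gpa and val are placeholders):

    struct gfn_to_hva_cache ghc;

    kvm_gfn_to_hva_cache_init(kvm, &ghc, gpa);
    kvm_write_guest_cached(kvm, &ghc, &val, sizeof(val));
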
diff --git a/include/linux/kvm_types.h b/include/linux/kvm_types.h
index 7ac0d4ee..fa7cc72 100644
--- a/include/linux/kvm_types.h
+++ b/include/linux/kvm_types.h
@@ -67,4 +67,11 @@
 	u32 dest_id;
 };
 
+struct gfn_to_hva_cache {
+	u64 generation;
+	gpa_t gpa;
+	unsigned long hva;
+	struct kvm_memory_slot *memslot;
+};
+
 #endif /* __KVM_TYPES_H__ */
diff --git a/include/linux/leds-lp5521.h b/include/linux/leds-lp5521.h
index 38368d7..fd548d2 100644
--- a/include/linux/leds-lp5521.h
+++ b/include/linux/leds-lp5521.h
@@ -42,6 +42,7 @@
 	int	(*setup_resources)(void);
 	void	(*release_resources)(void);
 	void	(*enable)(bool state);
+	const char *label;
 };
 
 #endif /* __LINUX_LP5521_H */
diff --git a/include/linux/leds-lp5523.h b/include/linux/leds-lp5523.h
index 7967476..2694289 100644
--- a/include/linux/leds-lp5523.h
+++ b/include/linux/leds-lp5523.h
@@ -42,6 +42,7 @@
 	int	(*setup_resources)(void);
 	void	(*release_resources)(void);
 	void	(*enable)(bool state);
+	const	char *label;
 };
 
 #endif /* __LINUX_LP5523_H */
diff --git a/include/linux/lockd/debug.h b/include/linux/lockd/debug.h
index 34b2b7f..257d377 100644
--- a/include/linux/lockd/debug.h
+++ b/include/linux/lockd/debug.h
@@ -44,14 +44,4 @@
 #define NLMDBG_XDR		0x0100
 #define NLMDBG_ALL		0x7fff
 
-
-/*
- * Support for printing NLM cookies in dprintk()
- */
-#ifdef RPC_DEBUG
-struct nlm_cookie;
-/* Call this function with the BKL held (it uses a static buffer) */
-extern const char *nlmdbg_cookie2a(const struct nlm_cookie *);
-#endif
-
 #endif /* LINUX_LOCKD_DEBUG_H */
diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h
index 2dee05e..ff9abff 100644
--- a/include/linux/lockd/lockd.h
+++ b/include/linux/lockd/lockd.h
@@ -202,9 +202,9 @@
  * Lockd client functions
  */
 struct nlm_rqst * nlm_alloc_call(struct nlm_host *host);
-void		  nlm_release_call(struct nlm_rqst *);
 int		  nlm_async_call(struct nlm_rqst *, u32, const struct rpc_call_ops *);
 int		  nlm_async_reply(struct nlm_rqst *, u32, const struct rpc_call_ops *);
+void		  nlmclnt_release_call(struct nlm_rqst *);
 struct nlm_wait * nlmclnt_prepare_block(struct nlm_host *host, struct file_lock *fl);
 void		  nlmclnt_finish_block(struct nlm_wait *block);
 int		  nlmclnt_block(struct nlm_wait *block, struct nlm_rqst *req, long timeout);
@@ -223,13 +223,14 @@
 					const u32 version,
 					const char *hostname,
 					int noresvport);
+void		  nlmclnt_release_host(struct nlm_host *);
 struct nlm_host  *nlmsvc_lookup_host(const struct svc_rqst *rqstp,
 					const char *hostname,
 					const size_t hostname_len);
+void		  nlmsvc_release_host(struct nlm_host *);
 struct rpc_clnt * nlm_bind_host(struct nlm_host *);
 void		  nlm_rebind_host(struct nlm_host *);
 struct nlm_host * nlm_get_host(struct nlm_host *);
-void		  nlm_release_host(struct nlm_host *);
 void		  nlm_shutdown_hosts(void);
 void		  nlm_host_rebooted(const struct nlm_reboot *);
 
@@ -267,6 +268,7 @@
 void		  nlmsvc_traverse_blocks(struct nlm_host *, struct nlm_file *,
 					nlm_host_match_fn_t match);
 void		  nlmsvc_grant_reply(struct nlm_cookie *, __be32);
+void		  nlmsvc_release_call(struct nlm_rqst *);
 
 /*
  * File handling for the server personality
diff --git a/include/linux/mbcache.h b/include/linux/mbcache.h
index 54cbbac..5525d37 100644
--- a/include/linux/mbcache.h
+++ b/include/linux/mbcache.h
@@ -18,6 +18,17 @@
 	} e_index;
 };
 
+struct mb_cache {
+	struct list_head		c_cache_list;
+	const char			*c_name;
+	atomic_t			c_entry_count;
+	int				c_max_entries;
+	int				c_bucket_bits;
+	struct kmem_cache		*c_entry_cache;
+	struct list_head		*c_block_hash;
+	struct list_head		*c_index_hash;
+};
+
 /* Functions on caches */
 
 struct mb_cache *mb_cache_create(const char *, int);
diff --git a/include/linux/mfd/ab8500.h b/include/linux/mfd/ab8500.h
index d63b605..85cf2c28 100644
--- a/include/linux/mfd/ab8500.h
+++ b/include/linux/mfd/ab8500.h
@@ -99,8 +99,6 @@
 #define AB8500_NR_IRQS			104
 #define AB8500_NUM_IRQ_REGS		13
 
-#define AB8500_NUM_REGULATORS   15
-
 /**
  * struct ab8500 - ab8500 internal structure
  * @dev: parent device
@@ -145,7 +143,8 @@
 struct ab8500_platform_data {
 	int irq_base;
 	void (*init) (struct ab8500 *);
-	struct regulator_init_data *regulator[AB8500_NUM_REGULATORS];
+	int num_regulator;
+	struct regulator_init_data *regulator;
 };
 
 extern int __devinit ab8500_init(struct ab8500 *ab8500);
diff --git a/include/linux/mfd/core.h b/include/linux/mfd/core.h
index cb93d80..5582ab3 100644
--- a/include/linux/mfd/core.h
+++ b/include/linux/mfd/core.h
@@ -39,7 +39,7 @@
 	size_t			data_size;
 
 	/*
-	 * This resources can be specified relatievly to the parent device.
+	 * These resources can be specified relative to the parent device.
 	 * For accessing the device you should use the resources from the device
 	 */
 	int			num_resources;
diff --git a/include/linux/mfd/mc13783.h b/include/linux/mfd/mc13783.h
index b4c741e..7d0f3d6 100644
--- a/include/linux/mfd/mc13783.h
+++ b/include/linux/mfd/mc13783.h
@@ -1,4 +1,5 @@
 /*
+ * Copyright 2010 Yong Shen <yong.shen@linaro.org>
  * Copyright 2009-2010 Pengutronix
  * Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de>
  *
@@ -122,39 +123,39 @@
 		unsigned int channel, unsigned int *sample);
 
 
-#define	MC13783_SW_SW1A		0
-#define	MC13783_SW_SW1B		1
-#define	MC13783_SW_SW2A		2
-#define	MC13783_SW_SW2B		3
-#define	MC13783_SW_SW3		4
-#define	MC13783_SW_PLL		5
-#define	MC13783_REGU_VAUDIO	6
-#define	MC13783_REGU_VIOHI	7
-#define	MC13783_REGU_VIOLO	8
-#define	MC13783_REGU_VDIG	9
-#define	MC13783_REGU_VGEN	10
-#define	MC13783_REGU_VRFDIG	11
-#define	MC13783_REGU_VRFREF	12
-#define	MC13783_REGU_VRFCP	13
-#define	MC13783_REGU_VSIM	14
-#define	MC13783_REGU_VESIM	15
-#define	MC13783_REGU_VCAM	16
-#define	MC13783_REGU_VRFBG	17
-#define	MC13783_REGU_VVIB	18
-#define	MC13783_REGU_VRF1	19
-#define	MC13783_REGU_VRF2	20
-#define	MC13783_REGU_VMMC1	21
-#define	MC13783_REGU_VMMC2	22
-#define	MC13783_REGU_GPO1	23
-#define	MC13783_REGU_GPO2	24
-#define	MC13783_REGU_GPO3	25
-#define	MC13783_REGU_GPO4	26
-#define	MC13783_REGU_V1		27
-#define	MC13783_REGU_V2		28
-#define	MC13783_REGU_V3		29
-#define	MC13783_REGU_V4		30
-#define	MC13783_REGU_PWGT1SPI	31
-#define	MC13783_REGU_PWGT2SPI	32
+#define	MC13783_REG_SW1A		0
+#define	MC13783_REG_SW1B		1
+#define	MC13783_REG_SW2A		2
+#define	MC13783_REG_SW2B		3
+#define	MC13783_REG_SW3		4
+#define	MC13783_REG_PLL		5
+#define	MC13783_REG_VAUDIO	6
+#define	MC13783_REG_VIOHI	7
+#define	MC13783_REG_VIOLO	8
+#define	MC13783_REG_VDIG	9
+#define	MC13783_REG_VGEN	10
+#define	MC13783_REG_VRFDIG	11
+#define	MC13783_REG_VRFREF	12
+#define	MC13783_REG_VRFCP	13
+#define	MC13783_REG_VSIM	14
+#define	MC13783_REG_VESIM	15
+#define	MC13783_REG_VCAM	16
+#define	MC13783_REG_VRFBG	17
+#define	MC13783_REG_VVIB	18
+#define	MC13783_REG_VRF1	19
+#define	MC13783_REG_VRF2	20
+#define	MC13783_REG_VMMC1	21
+#define	MC13783_REG_VMMC2	22
+#define	MC13783_REG_GPO1	23
+#define	MC13783_REG_GPO2	24
+#define	MC13783_REG_GPO3	25
+#define	MC13783_REG_GPO4	26
+#define	MC13783_REG_V1		27
+#define	MC13783_REG_V2		28
+#define	MC13783_REG_V3		29
+#define	MC13783_REG_V4		30
+#define	MC13783_REG_PWGT1SPI	31
+#define	MC13783_REG_PWGT2SPI	32
 
 #define MC13783_IRQ_ADCDONE	MC13XXX_IRQ_ADCDONE
 #define MC13783_IRQ_ADCBISDONE	MC13XXX_IRQ_ADCBISDONE
diff --git a/include/linux/mfd/mc13892.h b/include/linux/mfd/mc13892.h
new file mode 100644
index 0000000..a00f2be
--- /dev/null
+++ b/include/linux/mfd/mc13892.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2010 Yong Shen <yong.shen@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 as published by the
+ * Free Software Foundation.
+ */
+
+#ifndef __LINUX_MFD_MC13892_H
+#define __LINUX_MFD_MC13892_H
+
+#include <linux/mfd/mc13xxx.h>
+
+#define MC13892_SW1		0
+#define MC13892_SW2		1
+#define MC13892_SW3		2
+#define MC13892_SW4		3
+#define MC13892_SWBST	4
+#define MC13892_VIOHI	5
+#define MC13892_VPLL	6
+#define MC13892_VDIG	7
+#define MC13892_VSD	8
+#define MC13892_VUSB2	9
+#define MC13892_VVIDEO	10
+#define MC13892_VAUDIO	11
+#define MC13892_VCAM	12
+#define MC13892_VGEN1	13
+#define MC13892_VGEN2	14
+#define MC13892_VGEN3	15
+#define MC13892_VUSB	16
+#define MC13892_GPO1	17
+#define MC13892_GPO2	18
+#define MC13892_GPO3	19
+#define MC13892_GPO4	20
+#define MC13892_PWGT1SPI	21
+#define MC13892_PWGT2SPI	22
+#define MC13892_VCOINCELL	23
+
+#endif
diff --git a/include/linux/mfd/tmio.h b/include/linux/mfd/tmio.h
index 085f041..8e70310 100644
--- a/include/linux/mfd/tmio.h
+++ b/include/linux/mfd/tmio.h
@@ -57,6 +57,10 @@
  * is configured in 4-bit mode.
  */
 #define TMIO_MMC_BLKSZ_2BYTES		(1 << 1)
+/*
+ * Some controllers can support SDIO IRQ signalling.
+ */
+#define TMIO_MMC_SDIO_IRQ		(1 << 2)
 
 int tmio_core_mmc_enable(void __iomem *cnf, int shift, unsigned long base);
 int tmio_core_mmc_resume(void __iomem *cnf, int shift, unsigned long base);
@@ -66,6 +70,7 @@
 struct tmio_mmc_dma {
 	void *chan_priv_tx;
 	void *chan_priv_rx;
+	int alignment_shift;
 };
 
 /*
diff --git a/include/linux/mfd/wm8994/core.h b/include/linux/mfd/wm8994/core.h
index de79bae..3fd3684 100644
--- a/include/linux/mfd/wm8994/core.h
+++ b/include/linux/mfd/wm8994/core.h
@@ -17,6 +17,11 @@
 
 #include <linux/interrupt.h>
 
+enum wm8994_type {
+	WM8994 = 0,
+	WM8958 = 1,
+};
+
 struct regulator_dev;
 struct regulator_bulk_data;
 
@@ -48,6 +53,8 @@
 	struct mutex io_lock;
 	struct mutex irq_lock;
 
+	enum wm8994_type type;
+
 	struct device *dev;
 	int (*read_dev)(struct wm8994 *wm8994, unsigned short reg,
 			int bytes, void *dest);
@@ -68,6 +75,7 @@
 	u16 gpio_regs[WM8994_NUM_GPIO_REGS];
 
 	struct regulator_dev *dbvdd;
+	int num_supplies;
 	struct regulator_bulk_data *supplies;
 };
 
diff --git a/include/linux/mfd/wm8994/pdata.h b/include/linux/mfd/wm8994/pdata.h
index add8a1b..9eab263 100644
--- a/include/linux/mfd/wm8994/pdata.h
+++ b/include/linux/mfd/wm8994/pdata.h
@@ -30,6 +30,8 @@
 
 #define WM8994_DRC_REGS 5
 #define WM8994_EQ_REGS  20
+#define WM8958_MBC_CUTOFF_REGS 20
+#define WM8958_MBC_COEFF_REGS  48
 
 /**
  * DRC configurations are specified with a label and a set of register
@@ -59,6 +61,18 @@
         u16 regs[WM8994_EQ_REGS];
 };
 
+/**
+ * Multiband compressor configurations are specified with a label and
+ * two sets of values to write.  Configurations are expected to be
+ * generated using the multiband compressor configuration panel in
+ * WISCE - see http://www.wolfsonmicro.com/wisce/
+ */
+struct wm8958_mbc_cfg {
+	const char *name;
+	u16 cutoff_regs[WM8958_MBC_CUTOFF_REGS];
+	u16 coeff_regs[WM8958_MBC_COEFF_REGS];
+};
+
 struct wm8994_pdata {
 	int gpio_base;
 
@@ -78,6 +92,9 @@
         int num_retune_mobile_cfgs;
         struct wm8994_retune_mobile_cfg *retune_mobile_cfgs;
 
+	int num_mbc_cfgs;
+	struct wm8958_mbc_cfg *mbc_cfgs;
+
         /* LINEOUT can be differential or single ended */
         unsigned int lineout1_diff:1;
         unsigned int lineout2_diff:1;
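
Platform-data sketch for the new MBC fields; the register values are placeholders, not real WISCE output:

    static struct wm8958_mbc_cfg mbc_cfgs[] = {
            {
                    .name        = "Loud speaker",
                    .cutoff_regs = { 0x25ff, 0x0cb9 /* ... 20 values ... */ },
                    .coeff_regs  = { 0x0010, 0x0001 /* ... 48 values ... */ },
            },
    };

    static struct wm8994_pdata board_wm8994_pdata = {
            .num_mbc_cfgs = ARRAY_SIZE(mbc_cfgs),
            .mbc_cfgs     = mbc_cfgs,
    };
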
diff --git a/include/linux/mfd/wm8994/registers.h b/include/linux/mfd/wm8994/registers.h
index 967f62f..be072fa 100644
--- a/include/linux/mfd/wm8994/registers.h
+++ b/include/linux/mfd/wm8994/registers.h
@@ -64,12 +64,16 @@
 #define WM8994_LDO_1                            0x3B
 #define WM8994_LDO_2                            0x3C
 #define WM8994_CHARGE_PUMP_1                    0x4C
+#define WM8958_CHARGE_PUMP_2                    0x4D
 #define WM8994_CLASS_W_1                        0x51
 #define WM8994_DC_SERVO_1                       0x54
 #define WM8994_DC_SERVO_2                       0x55
 #define WM8994_DC_SERVO_4                       0x57
 #define WM8994_DC_SERVO_READBACK                0x58
 #define WM8994_ANALOGUE_HP_1                    0x60
+#define WM8958_MIC_DETECT_1                     0xD0
+#define WM8958_MIC_DETECT_2                     0xD1
+#define WM8958_MIC_DETECT_3                     0xD2
 #define WM8994_CHIP_REVISION                    0x100
 #define WM8994_CONTROL_INTERFACE                0x101
 #define WM8994_WRITE_SEQUENCER_CTRL_1           0x110
@@ -109,6 +113,10 @@
 #define WM8994_AIF2DAC_LRCLK                    0x315
 #define WM8994_AIF2DAC_DATA                     0x316
 #define WM8994_AIF2ADC_DATA                     0x317
+#define WM8958_AIF3_CONTROL_1                   0x320
+#define WM8958_AIF3_CONTROL_2                   0x321
+#define WM8958_AIF3DAC_DATA                     0x322
+#define WM8958_AIF3ADC_DATA                     0x323
 #define WM8994_AIF1_ADC1_LEFT_VOLUME            0x400
 #define WM8994_AIF1_ADC1_RIGHT_VOLUME           0x401
 #define WM8994_AIF1_DAC1_LEFT_VOLUME            0x402
@@ -242,6 +250,83 @@
 #define WM8994_INTERRUPT_STATUS_2_MASK          0x739
 #define WM8994_INTERRUPT_CONTROL                0x740
 #define WM8994_IRQ_DEBOUNCE                     0x748
+#define WM8958_DSP2_PROGRAM                     0x900
+#define WM8958_DSP2_CONFIG                      0x901
+#define WM8958_DSP2_MAGICNUM                    0xA00
+#define WM8958_DSP2_RELEASEYEAR                 0xA01
+#define WM8958_DSP2_RELEASEMONTHDAY             0xA02
+#define WM8958_DSP2_RELEASETIME                 0xA03
+#define WM8958_DSP2_VERMAJMIN                   0xA04
+#define WM8958_DSP2_VERBUILD                    0xA05
+#define WM8958_DSP2_EXECCONTROL                 0xA0D
+#define WM8958_MBC_BAND_2_LOWER_CUTOFF_C1_1     0x2200
+#define WM8958_MBC_BAND_2_LOWER_CUTOFF_C1_2     0x2201
+#define WM8958_MBC_BAND_2_LOWER_CUTOFF_C2_1     0x2202
+#define WM8958_MBC_BAND_2_LOWER_CUTOFF_C2_2     0x2203
+#define WM8958_MBC_BAND_2_LOWER_CUTOFF_C3_1     0x2204
+#define WM8958_MBC_BAND_2_LOWER_CUTOFF_C3_2     0x2205
+#define WM8958_MBC_BAND_2_UPPER_CUTOFF_C2_1     0x2206
+#define WM8958_MBC_BAND_2_UPPER_CUTOFF_C2_2     0x2207
+#define WM8958_MBC_BAND_2_UPPER_CUTOFF_C3_1     0x2208
+#define WM8958_MBC_BAND_2_UPPER_CUTOFF_C3_2     0x2209
+#define WM8958_MBC_BAND_2_UPPER_CUTOFF_C1_1     0x220A
+#define WM8958_MBC_BAND_2_UPPER_CUTOFF_C1_2     0x220B
+#define WM8958_MBC_BAND_1_UPPER_CUTOFF_C1_1     0x220C
+#define WM8958_MBC_BAND_1_UPPER_CUTOFF_C1_2     0x220D
+#define WM8958_MBC_BAND_1_UPPER_CUTOFF_C2_1     0x220E
+#define WM8958_MBC_BAND_1_UPPER_CUTOFF_C2_2     0x220F
+#define WM8958_MBC_BAND_1_UPPER_CUTOFF_C3_1     0x2210
+#define WM8958_MBC_BAND_1_UPPER_CUTOFF_C3_2     0x2211
+#define WM8958_MBC_BAND_1_LOWER_CUTOFF_1        0x2212
+#define WM8958_MBC_BAND_1_LOWER_CUTOFF_2        0x2213
+#define WM8958_MBC_BAND_1_K_1                   0x2400
+#define WM8958_MBC_BAND_1_K_2                   0x2401
+#define WM8958_MBC_BAND_1_N1_1                  0x2402
+#define WM8958_MBC_BAND_1_N1_2                  0x2403
+#define WM8958_MBC_BAND_1_N2_1                  0x2404
+#define WM8958_MBC_BAND_1_N2_2                  0x2405
+#define WM8958_MBC_BAND_1_N3_1                  0x2406
+#define WM8958_MBC_BAND_1_N3_2                  0x2407
+#define WM8958_MBC_BAND_1_N4_1                  0x2408
+#define WM8958_MBC_BAND_1_N4_2                  0x2409
+#define WM8958_MBC_BAND_1_N5_1                  0x240A
+#define WM8958_MBC_BAND_1_N5_2                  0x240B
+#define WM8958_MBC_BAND_1_X1_1                  0x240C
+#define WM8958_MBC_BAND_1_X1_2                  0x240D
+#define WM8958_MBC_BAND_1_X2_1                  0x240E
+#define WM8958_MBC_BAND_1_X2_2                  0x240F
+#define WM8958_MBC_BAND_1_X3_1                  0x2410
+#define WM8958_MBC_BAND_1_X3_2                  0x2411
+#define WM8958_MBC_BAND_1_ATTACK_1              0x2412
+#define WM8958_MBC_BAND_1_ATTACK_2              0x2413
+#define WM8958_MBC_BAND_1_DECAY_1               0x2414
+#define WM8958_MBC_BAND_1_DECAY_2               0x2415
+#define WM8958_MBC_BAND_2_K_1                   0x2416
+#define WM8958_MBC_BAND_2_K_2                   0x2417
+#define WM8958_MBC_BAND_2_N1_1                  0x2418
+#define WM8958_MBC_BAND_2_N1_2                  0x2419
+#define WM8958_MBC_BAND_2_N2_1                  0x241A
+#define WM8958_MBC_BAND_2_N2_2                  0x241B
+#define WM8958_MBC_BAND_2_N3_1                  0x241C
+#define WM8958_MBC_BAND_2_N3_2                  0x241D
+#define WM8958_MBC_BAND_2_N4_1                  0x241E
+#define WM8958_MBC_BAND_2_N4_2                  0x241F
+#define WM8958_MBC_BAND_2_N5_1                  0x2420
+#define WM8958_MBC_BAND_2_N5_2                  0x2421
+#define WM8958_MBC_BAND_2_X1_1                  0x2422
+#define WM8958_MBC_BAND_2_X1_2                  0x2423
+#define WM8958_MBC_BAND_2_X2_1                  0x2424
+#define WM8958_MBC_BAND_2_X2_2                  0x2425
+#define WM8958_MBC_BAND_2_X3_1                  0x2426
+#define WM8958_MBC_BAND_2_X3_2                  0x2427
+#define WM8958_MBC_BAND_2_ATTACK_1              0x2428
+#define WM8958_MBC_BAND_2_ATTACK_2              0x2429
+#define WM8958_MBC_BAND_2_DECAY_1               0x242A
+#define WM8958_MBC_BAND_2_DECAY_2               0x242B
+#define WM8958_MBC_B2_PG2_1                     0x242C
+#define WM8958_MBC_B2_PG2_2                     0x242D
+#define WM8958_MBC_B1_PG2_1                     0x242E
+#define WM8958_MBC_B1_PG2_2                     0x242F
 #define WM8994_WRITE_SEQUENCER_0                0x3000
 #define WM8994_WRITE_SEQUENCER_1                0x3001
 #define WM8994_WRITE_SEQUENCER_2                0x3002
@@ -992,6 +1077,12 @@
 /*
  * R6 (0x06) - Power Management (6)
  */
+#define WM8958_AIF3ADC_SRC_MASK                 0x0600  /* AIF3ADC_SRC - [10:9] */
+#define WM8958_AIF3ADC_SRC_SHIFT                     9  /* AIF3ADC_SRC - [10:9] */
+#define WM8958_AIF3ADC_SRC_WIDTH                     2  /* AIF3ADC_SRC - [10:9] */
+#define WM8958_AIF2DAC_SRC_MASK                 0x0180  /* AIF2DAC_SRC - [8:7] */
+#define WM8958_AIF2DAC_SRC_SHIFT                     7  /* AIF2DAC_SRC - [8:7] */
+#define WM8958_AIF2DAC_SRC_WIDTH                     2  /* AIF2DAC_SRC - [8:7] */
 #define WM8994_AIF3_TRI                         0x0020  /* AIF3_TRI */
 #define WM8994_AIF3_TRI_MASK                    0x0020  /* AIF3_TRI */
 #define WM8994_AIF3_TRI_SHIFT                        5  /* AIF3_TRI */
@@ -1836,6 +1927,14 @@
 #define WM8994_CP_ENA_WIDTH                          1  /* CP_ENA */
 
 /*
+ * R77 (0x4D) - Charge Pump (2)
+ */
+#define WM8958_CP_DISCH                         0x8000  /* CP_DISCH */
+#define WM8958_CP_DISCH_MASK                    0x8000  /* CP_DISCH */
+#define WM8958_CP_DISCH_SHIFT                       15  /* CP_DISCH */
+#define WM8958_CP_DISCH_WIDTH                        1  /* CP_DISCH */
+
+/*
  * R81 (0x51) - Class W (1)
  */
 #define WM8994_CP_DYN_SRC_SEL_MASK              0x0300  /* CP_DYN_SRC_SEL - [9:8] */
@@ -1952,6 +2051,46 @@
 #define WM8994_HPOUT1R_DLY_WIDTH                     1  /* HPOUT1R_DLY */
 
 /*
+ * R208 (0xD0) - Mic Detect 1
+ */
+#define WM8958_MICD_BIAS_STARTTIME_MASK         0xF000  /* MICD_BIAS_STARTTIME - [15:12] */
+#define WM8958_MICD_BIAS_STARTTIME_SHIFT            12  /* MICD_BIAS_STARTTIME - [15:12] */
+#define WM8958_MICD_BIAS_STARTTIME_WIDTH             4  /* MICD_BIAS_STARTTIME - [15:12] */
+#define WM8958_MICD_RATE_MASK                   0x0F00  /* MICD_RATE - [11:8] */
+#define WM8958_MICD_RATE_SHIFT                       8  /* MICD_RATE - [11:8] */
+#define WM8958_MICD_RATE_WIDTH                       4  /* MICD_RATE - [11:8] */
+#define WM8958_MICD_DBTIME                      0x0002  /* MICD_DBTIME */
+#define WM8958_MICD_DBTIME_MASK                 0x0002  /* MICD_DBTIME */
+#define WM8958_MICD_DBTIME_SHIFT                     1  /* MICD_DBTIME */
+#define WM8958_MICD_DBTIME_WIDTH                     1  /* MICD_DBTIME */
+#define WM8958_MICD_ENA                         0x0001  /* MICD_ENA */
+#define WM8958_MICD_ENA_MASK                    0x0001  /* MICD_ENA */
+#define WM8958_MICD_ENA_SHIFT                        0  /* MICD_ENA */
+#define WM8958_MICD_ENA_WIDTH                        1  /* MICD_ENA */
+
+/*
+ * R209 (0xD1) - Mic Detect 2
+ */
+#define WM8958_MICD_LVL_SEL_MASK                0x00FF  /* MICD_LVL_SEL - [7:0] */
+#define WM8958_MICD_LVL_SEL_SHIFT                    0  /* MICD_LVL_SEL - [7:0] */
+#define WM8958_MICD_LVL_SEL_WIDTH                    8  /* MICD_LVL_SEL - [7:0] */
+
+/*
+ * R210 (0xD2) - Mic Detect 3
+ */
+#define WM8958_MICD_LVL_MASK                    0x07FC  /* MICD_LVL - [10:2] */
+#define WM8958_MICD_LVL_SHIFT                        2  /* MICD_LVL - [10:2] */
+#define WM8958_MICD_LVL_WIDTH                        9  /* MICD_LVL - [10:2] */
+#define WM8958_MICD_VALID                       0x0002  /* MICD_VALID */
+#define WM8958_MICD_VALID_MASK                  0x0002  /* MICD_VALID */
+#define WM8958_MICD_VALID_SHIFT                      1  /* MICD_VALID */
+#define WM8958_MICD_VALID_WIDTH                      1  /* MICD_VALID */
+#define WM8958_MICD_STS                         0x0001  /* MICD_STS */
+#define WM8958_MICD_STS_MASK                    0x0001  /* MICD_STS */
+#define WM8958_MICD_STS_SHIFT                        0  /* MICD_STS */
+#define WM8958_MICD_STS_WIDTH                        1  /* MICD_STS */
+
+/*
  * R256 (0x100) - Chip Revision
  */
 #define WM8994_CHIP_REV_MASK                    0x000F  /* CHIP_REV - [3:0] */
@@ -2069,6 +2208,14 @@
 /*
  * R520 (0x208) - Clocking (1)
  */
+#define WM8958_DSP2CLK_ENA                      0x4000  /* DSP2CLK_ENA */
+#define WM8958_DSP2CLK_ENA_MASK                 0x4000  /* DSP2CLK_ENA */
+#define WM8958_DSP2CLK_ENA_SHIFT                    14  /* DSP2CLK_ENA */
+#define WM8958_DSP2CLK_ENA_WIDTH                     1  /* DSP2CLK_ENA */
+#define WM8958_DSP2CLK_SRC                      0x1000  /* DSP2CLK_SRC */
+#define WM8958_DSP2CLK_SRC_MASK                 0x1000  /* DSP2CLK_SRC */
+#define WM8958_DSP2CLK_SRC_SHIFT                    12  /* DSP2CLK_SRC */
+#define WM8958_DSP2CLK_SRC_WIDTH                     1  /* DSP2CLK_SRC */
 #define WM8994_TOCLK_ENA                        0x0010  /* TOCLK_ENA */
 #define WM8994_TOCLK_ENA_MASK                   0x0010  /* TOCLK_ENA */
 #define WM8994_TOCLK_ENA_SHIFT                       4  /* TOCLK_ENA */
@@ -2553,6 +2700,63 @@
 #define WM8994_AIF2ADCR_DAT_INV_WIDTH                1  /* AIF2ADCR_DAT_INV */
 
 /*
+ * R800 (0x320) - AIF3 Control (1)
+ */
+#define WM8958_AIF3_LRCLK_INV                   0x0080  /* AIF3_LRCLK_INV */
+#define WM8958_AIF3_LRCLK_INV_MASK              0x0080  /* AIF3_LRCLK_INV */
+#define WM8958_AIF3_LRCLK_INV_SHIFT                  7  /* AIF3_LRCLK_INV */
+#define WM8958_AIF3_LRCLK_INV_WIDTH                  1  /* AIF3_LRCLK_INV */
+#define WM8958_AIF3_WL_MASK                     0x0060  /* AIF3_WL - [6:5] */
+#define WM8958_AIF3_WL_SHIFT                         5  /* AIF3_WL - [6:5] */
+#define WM8958_AIF3_WL_WIDTH                         2  /* AIF3_WL - [6:5] */
+#define WM8958_AIF3_FMT_MASK                    0x0018  /* AIF3_FMT - [4:3] */
+#define WM8958_AIF3_FMT_SHIFT                        3  /* AIF3_FMT - [4:3] */
+#define WM8958_AIF3_FMT_WIDTH                        2  /* AIF3_FMT - [4:3] */
+
+/*
+ * R801 (0x321) - AIF3 Control (2)
+ */
+#define WM8958_AIF3DAC_BOOST_MASK               0x0C00  /* AIF3DAC_BOOST - [11:10] */
+#define WM8958_AIF3DAC_BOOST_SHIFT                  10  /* AIF3DAC_BOOST - [11:10] */
+#define WM8958_AIF3DAC_BOOST_WIDTH                   2  /* AIF3DAC_BOOST - [11:10] */
+#define WM8958_AIF3DAC_COMP                     0x0010  /* AIF3DAC_COMP */
+#define WM8958_AIF3DAC_COMP_MASK                0x0010  /* AIF3DAC_COMP */
+#define WM8958_AIF3DAC_COMP_SHIFT                    4  /* AIF3DAC_COMP */
+#define WM8958_AIF3DAC_COMP_WIDTH                    1  /* AIF3DAC_COMP */
+#define WM8958_AIF3DAC_COMPMODE                 0x0008  /* AIF3DAC_COMPMODE */
+#define WM8958_AIF3DAC_COMPMODE_MASK            0x0008  /* AIF3DAC_COMPMODE */
+#define WM8958_AIF3DAC_COMPMODE_SHIFT                3  /* AIF3DAC_COMPMODE */
+#define WM8958_AIF3DAC_COMPMODE_WIDTH                1  /* AIF3DAC_COMPMODE */
+#define WM8958_AIF3ADC_COMP                     0x0004  /* AIF3ADC_COMP */
+#define WM8958_AIF3ADC_COMP_MASK                0x0004  /* AIF3ADC_COMP */
+#define WM8958_AIF3ADC_COMP_SHIFT                    2  /* AIF3ADC_COMP */
+#define WM8958_AIF3ADC_COMP_WIDTH                    1  /* AIF3ADC_COMP */
+#define WM8958_AIF3ADC_COMPMODE                 0x0002  /* AIF3ADC_COMPMODE */
+#define WM8958_AIF3ADC_COMPMODE_MASK            0x0002  /* AIF3ADC_COMPMODE */
+#define WM8958_AIF3ADC_COMPMODE_SHIFT                1  /* AIF3ADC_COMPMODE */
+#define WM8958_AIF3ADC_COMPMODE_WIDTH                1  /* AIF3ADC_COMPMODE */
+#define WM8958_AIF3_LOOPBACK                    0x0001  /* AIF3_LOOPBACK */
+#define WM8958_AIF3_LOOPBACK_MASK               0x0001  /* AIF3_LOOPBACK */
+#define WM8958_AIF3_LOOPBACK_SHIFT                   0  /* AIF3_LOOPBACK */
+#define WM8958_AIF3_LOOPBACK_WIDTH                   1  /* AIF3_LOOPBACK */
+
+/*
+ * R802 (0x322) - AIF3DAC Data
+ */
+#define WM8958_AIF3DAC_DAT_INV                  0x0001  /* AIF3DAC_DAT_INV */
+#define WM8958_AIF3DAC_DAT_INV_MASK             0x0001  /* AIF3DAC_DAT_INV */
+#define WM8958_AIF3DAC_DAT_INV_SHIFT                 0  /* AIF3DAC_DAT_INV */
+#define WM8958_AIF3DAC_DAT_INV_WIDTH                 1  /* AIF3DAC_DAT_INV */
+
+/*
+ * R803 (0x323) - AIF3ADC Data
+ */
+#define WM8958_AIF3ADC_DAT_INV                  0x0001  /* AIF3ADC_DAT_INV */
+#define WM8958_AIF3ADC_DAT_INV_MASK             0x0001  /* AIF3ADC_DAT_INV */
+#define WM8958_AIF3ADC_DAT_INV_SHIFT                 0  /* AIF3ADC_DAT_INV */
+#define WM8958_AIF3ADC_DAT_INV_WIDTH                 1  /* AIF3ADC_DAT_INV */
+
+/*
  * R1024 (0x400) - AIF1 ADC1 Left Volume
  */
 #define WM8994_AIF1ADC1_VU                      0x0100  /* AIF1ADC1_VU */
@@ -4289,4 +4493,102 @@
 #define WM8994_TEMP_SHUT_DB_SHIFT                    0  /* TEMP_SHUT_DB */
 #define WM8994_TEMP_SHUT_DB_WIDTH                    1  /* TEMP_SHUT_DB */
 
+/*
+ * R2304 (0x900) - DSP2_Program
+ */
+#define WM8958_DSP2_ENA                         0x0001  /* DSP2_ENA */
+#define WM8958_DSP2_ENA_MASK                    0x0001  /* DSP2_ENA */
+#define WM8958_DSP2_ENA_SHIFT                        0  /* DSP2_ENA */
+#define WM8958_DSP2_ENA_WIDTH                        1  /* DSP2_ENA */
+
+/*
+ * R2305 (0x901) - DSP2_Config
+ */
+#define WM8958_MBC_SEL_MASK                     0x0030  /* MBC_SEL - [5:4] */
+#define WM8958_MBC_SEL_SHIFT                         4  /* MBC_SEL - [5:4] */
+#define WM8958_MBC_SEL_WIDTH                         2  /* MBC_SEL - [5:4] */
+#define WM8958_MBC_ENA                          0x0001  /* MBC_ENA */
+#define WM8958_MBC_ENA_MASK                     0x0001  /* MBC_ENA */
+#define WM8958_MBC_ENA_SHIFT                         0  /* MBC_ENA */
+#define WM8958_MBC_ENA_WIDTH                         1  /* MBC_ENA */
+
+/*
+ * R2560 (0xA00) - DSP2_MagicNum
+ */
+#define WM8958_DSP2_MAGIC_NUM_MASK              0xFFFF  /* DSP2_MAGIC_NUM - [15:0] */
+#define WM8958_DSP2_MAGIC_NUM_SHIFT                  0  /* DSP2_MAGIC_NUM - [15:0] */
+#define WM8958_DSP2_MAGIC_NUM_WIDTH                 16  /* DSP2_MAGIC_NUM - [15:0] */
+
+/*
+ * R2561 (0xA01) - DSP2_ReleaseYear
+ */
+#define WM8958_DSP2_RELEASE_YEAR_MASK           0xFFFF  /* DSP2_RELEASE_YEAR - [15:0] */
+#define WM8958_DSP2_RELEASE_YEAR_SHIFT               0  /* DSP2_RELEASE_YEAR - [15:0] */
+#define WM8958_DSP2_RELEASE_YEAR_WIDTH              16  /* DSP2_RELEASE_YEAR - [15:0] */
+
+/*
+ * R2562 (0xA02) - DSP2_ReleaseMonthDay
+ */
+#define WM8958_DSP2_RELEASE_MONTH_MASK          0xFF00  /* DSP2_RELEASE_MONTH - [15:8] */
+#define WM8958_DSP2_RELEASE_MONTH_SHIFT              8  /* DSP2_RELEASE_MONTH - [15:8] */
+#define WM8958_DSP2_RELEASE_MONTH_WIDTH              8  /* DSP2_RELEASE_MONTH - [15:8] */
+#define WM8958_DSP2_RELEASE_DAY_MASK            0x00FF  /* DSP2_RELEASE_DAY - [7:0] */
+#define WM8958_DSP2_RELEASE_DAY_SHIFT                0  /* DSP2_RELEASE_DAY - [7:0] */
+#define WM8958_DSP2_RELEASE_DAY_WIDTH                8  /* DSP2_RELEASE_DAY - [7:0] */
+
+/*
+ * R2563 (0xA03) - DSP2_ReleaseTime
+ */
+#define WM8958_DSP2_RELEASE_HOURS_MASK          0xFF00  /* DSP2_RELEASE_HOURS - [15:8] */
+#define WM8958_DSP2_RELEASE_HOURS_SHIFT              8  /* DSP2_RELEASE_HOURS - [15:8] */
+#define WM8958_DSP2_RELEASE_HOURS_WIDTH              8  /* DSP2_RELEASE_HOURS - [15:8] */
+#define WM8958_DSP2_RELEASE_MINS_MASK           0x00FF  /* DSP2_RELEASE_MINS - [7:0] */
+#define WM8958_DSP2_RELEASE_MINS_SHIFT               0  /* DSP2_RELEASE_MINS - [7:0] */
+#define WM8958_DSP2_RELEASE_MINS_WIDTH               8  /* DSP2_RELEASE_MINS - [7:0] */
+
+/*
+ * R2564 (0xA04) - DSP2_VerMajMin
+ */
+#define WM8958_DSP2_MAJOR_VER_MASK              0xFF00  /* DSP2_MAJOR_VER - [15:8] */
+#define WM8958_DSP2_MAJOR_VER_SHIFT                  8  /* DSP2_MAJOR_VER - [15:8] */
+#define WM8958_DSP2_MAJOR_VER_WIDTH                  8  /* DSP2_MAJOR_VER - [15:8] */
+#define WM8958_DSP2_MINOR_VER_MASK              0x00FF  /* DSP2_MINOR_VER - [7:0] */
+#define WM8958_DSP2_MINOR_VER_SHIFT                  0  /* DSP2_MINOR_VER - [7:0] */
+#define WM8958_DSP2_MINOR_VER_WIDTH                  8  /* DSP2_MINOR_VER - [7:0] */
+
+/*
+ * R2565 (0xA05) - DSP2_VerBuild
+ */
+#define WM8958_DSP2_BUILD_VER_MASK              0xFFFF  /* DSP2_BUILD_VER - [15:0] */
+#define WM8958_DSP2_BUILD_VER_SHIFT                  0  /* DSP2_BUILD_VER - [15:0] */
+#define WM8958_DSP2_BUILD_VER_WIDTH                 16  /* DSP2_BUILD_VER - [15:0] */
+
+/*
+ * R2573 (0xA0D) - DSP2_ExecControl
+ */
+#define WM8958_DSP2_STOPC                       0x0020  /* DSP2_STOPC */
+#define WM8958_DSP2_STOPC_MASK                  0x0020  /* DSP2_STOPC */
+#define WM8958_DSP2_STOPC_SHIFT                      5  /* DSP2_STOPC */
+#define WM8958_DSP2_STOPC_WIDTH                      1  /* DSP2_STOPC */
+#define WM8958_DSP2_STOPS                       0x0010  /* DSP2_STOPS */
+#define WM8958_DSP2_STOPS_MASK                  0x0010  /* DSP2_STOPS */
+#define WM8958_DSP2_STOPS_SHIFT                      4  /* DSP2_STOPS */
+#define WM8958_DSP2_STOPS_WIDTH                      1  /* DSP2_STOPS */
+#define WM8958_DSP2_STOPI                       0x0008  /* DSP2_STOPI */
+#define WM8958_DSP2_STOPI_MASK                  0x0008  /* DSP2_STOPI */
+#define WM8958_DSP2_STOPI_SHIFT                      3  /* DSP2_STOPI */
+#define WM8958_DSP2_STOPI_WIDTH                      1  /* DSP2_STOPI */
+#define WM8958_DSP2_STOP                        0x0004  /* DSP2_STOP */
+#define WM8958_DSP2_STOP_MASK                   0x0004  /* DSP2_STOP */
+#define WM8958_DSP2_STOP_SHIFT                       2  /* DSP2_STOP */
+#define WM8958_DSP2_STOP_WIDTH                       1  /* DSP2_STOP */
+#define WM8958_DSP2_RUNR                        0x0002  /* DSP2_RUNR */
+#define WM8958_DSP2_RUNR_MASK                   0x0002  /* DSP2_RUNR */
+#define WM8958_DSP2_RUNR_SHIFT                       1  /* DSP2_RUNR */
+#define WM8958_DSP2_RUNR_WIDTH                       1  /* DSP2_RUNR */
+#define WM8958_DSP2_RUN                         0x0001  /* DSP2_RUN */
+#define WM8958_DSP2_RUN_MASK                    0x0001  /* DSP2_RUN */
+#define WM8958_DSP2_RUN_SHIFT                        0  /* DSP2_RUN */
+#define WM8958_DSP2_RUN_WIDTH                        1  /* DSP2_RUN */
+
 #endif
diff --git a/include/linux/mmc/dw_mmc.h b/include/linux/mmc/dw_mmc.h
new file mode 100644
index 0000000..16b0261
--- /dev/null
+++ b/include/linux/mmc/dw_mmc.h
@@ -0,0 +1,217 @@
+/*
+ * Synopsys DesignWare Multimedia Card Interface driver
+ *  (Based on NXP driver for lpc 31xx)
+ *
+ * Copyright (C) 2009 NXP Semiconductors
+ * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef _LINUX_MMC_DW_MMC_H_
+#define _LINUX_MMC_DW_MMC_H_
+
+#define MAX_MCI_SLOTS	2
+
+enum dw_mci_state {
+	STATE_IDLE = 0,
+	STATE_SENDING_CMD,
+	STATE_SENDING_DATA,
+	STATE_DATA_BUSY,
+	STATE_SENDING_STOP,
+	STATE_DATA_ERROR,
+};
+
+enum {
+	EVENT_CMD_COMPLETE = 0,
+	EVENT_XFER_COMPLETE,
+	EVENT_DATA_COMPLETE,
+	EVENT_DATA_ERROR,
+	EVENT_XFER_ERROR
+};
+
+struct mmc_data;
+
+/**
+ * struct dw_mci - MMC controller state shared between all slots
+ * @lock: Spinlock protecting the queue and associated data.
+ * @regs: Pointer to MMIO registers.
+ * @sg: Scatterlist entry currently being processed by PIO code, if any.
+ * @pio_offset: Offset into the current scatterlist entry.
+ * @cur_slot: The slot which is currently using the controller.
+ * @mrq: The request currently being processed on @cur_slot,
+ *	or NULL if the controller is idle.
+ * @cmd: The command currently being sent to the card, or NULL.
+ * @data: The data currently being transferred, or NULL if no data
+ *	transfer is in progress.
+ * @use_dma: Whether DMA channel is initialized or not.
+ * @sg_dma: Bus address of DMA buffer.
+ * @sg_cpu: Virtual address of DMA buffer.
+ * @dma_ops: Pointer to platform-specific DMA callbacks.
+ * @cmd_status: Snapshot of SR taken upon completion of the current
+ *	command. Only valid when EVENT_CMD_COMPLETE is pending.
+ * @data_status: Snapshot of SR taken upon completion of the current
+ *	data transfer. Only valid when EVENT_DATA_COMPLETE or
+ *	EVENT_DATA_ERROR is pending.
+ * @stop_cmdr: Value to be loaded into CMDR when the stop command is
+ *	to be sent.
+ * @dir_status: Direction of current transfer.
+ * @tasklet: Tasklet running the request state machine.
+ * @card_tasklet: Tasklet handling card detect.
+ * @pending_events: Bitmask of events flagged by the interrupt handler
+ *	to be processed by the tasklet.
+ * @completed_events: Bitmask of events which the state machine has
+ *	processed.
+ * @state: Tasklet state.
+ * @queue: List of slots waiting for access to the controller.
+ * @bus_hz: The rate of @mck in Hz. This forms the basis for MMC bus
+ *	rate and timeout calculations.
+ * @current_speed: Configured rate of the controller.
+ * @num_slots: Number of slots available.
+ * @pdev: Platform device associated with the MMC controller.
+ * @pdata: Platform data associated with the MMC controller.
+ * @slot: Slots sharing this MMC controller.
+ * @data_shift: log2 of FIFO item size.
+ * @push_data: Pointer to FIFO push function.
+ * @pull_data: Pointer to FIFO pull function.
+ * @quirks: Set of quirks that apply to specific versions of the IP.
+ *
+ * Locking
+ * =======
+ *
+ * @lock is a softirq-safe spinlock protecting @queue as well as
+ * @cur_slot, @mrq and @state. These must always be updated
+ * at the same time while holding @lock.
+ *
+ * The @mrq field of struct dw_mci_slot is also protected by @lock,
+ * and must always be written at the same time as the slot is added to
+ * @queue.
+ *
+ * @pending_events and @completed_events are accessed using atomic bit
+ * operations, so they don't need any locking.
+ *
+ * None of the fields touched by the interrupt handler need any
+ * locking. However, ordering is important: Before EVENT_DATA_ERROR or
+ * EVENT_DATA_COMPLETE is set in @pending_events, all data-related
+ * interrupts must be disabled and @data_status updated with a
+ * snapshot of SR. Similarly, before EVENT_CMD_COMPLETE is set, the
+ * CMDRDY interrupt must be disabled and @cmd_status updated with a
+ * snapshot of SR, and before EVENT_XFER_COMPLETE can be set, the
+ * bytes_xfered field of @data must be written. This is ensured by
+ * using barriers.
+ */
+struct dw_mci {
+	spinlock_t		lock;
+	void __iomem		*regs;
+
+	struct scatterlist	*sg;
+	unsigned int		pio_offset;
+
+	struct dw_mci_slot	*cur_slot;
+	struct mmc_request	*mrq;
+	struct mmc_command	*cmd;
+	struct mmc_data		*data;
+
+	/* DMA interface members */
+	int			use_dma;
+
+	dma_addr_t		sg_dma;
+	void			*sg_cpu;
+	struct dw_mci_dma_ops	*dma_ops;
+#ifdef CONFIG_MMC_DW_IDMAC
+	unsigned int		ring_size;
+#else
+	struct dw_mci_dma_data	*dma_data;
+#endif
+	u32			cmd_status;
+	u32			data_status;
+	u32			stop_cmdr;
+	u32			dir_status;
+	struct tasklet_struct	tasklet;
+	struct tasklet_struct	card_tasklet;
+	unsigned long		pending_events;
+	unsigned long		completed_events;
+	enum dw_mci_state	state;
+	struct list_head	queue;
+
+	u32			bus_hz;
+	u32			current_speed;
+	u32			num_slots;
+	struct platform_device	*pdev;
+	struct dw_mci_board	*pdata;
+	struct dw_mci_slot	*slot[MAX_MCI_SLOTS];
+
+	/* FIFO push and pull */
+	int			data_shift;
+	void (*push_data)(struct dw_mci *host, void *buf, int cnt);
+	void (*pull_data)(struct dw_mci *host, void *buf, int cnt);
+
+	/* Workaround flags */
+	u32			quirks;
+};
+
+/* DMA ops for Internal/External DMAC interface */
+struct dw_mci_dma_ops {
+	/* DMA Ops */
+	int (*init)(struct dw_mci *host);
+	void (*start)(struct dw_mci *host, unsigned int sg_len);
+	void (*complete)(struct dw_mci *host);
+	void (*stop)(struct dw_mci *host);
+	void (*cleanup)(struct dw_mci *host);
+	void (*exit)(struct dw_mci *host);
+};
+
+/* IP Quirks/flags. */
+/* No special quirks or flags to cater for */
+#define DW_MCI_QUIRK_NONE		0
+/* DTO fix for command transmission with IDMAC configured */
+#define DW_MCI_QUIRK_IDMAC_DTO		1
+/* delay needed between retries on some 2.11a implementations */
+#define DW_MCI_QUIRK_RETRY_DELAY	2
+/* High Speed Capable - Supports HS cards (up to 50 MHz) */
+#define DW_MCI_QUIRK_HIGHSPEED		4
+
+
+struct dma_pdata;
+
+struct block_settings {
+	unsigned short	max_segs;	/* see blk_queue_max_segments */
+	unsigned int	max_blk_size;	/* maximum size of one mmc block */
+	unsigned int	max_blk_count;	/* maximum number of blocks in one req */
+	unsigned int	max_req_size;	/* maximum number of bytes in one req */
+	unsigned int	max_seg_size;	/* see blk_queue_max_segment_size */
+};
+
+/* Board platform data */
+struct dw_mci_board {
+	u32 num_slots;
+
+	u32 quirks; /* Workaround / Quirk flags */
+	unsigned int bus_hz; /* Bus speed */
+
+	/* delay in ms before detecting cards after interrupt */
+	u32 detect_delay_ms;
+
+	int (*init)(u32 slot_id, irq_handler_t, void *);
+	int (*get_ro)(u32 slot_id);
+	int (*get_cd)(u32 slot_id);
+	int (*get_ocr)(u32 slot_id);
+	int (*get_bus_wd)(u32 slot_id);
+	/*
+	 * Enable power to selected slot and set voltage to desired level.
+	 * Voltage levels are specified using MMC_VDD_xxx defines defined
+	 * in linux/mmc/host.h file.
+	 */
+	void (*setpower)(u32 slot_id, u32 volt);
+	void (*exit)(u32 slot_id);
+	void (*select_slot)(u32 slot_id);
+
+	struct dw_mci_dma_ops *dma_ops;
+	struct dma_pdata *data;
+	struct block_settings *blk_settings;
+};
+
+#endif /* _LINUX_MMC_DW_MMC_H_ */
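
As a hedged illustration of how a board file might hook into this interface, the sketch below fills in a dw_mci_board instance; the function names and behaviour are hypothetical, not taken from any in-tree board support.

/* Hypothetical board glue -- names and return values are made up. */
static int example_dw_mci_init(u32 slot_id, irq_handler_t handler, void *data)
{
	/* A real board would request its card-detect GPIO/IRQ here. */
	return 0;
}

static int example_dw_mci_get_ro(u32 slot_id)
{
	return 0;	/* assume the slot is never write-protected */
}

static struct dw_mci_board example_dw_mci_pdata = {
	.num_slots	 = 1,
	.quirks		 = DW_MCI_QUIRK_HIGHSPEED,
	.bus_hz		 = 50 * 1000 * 1000,
	.detect_delay_ms = 200,
	.init		 = example_dw_mci_init,
	.get_ro		 = example_dw_mci_get_ro,
};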
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index 30f6fad..bcb793e 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -131,6 +131,9 @@
 	unsigned int		f_max;
 	unsigned int		f_init;
 	u32			ocr_avail;
+	u32			ocr_avail_sdio;	/* SDIO-specific OCR */
+	u32			ocr_avail_sd;	/* SD-specific OCR */
+	u32			ocr_avail_mmc;	/* MMC-specific OCR */
 	struct notifier_block	pm_notify;
 
 #define MMC_VDD_165_195		0x00000080	/* VDD voltage 1.65 - 1.95 */
@@ -169,9 +172,20 @@
 #define MMC_CAP_1_2V_DDR	(1 << 12)	/* can support */
 						/* DDR mode at 1.2V */
 #define MMC_CAP_POWER_OFF_CARD	(1 << 13)	/* Can power off after boot */
+#define MMC_CAP_BUS_WIDTH_TEST	(1 << 14)	/* CMD14/CMD19 bus width ok */
 
 	mmc_pm_flag_t		pm_caps;	/* supported pm features */
 
+#ifdef CONFIG_MMC_CLKGATE
+	int			clk_requests;	/* internal reference counter */
+	unsigned int		clk_delay;	/* number of MCI clk hold cycles */
+	bool			clk_gated;	/* clock gated */
+	struct work_struct	clk_gate_work; /* delayed clock gate */
+	unsigned int		clk_old;	/* old clock value cache */
+	spinlock_t		clk_lock;	/* lock for clk fields */
+	struct mutex		clk_gate_mutex;	/* mutex for clock gating */
+#endif
+
 	/* host specific block data */
 	unsigned int		max_seg_size;	/* see blk_queue_max_segment_size */
 	unsigned short		max_segs;	/* see blk_queue_max_segments */
@@ -307,5 +321,10 @@
 	return !(host->caps & MMC_CAP_NONREMOVABLE) && mmc_assume_removable;
 }
 
+static inline int mmc_card_is_powered_resumed(struct mmc_host *host)
+{
+	return host->pm_flags & MMC_PM_KEEP_POWER;
+}
+
 #endif
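
A minimal sketch of how a host driver could populate the new per-bus OCR masks added above; the driver name and the voltage windows chosen here are illustrative assumptions only.

/* Hypothetical host driver fragment; voltage windows are illustrative. */
static void example_set_ocr_masks(struct mmc_host *mmc)
{
	mmc->ocr_avail      = MMC_VDD_32_33 | MMC_VDD_33_34;
	mmc->ocr_avail_sd   = mmc->ocr_avail;
	mmc->ocr_avail_mmc  = mmc->ocr_avail;
	/* SDIO cards on this (imaginary) board may also run at 1.8 V. */
	mmc->ocr_avail_sdio = mmc->ocr_avail | MMC_VDD_165_195;
}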
 
diff --git a/include/linux/mmc/mmc.h b/include/linux/mmc/mmc.h
index 956fbd87..612301f 100644
--- a/include/linux/mmc/mmc.h
+++ b/include/linux/mmc/mmc.h
@@ -40,7 +40,9 @@
 #define MMC_READ_DAT_UNTIL_STOP  11   /* adtc [31:0] dadr        R1  */
 #define MMC_STOP_TRANSMISSION    12   /* ac                      R1b */
 #define MMC_SEND_STATUS          13   /* ac   [31:16] RCA        R1  */
+#define MMC_BUS_TEST_R           14   /* adtc                    R1  */
 #define MMC_GO_INACTIVE_STATE    15   /* ac   [31:16] RCA            */
+#define MMC_BUS_TEST_W           19   /* adtc                    R1  */
 #define MMC_SPI_READ_OCR         58   /* spi                  spi_R3 */
 #define MMC_SPI_CRC_ON_OFF       59   /* spi  [0:0] flag      spi_R1 */
 
diff --git a/include/linux/mmc/sdhci.h b/include/linux/mmc/sdhci.h
index 1fdc673..83bd9f7 100644
--- a/include/linux/mmc/sdhci.h
+++ b/include/linux/mmc/sdhci.h
@@ -83,6 +83,8 @@
 #define SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12		(1<<28)
 /* Controller doesn't have HISPD bit field in HI-SPEED SD card */
 #define SDHCI_QUIRK_NO_HISPD_BIT			(1<<29)
+/* Controller treats ADMA descriptors with length 0000h incorrectly */
+#define SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC		(1<<30)
 
 	int irq;		/* Device IRQ */
 	void __iomem *ioaddr;	/* Mapped address */
@@ -139,6 +141,10 @@
 
 	unsigned int caps;	/* Alternative capabilities */
 
+	unsigned int            ocr_avail_sdio;	/* OCR bit masks */
+	unsigned int            ocr_avail_sd;
+	unsigned int            ocr_avail_mmc;
+
 	unsigned long private[0] ____cacheline_aligned;
 };
 #endif /* __SDHCI_H */
diff --git a/include/linux/ncp_fs.h b/include/linux/ncp_fs.h
index 1c27f20..e13eefe 100644
--- a/include/linux/ncp_fs.h
+++ b/include/linux/ncp_fs.h
@@ -143,104 +143,4 @@
 #define NCP_MAXPATHLEN 255
 #define NCP_MAXNAMELEN 14
 
-#ifdef __KERNEL__
-
-#include <linux/ncp_fs_i.h>
-#include <linux/ncp_fs_sb.h>
-
-/* define because it is easy to change PRINTK to {*}PRINTK */
-#define PRINTK(format, args...) printk(KERN_DEBUG format , ## args)
-
-#undef NCPFS_PARANOIA
-#ifdef NCPFS_PARANOIA
-#define PPRINTK(format, args...) PRINTK(format , ## args)
-#else
-#define PPRINTK(format, args...)
-#endif
-
-#ifndef DEBUG_NCP
-#define DEBUG_NCP 0
-#endif
-#if DEBUG_NCP > 0
-#define DPRINTK(format, args...) PRINTK(format , ## args)
-#else
-#define DPRINTK(format, args...)
-#endif
-#if DEBUG_NCP > 1
-#define DDPRINTK(format, args...) PRINTK(format , ## args)
-#else
-#define DDPRINTK(format, args...)
-#endif
-
-#define NCP_MAX_RPC_TIMEOUT (6*HZ)
-
-
-struct ncp_entry_info {
-	struct nw_info_struct	i;
-	ino_t			ino;
-	int			opened;
-	int			access;
-	unsigned int		volume;
-	__u8			file_handle[6];
-};
-
-static inline struct ncp_server *NCP_SBP(const struct super_block *sb)
-{
-	return sb->s_fs_info;
-}
-
-#define NCP_SERVER(inode)	NCP_SBP((inode)->i_sb)
-static inline struct ncp_inode_info *NCP_FINFO(const struct inode *inode)
-{
-	return container_of(inode, struct ncp_inode_info, vfs_inode);
-}
-
-/* linux/fs/ncpfs/inode.c */
-int ncp_notify_change(struct dentry *, struct iattr *);
-struct inode *ncp_iget(struct super_block *, struct ncp_entry_info *);
-void ncp_update_inode(struct inode *, struct ncp_entry_info *);
-void ncp_update_inode2(struct inode *, struct ncp_entry_info *);
-
-/* linux/fs/ncpfs/dir.c */
-extern const struct inode_operations ncp_dir_inode_operations;
-extern const struct file_operations ncp_dir_operations;
-extern const struct dentry_operations ncp_root_dentry_operations;
-int ncp_conn_logged_in(struct super_block *);
-int ncp_date_dos2unix(__le16 time, __le16 date);
-void ncp_date_unix2dos(int unix_date, __le16 * time, __le16 * date);
-
-/* linux/fs/ncpfs/ioctl.c */
-long ncp_ioctl(struct file *, unsigned int, unsigned long);
-long ncp_compat_ioctl(struct file *, unsigned int, unsigned long);
-
-/* linux/fs/ncpfs/sock.c */
-int ncp_request2(struct ncp_server *server, int function,
-	void* reply, int max_reply_size);
-static inline int ncp_request(struct ncp_server *server, int function) {
-	return ncp_request2(server, function, server->packet, server->packet_size);
-}
-int ncp_connect(struct ncp_server *server);
-int ncp_disconnect(struct ncp_server *server);
-void ncp_lock_server(struct ncp_server *server);
-void ncp_unlock_server(struct ncp_server *server);
-
-/* linux/fs/ncpfs/symlink.c */
-#if defined(CONFIG_NCPFS_EXTRAS) || defined(CONFIG_NCPFS_NFS_NS)
-extern const struct address_space_operations ncp_symlink_aops;
-int ncp_symlink(struct inode*, struct dentry*, const char*);
-#endif
-
-/* linux/fs/ncpfs/file.c */
-extern const struct inode_operations ncp_file_inode_operations;
-extern const struct file_operations ncp_file_operations;
-int ncp_make_open(struct inode *, int);
-
-/* linux/fs/ncpfs/mmap.c */
-int ncp_mmap(struct file *, struct vm_area_struct *);
-
-/* linux/fs/ncpfs/ncplib_kernel.c */
-int ncp_make_closed(struct inode *);
-
-#endif				/* __KERNEL__ */
-
 #endif				/* _LINUX_NCP_FS_H */
diff --git a/include/linux/ncp_mount.h b/include/linux/ncp_mount.h
index a2b549e..dfcbea2 100644
--- a/include/linux/ncp_mount.h
+++ b/include/linux/ncp_mount.h
@@ -68,26 +68,4 @@
 
 #define NCP_MOUNT_VERSION_V5	(5)	/* Text only */
 
-#ifdef __KERNEL__
-
-struct ncp_mount_data_kernel {
-	unsigned long    flags;		/* NCP_MOUNT_* flags */
-	unsigned int	 int_flags;	/* internal flags */
-#define NCP_IMOUNT_LOGGEDIN_POSSIBLE	0x0001
-	__kernel_uid32_t mounted_uid;	/* Who may umount() this filesystem? */
-	struct pid      *wdog_pid;	/* Who cares for our watchdog packets? */
-	unsigned int     ncp_fd;	/* The socket to the ncp port */
-	unsigned int     time_out;	/* How long should I wait after
-					   sending a NCP request? */
-	unsigned int     retry_count;	/* And how often should I retry? */
-	unsigned char	 mounted_vol[NCP_VOLNAME_LEN + 1];
-	__kernel_uid32_t uid;
-	__kernel_gid32_t gid;
-	__kernel_mode_t  file_mode;
-	__kernel_mode_t  dir_mode;
-	int		 info_fd;
-};
-
-#endif /* __KERNEL__ */
-
 #endif
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 0f6b1c9..be4957c 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -2191,11 +2191,15 @@
 extern void		ether_setup(struct net_device *dev);
 
 /* Support for loadable net-drivers */
-extern struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
+extern struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
 				       void (*setup)(struct net_device *),
-				       unsigned int queue_count);
+				       unsigned int txqs, unsigned int rxqs);
 #define alloc_netdev(sizeof_priv, name, setup) \
-	alloc_netdev_mq(sizeof_priv, name, setup, 1)
+	alloc_netdev_mqs(sizeof_priv, name, setup, 1, 1)
+
+#define alloc_netdev_mq(sizeof_priv, name, setup, count) \
+	alloc_netdev_mqs(sizeof_priv, name, setup, count, count)
+
 extern int		register_netdev(struct net_device *dev);
 extern void		unregister_netdev(struct net_device *dev);
 
@@ -2303,7 +2307,7 @@
 void netif_stacked_transfer_operstate(const struct net_device *rootdev,
 					struct net_device *dev);
 
-int netif_get_vlan_features(struct sk_buff *skb, struct net_device *dev);
+int netif_skb_features(struct sk_buff *skb);
 
 static inline int net_gso_ok(int features, int gso_type)
 {
@@ -2317,16 +2321,10 @@
 	       (!skb_has_frag_list(skb) || (features & NETIF_F_FRAGLIST));
 }
 
-static inline int netif_needs_gso(struct net_device *dev, struct sk_buff *skb)
+static inline int netif_needs_gso(struct sk_buff *skb, int features)
 {
-	if (skb_is_gso(skb)) {
-		int features = netif_get_vlan_features(skb, dev);
-
-		return (!skb_gso_ok(skb, features) ||
-			unlikely(skb->ip_summed != CHECKSUM_PARTIAL));
-	}
-
-	return 0;
+	return skb_is_gso(skb) && (!skb_gso_ok(skb, features) ||
+		unlikely(skb->ip_summed != CHECKSUM_PARTIAL));
 }
 
 static inline void netif_set_gso_max_size(struct net_device *dev,
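
For illustration, a driver with asymmetric queue counts might allocate its net_device as sketched below; struct example_priv and the queue counts are assumptions, and alloc_netdev()/alloc_netdev_mq() remain available as the single-queue and symmetric wrappers.

/* Hypothetical driver sketch: 8 TX queues, 4 RX queues. */
struct example_priv {
	int dummy;
};

static struct net_device *example_alloc(void)
{
	return alloc_netdev_mqs(sizeof(struct example_priv), "eth%d",
				ether_setup, 8, 4);
}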
diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
index 742bec0..6712e71 100644
--- a/include/linux/netfilter/x_tables.h
+++ b/include/linux/netfilter/x_tables.h
@@ -472,7 +472,7 @@
  *  necessary for reading the counters.
  */
 struct xt_info_lock {
-	spinlock_t lock;
+	seqlock_t lock;
 	unsigned char readers;
 };
 DECLARE_PER_CPU(struct xt_info_lock, xt_info_locks);
@@ -497,7 +497,7 @@
 	local_bh_disable();
 	lock = &__get_cpu_var(xt_info_locks);
 	if (likely(!lock->readers++))
-		spin_lock(&lock->lock);
+		write_seqlock(&lock->lock);
 }
 
 static inline void xt_info_rdunlock_bh(void)
@@ -505,7 +505,7 @@
 	struct xt_info_lock *lock = &__get_cpu_var(xt_info_locks);
 
 	if (likely(!--lock->readers))
-		spin_unlock(&lock->lock);
+		write_sequnlock(&lock->lock);
 	local_bh_enable();
 }
 
@@ -516,12 +516,12 @@
  */
 static inline void xt_info_wrlock(unsigned int cpu)
 {
-	spin_lock(&per_cpu(xt_info_locks, cpu).lock);
+	write_seqlock(&per_cpu(xt_info_locks, cpu).lock);
 }
 
 static inline void xt_info_wrunlock(unsigned int cpu)
 {
-	spin_unlock(&per_cpu(xt_info_locks, cpu).lock);
+	write_sequnlock(&per_cpu(xt_info_locks, cpu).lock);
 }
 
 /*
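
The switch to a seqlock lets a counter reader detect a concurrent writer and retry instead of blocking the packet-processing path. A hedged sketch of such a reader, under the assumption that the caller already has a pointer to the per-CPU xt_counters it wants to snapshot:

/* Sketch only: snapshot one CPU's counters without blocking writers. */
static void example_read_counters(unsigned int cpu,
				  struct xt_counters *counters,
				  u64 *bytes, u64 *packets)
{
	seqlock_t *lock = &per_cpu(xt_info_locks, cpu).lock;
	unsigned int seq;

	do {
		seq = read_seqbegin(lock);
		*bytes   = counters->bcnt;
		*packets = counters->pcnt;
	} while (read_seqretry(lock, seq));
}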
diff --git a/include/linux/nfc/pn544.h b/include/linux/nfc/pn544.h
new file mode 100644
index 0000000..7ab8521
--- /dev/null
+++ b/include/linux/nfc/pn544.h
@@ -0,0 +1,97 @@
+/*
+ * Driver include for the PN544 NFC chip.
+ *
+ * Copyright (C) Nokia Corporation
+ *
+ * Author: Jari Vanhala <ext-jari.vanhala@nokia.com>
+ * Contact: Matti Aaltonen <matti.j.aaltonen@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _PN544_H_
+#define _PN544_H_
+
+#include <linux/i2c.h>
+
+#define PN544_DRIVER_NAME	"pn544"
+#define PN544_MAXWINDOW_SIZE	7
+#define PN544_WINDOW_SIZE	4
+#define PN544_RETRIES		10
+#define PN544_MAX_I2C_TRANSFER	0x0400
+#define PN544_MSG_MAX_SIZE	0x21 /* in normal HCI mode */
+
+/* ioctl */
+#define PN544_CHAR_BASE		'P'
+#define PN544_IOR(num, dtype)	_IOR(PN544_CHAR_BASE, num, dtype)
+#define PN544_IOW(num, dtype)	_IOW(PN544_CHAR_BASE, num, dtype)
+#define PN544_GET_FW_MODE	PN544_IOW(1, unsigned int)
+#define PN544_SET_FW_MODE	PN544_IOW(2, unsigned int)
+#define PN544_GET_DEBUG		PN544_IOW(3, unsigned int)
+#define PN544_SET_DEBUG		PN544_IOW(4, unsigned int)
+
+/* Timing restrictions (ms) */
+#define PN544_RESETVEN_TIME	30 /* 7 */
+#define PN544_PVDDVEN_TIME	0
+#define PN544_VBATVEN_TIME	0
+#define PN544_GPIO4VEN_TIME	0
+#define PN544_WAKEUP_ACK	5
+#define PN544_WAKEUP_GUARD	(PN544_WAKEUP_ACK + 1)
+#define PN544_INACTIVITY_TIME	1000
+#define PN544_INTERFRAME_DELAY	200 /* us */
+#define PN544_BAUDRATE_CHANGE	150 /* us */
+
+/* Debug bits */
+#define PN544_DEBUG_BUF		0x01
+#define PN544_DEBUG_READ	0x02
+#define PN544_DEBUG_WRITE	0x04
+#define PN544_DEBUG_IRQ		0x08
+#define PN544_DEBUG_CALLS	0x10
+#define PN544_DEBUG_MODE	0x20
+
+/* Normal (HCI) mode */
+#define PN544_LLC_HCI_OVERHEAD	3 /* header + crc (to length) */
+#define PN544_LLC_MIN_SIZE	(1 + PN544_LLC_HCI_OVERHEAD) /* length + */
+#define PN544_LLC_MAX_DATA	(PN544_MSG_MAX_SIZE - 2)
+#define PN544_LLC_MAX_HCI_SIZE	(PN544_LLC_MAX_DATA - 2)
+
+struct pn544_llc_packet {
+	unsigned char length; /* of rest of packet */
+	unsigned char header;
+	unsigned char data[PN544_LLC_MAX_DATA]; /* includes crc-ccitt */
+};
+
+/* Firmware upgrade mode */
+#define PN544_FW_HEADER_SIZE	3
+/* max fw transfer is 1024 bytes, but I2C limits it to 0xC0 */
+#define PN544_MAX_FW_DATA	(PN544_MAX_I2C_TRANSFER - PN544_FW_HEADER_SIZE)
+
+struct pn544_fw_packet {
+	unsigned char command; /* status in answer */
+	unsigned char length[2]; /* big-endian order (msf) */
+	unsigned char data[PN544_MAX_FW_DATA];
+};
+
+#ifdef __KERNEL__
+/* board config */
+struct pn544_nfc_platform_data {
+	int (*request_resources) (struct i2c_client *client);
+	void (*free_resources) (void);
+	void (*enable) (int fw);
+	int (*test) (void);
+	void (*disable) (void);
+};
+#endif /* __KERNEL__ */
+
+#endif /* _PN544_H_ */
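
A hedged sketch of board-level wiring for the platform data above; every name and function body here is hypothetical and only shows the shape of the callbacks.

/* Hypothetical board glue -- not from any in-tree board file. */
static void example_pn544_enable(int fw)
{
	/* A real board would drive the enable/firmware GPIOs here. */
}

static int example_pn544_test(void)
{
	return 0;	/* pretend the self-test always passes */
}

static struct pn544_nfc_platform_data example_pn544_pdata = {
	.enable	= example_pn544_enable,
	.test	= example_pn544_test,
};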
diff --git a/include/linux/nfs3.h b/include/linux/nfs3.h
index ac33806..6ccfe3b 100644
--- a/include/linux/nfs3.h
+++ b/include/linux/nfs3.h
@@ -11,6 +11,9 @@
 #define NFS3_MAXGROUPS		16
 #define NFS3_FHSIZE		64
 #define NFS3_COOKIESIZE		4
+#define NFS3_CREATEVERFSIZE	8
+#define NFS3_COOKIEVERFSIZE	8
+#define NFS3_WRITEVERFSIZE	8
 #define NFS3_FIFO_DEV		(-1)
 #define NFS3MODE_FMT		0170000
 #define NFS3MODE_DIR		0040000
diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h
index 4925b22..9b46300 100644
--- a/include/linux/nfs4.h
+++ b/include/linux/nfs4.h
@@ -111,9 +111,13 @@
 
 #define EXCHGID4_FLAG_SUPP_MOVED_REFER		0x00000001
 #define EXCHGID4_FLAG_SUPP_MOVED_MIGR		0x00000002
+#define EXCHGID4_FLAG_BIND_PRINC_STATEID	0x00000100
+
 #define EXCHGID4_FLAG_USE_NON_PNFS		0x00010000
 #define EXCHGID4_FLAG_USE_PNFS_MDS		0x00020000
 #define EXCHGID4_FLAG_USE_PNFS_DS		0x00040000
+#define EXCHGID4_FLAG_MASK_PNFS			0x00070000
+
 #define EXCHGID4_FLAG_UPD_CONFIRMED_REC_A	0x40000000
 #define EXCHGID4_FLAG_CONFIRMED_R		0x80000000
 /*
@@ -121,8 +125,8 @@
  * they're set in the argument or response, have separate
  * invalid flag masks for arg (_A) and resp (_R).
  */
-#define EXCHGID4_FLAG_MASK_A			0x40070003
-#define EXCHGID4_FLAG_MASK_R			0x80070003
+#define EXCHGID4_FLAG_MASK_A			0x40070103
+#define EXCHGID4_FLAG_MASK_R			0x80070103
 
 #define SEQ4_STATUS_CB_PATH_DOWN		0x00000001
 #define SEQ4_STATUS_CB_GSS_CONTEXTS_EXPIRING	0x00000002
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index 452d964..b197563 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -47,11 +47,6 @@
 	u64			cl_clientid;	/* constant */
 	unsigned long		cl_state;
 
-	struct rb_root		cl_openowner_id;
-	struct rb_root		cl_lockowner_id;
-
-	struct list_head	cl_delegations;
-	struct rb_root		cl_state_owners;
 	spinlock_t		cl_lock;
 
 	unsigned long		cl_lease_time;
@@ -71,6 +66,7 @@
 	 */
 	char			cl_ipaddr[48];
 	unsigned char		cl_id_uniquifier;
+	u32			cl_cb_ident;	/* v4.0 callback identifier */
 	const struct nfs4_minor_version_ops *cl_mvops;
 #endif /* CONFIG_NFS_V4 */
 
@@ -148,7 +144,14 @@
 						   that are supported on this
 						   filesystem */
 	struct pnfs_layoutdriver_type  *pnfs_curr_ld; /* Active layout driver */
+	struct rpc_wait_queue	roc_rpcwaitq;
+
+	/* the following fields are protected by nfs_client->cl_lock */
+	struct rb_root		state_owners;
+	struct rb_root		openowner_id;
+	struct rb_root		lockowner_id;
 #endif
+	struct list_head	delegations;
 	void (*destroy)(struct nfs_server *);
 
 	atomic_t active; /* Keep trace of any activity to this server */
@@ -196,6 +199,7 @@
 						 * op for dynamic resizing */
 	int		target_max_slots;	/* Set by CB_RECALL_SLOT as
 						 * the new max_slots */
+	struct completion complete;
 };
 
 static inline int slot_idx(struct nfs4_slot_table *tbl, struct nfs4_slot *sp)
@@ -212,7 +216,6 @@
 	unsigned long			session_state;
 	u32				hash_alg;
 	u32				ssv_len;
-	struct completion		complete;
 
 	/* The fore and back channel */
 	struct nfs4_channel_attrs	fc_attrs;
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index 80f0719..b006857 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -208,6 +208,7 @@
 	struct inode *inode;
 	struct nfs_open_context *ctx;
 	struct nfs4_sequence_args seq_args;
+	nfs4_stateid stateid;
 };
 
 struct nfs4_layoutget_res {
@@ -223,7 +224,6 @@
 	struct nfs4_layoutget_args args;
 	struct nfs4_layoutget_res res;
 	struct pnfs_layout_segment **lsegpp;
-	int status;
 };
 
 struct nfs4_getdeviceinfo_args {
@@ -317,6 +317,7 @@
 struct nfs_lowner {
 	__u64			clientid;
 	__u64			id;
+	dev_t			s_dev;
 };
 
 struct nfs_lock_args {
@@ -484,6 +485,7 @@
 	struct nfs_fh *		fh;
 	struct nfs_fattr *	fattr;
 	unsigned char		d_type;
+	struct nfs_server *	server;
 };
 
 /*
@@ -1089,7 +1091,7 @@
 	int	(*pathconf) (struct nfs_server *, struct nfs_fh *,
 			     struct nfs_pathconf *);
 	int	(*set_capabilities)(struct nfs_server *, struct nfs_fh *);
-	__be32 *(*decode_dirent)(struct xdr_stream *, struct nfs_entry *, struct nfs_server *, int plus);
+	int	(*decode_dirent)(struct xdr_stream *, struct nfs_entry *, int);
 	void	(*read_setup)   (struct nfs_read_data *, struct rpc_message *);
 	int	(*read_done)  (struct rpc_task *, struct nfs_read_data *);
 	void	(*write_setup)  (struct nfs_write_data *, struct rpc_message *);
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index cb845c16..ae0dc45 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -518,6 +518,7 @@
 #define PCI_DEVICE_ID_AMD_11H_NB_MISC	0x1303
 #define PCI_DEVICE_ID_AMD_11H_NB_LINK	0x1304
 #define PCI_DEVICE_ID_AMD_15H_NB_MISC	0x1603
+#define PCI_DEVICE_ID_AMD_CNB17H_F3	0x1703
 #define PCI_DEVICE_ID_AMD_LANCE		0x2000
 #define PCI_DEVICE_ID_AMD_LANCE_HOME	0x2001
 #define PCI_DEVICE_ID_AMD_SCSI		0x2020
@@ -1650,6 +1651,11 @@
 #define PCI_DEVICE_ID_O2_6836		0x6836
 #define PCI_DEVICE_ID_O2_6812		0x6872
 #define PCI_DEVICE_ID_O2_6933		0x6933
+#define PCI_DEVICE_ID_O2_8120		0x8120
+#define PCI_DEVICE_ID_O2_8220		0x8220
+#define PCI_DEVICE_ID_O2_8221		0x8221
+#define PCI_DEVICE_ID_O2_8320		0x8320
+#define PCI_DEVICE_ID_O2_8321		0x8321
 
 #define PCI_VENDOR_ID_3DFX		0x121a
 #define PCI_DEVICE_ID_3DFX_VOODOO	0x0001
@@ -2363,6 +2369,10 @@
 #define PCI_DEVICE_ID_JMICRON_JMB38X_SD	0x2381
 #define PCI_DEVICE_ID_JMICRON_JMB38X_MMC 0x2382
 #define PCI_DEVICE_ID_JMICRON_JMB38X_MS	0x2383
+#define PCI_DEVICE_ID_JMICRON_JMB385_MS	0x2388
+#define PCI_DEVICE_ID_JMICRON_JMB388_SD	0x2391
+#define PCI_DEVICE_ID_JMICRON_JMB388_ESD 0x2392
+#define PCI_DEVICE_ID_JMICRON_JMB390_MS	0x2393
 
 #define PCI_VENDOR_ID_KORENIX		0x1982
 #define PCI_DEVICE_ID_KORENIX_JETCARDF0	0x1600
diff --git a/include/linux/poll.h b/include/linux/poll.h
index 56e76af..1a2ccd6 100644
--- a/include/linux/poll.h
+++ b/include/linux/poll.h
@@ -57,7 +57,7 @@
 };
 
 /*
- * Structures and helpers for sys_poll/sys_poll
+ * Structures and helpers for select/poll syscall
  */
 struct poll_wqueues {
 	poll_table pt;
diff --git a/include/linux/pps.h b/include/linux/pps.h
index 0194ab0..a9bb1d9 100644
--- a/include/linux/pps.h
+++ b/include/linux/pps.h
@@ -114,11 +114,18 @@
 	struct pps_ktime timeout;
 };
 
+struct pps_bind_args {
+	int tsformat;	/* format of time stamps */
+	int edge;	/* selected event type */
+	int consumer;	/* selected kernel consumer */
+};
+
 #include <linux/ioctl.h>
 
 #define PPS_GETPARAMS		_IOR('p', 0xa1, struct pps_kparams *)
 #define PPS_SETPARAMS		_IOW('p', 0xa2, struct pps_kparams *)
 #define PPS_GETCAP		_IOR('p', 0xa3, int *)
 #define PPS_FETCH		_IOWR('p', 0xa4, struct pps_fdata *)
+#define PPS_KC_BIND		_IOW('p', 0xa5, struct pps_bind_args *)
 
 #endif /* _PPS_H_ */
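
From userspace, the new ioctl would be used roughly as sketched below on an already-open /dev/ppsN descriptor; the edge, timestamp format, and consumer constants are assumptions taken from the usual PPS API naming, not confirmed by this diff.

/* Userspace sketch: bind a PPS source as the kernel time consumer. */
#include <sys/ioctl.h>
#include <linux/pps.h>

static int example_bind_kernel_consumer(int fd)
{
	struct pps_bind_args bind_args = {
		.tsformat = PPS_TSFMT_TSPEC,
		.edge     = PPS_CAPTUREASSERT,
		.consumer = PPS_KC_HARDPPS,	/* assumed consumer id */
	};

	return ioctl(fd, PPS_KC_BIND, &bind_args);
}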
diff --git a/include/linux/pps_kernel.h b/include/linux/pps_kernel.h
index e0a193f..9404854 100644
--- a/include/linux/pps_kernel.h
+++ b/include/linux/pps_kernel.h
@@ -18,6 +18,9 @@
  *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
+#ifndef LINUX_PPS_KERNEL_H
+#define LINUX_PPS_KERNEL_H
+
 #include <linux/pps.h>
 
 #include <linux/cdev.h>
@@ -28,18 +31,28 @@
  * Global defines
  */
 
+struct pps_device;
+
 /* The specific PPS source info */
 struct pps_source_info {
 	char name[PPS_MAX_NAME_LEN];		/* symbolic name */
 	char path[PPS_MAX_NAME_LEN];		/* path of connected device */
 	int mode;				/* PPS's allowed mode */
 
-	void (*echo)(int source, int event, void *data); /* PPS echo function */
+	void (*echo)(struct pps_device *pps,
+			int event, void *data);	/* PPS echo function */
 
 	struct module *owner;
 	struct device *dev;
 };
 
+struct pps_event_time {
+#ifdef CONFIG_NTP_PPS
+	struct timespec ts_raw;
+#endif /* CONFIG_NTP_PPS */
+	struct timespec ts_real;
+};
+
 /* The main struct */
 struct pps_device {
 	struct pps_source_info info;		/* PPS source info */
@@ -52,38 +65,56 @@
 	struct pps_ktime clear_tu;
 	int current_mode;			/* PPS mode at event time */
 
-	int go;					/* PPS event is arrived? */
+	unsigned int last_ev;			/* last PPS event id */
 	wait_queue_head_t queue;		/* PPS event queue */
 
 	unsigned int id;			/* PPS source unique ID */
 	struct cdev cdev;
 	struct device *dev;
-	int devno;
 	struct fasync_struct *async_queue;	/* fasync method */
 	spinlock_t lock;
-
-	atomic_t usage;				/* usage count */
 };
 
 /*
  * Global variables
  */
 
-extern spinlock_t pps_idr_lock;
-extern struct idr pps_idr;
-extern struct timespec pps_irq_ts[];
-
 extern struct device_attribute pps_attrs[];
 
 /*
  * Exported functions
  */
 
-struct pps_device *pps_get_source(int source);
-extern void pps_put_source(struct pps_device *pps);
-extern int pps_register_source(struct pps_source_info *info,
-				int default_params);
-extern void pps_unregister_source(int source);
+extern struct pps_device *pps_register_source(
+		struct pps_source_info *info, int default_params);
+extern void pps_unregister_source(struct pps_device *pps);
 extern int pps_register_cdev(struct pps_device *pps);
 extern void pps_unregister_cdev(struct pps_device *pps);
-extern void pps_event(int source, struct pps_ktime *ts, int event, void *data);
+extern void pps_event(struct pps_device *pps,
+		struct pps_event_time *ts, int event, void *data);
+
+static inline void timespec_to_pps_ktime(struct pps_ktime *kt,
+		struct timespec ts)
+{
+	kt->sec = ts.tv_sec;
+	kt->nsec = ts.tv_nsec;
+}
+
+#ifdef CONFIG_NTP_PPS
+
+static inline void pps_get_ts(struct pps_event_time *ts)
+{
+	getnstime_raw_and_real(&ts->ts_raw, &ts->ts_real);
+}
+
+#else /* CONFIG_NTP_PPS */
+
+static inline void pps_get_ts(struct pps_event_time *ts)
+{
+	getnstimeofday(&ts->ts_real);
+}
+
+#endif /* CONFIG_NTP_PPS */
+
+#endif /* LINUX_PPS_KERNEL_H */
+
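
A hedged sketch of how a client driver might use the reworked kernel API above: register a source once, then report events from its interrupt handler. All names are hypothetical and the echo callback is omitted.

/* Hypothetical PPS client driver sketch. */
static struct pps_device *example_pps;

static struct pps_source_info example_pps_info = {
	.name	= "example",
	.path	= "",
	.mode	= PPS_CAPTUREASSERT | PPS_OFFSETASSERT |
		  PPS_CANWAIT | PPS_TSFMT_TSPEC,
	.owner	= THIS_MODULE,
};

static irqreturn_t example_irq(int irq, void *data)
{
	struct pps_event_time ts;

	pps_get_ts(&ts);	/* grab the timestamp as early as possible */
	pps_event(example_pps, &ts, PPS_CAPTUREASSERT, NULL);
	return IRQ_HANDLED;
}

/* in probe: */
	example_pps = pps_register_source(&example_pps_info,
					  PPS_CAPTUREASSERT | PPS_OFFSETASSERT);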
diff --git a/include/linux/printk.h b/include/linux/printk.h
index b772ca5..ee048e7 100644
--- a/include/linux/printk.h
+++ b/include/linux/printk.h
@@ -4,14 +4,14 @@
 extern const char linux_banner[];
 extern const char linux_proc_banner[];
 
-#define	KERN_EMERG	"<0>"	/* system is unusable			*/
-#define	KERN_ALERT	"<1>"	/* action must be taken immediately	*/
-#define	KERN_CRIT	"<2>"	/* critical conditions			*/
-#define	KERN_ERR	"<3>"	/* error conditions			*/
-#define	KERN_WARNING	"<4>"	/* warning conditions			*/
-#define	KERN_NOTICE	"<5>"	/* normal but significant condition	*/
-#define	KERN_INFO	"<6>"	/* informational			*/
-#define	KERN_DEBUG	"<7>"	/* debug-level messages			*/
+#define KERN_EMERG	"<0>"	/* system is unusable			*/
+#define KERN_ALERT	"<1>"	/* action must be taken immediately	*/
+#define KERN_CRIT	"<2>"	/* critical conditions			*/
+#define KERN_ERR	"<3>"	/* error conditions			*/
+#define KERN_WARNING	"<4>"	/* warning conditions			*/
+#define KERN_NOTICE	"<5>"	/* normal but significant condition	*/
+#define KERN_INFO	"<6>"	/* informational			*/
+#define KERN_DEBUG	"<7>"	/* debug-level messages			*/
 
 /* Use the default kernel loglevel */
 #define KERN_DEFAULT	"<d>"
@@ -20,7 +20,7 @@
  * line that had no enclosing \n). Only to be used by core/arch code
  * during early bootup (a continued line is not SMP-safe otherwise).
  */
-#define	KERN_CONT	"<c>"
+#define KERN_CONT	"<c>"
 
 extern int console_printk[];
 
@@ -29,6 +29,17 @@
 #define minimum_console_loglevel (console_printk[2])
 #define default_console_loglevel (console_printk[3])
 
+static inline void console_silent(void)
+{
+	console_loglevel = 0;
+}
+
+static inline void console_verbose(void)
+{
+	if (console_loglevel)
+		console_loglevel = 15;
+}
+
 struct va_format {
 	const char *fmt;
 	va_list *va;
@@ -65,11 +76,27 @@
  */
 #define HW_ERR		"[Hardware Error]: "
 
+/*
+ * Dummy printk for disabled debugging statements to use whilst maintaining
+ * gcc's format and side-effect checking.
+ */
+static inline __attribute__ ((format (printf, 1, 2)))
+int no_printk(const char *fmt, ...)
+{
+	return 0;
+}
+
+extern asmlinkage __attribute__ ((format (printf, 1, 2)))
+void early_printk(const char *fmt, ...);
+
+extern int printk_needs_cpu(int cpu);
+extern void printk_tick(void);
+
 #ifdef CONFIG_PRINTK
-asmlinkage int vprintk(const char *fmt, va_list args)
-	__attribute__ ((format (printf, 1, 0)));
-asmlinkage int printk(const char * fmt, ...)
-	__attribute__ ((format (printf, 1, 2))) __cold;
+asmlinkage __attribute__ ((format (printf, 1, 0)))
+int vprintk(const char *fmt, va_list args);
+asmlinkage __attribute__ ((format (printf, 1, 2))) __cold
+int printk(const char *fmt, ...);
 
 /*
  * Please don't use printk_ratelimit(), because it shares ratelimiting state
@@ -83,99 +110,56 @@
 
 extern int printk_delay_msec;
 extern int dmesg_restrict;
-
-/*
- * Print a one-time message (analogous to WARN_ONCE() et al):
- */
-#define printk_once(x...) ({			\
-	static bool __print_once;		\
-						\
-	if (!__print_once) {			\
-		__print_once = true;		\
-		printk(x);			\
-	}					\
-})
+extern int kptr_restrict;
 
 void log_buf_kexec_setup(void);
 #else
-static inline int vprintk(const char *s, va_list args)
-	__attribute__ ((format (printf, 1, 0)));
-static inline int vprintk(const char *s, va_list args) { return 0; }
-static inline int printk(const char *s, ...)
-	__attribute__ ((format (printf, 1, 2)));
-static inline int __cold printk(const char *s, ...) { return 0; }
-static inline int printk_ratelimit(void) { return 0; }
-static inline bool printk_timed_ratelimit(unsigned long *caller_jiffies, \
-					  unsigned int interval_msec)	\
-		{ return false; }
-
-/* No effect, but we still get type checking even in the !PRINTK case: */
-#define printk_once(x...) printk(x)
+static inline __attribute__ ((format (printf, 1, 0)))
+int vprintk(const char *s, va_list args)
+{
+	return 0;
+}
+static inline __attribute__ ((format (printf, 1, 2))) __cold
+int printk(const char *s, ...)
+{
+	return 0;
+}
+static inline int printk_ratelimit(void)
+{
+	return 0;
+}
+static inline bool printk_timed_ratelimit(unsigned long *caller_jiffies,
+					  unsigned int interval_msec)
+{
+	return false;
+}
 
 static inline void log_buf_kexec_setup(void)
 {
 }
 #endif
 
-/*
- * Dummy printk for disabled debugging statements to use whilst maintaining
- * gcc's format and side-effect checking.
- */
-static inline __attribute__ ((format (printf, 1, 2)))
-int no_printk(const char *s, ...) { return 0; }
-
-extern int printk_needs_cpu(int cpu);
-extern void printk_tick(void);
-
-extern void asmlinkage __attribute__((format(printf, 1, 2)))
-	early_printk(const char *fmt, ...);
-
-static inline void console_silent(void)
-{
-	console_loglevel = 0;
-}
-
-static inline void console_verbose(void)
-{
-	if (console_loglevel)
-		console_loglevel = 15;
-}
-
 extern void dump_stack(void) __cold;
 
-enum {
-	DUMP_PREFIX_NONE,
-	DUMP_PREFIX_ADDRESS,
-	DUMP_PREFIX_OFFSET
-};
-extern void hex_dump_to_buffer(const void *buf, size_t len,
-				int rowsize, int groupsize,
-				char *linebuf, size_t linebuflen, bool ascii);
-extern void print_hex_dump(const char *level, const char *prefix_str,
-				int prefix_type, int rowsize, int groupsize,
-				const void *buf, size_t len, bool ascii);
-extern void print_hex_dump_bytes(const char *prefix_str, int prefix_type,
-			const void *buf, size_t len);
-
 #ifndef pr_fmt
 #define pr_fmt(fmt) fmt
 #endif
 
 #define pr_emerg(fmt, ...) \
-        printk(KERN_EMERG pr_fmt(fmt), ##__VA_ARGS__)
+	printk(KERN_EMERG pr_fmt(fmt), ##__VA_ARGS__)
 #define pr_alert(fmt, ...) \
-        printk(KERN_ALERT pr_fmt(fmt), ##__VA_ARGS__)
+	printk(KERN_ALERT pr_fmt(fmt), ##__VA_ARGS__)
 #define pr_crit(fmt, ...) \
-        printk(KERN_CRIT pr_fmt(fmt), ##__VA_ARGS__)
+	printk(KERN_CRIT pr_fmt(fmt), ##__VA_ARGS__)
 #define pr_err(fmt, ...) \
-        printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)
+	printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)
 #define pr_warning(fmt, ...) \
-        printk(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__)
+	printk(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__)
 #define pr_warn pr_warning
 #define pr_notice(fmt, ...) \
-        printk(KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__)
+	printk(KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__)
 #define pr_info(fmt, ...) \
-        printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)
+	printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)
 #define pr_cont(fmt, ...) \
 	printk(KERN_CONT fmt, ##__VA_ARGS__)
 
@@ -185,7 +169,7 @@
 	printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
 #else
 #define pr_devel(fmt, ...) \
-	({ if (0) printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); 0; })
+	no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
 #endif
 
 /* If you are writing a driver, please use dev_dbg instead */
@@ -198,7 +182,51 @@
 	dynamic_pr_debug(fmt, ##__VA_ARGS__)
 #else
 #define pr_debug(fmt, ...) \
-	({ if (0) printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); 0; })
+	no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
+#endif
+
+/*
+ * Print a one-time message (analogous to WARN_ONCE() et al):
+ */
+
+#ifdef CONFIG_PRINTK
+#define printk_once(fmt, ...)			\
+({						\
+	static bool __print_once;		\
+						\
+	if (!__print_once) {			\
+		__print_once = true;		\
+		printk(fmt, ##__VA_ARGS__);	\
+	}					\
+})
+#else
+#define printk_once(fmt, ...)			\
+	no_printk(fmt, ##__VA_ARGS__)
+#endif
+
+#define pr_emerg_once(fmt, ...)					\
+	printk_once(KERN_EMERG pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_alert_once(fmt, ...)					\
+	printk_once(KERN_ALERT pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_crit_once(fmt, ...)					\
+	printk_once(KERN_CRIT pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_err_once(fmt, ...)					\
+	printk_once(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_warn_once(fmt, ...)					\
+	printk_once(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_notice_once(fmt, ...)				\
+	printk_once(KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_info_once(fmt, ...)					\
+	printk_once(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_cont_once(fmt, ...)					\
+	printk_once(KERN_CONT pr_fmt(fmt), ##__VA_ARGS__)
+/* If you are writing a driver, please use dev_dbg instead */
+#if defined(DEBUG)
+#define pr_debug_once(fmt, ...)					\
+	printk_once(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
+#else
+#define pr_debug_once(fmt, ...)					\
+	no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
 #endif
 
 /*
@@ -206,7 +234,8 @@
  * no local ratelimit_state used in the !PRINTK case
  */
 #ifdef CONFIG_PRINTK
-#define printk_ratelimited(fmt, ...)  ({				\
+#define printk_ratelimited(fmt, ...)					\
+({									\
 	static DEFINE_RATELIMIT_STATE(_rs,				\
 				      DEFAULT_RATELIMIT_INTERVAL,	\
 				      DEFAULT_RATELIMIT_BURST);		\
@@ -215,34 +244,59 @@
 		printk(fmt, ##__VA_ARGS__);				\
 })
 #else
-/* No effect, but we still get type checking even in the !PRINTK case: */
-#define printk_ratelimited printk
+#define printk_ratelimited(fmt, ...)					\
+	no_printk(fmt, ##__VA_ARGS__)
 #endif
 
-#define pr_emerg_ratelimited(fmt, ...) \
+#define pr_emerg_ratelimited(fmt, ...)					\
 	printk_ratelimited(KERN_EMERG pr_fmt(fmt), ##__VA_ARGS__)
-#define pr_alert_ratelimited(fmt, ...) \
+#define pr_alert_ratelimited(fmt, ...)					\
 	printk_ratelimited(KERN_ALERT pr_fmt(fmt), ##__VA_ARGS__)
-#define pr_crit_ratelimited(fmt, ...) \
+#define pr_crit_ratelimited(fmt, ...)					\
 	printk_ratelimited(KERN_CRIT pr_fmt(fmt), ##__VA_ARGS__)
-#define pr_err_ratelimited(fmt, ...) \
+#define pr_err_ratelimited(fmt, ...)					\
 	printk_ratelimited(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)
-#define pr_warning_ratelimited(fmt, ...) \
+#define pr_warn_ratelimited(fmt, ...)					\
 	printk_ratelimited(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__)
-#define pr_warn_ratelimited pr_warning_ratelimited
-#define pr_notice_ratelimited(fmt, ...) \
+#define pr_notice_ratelimited(fmt, ...)					\
 	printk_ratelimited(KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__)
-#define pr_info_ratelimited(fmt, ...) \
+#define pr_info_ratelimited(fmt, ...)					\
 	printk_ratelimited(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)
 /* no pr_cont_ratelimited, don't do that... */
 /* If you are writing a driver, please use dev_dbg instead */
 #if defined(DEBUG)
-#define pr_debug_ratelimited(fmt, ...) \
+#define pr_debug_ratelimited(fmt, ...)					\
 	printk_ratelimited(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
 #else
 #define pr_debug_ratelimited(fmt, ...) \
-	({ if (0) printk_ratelimited(KERN_DEBUG pr_fmt(fmt), \
-				     ##__VA_ARGS__); 0; })
+	no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
+#endif
+
+enum {
+	DUMP_PREFIX_NONE,
+	DUMP_PREFIX_ADDRESS,
+	DUMP_PREFIX_OFFSET
+};
+extern void hex_dump_to_buffer(const void *buf, size_t len,
+			       int rowsize, int groupsize,
+			       char *linebuf, size_t linebuflen, bool ascii);
+#ifdef CONFIG_PRINTK
+extern void print_hex_dump(const char *level, const char *prefix_str,
+			   int prefix_type, int rowsize, int groupsize,
+			   const void *buf, size_t len, bool ascii);
+extern void print_hex_dump_bytes(const char *prefix_str, int prefix_type,
+				 const void *buf, size_t len);
+#else
+static inline void print_hex_dump(const char *level, const char *prefix_str,
+				  int prefix_type, int rowsize, int groupsize,
+				  const void *buf, size_t len, bool ascii)
+{
+}
+static inline void print_hex_dump_bytes(const char *prefix_str, int prefix_type,
+					const void *buf, size_t len)
+{
+}
+
 #endif
 
 #endif
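
An illustrative (hypothetical) use of the *_once helpers introduced above; the driver name and parameter are invented.

/* Hypothetical driver fragment. */
static void example_check_param(bool use_old_param)
{
	if (use_old_param)
		pr_warn_once("example: 'old_param' is deprecated, use 'new_param'\n");
}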
diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h
index d1a9193..223b14c 100644
--- a/include/linux/quotaops.h
+++ b/include/linux/quotaops.h
@@ -31,8 +31,9 @@
 #define quota_error(sb, fmt, args...) \
 	__quota_error((sb), __func__, fmt , ## args)
 
-extern void __quota_error(struct super_block *sb, const char *func,
-			 const char *fmt, ...);
+extern __attribute__((format (printf, 3, 4)))
+void __quota_error(struct super_block *sb, const char *func,
+		   const char *fmt, ...);
 
 /*
  * declaration of quota_function calls in kernel.
diff --git a/include/linux/regulator/ab8500.h b/include/linux/regulator/ab8500.h
index f509877..6a210f1 100644
--- a/include/linux/regulator/ab8500.h
+++ b/include/linux/regulator/ab8500.h
@@ -11,15 +11,17 @@
 #define __LINUX_MFD_AB8500_REGULATOR_H
 
 /* AB8500 regulators */
-#define AB8500_LDO_AUX1         0
-#define AB8500_LDO_AUX2         1
-#define AB8500_LDO_AUX3         2
-#define AB8500_LDO_INTCORE      3
-#define AB8500_LDO_TVOUT        4
-#define AB8500_LDO_AUDIO	5
-#define AB8500_LDO_ANAMIC1      6
-#define AB8500_LDO_ANAMIC2      7
-#define AB8500_LDO_DMIC         8
-#define AB8500_LDO_ANA          9
-
+enum ab8500_regulator_id {
+	AB8500_LDO_AUX1,
+	AB8500_LDO_AUX2,
+	AB8500_LDO_AUX3,
+	AB8500_LDO_INTCORE,
+	AB8500_LDO_TVOUT,
+	AB8500_LDO_AUDIO,
+	AB8500_LDO_ANAMIC1,
+	AB8500_LDO_ANAMIC2,
+	AB8500_LDO_DMIC,
+	AB8500_LDO_ANA,
+	AB8500_NUM_REGULATORS,
+};
 #endif
diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h
index ebd7472..7954f6b 100644
--- a/include/linux/regulator/consumer.h
+++ b/include/linux/regulator/consumer.h
@@ -154,6 +154,7 @@
 				   int min_uV, int max_uV);
 int regulator_set_voltage(struct regulator *regulator, int min_uV, int max_uV);
 int regulator_get_voltage(struct regulator *regulator);
+int regulator_sync_voltage(struct regulator *regulator);
 int regulator_set_current_limit(struct regulator *regulator,
 			       int min_uA, int max_uA);
 int regulator_get_current_limit(struct regulator *regulator);
diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h
index 592cd7c..b8ed16a 100644
--- a/include/linux/regulator/driver.h
+++ b/include/linux/regulator/driver.h
@@ -42,7 +42,11 @@
  *
  * @set_voltage: Set the voltage for the regulator within the range specified.
  *               The driver should select the voltage closest to min_uV.
+ * @set_voltage_sel: Set the voltage for the regulator using the specified
+ *                   selector.
  * @get_voltage: Return the currently configured voltage for the regulator.
+ * @get_voltage_sel: Return the currently configured voltage selector for the
+ *                   regulator.
  * @list_voltage: Return one of the supported voltages, in microvolts; zero
  *	if the selector indicates a voltage that is unusable on this system;
  *	or negative errno.  Selectors range from zero to one less than
@@ -79,8 +83,11 @@
 	int (*list_voltage) (struct regulator_dev *, unsigned selector);
 
 	/* get/set regulator voltage */
-	int (*set_voltage) (struct regulator_dev *, int min_uV, int max_uV);
+	int (*set_voltage) (struct regulator_dev *, int min_uV, int max_uV,
+			    unsigned *selector);
+	int (*set_voltage_sel) (struct regulator_dev *, unsigned selector);
 	int (*get_voltage) (struct regulator_dev *);
+	int (*get_voltage_sel) (struct regulator_dev *);
 
 	/* get/set regulator current  */
 	int (*set_current_limit) (struct regulator_dev *,
@@ -168,9 +175,9 @@
  */
 struct regulator_dev {
 	struct regulator_desc *desc;
-	int use_count;
-	int open_count;
 	int exclusive;
+	u32 use_count;
+	u32 open_count;
 
 	/* lists we belong to */
 	struct list_head list; /* list of all regulators */
@@ -188,10 +195,14 @@
 	struct regulator_dev *supply;	/* for tree */
 
 	void *reg_data;		/* regulator_dev data */
+
+#ifdef CONFIG_DEBUG_FS
+	struct dentry *debugfs;
+#endif
 };
 
 struct regulator_dev *regulator_register(struct regulator_desc *regulator_desc,
-	struct device *dev, struct regulator_init_data *init_data,
+	struct device *dev, const struct regulator_init_data *init_data,
 	void *driver_data);
 void regulator_unregister(struct regulator_dev *rdev);
 
diff --git a/include/linux/rio.h b/include/linux/rio.h
index 0bed941..ff681eb 100644
--- a/include/linux/rio.h
+++ b/include/linux/rio.h
@@ -66,14 +66,62 @@
 
 #define RIO_PW_MSG_SIZE		64
 
+/*
+ * A component tag value (stored in the component tag CSR) is used as a device's
+ * unique identifier assigned during enumeration. Besides identifying switches
+ * (which do not have a device ID register), it is also used by error management
+ * notification and therefore has to be assigned to endpoints as well.
+ */
+#define RIO_CTAG_RESRVD	0xfffe0000 /* Reserved */
+#define RIO_CTAG_UDEVID	0x0001ffff /* Unique device identifier */
+
 extern struct bus_type rio_bus_type;
 extern struct device rio_bus;
 extern struct list_head rio_devices;	/* list of all devices */
 
 struct rio_mport;
+struct rio_dev;
 union rio_pw_msg;
 
 /**
+ * struct rio_switch - RIO switch info
+ * @node: Node in global list of switches
+ * @switchid: Switch ID that is unique across a network
+ * @route_table: Copy of switch routing table
+ * @port_ok: Status of each port (one bit per port) - OK=1 or UNINIT=0
+ * @add_entry: Callback for switch-specific route add function
+ * @get_entry: Callback for switch-specific route get function
+ * @clr_table: Callback for switch-specific clear route table function
+ * @set_domain: Callback for switch-specific domain setting function
+ * @get_domain: Callback for switch-specific domain get function
+ * @em_init: Callback for switch-specific error management init function
+ * @em_handle: Callback for switch-specific error management handler function
+ * @sw_sysfs: Callback that initializes switch-specific sysfs attributes
+ * @nextdev: Array of per-port pointers to the next attached device
+ */
+struct rio_switch {
+	struct list_head node;
+	u16 switchid;
+	u8 *route_table;
+	u32 port_ok;
+	int (*add_entry) (struct rio_mport *mport, u16 destid, u8 hopcount,
+			  u16 table, u16 route_destid, u8 route_port);
+	int (*get_entry) (struct rio_mport *mport, u16 destid, u8 hopcount,
+			  u16 table, u16 route_destid, u8 *route_port);
+	int (*clr_table) (struct rio_mport *mport, u16 destid, u8 hopcount,
+			  u16 table);
+	int (*set_domain) (struct rio_mport *mport, u16 destid, u8 hopcount,
+			   u8 sw_domain);
+	int (*get_domain) (struct rio_mport *mport, u16 destid, u8 hopcount,
+			   u8 *sw_domain);
+	int (*em_init) (struct rio_dev *dev);
+	int (*em_handle) (struct rio_dev *dev, u8 swport);
+	int (*sw_sysfs) (struct rio_dev *dev, int create);
+	struct rio_dev *nextdev[0];
+};
+
+/**
  * struct rio_dev - RIO device info
  * @global_list: Node in list of all RIO devices
  * @net_list: Node in list of RIO devices in a network
@@ -93,13 +141,14 @@
  * @phys_efptr: RIO device extended features pointer
  * @em_efptr: RIO Error Management features pointer
  * @dma_mask: Mask of bits of RIO address this device implements
- * @rswitch: Pointer to &struct rio_switch if valid for this device
  * @driver: Driver claiming this device
  * @dev: Device model device
  * @riores: RIO resources this device owns
  * @pwcback: port-write callback function for this device
- * @destid: Network destination ID
+ * @destid: Network destination ID (or associated destid for switch)
+ * @hopcount: Hopcount to this device
  * @prev: Previous RIO device connected to the current one
+ * @rswitch: struct rio_switch (if valid for this device)
  */
 struct rio_dev {
 	struct list_head global_list;	/* node in list of all RIO devices */
@@ -120,18 +169,20 @@
 	u32 phys_efptr;
 	u32 em_efptr;
 	u64 dma_mask;
-	struct rio_switch *rswitch;	/* RIO switch info */
 	struct rio_driver *driver;	/* RIO driver claiming this device */
 	struct device dev;	/* LDM device structure */
 	struct resource riores[RIO_MAX_DEV_RESOURCES];
 	int (*pwcback) (struct rio_dev *rdev, union rio_pw_msg *msg, int step);
 	u16 destid;
+	u8 hopcount;
 	struct rio_dev *prev;
+	struct rio_switch rswitch[0];	/* RIO switch info */
 };
 
 #define rio_dev_g(n) list_entry(n, struct rio_dev, global_list)
 #define rio_dev_f(n) list_entry(n, struct rio_dev, net_list)
 #define	to_rio_dev(n) container_of(n, struct rio_dev, dev)
+#define sw_to_rio_dev(n) container_of(n, struct rio_dev, rswitch[0])
 
 /**
  * struct rio_msg - RIO message event
@@ -224,49 +275,6 @@
 #define RIO_SW_SYSFS_CREATE	1	/* Create switch attributes */
 #define RIO_SW_SYSFS_REMOVE	0	/* Remove switch attributes */
 
-/**
- * struct rio_switch - RIO switch info
- * @node: Node in global list of switches
- * @rdev: Associated RIO device structure
- * @switchid: Switch ID that is unique across a network
- * @hopcount: Hopcount to this switch
- * @destid: Associated destid in the path
- * @route_table: Copy of switch routing table
- * @port_ok: Status of each port (one bit per port) - OK=1 or UNINIT=0
- * @add_entry: Callback for switch-specific route add function
- * @get_entry: Callback for switch-specific route get function
- * @clr_table: Callback for switch-specific clear route table function
- * @set_domain: Callback for switch-specific domain setting function
- * @get_domain: Callback for switch-specific domain get function
- * @em_init: Callback for switch-specific error management initialization function
- * @em_handle: Callback for switch-specific error management handler function
- * @sw_sysfs: Callback that initializes switch-specific sysfs attributes
- * @nextdev: Array of per-port pointers to the next attached device
- */
-struct rio_switch {
-	struct list_head node;
-	struct rio_dev *rdev;
-	u16 switchid;
-	u16 hopcount;
-	u16 destid;
-	u8 *route_table;
-	u32 port_ok;
-	int (*add_entry) (struct rio_mport * mport, u16 destid, u8 hopcount,
-			  u16 table, u16 route_destid, u8 route_port);
-	int (*get_entry) (struct rio_mport * mport, u16 destid, u8 hopcount,
-			  u16 table, u16 route_destid, u8 * route_port);
-	int (*clr_table) (struct rio_mport *mport, u16 destid, u8 hopcount,
-			  u16 table);
-	int (*set_domain) (struct rio_mport *mport, u16 destid, u8 hopcount,
-			   u8 sw_domain);
-	int (*get_domain) (struct rio_mport *mport, u16 destid, u8 hopcount,
-			   u8 *sw_domain);
-	int (*em_init) (struct rio_dev *dev);
-	int (*em_handle) (struct rio_dev *dev, u8 swport);
-	int (*sw_sysfs) (struct rio_dev *dev, int create);
-	struct rio_dev *nextdev[0];
-};
-
 /* Low-level architecture-dependent routines */
 
 /**
diff --git a/include/linux/rio_drv.h b/include/linux/rio_drv.h
index edc55da..e09e565 100644
--- a/include/linux/rio_drv.h
+++ b/include/linux/rio_drv.h
@@ -150,16 +150,8 @@
 static inline int rio_read_config_32(struct rio_dev *rdev, u32 offset,
 				     u32 * data)
 {
-	u8 hopcount = 0xff;
-	u16 destid = rdev->destid;
-
-	if (rdev->rswitch) {
-		destid = rdev->rswitch->destid;
-		hopcount = rdev->rswitch->hopcount;
-	}
-
-	return rio_mport_read_config_32(rdev->net->hport, destid, hopcount,
-					offset, data);
+	return rio_mport_read_config_32(rdev->net->hport, rdev->destid,
+					rdev->hopcount, offset, data);
 };
 
 /**
@@ -174,16 +166,8 @@
 static inline int rio_write_config_32(struct rio_dev *rdev, u32 offset,
 				      u32 data)
 {
-	u8 hopcount = 0xff;
-	u16 destid = rdev->destid;
-
-	if (rdev->rswitch) {
-		destid = rdev->rswitch->destid;
-		hopcount = rdev->rswitch->hopcount;
-	}
-
-	return rio_mport_write_config_32(rdev->net->hport, destid, hopcount,
-					 offset, data);
+	return rio_mport_write_config_32(rdev->net->hport, rdev->destid,
+					 rdev->hopcount, offset, data);
 };
 
 /**
@@ -198,16 +182,8 @@
 static inline int rio_read_config_16(struct rio_dev *rdev, u32 offset,
 				     u16 * data)
 {
-	u8 hopcount = 0xff;
-	u16 destid = rdev->destid;
-
-	if (rdev->rswitch) {
-		destid = rdev->rswitch->destid;
-		hopcount = rdev->rswitch->hopcount;
-	}
-
-	return rio_mport_read_config_16(rdev->net->hport, destid, hopcount,
-					offset, data);
+	return rio_mport_read_config_16(rdev->net->hport, rdev->destid,
+					rdev->hopcount, offset, data);
 };
 
 /**
@@ -222,16 +198,8 @@
 static inline int rio_write_config_16(struct rio_dev *rdev, u32 offset,
 				      u16 data)
 {
-	u8 hopcount = 0xff;
-	u16 destid = rdev->destid;
-
-	if (rdev->rswitch) {
-		destid = rdev->rswitch->destid;
-		hopcount = rdev->rswitch->hopcount;
-	}
-
-	return rio_mport_write_config_16(rdev->net->hport, destid, hopcount,
-					 offset, data);
+	return rio_mport_write_config_16(rdev->net->hport, rdev->destid,
+					 rdev->hopcount, offset, data);
 };
 
 /**
@@ -245,16 +213,8 @@
  */
 static inline int rio_read_config_8(struct rio_dev *rdev, u32 offset, u8 * data)
 {
-	u8 hopcount = 0xff;
-	u16 destid = rdev->destid;
-
-	if (rdev->rswitch) {
-		destid = rdev->rswitch->destid;
-		hopcount = rdev->rswitch->hopcount;
-	}
-
-	return rio_mport_read_config_8(rdev->net->hport, destid, hopcount,
-				       offset, data);
+	return rio_mport_read_config_8(rdev->net->hport, rdev->destid,
+				       rdev->hopcount, offset, data);
 };
 
 /**
@@ -268,16 +228,8 @@
  */
 static inline int rio_write_config_8(struct rio_dev *rdev, u32 offset, u8 data)
 {
-	u8 hopcount = 0xff;
-	u16 destid = rdev->destid;
-
-	if (rdev->rswitch) {
-		destid = rdev->rswitch->destid;
-		hopcount = rdev->rswitch->hopcount;
-	}
-
-	return rio_mport_write_config_8(rdev->net->hport, destid, hopcount,
-					offset, data);
+	return rio_mport_write_config_8(rdev->net->hport, rdev->destid,
+					rdev->hopcount, offset, data);
 };
 
 extern int rio_mport_send_doorbell(struct rio_mport *mport, u16 destid,
diff --git a/include/linux/rio_ids.h b/include/linux/rio_ids.h
index ee7b6ad..7410d33 100644
--- a/include/linux/rio_ids.h
+++ b/include/linux/rio_ids.h
@@ -36,5 +36,7 @@
 #define RIO_DID_IDTCPS10Q		0x035e
 #define RIO_DID_IDTCPS1848		0x0374
 #define RIO_DID_IDTCPS1616		0x0379
+#define RIO_DID_IDTVPS1616		0x0377
+#define RIO_DID_IDTSPS1616		0x0378
 
 #endif				/* LINUX_RIO_IDS_H */
diff --git a/include/linux/romfs_fs.h b/include/linux/romfs_fs.h
index c490fbc..5f57f93 100644
--- a/include/linux/romfs_fs.h
+++ b/include/linux/romfs_fs.h
@@ -1,6 +1,9 @@
 #ifndef __LINUX_ROMFS_FS_H
 #define __LINUX_ROMFS_FS_H
 
+#include <linux/types.h>
+#include <linux/fs.h>
+
 /* The basic structures of the romfs filesystem */
 
 #define ROMBSIZE BLOCK_SIZE
diff --git a/include/linux/rtc.h b/include/linux/rtc.h
index 14dbc83..3c995b4 100644
--- a/include/linux/rtc.h
+++ b/include/linux/rtc.h
@@ -107,12 +107,17 @@
 extern int rtc_valid_tm(struct rtc_time *tm);
 extern int rtc_tm_to_time(struct rtc_time *tm, unsigned long *time);
 extern void rtc_time_to_tm(unsigned long time, struct rtc_time *tm);
+ktime_t rtc_tm_to_ktime(struct rtc_time tm);
+struct rtc_time rtc_ktime_to_tm(ktime_t kt);
+
 
 #include <linux/device.h>
 #include <linux/seq_file.h>
 #include <linux/cdev.h>
 #include <linux/poll.h>
 #include <linux/mutex.h>
+#include <linux/timerqueue.h>
+#include <linux/workqueue.h>
 
 extern struct class *rtc_class;
 
@@ -151,7 +156,19 @@
 };
 
 #define RTC_DEVICE_NAME_SIZE 20
-struct rtc_task;
+typedef struct rtc_task {
+	void (*func)(void *private_data);
+	void *private_data;
+} rtc_task_t;
+
+
+struct rtc_timer {
+	struct rtc_task	task;
+	struct timerqueue_node node;
+	ktime_t period;
+	int enabled;
+};
+
 
 /* flags */
 #define RTC_DEV_BUSY 0
@@ -179,16 +196,13 @@
 	spinlock_t irq_task_lock;
 	int irq_freq;
 	int max_user_freq;
-#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
-	struct work_struct uie_task;
-	struct timer_list uie_timer;
-	/* Those fields are protected by rtc->irq_lock */
-	unsigned int oldsecs;
-	unsigned int uie_irq_active:1;
-	unsigned int stop_uie_polling:1;
-	unsigned int uie_task_active:1;
-	unsigned int uie_timer_active:1;
-#endif
+
+	struct timerqueue_head timerqueue;
+	struct rtc_timer aie_timer;
+	struct rtc_timer uie_rtctimer;
+	struct hrtimer pie_timer; /* sub second exp, so needs hrtimer */
+	int pie_enabled;
+	struct work_struct irqwork;
 };
 #define to_rtc_device(d) container_of(d, struct rtc_device, dev)
 
@@ -224,15 +238,22 @@
 extern int rtc_dev_update_irq_enable_emul(struct rtc_device *rtc,
 						unsigned int enabled);
 
-typedef struct rtc_task {
-	void (*func)(void *private_data);
-	void *private_data;
-} rtc_task_t;
+void rtc_aie_update_irq(void *private);
+void rtc_uie_update_irq(void *private);
+enum hrtimer_restart rtc_pie_update_irq(struct hrtimer *timer);
 
 int rtc_register(rtc_task_t *task);
 int rtc_unregister(rtc_task_t *task);
 int rtc_control(rtc_task_t *t, unsigned int cmd, unsigned long arg);
 
+void rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer);
+void rtc_timer_remove(struct rtc_device *rtc, struct rtc_timer *timer);
+void rtc_timer_init(struct rtc_timer *timer, void (*f)(void* p), void* data);
+int rtc_timer_start(struct rtc_device *rtc, struct rtc_timer* timer,
+			ktime_t expires, ktime_t period);
+int rtc_timer_cancel(struct rtc_device *rtc, struct rtc_timer* timer);
+void rtc_timer_do_work(struct work_struct *work);
+
 static inline bool is_leap_year(unsigned int year)
 {
 	return (!(year % 4) && (year % 100)) || !(year % 400);
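Editor's note: the rtc.h hunks above replace the old UIE-emulation fields with a generic timer queue and export an rtc_timer API. A minimal sketch, assuming a driver that already holds a struct rtc_device, of how a caller might arm a one-shot timer with the new interface (my_timer_fn, my_arm_alarm and my_rtc are hypothetical names):

#include <linux/rtc.h>
#include <linux/ktime.h>

static struct rtc_timer my_timer;

/* Runs from the RTC core's irqwork once the timer expires. */
static void my_timer_fn(void *private_data)
{
}

static int my_arm_alarm(struct rtc_device *my_rtc, struct rtc_time *alarm_tm)
{
	ktime_t exp = rtc_tm_to_ktime(*alarm_tm);

	rtc_timer_init(&my_timer, my_timer_fn, NULL);
	/* A zero period makes this a one-shot timer. */
	return rtc_timer_start(my_rtc, &my_timer, exp, ktime_set(0, 0));
}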
diff --git a/include/linux/sched.h b/include/linux/sched.h
index abc527a..96e2321 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -683,7 +683,7 @@
 	atomic_t fanotify_listeners;
 #endif
 #ifdef CONFIG_EPOLL
-	atomic_t epoll_watches;	/* The number of file descriptors currently watched */
+	atomic_long_t epoll_watches; /* The number of file descriptors currently watched */
 #endif
 #ifdef CONFIG_POSIX_MQUEUE
 	/* protected by mq_lock	*/
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index a23fa29..758c5b0 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -212,6 +212,7 @@
 #include <linux/tty.h>
 #include <linux/mutex.h>
 #include <linux/sysrq.h>
+#include <linux/pps_kernel.h>
 
 struct uart_port;
 struct serial_struct;
@@ -528,10 +529,10 @@
 	struct uart_state *state = uport->state;
 	struct tty_port *port = &state->port;
 	struct tty_ldisc *ld = tty_ldisc_ref(port->tty);
-	struct timespec ts;
+	struct pps_event_time ts;
 
 	if (ld && ld->ops->dcd_change)
-		getnstimeofday(&ts);
+		pps_get_ts(&ts);
 
 	uport->icount.dcd++;
 #ifdef CONFIG_HARD_PPS
diff --git a/include/linux/socket.h b/include/linux/socket.h
index 5f65f14..edbb1d0 100644
--- a/include/linux/socket.h
+++ b/include/linux/socket.h
@@ -191,7 +191,8 @@
 #define AF_PHONET	35	/* Phonet sockets		*/
 #define AF_IEEE802154	36	/* IEEE802154 sockets		*/
 #define AF_CAIF		37	/* CAIF sockets			*/
-#define AF_MAX		38	/* For now.. */
+#define AF_ALG		38	/* Algorithm sockets		*/
+#define AF_MAX		39	/* For now.. */
 
 /* Protocol families, same as address families. */
 #define PF_UNSPEC	AF_UNSPEC
@@ -232,6 +233,7 @@
 #define PF_PHONET	AF_PHONET
 #define PF_IEEE802154	AF_IEEE802154
 #define PF_CAIF		AF_CAIF
+#define PF_ALG		AF_ALG
 #define PF_MAX		AF_MAX
 
 /* Maximum queue length specifiable by listen.  */
@@ -305,6 +307,7 @@
 #define SOL_RDS		276
 #define SOL_IUCV	277
 #define SOL_CAIF	278
+#define SOL_ALG		279
 
 /* IPX options */
 #define IPX_TYPE	1
diff --git a/include/linux/sunrpc/auth.h b/include/linux/sunrpc/auth.h
index b202475..8521067 100644
--- a/include/linux/sunrpc/auth.h
+++ b/include/linux/sunrpc/auth.h
@@ -110,9 +110,9 @@
 	__be32 *		(*crmarshal)(struct rpc_task *, __be32 *);
 	int			(*crrefresh)(struct rpc_task *);
 	__be32 *		(*crvalidate)(struct rpc_task *, __be32 *);
-	int			(*crwrap_req)(struct rpc_task *, kxdrproc_t,
+	int			(*crwrap_req)(struct rpc_task *, kxdreproc_t,
 						void *, __be32 *, void *);
-	int			(*crunwrap_resp)(struct rpc_task *, kxdrproc_t,
+	int			(*crunwrap_resp)(struct rpc_task *, kxdrdproc_t,
 						void *, __be32 *, void *);
 };
 
@@ -139,8 +139,8 @@
 void			put_rpccred(struct rpc_cred *);
 __be32 *		rpcauth_marshcred(struct rpc_task *, __be32 *);
 __be32 *		rpcauth_checkverf(struct rpc_task *, __be32 *);
-int			rpcauth_wrap_req(struct rpc_task *task, kxdrproc_t encode, void *rqstp, __be32 *data, void *obj);
-int			rpcauth_unwrap_resp(struct rpc_task *task, kxdrproc_t decode, void *rqstp, __be32 *data, void *obj);
+int			rpcauth_wrap_req(struct rpc_task *task, kxdreproc_t encode, void *rqstp, __be32 *data, void *obj);
+int			rpcauth_unwrap_resp(struct rpc_task *task, kxdrdproc_t decode, void *rqstp, __be32 *data, void *obj);
 int			rpcauth_refreshcred(struct rpc_task *);
 void			rpcauth_invalcred(struct rpc_task *);
 int			rpcauth_uptodatecred(struct rpc_task *);
diff --git a/include/linux/sunrpc/bc_xprt.h b/include/linux/sunrpc/bc_xprt.h
index 7c91260..c50b458 100644
--- a/include/linux/sunrpc/bc_xprt.h
+++ b/include/linux/sunrpc/bc_xprt.h
@@ -43,10 +43,18 @@
  */
 static inline int svc_is_backchannel(const struct svc_rqst *rqstp)
 {
-	if (rqstp->rq_server->bc_xprt)
+	if (rqstp->rq_server->sv_bc_xprt)
 		return 1;
 	return 0;
 }
+static inline struct nfs4_sessionid *bc_xprt_sid(struct svc_rqst *rqstp)
+{
+	if (svc_is_backchannel(rqstp))
+		return (struct nfs4_sessionid *)
+			rqstp->rq_server->sv_bc_xprt->xpt_bc_sid;
+	return NULL;
+}
+
 #else /* CONFIG_NFS_V4_1 */
 static inline int xprt_setup_backchannel(struct rpc_xprt *xprt,
 					 unsigned int min_reqs)
@@ -59,6 +67,11 @@
 	return 0;
 }
 
+static inline struct nfs4_sessionid *bc_xprt_sid(struct svc_rqst *rqstp)
+{
+	return NULL;
+}
+
 static inline void xprt_free_bc_request(struct rpc_rqst *req)
 {
 }
diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
index a5a55f2..ef9476a 100644
--- a/include/linux/sunrpc/clnt.h
+++ b/include/linux/sunrpc/clnt.h
@@ -89,8 +89,8 @@
  */
 struct rpc_procinfo {
 	u32			p_proc;		/* RPC procedure number */
-	kxdrproc_t		p_encode;	/* XDR encode function */
-	kxdrproc_t		p_decode;	/* XDR decode function */
+	kxdreproc_t		p_encode;	/* XDR encode function */
+	kxdrdproc_t		p_decode;	/* XDR decode function */
 	unsigned int		p_arglen;	/* argument hdr length (u32) */
 	unsigned int		p_replen;	/* reply hdr length (u32) */
 	unsigned int		p_count;	/* call count */
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
index 5a3085b..c81d4d8 100644
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -99,7 +99,7 @@
 	spinlock_t		sv_cb_lock;	/* protects the svc_cb_list */
 	wait_queue_head_t	sv_cb_waitq;	/* sleep here if there are no
 						 * entries in the svc_cb_list */
-	struct svc_xprt		*bc_xprt;
+	struct svc_xprt		*sv_bc_xprt;	/* callback on fore channel */
 #endif /* CONFIG_NFS_V4_1 */
 };
 
diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h
index aea0d438..357da5e 100644
--- a/include/linux/sunrpc/svc_xprt.h
+++ b/include/linux/sunrpc/svc_xprt.h
@@ -78,6 +78,7 @@
 	size_t			xpt_remotelen;	/* length of address */
 	struct rpc_wait_queue	xpt_bc_pending;	/* backchannel wait queue */
 	struct list_head	xpt_users;	/* callbacks on free */
+	void			*xpt_bc_sid;	/* back channel session ID */
 
 	struct net		*xpt_net;
 };
diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h
index 498ab93..fc84b7a 100644
--- a/include/linux/sunrpc/xdr.h
+++ b/include/linux/sunrpc/xdr.h
@@ -33,8 +33,8 @@
 };
 
 /*
- * This is the generic XDR function. rqstp is either a rpc_rqst (client
- * side) or svc_rqst pointer (server side).
+ * This is the legacy generic XDR function. rqstp is either a rpc_rqst
+ * (client side) or svc_rqst pointer (server side).
  * Encode functions always assume there's enough room in the buffer.
  */
 typedef int	(*kxdrproc_t)(void *rqstp, __be32 *data, void *obj);
@@ -201,14 +201,22 @@
 
 	__be32 *end;		/* end of available buffer space */
 	struct kvec *iov;	/* pointer to the current kvec */
+	struct kvec scratch;	/* Scratch buffer */
+	struct page **page_ptr;	/* pointer to the current page */
 };
 
+/*
+ * These are the xdr_stream style generic XDR encode and decode functions.
+ */
+typedef void	(*kxdreproc_t)(void *rqstp, struct xdr_stream *xdr, void *obj);
+typedef int	(*kxdrdproc_t)(void *rqstp, struct xdr_stream *xdr, void *obj);
+
 extern void xdr_init_encode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p);
 extern __be32 *xdr_reserve_space(struct xdr_stream *xdr, size_t nbytes);
 extern void xdr_write_pages(struct xdr_stream *xdr, struct page **pages,
 		unsigned int base, unsigned int len);
 extern void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p);
-extern __be32 *xdr_inline_peek(struct xdr_stream *xdr, size_t nbytes);
+extern void xdr_set_scratch_buffer(struct xdr_stream *xdr, void *buf, size_t buflen);
 extern __be32 *xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes);
 extern void xdr_read_pages(struct xdr_stream *xdr, unsigned int len);
 extern void xdr_enter_page(struct xdr_stream *xdr, unsigned int len);
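Editor's note: the new kxdreproc_t/kxdrdproc_t typedefs move the per-procedure XDR callbacks from raw __be32 pointers to xdr_stream accessors. A sketch of an encoder with the new shape, assuming a made-up procedure whose argument is a single u32 flag:

#include <linux/sunrpc/xdr.h>

static void my_xdr_enc_flag(void *rqstp, struct xdr_stream *xdr, void *obj)
{
	u32 *flag = obj;
	__be32 *p = xdr_reserve_space(xdr, 4);	/* room for one XDR word */

	if (p)
		*p = cpu_to_be32(*flag);
}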
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index 144b34b..c1f4998 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -122,7 +122,7 @@
  * suspend_set_ops - set platform dependent suspend operations
  * @ops: The new suspend operations to set.
  */
-extern void suspend_set_ops(struct platform_suspend_ops *ops);
+extern void suspend_set_ops(const struct platform_suspend_ops *ops);
 extern int suspend_valid_only_mem(suspend_state_t state);
 
 /**
@@ -147,7 +147,7 @@
 #else /* !CONFIG_SUSPEND */
 #define suspend_valid_only_mem	NULL
 
-static inline void suspend_set_ops(struct platform_suspend_ops *ops) {}
+static inline void suspend_set_ops(const struct platform_suspend_ops *ops) {}
 static inline int pm_suspend(suspend_state_t state) { return -ENOSYS; }
 #endif /* !CONFIG_SUSPEND */
 
@@ -245,7 +245,7 @@
 extern void swsusp_unset_page_free(struct page *);
 extern unsigned long get_safe_page(gfp_t gfp_mask);
 
-extern void hibernation_set_ops(struct platform_hibernation_ops *ops);
+extern void hibernation_set_ops(const struct platform_hibernation_ops *ops);
 extern int hibernate(void);
 extern bool system_entering_hibernation(void);
 #else /* CONFIG_HIBERNATION */
@@ -253,7 +253,7 @@
 static inline void swsusp_set_page_free(struct page *p) {}
 static inline void swsusp_unset_page_free(struct page *p) {}
 
-static inline void hibernation_set_ops(struct platform_hibernation_ops *ops) {}
+static inline void hibernation_set_ops(const struct platform_hibernation_ops *ops) {}
 static inline int hibernate(void) { return -ENOSYS; }
 static inline bool system_entering_hibernation(void) { return false; }
 #endif /* CONFIG_HIBERNATION */
diff --git a/include/linux/time.h b/include/linux/time.h
index 9f15ac7..1e6d3b5 100644
--- a/include/linux/time.h
+++ b/include/linux/time.h
@@ -158,6 +158,8 @@
 extern int do_getitimer(int which, struct itimerval *value);
 extern void getnstimeofday(struct timespec *tv);
 extern void getrawmonotonic(struct timespec *ts);
+extern void getnstime_raw_and_real(struct timespec *ts_raw,
+		struct timespec *ts_real);
 extern void getboottime(struct timespec *ts);
 extern void monotonic_to_bootbased(struct timespec *ts);
 
diff --git a/include/linux/timex.h b/include/linux/timex.h
index 32d852f..d23999f 100644
--- a/include/linux/timex.h
+++ b/include/linux/timex.h
@@ -268,6 +268,7 @@
 extern void second_overflow(void);
 extern void update_ntp_one_tick(void);
 extern int do_adjtimex(struct timex *);
+extern void hardpps(const struct timespec *, const struct timespec *);
 
 int read_current_timer(unsigned long *timer_val);
 
diff --git a/include/linux/toshiba.h b/include/linux/toshiba.h
index 6a7c4ed..772dedb 100644
--- a/include/linux/toshiba.h
+++ b/include/linux/toshiba.h
@@ -33,6 +33,8 @@
 	unsigned int edi __attribute__ ((packed));
 } SMMRegisters;
 
+#ifdef __KERNEL__
 int tosh_smm(SMMRegisters *regs);
+#endif /* __KERNEL__ */
 
 #endif
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index d3e4f87..c681461 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -32,7 +32,7 @@
 	int state;			/* State. */
 	void (*regfunc)(void);
 	void (*unregfunc)(void);
-	struct tracepoint_func *funcs;
+	struct tracepoint_func __rcu *funcs;
 } __attribute__((aligned(32)));		/*
 					 * Aligned on 32 bytes because it is
 					 * globally visible and gcc happily
@@ -326,7 +326,7 @@
  *		memcpy(__entry->prev_comm, prev->comm, TASK_COMM_LEN);
  *		__entry->next_pid	= next->pid;
  *		__entry->next_prio	= next->prio;
- *	)
+ *	),
  *
  *	*
  *	* Formatted output of a trace record via TP_printk().
diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
index 526d66f..ff7dc08 100644
--- a/include/linux/tty_ldisc.h
+++ b/include/linux/tty_ldisc.h
@@ -101,14 +101,15 @@
  *	any pending driver I/O is completed.
  *
  * void (*dcd_change)(struct tty_struct *tty, unsigned int status,
- * 			struct timespec *ts)
+ *			struct pps_event_time *ts)
  *
  *	Tells the discipline that the DCD pin has changed its status and
- *	the relative timestamp. Pointer ts can be NULL.
+ *	the relative timestamp. Pointer ts cannot be NULL.
  */
 
 #include <linux/fs.h>
 #include <linux/wait.h>
+#include <linux/pps_kernel.h>
 
 struct tty_ldisc_ops {
 	int	magic;
@@ -143,7 +144,7 @@
 			       char *fp, int count);
 	void	(*write_wakeup)(struct tty_struct *);
 	void	(*dcd_change)(struct tty_struct *, unsigned int,
-				struct timespec *);
+				struct pps_event_time *);
 
 	struct  module *owner;
 	
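Editor's note: with dcd_change() now taking a struct pps_event_time instead of a struct timespec, a line discipline's callback would look roughly like this sketch (my_dcd_change and the pr_debug() body are illustrative):

#include <linux/kernel.h>
#include <linux/tty.h>
#include <linux/pps_kernel.h>

static void my_dcd_change(struct tty_struct *tty, unsigned int status,
			  struct pps_event_time *ts)
{
	/* ts is captured with pps_get_ts() and can no longer be NULL */
	pr_debug("DCD %s at %lld.%09ld\n", status ? "asserted" : "dropped",
		 (long long)ts->ts_real.tv_sec, ts->ts_real.tv_nsec);
}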
diff --git a/include/linux/u64_stats_sync.h b/include/linux/u64_stats_sync.h
index fa261a0..8da8c4e 100644
--- a/include/linux/u64_stats_sync.h
+++ b/include/linux/u64_stats_sync.h
@@ -67,21 +67,21 @@
 #endif
 };
 
-static void inline u64_stats_update_begin(struct u64_stats_sync *syncp)
+static inline void u64_stats_update_begin(struct u64_stats_sync *syncp)
 {
 #if BITS_PER_LONG==32 && defined(CONFIG_SMP)
 	write_seqcount_begin(&syncp->seq);
 #endif
 }
 
-static void inline u64_stats_update_end(struct u64_stats_sync *syncp)
+static inline void u64_stats_update_end(struct u64_stats_sync *syncp)
 {
 #if BITS_PER_LONG==32 && defined(CONFIG_SMP)
 	write_seqcount_end(&syncp->seq);
 #endif
 }
 
-static unsigned int inline u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
+static inline unsigned int u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
 {
 #if BITS_PER_LONG==32 && defined(CONFIG_SMP)
 	return read_seqcount_begin(&syncp->seq);
@@ -93,7 +93,7 @@
 #endif
 }
 
-static bool inline u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
+static inline bool u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
 					 unsigned int start)
 {
 #if BITS_PER_LONG==32 && defined(CONFIG_SMP)
@@ -112,7 +112,7 @@
  * - UP 32bit must disable BH.
  * - 64bit have no problem atomically reading u64 values, irq safe.
  */
-static unsigned int inline u64_stats_fetch_begin_bh(const struct u64_stats_sync *syncp)
+static inline unsigned int u64_stats_fetch_begin_bh(const struct u64_stats_sync *syncp)
 {
 #if BITS_PER_LONG==32 && defined(CONFIG_SMP)
 	return read_seqcount_begin(&syncp->seq);
@@ -124,7 +124,7 @@
 #endif
 }
 
-static bool inline u64_stats_fetch_retry_bh(const struct u64_stats_sync *syncp,
+static inline bool u64_stats_fetch_retry_bh(const struct u64_stats_sync *syncp,
 					 unsigned int start)
 {
 #if BITS_PER_LONG==32 && defined(CONFIG_SMP)
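Editor's note: the u64_stats_sync hunks above only put the storage-class qualifiers in the conventional order; the API itself is unchanged. For reference, the usual writer/reader pattern looks like the following sketch (struct my_stats is a hypothetical counter block):

#include <linux/types.h>
#include <linux/u64_stats_sync.h>

struct my_stats {
	u64 packets;
	u64 bytes;
	struct u64_stats_sync syncp;
};

static void my_stats_add(struct my_stats *s, unsigned int len)
{
	u64_stats_update_begin(&s->syncp);	/* writer side */
	s->packets++;
	s->bytes += len;
	u64_stats_update_end(&s->syncp);
}

static void my_stats_read(const struct my_stats *s, u64 *packets, u64 *bytes)
{
	unsigned int start;

	/* reader retries if a writer updated the counters concurrently */
	do {
		start = u64_stats_fetch_begin(&s->syncp);
		*packets = s->packets;
		*bytes = s->bytes;
	} while (u64_stats_fetch_retry(&s->syncp, start));
}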
diff --git a/include/linux/unaligned/packed_struct.h b/include/linux/unaligned/packed_struct.h
index c9a6abd..c0d817d 100644
--- a/include/linux/unaligned/packed_struct.h
+++ b/include/linux/unaligned/packed_struct.h
@@ -3,9 +3,9 @@
 
 #include <linux/kernel.h>
 
-struct __una_u16 { u16 x; } __attribute__((packed));
-struct __una_u32 { u32 x; } __attribute__((packed));
-struct __una_u64 { u64 x; } __attribute__((packed));
+struct __una_u16 { u16 x; } __packed;
+struct __una_u32 { u32 x; } __packed;
+struct __una_u64 { u64 x; } __packed;
 
 static inline u16 __get_unaligned_cpu16(const void *p)
 {
diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h
index 8178156..faf4679 100644
--- a/include/linux/user_namespace.h
+++ b/include/linux/user_namespace.h
@@ -6,7 +6,7 @@
 #include <linux/sched.h>
 #include <linux/err.h>
 
-#define UIDHASH_BITS	(CONFIG_BASE_SMALL ? 3 : 8)
+#define UIDHASH_BITS	(CONFIG_BASE_SMALL ? 3 : 7)
 #define UIDHASH_SZ	(1 << UIDHASH_BITS)
 
 struct user_namespace {
diff --git a/include/linux/vga_switcheroo.h b/include/linux/vga_switcheroo.h
index ae9ab13..4b9a7f5 100644
--- a/include/linux/vga_switcheroo.h
+++ b/include/linux/vga_switcheroo.h
@@ -33,6 +33,7 @@
 void vga_switcheroo_unregister_client(struct pci_dev *dev);
 int vga_switcheroo_register_client(struct pci_dev *dev,
 				   void (*set_gpu_state)(struct pci_dev *dev, enum vga_switcheroo_state),
+				   void (*reprobe)(struct pci_dev *dev),
 				   bool (*can_switch)(struct pci_dev *dev));
 
 void vga_switcheroo_client_fb_set(struct pci_dev *dev,
@@ -48,6 +49,7 @@
 static inline void vga_switcheroo_unregister_client(struct pci_dev *dev) {}
 static inline int vga_switcheroo_register_client(struct pci_dev *dev,
 					  void (*set_gpu_state)(struct pci_dev *dev, enum vga_switcheroo_state),
+					  void (*reprobe)(struct pci_dev *dev),
 					  bool (*can_switch)(struct pci_dev *dev)) { return 0; }
 static inline void vga_switcheroo_client_fb_set(struct pci_dev *dev, struct fb_info *info) {}
 static inline int vga_switcheroo_register_handler(struct vga_switcheroo_handler *handler) { return 0; }
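Editor's note: vga_switcheroo clients now register an additional reprobe hook that is invoked after a switch. A sketch of a registration with the new signature (all three callbacks are driver-specific placeholders):

#include <linux/pci.h>
#include <linux/vga_switcheroo.h>

static void my_set_gpu_state(struct pci_dev *pdev,
			     enum vga_switcheroo_state state)
{
	/* power the GPU up or down */
}

static void my_reprobe(struct pci_dev *pdev)
{
	/* e.g. re-read EDID / reprobe outputs after the switch */
}

static bool my_can_switch(struct pci_dev *pdev)
{
	return true;
}

static int my_register(struct pci_dev *pdev)
{
	return vga_switcheroo_register_client(pdev, my_set_gpu_state,
					      my_reprobe, my_can_switch);
}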
diff --git a/include/linux/xz.h b/include/linux/xz.h
new file mode 100644
index 0000000..64cffa6
--- /dev/null
+++ b/include/linux/xz.h
@@ -0,0 +1,264 @@
+/*
+ * XZ decompressor
+ *
+ * Authors: Lasse Collin <lasse.collin@tukaani.org>
+ *          Igor Pavlov <http://7-zip.org/>
+ *
+ * This file has been put into the public domain.
+ * You can do whatever you want with this file.
+ */
+
+#ifndef XZ_H
+#define XZ_H
+
+#ifdef __KERNEL__
+#	include <linux/stddef.h>
+#	include <linux/types.h>
+#else
+#	include <stddef.h>
+#	include <stdint.h>
+#endif
+
+/* In Linux, this is used to make extern functions static when needed. */
+#ifndef XZ_EXTERN
+#	define XZ_EXTERN extern
+#endif
+
+/**
+ * enum xz_mode - Operation mode
+ *
+ * @XZ_SINGLE:              Single-call mode. This uses less RAM than
+ *                          multi-call modes, because the LZMA2
+ *                          dictionary doesn't need to be allocated as
+ *                          part of the decoder state. All required data
+ *                          structures are allocated at initialization,
+ *                          so xz_dec_run() cannot return XZ_MEM_ERROR.
+ * @XZ_PREALLOC:            Multi-call mode with preallocated LZMA2
+ *                          dictionary buffer. All data structures are
+ *                          allocated at initialization, so xz_dec_run()
+ *                          cannot return XZ_MEM_ERROR.
+ * @XZ_DYNALLOC:            Multi-call mode. The LZMA2 dictionary is
+ *                          allocated once the required size has been
+ *                          parsed from the stream headers. If the
+ *                          allocation fails, xz_dec_run() will return
+ *                          XZ_MEM_ERROR.
+ *
+ * It is possible to enable support only for a subset of the above
+ * modes at compile time by defining XZ_DEC_SINGLE, XZ_DEC_PREALLOC,
+ * or XZ_DEC_DYNALLOC. The xz_dec kernel module is always compiled
+ * with support for all operation modes, but the preboot code may
+ * be built with fewer features to minimize code size.
+ */
+enum xz_mode {
+	XZ_SINGLE,
+	XZ_PREALLOC,
+	XZ_DYNALLOC
+};
+
+/**
+ * enum xz_ret - Return codes
+ * @XZ_OK:                  Everything is OK so far. More input or more
+ *                          output space is required to continue. This
+ *                          return code is possible only in multi-call mode
+ *                          (XZ_PREALLOC or XZ_DYNALLOC).
+ * @XZ_STREAM_END:          Operation finished successfully.
+ * @XZ_UNSUPPORTED_CHECK:   Integrity check type is not supported. Decoding
+ *                          is still possible in multi-call mode by simply
+ *                          calling xz_dec_run() again.
+ *                          Note that this return value is used only if
+ *                          XZ_DEC_ANY_CHECK was defined at build time,
+ *                          which is not used in the kernel. Unsupported
+ *                          check types return XZ_OPTIONS_ERROR if
+ *                          XZ_DEC_ANY_CHECK was not defined at build time.
+ * @XZ_MEM_ERROR:           Allocating memory failed. This return code is
+ *                          possible only if the decoder was initialized
+ *                          with XZ_DYNALLOC. The amount of memory the
+ *                          decoder tried to allocate was no more than the
+ *                          dict_max argument given to xz_dec_init().
+ * @XZ_MEMLIMIT_ERROR:      A bigger LZMA2 dictionary would be needed than
+ *                          allowed by the dict_max argument given to
+ *                          xz_dec_init(). This return value is possible
+ *                          only in multi-call mode (XZ_PREALLOC or
+ *                          XZ_DYNALLOC); the single-call mode (XZ_SINGLE)
+ *                          ignores the dict_max argument.
+ * @XZ_FORMAT_ERROR:        File format was not recognized (wrong magic
+ *                          bytes).
+ * @XZ_OPTIONS_ERROR:       This implementation doesn't support the requested
+ *                          compression options. In the decoder this means
+ *                          that the header CRC32 matches, but the header
+ *                          itself specifies something that we don't support.
+ * @XZ_DATA_ERROR:          Compressed data is corrupt.
+ * @XZ_BUF_ERROR:           Cannot make any progress. Details are slightly
+ *                          different between multi-call and single-call
+ *                          mode; more information below.
+ *
+ * In multi-call mode, XZ_BUF_ERROR is returned when two consecutive calls
+ * to XZ code cannot consume any input and cannot produce any new output.
+ * This happens when there is no new input available, or the output buffer
+ * is full while at least one output byte is still pending. Assuming your
+ * code is not buggy, you can get this error only when decoding a compressed
+ * stream that is truncated or otherwise corrupt.
+ *
+ * In single-call mode, XZ_BUF_ERROR is returned only when the output buffer
+ * is too small or the compressed input is corrupt in a way that makes the
+ * decoder produce more output than the caller expected. When it is
+ * (relatively) clear that the compressed input is truncated, XZ_DATA_ERROR
+ * is used instead of XZ_BUF_ERROR.
+ */
+enum xz_ret {
+	XZ_OK,
+	XZ_STREAM_END,
+	XZ_UNSUPPORTED_CHECK,
+	XZ_MEM_ERROR,
+	XZ_MEMLIMIT_ERROR,
+	XZ_FORMAT_ERROR,
+	XZ_OPTIONS_ERROR,
+	XZ_DATA_ERROR,
+	XZ_BUF_ERROR
+};
+
+/**
+ * struct xz_buf - Passing input and output buffers to XZ code
+ * @in:         Beginning of the input buffer. This may be NULL if and only
+ *              if in_pos is equal to in_size.
+ * @in_pos:     Current position in the input buffer. This must not exceed
+ *              in_size.
+ * @in_size:    Size of the input buffer
+ * @out:        Beginning of the output buffer. This may be NULL if and only
+ *              if out_pos is equal to out_size.
+ * @out_pos:    Current position in the output buffer. This must not exceed
+ *              out_size.
+ * @out_size:   Size of the output buffer
+ *
+ * Only the contents of the output buffer from out[out_pos] onward, and
+ * the variables in_pos and out_pos are modified by the XZ code.
+ */
+struct xz_buf {
+	const uint8_t *in;
+	size_t in_pos;
+	size_t in_size;
+
+	uint8_t *out;
+	size_t out_pos;
+	size_t out_size;
+};
+
+/**
+ * struct xz_dec - Opaque type to hold the XZ decoder state
+ */
+struct xz_dec;
+
+/**
+ * xz_dec_init() - Allocate and initialize an XZ decoder state
+ * @mode:       Operation mode
+ * @dict_max:   Maximum size of the LZMA2 dictionary (history buffer) for
+ *              multi-call decoding. This is ignored in single-call mode
+ *              (mode == XZ_SINGLE). LZMA2 dictionary is always 2^n bytes
+ *              or 2^n + 2^(n-1) bytes (the latter sizes are less common
+ *              in practice), so other values for dict_max don't make sense.
+ *              In the kernel, dictionary sizes of 64 KiB, 128 KiB, 256 KiB,
+ *              512 KiB, and 1 MiB are probably the only reasonable values,
+ *              except for kernel and initramfs images where a bigger
+ *              dictionary can be fine and useful.
+ *
+ * Single-call mode (XZ_SINGLE): xz_dec_run() decodes the whole stream at
+ * once. The caller must provide enough output space or the decoding will
+ * fail. The output space is used as the dictionary buffer, which is why
+ * there is no need to allocate the dictionary as part of the decoder's
+ * internal state.
+ *
+ * Because the output buffer is used as the workspace, streams encoded using
+ * a big dictionary are not a problem in single-call mode. It is enough that
+ * the output buffer is big enough to hold the actual uncompressed data; it
+ * can be smaller than the dictionary size stored in the stream headers.
+ *
+ * Multi-call mode with preallocated dictionary (XZ_PREALLOC): dict_max bytes
+ * of memory is preallocated for the LZMA2 dictionary. This way there is no
+ * risk that xz_dec_run() could run out of memory, since xz_dec_run() will
+ * never allocate any memory. Instead, if the preallocated dictionary is too
+ * small for decoding the given input stream, xz_dec_run() will return
+ * XZ_MEMLIMIT_ERROR. Thus, it is important to know what kind of data will be
+ * decoded to avoid allocating an excessive amount of memory for the dictionary.
+ *
+ * Multi-call mode with dynamically allocated dictionary (XZ_DYNALLOC):
+ * dict_max specifies the maximum allowed dictionary size that xz_dec_run()
+ * may allocate once it has parsed the dictionary size from the stream
+ * headers. This way excessive allocations can be avoided while still
+ * limiting the maximum memory usage to a sane value to prevent running the
+ * system out of memory when decompressing streams from untrusted sources.
+ *
+ * On success, xz_dec_init() returns a pointer to struct xz_dec, which is
+ * ready to be used with xz_dec_run(). If memory allocation fails,
+ * xz_dec_init() returns NULL.
+ */
+XZ_EXTERN struct xz_dec *xz_dec_init(enum xz_mode mode, uint32_t dict_max);
+
+/**
+ * xz_dec_run() - Run the XZ decoder
+ * @s:          Decoder state allocated using xz_dec_init()
+ * @b:          Input and output buffers
+ *
+ * The possible return values depend on build options and operation mode.
+ * See enum xz_ret for details.
+ *
+ * Note that if an error occurs in single-call mode (return value is not
+ * XZ_STREAM_END), b->in_pos and b->out_pos are not modified and the
+ * contents of the output buffer from b->out[b->out_pos] onward are
+ * undefined. This is true even after XZ_BUF_ERROR, because with some filter
+ * chains, there may be a second pass over the output buffer, and this pass
+ * cannot be properly done if the output buffer is truncated. Thus, you
+ * cannot give the single-call decoder a too small buffer and then expect to
+ * get that amount of valid data from the beginning of the stream. You must use
+ * the multi-call decoder if you don't want to uncompress the whole stream.
+ */
+XZ_EXTERN enum xz_ret xz_dec_run(struct xz_dec *s, struct xz_buf *b);
+
+/**
+ * xz_dec_reset() - Reset an already allocated decoder state
+ * @s:          Decoder state allocated using xz_dec_init()
+ *
+ * This function can be used to reset the multi-call decoder state without
+ * freeing and reallocating memory with xz_dec_end() and xz_dec_init().
+ *
+ * In single-call mode, xz_dec_reset() is always called at the beginning of
+ * xz_dec_run(). Thus, an explicit call to xz_dec_reset() is useful only in
+ * multi-call mode.
+ */
+XZ_EXTERN void xz_dec_reset(struct xz_dec *s);
+
+/**
+ * xz_dec_end() - Free the memory allocated for the decoder state
+ * @s:          Decoder state allocated using xz_dec_init(). If s is NULL,
+ *              this function does nothing.
+ */
+XZ_EXTERN void xz_dec_end(struct xz_dec *s);
+
+/*
+ * Standalone build (userspace build or in-kernel build for boot time use)
+ * needs a CRC32 implementation. For normal in-kernel use, kernel's own
+ * CRC32 module is used instead, and users of this module don't need to
+ * care about the functions below.
+ */
+#ifndef XZ_INTERNAL_CRC32
+#	ifdef __KERNEL__
+#		define XZ_INTERNAL_CRC32 0
+#	else
+#		define XZ_INTERNAL_CRC32 1
+#	endif
+#endif
+
+#if XZ_INTERNAL_CRC32
+/*
+ * This must be called before any other xz_* function to initialize
+ * the CRC32 lookup table.
+ */
+XZ_EXTERN void xz_crc32_init(void);
+
+/*
+ * Update CRC32 value using the polynomial from IEEE-802.3. To start a new
+ * calculation, the third argument must be zero. To continue the calculation,
+ * the previously returned value is passed as the third argument.
+ */
+XZ_EXTERN uint32_t xz_crc32(const uint8_t *buf, size_t size, uint32_t crc);
+#endif
+#endif
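Editor's note: to make the single-call mode concrete, here is a minimal in-kernel decompression sketch built only from the interface declared above (my_unxz and its buffer parameters are illustrative):

#include <linux/errno.h>
#include <linux/xz.h>

static int my_unxz(const u8 *src, size_t src_len, u8 *dst, size_t dst_len)
{
	struct xz_buf b = {
		.in = src,  .in_pos = 0,  .in_size = src_len,
		.out = dst, .out_pos = 0, .out_size = dst_len,
	};
	struct xz_dec *s;
	enum xz_ret ret;

	s = xz_dec_init(XZ_SINGLE, 0);	/* dict_max is ignored in single-call mode */
	if (!s)
		return -ENOMEM;

	ret = xz_dec_run(s, &b);	/* decodes the whole stream in one call */
	xz_dec_end(s);

	return ret == XZ_STREAM_END ? 0 : -EINVAL;
}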
diff --git a/include/media/davinci/vpss.h b/include/media/davinci/vpss.h
index c59cc02..b586495 100644
--- a/include/media/davinci/vpss.h
+++ b/include/media/davinci/vpss.h
@@ -44,7 +44,7 @@
 	short pplen;
 };
 
-/* Used for enable/diable VPSS Clock */
+/* Used for enable/disable VPSS Clock */
 enum vpss_clock_sel {
 	/* DM355/DM365 */
 	VPSS_CCDC_CLOCK,
diff --git a/include/net/ah.h b/include/net/ah.h
index f0129f7..be7798d 100644
--- a/include/net/ah.h
+++ b/include/net/ah.h
@@ -4,7 +4,7 @@
 #include <linux/skbuff.h>
 
 /* This is the maximum truncated ICV length that we know of. */
-#define MAX_AH_AUTH_LEN	12
+#define MAX_AH_AUTH_LEN	16
 
 struct crypto_ahash;
 
diff --git a/include/net/arp.h b/include/net/arp.h
index f4cf6ce..91f0568 100644
--- a/include/net/arp.h
+++ b/include/net/arp.h
@@ -25,5 +25,6 @@
 				  const unsigned char *src_hw,
 				  const unsigned char *target_hw);
 extern void arp_xmit(struct sk_buff *skb);
+int arp_invalidate(struct net_device *dev, __be32 ip);
 
 #endif	/* _ARP_H */
diff --git a/include/net/phonet/phonet.h b/include/net/phonet/phonet.h
index d5df797..5395e09 100644
--- a/include/net/phonet/phonet.h
+++ b/include/net/phonet/phonet.h
@@ -107,8 +107,8 @@
 	int			sock_type;
 };
 
-int phonet_proto_register(int protocol, struct phonet_protocol *pp);
-void phonet_proto_unregister(int protocol, struct phonet_protocol *pp);
+int phonet_proto_register(unsigned int protocol, struct phonet_protocol *pp);
+void phonet_proto_unregister(unsigned int protocol, struct phonet_protocol *pp);
 
 int phonet_sysctl_init(void);
 void phonet_sysctl_exit(void);
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 0af57eb..e9eee99 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -207,7 +207,7 @@
 	return q->q.qlen;
 }
 
-static inline struct qdisc_skb_cb *qdisc_skb_cb(struct sk_buff *skb)
+static inline struct qdisc_skb_cb *qdisc_skb_cb(const struct sk_buff *skb)
 {
 	return (struct qdisc_skb_cb *)skb->cb;
 }
@@ -394,7 +394,7 @@
 	return true;
 }
 
-static inline unsigned int qdisc_pkt_len(struct sk_buff *skb)
+static inline unsigned int qdisc_pkt_len(const struct sk_buff *skb)
 {
 	return qdisc_skb_cb(skb)->pkt_len;
 }
@@ -426,10 +426,18 @@
 	return qdisc_enqueue(skb, sch) & NET_XMIT_MASK;
 }
 
-static inline void __qdisc_update_bstats(struct Qdisc *sch, unsigned int len)
+
+static inline void bstats_update(struct gnet_stats_basic_packed *bstats,
+				 const struct sk_buff *skb)
 {
-	sch->bstats.bytes += len;
-	sch->bstats.packets++;
+	bstats->bytes += qdisc_pkt_len(skb);
+	bstats->packets += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
+}
+
+static inline void qdisc_bstats_update(struct Qdisc *sch,
+				       const struct sk_buff *skb)
+{
+	bstats_update(&sch->bstats, skb);
 }
 
 static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch,
@@ -437,7 +445,7 @@
 {
 	__skb_queue_tail(list, skb);
 	sch->qstats.backlog += qdisc_pkt_len(skb);
-	__qdisc_update_bstats(sch, qdisc_pkt_len(skb));
+	qdisc_bstats_update(sch, skb);
 
 	return NET_XMIT_SUCCESS;
 }
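Editor's note: qdisc_bstats_update() replaces __qdisc_update_bstats() and additionally counts GSO segments, while bstats_update() lets per-class byte/packet counters share the same logic. A small sketch (struct my_class and my_account_skb are hypothetical):

#include <net/sch_generic.h>

struct my_class {
	struct gnet_stats_basic_packed bstats;
	/* ... */
};

static void my_account_skb(struct Qdisc *sch, struct my_class *cl,
			   const struct sk_buff *skb)
{
	/* old: __qdisc_update_bstats(sch, qdisc_pkt_len(skb)); */
	qdisc_bstats_update(sch, skb);		/* qdisc-wide counters */
	bstats_update(&cl->bstats, skb);	/* per-class counters  */
}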
diff --git a/include/net/sctp/user.h b/include/net/sctp/user.h
index 2b2769c..2a128c8 100644
--- a/include/net/sctp/user.h
+++ b/include/net/sctp/user.h
@@ -99,8 +99,8 @@
 #define SCTP_SOCKOPT_PEELOFF	102	/* peel off association. */
 /* Options 104-106 are deprecated and removed. Do not use this space */
 #define SCTP_SOCKOPT_CONNECTX_OLD	107	/* CONNECTX old requests. */
-#define SCTP_GET_PEER_ADDRS	108		/* Get all peer addresss. */
-#define SCTP_GET_LOCAL_ADDRS	109		/* Get all local addresss. */
+#define SCTP_GET_PEER_ADDRS	108		/* Get all peer addresses. */
+#define SCTP_GET_LOCAL_ADDRS	109		/* Get all local addresses. */
 #define SCTP_SOCKOPT_CONNECTX	110		/* CONNECTX requests. */
 #define SCTP_SOCKOPT_CONNECTX3	111	/* CONNECTX requests (updated) */
 
diff --git a/include/net/sock.h b/include/net/sock.h
index 21a02f7..d884d26 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -152,14 +152,18 @@
 	 * fields between dontcopy_begin/dontcopy_end
 	 * are not copied in sock_copy()
 	 */
+	/* private: */
 	int			skc_dontcopy_begin[0];
+	/* public: */
 	union {
 		struct hlist_node	skc_node;
 		struct hlist_nulls_node skc_nulls_node;
 	};
 	int			skc_tx_queue_mapping;
 	atomic_t		skc_refcnt;
+	/* private: */
 	int                     skc_dontcopy_end[0];
+	/* public: */
 };
 
 /**
diff --git a/include/scsi/fc/fc_fcp.h b/include/scsi/fc/fc_fcp.h
index 8e9b222..8a143ca 100644
--- a/include/scsi/fc/fc_fcp.h
+++ b/include/scsi/fc/fc_fcp.h
@@ -46,7 +46,7 @@
  */
 struct fcp_cmnd {
 	__u8		fc_lun[8];	/* logical unit number */
-	__u8		fc_cmdref;	/* commmand reference number */
+	__u8		fc_cmdref;	/* command reference number */
 	__u8		fc_pri_ta;	/* priority and task attribute */
 	__u8		fc_tm_flags;	/* task management flags */
 	__u8		fc_flags;	/* additional len & flags */
@@ -58,7 +58,7 @@
 
 struct fcp_cmnd32 {
 	__u8		fc_lun[8];	/* logical unit number */
-	__u8		fc_cmdref;	/* commmand reference number */
+	__u8		fc_cmdref;	/* command reference number */
 	__u8		fc_pri_ta;	/* priority and task attribute */
 	__u8		fc_tm_flags;	/* task management flags */
 	__u8		fc_flags;	/* additional len & flags */
diff --git a/include/sound/alc5623.h b/include/sound/alc5623.h
new file mode 100644
index 0000000..422c97d
--- /dev/null
+++ b/include/sound/alc5623.h
@@ -0,0 +1,15 @@
+#ifndef _INCLUDE_SOUND_ALC5623_H
+#define _INCLUDE_SOUND_ALC5623_H
+struct alc5623_platform_data {
+	/* configure :                              */
+	/* Lineout/Speaker Amps Vmid ratio control  */
+	/* enable/disable adc/dac high pass filters */
+	unsigned int add_ctrl;
+	/* configure :                              */
+	/* output to enable when jack is low        */
+	/* output to enable when jack is high       */
+	/* jack detect (gpio/nc/jack detect [12])   */
+	unsigned int jack_det_ctrl;
+};
+#endif
+
diff --git a/include/sound/asound.h b/include/sound/asound.h
index a1803ec..5d6074f 100644
--- a/include/sound/asound.h
+++ b/include/sound/asound.h
@@ -259,6 +259,7 @@
 #define SNDRV_PCM_INFO_HALF_DUPLEX	0x00100000	/* only half duplex */
 #define SNDRV_PCM_INFO_JOINT_DUPLEX	0x00200000	/* playback and capture stream are somewhat correlated */
 #define SNDRV_PCM_INFO_SYNC_START	0x00400000	/* pcm support some kind of sync go */
+#define SNDRV_PCM_INFO_NO_PERIOD_WAKEUP	0x00800000	/* period wakeup can be disabled */
 #define SNDRV_PCM_INFO_FIFO_IN_FRAMES	0x80000000	/* internal kernel flag - FIFO size is in frames */
 
 typedef int __bitwise snd_pcm_state_t;
@@ -334,6 +335,8 @@
 #define	SNDRV_PCM_HW_PARAM_LAST_INTERVAL	SNDRV_PCM_HW_PARAM_TICK_TIME
 
 #define SNDRV_PCM_HW_PARAMS_NORESAMPLE	(1<<0)	/* avoid rate resampling */
+#define SNDRV_PCM_HW_PARAMS_EXPORT_BUFFER	(1<<1)	/* export buffer */
+#define SNDRV_PCM_HW_PARAMS_NO_PERIOD_WAKEUP	(1<<2)	/* disable period wakeups */
 
 struct snd_interval {
 	unsigned int min, max;
diff --git a/include/sound/control.h b/include/sound/control.h
index 112374d..7715e6f 100644
--- a/include/sound/control.h
+++ b/include/sound/control.h
@@ -160,12 +160,14 @@
 }
 
 /*
- * Frequently used control callbacks
+ * Frequently used control callbacks/helpers
  */
 int snd_ctl_boolean_mono_info(struct snd_kcontrol *kcontrol,
 			      struct snd_ctl_elem_info *uinfo);
 int snd_ctl_boolean_stereo_info(struct snd_kcontrol *kcontrol,
 				struct snd_ctl_elem_info *uinfo);
+int snd_ctl_enum_info(struct snd_ctl_elem_info *info, unsigned int channels,
+		      unsigned int items, const char *const names[]);
 
 /*
  * virtual master control
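Editor's note: the new snd_ctl_enum_info() helper fills in the info for an enumerated control. A typical .info callback might use it as in this sketch (the source-select control and its texts are illustrative):

#include <linux/kernel.h>
#include <sound/control.h>

static const char *const my_src_texts[] = { "Line", "Mic" };

static int my_src_info(struct snd_kcontrol *kcontrol,
		       struct snd_ctl_elem_info *uinfo)
{
	/* one channel, two enum items, names copied in by the helper */
	return snd_ctl_enum_info(uinfo, 1, ARRAY_SIZE(my_src_texts),
				 my_src_texts);
}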
diff --git a/include/sound/hdsp.h b/include/sound/hdsp.h
index d98a78d..0909a38 100644
--- a/include/sound/hdsp.h
+++ b/include/sound/hdsp.h
@@ -28,6 +28,7 @@
 	Multiface,
 	H9652,
 	H9632,
+	RPM,
 	Undefined,
 };
 
diff --git a/include/sound/minors.h b/include/sound/minors.h
index a81798a..8f76420 100644
--- a/include/sound/minors.h
+++ b/include/sound/minors.h
@@ -31,8 +31,8 @@
 /* these minors can still be used for autoloading devices (/dev/aload*) */
 #define SNDRV_MINOR_CONTROL		0	/* 0 */
 #define SNDRV_MINOR_GLOBAL		1	/* 1 */
-#define SNDRV_MINOR_SEQUENCER		(SNDRV_MINOR_GLOBAL + 0 * 32)
-#define SNDRV_MINOR_TIMER		(SNDRV_MINOR_GLOBAL + 1 * 32)
+#define SNDRV_MINOR_SEQUENCER		1	/* SNDRV_MINOR_GLOBAL + 0 * 32 */
+#define SNDRV_MINOR_TIMER		33	/* SNDRV_MINOR_GLOBAL + 1 * 32 */
 
 #ifndef CONFIG_SND_DYNAMIC_MINORS
 						/* 2 - 3 (reserved) */
diff --git a/include/sound/pcm.h b/include/sound/pcm.h
index dfd9b76..e731f8d7 100644
--- a/include/sound/pcm.h
+++ b/include/sound/pcm.h
@@ -297,6 +297,7 @@
 	unsigned int info;
 	unsigned int rate_num;
 	unsigned int rate_den;
+	unsigned int no_period_wakeup: 1;
 
 	/* -- SW params -- */
 	int tstamp_mode;		/* mmap timestamp is updated */
diff --git a/include/sound/soc-dai.h b/include/sound/soc-dai.h
index e7b6802..1bafe95 100644
--- a/include/sound/soc-dai.h
+++ b/include/sound/soc-dai.h
@@ -16,8 +16,6 @@
 
 #include <linux/list.h>
 
-#include <sound/soc.h>
-
 struct snd_pcm_substream;
 
 /*
@@ -205,7 +203,7 @@
 	int (*resume)(struct snd_soc_dai *dai);
 
 	/* ops */
-	struct snd_soc_dai_ops *ops;
+	const struct snd_soc_dai_ops *ops;
 
 	/* DAI capabilities */
 	struct snd_soc_pcm_stream capture;
diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h
index 8fd3b41..8031769 100644
--- a/include/sound/soc-dapm.h
+++ b/include/sound/soc-dapm.h
@@ -16,7 +16,6 @@
 #include <linux/device.h>
 #include <linux/types.h>
 #include <sound/control.h>
-#include <sound/soc.h>
 
 /* widget has no PM register bit */
 #define SND_SOC_NOPM	-1
@@ -72,6 +71,10 @@
 	 wcontrols, wncontrols) \
 {	.id = snd_soc_dapm_pga, .name = wname, .reg = wreg, .shift = wshift, \
 	.invert = winvert, .kcontrols = wcontrols, .num_kcontrols = wncontrols}
+#define SND_SOC_DAPM_OUT_DRV(wname, wreg, wshift, winvert,\
+	 wcontrols, wncontrols) \
+{	.id = snd_soc_dapm_out_drv, .name = wname, .reg = wreg, .shift = wshift, \
+	.invert = winvert, .kcontrols = wcontrols, .num_kcontrols = wncontrols}
 #define SND_SOC_DAPM_MIXER(wname, wreg, wshift, winvert, \
 	 wcontrols, wncontrols)\
 {	.id = snd_soc_dapm_mixer, .name = wname, .reg = wreg, .shift = wshift, \
@@ -90,6 +93,9 @@
 #define SND_SOC_DAPM_MUX(wname, wreg, wshift, winvert, wcontrols) \
 {	.id = snd_soc_dapm_mux, .name = wname, .reg = wreg, .shift = wshift, \
 	.invert = winvert, .kcontrols = wcontrols, .num_kcontrols = 1}
+#define SND_SOC_DAPM_VIRT_MUX(wname, wreg, wshift, winvert, wcontrols) \
+{	.id = snd_soc_dapm_virt_mux, .name = wname, .reg = wreg, .shift = wshift, \
+	.invert = winvert, .kcontrols = wcontrols, .num_kcontrols = 1}
 #define SND_SOC_DAPM_VALUE_MUX(wname, wreg, wshift, winvert, wcontrols) \
 {	.id = snd_soc_dapm_value_mux, .name = wname, .reg = wreg, \
 	.shift = wshift, .invert = winvert, .kcontrols = wcontrols, \
@@ -116,6 +122,11 @@
 {	.id = snd_soc_dapm_pga, .name = wname, .reg = wreg, .shift = wshift, \
 	.invert = winvert, .kcontrols = wcontrols, .num_kcontrols = wncontrols, \
 	.event = wevent, .event_flags = wflags}
+#define SND_SOC_DAPM_OUT_DRV_E(wname, wreg, wshift, winvert, wcontrols, \
+	wncontrols, wevent, wflags) \
+{	.id = snd_soc_dapm_out_drv, .name = wname, .reg = wreg, .shift = wshift, \
+	.invert = winvert, .kcontrols = wcontrols, .num_kcontrols = wncontrols, \
+	.event = wevent, .event_flags = wflags}
 #define SND_SOC_DAPM_MIXER_E(wname, wreg, wshift, winvert, wcontrols, \
 	wncontrols, wevent, wflags) \
 {	.id = snd_soc_dapm_mixer, .name = wname, .reg = wreg, .shift = wshift, \
@@ -140,6 +151,11 @@
 {	.id = snd_soc_dapm_mux, .name = wname, .reg = wreg, .shift = wshift, \
 	.invert = winvert, .kcontrols = wcontrols, .num_kcontrols = 1, \
 	.event = wevent, .event_flags = wflags}
+#define SND_SOC_DAPM_VIRT_MUX_E(wname, wreg, wshift, winvert, wcontrols, \
+	wevent, wflags) \
+{	.id = snd_soc_dapm_virt_mux, .name = wname, .reg = wreg, .shift = wshift, \
+	.invert = winvert, .kcontrols = wcontrols, .num_kcontrols = 1, \
+	.event = wevent, .event_flags = wflags}
 
 /* Simplified versions of above macros, assuming wncontrols = ARRAY_SIZE(wcontrols) */
 #define SOC_PGA_E_ARRAY(wname, wreg, wshift, winvert, wcontrols, \
@@ -219,13 +235,6 @@
 	.info = snd_soc_info_volsw, \
 	.get = snd_soc_dapm_get_volsw, .put = snd_soc_dapm_put_volsw, \
 	.private_value =  SOC_SINGLE_VALUE(reg, shift, max, invert) }
-#define SOC_DAPM_DOUBLE(xname, reg, shift_left, shift_right, max, invert, \
-	power) \
-{	.iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = (xname), \
-	.info = snd_soc_info_volsw, \
- 	.get = snd_soc_dapm_get_volsw, .put = snd_soc_dapm_put_volsw, \
- 	.private_value = (reg) | ((shift_left) << 8) | ((shift_right) << 12) |\
-		((max) << 16) | ((invert) << 24) }
 #define SOC_DAPM_SINGLE_TLV(xname, reg, shift, max, invert, tlv_array) \
 {	.iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
 	.info = snd_soc_info_volsw, \
@@ -233,15 +242,6 @@
 	.tlv.p = (tlv_array), \
 	.get = snd_soc_dapm_get_volsw, .put = snd_soc_dapm_put_volsw, \
 	.private_value =  SOC_SINGLE_VALUE(reg, shift, max, invert) }
-#define SOC_DAPM_DOUBLE_TLV(xname, reg, shift_left, shift_right, max, invert, \
-	power, tlv_array) \
-{	.iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = (xname), \
-	.access = SNDRV_CTL_ELEM_ACCESS_TLV_READ | SNDRV_CTL_ELEM_ACCESS_READWRITE,\
-	.tlv.p = (tlv_array), \
-	.info = snd_soc_info_volsw, \
-	.get = snd_soc_dapm_get_volsw, .put = snd_soc_dapm_put_volsw, \
-	.private_value = (reg) | ((shift_left) << 8) | ((shift_right) << 12) |\
-		((max) << 16) | ((invert) << 24) }
 #define SOC_DAPM_ENUM(xname, xenum) \
 {	.iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
 	.info = snd_soc_info_enum_double, \
@@ -297,6 +297,7 @@
 struct snd_soc_dapm_path;
 struct snd_soc_dapm_pin;
 struct snd_soc_dapm_route;
+struct snd_soc_dapm_context;
 
 int dapm_reg_event(struct snd_soc_dapm_widget *w,
 		   struct snd_kcontrol *kcontrol, int event);
@@ -324,16 +325,16 @@
 	struct snd_ctl_elem_value *uncontrol);
 int snd_soc_dapm_put_pin_switch(struct snd_kcontrol *kcontrol,
 	struct snd_ctl_elem_value *uncontrol);
-int snd_soc_dapm_new_control(struct snd_soc_codec *codec,
+int snd_soc_dapm_new_control(struct snd_soc_dapm_context *dapm,
 	const struct snd_soc_dapm_widget *widget);
-int snd_soc_dapm_new_controls(struct snd_soc_codec *codec,
+int snd_soc_dapm_new_controls(struct snd_soc_dapm_context *dapm,
 	const struct snd_soc_dapm_widget *widget,
 	int num);
 
 /* dapm path setup */
-int snd_soc_dapm_new_widgets(struct snd_soc_codec *codec);
-void snd_soc_dapm_free(struct snd_soc_codec *codec);
-int snd_soc_dapm_add_routes(struct snd_soc_codec *codec,
+int snd_soc_dapm_new_widgets(struct snd_soc_dapm_context *dapm);
+void snd_soc_dapm_free(struct snd_soc_dapm_context *dapm);
+int snd_soc_dapm_add_routes(struct snd_soc_dapm_context *dapm,
 			    const struct snd_soc_dapm_route *route, int num);
 
 /* dapm events */
@@ -343,27 +344,33 @@
 
 /* dapm sys fs - used by the core */
 int snd_soc_dapm_sys_add(struct device *dev);
-void snd_soc_dapm_debugfs_init(struct snd_soc_codec *codec);
+void snd_soc_dapm_debugfs_init(struct snd_soc_dapm_context *dapm);
 
 /* dapm audio pin control and status */
-int snd_soc_dapm_enable_pin(struct snd_soc_codec *codec, const char *pin);
-int snd_soc_dapm_disable_pin(struct snd_soc_codec *codec, const char *pin);
-int snd_soc_dapm_nc_pin(struct snd_soc_codec *codec, const char *pin);
-int snd_soc_dapm_get_pin_status(struct snd_soc_codec *codec, const char *pin);
-int snd_soc_dapm_sync(struct snd_soc_codec *codec);
-int snd_soc_dapm_force_enable_pin(struct snd_soc_codec *codec,
+int snd_soc_dapm_enable_pin(struct snd_soc_dapm_context *dapm,
+			    const char *pin);
+int snd_soc_dapm_disable_pin(struct snd_soc_dapm_context *dapm,
+			     const char *pin);
+int snd_soc_dapm_nc_pin(struct snd_soc_dapm_context *dapm, const char *pin);
+int snd_soc_dapm_get_pin_status(struct snd_soc_dapm_context *dapm,
+				const char *pin);
+int snd_soc_dapm_sync(struct snd_soc_dapm_context *dapm);
+int snd_soc_dapm_force_enable_pin(struct snd_soc_dapm_context *dapm,
 				  const char *pin);
-int snd_soc_dapm_ignore_suspend(struct snd_soc_codec *codec, const char *pin);
+int snd_soc_dapm_ignore_suspend(struct snd_soc_dapm_context *dapm,
+				const char *pin);
 
 /* dapm widget types */
 enum snd_soc_dapm_type {
 	snd_soc_dapm_input = 0,		/* input pin */
 	snd_soc_dapm_output,		/* output pin */
 	snd_soc_dapm_mux,			/* selects 1 analog signal from many inputs */
+	snd_soc_dapm_virt_mux,			/* virtual version of snd_soc_dapm_mux */
 	snd_soc_dapm_value_mux,			/* selects 1 analog signal from many inputs */
 	snd_soc_dapm_mixer,			/* mixes several analog signals together */
 	snd_soc_dapm_mixer_named_ctl,		/* mixer with named controls */
 	snd_soc_dapm_pga,			/* programmable gain/attenuation (volume) */
+	snd_soc_dapm_out_drv,			/* output driver */
 	snd_soc_dapm_adc,			/* analog to digital converter */
 	snd_soc_dapm_dac,			/* digital to analog converter */
 	snd_soc_dapm_micbias,		/* microphone bias (power) */
@@ -425,6 +432,7 @@
 	char *sname;	/* stream name */
 	struct snd_soc_codec *codec;
 	struct list_head list;
+	struct snd_soc_dapm_context *dapm;
 
 	/* dapm control */
 	short reg;						/* negative reg = no direct dapm */
@@ -461,4 +469,35 @@
 	struct list_head power_list;
 };
 
+struct snd_soc_dapm_update {
+	struct snd_soc_dapm_widget *widget;
+	struct snd_kcontrol *kcontrol;
+	int reg;
+	int mask;
+	int val;
+};
+
+/* DAPM context */
+struct snd_soc_dapm_context {
+	int n_widgets; /* number of widgets in this context */
+	enum snd_soc_bias_level bias_level;
+	enum snd_soc_bias_level suspend_bias_level;
+	struct delayed_work delayed_work;
+	unsigned int idle_bias_off:1; /* Use BIAS_OFF instead of STANDBY */
+
+	struct snd_soc_dapm_update *update;
+
+	struct device *dev; /* from parent - for debug */
+	struct snd_soc_codec *codec; /* parent codec */
+	struct snd_soc_card *card; /* parent card */
+
+	/* used during DAPM updates */
+	int dev_power;
+	struct list_head list;
+
+#ifdef CONFIG_DEBUG_FS
+	struct dentry *debugfs_dapm;
+#endif
+};
+
 #endif
diff --git a/include/sound/soc.h b/include/sound/soc.h
index 5c3bce8..74921f2 100644
--- a/include/sound/soc.h
+++ b/include/sound/soc.h
@@ -222,10 +222,8 @@
 
 struct snd_jack;
 struct snd_soc_card;
-struct snd_soc_device;
 struct snd_soc_pcm_stream;
 struct snd_soc_ops;
-struct snd_soc_dai_mode;
 struct snd_soc_pcm_runtime;
 struct snd_soc_dai;
 struct snd_soc_dai_driver;
@@ -235,9 +233,10 @@
 struct snd_soc_codec;
 struct snd_soc_codec_driver;
 struct soc_enum;
-struct snd_soc_ac97_ops;
 struct snd_soc_jack;
 struct snd_soc_jack_pin;
+struct snd_soc_cache_ops;
+#include <sound/soc-dapm.h>
 
 #ifdef CONFIG_GPIOLIB
 struct snd_soc_jack_gpio;
@@ -253,17 +252,30 @@
 	SND_SOC_SPI,
 };
 
+enum snd_soc_compress_type {
+	SND_SOC_FLAT_COMPRESSION = 1,
+	SND_SOC_LZO_COMPRESSION,
+	SND_SOC_RBTREE_COMPRESSION
+};
+
 int snd_soc_register_platform(struct device *dev,
 		struct snd_soc_platform_driver *platform_drv);
 void snd_soc_unregister_platform(struct device *dev);
 int snd_soc_register_codec(struct device *dev,
-		struct snd_soc_codec_driver *codec_drv,
+		const struct snd_soc_codec_driver *codec_drv,
 		struct snd_soc_dai_driver *dai_drv, int num_dai);
 void snd_soc_unregister_codec(struct device *dev);
 int snd_soc_codec_volatile_register(struct snd_soc_codec *codec, int reg);
 int snd_soc_codec_set_cache_io(struct snd_soc_codec *codec,
 			       int addr_bits, int data_bits,
 			       enum snd_soc_control_type control);
+int snd_soc_cache_sync(struct snd_soc_codec *codec);
+int snd_soc_cache_init(struct snd_soc_codec *codec);
+int snd_soc_cache_exit(struct snd_soc_codec *codec);
+int snd_soc_cache_write(struct snd_soc_codec *codec,
+			unsigned int reg, unsigned int value);
+int snd_soc_cache_read(struct snd_soc_codec *codec,
+		       unsigned int reg, unsigned int *value);
 
 /* Utility functions to get clock rates from various things */
 int snd_soc_calc_frame_size(int sample_size, int channels, int tdm_slots);
@@ -420,23 +432,37 @@
 	int (*trigger)(struct snd_pcm_substream *, int);
 };
 
+/* SoC cache ops */
+struct snd_soc_cache_ops {
+	const char *name;
+	enum snd_soc_compress_type id;
+	int (*init)(struct snd_soc_codec *codec);
+	int (*exit)(struct snd_soc_codec *codec);
+	int (*read)(struct snd_soc_codec *codec, unsigned int reg,
+		unsigned int *value);
+	int (*write)(struct snd_soc_codec *codec, unsigned int reg,
+		unsigned int value);
+	int (*sync)(struct snd_soc_codec *codec);
+};
+
 /* SoC Audio Codec device */
 struct snd_soc_codec {
 	const char *name;
+	const char *name_prefix;
 	int id;
 	struct device *dev;
-	struct snd_soc_codec_driver *driver;
+	const struct snd_soc_codec_driver *driver;
 
 	struct mutex mutex;
 	struct snd_soc_card *card;
 	struct list_head list;
 	struct list_head card_list;
 	int num_dai;
+	enum snd_soc_compress_type compress_type;
 
 	/* runtime */
 	struct snd_ac97 *ac97;  /* for ad-hoc ac97 devices */
 	unsigned int active;
-	unsigned int idle_bias_off:1; /* Use BIAS_OFF instead of STANDBY */
 	unsigned int cache_only:1;  /* Suppress writes to hardware */
 	unsigned int cache_sync:1; /* Cache needs to be synced to hardware */
 	unsigned int suspended:1; /* Codec is in suspend PM state */
@@ -444,25 +470,25 @@
 	unsigned int ac97_registered:1; /* Codec has been AC97 registered */
 	unsigned int ac97_created:1; /* Codec has been created by SoC */
 	unsigned int sysfs_registered:1; /* codec has been sysfs registered */
+	unsigned int cache_init:1; /* codec cache has been initialized */
 
 	/* codec IO */
 	void *control_data; /* codec control (i2c/3wire) data */
 	hw_write_t hw_write;
 	unsigned int (*hw_read)(struct snd_soc_codec *, unsigned int);
+	unsigned int (*read)(struct snd_soc_codec *, unsigned int);
+	int (*write)(struct snd_soc_codec *, unsigned int, unsigned int);
 	void *reg_cache;
+	const void *reg_def_copy;
+	const struct snd_soc_cache_ops *cache_ops;
+	struct mutex cache_rw_mutex;
 
 	/* dapm */
-	u32 pop_time;
-	struct list_head dapm_widgets;
-	struct list_head dapm_paths;
-	enum snd_soc_bias_level bias_level;
-	enum snd_soc_bias_level suspend_bias_level;
-	struct delayed_work delayed_work;
+	struct snd_soc_dapm_context dapm;
 
 #ifdef CONFIG_DEBUG_FS
 	struct dentry *debugfs_codec_root;
 	struct dentry *debugfs_reg;
-	struct dentry *debugfs_pop_time;
 	struct dentry *debugfs_dapm;
 #endif
 };
@@ -488,6 +514,7 @@
 	short reg_cache_step;
 	short reg_word_size;
 	const void *reg_cache_default;
+	enum snd_soc_compress_type compress_type;
 
 	/* codec bias level */
 	int (*set_bias_level)(struct snd_soc_codec *,
@@ -554,6 +581,30 @@
 	struct snd_soc_ops *ops;
 };
 
+struct snd_soc_codec_conf {
+	const char *dev_name;
+
+	/*
+	 * optional map of kcontrol, widget and path name prefixes that are
+	 * associated per device
+	 */
+	const char *name_prefix;
+
+	/*
+	 * set this to the desired compression type if you want to
+	 * override the one supplied in codec->driver->compress_type
+	 */
+	enum snd_soc_compress_type compress_type;
+};
+
+struct snd_soc_aux_dev {
+	const char *name;		/* Codec name */
+	const char *codec_name;		/* for multi-codec */
+
+	/* codec/machine specific init - e.g. add machine controls */
+	int (*init)(struct snd_soc_dapm_context *dapm);
+};
+
 /* SoC card */
 struct snd_soc_card {
 	const char *name;
@@ -579,6 +630,8 @@
 	/* callbacks */
 	int (*set_bias_level)(struct snd_soc_card *,
 			      enum snd_soc_bias_level level);
+	int (*set_bias_level_post)(struct snd_soc_card *,
+				   enum snd_soc_bias_level level);
 
 	long pmdown_time;
 
@@ -588,12 +641,35 @@
 	struct snd_soc_pcm_runtime *rtd;
 	int num_rtd;
 
+	/* optional codec specific configuration */
+	struct snd_soc_codec_conf *codec_conf;
+	int num_configs;
+
+	/*
+	 * optional auxiliary devices such as amplifiers, or codecs whose
+	 * DAI link is left unused
+	 */
+	struct snd_soc_aux_dev *aux_dev;
+	int num_aux_devs;
+	struct snd_soc_pcm_runtime *rtd_aux;
+	int num_aux_rtd;
+
 	struct work_struct deferred_resume_work;
 
 	/* lists of probed devices belonging to this card */
 	struct list_head codec_dev_list;
 	struct list_head platform_dev_list;
 	struct list_head dai_dev_list;
+
+	struct list_head widgets;
+	struct list_head paths;
+	struct list_head dapm_list;
+
+#ifdef CONFIG_DEBUG_FS
+	struct dentry *debugfs_card_root;
+	struct dentry *debugfs_pop_time;
+#endif
+	u32 pop_time;
 };
 
 /* SoC machine DAI configuration, glues a codec and cpu DAI together */
@@ -639,17 +715,9 @@
 };
 
 /* codec IO */
-static inline unsigned int snd_soc_read(struct snd_soc_codec *codec,
-					unsigned int reg)
-{
-	return codec->driver->read(codec, reg);
-}
-
-static inline unsigned int snd_soc_write(struct snd_soc_codec *codec,
-					 unsigned int reg, unsigned int val)
-{
-	return codec->driver->write(codec, reg, val);
-}
+unsigned int snd_soc_read(struct snd_soc_codec *codec, unsigned int reg);
+unsigned int snd_soc_write(struct snd_soc_codec *codec,
+			   unsigned int reg, unsigned int val);
 
 /* device driver data */
 
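Editor's note: with the DAPM state now living in struct snd_soc_dapm_context (reachable as codec->dapm), machine-driver code passes the context rather than the codec. A sketch of a dai_link init callback under the new calls (the widget table and the "Ext Spk" pin are illustrative):

#include <linux/kernel.h>
#include <sound/soc.h>

static const struct snd_soc_dapm_widget my_widgets[] = {
	SND_SOC_DAPM_SPK("Ext Spk", NULL),
};

static int my_machine_init(struct snd_soc_pcm_runtime *rtd)
{
	struct snd_soc_codec *codec = rtd->codec;
	struct snd_soc_dapm_context *dapm = &codec->dapm;

	snd_soc_dapm_new_controls(dapm, my_widgets, ARRAY_SIZE(my_widgets));
	snd_soc_dapm_enable_pin(dapm, "Ext Spk");
	return snd_soc_dapm_sync(dapm);
}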
diff --git a/include/trace/define_trace.h b/include/trace/define_trace.h
index b0b4eb2..da39b22 100644
--- a/include/trace/define_trace.h
+++ b/include/trace/define_trace.h
@@ -21,6 +21,16 @@
 #undef CREATE_TRACE_POINTS
 
 #include <linux/stringify.h>
+/*
+ * module.h includes tracepoints, and because ftrace.h
+ * pulls in module.h:
+ *  trace/ftrace.h -> linux/ftrace_event.h -> linux/perf_event.h ->
+ *  linux/ftrace.h -> linux/module.h
+ * we must include module.h here before we play with any of
+ * the TRACE_EVENT() macros, otherwise the tracepoints included
+ * by module.h may break the build.
+ */
+#include <linux/module.h>
 
 #undef TRACE_EVENT
 #define TRACE_EVENT(name, proto, args, tstruct, assign, print)	\
diff --git a/include/trace/events/asoc.h b/include/trace/events/asoc.h
new file mode 100644
index 0000000..186e84d
--- /dev/null
+++ b/include/trace/events/asoc.h
@@ -0,0 +1,235 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM asoc
+
+#if !defined(_TRACE_ASOC_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_ASOC_H
+
+#include <linux/ktime.h>
+#include <linux/tracepoint.h>
+
+struct snd_soc_jack;
+struct snd_soc_codec;
+struct snd_soc_card;
+struct snd_soc_dapm_widget;
+
+/*
+ * Log register events
+ */
+DECLARE_EVENT_CLASS(snd_soc_reg,
+
+	TP_PROTO(struct snd_soc_codec *codec, unsigned int reg,
+		 unsigned int val),
+
+	TP_ARGS(codec, reg, val),
+
+	TP_STRUCT__entry(
+		__string(	name,		codec->name	)
+		__field(	int,		id		)
+		__field(	unsigned int,	reg		)
+		__field(	unsigned int,	val		)
+	),
+
+	TP_fast_assign(
+		__assign_str(name, codec->name);
+		__entry->id = codec->id;
+		__entry->reg = reg;
+		__entry->val = val;
+	),
+
+	TP_printk("codec=%s.%d reg=%x val=%x", __get_str(name),
+		  (int)__entry->id, (unsigned int)__entry->reg,
+		  (unsigned int)__entry->val)
+);
+
+DEFINE_EVENT(snd_soc_reg, snd_soc_reg_write,
+
+	TP_PROTO(struct snd_soc_codec *codec, unsigned int reg,
+		 unsigned int val),
+
+	TP_ARGS(codec, reg, val)
+
+);
+
+DEFINE_EVENT(snd_soc_reg, snd_soc_reg_read,
+
+	TP_PROTO(struct snd_soc_codec *codec, unsigned int reg,
+		 unsigned int val),
+
+	TP_ARGS(codec, reg, val)
+
+);
+
+DECLARE_EVENT_CLASS(snd_soc_card,
+
+	TP_PROTO(struct snd_soc_card *card, int val),
+
+	TP_ARGS(card, val),
+
+	TP_STRUCT__entry(
+		__string(	name,		card->name	)
+		__field(	int,		val		)
+	),
+
+	TP_fast_assign(
+		__assign_str(name, card->name);
+		__entry->val = val;
+	),
+
+	TP_printk("card=%s val=%d", __get_str(name), (int)__entry->val)
+);
+
+DEFINE_EVENT(snd_soc_card, snd_soc_bias_level_start,
+
+	TP_PROTO(struct snd_soc_card *card, int val),
+
+	TP_ARGS(card, val)
+
+);
+
+DEFINE_EVENT(snd_soc_card, snd_soc_bias_level_done,
+
+	TP_PROTO(struct snd_soc_card *card, int val),
+
+	TP_ARGS(card, val)
+
+);
+
+DECLARE_EVENT_CLASS(snd_soc_dapm_basic,
+
+	TP_PROTO(struct snd_soc_card *card),
+
+	TP_ARGS(card),
+
+	TP_STRUCT__entry(
+		__string(	name,	card->name	)
+	),
+
+	TP_fast_assign(
+		__assign_str(name, card->name);
+	),
+
+	TP_printk("card=%s", __get_str(name))
+);
+
+DEFINE_EVENT(snd_soc_dapm_basic, snd_soc_dapm_start,
+
+	TP_PROTO(struct snd_soc_card *card),
+
+	TP_ARGS(card)
+
+);
+
+DEFINE_EVENT(snd_soc_dapm_basic, snd_soc_dapm_done,
+
+	TP_PROTO(struct snd_soc_card *card),
+
+	TP_ARGS(card)
+
+);
+
+DECLARE_EVENT_CLASS(snd_soc_dapm_widget,
+
+	TP_PROTO(struct snd_soc_dapm_widget *w, int val),
+
+	TP_ARGS(w, val),
+
+	TP_STRUCT__entry(
+		__string(	name,	w->name		)
+		__field(	int,	val		)
+	),
+
+	TP_fast_assign(
+		__assign_str(name, w->name);
+		__entry->val = val;
+	),
+
+	TP_printk("widget=%s val=%d", __get_str(name),
+		  (int)__entry->val)
+);
+
+DEFINE_EVENT(snd_soc_dapm_widget, snd_soc_dapm_widget_power,
+
+	TP_PROTO(struct snd_soc_dapm_widget *w, int val),
+
+	TP_ARGS(w, val)
+
+);
+
+DEFINE_EVENT(snd_soc_dapm_widget, snd_soc_dapm_widget_event_start,
+
+	TP_PROTO(struct snd_soc_dapm_widget *w, int val),
+
+	TP_ARGS(w, val)
+
+);
+
+DEFINE_EVENT(snd_soc_dapm_widget, snd_soc_dapm_widget_event_done,
+
+	TP_PROTO(struct snd_soc_dapm_widget *w, int val),
+
+	TP_ARGS(w, val)
+
+);
+
+TRACE_EVENT(snd_soc_jack_irq,
+
+	TP_PROTO(const char *name),
+
+	TP_ARGS(name),
+
+	TP_STRUCT__entry(
+		__string(	name,	name		)
+	),
+
+	TP_fast_assign(
+		__assign_str(name, name);
+	),
+
+	TP_printk("%s", __get_str(name))
+);
+
+TRACE_EVENT(snd_soc_jack_report,
+
+	TP_PROTO(struct snd_soc_jack *jack, int mask, int val),
+
+	TP_ARGS(jack, mask, val),
+
+	TP_STRUCT__entry(
+		__string(	name,		jack->jack->name	)
+		__field(	int,		mask			)
+		__field(	int,		val			)
+	),
+
+	TP_fast_assign(
+		__assign_str(name, jack->jack->name);
+		__entry->mask = mask;
+		__entry->val = val;
+	),
+
+	TP_printk("jack=%s %x/%x", __get_str(name), (int)__entry->val,
+		  (int)__entry->mask)
+);
+
+TRACE_EVENT(snd_soc_jack_notify,
+
+	TP_PROTO(struct snd_soc_jack *jack, int val),
+
+	TP_ARGS(jack, val),
+
+	TP_STRUCT__entry(
+		__string(	name,		jack->jack->name	)
+		__field(	int,		val			)
+	),
+
+	TP_fast_assign(
+		__assign_str(name, jack->jack->name);
+		__entry->val = val;
+	),
+
+	TP_printk("jack=%s %x", __get_str(name), (int)__entry->val)
+);
+
+#endif /* _TRACE_ASOC_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
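As with any TRACE_EVENT header, one compilation unit in the ASoC core is
expected to define CREATE_TRACE_POINTS before including this file, after which
the generated trace_*() hooks can be called. A hedged sketch of plausible call
sites follows; the surrounding driver code and the "hp-detect" string are
hypothetical, not taken from this patch.

#define CREATE_TRACE_POINTS
#include <trace/events/asoc.h>

	/* e.g. around a codec register write in the ASoC core */
	trace_snd_soc_reg_write(codec, reg, val);

	/* e.g. in a jack interrupt handler */
	trace_snd_soc_jack_irq("hp-detect");

At runtime the events show up under the usual tracing directory, e.g.
/sys/kernel/debug/tracing/events/asoc/.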
diff --git a/include/trace/events/kvm.h b/include/trace/events/kvm.h
index 6dd3a51..46e3cd8 100644
--- a/include/trace/events/kvm.h
+++ b/include/trace/events/kvm.h
@@ -6,6 +6,36 @@
 #undef TRACE_SYSTEM
 #define TRACE_SYSTEM kvm
 
+#define ERSN(x) { KVM_EXIT_##x, "KVM_EXIT_" #x }
+
+#define kvm_trace_exit_reason						\
+	ERSN(UNKNOWN), ERSN(EXCEPTION), ERSN(IO), ERSN(HYPERCALL),	\
+	ERSN(DEBUG), ERSN(HLT), ERSN(MMIO), ERSN(IRQ_WINDOW_OPEN),	\
+	ERSN(SHUTDOWN), ERSN(FAIL_ENTRY), ERSN(INTR), ERSN(SET_TPR),	\
+	ERSN(TPR_ACCESS), ERSN(S390_SIEIC), ERSN(S390_RESET), ERSN(DCR),\
+	ERSN(NMI), ERSN(INTERNAL_ERROR), ERSN(OSI)
+
+TRACE_EVENT(kvm_userspace_exit,
+	    TP_PROTO(__u32 reason, int errno),
+	    TP_ARGS(reason, errno),
+
+	TP_STRUCT__entry(
+		__field(	__u32,		reason		)
+		__field(	int,		errno		)
+	),
+
+	TP_fast_assign(
+		__entry->reason		= reason;
+		__entry->errno		= errno;
+	),
+
+	TP_printk("reason %s (%d)",
+		  __entry->errno < 0 ?
+		  (__entry->errno == -EINTR ? "restart" : "error") :
+		  __print_symbolic(__entry->reason, kvm_trace_exit_reason),
+		  __entry->errno < 0 ? -__entry->errno : __entry->reason)
+);
+
 #if defined(__KVM_HAVE_IOAPIC)
 TRACE_EVENT(kvm_set_irq,
 	TP_PROTO(unsigned int gsi, int level, int irq_source_id),
@@ -185,6 +215,97 @@
 		  __entry->referenced ? "YOUNG" : "OLD")
 );
 
+#ifdef CONFIG_KVM_ASYNC_PF
+DECLARE_EVENT_CLASS(kvm_async_get_page_class,
+
+	TP_PROTO(u64 gva, u64 gfn),
+
+	TP_ARGS(gva, gfn),
+
+	TP_STRUCT__entry(
+		__field(__u64, gva)
+		__field(u64, gfn)
+	),
+
+	TP_fast_assign(
+		__entry->gva = gva;
+		__entry->gfn = gfn;
+	),
+
+	TP_printk("gva = %#llx, gfn = %#llx", __entry->gva, __entry->gfn)
+);
+
+DEFINE_EVENT(kvm_async_get_page_class, kvm_try_async_get_page,
+
+	TP_PROTO(u64 gva, u64 gfn),
+
+	TP_ARGS(gva, gfn)
+);
+
+DEFINE_EVENT(kvm_async_get_page_class, kvm_async_pf_doublefault,
+
+	TP_PROTO(u64 gva, u64 gfn),
+
+	TP_ARGS(gva, gfn)
+);
+
+DECLARE_EVENT_CLASS(kvm_async_pf_nopresent_ready,
+
+	TP_PROTO(u64 token, u64 gva),
+
+	TP_ARGS(token, gva),
+
+	TP_STRUCT__entry(
+		__field(__u64, token)
+		__field(__u64, gva)
+	),
+
+	TP_fast_assign(
+		__entry->token = token;
+		__entry->gva = gva;
+	),
+
+	TP_printk("token %#llx gva %#llx", __entry->token, __entry->gva)
+
+);
+
+DEFINE_EVENT(kvm_async_pf_nopresent_ready, kvm_async_pf_not_present,
+
+	TP_PROTO(u64 token, u64 gva),
+
+	TP_ARGS(token, gva)
+);
+
+DEFINE_EVENT(kvm_async_pf_nopresent_ready, kvm_async_pf_ready,
+
+	TP_PROTO(u64 token, u64 gva),
+
+	TP_ARGS(token, gva)
+);
+
+TRACE_EVENT(
+	kvm_async_pf_completed,
+	TP_PROTO(unsigned long address, struct page *page, u64 gva),
+	TP_ARGS(address, page, gva),
+
+	TP_STRUCT__entry(
+		__field(unsigned long, address)
+		__field(pfn_t, pfn)
+		__field(u64, gva)
+		),
+
+	TP_fast_assign(
+		__entry->address = address;
+		__entry->pfn = page ? page_to_pfn(page) : 0;
+		__entry->gva = gva;
+		),
+
+	TP_printk("gva %#llx address %#lx pfn %#llx",  __entry->gva,
+		  __entry->address, __entry->pfn)
+);
+
+#endif
+
 #endif /* _TRACE_KVM_MAIN_H */
 
 /* This part must be outside protection */
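The new kvm_userspace_exit event pairs the numeric exit reason with a symbolic
name via __print_symbolic() and the ERSN() table above. It is presumably
emitted where a vcpu returns to userspace; a hedged sketch of such a call site
(the surrounding code is not part of this hunk):

	/* r is the value about to be returned from the vcpu ioctl */
	trace_kvm_userspace_exit(vcpu->run->exit_reason, r);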
diff --git a/include/trace/events/regulator.h b/include/trace/events/regulator.h
new file mode 100644
index 0000000..37502a7
--- /dev/null
+++ b/include/trace/events/regulator.h
@@ -0,0 +1,141 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM regulator
+
+#if !defined(_TRACE_REGULATOR_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_REGULATOR_H
+
+#include <linux/ktime.h>
+#include <linux/tracepoint.h>
+
+/*
+ * Events which just log themselves and the regulator name for enable/disable
+ * type tracking.
+ */
+DECLARE_EVENT_CLASS(regulator_basic,
+
+	TP_PROTO(const char *name),
+
+	TP_ARGS(name),
+
+	TP_STRUCT__entry(
+		__string(	name,	name	)
+	),
+
+	TP_fast_assign(
+		__assign_str(name, name);
+	),
+
+	TP_printk("name=%s", __get_str(name))
+
+);
+
+DEFINE_EVENT(regulator_basic, regulator_enable,
+
+	TP_PROTO(const char *name),
+
+	TP_ARGS(name)
+
+);
+
+DEFINE_EVENT(regulator_basic, regulator_enable_delay,
+
+	TP_PROTO(const char *name),
+
+	TP_ARGS(name)
+
+);
+
+DEFINE_EVENT(regulator_basic, regulator_enable_complete,
+
+	TP_PROTO(const char *name),
+
+	TP_ARGS(name)
+
+);
+
+DEFINE_EVENT(regulator_basic, regulator_disable,
+
+	TP_PROTO(const char *name),
+
+	TP_ARGS(name)
+
+);
+
+DEFINE_EVENT(regulator_basic, regulator_disable_complete,
+
+	TP_PROTO(const char *name),
+
+	TP_ARGS(name)
+
+);
+
+/*
+ * Events that take a range of numerical values, mostly for voltages
+ * and so on.
+ */
+DECLARE_EVENT_CLASS(regulator_range,
+
+	TP_PROTO(const char *name, int min, int max),
+
+	TP_ARGS(name, min, max),
+
+	TP_STRUCT__entry(
+		__string(	name,		name		)
+		__field(        int,            min             )
+		__field(        int,            max             )
+	),
+
+	TP_fast_assign(
+		__assign_str(name, name);
+		__entry->min  = min;
+		__entry->max  = max;
+	),
+
+	TP_printk("name=%s (%d-%d)", __get_str(name),
+		  (int)__entry->min, (int)__entry->max)
+);
+
+DEFINE_EVENT(regulator_range, regulator_set_voltage,
+
+	TP_PROTO(const char *name, int min, int max),
+
+	TP_ARGS(name, min, max)
+
+);
+
+
+/*
+ * Events that take a single value, mostly for readback and refcounts.
+ */
+DECLARE_EVENT_CLASS(regulator_value,
+
+	TP_PROTO(const char *name, unsigned int val),
+
+	TP_ARGS(name, val),
+
+	TP_STRUCT__entry(
+		__string(	name,		name		)
+		__field(        unsigned int,   val             )
+	),
+
+	TP_fast_assign(
+		__assign_str(name, name);
+		__entry->val  = val;
+	),
+
+	TP_printk("name=%s, val=%u", __get_str(name),
+		  (int)__entry->val)
+);
+
+DEFINE_EVENT(regulator_value, regulator_set_voltage_complete,
+
+	TP_PROTO(const char *name, unsigned int value),
+
+	TP_ARGS(name, value)
+
+);
+
+#endif /* _TRACE_REGULATOR_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
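These events are presumably emitted from the regulator core around
enable/disable and voltage changes. A hedged sketch of how such hooks might
look; the regulator names and the name helper are illustrative, not defined by
this patch.

	trace_regulator_enable(rdev_get_name(rdev));		/* before powering on */
	/* ... hardware enable and ramp delay ... */
	trace_regulator_enable_complete(rdev_get_name(rdev));

	trace_regulator_set_voltage("vdd_example", 1100000, 1200000);	/* range in uV */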
diff --git a/include/trace/events/skb.h b/include/trace/events/skb.h
index 75ce9d5..f10293c 100644
--- a/include/trace/events/skb.h
+++ b/include/trace/events/skb.h
@@ -25,9 +25,7 @@
 
 	TP_fast_assign(
 		__entry->skbaddr = skb;
-		if (skb) {
-			__entry->protocol = ntohs(skb->protocol);
-		}
+		__entry->protocol = ntohs(skb->protocol);
 		__entry->location = location;
 	),
 
diff --git a/include/xen/xenbus.h b/include/xen/xenbus.h
index 43e2d7d..7a1d15f 100644
--- a/include/xen/xenbus.h
+++ b/include/xen/xenbus.h
@@ -94,7 +94,7 @@
 	int (*remove)(struct xenbus_device *dev);
 	int (*suspend)(struct xenbus_device *dev, pm_message_t state);
 	int (*resume)(struct xenbus_device *dev);
-	int (*uevent)(struct xenbus_device *, char **, int, char *, int);
+	int (*uevent)(struct xenbus_device *, struct kobj_uevent_env *);
 	struct device_driver driver;
 	int (*read_otherend_details)(struct xenbus_device *dev);
 	int (*is_ready)(struct xenbus_device *dev);
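With this change a xenbus driver's uevent hook takes the standard
kobj_uevent_env and can use add_uevent_var() instead of filling a raw buffer.
A hedged sketch of a driver-side implementation under the new prototype; the
driver name and the exported variable are hypothetical.

static int example_uevent(struct xenbus_device *xdev,
			  struct kobj_uevent_env *env)
{
	/* add_uevent_var() does the length checking internally */
	return add_uevent_var(env, "MODALIAS=xen:%s", xdev->devicetype);
}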
diff --git a/init/Kconfig b/init/Kconfig
index 8dfd094..4f6cdbf 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -130,13 +130,16 @@
 config HAVE_KERNEL_LZMA
 	bool
 
+config HAVE_KERNEL_XZ
+	bool
+
 config HAVE_KERNEL_LZO
 	bool
 
 choice
 	prompt "Kernel compression mode"
 	default KERNEL_GZIP
-	depends on HAVE_KERNEL_GZIP || HAVE_KERNEL_BZIP2 || HAVE_KERNEL_LZMA || HAVE_KERNEL_LZO
+	depends on HAVE_KERNEL_GZIP || HAVE_KERNEL_BZIP2 || HAVE_KERNEL_LZMA || HAVE_KERNEL_XZ || HAVE_KERNEL_LZO
 	help
 	  The linux kernel is a kind of self-extracting executable.
 	  Several compression algorithms are available, which differ
@@ -181,6 +184,21 @@
 	  two. Compression is slowest.	The kernel size is about 33%
 	  smaller with LZMA in comparison to gzip.
 
+config KERNEL_XZ
+	bool "XZ"
+	depends on HAVE_KERNEL_XZ
+	help
+	  XZ uses the LZMA2 algorithm and instruction set specific
+	  BCJ filters which can improve compression ratio of executable
+	  code. The size of the kernel is about 30% smaller with XZ in
+	  comparison to gzip. On architectures for which there is a BCJ
+	  filter (i386, x86_64, ARM, IA-64, PowerPC, and SPARC), XZ
+	  will create a few percent smaller kernel than plain LZMA.
+
+	  The speed is about the same as with LZMA: The decompression
+	  speed of XZ is better than that of bzip2 but worse than gzip
+	  and LZO. Compression is slow.
+
 config KERNEL_LZO
 	bool "LZO"
 	depends on HAVE_KERNEL_LZO
@@ -673,7 +691,7 @@
 	help
 	  Memory Resource Controller Swap Extension comes with its price in
 	  a bigger memory consumption. General purpose distribution kernels
-	  which want to enable the feautre but keep it disabled by default
+	  which want to enable the feature but keep it disabled by default
 	  and let the user enable it by swapaccount boot command line
 	  parameter should have this option unselected.
 	  For those who want to have the feature enabled by default should
diff --git a/kernel/Makefile b/kernel/Makefile
index 33e0a39..353d3fe 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -43,7 +43,7 @@
 obj-$(CONFIG_DEBUG_RT_MUTEXES) += rtmutex-debug.o
 obj-$(CONFIG_RT_MUTEX_TESTER) += rtmutex-tester.o
 obj-$(CONFIG_GENERIC_ISA_DMA) += dma.o
-obj-$(CONFIG_USE_GENERIC_SMP_HELPERS) += smp.o
+obj-$(CONFIG_SMP) += smp.o
 ifneq ($(CONFIG_SMP),y)
 obj-y += up.o
 endif
@@ -100,6 +100,7 @@
 obj-$(CONFIG_TRACING) += trace/
 obj-$(CONFIG_X86_DS) += trace/
 obj-$(CONFIG_RING_BUFFER) += trace/
+obj-$(CONFIG_TRACEPOINTS) += trace/
 obj-$(CONFIG_SMP) += sched_cpupri.o
 obj-$(CONFIG_IRQ_WORK) += irq_work.o
 obj-$(CONFIG_PERF_EVENTS) += perf_event.o
diff --git a/kernel/audit.c b/kernel/audit.c
index 77770a0..e495624 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -400,7 +400,7 @@
 	if (err < 0) {
 		BUG_ON(err != -ECONNREFUSED); /* Shouldn't happen */
 		printk(KERN_ERR "audit: *NO* daemon at audit_pid=%d\n", audit_pid);
-		audit_log_lost("auditd dissapeared\n");
+		audit_log_lost("auditd disappeared\n");
 		audit_pid = 0;
 		/* we might get lucky and get this in the next auditd */
 		audit_hold_skb(skb);
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 51cddc1..5c5f4cc 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -763,8 +763,6 @@
  * -> cgroup_mkdir.
  */
 
-static struct dentry *cgroup_lookup(struct inode *dir,
-			struct dentry *dentry, struct nameidata *nd);
 static int cgroup_mkdir(struct inode *dir, struct dentry *dentry, int mode);
 static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry);
 static int cgroup_populate_dir(struct cgroup *cgrp);
@@ -1451,6 +1449,10 @@
 
 static int cgroup_get_rootdir(struct super_block *sb)
 {
+	static const struct dentry_operations cgroup_dops = {
+		.d_iput = cgroup_diput,
+	};
+
 	struct inode *inode =
 		cgroup_new_inode(S_IFDIR | S_IRUGO | S_IXUGO | S_IWUSR, sb);
 	struct dentry *dentry;
@@ -1468,6 +1470,8 @@
 		return -ENOMEM;
 	}
 	sb->s_root = dentry;
+	/* for everything else we want ->d_op set */
+	sb->s_d_op = &cgroup_dops;
 	return 0;
 }
 
@@ -2191,7 +2195,7 @@
 };
 
 static const struct inode_operations cgroup_dir_inode_operations = {
-	.lookup = cgroup_lookup,
+	.lookup = simple_lookup,
 	.mkdir = cgroup_mkdir,
 	.rmdir = cgroup_rmdir,
 	.rename = cgroup_rename,
@@ -2207,26 +2211,6 @@
 	return __d_cft(file->f_dentry);
 }
 
-static int cgroup_delete_dentry(const struct dentry *dentry)
-{
-	return 1;
-}
-
-static struct dentry *cgroup_lookup(struct inode *dir,
-			struct dentry *dentry, struct nameidata *nd)
-{
-	static const struct dentry_operations cgroup_dentry_operations = {
-		.d_delete = cgroup_delete_dentry,
-		.d_iput = cgroup_diput,
-	};
-
-	if (dentry->d_name.len > NAME_MAX)
-		return ERR_PTR(-ENAMETOOLONG);
-	d_set_d_op(dentry, &cgroup_dentry_operations);
-	d_add(dentry, NULL);
-	return NULL;
-}
-
 static int cgroup_create_file(struct dentry *dentry, mode_t mode,
 				struct super_block *sb)
 {
diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
index a6e7297..bd3e8e2 100644
--- a/kernel/debug/kdb/kdb_main.c
+++ b/kernel/debug/kdb/kdb_main.c
@@ -2914,7 +2914,7 @@
 	}
 }
 
-/* Intialize kdb_printf, breakpoint tables and kdb state */
+/* Initialize kdb_printf, breakpoint tables and kdb state */
 void __init kdb_init(int lvl)
 {
 	static int kdb_init_lvl = KDB_NOT_INITIALIZED;
diff --git a/kernel/exit.c b/kernel/exit.c
index 89c7486..f9a45eb 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -994,6 +994,15 @@
 	exit_fs(tsk);
 	check_stack_usage();
 	exit_thread();
+
+	/*
+	 * Flush inherited counters to the parent - before the parent
+	 * gets woken up by child-exit notifications.
+	 *
+	 * because of cgroup mode, must be called before cgroup_exit()
+	 */
+	perf_event_exit_task(tsk);
+
 	cgroup_exit(tsk, 1);
 
 	if (group_dead)
@@ -1007,11 +1016,6 @@
 	 * FIXME: do that only when needed, using sched_exit tracepoint
 	 */
 	flush_ptrace_hw_breakpoint(tsk);
-	/*
-	 * Flush inherited counters to the parent - before the parent
-	 * gets woken up by child-exit notifications.
-	 */
-	perf_event_exit_task(tsk);
 
 	exit_notify(tsk, group_dead);
 #ifdef CONFIG_NUMA
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index 45da2b6..0c8d7c0 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -1745,7 +1745,7 @@
 	}
 
 	/*
-	 * A NULL parameter means "inifinte"
+	 * A NULL parameter means "infinite"
 	 */
 	if (!expires) {
 		schedule();
diff --git a/kernel/kexec.c b/kernel/kexec.c
index b55045b..ec19b92 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -163,7 +163,7 @@
 	 * just verifies it is an address we can use.
 	 *
 	 * Since the kernel does everything in page size chunks ensure
-	 * the destination addreses are page aligned.  Too many
+	 * the destination addresses are page aligned.  Too many
 	 * special cases crop of when we don't do this.  The most
 	 * insidious is getting overlapping destination addresses
 	 * simply because addresses are changed to page size
diff --git a/kernel/latencytop.c b/kernel/latencytop.c
index 17110a4..ee74b35 100644
--- a/kernel/latencytop.c
+++ b/kernel/latencytop.c
@@ -241,24 +241,19 @@
 	seq_puts(m, "Latency Top version : v0.1\n");
 
 	for (i = 0; i < MAXLR; i++) {
-		if (latency_record[i].backtrace[0]) {
+		struct latency_record *lr = &latency_record[i];
+
+		if (lr->backtrace[0]) {
 			int q;
-			seq_printf(m, "%i %lu %lu ",
-				latency_record[i].count,
-				latency_record[i].time,
-				latency_record[i].max);
+			seq_printf(m, "%i %lu %lu",
+				   lr->count, lr->time, lr->max);
 			for (q = 0; q < LT_BACKTRACEDEPTH; q++) {
-				char sym[KSYM_SYMBOL_LEN];
-				char *c;
-				if (!latency_record[i].backtrace[q])
+				unsigned long bt = lr->backtrace[q];
+				if (!bt)
 					break;
-				if (latency_record[i].backtrace[q] == ULONG_MAX)
+				if (bt == ULONG_MAX)
 					break;
-				sprint_symbol(sym, latency_record[i].backtrace[q]);
-				c = strchr(sym, '+');
-				if (c)
-					*c = 0;
-				seq_printf(m, "%s ", sym);
+				seq_printf(m, " %ps", (void *)bt);
 			}
 			seq_printf(m, "\n");
 		}
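The rewrite above relies on the %ps printk extension, which prints the bare
symbol name for a kernel text address (%pS would append the offset), replacing
the manual sprint_symbol()/strchr() dance. A minimal sketch:

	unsigned long addr = lr->backtrace[q];	/* a kernel text address */
	seq_printf(m, " %ps", (void *)addr);	/* prints e.g. " do_sys_open" */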
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 11847bf..05ebe84 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -38,6 +38,12 @@
 
 #include <asm/irq_regs.h>
 
+enum event_type_t {
+	EVENT_FLEXIBLE = 0x1,
+	EVENT_PINNED = 0x2,
+	EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,
+};
+
 atomic_t perf_task_events __read_mostly;
 static atomic_t nr_mmap_events __read_mostly;
 static atomic_t nr_comm_events __read_mostly;
@@ -65,6 +71,12 @@
 
 static atomic64_t perf_event_id;
 
+static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
+			      enum event_type_t event_type);
+
+static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
+			     enum event_type_t event_type);
+
 void __weak perf_event_print_debug(void)	{ }
 
 extern __weak const char *perf_pmu_name(void)
@@ -72,6 +84,11 @@
 	return "pmu";
 }
 
+static inline u64 perf_clock(void)
+{
+	return local_clock();
+}
+
 void perf_pmu_disable(struct pmu *pmu)
 {
 	int *count = this_cpu_ptr(pmu->pmu_disable_count);
@@ -240,11 +257,6 @@
 	put_ctx(ctx);
 }
 
-static inline u64 perf_clock(void)
-{
-	return local_clock();
-}
-
 /*
  * Update the record of the current time in a context.
  */
@@ -256,6 +268,12 @@
 	ctx->timestamp = now;
 }
 
+static u64 perf_event_time(struct perf_event *event)
+{
+	struct perf_event_context *ctx = event->ctx;
+	return ctx ? ctx->time : 0;
+}
+
 /*
  * Update the total_time_enabled and total_time_running fields for a event.
  */
@@ -269,7 +287,7 @@
 		return;
 
 	if (ctx->is_active)
-		run_end = ctx->time;
+		run_end = perf_event_time(event);
 	else
 		run_end = event->tstamp_stopped;
 
@@ -278,7 +296,7 @@
 	if (event->state == PERF_EVENT_STATE_INACTIVE)
 		run_end = event->tstamp_stopped;
 	else
-		run_end = ctx->time;
+		run_end = perf_event_time(event);
 
 	event->total_time_running = run_end - event->tstamp_running;
 }
@@ -534,6 +552,7 @@
 		  struct perf_cpu_context *cpuctx,
 		  struct perf_event_context *ctx)
 {
+	u64 tstamp = perf_event_time(event);
 	u64 delta;
 	/*
 	 * An event which could not be activated because of
@@ -545,7 +564,7 @@
 	    && !event_filter_match(event)) {
 		delta = ctx->time - event->tstamp_stopped;
 		event->tstamp_running += delta;
-		event->tstamp_stopped = ctx->time;
+		event->tstamp_stopped = tstamp;
 	}
 
 	if (event->state != PERF_EVENT_STATE_ACTIVE)
@@ -556,7 +575,7 @@
 		event->pending_disable = 0;
 		event->state = PERF_EVENT_STATE_OFF;
 	}
-	event->tstamp_stopped = ctx->time;
+	event->tstamp_stopped = tstamp;
 	event->pmu->del(event, 0);
 	event->oncpu = -1;
 
@@ -768,6 +787,8 @@
 		 struct perf_cpu_context *cpuctx,
 		 struct perf_event_context *ctx)
 {
+	u64 tstamp = perf_event_time(event);
+
 	if (event->state <= PERF_EVENT_STATE_OFF)
 		return 0;
 
@@ -784,9 +805,9 @@
 		return -EAGAIN;
 	}
 
-	event->tstamp_running += ctx->time - event->tstamp_stopped;
+	event->tstamp_running += tstamp - event->tstamp_stopped;
 
-	event->shadow_ctx_time = ctx->time - ctx->timestamp;
+	event->shadow_ctx_time = tstamp - ctx->timestamp;
 
 	if (!is_software_event(event))
 		cpuctx->active_oncpu++;
@@ -898,11 +919,13 @@
 static void add_event_to_ctx(struct perf_event *event,
 			       struct perf_event_context *ctx)
 {
+	u64 tstamp = perf_event_time(event);
+
 	list_add_event(event, ctx);
 	perf_group_attach(event);
-	event->tstamp_enabled = ctx->time;
-	event->tstamp_running = ctx->time;
-	event->tstamp_stopped = ctx->time;
+	event->tstamp_enabled = tstamp;
+	event->tstamp_running = tstamp;
+	event->tstamp_stopped = tstamp;
 }
 
 /*
@@ -937,7 +960,7 @@
 
 	add_event_to_ctx(event, ctx);
 
-	if (event->cpu != -1 && event->cpu != smp_processor_id())
+	if (!event_filter_match(event))
 		goto unlock;
 
 	/*
@@ -1042,14 +1065,13 @@
 					struct perf_event_context *ctx)
 {
 	struct perf_event *sub;
+	u64 tstamp = perf_event_time(event);
 
 	event->state = PERF_EVENT_STATE_INACTIVE;
-	event->tstamp_enabled = ctx->time - event->total_time_enabled;
+	event->tstamp_enabled = tstamp - event->total_time_enabled;
 	list_for_each_entry(sub, &event->sibling_list, group_entry) {
-		if (sub->state >= PERF_EVENT_STATE_INACTIVE) {
-			sub->tstamp_enabled =
-				ctx->time - sub->total_time_enabled;
-		}
+		if (sub->state >= PERF_EVENT_STATE_INACTIVE)
+			sub->tstamp_enabled = tstamp - sub->total_time_enabled;
 	}
 }
 
@@ -1082,7 +1104,7 @@
 		goto unlock;
 	__perf_event_mark_enabled(event, ctx);
 
-	if (event->cpu != -1 && event->cpu != smp_processor_id())
+	if (!event_filter_match(event))
 		goto unlock;
 
 	/*
@@ -1193,12 +1215,6 @@
 	return 0;
 }
 
-enum event_type_t {
-	EVENT_FLEXIBLE = 0x1,
-	EVENT_PINNED = 0x2,
-	EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,
-};
-
 static void ctx_sched_out(struct perf_event_context *ctx,
 			  struct perf_cpu_context *cpuctx,
 			  enum event_type_t event_type)
@@ -1435,7 +1451,7 @@
 	list_for_each_entry(event, &ctx->pinned_groups, group_entry) {
 		if (event->state <= PERF_EVENT_STATE_OFF)
 			continue;
-		if (event->cpu != -1 && event->cpu != smp_processor_id())
+		if (!event_filter_match(event))
 			continue;
 
 		if (group_can_go_on(event, cpuctx, 1))
@@ -1467,7 +1483,7 @@
 		 * Listen to the 'cpu' scheduling filter constraint
 		 * of events:
 		 */
-		if (event->cpu != -1 && event->cpu != smp_processor_id())
+		if (!event_filter_match(event))
 			continue;
 
 		if (group_can_go_on(event, cpuctx, can_add_hw)) {
@@ -1694,7 +1710,7 @@
 		if (event->state != PERF_EVENT_STATE_ACTIVE)
 			continue;
 
-		if (event->cpu != -1 && event->cpu != smp_processor_id())
+		if (!event_filter_match(event))
 			continue;
 
 		hwc = &event->hw;
@@ -3893,7 +3909,7 @@
 	if (event->state < PERF_EVENT_STATE_INACTIVE)
 		return 0;
 
-	if (event->cpu != -1 && event->cpu != smp_processor_id())
+	if (!event_filter_match(event))
 		return 0;
 
 	if (event->attr.comm || event->attr.mmap ||
@@ -4030,7 +4046,7 @@
 	if (event->state < PERF_EVENT_STATE_INACTIVE)
 		return 0;
 
-	if (event->cpu != -1 && event->cpu != smp_processor_id())
+	if (!event_filter_match(event))
 		return 0;
 
 	if (event->attr.comm)
@@ -4178,7 +4194,7 @@
 	if (event->state < PERF_EVENT_STATE_INACTIVE)
 		return 0;
 
-	if (event->cpu != -1 && event->cpu != smp_processor_id())
+	if (!event_filter_match(event))
 		return 0;
 
 	if ((!executable && event->attr.mmap_data) ||
@@ -4648,7 +4664,7 @@
 }
 EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context);
 
-void inline perf_swevent_put_recursion_context(int rctx)
+inline void perf_swevent_put_recursion_context(int rctx)
 {
 	struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
 
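The repeated open-coded cpu checks are replaced by event_filter_match(), whose
definition is not visible in this hunk. Judging from the lines it replaces, it
presumably looks roughly like the sketch below; this is an assumption, not
code taken from the patch.

static inline int event_filter_match(struct perf_event *event)
{
	/* a per-cpu event only counts when running on its target cpu */
	return event->cpu == -1 || event->cpu == smp_processor_id();
}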
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
index 870f72b..1832bd2 100644
--- a/kernel/power/hibernate.c
+++ b/kernel/power/hibernate.c
@@ -51,14 +51,14 @@
 
 static int hibernation_mode = HIBERNATION_SHUTDOWN;
 
-static struct platform_hibernation_ops *hibernation_ops;
+static const struct platform_hibernation_ops *hibernation_ops;
 
 /**
  * hibernation_set_ops - set the global hibernate operations
  * @ops: the hibernation operations to use in subsequent hibernation transitions
  */
 
-void hibernation_set_ops(struct platform_hibernation_ops *ops)
+void hibernation_set_ops(const struct platform_hibernation_ops *ops)
 {
 	if (ops && !(ops->begin && ops->end &&  ops->pre_snapshot
 	    && ops->prepare && ops->finish && ops->enter && ops->pre_restore
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index 8850df6..de6f86b 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -31,13 +31,13 @@
 	[PM_SUSPEND_MEM]	= "mem",
 };
 
-static struct platform_suspend_ops *suspend_ops;
+static const struct platform_suspend_ops *suspend_ops;
 
 /**
  *	suspend_set_ops - Set the global suspend method table.
  *	@ops:	Pointer to ops structure.
  */
-void suspend_set_ops(struct platform_suspend_ops *ops)
+void suspend_set_ops(const struct platform_suspend_ops *ops)
 {
 	mutex_lock(&pm_mutex);
 	suspend_ops = ops;
diff --git a/kernel/power/swap.c b/kernel/power/swap.c
index 8c7e483..6942588 100644
--- a/kernel/power/swap.c
+++ b/kernel/power/swap.c
@@ -888,7 +888,7 @@
 /**
  *	swsusp_read - read the hibernation image.
  *	@flags_p: flags passed by the "frozen" kernel in the image header should
- *		  be written into this memeory location
+ *		  be written into this memory location
  */
 
 int swsusp_read(unsigned int *flags_p)
diff --git a/kernel/printk.c b/kernel/printk.c
index f64b899..53d9a9e 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -39,6 +39,7 @@
 #include <linux/syslog.h>
 #include <linux/cpu.h>
 #include <linux/notifier.h>
+#include <linux/rculist.h>
 
 #include <asm/uaccess.h>
 
@@ -1502,7 +1503,7 @@
 	/* Don't allow registering multiple times */
 	if (!dumper->registered) {
 		dumper->registered = 1;
-		list_add_tail(&dumper->list, &dump_list);
+		list_add_tail_rcu(&dumper->list, &dump_list);
 		err = 0;
 	}
 	spin_unlock_irqrestore(&dump_list_lock, flags);
@@ -1526,29 +1527,16 @@
 	spin_lock_irqsave(&dump_list_lock, flags);
 	if (dumper->registered) {
 		dumper->registered = 0;
-		list_del(&dumper->list);
+		list_del_rcu(&dumper->list);
 		err = 0;
 	}
 	spin_unlock_irqrestore(&dump_list_lock, flags);
+	synchronize_rcu();
 
 	return err;
 }
 EXPORT_SYMBOL_GPL(kmsg_dump_unregister);
 
-static const char * const kmsg_reasons[] = {
-	[KMSG_DUMP_OOPS]	= "oops",
-	[KMSG_DUMP_PANIC]	= "panic",
-	[KMSG_DUMP_KEXEC]	= "kexec",
-};
-
-static const char *kmsg_to_str(enum kmsg_dump_reason reason)
-{
-	if (reason >= ARRAY_SIZE(kmsg_reasons) || reason < 0)
-		return "unknown";
-
-	return kmsg_reasons[reason];
-}
-
 /**
  * kmsg_dump - dump kernel log to kernel message dumpers.
  * @reason: the reason (oops, panic etc) for dumping
@@ -1587,13 +1575,9 @@
 		l2 = chars;
 	}
 
-	if (!spin_trylock_irqsave(&dump_list_lock, flags)) {
-		printk(KERN_ERR "dump_kmsg: dump list lock is held during %s, skipping dump\n",
-				kmsg_to_str(reason));
-		return;
-	}
-	list_for_each_entry(dumper, &dump_list, list)
+	rcu_read_lock();
+	list_for_each_entry_rcu(dumper, &dump_list, list)
 		dumper->dump(dumper, reason, s1, l1, s2, l2);
-	spin_unlock_irqrestore(&dump_list_lock, flags);
+	rcu_read_unlock();
 }
 #endif
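The dumper list is now walked under rcu_read_lock(), so kmsg_dump() no longer
skips output when the list lock happens to be contended. Clients register the
same way as before; a hedged sketch matching the callback signature used above
(names hypothetical):

static void example_dump(struct kmsg_dumper *dumper,
			 enum kmsg_dump_reason reason,
			 const char *s1, unsigned long l1,
			 const char *s2, unsigned long l2)
{
	/* s1/s2 are the two halves of the circular log buffer */
}

static struct kmsg_dumper example_dumper = {
	.dump = example_dump,
};

	/* at init time */
	kmsg_dump_register(&example_dumper);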
diff --git a/kernel/sched.c b/kernel/sched.c
index a0eb094..ea3e5ef 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2505,7 +2505,7 @@
  * try_to_wake_up_local - try to wake up a local task with rq lock held
  * @p: the thread to be awakened
  *
- * Put @p on the run-queue if it's not alredy there.  The caller must
+ * Put @p on the run-queue if it's not already there.  The caller must
  * ensure that this_rq() is locked, @p is bound to this_rq() and not
  * the current task.  this_rq() stays locked over invocation.
  */
diff --git a/kernel/smp.c b/kernel/smp.c
index 12ed8b0..4ec30e0 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -13,6 +13,7 @@
 #include <linux/smp.h>
 #include <linux/cpu.h>
 
+#ifdef CONFIG_USE_GENERIC_SMP_HELPERS
 static struct {
 	struct list_head	queue;
 	raw_spinlock_t		lock;
@@ -529,3 +530,21 @@
 {
 	raw_spin_unlock_irq(&call_function.lock);
 }
+#endif /* CONFIG_USE_GENERIC_SMP_HELPERS */
+
+/*
+ * Call a function on all processors
+ */
+int on_each_cpu(void (*func) (void *info), void *info, int wait)
+{
+	int ret = 0;
+
+	preempt_disable();
+	ret = smp_call_function(func, info, wait);
+	local_irq_disable();
+	func(info);
+	local_irq_enable();
+	preempt_enable();
+	return ret;
+}
+EXPORT_SYMBOL(on_each_cpu);
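on_each_cpu() now lives in kernel/smp.c unconditionally (UP builds pick up
up.o per the Makefile change above). A minimal usage sketch; the callback and
counter are hypothetical.

static void bump(void *info)
{
	atomic_inc((atomic_t *)info);	/* runs on every cpu with IRQs off locally */
}

	atomic_t hits = ATOMIC_INIT(0);

	on_each_cpu(bump, &hits, 1);	/* wait until every cpu has run it */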
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 0823778..68eb5ef 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -885,25 +885,6 @@
 }
 early_initcall(spawn_ksoftirqd);
 
-#ifdef CONFIG_SMP
-/*
- * Call a function on all processors
- */
-int on_each_cpu(void (*func) (void *info), void *info, int wait)
-{
-	int ret = 0;
-
-	preempt_disable();
-	ret = smp_call_function(func, info, wait);
-	local_irq_disable();
-	func(info);
-	local_irq_enable();
-	preempt_enable();
-	return ret;
-}
-EXPORT_SYMBOL(on_each_cpu);
-#endif
-
 /*
  * [ These __weak aliases are kept in a separate compilation unit, so that
  *   GCC does not inline them incorrectly. ]
diff --git a/kernel/sys.c b/kernel/sys.c
index 2745dcd..31b71a2 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -43,6 +43,8 @@
 #include <linux/kprobes.h>
 #include <linux/user_namespace.h>
 
+#include <linux/kmsg_dump.h>
+
 #include <asm/uaccess.h>
 #include <asm/io.h>
 #include <asm/unistd.h>
@@ -285,6 +287,7 @@
  */
 void emergency_restart(void)
 {
+	kmsg_dump(KMSG_DUMP_EMERG);
 	machine_emergency_restart();
 }
 EXPORT_SYMBOL_GPL(emergency_restart);
@@ -312,6 +315,7 @@
 		printk(KERN_EMERG "Restarting system.\n");
 	else
 		printk(KERN_EMERG "Restarting system with command '%s'.\n", cmd);
+	kmsg_dump(KMSG_DUMP_RESTART);
 	machine_restart(cmd);
 }
 EXPORT_SYMBOL_GPL(kernel_restart);
@@ -333,6 +337,7 @@
 	kernel_shutdown_prepare(SYSTEM_HALT);
 	sysdev_shutdown();
 	printk(KERN_EMERG "System halted.\n");
+	kmsg_dump(KMSG_DUMP_HALT);
 	machine_halt();
 }
 
@@ -351,6 +356,7 @@
 	disable_nonboot_cpus();
 	sysdev_shutdown();
 	printk(KERN_EMERG "Power down.\n");
+	kmsg_dump(KMSG_DUMP_POWEROFF);
 	machine_power_off();
 }
 EXPORT_SYMBOL_GPL(kernel_power_off);
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index ae5cbb1..bc86bb3 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -24,6 +24,7 @@
 #include <linux/slab.h>
 #include <linux/sysctl.h>
 #include <linux/signal.h>
+#include <linux/printk.h>
 #include <linux/proc_fs.h>
 #include <linux/security.h>
 #include <linux/ctype.h>
@@ -245,10 +246,6 @@
 		.mode		= 0555,
 		.child		= dev_table,
 	},
-/*
- * NOTE: do not add new entries to this table unless you have read
- * Documentation/sysctl/ctl_unnumbered.txt
- */
 	{ }
 };
 
@@ -710,6 +707,15 @@
 		.extra1		= &zero,
 		.extra2		= &one,
 	},
+	{
+		.procname	= "kptr_restrict",
+		.data		= &kptr_restrict,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec_minmax,
+		.extra1		= &zero,
+		.extra2		= &two,
+	},
 #endif
 	{
 		.procname	= "ngroups_max",
@@ -962,10 +968,6 @@
 		.proc_handler	= proc_dointvec,
 	},
 #endif
-/*
- * NOTE: do not add new entries to this table unless you have read
- * Documentation/sysctl/ctl_unnumbered.txt
- */
 	{ }
 };
 
@@ -1326,11 +1328,6 @@
 		.extra2		= &one,
 	},
 #endif
-
-/*
- * NOTE: do not add new entries to this table unless you have read
- * Documentation/sysctl/ctl_unnumbered.txt
- */
 	{ }
 };
 
@@ -1486,10 +1483,6 @@
 		.proc_handler	= &pipe_proc_fn,
 		.extra1		= &pipe_min_size,
 	},
-/*
- * NOTE: do not add new entries to this table unless you have read
- * Documentation/sysctl/ctl_unnumbered.txt
- */
 	{ }
 };
 
@@ -2899,7 +2892,7 @@
 	}
 }
 
-#else /* CONFIG_PROC_FS */
+#else /* CONFIG_PROC_SYSCTL */
 
 int proc_dostring(struct ctl_table *table, int write,
 		  void __user *buffer, size_t *lenp, loff_t *ppos)
@@ -2951,7 +2944,7 @@
 }
 
 
-#endif /* CONFIG_PROC_FS */
+#endif /* CONFIG_PROC_SYSCTL */
 
 /*
  * No sense putting this after each symbol definition, twice,
diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c
index 4b2545a..b875bed 100644
--- a/kernel/sysctl_binary.c
+++ b/kernel/sysctl_binary.c
@@ -1192,7 +1192,7 @@
 
 		buf[result] = '\0';
 
-		/* Convert the decnet addresss to binary */
+		/* Convert the decnet address to binary */
 		result = -EIO;
 		nodep = strchr(buf, '.') + 1;
 		if (!nodep)
diff --git a/kernel/taskstats.c b/kernel/taskstats.c
index 69691eb..3971c6b 100644
--- a/kernel/taskstats.c
+++ b/kernel/taskstats.c
@@ -348,7 +348,7 @@
 	return ret;
 }
 
-#ifdef CONFIG_IA64
+#if defined(CONFIG_64BIT) && !defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
 #define TASKSTATS_NEEDS_PADDING 1
 #endif
 
diff --git a/kernel/time.c b/kernel/time.c
index ba9b338..3217435 100644
--- a/kernel/time.c
+++ b/kernel/time.c
@@ -238,7 +238,7 @@
  * Avoid unnecessary multiplications/divisions in the
  * two most common HZ cases:
  */
-unsigned int inline jiffies_to_msecs(const unsigned long j)
+inline unsigned int jiffies_to_msecs(const unsigned long j)
 {
 #if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
 	return (MSEC_PER_SEC / HZ) * j;
@@ -254,7 +254,7 @@
 }
 EXPORT_SYMBOL(jiffies_to_msecs);
 
-unsigned int inline jiffies_to_usecs(const unsigned long j)
+inline unsigned int jiffies_to_usecs(const unsigned long j)
 {
 #if HZ <= USEC_PER_SEC && !(USEC_PER_SEC % HZ)
 	return (USEC_PER_SEC / HZ) * j;
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index df140cd..c50a034 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -679,7 +679,7 @@
 int __clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq)
 {
 
-	/* Intialize mult/shift and max_idle_ns */
+	/* Initialize mult/shift and max_idle_ns */
 	__clocksource_updatefreq_scale(cs, scale, freq);
 
 	/* Add clocksource to the clcoksource list */
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
index d232189..5c00242 100644
--- a/kernel/time/ntp.c
+++ b/kernel/time/ntp.c
@@ -14,6 +14,7 @@
 #include <linux/timex.h>
 #include <linux/time.h>
 #include <linux/mm.h>
+#include <linux/module.h>
 
 /*
  * NTP timekeeping variables:
@@ -74,6 +75,162 @@
 /* constant (boot-param configurable) NTP tick adjustment (upscaled)	*/
 static s64			ntp_tick_adj;
 
+#ifdef CONFIG_NTP_PPS
+
+/*
+ * The following variables are used when a pulse-per-second (PPS) signal
+ * is available. They establish the engineering parameters of the clock
+ * discipline loop when controlled by the PPS signal.
+ */
+#define PPS_VALID	10	/* PPS signal watchdog max (s) */
+#define PPS_POPCORN	4	/* popcorn spike threshold (shift) */
+#define PPS_INTMIN	2	/* min freq interval (s) (shift) */
+#define PPS_INTMAX	8	/* max freq interval (s) (shift) */
+#define PPS_INTCOUNT	4	/* number of consecutive good intervals to
+				   increase pps_shift or consecutive bad
+				   intervals to decrease it */
+#define PPS_MAXWANDER	100000	/* max PPS freq wander (ns/s) */
+
+static int pps_valid;		/* signal watchdog counter */
+static long pps_tf[3];		/* phase median filter */
+static long pps_jitter;		/* current jitter (ns) */
+static struct timespec pps_fbase; /* beginning of the last freq interval */
+static int pps_shift;		/* current interval duration (s) (shift) */
+static int pps_intcnt;		/* interval counter */
+static s64 pps_freq;		/* frequency offset (scaled ns/s) */
+static long pps_stabil;		/* current stability (scaled ns/s) */
+
+/*
+ * PPS signal quality monitors
+ */
+static long pps_calcnt;		/* calibration intervals */
+static long pps_jitcnt;		/* jitter limit exceeded */
+static long pps_stbcnt;		/* stability limit exceeded */
+static long pps_errcnt;		/* calibration errors */
+
+
+/* PPS kernel consumer compensates the whole phase error immediately.
+ * Otherwise, reduce the offset by a fixed factor times the time constant.
+ */
+static inline s64 ntp_offset_chunk(s64 offset)
+{
+	if (time_status & STA_PPSTIME && time_status & STA_PPSSIGNAL)
+		return offset;
+	else
+		return shift_right(offset, SHIFT_PLL + time_constant);
+}
+
+static inline void pps_reset_freq_interval(void)
+{
+	/* the PPS calibration interval may end
+	   surprisingly early */
+	pps_shift = PPS_INTMIN;
+	pps_intcnt = 0;
+}
+
+/**
+ * pps_clear - Clears the PPS state variables
+ *
+ * Must be called while holding a write on the xtime_lock
+ */
+static inline void pps_clear(void)
+{
+	pps_reset_freq_interval();
+	pps_tf[0] = 0;
+	pps_tf[1] = 0;
+	pps_tf[2] = 0;
+	pps_fbase.tv_sec = pps_fbase.tv_nsec = 0;
+	pps_freq = 0;
+}
+
+/* Decrease pps_valid to indicate that another second has passed since
+ * the last PPS signal. When it reaches 0, indicate that PPS signal is
+ * missing.
+ *
+ * Must be called while holding a write on the xtime_lock
+ */
+static inline void pps_dec_valid(void)
+{
+	if (pps_valid > 0)
+		pps_valid--;
+	else {
+		time_status &= ~(STA_PPSSIGNAL | STA_PPSJITTER |
+				 STA_PPSWANDER | STA_PPSERROR);
+		pps_clear();
+	}
+}
+
+static inline void pps_set_freq(s64 freq)
+{
+	pps_freq = freq;
+}
+
+static inline int is_error_status(int status)
+{
+	return (time_status & (STA_UNSYNC|STA_CLOCKERR))
+		/* PPS signal lost when either PPS time or
+		 * PPS frequency synchronization requested
+		 */
+		|| ((time_status & (STA_PPSFREQ|STA_PPSTIME))
+			&& !(time_status & STA_PPSSIGNAL))
+		/* PPS jitter exceeded when
+		 * PPS time synchronization requested */
+		|| ((time_status & (STA_PPSTIME|STA_PPSJITTER))
+			== (STA_PPSTIME|STA_PPSJITTER))
+		/* PPS wander exceeded or calibration error when
+		 * PPS frequency synchronization requested
+		 */
+		|| ((time_status & STA_PPSFREQ)
+			&& (time_status & (STA_PPSWANDER|STA_PPSERROR)));
+}
+
+static inline void pps_fill_timex(struct timex *txc)
+{
+	txc->ppsfreq	   = shift_right((pps_freq >> PPM_SCALE_INV_SHIFT) *
+					 PPM_SCALE_INV, NTP_SCALE_SHIFT);
+	txc->jitter	   = pps_jitter;
+	if (!(time_status & STA_NANO))
+		txc->jitter /= NSEC_PER_USEC;
+	txc->shift	   = pps_shift;
+	txc->stabil	   = pps_stabil;
+	txc->jitcnt	   = pps_jitcnt;
+	txc->calcnt	   = pps_calcnt;
+	txc->errcnt	   = pps_errcnt;
+	txc->stbcnt	   = pps_stbcnt;
+}
+
+#else /* !CONFIG_NTP_PPS */
+
+static inline s64 ntp_offset_chunk(s64 offset)
+{
+	return shift_right(offset, SHIFT_PLL + time_constant);
+}
+
+static inline void pps_reset_freq_interval(void) {}
+static inline void pps_clear(void) {}
+static inline void pps_dec_valid(void) {}
+static inline void pps_set_freq(s64 freq) {}
+
+static inline int is_error_status(int status)
+{
+	return status & (STA_UNSYNC|STA_CLOCKERR);
+}
+
+static inline void pps_fill_timex(struct timex *txc)
+{
+	/* PPS is not implemented, so these are zero */
+	txc->ppsfreq	   = 0;
+	txc->jitter	   = 0;
+	txc->shift	   = 0;
+	txc->stabil	   = 0;
+	txc->jitcnt	   = 0;
+	txc->calcnt	   = 0;
+	txc->errcnt	   = 0;
+	txc->stbcnt	   = 0;
+}
+
+#endif /* CONFIG_NTP_PPS */
+
 /*
  * NTP methods:
  */
@@ -185,6 +342,9 @@
 
 	tick_length	= tick_length_base;
 	time_offset	= 0;
+
+	/* Clear PPS state variables */
+	pps_clear();
 }
 
 /*
@@ -250,16 +410,16 @@
 		time_status |= STA_UNSYNC;
 	}
 
-	/*
-	 * Compute the phase adjustment for the next second. The offset is
-	 * reduced by a fixed factor times the time constant.
-	 */
+	/* Compute the phase adjustment for the next second */
 	tick_length	 = tick_length_base;
 
-	delta		 = shift_right(time_offset, SHIFT_PLL + time_constant);
+	delta		 = ntp_offset_chunk(time_offset);
 	time_offset	-= delta;
 	tick_length	+= delta;
 
+	/* Check PPS signal */
+	pps_dec_valid();
+
 	if (!time_adjust)
 		return;
 
@@ -369,6 +529,8 @@
 	if ((time_status & STA_PLL) && !(txc->status & STA_PLL)) {
 		time_state = TIME_OK;
 		time_status = STA_UNSYNC;
+		/* restart PPS frequency calibration */
+		pps_reset_freq_interval();
 	}
 
 	/*
@@ -418,6 +580,8 @@
 		time_freq = txc->freq * PPM_SCALE;
 		time_freq = min(time_freq, MAXFREQ_SCALED);
 		time_freq = max(time_freq, -MAXFREQ_SCALED);
+		/* update pps_freq */
+		pps_set_freq(time_freq);
 	}
 
 	if (txc->modes & ADJ_MAXERROR)
@@ -508,7 +672,8 @@
 	}
 
 	result = time_state;	/* mostly `TIME_OK' */
-	if (time_status & (STA_UNSYNC|STA_CLOCKERR))
+	/* check for errors */
+	if (is_error_status(time_status))
 		result = TIME_ERROR;
 
 	txc->freq	   = shift_right((time_freq >> PPM_SCALE_INV_SHIFT) *
@@ -522,15 +687,8 @@
 	txc->tick	   = tick_usec;
 	txc->tai	   = time_tai;
 
-	/* PPS is not implemented, so these are zero */
-	txc->ppsfreq	   = 0;
-	txc->jitter	   = 0;
-	txc->shift	   = 0;
-	txc->stabil	   = 0;
-	txc->jitcnt	   = 0;
-	txc->calcnt	   = 0;
-	txc->errcnt	   = 0;
-	txc->stbcnt	   = 0;
+	/* fill PPS status fields */
+	pps_fill_timex(txc);
 
 	write_sequnlock_irq(&xtime_lock);
 
@@ -544,6 +702,243 @@
 	return result;
 }
 
+#ifdef	CONFIG_NTP_PPS
+
+/* actually struct pps_normtime is good old struct timespec, but it is
+ * semantically different (and it is the reason why it was invented):
+ * pps_normtime.nsec has a range of ( -NSEC_PER_SEC / 2, NSEC_PER_SEC / 2 ]
+ * while timespec.tv_nsec has a range of [0, NSEC_PER_SEC) */
+struct pps_normtime {
+	__kernel_time_t	sec;	/* seconds */
+	long		nsec;	/* nanoseconds */
+};
+
+/* normalize the timestamp so that nsec is in the
+   ( -NSEC_PER_SEC / 2, NSEC_PER_SEC / 2 ] interval */
+static inline struct pps_normtime pps_normalize_ts(struct timespec ts)
+{
+	struct pps_normtime norm = {
+		.sec = ts.tv_sec,
+		.nsec = ts.tv_nsec
+	};
+
+	if (norm.nsec > (NSEC_PER_SEC >> 1)) {
+		norm.nsec -= NSEC_PER_SEC;
+		norm.sec++;
+	}
+
+	return norm;
+}
+
+/* get current phase correction and jitter */
+static inline long pps_phase_filter_get(long *jitter)
+{
+	*jitter = pps_tf[0] - pps_tf[1];
+	if (*jitter < 0)
+		*jitter = -*jitter;
+
+	/* TODO: test various filters */
+	return pps_tf[0];
+}
+
+/* add the sample to the phase filter */
+static inline void pps_phase_filter_add(long err)
+{
+	pps_tf[2] = pps_tf[1];
+	pps_tf[1] = pps_tf[0];
+	pps_tf[0] = err;
+}
+
+/* decrease frequency calibration interval length.
+ * It is halved after four consecutive unstable intervals.
+ */
+static inline void pps_dec_freq_interval(void)
+{
+	if (--pps_intcnt <= -PPS_INTCOUNT) {
+		pps_intcnt = -PPS_INTCOUNT;
+		if (pps_shift > PPS_INTMIN) {
+			pps_shift--;
+			pps_intcnt = 0;
+		}
+	}
+}
+
+/* increase frequency calibration interval length.
+ * It is doubled after four consecutive stable intervals.
+ */
+static inline void pps_inc_freq_interval(void)
+{
+	if (++pps_intcnt >= PPS_INTCOUNT) {
+		pps_intcnt = PPS_INTCOUNT;
+		if (pps_shift < PPS_INTMAX) {
+			pps_shift++;
+			pps_intcnt = 0;
+		}
+	}
+}
+
+/* update clock frequency based on MONOTONIC_RAW clock PPS signal
+ * timestamps
+ *
+ * At the end of the calibration interval the difference between the
+ * first and last MONOTONIC_RAW clock timestamps divided by the length
+ * of the interval becomes the frequency update. If the interval was
+ * too long, the data are discarded.
+ * Returns the difference between old and new frequency values.
+ */
+static long hardpps_update_freq(struct pps_normtime freq_norm)
+{
+	long delta, delta_mod;
+	s64 ftemp;
+
+	/* check if the frequency interval was too long */
+	if (freq_norm.sec > (2 << pps_shift)) {
+		time_status |= STA_PPSERROR;
+		pps_errcnt++;
+		pps_dec_freq_interval();
+		pr_err("hardpps: PPSERROR: interval too long - %ld s\n",
+				freq_norm.sec);
+		return 0;
+	}
+
+	/* here the raw frequency offset and wander (stability) are
+	 * calculated. If the wander is less than the wander threshold
+	 * the interval is increased; otherwise it is decreased.
+	 */
+	ftemp = div_s64(((s64)(-freq_norm.nsec)) << NTP_SCALE_SHIFT,
+			freq_norm.sec);
+	delta = shift_right(ftemp - pps_freq, NTP_SCALE_SHIFT);
+	pps_freq = ftemp;
+	if (delta > PPS_MAXWANDER || delta < -PPS_MAXWANDER) {
+		pr_warning("hardpps: PPSWANDER: change=%ld\n", delta);
+		time_status |= STA_PPSWANDER;
+		pps_stbcnt++;
+		pps_dec_freq_interval();
+	} else {	/* good sample */
+		pps_inc_freq_interval();
+	}
+
+	/* the stability metric is calculated as the average of recent
+	 * frequency changes, but is used only for performance
+	 * monitoring
+	 */
+	delta_mod = delta;
+	if (delta_mod < 0)
+		delta_mod = -delta_mod;
+	pps_stabil += (div_s64(((s64)delta_mod) <<
+				(NTP_SCALE_SHIFT - SHIFT_USEC),
+				NSEC_PER_USEC) - pps_stabil) >> PPS_INTMIN;
+
+	/* if enabled, the system clock frequency is updated */
+	if ((time_status & STA_PPSFREQ) != 0 &&
+	    (time_status & STA_FREQHOLD) == 0) {
+		time_freq = pps_freq;
+		ntp_update_frequency();
+	}
+
+	return delta;
+}
+
+/* correct REALTIME clock phase error against PPS signal */
+static void hardpps_update_phase(long error)
+{
+	long correction = -error;
+	long jitter;
+
+	/* add the sample to the median filter */
+	pps_phase_filter_add(correction);
+	correction = pps_phase_filter_get(&jitter);
+
+	/* Nominal jitter is due to PPS signal noise. If it exceeds the
+	 * threshold, the sample is discarded; otherwise, if so enabled,
+	 * the time offset is updated.
+	 */
+	if (jitter > (pps_jitter << PPS_POPCORN)) {
+		pr_warning("hardpps: PPSJITTER: jitter=%ld, limit=%ld\n",
+		       jitter, (pps_jitter << PPS_POPCORN));
+		time_status |= STA_PPSJITTER;
+		pps_jitcnt++;
+	} else if (time_status & STA_PPSTIME) {
+		/* correct the time using the phase offset */
+		time_offset = div_s64(((s64)correction) << NTP_SCALE_SHIFT,
+				NTP_INTERVAL_FREQ);
+		/* cancel running adjtime() */
+		time_adjust = 0;
+	}
+	/* update jitter */
+	pps_jitter += (jitter - pps_jitter) >> PPS_INTMIN;
+}
+
+/*
+ * hardpps() - discipline CPU clock oscillator to external PPS signal
+ *
+ * This routine is called at each PPS signal arrival in order to
+ * discipline the CPU clock oscillator to the PPS signal. It takes two
+ * parameters: REALTIME and MONOTONIC_RAW clock timestamps. The former
+ * is used to correct clock phase error and the latter is used to
+ * correct the frequency.
+ *
+ * This code is based on David Mills's reference nanokernel
+ * implementation. It was mostly rewritten but keeps the same idea.
+ */
+void hardpps(const struct timespec *phase_ts, const struct timespec *raw_ts)
+{
+	struct pps_normtime pts_norm, freq_norm;
+	unsigned long flags;
+
+	pts_norm = pps_normalize_ts(*phase_ts);
+
+	write_seqlock_irqsave(&xtime_lock, flags);
+
+	/* clear the error bits, they will be set again if needed */
+	time_status &= ~(STA_PPSJITTER | STA_PPSWANDER | STA_PPSERROR);
+
+	/* indicate signal presence */
+	time_status |= STA_PPSSIGNAL;
+	pps_valid = PPS_VALID;
+
+	/* when called for the first time,
+	 * just start the frequency interval */
+	if (unlikely(pps_fbase.tv_sec == 0)) {
+		pps_fbase = *raw_ts;
+		write_sequnlock_irqrestore(&xtime_lock, flags);
+		return;
+	}
+
+	/* ok, now we have a base for frequency calculation */
+	freq_norm = pps_normalize_ts(timespec_sub(*raw_ts, pps_fbase));
+
+	/* check that the signal is in the range
+	 * [1s - MAXFREQ us, 1s + MAXFREQ us], otherwise reject it */
+	if ((freq_norm.sec == 0) ||
+			(freq_norm.nsec > MAXFREQ * freq_norm.sec) ||
+			(freq_norm.nsec < -MAXFREQ * freq_norm.sec)) {
+		time_status |= STA_PPSJITTER;
+		/* restart the frequency calibration interval */
+		pps_fbase = *raw_ts;
+		write_sequnlock_irqrestore(&xtime_lock, flags);
+		pr_err("hardpps: PPSJITTER: bad pulse\n");
+		return;
+	}
+
+	/* signal is ok */
+
+	/* check if the current frequency interval is finished */
+	if (freq_norm.sec >= (1 << pps_shift)) {
+		pps_calcnt++;
+		/* restart the frequency calibration interval */
+		pps_fbase = *raw_ts;
+		hardpps_update_freq(freq_norm);
+	}
+
+	hardpps_update_phase(pts_norm.nsec);
+
+	write_sequnlock_irqrestore(&xtime_lock, flags);
+}
+EXPORT_SYMBOL(hardpps);
+
+#endif	/* CONFIG_NTP_PPS */
+
 static int __init ntp_tick_adj_setup(char *str)
 {
 	ntp_tick_adj = simple_strtol(str, NULL, 0);
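A PPS consumer (for instance a serial-port DCD interrupt handler) is expected
to capture both clocks at the pulse edge and feed them to hardpps(). A hedged
sketch using the getnstime_raw_and_real() helper added in the timekeeping.c
hunk below; the surrounding driver code is hypothetical.

	struct timespec ts_real, ts_raw;

	/* snapshot REALTIME and MONOTONIC_RAW atomically at the pulse edge */
	getnstime_raw_and_real(&ts_raw, &ts_real);

	/* phase error from the REALTIME stamp, frequency from the raw stamp */
	hardpps(&ts_real, &ts_raw);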
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 5bb86da..5536aaf 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -288,6 +288,49 @@
 }
 EXPORT_SYMBOL_GPL(ktime_get_ts);
 
+#ifdef CONFIG_NTP_PPS
+
+/**
+ * getnstime_raw_and_real - get day and raw monotonic time in timespec format
+ * @ts_raw:	pointer to the timespec to be set to raw monotonic time
+ * @ts_real:	pointer to the timespec to be set to the time of day
+ *
+ * This function reads both the time of day and raw monotonic time at the
+ * same time atomically and stores the resulting timestamps in timespec
+ * format.
+ */
+void getnstime_raw_and_real(struct timespec *ts_raw, struct timespec *ts_real)
+{
+	unsigned long seq;
+	s64 nsecs_raw, nsecs_real;
+
+	WARN_ON_ONCE(timekeeping_suspended);
+
+	do {
+		u32 arch_offset;
+
+		seq = read_seqbegin(&xtime_lock);
+
+		*ts_raw = raw_time;
+		*ts_real = xtime;
+
+		nsecs_raw = timekeeping_get_ns_raw();
+		nsecs_real = timekeeping_get_ns();
+
+		/* If arch requires, add in gettimeoffset() */
+		arch_offset = arch_gettimeoffset();
+		nsecs_raw += arch_offset;
+		nsecs_real += arch_offset;
+
+	} while (read_seqretry(&xtime_lock, seq));
+
+	timespec_add_ns(ts_raw, nsecs_raw);
+	timespec_add_ns(ts_real, nsecs_real);
+}
+EXPORT_SYMBOL(getnstime_raw_and_real);
+
+#endif /* CONFIG_NTP_PPS */
+
 /**
  * do_gettimeofday - Returns the time of day in a timeval
  * @tv:		pointer to the timeval to be set
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
index 53f3381..761c510 100644
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
@@ -52,7 +52,7 @@
 endif
 obj-$(CONFIG_EVENT_TRACING) += trace_events_filter.o
 obj-$(CONFIG_KPROBE_EVENT) += trace_kprobe.o
-obj-$(CONFIG_EVENT_TRACING) += power-traces.o
+obj-$(CONFIG_TRACEPOINTS) += power-traces.o
 ifeq ($(CONFIG_TRACING),y)
 obj-$(CONFIG_KGDB_KDB) += trace_kdb.o
 endif
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index f8cf959..dc53ecb 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1313,12 +1313,10 @@
 
 	__this_cpu_inc(user_stack_count);
 
-
-
 	event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
 					  sizeof(*entry), flags, pc);
 	if (!event)
-		return;
+		goto out_drop_count;
 	entry	= ring_buffer_event_data(event);
 
 	entry->tgid		= current->tgid;
@@ -1333,8 +1331,8 @@
 	if (!filter_check_discard(call, entry, buffer, event))
 		ring_buffer_unlock_commit(buffer, event);
 
+ out_drop_count:
 	__this_cpu_dec(user_stack_count);
-
  out:
 	preempt_enable();
 }
diff --git a/kernel/trace/trace_entries.h b/kernel/trace/trace_entries.h
index e3dfeca..6cf2237 100644
--- a/kernel/trace/trace_entries.h
+++ b/kernel/trace/trace_entries.h
@@ -53,7 +53,7 @@
  */
 
 /*
- * Function trace entry - function address and parent function addres:
+ * Function trace entry - function address and parent function address:
  */
 FTRACE_ENTRY(function, ftrace_entry,
 
diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
index 2591583..9da289c 100644
--- a/kernel/user_namespace.c
+++ b/kernel/user_namespace.c
@@ -12,6 +12,8 @@
 #include <linux/highuid.h>
 #include <linux/cred.h>
 
+static struct kmem_cache *user_ns_cachep __read_mostly;
+
 /*
  * Create a new user namespace, deriving the creator from the user in the
  * passed credentials, and replacing that user with the new root user for the
@@ -26,7 +28,7 @@
 	struct user_struct *root_user;
 	int n;
 
-	ns = kmalloc(sizeof(struct user_namespace), GFP_KERNEL);
+	ns = kmem_cache_alloc(user_ns_cachep, GFP_KERNEL);
 	if (!ns)
 		return -ENOMEM;
 
@@ -38,7 +40,7 @@
 	/* Alloc new root user.  */
 	root_user = alloc_uid(ns, 0);
 	if (!root_user) {
-		kfree(ns);
+		kmem_cache_free(user_ns_cachep, ns);
 		return -ENOMEM;
 	}
 
@@ -71,7 +73,7 @@
 	struct user_namespace *ns =
 		container_of(work, struct user_namespace, destroyer);
 	free_uid(ns->creator);
-	kfree(ns);
+	kmem_cache_free(user_ns_cachep, ns);
 }
 
 void free_user_ns(struct kref *kref)
@@ -126,3 +128,10 @@
 	/* No useful relationship so no mapping */
 	return overflowgid;
 }
+
+static __init int user_namespaces_init(void)
+{
+	user_ns_cachep = KMEM_CACHE(user_namespace, SLAB_PANIC);
+	return 0;
+}
+module_init(user_namespaces_init);
diff --git a/lib/Kconfig b/lib/Kconfig
index 3116aa6..0ee67e0 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -106,6 +106,8 @@
 config LZO_DECOMPRESS
 	tristate
 
+source "lib/xz/Kconfig"
+
 #
 # These all provide a common interface (hence the apparent duplication with
 # ZLIB_INFLATE; DECOMPRESS_GZIP is just a wrapper.)
@@ -120,6 +122,10 @@
 config DECOMPRESS_LZMA
 	tristate
 
+config DECOMPRESS_XZ
+	select XZ_DEC
+	tristate
+
 config DECOMPRESS_LZO
 	select LZO_DECOMPRESS
 	tristate
diff --git a/lib/Makefile b/lib/Makefile
index d7b6e30a..cbb774f 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -12,7 +12,7 @@
 	 idr.o int_sqrt.o extable.o prio_tree.o \
 	 sha1.o irq_regs.o reciprocal_div.o argv_split.o \
 	 proportions.o prio_heap.o ratelimit.o show_mem.o \
-	 is_single_threaded.o plist.o decompress.o flex_array.o
+	 is_single_threaded.o plist.o decompress.o
 
 lib-$(CONFIG_MMU) += ioremap.o
 lib-$(CONFIG_SMP) += cpumask.o
@@ -21,7 +21,7 @@
 
 obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
 	 bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \
-	 string_helpers.o gcd.o lcm.o list_sort.o uuid.o
+	 string_helpers.o gcd.o lcm.o list_sort.o uuid.o flex_array.o
 
 ifeq ($(CONFIG_DEBUG_KOBJECT),y)
 CFLAGS_kobject.o += -DDEBUG
@@ -69,11 +69,13 @@
 obj-$(CONFIG_REED_SOLOMON) += reed_solomon/
 obj-$(CONFIG_LZO_COMPRESS) += lzo/
 obj-$(CONFIG_LZO_DECOMPRESS) += lzo/
+obj-$(CONFIG_XZ_DEC) += xz/
 obj-$(CONFIG_RAID6_PQ) += raid6/
 
 lib-$(CONFIG_DECOMPRESS_GZIP) += decompress_inflate.o
 lib-$(CONFIG_DECOMPRESS_BZIP2) += decompress_bunzip2.o
 lib-$(CONFIG_DECOMPRESS_LZMA) += decompress_unlzma.o
+lib-$(CONFIG_DECOMPRESS_XZ) += decompress_unxz.o
 lib-$(CONFIG_DECOMPRESS_LZO) += decompress_unlzo.o
 
 obj-$(CONFIG_TEXTSEARCH) += textsearch.o
diff --git a/lib/decompress.c b/lib/decompress.c
index a760681..3d766b7f 100644
--- a/lib/decompress.c
+++ b/lib/decompress.c
@@ -8,6 +8,7 @@
 
 #include <linux/decompress/bunzip2.h>
 #include <linux/decompress/unlzma.h>
+#include <linux/decompress/unxz.h>
 #include <linux/decompress/inflate.h>
 #include <linux/decompress/unlzo.h>
 
@@ -23,6 +24,9 @@
 #ifndef CONFIG_DECOMPRESS_LZMA
 # define unlzma NULL
 #endif
+#ifndef CONFIG_DECOMPRESS_XZ
+# define unxz NULL
+#endif
 #ifndef CONFIG_DECOMPRESS_LZO
 # define unlzo NULL
 #endif
@@ -36,6 +40,7 @@
 	{ {037, 0236}, "gzip", gunzip },
 	{ {0x42, 0x5a}, "bzip2", bunzip2 },
 	{ {0x5d, 0x00}, "lzma", unlzma },
+	{ {0xfd, 0x37}, "xz", unxz },
 	{ {0x89, 0x4c}, "lzo", unlzo },
 	{ {0, 0}, NULL, NULL }
 };
diff --git a/lib/decompress_bunzip2.c b/lib/decompress_bunzip2.c
index 81c8bb1..a7b80c1 100644
--- a/lib/decompress_bunzip2.c
+++ b/lib/decompress_bunzip2.c
@@ -49,7 +49,6 @@
 #define PREBOOT
 #else
 #include <linux/decompress/bunzip2.h>
-#include <linux/slab.h>
 #endif /* STATIC */
 
 #include <linux/decompress/mm.h>
@@ -682,13 +681,12 @@
 			int(*flush)(void*, unsigned int),
 			unsigned char *outbuf,
 			int *pos,
-			void(*error_fn)(char *x))
+			void(*error)(char *x))
 {
 	struct bunzip_data *bd;
 	int i = -1;
 	unsigned char *inbuf;
 
-	set_error_fn(error_fn);
 	if (flush)
 		outbuf = malloc(BZIP2_IOBUF_SIZE);
 
@@ -751,8 +749,8 @@
 			int(*flush)(void*, unsigned int),
 			unsigned char *outbuf,
 			int *pos,
-			void(*error_fn)(char *x))
+			void(*error)(char *x))
 {
-	return bunzip2(buf, len - 4, fill, flush, outbuf, pos, error_fn);
+	return bunzip2(buf, len - 4, fill, flush, outbuf, pos, error);
 }
 #endif
diff --git a/lib/decompress_inflate.c b/lib/decompress_inflate.c
index fc686c7..19ff89e 100644
--- a/lib/decompress_inflate.c
+++ b/lib/decompress_inflate.c
@@ -19,7 +19,6 @@
 #include "zlib_inflate/inflate.h"
 
 #include "zlib_inflate/infutil.h"
-#include <linux/slab.h>
 
 #endif /* STATIC */
 
@@ -27,7 +26,7 @@
 
 #define GZIP_IOBUF_SIZE (16*1024)
 
-static int nofill(void *buffer, unsigned int len)
+static int INIT nofill(void *buffer, unsigned int len)
 {
 	return -1;
 }
@@ -38,13 +37,12 @@
 		       int(*flush)(void*, unsigned int),
 		       unsigned char *out_buf,
 		       int *pos,
-		       void(*error_fn)(char *x)) {
+		       void(*error)(char *x)) {
 	u8 *zbuf;
 	struct z_stream_s *strm;
 	int rc;
 	size_t out_len;
 
-	set_error_fn(error_fn);
 	rc = -1;
 	if (flush) {
 		out_len = 0x8000; /* 32 K */
@@ -100,13 +98,22 @@
 	 * possible asciz filename)
 	 */
 	strm->next_in = zbuf + 10;
+	strm->avail_in = len - 10;
 	/* skip over asciz filename */
 	if (zbuf[3] & 0x8) {
-		while (strm->next_in[0])
-			strm->next_in++;
-		strm->next_in++;
+		do {
+			/*
+			 * If the filename doesn't fit into the buffer,
+			 * the file is very probably corrupt. Don't try
+			 * to read more data.
+			 */
+			if (strm->avail_in == 0) {
+				error("header error");
+				goto gunzip_5;
+			}
+			--strm->avail_in;
+		} while (*strm->next_in++);
 	}
-	strm->avail_in = len - (strm->next_in - zbuf);
 
 	strm->next_out = out_buf;
 	strm->avail_out = out_len;
diff --git a/lib/decompress_unlzma.c b/lib/decompress_unlzma.c
index ca82fde..476c65a 100644
--- a/lib/decompress_unlzma.c
+++ b/lib/decompress_unlzma.c
@@ -33,7 +33,6 @@
 #define PREBOOT
 #else
 #include <linux/decompress/unlzma.h>
-#include <linux/slab.h>
 #endif /* STATIC */
 
 #include <linux/decompress/mm.h>
@@ -74,6 +73,7 @@
 	uint32_t code;
 	uint32_t range;
 	uint32_t bound;
+	void (*error)(char *);
 };
 
 
@@ -82,7 +82,7 @@
 #define RC_MODEL_TOTAL_BITS 11
 
 
-static int nofill(void *buffer, unsigned int len)
+static int INIT nofill(void *buffer, unsigned int len)
 {
 	return -1;
 }
@@ -92,7 +92,7 @@
 {
 	rc->buffer_size = rc->fill((char *)rc->buffer, LZMA_IOBUF_SIZE);
 	if (rc->buffer_size <= 0)
-		error("unexpected EOF");
+		rc->error("unexpected EOF");
 	rc->ptr = rc->buffer;
 	rc->buffer_end = rc->buffer + rc->buffer_size;
 }
@@ -127,12 +127,6 @@
 }
 
 
-/* Called once. TODO: bb_maybe_free() */
-static inline void INIT rc_free(struct rc *rc)
-{
-	free(rc->buffer);
-}
-
 /* Called twice, but one callsite is in inline'd rc_is_bit_0_helper() */
 static void INIT rc_do_normalize(struct rc *rc)
 {
@@ -169,7 +163,7 @@
 	rc->range = rc->bound;
 	*p += ((1 << RC_MODEL_TOTAL_BITS) - *p) >> RC_MOVE_BITS;
 }
-static inline void rc_update_bit_1(struct rc *rc, uint16_t *p)
+static inline void INIT rc_update_bit_1(struct rc *rc, uint16_t *p)
 {
 	rc->range -= rc->bound;
 	rc->code -= rc->bound;
@@ -319,32 +313,38 @@
 
 }
 
-static inline void INIT write_byte(struct writer *wr, uint8_t byte)
+static inline int INIT write_byte(struct writer *wr, uint8_t byte)
 {
 	wr->buffer[wr->buffer_pos++] = wr->previous_byte = byte;
 	if (wr->flush && wr->buffer_pos == wr->header->dict_size) {
 		wr->buffer_pos = 0;
 		wr->global_pos += wr->header->dict_size;
-		wr->flush((char *)wr->buffer, wr->header->dict_size);
+		if (wr->flush((char *)wr->buffer, wr->header->dict_size)
+				!= wr->header->dict_size)
+			return -1;
 	}
+	return 0;
 }
 
 
-static inline void INIT copy_byte(struct writer *wr, uint32_t offs)
+static inline int INIT copy_byte(struct writer *wr, uint32_t offs)
 {
-	write_byte(wr, peek_old_byte(wr, offs));
+	return write_byte(wr, peek_old_byte(wr, offs));
 }
 
-static inline void INIT copy_bytes(struct writer *wr,
+static inline int INIT copy_bytes(struct writer *wr,
 					 uint32_t rep0, int len)
 {
 	do {
-		copy_byte(wr, rep0);
+		if (copy_byte(wr, rep0))
+			return -1;
 		len--;
 	} while (len != 0 && wr->buffer_pos < wr->header->dst_size);
+
+	return len;
 }
 
-static inline void INIT process_bit0(struct writer *wr, struct rc *rc,
+static inline int INIT process_bit0(struct writer *wr, struct rc *rc,
 				     struct cstate *cst, uint16_t *p,
 				     int pos_state, uint16_t *prob,
 				     int lc, uint32_t literal_pos_mask) {
@@ -378,16 +378,17 @@
 		uint16_t *prob_lit = prob + mi;
 		rc_get_bit(rc, prob_lit, &mi);
 	}
-	write_byte(wr, mi);
 	if (cst->state < 4)
 		cst->state = 0;
 	else if (cst->state < 10)
 		cst->state -= 3;
 	else
 		cst->state -= 6;
+
+	return write_byte(wr, mi);
 }
 
-static inline void INIT process_bit1(struct writer *wr, struct rc *rc,
+static inline int INIT process_bit1(struct writer *wr, struct rc *rc,
 					    struct cstate *cst, uint16_t *p,
 					    int pos_state, uint16_t *prob) {
   int offset;
@@ -418,8 +419,7 @@
 
 				cst->state = cst->state < LZMA_NUM_LIT_STATES ?
 					9 : 11;
-				copy_byte(wr, cst->rep0);
-				return;
+				return copy_byte(wr, cst->rep0);
 			} else {
 				rc_update_bit_1(rc, prob);
 			}
@@ -521,12 +521,15 @@
 		} else
 			cst->rep0 = pos_slot;
 		if (++(cst->rep0) == 0)
-			return;
+			return 0;
+		if (cst->rep0 > wr->header->dict_size
+				|| cst->rep0 > get_pos(wr))
+			return -1;
 	}
 
 	len += LZMA_MATCH_MIN_LEN;
 
-	copy_bytes(wr, cst->rep0, len);
+	return copy_bytes(wr, cst->rep0, len);
 }
 
 
@@ -536,7 +539,7 @@
 			      int(*flush)(void*, unsigned int),
 			      unsigned char *output,
 			      int *posp,
-			      void(*error_fn)(char *x)
+			      void(*error)(char *x)
 	)
 {
 	struct lzma_header header;
@@ -552,7 +555,7 @@
 	unsigned char *inbuf;
 	int ret = -1;
 
-	set_error_fn(error_fn);
+	rc.error = error;
 
 	if (buf)
 		inbuf = buf;
@@ -580,8 +583,10 @@
 		((unsigned char *)&header)[i] = *rc.ptr++;
 	}
 
-	if (header.pos >= (9 * 5 * 5))
+	if (header.pos >= (9 * 5 * 5)) {
 		error("bad header");
+		goto exit_1;
+	}
 
 	mi = 0;
 	lc = header.pos;
@@ -627,21 +632,29 @@
 		int pos_state =	get_pos(&wr) & pos_state_mask;
 		uint16_t *prob = p + LZMA_IS_MATCH +
 			(cst.state << LZMA_NUM_POS_BITS_MAX) + pos_state;
-		if (rc_is_bit_0(&rc, prob))
-			process_bit0(&wr, &rc, &cst, p, pos_state, prob,
-				     lc, literal_pos_mask);
-		else {
-			process_bit1(&wr, &rc, &cst, p, pos_state, prob);
+		if (rc_is_bit_0(&rc, prob)) {
+			if (process_bit0(&wr, &rc, &cst, p, pos_state, prob,
+					lc, literal_pos_mask)) {
+				error("LZMA data is corrupt");
+				goto exit_3;
+			}
+		} else {
+			if (process_bit1(&wr, &rc, &cst, p, pos_state, prob)) {
+				error("LZMA data is corrupt");
+				goto exit_3;
+			}
 			if (cst.rep0 == 0)
 				break;
 		}
+		if (rc.buffer_size <= 0)
+			goto exit_3;
 	}
 
 	if (posp)
 		*posp = rc.ptr-rc.buffer;
-	if (wr.flush)
-		wr.flush(wr.buffer, wr.buffer_pos);
-	ret = 0;
+	if (!wr.flush || wr.flush(wr.buffer, wr.buffer_pos) == wr.buffer_pos)
+		ret = 0;
+exit_3:
 	large_free(p);
 exit_2:
 	if (!output)
@@ -659,9 +672,9 @@
 			      int(*flush)(void*, unsigned int),
 			      unsigned char *output,
 			      int *posp,
-			      void(*error_fn)(char *x)
+			      void(*error)(char *x)
 	)
 {
-	return unlzma(buf, in_len - 4, fill, flush, output, posp, error_fn);
+	return unlzma(buf, in_len - 4, fill, flush, output, posp, error);
 }
 #endif
diff --git a/lib/decompress_unlzo.c b/lib/decompress_unlzo.c
index bcb3a4b..5a7a2ad 100644
--- a/lib/decompress_unlzo.c
+++ b/lib/decompress_unlzo.c
@@ -33,7 +33,6 @@
 #ifdef STATIC
 #include "lzo/lzo1x_decompress.c"
 #else
-#include <linux/slab.h>
 #include <linux/decompress/unlzo.h>
 #endif
 
@@ -49,14 +48,25 @@
 
 #define LZO_BLOCK_SIZE        (256*1024l)
 #define HEADER_HAS_FILTER      0x00000800L
+#define HEADER_SIZE_MIN       (9 + 7     + 4 + 8     + 1       + 4)
+#define HEADER_SIZE_MAX       (9 + 7 + 1 + 8 + 8 + 4 + 1 + 255 + 4)
 
-STATIC inline int INIT parse_header(u8 *input, u8 *skip)
+STATIC inline int INIT parse_header(u8 *input, int *skip, int in_len)
 {
 	int l;
 	u8 *parse = input;
+	u8 *end = input + in_len;
 	u8 level = 0;
 	u16 version;
 
+	/*
+	 * Check that there's enough input to possibly have a valid header.
+	 * That makes it safe to parse the first few fields without further
+	 * bounds checks, up to the point where the minimum header size may
+	 * have been consumed.
+	 */
+	if (in_len < HEADER_SIZE_MIN)
+		return 0;
+
 	/* read magic: 9 first bits */
 	for (l = 0; l < 9; l++) {
 		if (*parse++ != lzop_magic[l])
@@ -74,6 +84,15 @@
 	else
 		parse += 4; /* flags */
 
+	/*
+	 * At least mode, mtime_low, filename length, and checksum must
+	 * be left to be parsed. If also mtime_high is present, it's OK
+	 * because the next input buffer check is after reading the
+	 * filename length.
+	 */
+	if (end - parse < 8 + 1 + 4)
+		return 0;
+
 	/* skip mode and mtime_low */
 	parse += 8;
 	if (version >= 0x0940)
@@ -81,6 +100,8 @@
 
 	l = *parse++;
 	/* don't care about the file name, and skip checksum */
+	if (end - parse < l + 4)
+		return 0;
 	parse += l + 4;
 
 	*skip = parse - input;
@@ -91,16 +112,15 @@
 				int (*fill) (void *, unsigned int),
 				int (*flush) (void *, unsigned int),
 				u8 *output, int *posp,
-				void (*error_fn) (char *x))
+				void (*error) (char *x))
 {
-	u8 skip = 0, r = 0;
+	u8 r = 0;
+	int skip = 0;
 	u32 src_len, dst_len;
 	size_t tmp;
 	u8 *in_buf, *in_buf_save, *out_buf;
 	int ret = -1;
 
-	set_error_fn(error_fn);
-
 	if (output) {
 		out_buf = output;
 	} else if (!flush) {
@@ -119,8 +139,8 @@
 		goto exit_1;
 	} else if (input) {
 		in_buf = input;
-	} else if (!fill || !posp) {
-		error("NULL input pointer and missing position pointer or fill function");
+	} else if (!fill) {
+		error("NULL input pointer and missing fill function");
 		goto exit_1;
 	} else {
 		in_buf = malloc(lzo1x_worst_compress(LZO_BLOCK_SIZE));
@@ -134,22 +154,47 @@
 	if (posp)
 		*posp = 0;
 
-	if (fill)
-		fill(in_buf, lzo1x_worst_compress(LZO_BLOCK_SIZE));
+	if (fill) {
+		/*
+		 * Start from in_buf + HEADER_SIZE_MAX to make it possible
+		 * to use memcpy() to copy the unused data to the beginning
+		 * of the buffer. This way memmove() isn't needed which
+		 * is missing from pre-boot environments of most archs.
+		 */
+		in_buf += HEADER_SIZE_MAX;
+		in_len = fill(in_buf, HEADER_SIZE_MAX);
+	}
 
-	if (!parse_header(input, &skip)) {
+	if (!parse_header(in_buf, &skip, in_len)) {
 		error("invalid header");
 		goto exit_2;
 	}
 	in_buf += skip;
+	in_len -= skip;
+
+	if (fill) {
+		/* Move the unused data to the beginning of the buffer. */
+		memcpy(in_buf_save, in_buf, in_len);
+		in_buf = in_buf_save;
+	}
 
 	if (posp)
 		*posp = skip;
 
 	for (;;) {
 		/* read uncompressed block size */
+		if (fill && in_len < 4) {
+			skip = fill(in_buf + in_len, 4 - in_len);
+			if (skip > 0)
+				in_len += skip;
+		}
+		if (in_len < 4) {
+			error("file corrupted");
+			goto exit_2;
+		}
 		dst_len = get_unaligned_be32(in_buf);
 		in_buf += 4;
+		in_len -= 4;
 
 		/* exit if last block */
 		if (dst_len == 0) {
@@ -164,8 +209,18 @@
 		}
 
 		/* read compressed block size, and skip block checksum info */
+		if (fill && in_len < 8) {
+			skip = fill(in_buf + in_len, 8 - in_len);
+			if (skip > 0)
+				in_len += skip;
+		}
+		if (in_len < 8) {
+			error("file corrupted");
+			goto exit_2;
+		}
 		src_len = get_unaligned_be32(in_buf);
 		in_buf += 8;
+		in_len -= 8;
 
 		if (src_len <= 0 || src_len > dst_len) {
 			error("file corrupted");
@@ -173,6 +228,15 @@
 		}
 
 		/* decompress */
+		if (fill && in_len < src_len) {
+			skip = fill(in_buf + in_len, src_len - in_len);
+			if (skip > 0)
+				in_len += skip;
+		}
+		if (in_len < src_len) {
+			error("file corrupted");
+			goto exit_2;
+		}
 		tmp = dst_len;
 
 		/* When the input data is not compressed at all,
@@ -190,17 +254,26 @@
 			}
 		}
 
-		if (flush)
-			flush(out_buf, dst_len);
+		if (flush && flush(out_buf, dst_len) != dst_len)
+			goto exit_2;
 		if (output)
 			out_buf += dst_len;
 		if (posp)
 			*posp += src_len + 12;
+
+		in_buf += src_len;
+		in_len -= src_len;
 		if (fill) {
+			/*
+			 * If there happens to still be unused data left in
+			 * in_buf, move it to the beginning of the buffer.
+			 * Use a loop to avoid memmove() dependency.
+			 */
+			if (in_len > 0)
+				for (skip = 0; skip < in_len; ++skip)
+					in_buf_save[skip] = in_buf[skip];
 			in_buf = in_buf_save;
-			fill(in_buf, lzo1x_worst_compress(LZO_BLOCK_SIZE));
-		} else
-			in_buf += src_len;
+		}
 	}
 
 	ret = 0;
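
As a side note on the block framing that the loop above parses, here is a minimal sketch of reading one lzop block header, assuming the whole block is already in memory; the helper name is illustrative and not part of this patch:

static int read_block_sizes(const u8 *p, u32 *dst_len, u32 *src_len)
{
	/* Uncompressed size comes first, as a big-endian 32-bit value. */
	*dst_len = get_unaligned_be32(p);
	if (*dst_len == 0)
		return 0;			/* end-of-stream marker */

	/* Compressed size follows; 4 bytes of block checksum are skipped. */
	*src_len = get_unaligned_be32(p + 4);
	return 1;
}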
diff --git a/lib/decompress_unxz.c b/lib/decompress_unxz.c
new file mode 100644
index 0000000..cecd23d
--- /dev/null
+++ b/lib/decompress_unxz.c
@@ -0,0 +1,397 @@
+/*
+ * Wrapper for decompressing XZ-compressed kernel, initramfs, and initrd
+ *
+ * Author: Lasse Collin <lasse.collin@tukaani.org>
+ *
+ * This file has been put into the public domain.
+ * You can do whatever you want with this file.
+ */
+
+/*
+ * Important notes about in-place decompression
+ *
+ * At least on x86, the kernel is decompressed in place: the compressed data
+ * is placed to the end of the output buffer, and the decompressor overwrites
+ * most of the compressed data. There must be enough safety margin to
+ * guarantee that the write position is always behind the read position.
+ *
+ * The safety margin for XZ with LZMA2 or BCJ+LZMA2 is calculated below.
+ * Note that the margin with XZ is bigger than with Deflate (gzip)!
+ *
+ * The worst case for in-place decompression is that the beginning of
+ * the file is compressed extremely well, and the rest of the file is
+ * uncompressible. Thus, we must look for worst-case expansion when the
+ * compressor is encoding uncompressible data.
+ *
+ * The structure of the .xz file in case of a compressed kernel is as follows.
+ * Sizes (as bytes) of the fields are in parentheses.
+ *
+ *    Stream Header (12)
+ *    Block Header:
+ *      Block Header (8-12)
+ *      Compressed Data (N)
+ *      Block Padding (0-3)
+ *      CRC32 (4)
+ *    Index (8-20)
+ *    Stream Footer (12)
+ *
+ * Normally there is exactly one Block, but let's assume that there are
+ * 2-4 Blocks just in case. Because Stream Header and also Block Header
+ * of the first Block don't make the decompressor produce any uncompressed
+ * data, we can leave them out of our calculations. Block Headers of possible
+ * additional Blocks have to be taken into account still. With these
+ * assumptions, it is safe to assume that the total header overhead is
+ * less than 128 bytes.
+ *
+ * Compressed Data contains LZMA2 or BCJ+LZMA2 encoded data. Since BCJ
+ * doesn't change the size of the data, it is enough to calculate the
+ * safety margin for LZMA2.
+ *
+ * LZMA2 stores the data in chunks. Each chunk has a header whose size is
+ * a maximum of 6 bytes, but to get round 2^n numbers, let's assume that
+ * the maximum chunk header size is 8 bytes. After the chunk header, there
+ * may be up to 64 KiB of actual payload in the chunk. Often the payload is
+ * quite a bit smaller though; to be safe, let's assume that an average
+ * chunk has only 32 KiB of payload.
+ *
+ * The maximum uncompressed size of the payload is 2 MiB. The minimum
+ * uncompressed size of the payload is in practice never less than the
+ * payload size itself. The LZMA2 format would allow uncompressed size
+ * to be less than the payload size, but no sane compressor creates such
+ * files. LZMA2 supports storing uncompressible data in uncompressed form,
+ * so there's never a need to create payloads whose uncompressed size is
+ * smaller than the compressed size.
+ *
+ * The assumption that the uncompressed size of the payload is never
+ * smaller than the payload itself is valid only when talking about
+ * the payload as a whole. It is possible that the payload has parts where
+ * the decompressor consumes more input than it produces output. Calculating
+ * the worst case for this would be tricky. Instead of trying to do that,
+ * let's simply make sure that the decompressor never overwrites any bytes
+ * of the payload which it is currently reading.
+ *
+ * Now we have enough information to calculate the safety margin. We need
+ *   - 128 bytes for the .xz file format headers;
+ *   - 8 bytes per every 32 KiB of uncompressed size (one LZMA2 chunk header
+ *     per chunk, each chunk having average payload size of 32 KiB); and
+ *   - 64 KiB (biggest possible LZMA2 chunk payload size) to make sure that
+ *     the decompressor never overwrites anything from the LZMA2 chunk
+ *     payload it is currently reading.
+ *
+ * We get the following formula:
+ *
+ *    safety_margin = 128 + uncompressed_size * 8 / 32768 + 65536
+ *                  = 128 + (uncompressed_size >> 12) + 65536
+ *
+ * For comparison, according to arch/x86/boot/compressed/misc.c, the
+ * equivalent formula for Deflate is this:
+ *
+ *    safety_margin = 18 + (uncompressed_size >> 12) + 32768
+ *
+ * Thus, when updating Deflate-only in-place kernel decompressor to
+ * support XZ, the fixed overhead has to be increased from 18+32768 bytes
+ * to 128+65536 bytes.
+ */
+
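+/*
+ * As a purely illustrative example with a hypothetical 8 MiB (8388608-byte)
+ * uncompressed image, the formula above gives:
+ *
+ *    safety_margin = 128 + (8388608 >> 12) + 65536
+ *                  = 128 + 2048 + 65536
+ *                  = 67712 bytes
+ */
+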
+/*
+ * STATIC is defined to "static" if we are being built for kernel
+ * decompression (pre-boot code). <linux/decompress/mm.h> will define
+ * STATIC to empty if it wasn't already defined. Since we will need to
+ * know later if we are being used for kernel decompression, we define
+ * XZ_PREBOOT here.
+ */
+#ifdef STATIC
+#	define XZ_PREBOOT
+#endif
+#ifdef __KERNEL__
+#	include <linux/decompress/mm.h>
+#endif
+#define XZ_EXTERN STATIC
+
+#ifndef XZ_PREBOOT
+#	include <linux/slab.h>
+#	include <linux/xz.h>
+#else
+/*
+ * Use the internal CRC32 code instead of the kernel's CRC32 module, which
+ * is not available in the early phase of booting.
+ */
+#define XZ_INTERNAL_CRC32 1
+
+/*
+ * For boot time use, we enable only the BCJ filter of the current
+ * architecture or none if no BCJ filter is available for the architecture.
+ */
+#ifdef CONFIG_X86
+#	define XZ_DEC_X86
+#endif
+#ifdef CONFIG_PPC
+#	define XZ_DEC_POWERPC
+#endif
+#ifdef CONFIG_ARM
+#	define XZ_DEC_ARM
+#endif
+#ifdef CONFIG_IA64
+#	define XZ_DEC_IA64
+#endif
+#ifdef CONFIG_SPARC
+#	define XZ_DEC_SPARC
+#endif
+
+/*
+ * This will get the basic headers so that memeq() and others
+ * can be defined.
+ */
+#include "xz/xz_private.h"
+
+/*
+ * Replace the normal allocation functions with the versions from
+ * <linux/decompress/mm.h>. vfree() needs to support vfree(NULL)
+ * when XZ_DYNALLOC is used, but the pre-boot free() doesn't support it.
+ * Work around it here because the other decompressors don't need it.
+ */
+#undef kmalloc
+#undef kfree
+#undef vmalloc
+#undef vfree
+#define kmalloc(size, flags) malloc(size)
+#define kfree(ptr) free(ptr)
+#define vmalloc(size) malloc(size)
+#define vfree(ptr) do { if (ptr != NULL) free(ptr); } while (0)
+
+/*
+ * FIXME: Not all basic memory functions are provided in architecture-specific
+ * files (yet). We define our own versions here for now, but this should be
+ * only a temporary solution.
+ *
+ * memeq and memzero are not used much and any remotely sane implementation
+ * is fast enough. memcpy/memmove speed matters in multi-call mode, but
+ * the kernel image is decompressed in single-call mode, in which only
+ * memcpy speed can matter and only if there is a lot of uncompressible data
+ * (LZMA2 stores uncompressible chunks in uncompressed form). Thus, the
+ * functions below should just be kept small; it's probably not worth
+ * optimizing for speed.
+ */
+
+#ifndef memeq
+static bool memeq(const void *a, const void *b, size_t size)
+{
+	const uint8_t *x = a;
+	const uint8_t *y = b;
+	size_t i;
+
+	for (i = 0; i < size; ++i)
+		if (x[i] != y[i])
+			return false;
+
+	return true;
+}
+#endif
+
+#ifndef memzero
+static void memzero(void *buf, size_t size)
+{
+	uint8_t *b = buf;
+	uint8_t *e = b + size;
+
+	while (b != e)
+		*b++ = '\0';
+}
+#endif
+
+#ifndef memmove
+/* Not static to avoid a conflict with the prototype in the Linux headers. */
+void *memmove(void *dest, const void *src, size_t size)
+{
+	uint8_t *d = dest;
+	const uint8_t *s = src;
+	size_t i;
+
+	if (d < s) {
+		for (i = 0; i < size; ++i)
+			d[i] = s[i];
+	} else if (d > s) {
+		i = size;
+		while (i-- > 0)
+			d[i] = s[i];
+	}
+
+	return dest;
+}
+#endif
+
+/*
+ * Since we need memmove anyway, we could use it as memcpy too.
+ * Commented out for now to avoid breaking things.
+ */
+/*
+#ifndef memcpy
+#	define memcpy memmove
+#endif
+*/
+
+#include "xz/xz_crc32.c"
+#include "xz/xz_dec_stream.c"
+#include "xz/xz_dec_lzma2.c"
+#include "xz/xz_dec_bcj.c"
+
+#endif /* XZ_PREBOOT */
+
+/* Size of the input and output buffers in multi-call mode */
+#define XZ_IOBUF_SIZE 4096
+
+/*
+ * This function implements the API defined in <linux/decompress/generic.h>.
+ *
+ * This wrapper will automatically choose single-call or multi-call mode
+ * of the native XZ decoder API. The single-call mode can be used only when
+ * both input and output buffers are available as a single chunk, i.e. when
+ * fill() and flush() won't be used.
+ */
+STATIC int INIT unxz(unsigned char *in, int in_size,
+		     int (*fill)(void *dest, unsigned int size),
+		     int (*flush)(void *src, unsigned int size),
+		     unsigned char *out, int *in_used,
+		     void (*error)(char *x))
+{
+	struct xz_buf b;
+	struct xz_dec *s;
+	enum xz_ret ret;
+	bool must_free_in = false;
+
+#if XZ_INTERNAL_CRC32
+	xz_crc32_init();
+#endif
+
+	if (in_used != NULL)
+		*in_used = 0;
+
+	if (fill == NULL && flush == NULL)
+		s = xz_dec_init(XZ_SINGLE, 0);
+	else
+		s = xz_dec_init(XZ_DYNALLOC, (uint32_t)-1);
+
+	if (s == NULL)
+		goto error_alloc_state;
+
+	if (flush == NULL) {
+		b.out = out;
+		b.out_size = (size_t)-1;
+	} else {
+		b.out_size = XZ_IOBUF_SIZE;
+		b.out = malloc(XZ_IOBUF_SIZE);
+		if (b.out == NULL)
+			goto error_alloc_out;
+	}
+
+	if (in == NULL) {
+		must_free_in = true;
+		in = malloc(XZ_IOBUF_SIZE);
+		if (in == NULL)
+			goto error_alloc_in;
+	}
+
+	b.in = in;
+	b.in_pos = 0;
+	b.in_size = in_size;
+	b.out_pos = 0;
+
+	if (fill == NULL && flush == NULL) {
+		ret = xz_dec_run(s, &b);
+	} else {
+		do {
+			if (b.in_pos == b.in_size && fill != NULL) {
+				if (in_used != NULL)
+					*in_used += b.in_pos;
+
+				b.in_pos = 0;
+
+				in_size = fill(in, XZ_IOBUF_SIZE);
+				if (in_size < 0) {
+					/*
+					 * This isn't an optimal error code
+					 * but it probably isn't worth making
+					 * a new one either.
+					 */
+					ret = XZ_BUF_ERROR;
+					break;
+				}
+
+				b.in_size = in_size;
+			}
+
+			ret = xz_dec_run(s, &b);
+
+			if (flush != NULL && (b.out_pos == b.out_size
+					|| (ret != XZ_OK && b.out_pos > 0))) {
+				/*
+				 * Setting ret here may hide an error
+				 * returned by xz_dec_run(), but probably
+				 * it's not too bad.
+				 */
+				if (flush(b.out, b.out_pos) != (int)b.out_pos)
+					ret = XZ_BUF_ERROR;
+
+				b.out_pos = 0;
+			}
+		} while (ret == XZ_OK);
+
+		if (must_free_in)
+			free(in);
+
+		if (flush != NULL)
+			free(b.out);
+	}
+
+	if (in_used != NULL)
+		*in_used += b.in_pos;
+
+	xz_dec_end(s);
+
+	switch (ret) {
+	case XZ_STREAM_END:
+		return 0;
+
+	case XZ_MEM_ERROR:
+		/* This can occur only in multi-call mode. */
+		error("XZ decompressor ran out of memory");
+		break;
+
+	case XZ_FORMAT_ERROR:
+		error("Input is not in the XZ format (wrong magic bytes)");
+		break;
+
+	case XZ_OPTIONS_ERROR:
+		error("Input was encoded with settings that are not "
+				"supported by this XZ decoder");
+		break;
+
+	case XZ_DATA_ERROR:
+	case XZ_BUF_ERROR:
+		error("XZ-compressed data is corrupt");
+		break;
+
+	default:
+		error("Bug in the XZ decompressor");
+		break;
+	}
+
+	return -1;
+
+error_alloc_in:
+	if (flush != NULL)
+		free(b.out);
+
+error_alloc_out:
+	xz_dec_end(s);
+
+error_alloc_state:
+	error("XZ decompressor ran out of memory");
+	return -1;
+}
+
+/*
+ * This macro is used by architecture-specific files to decompress
+ * the kernel image.
+ */
+#define decompress unxz
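
For illustration only, a minimal caller of the generic API above in single-call mode, assuming the declaration from <linux/decompress/unxz.h>; the names decompress_xz_image() and unxz_error() are made up for this sketch and are not part of the patch:

#include <linux/kernel.h>
#include <linux/decompress/unxz.h>

/* Hypothetical error callback; a real caller might panic() instead. */
static void unxz_error(char *msg)
{
	printk(KERN_ERR "unxz: %s\n", msg);
}

/*
 * Decompress a complete in-memory .xz image into a sufficiently large
 * output buffer. Passing NULL for both fill() and flush() selects the
 * single-call mode described above; returns 0 on success, -1 on error.
 */
static int decompress_xz_image(unsigned char *in, int in_size,
			       unsigned char *out)
{
	int in_used;

	return unxz(in, in_size, NULL, NULL, out, &in_used, unxz_error);
}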
diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
index 3094318..b335acb 100644
--- a/lib/dynamic_debug.c
+++ b/lib/dynamic_debug.c
@@ -141,11 +141,10 @@
 			else if (!dp->flags)
 				dt->num_enabled++;
 			dp->flags = newflags;
-			if (newflags) {
-				jump_label_enable(&dp->enabled);
-			} else {
-				jump_label_disable(&dp->enabled);
-			}
+			if (newflags)
+				dp->enabled = 1;
+			else
+				dp->enabled = 0;
 			if (verbose)
 				printk(KERN_INFO
 					"ddebug: changed %s:%d [%s]%s %s\n",
diff --git a/lib/flex_array.c b/lib/flex_array.c
index 77a6fea..c0ea40b 100644
--- a/lib/flex_array.c
+++ b/lib/flex_array.c
@@ -23,6 +23,7 @@
 #include <linux/flex_array.h>
 #include <linux/slab.h>
 #include <linux/stddef.h>
+#include <linux/module.h>
 
 struct flex_array_part {
 	char elements[FLEX_ARRAY_PART_SIZE];
@@ -103,6 +104,7 @@
 						FLEX_ARRAY_BASE_BYTES_LEFT);
 	return ret;
 }
+EXPORT_SYMBOL(flex_array_alloc);
 
 static int fa_element_to_part_nr(struct flex_array *fa,
 					unsigned int element_nr)
@@ -126,12 +128,14 @@
 	for (part_nr = 0; part_nr < FLEX_ARRAY_NR_BASE_PTRS; part_nr++)
 		kfree(fa->parts[part_nr]);
 }
+EXPORT_SYMBOL(flex_array_free_parts);
 
 void flex_array_free(struct flex_array *fa)
 {
 	flex_array_free_parts(fa);
 	kfree(fa);
 }
+EXPORT_SYMBOL(flex_array_free);
 
 static unsigned int index_inside_part(struct flex_array *fa,
 					unsigned int element_nr)
@@ -196,6 +200,7 @@
 	memcpy(dst, src, fa->element_size);
 	return 0;
 }
+EXPORT_SYMBOL(flex_array_put);
 
 /**
  * flex_array_clear - clear element in array at @element_nr
@@ -223,6 +228,7 @@
 	memset(dst, FLEX_ARRAY_FREE, fa->element_size);
 	return 0;
 }
+EXPORT_SYMBOL(flex_array_clear);
 
 /**
  * flex_array_prealloc - guarantee that array space exists
@@ -259,6 +265,7 @@
 	}
 	return 0;
 }
+EXPORT_SYMBOL(flex_array_prealloc);
 
 /**
  * flex_array_get - pull data back out of the array
@@ -288,6 +295,7 @@
 	}
 	return &part->elements[index_inside_part(fa, element_nr)];
 }
+EXPORT_SYMBOL(flex_array_get);
 
 /**
  * flex_array_get_ptr - pull a ptr back out of the array
@@ -308,6 +316,7 @@
 
 	return *tmp;
 }
+EXPORT_SYMBOL(flex_array_get_ptr);
 
 static int part_is_free(struct flex_array_part *part)
 {
@@ -348,3 +357,4 @@
 	}
 	return ret;
 }
+EXPORT_SYMBOL(flex_array_shrink);
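
Because the exports above make the flex_array API available to modules, a brief usage sketch follows; the element type, index, and control flow are arbitrary assumptions for illustration only:

#include <linux/flex_array.h>
#include <linux/kernel.h>
#include <linux/slab.h>

static int flex_array_example(void)
{
	struct flex_array *fa;
	int value = 42;
	int *p;
	int ret;

	/* Room for 128 ints; parts are allocated lazily on put(). */
	fa = flex_array_alloc(sizeof(int), 128, GFP_KERNEL);
	if (!fa)
		return -ENOMEM;

	ret = flex_array_put(fa, 0, &value, GFP_KERNEL);
	if (!ret) {
		p = flex_array_get(fa, 0);
		if (p)
			pr_info("element 0 = %d\n", *p);
	}

	flex_array_free(fa);
	return ret;
}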
diff --git a/lib/hexdump.c b/lib/hexdump.c
index b66b2bd..f5fe6ba 100644
--- a/lib/hexdump.c
+++ b/lib/hexdump.c
@@ -154,6 +154,7 @@
 }
 EXPORT_SYMBOL(hex_dump_to_buffer);
 
+#ifdef CONFIG_PRINTK
 /**
  * print_hex_dump - print a text hex dump to syslog for a binary blob of data
  * @level: kernel log level (e.g. KERN_DEBUG)
@@ -238,3 +239,4 @@
 		       buf, len, true);
 }
 EXPORT_SYMBOL(print_hex_dump_bytes);
+#endif
diff --git a/lib/kref.c b/lib/kref.c
index d3d227a..3efb882 100644
--- a/lib/kref.c
+++ b/lib/kref.c
@@ -62,6 +62,36 @@
 	return 0;
 }
 
+
+/**
+ * kref_sub - subtract a number of refcounts for object.
+ * @kref: object.
+ * @count: Number of refcounts to subtract.
+ * @release: pointer to the function that will clean up the object when the
+ *	     last reference to the object is released.
+ *	     This pointer is required, and it is not acceptable to pass kfree
+ *	     in as this function.
+ *
+ * Subtract @count from the refcount, and if 0, call release().
+ * Return 1 if the object was removed, otherwise return 0.  Beware, if this
+ * function returns 0, you still cannot count on the kref remaining in
+ * memory.  Only use the return value if you want to see if the kref is now
+ * gone, not present.
+ */
+int kref_sub(struct kref *kref, unsigned int count,
+	     void (*release)(struct kref *kref))
+{
+	WARN_ON(release == NULL);
+	WARN_ON(release == (void (*)(struct kref *))kfree);
+
+	if (atomic_sub_and_test((int) count, &kref->refcount)) {
+		release(kref);
+		return 1;
+	}
+	return 0;
+}
+
 EXPORT_SYMBOL(kref_init);
 EXPORT_SYMBOL(kref_get);
 EXPORT_SYMBOL(kref_put);
+EXPORT_SYMBOL(kref_sub);
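
A minimal sketch of a kref_sub() caller that drops several references in one go; struct foo, foo_release(), and foo_put_many() are hypothetical names used only for this example:

#include <linux/kref.h>
#include <linux/slab.h>

struct foo {
	struct kref ref;
	/* ... payload ... */
};

static void foo_release(struct kref *kref)
{
	struct foo *f = container_of(kref, struct foo, ref);

	kfree(f);
}

/* Drop @count references at once; returns 1 if the object was freed. */
static int foo_put_many(struct foo *f, unsigned int count)
{
	return kref_sub(&f->ref, count, foo_release);
}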
diff --git a/lib/nlattr.c b/lib/nlattr.c
index 00e8a02..5021cbc 100644
--- a/lib/nlattr.c
+++ b/lib/nlattr.c
@@ -167,7 +167,7 @@
  * @policy: validation policy
  *
  * Parses a stream of attributes and stores a pointer to each attribute in
- * the tb array accessable via the attribute type. Attributes with a type
+ * the tb array accessible via the attribute type. Attributes with a type
  * exceeding maxtype will be silently ignored for backwards compatibility
  * reasons. policy may be set to NULL if no validation is required.
  *
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 7c06ee5..c47bbe1 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -60,7 +60,7 @@
 static char *io_tlb_start, *io_tlb_end;
 
 /*
- * The number of IO TLB blocks (in groups of 64) betweeen io_tlb_start and
+ * The number of IO TLB blocks (in groups of 64) between io_tlb_start and
  * io_tlb_end.  This is command line adjustable via setup_io_tlb_npages.
  */
 static unsigned long io_tlb_nslabs;
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index c150d3d..d3023df 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -936,6 +936,8 @@
 	return string(buf, end, uuid, spec);
 }
 
+int kptr_restrict = 1;
+
 /*
  * Show a '%p' thing.  A kernel extension is that the '%p' is followed
  * by an extra set of alphanumeric characters that are extended format
@@ -979,6 +981,7 @@
  *       Implements a "recursive vsnprintf".
  *       Do not use this feature without some mechanism to verify the
  *       correctness of the format string and va_list arguments.
+ * - 'K' For a kernel pointer that should be hidden from unprivileged users
  *
  * Note: The difference between 'S' and 'F' is that on ia64 and ppc64
  * function pointers are really function descriptors, which contain a
@@ -1035,6 +1038,25 @@
 		return buf + vsnprintf(buf, end - buf,
 				       ((struct va_format *)ptr)->fmt,
 				       *(((struct va_format *)ptr)->va));
+	case 'K':
+		/*
+		 * %pK cannot be used in IRQ context because its test
+		 * for CAP_SYSLOG would be meaningless.
+		 */
+		if (in_irq() || in_serving_softirq() || in_nmi()) {
+			if (spec.field_width == -1)
+				spec.field_width = 2 * sizeof(void *);
+			return string(buf, end, "pK-error", spec);
+		} else if ((kptr_restrict == 0) ||
+			 (kptr_restrict == 1 &&
+			  has_capability_noaudit(current, CAP_SYSLOG)))
+			break;
+
+		if (spec.field_width == -1) {
+			spec.field_width = 2 * sizeof(void *);
+			spec.flags |= ZEROPAD;
+		}
+		return number(buf, end, 0, spec);
 	}
 	spec.flags |= SMALL;
 	if (spec.field_width == -1) {
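
As a rough illustration of the new 'K' extension (the function below is hypothetical and not part of this patch): with kptr_restrict at its default of 1, the real address is printed only when the task formatting the message has CAP_SYSLOG; otherwise a zero-padded 0 is printed.

static void debug_dump_handle(void *handle)
{
	printk(KERN_DEBUG "handle at %pK\n", handle);
}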
@@ -1451,7 +1473,7 @@
  * @args: Arguments for the format string
  *
  * The return value is the number of characters which have been written into
- * the @buf not including the trailing '\0'. If @size is <= 0 the function
+ * the @buf not including the trailing '\0'. If @size is == 0 the function
  * returns 0.
  *
  * Call this function if you are already dealing with a va_list.
@@ -1465,7 +1487,11 @@
 
 	i = vsnprintf(buf, size, fmt, args);
 
-	return (i >= size) ? (size - 1) : i;
+	if (likely(i < size))
+		return i;
+	if (size != 0)
+		return size - 1;
+	return 0;
 }
 EXPORT_SYMBOL(vscnprintf);
 
@@ -1513,14 +1539,10 @@
 	int i;
 
 	va_start(args, fmt);
-	i = vsnprintf(buf, size, fmt, args);
+	i = vscnprintf(buf, size, fmt, args);
 	va_end(args);
 
-	if (likely(i < size))
-		return i;
-	if (size != 0)
-		return size - 1;
-	return 0;
+	return i;
 }
 EXPORT_SYMBOL(scnprintf);
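
The change above keeps the usual scnprintf() contract: the return value is the number of characters actually written, which makes incremental formatting safe. A small sketch, with arbitrary buffer and fields assumed only for illustration:

static int format_status(char *buf, size_t size, int a, int b)
{
	int len = 0;

	/* scnprintf() never reports more than the space that was left. */
	len += scnprintf(buf + len, size - len, "a=%d ", a);
	len += scnprintf(buf + len, size - len, "b=%d\n", b);
	return len;
}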
 
diff --git a/lib/xz/Kconfig b/lib/xz/Kconfig
new file mode 100644
index 0000000..e3b6e18
--- /dev/null
+++ b/lib/xz/Kconfig
@@ -0,0 +1,59 @@
+config XZ_DEC
+	tristate "XZ decompression support"
+	select CRC32
+	help
+	  LZMA2 compression algorithm and BCJ filters are supported using
+	  the .xz file format as the container. For integrity checking,
+	  CRC32 is supported. See Documentation/xz.txt for more information.
+
+config XZ_DEC_X86
+	bool "x86 BCJ filter decoder" if EMBEDDED
+	default y
+	depends on XZ_DEC
+	select XZ_DEC_BCJ
+
+config XZ_DEC_POWERPC
+	bool "PowerPC BCJ filter decoder" if EMBEDDED
+	default y
+	depends on XZ_DEC
+	select XZ_DEC_BCJ
+
+config XZ_DEC_IA64
+	bool "IA-64 BCJ filter decoder" if EMBEDDED
+	default y
+	depends on XZ_DEC
+	select XZ_DEC_BCJ
+
+config XZ_DEC_ARM
+	bool "ARM BCJ filter decoder" if EMBEDDED
+	default y
+	depends on XZ_DEC
+	select XZ_DEC_BCJ
+
+config XZ_DEC_ARMTHUMB
+	bool "ARM-Thumb BCJ filter decoder" if EMBEDDED
+	default y
+	depends on XZ_DEC
+	select XZ_DEC_BCJ
+
+config XZ_DEC_SPARC
+	bool "SPARC BCJ filter decoder" if EMBEDDED
+	default y
+	depends on XZ_DEC
+	select XZ_DEC_BCJ
+
+config XZ_DEC_BCJ
+	bool
+	default n
+
+config XZ_DEC_TEST
+	tristate "XZ decompressor tester"
+	default n
+	depends on XZ_DEC
+	help
+	  This allows passing .xz files to the in-kernel XZ decoder via
+	  a character special file. It calculates CRC32 of the decompressed
+	  data and writes diagnostics to the system log.
+
+	  Unless you are developing the XZ decoder, you don't need this
+	  and should say N.
diff --git a/lib/xz/Makefile b/lib/xz/Makefile
new file mode 100644
index 0000000..a7fa769
--- /dev/null
+++ b/lib/xz/Makefile
@@ -0,0 +1,5 @@
+obj-$(CONFIG_XZ_DEC) += xz_dec.o
+xz_dec-y := xz_dec_syms.o xz_dec_stream.o xz_dec_lzma2.o
+xz_dec-$(CONFIG_XZ_DEC_BCJ) += xz_dec_bcj.o
+
+obj-$(CONFIG_XZ_DEC_TEST) += xz_dec_test.o
diff --git a/lib/xz/xz_crc32.c b/lib/xz/xz_crc32.c
new file mode 100644
index 0000000..34532d1
--- /dev/null
+++ b/lib/xz/xz_crc32.c
@@ -0,0 +1,59 @@
+/*
+ * CRC32 using the polynomial from IEEE-802.3
+ *
+ * Authors: Lasse Collin <lasse.collin@tukaani.org>
+ *          Igor Pavlov <http://7-zip.org/>
+ *
+ * This file has been put into the public domain.
+ * You can do whatever you want with this file.
+ */
+
+/*
+ * This is not the fastest implementation, but it is pretty compact.
+ * The fastest versions of xz_crc32() on modern CPUs without hardware
+ * accelerated CRC instruction are 3-5 times as fast as this version,
+ * but they are bigger and use more memory for the lookup table.
+ */
+
+#include "xz_private.h"
+
+/*
+ * STATIC_RW_DATA is used in the pre-boot environment on some architectures.
+ * See <linux/decompress/mm.h> for details.
+ */
+#ifndef STATIC_RW_DATA
+#	define STATIC_RW_DATA static
+#endif
+
+STATIC_RW_DATA uint32_t xz_crc32_table[256];
+
+XZ_EXTERN void xz_crc32_init(void)
+{
+	const uint32_t poly = 0xEDB88320;
+
+	uint32_t i;
+	uint32_t j;
+	uint32_t r;
+
+	for (i = 0; i < 256; ++i) {
+		r = i;
+		for (j = 0; j < 8; ++j)
+			r = (r >> 1) ^ (poly & ~((r & 1) - 1));
+
+		xz_crc32_table[i] = r;
+	}
+
+	return;
+}
+
+XZ_EXTERN uint32_t xz_crc32(const uint8_t *buf, size_t size, uint32_t crc)
+{
+	crc = ~crc;
+
+	while (size != 0) {
+		crc = xz_crc32_table[*buf++ ^ (crc & 0xFF)] ^ (crc >> 8);
+		--size;
+	}
+
+	return ~crc;
+}
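
Illustrative only: computing a checksum with the helpers above, assuming xz_crc32_init() has already run once (as the wrapper in lib/decompress_unxz.c arranges when XZ_INTERNAL_CRC32 is set); the function name is hypothetical:

static uint32_t example_crc(const uint8_t *buf, size_t len)
{
	/* The last argument is the running CRC value; start from 0. */
	return xz_crc32(buf, len, 0);
}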
diff --git a/lib/xz/xz_dec_bcj.c b/lib/xz/xz_dec_bcj.c
new file mode 100644
index 0000000..e51e255
--- /dev/null
+++ b/lib/xz/xz_dec_bcj.c
@@ -0,0 +1,561 @@
+/*
+ * Branch/Call/Jump (BCJ) filter decoders
+ *
+ * Authors: Lasse Collin <lasse.collin@tukaani.org>
+ *          Igor Pavlov <http://7-zip.org/>
+ *
+ * This file has been put into the public domain.
+ * You can do whatever you want with this file.
+ */
+
+#include "xz_private.h"
+
+/*
+ * The rest of the file is inside this ifdef. It makes things a little more
+ * convenient when building without support for any BCJ filters.
+ */
+#ifdef XZ_DEC_BCJ
+
+struct xz_dec_bcj {
+	/* Type of the BCJ filter being used */
+	enum {
+		BCJ_X86 = 4,        /* x86 or x86-64 */
+		BCJ_POWERPC = 5,    /* Big endian only */
+		BCJ_IA64 = 6,       /* Big or little endian */
+		BCJ_ARM = 7,        /* Little endian only */
+		BCJ_ARMTHUMB = 8,   /* Little endian only */
+		BCJ_SPARC = 9       /* Big or little endian */
+	} type;
+
+	/*
+	 * Return value of the next filter in the chain. We need to preserve
+	 * this information across calls, because we must not call the next
+	 * filter anymore once it has returned XZ_STREAM_END.
+	 */
+	enum xz_ret ret;
+
+	/* True if we are operating in single-call mode. */
+	bool single_call;
+
+	/*
+	 * Absolute position relative to the beginning of the uncompressed
+	 * data (in a single .xz Block). We care only about the lowest 32
+	 * bits so this doesn't need to be uint64_t even with big files.
+	 */
+	uint32_t pos;
+
+	/* x86 filter state */
+	uint32_t x86_prev_mask;
+
+	/* Temporary space to hold the variables from struct xz_buf */
+	uint8_t *out;
+	size_t out_pos;
+	size_t out_size;
+
+	struct {
+		/* Amount of already filtered data in the beginning of buf */
+		size_t filtered;
+
+		/* Total amount of data currently stored in buf  */
+		size_t size;
+
+		/*
+		 * Buffer to hold a mix of filtered and unfiltered data. This
+		 * needs to be big enough to hold Alignment + 2 * Look-ahead:
+		 *
+		 * Type         Alignment   Look-ahead
+		 * x86              1           4
+		 * PowerPC          4           0
+		 * IA-64           16           0
+		 * ARM              4           0
+		 * ARM-Thumb        2           2
+		 * SPARC            4           0
+		 */
+		uint8_t buf[16];
+	} temp;
+};
+
+#ifdef XZ_DEC_X86
+/*
+ * This is used to test the most significant byte of a memory address
+ * in an x86 instruction.
+ */
+static inline int bcj_x86_test_msbyte(uint8_t b)
+{
+	return b == 0x00 || b == 0xFF;
+}
+
+static size_t bcj_x86(struct xz_dec_bcj *s, uint8_t *buf, size_t size)
+{
+	static const bool mask_to_allowed_status[8]
+		= { true, true, true, false, true, false, false, false };
+
+	static const uint8_t mask_to_bit_num[8] = { 0, 1, 2, 2, 3, 3, 3, 3 };
+
+	size_t i;
+	size_t prev_pos = (size_t)-1;
+	uint32_t prev_mask = s->x86_prev_mask;
+	uint32_t src;
+	uint32_t dest;
+	uint32_t j;
+	uint8_t b;
+
+	if (size <= 4)
+		return 0;
+
+	size -= 4;
+	for (i = 0; i < size; ++i) {
+		if ((buf[i] & 0xFE) != 0xE8)
+			continue;
+
+		prev_pos = i - prev_pos;
+		if (prev_pos > 3) {
+			prev_mask = 0;
+		} else {
+			prev_mask = (prev_mask << (prev_pos - 1)) & 7;
+			if (prev_mask != 0) {
+				b = buf[i + 4 - mask_to_bit_num[prev_mask]];
+				if (!mask_to_allowed_status[prev_mask]
+						|| bcj_x86_test_msbyte(b)) {
+					prev_pos = i;
+					prev_mask = (prev_mask << 1) | 1;
+					continue;
+				}
+			}
+		}
+
+		prev_pos = i;
+
+		if (bcj_x86_test_msbyte(buf[i + 4])) {
+			src = get_unaligned_le32(buf + i + 1);
+			while (true) {
+				dest = src - (s->pos + (uint32_t)i + 5);
+				if (prev_mask == 0)
+					break;
+
+				j = mask_to_bit_num[prev_mask] * 8;
+				b = (uint8_t)(dest >> (24 - j));
+				if (!bcj_x86_test_msbyte(b))
+					break;
+
+				src = dest ^ (((uint32_t)1 << (32 - j)) - 1);
+			}
+
+			dest &= 0x01FFFFFF;
+			dest |= (uint32_t)0 - (dest & 0x01000000);
+			put_unaligned_le32(dest, buf + i + 1);
+			i += 4;
+		} else {
+			prev_mask = (prev_mask << 1) | 1;
+		}
+	}
+
+	prev_pos = i - prev_pos;
+	s->x86_prev_mask = prev_pos > 3 ? 0 : prev_mask << (prev_pos - 1);
+	return i;
+}
+#endif
+
+#ifdef XZ_DEC_POWERPC
+static size_t bcj_powerpc(struct xz_dec_bcj *s, uint8_t *buf, size_t size)
+{
+	size_t i;
+	uint32_t instr;
+
+	for (i = 0; i + 4 <= size; i += 4) {
+		instr = get_unaligned_be32(buf + i);
+		if ((instr & 0xFC000003) == 0x48000001) {
+			instr &= 0x03FFFFFC;
+			instr -= s->pos + (uint32_t)i;
+			instr &= 0x03FFFFFC;
+			instr |= 0x48000001;
+			put_unaligned_be32(instr, buf + i);
+		}
+	}
+
+	return i;
+}
+#endif
+
+#ifdef XZ_DEC_IA64
+static size_t bcj_ia64(struct xz_dec_bcj *s, uint8_t *buf, size_t size)
+{
+	static const uint8_t branch_table[32] = {
+		0, 0, 0, 0, 0, 0, 0, 0,
+		0, 0, 0, 0, 0, 0, 0, 0,
+		4, 4, 6, 6, 0, 0, 7, 7,
+		4, 4, 0, 0, 4, 4, 0, 0
+	};
+
+	/*
+	 * The local variables take a little bit of stack space, but it's less
+	 * than what LZMA2 decoder takes, so it doesn't make sense to reduce
+	 * stack usage here without doing that for the LZMA2 decoder too.
+	 */
+
+	/* Loop counters */
+	size_t i;
+	size_t j;
+
+	/* Instruction slot (0, 1, or 2) in the 128-bit instruction word */
+	uint32_t slot;
+
+	/* Bitwise offset of the instruction indicated by slot */
+	uint32_t bit_pos;
+
+	/* bit_pos split into byte and bit parts */
+	uint32_t byte_pos;
+	uint32_t bit_res;
+
+	/* Address part of an instruction */
+	uint32_t addr;
+
+	/* Mask used to detect which instructions to convert */
+	uint32_t mask;
+
+	/* 41-bit instruction stored somewhere in the lowest 48 bits */
+	uint64_t instr;
+
+	/* Instruction normalized with bit_res for easier manipulation */
+	uint64_t norm;
+
+	for (i = 0; i + 16 <= size; i += 16) {
+		mask = branch_table[buf[i] & 0x1F];
+		for (slot = 0, bit_pos = 5; slot < 3; ++slot, bit_pos += 41) {
+			if (((mask >> slot) & 1) == 0)
+				continue;
+
+			byte_pos = bit_pos >> 3;
+			bit_res = bit_pos & 7;
+			instr = 0;
+			for (j = 0; j < 6; ++j)
+				instr |= (uint64_t)(buf[i + j + byte_pos])
+						<< (8 * j);
+
+			norm = instr >> bit_res;
+
+			if (((norm >> 37) & 0x0F) == 0x05
+					&& ((norm >> 9) & 0x07) == 0) {
+				addr = (norm >> 13) & 0x0FFFFF;
+				addr |= ((uint32_t)(norm >> 36) & 1) << 20;
+				addr <<= 4;
+				addr -= s->pos + (uint32_t)i;
+				addr >>= 4;
+
+				norm &= ~((uint64_t)0x8FFFFF << 13);
+				norm |= (uint64_t)(addr & 0x0FFFFF) << 13;
+				norm |= (uint64_t)(addr & 0x100000)
+						<< (36 - 20);
+
+				instr &= (1 << bit_res) - 1;
+				instr |= norm << bit_res;
+
+				for (j = 0; j < 6; j++)
+					buf[i + j + byte_pos]
+						= (uint8_t)(instr >> (8 * j));
+			}
+		}
+	}
+
+	return i;
+}
+#endif
+
+#ifdef XZ_DEC_ARM
+static size_t bcj_arm(struct xz_dec_bcj *s, uint8_t *buf, size_t size)
+{
+	size_t i;
+	uint32_t addr;
+
+	for (i = 0; i + 4 <= size; i += 4) {
+		if (buf[i + 3] == 0xEB) {
+			addr = (uint32_t)buf[i] | ((uint32_t)buf[i + 1] << 8)
+					| ((uint32_t)buf[i + 2] << 16);
+			addr <<= 2;
+			addr -= s->pos + (uint32_t)i + 8;
+			addr >>= 2;
+			buf[i] = (uint8_t)addr;
+			buf[i + 1] = (uint8_t)(addr >> 8);
+			buf[i + 2] = (uint8_t)(addr >> 16);
+		}
+	}
+
+	return i;
+}
+#endif
+
+#ifdef XZ_DEC_ARMTHUMB
+static size_t bcj_armthumb(struct xz_dec_bcj *s, uint8_t *buf, size_t size)
+{
+	size_t i;
+	uint32_t addr;
+
+	for (i = 0; i + 4 <= size; i += 2) {
+		if ((buf[i + 1] & 0xF8) == 0xF0
+				&& (buf[i + 3] & 0xF8) == 0xF8) {
+			addr = (((uint32_t)buf[i + 1] & 0x07) << 19)
+					| ((uint32_t)buf[i] << 11)
+					| (((uint32_t)buf[i + 3] & 0x07) << 8)
+					| (uint32_t)buf[i + 2];
+			addr <<= 1;
+			addr -= s->pos + (uint32_t)i + 4;
+			addr >>= 1;
+			buf[i + 1] = (uint8_t)(0xF0 | ((addr >> 19) & 0x07));
+			buf[i] = (uint8_t)(addr >> 11);
+			buf[i + 3] = (uint8_t)(0xF8 | ((addr >> 8) & 0x07));
+			buf[i + 2] = (uint8_t)addr;
+			i += 2;
+		}
+	}
+
+	return i;
+}
+#endif
+
+#ifdef XZ_DEC_SPARC
+static size_t bcj_sparc(struct xz_dec_bcj *s, uint8_t *buf, size_t size)
+{
+	size_t i;
+	uint32_t instr;
+
+	for (i = 0; i + 4 <= size; i += 4) {
+		instr = get_unaligned_be32(buf + i);
+		if ((instr >> 22) == 0x100 || (instr >> 22) == 0x1FF) {
+			instr <<= 2;
+			instr -= s->pos + (uint32_t)i;
+			instr >>= 2;
+			instr = ((uint32_t)0x40000000 - (instr & 0x400000))
+					| 0x40000000 | (instr & 0x3FFFFF);
+			put_unaligned_be32(instr, buf + i);
+		}
+	}
+
+	return i;
+}
+#endif
+
+/*
+ * Apply the selected BCJ filter. Update *pos and s->pos to match the amount
+ * of data that got filtered.
+ *
+ * NOTE: This is implemented as a switch statement to avoid using function
+ * pointers, which could be problematic in the kernel boot code, which must
+ * avoid pointers to static data (at least on x86).
+ */
+static void bcj_apply(struct xz_dec_bcj *s,
+		      uint8_t *buf, size_t *pos, size_t size)
+{
+	size_t filtered;
+
+	buf += *pos;
+	size -= *pos;
+
+	switch (s->type) {
+#ifdef XZ_DEC_X86
+	case BCJ_X86:
+		filtered = bcj_x86(s, buf, size);
+		break;
+#endif
+#ifdef XZ_DEC_POWERPC
+	case BCJ_POWERPC:
+		filtered = bcj_powerpc(s, buf, size);
+		break;
+#endif
+#ifdef XZ_DEC_IA64
+	case BCJ_IA64:
+		filtered = bcj_ia64(s, buf, size);
+		break;
+#endif
+#ifdef XZ_DEC_ARM
+	case BCJ_ARM:
+		filtered = bcj_arm(s, buf, size);
+		break;
+#endif
+#ifdef XZ_DEC_ARMTHUMB
+	case BCJ_ARMTHUMB:
+		filtered = bcj_armthumb(s, buf, size);
+		break;
+#endif
+#ifdef XZ_DEC_SPARC
+	case BCJ_SPARC:
+		filtered = bcj_sparc(s, buf, size);
+		break;
+#endif
+	default:
+		/* Never reached but silence compiler warnings. */
+		filtered = 0;
+		break;
+	}
+
+	*pos += filtered;
+	s->pos += filtered;
+}
+
+/*
+ * Flush pending filtered data from temp to the output buffer.
+ * Move the remaining mixture of possibly filtered and unfiltered
+ * data to the beginning of temp.
+ */
+static void bcj_flush(struct xz_dec_bcj *s, struct xz_buf *b)
+{
+	size_t copy_size;
+
+	copy_size = min_t(size_t, s->temp.filtered, b->out_size - b->out_pos);
+	memcpy(b->out + b->out_pos, s->temp.buf, copy_size);
+	b->out_pos += copy_size;
+
+	s->temp.filtered -= copy_size;
+	s->temp.size -= copy_size;
+	memmove(s->temp.buf, s->temp.buf + copy_size, s->temp.size);
+}
+
+/*
+ * The BCJ filter functions are primitive in the sense that they process the
+ * data in chunks of 1-16 bytes. To hide this issue, this function does
+ * some buffering.
+ */
+XZ_EXTERN enum xz_ret xz_dec_bcj_run(struct xz_dec_bcj *s,
+				     struct xz_dec_lzma2 *lzma2,
+				     struct xz_buf *b)
+{
+	size_t out_start;
+
+	/*
+	 * Flush pending already filtered data to the output buffer. Return
+	 * immediately if we couldn't flush everything, or if the next
+	 * filter in the chain had already returned XZ_STREAM_END.
+	 */
+	if (s->temp.filtered > 0) {
+		bcj_flush(s, b);
+		if (s->temp.filtered > 0)
+			return XZ_OK;
+
+		if (s->ret == XZ_STREAM_END)
+			return XZ_STREAM_END;
+	}
+
+	/*
+	 * If we have more output space than what is currently pending in
+	 * temp, copy the unfiltered data from temp to the output buffer
+	 * and try to fill the output buffer by decoding more data from the
+	 * next filter in the chain. Apply the BCJ filter on the new data
+	 * in the output buffer. If everything cannot be filtered, copy it
+	 * to temp and rewind the output buffer position accordingly.
+	 */
+	if (s->temp.size < b->out_size - b->out_pos) {
+		out_start = b->out_pos;
+		memcpy(b->out + b->out_pos, s->temp.buf, s->temp.size);
+		b->out_pos += s->temp.size;
+
+		s->ret = xz_dec_lzma2_run(lzma2, b);
+		if (s->ret != XZ_STREAM_END
+				&& (s->ret != XZ_OK || s->single_call))
+			return s->ret;
+
+		bcj_apply(s, b->out, &out_start, b->out_pos);
+
+		/*
+		 * As an exception, if the next filter returned XZ_STREAM_END,
+		 * we can do that too, since the last few bytes that remain
+		 * unfiltered are meant to remain unfiltered.
+		 */
+		if (s->ret == XZ_STREAM_END)
+			return XZ_STREAM_END;
+
+		s->temp.size = b->out_pos - out_start;
+		b->out_pos -= s->temp.size;
+		memcpy(s->temp.buf, b->out + b->out_pos, s->temp.size);
+	}
+
+	/*
+	 * If we have unfiltered data in temp, try to fill by decoding more
+	 * data from the next filter. Apply the BCJ filter on temp. Then we
+	 * hopefully can fill the actual output buffer by copying filtered
+	 * data from temp. A mix of filtered and unfiltered data may be left
+	 * in temp; it will be taken care of on the next call to this function.
+	 */
+	if (s->temp.size > 0) {
+		/* Make b->out{,_pos,_size} temporarily point to s->temp. */
+		s->out = b->out;
+		s->out_pos = b->out_pos;
+		s->out_size = b->out_size;
+		b->out = s->temp.buf;
+		b->out_pos = s->temp.size;
+		b->out_size = sizeof(s->temp.buf);
+
+		s->ret = xz_dec_lzma2_run(lzma2, b);
+
+		s->temp.size = b->out_pos;
+		b->out = s->out;
+		b->out_pos = s->out_pos;
+		b->out_size = s->out_size;
+
+		if (s->ret != XZ_OK && s->ret != XZ_STREAM_END)
+			return s->ret;
+
+		bcj_apply(s, s->temp.buf, &s->temp.filtered, s->temp.size);
+
+		/*
+		 * If the next filter returned XZ_STREAM_END, we mark that
+		 * everything is filtered, since the last unfiltered bytes
+		 * of the stream are meant to be left as is.
+		 */
+		if (s->ret == XZ_STREAM_END)
+			s->temp.filtered = s->temp.size;
+
+		bcj_flush(s, b);
+		if (s->temp.filtered > 0)
+			return XZ_OK;
+	}
+
+	return s->ret;
+}
+
+XZ_EXTERN struct xz_dec_bcj *xz_dec_bcj_create(bool single_call)
+{
+	struct xz_dec_bcj *s = kmalloc(sizeof(*s), GFP_KERNEL);
+	if (s != NULL)
+		s->single_call = single_call;
+
+	return s;
+}
+
+XZ_EXTERN enum xz_ret xz_dec_bcj_reset(struct xz_dec_bcj *s, uint8_t id)
+{
+	switch (id) {
+#ifdef XZ_DEC_X86
+	case BCJ_X86:
+#endif
+#ifdef XZ_DEC_POWERPC
+	case BCJ_POWERPC:
+#endif
+#ifdef XZ_DEC_IA64
+	case BCJ_IA64:
+#endif
+#ifdef XZ_DEC_ARM
+	case BCJ_ARM:
+#endif
+#ifdef XZ_DEC_ARMTHUMB
+	case BCJ_ARMTHUMB:
+#endif
+#ifdef XZ_DEC_SPARC
+	case BCJ_SPARC:
+#endif
+		break;
+
+	default:
+		/* Unsupported Filter ID */
+		return XZ_OPTIONS_ERROR;
+	}
+
+	s->type = id;
+	s->ret = XZ_OK;
+	s->pos = 0;
+	s->x86_prev_mask = 0;
+	s->temp.filtered = 0;
+	s->temp.size = 0;
+
+	return XZ_OK;
+}
+
+#endif
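
A condensed sketch of the create/reset/run sequence for the decoder above; the wrapper function below is hypothetical and resource handling is simplified compared to the real xz_dec_* glue code:

static enum xz_ret run_bcj_example(struct xz_dec_lzma2 *lzma2,
				   struct xz_buf *b, uint8_t filter_id)
{
	struct xz_dec_bcj *bcj;
	enum xz_ret ret;

	/* true selects single-call mode, matching xz_dec_init(XZ_SINGLE). */
	bcj = xz_dec_bcj_create(true);
	if (bcj == NULL)
		return XZ_MEM_ERROR;

	/* Filter IDs that were not compiled in yield XZ_OPTIONS_ERROR. */
	ret = xz_dec_bcj_reset(bcj, filter_id);
	if (ret == XZ_OK)
		ret = xz_dec_bcj_run(bcj, lzma2, b);

	kfree(bcj);
	return ret;
}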
diff --git a/lib/xz/xz_dec_lzma2.c b/lib/xz/xz_dec_lzma2.c
new file mode 100644
index 0000000..ea5fa4f
--- /dev/null
+++ b/lib/xz/xz_dec_lzma2.c
@@ -0,0 +1,1171 @@
+/*
+ * LZMA2 decoder
+ *
+ * Authors: Lasse Collin <lasse.collin@tukaani.org>
+ *          Igor Pavlov <http://7-zip.org/>
+ *
+ * This file has been put into the public domain.
+ * You can do whatever you want with this file.
+ */
+
+#include "xz_private.h"
+#include "xz_lzma2.h"
+
+/*
+ * Range decoder initialization eats the first five bytes of each LZMA chunk.
+ */
+#define RC_INIT_BYTES 5
+
+/*
+ * Minimum number of usable input bytes needed to safely decode one LZMA
+ * symbol. The worst case is that we decode 22 bits using probabilities and
+ * 26 direct bits. This may consume a maximum of 20 bytes of input. However,
+ * lzma_main() does an extra normalization before returning, thus we
+ * need to put 21 here.
+ */
+#define LZMA_IN_REQUIRED 21
+
+/*
+ * Dictionary (history buffer)
+ *
+ * These are always true:
+ *    start <= pos <= full <= end
+ *    pos <= limit <= end
+ *
+ * In multi-call mode, also these are true:
+ *    end == size
+ *    size <= size_max
+ *    allocated <= size
+ *
+ * Most of these variables are size_t to support single-call mode,
+ * in which the dictionary variables address the actual output
+ * buffer directly.
+ */
+struct dictionary {
+	/* Beginning of the history buffer */
+	uint8_t *buf;
+
+	/* Old position in buf (before decoding more data) */
+	size_t start;
+
+	/* Position in buf */
+	size_t pos;
+
+	/*
+	 * How full the dictionary is. This is used to detect corrupt input that
+	 * would read beyond the beginning of the uncompressed stream.
+	 */
+	size_t full;
+
+	/* Write limit; we don't write to buf[limit] or later bytes. */
+	size_t limit;
+
+	/*
+	 * End of the dictionary buffer. In multi-call mode, this is
+	 * the same as the dictionary size. In single-call mode, this
+	 * indicates the size of the output buffer.
+	 */
+	size_t end;
+
+	/*
+	 * Size of the dictionary as specified in Block Header. This is used
+	 * together with "full" to detect corrupt input that would make us
+	 * read beyond the beginning of the uncompressed stream.
+	 */
+	uint32_t size;
+
+	/*
+	 * Maximum allowed dictionary size in multi-call mode.
+	 * This is ignored in single-call mode.
+	 */
+	uint32_t size_max;
+
+	/*
+	 * Amount of memory currently allocated for the dictionary.
+	 * This is used only with XZ_DYNALLOC. (With XZ_PREALLOC,
+	 * size_max is always the same as the allocated size.)
+	 */
+	uint32_t allocated;
+
+	/* Operation mode */
+	enum xz_mode mode;
+};
+
+/* Range decoder */
+struct rc_dec {
+	uint32_t range;
+	uint32_t code;
+
+	/*
+	 * Number of initializing bytes remaining to be read
+	 * by rc_read_init().
+	 */
+	uint32_t init_bytes_left;
+
+	/*
+	 * Buffer from which we read our input. It can be either
+	 * temp.buf or the caller-provided input buffer.
+	 */
+	const uint8_t *in;
+	size_t in_pos;
+	size_t in_limit;
+};
+
+/* Probabilities for a length decoder. */
+struct lzma_len_dec {
+	/* Probability of match length being at least 10 */
+	uint16_t choice;
+
+	/* Probability of match length being at least 18 */
+	uint16_t choice2;
+
+	/* Probabilities for match lengths 2-9 */
+	uint16_t low[POS_STATES_MAX][LEN_LOW_SYMBOLS];
+
+	/* Probabilities for match lengths 10-17 */
+	uint16_t mid[POS_STATES_MAX][LEN_MID_SYMBOLS];
+
+	/* Probabilities for match lengths 18-273 */
+	uint16_t high[LEN_HIGH_SYMBOLS];
+};
+
+struct lzma_dec {
+	/* Distances of latest four matches */
+	uint32_t rep0;
+	uint32_t rep1;
+	uint32_t rep2;
+	uint32_t rep3;
+
+	/* Types of the most recently seen LZMA symbols */
+	enum lzma_state state;
+
+	/*
+	 * Length of a match. This is updated so that dict_repeat can
+	 * be called again to finish repeating the whole match.
+	 */
+	uint32_t len;
+
+	/*
+	 * LZMA properties or related bit masks (number of literal
+	 * context bits, a mask derived from the number of literal
+	 * position bits, and a mask derived from the number of
+	 * position bits)
+	 */
+	uint32_t lc;
+	uint32_t literal_pos_mask; /* (1 << lp) - 1 */
+	uint32_t pos_mask;         /* (1 << pb) - 1 */
+
+	/* If 1, it's a match. Otherwise it's a single 8-bit literal. */
+	uint16_t is_match[STATES][POS_STATES_MAX];
+
+	/* If 1, it's a repeated match. The distance is one of rep0 .. rep3. */
+	uint16_t is_rep[STATES];
+
+	/*
+	 * If 0, distance of a repeated match is rep0.
+	 * Otherwise check is_rep1.
+	 */
+	uint16_t is_rep0[STATES];
+
+	/*
+	 * If 0, distance of a repeated match is rep1.
+	 * Otherwise check is_rep2.
+	 */
+	uint16_t is_rep1[STATES];
+
+	/* If 0, distance of a repeated match is rep2. Otherwise it is rep3. */
+	uint16_t is_rep2[STATES];
+
+	/*
+	 * If 1, the repeated match has length of one byte. Otherwise
+	 * the length is decoded from rep_len_decoder.
+	 */
+	uint16_t is_rep0_long[STATES][POS_STATES_MAX];
+
+	/*
+	 * Probability tree for the highest two bits of the match
+	 * distance. There is a separate probability tree for match
+	 * lengths of 2 (i.e. MATCH_LEN_MIN), 3, 4, and [5, 273].
+	 */
+	uint16_t dist_slot[DIST_STATES][DIST_SLOTS];
+
+	/*
+	 * Probability trees for additional bits for match distance
+	 * when the distance is in the range [4, 127].
+	 */
+	uint16_t dist_special[FULL_DISTANCES - DIST_MODEL_END];
+
+	/*
+	 * Probability tree for the lowest four bits of a match
+	 * distance that is equal to or greater than 128.
+	 */
+	uint16_t dist_align[ALIGN_SIZE];
+
+	/* Length of a normal match */
+	struct lzma_len_dec match_len_dec;
+
+	/* Length of a repeated match */
+	struct lzma_len_dec rep_len_dec;
+
+	/* Probabilities of literals */
+	uint16_t literal[LITERAL_CODERS_MAX][LITERAL_CODER_SIZE];
+};
+
+struct lzma2_dec {
+	/* Position in xz_dec_lzma2_run(). */
+	enum lzma2_seq {
+		SEQ_CONTROL,
+		SEQ_UNCOMPRESSED_1,
+		SEQ_UNCOMPRESSED_2,
+		SEQ_COMPRESSED_0,
+		SEQ_COMPRESSED_1,
+		SEQ_PROPERTIES,
+		SEQ_LZMA_PREPARE,
+		SEQ_LZMA_RUN,
+		SEQ_COPY
+	} sequence;
+
+	/* Next position after decoding the compressed size of the chunk. */
+	enum lzma2_seq next_sequence;
+
+	/* Uncompressed size of LZMA chunk (2 MiB at maximum) */
+	uint32_t uncompressed;
+
+	/*
+	 * Compressed size of LZMA chunk or compressed/uncompressed
+	 * size of uncompressed chunk (64 KiB at maximum)
+	 */
+	uint32_t compressed;
+
+	/*
+	 * True if dictionary reset is needed. This is false before
+	 * the first chunk (LZMA or uncompressed).
+	 */
+	bool need_dict_reset;
+
+	/*
+	 * True if new LZMA properties are needed. This is false
+	 * before the first LZMA chunk.
+	 */
+	bool need_props;
+};
+
+struct xz_dec_lzma2 {
+	/*
+	 * The order below is important on x86 to reduce code size and
+	 * it shouldn't hurt on other platforms. Everything up to and
+	 * including lzma.pos_mask are in the first 128 bytes on x86-32,
+	 * which allows using smaller instructions to access those
+	 * variables. On x86-64, fewer variables fit into the first 128
+	 * bytes, but this is still the best order without sacrificing
+	 * the readability by splitting the structures.
+	 */
+	struct rc_dec rc;
+	struct dictionary dict;
+	struct lzma2_dec lzma2;
+	struct lzma_dec lzma;
+
+	/*
+	 * Temporary buffer which holds small number of input bytes between
+	 * decoder calls. See lzma2_lzma() for details.
+	 */
+	struct {
+		uint32_t size;
+		uint8_t buf[3 * LZMA_IN_REQUIRED];
+	} temp;
+};
+
+/**************
+ * Dictionary *
+ **************/
+
+/*
+ * Reset the dictionary state. When in single-call mode, set up the beginning
+ * of the dictionary to point to the actual output buffer.
+ */
+static void dict_reset(struct dictionary *dict, struct xz_buf *b)
+{
+	if (DEC_IS_SINGLE(dict->mode)) {
+		dict->buf = b->out + b->out_pos;
+		dict->end = b->out_size - b->out_pos;
+	}
+
+	dict->start = 0;
+	dict->pos = 0;
+	dict->limit = 0;
+	dict->full = 0;
+}
+
+/* Set dictionary write limit */
+static void dict_limit(struct dictionary *dict, size_t out_max)
+{
+	if (dict->end - dict->pos <= out_max)
+		dict->limit = dict->end;
+	else
+		dict->limit = dict->pos + out_max;
+}
+
+/* Return true if at least one byte can be written into the dictionary. */
+static inline bool dict_has_space(const struct dictionary *dict)
+{
+	return dict->pos < dict->limit;
+}
+
+/*
+ * Get a byte from the dictionary at the given distance. The distance is
+ * assumed to be valid, or as a special case, zero when the dictionary is
+ * still empty. This special case is needed for single-call decoding to
+ * avoid writing a '\0' to the end of the destination buffer.
+ */
+static inline uint32_t dict_get(const struct dictionary *dict, uint32_t dist)
+{
+	size_t offset = dict->pos - dist - 1;
+
+	if (dist >= dict->pos)
+		offset += dict->end;
+
+	return dict->full > 0 ? dict->buf[offset] : 0;
+}
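+
+/*
+ * Worked example (illustrative): with a multi-call ring buffer where
+ * dict->end == 8, dict->pos == 2, dict->full == 8 and dist == 4, the
+ * subtraction wraps around and the branch above adds dict->end back,
+ * giving offset == 5, i.e. the byte five positions behind the write
+ * position counted across the wrap-around point.
+ */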
+
+/*
+ * Put one byte into the dictionary. It is assumed that there is space for it.
+ */
+static inline void dict_put(struct dictionary *dict, uint8_t byte)
+{
+	dict->buf[dict->pos++] = byte;
+
+	if (dict->full < dict->pos)
+		dict->full = dict->pos;
+}
+
+/*
+ * Repeat the given number of bytes from the given distance. If the distance is
+ * invalid, false is returned. On success, true is returned and *len is
+ * updated to indicate how many bytes were left to be repeated.
+ */
+static bool dict_repeat(struct dictionary *dict, uint32_t *len, uint32_t dist)
+{
+	size_t back;
+	uint32_t left;
+
+	if (dist >= dict->full || dist >= dict->size)
+		return false;
+
+	left = min_t(size_t, dict->limit - dict->pos, *len);
+	*len -= left;
+
+	back = dict->pos - dist - 1;
+	if (dist >= dict->pos)
+		back += dict->end;
+
+	do {
+		dict->buf[dict->pos++] = dict->buf[back++];
+		if (back == dict->end)
+			back = 0;
+	} while (--left > 0);
+
+	if (dict->full < dict->pos)
+		dict->full = dict->pos;
+
+	return true;
+}
+
+/* Copy uncompressed data as is from input to dictionary and output buffers. */
+static void dict_uncompressed(struct dictionary *dict, struct xz_buf *b,
+			      uint32_t *left)
+{
+	size_t copy_size;
+
+	while (*left > 0 && b->in_pos < b->in_size
+			&& b->out_pos < b->out_size) {
+		copy_size = min(b->in_size - b->in_pos,
+				b->out_size - b->out_pos);
+		if (copy_size > dict->end - dict->pos)
+			copy_size = dict->end - dict->pos;
+		if (copy_size > *left)
+			copy_size = *left;
+
+		*left -= copy_size;
+
+		memcpy(dict->buf + dict->pos, b->in + b->in_pos, copy_size);
+		dict->pos += copy_size;
+
+		if (dict->full < dict->pos)
+			dict->full = dict->pos;
+
+		if (DEC_IS_MULTI(dict->mode)) {
+			if (dict->pos == dict->end)
+				dict->pos = 0;
+
+			memcpy(b->out + b->out_pos, b->in + b->in_pos,
+					copy_size);
+		}
+
+		dict->start = dict->pos;
+
+		b->out_pos += copy_size;
+		b->in_pos += copy_size;
+	}
+}
+
+/*
+ * Flush pending data from dictionary to b->out. It is assumed that there is
+ * enough space in b->out. This is guaranteed because caller uses dict_limit()
+ * before decoding data into the dictionary.
+ */
+static uint32_t dict_flush(struct dictionary *dict, struct xz_buf *b)
+{
+	size_t copy_size = dict->pos - dict->start;
+
+	if (DEC_IS_MULTI(dict->mode)) {
+		if (dict->pos == dict->end)
+			dict->pos = 0;
+
+		memcpy(b->out + b->out_pos, dict->buf + dict->start,
+				copy_size);
+	}
+
+	dict->start = dict->pos;
+	b->out_pos += copy_size;
+	return copy_size;
+}
+
+/*****************
+ * Range decoder *
+ *****************/
+
+/* Reset the range decoder. */
+static void rc_reset(struct rc_dec *rc)
+{
+	rc->range = (uint32_t)-1;
+	rc->code = 0;
+	rc->init_bytes_left = RC_INIT_BYTES;
+}
+
+/*
+ * Read the first five initial bytes into rc->code if they haven't been
+ * read already. (Yes, the first byte gets completely ignored.)
+ */
+static bool rc_read_init(struct rc_dec *rc, struct xz_buf *b)
+{
+	while (rc->init_bytes_left > 0) {
+		if (b->in_pos == b->in_size)
+			return false;
+
+		rc->code = (rc->code << 8) + b->in[b->in_pos++];
+		--rc->init_bytes_left;
+	}
+
+	return true;
+}
+
+/* Return true if there may not be enough input for the next decoding loop. */
+static inline bool rc_limit_exceeded(const struct rc_dec *rc)
+{
+	return rc->in_pos > rc->in_limit;
+}
+
+/*
+ * Return true if it is possible (from the point of view of the range
+ * decoder) that we have reached the end of the LZMA chunk.
+ */
+static inline bool rc_is_finished(const struct rc_dec *rc)
+{
+	return rc->code == 0;
+}
+
+/* Read the next input byte if needed. */
+static __always_inline void rc_normalize(struct rc_dec *rc)
+{
+	if (rc->range < RC_TOP_VALUE) {
+		rc->range <<= RC_SHIFT_BITS;
+		rc->code = (rc->code << RC_SHIFT_BITS) + rc->in[rc->in_pos++];
+	}
+}
+
+/*
+ * Decode one bit. In some versions, this function has been split into three
+ * functions so that the compiler is supposed to be able to more easily avoid
+ * an extra branch. In this particular version of the LZMA decoder, this
+ * doesn't seem to be a good idea (tested with GCC 3.3.6, 3.4.6, and 4.3.3
+ * on x86). Using a non-split version results in nicer-looking code too.
+ *
+ * NOTE: This must return an int. Do not make it return a bool or the speed
+ * of the code generated by GCC 3.x decreases 10-15 %. (GCC 4.3 doesn't care,
+ * and it generates 10-20 % faster code than GCC 3.x from this file anyway.)
+ */
+static __always_inline int rc_bit(struct rc_dec *rc, uint16_t *prob)
+{
+	uint32_t bound;
+	int bit;
+
+	rc_normalize(rc);
+	bound = (rc->range >> RC_BIT_MODEL_TOTAL_BITS) * *prob;
+	if (rc->code < bound) {
+		rc->range = bound;
+		*prob += (RC_BIT_MODEL_TOTAL - *prob) >> RC_MOVE_BITS;
+		bit = 0;
+	} else {
+		rc->range -= bound;
+		rc->code -= bound;
+		*prob -= *prob >> RC_MOVE_BITS;
+		bit = 1;
+	}
+
+	return bit;
+}
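+
+/*
+ * Numeric example (illustrative): right after rc_reset() and lzma_reset(),
+ * rc->range == 0xFFFFFFFF and *prob == RC_BIT_MODEL_TOTAL / 2 == 1024, so
+ * bound == (0xFFFFFFFF >> 11) * 1024 == 0x7FFFFC00, roughly half the range.
+ * Decoding a 0 bit then moves *prob up by (2048 - 1024) >> 5 == 32 to 1056;
+ * decoding a 1 bit moves it down by 1024 >> 5 == 32 to 992, so the model
+ * adapts toward whichever bit value is seen more often.
+ */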
+
+/* Decode a bittree starting from the most significant bit. */
+static __always_inline uint32_t rc_bittree(struct rc_dec *rc,
+					   uint16_t *probs, uint32_t limit)
+{
+	uint32_t symbol = 1;
+
+	do {
+		if (rc_bit(rc, &probs[symbol]))
+			symbol = (symbol << 1) + 1;
+		else
+			symbol <<= 1;
+	} while (symbol < limit);
+
+	return symbol;
+}
+
+/* Decode a bittree starting from the least significant bit. */
+static __always_inline void rc_bittree_reverse(struct rc_dec *rc,
+					       uint16_t *probs,
+					       uint32_t *dest, uint32_t limit)
+{
+	uint32_t symbol = 1;
+	uint32_t i = 0;
+
+	do {
+		if (rc_bit(rc, &probs[symbol])) {
+			symbol = (symbol << 1) + 1;
+			*dest += 1 << i;
+		} else {
+			symbol <<= 1;
+		}
+	} while (++i < limit);
+}
+
+/* Decode direct bits (fixed fifty-fifty probability) */
+static inline void rc_direct(struct rc_dec *rc, uint32_t *dest, uint32_t limit)
+{
+	uint32_t mask;
+
+	do {
+		rc_normalize(rc);
+		rc->range >>= 1;
+		rc->code -= rc->range;
+		mask = (uint32_t)0 - (rc->code >> 31);
+		rc->code += rc->range & mask;
+		*dest = (*dest << 1) + (mask + 1);
+	} while (--limit > 0);
+}
+
+/********
+ * LZMA *
+ ********/
+
+/* Get pointer to literal coder probability array. */
+static uint16_t *lzma_literal_probs(struct xz_dec_lzma2 *s)
+{
+	uint32_t prev_byte = dict_get(&s->dict, 0);
+	uint32_t low = prev_byte >> (8 - s->lzma.lc);
+	uint32_t high = (s->dict.pos & s->lzma.literal_pos_mask) << s->lzma.lc;
+	return s->lzma.literal[low + high];
+}
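+
+/*
+ * Example (illustrative): with the common properties lc = 3, lp = 0
+ * (literal_pos_mask == 0), high is always 0 and low is the top three bits
+ * of the previous byte, so one of eight literal coders is chosen purely by
+ * the previous byte. With lc = 0 and lp = 2, the coder would instead be
+ * chosen by the two lowest bits of the uncompressed position.
+ */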
+
+/* Decode a literal (one 8-bit byte) */
+static void lzma_literal(struct xz_dec_lzma2 *s)
+{
+	uint16_t *probs;
+	uint32_t symbol;
+	uint32_t match_byte;
+	uint32_t match_bit;
+	uint32_t offset;
+	uint32_t i;
+
+	probs = lzma_literal_probs(s);
+
+	if (lzma_state_is_literal(s->lzma.state)) {
+		symbol = rc_bittree(&s->rc, probs, 0x100);
+	} else {
+		symbol = 1;
+		match_byte = dict_get(&s->dict, s->lzma.rep0) << 1;
+		offset = 0x100;
+
+		do {
+			match_bit = match_byte & offset;
+			match_byte <<= 1;
+			i = offset + match_bit + symbol;
+
+			if (rc_bit(&s->rc, &probs[i])) {
+				symbol = (symbol << 1) + 1;
+				offset &= match_bit;
+			} else {
+				symbol <<= 1;
+				offset &= ~match_bit;
+			}
+		} while (symbol < 0x100);
+	}
+
+	dict_put(&s->dict, (uint8_t)symbol);
+	lzma_state_literal(&s->lzma.state);
+}
+
+/* Decode the length of the match into s->lzma.len. */
+static void lzma_len(struct xz_dec_lzma2 *s, struct lzma_len_dec *l,
+		     uint32_t pos_state)
+{
+	uint16_t *probs;
+	uint32_t limit;
+
+	if (!rc_bit(&s->rc, &l->choice)) {
+		probs = l->low[pos_state];
+		limit = LEN_LOW_SYMBOLS;
+		s->lzma.len = MATCH_LEN_MIN;
+	} else {
+		if (!rc_bit(&s->rc, &l->choice2)) {
+			probs = l->mid[pos_state];
+			limit = LEN_MID_SYMBOLS;
+			s->lzma.len = MATCH_LEN_MIN + LEN_LOW_SYMBOLS;
+		} else {
+			probs = l->high;
+			limit = LEN_HIGH_SYMBOLS;
+			s->lzma.len = MATCH_LEN_MIN + LEN_LOW_SYMBOLS
+					+ LEN_MID_SYMBOLS;
+		}
+	}
+
+	s->lzma.len += rc_bittree(&s->rc, probs, limit) - limit;
+}
+
+/* Decode a match. The distance will be stored in s->lzma.rep0. */
+static void lzma_match(struct xz_dec_lzma2 *s, uint32_t pos_state)
+{
+	uint16_t *probs;
+	uint32_t dist_slot;
+	uint32_t limit;
+
+	lzma_state_match(&s->lzma.state);
+
+	s->lzma.rep3 = s->lzma.rep2;
+	s->lzma.rep2 = s->lzma.rep1;
+	s->lzma.rep1 = s->lzma.rep0;
+
+	lzma_len(s, &s->lzma.match_len_dec, pos_state);
+
+	probs = s->lzma.dist_slot[lzma_get_dist_state(s->lzma.len)];
+	dist_slot = rc_bittree(&s->rc, probs, DIST_SLOTS) - DIST_SLOTS;
+
+	if (dist_slot < DIST_MODEL_START) {
+		s->lzma.rep0 = dist_slot;
+	} else {
+		limit = (dist_slot >> 1) - 1;
+		s->lzma.rep0 = 2 + (dist_slot & 1);
+
+		if (dist_slot < DIST_MODEL_END) {
+			s->lzma.rep0 <<= limit;
+			probs = s->lzma.dist_special + s->lzma.rep0
+					- dist_slot - 1;
+			rc_bittree_reverse(&s->rc, probs,
+					&s->lzma.rep0, limit);
+		} else {
+			rc_direct(&s->rc, &s->lzma.rep0, limit - ALIGN_BITS);
+			s->lzma.rep0 <<= ALIGN_BITS;
+			rc_bittree_reverse(&s->rc, s->lzma.dist_align,
+					&s->lzma.rep0, ALIGN_BITS);
+		}
+	}
+}
+
+/*
+ * Decode a repeated match. The distance is one of the four most recently
+ * seen matches. The distance will be stored in s->lzma.rep0.
+ */
+static void lzma_rep_match(struct xz_dec_lzma2 *s, uint32_t pos_state)
+{
+	uint32_t tmp;
+
+	if (!rc_bit(&s->rc, &s->lzma.is_rep0[s->lzma.state])) {
+		if (!rc_bit(&s->rc, &s->lzma.is_rep0_long[
+				s->lzma.state][pos_state])) {
+			lzma_state_short_rep(&s->lzma.state);
+			s->lzma.len = 1;
+			return;
+		}
+	} else {
+		if (!rc_bit(&s->rc, &s->lzma.is_rep1[s->lzma.state])) {
+			tmp = s->lzma.rep1;
+		} else {
+			if (!rc_bit(&s->rc, &s->lzma.is_rep2[s->lzma.state])) {
+				tmp = s->lzma.rep2;
+			} else {
+				tmp = s->lzma.rep3;
+				s->lzma.rep3 = s->lzma.rep2;
+			}
+
+			s->lzma.rep2 = s->lzma.rep1;
+		}
+
+		s->lzma.rep1 = s->lzma.rep0;
+		s->lzma.rep0 = tmp;
+	}
+
+	lzma_state_long_rep(&s->lzma.state);
+	lzma_len(s, &s->lzma.rep_len_dec, pos_state);
+}
+
+/* LZMA decoder core */
+static bool lzma_main(struct xz_dec_lzma2 *s)
+{
+	uint32_t pos_state;
+
+	/*
+	 * If the dictionary limit was reached during the previous call, try
+	 * to finish the possibly pending repeat in the dictionary.
+	 */
+	if (dict_has_space(&s->dict) && s->lzma.len > 0)
+		dict_repeat(&s->dict, &s->lzma.len, s->lzma.rep0);
+
+	/*
+	 * Decode more LZMA symbols. One iteration may consume up to
+	 * LZMA_IN_REQUIRED - 1 bytes.
+	 */
+	while (dict_has_space(&s->dict) && !rc_limit_exceeded(&s->rc)) {
+		pos_state = s->dict.pos & s->lzma.pos_mask;
+
+		if (!rc_bit(&s->rc, &s->lzma.is_match[
+				s->lzma.state][pos_state])) {
+			lzma_literal(s);
+		} else {
+			if (rc_bit(&s->rc, &s->lzma.is_rep[s->lzma.state]))
+				lzma_rep_match(s, pos_state);
+			else
+				lzma_match(s, pos_state);
+
+			if (!dict_repeat(&s->dict, &s->lzma.len, s->lzma.rep0))
+				return false;
+		}
+	}
+
+	/*
+	 * Having the range decoder always normalized when we are outside
+	 * this function makes it easier to correctly handle end of the chunk.
+	 */
+	rc_normalize(&s->rc);
+
+	return true;
+}
+
+/*
+ * Reset the LZMA decoder and range decoder state. The dictionary is not
+ * reset here, because the LZMA state may be reset without resetting the
+ * dictionary.
+ */
+static void lzma_reset(struct xz_dec_lzma2 *s)
+{
+	uint16_t *probs;
+	size_t i;
+
+	s->lzma.state = STATE_LIT_LIT;
+	s->lzma.rep0 = 0;
+	s->lzma.rep1 = 0;
+	s->lzma.rep2 = 0;
+	s->lzma.rep3 = 0;
+
+	/*
+	 * All probabilities are initialized to the same value. This hack
+	 * makes the code smaller by avoiding a separate loop for each
+	 * probability array.
+	 *
+	 * This could be optimized so that only the part of the literal
+	 * probabilities that is actually required gets initialized. In the
+	 * common case we would write 12 KiB less.
+	 */
+	probs = s->lzma.is_match[0];
+	for (i = 0; i < PROBS_TOTAL; ++i)
+		probs[i] = RC_BIT_MODEL_TOTAL / 2;
+
+	rc_reset(&s->rc);
+}
+
+/*
+ * Decode and validate LZMA properties (lc/lp/pb) and calculate the bit masks
+ * from the decoded lp and pb values. On success, the LZMA decoder state is
+ * reset and true is returned.
+ */
+static bool lzma_props(struct xz_dec_lzma2 *s, uint8_t props)
+{
+	if (props > (4 * 5 + 4) * 9 + 8)
+		return false;
+
+	s->lzma.pos_mask = 0;
+	while (props >= 9 * 5) {
+		props -= 9 * 5;
+		++s->lzma.pos_mask;
+	}
+
+	s->lzma.pos_mask = (1 << s->lzma.pos_mask) - 1;
+
+	s->lzma.literal_pos_mask = 0;
+	while (props >= 9) {
+		props -= 9;
+		++s->lzma.literal_pos_mask;
+	}
+
+	s->lzma.lc = props;
+
+	if (s->lzma.lc + s->lzma.literal_pos_mask > 4)
+		return false;
+
+	s->lzma.literal_pos_mask = (1 << s->lzma.literal_pos_mask) - 1;
+
+	lzma_reset(s);
+
+	return true;
+}
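+
+/*
+ * Worked example (illustrative): props == 93 corresponds to the usual
+ * lc=3 lp=0 pb=2 settings. The first loop subtracts 45 twice, so pos_mask
+ * becomes (1 << 2) - 1 == 3 (pb = 2). The remaining value 3 is less than
+ * 9, so lp = 0 and literal_pos_mask ends up as 0, and lc == 3. The
+ * lc + lp <= 4 check passes (3 + 0).
+ */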
+
+/*********
+ * LZMA2 *
+ *********/
+
+/*
+ * The LZMA decoder assumes that if the input limit (s->rc.in_limit) hasn't
+ * been exceeded, it is safe to read up to LZMA_IN_REQUIRED bytes. This
+ * wrapper function takes care of making the LZMA decoder's assumption safe.
+ *
+ * As long as there is plenty of input left to be decoded in the current LZMA
+ * chunk, we decode directly from the caller-supplied input buffer until
+ * there are LZMA_IN_REQUIRED bytes left. Those remaining bytes are copied into
+ * s->temp.buf, which (hopefully) gets filled on the next call to this
+ * function. We decode a few bytes from the temporary buffer so that we can
+ * continue decoding from the caller-supplied input buffer again.
+ */
+static bool lzma2_lzma(struct xz_dec_lzma2 *s, struct xz_buf *b)
+{
+	size_t in_avail;
+	uint32_t tmp;
+
+	in_avail = b->in_size - b->in_pos;
+	if (s->temp.size > 0 || s->lzma2.compressed == 0) {
+		tmp = 2 * LZMA_IN_REQUIRED - s->temp.size;
+		if (tmp > s->lzma2.compressed - s->temp.size)
+			tmp = s->lzma2.compressed - s->temp.size;
+		if (tmp > in_avail)
+			tmp = in_avail;
+
+		memcpy(s->temp.buf + s->temp.size, b->in + b->in_pos, tmp);
+
+		if (s->temp.size + tmp == s->lzma2.compressed) {
+			memzero(s->temp.buf + s->temp.size + tmp,
+					sizeof(s->temp.buf)
+						- s->temp.size - tmp);
+			s->rc.in_limit = s->temp.size + tmp;
+		} else if (s->temp.size + tmp < LZMA_IN_REQUIRED) {
+			s->temp.size += tmp;
+			b->in_pos += tmp;
+			return true;
+		} else {
+			s->rc.in_limit = s->temp.size + tmp - LZMA_IN_REQUIRED;
+		}
+
+		s->rc.in = s->temp.buf;
+		s->rc.in_pos = 0;
+
+		if (!lzma_main(s) || s->rc.in_pos > s->temp.size + tmp)
+			return false;
+
+		s->lzma2.compressed -= s->rc.in_pos;
+
+		if (s->rc.in_pos < s->temp.size) {
+			s->temp.size -= s->rc.in_pos;
+			memmove(s->temp.buf, s->temp.buf + s->rc.in_pos,
+					s->temp.size);
+			return true;
+		}
+
+		b->in_pos += s->rc.in_pos - s->temp.size;
+		s->temp.size = 0;
+	}
+
+	in_avail = b->in_size - b->in_pos;
+	if (in_avail >= LZMA_IN_REQUIRED) {
+		s->rc.in = b->in;
+		s->rc.in_pos = b->in_pos;
+
+		if (in_avail >= s->lzma2.compressed + LZMA_IN_REQUIRED)
+			s->rc.in_limit = b->in_pos + s->lzma2.compressed;
+		else
+			s->rc.in_limit = b->in_size - LZMA_IN_REQUIRED;
+
+		if (!lzma_main(s))
+			return false;
+
+		in_avail = s->rc.in_pos - b->in_pos;
+		if (in_avail > s->lzma2.compressed)
+			return false;
+
+		s->lzma2.compressed -= in_avail;
+		b->in_pos = s->rc.in_pos;
+	}
+
+	in_avail = b->in_size - b->in_pos;
+	if (in_avail < LZMA_IN_REQUIRED) {
+		if (in_avail > s->lzma2.compressed)
+			in_avail = s->lzma2.compressed;
+
+		memcpy(s->temp.buf, b->in + b->in_pos, in_avail);
+		s->temp.size = in_avail;
+		b->in_pos += in_avail;
+	}
+
+	return true;
+}
+
+/*
+ * Take care of the LZMA2 control layer, and forward the job of actual LZMA
+ * decoding or copying of uncompressed chunks to other functions.
+ */
+XZ_EXTERN enum xz_ret xz_dec_lzma2_run(struct xz_dec_lzma2 *s,
+				       struct xz_buf *b)
+{
+	uint32_t tmp;
+
+	while (b->in_pos < b->in_size || s->lzma2.sequence == SEQ_LZMA_RUN) {
+		switch (s->lzma2.sequence) {
+		case SEQ_CONTROL:
+			/*
+			 * LZMA2 control byte
+			 *
+			 * Exact values:
+			 *   0x00   End marker
+			 *   0x01   Dictionary reset followed by
+			 *          an uncompressed chunk
+			 *   0x02   Uncompressed chunk (no dictionary reset)
+			 *
+			 * Highest three bits (control byte & 0xE0):
+			 *   0xE0   Dictionary reset, new properties and state
+			 *          reset, followed by LZMA compressed chunk
+			 *   0xC0   New properties and state reset, followed
+			 *          by LZMA compressed chunk (no dictionary
+			 *          reset)
+			 *   0xA0   State reset using old properties,
+			 *          followed by LZMA compressed chunk (no
+			 *          dictionary reset)
+			 *   0x80   LZMA chunk (no dictionary or state reset)
+			 *
+			 * For LZMA compressed chunks, the lowest five bits
+			 * (control byte & 0x1F) are the highest bits of the
+			 * uncompressed size (bits 16-20).
+			 *
+			 * A new LZMA2 stream must begin with a dictionary
+			 * reset. The first LZMA chunk must set new
+			 * properties and reset the LZMA state.
+			 *
+			 * Values that don't match anything described above
+			 * are invalid and we return XZ_DATA_ERROR.
+			 */
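+			/*
+			 * Worked example (illustrative): control byte 0xE4
+			 * means dictionary reset, new properties and state
+			 * reset, with bits 16-20 of the uncompressed size
+			 * equal to 0x04. If the next two bytes are 0x12 and
+			 * 0x34, the chunk's uncompressed size becomes
+			 * (0x04 << 16) + (0x12 << 8) + 0x34 + 1 == 0x41235.
+			 */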
+			tmp = b->in[b->in_pos++];
+
+			if (tmp >= 0xE0 || tmp == 0x01) {
+				s->lzma2.need_props = true;
+				s->lzma2.need_dict_reset = false;
+				dict_reset(&s->dict, b);
+			} else if (s->lzma2.need_dict_reset) {
+				return XZ_DATA_ERROR;
+			}
+
+			if (tmp >= 0x80) {
+				s->lzma2.uncompressed = (tmp & 0x1F) << 16;
+				s->lzma2.sequence = SEQ_UNCOMPRESSED_1;
+
+				if (tmp >= 0xC0) {
+					/*
+					 * When there are new properties,
+					 * state reset is done at
+					 * SEQ_PROPERTIES.
+					 */
+					s->lzma2.need_props = false;
+					s->lzma2.next_sequence
+							= SEQ_PROPERTIES;
+
+				} else if (s->lzma2.need_props) {
+					return XZ_DATA_ERROR;
+
+				} else {
+					s->lzma2.next_sequence
+							= SEQ_LZMA_PREPARE;
+					if (tmp >= 0xA0)
+						lzma_reset(s);
+				}
+			} else {
+				if (tmp == 0x00)
+					return XZ_STREAM_END;
+
+				if (tmp > 0x02)
+					return XZ_DATA_ERROR;
+
+				s->lzma2.sequence = SEQ_COMPRESSED_0;
+				s->lzma2.next_sequence = SEQ_COPY;
+			}
+
+			break;
+
+		case SEQ_UNCOMPRESSED_1:
+			s->lzma2.uncompressed
+					+= (uint32_t)b->in[b->in_pos++] << 8;
+			s->lzma2.sequence = SEQ_UNCOMPRESSED_2;
+			break;
+
+		case SEQ_UNCOMPRESSED_2:
+			s->lzma2.uncompressed
+					+= (uint32_t)b->in[b->in_pos++] + 1;
+			s->lzma2.sequence = SEQ_COMPRESSED_0;
+			break;
+
+		case SEQ_COMPRESSED_0:
+			s->lzma2.compressed
+					= (uint32_t)b->in[b->in_pos++] << 8;
+			s->lzma2.sequence = SEQ_COMPRESSED_1;
+			break;
+
+		case SEQ_COMPRESSED_1:
+			s->lzma2.compressed
+					+= (uint32_t)b->in[b->in_pos++] + 1;
+			s->lzma2.sequence = s->lzma2.next_sequence;
+			break;
+
+		case SEQ_PROPERTIES:
+			if (!lzma_props(s, b->in[b->in_pos++]))
+				return XZ_DATA_ERROR;
+
+			s->lzma2.sequence = SEQ_LZMA_PREPARE;
+
+		case SEQ_LZMA_PREPARE:
+			if (s->lzma2.compressed < RC_INIT_BYTES)
+				return XZ_DATA_ERROR;
+
+			if (!rc_read_init(&s->rc, b))
+				return XZ_OK;
+
+			s->lzma2.compressed -= RC_INIT_BYTES;
+			s->lzma2.sequence = SEQ_LZMA_RUN;
+
+		case SEQ_LZMA_RUN:
+			/*
+			 * Set dictionary limit to indicate how much we want
+			 * to be decoded at maximum. Decode new data into the
+			 * dictionary. Flush the new data from dictionary to
+			 * b->out. Check if we finished decoding this chunk.
+			 * In case the dictionary got full but we didn't fill
+			 * the output buffer yet, we may run this loop
+			 * multiple times without changing s->lzma2.sequence.
+			 */
+			dict_limit(&s->dict, min_t(size_t,
+					b->out_size - b->out_pos,
+					s->lzma2.uncompressed));
+			if (!lzma2_lzma(s, b))
+				return XZ_DATA_ERROR;
+
+			s->lzma2.uncompressed -= dict_flush(&s->dict, b);
+
+			if (s->lzma2.uncompressed == 0) {
+				if (s->lzma2.compressed > 0 || s->lzma.len > 0
+						|| !rc_is_finished(&s->rc))
+					return XZ_DATA_ERROR;
+
+				rc_reset(&s->rc);
+				s->lzma2.sequence = SEQ_CONTROL;
+
+			} else if (b->out_pos == b->out_size
+					|| (b->in_pos == b->in_size
+						&& s->temp.size
+						< s->lzma2.compressed)) {
+				return XZ_OK;
+			}
+
+			break;
+
+		case SEQ_COPY:
+			dict_uncompressed(&s->dict, b, &s->lzma2.compressed);
+			if (s->lzma2.compressed > 0)
+				return XZ_OK;
+
+			s->lzma2.sequence = SEQ_CONTROL;
+			break;
+		}
+	}
+
+	return XZ_OK;
+}
+
+XZ_EXTERN struct xz_dec_lzma2 *xz_dec_lzma2_create(enum xz_mode mode,
+						   uint32_t dict_max)
+{
+	struct xz_dec_lzma2 *s = kmalloc(sizeof(*s), GFP_KERNEL);
+	if (s == NULL)
+		return NULL;
+
+	s->dict.mode = mode;
+	s->dict.size_max = dict_max;
+
+	if (DEC_IS_PREALLOC(mode)) {
+		s->dict.buf = vmalloc(dict_max);
+		if (s->dict.buf == NULL) {
+			kfree(s);
+			return NULL;
+		}
+	} else if (DEC_IS_DYNALLOC(mode)) {
+		s->dict.buf = NULL;
+		s->dict.allocated = 0;
+	}
+
+	return s;
+}
+
+XZ_EXTERN enum xz_ret xz_dec_lzma2_reset(struct xz_dec_lzma2 *s, uint8_t props)
+{
+	/* This limits dictionary size to 3 GiB to keep parsing simpler. */
+	if (props > 39)
+		return XZ_OPTIONS_ERROR;
+
+	s->dict.size = 2 + (props & 1);
+	s->dict.size <<= (props >> 1) + 11;
+
+	if (DEC_IS_MULTI(s->dict.mode)) {
+		if (s->dict.size > s->dict.size_max)
+			return XZ_MEMLIMIT_ERROR;
+
+		s->dict.end = s->dict.size;
+
+		if (DEC_IS_DYNALLOC(s->dict.mode)) {
+			if (s->dict.allocated < s->dict.size) {
+				vfree(s->dict.buf);
+				s->dict.buf = vmalloc(s->dict.size);
+				if (s->dict.buf == NULL) {
+					s->dict.allocated = 0;
+					return XZ_MEM_ERROR;
+				}
+			}
+		}
+	}
+
+	s->lzma.len = 0;
+
+	s->lzma2.sequence = SEQ_CONTROL;
+	s->lzma2.need_dict_reset = true;
+
+	s->temp.size = 0;
+
+	return XZ_OK;
+}
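+
+/*
+ * Dictionary size examples (illustrative) for the formula above:
+ * props == 0 gives 2 << 11 == 4 KiB, props == 22 gives 2 << 22 == 8 MiB,
+ * and the largest allowed value props == 39 gives 3 << 30 == 3 GiB.
+ */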
+
+XZ_EXTERN void xz_dec_lzma2_end(struct xz_dec_lzma2 *s)
+{
+	if (DEC_IS_MULTI(s->dict.mode))
+		vfree(s->dict.buf);
+
+	kfree(s);
+}
diff --git a/lib/xz/xz_dec_stream.c b/lib/xz/xz_dec_stream.c
new file mode 100644
index 0000000..ac809b1
--- /dev/null
+++ b/lib/xz/xz_dec_stream.c
@@ -0,0 +1,821 @@
+/*
+ * .xz Stream decoder
+ *
+ * Author: Lasse Collin <lasse.collin@tukaani.org>
+ *
+ * This file has been put into the public domain.
+ * You can do whatever you want with this file.
+ */
+
+#include "xz_private.h"
+#include "xz_stream.h"
+
+/* Hash used to validate the Index field */
+struct xz_dec_hash {
+	vli_type unpadded;
+	vli_type uncompressed;
+	uint32_t crc32;
+};
+
+struct xz_dec {
+	/* Position in dec_main() */
+	enum {
+		SEQ_STREAM_HEADER,
+		SEQ_BLOCK_START,
+		SEQ_BLOCK_HEADER,
+		SEQ_BLOCK_UNCOMPRESS,
+		SEQ_BLOCK_PADDING,
+		SEQ_BLOCK_CHECK,
+		SEQ_INDEX,
+		SEQ_INDEX_PADDING,
+		SEQ_INDEX_CRC32,
+		SEQ_STREAM_FOOTER
+	} sequence;
+
+	/* Position in variable-length integers and Check fields */
+	uint32_t pos;
+
+	/* Variable-length integer decoded by dec_vli() */
+	vli_type vli;
+
+	/* Saved in_pos and out_pos */
+	size_t in_start;
+	size_t out_start;
+
+	/* CRC32 value in Block or Index */
+	uint32_t crc32;
+
+	/* Type of the integrity check calculated from uncompressed data */
+	enum xz_check check_type;
+
+	/* Operation mode */
+	enum xz_mode mode;
+
+	/*
+	 * True if the next call to xz_dec_run() is allowed to return
+	 * XZ_BUF_ERROR.
+	 */
+	bool allow_buf_error;
+
+	/* Information stored in Block Header */
+	struct {
+		/*
+		 * Value stored in the Compressed Size field, or
+		 * VLI_UNKNOWN if Compressed Size is not present.
+		 */
+		vli_type compressed;
+
+		/*
+		 * Value stored in the Uncompressed Size field, or
+		 * VLI_UNKNOWN if Uncompressed Size is not present.
+		 */
+		vli_type uncompressed;
+
+		/* Size of the Block Header field */
+		uint32_t size;
+	} block_header;
+
+	/* Information collected when decoding Blocks */
+	struct {
+		/* Observed compressed size of the current Block */
+		vli_type compressed;
+
+		/* Observed uncompressed size of the current Block */
+		vli_type uncompressed;
+
+		/* Number of Blocks decoded so far */
+		vli_type count;
+
+		/*
+		 * Hash calculated from the Block sizes. This is used to
+		 * validate the Index field.
+		 */
+		struct xz_dec_hash hash;
+	} block;
+
+	/* Variables needed when verifying the Index field */
+	struct {
+		/* Position in dec_index() */
+		enum {
+			SEQ_INDEX_COUNT,
+			SEQ_INDEX_UNPADDED,
+			SEQ_INDEX_UNCOMPRESSED
+		} sequence;
+
+		/* Size of the Index in bytes */
+		vli_type size;
+
+		/* Number of Records (matches block.count in valid files) */
+		vli_type count;
+
+		/*
+		 * Hash calculated from the Records (matches block.hash in
+		 * valid files).
+		 */
+		struct xz_dec_hash hash;
+	} index;
+
+	/*
+	 * Temporary buffer needed to hold Stream Header, Block Header,
+	 * and Stream Footer. The Block Header is the biggest (1 KiB)
+	 * so we reserve space according to that. buf[] has to be aligned
+	 * to a multiple of four bytes; the size_t variables before it
+	 * should guarantee this.
+	 */
+	struct {
+		size_t pos;
+		size_t size;
+		uint8_t buf[1024];
+	} temp;
+
+	struct xz_dec_lzma2 *lzma2;
+
+#ifdef XZ_DEC_BCJ
+	struct xz_dec_bcj *bcj;
+	bool bcj_active;
+#endif
+};
+
+#ifdef XZ_DEC_ANY_CHECK
+/* Sizes of the Check field with different Check IDs */
+static const uint8_t check_sizes[16] = {
+	0,
+	4, 4, 4,
+	8, 8, 8,
+	16, 16, 16,
+	32, 32, 32,
+	64, 64, 64
+};
+#endif
+
+/*
+ * Fill s->temp by copying data starting from b->in[b->in_pos]. Caller
+ * must have set s->temp.size to indicate how much data we are supposed
+ * to copy into s->temp.buf. Return true once s->temp.pos has reached
+ * s->temp.size.
+ */
+static bool fill_temp(struct xz_dec *s, struct xz_buf *b)
+{
+	size_t copy_size = min_t(size_t,
+			b->in_size - b->in_pos, s->temp.size - s->temp.pos);
+
+	memcpy(s->temp.buf + s->temp.pos, b->in + b->in_pos, copy_size);
+	b->in_pos += copy_size;
+	s->temp.pos += copy_size;
+
+	if (s->temp.pos == s->temp.size) {
+		s->temp.pos = 0;
+		return true;
+	}
+
+	return false;
+}
+
+/* Decode a variable-length integer (little-endian base-128 encoding) */
+static enum xz_ret dec_vli(struct xz_dec *s, const uint8_t *in,
+			   size_t *in_pos, size_t in_size)
+{
+	uint8_t byte;
+
+	if (s->pos == 0)
+		s->vli = 0;
+
+	while (*in_pos < in_size) {
+		byte = in[*in_pos];
+		++*in_pos;
+
+		s->vli |= (vli_type)(byte & 0x7F) << s->pos;
+
+		if ((byte & 0x80) == 0) {
+			/* Don't allow non-minimal encodings. */
+			if (byte == 0 && s->pos != 0)
+				return XZ_DATA_ERROR;
+
+			s->pos = 0;
+			return XZ_STREAM_END;
+		}
+
+		s->pos += 7;
+		if (s->pos == 7 * VLI_BYTES_MAX)
+			return XZ_DATA_ERROR;
+	}
+
+	return XZ_OK;
+}
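+
+/*
+ * Worked example (illustrative): the byte sequence 0xC0 0x3F decodes as
+ * (0xC0 & 0x7F) << 0 == 0x40 with the continuation bit set, then
+ * 0x3F << 7 == 0x1F80 with the continuation bit clear, giving 0x1FC0
+ * (8128). A final 0x00 byte after earlier bytes would be rejected as a
+ * non-minimal encoding by the check above.
+ */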
+
+/*
+ * Decode the Compressed Data field from a Block. Update and validate
+ * the observed compressed and uncompressed sizes of the Block so that
+ * they don't exceed the values possibly stored in the Block Header
+ * (validation assumes that no integer overflow occurs, since vli_type
+ * is normally uint64_t). Update the CRC32 if presence of the CRC32
+ * field was indicated in Stream Header.
+ *
+ * Once the decoding is finished, validate that the observed sizes match
+ * the sizes possibly stored in the Block Header. Update the hash and
+ * Block count, which are later used to validate the Index field.
+ */
+static enum xz_ret dec_block(struct xz_dec *s, struct xz_buf *b)
+{
+	enum xz_ret ret;
+
+	s->in_start = b->in_pos;
+	s->out_start = b->out_pos;
+
+#ifdef XZ_DEC_BCJ
+	if (s->bcj_active)
+		ret = xz_dec_bcj_run(s->bcj, s->lzma2, b);
+	else
+#endif
+		ret = xz_dec_lzma2_run(s->lzma2, b);
+
+	s->block.compressed += b->in_pos - s->in_start;
+	s->block.uncompressed += b->out_pos - s->out_start;
+
+	/*
+	 * There is no need to separately check for VLI_UNKNOWN, since
+	 * the observed sizes are always smaller than VLI_UNKNOWN.
+	 */
+	if (s->block.compressed > s->block_header.compressed
+			|| s->block.uncompressed
+				> s->block_header.uncompressed)
+		return XZ_DATA_ERROR;
+
+	if (s->check_type == XZ_CHECK_CRC32)
+		s->crc32 = xz_crc32(b->out + s->out_start,
+				b->out_pos - s->out_start, s->crc32);
+
+	if (ret == XZ_STREAM_END) {
+		if (s->block_header.compressed != VLI_UNKNOWN
+				&& s->block_header.compressed
+					!= s->block.compressed)
+			return XZ_DATA_ERROR;
+
+		if (s->block_header.uncompressed != VLI_UNKNOWN
+				&& s->block_header.uncompressed
+					!= s->block.uncompressed)
+			return XZ_DATA_ERROR;
+
+		s->block.hash.unpadded += s->block_header.size
+				+ s->block.compressed;
+
+#ifdef XZ_DEC_ANY_CHECK
+		s->block.hash.unpadded += check_sizes[s->check_type];
+#else
+		if (s->check_type == XZ_CHECK_CRC32)
+			s->block.hash.unpadded += 4;
+#endif
+
+		s->block.hash.uncompressed += s->block.uncompressed;
+		s->block.hash.crc32 = xz_crc32(
+				(const uint8_t *)&s->block.hash,
+				sizeof(s->block.hash), s->block.hash.crc32);
+
+		++s->block.count;
+	}
+
+	return ret;
+}
+
+/* Update the Index size and the CRC32 value. */
+static void index_update(struct xz_dec *s, const struct xz_buf *b)
+{
+	size_t in_used = b->in_pos - s->in_start;
+	s->index.size += in_used;
+	s->crc32 = xz_crc32(b->in + s->in_start, in_used, s->crc32);
+}
+
+/*
+ * Decode the Number of Records, Unpadded Size, and Uncompressed Size
+ * fields from the Index field. That is, Index Padding and CRC32 are not
+ * decoded by this function.
+ *
+ * This can return XZ_OK (more input needed), XZ_STREAM_END (everything
+ * successfully decoded), or XZ_DATA_ERROR (input is corrupt).
+ */
+static enum xz_ret dec_index(struct xz_dec *s, struct xz_buf *b)
+{
+	enum xz_ret ret;
+
+	do {
+		ret = dec_vli(s, b->in, &b->in_pos, b->in_size);
+		if (ret != XZ_STREAM_END) {
+			index_update(s, b);
+			return ret;
+		}
+
+		switch (s->index.sequence) {
+		case SEQ_INDEX_COUNT:
+			s->index.count = s->vli;
+
+			/*
+			 * Validate that the Number of Records field
+			 * indicates the same number of Records as
+			 * there were Blocks in the Stream.
+			 */
+			if (s->index.count != s->block.count)
+				return XZ_DATA_ERROR;
+
+			s->index.sequence = SEQ_INDEX_UNPADDED;
+			break;
+
+		case SEQ_INDEX_UNPADDED:
+			s->index.hash.unpadded += s->vli;
+			s->index.sequence = SEQ_INDEX_UNCOMPRESSED;
+			break;
+
+		case SEQ_INDEX_UNCOMPRESSED:
+			s->index.hash.uncompressed += s->vli;
+			s->index.hash.crc32 = xz_crc32(
+					(const uint8_t *)&s->index.hash,
+					sizeof(s->index.hash),
+					s->index.hash.crc32);
+			--s->index.count;
+			s->index.sequence = SEQ_INDEX_UNPADDED;
+			break;
+		}
+	} while (s->index.count > 0);
+
+	return XZ_STREAM_END;
+}
+
+/*
+ * Validate that the next four input bytes match the value of s->crc32.
+ * s->pos must be zero when starting to validate the first byte.
+ */
+static enum xz_ret crc32_validate(struct xz_dec *s, struct xz_buf *b)
+{
+	do {
+		if (b->in_pos == b->in_size)
+			return XZ_OK;
+
+		if (((s->crc32 >> s->pos) & 0xFF) != b->in[b->in_pos++])
+			return XZ_DATA_ERROR;
+
+		s->pos += 8;
+
+	} while (s->pos < 32);
+
+	s->crc32 = 0;
+	s->pos = 0;
+
+	return XZ_STREAM_END;
+}
+
+#ifdef XZ_DEC_ANY_CHECK
+/*
+ * Skip over the Check field when the Check ID is not supported.
+ * Returns true once the whole Check field has been skipped over.
+ */
+static bool check_skip(struct xz_dec *s, struct xz_buf *b)
+{
+	while (s->pos < check_sizes[s->check_type]) {
+		if (b->in_pos == b->in_size)
+			return false;
+
+		++b->in_pos;
+		++s->pos;
+	}
+
+	s->pos = 0;
+
+	return true;
+}
+#endif
+
+/* Decode the Stream Header field (the first 12 bytes of the .xz Stream). */
+static enum xz_ret dec_stream_header(struct xz_dec *s)
+{
+	if (!memeq(s->temp.buf, HEADER_MAGIC, HEADER_MAGIC_SIZE))
+		return XZ_FORMAT_ERROR;
+
+	if (xz_crc32(s->temp.buf + HEADER_MAGIC_SIZE, 2, 0)
+			!= get_le32(s->temp.buf + HEADER_MAGIC_SIZE + 2))
+		return XZ_DATA_ERROR;
+
+	if (s->temp.buf[HEADER_MAGIC_SIZE] != 0)
+		return XZ_OPTIONS_ERROR;
+
+	/*
+	 * Of integrity checks, we support only none (Check ID = 0) and
+	 * CRC32 (Check ID = 1). However, if XZ_DEC_ANY_CHECK is defined,
+	 * we will accept other check types too, but then the check won't
+	 * be verified and a warning (XZ_UNSUPPORTED_CHECK) will be given.
+	 */
+	s->check_type = s->temp.buf[HEADER_MAGIC_SIZE + 1];
+
+#ifdef XZ_DEC_ANY_CHECK
+	if (s->check_type > XZ_CHECK_MAX)
+		return XZ_OPTIONS_ERROR;
+
+	if (s->check_type > XZ_CHECK_CRC32)
+		return XZ_UNSUPPORTED_CHECK;
+#else
+	if (s->check_type > XZ_CHECK_CRC32)
+		return XZ_OPTIONS_ERROR;
+#endif
+
+	return XZ_OK;
+}
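+
+/*
+ * Layout note (illustrative, derived from the checks above): the 12-byte
+ * Stream Header is the six-byte magic, one null Stream Flags byte, one
+ * Check ID byte, and a four-byte little-endian CRC32 computed over the
+ * two Stream Flags bytes.
+ */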
+
+/* Decode the Stream Footer field (the last 12 bytes of the .xz Stream) */
+static enum xz_ret dec_stream_footer(struct xz_dec *s)
+{
+	if (!memeq(s->temp.buf + 10, FOOTER_MAGIC, FOOTER_MAGIC_SIZE))
+		return XZ_DATA_ERROR;
+
+	if (xz_crc32(s->temp.buf + 4, 6, 0) != get_le32(s->temp.buf))
+		return XZ_DATA_ERROR;
+
+	/*
+	 * Validate Backward Size. Note that we never added the size of the
+	 * Index CRC32 field to s->index.size, thus we use s->index.size / 4
+	 * instead of s->index.size / 4 - 1.
+	 */
+	if ((s->index.size >> 2) != get_le32(s->temp.buf + 4))
+		return XZ_DATA_ERROR;
+
+	if (s->temp.buf[8] != 0 || s->temp.buf[9] != s->check_type)
+		return XZ_DATA_ERROR;
+
+	/*
+	 * Use XZ_STREAM_END instead of XZ_OK to be more convenient
+	 * for the caller.
+	 */
+	return XZ_STREAM_END;
+}
+
+/* Decode the Block Header and initialize the filter chain. */
+static enum xz_ret dec_block_header(struct xz_dec *s)
+{
+	enum xz_ret ret;
+
+	/*
+	 * Validate the CRC32. We know that the temp buffer is at least
+	 * eight bytes so this is safe.
+	 */
+	s->temp.size -= 4;
+	if (xz_crc32(s->temp.buf, s->temp.size, 0)
+			!= get_le32(s->temp.buf + s->temp.size))
+		return XZ_DATA_ERROR;
+
+	s->temp.pos = 2;
+
+	/*
+	 * Catch unsupported Block Flags. We support only one or two filters
+	 * in the chain, so we catch that with the same test.
+	 */
+#ifdef XZ_DEC_BCJ
+	if (s->temp.buf[1] & 0x3E)
+#else
+	if (s->temp.buf[1] & 0x3F)
+#endif
+		return XZ_OPTIONS_ERROR;
+
+	/* Compressed Size */
+	if (s->temp.buf[1] & 0x40) {
+		if (dec_vli(s, s->temp.buf, &s->temp.pos, s->temp.size)
+					!= XZ_STREAM_END)
+			return XZ_DATA_ERROR;
+
+		s->block_header.compressed = s->vli;
+	} else {
+		s->block_header.compressed = VLI_UNKNOWN;
+	}
+
+	/* Uncompressed Size */
+	if (s->temp.buf[1] & 0x80) {
+		if (dec_vli(s, s->temp.buf, &s->temp.pos, s->temp.size)
+				!= XZ_STREAM_END)
+			return XZ_DATA_ERROR;
+
+		s->block_header.uncompressed = s->vli;
+	} else {
+		s->block_header.uncompressed = VLI_UNKNOWN;
+	}
+
+#ifdef XZ_DEC_BCJ
+	/* If there are two filters, the first one must be a BCJ filter. */
+	s->bcj_active = s->temp.buf[1] & 0x01;
+	if (s->bcj_active) {
+		if (s->temp.size - s->temp.pos < 2)
+			return XZ_OPTIONS_ERROR;
+
+		ret = xz_dec_bcj_reset(s->bcj, s->temp.buf[s->temp.pos++]);
+		if (ret != XZ_OK)
+			return ret;
+
+		/*
+		 * We don't support custom start offset,
+		 * so Size of Properties must be zero.
+		 */
+		if (s->temp.buf[s->temp.pos++] != 0x00)
+			return XZ_OPTIONS_ERROR;
+	}
+#endif
+
+	/* Valid Filter Flags always take at least two bytes. */
+	if (s->temp.size - s->temp.pos < 2)
+		return XZ_DATA_ERROR;
+
+	/* Filter ID = LZMA2 */
+	if (s->temp.buf[s->temp.pos++] != 0x21)
+		return XZ_OPTIONS_ERROR;
+
+	/* Size of Properties = 1-byte Filter Properties */
+	if (s->temp.buf[s->temp.pos++] != 0x01)
+		return XZ_OPTIONS_ERROR;
+
+	/* Filter Properties contains LZMA2 dictionary size. */
+	if (s->temp.size - s->temp.pos < 1)
+		return XZ_DATA_ERROR;
+
+	ret = xz_dec_lzma2_reset(s->lzma2, s->temp.buf[s->temp.pos++]);
+	if (ret != XZ_OK)
+		return ret;
+
+	/* The rest must be Header Padding. */
+	while (s->temp.pos < s->temp.size)
+		if (s->temp.buf[s->temp.pos++] != 0x00)
+			return XZ_OPTIONS_ERROR;
+
+	s->temp.pos = 0;
+	s->block.compressed = 0;
+	s->block.uncompressed = 0;
+
+	return XZ_OK;
+}
+
+static enum xz_ret dec_main(struct xz_dec *s, struct xz_buf *b)
+{
+	enum xz_ret ret;
+
+	/*
+	 * Store the start position for the case when we are in the middle
+	 * of the Index field.
+	 */
+	s->in_start = b->in_pos;
+
+	while (true) {
+		switch (s->sequence) {
+		case SEQ_STREAM_HEADER:
+			/*
+			 * Stream Header is copied to s->temp, and then
+			 * decoded from there. This way if the caller
+			 * gives us only a little input at a time, we can
+			 * still keep the Stream Header decoding code
+			 * simple. Similar approach is used in many places
+			 * in this file.
+			 */
+			if (!fill_temp(s, b))
+				return XZ_OK;
+
+			/*
+			 * If dec_stream_header() returns
+			 * XZ_UNSUPPORTED_CHECK, it is still possible
+			 * to continue decoding if working in multi-call
+			 * mode. Thus, update s->sequence before calling
+			 * dec_stream_header().
+			 */
+			s->sequence = SEQ_BLOCK_START;
+
+			ret = dec_stream_header(s);
+			if (ret != XZ_OK)
+				return ret;
+
+		case SEQ_BLOCK_START:
+			/* We need one byte of input to continue. */
+			if (b->in_pos == b->in_size)
+				return XZ_OK;
+
+			/* See if this is the beginning of the Index field. */
+			if (b->in[b->in_pos] == 0) {
+				s->in_start = b->in_pos++;
+				s->sequence = SEQ_INDEX;
+				break;
+			}
+
+			/*
+			 * Calculate the size of the Block Header and
+			 * prepare to decode it.
+			 */
+			s->block_header.size
+				= ((uint32_t)b->in[b->in_pos] + 1) * 4;
+
+			s->temp.size = s->block_header.size;
+			s->temp.pos = 0;
+			s->sequence = SEQ_BLOCK_HEADER;
+
+		case SEQ_BLOCK_HEADER:
+			if (!fill_temp(s, b))
+				return XZ_OK;
+
+			ret = dec_block_header(s);
+			if (ret != XZ_OK)
+				return ret;
+
+			s->sequence = SEQ_BLOCK_UNCOMPRESS;
+
+		case SEQ_BLOCK_UNCOMPRESS:
+			ret = dec_block(s, b);
+			if (ret != XZ_STREAM_END)
+				return ret;
+
+			s->sequence = SEQ_BLOCK_PADDING;
+
+		case SEQ_BLOCK_PADDING:
+			/*
+			 * Size of Compressed Data + Block Padding
+			 * must be a multiple of four. We don't need
+			 * s->block.compressed for anything else
+			 * anymore, so we use it here to test the size
+			 * of the Block Padding field.
+			 */
+			while (s->block.compressed & 3) {
+				if (b->in_pos == b->in_size)
+					return XZ_OK;
+
+				if (b->in[b->in_pos++] != 0)
+					return XZ_DATA_ERROR;
+
+				++s->block.compressed;
+			}
+
+			s->sequence = SEQ_BLOCK_CHECK;
+
+		case SEQ_BLOCK_CHECK:
+			if (s->check_type == XZ_CHECK_CRC32) {
+				ret = crc32_validate(s, b);
+				if (ret != XZ_STREAM_END)
+					return ret;
+			}
+#ifdef XZ_DEC_ANY_CHECK
+			else if (!check_skip(s, b)) {
+				return XZ_OK;
+			}
+#endif
+
+			s->sequence = SEQ_BLOCK_START;
+			break;
+
+		case SEQ_INDEX:
+			ret = dec_index(s, b);
+			if (ret != XZ_STREAM_END)
+				return ret;
+
+			s->sequence = SEQ_INDEX_PADDING;
+
+		case SEQ_INDEX_PADDING:
+			while ((s->index.size + (b->in_pos - s->in_start))
+					& 3) {
+				if (b->in_pos == b->in_size) {
+					index_update(s, b);
+					return XZ_OK;
+				}
+
+				if (b->in[b->in_pos++] != 0)
+					return XZ_DATA_ERROR;
+			}
+
+			/* Finish the CRC32 value and Index size. */
+			index_update(s, b);
+
+			/* Compare the hashes to validate the Index field. */
+			if (!memeq(&s->block.hash, &s->index.hash,
+					sizeof(s->block.hash)))
+				return XZ_DATA_ERROR;
+
+			s->sequence = SEQ_INDEX_CRC32;
+
+		case SEQ_INDEX_CRC32:
+			ret = crc32_validate(s, b);
+			if (ret != XZ_STREAM_END)
+				return ret;
+
+			s->temp.size = STREAM_HEADER_SIZE;
+			s->sequence = SEQ_STREAM_FOOTER;
+
+		case SEQ_STREAM_FOOTER:
+			if (!fill_temp(s, b))
+				return XZ_OK;
+
+			return dec_stream_footer(s);
+		}
+	}
+
+	/* Never reached */
+}
+
+/*
+ * xz_dec_run() is a wrapper for dec_main() to handle some special cases in
+ * multi-call and single-call decoding.
+ *
+ * In multi-call mode, we must return XZ_BUF_ERROR when it seems clear that we
+ * are not going to make any progress anymore. This is to prevent the caller
+ * from calling us infinitely when the input file is truncated or otherwise
+ * corrupt. Since the zlib-style API allows the caller to fill the input buffer
+ * only when the decoder doesn't produce any new output, we have to be careful
+ * to avoid returning XZ_BUF_ERROR too easily: XZ_BUF_ERROR is returned only
+ * after the second consecutive call to xz_dec_run() that makes no progress.
+ *
+ * In single-call mode, if we couldn't decode everything and no error
+ * occurred, either the input is truncated or the output buffer is too small.
+ * Since we know that the last input byte never produces any output, we know
+ * that if all the input was consumed and decoding wasn't finished, the file
+ * must be corrupt. Otherwise the output buffer has to be too small or the
+ * file is corrupt in a way that decoding it produces too big output.
+ *
+ * If single-call decoding fails, we reset b->in_pos and b->out_pos back to
+ * their original values. This is because with some filter chains there won't
+ * be any valid uncompressed data in the output buffer unless the decoding
+ * actually succeeds (that's the price to pay for using the output buffer as
+ * the workspace).
+ */
+XZ_EXTERN enum xz_ret xz_dec_run(struct xz_dec *s, struct xz_buf *b)
+{
+	size_t in_start;
+	size_t out_start;
+	enum xz_ret ret;
+
+	if (DEC_IS_SINGLE(s->mode))
+		xz_dec_reset(s);
+
+	in_start = b->in_pos;
+	out_start = b->out_pos;
+	ret = dec_main(s, b);
+
+	if (DEC_IS_SINGLE(s->mode)) {
+		if (ret == XZ_OK)
+			ret = b->in_pos == b->in_size
+					? XZ_DATA_ERROR : XZ_BUF_ERROR;
+
+		if (ret != XZ_STREAM_END) {
+			b->in_pos = in_start;
+			b->out_pos = out_start;
+		}
+
+	} else if (ret == XZ_OK && in_start == b->in_pos
+			&& out_start == b->out_pos) {
+		if (s->allow_buf_error)
+			ret = XZ_BUF_ERROR;
+
+		s->allow_buf_error = true;
+	} else {
+		s->allow_buf_error = false;
+	}
+
+	return ret;
+}
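+
+/*
+ * Illustrative multi-call usage sketch (not part of this file; fill_input()
+ * and consume_output() are hypothetical caller-side helpers, and the 64 MiB
+ * dict_max limit is an arbitrary choice):
+ *
+ *	struct xz_dec *s = xz_dec_init(XZ_DYNALLOC, 1 << 26);
+ *	struct xz_buf b = {
+ *		.in = in_buf, .in_pos = 0, .in_size = 0,
+ *		.out = out_buf, .out_pos = 0, .out_size = sizeof(out_buf)
+ *	};
+ *	enum xz_ret ret;
+ *
+ *	do {
+ *		if (b.in_pos == b.in_size) {
+ *			b.in_size = fill_input(in_buf, sizeof(in_buf));
+ *			b.in_pos = 0;
+ *		}
+ *		ret = xz_dec_run(s, &b);
+ *		consume_output(out_buf, b.out_pos);
+ *		b.out_pos = 0;
+ *	} while (ret == XZ_OK);
+ *
+ *	xz_dec_end(s);
+ */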
+
+XZ_EXTERN struct xz_dec *xz_dec_init(enum xz_mode mode, uint32_t dict_max)
+{
+	struct xz_dec *s = kmalloc(sizeof(*s), GFP_KERNEL);
+	if (s == NULL)
+		return NULL;
+
+	s->mode = mode;
+
+#ifdef XZ_DEC_BCJ
+	s->bcj = xz_dec_bcj_create(DEC_IS_SINGLE(mode));
+	if (s->bcj == NULL)
+		goto error_bcj;
+#endif
+
+	s->lzma2 = xz_dec_lzma2_create(mode, dict_max);
+	if (s->lzma2 == NULL)
+		goto error_lzma2;
+
+	xz_dec_reset(s);
+	return s;
+
+error_lzma2:
+#ifdef XZ_DEC_BCJ
+	xz_dec_bcj_end(s->bcj);
+error_bcj:
+#endif
+	kfree(s);
+	return NULL;
+}
+
+XZ_EXTERN void xz_dec_reset(struct xz_dec *s)
+{
+	s->sequence = SEQ_STREAM_HEADER;
+	s->allow_buf_error = false;
+	s->pos = 0;
+	s->crc32 = 0;
+	memzero(&s->block, sizeof(s->block));
+	memzero(&s->index, sizeof(s->index));
+	s->temp.pos = 0;
+	s->temp.size = STREAM_HEADER_SIZE;
+}
+
+XZ_EXTERN void xz_dec_end(struct xz_dec *s)
+{
+	if (s != NULL) {
+		xz_dec_lzma2_end(s->lzma2);
+#ifdef XZ_DEC_BCJ
+		xz_dec_bcj_end(s->bcj);
+#endif
+		kfree(s);
+	}
+}
diff --git a/lib/xz/xz_dec_syms.c b/lib/xz/xz_dec_syms.c
new file mode 100644
index 0000000..32eb3c0
--- /dev/null
+++ b/lib/xz/xz_dec_syms.c
@@ -0,0 +1,26 @@
+/*
+ * XZ decoder module information
+ *
+ * Author: Lasse Collin <lasse.collin@tukaani.org>
+ *
+ * This file has been put into the public domain.
+ * You can do whatever you want with this file.
+ */
+
+#include <linux/module.h>
+#include <linux/xz.h>
+
+EXPORT_SYMBOL(xz_dec_init);
+EXPORT_SYMBOL(xz_dec_reset);
+EXPORT_SYMBOL(xz_dec_run);
+EXPORT_SYMBOL(xz_dec_end);
+
+MODULE_DESCRIPTION("XZ decompressor");
+MODULE_VERSION("1.0");
+MODULE_AUTHOR("Lasse Collin <lasse.collin@tukaani.org> and Igor Pavlov");
+
+/*
+ * This code is in the public domain, but in Linux it's simplest to just
+ * say it's GPL and consider the authors as the copyright holders.
+ */
+MODULE_LICENSE("GPL");
diff --git a/lib/xz/xz_dec_test.c b/lib/xz/xz_dec_test.c
new file mode 100644
index 0000000..da28a19
--- /dev/null
+++ b/lib/xz/xz_dec_test.c
@@ -0,0 +1,220 @@
+/*
+ * XZ decoder tester
+ *
+ * Author: Lasse Collin <lasse.collin@tukaani.org>
+ *
+ * This file has been put into the public domain.
+ * You can do whatever you want with this file.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+#include <linux/crc32.h>
+#include <linux/xz.h>
+
+/* Maximum supported dictionary size */
+#define DICT_MAX (1 << 20)
+
+/* Device name to pass to register_chrdev(). */
+#define DEVICE_NAME "xz_dec_test"
+
+/* Dynamically allocated device major number */
+static int device_major;
+
+/*
+ * We reuse the same decoder state, and thus can decode only one
+ * file at a time.
+ */
+static bool device_is_open;
+
+/* XZ decoder state */
+static struct xz_dec *state;
+
+/*
+ * Return value of xz_dec_run(). We need to avoid calling xz_dec_run() after
+ * it has returned XZ_STREAM_END, so we make this static.
+ */
+static enum xz_ret ret;
+
+/*
+ * Input and output buffers. The input buffer is used as a temporary safe
+ * place for the data coming from userspace.
+ */
+static uint8_t buffer_in[1024];
+static uint8_t buffer_out[1024];
+
+/*
+ * Structure to pass the input and output buffers to the XZ decoder.
+ * A few of the fields are never modified so we initialize them here.
+ */
+static struct xz_buf buffers = {
+	.in = buffer_in,
+	.out = buffer_out,
+	.out_size = sizeof(buffer_out)
+};
+
+/*
+ * CRC32 of uncompressed data. This is used to give the user a simple way
+ * to check that the decoder produces correct output.
+ */
+static uint32_t crc;
+
+static int xz_dec_test_open(struct inode *i, struct file *f)
+{
+	if (device_is_open)
+		return -EBUSY;
+
+	device_is_open = true;
+
+	xz_dec_reset(state);
+	ret = XZ_OK;
+	crc = 0xFFFFFFFF;
+
+	buffers.in_pos = 0;
+	buffers.in_size = 0;
+	buffers.out_pos = 0;
+
+	printk(KERN_INFO DEVICE_NAME ": opened\n");
+	return 0;
+}
+
+static int xz_dec_test_release(struct inode *i, struct file *f)
+{
+	device_is_open = false;
+
+	if (ret == XZ_OK)
+		printk(KERN_INFO DEVICE_NAME ": input was truncated\n");
+
+	printk(KERN_INFO DEVICE_NAME ": closed\n");
+	return 0;
+}
+
+/*
+ * Decode the data given to us from userspace. The CRC32 of the uncompressed
+ * data is calculated and is printed at the end of successful decoding. The
+ * uncompressed data isn't stored anywhere for further use.
+ *
+ * The .xz file must have exactly one Stream and no Stream Padding. The data
+ * after the first Stream is considered to be garbage.
+ */
+static ssize_t xz_dec_test_write(struct file *file, const char __user *buf,
+				 size_t size, loff_t *pos)
+{
+	size_t remaining;
+
+	if (ret != XZ_OK) {
+		if (size > 0)
+			printk(KERN_INFO DEVICE_NAME ": %zu bytes of "
+					"garbage at the end of the file\n",
+					size);
+
+		return -ENOSPC;
+	}
+
+	printk(KERN_INFO DEVICE_NAME ": decoding %zu bytes of input\n",
+			size);
+
+	remaining = size;
+	while ((remaining > 0 || buffers.out_pos == buffers.out_size)
+			&& ret == XZ_OK) {
+		if (buffers.in_pos == buffers.in_size) {
+			buffers.in_pos = 0;
+			buffers.in_size = min(remaining, sizeof(buffer_in));
+			if (copy_from_user(buffer_in, buf, buffers.in_size))
+				return -EFAULT;
+
+			buf += buffers.in_size;
+			remaining -= buffers.in_size;
+		}
+
+		buffers.out_pos = 0;
+		ret = xz_dec_run(state, &buffers);
+		crc = crc32(crc, buffer_out, buffers.out_pos);
+	}
+
+	switch (ret) {
+	case XZ_OK:
+		printk(KERN_INFO DEVICE_NAME ": XZ_OK\n");
+		return size;
+
+	case XZ_STREAM_END:
+		printk(KERN_INFO DEVICE_NAME ": XZ_STREAM_END, "
+				"CRC32 = 0x%08X\n", ~crc);
+		return size - remaining - (buffers.in_size - buffers.in_pos);
+
+	case XZ_MEMLIMIT_ERROR:
+		printk(KERN_INFO DEVICE_NAME ": XZ_MEMLIMIT_ERROR\n");
+		break;
+
+	case XZ_FORMAT_ERROR:
+		printk(KERN_INFO DEVICE_NAME ": XZ_FORMAT_ERROR\n");
+		break;
+
+	case XZ_OPTIONS_ERROR:
+		printk(KERN_INFO DEVICE_NAME ": XZ_OPTIONS_ERROR\n");
+		break;
+
+	case XZ_DATA_ERROR:
+		printk(KERN_INFO DEVICE_NAME ": XZ_DATA_ERROR\n");
+		break;
+
+	case XZ_BUF_ERROR:
+		printk(KERN_INFO DEVICE_NAME ": XZ_BUF_ERROR\n");
+		break;
+
+	default:
+		printk(KERN_INFO DEVICE_NAME ": Bug detected!\n");
+		break;
+	}
+
+	return -EIO;
+}
+
+/* Allocate the XZ decoder state and register the character device. */
+static int __init xz_dec_test_init(void)
+{
+	static const struct file_operations fileops = {
+		.owner = THIS_MODULE,
+		.open = &xz_dec_test_open,
+		.release = &xz_dec_test_release,
+		.write = &xz_dec_test_write
+	};
+
+	state = xz_dec_init(XZ_PREALLOC, DICT_MAX);
+	if (state == NULL)
+		return -ENOMEM;
+
+	device_major = register_chrdev(0, DEVICE_NAME, &fileops);
+	if (device_major < 0) {
+		xz_dec_end(state);
+		return device_major;
+	}
+
+	printk(KERN_INFO DEVICE_NAME ": module loaded\n");
+	printk(KERN_INFO DEVICE_NAME ": Create a device node with "
+			"'mknod " DEVICE_NAME " c %d 0' and write .xz files "
+			"to it.\n", device_major);
+	return 0;
+}
+
+static void __exit xz_dec_test_exit(void)
+{
+	unregister_chrdev(device_major, DEVICE_NAME);
+	xz_dec_end(state);
+	printk(KERN_INFO DEVICE_NAME ": module unloaded\n");
+}
+
+module_init(xz_dec_test_init);
+module_exit(xz_dec_test_exit);
+
+MODULE_DESCRIPTION("XZ decompressor tester");
+MODULE_VERSION("1.0");
+MODULE_AUTHOR("Lasse Collin <lasse.collin@tukaani.org>");
+
+/*
+ * This code is in the public domain, but in Linux it's simplest to just
+ * say it's GPL and consider the authors as the copyright holders.
+ */
+MODULE_LICENSE("GPL");
diff --git a/lib/xz/xz_lzma2.h b/lib/xz/xz_lzma2.h
new file mode 100644
index 0000000..071d67b
--- /dev/null
+++ b/lib/xz/xz_lzma2.h
@@ -0,0 +1,204 @@
+/*
+ * LZMA2 definitions
+ *
+ * Authors: Lasse Collin <lasse.collin@tukaani.org>
+ *          Igor Pavlov <http://7-zip.org/>
+ *
+ * This file has been put into the public domain.
+ * You can do whatever you want with this file.
+ */
+
+#ifndef XZ_LZMA2_H
+#define XZ_LZMA2_H
+
+/* Range coder constants */
+#define RC_SHIFT_BITS 8
+#define RC_TOP_BITS 24
+#define RC_TOP_VALUE (1 << RC_TOP_BITS)
+#define RC_BIT_MODEL_TOTAL_BITS 11
+#define RC_BIT_MODEL_TOTAL (1 << RC_BIT_MODEL_TOTAL_BITS)
+#define RC_MOVE_BITS 5
+
+/*
+ * Maximum number of position states. A position state is the lowest pb
+ * number of bits of the current uncompressed offset. In some places there
+ * are different sets of probabilities for different position states.
+ */
+#define POS_STATES_MAX (1 << 4)
+
+/*
+ * This enum is used to track which LZMA symbols have occurred most recently
+ * and in which order. This information is used to predict the next symbol.
+ *
+ * Symbols:
+ *  - Literal: One 8-bit byte
+ *  - Match: Repeat a chunk of data at some distance
+ *  - Long repeat: Multi-byte match at a recently seen distance
+ *  - Short repeat: One-byte repeat at a recently seen distance
+ *
+ * The symbol names are in the form STATE_oldest_older_previous. REP means
+ * either short or long repeated match, and NONLIT means any non-literal.
+ */
+enum lzma_state {
+	STATE_LIT_LIT,
+	STATE_MATCH_LIT_LIT,
+	STATE_REP_LIT_LIT,
+	STATE_SHORTREP_LIT_LIT,
+	STATE_MATCH_LIT,
+	STATE_REP_LIT,
+	STATE_SHORTREP_LIT,
+	STATE_LIT_MATCH,
+	STATE_LIT_LONGREP,
+	STATE_LIT_SHORTREP,
+	STATE_NONLIT_MATCH,
+	STATE_NONLIT_REP
+};
+
+/* Total number of states */
+#define STATES 12
+
+/* The lowest 7 states indicate that the previous symbol was a literal. */
+#define LIT_STATES 7
+
+/* Indicate that the latest symbol was a literal. */
+static inline void lzma_state_literal(enum lzma_state *state)
+{
+	if (*state <= STATE_SHORTREP_LIT_LIT)
+		*state = STATE_LIT_LIT;
+	else if (*state <= STATE_LIT_SHORTREP)
+		*state -= 3;
+	else
+		*state -= 6;
+}
+
+/* Indicate that the latest symbol was a match. */
+static inline void lzma_state_match(enum lzma_state *state)
+{
+	*state = *state < LIT_STATES ? STATE_LIT_MATCH : STATE_NONLIT_MATCH;
+}
+
+/* Indicate that the latest symbol was a long repeated match. */
+static inline void lzma_state_long_rep(enum lzma_state *state)
+{
+	*state = *state < LIT_STATES ? STATE_LIT_LONGREP : STATE_NONLIT_REP;
+}
+
+/* Indicate that the latest symbol was a short repeated match. */
+static inline void lzma_state_short_rep(enum lzma_state *state)
+{
+	*state = *state < LIT_STATES ? STATE_LIT_SHORTREP : STATE_NONLIT_REP;
+}
+
+/* Test if the previous symbol was a literal. */
+static inline bool lzma_state_is_literal(enum lzma_state state)
+{
+	return state < LIT_STATES;
+}
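+
+/*
+ * Transition examples (illustrative): after a literal, STATE_LIT_MATCH (7)
+ * becomes STATE_MATCH_LIT (4) and STATE_NONLIT_REP (11) becomes
+ * STATE_REP_LIT (5), i.e. the oldest remembered symbol is dropped and
+ * "literal" is recorded as the newest. A match from STATE_LIT_LIT moves
+ * the state to STATE_LIT_MATCH.
+ */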
+
+/* Each literal coder is divided into three sections:
+ *   - 0x001-0x0FF: Without match byte
+ *   - 0x101-0x1FF: With match byte; match bit is 0
+ *   - 0x201-0x2FF: With match byte; match bit is 1
+ *
+ * The match byte is used when the previous LZMA symbol was something other
+ * than a literal (that is, it was some kind of match).
+ */
+#define LITERAL_CODER_SIZE 0x300
+
+/* Maximum number of literal coders */
+#define LITERAL_CODERS_MAX (1 << 4)
+
+/* Minimum length of a match is two bytes. */
+#define MATCH_LEN_MIN 2
+
+/* Match length is encoded with 4, 5, or 10 bits.
+ *
+ * Length   Bits
+ *  2-9      4 = Choice=0 + 3 bits
+ * 10-17     5 = Choice=1 + Choice2=0 + 3 bits
+ * 18-273   10 = Choice=1 + Choice2=1 + 8 bits
+ */
+#define LEN_LOW_BITS 3
+#define LEN_LOW_SYMBOLS (1 << LEN_LOW_BITS)
+#define LEN_MID_BITS 3
+#define LEN_MID_SYMBOLS (1 << LEN_MID_BITS)
+#define LEN_HIGH_BITS 8
+#define LEN_HIGH_SYMBOLS (1 << LEN_HIGH_BITS)
+#define LEN_SYMBOLS (LEN_LOW_SYMBOLS + LEN_MID_SYMBOLS + LEN_HIGH_SYMBOLS)
+
+/*
+ * Maximum length of a match is 273 which is a result of the encoding
+ * described above.
+ */
+#define MATCH_LEN_MAX (MATCH_LEN_MIN + LEN_SYMBOLS - 1)
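+
+/*
+ * Length coding examples (illustrative): length 2 is Choice=0 plus the
+ * 3-bit low value 0; length 12 is Choice=1, Choice2=0 plus the 3-bit mid
+ * value 2; length 18 is Choice=1, Choice2=1 plus the 8-bit high value 0,
+ * and length 273 uses the 8-bit high value 255.
+ */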
+
+/*
+ * Different sets of probabilities are used for match distances that have
+ * very short match length: Lengths of 2, 3, and 4 bytes have a separate
+ * set of probabilities for each length. The matches with longer length
+ * use a shared set of probabilities.
+ */
+#define DIST_STATES 4
+
+/*
+ * Get the index of the appropriate probability array for decoding
+ * the distance slot.
+ */
+static inline uint32_t lzma_get_dist_state(uint32_t len)
+{
+	return len < DIST_STATES + MATCH_LEN_MIN
+			? len - MATCH_LEN_MIN : DIST_STATES - 1;
+}
+
+/*
+ * The highest two bits of a 32-bit match distance are encoded using six bits.
+ * This six-bit value is called a distance slot. This way encoding a 32-bit
+ * value takes 6-36 bits, larger values taking more bits.
+ */
+#define DIST_SLOT_BITS 6
+#define DIST_SLOTS (1 << DIST_SLOT_BITS)
+
+/* Match distances up to 127 are fully encoded using probabilities. Since
+ * the highest two bits (distance slot) are always encoded using six bits,
+ * the distances 0-3 don't need any additional bits to encode, since the
+ * distance slot itself is the same as the actual distance. DIST_MODEL_START
+ * indicates the first distance slot where at least one additional bit is
+ * needed.
+ */
+#define DIST_MODEL_START 4
+
+/*
+ * Match distances greater than 127 are encoded in three pieces:
+ *   - distance slot: the highest two bits
+ *   - direct bits: 2-26 bits below the highest two bits
+ *   - alignment bits: four lowest bits
+ *
+ * Direct bits don't use any probabilities.
+ *
+ * The distance slot value of 14 is for distances 128-191.
+ */
+#define DIST_MODEL_END 14
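+
+/*
+ * Slot-to-distance examples (illustrative, matching lzma_match() in
+ * xz_dec_lzma2.c): slots 0-3 are the distances 0-3 themselves; slot 4
+ * covers distances 4-5 (base 2 << 1 plus one extra bit), slot 6 covers
+ * 8-11 (base 2 << 2 plus two extra bits), and slot 13 covers 96-127
+ * (base 3 << 5 plus five extra bits). From slot 14 onwards the distances
+ * are 128 and above.
+ */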
+
+/* Distance slots that indicate a distance <= 127. */
+#define FULL_DISTANCES_BITS (DIST_MODEL_END / 2)
+#define FULL_DISTANCES (1 << FULL_DISTANCES_BITS)
+
+/*
+ * For match distances greater than 127, only the highest two bits and the
+ * lowest four bits (alignment) are encoded using probabilities.
+ */
+#define ALIGN_BITS 4
+#define ALIGN_SIZE (1 << ALIGN_BITS)
+#define ALIGN_MASK (ALIGN_SIZE - 1)
+
+/* Total number of all probability variables */
+#define PROBS_TOTAL (1846 + LITERAL_CODERS_MAX * LITERAL_CODER_SIZE)
+
+/*
+ * LZMA remembers the four most recent match distances. Reusing these
+ * distances tends to take less space than re-encoding the actual
+ * distance value.
+ */
+#define REPS 4
+
+#endif
diff --git a/lib/xz/xz_private.h b/lib/xz/xz_private.h
new file mode 100644
index 0000000..a65633e
--- /dev/null
+++ b/lib/xz/xz_private.h
@@ -0,0 +1,156 @@
+/*
+ * Private includes and definitions
+ *
+ * Author: Lasse Collin <lasse.collin@tukaani.org>
+ *
+ * This file has been put into the public domain.
+ * You can do whatever you want with this file.
+ */
+
+#ifndef XZ_PRIVATE_H
+#define XZ_PRIVATE_H
+
+#ifdef __KERNEL__
+#	include <linux/xz.h>
+#	include <asm/byteorder.h>
+#	include <asm/unaligned.h>
+	/* XZ_PREBOOT may be defined only via decompress_unxz.c. */
+#	ifndef XZ_PREBOOT
+#		include <linux/slab.h>
+#		include <linux/vmalloc.h>
+#		include <linux/string.h>
+#		ifdef CONFIG_XZ_DEC_X86
+#			define XZ_DEC_X86
+#		endif
+#		ifdef CONFIG_XZ_DEC_POWERPC
+#			define XZ_DEC_POWERPC
+#		endif
+#		ifdef CONFIG_XZ_DEC_IA64
+#			define XZ_DEC_IA64
+#		endif
+#		ifdef CONFIG_XZ_DEC_ARM
+#			define XZ_DEC_ARM
+#		endif
+#		ifdef CONFIG_XZ_DEC_ARMTHUMB
+#			define XZ_DEC_ARMTHUMB
+#		endif
+#		ifdef CONFIG_XZ_DEC_SPARC
+#			define XZ_DEC_SPARC
+#		endif
+#		define memeq(a, b, size) (memcmp(a, b, size) == 0)
+#		define memzero(buf, size) memset(buf, 0, size)
+#	endif
+#	define get_le32(p) le32_to_cpup((const uint32_t *)(p))
+#else
+	/*
+	 * For userspace builds, use a separate header to define the required
+	 * macros and functions. This makes it easier to adapt the code into
+	 * different environments and avoids clutter in the Linux kernel tree.
+	 */
+#	include "xz_config.h"
+#endif
+
+/* If no specific decoding mode is requested, enable support for all modes. */
+#if !defined(XZ_DEC_SINGLE) && !defined(XZ_DEC_PREALLOC) \
+		&& !defined(XZ_DEC_DYNALLOC)
+#	define XZ_DEC_SINGLE
+#	define XZ_DEC_PREALLOC
+#	define XZ_DEC_DYNALLOC
+#endif
+
+/*
+ * The DEC_IS_foo(mode) macros are used in "if" statements. If only some
+ * of the supported modes are enabled, these macros will evaluate to true or
+ * false at compile time and thus allow the compiler to omit unneeded code.
+ */
+#ifdef XZ_DEC_SINGLE
+#	define DEC_IS_SINGLE(mode) ((mode) == XZ_SINGLE)
+#else
+#	define DEC_IS_SINGLE(mode) (false)
+#endif
+
+#ifdef XZ_DEC_PREALLOC
+#	define DEC_IS_PREALLOC(mode) ((mode) == XZ_PREALLOC)
+#else
+#	define DEC_IS_PREALLOC(mode) (false)
+#endif
+
+#ifdef XZ_DEC_DYNALLOC
+#	define DEC_IS_DYNALLOC(mode) ((mode) == XZ_DYNALLOC)
+#else
+#	define DEC_IS_DYNALLOC(mode) (false)
+#endif
+
+#if !defined(XZ_DEC_SINGLE)
+#	define DEC_IS_MULTI(mode) (true)
+#elif defined(XZ_DEC_PREALLOC) || defined(XZ_DEC_DYNALLOC)
+#	define DEC_IS_MULTI(mode) ((mode) != XZ_SINGLE)
+#else
+#	define DEC_IS_MULTI(mode) (false)
+#endif
+
+/*
+ * If any of the BCJ filter decoders are wanted, define XZ_DEC_BCJ.
+ * XZ_DEC_BCJ is used to enable generic support for BCJ decoders.
+ */
+#ifndef XZ_DEC_BCJ
+#	if defined(XZ_DEC_X86) || defined(XZ_DEC_POWERPC) \
+			|| defined(XZ_DEC_IA64) || defined(XZ_DEC_ARM) \
+			|| defined(XZ_DEC_ARM) || defined(XZ_DEC_ARMTHUMB) \
+			|| defined(XZ_DEC_SPARC)
+#		define XZ_DEC_BCJ
+#	endif
+#endif
+
+/*
+ * Allocate memory for LZMA2 decoder. xz_dec_lzma2_reset() must be used
+ * before calling xz_dec_lzma2_run().
+ */
+XZ_EXTERN struct xz_dec_lzma2 *xz_dec_lzma2_create(enum xz_mode mode,
+						   uint32_t dict_max);
+
+/*
+ * Decode the LZMA2 properties (one byte) and reset the decoder. Return
+ * XZ_OK on success, XZ_MEMLIMIT_ERROR if the preallocated dictionary is not
+ * big enough, and XZ_OPTIONS_ERROR if props indicates something that this
+ * decoder doesn't support.
+ */
+XZ_EXTERN enum xz_ret xz_dec_lzma2_reset(struct xz_dec_lzma2 *s,
+					 uint8_t props);
+
+/* Decode raw LZMA2 stream from b->in to b->out. */
+XZ_EXTERN enum xz_ret xz_dec_lzma2_run(struct xz_dec_lzma2 *s,
+				       struct xz_buf *b);
+
+/* Free the memory allocated for the LZMA2 decoder. */
+XZ_EXTERN void xz_dec_lzma2_end(struct xz_dec_lzma2 *s);
+
+#ifdef XZ_DEC_BCJ
+/*
+ * Allocate memory for BCJ decoders. xz_dec_bcj_reset() must be used before
+ * calling xz_dec_bcj_run().
+ */
+XZ_EXTERN struct xz_dec_bcj *xz_dec_bcj_create(bool single_call);
+
+/*
+ * Decode the Filter ID of a BCJ filter. This implementation doesn't
+ * support custom start offsets, so no decoding of Filter Properties
+ * is needed. Returns XZ_OK if the given Filter ID is supported.
+ * Otherwise XZ_OPTIONS_ERROR is returned.
+ */
+XZ_EXTERN enum xz_ret xz_dec_bcj_reset(struct xz_dec_bcj *s, uint8_t id);
+
+/*
+ * Decode raw BCJ + LZMA2 stream. This must be used only if there actually is
+ * a BCJ filter in the chain. If the chain has only LZMA2, xz_dec_lzma2_run()
+ * must be called directly.
+ */
+XZ_EXTERN enum xz_ret xz_dec_bcj_run(struct xz_dec_bcj *s,
+				     struct xz_dec_lzma2 *lzma2,
+				     struct xz_buf *b);
+
+/* Free the memory allocated for the BCJ filters. */
+#define xz_dec_bcj_end(s) kfree(s)
+#endif
+
+#endif
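
The LZMA2 declarations above imply a fixed calling order: allocate the decoder once, reset it with the one-byte LZMA2 properties before each use, run it, and free it at the end. A minimal single-shot caller might look like the sketch below; it is illustration only and assumes the struct xz_buf, enum xz_mode and enum xz_ret definitions from <linux/xz.h>, picks XZ_DYNALLOC with an arbitrary 64 MiB dictionary cap, and skips the incremental feed loop a streaming user would need.

/* Sketch only: decode one in-memory raw LZMA2 stream with the API above. */
static enum xz_ret decode_lzma2_once(const uint8_t *in, size_t in_size,
				     uint8_t *out, size_t out_size,
				     uint8_t props)
{
	struct xz_dec_lzma2 *s;
	struct xz_buf b;
	enum xz_ret ret;

	s = xz_dec_lzma2_create(XZ_DYNALLOC, 1 << 26);	/* dict_max: 64 MiB cap */
	if (s == NULL)
		return XZ_MEM_ERROR;

	ret = xz_dec_lzma2_reset(s, props);	/* must precede xz_dec_lzma2_run() */
	if (ret == XZ_OK) {
		b.in = in;
		b.in_pos = 0;
		b.in_size = in_size;
		b.out = out;
		b.out_pos = 0;
		b.out_size = out_size;
		ret = xz_dec_lzma2_run(s, &b);	/* XZ_STREAM_END on success */
	}

	xz_dec_lzma2_end(s);
	return ret;
}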
diff --git a/lib/xz/xz_stream.h b/lib/xz/xz_stream.h
new file mode 100644
index 0000000..66cb5a7
--- /dev/null
+++ b/lib/xz/xz_stream.h
@@ -0,0 +1,62 @@
+/*
+ * Definitions for handling the .xz file format
+ *
+ * Author: Lasse Collin <lasse.collin@tukaani.org>
+ *
+ * This file has been put into the public domain.
+ * You can do whatever you want with this file.
+ */
+
+#ifndef XZ_STREAM_H
+#define XZ_STREAM_H
+
+#if defined(__KERNEL__) && !XZ_INTERNAL_CRC32
+#	include <linux/crc32.h>
+#	undef crc32
+#	define xz_crc32(buf, size, crc) \
+		(~crc32_le(~(uint32_t)(crc), buf, size))
+#endif
+
+/*
+ * See the .xz file format specification at
+ * http://tukaani.org/xz/xz-file-format.txt
+ * to understand the container format.
+ */
+
+#define STREAM_HEADER_SIZE 12
+
+#define HEADER_MAGIC "\3757zXZ"
+#define HEADER_MAGIC_SIZE 6
+
+#define FOOTER_MAGIC "YZ"
+#define FOOTER_MAGIC_SIZE 2
+
+/*
+ * Variable-length integer can hold a 63-bit unsigned integer or a special
+ * value indicating that the value is unknown.
+ *
+ * Experimental: vli_type can be defined to uint32_t to save a few bytes
+ * in code size (no effect on speed). Doing so limits the uncompressed and
+ * compressed size of the file to less than 256 MiB and may also weaken
+ * error detection slightly.
+ */
+typedef uint64_t vli_type;
+
+#define VLI_MAX ((vli_type)-1 / 2)
+#define VLI_UNKNOWN ((vli_type)-1)
+
+/* Maximum encoded size of a VLI */
+#define VLI_BYTES_MAX (sizeof(vli_type) * 8 / 7)
+
+/* Integrity Check types */
+enum xz_check {
+	XZ_CHECK_NONE = 0,
+	XZ_CHECK_CRC32 = 1,
+	XZ_CHECK_CRC64 = 4,
+	XZ_CHECK_SHA256 = 10
+};
+
+/* Maximum possible Check ID */
+#define XZ_CHECK_MAX 15
+
+#endif
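
The variable-length integers described above use the common 7-bits-per-byte layout: each byte carries seven payload bits, least-significant group first, and a set high bit means another byte follows, which is how VLI_BYTES_MAX works out to 64 / 7 = 9 bytes for the 64-bit vli_type. A stand-alone decoder for that layout could look roughly like this (a sketch written from the format description, not code taken from the kernel):

#include <stddef.h>
#include <stdint.h>

#define VLI_BYTES_MAX 9			/* 63 bits / 7 bits per byte */

/* Decode one .xz-style VLI; returns bytes consumed, or 0 on malformed input. */
static size_t vli_decode(const uint8_t *in, size_t in_size, uint64_t *out)
{
	size_t i = 0;

	*out = 0;
	do {
		if (i >= in_size || i >= VLI_BYTES_MAX)
			return 0;
		*out |= (uint64_t)(in[i] & 0x7F) << (7 * i);
	} while (in[i++] & 0x80);	/* high bit set: another byte follows */

	return i;
}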
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index b4edfe7..b5d8a1f 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -404,7 +404,7 @@
  * - vm.dirty_background_ratio  or  vm.dirty_background_bytes
  * - vm.dirty_ratio             or  vm.dirty_bytes
  * The dirty limits will be lifted by 1/4 for PF_LESS_THROTTLE (ie. nfsd) and
- * runtime tasks.
+ * real-time tasks.
  */
 void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty)
 {
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index ff7e158..826ba69 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -4014,7 +4014,7 @@
 		zone->pageblock_flags = alloc_bootmem_node(pgdat, usemapsize);
 }
 #else
-static void inline setup_usemap(struct pglist_data *pgdat,
+static inline void setup_usemap(struct pglist_data *pgdat,
 				struct zone *zone, unsigned long zonesize) {}
 #endif /* CONFIG_SPARSEMEM */
 
diff --git a/mm/percpu.c b/mm/percpu.c
index 3dd4984..3f93001 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -258,7 +258,7 @@
 
 /*
  * (Un)populated page region iterators.  Iterate over (un)populated
- * page regions betwen @start and @end in @chunk.  @rs and @re should
+ * page regions between @start and @end in @chunk.  @rs and @re should
  * be integer variables and will be set to start and end page index of
  * the current region.
  */
diff --git a/mm/rmap.c b/mm/rmap.c
index 1a8bf76b..c95d2ba 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -94,7 +94,7 @@
  * anonymous pages mapped into it with that anon_vma.
  *
  * The common case will be that we already have one, but if
- * if not we either need to find an adjacent mapping that we
+ * not we either need to find an adjacent mapping that we
  * can re-use the anon_vma from (very common when the only
  * reason for splitting a vma has been mprotect()), or we
  * allocate a new one.
diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
index 29d6cbf..64b9840 100644
--- a/mm/sparse-vmemmap.c
+++ b/mm/sparse-vmemmap.c
@@ -9,7 +9,7 @@
  *
  * However, virtual mappings need a page table and TLBs. Many Linux
  * architectures already map their physical space using 1-1 mappings
- * via TLBs. For those arches the virtual memmory map is essentially
+ * via TLBs. For those arches the virtual memory map is essentially
  * for free if we use the same page size as the 1-1 mappings. In that
  * case the overhead consists of a few additional pages that are
  * allocated to create a view of memory for vmemmap.
diff --git a/net/9p/protocol.c b/net/9p/protocol.c
index 798beac..1e308f2 100644
--- a/net/9p/protocol.c
+++ b/net/9p/protocol.c
@@ -178,27 +178,24 @@
 			break;
 		case 's':{
 				char **sptr = va_arg(ap, char **);
-				int16_t len;
-				int size;
+				uint16_t len;
 
 				errcode = p9pdu_readf(pdu, proto_version,
 								"w", &len);
 				if (errcode)
 					break;
 
-				size = max_t(int16_t, len, 0);
-
-				*sptr = kmalloc(size + 1, GFP_KERNEL);
+				*sptr = kmalloc(len + 1, GFP_KERNEL);
 				if (*sptr == NULL) {
 					errcode = -EFAULT;
 					break;
 				}
-				if (pdu_read(pdu, *sptr, size)) {
+				if (pdu_read(pdu, *sptr, len)) {
 					errcode = -EFAULT;
 					kfree(*sptr);
 					*sptr = NULL;
 				} else
-					(*sptr)[size] = 0;
+					(*sptr)[len] = 0;
 			}
 			break;
 		case 'Q':{
@@ -234,14 +231,14 @@
 			}
 			break;
 		case 'D':{
-				int32_t *count = va_arg(ap, int32_t *);
+				uint32_t *count = va_arg(ap, uint32_t *);
 				void **data = va_arg(ap, void **);
 
 				errcode =
 				    p9pdu_readf(pdu, proto_version, "d", count);
 				if (!errcode) {
 					*count =
-					    min_t(int32_t, *count,
+					    min_t(uint32_t, *count,
 						  pdu->size - pdu->offset);
 					*data = &pdu->sdata[pdu->offset];
 				}
@@ -404,9 +401,10 @@
 			break;
 		case 's':{
 				const char *sptr = va_arg(ap, const char *);
-				int16_t len = 0;
+				uint16_t len = 0;
 				if (sptr)
-					len = min_t(int16_t, strlen(sptr), USHRT_MAX);
+					len = min_t(uint16_t, strlen(sptr),
+								USHRT_MAX);
 
 				errcode = p9pdu_writef(pdu, proto_version,
 								"w", len);
@@ -438,7 +436,7 @@
 						 stbuf->n_gid, stbuf->n_muid);
 			} break;
 		case 'D':{
-				int32_t count = va_arg(ap, int32_t);
+				uint32_t count = va_arg(ap, uint32_t);
 				const void *data = va_arg(ap, const void *);
 
 				errcode = p9pdu_writef(pdu, proto_version, "d",
diff --git a/net/Kconfig b/net/Kconfig
index ad0aafe..7284062 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -253,7 +253,9 @@
 	what was just said, you don't need it: say N.
 
 	Documentation on how to use TCP connection probing can be found
-	at http://linux-net.osdl.org/index.php/TcpProbe
+	at:
+	
+	  http://www.linuxfoundation.org/collaborate/workgroups/networking/tcpprobe
 
 	To compile this code as a module, choose M here: the
 	module will be called tcp_probe.
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
index 1bf0cf5..8184c03 100644
--- a/net/caif/caif_socket.c
+++ b/net/caif/caif_socket.c
@@ -740,12 +740,12 @@
 		if (cf_sk->sk.sk_protocol != CAIFPROTO_UTIL)
 			return -ENOPROTOOPT;
 		lock_sock(&(cf_sk->sk));
-		cf_sk->conn_req.param.size = ol;
 		if (ol > sizeof(cf_sk->conn_req.param.data) ||
 			copy_from_user(&cf_sk->conn_req.param.data, ov, ol)) {
 			release_sock(&cf_sk->sk);
 			return -EINVAL;
 		}
+		cf_sk->conn_req.param.size = ol;
 		release_sock(&cf_sk->sk);
 		return 0;
 
diff --git a/net/caif/chnl_net.c b/net/caif/chnl_net.c
index 84a422c..fa9dab3 100644
--- a/net/caif/chnl_net.c
+++ b/net/caif/chnl_net.c
@@ -76,6 +76,8 @@
 	struct chnl_net *priv  = container_of(layr, struct chnl_net, chnl);
 	int pktlen;
 	int err = 0;
+	const u8 *ip_version;
+	u8 buf;
 
 	priv = container_of(layr, struct chnl_net, chnl);
 
@@ -90,7 +92,21 @@
 	 * send the packet to the net stack.
 	 */
 	skb->dev = priv->netdev;
-	skb->protocol = htons(ETH_P_IP);
+
+	/* check the version of IP */
+	ip_version = skb_header_pointer(skb, 0, 1, &buf);
+	if (!ip_version)
+		return -EINVAL;
+	switch (*ip_version >> 4) {
+	case 4:
+		skb->protocol = htons(ETH_P_IP);
+		break;
+	case 6:
+		skb->protocol = htons(ETH_P_IPV6);
+		break;
+	default:
+		return -EINVAL;
+	}
 
 	/* If we change the header in loop mode, the checksum is corrupted. */
 	if (priv->conn_req.protocol == CAIFPROTO_DATAGRAM_LOOP)
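
The change above works because the first four bits of both an IPv4 and an IPv6 header carry the IP version, so peeking a single byte from the start of the payload is enough to choose between ETH_P_IP and ETH_P_IPV6. Stripped of the skb plumbing, the check amounts to the following (stand-alone sketch for illustration; the ETH_P_* values are the standard EtherTypes):

#include <stdint.h>

#define ETH_P_IP	0x0800		/* IPv4 EtherType */
#define ETH_P_IPV6	0x86DD		/* IPv6 EtherType */

/* Classify a raw IP payload by its version nibble; returns -1 if neither. */
static int ip_ethertype(const uint8_t *payload)
{
	switch (payload[0] >> 4) {	/* high nibble of byte 0 = IP version */
	case 4:
		return ETH_P_IP;
	case 6:
		return ETH_P_IPV6;
	default:
		return -1;
	}
}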
diff --git a/net/ceph/ceph_hash.c b/net/ceph/ceph_hash.c
index 815ef88..0a1b53b 100644
--- a/net/ceph/ceph_hash.c
+++ b/net/ceph/ceph_hash.c
@@ -1,5 +1,6 @@
 
 #include <linux/ceph/types.h>
+#include <linux/module.h>
 
 /*
  * Robert Jenkin's hash function.
@@ -104,6 +105,7 @@
 		return -1;
 	}
 }
+EXPORT_SYMBOL(ceph_str_hash);
 
 const char *ceph_str_hash_name(int type)
 {
@@ -116,3 +118,4 @@
 		return "unknown";
 	}
 }
+EXPORT_SYMBOL(ceph_str_hash_name);
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index b6ff4a1..dff633d 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -96,7 +96,7 @@
 
 int ceph_msgr_init(void)
 {
-	ceph_msgr_wq = create_workqueue("ceph-msgr");
+	ceph_msgr_wq = alloc_workqueue("ceph-msgr", WQ_NON_REENTRANT, 0);
 	if (!ceph_msgr_wq) {
 		pr_err("msgr_init failed to create workqueue\n");
 		return -ENOMEM;
@@ -1920,20 +1920,6 @@
 /*
  * Atomically queue work on a connection.  Bump @con reference to
  * avoid races with connection teardown.
- *
- * There is some trickery going on with QUEUED and BUSY because we
- * only want a _single_ thread operating on each connection at any
- * point in time, but we want to use all available CPUs.
- *
- * The worker thread only proceeds if it can atomically set BUSY.  It
- * clears QUEUED and does it's thing.  When it thinks it's done, it
- * clears BUSY, then rechecks QUEUED.. if it's set again, it loops
- * (tries again to set BUSY).
- *
- * To queue work, we first set QUEUED, _then_ if BUSY isn't set, we
- * try to queue work.  If that fails (work is already queued, or BUSY)
- * we give up (work also already being done or is queued) but leave QUEUED
- * set so that the worker thread will loop if necessary.
  */
 static void queue_con(struct ceph_connection *con)
 {
@@ -1948,11 +1934,7 @@
 		return;
 	}
 
-	set_bit(QUEUED, &con->state);
-	if (test_bit(BUSY, &con->state)) {
-		dout("queue_con %p - already BUSY\n", con);
-		con->ops->put(con);
-	} else if (!queue_work(ceph_msgr_wq, &con->work.work)) {
+	if (!queue_delayed_work(ceph_msgr_wq, &con->work, 0)) {
 		dout("queue_con %p - already queued\n", con);
 		con->ops->put(con);
 	} else {
@@ -1967,15 +1949,6 @@
 {
 	struct ceph_connection *con = container_of(work, struct ceph_connection,
 						   work.work);
-	int backoff = 0;
-
-more:
-	if (test_and_set_bit(BUSY, &con->state) != 0) {
-		dout("con_work %p BUSY already set\n", con);
-		goto out;
-	}
-	dout("con_work %p start, clearing QUEUED\n", con);
-	clear_bit(QUEUED, &con->state);
 
 	mutex_lock(&con->mutex);
 
@@ -1994,28 +1967,13 @@
 	    try_read(con) < 0 ||
 	    try_write(con) < 0) {
 		mutex_unlock(&con->mutex);
-		backoff = 1;
 		ceph_fault(con);     /* error/fault path */
 		goto done_unlocked;
 	}
 
 done:
 	mutex_unlock(&con->mutex);
-
 done_unlocked:
-	clear_bit(BUSY, &con->state);
-	dout("con->state=%lu\n", con->state);
-	if (test_bit(QUEUED, &con->state)) {
-		if (!backoff || test_bit(OPENING, &con->state)) {
-			dout("con_work %p QUEUED reset, looping\n", con);
-			goto more;
-		}
-		dout("con_work %p QUEUED reset, but just faulted\n", con);
-		clear_bit(QUEUED, &con->state);
-	}
-	dout("con_work %p done\n", con);
-
-out:
 	con->ops->put(con);
 }
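
The messenger rework above drops the hand-rolled QUEUED/BUSY bits because a WQ_NON_REENTRANT workqueue already guarantees that a given work item never runs on two CPUs at once, which is the single-thread-per-connection property the old bit dance enforced by hand. In isolation the pattern looks like the sketch below (generic illustration, not new kernel code; the names are made up):

#include <linux/errno.h>
#include <linux/workqueue.h>

static struct workqueue_struct *example_wq;	/* hypothetical */
static struct delayed_work example_work;

static void example_fn(struct work_struct *work)
{
	/* WQ_NON_REENTRANT: only one instance of this runs at a time. */
}

static int example_init(void)
{
	example_wq = alloc_workqueue("example", WQ_NON_REENTRANT, 0);
	if (!example_wq)
		return -ENOMEM;

	INIT_DELAYED_WORK(&example_work, example_fn);
	queue_delayed_work(example_wq, &example_work, 0);	/* queue to run now */
	return 0;
}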
 
diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c
index d73f3f6..71603ac 100644
--- a/net/ceph/osdmap.c
+++ b/net/ceph/osdmap.c
@@ -605,8 +605,10 @@
 			goto bad;
 		}
 		err = __decode_pool(p, end, pi);
-		if (err < 0)
+		if (err < 0) {
+			kfree(pi);
 			goto bad;
+		}
 		__insert_pg_pool(&map->pg_pools, pi);
 	}
 
diff --git a/net/core/dev.c b/net/core/dev.c
index a215269..06d0e7b 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1732,33 +1732,6 @@
 }
 EXPORT_SYMBOL(netif_device_attach);
 
-static bool can_checksum_protocol(unsigned long features, __be16 protocol)
-{
-	return ((features & NETIF_F_NO_CSUM) ||
-		((features & NETIF_F_V4_CSUM) &&
-		 protocol == htons(ETH_P_IP)) ||
-		((features & NETIF_F_V6_CSUM) &&
-		 protocol == htons(ETH_P_IPV6)) ||
-		((features & NETIF_F_FCOE_CRC) &&
-		 protocol == htons(ETH_P_FCOE)));
-}
-
-static bool dev_can_checksum(struct net_device *dev, struct sk_buff *skb)
-{
-	__be16 protocol = skb->protocol;
-	int features = dev->features;
-
-	if (vlan_tx_tag_present(skb)) {
-		features &= dev->vlan_features;
-	} else if (protocol == htons(ETH_P_8021Q)) {
-		struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
-		protocol = veh->h_vlan_encapsulated_proto;
-		features &= dev->vlan_features;
-	}
-
-	return can_checksum_protocol(features, protocol);
-}
-
 /**
  * skb_dev_set -- assign a new device to a buffer
  * @skb: buffer for the new device
@@ -1971,16 +1944,14 @@
 /**
  *	dev_gso_segment - Perform emulated hardware segmentation on skb.
  *	@skb: buffer to segment
+ *	@features: device features as applicable to this skb
  *
  *	This function segments the given skb and stores the list of segments
  *	in skb->next.
  */
-static int dev_gso_segment(struct sk_buff *skb)
+static int dev_gso_segment(struct sk_buff *skb, int features)
 {
-	struct net_device *dev = skb->dev;
 	struct sk_buff *segs;
-	int features = dev->features & ~(illegal_highdma(dev, skb) ?
-					 NETIF_F_SG : 0);
 
 	segs = skb_gso_segment(skb, features);
 
@@ -2017,22 +1988,52 @@
 	}
 }
 
-int netif_get_vlan_features(struct sk_buff *skb, struct net_device *dev)
+static bool can_checksum_protocol(unsigned long features, __be16 protocol)
+{
+	return ((features & NETIF_F_GEN_CSUM) ||
+		((features & NETIF_F_V4_CSUM) &&
+		 protocol == htons(ETH_P_IP)) ||
+		((features & NETIF_F_V6_CSUM) &&
+		 protocol == htons(ETH_P_IPV6)) ||
+		((features & NETIF_F_FCOE_CRC) &&
+		 protocol == htons(ETH_P_FCOE)));
+}
+
+static int harmonize_features(struct sk_buff *skb, __be16 protocol, int features)
+{
+	if (!can_checksum_protocol(features, protocol)) {
+		features &= ~NETIF_F_ALL_CSUM;
+		features &= ~NETIF_F_SG;
+	} else if (illegal_highdma(skb->dev, skb)) {
+		features &= ~NETIF_F_SG;
+	}
+
+	return features;
+}
+
+int netif_skb_features(struct sk_buff *skb)
 {
 	__be16 protocol = skb->protocol;
+	int features = skb->dev->features;
 
 	if (protocol == htons(ETH_P_8021Q)) {
 		struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
 		protocol = veh->h_vlan_encapsulated_proto;
-	} else if (!skb->vlan_tci)
-		return dev->features;
+	} else if (!vlan_tx_tag_present(skb)) {
+		return harmonize_features(skb, protocol, features);
+	}
 
-	if (protocol != htons(ETH_P_8021Q))
-		return dev->features & dev->vlan_features;
-	else
-		return 0;
+	features &= skb->dev->vlan_features;
+
+	if (protocol != htons(ETH_P_8021Q)) {
+		return harmonize_features(skb, protocol, features);
+	} else {
+		features &= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST |
+				NETIF_F_GEN_CSUM;
+		return harmonize_features(skb, protocol, features);
+	}
 }
-EXPORT_SYMBOL(netif_get_vlan_features);
+EXPORT_SYMBOL(netif_skb_features);
 
 /*
  * Returns true if either:
@@ -2042,22 +2043,13 @@
  *	   support DMA from it.
  */
 static inline int skb_needs_linearize(struct sk_buff *skb,
-				      struct net_device *dev)
+				      int features)
 {
-	if (skb_is_nonlinear(skb)) {
-		int features = dev->features;
-
-		if (vlan_tx_tag_present(skb))
-			features &= dev->vlan_features;
-
-		return (skb_has_frag_list(skb) &&
-			!(features & NETIF_F_FRAGLIST)) ||
+	return skb_is_nonlinear(skb) &&
+			((skb_has_frag_list(skb) &&
+				!(features & NETIF_F_FRAGLIST)) ||
 			(skb_shinfo(skb)->nr_frags &&
-			(!(features & NETIF_F_SG) ||
-			illegal_highdma(dev, skb)));
-	}
-
-	return 0;
+				!(features & NETIF_F_SG)));
 }
 
 int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
@@ -2067,6 +2059,8 @@
 	int rc = NETDEV_TX_OK;
 
 	if (likely(!skb->next)) {
+		int features;
+
 		/*
 		 * If device doesn't need skb->dst, release it right now while
 		 * it's hot in this cpu cache
@@ -2079,8 +2073,10 @@
 
 		skb_orphan_try(skb);
 
+		features = netif_skb_features(skb);
+
 		if (vlan_tx_tag_present(skb) &&
-		    !(dev->features & NETIF_F_HW_VLAN_TX)) {
+		    !(features & NETIF_F_HW_VLAN_TX)) {
 			skb = __vlan_put_tag(skb, vlan_tx_tag_get(skb));
 			if (unlikely(!skb))
 				goto out;
@@ -2088,13 +2084,13 @@
 			skb->vlan_tci = 0;
 		}
 
-		if (netif_needs_gso(dev, skb)) {
-			if (unlikely(dev_gso_segment(skb)))
+		if (netif_needs_gso(skb, features)) {
+			if (unlikely(dev_gso_segment(skb, features)))
 				goto out_kfree_skb;
 			if (skb->next)
 				goto gso;
 		} else {
-			if (skb_needs_linearize(skb, dev) &&
+			if (skb_needs_linearize(skb, features) &&
 			    __skb_linearize(skb))
 				goto out_kfree_skb;
 
@@ -2105,7 +2101,7 @@
 			if (skb->ip_summed == CHECKSUM_PARTIAL) {
 				skb_set_transport_header(skb,
 					skb_checksum_start_offset(skb));
-				if (!dev_can_checksum(dev, skb) &&
+				if (!(features & NETIF_F_ALL_CSUM) &&
 				     skb_checksum_help(skb))
 					goto out_kfree_skb;
 			}
@@ -2301,7 +2297,10 @@
 		 */
 		if (!(dev->priv_flags & IFF_XMIT_DST_RELEASE))
 			skb_dst_force(skb);
-		__qdisc_update_bstats(q, skb->len);
+
+		qdisc_skb_cb(skb)->pkt_len = skb->len;
+		qdisc_bstats_update(q, skb);
+
 		if (sch_direct_xmit(skb, q, dev, txq, root_lock)) {
 			if (unlikely(contended)) {
 				spin_unlock(&q->busylock);
@@ -5621,18 +5620,20 @@
 }
 
 /**
- *	alloc_netdev_mq - allocate network device
+ *	alloc_netdev_mqs - allocate network device
  *	@sizeof_priv:	size of private data to allocate space for
  *	@name:		device name format string
  *	@setup:		callback to initialize device
- *	@queue_count:	the number of subqueues to allocate
+ *	@txqs:		the number of TX subqueues to allocate
+ *	@rxqs:		the number of RX subqueues to allocate
  *
  *	Allocates a struct net_device with private data area for driver use
  *	and performs basic initialization.  Also allocates subqueue structs
- *	for each queue on the device at the end of the netdevice.
+ *	for each queue on the device.
  */
-struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
-		void (*setup)(struct net_device *), unsigned int queue_count)
+struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
+		void (*setup)(struct net_device *),
+		unsigned int txqs, unsigned int rxqs)
 {
 	struct net_device *dev;
 	size_t alloc_size;
@@ -5640,12 +5641,20 @@
 
 	BUG_ON(strlen(name) >= sizeof(dev->name));
 
-	if (queue_count < 1) {
+	if (txqs < 1) {
 		pr_err("alloc_netdev: Unable to allocate device "
 		       "with zero queues.\n");
 		return NULL;
 	}
 
+#ifdef CONFIG_RPS
+	if (rxqs < 1) {
+		pr_err("alloc_netdev: Unable to allocate device "
+		       "with zero RX queues.\n");
+		return NULL;
+	}
+#endif
+
 	alloc_size = sizeof(struct net_device);
 	if (sizeof_priv) {
 		/* ensure 32-byte alignment of private area */
@@ -5676,14 +5685,14 @@
 
 	dev_net_set(dev, &init_net);
 
-	dev->num_tx_queues = queue_count;
-	dev->real_num_tx_queues = queue_count;
+	dev->num_tx_queues = txqs;
+	dev->real_num_tx_queues = txqs;
 	if (netif_alloc_netdev_queues(dev))
 		goto free_pcpu;
 
 #ifdef CONFIG_RPS
-	dev->num_rx_queues = queue_count;
-	dev->real_num_rx_queues = queue_count;
+	dev->num_rx_queues = rxqs;
+	dev->real_num_rx_queues = rxqs;
 	if (netif_alloc_rx_queues(dev))
 		goto free_pcpu;
 #endif
@@ -5711,7 +5720,7 @@
 	kfree(p);
 	return NULL;
 }
-EXPORT_SYMBOL(alloc_netdev_mq);
+EXPORT_SYMBOL(alloc_netdev_mqs);
 
 /**
  *	free_netdev - free network device
@@ -6209,7 +6218,7 @@
 static void __net_exit default_device_exit_batch(struct list_head *net_list)
 {
 	/* At exit all network devices must be removed from a network
-	 * namespace.  Do this in the reverse order of registeration.
+	 * namespace.  Do this in the reverse order of registration.
 	 * Do this across as many network namespaces as possible to
 	 * improve batching efficiency.
 	 */
diff --git a/net/core/filter.c b/net/core/filter.c
index 2b27d4e..afc5837 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -158,7 +158,7 @@
 /**
  *	sk_run_filter - run a filter on a socket
  *	@skb: buffer to run the filter on
- *	@filter: filter to apply
+ *	@fentry: filter to apply
  *
  * Decode and apply filter instructions to the skb->data.
  * Return length to keep, 0 for none. @skb is the data we are
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 750db57..a5f7535 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -1820,7 +1820,7 @@
 	if (kind != 2 && security_netlink_recv(skb, CAP_NET_ADMIN))
 		return -EPERM;
 
-	if (kind == 2 && nlh->nlmsg_flags&NLM_F_DUMP) {
+	if (kind == 2 && (nlh->nlmsg_flags & NLM_F_DUMP) == NLM_F_DUMP) {
 		struct sock *rtnl;
 		rtnl_dumpit_func dumpit;
 
diff --git a/net/core/sock.c b/net/core/sock.c
index a658aeb..7dfed79 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -157,7 +157,7 @@
   "sk_lock-27"       , "sk_lock-28"          , "sk_lock-AF_CAN"      ,
   "sk_lock-AF_TIPC"  , "sk_lock-AF_BLUETOOTH", "sk_lock-IUCV"        ,
   "sk_lock-AF_RXRPC" , "sk_lock-AF_ISDN"     , "sk_lock-AF_PHONET"   ,
-  "sk_lock-AF_IEEE802154", "sk_lock-AF_CAIF" ,
+  "sk_lock-AF_IEEE802154", "sk_lock-AF_CAIF" , "sk_lock-AF_ALG"      ,
   "sk_lock-AF_MAX"
 };
 static const char *const af_family_slock_key_strings[AF_MAX+1] = {
@@ -173,7 +173,7 @@
   "slock-27"       , "slock-28"          , "slock-AF_CAN"      ,
   "slock-AF_TIPC"  , "slock-AF_BLUETOOTH", "slock-AF_IUCV"     ,
   "slock-AF_RXRPC" , "slock-AF_ISDN"     , "slock-AF_PHONET"   ,
-  "slock-AF_IEEE802154", "slock-AF_CAIF" ,
+  "slock-AF_IEEE802154", "slock-AF_CAIF" , "slock-AF_ALG"      ,
   "slock-AF_MAX"
 };
 static const char *const af_family_clock_key_strings[AF_MAX+1] = {
@@ -189,7 +189,7 @@
   "clock-27"       , "clock-28"          , "clock-AF_CAN"      ,
   "clock-AF_TIPC"  , "clock-AF_BLUETOOTH", "clock-AF_IUCV"     ,
   "clock-AF_RXRPC" , "clock-AF_ISDN"     , "clock-AF_PHONET"   ,
-  "clock-AF_IEEE802154", "clock-AF_CAIF" ,
+  "clock-AF_IEEE802154", "clock-AF_CAIF" , "clock-AF_ALG"      ,
   "clock-AF_MAX"
 };
 
diff --git a/net/dccp/Kconfig b/net/dccp/Kconfig
index ad6dffd..b75968a 100644
--- a/net/dccp/Kconfig
+++ b/net/dccp/Kconfig
@@ -49,7 +49,9 @@
 	what was just said, you don't need it: say N.
 
 	Documentation on how to use DCCP connection probing can be found
-	at http://linux-net.osdl.org/index.php/DccpProbe
+	at:
+	
+	  http://www.linuxfoundation.org/collaborate/workgroups/networking/dccpprobe
 
 	To compile this code as a module, choose M here: the
 	module will be called dccp_probe.
diff --git a/net/dccp/dccp.h b/net/dccp/dccp.h
index 4508705..5fdb072 100644
--- a/net/dccp/dccp.h
+++ b/net/dccp/dccp.h
@@ -426,7 +426,8 @@
 {
 	struct dccp_sock *dp = dccp_sk(sk);
 
-	dp->dccps_gsr = seq;
+	if (after48(seq, dp->dccps_gsr))
+		dp->dccps_gsr = seq;
 	/* Sequence validity window depends on remote Sequence Window (7.5.1) */
 	dp->dccps_swl = SUB48(ADD48(dp->dccps_gsr, 1), dp->dccps_r_seq_win / 4);
 	/*
diff --git a/net/dccp/input.c b/net/dccp/input.c
index 15af247..8cde009 100644
--- a/net/dccp/input.c
+++ b/net/dccp/input.c
@@ -260,7 +260,7 @@
 		 */
 		if (time_before(now, (dp->dccps_rate_last +
 				      sysctl_dccp_sync_ratelimit)))
-			return 0;
+			return -1;
 
 		DCCP_WARN("Step 6 failed for %s packet, "
 			  "(LSWL(%llu) <= P.seqno(%llu) <= S.SWH(%llu)) and "
diff --git a/net/dccp/sysctl.c b/net/dccp/sysctl.c
index 56394382..4234882 100644
--- a/net/dccp/sysctl.c
+++ b/net/dccp/sysctl.c
@@ -21,7 +21,8 @@
 /* Boundary values */
 static int		zero     = 0,
 			u8_max   = 0xFF;
-static unsigned long	seqw_min = 32;
+static unsigned long	seqw_min = DCCPF_SEQ_WMIN,
+			seqw_max = 0xFFFFFFFF;		/* maximum on 32 bit */
 
 static struct ctl_table dccp_default_table[] = {
 	{
@@ -31,6 +32,7 @@
 		.mode		= 0644,
 		.proc_handler	= proc_doulongvec_minmax,
 		.extra1		= &seqw_min,		/* RFC 4340, 7.5.2 */
+		.extra2		= &seqw_max,
 	},
 	{
 		.procname	= "rx_ccid",
diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c
index 0ba1563..0dcaa90 100644
--- a/net/decnet/dn_dev.c
+++ b/net/decnet/dn_dev.c
@@ -1130,7 +1130,7 @@
 /*
  * This processes a device up event. We only start up
  * the loopback device & ethernet devices with correct
- * MAC addreses automatically. Others must be started
+ * MAC addresses automatically. Others must be started
  * specifically.
  *
  * FIXME: How should we configure the loopback address ? If we could dispense
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
index f00ef2f..f9d7ac9 100644
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -347,10 +347,11 @@
 EXPORT_SYMBOL(ether_setup);
 
 /**
- * alloc_etherdev_mq - Allocates and sets up an Ethernet device
+ * alloc_etherdev_mqs - Allocates and sets up an Ethernet device
  * @sizeof_priv: Size of additional driver-private structure to be allocated
  *	for this Ethernet device
- * @queue_count: The number of queues this device has.
+ * @txqs: The number of TX queues this device has.
+ * @rxqs: The number of RX queues this device has.
  *
  * Fill in the fields of the device structure with Ethernet-generic
  * values. Basically does everything except registering the device.
@@ -360,11 +361,12 @@
  * this private data area.
  */
 
-struct net_device *alloc_etherdev_mq(int sizeof_priv, unsigned int queue_count)
+struct net_device *alloc_etherdev_mqs(int sizeof_priv, unsigned int txqs,
+				      unsigned int rxqs)
 {
-	return alloc_netdev_mq(sizeof_priv, "eth%d", ether_setup, queue_count);
+	return alloc_netdev_mqs(sizeof_priv, "eth%d", ether_setup, txqs, rxqs);
 }
-EXPORT_SYMBOL(alloc_etherdev_mq);
+EXPORT_SYMBOL(alloc_etherdev_mqs);
 
 static size_t _format_mac_addr(char *buf, int buflen,
 			       const unsigned char *addr, int len)
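
With the rename above, a driver can now ask for asymmetric queue counts explicitly; alloc_etherdev_mqs() simply forwards to alloc_netdev_mqs() with the "eth%d" name template. A hypothetical probe path might use it as in the sketch below (illustration only; the private struct and the 4/2 queue split are made up):

#include <linux/etherdevice.h>
#include <linux/netdevice.h>

struct my_priv {			/* hypothetical driver-private data */
	int dummy;
};

static struct net_device *my_alloc_netdev(void)
{
	struct net_device *dev;

	/* 4 TX queues, 2 RX queues; netdev_priv(dev) returns the my_priv area. */
	dev = alloc_etherdev_mqs(sizeof(struct my_priv), 4, 2);
	if (!dev)
		return NULL;

	return dev;
}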
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index 9e95d7f..a5a1050 100644
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -432,7 +432,9 @@
 	---help---
 	  Support for INET (TCP, DCCP, etc) socket monitoring interface used by
 	  native Linux tools such as ss. ss is included in iproute2, currently
-	  downloadable at <http://linux-net.osdl.org/index.php/Iproute2>.
+	  downloadable at:
+	  
+	    http://www.linuxfoundation.org/collaborate/workgroups/networking/iproute2
 
 	  If unsure, say Y.
 
diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c
index 880a5ec..86961be 100644
--- a/net/ipv4/ah4.c
+++ b/net/ipv4/ah4.c
@@ -314,14 +314,15 @@
 
 	skb->ip_summed = CHECKSUM_NONE;
 
-	ah = (struct ip_auth_hdr *)skb->data;
-	iph = ip_hdr(skb);
-	ihl = ip_hdrlen(skb);
 
 	if ((err = skb_cow_data(skb, 0, &trailer)) < 0)
 		goto out;
 	nfrags = err;
 
+	ah = (struct ip_auth_hdr *)skb->data;
+	iph = ip_hdr(skb);
+	ihl = ip_hdrlen(skb);
+
 	work_iph = ah_alloc_tmp(ahash, nfrags, ihl + ahp->icv_trunc_len);
 	if (!work_iph)
 		goto out;
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index a2fc7b9..04c8b69 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -1143,6 +1143,23 @@
 	return err;
 }
 
+int arp_invalidate(struct net_device *dev, __be32 ip)
+{
+	struct neighbour *neigh = neigh_lookup(&arp_tbl, &ip, dev);
+	int err = -ENXIO;
+
+	if (neigh) {
+		if (neigh->nud_state & ~NUD_NOARP)
+			err = neigh_update(neigh, NULL, NUD_FAILED,
+					   NEIGH_UPDATE_F_OVERRIDE|
+					   NEIGH_UPDATE_F_ADMIN);
+		neigh_release(neigh);
+	}
+
+	return err;
+}
+EXPORT_SYMBOL(arp_invalidate);
+
 static int arp_req_delete_public(struct net *net, struct arpreq *r,
 		struct net_device *dev)
 {
@@ -1163,7 +1180,6 @@
 {
 	int err;
 	__be32 ip;
-	struct neighbour *neigh;
 
 	if (r->arp_flags & ATF_PUBL)
 		return arp_req_delete_public(net, r, dev);
@@ -1181,16 +1197,7 @@
 		if (!dev)
 			return -EINVAL;
 	}
-	err = -ENXIO;
-	neigh = neigh_lookup(&arp_tbl, &ip, dev);
-	if (neigh) {
-		if (neigh->nud_state & ~NUD_NOARP)
-			err = neigh_update(neigh, NULL, NUD_FAILED,
-					   NEIGH_UPDATE_F_OVERRIDE|
-					   NEIGH_UPDATE_F_ADMIN);
-		neigh_release(neigh);
-	}
-	return err;
+	return arp_invalidate(dev, ip);
 }
 
 /*
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 25e3181..97e5fb7 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -73,7 +73,7 @@
 		     !sk2->sk_bound_dev_if ||
 		     sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) {
 			if (!reuse || !sk2->sk_reuse ||
-			    sk2->sk_state == TCP_LISTEN) {
+			    ((1 << sk2->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))) {
 				const __be32 sk2_rcv_saddr = sk_rcv_saddr(sk2);
 				if (!sk2_rcv_saddr || !sk_rcv_saddr(sk) ||
 				    sk2_rcv_saddr == sk_rcv_saddr(sk))
@@ -122,7 +122,8 @@
 					    (tb->num_owners < smallest_size || smallest_size == -1)) {
 						smallest_size = tb->num_owners;
 						smallest_rover = rover;
-						if (atomic_read(&hashinfo->bsockets) > (high - low) + 1) {
+						if (atomic_read(&hashinfo->bsockets) > (high - low) + 1 &&
+						    !inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb)) {
 							spin_unlock(&head->lock);
 							snum = smallest_rover;
 							goto have_snum;
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index 2ada171..2746c1f 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -858,7 +858,7 @@
 	    nlmsg_len(nlh) < hdrlen)
 		return -EINVAL;
 
-	if (nlh->nlmsg_flags & NLM_F_DUMP) {
+	if ((nlh->nlmsg_flags & NLM_F_DUMP) == NLM_F_DUMP) {
 		if (nlmsg_attrlen(nlh, hdrlen)) {
 			struct nlattr *attr;
 
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index 3fac340..e855fff 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -710,42 +710,25 @@
 	struct arpt_entry *iter;
 	unsigned int cpu;
 	unsigned int i;
-	unsigned int curcpu = get_cpu();
-
-	/* Instead of clearing (by a previous call to memset())
-	 * the counters and using adds, we set the counters
-	 * with data used by 'current' CPU
-	 *
-	 * Bottom half has to be disabled to prevent deadlock
-	 * if new softirq were to run and call ipt_do_table
-	 */
-	local_bh_disable();
-	i = 0;
-	xt_entry_foreach(iter, t->entries[curcpu], t->size) {
-		SET_COUNTER(counters[i], iter->counters.bcnt,
-			    iter->counters.pcnt);
-		++i;
-	}
-	local_bh_enable();
-	/* Processing counters from other cpus, we can let bottom half enabled,
-	 * (preemption is disabled)
-	 */
 
 	for_each_possible_cpu(cpu) {
-		if (cpu == curcpu)
-			continue;
+		seqlock_t *lock = &per_cpu(xt_info_locks, cpu).lock;
+
 		i = 0;
-		local_bh_disable();
-		xt_info_wrlock(cpu);
 		xt_entry_foreach(iter, t->entries[cpu], t->size) {
-			ADD_COUNTER(counters[i], iter->counters.bcnt,
-				    iter->counters.pcnt);
+			u64 bcnt, pcnt;
+			unsigned int start;
+
+			do {
+				start = read_seqbegin(lock);
+				bcnt = iter->counters.bcnt;
+				pcnt = iter->counters.pcnt;
+			} while (read_seqretry(lock, start));
+
+			ADD_COUNTER(counters[i], bcnt, pcnt);
 			++i;
 		}
-		xt_info_wrunlock(cpu);
-		local_bh_enable();
 	}
-	put_cpu();
 }
 
 static struct xt_counters *alloc_counters(const struct xt_table *table)
@@ -759,7 +742,7 @@
 	 * about).
 	 */
 	countersize = sizeof(struct xt_counters) * private->number;
-	counters = vmalloc(countersize);
+	counters = vzalloc(countersize);
 
 	if (counters == NULL)
 		return ERR_PTR(-ENOMEM);
@@ -1007,7 +990,7 @@
 	struct arpt_entry *iter;
 
 	ret = 0;
-	counters = vmalloc(num_counters * sizeof(struct xt_counters));
+	counters = vzalloc(num_counters * sizeof(struct xt_counters));
 	if (!counters) {
 		ret = -ENOMEM;
 		goto out;
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index a846d63..652efea 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -884,42 +884,25 @@
 	struct ipt_entry *iter;
 	unsigned int cpu;
 	unsigned int i;
-	unsigned int curcpu = get_cpu();
-
-	/* Instead of clearing (by a previous call to memset())
-	 * the counters and using adds, we set the counters
-	 * with data used by 'current' CPU.
-	 *
-	 * Bottom half has to be disabled to prevent deadlock
-	 * if new softirq were to run and call ipt_do_table
-	 */
-	local_bh_disable();
-	i = 0;
-	xt_entry_foreach(iter, t->entries[curcpu], t->size) {
-		SET_COUNTER(counters[i], iter->counters.bcnt,
-			    iter->counters.pcnt);
-		++i;
-	}
-	local_bh_enable();
-	/* Processing counters from other cpus, we can let bottom half enabled,
-	 * (preemption is disabled)
-	 */
 
 	for_each_possible_cpu(cpu) {
-		if (cpu == curcpu)
-			continue;
+		seqlock_t *lock = &per_cpu(xt_info_locks, cpu).lock;
+
 		i = 0;
-		local_bh_disable();
-		xt_info_wrlock(cpu);
 		xt_entry_foreach(iter, t->entries[cpu], t->size) {
-			ADD_COUNTER(counters[i], iter->counters.bcnt,
-				    iter->counters.pcnt);
+			u64 bcnt, pcnt;
+			unsigned int start;
+
+			do {
+				start = read_seqbegin(lock);
+				bcnt = iter->counters.bcnt;
+				pcnt = iter->counters.pcnt;
+			} while (read_seqretry(lock, start));
+
+			ADD_COUNTER(counters[i], bcnt, pcnt);
 			++i; /* macro does multi eval of i */
 		}
-		xt_info_wrunlock(cpu);
-		local_bh_enable();
 	}
-	put_cpu();
 }
 
 static struct xt_counters *alloc_counters(const struct xt_table *table)
@@ -932,7 +915,7 @@
 	   (other than comefrom, which userspace doesn't care
 	   about). */
 	countersize = sizeof(struct xt_counters) * private->number;
-	counters = vmalloc(countersize);
+	counters = vzalloc(countersize);
 
 	if (counters == NULL)
 		return ERR_PTR(-ENOMEM);
@@ -1203,7 +1186,7 @@
 	struct ipt_entry *iter;
 
 	ret = 0;
-	counters = vmalloc(num_counters * sizeof(struct xt_counters));
+	counters = vzalloc(num_counters * sizeof(struct xt_counters));
 	if (!counters) {
 		ret = -ENOMEM;
 		goto out;
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index dc7c096..406f320 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1350,7 +1350,7 @@
 	return 0;
 }
 
-/* Intialize TSO state of a skb.
+/* Initialize TSO state of a skb.
  * This must be invoked the first time we consider transmitting
  * SKB onto the wire.
  */
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 059a3de..978e80e 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -300,7 +300,7 @@
 			goto out;
 		}
 
-		/* Reproduce AF_INET checks to make the bindings consitant */
+		/* Reproduce AF_INET checks to make the bindings consistent */
 		v4addr = addr->sin6_addr.s6_addr32[3];
 		chk_addr_ret = inet_addr_type(net, v4addr);
 		if (!sysctl_ip_nonlocal_bind &&
diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c
index ee82d4e..1aba54a 100644
--- a/net/ipv6/ah6.c
+++ b/net/ipv6/ah6.c
@@ -538,14 +538,16 @@
 	if (!pskb_may_pull(skb, ah_hlen))
 		goto out;
 
-	ip6h = ipv6_hdr(skb);
-
-	skb_push(skb, hdr_len);
 
 	if ((err = skb_cow_data(skb, 0, &trailer)) < 0)
 		goto out;
 	nfrags = err;
 
+	ah = (struct ip_auth_hdr *)skb->data;
+	ip6h = ipv6_hdr(skb);
+
+	skb_push(skb, hdr_len);
+
 	work_iph = ah_alloc_tmp(ahash, nfrags, hdr_len + ahp->icv_trunc_len);
 	if (!work_iph)
 		goto out;
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
index e46305d..d144e62 100644
--- a/net/ipv6/inet6_connection_sock.c
+++ b/net/ipv6/inet6_connection_sock.c
@@ -44,7 +44,7 @@
 		     !sk2->sk_bound_dev_if ||
 		     sk->sk_bound_dev_if == sk2->sk_bound_dev_if) &&
 		    (!sk->sk_reuse || !sk2->sk_reuse ||
-		     sk2->sk_state == TCP_LISTEN) &&
+		     ((1 << sk2->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))) &&
 		     ipv6_rcv_saddr_equal(sk, sk2))
 			break;
 	}
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 4555823..7d227c6 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -897,42 +897,25 @@
 	struct ip6t_entry *iter;
 	unsigned int cpu;
 	unsigned int i;
-	unsigned int curcpu = get_cpu();
-
-	/* Instead of clearing (by a previous call to memset())
-	 * the counters and using adds, we set the counters
-	 * with data used by 'current' CPU
-	 *
-	 * Bottom half has to be disabled to prevent deadlock
-	 * if new softirq were to run and call ipt_do_table
-	 */
-	local_bh_disable();
-	i = 0;
-	xt_entry_foreach(iter, t->entries[curcpu], t->size) {
-		SET_COUNTER(counters[i], iter->counters.bcnt,
-			    iter->counters.pcnt);
-		++i;
-	}
-	local_bh_enable();
-	/* Processing counters from other cpus, we can let bottom half enabled,
-	 * (preemption is disabled)
-	 */
 
 	for_each_possible_cpu(cpu) {
-		if (cpu == curcpu)
-			continue;
+		seqlock_t *lock = &per_cpu(xt_info_locks, cpu).lock;
+
 		i = 0;
-		local_bh_disable();
-		xt_info_wrlock(cpu);
 		xt_entry_foreach(iter, t->entries[cpu], t->size) {
-			ADD_COUNTER(counters[i], iter->counters.bcnt,
-				    iter->counters.pcnt);
+			u64 bcnt, pcnt;
+			unsigned int start;
+
+			do {
+				start = read_seqbegin(lock);
+				bcnt = iter->counters.bcnt;
+				pcnt = iter->counters.pcnt;
+			} while (read_seqretry(lock, start));
+
+			ADD_COUNTER(counters[i], bcnt, pcnt);
 			++i;
 		}
-		xt_info_wrunlock(cpu);
-		local_bh_enable();
 	}
-	put_cpu();
 }
 
 static struct xt_counters *alloc_counters(const struct xt_table *table)
@@ -945,7 +928,7 @@
 	   (other than comefrom, which userspace doesn't care
 	   about). */
 	countersize = sizeof(struct xt_counters) * private->number;
-	counters = vmalloc(countersize);
+	counters = vzalloc(countersize);
 
 	if (counters == NULL)
 		return ERR_PTR(-ENOMEM);
@@ -1216,7 +1199,7 @@
 	struct ip6t_entry *iter;
 
 	ret = 0;
-	counters = vmalloc(num_counters * sizeof(struct xt_counters));
+	counters = vzalloc(num_counters * sizeof(struct xt_counters));
 	if (!counters) {
 		ret = -ENOMEM;
 		goto out;
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 0cdba50..5cb8d30 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -645,25 +645,23 @@
 	struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
 	u_int8_t l3proto = nfmsg->nfgen_family;
 
-	rcu_read_lock();
+	spin_lock_bh(&nf_conntrack_lock);
 	last = (struct nf_conn *)cb->args[1];
 	for (; cb->args[0] < net->ct.htable_size; cb->args[0]++) {
 restart:
-		hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[cb->args[0]],
+		hlist_nulls_for_each_entry(h, n, &net->ct.hash[cb->args[0]],
 					 hnnode) {
 			if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL)
 				continue;
 			ct = nf_ct_tuplehash_to_ctrack(h);
-			if (!atomic_inc_not_zero(&ct->ct_general.use))
-				continue;
 			/* Dump entries of a given L3 protocol number.
 			 * If it is not specified, ie. l3proto == 0,
 			 * then dump everything. */
 			if (l3proto && nf_ct_l3num(ct) != l3proto)
-				goto releasect;
+				continue;
 			if (cb->args[1]) {
 				if (ct != last)
-					goto releasect;
+					continue;
 				cb->args[1] = 0;
 			}
 			if (ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).pid,
@@ -681,8 +679,6 @@
 				if (acct)
 					memset(acct, 0, sizeof(struct nf_conn_counter[IP_CT_DIR_MAX]));
 			}
-releasect:
-		nf_ct_put(ct);
 		}
 		if (cb->args[1]) {
 			cb->args[1] = 0;
@@ -690,7 +686,7 @@
 		}
 	}
 out:
-	rcu_read_unlock();
+	spin_unlock_bh(&nf_conntrack_lock);
 	if (last)
 		nf_ct_put(last);
 
@@ -928,7 +924,7 @@
 	u16 zone;
 	int err;
 
-	if (nlh->nlmsg_flags & NLM_F_DUMP)
+	if ((nlh->nlmsg_flags & NLM_F_DUMP) == NLM_F_DUMP)
 		return netlink_dump_start(ctnl, skb, nlh, ctnetlink_dump_table,
 					  ctnetlink_done);
 
@@ -1790,7 +1786,7 @@
 	u16 zone;
 	int err;
 
-	if (nlh->nlmsg_flags & NLM_F_DUMP) {
+	if ((nlh->nlmsg_flags & NLM_F_DUMP) == NLM_F_DUMP) {
 		return netlink_dump_start(ctnl, skb, nlh,
 					  ctnetlink_exp_dump_table,
 					  ctnetlink_exp_done);
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index 80463507..c942376 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -1325,7 +1325,8 @@
 
 	for_each_possible_cpu(i) {
 		struct xt_info_lock *lock = &per_cpu(xt_info_locks, i);
-		spin_lock_init(&lock->lock);
+
+		seqlock_init(&lock->lock);
 		lock->readers = 0;
 	}
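
The three get_counters() rewrites above (arp_tables, ip_tables, ip6_tables) all rely on the seqlock that x_tables now initialises here: each per-CPU byte/packet pair is sampled inside a read_seqbegin()/read_seqretry() loop, so the reader gets a consistent 64-bit snapshot without blocking the packet-processing write side. Reduced to its essentials, the reader side is the sketch below (generic illustration; the counter struct is hypothetical):

#include <linux/seqlock.h>
#include <linux/types.h>

struct byte_pkt_pair {			/* hypothetical stand-in for xt_counters */
	u64 bcnt;
	u64 pcnt;
};

/* Snapshot a counter pair without tearing, retrying if a writer raced us. */
static void snapshot_pair(const seqlock_t *lock, const struct byte_pkt_pair *c,
			  u64 *bcnt, u64 *pcnt)
{
	unsigned int start;

	do {
		start = read_seqbegin(lock);
		*bcnt = c->bcnt;
		*pcnt = c->pcnt;
	} while (read_seqretry(lock, start));
}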
 
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index 1781d99..f83cb37 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -519,7 +519,7 @@
 	    security_netlink_recv(skb, CAP_NET_ADMIN))
 		return -EPERM;
 
-	if (nlh->nlmsg_flags & NLM_F_DUMP) {
+	if ((nlh->nlmsg_flags & NLM_F_DUMP) == NLM_F_DUMP) {
 		if (ops->dumpit == NULL)
 			return -EOPNOTSUPP;
 
diff --git a/net/phonet/af_phonet.c b/net/phonet/af_phonet.c
index fd95beb..1072b2c 100644
--- a/net/phonet/af_phonet.c
+++ b/net/phonet/af_phonet.c
@@ -37,7 +37,7 @@
 /* Transport protocol registration */
 static struct phonet_protocol *proto_tab[PHONET_NPROTO] __read_mostly;
 
-static struct phonet_protocol *phonet_proto_get(int protocol)
+static struct phonet_protocol *phonet_proto_get(unsigned int protocol)
 {
 	struct phonet_protocol *pp;
 
@@ -458,7 +458,7 @@
 
 static DEFINE_MUTEX(proto_tab_lock);
 
-int __init_or_module phonet_proto_register(int protocol,
+int __init_or_module phonet_proto_register(unsigned int protocol,
 						struct phonet_protocol *pp)
 {
 	int err = 0;
@@ -481,7 +481,7 @@
 }
 EXPORT_SYMBOL(phonet_proto_register);
 
-void phonet_proto_unregister(int protocol, struct phonet_protocol *pp)
+void phonet_proto_unregister(unsigned int protocol, struct phonet_protocol *pp)
 {
 	mutex_lock(&proto_tab_lock);
 	BUG_ON(proto_tab[protocol] != pp);
diff --git a/net/sched/Kconfig b/net/sched/Kconfig
index a36270a..f04d4a4 100644
--- a/net/sched/Kconfig
+++ b/net/sched/Kconfig
@@ -24,7 +24,7 @@
 	  To administer these schedulers, you'll need the user-level utilities
 	  from the package iproute2+tc at <ftp://ftp.tux.org/pub/net/ip-routing/>.
 	  That package also contains some documentation; for more, check out
-	  <http://linux-net.osdl.org/index.php/Iproute2>.
+	  <http://www.linuxfoundation.org/collaborate/workgroups/networking/iproute2>.
 
 	  This Quality of Service (QoS) support will enable you to use
 	  Differentiated Services (diffserv) and Resource Reservation Protocol
diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c
index 67dc7ce..83ddfc0 100644
--- a/net/sched/act_csum.c
+++ b/net/sched/act_csum.c
@@ -508,8 +508,7 @@
 
 	spin_lock(&p->tcf_lock);
 	p->tcf_tm.lastuse = jiffies;
-	p->tcf_bstats.bytes += qdisc_pkt_len(skb);
-	p->tcf_bstats.packets++;
+	bstats_update(&p->tcf_bstats, skb);
 	action = p->tcf_action;
 	update_flags = p->update_flags;
 	spin_unlock(&p->tcf_lock);
diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
index 8daef96..c2a7c20 100644
--- a/net/sched/act_ipt.c
+++ b/net/sched/act_ipt.c
@@ -209,8 +209,7 @@
 	spin_lock(&ipt->tcf_lock);
 
 	ipt->tcf_tm.lastuse = jiffies;
-	ipt->tcf_bstats.bytes += qdisc_pkt_len(skb);
-	ipt->tcf_bstats.packets++;
+	bstats_update(&ipt->tcf_bstats, skb);
 
 	/* yes, we have to worry about both in and out dev
 	 worry later - danger - this API seems to have changed
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index 0c311be..d765067 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -165,8 +165,7 @@
 
 	spin_lock(&m->tcf_lock);
 	m->tcf_tm.lastuse = jiffies;
-	m->tcf_bstats.bytes += qdisc_pkt_len(skb);
-	m->tcf_bstats.packets++;
+	bstats_update(&m->tcf_bstats, skb);
 
 	dev = m->tcfm_dev;
 	if (!dev) {
diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c
index 186eb83..178a4bd 100644
--- a/net/sched/act_nat.c
+++ b/net/sched/act_nat.c
@@ -125,8 +125,7 @@
 	egress = p->flags & TCA_NAT_FLAG_EGRESS;
 	action = p->tcf_action;
 
-	p->tcf_bstats.bytes += qdisc_pkt_len(skb);
-	p->tcf_bstats.packets++;
+	bstats_update(&p->tcf_bstats, skb);
 
 	spin_unlock(&p->tcf_lock);
 
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index a0593c9..445bef7 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -187,8 +187,7 @@
 bad:
 	p->tcf_qstats.overlimits++;
 done:
-	p->tcf_bstats.bytes += qdisc_pkt_len(skb);
-	p->tcf_bstats.packets++;
+	bstats_update(&p->tcf_bstats, skb);
 	spin_unlock(&p->tcf_lock);
 	return p->tcf_action;
 }
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index 7ebf743..e2f08b1 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -298,8 +298,7 @@
 
 	spin_lock(&police->tcf_lock);
 
-	police->tcf_bstats.bytes += qdisc_pkt_len(skb);
-	police->tcf_bstats.packets++;
+	bstats_update(&police->tcf_bstats, skb);
 
 	if (police->tcfp_ewma_rate &&
 	    police->tcf_rate_est.bps >= police->tcfp_ewma_rate) {
diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c
index 97e84f3..7287cff 100644
--- a/net/sched/act_simple.c
+++ b/net/sched/act_simple.c
@@ -42,8 +42,7 @@
 
 	spin_lock(&d->tcf_lock);
 	d->tcf_tm.lastuse = jiffies;
-	d->tcf_bstats.bytes += qdisc_pkt_len(skb);
-	d->tcf_bstats.packets++;
+	bstats_update(&d->tcf_bstats, skb);
 
 	/* print policy string followed by _ then packet count
 	 * Example if this was the 3rd packet and the string was "hello"
diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c
index 66cbf4e..836f5fe 100644
--- a/net/sched/act_skbedit.c
+++ b/net/sched/act_skbedit.c
@@ -46,8 +46,7 @@
 
 	spin_lock(&d->tcf_lock);
 	d->tcf_tm.lastuse = jiffies;
-	d->tcf_bstats.bytes += qdisc_pkt_len(skb);
-	d->tcf_bstats.packets++;
+	bstats_update(&d->tcf_bstats, skb);
 
 	if (d->flags & SKBEDIT_F_PRIORITY)
 		skb->priority = d->priority;
diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c
index 2825407..943d733 100644
--- a/net/sched/sch_atm.c
+++ b/net/sched/sch_atm.c
@@ -422,10 +422,8 @@
 		}
 		return ret;
 	}
-	sch->bstats.bytes += qdisc_pkt_len(skb);
-	sch->bstats.packets++;
-	flow->bstats.bytes += qdisc_pkt_len(skb);
-	flow->bstats.packets++;
+	qdisc_bstats_update(sch, skb);
+	bstats_update(&flow->bstats, skb);
 	/*
 	 * Okay, this may seem weird. We pretend we've dropped the packet if
 	 * it goes via ATM. The reason for this is that the outer qdisc
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index eb76315..c80d1c2 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -390,8 +390,7 @@
 	ret = qdisc_enqueue(skb, cl->q);
 	if (ret == NET_XMIT_SUCCESS) {
 		sch->q.qlen++;
-		sch->bstats.packets++;
-		sch->bstats.bytes += qdisc_pkt_len(skb);
+		qdisc_bstats_update(sch, skb);
 		cbq_mark_toplevel(q, cl);
 		if (!cl->next_alive)
 			cbq_activate_class(cl);
@@ -650,8 +649,7 @@
 		ret = qdisc_enqueue(skb, cl->q);
 		if (ret == NET_XMIT_SUCCESS) {
 			sch->q.qlen++;
-			sch->bstats.packets++;
-			sch->bstats.bytes += qdisc_pkt_len(skb);
+			qdisc_bstats_update(sch, skb);
 			if (!cl->next_alive)
 				cbq_activate_class(cl);
 			return 0;
diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c
index aa8b531..de55e64 100644
--- a/net/sched/sch_drr.c
+++ b/net/sched/sch_drr.c
@@ -351,7 +351,6 @@
 {
 	struct drr_sched *q = qdisc_priv(sch);
 	struct drr_class *cl;
-	unsigned int len;
 	int err;
 
 	cl = drr_classify(skb, sch, &err);
@@ -362,7 +361,6 @@
 		return err;
 	}
 
-	len = qdisc_pkt_len(skb);
 	err = qdisc_enqueue(skb, cl->qdisc);
 	if (unlikely(err != NET_XMIT_SUCCESS)) {
 		if (net_xmit_drop_count(err)) {
@@ -377,10 +375,8 @@
 		cl->deficit = cl->quantum;
 	}
 
-	cl->bstats.packets++;
-	cl->bstats.bytes += len;
-	sch->bstats.packets++;
-	sch->bstats.bytes += len;
+	bstats_update(&cl->bstats, skb);
+	qdisc_bstats_update(sch, skb);
 
 	sch->q.qlen++;
 	return err;
diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
index 1d295d6..60f4bdd 100644
--- a/net/sched/sch_dsmark.c
+++ b/net/sched/sch_dsmark.c
@@ -260,8 +260,7 @@
 		return err;
 	}
 
-	sch->bstats.bytes += qdisc_pkt_len(skb);
-	sch->bstats.packets++;
+	qdisc_bstats_update(sch, skb);
 	sch->q.qlen++;
 
 	return NET_XMIT_SUCCESS;
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index 069c62b..2e45791 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -1599,10 +1599,8 @@
 	if (cl->qdisc->q.qlen == 1)
 		set_active(cl, qdisc_pkt_len(skb));
 
-	cl->bstats.packets++;
-	cl->bstats.bytes += qdisc_pkt_len(skb);
-	sch->bstats.packets++;
-	sch->bstats.bytes += qdisc_pkt_len(skb);
+	bstats_update(&cl->bstats, skb);
+	qdisc_bstats_update(sch, skb);
 	sch->q.qlen++;
 
 	return NET_XMIT_SUCCESS;
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 01b519d..984c1b0 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -569,15 +569,12 @@
 		}
 		return ret;
 	} else {
-		cl->bstats.packets +=
-			skb_is_gso(skb)?skb_shinfo(skb)->gso_segs:1;
-		cl->bstats.bytes += qdisc_pkt_len(skb);
+		bstats_update(&cl->bstats, skb);
 		htb_activate(q, cl);
 	}
 
 	sch->q.qlen++;
-	sch->bstats.packets += skb_is_gso(skb)?skb_shinfo(skb)->gso_segs:1;
-	sch->bstats.bytes += qdisc_pkt_len(skb);
+	qdisc_bstats_update(sch, skb);
 	return NET_XMIT_SUCCESS;
 }
 
@@ -648,12 +645,10 @@
 				htb_add_to_wait_tree(q, cl, diff);
 		}
 
-		/* update byte stats except for leaves which are already updated */
-		if (cl->level) {
-			cl->bstats.bytes += bytes;
-			cl->bstats.packets += skb_is_gso(skb)?
-					skb_shinfo(skb)->gso_segs:1;
-		}
+		/* update basic stats except for leaves which are already updated */
+		if (cl->level)
+			bstats_update(&cl->bstats, skb);
+
 		cl = cl->parent;
 	}
 }
diff --git a/net/sched/sch_ingress.c b/net/sched/sch_ingress.c
index f10e34a..bce1665 100644
--- a/net/sched/sch_ingress.c
+++ b/net/sched/sch_ingress.c
@@ -63,8 +63,7 @@
 
 	result = tc_classify(skb, p->filter_list, &res);
 
-	sch->bstats.packets++;
-	sch->bstats.bytes += qdisc_pkt_len(skb);
+	qdisc_bstats_update(sch, skb);
 	switch (result) {
 	case TC_ACT_SHOT:
 		result = TC_ACT_SHOT;
diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c
index 32690de..21f13da 100644
--- a/net/sched/sch_multiq.c
+++ b/net/sched/sch_multiq.c
@@ -83,8 +83,7 @@
 
 	ret = qdisc_enqueue(skb, qdisc);
 	if (ret == NET_XMIT_SUCCESS) {
-		sch->bstats.bytes += qdisc_pkt_len(skb);
-		sch->bstats.packets++;
+		qdisc_bstats_update(sch, skb);
 		sch->q.qlen++;
 		return NET_XMIT_SUCCESS;
 	}
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index e5593c0..1c4bce8 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -240,8 +240,7 @@
 
 	if (likely(ret == NET_XMIT_SUCCESS)) {
 		sch->q.qlen++;
-		sch->bstats.bytes += qdisc_pkt_len(skb);
-		sch->bstats.packets++;
+		qdisc_bstats_update(sch, skb);
 	} else if (net_xmit_drop_count(ret)) {
 		sch->qstats.drops++;
 	}
@@ -477,8 +476,7 @@
 		__skb_queue_after(list, skb, nskb);
 
 		sch->qstats.backlog += qdisc_pkt_len(nskb);
-		sch->bstats.bytes += qdisc_pkt_len(nskb);
-		sch->bstats.packets++;
+		qdisc_bstats_update(sch, nskb);
 
 		return NET_XMIT_SUCCESS;
 	}
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index b1c95bc..966158d 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -84,8 +84,7 @@
 
 	ret = qdisc_enqueue(skb, qdisc);
 	if (ret == NET_XMIT_SUCCESS) {
-		sch->bstats.bytes += qdisc_pkt_len(skb);
-		sch->bstats.packets++;
+		qdisc_bstats_update(sch, skb);
 		sch->q.qlen++;
 		return NET_XMIT_SUCCESS;
 	}
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
index a67ba3c..a6009c5 100644
--- a/net/sched/sch_red.c
+++ b/net/sched/sch_red.c
@@ -94,8 +94,7 @@
 
 	ret = qdisc_enqueue(skb, child);
 	if (likely(ret == NET_XMIT_SUCCESS)) {
-		sch->bstats.bytes += qdisc_pkt_len(skb);
-		sch->bstats.packets++;
+		qdisc_bstats_update(sch, skb);
 		sch->q.qlen++;
 	} else if (net_xmit_drop_count(ret)) {
 		q->stats.pdrop++;
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index d54ac94..239ec53 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -403,8 +403,7 @@
 		slot->allot = q->scaled_quantum;
 	}
 	if (++sch->q.qlen <= q->limit) {
-		sch->bstats.bytes += qdisc_pkt_len(skb);
-		sch->bstats.packets++;
+		qdisc_bstats_update(sch, skb);
 		return NET_XMIT_SUCCESS;
 	}
 
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index 641a30d..77565e7 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -134,8 +134,7 @@
 	}
 
 	sch->q.qlen++;
-	sch->bstats.bytes += qdisc_pkt_len(skb);
-	sch->bstats.packets++;
+	qdisc_bstats_update(sch, skb);
 	return NET_XMIT_SUCCESS;
 }
 
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index 106479a..af9360d 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -83,8 +83,7 @@
 
 	if (q->q.qlen < dev->tx_queue_len) {
 		__skb_queue_tail(&q->q, skb);
-		sch->bstats.bytes += qdisc_pkt_len(skb);
-		sch->bstats.packets++;
+		qdisc_bstats_update(sch, skb);
 		return NET_XMIT_SUCCESS;
 	}
 
diff --git a/net/socket.c b/net/socket.c
index ccc576a..ac2219f 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -306,20 +306,6 @@
 	.statfs		= simple_statfs,
 };
 
-static struct dentry *sockfs_mount(struct file_system_type *fs_type,
-			 int flags, const char *dev_name, void *data)
-{
-	return mount_pseudo(fs_type, "socket:", &sockfs_ops, SOCKFS_MAGIC);
-}
-
-static struct vfsmount *sock_mnt __read_mostly;
-
-static struct file_system_type sock_fs_type = {
-	.name =		"sockfs",
-	.mount =	sockfs_mount,
-	.kill_sb =	kill_anon_super,
-};
-
 /*
  * sockfs_dname() is called from d_path().
  */
@@ -333,6 +319,21 @@
 	.d_dname  = sockfs_dname,
 };
 
+static struct dentry *sockfs_mount(struct file_system_type *fs_type,
+			 int flags, const char *dev_name, void *data)
+{
+	return mount_pseudo(fs_type, "socket:", &sockfs_ops,
+		&sockfs_dentry_operations, SOCKFS_MAGIC);
+}
+
+static struct vfsmount *sock_mnt __read_mostly;
+
+static struct file_system_type sock_fs_type = {
+	.name =		"sockfs",
+	.mount =	sockfs_mount,
+	.kill_sb =	kill_anon_super,
+};
+
 /*
  *	Obtains the first available file descriptor and sets it up for use.
  *
@@ -368,7 +369,6 @@
 	}
 	path.mnt = mntget(sock_mnt);
 
-	d_set_d_op(path.dentry, &sockfs_dentry_operations);
 	d_instantiate(path.dentry, SOCK_INODE(sock));
 	SOCK_INODE(sock)->i_fop = &socket_file_ops;
 
diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c
index afe6784..67e3127 100644
--- a/net/sunrpc/auth.c
+++ b/net/sunrpc/auth.c
@@ -563,8 +563,17 @@
 	return cred->cr_ops->crvalidate(task, p);
 }
 
+static void rpcauth_wrap_req_encode(kxdreproc_t encode, struct rpc_rqst *rqstp,
+				   __be32 *data, void *obj)
+{
+	struct xdr_stream xdr;
+
+	xdr_init_encode(&xdr, &rqstp->rq_snd_buf, data);
+	encode(rqstp, &xdr, obj);
+}
+
 int
-rpcauth_wrap_req(struct rpc_task *task, kxdrproc_t encode, void *rqstp,
+rpcauth_wrap_req(struct rpc_task *task, kxdreproc_t encode, void *rqstp,
 		__be32 *data, void *obj)
 {
 	struct rpc_cred *cred = task->tk_rqstp->rq_cred;
@@ -574,11 +583,22 @@
 	if (cred->cr_ops->crwrap_req)
 		return cred->cr_ops->crwrap_req(task, encode, rqstp, data, obj);
 	/* By default, we encode the arguments normally. */
-	return encode(rqstp, data, obj);
+	rpcauth_wrap_req_encode(encode, rqstp, data, obj);
+	return 0;
+}
+
+static int
+rpcauth_unwrap_req_decode(kxdrdproc_t decode, struct rpc_rqst *rqstp,
+			  __be32 *data, void *obj)
+{
+	struct xdr_stream xdr;
+
+	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, data);
+	return decode(rqstp, &xdr, obj);
 }
 
 int
-rpcauth_unwrap_resp(struct rpc_task *task, kxdrproc_t decode, void *rqstp,
+rpcauth_unwrap_resp(struct rpc_task *task, kxdrdproc_t decode, void *rqstp,
 		__be32 *data, void *obj)
 {
 	struct rpc_cred *cred = task->tk_rqstp->rq_cred;
@@ -589,7 +609,7 @@
 		return cred->cr_ops->crunwrap_resp(task, decode, rqstp,
 						   data, obj);
 	/* By default, we decode the arguments normally. */
-	return decode(rqstp, data, obj);
+	return rpcauth_unwrap_req_decode(decode, rqstp, data, obj);
 }
 
 int
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index 3835ce3..45dbf15 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -1231,9 +1231,19 @@
 	return NULL;
 }
 
+static void gss_wrap_req_encode(kxdreproc_t encode, struct rpc_rqst *rqstp,
+				__be32 *p, void *obj)
+{
+	struct xdr_stream xdr;
+
+	xdr_init_encode(&xdr, &rqstp->rq_snd_buf, p);
+	encode(rqstp, &xdr, obj);
+}
+
 static inline int
 gss_wrap_req_integ(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
-		kxdrproc_t encode, struct rpc_rqst *rqstp, __be32 *p, void *obj)
+		   kxdreproc_t encode, struct rpc_rqst *rqstp,
+		   __be32 *p, void *obj)
 {
 	struct xdr_buf	*snd_buf = &rqstp->rq_snd_buf;
 	struct xdr_buf	integ_buf;
@@ -1249,9 +1259,7 @@
 	offset = (u8 *)p - (u8 *)snd_buf->head[0].iov_base;
 	*p++ = htonl(rqstp->rq_seqno);
 
-	status = encode(rqstp, p, obj);
-	if (status)
-		return status;
+	gss_wrap_req_encode(encode, rqstp, p, obj);
 
 	if (xdr_buf_subsegment(snd_buf, &integ_buf,
 				offset, snd_buf->len - offset))
@@ -1325,7 +1333,8 @@
 
 static inline int
 gss_wrap_req_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
-		kxdrproc_t encode, struct rpc_rqst *rqstp, __be32 *p, void *obj)
+		  kxdreproc_t encode, struct rpc_rqst *rqstp,
+		  __be32 *p, void *obj)
 {
 	struct xdr_buf	*snd_buf = &rqstp->rq_snd_buf;
 	u32		offset;
@@ -1342,9 +1351,7 @@
 	offset = (u8 *)p - (u8 *)snd_buf->head[0].iov_base;
 	*p++ = htonl(rqstp->rq_seqno);
 
-	status = encode(rqstp, p, obj);
-	if (status)
-		return status;
+	gss_wrap_req_encode(encode, rqstp, p, obj);
 
 	status = alloc_enc_pages(rqstp);
 	if (status)
@@ -1394,7 +1401,7 @@
 
 static int
 gss_wrap_req(struct rpc_task *task,
-	     kxdrproc_t encode, void *rqstp, __be32 *p, void *obj)
+	     kxdreproc_t encode, void *rqstp, __be32 *p, void *obj)
 {
 	struct rpc_cred *cred = task->tk_rqstp->rq_cred;
 	struct gss_cred	*gss_cred = container_of(cred, struct gss_cred,
@@ -1407,12 +1414,14 @@
 		/* The spec seems a little ambiguous here, but I think that not
 		 * wrapping context destruction requests makes the most sense.
 		 */
-		status = encode(rqstp, p, obj);
+		gss_wrap_req_encode(encode, rqstp, p, obj);
+		status = 0;
 		goto out;
 	}
 	switch (gss_cred->gc_service) {
 		case RPC_GSS_SVC_NONE:
-			status = encode(rqstp, p, obj);
+			gss_wrap_req_encode(encode, rqstp, p, obj);
+			status = 0;
 			break;
 		case RPC_GSS_SVC_INTEGRITY:
 			status = gss_wrap_req_integ(cred, ctx, encode,
@@ -1494,10 +1503,19 @@
 	return 0;
 }
 
+static int
+gss_unwrap_req_decode(kxdrdproc_t decode, struct rpc_rqst *rqstp,
+		      __be32 *p, void *obj)
+{
+	struct xdr_stream xdr;
+
+	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
+	return decode(rqstp, &xdr, obj);
+}
 
 static int
 gss_unwrap_resp(struct rpc_task *task,
-		kxdrproc_t decode, void *rqstp, __be32 *p, void *obj)
+		kxdrdproc_t decode, void *rqstp, __be32 *p, void *obj)
 {
 	struct rpc_cred *cred = task->tk_rqstp->rq_cred;
 	struct gss_cred *gss_cred = container_of(cred, struct gss_cred,
@@ -1528,7 +1546,7 @@
 	cred->cr_auth->au_rslack = cred->cr_auth->au_verfsize + (p - savedp)
 						+ (savedlen - head->iov_len);
 out_decode:
-	status = decode(rqstp, p, obj);
+	status = gss_unwrap_req_decode(decode, rqstp, p, obj);
 out:
 	gss_put_ctx(ctx);
 	dprintk("RPC: %5u gss_unwrap_resp returning %d\n", task->tk_pid,
diff --git a/net/sunrpc/bc_svc.c b/net/sunrpc/bc_svc.c
index 7dcfe0c..1dd1a68 100644
--- a/net/sunrpc/bc_svc.c
+++ b/net/sunrpc/bc_svc.c
@@ -59,8 +59,8 @@
 		ret = task->tk_status;
 		rpc_put_task(task);
 	}
-	return ret;
 	dprintk("RPC:       bc_send ret= %d\n", ret);
+	return ret;
 }
 
 #endif /* CONFIG_NFS_V4_1 */
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 92ce94f..57d344c 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -1095,7 +1095,7 @@
 rpc_xdr_encode(struct rpc_task *task)
 {
 	struct rpc_rqst	*req = task->tk_rqstp;
-	kxdrproc_t	encode;
+	kxdreproc_t	encode;
 	__be32		*p;
 
 	dprint_status(task);
@@ -1535,7 +1535,7 @@
 {
 	struct rpc_clnt	*clnt = task->tk_client;
 	struct rpc_rqst	*req = task->tk_rqstp;
-	kxdrproc_t	decode = task->tk_msg.rpc_proc->p_decode;
+	kxdrdproc_t	decode = task->tk_msg.rpc_proc->p_decode;
 	__be32		*p;
 
 	dprintk("RPC: %5u call_decode (status %d)\n",
@@ -1776,12 +1776,11 @@
 	goto out_garbage;
 }
 
-static int rpcproc_encode_null(void *rqstp, __be32 *data, void *obj)
+static void rpcproc_encode_null(void *rqstp, struct xdr_stream *xdr, void *obj)
 {
-	return 0;
 }
 
-static int rpcproc_decode_null(void *rqstp, __be32 *data, void *obj)
+static int rpcproc_decode_null(void *rqstp, struct xdr_stream *xdr, void *obj)
 {
 	return 0;
 }
@@ -1830,23 +1829,15 @@
 			  const struct rpc_task *task)
 {
 	const char *rpc_waitq = "none";
-	char *p, action[KSYM_SYMBOL_LEN];
 
 	if (RPC_IS_QUEUED(task))
 		rpc_waitq = rpc_qname(task->tk_waitqueue);
 
-	/* map tk_action pointer to a function name; then trim off
-	 * the "+0x0 [sunrpc]" */
-	sprint_symbol(action, (unsigned long)task->tk_action);
-	p = strchr(action, '+');
-	if (p)
-		*p = '\0';
-
-	printk(KERN_INFO "%5u %04x %6d %8p %8p %8ld %8p %sv%u %s a:%s q:%s\n",
+	printk(KERN_INFO "%5u %04x %6d %8p %8p %8ld %8p %sv%u %s a:%ps q:%s\n",
 		task->tk_pid, task->tk_flags, task->tk_status,
 		clnt, task->tk_rqstp, task->tk_timeout, task->tk_ops,
 		clnt->cl_protname, clnt->cl_vers, rpc_proc_name(task),
-		action, rpc_waitq);
+		task->tk_action, rpc_waitq);
 }
 
 void rpc_show_tasks(void)
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index 09f01f4..72bc536 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -474,7 +474,7 @@
 {
 	struct inode *inode;
 
-	BUG_ON(!d_unhashed(dentry));
+	d_drop(dentry);
 	inode = rpc_get_inode(dir->i_sb, mode);
 	if (!inode)
 		goto out_err;
diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c
index fa6d7ca..c652e4c 100644
--- a/net/sunrpc/rpcb_clnt.c
+++ b/net/sunrpc/rpcb_clnt.c
@@ -57,10 +57,6 @@
 	RPCBPROC_GETSTAT,
 };
 
-#define RPCB_HIGHPROC_2		RPCBPROC_CALLIT
-#define RPCB_HIGHPROC_3		RPCBPROC_TADDR2UADDR
-#define RPCB_HIGHPROC_4		RPCBPROC_GETSTAT
-
 /*
  * r_owner
  *
@@ -693,46 +689,37 @@
  * XDR functions for rpcbind
  */
 
-static int rpcb_enc_mapping(struct rpc_rqst *req, __be32 *p,
-			    const struct rpcbind_args *rpcb)
+static void rpcb_enc_mapping(struct rpc_rqst *req, struct xdr_stream *xdr,
+			     const struct rpcbind_args *rpcb)
 {
 	struct rpc_task *task = req->rq_task;
-	struct xdr_stream xdr;
+	__be32 *p;
 
 	dprintk("RPC: %5u encoding PMAP_%s call (%u, %u, %d, %u)\n",
 			task->tk_pid, task->tk_msg.rpc_proc->p_name,
 			rpcb->r_prog, rpcb->r_vers, rpcb->r_prot, rpcb->r_port);
 
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
-
-	p = xdr_reserve_space(&xdr, sizeof(__be32) * RPCB_mappingargs_sz);
-	if (unlikely(p == NULL))
-		return -EIO;
-
-	*p++ = htonl(rpcb->r_prog);
-	*p++ = htonl(rpcb->r_vers);
-	*p++ = htonl(rpcb->r_prot);
-	*p   = htonl(rpcb->r_port);
-
-	return 0;
+	p = xdr_reserve_space(xdr, RPCB_mappingargs_sz << 2);
+	*p++ = cpu_to_be32(rpcb->r_prog);
+	*p++ = cpu_to_be32(rpcb->r_vers);
+	*p++ = cpu_to_be32(rpcb->r_prot);
+	*p   = cpu_to_be32(rpcb->r_port);
 }
 
-static int rpcb_dec_getport(struct rpc_rqst *req, __be32 *p,
+static int rpcb_dec_getport(struct rpc_rqst *req, struct xdr_stream *xdr,
 			    struct rpcbind_args *rpcb)
 {
 	struct rpc_task *task = req->rq_task;
-	struct xdr_stream xdr;
 	unsigned long port;
-
-	xdr_init_decode(&xdr, &req->rq_rcv_buf, p);
+	__be32 *p;
 
 	rpcb->r_port = 0;
 
-	p = xdr_inline_decode(&xdr, sizeof(__be32));
+	p = xdr_inline_decode(xdr, 4);
 	if (unlikely(p == NULL))
 		return -EIO;
 
-	port = ntohl(*p);
+	port = be32_to_cpup(p);
 	dprintk("RPC: %5u PMAP_%s result: %lu\n", task->tk_pid,
 			task->tk_msg.rpc_proc->p_name, port);
 	if (unlikely(port > USHRT_MAX))
@@ -742,20 +729,18 @@
 	return 0;
 }
 
-static int rpcb_dec_set(struct rpc_rqst *req, __be32 *p,
+static int rpcb_dec_set(struct rpc_rqst *req, struct xdr_stream *xdr,
 			unsigned int *boolp)
 {
 	struct rpc_task *task = req->rq_task;
-	struct xdr_stream xdr;
+	__be32 *p;
 
-	xdr_init_decode(&xdr, &req->rq_rcv_buf, p);
-
-	p = xdr_inline_decode(&xdr, sizeof(__be32));
+	p = xdr_inline_decode(xdr, 4);
 	if (unlikely(p == NULL))
 		return -EIO;
 
 	*boolp = 0;
-	if (*p)
+	if (*p != xdr_zero)
 		*boolp = 1;
 
 	dprintk("RPC: %5u RPCB_%s call %s\n",
@@ -764,73 +749,53 @@
 	return 0;
 }
 
-static int encode_rpcb_string(struct xdr_stream *xdr, const char *string,
-				const u32 maxstrlen)
+static void encode_rpcb_string(struct xdr_stream *xdr, const char *string,
+			       const u32 maxstrlen)
 {
-	u32 len;
 	__be32 *p;
+	u32 len;
 
-	if (unlikely(string == NULL))
-		return -EIO;
 	len = strlen(string);
-	if (unlikely(len > maxstrlen))
-		return -EIO;
-
-	p = xdr_reserve_space(xdr, sizeof(__be32) + len);
-	if (unlikely(p == NULL))
-		return -EIO;
+	BUG_ON(len > maxstrlen);
+	p = xdr_reserve_space(xdr, 4 + len);
 	xdr_encode_opaque(p, string, len);
-
-	return 0;
 }
 
-static int rpcb_enc_getaddr(struct rpc_rqst *req, __be32 *p,
-			    const struct rpcbind_args *rpcb)
+static void rpcb_enc_getaddr(struct rpc_rqst *req, struct xdr_stream *xdr,
+			     const struct rpcbind_args *rpcb)
 {
 	struct rpc_task *task = req->rq_task;
-	struct xdr_stream xdr;
+	__be32 *p;
 
 	dprintk("RPC: %5u encoding RPCB_%s call (%u, %u, '%s', '%s')\n",
 			task->tk_pid, task->tk_msg.rpc_proc->p_name,
 			rpcb->r_prog, rpcb->r_vers,
 			rpcb->r_netid, rpcb->r_addr);
 
-	xdr_init_encode(&xdr, &req->rq_snd_buf, p);
+	p = xdr_reserve_space(xdr, (RPCB_program_sz + RPCB_version_sz) << 2);
+	*p++ = cpu_to_be32(rpcb->r_prog);
+	*p = cpu_to_be32(rpcb->r_vers);
 
-	p = xdr_reserve_space(&xdr,
-			sizeof(__be32) * (RPCB_program_sz + RPCB_version_sz));
-	if (unlikely(p == NULL))
-		return -EIO;
-	*p++ = htonl(rpcb->r_prog);
-	*p = htonl(rpcb->r_vers);
-
-	if (encode_rpcb_string(&xdr, rpcb->r_netid, RPCBIND_MAXNETIDLEN))
-		return -EIO;
-	if (encode_rpcb_string(&xdr, rpcb->r_addr, RPCBIND_MAXUADDRLEN))
-		return -EIO;
-	if (encode_rpcb_string(&xdr, rpcb->r_owner, RPCB_MAXOWNERLEN))
-		return -EIO;
-
-	return 0;
+	encode_rpcb_string(xdr, rpcb->r_netid, RPCBIND_MAXNETIDLEN);
+	encode_rpcb_string(xdr, rpcb->r_addr, RPCBIND_MAXUADDRLEN);
+	encode_rpcb_string(xdr, rpcb->r_owner, RPCB_MAXOWNERLEN);
 }
 
-static int rpcb_dec_getaddr(struct rpc_rqst *req, __be32 *p,
+static int rpcb_dec_getaddr(struct rpc_rqst *req, struct xdr_stream *xdr,
 			    struct rpcbind_args *rpcb)
 {
 	struct sockaddr_storage address;
 	struct sockaddr *sap = (struct sockaddr *)&address;
 	struct rpc_task *task = req->rq_task;
-	struct xdr_stream xdr;
+	__be32 *p;
 	u32 len;
 
 	rpcb->r_port = 0;
 
-	xdr_init_decode(&xdr, &req->rq_rcv_buf, p);
-
-	p = xdr_inline_decode(&xdr, sizeof(__be32));
+	p = xdr_inline_decode(xdr, 4);
 	if (unlikely(p == NULL))
 		goto out_fail;
-	len = ntohl(*p);
+	len = be32_to_cpup(p);
 
 	/*
 	 * If the returned universal address is a null string,
@@ -845,7 +810,7 @@
 	if (unlikely(len > RPCBIND_MAXUADDRLEN))
 		goto out_fail;
 
-	p = xdr_inline_decode(&xdr, len);
+	p = xdr_inline_decode(xdr, len);
 	if (unlikely(p == NULL))
 		goto out_fail;
 	dprintk("RPC: %5u RPCB_%s reply: %s\n", task->tk_pid,
@@ -871,8 +836,8 @@
 static struct rpc_procinfo rpcb_procedures2[] = {
 	[RPCBPROC_SET] = {
 		.p_proc		= RPCBPROC_SET,
-		.p_encode	= (kxdrproc_t)rpcb_enc_mapping,
-		.p_decode	= (kxdrproc_t)rpcb_dec_set,
+		.p_encode	= (kxdreproc_t)rpcb_enc_mapping,
+		.p_decode	= (kxdrdproc_t)rpcb_dec_set,
 		.p_arglen	= RPCB_mappingargs_sz,
 		.p_replen	= RPCB_setres_sz,
 		.p_statidx	= RPCBPROC_SET,
@@ -881,8 +846,8 @@
 	},
 	[RPCBPROC_UNSET] = {
 		.p_proc		= RPCBPROC_UNSET,
-		.p_encode	= (kxdrproc_t)rpcb_enc_mapping,
-		.p_decode	= (kxdrproc_t)rpcb_dec_set,
+		.p_encode	= (kxdreproc_t)rpcb_enc_mapping,
+		.p_decode	= (kxdrdproc_t)rpcb_dec_set,
 		.p_arglen	= RPCB_mappingargs_sz,
 		.p_replen	= RPCB_setres_sz,
 		.p_statidx	= RPCBPROC_UNSET,
@@ -891,8 +856,8 @@
 	},
 	[RPCBPROC_GETPORT] = {
 		.p_proc		= RPCBPROC_GETPORT,
-		.p_encode	= (kxdrproc_t)rpcb_enc_mapping,
-		.p_decode	= (kxdrproc_t)rpcb_dec_getport,
+		.p_encode	= (kxdreproc_t)rpcb_enc_mapping,
+		.p_decode	= (kxdrdproc_t)rpcb_dec_getport,
 		.p_arglen	= RPCB_mappingargs_sz,
 		.p_replen	= RPCB_getportres_sz,
 		.p_statidx	= RPCBPROC_GETPORT,
@@ -904,8 +869,8 @@
 static struct rpc_procinfo rpcb_procedures3[] = {
 	[RPCBPROC_SET] = {
 		.p_proc		= RPCBPROC_SET,
-		.p_encode	= (kxdrproc_t)rpcb_enc_getaddr,
-		.p_decode	= (kxdrproc_t)rpcb_dec_set,
+		.p_encode	= (kxdreproc_t)rpcb_enc_getaddr,
+		.p_decode	= (kxdrdproc_t)rpcb_dec_set,
 		.p_arglen	= RPCB_getaddrargs_sz,
 		.p_replen	= RPCB_setres_sz,
 		.p_statidx	= RPCBPROC_SET,
@@ -914,8 +879,8 @@
 	},
 	[RPCBPROC_UNSET] = {
 		.p_proc		= RPCBPROC_UNSET,
-		.p_encode	= (kxdrproc_t)rpcb_enc_getaddr,
-		.p_decode	= (kxdrproc_t)rpcb_dec_set,
+		.p_encode	= (kxdreproc_t)rpcb_enc_getaddr,
+		.p_decode	= (kxdrdproc_t)rpcb_dec_set,
 		.p_arglen	= RPCB_getaddrargs_sz,
 		.p_replen	= RPCB_setres_sz,
 		.p_statidx	= RPCBPROC_UNSET,
@@ -924,8 +889,8 @@
 	},
 	[RPCBPROC_GETADDR] = {
 		.p_proc		= RPCBPROC_GETADDR,
-		.p_encode	= (kxdrproc_t)rpcb_enc_getaddr,
-		.p_decode	= (kxdrproc_t)rpcb_dec_getaddr,
+		.p_encode	= (kxdreproc_t)rpcb_enc_getaddr,
+		.p_decode	= (kxdrdproc_t)rpcb_dec_getaddr,
 		.p_arglen	= RPCB_getaddrargs_sz,
 		.p_replen	= RPCB_getaddrres_sz,
 		.p_statidx	= RPCBPROC_GETADDR,
@@ -937,8 +902,8 @@
 static struct rpc_procinfo rpcb_procedures4[] = {
 	[RPCBPROC_SET] = {
 		.p_proc		= RPCBPROC_SET,
-		.p_encode	= (kxdrproc_t)rpcb_enc_getaddr,
-		.p_decode	= (kxdrproc_t)rpcb_dec_set,
+		.p_encode	= (kxdreproc_t)rpcb_enc_getaddr,
+		.p_decode	= (kxdrdproc_t)rpcb_dec_set,
 		.p_arglen	= RPCB_getaddrargs_sz,
 		.p_replen	= RPCB_setres_sz,
 		.p_statidx	= RPCBPROC_SET,
@@ -947,8 +912,8 @@
 	},
 	[RPCBPROC_UNSET] = {
 		.p_proc		= RPCBPROC_UNSET,
-		.p_encode	= (kxdrproc_t)rpcb_enc_getaddr,
-		.p_decode	= (kxdrproc_t)rpcb_dec_set,
+		.p_encode	= (kxdreproc_t)rpcb_enc_getaddr,
+		.p_decode	= (kxdrdproc_t)rpcb_dec_set,
 		.p_arglen	= RPCB_getaddrargs_sz,
 		.p_replen	= RPCB_setres_sz,
 		.p_statidx	= RPCBPROC_UNSET,
@@ -957,8 +922,8 @@
 	},
 	[RPCBPROC_GETADDR] = {
 		.p_proc		= RPCBPROC_GETADDR,
-		.p_encode	= (kxdrproc_t)rpcb_enc_getaddr,
-		.p_decode	= (kxdrproc_t)rpcb_dec_getaddr,
+		.p_encode	= (kxdreproc_t)rpcb_enc_getaddr,
+		.p_decode	= (kxdrdproc_t)rpcb_dec_getaddr,
 		.p_arglen	= RPCB_getaddrargs_sz,
 		.p_replen	= RPCB_getaddrres_sz,
 		.p_statidx	= RPCBPROC_GETADDR,
@@ -993,19 +958,19 @@
 
 static struct rpc_version rpcb_version2 = {
 	.number		= RPCBVERS_2,
-	.nrprocs	= RPCB_HIGHPROC_2,
+	.nrprocs	= ARRAY_SIZE(rpcb_procedures2),
 	.procs		= rpcb_procedures2
 };
 
 static struct rpc_version rpcb_version3 = {
 	.number		= RPCBVERS_3,
-	.nrprocs	= RPCB_HIGHPROC_3,
+	.nrprocs	= ARRAY_SIZE(rpcb_procedures3),
 	.procs		= rpcb_procedures3
 };
 
 static struct rpc_version rpcb_version4 = {
 	.number		= RPCBVERS_4,
-	.nrprocs	= RPCB_HIGHPROC_4,
+	.nrprocs	= ARRAY_SIZE(rpcb_procedures4),
 	.procs		= rpcb_procedures4
 };
 
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index 6359c42..0e659c6 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -488,10 +488,6 @@
 	if (svc_serv_is_pooled(serv))
 		svc_pool_map_put();
 
-#if defined(CONFIG_NFS_V4_1)
-	svc_sock_destroy(serv->bc_xprt);
-#endif /* CONFIG_NFS_V4_1 */
-
 	svc_unregister(serv);
 	kfree(serv->sv_pools);
 	kfree(serv);
@@ -1147,7 +1143,6 @@
  dropit:
 	svc_authorise(rqstp);	/* doesn't hurt to call this twice */
 	dprintk("svc: svc_process dropit\n");
-	svc_drop(rqstp);
 	return 0;
 
 err_short_len:
@@ -1218,7 +1213,6 @@
 	struct kvec		*resv = &rqstp->rq_res.head[0];
 	struct svc_serv		*serv = rqstp->rq_server;
 	u32			dir;
-	int			error;
 
 	/*
 	 * Setup response xdr_buf.
@@ -1246,11 +1240,13 @@
 		return 0;
 	}
 
-	error = svc_process_common(rqstp, argv, resv);
-	if (error <= 0)
-		return error;
-
-	return svc_send(rqstp);
+	/* Returns 1 for send, 0 for drop */
+	if (svc_process_common(rqstp, argv, resv))
+		return svc_send(rqstp);
+	else {
+		svc_drop(rqstp);
+		return 0;
+	}
 }
 
 #if defined(CONFIG_NFS_V4_1)
@@ -1264,10 +1260,9 @@
 {
 	struct kvec	*argv = &rqstp->rq_arg.head[0];
 	struct kvec	*resv = &rqstp->rq_res.head[0];
-	int 		error;
 
 	/* Build the svc_rqst used by the common processing routine */
-	rqstp->rq_xprt = serv->bc_xprt;
+	rqstp->rq_xprt = serv->sv_bc_xprt;
 	rqstp->rq_xid = req->rq_xid;
 	rqstp->rq_prot = req->rq_xprt->prot;
 	rqstp->rq_server = serv;
@@ -1292,12 +1287,15 @@
 	svc_getu32(argv);	/* XID */
 	svc_getnl(argv);	/* CALLDIR */
 
-	error = svc_process_common(rqstp, argv, resv);
-	if (error <= 0)
-		return error;
-
-	memcpy(&req->rq_snd_buf, &rqstp->rq_res, sizeof(req->rq_snd_buf));
-	return bc_send(req);
+	/* Returns 1 for send, 0 for drop */
+	if (svc_process_common(rqstp, argv, resv)) {
+		memcpy(&req->rq_snd_buf, &rqstp->rq_res,
+						sizeof(req->rq_snd_buf));
+		return bc_send(req);
+	} else {
+		/* Nothing to do to drop request */
+		return 0;
+	}
 }
 EXPORT_SYMBOL(bc_svc_process);
 #endif /* CONFIG_NFS_V4_1 */
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index 07919e1..d265aa7 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -66,6 +66,13 @@
 static struct svc_xprt *svc_create_socket(struct svc_serv *, int,
 					  struct net *, struct sockaddr *,
 					  int, int);
+#if defined(CONFIG_NFS_V4_1)
+static struct svc_xprt *svc_bc_create_socket(struct svc_serv *, int,
+					     struct net *, struct sockaddr *,
+					     int, int);
+static void svc_bc_sock_free(struct svc_xprt *xprt);
+#endif /* CONFIG_NFS_V4_1 */
+
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 static struct lock_class_key svc_key[2];
 static struct lock_class_key svc_slock_key[2];
@@ -1184,6 +1191,57 @@
 	return svc_create_socket(serv, IPPROTO_TCP, net, sa, salen, flags);
 }
 
+#if defined(CONFIG_NFS_V4_1)
+static struct svc_xprt *svc_bc_create_socket(struct svc_serv *, int,
+					     struct net *, struct sockaddr *,
+					     int, int);
+static void svc_bc_sock_free(struct svc_xprt *xprt);
+
+static struct svc_xprt *svc_bc_tcp_create(struct svc_serv *serv,
+				       struct net *net,
+				       struct sockaddr *sa, int salen,
+				       int flags)
+{
+	return svc_bc_create_socket(serv, IPPROTO_TCP, net, sa, salen, flags);
+}
+
+static void svc_bc_tcp_sock_detach(struct svc_xprt *xprt)
+{
+}
+
+static struct svc_xprt_ops svc_tcp_bc_ops = {
+	.xpo_create = svc_bc_tcp_create,
+	.xpo_detach = svc_bc_tcp_sock_detach,
+	.xpo_free = svc_bc_sock_free,
+	.xpo_prep_reply_hdr = svc_tcp_prep_reply_hdr,
+};
+
+static struct svc_xprt_class svc_tcp_bc_class = {
+	.xcl_name = "tcp-bc",
+	.xcl_owner = THIS_MODULE,
+	.xcl_ops = &svc_tcp_bc_ops,
+	.xcl_max_payload = RPCSVC_MAXPAYLOAD_TCP,
+};
+
+static void svc_init_bc_xprt_sock(void)
+{
+	svc_reg_xprt_class(&svc_tcp_bc_class);
+}
+
+static void svc_cleanup_bc_xprt_sock(void)
+{
+	svc_unreg_xprt_class(&svc_tcp_bc_class);
+}
+#else /* CONFIG_NFS_V4_1 */
+static void svc_init_bc_xprt_sock(void)
+{
+}
+
+static void svc_cleanup_bc_xprt_sock(void)
+{
+}
+#endif /* CONFIG_NFS_V4_1 */
+
 static struct svc_xprt_ops svc_tcp_ops = {
 	.xpo_create = svc_tcp_create,
 	.xpo_recvfrom = svc_tcp_recvfrom,
@@ -1207,12 +1265,14 @@
 {
 	svc_reg_xprt_class(&svc_tcp_class);
 	svc_reg_xprt_class(&svc_udp_class);
+	svc_init_bc_xprt_sock();
 }
 
 void svc_cleanup_xprt_sock(void)
 {
 	svc_unreg_xprt_class(&svc_tcp_class);
 	svc_unreg_xprt_class(&svc_udp_class);
+	svc_cleanup_bc_xprt_sock();
 }
 
 static void svc_tcp_init(struct svc_sock *svsk, struct svc_serv *serv)
@@ -1509,41 +1569,45 @@
 	kfree(svsk);
 }
 
+#if defined(CONFIG_NFS_V4_1)
 /*
- * Create a svc_xprt.
- *
- * For internal use only (e.g. nfsv4.1 backchannel).
- * Callers should typically use the xpo_create() method.
+ * Create a back channel svc_xprt which shares the fore channel socket.
  */
-struct svc_xprt *svc_sock_create(struct svc_serv *serv, int prot)
+static struct svc_xprt *svc_bc_create_socket(struct svc_serv *serv,
+					     int protocol,
+					     struct net *net,
+					     struct sockaddr *sin, int len,
+					     int flags)
 {
 	struct svc_sock *svsk;
-	struct svc_xprt *xprt = NULL;
+	struct svc_xprt *xprt;
 
-	dprintk("svc: %s\n", __func__);
+	if (protocol != IPPROTO_TCP) {
+		printk(KERN_WARNING "svc: only TCP sockets"
+			" supported on shared back channel\n");
+		return ERR_PTR(-EINVAL);
+	}
+
 	svsk = kzalloc(sizeof(*svsk), GFP_KERNEL);
 	if (!svsk)
-		goto out;
+		return ERR_PTR(-ENOMEM);
 
 	xprt = &svsk->sk_xprt;
-	if (prot == IPPROTO_TCP)
-		svc_xprt_init(&svc_tcp_class, xprt, serv);
-	else if (prot == IPPROTO_UDP)
-		svc_xprt_init(&svc_udp_class, xprt, serv);
-	else
-		BUG();
-out:
-	dprintk("svc: %s return %p\n", __func__, xprt);
+	svc_xprt_init(&svc_tcp_bc_class, xprt, serv);
+
+	serv->sv_bc_xprt = xprt;
+
 	return xprt;
 }
-EXPORT_SYMBOL_GPL(svc_sock_create);
 
 /*
- * Destroy a svc_sock.
+ * Free a back channel svc_sock.
  */
-void svc_sock_destroy(struct svc_xprt *xprt)
+static void svc_bc_sock_free(struct svc_xprt *xprt)
 {
-	if (xprt)
+	if (xprt) {
+		kfree(xprt->xpt_bc_sid);
 		kfree(container_of(xprt, struct svc_sock, sk_xprt));
+	}
 }
-EXPORT_SYMBOL_GPL(svc_sock_destroy);
+#endif /* CONFIG_NFS_V4_1 */
diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
index cd9e841..679cd67 100644
--- a/net/sunrpc/xdr.c
+++ b/net/sunrpc/xdr.c
@@ -552,6 +552,74 @@
 }
 EXPORT_SYMBOL_GPL(xdr_write_pages);
 
+static void xdr_set_iov(struct xdr_stream *xdr, struct kvec *iov,
+		__be32 *p, unsigned int len)
+{
+	if (len > iov->iov_len)
+		len = iov->iov_len;
+	if (p == NULL)
+		p = (__be32*)iov->iov_base;
+	xdr->p = p;
+	xdr->end = (__be32*)(iov->iov_base + len);
+	xdr->iov = iov;
+	xdr->page_ptr = NULL;
+}
+
+static int xdr_set_page_base(struct xdr_stream *xdr,
+		unsigned int base, unsigned int len)
+{
+	unsigned int pgnr;
+	unsigned int maxlen;
+	unsigned int pgoff;
+	unsigned int pgend;
+	void *kaddr;
+
+	maxlen = xdr->buf->page_len;
+	if (base >= maxlen)
+		return -EINVAL;
+	maxlen -= base;
+	if (len > maxlen)
+		len = maxlen;
+
+	base += xdr->buf->page_base;
+
+	pgnr = base >> PAGE_SHIFT;
+	xdr->page_ptr = &xdr->buf->pages[pgnr];
+	kaddr = page_address(*xdr->page_ptr);
+
+	pgoff = base & ~PAGE_MASK;
+	xdr->p = (__be32*)(kaddr + pgoff);
+
+	pgend = pgoff + len;
+	if (pgend > PAGE_SIZE)
+		pgend = PAGE_SIZE;
+	xdr->end = (__be32*)(kaddr + pgend);
+	xdr->iov = NULL;
+	return 0;
+}
+
+static void xdr_set_next_page(struct xdr_stream *xdr)
+{
+	unsigned int newbase;
+
+	newbase = (1 + xdr->page_ptr - xdr->buf->pages) << PAGE_SHIFT;
+	newbase -= xdr->buf->page_base;
+
+	if (xdr_set_page_base(xdr, newbase, PAGE_SIZE) < 0)
+		xdr_set_iov(xdr, xdr->buf->tail, NULL, xdr->buf->len);
+}
+
+static bool xdr_set_next_buffer(struct xdr_stream *xdr)
+{
+	if (xdr->page_ptr != NULL)
+		xdr_set_next_page(xdr);
+	else if (xdr->iov == xdr->buf->head) {
+		if (xdr_set_page_base(xdr, 0, PAGE_SIZE) < 0)
+			xdr_set_iov(xdr, xdr->buf->tail, NULL, xdr->buf->len);
+	}
+	return xdr->p != xdr->end;
+}
+
 /**
  * xdr_init_decode - Initialize an xdr_stream for decoding data.
  * @xdr: pointer to xdr_stream struct
@@ -560,41 +628,67 @@
  */
 void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p)
 {
-	struct kvec *iov = buf->head;
-	unsigned int len = iov->iov_len;
-
-	if (len > buf->len)
-		len = buf->len;
 	xdr->buf = buf;
-	xdr->iov = iov;
-	xdr->p = p;
-	xdr->end = (__be32 *)((char *)iov->iov_base + len);
+	xdr->scratch.iov_base = NULL;
+	xdr->scratch.iov_len = 0;
+	if (buf->head[0].iov_len != 0)
+		xdr_set_iov(xdr, buf->head, p, buf->len);
+	else if (buf->page_len != 0)
+		xdr_set_page_base(xdr, 0, buf->len);
 }
 EXPORT_SYMBOL_GPL(xdr_init_decode);
 
-/**
- * xdr_inline_peek - Allow read-ahead in the XDR data stream
- * @xdr: pointer to xdr_stream struct
- * @nbytes: number of bytes of data to decode
- *
- * Check if the input buffer is long enough to enable us to decode
- * 'nbytes' more bytes of data starting at the current position.
- * If so return the current pointer without updating the current
- * pointer position.
- */
-__be32 * xdr_inline_peek(struct xdr_stream *xdr, size_t nbytes)
+static __be32 * __xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes)
 {
 	__be32 *p = xdr->p;
 	__be32 *q = p + XDR_QUADLEN(nbytes);
 
 	if (unlikely(q > xdr->end || q < p))
 		return NULL;
+	xdr->p = q;
 	return p;
 }
-EXPORT_SYMBOL_GPL(xdr_inline_peek);
 
 /**
- * xdr_inline_decode - Retrieve non-page XDR data to decode
+ * xdr_set_scratch_buffer - Attach a scratch buffer for decoding data.
+ * @xdr: pointer to xdr_stream struct
+ * @buf: pointer to an empty buffer
+ * @buflen: size of 'buf'
+ *
+ * The scratch buffer is used when decoding from an array of pages.
+ * If an xdr_inline_decode() call spans across page boundaries, then
+ * we copy the data into the scratch buffer in order to allow linear
+ * access.
+ */
+void xdr_set_scratch_buffer(struct xdr_stream *xdr, void *buf, size_t buflen)
+{
+	xdr->scratch.iov_base = buf;
+	xdr->scratch.iov_len = buflen;
+}
+EXPORT_SYMBOL_GPL(xdr_set_scratch_buffer);
+
+static __be32 *xdr_copy_to_scratch(struct xdr_stream *xdr, size_t nbytes)
+{
+	__be32 *p;
+	void *cpdest = xdr->scratch.iov_base;
+	size_t cplen = (char *)xdr->end - (char *)xdr->p;
+
+	if (nbytes > xdr->scratch.iov_len)
+		return NULL;
+	memcpy(cpdest, xdr->p, cplen);
+	cpdest += cplen;
+	nbytes -= cplen;
+	if (!xdr_set_next_buffer(xdr))
+		return NULL;
+	p = __xdr_inline_decode(xdr, nbytes);
+	if (p == NULL)
+		return NULL;
+	memcpy(cpdest, p, nbytes);
+	return xdr->scratch.iov_base;
+}
+
+/**
+ * xdr_inline_decode - Retrieve XDR data to decode
  * @xdr: pointer to xdr_stream struct
  * @nbytes: number of bytes of data to decode
  *
@@ -605,13 +699,16 @@
  */
 __be32 * xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes)
 {
-	__be32 *p = xdr->p;
-	__be32 *q = p + XDR_QUADLEN(nbytes);
+	__be32 *p;
 
-	if (unlikely(q > xdr->end || q < p))
+	if (nbytes == 0)
+		return xdr->p;
+	if (xdr->p == xdr->end && !xdr_set_next_buffer(xdr))
 		return NULL;
-	xdr->p = q;
-	return p;
+	p = __xdr_inline_decode(xdr, nbytes);
+	if (p != NULL)
+		return p;
+	return xdr_copy_to_scratch(xdr, nbytes);
 }
 EXPORT_SYMBOL_GPL(xdr_inline_decode);
 
@@ -671,16 +768,12 @@
  */
 void xdr_enter_page(struct xdr_stream *xdr, unsigned int len)
 {
-	char * kaddr = page_address(xdr->buf->pages[0]);
 	xdr_read_pages(xdr, len);
 	/*
 	 * Position current pointer at beginning of tail, and
 	 * set remaining message length.
 	 */
-	if (len > PAGE_CACHE_SIZE - xdr->buf->page_base)
-		len = PAGE_CACHE_SIZE - xdr->buf->page_base;
-	xdr->p = (__be32 *)(kaddr + xdr->buf->page_base);
-	xdr->end = (__be32 *)((char *)xdr->p + len);
+	xdr_set_page_base(xdr, 0, len);
 }
 EXPORT_SYMBOL_GPL(xdr_enter_page);
 
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 8eb8895..d5e1e0b 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -26,6 +26,7 @@
 #include <net/sock.h>
 #include <net/xfrm.h>
 #include <net/netlink.h>
+#include <net/ah.h>
 #include <asm/uaccess.h>
 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
 #include <linux/in6.h>
@@ -302,7 +303,8 @@
 	algo = xfrm_aalg_get_byname(ualg->alg_name, 1);
 	if (!algo)
 		return -ENOSYS;
-	if (ualg->alg_trunc_len > algo->uinfo.auth.icv_fullbits)
+	if ((ualg->alg_trunc_len / 8) > MAX_AH_AUTH_LEN ||
+	    ualg->alg_trunc_len > algo->uinfo.auth.icv_fullbits)
 		return -EINVAL;
 	*props = algo->desc.sadb_alg_id;
 
@@ -2187,7 +2189,7 @@
 
 	if ((type == (XFRM_MSG_GETSA - XFRM_MSG_BASE) ||
 	     type == (XFRM_MSG_GETPOLICY - XFRM_MSG_BASE)) &&
-	    (nlh->nlmsg_flags & NLM_F_DUMP)) {
+	    (nlh->nlmsg_flags & NLM_F_DUMP) == NLM_F_DUMP) {
 		if (link->dump == NULL)
 			return -EINVAL;
 
diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib
index 396da16..1c702ca 100644
--- a/scripts/Makefile.lib
+++ b/scripts/Makefile.lib
@@ -262,6 +262,34 @@
 	lzop -9 && $(call size_append, $(filter-out FORCE,$^))) > $@ || \
 	(rm -f $@ ; false)
 
+# XZ
+# ---------------------------------------------------------------------------
+# Use xzkern to compress the kernel image and xzmisc to compress other things.
+#
+# xzkern uses a big LZMA2 dictionary since it doesn't increase memory usage
+# of the kernel decompressor. A BCJ filter is used if it is available for
+# the target architecture. xzkern also appends uncompressed size of the data
+# using size_append. The .xz format has the size information available at
+# the end of the file too, but it's in a more complex format and it's good to
+# avoid changing the part of the boot code that reads the uncompressed size.
+# Note that the bytes added by size_append will make the xz tool think that
+# the file is corrupt. This is expected.
+#
+# xzmisc doesn't use size_append, so it can be used to create normal .xz
+# files. xzmisc uses a smaller LZMA2 dictionary than xzkern, because a very
+# big dictionary would increase the memory usage too much in the multi-call
+# decompression mode. A BCJ filter isn't used either.
+quiet_cmd_xzkern = XZKERN  $@
+cmd_xzkern = (cat $(filter-out FORCE,$^) | \
+	sh $(srctree)/scripts/xz_wrap.sh && \
+	$(call size_append, $(filter-out FORCE,$^))) > $@ || \
+	(rm -f $@ ; false)
+
+quiet_cmd_xzmisc = XZMISC  $@
+cmd_xzmisc = (cat $(filter-out FORCE,$^) | \
+	xz --check=crc32 --lzma2=dict=1MiB) > $@ || \
+	(rm -f $@ ; false)
+
 # misc stuff
 # ---------------------------------------------------------------------------
 quote:="
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index e3c7fc0..4c0383d 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -859,7 +859,7 @@
 				$av_preprocessor = 0;
 			}
 
-		} elsif ($cur =~ /^(\(\s*$Type\s*)\)/) {
+		} elsif ($cur =~ /^(\(\s*$Type\s*)\)/ && $av_pending eq '_') {
 			print "CAST($1)\n" if ($dbg_values > 1);
 			push(@av_paren_type, $type);
 			$type = 'C';
@@ -2743,6 +2743,11 @@
 			WARN("plain inline is preferred over $1\n" . $herecurr);
 		}
 
+# Check for __attribute__ packed, prefer __packed
+		if ($line =~ /\b__attribute__\s*\(\s*\(.*\bpacked\b/) {
+			WARN("__packed is preferred over __attribute__((packed))\n" . $herecurr);
+		}
+
 # check for sizeof(&)
 		if ($line =~ /\bsizeof\s*\(\s*\&/) {
 			WARN("sizeof(& should be avoided\n" . $herecurr);
@@ -2785,10 +2790,15 @@
 		}
 
 # check for pointless casting of kmalloc return
-		if ($line =~ /\*\s*\)\s*k[czm]alloc\b/) {
+		if ($line =~ /\*\s*\)\s*[kv][czm]alloc(_node){0,1}\b/) {
 			WARN("unnecessary cast may hide bugs, see http://c-faq.com/malloc/mallocnocast.html\n" . $herecurr);
 		}
 
+# check for multiple semicolons
+		if ($line =~ /;\s*;\s*$/) {
+		    WARN("Statements terminations use 1 semicolon\n" . $herecurr);
+		}
+
 # check for gcc specific __FUNCTION__
 		if ($line =~ /__FUNCTION__/) {
 			WARN("__func__ should be used instead of gcc specific __FUNCTION__\n"  . $herecurr);
@@ -2892,6 +2902,11 @@
 				ERROR("lockdep_no_validate class is reserved for device->mutex.\n" . $herecurr);
 			}
 		}
+
+		if ($line =~ /debugfs_create_file.*S_IWUGO/ ||
+		    $line =~ /DEVICE_ATTR.*S_IWUGO/ ) {
+			WARN("Exporting world writable files is usually an error. Consider more restrictive permissions.\n" . $herecurr);
+		}
 	}
 
 	# If we have no input at all, then there is nothing to report on
diff --git a/scripts/gen_initramfs_list.sh b/scripts/gen_initramfs_list.sh
index 5958fff..55caecd 100644
--- a/scripts/gen_initramfs_list.sh
+++ b/scripts/gen_initramfs_list.sh
@@ -243,6 +243,8 @@
 		echo "$output_file" | grep -q "\.gz$" && compr="gzip -9 -f"
 		echo "$output_file" | grep -q "\.bz2$" && compr="bzip2 -9 -f"
 		echo "$output_file" | grep -q "\.lzma$" && compr="lzma -9 -f"
+		echo "$output_file" | grep -q "\.xz$" && \
+				compr="xz --check=crc32 --lzma2=dict=1MiB"
 		echo "$output_file" | grep -q "\.lzo$" && compr="lzop -9 -f"
 		echo "$output_file" | grep -q "\.cpio$" && compr="cat"
 		shift
diff --git a/scripts/get_maintainer.pl b/scripts/get_maintainer.pl
index d21ec3a..139e0ff 100755
--- a/scripts/get_maintainer.pl
+++ b/scripts/get_maintainer.pl
@@ -13,7 +13,7 @@
 use strict;
 
 my $P = $0;
-my $V = '0.26-beta6';
+my $V = '0.26';
 
 use Getopt::Long qw(:config no_auto_abbrev);
 
@@ -40,7 +40,7 @@
 my $output_multiline = 1;
 my $output_separator = ", ";
 my $output_roles = 0;
-my $output_rolestats = 0;
+my $output_rolestats = 1;
 my $scm = 0;
 my $web = 0;
 my $subsystem = 0;
@@ -494,6 +494,40 @@
 
 exit($exit);
 
+sub range_is_maintained {
+    my ($start, $end) = @_;
+
+    for (my $i = $start; $i < $end; $i++) {
+	my $line = $typevalue[$i];
+	if ($line =~ m/^(\C):\s*(.*)/) {
+	    my $type = $1;
+	    my $value = $2;
+	    if ($type eq 'S') {
+		if ($value =~ /(maintain|support)/i) {
+		    return 1;
+		}
+	    }
+	}
+    }
+    return 0;
+}
+
+sub range_has_maintainer {
+    my ($start, $end) = @_;
+
+    for (my $i = $start; $i < $end; $i++) {
+	my $line = $typevalue[$i];
+	if ($line =~ m/^(\C):\s*(.*)/) {
+	    my $type = $1;
+	    my $value = $2;
+	    if ($type eq 'M') {
+		return 1;
+	    }
+	}
+    }
+    return 0;
+}
+
 sub get_maintainers {
     %email_hash_name = ();
     %email_hash_address = ();
@@ -556,7 +590,9 @@
 				my $file_pd = ($file  =~ tr@/@@);
 				$value_pd++ if (substr($value,-1,1) ne "/");
 				$value_pd = -1 if ($value =~ /^\.\*/);
-				if ($value_pd >= $file_pd) {
+				if ($value_pd >= $file_pd &&
+				    range_is_maintained($start, $end) &&
+				    range_has_maintainer($start, $end)) {
 				    $exact_pattern_match_hash{$file} = 1;
 				}
 				if ($pattern_depth == 0 ||
@@ -720,7 +756,8 @@
   --help => show this help information
 
 Default options:
-  [--email --git --m --n --l --multiline --pattern-depth=0 --remove-duplicates]
+  [--email --nogit --git-fallback --m --n --l --multiline --pattern-depth=0
+   --remove-duplicates --rolestats]
 
 Notes:
   Using "-f directory" may give unexpected results:
diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
index 97d2259..e8fba95 100644
--- a/scripts/mod/modpost.c
+++ b/scripts/mod/modpost.c
@@ -1615,7 +1615,7 @@
  * A module includes a number of sections that are discarded
  * either when loaded or when used as built-in.
  * For loaded modules all functions marked __init and all data
- * marked __initdata will be discarded when the module has been intialized.
+ * marked __initdata will be discarded when the module has been initialized.
  * Likewise for modules used built-in the sections marked __exit
  * are discarded because __exit marked function are supposed to be called
  * only when a module is unloaded which never happens for built-in modules.
diff --git a/scripts/xz_wrap.sh b/scripts/xz_wrap.sh
new file mode 100644
index 0000000..17a5798
--- /dev/null
+++ b/scripts/xz_wrap.sh
@@ -0,0 +1,23 @@
+#!/bin/sh
+#
+# This is a wrapper for xz to compress the kernel image using appropriate
+# compression options depending on the architecture.
+#
+# Author: Lasse Collin <lasse.collin@tukaani.org>
+#
+# This file has been put into the public domain.
+# You can do whatever you want with this file.
+#
+
+BCJ=
+LZMA2OPTS=
+
+case $ARCH in
+	x86|x86_64)     BCJ=--x86 ;;
+	powerpc)        BCJ=--powerpc ;;
+	ia64)           BCJ=--ia64; LZMA2OPTS=pb=4 ;;
+	arm)            BCJ=--arm ;;
+	sparc)          BCJ=--sparc ;;
+esac
+
+exec xz --check=crc32 $BCJ --lzma2=$LZMA2OPTS,dict=32MiB
diff --git a/security/apparmor/include/match.h b/security/apparmor/include/match.h
index 19ba16e..a4a8639 100644
--- a/security/apparmor/include/match.h
+++ b/security/apparmor/include/match.h
@@ -28,7 +28,7 @@
  * The format used for transition tables is based on the GNU flex table
  * file format (--tables-file option; see Table File Format in the flex
  * info pages and the flex sources for documentation). The magic number
- * used in the header is 0x1B5E783D insted of 0xF13C57B1 though, because
+ * used in the header is 0x1B5E783D instead of 0xF13C57B1 though, because
  * the YY_ID_CHK (check) and YY_ID_DEF (default) tables are used
  * slightly differently (see the apparmor-parser package).
  */
diff --git a/sound/ac97_bus.c b/sound/ac97_bus.c
index a351dd0..2b50cbe 100644
--- a/sound/ac97_bus.c
+++ b/sound/ac97_bus.c
@@ -19,8 +19,8 @@
 
 /*
  * Let drivers decide whether they want to support given codec from their
- * probe method.  Drivers have direct access to the struct snd_ac97 structure and may
- * decide based on the id field amongst other things.
+ * probe method. Drivers have direct access to the struct snd_ac97
+ * structure and may decide based on the id field amongst other things.
  */
 static int ac97_bus_match(struct device *dev, struct device_driver *drv)
 {
diff --git a/sound/aoa/codecs/onyx.c b/sound/aoa/codecs/onyx.c
index 91852e4..3687a6c 100644
--- a/sound/aoa/codecs/onyx.c
+++ b/sound/aoa/codecs/onyx.c
@@ -1114,7 +1114,6 @@
 	of_node_put(onyx->codec.node);
 	if (onyx->codec_info)
 		kfree(onyx->codec_info);
-	i2c_set_clientdata(client, onyx);
 	kfree(onyx);
 	return 0;
 }
diff --git a/sound/aoa/core/gpio-feature.c b/sound/aoa/core/gpio-feature.c
index de8e03a..faa3174 100644
--- a/sound/aoa/core/gpio-feature.c
+++ b/sound/aoa/core/gpio-feature.c
@@ -287,10 +287,9 @@
 		free_irq(linein_detect_irq, &rt->line_in_notify);
 	if (rt->line_out_notify.gpio_private)
 		free_irq(lineout_detect_irq, &rt->line_out_notify);
-	cancel_delayed_work(&rt->headphone_notify.work);
-	cancel_delayed_work(&rt->line_in_notify.work);
-	cancel_delayed_work(&rt->line_out_notify.work);
-	flush_scheduled_work();
+	cancel_delayed_work_sync(&rt->headphone_notify.work);
+	cancel_delayed_work_sync(&rt->line_in_notify.work);
+	cancel_delayed_work_sync(&rt->line_out_notify.work);
 	mutex_destroy(&rt->headphone_notify.mutex);
 	mutex_destroy(&rt->line_in_notify.mutex);
 	mutex_destroy(&rt->line_out_notify.mutex);
diff --git a/sound/aoa/core/gpio-pmf.c b/sound/aoa/core/gpio-pmf.c
index 7e267c9..c8d8a1a 100644
--- a/sound/aoa/core/gpio-pmf.c
+++ b/sound/aoa/core/gpio-pmf.c
@@ -107,10 +107,9 @@
 
 	/* make sure no work is pending before freeing
 	 * all things */
-	cancel_delayed_work(&rt->headphone_notify.work);
-	cancel_delayed_work(&rt->line_in_notify.work);
-	cancel_delayed_work(&rt->line_out_notify.work);
-	flush_scheduled_work();
+	cancel_delayed_work_sync(&rt->headphone_notify.work);
+	cancel_delayed_work_sync(&rt->line_in_notify.work);
+	cancel_delayed_work_sync(&rt->line_out_notify.work);
 
 	mutex_destroy(&rt->headphone_notify.mutex);
 	mutex_destroy(&rt->line_in_notify.mutex);
diff --git a/sound/core/control.c b/sound/core/control.c
index 45a8180..9ce00ed 100644
--- a/sound/core/control.c
+++ b/sound/core/control.c
@@ -1488,7 +1488,7 @@
 }
 
 /*
- * Frequently used control callbacks
+ * Frequently used control callbacks/helpers
  */
 int snd_ctl_boolean_mono_info(struct snd_kcontrol *kcontrol,
 			      struct snd_ctl_elem_info *uinfo)
@@ -1513,3 +1513,29 @@
 }
 
 EXPORT_SYMBOL(snd_ctl_boolean_stereo_info);
+
+/**
+ * snd_ctl_enum_info - fills the info structure for an enumerated control
+ * @info: the structure to be filled
+ * @channels: the number of the control's channels; often one
+ * @items: the number of control values; also the size of @names
+ * @names: an array containing the names of all control values
+ *
+ * Sets all required fields in @info to their appropriate values.
+ * If the control's accessibility is not the default (readable and writable),
+ * the caller has to fill @info->access.
+ */
+int snd_ctl_enum_info(struct snd_ctl_elem_info *info, unsigned int channels,
+		      unsigned int items, const char *const names[])
+{
+	info->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
+	info->count = channels;
+	info->value.enumerated.items = items;
+	if (info->value.enumerated.item >= items)
+		info->value.enumerated.item = items - 1;
+	strlcpy(info->value.enumerated.name,
+		names[info->value.enumerated.item],
+		sizeof(info->value.enumerated.name));
+	return 0;
+}
+EXPORT_SYMBOL(snd_ctl_enum_info);
diff --git a/sound/core/init.c b/sound/core/init.c
index 57b792e2..3e65da2 100644
--- a/sound/core/init.c
+++ b/sound/core/init.c
@@ -642,7 +642,7 @@
  *  external accesses.  Thus, you should call this function at the end
  *  of the initialization of the card.
  *
- *  Returns zero otherwise a negative error code if the registrain failed.
+ *  Returns zero, otherwise a negative error code if the registration failed.
  */
 int snd_card_register(struct snd_card *card)
 {
diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c
index b753ec6..a2e4eb3 100644
--- a/sound/core/oss/pcm_oss.c
+++ b/sound/core/oss/pcm_oss.c
@@ -453,8 +453,10 @@
 	} else {
 		*params = *save;
 		max = snd_pcm_hw_param_max(pcm, params, var, max, &maxdir);
-		if (max < 0)
+		if (max < 0) {
+			kfree(save);
 			return max;
+		}
 		last = 1;
 	}
  _end:
diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c
index 11446a1..a82e3756 100644
--- a/sound/core/pcm_lib.c
+++ b/sound/core/pcm_lib.c
@@ -373,6 +373,27 @@
 			   (unsigned long)new_hw_ptr,
 			   (unsigned long)runtime->hw_ptr_base);
 	}
+
+	if (runtime->no_period_wakeup) {
+		/*
+		 * Without regular period interrupts, we have to check
+		 * the elapsed time to detect xruns.
+		 */
+		jdelta = jiffies - runtime->hw_ptr_jiffies;
+		if (jdelta < runtime->hw_ptr_buffer_jiffies / 2)
+			goto no_delta_check;
+		hdelta = jdelta - delta * HZ / runtime->rate;
+		while (hdelta > runtime->hw_ptr_buffer_jiffies / 2 + 1) {
+			delta += runtime->buffer_size;
+			hw_base += runtime->buffer_size;
+			if (hw_base >= runtime->boundary)
+				hw_base = 0;
+			new_hw_ptr = hw_base + pos;
+			hdelta -= runtime->hw_ptr_buffer_jiffies;
+		}
+		goto no_delta_check;
+	}
+
 	/* something must be really wrong */
 	if (delta >= runtime->buffer_size + runtime->period_size) {
 		hw_ptr_error(substream,
@@ -442,6 +463,7 @@
 			     (long)old_hw_ptr);
 	}
 
+ no_delta_check:
 	if (runtime->status->hw_ptr == new_hw_ptr)
 		return 0;
 
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index e82c1f9..4be45e7 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -422,6 +422,9 @@
 	runtime->info = params->info;
 	runtime->rate_num = params->rate_num;
 	runtime->rate_den = params->rate_den;
+	runtime->no_period_wakeup =
+			(params->info & SNDRV_PCM_INFO_NO_PERIOD_WAKEUP) &&
+			(params->flags & SNDRV_PCM_HW_PARAMS_NO_PERIOD_WAKEUP);
 
 	bits = snd_pcm_format_physical_width(runtime->format);
 	runtime->sample_bits = bits;
@@ -984,7 +987,7 @@
 	if (push)
 		snd_pcm_update_hw_ptr(substream);
 	/* The jiffies check in snd_pcm_update_hw_ptr*() is done by
-	 * a delta betwen the current jiffies, this gives a large enough
+	 * a delta between the current jiffies, this gives a large enough
 	 * delta, effectively to skip the check once.
 	 */
 	substream->runtime->hw_ptr_jiffies = jiffies - HZ * 1000;
diff --git a/sound/core/seq/seq.c b/sound/core/seq/seq.c
index bf09a5a..119fddb6 100644
--- a/sound/core/seq/seq.c
+++ b/sound/core/seq/seq.c
@@ -32,6 +32,7 @@
 #include "seq_timer.h"
 #include "seq_system.h"
 #include "seq_info.h"
+#include <sound/minors.h>
 #include <sound/seq_device.h>
 
 #if defined(CONFIG_SND_SEQ_DUMMY_MODULE)
@@ -73,6 +74,9 @@
 module_param(seq_default_timer_resolution, int, 0644);
 MODULE_PARM_DESC(seq_default_timer_resolution, "The default timer resolution in Hz.");
 
+MODULE_ALIAS_CHARDEV(CONFIG_SND_MAJOR, SNDRV_MINOR_SEQUENCER);
+MODULE_ALIAS("devname:snd/seq");
+
 /*
  *  INIT PART
  */
diff --git a/sound/core/sound.c b/sound/core/sound.c
index 66691fe..1c7a3ef 100644
--- a/sound/core/sound.c
+++ b/sound/core/sound.c
@@ -188,14 +188,22 @@
 };
 
 #ifdef CONFIG_SND_DYNAMIC_MINORS
-static int snd_find_free_minor(void)
+static int snd_find_free_minor(int type)
 {
 	int minor;
 
+	/* static minors for module auto loading */
+	if (type == SNDRV_DEVICE_TYPE_SEQUENCER)
+		return SNDRV_MINOR_SEQUENCER;
+	if (type == SNDRV_DEVICE_TYPE_TIMER)
+		return SNDRV_MINOR_TIMER;
+
 	for (minor = 0; minor < ARRAY_SIZE(snd_minors); ++minor) {
-		/* skip minors still used statically for autoloading devices */
-		if (SNDRV_MINOR_DEVICE(minor) == SNDRV_MINOR_CONTROL ||
-		    minor == SNDRV_MINOR_SEQUENCER)
+		/* skip static minors still used for module auto loading */
+		if (SNDRV_MINOR_DEVICE(minor) == SNDRV_MINOR_CONTROL)
+			continue;
+		if (minor == SNDRV_MINOR_SEQUENCER ||
+		    minor == SNDRV_MINOR_TIMER)
 			continue;
 		if (!snd_minors[minor])
 			return minor;
@@ -269,7 +277,7 @@
 	preg->private_data = private_data;
 	mutex_lock(&sound_mutex);
 #ifdef CONFIG_SND_DYNAMIC_MINORS
-	minor = snd_find_free_minor();
+	minor = snd_find_free_minor(type);
 #else
 	minor = snd_kernel_minor(type, card, dev);
 	if (minor >= 0 && snd_minors[minor])
diff --git a/sound/core/timer.c b/sound/core/timer.c
index 13afb60..ed016329 100644
--- a/sound/core/timer.c
+++ b/sound/core/timer.c
@@ -34,8 +34,8 @@
 #include <sound/initval.h>
 #include <linux/kmod.h>
 
-#if defined(CONFIG_SND_HPET) || defined(CONFIG_SND_HPET_MODULE)
-#define DEFAULT_TIMER_LIMIT 3
+#if defined(CONFIG_SND_HRTIMER) || defined(CONFIG_SND_HRTIMER_MODULE)
+#define DEFAULT_TIMER_LIMIT 4
 #elif defined(CONFIG_SND_RTCTIMER) || defined(CONFIG_SND_RTCTIMER_MODULE)
 #define DEFAULT_TIMER_LIMIT 2
 #else
@@ -52,6 +52,9 @@
 module_param(timer_tstamp_monotonic, int, 0444);
 MODULE_PARM_DESC(timer_tstamp_monotonic, "Use posix monotonic clock source for timestamps (default).");
 
+MODULE_ALIAS_CHARDEV(CONFIG_SND_MAJOR, SNDRV_MINOR_TIMER);
+MODULE_ALIAS("devname:snd/timer");
+
 struct snd_timer_user {
 	struct snd_timer_instance *timeri;
 	int tread;		/* enhanced read with timestamps and events */
diff --git a/sound/drivers/ml403-ac97cr.c b/sound/drivers/ml403-ac97cr.c
index a1282c1..5cfcb90 100644
--- a/sound/drivers/ml403-ac97cr.c
+++ b/sound/drivers/ml403-ac97cr.c
@@ -1143,8 +1143,8 @@
 					     (resource->start) + 1);
 	if (ml403_ac97cr->port == NULL) {
 		snd_printk(KERN_ERR SND_ML403_AC97CR_DRIVER ": "
-			   "unable to remap memory region (%x to %x)\n",
-			   resource->start, resource->end);
+			   "unable to remap memory region (%pR)\n",
+			   resource);
 		snd_ml403_ac97cr_free(ml403_ac97cr);
 		return -EBUSY;
 	}
diff --git a/sound/i2c/other/ak4113.c b/sound/i2c/other/ak4113.c
index 971a84a..c424d32 100644
--- a/sound/i2c/other/ak4113.c
+++ b/sound/i2c/other/ak4113.c
@@ -57,8 +57,7 @@
 {
 	chip->init = 1;	/* don't schedule new work */
 	mb();
-	cancel_delayed_work(&chip->work);
-	flush_scheduled_work();
+	cancel_delayed_work_sync(&chip->work);
 	kfree(chip);
 }
 
@@ -141,7 +140,7 @@
 {
 	chip->init = 1;
 	mb();
-	flush_scheduled_work();
+	flush_delayed_work_sync(&chip->work);
 	ak4113_init_regs(chip);
 	/* bring up statistics / event queing */
 	chip->init = 0;
diff --git a/sound/i2c/other/ak4114.c b/sound/i2c/other/ak4114.c
index 0341451..d9fb537 100644
--- a/sound/i2c/other/ak4114.c
+++ b/sound/i2c/other/ak4114.c
@@ -67,8 +67,7 @@
 {
 	chip->init = 1;	/* don't schedule new work */
 	mb();
-	cancel_delayed_work(&chip->work);
-	flush_scheduled_work();
+	cancel_delayed_work_sync(&chip->work);
 	kfree(chip);
 }
 
@@ -154,7 +153,7 @@
 {
 	chip->init = 1;
 	mb();
-	flush_scheduled_work();
+	flush_delayed_work_sync(&chip->work);
 	ak4114_init_regs(chip);
 	/* bring up statistics / event queing */
 	chip->init = 0;
diff --git a/sound/isa/opl3sa2.c b/sound/isa/opl3sa2.c
index 265abcce..9b915e2 100644
--- a/sound/isa/opl3sa2.c
+++ b/sound/isa/opl3sa2.c
@@ -264,7 +264,7 @@
 		snd_printd("OPL3-SA [0x%lx] detect (1) = 0x%x (0x%x)\n", port, tmp, tmp1);
 		return -ENODEV;
 	}
-	/* try if the MIC register is accesible */
+	/* try if the MIC register is accessible */
 	tmp = snd_opl3sa2_read(chip, OPL3SA2_MIC);
 	snd_opl3sa2_write(chip, OPL3SA2_MIC, 0x8a);
 	if (((tmp1 = snd_opl3sa2_read(chip, OPL3SA2_MIC)) & 0x9f) != 0x8a) {
diff --git a/sound/pci/Kconfig b/sound/pci/Kconfig
index 12e3465..9823d59 100644
--- a/sound/pci/Kconfig
+++ b/sound/pci/Kconfig
@@ -209,7 +209,7 @@
         tristate
 
 config SND_OXYGEN
-	tristate "C-Media 8788 (Oxygen)"
+	tristate "C-Media 8786, 8787, 8788 (Oxygen)"
 	select SND_OXYGEN_LIB
 	select SND_PCM
 	select SND_MPU401_UART
@@ -217,13 +217,18 @@
 	  Say Y here to include support for sound cards based on the
 	  C-Media CMI8788 (Oxygen HD Audio) chip:
 	   * Asound A-8788
+	   * Asus Xonar DG
 	   * AuzenTech X-Meridian
+	   * AuzenTech X-Meridian 2G
 	   * Bgears b-Enspirer
 	   * Club3D Theatron DTS
 	   * HT-Omega Claro (plus)
 	   * HT-Omega Claro halo (XT)
+	   * Kuroutoshikou CMI8787-HG2PCI
 	   * Razer Barracuda AC-1
 	   * Sondigo Inferno
+	   * TempoTec/MediaTek HiFier Fantasia
+	   * TempoTec/MediaTek HiFier Serenade
 
 	  To compile this driver as a module, choose M here: the module
 	  will be called snd-oxygen.
@@ -578,18 +583,6 @@
 	  To compile this driver as a module, choose M here: the module
 	  will be called snd-hdspm.
 
-config SND_HIFIER
-	tristate "TempoTec HiFier Fantasia"
-	select SND_OXYGEN_LIB
-	select SND_PCM
-	select SND_MPU401_UART
-	help
-	  Say Y here to include support for the MediaTek/TempoTec HiFier
-	  Fantasia sound card.
-
-	  To compile this driver as a module, choose M here: the module
-	  will be called snd-hifier.
-
 config SND_ICE1712
 	tristate "ICEnsemble ICE1712 (Envy24)"
 	select SND_MPU401_UART
@@ -826,8 +819,8 @@
 	  Say Y here to include support for sound cards based on the
 	  Asus AV66/AV100/AV200 chips, i.e., Xonar D1, DX, D2, D2X, DS,
 	  Essence ST (Deluxe), and Essence STX.
-	  Support for the HDAV1.3 (Deluxe) is incomplete; for the
-	  HDAV1.3 Slim and Xense, missing.
+	  Support for the HDAV1.3 (Deluxe) and HDAV1.3 Slim is experimental;
+	  for the Xense, missing.
 
 	  To compile this driver as a module, choose M here: the module
 	  will be called snd-virtuoso.
diff --git a/sound/pci/ac97/ac97_codec.c b/sound/pci/ac97/ac97_codec.c
index a7630e9e..0fc614c 100644
--- a/sound/pci/ac97/ac97_codec.c
+++ b/sound/pci/ac97/ac97_codec.c
@@ -1014,8 +1014,7 @@
 {
 	if (ac97) {
 #ifdef CONFIG_SND_AC97_POWER_SAVE
-		cancel_delayed_work(&ac97->power_work);
-		flush_scheduled_work();
+		cancel_delayed_work_sync(&ac97->power_work);
 #endif
 		snd_ac97_proc_done(ac97);
 		if (ac97->bus)
@@ -2456,8 +2455,7 @@
 	if (ac97->build_ops->suspend)
 		ac97->build_ops->suspend(ac97);
 #ifdef CONFIG_SND_AC97_POWER_SAVE
-	cancel_delayed_work(&ac97->power_work);
-	flush_scheduled_work();
+	cancel_delayed_work_sync(&ac97->power_work);
 #endif
 	snd_ac97_powerdown(ac97);
 }
diff --git a/sound/pci/azt3328.c b/sound/pci/azt3328.c
index 2f3cacb..6117595 100644
--- a/sound/pci/azt3328.c
+++ b/sound/pci/azt3328.c
@@ -1,6 +1,6 @@
 /*
  *  azt3328.c - driver for Aztech AZF3328 based soundcards (e.g. PCI168).
- *  Copyright (C) 2002, 2005 - 2009 by Andreas Mohr <andi AT lisas.de>
+ *  Copyright (C) 2002, 2005 - 2010 by Andreas Mohr <andi AT lisas.de>
  *
  *  Framework borrowed from Bart Hartgers's als4000.c.
  *  Driver developed on PCI168 AP(W) version (PCI rev. 10, subsystem ID 1801),
@@ -175,6 +175,7 @@
 
 #include <asm/io.h>
 #include <linux/init.h>
+#include <linux/bug.h> /* WARN_ONCE */
 #include <linux/pci.h>
 #include <linux/delay.h>
 #include <linux/slab.h>
@@ -201,14 +202,15 @@
 
 /* === Debug settings ===
   Further diagnostic functionality than the settings below
-  does not need to be provided, since one can easily write a bash script
+  does not need to be provided, since one can easily write a POSIX shell script
   to dump the card's I/O ports (those listed in lspci -v -v):
-  function dump()
+  dump()
   {
     local descr=$1; local addr=$2; local count=$3
 
     echo "${descr}: ${count} @ ${addr}:"
-    dd if=/dev/port skip=$[${addr}] count=${count} bs=1 2>/dev/null| hexdump -C
+    dd if=/dev/port skip=`printf %d ${addr}` count=${count} bs=1 \
+      2>/dev/null| hexdump -C
   }
   and then use something like
   "dump joy200 0x200 8", "dump mpu388 0x388 4", "dump joy 0xb400 8",
@@ -216,14 +218,14 @@
   possibly within a "while true; do ... sleep 1; done" loop.
   Tweaking ports could be done using
   VALSTRING="`printf "%02x" $value`"
-  printf "\x""$VALSTRING"|dd of=/dev/port seek=$[${addr}] bs=1 2>/dev/null
+  printf "\x""$VALSTRING"|dd of=/dev/port seek=`printf %d ${addr}` bs=1 \
+    2>/dev/null
 */
 
 #define DEBUG_MISC	0
 #define DEBUG_CALLS	0
 #define DEBUG_MIXER	0
 #define DEBUG_CODEC	0
-#define DEBUG_IO	0
 #define DEBUG_TIMER	0
 #define DEBUG_GAME	0
 #define DEBUG_PM	0
@@ -291,19 +293,23 @@
 module_param(seqtimer_scaling, int, 0444);
 MODULE_PARM_DESC(seqtimer_scaling, "Set 1024000Hz sequencer timer scale factor (lockup danger!). Default 128.");
 
-struct snd_azf3328_codec_data {
-	unsigned long io_base;
-	struct snd_pcm_substream *substream;
-	bool running;
-	const char *name;
-};
-
 enum snd_azf3328_codec_type {
+  /* warning: fixed indices (also used for bitmask checks!) */
   AZF_CODEC_PLAYBACK = 0,
   AZF_CODEC_CAPTURE = 1,
   AZF_CODEC_I2S_OUT = 2,
 };
 
+struct snd_azf3328_codec_data {
+	unsigned long io_base; /* keep first! (avoid offset calc) */
+	unsigned int dma_base; /* helper to avoid an indirection in hotpath */
+	spinlock_t *lock; /* TODO: convert to our own per-codec lock member */
+	struct snd_pcm_substream *substream;
+	bool running;
+	enum snd_azf3328_codec_type type;
+	const char *name;
+};
+
 struct snd_azf3328 {
 	/* often-used fields towards beginning, then grouped */
 
@@ -362,6 +368,9 @@
 static int
 snd_azf3328_io_reg_setb(unsigned reg, u8 mask, bool do_set)
 {
+	/* Well, strictly speaking, the inb/outb sequence isn't atomic
+	   and would need locking. However we currently don't care
+	   since it potentially complicates matters. */
 	u8 prev = inb(reg), new;
 
 	new = (do_set) ? (prev|mask) : (prev & ~mask);
@@ -413,6 +422,21 @@
 	outl(value, codec->io_base + reg);
 }
 
+static inline void
+snd_azf3328_codec_outl_multi(const struct snd_azf3328_codec_data *codec,
+			     unsigned reg, const void *buffer, int count
+)
+{
+	unsigned long addr = codec->io_base + reg;
+	if (count) {
+		const u32 *buf = buffer;
+		do {
+			outl(*buf++, addr);
+			addr += 4;
+		} while (--count);
+	}
+}
+
 static inline u32
 snd_azf3328_codec_inl(const struct snd_azf3328_codec_data *codec, unsigned reg)
 {
@@ -943,38 +967,43 @@
 }
 
 static void
-snd_azf3328_codec_setfmt(struct snd_azf3328 *chip,
-			       enum snd_azf3328_codec_type codec_type,
+snd_azf3328_codec_setfmt(struct snd_azf3328_codec_data *codec,
 			       enum azf_freq_t bitrate,
 			       unsigned int format_width,
 			       unsigned int channels
 )
 {
 	unsigned long flags;
-	const struct snd_azf3328_codec_data *codec = &chip->codecs[codec_type];
 	u16 val = 0xff00;
+	u8 freq = 0;
 
 	snd_azf3328_dbgcallenter();
 	switch (bitrate) {
-	case AZF_FREQ_4000:  val |= SOUNDFORMAT_FREQ_SUSPECTED_4000; break;
-	case AZF_FREQ_4800:  val |= SOUNDFORMAT_FREQ_SUSPECTED_4800; break;
-	case AZF_FREQ_5512:
-		/* the AZF3328 names it "5510" for some strange reason */
-			     val |= SOUNDFORMAT_FREQ_5510; break;
-	case AZF_FREQ_6620:  val |= SOUNDFORMAT_FREQ_6620; break;
-	case AZF_FREQ_8000:  val |= SOUNDFORMAT_FREQ_8000; break;
-	case AZF_FREQ_9600:  val |= SOUNDFORMAT_FREQ_9600; break;
-	case AZF_FREQ_11025: val |= SOUNDFORMAT_FREQ_11025; break;
-	case AZF_FREQ_13240: val |= SOUNDFORMAT_FREQ_SUSPECTED_13240; break;
-	case AZF_FREQ_16000: val |= SOUNDFORMAT_FREQ_16000; break;
-	case AZF_FREQ_22050: val |= SOUNDFORMAT_FREQ_22050; break;
-	case AZF_FREQ_32000: val |= SOUNDFORMAT_FREQ_32000; break;
+#define AZF_FMT_XLATE(in_freq, out_bits) \
+	do { \
+		case AZF_FREQ_ ## in_freq: \
+			freq = SOUNDFORMAT_FREQ_ ## out_bits; \
+			break; \
+	} while (0);
+	AZF_FMT_XLATE(4000, SUSPECTED_4000)
+	AZF_FMT_XLATE(4800, SUSPECTED_4800)
+	/* the AZF3328 names it "5510" for some strange reason: */
+	AZF_FMT_XLATE(5512, 5510)
+	AZF_FMT_XLATE(6620, 6620)
+	AZF_FMT_XLATE(8000, 8000)
+	AZF_FMT_XLATE(9600, 9600)
+	AZF_FMT_XLATE(11025, 11025)
+	AZF_FMT_XLATE(13240, SUSPECTED_13240)
+	AZF_FMT_XLATE(16000, 16000)
+	AZF_FMT_XLATE(22050, 22050)
+	AZF_FMT_XLATE(32000, 32000)
 	default:
 		snd_printk(KERN_WARNING "unknown bitrate %d, assuming 44.1kHz!\n", bitrate);
 		/* fall-through */
-	case AZF_FREQ_44100: val |= SOUNDFORMAT_FREQ_44100; break;
-	case AZF_FREQ_48000: val |= SOUNDFORMAT_FREQ_48000; break;
-	case AZF_FREQ_66200: val |= SOUNDFORMAT_FREQ_SUSPECTED_66200; break;
+	AZF_FMT_XLATE(44100, 44100)
+	AZF_FMT_XLATE(48000, 48000)
+	AZF_FMT_XLATE(66200, SUSPECTED_66200)
+#undef AZF_FMT_XLATE
 	}
 	/* val = 0xff07; 3m27.993s (65301Hz; -> 64000Hz???) hmm, 66120, 65967, 66123 */
 	/* val = 0xff09; 17m15.098s (13123,478Hz; -> 12000Hz???) hmm, 13237.2Hz? */
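
  Editorial note: each AZF_FMT_XLATE() invocation in the hunk above expands to
  a case label wrapped in a do { } while (0) block. A minimal sketch of one
  expansion, using identifiers from the hunk (illustrative only):

	/* expansion of AZF_FMT_XLATE(8000, 8000): */
	do {
		case AZF_FREQ_8000:
			freq = SOUNDFORMAT_FREQ_8000;
			break;
	} while (0);

  One subtlety to keep in mind: per C semantics, a break inside a
  do { } while (0) terminates that loop rather than the enclosing switch, a
  concern the plain "case ...: val |= ...; break;" lines being removed here
  did not have.
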
@@ -986,13 +1015,15 @@
 	/* val = 0xff0d; 41m23.135s (5523,600Hz; -> 5512Hz???) */
 	/* val = 0xff0e; 28m30.777s (8017Hz; -> 8000Hz???) */
 
+	val |= freq;
+
 	if (channels == 2)
 		val |= SOUNDFORMAT_FLAG_2CHANNELS;
 
 	if (format_width == 16)
 		val |= SOUNDFORMAT_FLAG_16BIT;
 
-	spin_lock_irqsave(&chip->reg_lock, flags);
+	spin_lock_irqsave(codec->lock, flags);
 
 	/* set bitrate/format */
 	snd_azf3328_codec_outw(codec, IDX_IO_CODEC_SOUNDFORMAT, val);
@@ -1004,7 +1035,8 @@
 	 * (FIXME: yes, it works, but what exactly am I doing here?? :)
 	 * FIXME: does this have some side effects for full-duplex
 	 * or other dramatic side effects? */
-	if (codec_type == AZF_CODEC_PLAYBACK) /* only do it for playback */
+	/* do it for non-capture codecs only */
+	if (codec->type != AZF_CODEC_CAPTURE)
 		snd_azf3328_codec_outw(codec, IDX_IO_CODEC_DMA_FLAGS,
 			snd_azf3328_codec_inw(codec, IDX_IO_CODEC_DMA_FLAGS) |
 			DMA_RUN_SOMETHING1 |
@@ -1014,20 +1046,19 @@
 			DMA_SOMETHING_ELSE
 		);
 
-	spin_unlock_irqrestore(&chip->reg_lock, flags);
+	spin_unlock_irqrestore(codec->lock, flags);
 	snd_azf3328_dbgcallleave();
 }
 
 static inline void
-snd_azf3328_codec_setfmt_lowpower(struct snd_azf3328 *chip,
-			    enum snd_azf3328_codec_type codec_type
+snd_azf3328_codec_setfmt_lowpower(struct snd_azf3328_codec_data *codec
 )
 {
 	/* choose lowest frequency for low power consumption.
 	 * While this will cause louder noise due to rather coarse frequency,
 	 * it should never matter since output should always
 	 * get disabled properly when idle anyway. */
-	snd_azf3328_codec_setfmt(chip, codec_type, AZF_FREQ_4000, 8, 1);
+	snd_azf3328_codec_setfmt(codec, AZF_FREQ_4000, 8, 1);
 }
 
 static void
@@ -1101,69 +1132,87 @@
 		/* ...and adjust clock, too
 		 * (reduce noise and power consumption) */
 		if (!enable)
-			snd_azf3328_codec_setfmt_lowpower(
-				chip,
-				codec_type
-			);
+			snd_azf3328_codec_setfmt_lowpower(codec);
 		codec->running = enable;
 	}
 }
 
 static void
-snd_azf3328_codec_setdmaa(struct snd_azf3328 *chip,
-				enum snd_azf3328_codec_type codec_type,
+snd_azf3328_codec_setdmaa(struct snd_azf3328_codec_data *codec,
 				unsigned long addr,
-				unsigned int count,
-				unsigned int size
+				unsigned int period_bytes,
+				unsigned int buffer_bytes
 )
 {
-	const struct snd_azf3328_codec_data *codec = &chip->codecs[codec_type];
 	snd_azf3328_dbgcallenter();
+	WARN_ONCE(period_bytes & 1, "odd period length!?\n");
+	WARN_ONCE(buffer_bytes != 2 * period_bytes,
+		 "missed our input expectations! %u vs. %u\n",
+		 buffer_bytes, period_bytes);
 	if (!codec->running) {
 		/* AZF3328 uses a two buffer pointer DMA transfer approach */
 
-		unsigned long flags, addr_area2;
+		unsigned long flags;
 
 		/* width 32bit (prevent overflow): */
-		u32 count_areas, lengths;
+		u32 area_length;
+		struct codec_setup_io {
+			u32 dma_start_1;
+			u32 dma_start_2;
+			u32 dma_lengths;
+		} __attribute__((packed)) setup_io;
 
-		count_areas = size/2;
-		addr_area2 = addr+count_areas;
-		snd_azf3328_dbgcodec("setdma: buffers %08lx[%u] / %08lx[%u]\n",
-				addr, count_areas, addr_area2, count_areas);
+		area_length = buffer_bytes/2;
 
-		count_areas--; /* max. index */
+		setup_io.dma_start_1 = addr;
+		setup_io.dma_start_2 = addr+area_length;
+
+		snd_azf3328_dbgcodec(
+			"setdma: buffers %08x[%u] / %08x[%u], %u, %u\n",
+				setup_io.dma_start_1, area_length,
+				setup_io.dma_start_2, area_length,
+				period_bytes, buffer_bytes);
+
+		/* Hmm, are we really supposed to decrement this by 1?
+		   Most certainly not: configuring the full length does
+		   work properly (i.e. likely better), and besides, the
+		   decrement ignored possibly differing frame sizes...
+
+		area_length--; |* max. index *|
+		*/
 
 		/* build combined I/O buffer length word */
-		lengths = (count_areas << 16) | (count_areas);
-		spin_lock_irqsave(&chip->reg_lock, flags);
-		snd_azf3328_codec_outl(codec, IDX_IO_CODEC_DMA_START_1, addr);
-		snd_azf3328_codec_outl(codec, IDX_IO_CODEC_DMA_START_2,
-								addr_area2);
-		snd_azf3328_codec_outl(codec, IDX_IO_CODEC_DMA_LENGTHS,
-								lengths);
-		spin_unlock_irqrestore(&chip->reg_lock, flags);
+		setup_io.dma_lengths = (area_length << 16) | (area_length);
+
+		spin_lock_irqsave(codec->lock, flags);
+		snd_azf3328_codec_outl_multi(
+			codec, IDX_IO_CODEC_DMA_START_1, &setup_io, 3
+		);
+		spin_unlock_irqrestore(codec->lock, flags);
 	}
 	snd_azf3328_dbgcallleave();
 }
 
 static int
-snd_azf3328_codec_prepare(struct snd_pcm_substream *substream)
+snd_azf3328_pcm_prepare(struct snd_pcm_substream *substream)
 {
-#if 0
-	struct snd_azf3328 *chip = snd_pcm_substream_chip(substream);
 	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct snd_azf3328_codec_data *codec = runtime->private_data;
+#if 0
         unsigned int size = snd_pcm_lib_buffer_bytes(substream);
 	unsigned int count = snd_pcm_lib_period_bytes(substream);
 #endif
 
 	snd_azf3328_dbgcallenter();
+
+	codec->dma_base = runtime->dma_addr;
+
 #if 0
-	snd_azf3328_codec_setfmt(chip, AZF_CODEC_...,
+	snd_azf3328_codec_setfmt(codec,
 		runtime->rate,
 		snd_pcm_format_width(runtime->format),
 		runtime->channels);
-	snd_azf3328_codec_setdmaa(chip, AZF_CODEC_...,
+	snd_azf3328_codec_setdmaa(codec,
 					runtime->dma_addr, count, size);
 #endif
 	snd_azf3328_dbgcallleave();
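
  Editorial note: the setdmaa change above packs the three DMA registers into
  one struct and hands it to the new snd_azf3328_codec_outl_multi() helper.
  Assuming the three IDX_IO_CODEC_DMA_* offsets really are contiguous 32-bit
  registers (which the packed layout relies on), the call boils down to
  roughly:

	/* rough equivalent of
	 * snd_azf3328_codec_outl_multi(codec, IDX_IO_CODEC_DMA_START_1,
	 *                              &setup_io, 3);
	 * given the helper's "outl(*buf++, addr); addr += 4;" loop */
	outl(setup_io.dma_start_1, codec->io_base + IDX_IO_CODEC_DMA_START_1);
	outl(setup_io.dma_start_2, codec->io_base + IDX_IO_CODEC_DMA_START_1 + 4);
	outl(setup_io.dma_lengths, codec->io_base + IDX_IO_CODEC_DMA_START_1 + 8);
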
@@ -1171,24 +1220,23 @@
 }
 
 static int
-snd_azf3328_codec_trigger(enum snd_azf3328_codec_type codec_type,
-			struct snd_pcm_substream *substream, int cmd)
+snd_azf3328_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
 {
 	struct snd_azf3328 *chip = snd_pcm_substream_chip(substream);
-	const struct snd_azf3328_codec_data *codec = &chip->codecs[codec_type];
 	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct snd_azf3328_codec_data *codec = runtime->private_data;
 	int result = 0;
 	u16 flags1;
 	bool previously_muted = 0;
-	bool is_playback_codec = (AZF_CODEC_PLAYBACK == codec_type);
+	bool is_main_mixer_playback_codec = (AZF_CODEC_PLAYBACK == codec->type);
 
-	snd_azf3328_dbgcalls("snd_azf3328_codec_trigger cmd %d\n", cmd);
+	snd_azf3328_dbgcalls("snd_azf3328_pcm_trigger cmd %d\n", cmd);
 
 	switch (cmd) {
 	case SNDRV_PCM_TRIGGER_START:
 		snd_azf3328_dbgcodec("START %s\n", codec->name);
 
-		if (is_playback_codec) {
+		if (is_main_mixer_playback_codec) {
 			/* mute WaveOut (avoid clicking during setup) */
 			previously_muted =
 				snd_azf3328_mixer_set_mute(
@@ -1196,12 +1244,12 @@
 				);
 		}
 
-		snd_azf3328_codec_setfmt(chip, codec_type,
+		snd_azf3328_codec_setfmt(codec,
 			runtime->rate,
 			snd_pcm_format_width(runtime->format),
 			runtime->channels);
 
-		spin_lock(&chip->reg_lock);
+		spin_lock(codec->lock);
 		/* first, remember current value: */
 		flags1 = snd_azf3328_codec_inw(codec, IDX_IO_CODEC_DMA_FLAGS);
 
@@ -1211,14 +1259,14 @@
 
 		/* FIXME: clear interrupts or what??? */
 		snd_azf3328_codec_outw(codec, IDX_IO_CODEC_IRQTYPE, 0xffff);
-		spin_unlock(&chip->reg_lock);
+		spin_unlock(codec->lock);
 
-		snd_azf3328_codec_setdmaa(chip, codec_type, runtime->dma_addr,
+		snd_azf3328_codec_setdmaa(codec, runtime->dma_addr,
 			snd_pcm_lib_period_bytes(substream),
 			snd_pcm_lib_buffer_bytes(substream)
 		);
 
-		spin_lock(&chip->reg_lock);
+		spin_lock(codec->lock);
 #ifdef WIN9X
 		/* FIXME: enable playback/recording??? */
 		flags1 |= DMA_RUN_SOMETHING1 | DMA_RUN_SOMETHING2;
@@ -1242,10 +1290,10 @@
 			DMA_EPILOGUE_SOMETHING |
 			DMA_SOMETHING_ELSE);
 #endif
-		spin_unlock(&chip->reg_lock);
-		snd_azf3328_ctrl_codec_activity(chip, codec_type, 1);
+		spin_unlock(codec->lock);
+		snd_azf3328_ctrl_codec_activity(chip, codec->type, 1);
 
-		if (is_playback_codec) {
+		if (is_main_mixer_playback_codec) {
 			/* now unmute WaveOut */
 			if (!previously_muted)
 				snd_azf3328_mixer_set_mute(
@@ -1258,19 +1306,19 @@
 	case SNDRV_PCM_TRIGGER_RESUME:
 		snd_azf3328_dbgcodec("RESUME %s\n", codec->name);
 		/* resume codec if we were active */
-		spin_lock(&chip->reg_lock);
+		spin_lock(codec->lock);
 		if (codec->running)
 			snd_azf3328_codec_outw(codec, IDX_IO_CODEC_DMA_FLAGS,
 				snd_azf3328_codec_inw(
 					codec, IDX_IO_CODEC_DMA_FLAGS
 				) | DMA_RESUME
 			);
-		spin_unlock(&chip->reg_lock);
+		spin_unlock(codec->lock);
 		break;
 	case SNDRV_PCM_TRIGGER_STOP:
 		snd_azf3328_dbgcodec("STOP %s\n", codec->name);
 
-		if (is_playback_codec) {
+		if (is_main_mixer_playback_codec) {
 			/* mute WaveOut (avoid clicking during setup) */
 			previously_muted =
 				snd_azf3328_mixer_set_mute(
@@ -1278,7 +1326,7 @@
 				);
 		}
 
-		spin_lock(&chip->reg_lock);
+		spin_lock(codec->lock);
 		/* first, remember current value: */
 		flags1 = snd_azf3328_codec_inw(codec, IDX_IO_CODEC_DMA_FLAGS);
 
@@ -1293,10 +1341,10 @@
 
 		flags1 &= ~DMA_RUN_SOMETHING1;
 		snd_azf3328_codec_outw(codec, IDX_IO_CODEC_DMA_FLAGS, flags1);
-		spin_unlock(&chip->reg_lock);
-		snd_azf3328_ctrl_codec_activity(chip, codec_type, 0);
+		spin_unlock(codec->lock);
+		snd_azf3328_ctrl_codec_activity(chip, codec->type, 0);
 
-		if (is_playback_codec) {
+		if (is_main_mixer_playback_codec) {
 			/* now unmute WaveOut */
 			if (!previously_muted)
 				snd_azf3328_mixer_set_mute(
@@ -1330,67 +1378,29 @@
 	return result;
 }
 
-static int
-snd_azf3328_codec_playback_trigger(struct snd_pcm_substream *substream, int cmd)
-{
-	return snd_azf3328_codec_trigger(AZF_CODEC_PLAYBACK, substream, cmd);
-}
-
-static int
-snd_azf3328_codec_capture_trigger(struct snd_pcm_substream *substream, int cmd)
-{
-	return snd_azf3328_codec_trigger(AZF_CODEC_CAPTURE, substream, cmd);
-}
-
-static int
-snd_azf3328_codec_i2s_out_trigger(struct snd_pcm_substream *substream, int cmd)
-{
-	return snd_azf3328_codec_trigger(AZF_CODEC_I2S_OUT, substream, cmd);
-}
-
 static snd_pcm_uframes_t
-snd_azf3328_codec_pointer(struct snd_pcm_substream *substream,
-			  enum snd_azf3328_codec_type codec_type
+snd_azf3328_pcm_pointer(struct snd_pcm_substream *substream
 )
 {
-	const struct snd_azf3328 *chip = snd_pcm_substream_chip(substream);
-	const struct snd_azf3328_codec_data *codec = &chip->codecs[codec_type];
-	unsigned long bufptr, result;
+	const struct snd_azf3328_codec_data *codec =
+		substream->runtime->private_data;
+	unsigned long result;
 	snd_pcm_uframes_t frmres;
 
-#ifdef QUERY_HARDWARE
-	bufptr = snd_azf3328_codec_inl(codec, IDX_IO_CODEC_DMA_START_1);
-#else
-	bufptr = substream->runtime->dma_addr;
-#endif
 	result = snd_azf3328_codec_inl(codec, IDX_IO_CODEC_DMA_CURRPOS);
 
 	/* calculate offset */
-	result -= bufptr;
+#ifdef QUERY_HARDWARE
+	result -= snd_azf3328_codec_inl(codec, IDX_IO_CODEC_DMA_START_1);
+#else
+	result -= codec->dma_base;
+#endif
 	frmres = bytes_to_frames( substream->runtime, result);
-	snd_azf3328_dbgcodec("%s @ 0x%8lx, frames %8ld\n",
-				codec->name, result, frmres);
+	snd_azf3328_dbgcodec("%08li %s @ 0x%8lx, frames %8ld\n",
+				jiffies, codec->name, result, frmres);
 	return frmres;
 }
 
-static snd_pcm_uframes_t
-snd_azf3328_codec_playback_pointer(struct snd_pcm_substream *substream)
-{
-	return snd_azf3328_codec_pointer(substream, AZF_CODEC_PLAYBACK);
-}
-
-static snd_pcm_uframes_t
-snd_azf3328_codec_capture_pointer(struct snd_pcm_substream *substream)
-{
-	return snd_azf3328_codec_pointer(substream, AZF_CODEC_CAPTURE);
-}
-
-static snd_pcm_uframes_t
-snd_azf3328_codec_i2s_out_pointer(struct snd_pcm_substream *substream)
-{
-	return snd_azf3328_codec_pointer(substream, AZF_CODEC_I2S_OUT);
-}
-
 /******************************************************************/
 
 #ifdef SUPPORT_GAMEPORT
@@ -1532,7 +1542,7 @@
 		}
 	}
 
-	/* trigger next axes sampling, to be evaluated the next time we
+	/* trigger next sampling of axes, to be evaluated the next time we
 	 * enter this function */
 
 	/* for some very, very strange reason we cannot enable
@@ -1624,29 +1634,29 @@
 }
 
 static inline void
-snd_azf3328_codec_interrupt(struct snd_azf3328 *chip, u8 status)
+snd_azf3328_pcm_interrupt(const struct snd_azf3328_codec_data *first_codec,
+			  u8 status
+)
 {
 	u8 which;
 	enum snd_azf3328_codec_type codec_type;
-	const struct snd_azf3328_codec_data *codec;
+	const struct snd_azf3328_codec_data *codec = first_codec;
 
 	for (codec_type = AZF_CODEC_PLAYBACK;
 		 codec_type <= AZF_CODEC_I2S_OUT;
-			 ++codec_type) {
+			 ++codec_type, ++codec) {
 
 		/* skip codec if there's no interrupt for it */
 		if (!(status & (1 << codec_type)))
 			continue;
 
-		codec = &chip->codecs[codec_type];
-
-		spin_lock(&chip->reg_lock);
+		spin_lock(codec->lock);
 		which = snd_azf3328_codec_inb(codec, IDX_IO_CODEC_IRQTYPE);
 		/* ack all IRQ types immediately */
 		snd_azf3328_codec_outb(codec, IDX_IO_CODEC_IRQTYPE, which);
-		spin_unlock(&chip->reg_lock);
+		spin_unlock(codec->lock);
 
-		if ((chip->pcm[codec_type]) && (codec->substream)) {
+		if (codec->substream) {
 			snd_pcm_period_elapsed(codec->substream);
 			snd_azf3328_dbgcodec("%s period done (#%x), @ %x\n",
 				codec->name,
@@ -1701,7 +1711,7 @@
 	}
 
 	if (status & (IRQ_PLAYBACK|IRQ_RECORDING|IRQ_I2S_OUT))
-		snd_azf3328_codec_interrupt(chip, status);
+		snd_azf3328_pcm_interrupt(chip->codecs, status);
 
 	if (status & IRQ_GAMEPORT)
 		snd_azf3328_gameport_interrupt(chip);
@@ -1789,101 +1799,85 @@
 {
 	struct snd_azf3328 *chip = snd_pcm_substream_chip(substream);
 	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct snd_azf3328_codec_data *codec = &chip->codecs[codec_type];
 
 	snd_azf3328_dbgcallenter();
-	chip->codecs[codec_type].substream = substream;
+	codec->substream = substream;
 
 	/* same parameters for all our codecs - at least we think so... */
 	runtime->hw = snd_azf3328_hardware;
 
 	snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
 				   &snd_azf3328_hw_constraints_rates);
+	runtime->private_data = codec;
 	snd_azf3328_dbgcallleave();
 	return 0;
 }
 
 static int
-snd_azf3328_playback_open(struct snd_pcm_substream *substream)
+snd_azf3328_pcm_playback_open(struct snd_pcm_substream *substream)
 {
 	return snd_azf3328_pcm_open(substream, AZF_CODEC_PLAYBACK);
 }
 
 static int
-snd_azf3328_capture_open(struct snd_pcm_substream *substream)
+snd_azf3328_pcm_capture_open(struct snd_pcm_substream *substream)
 {
 	return snd_azf3328_pcm_open(substream, AZF_CODEC_CAPTURE);
 }
 
 static int
-snd_azf3328_i2s_out_open(struct snd_pcm_substream *substream)
+snd_azf3328_pcm_i2s_out_open(struct snd_pcm_substream *substream)
 {
 	return snd_azf3328_pcm_open(substream, AZF_CODEC_I2S_OUT);
 }
 
 static int
-snd_azf3328_pcm_close(struct snd_pcm_substream *substream,
-		      enum snd_azf3328_codec_type codec_type
+snd_azf3328_pcm_close(struct snd_pcm_substream *substream
 )
 {
-	struct snd_azf3328 *chip = snd_pcm_substream_chip(substream);
+	struct snd_azf3328_codec_data *codec =
+		substream->runtime->private_data;
 
 	snd_azf3328_dbgcallenter();
-	chip->codecs[codec_type].substream = NULL;
+	codec->substream = NULL;
 	snd_azf3328_dbgcallleave();
 	return 0;
 }
 
-static int
-snd_azf3328_playback_close(struct snd_pcm_substream *substream)
-{
-	return snd_azf3328_pcm_close(substream, AZF_CODEC_PLAYBACK);
-}
-
-static int
-snd_azf3328_capture_close(struct snd_pcm_substream *substream)
-{
-	return snd_azf3328_pcm_close(substream, AZF_CODEC_CAPTURE);
-}
-
-static int
-snd_azf3328_i2s_out_close(struct snd_pcm_substream *substream)
-{
-	return snd_azf3328_pcm_close(substream, AZF_CODEC_I2S_OUT);
-}
-
 /******************************************************************/
 
 static struct snd_pcm_ops snd_azf3328_playback_ops = {
-	.open =		snd_azf3328_playback_open,
-	.close =	snd_azf3328_playback_close,
+	.open =		snd_azf3328_pcm_playback_open,
+	.close =	snd_azf3328_pcm_close,
 	.ioctl =	snd_pcm_lib_ioctl,
 	.hw_params =	snd_azf3328_hw_params,
 	.hw_free =	snd_azf3328_hw_free,
-	.prepare =	snd_azf3328_codec_prepare,
-	.trigger =	snd_azf3328_codec_playback_trigger,
-	.pointer =	snd_azf3328_codec_playback_pointer
+	.prepare =	snd_azf3328_pcm_prepare,
+	.trigger =	snd_azf3328_pcm_trigger,
+	.pointer =	snd_azf3328_pcm_pointer
 };
 
 static struct snd_pcm_ops snd_azf3328_capture_ops = {
-	.open =		snd_azf3328_capture_open,
-	.close =	snd_azf3328_capture_close,
+	.open =		snd_azf3328_pcm_capture_open,
+	.close =	snd_azf3328_pcm_close,
 	.ioctl =	snd_pcm_lib_ioctl,
 	.hw_params =	snd_azf3328_hw_params,
 	.hw_free =	snd_azf3328_hw_free,
-	.prepare =	snd_azf3328_codec_prepare,
-	.trigger =	snd_azf3328_codec_capture_trigger,
-	.pointer =	snd_azf3328_codec_capture_pointer
+	.prepare =	snd_azf3328_pcm_prepare,
+	.trigger =	snd_azf3328_pcm_trigger,
+	.pointer =	snd_azf3328_pcm_pointer
 };
 
 static struct snd_pcm_ops snd_azf3328_i2s_out_ops = {
-	.open =		snd_azf3328_i2s_out_open,
-	.close =	snd_azf3328_i2s_out_close,
+	.open =		snd_azf3328_pcm_i2s_out_open,
+	.close =	snd_azf3328_pcm_close,
 	.ioctl =	snd_pcm_lib_ioctl,
 	.hw_params =	snd_azf3328_hw_params,
 	.hw_free =	snd_azf3328_hw_free,
-	.prepare =	snd_azf3328_codec_prepare,
-	.trigger =	snd_azf3328_codec_i2s_out_trigger,
-	.pointer =	snd_azf3328_codec_i2s_out_pointer
+	.prepare =	snd_azf3328_pcm_prepare,
+	.trigger =	snd_azf3328_pcm_trigger,
+	.pointer =	snd_azf3328_pcm_pointer
 };
 
 static int __devinit
@@ -1966,7 +1960,7 @@
 		snd_azf3328_dbgtimer("delay was too low (%d)!\n", delay);
 		delay = 49; /* minimum time is 49 ticks */
 	}
-	snd_azf3328_dbgtimer("setting timer countdown value %d, add COUNTDOWN|IRQ\n", delay);
+	snd_azf3328_dbgtimer("setting timer countdown value %d\n", delay);
 	delay |= TIMER_COUNTDOWN_ENABLE | TIMER_IRQ_ENABLE;
 	spin_lock_irqsave(&chip->reg_lock, flags);
 	snd_azf3328_ctrl_outl(chip, IDX_IO_TIMER_VALUE, delay);
@@ -2180,6 +2174,7 @@
 	};
 	u8 dma_init;
 	enum snd_azf3328_codec_type codec_type;
+	struct snd_azf3328_codec_data *codec_setup;
 
 	*rchip = NULL;
 
@@ -2217,15 +2212,23 @@
 	chip->opl3_io  = pci_resource_start(pci, 3);
 	chip->mixer_io = pci_resource_start(pci, 4);
 
-	chip->codecs[AZF_CODEC_PLAYBACK].io_base =
-				chip->ctrl_io + AZF_IO_OFFS_CODEC_PLAYBACK;
-	chip->codecs[AZF_CODEC_PLAYBACK].name = "PLAYBACK";
-	chip->codecs[AZF_CODEC_CAPTURE].io_base =
-				chip->ctrl_io + AZF_IO_OFFS_CODEC_CAPTURE;
-	chip->codecs[AZF_CODEC_CAPTURE].name = "CAPTURE";
-	chip->codecs[AZF_CODEC_I2S_OUT].io_base =
-				chip->ctrl_io + AZF_IO_OFFS_CODEC_I2S_OUT;
-	chip->codecs[AZF_CODEC_I2S_OUT].name = "I2S_OUT";
+	codec_setup = &chip->codecs[AZF_CODEC_PLAYBACK];
+	codec_setup->io_base = chip->ctrl_io + AZF_IO_OFFS_CODEC_PLAYBACK;
+	codec_setup->lock = &chip->reg_lock;
+	codec_setup->type = AZF_CODEC_PLAYBACK;
+	codec_setup->name = "PLAYBACK";
+
+	codec_setup = &chip->codecs[AZF_CODEC_CAPTURE];
+	codec_setup->io_base = chip->ctrl_io + AZF_IO_OFFS_CODEC_CAPTURE;
+	codec_setup->lock = &chip->reg_lock;
+	codec_setup->type = AZF_CODEC_CAPTURE;
+	codec_setup->name = "CAPTURE";
+
+	codec_setup = &chip->codecs[AZF_CODEC_I2S_OUT];
+	codec_setup->io_base = chip->ctrl_io + AZF_IO_OFFS_CODEC_I2S_OUT;
+	codec_setup->lock = &chip->reg_lock;
+	codec_setup->type = AZF_CODEC_I2S_OUT;
+	codec_setup->name = "I2S_OUT";
 
 	if (request_irq(pci->irq, snd_azf3328_interrupt,
 			IRQF_SHARED, card->shortname, chip)) {
@@ -2257,15 +2260,15 @@
 		struct snd_azf3328_codec_data *codec =
 			 &chip->codecs[codec_type];
 
-		/* shutdown codecs to save power */
+		/* shutdown codecs to reduce power / noise */
 			/* have ...ctrl_codec_activity() act properly */
 		codec->running = 1;
 		snd_azf3328_ctrl_codec_activity(chip, codec_type, 0);
 
-		spin_lock_irq(&chip->reg_lock);
+		spin_lock_irq(codec->lock);
 		snd_azf3328_codec_outb(codec, IDX_IO_CODEC_DMA_FLAGS,
 						 dma_init);
-		spin_unlock_irq(&chip->reg_lock);
+		spin_unlock_irq(codec->lock);
 	}
 
 	snd_card_set_dev(card, &pci->dev);
@@ -2419,6 +2422,7 @@
 
 	snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
 
+	/* same pcm object for playback/capture */
 	snd_pcm_suspend_all(chip->pcm[AZF_CODEC_PLAYBACK]);
 	snd_pcm_suspend_all(chip->pcm[AZF_CODEC_I2S_OUT]);
 
diff --git a/sound/pci/bt87x.c b/sound/pci/bt87x.c
index 37e1b5d..2958a05 100644
--- a/sound/pci/bt87x.c
+++ b/sound/pci/bt87x.c
@@ -637,15 +637,9 @@
 static int snd_bt87x_capture_source_info(struct snd_kcontrol *kcontrol,
 					 struct snd_ctl_elem_info *info)
 {
-	static char *texts[3] = {"TV Tuner", "FM", "Mic/Line"};
+	static const char *const texts[3] = {"TV Tuner", "FM", "Mic/Line"};
 
-	info->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
-	info->count = 1;
-	info->value.enumerated.items = 3;
-	if (info->value.enumerated.item > 2)
-		info->value.enumerated.item = 2;
-	strcpy(info->value.enumerated.name, texts[info->value.enumerated.item]);
-	return 0;
+	return snd_ctl_enum_info(info, 1, 3, texts);
 }
 
 static int snd_bt87x_capture_source_get(struct snd_kcontrol *kcontrol,
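
  Editorial note: the bt87x conversion above (and the cmipci ones further
  below) replaces open-coded enumerated-control info handlers with
  snd_ctl_enum_info(). As a sketch of what the helper does with its
  (info, channels, items, texts) arguments -- not the exact ALSA core
  implementation, which also bounds the string copy:

	info->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
	info->count = channels;
	info->value.enumerated.items = items;
	if (info->value.enumerated.item >= items)
		info->value.enumerated.item = items - 1;
	strcpy(info->value.enumerated.name, texts[info->value.enumerated.item]);
	return 0;
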
diff --git a/sound/pci/ca0106/ca0106.h b/sound/pci/ca0106/ca0106.h
index f19c110..fc53b9b 100644
--- a/sound/pci/ca0106/ca0106.h
+++ b/sound/pci/ca0106/ca0106.h
@@ -188,7 +188,7 @@
 #define PLAYBACK_LIST_PTR	0x02		/* Pointer to the current period being played */
 						/* PTR[5:0], Default: 0x0 */
 #define PLAYBACK_UNKNOWN3	0x03		/* Not used ?? */
-#define PLAYBACK_DMA_ADDR	0x04		/* Playback DMA addresss */
+#define PLAYBACK_DMA_ADDR	0x04		/* Playback DMA address */
 						/* DMA[31:0], Default: 0x0 */
 #define PLAYBACK_PERIOD_SIZE	0x05		/* Playback period size. win2000 uses 0x04000000 */
 						/* SIZE[31:16], Default: 0x0 */
diff --git a/sound/pci/ca0106/ca0106_main.c b/sound/pci/ca0106/ca0106_main.c
index d2d12c0..01b4938 100644
--- a/sound/pci/ca0106/ca0106_main.c
+++ b/sound/pci/ca0106/ca0106_main.c
@@ -1082,7 +1082,7 @@
 	struct snd_pcm_runtime *runtime = substream->runtime;
 	struct snd_ca0106_pcm *epcm = runtime->private_data;
 	snd_pcm_uframes_t ptr, ptr1, ptr2 = 0;
-	int channel = channel=epcm->channel_id;
+	int channel = epcm->channel_id;
 
 	if (!epcm->running)
 		return 0;
diff --git a/sound/pci/cmipci.c b/sound/pci/cmipci.c
index 329968e..b5bb036 100644
--- a/sound/pci/cmipci.c
+++ b/sound/pci/cmipci.c
@@ -2507,14 +2507,12 @@
 					struct snd_ctl_elem_info *uinfo)
 {
 	struct cmipci *cm = snd_kcontrol_chip(kcontrol);
-	static char *texts[3] = { "Line-In", "Rear Output", "Bass Output" };
-	uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
-	uinfo->count = 1;
-	uinfo->value.enumerated.items = cm->chip_version >= 39 ? 3 : 2;
-	if (uinfo->value.enumerated.item >= uinfo->value.enumerated.items)
-		uinfo->value.enumerated.item = uinfo->value.enumerated.items - 1;
-	strcpy(uinfo->value.enumerated.name, texts[uinfo->value.enumerated.item]);
-	return 0;
+	static const char *const texts[3] = {
+		"Line-In", "Rear Output", "Bass Output"
+	};
+
+	return snd_ctl_enum_info(uinfo, 1,
+				 cm->chip_version >= 39 ? 3 : 2, texts);
 }
 
 static inline unsigned int get_line_in_mode(struct cmipci *cm)
@@ -2564,14 +2562,9 @@
 static int snd_cmipci_mic_in_mode_info(struct snd_kcontrol *kcontrol,
 				       struct snd_ctl_elem_info *uinfo)
 {
-	static char *texts[2] = { "Mic-In", "Center/LFE Output" };
-	uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
-	uinfo->count = 1;
-	uinfo->value.enumerated.items = 2;
-	if (uinfo->value.enumerated.item >= uinfo->value.enumerated.items)
-		uinfo->value.enumerated.item = uinfo->value.enumerated.items - 1;
-	strcpy(uinfo->value.enumerated.name, texts[uinfo->value.enumerated.item]);
-	return 0;
+	static const char *const texts[2] = { "Mic-In", "Center/LFE Output" };
+
+	return snd_ctl_enum_info(uinfo, 1, 2, texts);
 }
 
 static int snd_cmipci_mic_in_mode_get(struct snd_kcontrol *kcontrol,
diff --git a/sound/pci/emu10k1/emu10k1x.c b/sound/pci/emu10k1/emu10k1x.c
index df47f73..0c701e4 100644
--- a/sound/pci/emu10k1/emu10k1x.c
+++ b/sound/pci/emu10k1/emu10k1x.c
@@ -114,7 +114,7 @@
 						 */
 #define PLAYBACK_LIST_SIZE	0x01		/* Size of list in bytes << 16. E.g. 8 periods -> 0x00380000  */
 #define PLAYBACK_LIST_PTR	0x02		/* Pointer to the current period being played */
-#define PLAYBACK_DMA_ADDR	0x04		/* Playback DMA addresss */
+#define PLAYBACK_DMA_ADDR	0x04		/* Playback DMA address */
 #define PLAYBACK_PERIOD_SIZE	0x05		/* Playback period size */
 #define PLAYBACK_POINTER	0x06		/* Playback period pointer. Sample currently in DAC */
 #define PLAYBACK_UNKNOWN1       0x07
diff --git a/sound/pci/emu10k1/p16v.h b/sound/pci/emu10k1/p16v.h
index 15321494..00f4817 100644
--- a/sound/pci/emu10k1/p16v.h
+++ b/sound/pci/emu10k1/p16v.h
@@ -96,7 +96,7 @@
 #define PLAYBACK_LIST_SIZE	0x01		/* Size of list in bytes << 16. E.g. 8 periods -> 0x00380000  */
 #define PLAYBACK_LIST_PTR	0x02		/* Pointer to the current period being played */
 #define PLAYBACK_UNKNOWN3	0x03		/* Not used */
-#define PLAYBACK_DMA_ADDR	0x04		/* Playback DMA addresss */
+#define PLAYBACK_DMA_ADDR	0x04		/* Playback DMA address */
 #define PLAYBACK_PERIOD_SIZE	0x05		/* Playback period size. win2000 uses 0x04000000 */
 #define PLAYBACK_POINTER	0x06		/* Playback period pointer. Used with PLAYBACK_LIST_PTR to determine buffer position currently in DAC */
 #define PLAYBACK_FIFO_END_ADDRESS	0x07		/* Playback FIFO end address */
diff --git a/sound/pci/es1968.c b/sound/pci/es1968.c
index 23a58f0..7c17f45 100644
--- a/sound/pci/es1968.c
+++ b/sound/pci/es1968.c
@@ -220,7 +220,7 @@
 #define	RINGB_EN_2CODEC		0x0020
 #define RINGB_SING_BIT_DUAL	0x0040
 
-/* ****Port Adresses**** */
+/* ****Port Addresses**** */
 
 /*   Write & Read */
 #define ESM_INDEX		0x02
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index 98b6d02..05e5ec8 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -4571,6 +4571,9 @@
 		}
 		memset(cfg->hp_pins + cfg->hp_outs, 0,
 		       sizeof(hda_nid_t) * (AUTO_CFG_MAX_OUTS - cfg->hp_outs));
+		if (!cfg->hp_outs)
+			cfg->line_out_type = AUTO_PIN_HP_OUT;
+
 	}
 
 	/* sort by sequence */
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index a1c4008..d3d18be 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -1235,7 +1235,8 @@
 			pos_adj = 0;
 		} else {
 			ofs = setup_bdle(substream, azx_dev,
-					 &bdl, ofs, pos_adj, 1);
+					 &bdl, ofs, pos_adj,
+					 !substream->runtime->no_period_wakeup);
 			if (ofs < 0)
 				goto error;
 		}
@@ -1247,7 +1248,8 @@
 					 period_bytes - pos_adj, 0);
 		else
 			ofs = setup_bdle(substream, azx_dev, &bdl, ofs,
-					 period_bytes, 1);
+					 period_bytes,
+					 !substream->runtime->no_period_wakeup);
 		if (ofs < 0)
 			goto error;
 	}
@@ -1515,7 +1517,8 @@
 				 /* No full-resume yet implemented */
 				 /* SNDRV_PCM_INFO_RESUME |*/
 				 SNDRV_PCM_INFO_PAUSE |
-				 SNDRV_PCM_INFO_SYNC_START),
+				 SNDRV_PCM_INFO_SYNC_START |
+				 SNDRV_PCM_INFO_NO_PERIOD_WAKEUP),
 	.formats =		SNDRV_PCM_FMTBIT_S16_LE,
 	.rates =		SNDRV_PCM_RATE_48000,
 	.rate_min =		48000,
diff --git a/sound/pci/hda/patch_analog.c b/sound/pci/hda/patch_analog.c
index f7ff3f7..4678067 100644
--- a/sound/pci/hda/patch_analog.c
+++ b/sound/pci/hda/patch_analog.c
@@ -666,7 +666,7 @@
 	HDA_CODEC_MUTE("Aux Playback Switch", 0x16, 0x0, HDA_OUTPUT),
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x13, 0x0, HDA_OUTPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x13, 0x0, HDA_OUTPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x0f, 0x0, HDA_OUTPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x0f, 0x0, HDA_OUTPUT),
 	HDA_CODEC_VOLUME("Mono Playback Volume", 0x1e, 0x0, HDA_OUTPUT),
 	HDA_CODEC_MUTE("Mono Playback Switch", 0x1e, 0x0, HDA_OUTPUT),
 	HDA_CODEC_VOLUME("Capture Volume", 0x12, 0x0, HDA_OUTPUT),
@@ -729,7 +729,7 @@
 	HDA_CODEC_MUTE("Aux Playback Switch", 0x16, 0x0, HDA_OUTPUT),
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x13, 0x0, HDA_OUTPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x13, 0x0, HDA_OUTPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x0f, 0x0, HDA_OUTPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x0f, 0x0, HDA_OUTPUT),
 	/* 
 	   HDA_CODEC_VOLUME("Mono Playback Volume", 0x1e, 0x0, HDA_OUTPUT),
 	   HDA_CODEC_MUTE("Mono Playback Switch", 0x1e, 0x0, HDA_OUTPUT), */
@@ -775,7 +775,7 @@
 	HDA_CODEC_MUTE("PCM Playback Switch", 0x03, 0x0, HDA_OUTPUT),
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x13, 0x0, HDA_OUTPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x13, 0x0, HDA_OUTPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x0f, 0x0, HDA_OUTPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x0f, 0x0, HDA_OUTPUT),
 	HDA_CODEC_VOLUME("Capture Volume", 0x12, 0x0, HDA_OUTPUT),
 	HDA_CODEC_MUTE("Capture Switch", 0x12, 0x0, HDA_OUTPUT),
 	{
@@ -1358,7 +1358,7 @@
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x12, 0x0, HDA_OUTPUT),
 	HDA_CODEC_VOLUME("Line Playback Volume", 0x13, 0x0, HDA_OUTPUT),
 	HDA_CODEC_MUTE("Line Playback Switch", 0x13, 0x0, HDA_OUTPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x0c, 0x0, HDA_OUTPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x0c, 0x0, HDA_OUTPUT),
 	HDA_CODEC_VOLUME("Capture Volume", 0x15, 0x0, HDA_OUTPUT),
 	HDA_CODEC_MUTE("Capture Switch", 0x15, 0x0, HDA_OUTPUT),
 	{
@@ -1515,8 +1515,8 @@
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x1c, 0x0, HDA_OUTPUT),
 	HDA_CODEC_VOLUME("CD Playback Volume", 0x1d, 0x0, HDA_OUTPUT),
 	HDA_CODEC_MUTE("CD Playback Switch", 0x1d, 0x0, HDA_OUTPUT),
-	HDA_CODEC_VOLUME("Front Mic Boost", 0x08, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0x0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Front Mic Boost Volume", 0x08, 0x0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0x0, HDA_INPUT),
 	HDA_CODEC_VOLUME("Capture Volume", 0x15, 0x0, HDA_OUTPUT),
 	HDA_CODEC_MUTE("Capture Switch", 0x15, 0x0, HDA_OUTPUT),
 	{
@@ -1726,8 +1726,8 @@
 	HDA_CODEC_VOLUME("CD Playback Volume", 0x1d, 0x0, HDA_OUTPUT),
 	HDA_CODEC_MUTE("CD Playback Switch", 0x1d, 0x0, HDA_OUTPUT),
 #endif
-	HDA_CODEC_VOLUME("Mic Boost", 0x08, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Internal Mic Boost", 0x18, 0x0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x08, 0x0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Internal Mic Boost Volume", 0x18, 0x0, HDA_INPUT),
 	HDA_CODEC_VOLUME("Capture Volume", 0x15, 0x0, HDA_OUTPUT),
 	HDA_CODEC_MUTE("Capture Switch", 0x15, 0x0, HDA_OUTPUT),
 	{
@@ -1774,7 +1774,7 @@
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x12, 0x0, HDA_OUTPUT),
 	HDA_CODEC_VOLUME("CD Playback Volume", 0x1d, 0x0, HDA_OUTPUT),
 	HDA_CODEC_MUTE("CD Playback Switch", 0x1d, 0x0, HDA_OUTPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x08, 0x0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x08, 0x0, HDA_INPUT),
 	HDA_CODEC_VOLUME("Capture Volume", 0x15, 0x0, HDA_OUTPUT),
 	HDA_CODEC_MUTE("Capture Switch", 0x15, 0x0, HDA_OUTPUT),
 	{
@@ -2160,8 +2160,8 @@
 	HDA_CODEC_VOLUME("Analog Mix Playback Volume", 0x21, 0x0, HDA_OUTPUT),
 	HDA_CODEC_MUTE("Analog Mix Playback Switch", 0x21, 0x0, HDA_OUTPUT),
 
-	HDA_CODEC_VOLUME("Front Mic Boost", 0x39, 0x0, HDA_OUTPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x3c, 0x0, HDA_OUTPUT),
+	HDA_CODEC_VOLUME("Front Mic Boost Volume", 0x39, 0x0, HDA_OUTPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x3c, 0x0, HDA_OUTPUT),
 
 	{ } /* end */
 };
@@ -2203,8 +2203,8 @@
 	HDA_CODEC_VOLUME("Analog Mix Playback Volume", 0x21, 0x0, HDA_OUTPUT),
 	HDA_CODEC_MUTE("Analog Mix Playback Switch", 0x21, 0x0, HDA_OUTPUT),
 
-	HDA_CODEC_VOLUME("Front Mic Boost", 0x39, 0x0, HDA_OUTPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x3c, 0x0, HDA_OUTPUT),
+	HDA_CODEC_VOLUME("Front Mic Boost Volume", 0x39, 0x0, HDA_OUTPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x3c, 0x0, HDA_OUTPUT),
 	{
 		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
 		.name = "Channel Mode",
@@ -2232,7 +2232,7 @@
 	HDA_CODEC_VOLUME("Analog Mix Playback Volume", 0x21, 0x0, HDA_OUTPUT),
 	HDA_CODEC_MUTE("Analog Mix Playback Switch", 0x21, 0x0, HDA_OUTPUT),
 
-	HDA_CODEC_VOLUME("Mic Boost", 0x39, 0x0, HDA_OUTPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x39, 0x0, HDA_OUTPUT),
 
 	{
 		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
@@ -2902,7 +2902,7 @@
 		idx = ad1988_pin_idx(pin);
 		bnid = ad1988_boost_nids[idx];
 		if (bnid) {
-			sprintf(name, "%s Boost", ctlname);
+			sprintf(name, "%s Boost Volume", ctlname);
 			return add_control(spec, AD_CTL_WIDGET_VOL, name,
 					   HDA_COMPOSE_AMP_VAL(bnid, 3, idx, HDA_OUTPUT));
 
@@ -3300,8 +3300,8 @@
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x20, 0x01, HDA_INPUT),
 	HDA_CODEC_VOLUME("CD Playback Volume", 0x20, 0x02, HDA_INPUT),
 	HDA_CODEC_MUTE("CD Playback Switch", 0x20, 0x02, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x15, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Front Mic Boost", 0x14, 0x0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x15, 0x0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Front Mic Boost Volume", 0x14, 0x0, HDA_INPUT),
 	HDA_CODEC_VOLUME("Capture Volume", 0x0c, 0x0, HDA_OUTPUT),
 	HDA_CODEC_MUTE("Capture Switch", 0x0c, 0x0, HDA_OUTPUT),
 	HDA_CODEC_VOLUME_IDX("Capture Volume", 1, 0x0d, 0x0, HDA_OUTPUT),
@@ -3499,9 +3499,9 @@
 	HDA_CODEC_MUTE("Beep Playback Switch", 0x20, 0x03, HDA_INPUT),
 	HDA_CODEC_VOLUME("Docking Mic Playback Volume", 0x20, 0x04, HDA_INPUT),
 	HDA_CODEC_MUTE("Docking Mic Playback Switch", 0x20, 0x04, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x14, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Internal Mic Boost", 0x15, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Docking Mic Boost", 0x25, 0x0, HDA_OUTPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x14, 0x0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Internal Mic Boost Volume", 0x15, 0x0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Dock Mic Boost Volume", 0x25, 0x0, HDA_OUTPUT),
 	HDA_CODEC_VOLUME("Capture Volume", 0x0c, 0x0, HDA_OUTPUT),
 	HDA_CODEC_MUTE("Capture Switch", 0x0c, 0x0, HDA_OUTPUT),
 	HDA_CODEC_VOLUME_IDX("Capture Volume", 1, 0x0d, 0x0, HDA_OUTPUT),
@@ -3560,8 +3560,8 @@
 	HDA_CODEC_MUTE("Front Mic Playback Switch", 0x20, 0x00, HDA_INPUT),
 	HDA_CODEC_VOLUME("Line-In Playback Volume", 0x20, 0x01, HDA_INPUT),
 	HDA_CODEC_MUTE("Line-In Playback Switch", 0x20, 0x01, HDA_INPUT),
-	HDA_CODEC_VOLUME("Line-In Boost", 0x15, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Front Mic Boost", 0x14, 0x0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Line-In Boost Volume", 0x15, 0x0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Front Mic Boost Volume", 0x14, 0x0, HDA_INPUT),
 	HDA_CODEC_VOLUME("Capture Volume", 0x0c, 0x0, HDA_OUTPUT),
 	HDA_CODEC_MUTE("Capture Switch", 0x0c, 0x0, HDA_OUTPUT),
 	HDA_CODEC_VOLUME_IDX("Capture Volume", 1, 0x0d, 0x0, HDA_OUTPUT),
@@ -3745,9 +3745,9 @@
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x20, 0x04, HDA_INPUT),
 	HDA_CODEC_VOLUME("CD Playback Volume", 0x20, 0x02, HDA_INPUT),
 	HDA_CODEC_MUTE("CD Playback Switch", 0x20, 0x02, HDA_INPUT),
-	HDA_CODEC_VOLUME("Front Mic Boost", 0x14, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Line Boost", 0x15, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x25, 0x0, HDA_OUTPUT),
+	HDA_CODEC_VOLUME("Front Mic Boost Volume", 0x14, 0x0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Line Boost Volume", 0x15, 0x0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x25, 0x0, HDA_OUTPUT),
 	HDA_CODEC_VOLUME("Capture Volume", 0x0c, 0x0, HDA_OUTPUT),
 	HDA_CODEC_MUTE("Capture Switch", 0x0c, 0x0, HDA_OUTPUT),
 	HDA_CODEC_VOLUME_IDX("Capture Volume", 1, 0x0d, 0x0, HDA_OUTPUT),
@@ -3888,9 +3888,9 @@
 	HDA_CODEC_MUTE("Internal Mic Playback Switch", 0x20, 0x01, HDA_INPUT),
 	HDA_CODEC_VOLUME("Dock Mic Playback Volume", 0x20, 0x04, HDA_INPUT),
 	HDA_CODEC_MUTE("Dock Mic Playback Switch", 0x20, 0x04, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x14, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Internal Mic Boost", 0x15, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Dock Mic Boost", 0x25, 0x0, HDA_OUTPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x14, 0x0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Internal Mic Boost Volume", 0x15, 0x0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Dock Mic Boost Volume", 0x25, 0x0, HDA_OUTPUT),
 	HDA_CODEC_VOLUME("Capture Volume", 0x0c, 0x0, HDA_OUTPUT),
 	HDA_CODEC_MUTE("Capture Switch", 0x0c, 0x0, HDA_OUTPUT),
 	{ } /* end */
@@ -4126,8 +4126,8 @@
 	HDA_CODEC_MUTE("PCM Playback Switch", 0x20, 0x5, HDA_INPUT),
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x20, 0x00, HDA_INPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x20, 0x00, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x14, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Internal Mic Boost", 0x17, 0x0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x14, 0x0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Internal Mic Boost Volume", 0x17, 0x0, HDA_INPUT),
 	HDA_CODEC_VOLUME("Capture Volume", 0x0c, 0x0, HDA_OUTPUT),
 	HDA_CODEC_MUTE("Capture Switch", 0x0c, 0x0, HDA_OUTPUT),
 	{
@@ -4255,8 +4255,8 @@
 	HDA_CODEC_MUTE("PCM Playback Switch", 0x20, 0x5, HDA_INPUT),
 	HDA_CODEC_VOLUME("Capture Volume", 0x0c, 0x0, HDA_OUTPUT),
 	HDA_CODEC_MUTE("Capture Switch", 0x0c, 0x0, HDA_OUTPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x25, 0x0, HDA_OUTPUT),
-	HDA_CODEC_VOLUME("Internal Mic Boost", 0x17, 0x0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x25, 0x0, HDA_OUTPUT),
+	HDA_CODEC_VOLUME("Internal Mic Boost Volume", 0x17, 0x0, HDA_INPUT),
 	{ } /* end */
 };
 
@@ -4494,9 +4494,9 @@
 	HDA_CODEC_VOLUME_MONO("Mono Playback Volume", 0x13, 1, 0x0, HDA_OUTPUT),
 	HDA_CODEC_MUTE_MONO("Mono Playback Switch", 0x13, 1, 0x0, HDA_OUTPUT),
 
-	HDA_CODEC_VOLUME("Mic Boost", 0x3c, 0x0, HDA_OUTPUT),
-	HDA_CODEC_VOLUME("Front Mic Boost", 0x39, 0x0, HDA_OUTPUT),
-	HDA_CODEC_VOLUME("Line-In Boost", 0x3a, 0x0, HDA_OUTPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x3c, 0x0, HDA_OUTPUT),
+	HDA_CODEC_VOLUME("Front Mic Boost Volume", 0x39, 0x0, HDA_OUTPUT),
+	HDA_CODEC_VOLUME("Line-In Boost Volume", 0x3a, 0x0, HDA_OUTPUT),
 	HDA_CODEC_VOLUME("Capture Volume", 0x0c, 0x0, HDA_OUTPUT),
 	HDA_CODEC_MUTE("Capture Switch", 0x0c, 0x0, HDA_OUTPUT),
 	HDA_CODEC_VOLUME_IDX("Capture Volume", 1, 0x0d, 0x0, HDA_OUTPUT),
@@ -4547,7 +4547,7 @@
 	HDA_CODEC_MUTE("Line Playback Switch", 0x20, 0x01, HDA_INPUT),
 	HDA_CODEC_VOLUME("CD Playback Volume", 0x20, 0x06, HDA_INPUT),
 	HDA_CODEC_MUTE("CD Playback Switch", 0x20, 0x06, HDA_INPUT),
-	HDA_CODEC_VOLUME("Digital Mic Boost", 0x1f, 0x0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Digital Mic Boost Volume", 0x1f, 0x0, HDA_INPUT),
 	{ } /* end */
 };
 
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index 76bd58a..e96581f 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -869,16 +869,16 @@
 }
 
 static struct snd_kcontrol_new cxt5045_mixers[] = {
-	HDA_CODEC_VOLUME("Int Mic Capture Volume", 0x1a, 0x01, HDA_INPUT),
-	HDA_CODEC_MUTE("Int Mic Capture Switch", 0x1a, 0x01, HDA_INPUT),
-	HDA_CODEC_VOLUME("Ext Mic Capture Volume", 0x1a, 0x02, HDA_INPUT),
-	HDA_CODEC_MUTE("Ext Mic Capture Switch", 0x1a, 0x02, HDA_INPUT),
+	HDA_CODEC_VOLUME("Internal Mic Capture Volume", 0x1a, 0x01, HDA_INPUT),
+	HDA_CODEC_MUTE("Internal Mic Capture Switch", 0x1a, 0x01, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Capture Volume", 0x1a, 0x02, HDA_INPUT),
+	HDA_CODEC_MUTE("Mic Capture Switch", 0x1a, 0x02, HDA_INPUT),
 	HDA_CODEC_VOLUME("PCM Playback Volume", 0x17, 0x0, HDA_INPUT),
 	HDA_CODEC_MUTE("PCM Playback Switch", 0x17, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Int Mic Playback Volume", 0x17, 0x1, HDA_INPUT),
-	HDA_CODEC_MUTE("Int Mic Playback Switch", 0x17, 0x1, HDA_INPUT),
-	HDA_CODEC_VOLUME("Ext Mic Playback Volume", 0x17, 0x2, HDA_INPUT),
-	HDA_CODEC_MUTE("Ext Mic Playback Switch", 0x17, 0x2, HDA_INPUT),
+	HDA_CODEC_VOLUME("Internal Mic Playback Volume", 0x17, 0x1, HDA_INPUT),
+	HDA_CODEC_MUTE("Internal Mic Playback Switch", 0x17, 0x1, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Playback Volume", 0x17, 0x2, HDA_INPUT),
+	HDA_CODEC_MUTE("Mic Playback Switch", 0x17, 0x2, HDA_INPUT),
 	HDA_BIND_VOL("Master Playback Volume", &cxt5045_hp_bind_master_vol),
 	{
 		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
@@ -910,16 +910,16 @@
 };
 
 static struct snd_kcontrol_new cxt5045_mixers_hp530[] = {
-	HDA_CODEC_VOLUME("Int Mic Capture Volume", 0x1a, 0x02, HDA_INPUT),
-	HDA_CODEC_MUTE("Int Mic Capture Switch", 0x1a, 0x02, HDA_INPUT),
-	HDA_CODEC_VOLUME("Ext Mic Capture Volume", 0x1a, 0x01, HDA_INPUT),
-	HDA_CODEC_MUTE("Ext Mic Capture Switch", 0x1a, 0x01, HDA_INPUT),
+	HDA_CODEC_VOLUME("Internal Mic Capture Volume", 0x1a, 0x02, HDA_INPUT),
+	HDA_CODEC_MUTE("Internal Mic Capture Switch", 0x1a, 0x02, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Capture Volume", 0x1a, 0x01, HDA_INPUT),
+	HDA_CODEC_MUTE("Mic Capture Switch", 0x1a, 0x01, HDA_INPUT),
 	HDA_CODEC_VOLUME("PCM Playback Volume", 0x17, 0x0, HDA_INPUT),
 	HDA_CODEC_MUTE("PCM Playback Switch", 0x17, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Int Mic Playback Volume", 0x17, 0x2, HDA_INPUT),
-	HDA_CODEC_MUTE("Int Mic Playback Switch", 0x17, 0x2, HDA_INPUT),
-	HDA_CODEC_VOLUME("Ext Mic Playback Volume", 0x17, 0x1, HDA_INPUT),
-	HDA_CODEC_MUTE("Ext Mic Playback Switch", 0x17, 0x1, HDA_INPUT),
+	HDA_CODEC_VOLUME("Internal Mic Playback Volume", 0x17, 0x2, HDA_INPUT),
+	HDA_CODEC_MUTE("Internal Mic Playback Switch", 0x17, 0x2, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Playback Volume", 0x17, 0x1, HDA_INPUT),
+	HDA_CODEC_MUTE("Mic Playback Switch", 0x17, 0x1, HDA_INPUT),
 	HDA_BIND_VOL("Master Playback Volume", &cxt5045_hp_bind_master_vol),
 	{
 		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
@@ -947,7 +947,7 @@
 	{0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(2)},
 	{0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(3)},
 	{0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(4)},
-	/* Record selector: Int mic */
+	/* Record selector: Internal mic */
 	{0x1a, AC_VERB_SET_CONNECT_SEL,0x1},
 	{0x1a, AC_VERB_SET_AMP_GAIN_MUTE,
 	 AC_AMP_SET_INPUT|AC_AMP_SET_RIGHT|AC_AMP_SET_LEFT|0x17},
@@ -960,7 +960,7 @@
 };
 
 static struct hda_verb cxt5045_benq_init_verbs[] = {
-	/* Int Mic, Mic */
+	/* Internal Mic, Mic */
 	{0x12, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN|AC_PINCTL_VREF_80 },
 	{0x14, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN|AC_PINCTL_VREF_80 },
 	/* Line In,HP, Amp  */
@@ -973,7 +973,7 @@
 	{0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(2)},
 	{0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(3)},
 	{0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(4)},
-	/* Record selector: Int mic */
+	/* Record selector: Internal mic */
 	{0x1a, AC_VERB_SET_CONNECT_SEL, 0x1},
 	{0x1a, AC_VERB_SET_AMP_GAIN_MUTE,
 	 AC_AMP_SET_INPUT|AC_AMP_SET_RIGHT|AC_AMP_SET_LEFT|0x17},
@@ -1376,7 +1376,7 @@
 static struct snd_kcontrol_new cxt5047_base_mixers[] = {
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x19, 0x02, HDA_INPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x19, 0x02, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x1a, 0x0, HDA_OUTPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x1a, 0x0, HDA_OUTPUT),
 	HDA_CODEC_VOLUME("Capture Volume", 0x12, 0x03, HDA_INPUT),
 	HDA_CODEC_MUTE("Capture Switch", 0x12, 0x03, HDA_INPUT),
 	HDA_CODEC_VOLUME("PCM Volume", 0x10, 0x00, HDA_OUTPUT),
@@ -1796,8 +1796,8 @@
 static struct snd_kcontrol_new cxt5051_capture_mixers[] = {
 	HDA_CODEC_VOLUME("Internal Mic Volume", 0x14, 0x00, HDA_INPUT),
 	HDA_CODEC_MUTE("Internal Mic Switch", 0x14, 0x00, HDA_INPUT),
-	HDA_CODEC_VOLUME("External Mic Volume", 0x14, 0x01, HDA_INPUT),
-	HDA_CODEC_MUTE("External Mic Switch", 0x14, 0x01, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Volume", 0x14, 0x01, HDA_INPUT),
+	HDA_CODEC_MUTE("Mic Switch", 0x14, 0x01, HDA_INPUT),
 	HDA_CODEC_VOLUME("Docking Mic Volume", 0x15, 0x00, HDA_INPUT),
 	HDA_CODEC_MUTE("Docking Mic Switch", 0x15, 0x00, HDA_INPUT),
 	{}
@@ -1806,8 +1806,8 @@
 static struct snd_kcontrol_new cxt5051_hp_mixers[] = {
 	HDA_CODEC_VOLUME("Internal Mic Volume", 0x14, 0x00, HDA_INPUT),
 	HDA_CODEC_MUTE("Internal Mic Switch", 0x14, 0x00, HDA_INPUT),
-	HDA_CODEC_VOLUME("External Mic Volume", 0x15, 0x00, HDA_INPUT),
-	HDA_CODEC_MUTE("External Mic Switch", 0x15, 0x00, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Volume", 0x15, 0x00, HDA_INPUT),
+	HDA_CODEC_MUTE("Mic Switch", 0x15, 0x00, HDA_INPUT),
 	{}
 };
 
@@ -1826,8 +1826,8 @@
 static struct snd_kcontrol_new cxt5051_toshiba_mixers[] = {
 	HDA_CODEC_VOLUME("Internal Mic Volume", 0x14, 0x00, HDA_INPUT),
 	HDA_CODEC_MUTE("Internal Mic Switch", 0x14, 0x00, HDA_INPUT),
-	HDA_CODEC_VOLUME("External Mic Volume", 0x14, 0x01, HDA_INPUT),
-	HDA_CODEC_MUTE("External Mic Switch", 0x14, 0x01, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Volume", 0x14, 0x01, HDA_INPUT),
+	HDA_CODEC_MUTE("Mic Switch", 0x14, 0x01, HDA_INPUT),
 	{}
 };
 
@@ -1847,7 +1847,7 @@
 	{0x16, AC_VERB_SET_CONNECT_SEL, 0x00},
 	/* DAC1 */	
 	{0x10, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
-	/* Record selector: Int mic */
+	/* Record selector: Internal mic */
 	{0x14, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0) | 0x44},
 	{0x14, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(1) | 0x44},
 	{0x15, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0) | 0x44},
@@ -1874,7 +1874,7 @@
 	{0x16, AC_VERB_SET_CONNECT_SEL, 0x00},
 	/* DAC1 */
 	{0x10, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
-	/* Record selector: Int mic */
+	/* Record selector: Internal mic */
 	{0x14, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(1) | 0x44},
 	{0x14, AC_VERB_SET_CONNECT_SEL, 0x1},
 	/* SPDIF route: PCM */
@@ -1904,7 +1904,7 @@
 	{0x19, AC_VERB_SET_CONNECT_SEL, 0x00},
 	/* DAC1 */
 	{0x10, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
-	/* Record selector: Int mic */
+	/* Record selector: Internal mic */
 	{0x14, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0) | 0x44},
 	{0x14, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(1) | 0x44},
 	{0x15, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0) | 0x44},
@@ -1932,7 +1932,7 @@
 	{0x16, AC_VERB_SET_CONNECT_SEL, 0x00},
 	/* DAC1 */
 	{0x10, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
-	/* Record selector: Int mic */
+	/* Record selector: Internal mic */
 	{0x14, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(1) | 0x44},
 	{0x14, AC_VERB_SET_CONNECT_SEL, 0x1},
 	/* SPDIF route: PCM */
@@ -2111,6 +2111,11 @@
 	{ 2, NULL },
 };
 
+#define HP_PRESENT_PORT_A	(1 << 0)
+#define HP_PRESENT_PORT_D	(1 << 1)
+#define hp_port_a_present(spec)	((spec)->hp_present & HP_PRESENT_PORT_A)
+#define hp_port_d_present(spec)	((spec)->hp_present & HP_PRESENT_PORT_D)
+
 static void cxt5066_update_speaker(struct hda_codec *codec)
 {
 	struct conexant_spec *spec = codec->spec;
@@ -2120,24 +2125,20 @@
 		    spec->hp_present, spec->cur_eapd);
 
 	/* Port A (HP) */
-	pinctl = ((spec->hp_present & 1) && spec->cur_eapd) ? PIN_HP : 0;
+	pinctl = (hp_port_a_present(spec) && spec->cur_eapd) ? PIN_HP : 0;
 	snd_hda_codec_write(codec, 0x19, 0, AC_VERB_SET_PIN_WIDGET_CONTROL,
 			pinctl);
 
 	/* Port D (HP/LO) */
-	if (spec->dell_automute) {
-		/* DELL AIO Port Rule: PortA>  PortD>  IntSpk */
-		pinctl = (!(spec->hp_present & 1) && spec->cur_eapd)
-			? PIN_OUT : 0;
-	} else if (spec->thinkpad) {
-		if (spec->cur_eapd)
-			pinctl = spec->port_d_mode;
-		/* Mute dock line-out if Port A (laptop HP) is present */
-		if (spec->hp_present&  1)
+	pinctl = spec->cur_eapd ? spec->port_d_mode : 0;
+	if (spec->dell_automute || spec->thinkpad) {
+		/* Mute if Port A is connected */
+		if (hp_port_a_present(spec))
 			pinctl = 0;
 	} else {
-		pinctl = ((spec->hp_present & 2) && spec->cur_eapd)
-			? spec->port_d_mode : 0;
+		/* Thinkpad/Dell doesn't give pin-D status */
+		if (!hp_port_d_present(spec))
+			pinctl = 0;
 	}
 	snd_hda_codec_write(codec, 0x1c, 0, AC_VERB_SET_PIN_WIDGET_CONTROL,
 			pinctl);
@@ -2379,8 +2380,8 @@
 	/* Port D */
 	portD = snd_hda_jack_detect(codec, 0x1c);
 
-	spec->hp_present = !!(portA);
-	spec->hp_present |= portD ? 2 : 0;
+	spec->hp_present = portA ? HP_PRESENT_PORT_A : 0;
+	spec->hp_present |= portD ? HP_PRESENT_PORT_D : 0;
 	snd_printdd("CXT5066: hp automute portA=%x portD=%x present=%d\n",
 		portA, portD, spec->hp_present);
 	cxt5066_update_speaker(codec);
@@ -2728,7 +2729,7 @@
 static struct snd_kcontrol_new cxt5066_vostro_mixers[] = {
 	{
 		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
-		.name = "Int Mic Boost Capture Enum",
+		.name = "Internal Mic Boost Capture Enum",
 		.info = cxt5066_mic_boost_mux_enum_info,
 		.get = cxt5066_mic_boost_mux_enum_get,
 		.put = cxt5066_mic_boost_mux_enum_put,
@@ -2954,7 +2955,7 @@
 	{0x22, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
 
 	/* internal microphone */
-	{0x23, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN}, /* enable int mic */
+	{0x23, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN}, /* enable internal mic */
 
 	/* EAPD */
 	{0x1d, AC_VERB_SET_EAPD_BTLENABLE, 0x2}, /* default on */
@@ -3009,7 +3010,7 @@
 	{0x22, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
 
 	/* internal microphone */
-	{0x23, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN}, /* enable int mic */
+	{0x23, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN}, /* enable internal mic */
 
 	/* EAPD */
 	{0x1d, AC_VERB_SET_EAPD_BTLENABLE, 0x2}, /* default on */
@@ -3097,6 +3098,7 @@
 	SND_PCI_QUIRK_MASK(0x1025, 0xff00, 0x0400, "Acer", CXT5066_IDEAPAD),
 	SND_PCI_QUIRK(0x1028, 0x02d8, "Dell Vostro", CXT5066_DELL_VOSTRO),
 	SND_PCI_QUIRK(0x1028, 0x02f5, "Dell Vostro 320", CXT5066_IDEAPAD),
+	SND_PCI_QUIRK(0x1028, 0x0401, "Dell Vostro 1014", CXT5066_DELL_VOSTRO),
 	SND_PCI_QUIRK(0x1028, 0x0402, "Dell Vostro", CXT5066_DELL_VOSTRO),
 	SND_PCI_QUIRK(0x1028, 0x0408, "Dell Inspiron One 19T", CXT5066_IDEAPAD),
 	SND_PCI_QUIRK(0x103c, 0x360b, "HP G60", CXT5066_HP_LAPTOP),
@@ -3108,16 +3110,9 @@
 		      CXT5066_LAPTOP),
 	SND_PCI_QUIRK(0x152d, 0x0833, "OLPC XO-1.5", CXT5066_OLPC_XO_1_5),
 	SND_PCI_QUIRK(0x17aa, 0x20f2, "Lenovo T400s", CXT5066_THINKPAD),
-	SND_PCI_QUIRK(0x17aa, 0x21b2, "Thinkpad X100e", CXT5066_IDEAPAD),
 	SND_PCI_QUIRK(0x17aa, 0x21c5, "Thinkpad Edge 13", CXT5066_THINKPAD),
-	SND_PCI_QUIRK(0x17aa, 0x21b3, "Thinkpad Edge 13 (197)", CXT5066_IDEAPAD),
-	SND_PCI_QUIRK(0x17aa, 0x21b4, "Thinkpad Edge", CXT5066_IDEAPAD),
-	SND_PCI_QUIRK(0x17aa, 0x21c8, "Thinkpad Edge 11", CXT5066_IDEAPAD),
  	SND_PCI_QUIRK(0x17aa, 0x215e, "Lenovo Thinkpad", CXT5066_THINKPAD),
- 	SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo G series", CXT5066_IDEAPAD),
-	SND_PCI_QUIRK(0x17aa, 0x390a, "Lenovo S10-3t", CXT5066_IDEAPAD),
-	SND_PCI_QUIRK(0x17aa, 0x3938, "Lenovo G series (AMD)", CXT5066_IDEAPAD),
-	SND_PCI_QUIRK(0x17aa, 0x3a0d, "ideapad", CXT5066_IDEAPAD),
+	SND_PCI_QUIRK_VENDOR(0x17aa, "Lenovo", CXT5066_IDEAPAD), /* Fallback for Lenovos without dock mic */
 	{}
 };
 
@@ -3422,6 +3417,9 @@
 				    AC_VERB_SET_PIN_WIDGET_CONTROL,
 				    present ? 0 : PIN_OUT);
 	}
+	for (i = 0; !present && i < cfg->line_outs; i++)
+		if (snd_hda_jack_detect(codec, cfg->line_out_pins[i]))
+			present = 1;
 	for (i = 0; i < cfg->speaker_outs; i++) {
 		snd_hda_codec_write(codec, cfg->speaker_pins[i], 0,
 				    AC_VERB_SET_PIN_WIDGET_CONTROL,
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index 31df774..f29b97b 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -31,10 +31,15 @@
 #include <linux/init.h>
 #include <linux/delay.h>
 #include <linux/slab.h>
+#include <linux/moduleparam.h>
 #include <sound/core.h>
 #include "hda_codec.h"
 #include "hda_local.h"
 
+static bool static_hdmi_pcm;
+module_param(static_hdmi_pcm, bool, 0644);
+MODULE_PARM_DESC(static_hdmi_pcm, "Don't restrict PCM parameters per ELD info");
+
 /*
  * The HDMI/DisplayPort configuration can be highly dynamic. A graphics device
  * could support two independent pipes, each of them can be connected to one or
@@ -827,7 +832,7 @@
 		*codec_pars = *hinfo;
 
 	eld = &spec->sink_eld[idx];
-	if (eld->sad_count > 0) {
+	if (!static_hdmi_pcm && eld->eld_valid && eld->sad_count > 0) {
 		hdmi_eld_update_pcm_info(eld, hinfo, codec_pars);
 		if (hinfo->channels_min > hinfo->channels_max ||
 		    !hinfo->rates || !hinfo->formats)
@@ -904,23 +909,28 @@
 	spec->pin[spec->num_pins] = pin_nid;
 	spec->num_pins++;
 
-	/*
-	 * It is assumed that converter nodes come first in the node list and
-	 * hence have been registered and usable now.
-	 */
 	return hdmi_read_pin_conn(codec, pin_nid);
 }
 
 static int hdmi_add_cvt(struct hda_codec *codec, hda_nid_t nid)
 {
+	int i, found_pin = 0;
 	struct hdmi_spec *spec = codec->spec;
 
-	if (spec->num_cvts >= MAX_HDMI_CVTS) {
-		snd_printk(KERN_WARNING
-			   "HDMI: no space for converter %d\n", nid);
-		return -E2BIG;
+	for (i = 0; i < spec->num_pins; i++)
+		if (nid == spec->pin_cvt[i]) {
+			found_pin = 1;
+			break;
+		}
+
+	if (!found_pin) {
+		snd_printdd("HDMI: Skipping node %d (no connection)\n", nid);
+		return -EINVAL;
 	}
 
+	if (snd_BUG_ON(spec->num_cvts >= MAX_HDMI_CVTS))
+		return -E2BIG;
+
 	spec->cvt[spec->num_cvts] = nid;
 	spec->num_cvts++;
 
@@ -931,6 +941,8 @@
 {
 	hda_nid_t nid;
 	int i, nodes;
+	int num_tmp_cvts = 0;
+	hda_nid_t tmp_cvt[MAX_HDMI_CVTS];
 
 	nodes = snd_hda_get_sub_nodes(codec, codec->afg, &nid);
 	if (!nid || nodes < 0) {
@@ -941,6 +953,7 @@
 	for (i = 0; i < nodes; i++, nid++) {
 		unsigned int caps;
 		unsigned int type;
+		unsigned int config;
 
 		caps = snd_hda_param_read(codec, nid, AC_PAR_AUDIO_WIDGET_CAP);
 		type = get_wcaps_type(caps);
@@ -950,17 +963,32 @@
 
 		switch (type) {
 		case AC_WID_AUD_OUT:
-			hdmi_add_cvt(codec, nid);
+			if (num_tmp_cvts >= MAX_HDMI_CVTS) {
+				snd_printk(KERN_WARNING
+					   "HDMI: no space for converter %d\n", nid);
+				continue;
+			}
+			tmp_cvt[num_tmp_cvts] = nid;
+			num_tmp_cvts++;
 			break;
 		case AC_WID_PIN:
 			caps = snd_hda_param_read(codec, nid, AC_PAR_PIN_CAP);
 			if (!(caps & (AC_PINCAP_HDMI | AC_PINCAP_DP)))
 				continue;
+
+			config = snd_hda_codec_read(codec, nid, 0,
+					     AC_VERB_GET_CONFIG_DEFAULT, 0);
+			if (get_defcfg_connect(config) == AC_JACK_PORT_NONE)
+				continue;
+
 			hdmi_add_pin(codec, nid);
 			break;
 		}
 	}
 
+	for (i = 0; i < num_tmp_cvts; i++)
+		hdmi_add_cvt(codec, tmp_cvt[i]);
+
 	/*
 	 * G45/IbexPeak don't support EPSS: the unsolicited pin hot plug event
 	 * can be lost and presence sense verb will become inaccurate if the
@@ -1165,11 +1193,53 @@
 	return 0;
 }
 
+static unsigned int channels_2_6_8[] = {
+	2, 6, 8
+};
+
+static unsigned int channels_2_8[] = {
+	2, 8
+};
+
+static struct snd_pcm_hw_constraint_list hw_constraints_2_6_8_channels = {
+	.count = ARRAY_SIZE(channels_2_6_8),
+	.list = channels_2_6_8,
+	.mask = 0,
+};
+
+static struct snd_pcm_hw_constraint_list hw_constraints_2_8_channels = {
+	.count = ARRAY_SIZE(channels_2_8),
+	.list = channels_2_8,
+	.mask = 0,
+};
+
 static int simple_playback_pcm_open(struct hda_pcm_stream *hinfo,
 				    struct hda_codec *codec,
 				    struct snd_pcm_substream *substream)
 {
 	struct hdmi_spec *spec = codec->spec;
+	struct snd_pcm_hw_constraint_list *hw_constraints_channels = NULL;
+
+	switch (codec->preset->id) {
+	case 0x10de0002:
+	case 0x10de0003:
+	case 0x10de0005:
+	case 0x10de0006:
+		hw_constraints_channels = &hw_constraints_2_8_channels;
+		break;
+	case 0x10de0007:
+		hw_constraints_channels = &hw_constraints_2_6_8_channels;
+		break;
+	default:
+		break;
+	}
+
+	if (hw_constraints_channels != NULL) {
+		snd_pcm_hw_constraint_list(substream->runtime, 0,
+				SNDRV_PCM_HW_PARAM_CHANNELS,
+				hw_constraints_channels);
+	}
+
 	return snd_hda_multi_out_dig_open(codec, &spec->multiout);
 }
 
@@ -1532,7 +1602,7 @@
 { .id = 0x1002793c, .name = "RS600 HDMI",	.patch = patch_atihdmi },
 { .id = 0x10027919, .name = "RS600 HDMI",	.patch = patch_atihdmi },
 { .id = 0x1002791a, .name = "RS690/780 HDMI",	.patch = patch_atihdmi },
-{ .id = 0x1002aa01, .name = "R6xx HDMI",	.patch = patch_atihdmi },
+{ .id = 0x1002aa01, .name = "R6xx HDMI",	.patch = patch_generic_hdmi },
 { .id = 0x10951390, .name = "SiI1390 HDMI",	.patch = patch_generic_hdmi },
 { .id = 0x10951392, .name = "SiI1392 HDMI",	.patch = patch_generic_hdmi },
 { .id = 0x17e80047, .name = "Chrontel HDMI",	.patch = patch_generic_hdmi },
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 552a09e..51c08ed 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -231,7 +231,6 @@
 	ALC888_ACER_ASPIRE_8930G,
 	ALC888_ACER_ASPIRE_7730G,
 	ALC883_MEDION,
-	ALC883_MEDION_MD2,
 	ALC883_MEDION_WIM2160,
 	ALC883_LAPTOP_EAPD,
 	ALC883_LENOVO_101E_2ch,
@@ -1678,29 +1677,32 @@
 	u32 val;
 };
 
+struct alc_model_fixup {
+	const int id;
+	const char *name;
+};
+
 struct alc_fixup {
 	unsigned int sku;
 	const struct alc_pincfg *pins;
 	const struct hda_verb *verbs;
+	void (*func)(struct hda_codec *codec, const struct alc_fixup *fix,
+		     int pre_init);
 };
 
-static void alc_pick_fixup(struct hda_codec *codec,
-			   const struct snd_pci_quirk *quirk,
-			   const struct alc_fixup *fix,
-			   int pre_init)
+static void __alc_pick_fixup(struct hda_codec *codec,
+			     const struct alc_fixup *fix,
+			     const char *modelname,
+			     int pre_init)
 {
 	const struct alc_pincfg *cfg;
 	struct alc_spec *spec;
 
-	quirk = snd_pci_quirk_lookup(codec->bus->pci, quirk);
-	if (!quirk)
-		return;
-	fix += quirk->value;
 	cfg = fix->pins;
 	if (pre_init && fix->sku) {
 #ifdef CONFIG_SND_DEBUG_VERBOSE
 		snd_printdd(KERN_INFO "hda_codec: %s: Apply sku override for %s\n",
-			    codec->chip_name, quirk->name);
+			    codec->chip_name, modelname);
 #endif
 		spec = codec->spec;
 		spec->cdefine.sku_cfg = fix->sku;
@@ -1709,7 +1711,7 @@
 	if (pre_init && cfg) {
 #ifdef CONFIG_SND_DEBUG_VERBOSE
 		snd_printdd(KERN_INFO "hda_codec: %s: Apply pincfg for %s\n",
-			    codec->chip_name, quirk->name);
+			    codec->chip_name, modelname);
 #endif
 		for (; cfg->nid; cfg++)
 			snd_hda_codec_set_pincfg(codec, cfg->nid, cfg->val);
@@ -1717,10 +1719,53 @@
 	if (!pre_init && fix->verbs) {
 #ifdef CONFIG_SND_DEBUG_VERBOSE
 		snd_printdd(KERN_INFO "hda_codec: %s: Apply fix-verbs for %s\n",
-			    codec->chip_name, quirk->name);
+			    codec->chip_name, modelname);
 #endif
 		add_verb(codec->spec, fix->verbs);
 	}
+	if (fix->func) {
+#ifdef CONFIG_SND_DEBUG_VERBOSE
+		snd_printdd(KERN_INFO "hda_codec: %s: Apply fix-func for %s\n",
+			    codec->chip_name, modelname);
+#endif
+		fix->func(codec, fix, pre_init);
+	}
+}
+
+static void alc_pick_fixup(struct hda_codec *codec,
+				 const struct snd_pci_quirk *quirk,
+				 const struct alc_fixup *fix,
+				 int pre_init)
+{
+	quirk = snd_pci_quirk_lookup(codec->bus->pci, quirk);
+	if (quirk) {
+		fix += quirk->value;
+#ifdef CONFIG_SND_DEBUG_VERBOSE
+		__alc_pick_fixup(codec, fix, quirk->name, pre_init);
+#else
+		__alc_pick_fixup(codec, fix, NULL, pre_init);
+#endif
+	}
+}
+
+static void alc_pick_fixup_model(struct hda_codec *codec,
+				 const struct alc_model_fixup *models,
+				 const struct snd_pci_quirk *quirk,
+				 const struct alc_fixup *fix,
+				 int pre_init)
+{
+	if (codec->modelname && models) {
+		while (models->name) {
+			if (!strcmp(codec->modelname, models->name)) {
+				fix += models->id;
+				break;
+			}
+			models++;
+		}
+		__alc_pick_fixup(codec, fix, codec->modelname, pre_init);
+	} else {
+		alc_pick_fixup(codec, quirk, fix, pre_init);
+	}
 }
 
 static int alc_read_coef_idx(struct hda_codec *codec,
@@ -1981,6 +2026,7 @@
 	{0x15, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
 	{0x15, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
 	{0x15, AC_VERB_SET_CONNECT_SEL, 0x00},
+	{0x15, AC_VERB_SET_EAPD_BTLENABLE, 2},
 	{ }
 };
 
@@ -2120,17 +2166,17 @@
 	{
 		.num_items = 5,
 		.items = {
-			{ "Ext Mic", 0x0 },
+			{ "Mic", 0x0 },
 			{ "Line In", 0x2 },
 			{ "CD", 0x4 },
 			{ "Input Mix", 0xa },
-			{ "Int Mic", 0xb },
+			{ "Internal Mic", 0xb },
 		},
 	},
 	{
 		.num_items = 4,
 		.items = {
-			{ "Ext Mic", 0x0 },
+			{ "Mic", 0x0 },
 			{ "Line In", 0x2 },
 			{ "CD", 0x4 },
 			{ "Input Mix", 0xa },
@@ -2187,7 +2233,7 @@
 	HDA_CODEC_VOLUME("Line Playback Volume", 0x0b, 0x02, HDA_INPUT),
 	HDA_CODEC_MUTE("Line Playback Switch", 0x0b, 0x02, HDA_INPUT),
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
 	{ } /* end */
 };
@@ -2205,7 +2251,7 @@
 	HDA_CODEC_VOLUME("Line Playback Volume", 0x0b, 0x02, HDA_INPUT),
 	HDA_CODEC_MUTE("Line Playback Switch", 0x0b, 0x02, HDA_INPUT),
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
 	{ } /* end */
 };
@@ -2796,10 +2842,10 @@
 	HDA_BIND_MUTE("Speaker Playback Switch", 0x0d, 2, HDA_INPUT),
 	HDA_CODEC_VOLUME("CD Playback Volume", 0x0b, 0x04, HDA_INPUT),
 	HDA_CODEC_MUTE("CD Playback Switch", 0x0b, 0x04, HDA_INPUT),
-	HDA_CODEC_VOLUME("Ext Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_MUTE("Ext Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Int Mic Playback Volume", 0x0b, 0x1, HDA_INPUT),
-	HDA_CODEC_MUTE("Int Mic Playback Switch", 0x0b, 0x1, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
+	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Internal Mic Playback Volume", 0x0b, 0x1, HDA_INPUT),
+	HDA_CODEC_MUTE("Internal Mic Playback Switch", 0x0b, 0x1, HDA_INPUT),
 	{ } /* end */
 };
 
@@ -3307,7 +3353,7 @@
 };
 
 /* auto-toggle front mic */
-static void alc880_uniwill_mic_automute(struct hda_codec *codec)
+static void alc88x_simple_mic_automute(struct hda_codec *codec)
 {
  	unsigned int present;
 	unsigned char bits;
@@ -3329,7 +3375,7 @@
 static void alc880_uniwill_init_hook(struct hda_codec *codec)
 {
 	alc_automute_amp(codec);
-	alc880_uniwill_mic_automute(codec);
+	alc88x_simple_mic_automute(codec);
 }
 
 static void alc880_uniwill_unsol_event(struct hda_codec *codec,
@@ -3340,7 +3386,7 @@
 	 */
 	switch (res >> 28) {
 	case ALC880_MIC_EVENT:
-		alc880_uniwill_mic_automute(codec);
+		alc88x_simple_mic_automute(codec);
 		break;
 	default:
 		alc_automute_amp_unsol_event(codec, res);
@@ -5023,6 +5069,25 @@
 	return 0;
 }
 
+static const char *alc_get_line_out_pfx(const struct auto_pin_cfg *cfg,
+					bool can_be_master)
+{
+	if (!cfg->hp_outs && !cfg->speaker_outs && can_be_master)
+		return "Master";
+
+	switch (cfg->line_out_type) {
+	case AUTO_PIN_SPEAKER_OUT:
+		return "Speaker";
+	case AUTO_PIN_HP_OUT:
+		return "Headphone";
+	default:
+		if (cfg->line_outs == 1)
+			return "PCM";
+		break;
+	}
+	return NULL;
+}
+
 /* add playback controls from the parsed DAC table */
 static int alc880_auto_create_multi_out_ctls(struct alc_spec *spec,
 					     const struct auto_pin_cfg *cfg)
@@ -5030,6 +5095,7 @@
 	static const char *chname[4] = {
 		"Front", "Surround", NULL /*CLFE*/, "Side"
 	};
+	const char *pfx = alc_get_line_out_pfx(cfg, false);
 	hda_nid_t nid;
 	int i, err;
 
@@ -5037,7 +5103,7 @@
 		if (!spec->multiout.dac_nids[i])
 			continue;
 		nid = alc880_idx_to_mixer(alc880_dac_to_idx(spec->multiout.dac_nids[i]));
-		if (i == 2) {
+		if (!pfx && i == 2) {
 			/* Center/LFE */
 			err = add_pb_vol_ctrl(spec, ALC_CTL_WIDGET_VOL,
 					      "Center",
@@ -5064,18 +5130,17 @@
 			if (err < 0)
 				return err;
 		} else {
-			const char *pfx;
-			if (cfg->line_outs == 1 &&
-			    cfg->line_out_type == AUTO_PIN_SPEAKER_OUT)
-				pfx = "Speaker";
-			else
-				pfx = chname[i];
-			err = add_pb_vol_ctrl(spec, ALC_CTL_WIDGET_VOL, pfx,
+			const char *name = pfx;
+			if (!name)
+				name = chname[i];
+			err = __add_pb_vol_ctrl(spec, ALC_CTL_WIDGET_VOL,
+						name, i,
 					  HDA_COMPOSE_AMP_VAL(nid, 3, 0,
 							      HDA_OUTPUT));
 			if (err < 0)
 				return err;
-			err = add_pb_sw_ctrl(spec, ALC_CTL_BIND_MUTE, pfx,
+			err = __add_pb_sw_ctrl(spec, ALC_CTL_BIND_MUTE,
+					       name, i,
 					  HDA_COMPOSE_AMP_VAL(nid, 3, 2,
 							      HDA_INPUT));
 			if (err < 0)
@@ -5155,7 +5220,8 @@
 {
 	struct alc_spec *spec = codec->spec;
 	struct hda_input_mux *imux = &spec->private_imux[0];
-	int i, err, idx, type, type_idx = 0;
+	int i, err, idx, type_idx = 0;
+	const char *prev_label = NULL;
 
 	for (i = 0; i < cfg->num_inputs; i++) {
 		hda_nid_t pin;
@@ -5165,12 +5231,13 @@
 		if (!alc_is_input_pin(codec, pin))
 			continue;
 
-		type = cfg->inputs[i].type;
-		if (i > 0 && type == cfg->inputs[i - 1].type)
+		label = hda_get_autocfg_input_label(codec, cfg, i);
+		if (prev_label && !strcmp(label, prev_label))
 			type_idx++;
 		else
 			type_idx = 0;
-		label = hda_get_autocfg_input_label(codec, cfg, i);
+		prev_label = label;
+
 		if (mixer) {
 			idx = get_connection_index(codec, mixer, pin);
 			if (idx >= 0) {
@@ -7406,7 +7473,7 @@
 	.num_items = 4,
 	.items = {
 		{ "Mic", 0x0 },
-		{ "Int Mic", 0x1 },
+		{ "Internal Mic", 0x1 },
 		{ "Line", 0x2 },
 		{ "CD", 0x4 },
 	},
@@ -7416,7 +7483,7 @@
 	.num_items = 2,
 	.items = {
 		{ "Mic", 0x0 },
-		{ "Int Mic", 0x1 },
+		{ "Internal Mic", 0x1 },
 	},
 };
 
@@ -7851,10 +7918,10 @@
 	HDA_CODEC_VOLUME("Line Playback Volume", 0x0b, 0x02, HDA_INPUT),
 	HDA_CODEC_MUTE("Line Playback Switch", 0x0b, 0x02, HDA_INPUT),
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
 	HDA_CODEC_VOLUME("Front Mic Playback Volume", 0x0b, 0x1, HDA_INPUT),
-	HDA_CODEC_VOLUME("Front Mic Boost", 0x19, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Front Mic Boost Volume", 0x19, 0, HDA_INPUT),
 	HDA_CODEC_MUTE("Front Mic Playback Switch", 0x0b, 0x1, HDA_INPUT),
 	{ } /* end */
 };
@@ -7878,8 +7945,8 @@
 	HDA_CODEC_MUTE  ("Line Playback Switch", 0x0b, 0x02, HDA_INPUT),
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x00, HDA_INPUT),
 	HDA_CODEC_MUTE  ("Mic Playback Switch", 0x0b, 0x00, HDA_INPUT),
-	HDA_CODEC_VOLUME("Line Boost", 0x1a, 0x00, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0x00, HDA_INPUT),
+	HDA_CODEC_VOLUME("Line Boost Volume", 0x1a, 0x00, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0x00, HDA_INPUT),
 	{ } /* end */
 };
 
@@ -7896,8 +7963,8 @@
 	HDA_CODEC_MUTE  ("Line Playback Switch", 0x0b, 0x07, HDA_INPUT),
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x01, HDA_INPUT),
 	HDA_CODEC_MUTE  ("Mic Playback Switch", 0x0b, 0x01, HDA_INPUT),
-	HDA_CODEC_VOLUME("Line Boost", 0x15, 0x00, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x19, 0x00, HDA_INPUT),
+	HDA_CODEC_VOLUME("Line Boost Volume", 0x15, 0x00, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x19, 0x00, HDA_INPUT),
 	{ } /* end */
 };
 
@@ -7912,7 +7979,7 @@
 	HDA_BIND_MUTE   ("Headphone Playback Switch", 0x0f, 0x02, HDA_INPUT),
 	HDA_CODEC_VOLUME("Line Playback Volume", 0x0b, 0x07, HDA_INPUT),
 	HDA_CODEC_MUTE  ("Line Playback Switch", 0x0b, 0x07, HDA_INPUT),
-	HDA_CODEC_VOLUME("Line Boost", 0x15, 0x00, HDA_INPUT),
+	HDA_CODEC_VOLUME("Line Boost Volume", 0x15, 0x00, HDA_INPUT),
 	{ } /* end */
 };
 
@@ -7931,7 +7998,7 @@
 	HDA_CODEC_VOLUME("Line Playback Volume", 0x0b, 0x02, HDA_INPUT),
 	HDA_CODEC_MUTE("Line Playback Switch", 0x0b, 0x02, HDA_INPUT),
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
 	{ } /* end */
 };
@@ -7946,10 +8013,10 @@
 	HDA_CODEC_MUTE("Line Playback Switch", 0x0b, 0x02, HDA_INPUT),
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
 	HDA_CODEC_VOLUME("Front Mic Playback Volume", 0x0b, 0x1, HDA_INPUT),
 	HDA_CODEC_MUTE("Front Mic Playback Switch", 0x0b, 0x1, HDA_INPUT),
-	HDA_CODEC_VOLUME("Front Mic Boost", 0x19, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Front Mic Boost Volume", 0x19, 0, HDA_INPUT),
 	{ } /* end */
 };
 
@@ -7969,7 +8036,7 @@
 	HDA_CODEC_MUTE("Mobile Line Playback Switch", 0x0b, 0x03, HDA_INPUT),
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
 	{ } /* end */
 };
 
@@ -7982,7 +8049,7 @@
 	HDA_CODEC_VOLUME("Line Playback Volume", 0x0b, 0x02, HDA_INPUT),
 	HDA_CODEC_MUTE("Line Playback Switch", 0x0b, 0x02, HDA_INPUT),
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
 	{ } /* end */
 };
@@ -8763,10 +8830,10 @@
 	HDA_BIND_MUTE_MONO("LFE Playback Switch", 0x0e, 2, 2, HDA_INPUT),
 	HDA_CODEC_MUTE("Headphone Playback Switch", 0x15, 0x0, HDA_OUTPUT),
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
 	HDA_CODEC_VOLUME("Front Mic Playback Volume", 0x0b, 0x1, HDA_INPUT),
-	HDA_CODEC_VOLUME("Front Mic Boost", 0x19, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Front Mic Boost Volume", 0x19, 0, HDA_INPUT),
 	HDA_CODEC_MUTE("Front Mic Playback Switch", 0x0b, 0x1, HDA_INPUT),
 	{ } /* end */
 };
@@ -8777,11 +8844,11 @@
 	HDA_CODEC_VOLUME("Speaker Playback Volume", 0x0d, 0x0, HDA_OUTPUT),
 	HDA_BIND_MUTE("Speaker Playback Switch", 0x0d, 2, HDA_INPUT),
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Int Mic Playback Volume", 0x0b, 0x1, HDA_INPUT),
-	HDA_CODEC_VOLUME("Int Mic Boost", 0x19, 0, HDA_INPUT),
-	HDA_CODEC_MUTE("Int Mic Playback Switch", 0x0b, 0x1, HDA_INPUT),
+	HDA_CODEC_VOLUME("Internal Mic Playback Volume", 0x0b, 0x1, HDA_INPUT),
+	HDA_CODEC_VOLUME("Internal Mic Boost Volume", 0x19, 0, HDA_INPUT),
+	HDA_CODEC_MUTE("Internal Mic Playback Switch", 0x0b, 0x1, HDA_INPUT),
 	{ } /* end */
 };
 
@@ -8791,11 +8858,11 @@
 	HDA_CODEC_VOLUME("Speaker Playback Volume", 0x0d, 0x0, HDA_OUTPUT),
 	HDA_BIND_MUTE("Speaker Playback Switch", 0x0d, 2, HDA_INPUT),
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Int Mic Playback Volume", 0x0b, 0x1, HDA_INPUT),
-	HDA_CODEC_VOLUME("Int Mic Boost", 0x19, 0, HDA_INPUT),
-	HDA_CODEC_MUTE("Int Mic Playback Switch", 0x0b, 0x1, HDA_INPUT),
+	HDA_CODEC_VOLUME("Internal Mic Playback Volume", 0x0b, 0x1, HDA_INPUT),
+	HDA_CODEC_VOLUME("Internal Mic Boost Volume", 0x19, 0, HDA_INPUT),
+	HDA_CODEC_MUTE("Internal Mic Playback Switch", 0x0b, 0x1, HDA_INPUT),
 	{ } /* end */
 };
 
@@ -8808,10 +8875,10 @@
 	HDA_CODEC_VOLUME("Line Playback Volume", 0x0b, 0x02, HDA_INPUT),
 	HDA_CODEC_MUTE("Line Playback Switch", 0x0b, 0x02, HDA_INPUT),
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
 	HDA_CODEC_VOLUME("Front Mic Playback Volume", 0x0b, 0x1, HDA_INPUT),
-	HDA_CODEC_VOLUME("Front Mic Boost", 0x19, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Front Mic Boost Volume", 0x19, 0, HDA_INPUT),
 	HDA_CODEC_MUTE("Front Mic Playback Switch", 0x0b, 0x1, HDA_INPUT),
 	{ } /* end */
 };
@@ -8831,10 +8898,10 @@
 	HDA_CODEC_VOLUME("Line Playback Volume", 0x0b, 0x02, HDA_INPUT),
 	HDA_CODEC_MUTE("Line Playback Switch", 0x0b, 0x02, HDA_INPUT),
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
 	HDA_CODEC_VOLUME("Front Mic Playback Volume", 0x0b, 0x1, HDA_INPUT),
-	HDA_CODEC_VOLUME("Front Mic Boost", 0x19, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Front Mic Boost Volume", 0x19, 0, HDA_INPUT),
 	HDA_CODEC_MUTE("Front Mic Playback Switch", 0x0b, 0x1, HDA_INPUT),
 	{ } /* end */
 };
@@ -8855,10 +8922,10 @@
 	HDA_CODEC_VOLUME("Line Playback Volume", 0x0b, 0x02, HDA_INPUT),
 	HDA_CODEC_MUTE("Line Playback Switch", 0x0b, 0x02, HDA_INPUT),
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x1, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x19, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x19, 0, HDA_INPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x1, HDA_INPUT),
 	HDA_CODEC_VOLUME("Front Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Front Mic Boost", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Front Mic Boost Volume", 0x18, 0, HDA_INPUT),
 	HDA_CODEC_MUTE("Front Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
 	{ } /* end */
 };
@@ -8879,10 +8946,10 @@
 	HDA_CODEC_VOLUME("Line Playback Volume", 0x0b, 0x02, HDA_INPUT),
 	HDA_CODEC_MUTE("Line Playback Switch", 0x0b, 0x02, HDA_INPUT),
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x3, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x1b, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x1b, 0, HDA_INPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x3, HDA_INPUT),
 	HDA_CODEC_VOLUME("Front Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Front Mic Boost", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Front Mic Boost Volume", 0x18, 0, HDA_INPUT),
 	HDA_CODEC_MUTE("Front Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
 	{ } /* end */
 };
@@ -8902,10 +8969,10 @@
 	HDA_CODEC_VOLUME("Line Playback Volume", 0x0b, 0x02, HDA_INPUT),
 	HDA_CODEC_MUTE("Line Playback Switch", 0x0b, 0x02, HDA_INPUT),
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
 	HDA_CODEC_VOLUME("Front Mic Playback Volume", 0x0b, 0x1, HDA_INPUT),
-	HDA_CODEC_VOLUME("Front Mic Boost", 0x19, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Front Mic Boost Volume", 0x19, 0, HDA_INPUT),
 	HDA_CODEC_MUTE("Front Mic Playback Switch", 0x0b, 0x1, HDA_INPUT),
 	{ } /* end */
 };
@@ -8926,7 +8993,7 @@
 	HDA_CODEC_VOLUME("Line Playback Volume", 0x0b, 0x02, HDA_INPUT),
 	HDA_CODEC_MUTE("Line Playback Switch", 0x0b, 0x02, HDA_INPUT),
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
 	{ } /* end */
 };
@@ -8939,20 +9006,20 @@
 	HDA_CODEC_VOLUME("CD Playback Volume", 0x0b, 0x04, HDA_INPUT),
 	HDA_CODEC_MUTE("CD Playback Switch", 0x0b, 0x04, HDA_INPUT),
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Int Mic Playback Volume", 0x0b, 0x1, HDA_INPUT),
-	HDA_CODEC_VOLUME("Int Mic Boost", 0x19, 0, HDA_INPUT),
-	HDA_CODEC_MUTE("Int Mic Playback Switch", 0x0b, 0x1, HDA_INPUT),
+	HDA_CODEC_VOLUME("Internal Mic Playback Volume", 0x0b, 0x1, HDA_INPUT),
+	HDA_CODEC_VOLUME("Internal Mic Boost Volume", 0x19, 0, HDA_INPUT),
+	HDA_CODEC_MUTE("Internal Mic Playback Switch", 0x0b, 0x1, HDA_INPUT),
 	{ } /* end */
 };
 
 static struct snd_kcontrol_new alc883_targa_8ch_mixer[] = {
 	HDA_CODEC_VOLUME("Side Playback Volume", 0x0f, 0x0, HDA_OUTPUT),
 	HDA_BIND_MUTE("Side Playback Switch", 0x0f, 2, HDA_INPUT),
-	HDA_CODEC_VOLUME("Int Mic Playback Volume", 0x0b, 0x1, HDA_INPUT),
-	HDA_CODEC_VOLUME("Int Mic Boost", 0x19, 0, HDA_INPUT),
-	HDA_CODEC_MUTE("Int Mic Playback Switch", 0x0b, 0x1, HDA_INPUT),
+	HDA_CODEC_VOLUME("Internal Mic Playback Volume", 0x0b, 0x1, HDA_INPUT),
+	HDA_CODEC_VOLUME("Internal Mic Boost Volume", 0x19, 0, HDA_INPUT),
+	HDA_CODEC_MUTE("Internal Mic Playback Switch", 0x0b, 0x1, HDA_INPUT),
 	{ } /* end */
 };
 
@@ -8963,7 +9030,7 @@
 	HDA_BIND_MUTE("Speaker Playback Switch", 0x0d, 2, HDA_INPUT),
 	HDA_CODEC_MUTE("Headphone Playback Switch", 0x1b, 0x0, HDA_OUTPUT),
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x1, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x1, HDA_INPUT),
 	{ } /* end */
 };
@@ -8976,21 +9043,8 @@
 	HDA_CODEC_MUTE("CD Playback Switch", 0x0b, 0x04, HDA_INPUT),
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Int Mic Playback Volume", 0x0b, 0x1, HDA_INPUT),
-	HDA_CODEC_MUTE("Int Mic Playback Switch", 0x0b, 0x1, HDA_INPUT),
-	{ } /* end */
-};
-
-static struct snd_kcontrol_new alc883_medion_md2_mixer[] = {
-	HDA_CODEC_VOLUME("Front Playback Volume", 0x0c, 0x0, HDA_OUTPUT),
-	HDA_CODEC_MUTE("Headphone Playback Switch", 0x14, 0x0, HDA_OUTPUT),
-	HDA_CODEC_MUTE("Front Playback Switch", 0x15, 0x0, HDA_OUTPUT),
-	HDA_CODEC_VOLUME("CD Playback Volume", 0x0b, 0x04, HDA_INPUT),
-	HDA_CODEC_MUTE("CD Playback Switch", 0x0b, 0x04, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Line Playback Volume", 0x0b, 0x02, HDA_INPUT),
-	HDA_CODEC_MUTE("Line Playback Switch", 0x0b, 0x02, HDA_INPUT),
+	HDA_CODEC_VOLUME("Internal Mic Playback Volume", 0x0b, 0x1, HDA_INPUT),
+	HDA_CODEC_MUTE("Internal Mic Playback Switch", 0x0b, 0x1, HDA_INPUT),
 	{ } /* end */
 };
 
@@ -9037,7 +9091,7 @@
 	HDA_CODEC_VOLUME("Line Playback Volume", 0x0b, 0x02, HDA_INPUT),
 	HDA_CODEC_MUTE("Line Playback Switch", 0x0b, 0x02, HDA_INPUT),
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
 	{ } /* end */
 };
@@ -9050,7 +9104,7 @@
 	HDA_CODEC_VOLUME("CD Playback Volume", 0x0b, 0x04, HDA_INPUT),
 	HDA_CODEC_MUTE("CD Playback Switch", 0x0b, 0x04, HDA_INPUT),
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
 	{ } /* end */
 };
@@ -9072,10 +9126,10 @@
 	HDA_CODEC_VOLUME("Line Playback Volume", 0x0b, 0x02, HDA_INPUT),
 	HDA_CODEC_MUTE("Line Playback Switch", 0x0b, 0x02, HDA_INPUT),
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
 	HDA_CODEC_VOLUME("Front Mic Playback Volume", 0x0b, 0x1, HDA_INPUT),
-	HDA_CODEC_VOLUME("Front Mic Boost", 0x19, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Front Mic Boost Volume", 0x19, 0, HDA_INPUT),
 	HDA_CODEC_MUTE("Front Mic Playback Switch", 0x0b, 0x1, HDA_INPUT),
 	{ } /* end */
 };
@@ -9096,8 +9150,8 @@
 	HDA_CODEC_MUTE("Enable Headphones", 0x15, 0x00, HDA_OUTPUT),
 	HDA_CODEC_MUTE_MONO("Enable LFE", 0x16, 2, 0x00, HDA_OUTPUT),
 	/* Boost mixers */
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0x00, HDA_INPUT),
-	HDA_CODEC_VOLUME("Line Boost", 0x1a, 0x00, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0x00, HDA_INPUT),
+	HDA_CODEC_VOLUME("Line Boost Volume", 0x1a, 0x00, HDA_INPUT),
 	/* Input mixers */
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x00, HDA_INPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x00, HDA_INPUT),
@@ -9111,7 +9165,7 @@
 	HDA_BIND_MUTE("Front Playback Switch", 0x0c, 2, HDA_INPUT),
 	HDA_CODEC_MUTE("Headphone Playback Switch", 0x15, 0x0, HDA_OUTPUT),
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x1, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x19, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x19, 0, HDA_INPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x1, HDA_INPUT),
 	{ } /* end */
 };
@@ -9141,7 +9195,7 @@
 	HDA_CODEC_VOLUME("Line Playback Volume", 0x0b, 0x02, HDA_INPUT),
 	HDA_CODEC_MUTE("Line Playback Switch", 0x0b, 0x02, HDA_INPUT),
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
 	{ } /* end */
 };
@@ -9182,16 +9236,6 @@
 	spec->autocfg.speaker_pins[1] = 0x17;
 }
 
-/* auto-toggle front mic */
-/*
-static void alc883_mitac_mic_automute(struct hda_codec *codec)
-{
-	unsigned char bits = snd_hda_jack_detect(codec, 0x18) ? HDA_AMP_MUTE : 0;
-
-	snd_hda_codec_amp_stereo(codec, 0x0b, HDA_INPUT, 1, HDA_AMP_MUTE, bits);
-}
-*/
-
 static struct hda_verb alc883_mitac_verbs[] = {
 	/* HP */
 	{0x15, AC_VERB_SET_CONNECT_SEL, 0x00},
@@ -9435,18 +9479,8 @@
 		alc888_lenovo_ms7195_rca_automute(codec);
 }
 
-static struct hda_verb alc883_medion_md2_verbs[] = {
-	{0x0c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
-	{0x0c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(1)},
-
-	{0x14, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_HP},
-
-	{0x14, AC_VERB_SET_UNSOLICITED_ENABLE, ALC880_HP_EVENT | AC_USRSP_EN},
-	{ } /* end */
-};
-
 /* toggle speaker-output according to the hp-jack state */
-static void alc883_medion_md2_setup(struct hda_codec *codec)
+static void alc883_lenovo_nb0763_setup(struct hda_codec *codec)
 {
 	struct alc_spec *spec = codec->spec;
 
@@ -9458,15 +9492,6 @@
 #define alc883_targa_init_hook		alc882_targa_init_hook
 #define alc883_targa_unsol_event	alc882_targa_unsol_event
 
-static void alc883_clevo_m720_mic_automute(struct hda_codec *codec)
-{
-	unsigned int present;
-
-	present = snd_hda_jack_detect(codec, 0x18);
-	snd_hda_codec_amp_stereo(codec, 0x0b, HDA_INPUT, 1,
-				 HDA_AMP_MUTE, present ? HDA_AMP_MUTE : 0);
-}
-
 static void alc883_clevo_m720_setup(struct hda_codec *codec)
 {
 	struct alc_spec *spec = codec->spec;
@@ -9478,7 +9503,7 @@
 static void alc883_clevo_m720_init_hook(struct hda_codec *codec)
 {
 	alc_automute_amp(codec);
-	alc883_clevo_m720_mic_automute(codec);
+	alc88x_simple_mic_automute(codec);
 }
 
 static void alc883_clevo_m720_unsol_event(struct hda_codec *codec,
@@ -9486,7 +9511,7 @@
 {
 	switch (res >> 26) {
 	case ALC880_MIC_EVENT:
-		alc883_clevo_m720_mic_automute(codec);
+		alc88x_simple_mic_automute(codec);
 		break;
 	default:
 		alc_automute_amp_unsol_event(codec, res);
@@ -9731,7 +9756,6 @@
 	[ALC888_ACER_ASPIRE_8930G]	= "acer-aspire-8930g",
 	[ALC888_ACER_ASPIRE_7730G]	= "acer-aspire-7730g",
 	[ALC883_MEDION]		= "medion",
-	[ALC883_MEDION_MD2]	= "medion-md2",
 	[ALC883_MEDION_WIM2160]	= "medion-wim2160",
 	[ALC883_LAPTOP_EAPD]	= "laptop-eapd",
 	[ALC883_LENOVO_101E_2ch] = "lenovo-101e",
@@ -10379,19 +10403,6 @@
 		.channel_mode = alc883_sixstack_modes,
 		.input_mux = &alc883_capture_source,
 	},
-	[ALC883_MEDION_MD2] = {
-		.mixers = { alc883_medion_md2_mixer},
-		.init_verbs = { alc883_init_verbs, alc883_medion_md2_verbs},
-		.num_dacs = ARRAY_SIZE(alc883_dac_nids),
-		.dac_nids = alc883_dac_nids,
-		.dig_out_nid = ALC883_DIGOUT_NID,
-		.num_channel_mode = ARRAY_SIZE(alc883_3ST_2ch_modes),
-		.channel_mode = alc883_3ST_2ch_modes,
-		.input_mux = &alc883_capture_source,
-		.unsol_event = alc_automute_amp_unsol_event,
-		.setup = alc883_medion_md2_setup,
-		.init_hook = alc_automute_amp,
-	},
 	[ALC883_MEDION_WIM2160] = {
 		.mixers = { alc883_medion_wim2160_mixer },
 		.init_verbs = { alc883_init_verbs, alc883_medion_wim2160_verbs },
@@ -10468,7 +10479,7 @@
 		.need_dac_fix = 1,
 		.input_mux = &alc883_lenovo_nb0763_capture_source,
 		.unsol_event = alc_automute_amp_unsol_event,
-		.setup = alc883_medion_md2_setup,
+		.setup = alc883_lenovo_nb0763_setup,
 		.init_hook = alc_automute_amp,
 	},
 	[ALC888_LENOVO_MS7195_DIG] = {
@@ -10830,25 +10841,30 @@
 {
 	struct alc_spec *spec = codec->spec;
 	struct auto_pin_cfg *cfg = &spec->autocfg;
-	int i, err, type;
+	int i, err;
 	int type_idx = 0;
 	hda_nid_t nid;
+	const char *prev_label = NULL;
 
 	for (i = 0; i < cfg->num_inputs; i++) {
 		if (cfg->inputs[i].type > AUTO_PIN_MIC)
 			break;
 		nid = cfg->inputs[i].pin;
 		if (get_wcaps(codec, nid) & AC_WCAP_IN_AMP) {
-			char label[32];
-			type = cfg->inputs[i].type;
-			if (i > 0 && type == cfg->inputs[i - 1].type)
+			const char *label;
+			char boost_label[32];
+
+			label = hda_get_autocfg_input_label(codec, cfg, i);
+			if (prev_label && !strcmp(label, prev_label))
 				type_idx++;
 			else
 				type_idx = 0;
-			snprintf(label, sizeof(label), "%s Boost",
-				 hda_get_autocfg_input_label(codec, cfg, i));
-			err = add_control(spec, ALC_CTL_WIDGET_VOL, label,
-					  type_idx,
+			prev_label = label;
+
+			snprintf(boost_label, sizeof(boost_label),
+				 "%s Boost Volume", label);
+			err = add_control(spec, ALC_CTL_WIDGET_VOL,
+					  boost_label, type_idx,
 				  HDA_COMPOSE_AMP_VAL(nid, 3, 0, HDA_INPUT));
 			if (err < 0)
 				return err;
@@ -10857,6 +10873,9 @@
 	return 0;
 }
 
+static int alc861vd_auto_create_multi_out_ctls(struct alc_spec *spec,
+					     const struct auto_pin_cfg *cfg);
+
 /* almost identical with ALC880 parser... */
 static int alc882_parse_auto_config(struct hda_codec *codec)
 {
@@ -10874,7 +10893,10 @@
 	err = alc880_auto_fill_dac_nids(spec, &spec->autocfg);
 	if (err < 0)
 		return err;
-	err = alc880_auto_create_multi_out_ctls(spec, &spec->autocfg);
+	if (codec->vendor_id == 0x10ec0887)
+		err = alc861vd_auto_create_multi_out_ctls(spec, &spec->autocfg);
+	else
+		err = alc880_auto_create_multi_out_ctls(spec, &spec->autocfg);
 	if (err < 0)
 		return err;
 	err = alc880_auto_create_extra_out(spec, spec->autocfg.hp_pins[0],
@@ -11090,10 +11112,10 @@
 	HDA_CODEC_MUTE("Line Playback Switch", 0x0b, 0x02, HDA_INPUT),
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
 	HDA_CODEC_VOLUME("Front Mic Playback Volume", 0x0b, 0x01, HDA_INPUT),
 	HDA_CODEC_MUTE("Front Mic Playback Switch", 0x0b, 0x01, HDA_INPUT),
-	HDA_CODEC_VOLUME("Front Mic Boost", 0x19, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Front Mic Boost Volume", 0x19, 0, HDA_INPUT),
 	HDA_CODEC_VOLUME("Headphone Playback Volume", 0x0D, 0x0, HDA_OUTPUT),
 	HDA_CODEC_MUTE("Headphone Playback Switch", 0x15, 0x0, HDA_OUTPUT),
 	HDA_CODEC_VOLUME_MONO("Mono Playback Volume", 0x0e, 2, 0x0, HDA_OUTPUT),
@@ -11194,10 +11216,10 @@
 			    HDA_OUTPUT),
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
 	HDA_CODEC_VOLUME("Front Mic Playback Volume", 0x0b, 0x01, HDA_INPUT),
 	HDA_CODEC_MUTE("Front Mic Playback Switch", 0x0b, 0x01, HDA_INPUT),
-	HDA_CODEC_VOLUME("Front Mic Boost", 0x19, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Front Mic Boost Volume", 0x19, 0, HDA_INPUT),
 	HDA_CODEC_VOLUME("Line Playback Volume", 0x0b, 0x02, HDA_INPUT),
 	HDA_CODEC_MUTE("Line Playback Switch", 0x0b, 0x02, HDA_INPUT),
 	HDA_CODEC_VOLUME("CD Playback Volume", 0x0b, 0x04, HDA_INPUT),
@@ -11219,7 +11241,7 @@
 			    HDA_OUTPUT),
 	HDA_CODEC_VOLUME("Front Mic Playback Volume", 0x0b, 0x02, HDA_INPUT),
 	HDA_CODEC_MUTE("Front Mic Playback Switch", 0x0b, 0x02, HDA_INPUT),
-	HDA_CODEC_VOLUME("Front Mic Boost", 0x1a, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Front Mic Boost Volume", 0x1a, 0, HDA_INPUT),
 	HDA_CODEC_VOLUME("Line Playback Volume", 0x0b, 0x01, HDA_INPUT),
 	HDA_CODEC_MUTE("Line Playback Switch", 0x0b, 0x01, HDA_INPUT),
 	HDA_CODEC_VOLUME("CD Playback Volume", 0x0b, 0x04, HDA_INPUT),
@@ -11230,7 +11252,7 @@
 static struct snd_kcontrol_new alc262_HP_BPC_WildWest_option_mixer[] = {
 	HDA_CODEC_VOLUME("Rear Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
 	HDA_CODEC_MUTE("Rear Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Rear Mic Boost", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Rear Mic Boost Volume", 0x18, 0, HDA_INPUT),
 	{ } /* end */
 };
 
@@ -11250,7 +11272,7 @@
 	HDA_CODEC_MUTE("Headphone Playback Switch", 0x15, 0x0, HDA_OUTPUT),
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
 	{ } /* end */
 };
 
@@ -11357,10 +11379,10 @@
 	HDA_CODEC_MUTE("Line Playback Switch", 0x0b, 0x02, HDA_INPUT),
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
 	HDA_CODEC_VOLUME("Front Mic Playback Volume", 0x0b, 0x01, HDA_INPUT),
 	HDA_CODEC_MUTE("Front Mic Playback Switch", 0x0b, 0x01, HDA_INPUT),
-	HDA_CODEC_VOLUME("Front Mic Boost", 0x19, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Front Mic Boost Volume", 0x19, 0, HDA_INPUT),
 	HDA_CODEC_VOLUME("Headphone Playback Volume", 0x0d, 0x0, HDA_OUTPUT),
 	{ } /* end */
 };
@@ -11374,10 +11396,10 @@
 	HDA_CODEC_MUTE("Line Playback Switch", 0x0b, 0x02, HDA_INPUT),
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
 	HDA_CODEC_VOLUME("Front Mic Playback Volume", 0x0b, 0x01, HDA_INPUT),
 	HDA_CODEC_MUTE("Front Mic Playback Switch", 0x0b, 0x01, HDA_INPUT),
-	HDA_CODEC_VOLUME("Front Mic Boost", 0x19, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Front Mic Boost Volume", 0x19, 0, HDA_INPUT),
 	{ } /* end */
 };
 
@@ -11445,10 +11467,10 @@
 	HDA_CODEC_MUTE("Line Playback Switch", 0x0b, 0x02, HDA_INPUT),
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
 	HDA_CODEC_VOLUME("Front Mic Playback Volume", 0x0b, 0x01, HDA_INPUT),
 	HDA_CODEC_MUTE("Front Mic Playback Switch", 0x0b, 0x01, HDA_INPUT),
-	HDA_CODEC_VOLUME("Front Mic Boost", 0x19, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Front Mic Boost Volume", 0x19, 0, HDA_INPUT),
 	{ } /* end */
 };
 
@@ -11632,7 +11654,7 @@
 
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
 
 	HDA_CODEC_VOLUME("Headphone Playback Volume", 0x0d, 0x0, HDA_OUTPUT),
 	HDA_CODEC_MUTE("Headphone Playback Switch", 0x15, 0x0, HDA_OUTPUT),
@@ -11687,7 +11709,7 @@
 	.num_items = 3,
 	.items = {
 		{ "Mic", 0x0 },
-		{ "Int Mic", 0x1 },
+		{ "Internal Mic", 0x1 },
 		{ "CD", 0x4 },
 	},
 };
@@ -11839,12 +11861,12 @@
 	},
 	HDA_CODEC_VOLUME("CD Playback Volume", 0x0b, 0x04, HDA_INPUT),
 	HDA_CODEC_MUTE("CD Playback Switch", 0x0b, 0x04, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Int Mic Boost", 0x19, 0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Int Mic Playback Volume", 0x0b, 0x1, HDA_INPUT),
-	HDA_CODEC_MUTE("Int Mic Playback Switch", 0x0b, 0x1, HDA_INPUT),
+	HDA_CODEC_VOLUME("Internal Mic Boost Volume", 0x19, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Internal Mic Playback Volume", 0x0b, 0x1, HDA_INPUT),
+	HDA_CODEC_MUTE("Internal Mic Playback Switch", 0x0b, 0x1, HDA_INPUT),
 	{ } /* end */
 };
 
@@ -11875,12 +11897,12 @@
 	},
 	HDA_CODEC_VOLUME("CD Playback Volume", 0x0b, 0x04, HDA_INPUT),
 	HDA_CODEC_MUTE("CD Playback Switch", 0x0b, 0x04, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Int Mic Boost", 0x19, 0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Int Mic Playback Volume", 0x0b, 0x1, HDA_INPUT),
-	HDA_CODEC_MUTE("Int Mic Playback Switch", 0x0b, 0x1, HDA_INPUT),
+	HDA_CODEC_VOLUME("Internal Mic Boost Volume", 0x19, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Internal Mic Playback Volume", 0x0b, 0x1, HDA_INPUT),
+	HDA_CODEC_MUTE("Internal Mic Playback Switch", 0x0b, 0x1, HDA_INPUT),
 	{ } /* end */
 };
 
@@ -11889,10 +11911,10 @@
 	ALC262_HIPPO_MASTER_SWITCH,
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
 	HDA_CODEC_VOLUME("Front Mic Playback Volume", 0x0b, 0x01, HDA_INPUT),
 	HDA_CODEC_MUTE("Front Mic Playback Switch", 0x0b, 0x01, HDA_INPUT),
-	HDA_CODEC_VOLUME("Front Mic Boost", 0x19, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Front Mic Boost Volume", 0x19, 0, HDA_INPUT),
 	{ } /* end */
 };
 
@@ -11918,8 +11940,8 @@
 	HDA_BIND_MUTE("Master Playback Switch", 0x0c, 2, HDA_INPUT),
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x01, HDA_INPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x01, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x19, 0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Headphone Mic Boost", 0x15, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x19, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Headphone Mic Boost Volume", 0x15, 0, HDA_INPUT),
 	{ } /* end */
 };
 
@@ -12089,13 +12111,8 @@
 	spec->multiout.dac_nids = spec->private_dac_nids;
 	spec->multiout.dac_nids[0] = 2;
 
-	if (!cfg->speaker_pins[0] && !cfg->hp_pins[0])
-		pfx = "Master";
-	else if (cfg->line_out_type == AUTO_PIN_SPEAKER_OUT)
-		pfx = "Speaker";
-	else if (cfg->line_out_type == AUTO_PIN_HP_OUT)
-		pfx = "Headphone";
-	else
+	pfx = alc_get_line_out_pfx(cfg, true);
+	if (!pfx)
 		pfx = "Front";
 	for (i = 0; i < 2; i++) {
 		err = alc262_add_out_sw_ctl(spec, cfg->line_out_pins[i], pfx, i);
@@ -12996,9 +13013,9 @@
 	HDA_CODEC_MUTE("Front Playback Switch", 0x14, 0x0, HDA_OUTPUT),
 	HDA_CODEC_VOLUME("Headphone Playback Volume", 0x3, 0x0, HDA_OUTPUT),
 	HDA_CODEC_MUTE("Headphone Playback Switch", 0x15, 0x0, HDA_OUTPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Front Mic Boost", 0x19, 0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Line In Boost", 0x1a, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Front Mic Boost Volume", 0x19, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Line In Boost Volume", 0x1a, 0, HDA_INPUT),
 	{ }
 };
 
@@ -13007,9 +13024,9 @@
 	HDA_CODEC_VOLUME("Front Playback Volume", 0x2, 0x0, HDA_OUTPUT),
 	HDA_CODEC_VOLUME("Headphone Playback Volume", 0x3, 0x0, HDA_OUTPUT),
 	ALC262_HIPPO_MASTER_SWITCH,
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Front Mic Boost", 0x19, 0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Line In Boost", 0x1a, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Front Mic Boost Volume", 0x19, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Line In Boost Volume", 0x1a, 0, HDA_INPUT),
 	{ }
 };
 
@@ -13113,9 +13130,9 @@
 		.put = alc268_acer_master_sw_put,
 		.private_value = HDA_COMPOSE_AMP_VAL(0x14, 3, 0, HDA_OUTPUT),
 	},
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Internal Mic Boost", 0x19, 0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Line In Boost", 0x1a, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Internal Mic Boost Volume", 0x19, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Line In Boost Volume", 0x1a, 0, HDA_INPUT),
 	{ }
 };
 
@@ -13131,8 +13148,8 @@
 		.put = alc268_acer_master_sw_put,
 		.private_value = HDA_COMPOSE_AMP_VAL(0x14, 3, 0, HDA_OUTPUT),
 	},
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Line In Boost", 0x1a, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Line In Boost Volume", 0x1a, 0, HDA_INPUT),
 	{ }
 };
 
@@ -13224,8 +13241,8 @@
 	HDA_CODEC_MUTE("Speaker Playback Switch", 0x14, 0x0, HDA_OUTPUT),
 	HDA_CODEC_VOLUME("Headphone Playback Volume", 0x03, 0x0, HDA_OUTPUT),
 	HDA_CODEC_MUTE("Headphone Playback Switch", 0x15, 0x0, HDA_OUTPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Internal Mic Boost", 0x19, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Internal Mic Boost Volume", 0x19, 0, HDA_INPUT),
 	{ }
 };
 
@@ -13258,8 +13275,8 @@
 	HDA_CODEC_MUTE("Headphone Playback Switch", 0x15, 0x0, HDA_OUTPUT),
 	HDA_CODEC_VOLUME("Mic Capture Volume", 0x23, 0x0, HDA_OUTPUT),
 	HDA_BIND_MUTE("Mic Capture Switch", 0x23, 2, HDA_OUTPUT),
-	HDA_CODEC_VOLUME("Ext Mic Boost", 0x18, 0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Int Mic Boost", 0x19, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Internal Mic Boost Volume", 0x19, 0, HDA_INPUT),
 	{ }
 };
 
@@ -14082,10 +14099,10 @@
 	HDA_CODEC_MUTE("Line Playback Switch", 0x0b, 0x02, HDA_INPUT),
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
 	HDA_CODEC_VOLUME("Front Mic Playback Volume", 0x0b, 0x01, HDA_INPUT),
 	HDA_CODEC_MUTE("Front Mic Playback Switch", 0x0b, 0x01, HDA_INPUT),
-	HDA_CODEC_VOLUME("Front Mic Boost", 0x19, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Front Mic Boost Volume", 0x19, 0, HDA_INPUT),
 	HDA_CODEC_MUTE("Headphone Playback Switch", 0x15, 0x0, HDA_OUTPUT),
 	HDA_CODEC_MUTE_MONO("Mono Playback Switch", 0x16, 2, 0x0, HDA_OUTPUT),
 	{ } /* end */
@@ -14105,10 +14122,10 @@
 	},
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
 	HDA_CODEC_VOLUME("Internal Mic Playback Volume", 0x0b, 0x01, HDA_INPUT),
 	HDA_CODEC_MUTE("Internal Mic Playback Switch", 0x0b, 0x01, HDA_INPUT),
-	HDA_CODEC_VOLUME("Internal Mic Boost", 0x19, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Internal Mic Boost Volume", 0x19, 0, HDA_INPUT),
 	{ }
 };
 
@@ -14126,13 +14143,13 @@
 	},
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
 	HDA_CODEC_VOLUME("Internal Mic Playback Volume", 0x0b, 0x01, HDA_INPUT),
 	HDA_CODEC_MUTE("Internal Mic Playback Switch", 0x0b, 0x01, HDA_INPUT),
-	HDA_CODEC_VOLUME("Internal Mic Boost", 0x19, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Internal Mic Boost Volume", 0x19, 0, HDA_INPUT),
 	HDA_CODEC_VOLUME("Dock Mic Playback Volume", 0x0b, 0x03, HDA_INPUT),
 	HDA_CODEC_MUTE("Dock Mic Playback Switch", 0x0b, 0x03, HDA_INPUT),
-	HDA_CODEC_VOLUME("Dock Mic Boost", 0x1b, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Dock Mic Boost Volume", 0x1b, 0, HDA_INPUT),
 	{ }
 };
 
@@ -14162,30 +14179,30 @@
 static struct snd_kcontrol_new alc269_laptop_analog_capture_mixer[] = {
 	HDA_CODEC_VOLUME("Capture Volume", 0x08, 0x0, HDA_INPUT),
 	HDA_CODEC_MUTE("Capture Switch", 0x08, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
-	HDA_CODEC_VOLUME("IntMic Boost", 0x19, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Internal Mic Boost Volume", 0x19, 0, HDA_INPUT),
 	{ } /* end */
 };
 
 static struct snd_kcontrol_new alc269_laptop_digital_capture_mixer[] = {
 	HDA_CODEC_VOLUME("Capture Volume", 0x08, 0x0, HDA_INPUT),
 	HDA_CODEC_MUTE("Capture Switch", 0x08, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
 	{ } /* end */
 };
 
 static struct snd_kcontrol_new alc269vb_laptop_analog_capture_mixer[] = {
 	HDA_CODEC_VOLUME("Capture Volume", 0x09, 0x0, HDA_INPUT),
 	HDA_CODEC_MUTE("Capture Switch", 0x09, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
-	HDA_CODEC_VOLUME("IntMic Boost", 0x19, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Internal Mic Boost Volume", 0x19, 0, HDA_INPUT),
 	{ } /* end */
 };
 
 static struct snd_kcontrol_new alc269vb_laptop_digital_capture_mixer[] = {
 	HDA_CODEC_VOLUME("Capture Volume", 0x09, 0x0, HDA_INPUT),
 	HDA_CODEC_MUTE("Capture Switch", 0x09, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
 	{ } /* end */
 };
 
@@ -14804,12 +14821,23 @@
 }
 #endif /* SND_HDA_NEEDS_RESUME */
 
+static void alc269_fixup_hweq(struct hda_codec *codec,
+			       const struct alc_fixup *fix, int pre_init)
+{
+	int coef;
+
+	coef = alc_read_coef_idx(codec, 0x1e);
+	alc_write_coef_idx(codec, 0x1e, coef | 0x80);
+}
+
 enum {
 	ALC269_FIXUP_SONY_VAIO,
 	ALC275_FIX_SONY_VAIO_GPIO2,
 	ALC269_FIXUP_DELL_M101Z,
 	ALC269_FIXUP_SKU_IGNORE,
 	ALC269_FIXUP_ASUS_G73JW,
+	ALC269_FIXUP_LENOVO_EAPD,
+	ALC275_FIXUP_SONY_HWEQ,
 };
 
 static const struct alc_fixup alc269_fixups[] = {
@@ -14824,6 +14852,7 @@
 			{0x01, AC_VERB_SET_GPIO_MASK, 0x04},
 			{0x01, AC_VERB_SET_GPIO_DIRECTION, 0x04},
 			{0x01, AC_VERB_SET_GPIO_DATA, 0x00},
+			{0x19, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREFGRD},
 			{ }
 		}
 	},
@@ -14844,17 +14873,34 @@
 			{ }
 		}
 	},
+	[ALC269_FIXUP_LENOVO_EAPD] = {
+		.verbs = (const struct hda_verb[]) {
+			{0x14, AC_VERB_SET_EAPD_BTLENABLE, 0},
+			{}
+		}
+	},
+	[ALC275_FIXUP_SONY_HWEQ] = {
+		.func = alc269_fixup_hweq,
+		.verbs = (const struct hda_verb[]) {
+			{0x01, AC_VERB_SET_GPIO_MASK, 0x04},
+			{0x01, AC_VERB_SET_GPIO_DIRECTION, 0x04},
+			{0x01, AC_VERB_SET_GPIO_DATA, 0x00},
+			{0x19, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREFGRD},
+			{ }
+		}
+	}
 };
 
 static struct snd_pci_quirk alc269_fixup_tbl[] = {
 	SND_PCI_QUIRK(0x104d, 0x9073, "Sony VAIO", ALC275_FIX_SONY_VAIO_GPIO2),
-	SND_PCI_QUIRK(0x104d, 0x907b, "Sony VAIO", ALC275_FIX_SONY_VAIO_GPIO2),
-	SND_PCI_QUIRK(0x104d, 0x9084, "Sony VAIO", ALC275_FIX_SONY_VAIO_GPIO2),
+	SND_PCI_QUIRK(0x104d, 0x907b, "Sony VAIO", ALC275_FIXUP_SONY_HWEQ),
+	SND_PCI_QUIRK(0x104d, 0x9084, "Sony VAIO", ALC275_FIXUP_SONY_HWEQ),
 	SND_PCI_QUIRK_VENDOR(0x104d, "Sony VAIO", ALC269_FIXUP_SONY_VAIO),
 	SND_PCI_QUIRK(0x1028, 0x0470, "Dell M101z", ALC269_FIXUP_DELL_M101Z),
 	SND_PCI_QUIRK(0x17aa, 0x21b8, "Thinkpad Edge 14", ALC269_FIXUP_SKU_IGNORE),
 	SND_PCI_QUIRK(0x17aa, 0x20f2, "Thinkpad SL410/510", ALC269_FIXUP_SKU_IGNORE),
 	SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW),
+	SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD),
 	{}
 };
 
@@ -15889,13 +15935,16 @@
 	return 0;
 }
 
-static int alc861_create_out_sw(struct hda_codec *codec, const char *pfx,
-				hda_nid_t nid, unsigned int chs)
+static int __alc861_create_out_sw(struct hda_codec *codec, const char *pfx,
+				  hda_nid_t nid, int idx, unsigned int chs)
 {
-	return add_pb_sw_ctrl(codec->spec, ALC_CTL_WIDGET_MUTE, pfx,
+	return __add_pb_sw_ctrl(codec->spec, ALC_CTL_WIDGET_MUTE, pfx, idx,
 			   HDA_COMPOSE_AMP_VAL(nid, chs, 0, HDA_OUTPUT));
 }
 
+#define alc861_create_out_sw(codec, pfx, nid, chs) \
+	__alc861_create_out_sw(codec, pfx, nid, 0, chs)
+
 /* add playback controls from the parsed DAC table */
 static int alc861_auto_create_multi_out_ctls(struct hda_codec *codec,
 					     const struct auto_pin_cfg *cfg)
@@ -15904,26 +15953,15 @@
 	static const char *chname[4] = {
 		"Front", "Surround", NULL /*CLFE*/, "Side"
 	};
+	const char *pfx = alc_get_line_out_pfx(cfg, true);
 	hda_nid_t nid;
 	int i, err;
 
-	if (cfg->line_outs == 1) {
-		const char *pfx = NULL;
-		if (!cfg->hp_outs)
-			pfx = "Master";
-		else if (cfg->line_out_type == AUTO_PIN_SPEAKER_OUT)
-			pfx = "Speaker";
-		if (pfx) {
-			nid = spec->multiout.dac_nids[0];
-			return alc861_create_out_sw(codec, pfx, nid, 3);
-		}
-	}
-
 	for (i = 0; i < cfg->line_outs; i++) {
 		nid = spec->multiout.dac_nids[i];
 		if (!nid)
 			continue;
-		if (i == 2) {
+		if (!pfx && i == 2) {
 			/* Center/LFE */
 			err = alc861_create_out_sw(codec, "Center", nid, 1);
 			if (err < 0)
@@ -15932,7 +15970,10 @@
 			if (err < 0)
 				return err;
 		} else {
-			err = alc861_create_out_sw(codec, chname[i], nid, 3);
+			const char *name = pfx;
+			if (!name)
+				name = chname[i];
+			err = __alc861_create_out_sw(codec, name, nid, i, 3);
 			if (err < 0)
 				return err;
 		}
@@ -16404,8 +16445,8 @@
 static struct hda_input_mux alc861vd_dallas_capture_source = {
 	.num_items = 2,
 	.items = {
-		{ "Ext Mic", 0x0 },
-		{ "Int Mic", 0x1 },
+		{ "Mic", 0x0 },
+		{ "Internal Mic", 0x1 },
 	},
 };
 
@@ -16484,11 +16525,11 @@
 
 	HDA_CODEC_MUTE("Headphone Playback Switch", 0x1b, 0x0, HDA_OUTPUT),
 
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
 
-	HDA_CODEC_VOLUME("Front Mic Boost", 0x19, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Front Mic Boost Volume", 0x19, 0, HDA_INPUT),
 	HDA_CODEC_VOLUME("Front Mic Playback Volume", 0x0b, 0x1, HDA_INPUT),
 	HDA_CODEC_MUTE("Front Mic Playback Switch", 0x0b, 0x1, HDA_INPUT),
 
@@ -16507,11 +16548,11 @@
 
 	HDA_CODEC_MUTE("Headphone Playback Switch", 0x1b, 0x0, HDA_OUTPUT),
 
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
 
-	HDA_CODEC_VOLUME("Front Mic Boost", 0x19, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Front Mic Boost Volume", 0x19, 0, HDA_INPUT),
 	HDA_CODEC_VOLUME("Front Mic Playback Volume", 0x0b, 0x1, HDA_INPUT),
 	HDA_CODEC_MUTE("Front Mic Playback Switch", 0x0b, 0x1, HDA_INPUT),
 
@@ -16531,11 +16572,11 @@
 
 	HDA_CODEC_MUTE("Headphone Playback Switch", 0x1b, 0x0, HDA_OUTPUT),
 
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
 
-	HDA_CODEC_VOLUME("Front Mic Boost", 0x19, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Front Mic Boost Volume", 0x19, 0, HDA_INPUT),
 	HDA_CODEC_VOLUME("Front Mic Playback Volume", 0x0b, 0x1, HDA_INPUT),
 	HDA_CODEC_MUTE("Front Mic Playback Switch", 0x0b, 0x1, HDA_INPUT),
 
@@ -16546,19 +16587,19 @@
 };
 
 /* Pin assignment: Speaker=0x14, HP = 0x15,
- *                 Ext Mic=0x18, Int Mic = 0x19, CD = 0x1c, PC Beep = 0x1d
+ *                 Mic=0x18, Internal Mic = 0x19, CD = 0x1c, PC Beep = 0x1d
  */
 static struct snd_kcontrol_new alc861vd_dallas_mixer[] = {
 	HDA_CODEC_VOLUME("Speaker Playback Volume", 0x02, 0x0, HDA_OUTPUT),
 	HDA_BIND_MUTE("Speaker Playback Switch", 0x0c, 2, HDA_INPUT),
 	HDA_CODEC_VOLUME("Headphone Playback Volume", 0x03, 0x0, HDA_OUTPUT),
 	HDA_BIND_MUTE("Headphone Playback Switch", 0x0d, 2, HDA_INPUT),
-	HDA_CODEC_VOLUME("Ext Mic Boost", 0x18, 0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Ext Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_MUTE("Ext Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Int Mic Boost", 0x19, 0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Int Mic Playback Volume", 0x0b, 0x1, HDA_INPUT),
-	HDA_CODEC_MUTE("Int Mic Playback Switch", 0x0b, 0x1, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
+	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Internal Mic Boost Volume", 0x19, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Internal Mic Playback Volume", 0x0b, 0x1, HDA_INPUT),
+	HDA_CODEC_MUTE("Internal Mic Playback Switch", 0x0b, 0x1, HDA_INPUT),
 	{ } /* end */
 };
 
@@ -16723,18 +16764,6 @@
 	{}
 };
 
-static void alc861vd_lenovo_mic_automute(struct hda_codec *codec)
-{
-	unsigned int present;
-	unsigned char bits;
-
-	present = snd_hda_jack_detect(codec, 0x18);
-	bits = present ? HDA_AMP_MUTE : 0;
-
-	snd_hda_codec_amp_stereo(codec, 0x0b, HDA_INPUT, 1,
-				 HDA_AMP_MUTE, bits);
-}
-
 static void alc861vd_lenovo_setup(struct hda_codec *codec)
 {
 	struct alc_spec *spec = codec->spec;
@@ -16745,7 +16774,7 @@
 static void alc861vd_lenovo_init_hook(struct hda_codec *codec)
 {
 	alc_automute_amp(codec);
-	alc861vd_lenovo_mic_automute(codec);
+	alc88x_simple_mic_automute(codec);
 }
 
 static void alc861vd_lenovo_unsol_event(struct hda_codec *codec,
@@ -16753,7 +16782,7 @@
 {
 	switch (res >> 26) {
 	case ALC880_MIC_EVENT:
-		alc861vd_lenovo_mic_automute(codec);
+		alc88x_simple_mic_automute(codec);
 		break;
 	default:
 		alc_automute_amp_unsol_event(codec, res);
@@ -17043,12 +17072,13 @@
 #define alc861vd_idx_to_mixer_switch(nid)	((nid) + 0x0c)
 
 /* add playback controls from the parsed DAC table */
-/* Based on ALC880 version. But ALC861VD has separate,
+/* Based on ALC880 version. But ALC861VD and ALC887 have separate,
  * different NIDs for mute/unmute switch and volume control */
 static int alc861vd_auto_create_multi_out_ctls(struct alc_spec *spec,
 					     const struct auto_pin_cfg *cfg)
 {
 	static const char *chname[4] = {"Front", "Surround", "CLFE", "Side"};
+	const char *pfx = alc_get_line_out_pfx(cfg, true);
 	hda_nid_t nid_v, nid_s;
 	int i, err;
 
@@ -17062,7 +17092,7 @@
 				alc880_dac_to_idx(
 					spec->multiout.dac_nids[i]));
 
-		if (i == 2) {
+		if (!pfx && i == 2) {
 			/* Center/LFE */
 			err = add_pb_vol_ctrl(spec, ALC_CTL_WIDGET_VOL,
 					      "Center",
@@ -17089,24 +17119,17 @@
 			if (err < 0)
 				return err;
 		} else {
-			const char *pfx;
-			if (cfg->line_outs == 1 &&
-			    cfg->line_out_type == AUTO_PIN_SPEAKER_OUT) {
-				if (!cfg->hp_pins)
-					pfx = "Speaker";
-				else
-					pfx = "PCM";
-			} else
-				pfx = chname[i];
-			err = add_pb_vol_ctrl(spec, ALC_CTL_WIDGET_VOL, pfx,
+			const char *name = pfx;
+			if (!name)
+				name = chname[i];
+			err = __add_pb_vol_ctrl(spec, ALC_CTL_WIDGET_VOL,
+						name, i,
 					  HDA_COMPOSE_AMP_VAL(nid_v, 3, 0,
 							      HDA_OUTPUT));
 			if (err < 0)
 				return err;
-			if (cfg->line_outs == 1 &&
-			    cfg->line_out_type == AUTO_PIN_SPEAKER_OUT)
-				pfx = "Speaker";
-			err = add_pb_sw_ctrl(spec, ALC_CTL_BIND_MUTE, pfx,
+			err = __add_pb_sw_ctrl(spec, ALC_CTL_BIND_MUTE,
+					       name, i,
 					  HDA_COMPOSE_AMP_VAL(nid_s, 3, 2,
 							      HDA_INPUT));
 			if (err < 0)
@@ -17570,13 +17593,13 @@
 	HDA_CODEC_VOLUME("Master Playback Volume", 0x02, 0x0, HDA_OUTPUT),
 	ALC262_HIPPO_MASTER_SWITCH,
 
-	HDA_CODEC_VOLUME("e-Mic Boost", 0x18, 0, HDA_INPUT),
-	HDA_CODEC_VOLUME("e-Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_MUTE("e-Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
+	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
 
-	HDA_CODEC_VOLUME("i-Mic Boost", 0x19, 0, HDA_INPUT),
-	HDA_CODEC_VOLUME("i-Mic Playback Volume", 0x0b, 0x1, HDA_INPUT),
-	HDA_CODEC_MUTE("i-Mic Playback Switch", 0x0b, 0x1, HDA_INPUT),
+	HDA_CODEC_VOLUME("Internal Mic Boost Volume", 0x19, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Internal Mic Playback Volume", 0x0b, 0x1, HDA_INPUT),
+	HDA_CODEC_MUTE("Internal Mic Playback Switch", 0x0b, 0x1, HDA_INPUT),
 	{ } /* end */
 };
 
@@ -17720,8 +17743,8 @@
 
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("i-Mic Playback Volume", 0x0b, 0x1, HDA_INPUT),
-	HDA_CODEC_MUTE("i-Mic Playback Switch", 0x0b, 0x1, HDA_INPUT),
+	HDA_CODEC_VOLUME("Internal Mic Playback Volume", 0x0b, 0x1, HDA_INPUT),
+	HDA_CODEC_MUTE("Internal Mic Playback Switch", 0x0b, 0x1, HDA_INPUT),
 	{ } /* end */
 };
 
@@ -17732,8 +17755,8 @@
 
 	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
 	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("i-Mic Playback Volume", 0x0b, 0x1, HDA_INPUT),
-	HDA_CODEC_MUTE("i-Mic Playback Switch", 0x0b, 0x1, HDA_INPUT),
+	HDA_CODEC_VOLUME("Internal Mic Playback Volume", 0x0b, 0x1, HDA_INPUT),
+	HDA_CODEC_MUTE("Internal Mic Playback Switch", 0x0b, 0x1, HDA_INPUT),
 	HDA_CODEC_VOLUME("Line Playback Volume", 0x0b, 0x02, HDA_INPUT),
 	HDA_CODEC_MUTE("Line Playback Switch", 0x0b, 0x02, HDA_INPUT),
 	{ } /* end */
@@ -18566,13 +18589,13 @@
 	HDA_CODEC_VOLUME("Master Playback Volume", 0x02, 0x0, HDA_OUTPUT),
 	ALC262_HIPPO_MASTER_SWITCH,
 
-	HDA_CODEC_VOLUME("e-Mic/LineIn Boost", 0x18, 0, HDA_INPUT),
-	HDA_CODEC_VOLUME("e-Mic/LineIn Playback Volume", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_MUTE("e-Mic/LineIn Playback Switch", 0x0b, 0x0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic/LineIn Boost Volume", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic/LineIn Playback Volume", 0x0b, 0x0, HDA_INPUT),
+	HDA_CODEC_MUTE("Mic/LineIn Playback Switch", 0x0b, 0x0, HDA_INPUT),
 
-	HDA_CODEC_VOLUME("i-Mic Boost", 0x19, 0, HDA_INPUT),
-	HDA_CODEC_VOLUME("i-Mic Playback Volume", 0x0b, 0x1, HDA_INPUT),
-	HDA_CODEC_MUTE("i-Mic Playback Switch", 0x0b, 0x1, HDA_INPUT),
+	HDA_CODEC_VOLUME("Internal Mic Boost Volume", 0x19, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Internal Mic Playback Volume", 0x0b, 0x1, HDA_INPUT),
+	HDA_CODEC_MUTE("Internal Mic Playback Switch", 0x0b, 0x1, HDA_INPUT),
 	{ } /* end */
 };
 
@@ -18583,13 +18606,13 @@
 	HDA_CODEC_VOLUME("Headphone Playback Volume", 0x03, 0x0, HDA_OUTPUT),
 	HDA_CODEC_MUTE("Headphone Playback Switch", 0x21, 0x0, HDA_OUTPUT),
 
-	HDA_CODEC_VOLUME("Ext Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_MUTE("Ext Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Ext Mic Boost", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
+	HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
 
-	HDA_CODEC_VOLUME("Int Mic Playback Volume", 0x0b, 0x1, HDA_INPUT),
-	HDA_CODEC_MUTE("Int Mic Playback Switch", 0x0b, 0x1, HDA_INPUT),
-	HDA_CODEC_VOLUME("Int Mic Boost", 0x19, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Internal Mic Playback Volume", 0x0b, 0x1, HDA_INPUT),
+	HDA_CODEC_MUTE("Internal Mic Playback Switch", 0x0b, 0x1, HDA_INPUT),
+	HDA_CODEC_VOLUME("Internal Mic Boost Volume", 0x19, 0, HDA_INPUT),
 	{ } /* end */
 };
 
@@ -19094,20 +19117,24 @@
 	return 0;
 }
 
-static inline int alc662_add_vol_ctl(struct alc_spec *spec, const char *pfx,
-			      hda_nid_t nid, unsigned int chs)
+static inline int __alc662_add_vol_ctl(struct alc_spec *spec, const char *pfx,
+				       hda_nid_t nid, int idx, unsigned int chs)
 {
-	return add_pb_vol_ctrl(spec, ALC_CTL_WIDGET_VOL, pfx,
+	return __add_pb_vol_ctrl(spec, ALC_CTL_WIDGET_VOL, pfx, idx,
 			   HDA_COMPOSE_AMP_VAL(nid, chs, 0, HDA_OUTPUT));
 }
 
-static inline int alc662_add_sw_ctl(struct alc_spec *spec, const char *pfx,
-			     hda_nid_t nid, unsigned int chs)
+static inline int __alc662_add_sw_ctl(struct alc_spec *spec, const char *pfx,
+				      hda_nid_t nid, int idx, unsigned int chs)
 {
-	return add_pb_sw_ctrl(spec, ALC_CTL_WIDGET_MUTE, pfx,
+	return __add_pb_sw_ctrl(spec, ALC_CTL_WIDGET_MUTE, pfx, idx,
 			   HDA_COMPOSE_AMP_VAL(nid, chs, 0, HDA_INPUT));
 }
 
+#define alc662_add_vol_ctl(spec, pfx, nid, chs) \
+	__alc662_add_vol_ctl(spec, pfx, nid, 0, chs)
+#define alc662_add_sw_ctl(spec, pfx, nid, chs) \
+	__alc662_add_sw_ctl(spec, pfx, nid, 0, chs)
 #define alc662_add_stereo_vol(spec, pfx, nid) \
 	alc662_add_vol_ctl(spec, pfx, nid, 3)
 #define alc662_add_stereo_sw(spec, pfx, nid) \
@@ -19121,6 +19148,7 @@
 	static const char *chname[4] = {
 		"Front", "Surround", NULL /*CLFE*/, "Side"
 	};
+	const char *pfx = alc_get_line_out_pfx(cfg, true);
 	hda_nid_t nid, mix;
 	int i, err;
 
@@ -19131,7 +19159,7 @@
 		mix = alc662_dac_to_mix(codec, cfg->line_out_pins[i], nid);
 		if (!mix)
 			continue;
-		if (i == 2) {
+		if (!pfx && i == 2) {
 			/* Center/LFE */
 			err = alc662_add_vol_ctl(spec, "Center", nid, 1);
 			if (err < 0)
@@ -19146,22 +19174,13 @@
 			if (err < 0)
 				return err;
 		} else {
-			const char *pfx;
-			if (cfg->line_outs == 1 &&
-			    cfg->line_out_type == AUTO_PIN_SPEAKER_OUT) {
-				if (cfg->hp_outs)
-					pfx = "Speaker";
-				else
-					pfx = "PCM";
-			} else
-				pfx = chname[i];
-			err = alc662_add_vol_ctl(spec, pfx, nid, 3);
+			const char *name = pfx;
+			if (!name)
+				name = chname[i];
+			err = __alc662_add_vol_ctl(spec, name, nid, i, 3);
 			if (err < 0)
 				return err;
-			if (cfg->line_outs == 1 &&
-			    cfg->line_out_type == AUTO_PIN_SPEAKER_OUT)
-				pfx = "Speaker";
-			err = alc662_add_sw_ctl(spec, pfx, mix, 3);
+			err = __alc662_add_sw_ctl(spec, name, mix, i, 3);
 			if (err < 0)
 				return err;
 		}
@@ -19358,9 +19377,21 @@
 		alc_inithook(codec);
 }
 
+static void alc272_fixup_mario(struct hda_codec *codec,
+			       const struct alc_fixup *fix, int pre_init) {
+	if (snd_hda_override_amp_caps(codec, 0x2, HDA_OUTPUT,
+				      (0x3b << AC_AMPCAP_OFFSET_SHIFT) |
+				      (0x3b << AC_AMPCAP_NUM_STEPS_SHIFT) |
+				      (0x03 << AC_AMPCAP_STEP_SIZE_SHIFT) |
+				      (0 << AC_AMPCAP_MUTE_SHIFT)))
+		printk(KERN_WARNING
+		       "hda_codec: failed to override amp caps for NID 0x2\n");
+}
+
 enum {
 	ALC662_FIXUP_ASPIRE,
 	ALC662_FIXUP_IDEAPAD,
+	ALC272_FIXUP_MARIO,
 };
 
 static const struct alc_fixup alc662_fixups[] = {
@@ -19376,6 +19407,9 @@
 			{ }
 		}
 	},
+	[ALC272_FIXUP_MARIO] = {
+		.func = alc272_fixup_mario,
+	}
 };
 
 static struct snd_pci_quirk alc662_fixup_tbl[] = {
@@ -19386,6 +19420,10 @@
 	{}
 };
 
+static const struct alc_model_fixup alc662_fixup_models[] = {
+	{.id = ALC272_FIXUP_MARIO, .name = "mario"},
+	{}
+};
 
 
 static int patch_alc662(struct hda_codec *codec)
@@ -19485,7 +19523,8 @@
 	codec->patch_ops = alc_patch_ops;
 	if (board_config == ALC662_AUTO) {
 		spec->init_hook = alc662_auto_init;
-		alc_pick_fixup(codec, alc662_fixup_tbl, alc662_fixups, 0);
+		alc_pick_fixup_model(codec, alc662_fixup_models,
+				     alc662_fixup_tbl, alc662_fixups, 0);
 	}
 
 	alc_init_jacks(codec);
@@ -19612,9 +19651,9 @@
 	HDA_CODEC_MUTE("Front Playback Switch", 0x14, 0x0, HDA_OUTPUT),
 	HDA_CODEC_VOLUME("Headphone Playback Volume", 0x4, 0x0, HDA_OUTPUT),
 	HDA_CODEC_MUTE("Headphone Playback Switch", 0x16, 0x0, HDA_OUTPUT),
-	HDA_CODEC_VOLUME("Int Mic Boost", 0x12, 0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
-	HDA_CODEC_VOLUME("Line In Boost", 0x19, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Internal Mic Boost Volume", 0x12, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0, HDA_INPUT),
+	HDA_CODEC_VOLUME("Line In Boost Volume", 0x19, 0, HDA_INPUT),
 	{ }
 };
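
The ALC861VD/ALC662 hunks above stop re-deriving a "Speaker"/"PCM" name inside each DAC loop; both auto-parsers now ask alc_get_line_out_pfx(cfg, true) for one shared prefix and register controls through the indexed __add_pb_vol_ctrl()/__add_pb_sw_ctrl()/__alc662_add_vol_ctl() variants, so identically named controls get distinct indices. The helper's body is not part of this excerpt, and the two removed copies even disagreed on when to pick "PCM" versus "Speaker", so the sketch below only models the shared-prefix-or-per-channel-fallback pattern; struct pin_cfg and get_line_out_pfx() are invented stand-ins, not the driver's real types.

#include <stdio.h>

/* hypothetical stand-ins for the auto_pin_cfg fields used by the removed code */
struct pin_cfg {
	int line_outs;
	int speaker_out;	/* line_out_type == AUTO_PIN_SPEAKER_OUT */
};

/* one shared prefix for all outputs, or NULL to fall back to channel names */
static const char *get_line_out_pfx(const struct pin_cfg *cfg)
{
	if (cfg->line_outs == 1 && cfg->speaker_out)
		return "Speaker";
	return NULL;
}

static void name_controls(const struct pin_cfg *cfg)
{
	static const char *const chname[4] = { "Front", "Surround", "CLFE", "Side" };
	const char *pfx = get_line_out_pfx(cfg);
	int i;

	for (i = 0; i < cfg->line_outs; i++) {
		const char *name = pfx ? pfx : chname[i];
		/* the index keeps identically named controls apart, as in __add_pb_vol_ctrl() */
		printf("  %s Playback Volume, index %d\n", name, i);
	}
}

int main(void)
{
	struct pin_cfg laptop  = { .line_outs = 1, .speaker_out = 1 };
	struct pin_cfg desktop = { .line_outs = 4, .speaker_out = 0 };

	puts("laptop:");
	name_controls(&laptop);
	puts("desktop:");
	name_controls(&desktop);
	return 0;
}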
 
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
index f03b2ff..4ab019d 100644
--- a/sound/pci/hda/patch_sigmatel.c
+++ b/sound/pci/hda/patch_sigmatel.c
@@ -389,6 +389,9 @@
 	0x11, 0x20, 0
 };
 
+#define STAC92HD88XXX_NUM_DMICS	STAC92HD83XXX_NUM_DMICS
+#define stac92hd88xxx_dmic_nids	stac92hd83xxx_dmic_nids
+
 #define STAC92HD87B_NUM_DMICS	 1
 static hda_nid_t stac92hd87b_dmic_nids[STAC92HD87B_NUM_DMICS + 1] = {
 	0x11, 0
@@ -3591,7 +3594,7 @@
 		if (check_mic_pin(codec, spec->dmic_nids[i],
 		    &fixed, &ext, &dock))
 			return 0;
-	if (!fixed && !ext && !dock)
+	if (!fixed || (!ext && !dock))
 		return 0; /* no input to switch */
 	if (!(get_wcaps(codec, ext) & AC_WCAP_UNSOL_CAP))
 		return 0; /* no unsol support */
@@ -5422,7 +5425,7 @@
 	snd_hda_codec_write_cache(codec, codec->afg, 0, 0x7ED, 0);
 	codec->no_trigger_sense = 1;
 	codec->spec = spec;
-	spec->linear_tone_beep = 1;
+	spec->linear_tone_beep = 0;
 	codec->slave_dig_outs = stac92hd83xxx_slave_dig_outs;
 	spec->digbeep_nid = 0x21;
 	spec->dmic_nids = stac92hd83xxx_dmic_nids;
@@ -5462,15 +5465,21 @@
 		spec->num_dmics = stac92xx_connected_ports(codec,
 				stac92hd87b_dmic_nids,
 				STAC92HD87B_NUM_DMICS);
-		/* Fall through */
+		spec->num_pins = ARRAY_SIZE(stac92hd88xxx_pin_nids);
+		spec->pin_nids = stac92hd88xxx_pin_nids;
+		spec->mono_nid = 0;
+		spec->num_pwrs = 0;
+		break;
 	case 0x111d7666:
 	case 0x111d7667:
 	case 0x111d7668:
 	case 0x111d7669:
+		spec->num_dmics = stac92xx_connected_ports(codec,
+				stac92hd88xxx_dmic_nids,
+				STAC92HD88XXX_NUM_DMICS);
 		spec->num_pins = ARRAY_SIZE(stac92hd88xxx_pin_nids);
 		spec->pin_nids = stac92hd88xxx_pin_nids;
 		spec->mono_nid = 0;
-		spec->digbeep_nid = 0;
 		spec->num_pwrs = 0;
 		break;
 	case 0x111d7604:
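
The stac92xx hunk above changes the auto mic-switch guard from !fixed && !ext && !dock to !fixed || (!ext && !dock): switching is set up only when there is a fixed (internal) mic and at least one external or dock mic, not whenever any single mic exists. A throwaway truth-table check of the two guards, plain C, reusing the driver's fixed/ext/dock names as booleans:

#include <stdio.h>

int main(void)
{
	int fixed, ext, dock;

	for (fixed = 0; fixed <= 1; fixed++)
		for (ext = 0; ext <= 1; ext++)
			for (dock = 0; dock <= 1; dock++) {
				int old_skip = !fixed && !ext && !dock;
				int new_skip = !fixed || (!ext && !dock);
				printf("fixed=%d ext=%d dock=%d  old:%s  new:%s\n",
				       fixed, ext, dock,
				       old_skip ? "skip" : "switch",
				       new_skip ? "skip" : "switch");
			}
	return 0;
}

With only an external mic (fixed=0, ext=1, dock=0) the old guard still went on to configure auto-switching although there is nothing to switch to; the new one returns early.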
diff --git a/sound/pci/hda/patch_via.c b/sound/pci/hda/patch_via.c
index d1c3f8d..7f4852a 100644
--- a/sound/pci/hda/patch_via.c
+++ b/sound/pci/hda/patch_via.c
@@ -263,8 +263,7 @@
 		return;
 	snd_hda_codec_write(spec->codec, 0x1, 0, 0xf81,
 			    !spec->vt1708_jack_detectect);
-	cancel_delayed_work(&spec->vt1708_hp_work);
-	flush_scheduled_work();
+	cancel_delayed_work_sync(&spec->vt1708_hp_work);
 }
 
 
diff --git a/sound/pci/ice1712/delta.c b/sound/pci/ice1712/delta.c
index 712c171..7b62de0 100644
--- a/sound/pci/ice1712/delta.c
+++ b/sound/pci/ice1712/delta.c
@@ -96,6 +96,11 @@
 		tmp |= ICE1712_DELTA_AP_CCLK | ICE1712_DELTA_AP_CS_CODEC;
 		tmp &= ~ICE1712_DELTA_AP_CS_DIGITAL;
 		break;
+	case ICE1712_SUBDEVICE_DELTA66E:
+		tmp |= ICE1712_DELTA_66E_CCLK | ICE1712_DELTA_66E_CS_CHIP_A |
+		       ICE1712_DELTA_66E_CS_CHIP_B;
+		tmp &= ~ICE1712_DELTA_66E_CS_CS8427;
+		break;
 	case ICE1712_SUBDEVICE_VX442:
 		tmp |= ICE1712_VX442_CCLK | ICE1712_VX442_CODEC_CHIP_A | ICE1712_VX442_CODEC_CHIP_B;
 		tmp &= ~ICE1712_VX442_CS_DIGITAL;
@@ -119,6 +124,9 @@
 	case ICE1712_SUBDEVICE_DELTA410:
 		tmp |= ICE1712_DELTA_AP_CS_DIGITAL;
 		break;
+	case ICE1712_SUBDEVICE_DELTA66E:
+		tmp |= ICE1712_DELTA_66E_CS_CS8427;
+		break;
 	case ICE1712_SUBDEVICE_VX442:
 		tmp |= ICE1712_VX442_CS_DIGITAL;
 		break;
@@ -276,6 +284,20 @@
 }
 
 /*
+ * AK4524 on Delta66 rev E to choose the chip address
+ */
+static void delta66e_ak4524_lock(struct snd_akm4xxx *ak, int chip)
+{
+	struct snd_ak4xxx_private *priv = (void *)ak->private_value[0];
+	struct snd_ice1712 *ice = ak->private_data[0];
+
+	snd_ice1712_save_gpio_status(ice);
+	priv->cs_mask =
+	priv->cs_addr = chip == 0 ? ICE1712_DELTA_66E_CS_CHIP_A :
+				    ICE1712_DELTA_66E_CS_CHIP_B;
+}
+
+/*
  * AK4528 on VX442 to choose the chip mask
  */
 static void vx442_ak4524_lock(struct snd_akm4xxx *ak, int chip)
@@ -487,6 +509,29 @@
 	.mask_flags = 0,
 };
 
+static struct snd_akm4xxx akm_delta66e __devinitdata = {
+	.type = SND_AK4524,
+	.num_adcs = 4,
+	.num_dacs = 4,
+	.ops = {
+		.lock = delta66e_ak4524_lock,
+		.set_rate_val = delta_ak4524_set_rate_val
+	}
+};
+
+static struct snd_ak4xxx_private akm_delta66e_priv __devinitdata = {
+	.caddr = 2,
+	.cif = 0, /* the default level of the CIF pin from AK4524 */
+	.data_mask = ICE1712_DELTA_66E_DOUT,
+	.clk_mask = ICE1712_DELTA_66E_CCLK,
+	.cs_mask = 0,
+	.cs_addr = 0, /* set later */
+	.cs_none = 0,
+	.add_flags = 0,
+	.mask_flags = 0,
+};
+
+
 static struct snd_akm4xxx akm_delta44 __devinitdata = {
 	.type = SND_AK4524,
 	.num_adcs = 4,
@@ -644,9 +689,11 @@
 		err = snd_ice1712_akm4xxx_init(ak, &akm_delta44, &akm_delta44_priv, ice);
 		break;
 	case ICE1712_SUBDEVICE_VX442:
-	case ICE1712_SUBDEVICE_DELTA66E:
 		err = snd_ice1712_akm4xxx_init(ak, &akm_vx442, &akm_vx442_priv, ice);
 		break;
+	case ICE1712_SUBDEVICE_DELTA66E:
+		err = snd_ice1712_akm4xxx_init(ak, &akm_delta66e, &akm_delta66e_priv, ice);
+		break;
 	default:
 		snd_BUG();
 		return -EINVAL;
diff --git a/sound/pci/ice1712/delta.h b/sound/pci/ice1712/delta.h
index 1a0ac6c..11a9c3a 100644
--- a/sound/pci/ice1712/delta.h
+++ b/sound/pci/ice1712/delta.h
@@ -144,6 +144,17 @@
 #define ICE1712_DELTA_1010LT_CS_NONE	0x50	/* nothing */
 #define ICE1712_DELTA_1010LT_WORDCLOCK 0x80	/* sample clock source: 0 = Word Clock Input, 1 = S/PDIF Input ??? */
 
+/* M-Audio Delta 66 rev. E definitions.
+ * Newer revisions of Delta 66 have CS8427 over SPI for
+ * S/PDIF transceiver instead of CS8404/CS8414. */
+/* 0x01 = DFS */
+#define ICE1712_DELTA_66E_CCLK		0x02	/* SPI clock */
+#define ICE1712_DELTA_66E_DIN		0x04	/* data input */
+#define ICE1712_DELTA_66E_DOUT		0x08	/* data output */
+#define ICE1712_DELTA_66E_CS_CS8427	0x10	/* chip select, low = CS8427 */
+#define ICE1712_DELTA_66E_CS_CHIP_A	0x20	/* AK4524 #0 */
+#define ICE1712_DELTA_66E_CS_CHIP_B	0x40	/* AK4524 #1 */
+
 /* Digigram VX442 definitions */
 #define ICE1712_VX442_CCLK		0x02	/* SPI clock */
 #define ICE1712_VX442_DIN		0x04	/* data input */
diff --git a/sound/pci/oxygen/Makefile b/sound/pci/oxygen/Makefile
index acd8f15..0f87265 100644
--- a/sound/pci/oxygen/Makefile
+++ b/sound/pci/oxygen/Makefile
@@ -1,10 +1,8 @@
 snd-oxygen-lib-objs := oxygen_io.o oxygen_lib.o oxygen_mixer.o oxygen_pcm.o
-snd-hifier-objs := hifier.o
-snd-oxygen-objs := oxygen.o
+snd-oxygen-objs := oxygen.o xonar_dg.o
 snd-virtuoso-objs := virtuoso.o xonar_lib.o \
 	xonar_pcm179x.o xonar_cs43xx.o xonar_wm87x6.o xonar_hdmi.o
 
 obj-$(CONFIG_SND_OXYGEN_LIB) += snd-oxygen-lib.o
-obj-$(CONFIG_SND_HIFIER) += snd-hifier.o
 obj-$(CONFIG_SND_OXYGEN) += snd-oxygen.o
 obj-$(CONFIG_SND_VIRTUOSO) += snd-virtuoso.o
diff --git a/sound/pci/oxygen/cs4245.h b/sound/pci/oxygen/cs4245.h
new file mode 100644
index 0000000..5e0197e
--- /dev/null
+++ b/sound/pci/oxygen/cs4245.h
@@ -0,0 +1,107 @@
+#define CS4245_CHIP_ID		0x01
+#define CS4245_POWER_CTRL	0x02
+#define CS4245_DAC_CTRL_1	0x03
+#define CS4245_ADC_CTRL		0x04
+#define CS4245_MCLK_FREQ	0x05
+#define CS4245_SIGNAL_SEL	0x06
+#define CS4245_PGA_B_CTRL	0x07
+#define CS4245_PGA_A_CTRL	0x08
+#define CS4245_ANALOG_IN	0x09
+#define CS4245_DAC_A_CTRL	0x0a
+#define CS4245_DAC_B_CTRL	0x0b
+#define CS4245_DAC_CTRL_2	0x0c
+#define CS4245_INT_STATUS	0x0d
+#define CS4245_INT_MASK		0x0e
+#define CS4245_INT_MODE_MSB	0x0f
+#define CS4245_INT_MODE_LSB	0x10
+
+/* Chip ID */
+#define CS4245_CHIP_PART_MASK	0xf0
+#define CS4245_CHIP_REV_MASK	0x0f
+
+/* Power Control */
+#define CS4245_FREEZE		0x80
+#define CS4245_PDN_MIC		0x08
+#define CS4245_PDN_ADC		0x04
+#define CS4245_PDN_DAC		0x02
+#define CS4245_PDN		0x01
+
+/* DAC Control */
+#define CS4245_DAC_FM_MASK	0xc0
+#define CS4245_DAC_FM_SINGLE	0x00
+#define CS4245_DAC_FM_DOUBLE	0x40
+#define CS4245_DAC_FM_QUAD	0x80
+#define CS4245_DAC_DIF_MASK	0x30
+#define CS4245_DAC_DIF_LJUST	0x00
+#define CS4245_DAC_DIF_I2S	0x10
+#define CS4245_DAC_DIF_RJUST_16	0x20
+#define CS4245_DAC_DIF_RJUST_24	0x30
+#define CS4245_RESERVED_1	0x08
+#define CS4245_MUTE_DAC		0x04
+#define CS4245_DEEMPH		0x02
+#define CS4245_DAC_MASTER	0x01
+
+/* ADC Control */
+#define CS4245_ADC_FM_MASK	0xc0
+#define CS4245_ADC_FM_SINGLE	0x00
+#define CS4245_ADC_FM_DOUBLE	0x40
+#define CS4245_ADC_FM_QUAD	0x80
+#define CS4245_ADC_DIF_MASK	0x10
+#define CS4245_ADC_DIF_LJUST	0x00
+#define CS4245_ADC_DIF_I2S	0x10
+#define CS4245_MUTE_ADC		0x04
+#define CS4245_HPF_FREEZE	0x02
+#define CS4245_ADC_MASTER	0x01
+
+/* MCLK Frequency */
+#define CS4245_MCLK1_MASK	0x70
+#define CS4245_MCLK1_SHIFT	4
+#define CS4245_MCLK2_MASK	0x07
+#define CS4245_MCLK2_SHIFT	0
+#define CS4245_MCLK_1		0
+#define CS4245_MCLK_1_5		1
+#define CS4245_MCLK_2		2
+#define CS4245_MCLK_3		3
+#define CS4245_MCLK_4		4
+
+/* Signal Selection */
+#define CS4245_A_OUT_SEL_MASK	0x60
+#define CS4245_A_OUT_SEL_HIZ	0x00
+#define CS4245_A_OUT_SEL_DAC	0x20
+#define CS4245_A_OUT_SEL_PGA	0x40
+#define CS4245_LOOP		0x02
+#define CS4245_ASYNCH		0x01
+
+/* Channel B/A PGA Control */
+#define CS4245_PGA_GAIN_MASK	0x3f
+
+/* ADC Input Control */
+#define CS4245_PGA_SOFT		0x10
+#define CS4245_PGA_ZERO		0x08
+#define CS4245_SEL_MASK		0x07
+#define CS4245_SEL_MIC		0x00
+#define CS4245_SEL_INPUT_1	0x01
+#define CS4245_SEL_INPUT_2	0x02
+#define CS4245_SEL_INPUT_3	0x03
+#define CS4245_SEL_INPUT_4	0x04
+#define CS4245_SEL_INPUT_5	0x05
+#define CS4245_SEL_INPUT_6	0x06
+
+/* DAC Channel A/B Volume Control */
+#define CS4245_VOL_MASK		0xff
+
+/* DAC Control 2 */
+#define CS4245_DAC_SOFT		0x80
+#define CS4245_DAC_ZERO		0x40
+#define CS4245_INVERT_DAC	0x20
+#define CS4245_INT_ACTIVE_HIGH	0x01
+
+/* Interrupt Status/Mask/Mode */
+#define CS4245_ADC_CLK_ERR	0x08
+#define CS4245_DAC_CLK_ERR	0x04
+#define CS4245_ADC_OVFL		0x02
+#define CS4245_ADC_UNDRFL	0x01
+
+
+#define CS4245_SPI_ADDRESS	(0x9e << 16)
+#define CS4245_SPI_WRITE	(0 << 16)
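
cs4245.h above is pure register and bit definitions; the code that drives the CS4245 lives in the new xonar_dg.c, which is not part of this excerpt. Since CS4245_SPI_ADDRESS and CS4245_SPI_WRITE occupy bits 23:16, a register write is presumably framed as a 24-bit SPI word of chip-address byte, register index and value; the sketch below shows that assumed framing, and cs4245_spi_word() is an invented helper, not a function from the driver.

#include <stdint.h>
#include <stdio.h>

#define CS4245_SPI_ADDRESS	(0x9e << 16)
#define CS4245_SPI_WRITE	(0 << 16)
#define CS4245_DAC_A_CTRL	0x0a

/* assumed framing: [23:16] chip address + R/W, [15:8] register, [7:0] value */
static uint32_t cs4245_spi_word(uint8_t reg, uint8_t value)
{
	return CS4245_SPI_ADDRESS | CS4245_SPI_WRITE | (reg << 8) | value;
}

int main(void)
{
	printf("DAC A volume 0x30 -> SPI word 0x%06x\n",
	       (unsigned int)cs4245_spi_word(CS4245_DAC_A_CTRL, 0x30));
	return 0;
}

Such a word would then go out through the Oxygen SPI engine, analogous to the ak4396_write() calls elsewhere in this diff.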
diff --git a/sound/pci/oxygen/hifier.c b/sound/pci/oxygen/hifier.c
deleted file mode 100644
index 5a87d68..0000000
--- a/sound/pci/oxygen/hifier.c
+++ /dev/null
@@ -1,239 +0,0 @@
-/*
- * C-Media CMI8788 driver for the MediaTek/TempoTec HiFier Fantasia
- *
- * Copyright (c) Clemens Ladisch <clemens@ladisch.de>
- *
- *
- *  This driver is free software; you can redistribute it and/or modify
- *  it under the terms of the GNU General Public License, version 2.
- *
- *  This driver is distributed in the hope that it will be useful,
- *  but WITHOUT ANY WARRANTY; without even the implied warranty of
- *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *  GNU General Public License for more details.
- *
- *  You should have received a copy of the GNU General Public License
- *  along with this driver; if not, write to the Free Software
- *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
- */
-
-/*
- * CMI8788:
- *
- * SPI 0 -> AK4396
- */
-
-#include <linux/delay.h>
-#include <linux/pci.h>
-#include <sound/control.h>
-#include <sound/core.h>
-#include <sound/initval.h>
-#include <sound/pcm.h>
-#include <sound/tlv.h>
-#include "oxygen.h"
-#include "ak4396.h"
-
-MODULE_AUTHOR("Clemens Ladisch <clemens@ladisch.de>");
-MODULE_DESCRIPTION("TempoTec HiFier driver");
-MODULE_LICENSE("GPL v2");
-
-static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX;
-static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR;
-static int enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP;
-
-module_param_array(index, int, NULL, 0444);
-MODULE_PARM_DESC(index, "card index");
-module_param_array(id, charp, NULL, 0444);
-MODULE_PARM_DESC(id, "ID string");
-module_param_array(enable, bool, NULL, 0444);
-MODULE_PARM_DESC(enable, "enable card");
-
-static DEFINE_PCI_DEVICE_TABLE(hifier_ids) = {
-	{ OXYGEN_PCI_SUBID(0x14c3, 0x1710) },
-	{ OXYGEN_PCI_SUBID(0x14c3, 0x1711) },
-	{ OXYGEN_PCI_SUBID_BROKEN_EEPROM },
-	{ }
-};
-MODULE_DEVICE_TABLE(pci, hifier_ids);
-
-struct hifier_data {
-	u8 ak4396_regs[5];
-};
-
-static void ak4396_write(struct oxygen *chip, u8 reg, u8 value)
-{
-	struct hifier_data *data = chip->model_data;
-
-	oxygen_write_spi(chip, OXYGEN_SPI_TRIGGER  |
-			 OXYGEN_SPI_DATA_LENGTH_2 |
-			 OXYGEN_SPI_CLOCK_160 |
-			 (0 << OXYGEN_SPI_CODEC_SHIFT) |
-			 OXYGEN_SPI_CEN_LATCH_CLOCK_HI,
-			 AK4396_WRITE | (reg << 8) | value);
-	data->ak4396_regs[reg] = value;
-}
-
-static void ak4396_write_cached(struct oxygen *chip, u8 reg, u8 value)
-{
-	struct hifier_data *data = chip->model_data;
-
-	if (value != data->ak4396_regs[reg])
-		ak4396_write(chip, reg, value);
-}
-
-static void hifier_registers_init(struct oxygen *chip)
-{
-	struct hifier_data *data = chip->model_data;
-
-	ak4396_write(chip, AK4396_CONTROL_1, AK4396_DIF_24_MSB | AK4396_RSTN);
-	ak4396_write(chip, AK4396_CONTROL_2,
-		     data->ak4396_regs[AK4396_CONTROL_2]);
-	ak4396_write(chip, AK4396_CONTROL_3, AK4396_PCM);
-	ak4396_write(chip, AK4396_LCH_ATT, chip->dac_volume[0]);
-	ak4396_write(chip, AK4396_RCH_ATT, chip->dac_volume[1]);
-}
-
-static void hifier_init(struct oxygen *chip)
-{
-	struct hifier_data *data = chip->model_data;
-
-	data->ak4396_regs[AK4396_CONTROL_2] =
-		AK4396_SMUTE | AK4396_DEM_OFF | AK4396_DFS_NORMAL;
-	hifier_registers_init(chip);
-
-	snd_component_add(chip->card, "AK4396");
-	snd_component_add(chip->card, "CS5340");
-}
-
-static void hifier_cleanup(struct oxygen *chip)
-{
-}
-
-static void hifier_resume(struct oxygen *chip)
-{
-	hifier_registers_init(chip);
-}
-
-static void set_ak4396_params(struct oxygen *chip,
-			       struct snd_pcm_hw_params *params)
-{
-	struct hifier_data *data = chip->model_data;
-	u8 value;
-
-	value = data->ak4396_regs[AK4396_CONTROL_2] & ~AK4396_DFS_MASK;
-	if (params_rate(params) <= 54000)
-		value |= AK4396_DFS_NORMAL;
-	else if (params_rate(params) <= 108000)
-		value |= AK4396_DFS_DOUBLE;
-	else
-		value |= AK4396_DFS_QUAD;
-
-	msleep(1); /* wait for the new MCLK to become stable */
-
-	if (value != data->ak4396_regs[AK4396_CONTROL_2]) {
-		ak4396_write(chip, AK4396_CONTROL_1,
-			     AK4396_DIF_24_MSB);
-		ak4396_write(chip, AK4396_CONTROL_2, value);
-		ak4396_write(chip, AK4396_CONTROL_1,
-			     AK4396_DIF_24_MSB | AK4396_RSTN);
-	}
-}
-
-static void update_ak4396_volume(struct oxygen *chip)
-{
-	ak4396_write_cached(chip, AK4396_LCH_ATT, chip->dac_volume[0]);
-	ak4396_write_cached(chip, AK4396_RCH_ATT, chip->dac_volume[1]);
-}
-
-static void update_ak4396_mute(struct oxygen *chip)
-{
-	struct hifier_data *data = chip->model_data;
-	u8 value;
-
-	value = data->ak4396_regs[AK4396_CONTROL_2] & ~AK4396_SMUTE;
-	if (chip->dac_mute)
-		value |= AK4396_SMUTE;
-	ak4396_write_cached(chip, AK4396_CONTROL_2, value);
-}
-
-static void set_cs5340_params(struct oxygen *chip,
-			      struct snd_pcm_hw_params *params)
-{
-}
-
-static const DECLARE_TLV_DB_LINEAR(ak4396_db_scale, TLV_DB_GAIN_MUTE, 0);
-
-static const struct oxygen_model model_hifier = {
-	.shortname = "C-Media CMI8787",
-	.longname = "C-Media Oxygen HD Audio",
-	.chip = "CMI8788",
-	.init = hifier_init,
-	.cleanup = hifier_cleanup,
-	.resume = hifier_resume,
-	.get_i2s_mclk = oxygen_default_i2s_mclk,
-	.set_dac_params = set_ak4396_params,
-	.set_adc_params = set_cs5340_params,
-	.update_dac_volume = update_ak4396_volume,
-	.update_dac_mute = update_ak4396_mute,
-	.dac_tlv = ak4396_db_scale,
-	.model_data_size = sizeof(struct hifier_data),
-	.device_config = PLAYBACK_0_TO_I2S |
-			 PLAYBACK_1_TO_SPDIF |
-			 CAPTURE_0_FROM_I2S_1,
-	.dac_channels = 2,
-	.dac_volume_min = 0,
-	.dac_volume_max = 255,
-	.function_flags = OXYGEN_FUNCTION_SPI,
-	.dac_i2s_format = OXYGEN_I2S_FORMAT_LJUST,
-	.adc_i2s_format = OXYGEN_I2S_FORMAT_LJUST,
-};
-
-static int __devinit get_hifier_model(struct oxygen *chip,
-				      const struct pci_device_id *id)
-{
-	chip->model = model_hifier;
-	return 0;
-}
-
-static int __devinit hifier_probe(struct pci_dev *pci,
-				  const struct pci_device_id *pci_id)
-{
-	static int dev;
-	int err;
-
-	if (dev >= SNDRV_CARDS)
-		return -ENODEV;
-	if (!enable[dev]) {
-		++dev;
-		return -ENOENT;
-	}
-	err = oxygen_pci_probe(pci, index[dev], id[dev], THIS_MODULE,
-			       hifier_ids, get_hifier_model);
-	if (err >= 0)
-		++dev;
-	return err;
-}
-
-static struct pci_driver hifier_driver = {
-	.name = "CMI8787HiFier",
-	.id_table = hifier_ids,
-	.probe = hifier_probe,
-	.remove = __devexit_p(oxygen_pci_remove),
-#ifdef CONFIG_PM
-	.suspend = oxygen_pci_suspend,
-	.resume = oxygen_pci_resume,
-#endif
-};
-
-static int __init alsa_card_hifier_init(void)
-{
-	return pci_register_driver(&hifier_driver);
-}
-
-static void __exit alsa_card_hifier_exit(void)
-{
-	pci_unregister_driver(&hifier_driver);
-}
-
-module_init(alsa_card_hifier_init)
-module_exit(alsa_card_hifier_exit)
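
The standalone HiFier driver removed above is absorbed by the generic oxygen driver below (MODEL_FANTASIA/MODEL_SERENADE), and its AK4396 handling, including the sample-rate to DFS mode mapping from set_ak4396_params(), carries over. The rate thresholds below come straight from the deleted code; the concrete AK4396_DFS_* register values are assumptions made only to keep the example self-contained.

#include <stdio.h>

#define AK4396_DFS_NORMAL	0x00
#define AK4396_DFS_DOUBLE	0x08	/* value assumed; only the thresholds matter here */
#define AK4396_DFS_QUAD		0x10	/* value assumed */

/* same rate bands as set_ak4396_params() above */
static unsigned int ak4396_dfs(unsigned int rate)
{
	if (rate <= 54000)
		return AK4396_DFS_NORMAL;
	else if (rate <= 108000)
		return AK4396_DFS_DOUBLE;
	else
		return AK4396_DFS_QUAD;
}

int main(void)
{
	unsigned int rates[] = { 44100, 96000, 192000 };
	unsigned int i;

	for (i = 0; i < 3; i++)
		printf("%6u Hz -> DFS 0x%02x\n", rates[i], ak4396_dfs(rates[i]));
	return 0;
}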
diff --git a/sound/pci/oxygen/oxygen.c b/sound/pci/oxygen/oxygen.c
index 98a8eb3..d7e8ddd 100644
--- a/sound/pci/oxygen/oxygen.c
+++ b/sound/pci/oxygen/oxygen.c
@@ -20,19 +20,32 @@
 /*
  * CMI8788:
  *
- * SPI 0 -> 1st AK4396 (front)
- * SPI 1 -> 2nd AK4396 (surround)
- * SPI 2 -> 3rd AK4396 (center/LFE)
- * SPI 3 -> WM8785
- * SPI 4 -> 4th AK4396 (back)
+ *   SPI 0 -> 1st AK4396 (front)
+ *   SPI 1 -> 2nd AK4396 (surround)
+ *   SPI 2 -> 3rd AK4396 (center/LFE)
+ *   SPI 3 -> WM8785
+ *   SPI 4 -> 4th AK4396 (back)
  *
- * GPIO 0 -> DFS0 of AK5385
- * GPIO 1 -> DFS1 of AK5385
- * GPIO 8 -> enable headphone amplifier on HT-Omega models
+ *   GPIO 0 -> DFS0 of AK5385
+ *   GPIO 1 -> DFS1 of AK5385
+ *
+ * X-Meridian models:
+ *   GPIO 4 -> enable extension S/PDIF input
+ *   GPIO 6 -> enable on-board S/PDIF input
+ *
+ * Claro models:
+ *   GPIO 6 -> S/PDIF from optical (0) or coaxial (1) input
+ *   GPIO 8 -> enable headphone amplifier
  *
  * CM9780:
  *
- * GPO 0 -> route line-in (0) or AC97 output (1) to ADC input
+ *   LINE_OUT -> input of ADC
+ *
+ *   AUX_IN <- aux
+ *   CD_IN  <- CD
+ *   MIC_IN <- mic
+ *
+ *   GPO 0 -> route line-in (0) or AC97 output (1) to ADC input
  */
 
 #include <linux/delay.h>
@@ -41,18 +54,22 @@
 #include <sound/ac97_codec.h>
 #include <sound/control.h>
 #include <sound/core.h>
+#include <sound/info.h>
 #include <sound/initval.h>
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/tlv.h>
 #include "oxygen.h"
+#include "xonar_dg.h"
 #include "ak4396.h"
 #include "wm8785.h"
 
 MODULE_AUTHOR("Clemens Ladisch <clemens@ladisch.de>");
 MODULE_DESCRIPTION("C-Media CMI8788 driver");
 MODULE_LICENSE("GPL v2");
-MODULE_SUPPORTED_DEVICE("{{C-Media,CMI8788}}");
+MODULE_SUPPORTED_DEVICE("{{C-Media,CMI8786}"
+			",{C-Media,CMI8787}"
+			",{C-Media,CMI8788}}");
 
 static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX;
 static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR;
@@ -66,24 +83,46 @@
 MODULE_PARM_DESC(enable, "enable card");
 
 enum {
-	MODEL_CMEDIA_REF,	/* C-Media's reference design */
-	MODEL_MERIDIAN,		/* AuzenTech X-Meridian */
-	MODEL_CLARO,		/* HT-Omega Claro */
-	MODEL_CLARO_HALO,	/* HT-Omega Claro halo */
+	MODEL_CMEDIA_REF,
+	MODEL_MERIDIAN,
+	MODEL_MERIDIAN_2G,
+	MODEL_CLARO,
+	MODEL_CLARO_HALO,
+	MODEL_FANTASIA,
+	MODEL_SERENADE,
+	MODEL_2CH_OUTPUT,
+	MODEL_HG2PCI,
+	MODEL_XONAR_DG,
 };
 
 static DEFINE_PCI_DEVICE_TABLE(oxygen_ids) = {
+	/* C-Media's reference design */
 	{ OXYGEN_PCI_SUBID(0x10b0, 0x0216), .driver_data = MODEL_CMEDIA_REF },
+	{ OXYGEN_PCI_SUBID(0x10b0, 0x0217), .driver_data = MODEL_CMEDIA_REF },
 	{ OXYGEN_PCI_SUBID(0x10b0, 0x0218), .driver_data = MODEL_CMEDIA_REF },
 	{ OXYGEN_PCI_SUBID(0x10b0, 0x0219), .driver_data = MODEL_CMEDIA_REF },
 	{ OXYGEN_PCI_SUBID(0x13f6, 0x0001), .driver_data = MODEL_CMEDIA_REF },
 	{ OXYGEN_PCI_SUBID(0x13f6, 0x0010), .driver_data = MODEL_CMEDIA_REF },
 	{ OXYGEN_PCI_SUBID(0x13f6, 0x8788), .driver_data = MODEL_CMEDIA_REF },
-	{ OXYGEN_PCI_SUBID(0x13f6, 0xffff), .driver_data = MODEL_CMEDIA_REF },
 	{ OXYGEN_PCI_SUBID(0x147a, 0xa017), .driver_data = MODEL_CMEDIA_REF },
 	{ OXYGEN_PCI_SUBID(0x1a58, 0x0910), .driver_data = MODEL_CMEDIA_REF },
+	/* Asus Xonar DG */
+	{ OXYGEN_PCI_SUBID(0x1043, 0x8467), .driver_data = MODEL_XONAR_DG },
+	/* PCI 2.0 HD Audio */
+	{ OXYGEN_PCI_SUBID(0x13f6, 0x8782), .driver_data = MODEL_2CH_OUTPUT },
+	/* Kuroutoshikou CMI8787-HG2PCI */
+	{ OXYGEN_PCI_SUBID(0x13f6, 0xffff), .driver_data = MODEL_HG2PCI },
+	/* TempoTec HiFier Fantasia */
+	{ OXYGEN_PCI_SUBID(0x14c3, 0x1710), .driver_data = MODEL_FANTASIA },
+	/* TempoTec HiFier Serenade */
+	{ OXYGEN_PCI_SUBID(0x14c3, 0x1711), .driver_data = MODEL_SERENADE },
+	/* AuzenTech X-Meridian */
 	{ OXYGEN_PCI_SUBID(0x415a, 0x5431), .driver_data = MODEL_MERIDIAN },
+	/* AuzenTech X-Meridian 2G */
+	{ OXYGEN_PCI_SUBID(0x5431, 0x017a), .driver_data = MODEL_MERIDIAN_2G },
+	/* HT-Omega Claro */
 	{ OXYGEN_PCI_SUBID(0x7284, 0x9761), .driver_data = MODEL_CLARO },
+	/* HT-Omega Claro halo */
 	{ OXYGEN_PCI_SUBID(0x7284, 0x9781), .driver_data = MODEL_CLARO_HALO },
 	{ }
 };
@@ -95,9 +134,15 @@
 #define GPIO_AK5385_DFS_DOUBLE	0x0001
 #define GPIO_AK5385_DFS_QUAD	0x0002
 
+#define GPIO_MERIDIAN_DIG_MASK	0x0050
+#define GPIO_MERIDIAN_DIG_EXT	0x0010
+#define GPIO_MERIDIAN_DIG_BOARD	0x0040
+
+#define GPIO_CLARO_DIG_COAX	0x0040
 #define GPIO_CLARO_HP		0x0100
 
 struct generic_data {
+	unsigned int dacs;
 	u8 ak4396_regs[4][5];
 	u16 wm8785_regs[3];
 };
@@ -148,7 +193,7 @@
 	struct generic_data *data = chip->model_data;
 	unsigned int i;
 
-	for (i = 0; i < 4; ++i) {
+	for (i = 0; i < data->dacs; ++i) {
 		ak4396_write(chip, i, AK4396_CONTROL_1,
 			     AK4396_DIF_24_MSB | AK4396_RSTN);
 		ak4396_write(chip, i, AK4396_CONTROL_2,
@@ -166,6 +211,7 @@
 {
 	struct generic_data *data = chip->model_data;
 
+	data->dacs = chip->model.dac_channels_pcm / 2;
 	data->ak4396_regs[0][AK4396_CONTROL_2] =
 		AK4396_SMUTE | AK4396_DEM_OFF | AK4396_DFS_NORMAL;
 	ak4396_registers_init(chip);
@@ -207,6 +253,10 @@
 
 static void meridian_init(struct oxygen *chip)
 {
+	oxygen_set_bits16(chip, OXYGEN_GPIO_CONTROL,
+			  GPIO_MERIDIAN_DIG_MASK);
+	oxygen_write16_masked(chip, OXYGEN_GPIO_DATA,
+			      GPIO_MERIDIAN_DIG_BOARD, GPIO_MERIDIAN_DIG_MASK);
 	ak4396_init(chip);
 	ak5385_init(chip);
 }
@@ -220,6 +270,8 @@
 
 static void claro_init(struct oxygen *chip)
 {
+	oxygen_set_bits16(chip, OXYGEN_GPIO_CONTROL, GPIO_CLARO_DIG_COAX);
+	oxygen_clear_bits16(chip, OXYGEN_GPIO_DATA, GPIO_CLARO_DIG_COAX);
 	ak4396_init(chip);
 	wm8785_init(chip);
 	claro_enable_hp(chip);
@@ -227,11 +279,24 @@
 
 static void claro_halo_init(struct oxygen *chip)
 {
+	oxygen_set_bits16(chip, OXYGEN_GPIO_CONTROL, GPIO_CLARO_DIG_COAX);
+	oxygen_clear_bits16(chip, OXYGEN_GPIO_DATA, GPIO_CLARO_DIG_COAX);
 	ak4396_init(chip);
 	ak5385_init(chip);
 	claro_enable_hp(chip);
 }
 
+static void fantasia_init(struct oxygen *chip)
+{
+	ak4396_init(chip);
+	snd_component_add(chip->card, "CS5340");
+}
+
+static void stereo_output_init(struct oxygen *chip)
+{
+	ak4396_init(chip);
+}
+
 static void generic_cleanup(struct oxygen *chip)
 {
 }
@@ -268,6 +333,11 @@
 	claro_enable_hp(chip);
 }
 
+static void stereo_resume(struct oxygen *chip)
+{
+	ak4396_registers_init(chip);
+}
+
 static void set_ak4396_params(struct oxygen *chip,
 			      struct snd_pcm_hw_params *params)
 {
@@ -286,7 +356,7 @@
 	msleep(1); /* wait for the new MCLK to become stable */
 
 	if (value != data->ak4396_regs[0][AK4396_CONTROL_2]) {
-		for (i = 0; i < 4; ++i) {
+		for (i = 0; i < data->dacs; ++i) {
 			ak4396_write(chip, i, AK4396_CONTROL_1,
 				     AK4396_DIF_24_MSB);
 			ak4396_write(chip, i, AK4396_CONTROL_2, value);
@@ -298,9 +368,10 @@
 
 static void update_ak4396_volume(struct oxygen *chip)
 {
+	struct generic_data *data = chip->model_data;
 	unsigned int i;
 
-	for (i = 0; i < 4; ++i) {
+	for (i = 0; i < data->dacs; ++i) {
 		ak4396_write_cached(chip, i, AK4396_LCH_ATT,
 				    chip->dac_volume[i * 2]);
 		ak4396_write_cached(chip, i, AK4396_RCH_ATT,
@@ -317,7 +388,7 @@
 	value = data->ak4396_regs[0][AK4396_CONTROL_2] & ~AK4396_SMUTE;
 	if (chip->dac_mute)
 		value |= AK4396_SMUTE;
-	for (i = 0; i < 4; ++i)
+	for (i = 0; i < data->dacs; ++i)
 		ak4396_write_cached(chip, i, AK4396_CONTROL_2, value);
 }
 
@@ -356,6 +427,10 @@
 			      value, GPIO_AK5385_DFS_MASK);
 }
 
+static void set_no_params(struct oxygen *chip, struct snd_pcm_hw_params *params)
+{
+}
+
 static int rolloff_info(struct snd_kcontrol *ctl,
 			struct snd_ctl_elem_info *info)
 {
@@ -363,13 +438,7 @@
 		"Sharp Roll-off", "Slow Roll-off"
 	};
 
-	info->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
-	info->count = 1;
-	info->value.enumerated.items = 2;
-	if (info->value.enumerated.item >= 2)
-		info->value.enumerated.item = 1;
-	strcpy(info->value.enumerated.name, names[info->value.enumerated.item]);
-	return 0;
+	return snd_ctl_enum_info(info, 1, 2, names);
 }
 
 static int rolloff_get(struct snd_kcontrol *ctl,
@@ -400,7 +469,7 @@
 		reg &= ~AK4396_SLOW;
 	changed = reg != data->ak4396_regs[0][AK4396_CONTROL_2];
 	if (changed) {
-		for (i = 0; i < 4; ++i)
+		for (i = 0; i < data->dacs; ++i)
 			ak4396_write(chip, i, AK4396_CONTROL_2, reg);
 	}
 	mutex_unlock(&chip->mutex);
@@ -421,13 +490,7 @@
 		"None", "High-pass Filter"
 	};
 
-	info->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
-	info->count = 1;
-	info->value.enumerated.items = 2;
-	if (info->value.enumerated.item >= 2)
-		info->value.enumerated.item = 1;
-	strcpy(info->value.enumerated.name, names[info->value.enumerated.item]);
-	return 0;
+	return snd_ctl_enum_info(info, 1, 2, names);
 }
 
 static int hpf_get(struct snd_kcontrol *ctl, struct snd_ctl_elem_value *value)
@@ -466,6 +529,100 @@
 	.put = hpf_put,
 };
 
+static int meridian_dig_source_info(struct snd_kcontrol *ctl,
+				    struct snd_ctl_elem_info *info)
+{
+	static const char *const names[2] = { "On-board", "Extension" };
+
+	return snd_ctl_enum_info(info, 1, 2, names);
+}
+
+static int claro_dig_source_info(struct snd_kcontrol *ctl,
+				 struct snd_ctl_elem_info *info)
+{
+	static const char *const names[2] = { "Optical", "Coaxial" };
+
+	return snd_ctl_enum_info(info, 1, 2, names);
+}
+
+static int meridian_dig_source_get(struct snd_kcontrol *ctl,
+				   struct snd_ctl_elem_value *value)
+{
+	struct oxygen *chip = ctl->private_data;
+
+	value->value.enumerated.item[0] =
+		!!(oxygen_read16(chip, OXYGEN_GPIO_DATA) &
+		   GPIO_MERIDIAN_DIG_EXT);
+	return 0;
+}
+
+static int claro_dig_source_get(struct snd_kcontrol *ctl,
+				struct snd_ctl_elem_value *value)
+{
+	struct oxygen *chip = ctl->private_data;
+
+	value->value.enumerated.item[0] =
+		!!(oxygen_read16(chip, OXYGEN_GPIO_DATA) &
+		   GPIO_CLARO_DIG_COAX);
+	return 0;
+}
+
+static int meridian_dig_source_put(struct snd_kcontrol *ctl,
+				   struct snd_ctl_elem_value *value)
+{
+	struct oxygen *chip = ctl->private_data;
+	u16 old_reg, new_reg;
+	int changed;
+
+	mutex_lock(&chip->mutex);
+	old_reg = oxygen_read16(chip, OXYGEN_GPIO_DATA);
+	new_reg = old_reg & ~GPIO_MERIDIAN_DIG_MASK;
+	if (value->value.enumerated.item[0] == 0)
+		new_reg |= GPIO_MERIDIAN_DIG_BOARD;
+	else
+		new_reg |= GPIO_MERIDIAN_DIG_EXT;
+	changed = new_reg != old_reg;
+	if (changed)
+		oxygen_write16(chip, OXYGEN_GPIO_DATA, new_reg);
+	mutex_unlock(&chip->mutex);
+	return changed;
+}
+
+static int claro_dig_source_put(struct snd_kcontrol *ctl,
+				struct snd_ctl_elem_value *value)
+{
+	struct oxygen *chip = ctl->private_data;
+	u16 old_reg, new_reg;
+	int changed;
+
+	mutex_lock(&chip->mutex);
+	old_reg = oxygen_read16(chip, OXYGEN_GPIO_DATA);
+	new_reg = old_reg & ~GPIO_CLARO_DIG_COAX;
+	if (value->value.enumerated.item[0])
+		new_reg |= GPIO_CLARO_DIG_COAX;
+	changed = new_reg != old_reg;
+	if (changed)
+		oxygen_write16(chip, OXYGEN_GPIO_DATA, new_reg);
+	mutex_unlock(&chip->mutex);
+	return changed;
+}
+
+static const struct snd_kcontrol_new meridian_dig_source_control = {
+	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+	.name = "IEC958 Source Capture Enum",
+	.info = meridian_dig_source_info,
+	.get = meridian_dig_source_get,
+	.put = meridian_dig_source_put,
+};
+
+static const struct snd_kcontrol_new claro_dig_source_control = {
+	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+	.name = "IEC958 Source Capture Enum",
+	.info = claro_dig_source_info,
+	.get = claro_dig_source_get,
+	.put = claro_dig_source_put,
+};
+
 static int generic_mixer_init(struct oxygen *chip)
 {
 	return snd_ctl_add(chip->card, snd_ctl_new1(&rolloff_control, chip));
@@ -484,6 +641,81 @@
 	return 0;
 }
 
+static int meridian_mixer_init(struct oxygen *chip)
+{
+	int err;
+
+	err = generic_mixer_init(chip);
+	if (err < 0)
+		return err;
+	err = snd_ctl_add(chip->card,
+			  snd_ctl_new1(&meridian_dig_source_control, chip));
+	if (err < 0)
+		return err;
+	return 0;
+}
+
+static int claro_mixer_init(struct oxygen *chip)
+{
+	int err;
+
+	err = generic_wm8785_mixer_init(chip);
+	if (err < 0)
+		return err;
+	err = snd_ctl_add(chip->card,
+			  snd_ctl_new1(&claro_dig_source_control, chip));
+	if (err < 0)
+		return err;
+	return 0;
+}
+
+static int claro_halo_mixer_init(struct oxygen *chip)
+{
+	int err;
+
+	err = generic_mixer_init(chip);
+	if (err < 0)
+		return err;
+	err = snd_ctl_add(chip->card,
+			  snd_ctl_new1(&claro_dig_source_control, chip));
+	if (err < 0)
+		return err;
+	return 0;
+}
+
+static void dump_ak4396_registers(struct oxygen *chip,
+				  struct snd_info_buffer *buffer)
+{
+	struct generic_data *data = chip->model_data;
+	unsigned int dac, i;
+
+	for (dac = 0; dac < data->dacs; ++dac) {
+		snd_iprintf(buffer, "\nAK4396 %u:", dac + 1);
+		for (i = 0; i < 5; ++i)
+			snd_iprintf(buffer, " %02x", data->ak4396_regs[dac][i]);
+	}
+	snd_iprintf(buffer, "\n");
+}
+
+static void dump_wm8785_registers(struct oxygen *chip,
+				  struct snd_info_buffer *buffer)
+{
+	struct generic_data *data = chip->model_data;
+	unsigned int i;
+
+	snd_iprintf(buffer, "\nWM8785:");
+	for (i = 0; i < 3; ++i)
+		snd_iprintf(buffer, " %03x", data->wm8785_regs[i]);
+	snd_iprintf(buffer, "\n");
+}
+
+static void dump_oxygen_registers(struct oxygen *chip,
+				  struct snd_info_buffer *buffer)
+{
+	dump_ak4396_registers(chip, buffer);
+	dump_wm8785_registers(chip, buffer);
+}
+
 static const DECLARE_TLV_DB_LINEAR(ak4396_db_scale, TLV_DB_GAIN_MUTE, 0);
 
 static const struct oxygen_model model_generic = {
@@ -494,11 +726,11 @@
 	.mixer_init = generic_wm8785_mixer_init,
 	.cleanup = generic_cleanup,
 	.resume = generic_resume,
-	.get_i2s_mclk = oxygen_default_i2s_mclk,
 	.set_dac_params = set_ak4396_params,
 	.set_adc_params = set_wm8785_params,
 	.update_dac_volume = update_ak4396_volume,
 	.update_dac_mute = update_ak4396_mute,
+	.dump_registers = dump_oxygen_registers,
 	.dac_tlv = ak4396_db_scale,
 	.model_data_size = sizeof(struct generic_data),
 	.device_config = PLAYBACK_0_TO_I2S |
@@ -508,11 +740,14 @@
 			 CAPTURE_1_FROM_SPDIF |
 			 CAPTURE_2_FROM_AC97_1 |
 			 AC97_CD_INPUT,
-	.dac_channels = 8,
+	.dac_channels_pcm = 8,
+	.dac_channels_mixer = 8,
 	.dac_volume_min = 0,
 	.dac_volume_max = 255,
 	.function_flags = OXYGEN_FUNCTION_SPI |
 			  OXYGEN_FUNCTION_ENABLE_SPI_4_5,
+	.dac_mclks = OXYGEN_MCLKS(256, 128, 128),
+	.adc_mclks = OXYGEN_MCLKS(256, 256, 128),
 	.dac_i2s_format = OXYGEN_I2S_FORMAT_LJUST,
 	.adc_i2s_format = OXYGEN_I2S_FORMAT_LJUST,
 };
@@ -520,42 +755,87 @@
 static int __devinit get_oxygen_model(struct oxygen *chip,
 				      const struct pci_device_id *id)
 {
+	static const char *const names[] = {
+		[MODEL_MERIDIAN]	= "AuzenTech X-Meridian",
+		[MODEL_MERIDIAN_2G]	= "AuzenTech X-Meridian 2G",
+		[MODEL_CLARO]		= "HT-Omega Claro",
+		[MODEL_CLARO_HALO]	= "HT-Omega Claro halo",
+		[MODEL_FANTASIA]	= "TempoTec HiFier Fantasia",
+		[MODEL_SERENADE]	= "TempoTec HiFier Serenade",
+		[MODEL_HG2PCI]		= "CMI8787-HG2PCI",
+	};
+
 	chip->model = model_generic;
 	switch (id->driver_data) {
 	case MODEL_MERIDIAN:
+	case MODEL_MERIDIAN_2G:
 		chip->model.init = meridian_init;
-		chip->model.mixer_init = generic_mixer_init;
+		chip->model.mixer_init = meridian_mixer_init;
 		chip->model.resume = meridian_resume;
 		chip->model.set_adc_params = set_ak5385_params;
+		chip->model.dump_registers = dump_ak4396_registers;
 		chip->model.device_config = PLAYBACK_0_TO_I2S |
 					    PLAYBACK_1_TO_SPDIF |
 					    CAPTURE_0_FROM_I2S_2 |
 					    CAPTURE_1_FROM_SPDIF;
+		if (id->driver_data == MODEL_MERIDIAN)
+			chip->model.device_config |= AC97_CD_INPUT;
 		break;
 	case MODEL_CLARO:
 		chip->model.init = claro_init;
+		chip->model.mixer_init = claro_mixer_init;
 		chip->model.cleanup = claro_cleanup;
 		chip->model.suspend = claro_suspend;
 		chip->model.resume = claro_resume;
 		break;
 	case MODEL_CLARO_HALO:
 		chip->model.init = claro_halo_init;
-		chip->model.mixer_init = generic_mixer_init;
+		chip->model.mixer_init = claro_halo_mixer_init;
 		chip->model.cleanup = claro_cleanup;
 		chip->model.suspend = claro_suspend;
 		chip->model.resume = claro_resume;
 		chip->model.set_adc_params = set_ak5385_params;
+		chip->model.dump_registers = dump_ak4396_registers;
 		chip->model.device_config = PLAYBACK_0_TO_I2S |
 					    PLAYBACK_1_TO_SPDIF |
 					    CAPTURE_0_FROM_I2S_2 |
 					    CAPTURE_1_FROM_SPDIF;
 		break;
+	case MODEL_FANTASIA:
+	case MODEL_SERENADE:
+	case MODEL_2CH_OUTPUT:
+	case MODEL_HG2PCI:
+		chip->model.shortname = "C-Media CMI8787";
+		chip->model.chip = "CMI8787";
+		if (id->driver_data == MODEL_FANTASIA)
+			chip->model.init = fantasia_init;
+		else
+			chip->model.init = stereo_output_init;
+		chip->model.resume = stereo_resume;
+		chip->model.mixer_init = generic_mixer_init;
+		chip->model.set_adc_params = set_no_params;
+		chip->model.dump_registers = dump_ak4396_registers;
+		chip->model.device_config = PLAYBACK_0_TO_I2S |
+					    PLAYBACK_1_TO_SPDIF;
+		if (id->driver_data == MODEL_FANTASIA) {
+			chip->model.device_config |= CAPTURE_0_FROM_I2S_1;
+			chip->model.adc_mclks = OXYGEN_MCLKS(256, 128, 128);
+		}
+		chip->model.dac_channels_pcm = 2;
+		chip->model.dac_channels_mixer = 2;
+		break;
+	case MODEL_XONAR_DG:
+		chip->model = model_xonar_dg;
+		break;
 	}
 	if (id->driver_data == MODEL_MERIDIAN ||
+	    id->driver_data == MODEL_MERIDIAN_2G ||
 	    id->driver_data == MODEL_CLARO_HALO) {
 		chip->model.misc_flags = OXYGEN_MISC_MIDI;
 		chip->model.device_config |= MIDI_OUTPUT | MIDI_INPUT;
 	}
+	if (id->driver_data < ARRAY_SIZE(names) && names[id->driver_data])
+		chip->model.shortname = names[id->driver_data];
 	return 0;
 }
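
get_oxygen_model() above now takes the card's short name from a sparse, designated-initializer table, guarded by an ARRAY_SIZE() bound plus a NULL check, so models without an entry (MODEL_CMEDIA_REF, MODEL_2CH_OUTPUT, MODEL_XONAR_DG) keep whatever their base model set. A minimal standalone rendition of that lookup; the enum values and the default string here are placeholders, not the driver's real data.

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

enum { MODEL_CMEDIA_REF, MODEL_MERIDIAN, MODEL_CLARO, MODEL_HG2PCI };

int main(void)
{
	static const char *const names[] = {
		[MODEL_MERIDIAN] = "AuzenTech X-Meridian",
		[MODEL_CLARO]	 = "HT-Omega Claro",
		/* MODEL_HG2PCI falls past ARRAY_SIZE(names), so the bound check matters */
	};
	const char *shortname = "C-Media Oxygen HD Audio";	/* base model default */
	unsigned int driver_data = MODEL_CMEDIA_REF;

	if (driver_data < ARRAY_SIZE(names) && names[driver_data])
		shortname = names[driver_data];
	printf("shortname: %s\n", shortname);
	return 0;
}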
 
diff --git a/sound/pci/oxygen/oxygen.h b/sound/pci/oxygen/oxygen.h
index 7d5222c..c2ae63d 100644
--- a/sound/pci/oxygen/oxygen.h
+++ b/sound/pci/oxygen/oxygen.h
@@ -16,6 +16,10 @@
 #define PCM_AC97	5
 #define PCM_COUNT	6
 
+#define OXYGEN_MCLKS(f_single, f_double, f_quad) ((MCLK_##f_single << 0) | \
+						  (MCLK_##f_double << 2) | \
+						  (MCLK_##f_quad   << 4))
+
 #define OXYGEN_IO_SIZE	0x100
 
 #define OXYGEN_EEPROM_ID	0x434d	/* "CM" */
@@ -35,6 +39,7 @@
 #define MIDI_OUTPUT		0x0800
 #define MIDI_INPUT		0x1000
 #define AC97_CD_INPUT		0x2000
+#define AC97_FMIC_SWITCH	0x4000
 
 enum {
 	CONTROL_SPDIF_PCM,
@@ -65,6 +70,7 @@
 struct snd_pcm_hw_params;
 struct snd_kcontrol_new;
 struct snd_rawmidi;
+struct snd_info_buffer;
 struct oxygen;
 
 struct oxygen_model {
@@ -79,8 +85,6 @@
 	void (*resume)(struct oxygen *chip);
 	void (*pcm_hardware_filter)(unsigned int channel,
 				    struct snd_pcm_hardware *hardware);
-	unsigned int (*get_i2s_mclk)(struct oxygen *chip, unsigned int channel,
-				     struct snd_pcm_hw_params *hw_params);
 	void (*set_dac_params)(struct oxygen *chip,
 			       struct snd_pcm_hw_params *params);
 	void (*set_adc_params)(struct oxygen *chip,
@@ -92,15 +96,19 @@
 	void (*uart_input)(struct oxygen *chip);
 	void (*ac97_switch)(struct oxygen *chip,
 			    unsigned int reg, unsigned int mute);
+	void (*dump_registers)(struct oxygen *chip,
+			       struct snd_info_buffer *buffer);
 	const unsigned int *dac_tlv;
-	unsigned long private_data;
 	size_t model_data_size;
 	unsigned int device_config;
-	u8 dac_channels;
+	u8 dac_channels_pcm;
+	u8 dac_channels_mixer;
 	u8 dac_volume_min;
 	u8 dac_volume_max;
 	u8 misc_flags;
 	u8 function_flags;
+	u8 dac_mclks;
+	u8 adc_mclks;
 	u16 dac_i2s_format;
 	u16 adc_i2s_format;
 };
@@ -121,7 +129,6 @@
 	u8 pcm_running;
 	u8 dac_routing;
 	u8 spdif_playback_enable;
-	u8 revision;
 	u8 has_ac97_0;
 	u8 has_ac97_1;
 	u32 spdif_bits;
@@ -167,8 +174,6 @@
 /* oxygen_pcm.c */
 
 int oxygen_pcm_init(struct oxygen *chip);
-unsigned int oxygen_default_i2s_mclk(struct oxygen *chip, unsigned int channel,
-				     struct snd_pcm_hw_params *hw_params);
 
 /* oxygen_io.c */
 
diff --git a/sound/pci/oxygen/oxygen_io.c b/sound/pci/oxygen/oxygen_io.c
index 09b2b2a..f5164b1 100644
--- a/sound/pci/oxygen/oxygen_io.c
+++ b/sound/pci/oxygen/oxygen_io.c
@@ -197,11 +197,11 @@
 {
 	unsigned int count;
 
-	/* should not need more than 7.68 us (24 * 320 ns) */
+	/* should not need more than 30.72 us (24 * 1.28 us) */
 	count = 10;
 	while ((oxygen_read8(chip, OXYGEN_SPI_CONTROL) & OXYGEN_SPI_BUSY)
 	       && count > 0) {
-		udelay(1);
+		udelay(4);
 		--count;
 	}
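
The busy-wait change above keeps the 10-iteration loop but widens the per-step delay from 1 us to 4 us, in line with the corrected comment: at the slowest SPI clock a 24-bit transfer takes 24 x 1.28 us = 30.72 us, so the old 10 us budget could expire before the controller finished, while 10 x 4 us = 40 us covers the worst case. A trivial check of that arithmetic:

#include <stdio.h>

int main(void)
{
	double bit_time_us = 1.28;		/* slowest SPI clock period per bit */
	double transfer_us = 24 * bit_time_us;	/* 30.72 us for a 24-bit word */

	printf("worst case %.2f us, old budget %d us, new budget %d us\n",
	       transfer_us, 10 * 1, 10 * 4);
	return 0;
}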
 
diff --git a/sound/pci/oxygen/oxygen_lib.c b/sound/pci/oxygen/oxygen_lib.c
index e5ebe56..70b7398 100644
--- a/sound/pci/oxygen/oxygen_lib.c
+++ b/sound/pci/oxygen/oxygen_lib.c
@@ -202,7 +202,13 @@
 	struct oxygen *chip = entry->private_data;
 	int i, j;
 
-	snd_iprintf(buffer, "CMI8788\n\n");
+	switch (oxygen_read8(chip, OXYGEN_REVISION) & OXYGEN_PACKAGE_ID_MASK) {
+	case OXYGEN_PACKAGE_ID_8786: i = '6'; break;
+	case OXYGEN_PACKAGE_ID_8787: i = '7'; break;
+	case OXYGEN_PACKAGE_ID_8788: i = '8'; break;
+	default:                     i = '?'; break;
+	}
+	snd_iprintf(buffer, "CMI878%c:\n", i);
 	for (i = 0; i < OXYGEN_IO_SIZE; i += 0x10) {
 		snd_iprintf(buffer, "%02x:", i);
 		for (j = 0; j < 0x10; ++j)
@@ -212,7 +218,7 @@
 	if (mutex_lock_interruptible(&chip->mutex) < 0)
 		return;
 	if (chip->has_ac97_0) {
-		snd_iprintf(buffer, "\nAC97\n");
+		snd_iprintf(buffer, "\nAC97:\n");
 		for (i = 0; i < 0x80; i += 0x10) {
 			snd_iprintf(buffer, "%02x:", i);
 			for (j = 0; j < 0x10; j += 2)
@@ -222,7 +228,7 @@
 		}
 	}
 	if (chip->has_ac97_1) {
-		snd_iprintf(buffer, "\nAC97 2\n");
+		snd_iprintf(buffer, "\nAC97 2:\n");
 		for (i = 0; i < 0x80; i += 0x10) {
 			snd_iprintf(buffer, "%02x:", i);
 			for (j = 0; j < 0x10; j += 2)
@@ -232,13 +238,15 @@
 		}
 	}
 	mutex_unlock(&chip->mutex);
+	if (chip->model.dump_registers)
+		chip->model.dump_registers(chip, buffer);
 }
 
 static void oxygen_proc_init(struct oxygen *chip)
 {
 	struct snd_info_entry *entry;
 
-	if (!snd_card_proc_new(chip->card, "cmi8788", &entry))
+	if (!snd_card_proc_new(chip->card, "oxygen", &entry))
 		snd_info_set_text_ops(entry, chip, oxygen_proc_read);
 }
 #else
@@ -262,7 +270,7 @@
 	 */
 	subdevice = oxygen_read_eeprom(chip, 2);
 	/* use default ID if EEPROM is missing */
-	if (subdevice == 0xffff)
+	if (subdevice == 0xffff && oxygen_read_eeprom(chip, 1) == 0xffff)
 		subdevice = 0x8788;
 	/*
 	 * We use only the subsystem device ID for searching because it is
@@ -364,12 +372,7 @@
 		(IEC958_AES1_CON_PCM_CODER << OXYGEN_SPDIF_CATEGORY_SHIFT);
 	chip->spdif_pcm_bits = chip->spdif_bits;
 
-	if (oxygen_read8(chip, OXYGEN_REVISION) & OXYGEN_REVISION_2)
-		chip->revision = 2;
-	else
-		chip->revision = 1;
-
-	if (chip->revision == 1)
+	if (!(oxygen_read8(chip, OXYGEN_REVISION) & OXYGEN_REVISION_2))
 		oxygen_set_bits8(chip, OXYGEN_MISC,
 				 OXYGEN_MISC_PCI_MEM_W_1_CLOCK);
 
@@ -406,28 +409,40 @@
 		      (OXYGEN_FORMAT_16 << OXYGEN_MULTICH_FORMAT_SHIFT));
 	oxygen_write8(chip, OXYGEN_REC_CHANNELS, OXYGEN_REC_CHANNELS_2_2_2);
 	oxygen_write16(chip, OXYGEN_I2S_MULTICH_FORMAT,
-		       OXYGEN_RATE_48000 | chip->model.dac_i2s_format |
-		       OXYGEN_I2S_MCLK_256 | OXYGEN_I2S_BITS_16 |
-		       OXYGEN_I2S_MASTER | OXYGEN_I2S_BCLK_64);
+		       OXYGEN_RATE_48000 |
+		       chip->model.dac_i2s_format |
+		       OXYGEN_I2S_MCLK(chip->model.dac_mclks) |
+		       OXYGEN_I2S_BITS_16 |
+		       OXYGEN_I2S_MASTER |
+		       OXYGEN_I2S_BCLK_64);
 	if (chip->model.device_config & CAPTURE_0_FROM_I2S_1)
 		oxygen_write16(chip, OXYGEN_I2S_A_FORMAT,
-			       OXYGEN_RATE_48000 | chip->model.adc_i2s_format |
-			       OXYGEN_I2S_MCLK_256 | OXYGEN_I2S_BITS_16 |
-			       OXYGEN_I2S_MASTER | OXYGEN_I2S_BCLK_64);
+			       OXYGEN_RATE_48000 |
+			       chip->model.adc_i2s_format |
+			       OXYGEN_I2S_MCLK(chip->model.adc_mclks) |
+			       OXYGEN_I2S_BITS_16 |
+			       OXYGEN_I2S_MASTER |
+			       OXYGEN_I2S_BCLK_64);
 	else
 		oxygen_write16(chip, OXYGEN_I2S_A_FORMAT,
-			       OXYGEN_I2S_MASTER | OXYGEN_I2S_MUTE_MCLK);
+			       OXYGEN_I2S_MASTER |
+			       OXYGEN_I2S_MUTE_MCLK);
 	if (chip->model.device_config & (CAPTURE_0_FROM_I2S_2 |
 					 CAPTURE_2_FROM_I2S_2))
 		oxygen_write16(chip, OXYGEN_I2S_B_FORMAT,
-			       OXYGEN_RATE_48000 | chip->model.adc_i2s_format |
-			       OXYGEN_I2S_MCLK_256 | OXYGEN_I2S_BITS_16 |
-			       OXYGEN_I2S_MASTER | OXYGEN_I2S_BCLK_64);
+			       OXYGEN_RATE_48000 |
+			       chip->model.adc_i2s_format |
+			       OXYGEN_I2S_MCLK(chip->model.adc_mclks) |
+			       OXYGEN_I2S_BITS_16 |
+			       OXYGEN_I2S_MASTER |
+			       OXYGEN_I2S_BCLK_64);
 	else
 		oxygen_write16(chip, OXYGEN_I2S_B_FORMAT,
-			       OXYGEN_I2S_MASTER | OXYGEN_I2S_MUTE_MCLK);
+			       OXYGEN_I2S_MASTER |
+			       OXYGEN_I2S_MUTE_MCLK);
 	oxygen_write16(chip, OXYGEN_I2S_C_FORMAT,
-		       OXYGEN_I2S_MASTER | OXYGEN_I2S_MUTE_MCLK);
+		       OXYGEN_I2S_MASTER |
+		       OXYGEN_I2S_MUTE_MCLK);
 	oxygen_clear_bits32(chip, OXYGEN_SPDIF_CONTROL,
 			    OXYGEN_SPDIF_OUT_ENABLE |
 			    OXYGEN_SPDIF_LOOPBACK);
@@ -557,7 +572,8 @@
 	oxygen_shutdown(chip);
 	if (chip->irq >= 0)
 		free_irq(chip->irq, chip);
-	flush_scheduled_work();
+	flush_work_sync(&chip->spdif_input_bits_work);
+	flush_work_sync(&chip->gpio_work);
 	chip->model.cleanup(chip);
 	kfree(chip->model_data);
 	mutex_destroy(&chip->mutex);
@@ -648,8 +664,8 @@
 
 	strcpy(card->driver, chip->model.chip);
 	strcpy(card->shortname, chip->model.shortname);
-	sprintf(card->longname, "%s (rev %u) at %#lx, irq %i",
-		chip->model.longname, chip->revision, chip->addr, chip->irq);
+	sprintf(card->longname, "%s at %#lx, irq %i",
+		chip->model.longname, chip->addr, chip->irq);
 	strcpy(card->mixername, chip->model.chip);
 	snd_component_add(card, chip->model.chip);
 
@@ -733,7 +749,8 @@
 	spin_unlock_irq(&chip->reg_lock);
 
 	synchronize_irq(chip->irq);
-	flush_scheduled_work();
+	flush_work_sync(&chip->spdif_input_bits_work);
+	flush_work_sync(&chip->gpio_work);
 	chip->interrupt_mask = saved_interrupt_mask;
 
 	pci_disable_device(pci);
diff --git a/sound/pci/oxygen/oxygen_mixer.c b/sound/pci/oxygen/oxygen_mixer.c
index 2849b36..9bff14d 100644
--- a/sound/pci/oxygen/oxygen_mixer.c
+++ b/sound/pci/oxygen/oxygen_mixer.c
@@ -31,7 +31,7 @@
 	struct oxygen *chip = ctl->private_data;
 
 	info->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
-	info->count = chip->model.dac_channels;
+	info->count = chip->model.dac_channels_mixer;
 	info->value.integer.min = chip->model.dac_volume_min;
 	info->value.integer.max = chip->model.dac_volume_max;
 	return 0;
@@ -44,7 +44,7 @@
 	unsigned int i;
 
 	mutex_lock(&chip->mutex);
-	for (i = 0; i < chip->model.dac_channels; ++i)
+	for (i = 0; i < chip->model.dac_channels_mixer; ++i)
 		value->value.integer.value[i] = chip->dac_volume[i];
 	mutex_unlock(&chip->mutex);
 	return 0;
@@ -59,7 +59,7 @@
 
 	changed = 0;
 	mutex_lock(&chip->mutex);
-	for (i = 0; i < chip->model.dac_channels; ++i)
+	for (i = 0; i < chip->model.dac_channels_mixer; ++i)
 		if (value->value.integer.value[i] != chip->dac_volume[i]) {
 			chip->dac_volume[i] = value->value.integer.value[i];
 			changed = 1;
@@ -97,6 +97,16 @@
 	return changed;
 }
 
+static unsigned int upmix_item_count(struct oxygen *chip)
+{
+	if (chip->model.dac_channels_pcm < 8)
+		return 2;
+	else if (chip->model.update_center_lfe_mix)
+		return 5;
+	else
+		return 3;
+}
+
 static int upmix_info(struct snd_kcontrol *ctl, struct snd_ctl_elem_info *info)
 {
 	static const char *const names[5] = {
@@ -107,15 +117,9 @@
 		"Front+Surround+Center/LFE+Back",
 	};
 	struct oxygen *chip = ctl->private_data;
-	unsigned int count = chip->model.update_center_lfe_mix ? 5 : 3;
+	unsigned int count = upmix_item_count(chip);
 
-	info->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
-	info->count = 1;
-	info->value.enumerated.items = count;
-	if (info->value.enumerated.item >= count)
-		info->value.enumerated.item = count - 1;
-	strcpy(info->value.enumerated.name, names[info->value.enumerated.item]);
-	return 0;
+	return snd_ctl_enum_info(info, 1, count, names);
 }
 
 static int upmix_get(struct snd_kcontrol *ctl, struct snd_ctl_elem_value *value)
@@ -188,7 +192,7 @@
 static int upmix_put(struct snd_kcontrol *ctl, struct snd_ctl_elem_value *value)
 {
 	struct oxygen *chip = ctl->private_data;
-	unsigned int count = chip->model.update_center_lfe_mix ? 5 : 3;
+	unsigned int count = upmix_item_count(chip);
 	int changed;
 
 	if (value->value.enumerated.item[0] >= count)
@@ -430,30 +434,31 @@
 	return 0;
 }
 
-static int spdif_loopback_get(struct snd_kcontrol *ctl,
-			      struct snd_ctl_elem_value *value)
+static int spdif_bit_switch_get(struct snd_kcontrol *ctl,
+				struct snd_ctl_elem_value *value)
 {
 	struct oxygen *chip = ctl->private_data;
+	u32 bit = ctl->private_value;
 
 	value->value.integer.value[0] =
-		!!(oxygen_read32(chip, OXYGEN_SPDIF_CONTROL)
-		   & OXYGEN_SPDIF_LOOPBACK);
+		!!(oxygen_read32(chip, OXYGEN_SPDIF_CONTROL) & bit);
 	return 0;
 }
 
-static int spdif_loopback_put(struct snd_kcontrol *ctl,
-			      struct snd_ctl_elem_value *value)
+static int spdif_bit_switch_put(struct snd_kcontrol *ctl,
+				struct snd_ctl_elem_value *value)
 {
 	struct oxygen *chip = ctl->private_data;
+	u32 bit = ctl->private_value;
 	u32 oldreg, newreg;
 	int changed;
 
 	spin_lock_irq(&chip->reg_lock);
 	oldreg = oxygen_read32(chip, OXYGEN_SPDIF_CONTROL);
 	if (value->value.integer.value[0])
-		newreg = oldreg | OXYGEN_SPDIF_LOOPBACK;
+		newreg = oldreg | bit;
 	else
-		newreg = oldreg & ~OXYGEN_SPDIF_LOOPBACK;
+		newreg = oldreg & ~bit;
 	changed = newreg != oldreg;
 	if (changed)
 		oxygen_write32(chip, OXYGEN_SPDIF_CONTROL, newreg);
@@ -644,6 +649,46 @@
 	return change;
 }
 
+static int mic_fmic_source_info(struct snd_kcontrol *ctl,
+			   struct snd_ctl_elem_info *info)
+{
+	static const char *const names[] = { "Mic Jack", "Front Panel" };
+
+	return snd_ctl_enum_info(info, 1, 2, names);
+}
+
+static int mic_fmic_source_get(struct snd_kcontrol *ctl,
+			       struct snd_ctl_elem_value *value)
+{
+	struct oxygen *chip = ctl->private_data;
+
+	mutex_lock(&chip->mutex);
+	value->value.enumerated.item[0] =
+		!!(oxygen_read_ac97(chip, 0, CM9780_JACK) & CM9780_FMIC2MIC);
+	mutex_unlock(&chip->mutex);
+	return 0;
+}
+
+static int mic_fmic_source_put(struct snd_kcontrol *ctl,
+			       struct snd_ctl_elem_value *value)
+{
+	struct oxygen *chip = ctl->private_data;
+	u16 oldreg, newreg;
+	int change;
+
+	mutex_lock(&chip->mutex);
+	oldreg = oxygen_read_ac97(chip, 0, CM9780_JACK);
+	if (value->value.enumerated.item[0])
+		newreg = oldreg | CM9780_FMIC2MIC;
+	else
+		newreg = oldreg & ~CM9780_FMIC2MIC;
+	change = newreg != oldreg;
+	if (change)
+		oxygen_write_ac97(chip, 0, CM9780_JACK, newreg);
+	mutex_unlock(&chip->mutex);
+	return change;
+}
+
 static int ac97_fp_rec_volume_info(struct snd_kcontrol *ctl,
 				   struct snd_ctl_elem_info *info)
 {
@@ -791,8 +836,17 @@
 		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
 		.name = SNDRV_CTL_NAME_IEC958("Loopback ", NONE, SWITCH),
 		.info = snd_ctl_boolean_mono_info,
-		.get = spdif_loopback_get,
-		.put = spdif_loopback_put,
+		.get = spdif_bit_switch_get,
+		.put = spdif_bit_switch_put,
+		.private_value = OXYGEN_SPDIF_LOOPBACK,
+	},
+	{
+		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+		.name = SNDRV_CTL_NAME_IEC958("Validity Check ",CAPTURE,SWITCH),
+		.info = snd_ctl_boolean_mono_info,
+		.get = spdif_bit_switch_get,
+		.put = spdif_bit_switch_put,
+		.private_value = OXYGEN_SPDIF_SPDVALID,
 	},
 };
 
@@ -908,6 +962,13 @@
 	AC97_VOLUME("Mic Capture Volume", 0, AC97_MIC, 0),
 	AC97_SWITCH("Mic Capture Switch", 0, AC97_MIC, 15, 1),
 	AC97_SWITCH("Mic Boost (+20dB)", 0, AC97_MIC, 6, 0),
+	{
+		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+		.name = "Mic Source Capture Enum",
+		.info = mic_fmic_source_info,
+		.get = mic_fmic_source_get,
+		.put = mic_fmic_source_put,
+	},
 	AC97_SWITCH("Line Capture Switch", 0, AC97_LINE, 15, 1),
 	AC97_VOLUME("CD Capture Volume", 0, AC97_CD, 1),
 	AC97_SWITCH("CD Capture Switch", 0, AC97_CD, 15, 1),
@@ -970,7 +1031,10 @@
 				continue;
 		}
 		if (!strcmp(template.name, "Stereo Upmixing") &&
-		    chip->model.dac_channels == 2)
+		    chip->model.dac_channels_pcm == 2)
+			continue;
+		if (!strcmp(template.name, "Mic Source Capture Enum") &&
+		    !(chip->model.device_config & AC97_FMIC_SWITCH))
 			continue;
 		if (!strncmp(template.name, "CD Capture ", 11) &&
 		    !(chip->model.device_config & AC97_CD_INPUT))
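
In the mixer hunks above, the loopback-specific callbacks become spdif_bit_switch_get/put: the kcontrol's private_value names the OXYGEN_SPDIF_CONTROL bit to toggle, so one get/put pair backs both the loopback switch and the new validity-check switch. Below is a small userspace sketch of that read-modify-write-by-private-value pattern; the register is mocked with a plain variable and the two bit values are illustrative, not the real ones from oxygen_regs.h.

#include <stdio.h>
#include <stdint.h>

#define OXYGEN_SPDIF_LOOPBACK	0x00000002u	/* illustrative bit positions */
#define OXYGEN_SPDIF_SPDVALID	0x00000004u

static uint32_t spdif_control;			/* stands in for the hardware register */

/* returns 1 if the register changed, mirroring the kcontrol put() convention */
static int bit_switch_put(uint32_t bit, int enable)
{
	uint32_t oldreg = spdif_control;
	uint32_t newreg = enable ? (oldreg | bit) : (oldreg & ~bit);

	if (newreg == oldreg)
		return 0;
	spdif_control = newreg;
	return 1;
}

int main(void)
{
	printf("changed=%d\n", bit_switch_put(OXYGEN_SPDIF_LOOPBACK, 1));
	printf("changed=%d\n", bit_switch_put(OXYGEN_SPDIF_SPDVALID, 1));
	printf("changed=%d\n", bit_switch_put(OXYGEN_SPDIF_LOOPBACK, 1)); /* no-op */
	return 0;
}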
diff --git a/sound/pci/oxygen/oxygen_pcm.c b/sound/pci/oxygen/oxygen_pcm.c
index 8146674..d5533e3 100644
--- a/sound/pci/oxygen/oxygen_pcm.c
+++ b/sound/pci/oxygen/oxygen_pcm.c
@@ -39,7 +39,8 @@
 		SNDRV_PCM_INFO_MMAP_VALID |
 		SNDRV_PCM_INFO_INTERLEAVED |
 		SNDRV_PCM_INFO_PAUSE |
-		SNDRV_PCM_INFO_SYNC_START,
+		SNDRV_PCM_INFO_SYNC_START |
+		SNDRV_PCM_INFO_NO_PERIOD_WAKEUP,
 	.formats = SNDRV_PCM_FMTBIT_S16_LE |
 		   SNDRV_PCM_FMTBIT_S32_LE,
 	.rates = SNDRV_PCM_RATE_32000 |
@@ -65,7 +66,8 @@
 		SNDRV_PCM_INFO_MMAP_VALID |
 		SNDRV_PCM_INFO_INTERLEAVED |
 		SNDRV_PCM_INFO_PAUSE |
-		SNDRV_PCM_INFO_SYNC_START,
+		SNDRV_PCM_INFO_SYNC_START |
+		SNDRV_PCM_INFO_NO_PERIOD_WAKEUP,
 	.formats = SNDRV_PCM_FMTBIT_S16_LE |
 		   SNDRV_PCM_FMTBIT_S32_LE,
 	.rates = SNDRV_PCM_RATE_32000 |
@@ -91,7 +93,8 @@
 		SNDRV_PCM_INFO_MMAP_VALID |
 		SNDRV_PCM_INFO_INTERLEAVED |
 		SNDRV_PCM_INFO_PAUSE |
-		SNDRV_PCM_INFO_SYNC_START,
+		SNDRV_PCM_INFO_SYNC_START |
+		SNDRV_PCM_INFO_NO_PERIOD_WAKEUP,
 	.formats = SNDRV_PCM_FMTBIT_S16_LE,
 	.rates = SNDRV_PCM_RATE_48000,
 	.rate_min = 48000,
@@ -140,7 +143,7 @@
 		runtime->hw.rate_min = 44100;
 		break;
 	case PCM_MULTICH:
-		runtime->hw.channels_max = chip->model.dac_channels;
+		runtime->hw.channels_max = chip->model.dac_channels_pcm;
 		break;
 	}
 	if (chip->model.pcm_hardware_filter)
@@ -271,17 +274,6 @@
 	}
 }
 
-unsigned int oxygen_default_i2s_mclk(struct oxygen *chip,
-				     unsigned int channel,
-				     struct snd_pcm_hw_params *hw_params)
-{
-	if (params_rate(hw_params) <= 96000)
-		return OXYGEN_I2S_MCLK_256;
-	else
-		return OXYGEN_I2S_MCLK_128;
-}
-EXPORT_SYMBOL(oxygen_default_i2s_mclk);
-
 static unsigned int oxygen_i2s_bits(struct snd_pcm_hw_params *hw_params)
 {
 	if (params_format(hw_params) == SNDRV_PCM_FORMAT_S32_LE)
@@ -341,6 +333,26 @@
 	return 0;
 }
 
+static u16 get_mclk(struct oxygen *chip, unsigned int channel,
+		    struct snd_pcm_hw_params *params)
+{
+	unsigned int mclks, shift;
+
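+	/*
+	 * dac_mclks/adc_mclks pack three 2-bit MCLK ratio codes
+	 * (MCLK_128/256/512, presumably combined by OXYGEN_MCLKS() in
+	 * oxygen.h) for single-, double- and quad-speed rates; pick the
+	 * field for the current rate range and turn it into register bits.
+	 */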
+	if (channel == PCM_MULTICH)
+		mclks = chip->model.dac_mclks;
+	else
+		mclks = chip->model.adc_mclks;
+
+	if (params_rate(params) <= 48000)
+		shift = 0;
+	else if (params_rate(params) <= 96000)
+		shift = 2;
+	else
+		shift = 4;
+
+	return OXYGEN_I2S_MCLK(mclks >> shift);
+}
+
 static int oxygen_rec_a_hw_params(struct snd_pcm_substream *substream,
 				  struct snd_pcm_hw_params *hw_params)
 {
@@ -357,8 +369,8 @@
 			     OXYGEN_REC_FORMAT_A_MASK);
 	oxygen_write16_masked(chip, OXYGEN_I2S_A_FORMAT,
 			      oxygen_rate(hw_params) |
-			      chip->model.get_i2s_mclk(chip, PCM_A, hw_params) |
 			      chip->model.adc_i2s_format |
+			      get_mclk(chip, PCM_A, hw_params) |
 			      oxygen_i2s_bits(hw_params),
 			      OXYGEN_I2S_RATE_MASK |
 			      OXYGEN_I2S_FORMAT_MASK |
@@ -393,9 +405,8 @@
 	if (!is_ac97)
 		oxygen_write16_masked(chip, OXYGEN_I2S_B_FORMAT,
 				      oxygen_rate(hw_params) |
-				      chip->model.get_i2s_mclk(chip, PCM_B,
-							       hw_params) |
 				      chip->model.adc_i2s_format |
+				      get_mclk(chip, PCM_B, hw_params) |
 				      oxygen_i2s_bits(hw_params),
 				      OXYGEN_I2S_RATE_MASK |
 				      OXYGEN_I2S_FORMAT_MASK |
@@ -476,8 +487,7 @@
 	oxygen_write16_masked(chip, OXYGEN_I2S_MULTICH_FORMAT,
 			      oxygen_rate(hw_params) |
 			      chip->model.dac_i2s_format |
-			      chip->model.get_i2s_mclk(chip, PCM_MULTICH,
-						       hw_params) |
+			      get_mclk(chip, PCM_MULTICH, hw_params) |
 			      oxygen_i2s_bits(hw_params),
 			      OXYGEN_I2S_RATE_MASK |
 			      OXYGEN_I2S_FORMAT_MASK |
@@ -530,7 +540,10 @@
 	oxygen_set_bits8(chip, OXYGEN_DMA_FLUSH, channel_mask);
 	oxygen_clear_bits8(chip, OXYGEN_DMA_FLUSH, channel_mask);
 
-	chip->interrupt_mask |= channel_mask;
+	if (substream->runtime->no_period_wakeup)
+		chip->interrupt_mask &= ~channel_mask;
+	else
+		chip->interrupt_mask |= channel_mask;
 	oxygen_write16(chip, OXYGEN_INTERRUPT_MASK, chip->interrupt_mask);
 	spin_unlock_irq(&chip->reg_lock);
 	return 0;
diff --git a/sound/pci/oxygen/oxygen_regs.h b/sound/pci/oxygen/oxygen_regs.h
index 4dcd41b..63dc7a0 100644
--- a/sound/pci/oxygen/oxygen_regs.h
+++ b/sound/pci/oxygen/oxygen_regs.h
@@ -139,9 +139,11 @@
 #define  OXYGEN_I2S_FORMAT_I2S		0x0000
 #define  OXYGEN_I2S_FORMAT_LJUST	0x0008
 #define  OXYGEN_I2S_MCLK_MASK		0x0030	/* MCLK/LRCK */
-#define  OXYGEN_I2S_MCLK_128		0x0000
-#define  OXYGEN_I2S_MCLK_256		0x0010
-#define  OXYGEN_I2S_MCLK_512		0x0020
+#define  OXYGEN_I2S_MCLK_SHIFT		4
+#define  MCLK_128			0
+#define  MCLK_256			1
+#define  MCLK_512			2
+#define  OXYGEN_I2S_MCLK(f)		(((f) & 3) << OXYGEN_I2S_MCLK_SHIFT)
 #define  OXYGEN_I2S_BITS_MASK		0x00c0
 #define  OXYGEN_I2S_BITS_16		0x0000
 #define  OXYGEN_I2S_BITS_20		0x0040
@@ -238,11 +240,11 @@
 #define  OXYGEN_SPI_DATA_LENGTH_MASK	0x02
 #define  OXYGEN_SPI_DATA_LENGTH_2	0x00
 #define  OXYGEN_SPI_DATA_LENGTH_3	0x02
-#define  OXYGEN_SPI_CLOCK_MASK		0xc0
+#define  OXYGEN_SPI_CLOCK_MASK		0x0c
 #define  OXYGEN_SPI_CLOCK_160		0x00	/* ns */
-#define  OXYGEN_SPI_CLOCK_320		0x40
-#define  OXYGEN_SPI_CLOCK_640		0x80
-#define  OXYGEN_SPI_CLOCK_1280		0xc0
+#define  OXYGEN_SPI_CLOCK_320		0x04
+#define  OXYGEN_SPI_CLOCK_640		0x08
+#define  OXYGEN_SPI_CLOCK_1280		0x0c
 #define  OXYGEN_SPI_CODEC_MASK		0x70	/* 0..5 */
 #define  OXYGEN_SPI_CODEC_SHIFT		4
 #define  OXYGEN_SPI_CEN_MASK		0x80
diff --git a/sound/pci/oxygen/xonar.h b/sound/pci/oxygen/xonar.h
index b35343b..0434c20 100644
--- a/sound/pci/oxygen/xonar.h
+++ b/sound/pci/oxygen/xonar.h
@@ -24,6 +24,8 @@
 void xonar_init_cs53x1(struct oxygen *chip);
 void xonar_set_cs53x1_params(struct oxygen *chip,
 			     struct snd_pcm_hw_params *params);
+
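+/* GPIO bits occupy the low 16 bits of private_value; bit 16 inverts polarity */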
+#define XONAR_GPIO_BIT_INVERT	(1 << 16)
 int xonar_gpio_bit_switch_get(struct snd_kcontrol *ctl,
 			      struct snd_ctl_elem_value *value);
 int xonar_gpio_bit_switch_put(struct snd_kcontrol *ctl,
diff --git a/sound/pci/oxygen/xonar_cs43xx.c b/sound/pci/oxygen/xonar_cs43xx.c
index aa27c310..9f72d42 100644
--- a/sound/pci/oxygen/xonar_cs43xx.c
+++ b/sound/pci/oxygen/xonar_cs43xx.c
@@ -22,29 +22,28 @@
  *
  * CMI8788:
  *
- * I²C <-> CS4398 (front)
- *     <-> CS4362A (surround, center/LFE, back)
+ *   I²C <-> CS4398 (addr 1001111) (front)
+ *       <-> CS4362A (addr 0011000) (surround, center/LFE, back)
  *
- * GPI 0 <- external power present (DX only)
+ *   GPI 0 <- external power present (DX only)
  *
- * GPIO 0 -> enable output to speakers
- * GPIO 1 -> enable front panel I/O
- * GPIO 2 -> M0 of CS5361
- * GPIO 3 -> M1 of CS5361
- * GPIO 8 -> route input jack to line-in (0) or mic-in (1)
- *
- * CS4398:
- *
- * AD0 <- 1
- * AD1 <- 1
- *
- * CS4362A:
- *
- * AD0 <- 0
+ *   GPIO 0 -> enable output to speakers
+ *   GPIO 1 -> route output to front panel
+ *   GPIO 2 -> M0 of CS5361
+ *   GPIO 3 -> M1 of CS5361
+ *   GPIO 6 -> ?
+ *   GPIO 7 -> ?
+ *   GPIO 8 -> route input jack to line-in (0) or mic-in (1)
  *
  * CM9780:
  *
- * GPO 0 -> route line-in (0) or AC97 output (1) to CS5361 input
+ *   LINE_OUT -> input of ADC
+ *
+ *   AUX_IN  <- aux
+ *   MIC_IN  <- mic
+ *   FMIC_IN <- front mic
+ *
+ *   GPO 0 -> route line-in (0) or AC97 output (1) to CS5361 input
  */
 
 #include <linux/pci.h>
@@ -63,6 +62,7 @@
 #define GPI_EXT_POWER		0x01
 #define GPIO_D1_OUTPUT_ENABLE	0x0001
 #define GPIO_D1_FRONT_PANEL	0x0002
+#define GPIO_D1_MAGIC		0x00c0
 #define GPIO_D1_INPUT_ROUTE	0x0100
 
 #define I2C_DEVICE_CS4398	0x9e	/* 10011, AD1=1, AD0=1, /W=0 */
@@ -169,12 +169,12 @@
 	cs43xx_registers_init(chip);
 
 	oxygen_set_bits16(chip, OXYGEN_GPIO_CONTROL,
-			  GPIO_D1_FRONT_PANEL | GPIO_D1_INPUT_ROUTE);
+			  GPIO_D1_FRONT_PANEL |
+			  GPIO_D1_MAGIC |
+			  GPIO_D1_INPUT_ROUTE);
 	oxygen_clear_bits16(chip, OXYGEN_GPIO_DATA,
 			    GPIO_D1_FRONT_PANEL | GPIO_D1_INPUT_ROUTE);
 
-	oxygen_ac97_set_bits(chip, 0, CM9780_JACK, CM9780_FMIC2MIC);
-
 	xonar_init_cs53x1(chip);
 	xonar_enable_output(chip);
 
@@ -284,7 +284,7 @@
 
 static const struct snd_kcontrol_new front_panel_switch = {
 	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
-	.name = "Front Panel Switch",
+	.name = "Front Panel Playback Switch",
 	.info = snd_ctl_boolean_mono_info,
 	.get = xonar_gpio_bit_switch_get,
 	.put = xonar_gpio_bit_switch_put,
@@ -298,13 +298,7 @@
 		"Fast Roll-off", "Slow Roll-off"
 	};
 
-	info->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
-	info->count = 1;
-	info->value.enumerated.items = 2;
-	if (info->value.enumerated.item >= 2)
-		info->value.enumerated.item = 1;
-	strcpy(info->value.enumerated.name, names[info->value.enumerated.item]);
-	return 0;
+	return snd_ctl_enum_info(info, 1, 2, names);
 }
 
 static int rolloff_get(struct snd_kcontrol *ctl,
@@ -380,6 +374,30 @@
 	return 0;
 }
 
+static void dump_cs4362a_registers(struct xonar_cs43xx *data,
+				   struct snd_info_buffer *buffer)
+{
+	unsigned int i;
+
+	snd_iprintf(buffer, "\nCS4362A:");
+	for (i = 1; i <= 14; ++i)
+		snd_iprintf(buffer, " %02x", data->cs4362a_regs[i]);
+	snd_iprintf(buffer, "\n");
+}
+
+static void dump_d1_registers(struct oxygen *chip,
+			      struct snd_info_buffer *buffer)
+{
+	struct xonar_cs43xx *data = chip->model_data;
+	unsigned int i;
+
+	snd_iprintf(buffer, "\nCS4398: 7?");
+	for (i = 2; i <= 8; ++i)
+		snd_iprintf(buffer, " %02x", data->cs4398_regs[i]);
+	snd_iprintf(buffer, "\n");
+	dump_cs4362a_registers(data, buffer);
+}
+
 static const struct oxygen_model model_xonar_d1 = {
 	.longname = "Asus Virtuoso 100",
 	.chip = "AV200",
@@ -388,22 +406,26 @@
 	.cleanup = xonar_d1_cleanup,
 	.suspend = xonar_d1_suspend,
 	.resume = xonar_d1_resume,
-	.get_i2s_mclk = oxygen_default_i2s_mclk,
 	.set_dac_params = set_cs43xx_params,
 	.set_adc_params = xonar_set_cs53x1_params,
 	.update_dac_volume = update_cs43xx_volume,
 	.update_dac_mute = update_cs43xx_mute,
 	.update_center_lfe_mix = update_cs43xx_center_lfe_mix,
 	.ac97_switch = xonar_d1_line_mic_ac97_switch,
+	.dump_registers = dump_d1_registers,
 	.dac_tlv = cs4362a_db_scale,
 	.model_data_size = sizeof(struct xonar_cs43xx),
 	.device_config = PLAYBACK_0_TO_I2S |
 			 PLAYBACK_1_TO_SPDIF |
-			 CAPTURE_0_FROM_I2S_2,
-	.dac_channels = 8,
+			 CAPTURE_0_FROM_I2S_2 |
+			 AC97_FMIC_SWITCH,
+	.dac_channels_pcm = 8,
+	.dac_channels_mixer = 8,
 	.dac_volume_min = 127 - 60,
 	.dac_volume_max = 127,
 	.function_flags = OXYGEN_FUNCTION_2WIRE,
+	.dac_mclks = OXYGEN_MCLKS(256, 128, 128),
+	.adc_mclks = OXYGEN_MCLKS(256, 128, 128),
 	.dac_i2s_format = OXYGEN_I2S_FORMAT_LJUST,
 	.adc_i2s_format = OXYGEN_I2S_FORMAT_LJUST,
 };
diff --git a/sound/pci/oxygen/xonar_dg.c b/sound/pci/oxygen/xonar_dg.c
new file mode 100644
index 0000000..e4de0b8
--- /dev/null
+++ b/sound/pci/oxygen/xonar_dg.c
@@ -0,0 +1,572 @@
+/*
+ * card driver for the Xonar DG
+ *
+ * Copyright (c) Clemens Ladisch <clemens@ladisch.de>
+ *
+ *
+ *  This driver is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License, version 2.
+ *
+ *  This driver is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this driver; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * Xonar DG
+ * --------
+ *
+ * CMI8788:
+ *
+ *   SPI 0 -> CS4245
+ *
+ *   GPIO 3 <- ?
+ *   GPIO 4 <- headphone detect
+ *   GPIO 5 -> route input jack to line-in (0) or mic-in (1)
+ *   GPIO 6 -> route input jack to line-in (0) or mic-in (1)
+ *   GPIO 7 -> enable rear headphone amp
+ *   GPIO 8 -> enable output to speakers
+ *
+ * CS4245:
+ *
+ *   input 1 <- aux
+ *   input 2 <- front mic
+ *   input 4 <- line/mic
+ *   aux out -> front panel headphones
+ */
+
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <sound/control.h>
+#include <sound/core.h>
+#include <sound/info.h>
+#include <sound/pcm.h>
+#include <sound/tlv.h>
+#include "oxygen.h"
+#include "xonar_dg.h"
+#include "cs4245.h"
+
+#define GPIO_MAGIC		0x0008
+#define GPIO_HP_DETECT		0x0010
+#define GPIO_INPUT_ROUTE	0x0060
+#define GPIO_HP_REAR		0x0080
+#define GPIO_OUTPUT_ENABLE	0x0100
+
+struct dg {
+	unsigned int output_sel;
+	s8 input_vol[4][2];
+	unsigned int input_sel;
+	u8 hp_vol_att;
+	u8 cs4245_regs[0x11];
+};
+
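+/*
+ * A CS4245 write is a three-byte SPI frame: register index in the low
+ * byte, register value above it, and the chip address/write flag on top.
+ * Every value is also mirrored in cs4245_regs[] so that cached writes and
+ * the proc register dump do not need to read the codec back.
+ */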
+static void cs4245_write(struct oxygen *chip, unsigned int reg, u8 value)
+{
+	struct dg *data = chip->model_data;
+
+	oxygen_write_spi(chip, OXYGEN_SPI_TRIGGER |
+			 OXYGEN_SPI_DATA_LENGTH_3 |
+			 OXYGEN_SPI_CLOCK_1280 |
+			 (0 << OXYGEN_SPI_CODEC_SHIFT) |
+			 OXYGEN_SPI_CEN_LATCH_CLOCK_HI,
+			 CS4245_SPI_ADDRESS |
+			 CS4245_SPI_WRITE |
+			 (value << 8) | reg);
+	data->cs4245_regs[reg] = value;
+}
+
+static void cs4245_write_cached(struct oxygen *chip, unsigned int reg, u8 value)
+{
+	struct dg *data = chip->model_data;
+
+	if (value != data->cs4245_regs[reg])
+		cs4245_write(chip, reg, value);
+}
+
+static void cs4245_registers_init(struct oxygen *chip)
+{
+	struct dg *data = chip->model_data;
+
+	cs4245_write(chip, CS4245_POWER_CTRL, CS4245_PDN);
+	cs4245_write(chip, CS4245_DAC_CTRL_1,
+		     data->cs4245_regs[CS4245_DAC_CTRL_1]);
+	cs4245_write(chip, CS4245_ADC_CTRL,
+		     data->cs4245_regs[CS4245_ADC_CTRL]);
+	cs4245_write(chip, CS4245_SIGNAL_SEL,
+		     data->cs4245_regs[CS4245_SIGNAL_SEL]);
+	cs4245_write(chip, CS4245_PGA_B_CTRL,
+		     data->cs4245_regs[CS4245_PGA_B_CTRL]);
+	cs4245_write(chip, CS4245_PGA_A_CTRL,
+		     data->cs4245_regs[CS4245_PGA_A_CTRL]);
+	cs4245_write(chip, CS4245_ANALOG_IN,
+		     data->cs4245_regs[CS4245_ANALOG_IN]);
+	cs4245_write(chip, CS4245_DAC_A_CTRL,
+		     data->cs4245_regs[CS4245_DAC_A_CTRL]);
+	cs4245_write(chip, CS4245_DAC_B_CTRL,
+		     data->cs4245_regs[CS4245_DAC_B_CTRL]);
+	cs4245_write(chip, CS4245_DAC_CTRL_2,
+		     CS4245_DAC_SOFT | CS4245_DAC_ZERO | CS4245_INVERT_DAC);
+	cs4245_write(chip, CS4245_INT_MASK, 0);
+	cs4245_write(chip, CS4245_POWER_CTRL, 0);
+}
+
+static void cs4245_init(struct oxygen *chip)
+{
+	struct dg *data = chip->model_data;
+
+	data->cs4245_regs[CS4245_DAC_CTRL_1] =
+		CS4245_DAC_FM_SINGLE | CS4245_DAC_DIF_LJUST;
+	data->cs4245_regs[CS4245_ADC_CTRL] =
+		CS4245_ADC_FM_SINGLE | CS4245_ADC_DIF_LJUST;
+	data->cs4245_regs[CS4245_SIGNAL_SEL] =
+		CS4245_A_OUT_SEL_HIZ | CS4245_ASYNCH;
+	data->cs4245_regs[CS4245_PGA_B_CTRL] = 0;
+	data->cs4245_regs[CS4245_PGA_A_CTRL] = 0;
+	data->cs4245_regs[CS4245_ANALOG_IN] =
+		CS4245_PGA_SOFT | CS4245_PGA_ZERO | CS4245_SEL_INPUT_4;
+	data->cs4245_regs[CS4245_DAC_A_CTRL] = 0;
+	data->cs4245_regs[CS4245_DAC_B_CTRL] = 0;
+	cs4245_registers_init(chip);
+	snd_component_add(chip->card, "CS4245");
+}
+
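+/* presumably an anti-pop delay before the speaker output is switched on */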
+static void dg_output_enable(struct oxygen *chip)
+{
+	msleep(2500);
+	oxygen_set_bits16(chip, OXYGEN_GPIO_DATA, GPIO_OUTPUT_ENABLE);
+}
+
+static void dg_init(struct oxygen *chip)
+{
+	struct dg *data = chip->model_data;
+
+	data->output_sel = 0;
+	data->input_sel = 3;
+	data->hp_vol_att = 2 * 16;
+
+	cs4245_init(chip);
+
+	oxygen_clear_bits16(chip, OXYGEN_GPIO_CONTROL,
+			    GPIO_MAGIC | GPIO_HP_DETECT);
+	oxygen_set_bits16(chip, OXYGEN_GPIO_CONTROL,
+			  GPIO_INPUT_ROUTE | GPIO_HP_REAR | GPIO_OUTPUT_ENABLE);
+	oxygen_clear_bits16(chip, OXYGEN_GPIO_DATA,
+			    GPIO_INPUT_ROUTE | GPIO_HP_REAR);
+	dg_output_enable(chip);
+}
+
+static void dg_cleanup(struct oxygen *chip)
+{
+	oxygen_clear_bits16(chip, OXYGEN_GPIO_DATA, GPIO_OUTPUT_ENABLE);
+}
+
+static void dg_suspend(struct oxygen *chip)
+{
+	dg_cleanup(chip);
+}
+
+static void dg_resume(struct oxygen *chip)
+{
+	cs4245_registers_init(chip);
+	dg_output_enable(chip);
+}
+
+static void set_cs4245_dac_params(struct oxygen *chip,
+				  struct snd_pcm_hw_params *params)
+{
+	struct dg *data = chip->model_data;
+	u8 value;
+
+	value = data->cs4245_regs[CS4245_DAC_CTRL_1] & ~CS4245_DAC_FM_MASK;
+	if (params_rate(params) <= 50000)
+		value |= CS4245_DAC_FM_SINGLE;
+	else if (params_rate(params) <= 100000)
+		value |= CS4245_DAC_FM_DOUBLE;
+	else
+		value |= CS4245_DAC_FM_QUAD;
+	cs4245_write_cached(chip, CS4245_DAC_CTRL_1, value);
+}
+
+static void set_cs4245_adc_params(struct oxygen *chip,
+				  struct snd_pcm_hw_params *params)
+{
+	struct dg *data = chip->model_data;
+	u8 value;
+
+	value = data->cs4245_regs[CS4245_ADC_CTRL] & ~CS4245_ADC_FM_MASK;
+	if (params_rate(params) <= 50000)
+		value |= CS4245_ADC_FM_SINGLE;
+	else if (params_rate(params) <= 100000)
+		value |= CS4245_ADC_FM_DOUBLE;
+	else
+		value |= CS4245_ADC_FM_QUAD;
+	cs4245_write_cached(chip, CS4245_ADC_CTRL, value);
+}
+
+static int output_switch_info(struct snd_kcontrol *ctl,
+			      struct snd_ctl_elem_info *info)
+{
+	static const char *const names[3] = {
+		"Speakers", "Headphones", "FP Headphones"
+	};
+
+	return snd_ctl_enum_info(info, 1, 3, names);
+}
+
+static int output_switch_get(struct snd_kcontrol *ctl,
+			     struct snd_ctl_elem_value *value)
+{
+	struct oxygen *chip = ctl->private_data;
+	struct dg *data = chip->model_data;
+
+	mutex_lock(&chip->mutex);
+	value->value.enumerated.item[0] = data->output_sel;
+	mutex_unlock(&chip->mutex);
+	return 0;
+}
+
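+/*
+ * output_sel: 0 = speakers, 1 = rear headphone jack, 2 = front-panel
+ * headphones.  The CS4245 aux output feeds the front panel only, the rear
+ * headphone amp is switched via GPIO, and the impedance-dependent DAC
+ * attenuation is applied whenever one of the headphone outputs is active.
+ */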
+static int output_switch_put(struct snd_kcontrol *ctl,
+			     struct snd_ctl_elem_value *value)
+{
+	struct oxygen *chip = ctl->private_data;
+	struct dg *data = chip->model_data;
+	u8 reg;
+	int changed;
+
+	if (value->value.enumerated.item[0] > 2)
+		return -EINVAL;
+
+	mutex_lock(&chip->mutex);
+	changed = value->value.enumerated.item[0] != data->output_sel;
+	if (changed) {
+		data->output_sel = value->value.enumerated.item[0];
+
+		reg = data->cs4245_regs[CS4245_SIGNAL_SEL] &
+						~CS4245_A_OUT_SEL_MASK;
+		reg |= data->output_sel == 2 ?
+				CS4245_A_OUT_SEL_DAC : CS4245_A_OUT_SEL_HIZ;
+		cs4245_write_cached(chip, CS4245_SIGNAL_SEL, reg);
+
+		cs4245_write_cached(chip, CS4245_DAC_A_CTRL,
+				    data->output_sel ? data->hp_vol_att : 0);
+		cs4245_write_cached(chip, CS4245_DAC_B_CTRL,
+				    data->output_sel ? data->hp_vol_att : 0);
+
+		oxygen_write16_masked(chip, OXYGEN_GPIO_DATA,
+				      data->output_sel == 1 ? GPIO_HP_REAR : 0,
+				      GPIO_HP_REAR);
+	}
+	mutex_unlock(&chip->mutex);
+	return changed;
+}
+
+static int hp_volume_offset_info(struct snd_kcontrol *ctl,
+				 struct snd_ctl_elem_info *info)
+{
+	static const char *const names[3] = {
+		"< 64 ohms", "64-150 ohms", "150-300 ohms"
+	};
+
+	return snd_ctl_enum_info(info, 1, 3, names);
+}
+
+static int hp_volume_offset_get(struct snd_kcontrol *ctl,
+				struct snd_ctl_elem_value *value)
+{
+	struct oxygen *chip = ctl->private_data;
+	struct dg *data = chip->model_data;
+
+	mutex_lock(&chip->mutex);
+	if (data->hp_vol_att > 2 * 7)
+		value->value.enumerated.item[0] = 0;
+	else if (data->hp_vol_att > 0)
+		value->value.enumerated.item[0] = 1;
+	else
+		value->value.enumerated.item[0] = 2;
+	mutex_unlock(&chip->mutex);
+	return 0;
+}
+
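+/*
+ * Attenuation presets for the selected headphone impedance, in CS4245
+ * DAC volume steps (presumably 0.5 dB each): 16 dB for low-impedance,
+ * 7 dB for medium, none for high-impedance headphones.
+ */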
+static int hp_volume_offset_put(struct snd_kcontrol *ctl,
+				struct snd_ctl_elem_value *value)
+{
+	static const s8 atts[3] = { 2 * 16, 2 * 7, 0 };
+	struct oxygen *chip = ctl->private_data;
+	struct dg *data = chip->model_data;
+	s8 att;
+	int changed;
+
+	if (value->value.enumerated.item[0] > 2)
+		return -EINVAL;
+	att = atts[value->value.enumerated.item[0]];
+	mutex_lock(&chip->mutex);
+	changed = att != data->hp_vol_att;
+	if (changed) {
+		data->hp_vol_att = att;
+		if (data->output_sel) {
+			cs4245_write_cached(chip, CS4245_DAC_A_CTRL, att);
+			cs4245_write_cached(chip, CS4245_DAC_B_CTRL, att);
+		}
+	}
+	mutex_unlock(&chip->mutex);
+	return changed;
+}
+
+static int input_vol_info(struct snd_kcontrol *ctl,
+			  struct snd_ctl_elem_info *info)
+{
+	info->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+	info->count = 2;
+	info->value.integer.min = 2 * -12;
+	info->value.integer.max = 2 * 12;
+	return 0;
+}
+
+static int input_vol_get(struct snd_kcontrol *ctl,
+			 struct snd_ctl_elem_value *value)
+{
+	struct oxygen *chip = ctl->private_data;
+	struct dg *data = chip->model_data;
+	unsigned int idx = ctl->private_value;
+
+	mutex_lock(&chip->mutex);
+	value->value.integer.value[0] = data->input_vol[idx][0];
+	value->value.integer.value[1] = data->input_vol[idx][1];
+	mutex_unlock(&chip->mutex);
+	return 0;
+}
+
+static int input_vol_put(struct snd_kcontrol *ctl,
+			 struct snd_ctl_elem_value *value)
+{
+	struct oxygen *chip = ctl->private_data;
+	struct dg *data = chip->model_data;
+	unsigned int idx = ctl->private_value;
+	int changed = 0;
+
+	if (value->value.integer.value[0] < 2 * -12 ||
+	    value->value.integer.value[0] > 2 * 12 ||
+	    value->value.integer.value[1] < 2 * -12 ||
+	    value->value.integer.value[1] > 2 * 12)
+		return -EINVAL;
+	mutex_lock(&chip->mutex);
+	changed = data->input_vol[idx][0] != value->value.integer.value[0] ||
+		  data->input_vol[idx][1] != value->value.integer.value[1];
+	if (changed) {
+		data->input_vol[idx][0] = value->value.integer.value[0];
+		data->input_vol[idx][1] = value->value.integer.value[1];
+		if (idx == data->input_sel) {
+			cs4245_write_cached(chip, CS4245_PGA_A_CTRL,
+					    data->input_vol[idx][0]);
+			cs4245_write_cached(chip, CS4245_PGA_B_CTRL,
+					    data->input_vol[idx][1]);
+		}
+	}
+	mutex_unlock(&chip->mutex);
+	return changed;
+}
+
+static DECLARE_TLV_DB_SCALE(cs4245_pga_db_scale, -1200, 50, 0);
+
+static int input_sel_info(struct snd_kcontrol *ctl,
+			  struct snd_ctl_elem_info *info)
+{
+	static const char *const names[4] = {
+		"Mic", "Aux", "Front Mic", "Line"
+	};
+
+	return snd_ctl_enum_info(info, 1, 4, names);
+}
+
+static int input_sel_get(struct snd_kcontrol *ctl,
+			 struct snd_ctl_elem_value *value)
+{
+	struct oxygen *chip = ctl->private_data;
+	struct dg *data = chip->model_data;
+
+	mutex_lock(&chip->mutex);
+	value->value.enumerated.item[0] = data->input_sel;
+	mutex_unlock(&chip->mutex);
+	return 0;
+}
+
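+/*
+ * Map the capture source onto the CS4245 input selector (mic input or
+ * analog inputs 1/2/4); the GPIO bits route the shared jack to mic-in
+ * only while "Mic" is selected, and to line-in otherwise.
+ */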
+static int input_sel_put(struct snd_kcontrol *ctl,
+			 struct snd_ctl_elem_value *value)
+{
+	static const u8 sel_values[4] = {
+		CS4245_SEL_MIC,
+		CS4245_SEL_INPUT_1,
+		CS4245_SEL_INPUT_2,
+		CS4245_SEL_INPUT_4
+	};
+	struct oxygen *chip = ctl->private_data;
+	struct dg *data = chip->model_data;
+	int changed;
+
+	if (value->value.enumerated.item[0] > 3)
+		return -EINVAL;
+
+	mutex_lock(&chip->mutex);
+	changed = value->value.enumerated.item[0] != data->input_sel;
+	if (changed) {
+		data->input_sel = value->value.enumerated.item[0];
+
+		cs4245_write(chip, CS4245_ANALOG_IN,
+			     (data->cs4245_regs[CS4245_ANALOG_IN] &
+							~CS4245_SEL_MASK) |
+			     sel_values[data->input_sel]);
+
+		cs4245_write_cached(chip, CS4245_PGA_A_CTRL,
+				    data->input_vol[data->input_sel][0]);
+		cs4245_write_cached(chip, CS4245_PGA_B_CTRL,
+				    data->input_vol[data->input_sel][1]);
+
+		oxygen_write16_masked(chip, OXYGEN_GPIO_DATA,
+				      data->input_sel ? 0 : GPIO_INPUT_ROUTE,
+				      GPIO_INPUT_ROUTE);
+	}
+	mutex_unlock(&chip->mutex);
+	return changed;
+}
+
+static int hpf_info(struct snd_kcontrol *ctl, struct snd_ctl_elem_info *info)
+{
+	static const char *const names[2] = { "Active", "Frozen" };
+
+	return snd_ctl_enum_info(info, 1, 2, names);
+}
+
+static int hpf_get(struct snd_kcontrol *ctl, struct snd_ctl_elem_value *value)
+{
+	struct oxygen *chip = ctl->private_data;
+	struct dg *data = chip->model_data;
+
+	value->value.enumerated.item[0] =
+		!!(data->cs4245_regs[CS4245_ADC_CTRL] & CS4245_HPF_FREEZE);
+	return 0;
+}
+
+static int hpf_put(struct snd_kcontrol *ctl, struct snd_ctl_elem_value *value)
+{
+	struct oxygen *chip = ctl->private_data;
+	struct dg *data = chip->model_data;
+	u8 reg;
+	int changed;
+
+	mutex_lock(&chip->mutex);
+	reg = data->cs4245_regs[CS4245_ADC_CTRL] & ~CS4245_HPF_FREEZE;
+	if (value->value.enumerated.item[0])
+		reg |= CS4245_HPF_FREEZE;
+	changed = reg != data->cs4245_regs[CS4245_ADC_CTRL];
+	if (changed)
+		cs4245_write(chip, CS4245_ADC_CTRL, reg);
+	mutex_unlock(&chip->mutex);
+	return changed;
+}
+
+#define INPUT_VOLUME(xname, index) { \
+	.iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
+	.name = xname, \
+	.info = input_vol_info, \
+	.get = input_vol_get, \
+	.put = input_vol_put, \
+	.tlv = { .p = cs4245_pga_db_scale }, \
+	.private_value = index, \
+}
+static const struct snd_kcontrol_new dg_controls[] = {
+	{
+		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+		.name = "Analog Output Playback Enum",
+		.info = output_switch_info,
+		.get = output_switch_get,
+		.put = output_switch_put,
+	},
+	{
+		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+		.name = "Headphones Impedance Playback Enum",
+		.info = hp_volume_offset_info,
+		.get = hp_volume_offset_get,
+		.put = hp_volume_offset_put,
+	},
+	INPUT_VOLUME("Mic Capture Volume", 0),
+	INPUT_VOLUME("Aux Capture Volume", 1),
+	INPUT_VOLUME("Front Mic Capture Volume", 2),
+	INPUT_VOLUME("Line Capture Volume", 3),
+	{
+		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+		.name = "Capture Source",
+		.info = input_sel_info,
+		.get = input_sel_get,
+		.put = input_sel_put,
+	},
+	{
+		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+		.name = "ADC High-pass Filter Capture Enum",
+		.info = hpf_info,
+		.get = hpf_get,
+		.put = hpf_put,
+	},
+};
+
+static int dg_control_filter(struct snd_kcontrol_new *template)
+{
+	if (!strncmp(template->name, "Master Playback ", 16))
+		return 1;
+	return 0;
+}
+
+static int dg_mixer_init(struct oxygen *chip)
+{
+	unsigned int i;
+	int err;
+
+	for (i = 0; i < ARRAY_SIZE(dg_controls); ++i) {
+		err = snd_ctl_add(chip->card,
+				  snd_ctl_new1(&dg_controls[i], chip));
+		if (err < 0)
+			return err;
+	}
+	return 0;
+}
+
+static void dump_cs4245_registers(struct oxygen *chip,
+				  struct snd_info_buffer *buffer)
+{
+	struct dg *data = chip->model_data;
+	unsigned int i;
+
+	snd_iprintf(buffer, "\nCS4245:");
+	for (i = 1; i <= 0x10; ++i)
+		snd_iprintf(buffer, " %02x", data->cs4245_regs[i]);
+	snd_iprintf(buffer, "\n");
+}
+
+struct oxygen_model model_xonar_dg = {
+	.shortname = "Xonar DG",
+	.longname = "C-Media Oxygen HD Audio",
+	.chip = "CMI8786",
+	.init = dg_init,
+	.control_filter = dg_control_filter,
+	.mixer_init = dg_mixer_init,
+	.cleanup = dg_cleanup,
+	.suspend = dg_suspend,
+	.resume = dg_resume,
+	.set_dac_params = set_cs4245_dac_params,
+	.set_adc_params = set_cs4245_adc_params,
+	.dump_registers = dump_cs4245_registers,
+	.model_data_size = sizeof(struct dg),
+	.device_config = PLAYBACK_0_TO_I2S |
+			 PLAYBACK_1_TO_SPDIF |
+			 CAPTURE_0_FROM_I2S_2,
+	.dac_channels_pcm = 6,
+	.dac_channels_mixer = 0,
+	.function_flags = OXYGEN_FUNCTION_SPI,
+	.dac_mclks = OXYGEN_MCLKS(256, 128, 128),
+	.adc_mclks = OXYGEN_MCLKS(256, 128, 128),
+	.dac_i2s_format = OXYGEN_I2S_FORMAT_LJUST,
+	.adc_i2s_format = OXYGEN_I2S_FORMAT_LJUST,
+};
diff --git a/sound/pci/oxygen/xonar_dg.h b/sound/pci/oxygen/xonar_dg.h
new file mode 100644
index 0000000..5688d78
--- /dev/null
+++ b/sound/pci/oxygen/xonar_dg.h
@@ -0,0 +1,8 @@
+#ifndef XONAR_DG_H_INCLUDED
+#define XONAR_DG_H_INCLUDED
+
+#include "oxygen.h"
+
+extern struct oxygen_model model_xonar_dg;
+
+#endif
diff --git a/sound/pci/oxygen/xonar_hdmi.c b/sound/pci/oxygen/xonar_hdmi.c
index b12db1f..136dac6a 100644
--- a/sound/pci/oxygen/xonar_hdmi.c
+++ b/sound/pci/oxygen/xonar_hdmi.c
@@ -1,5 +1,5 @@
 /*
- * helper functions for HDMI models (Xonar HDAV1.3)
+ * helper functions for HDMI models (Xonar HDAV1.3/HDAV1.3 Slim)
  *
  * Copyright (c) Clemens Ladisch <clemens@ladisch.de>
  *
diff --git a/sound/pci/oxygen/xonar_lib.c b/sound/pci/oxygen/xonar_lib.c
index b3ff713..0ebe7f5 100644
--- a/sound/pci/oxygen/xonar_lib.c
+++ b/sound/pci/oxygen/xonar_lib.c
@@ -104,9 +104,10 @@
 {
 	struct oxygen *chip = ctl->private_data;
 	u16 bit = ctl->private_value;
+	bool invert = ctl->private_value & XONAR_GPIO_BIT_INVERT;
 
 	value->value.integer.value[0] =
-		!!(oxygen_read16(chip, OXYGEN_GPIO_DATA) & bit);
+		!!(oxygen_read16(chip, OXYGEN_GPIO_DATA) & bit) ^ invert;
 	return 0;
 }
 
@@ -115,12 +116,13 @@
 {
 	struct oxygen *chip = ctl->private_data;
 	u16 bit = ctl->private_value;
+	bool invert = ctl->private_value & XONAR_GPIO_BIT_INVERT;
 	u16 old_bits, new_bits;
 	int changed;
 
 	spin_lock_irq(&chip->reg_lock);
 	old_bits = oxygen_read16(chip, OXYGEN_GPIO_DATA);
-	if (value->value.integer.value[0])
+	if (!!value->value.integer.value[0] ^ invert)
 		new_bits = old_bits | bit;
 	else
 		new_bits = old_bits & ~bit;
diff --git a/sound/pci/oxygen/xonar_pcm179x.c b/sound/pci/oxygen/xonar_pcm179x.c
index d491fd6..54cad38 100644
--- a/sound/pci/oxygen/xonar_pcm179x.c
+++ b/sound/pci/oxygen/xonar_pcm179x.c
@@ -22,20 +22,26 @@
  *
  * CMI8788:
  *
- * SPI 0 -> 1st PCM1796 (front)
- * SPI 1 -> 2nd PCM1796 (surround)
- * SPI 2 -> 3rd PCM1796 (center/LFE)
- * SPI 4 -> 4th PCM1796 (back)
+ *   SPI 0 -> 1st PCM1796 (front)
+ *   SPI 1 -> 2nd PCM1796 (surround)
+ *   SPI 2 -> 3rd PCM1796 (center/LFE)
+ *   SPI 4 -> 4th PCM1796 (back)
  *
- * GPIO 2 -> M0 of CS5381
- * GPIO 3 -> M1 of CS5381
- * GPIO 5 <- external power present (D2X only)
- * GPIO 7 -> ALT
- * GPIO 8 -> enable output to speakers
+ *   GPIO 2 -> M0 of CS5381
+ *   GPIO 3 -> M1 of CS5381
+ *   GPIO 5 <- external power present (D2X only)
+ *   GPIO 7 -> ALT
+ *   GPIO 8 -> enable output to speakers
  *
  * CM9780:
  *
- * GPO 0 -> route line-in (0) or AC97 output (1) to CS5381 input
+ *   LINE_OUT -> input of ADC
+ *
+ *   AUX_IN   <- aux
+ *   VIDEO_IN <- CD
+ *   FMIC_IN  <- mic
+ *
+ *   GPO 0 -> route line-in (0) or AC97 output (1) to CS5381 input
  */
 
 /*
@@ -44,52 +50,53 @@
  *
  * CMI8788:
  *
- * I²C <-> PCM1796 (front)
+ *   I²C <-> PCM1796 (addr 1001100) (front)
  *
- * GPI 0 <- external power present
+ *   GPI 0 <- external power present
  *
- * GPIO 0 -> enable output to speakers
- * GPIO 2 -> M0 of CS5381
- * GPIO 3 -> M1 of CS5381
- * GPIO 8 -> route input jack to line-in (0) or mic-in (1)
+ *   GPIO 0 -> enable HDMI (0) or speaker (1) output
+ *   GPIO 2 -> M0 of CS5381
+ *   GPIO 3 -> M1 of CS5381
+ *   GPIO 4 <- daughterboard detection
+ *   GPIO 5 <- daughterboard detection
+ *   GPIO 6 -> ?
+ *   GPIO 7 -> ?
+ *   GPIO 8 -> route input jack to line-in (0) or mic-in (1)
  *
- * TXD -> HDMI controller
- * RXD <- HDMI controller
- *
- * PCM1796 front: AD1,0 <- 0,0
+ *   UART <-> HDMI controller
  *
  * CM9780:
  *
- * GPO 0 -> route line-in (0) or AC97 output (1) to CS5381 input
+ *   LINE_OUT -> input of ADC
+ *
+ *   AUX_IN <- aux
+ *   CD_IN  <- CD
+ *   MIC_IN <- mic
+ *
+ *   GPO 0 -> route line-in (0) or AC97 output (1) to CS5381 input
  *
  * no daughterboard
  * ----------------
  *
- * GPIO 4 <- 1
+ *   GPIO 4 <- 1
  *
  * H6 daughterboard
  * ----------------
  *
- * GPIO 4 <- 0
- * GPIO 5 <- 0
+ *   GPIO 4 <- 0
+ *   GPIO 5 <- 0
  *
- * I²C <-> PCM1796 (surround)
- *     <-> PCM1796 (center/LFE)
- *     <-> PCM1796 (back)
- *
- * PCM1796 surround:   AD1,0 <- 0,1
- * PCM1796 center/LFE: AD1,0 <- 1,0
- * PCM1796 back:       AD1,0 <- 1,1
+ *   I²C <-> PCM1796 (addr 1001101) (surround)
+ *       <-> PCM1796 (addr 1001110) (center/LFE)
+ *       <-> PCM1796 (addr 1001111) (back)
  *
  * unknown daughterboard
  * ---------------------
  *
- * GPIO 4 <- 0
- * GPIO 5 <- 1
+ *   GPIO 4 <- 0
+ *   GPIO 5 <- 1
  *
- * I²C <-> CS4362A (surround, center/LFE, back)
- *
- * CS4362A: AD0 <- 0
+ *   I²C <-> CS4362A (addr 0011000) (surround, center/LFE, back)
  */
 
 /*
@@ -98,32 +105,35 @@
  *
  * CMI8788:
  *
- * I²C <-> PCM1792A
- *     <-> CS2000 (ST only)
+ *   I²C <-> PCM1792A (addr 1001100)
+ *       <-> CS2000 (addr 1001110) (ST only)
  *
- * ADC1 MCLK -> REF_CLK of CS2000 (ST only)
+ *   ADC1 MCLK -> REF_CLK of CS2000 (ST only)
  *
- * GPI 0 <- external power present (STX only)
+ *   GPI 0 <- external power present (STX only)
  *
- * GPIO 0 -> enable output to speakers
- * GPIO 1 -> route HP to front panel (0) or rear jack (1)
- * GPIO 2 -> M0 of CS5381
- * GPIO 3 -> M1 of CS5381
- * GPIO 7 -> route output to speaker jacks (0) or HP (1)
- * GPIO 8 -> route input jack to line-in (0) or mic-in (1)
+ *   GPIO 0 -> enable output to speakers
+ *   GPIO 1 -> route HP to front panel (0) or rear jack (1)
+ *   GPIO 2 -> M0 of CS5381
+ *   GPIO 3 -> M1 of CS5381
+ *   GPIO 4 <- daughterboard detection
+ *   GPIO 5 <- daughterboard detection
+ *   GPIO 6 -> ?
+ *   GPIO 7 -> route output to speaker jacks (0) or HP (1)
+ *   GPIO 8 -> route input jack to line-in (0) or mic-in (1)
  *
  * PCM1792A:
  *
- * AD1,0 <- 0,0
- * SCK <- CLK_OUT of CS2000 (ST only)
- *
- * CS2000:
- *
- * AD0 <- 0
+ *   SCK <- CLK_OUT of CS2000 (ST only)
  *
  * CM9780:
  *
- * GPO 0 -> route line-in (0) or AC97 output (1) to CS5381 input
+ *   LINE_OUT -> input of ADC
+ *
+ *   AUX_IN <- aux
+ *   MIC_IN <- mic
+ *
+ *   GPO 0 -> route line-in (0) or AC97 output (1) to CS5381 input
  *
  * H6 daughterboard
  * ----------------
@@ -133,15 +143,39 @@
  */
 
 /*
- * Xonar HDAV1.3 Slim
- * ------------------
+ * Xonar Xense
+ * -----------
  *
  * CMI8788:
  *
- * GPIO 1 -> enable output
+ *   I²C <-> PCM1796 (addr 1001100) (front)
+ *       <-> CS4362A (addr 0011000) (surround, center/LFE, back)
+ *       <-> CS2000 (addr 1001110)
  *
- * TXD -> HDMI controller
- * RXD <- HDMI controller
+ *   ADC1 MCLK -> REF_CLK of CS2000
+ *
+ *   GPI 0 <- external power present
+ *
+ *   GPIO 0 -> enable output
+ *   GPIO 1 -> route HP to front panel (0) or rear jack (1)
+ *   GPIO 2 -> M0 of CS5381
+ *   GPIO 3 -> M1 of CS5381
+ *   GPIO 4 -> enable output
+ *   GPIO 5 -> enable output
+ *   GPIO 6 -> ?
+ *   GPIO 7 -> route output to HP (0) or speaker (1)
+ *   GPIO 8 -> route input jack to mic-in (0) or line-in (1)
+ *
+ * CM9780:
+ *
+ *   LINE_OUT -> input of ADC
+ *
+ *   AUX_IN   <- aux
+ *   VIDEO_IN <- ?
+ *   FMIC_IN  <- mic
+ *
+ *   GPO 0 -> route line-in (0) or AC97 output (1) to CS5381 input
+ *   GPO 1 -> route mic-in from input jack (0) or front panel header (1)
  */
 
 #include <linux/pci.h>
@@ -150,6 +184,7 @@
 #include <sound/ac97_codec.h>
 #include <sound/control.h>
 #include <sound/core.h>
+#include <sound/info.h>
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/tlv.h>
@@ -167,12 +202,14 @@
 #define GPIO_INPUT_ROUTE	0x0100
 
 #define GPIO_HDAV_OUTPUT_ENABLE	0x0001
+#define GPIO_HDAV_MAGIC		0x00c0
 
 #define GPIO_DB_MASK		0x0030
 #define GPIO_DB_H6		0x0000
 
 #define GPIO_ST_OUTPUT_ENABLE	0x0001
 #define GPIO_ST_HP_REAR		0x0002
+#define GPIO_ST_MAGIC		0x0040
 #define GPIO_ST_HP		0x0080
 
 #define I2C_DEVICE_PCM1796(i)	(0x98 + ((i) << 1))	/* 10011, ii, /W=0 */
@@ -186,11 +223,12 @@
 	unsigned int dacs;
 	u8 pcm1796_regs[4][5];
 	unsigned int current_rate;
-	bool os_128;
+	bool h6;
 	bool hp_active;
 	s8 hp_gain_offset;
 	bool has_cs2000;
-	u8 cs2000_fun_cfg_1;
+	u8 cs2000_regs[0x1f];
+	bool broken_i2c;
 };
 
 struct xonar_hdav {
@@ -249,16 +287,14 @@
 	struct xonar_pcm179x *data = chip->model_data;
 
 	oxygen_write_i2c(chip, I2C_DEVICE_CS2000, reg, value);
-	if (reg == CS2000_FUN_CFG_1)
-		data->cs2000_fun_cfg_1 = value;
+	data->cs2000_regs[reg] = value;
 }
 
 static void cs2000_write_cached(struct oxygen *chip, u8 reg, u8 value)
 {
 	struct xonar_pcm179x *data = chip->model_data;
 
-	if (reg != CS2000_FUN_CFG_1 ||
-	    value != data->cs2000_fun_cfg_1)
+	if (value != data->cs2000_regs[reg])
 		cs2000_write(chip, reg, value);
 }
 
@@ -268,6 +304,7 @@
 	unsigned int i;
 	s8 gain_offset;
 
+	msleep(1);
 	gain_offset = data->hp_active ? data->hp_gain_offset : 0;
 	for (i = 0; i < data->dacs; ++i) {
 		/* set ATLD before ATL/ATR */
@@ -282,6 +319,7 @@
 		pcm1796_write(chip, i, 20,
 			      data->pcm1796_regs[0][20 - PCM1796_REG_BASE]);
 		pcm1796_write(chip, i, 21, 0);
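+		/* the headphone gain offset applies to the first (front) DAC only */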
+		gain_offset = 0;
 	}
 }
 
@@ -290,10 +328,11 @@
 	struct xonar_pcm179x *data = chip->model_data;
 
 	data->pcm1796_regs[0][18 - PCM1796_REG_BASE] = PCM1796_MUTE |
-		PCM1796_DMF_DISABLED | PCM1796_FMT_24_LJUST | PCM1796_ATLD;
+		PCM1796_DMF_DISABLED | PCM1796_FMT_24_I2S | PCM1796_ATLD;
 	data->pcm1796_regs[0][19 - PCM1796_REG_BASE] =
 		PCM1796_FLT_SHARP | PCM1796_ATS_1;
-	data->pcm1796_regs[0][20 - PCM1796_REG_BASE] = PCM1796_OS_64;
+	data->pcm1796_regs[0][20 - PCM1796_REG_BASE] =
+		data->h6 ? PCM1796_OS_64 : PCM1796_OS_128;
 	pcm1796_registers_init(chip);
 	data->current_rate = 48000;
 }
@@ -339,18 +378,20 @@
 	oxygen_write16(chip, OXYGEN_2WIRE_BUS_STATUS,
 		       OXYGEN_2WIRE_LENGTH_8 |
 		       OXYGEN_2WIRE_INTERRUPT_MASK |
-		       OXYGEN_2WIRE_SPEED_FAST);
+		       OXYGEN_2WIRE_SPEED_STANDARD);
 
 	data->pcm179x.generic.anti_pop_delay = 100;
 	data->pcm179x.generic.output_enable_bit = GPIO_HDAV_OUTPUT_ENABLE;
 	data->pcm179x.generic.ext_power_reg = OXYGEN_GPI_DATA;
 	data->pcm179x.generic.ext_power_int_reg = OXYGEN_GPI_INTERRUPT_MASK;
 	data->pcm179x.generic.ext_power_bit = GPI_EXT_POWER;
-	data->pcm179x.dacs = chip->model.private_data ? 4 : 1;
+	data->pcm179x.dacs = chip->model.dac_channels_mixer / 2;
+	data->pcm179x.h6 = chip->model.dac_channels_mixer > 2;
 
 	pcm1796_init(chip);
 
-	oxygen_set_bits16(chip, OXYGEN_GPIO_CONTROL, GPIO_INPUT_ROUTE);
+	oxygen_set_bits16(chip, OXYGEN_GPIO_CONTROL,
+			  GPIO_HDAV_MAGIC | GPIO_INPUT_ROUTE);
 	oxygen_clear_bits16(chip, OXYGEN_GPIO_DATA, GPIO_INPUT_ROUTE);
 
 	xonar_init_cs53x1(chip);
@@ -367,7 +408,7 @@
 	oxygen_write16(chip, OXYGEN_2WIRE_BUS_STATUS,
 		       OXYGEN_2WIRE_LENGTH_8 |
 		       OXYGEN_2WIRE_INTERRUPT_MASK |
-		       OXYGEN_2WIRE_SPEED_FAST);
+		       OXYGEN_2WIRE_SPEED_STANDARD);
 }
 
 static void xonar_st_init_common(struct oxygen *chip)
@@ -375,13 +416,14 @@
 	struct xonar_pcm179x *data = chip->model_data;
 
 	data->generic.output_enable_bit = GPIO_ST_OUTPUT_ENABLE;
-	data->dacs = chip->model.private_data ? 4 : 1;
+	data->dacs = chip->model.dac_channels_mixer / 2;
 	data->hp_gain_offset = 2*-18;
 
 	pcm1796_init(chip);
 
 	oxygen_set_bits16(chip, OXYGEN_GPIO_CONTROL,
-			  GPIO_INPUT_ROUTE | GPIO_ST_HP_REAR | GPIO_ST_HP);
+			  GPIO_INPUT_ROUTE | GPIO_ST_HP_REAR |
+			  GPIO_ST_MAGIC | GPIO_ST_HP);
 	oxygen_clear_bits16(chip, OXYGEN_GPIO_DATA,
 			    GPIO_INPUT_ROUTE | GPIO_ST_HP_REAR | GPIO_ST_HP);
 
@@ -410,9 +452,11 @@
 	cs2000_write(chip, CS2000_RATIO_0 + 1, 0x10);
 	cs2000_write(chip, CS2000_RATIO_0 + 2, 0x00);
 	cs2000_write(chip, CS2000_RATIO_0 + 3, 0x00);
-	cs2000_write(chip, CS2000_FUN_CFG_1, data->cs2000_fun_cfg_1);
+	cs2000_write(chip, CS2000_FUN_CFG_1,
+		     data->cs2000_regs[CS2000_FUN_CFG_1]);
 	cs2000_write(chip, CS2000_FUN_CFG_2, 0);
 	cs2000_write(chip, CS2000_GLOBAL_CFG, CS2000_EN_DEV_CFG_2);
+	msleep(3); /* PLL lock delay */
 }
 
 static void xonar_st_init(struct oxygen *chip)
@@ -420,13 +464,18 @@
 	struct xonar_pcm179x *data = chip->model_data;
 
 	data->generic.anti_pop_delay = 100;
+	data->h6 = chip->model.dac_channels_mixer > 2;
 	data->has_cs2000 = 1;
-	data->cs2000_fun_cfg_1 = CS2000_REF_CLK_DIV_1;
+	data->cs2000_regs[CS2000_FUN_CFG_1] = CS2000_REF_CLK_DIV_1;
+	data->broken_i2c = true;
 
 	oxygen_write16(chip, OXYGEN_I2S_A_FORMAT,
-		       OXYGEN_RATE_48000 | OXYGEN_I2S_FORMAT_I2S |
-		       OXYGEN_I2S_MCLK_128 | OXYGEN_I2S_BITS_16 |
-		       OXYGEN_I2S_MASTER | OXYGEN_I2S_BCLK_64);
+		       OXYGEN_RATE_48000 |
+		       OXYGEN_I2S_FORMAT_I2S |
+		       OXYGEN_I2S_MCLK(data->h6 ? MCLK_256 : MCLK_512) |
+		       OXYGEN_I2S_BITS_16 |
+		       OXYGEN_I2S_MASTER |
+		       OXYGEN_I2S_BCLK_64);
 
 	xonar_st_init_i2c(chip);
 	cs2000_registers_init(chip);
@@ -507,44 +556,16 @@
 	xonar_stx_resume(chip);
 }
 
-static unsigned int mclk_from_rate(struct oxygen *chip, unsigned int rate)
-{
-	struct xonar_pcm179x *data = chip->model_data;
-
-	if (rate <= 32000)
-		return OXYGEN_I2S_MCLK_512;
-	else if (rate <= 48000 && data->os_128)
-		return OXYGEN_I2S_MCLK_512;
-	else if (rate <= 96000)
-		return OXYGEN_I2S_MCLK_256;
-	else
-		return OXYGEN_I2S_MCLK_128;
-}
-
-static unsigned int get_pcm1796_i2s_mclk(struct oxygen *chip,
-					 unsigned int channel,
-					 struct snd_pcm_hw_params *params)
-{
-	if (channel == PCM_MULTICH)
-		return mclk_from_rate(chip, params_rate(params));
-	else
-		return oxygen_default_i2s_mclk(chip, channel, params);
-}
-
 static void update_pcm1796_oversampling(struct oxygen *chip)
 {
 	struct xonar_pcm179x *data = chip->model_data;
 	unsigned int i;
 	u8 reg;
 
-	if (data->current_rate <= 32000)
+	if (data->current_rate <= 48000 && !data->h6)
 		reg = PCM1796_OS_128;
-	else if (data->current_rate <= 48000 && data->os_128)
-		reg = PCM1796_OS_128;
-	else if (data->current_rate <= 96000 || data->os_128)
-		reg = PCM1796_OS_64;
 	else
-		reg = PCM1796_OS_32;
+		reg = PCM1796_OS_64;
 	for (i = 0; i < data->dacs; ++i)
 		pcm1796_write_cached(chip, i, 20, reg);
 }
@@ -554,6 +575,7 @@
 {
 	struct xonar_pcm179x *data = chip->model_data;
 
+	msleep(1);
 	data->current_rate = params_rate(params);
 	update_pcm1796_oversampling(chip);
 }
@@ -570,6 +592,7 @@
 				     + gain_offset);
 		pcm1796_write_cached(chip, i, 17, chip->dac_volume[i * 2 + 1]
 				     + gain_offset);
+		gain_offset = 0;
 	}
 }
 
@@ -579,7 +602,7 @@
 	unsigned int i;
 	u8 value;
 
-	value = PCM1796_DMF_DISABLED | PCM1796_FMT_24_LJUST | PCM1796_ATLD;
+	value = PCM1796_DMF_DISABLED | PCM1796_FMT_24_I2S | PCM1796_ATLD;
 	if (chip->dac_mute)
 		value |= PCM1796_MUTE;
 	for (i = 0; i < data->dacs; ++i)
@@ -592,45 +615,35 @@
 	u8 rate_mclk, reg;
 
 	switch (rate) {
-		/* XXX Why is the I2S A MCLK half the actual I2S MCLK? */
 	case 32000:
-		rate_mclk = OXYGEN_RATE_32000 | OXYGEN_I2S_MCLK_256;
+	case 64000:
+		rate_mclk = OXYGEN_RATE_32000;
 		break;
 	case 44100:
-		if (data->os_128)
-			rate_mclk = OXYGEN_RATE_44100 | OXYGEN_I2S_MCLK_256;
-		else
-			rate_mclk = OXYGEN_RATE_44100 | OXYGEN_I2S_MCLK_128;
-		break;
-	default: /* 48000 */
-		if (data->os_128)
-			rate_mclk = OXYGEN_RATE_48000 | OXYGEN_I2S_MCLK_256;
-		else
-			rate_mclk = OXYGEN_RATE_48000 | OXYGEN_I2S_MCLK_128;
-		break;
-	case 64000:
-		rate_mclk = OXYGEN_RATE_32000 | OXYGEN_I2S_MCLK_256;
-		break;
 	case 88200:
-		rate_mclk = OXYGEN_RATE_44100 | OXYGEN_I2S_MCLK_256;
-		break;
-	case 96000:
-		rate_mclk = OXYGEN_RATE_48000 | OXYGEN_I2S_MCLK_256;
-		break;
 	case 176400:
-		rate_mclk = OXYGEN_RATE_44100 | OXYGEN_I2S_MCLK_256;
+		rate_mclk = OXYGEN_RATE_44100;
 		break;
+	default:
+	case 48000:
+	case 96000:
 	case 192000:
-		rate_mclk = OXYGEN_RATE_48000 | OXYGEN_I2S_MCLK_256;
+		rate_mclk = OXYGEN_RATE_48000;
 		break;
 	}
+
+	if (rate <= 96000 && (rate > 48000 || data->h6)) {
+		rate_mclk |= OXYGEN_I2S_MCLK(MCLK_256);
+		reg = CS2000_REF_CLK_DIV_1;
+	} else {
+		rate_mclk |= OXYGEN_I2S_MCLK(MCLK_512);
+		reg = CS2000_REF_CLK_DIV_2;
+	}
+
 	oxygen_write16_masked(chip, OXYGEN_I2S_A_FORMAT, rate_mclk,
 			      OXYGEN_I2S_RATE_MASK | OXYGEN_I2S_MCLK_MASK);
-	if ((rate_mclk & OXYGEN_I2S_MCLK_MASK) <= OXYGEN_I2S_MCLK_128)
-		reg = CS2000_REF_CLK_DIV_1;
-	else
-		reg = CS2000_REF_CLK_DIV_2;
 	cs2000_write_cached(chip, CS2000_FUN_CFG_1, reg);
+	msleep(3); /* PLL lock delay */
 }
 
 static void set_st_params(struct oxygen *chip,
@@ -665,13 +678,7 @@
 		"Sharp Roll-off", "Slow Roll-off"
 	};
 
-	info->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
-	info->count = 1;
-	info->value.enumerated.items = 2;
-	if (info->value.enumerated.item >= 2)
-		info->value.enumerated.item = 1;
-	strcpy(info->value.enumerated.name, names[info->value.enumerated.item]);
-	return 0;
+	return snd_ctl_enum_info(info, 1, 2, names);
 }
 
 static int rolloff_get(struct snd_kcontrol *ctl,
@@ -719,57 +726,13 @@
 	.put = rolloff_put,
 };
 
-static int os_128_info(struct snd_kcontrol *ctl, struct snd_ctl_elem_info *info)
-{
-	static const char *const names[2] = { "64x", "128x" };
-
-	info->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
-	info->count = 1;
-	info->value.enumerated.items = 2;
-	if (info->value.enumerated.item >= 2)
-		info->value.enumerated.item = 1;
-	strcpy(info->value.enumerated.name, names[info->value.enumerated.item]);
-	return 0;
-}
-
-static int os_128_get(struct snd_kcontrol *ctl,
-		      struct snd_ctl_elem_value *value)
-{
-	struct oxygen *chip = ctl->private_data;
-	struct xonar_pcm179x *data = chip->model_data;
-
-	value->value.enumerated.item[0] = data->os_128;
-	return 0;
-}
-
-static int os_128_put(struct snd_kcontrol *ctl,
-		      struct snd_ctl_elem_value *value)
-{
-	struct oxygen *chip = ctl->private_data;
-	struct xonar_pcm179x *data = chip->model_data;
-	int changed;
-
-	mutex_lock(&chip->mutex);
-	changed = value->value.enumerated.item[0] != data->os_128;
-	if (changed) {
-		data->os_128 = value->value.enumerated.item[0];
-		if (data->has_cs2000)
-			update_cs2000_rate(chip, data->current_rate);
-		oxygen_write16_masked(chip, OXYGEN_I2S_MULTICH_FORMAT,
-				      mclk_from_rate(chip, data->current_rate),
-				      OXYGEN_I2S_MCLK_MASK);
-		update_pcm1796_oversampling(chip);
-	}
-	mutex_unlock(&chip->mutex);
-	return changed;
-}
-
-static const struct snd_kcontrol_new os_128_control = {
+static const struct snd_kcontrol_new hdav_hdmi_control = {
 	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
-	.name = "DAC Oversampling Playback Enum",
-	.info = os_128_info,
-	.get = os_128_get,
-	.put = os_128_put,
+	.name = "HDMI Playback Switch",
+	.info = snd_ctl_boolean_mono_info,
+	.get = xonar_gpio_bit_switch_get,
+	.put = xonar_gpio_bit_switch_put,
+	.private_value = GPIO_HDAV_OUTPUT_ENABLE | XONAR_GPIO_BIT_INVERT,
 };
 
 static int st_output_switch_info(struct snd_kcontrol *ctl,
@@ -779,13 +742,7 @@
 		"Speakers", "Headphones", "FP Headphones"
 	};
 
-	info->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
-	info->count = 1;
-	info->value.enumerated.items = 3;
-	if (info->value.enumerated.item >= 3)
-		info->value.enumerated.item = 2;
-	strcpy(info->value.enumerated.name, names[info->value.enumerated.item]);
-	return 0;
+	return snd_ctl_enum_info(info, 1, 3, names);
 }
 
 static int st_output_switch_get(struct snd_kcontrol *ctl,
@@ -840,13 +797,7 @@
 		"< 64 ohms", "64-300 ohms", "300-600 ohms"
 	};
 
-	info->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
-	info->count = 1;
-	info->value.enumerated.items = 3;
-	if (info->value.enumerated.item > 2)
-		info->value.enumerated.item = 2;
-	strcpy(info->value.enumerated.name, names[info->value.enumerated.item]);
-	return 0;
+	return snd_ctl_enum_info(info, 1, 3, names);
 }
 
 static int st_hp_volume_offset_get(struct snd_kcontrol *ctl,
@@ -928,16 +879,25 @@
 	return 0;
 }
 
+static int xonar_st_h6_control_filter(struct snd_kcontrol_new *template)
+{
+	if (!strncmp(template->name, "Master Playback ", 16))
+		/* no volume/mute, as I²C to the third DAC does not work */
+		return 1;
+	return 0;
+}
+
 static int add_pcm1796_controls(struct oxygen *chip)
 {
+	struct xonar_pcm179x *data = chip->model_data;
 	int err;
 
-	err = snd_ctl_add(chip->card, snd_ctl_new1(&rolloff_control, chip));
-	if (err < 0)
-		return err;
-	err = snd_ctl_add(chip->card, snd_ctl_new1(&os_128_control, chip));
-	if (err < 0)
-		return err;
+	if (!data->broken_i2c) {
+		err = snd_ctl_add(chip->card,
+				  snd_ctl_new1(&rolloff_control, chip));
+		if (err < 0)
+			return err;
+	}
 	return 0;
 }
 
@@ -956,7 +916,15 @@
 
 static int xonar_hdav_mixer_init(struct oxygen *chip)
 {
-	return add_pcm1796_controls(chip);
+	int err;
+
+	err = snd_ctl_add(chip->card, snd_ctl_new1(&hdav_hdmi_control, chip));
+	if (err < 0)
+		return err;
+	err = add_pcm1796_controls(chip);
+	if (err < 0)
+		return err;
+	return 0;
 }
 
 static int xonar_st_mixer_init(struct oxygen *chip)
@@ -976,6 +944,45 @@
 	return 0;
 }
 
+static void dump_pcm1796_registers(struct oxygen *chip,
+				   struct snd_info_buffer *buffer)
+{
+	struct xonar_pcm179x *data = chip->model_data;
+	unsigned int dac, i;
+
+	for (dac = 0; dac < data->dacs; ++dac) {
+		snd_iprintf(buffer, "\nPCM1796 %u:", dac + 1);
+		for (i = 0; i < 5; ++i)
+			snd_iprintf(buffer, " %02x",
+				    data->pcm1796_regs[dac][i]);
+	}
+	snd_iprintf(buffer, "\n");
+}
+
+static void dump_cs2000_registers(struct oxygen *chip,
+				  struct snd_info_buffer *buffer)
+{
+	struct xonar_pcm179x *data = chip->model_data;
+	unsigned int i;
+
+	if (data->has_cs2000) {
+		snd_iprintf(buffer, "\nCS2000:\n00:   ");
+		for (i = 1; i < 0x10; ++i)
+			snd_iprintf(buffer, " %02x", data->cs2000_regs[i]);
+		snd_iprintf(buffer, "\n10:");
+		for (i = 0x10; i < 0x1f; ++i)
+			snd_iprintf(buffer, " %02x", data->cs2000_regs[i]);
+		snd_iprintf(buffer, "\n");
+	}
+}
+
+static void dump_st_registers(struct oxygen *chip,
+			      struct snd_info_buffer *buffer)
+{
+	dump_pcm1796_registers(chip, buffer);
+	dump_cs2000_registers(chip, buffer);
+}
+
 static const struct oxygen_model model_xonar_d2 = {
 	.longname = "Asus Virtuoso 200",
 	.chip = "AV200",
@@ -985,11 +992,11 @@
 	.cleanup = xonar_d2_cleanup,
 	.suspend = xonar_d2_suspend,
 	.resume = xonar_d2_resume,
-	.get_i2s_mclk = get_pcm1796_i2s_mclk,
 	.set_dac_params = set_pcm1796_params,
 	.set_adc_params = xonar_set_cs53x1_params,
 	.update_dac_volume = update_pcm1796_volume,
 	.update_dac_mute = update_pcm1796_mute,
+	.dump_registers = dump_pcm1796_registers,
 	.dac_tlv = pcm1796_db_scale,
 	.model_data_size = sizeof(struct xonar_pcm179x),
 	.device_config = PLAYBACK_0_TO_I2S |
@@ -999,13 +1006,16 @@
 			 MIDI_OUTPUT |
 			 MIDI_INPUT |
 			 AC97_CD_INPUT,
-	.dac_channels = 8,
+	.dac_channels_pcm = 8,
+	.dac_channels_mixer = 8,
 	.dac_volume_min = 255 - 2*60,
 	.dac_volume_max = 255,
 	.misc_flags = OXYGEN_MISC_MIDI,
 	.function_flags = OXYGEN_FUNCTION_SPI |
 			  OXYGEN_FUNCTION_ENABLE_SPI_4_5,
-	.dac_i2s_format = OXYGEN_I2S_FORMAT_LJUST,
+	.dac_mclks = OXYGEN_MCLKS(512, 128, 128),
+	.adc_mclks = OXYGEN_MCLKS(256, 128, 128),
+	.dac_i2s_format = OXYGEN_I2S_FORMAT_I2S,
 	.adc_i2s_format = OXYGEN_I2S_FORMAT_LJUST,
 };
 
@@ -1018,25 +1028,28 @@
 	.suspend = xonar_hdav_suspend,
 	.resume = xonar_hdav_resume,
 	.pcm_hardware_filter = xonar_hdmi_pcm_hardware_filter,
-	.get_i2s_mclk = get_pcm1796_i2s_mclk,
 	.set_dac_params = set_hdav_params,
 	.set_adc_params = xonar_set_cs53x1_params,
 	.update_dac_volume = update_pcm1796_volume,
 	.update_dac_mute = update_pcm1796_mute,
 	.uart_input = xonar_hdmi_uart_input,
 	.ac97_switch = xonar_line_mic_ac97_switch,
+	.dump_registers = dump_pcm1796_registers,
 	.dac_tlv = pcm1796_db_scale,
 	.model_data_size = sizeof(struct xonar_hdav),
 	.device_config = PLAYBACK_0_TO_I2S |
 			 PLAYBACK_1_TO_SPDIF |
 			 CAPTURE_0_FROM_I2S_2 |
 			 CAPTURE_1_FROM_SPDIF,
-	.dac_channels = 8,
+	.dac_channels_pcm = 8,
+	.dac_channels_mixer = 2,
 	.dac_volume_min = 255 - 2*60,
 	.dac_volume_max = 255,
 	.misc_flags = OXYGEN_MISC_MIDI,
 	.function_flags = OXYGEN_FUNCTION_2WIRE,
-	.dac_i2s_format = OXYGEN_I2S_FORMAT_LJUST,
+	.dac_mclks = OXYGEN_MCLKS(512, 128, 128),
+	.adc_mclks = OXYGEN_MCLKS(256, 128, 128),
+	.dac_i2s_format = OXYGEN_I2S_FORMAT_I2S,
 	.adc_i2s_format = OXYGEN_I2S_FORMAT_LJUST,
 };
 
@@ -1048,22 +1061,26 @@
 	.cleanup = xonar_st_cleanup,
 	.suspend = xonar_st_suspend,
 	.resume = xonar_st_resume,
-	.get_i2s_mclk = get_pcm1796_i2s_mclk,
 	.set_dac_params = set_st_params,
 	.set_adc_params = xonar_set_cs53x1_params,
 	.update_dac_volume = update_pcm1796_volume,
 	.update_dac_mute = update_pcm1796_mute,
 	.ac97_switch = xonar_line_mic_ac97_switch,
+	.dump_registers = dump_st_registers,
 	.dac_tlv = pcm1796_db_scale,
 	.model_data_size = sizeof(struct xonar_pcm179x),
 	.device_config = PLAYBACK_0_TO_I2S |
 			 PLAYBACK_1_TO_SPDIF |
-			 CAPTURE_0_FROM_I2S_2,
-	.dac_channels = 2,
+			 CAPTURE_0_FROM_I2S_2 |
+			 AC97_FMIC_SWITCH,
+	.dac_channels_pcm = 2,
+	.dac_channels_mixer = 2,
 	.dac_volume_min = 255 - 2*60,
 	.dac_volume_max = 255,
 	.function_flags = OXYGEN_FUNCTION_2WIRE,
-	.dac_i2s_format = OXYGEN_I2S_FORMAT_LJUST,
+	.dac_mclks = OXYGEN_MCLKS(512, 128, 128),
+	.adc_mclks = OXYGEN_MCLKS(256, 128, 128),
+	.dac_i2s_format = OXYGEN_I2S_FORMAT_I2S,
 	.adc_i2s_format = OXYGEN_I2S_FORMAT_LJUST,
 };
 
@@ -1089,7 +1106,8 @@
 			break;
 		case GPIO_DB_H6:
 			chip->model.shortname = "Xonar HDAV1.3+H6";
-			chip->model.private_data = 1;
+			chip->model.dac_channels_mixer = 8;
+			chip->model.dac_mclks = OXYGEN_MCLKS(256, 128, 128);
 			break;
 		}
 		break;
@@ -1102,8 +1120,10 @@
 			break;
 		case GPIO_DB_H6:
 			chip->model.shortname = "Xonar ST+H6";
-			chip->model.dac_channels = 8;
-			chip->model.private_data = 1;
+			chip->model.control_filter = xonar_st_h6_control_filter;
+			chip->model.dac_channels_pcm = 8;
+			chip->model.dac_channels_mixer = 8;
+			chip->model.dac_mclks = OXYGEN_MCLKS(256, 128, 128);
 			break;
 		}
 		break;
@@ -1114,9 +1134,6 @@
 		chip->model.resume = xonar_stx_resume;
 		chip->model.set_dac_params = set_pcm1796_params;
 		break;
-	case 0x835e:
-		snd_printk(KERN_ERR "the HDAV1.3 Slim is not supported\n");
-		return -ENODEV;
 	default:
 		return -EINVAL;
 	}
diff --git a/sound/pci/oxygen/xonar_wm87x6.c b/sound/pci/oxygen/xonar_wm87x6.c
index 200f760..42d1ab1 100644
--- a/sound/pci/oxygen/xonar_wm87x6.c
+++ b/sound/pci/oxygen/xonar_wm87x6.c
@@ -1,5 +1,5 @@
 /*
- * card driver for models with WM8776/WM8766 DACs (Xonar DS)
+ * card driver for models with WM8776/WM8766 DACs (Xonar DS/HDAV1.3 Slim)
  *
  * Copyright (c) Clemens Ladisch <clemens@ladisch.de>
  *
@@ -22,26 +22,48 @@
  *
  * CMI8788:
  *
- * SPI 0 -> WM8766 (surround, center/LFE, back)
- * SPI 1 -> WM8776 (front, input)
+ *   SPI 0 -> WM8766 (surround, center/LFE, back)
+ *   SPI 1 -> WM8776 (front, input)
  *
- * GPIO 4 <- headphone detect, 0 = plugged
- * GPIO 6 -> route input jack to mic-in (0) or line-in (1)
- * GPIO 7 -> enable output to front L/R speaker channels
- * GPIO 8 -> enable output to other speaker channels and front panel headphone
+ *   GPIO 4 <- headphone detect, 0 = plugged
+ *   GPIO 6 -> route input jack to mic-in (0) or line-in (1)
+ *   GPIO 7 -> enable output to front L/R speaker channels
+ *   GPIO 8 -> enable output to other speaker channels and front panel headphone
  *
- * WM8766:
+ * WM8776:
  *
- * input 1 <- line
- * input 2 <- mic
- * input 3 <- front mic
- * input 4 <- aux
+ *   input 1 <- line
+ *   input 2 <- mic
+ *   input 3 <- front mic
+ *   input 4 <- aux
+ */
+
+/*
+ * Xonar HDAV1.3 Slim
+ * ------------------
+ *
+ * CMI8788:
+ *
+ *   I²C <-> WM8776 (addr 0011010)
+ *
+ *   GPIO 0  -> disable HDMI output
+ *   GPIO 1  -> enable HP output
+ *   GPIO 6  -> firmware EEPROM I²C clock
+ *   GPIO 7 <-> firmware EEPROM I²C data
+ *
+ *   UART <-> HDMI controller
+ *
+ * WM8776:
+ *
+ *   input 1 <- mic
+ *   input 2 <- aux
  */
 
 #include <linux/pci.h>
 #include <linux/delay.h>
 #include <sound/control.h>
 #include <sound/core.h>
+#include <sound/info.h>
 #include <sound/jack.h>
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
@@ -55,6 +77,13 @@
 #define GPIO_DS_OUTPUT_FRONTLR	0x0080
 #define GPIO_DS_OUTPUT_ENABLE	0x0100
 
+#define GPIO_SLIM_HDMI_DISABLE	0x0001
+#define GPIO_SLIM_OUTPUT_ENABLE	0x0002
+#define GPIO_SLIM_FIRMWARE_CLK	0x0040
+#define GPIO_SLIM_FIRMWARE_DATA	0x0080
+
+#define I2C_DEVICE_WM8776	0x34	/* 001101, 0, /W=0 */
+
 #define LC_CONTROL_LIMITER	0x40000000
 #define LC_CONTROL_ALC		0x20000000
 
@@ -66,19 +95,37 @@
 	struct snd_kcontrol *mic_adcmux_control;
 	struct snd_kcontrol *lc_controls[13];
 	struct snd_jack *hp_jack;
+	struct xonar_hdmi hdmi;
 };
 
-static void wm8776_write(struct oxygen *chip,
-			 unsigned int reg, unsigned int value)
+static void wm8776_write_spi(struct oxygen *chip,
+			     unsigned int reg, unsigned int value)
 {
-	struct xonar_wm87x6 *data = chip->model_data;
-
 	oxygen_write_spi(chip, OXYGEN_SPI_TRIGGER |
 			 OXYGEN_SPI_DATA_LENGTH_2 |
 			 OXYGEN_SPI_CLOCK_160 |
 			 (1 << OXYGEN_SPI_CODEC_SHIFT) |
 			 OXYGEN_SPI_CEN_LATCH_CLOCK_LO,
 			 (reg << 9) | value);
+}
+
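+/*
+ * WM8776 registers are 7 bits wide with 9-bit values; over I²C the top
+ * value bit travels in the same byte as the register index.
+ */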
+static void wm8776_write_i2c(struct oxygen *chip,
+			     unsigned int reg, unsigned int value)
+{
+	oxygen_write_i2c(chip, I2C_DEVICE_WM8776,
+			 (reg << 1) | (value >> 8), value);
+}
+
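+/*
+ * The DS wires the WM8776 to SPI, the HDAV1.3 Slim to I²C; dispatch on the
+ * model's function_flags so the rest of the driver can stay bus-agnostic.
+ */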
+static void wm8776_write(struct oxygen *chip,
+			 unsigned int reg, unsigned int value)
+{
+	struct xonar_wm87x6 *data = chip->model_data;
+
+	if ((chip->model.function_flags & OXYGEN_FUNCTION_2WIRE_SPI_MASK) ==
+	    OXYGEN_FUNCTION_SPI)
+		wm8776_write_spi(chip, reg, value);
+	else
+		wm8776_write_i2c(chip, reg, value);
 	if (reg < ARRAY_SIZE(data->wm8776_regs)) {
 		if (reg >= WM8776_HPLVOL && reg <= WM8776_DACMASTER)
 			value &= ~WM8776_UPDATE;
@@ -245,17 +292,50 @@
 	snd_component_add(chip->card, "WM8766");
 }
 
+static void xonar_hdav_slim_init(struct oxygen *chip)
+{
+	struct xonar_wm87x6 *data = chip->model_data;
+
+	data->generic.anti_pop_delay = 300;
+	data->generic.output_enable_bit = GPIO_SLIM_OUTPUT_ENABLE;
+
+	wm8776_init(chip);
+
+	oxygen_set_bits16(chip, OXYGEN_GPIO_CONTROL,
+			  GPIO_SLIM_HDMI_DISABLE |
+			  GPIO_SLIM_FIRMWARE_CLK |
+			  GPIO_SLIM_FIRMWARE_DATA);
+
+	xonar_hdmi_init(chip, &data->hdmi);
+	xonar_enable_output(chip);
+
+	snd_component_add(chip->card, "WM8776");
+}
+
 static void xonar_ds_cleanup(struct oxygen *chip)
 {
 	xonar_disable_output(chip);
 	wm8776_write(chip, WM8776_RESET, 0);
 }
 
+static void xonar_hdav_slim_cleanup(struct oxygen *chip)
+{
+	xonar_hdmi_cleanup(chip);
+	xonar_disable_output(chip);
+	wm8776_write(chip, WM8776_RESET, 0);
+	msleep(2);
+}
+
 static void xonar_ds_suspend(struct oxygen *chip)
 {
 	xonar_ds_cleanup(chip);
 }
 
+static void xonar_hdav_slim_suspend(struct oxygen *chip)
+{
+	xonar_hdav_slim_cleanup(chip);
+}
+
 static void xonar_ds_resume(struct oxygen *chip)
 {
 	wm8776_registers_init(chip);
@@ -264,6 +344,15 @@
 	xonar_ds_handle_hp_jack(chip);
 }
 
+static void xonar_hdav_slim_resume(struct oxygen *chip)
+{
+	struct xonar_wm87x6 *data = chip->model_data;
+
+	wm8776_registers_init(chip);
+	xonar_hdmi_resume(chip, &data->hdmi);
+	xonar_enable_output(chip);
+}
+
 static void wm8776_adc_hardware_filter(unsigned int channel,
 				       struct snd_pcm_hardware *hardware)
 {
@@ -278,6 +367,13 @@
 	}
 }
 
+static void xonar_hdav_slim_hardware_filter(unsigned int channel,
+					    struct snd_pcm_hardware *hardware)
+{
+	wm8776_adc_hardware_filter(channel, hardware);
+	xonar_hdmi_pcm_hardware_filter(channel, hardware);
+}
+
 static void set_wm87x6_dac_params(struct oxygen *chip,
 				  struct snd_pcm_hw_params *params)
 {
@@ -294,6 +390,14 @@
 	wm8776_write_cached(chip, WM8776_MSTRCTRL, reg);
 }
 
+static void set_hdav_slim_dac_params(struct oxygen *chip,
+				     struct snd_pcm_hw_params *params)
+{
+	struct xonar_wm87x6 *data = chip->model_data;
+
+	xonar_set_hdmi_params(chip, &data->hdmi, params);
+}
+
 static void update_wm8776_volume(struct oxygen *chip)
 {
 	struct xonar_wm87x6 *data = chip->model_data;
@@ -473,11 +577,6 @@
 	const char *const *names;
 
 	max = (ctl->private_value >> 12) & 0xf;
-	info->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
-	info->count = 1;
-	info->value.enumerated.items = max + 1;
-	if (info->value.enumerated.item > max)
-		info->value.enumerated.item = max;
 	switch ((ctl->private_value >> 24) & 0x1f) {
 	case WM8776_ALCCTRL2:
 		names = hld;
@@ -501,8 +600,7 @@
 	default:
 		return -ENXIO;
 	}
-	strcpy(info->value.enumerated.name, names[info->value.enumerated.item]);
-	return 0;
+	return snd_ctl_enum_info(info, 1, max + 1, names);
 }
 
 static int wm8776_field_volume_info(struct snd_kcontrol *ctl,
@@ -759,13 +857,8 @@
 	static const char *const names[3] = {
 		"None", "Peak Limiter", "Automatic Level Control"
 	};
-	info->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
-	info->count = 1;
-	info->value.enumerated.items = 3;
-	if (info->value.enumerated.item >= 3)
-		info->value.enumerated.item = 2;
-	strcpy(info->value.enumerated.name, names[info->value.enumerated.item]);
-	return 0;
+
+	return snd_ctl_enum_info(info, 1, 3, names);
 }
 
 static int wm8776_level_control_get(struct snd_kcontrol *ctl,
@@ -851,13 +944,7 @@
 		"None", "High-pass Filter"
 	};
 
-	info->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
-	info->count = 1;
-	info->value.enumerated.items = 2;
-	if (info->value.enumerated.item >= 2)
-		info->value.enumerated.item = 1;
-	strcpy(info->value.enumerated.name, names[info->value.enumerated.item]);
-	return 0;
+	return snd_ctl_enum_info(info, 1, 2, names);
 }
 
 static int hpf_get(struct snd_kcontrol *ctl, struct snd_ctl_elem_value *value)
@@ -985,6 +1072,53 @@
 		.private_value = 0,
 	},
 };
+static const struct snd_kcontrol_new hdav_slim_controls[] = {
+	{
+		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+		.name = "HDMI Playback Switch",
+		.info = snd_ctl_boolean_mono_info,
+		.get = xonar_gpio_bit_switch_get,
+		.put = xonar_gpio_bit_switch_put,
+		.private_value = GPIO_SLIM_HDMI_DISABLE | XONAR_GPIO_BIT_INVERT,
+	},
+	{
+		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+		.name = "Headphone Playback Volume",
+		.info = wm8776_hp_vol_info,
+		.get = wm8776_hp_vol_get,
+		.put = wm8776_hp_vol_put,
+		.tlv = { .p = wm8776_hp_db_scale },
+	},
+	WM8776_BIT_SWITCH("Headphone Playback Switch",
+			  WM8776_PWRDOWN, WM8776_HPPD, 1, 0),
+	{
+		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+		.name = "Input Capture Volume",
+		.info = wm8776_input_vol_info,
+		.get = wm8776_input_vol_get,
+		.put = wm8776_input_vol_put,
+		.tlv = { .p = wm8776_adc_db_scale },
+	},
+	WM8776_BIT_SWITCH("Mic Capture Switch",
+			  WM8776_ADCMUX, 1 << 0, 0, 0),
+	WM8776_BIT_SWITCH("Aux Capture Switch",
+			  WM8776_ADCMUX, 1 << 1, 0, 0),
+	{
+		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+		.name = "ADC Filter Capture Enum",
+		.info = hpf_info,
+		.get = hpf_get,
+		.put = hpf_put,
+	},
+	{
+		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+		.name = "Level Control Capture Enum",
+		.info = wm8776_level_control_info,
+		.get = wm8776_level_control_get,
+		.put = wm8776_level_control_put,
+		.private_value = 0,
+	},
+};
 static const struct snd_kcontrol_new lc_controls[] = {
 	WM8776_FIELD_CTL_VOLUME("Limiter Threshold",
 				WM8776_ALCCTRL1, 0, 11, 0, 15, 0xf,
@@ -1028,6 +1162,26 @@
 				LC_CONTROL_ALC, wm8776_ngth_db_scale),
 };
 
+static int add_lc_controls(struct oxygen *chip)
+{
+	struct xonar_wm87x6 *data = chip->model_data;
+	unsigned int i;
+	struct snd_kcontrol *ctl;
+	int err;
+
+	BUILD_BUG_ON(ARRAY_SIZE(lc_controls) != ARRAY_SIZE(data->lc_controls));
+	for (i = 0; i < ARRAY_SIZE(lc_controls); ++i) {
+		ctl = snd_ctl_new1(&lc_controls[i], chip);
+		if (!ctl)
+			return -ENOMEM;
+		err = snd_ctl_add(chip->card, ctl);
+		if (err < 0)
+			return err;
+		data->lc_controls[i] = ctl;
+	}
+	return 0;
+}
+
 static int xonar_ds_mixer_init(struct oxygen *chip)
 {
 	struct xonar_wm87x6 *data = chip->model_data;
@@ -1049,17 +1203,54 @@
 	}
 	if (!data->line_adcmux_control || !data->mic_adcmux_control)
 		return -ENXIO;
-	BUILD_BUG_ON(ARRAY_SIZE(lc_controls) != ARRAY_SIZE(data->lc_controls));
-	for (i = 0; i < ARRAY_SIZE(lc_controls); ++i) {
-		ctl = snd_ctl_new1(&lc_controls[i], chip);
+
+	return add_lc_controls(chip);
+}
+
+static int xonar_hdav_slim_mixer_init(struct oxygen *chip)
+{
+	unsigned int i;
+	struct snd_kcontrol *ctl;
+	int err;
+
+	for (i = 0; i < ARRAY_SIZE(hdav_slim_controls); ++i) {
+		ctl = snd_ctl_new1(&hdav_slim_controls[i], chip);
 		if (!ctl)
 			return -ENOMEM;
 		err = snd_ctl_add(chip->card, ctl);
 		if (err < 0)
 			return err;
-		data->lc_controls[i] = ctl;
 	}
-	return 0;
+
+	return add_lc_controls(chip);
+}
+
+static void dump_wm8776_registers(struct oxygen *chip,
+				  struct snd_info_buffer *buffer)
+{
+	struct xonar_wm87x6 *data = chip->model_data;
+	unsigned int i;
+
+	snd_iprintf(buffer, "\nWM8776:\n00:");
+	for (i = 0; i < 0x10; ++i)
+		snd_iprintf(buffer, " %03x", data->wm8776_regs[i]);
+	snd_iprintf(buffer, "\n10:");
+	for (i = 0x10; i < 0x17; ++i)
+		snd_iprintf(buffer, " %03x", data->wm8776_regs[i]);
+	snd_iprintf(buffer, "\n");
+}
+
+static void dump_wm87x6_registers(struct oxygen *chip,
+				  struct snd_info_buffer *buffer)
+{
+	struct xonar_wm87x6 *data = chip->model_data;
+	unsigned int i;
+
+	dump_wm8776_registers(chip, buffer);
+	snd_iprintf(buffer, "\nWM8766:\n00:");
+	for (i = 0; i < 0x10; ++i)
+		snd_iprintf(buffer, " %03x", data->wm8766_regs[i]);
+	snd_iprintf(buffer, "\n");
 }
 
 static const struct oxygen_model model_xonar_ds = {
@@ -1072,22 +1263,57 @@
 	.suspend = xonar_ds_suspend,
 	.resume = xonar_ds_resume,
 	.pcm_hardware_filter = wm8776_adc_hardware_filter,
-	.get_i2s_mclk = oxygen_default_i2s_mclk,
 	.set_dac_params = set_wm87x6_dac_params,
 	.set_adc_params = set_wm8776_adc_params,
 	.update_dac_volume = update_wm87x6_volume,
 	.update_dac_mute = update_wm87x6_mute,
 	.update_center_lfe_mix = update_wm8766_center_lfe_mix,
 	.gpio_changed = xonar_ds_gpio_changed,
+	.dump_registers = dump_wm87x6_registers,
 	.dac_tlv = wm87x6_dac_db_scale,
 	.model_data_size = sizeof(struct xonar_wm87x6),
 	.device_config = PLAYBACK_0_TO_I2S |
 			 PLAYBACK_1_TO_SPDIF |
 			 CAPTURE_0_FROM_I2S_1,
-	.dac_channels = 8,
+	.dac_channels_pcm = 8,
+	.dac_channels_mixer = 8,
 	.dac_volume_min = 255 - 2*60,
 	.dac_volume_max = 255,
 	.function_flags = OXYGEN_FUNCTION_SPI,
+	.dac_mclks = OXYGEN_MCLKS(256, 256, 128),
+	.adc_mclks = OXYGEN_MCLKS(256, 256, 128),
+	.dac_i2s_format = OXYGEN_I2S_FORMAT_LJUST,
+	.adc_i2s_format = OXYGEN_I2S_FORMAT_LJUST,
+};
+
+static const struct oxygen_model model_xonar_hdav_slim = {
+	.shortname = "Xonar HDAV1.3 Slim",
+	.longname = "Asus Virtuoso 200",
+	.chip = "AV200",
+	.init = xonar_hdav_slim_init,
+	.mixer_init = xonar_hdav_slim_mixer_init,
+	.cleanup = xonar_hdav_slim_cleanup,
+	.suspend = xonar_hdav_slim_suspend,
+	.resume = xonar_hdav_slim_resume,
+	.pcm_hardware_filter = xonar_hdav_slim_hardware_filter,
+	.set_dac_params = set_hdav_slim_dac_params,
+	.set_adc_params = set_wm8776_adc_params,
+	.update_dac_volume = update_wm8776_volume,
+	.update_dac_mute = update_wm8776_mute,
+	.uart_input = xonar_hdmi_uart_input,
+	.dump_registers = dump_wm8776_registers,
+	.dac_tlv = wm87x6_dac_db_scale,
+	.model_data_size = sizeof(struct xonar_wm87x6),
+	.device_config = PLAYBACK_0_TO_I2S |
+			 PLAYBACK_1_TO_SPDIF |
+			 CAPTURE_0_FROM_I2S_1,
+	.dac_channels_pcm = 8,
+	.dac_channels_mixer = 2,
+	.dac_volume_min = 255 - 2*60,
+	.dac_volume_max = 255,
+	.function_flags = OXYGEN_FUNCTION_2WIRE,
+	.dac_mclks = OXYGEN_MCLKS(256, 256, 128),
+	.adc_mclks = OXYGEN_MCLKS(256, 256, 128),
 	.dac_i2s_format = OXYGEN_I2S_FORMAT_LJUST,
 	.adc_i2s_format = OXYGEN_I2S_FORMAT_LJUST,
 };
@@ -1099,6 +1325,9 @@
 	case 0x838e:
 		chip->model = model_xonar_ds;
 		break;
+	case 0x835e:
+		chip->model = model_xonar_hdav_slim;
+		break;
 	default:
 		return -EINVAL;
 	}
diff --git a/sound/pci/rme9652/hdsp.c b/sound/pci/rme9652/hdsp.c
index 0b720cf..2d83324 100644
--- a/sound/pci/rme9652/hdsp.c
+++ b/sound/pci/rme9652/hdsp.c
@@ -60,6 +60,7 @@
 	        "{RME HDSP-9652},"
 		"{RME HDSP-9632}}");
 #ifdef HDSP_FW_LOADER
+MODULE_FIRMWARE("rpm_firmware.bin");
 MODULE_FIRMWARE("multiface_firmware.bin");
 MODULE_FIRMWARE("multiface_firmware_rev11.bin");
 MODULE_FIRMWARE("digiface_firmware.bin");
@@ -81,6 +82,7 @@
 #define H9632_SS_CHANNELS	 12
 #define H9632_DS_CHANNELS	 8
 #define H9632_QS_CHANNELS	 4
+#define RPM_CHANNELS             6
 
 /* Write registers. These are defined as byte-offsets from the iobase value.
  */
@@ -191,6 +193,25 @@
 #define HDSP_PhoneGain1		  (1<<30)
 #define HDSP_QuadSpeed	  	  (1<<31)
 
+/* RPM uses some of the registers for special purposes */
+#define HDSP_RPM_Inp12            0x04A00
+#define HDSP_RPM_Inp12_Phon_6dB   0x00800  /* Dolby */
+#define HDSP_RPM_Inp12_Phon_0dB   0x00000  /* .. */
+#define HDSP_RPM_Inp12_Phon_n6dB  0x04000  /* inp_0 */
+#define HDSP_RPM_Inp12_Line_0dB   0x04200  /* Dolby+PRO */
+#define HDSP_RPM_Inp12_Line_n6dB  0x00200  /* PRO */
+
+#define HDSP_RPM_Inp34            0x32000
+#define HDSP_RPM_Inp34_Phon_6dB   0x20000  /* SyncRef1 */
+#define HDSP_RPM_Inp34_Phon_0dB   0x00000  /* .. */
+#define HDSP_RPM_Inp34_Phon_n6dB  0x02000  /* SyncRef2 */
+#define HDSP_RPM_Inp34_Line_0dB   0x30000  /* SyncRef1+SyncRef0 */
+#define HDSP_RPM_Inp34_Line_n6dB  0x10000  /* SyncRef0 */
+
+#define HDSP_RPM_Bypass           0x01000
+
+#define HDSP_RPM_Disconnect       0x00001
+
 #define HDSP_ADGainMask       (HDSP_ADGain0|HDSP_ADGain1)
 #define HDSP_ADGainMinus10dBV  HDSP_ADGainMask
 #define HDSP_ADGainPlus4dBu   (HDSP_ADGain0)
@@ -450,7 +471,7 @@
 	u32                   creg_spdif;
 	u32                   creg_spdif_stream;
 	int                   clock_source_locked;
-	char                 *card_name;	     /* digiface/multiface */
+	char                 *card_name;	 /* digiface/multiface/rpm */
 	enum HDSP_IO_Type     io_type;               /* ditto, but for code use */
         unsigned short        firmware_rev;
 	unsigned short	      state;		     /* stores state bits */
@@ -612,6 +633,7 @@
 	switch (hdsp->io_type) {
 	case Multiface:
 	case Digiface:
+	case RPM:
 	default:
 		if (hdsp->firmware_rev == 0xa)
 			return (64 * out) + (32 + (in));
@@ -629,6 +651,7 @@
 	switch (hdsp->io_type) {
 	case Multiface:
 	case Digiface:
+	case RPM:
 	default:
 		if (hdsp->firmware_rev == 0xa)
 			return (64 * out) + in;
@@ -655,7 +678,7 @@
 {
 	if (hdsp->io_type == H9652 || hdsp->io_type == H9632) return 0;
 	if (hdsp_read (hdsp, HDSP_statusRegister) & HDSP_ConfigError) {
-		snd_printk ("Hammerfall-DSP: no Digiface or Multiface connected!\n");
+		snd_printk("Hammerfall-DSP: no IO box connected!\n");
 		hdsp->state &= ~HDSP_FirmwareLoaded;
 		return -EIO;
 	}
@@ -680,7 +703,7 @@
 		}
 	}
 
-	snd_printk("Hammerfall-DSP: no Digiface or Multiface connected!\n");
+	snd_printk("Hammerfall-DSP: no IO box connected!\n");
 	hdsp->state &= ~HDSP_FirmwareLoaded;
 	return -EIO;
 }
@@ -752,17 +775,21 @@
 		hdsp_write (hdsp, HDSP_control2Reg, HDSP_S_LOAD);
 		hdsp_write (hdsp, HDSP_fifoData, 0);
 
-		if (hdsp_fifo_wait (hdsp, 0, HDSP_SHORT_WAIT)) {
-			hdsp->io_type = Multiface;
-			hdsp_write (hdsp, HDSP_control2Reg, HDSP_VERSION_BIT);
-			hdsp_write (hdsp, HDSP_control2Reg, HDSP_S_LOAD);
-			hdsp_fifo_wait (hdsp, 0, HDSP_SHORT_WAIT);
+		if (hdsp_fifo_wait(hdsp, 0, HDSP_SHORT_WAIT)) {
+			hdsp_write(hdsp, HDSP_control2Reg, HDSP_VERSION_BIT);
+			hdsp_write(hdsp, HDSP_control2Reg, HDSP_S_LOAD);
+			if (hdsp_fifo_wait(hdsp, 0, HDSP_SHORT_WAIT))
+				hdsp->io_type = RPM;
+			else
+				hdsp->io_type = Multiface;
 		} else {
 			hdsp->io_type = Digiface;
 		}
 	} else {
 		/* firmware was already loaded, get iobox type */
-		if (hdsp_read(hdsp, HDSP_status2Register) & HDSP_version1)
+		if (hdsp_read(hdsp, HDSP_status2Register) & HDSP_version2)
+			hdsp->io_type = RPM;
+		else if (hdsp_read(hdsp, HDSP_status2Register) & HDSP_version1)
 			hdsp->io_type = Multiface;
 		else
 			hdsp->io_type = Digiface;
@@ -1184,6 +1211,7 @@
 			hdsp->channel_map = channel_map_ds;
 	} else {
 		switch (hdsp->io_type) {
+		case RPM:
 		case Multiface:
 			hdsp->channel_map = channel_map_mf_ss;
 			break;
@@ -3231,6 +3259,318 @@
 HDSP_USE_MIDI_TASKLET("Use Midi Tasklet", 0),
 };
 
+
+static int hdsp_rpm_input12(struct hdsp *hdsp)
+{
+	switch (hdsp->control_register & HDSP_RPM_Inp12) {
+	case HDSP_RPM_Inp12_Phon_6dB:
+		return 0;
+	case HDSP_RPM_Inp12_Phon_n6dB:
+		return 2;
+	case HDSP_RPM_Inp12_Line_0dB:
+		return 3;
+	case HDSP_RPM_Inp12_Line_n6dB:
+		return 4;
+	}
+	return 1;
+}
+
+
+static int snd_hdsp_get_rpm_input12(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
+{
+	struct hdsp *hdsp = snd_kcontrol_chip(kcontrol);
+
+	ucontrol->value.enumerated.item[0] = hdsp_rpm_input12(hdsp);
+	return 0;
+}
+
+
+static int hdsp_set_rpm_input12(struct hdsp *hdsp, int mode)
+{
+	hdsp->control_register &= ~HDSP_RPM_Inp12;
+	switch (mode) {
+	case 0:
+		hdsp->control_register |= HDSP_RPM_Inp12_Phon_6dB;
+		break;
+	case 1:
+		break;
+	case 2:
+		hdsp->control_register |= HDSP_RPM_Inp12_Phon_n6dB;
+		break;
+	case 3:
+		hdsp->control_register |= HDSP_RPM_Inp12_Line_0dB;
+		break;
+	case 4:
+		hdsp->control_register |= HDSP_RPM_Inp12_Line_n6dB;
+		break;
+	default:
+		return -1;
+	}
+
+	hdsp_write(hdsp, HDSP_controlRegister, hdsp->control_register);
+	return 0;
+}
+
+
+static int snd_hdsp_put_rpm_input12(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
+{
+	struct hdsp *hdsp = snd_kcontrol_chip(kcontrol);
+	int change;
+	int val;
+
+	if (!snd_hdsp_use_is_exclusive(hdsp))
+		return -EBUSY;
+	val = ucontrol->value.enumerated.item[0];
+	if (val < 0)
+		val = 0;
+	if (val > 4)
+		val = 4;
+	spin_lock_irq(&hdsp->lock);
+	if (val != hdsp_rpm_input12(hdsp))
+		change = (hdsp_set_rpm_input12(hdsp, val) == 0) ? 1 : 0;
+	else
+		change = 0;
+	spin_unlock_irq(&hdsp->lock);
+	return change;
+}
+
+
+static int snd_hdsp_info_rpm_input(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
+{
+	static char *texts[] = {"Phono +6dB", "Phono 0dB", "Phono -6dB", "Line 0dB", "Line -6dB"};
+
+	uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
+	uinfo->count = 1;
+	uinfo->value.enumerated.items = 5;
+	if (uinfo->value.enumerated.item >= uinfo->value.enumerated.items)
+		uinfo->value.enumerated.item = uinfo->value.enumerated.items - 1;
+	strcpy(uinfo->value.enumerated.name, texts[uinfo->value.enumerated.item]);
+	return 0;
+}
+
+
+static int hdsp_rpm_input34(struct hdsp *hdsp)
+{
+	switch (hdsp->control_register & HDSP_RPM_Inp34) {
+	case HDSP_RPM_Inp34_Phon_6dB:
+		return 0;
+	case HDSP_RPM_Inp34_Phon_n6dB:
+		return 2;
+	case HDSP_RPM_Inp34_Line_0dB:
+		return 3;
+	case HDSP_RPM_Inp34_Line_n6dB:
+		return 4;
+	}
+	return 1;
+}
+
+
+static int snd_hdsp_get_rpm_input34(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
+{
+	struct hdsp *hdsp = snd_kcontrol_chip(kcontrol);
+
+	ucontrol->value.enumerated.item[0] = hdsp_rpm_input34(hdsp);
+	return 0;
+}
+
+
+static int hdsp_set_rpm_input34(struct hdsp *hdsp, int mode)
+{
+	hdsp->control_register &= ~HDSP_RPM_Inp34;
+	switch (mode) {
+	case 0:
+		hdsp->control_register |= HDSP_RPM_Inp34_Phon_6dB;
+		break;
+	case 1:
+		break;
+	case 2:
+		hdsp->control_register |= HDSP_RPM_Inp34_Phon_n6dB;
+		break;
+	case 3:
+		hdsp->control_register |= HDSP_RPM_Inp34_Line_0dB;
+		break;
+	case 4:
+		hdsp->control_register |= HDSP_RPM_Inp34_Line_n6dB;
+		break;
+	default:
+		return -1;
+	}
+
+	hdsp_write(hdsp, HDSP_controlRegister, hdsp->control_register);
+	return 0;
+}
+
+
+static int snd_hdsp_put_rpm_input34(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
+{
+	struct hdsp *hdsp = snd_kcontrol_chip(kcontrol);
+	int change;
+	int val;
+
+	if (!snd_hdsp_use_is_exclusive(hdsp))
+		return -EBUSY;
+	val = ucontrol->value.enumerated.item[0];
+	if (val < 0)
+		val = 0;
+	if (val > 4)
+		val = 4;
+	spin_lock_irq(&hdsp->lock);
+	if (val != hdsp_rpm_input34(hdsp))
+		change = (hdsp_set_rpm_input34(hdsp, val) == 0) ? 1 : 0;
+	else
+		change = 0;
+	spin_unlock_irq(&hdsp->lock);
+	return change;
+}
+
+
+/* RPM Bypass switch */
+static int hdsp_rpm_bypass(struct hdsp *hdsp)
+{
+	return (hdsp->control_register & HDSP_RPM_Bypass) ? 1 : 0;
+}
+
+
+static int snd_hdsp_get_rpm_bypass(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
+{
+	struct hdsp *hdsp = snd_kcontrol_chip(kcontrol);
+
+	ucontrol->value.integer.value[0] = hdsp_rpm_bypass(hdsp);
+	return 0;
+}
+
+
+static int hdsp_set_rpm_bypass(struct hdsp *hdsp, int on)
+{
+	if (on)
+		hdsp->control_register |= HDSP_RPM_Bypass;
+	else
+		hdsp->control_register &= ~HDSP_RPM_Bypass;
+	hdsp_write(hdsp, HDSP_controlRegister, hdsp->control_register);
+	return 0;
+}
+
+
+static int snd_hdsp_put_rpm_bypass(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
+{
+	struct hdsp *hdsp = snd_kcontrol_chip(kcontrol);
+	int change;
+	unsigned int val;
+
+	if (!snd_hdsp_use_is_exclusive(hdsp))
+		return -EBUSY;
+	val = ucontrol->value.integer.value[0] & 1;
+	spin_lock_irq(&hdsp->lock);
+	change = (int)val != hdsp_rpm_bypass(hdsp);
+	hdsp_set_rpm_bypass(hdsp, val);
+	spin_unlock_irq(&hdsp->lock);
+	return change;
+}
+
+
+static int snd_hdsp_info_rpm_bypass(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
+{
+	static char *texts[] = {"On", "Off"};
+
+	uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
+	uinfo->count = 1;
+	uinfo->value.enumerated.items = 2;
+	if (uinfo->value.enumerated.item >= uinfo->value.enumerated.items)
+		uinfo->value.enumerated.item = uinfo->value.enumerated.items - 1;
+	strcpy(uinfo->value.enumerated.name, texts[uinfo->value.enumerated.item]);
+	return 0;
+}
+
+
+/* RPM Disconnect switch */
+static int hdsp_rpm_disconnect(struct hdsp *hdsp)
+{
+	return (hdsp->control_register & HDSP_RPM_Disconnect) ? 1 : 0;
+}
+
+
+static int snd_hdsp_get_rpm_disconnect(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
+{
+	struct hdsp *hdsp = snd_kcontrol_chip(kcontrol);
+
+	ucontrol->value.integer.value[0] = hdsp_rpm_disconnect(hdsp);
+	return 0;
+}
+
+
+static int hdsp_set_rpm_disconnect(struct hdsp *hdsp, int on)
+{
+	if (on)
+		hdsp->control_register |= HDSP_RPM_Disconnect;
+	else
+		hdsp->control_register &= ~HDSP_RPM_Disconnect;
+	hdsp_write(hdsp, HDSP_controlRegister, hdsp->control_register);
+	return 0;
+}
+
+
+static int snd_hdsp_put_rpm_disconnect(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
+{
+	struct hdsp *hdsp = snd_kcontrol_chip(kcontrol);
+	int change;
+	unsigned int val;
+
+	if (!snd_hdsp_use_is_exclusive(hdsp))
+		return -EBUSY;
+	val = ucontrol->value.integer.value[0] & 1;
+	spin_lock_irq(&hdsp->lock);
+	change = (int)val != hdsp_rpm_disconnect(hdsp);
+	hdsp_set_rpm_disconnect(hdsp, val);
+	spin_unlock_irq(&hdsp->lock);
+	return change;
+}
+
+static int snd_hdsp_info_rpm_disconnect(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
+{
+	static char *texts[] = {"On", "Off"};
+
+	uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
+	uinfo->count = 1;
+	uinfo->value.enumerated.items = 2;
+	if (uinfo->value.enumerated.item >= uinfo->value.enumerated.items)
+		uinfo->value.enumerated.item = uinfo->value.enumerated.items - 1;
+	strcpy(uinfo->value.enumerated.name, texts[uinfo->value.enumerated.item]);
+	return 0;
+}
+
+static struct snd_kcontrol_new snd_hdsp_rpm_controls[] = {
+	{
+		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+		.name = "RPM Bypass",
+		.get = snd_hdsp_get_rpm_bypass,
+		.put = snd_hdsp_put_rpm_bypass,
+		.info = snd_hdsp_info_rpm_bypass
+	},
+	{
+		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+		.name = "RPM Disconnect",
+		.get = snd_hdsp_get_rpm_disconnect,
+		.put = snd_hdsp_put_rpm_disconnect,
+		.info = snd_hdsp_info_rpm_disconnect
+	},
+	{
+		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+		.name = "Input 1/2",
+		.get = snd_hdsp_get_rpm_input12,
+		.put = snd_hdsp_put_rpm_input12,
+		.info = snd_hdsp_info_rpm_input
+	},
+	{
+		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+		.name = "Input 3/4",
+		.get = snd_hdsp_get_rpm_input34,
+		.put = snd_hdsp_put_rpm_input34,
+		.info = snd_hdsp_info_rpm_input
+	},
+	HDSP_SYSTEM_SAMPLE_RATE("System Sample Rate", 0),
+	HDSP_MIXER("Mixer", 0)
+};
+
 static struct snd_kcontrol_new snd_hdsp_96xx_aeb = HDSP_AEB("Analog Extension Board", 0);
 static struct snd_kcontrol_new snd_hdsp_adat_sync_check = HDSP_ADAT_SYNC_CHECK;
 
@@ -3240,6 +3580,16 @@
 	int err;
 	struct snd_kcontrol *kctl;
 
+	if (hdsp->io_type == RPM) {
+		/* RPM Bypass, Disconnect and Input switches */
+		for (idx = 0; idx < ARRAY_SIZE(snd_hdsp_rpm_controls); idx++) {
+			err = snd_ctl_add(card, kctl = snd_ctl_new1(&snd_hdsp_rpm_controls[idx], hdsp));
+			if (err < 0)
+				return err;
+		}
+		return 0;
+	}
+
 	for (idx = 0; idx < ARRAY_SIZE(snd_hdsp_controls); idx++) {
 		if ((err = snd_ctl_add(card, kctl = snd_ctl_new1(&snd_hdsp_controls[idx], hdsp))) < 0)
 			return err;
@@ -3459,48 +3809,102 @@
 
 	snd_iprintf(buffer, "\n");
 
-	switch (hdsp_spdif_in(hdsp)) {
-	case HDSP_SPDIFIN_OPTICAL:
-		snd_iprintf(buffer, "IEC958 input: Optical\n");
-		break;
-	case HDSP_SPDIFIN_COAXIAL:
-		snd_iprintf(buffer, "IEC958 input: Coaxial\n");
-		break;
-	case HDSP_SPDIFIN_INTERNAL:
-		snd_iprintf(buffer, "IEC958 input: Internal\n");
-		break;
-	case HDSP_SPDIFIN_AES:
-		snd_iprintf(buffer, "IEC958 input: AES\n");
-		break;
-	default:
-		snd_iprintf(buffer, "IEC958 input: ???\n");
-		break;
+	if (hdsp->io_type != RPM) {
+		switch (hdsp_spdif_in(hdsp)) {
+		case HDSP_SPDIFIN_OPTICAL:
+			snd_iprintf(buffer, "IEC958 input: Optical\n");
+			break;
+		case HDSP_SPDIFIN_COAXIAL:
+			snd_iprintf(buffer, "IEC958 input: Coaxial\n");
+			break;
+		case HDSP_SPDIFIN_INTERNAL:
+			snd_iprintf(buffer, "IEC958 input: Internal\n");
+			break;
+		case HDSP_SPDIFIN_AES:
+			snd_iprintf(buffer, "IEC958 input: AES\n");
+			break;
+		default:
+			snd_iprintf(buffer, "IEC958 input: ???\n");
+			break;
+		}
 	}
 
-	if (hdsp->control_register & HDSP_SPDIFOpticalOut)
-		snd_iprintf(buffer, "IEC958 output: Coaxial & ADAT1\n");
-	else
-		snd_iprintf(buffer, "IEC958 output: Coaxial only\n");
+	if (RPM == hdsp->io_type) {
+		if (hdsp->control_register & HDSP_RPM_Bypass)
+			snd_iprintf(buffer, "RPM Bypass: disabled\n");
+		else
+			snd_iprintf(buffer, "RPM Bypass: enabled\n");
+		if (hdsp->control_register & HDSP_RPM_Disconnect)
+			snd_iprintf(buffer, "RPM disconnected\n");
+		else
+			snd_iprintf(buffer, "RPM connected\n");
 
-	if (hdsp->control_register & HDSP_SPDIFProfessional)
-		snd_iprintf(buffer, "IEC958 quality: Professional\n");
-	else
-		snd_iprintf(buffer, "IEC958 quality: Consumer\n");
+		switch (hdsp->control_register & HDSP_RPM_Inp12) {
+		case HDSP_RPM_Inp12_Phon_6dB:
+			snd_iprintf(buffer, "Input 1/2: Phono, 6dB\n");
+			break;
+		case HDSP_RPM_Inp12_Phon_0dB:
+			snd_iprintf(buffer, "Input 1/2: Phono, 0dB\n");
+			break;
+		case HDSP_RPM_Inp12_Phon_n6dB:
+			snd_iprintf(buffer, "Input 1/2: Phono, -6dB\n");
+			break;
+		case HDSP_RPM_Inp12_Line_0dB:
+			snd_iprintf(buffer, "Input 1/2: Line, 0dB\n");
+			break;
+		case HDSP_RPM_Inp12_Line_n6dB:
+			snd_iprintf(buffer, "Input 1/2: Line, -6dB\n");
+			break;
+		default:
+			snd_iprintf(buffer, "Input 1/2: ???\n");
+		}
 
-	if (hdsp->control_register & HDSP_SPDIFEmphasis)
-		snd_iprintf(buffer, "IEC958 emphasis: on\n");
-	else
-		snd_iprintf(buffer, "IEC958 emphasis: off\n");
+		switch (hdsp->control_register & HDSP_RPM_Inp34) {
+		case HDSP_RPM_Inp34_Phon_6dB:
+			snd_iprintf(buffer, "Input 3/4: Phono, 6dB\n");
+			break;
+		case HDSP_RPM_Inp34_Phon_0dB:
+			snd_iprintf(buffer, "Input 3/4: Phono, 0dB\n");
+			break;
+		case HDSP_RPM_Inp34_Phon_n6dB:
+			snd_iprintf(buffer, "Input 3/4: Phono, -6dB\n");
+			break;
+		case HDSP_RPM_Inp34_Line_0dB:
+			snd_iprintf(buffer, "Input 3/4: Line, 0dB\n");
+			break;
+		case HDSP_RPM_Inp34_Line_n6dB:
+			snd_iprintf(buffer, "Input 3/4: Line, -6dB\n");
+			break;
+		default:
+			snd_iprintf(buffer, "Input 3/4: ???\n");
+		}
 
-	if (hdsp->control_register & HDSP_SPDIFNonAudio)
-		snd_iprintf(buffer, "IEC958 NonAudio: on\n");
-	else
-		snd_iprintf(buffer, "IEC958 NonAudio: off\n");
-	if ((x = hdsp_spdif_sample_rate (hdsp)) != 0)
-		snd_iprintf (buffer, "IEC958 sample rate: %d\n", x);
-	else
-		snd_iprintf (buffer, "IEC958 sample rate: Error flag set\n");
+	} else {
+		if (hdsp->control_register & HDSP_SPDIFOpticalOut)
+			snd_iprintf(buffer, "IEC958 output: Coaxial & ADAT1\n");
+		else
+			snd_iprintf(buffer, "IEC958 output: Coaxial only\n");
 
+		if (hdsp->control_register & HDSP_SPDIFProfessional)
+			snd_iprintf(buffer, "IEC958 quality: Professional\n");
+		else
+			snd_iprintf(buffer, "IEC958 quality: Consumer\n");
+
+		if (hdsp->control_register & HDSP_SPDIFEmphasis)
+			snd_iprintf(buffer, "IEC958 emphasis: on\n");
+		else
+			snd_iprintf(buffer, "IEC958 emphasis: off\n");
+
+		if (hdsp->control_register & HDSP_SPDIFNonAudio)
+			snd_iprintf(buffer, "IEC958 NonAudio: on\n");
+		else
+			snd_iprintf(buffer, "IEC958 NonAudio: off\n");
+		x = hdsp_spdif_sample_rate(hdsp);
+		if (x != 0)
+			snd_iprintf(buffer, "IEC958 sample rate: %d\n", x);
+		else
+			snd_iprintf(buffer, "IEC958 sample rate: Error flag set\n");
+	}
 	snd_iprintf(buffer, "\n");
 
 	/* Sync Check */
@@ -3765,7 +4169,7 @@
 			snd_hdsp_midi_input_read (&hdsp->midi[0]);
 		}
 	}
-	if (hdsp->io_type != Multiface && hdsp->io_type != H9632 && midi1 && midi1status) {
+	if (hdsp->io_type != Multiface && hdsp->io_type != RPM && hdsp->io_type != H9632 && midi1 && midi1status) {
 		if (hdsp->use_midi_tasklet) {
 			/* we disable interrupts for this input until processing is done */
 			hdsp->control_register &= ~HDSP_Midi1InterruptEnable;
@@ -4093,7 +4497,7 @@
 				 SNDRV_PCM_RATE_96000),
 	.rate_min =		32000,
 	.rate_max =		96000,
-	.channels_min =		14,
+	.channels_min =		6,
 	.channels_max =		HDSP_MAX_CHANNELS,
 	.buffer_bytes_max =	HDSP_CHANNEL_BUFFER_BYTES * HDSP_MAX_CHANNELS,
 	.period_bytes_min =	(64 * 4) * 10,
@@ -4122,7 +4526,7 @@
 				 SNDRV_PCM_RATE_96000),
 	.rate_min =		32000,
 	.rate_max =		96000,
-	.channels_min =		14,
+	.channels_min =		5,
 	.channels_max =		HDSP_MAX_CHANNELS,
 	.buffer_bytes_max =	HDSP_CHANNEL_BUFFER_BYTES * HDSP_MAX_CHANNELS,
 	.period_bytes_min =	(64 * 4) * 10,
@@ -4357,10 +4761,12 @@
 			     snd_hdsp_hw_rule_rate_out_channels, hdsp,
 			     SNDRV_PCM_HW_PARAM_CHANNELS, -1);
 
-	hdsp->creg_spdif_stream = hdsp->creg_spdif;
-	hdsp->spdif_ctl->vd[0].access &= ~SNDRV_CTL_ELEM_ACCESS_INACTIVE;
-	snd_ctl_notify(hdsp->card, SNDRV_CTL_EVENT_MASK_VALUE |
-		       SNDRV_CTL_EVENT_MASK_INFO, &hdsp->spdif_ctl->id);
+	if (RPM != hdsp->io_type) {
+		hdsp->creg_spdif_stream = hdsp->creg_spdif;
+		hdsp->spdif_ctl->vd[0].access &= ~SNDRV_CTL_ELEM_ACCESS_INACTIVE;
+		snd_ctl_notify(hdsp->card, SNDRV_CTL_EVENT_MASK_VALUE |
+			SNDRV_CTL_EVENT_MASK_INFO, &hdsp->spdif_ctl->id);
+	}
 	return 0;
 }
 
@@ -4375,9 +4781,11 @@
 
 	spin_unlock_irq(&hdsp->lock);
 
-	hdsp->spdif_ctl->vd[0].access |= SNDRV_CTL_ELEM_ACCESS_INACTIVE;
-	snd_ctl_notify(hdsp->card, SNDRV_CTL_EVENT_MASK_VALUE |
-		       SNDRV_CTL_EVENT_MASK_INFO, &hdsp->spdif_ctl->id);
+	if (RPM != hdsp->io_type) {
+		hdsp->spdif_ctl->vd[0].access |= SNDRV_CTL_ELEM_ACCESS_INACTIVE;
+		snd_ctl_notify(hdsp->card, SNDRV_CTL_EVENT_MASK_VALUE |
+			SNDRV_CTL_EVENT_MASK_INFO, &hdsp->spdif_ctl->id);
+	}
 	return 0;
 }
 
@@ -4616,7 +5024,7 @@
 		if (hdsp->io_type != H9632)
 		    info.adatsync_sync_check = (unsigned char)hdsp_adatsync_sync_check(hdsp);
 		info.spdif_sync_check = (unsigned char)hdsp_spdif_sync_check(hdsp);
-		for (i = 0; i < ((hdsp->io_type != Multiface && hdsp->io_type != H9632) ? 3 : 1); ++i)
+		for (i = 0; i < ((hdsp->io_type != Multiface && hdsp->io_type != RPM && hdsp->io_type != H9632) ? 3 : 1); ++i)
 			info.adat_sync_check[i] = (unsigned char)hdsp_adat_sync_check(hdsp, i);
 		info.spdif_in = (unsigned char)hdsp_spdif_in(hdsp);
 		info.spdif_out = (unsigned char)hdsp_spdif_out(hdsp);
@@ -4636,6 +5044,9 @@
 			info.phone_gain = (unsigned char)hdsp_phone_gain(hdsp);
 			info.xlr_breakout_cable = (unsigned char)hdsp_xlr_breakout_cable(hdsp);
 
+		} else if (hdsp->io_type == RPM) {
+			info.da_gain = (unsigned char) hdsp_rpm_input12(hdsp);
+			info.ad_gain = (unsigned char) hdsp_rpm_input34(hdsp);
 		}
 		if (hdsp->io_type == H9632 || hdsp->io_type == H9652)
 			info.analog_extension_board = (unsigned char)hdsp_aeb(hdsp);
@@ -4844,6 +5255,14 @@
 		hdsp->ds_in_channels = hdsp->ds_out_channels = MULTIFACE_DS_CHANNELS;
 		break;
 
+	case RPM:
+		hdsp->card_name = "RME Hammerfall DSP + RPM";
+		hdsp->ss_in_channels = RPM_CHANNELS-1;
+		hdsp->ss_out_channels = RPM_CHANNELS;
+		hdsp->ds_in_channels = RPM_CHANNELS-1;
+		hdsp->ds_out_channels = RPM_CHANNELS;
+		break;
+
 	default:
  		/* should never get here */
 		break;
@@ -4930,6 +5349,9 @@
 
 	/* caution: max length of firmware filename is 30! */
 	switch (hdsp->io_type) {
+	case RPM:
+		fwfile = "rpm_firmware.bin";
+		break;
 	case Multiface:
 		if (hdsp->firmware_rev == 0xa)
 			fwfile = "multiface_firmware.bin";
@@ -5100,7 +5522,9 @@
 			return 0;
 		} else {
 			snd_printk(KERN_INFO "Hammerfall-DSP: Firmware already present, initializing card.\n");
-			if (hdsp_read(hdsp, HDSP_status2Register) & HDSP_version1)
+			if (hdsp_read(hdsp, HDSP_status2Register) & HDSP_version2)
+				hdsp->io_type = RPM;
+			else if (hdsp_read(hdsp, HDSP_status2Register) & HDSP_version1)
 				hdsp->io_type = Multiface;
 			else
 				hdsp->io_type = Digiface;
diff --git a/sound/pci/rme9652/hdspm.c b/sound/pci/rme9652/hdspm.c
index 0c98ef9..f5eadfc 100644
--- a/sound/pci/rme9652/hdspm.c
+++ b/sound/pci/rme9652/hdspm.c
@@ -487,7 +487,7 @@
 	struct snd_kcontrol *playback_mixer_ctls[HDSPM_MAX_CHANNELS];
 	/* but input to much, so not used */
 	struct snd_kcontrol *input_mixer_ctls[HDSPM_MAX_CHANNELS];
-	/* full mixer accessable over mixer ioctl or hwdep-device */
+	/* full mixer accessible over mixer ioctl or hwdep-device */
 	struct hdspm_mixer *mixer;
 
 };
@@ -550,7 +550,7 @@
 	return bit2freq_tab[n];
 }
 
-/* Write/read to/from HDSPM with Adresses in Bytes
+/* Write/read to/from HDSPM with Addresses in Bytes
    not words but only 32Bit writes are allowed */
 
 static inline void hdspm_write(struct hdspm * hdspm, unsigned int reg,
@@ -2908,7 +2908,7 @@
 
 	/* Channel playback mixer as default control 
 	   Note: the whole matrix would be 128*HDSPM_MIXER_CHANNELS Faders,
-	   thats too * big for any alsamixer they are accesible via special
+	   that's too big for any alsamixer; they are accessible via special
 	   IOCTL on hwdep and the mixer 2dimensional mixer control
 	*/
 
diff --git a/sound/pci/ymfpci/ymfpci_main.c b/sound/pci/ymfpci/ymfpci_main.c
index 5518371..c94c051 100644
--- a/sound/pci/ymfpci/ymfpci_main.c
+++ b/sound/pci/ymfpci/ymfpci_main.c
@@ -1389,15 +1389,9 @@
 
 static int snd_ymfpci_drec_source_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *info)
 {
-	static char *texts[3] = {"AC'97", "IEC958", "ZV Port"};
+	static const char *const texts[3] = {"AC'97", "IEC958", "ZV Port"};
 
-	info->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
-	info->count = 1;
-	info->value.enumerated.items = 3;
-	if (info->value.enumerated.item > 2)
-		info->value.enumerated.item = 2;
-	strcpy(info->value.enumerated.name, texts[info->value.enumerated.item]);
-	return 0;
+	return snd_ctl_enum_info(info, 1, 3, texts);
 }
 
 static int snd_ymfpci_drec_source_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *value)
diff --git a/sound/ppc/snd_ps3.c b/sound/ppc/snd_ps3.c
index 581a670..edce8a2 100644
--- a/sound/ppc/snd_ps3.c
+++ b/sound/ppc/snd_ps3.c
@@ -51,7 +51,7 @@
 static int snd_ps3_start_delay = CONFIG_SND_PS3_DEFAULT_START_DELAY;
 
 module_param_named(start_delay, snd_ps3_start_delay, uint, 0644);
-MODULE_PARM_DESC(start_delay, "time to insert silent data in milisec");
+MODULE_PARM_DESC(start_delay, "time to insert silent data in ms");
 
 static int index = SNDRV_DEFAULT_IDX1;
 static char *id = SNDRV_DEFAULT_STR1;
diff --git a/sound/soc/Kconfig b/sound/soc/Kconfig
index 3e598e7..a3efc52 100644
--- a/sound/soc/Kconfig
+++ b/sound/soc/Kconfig
@@ -20,6 +20,21 @@
 
 if SND_SOC
 
+config SND_SOC_CACHE_LZO
+	bool "Support LZO compression for register caches"
+	select LZO_COMPRESS
+	select LZO_DECOMPRESS
+	---help---
+	   Select this to enable LZO compression for register caches.
+	   This will allow machine or CODEC drivers to compress register
+	   caches in memory, reducing the memory consumption at the
+	   expense of performance.  If this option is disabled but a driver
+	   requests it, the system will fall back to uncompressed caches.
+
+	   Usually it is safe to disable this option; where cache
+	   compression is used, the rbtree option will typically perform
+	   better.
+
 config SND_SOC_AC97_BUS
 	bool
 
@@ -36,7 +51,7 @@
 source "sound/soc/omap/Kconfig"
 source "sound/soc/kirkwood/Kconfig"
 source "sound/soc/pxa/Kconfig"
-source "sound/soc/s3c24xx/Kconfig"
+source "sound/soc/samsung/Kconfig"
 source "sound/soc/s6000/Kconfig"
 source "sound/soc/sh/Kconfig"
 source "sound/soc/txx9/Kconfig"
diff --git a/sound/soc/Makefile b/sound/soc/Makefile
index eb18344..ce913bf 100644
--- a/sound/soc/Makefile
+++ b/sound/soc/Makefile
@@ -14,7 +14,7 @@
 obj-$(CONFIG_SND_SOC)	+= omap/
 obj-$(CONFIG_SND_SOC)	+= kirkwood/
 obj-$(CONFIG_SND_SOC)	+= pxa/
-obj-$(CONFIG_SND_SOC)	+= s3c24xx/
+obj-$(CONFIG_SND_SOC)	+= samsung/
 obj-$(CONFIG_SND_SOC)	+= s6000/
 obj-$(CONFIG_SND_SOC)	+= sh/
 obj-$(CONFIG_SND_SOC)	+= txx9/
diff --git a/sound/soc/atmel/playpaq_wm8510.c b/sound/soc/atmel/playpaq_wm8510.c
index 5f4e59f..1aac2f4 100644
--- a/sound/soc/atmel/playpaq_wm8510.c
+++ b/sound/soc/atmel/playpaq_wm8510.c
@@ -33,7 +33,6 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 
 #include <mach/at32ap700x.h>
 #include <mach/portmux.h>
@@ -318,27 +317,28 @@
 static int playpaq_wm8510_init(struct snd_soc_pcm_runtime *rtd)
 {
 	struct snd_soc_codec *codec = rtd->codec;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 	int i;
 
 	/*
 	 * Add DAPM widgets
 	 */
 	for (i = 0; i < ARRAY_SIZE(playpaq_dapm_widgets); i++)
-		snd_soc_dapm_new_control(codec, &playpaq_dapm_widgets[i]);
+		snd_soc_dapm_new_control(dapm, &playpaq_dapm_widgets[i]);
 
 
 
 	/*
 	 * Setup audio path interconnects
 	 */
-	snd_soc_dapm_add_routes(codec, intercon, ARRAY_SIZE(intercon));
+	snd_soc_dapm_add_routes(dapm, intercon, ARRAY_SIZE(intercon));
 
 
 
 	/* always connected pins */
-	snd_soc_dapm_enable_pin(codec, "Int Mic");
-	snd_soc_dapm_enable_pin(codec, "Ext Spk");
-	snd_soc_dapm_sync(codec);
+	snd_soc_dapm_enable_pin(dapm, "Int Mic");
+	snd_soc_dapm_enable_pin(dapm, "Ext Spk");
+	snd_soc_dapm_sync(dapm);
 
 
 
diff --git a/sound/soc/atmel/sam9g20_wm8731.c b/sound/soc/atmel/sam9g20_wm8731.c
index e521ada..af3c730 100644
--- a/sound/soc/atmel/sam9g20_wm8731.c
+++ b/sound/soc/atmel/sam9g20_wm8731.c
@@ -44,7 +44,6 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 
 #include <asm/mach-types.h>
 #include <mach/hardware.h>
@@ -140,6 +139,7 @@
 {
 	struct snd_soc_codec *codec = rtd->codec;
 	struct snd_soc_dai *codec_dai = rtd->codec_dai;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 	int ret;
 
 	printk(KERN_DEBUG
@@ -154,25 +154,25 @@
 	}
 
 	/* Add specific widgets */
-	snd_soc_dapm_new_controls(codec, at91sam9g20ek_dapm_widgets,
+	snd_soc_dapm_new_controls(dapm, at91sam9g20ek_dapm_widgets,
 				  ARRAY_SIZE(at91sam9g20ek_dapm_widgets));
 	/* Set up specific audio path interconnects */
-	snd_soc_dapm_add_routes(codec, intercon, ARRAY_SIZE(intercon));
+	snd_soc_dapm_add_routes(dapm, intercon, ARRAY_SIZE(intercon));
 
 	/* not connected */
-	snd_soc_dapm_nc_pin(codec, "RLINEIN");
-	snd_soc_dapm_nc_pin(codec, "LLINEIN");
+	snd_soc_dapm_nc_pin(dapm, "RLINEIN");
+	snd_soc_dapm_nc_pin(dapm, "LLINEIN");
 
 #ifdef ENABLE_MIC_INPUT
-	snd_soc_dapm_enable_pin(codec, "Int Mic");
+	snd_soc_dapm_enable_pin(dapm, "Int Mic");
 #else
-	snd_soc_dapm_nc_pin(codec, "Int Mic");
+	snd_soc_dapm_nc_pin(dapm, "Int Mic");
 #endif
 
 	/* always connected */
-	snd_soc_dapm_enable_pin(codec, "Ext Spk");
+	snd_soc_dapm_enable_pin(dapm, "Ext Spk");
 
-	snd_soc_dapm_sync(codec);
+	snd_soc_dapm_sync(dapm);
 
 	return 0;
 }
diff --git a/sound/soc/atmel/snd-soc-afeb9260.c b/sound/soc/atmel/snd-soc-afeb9260.c
index 86e0f85..da2208e 100644
--- a/sound/soc/atmel/snd-soc-afeb9260.c
+++ b/sound/soc/atmel/snd-soc-afeb9260.c
@@ -30,7 +30,6 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 
 #include <asm/mach-types.h>
 #include <mach/hardware.h>
@@ -105,19 +104,20 @@
 static int afeb9260_tlv320aic23_init(struct snd_soc_pcm_runtime *rtd)
 {
 	struct snd_soc_codec *codec = rtd->codec;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 
 	/* Add afeb9260 specific widgets */
-	snd_soc_dapm_new_controls(codec, tlv320aic23_dapm_widgets,
+	snd_soc_dapm_new_controls(dapm, tlv320aic23_dapm_widgets,
 				  ARRAY_SIZE(tlv320aic23_dapm_widgets));
 
 	/* Set up afeb9260 specific audio path audio_map */
-	snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+	snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 
-	snd_soc_dapm_enable_pin(codec, "Headphone Jack");
-	snd_soc_dapm_enable_pin(codec, "Line In");
-	snd_soc_dapm_enable_pin(codec, "Mic Jack");
+	snd_soc_dapm_enable_pin(dapm, "Headphone Jack");
+	snd_soc_dapm_enable_pin(dapm, "Line In");
+	snd_soc_dapm_enable_pin(dapm, "Mic Jack");
 
-	snd_soc_dapm_sync(codec);
+	snd_soc_dapm_sync(dapm);
 
 	return 0;
 }
diff --git a/sound/soc/au1x/db1200.c b/sound/soc/au1x/db1200.c
index b62fcd3..cb99f04 100644
--- a/sound/soc/au1x/db1200.c
+++ b/sound/soc/au1x/db1200.c
@@ -13,7 +13,6 @@
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <asm/mach-au1x00/au1000.h>
 #include <asm/mach-au1x00/au1xxx_psc.h>
 #include <asm/mach-au1x00/au1xxx_dbdma.h>
diff --git a/sound/soc/blackfin/bf5xx-ad1836.c b/sound/soc/blackfin/bf5xx-ad1836.c
index 2394bff..83012da 100644
--- a/sound/soc/blackfin/bf5xx-ad1836.c
+++ b/sound/soc/blackfin/bf5xx-ad1836.c
@@ -20,7 +20,6 @@
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/pcm_params.h>
 
 #include <asm/blackfin.h>
diff --git a/sound/soc/blackfin/bf5xx-ad193x.c b/sound/soc/blackfin/bf5xx-ad193x.c
index e4a6253..d3ccb92 100644
--- a/sound/soc/blackfin/bf5xx-ad193x.c
+++ b/sound/soc/blackfin/bf5xx-ad193x.c
@@ -29,7 +29,6 @@
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/pcm_params.h>
 
 #include <asm/blackfin.h>
diff --git a/sound/soc/blackfin/bf5xx-ad73311.c b/sound/soc/blackfin/bf5xx-ad73311.c
index 900ced5..732fb8b 100644
--- a/sound/soc/blackfin/bf5xx-ad73311.c
+++ b/sound/soc/blackfin/bf5xx-ad73311.c
@@ -35,7 +35,6 @@
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/pcm_params.h>
 
 #include <asm/blackfin.h>
diff --git a/sound/soc/blackfin/bf5xx-ssm2602.c b/sound/soc/blackfin/bf5xx-ssm2602.c
index 36f2769..e902b24 100644
--- a/sound/soc/blackfin/bf5xx-ssm2602.c
+++ b/sound/soc/blackfin/bf5xx-ssm2602.c
@@ -33,7 +33,6 @@
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/pcm_params.h>
 
 #include <asm/dma.h>
diff --git a/sound/soc/codecs/88pm860x-codec.c b/sound/soc/codecs/88pm860x-codec.c
index 01d19e9..06b6981 100644
--- a/sound/soc/codecs/88pm860x-codec.c
+++ b/sound/soc/codecs/88pm860x-codec.c
@@ -19,10 +19,10 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/tlv.h>
 #include <sound/initval.h>
 #include <sound/jack.h>
+#include <trace/events/asoc.h>
 
 #include "88pm860x-codec.h"
 
@@ -146,7 +146,6 @@
 
 	int			irq[4];
 	unsigned char		name[4][MAX_NAME_LEN];
-	unsigned char		reg_cache[REG_CACHE_SIZE];
 };
 
 /* -9450dB to 0dB in 150dB steps ( mute instead of -9450dB) */
@@ -1172,7 +1171,7 @@
 		break;
 
 	case SND_SOC_BIAS_STANDBY:
-		if (codec->bias_level == SND_SOC_BIAS_OFF) {
+		if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
 			/* Enable Audio PLL & Audio section */
 			data = AUDIO_PLL | AUDIO_SECTION_RESET
 				| AUDIO_SECTION_ON;
@@ -1185,7 +1184,7 @@
 		pm860x_set_bits(codec->control_data, REG_MISC2, data, 0);
 		break;
 	}
-	codec->bias_level = level;
+	codec->dapm.bias_level = level;
 	return 0;
 }
 
@@ -1263,6 +1262,12 @@
 	mask = pm860x->det.hs_shrt | pm860x->det.hook_det | pm860x->det.lo_shrt
 		| pm860x->det.hp_det;
 
+#ifndef CONFIG_SND_SOC_88PM860X_MODULE
+	if (status & (HEADSET_STATUS | MIC_STATUS | SHORT_HS1 | SHORT_HS2 |
+		      SHORT_LO1 | SHORT_LO2))
+		trace_snd_soc_jack_irq(dev_name(pm860x->codec->dev));
+#endif
+
 	if ((pm860x->det.hp_det & SND_JACK_HEADPHONE)
 		&& (status & HEADSET_STATUS))
 		report |= SND_JACK_HEADPHONE;
@@ -1346,6 +1351,7 @@
 static int pm860x_probe(struct snd_soc_codec *codec)
 {
 	struct pm860x_priv *pm860x = snd_soc_codec_get_drvdata(codec);
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 	int i, ret;
 
 	pm860x->codec = codec;
@@ -1358,7 +1364,7 @@
 					   pm860x->name[i], pm860x);
 		if (ret < 0) {
 			dev_err(codec->dev, "Failed to request IRQ!\n");
-			goto out_irq;
+			goto out;
 		}
 	}
 
@@ -1369,22 +1375,20 @@
 	if (ret < 0) {
 		dev_err(codec->dev, "Failed to fill register cache: %d\n",
 			ret);
-		goto out_codec;
+		goto out;
 	}
 
 	snd_soc_add_controls(codec, pm860x_snd_controls,
 			     ARRAY_SIZE(pm860x_snd_controls));
-	snd_soc_dapm_new_controls(codec, pm860x_dapm_widgets,
+	snd_soc_dapm_new_controls(dapm, pm860x_dapm_widgets,
 				  ARRAY_SIZE(pm860x_dapm_widgets));
-	snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+	snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 	return 0;
 
-out_codec:
-	i = 3;
-out_irq:
-	for (; i >= 0; i--)
+out:
+	while (--i >= 0)
 		free_irq(pm860x->irq[i], pm860x);
-	return -EINVAL;
+	return ret;
 }
 
 static int pm860x_remove(struct snd_soc_codec *codec)
diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig
index 3b5690d..883a312 100644
--- a/sound/soc/codecs/Kconfig
+++ b/sound/soc/codecs/Kconfig
@@ -22,6 +22,7 @@
 	select SND_SOC_AK4535 if I2C
 	select SND_SOC_AK4642 if I2C
 	select SND_SOC_AK4671 if I2C
+	select SND_SOC_ALC5623 if I2C
 	select SND_SOC_CQ0093VC if MFD_DAVINCI_VOICECODEC
 	select SND_SOC_CS42L51 if I2C
 	select SND_SOC_CS4270 if I2C
@@ -54,9 +55,11 @@
 	select SND_SOC_WM8727
 	select SND_SOC_WM8728 if SND_SOC_I2C_AND_SPI
 	select SND_SOC_WM8731 if SND_SOC_I2C_AND_SPI
+	select SND_SOC_WM8737 if SND_SOC_I2C_AND_SPI
 	select SND_SOC_WM8741 if SND_SOC_I2C_AND_SPI
 	select SND_SOC_WM8750 if SND_SOC_I2C_AND_SPI
 	select SND_SOC_WM8753 if SND_SOC_I2C_AND_SPI
+	select SND_SOC_WM8770 if SPI_MASTER
 	select SND_SOC_WM8776 if SND_SOC_I2C_AND_SPI
 	select SND_SOC_WM8804 if SND_SOC_I2C_AND_SPI
 	select SND_SOC_WM8900 if I2C
@@ -75,6 +78,7 @@
 	select SND_SOC_WM8990 if I2C
 	select SND_SOC_WM8993 if I2C
 	select SND_SOC_WM8994 if MFD_WM8994
+	select SND_SOC_WM8995 if SND_SOC_I2C_AND_SPI
 	select SND_SOC_WM9081 if I2C
 	select SND_SOC_WM9090 if I2C
 	select SND_SOC_WM9705 if SND_SOC_AC97_BUS
@@ -130,6 +134,9 @@
 config SND_SOC_AK4671
 	tristate
 
+config SND_SOC_ALC5623
+       tristate
+
 config SND_SOC_CQ0093VC
 	tristate
 
@@ -160,6 +167,9 @@
 config SND_SOC_DA7210
         tristate
 
+config SND_SOC_DMIC
+	tristate
+
 config SND_SOC_MAX98088
        tristate
 
@@ -231,6 +241,9 @@
 config SND_SOC_WM8731
 	tristate
 
+config SND_SOC_WM8737
+	tristate
+
 config SND_SOC_WM8741
 	tristate
 
@@ -240,6 +253,9 @@
 config SND_SOC_WM8753
 	tristate
 
+config SND_SOC_WM8770
+	tristate
+
 config SND_SOC_WM8776
 	tristate
 
@@ -294,6 +310,9 @@
 config SND_SOC_WM8994
 	tristate
 
+config SND_SOC_WM8995
+	tristate
+
 config SND_SOC_WM9081
 	tristate
 
@@ -318,3 +337,4 @@
 
 config SND_SOC_WM9090
 	tristate
+
diff --git a/sound/soc/codecs/Makefile b/sound/soc/codecs/Makefile
index f67a2d6..579af9c 100644
--- a/sound/soc/codecs/Makefile
+++ b/sound/soc/codecs/Makefile
@@ -14,9 +14,11 @@
 snd-soc-cs4270-objs := cs4270.o
 snd-soc-cx20442-objs := cx20442.o
 snd-soc-da7210-objs := da7210.o
+snd-soc-dmic-objs := dmic.o
 snd-soc-l3-objs := l3.o
 snd-soc-max98088-objs := max98088.o
 snd-soc-pcm3008-objs := pcm3008.o
+snd-soc-alc5623-objs := alc5623.o
 snd-soc-spdif-objs := spdif_transciever.o
 snd-soc-ssm2602-objs := ssm2602.o
 snd-soc-stac9766-objs := stac9766.o
@@ -38,9 +40,11 @@
 snd-soc-wm8727-objs := wm8727.o
 snd-soc-wm8728-objs := wm8728.o
 snd-soc-wm8731-objs := wm8731.o
+snd-soc-wm8737-objs := wm8737.o
 snd-soc-wm8741-objs := wm8741.o
 snd-soc-wm8750-objs := wm8750.o
 snd-soc-wm8753-objs := wm8753.o
+snd-soc-wm8770-objs := wm8770.o
 snd-soc-wm8776-objs := wm8776.o
 snd-soc-wm8804-objs := wm8804.o
 snd-soc-wm8900-objs := wm8900.o
@@ -58,7 +62,8 @@
 snd-soc-wm8988-objs := wm8988.o
 snd-soc-wm8990-objs := wm8990.o
 snd-soc-wm8993-objs := wm8993.o
-snd-soc-wm8994-objs := wm8994.o
+snd-soc-wm8994-objs := wm8994.o wm8994-tables.o
+snd-soc-wm8995-objs := wm8995.o
 snd-soc-wm9081-objs := wm9081.o
 snd-soc-wm9705-objs := wm9705.o
 snd-soc-wm9712-objs := wm9712.o
@@ -88,10 +93,12 @@
 obj-$(CONFIG_SND_SOC_CS4270)	+= snd-soc-cs4270.o
 obj-$(CONFIG_SND_SOC_CX20442)	+= snd-soc-cx20442.o
 obj-$(CONFIG_SND_SOC_DA7210)	+= snd-soc-da7210.o
+obj-$(CONFIG_SND_SOC_DMIC)	+= snd-soc-dmic.o
 obj-$(CONFIG_SND_SOC_L3)	+= snd-soc-l3.o
 obj-$(CONFIG_SND_SOC_JZ4740_CODEC)	+= snd-soc-jz4740-codec.o
 obj-$(CONFIG_SND_SOC_MAX98088)	+= snd-soc-max98088.o
 obj-$(CONFIG_SND_SOC_PCM3008)	+= snd-soc-pcm3008.o
+obj-$(CONFIG_SND_SOC_ALC5623)    += snd-soc-alc5623.o
 obj-$(CONFIG_SND_SOC_SPDIF)	+= snd-soc-spdif.o
 obj-$(CONFIG_SND_SOC_SSM2602)	+= snd-soc-ssm2602.o
 obj-$(CONFIG_SND_SOC_STAC9766)	+= snd-soc-stac9766.o
@@ -113,9 +120,11 @@
 obj-$(CONFIG_SND_SOC_WM8727)	+= snd-soc-wm8727.o
 obj-$(CONFIG_SND_SOC_WM8728)	+= snd-soc-wm8728.o
 obj-$(CONFIG_SND_SOC_WM8731)	+= snd-soc-wm8731.o
+obj-$(CONFIG_SND_SOC_WM8737)	+= snd-soc-wm8737.o
 obj-$(CONFIG_SND_SOC_WM8741)	+= snd-soc-wm8741.o
 obj-$(CONFIG_SND_SOC_WM8750)	+= snd-soc-wm8750.o
 obj-$(CONFIG_SND_SOC_WM8753)	+= snd-soc-wm8753.o
+obj-$(CONFIG_SND_SOC_WM8770)	+= snd-soc-wm8770.o
 obj-$(CONFIG_SND_SOC_WM8776)	+= snd-soc-wm8776.o
 obj-$(CONFIG_SND_SOC_WM8804)	+= snd-soc-wm8804.o
 obj-$(CONFIG_SND_SOC_WM8900)	+= snd-soc-wm8900.o
@@ -134,6 +143,7 @@
 obj-$(CONFIG_SND_SOC_WM8990)	+= snd-soc-wm8990.o
 obj-$(CONFIG_SND_SOC_WM8993)	+= snd-soc-wm8993.o
 obj-$(CONFIG_SND_SOC_WM8994)	+= snd-soc-wm8994.o
+obj-$(CONFIG_SND_SOC_WM8995)	+= snd-soc-wm8995.o
 obj-$(CONFIG_SND_SOC_WM9081)	+= snd-soc-wm9081.o
 obj-$(CONFIG_SND_SOC_WM9705)	+= snd-soc-wm9705.o
 obj-$(CONFIG_SND_SOC_WM9712)	+= snd-soc-wm9712.o
diff --git a/sound/soc/codecs/ad1836.c b/sound/soc/codecs/ad1836.c
index d272534..ab63d52 100644
--- a/sound/soc/codecs/ad1836.c
+++ b/sound/soc/codecs/ad1836.c
@@ -27,7 +27,6 @@
 #include <sound/initval.h>
 #include <sound/soc.h>
 #include <sound/tlv.h>
-#include <sound/soc-dapm.h>
 #include <linux/spi/spi.h>
 #include "ad1836.h"
 
@@ -220,6 +219,7 @@
 static int ad1836_probe(struct snd_soc_codec *codec)
 {
 	struct ad1836_priv *ad1836 = snd_soc_codec_get_drvdata(codec);
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 	int ret = 0;
 
 	codec->control_data = ad1836->control_data;
@@ -227,7 +227,6 @@
 	if (ret < 0) {
 		dev_err(codec->dev, "failed to set cache I/O: %d\n",
 				ret);
-		kfree(ad1836);
 		return ret;
 	}
 
@@ -252,9 +251,9 @@
 
 	snd_soc_add_controls(codec, ad1836_snd_controls,
 			     ARRAY_SIZE(ad1836_snd_controls));
-	snd_soc_dapm_new_controls(codec, ad1836_dapm_widgets,
+	snd_soc_dapm_new_controls(dapm, ad1836_dapm_widgets,
 				  ARRAY_SIZE(ad1836_dapm_widgets));
-	snd_soc_dapm_add_routes(codec, audio_paths, ARRAY_SIZE(audio_paths));
+	snd_soc_dapm_add_routes(dapm, audio_paths, ARRAY_SIZE(audio_paths));
 
 	return ret;
 }
diff --git a/sound/soc/codecs/ad193x.c b/sound/soc/codecs/ad193x.c
index fa2834c..da46479 100644
--- a/sound/soc/codecs/ad193x.c
+++ b/sound/soc/codecs/ad193x.c
@@ -19,12 +19,10 @@
 #include <sound/initval.h>
 #include <sound/soc.h>
 #include <sound/tlv.h>
-#include <sound/soc-dapm.h>
 #include "ad193x.h"
 
 /* codec private data */
 struct ad193x_priv {
-	u8 reg_cache[AD193X_NUM_REGS];
 	enum snd_soc_control_type bus_type;
 	void *control_data;
 	int sysclk;
@@ -353,6 +351,7 @@
 static int ad193x_probe(struct snd_soc_codec *codec)
 {
 	struct ad193x_priv *ad193x = snd_soc_codec_get_drvdata(codec);
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 	int ret;
 
 	codec->control_data = ad193x->control_data;
@@ -363,7 +362,6 @@
 	if (ret < 0) {
 		dev_err(codec->dev, "failed to set cache I/O: %d\n",
 				ret);
-		kfree(ad193x);
 		return ret;
 	}
 
@@ -385,9 +383,9 @@
 
 	snd_soc_add_controls(codec, ad193x_snd_controls,
 			     ARRAY_SIZE(ad193x_snd_controls));
-	snd_soc_dapm_new_controls(codec, ad193x_dapm_widgets,
+	snd_soc_dapm_new_controls(dapm, ad193x_dapm_widgets,
 				  ARRAY_SIZE(ad193x_dapm_widgets));
-	snd_soc_dapm_add_routes(codec, audio_paths, ARRAY_SIZE(audio_paths));
+	snd_soc_dapm_add_routes(dapm, audio_paths, ARRAY_SIZE(audio_paths));
 
 	return ret;
 }
diff --git a/sound/soc/codecs/ad1980.c b/sound/soc/codecs/ad1980.c
index 410ccd5..34cb51e 100644
--- a/sound/soc/codecs/ad1980.c
+++ b/sound/soc/codecs/ad1980.c
@@ -29,7 +29,6 @@
 #include <sound/ac97_codec.h>
 #include <sound/initval.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 
 #include "ad1980.h"
 
diff --git a/sound/soc/codecs/ak4535.c b/sound/soc/codecs/ak4535.c
index cd88c8f..8b38739 100644
--- a/sound/soc/codecs/ak4535.c
+++ b/sound/soc/codecs/ak4535.c
@@ -24,7 +24,6 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/initval.h>
 
 #include "ak4535.h"
@@ -290,10 +289,11 @@
 
 static int ak4535_add_widgets(struct snd_soc_codec *codec)
 {
-	snd_soc_dapm_new_controls(codec, ak4535_dapm_widgets,
-				  ARRAY_SIZE(ak4535_dapm_widgets));
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 
-	snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+	snd_soc_dapm_new_controls(dapm, ak4535_dapm_widgets,
+				  ARRAY_SIZE(ak4535_dapm_widgets));
+	snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 
 	return 0;
 }
@@ -366,9 +366,9 @@
 static int ak4535_mute(struct snd_soc_dai *dai, int mute)
 {
 	struct snd_soc_codec *codec = dai->codec;
-	u16 mute_reg = ak4535_read_reg_cache(codec, AK4535_DAC) & 0xffdf;
+	u16 mute_reg = ak4535_read_reg_cache(codec, AK4535_DAC);
 	if (!mute)
-		ak4535_write(codec, AK4535_DAC, mute_reg);
+		ak4535_write(codec, AK4535_DAC, mute_reg & ~0x20);
 	else
 		ak4535_write(codec, AK4535_DAC, mute_reg | 0x20);
 	return 0;
@@ -381,11 +381,11 @@
 
 	switch (level) {
 	case SND_SOC_BIAS_ON:
-		mute_reg = ak4535_read_reg_cache(codec, AK4535_DAC) & 0xffdf;
-		ak4535_write(codec, AK4535_DAC, mute_reg);
+		mute_reg = ak4535_read_reg_cache(codec, AK4535_DAC);
+		ak4535_write(codec, AK4535_DAC, mute_reg & ~0x20);
 		break;
 	case SND_SOC_BIAS_PREPARE:
-		mute_reg = ak4535_read_reg_cache(codec, AK4535_DAC) & 0xffdf;
+		mute_reg = ak4535_read_reg_cache(codec, AK4535_DAC);
 		ak4535_write(codec, AK4535_DAC, mute_reg | 0x20);
 		break;
 	case SND_SOC_BIAS_STANDBY:
@@ -399,7 +399,7 @@
 		ak4535_write(codec, AK4535_PM1, i & (~0x80));
 		break;
 	}
-	codec->bias_level = level;
+	codec->dapm.bias_level = level;
 	return 0;
 }
 
diff --git a/sound/soc/codecs/ak4642.c b/sound/soc/codecs/ak4642.c
index 90c90b7..f00eba3 100644
--- a/sound/soc/codecs/ak4642.c
+++ b/sound/soc/codecs/ak4642.c
@@ -26,7 +26,7 @@
 #include <linux/i2c.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
-#include <sound/soc-dapm.h>
+#include <sound/soc.h>
 #include <sound/initval.h>
 #include <sound/tlv.h>
 
diff --git a/sound/soc/codecs/ak4671.c b/sound/soc/codecs/ak4671.c
index 24f5f49..2ec75ab 100644
--- a/sound/soc/codecs/ak4671.c
+++ b/sound/soc/codecs/ak4671.c
@@ -17,7 +17,6 @@
 #include <linux/delay.h>
 #include <linux/slab.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/initval.h>
 #include <sound/tlv.h>
 
@@ -28,7 +27,6 @@
 struct ak4671_priv {
 	enum snd_soc_control_type control_type;
 	void *control_data;
-	u8 reg_cache[AK4671_CACHEREGNUM];
 };
 
 /* ak4671 register cache & default register settings */
@@ -437,10 +435,11 @@
 
 static int ak4671_add_widgets(struct snd_soc_codec *codec)
 {
-	snd_soc_dapm_new_controls(codec, ak4671_dapm_widgets,
-				  ARRAY_SIZE(ak4671_dapm_widgets));
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 
-	snd_soc_dapm_add_routes(codec, intercon, ARRAY_SIZE(intercon));
+	snd_soc_dapm_new_controls(dapm, ak4671_dapm_widgets,
+				  ARRAY_SIZE(ak4671_dapm_widgets));
+	snd_soc_dapm_add_routes(dapm, intercon, ARRAY_SIZE(intercon));
 
 	return 0;
 }
@@ -602,7 +601,7 @@
 		snd_soc_write(codec, AK4671_AD_DA_POWER_MANAGEMENT, 0x00);
 		break;
 	}
-	codec->bias_level = level;
+	codec->dapm.bias_level = level;
 	return 0;
 }
 
diff --git a/sound/soc/codecs/alc5623.c b/sound/soc/codecs/alc5623.c
new file mode 100644
index 0000000..4f377c9
--- /dev/null
+++ b/sound/soc/codecs/alc5623.c
@@ -0,0 +1,1117 @@
+/*
+ * alc5623.c  --  alc562[123] ALSA SoC Audio driver
+ *
+ * Copyright 2008 Realtek Microelectronics
+ * Author: flove <flove@realtek.com> Ethan <eku@marvell.com>
+ *
+ * Copyright 2010 Arnaud Patard <arnaud.patard@rtp-net.org>
+ *
+ *
+ * Based on WM8753.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/pm.h>
+#include <linux/i2c.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/tlv.h>
+#include <sound/soc.h>
+#include <sound/initval.h>
+#include <sound/alc5623.h>
+
+#include "alc5623.h"
+
+static int caps_charge = 2000;
+module_param(caps_charge, int, 0);
+MODULE_PARM_DESC(caps_charge, "ALC5623 cap charge time (msecs)");
+
+/* codec private data */
+struct alc5623_priv {
+	enum snd_soc_control_type control_type;
+	void *control_data;
+	struct mutex mutex;
+	u8 id;
+	unsigned int sysclk;
+	u16 reg_cache[ALC5623_VENDOR_ID2+2];
+	unsigned int add_ctrl;
+	unsigned int jack_det_ctrl;
+};
+
+static void alc5623_fill_cache(struct snd_soc_codec *codec)
+{
+	int i, step = codec->driver->reg_cache_step;
+	u16 *cache = codec->reg_cache;
+
+	/* not really efficient ... */
+	for (i = 0 ; i < codec->driver->reg_cache_size ; i += step)
+		cache[i] = codec->hw_read(codec, i);
+}
+
+static inline int alc5623_reset(struct snd_soc_codec *codec)
+{
+	return snd_soc_write(codec, ALC5623_RESET, 0);
+}
+
+static int amp_mixer_event(struct snd_soc_dapm_widget *w,
+	struct snd_kcontrol *kcontrol, int event)
+{
+	/* to power-on/off class-d amp generators/speaker */
+	/* need to write to 'index-46h' register :        */
+	/* so write index num (here 0x46) to reg 0x6a     */
+	/* and then 0xffff/0 to reg 0x6c                  */
+	snd_soc_write(w->codec, ALC5623_HID_CTRL_INDEX, 0x46);
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		snd_soc_write(w->codec, ALC5623_HID_CTRL_DATA, 0xFFFF);
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		snd_soc_write(w->codec, ALC5623_HID_CTRL_DATA, 0);
+		break;
+	}
+
+	return 0;
+}
+
+/*
+ * ALC5623 Controls
+ */
+
+static const DECLARE_TLV_DB_SCALE(vol_tlv, -3450, 150, 0);
+static const DECLARE_TLV_DB_SCALE(hp_tlv, -4650, 150, 0);
+static const DECLARE_TLV_DB_SCALE(adc_rec_tlv, -1650, 150, 0);
+static const unsigned int boost_tlv[] = {
+	TLV_DB_RANGE_HEAD(3),
+	0, 0, TLV_DB_SCALE_ITEM(0, 0, 0),
+	1, 1, TLV_DB_SCALE_ITEM(2000, 0, 0),
+	2, 2, TLV_DB_SCALE_ITEM(3000, 0, 0),
+};
+static const DECLARE_TLV_DB_SCALE(dig_tlv, 0, 600, 0);
+
+static const struct snd_kcontrol_new rt5621_vol_snd_controls[] = {
+	SOC_DOUBLE_TLV("Speaker Playback Volume",
+			ALC5623_SPK_OUT_VOL, 8, 0, 31, 1, hp_tlv),
+	SOC_DOUBLE("Speaker Playback Switch",
+			ALC5623_SPK_OUT_VOL, 15, 7, 1, 1),
+	SOC_DOUBLE_TLV("Headphone Playback Volume",
+			ALC5623_HP_OUT_VOL, 8, 0, 31, 1, hp_tlv),
+	SOC_DOUBLE("Headphone Playback Switch",
+			ALC5623_HP_OUT_VOL, 15, 7, 1, 1),
+};
+
+static const struct snd_kcontrol_new rt5622_vol_snd_controls[] = {
+	SOC_DOUBLE_TLV("Speaker Playback Volume",
+			ALC5623_SPK_OUT_VOL, 8, 0, 31, 1, hp_tlv),
+	SOC_DOUBLE("Speaker Playback Switch",
+			ALC5623_SPK_OUT_VOL, 15, 7, 1, 1),
+	SOC_DOUBLE_TLV("Line Playback Volume",
+			ALC5623_HP_OUT_VOL, 8, 0, 31, 1, hp_tlv),
+	SOC_DOUBLE("Line Playback Switch",
+			ALC5623_HP_OUT_VOL, 15, 7, 1, 1),
+};
+
+static const struct snd_kcontrol_new alc5623_vol_snd_controls[] = {
+	SOC_DOUBLE_TLV("Line Playback Volume",
+			ALC5623_SPK_OUT_VOL, 8, 0, 31, 1, hp_tlv),
+	SOC_DOUBLE("Line Playback Switch",
+			ALC5623_SPK_OUT_VOL, 15, 7, 1, 1),
+	SOC_DOUBLE_TLV("Headphone Playback Volume",
+			ALC5623_HP_OUT_VOL, 8, 0, 31, 1, hp_tlv),
+	SOC_DOUBLE("Headphone Playback Switch",
+			ALC5623_HP_OUT_VOL, 15, 7, 1, 1),
+};
+
+static const struct snd_kcontrol_new alc5623_snd_controls[] = {
+	SOC_DOUBLE_TLV("Auxout Playback Volume",
+			ALC5623_MONO_AUX_OUT_VOL, 8, 0, 31, 1, hp_tlv),
+	SOC_DOUBLE("Auxout Playback Switch",
+			ALC5623_MONO_AUX_OUT_VOL, 15, 7, 1, 1),
+	SOC_DOUBLE_TLV("PCM Playback Volume",
+			ALC5623_STEREO_DAC_VOL, 8, 0, 31, 1, vol_tlv),
+	SOC_DOUBLE_TLV("AuxI Capture Volume",
+			ALC5623_AUXIN_VOL, 8, 0, 31, 1, vol_tlv),
+	SOC_DOUBLE_TLV("LineIn Capture Volume",
+			ALC5623_LINE_IN_VOL, 8, 0, 31, 1, vol_tlv),
+	SOC_SINGLE_TLV("Mic1 Capture Volume",
+			ALC5623_MIC_VOL, 8, 31, 1, vol_tlv),
+	SOC_SINGLE_TLV("Mic2 Capture Volume",
+			ALC5623_MIC_VOL, 0, 31, 1, vol_tlv),
+	SOC_DOUBLE_TLV("Rec Capture Volume",
+			ALC5623_ADC_REC_GAIN, 7, 0, 31, 0, adc_rec_tlv),
+	SOC_SINGLE_TLV("Mic 1 Boost Volume",
+			ALC5623_MIC_CTRL, 10, 2, 0, boost_tlv),
+	SOC_SINGLE_TLV("Mic 2 Boost Volume",
+			ALC5623_MIC_CTRL, 8, 2, 0, boost_tlv),
+	SOC_SINGLE_TLV("Digital Boost Volume",
+			ALC5623_ADD_CTRL_REG, 4, 3, 0, dig_tlv),
+};
+
+/*
+ * DAPM Controls
+ */
+static const struct snd_kcontrol_new alc5623_hp_mixer_controls[] = {
+SOC_DAPM_SINGLE("LI2HP Playback Switch", ALC5623_LINE_IN_VOL, 15, 1, 1),
+SOC_DAPM_SINGLE("AUXI2HP Playback Switch", ALC5623_AUXIN_VOL, 15, 1, 1),
+SOC_DAPM_SINGLE("MIC12HP Playback Switch", ALC5623_MIC_ROUTING_CTRL, 15, 1, 1),
+SOC_DAPM_SINGLE("MIC22HP Playback Switch", ALC5623_MIC_ROUTING_CTRL, 7, 1, 1),
+SOC_DAPM_SINGLE("DAC2HP Playback Switch", ALC5623_STEREO_DAC_VOL, 15, 1, 1),
+};
+
+static const struct snd_kcontrol_new alc5623_hpl_mixer_controls[] = {
+SOC_DAPM_SINGLE("ADC2HP_L Playback Switch", ALC5623_ADC_REC_GAIN, 15, 1, 1),
+};
+
+static const struct snd_kcontrol_new alc5623_hpr_mixer_controls[] = {
+SOC_DAPM_SINGLE("ADC2HP_R Playback Switch", ALC5623_ADC_REC_GAIN, 14, 1, 1),
+};
+
+static const struct snd_kcontrol_new alc5623_mono_mixer_controls[] = {
+SOC_DAPM_SINGLE("ADC2MONO_L Playback Switch", ALC5623_ADC_REC_GAIN, 13, 1, 1),
+SOC_DAPM_SINGLE("ADC2MONO_R Playback Switch", ALC5623_ADC_REC_GAIN, 12, 1, 1),
+SOC_DAPM_SINGLE("LI2MONO Playback Switch", ALC5623_LINE_IN_VOL, 13, 1, 1),
+SOC_DAPM_SINGLE("AUXI2MONO Playback Switch", ALC5623_AUXIN_VOL, 13, 1, 1),
+SOC_DAPM_SINGLE("MIC12MONO Playback Switch", ALC5623_MIC_ROUTING_CTRL, 13, 1, 1),
+SOC_DAPM_SINGLE("MIC22MONO Playback Switch", ALC5623_MIC_ROUTING_CTRL, 5, 1, 1),
+SOC_DAPM_SINGLE("DAC2MONO Playback Switch", ALC5623_STEREO_DAC_VOL, 13, 1, 1),
+};
+
+static const struct snd_kcontrol_new alc5623_speaker_mixer_controls[] = {
+SOC_DAPM_SINGLE("LI2SPK Playback Switch", ALC5623_LINE_IN_VOL, 14, 1, 1),
+SOC_DAPM_SINGLE("AUXI2SPK Playback Switch", ALC5623_AUXIN_VOL, 14, 1, 1),
+SOC_DAPM_SINGLE("MIC12SPK Playback Switch", ALC5623_MIC_ROUTING_CTRL, 14, 1, 1),
+SOC_DAPM_SINGLE("MIC22SPK Playback Switch", ALC5623_MIC_ROUTING_CTRL, 6, 1, 1),
+SOC_DAPM_SINGLE("DAC2SPK Playback Switch", ALC5623_STEREO_DAC_VOL, 14, 1, 1),
+};
+
+/* Left Record Mixer */
+static const struct snd_kcontrol_new alc5623_captureL_mixer_controls[] = {
+SOC_DAPM_SINGLE("Mic1 Capture Switch", ALC5623_ADC_REC_MIXER, 14, 1, 1),
+SOC_DAPM_SINGLE("Mic2 Capture Switch", ALC5623_ADC_REC_MIXER, 13, 1, 1),
+SOC_DAPM_SINGLE("LineInL Capture Switch", ALC5623_ADC_REC_MIXER, 12, 1, 1),
+SOC_DAPM_SINGLE("Left AuxI Capture Switch", ALC5623_ADC_REC_MIXER, 11, 1, 1),
+SOC_DAPM_SINGLE("HPMixerL Capture Switch", ALC5623_ADC_REC_MIXER, 10, 1, 1),
+SOC_DAPM_SINGLE("SPKMixer Capture Switch", ALC5623_ADC_REC_MIXER, 9, 1, 1),
+SOC_DAPM_SINGLE("MonoMixer Capture Switch", ALC5623_ADC_REC_MIXER, 8, 1, 1),
+};
+
+/* Right Record Mixer */
+static const struct snd_kcontrol_new alc5623_captureR_mixer_controls[] = {
+SOC_DAPM_SINGLE("Mic1 Capture Switch", ALC5623_ADC_REC_MIXER, 6, 1, 1),
+SOC_DAPM_SINGLE("Mic2 Capture Switch", ALC5623_ADC_REC_MIXER, 5, 1, 1),
+SOC_DAPM_SINGLE("LineInR Capture Switch", ALC5623_ADC_REC_MIXER, 4, 1, 1),
+SOC_DAPM_SINGLE("Right AuxI Capture Switch", ALC5623_ADC_REC_MIXER, 3, 1, 1),
+SOC_DAPM_SINGLE("HPMixerR Capture Switch", ALC5623_ADC_REC_MIXER, 2, 1, 1),
+SOC_DAPM_SINGLE("SPKMixer Capture Switch", ALC5623_ADC_REC_MIXER, 1, 1, 1),
+SOC_DAPM_SINGLE("MonoMixer Capture Switch", ALC5623_ADC_REC_MIXER, 0, 1, 1),
+};
+
+static const char *alc5623_spk_n_sour_sel[] = {
+		"RN/-R", "RP/+R", "LN/-R", "Vmid" };
+static const char *alc5623_hpl_out_input_sel[] = {
+		"Vmid", "HP Left Mix"};
+static const char *alc5623_hpr_out_input_sel[] = {
+		"Vmid", "HP Right Mix"};
+static const char *alc5623_spkout_input_sel[] = {
+		"Vmid", "HPOut Mix", "Speaker Mix", "Mono Mix"};
+static const char *alc5623_aux_out_input_sel[] = {
+		"Vmid", "HPOut Mix", "Speaker Mix", "Mono Mix"};
+
+/* auxout output mux */
+static const struct soc_enum alc5623_aux_out_input_enum =
+SOC_ENUM_SINGLE(ALC5623_OUTPUT_MIXER_CTRL, 6, 4, alc5623_aux_out_input_sel);
+static const struct snd_kcontrol_new alc5623_auxout_mux_controls =
+SOC_DAPM_ENUM("Route", alc5623_aux_out_input_enum);
+
+/* speaker output mux */
+static const struct soc_enum alc5623_spkout_input_enum =
+SOC_ENUM_SINGLE(ALC5623_OUTPUT_MIXER_CTRL, 10, 4, alc5623_spkout_input_sel);
+static const struct snd_kcontrol_new alc5623_spkout_mux_controls =
+SOC_DAPM_ENUM("Route", alc5623_spkout_input_enum);
+
+/* headphone left output mux */
+static const struct soc_enum alc5623_hpl_out_input_enum =
+SOC_ENUM_SINGLE(ALC5623_OUTPUT_MIXER_CTRL, 9, 2, alc5623_hpl_out_input_sel);
+static const struct snd_kcontrol_new alc5623_hpl_out_mux_controls =
+SOC_DAPM_ENUM("Route", alc5623_hpl_out_input_enum);
+
+/* headphone right output mux */
+static const struct soc_enum alc5623_hpr_out_input_enum =
+SOC_ENUM_SINGLE(ALC5623_OUTPUT_MIXER_CTRL, 8, 2, alc5623_hpr_out_input_sel);
+static const struct snd_kcontrol_new alc5623_hpr_out_mux_controls =
+SOC_DAPM_ENUM("Route", alc5623_hpr_out_input_enum);
+
+/* speaker output N select */
+static const struct soc_enum alc5623_spk_n_sour_enum =
+SOC_ENUM_SINGLE(ALC5623_OUTPUT_MIXER_CTRL, 14, 4, alc5623_spk_n_sour_sel);
+static const struct snd_kcontrol_new alc5623_spkoutn_mux_controls =
+SOC_DAPM_ENUM("Route", alc5623_spk_n_sour_enum);
+
+static const struct snd_soc_dapm_widget alc5623_dapm_widgets[] = {
+/* Muxes */
+SND_SOC_DAPM_MUX("AuxOut Mux", SND_SOC_NOPM, 0, 0,
+	&alc5623_auxout_mux_controls),
+SND_SOC_DAPM_MUX("SpeakerOut Mux", SND_SOC_NOPM, 0, 0,
+	&alc5623_spkout_mux_controls),
+SND_SOC_DAPM_MUX("Left Headphone Mux", SND_SOC_NOPM, 0, 0,
+	&alc5623_hpl_out_mux_controls),
+SND_SOC_DAPM_MUX("Right Headphone Mux", SND_SOC_NOPM, 0, 0,
+	&alc5623_hpr_out_mux_controls),
+SND_SOC_DAPM_MUX("SpeakerOut N Mux", SND_SOC_NOPM, 0, 0,
+	&alc5623_spkoutn_mux_controls),
+
+/* output mixers */
+SND_SOC_DAPM_MIXER("HP Mix", SND_SOC_NOPM, 0, 0,
+	&alc5623_hp_mixer_controls[0],
+	ARRAY_SIZE(alc5623_hp_mixer_controls)),
+SND_SOC_DAPM_MIXER("HPR Mix", ALC5623_PWR_MANAG_ADD2, 4, 0,
+	&alc5623_hpr_mixer_controls[0],
+	ARRAY_SIZE(alc5623_hpr_mixer_controls)),
+SND_SOC_DAPM_MIXER("HPL Mix", ALC5623_PWR_MANAG_ADD2, 5, 0,
+	&alc5623_hpl_mixer_controls[0],
+	ARRAY_SIZE(alc5623_hpl_mixer_controls)),
+SND_SOC_DAPM_MIXER("HPOut Mix", SND_SOC_NOPM, 0, 0, NULL, 0),
+SND_SOC_DAPM_MIXER("Mono Mix", ALC5623_PWR_MANAG_ADD2, 2, 0,
+	&alc5623_mono_mixer_controls[0],
+	ARRAY_SIZE(alc5623_mono_mixer_controls)),
+SND_SOC_DAPM_MIXER("Speaker Mix", ALC5623_PWR_MANAG_ADD2, 3, 0,
+	&alc5623_speaker_mixer_controls[0],
+	ARRAY_SIZE(alc5623_speaker_mixer_controls)),
+
+/* input mixers */
+SND_SOC_DAPM_MIXER("Left Capture Mix", ALC5623_PWR_MANAG_ADD2, 1, 0,
+	&alc5623_captureL_mixer_controls[0],
+	ARRAY_SIZE(alc5623_captureL_mixer_controls)),
+SND_SOC_DAPM_MIXER("Right Capture Mix", ALC5623_PWR_MANAG_ADD2, 0, 0,
+	&alc5623_captureR_mixer_controls[0],
+	ARRAY_SIZE(alc5623_captureR_mixer_controls)),
+
+SND_SOC_DAPM_DAC("Left DAC", "Left HiFi Playback",
+	ALC5623_PWR_MANAG_ADD2, 9, 0),
+SND_SOC_DAPM_DAC("Right DAC", "Right HiFi Playback",
+	ALC5623_PWR_MANAG_ADD2, 8, 0),
+SND_SOC_DAPM_MIXER("I2S Mix", ALC5623_PWR_MANAG_ADD1, 15, 0, NULL, 0),
+SND_SOC_DAPM_MIXER("AuxI Mix", SND_SOC_NOPM, 0, 0, NULL, 0),
+SND_SOC_DAPM_MIXER("Line Mix", SND_SOC_NOPM, 0, 0, NULL, 0),
+SND_SOC_DAPM_ADC("Left ADC", "Left HiFi Capture",
+	ALC5623_PWR_MANAG_ADD2, 7, 0),
+SND_SOC_DAPM_ADC("Right ADC", "Right HiFi Capture",
+	ALC5623_PWR_MANAG_ADD2, 6, 0),
+SND_SOC_DAPM_PGA("Left Headphone", ALC5623_PWR_MANAG_ADD3, 10, 0, NULL, 0),
+SND_SOC_DAPM_PGA("Right Headphone", ALC5623_PWR_MANAG_ADD3, 9, 0, NULL, 0),
+SND_SOC_DAPM_PGA("SpeakerOut", ALC5623_PWR_MANAG_ADD3, 12, 0, NULL, 0),
+SND_SOC_DAPM_PGA("Left AuxOut", ALC5623_PWR_MANAG_ADD3, 14, 0, NULL, 0),
+SND_SOC_DAPM_PGA("Right AuxOut", ALC5623_PWR_MANAG_ADD3, 13, 0, NULL, 0),
+SND_SOC_DAPM_PGA("Left LineIn", ALC5623_PWR_MANAG_ADD3, 7, 0, NULL, 0),
+SND_SOC_DAPM_PGA("Right LineIn", ALC5623_PWR_MANAG_ADD3, 6, 0, NULL, 0),
+SND_SOC_DAPM_PGA("Left AuxI", ALC5623_PWR_MANAG_ADD3, 5, 0, NULL, 0),
+SND_SOC_DAPM_PGA("Right AuxI", ALC5623_PWR_MANAG_ADD3, 4, 0, NULL, 0),
+SND_SOC_DAPM_PGA("MIC1 PGA", ALC5623_PWR_MANAG_ADD3, 3, 0, NULL, 0),
+SND_SOC_DAPM_PGA("MIC2 PGA", ALC5623_PWR_MANAG_ADD3, 2, 0, NULL, 0),
+SND_SOC_DAPM_PGA("MIC1 Pre Amp", ALC5623_PWR_MANAG_ADD3, 1, 0, NULL, 0),
+SND_SOC_DAPM_PGA("MIC2 Pre Amp", ALC5623_PWR_MANAG_ADD3, 0, 0, NULL, 0),
+SND_SOC_DAPM_MICBIAS("Mic Bias1", ALC5623_PWR_MANAG_ADD1, 11, 0),
+
+SND_SOC_DAPM_OUTPUT("AUXOUTL"),
+SND_SOC_DAPM_OUTPUT("AUXOUTR"),
+SND_SOC_DAPM_OUTPUT("HPL"),
+SND_SOC_DAPM_OUTPUT("HPR"),
+SND_SOC_DAPM_OUTPUT("SPKOUT"),
+SND_SOC_DAPM_OUTPUT("SPKOUTN"),
+SND_SOC_DAPM_INPUT("LINEINL"),
+SND_SOC_DAPM_INPUT("LINEINR"),
+SND_SOC_DAPM_INPUT("AUXINL"),
+SND_SOC_DAPM_INPUT("AUXINR"),
+SND_SOC_DAPM_INPUT("MIC1"),
+SND_SOC_DAPM_INPUT("MIC2"),
+SND_SOC_DAPM_VMID("Vmid"),
+};
+
+static const char *alc5623_amp_names[] = {"AB Amp", "D Amp"};
+static const struct soc_enum alc5623_amp_enum =
+	SOC_ENUM_SINGLE(ALC5623_OUTPUT_MIXER_CTRL, 13, 2, alc5623_amp_names);
+static const struct snd_kcontrol_new alc5623_amp_mux_controls =
+	SOC_DAPM_ENUM("Route", alc5623_amp_enum);
+
+static const struct snd_soc_dapm_widget alc5623_dapm_amp_widgets[] = {
+SND_SOC_DAPM_PGA_E("D Amp", ALC5623_PWR_MANAG_ADD2, 14, 0, NULL, 0,
+	amp_mixer_event, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+SND_SOC_DAPM_PGA("AB Amp", ALC5623_PWR_MANAG_ADD2, 15, 0, NULL, 0),
+SND_SOC_DAPM_MUX("AB-D Amp Mux", SND_SOC_NOPM, 0, 0,
+	&alc5623_amp_mux_controls),
+};
+
+static const struct snd_soc_dapm_route intercon[] = {
+	/* virtual mixer - mixes left & right channels */
+	{"I2S Mix", NULL,				"Left DAC"},
+	{"I2S Mix", NULL,				"Right DAC"},
+	{"Line Mix", NULL,				"Right LineIn"},
+	{"Line Mix", NULL,				"Left LineIn"},
+	{"AuxI Mix", NULL,				"Left AuxI"},
+	{"AuxI Mix", NULL,				"Right AuxI"},
+	{"AUXOUTL", NULL,				"Left AuxOut"},
+	{"AUXOUTR", NULL,				"Right AuxOut"},
+
+	/* HP mixer */
+	{"HPL Mix", "ADC2HP_L Playback Switch",		"Left Capture Mix"},
+	{"HPL Mix", NULL,				"HP Mix"},
+	{"HPR Mix", "ADC2HP_R Playback Switch",		"Right Capture Mix"},
+	{"HPR Mix", NULL,				"HP Mix"},
+	{"HP Mix", "LI2HP Playback Switch",		"Line Mix"},
+	{"HP Mix", "AUXI2HP Playback Switch",		"AuxI Mix"},
+	{"HP Mix", "MIC12HP Playback Switch",		"MIC1 PGA"},
+	{"HP Mix", "MIC22HP Playback Switch",		"MIC2 PGA"},
+	{"HP Mix", "DAC2HP Playback Switch",		"I2S Mix"},
+
+	/* speaker mixer */
+	{"Speaker Mix", "LI2SPK Playback Switch",	"Line Mix"},
+	{"Speaker Mix", "AUXI2SPK Playback Switch",	"AuxI Mix"},
+	{"Speaker Mix", "MIC12SPK Playback Switch",	"MIC1 PGA"},
+	{"Speaker Mix", "MIC22SPK Playback Switch",	"MIC2 PGA"},
+	{"Speaker Mix", "DAC2SPK Playback Switch",	"I2S Mix"},
+
+	/* mono mixer */
+	{"Mono Mix", "ADC2MONO_L Playback Switch",	"Left Capture Mix"},
+	{"Mono Mix", "ADC2MONO_R Playback Switch",	"Right Capture Mix"},
+	{"Mono Mix", "LI2MONO Playback Switch",		"Line Mix"},
+	{"Mono Mix", "AUXI2MONO Playback Switch",	"AuxI Mix"},
+	{"Mono Mix", "MIC12MONO Playback Switch",	"MIC1 PGA"},
+	{"Mono Mix", "MIC22MONO Playback Switch",	"MIC2 PGA"},
+	{"Mono Mix", "DAC2MONO Playback Switch",	"I2S Mix"},
+
+	/* Left record mixer */
+	{"Left Capture Mix", "LineInL Capture Switch",	"LINEINL"},
+	{"Left Capture Mix", "Left AuxI Capture Switch", "AUXINL"},
+	{"Left Capture Mix", "Mic1 Capture Switch",	"MIC1 Pre Amp"},
+	{"Left Capture Mix", "Mic2 Capture Switch",	"MIC2 Pre Amp"},
+	{"Left Capture Mix", "HPMixerL Capture Switch", "HPL Mix"},
+	{"Left Capture Mix", "SPKMixer Capture Switch", "Speaker Mix"},
+	{"Left Capture Mix", "MonoMixer Capture Switch", "Mono Mix"},
+
+	/* Right record mixer */
+	{"Right Capture Mix", "LineInR Capture Switch",	"LINEINR"},
+	{"Right Capture Mix", "Right AuxI Capture Switch",	"AUXINR"},
+	{"Right Capture Mix", "Mic1 Capture Switch",	"MIC1 Pre Amp"},
+	{"Right Capture Mix", "Mic2 Capture Switch",	"MIC2 Pre Amp"},
+	{"Right Capture Mix", "HPMixerR Capture Switch", "HPR Mix"},
+	{"Right Capture Mix", "SPKMixer Capture Switch", "Speaker Mix"},
+	{"Right Capture Mix", "MonoMixer Capture Switch", "Mono Mix"},
+
+	/* headphone left mux */
+	{"Left Headphone Mux", "HP Left Mix",		"HPL Mix"},
+	{"Left Headphone Mux", "Vmid",			"Vmid"},
+
+	/* headphone right mux */
+	{"Right Headphone Mux", "HP Right Mix",		"HPR Mix"},
+	{"Right Headphone Mux", "Vmid",			"Vmid"},
+
+	/* speaker out mux */
+	{"SpeakerOut Mux", "Vmid",			"Vmid"},
+	{"SpeakerOut Mux", "HPOut Mix",			"HPOut Mix"},
+	{"SpeakerOut Mux", "Speaker Mix",		"Speaker Mix"},
+	{"SpeakerOut Mux", "Mono Mix",			"Mono Mix"},
+
+	/* Mono/Aux Out mux */
+	{"AuxOut Mux", "Vmid",				"Vmid"},
+	{"AuxOut Mux", "HPOut Mix",			"HPOut Mix"},
+	{"AuxOut Mux", "Speaker Mix",			"Speaker Mix"},
+	{"AuxOut Mux", "Mono Mix",			"Mono Mix"},
+
+	/* output pga */
+	{"HPL", NULL,					"Left Headphone"},
+	{"Left Headphone", NULL,			"Left Headphone Mux"},
+	{"HPR", NULL,					"Right Headphone"},
+	{"Right Headphone", NULL,			"Right Headphone Mux"},
+	{"Left AuxOut", NULL,				"AuxOut Mux"},
+	{"Right AuxOut", NULL,				"AuxOut Mux"},
+
+	/* input pga */
+	{"Left LineIn", NULL,				"LINEINL"},
+	{"Right LineIn", NULL,				"LINEINR"},
+	{"Left AuxI", NULL,				"AUXINL"},
+	{"Right AuxI", NULL,				"AUXINR"},
+	{"MIC1 Pre Amp", NULL,				"MIC1"},
+	{"MIC2 Pre Amp", NULL,				"MIC2"},
+	{"MIC1 PGA", NULL,				"MIC1 Pre Amp"},
+	{"MIC2 PGA", NULL,				"MIC2 Pre Amp"},
+
+	/* left ADC */
+	{"Left ADC", NULL,				"Left Capture Mix"},
+
+	/* right ADC */
+	{"Right ADC", NULL,				"Right Capture Mix"},
+
+	{"SpeakerOut N Mux", "RN/-R",			"SpeakerOut"},
+	{"SpeakerOut N Mux", "RP/+R",			"SpeakerOut"},
+	{"SpeakerOut N Mux", "LN/-R",			"SpeakerOut"},
+	{"SpeakerOut N Mux", "Vmid",			"Vmid"},
+
+	{"SPKOUT", NULL,				"SpeakerOut"},
+	{"SPKOUTN", NULL,				"SpeakerOut N Mux"},
+};
+
+static const struct snd_soc_dapm_route intercon_spk[] = {
+	{"SpeakerOut", NULL,				"SpeakerOut Mux"},
+};
+
+static const struct snd_soc_dapm_route intercon_amp_spk[] = {
+	{"AB Amp", NULL,				"SpeakerOut Mux"},
+	{"D Amp", NULL,					"SpeakerOut Mux"},
+	{"AB-D Amp Mux", "AB Amp",			"AB Amp"},
+	{"AB-D Amp Mux", "D Amp",			"D Amp"},
+	{"SpeakerOut", NULL,				"AB-D Amp Mux"},
+};
+
+/* PLL divisors */
+struct _pll_div {
+	u32 pll_in;
+	u32 pll_out;
+	u16 regvalue;
+};
+
+/* Note: PLL code taken from the original alc5623 driver. Not sure how good it is. */
+/* Useful only for master mode. */
+static const struct _pll_div codec_master_pll_div[] = {
+
+	{  2048000,  8192000,	0x0ea0},
+	{  3686400,  8192000,	0x4e27},
+	{ 12000000,  8192000,	0x456b},
+	{ 13000000,  8192000,	0x495f},
+	{ 13100000,  8192000,	0x0320},
+	{  2048000,  11289600,	0xf637},
+	{  3686400,  11289600,	0x2f22},
+	{ 12000000,  11289600,	0x3e2f},
+	{ 13000000,  11289600,	0x4d5b},
+	{ 13100000,  11289600,	0x363b},
+	{  2048000,  16384000,	0x1ea0},
+	{  3686400,  16384000,	0x9e27},
+	{ 12000000,  16384000,	0x452b},
+	{ 13000000,  16384000,	0x542f},
+	{ 13100000,  16384000,	0x03a0},
+	{  2048000,  16934400,	0xe625},
+	{  3686400,  16934400,	0x9126},
+	{ 12000000,  16934400,	0x4d2c},
+	{ 13000000,  16934400,	0x742f},
+	{ 13100000,  16934400,	0x3c27},
+	{  2048000,  22579200,	0x2aa0},
+	{  3686400,  22579200,	0x2f20},
+	{ 12000000,  22579200,	0x7e2f},
+	{ 13000000,  22579200,	0x742f},
+	{ 13100000,  22579200,	0x3c27},
+	{  2048000,  24576000,	0x2ea0},
+	{  3686400,  24576000,	0xee27},
+	{ 12000000,  24576000,	0x2915},
+	{ 13000000,  24576000,	0x772e},
+	{ 13100000,  24576000,	0x0d20},
+};
+
+static const struct _pll_div codec_slave_pll_div[] = {
+
+	{  1024000,  16384000,  0x3ea0},
+	{  1411200,  22579200,	0x3ea0},
+	{  1536000,  24576000,	0x3ea0},
+	{  2048000,  16384000,  0x1ea0},
+	{  2822400,  22579200,	0x1ea0},
+	{  3072000,  24576000,	0x1ea0},
+
+};
+
+static int alc5623_set_dai_pll(struct snd_soc_dai *codec_dai, int pll_id,
+		int source, unsigned int freq_in, unsigned int freq_out)
+{
+	int i;
+	struct snd_soc_codec *codec = codec_dai->codec;
+	int gbl_clk = 0, pll_div = 0;
+	u16 reg;
+
+	if (pll_id < ALC5623_PLL_FR_MCLK || pll_id > ALC5623_PLL_FR_BCK)
+		return -ENODEV;
+
+	/* Disable PLL power */
+	snd_soc_update_bits(codec, ALC5623_PWR_MANAG_ADD2,
+				ALC5623_PWR_ADD2_PLL,
+				0);
+
+	/* pll is not used in slave mode */
+	reg = snd_soc_read(codec, ALC5623_DAI_CONTROL);
+	if (reg & ALC5623_DAI_SDP_SLAVE_MODE)
+		return 0;
+
+	if (!freq_in || !freq_out)
+		return 0;
+
+	switch (pll_id) {
+	case ALC5623_PLL_FR_MCLK:
+		for (i = 0; i < ARRAY_SIZE(codec_master_pll_div); i++) {
+			if (codec_master_pll_div[i].pll_in == freq_in
+			   && codec_master_pll_div[i].pll_out == freq_out) {
+				/* PLL source from MCLK */
+				pll_div  = codec_master_pll_div[i].regvalue;
+				break;
+			}
+		}
+		break;
+	case ALC5623_PLL_FR_BCK:
+		for (i = 0; i < ARRAY_SIZE(codec_slave_pll_div); i++) {
+			if (codec_slave_pll_div[i].pll_in == freq_in
+			   && codec_slave_pll_div[i].pll_out == freq_out) {
+				/* PLL source from Bitclk */
+				gbl_clk = ALC5623_GBL_CLK_PLL_SOUR_SEL_BITCLK;
+				pll_div = codec_slave_pll_div[i].regvalue;
+				break;
+			}
+		}
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (!pll_div)
+		return -EINVAL;
+
+	snd_soc_write(codec, ALC5623_GLOBAL_CLK_CTRL_REG, gbl_clk);
+	snd_soc_write(codec, ALC5623_PLL_CTRL, pll_div);
+	snd_soc_update_bits(codec, ALC5623_PWR_MANAG_ADD2,
+				ALC5623_PWR_ADD2_PLL,
+				ALC5623_PWR_ADD2_PLL);
+	gbl_clk |= ALC5623_GBL_CLK_SYS_SOUR_SEL_PLL;
+	snd_soc_write(codec, ALC5623_GLOBAL_CLK_CTRL_REG, gbl_clk);
+
+	return 0;
+}
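+
+/*
+ * Usage sketch (illustrative only, not taken from an existing machine
+ * driver): a board running the codec as master from a 12 MHz MCLK could
+ * call snd_soc_dai_set_pll(codec_dai, ALC5623_PLL_FR_MCLK, 0,
+ * 12000000, 24576000), which matches the {12000000, 24576000, 0x2915}
+ * entry above and then selects the PLL output as the system clock source.
+ */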
+
+struct _coeff_div {
+	u16 fs;
+	u16 regvalue;
+};
+
+/* codec hifi mclk (after PLL) clock divider coefficients */
+/* values inspired by the BCLK=32Fs column of the Appendix A table */
+static const struct _coeff_div coeff_div[] = {
+	{256*8, 0x3a69},
+	{384*8, 0x3c6b},
+	{256*4, 0x2a69},
+	{384*4, 0x2c6b},
+	{256*2, 0x1a69},
+	{384*2, 0x1c6b},
+	{256*1, 0x0a69},
+	{384*1, 0x0c6b},
+};
+
+static int get_coeff(struct snd_soc_codec *codec, int rate)
+{
+	struct alc5623_priv *alc5623 = snd_soc_codec_get_drvdata(codec);
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(coeff_div); i++) {
+		if (coeff_div[i].fs * rate == alc5623->sysclk)
+			return i;
+	}
+	return -EINVAL;
+}
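+
+/*
+ * Worked example (for illustration): with alc5623->sysclk == 12288000 and
+ * a 48 kHz stream, get_coeff() matches the 256*1 entry because
+ * 256 * 48000 == 12288000, so alc5623_pcm_hw_params() ends up writing
+ * 0x0a69 to ALC5623_STEREO_AD_DA_CLK_CTRL.
+ */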
+
+/*
+ * Clock after PLL and dividers
+ */
+static int alc5623_set_dai_sysclk(struct snd_soc_dai *codec_dai,
+		int clk_id, unsigned int freq, int dir)
+{
+	struct snd_soc_codec *codec = codec_dai->codec;
+	struct alc5623_priv *alc5623 = snd_soc_codec_get_drvdata(codec);
+
+	switch (freq) {
+	case  8192000:
+	case 11289600:
+	case 12288000:
+	case 16384000:
+	case 16934400:
+	case 18432000:
+	case 22579200:
+	case 24576000:
+		alc5623->sysclk = freq;
+		return 0;
+	}
+	return -EINVAL;
+}
+
+static int alc5623_set_dai_fmt(struct snd_soc_dai *codec_dai,
+		unsigned int fmt)
+{
+	struct snd_soc_codec *codec = codec_dai->codec;
+	u16 iface = 0;
+
+	/* set master/slave audio interface */
+	switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+	case SND_SOC_DAIFMT_CBM_CFM:
+		iface = ALC5623_DAI_SDP_MASTER_MODE;
+		break;
+	case SND_SOC_DAIFMT_CBS_CFS:
+		iface = ALC5623_DAI_SDP_SLAVE_MODE;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	/* interface format */
+	switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+	case SND_SOC_DAIFMT_I2S:
+		iface |= ALC5623_DAI_I2S_DF_I2S;
+		break;
+	case SND_SOC_DAIFMT_RIGHT_J:
+		iface |= ALC5623_DAI_I2S_DF_RIGHT;
+		break;
+	case SND_SOC_DAIFMT_LEFT_J:
+		iface |= ALC5623_DAI_I2S_DF_LEFT;
+		break;
+	case SND_SOC_DAIFMT_DSP_A:
+		iface |= ALC5623_DAI_I2S_DF_PCM;
+		break;
+	case SND_SOC_DAIFMT_DSP_B:
+		iface |= ALC5623_DAI_I2S_DF_PCM | ALC5623_DAI_I2S_PCM_MODE;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	/* clock inversion */
+	switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+	case SND_SOC_DAIFMT_NB_NF:
+		break;
+	case SND_SOC_DAIFMT_IB_IF:
+		iface |= ALC5623_DAI_MAIN_I2S_BCLK_POL_CTRL;
+		break;
+	case SND_SOC_DAIFMT_IB_NF:
+		iface |= ALC5623_DAI_MAIN_I2S_BCLK_POL_CTRL;
+		break;
+	case SND_SOC_DAIFMT_NB_IF:
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return snd_soc_write(codec, ALC5623_DAI_CONTROL, iface);
+}
+
+static int alc5623_pcm_hw_params(struct snd_pcm_substream *substream,
+		struct snd_pcm_hw_params *params, struct snd_soc_dai *dai)
+{
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct snd_soc_codec *codec = rtd->codec;
+	struct alc5623_priv *alc5623 = snd_soc_codec_get_drvdata(codec);
+	int coeff, rate;
+	u16 iface;
+
+	iface = snd_soc_read(codec, ALC5623_DAI_CONTROL);
+	iface &= ~ALC5623_DAI_I2S_DL_MASK;
+
+	/* bit size */
+	switch (params_format(params)) {
+	case SNDRV_PCM_FORMAT_S16_LE:
+		iface |= ALC5623_DAI_I2S_DL_16;
+		break;
+	case SNDRV_PCM_FORMAT_S20_3LE:
+		iface |= ALC5623_DAI_I2S_DL_20;
+		break;
+	case SNDRV_PCM_FORMAT_S24_LE:
+		iface |= ALC5623_DAI_I2S_DL_24;
+		break;
+	case SNDRV_PCM_FORMAT_S32_LE:
+		iface |= ALC5623_DAI_I2S_DL_32;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	/* set iface & srate */
+	snd_soc_write(codec, ALC5623_DAI_CONTROL, iface);
+	rate = params_rate(params);
+	coeff = get_coeff(codec, rate);
+	if (coeff < 0)
+		return -EINVAL;
+
+	coeff = coeff_div[coeff].regvalue;
+	dev_dbg(codec->dev, "%s: sysclk=%d,rate=%d,coeff=0x%04x\n",
+		__func__, alc5623->sysclk, rate, coeff);
+	snd_soc_write(codec, ALC5623_STEREO_AD_DA_CLK_CTRL, coeff);
+
+	return 0;
+}
+
+static int alc5623_mute(struct snd_soc_dai *dai, int mute)
+{
+	struct snd_soc_codec *codec = dai->codec;
+	u16 hp_mute = ALC5623_MISC_M_DAC_L_INPUT | ALC5623_MISC_M_DAC_R_INPUT;
+	u16 mute_reg = snd_soc_read(codec, ALC5623_MISC_CTRL) & ~hp_mute;
+
+	if (mute)
+		mute_reg |= hp_mute;
+
+	return snd_soc_write(codec, ALC5623_MISC_CTRL, mute_reg);
+}
+
+#define ALC5623_ADD2_POWER_EN (ALC5623_PWR_ADD2_VREF \
+	| ALC5623_PWR_ADD2_DAC_REF_CIR)
+
+#define ALC5623_ADD3_POWER_EN (ALC5623_PWR_ADD3_MAIN_BIAS \
+	| ALC5623_PWR_ADD3_MIC1_BOOST_AD)
+
+#define ALC5623_ADD1_POWER_EN \
+	(ALC5623_PWR_ADD1_SHORT_CURR_DET_EN | ALC5623_PWR_ADD1_SOFTGEN_EN \
+	| ALC5623_PWR_ADD1_DEPOP_BUF_HP | ALC5623_PWR_ADD1_HP_OUT_AMP \
+	| ALC5623_PWR_ADD1_HP_OUT_ENH_AMP)
+
+#define ALC5623_ADD1_POWER_EN_5622 \
+	(ALC5623_PWR_ADD1_SHORT_CURR_DET_EN \
+	| ALC5623_PWR_ADD1_HP_OUT_AMP)
+
+static void enable_power_depop(struct snd_soc_codec *codec)
+{
+	struct alc5623_priv *alc5623 = snd_soc_codec_get_drvdata(codec);
+
+	snd_soc_update_bits(codec, ALC5623_PWR_MANAG_ADD1,
+				ALC5623_PWR_ADD1_SOFTGEN_EN,
+				ALC5623_PWR_ADD1_SOFTGEN_EN);
+
+	snd_soc_write(codec, ALC5623_PWR_MANAG_ADD3, ALC5623_ADD3_POWER_EN);
+
+	snd_soc_update_bits(codec, ALC5623_MISC_CTRL,
+				ALC5623_MISC_HP_DEPOP_MODE2_EN,
+				ALC5623_MISC_HP_DEPOP_MODE2_EN);
+
+	msleep(500);
+
+	snd_soc_write(codec, ALC5623_PWR_MANAG_ADD2, ALC5623_ADD2_POWER_EN);
+
+	/* avoid writing '1' into 5622 reserved bits */
+	if (alc5623->id == 0x22)
+		snd_soc_write(codec, ALC5623_PWR_MANAG_ADD1,
+			ALC5623_ADD1_POWER_EN_5622);
+	else
+		snd_soc_write(codec, ALC5623_PWR_MANAG_ADD1,
+			ALC5623_ADD1_POWER_EN);
+
+	/* disable HP Depop2 */
+	snd_soc_update_bits(codec, ALC5623_MISC_CTRL,
+				ALC5623_MISC_HP_DEPOP_MODE2_EN,
+				0);
+
+}
+
+static int alc5623_set_bias_level(struct snd_soc_codec *codec,
+				      enum snd_soc_bias_level level)
+{
+	switch (level) {
+	case SND_SOC_BIAS_ON:
+		enable_power_depop(codec);
+		break;
+	case SND_SOC_BIAS_PREPARE:
+		break;
+	case SND_SOC_BIAS_STANDBY:
+		/* everything off except vref/vmid, */
+		snd_soc_write(codec, ALC5623_PWR_MANAG_ADD2,
+				ALC5623_PWR_ADD2_VREF);
+		snd_soc_write(codec, ALC5623_PWR_MANAG_ADD3,
+				ALC5623_PWR_ADD3_MAIN_BIAS);
+		break;
+	case SND_SOC_BIAS_OFF:
+		/* everything off, dac mute, inactive */
+		snd_soc_write(codec, ALC5623_PWR_MANAG_ADD2, 0);
+		snd_soc_write(codec, ALC5623_PWR_MANAG_ADD3, 0);
+		snd_soc_write(codec, ALC5623_PWR_MANAG_ADD1, 0);
+		break;
+	}
+	codec->dapm.bias_level = level;
+	return 0;
+}
+
+#define ALC5623_FORMATS	(SNDRV_PCM_FMTBIT_S16_LE \
+			| SNDRV_PCM_FMTBIT_S24_LE \
+			| SNDRV_PCM_FMTBIT_S32_LE)
+
+static struct snd_soc_dai_ops alc5623_dai_ops = {
+		.hw_params = alc5623_pcm_hw_params,
+		.digital_mute = alc5623_mute,
+		.set_fmt = alc5623_set_dai_fmt,
+		.set_sysclk = alc5623_set_dai_sysclk,
+		.set_pll = alc5623_set_dai_pll,
+};
+
+static struct snd_soc_dai_driver alc5623_dai = {
+	.name = "alc5623-hifi",
+	.playback = {
+		.stream_name = "Playback",
+		.channels_min = 1,
+		.channels_max = 2,
+		.rate_min =	8000,
+		.rate_max =	48000,
+		.rates = SNDRV_PCM_RATE_8000_48000,
+		.formats = ALC5623_FORMATS,},
+	.capture = {
+		.stream_name = "Capture",
+		.channels_min = 1,
+		.channels_max = 2,
+		.rate_min =	8000,
+		.rate_max =	48000,
+		.rates = SNDRV_PCM_RATE_8000_48000,
+		.formats = ALC5623_FORMATS,},
+
+	.ops = &alc5623_dai_ops,
+};
+
+static int alc5623_suspend(struct snd_soc_codec *codec, pm_message_t mesg)
+{
+	alc5623_set_bias_level(codec, SND_SOC_BIAS_OFF);
+	return 0;
+}
+
+static int alc5623_resume(struct snd_soc_codec *codec)
+{
+	int i, step = codec->driver->reg_cache_step;
+	u16 *cache = codec->reg_cache;
+
+	/* Sync reg_cache with the hardware */
+	for (i = 2 ; i < codec->driver->reg_cache_size ; i += step)
+		snd_soc_write(codec, i, cache[i]);
+
+	alc5623_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
+
+	/* charge alc5623 caps */
+	if (codec->dapm.suspend_bias_level == SND_SOC_BIAS_ON) {
+		alc5623_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
+		codec->dapm.bias_level = SND_SOC_BIAS_ON;
+		alc5623_set_bias_level(codec, codec->dapm.bias_level);
+	}
+
+	return 0;
+}
+
+static int alc5623_probe(struct snd_soc_codec *codec)
+{
+	struct alc5623_priv *alc5623 = snd_soc_codec_get_drvdata(codec);
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
+	int ret;
+
+	ret = snd_soc_codec_set_cache_io(codec, 8, 16, alc5623->control_type);
+	if (ret < 0) {
+		dev_err(codec->dev, "Failed to set cache I/O: %d\n", ret);
+		return ret;
+	}
+
+	alc5623_reset(codec);
+	alc5623_fill_cache(codec);
+
+	/* power on device */
+	alc5623_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
+
+	if (alc5623->add_ctrl) {
+		snd_soc_write(codec, ALC5623_ADD_CTRL_REG,
+				alc5623->add_ctrl);
+	}
+
+	if (alc5623->jack_det_ctrl) {
+		snd_soc_write(codec, ALC5623_JACK_DET_CTRL,
+				alc5623->jack_det_ctrl);
+	}
+
+	switch (alc5623->id) {
+	case 0x21:
+		snd_soc_add_controls(codec, rt5621_vol_snd_controls,
+			ARRAY_SIZE(rt5621_vol_snd_controls));
+		break;
+	case 0x22:
+		snd_soc_add_controls(codec, rt5622_vol_snd_controls,
+			ARRAY_SIZE(rt5622_vol_snd_controls));
+		break;
+	case 0x23:
+		snd_soc_add_controls(codec, alc5623_vol_snd_controls,
+			ARRAY_SIZE(alc5623_vol_snd_controls));
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	snd_soc_add_controls(codec, alc5623_snd_controls,
+			ARRAY_SIZE(alc5623_snd_controls));
+
+	snd_soc_dapm_new_controls(dapm, alc5623_dapm_widgets,
+					ARRAY_SIZE(alc5623_dapm_widgets));
+
+	/* set up audio path interconnects */
+	snd_soc_dapm_add_routes(dapm, intercon, ARRAY_SIZE(intercon));
+
+	switch (alc5623->id) {
+	case 0x21:
+	case 0x22:
+		snd_soc_dapm_new_controls(dapm, alc5623_dapm_amp_widgets,
+					ARRAY_SIZE(alc5623_dapm_amp_widgets));
+		snd_soc_dapm_add_routes(dapm, intercon_amp_spk,
+					ARRAY_SIZE(intercon_amp_spk));
+		break;
+	case 0x23:
+		snd_soc_dapm_add_routes(dapm, intercon_spk,
+					ARRAY_SIZE(intercon_spk));
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return ret;
+}
+
+/* power down chip */
+static int alc5623_remove(struct snd_soc_codec *codec)
+{
+	alc5623_set_bias_level(codec, SND_SOC_BIAS_OFF);
+	return 0;
+}
+
+static struct snd_soc_codec_driver soc_codec_device_alc5623 = {
+	.probe = alc5623_probe,
+	.remove = alc5623_remove,
+	.suspend = alc5623_suspend,
+	.resume = alc5623_resume,
+	.set_bias_level = alc5623_set_bias_level,
+	.reg_cache_size = ALC5623_VENDOR_ID2+2,
+	.reg_word_size = sizeof(u16),
+	.reg_cache_step = 2,
+};
+
+/*
+ * ALC5623 2 wire address is determined by A1 pin
+ * state during powerup.
+ *    low  = 0x1a
+ *    high = 0x1b
+ */
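+/*
+ * Board hookup sketch (hypothetical platform code, shown only to
+ * illustrate the address selection above; with A1 tied low the device
+ * answers at 0x1a):
+ *
+ *	static struct i2c_board_info board_i2c_devs[] __initdata = {
+ *		{ I2C_BOARD_INFO("alc5623", 0x1a) },
+ *	};
+ */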
+static int alc5623_i2c_probe(struct i2c_client *client,
+				const struct i2c_device_id *id)
+{
+	struct alc5623_platform_data *pdata;
+	struct alc5623_priv *alc5623;
+	int ret, vid1, vid2;
+
+	vid1 = i2c_smbus_read_word_data(client, ALC5623_VENDOR_ID1);
+	if (vid1 < 0) {
+		dev_err(&client->dev, "failed to read I2C\n");
+		return -EIO;
+	}
+	vid1 = ((vid1 & 0xff) << 8) | (vid1 >> 8);
+
+	vid2 = i2c_smbus_read_byte_data(client, ALC5623_VENDOR_ID2);
+	if (vid2 < 0) {
+		dev_err(&client->dev, "failed to read I2C\n");
+		return -EIO;
+	}
+
+	if ((vid1 != 0x10ec) || (vid2 != id->driver_data)) {
+		dev_err(&client->dev, "unknown or wrong codec\n");
+		dev_err(&client->dev, "Expected %x:%lx, got %x:%x\n",
+				0x10ec, id->driver_data,
+				vid1, vid2);
+		return -ENODEV;
+	}
+
+	dev_dbg(&client->dev, "Found codec id : alc56%02x\n", vid2);
+
+	alc5623 = kzalloc(sizeof(struct alc5623_priv), GFP_KERNEL);
+	if (alc5623 == NULL)
+		return -ENOMEM;
+
+	pdata = client->dev.platform_data;
+	if (pdata) {
+		alc5623->add_ctrl = pdata->add_ctrl;
+		alc5623->jack_det_ctrl = pdata->jack_det_ctrl;
+	}
+
+	alc5623->id = vid2;
+	switch (alc5623->id) {
+	case 0x21:
+		alc5623_dai.name = "alc5621-hifi";
+		break;
+	case 0x22:
+		alc5623_dai.name = "alc5622-hifi";
+		break;
+	case 0x23:
+		alc5623_dai.name = "alc5623-hifi";
+		break;
+	default:
+		kfree(alc5623);
+		return -EINVAL;
+	}
+
+	i2c_set_clientdata(client, alc5623);
+	alc5623->control_data = client;
+	alc5623->control_type = SND_SOC_I2C;
+	mutex_init(&alc5623->mutex);
+
+	ret =  snd_soc_register_codec(&client->dev,
+		&soc_codec_device_alc5623, &alc5623_dai, 1);
+	if (ret != 0) {
+		dev_err(&client->dev, "Failed to register codec: %d\n", ret);
+		kfree(alc5623);
+	}
+
+	return ret;
+}
+
+static int alc5623_i2c_remove(struct i2c_client *client)
+{
+	struct alc5623_priv *alc5623 = i2c_get_clientdata(client);
+
+	snd_soc_unregister_codec(&client->dev);
+	kfree(alc5623);
+	return 0;
+}
+
+static const struct i2c_device_id alc5623_i2c_table[] = {
+	{"alc5621", 0x21},
+	{"alc5622", 0x22},
+	{"alc5623", 0x23},
+	{}
+};
+MODULE_DEVICE_TABLE(i2c, alc5623_i2c_table);
+
+/*  i2c codec control layer */
+static struct i2c_driver alc5623_i2c_driver = {
+	.driver = {
+		.name = "alc562x-codec",
+		.owner = THIS_MODULE,
+	},
+	.probe = alc5623_i2c_probe,
+	.remove =  __devexit_p(alc5623_i2c_remove),
+	.id_table = alc5623_i2c_table,
+};
+
+static int __init alc5623_modinit(void)
+{
+	int ret;
+
+	ret = i2c_add_driver(&alc5623_i2c_driver);
+	if (ret != 0) {
+		printk(KERN_ERR "%s: can't add i2c driver\n", __func__);
+		return ret;
+	}
+
+	return ret;
+}
+module_init(alc5623_modinit);
+
+static void __exit alc5623_modexit(void)
+{
+	i2c_del_driver(&alc5623_i2c_driver);
+}
+module_exit(alc5623_modexit);
+
+MODULE_DESCRIPTION("ASoC alc5621/2/3 driver");
+MODULE_AUTHOR("Arnaud Patard <arnaud.patard@rtp-net.org>");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/alc5623.h b/sound/soc/codecs/alc5623.h
new file mode 100644
index 0000000..f3d6826
--- /dev/null
+++ b/sound/soc/codecs/alc5623.h
@@ -0,0 +1,161 @@
+/*
+ * alc5623.h  --  alc562[123] ALSA SoC Audio driver
+ *
+ * Copyright 2008 Realtek Microelectronics
+ * Copyright 2010 Arnaud Patard <arnaud.patard@rtp-net.org>
+ *
+ * Author: flove <flove@realtek.com>
+ * Arnaud Patard <arnaud.patard@rtp-net.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef _ALC5623_H
+#define _ALC5623_H
+
+#define ALC5623_RESET				0x00
+/*				5621 5622 5623  */
+/* speaker output vol		   2    2       */
+/* line output vol                      4    2  */
+/* HP output vol		   4    0    4  */
+#define ALC5623_SPK_OUT_VOL			0x02
+#define ALC5623_HP_OUT_VOL			0x04
+#define ALC5623_MONO_AUX_OUT_VOL		0x06
+#define ALC5623_AUXIN_VOL			0x08
+#define ALC5623_LINE_IN_VOL			0x0A
+#define ALC5623_STEREO_DAC_VOL			0x0C
+#define ALC5623_MIC_VOL				0x0E
+#define ALC5623_MIC_ROUTING_CTRL		0x10
+#define ALC5623_ADC_REC_GAIN			0x12
+#define ALC5623_ADC_REC_MIXER			0x14
+#define ALC5623_SOFT_VOL_CTRL_TIME		0x16
+/* ALC5623_OUTPUT_MIXER_CTRL :			*/
+/* same remark as for reg 2 line vs speaker	*/
+#define ALC5623_OUTPUT_MIXER_CTRL		0x1C
+#define ALC5623_MIC_CTRL			0x22
+
+#define	ALC5623_DAI_CONTROL			0x34
+#define ALC5623_DAI_SDP_MASTER_MODE		(0 << 15)
+#define ALC5623_DAI_SDP_SLAVE_MODE		(1 << 15)
+#define ALC5623_DAI_I2S_PCM_MODE		(1 << 14)
+#define ALC5623_DAI_MAIN_I2S_BCLK_POL_CTRL	(1 <<  7)
+#define ALC5623_DAI_ADC_DATA_L_R_SWAP		(1 <<  5)
+#define ALC5623_DAI_DAC_DATA_L_R_SWAP		(1 <<  4)
+#define ALC5623_DAI_I2S_DL_MASK			(3 <<  2)
+#define ALC5623_DAI_I2S_DL_32			(3 <<  2)
+#define	ALC5623_DAI_I2S_DL_24			(2 <<  2)
+#define ALC5623_DAI_I2S_DL_20			(1 <<  2)
+#define ALC5623_DAI_I2S_DL_16			(0 <<  2)
+#define ALC5623_DAI_I2S_DF_PCM			(3 <<  0)
+#define	ALC5623_DAI_I2S_DF_LEFT			(2 <<  0)
+#define ALC5623_DAI_I2S_DF_RIGHT		(1 <<  0)
+#define ALC5623_DAI_I2S_DF_I2S			(0 <<  0)
+
+#define ALC5623_STEREO_AD_DA_CLK_CTRL		0x36
+#define	ALC5623_COMPANDING_CTRL			0x38
+
+#define	ALC5623_PWR_MANAG_ADD1			0x3A
+#define ALC5623_PWR_ADD1_MAIN_I2S_EN		(1 << 15)
+#define ALC5623_PWR_ADD1_ZC_DET_PD_EN		(1 << 14)
+#define ALC5623_PWR_ADD1_MIC1_BIAS_EN		(1 << 11)
+#define ALC5623_PWR_ADD1_SHORT_CURR_DET_EN	(1 << 10)
+#define ALC5623_PWR_ADD1_SOFTGEN_EN		(1 <<  8) /* rsvd on 5622 */
+#define	ALC5623_PWR_ADD1_DEPOP_BUF_HP		(1 <<  6) /* rsvd on 5622 */
+#define	ALC5623_PWR_ADD1_HP_OUT_AMP		(1 <<  5)
+#define	ALC5623_PWR_ADD1_HP_OUT_ENH_AMP		(1 <<  4) /* rsvd on 5622 */
+#define ALC5623_PWR_ADD1_DEPOP_BUF_AUX		(1 <<  2)
+#define ALC5623_PWR_ADD1_AUX_OUT_AMP		(1 <<  1)
+#define ALC5623_PWR_ADD1_AUX_OUT_ENH_AMP	(1 <<  0) /* rsvd on 5622 */
+
+#define ALC5623_PWR_MANAG_ADD2			0x3C
+#define ALC5623_PWR_ADD2_LINEOUT		(1 << 15) /* rt5623 */
+#define ALC5623_PWR_ADD2_CLASS_AB		(1 << 15) /* rt5621 */
+#define ALC5623_PWR_ADD2_CLASS_D		(1 << 14) /* rt5621 */
+#define ALC5623_PWR_ADD2_VREF			(1 << 13)
+#define ALC5623_PWR_ADD2_PLL			(1 << 12)
+#define ALC5623_PWR_ADD2_DAC_REF_CIR		(1 << 10)
+#define ALC5623_PWR_ADD2_L_DAC_CLK		(1 <<  9)
+#define ALC5623_PWR_ADD2_R_DAC_CLK		(1 <<  8)
+#define ALC5623_PWR_ADD2_L_ADC_CLK_GAIN		(1 <<  7)
+#define ALC5623_PWR_ADD2_R_ADC_CLK_GAIN		(1 <<  6)
+#define ALC5623_PWR_ADD2_L_HP_MIXER		(1 <<  5)
+#define ALC5623_PWR_ADD2_R_HP_MIXER		(1 <<  4)
+#define ALC5623_PWR_ADD2_SPK_MIXER		(1 <<  3)
+#define ALC5623_PWR_ADD2_MONO_MIXER		(1 <<  2)
+#define ALC5623_PWR_ADD2_L_ADC_REC_MIXER	(1 <<  1)
+#define ALC5623_PWR_ADD2_R_ADC_REC_MIXER	(1 <<  0)
+
+#define ALC5623_PWR_MANAG_ADD3			0x3E
+#define ALC5623_PWR_ADD3_MAIN_BIAS		(1 << 15)
+#define ALC5623_PWR_ADD3_AUXOUT_L_VOL_AMP	(1 << 14)
+#define ALC5623_PWR_ADD3_AUXOUT_R_VOL_AMP	(1 << 13)
+#define ALC5623_PWR_ADD3_SPK_OUT		(1 << 12)
+#define ALC5623_PWR_ADD3_HP_L_OUT_VOL		(1 << 10)
+#define ALC5623_PWR_ADD3_HP_R_OUT_VOL		(1 <<  9)
+#define ALC5623_PWR_ADD3_LINEIN_L_VOL		(1 <<  7)
+#define ALC5623_PWR_ADD3_LINEIN_R_VOL		(1 <<  6)
+#define ALC5623_PWR_ADD3_AUXIN_L_VOL		(1 <<  5)
+#define ALC5623_PWR_ADD3_AUXIN_R_VOL		(1 <<  4)
+#define ALC5623_PWR_ADD3_MIC1_FUN_CTRL		(1 <<  3)
+#define ALC5623_PWR_ADD3_MIC2_FUN_CTRL		(1 <<  2)
+#define ALC5623_PWR_ADD3_MIC1_BOOST_AD		(1 <<  1)
+#define ALC5623_PWR_ADD3_MIC2_BOOST_AD		(1 <<  0)
+
+#define ALC5623_ADD_CTRL_REG			0x40
+
+#define	ALC5623_GLOBAL_CLK_CTRL_REG		0x42
+#define ALC5623_GBL_CLK_SYS_SOUR_SEL_PLL	(1 << 15)
+#define ALC5623_GBL_CLK_SYS_SOUR_SEL_MCLK	(0 << 15)
+#define ALC5623_GBL_CLK_PLL_SOUR_SEL_BITCLK	(1 << 14)
+#define ALC5623_GBL_CLK_PLL_SOUR_SEL_MCLK	(0 << 14)
+#define ALC5623_GBL_CLK_PLL_DIV_RATIO_DIV8	(3 <<  1)
+#define ALC5623_GBL_CLK_PLL_DIV_RATIO_DIV4	(2 <<  1)
+#define ALC5623_GBL_CLK_PLL_DIV_RATIO_DIV2	(1 <<  1)
+#define ALC5623_GBL_CLK_PLL_DIV_RATIO_DIV1	(0 <<  1)
+#define ALC5623_GBL_CLK_PLL_PRE_DIV2		(1 <<  0)
+#define ALC5623_GBL_CLK_PLL_PRE_DIV1		(0 <<  0)
+
+#define ALC5623_PLL_CTRL			0x44
+#define ALC5623_PLL_CTRL_N_VAL(n)		(((n)&0xff) << 8)
+#define ALC5623_PLL_CTRL_K_VAL(k)		(((k)&0x7)  << 4)
+#define ALC5623_PLL_CTRL_M_VAL(m)		((m)&0xf)
+
+#define ALC5623_GPIO_OUTPUT_PIN_CTRL		0x4A
+#define ALC5623_GPIO_PIN_CONFIG			0x4C
+#define ALC5623_GPIO_PIN_POLARITY		0x4E
+#define ALC5623_GPIO_PIN_STICKY			0x50
+#define ALC5623_GPIO_PIN_WAKEUP			0x52
+#define ALC5623_GPIO_PIN_STATUS			0x54
+#define ALC5623_GPIO_PIN_SHARING		0x56
+#define	ALC5623_OVER_CURR_STATUS		0x58
+#define ALC5623_JACK_DET_CTRL			0x5A
+
+#define ALC5623_MISC_CTRL			0x5E
+#define ALC5623_MISC_DISABLE_FAST_VREG		(1 << 15)
+#define ALC5623_MISC_SPK_CLASS_AB_OC_PD		(1 << 13) /* 5621 */
+#define ALC5623_MISC_SPK_CLASS_AB_OC_DET	(1 << 12) /* 5621 */
+#define ALC5623_MISC_HP_DEPOP_MODE3_EN		(1 << 10)
+#define ALC5623_MISC_HP_DEPOP_MODE2_EN		(1 <<  9)
+#define ALC5623_MISC_HP_DEPOP_MODE1_EN		(1 <<  8)
+#define ALC5623_MISC_AUXOUT_DEPOP_MODE3_EN	(1 <<  6)
+#define ALC5623_MISC_AUXOUT_DEPOP_MODE2_EN	(1 <<  5)
+#define ALC5623_MISC_AUXOUT_DEPOP_MODE1_EN	(1 <<  4)
+#define ALC5623_MISC_M_DAC_L_INPUT		(1 <<  3)
+#define ALC5623_MISC_M_DAC_R_INPUT		(1 <<  2)
+#define ALC5623_MISC_IRQOUT_INV_CTRL		(1 <<  0)
+
+#define	ALC5623_PSEDUEO_SPATIAL_CTRL		0x60
+#define ALC5623_EQ_CTRL				0x62
+#define ALC5623_EQ_MODE_ENABLE			0x66
+#define ALC5623_AVC_CTRL			0x68
+#define ALC5623_HID_CTRL_INDEX			0x6A
+#define ALC5623_HID_CTRL_DATA			0x6C
+#define ALC5623_VENDOR_ID1			0x7C
+#define ALC5623_VENDOR_ID2			0x7E
+
+#define ALC5623_PLL_FR_MCLK			0
+#define ALC5623_PLL_FR_BCK			1
+#endif
diff --git a/sound/soc/codecs/cq93vc.c b/sound/soc/codecs/cq93vc.c
index 8236439..46dbfd0 100644
--- a/sound/soc/codecs/cq93vc.c
+++ b/sound/soc/codecs/cq93vc.c
@@ -36,8 +36,6 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dai.h>
-#include <sound/soc-dapm.h>
 #include <sound/initval.h>
 
 #include <mach/dm365.h>
@@ -116,7 +114,7 @@
 			     DAVINCI_VC_REG12_POWER_ALL_OFF);
 		break;
 	}
-	codec->bias_level = level;
+	codec->dapm.bias_level = level;
 
 	return 0;
 }
diff --git a/sound/soc/codecs/cs4270.c b/sound/soc/codecs/cs4270.c
index 6d4bdc6..8b51245 100644
--- a/sound/soc/codecs/cs4270.c
+++ b/sound/soc/codecs/cs4270.c
@@ -106,6 +106,21 @@
 #define CS4270_MUTE_DAC_A	0x01
 #define CS4270_MUTE_DAC_B	0x02
 
+/* Power-on default values for the registers
+ *
+ * This array contains the power-on default values of the registers, with the
+ * exception of the "CHIPID" register (01h).  The lower four bits of that
+ * register contain the hardware revision, so it is treated as volatile.
+ *
+ * Also note that on the CS4270, the first readable register is 1, but ASoC
+ * assumes the first register is 0.  Therefore, the array must have an entry for
+ * register 0, but we use cs4270_reg_is_readable() to tell ASoC that it can't
+ * be read.
+ */
+static const u8 cs4270_default_reg_cache[CS4270_LASTREG + 1] = {
+	0x00, 0x00, 0x00, 0x30, 0x00, 0x60, 0x20, 0x00, 0x00
+};
+
 static const char *supply_names[] = {
 	"va", "vd", "vlc"
 };
@@ -114,7 +129,6 @@
 struct cs4270_private {
 	enum snd_soc_control_type control_type;
 	void *control_data;
-	u8 reg_cache[CS4270_NUMREGS];
 	unsigned int mclk; /* Input frequency of the MCLK pin */
 	unsigned int mode; /* The mode (I2S or left-justified) */
 	unsigned int slave_mode;
@@ -179,6 +193,20 @@
 /* The number of MCLK/LRCK ratios supported by the CS4270 */
 #define NUM_MCLK_RATIOS		ARRAY_SIZE(cs4270_mode_ratios)
 
+static int cs4270_reg_is_readable(unsigned int reg)
+{
+	return (reg >= CS4270_FIRSTREG) && (reg <= CS4270_LASTREG);
+}
+
+static int cs4270_reg_is_volatile(unsigned int reg)
+{
+	/* Unreadable registers are considered volatile */
+	if ((reg < CS4270_FIRSTREG) || (reg > CS4270_LASTREG))
+		return 1;
+
+	return reg == CS4270_CHIPID;
+}
+
 /**
  * cs4270_set_dai_sysclk - determine the CS4270 samples rates.
  * @codec_dai: the codec DAI
@@ -264,97 +292,6 @@
 }
 
 /**
- * cs4270_fill_cache - pre-fill the CS4270 register cache.
- * @codec: the codec for this CS4270
- *
- * This function fills in the CS4270 register cache by reading the register
- * values from the hardware.
- *
- * This CS4270 registers are cached to avoid excessive I2C I/O operations.
- * After the initial read to pre-fill the cache, the CS4270 never updates
- * the register values, so we won't have a cache coherency problem.
- *
- * We use the auto-increment feature of the CS4270 to read all registers in
- * one shot.
- */
-static int cs4270_fill_cache(struct snd_soc_codec *codec)
-{
-	u8 *cache = codec->reg_cache;
-	struct i2c_client *i2c_client = codec->control_data;
-	s32 length;
-
-	length = i2c_smbus_read_i2c_block_data(i2c_client,
-		CS4270_FIRSTREG | CS4270_I2C_INCR, CS4270_NUMREGS, cache);
-
-	if (length != CS4270_NUMREGS) {
-		dev_err(codec->dev, "i2c read failure, addr=0x%x\n",
-		       i2c_client->addr);
-		return -EIO;
-	}
-
-	return 0;
-}
-
-/**
- * cs4270_read_reg_cache - read from the CS4270 register cache.
- * @codec: the codec for this CS4270
- * @reg: the register to read
- *
- * This function returns the value for a given register.  It reads only from
- * the register cache, not the hardware itself.
- *
- * This CS4270 registers are cached to avoid excessive I2C I/O operations.
- * After the initial read to pre-fill the cache, the CS4270 never updates
- * the register values, so we won't have a cache coherency problem.
- */
-static unsigned int cs4270_read_reg_cache(struct snd_soc_codec *codec,
-	unsigned int reg)
-{
-	u8 *cache = codec->reg_cache;
-
-	if ((reg < CS4270_FIRSTREG) || (reg > CS4270_LASTREG))
-		return -EIO;
-
-	return cache[reg - CS4270_FIRSTREG];
-}
-
-/**
- * cs4270_i2c_write - write to a CS4270 register via the I2C bus.
- * @codec: the codec for this CS4270
- * @reg: the register to write
- * @value: the value to write to the register
- *
- * This function writes the given value to the given CS4270 register, and
- * also updates the register cache.
- *
- * Note that we don't use the hw_write function pointer of snd_soc_codec.
- * That's because it's too clunky: the hw_write_t prototype does not match
- * i2c_smbus_write_byte_data(), and it's just another layer of overhead.
- */
-static int cs4270_i2c_write(struct snd_soc_codec *codec, unsigned int reg,
-			    unsigned int value)
-{
-	u8 *cache = codec->reg_cache;
-
-	if ((reg < CS4270_FIRSTREG) || (reg > CS4270_LASTREG))
-		return -EIO;
-
-	/* Only perform an I2C operation if the new value is different */
-	if (cache[reg - CS4270_FIRSTREG] != value) {
-		struct i2c_client *client = codec->control_data;
-		if (i2c_smbus_write_byte_data(client, reg, value)) {
-			dev_err(codec->dev, "i2c write failed\n");
-			return -EIO;
-		}
-
-		/* We've written to the hardware, so update the cache */
-		cache[reg - CS4270_FIRSTREG] = value;
-	}
-
-	return 0;
-}
-
-/**
  * cs4270_hw_params - program the CS4270 with the given hardware parameters.
  * @substream: the audio stream
  * @params: the hardware parameters to set
@@ -551,15 +488,16 @@
 static int cs4270_probe(struct snd_soc_codec *codec)
 {
 	struct cs4270_private *cs4270 = snd_soc_codec_get_drvdata(codec);
-	int i, ret, reg;
+	int i, ret;
 
 	codec->control_data = cs4270->control_data;
 
-	/* The I2C interface is set up, so pre-fill our register cache */
-
-	ret = cs4270_fill_cache(codec);
+	/* Tell ASoC what kind of I/O to use to read the registers.  ASoC will
+	 * then do the I2C transactions itself.
+	 */
+	ret = snd_soc_codec_set_cache_io(codec, 8, 8, cs4270->control_type);
 	if (ret < 0) {
-		dev_err(codec->dev, "failed to fill register cache\n");
+		dev_err(codec->dev, "failed to set cache I/O (ret=%i)\n", ret);
 		return ret;
 	}
 
@@ -568,10 +506,7 @@
 	 * this feature disabled by default.  An application (e.g. alsactl) can
 	 * re-enable it by using the controls.
 	 */
-
-	reg = cs4270_read_reg_cache(codec, CS4270_MUTE);
-	reg &= ~CS4270_MUTE_AUTO;
-	ret = cs4270_i2c_write(codec, CS4270_MUTE, reg);
+	ret = snd_soc_update_bits(codec, CS4270_MUTE, CS4270_MUTE_AUTO, 0);
 	if (ret < 0) {
 		dev_err(codec->dev, "i2c write failed\n");
 		return ret;
@@ -582,10 +517,8 @@
 	 * playback has started.  An application (e.g. alsactl) can
 	 * re-enable it by using the controls.
 	 */
-
-	reg = cs4270_read_reg_cache(codec, CS4270_TRANS);
-	reg &= ~(CS4270_TRANS_SOFT | CS4270_TRANS_ZERO);
-	ret = cs4270_i2c_write(codec, CS4270_TRANS, reg);
+	ret = snd_soc_update_bits(codec, CS4270_TRANS,
+		CS4270_TRANS_SOFT | CS4270_TRANS_ZERO, 0);
 	if (ret < 0) {
 		dev_err(codec->dev, "i2c write failed\n");
 		return ret;
@@ -708,15 +641,16 @@
  * Assign this variable to the codec_dev field of the machine driver's
  * snd_soc_device structure.
  */
-static struct snd_soc_codec_driver soc_codec_device_cs4270 = {
-	.probe =	cs4270_probe,
-	.remove =	cs4270_remove,
-	.suspend =	cs4270_soc_suspend,
-	.resume =	cs4270_soc_resume,
-	.read = cs4270_read_reg_cache,
-	.write = cs4270_i2c_write,
-	.reg_cache_size = CS4270_NUMREGS,
-	.reg_word_size = sizeof(u8),
+static const struct snd_soc_codec_driver soc_codec_device_cs4270 = {
+	.probe =		cs4270_probe,
+	.remove =		cs4270_remove,
+	.suspend =		cs4270_soc_suspend,
+	.resume =		cs4270_soc_resume,
+	.volatile_register =	cs4270_reg_is_volatile,
+	.readable_register =	cs4270_reg_is_readable,
+	.reg_cache_size =	CS4270_LASTREG + 1,
+	.reg_word_size =	sizeof(u8),
+	.reg_cache_default =	cs4270_default_reg_cache,
 };
 
 /**
diff --git a/sound/soc/codecs/cs42l51.c b/sound/soc/codecs/cs42l51.c
index cb086ea..8fb7070 100644
--- a/sound/soc/codecs/cs42l51.c
+++ b/sound/soc/codecs/cs42l51.c
@@ -26,7 +26,6 @@
 #include <linux/slab.h>
 #include <sound/core.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/tlv.h>
 #include <sound/initval.h>
 #include <sound/pcm_params.h>
@@ -47,7 +46,6 @@
 	unsigned int mclk;
 	unsigned int audio_mode;	/* The mode (I2S or left-justified) */
 	enum master_slave_mode func;
-	u8 reg_cache[CS42L51_NUMREGS];
 };
 
 #define CS42L51_FORMATS ( \
@@ -519,6 +517,7 @@
 static int cs42l51_probe(struct snd_soc_codec *codec)
 {
 	struct cs42l51_private *cs42l51 = snd_soc_codec_get_drvdata(codec);
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 	int ret, reg;
 
 	codec->control_data = cs42l51->control_data;
@@ -550,9 +549,9 @@
 
 	snd_soc_add_controls(codec, cs42l51_snd_controls,
 		ARRAY_SIZE(cs42l51_snd_controls));
-	snd_soc_dapm_new_controls(codec, cs42l51_dapm_widgets,
+	snd_soc_dapm_new_controls(dapm, cs42l51_dapm_widgets,
 		ARRAY_SIZE(cs42l51_dapm_widgets));
-	snd_soc_dapm_add_routes(codec, cs42l51_routes,
+	snd_soc_dapm_add_routes(dapm, cs42l51_routes,
 		ARRAY_SIZE(cs42l51_routes));
 
 	return 0;
diff --git a/sound/soc/codecs/cx20442.c b/sound/soc/codecs/cx20442.c
index e8d27c8..03d1e86 100644
--- a/sound/soc/codecs/cx20442.c
+++ b/sound/soc/codecs/cx20442.c
@@ -18,7 +18,7 @@
 
 #include <sound/core.h>
 #include <sound/initval.h>
-#include <sound/soc-dapm.h>
+#include <sound/soc.h>
 
 #include "cx20442.h"
 
@@ -26,7 +26,6 @@
 struct cx20442_priv {
 	enum snd_soc_control_type control_type;
 	void *control_data;
-	u8 reg_cache[1];
 };
 
 #define CX20442_PM		0x0
@@ -89,10 +88,11 @@
 
 static int cx20442_add_widgets(struct snd_soc_codec *codec)
 {
-	snd_soc_dapm_new_controls(codec, cx20442_dapm_widgets,
-				  ARRAY_SIZE(cx20442_dapm_widgets));
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 
-	snd_soc_dapm_add_routes(codec, cx20442_audio_map,
+	snd_soc_dapm_new_controls(dapm, cx20442_dapm_widgets,
+				  ARRAY_SIZE(cx20442_dapm_widgets));
+	snd_soc_dapm_add_routes(dapm, cx20442_audio_map,
 				ARRAY_SIZE(cx20442_audio_map));
 
 	return 0;
@@ -263,7 +263,7 @@
 	/* Prevent the codec driver from further accessing the modem */
 	codec->hw_write = NULL;
 	cx20442->control_data = NULL;
-	codec->pop_time = 0;
+	codec->card->pop_time = 0;
 }
 
 /* Line discipline .hangup() */
@@ -291,7 +291,7 @@
 		/* Set up codec driver access to modem controls */
 		cx20442->control_data = tty;
 		codec->hw_write = (hw_write_t)tty->ops->write;
-		codec->pop_time = 1;
+		codec->card->pop_time = 1;
 	}
 }
 
@@ -348,7 +348,7 @@
 
 	cx20442->control_data = NULL;
 	codec->hw_write = NULL;
-	codec->pop_time = 0;
+	codec->card->pop_time = 0;
 
 	return 0;
 }
diff --git a/sound/soc/codecs/da7210.c b/sound/soc/codecs/da7210.c
index 58bb9b9..92fd9d7 100644
--- a/sound/soc/codecs/da7210.c
+++ b/sound/soc/codecs/da7210.c
@@ -21,7 +21,7 @@
 #include <linux/slab.h>
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
-#include <sound/soc-dapm.h>
+#include <sound/soc.h>
 #include <sound/initval.h>
 #include <sound/tlv.h>
 
diff --git a/sound/soc/codecs/dmic.c b/sound/soc/codecs/dmic.c
new file mode 100644
index 0000000..57e9dac
--- /dev/null
+++ b/sound/soc/codecs/dmic.c
@@ -0,0 +1,81 @@
+/*
+ * dmic.c  --  SoC audio for Generic Digital MICs
+ *
+ * Author: Liam Girdwood <lrg@slimlogic.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+
+static struct snd_soc_dai_driver dmic_dai = {
+	.name = "dmic-hifi",
+	.capture = {
+		.stream_name = "Capture",
+		.channels_min = 1,
+		.channels_max = 8,
+		.rates = SNDRV_PCM_RATE_CONTINUOUS,
+		.formats = SNDRV_PCM_FMTBIT_S32_LE
+			| SNDRV_PCM_FMTBIT_S24_LE
+			| SNDRV_PCM_FMTBIT_S16_LE,
+	},
+};
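+
+/*
+ * A machine driver picks this codec up through a dai_link; a minimal
+ * sketch with hypothetical board names (not part of this patch; the
+ * cpu_dai_name and platform_name fields are board-specific and omitted):
+ *
+ *	static struct snd_soc_dai_link board_dmic_dai = {
+ *		.name		= "DMIC",
+ *		.stream_name	= "DMIC Capture",
+ *		.codec_dai_name	= "dmic-hifi",
+ *		.codec_name	= "dmic-codec",
+ *	};
+ */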
+
+static struct snd_soc_codec_driver soc_dmic = {};
+
+static int __devinit dmic_dev_probe(struct platform_device *pdev)
+{
+	return snd_soc_register_codec(&pdev->dev,
+			&soc_dmic, &dmic_dai, 1);
+}
+
+static int __devexit dmic_dev_remove(struct platform_device *pdev)
+{
+	snd_soc_unregister_codec(&pdev->dev);
+	return 0;
+}
+
+MODULE_ALIAS("platform:dmic-codec");
+
+static struct platform_driver dmic_driver = {
+	.driver = {
+		.name = "dmic-codec",
+		.owner = THIS_MODULE,
+	},
+	.probe = dmic_dev_probe,
+	.remove = __devexit_p(dmic_dev_remove),
+};
+
+static int __init dmic_init(void)
+{
+	return platform_driver_register(&dmic_driver);
+}
+module_init(dmic_init);
+
+static void __exit dmic_exit(void)
+{
+	platform_driver_unregister(&dmic_driver);
+}
+module_exit(dmic_exit);
+
+MODULE_DESCRIPTION("Generic DMIC driver");
+MODULE_AUTHOR("Liam Girdwood <lrg@slimlogic.co.uk>");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/jz4740.c b/sound/soc/codecs/jz4740.c
index 16253ec..f7cd346f 100644
--- a/sound/soc/codecs/jz4740.c
+++ b/sound/soc/codecs/jz4740.c
@@ -22,7 +22,6 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/initval.h>
-#include <sound/soc-dapm.h>
 #include <sound/soc.h>
 
 #define JZ4740_REG_CODEC_1 0x0
@@ -266,7 +265,7 @@
 		break;
 	case SND_SOC_BIAS_STANDBY:
 		/* The only way to clear the suspend flag is to reset the codec */
-		if (codec->bias_level == SND_SOC_BIAS_OFF)
+		if (codec->dapm.bias_level == SND_SOC_BIAS_OFF)
 			jz4740_codec_wakeup(codec);
 
 		mask = JZ4740_CODEC_1_VREF_DISABLE |
@@ -288,23 +287,25 @@
 		break;
 	}
 
-	codec->bias_level = level;
+	codec->dapm.bias_level = level;
 
 	return 0;
 }
 
 static int jz4740_codec_dev_probe(struct snd_soc_codec *codec)
 {
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
+
 	snd_soc_update_bits(codec, JZ4740_REG_CODEC_1,
 			JZ4740_CODEC_1_SW2_ENABLE, JZ4740_CODEC_1_SW2_ENABLE);
 
 	snd_soc_add_controls(codec, jz4740_codec_controls,
 		ARRAY_SIZE(jz4740_codec_controls));
 
-	snd_soc_dapm_new_controls(codec, jz4740_codec_dapm_widgets,
+	snd_soc_dapm_new_controls(dapm, jz4740_codec_dapm_widgets,
 		ARRAY_SIZE(jz4740_codec_dapm_widgets));
 
-	snd_soc_dapm_add_routes(codec, jz4740_codec_dapm_routes,
+	snd_soc_dapm_add_routes(dapm, jz4740_codec_dapm_routes,
 		ARRAY_SIZE(jz4740_codec_dapm_routes));
 
 	snd_soc_dapm_new_widgets(codec);
diff --git a/sound/soc/codecs/max98088.c b/sound/soc/codecs/max98088.c
index 6447dbb..89498f9 100644
--- a/sound/soc/codecs/max98088.c
+++ b/sound/soc/codecs/max98088.c
@@ -20,7 +20,6 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/initval.h>
 #include <sound/tlv.h>
 #include <linux/slab.h>
@@ -1229,15 +1228,17 @@
 
 static int max98088_add_widgets(struct snd_soc_codec *codec)
 {
-       snd_soc_dapm_new_controls(codec, max98088_dapm_widgets,
+       struct snd_soc_dapm_context *dapm = &codec->dapm;
+
+       snd_soc_dapm_new_controls(dapm, max98088_dapm_widgets,
                                  ARRAY_SIZE(max98088_dapm_widgets));
 
-       snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+       snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 
        snd_soc_add_controls(codec, max98088_snd_controls,
                             ARRAY_SIZE(max98088_snd_controls));
 
-       snd_soc_dapm_new_widgets(codec);
+       snd_soc_dapm_new_widgets(dapm);
        return 0;
 }
 
@@ -1622,7 +1623,7 @@
                break;
 
        case SND_SOC_BIAS_STANDBY:
-               if (codec->bias_level == SND_SOC_BIAS_OFF)
+               if (codec->dapm.bias_level == SND_SOC_BIAS_OFF)
                        max98088_sync_cache(codec);
 
                snd_soc_update_bits(codec, M98088_REG_4C_PWR_EN_IN,
@@ -1635,7 +1636,7 @@
                codec->cache_sync = 1;
                break;
        }
-       codec->bias_level = level;
+       codec->dapm.bias_level = level;
        return 0;
 }
 
@@ -1957,7 +1958,7 @@
                return ret;
        }
 
-       /* initalize private data */
+       /* initialize private data */
 
        max98088->sysclk = (unsigned)-1;
        max98088->eq_textcnt = 0;
diff --git a/sound/soc/codecs/ssm2602.c b/sound/soc/codecs/ssm2602.c
index 6f38d61..2727bef 100644
--- a/sound/soc/codecs/ssm2602.c
+++ b/sound/soc/codecs/ssm2602.c
@@ -38,7 +38,6 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/initval.h>
 
 #include "ssm2602.h"
@@ -207,10 +206,11 @@
 
 static int ssm2602_add_widgets(struct snd_soc_codec *codec)
 {
-	snd_soc_dapm_new_controls(codec, ssm2602_dapm_widgets,
-				  ARRAY_SIZE(ssm2602_dapm_widgets));
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 
-	snd_soc_dapm_add_routes(codec, audio_conn, ARRAY_SIZE(audio_conn));
+	snd_soc_dapm_new_controls(dapm, ssm2602_dapm_widgets,
+				  ARRAY_SIZE(ssm2602_dapm_widgets));
+	snd_soc_dapm_add_routes(dapm, audio_conn, ARRAY_SIZE(audio_conn));
 
 	return 0;
 }
@@ -493,7 +493,7 @@
 		break;
 
 	}
-	codec->bias_level = level;
+	codec->dapm.bias_level = level;
 	return 0;
 }
 
diff --git a/sound/soc/codecs/stac9766.c b/sound/soc/codecs/stac9766.c
index 061f9e5..78b2b50 100644
--- a/sound/soc/codecs/stac9766.c
+++ b/sound/soc/codecs/stac9766.c
@@ -236,7 +236,7 @@
 		stac9766_ac97_write(codec, AC97_POWERDOWN, 0xffff);
 		break;
 	}
-	codec->bias_level = level;
+	codec->dapm.bias_level = level;
 	return 0;
 }
 
diff --git a/sound/soc/codecs/tlv320aic23.c b/sound/soc/codecs/tlv320aic23.c
index e8652b1..54a30ef 100644
--- a/sound/soc/codecs/tlv320aic23.c
+++ b/sound/soc/codecs/tlv320aic23.c
@@ -30,7 +30,6 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/tlv.h>
 #include <sound/initval.h>
 
@@ -391,11 +390,12 @@
 
 static int tlv320aic23_add_widgets(struct snd_soc_codec *codec)
 {
-	snd_soc_dapm_new_controls(codec, tlv320aic23_dapm_widgets,
-				  ARRAY_SIZE(tlv320aic23_dapm_widgets));
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 
+	snd_soc_dapm_new_controls(dapm, tlv320aic23_dapm_widgets,
+				  ARRAY_SIZE(tlv320aic23_dapm_widgets));
 	/* set up audio path interconnects */
-	snd_soc_dapm_add_routes(codec, intercon, ARRAY_SIZE(intercon));
+	snd_soc_dapm_add_routes(dapm, intercon, ARRAY_SIZE(intercon));
 
 	return 0;
 }
@@ -574,7 +574,7 @@
 		tlv320aic23_write(codec, TLV320AIC23_PWR, 0xffff);
 		break;
 	}
-	codec->bias_level = level;
+	codec->dapm.bias_level = level;
 	return 0;
 }
 
diff --git a/sound/soc/codecs/tlv320aic26.c b/sound/soc/codecs/tlv320aic26.c
index 6b7d71e..e2a7608 100644
--- a/sound/soc/codecs/tlv320aic26.c
+++ b/sound/soc/codecs/tlv320aic26.c
@@ -18,7 +18,6 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/initval.h>
 
 #include "tlv320aic26.h"
@@ -31,7 +30,6 @@
 struct aic26 {
 	struct spi_device *spi;
 	struct snd_soc_codec codec;
-	u16 reg_cache[AIC26_NUM_REGS];	/* shadow registers */
 	int master;
 	int datfm;
 	int mclk;
@@ -355,7 +353,6 @@
  */
 static int aic26_probe(struct snd_soc_codec *codec)
 {
-	struct aic26 *aic26 = snd_soc_codec_get_drvdata(codec);
 	int ret, err, i, reg;
 
 	dev_info(codec->dev, "Probing AIC26 SoC CODEC driver\n");
@@ -373,7 +370,7 @@
 	aic26_reg_write(codec, AIC26_REG_AUDIO_CTRL3, reg);
 
 	/* Fill register cache */
-	for (i = 0; i < ARRAY_SIZE(aic26->reg_cache); i++)
+	for (i = 0; i < codec->driver->reg_cache_size; i++)
 		aic26_reg_read(codec, i);
 
 	/* Register the sysfs files for debugging */
diff --git a/sound/soc/codecs/tlv320aic3x.c b/sound/soc/codecs/tlv320aic3x.c
index 77b8f9a..3bedab2 100644
--- a/sound/soc/codecs/tlv320aic3x.c
+++ b/sound/soc/codecs/tlv320aic3x.c
@@ -46,7 +46,6 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/initval.h>
 #include <sound/tlv.h>
 #include <sound/tlv320aic3x.h>
@@ -61,6 +60,8 @@
 	"DRVDD",	/* ADC Analog and Output Driver Voltage */
 };
 
+static LIST_HEAD(reset_list);
+
 struct aic3x_priv;
 
 struct aic3x_disable_nb {
@@ -77,6 +78,7 @@
 	struct aic3x_setup_data *setup;
 	void *control_data;
 	unsigned int sysclk;
+	struct list_head list;
 	int master;
 	int gpio_reset;
 	int power;
@@ -183,7 +185,7 @@
 
 	if (snd_soc_test_bits(widget->codec, reg, val_mask, val)) {
 		/* find dapm widget path assoc with kcontrol */
-		list_for_each_entry(path, &widget->codec->dapm_paths, list) {
+		list_for_each_entry(path, &widget->dapm->card->paths, list) {
 			if (path->kcontrol != kcontrol)
 				continue;
 
@@ -199,7 +201,7 @@
 		}
 
 		if (found)
-			snd_soc_dapm_sync(widget->codec);
+			snd_soc_dapm_sync(widget->dapm);
 	}
 
 	ret = snd_soc_update_bits(widget->codec, reg, val_mask, val);
@@ -788,17 +790,19 @@
 static int aic3x_add_widgets(struct snd_soc_codec *codec)
 {
 	struct aic3x_priv *aic3x = snd_soc_codec_get_drvdata(codec);
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 
-	snd_soc_dapm_new_controls(codec, aic3x_dapm_widgets,
+	snd_soc_dapm_new_controls(dapm, aic3x_dapm_widgets,
 				  ARRAY_SIZE(aic3x_dapm_widgets));
 
 	/* set up audio path interconnects */
-	snd_soc_dapm_add_routes(codec, intercon, ARRAY_SIZE(intercon));
+	snd_soc_dapm_add_routes(dapm, intercon, ARRAY_SIZE(intercon));
 
 	if (aic3x->model == AIC3X_MODEL_3007) {
-		snd_soc_dapm_new_controls(codec, aic3007_dapm_widgets,
+		snd_soc_dapm_new_controls(dapm, aic3007_dapm_widgets,
 			ARRAY_SIZE(aic3007_dapm_widgets));
-		snd_soc_dapm_add_routes(codec, intercon_3007, ARRAY_SIZE(intercon_3007));
+		snd_soc_dapm_add_routes(dapm, intercon_3007,
+					ARRAY_SIZE(intercon_3007));
 	}
 
 	return 0;
@@ -1075,7 +1079,7 @@
 		 * Put codec to reset and require cache sync as at least one
 		 * of the supplies was disabled
 		 */
-		if (aic3x->gpio_reset >= 0)
+		if (gpio_is_valid(aic3x->gpio_reset))
 			gpio_set_value(aic3x->gpio_reset, 0);
 		aic3x->codec->cache_sync = 1;
 	}
@@ -1102,7 +1106,7 @@
 		if (!codec->cache_sync)
 			goto out;
 
-		if (aic3x->gpio_reset >= 0) {
+		if (gpio_is_valid(aic3x->gpio_reset)) {
 			udelay(1);
 			gpio_set_value(aic3x->gpio_reset, 1);
 		}
@@ -1135,7 +1139,7 @@
 	case SND_SOC_BIAS_ON:
 		break;
 	case SND_SOC_BIAS_PREPARE:
-		if (codec->bias_level == SND_SOC_BIAS_STANDBY &&
+		if (codec->dapm.bias_level == SND_SOC_BIAS_STANDBY &&
 		    aic3x->master) {
 			/* enable pll */
 			reg = snd_soc_read(codec, AIC3X_PLL_PROGA_REG);
@@ -1146,7 +1150,7 @@
 	case SND_SOC_BIAS_STANDBY:
 		if (!aic3x->power)
 			aic3x_set_power(codec, 1);
-		if (codec->bias_level == SND_SOC_BIAS_PREPARE &&
+		if (codec->dapm.bias_level == SND_SOC_BIAS_PREPARE &&
 		    aic3x->master) {
 			/* disable pll */
 			reg = snd_soc_read(codec, AIC3X_PLL_PROGA_REG);
@@ -1159,7 +1163,7 @@
 			aic3x_set_power(codec, 0);
 		break;
 	}
-	codec->bias_level = level;
+	codec->dapm.bias_level = level;
 
 	return 0;
 }
@@ -1344,14 +1348,28 @@
 	return 0;
 }
 
+static bool aic3x_is_shared_reset(struct aic3x_priv *aic3x)
+{
+	struct aic3x_priv *a;
+
+	list_for_each_entry(a, &reset_list, list) {
+		if (gpio_is_valid(aic3x->gpio_reset) &&
+		    aic3x->gpio_reset == a->gpio_reset)
+			return true;
+	}
+
+	return false;
+}
+
 static int aic3x_probe(struct snd_soc_codec *codec)
 {
 	struct aic3x_priv *aic3x = snd_soc_codec_get_drvdata(codec);
 	int ret, i;
 
+	INIT_LIST_HEAD(&aic3x->list);
 	codec->control_data = aic3x->control_data;
 	aic3x->codec = codec;
-	codec->idle_bias_off = 1;
+	codec->dapm.idle_bias_off = 1;
 
 	ret = snd_soc_codec_set_cache_io(codec, 8, 8, aic3x->control_type);
 	if (ret != 0) {
@@ -1359,7 +1377,8 @@
 		return ret;
 	}
 
-	if (aic3x->gpio_reset >= 0) {
+	if (gpio_is_valid(aic3x->gpio_reset) &&
+	    !aic3x_is_shared_reset(aic3x)) {
 		ret = gpio_request(aic3x->gpio_reset, "tlv320aic3x reset");
 		if (ret != 0)
 			goto err_gpio;
@@ -1405,6 +1424,7 @@
 		snd_soc_add_controls(codec, &aic3x_classd_amp_gain_ctrl, 1);
 
 	aic3x_add_widgets(codec);
+	list_add(&aic3x->list, &reset_list);
 
 	return 0;
 
@@ -1414,10 +1434,10 @@
 					      &aic3x->disable_nb[i].nb);
 	regulator_bulk_free(ARRAY_SIZE(aic3x->supplies), aic3x->supplies);
 err_get:
-	if (aic3x->gpio_reset >= 0)
+	if (gpio_is_valid(aic3x->gpio_reset) &&
+	    !aic3x_is_shared_reset(aic3x))
 		gpio_free(aic3x->gpio_reset);
 err_gpio:
-	kfree(aic3x);
 	return ret;
 }
 
@@ -1427,7 +1447,9 @@
 	int i;
 
 	aic3x_set_bias_level(codec, SND_SOC_BIAS_OFF);
-	if (aic3x->gpio_reset >= 0) {
+	list_del(&aic3x->list);
+	if (gpio_is_valid(aic3x->gpio_reset) &&
+	    !aic3x_is_shared_reset(aic3x)) {
 		gpio_set_value(aic3x->gpio_reset, 0);
 		gpio_free(aic3x->gpio_reset);
 	}
@@ -1523,21 +1545,6 @@
 	.remove = aic3x_i2c_remove,
 	.id_table = aic3x_i2c_id,
 };
-
-static inline void aic3x_i2c_init(void)
-{
-	int ret;
-
-	ret = i2c_add_driver(&aic3x_i2c_driver);
-	if (ret)
-		printk(KERN_ERR "%s: error regsitering i2c driver, %d\n",
-		       __func__, ret);
-}
-
-static inline void aic3x_i2c_exit(void)
-{
-	i2c_del_driver(&aic3x_i2c_driver);
-}
 #endif
 
 static int __init aic3x_modinit(void)
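
tlv320aic3x above gains a global reset_list so that boards wiring several aic3x instances to one reset GPIO request and free that line only once. A hedged sketch of how probe and remove pair up around the new helper (same identifiers as in the hunks, error handling trimmed):

	/* probe: only the first owner of the line requests it,
	 * every instance joins reset_list afterwards */
	if (gpio_is_valid(aic3x->gpio_reset) && !aic3x_is_shared_reset(aic3x))
		ret = gpio_request(aic3x->gpio_reset, "tlv320aic3x reset");
	list_add(&aic3x->list, &reset_list);

	/* remove: leave the list first, then free the GPIO only if no other
	 * registered instance still shares it */
	list_del(&aic3x->list);
	if (gpio_is_valid(aic3x->gpio_reset) && !aic3x_is_shared_reset(aic3x)) {
		gpio_set_value(aic3x->gpio_reset, 0);
		gpio_free(aic3x->gpio_reset);
	}
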
diff --git a/sound/soc/codecs/tlv320dac33.c b/sound/soc/codecs/tlv320dac33.c
index c5ab8c8..71d7be8 100644
--- a/sound/soc/codecs/tlv320dac33.c
+++ b/sound/soc/codecs/tlv320dac33.c
@@ -36,21 +36,21 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/initval.h>
 #include <sound/tlv.h>
 
 #include <sound/tlv320dac33-plat.h>
 #include "tlv320dac33.h"
 
-#define DAC33_BUFFER_SIZE_BYTES		24576	/* bytes, 12288 16 bit words,
-						 * 6144 stereo */
-#define DAC33_BUFFER_SIZE_SAMPLES	6144
-
-#define NSAMPLE_MAX		5700
-
-#define MODE7_LTHR		10
-#define MODE7_UTHR		(DAC33_BUFFER_SIZE_SAMPLES - 10)
+/*
+ * The internal FIFO is 24576 bytes long
+ * It can be configured to hold 16bit or 24bit samples
+ * In 16bit configuration the FIFO can hold 6144 stereo samples
+ * In 24bit configuration the FIFO can hold 4096 stereo samples
+ */
+#define DAC33_FIFO_SIZE_16BIT	6144
+#define DAC33_FIFO_SIZE_24BIT	4096
+#define DAC33_MODE7_MARGIN	10	/* Safety margin for FIFO in Mode7 */
 
 #define BURST_BASEFREQ_HZ	49152000
 
@@ -100,16 +100,11 @@
 	unsigned int refclk;
 
 	unsigned int alarm_threshold;	/* set to be half of LATENCY_TIME_MS */
-	unsigned int nsample_min;	/* nsample should not be lower than
-					 * this */
-	unsigned int nsample_max;	/* nsample should not be higher than
-					 * this */
 	enum dac33_fifo_modes fifo_mode;/* FIFO mode selection */
+	unsigned int fifo_size;		/* Size of the FIFO in samples */
 	unsigned int nsample;		/* burst read amount from host */
 	int mode1_latency;		/* latency caused by the i2c writes in
 					 * us */
-	int auto_fifo_config; 		/* Configure the FIFO based on the
-					 * period size */
 	u8 burst_bclkdiv;		/* BCLK divider value in burst mode */
 	unsigned int burst_rate;	/* Interface speed in Burst modes */
 
@@ -303,7 +298,6 @@
 	if (unlikely(!dac33->chip_power))
 		return;
 
-	/* 44-46: DAC Control Registers */
 	/* A : DAC sample rate Fsref/1.5 */
 	dac33_write(codec, DAC33_DAC_CTRL_A, DAC33_DACRATE(0));
 	/* B : DAC src=normal, not muted */
@@ -316,8 +310,6 @@
 	 clock source = internal osc (?) */
 	dac33_write(codec, DAC33_ANA_VOL_SOFT_STEP_CTRL, DAC33_VOLCLKEN);
 
-	dac33_write(codec, DAC33_PWR_CTRL, DAC33_PDNALLB);
-
 	/* Restore only selected registers (gains mostly) */
 	dac33_write(codec, DAC33_LDAC_DIG_VOL_CTRL,
 		    dac33_read_reg_cache(codec, DAC33_LDAC_DIG_VOL_CTRL));
@@ -328,6 +320,10 @@
 		    dac33_read_reg_cache(codec, DAC33_LINEL_TO_LLO_VOL));
 	dac33_write(codec, DAC33_LINER_TO_RLO_VOL,
 		    dac33_read_reg_cache(codec, DAC33_LINER_TO_RLO_VOL));
+
+	dac33_write(codec, DAC33_OUT_AMP_CTRL,
+		    dac33_read_reg_cache(codec, DAC33_OUT_AMP_CTRL));
+
 }
 
 static inline int dac33_read_id(struct snd_soc_codec *codec)
@@ -357,6 +353,21 @@
 	dac33_write(codec, DAC33_PWR_CTRL, reg);
 }
 
+static inline void dac33_disable_digital(struct snd_soc_codec *codec)
+{
+	u8 reg;
+
+	/* Stop the DAI clock */
+	reg = dac33_read_reg_cache(codec, DAC33_SER_AUDIOIF_CTRL_B);
+	reg &= ~DAC33_BCLKON;
+	dac33_write(codec, DAC33_SER_AUDIOIF_CTRL_B, reg);
+
+	/* Power down the Oscillator, and DACs */
+	reg = dac33_read_reg_cache(codec, DAC33_PWR_CTRL);
+	reg &= ~(DAC33_OSCPDNB | DAC33_DACRPDNB | DAC33_DACLPDNB);
+	dac33_write(codec, DAC33_PWR_CTRL, reg);
+}
+
 static int dac33_hard_power(struct snd_soc_codec *codec, int power)
 {
 	struct tlv320dac33_priv *dac33 = snd_soc_codec_get_drvdata(codec);
@@ -405,7 +416,7 @@
 	return ret;
 }
 
-static int playback_event(struct snd_soc_dapm_widget *w,
+static int dac33_playback_event(struct snd_soc_dapm_widget *w,
 		struct snd_kcontrol *kcontrol, int event)
 {
 	struct tlv320dac33_priv *dac33 = snd_soc_codec_get_drvdata(w->codec);
@@ -417,77 +428,13 @@
 			dac33_prepare_chip(dac33->substream);
 		}
 		break;
+	case SND_SOC_DAPM_POST_PMD:
+		dac33_disable_digital(w->codec);
+		break;
 	}
 	return 0;
 }
 
-static int dac33_get_nsample(struct snd_kcontrol *kcontrol,
-			 struct snd_ctl_elem_value *ucontrol)
-{
-	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
-	struct tlv320dac33_priv *dac33 = snd_soc_codec_get_drvdata(codec);
-
-	ucontrol->value.integer.value[0] = dac33->nsample;
-
-	return 0;
-}
-
-static int dac33_set_nsample(struct snd_kcontrol *kcontrol,
-			 struct snd_ctl_elem_value *ucontrol)
-{
-	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
-	struct tlv320dac33_priv *dac33 = snd_soc_codec_get_drvdata(codec);
-	int ret = 0;
-
-	if (dac33->nsample == ucontrol->value.integer.value[0])
-		return 0;
-
-	if (ucontrol->value.integer.value[0] < dac33->nsample_min ||
-	    ucontrol->value.integer.value[0] > dac33->nsample_max) {
-		ret = -EINVAL;
-	} else {
-		dac33->nsample = ucontrol->value.integer.value[0];
-		/* Re calculate the burst time */
-		dac33->mode1_us_burst = SAMPLES_TO_US(dac33->burst_rate,
-						      dac33->nsample);
-	}
-
-	return ret;
-}
-
-static int dac33_get_uthr(struct snd_kcontrol *kcontrol,
-			 struct snd_ctl_elem_value *ucontrol)
-{
-	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
-	struct tlv320dac33_priv *dac33 = snd_soc_codec_get_drvdata(codec);
-
-	ucontrol->value.integer.value[0] = dac33->uthr;
-
-	return 0;
-}
-
-static int dac33_set_uthr(struct snd_kcontrol *kcontrol,
-			 struct snd_ctl_elem_value *ucontrol)
-{
-	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
-	struct tlv320dac33_priv *dac33 = snd_soc_codec_get_drvdata(codec);
-	int ret = 0;
-
-	if (dac33->substream)
-		return -EBUSY;
-
-	if (dac33->uthr == ucontrol->value.integer.value[0])
-		return 0;
-
-	if (ucontrol->value.integer.value[0] < (MODE7_LTHR + 10) ||
-	    ucontrol->value.integer.value[0] > MODE7_UTHR)
-		ret = -EINVAL;
-	else
-		dac33->uthr = ucontrol->value.integer.value[0];
-
-	return ret;
-}
-
 static int dac33_get_fifo_mode(struct snd_kcontrol *kcontrol,
 			 struct snd_ctl_elem_value *ucontrol)
 {
@@ -572,13 +519,6 @@
 		 dac33_get_fifo_mode, dac33_set_fifo_mode),
 };
 
-static const struct snd_kcontrol_new dac33_fifo_snd_controls[] = {
-	SOC_SINGLE_EXT("nSample", 0, 0, 5900, 0,
-		dac33_get_nsample, dac33_set_nsample),
-	SOC_SINGLE_EXT("UTHR", 0, 0, MODE7_UTHR, 0,
-		 dac33_get_uthr, dac33_set_uthr),
-};
-
 /* Analog bypass */
 static const struct snd_kcontrol_new dac33_dapm_abypassl_control =
 	SOC_DAPM_SINGLE("Switch", DAC33_LINEL_TO_LLO_VOL, 7, 1, 1);
@@ -586,6 +526,25 @@
 static const struct snd_kcontrol_new dac33_dapm_abypassr_control =
 	SOC_DAPM_SINGLE("Switch", DAC33_LINER_TO_RLO_VOL, 7, 1, 1);
 
+/* LOP L/R invert selection */
+static const char *dac33_lr_lom_texts[] = {"DAC", "LOP"};
+
+static const struct soc_enum dac33_left_lom_enum =
+	SOC_ENUM_SINGLE(DAC33_OUT_AMP_CTRL, 3,
+			ARRAY_SIZE(dac33_lr_lom_texts),
+			dac33_lr_lom_texts);
+
+static const struct snd_kcontrol_new dac33_dapm_left_lom_control =
+SOC_DAPM_ENUM("Route", dac33_left_lom_enum);
+
+static const struct soc_enum dac33_right_lom_enum =
+	SOC_ENUM_SINGLE(DAC33_OUT_AMP_CTRL, 2,
+			ARRAY_SIZE(dac33_lr_lom_texts),
+			dac33_lr_lom_texts);
+
+static const struct snd_kcontrol_new dac33_dapm_right_lom_control =
+SOC_DAPM_ENUM("Route", dac33_right_lom_enum);
+
 static const struct snd_soc_dapm_widget dac33_dapm_widgets[] = {
 	SND_SOC_DAPM_OUTPUT("LEFT_LO"),
 	SND_SOC_DAPM_OUTPUT("RIGHT_LO"),
@@ -593,8 +552,8 @@
 	SND_SOC_DAPM_INPUT("LINEL"),
 	SND_SOC_DAPM_INPUT("LINER"),
 
-	SND_SOC_DAPM_DAC("DACL", "Left Playback", DAC33_LDAC_PWR_CTRL, 2, 0),
-	SND_SOC_DAPM_DAC("DACR", "Right Playback", DAC33_RDAC_PWR_CTRL, 2, 0),
+	SND_SOC_DAPM_DAC("DACL", "Left Playback", SND_SOC_NOPM, 0, 0),
+	SND_SOC_DAPM_DAC("DACR", "Right Playback", SND_SOC_NOPM, 0, 0),
 
 	/* Analog bypass */
 	SND_SOC_DAPM_SWITCH("Analog Left Bypass", SND_SOC_NOPM, 0, 0,
@@ -602,12 +561,30 @@
 	SND_SOC_DAPM_SWITCH("Analog Right Bypass", SND_SOC_NOPM, 0, 0,
 				&dac33_dapm_abypassr_control),
 
-	SND_SOC_DAPM_REG(snd_soc_dapm_mixer, "Output Left Amp Power",
+	SND_SOC_DAPM_MUX("Left LOM Inverted From", SND_SOC_NOPM, 0, 0,
+		&dac33_dapm_left_lom_control),
+	SND_SOC_DAPM_MUX("Right LOM Inverted From", SND_SOC_NOPM, 0, 0,
+		&dac33_dapm_right_lom_control),
+	/*
+	 * For the DAPM path when only the analog bypass path is enabled and the
+	 * LOP is inverted from the corresponding DAC side.
+	 * This is needed so we can attach the DAC power supply in this case.
+	 */
+	SND_SOC_DAPM_PGA("Left Bypass PGA", SND_SOC_NOPM, 0, 0, NULL, 0),
+	SND_SOC_DAPM_PGA("Right Bypass PGA", SND_SOC_NOPM, 0, 0, NULL, 0),
+
+	SND_SOC_DAPM_REG(snd_soc_dapm_mixer, "Output Left Amplifier",
 			 DAC33_OUT_AMP_PWR_CTRL, 6, 3, 3, 0),
-	SND_SOC_DAPM_REG(snd_soc_dapm_mixer, "Output Right Amp Power",
+	SND_SOC_DAPM_REG(snd_soc_dapm_mixer, "Output Right Amplifier",
 			 DAC33_OUT_AMP_PWR_CTRL, 4, 3, 3, 0),
 
-	SND_SOC_DAPM_PRE("Prepare Playback", playback_event),
+	SND_SOC_DAPM_SUPPLY("Left DAC Power",
+			    DAC33_LDAC_PWR_CTRL, 2, 0, NULL, 0),
+	SND_SOC_DAPM_SUPPLY("Right DAC Power",
+			    DAC33_RDAC_PWR_CTRL, 2, 0, NULL, 0),
+
+	SND_SOC_DAPM_PRE("Pre Playback", dac33_playback_event),
+	SND_SOC_DAPM_POST("Post Playback", dac33_playback_event),
 };
 
 static const struct snd_soc_dapm_route audio_map[] = {
@@ -615,24 +592,39 @@
 	{"Analog Left Bypass", "Switch", "LINEL"},
 	{"Analog Right Bypass", "Switch", "LINER"},
 
-	{"Output Left Amp Power", NULL, "DACL"},
-	{"Output Right Amp Power", NULL, "DACR"},
+	{"Output Left Amplifier", NULL, "DACL"},
+	{"Output Right Amplifier", NULL, "DACR"},
 
-	{"Output Left Amp Power", NULL, "Analog Left Bypass"},
-	{"Output Right Amp Power", NULL, "Analog Right Bypass"},
+	{"Left Bypass PGA", NULL, "Analog Left Bypass"},
+	{"Right Bypass PGA", NULL, "Analog Right Bypass"},
+
+	{"Left LOM Inverted From", "DAC", "Left Bypass PGA"},
+	{"Right LOM Inverted From", "DAC", "Right Bypass PGA"},
+	{"Left LOM Inverted From", "LOP", "Analog Left Bypass"},
+	{"Right LOM Inverted From", "LOP", "Analog Right Bypass"},
+
+	{"Output Left Amplifier", NULL, "Left LOM Inverted From"},
+	{"Output Right Amplifier", NULL, "Right LOM Inverted From"},
+
+	{"DACL", NULL, "Left DAC Power"},
+	{"DACR", NULL, "Right DAC Power"},
+
+	{"Left Bypass PGA", NULL, "Left DAC Power"},
+	{"Right Bypass PGA", NULL, "Right DAC Power"},
 
 	/* output */
-	{"LEFT_LO", NULL, "Output Left Amp Power"},
-	{"RIGHT_LO", NULL, "Output Right Amp Power"},
+	{"LEFT_LO", NULL, "Output Left Amplifier"},
+	{"RIGHT_LO", NULL, "Output Right Amplifier"},
 };
 
 static int dac33_add_widgets(struct snd_soc_codec *codec)
 {
-	snd_soc_dapm_new_controls(codec, dac33_dapm_widgets,
-				  ARRAY_SIZE(dac33_dapm_widgets));
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 
+	snd_soc_dapm_new_controls(dapm, dac33_dapm_widgets,
+				  ARRAY_SIZE(dac33_dapm_widgets));
 	/* set up audio path interconnects */
-	snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+	snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 
 	return 0;
 }
@@ -640,16 +632,18 @@
 static int dac33_set_bias_level(struct snd_soc_codec *codec,
 				enum snd_soc_bias_level level)
 {
+	struct tlv320dac33_priv *dac33 = snd_soc_codec_get_drvdata(codec);
 	int ret;
 
 	switch (level) {
 	case SND_SOC_BIAS_ON:
-		dac33_soft_power(codec, 1);
+		if (!dac33->substream)
+			dac33_soft_power(codec, 1);
 		break;
 	case SND_SOC_BIAS_PREPARE:
 		break;
 	case SND_SOC_BIAS_STANDBY:
-		if (codec->bias_level == SND_SOC_BIAS_OFF) {
+		if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
 			/* Coming from OFF, switch on the codec */
 			ret = dac33_hard_power(codec, 1);
 			if (ret != 0)
@@ -660,14 +654,14 @@
 		break;
 	case SND_SOC_BIAS_OFF:
 		/* Do not power off, when the codec is already off */
-		if (codec->bias_level == SND_SOC_BIAS_OFF)
+		if (codec->dapm.bias_level == SND_SOC_BIAS_OFF)
 			return 0;
 		ret = dac33_hard_power(codec, 0);
 		if (ret != 0)
 			return ret;
 		break;
 	}
-	codec->bias_level = level;
+	codec->dapm.bias_level = level;
 
 	return 0;
 }
@@ -705,7 +699,7 @@
 		spin_unlock_irq(&dac33->lock);
 
 		dac33_write16(codec, DAC33_PREFILL_MSB,
-				DAC33_THRREG(MODE7_LTHR));
+				DAC33_THRREG(DAC33_MODE7_MARGIN));
 
 		/* Enable Upper Threshold IRQ */
 		dac33_write(codec, DAC33_FIFO_IRQ_MASK, DAC33_MUT);
@@ -815,6 +809,8 @@
 	/* Stream started, save the substream pointer */
 	dac33->substream = substream;
 
+	snd_pcm_hw_constraint_msbits(substream->runtime, 0, 32, 24);
+
 	return 0;
 }
 
@@ -826,18 +822,17 @@
 	struct tlv320dac33_priv *dac33 = snd_soc_codec_get_drvdata(codec);
 
 	dac33->substream = NULL;
-
-	/* Reset the nSample restrictions */
-	dac33->nsample_min = 0;
-	dac33->nsample_max = NSAMPLE_MAX;
 }
 
+#define CALC_BURST_RATE(bclkdiv, bclk_per_sample) \
+	(BURST_BASEFREQ_HZ / bclkdiv / bclk_per_sample)
 static int dac33_hw_params(struct snd_pcm_substream *substream,
 			   struct snd_pcm_hw_params *params,
 			   struct snd_soc_dai *dai)
 {
 	struct snd_soc_pcm_runtime *rtd = substream->private_data;
 	struct snd_soc_codec *codec = rtd->codec;
+	struct tlv320dac33_priv *dac33 = snd_soc_codec_get_drvdata(codec);
 
 	/* Check parameters for validity */
 	switch (params_rate(params)) {
@@ -852,6 +847,12 @@
 
 	switch (params_format(params)) {
 	case SNDRV_PCM_FORMAT_S16_LE:
+		dac33->fifo_size = DAC33_FIFO_SIZE_16BIT;
+		dac33->burst_rate = CALC_BURST_RATE(dac33->burst_bclkdiv, 32);
+		break;
+	case SNDRV_PCM_FORMAT_S32_LE:
+		dac33->fifo_size = DAC33_FIFO_SIZE_24BIT;
+		dac33->burst_rate = CALC_BURST_RATE(dac33->burst_bclkdiv, 64);
 		break;
 	default:
 		dev_err(codec->dev, "unsupported format %d\n",
@@ -906,6 +907,9 @@
 		aictrl_a |= (DAC33_NCYCL_16 | DAC33_WLEN_16);
 		fifoctrl_a |= DAC33_WIDTH;
 		break;
+	case SNDRV_PCM_FORMAT_S32_LE:
+		aictrl_a |= (DAC33_NCYCL_32 | DAC33_WLEN_24);
+		break;
 	default:
 		dev_err(codec->dev, "unsupported format %d\n",
 			substream->runtime->format);
@@ -1040,7 +1044,10 @@
 		dac33_write(codec, DAC33_SER_AUDIOIF_CTRL_C,
 							dac33->burst_bclkdiv);
 	else
-		dac33_write(codec, DAC33_SER_AUDIOIF_CTRL_C, 32);
+		if (substream->runtime->format == SNDRV_PCM_FORMAT_S16_LE)
+			dac33_write(codec, DAC33_SER_AUDIOIF_CTRL_C, 32);
+		else
+			dac33_write(codec, DAC33_SER_AUDIOIF_CTRL_C, 16);
 
 	switch (dac33->fifo_mode) {
 	case DAC33_FIFO_MODE1:
@@ -1053,7 +1060,8 @@
 		 * at the bottom, and also at the top of the FIFO
 		 */
 		dac33_write16(codec, DAC33_UTHR_MSB, DAC33_THRREG(dac33->uthr));
-		dac33_write16(codec, DAC33_LTHR_MSB, DAC33_THRREG(MODE7_LTHR));
+		dac33_write16(codec, DAC33_LTHR_MSB,
+			      DAC33_THRREG(DAC33_MODE7_MARGIN));
 		break;
 	default:
 		break;
@@ -1082,42 +1090,21 @@
 		/* Number of samples under i2c latency */
 		dac33->alarm_threshold = US_TO_SAMPLES(rate,
 						dac33->mode1_latency);
-		nsample_limit = DAC33_BUFFER_SIZE_SAMPLES -
-				dac33->alarm_threshold;
+		nsample_limit = dac33->fifo_size - dac33->alarm_threshold;
 
-		if (dac33->auto_fifo_config) {
-			if (period_size <= dac33->alarm_threshold)
-				/*
-				 * Configure nSamaple to number of periods,
-				 * which covers the latency requironment.
-				 */
-				dac33->nsample = period_size *
-				       ((dac33->alarm_threshold / period_size) +
-				       (dac33->alarm_threshold % period_size ?
-				       1 : 0));
-			else if (period_size > nsample_limit)
-				dac33->nsample = nsample_limit;
-			else
-				dac33->nsample = period_size;
-		} else {
-			/* nSample time shall not be shorter than i2c latency */
-			dac33->nsample_min = dac33->alarm_threshold;
+		if (period_size <= dac33->alarm_threshold)
 			/*
-			 * nSample should not be bigger than alsa buffer minus
-			 * size of one period to avoid overruns
+			 * Configure nSample to the number of periods
+			 * which covers the latency requirement.
 			 */
-			dac33->nsample_max = substream->runtime->buffer_size -
-						period_size;
-
-			if (dac33->nsample_max > nsample_limit)
-				dac33->nsample_max = nsample_limit;
-
-			/* Correct the nSample if it is outside of the ranges */
-			if (dac33->nsample < dac33->nsample_min)
-				dac33->nsample = dac33->nsample_min;
-			if (dac33->nsample > dac33->nsample_max)
-				dac33->nsample = dac33->nsample_max;
-		}
+			dac33->nsample = period_size *
+				((dac33->alarm_threshold / period_size) +
+				(dac33->alarm_threshold % period_size ?
+				1 : 0));
+		else if (period_size > nsample_limit)
+			dac33->nsample = nsample_limit;
+		else
+			dac33->nsample = period_size;
 
 		dac33->mode1_us_burst = SAMPLES_TO_US(dac33->burst_rate,
 						      dac33->nsample);
@@ -1125,19 +1112,16 @@
 		dac33->t_stamp2 = 0;
 		break;
 	case DAC33_FIFO_MODE7:
-		if (dac33->auto_fifo_config) {
-			dac33->uthr = UTHR_FROM_PERIOD_SIZE(
-					period_size,
-					rate,
-					dac33->burst_rate) + 9;
-			if (dac33->uthr > MODE7_UTHR)
-				dac33->uthr = MODE7_UTHR;
-			if (dac33->uthr < (MODE7_LTHR + 10))
-				dac33->uthr = (MODE7_LTHR + 10);
-		}
+		dac33->uthr = UTHR_FROM_PERIOD_SIZE(period_size, rate,
+						    dac33->burst_rate) + 9;
+		if (dac33->uthr > (dac33->fifo_size - DAC33_MODE7_MARGIN))
+			dac33->uthr = dac33->fifo_size - DAC33_MODE7_MARGIN;
+		if (dac33->uthr < (DAC33_MODE7_MARGIN + 10))
+			dac33->uthr = (DAC33_MODE7_MARGIN + 10);
+
 		dac33->mode7_us_to_lthr =
 				SAMPLES_TO_US(substream->runtime->rate,
-					dac33->uthr - MODE7_LTHR + 1);
+					dac33->uthr - DAC33_MODE7_MARGIN + 1);
 		dac33->t_stamp1 = 0;
 		break;
 	default:
@@ -1255,8 +1239,8 @@
 			samples += (samples_in - samples_out);
 
 			if (likely(samples > 0))
-				delay = samples > DAC33_BUFFER_SIZE_SAMPLES ?
-					DAC33_BUFFER_SIZE_SAMPLES : samples;
+				delay = samples > dac33->fifo_size ?
+					dac33->fifo_size : samples;
 			else
 				delay = 0;
 		}
@@ -1308,7 +1292,7 @@
 			samples_in = US_TO_SAMPLES(
 					dac33->burst_rate,
 					time_delta);
-			delay = MODE7_LTHR + samples_in - samples_out;
+			delay = DAC33_MODE7_MARGIN + samples_in - samples_out;
 
 			if (unlikely(delay > uthr))
 				delay = uthr;
@@ -1415,7 +1399,7 @@
 
 	codec->control_data = dac33->control_data;
 	codec->hw_write = (hw_write_t) i2c_master_send;
-	codec->idle_bias_off = 1;
+	codec->dapm.idle_bias_off = 1;
 	dac33->codec = codec;
 
 	/* Read the tlv320dac33 ID registers */
@@ -1459,14 +1443,10 @@
 	snd_soc_add_controls(codec, dac33_snd_controls,
 			     ARRAY_SIZE(dac33_snd_controls));
 	/* Only add the FIFO controls, if we have valid IRQ number */
-	if (dac33->irq >= 0) {
+	if (dac33->irq >= 0)
 		snd_soc_add_controls(codec, dac33_mode_snd_controls,
 				     ARRAY_SIZE(dac33_mode_snd_controls));
-		/* FIFO usage controls only, if autoio config is not selected */
-		if (!dac33->auto_fifo_config)
-			snd_soc_add_controls(codec, dac33_fifo_snd_controls,
-					ARRAY_SIZE(dac33_fifo_snd_controls));
-	}
+
 	dac33_add_widgets(codec);
 
 err_power:
@@ -1515,7 +1495,7 @@
 
 #define DAC33_RATES	(SNDRV_PCM_RATE_44100 | \
 			 SNDRV_PCM_RATE_48000)
-#define DAC33_FORMATS	SNDRV_PCM_FMTBIT_S16_LE
+#define DAC33_FORMATS	(SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S32_LE)
 
 static struct snd_soc_dai_ops dac33_dai_ops = {
 	.startup	= dac33_startup,
@@ -1563,17 +1543,11 @@
 
 	dac33->power_gpio = pdata->power_gpio;
 	dac33->burst_bclkdiv = pdata->burst_bclkdiv;
-	/* Pre calculate the burst rate */
-	dac33->burst_rate = BURST_BASEFREQ_HZ / dac33->burst_bclkdiv / 32;
 	dac33->keep_bclk = pdata->keep_bclk;
-	dac33->auto_fifo_config = pdata->auto_fifo_config;
 	dac33->mode1_latency = pdata->mode1_latency;
 	if (!dac33->mode1_latency)
 		dac33->mode1_latency = 10000; /* 10ms */
 	dac33->irq = client->irq;
-	dac33->nsample = NSAMPLE_MAX;
-	dac33->nsample_max = NSAMPLE_MAX;
-	dac33->uthr = MODE7_UTHR;
 	/* Disable FIFO use by default */
 	dac33->fifo_mode = DAC33_FIFO_BYPASS;
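
With the fixed pre-calculation dropped from the i2c probe above, tlv320dac33 now derives the burst rate per stream via the new CALC_BURST_RATE() macro: 32 BCLK cycles carry one stereo S16_LE sample and 64 cycles one stereo S32_LE (24-bit) sample. A worked example, assuming a hypothetical burst_bclkdiv of 8 from platform data:

	#define BURST_BASEFREQ_HZ	49152000
	#define CALC_BURST_RATE(bclkdiv, bclk_per_sample) \
		(BURST_BASEFREQ_HZ / bclkdiv / bclk_per_sample)

	CALC_BURST_RATE(8, 32);	/* S16_LE: 192000 samples/s burst rate */
	CALC_BURST_RATE(8, 64);	/* S32_LE:  96000 samples/s burst rate */
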
 
diff --git a/sound/soc/codecs/tpa6130a2.c b/sound/soc/codecs/tpa6130a2.c
index d2c2430..1f1ac81 100644
--- a/sound/soc/codecs/tpa6130a2.c
+++ b/sound/soc/codecs/tpa6130a2.c
@@ -29,7 +29,6 @@
 #include <linux/slab.h>
 #include <sound/tpa6130a2-plat.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/tlv.h>
 
 #include "tpa6130a2.h"
@@ -42,7 +41,7 @@
 	unsigned char regs[TPA6130A2_CACHEREGNUM];
 	struct regulator *supply;
 	int power_gpio;
-	unsigned char power_state;
+	u8 power_state:1;
 	enum tpa_model id;
 };
 
@@ -117,7 +116,7 @@
 	return ret;
 }
 
-static int tpa6130a2_power(int power)
+static int tpa6130a2_power(u8 power)
 {
 	struct	tpa6130a2_data *data;
 	u8	val;
@@ -127,17 +126,19 @@
 	data = i2c_get_clientdata(tpa6130a2_client);
 
 	mutex_lock(&data->mutex);
-	if (power && !data->power_state) {
-		/* Power on */
-		if (data->power_gpio >= 0)
-			gpio_set_value(data->power_gpio, 1);
+	if (power == data->power_state)
+		goto exit;
 
+	if (power) {
 		ret = regulator_enable(data->supply);
 		if (ret != 0) {
 			dev_err(&tpa6130a2_client->dev,
 				"Failed to enable supply: %d\n", ret);
 			goto exit;
 		}
+		/* Power on */
+		if (data->power_gpio >= 0)
+			gpio_set_value(data->power_gpio, 1);
 
 		data->power_state = 1;
 		ret = tpa6130a2_initialize();
@@ -150,12 +151,7 @@
 			data->power_state = 0;
 			goto exit;
 		}
-
-		/* Clear SWS */
-		val = tpa6130a2_read(TPA6130A2_REG_CONTROL);
-		val &= ~TPA6130A2_SWS;
-		tpa6130a2_i2c_write(TPA6130A2_REG_CONTROL, val);
-	} else if (!power && data->power_state) {
+	} else {
 		/* set SWS */
 		val = tpa6130a2_read(TPA6130A2_REG_CONTROL);
 		val |= TPA6130A2_SWS;
@@ -300,6 +296,7 @@
 		/* Enable amplifier */
 		val = tpa6130a2_read(TPA6130A2_REG_CONTROL);
 		val |= channel;
+		val &= ~TPA6130A2_SWS;
 		tpa6130a2_i2c_write(TPA6130A2_REG_CONTROL, val);
 
 		/* Unmute channel */
@@ -320,72 +317,24 @@
 	}
 }
 
-static int tpa6130a2_left_event(struct snd_soc_dapm_widget *w,
-		struct snd_kcontrol *kcontrol, int event)
-{
-	switch (event) {
-	case SND_SOC_DAPM_POST_PMU:
-		tpa6130a2_channel_enable(TPA6130A2_HP_EN_L, 1);
-		break;
-	case SND_SOC_DAPM_POST_PMD:
-		tpa6130a2_channel_enable(TPA6130A2_HP_EN_L, 0);
-		break;
-	}
-	return 0;
-}
-
-static int tpa6130a2_right_event(struct snd_soc_dapm_widget *w,
-		struct snd_kcontrol *kcontrol, int event)
-{
-	switch (event) {
-	case SND_SOC_DAPM_POST_PMU:
-		tpa6130a2_channel_enable(TPA6130A2_HP_EN_R, 1);
-		break;
-	case SND_SOC_DAPM_POST_PMD:
-		tpa6130a2_channel_enable(TPA6130A2_HP_EN_R, 0);
-		break;
-	}
-	return 0;
-}
-
-static int tpa6130a2_supply_event(struct snd_soc_dapm_widget *w,
-		struct snd_kcontrol *kcontrol, int event)
+int tpa6130a2_stereo_enable(struct snd_soc_codec *codec, int enable)
 {
 	int ret = 0;
-
-	switch (event) {
-	case SND_SOC_DAPM_POST_PMU:
+	if (enable) {
 		ret = tpa6130a2_power(1);
-		break;
-	case SND_SOC_DAPM_POST_PMD:
+		if (ret < 0)
+			return ret;
+		tpa6130a2_channel_enable(TPA6130A2_HP_EN_R | TPA6130A2_HP_EN_L,
+					 1);
+	} else {
+		tpa6130a2_channel_enable(TPA6130A2_HP_EN_R | TPA6130A2_HP_EN_L,
+					 0);
 		ret = tpa6130a2_power(0);
-		break;
 	}
+
 	return ret;
 }
-
-static const struct snd_soc_dapm_widget tpa6130a2_dapm_widgets[] = {
-	SND_SOC_DAPM_PGA_E("TPA6130A2 Left", SND_SOC_NOPM,
-			0, 0, NULL, 0, tpa6130a2_left_event,
-			SND_SOC_DAPM_POST_PMU|SND_SOC_DAPM_POST_PMD),
-	SND_SOC_DAPM_PGA_E("TPA6130A2 Right", SND_SOC_NOPM,
-			0, 0, NULL, 0, tpa6130a2_right_event,
-			SND_SOC_DAPM_POST_PMU|SND_SOC_DAPM_POST_PMD),
-	SND_SOC_DAPM_SUPPLY("TPA6130A2 Enable", SND_SOC_NOPM,
-			0, 0, tpa6130a2_supply_event,
-			SND_SOC_DAPM_POST_PMU|SND_SOC_DAPM_POST_PMD),
-	/* Outputs */
-	SND_SOC_DAPM_OUTPUT("TPA6130A2 Headphone Left"),
-	SND_SOC_DAPM_OUTPUT("TPA6130A2 Headphone Right"),
-};
-
-static const struct snd_soc_dapm_route audio_map[] = {
-	{"TPA6130A2 Headphone Left", NULL, "TPA6130A2 Left"},
-	{"TPA6130A2 Headphone Right", NULL, "TPA6130A2 Right"},
-
-	{"TPA6130A2 Headphone Left", NULL, "TPA6130A2 Enable"},
-	{"TPA6130A2 Headphone Right", NULL, "TPA6130A2 Enable"},
-};
+EXPORT_SYMBOL_GPL(tpa6130a2_stereo_enable);
 
 int tpa6130a2_add_controls(struct snd_soc_codec *codec)
 {
@@ -396,18 +345,12 @@
 
 	data = i2c_get_clientdata(tpa6130a2_client);
 
-	snd_soc_dapm_new_controls(codec, tpa6130a2_dapm_widgets,
-				ARRAY_SIZE(tpa6130a2_dapm_widgets));
-
-	snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
-
 	if (data->id == TPA6140A2)
 		return snd_soc_add_controls(codec, tpa6140a2_controls,
 						ARRAY_SIZE(tpa6140a2_controls));
 	else
 		return snd_soc_add_controls(codec, tpa6130a2_controls,
 						ARRAY_SIZE(tpa6130a2_controls));
-
 }
 EXPORT_SYMBOL_GPL(tpa6130a2_add_controls);
 
diff --git a/sound/soc/codecs/tpa6130a2.h b/sound/soc/codecs/tpa6130a2.h
index 57e867f..5df49c8 100644
--- a/sound/soc/codecs/tpa6130a2.h
+++ b/sound/soc/codecs/tpa6130a2.h
@@ -57,5 +57,6 @@
 #define TPA6130A2_VERSION_MASK		(0x0f)
 
 extern int tpa6130a2_add_controls(struct snd_soc_codec *codec);
+extern int tpa6130a2_stereo_enable(struct snd_soc_codec *codec, int enable);
 
 #endif /* __TPA6130A2_H__ */
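
With the amplifier's own DAPM widgets removed, the TPA6130A2 is now driven through the exported tpa6130a2_stereo_enable() helper. A hypothetical machine-driver hook showing how it could be wired to a board-level headphone widget event (the handler name is illustrative, not part of this patch):

	static int example_hp_event(struct snd_soc_dapm_widget *w,
				    struct snd_kcontrol *kcontrol, int event)
	{
		/* power the TPA6130A2 up/down together with the headphone path */
		return tpa6130a2_stereo_enable(w->codec,
					       SND_SOC_DAPM_EVENT_ON(event));
	}
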
diff --git a/sound/soc/codecs/twl4030.c b/sound/soc/codecs/twl4030.c
index cbebec6..e4d464b 100644
--- a/sound/soc/codecs/twl4030.c
+++ b/sound/soc/codecs/twl4030.c
@@ -32,7 +32,6 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/initval.h>
 #include <sound/tlv.h>
 
@@ -233,6 +232,16 @@
 	return 0;
 }
 
+static inline void twl4030_wait_ms(int time)
+{
+	if (time < 60) {
+		time *= 1000;
+		usleep_range(time, time + 500);
+	} else {
+		msleep(time);
+	}
+}
+
 static void twl4030_codec_enable(struct snd_soc_codec *codec, int enable)
 {
 	struct twl4030_priv *twl4030 = snd_soc_codec_get_drvdata(codec);
@@ -338,10 +347,14 @@
 	twl4030_write(codec, TWL4030_REG_ANAMICL,
 		reg | TWL4030_CNCL_OFFSET_START);
 
-	/* wait for offset cancellation to complete */
+	/*
+	 * Wait for offset cancellation to complete.
+	 * Since this takes a while, do not slam the i2c.
+	 * Start polling the status after ~20ms.
+	 */
+	msleep(20);
 	do {
-		/* this takes a little while, so don't slam i2c */
-		udelay(2000);
+		usleep_range(1000, 2000);
 		twl_i2c_read_u8(TWL4030_MODULE_AUDIO_VOICE, &byte,
 				    TWL4030_REG_ANAMICL);
 	} while ((i++ < 100) &&
@@ -725,9 +738,12 @@
 	/* Base values for ramp delay calculation: 2^19 - 2^26 */
 	unsigned int ramp_base[] = {524288, 1048576, 2097152, 4194304,
 				    8388608, 16777216, 33554432, 67108864};
+	unsigned int delay;
 
 	hs_gain = twl4030_read_reg_cache(codec, TWL4030_REG_HS_GAIN_SET);
 	hs_pop = twl4030_read_reg_cache(codec, TWL4030_REG_HS_POPN_SET);
+	delay = (ramp_base[(hs_pop & TWL4030_RAMP_DELAY) >> 2] /
+		twl4030->sysclk) + 1;
 
 	/* Enable external mute control, this dramatically reduces
 	 * the pop-noise */
@@ -751,16 +767,14 @@
 		hs_pop |= TWL4030_RAMP_EN;
 		twl4030_write(codec, TWL4030_REG_HS_POPN_SET, hs_pop);
 		/* Wait ramp delay time + 1, so the VMID can settle */
-		mdelay((ramp_base[(hs_pop & TWL4030_RAMP_DELAY) >> 2] /
-			twl4030->sysclk) + 1);
+		twl4030_wait_ms(delay);
 	} else {
 		/* Headset ramp-down _not_ according to
 		 * the TRM, but in a way that it is working */
 		hs_pop &= ~TWL4030_RAMP_EN;
 		twl4030_write(codec, TWL4030_REG_HS_POPN_SET, hs_pop);
 		/* Wait ramp delay time + 1, so the VMID can settle */
-		mdelay((ramp_base[(hs_pop & TWL4030_RAMP_DELAY) >> 2] /
-			twl4030->sysclk) + 1);
+		twl4030_wait_ms(delay);
 		/* Bypass the reg_cache to mute the headset */
 		twl_i2c_write_u8(TWL4030_MODULE_AUDIO_VOICE,
 					hs_gain & (~0x0f),
@@ -835,7 +849,7 @@
 	struct twl4030_priv *twl4030 = snd_soc_codec_get_drvdata(w->codec);
 
 	if (twl4030->digimic_delay)
-		mdelay(twl4030->digimic_delay);
+		twl4030_wait_ms(twl4030->digimic_delay);
 	return 0;
 }
 
@@ -1621,10 +1635,11 @@
 
 static int twl4030_add_widgets(struct snd_soc_codec *codec)
 {
-	snd_soc_dapm_new_controls(codec, twl4030_dapm_widgets,
-				 ARRAY_SIZE(twl4030_dapm_widgets));
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 
-	snd_soc_dapm_add_routes(codec, intercon, ARRAY_SIZE(intercon));
+	snd_soc_dapm_new_controls(dapm, twl4030_dapm_widgets,
+				 ARRAY_SIZE(twl4030_dapm_widgets));
+	snd_soc_dapm_add_routes(dapm, intercon, ARRAY_SIZE(intercon));
 
 	return 0;
 }
@@ -1638,14 +1653,14 @@
 	case SND_SOC_BIAS_PREPARE:
 		break;
 	case SND_SOC_BIAS_STANDBY:
-		if (codec->bias_level == SND_SOC_BIAS_OFF)
+		if (codec->dapm.bias_level == SND_SOC_BIAS_OFF)
 			twl4030_codec_enable(codec, 1);
 		break;
 	case SND_SOC_BIAS_OFF:
 		twl4030_codec_enable(codec, 0);
 		break;
 	}
-	codec->bias_level = level;
+	codec->dapm.bias_level = level;
 
 	return 0;
 }
@@ -1709,6 +1724,7 @@
 	struct snd_soc_codec *codec = rtd->codec;
 	struct twl4030_priv *twl4030 = snd_soc_codec_get_drvdata(codec);
 
+	snd_pcm_hw_constraint_msbits(substream->runtime, 0, 32, 24);
 	if (twl4030->master_substream) {
 		twl4030->slave_substream = substream;
 		/* The DAI has one configuration for playback and capture, so
@@ -1833,7 +1849,7 @@
 	case SNDRV_PCM_FORMAT_S16_LE:
 		format |= TWL4030_DATA_WIDTH_16S_16W;
 		break;
-	case SNDRV_PCM_FORMAT_S24_LE:
+	case SNDRV_PCM_FORMAT_S32_LE:
 		format |= TWL4030_DATA_WIDTH_32S_24W;
 		break;
 	default:
@@ -2166,7 +2182,7 @@
 }
 
 #define TWL4030_RATES	 (SNDRV_PCM_RATE_8000_48000)
-#define TWL4030_FORMATS	 (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FORMAT_S24_LE)
+#define TWL4030_FORMATS	 (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S32_LE)
 
 static struct snd_soc_dai_ops twl4030_dai_hifi_ops = {
 	.startup	= twl4030_startup,
@@ -2245,7 +2261,7 @@
 	snd_soc_codec_set_drvdata(codec, twl4030);
 	/* Set the defaults, and power up the codec */
 	twl4030->sysclk = twl4030_codec_get_mclk() / 1000;
-	codec->idle_bias_off = 1;
+	codec->dapm.idle_bias_off = 1;
 
 	twl4030_init_chip(codec);
 
@@ -2257,9 +2273,12 @@
 
 static int twl4030_soc_remove(struct snd_soc_codec *codec)
 {
+	struct twl4030_priv *twl4030 = snd_soc_codec_get_drvdata(codec);
+
 	/* Reset registers to their chip default before leaving */
 	twl4030_reset_registers(codec);
 	twl4030_set_bias_level(codec, SND_SOC_BIAS_OFF);
+	kfree(twl4030);
 	return 0;
 }
 
@@ -2291,10 +2310,7 @@
 
 static int __devexit twl4030_codec_remove(struct platform_device *pdev)
 {
-	struct twl4030_priv *twl4030 = dev_get_drvdata(&pdev->dev);
-
 	snd_soc_unregister_codec(&pdev->dev);
-	kfree(twl4030);
 	return 0;
 }
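
twl4030 above replaces its open-coded mdelay() calls with twl4030_wait_ms(), which sleeps instead of busy-waiting, and pre-computes the headset ramp delay once. A worked example of that delay, assuming a 26 MHz MCLK (sysclk = 26000 kHz) and a RAMP_DELAY field selecting ramp_base[2]:

	/* delay = 2097152 / 26000 + 1 = 81 ms, so twl4030_wait_ms(81)
	 * ends up in msleep() rather than spinning in mdelay() */
	delay = (ramp_base[(hs_pop & TWL4030_RAMP_DELAY) >> 2] /
		twl4030->sysclk) + 1;
	twl4030_wait_ms(delay);
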
 
diff --git a/sound/soc/codecs/twl6040.c b/sound/soc/codecs/twl6040.c
index 10f6e52..4bbf1b1 100644
--- a/sound/soc/codecs/twl6040.c
+++ b/sound/soc/codecs/twl6040.c
@@ -34,14 +34,46 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/initval.h>
 #include <sound/tlv.h>
 
 #include "twl6040.h"
 
-#define TWL6040_RATES	 (SNDRV_PCM_RATE_88200 | SNDRV_PCM_RATE_96000)
-#define TWL6040_FORMATS	 (SNDRV_PCM_FMTBIT_S32_LE)
+#define TWL6040_RATES		SNDRV_PCM_RATE_8000_96000
+#define TWL6040_FORMATS	(SNDRV_PCM_FMTBIT_S32_LE)
+
+#define TWL6040_OUTHS_0dB 0x00
+#define TWL6040_OUTHS_M30dB 0x0F
+#define TWL6040_OUTHF_0dB 0x03
+#define TWL6040_OUTHF_M52dB 0x1D
+
+#define TWL6040_RAMP_NONE	0
+#define TWL6040_RAMP_UP		1
+#define TWL6040_RAMP_DOWN	2
+
+#define TWL6040_HSL_VOL_MASK	0x0F
+#define TWL6040_HSL_VOL_SHIFT	0
+#define TWL6040_HSR_VOL_MASK	0xF0
+#define TWL6040_HSR_VOL_SHIFT	4
+#define TWL6040_HF_VOL_MASK	0x1F
+#define TWL6040_HF_VOL_SHIFT	0
+
+struct twl6040_output {
+	u16 active;
+	u16 left_vol;
+	u16 right_vol;
+	u16 left_step;
+	u16 right_step;
+	unsigned int step_delay;
+	u16 ramp;
+	u16 mute;
+	struct completion ramp_done;
+};
+
+struct twl6040_jack_data {
+	struct snd_soc_jack *jack;
+	int report;
+};
 
 /* codec private data */
 struct twl6040_data {
@@ -53,6 +85,17 @@
 	unsigned int sysclk;
 	struct snd_pcm_hw_constraint_list *sysclk_constraints;
 	struct completion ready;
+	struct twl6040_jack_data hs_jack;
+	struct snd_soc_codec *codec;
+	struct workqueue_struct *workqueue;
+	struct delayed_work delayed_work;
+	struct mutex mutex;
+	struct twl6040_output headset;
+	struct twl6040_output handsfree;
+	struct workqueue_struct *hf_workqueue;
+	struct workqueue_struct *hs_workqueue;
+	struct delayed_work hs_delayed_work;
+	struct delayed_work hf_delayed_work;
 };
 
 /*
@@ -201,7 +244,7 @@
 	if (reg >= TWL6040_CACHEREGNUM)
 		return -EIO;
 
-	twl_i2c_read_u8(TWL4030_MODULE_AUDIO_VOICE, &value, reg);
+	twl_i2c_read_u8(TWL_MODULE_AUDIO_VOICE, &value, reg);
 	twl6040_write_reg_cache(codec, reg, value);
 
 	return value;
@@ -217,7 +260,7 @@
 		return -EIO;
 
 	twl6040_write_reg_cache(codec, reg, value);
-	return twl_i2c_write_u8(TWL4030_MODULE_AUDIO_VOICE, value, reg);
+	return twl_i2c_write_u8(TWL_MODULE_AUDIO_VOICE, value, reg);
 }
 
 static void twl6040_init_vio_regs(struct snd_soc_codec *codec)
@@ -254,6 +297,305 @@
 	}
 }
 
+/*
+ * Ramp HS PGA volume to minimise pops at stream startup and shutdown.
+ */
+static inline int twl6040_hs_ramp_step(struct snd_soc_codec *codec,
+			unsigned int left_step, unsigned int right_step)
+{
+
+	struct twl6040_data *priv = snd_soc_codec_get_drvdata(codec);
+	struct twl6040_output *headset = &priv->headset;
+	int left_complete = 0, right_complete = 0;
+	u8 reg, val;
+
+	/* left channel */
+	left_step = (left_step > 0xF) ? 0xF : left_step;
+	reg = twl6040_read_reg_cache(codec, TWL6040_REG_HSGAIN);
+	val = (~reg & TWL6040_HSL_VOL_MASK);
+
+	if (headset->ramp == TWL6040_RAMP_UP) {
+		/* ramp step up */
+		if (val < headset->left_vol) {
+			val += left_step;
+			reg &= ~TWL6040_HSL_VOL_MASK;
+			twl6040_write(codec, TWL6040_REG_HSGAIN,
+					(reg | (~val & TWL6040_HSL_VOL_MASK)));
+		} else {
+			left_complete = 1;
+		}
+	} else if (headset->ramp == TWL6040_RAMP_DOWN) {
+		/* ramp step down */
+		if (val > 0x0) {
+			val -= left_step;
+			reg &= ~TWL6040_HSL_VOL_MASK;
+			twl6040_write(codec, TWL6040_REG_HSGAIN, reg |
+						(~val & TWL6040_HSL_VOL_MASK));
+		} else {
+			left_complete = 1;
+		}
+	}
+
+	/* right channel */
+	right_step = (right_step > 0xF) ? 0xF : right_step;
+	reg = twl6040_read_reg_cache(codec, TWL6040_REG_HSGAIN);
+	val = (~reg & TWL6040_HSR_VOL_MASK) >> TWL6040_HSR_VOL_SHIFT;
+
+	if (headset->ramp == TWL6040_RAMP_UP) {
+		/* ramp step up */
+		if (val < headset->right_vol) {
+			val += right_step;
+			reg &= ~TWL6040_HSR_VOL_MASK;
+			twl6040_write(codec, TWL6040_REG_HSGAIN,
+				(reg | (~val << TWL6040_HSR_VOL_SHIFT)));
+		} else {
+			right_complete = 1;
+		}
+	} else if (headset->ramp == TWL6040_RAMP_DOWN) {
+		/* ramp step down */
+		if (val > 0x0) {
+			val -= right_step;
+			reg &= ~TWL6040_HSR_VOL_MASK;
+			twl6040_write(codec, TWL6040_REG_HSGAIN,
+					 reg | (~val << TWL6040_HSR_VOL_SHIFT));
+		} else {
+			right_complete = 1;
+		}
+	}
+
+	return left_complete & right_complete;
+}
+
+/*
+ * Ramp HF PGA volume to minimise pops at stream startup and shutdown.
+ */
+static inline int twl6040_hf_ramp_step(struct snd_soc_codec *codec,
+			unsigned int left_step, unsigned int right_step)
+{
+	struct twl6040_data *priv = snd_soc_codec_get_drvdata(codec);
+	struct twl6040_output *handsfree = &priv->handsfree;
+	int left_complete = 0, right_complete = 0;
+	u16 reg, val;
+
+	/* left channel */
+	left_step = (left_step > 0x1D) ? 0x1D : left_step;
+	reg = twl6040_read_reg_cache(codec, TWL6040_REG_HFLGAIN);
+	reg = 0x1D - reg;
+	val = (reg & TWL6040_HF_VOL_MASK);
+	if (handsfree->ramp == TWL6040_RAMP_UP) {
+		/* ramp step up */
+		if (val < handsfree->left_vol) {
+			val += left_step;
+			reg &= ~TWL6040_HF_VOL_MASK;
+			twl6040_write(codec, TWL6040_REG_HFLGAIN,
+						reg | (0x1D - val));
+		} else {
+			left_complete = 1;
+		}
+	} else if (handsfree->ramp == TWL6040_RAMP_DOWN) {
+		/* ramp step down */
+		if (val > 0) {
+			val -= left_step;
+			reg &= ~TWL6040_HF_VOL_MASK;
+			twl6040_write(codec, TWL6040_REG_HFLGAIN,
+						reg | (0x1D - val));
+		} else {
+			left_complete = 1;
+		}
+	}
+
+	/* right channel */
+	right_step = (right_step > 0x1D) ? 0x1D : right_step;
+	reg = twl6040_read_reg_cache(codec, TWL6040_REG_HFRGAIN);
+	reg = 0x1D - reg;
+	val = (reg & TWL6040_HF_VOL_MASK);
+	if (handsfree->ramp == TWL6040_RAMP_UP) {
+		/* ramp step up */
+		if (val < handsfree->right_vol) {
+			val += right_step;
+			reg &= ~TWL6040_HF_VOL_MASK;
+			twl6040_write(codec, TWL6040_REG_HFRGAIN,
+						reg | (0x1D - val));
+		} else {
+			right_complete = 1;
+		}
+	} else if (handsfree->ramp == TWL6040_RAMP_DOWN) {
+		/* ramp step down */
+		if (val > 0) {
+			val -= right_step;
+			reg &= ~TWL6040_HF_VOL_MASK;
+			twl6040_write(codec, TWL6040_REG_HFRGAIN,
+						reg | (0x1D - val));
+		}
+	}
+
+	return left_complete & right_complete;
+}
+
+/*
+ * This work ramps both output PGAs at stream start/stop time to
+ * minimise pop associated with DAPM power switching.
+ */
+static void twl6040_pga_hs_work(struct work_struct *work)
+{
+	struct twl6040_data *priv =
+		container_of(work, struct twl6040_data, hs_delayed_work.work);
+	struct snd_soc_codec *codec = priv->codec;
+	struct twl6040_output *headset = &priv->headset;
+	unsigned int delay = headset->step_delay;
+	int i, headset_complete;
+
+	/* do we need to ramp at all ? */
+	if (headset->ramp == TWL6040_RAMP_NONE)
+		return;
+
+	/* HS PGA volumes have 4 bits of resolution to ramp */
+	for (i = 0; i <= 16; i++) {
+		headset_complete = 1;
+		if (headset->ramp != TWL6040_RAMP_NONE)
+			headset_complete = twl6040_hs_ramp_step(codec,
+							headset->left_step,
+							headset->right_step);
+
+		/* ramp finished ? */
+		if (headset_complete)
+			break;
+
+		/*
+		 * TODO: tune: delay is longer over 0dB
+		 * as increases are larger.
+		 */
+		if (i >= 8)
+			schedule_timeout_interruptible(msecs_to_jiffies(delay +
+							(delay >> 1)));
+		else
+			schedule_timeout_interruptible(msecs_to_jiffies(delay));
+	}
+
+	if (headset->ramp == TWL6040_RAMP_DOWN) {
+		headset->active = 0;
+		complete(&headset->ramp_done);
+	} else {
+		headset->active = 1;
+	}
+	headset->ramp = TWL6040_RAMP_NONE;
+}
+
+static void twl6040_pga_hf_work(struct work_struct *work)
+{
+	struct twl6040_data *priv =
+		container_of(work, struct twl6040_data, hf_delayed_work.work);
+	struct snd_soc_codec *codec = priv->codec;
+	struct twl6040_output *handsfree = &priv->handsfree;
+	unsigned int delay = handsfree->step_delay;
+	int i, handsfree_complete;
+
+	/* do we need to ramp at all ? */
+	if (handsfree->ramp == TWL6040_RAMP_NONE)
+		return;
+
+	/* HF PGA volumes have 5 bits of resolution to ramp */
+	for (i = 0; i <= 32; i++) {
+		handsfree_complete = 1;
+		if (handsfree->ramp != TWL6040_RAMP_NONE)
+			handsfree_complete = twl6040_hf_ramp_step(codec,
+							handsfree->left_step,
+							handsfree->right_step);
+
+		/* ramp finished ? */
+		if (handsfree_complete)
+			break;
+
+		/*
+		 * TODO: tune: delay is longer over 0dB
+		 * as increases are larger.
+		 */
+		if (i >= 16)
+			schedule_timeout_interruptible(msecs_to_jiffies(delay +
+						       (delay >> 1)));
+		else
+			schedule_timeout_interruptible(msecs_to_jiffies(delay));
+	}
+
+
+	if (handsfree->ramp == TWL6040_RAMP_DOWN) {
+		handsfree->active = 0;
+		complete(&handsfree->ramp_done);
+	} else
+		handsfree->active = 1;
+	handsfree->ramp = TWL6040_RAMP_NONE;
+}
+
+static int pga_event(struct snd_soc_dapm_widget *w,
+			struct snd_kcontrol *kcontrol, int event)
+{
+	struct snd_soc_codec *codec = w->codec;
+	struct twl6040_data *priv = snd_soc_codec_get_drvdata(codec);
+	struct twl6040_output *out;
+	struct delayed_work *work;
+	struct workqueue_struct *queue;
+
+	switch (w->shift) {
+	case 2:
+	case 3:
+		out = &priv->headset;
+		work = &priv->hs_delayed_work;
+		queue = priv->hs_workqueue;
+		out->step_delay = 5;	/* 5 ms between volume ramp steps */
+		break;
+	case 4:
+		out = &priv->handsfree;
+		work = &priv->hf_delayed_work;
+		queue = priv->hf_workqueue;
+		out->step_delay = 5;	/* 5 ms between volume ramp steps */
+		if (SND_SOC_DAPM_EVENT_ON(event))
+			priv->non_lp++;
+		else
+			priv->non_lp--;
+		break;
+	default:
+		return -1;
+	}
+
+	switch (event) {
+	case SND_SOC_DAPM_POST_PMU:
+		if (out->active)
+			break;
+
+		/* don't use volume ramp for power-up */
+		out->left_step = out->left_vol;
+		out->right_step = out->right_vol;
+
+		if (!delayed_work_pending(work)) {
+			out->ramp = TWL6040_RAMP_UP;
+			queue_delayed_work(queue, work,
+					msecs_to_jiffies(1));
+		}
+		break;
+
+	case SND_SOC_DAPM_PRE_PMD:
+		if (!out->active)
+			break;
+
+		if (!delayed_work_pending(work)) {
+			/* use volume ramp for power-down */
+			out->left_step = 1;
+			out->right_step = 1;
+			out->ramp = TWL6040_RAMP_DOWN;
+			INIT_COMPLETION(out->ramp_done);
+
+			queue_delayed_work(queue, work,
+					msecs_to_jiffies(1));
+
+			wait_for_completion_timeout(&out->ramp_done,
+					msecs_to_jiffies(2000));
+		}
+		break;
+	}
+
+	return 0;
+}
+
 /* twl6040 codec manual power-up sequence */
 static void twl6040_power_up(struct snd_soc_codec *codec)
 {
@@ -382,6 +724,47 @@
 	return 0;
 }
 
+void twl6040_hs_jack_report(struct snd_soc_codec *codec,
+				struct snd_soc_jack *jack, int report)
+{
+	struct twl6040_data *priv = snd_soc_codec_get_drvdata(codec);
+	int status;
+
+	mutex_lock(&priv->mutex);
+
+	/* Sync status */
+	status = twl6040_read_reg_volatile(codec, TWL6040_REG_STATUS);
+	if (status & TWL6040_PLUGCOMP)
+		snd_soc_jack_report(jack, report, report);
+	else
+		snd_soc_jack_report(jack, 0, report);
+
+	mutex_unlock(&priv->mutex);
+}
+
+void twl6040_hs_jack_detect(struct snd_soc_codec *codec,
+				struct snd_soc_jack *jack, int report)
+{
+	struct twl6040_data *priv = snd_soc_codec_get_drvdata(codec);
+	struct twl6040_jack_data *hs_jack = &priv->hs_jack;
+
+	hs_jack->jack = jack;
+	hs_jack->report = report;
+
+	twl6040_hs_jack_report(codec, hs_jack->jack, hs_jack->report);
+}
+EXPORT_SYMBOL_GPL(twl6040_hs_jack_detect);
+
+static void twl6040_accessory_work(struct work_struct *work)
+{
+	struct twl6040_data *priv = container_of(work,
+					struct twl6040_data, delayed_work.work);
+	struct snd_soc_codec *codec = priv->codec;
+	struct twl6040_jack_data *hs_jack = &priv->hs_jack;
+
+	twl6040_hs_jack_report(codec, hs_jack->jack, hs_jack->report);
+}
+
 /* audio interrupt handler */
 static irqreturn_t twl6040_naudint_handler(int irq, void *data)
 {
@@ -389,33 +772,180 @@
 	struct twl6040_data *priv = snd_soc_codec_get_drvdata(codec);
 	u8 intid;
 
-	twl_i2c_read_u8(TWL4030_MODULE_AUDIO_VOICE, &intid, TWL6040_REG_INTID);
+	twl_i2c_read_u8(TWL_MODULE_AUDIO_VOICE, &intid, TWL6040_REG_INTID);
 
-	switch (intid) {
-	case TWL6040_THINT:
+	if (intid & TWL6040_THINT)
 		dev_alert(codec->dev, "die temp over-limit detection\n");
-		break;
-	case TWL6040_PLUGINT:
-	case TWL6040_UNPLUGINT:
-	case TWL6040_HOOKINT:
-		break;
-	case TWL6040_HFINT:
+
+	if ((intid & TWL6040_PLUGINT) || (intid & TWL6040_UNPLUGINT))
+		queue_delayed_work(priv->workqueue, &priv->delayed_work,
+							msecs_to_jiffies(200));
+
+	if (intid & TWL6040_HOOKINT)
+		dev_info(codec->dev, "hook detection\n");
+
+	if (intid & TWL6040_HFINT)
 		dev_alert(codec->dev, "hf drivers over current detection\n");
-		break;
-	case TWL6040_VIBINT:
+
+	if (intid & TWL6040_VIBINT)
 		dev_alert(codec->dev, "vib drivers over current detection\n");
-		break;
-	case TWL6040_READYINT:
+
+	if (intid & TWL6040_READYINT)
 		complete(&priv->ready);
-		break;
-	default:
-		dev_err(codec->dev, "unknown audio interrupt %d\n", intid);
-		break;
-	}
 
 	return IRQ_HANDLED;
 }
 
+static int twl6040_put_volsw(struct snd_kcontrol *kcontrol,
+				  struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+	struct twl6040_data *twl6040_priv = snd_soc_codec_get_drvdata(codec);
+	struct twl6040_output *out = NULL;
+	struct soc_mixer_control *mc =
+		(struct soc_mixer_control *)kcontrol->private_value;
+	int ret;
+	unsigned int reg = mc->reg;
+
+	/* For HS and HF we shadow the values and only actually write
+	 * them out when active in order to ensure the amplifier comes on
+	 * as quietly as possible. */
+	switch (reg) {
+	case TWL6040_REG_HSGAIN:
+		out = &twl6040_priv->headset;
+		break;
+	default:
+		break;
+	}
+
+	if (out) {
+		out->left_vol = ucontrol->value.integer.value[0];
+		out->right_vol = ucontrol->value.integer.value[1];
+		if (!out->active)
+			return 1;
+	}
+
+	ret = snd_soc_put_volsw(kcontrol, ucontrol);
+	if (ret < 0)
+		return ret;
+
+	return 1;
+}
+
+static int twl6040_get_volsw(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+	struct twl6040_data *twl6040_priv = snd_soc_codec_get_drvdata(codec);
+	struct twl6040_output *out = &twl6040_priv->headset;
+	struct soc_mixer_control *mc =
+		(struct soc_mixer_control *)kcontrol->private_value;
+	unsigned int reg = mc->reg;
+
+	switch (reg) {
+	case TWL6040_REG_HSGAIN:
+		out = &twl6040_priv->headset;
+		ucontrol->value.integer.value[0] = out->left_vol;
+		ucontrol->value.integer.value[1] = out->right_vol;
+		return 0;
+
+	default:
+		break;
+	}
+
+	return snd_soc_get_volsw(kcontrol, ucontrol);
+}
+
+static int twl6040_put_volsw_2r_vu(struct snd_kcontrol *kcontrol,
+				  struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+	struct twl6040_data *twl6040_priv = snd_soc_codec_get_drvdata(codec);
+	struct twl6040_output *out = NULL;
+	struct soc_mixer_control *mc =
+		(struct soc_mixer_control *)kcontrol->private_value;
+	int ret;
+	unsigned int reg = mc->reg;
+
+	/* For HS and HF we shadow the values and only actually write
+	 * them out when active in order to ensure the amplifier comes on
+	 * as quietly as possible. */
+	switch (reg) {
+	case TWL6040_REG_HFLGAIN:
+	case TWL6040_REG_HFRGAIN:
+		out = &twl6040_priv->handsfree;
+		break;
+	default:
+		break;
+	}
+
+	if (out) {
+		out->left_vol = ucontrol->value.integer.value[0];
+		out->right_vol = ucontrol->value.integer.value[1];
+		if (!out->active)
+			return 1;
+	}
+
+	ret = snd_soc_put_volsw_2r(kcontrol, ucontrol);
+	if (ret < 0)
+		return ret;
+
+	return 1;
+}
+
+static int twl6040_get_volsw_2r(struct snd_kcontrol *kcontrol,
+				struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+	struct twl6040_data *twl6040_priv = snd_soc_codec_get_drvdata(codec);
+	struct twl6040_output *out = &twl6040_priv->handsfree;
+	struct soc_mixer_control *mc =
+		(struct soc_mixer_control *)kcontrol->private_value;
+	unsigned int reg = mc->reg;
+
+	/* If these are cached registers use the cache */
+	switch (reg) {
+	case TWL6040_REG_HFLGAIN:
+	case TWL6040_REG_HFRGAIN:
+		out = &twl6040_priv->handsfree;
+		ucontrol->value.integer.value[0] = out->left_vol;
+		ucontrol->value.integer.value[1] = out->right_vol;
+		return 0;
+
+	default:
+		break;
+	}
+
+	return snd_soc_get_volsw_2r(kcontrol, ucontrol);
+}
+
+/* double control with volume update */
+#define SOC_TWL6040_DOUBLE_TLV(xname, xreg, shift_left, shift_right, xmax,\
+							xinvert, tlv_array)\
+{	.iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = (xname),\
+	.access = SNDRV_CTL_ELEM_ACCESS_TLV_READ |\
+		 SNDRV_CTL_ELEM_ACCESS_READWRITE,\
+	.tlv.p = (tlv_array), \
+	.info = snd_soc_info_volsw, .get = twl6040_get_volsw, \
+	.put = twl6040_put_volsw, \
+	.private_value = (unsigned long)&(struct soc_mixer_control) \
+		{.reg = xreg, .shift = shift_left, .rshift = shift_right,\
+		 .max = xmax, .platform_max = xmax, .invert = xinvert} }
+
+/* double control with volume update */
+#define SOC_TWL6040_DOUBLE_R_TLV(xname, reg_left, reg_right, xshift, xmax,\
+				xinvert, tlv_array)\
+{	.iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = (xname),\
+	.access = SNDRV_CTL_ELEM_ACCESS_TLV_READ | \
+		SNDRV_CTL_ELEM_ACCESS_READWRITE | \
+		SNDRV_CTL_ELEM_ACCESS_VOLATILE, \
+	.tlv.p = (tlv_array), \
+	.info = snd_soc_info_volsw_2r, \
+	.get = twl6040_get_volsw_2r, .put = twl6040_put_volsw_2r_vu, \
+	.private_value = (unsigned long)&(struct soc_mixer_control) \
+		{.reg = reg_left, .rreg = reg_right, .shift = xshift, \
+		 .rshift = xshift, .max = xmax, .invert = xinvert}, }
+
 /*
  * MICATT volume control:
  * from -6 to 0 dB in 6 dB steps
@@ -424,9 +954,15 @@
 
 /*
  * MICGAIN volume control:
- * from 6 to 30 dB in 6 dB steps
+ * from -6 to 30 dB in 6 dB steps
  */
-static DECLARE_TLV_DB_SCALE(mic_amp_tlv, 600, 600, 0);
+static DECLARE_TLV_DB_SCALE(mic_amp_tlv, -600, 600, 0);
+
+/*
+ * AFMGAIN volume control:
+ * from 18 to 24 dB in 6 dB steps
+ */
+static DECLARE_TLV_DB_SCALE(afm_amp_tlv, 1800, 600, 0);
 
 /*
  * HSGAIN volume control:
@@ -455,8 +991,30 @@
 	{"Headset Mic", "Sub Mic", "Aux/FM Right", "Off"};
 
 static const struct soc_enum twl6040_enum[] = {
-	SOC_ENUM_SINGLE(TWL6040_REG_MICLCTL, 3, 3, twl6040_amicl_texts),
-	SOC_ENUM_SINGLE(TWL6040_REG_MICRCTL, 3, 3, twl6040_amicr_texts),
+	SOC_ENUM_SINGLE(TWL6040_REG_MICLCTL, 3, 4, twl6040_amicl_texts),
+	SOC_ENUM_SINGLE(TWL6040_REG_MICRCTL, 3, 4, twl6040_amicr_texts),
+};
+
+static const char *twl6040_hs_texts[] = {
+	"Off", "HS DAC", "Line-In amp"
+};
+
+static const struct soc_enum twl6040_hs_enum[] = {
+	SOC_ENUM_SINGLE(TWL6040_REG_HSLCTL, 5, ARRAY_SIZE(twl6040_hs_texts),
+			twl6040_hs_texts),
+	SOC_ENUM_SINGLE(TWL6040_REG_HSRCTL, 5, ARRAY_SIZE(twl6040_hs_texts),
+			twl6040_hs_texts),
+};
+
+static const char *twl6040_hf_texts[] = {
+	"Off", "HF DAC", "Line-In amp"
+};
+
+static const struct soc_enum twl6040_hf_enum[] = {
+	SOC_ENUM_SINGLE(TWL6040_REG_HFLCTL, 2, ARRAY_SIZE(twl6040_hf_texts),
+			twl6040_hf_texts),
+	SOC_ENUM_SINGLE(TWL6040_REG_HFRCTL, 2, ARRAY_SIZE(twl6040_hf_texts),
+			twl6040_hf_texts),
 };
 
 static const struct snd_kcontrol_new amicl_control =
@@ -466,18 +1024,18 @@
 	SOC_DAPM_ENUM("Route", twl6040_enum[1]);
 
 /* Headset DAC playback switches */
-static const struct snd_kcontrol_new hsdacl_switch_controls =
-	SOC_DAPM_SINGLE("Switch", TWL6040_REG_HSLCTL, 5, 1, 0);
+static const struct snd_kcontrol_new hsl_mux_controls =
+	SOC_DAPM_ENUM("Route", twl6040_hs_enum[0]);
 
-static const struct snd_kcontrol_new hsdacr_switch_controls =
-	SOC_DAPM_SINGLE("Switch", TWL6040_REG_HSRCTL, 5, 1, 0);
+static const struct snd_kcontrol_new hsr_mux_controls =
+	SOC_DAPM_ENUM("Route", twl6040_hs_enum[1]);
 
 /* Handsfree DAC playback switches */
-static const struct snd_kcontrol_new hfdacl_switch_controls =
-	SOC_DAPM_SINGLE("Switch", TWL6040_REG_HFLCTL, 2, 1, 0);
+static const struct snd_kcontrol_new hfl_mux_controls =
+	SOC_DAPM_ENUM("Route", twl6040_hf_enum[0]);
 
-static const struct snd_kcontrol_new hfdacr_switch_controls =
-	SOC_DAPM_SINGLE("Switch", TWL6040_REG_HFRCTL, 2, 1, 0);
+static const struct snd_kcontrol_new hfr_mux_controls =
+	SOC_DAPM_ENUM("Route", twl6040_hf_enum[1]);
 
 static const struct snd_kcontrol_new ep_driver_switch_controls =
 	SOC_DAPM_SINGLE("Switch", TWL6040_REG_EARCTL, 0, 1, 0);
@@ -489,10 +1047,14 @@
 	SOC_DOUBLE_TLV("Capture Volume",
 		TWL6040_REG_MICGAIN, 0, 3, 4, 0, mic_amp_tlv),
 
+	/* AFM gains */
+	SOC_DOUBLE_TLV("Aux FM Volume",
+		TWL6040_REG_LINEGAIN, 0, 4, 0xF, 0, afm_amp_tlv),
+
 	/* Playback gains */
-	SOC_DOUBLE_TLV("Headset Playback Volume",
+	SOC_TWL6040_DOUBLE_TLV("Headset Playback Volume",
 		TWL6040_REG_HSGAIN, 0, 4, 0xF, 1, hs_tlv),
-	SOC_DOUBLE_R_TLV("Handsfree Playback Volume",
+	SOC_TWL6040_DOUBLE_R_TLV("Handsfree Playback Volume",
 		TWL6040_REG_HFLGAIN, TWL6040_REG_HFRGAIN, 0, 0x1D, 1, hf_tlv),
 	SOC_SINGLE_TLV("Earphone Playback Volume",
 		TWL6040_REG_EARCTL, 1, 0xF, 1, ep_tlv),
@@ -525,6 +1087,12 @@
 	SND_SOC_DAPM_PGA("MicAmpR",
 			TWL6040_REG_MICRCTL, 0, 0, NULL, 0),
 
+	/* Auxiliary FM PGAs */
+	SND_SOC_DAPM_PGA("AFMAmpL",
+			TWL6040_REG_MICLCTL, 1, 0, NULL, 0),
+	SND_SOC_DAPM_PGA("AFMAmpR",
+			TWL6040_REG_MICRCTL, 1, 0, NULL, 0),
+
 	/* ADCs */
 	SND_SOC_DAPM_ADC("ADC Left", "Left Front Capture",
 			TWL6040_REG_MICLCTL, 2, 0),
@@ -559,29 +1127,33 @@
 			twl6040_power_mode_event,
 			SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
 
-	/* Analog playback switches */
-	SND_SOC_DAPM_SWITCH("HSDAC Left Playback",
-			SND_SOC_NOPM, 0, 0, &hsdacl_switch_controls),
-	SND_SOC_DAPM_SWITCH("HSDAC Right Playback",
-			SND_SOC_NOPM, 0, 0, &hsdacr_switch_controls),
-	SND_SOC_DAPM_SWITCH("HFDAC Left Playback",
-			SND_SOC_NOPM, 0, 0, &hfdacl_switch_controls),
-	SND_SOC_DAPM_SWITCH("HFDAC Right Playback",
-			SND_SOC_NOPM, 0, 0, &hfdacr_switch_controls),
+	SND_SOC_DAPM_MUX("HF Left Playback",
+			SND_SOC_NOPM, 0, 0, &hfl_mux_controls),
+	SND_SOC_DAPM_MUX("HF Right Playback",
+			SND_SOC_NOPM, 0, 0, &hfr_mux_controls),
+	/* Analog playback Muxes */
+	SND_SOC_DAPM_MUX("HS Left Playback",
+			SND_SOC_NOPM, 0, 0, &hsl_mux_controls),
+	SND_SOC_DAPM_MUX("HS Right Playback",
+			SND_SOC_NOPM, 0, 0, &hsr_mux_controls),
 
 	/* Analog playback drivers */
-	SND_SOC_DAPM_PGA_E("Handsfree Left Driver",
+	SND_SOC_DAPM_OUT_DRV_E("Handsfree Left Driver",
 			TWL6040_REG_HFLCTL, 4, 0, NULL, 0,
-			twl6040_power_mode_event,
-			SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
-	SND_SOC_DAPM_PGA_E("Handsfree Right Driver",
+			pga_event,
+			SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
+	SND_SOC_DAPM_OUT_DRV_E("Handsfree Right Driver",
 			TWL6040_REG_HFRCTL, 4, 0, NULL, 0,
-			twl6040_power_mode_event,
-			SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
-	SND_SOC_DAPM_PGA("Headset Left Driver",
-			TWL6040_REG_HSLCTL, 2, 0, NULL, 0),
-	SND_SOC_DAPM_PGA("Headset Right Driver",
-			TWL6040_REG_HSRCTL, 2, 0, NULL, 0),
+			pga_event,
+			SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
+	SND_SOC_DAPM_OUT_DRV_E("Headset Left Driver",
+			TWL6040_REG_HSLCTL, 2, 0, NULL, 0,
+			pga_event,
+			SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
+	SND_SOC_DAPM_OUT_DRV_E("Headset Right Driver",
+			TWL6040_REG_HSRCTL, 2, 0, NULL, 0,
+			pga_event,
+			SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
 	SND_SOC_DAPM_SWITCH_E("Earphone Driver",
 			SND_SOC_NOPM, 0, 0, &ep_driver_switch_controls,
 			twl6040_power_mode_event,
@@ -611,12 +1183,18 @@
 	{"ADC Left", NULL, "MicAmpL"},
 	{"ADC Right", NULL, "MicAmpR"},
 
-	/* Headset playback path */
-	{"HSDAC Left Playback", "Switch", "HSDAC Left"},
-	{"HSDAC Right Playback", "Switch", "HSDAC Right"},
+	/* AFM path */
+	{"AFMAmpL", "NULL", "AFML"},
+	{"AFMAmpR", "NULL", "AFMR"},
 
-	{"Headset Left Driver", NULL, "HSDAC Left Playback"},
-	{"Headset Right Driver", NULL, "HSDAC Right Playback"},
+	{"HS Left Playback", "HS DAC", "HSDAC Left"},
+	{"HS Left Playback", "Line-In amp", "AFMAmpL"},
+
+	{"HS Right Playback", "HS DAC", "HSDAC Right"},
+	{"HS Right Playback", "Line-In amp", "AFMAmpR"},
+
+	{"Headset Left Driver", "NULL", "HS Left Playback"},
+	{"Headset Right Driver", "NULL", "HS Right Playback"},
 
 	{"HSOL", NULL, "Headset Left Driver"},
 	{"HSOR", NULL, "Headset Right Driver"},
@@ -625,12 +1203,14 @@
 	{"Earphone Driver", "Switch", "HSDAC Left"},
 	{"EP", NULL, "Earphone Driver"},
 
-	/* Handsfree playback path */
-	{"HFDAC Left Playback", "Switch", "HFDAC Left"},
-	{"HFDAC Right Playback", "Switch", "HFDAC Right"},
+	{"HF Left Playback", "HF DAC", "HFDAC Left"},
+	{"HF Left Playback", "Line-In amp", "AFMAmpL"},
 
-	{"HFDAC Left PGA", NULL, "HFDAC Left Playback"},
-	{"HFDAC Right PGA", NULL, "HFDAC Right Playback"},
+	{"HF Right Playback", "HF DAC", "HFDAC Right"},
+	{"HF Right Playback", "Line-In amp", "AFMAmpR"},
+
+	{"HFDAC Left PGA", NULL, "HF Left Playback"},
+	{"HFDAC Right PGA", NULL, "HF Right Playback"},
 
 	{"Handsfree Left Driver", "Switch", "HFDAC Left PGA"},
 	{"Handsfree Right Driver", "Switch", "HFDAC Right PGA"},
@@ -641,12 +1221,12 @@
 
 static int twl6040_add_widgets(struct snd_soc_codec *codec)
 {
-	snd_soc_dapm_new_controls(codec, twl6040_dapm_widgets,
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
+
+	snd_soc_dapm_new_controls(dapm, twl6040_dapm_widgets,
 				 ARRAY_SIZE(twl6040_dapm_widgets));
-
-	snd_soc_dapm_add_routes(codec, intercon, ARRAY_SIZE(intercon));
-
-	snd_soc_dapm_new_widgets(codec);
+	snd_soc_dapm_add_routes(dapm, intercon, ARRAY_SIZE(intercon));
+	snd_soc_dapm_new_widgets(dapm);
 
 	return 0;
 }
@@ -659,10 +1239,10 @@
 	u8 intid;
 
 	time_left = wait_for_completion_timeout(&priv->ready,
-				msecs_to_jiffies(48));
+				msecs_to_jiffies(144));
 
 	if (!time_left) {
-		twl_i2c_read_u8(TWL4030_MODULE_AUDIO_VOICE, &intid,
+		twl_i2c_read_u8(TWL_MODULE_AUDIO_VOICE, &intid,
 							TWL6040_REG_INTID);
 		if (!(intid & TWL6040_READYINT)) {
 			dev_err(codec->dev, "timeout waiting for READYINT\n");
@@ -713,6 +1293,15 @@
 
 		/* initialize vdd/vss registers with reg_cache */
 		twl6040_init_vdd_regs(codec);
+
+		/* Set external boost GPO */
+		twl6040_write(codec, TWL6040_REG_GPOCTL, 0x02);
+
+		/* Set initial minimal gain values */
+		twl6040_write(codec, TWL6040_REG_HSGAIN, 0xFF);
+		twl6040_write(codec, TWL6040_REG_EARCTL, 0x1E);
+		twl6040_write(codec, TWL6040_REG_HFLGAIN, 0x1D);
+		twl6040_write(codec, TWL6040_REG_HFRGAIN, 0x1D);
 		break;
 	case SND_SOC_BIAS_OFF:
 		if (!priv->codec_powered)
@@ -739,7 +1328,7 @@
 		break;
 	}
 
-	codec->bias_level = level;
+	codec->dapm.bias_level = level;
 
 	return 0;
 }
@@ -772,23 +1361,6 @@
 	struct snd_soc_codec *codec = rtd->codec;
 	struct twl6040_data *priv = snd_soc_codec_get_drvdata(codec);
 
-	if (!priv->sysclk) {
-		dev_err(codec->dev,
-			"no mclk configured, call set_sysclk() on init\n");
-		return -EINVAL;
-	}
-
-	/*
-	 * capture is not supported at 17.64 MHz,
-	 * it's reserved for headset low-power playback scenario
-	 */
-	if ((priv->sysclk == 17640000) && substream->stream) {
-		dev_err(codec->dev,
-			"capture mode is not supported at %dHz\n",
-			priv->sysclk);
-		return -EINVAL;
-	}
-
 	snd_pcm_hw_constraint_list(substream->runtime, 0,
 				SNDRV_PCM_HW_PARAM_RATE,
 				priv->sysclk_constraints);
@@ -814,10 +1386,17 @@
 
 	rate = params_rate(params);
 	switch (rate) {
+	case 11025:
+	case 22050:
+	case 44100:
 	case 88200:
 		lppllctl |= TWL6040_LPLLFIN;
 		priv->sysclk = 17640000;
 		break;
+	case 8000:
+	case 16000:
+	case 32000:
+	case 48000:
 	case 96000:
 		lppllctl &= ~TWL6040_LPLLFIN;
 		priv->sysclk = 19200000;
@@ -832,31 +1411,37 @@
 	return 0;
 }
 
-static int twl6040_trigger(struct snd_pcm_substream *substream,
-			int cmd, struct snd_soc_dai *dai)
+static int twl6040_prepare(struct snd_pcm_substream *substream,
+			struct snd_soc_dai *dai)
 {
 	struct snd_soc_pcm_runtime *rtd = substream->private_data;
 	struct snd_soc_codec *codec = rtd->codec;
 	struct twl6040_data *priv = snd_soc_codec_get_drvdata(codec);
 
-	switch (cmd) {
-	case SNDRV_PCM_TRIGGER_START:
-	case SNDRV_PCM_TRIGGER_RESUME:
-		/*
-		 * low-power playback mode is restricted
-		 * for headset path only
-		 */
-		if ((priv->sysclk == 17640000) && priv->non_lp) {
+	if (!priv->sysclk) {
+		dev_err(codec->dev,
+			"no mclk configured, call set_sysclk() on init\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * capture is not supported at 17.64 MHz,
+	 * it's reserved for headset low-power playback scenario
+	 */
+	if ((priv->sysclk == 17640000) &&
+			substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
+		dev_err(codec->dev,
+			"capture mode is not supported at %dHz\n",
+			priv->sysclk);
+		return -EINVAL;
+	}
+
+	if ((priv->sysclk == 17640000) && priv->non_lp) {
 			dev_err(codec->dev,
 				"some enabled paths aren't supported at %dHz\n",
 				priv->sysclk);
 			return -EPERM;
-		}
-		break;
-	default:
-		break;
 	}
-
 	return 0;
 }
 
@@ -970,7 +1555,7 @@
 static struct snd_soc_dai_ops twl6040_dai_ops = {
 	.startup	= twl6040_startup,
 	.hw_params	= twl6040_hw_params,
-	.trigger	= twl6040_trigger,
+	.prepare	= twl6040_prepare,
 	.set_sysclk	= twl6040_set_dai_sysclk,
 };
 
@@ -1004,6 +1589,7 @@
 static int twl6040_resume(struct snd_soc_codec *codec)
 {
 	twl6040_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
+	twl6040_set_bias_level(codec, codec->dapm.suspend_bias_level);
 
 	return 0;
 }
@@ -1018,24 +1604,41 @@
 	struct twl6040_data *priv;
 	int audpwron, naudint;
 	int ret = 0;
+	u8 icrev, intmr = TWL6040_ALLINT_MSK;
 
 	priv = kzalloc(sizeof(struct twl6040_data), GFP_KERNEL);
 	if (priv == NULL)
 		return -ENOMEM;
 	snd_soc_codec_set_drvdata(codec, priv);
 
-	if (twl_codec) {
+	priv->codec = codec;
+
+	twl_i2c_read_u8(TWL_MODULE_AUDIO_VOICE, &icrev, TWL6040_REG_ASICREV);
+
+	if (twl_codec && (icrev > 0))
 		audpwron = twl_codec->audpwron_gpio;
-		naudint = twl_codec->naudint_irq;
-	} else {
+	else
 		audpwron = -EINVAL;
+
+	if (twl_codec)
+		naudint = twl_codec->naudint_irq;
+	else
 		naudint = 0;
-	}
 
 	priv->audpwron = audpwron;
 	priv->naudint = naudint;
+	priv->workqueue = create_singlethread_workqueue("twl6040-codec");
+
+	if (!priv->workqueue)
+		goto work_err;
+
+	INIT_DELAYED_WORK(&priv->delayed_work, twl6040_accessory_work);
+
+	mutex_init(&priv->mutex);
 
 	init_completion(&priv->ready);
+	init_completion(&priv->headset.ramp_done);
+	init_completion(&priv->handsfree.ramp_done);
 
 	if (gpio_is_valid(audpwron)) {
 		ret = gpio_request(audpwron, "audpwron");
@@ -1047,7 +1650,14 @@
 			goto gpio2_err;
 
 		priv->codec_powered = 0;
+
+		/* enable only codec ready interrupt */
+		intmr &= ~(TWL6040_READYMSK | TWL6040_PLUGMSK);
+
+		/* reset interrupt status to allow correct power up sequence */
+		twl6040_read_reg_volatile(codec, TWL6040_REG_INTID);
 	}
+	twl6040_write(codec, TWL6040_REG_INTMR, intmr);
 
 	if (naudint) {
 		/* audio interrupt */
@@ -1057,25 +1667,29 @@
 				"twl6040_codec", codec);
 		if (ret)
 			goto gpio2_err;
-	} else {
-		if (gpio_is_valid(audpwron)) {
-			/* enable only codec ready interrupt */
-			twl6040_write_reg_cache(codec, TWL6040_REG_INTMR,
-					~TWL6040_READYMSK & TWL6040_ALLINT_MSK);
-		} else {
-			/* no interrupts at all */
-			twl6040_write_reg_cache(codec, TWL6040_REG_INTMR,
-						TWL6040_ALLINT_MSK);
-		}
 	}
 
 	/* init vio registers */
 	twl6040_init_vio_regs(codec);
 
+	priv->hf_workqueue = create_singlethread_workqueue("twl6040-hf");
+	if (priv->hf_workqueue == NULL) {
+		ret = -ENOMEM;
+		goto irq_err;
+	}
+	priv->hs_workqueue = create_singlethread_workqueue("twl6040-hs");
+	if (priv->hs_workqueue == NULL) {
+		ret = -ENOMEM;
+		goto wq_err;
+	}
+
+	INIT_DELAYED_WORK(&priv->hs_delayed_work, twl6040_pga_hs_work);
+	INIT_DELAYED_WORK(&priv->hf_delayed_work, twl6040_pga_hf_work);
+
 	/* power on device */
 	ret = twl6040_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
 	if (ret)
-		goto irq_err;
+		goto bias_err;
 
 	snd_soc_add_controls(codec, twl6040_snd_controls,
 				ARRAY_SIZE(twl6040_snd_controls));
@@ -1083,6 +1697,10 @@
 
 	return 0;
 
+bias_err:
+	destroy_workqueue(priv->hs_workqueue);
+wq_err:
+	destroy_workqueue(priv->hf_workqueue);
 irq_err:
 	if (naudint)
 		free_irq(naudint, codec);
@@ -1090,6 +1708,8 @@
 	if (gpio_is_valid(audpwron))
 		gpio_free(audpwron);
 gpio1_err:
+	destroy_workqueue(priv->workqueue);
+work_err:
 	kfree(priv);
 	return ret;
 }
@@ -1108,6 +1728,9 @@
 	if (naudint)
 		free_irq(naudint, codec);
 
+	destroy_workqueue(priv->workqueue);
+	destroy_workqueue(priv->hf_workqueue);
+	destroy_workqueue(priv->hs_workqueue);
 	kfree(priv);
 
 	return 0;
diff --git a/sound/soc/codecs/twl6040.h b/sound/soc/codecs/twl6040.h
index f7c77fa..23aeed0 100644
--- a/sound/soc/codecs/twl6040.h
+++ b/sound/soc/codecs/twl6040.h
@@ -79,6 +79,7 @@
 
 /* INTMR (0x04) fields */
 
+#define TWL6040_PLUGMSK			0x02
 #define TWL6040_READYMSK		0x40
 #define TWL6040_ALLINT_MSK		0x7B
 
@@ -135,4 +136,11 @@
 #define TWL6040_HPPLL_ID		1
 #define TWL6040_LPPLL_ID		2
 
+/* STATUS (0x2E) fields */
+
+#define TWL6040_PLUGCOMP		0x02
+
+void twl6040_hs_jack_detect(struct snd_soc_codec *codec,
+			    struct snd_soc_jack *jack, int report);
+
 #endif /* End of __TWL6040_H__ */
diff --git a/sound/soc/codecs/uda134x.c b/sound/soc/codecs/uda134x.c
index 464f0cf..e76847a 100644
--- a/sound/soc/codecs/uda134x.c
+++ b/sound/soc/codecs/uda134x.c
@@ -19,7 +19,6 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/initval.h>
 
 #include <sound/uda134x.h>
@@ -389,7 +388,7 @@
 			pd->power(0);
 		break;
 	}
-	codec->bias_level = level;
+	codec->dapm.bias_level = level;
 	return 0;
 }
 
diff --git a/sound/soc/codecs/uda1380.c b/sound/soc/codecs/uda1380.c
index 0c6c725..c5ca8cf 100644
--- a/sound/soc/codecs/uda1380.c
+++ b/sound/soc/codecs/uda1380.c
@@ -27,7 +27,6 @@
 #include <sound/control.h>
 #include <sound/initval.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/tlv.h>
 #include <sound/uda1380.h>
 
@@ -36,7 +35,6 @@
 /* codec private data */
 struct uda1380_priv {
 	struct snd_soc_codec *codec;
-	u16 reg_cache[UDA1380_CACHEREGNUM];
 	unsigned int dac_clk;
 	struct work_struct work;
 	void *control_data;
@@ -414,10 +412,11 @@
 
 static int uda1380_add_widgets(struct snd_soc_codec *codec)
 {
-	snd_soc_dapm_new_controls(codec, uda1380_dapm_widgets,
-				  ARRAY_SIZE(uda1380_dapm_widgets));
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 
-	snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+	snd_soc_dapm_new_controls(dapm, uda1380_dapm_widgets,
+				  ARRAY_SIZE(uda1380_dapm_widgets));
+	snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 
 	return 0;
 }
@@ -603,7 +602,7 @@
 	int reg;
 	struct uda1380_platform_data *pdata = codec->dev->platform_data;
 
-	if (codec->bias_level == level)
+	if (codec->dapm.bias_level == level)
 		return 0;
 
 	switch (level) {
@@ -613,7 +612,7 @@
 		uda1380_write(codec, UDA1380_PM, R02_PON_BIAS | pm);
 		break;
 	case SND_SOC_BIAS_STANDBY:
-		if (codec->bias_level == SND_SOC_BIAS_OFF) {
+		if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
 			if (gpio_is_valid(pdata->gpio_power)) {
 				gpio_set_value(pdata->gpio_power, 1);
 				mdelay(1);
@@ -636,7 +635,7 @@
 		for (reg = UDA1380_MVOL; reg < UDA1380_CACHEREGNUM; reg++)
 			set_bit(reg - 0x10, &uda1380_cache_dirty);
 	}
-	codec->bias_level = level;
+	codec->dapm.bias_level = level;
 	return 0;
 }
 
diff --git a/sound/soc/codecs/wl1273.c b/sound/soc/codecs/wl1273.c
index 0c47c78..d3ffa2f 100644
--- a/sound/soc/codecs/wl1273.c
+++ b/sound/soc/codecs/wl1273.c
@@ -25,8 +25,7 @@
 #include <linux/slab.h>
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
-#include <sound/soc-dai.h>
-#include <sound/soc-dapm.h>
+#include <sound/soc.h>
 #include <sound/initval.h>
 
 #include "wl1273.h"
diff --git a/sound/soc/codecs/wm2000.c b/sound/soc/codecs/wm2000.c
index 4bcd168..80ddf4f 100644
--- a/sound/soc/codecs/wm2000.c
+++ b/sound/soc/codecs/wm2000.c
@@ -36,7 +36,6 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/initval.h>
 #include <sound/tlv.h>
 
@@ -705,6 +704,7 @@
 /* Called from the machine driver */
 int wm2000_add_controls(struct snd_soc_codec *codec)
 {
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 	int ret;
 
 	if (!wm2000_i2c) {
@@ -712,12 +712,12 @@
 		return -ENODEV;
 	}
 
-	ret = snd_soc_dapm_new_controls(codec, wm2000_dapm_widgets,
+	ret = snd_soc_dapm_new_controls(dapm, wm2000_dapm_widgets,
 					ARRAY_SIZE(wm2000_dapm_widgets));
 	if (ret < 0)
 		return ret;
 
-	ret = snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+	ret = snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 	if (ret < 0)
 		return ret;
 
diff --git a/sound/soc/codecs/wm8350.c b/sound/soc/codecs/wm8350.c
index 7611add..6d6dc9e 100644
--- a/sound/soc/codecs/wm8350.c
+++ b/sound/soc/codecs/wm8350.c
@@ -24,9 +24,9 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/initval.h>
 #include <sound/tlv.h>
+#include <trace/events/asoc.h>
 
 #include "wm8350.h"
 
@@ -54,6 +54,7 @@
 
 struct wm8350_jack_data {
 	struct snd_soc_jack *jack;
+	struct delayed_work work;
 	int report;
 	int short_report;
 };
@@ -230,8 +231,9 @@
  */
 static void wm8350_pga_work(struct work_struct *work)
 {
-	struct snd_soc_codec *codec =
-	    container_of(work, struct snd_soc_codec, delayed_work.work);
+	struct snd_soc_dapm_context *dapm =
+	    container_of(work, struct snd_soc_dapm_context, delayed_work.work);
+	struct snd_soc_codec *codec = dapm->codec;
 	struct wm8350_data *wm8350_data = snd_soc_codec_get_drvdata(codec);
 	struct wm8350_output *out1 = &wm8350_data->out1,
 	    *out2 = &wm8350_data->out2;
@@ -302,8 +304,8 @@
 		out->ramp = WM8350_RAMP_UP;
 		out->active = 1;
 
-		if (!delayed_work_pending(&codec->delayed_work))
-			schedule_delayed_work(&codec->delayed_work,
+		if (!delayed_work_pending(&codec->dapm.delayed_work))
+			schedule_delayed_work(&codec->dapm.delayed_work,
 					      msecs_to_jiffies(1));
 		break;
 
@@ -311,8 +313,8 @@
 		out->ramp = WM8350_RAMP_DOWN;
 		out->active = 0;
 
-		if (!delayed_work_pending(&codec->delayed_work))
-			schedule_delayed_work(&codec->delayed_work,
+		if (!delayed_work_pending(&codec->dapm.delayed_work))
+			schedule_delayed_work(&codec->dapm.delayed_work,
 					      msecs_to_jiffies(1));
 		break;
 	}
@@ -786,9 +788,10 @@
 
 static int wm8350_add_widgets(struct snd_soc_codec *codec)
 {
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 	int ret;
 
-	ret = snd_soc_dapm_new_controls(codec,
+	ret = snd_soc_dapm_new_controls(dapm,
 					wm8350_dapm_widgets,
 					ARRAY_SIZE(wm8350_dapm_widgets));
 	if (ret != 0) {
@@ -797,7 +800,7 @@
 	}
 
 	/* set up audio paths */
-	ret = snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+	ret = snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 	if (ret != 0) {
 		dev_err(codec->dev, "DAPM route register failed\n");
 		return ret;
@@ -1184,7 +1187,7 @@
 		break;
 
 	case SND_SOC_BIAS_STANDBY:
-		if (codec->bias_level == SND_SOC_BIAS_OFF) {
+		if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
 			ret = regulator_bulk_enable(ARRAY_SIZE(priv->supplies),
 						    priv->supplies);
 			if (ret != 0)
@@ -1317,7 +1320,7 @@
 				       priv->supplies);
 		break;
 	}
-	codec->bias_level = level;
+	codec->dapm.bias_level = level;
 	return 0;
 }
 
@@ -1334,37 +1337,13 @@
 	return 0;
 }
 
-static irqreturn_t wm8350_hp_jack_handler(int irq, void *data)
+static void wm8350_hp_work(struct wm8350_data *priv,
+			   struct wm8350_jack_data *jack,
+			   u16 mask)
 {
-	struct wm8350_data *priv = data;
 	struct wm8350 *wm8350 = priv->codec.control_data;
 	u16 reg;
 	int report;
-	int mask;
-	struct wm8350_jack_data *jack = NULL;
-
-	switch (irq - wm8350->irq_base) {
-	case WM8350_IRQ_CODEC_JCK_DET_L:
-		jack = &priv->hpl;
-		mask = WM8350_JACK_L_LVL;
-		break;
-
-	case WM8350_IRQ_CODEC_JCK_DET_R:
-		jack = &priv->hpr;
-		mask = WM8350_JACK_R_LVL;
-		break;
-
-	default:
-		BUG();
-	}
-
-	if (!jack->jack) {
-		dev_warn(wm8350->dev, "Jack interrupt called with no jack\n");
-		return IRQ_NONE;
-	}
-
-	/* Debounce */
-	msleep(200);
 
 	reg = wm8350_reg_read(wm8350, WM8350_JACK_PIN_STATUS);
 	if (reg & mask)
@@ -1374,6 +1353,54 @@
 
 	snd_soc_jack_report(jack->jack, report, jack->report);
 
+}
+
+static void wm8350_hpl_work(struct work_struct *work)
+{
+	struct wm8350_data *priv =
+	    container_of(work, struct wm8350_data, hpl.work.work);
+
+	wm8350_hp_work(priv, &priv->hpl, WM8350_JACK_L_LVL);
+}
+
+static void wm8350_hpr_work(struct work_struct *work)
+{
+	struct wm8350_data *priv =
+	    container_of(work, struct wm8350_data, hpr.work.work);
+
+	wm8350_hp_work(priv, &priv->hpr, WM8350_JACK_R_LVL);
+}
+
+static irqreturn_t wm8350_hp_jack_handler(int irq, void *data)
+{
+	struct wm8350_data *priv = data;
+	struct wm8350 *wm8350 = priv->codec.control_data;
+	struct wm8350_jack_data *jack = NULL;
+
+	switch (irq - wm8350->irq_base) {
+	case WM8350_IRQ_CODEC_JCK_DET_L:
+#ifndef CONFIG_SND_SOC_WM8350_MODULE
+		trace_snd_soc_jack_irq("WM8350 HPL");
+#endif
+		jack = &priv->hpl;
+		break;
+
+	case WM8350_IRQ_CODEC_JCK_DET_R:
+#ifndef CONFIG_SND_SOC_WM8350_MODULE
+		trace_snd_soc_jack_irq("WM8350 HPR");
+#endif
+		jack = &priv->hpr;
+		break;
+
+	default:
+		BUG();
+	}
+
+	if (device_may_wakeup(wm8350->dev))
+		pm_wakeup_event(wm8350->dev, 250);
+
+	schedule_delayed_work(&jack->work, msecs_to_jiffies(200));
+
 	return IRQ_HANDLED;
 }
 
@@ -1436,6 +1463,10 @@
 	u16 reg;
 	int report = 0;
 
+#ifndef CONFIG_SND_SOC_WM8350_MODULE
+	trace_snd_soc_jack_irq("WM8350 mic");
+#endif
+
 	reg = wm8350_reg_read(wm8350, WM8350_JACK_PIN_STATUS);
 	if (reg & WM8350_JACK_MICSCD_LVL)
 		report |= priv->mic.short_report;
@@ -1550,7 +1581,9 @@
 	/* Put the codec into reset if it wasn't already */
 	wm8350_clear_bits(wm8350, WM8350_POWER_MGMT_5, WM8350_CODEC_ENA);
 
-	INIT_DELAYED_WORK(&codec->delayed_work, wm8350_pga_work);
+	INIT_DELAYED_WORK(&codec->dapm.delayed_work, wm8350_pga_work);
+	INIT_DELAYED_WORK(&priv->hpl.work, wm8350_hpl_work);
+	INIT_DELAYED_WORK(&priv->hpr.work, wm8350_hpr_work);
 
 	/* Enable the codec */
 	wm8350_set_bits(wm8350, WM8350_POWER_MGMT_5, WM8350_CODEC_ENA);
@@ -1626,7 +1659,6 @@
 {
 	struct wm8350_data *priv = snd_soc_codec_get_drvdata(codec);
 	struct wm8350 *wm8350 = dev_get_platdata(codec->dev);
-	int ret;
 
 	wm8350_clear_bits(wm8350, WM8350_JACK_DETECT,
 			  WM8350_JDL_ENA | WM8350_JDR_ENA);
@@ -1641,15 +1673,12 @@
 	priv->hpr.jack = NULL;
 	priv->mic.jack = NULL;
 
-	/* cancel any work waiting to be queued. */
-	ret = cancel_delayed_work(&codec->delayed_work);
+	cancel_delayed_work_sync(&priv->hpl.work);
+	cancel_delayed_work_sync(&priv->hpr.work);
 
 	/* if there was any work waiting then we run it now and
 	 * wait for its completion */
-	if (ret) {
-		schedule_delayed_work(&codec->delayed_work, 0);
-		flush_scheduled_work();
-	}
+	flush_delayed_work_sync(&codec->dapm.delayed_work);
 
 	wm8350_set_bias_level(codec, SND_SOC_BIAS_OFF);
 
diff --git a/sound/soc/codecs/wm8400.c b/sound/soc/codecs/wm8400.c
index 8502997..3c3bc07 100644
--- a/sound/soc/codecs/wm8400.c
+++ b/sound/soc/codecs/wm8400.c
@@ -26,7 +26,6 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/initval.h>
 #include <sound/tlv.h>
 
@@ -911,10 +910,11 @@
 
 static int wm8400_add_widgets(struct snd_soc_codec *codec)
 {
-	snd_soc_dapm_new_controls(codec, wm8400_dapm_widgets,
-				  ARRAY_SIZE(wm8400_dapm_widgets));
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 
-	snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+	snd_soc_dapm_new_controls(dapm, wm8400_dapm_widgets,
+				  ARRAY_SIZE(wm8400_dapm_widgets));
+	snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 
 	return 0;
 }
@@ -1219,7 +1219,7 @@
 		break;
 
 	case SND_SOC_BIAS_STANDBY:
-		if (codec->bias_level == SND_SOC_BIAS_OFF) {
+		if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
 			ret = regulator_bulk_enable(ARRAY_SIZE(power),
 						    &power[0]);
 			if (ret != 0) {
@@ -1306,7 +1306,7 @@
 		break;
 	}
 
-	codec->bias_level = level;
+	codec->dapm.bias_level = level;
 	return 0;
 }
 
diff --git a/sound/soc/codecs/wm8510.c b/sound/soc/codecs/wm8510.c
index 8f10709..db0dced 100644
--- a/sound/soc/codecs/wm8510.c
+++ b/sound/soc/codecs/wm8510.c
@@ -24,7 +24,6 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/initval.h>
 
 #include "wm8510.h"
@@ -216,10 +215,11 @@
 
 static int wm8510_add_widgets(struct snd_soc_codec *codec)
 {
-	snd_soc_dapm_new_controls(codec, wm8510_dapm_widgets,
-				  ARRAY_SIZE(wm8510_dapm_widgets));
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 
-	snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+	snd_soc_dapm_new_controls(dapm, wm8510_dapm_widgets,
+				  ARRAY_SIZE(wm8510_dapm_widgets));
+	snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 
 	return 0;
 }
@@ -478,7 +478,7 @@
 	case SND_SOC_BIAS_STANDBY:
 		power1 |= WM8510_POWER1_BIASEN | WM8510_POWER1_BUFIOEN;
 
-		if (codec->bias_level == SND_SOC_BIAS_OFF) {
+		if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
 			/* Initial cap charge at VMID 5k */
 			snd_soc_write(codec, WM8510_POWER1, power1 | 0x3);
 			mdelay(100);
@@ -495,7 +495,7 @@
 		break;
 	}
 
-	codec->bias_level = level;
+	codec->dapm.bias_level = level;
 	return 0;
 }
 
diff --git a/sound/soc/codecs/wm8523.c b/sound/soc/codecs/wm8523.c
index deca79e..5eb2f50 100644
--- a/sound/soc/codecs/wm8523.c
+++ b/sound/soc/codecs/wm8523.c
@@ -24,7 +24,6 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/initval.h>
 #include <sound/tlv.h>
 
@@ -109,10 +108,11 @@
 
 static int wm8523_add_widgets(struct snd_soc_codec *codec)
 {
-	snd_soc_dapm_new_controls(codec, wm8523_dapm_widgets,
-				  ARRAY_SIZE(wm8523_dapm_widgets));
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 
-	snd_soc_dapm_add_routes(codec, intercon, ARRAY_SIZE(intercon));
+	snd_soc_dapm_new_controls(dapm, wm8523_dapm_widgets,
+				  ARRAY_SIZE(wm8523_dapm_widgets));
+	snd_soc_dapm_add_routes(dapm, intercon, ARRAY_SIZE(intercon));
 
 	return 0;
 }
@@ -327,7 +327,7 @@
 		break;
 
 	case SND_SOC_BIAS_STANDBY:
-		if (codec->bias_level == SND_SOC_BIAS_OFF) {
+		if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
 			ret = regulator_bulk_enable(ARRAY_SIZE(wm8523->supplies),
 						    wm8523->supplies);
 			if (ret != 0) {
@@ -366,7 +366,7 @@
 				       wm8523->supplies);
 		break;
 	}
-	codec->bias_level = level;
+	codec->dapm.bias_level = level;
 	return 0;
 }
 
diff --git a/sound/soc/codecs/wm8580.c b/sound/soc/codecs/wm8580.c
index 8725d4e..8f6b5ee 100644
--- a/sound/soc/codecs/wm8580.c
+++ b/sound/soc/codecs/wm8580.c
@@ -31,7 +31,6 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/tlv.h>
 #include <sound/initval.h>
 #include <asm/div64.h>
@@ -191,7 +190,6 @@
 struct wm8580_priv {
 	enum snd_soc_control_type control_type;
 	struct regulator_bulk_data supplies[WM8580_NUM_SUPPLIES];
-	u16 reg_cache[WM8580_MAX_REGISTER + 1];
 	struct pll_state a;
 	struct pll_state b;
 	int sysclk[2];
@@ -302,10 +300,11 @@
 
 static int wm8580_add_widgets(struct snd_soc_codec *codec)
 {
-	snd_soc_dapm_new_controls(codec, wm8580_dapm_widgets,
-				  ARRAY_SIZE(wm8580_dapm_widgets));
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 
-	snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+	snd_soc_dapm_new_controls(dapm, wm8580_dapm_widgets,
+				  ARRAY_SIZE(wm8580_dapm_widgets));
+	snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 
 	return 0;
 }
@@ -507,13 +506,13 @@
 	}
 
 	/* Look up the SYSCLK ratio; accept only exact matches */
-	ratio = wm8580->sysclk[dai->id] / params_rate(params);
+	ratio = wm8580->sysclk[dai->driver->id] / params_rate(params);
 	for (i = 0; i < ARRAY_SIZE(wm8580_sysclk_ratios); i++)
 		if (ratio == wm8580_sysclk_ratios[i])
 			break;
 	if (i == ARRAY_SIZE(wm8580_sysclk_ratios)) {
 		dev_err(codec->dev, "Invalid clock ratio %d/%d\n",
-			wm8580->sysclk[dai->id], params_rate(params));
+			wm8580->sysclk[dai->driver->id], params_rate(params));
 		return -EINVAL;
 	}
 	paifa |= i;
@@ -716,7 +715,7 @@
 
 	switch (clk_id) {
 	case WM8580_CLKSRC_ADCMCLK:
-		if (dai->id != WM8580_DAI_PAIFTX)
+		if (dai->driver->id != WM8580_DAI_PAIFTX)
 			return -EINVAL;
 		sel = 0 << sel_shift;
 		break;
@@ -735,7 +734,7 @@
 	}
 
 	/* We really should validate PLL settings but not yet */
-	wm8580->sysclk[dai->id] = freq;
+	wm8580->sysclk[dai->driver->id] = freq;
 
 	return snd_soc_update_bits(codec, WM8580_CLKSEL, sel_mask, sel);
 }
@@ -767,7 +766,7 @@
 		break;
 
 	case SND_SOC_BIAS_STANDBY:
-		if (codec->bias_level == SND_SOC_BIAS_OFF) {
+		if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
 			/* Power up and get individual control of the DACs */
 			reg = snd_soc_read(codec, WM8580_PWRDN1);
 			reg &= ~(WM8580_PWRDN1_PWDN | WM8580_PWRDN1_ALLDACPD);
@@ -785,7 +784,7 @@
 		snd_soc_write(codec, WM8580_PWRDN1, reg | WM8580_PWRDN1_PWDN);
 		break;
 	}
-	codec->bias_level = level;
+	codec->dapm.bias_level = level;
 	return 0;
 }
 
@@ -905,7 +904,7 @@
 	.set_bias_level = wm8580_set_bias_level,
 	.reg_cache_size = ARRAY_SIZE(wm8580_reg),
 	.reg_word_size = sizeof(u16),
-	.reg_cache_default = &wm8580_reg,
+	.reg_cache_default = wm8580_reg,
 };
 
 #if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
diff --git a/sound/soc/codecs/wm8711.c b/sound/soc/codecs/wm8711.c
index 54fbd76..97c3038 100644
--- a/sound/soc/codecs/wm8711.c
+++ b/sound/soc/codecs/wm8711.c
@@ -25,7 +25,6 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/tlv.h>
 #include <sound/initval.h>
 
@@ -34,7 +33,6 @@
 /* codec private data */
 struct wm8711_priv {
 	enum snd_soc_control_type bus_type;
-	u16 reg_cache[WM8711_CACHEREGNUM];
 	unsigned int sysclk;
 };
 
@@ -93,10 +91,11 @@
 
 static int wm8711_add_widgets(struct snd_soc_codec *codec)
 {
-	snd_soc_dapm_new_controls(codec, wm8711_dapm_widgets,
-				  ARRAY_SIZE(wm8711_dapm_widgets));
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 
-	snd_soc_dapm_add_routes(codec, intercon, ARRAY_SIZE(intercon));
+	snd_soc_dapm_new_controls(dapm, wm8711_dapm_widgets,
+				  ARRAY_SIZE(wm8711_dapm_widgets));
+	snd_soc_dapm_add_routes(dapm, intercon, ARRAY_SIZE(intercon));
 
 	return 0;
 }
@@ -318,7 +317,7 @@
 		snd_soc_write(codec, WM8711_PWR, 0xffff);
 		break;
 	}
-	codec->bias_level = level;
+	codec->dapm.bias_level = level;
 	return 0;
 }
 
diff --git a/sound/soc/codecs/wm8728.c b/sound/soc/codecs/wm8728.c
index 075f35e..736b035 100644
--- a/sound/soc/codecs/wm8728.c
+++ b/sound/soc/codecs/wm8728.c
@@ -23,7 +23,6 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/initval.h>
 #include <sound/tlv.h>
 
@@ -73,10 +72,11 @@
 
 static int wm8728_add_widgets(struct snd_soc_codec *codec)
 {
-	snd_soc_dapm_new_controls(codec, wm8728_dapm_widgets,
-				  ARRAY_SIZE(wm8728_dapm_widgets));
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 
-	snd_soc_dapm_add_routes(codec, intercon, ARRAY_SIZE(intercon));
+	snd_soc_dapm_new_controls(dapm, wm8728_dapm_widgets,
+				  ARRAY_SIZE(wm8728_dapm_widgets));
+	snd_soc_dapm_add_routes(dapm, intercon, ARRAY_SIZE(intercon));
 
 	return 0;
 }
@@ -180,7 +180,7 @@
 	case SND_SOC_BIAS_ON:
 	case SND_SOC_BIAS_PREPARE:
 	case SND_SOC_BIAS_STANDBY:
-		if (codec->bias_level == SND_SOC_BIAS_OFF) {
+		if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
 			/* Power everything up... */
 			reg = snd_soc_read(codec, WM8728_DACCTL);
 			snd_soc_write(codec, WM8728_DACCTL, reg & ~0x4);
@@ -197,7 +197,7 @@
 		snd_soc_write(codec, WM8728_DACCTL, reg | 0x4);
 		break;
 	}
-	codec->bias_level = level;
+	codec->dapm.bias_level = level;
 	return 0;
 }
 
diff --git a/sound/soc/codecs/wm8731.c b/sound/soc/codecs/wm8731.c
index e725c09..0a67c31 100644
--- a/sound/soc/codecs/wm8731.c
+++ b/sound/soc/codecs/wm8731.c
@@ -26,7 +26,6 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/initval.h>
 #include <sound/tlv.h>
 
@@ -44,9 +43,10 @@
 struct wm8731_priv {
 	enum snd_soc_control_type control_type;
 	struct regulator_bulk_data supplies[WM8731_NUM_SUPPLIES];
-	u16 reg_cache[WM8731_CACHEREGNUM];
 	unsigned int sysclk;
 	int sysclk_type;
+	int playback_fs;
+	bool deemph;
 };
 
 
@@ -65,16 +65,79 @@
 #define wm8731_reset(c)	snd_soc_write(c, WM8731_RESET, 0)
 
 static const char *wm8731_input_select[] = {"Line In", "Mic"};
-static const char *wm8731_deemph[] = {"None", "32Khz", "44.1Khz", "48Khz"};
 
-static const struct soc_enum wm8731_enum[] = {
-	SOC_ENUM_SINGLE(WM8731_APANA, 2, 2, wm8731_input_select),
-	SOC_ENUM_SINGLE(WM8731_APDIGI, 1, 4, wm8731_deemph),
-};
+static const struct soc_enum wm8731_insel_enum =
+	SOC_ENUM_SINGLE(WM8731_APANA, 2, 2, wm8731_input_select);
+
+static int wm8731_deemph[] = { 0, 32000, 44100, 48000 };
+
+static int wm8731_set_deemph(struct snd_soc_codec *codec)
+{
+	struct wm8731_priv *wm8731 = snd_soc_codec_get_drvdata(codec);
+	int val, i, best;
+
+	/* If we're using deemphasis select the nearest available sample
+	 * rate.
+	 */
+	if (wm8731->deemph) {
+		best = 1;
+		for (i = 2; i < ARRAY_SIZE(wm8731_deemph); i++) {
+			if (abs(wm8731_deemph[i] - wm8731->playback_fs) <
+			    abs(wm8731_deemph[best] - wm8731->playback_fs))
+				best = i;
+		}
+
+		val = best << 1;
+	} else {
+		best = 0;
+		val = 0;
+	}
+
+	dev_dbg(codec->dev, "Set deemphasis %d (%dHz)\n",
+		best, wm8731_deemph[best]);
+
+	return snd_soc_update_bits(codec, WM8731_APDIGI, 0x6, val);
+}
+
+static int wm8731_get_deemph(struct snd_kcontrol *kcontrol,
+			     struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+	struct wm8731_priv *wm8731 = snd_soc_codec_get_drvdata(codec);
+
+	ucontrol->value.enumerated.item[0] = wm8731->deemph;
+
+	return 0;
+}
+
+static int wm8731_put_deemph(struct snd_kcontrol *kcontrol,
+			     struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+	struct wm8731_priv *wm8731 = snd_soc_codec_get_drvdata(codec);
+	int deemph = ucontrol->value.enumerated.item[0];
+	int ret = 0;
+
+	if (deemph > 1)
+		return -EINVAL;
+
+	mutex_lock(&codec->mutex);
+	if (wm8731->deemph != deemph) {
+		wm8731->deemph = deemph;
+
+		wm8731_set_deemph(codec);
+
+		ret = 1;
+	}
+	mutex_unlock(&codec->mutex);
+
+	return ret;
+}
 
 static const DECLARE_TLV_DB_SCALE(in_tlv, -3450, 150, 0);
 static const DECLARE_TLV_DB_SCALE(sidetone_tlv, -1500, 300, 0);
 static const DECLARE_TLV_DB_SCALE(out_tlv, -12100, 100, 1);
+static const DECLARE_TLV_DB_SCALE(mic_tlv, 0, 2000, 0);
 
 static const struct snd_kcontrol_new wm8731_snd_controls[] = {
 
@@ -87,7 +150,7 @@
 		 in_tlv),
 SOC_DOUBLE_R("Line Capture Switch", WM8731_LINVOL, WM8731_RINVOL, 7, 1, 1),
 
-SOC_SINGLE("Mic Boost (+20dB)", WM8731_APANA, 0, 1, 0),
+SOC_SINGLE_TLV("Mic Boost Volume", WM8731_APANA, 0, 1, 0, mic_tlv),
 SOC_SINGLE("Mic Capture Switch", WM8731_APANA, 1, 1, 1),
 
 SOC_SINGLE_TLV("Sidetone Playback Volume", WM8731_APANA, 6, 3, 1,
@@ -96,7 +159,8 @@
 SOC_SINGLE("ADC High Pass Filter Switch", WM8731_APDIGI, 0, 1, 1),
 SOC_SINGLE("Store DC Offset Switch", WM8731_APDIGI, 4, 1, 0),
 
-SOC_ENUM("Playback De-emphasis", wm8731_enum[1]),
+SOC_SINGLE_BOOL_EXT("Playback Deemphasis Switch", 0,
+		    wm8731_get_deemph, wm8731_put_deemph),
 };
 
 /* Output Mixer */
@@ -108,7 +172,7 @@
 
 /* Input mux */
 static const struct snd_kcontrol_new wm8731_input_mux_controls =
-SOC_DAPM_ENUM("Input Select", wm8731_enum[0]);
+SOC_DAPM_ENUM("Input Select", wm8731_insel_enum);
 
 static const struct snd_soc_dapm_widget wm8731_dapm_widgets[] = {
 SND_SOC_DAPM_SUPPLY("OSC", WM8731_PWR, 5, 1, NULL, 0),
@@ -165,10 +229,11 @@
 
 static int wm8731_add_widgets(struct snd_soc_codec *codec)
 {
-	snd_soc_dapm_new_controls(codec, wm8731_dapm_widgets,
-				  ARRAY_SIZE(wm8731_dapm_widgets));
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 
-	snd_soc_dapm_add_routes(codec, intercon, ARRAY_SIZE(intercon));
+	snd_soc_dapm_new_controls(dapm, wm8731_dapm_widgets,
+				  ARRAY_SIZE(wm8731_dapm_widgets));
+	snd_soc_dapm_add_routes(dapm, intercon, ARRAY_SIZE(intercon));
 
 	return 0;
 }
@@ -239,6 +304,8 @@
 	u16 srate = (coeff_div[i].sr << 2) |
 		(coeff_div[i].bosr << 1) | coeff_div[i].usb;
 
+	wm8731->playback_fs = params_rate(params);
+
 	snd_soc_write(codec, WM8731_SRATE, srate);
 
 	/* bit size */
@@ -253,6 +320,8 @@
 		break;
 	}
 
+	wm8731_set_deemph(codec);
+
 	snd_soc_write(codec, WM8731_IFACE, iface);
 	return 0;
 }
@@ -319,7 +388,7 @@
 		return -EINVAL;
 	}
 
-	snd_soc_dapm_sync(codec);
+	snd_soc_dapm_sync(&codec->dapm);
 
 	return 0;
 }
@@ -399,7 +468,7 @@
 	case SND_SOC_BIAS_PREPARE:
 		break;
 	case SND_SOC_BIAS_STANDBY:
-		if (codec->bias_level == SND_SOC_BIAS_OFF) {
+		if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
 			ret = regulator_bulk_enable(ARRAY_SIZE(wm8731->supplies),
 						    wm8731->supplies);
 			if (ret != 0)
@@ -428,7 +497,7 @@
 				       wm8731->supplies);
 		break;
 	}
-	codec->bias_level = level;
+	codec->dapm.bias_level = level;
 	return 0;
 }
 
@@ -542,7 +611,6 @@
 err_regulator_get:
 	regulator_bulk_free(ARRAY_SIZE(wm8731->supplies), wm8731->supplies);
 
-	kfree(wm8731);
 	return ret;
 }
 
diff --git a/sound/soc/codecs/wm8737.c b/sound/soc/codecs/wm8737.c
new file mode 100644
index 0000000..30c67d0
--- /dev/null
+++ b/sound/soc/codecs/wm8737.c
@@ -0,0 +1,754 @@
+/*
+ * wm8737.c  --  WM8737 ALSA SoC Audio driver
+ *
+ * Copyright 2010 Wolfson Microelectronics plc
+ *
+ * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/pm.h>
+#include <linux/i2c.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/spi/spi.h>
+#include <linux/slab.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <sound/initval.h>
+#include <sound/tlv.h>
+
+#include "wm8737.h"
+
+#define WM8737_NUM_SUPPLIES 4
+static const char *wm8737_supply_names[WM8737_NUM_SUPPLIES] = {
+	"DCVDD",
+	"DBVDD",
+	"AVDD",
+	"MVDD",
+};
+
+/* codec private data */
+struct wm8737_priv {
+	enum snd_soc_control_type control_type;
+	struct regulator_bulk_data supplies[WM8737_NUM_SUPPLIES];
+	unsigned int mclk;
+};
+
+static const u16 wm8737_reg[WM8737_REGISTER_COUNT] = {
+	0x00C3,     /* R0  - Left PGA volume */
+	0x00C3,     /* R1  - Right PGA volume */
+	0x0007,     /* R2  - AUDIO path L */
+	0x0007,     /* R3  - AUDIO path R */
+	0x0000,     /* R4  - 3D Enhance */
+	0x0000,     /* R5  - ADC Control */
+	0x0000,     /* R6  - Power Management */
+	0x000A,     /* R7  - Audio Format */
+	0x0000,     /* R8  - Clocking */
+	0x000F,     /* R9  - MIC Preamp Control */
+	0x0003,     /* R10 - Misc Bias Control */
+	0x0000,     /* R11 - Noise Gate */
+	0x007C,     /* R12 - ALC1 */
+	0x0000,     /* R13 - ALC2 */
+	0x0032,     /* R14 - ALC3 */
+};
+
+static int wm8737_reset(struct snd_soc_codec *codec)
+{
+	return snd_soc_write(codec, WM8737_RESET, 0);
+}
+
+static const unsigned int micboost_tlv[] = {
+	TLV_DB_RANGE_HEAD(4),
+	0, 0, TLV_DB_SCALE_ITEM(1300, 0, 0),
+	1, 1, TLV_DB_SCALE_ITEM(1800, 0, 0),
+	2, 2, TLV_DB_SCALE_ITEM(2800, 0, 0),
+	3, 3, TLV_DB_SCALE_ITEM(3300, 0, 0),
+};
+static const DECLARE_TLV_DB_SCALE(pga_tlv, -9750, 50, 1);
+static const DECLARE_TLV_DB_SCALE(adc_tlv, -600, 600, 0);
+static const DECLARE_TLV_DB_SCALE(ng_tlv, -7800, 600, 0);
+static const DECLARE_TLV_DB_SCALE(alc_max_tlv, -1200, 600, 0);
+static const DECLARE_TLV_DB_SCALE(alc_target_tlv, -1800, 100, 0);
+
+static const char *micbias_enum_text[] = {
+	"25%",
+	"50%",
+	"75%",
+	"100%",
+};
+
+static const struct soc_enum micbias_enum =
+	SOC_ENUM_SINGLE(WM8737_MIC_PREAMP_CONTROL, 0, 4, micbias_enum_text);
+
+static const char *low_cutoff_text[] = {
+	"Low", "High"
+};
+
+static const struct soc_enum low_3d =
+	SOC_ENUM_SINGLE(WM8737_3D_ENHANCE, 6, 2, low_cutoff_text);
+
+static const char *high_cutoff_text[] = {
+	"High", "Low"
+};
+
+static const struct soc_enum high_3d =
+	SOC_ENUM_SINGLE(WM8737_3D_ENHANCE, 5, 2, high_cutoff_text);
+
+static const char *alc_fn_text[] = {
+	"Disabled", "Right", "Left", "Stereo"
+};
+
+static const struct soc_enum alc_fn =
+	SOC_ENUM_SINGLE(WM8737_ALC1, 7, 4, alc_fn_text);
+
+static const char *alc_hold_text[] = {
+	"0", "2.67ms", "5.33ms", "10.66ms", "21.32ms", "42.64ms", "85.28ms",
+	"170.56ms", "341.12ms", "682.24ms", "1.364s", "2.728s", "5.458s",
+	"10.916s", "21.832s", "43.691s"
+};
+
+static const struct soc_enum alc_hold =
+	SOC_ENUM_SINGLE(WM8737_ALC2, 0, 16, alc_hold_text);
+
+static const char *alc_atk_text[] = {
+	"8.4ms", "16.8ms", "33.6ms", "67.2ms", "134.4ms", "268.8ms", "537.6ms",
+	"1.075s", "2.15s", "4.3s", "8.6s"
+};
+
+static const struct soc_enum alc_atk =
+	SOC_ENUM_SINGLE(WM8737_ALC3, 0, 11, alc_atk_text);
+
+static const char *alc_dcy_text[] = {
+	"33.6ms", "67.2ms", "134.4ms", "268.8ms", "537.6ms", "1.075s", "2.15s",
+	"4.3s", "8.6s", "17.2s", "34.41s"
+};
+
+static const struct soc_enum alc_dcy =
+	SOC_ENUM_SINGLE(WM8737_ALC3, 4, 11, alc_dcy_text);
+
+static const struct snd_kcontrol_new wm8737_snd_controls[] = {
+SOC_DOUBLE_R_TLV("Mic Boost Volume", WM8737_AUDIO_PATH_L, WM8737_AUDIO_PATH_R,
+		 6, 3, 0, micboost_tlv),
+SOC_DOUBLE_R("Mic Boost Switch", WM8737_AUDIO_PATH_L, WM8737_AUDIO_PATH_R,
+	     4, 1, 0),
+SOC_DOUBLE("Mic ZC Switch", WM8737_AUDIO_PATH_L, WM8737_AUDIO_PATH_R,
+	   3, 1, 0),
+
+SOC_DOUBLE_R_TLV("Capture Volume", WM8737_LEFT_PGA_VOLUME,
+		 WM8737_RIGHT_PGA_VOLUME, 0, 255, 0, pga_tlv),
+SOC_DOUBLE("Capture ZC Switch", WM8737_AUDIO_PATH_L, WM8737_AUDIO_PATH_R,
+	   2, 1, 0),
+
+SOC_DOUBLE("INPUT1 DC Bias Switch", WM8737_MISC_BIAS_CONTROL, 0, 1, 1, 0),
+
+SOC_ENUM("Mic PGA Bias", micbias_enum),
+SOC_SINGLE("ADC Low Power Switch", WM8737_ADC_CONTROL, 2, 1, 0),
+SOC_SINGLE("High Pass Filter Switch", WM8737_ADC_CONTROL, 0, 1, 1),
+SOC_DOUBLE("Polarity Invert Switch", WM8737_ADC_CONTROL, 5, 6, 1, 0),
+
+SOC_SINGLE("3D Switch", WM8737_3D_ENHANCE, 0, 1, 0),
+SOC_SINGLE("3D Depth", WM8737_3D_ENHANCE, 1, 15, 0),
+SOC_ENUM("3D Low Cut-off", low_3d),
+SOC_ENUM("3D High Cut-off", low_3d),
+SOC_SINGLE_TLV("3D ADC Volume", WM8737_3D_ENHANCE, 7, 1, 1, adc_tlv),
+
+SOC_SINGLE("Noise Gate Switch", WM8737_NOISE_GATE, 0, 1, 0),
+SOC_SINGLE_TLV("Noise Gate Threshold Volume", WM8737_NOISE_GATE, 2, 7, 0,
+	       ng_tlv),
+
+SOC_ENUM("ALC", alc_fn),
+SOC_SINGLE_TLV("ALC Max Gain Volume", WM8737_ALC1, 4, 7, 0, alc_max_tlv),
+SOC_SINGLE_TLV("ALC Target Volume", WM8737_ALC1, 0, 15, 0, alc_target_tlv),
+SOC_ENUM("ALC Hold Time", alc_hold),
+SOC_SINGLE("ALC ZC Switch", WM8737_ALC2, 4, 1, 0),
+SOC_ENUM("ALC Attack Time", alc_atk),
+SOC_ENUM("ALC Decay Time", alc_dcy),
+};
+
+static const char *linsel_text[] = {
+	"LINPUT1", "LINPUT2", "LINPUT3", "LINPUT1 DC",
+};
+
+static const struct soc_enum linsel_enum =
+	SOC_ENUM_SINGLE(WM8737_AUDIO_PATH_L, 7, 4, linsel_text);
+
+static const struct snd_kcontrol_new linsel_mux =
+	SOC_DAPM_ENUM("LINSEL", linsel_enum);
+
+
+static const char *rinsel_text[] = {
+	"RINPUT1", "RINPUT2", "RINPUT3", "RINPUT1 DC",
+};
+
+static const struct soc_enum rinsel_enum =
+	SOC_ENUM_SINGLE(WM8737_AUDIO_PATH_R, 7, 4, rinsel_text);
+
+static const struct snd_kcontrol_new rinsel_mux =
+	SOC_DAPM_ENUM("RINSEL", rinsel_enum);
+
+static const char *bypass_text[] = {
+	"Direct", "Preamp"
+};
+
+static const struct soc_enum lbypass_enum =
+	SOC_ENUM_SINGLE(WM8737_MIC_PREAMP_CONTROL, 2, 2, bypass_text);
+
+static const struct snd_kcontrol_new lbypass_mux =
+	SOC_DAPM_ENUM("Left Bypass", lbypass_enum);
+
+
+static const struct soc_enum rbypass_enum =
+	SOC_ENUM_SINGLE(WM8737_MIC_PREAMP_CONTROL, 3, 2, bypass_text);
+
+static const struct snd_kcontrol_new rbypass_mux =
+	SOC_DAPM_ENUM("Left Bypass", rbypass_enum);
+
+static const struct snd_soc_dapm_widget wm8737_dapm_widgets[] = {
+SND_SOC_DAPM_INPUT("LINPUT1"),
+SND_SOC_DAPM_INPUT("LINPUT2"),
+SND_SOC_DAPM_INPUT("LINPUT3"),
+SND_SOC_DAPM_INPUT("RINPUT1"),
+SND_SOC_DAPM_INPUT("RINPUT2"),
+SND_SOC_DAPM_INPUT("RINPUT3"),
+SND_SOC_DAPM_INPUT("LACIN"),
+SND_SOC_DAPM_INPUT("RACIN"),
+
+SND_SOC_DAPM_MUX("LINSEL", SND_SOC_NOPM, 0, 0, &linsel_mux),
+SND_SOC_DAPM_MUX("RINSEL", SND_SOC_NOPM, 0, 0, &rinsel_mux),
+
+SND_SOC_DAPM_MUX("Left Preamp Mux", SND_SOC_NOPM, 0, 0, &lbypass_mux),
+SND_SOC_DAPM_MUX("Right Preamp Mux", SND_SOC_NOPM, 0, 0, &rbypass_mux),
+
+SND_SOC_DAPM_PGA("PGAL", WM8737_POWER_MANAGEMENT, 5, 0, NULL, 0),
+SND_SOC_DAPM_PGA("PGAR", WM8737_POWER_MANAGEMENT, 4, 0, NULL, 0),
+
+SND_SOC_DAPM_DAC("ADCL", NULL, WM8737_POWER_MANAGEMENT, 3, 0),
+SND_SOC_DAPM_DAC("ADCR", NULL, WM8737_POWER_MANAGEMENT, 2, 0),
+
+SND_SOC_DAPM_AIF_OUT("AIF", "Capture", 0, WM8737_POWER_MANAGEMENT, 6, 0),
+};
+
+static const struct snd_soc_dapm_route intercon[] = {
+	{ "LINSEL", "LINPUT1", "LINPUT1" },
+	{ "LINSEL", "LINPUT2", "LINPUT2" },
+	{ "LINSEL", "LINPUT3", "LINPUT3" },
+	{ "LINSEL", "LINPUT1 DC", "LINPUT1" },
+
+	{ "RINSEL", "RINPUT1", "RINPUT1" },
+	{ "RINSEL", "RINPUT2", "RINPUT2" },
+	{ "RINSEL", "RINPUT3", "RINPUT3" },
+	{ "RINSEL", "RINPUT1 DC", "RINPUT1" },
+
+	{ "Left Preamp Mux", "Preamp", "LINSEL" },
+	{ "Left Preamp Mux", "Direct", "LACIN" },
+
+	{ "Right Preamp Mux", "Preamp", "RINSEL" },
+	{ "Right Preamp Mux", "Direct", "RACIN" },
+
+	{ "PGAL", NULL, "Left Preamp Mux" },
+	{ "PGAR", NULL, "Right Preamp Mux" },
+
+	{ "ADCL", NULL, "PGAL" },
+	{ "ADCR", NULL, "PGAR" },
+
+	{ "AIF", NULL, "ADCL" },
+	{ "AIF", NULL, "ADCR" },
+};
+
+static int wm8737_add_widgets(struct snd_soc_codec *codec)
+{
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
+
+	snd_soc_dapm_new_controls(dapm, wm8737_dapm_widgets,
+				  ARRAY_SIZE(wm8737_dapm_widgets));
+	snd_soc_dapm_add_routes(dapm, intercon, ARRAY_SIZE(intercon));
+
+	return 0;
+}
+
+/* codec mclk clock divider coefficients */
+static const struct {
+	u32 mclk;
+	u32 rate;
+	u8 usb;
+	u8 sr;
+} coeff_div[] = {
+	{ 12288000,  8000, 0,  0x4 },
+	{ 12288000, 12000, 0,  0x8 },
+	{ 12288000, 16000, 0,  0xa },
+	{ 12288000, 24000, 0, 0x1c },
+	{ 12288000, 32000, 0,  0xc },
+	{ 12288000, 48000, 0,    0 },
+	{ 12288000, 96000, 0,  0xe },
+
+	{ 11289600,  8000, 0, 0x14 },
+	{ 11289600, 11025, 0, 0x18 },
+	{ 11289600, 22050, 0, 0x1a },
+	{ 11289600, 44100, 0, 0x10 },
+	{ 11289600, 88200, 0, 0x1e },
+
+	{ 18432000,  8000, 0,  0x5 },
+	{ 18432000, 12000, 0,  0x9 },
+	{ 18432000, 16000, 0,  0xb },
+	{ 18432000, 24000, 0, 0x1b },
+	{ 18432000, 32000, 0,  0xd },
+	{ 18432000, 48000, 0,  0x1 },
+	{ 18432000, 96000, 0, 0x1f },
+
+	{ 16934400,  8000, 0, 0x15 },
+	{ 16934400, 11025, 0, 0x19 },
+	{ 16934400, 22050, 0, 0x1b },
+	{ 16934400, 44100, 0, 0x11 },
+	{ 16934400, 88200, 0, 0x1f },
+
+	{ 12000000,  8000, 1,  0x4 },
+	{ 12000000, 11025, 1, 0x19 },
+	{ 12000000, 12000, 1,  0x8 },
+	{ 12000000, 16000, 1,  0xa },
+	{ 12000000, 22050, 1, 0x1b },
+	{ 12000000, 24000, 1, 0x1c },
+	{ 12000000, 32000, 1,  0xc },
+	{ 12000000, 44100, 1, 0x11 },
+	{ 12000000, 48000, 1,  0x0 },
+	{ 12000000, 88200, 1, 0x1f },
+	{ 12000000, 96000, 1,  0xe },
+};
+
+static int wm8737_hw_params(struct snd_pcm_substream *substream,
+			    struct snd_pcm_hw_params *params,
+			    struct snd_soc_dai *dai)
+{
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct snd_soc_codec *codec = rtd->codec;
+	struct wm8737_priv *wm8737 = snd_soc_codec_get_drvdata(codec);
+	int i;
+	u16 clocking = 0;
+	u16 af = 0;
+
+	for (i = 0; i < ARRAY_SIZE(coeff_div); i++) {
+		if (coeff_div[i].rate != params_rate(params))
+			continue;
+
+		if (coeff_div[i].mclk == wm8737->mclk)
+			break;
+
+		if (coeff_div[i].mclk == wm8737->mclk * 2) {
+			clocking |= WM8737_CLKDIV2;
+			break;
+		}
+	}
+
+	if (i == ARRAY_SIZE(coeff_div)) {
+		dev_err(codec->dev, "%dHz MCLK can't support %dHz\n",
+			wm8737->mclk, params_rate(params));
+		return -EINVAL;
+	}
+
+	clocking |= coeff_div[i].usb | (coeff_div[i].sr << WM8737_SR_SHIFT);
+
+	switch (params_format(params)) {
+	case SNDRV_PCM_FORMAT_S16_LE:
+		break;
+	case SNDRV_PCM_FORMAT_S20_3LE:
+		af |= 0x8;
+		break;
+	case SNDRV_PCM_FORMAT_S24_LE:
+		af |= 0x10;
+		break;
+	case SNDRV_PCM_FORMAT_S32_LE:
+		af |= 0x18;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	snd_soc_update_bits(codec, WM8737_AUDIO_FORMAT, WM8737_WL_MASK, af);
+	snd_soc_update_bits(codec, WM8737_CLOCKING,
+			    WM8737_USB_MODE | WM8737_CLKDIV2 | WM8737_SR_MASK,
+			    clocking);
+
+	return 0;
+}
+
+static int wm8737_set_dai_sysclk(struct snd_soc_dai *codec_dai,
+				 int clk_id, unsigned int freq, int dir)
+{
+	struct snd_soc_codec *codec = codec_dai->codec;
+	struct wm8737_priv *wm8737 = snd_soc_codec_get_drvdata(codec);
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(coeff_div); i++) {
+		if (freq == coeff_div[i].mclk ||
+		    freq == coeff_div[i].mclk * 2) {
+			wm8737->mclk = freq;
+			return 0;
+		}
+	}
+
+	dev_err(codec->dev, "MCLK rate %dHz not supported\n", freq);
+
+	return -EINVAL;
+}
+
+
+static int wm8737_set_dai_fmt(struct snd_soc_dai *codec_dai,
+		unsigned int fmt)
+{
+	struct snd_soc_codec *codec = codec_dai->codec;
+	u16 af = 0;
+
+	switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+	case SND_SOC_DAIFMT_CBM_CFM:
+		af |= WM8737_MS;
+		break;
+	case SND_SOC_DAIFMT_CBS_CFS:
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+	case SND_SOC_DAIFMT_I2S:
+		af |= 0x2;
+		break;
+	case SND_SOC_DAIFMT_RIGHT_J:
+		break;
+	case SND_SOC_DAIFMT_LEFT_J:
+		af |= 0x1;
+		break;
+	case SND_SOC_DAIFMT_DSP_A:
+		af |= 0x3;
+		break;
+	case SND_SOC_DAIFMT_DSP_B:
+		af |= 0x13;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+	case SND_SOC_DAIFMT_NB_NF:
+		break;
+	case SND_SOC_DAIFMT_NB_IF:
+		af |= WM8737_LRP;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	snd_soc_update_bits(codec, WM8737_AUDIO_FORMAT,
+			    WM8737_FORMAT_MASK | WM8737_LRP | WM8737_MS, af);
+
+	return 0;
+}
+
+static int wm8737_set_bias_level(struct snd_soc_codec *codec,
+				 enum snd_soc_bias_level level)
+{
+	struct wm8737_priv *wm8737 = snd_soc_codec_get_drvdata(codec);
+	int ret;
+
+	switch (level) {
+	case SND_SOC_BIAS_ON:
+		break;
+
+	case SND_SOC_BIAS_PREPARE:
+		/* VMID at 2*75k */
+		snd_soc_update_bits(codec, WM8737_MISC_BIAS_CONTROL,
+				    WM8737_VMIDSEL_MASK, 0);
+		break;
+
+	case SND_SOC_BIAS_STANDBY:
+		if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
+			ret = regulator_bulk_enable(ARRAY_SIZE(wm8737->supplies),
+						    wm8737->supplies);
+			if (ret != 0) {
+				dev_err(codec->dev,
+					"Failed to enable supplies: %d\n",
+					ret);
+				return ret;
+			}
+
+			snd_soc_cache_sync(codec);
+
+			/* Fast VMID ramp at 2*2.5k */
+			snd_soc_update_bits(codec, WM8737_MISC_BIAS_CONTROL,
+					    WM8737_VMIDSEL_MASK, 0x4);
+
+			/* Bring VMID up */
+			snd_soc_update_bits(codec, WM8737_POWER_MANAGEMENT,
+					    WM8737_VMID_MASK |
+					    WM8737_VREF_MASK,
+					    WM8737_VMID_MASK |
+					    WM8737_VREF_MASK);
+
+			msleep(500);
+		}
+
+		/* VMID at 2*300k */
+		snd_soc_update_bits(codec, WM8737_MISC_BIAS_CONTROL,
+				    WM8737_VMIDSEL_MASK, 2);
+
+		break;
+
+	case SND_SOC_BIAS_OFF:
+		snd_soc_update_bits(codec, WM8737_POWER_MANAGEMENT,
+				    WM8737_VMID_MASK | WM8737_VREF_MASK, 0);
+
+		regulator_bulk_disable(ARRAY_SIZE(wm8737->supplies),
+				       wm8737->supplies);
+		break;
+	}
+
+	codec->dapm.bias_level = level;
+	return 0;
+}
+
+#define WM8737_RATES SNDRV_PCM_RATE_8000_96000
+
+#define WM8737_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE |\
+			SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE)
+
+static struct snd_soc_dai_ops wm8737_dai_ops = {
+	.hw_params	= wm8737_hw_params,
+	.set_sysclk	= wm8737_set_dai_sysclk,
+	.set_fmt	= wm8737_set_dai_fmt,
+};
+
+static struct snd_soc_dai_driver wm8737_dai = {
+	.name = "wm8737",
+	.capture = {
+		.stream_name = "Capture",
+		.channels_min = 2,  /* Mono modes not yet supported */
+		.channels_max = 2,
+		.rates = WM8737_RATES,
+		.formats = WM8737_FORMATS,
+	},
+	.ops = &wm8737_dai_ops,
+};
+
+#ifdef CONFIG_PM
+static int wm8737_suspend(struct snd_soc_codec *codec, pm_message_t state)
+{
+	wm8737_set_bias_level(codec, SND_SOC_BIAS_OFF);
+	return 0;
+}
+
+static int wm8737_resume(struct snd_soc_codec *codec)
+{
+	wm8737_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
+	return 0;
+}
+#else
+#define wm8737_suspend NULL
+#define wm8737_resume NULL
+#endif
+
+static int wm8737_probe(struct snd_soc_codec *codec)
+{
+	struct wm8737_priv *wm8737 = snd_soc_codec_get_drvdata(codec);
+	int ret, i;
+
+	ret = snd_soc_codec_set_cache_io(codec, 7, 9, wm8737->control_type);
+	if (ret != 0) {
+		dev_err(codec->dev, "Failed to set cache I/O: %d\n", ret);
+		return ret;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(wm8737->supplies); i++)
+		wm8737->supplies[i].supply = wm8737_supply_names[i];
+
+	ret = regulator_bulk_get(codec->dev, ARRAY_SIZE(wm8737->supplies),
+				 wm8737->supplies);
+	if (ret != 0) {
+		dev_err(codec->dev, "Failed to request supplies: %d\n", ret);
+		return ret;
+	}
+
+	ret = regulator_bulk_enable(ARRAY_SIZE(wm8737->supplies),
+				    wm8737->supplies);
+	if (ret != 0) {
+		dev_err(codec->dev, "Failed to enable supplies: %d\n", ret);
+		goto err_get;
+	}
+
+	ret = wm8737_reset(codec);
+	if (ret < 0) {
+		dev_err(codec->dev, "Failed to issue reset\n");
+		goto err_enable;
+	}
+
+	snd_soc_update_bits(codec, WM8737_LEFT_PGA_VOLUME, WM8737_LVU,
+			    WM8737_LVU);
+	snd_soc_update_bits(codec, WM8737_RIGHT_PGA_VOLUME, WM8737_RVU,
+			    WM8737_RVU);
+
+	wm8737_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
+
+	/* Bias level configuration will have done an extra enable */
+	regulator_bulk_disable(ARRAY_SIZE(wm8737->supplies), wm8737->supplies);
+
+	snd_soc_add_controls(codec, wm8737_snd_controls,
+			     ARRAY_SIZE(wm8737_snd_controls));
+	wm8737_add_widgets(codec);
+
+	return 0;
+
+err_enable:
+	regulator_bulk_disable(ARRAY_SIZE(wm8737->supplies), wm8737->supplies);
+err_get:
+	regulator_bulk_free(ARRAY_SIZE(wm8737->supplies), wm8737->supplies);
+
+	return ret;
+}
+
+static int wm8737_remove(struct snd_soc_codec *codec)
+{
+	struct wm8737_priv *wm8737 = snd_soc_codec_get_drvdata(codec);
+
+	wm8737_set_bias_level(codec, SND_SOC_BIAS_OFF);
+	regulator_bulk_free(ARRAY_SIZE(wm8737->supplies), wm8737->supplies);
+	return 0;
+}
+
+static struct snd_soc_codec_driver soc_codec_dev_wm8737 = {
+	.probe		= wm8737_probe,
+	.remove		= wm8737_remove,
+	.suspend	= wm8737_suspend,
+	.resume		= wm8737_resume,
+	.set_bias_level = wm8737_set_bias_level,
+
+	.reg_cache_size = WM8737_REGISTER_COUNT - 1, /* Skip reset */
+	.reg_word_size	= sizeof(u16),
+	.reg_cache_default = wm8737_reg,
+};
+
+#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
+static __devinit int wm8737_i2c_probe(struct i2c_client *i2c,
+				      const struct i2c_device_id *id)
+{
+	struct wm8737_priv *wm8737;
+	int ret;
+
+	wm8737 = kzalloc(sizeof(struct wm8737_priv), GFP_KERNEL);
+	if (wm8737 == NULL)
+		return -ENOMEM;
+
+	i2c_set_clientdata(i2c, wm8737);
+	wm8737->control_type = SND_SOC_I2C;
+
+	ret =  snd_soc_register_codec(&i2c->dev,
+				      &soc_codec_dev_wm8737, &wm8737_dai, 1);
+	if (ret < 0)
+		kfree(wm8737);
+	return ret;
+
+}
+
+static __devexit int wm8737_i2c_remove(struct i2c_client *client)
+{
+	snd_soc_unregister_codec(&client->dev);
+	kfree(i2c_get_clientdata(client));
+	return 0;
+}
+
+static const struct i2c_device_id wm8737_i2c_id[] = {
+	{ "wm8737", 0 },
+	{ }
+};
+MODULE_DEVICE_TABLE(i2c, wm8737_i2c_id);
+
+static struct i2c_driver wm8737_i2c_driver = {
+	.driver = {
+		.name = "wm8737",
+		.owner = THIS_MODULE,
+	},
+	.probe =    wm8737_i2c_probe,
+	.remove =   __devexit_p(wm8737_i2c_remove),
+	.id_table = wm8737_i2c_id,
+};
+#endif
+
+#if defined(CONFIG_SPI_MASTER)
+static int __devinit wm8737_spi_probe(struct spi_device *spi)
+{
+	struct wm8737_priv *wm8737;
+	int ret;
+
+	wm8737 = kzalloc(sizeof(struct wm8737_priv), GFP_KERNEL);
+	if (wm8737 == NULL)
+		return -ENOMEM;
+
+	wm8737->control_type = SND_SOC_SPI;
+	spi_set_drvdata(spi, wm8737);
+
+	ret = snd_soc_register_codec(&spi->dev,
+				     &soc_codec_dev_wm8737, &wm8737_dai, 1);
+	if (ret < 0)
+		kfree(wm8737);
+	return ret;
+}
+
+static int __devexit wm8737_spi_remove(struct spi_device *spi)
+{
+	snd_soc_unregister_codec(&spi->dev);
+	kfree(spi_get_drvdata(spi));
+	return 0;
+}
+
+static struct spi_driver wm8737_spi_driver = {
+	.driver = {
+		.name	= "wm8737",
+		.owner	= THIS_MODULE,
+	},
+	.probe		= wm8737_spi_probe,
+	.remove		= __devexit_p(wm8737_spi_remove),
+};
+#endif /* CONFIG_SPI_MASTER */
+
+static int __init wm8737_modinit(void)
+{
+	int ret;
+#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
+	ret = i2c_add_driver(&wm8737_i2c_driver);
+	if (ret != 0) {
+		printk(KERN_ERR "Failed to register WM8737 I2C driver: %d\n",
+		       ret);
+	}
+#endif
+#if defined(CONFIG_SPI_MASTER)
+	ret = spi_register_driver(&wm8737_spi_driver);
+	if (ret != 0) {
+		printk(KERN_ERR "Failed to register WM8737 SPI driver: %d\n",
+		       ret);
+	}
+#endif
+	return 0;
+}
+module_init(wm8737_modinit);
+
+static void __exit wm8737_exit(void)
+{
+#if defined(CONFIG_SPI_MASTER)
+	spi_unregister_driver(&wm8737_spi_driver);
+#endif
+#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
+	i2c_del_driver(&wm8737_i2c_driver);
+#endif
+}
+module_exit(wm8737_exit);
+
+MODULE_DESCRIPTION("ASoC WM8737 driver");
+MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/wm8737.h b/sound/soc/codecs/wm8737.h
new file mode 100644
index 0000000..23d14c8
--- /dev/null
+++ b/sound/soc/codecs/wm8737.h
@@ -0,0 +1,322 @@
+#ifndef _WM8737_H
+#define _WM8737_H
+
+/*
+ * wm8737.h  --  WM8737 ALSA SoC Audio driver
+ *
+ * Copyright 2010 Wolfson Microelectronics plc
+ *
+ * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * Register values.
+ */
+#define WM8737_LEFT_PGA_VOLUME                  0x00
+#define WM8737_RIGHT_PGA_VOLUME                 0x01
+#define WM8737_AUDIO_PATH_L                     0x02
+#define WM8737_AUDIO_PATH_R                     0x03
+#define WM8737_3D_ENHANCE                       0x04
+#define WM8737_ADC_CONTROL                      0x05
+#define WM8737_POWER_MANAGEMENT                 0x06
+#define WM8737_AUDIO_FORMAT                     0x07
+#define WM8737_CLOCKING                         0x08
+#define WM8737_MIC_PREAMP_CONTROL               0x09
+#define WM8737_MISC_BIAS_CONTROL                0x0A
+#define WM8737_NOISE_GATE                       0x0B
+#define WM8737_ALC1                             0x0C
+#define WM8737_ALC2                             0x0D
+#define WM8737_ALC3                             0x0E
+#define WM8737_RESET                            0x0F
+
+#define WM8737_REGISTER_COUNT                   16
+#define WM8737_MAX_REGISTER                     0x0F
+
+/*
+ * Field Definitions.
+ */
+
+/*
+ * R0 (0x00) - Left PGA volume
+ */
+#define WM8737_LVU                              0x0100  /* LVU */
+#define WM8737_LVU_MASK                         0x0100  /* LVU */
+#define WM8737_LVU_SHIFT                             8  /* LVU */
+#define WM8737_LVU_WIDTH                             1  /* LVU */
+#define WM8737_LINVOL_MASK                      0x00FF  /* LINVOL - [7:0] */
+#define WM8737_LINVOL_SHIFT                          0  /* LINVOL - [7:0] */
+#define WM8737_LINVOL_WIDTH                          8  /* LINVOL - [7:0] */
+
+/*
+ * R1 (0x01) - Right PGA volume
+ */
+#define WM8737_RVU                              0x0100  /* RVU */
+#define WM8737_RVU_MASK                         0x0100  /* RVU */
+#define WM8737_RVU_SHIFT                             8  /* RVU */
+#define WM8737_RVU_WIDTH                             1  /* RVU */
+#define WM8737_RINVOL_MASK                      0x00FF  /* RINVOL - [7:0] */
+#define WM8737_RINVOL_SHIFT                          0  /* RINVOL - [7:0] */
+#define WM8737_RINVOL_WIDTH                          8  /* RINVOL - [7:0] */
+
+/*
+ * R2 (0x02) - AUDIO path L
+ */
+#define WM8737_LINSEL_MASK                      0x0180  /* LINSEL - [8:7] */
+#define WM8737_LINSEL_SHIFT                          7  /* LINSEL - [8:7] */
+#define WM8737_LINSEL_WIDTH                          2  /* LINSEL - [8:7] */
+#define WM8737_LMICBOOST_MASK                   0x0060  /* LMICBOOST - [6:5] */
+#define WM8737_LMICBOOST_SHIFT                       5  /* LMICBOOST - [6:5] */
+#define WM8737_LMICBOOST_WIDTH                       2  /* LMICBOOST - [6:5] */
+#define WM8737_LMBE                             0x0010  /* LMBE */
+#define WM8737_LMBE_MASK                        0x0010  /* LMBE */
+#define WM8737_LMBE_SHIFT                            4  /* LMBE */
+#define WM8737_LMBE_WIDTH                            1  /* LMBE */
+#define WM8737_LMZC                             0x0008  /* LMZC */
+#define WM8737_LMZC_MASK                        0x0008  /* LMZC */
+#define WM8737_LMZC_SHIFT                            3  /* LMZC */
+#define WM8737_LMZC_WIDTH                            1  /* LMZC */
+#define WM8737_LPZC                             0x0004  /* LPZC */
+#define WM8737_LPZC_MASK                        0x0004  /* LPZC */
+#define WM8737_LPZC_SHIFT                            2  /* LPZC */
+#define WM8737_LPZC_WIDTH                            1  /* LPZC */
+#define WM8737_LZCTO_MASK                       0x0003  /* LZCTO - [1:0] */
+#define WM8737_LZCTO_SHIFT                           0  /* LZCTO - [1:0] */
+#define WM8737_LZCTO_WIDTH                           2  /* LZCTO - [1:0] */
+
+/*
+ * R3 (0x03) - AUDIO path R
+ */
+#define WM8737_RINSEL_MASK                      0x0180  /* RINSEL - [8:7] */
+#define WM8737_RINSEL_SHIFT                          7  /* RINSEL - [8:7] */
+#define WM8737_RINSEL_WIDTH                          2  /* RINSEL - [8:7] */
+#define WM8737_RMICBOOST_MASK                   0x0060  /* RMICBOOST - [6:5] */
+#define WM8737_RMICBOOST_SHIFT                       5  /* RMICBOOST - [6:5] */
+#define WM8737_RMICBOOST_WIDTH                       2  /* RMICBOOST - [6:5] */
+#define WM8737_RMBE                             0x0010  /* RMBE */
+#define WM8737_RMBE_MASK                        0x0010  /* RMBE */
+#define WM8737_RMBE_SHIFT                            4  /* RMBE */
+#define WM8737_RMBE_WIDTH                            1  /* RMBE */
+#define WM8737_RMZC                             0x0008  /* RMZC */
+#define WM8737_RMZC_MASK                        0x0008  /* RMZC */
+#define WM8737_RMZC_SHIFT                            3  /* RMZC */
+#define WM8737_RMZC_WIDTH                            1  /* RMZC */
+#define WM8737_RPZC                             0x0004  /* RPZC */
+#define WM8737_RPZC_MASK                        0x0004  /* RPZC */
+#define WM8737_RPZC_SHIFT                            2  /* RPZC */
+#define WM8737_RPZC_WIDTH                            1  /* RPZC */
+#define WM8737_RZCTO_MASK                       0x0003  /* RZCTO - [1:0] */
+#define WM8737_RZCTO_SHIFT                           0  /* RZCTO - [1:0] */
+#define WM8737_RZCTO_WIDTH                           2  /* RZCTO - [1:0] */
+
+/*
+ * R4 (0x04) - 3D Enhance
+ */
+#define WM8737_DIV2                             0x0080  /* DIV2 */
+#define WM8737_DIV2_MASK                        0x0080  /* DIV2 */
+#define WM8737_DIV2_SHIFT                            7  /* DIV2 */
+#define WM8737_DIV2_WIDTH                            1  /* DIV2 */
+#define WM8737_3DLC                             0x0040  /* 3DLC */
+#define WM8737_3DLC_MASK                        0x0040  /* 3DLC */
+#define WM8737_3DLC_SHIFT                            6  /* 3DLC */
+#define WM8737_3DLC_WIDTH                            1  /* 3DLC */
+#define WM8737_3DUC                             0x0020  /* 3DUC */
+#define WM8737_3DUC_MASK                        0x0020  /* 3DUC */
+#define WM8737_3DUC_SHIFT                            5  /* 3DUC */
+#define WM8737_3DUC_WIDTH                            1  /* 3DUC */
+#define WM8737_3DDEPTH_MASK                     0x001E  /* 3DDEPTH - [4:1] */
+#define WM8737_3DDEPTH_SHIFT                         1  /* 3DDEPTH - [4:1] */
+#define WM8737_3DDEPTH_WIDTH                         4  /* 3DDEPTH - [4:1] */
+#define WM8737_3DE                              0x0001  /* 3DE */
+#define WM8737_3DE_MASK                         0x0001  /* 3DE */
+#define WM8737_3DE_SHIFT                             0  /* 3DE */
+#define WM8737_3DE_WIDTH                             1  /* 3DE */
+
+/*
+ * R5 (0x05) - ADC Control
+ */
+#define WM8737_MONOMIX_MASK                     0x0180  /* MONOMIX - [8:7] */
+#define WM8737_MONOMIX_SHIFT                         7  /* MONOMIX - [8:7] */
+#define WM8737_MONOMIX_WIDTH                         2  /* MONOMIX - [8:7] */
+#define WM8737_POLARITY_MASK                    0x0060  /* POLARITY - [6:5] */
+#define WM8737_POLARITY_SHIFT                        5  /* POLARITY - [6:5] */
+#define WM8737_POLARITY_WIDTH                        2  /* POLARITY - [6:5] */
+#define WM8737_HPOR                             0x0010  /* HPOR */
+#define WM8737_HPOR_MASK                        0x0010  /* HPOR */
+#define WM8737_HPOR_SHIFT                            4  /* HPOR */
+#define WM8737_HPOR_WIDTH                            1  /* HPOR */
+#define WM8737_LP                               0x0004  /* LP */
+#define WM8737_LP_MASK                          0x0004  /* LP */
+#define WM8737_LP_SHIFT                              2  /* LP */
+#define WM8737_LP_WIDTH                              1  /* LP */
+#define WM8737_MONOUT                           0x0002  /* MONOUT */
+#define WM8737_MONOUT_MASK                      0x0002  /* MONOUT */
+#define WM8737_MONOUT_SHIFT                          1  /* MONOUT */
+#define WM8737_MONOUT_WIDTH                          1  /* MONOUT */
+#define WM8737_ADCHPD                           0x0001  /* ADCHPD */
+#define WM8737_ADCHPD_MASK                      0x0001  /* ADCHPD */
+#define WM8737_ADCHPD_SHIFT                          0  /* ADCHPD */
+#define WM8737_ADCHPD_WIDTH                          1  /* ADCHPD */
+
+/*
+ * R6 (0x06) - Power Management
+ */
+#define WM8737_VMID                             0x0100  /* VMID */
+#define WM8737_VMID_MASK                        0x0100  /* VMID */
+#define WM8737_VMID_SHIFT                            8  /* VMID */
+#define WM8737_VMID_WIDTH                            1  /* VMID */
+#define WM8737_VREF                             0x0080  /* VREF */
+#define WM8737_VREF_MASK                        0x0080  /* VREF */
+#define WM8737_VREF_SHIFT                            7  /* VREF */
+#define WM8737_VREF_WIDTH                            1  /* VREF */
+#define WM8737_AI                               0x0040  /* AI */
+#define WM8737_AI_MASK                          0x0040  /* AI */
+#define WM8737_AI_SHIFT                              6  /* AI */
+#define WM8737_AI_WIDTH                              1  /* AI */
+#define WM8737_PGL                              0x0020  /* PGL */
+#define WM8737_PGL_MASK                         0x0020  /* PGL */
+#define WM8737_PGL_SHIFT                             5  /* PGL */
+#define WM8737_PGL_WIDTH                             1  /* PGL */
+#define WM8737_PGR                              0x0010  /* PGR */
+#define WM8737_PGR_MASK                         0x0010  /* PGR */
+#define WM8737_PGR_SHIFT                             4  /* PGR */
+#define WM8737_PGR_WIDTH                             1  /* PGR */
+#define WM8737_ADL                              0x0008  /* ADL */
+#define WM8737_ADL_MASK                         0x0008  /* ADL */
+#define WM8737_ADL_SHIFT                             3  /* ADL */
+#define WM8737_ADL_WIDTH                             1  /* ADL */
+#define WM8737_ADR                              0x0004  /* ADR */
+#define WM8737_ADR_MASK                         0x0004  /* ADR */
+#define WM8737_ADR_SHIFT                             2  /* ADR */
+#define WM8737_ADR_WIDTH                             1  /* ADR */
+#define WM8737_MICBIAS_MASK                     0x0003  /* MICBIAS - [1:0] */
+#define WM8737_MICBIAS_SHIFT                         0  /* MICBIAS - [1:0] */
+#define WM8737_MICBIAS_WIDTH                         2  /* MICBIAS - [1:0] */
+
+/*
+ * R7 (0x07) - Audio Format
+ */
+#define WM8737_SDODIS                           0x0080  /* SDODIS */
+#define WM8737_SDODIS_MASK                      0x0080  /* SDODIS */
+#define WM8737_SDODIS_SHIFT                          7  /* SDODIS */
+#define WM8737_SDODIS_WIDTH                          1  /* SDODIS */
+#define WM8737_MS                               0x0040  /* MS */
+#define WM8737_MS_MASK                          0x0040  /* MS */
+#define WM8737_MS_SHIFT                              6  /* MS */
+#define WM8737_MS_WIDTH                              1  /* MS */
+#define WM8737_LRP                              0x0010  /* LRP */
+#define WM8737_LRP_MASK                         0x0010  /* LRP */
+#define WM8737_LRP_SHIFT                             4  /* LRP */
+#define WM8737_LRP_WIDTH                             1  /* LRP */
+#define WM8737_WL_MASK                          0x000C  /* WL - [3:2] */
+#define WM8737_WL_SHIFT                              2  /* WL - [3:2] */
+#define WM8737_WL_WIDTH                              2  /* WL - [3:2] */
+#define WM8737_FORMAT_MASK                      0x0003  /* FORMAT - [1:0] */
+#define WM8737_FORMAT_SHIFT                          0  /* FORMAT - [1:0] */
+#define WM8737_FORMAT_WIDTH                          2  /* FORMAT - [1:0] */
+
+/*
+ * R8 (0x08) - Clocking
+ */
+#define WM8737_AUTODETECT                       0x0080  /* AUTODETECT */
+#define WM8737_AUTODETECT_MASK                  0x0080  /* AUTODETECT */
+#define WM8737_AUTODETECT_SHIFT                      7  /* AUTODETECT */
+#define WM8737_AUTODETECT_WIDTH                      1  /* AUTODETECT */
+#define WM8737_CLKDIV2                          0x0040  /* CLKDIV2 */
+#define WM8737_CLKDIV2_MASK                     0x0040  /* CLKDIV2 */
+#define WM8737_CLKDIV2_SHIFT                         6  /* CLKDIV2 */
+#define WM8737_CLKDIV2_WIDTH                         1  /* CLKDIV2 */
+#define WM8737_SR_MASK                          0x003E  /* SR - [5:1] */
+#define WM8737_SR_SHIFT                              1  /* SR - [5:1] */
+#define WM8737_SR_WIDTH                              5  /* SR - [5:1] */
+#define WM8737_USB_MODE                         0x0001  /* USB MODE */
+#define WM8737_USB_MODE_MASK                    0x0001  /* USB MODE */
+#define WM8737_USB_MODE_SHIFT                        0  /* USB MODE */
+#define WM8737_USB_MODE_WIDTH                        1  /* USB MODE */
+
+/*
+ * R9 (0x09) - MIC Preamp Control
+ */
+#define WM8737_RBYPEN                           0x0008  /* RBYPEN */
+#define WM8737_RBYPEN_MASK                      0x0008  /* RBYPEN */
+#define WM8737_RBYPEN_SHIFT                          3  /* RBYPEN */
+#define WM8737_RBYPEN_WIDTH                          1  /* RBYPEN */
+#define WM8737_LBYPEN                           0x0004  /* LBYPEN */
+#define WM8737_LBYPEN_MASK                      0x0004  /* LBYPEN */
+#define WM8737_LBYPEN_SHIFT                          2  /* LBYPEN */
+#define WM8737_LBYPEN_WIDTH                          1  /* LBYPEN */
+#define WM8737_MBCTRL_MASK                      0x0003  /* MBCTRL - [1:0] */
+#define WM8737_MBCTRL_SHIFT                          0  /* MBCTRL - [1:0] */
+#define WM8737_MBCTRL_WIDTH                          2  /* MBCTRL - [1:0] */
+
+/*
+ * R10 (0x0A) - Misc Bias Control
+ */
+#define WM8737_VMIDSEL_MASK                     0x000C  /* VMIDSEL - [3:2] */
+#define WM8737_VMIDSEL_SHIFT                         2  /* VMIDSEL - [3:2] */
+#define WM8737_VMIDSEL_WIDTH                         2  /* VMIDSEL - [3:2] */
+#define WM8737_LINPUT1_DC_BIAS_ENABLE           0x0002  /* LINPUT1 DC BIAS ENABLE */
+#define WM8737_LINPUT1_DC_BIAS_ENABLE_MASK      0x0002  /* LINPUT1 DC BIAS ENABLE */
+#define WM8737_LINPUT1_DC_BIAS_ENABLE_SHIFT          1  /* LINPUT1 DC BIAS ENABLE */
+#define WM8737_LINPUT1_DC_BIAS_ENABLE_WIDTH          1  /* LINPUT1 DC BIAS ENABLE */
+#define WM8737_RINPUT1_DC_BIAS_ENABLE           0x0001  /* RINPUT1 DC BIAS ENABLE */
+#define WM8737_RINPUT1_DC_BIAS_ENABLE_MASK      0x0001  /* RINPUT1 DC BIAS ENABLE */
+#define WM8737_RINPUT1_DC_BIAS_ENABLE_SHIFT          0  /* RINPUT1 DC BIAS ENABLE */
+#define WM8737_RINPUT1_DC_BIAS_ENABLE_WIDTH          1  /* RINPUT1 DC BIAS ENABLE */
+
+/*
+ * R11 (0x0B) - Noise Gate
+ */
+#define WM8737_NGTH_MASK                        0x001C  /* NGTH - [4:2] */
+#define WM8737_NGTH_SHIFT                            2  /* NGTH - [4:2] */
+#define WM8737_NGTH_WIDTH                            3  /* NGTH - [4:2] */
+#define WM8737_NGAT                             0x0001  /* NGAT */
+#define WM8737_NGAT_MASK                        0x0001  /* NGAT */
+#define WM8737_NGAT_SHIFT                            0  /* NGAT */
+#define WM8737_NGAT_WIDTH                            1  /* NGAT */
+
+/*
+ * R12 (0x0C) - ALC1
+ */
+#define WM8737_ALCSEL_MASK                      0x0180  /* ALCSEL - [8:7] */
+#define WM8737_ALCSEL_SHIFT                          7  /* ALCSEL - [8:7] */
+#define WM8737_ALCSEL_WIDTH                          2  /* ALCSEL - [8:7] */
+#define WM8737_MAX_GAIN_MASK                    0x0070  /* MAX GAIN - [6:4] */
+#define WM8737_MAX_GAIN_SHIFT                        4  /* MAX GAIN - [6:4] */
+#define WM8737_MAX_GAIN_WIDTH                        3  /* MAX GAIN - [6:4] */
+#define WM8737_ALCL_MASK                        0x000F  /* ALCL - [3:0] */
+#define WM8737_ALCL_SHIFT                            0  /* ALCL - [3:0] */
+#define WM8737_ALCL_WIDTH                            4  /* ALCL - [3:0] */
+
+/*
+ * R13 (0x0D) - ALC2
+ */
+#define WM8737_ALCZCE                           0x0010  /* ALCZCE */
+#define WM8737_ALCZCE_MASK                      0x0010  /* ALCZCE */
+#define WM8737_ALCZCE_SHIFT                          4  /* ALCZCE */
+#define WM8737_ALCZCE_WIDTH                          1  /* ALCZCE */
+#define WM8737_HLD_MASK                         0x000F  /* HLD - [3:0] */
+#define WM8737_HLD_SHIFT                             0  /* HLD - [3:0] */
+#define WM8737_HLD_WIDTH                             4  /* HLD - [3:0] */
+
+/*
+ * R14 (0x0E) - ALC3
+ */
+#define WM8737_DCY_MASK                         0x00F0  /* DCY - [7:4] */
+#define WM8737_DCY_SHIFT                             4  /* DCY - [7:4] */
+#define WM8737_DCY_WIDTH                             4  /* DCY - [7:4] */
+#define WM8737_ATK_MASK                         0x000F  /* ATK - [3:0] */
+#define WM8737_ATK_SHIFT                             0  /* ATK - [3:0] */
+#define WM8737_ATK_WIDTH                             4  /* ATK - [3:0] */
+
+/*
+ * R15 (0x0F) - Reset
+ */
+#define WM8737_RESET_MASK                       0x01FF  /* RESET - [8:0] */
+#define WM8737_RESET_SHIFT                           0  /* RESET - [8:0] */
+#define WM8737_RESET_WIDTH                           9  /* RESET - [8:0] */
+
+#endif
diff --git a/sound/soc/codecs/wm8741.c b/sound/soc/codecs/wm8741.c
index aea60ef..494f2d3 100644
--- a/sound/soc/codecs/wm8741.c
+++ b/sound/soc/codecs/wm8741.c
@@ -24,7 +24,6 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/initval.h>
 #include <sound/tlv.h>
 
@@ -94,10 +93,11 @@
 
 static int wm8741_add_widgets(struct snd_soc_codec *codec)
 {
-	snd_soc_dapm_new_controls(codec, wm8741_dapm_widgets,
-				  ARRAY_SIZE(wm8741_dapm_widgets));
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 
-	snd_soc_dapm_add_routes(codec, intercon, ARRAY_SIZE(intercon));
+	snd_soc_dapm_new_controls(dapm, wm8741_dapm_widgets,
+				  ARRAY_SIZE(wm8741_dapm_widgets));
+	snd_soc_dapm_add_routes(dapm, intercon, ARRAY_SIZE(intercon));
 
 	return 0;
 }
@@ -455,7 +455,7 @@
 	.resume =	wm8741_resume,
 	.reg_cache_size = ARRAY_SIZE(wm8741_reg_defaults),
 	.reg_word_size = sizeof(u16),
-	.reg_cache_default = &wm8741_reg_defaults,
+	.reg_cache_default = wm8741_reg_defaults,
 };
 
 #if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
diff --git a/sound/soc/codecs/wm8750.c b/sound/soc/codecs/wm8750.c
index 6c924cd..38f38fd 100644
--- a/sound/soc/codecs/wm8750.c
+++ b/sound/soc/codecs/wm8750.c
@@ -25,7 +25,6 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/initval.h>
 
 #include "wm8750.h"
@@ -53,7 +52,6 @@
 struct wm8750_priv {
 	unsigned int sysclk;
 	enum snd_soc_control_type control_type;
-	u16 reg_cache[ARRAY_SIZE(wm8750_reg)];
 };
 
 #define wm8750_reset(c)	snd_soc_write(c, WM8750_RESET, 0)
@@ -399,10 +397,11 @@
 
 static int wm8750_add_widgets(struct snd_soc_codec *codec)
 {
-	snd_soc_dapm_new_controls(codec, wm8750_dapm_widgets,
-				  ARRAY_SIZE(wm8750_dapm_widgets));
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 
-	snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+	snd_soc_dapm_new_controls(dapm, wm8750_dapm_widgets,
+				  ARRAY_SIZE(wm8750_dapm_widgets));
+	snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 
 	return 0;
 }
@@ -615,7 +614,7 @@
 	case SND_SOC_BIAS_PREPARE:
 		break;
 	case SND_SOC_BIAS_STANDBY:
-		if (codec->bias_level == SND_SOC_BIAS_OFF) {
+		if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
 			/* Set VMID to 5k */
 			snd_soc_write(codec, WM8750_PWR1, pwr_reg | 0x01c1);
 
@@ -630,7 +629,7 @@
 		snd_soc_write(codec, WM8750_PWR1, 0x0001);
 		break;
 	}
-	codec->bias_level = level;
+	codec->dapm.bias_level = level;
 	return 0;
 }
 
diff --git a/sound/soc/codecs/wm8753.c b/sound/soc/codecs/wm8753.c
index 87caae5..79b02ae 100644
--- a/sound/soc/codecs/wm8753.c
+++ b/sound/soc/codecs/wm8753.c
@@ -45,7 +45,6 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/initval.h>
 #include <sound/tlv.h>
 #include <asm/div64.h>
@@ -623,10 +622,11 @@
 
 static int wm8753_add_widgets(struct snd_soc_codec *codec)
 {
-	snd_soc_dapm_new_controls(codec, wm8753_dapm_widgets,
-				  ARRAY_SIZE(wm8753_dapm_widgets));
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 
-	snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+	snd_soc_dapm_new_controls(dapm, wm8753_dapm_widgets,
+				  ARRAY_SIZE(wm8753_dapm_widgets));
+	snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 
 	return 0;
 }
@@ -1245,7 +1245,7 @@
 		snd_soc_write(codec, WM8753_PWR1, 0x0001);
 		break;
 	}
-	codec->bias_level = level;
+	codec->dapm.bias_level = level;
 	return 0;
 }
 
@@ -1435,9 +1435,11 @@
 
 static void wm8753_work(struct work_struct *work)
 {
-	struct snd_soc_codec *codec =
-		container_of(work, struct snd_soc_codec, delayed_work.work);
-	wm8753_set_bias_level(codec, codec->bias_level);
+	struct snd_soc_dapm_context *dapm =
+		container_of(work, struct snd_soc_dapm_context,
+			     delayed_work.work);
+	struct snd_soc_codec *codec = dapm->codec;
+	wm8753_set_bias_level(codec, dapm->bias_level);
 }
 
 static int wm8753_suspend(struct snd_soc_codec *codec, pm_message_t state)
@@ -1466,41 +1468,22 @@
 	wm8753_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
 
 	/* charge wm8753 caps */
-	if (codec->suspend_bias_level == SND_SOC_BIAS_ON) {
+	if (codec->dapm.suspend_bias_level == SND_SOC_BIAS_ON) {
 		wm8753_set_bias_level(codec, SND_SOC_BIAS_PREPARE);
-		codec->bias_level = SND_SOC_BIAS_ON;
-		schedule_delayed_work(&codec->delayed_work,
+		codec->dapm.bias_level = SND_SOC_BIAS_ON;
+		schedule_delayed_work(&codec->dapm.delayed_work,
 			msecs_to_jiffies(caps_charge));
 	}
 
 	return 0;
 }
 
-/*
- * This function forces any delayed work to be queued and run.
- */
-static int run_delayed_work(struct delayed_work *dwork)
-{
-	int ret;
-
-	/* cancel any work waiting to be queued. */
-	ret = cancel_delayed_work(dwork);
-
-	/* if there was any work waiting then we run it now and
-	 * wait for it's completion */
-	if (ret) {
-		schedule_delayed_work(dwork, 0);
-		flush_scheduled_work();
-	}
-	return ret;
-}
-
 static int wm8753_probe(struct snd_soc_codec *codec)
 {
 	struct wm8753_priv *wm8753 = snd_soc_codec_get_drvdata(codec);
 	int ret;
 
-	INIT_DELAYED_WORK(&codec->delayed_work, wm8753_work);
+	INIT_DELAYED_WORK(&codec->dapm.delayed_work, wm8753_work);
 
 	ret = snd_soc_codec_set_cache_io(codec, 7, 9, wm8753->control_type);
 	if (ret < 0) {
@@ -1519,7 +1502,7 @@
 
 	/* charge output caps */
 	wm8753_set_bias_level(codec, SND_SOC_BIAS_PREPARE);
-	schedule_delayed_work(&codec->delayed_work,
+	schedule_delayed_work(&codec->dapm.delayed_work,
 			      msecs_to_jiffies(caps_charge));
 
 	/* set the update bits */
@@ -1544,7 +1527,7 @@
 /* power down chip */
 static int wm8753_remove(struct snd_soc_codec *codec)
 {
-	run_delayed_work(&codec->delayed_work);
+	flush_delayed_work_sync(&codec->dapm.delayed_work);
 	wm8753_set_bias_level(codec, SND_SOC_BIAS_OFF);
 
 	return 0;
diff --git a/sound/soc/codecs/wm8770.c b/sound/soc/codecs/wm8770.c
new file mode 100644
index 0000000..19b92ba
--- /dev/null
+++ b/sound/soc/codecs/wm8770.c
@@ -0,0 +1,749 @@
+/*
+ * wm8770.c  --  WM8770 ALSA SoC Audio driver
+ *
+ * Copyright 2010 Wolfson Microelectronics plc
+ *
+ * Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/pm.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/initval.h>
+#include <sound/tlv.h>
+
+#include "wm8770.h"
+
+#define WM8770_NUM_SUPPLIES 3
+static const char *wm8770_supply_names[WM8770_NUM_SUPPLIES] = {
+	"AVDD1",
+	"AVDD2",
+	"DVDD"
+};
+
+static const u16 wm8770_reg_defs[WM8770_CACHEREGNUM] = {
+	0x7f, 0x7f, 0x7f, 0x7f,
+	0x7f, 0x7f, 0x7f, 0x7f,
+	0x7f, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0xff, 0xff,
+	0xff, 0xff, 0, 0x90, 0,
+	0, 0x22, 0x22, 0x3e,
+	0xc, 0xc, 0x100, 0x189,
+	0x189, 0x8770
+};
+
+struct wm8770_priv {
+	enum snd_soc_control_type control_type;
+	struct regulator_bulk_data supplies[WM8770_NUM_SUPPLIES];
+	struct notifier_block disable_nb[WM8770_NUM_SUPPLIES];
+	struct snd_soc_codec *codec;
+	int sysclk;
+};
+
+static int vout12supply_event(struct snd_soc_dapm_widget *w,
+	struct snd_kcontrol *kcontrol, int event);
+static int vout34supply_event(struct snd_soc_dapm_widget *w,
+	struct snd_kcontrol *kcontrol, int event);
+
+/*
+ * We can't use the same notifier block for more than one supply and
+ * there's no way I can see to get from a callback to the caller
+ * except container_of().
+ */
+#define WM8770_REGULATOR_EVENT(n) \
+static int wm8770_regulator_event_##n(struct notifier_block *nb, \
+				      unsigned long event, void *data)    \
+{ \
+	struct wm8770_priv *wm8770 = container_of(nb, struct wm8770_priv, \
+				     disable_nb[n]); \
+	if (event & REGULATOR_EVENT_DISABLE) { \
+		wm8770->codec->cache_sync = 1; \
+	} \
+	return 0; \
+}
+
+WM8770_REGULATOR_EVENT(0)
+WM8770_REGULATOR_EVENT(1)
+WM8770_REGULATOR_EVENT(2)
+
+static const DECLARE_TLV_DB_SCALE(adc_tlv, -1200, 100, 0);
+static const DECLARE_TLV_DB_SCALE(dac_dig_tlv, -12750, 50, 1);
+static const DECLARE_TLV_DB_SCALE(dac_alg_tlv, -12700, 100, 1);
+
+static const char *dac_phase_text[][2] = {
+	{ "DAC1 Normal", "DAC1 Inverted" },
+	{ "DAC2 Normal", "DAC2 Inverted" },
+	{ "DAC3 Normal", "DAC3 Inverted" },
+	{ "DAC4 Normal", "DAC4 Inverted" },
+};
+
+static const struct soc_enum dac_phase[] = {
+	SOC_ENUM_DOUBLE(WM8770_DACPHASE, 0, 1, 2, dac_phase_text[0]),
+	SOC_ENUM_DOUBLE(WM8770_DACPHASE, 2, 3, 2, dac_phase_text[1]),
+	SOC_ENUM_DOUBLE(WM8770_DACPHASE, 4, 5, 2, dac_phase_text[2]),
+	SOC_ENUM_DOUBLE(WM8770_DACPHASE, 6, 7, 2, dac_phase_text[3]),
+};
+
+static const struct snd_kcontrol_new wm8770_snd_controls[] = {
+	/* global DAC playback controls */
+	SOC_SINGLE_TLV("DAC Playback Volume", WM8770_MSDIGVOL, 0, 255, 0,
+		dac_dig_tlv),
+	SOC_SINGLE("DAC Playback Switch", WM8770_DACMUTE, 4, 1, 1),
+	SOC_SINGLE("DAC Playback ZC Switch", WM8770_DACCTRL1, 0, 1, 0),
+
+	/* global VOUT playback controls */
+	SOC_SINGLE_TLV("VOUT Playback Volume", WM8770_MSALGVOL, 0, 127, 0,
+		dac_alg_tlv),
+	SOC_SINGLE("VOUT Playback ZC Switch", WM8770_MSALGVOL, 7, 1, 0),
+
+	/* VOUT1/2/3/4 specific controls */
+	SOC_DOUBLE_R_TLV("VOUT1 Playback Volume", WM8770_VOUT1LVOL,
+		WM8770_VOUT1RVOL, 0, 127, 0, dac_alg_tlv),
+	SOC_DOUBLE_R("VOUT1 Playback ZC Switch", WM8770_VOUT1LVOL,
+		WM8770_VOUT1RVOL, 7, 1, 0),
+	SOC_DOUBLE_R_TLV("VOUT2 Playback Volume", WM8770_VOUT2LVOL,
+		WM8770_VOUT2RVOL, 0, 127, 0, dac_alg_tlv),
+	SOC_DOUBLE_R("VOUT2 Playback ZC Switch", WM8770_VOUT2LVOL,
+		WM8770_VOUT2RVOL, 7, 1, 0),
+	SOC_DOUBLE_R_TLV("VOUT3 Playback Volume", WM8770_VOUT3LVOL,
+		WM8770_VOUT3RVOL, 0, 127, 0, dac_alg_tlv),
+	SOC_DOUBLE_R("VOUT3 Playback ZC Switch", WM8770_VOUT3LVOL,
+		WM8770_VOUT3RVOL, 7, 1, 0),
+	SOC_DOUBLE_R_TLV("VOUT4 Playback Volume", WM8770_VOUT4LVOL,
+		WM8770_VOUT4RVOL, 0, 127, 0, dac_alg_tlv),
+	SOC_DOUBLE_R("VOUT4 Playback ZC Switch", WM8770_VOUT4LVOL,
+		WM8770_VOUT4RVOL, 7, 1, 0),
+
+	/* DAC1/2/3/4 specific controls */
+	SOC_DOUBLE_R_TLV("DAC1 Playback Volume", WM8770_DAC1LVOL,
+		WM8770_DAC1RVOL, 0, 255, 0, dac_dig_tlv),
+	SOC_SINGLE("DAC1 Deemphasis Switch", WM8770_DACCTRL2, 0, 1, 0),
+	SOC_ENUM("DAC1 Phase", dac_phase[0]),
+	SOC_DOUBLE_R_TLV("DAC2 Playback Volume", WM8770_DAC2LVOL,
+		WM8770_DAC2RVOL, 0, 255, 0, dac_dig_tlv),
+	SOC_SINGLE("DAC2 Deemphasis Switch", WM8770_DACCTRL2, 1, 1, 0),
+	SOC_ENUM("DAC2 Phase", dac_phase[1]),
+	SOC_DOUBLE_R_TLV("DAC3 Playback Volume", WM8770_DAC3LVOL,
+		WM8770_DAC3RVOL, 0, 255, 0, dac_dig_tlv),
+	SOC_SINGLE("DAC3 Deemphasis Switch", WM8770_DACCTRL2, 2, 1, 0),
+	SOC_ENUM("DAC3 Phase", dac_phase[2]),
+	SOC_DOUBLE_R_TLV("DAC4 Playback Volume", WM8770_DAC4LVOL,
+		WM8770_DAC4RVOL, 0, 255, 0, dac_dig_tlv),
+	SOC_SINGLE("DAC4 Deemphasis Switch", WM8770_DACCTRL2, 3, 1, 0),
+	SOC_ENUM("DAC4 Phase", dac_phase[3]),
+
+	/* ADC specific controls */
+	SOC_DOUBLE_R_TLV("Capture Volume", WM8770_ADCLCTRL, WM8770_ADCRCTRL,
+		0, 31, 0, adc_tlv),
+	SOC_DOUBLE_R("Capture Switch", WM8770_ADCLCTRL, WM8770_ADCRCTRL,
+		5, 1, 1),
+
+	/* other controls */
+	SOC_SINGLE("ADC 128x Oversampling Switch", WM8770_MSTRCTRL, 3, 1, 0),
+	SOC_SINGLE("ADC Highpass Filter Switch", WM8770_IFACECTRL, 8, 1, 1)
+};
+
+static const char *ain_text[] = {
+	"AIN1", "AIN2", "AIN3", "AIN4",
+	"AIN5", "AIN6", "AIN7", "AIN8"
+};
+
+static const struct soc_enum ain_enum =
+	SOC_ENUM_DOUBLE(WM8770_ADCMUX, 0, 4, 8, ain_text);
+
+static const struct snd_kcontrol_new ain_mux =
+	SOC_DAPM_ENUM("Capture Mux", ain_enum);
+
+static const struct snd_kcontrol_new vout1_mix_controls[] = {
+	SOC_DAPM_SINGLE("DAC1 Switch", WM8770_OUTMUX1, 0, 1, 0),
+	SOC_DAPM_SINGLE("AUX1 Switch", WM8770_OUTMUX1, 1, 1, 0),
+	SOC_DAPM_SINGLE("Bypass Switch", WM8770_OUTMUX1, 2, 1, 0)
+};
+
+static const struct snd_kcontrol_new vout2_mix_controls[] = {
+	SOC_DAPM_SINGLE("DAC2 Switch", WM8770_OUTMUX1, 3, 1, 0),
+	SOC_DAPM_SINGLE("AUX2 Switch", WM8770_OUTMUX1, 4, 1, 0),
+	SOC_DAPM_SINGLE("Bypass Switch", WM8770_OUTMUX1, 5, 1, 0)
+};
+
+static const struct snd_kcontrol_new vout3_mix_controls[] = {
+	SOC_DAPM_SINGLE("DAC3 Switch", WM8770_OUTMUX2, 0, 1, 0),
+	SOC_DAPM_SINGLE("AUX3 Switch", WM8770_OUTMUX2, 1, 1, 0),
+	SOC_DAPM_SINGLE("Bypass Switch", WM8770_OUTMUX2, 2, 1, 0)
+};
+
+static const struct snd_kcontrol_new vout4_mix_controls[] = {
+	SOC_DAPM_SINGLE("DAC4 Switch", WM8770_OUTMUX2, 3, 1, 0),
+	SOC_DAPM_SINGLE("Bypass Switch", WM8770_OUTMUX2, 4, 1, 0)
+};
+
+static const struct snd_soc_dapm_widget wm8770_dapm_widgets[] = {
+	SND_SOC_DAPM_INPUT("AUX1"),
+	SND_SOC_DAPM_INPUT("AUX2"),
+	SND_SOC_DAPM_INPUT("AUX3"),
+
+	SND_SOC_DAPM_INPUT("AIN1"),
+	SND_SOC_DAPM_INPUT("AIN2"),
+	SND_SOC_DAPM_INPUT("AIN3"),
+	SND_SOC_DAPM_INPUT("AIN4"),
+	SND_SOC_DAPM_INPUT("AIN5"),
+	SND_SOC_DAPM_INPUT("AIN6"),
+	SND_SOC_DAPM_INPUT("AIN7"),
+	SND_SOC_DAPM_INPUT("AIN8"),
+
+	SND_SOC_DAPM_MUX("Capture Mux", WM8770_ADCMUX, 8, 1, &ain_mux),
+
+	SND_SOC_DAPM_ADC("ADC", "Capture", WM8770_PWDNCTRL, 1, 1),
+
+	SND_SOC_DAPM_DAC("DAC1", "Playback", WM8770_PWDNCTRL, 2, 1),
+	SND_SOC_DAPM_DAC("DAC2", "Playback", WM8770_PWDNCTRL, 3, 1),
+	SND_SOC_DAPM_DAC("DAC3", "Playback", WM8770_PWDNCTRL, 4, 1),
+	SND_SOC_DAPM_DAC("DAC4", "Playback", WM8770_PWDNCTRL, 5, 1),
+
+	SND_SOC_DAPM_SUPPLY("VOUT12 Supply", SND_SOC_NOPM, 0, 0,
+		vout12supply_event, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_SUPPLY("VOUT34 Supply", SND_SOC_NOPM, 0, 0,
+		vout34supply_event, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+
+	SND_SOC_DAPM_MIXER("VOUT1 Mixer", SND_SOC_NOPM, 0, 0,
+		vout1_mix_controls, ARRAY_SIZE(vout1_mix_controls)),
+	SND_SOC_DAPM_MIXER("VOUT2 Mixer", SND_SOC_NOPM, 0, 0,
+		vout2_mix_controls, ARRAY_SIZE(vout2_mix_controls)),
+	SND_SOC_DAPM_MIXER("VOUT3 Mixer", SND_SOC_NOPM, 0, 0,
+		vout3_mix_controls, ARRAY_SIZE(vout3_mix_controls)),
+	SND_SOC_DAPM_MIXER("VOUT4 Mixer", SND_SOC_NOPM, 0, 0,
+		vout4_mix_controls, ARRAY_SIZE(vout4_mix_controls)),
+
+	SND_SOC_DAPM_OUTPUT("VOUT1"),
+	SND_SOC_DAPM_OUTPUT("VOUT2"),
+	SND_SOC_DAPM_OUTPUT("VOUT3"),
+	SND_SOC_DAPM_OUTPUT("VOUT4")
+};
+
+static const struct snd_soc_dapm_route wm8770_intercon[] = {
+	{ "Capture Mux", "AIN1", "AIN1" },
+	{ "Capture Mux", "AIN2", "AIN2" },
+	{ "Capture Mux", "AIN3", "AIN3" },
+	{ "Capture Mux", "AIN4", "AIN4" },
+	{ "Capture Mux", "AIN5", "AIN5" },
+	{ "Capture Mux", "AIN6", "AIN6" },
+	{ "Capture Mux", "AIN7", "AIN7" },
+	{ "Capture Mux", "AIN8", "AIN8" },
+
+	{ "ADC", NULL, "Capture Mux" },
+
+	{ "VOUT1 Mixer", NULL, "VOUT12 Supply" },
+	{ "VOUT1 Mixer", "DAC1 Switch", "DAC1" },
+	{ "VOUT1 Mixer", "AUX1 Switch", "AUX1" },
+	{ "VOUT1 Mixer", "Bypass Switch", "Capture Mux" },
+
+	{ "VOUT2 Mixer", NULL, "VOUT12 Supply" },
+	{ "VOUT2 Mixer", "DAC2 Switch", "DAC2" },
+	{ "VOUT2 Mixer", "AUX2 Switch", "AUX2" },
+	{ "VOUT2 Mixer", "Bypass Switch", "Capture Mux" },
+
+	{ "VOUT3 Mixer", NULL, "VOUT34 Supply" },
+	{ "VOUT3 Mixer", "DAC3 Switch", "DAC3" },
+	{ "VOUT3 Mixer", "AUX3 Switch", "AUX3" },
+	{ "VOUT3 Mixer", "Bypass Switch", "Capture Mux" },
+
+	{ "VOUT4 Mixer", NULL, "VOUT34 Supply" },
+	{ "VOUT4 Mixer", "DAC4 Switch", "DAC4" },
+	{ "VOUT4 Mixer", "Bypass Switch", "Capture Mux" },
+
+	{ "VOUT1", NULL, "VOUT1 Mixer" },
+	{ "VOUT2", NULL, "VOUT2 Mixer" },
+	{ "VOUT3", NULL, "VOUT3 Mixer" },
+	{ "VOUT4", NULL, "VOUT4 Mixer" }
+};
+
+static int vout12supply_event(struct snd_soc_dapm_widget *w,
+	struct snd_kcontrol *kcontrol, int event)
+{
+	struct snd_soc_codec *codec;
+
+	codec = w->codec;
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		snd_soc_update_bits(codec, WM8770_OUTMUX1, 0x180, 0);
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		snd_soc_update_bits(codec, WM8770_OUTMUX1, 0x180, 0x180);
+		break;
+	}
+
+	return 0;
+}
+
+static int vout34supply_event(struct snd_soc_dapm_widget *w,
+	struct snd_kcontrol *kcontrol, int event)
+{
+	struct snd_soc_codec *codec;
+
+	codec = w->codec;
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		snd_soc_update_bits(codec, WM8770_OUTMUX2, 0x180, 0);
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		snd_soc_update_bits(codec, WM8770_OUTMUX2, 0x180, 0x180);
+		break;
+	}
+
+	return 0;
+}
+
+static int wm8770_reset(struct snd_soc_codec *codec)
+{
+	return snd_soc_write(codec, WM8770_RESET, 0);
+}
+
+static int wm8770_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
+{
+	struct snd_soc_codec *codec;
+	int iface, master;
+
+	codec = dai->codec;
+
+	switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+	case SND_SOC_DAIFMT_CBM_CFM:
+		master = 0x100;
+		break;
+	case SND_SOC_DAIFMT_CBS_CFS:
+		master = 0;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	iface = 0;
+	switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+	case SND_SOC_DAIFMT_I2S:
+		iface |= 0x2;
+		break;
+	case SND_SOC_DAIFMT_RIGHT_J:
+		break;
+	case SND_SOC_DAIFMT_LEFT_J:
+		iface |= 0x1;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+	case SND_SOC_DAIFMT_NB_NF:
+		break;
+	case SND_SOC_DAIFMT_IB_IF:
+		iface |= 0xc;
+		break;
+	case SND_SOC_DAIFMT_IB_NF:
+		iface |= 0x8;
+		break;
+	case SND_SOC_DAIFMT_NB_IF:
+		iface |= 0x4;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	snd_soc_update_bits(codec, WM8770_IFACECTRL, 0xf, iface);
+	snd_soc_update_bits(codec, WM8770_MSTRCTRL, 0x100, master);
+
+	return 0;
+}
+
+static const int mclk_ratios[] = {
+	128,
+	192,
+	256,
+	384,
+	512,
+	768
+};
+
+static int wm8770_hw_params(struct snd_pcm_substream *substream,
+			    struct snd_pcm_hw_params *params,
+			    struct snd_soc_dai *dai)
+{
+	struct snd_soc_codec *codec;
+	struct wm8770_priv *wm8770;
+	int i;
+	int iface;
+	int shift;
+	int ratio;
+
+	codec = dai->codec;
+	wm8770 = snd_soc_codec_get_drvdata(codec);
+
+	iface = 0;
+	switch (params_format(params)) {
+	case SNDRV_PCM_FORMAT_S16_LE:
+		break;
+	case SNDRV_PCM_FORMAT_S20_3LE:
+		iface |= 0x10;
+		break;
+	case SNDRV_PCM_FORMAT_S24_LE:
+		iface |= 0x20;
+		break;
+	case SNDRV_PCM_FORMAT_S32_LE:
+		iface |= 0x30;
+		break;
+	}
+
+	switch (substream->stream) {
+	case SNDRV_PCM_STREAM_PLAYBACK:
+		i = 0;
+		shift = 4;
+		break;
+	case SNDRV_PCM_STREAM_CAPTURE:
+		i = 2;
+		shift = 0;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	/* Only need to set MCLK/LRCLK ratio if we're master */
+	if (snd_soc_read(codec, WM8770_MSTRCTRL) & 0x100) {
+		for (; i < ARRAY_SIZE(mclk_ratios); ++i) {
+			ratio = wm8770->sysclk / params_rate(params);
+			if (ratio == mclk_ratios[i])
+				break;
+		}
+
+		if (i == ARRAY_SIZE(mclk_ratios)) {
+			dev_err(codec->dev,
+				"Unable to configure MCLK ratio %d/%d\n",
+				wm8770->sysclk, params_rate(params));
+			return -EINVAL;
+		}
+
+		dev_dbg(codec->dev, "MCLK is %dfs\n", mclk_ratios[i]);
+
+		snd_soc_update_bits(codec, WM8770_MSTRCTRL, 0x7 << shift,
+				    i << shift);
+	}
+
+	snd_soc_update_bits(codec, WM8770_IFACECTRL, 0x30, iface);
+
+	return 0;
+}
+
+static int wm8770_mute(struct snd_soc_dai *dai, int mute)
+{
+	struct snd_soc_codec *codec;
+
+	codec = dai->codec;
+	return snd_soc_update_bits(codec, WM8770_DACMUTE, 0x10,
+				   !!mute << 4);
+}
+
+static int wm8770_set_sysclk(struct snd_soc_dai *dai,
+			     int clk_id, unsigned int freq, int dir)
+{
+	struct snd_soc_codec *codec;
+	struct wm8770_priv *wm8770;
+
+	codec = dai->codec;
+	wm8770 = snd_soc_codec_get_drvdata(codec);
+	wm8770->sysclk = freq;
+	return 0;
+}
+
+static void wm8770_sync_cache(struct snd_soc_codec *codec)
+{
+	int i;
+	u16 *cache;
+
+	if (!codec->cache_sync)
+		return;
+
+	codec->cache_only = 0;
+	cache = codec->reg_cache;
+	for (i = 0; i < codec->driver->reg_cache_size; i++) {
+		if (i == WM8770_RESET || cache[i] == wm8770_reg_defs[i])
+			continue;
+		snd_soc_write(codec, i, cache[i]);
+	}
+	codec->cache_sync = 0;
+}
+
+static int wm8770_set_bias_level(struct snd_soc_codec *codec,
+				 enum snd_soc_bias_level level)
+{
+	int ret;
+	struct wm8770_priv *wm8770;
+
+	wm8770 = snd_soc_codec_get_drvdata(codec);
+
+	switch (level) {
+	case SND_SOC_BIAS_ON:
+		break;
+	case SND_SOC_BIAS_PREPARE:
+		break;
+	case SND_SOC_BIAS_STANDBY:
+		if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
+			ret = regulator_bulk_enable(ARRAY_SIZE(wm8770->supplies),
+						    wm8770->supplies);
+			if (ret) {
+				dev_err(codec->dev,
+					"Failed to enable supplies: %d\n",
+					ret);
+				return ret;
+			}
+			wm8770_sync_cache(codec);
+			/* global powerup */
+			snd_soc_write(codec, WM8770_PWDNCTRL, 0);
+		}
+		break;
+	case SND_SOC_BIAS_OFF:
+		/* global powerdown */
+		snd_soc_write(codec, WM8770_PWDNCTRL, 1);
+		regulator_bulk_disable(ARRAY_SIZE(wm8770->supplies),
+				       wm8770->supplies);
+		break;
+	}
+
+	codec->dapm.bias_level = level;
+	return 0;
+}
+
+#define WM8770_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE | \
+			SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE)
+
+static struct snd_soc_dai_ops wm8770_dai_ops = {
+	.digital_mute = wm8770_mute,
+	.hw_params = wm8770_hw_params,
+	.set_fmt = wm8770_set_fmt,
+	.set_sysclk = wm8770_set_sysclk,
+};
+
+static struct snd_soc_dai_driver wm8770_dai = {
+	.name = "wm8770-hifi",
+	.playback = {
+		.stream_name = "Playback",
+		.channels_min = 2,
+		.channels_max = 2,
+		.rates = SNDRV_PCM_RATE_8000_192000,
+		.formats = WM8770_FORMATS
+	},
+	.capture = {
+		.stream_name = "Capture",
+		.channels_min = 2,
+		.channels_max = 2,
+		.rates = SNDRV_PCM_RATE_8000_96000,
+		.formats = WM8770_FORMATS
+	},
+	.ops = &wm8770_dai_ops,
+	.symmetric_rates = 1
+};
+
+#ifdef CONFIG_PM
+static int wm8770_suspend(struct snd_soc_codec *codec, pm_message_t state)
+{
+	wm8770_set_bias_level(codec, SND_SOC_BIAS_OFF);
+	return 0;
+}
+
+static int wm8770_resume(struct snd_soc_codec *codec)
+{
+	wm8770_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
+	return 0;
+}
+#else
+#define wm8770_suspend NULL
+#define wm8770_resume NULL
+#endif
+
+static int wm8770_probe(struct snd_soc_codec *codec)
+{
+	struct wm8770_priv *wm8770;
+	int ret;
+	int i;
+
+	wm8770 = snd_soc_codec_get_drvdata(codec);
+	wm8770->codec = codec;
+
+	codec->dapm.idle_bias_off = 1;
+
+	ret = snd_soc_codec_set_cache_io(codec, 7, 9, wm8770->control_type);
+	if (ret < 0) {
+		dev_err(codec->dev, "Failed to set cache I/O: %d\n", ret);
+		return ret;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(wm8770->supplies); i++)
+		wm8770->supplies[i].supply = wm8770_supply_names[i];
+
+	ret = regulator_bulk_get(codec->dev, ARRAY_SIZE(wm8770->supplies),
+				 wm8770->supplies);
+	if (ret) {
+		dev_err(codec->dev, "Failed to request supplies: %d\n", ret);
+		return ret;
+	}
+
+	wm8770->disable_nb[0].notifier_call = wm8770_regulator_event_0;
+	wm8770->disable_nb[1].notifier_call = wm8770_regulator_event_1;
+	wm8770->disable_nb[2].notifier_call = wm8770_regulator_event_2;
+
+	/* This should really be moved into the regulator core */
+	for (i = 0; i < ARRAY_SIZE(wm8770->supplies); i++) {
+		ret = regulator_register_notifier(wm8770->supplies[i].consumer,
+						  &wm8770->disable_nb[i]);
+		if (ret) {
+			dev_err(codec->dev,
+				"Failed to register regulator notifier: %d\n",
+				ret);
+		}
+	}
+
+	ret = regulator_bulk_enable(ARRAY_SIZE(wm8770->supplies),
+				    wm8770->supplies);
+	if (ret) {
+		dev_err(codec->dev, "Failed to enable supplies: %d\n", ret);
+		goto err_reg_get;
+	}
+
+	ret = wm8770_reset(codec);
+	if (ret < 0) {
+		dev_err(codec->dev, "Failed to issue reset: %d\n", ret);
+		goto err_reg_enable;
+	}
+
+	wm8770_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
+
+	/* latch the volume update bits */
+	snd_soc_update_bits(codec, WM8770_MSDIGVOL, 0x100, 0x100);
+	snd_soc_update_bits(codec, WM8770_MSALGVOL, 0x100, 0x100);
+	snd_soc_update_bits(codec, WM8770_VOUT1RVOL, 0x100, 0x100);
+	snd_soc_update_bits(codec, WM8770_VOUT2RVOL, 0x100, 0x100);
+	snd_soc_update_bits(codec, WM8770_VOUT3RVOL, 0x100, 0x100);
+	snd_soc_update_bits(codec, WM8770_VOUT4RVOL, 0x100, 0x100);
+	snd_soc_update_bits(codec, WM8770_DAC1RVOL, 0x100, 0x100);
+	snd_soc_update_bits(codec, WM8770_DAC2RVOL, 0x100, 0x100);
+	snd_soc_update_bits(codec, WM8770_DAC3RVOL, 0x100, 0x100);
+	snd_soc_update_bits(codec, WM8770_DAC4RVOL, 0x100, 0x100);
+
+	/* mute all DACs */
+	snd_soc_update_bits(codec, WM8770_DACMUTE, 0x10, 0x10);
+
+	snd_soc_add_controls(codec, wm8770_snd_controls,
+			     ARRAY_SIZE(wm8770_snd_controls));
+	snd_soc_dapm_new_controls(&codec->dapm, wm8770_dapm_widgets,
+				  ARRAY_SIZE(wm8770_dapm_widgets));
+	snd_soc_dapm_add_routes(&codec->dapm, wm8770_intercon,
+				ARRAY_SIZE(wm8770_intercon));
+	return 0;
+
+err_reg_enable:
+	regulator_bulk_disable(ARRAY_SIZE(wm8770->supplies), wm8770->supplies);
+err_reg_get:
+	regulator_bulk_free(ARRAY_SIZE(wm8770->supplies), wm8770->supplies);
+	return ret;
+}
+
+static int wm8770_remove(struct snd_soc_codec *codec)
+{
+	struct wm8770_priv *wm8770;
+	int i;
+
+	wm8770 = snd_soc_codec_get_drvdata(codec);
+	wm8770_set_bias_level(codec, SND_SOC_BIAS_OFF);
+
+	for (i = 0; i < ARRAY_SIZE(wm8770->supplies); ++i)
+		regulator_unregister_notifier(wm8770->supplies[i].consumer,
+					      &wm8770->disable_nb[i]);
+	regulator_bulk_free(ARRAY_SIZE(wm8770->supplies), wm8770->supplies);
+	return 0;
+}
+
+static struct snd_soc_codec_driver soc_codec_dev_wm8770 = {
+	.probe = wm8770_probe,
+	.remove = wm8770_remove,
+	.suspend = wm8770_suspend,
+	.resume = wm8770_resume,
+	.set_bias_level = wm8770_set_bias_level,
+	.reg_cache_size = ARRAY_SIZE(wm8770_reg_defs),
+	.reg_word_size = sizeof(u16),
+	.reg_cache_default = wm8770_reg_defs
+};
+
+#if defined(CONFIG_SPI_MASTER)
+static int __devinit wm8770_spi_probe(struct spi_device *spi)
+{
+	struct wm8770_priv *wm8770;
+	int ret;
+
+	wm8770 = kzalloc(sizeof(struct wm8770_priv), GFP_KERNEL);
+	if (!wm8770)
+		return -ENOMEM;
+
+	wm8770->control_type = SND_SOC_SPI;
+	spi_set_drvdata(spi, wm8770);
+
+	ret = snd_soc_register_codec(&spi->dev,
+				     &soc_codec_dev_wm8770, &wm8770_dai, 1);
+	if (ret < 0)
+		kfree(wm8770);
+	return ret;
+}
+
+static int __devexit wm8770_spi_remove(struct spi_device *spi)
+{
+	snd_soc_unregister_codec(&spi->dev);
+	kfree(spi_get_drvdata(spi));
+	return 0;
+}
+
+static struct spi_driver wm8770_spi_driver = {
+	.driver = {
+		.name = "wm8770",
+		.owner = THIS_MODULE,
+	},
+	.probe = wm8770_spi_probe,
+	.remove = __devexit_p(wm8770_spi_remove)
+};
+#endif
+
+static int __init wm8770_modinit(void)
+{
+	int ret = 0;
+
+#if defined(CONFIG_SPI_MASTER)
+	ret = spi_register_driver(&wm8770_spi_driver);
+	if (ret) {
+		printk(KERN_ERR "Failed to register wm8770 SPI driver: %d\n",
+		       ret);
+	}
+#endif
+	return ret;
+}
+module_init(wm8770_modinit);
+
+static void __exit wm8770_exit(void)
+{
+#if defined(CONFIG_SPI_MASTER)
+	spi_unregister_driver(&wm8770_spi_driver);
+#endif
+}
+module_exit(wm8770_exit);
+
+MODULE_DESCRIPTION("ASoC WM8770 driver");
+MODULE_AUTHOR("Dimitris Papastamos <dp@opensource.wolfsonmicro.com>");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/wm8770.h b/sound/soc/codecs/wm8770.h
new file mode 100644
index 0000000..5f1b3bd
--- /dev/null
+++ b/sound/soc/codecs/wm8770.h
@@ -0,0 +1,51 @@
+/*
+ * wm8770.h  --  WM8770 ASoC driver
+ *
+ * Copyright 2010 Wolfson Microelectronics plc
+ *
+ * Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _WM8770_H
+#define _WM8770_H
+
+/* Registers */
+#define WM8770_VOUT1LVOL                0
+#define WM8770_VOUT1RVOL                0x1
+#define WM8770_VOUT2LVOL                0x2
+#define WM8770_VOUT2RVOL                0x3
+#define WM8770_VOUT3LVOL                0x4
+#define WM8770_VOUT3RVOL                0x5
+#define WM8770_VOUT4LVOL                0x6
+#define WM8770_VOUT4RVOL                0x7
+#define WM8770_MSALGVOL                 0x8
+#define WM8770_DAC1LVOL                 0x9
+#define WM8770_DAC1RVOL                 0xa
+#define WM8770_DAC2LVOL                 0xb
+#define WM8770_DAC2RVOL                 0xc
+#define WM8770_DAC3LVOL                 0xd
+#define WM8770_DAC3RVOL                 0xe
+#define WM8770_DAC4LVOL                 0xf
+#define WM8770_DAC4RVOL                 0x10
+#define WM8770_MSDIGVOL                 0x11
+#define WM8770_DACPHASE                 0x12
+#define WM8770_DACCTRL1                 0x13
+#define WM8770_DACMUTE                  0x14
+#define WM8770_DACCTRL2                 0x15
+#define WM8770_IFACECTRL                0x16
+#define WM8770_MSTRCTRL                 0x17
+#define WM8770_PWDNCTRL                 0x18
+#define WM8770_ADCLCTRL                 0x19
+#define WM8770_ADCRCTRL                 0x1a
+#define WM8770_ADCMUX                   0x1b
+#define WM8770_OUTMUX1                  0x1c
+#define WM8770_OUTMUX2                  0x1d
+#define WM8770_RESET                    0x31
+
+#define WM8770_CACHEREGNUM 0x20
+
+#endif
diff --git a/sound/soc/codecs/wm8776.c b/sound/soc/codecs/wm8776.c
index 0132a27..8e7953b 100644
--- a/sound/soc/codecs/wm8776.c
+++ b/sound/soc/codecs/wm8776.c
@@ -25,7 +25,6 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/initval.h>
 #include <sound/tlv.h>
 
@@ -306,7 +305,7 @@
 	case SND_SOC_BIAS_PREPARE:
 		break;
 	case SND_SOC_BIAS_STANDBY:
-		if (codec->bias_level == SND_SOC_BIAS_OFF) {
+		if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
 			/* Disable the global powerdown; DAPM does the rest */
 			snd_soc_update_bits(codec, WM8776_PWRDOWN, 1, 0);
 		}
@@ -317,7 +316,7 @@
 		break;
 	}
 
-	codec->bias_level = level;
+	codec->dapm.bias_level = level;
 	return 0;
 }
 
@@ -404,6 +403,7 @@
 static int wm8776_probe(struct snd_soc_codec *codec)
 {
 	struct wm8776_priv *wm8776 = snd_soc_codec_get_drvdata(codec);
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 	int ret = 0;
 
 	ret = snd_soc_codec_set_cache_io(codec, 7, 9, wm8776->control_type);
@@ -427,9 +427,9 @@
 
 	snd_soc_add_controls(codec, wm8776_snd_controls,
 			     ARRAY_SIZE(wm8776_snd_controls));
-	snd_soc_dapm_new_controls(codec, wm8776_dapm_widgets,
+	snd_soc_dapm_new_controls(dapm, wm8776_dapm_widgets,
 				  ARRAY_SIZE(wm8776_dapm_widgets));
-	snd_soc_dapm_add_routes(codec, routes, ARRAY_SIZE(routes));
+	snd_soc_dapm_add_routes(dapm, routes, ARRAY_SIZE(routes));
 
 	return ret;
 }
diff --git a/sound/soc/codecs/wm8804.c b/sound/soc/codecs/wm8804.c
index 4599e8e..6dae1b4 100644
--- a/sound/soc/codecs/wm8804.c
+++ b/sound/soc/codecs/wm8804.c
@@ -23,7 +23,6 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/initval.h>
 #include <sound/tlv.h>
 
@@ -515,7 +514,7 @@
 		snd_soc_update_bits(codec, WM8804_PWRDN, 0x9, 0);
 		break;
 	case SND_SOC_BIAS_STANDBY:
-		if (codec->bias_level == SND_SOC_BIAS_OFF) {
+		if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
 			ret = regulator_bulk_enable(ARRAY_SIZE(wm8804->supplies),
 						    wm8804->supplies);
 			if (ret) {
@@ -537,7 +536,7 @@
 		break;
 	}
 
-	codec->bias_level = level;
+	codec->dapm.bias_level = level;
 	return 0;
 }
 
@@ -581,7 +580,7 @@
 	wm8804 = snd_soc_codec_get_drvdata(codec);
 	wm8804->codec = codec;
 
-	codec->idle_bias_off = 1;
+	codec->dapm.idle_bias_off = 1;
 
 	ret = snd_soc_codec_set_cache_io(codec, 8, 8, wm8804->control_type);
 	if (ret < 0) {
diff --git a/sound/soc/codecs/wm8900.c b/sound/soc/codecs/wm8900.c
index aca4b1e..cd09599 100644
--- a/sound/soc/codecs/wm8900.c
+++ b/sound/soc/codecs/wm8900.c
@@ -30,7 +30,6 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/initval.h>
 #include <sound/tlv.h>
 
@@ -140,7 +139,6 @@
 
 struct wm8900_priv {
 	enum snd_soc_control_type control_type;
-	u16 reg_cache[WM8900_MAXREG];
 
 	u32 fll_in; /* FLL input frequency */
 	u32 fll_out; /* FLL output frequency */
@@ -611,10 +609,11 @@
 
 static int wm8900_add_widgets(struct snd_soc_codec *codec)
 {
-	snd_soc_dapm_new_controls(codec, wm8900_dapm_widgets,
-				  ARRAY_SIZE(wm8900_dapm_widgets));
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 
-	snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+	snd_soc_dapm_new_controls(dapm, wm8900_dapm_widgets,
+				  ARRAY_SIZE(wm8900_dapm_widgets));
+	snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 
 	return 0;
 }
@@ -1051,7 +1050,7 @@
 
 	case SND_SOC_BIAS_STANDBY:
 		/* Charge capacitors if initial power up */
-		if (codec->bias_level == SND_SOC_BIAS_OFF) {
+		if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
 			/* STARTUP_BIAS_ENA on */
 			snd_soc_write(codec, WM8900_REG_POWER1,
 				     WM8900_REG_POWER1_STARTUP_BIAS_ENA);
@@ -1119,7 +1118,7 @@
 			     WM8900_REG_POWER2_SYSCLK_ENA);
 		break;
 	}
-	codec->bias_level = level;
+	codec->dapm.bias_level = level;
 	return 0;
 }
 
diff --git a/sound/soc/codecs/wm8903.c b/sound/soc/codecs/wm8903.c
index 622b602..987476a 100644
--- a/sound/soc/codecs/wm8903.c
+++ b/sound/soc/codecs/wm8903.c
@@ -29,9 +29,9 @@
 #include <sound/pcm_params.h>
 #include <sound/tlv.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/initval.h>
 #include <sound/wm8903.h>
+#include <trace/events/asoc.h>
 
 #include "wm8903.h"
 
@@ -214,15 +214,14 @@
 
 struct wm8903_priv {
 
-	u16 reg_cache[ARRAY_SIZE(wm8903_reg_defaults)];
-
 	int sysclk;
 	int irq;
 
-	/* Reference counts */
+	int fs;
+	int deemph;
+
+	/* Reference count */
 	int class_w_users;
-	int playback_active;
-	int capture_active;
 
 	struct completion wseq;
 
@@ -231,9 +230,6 @@
 	int mic_short;
 	int mic_last_report;
 	int mic_delay;
-
-	struct snd_pcm_substream *master_substream;
-	struct snd_pcm_substream *slave_substream;
 };
 
 static int wm8903_volatile_register(unsigned int reg)
@@ -463,6 +459,72 @@
 	.private_value =  SOC_SINGLE_VALUE(reg, shift, max, invert) }
 
 
+static int wm8903_deemph[] = { 0, 32000, 44100, 48000 };
+
+static int wm8903_set_deemph(struct snd_soc_codec *codec)
+{
+	struct wm8903_priv *wm8903 = snd_soc_codec_get_drvdata(codec);
+	int val, i, best;
+
+	/* If we're using deemphasis select the nearest available sample
+	 * rate.
+	 */
+	if (wm8903->deemph) {
+		best = 1;
+		for (i = 2; i < ARRAY_SIZE(wm8903_deemph); i++) {
+			if (abs(wm8903_deemph[i] - wm8903->fs) <
+			    abs(wm8903_deemph[best] - wm8903->fs))
+				best = i;
+		}
+
+		val = best << WM8903_DEEMPH_SHIFT;
+	} else {
+		best = 0;
+		val = 0;
+	}
+
+	dev_dbg(codec->dev, "Set deemphasis %d (%dHz)\n",
+		best, wm8903_deemph[best]);
+
+	return snd_soc_update_bits(codec, WM8903_DAC_DIGITAL_1,
+				   WM8903_DEEMPH_MASK, val);
+}
+
+static int wm8903_get_deemph(struct snd_kcontrol *kcontrol,
+			     struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+	struct wm8903_priv *wm8903 = snd_soc_codec_get_drvdata(codec);
+
+	ucontrol->value.enumerated.item[0] = wm8903->deemph;
+
+	return 0;
+}
+
+static int wm8903_put_deemph(struct snd_kcontrol *kcontrol,
+			     struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+	struct wm8903_priv *wm8903 = snd_soc_codec_get_drvdata(codec);
+	int deemph = ucontrol->value.enumerated.item[0];
+	int ret = 0;
+
+	if (deemph > 1)
+		return -EINVAL;
+
+	mutex_lock(&codec->mutex);
+	if (wm8903->deemph != deemph) {
+		wm8903->deemph = deemph;
+
+		wm8903_set_deemph(codec);
+
+		ret = 1;
+	}
+	mutex_unlock(&codec->mutex);
+
+	return ret;
+}
+
 /* ALSA can only do steps of .01dB */
 static const DECLARE_TLV_DB_SCALE(digital_tlv, -7200, 75, 1);
 
@@ -475,6 +537,23 @@
 static const DECLARE_TLV_DB_SCALE(drc_tlv_max, 1200, 600, 0);
 static const DECLARE_TLV_DB_SCALE(drc_tlv_startup, -300, 50, 0);
 
+static const char *hpf_mode_text[] = {
+	"Hi-fi", "Voice 1", "Voice 2", "Voice 3"
+};
+
+static const struct soc_enum hpf_mode =
+	SOC_ENUM_SINGLE(WM8903_ADC_DIGITAL_0, 5, 4, hpf_mode_text);
+
+static const char *osr_text[] = {
+	"Low power", "High performance"
+};
+
+static const struct soc_enum adc_osr =
+	SOC_ENUM_SINGLE(WM8903_ANALOGUE_ADC_0, 0, 2, osr_text);
+
+static const struct soc_enum dac_osr =
+	SOC_ENUM_SINGLE(WM8903_DAC_DIGITAL_1, 0, 2, osr_text);
+
 static const char *drc_slope_text[] = {
 	"1", "1/2", "1/4", "1/8", "1/16", "0"
 };
@@ -537,13 +616,6 @@
 static const struct soc_enum mute_mode =
 	SOC_ENUM_SINGLE(WM8903_DAC_DIGITAL_1, 9, 2, mute_mode_text);
 
-static const char *dac_deemphasis_text[] = {
-	"Disabled", "32kHz", "44.1kHz", "48kHz"
-};
-
-static const struct soc_enum dac_deemphasis =
-	SOC_ENUM_SINGLE(WM8903_DAC_DIGITAL_1, 1, 4, dac_deemphasis_text);
-
 static const char *companding_text[] = {
 	"ulaw", "alaw"
 };
@@ -613,6 +685,9 @@
 	   6, 1, 0),
 
 /* ADCs */
+SOC_ENUM("ADC OSR", adc_osr),
+SOC_SINGLE("HPF Switch", WM8903_ADC_DIGITAL_0, 4, 1, 0),
+SOC_ENUM("HPF Mode", hpf_mode),
 SOC_SINGLE("DRC Switch", WM8903_DRC_0, 15, 1, 0),
 SOC_ENUM("DRC Compressor Slope R0", drc_slope_r0),
 SOC_ENUM("DRC Compressor Slope R1", drc_slope_r1),
@@ -642,14 +717,16 @@
 	       12, 0, digital_sidetone_tlv),
 
 /* DAC */
+SOC_ENUM("DAC OSR", dac_osr),
 SOC_DOUBLE_R_TLV("Digital Playback Volume", WM8903_DAC_DIGITAL_VOLUME_LEFT,
 		 WM8903_DAC_DIGITAL_VOLUME_RIGHT, 1, 120, 0, digital_tlv),
 SOC_ENUM("DAC Soft Mute Rate", soft_mute),
 SOC_ENUM("DAC Mute Mode", mute_mode),
 SOC_SINGLE("DAC Mono Switch", WM8903_DAC_DIGITAL_1, 12, 1, 0),
-SOC_ENUM("DAC De-emphasis", dac_deemphasis),
 SOC_ENUM("DAC Companding Mode", dac_companding),
 SOC_SINGLE("DAC Companding Switch", WM8903_AUDIO_INTERFACE_0, 1, 1, 0),
+SOC_SINGLE_BOOL_EXT("Playback Deemphasis Switch", 0,
+		    wm8903_get_deemph, wm8903_put_deemph),
 
 /* Headphones */
 SOC_DOUBLE_R("Headphone Switch",
@@ -923,10 +1000,11 @@
 
 static int wm8903_add_widgets(struct snd_soc_codec *codec)
 {
-	snd_soc_dapm_new_controls(codec, wm8903_dapm_widgets,
-				  ARRAY_SIZE(wm8903_dapm_widgets));
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 
-	snd_soc_dapm_add_routes(codec, intercon, ARRAY_SIZE(intercon));
+	snd_soc_dapm_new_controls(dapm, wm8903_dapm_widgets,
+				  ARRAY_SIZE(wm8903_dapm_widgets));
+	snd_soc_dapm_add_routes(dapm, intercon, ARRAY_SIZE(intercon));
 
 	return 0;
 }
@@ -934,7 +1012,7 @@
 static int wm8903_set_bias_level(struct snd_soc_codec *codec,
 				 enum snd_soc_bias_level level)
 {
-	u16 reg, reg2;
+	u16 reg;
 
 	switch (level) {
 	case SND_SOC_BIAS_ON:
@@ -946,7 +1024,7 @@
 		break;
 
 	case SND_SOC_BIAS_STANDBY:
-		if (codec->bias_level == SND_SOC_BIAS_OFF) {
+		if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
 			snd_soc_write(codec, WM8903_CLOCK_RATES_2,
 				     WM8903_CLK_SYS_ENA);
 
@@ -958,23 +1036,15 @@
 			wm8903_run_sequence(codec, 0);
 			wm8903_sync_reg_cache(codec, codec->reg_cache);
 
-			/* Enable low impedence charge pump output */
-			reg = snd_soc_read(codec,
-					  WM8903_CONTROL_INTERFACE_TEST_1);
-			snd_soc_write(codec, WM8903_CONTROL_INTERFACE_TEST_1,
-				     reg | WM8903_TEST_KEY);
-			reg2 = snd_soc_read(codec, WM8903_CHARGE_PUMP_TEST_1);
-			snd_soc_write(codec, WM8903_CHARGE_PUMP_TEST_1,
-				     reg2 | WM8903_CP_SW_KELVIN_MODE_MASK);
-			snd_soc_write(codec, WM8903_CONTROL_INTERFACE_TEST_1,
-				     reg);
-
 			/* By default no bypass paths are enabled so
 			 * enable Class W support.
 			 */
 			dev_dbg(codec->dev, "Enabling Class W\n");
-			snd_soc_write(codec, WM8903_CLASS_W_0, reg |
-				     WM8903_CP_DYN_FREQ | WM8903_CP_DYN_V);
+			snd_soc_update_bits(codec, WM8903_CLASS_W_0,
+					    WM8903_CP_DYN_FREQ |
+					    WM8903_CP_DYN_V,
+					    WM8903_CP_DYN_FREQ |
+					    WM8903_CP_DYN_V);
 		}
 
 		reg = snd_soc_read(codec, WM8903_VMID_CONTROL_0);
@@ -991,7 +1061,7 @@
 		break;
 	}
 
-	codec->bias_level = level;
+	codec->dapm.bias_level = level;
 
 	return 0;
 }
@@ -1222,58 +1292,6 @@
 	{ 0,      0 },
 };
 
-static int wm8903_startup(struct snd_pcm_substream *substream,
-			  struct snd_soc_dai *dai)
-{
-	struct snd_soc_pcm_runtime *rtd = substream->private_data;
-	struct snd_soc_codec *codec = rtd->codec;
-	struct wm8903_priv *wm8903 = snd_soc_codec_get_drvdata(codec);
-	struct snd_pcm_runtime *master_runtime;
-
-	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
-		wm8903->playback_active++;
-	else
-		wm8903->capture_active++;
-
-	/* The DAI has shared clocks so if we already have a playback or
-	 * capture going then constrain this substream to match it.
-	 */
-	if (wm8903->master_substream) {
-		master_runtime = wm8903->master_substream->runtime;
-
-		dev_dbg(codec->dev, "Constraining to %d bits\n",
-			master_runtime->sample_bits);
-
-		snd_pcm_hw_constraint_minmax(substream->runtime,
-					     SNDRV_PCM_HW_PARAM_SAMPLE_BITS,
-					     master_runtime->sample_bits,
-					     master_runtime->sample_bits);
-
-		wm8903->slave_substream = substream;
-	} else
-		wm8903->master_substream = substream;
-
-	return 0;
-}
-
-static void wm8903_shutdown(struct snd_pcm_substream *substream,
-			    struct snd_soc_dai *dai)
-{
-	struct snd_soc_pcm_runtime *rtd = substream->private_data;
-	struct snd_soc_codec *codec = rtd->codec;
-	struct wm8903_priv *wm8903 = snd_soc_codec_get_drvdata(codec);
-
-	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
-		wm8903->playback_active--;
-	else
-		wm8903->capture_active--;
-
-	if (wm8903->master_substream == substream)
-		wm8903->master_substream = wm8903->slave_substream;
-
-	wm8903->slave_substream = NULL;
-}
-
 static int wm8903_hw_params(struct snd_pcm_substream *substream,
 			    struct snd_pcm_hw_params *params,
 			    struct snd_soc_dai *dai)
@@ -1298,11 +1316,6 @@
 	u16 clock1 = snd_soc_read(codec, WM8903_CLOCK_RATES_1);
 	u16 dac_digital1 = snd_soc_read(codec, WM8903_DAC_DIGITAL_1);
 
-	if (substream == wm8903->slave_substream) {
-		dev_dbg(codec->dev, "Ignoring hw_params for slave substream\n");
-		return 0;
-	}
-
 	/* Enable sloping stopband filter for low sample rates */
 	if (fs <= 24000)
 		dac_digital1 |= WM8903_DAC_SB_FILT;
@@ -1320,19 +1333,6 @@
 		}
 	}
 
-	/* Constraints should stop us hitting this but let's make sure */
-	if (wm8903->capture_active)
-		switch (sample_rates[dsp_config].rate) {
-		case 88200:
-		case 96000:
-			dev_err(codec->dev, "%dHz unsupported by ADC\n",
-				fs);
-			return -EINVAL;
-
-		default:
-			break;
-		}
-
 	dev_dbg(codec->dev, "DSP fs = %dHz\n", sample_rates[dsp_config].rate);
 	clock1 &= ~WM8903_SAMPLE_RATE_MASK;
 	clock1 |= sample_rates[dsp_config].value;
@@ -1428,6 +1428,9 @@
 	aif2 |= bclk_divs[bclk_div].div;
 	aif3 |= bclk / fs;
 
+	wm8903->fs = params_rate(params);
+	wm8903_set_deemph(codec);
+
 	snd_soc_write(codec, WM8903_CLOCK_RATES_0, clock0);
 	snd_soc_write(codec, WM8903_CLOCK_RATES_1, clock1);
 	snd_soc_write(codec, WM8903_AUDIO_INTERFACE_1, aif1);
@@ -1521,6 +1524,11 @@
 	mic_report = wm8903->mic_last_report;
 	int_pol = snd_soc_read(codec, WM8903_INTERRUPT_POLARITY_1);
 
+#ifndef CONFIG_SND_SOC_WM8903_MODULE
+	if (int_val & (WM8903_MICSHRT_EINT | WM8903_MICDET_EINT))
+		trace_snd_soc_jack_irq(dev_name(codec->dev));
+#endif
+
 	if (int_val & WM8903_MICSHRT_EINT) {
 		dev_dbg(codec->dev, "Microphone short (pol=%x)\n", int_pol);
 
@@ -1571,8 +1579,6 @@
 			SNDRV_PCM_FMTBIT_S24_LE)
 
 static struct snd_soc_dai_ops wm8903_dai_ops = {
-	.startup	= wm8903_startup,
-	.shutdown	= wm8903_shutdown,
 	.hw_params	= wm8903_hw_params,
 	.digital_mute	= wm8903_digital_mute,
 	.set_fmt	= wm8903_set_dai_fmt,
diff --git a/sound/soc/codecs/wm8903.h b/sound/soc/codecs/wm8903.h
index 996435e..e8490f3 100644
--- a/sound/soc/codecs/wm8903.h
+++ b/sound/soc/codecs/wm8903.h
@@ -19,10 +19,6 @@
 			     struct snd_soc_jack *jack,
 			     int det, int shrt);
 
-#define WM8903_MCLK_DIV_2 1
-#define WM8903_CLK_SYS    2
-#define WM8903_BCLK       3
-#define WM8903_LRCLK      4
 
 /*
  * Register values.
@@ -98,8 +94,6 @@
 #define WM8903_INTERRUPT_STATUS_1_MASK          0x7A
 #define WM8903_INTERRUPT_POLARITY_1             0x7B
 #define WM8903_INTERRUPT_CONTROL                0x7E
-#define WM8903_CONTROL_INTERFACE_TEST_1         0x81
-#define WM8903_CHARGE_PUMP_TEST_1               0x95
 #define WM8903_CLOCK_RATE_TEST_4                0xA4
 #define WM8903_ANALOGUE_OUTPUT_BIAS_0           0xAC
 
@@ -1206,25 +1200,6 @@
 #define WM8903_IRQ_POL_WIDTH                         1  /* IRQ_POL */
 
 /*
- * R129 (0x81) - Control Interface Test 1
- */
-#define WM8903_USER_KEY                         0x0002  /* USER_KEY */
-#define WM8903_USER_KEY_MASK                    0x0002  /* USER_KEY */
-#define WM8903_USER_KEY_SHIFT                        1  /* USER_KEY */
-#define WM8903_USER_KEY_WIDTH                        1  /* USER_KEY */
-#define WM8903_TEST_KEY                         0x0001  /* TEST_KEY */
-#define WM8903_TEST_KEY_MASK                    0x0001  /* TEST_KEY */
-#define WM8903_TEST_KEY_SHIFT                        0  /* TEST_KEY */
-#define WM8903_TEST_KEY_WIDTH                        1  /* TEST_KEY */
-
-/*
- * R149 (0x95) - Charge Pump Test 1
- */
-#define WM8903_CP_SW_KELVIN_MODE_MASK           0x0006  /* CP_SW_KELVIN_MODE - [2:1] */
-#define WM8903_CP_SW_KELVIN_MODE_SHIFT               1  /* CP_SW_KELVIN_MODE - [2:1] */
-#define WM8903_CP_SW_KELVIN_MODE_WIDTH               2  /* CP_SW_KELVIN_MODE - [2:1] */
-
-/*
  * R164 (0xA4) - Clock Rate Test 4
  */
 #define WM8903_ADC_DIG_MIC                      0x0200  /* ADC_DIG_MIC */
diff --git a/sound/soc/codecs/wm8904.c b/sound/soc/codecs/wm8904.c
index 1ec12ef..9de44a4 100644
--- a/sound/soc/codecs/wm8904.c
+++ b/sound/soc/codecs/wm8904.c
@@ -24,7 +24,6 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/initval.h>
 #include <sound/tlv.h>
 #include <sound/wm8904.h>
@@ -1427,10 +1426,11 @@
 static int wm8904_add_widgets(struct snd_soc_codec *codec)
 {
 	struct wm8904_priv *wm8904 = snd_soc_codec_get_drvdata(codec);
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 
-	snd_soc_dapm_new_controls(codec, wm8904_core_dapm_widgets,
+	snd_soc_dapm_new_controls(dapm, wm8904_core_dapm_widgets,
 				  ARRAY_SIZE(wm8904_core_dapm_widgets));
-	snd_soc_dapm_add_routes(codec, core_intercon,
+	snd_soc_dapm_add_routes(dapm, core_intercon,
 				ARRAY_SIZE(core_intercon));
 
 	switch (wm8904->devtype) {
@@ -1442,20 +1442,20 @@
 		snd_soc_add_controls(codec, wm8904_snd_controls,
 				     ARRAY_SIZE(wm8904_snd_controls));
 
-		snd_soc_dapm_new_controls(codec, wm8904_adc_dapm_widgets,
+		snd_soc_dapm_new_controls(dapm, wm8904_adc_dapm_widgets,
 					  ARRAY_SIZE(wm8904_adc_dapm_widgets));
-		snd_soc_dapm_new_controls(codec, wm8904_dac_dapm_widgets,
+		snd_soc_dapm_new_controls(dapm, wm8904_dac_dapm_widgets,
 					  ARRAY_SIZE(wm8904_dac_dapm_widgets));
-		snd_soc_dapm_new_controls(codec, wm8904_dapm_widgets,
+		snd_soc_dapm_new_controls(dapm, wm8904_dapm_widgets,
 					  ARRAY_SIZE(wm8904_dapm_widgets));
 
-		snd_soc_dapm_add_routes(codec, core_intercon,
+		snd_soc_dapm_add_routes(dapm, core_intercon,
 					ARRAY_SIZE(core_intercon));
-		snd_soc_dapm_add_routes(codec, adc_intercon,
+		snd_soc_dapm_add_routes(dapm, adc_intercon,
 					ARRAY_SIZE(adc_intercon));
-		snd_soc_dapm_add_routes(codec, dac_intercon,
+		snd_soc_dapm_add_routes(dapm, dac_intercon,
 					ARRAY_SIZE(dac_intercon));
-		snd_soc_dapm_add_routes(codec, wm8904_intercon,
+		snd_soc_dapm_add_routes(dapm, wm8904_intercon,
 					ARRAY_SIZE(wm8904_intercon));
 		break;
 
@@ -1463,17 +1463,17 @@
 		snd_soc_add_controls(codec, wm8904_dac_snd_controls,
 				     ARRAY_SIZE(wm8904_dac_snd_controls));
 
-		snd_soc_dapm_new_controls(codec, wm8904_dac_dapm_widgets,
+		snd_soc_dapm_new_controls(dapm, wm8904_dac_dapm_widgets,
 					  ARRAY_SIZE(wm8904_dac_dapm_widgets));
 
-		snd_soc_dapm_add_routes(codec, dac_intercon,
+		snd_soc_dapm_add_routes(dapm, dac_intercon,
 					ARRAY_SIZE(dac_intercon));
-		snd_soc_dapm_add_routes(codec, wm8912_intercon,
+		snd_soc_dapm_add_routes(dapm, wm8912_intercon,
 					ARRAY_SIZE(wm8912_intercon));
 		break;
 	}
 
-	snd_soc_dapm_new_widgets(codec);
+	snd_soc_dapm_new_widgets(dapm);
 	return 0;
 }
 
@@ -1589,7 +1589,7 @@
 		       - wm8904->fs);
 	for (i = 1; i < ARRAY_SIZE(clk_sys_rates); i++) {
 		cur_val = abs((wm8904->sysclk_rate /
-			       clk_sys_rates[i].ratio) - wm8904->fs);;
+			       clk_sys_rates[i].ratio) - wm8904->fs);
 		if (cur_val < best_val) {
 			best = i;
 			best_val = cur_val;
@@ -2138,7 +2138,7 @@
 		break;
 
 	case SND_SOC_BIAS_STANDBY:
-		if (codec->bias_level == SND_SOC_BIAS_OFF) {
+		if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
 			ret = regulator_bulk_enable(ARRAY_SIZE(wm8904->supplies),
 						    wm8904->supplies);
 			if (ret != 0) {
@@ -2197,7 +2197,7 @@
 				       wm8904->supplies);
 		break;
 	}
-	codec->bias_level = level;
+	codec->dapm.bias_level = level;
 	return 0;
 }
 
@@ -2373,7 +2373,7 @@
 	int ret, i;
 
 	codec->cache_sync = 1;
-	codec->idle_bias_off = 1;
+	codec->dapm.idle_bias_off = 1;
 
 	switch (wm8904->devtype) {
 	case WM8904:
diff --git a/sound/soc/codecs/wm8940.c b/sound/soc/codecs/wm8940.c
index 23086e2..25580e3 100644
--- a/sound/soc/codecs/wm8940.c
+++ b/sound/soc/codecs/wm8940.c
@@ -35,7 +35,6 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/initval.h>
 #include <sound/tlv.h>
 
@@ -43,7 +42,6 @@
 
 struct wm8940_priv {
 	unsigned int sysclk;
-	u16 reg_cache[WM8940_CACHEREGNUM];
 	enum snd_soc_control_type control_type;
 	void *control_data;
 };
@@ -291,13 +289,14 @@
 
 static int wm8940_add_widgets(struct snd_soc_codec *codec)
 {
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 	int ret;
 
-	ret = snd_soc_dapm_new_controls(codec, wm8940_dapm_widgets,
+	ret = snd_soc_dapm_new_controls(dapm, wm8940_dapm_widgets,
 					ARRAY_SIZE(wm8940_dapm_widgets));
 	if (ret)
 		goto error_ret;
-	ret = snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+	ret = snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 	if (ret)
 		goto error_ret;
 
@@ -735,7 +734,6 @@
 		return ret;
 
 	return ret;
-;
 }
 
 static int wm8940_remove(struct snd_soc_codec *codec)
diff --git a/sound/soc/codecs/wm8955.c b/sound/soc/codecs/wm8955.c
index 2ac35b0..7167dfc 100644
--- a/sound/soc/codecs/wm8955.c
+++ b/sound/soc/codecs/wm8955.c
@@ -23,7 +23,6 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/initval.h>
 #include <sound/tlv.h>
 #include <sound/wm8955.h>
@@ -576,13 +575,14 @@
 
 static int wm8955_add_widgets(struct snd_soc_codec *codec)
 {
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
+
 	snd_soc_add_controls(codec, wm8955_snd_controls,
 			     ARRAY_SIZE(wm8955_snd_controls));
 
-	snd_soc_dapm_new_controls(codec, wm8955_dapm_widgets,
+	snd_soc_dapm_new_controls(dapm, wm8955_dapm_widgets,
 				  ARRAY_SIZE(wm8955_dapm_widgets));
-
-	snd_soc_dapm_add_routes(codec, wm8955_intercon,
+	snd_soc_dapm_add_routes(dapm, wm8955_intercon,
 				ARRAY_SIZE(wm8955_intercon));
 
 	return 0;
@@ -786,7 +786,7 @@
 		break;
 
 	case SND_SOC_BIAS_STANDBY:
-		if (codec->bias_level == SND_SOC_BIAS_OFF) {
+		if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
 			ret = regulator_bulk_enable(ARRAY_SIZE(wm8955->supplies),
 						    wm8955->supplies);
 			if (ret != 0) {
@@ -850,7 +850,7 @@
 				       wm8955->supplies);
 		break;
 	}
-	codec->bias_level = level;
+	codec->dapm.bias_level = level;
 	return 0;
 }
 
diff --git a/sound/soc/codecs/wm8960.c b/sound/soc/codecs/wm8960.c
index ff6ff2f..4393394 100644
--- a/sound/soc/codecs/wm8960.c
+++ b/sound/soc/codecs/wm8960.c
@@ -20,7 +20,6 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/initval.h>
 #include <sound/tlv.h>
 #include <sound/wm8960.h>
@@ -72,7 +71,6 @@
 };
 
 struct wm8960_priv {
-	u16 reg_cache[WM8960_CACHEREGNUM];
 	enum snd_soc_control_type control_type;
 	void *control_data;
 	int (*set_bias_level)(struct snd_soc_codec *,
@@ -389,27 +387,28 @@
 {
 	struct wm8960_data *pdata = codec->dev->platform_data;
 	struct wm8960_priv *wm8960 = snd_soc_codec_get_drvdata(codec);
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 	struct snd_soc_dapm_widget *w;
 
-	snd_soc_dapm_new_controls(codec, wm8960_dapm_widgets,
+	snd_soc_dapm_new_controls(dapm, wm8960_dapm_widgets,
 				  ARRAY_SIZE(wm8960_dapm_widgets));
 
-	snd_soc_dapm_add_routes(codec, audio_paths, ARRAY_SIZE(audio_paths));
+	snd_soc_dapm_add_routes(dapm, audio_paths, ARRAY_SIZE(audio_paths));
 
 	/* In capless mode OUT3 is used to provide VMID for the
 	 * headphone outputs, otherwise it is used as a mono mixer.
 	 */
 	if (pdata && pdata->capless) {
-		snd_soc_dapm_new_controls(codec, wm8960_dapm_widgets_capless,
+		snd_soc_dapm_new_controls(dapm, wm8960_dapm_widgets_capless,
 					  ARRAY_SIZE(wm8960_dapm_widgets_capless));
 
-		snd_soc_dapm_add_routes(codec, audio_paths_capless,
+		snd_soc_dapm_add_routes(dapm, audio_paths_capless,
 					ARRAY_SIZE(audio_paths_capless));
 	} else {
-		snd_soc_dapm_new_controls(codec, wm8960_dapm_widgets_out3,
+		snd_soc_dapm_new_controls(dapm, wm8960_dapm_widgets_out3,
 					  ARRAY_SIZE(wm8960_dapm_widgets_out3));
 
-		snd_soc_dapm_add_routes(codec, audio_paths_out3,
+		snd_soc_dapm_add_routes(dapm, audio_paths_out3,
 					ARRAY_SIZE(audio_paths_out3));
 	}
 
@@ -418,7 +417,9 @@
 	 * list each time to find the desired power state do so now
 	 * and save the result.
 	 */
-	list_for_each_entry(w, &codec->dapm_widgets, list) {
+	list_for_each_entry(w, &codec->card->widgets, list) {
+		if (w->dapm != &codec->dapm)
+			continue;
 		if (strcmp(w->name, "LOUT1 PGA") == 0)
 			wm8960->lout1 = w;
 		if (strcmp(w->name, "ROUT1 PGA") == 0)
@@ -573,7 +574,7 @@
 		break;
 
 	case SND_SOC_BIAS_STANDBY:
-		if (codec->bias_level == SND_SOC_BIAS_OFF) {
+		if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
 			/* Enable anti-pop features */
 			snd_soc_write(codec, WM8960_APOP1,
 				      WM8960_POBCTRL | WM8960_SOFT_ST |
@@ -611,7 +612,7 @@
 		break;
 	}
 
-	codec->bias_level = level;
+	codec->dapm.bias_level = level;
 
 	return 0;
 }
@@ -627,7 +628,7 @@
 		break;
 
 	case SND_SOC_BIAS_PREPARE:
-		switch (codec->bias_level) {
+		switch (codec->dapm.bias_level) {
 		case SND_SOC_BIAS_STANDBY:
 			/* Enable anti pop mode */
 			snd_soc_update_bits(codec, WM8960_APOP1,
@@ -682,7 +683,7 @@
 		break;
 
 	case SND_SOC_BIAS_STANDBY:
-		switch (codec->bias_level) {
+		switch (codec->dapm.bias_level) {
 		case SND_SOC_BIAS_PREPARE:
 			/* Disable HP discharge */
 			snd_soc_update_bits(codec, WM8960_APOP2,
@@ -706,7 +707,7 @@
 		break;
 	}
 
-	codec->bias_level = level;
+	codec->dapm.bias_level = level;
 
 	return 0;
 }
diff --git a/sound/soc/codecs/wm8961.c b/sound/soc/codecs/wm8961.c
index 8340485..55252e7 100644
--- a/sound/soc/codecs/wm8961.c
+++ b/sound/soc/codecs/wm8961.c
@@ -23,7 +23,6 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/initval.h>
 #include <sound/tlv.h>
 
@@ -290,7 +289,6 @@
 struct wm8961_priv {
 	enum snd_soc_control_type control_type;
 	int sysclk;
-	u16 reg_cache[WM8961_MAX_REGISTER];
 };
 
 static int wm8961_volatile_register(unsigned int reg)
@@ -882,7 +880,7 @@
 		break;
 
 	case SND_SOC_BIAS_PREPARE:
-		if (codec->bias_level == SND_SOC_BIAS_STANDBY) {
+		if (codec->dapm.bias_level == SND_SOC_BIAS_STANDBY) {
 			/* Enable bias generation */
 			reg = snd_soc_read(codec, WM8961_ANTI_POP);
 			reg |= WM8961_BUFIOEN | WM8961_BUFDCOPEN;
@@ -897,7 +895,7 @@
 		break;
 
 	case SND_SOC_BIAS_STANDBY:
-		if (codec->bias_level == SND_SOC_BIAS_PREPARE) {
+		if (codec->dapm.bias_level == SND_SOC_BIAS_PREPARE) {
 			/* VREF off */
 			reg = snd_soc_read(codec, WM8961_PWR_MGMT_1);
 			reg &= ~WM8961_VREF;
@@ -919,7 +917,7 @@
 		break;
 	}
 
-	codec->bias_level = level;
+	codec->dapm.bias_level = level;
 
 	return 0;
 }
@@ -959,6 +957,7 @@
 
 static int wm8961_probe(struct snd_soc_codec *codec)
 {
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 	int ret = 0;
 	u16 reg;
 
@@ -1024,9 +1023,9 @@
 
 	snd_soc_add_controls(codec, wm8961_snd_controls,
 				ARRAY_SIZE(wm8961_snd_controls));
-	snd_soc_dapm_new_controls(codec, wm8961_dapm_widgets,
+	snd_soc_dapm_new_controls(dapm, wm8961_dapm_widgets,
 				  ARRAY_SIZE(wm8961_dapm_widgets));
-	snd_soc_dapm_add_routes(codec, audio_paths, ARRAY_SIZE(audio_paths));
+	snd_soc_dapm_add_routes(dapm, audio_paths, ARRAY_SIZE(audio_paths));
 
 	return 0;
 }
diff --git a/sound/soc/codecs/wm8962.c b/sound/soc/codecs/wm8962.c
index 7c421cc..b9cb1fc 100644
--- a/sound/soc/codecs/wm8962.c
+++ b/sound/soc/codecs/wm8962.c
@@ -29,10 +29,10 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/initval.h>
 #include <sound/tlv.h>
 #include <sound/wm8962.h>
+#include <trace/events/asoc.h>
 
 #include "wm8962.h"
 
@@ -1956,7 +1956,7 @@
 
 static int wm8962_reset(struct snd_soc_codec *codec)
 {
-	return snd_soc_write(codec, WM8962_SOFTWARE_RESET, 0);
+	return snd_soc_write(codec, WM8962_SOFTWARE_RESET, 0x6243);
 }
 
 static const DECLARE_TLV_DB_SCALE(inpga_tlv, -2325, 75, 0);
@@ -2677,6 +2677,7 @@
 static int wm8962_add_widgets(struct snd_soc_codec *codec)
 {
 	struct wm8962_pdata *pdata = dev_get_platdata(codec->dev);
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 
 	snd_soc_add_controls(codec, wm8962_snd_controls,
 			     ARRAY_SIZE(wm8962_snd_controls));
@@ -2688,26 +2689,26 @@
 				     ARRAY_SIZE(wm8962_spk_stereo_controls));
 
 
-	snd_soc_dapm_new_controls(codec, wm8962_dapm_widgets,
+	snd_soc_dapm_new_controls(dapm, wm8962_dapm_widgets,
 				  ARRAY_SIZE(wm8962_dapm_widgets));
 	if (pdata && pdata->spk_mono)
-		snd_soc_dapm_new_controls(codec, wm8962_dapm_spk_mono_widgets,
+		snd_soc_dapm_new_controls(dapm, wm8962_dapm_spk_mono_widgets,
 					  ARRAY_SIZE(wm8962_dapm_spk_mono_widgets));
 	else
-		snd_soc_dapm_new_controls(codec, wm8962_dapm_spk_stereo_widgets,
+		snd_soc_dapm_new_controls(dapm, wm8962_dapm_spk_stereo_widgets,
 					  ARRAY_SIZE(wm8962_dapm_spk_stereo_widgets));
 
-	snd_soc_dapm_add_routes(codec, wm8962_intercon,
+	snd_soc_dapm_add_routes(dapm, wm8962_intercon,
 				ARRAY_SIZE(wm8962_intercon));
 	if (pdata && pdata->spk_mono)
-		snd_soc_dapm_add_routes(codec, wm8962_spk_mono_intercon,
+		snd_soc_dapm_add_routes(dapm, wm8962_spk_mono_intercon,
 					ARRAY_SIZE(wm8962_spk_mono_intercon));
 	else
-		snd_soc_dapm_add_routes(codec, wm8962_spk_stereo_intercon,
+		snd_soc_dapm_add_routes(dapm, wm8962_spk_stereo_intercon,
 					ARRAY_SIZE(wm8962_spk_stereo_intercon));
 
 
-	snd_soc_dapm_disable_pin(codec, "Beep");
+	snd_soc_dapm_disable_pin(dapm, "Beep");
 
 	return 0;
 }
@@ -2814,7 +2815,7 @@
 	struct wm8962_priv *wm8962 = snd_soc_codec_get_drvdata(codec);
 	int ret;
 
-	if (level == codec->bias_level)
+	if (level == codec->dapm.bias_level)
 		return 0;
 
 	switch (level) {
@@ -2828,7 +2829,7 @@
 		break;
 
 	case SND_SOC_BIAS_STANDBY:
-		if (codec->bias_level == SND_SOC_BIAS_OFF) {
+		if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
 			ret = regulator_bulk_enable(ARRAY_SIZE(wm8962->supplies),
 						    wm8962->supplies);
 			if (ret != 0) {
@@ -2878,7 +2879,7 @@
 				       wm8962->supplies);
 		break;
 	}
-	codec->bias_level = level;
+	codec->dapm.bias_level = level;
 	return 0;
 }
 
@@ -3348,6 +3349,12 @@
 	if (active & (WM8962_MICSCD_EINT | WM8962_MICD_EINT)) {
 		dev_dbg(codec->dev, "Microphone event detected\n");
 
+#ifndef CONFIG_SND_SOC_WM8962_MODULE
+		trace_snd_soc_jack_irq(dev_name(codec->dev));
+#endif
+
+		pm_wakeup_event(codec->dev, 300);
+
 		schedule_delayed_work(&wm8962->mic_work,
 				      msecs_to_jiffies(250));
 	}
@@ -3433,6 +3440,7 @@
 	struct wm8962_priv *wm8962 =
 		container_of(work, struct wm8962_priv, beep_work);
 	struct snd_soc_codec *codec = wm8962->codec;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 	int i;
 	int reg = 0;
 	int best = 0;
@@ -3449,16 +3457,16 @@
 
 		reg = WM8962_BEEP_ENA | (best << WM8962_BEEP_RATE_SHIFT);
 
-		snd_soc_dapm_enable_pin(codec, "Beep");
+		snd_soc_dapm_enable_pin(dapm, "Beep");
 	} else {
 		dev_dbg(codec->dev, "Disabling beep\n");
-		snd_soc_dapm_disable_pin(codec, "Beep");
+		snd_soc_dapm_disable_pin(dapm, "Beep");
 	}
 
 	snd_soc_update_bits(codec, WM8962_BEEP_GENERATOR_1,
 			    WM8962_BEEP_ENA | WM8962_BEEP_RATE_MASK, reg);
 
-	snd_soc_dapm_sync(codec);
+	snd_soc_dapm_sync(dapm);
 }
 
 /* For usability define a way of injecting beep events for the device -
@@ -3706,7 +3714,7 @@
 	INIT_DELAYED_WORK(&wm8962->mic_work, wm8962_mic_work);
 
 	codec->cache_sync = 1;
-	codec->idle_bias_off = 1;
+	codec->dapm.idle_bias_off = 1;
 
 	ret = snd_soc_codec_set_cache_io(codec, 16, 16, SND_SOC_I2C);
 	if (ret != 0) {
@@ -3865,7 +3873,6 @@
 err_get:
 	regulator_bulk_free(ARRAY_SIZE(wm8962->supplies), wm8962->supplies);
 err:
-	kfree(wm8962);
 	return ret;
 }
 
diff --git a/sound/soc/codecs/wm8971.c b/sound/soc/codecs/wm8971.c
index 9f18db6..572bb80 100644
--- a/sound/soc/codecs/wm8971.c
+++ b/sound/soc/codecs/wm8971.c
@@ -25,7 +25,6 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/initval.h>
 
 #include "wm8971.h"
@@ -333,10 +332,11 @@
 
 static int wm8971_add_widgets(struct snd_soc_codec *codec)
 {
-	snd_soc_dapm_new_controls(codec, wm8971_dapm_widgets,
-				  ARRAY_SIZE(wm8971_dapm_widgets));
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 
-	snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+	snd_soc_dapm_new_controls(dapm, wm8971_dapm_widgets,
+				  ARRAY_SIZE(wm8971_dapm_widgets));
+	snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 
 	return 0;
 }
@@ -553,7 +553,7 @@
 		snd_soc_write(codec, WM8971_PWR1, 0x0001);
 		break;
 	}
-	codec->bias_level = level;
+	codec->dapm.bias_level = level;
 	return 0;
 }
 
@@ -590,9 +590,11 @@
 
 static void wm8971_work(struct work_struct *work)
 {
-	struct snd_soc_codec *codec =
-		container_of(work, struct snd_soc_codec, delayed_work.work);
-	wm8971_set_bias_level(codec, codec->bias_level);
+	struct snd_soc_dapm_context *dapm =
+		container_of(work, struct snd_soc_dapm_context,
+			     delayed_work.work);
+	struct snd_soc_codec *codec = dapm->codec;
+	wm8971_set_bias_level(codec, codec->dapm.bias_level);
 }
 
 static int wm8971_suspend(struct snd_soc_codec *codec, pm_message_t state)
@@ -620,11 +622,11 @@
 	wm8971_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
 
 	/* charge wm8971 caps */
-	if (codec->suspend_bias_level == SND_SOC_BIAS_ON) {
+	if (codec->dapm.suspend_bias_level == SND_SOC_BIAS_ON) {
 		reg = snd_soc_read(codec, WM8971_PWR1) & 0xfe3e;
 		snd_soc_write(codec, WM8971_PWR1, reg | 0x01c0);
-		codec->bias_level = SND_SOC_BIAS_ON;
-		queue_delayed_work(wm8971_workq, &codec->delayed_work,
+		codec->dapm.bias_level = SND_SOC_BIAS_ON;
+		queue_delayed_work(wm8971_workq, &codec->dapm.delayed_work,
 			msecs_to_jiffies(1000));
 	}
 
@@ -643,7 +645,7 @@
 		return ret;
 	}
 
-	INIT_DELAYED_WORK(&codec->delayed_work, wm8971_work);
+	INIT_DELAYED_WORK(&codec->dapm.delayed_work, wm8971_work);
 	wm8971_workq = create_workqueue("wm8971");
 	if (wm8971_workq == NULL)
 		return -ENOMEM;
@@ -653,8 +655,8 @@
 	/* charge output caps - set vmid to 5k for quick power up */
 	reg = snd_soc_read(codec, WM8971_PWR1) & 0xfe3e;
 	snd_soc_write(codec, WM8971_PWR1, reg | 0x01c0);
-	codec->bias_level = SND_SOC_BIAS_STANDBY;
-	queue_delayed_work(wm8971_workq, &codec->delayed_work,
+	codec->dapm.bias_level = SND_SOC_BIAS_STANDBY;
+	queue_delayed_work(wm8971_workq, &codec->dapm.delayed_work,
 		msecs_to_jiffies(1000));
 
 	/* set the update bits */
diff --git a/sound/soc/codecs/wm8974.c b/sound/soc/codecs/wm8974.c
index b4363f6..ca646a8 100644
--- a/sound/soc/codecs/wm8974.c
+++ b/sound/soc/codecs/wm8974.c
@@ -23,7 +23,6 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/initval.h>
 #include <sound/tlv.h>
 
@@ -52,7 +51,6 @@
 
 struct wm8974_priv {
 	enum snd_soc_control_type control_type;
-	u16 reg_cache[WM8974_CACHEREGNUM];
 };
 
 #define wm8974_reset(c)	snd_soc_write(c, WM8974_RESET, 0)
@@ -274,10 +272,11 @@
 
 static int wm8974_add_widgets(struct snd_soc_codec *codec)
 {
-	snd_soc_dapm_new_controls(codec, wm8974_dapm_widgets,
-				  ARRAY_SIZE(wm8974_dapm_widgets));
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 
-	snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+	snd_soc_dapm_new_controls(dapm, wm8974_dapm_widgets,
+				  ARRAY_SIZE(wm8974_dapm_widgets));
+	snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 
 	return 0;
 }
@@ -530,7 +529,7 @@
 	case SND_SOC_BIAS_STANDBY:
 		power1 |= WM8974_POWER1_BIASEN | WM8974_POWER1_BUFIOEN;
 
-		if (codec->bias_level == SND_SOC_BIAS_OFF) {
+		if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
 			/* Initial cap charge at VMID 5k */
 			snd_soc_write(codec, WM8974_POWER1, power1 | 0x3);
 			mdelay(100);
@@ -547,7 +546,7 @@
 		break;
 	}
 
-	codec->bias_level = level;
+	codec->dapm.bias_level = level;
 	return 0;
 }
 
diff --git a/sound/soc/codecs/wm8978.c b/sound/soc/codecs/wm8978.c
index 13b979a..4bbc344 100644
--- a/sound/soc/codecs/wm8978.c
+++ b/sound/soc/codecs/wm8978.c
@@ -24,7 +24,6 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/initval.h>
 #include <sound/tlv.h>
 #include <asm/div64.h>
@@ -60,7 +59,6 @@
 	unsigned int f_opclk;
 	int mclk_idx;
 	enum wm8978_sysclk_src sysclk;
-	u16 reg_cache[WM8978_CACHEREGNUM];
 };
 
 static const char *wm8978_companding[] = {"Off", "NC", "u-law", "A-law"};
@@ -355,11 +353,12 @@
 
 static int wm8978_add_widgets(struct snd_soc_codec *codec)
 {
-	snd_soc_dapm_new_controls(codec, wm8978_dapm_widgets,
-				  ARRAY_SIZE(wm8978_dapm_widgets));
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 
+	snd_soc_dapm_new_controls(dapm, wm8978_dapm_widgets,
+				  ARRAY_SIZE(wm8978_dapm_widgets));
 	/* set up the WM8978 audio map */
-	snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+	snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 
 	return 0;
 }
@@ -837,7 +836,7 @@
 		/* bit 3: enable bias, bit 2: enable I/O tie off buffer */
 		power1 |= 0xc;
 
-		if (codec->bias_level == SND_SOC_BIAS_OFF) {
+		if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
 			/* Initial cap charge at VMID 5k */
 			snd_soc_write(codec, WM8978_POWER_MANAGEMENT_1,
 				      power1 | 0x3);
@@ -857,7 +856,7 @@
 
 	dev_dbg(codec->dev, "%s: %d, %x\n", __func__, level, power1);
 
-	codec->bias_level = level;
+	codec->dapm.bias_level = level;
 	return 0;
 }
 
diff --git a/sound/soc/codecs/wm8985.c b/sound/soc/codecs/wm8985.c
index fd2e7cc..bae510a 100644
--- a/sound/soc/codecs/wm8985.c
+++ b/sound/soc/codecs/wm8985.c
@@ -26,7 +26,6 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/initval.h>
 #include <sound/tlv.h>
 
@@ -533,10 +532,11 @@
 
 static int wm8985_add_widgets(struct snd_soc_codec *codec)
 {
-	snd_soc_dapm_new_controls(codec, wm8985_dapm_widgets,
-				  ARRAY_SIZE(wm8985_dapm_widgets));
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 
-	snd_soc_dapm_add_routes(codec, audio_map,
+	snd_soc_dapm_new_controls(dapm, wm8985_dapm_widgets,
+				  ARRAY_SIZE(wm8985_dapm_widgets));
+	snd_soc_dapm_add_routes(dapm, audio_map,
 				ARRAY_SIZE(audio_map));
 	return 0;
 }
@@ -879,7 +879,7 @@
 				    1 << WM8985_VMIDSEL_SHIFT);
 		break;
 	case SND_SOC_BIAS_STANDBY:
-		if (codec->bias_level == SND_SOC_BIAS_OFF) {
+		if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
 			ret = regulator_bulk_enable(ARRAY_SIZE(wm8985->supplies),
 						    wm8985->supplies);
 			if (ret) {
@@ -939,7 +939,7 @@
 		break;
 	}
 
-	codec->bias_level = level;
+	codec->dapm.bias_level = level;
 	return 0;
 }
 
diff --git a/sound/soc/codecs/wm8988.c b/sound/soc/codecs/wm8988.c
index d7f2597..d7170f1 100644
--- a/sound/soc/codecs/wm8988.c
+++ b/sound/soc/codecs/wm8988.c
@@ -25,7 +25,6 @@
 #include <sound/pcm_params.h>
 #include <sound/tlv.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/initval.h>
 
 #include "wm8988.h"
@@ -54,7 +53,6 @@
 	unsigned int sysclk;
 	enum snd_soc_control_type control_type;
 	struct snd_pcm_hw_constraint_list *sysclk_constraints;
-	u16 reg_cache[WM8988_NUM_REG];
 };
 
 
@@ -677,7 +675,7 @@
 		break;
 
 	case SND_SOC_BIAS_STANDBY:
-		if (codec->bias_level == SND_SOC_BIAS_OFF) {
+		if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
 			/* VREF, VMID=2x5k */
 			snd_soc_write(codec, WM8988_PWR1, pwr_reg | 0x1c1);
 
@@ -693,7 +691,7 @@
 		snd_soc_write(codec, WM8988_PWR1, 0x0000);
 		break;
 	}
-	codec->bias_level = level;
+	codec->dapm.bias_level = level;
 	return 0;
 }
 
@@ -759,6 +757,7 @@
 static int wm8988_probe(struct snd_soc_codec *codec)
 {
 	struct wm8988_priv *wm8988 = snd_soc_codec_get_drvdata(codec);
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 	int ret = 0;
 	u16 reg;
 
@@ -790,9 +789,9 @@
 
 	snd_soc_add_controls(codec, wm8988_snd_controls,
 				ARRAY_SIZE(wm8988_snd_controls));
-	snd_soc_dapm_new_controls(codec, wm8988_dapm_widgets,
+	snd_soc_dapm_new_controls(dapm, wm8988_dapm_widgets,
 				  ARRAY_SIZE(wm8988_dapm_widgets));
-	snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+	snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 
 	return 0;
 }
diff --git a/sound/soc/codecs/wm8990.c b/sound/soc/codecs/wm8990.c
index 264828e..5c87a63 100644
--- a/sound/soc/codecs/wm8990.c
+++ b/sound/soc/codecs/wm8990.c
@@ -23,7 +23,6 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/initval.h>
 #include <sound/tlv.h>
 #include <asm/div64.h>
@@ -914,11 +913,12 @@
 
 static int wm8990_add_widgets(struct snd_soc_codec *codec)
 {
-	snd_soc_dapm_new_controls(codec, wm8990_dapm_widgets,
-				  ARRAY_SIZE(wm8990_dapm_widgets));
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 
+	snd_soc_dapm_new_controls(dapm, wm8990_dapm_widgets,
+				  ARRAY_SIZE(wm8990_dapm_widgets));
 	/* set up the WM8990 audio map */
-	snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+	snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 
 	return 0;
 }
@@ -1170,7 +1170,7 @@
 		break;
 
 	case SND_SOC_BIAS_STANDBY:
-		if (codec->bias_level == SND_SOC_BIAS_OFF) {
+		if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
 			/* Enable all output discharge bits */
 			snd_soc_write(codec, WM8990_ANTIPOP1, WM8990_DIS_LLINE |
 				WM8990_DIS_RLINE | WM8990_DIS_OUT3 |
@@ -1266,7 +1266,7 @@
 		break;
 	}
 
-	codec->bias_level = level;
+	codec->dapm.bias_level = level;
 	return 0;
 }
 
diff --git a/sound/soc/codecs/wm8993.c b/sound/soc/codecs/wm8993.c
index 589e3fa..18c0d9c 100644
--- a/sound/soc/codecs/wm8993.c
+++ b/sound/soc/codecs/wm8993.c
@@ -24,7 +24,6 @@
 #include <sound/pcm_params.h>
 #include <sound/tlv.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/initval.h>
 #include <sound/wm8993.h>
 
@@ -226,7 +225,6 @@
 
 struct wm8993_priv {
 	struct wm_hubs_data hubs_data;
-	u16 reg_cache[WM8993_REGISTER_COUNT];
 	struct regulator_bulk_data supplies[WM8993_NUM_SUPPLIES];
 	struct wm8993_platform_data pdata;
 	enum snd_soc_control_type control_type;
@@ -735,6 +733,7 @@
 					    0);
 		}
 		wm8993->class_w_users++;
+		wm8993->hubs_data.class_w = true;
 	}
 
 	/* Implement the change */
@@ -751,6 +750,7 @@
 					    WM8993_CP_DYN_V);
 		}
 		wm8993->class_w_users--;
+		wm8993->hubs_data.class_w = false;
 	}
 
 	dev_dbg(codec->dev, "Indirect DAC use count now %d\n",
@@ -968,7 +968,7 @@
 		break;
 
 	case SND_SOC_BIAS_STANDBY:
-		if (codec->bias_level == SND_SOC_BIAS_OFF) {
+		if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
 			ret = regulator_bulk_enable(ARRAY_SIZE(wm8993->supplies),
 						    wm8993->supplies);
 			if (ret != 0)
@@ -1029,6 +1029,12 @@
 				    WM8993_VMID_SEL_MASK | WM8993_BIAS_ENA,
 				    0);
 
+		snd_soc_update_bits(codec, WM8993_ANTIPOP2,
+				    WM8993_STARTUP_BIAS_ENA |
+				    WM8993_VMID_BUF_ENA |
+				    WM8993_VMID_RAMP_MASK |
+				    WM8993_BIAS_SRC, 0);
+
 #ifdef CONFIG_REGULATOR
                /* Post 2.6.34 we will be able to get a callback when
                 * the regulators are disabled which we can use but
@@ -1043,7 +1049,7 @@
 		break;
 	}
 
-	codec->bias_level = level;
+	codec->dapm.bias_level = level;
 
 	return 0;
 }
@@ -1225,7 +1231,7 @@
 		       - wm8993->fs);
 	for (i = 1; i < ARRAY_SIZE(clk_sys_rates); i++) {
 		cur_val = abs((wm8993->sysclk_rate /
-			       clk_sys_rates[i].ratio) - wm8993->fs);;
+			       clk_sys_rates[i].ratio) - wm8993->fs);
 		if (cur_val < best_val) {
 			best = i;
 			best_val = cur_val;
@@ -1422,6 +1428,7 @@
 static int wm8993_probe(struct snd_soc_codec *codec)
 {
 	struct wm8993_priv *wm8993 = snd_soc_codec_get_drvdata(codec);
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 	int ret, i, val;
 
 	wm8993->hubs_data.hp_startup_mode = 1;
@@ -1503,11 +1510,11 @@
 				     ARRAY_SIZE(wm8993_eq_controls));
 	}
 
-	snd_soc_dapm_new_controls(codec, wm8993_dapm_widgets,
+	snd_soc_dapm_new_controls(dapm, wm8993_dapm_widgets,
 				  ARRAY_SIZE(wm8993_dapm_widgets));
 	wm_hubs_add_analogue_controls(codec);
 
-	snd_soc_dapm_add_routes(codec, routes, ARRAY_SIZE(routes));
+	snd_soc_dapm_add_routes(dapm, routes, ARRAY_SIZE(routes));
 	wm_hubs_add_analogue_routes(codec, wm8993->pdata.lineout1_diff,
 				    wm8993->pdata.lineout2_diff);
 
diff --git a/sound/soc/codecs/wm8994-tables.c b/sound/soc/codecs/wm8994-tables.c
new file mode 100644
index 0000000..68e9b02
--- /dev/null
+++ b/sound/soc/codecs/wm8994-tables.c
@@ -0,0 +1,3147 @@
+#include "wm8994.h"
+
+const struct wm8994_access_mask wm8994_access_masks[WM8994_CACHE_SIZE] = {
+	{ 0xFFFF, 0xFFFF }, /* R0     - Software Reset */
+	{ 0x3B37, 0x3B37 }, /* R1     - Power Management (1) */
+	{ 0x6BF0, 0x6BF0 }, /* R2     - Power Management (2) */
+	{ 0x3FF0, 0x3FF0 }, /* R3     - Power Management (3) */
+	{ 0x3F3F, 0x3F3F }, /* R4     - Power Management (4) */
+	{ 0x3F0F, 0x3F0F }, /* R5     - Power Management (5) */
+	{ 0x003F, 0x003F }, /* R6     - Power Management (6) */
+	{ 0x0000, 0x0000 }, /* R7 */
+	{ 0x0000, 0x0000 }, /* R8 */
+	{ 0x0000, 0x0000 }, /* R9 */
+	{ 0x0000, 0x0000 }, /* R10 */
+	{ 0x0000, 0x0000 }, /* R11 */
+	{ 0x0000, 0x0000 }, /* R12 */
+	{ 0x0000, 0x0000 }, /* R13 */
+	{ 0x0000, 0x0000 }, /* R14 */
+	{ 0x0000, 0x0000 }, /* R15 */
+	{ 0x0000, 0x0000 }, /* R16 */
+	{ 0x0000, 0x0000 }, /* R17 */
+	{ 0x0000, 0x0000 }, /* R18 */
+	{ 0x0000, 0x0000 }, /* R19 */
+	{ 0x0000, 0x0000 }, /* R20 */
+	{ 0x01C0, 0x01C0 }, /* R21    - Input Mixer (1) */
+	{ 0x0000, 0x0000 }, /* R22 */
+	{ 0x0000, 0x0000 }, /* R23 */
+	{ 0x00DF, 0x01DF }, /* R24    - Left Line Input 1&2 Volume */
+	{ 0x00DF, 0x01DF }, /* R25    - Left Line Input 3&4 Volume */
+	{ 0x00DF, 0x01DF }, /* R26    - Right Line Input 1&2 Volume */
+	{ 0x00DF, 0x01DF }, /* R27    - Right Line Input 3&4 Volume */
+	{ 0x00FF, 0x01FF }, /* R28    - Left Output Volume */
+	{ 0x00FF, 0x01FF }, /* R29    - Right Output Volume */
+	{ 0x0077, 0x0077 }, /* R30    - Line Outputs Volume */
+	{ 0x0030, 0x0030 }, /* R31    - HPOUT2 Volume */
+	{ 0x00FF, 0x01FF }, /* R32    - Left OPGA Volume */
+	{ 0x00FF, 0x01FF }, /* R33    - Right OPGA Volume */
+	{ 0x007F, 0x007F }, /* R34    - SPKMIXL Attenuation */
+	{ 0x017F, 0x017F }, /* R35    - SPKMIXR Attenuation */
+	{ 0x003F, 0x003F }, /* R36    - SPKOUT Mixers */
+	{ 0x003F, 0x003F }, /* R37    - ClassD */
+	{ 0x00FF, 0x01FF }, /* R38    - Speaker Volume Left */
+	{ 0x00FF, 0x01FF }, /* R39    - Speaker Volume Right */
+	{ 0x00FF, 0x00FF }, /* R40    - Input Mixer (2) */
+	{ 0x01B7, 0x01B7 }, /* R41    - Input Mixer (3) */
+	{ 0x01B7, 0x01B7 }, /* R42    - Input Mixer (4) */
+	{ 0x01C7, 0x01C7 }, /* R43    - Input Mixer (5) */
+	{ 0x01C7, 0x01C7 }, /* R44    - Input Mixer (6) */
+	{ 0x01FF, 0x01FF }, /* R45    - Output Mixer (1) */
+	{ 0x01FF, 0x01FF }, /* R46    - Output Mixer (2) */
+	{ 0x0FFF, 0x0FFF }, /* R47    - Output Mixer (3) */
+	{ 0x0FFF, 0x0FFF }, /* R48    - Output Mixer (4) */
+	{ 0x0FFF, 0x0FFF }, /* R49    - Output Mixer (5) */
+	{ 0x0FFF, 0x0FFF }, /* R50    - Output Mixer (6) */
+	{ 0x0038, 0x0038 }, /* R51    - HPOUT2 Mixer */
+	{ 0x0077, 0x0077 }, /* R52    - Line Mixer (1) */
+	{ 0x0077, 0x0077 }, /* R53    - Line Mixer (2) */
+	{ 0x03FF, 0x03FF }, /* R54    - Speaker Mixer */
+	{ 0x00C1, 0x00C1 }, /* R55    - Additional Control */
+	{ 0x00F0, 0x00F0 }, /* R56    - AntiPOP (1) */
+	{ 0x01EF, 0x01EF }, /* R57    - AntiPOP (2) */
+	{ 0x00FF, 0x00FF }, /* R58    - MICBIAS */
+	{ 0x000F, 0x000F }, /* R59    - LDO 1 */
+	{ 0x0007, 0x0007 }, /* R60    - LDO 2 */
+	{ 0x0000, 0x0000 }, /* R61 */
+	{ 0x0000, 0x0000 }, /* R62 */
+	{ 0x0000, 0x0000 }, /* R63 */
+	{ 0x0000, 0x0000 }, /* R64 */
+	{ 0x0000, 0x0000 }, /* R65 */
+	{ 0x0000, 0x0000 }, /* R66 */
+	{ 0x0000, 0x0000 }, /* R67 */
+	{ 0x0000, 0x0000 }, /* R68 */
+	{ 0x0000, 0x0000 }, /* R69 */
+	{ 0x0000, 0x0000 }, /* R70 */
+	{ 0x0000, 0x0000 }, /* R71 */
+	{ 0x0000, 0x0000 }, /* R72 */
+	{ 0x0000, 0x0000 }, /* R73 */
+	{ 0x0000, 0x0000 }, /* R74 */
+	{ 0x0000, 0x0000 }, /* R75 */
+	{ 0x8000, 0x8000 }, /* R76    - Charge Pump (1) */
+	{ 0x0000, 0x0000 }, /* R77 */
+	{ 0x0000, 0x0000 }, /* R78 */
+	{ 0x0000, 0x0000 }, /* R79 */
+	{ 0x0000, 0x0000 }, /* R80 */
+	{ 0x0301, 0x0301 }, /* R81    - Class W (1) */
+	{ 0x0000, 0x0000 }, /* R82 */
+	{ 0x0000, 0x0000 }, /* R83 */
+	{ 0x333F, 0x333F }, /* R84    - DC Servo (1) */
+	{ 0x0FEF, 0x0FEF }, /* R85    - DC Servo (2) */
+	{ 0x0000, 0x0000 }, /* R86 */
+	{ 0xFFFF, 0xFFFF }, /* R87    - DC Servo (4) */
+	{ 0x0333, 0x0000 }, /* R88    - DC Servo Readback */
+	{ 0x0000, 0x0000 }, /* R89 */
+	{ 0x0000, 0x0000 }, /* R90 */
+	{ 0x0000, 0x0000 }, /* R91 */
+	{ 0x0000, 0x0000 }, /* R92 */
+	{ 0x0000, 0x0000 }, /* R93 */
+	{ 0x0000, 0x0000 }, /* R94 */
+	{ 0x0000, 0x0000 }, /* R95 */
+	{ 0x00EE, 0x00EE }, /* R96    - Analogue HP (1) */
+	{ 0x0000, 0x0000 }, /* R97 */
+	{ 0x0000, 0x0000 }, /* R98 */
+	{ 0x0000, 0x0000 }, /* R99 */
+	{ 0x0000, 0x0000 }, /* R100 */
+	{ 0x0000, 0x0000 }, /* R101 */
+	{ 0x0000, 0x0000 }, /* R102 */
+	{ 0x0000, 0x0000 }, /* R103 */
+	{ 0x0000, 0x0000 }, /* R104 */
+	{ 0x0000, 0x0000 }, /* R105 */
+	{ 0x0000, 0x0000 }, /* R106 */
+	{ 0x0000, 0x0000 }, /* R107 */
+	{ 0x0000, 0x0000 }, /* R108 */
+	{ 0x0000, 0x0000 }, /* R109 */
+	{ 0x0000, 0x0000 }, /* R110 */
+	{ 0x0000, 0x0000 }, /* R111 */
+	{ 0x0000, 0x0000 }, /* R112 */
+	{ 0x0000, 0x0000 }, /* R113 */
+	{ 0x0000, 0x0000 }, /* R114 */
+	{ 0x0000, 0x0000 }, /* R115 */
+	{ 0x0000, 0x0000 }, /* R116 */
+	{ 0x0000, 0x0000 }, /* R117 */
+	{ 0x0000, 0x0000 }, /* R118 */
+	{ 0x0000, 0x0000 }, /* R119 */
+	{ 0x0000, 0x0000 }, /* R120 */
+	{ 0x0000, 0x0000 }, /* R121 */
+	{ 0x0000, 0x0000 }, /* R122 */
+	{ 0x0000, 0x0000 }, /* R123 */
+	{ 0x0000, 0x0000 }, /* R124 */
+	{ 0x0000, 0x0000 }, /* R125 */
+	{ 0x0000, 0x0000 }, /* R126 */
+	{ 0x0000, 0x0000 }, /* R127 */
+	{ 0x0000, 0x0000 }, /* R128 */
+	{ 0x0000, 0x0000 }, /* R129 */
+	{ 0x0000, 0x0000 }, /* R130 */
+	{ 0x0000, 0x0000 }, /* R131 */
+	{ 0x0000, 0x0000 }, /* R132 */
+	{ 0x0000, 0x0000 }, /* R133 */
+	{ 0x0000, 0x0000 }, /* R134 */
+	{ 0x0000, 0x0000 }, /* R135 */
+	{ 0x0000, 0x0000 }, /* R136 */
+	{ 0x0000, 0x0000 }, /* R137 */
+	{ 0x0000, 0x0000 }, /* R138 */
+	{ 0x0000, 0x0000 }, /* R139 */
+	{ 0x0000, 0x0000 }, /* R140 */
+	{ 0x0000, 0x0000 }, /* R141 */
+	{ 0x0000, 0x0000 }, /* R142 */
+	{ 0x0000, 0x0000 }, /* R143 */
+	{ 0x0000, 0x0000 }, /* R144 */
+	{ 0x0000, 0x0000 }, /* R145 */
+	{ 0x0000, 0x0000 }, /* R146 */
+	{ 0x0000, 0x0000 }, /* R147 */
+	{ 0x0000, 0x0000 }, /* R148 */
+	{ 0x0000, 0x0000 }, /* R149 */
+	{ 0x0000, 0x0000 }, /* R150 */
+	{ 0x0000, 0x0000 }, /* R151 */
+	{ 0x0000, 0x0000 }, /* R152 */
+	{ 0x0000, 0x0000 }, /* R153 */
+	{ 0x0000, 0x0000 }, /* R154 */
+	{ 0x0000, 0x0000 }, /* R155 */
+	{ 0x0000, 0x0000 }, /* R156 */
+	{ 0x0000, 0x0000 }, /* R157 */
+	{ 0x0000, 0x0000 }, /* R158 */
+	{ 0x0000, 0x0000 }, /* R159 */
+	{ 0x0000, 0x0000 }, /* R160 */
+	{ 0x0000, 0x0000 }, /* R161 */
+	{ 0x0000, 0x0000 }, /* R162 */
+	{ 0x0000, 0x0000 }, /* R163 */
+	{ 0x0000, 0x0000 }, /* R164 */
+	{ 0x0000, 0x0000 }, /* R165 */
+	{ 0x0000, 0x0000 }, /* R166 */
+	{ 0x0000, 0x0000 }, /* R167 */
+	{ 0x0000, 0x0000 }, /* R168 */
+	{ 0x0000, 0x0000 }, /* R169 */
+	{ 0x0000, 0x0000 }, /* R170 */
+	{ 0x0000, 0x0000 }, /* R171 */
+	{ 0x0000, 0x0000 }, /* R172 */
+	{ 0x0000, 0x0000 }, /* R173 */
+	{ 0x0000, 0x0000 }, /* R174 */
+	{ 0x0000, 0x0000 }, /* R175 */
+	{ 0x0000, 0x0000 }, /* R176 */
+	{ 0x0000, 0x0000 }, /* R177 */
+	{ 0x0000, 0x0000 }, /* R178 */
+	{ 0x0000, 0x0000 }, /* R179 */
+	{ 0x0000, 0x0000 }, /* R180 */
+	{ 0x0000, 0x0000 }, /* R181 */
+	{ 0x0000, 0x0000 }, /* R182 */
+	{ 0x0000, 0x0000 }, /* R183 */
+	{ 0x0000, 0x0000 }, /* R184 */
+	{ 0x0000, 0x0000 }, /* R185 */
+	{ 0x0000, 0x0000 }, /* R186 */
+	{ 0x0000, 0x0000 }, /* R187 */
+	{ 0x0000, 0x0000 }, /* R188 */
+	{ 0x0000, 0x0000 }, /* R189 */
+	{ 0x0000, 0x0000 }, /* R190 */
+	{ 0x0000, 0x0000 }, /* R191 */
+	{ 0x0000, 0x0000 }, /* R192 */
+	{ 0x0000, 0x0000 }, /* R193 */
+	{ 0x0000, 0x0000 }, /* R194 */
+	{ 0x0000, 0x0000 }, /* R195 */
+	{ 0x0000, 0x0000 }, /* R196 */
+	{ 0x0000, 0x0000 }, /* R197 */
+	{ 0x0000, 0x0000 }, /* R198 */
+	{ 0x0000, 0x0000 }, /* R199 */
+	{ 0x0000, 0x0000 }, /* R200 */
+	{ 0x0000, 0x0000 }, /* R201 */
+	{ 0x0000, 0x0000 }, /* R202 */
+	{ 0x0000, 0x0000 }, /* R203 */
+	{ 0x0000, 0x0000 }, /* R204 */
+	{ 0x0000, 0x0000 }, /* R205 */
+	{ 0x0000, 0x0000 }, /* R206 */
+	{ 0x0000, 0x0000 }, /* R207 */
+	{ 0x0000, 0x0000 }, /* R208 */
+	{ 0x0000, 0x0000 }, /* R209 */
+	{ 0x0000, 0x0000 }, /* R210 */
+	{ 0x0000, 0x0000 }, /* R211 */
+	{ 0x0000, 0x0000 }, /* R212 */
+	{ 0x0000, 0x0000 }, /* R213 */
+	{ 0x0000, 0x0000 }, /* R214 */
+	{ 0x0000, 0x0000 }, /* R215 */
+	{ 0x0000, 0x0000 }, /* R216 */
+	{ 0x0000, 0x0000 }, /* R217 */
+	{ 0x0000, 0x0000 }, /* R218 */
+	{ 0x0000, 0x0000 }, /* R219 */
+	{ 0x0000, 0x0000 }, /* R220 */
+	{ 0x0000, 0x0000 }, /* R221 */
+	{ 0x0000, 0x0000 }, /* R222 */
+	{ 0x0000, 0x0000 }, /* R223 */
+	{ 0x0000, 0x0000 }, /* R224 */
+	{ 0x0000, 0x0000 }, /* R225 */
+	{ 0x0000, 0x0000 }, /* R226 */
+	{ 0x0000, 0x0000 }, /* R227 */
+	{ 0x0000, 0x0000 }, /* R228 */
+	{ 0x0000, 0x0000 }, /* R229 */
+	{ 0x0000, 0x0000 }, /* R230 */
+	{ 0x0000, 0x0000 }, /* R231 */
+	{ 0x0000, 0x0000 }, /* R232 */
+	{ 0x0000, 0x0000 }, /* R233 */
+	{ 0x0000, 0x0000 }, /* R234 */
+	{ 0x0000, 0x0000 }, /* R235 */
+	{ 0x0000, 0x0000 }, /* R236 */
+	{ 0x0000, 0x0000 }, /* R237 */
+	{ 0x0000, 0x0000 }, /* R238 */
+	{ 0x0000, 0x0000 }, /* R239 */
+	{ 0x0000, 0x0000 }, /* R240 */
+	{ 0x0000, 0x0000 }, /* R241 */
+	{ 0x0000, 0x0000 }, /* R242 */
+	{ 0x0000, 0x0000 }, /* R243 */
+	{ 0x0000, 0x0000 }, /* R244 */
+	{ 0x0000, 0x0000 }, /* R245 */
+	{ 0x0000, 0x0000 }, /* R246 */
+	{ 0x0000, 0x0000 }, /* R247 */
+	{ 0x0000, 0x0000 }, /* R248 */
+	{ 0x0000, 0x0000 }, /* R249 */
+	{ 0x0000, 0x0000 }, /* R250 */
+	{ 0x0000, 0x0000 }, /* R251 */
+	{ 0x0000, 0x0000 }, /* R252 */
+	{ 0x0000, 0x0000 }, /* R253 */
+	{ 0x0000, 0x0000 }, /* R254 */
+	{ 0x0000, 0x0000 }, /* R255 */
+	{ 0x000F, 0x0000 }, /* R256   - Chip Revision */
+	{ 0x0074, 0x0074 }, /* R257   - Control Interface */
+	{ 0x0000, 0x0000 }, /* R258 */
+	{ 0x0000, 0x0000 }, /* R259 */
+	{ 0x0000, 0x0000 }, /* R260 */
+	{ 0x0000, 0x0000 }, /* R261 */
+	{ 0x0000, 0x0000 }, /* R262 */
+	{ 0x0000, 0x0000 }, /* R263 */
+	{ 0x0000, 0x0000 }, /* R264 */
+	{ 0x0000, 0x0000 }, /* R265 */
+	{ 0x0000, 0x0000 }, /* R266 */
+	{ 0x0000, 0x0000 }, /* R267 */
+	{ 0x0000, 0x0000 }, /* R268 */
+	{ 0x0000, 0x0000 }, /* R269 */
+	{ 0x0000, 0x0000 }, /* R270 */
+	{ 0x0000, 0x0000 }, /* R271 */
+	{ 0x807F, 0x837F }, /* R272   - Write Sequencer Ctrl (1) */
+	{ 0x017F, 0x0000 }, /* R273   - Write Sequencer Ctrl (2) */
+	{ 0x0000, 0x0000 }, /* R274 */
+	{ 0x0000, 0x0000 }, /* R275 */
+	{ 0x0000, 0x0000 }, /* R276 */
+	{ 0x0000, 0x0000 }, /* R277 */
+	{ 0x0000, 0x0000 }, /* R278 */
+	{ 0x0000, 0x0000 }, /* R279 */
+	{ 0x0000, 0x0000 }, /* R280 */
+	{ 0x0000, 0x0000 }, /* R281 */
+	{ 0x0000, 0x0000 }, /* R282 */
+	{ 0x0000, 0x0000 }, /* R283 */
+	{ 0x0000, 0x0000 }, /* R284 */
+	{ 0x0000, 0x0000 }, /* R285 */
+	{ 0x0000, 0x0000 }, /* R286 */
+	{ 0x0000, 0x0000 }, /* R287 */
+	{ 0x0000, 0x0000 }, /* R288 */
+	{ 0x0000, 0x0000 }, /* R289 */
+	{ 0x0000, 0x0000 }, /* R290 */
+	{ 0x0000, 0x0000 }, /* R291 */
+	{ 0x0000, 0x0000 }, /* R292 */
+	{ 0x0000, 0x0000 }, /* R293 */
+	{ 0x0000, 0x0000 }, /* R294 */
+	{ 0x0000, 0x0000 }, /* R295 */
+	{ 0x0000, 0x0000 }, /* R296 */
+	{ 0x0000, 0x0000 }, /* R297 */
+	{ 0x0000, 0x0000 }, /* R298 */
+	{ 0x0000, 0x0000 }, /* R299 */
+	{ 0x0000, 0x0000 }, /* R300 */
+	{ 0x0000, 0x0000 }, /* R301 */
+	{ 0x0000, 0x0000 }, /* R302 */
+	{ 0x0000, 0x0000 }, /* R303 */
+	{ 0x0000, 0x0000 }, /* R304 */
+	{ 0x0000, 0x0000 }, /* R305 */
+	{ 0x0000, 0x0000 }, /* R306 */
+	{ 0x0000, 0x0000 }, /* R307 */
+	{ 0x0000, 0x0000 }, /* R308 */
+	{ 0x0000, 0x0000 }, /* R309 */
+	{ 0x0000, 0x0000 }, /* R310 */
+	{ 0x0000, 0x0000 }, /* R311 */
+	{ 0x0000, 0x0000 }, /* R312 */
+	{ 0x0000, 0x0000 }, /* R313 */
+	{ 0x0000, 0x0000 }, /* R314 */
+	{ 0x0000, 0x0000 }, /* R315 */
+	{ 0x0000, 0x0000 }, /* R316 */
+	{ 0x0000, 0x0000 }, /* R317 */
+	{ 0x0000, 0x0000 }, /* R318 */
+	{ 0x0000, 0x0000 }, /* R319 */
+	{ 0x0000, 0x0000 }, /* R320 */
+	{ 0x0000, 0x0000 }, /* R321 */
+	{ 0x0000, 0x0000 }, /* R322 */
+	{ 0x0000, 0x0000 }, /* R323 */
+	{ 0x0000, 0x0000 }, /* R324 */
+	{ 0x0000, 0x0000 }, /* R325 */
+	{ 0x0000, 0x0000 }, /* R326 */
+	{ 0x0000, 0x0000 }, /* R327 */
+	{ 0x0000, 0x0000 }, /* R328 */
+	{ 0x0000, 0x0000 }, /* R329 */
+	{ 0x0000, 0x0000 }, /* R330 */
+	{ 0x0000, 0x0000 }, /* R331 */
+	{ 0x0000, 0x0000 }, /* R332 */
+	{ 0x0000, 0x0000 }, /* R333 */
+	{ 0x0000, 0x0000 }, /* R334 */
+	{ 0x0000, 0x0000 }, /* R335 */
+	{ 0x0000, 0x0000 }, /* R336 */
+	{ 0x0000, 0x0000 }, /* R337 */
+	{ 0x0000, 0x0000 }, /* R338 */
+	{ 0x0000, 0x0000 }, /* R339 */
+	{ 0x0000, 0x0000 }, /* R340 */
+	{ 0x0000, 0x0000 }, /* R341 */
+	{ 0x0000, 0x0000 }, /* R342 */
+	{ 0x0000, 0x0000 }, /* R343 */
+	{ 0x0000, 0x0000 }, /* R344 */
+	{ 0x0000, 0x0000 }, /* R345 */
+	{ 0x0000, 0x0000 }, /* R346 */
+	{ 0x0000, 0x0000 }, /* R347 */
+	{ 0x0000, 0x0000 }, /* R348 */
+	{ 0x0000, 0x0000 }, /* R349 */
+	{ 0x0000, 0x0000 }, /* R350 */
+	{ 0x0000, 0x0000 }, /* R351 */
+	{ 0x0000, 0x0000 }, /* R352 */
+	{ 0x0000, 0x0000 }, /* R353 */
+	{ 0x0000, 0x0000 }, /* R354 */
+	{ 0x0000, 0x0000 }, /* R355 */
+	{ 0x0000, 0x0000 }, /* R356 */
+	{ 0x0000, 0x0000 }, /* R357 */
+	{ 0x0000, 0x0000 }, /* R358 */
+	{ 0x0000, 0x0000 }, /* R359 */
+	{ 0x0000, 0x0000 }, /* R360 */
+	{ 0x0000, 0x0000 }, /* R361 */
+	{ 0x0000, 0x0000 }, /* R362 */
+	{ 0x0000, 0x0000 }, /* R363 */
+	{ 0x0000, 0x0000 }, /* R364 */
+	{ 0x0000, 0x0000 }, /* R365 */
+	{ 0x0000, 0x0000 }, /* R366 */
+	{ 0x0000, 0x0000 }, /* R367 */
+	{ 0x0000, 0x0000 }, /* R368 */
+	{ 0x0000, 0x0000 }, /* R369 */
+	{ 0x0000, 0x0000 }, /* R370 */
+	{ 0x0000, 0x0000 }, /* R371 */
+	{ 0x0000, 0x0000 }, /* R372 */
+	{ 0x0000, 0x0000 }, /* R373 */
+	{ 0x0000, 0x0000 }, /* R374 */
+	{ 0x0000, 0x0000 }, /* R375 */
+	{ 0x0000, 0x0000 }, /* R376 */
+	{ 0x0000, 0x0000 }, /* R377 */
+	{ 0x0000, 0x0000 }, /* R378 */
+	{ 0x0000, 0x0000 }, /* R379 */
+	{ 0x0000, 0x0000 }, /* R380 */
+	{ 0x0000, 0x0000 }, /* R381 */
+	{ 0x0000, 0x0000 }, /* R382 */
+	{ 0x0000, 0x0000 }, /* R383 */
+	{ 0x0000, 0x0000 }, /* R384 */
+	{ 0x0000, 0x0000 }, /* R385 */
+	{ 0x0000, 0x0000 }, /* R386 */
+	{ 0x0000, 0x0000 }, /* R387 */
+	{ 0x0000, 0x0000 }, /* R388 */
+	{ 0x0000, 0x0000 }, /* R389 */
+	{ 0x0000, 0x0000 }, /* R390 */
+	{ 0x0000, 0x0000 }, /* R391 */
+	{ 0x0000, 0x0000 }, /* R392 */
+	{ 0x0000, 0x0000 }, /* R393 */
+	{ 0x0000, 0x0000 }, /* R394 */
+	{ 0x0000, 0x0000 }, /* R395 */
+	{ 0x0000, 0x0000 }, /* R396 */
+	{ 0x0000, 0x0000 }, /* R397 */
+	{ 0x0000, 0x0000 }, /* R398 */
+	{ 0x0000, 0x0000 }, /* R399 */
+	{ 0x0000, 0x0000 }, /* R400 */
+	{ 0x0000, 0x0000 }, /* R401 */
+	{ 0x0000, 0x0000 }, /* R402 */
+	{ 0x0000, 0x0000 }, /* R403 */
+	{ 0x0000, 0x0000 }, /* R404 */
+	{ 0x0000, 0x0000 }, /* R405 */
+	{ 0x0000, 0x0000 }, /* R406 */
+	{ 0x0000, 0x0000 }, /* R407 */
+	{ 0x0000, 0x0000 }, /* R408 */
+	{ 0x0000, 0x0000 }, /* R409 */
+	{ 0x0000, 0x0000 }, /* R410 */
+	{ 0x0000, 0x0000 }, /* R411 */
+	{ 0x0000, 0x0000 }, /* R412 */
+	{ 0x0000, 0x0000 }, /* R413 */
+	{ 0x0000, 0x0000 }, /* R414 */
+	{ 0x0000, 0x0000 }, /* R415 */
+	{ 0x0000, 0x0000 }, /* R416 */
+	{ 0x0000, 0x0000 }, /* R417 */
+	{ 0x0000, 0x0000 }, /* R418 */
+	{ 0x0000, 0x0000 }, /* R419 */
+	{ 0x0000, 0x0000 }, /* R420 */
+	{ 0x0000, 0x0000 }, /* R421 */
+	{ 0x0000, 0x0000 }, /* R422 */
+	{ 0x0000, 0x0000 }, /* R423 */
+	{ 0x0000, 0x0000 }, /* R424 */
+	{ 0x0000, 0x0000 }, /* R425 */
+	{ 0x0000, 0x0000 }, /* R426 */
+	{ 0x0000, 0x0000 }, /* R427 */
+	{ 0x0000, 0x0000 }, /* R428 */
+	{ 0x0000, 0x0000 }, /* R429 */
+	{ 0x0000, 0x0000 }, /* R430 */
+	{ 0x0000, 0x0000 }, /* R431 */
+	{ 0x0000, 0x0000 }, /* R432 */
+	{ 0x0000, 0x0000 }, /* R433 */
+	{ 0x0000, 0x0000 }, /* R434 */
+	{ 0x0000, 0x0000 }, /* R435 */
+	{ 0x0000, 0x0000 }, /* R436 */
+	{ 0x0000, 0x0000 }, /* R437 */
+	{ 0x0000, 0x0000 }, /* R438 */
+	{ 0x0000, 0x0000 }, /* R439 */
+	{ 0x0000, 0x0000 }, /* R440 */
+	{ 0x0000, 0x0000 }, /* R441 */
+	{ 0x0000, 0x0000 }, /* R442 */
+	{ 0x0000, 0x0000 }, /* R443 */
+	{ 0x0000, 0x0000 }, /* R444 */
+	{ 0x0000, 0x0000 }, /* R445 */
+	{ 0x0000, 0x0000 }, /* R446 */
+	{ 0x0000, 0x0000 }, /* R447 */
+	{ 0x0000, 0x0000 }, /* R448 */
+	{ 0x0000, 0x0000 }, /* R449 */
+	{ 0x0000, 0x0000 }, /* R450 */
+	{ 0x0000, 0x0000 }, /* R451 */
+	{ 0x0000, 0x0000 }, /* R452 */
+	{ 0x0000, 0x0000 }, /* R453 */
+	{ 0x0000, 0x0000 }, /* R454 */
+	{ 0x0000, 0x0000 }, /* R455 */
+	{ 0x0000, 0x0000 }, /* R456 */
+	{ 0x0000, 0x0000 }, /* R457 */
+	{ 0x0000, 0x0000 }, /* R458 */
+	{ 0x0000, 0x0000 }, /* R459 */
+	{ 0x0000, 0x0000 }, /* R460 */
+	{ 0x0000, 0x0000 }, /* R461 */
+	{ 0x0000, 0x0000 }, /* R462 */
+	{ 0x0000, 0x0000 }, /* R463 */
+	{ 0x0000, 0x0000 }, /* R464 */
+	{ 0x0000, 0x0000 }, /* R465 */
+	{ 0x0000, 0x0000 }, /* R466 */
+	{ 0x0000, 0x0000 }, /* R467 */
+	{ 0x0000, 0x0000 }, /* R468 */
+	{ 0x0000, 0x0000 }, /* R469 */
+	{ 0x0000, 0x0000 }, /* R470 */
+	{ 0x0000, 0x0000 }, /* R471 */
+	{ 0x0000, 0x0000 }, /* R472 */
+	{ 0x0000, 0x0000 }, /* R473 */
+	{ 0x0000, 0x0000 }, /* R474 */
+	{ 0x0000, 0x0000 }, /* R475 */
+	{ 0x0000, 0x0000 }, /* R476 */
+	{ 0x0000, 0x0000 }, /* R477 */
+	{ 0x0000, 0x0000 }, /* R478 */
+	{ 0x0000, 0x0000 }, /* R479 */
+	{ 0x0000, 0x0000 }, /* R480 */
+	{ 0x0000, 0x0000 }, /* R481 */
+	{ 0x0000, 0x0000 }, /* R482 */
+	{ 0x0000, 0x0000 }, /* R483 */
+	{ 0x0000, 0x0000 }, /* R484 */
+	{ 0x0000, 0x0000 }, /* R485 */
+	{ 0x0000, 0x0000 }, /* R486 */
+	{ 0x0000, 0x0000 }, /* R487 */
+	{ 0x0000, 0x0000 }, /* R488 */
+	{ 0x0000, 0x0000 }, /* R489 */
+	{ 0x0000, 0x0000 }, /* R490 */
+	{ 0x0000, 0x0000 }, /* R491 */
+	{ 0x0000, 0x0000 }, /* R492 */
+	{ 0x0000, 0x0000 }, /* R493 */
+	{ 0x0000, 0x0000 }, /* R494 */
+	{ 0x0000, 0x0000 }, /* R495 */
+	{ 0x0000, 0x0000 }, /* R496 */
+	{ 0x0000, 0x0000 }, /* R497 */
+	{ 0x0000, 0x0000 }, /* R498 */
+	{ 0x0000, 0x0000 }, /* R499 */
+	{ 0x0000, 0x0000 }, /* R500 */
+	{ 0x0000, 0x0000 }, /* R501 */
+	{ 0x0000, 0x0000 }, /* R502 */
+	{ 0x0000, 0x0000 }, /* R503 */
+	{ 0x0000, 0x0000 }, /* R504 */
+	{ 0x0000, 0x0000 }, /* R505 */
+	{ 0x0000, 0x0000 }, /* R506 */
+	{ 0x0000, 0x0000 }, /* R507 */
+	{ 0x0000, 0x0000 }, /* R508 */
+	{ 0x0000, 0x0000 }, /* R509 */
+	{ 0x0000, 0x0000 }, /* R510 */
+	{ 0x0000, 0x0000 }, /* R511 */
+	{ 0x001F, 0x001F }, /* R512   - AIF1 Clocking (1) */
+	{ 0x003F, 0x003F }, /* R513   - AIF1 Clocking (2) */
+	{ 0x0000, 0x0000 }, /* R514 */
+	{ 0x0000, 0x0000 }, /* R515 */
+	{ 0x001F, 0x001F }, /* R516   - AIF2 Clocking (1) */
+	{ 0x003F, 0x003F }, /* R517   - AIF2 Clocking (2) */
+	{ 0x0000, 0x0000 }, /* R518 */
+	{ 0x0000, 0x0000 }, /* R519 */
+	{ 0x001F, 0x001F }, /* R520   - Clocking (1) */
+	{ 0x0777, 0x0777 }, /* R521   - Clocking (2) */
+	{ 0x0000, 0x0000 }, /* R522 */
+	{ 0x0000, 0x0000 }, /* R523 */
+	{ 0x0000, 0x0000 }, /* R524 */
+	{ 0x0000, 0x0000 }, /* R525 */
+	{ 0x0000, 0x0000 }, /* R526 */
+	{ 0x0000, 0x0000 }, /* R527 */
+	{ 0x00FF, 0x00FF }, /* R528   - AIF1 Rate */
+	{ 0x00FF, 0x00FF }, /* R529   - AIF2 Rate */
+	{ 0x000F, 0x0000 }, /* R530   - Rate Status */
+	{ 0x0000, 0x0000 }, /* R531 */
+	{ 0x0000, 0x0000 }, /* R532 */
+	{ 0x0000, 0x0000 }, /* R533 */
+	{ 0x0000, 0x0000 }, /* R534 */
+	{ 0x0000, 0x0000 }, /* R535 */
+	{ 0x0000, 0x0000 }, /* R536 */
+	{ 0x0000, 0x0000 }, /* R537 */
+	{ 0x0000, 0x0000 }, /* R538 */
+	{ 0x0000, 0x0000 }, /* R539 */
+	{ 0x0000, 0x0000 }, /* R540 */
+	{ 0x0000, 0x0000 }, /* R541 */
+	{ 0x0000, 0x0000 }, /* R542 */
+	{ 0x0000, 0x0000 }, /* R543 */
+	{ 0x0007, 0x0007 }, /* R544   - FLL1 Control (1) */
+	{ 0x3F77, 0x3F77 }, /* R545   - FLL1 Control (2) */
+	{ 0xFFFF, 0xFFFF }, /* R546   - FLL1 Control (3) */
+	{ 0x7FEF, 0x7FEF }, /* R547   - FLL1 Control (4) */
+	{ 0x1FDB, 0x1FDB }, /* R548   - FLL1 Control (5) */
+	{ 0x0000, 0x0000 }, /* R549 */
+	{ 0x0000, 0x0000 }, /* R550 */
+	{ 0x0000, 0x0000 }, /* R551 */
+	{ 0x0000, 0x0000 }, /* R552 */
+	{ 0x0000, 0x0000 }, /* R553 */
+	{ 0x0000, 0x0000 }, /* R554 */
+	{ 0x0000, 0x0000 }, /* R555 */
+	{ 0x0000, 0x0000 }, /* R556 */
+	{ 0x0000, 0x0000 }, /* R557 */
+	{ 0x0000, 0x0000 }, /* R558 */
+	{ 0x0000, 0x0000 }, /* R559 */
+	{ 0x0000, 0x0000 }, /* R560 */
+	{ 0x0000, 0x0000 }, /* R561 */
+	{ 0x0000, 0x0000 }, /* R562 */
+	{ 0x0000, 0x0000 }, /* R563 */
+	{ 0x0000, 0x0000 }, /* R564 */
+	{ 0x0000, 0x0000 }, /* R565 */
+	{ 0x0000, 0x0000 }, /* R566 */
+	{ 0x0000, 0x0000 }, /* R567 */
+	{ 0x0000, 0x0000 }, /* R568 */
+	{ 0x0000, 0x0000 }, /* R569 */
+	{ 0x0000, 0x0000 }, /* R570 */
+	{ 0x0000, 0x0000 }, /* R571 */
+	{ 0x0000, 0x0000 }, /* R572 */
+	{ 0x0000, 0x0000 }, /* R573 */
+	{ 0x0000, 0x0000 }, /* R574 */
+	{ 0x0000, 0x0000 }, /* R575 */
+	{ 0x0007, 0x0007 }, /* R576   - FLL2 Control (1) */
+	{ 0x3F77, 0x3F77 }, /* R577   - FLL2 Control (2) */
+	{ 0xFFFF, 0xFFFF }, /* R578   - FLL2 Control (3) */
+	{ 0x7FEF, 0x7FEF }, /* R579   - FLL2 Control (4) */
+	{ 0x1FDB, 0x1FDB }, /* R580   - FLL2 Control (5) */
+	{ 0x0000, 0x0000 }, /* R581 */
+	{ 0x0000, 0x0000 }, /* R582 */
+	{ 0x0000, 0x0000 }, /* R583 */
+	{ 0x0000, 0x0000 }, /* R584 */
+	{ 0x0000, 0x0000 }, /* R585 */
+	{ 0x0000, 0x0000 }, /* R586 */
+	{ 0x0000, 0x0000 }, /* R587 */
+	{ 0x0000, 0x0000 }, /* R588 */
+	{ 0x0000, 0x0000 }, /* R589 */
+	{ 0x0000, 0x0000 }, /* R590 */
+	{ 0x0000, 0x0000 }, /* R591 */
+	{ 0x0000, 0x0000 }, /* R592 */
+	{ 0x0000, 0x0000 }, /* R593 */
+	{ 0x0000, 0x0000 }, /* R594 */
+	{ 0x0000, 0x0000 }, /* R595 */
+	{ 0x0000, 0x0000 }, /* R596 */
+	{ 0x0000, 0x0000 }, /* R597 */
+	{ 0x0000, 0x0000 }, /* R598 */
+	{ 0x0000, 0x0000 }, /* R599 */
+	{ 0x0000, 0x0000 }, /* R600 */
+	{ 0x0000, 0x0000 }, /* R601 */
+	{ 0x0000, 0x0000 }, /* R602 */
+	{ 0x0000, 0x0000 }, /* R603 */
+	{ 0x0000, 0x0000 }, /* R604 */
+	{ 0x0000, 0x0000 }, /* R605 */
+	{ 0x0000, 0x0000 }, /* R606 */
+	{ 0x0000, 0x0000 }, /* R607 */
+	{ 0x0000, 0x0000 }, /* R608 */
+	{ 0x0000, 0x0000 }, /* R609 */
+	{ 0x0000, 0x0000 }, /* R610 */
+	{ 0x0000, 0x0000 }, /* R611 */
+	{ 0x0000, 0x0000 }, /* R612 */
+	{ 0x0000, 0x0000 }, /* R613 */
+	{ 0x0000, 0x0000 }, /* R614 */
+	{ 0x0000, 0x0000 }, /* R615 */
+	{ 0x0000, 0x0000 }, /* R616 */
+	{ 0x0000, 0x0000 }, /* R617 */
+	{ 0x0000, 0x0000 }, /* R618 */
+	{ 0x0000, 0x0000 }, /* R619 */
+	{ 0x0000, 0x0000 }, /* R620 */
+	{ 0x0000, 0x0000 }, /* R621 */
+	{ 0x0000, 0x0000 }, /* R622 */
+	{ 0x0000, 0x0000 }, /* R623 */
+	{ 0x0000, 0x0000 }, /* R624 */
+	{ 0x0000, 0x0000 }, /* R625 */
+	{ 0x0000, 0x0000 }, /* R626 */
+	{ 0x0000, 0x0000 }, /* R627 */
+	{ 0x0000, 0x0000 }, /* R628 */
+	{ 0x0000, 0x0000 }, /* R629 */
+	{ 0x0000, 0x0000 }, /* R630 */
+	{ 0x0000, 0x0000 }, /* R631 */
+	{ 0x0000, 0x0000 }, /* R632 */
+	{ 0x0000, 0x0000 }, /* R633 */
+	{ 0x0000, 0x0000 }, /* R634 */
+	{ 0x0000, 0x0000 }, /* R635 */
+	{ 0x0000, 0x0000 }, /* R636 */
+	{ 0x0000, 0x0000 }, /* R637 */
+	{ 0x0000, 0x0000 }, /* R638 */
+	{ 0x0000, 0x0000 }, /* R639 */
+	{ 0x0000, 0x0000 }, /* R640 */
+	{ 0x0000, 0x0000 }, /* R641 */
+	{ 0x0000, 0x0000 }, /* R642 */
+	{ 0x0000, 0x0000 }, /* R643 */
+	{ 0x0000, 0x0000 }, /* R644 */
+	{ 0x0000, 0x0000 }, /* R645 */
+	{ 0x0000, 0x0000 }, /* R646 */
+	{ 0x0000, 0x0000 }, /* R647 */
+	{ 0x0000, 0x0000 }, /* R648 */
+	{ 0x0000, 0x0000 }, /* R649 */
+	{ 0x0000, 0x0000 }, /* R650 */
+	{ 0x0000, 0x0000 }, /* R651 */
+	{ 0x0000, 0x0000 }, /* R652 */
+	{ 0x0000, 0x0000 }, /* R653 */
+	{ 0x0000, 0x0000 }, /* R654 */
+	{ 0x0000, 0x0000 }, /* R655 */
+	{ 0x0000, 0x0000 }, /* R656 */
+	{ 0x0000, 0x0000 }, /* R657 */
+	{ 0x0000, 0x0000 }, /* R658 */
+	{ 0x0000, 0x0000 }, /* R659 */
+	{ 0x0000, 0x0000 }, /* R660 */
+	{ 0x0000, 0x0000 }, /* R661 */
+	{ 0x0000, 0x0000 }, /* R662 */
+	{ 0x0000, 0x0000 }, /* R663 */
+	{ 0x0000, 0x0000 }, /* R664 */
+	{ 0x0000, 0x0000 }, /* R665 */
+	{ 0x0000, 0x0000 }, /* R666 */
+	{ 0x0000, 0x0000 }, /* R667 */
+	{ 0x0000, 0x0000 }, /* R668 */
+	{ 0x0000, 0x0000 }, /* R669 */
+	{ 0x0000, 0x0000 }, /* R670 */
+	{ 0x0000, 0x0000 }, /* R671 */
+	{ 0x0000, 0x0000 }, /* R672 */
+	{ 0x0000, 0x0000 }, /* R673 */
+	{ 0x0000, 0x0000 }, /* R674 */
+	{ 0x0000, 0x0000 }, /* R675 */
+	{ 0x0000, 0x0000 }, /* R676 */
+	{ 0x0000, 0x0000 }, /* R677 */
+	{ 0x0000, 0x0000 }, /* R678 */
+	{ 0x0000, 0x0000 }, /* R679 */
+	{ 0x0000, 0x0000 }, /* R680 */
+	{ 0x0000, 0x0000 }, /* R681 */
+	{ 0x0000, 0x0000 }, /* R682 */
+	{ 0x0000, 0x0000 }, /* R683 */
+	{ 0x0000, 0x0000 }, /* R684 */
+	{ 0x0000, 0x0000 }, /* R685 */
+	{ 0x0000, 0x0000 }, /* R686 */
+	{ 0x0000, 0x0000 }, /* R687 */
+	{ 0x0000, 0x0000 }, /* R688 */
+	{ 0x0000, 0x0000 }, /* R689 */
+	{ 0x0000, 0x0000 }, /* R690 */
+	{ 0x0000, 0x0000 }, /* R691 */
+	{ 0x0000, 0x0000 }, /* R692 */
+	{ 0x0000, 0x0000 }, /* R693 */
+	{ 0x0000, 0x0000 }, /* R694 */
+	{ 0x0000, 0x0000 }, /* R695 */
+	{ 0x0000, 0x0000 }, /* R696 */
+	{ 0x0000, 0x0000 }, /* R697 */
+	{ 0x0000, 0x0000 }, /* R698 */
+	{ 0x0000, 0x0000 }, /* R699 */
+	{ 0x0000, 0x0000 }, /* R700 */
+	{ 0x0000, 0x0000 }, /* R701 */
+	{ 0x0000, 0x0000 }, /* R702 */
+	{ 0x0000, 0x0000 }, /* R703 */
+	{ 0x0000, 0x0000 }, /* R704 */
+	{ 0x0000, 0x0000 }, /* R705 */
+	{ 0x0000, 0x0000 }, /* R706 */
+	{ 0x0000, 0x0000 }, /* R707 */
+	{ 0x0000, 0x0000 }, /* R708 */
+	{ 0x0000, 0x0000 }, /* R709 */
+	{ 0x0000, 0x0000 }, /* R710 */
+	{ 0x0000, 0x0000 }, /* R711 */
+	{ 0x0000, 0x0000 }, /* R712 */
+	{ 0x0000, 0x0000 }, /* R713 */
+	{ 0x0000, 0x0000 }, /* R714 */
+	{ 0x0000, 0x0000 }, /* R715 */
+	{ 0x0000, 0x0000 }, /* R716 */
+	{ 0x0000, 0x0000 }, /* R717 */
+	{ 0x0000, 0x0000 }, /* R718 */
+	{ 0x0000, 0x0000 }, /* R719 */
+	{ 0x0000, 0x0000 }, /* R720 */
+	{ 0x0000, 0x0000 }, /* R721 */
+	{ 0x0000, 0x0000 }, /* R722 */
+	{ 0x0000, 0x0000 }, /* R723 */
+	{ 0x0000, 0x0000 }, /* R724 */
+	{ 0x0000, 0x0000 }, /* R725 */
+	{ 0x0000, 0x0000 }, /* R726 */
+	{ 0x0000, 0x0000 }, /* R727 */
+	{ 0x0000, 0x0000 }, /* R728 */
+	{ 0x0000, 0x0000 }, /* R729 */
+	{ 0x0000, 0x0000 }, /* R730 */
+	{ 0x0000, 0x0000 }, /* R731 */
+	{ 0x0000, 0x0000 }, /* R732 */
+	{ 0x0000, 0x0000 }, /* R733 */
+	{ 0x0000, 0x0000 }, /* R734 */
+	{ 0x0000, 0x0000 }, /* R735 */
+	{ 0x0000, 0x0000 }, /* R736 */
+	{ 0x0000, 0x0000 }, /* R737 */
+	{ 0x0000, 0x0000 }, /* R738 */
+	{ 0x0000, 0x0000 }, /* R739 */
+	{ 0x0000, 0x0000 }, /* R740 */
+	{ 0x0000, 0x0000 }, /* R741 */
+	{ 0x0000, 0x0000 }, /* R742 */
+	{ 0x0000, 0x0000 }, /* R743 */
+	{ 0x0000, 0x0000 }, /* R744 */
+	{ 0x0000, 0x0000 }, /* R745 */
+	{ 0x0000, 0x0000 }, /* R746 */
+	{ 0x0000, 0x0000 }, /* R747 */
+	{ 0x0000, 0x0000 }, /* R748 */
+	{ 0x0000, 0x0000 }, /* R749 */
+	{ 0x0000, 0x0000 }, /* R750 */
+	{ 0x0000, 0x0000 }, /* R751 */
+	{ 0x0000, 0x0000 }, /* R752 */
+	{ 0x0000, 0x0000 }, /* R753 */
+	{ 0x0000, 0x0000 }, /* R754 */
+	{ 0x0000, 0x0000 }, /* R755 */
+	{ 0x0000, 0x0000 }, /* R756 */
+	{ 0x0000, 0x0000 }, /* R757 */
+	{ 0x0000, 0x0000 }, /* R758 */
+	{ 0x0000, 0x0000 }, /* R759 */
+	{ 0x0000, 0x0000 }, /* R760 */
+	{ 0x0000, 0x0000 }, /* R761 */
+	{ 0x0000, 0x0000 }, /* R762 */
+	{ 0x0000, 0x0000 }, /* R763 */
+	{ 0x0000, 0x0000 }, /* R764 */
+	{ 0x0000, 0x0000 }, /* R765 */
+	{ 0x0000, 0x0000 }, /* R766 */
+	{ 0x0000, 0x0000 }, /* R767 */
+	{ 0xE1F8, 0xE1F8 }, /* R768   - AIF1 Control (1) */
+	{ 0xCD1F, 0xCD1F }, /* R769   - AIF1 Control (2) */
+	{ 0xF000, 0xF000 }, /* R770   - AIF1 Master/Slave */
+	{ 0x01F0, 0x01F0 }, /* R771   - AIF1 BCLK */
+	{ 0x0FFF, 0x0FFF }, /* R772   - AIF1ADC LRCLK */
+	{ 0x0FFF, 0x0FFF }, /* R773   - AIF1DAC LRCLK */
+	{ 0x0003, 0x0003 }, /* R774   - AIF1DAC Data */
+	{ 0x0003, 0x0003 }, /* R775   - AIF1ADC Data */
+	{ 0x0000, 0x0000 }, /* R776 */
+	{ 0x0000, 0x0000 }, /* R777 */
+	{ 0x0000, 0x0000 }, /* R778 */
+	{ 0x0000, 0x0000 }, /* R779 */
+	{ 0x0000, 0x0000 }, /* R780 */
+	{ 0x0000, 0x0000 }, /* R781 */
+	{ 0x0000, 0x0000 }, /* R782 */
+	{ 0x0000, 0x0000 }, /* R783 */
+	{ 0xF1F8, 0xF1F8 }, /* R784   - AIF2 Control (1) */
+	{ 0xFD1F, 0xFD1F }, /* R785   - AIF2 Control (2) */
+	{ 0xF000, 0xF000 }, /* R786   - AIF2 Master/Slave */
+	{ 0x01F0, 0x01F0 }, /* R787   - AIF2 BCLK */
+	{ 0x0FFF, 0x0FFF }, /* R788   - AIF2ADC LRCLK */
+	{ 0x0FFF, 0x0FFF }, /* R789   - AIF2DAC LRCLK */
+	{ 0x0003, 0x0003 }, /* R790   - AIF2DAC Data */
+	{ 0x0003, 0x0003 }, /* R791   - AIF2ADC Data */
+	{ 0x0000, 0x0000 }, /* R792 */
+	{ 0x0000, 0x0000 }, /* R793 */
+	{ 0x0000, 0x0000 }, /* R794 */
+	{ 0x0000, 0x0000 }, /* R795 */
+	{ 0x0000, 0x0000 }, /* R796 */
+	{ 0x0000, 0x0000 }, /* R797 */
+	{ 0x0000, 0x0000 }, /* R798 */
+	{ 0x0000, 0x0000 }, /* R799 */
+	{ 0x0000, 0x0000 }, /* R800 */
+	{ 0x0000, 0x0000 }, /* R801 */
+	{ 0x0000, 0x0000 }, /* R802 */
+	{ 0x0000, 0x0000 }, /* R803 */
+	{ 0x0000, 0x0000 }, /* R804 */
+	{ 0x0000, 0x0000 }, /* R805 */
+	{ 0x0000, 0x0000 }, /* R806 */
+	{ 0x0000, 0x0000 }, /* R807 */
+	{ 0x0000, 0x0000 }, /* R808 */
+	{ 0x0000, 0x0000 }, /* R809 */
+	{ 0x0000, 0x0000 }, /* R810 */
+	{ 0x0000, 0x0000 }, /* R811 */
+	{ 0x0000, 0x0000 }, /* R812 */
+	{ 0x0000, 0x0000 }, /* R813 */
+	{ 0x0000, 0x0000 }, /* R814 */
+	{ 0x0000, 0x0000 }, /* R815 */
+	{ 0x0000, 0x0000 }, /* R816 */
+	{ 0x0000, 0x0000 }, /* R817 */
+	{ 0x0000, 0x0000 }, /* R818 */
+	{ 0x0000, 0x0000 }, /* R819 */
+	{ 0x0000, 0x0000 }, /* R820 */
+	{ 0x0000, 0x0000 }, /* R821 */
+	{ 0x0000, 0x0000 }, /* R822 */
+	{ 0x0000, 0x0000 }, /* R823 */
+	{ 0x0000, 0x0000 }, /* R824 */
+	{ 0x0000, 0x0000 }, /* R825 */
+	{ 0x0000, 0x0000 }, /* R826 */
+	{ 0x0000, 0x0000 }, /* R827 */
+	{ 0x0000, 0x0000 }, /* R828 */
+	{ 0x0000, 0x0000 }, /* R829 */
+	{ 0x0000, 0x0000 }, /* R830 */
+	{ 0x0000, 0x0000 }, /* R831 */
+	{ 0x0000, 0x0000 }, /* R832 */
+	{ 0x0000, 0x0000 }, /* R833 */
+	{ 0x0000, 0x0000 }, /* R834 */
+	{ 0x0000, 0x0000 }, /* R835 */
+	{ 0x0000, 0x0000 }, /* R836 */
+	{ 0x0000, 0x0000 }, /* R837 */
+	{ 0x0000, 0x0000 }, /* R838 */
+	{ 0x0000, 0x0000 }, /* R839 */
+	{ 0x0000, 0x0000 }, /* R840 */
+	{ 0x0000, 0x0000 }, /* R841 */
+	{ 0x0000, 0x0000 }, /* R842 */
+	{ 0x0000, 0x0000 }, /* R843 */
+	{ 0x0000, 0x0000 }, /* R844 */
+	{ 0x0000, 0x0000 }, /* R845 */
+	{ 0x0000, 0x0000 }, /* R846 */
+	{ 0x0000, 0x0000 }, /* R847 */
+	{ 0x0000, 0x0000 }, /* R848 */
+	{ 0x0000, 0x0000 }, /* R849 */
+	{ 0x0000, 0x0000 }, /* R850 */
+	{ 0x0000, 0x0000 }, /* R851 */
+	{ 0x0000, 0x0000 }, /* R852 */
+	{ 0x0000, 0x0000 }, /* R853 */
+	{ 0x0000, 0x0000 }, /* R854 */
+	{ 0x0000, 0x0000 }, /* R855 */
+	{ 0x0000, 0x0000 }, /* R856 */
+	{ 0x0000, 0x0000 }, /* R857 */
+	{ 0x0000, 0x0000 }, /* R858 */
+	{ 0x0000, 0x0000 }, /* R859 */
+	{ 0x0000, 0x0000 }, /* R860 */
+	{ 0x0000, 0x0000 }, /* R861 */
+	{ 0x0000, 0x0000 }, /* R862 */
+	{ 0x0000, 0x0000 }, /* R863 */
+	{ 0x0000, 0x0000 }, /* R864 */
+	{ 0x0000, 0x0000 }, /* R865 */
+	{ 0x0000, 0x0000 }, /* R866 */
+	{ 0x0000, 0x0000 }, /* R867 */
+	{ 0x0000, 0x0000 }, /* R868 */
+	{ 0x0000, 0x0000 }, /* R869 */
+	{ 0x0000, 0x0000 }, /* R870 */
+	{ 0x0000, 0x0000 }, /* R871 */
+	{ 0x0000, 0x0000 }, /* R872 */
+	{ 0x0000, 0x0000 }, /* R873 */
+	{ 0x0000, 0x0000 }, /* R874 */
+	{ 0x0000, 0x0000 }, /* R875 */
+	{ 0x0000, 0x0000 }, /* R876 */
+	{ 0x0000, 0x0000 }, /* R877 */
+	{ 0x0000, 0x0000 }, /* R878 */
+	{ 0x0000, 0x0000 }, /* R879 */
+	{ 0x0000, 0x0000 }, /* R880 */
+	{ 0x0000, 0x0000 }, /* R881 */
+	{ 0x0000, 0x0000 }, /* R882 */
+	{ 0x0000, 0x0000 }, /* R883 */
+	{ 0x0000, 0x0000 }, /* R884 */
+	{ 0x0000, 0x0000 }, /* R885 */
+	{ 0x0000, 0x0000 }, /* R886 */
+	{ 0x0000, 0x0000 }, /* R887 */
+	{ 0x0000, 0x0000 }, /* R888 */
+	{ 0x0000, 0x0000 }, /* R889 */
+	{ 0x0000, 0x0000 }, /* R890 */
+	{ 0x0000, 0x0000 }, /* R891 */
+	{ 0x0000, 0x0000 }, /* R892 */
+	{ 0x0000, 0x0000 }, /* R893 */
+	{ 0x0000, 0x0000 }, /* R894 */
+	{ 0x0000, 0x0000 }, /* R895 */
+	{ 0x0000, 0x0000 }, /* R896 */
+	{ 0x0000, 0x0000 }, /* R897 */
+	{ 0x0000, 0x0000 }, /* R898 */
+	{ 0x0000, 0x0000 }, /* R899 */
+	{ 0x0000, 0x0000 }, /* R900 */
+	{ 0x0000, 0x0000 }, /* R901 */
+	{ 0x0000, 0x0000 }, /* R902 */
+	{ 0x0000, 0x0000 }, /* R903 */
+	{ 0x0000, 0x0000 }, /* R904 */
+	{ 0x0000, 0x0000 }, /* R905 */
+	{ 0x0000, 0x0000 }, /* R906 */
+	{ 0x0000, 0x0000 }, /* R907 */
+	{ 0x0000, 0x0000 }, /* R908 */
+	{ 0x0000, 0x0000 }, /* R909 */
+	{ 0x0000, 0x0000 }, /* R910 */
+	{ 0x0000, 0x0000 }, /* R911 */
+	{ 0x0000, 0x0000 }, /* R912 */
+	{ 0x0000, 0x0000 }, /* R913 */
+	{ 0x0000, 0x0000 }, /* R914 */
+	{ 0x0000, 0x0000 }, /* R915 */
+	{ 0x0000, 0x0000 }, /* R916 */
+	{ 0x0000, 0x0000 }, /* R917 */
+	{ 0x0000, 0x0000 }, /* R918 */
+	{ 0x0000, 0x0000 }, /* R919 */
+	{ 0x0000, 0x0000 }, /* R920 */
+	{ 0x0000, 0x0000 }, /* R921 */
+	{ 0x0000, 0x0000 }, /* R922 */
+	{ 0x0000, 0x0000 }, /* R923 */
+	{ 0x0000, 0x0000 }, /* R924 */
+	{ 0x0000, 0x0000 }, /* R925 */
+	{ 0x0000, 0x0000 }, /* R926 */
+	{ 0x0000, 0x0000 }, /* R927 */
+	{ 0x0000, 0x0000 }, /* R928 */
+	{ 0x0000, 0x0000 }, /* R929 */
+	{ 0x0000, 0x0000 }, /* R930 */
+	{ 0x0000, 0x0000 }, /* R931 */
+	{ 0x0000, 0x0000 }, /* R932 */
+	{ 0x0000, 0x0000 }, /* R933 */
+	{ 0x0000, 0x0000 }, /* R934 */
+	{ 0x0000, 0x0000 }, /* R935 */
+	{ 0x0000, 0x0000 }, /* R936 */
+	{ 0x0000, 0x0000 }, /* R937 */
+	{ 0x0000, 0x0000 }, /* R938 */
+	{ 0x0000, 0x0000 }, /* R939 */
+	{ 0x0000, 0x0000 }, /* R940 */
+	{ 0x0000, 0x0000 }, /* R941 */
+	{ 0x0000, 0x0000 }, /* R942 */
+	{ 0x0000, 0x0000 }, /* R943 */
+	{ 0x0000, 0x0000 }, /* R944 */
+	{ 0x0000, 0x0000 }, /* R945 */
+	{ 0x0000, 0x0000 }, /* R946 */
+	{ 0x0000, 0x0000 }, /* R947 */
+	{ 0x0000, 0x0000 }, /* R948 */
+	{ 0x0000, 0x0000 }, /* R949 */
+	{ 0x0000, 0x0000 }, /* R950 */
+	{ 0x0000, 0x0000 }, /* R951 */
+	{ 0x0000, 0x0000 }, /* R952 */
+	{ 0x0000, 0x0000 }, /* R953 */
+	{ 0x0000, 0x0000 }, /* R954 */
+	{ 0x0000, 0x0000 }, /* R955 */
+	{ 0x0000, 0x0000 }, /* R956 */
+	{ 0x0000, 0x0000 }, /* R957 */
+	{ 0x0000, 0x0000 }, /* R958 */
+	{ 0x0000, 0x0000 }, /* R959 */
+	{ 0x0000, 0x0000 }, /* R960 */
+	{ 0x0000, 0x0000 }, /* R961 */
+	{ 0x0000, 0x0000 }, /* R962 */
+	{ 0x0000, 0x0000 }, /* R963 */
+	{ 0x0000, 0x0000 }, /* R964 */
+	{ 0x0000, 0x0000 }, /* R965 */
+	{ 0x0000, 0x0000 }, /* R966 */
+	{ 0x0000, 0x0000 }, /* R967 */
+	{ 0x0000, 0x0000 }, /* R968 */
+	{ 0x0000, 0x0000 }, /* R969 */
+	{ 0x0000, 0x0000 }, /* R970 */
+	{ 0x0000, 0x0000 }, /* R971 */
+	{ 0x0000, 0x0000 }, /* R972 */
+	{ 0x0000, 0x0000 }, /* R973 */
+	{ 0x0000, 0x0000 }, /* R974 */
+	{ 0x0000, 0x0000 }, /* R975 */
+	{ 0x0000, 0x0000 }, /* R976 */
+	{ 0x0000, 0x0000 }, /* R977 */
+	{ 0x0000, 0x0000 }, /* R978 */
+	{ 0x0000, 0x0000 }, /* R979 */
+	{ 0x0000, 0x0000 }, /* R980 */
+	{ 0x0000, 0x0000 }, /* R981 */
+	{ 0x0000, 0x0000 }, /* R982 */
+	{ 0x0000, 0x0000 }, /* R983 */
+	{ 0x0000, 0x0000 }, /* R984 */
+	{ 0x0000, 0x0000 }, /* R985 */
+	{ 0x0000, 0x0000 }, /* R986 */
+	{ 0x0000, 0x0000 }, /* R987 */
+	{ 0x0000, 0x0000 }, /* R988 */
+	{ 0x0000, 0x0000 }, /* R989 */
+	{ 0x0000, 0x0000 }, /* R990 */
+	{ 0x0000, 0x0000 }, /* R991 */
+	{ 0x0000, 0x0000 }, /* R992 */
+	{ 0x0000, 0x0000 }, /* R993 */
+	{ 0x0000, 0x0000 }, /* R994 */
+	{ 0x0000, 0x0000 }, /* R995 */
+	{ 0x0000, 0x0000 }, /* R996 */
+	{ 0x0000, 0x0000 }, /* R997 */
+	{ 0x0000, 0x0000 }, /* R998 */
+	{ 0x0000, 0x0000 }, /* R999 */
+	{ 0x0000, 0x0000 }, /* R1000 */
+	{ 0x0000, 0x0000 }, /* R1001 */
+	{ 0x0000, 0x0000 }, /* R1002 */
+	{ 0x0000, 0x0000 }, /* R1003 */
+	{ 0x0000, 0x0000 }, /* R1004 */
+	{ 0x0000, 0x0000 }, /* R1005 */
+	{ 0x0000, 0x0000 }, /* R1006 */
+	{ 0x0000, 0x0000 }, /* R1007 */
+	{ 0x0000, 0x0000 }, /* R1008 */
+	{ 0x0000, 0x0000 }, /* R1009 */
+	{ 0x0000, 0x0000 }, /* R1010 */
+	{ 0x0000, 0x0000 }, /* R1011 */
+	{ 0x0000, 0x0000 }, /* R1012 */
+	{ 0x0000, 0x0000 }, /* R1013 */
+	{ 0x0000, 0x0000 }, /* R1014 */
+	{ 0x0000, 0x0000 }, /* R1015 */
+	{ 0x0000, 0x0000 }, /* R1016 */
+	{ 0x0000, 0x0000 }, /* R1017 */
+	{ 0x0000, 0x0000 }, /* R1018 */
+	{ 0x0000, 0x0000 }, /* R1019 */
+	{ 0x0000, 0x0000 }, /* R1020 */
+	{ 0x0000, 0x0000 }, /* R1021 */
+	{ 0x0000, 0x0000 }, /* R1022 */
+	{ 0x0000, 0x0000 }, /* R1023 */
+	{ 0x00FF, 0x01FF }, /* R1024  - AIF1 ADC1 Left Volume */
+	{ 0x00FF, 0x01FF }, /* R1025  - AIF1 ADC1 Right Volume */
+	{ 0x00FF, 0x01FF }, /* R1026  - AIF1 DAC1 Left Volume */
+	{ 0x00FF, 0x01FF }, /* R1027  - AIF1 DAC1 Right Volume */
+	{ 0x00FF, 0x01FF }, /* R1028  - AIF1 ADC2 Left Volume */
+	{ 0x00FF, 0x01FF }, /* R1029  - AIF1 ADC2 Right Volume */
+	{ 0x00FF, 0x01FF }, /* R1030  - AIF1 DAC2 Left Volume */
+	{ 0x00FF, 0x01FF }, /* R1031  - AIF1 DAC2 Right Volume */
+	{ 0x0000, 0x0000 }, /* R1032 */
+	{ 0x0000, 0x0000 }, /* R1033 */
+	{ 0x0000, 0x0000 }, /* R1034 */
+	{ 0x0000, 0x0000 }, /* R1035 */
+	{ 0x0000, 0x0000 }, /* R1036 */
+	{ 0x0000, 0x0000 }, /* R1037 */
+	{ 0x0000, 0x0000 }, /* R1038 */
+	{ 0x0000, 0x0000 }, /* R1039 */
+	{ 0xF800, 0xF800 }, /* R1040  - AIF1 ADC1 Filters */
+	{ 0x7800, 0x7800 }, /* R1041  - AIF1 ADC2 Filters */
+	{ 0x0000, 0x0000 }, /* R1042 */
+	{ 0x0000, 0x0000 }, /* R1043 */
+	{ 0x0000, 0x0000 }, /* R1044 */
+	{ 0x0000, 0x0000 }, /* R1045 */
+	{ 0x0000, 0x0000 }, /* R1046 */
+	{ 0x0000, 0x0000 }, /* R1047 */
+	{ 0x0000, 0x0000 }, /* R1048 */
+	{ 0x0000, 0x0000 }, /* R1049 */
+	{ 0x0000, 0x0000 }, /* R1050 */
+	{ 0x0000, 0x0000 }, /* R1051 */
+	{ 0x0000, 0x0000 }, /* R1052 */
+	{ 0x0000, 0x0000 }, /* R1053 */
+	{ 0x0000, 0x0000 }, /* R1054 */
+	{ 0x0000, 0x0000 }, /* R1055 */
+	{ 0x02B6, 0x02B6 }, /* R1056  - AIF1 DAC1 Filters (1) */
+	{ 0x3F00, 0x3F00 }, /* R1057  - AIF1 DAC1 Filters (2) */
+	{ 0x02B6, 0x02B6 }, /* R1058  - AIF1 DAC2 Filters (1) */
+	{ 0x3F00, 0x3F00 }, /* R1059  - AIF1 DAC2 Filters (2) */
+	{ 0x0000, 0x0000 }, /* R1060 */
+	{ 0x0000, 0x0000 }, /* R1061 */
+	{ 0x0000, 0x0000 }, /* R1062 */
+	{ 0x0000, 0x0000 }, /* R1063 */
+	{ 0x0000, 0x0000 }, /* R1064 */
+	{ 0x0000, 0x0000 }, /* R1065 */
+	{ 0x0000, 0x0000 }, /* R1066 */
+	{ 0x0000, 0x0000 }, /* R1067 */
+	{ 0x0000, 0x0000 }, /* R1068 */
+	{ 0x0000, 0x0000 }, /* R1069 */
+	{ 0x0000, 0x0000 }, /* R1070 */
+	{ 0x0000, 0x0000 }, /* R1071 */
+	{ 0x0000, 0x0000 }, /* R1072 */
+	{ 0x0000, 0x0000 }, /* R1073 */
+	{ 0x0000, 0x0000 }, /* R1074 */
+	{ 0x0000, 0x0000 }, /* R1075 */
+	{ 0x0000, 0x0000 }, /* R1076 */
+	{ 0x0000, 0x0000 }, /* R1077 */
+	{ 0x0000, 0x0000 }, /* R1078 */
+	{ 0x0000, 0x0000 }, /* R1079 */
+	{ 0x0000, 0x0000 }, /* R1080 */
+	{ 0x0000, 0x0000 }, /* R1081 */
+	{ 0x0000, 0x0000 }, /* R1082 */
+	{ 0x0000, 0x0000 }, /* R1083 */
+	{ 0x0000, 0x0000 }, /* R1084 */
+	{ 0x0000, 0x0000 }, /* R1085 */
+	{ 0x0000, 0x0000 }, /* R1086 */
+	{ 0x0000, 0x0000 }, /* R1087 */
+	{ 0xFFFF, 0xFFFF }, /* R1088  - AIF1 DRC1 (1) */
+	{ 0x1FFF, 0x1FFF }, /* R1089  - AIF1 DRC1 (2) */
+	{ 0xFFFF, 0xFFFF }, /* R1090  - AIF1 DRC1 (3) */
+	{ 0x07FF, 0x07FF }, /* R1091  - AIF1 DRC1 (4) */
+	{ 0x03FF, 0x03FF }, /* R1092  - AIF1 DRC1 (5) */
+	{ 0x0000, 0x0000 }, /* R1093 */
+	{ 0x0000, 0x0000 }, /* R1094 */
+	{ 0x0000, 0x0000 }, /* R1095 */
+	{ 0x0000, 0x0000 }, /* R1096 */
+	{ 0x0000, 0x0000 }, /* R1097 */
+	{ 0x0000, 0x0000 }, /* R1098 */
+	{ 0x0000, 0x0000 }, /* R1099 */
+	{ 0x0000, 0x0000 }, /* R1100 */
+	{ 0x0000, 0x0000 }, /* R1101 */
+	{ 0x0000, 0x0000 }, /* R1102 */
+	{ 0x0000, 0x0000 }, /* R1103 */
+	{ 0xFFFF, 0xFFFF }, /* R1104  - AIF1 DRC2 (1) */
+	{ 0x1FFF, 0x1FFF }, /* R1105  - AIF1 DRC2 (2) */
+	{ 0xFFFF, 0xFFFF }, /* R1106  - AIF1 DRC2 (3) */
+	{ 0x07FF, 0x07FF }, /* R1107  - AIF1 DRC2 (4) */
+	{ 0x03FF, 0x03FF }, /* R1108  - AIF1 DRC2 (5) */
+	{ 0x0000, 0x0000 }, /* R1109 */
+	{ 0x0000, 0x0000 }, /* R1110 */
+	{ 0x0000, 0x0000 }, /* R1111 */
+	{ 0x0000, 0x0000 }, /* R1112 */
+	{ 0x0000, 0x0000 }, /* R1113 */
+	{ 0x0000, 0x0000 }, /* R1114 */
+	{ 0x0000, 0x0000 }, /* R1115 */
+	{ 0x0000, 0x0000 }, /* R1116 */
+	{ 0x0000, 0x0000 }, /* R1117 */
+	{ 0x0000, 0x0000 }, /* R1118 */
+	{ 0x0000, 0x0000 }, /* R1119 */
+	{ 0x0000, 0x0000 }, /* R1120 */
+	{ 0x0000, 0x0000 }, /* R1121 */
+	{ 0x0000, 0x0000 }, /* R1122 */
+	{ 0x0000, 0x0000 }, /* R1123 */
+	{ 0x0000, 0x0000 }, /* R1124 */
+	{ 0x0000, 0x0000 }, /* R1125 */
+	{ 0x0000, 0x0000 }, /* R1126 */
+	{ 0x0000, 0x0000 }, /* R1127 */
+	{ 0x0000, 0x0000 }, /* R1128 */
+	{ 0x0000, 0x0000 }, /* R1129 */
+	{ 0x0000, 0x0000 }, /* R1130 */
+	{ 0x0000, 0x0000 }, /* R1131 */
+	{ 0x0000, 0x0000 }, /* R1132 */
+	{ 0x0000, 0x0000 }, /* R1133 */
+	{ 0x0000, 0x0000 }, /* R1134 */
+	{ 0x0000, 0x0000 }, /* R1135 */
+	{ 0x0000, 0x0000 }, /* R1136 */
+	{ 0x0000, 0x0000 }, /* R1137 */
+	{ 0x0000, 0x0000 }, /* R1138 */
+	{ 0x0000, 0x0000 }, /* R1139 */
+	{ 0x0000, 0x0000 }, /* R1140 */
+	{ 0x0000, 0x0000 }, /* R1141 */
+	{ 0x0000, 0x0000 }, /* R1142 */
+	{ 0x0000, 0x0000 }, /* R1143 */
+	{ 0x0000, 0x0000 }, /* R1144 */
+	{ 0x0000, 0x0000 }, /* R1145 */
+	{ 0x0000, 0x0000 }, /* R1146 */
+	{ 0x0000, 0x0000 }, /* R1147 */
+	{ 0x0000, 0x0000 }, /* R1148 */
+	{ 0x0000, 0x0000 }, /* R1149 */
+	{ 0x0000, 0x0000 }, /* R1150 */
+	{ 0x0000, 0x0000 }, /* R1151 */
+	{ 0xFFFF, 0xFFFF }, /* R1152  - AIF1 DAC1 EQ Gains (1) */
+	{ 0xFFC0, 0xFFC0 }, /* R1153  - AIF1 DAC1 EQ Gains (2) */
+	{ 0xFFFF, 0xFFFF }, /* R1154  - AIF1 DAC1 EQ Band 1 A */
+	{ 0xFFFF, 0xFFFF }, /* R1155  - AIF1 DAC1 EQ Band 1 B */
+	{ 0xFFFF, 0xFFFF }, /* R1156  - AIF1 DAC1 EQ Band 1 PG */
+	{ 0xFFFF, 0xFFFF }, /* R1157  - AIF1 DAC1 EQ Band 2 A */
+	{ 0xFFFF, 0xFFFF }, /* R1158  - AIF1 DAC1 EQ Band 2 B */
+	{ 0xFFFF, 0xFFFF }, /* R1159  - AIF1 DAC1 EQ Band 2 C */
+	{ 0xFFFF, 0xFFFF }, /* R1160  - AIF1 DAC1 EQ Band 2 PG */
+	{ 0xFFFF, 0xFFFF }, /* R1161  - AIF1 DAC1 EQ Band 3 A */
+	{ 0xFFFF, 0xFFFF }, /* R1162  - AIF1 DAC1 EQ Band 3 B */
+	{ 0xFFFF, 0xFFFF }, /* R1163  - AIF1 DAC1 EQ Band 3 C */
+	{ 0xFFFF, 0xFFFF }, /* R1164  - AIF1 DAC1 EQ Band 3 PG */
+	{ 0xFFFF, 0xFFFF }, /* R1165  - AIF1 DAC1 EQ Band 4 A */
+	{ 0xFFFF, 0xFFFF }, /* R1166  - AIF1 DAC1 EQ Band 4 B */
+	{ 0xFFFF, 0xFFFF }, /* R1167  - AIF1 DAC1 EQ Band 4 C */
+	{ 0xFFFF, 0xFFFF }, /* R1168  - AIF1 DAC1 EQ Band 4 PG */
+	{ 0xFFFF, 0xFFFF }, /* R1169  - AIF1 DAC1 EQ Band 5 A */
+	{ 0xFFFF, 0xFFFF }, /* R1170  - AIF1 DAC1 EQ Band 5 B */
+	{ 0xFFFF, 0xFFFF }, /* R1171  - AIF1 DAC1 EQ Band 5 PG */
+	{ 0x0000, 0x0000 }, /* R1172 */
+	{ 0x0000, 0x0000 }, /* R1173 */
+	{ 0x0000, 0x0000 }, /* R1174 */
+	{ 0x0000, 0x0000 }, /* R1175 */
+	{ 0x0000, 0x0000 }, /* R1176 */
+	{ 0x0000, 0x0000 }, /* R1177 */
+	{ 0x0000, 0x0000 }, /* R1178 */
+	{ 0x0000, 0x0000 }, /* R1179 */
+	{ 0x0000, 0x0000 }, /* R1180 */
+	{ 0x0000, 0x0000 }, /* R1181 */
+	{ 0x0000, 0x0000 }, /* R1182 */
+	{ 0x0000, 0x0000 }, /* R1183 */
+	{ 0xFFFF, 0xFFFF }, /* R1184  - AIF1 DAC2 EQ Gains (1) */
+	{ 0xFFC0, 0xFFC0 }, /* R1185  - AIF1 DAC2 EQ Gains (2) */
+	{ 0xFFFF, 0xFFFF }, /* R1186  - AIF1 DAC2 EQ Band 1 A */
+	{ 0xFFFF, 0xFFFF }, /* R1187  - AIF1 DAC2 EQ Band 1 B */
+	{ 0xFFFF, 0xFFFF }, /* R1188  - AIF1 DAC2 EQ Band 1 PG */
+	{ 0xFFFF, 0xFFFF }, /* R1189  - AIF1 DAC2 EQ Band 2 A */
+	{ 0xFFFF, 0xFFFF }, /* R1190  - AIF1 DAC2 EQ Band 2 B */
+	{ 0xFFFF, 0xFFFF }, /* R1191  - AIF1 DAC2 EQ Band 2 C */
+	{ 0xFFFF, 0xFFFF }, /* R1192  - AIF1 DAC2 EQ Band 2 PG */
+	{ 0xFFFF, 0xFFFF }, /* R1193  - AIF1 DAC2 EQ Band 3 A */
+	{ 0xFFFF, 0xFFFF }, /* R1194  - AIF1 DAC2 EQ Band 3 B */
+	{ 0xFFFF, 0xFFFF }, /* R1195  - AIF1 DAC2 EQ Band 3 C */
+	{ 0xFFFF, 0xFFFF }, /* R1196  - AIF1 DAC2 EQ Band 3 PG */
+	{ 0xFFFF, 0xFFFF }, /* R1197  - AIF1 DAC2 EQ Band 4 A */
+	{ 0xFFFF, 0xFFFF }, /* R1198  - AIF1 DAC2 EQ Band 4 B */
+	{ 0xFFFF, 0xFFFF }, /* R1199  - AIF1 DAC2 EQ Band 4 C */
+	{ 0xFFFF, 0xFFFF }, /* R1200  - AIF1 DAC2 EQ Band 4 PG */
+	{ 0xFFFF, 0xFFFF }, /* R1201  - AIF1 DAC2 EQ Band 5 A */
+	{ 0xFFFF, 0xFFFF }, /* R1202  - AIF1 DAC2 EQ Band 5 B */
+	{ 0xFFFF, 0xFFFF }, /* R1203  - AIF1 DAC2 EQ Band 5 PG */
+	{ 0x0000, 0x0000 }, /* R1204 */
+	{ 0x0000, 0x0000 }, /* R1205 */
+	{ 0x0000, 0x0000 }, /* R1206 */
+	{ 0x0000, 0x0000 }, /* R1207 */
+	{ 0x0000, 0x0000 }, /* R1208 */
+	{ 0x0000, 0x0000 }, /* R1209 */
+	{ 0x0000, 0x0000 }, /* R1210 */
+	{ 0x0000, 0x0000 }, /* R1211 */
+	{ 0x0000, 0x0000 }, /* R1212 */
+	{ 0x0000, 0x0000 }, /* R1213 */
+	{ 0x0000, 0x0000 }, /* R1214 */
+	{ 0x0000, 0x0000 }, /* R1215 */
+	{ 0x0000, 0x0000 }, /* R1216 */
+	{ 0x0000, 0x0000 }, /* R1217 */
+	{ 0x0000, 0x0000 }, /* R1218 */
+	{ 0x0000, 0x0000 }, /* R1219 */
+	{ 0x0000, 0x0000 }, /* R1220 */
+	{ 0x0000, 0x0000 }, /* R1221 */
+	{ 0x0000, 0x0000 }, /* R1222 */
+	{ 0x0000, 0x0000 }, /* R1223 */
+	{ 0x0000, 0x0000 }, /* R1224 */
+	{ 0x0000, 0x0000 }, /* R1225 */
+	{ 0x0000, 0x0000 }, /* R1226 */
+	{ 0x0000, 0x0000 }, /* R1227 */
+	{ 0x0000, 0x0000 }, /* R1228 */
+	{ 0x0000, 0x0000 }, /* R1229 */
+	{ 0x0000, 0x0000 }, /* R1230 */
+	{ 0x0000, 0x0000 }, /* R1231 */
+	{ 0x0000, 0x0000 }, /* R1232 */
+	{ 0x0000, 0x0000 }, /* R1233 */
+	{ 0x0000, 0x0000 }, /* R1234 */
+	{ 0x0000, 0x0000 }, /* R1235 */
+	{ 0x0000, 0x0000 }, /* R1236 */
+	{ 0x0000, 0x0000 }, /* R1237 */
+	{ 0x0000, 0x0000 }, /* R1238 */
+	{ 0x0000, 0x0000 }, /* R1239 */
+	{ 0x0000, 0x0000 }, /* R1240 */
+	{ 0x0000, 0x0000 }, /* R1241 */
+	{ 0x0000, 0x0000 }, /* R1242 */
+	{ 0x0000, 0x0000 }, /* R1243 */
+	{ 0x0000, 0x0000 }, /* R1244 */
+	{ 0x0000, 0x0000 }, /* R1245 */
+	{ 0x0000, 0x0000 }, /* R1246 */
+	{ 0x0000, 0x0000 }, /* R1247 */
+	{ 0x0000, 0x0000 }, /* R1248 */
+	{ 0x0000, 0x0000 }, /* R1249 */
+	{ 0x0000, 0x0000 }, /* R1250 */
+	{ 0x0000, 0x0000 }, /* R1251 */
+	{ 0x0000, 0x0000 }, /* R1252 */
+	{ 0x0000, 0x0000 }, /* R1253 */
+	{ 0x0000, 0x0000 }, /* R1254 */
+	{ 0x0000, 0x0000 }, /* R1255 */
+	{ 0x0000, 0x0000 }, /* R1256 */
+	{ 0x0000, 0x0000 }, /* R1257 */
+	{ 0x0000, 0x0000 }, /* R1258 */
+	{ 0x0000, 0x0000 }, /* R1259 */
+	{ 0x0000, 0x0000 }, /* R1260 */
+	{ 0x0000, 0x0000 }, /* R1261 */
+	{ 0x0000, 0x0000 }, /* R1262 */
+	{ 0x0000, 0x0000 }, /* R1263 */
+	{ 0x0000, 0x0000 }, /* R1264 */
+	{ 0x0000, 0x0000 }, /* R1265 */
+	{ 0x0000, 0x0000 }, /* R1266 */
+	{ 0x0000, 0x0000 }, /* R1267 */
+	{ 0x0000, 0x0000 }, /* R1268 */
+	{ 0x0000, 0x0000 }, /* R1269 */
+	{ 0x0000, 0x0000 }, /* R1270 */
+	{ 0x0000, 0x0000 }, /* R1271 */
+	{ 0x0000, 0x0000 }, /* R1272 */
+	{ 0x0000, 0x0000 }, /* R1273 */
+	{ 0x0000, 0x0000 }, /* R1274 */
+	{ 0x0000, 0x0000 }, /* R1275 */
+	{ 0x0000, 0x0000 }, /* R1276 */
+	{ 0x0000, 0x0000 }, /* R1277 */
+	{ 0x0000, 0x0000 }, /* R1278 */
+	{ 0x0000, 0x0000 }, /* R1279 */
+	{ 0x00FF, 0x01FF }, /* R1280  - AIF2 ADC Left Volume */
+	{ 0x00FF, 0x01FF }, /* R1281  - AIF2 ADC Right Volume */
+	{ 0x00FF, 0x01FF }, /* R1282  - AIF2 DAC Left Volume */
+	{ 0x00FF, 0x01FF }, /* R1283  - AIF2 DAC Right Volume */
+	{ 0x0000, 0x0000 }, /* R1284 */
+	{ 0x0000, 0x0000 }, /* R1285 */
+	{ 0x0000, 0x0000 }, /* R1286 */
+	{ 0x0000, 0x0000 }, /* R1287 */
+	{ 0x0000, 0x0000 }, /* R1288 */
+	{ 0x0000, 0x0000 }, /* R1289 */
+	{ 0x0000, 0x0000 }, /* R1290 */
+	{ 0x0000, 0x0000 }, /* R1291 */
+	{ 0x0000, 0x0000 }, /* R1292 */
+	{ 0x0000, 0x0000 }, /* R1293 */
+	{ 0x0000, 0x0000 }, /* R1294 */
+	{ 0x0000, 0x0000 }, /* R1295 */
+	{ 0xF800, 0xF800 }, /* R1296  - AIF2 ADC Filters */
+	{ 0x0000, 0x0000 }, /* R1297 */
+	{ 0x0000, 0x0000 }, /* R1298 */
+	{ 0x0000, 0x0000 }, /* R1299 */
+	{ 0x0000, 0x0000 }, /* R1300 */
+	{ 0x0000, 0x0000 }, /* R1301 */
+	{ 0x0000, 0x0000 }, /* R1302 */
+	{ 0x0000, 0x0000 }, /* R1303 */
+	{ 0x0000, 0x0000 }, /* R1304 */
+	{ 0x0000, 0x0000 }, /* R1305 */
+	{ 0x0000, 0x0000 }, /* R1306 */
+	{ 0x0000, 0x0000 }, /* R1307 */
+	{ 0x0000, 0x0000 }, /* R1308 */
+	{ 0x0000, 0x0000 }, /* R1309 */
+	{ 0x0000, 0x0000 }, /* R1310 */
+	{ 0x0000, 0x0000 }, /* R1311 */
+	{ 0x02B6, 0x02B6 }, /* R1312  - AIF2 DAC Filters (1) */
+	{ 0x3F00, 0x3F00 }, /* R1313  - AIF2 DAC Filters (2) */
+	{ 0x0000, 0x0000 }, /* R1314 */
+	{ 0x0000, 0x0000 }, /* R1315 */
+	{ 0x0000, 0x0000 }, /* R1316 */
+	{ 0x0000, 0x0000 }, /* R1317 */
+	{ 0x0000, 0x0000 }, /* R1318 */
+	{ 0x0000, 0x0000 }, /* R1319 */
+	{ 0x0000, 0x0000 }, /* R1320 */
+	{ 0x0000, 0x0000 }, /* R1321 */
+	{ 0x0000, 0x0000 }, /* R1322 */
+	{ 0x0000, 0x0000 }, /* R1323 */
+	{ 0x0000, 0x0000 }, /* R1324 */
+	{ 0x0000, 0x0000 }, /* R1325 */
+	{ 0x0000, 0x0000 }, /* R1326 */
+	{ 0x0000, 0x0000 }, /* R1327 */
+	{ 0x0000, 0x0000 }, /* R1328 */
+	{ 0x0000, 0x0000 }, /* R1329 */
+	{ 0x0000, 0x0000 }, /* R1330 */
+	{ 0x0000, 0x0000 }, /* R1331 */
+	{ 0x0000, 0x0000 }, /* R1332 */
+	{ 0x0000, 0x0000 }, /* R1333 */
+	{ 0x0000, 0x0000 }, /* R1334 */
+	{ 0x0000, 0x0000 }, /* R1335 */
+	{ 0x0000, 0x0000 }, /* R1336 */
+	{ 0x0000, 0x0000 }, /* R1337 */
+	{ 0x0000, 0x0000 }, /* R1338 */
+	{ 0x0000, 0x0000 }, /* R1339 */
+	{ 0x0000, 0x0000 }, /* R1340 */
+	{ 0x0000, 0x0000 }, /* R1341 */
+	{ 0x0000, 0x0000 }, /* R1342 */
+	{ 0x0000, 0x0000 }, /* R1343 */
+	{ 0xFFFF, 0xFFFF }, /* R1344  - AIF2 DRC (1) */
+	{ 0x1FFF, 0x1FFF }, /* R1345  - AIF2 DRC (2) */
+	{ 0xFFFF, 0xFFFF }, /* R1346  - AIF2 DRC (3) */
+	{ 0x07FF, 0x07FF }, /* R1347  - AIF2 DRC (4) */
+	{ 0x03FF, 0x03FF }, /* R1348  - AIF2 DRC (5) */
+	{ 0x0000, 0x0000 }, /* R1349 */
+	{ 0x0000, 0x0000 }, /* R1350 */
+	{ 0x0000, 0x0000 }, /* R1351 */
+	{ 0x0000, 0x0000 }, /* R1352 */
+	{ 0x0000, 0x0000 }, /* R1353 */
+	{ 0x0000, 0x0000 }, /* R1354 */
+	{ 0x0000, 0x0000 }, /* R1355 */
+	{ 0x0000, 0x0000 }, /* R1356 */
+	{ 0x0000, 0x0000 }, /* R1357 */
+	{ 0x0000, 0x0000 }, /* R1358 */
+	{ 0x0000, 0x0000 }, /* R1359 */
+	{ 0x0000, 0x0000 }, /* R1360 */
+	{ 0x0000, 0x0000 }, /* R1361 */
+	{ 0x0000, 0x0000 }, /* R1362 */
+	{ 0x0000, 0x0000 }, /* R1363 */
+	{ 0x0000, 0x0000 }, /* R1364 */
+	{ 0x0000, 0x0000 }, /* R1365 */
+	{ 0x0000, 0x0000 }, /* R1366 */
+	{ 0x0000, 0x0000 }, /* R1367 */
+	{ 0x0000, 0x0000 }, /* R1368 */
+	{ 0x0000, 0x0000 }, /* R1369 */
+	{ 0x0000, 0x0000 }, /* R1370 */
+	{ 0x0000, 0x0000 }, /* R1371 */
+	{ 0x0000, 0x0000 }, /* R1372 */
+	{ 0x0000, 0x0000 }, /* R1373 */
+	{ 0x0000, 0x0000 }, /* R1374 */
+	{ 0x0000, 0x0000 }, /* R1375 */
+	{ 0x0000, 0x0000 }, /* R1376 */
+	{ 0x0000, 0x0000 }, /* R1377 */
+	{ 0x0000, 0x0000 }, /* R1378 */
+	{ 0x0000, 0x0000 }, /* R1379 */
+	{ 0x0000, 0x0000 }, /* R1380 */
+	{ 0x0000, 0x0000 }, /* R1381 */
+	{ 0x0000, 0x0000 }, /* R1382 */
+	{ 0x0000, 0x0000 }, /* R1383 */
+	{ 0x0000, 0x0000 }, /* R1384 */
+	{ 0x0000, 0x0000 }, /* R1385 */
+	{ 0x0000, 0x0000 }, /* R1386 */
+	{ 0x0000, 0x0000 }, /* R1387 */
+	{ 0x0000, 0x0000 }, /* R1388 */
+	{ 0x0000, 0x0000 }, /* R1389 */
+	{ 0x0000, 0x0000 }, /* R1390 */
+	{ 0x0000, 0x0000 }, /* R1391 */
+	{ 0x0000, 0x0000 }, /* R1392 */
+	{ 0x0000, 0x0000 }, /* R1393 */
+	{ 0x0000, 0x0000 }, /* R1394 */
+	{ 0x0000, 0x0000 }, /* R1395 */
+	{ 0x0000, 0x0000 }, /* R1396 */
+	{ 0x0000, 0x0000 }, /* R1397 */
+	{ 0x0000, 0x0000 }, /* R1398 */
+	{ 0x0000, 0x0000 }, /* R1399 */
+	{ 0x0000, 0x0000 }, /* R1400 */
+	{ 0x0000, 0x0000 }, /* R1401 */
+	{ 0x0000, 0x0000 }, /* R1402 */
+	{ 0x0000, 0x0000 }, /* R1403 */
+	{ 0x0000, 0x0000 }, /* R1404 */
+	{ 0x0000, 0x0000 }, /* R1405 */
+	{ 0x0000, 0x0000 }, /* R1406 */
+	{ 0x0000, 0x0000 }, /* R1407 */
+	{ 0xFFFF, 0xFFFF }, /* R1408  - AIF2 EQ Gains (1) */
+	{ 0xFFC0, 0xFFC0 }, /* R1409  - AIF2 EQ Gains (2) */
+	{ 0xFFFF, 0xFFFF }, /* R1410  - AIF2 EQ Band 1 A */
+	{ 0xFFFF, 0xFFFF }, /* R1411  - AIF2 EQ Band 1 B */
+	{ 0xFFFF, 0xFFFF }, /* R1412  - AIF2 EQ Band 1 PG */
+	{ 0xFFFF, 0xFFFF }, /* R1413  - AIF2 EQ Band 2 A */
+	{ 0xFFFF, 0xFFFF }, /* R1414  - AIF2 EQ Band 2 B */
+	{ 0xFFFF, 0xFFFF }, /* R1415  - AIF2 EQ Band 2 C */
+	{ 0xFFFF, 0xFFFF }, /* R1416  - AIF2 EQ Band 2 PG */
+	{ 0xFFFF, 0xFFFF }, /* R1417  - AIF2 EQ Band 3 A */
+	{ 0xFFFF, 0xFFFF }, /* R1418  - AIF2 EQ Band 3 B */
+	{ 0xFFFF, 0xFFFF }, /* R1419  - AIF2 EQ Band 3 C */
+	{ 0xFFFF, 0xFFFF }, /* R1420  - AIF2 EQ Band 3 PG */
+	{ 0xFFFF, 0xFFFF }, /* R1421  - AIF2 EQ Band 4 A */
+	{ 0xFFFF, 0xFFFF }, /* R1422  - AIF2 EQ Band 4 B */
+	{ 0xFFFF, 0xFFFF }, /* R1423  - AIF2 EQ Band 4 C */
+	{ 0xFFFF, 0xFFFF }, /* R1424  - AIF2 EQ Band 4 PG */
+	{ 0xFFFF, 0xFFFF }, /* R1425  - AIF2 EQ Band 5 A */
+	{ 0xFFFF, 0xFFFF }, /* R1426  - AIF2 EQ Band 5 B */
+	{ 0xFFFF, 0xFFFF }, /* R1427  - AIF2 EQ Band 5 PG */
+	{ 0x0000, 0x0000 }, /* R1428 */
+	{ 0x0000, 0x0000 }, /* R1429 */
+	{ 0x0000, 0x0000 }, /* R1430 */
+	{ 0x0000, 0x0000 }, /* R1431 */
+	{ 0x0000, 0x0000 }, /* R1432 */
+	{ 0x0000, 0x0000 }, /* R1433 */
+	{ 0x0000, 0x0000 }, /* R1434 */
+	{ 0x0000, 0x0000 }, /* R1435 */
+	{ 0x0000, 0x0000 }, /* R1436 */
+	{ 0x0000, 0x0000 }, /* R1437 */
+	{ 0x0000, 0x0000 }, /* R1438 */
+	{ 0x0000, 0x0000 }, /* R1439 */
+	{ 0x0000, 0x0000 }, /* R1440 */
+	{ 0x0000, 0x0000 }, /* R1441 */
+	{ 0x0000, 0x0000 }, /* R1442 */
+	{ 0x0000, 0x0000 }, /* R1443 */
+	{ 0x0000, 0x0000 }, /* R1444 */
+	{ 0x0000, 0x0000 }, /* R1445 */
+	{ 0x0000, 0x0000 }, /* R1446 */
+	{ 0x0000, 0x0000 }, /* R1447 */
+	{ 0x0000, 0x0000 }, /* R1448 */
+	{ 0x0000, 0x0000 }, /* R1449 */
+	{ 0x0000, 0x0000 }, /* R1450 */
+	{ 0x0000, 0x0000 }, /* R1451 */
+	{ 0x0000, 0x0000 }, /* R1452 */
+	{ 0x0000, 0x0000 }, /* R1453 */
+	{ 0x0000, 0x0000 }, /* R1454 */
+	{ 0x0000, 0x0000 }, /* R1455 */
+	{ 0x0000, 0x0000 }, /* R1456 */
+	{ 0x0000, 0x0000 }, /* R1457 */
+	{ 0x0000, 0x0000 }, /* R1458 */
+	{ 0x0000, 0x0000 }, /* R1459 */
+	{ 0x0000, 0x0000 }, /* R1460 */
+	{ 0x0000, 0x0000 }, /* R1461 */
+	{ 0x0000, 0x0000 }, /* R1462 */
+	{ 0x0000, 0x0000 }, /* R1463 */
+	{ 0x0000, 0x0000 }, /* R1464 */
+	{ 0x0000, 0x0000 }, /* R1465 */
+	{ 0x0000, 0x0000 }, /* R1466 */
+	{ 0x0000, 0x0000 }, /* R1467 */
+	{ 0x0000, 0x0000 }, /* R1468 */
+	{ 0x0000, 0x0000 }, /* R1469 */
+	{ 0x0000, 0x0000 }, /* R1470 */
+	{ 0x0000, 0x0000 }, /* R1471 */
+	{ 0x0000, 0x0000 }, /* R1472 */
+	{ 0x0000, 0x0000 }, /* R1473 */
+	{ 0x0000, 0x0000 }, /* R1474 */
+	{ 0x0000, 0x0000 }, /* R1475 */
+	{ 0x0000, 0x0000 }, /* R1476 */
+	{ 0x0000, 0x0000 }, /* R1477 */
+	{ 0x0000, 0x0000 }, /* R1478 */
+	{ 0x0000, 0x0000 }, /* R1479 */
+	{ 0x0000, 0x0000 }, /* R1480 */
+	{ 0x0000, 0x0000 }, /* R1481 */
+	{ 0x0000, 0x0000 }, /* R1482 */
+	{ 0x0000, 0x0000 }, /* R1483 */
+	{ 0x0000, 0x0000 }, /* R1484 */
+	{ 0x0000, 0x0000 }, /* R1485 */
+	{ 0x0000, 0x0000 }, /* R1486 */
+	{ 0x0000, 0x0000 }, /* R1487 */
+	{ 0x0000, 0x0000 }, /* R1488 */
+	{ 0x0000, 0x0000 }, /* R1489 */
+	{ 0x0000, 0x0000 }, /* R1490 */
+	{ 0x0000, 0x0000 }, /* R1491 */
+	{ 0x0000, 0x0000 }, /* R1492 */
+	{ 0x0000, 0x0000 }, /* R1493 */
+	{ 0x0000, 0x0000 }, /* R1494 */
+	{ 0x0000, 0x0000 }, /* R1495 */
+	{ 0x0000, 0x0000 }, /* R1496 */
+	{ 0x0000, 0x0000 }, /* R1497 */
+	{ 0x0000, 0x0000 }, /* R1498 */
+	{ 0x0000, 0x0000 }, /* R1499 */
+	{ 0x0000, 0x0000 }, /* R1500 */
+	{ 0x0000, 0x0000 }, /* R1501 */
+	{ 0x0000, 0x0000 }, /* R1502 */
+	{ 0x0000, 0x0000 }, /* R1503 */
+	{ 0x0000, 0x0000 }, /* R1504 */
+	{ 0x0000, 0x0000 }, /* R1505 */
+	{ 0x0000, 0x0000 }, /* R1506 */
+	{ 0x0000, 0x0000 }, /* R1507 */
+	{ 0x0000, 0x0000 }, /* R1508 */
+	{ 0x0000, 0x0000 }, /* R1509 */
+	{ 0x0000, 0x0000 }, /* R1510 */
+	{ 0x0000, 0x0000 }, /* R1511 */
+	{ 0x0000, 0x0000 }, /* R1512 */
+	{ 0x0000, 0x0000 }, /* R1513 */
+	{ 0x0000, 0x0000 }, /* R1514 */
+	{ 0x0000, 0x0000 }, /* R1515 */
+	{ 0x0000, 0x0000 }, /* R1516 */
+	{ 0x0000, 0x0000 }, /* R1517 */
+	{ 0x0000, 0x0000 }, /* R1518 */
+	{ 0x0000, 0x0000 }, /* R1519 */
+	{ 0x0000, 0x0000 }, /* R1520 */
+	{ 0x0000, 0x0000 }, /* R1521 */
+	{ 0x0000, 0x0000 }, /* R1522 */
+	{ 0x0000, 0x0000 }, /* R1523 */
+	{ 0x0000, 0x0000 }, /* R1524 */
+	{ 0x0000, 0x0000 }, /* R1525 */
+	{ 0x0000, 0x0000 }, /* R1526 */
+	{ 0x0000, 0x0000 }, /* R1527 */
+	{ 0x0000, 0x0000 }, /* R1528 */
+	{ 0x0000, 0x0000 }, /* R1529 */
+	{ 0x0000, 0x0000 }, /* R1530 */
+	{ 0x0000, 0x0000 }, /* R1531 */
+	{ 0x0000, 0x0000 }, /* R1532 */
+	{ 0x0000, 0x0000 }, /* R1533 */
+	{ 0x0000, 0x0000 }, /* R1534 */
+	{ 0x0000, 0x0000 }, /* R1535 */
+	{ 0x01EF, 0x01EF }, /* R1536  - DAC1 Mixer Volumes */
+	{ 0x0037, 0x0037 }, /* R1537  - DAC1 Left Mixer Routing */
+	{ 0x0037, 0x0037 }, /* R1538  - DAC1 Right Mixer Routing */
+	{ 0x01EF, 0x01EF }, /* R1539  - DAC2 Mixer Volumes */
+	{ 0x0037, 0x0037 }, /* R1540  - DAC2 Left Mixer Routing */
+	{ 0x0037, 0x0037 }, /* R1541  - DAC2 Right Mixer Routing */
+	{ 0x0003, 0x0003 }, /* R1542  - AIF1 ADC1 Left Mixer Routing */
+	{ 0x0003, 0x0003 }, /* R1543  - AIF1 ADC1 Right Mixer Routing */
+	{ 0x0003, 0x0003 }, /* R1544  - AIF1 ADC2 Left Mixer Routing */
+	{ 0x0003, 0x0003 }, /* R1545  - AIF1 ADC2 Right Mixer Routing */
+	{ 0x0000, 0x0000 }, /* R1546 */
+	{ 0x0000, 0x0000 }, /* R1547 */
+	{ 0x0000, 0x0000 }, /* R1548 */
+	{ 0x0000, 0x0000 }, /* R1549 */
+	{ 0x0000, 0x0000 }, /* R1550 */
+	{ 0x0000, 0x0000 }, /* R1551 */
+	{ 0x02FF, 0x03FF }, /* R1552  - DAC1 Left Volume */
+	{ 0x02FF, 0x03FF }, /* R1553  - DAC1 Right Volume */
+	{ 0x02FF, 0x03FF }, /* R1554  - DAC2 Left Volume */
+	{ 0x02FF, 0x03FF }, /* R1555  - DAC2 Right Volume */
+	{ 0x0003, 0x0003 }, /* R1556  - DAC Softmute */
+	{ 0x0000, 0x0000 }, /* R1557 */
+	{ 0x0000, 0x0000 }, /* R1558 */
+	{ 0x0000, 0x0000 }, /* R1559 */
+	{ 0x0000, 0x0000 }, /* R1560 */
+	{ 0x0000, 0x0000 }, /* R1561 */
+	{ 0x0000, 0x0000 }, /* R1562 */
+	{ 0x0000, 0x0000 }, /* R1563 */
+	{ 0x0000, 0x0000 }, /* R1564 */
+	{ 0x0000, 0x0000 }, /* R1565 */
+	{ 0x0000, 0x0000 }, /* R1566 */
+	{ 0x0000, 0x0000 }, /* R1567 */
+	{ 0x0003, 0x0003 }, /* R1568  - Oversampling */
+	{ 0x03C3, 0x03C3 }, /* R1569  - Sidetone */
+};
+
+const __devinitdata u16 wm8994_reg_defaults[WM8994_CACHE_SIZE] = {
+	0x8994,     /* R0     - Software Reset */
+	0x0000,     /* R1     - Power Management (1) */
+	0x6000,     /* R2     - Power Management (2) */
+	0x0000,     /* R3     - Power Management (3) */
+	0x0000,     /* R4     - Power Management (4) */
+	0x0000,     /* R5     - Power Management (5) */
+	0x0000,     /* R6     - Power Management (6) */
+	0x0000,     /* R7 */
+	0x0000,     /* R8 */
+	0x0000,     /* R9 */
+	0x0000,     /* R10 */
+	0x0000,     /* R11 */
+	0x0000,     /* R12 */
+	0x0000,     /* R13 */
+	0x0000,     /* R14 */
+	0x0000,     /* R15 */
+	0x0000,     /* R16 */
+	0x0000,     /* R17 */
+	0x0000,     /* R18 */
+	0x0000,     /* R19 */
+	0x0000,     /* R20 */
+	0x0000,     /* R21    - Input Mixer (1) */
+	0x0000,     /* R22 */
+	0x0000,     /* R23 */
+	0x008B,     /* R24    - Left Line Input 1&2 Volume */
+	0x008B,     /* R25    - Left Line Input 3&4 Volume */
+	0x008B,     /* R26    - Right Line Input 1&2 Volume */
+	0x008B,     /* R27    - Right Line Input 3&4 Volume */
+	0x006D,     /* R28    - Left Output Volume */
+	0x006D,     /* R29    - Right Output Volume */
+	0x0066,     /* R30    - Line Outputs Volume */
+	0x0020,     /* R31    - HPOUT2 Volume */
+	0x0079,     /* R32    - Left OPGA Volume */
+	0x0079,     /* R33    - Right OPGA Volume */
+	0x0003,     /* R34    - SPKMIXL Attenuation */
+	0x0003,     /* R35    - SPKMIXR Attenuation */
+	0x0011,     /* R36    - SPKOUT Mixers */
+	0x0140,     /* R37    - ClassD */
+	0x0079,     /* R38    - Speaker Volume Left */
+	0x0079,     /* R39    - Speaker Volume Right */
+	0x0000,     /* R40    - Input Mixer (2) */
+	0x0000,     /* R41    - Input Mixer (3) */
+	0x0000,     /* R42    - Input Mixer (4) */
+	0x0000,     /* R43    - Input Mixer (5) */
+	0x0000,     /* R44    - Input Mixer (6) */
+	0x0000,     /* R45    - Output Mixer (1) */
+	0x0000,     /* R46    - Output Mixer (2) */
+	0x0000,     /* R47    - Output Mixer (3) */
+	0x0000,     /* R48    - Output Mixer (4) */
+	0x0000,     /* R49    - Output Mixer (5) */
+	0x0000,     /* R50    - Output Mixer (6) */
+	0x0000,     /* R51    - HPOUT2 Mixer */
+	0x0000,     /* R52    - Line Mixer (1) */
+	0x0000,     /* R53    - Line Mixer (2) */
+	0x0000,     /* R54    - Speaker Mixer */
+	0x0000,     /* R55    - Additional Control */
+	0x0000,     /* R56    - AntiPOP (1) */
+	0x0000,     /* R57    - AntiPOP (2) */
+	0x0000,     /* R58    - MICBIAS */
+	0x000D,     /* R59    - LDO 1 */
+	0x0003,     /* R60    - LDO 2 */
+	0x0000,     /* R61 */
+	0x0000,     /* R62 */
+	0x0000,     /* R63 */
+	0x0000,     /* R64 */
+	0x0000,     /* R65 */
+	0x0000,     /* R66 */
+	0x0000,     /* R67 */
+	0x0000,     /* R68 */
+	0x0000,     /* R69 */
+	0x0000,     /* R70 */
+	0x0000,     /* R71 */
+	0x0000,     /* R72 */
+	0x0000,     /* R73 */
+	0x0000,     /* R74 */
+	0x0000,     /* R75 */
+	0x1F25,     /* R76    - Charge Pump (1) */
+	0x0000,     /* R77 */
+	0x0000,     /* R78 */
+	0x0000,     /* R79 */
+	0x0000,     /* R80 */
+	0x0004,     /* R81    - Class W (1) */
+	0x0000,     /* R82 */
+	0x0000,     /* R83 */
+	0x0000,     /* R84    - DC Servo (1) */
+	0x054A,     /* R85    - DC Servo (2) */
+	0x0000,     /* R86 */
+	0x0000,     /* R87    - DC Servo (4) */
+	0x0000,     /* R88    - DC Servo Readback */
+	0x0000,     /* R89 */
+	0x0000,     /* R90 */
+	0x0000,     /* R91 */
+	0x0000,     /* R92 */
+	0x0000,     /* R93 */
+	0x0000,     /* R94 */
+	0x0000,     /* R95 */
+	0x0000,     /* R96    - Analogue HP (1) */
+	0x0000,     /* R97 */
+	0x0000,     /* R98 */
+	0x0000,     /* R99 */
+	0x0000,     /* R100 */
+	0x0000,     /* R101 */
+	0x0000,     /* R102 */
+	0x0000,     /* R103 */
+	0x0000,     /* R104 */
+	0x0000,     /* R105 */
+	0x0000,     /* R106 */
+	0x0000,     /* R107 */
+	0x0000,     /* R108 */
+	0x0000,     /* R109 */
+	0x0000,     /* R110 */
+	0x0000,     /* R111 */
+	0x0000,     /* R112 */
+	0x0000,     /* R113 */
+	0x0000,     /* R114 */
+	0x0000,     /* R115 */
+	0x0000,     /* R116 */
+	0x0000,     /* R117 */
+	0x0000,     /* R118 */
+	0x0000,     /* R119 */
+	0x0000,     /* R120 */
+	0x0000,     /* R121 */
+	0x0000,     /* R122 */
+	0x0000,     /* R123 */
+	0x0000,     /* R124 */
+	0x0000,     /* R125 */
+	0x0000,     /* R126 */
+	0x0000,     /* R127 */
+	0x0000,     /* R128 */
+	0x0000,     /* R129 */
+	0x0000,     /* R130 */
+	0x0000,     /* R131 */
+	0x0000,     /* R132 */
+	0x0000,     /* R133 */
+	0x0000,     /* R134 */
+	0x0000,     /* R135 */
+	0x0000,     /* R136 */
+	0x0000,     /* R137 */
+	0x0000,     /* R138 */
+	0x0000,     /* R139 */
+	0x0000,     /* R140 */
+	0x0000,     /* R141 */
+	0x0000,     /* R142 */
+	0x0000,     /* R143 */
+	0x0000,     /* R144 */
+	0x0000,     /* R145 */
+	0x0000,     /* R146 */
+	0x0000,     /* R147 */
+	0x0000,     /* R148 */
+	0x0000,     /* R149 */
+	0x0000,     /* R150 */
+	0x0000,     /* R151 */
+	0x0000,     /* R152 */
+	0x0000,     /* R153 */
+	0x0000,     /* R154 */
+	0x0000,     /* R155 */
+	0x0000,     /* R156 */
+	0x0000,     /* R157 */
+	0x0000,     /* R158 */
+	0x0000,     /* R159 */
+	0x0000,     /* R160 */
+	0x0000,     /* R161 */
+	0x0000,     /* R162 */
+	0x0000,     /* R163 */
+	0x0000,     /* R164 */
+	0x0000,     /* R165 */
+	0x0000,     /* R166 */
+	0x0000,     /* R167 */
+	0x0000,     /* R168 */
+	0x0000,     /* R169 */
+	0x0000,     /* R170 */
+	0x0000,     /* R171 */
+	0x0000,     /* R172 */
+	0x0000,     /* R173 */
+	0x0000,     /* R174 */
+	0x0000,     /* R175 */
+	0x0000,     /* R176 */
+	0x0000,     /* R177 */
+	0x0000,     /* R178 */
+	0x0000,     /* R179 */
+	0x0000,     /* R180 */
+	0x0000,     /* R181 */
+	0x0000,     /* R182 */
+	0x0000,     /* R183 */
+	0x0000,     /* R184 */
+	0x0000,     /* R185 */
+	0x0000,     /* R186 */
+	0x0000,     /* R187 */
+	0x0000,     /* R188 */
+	0x0000,     /* R189 */
+	0x0000,     /* R190 */
+	0x0000,     /* R191 */
+	0x0000,     /* R192 */
+	0x0000,     /* R193 */
+	0x0000,     /* R194 */
+	0x0000,     /* R195 */
+	0x0000,     /* R196 */
+	0x0000,     /* R197 */
+	0x0000,     /* R198 */
+	0x0000,     /* R199 */
+	0x0000,     /* R200 */
+	0x0000,     /* R201 */
+	0x0000,     /* R202 */
+	0x0000,     /* R203 */
+	0x0000,     /* R204 */
+	0x0000,     /* R205 */
+	0x0000,     /* R206 */
+	0x0000,     /* R207 */
+	0x0000,     /* R208 */
+	0x0000,     /* R209 */
+	0x0000,     /* R210 */
+	0x0000,     /* R211 */
+	0x0000,     /* R212 */
+	0x0000,     /* R213 */
+	0x0000,     /* R214 */
+	0x0000,     /* R215 */
+	0x0000,     /* R216 */
+	0x0000,     /* R217 */
+	0x0000,     /* R218 */
+	0x0000,     /* R219 */
+	0x0000,     /* R220 */
+	0x0000,     /* R221 */
+	0x0000,     /* R222 */
+	0x0000,     /* R223 */
+	0x0000,     /* R224 */
+	0x0000,     /* R225 */
+	0x0000,     /* R226 */
+	0x0000,     /* R227 */
+	0x0000,     /* R228 */
+	0x0000,     /* R229 */
+	0x0000,     /* R230 */
+	0x0000,     /* R231 */
+	0x0000,     /* R232 */
+	0x0000,     /* R233 */
+	0x0000,     /* R234 */
+	0x0000,     /* R235 */
+	0x0000,     /* R236 */
+	0x0000,     /* R237 */
+	0x0000,     /* R238 */
+	0x0000,     /* R239 */
+	0x0000,     /* R240 */
+	0x0000,     /* R241 */
+	0x0000,     /* R242 */
+	0x0000,     /* R243 */
+	0x0000,     /* R244 */
+	0x0000,     /* R245 */
+	0x0000,     /* R246 */
+	0x0000,     /* R247 */
+	0x0000,     /* R248 */
+	0x0000,     /* R249 */
+	0x0000,     /* R250 */
+	0x0000,     /* R251 */
+	0x0000,     /* R252 */
+	0x0000,     /* R253 */
+	0x0000,     /* R254 */
+	0x0000,     /* R255 */
+	0x0003,     /* R256   - Chip Revision */
+	0x8004,     /* R257   - Control Interface */
+	0x0000,     /* R258 */
+	0x0000,     /* R259 */
+	0x0000,     /* R260 */
+	0x0000,     /* R261 */
+	0x0000,     /* R262 */
+	0x0000,     /* R263 */
+	0x0000,     /* R264 */
+	0x0000,     /* R265 */
+	0x0000,     /* R266 */
+	0x0000,     /* R267 */
+	0x0000,     /* R268 */
+	0x0000,     /* R269 */
+	0x0000,     /* R270 */
+	0x0000,     /* R271 */
+	0x0000,     /* R272   - Write Sequencer Ctrl (1) */
+	0x0000,     /* R273   - Write Sequencer Ctrl (2) */
+	0x0000,     /* R274 */
+	0x0000,     /* R275 */
+	0x0000,     /* R276 */
+	0x0000,     /* R277 */
+	0x0000,     /* R278 */
+	0x0000,     /* R279 */
+	0x0000,     /* R280 */
+	0x0000,     /* R281 */
+	0x0000,     /* R282 */
+	0x0000,     /* R283 */
+	0x0000,     /* R284 */
+	0x0000,     /* R285 */
+	0x0000,     /* R286 */
+	0x0000,     /* R287 */
+	0x0000,     /* R288 */
+	0x0000,     /* R289 */
+	0x0000,     /* R290 */
+	0x0000,     /* R291 */
+	0x0000,     /* R292 */
+	0x0000,     /* R293 */
+	0x0000,     /* R294 */
+	0x0000,     /* R295 */
+	0x0000,     /* R296 */
+	0x0000,     /* R297 */
+	0x0000,     /* R298 */
+	0x0000,     /* R299 */
+	0x0000,     /* R300 */
+	0x0000,     /* R301 */
+	0x0000,     /* R302 */
+	0x0000,     /* R303 */
+	0x0000,     /* R304 */
+	0x0000,     /* R305 */
+	0x0000,     /* R306 */
+	0x0000,     /* R307 */
+	0x0000,     /* R308 */
+	0x0000,     /* R309 */
+	0x0000,     /* R310 */
+	0x0000,     /* R311 */
+	0x0000,     /* R312 */
+	0x0000,     /* R313 */
+	0x0000,     /* R314 */
+	0x0000,     /* R315 */
+	0x0000,     /* R316 */
+	0x0000,     /* R317 */
+	0x0000,     /* R318 */
+	0x0000,     /* R319 */
+	0x0000,     /* R320 */
+	0x0000,     /* R321 */
+	0x0000,     /* R322 */
+	0x0000,     /* R323 */
+	0x0000,     /* R324 */
+	0x0000,     /* R325 */
+	0x0000,     /* R326 */
+	0x0000,     /* R327 */
+	0x0000,     /* R328 */
+	0x0000,     /* R329 */
+	0x0000,     /* R330 */
+	0x0000,     /* R331 */
+	0x0000,     /* R332 */
+	0x0000,     /* R333 */
+	0x0000,     /* R334 */
+	0x0000,     /* R335 */
+	0x0000,     /* R336 */
+	0x0000,     /* R337 */
+	0x0000,     /* R338 */
+	0x0000,     /* R339 */
+	0x0000,     /* R340 */
+	0x0000,     /* R341 */
+	0x0000,     /* R342 */
+	0x0000,     /* R343 */
+	0x0000,     /* R344 */
+	0x0000,     /* R345 */
+	0x0000,     /* R346 */
+	0x0000,     /* R347 */
+	0x0000,     /* R348 */
+	0x0000,     /* R349 */
+	0x0000,     /* R350 */
+	0x0000,     /* R351 */
+	0x0000,     /* R352 */
+	0x0000,     /* R353 */
+	0x0000,     /* R354 */
+	0x0000,     /* R355 */
+	0x0000,     /* R356 */
+	0x0000,     /* R357 */
+	0x0000,     /* R358 */
+	0x0000,     /* R359 */
+	0x0000,     /* R360 */
+	0x0000,     /* R361 */
+	0x0000,     /* R362 */
+	0x0000,     /* R363 */
+	0x0000,     /* R364 */
+	0x0000,     /* R365 */
+	0x0000,     /* R366 */
+	0x0000,     /* R367 */
+	0x0000,     /* R368 */
+	0x0000,     /* R369 */
+	0x0000,     /* R370 */
+	0x0000,     /* R371 */
+	0x0000,     /* R372 */
+	0x0000,     /* R373 */
+	0x0000,     /* R374 */
+	0x0000,     /* R375 */
+	0x0000,     /* R376 */
+	0x0000,     /* R377 */
+	0x0000,     /* R378 */
+	0x0000,     /* R379 */
+	0x0000,     /* R380 */
+	0x0000,     /* R381 */
+	0x0000,     /* R382 */
+	0x0000,     /* R383 */
+	0x0000,     /* R384 */
+	0x0000,     /* R385 */
+	0x0000,     /* R386 */
+	0x0000,     /* R387 */
+	0x0000,     /* R388 */
+	0x0000,     /* R389 */
+	0x0000,     /* R390 */
+	0x0000,     /* R391 */
+	0x0000,     /* R392 */
+	0x0000,     /* R393 */
+	0x0000,     /* R394 */
+	0x0000,     /* R395 */
+	0x0000,     /* R396 */
+	0x0000,     /* R397 */
+	0x0000,     /* R398 */
+	0x0000,     /* R399 */
+	0x0000,     /* R400 */
+	0x0000,     /* R401 */
+	0x0000,     /* R402 */
+	0x0000,     /* R403 */
+	0x0000,     /* R404 */
+	0x0000,     /* R405 */
+	0x0000,     /* R406 */
+	0x0000,     /* R407 */
+	0x0000,     /* R408 */
+	0x0000,     /* R409 */
+	0x0000,     /* R410 */
+	0x0000,     /* R411 */
+	0x0000,     /* R412 */
+	0x0000,     /* R413 */
+	0x0000,     /* R414 */
+	0x0000,     /* R415 */
+	0x0000,     /* R416 */
+	0x0000,     /* R417 */
+	0x0000,     /* R418 */
+	0x0000,     /* R419 */
+	0x0000,     /* R420 */
+	0x0000,     /* R421 */
+	0x0000,     /* R422 */
+	0x0000,     /* R423 */
+	0x0000,     /* R424 */
+	0x0000,     /* R425 */
+	0x0000,     /* R426 */
+	0x0000,     /* R427 */
+	0x0000,     /* R428 */
+	0x0000,     /* R429 */
+	0x0000,     /* R430 */
+	0x0000,     /* R431 */
+	0x0000,     /* R432 */
+	0x0000,     /* R433 */
+	0x0000,     /* R434 */
+	0x0000,     /* R435 */
+	0x0000,     /* R436 */
+	0x0000,     /* R437 */
+	0x0000,     /* R438 */
+	0x0000,     /* R439 */
+	0x0000,     /* R440 */
+	0x0000,     /* R441 */
+	0x0000,     /* R442 */
+	0x0000,     /* R443 */
+	0x0000,     /* R444 */
+	0x0000,     /* R445 */
+	0x0000,     /* R446 */
+	0x0000,     /* R447 */
+	0x0000,     /* R448 */
+	0x0000,     /* R449 */
+	0x0000,     /* R450 */
+	0x0000,     /* R451 */
+	0x0000,     /* R452 */
+	0x0000,     /* R453 */
+	0x0000,     /* R454 */
+	0x0000,     /* R455 */
+	0x0000,     /* R456 */
+	0x0000,     /* R457 */
+	0x0000,     /* R458 */
+	0x0000,     /* R459 */
+	0x0000,     /* R460 */
+	0x0000,     /* R461 */
+	0x0000,     /* R462 */
+	0x0000,     /* R463 */
+	0x0000,     /* R464 */
+	0x0000,     /* R465 */
+	0x0000,     /* R466 */
+	0x0000,     /* R467 */
+	0x0000,     /* R468 */
+	0x0000,     /* R469 */
+	0x0000,     /* R470 */
+	0x0000,     /* R471 */
+	0x0000,     /* R472 */
+	0x0000,     /* R473 */
+	0x0000,     /* R474 */
+	0x0000,     /* R475 */
+	0x0000,     /* R476 */
+	0x0000,     /* R477 */
+	0x0000,     /* R478 */
+	0x0000,     /* R479 */
+	0x0000,     /* R480 */
+	0x0000,     /* R481 */
+	0x0000,     /* R482 */
+	0x0000,     /* R483 */
+	0x0000,     /* R484 */
+	0x0000,     /* R485 */
+	0x0000,     /* R486 */
+	0x0000,     /* R487 */
+	0x0000,     /* R488 */
+	0x0000,     /* R489 */
+	0x0000,     /* R490 */
+	0x0000,     /* R491 */
+	0x0000,     /* R492 */
+	0x0000,     /* R493 */
+	0x0000,     /* R494 */
+	0x0000,     /* R495 */
+	0x0000,     /* R496 */
+	0x0000,     /* R497 */
+	0x0000,     /* R498 */
+	0x0000,     /* R499 */
+	0x0000,     /* R500 */
+	0x0000,     /* R501 */
+	0x0000,     /* R502 */
+	0x0000,     /* R503 */
+	0x0000,     /* R504 */
+	0x0000,     /* R505 */
+	0x0000,     /* R506 */
+	0x0000,     /* R507 */
+	0x0000,     /* R508 */
+	0x0000,     /* R509 */
+	0x0000,     /* R510 */
+	0x0000,     /* R511 */
+	0x0000,     /* R512   - AIF1 Clocking (1) */
+	0x0000,     /* R513   - AIF1 Clocking (2) */
+	0x0000,     /* R514 */
+	0x0000,     /* R515 */
+	0x0000,     /* R516   - AIF2 Clocking (1) */
+	0x0000,     /* R517   - AIF2 Clocking (2) */
+	0x0000,     /* R518 */
+	0x0000,     /* R519 */
+	0x0000,     /* R520   - Clocking (1) */
+	0x0000,     /* R521   - Clocking (2) */
+	0x0000,     /* R522 */
+	0x0000,     /* R523 */
+	0x0000,     /* R524 */
+	0x0000,     /* R525 */
+	0x0000,     /* R526 */
+	0x0000,     /* R527 */
+	0x0083,     /* R528   - AIF1 Rate */
+	0x0083,     /* R529   - AIF2 Rate */
+	0x0000,     /* R530   - Rate Status */
+	0x0000,     /* R531 */
+	0x0000,     /* R532 */
+	0x0000,     /* R533 */
+	0x0000,     /* R534 */
+	0x0000,     /* R535 */
+	0x0000,     /* R536 */
+	0x0000,     /* R537 */
+	0x0000,     /* R538 */
+	0x0000,     /* R539 */
+	0x0000,     /* R540 */
+	0x0000,     /* R541 */
+	0x0000,     /* R542 */
+	0x0000,     /* R543 */
+	0x0000,     /* R544   - FLL1 Control (1) */
+	0x0000,     /* R545   - FLL1 Control (2) */
+	0x0000,     /* R546   - FLL1 Control (3) */
+	0x0000,     /* R547   - FLL1 Control (4) */
+	0x0C80,     /* R548   - FLL1 Control (5) */
+	0x0000,     /* R549 */
+	0x0000,     /* R550 */
+	0x0000,     /* R551 */
+	0x0000,     /* R552 */
+	0x0000,     /* R553 */
+	0x0000,     /* R554 */
+	0x0000,     /* R555 */
+	0x0000,     /* R556 */
+	0x0000,     /* R557 */
+	0x0000,     /* R558 */
+	0x0000,     /* R559 */
+	0x0000,     /* R560 */
+	0x0000,     /* R561 */
+	0x0000,     /* R562 */
+	0x0000,     /* R563 */
+	0x0000,     /* R564 */
+	0x0000,     /* R565 */
+	0x0000,     /* R566 */
+	0x0000,     /* R567 */
+	0x0000,     /* R568 */
+	0x0000,     /* R569 */
+	0x0000,     /* R570 */
+	0x0000,     /* R571 */
+	0x0000,     /* R572 */
+	0x0000,     /* R573 */
+	0x0000,     /* R574 */
+	0x0000,     /* R575 */
+	0x0000,     /* R576   - FLL2 Control (1) */
+	0x0000,     /* R577   - FLL2 Control (2) */
+	0x0000,     /* R578   - FLL2 Control (3) */
+	0x0000,     /* R579   - FLL2 Control (4) */
+	0x0C80,     /* R580   - FLL2 Control (5) */
+	0x0000,     /* R581 */
+	0x0000,     /* R582 */
+	0x0000,     /* R583 */
+	0x0000,     /* R584 */
+	0x0000,     /* R585 */
+	0x0000,     /* R586 */
+	0x0000,     /* R587 */
+	0x0000,     /* R588 */
+	0x0000,     /* R589 */
+	0x0000,     /* R590 */
+	0x0000,     /* R591 */
+	0x0000,     /* R592 */
+	0x0000,     /* R593 */
+	0x0000,     /* R594 */
+	0x0000,     /* R595 */
+	0x0000,     /* R596 */
+	0x0000,     /* R597 */
+	0x0000,     /* R598 */
+	0x0000,     /* R599 */
+	0x0000,     /* R600 */
+	0x0000,     /* R601 */
+	0x0000,     /* R602 */
+	0x0000,     /* R603 */
+	0x0000,     /* R604 */
+	0x0000,     /* R605 */
+	0x0000,     /* R606 */
+	0x0000,     /* R607 */
+	0x0000,     /* R608 */
+	0x0000,     /* R609 */
+	0x0000,     /* R610 */
+	0x0000,     /* R611 */
+	0x0000,     /* R612 */
+	0x0000,     /* R613 */
+	0x0000,     /* R614 */
+	0x0000,     /* R615 */
+	0x0000,     /* R616 */
+	0x0000,     /* R617 */
+	0x0000,     /* R618 */
+	0x0000,     /* R619 */
+	0x0000,     /* R620 */
+	0x0000,     /* R621 */
+	0x0000,     /* R622 */
+	0x0000,     /* R623 */
+	0x0000,     /* R624 */
+	0x0000,     /* R625 */
+	0x0000,     /* R626 */
+	0x0000,     /* R627 */
+	0x0000,     /* R628 */
+	0x0000,     /* R629 */
+	0x0000,     /* R630 */
+	0x0000,     /* R631 */
+	0x0000,     /* R632 */
+	0x0000,     /* R633 */
+	0x0000,     /* R634 */
+	0x0000,     /* R635 */
+	0x0000,     /* R636 */
+	0x0000,     /* R637 */
+	0x0000,     /* R638 */
+	0x0000,     /* R639 */
+	0x0000,     /* R640 */
+	0x0000,     /* R641 */
+	0x0000,     /* R642 */
+	0x0000,     /* R643 */
+	0x0000,     /* R644 */
+	0x0000,     /* R645 */
+	0x0000,     /* R646 */
+	0x0000,     /* R647 */
+	0x0000,     /* R648 */
+	0x0000,     /* R649 */
+	0x0000,     /* R650 */
+	0x0000,     /* R651 */
+	0x0000,     /* R652 */
+	0x0000,     /* R653 */
+	0x0000,     /* R654 */
+	0x0000,     /* R655 */
+	0x0000,     /* R656 */
+	0x0000,     /* R657 */
+	0x0000,     /* R658 */
+	0x0000,     /* R659 */
+	0x0000,     /* R660 */
+	0x0000,     /* R661 */
+	0x0000,     /* R662 */
+	0x0000,     /* R663 */
+	0x0000,     /* R664 */
+	0x0000,     /* R665 */
+	0x0000,     /* R666 */
+	0x0000,     /* R667 */
+	0x0000,     /* R668 */
+	0x0000,     /* R669 */
+	0x0000,     /* R670 */
+	0x0000,     /* R671 */
+	0x0000,     /* R672 */
+	0x0000,     /* R673 */
+	0x0000,     /* R674 */
+	0x0000,     /* R675 */
+	0x0000,     /* R676 */
+	0x0000,     /* R677 */
+	0x0000,     /* R678 */
+	0x0000,     /* R679 */
+	0x0000,     /* R680 */
+	0x0000,     /* R681 */
+	0x0000,     /* R682 */
+	0x0000,     /* R683 */
+	0x0000,     /* R684 */
+	0x0000,     /* R685 */
+	0x0000,     /* R686 */
+	0x0000,     /* R687 */
+	0x0000,     /* R688 */
+	0x0000,     /* R689 */
+	0x0000,     /* R690 */
+	0x0000,     /* R691 */
+	0x0000,     /* R692 */
+	0x0000,     /* R693 */
+	0x0000,     /* R694 */
+	0x0000,     /* R695 */
+	0x0000,     /* R696 */
+	0x0000,     /* R697 */
+	0x0000,     /* R698 */
+	0x0000,     /* R699 */
+	0x0000,     /* R700 */
+	0x0000,     /* R701 */
+	0x0000,     /* R702 */
+	0x0000,     /* R703 */
+	0x0000,     /* R704 */
+	0x0000,     /* R705 */
+	0x0000,     /* R706 */
+	0x0000,     /* R707 */
+	0x0000,     /* R708 */
+	0x0000,     /* R709 */
+	0x0000,     /* R710 */
+	0x0000,     /* R711 */
+	0x0000,     /* R712 */
+	0x0000,     /* R713 */
+	0x0000,     /* R714 */
+	0x0000,     /* R715 */
+	0x0000,     /* R716 */
+	0x0000,     /* R717 */
+	0x0000,     /* R718 */
+	0x0000,     /* R719 */
+	0x0000,     /* R720 */
+	0x0000,     /* R721 */
+	0x0000,     /* R722 */
+	0x0000,     /* R723 */
+	0x0000,     /* R724 */
+	0x0000,     /* R725 */
+	0x0000,     /* R726 */
+	0x0000,     /* R727 */
+	0x0000,     /* R728 */
+	0x0000,     /* R729 */
+	0x0000,     /* R730 */
+	0x0000,     /* R731 */
+	0x0000,     /* R732 */
+	0x0000,     /* R733 */
+	0x0000,     /* R734 */
+	0x0000,     /* R735 */
+	0x0000,     /* R736 */
+	0x0000,     /* R737 */
+	0x0000,     /* R738 */
+	0x0000,     /* R739 */
+	0x0000,     /* R740 */
+	0x0000,     /* R741 */
+	0x0000,     /* R742 */
+	0x0000,     /* R743 */
+	0x0000,     /* R744 */
+	0x0000,     /* R745 */
+	0x0000,     /* R746 */
+	0x0000,     /* R747 */
+	0x0000,     /* R748 */
+	0x0000,     /* R749 */
+	0x0000,     /* R750 */
+	0x0000,     /* R751 */
+	0x0000,     /* R752 */
+	0x0000,     /* R753 */
+	0x0000,     /* R754 */
+	0x0000,     /* R755 */
+	0x0000,     /* R756 */
+	0x0000,     /* R757 */
+	0x0000,     /* R758 */
+	0x0000,     /* R759 */
+	0x0000,     /* R760 */
+	0x0000,     /* R761 */
+	0x0000,     /* R762 */
+	0x0000,     /* R763 */
+	0x0000,     /* R764 */
+	0x0000,     /* R765 */
+	0x0000,     /* R766 */
+	0x0000,     /* R767 */
+	0x4050,     /* R768   - AIF1 Control (1) */
+	0x4000,     /* R769   - AIF1 Control (2) */
+	0x0000,     /* R770   - AIF1 Master/Slave */
+	0x0040,     /* R771   - AIF1 BCLK */
+	0x0040,     /* R772   - AIF1ADC LRCLK */
+	0x0040,     /* R773   - AIF1DAC LRCLK */
+	0x0004,     /* R774   - AIF1DAC Data */
+	0x0100,     /* R775   - AIF1ADC Data */
+	0x0000,     /* R776 */
+	0x0000,     /* R777 */
+	0x0000,     /* R778 */
+	0x0000,     /* R779 */
+	0x0000,     /* R780 */
+	0x0000,     /* R781 */
+	0x0000,     /* R782 */
+	0x0000,     /* R783 */
+	0x4050,     /* R784   - AIF2 Control (1) */
+	0x4000,     /* R785   - AIF2 Control (2) */
+	0x0000,     /* R786   - AIF2 Master/Slave */
+	0x0040,     /* R787   - AIF2 BCLK */
+	0x0040,     /* R788   - AIF2ADC LRCLK */
+	0x0040,     /* R789   - AIF2DAC LRCLK */
+	0x0000,     /* R790   - AIF2DAC Data */
+	0x0000,     /* R791   - AIF2ADC Data */
+	0x0000,     /* R792 */
+	0x0000,     /* R793 */
+	0x0000,     /* R794 */
+	0x0000,     /* R795 */
+	0x0000,     /* R796 */
+	0x0000,     /* R797 */
+	0x0000,     /* R798 */
+	0x0000,     /* R799 */
+	0x0000,     /* R800 */
+	0x0000,     /* R801 */
+	0x0000,     /* R802 */
+	0x0000,     /* R803 */
+	0x0000,     /* R804 */
+	0x0000,     /* R805 */
+	0x0000,     /* R806 */
+	0x0000,     /* R807 */
+	0x0000,     /* R808 */
+	0x0000,     /* R809 */
+	0x0000,     /* R810 */
+	0x0000,     /* R811 */
+	0x0000,     /* R812 */
+	0x0000,     /* R813 */
+	0x0000,     /* R814 */
+	0x0000,     /* R815 */
+	0x0000,     /* R816 */
+	0x0000,     /* R817 */
+	0x0000,     /* R818 */
+	0x0000,     /* R819 */
+	0x0000,     /* R820 */
+	0x0000,     /* R821 */
+	0x0000,     /* R822 */
+	0x0000,     /* R823 */
+	0x0000,     /* R824 */
+	0x0000,     /* R825 */
+	0x0000,     /* R826 */
+	0x0000,     /* R827 */
+	0x0000,     /* R828 */
+	0x0000,     /* R829 */
+	0x0000,     /* R830 */
+	0x0000,     /* R831 */
+	0x0000,     /* R832 */
+	0x0000,     /* R833 */
+	0x0000,     /* R834 */
+	0x0000,     /* R835 */
+	0x0000,     /* R836 */
+	0x0000,     /* R837 */
+	0x0000,     /* R838 */
+	0x0000,     /* R839 */
+	0x0000,     /* R840 */
+	0x0000,     /* R841 */
+	0x0000,     /* R842 */
+	0x0000,     /* R843 */
+	0x0000,     /* R844 */
+	0x0000,     /* R845 */
+	0x0000,     /* R846 */
+	0x0000,     /* R847 */
+	0x0000,     /* R848 */
+	0x0000,     /* R849 */
+	0x0000,     /* R850 */
+	0x0000,     /* R851 */
+	0x0000,     /* R852 */
+	0x0000,     /* R853 */
+	0x0000,     /* R854 */
+	0x0000,     /* R855 */
+	0x0000,     /* R856 */
+	0x0000,     /* R857 */
+	0x0000,     /* R858 */
+	0x0000,     /* R859 */
+	0x0000,     /* R860 */
+	0x0000,     /* R861 */
+	0x0000,     /* R862 */
+	0x0000,     /* R863 */
+	0x0000,     /* R864 */
+	0x0000,     /* R865 */
+	0x0000,     /* R866 */
+	0x0000,     /* R867 */
+	0x0000,     /* R868 */
+	0x0000,     /* R869 */
+	0x0000,     /* R870 */
+	0x0000,     /* R871 */
+	0x0000,     /* R872 */
+	0x0000,     /* R873 */
+	0x0000,     /* R874 */
+	0x0000,     /* R875 */
+	0x0000,     /* R876 */
+	0x0000,     /* R877 */
+	0x0000,     /* R878 */
+	0x0000,     /* R879 */
+	0x0000,     /* R880 */
+	0x0000,     /* R881 */
+	0x0000,     /* R882 */
+	0x0000,     /* R883 */
+	0x0000,     /* R884 */
+	0x0000,     /* R885 */
+	0x0000,     /* R886 */
+	0x0000,     /* R887 */
+	0x0000,     /* R888 */
+	0x0000,     /* R889 */
+	0x0000,     /* R890 */
+	0x0000,     /* R891 */
+	0x0000,     /* R892 */
+	0x0000,     /* R893 */
+	0x0000,     /* R894 */
+	0x0000,     /* R895 */
+	0x0000,     /* R896 */
+	0x0000,     /* R897 */
+	0x0000,     /* R898 */
+	0x0000,     /* R899 */
+	0x0000,     /* R900 */
+	0x0000,     /* R901 */
+	0x0000,     /* R902 */
+	0x0000,     /* R903 */
+	0x0000,     /* R904 */
+	0x0000,     /* R905 */
+	0x0000,     /* R906 */
+	0x0000,     /* R907 */
+	0x0000,     /* R908 */
+	0x0000,     /* R909 */
+	0x0000,     /* R910 */
+	0x0000,     /* R911 */
+	0x0000,     /* R912 */
+	0x0000,     /* R913 */
+	0x0000,     /* R914 */
+	0x0000,     /* R915 */
+	0x0000,     /* R916 */
+	0x0000,     /* R917 */
+	0x0000,     /* R918 */
+	0x0000,     /* R919 */
+	0x0000,     /* R920 */
+	0x0000,     /* R921 */
+	0x0000,     /* R922 */
+	0x0000,     /* R923 */
+	0x0000,     /* R924 */
+	0x0000,     /* R925 */
+	0x0000,     /* R926 */
+	0x0000,     /* R927 */
+	0x0000,     /* R928 */
+	0x0000,     /* R929 */
+	0x0000,     /* R930 */
+	0x0000,     /* R931 */
+	0x0000,     /* R932 */
+	0x0000,     /* R933 */
+	0x0000,     /* R934 */
+	0x0000,     /* R935 */
+	0x0000,     /* R936 */
+	0x0000,     /* R937 */
+	0x0000,     /* R938 */
+	0x0000,     /* R939 */
+	0x0000,     /* R940 */
+	0x0000,     /* R941 */
+	0x0000,     /* R942 */
+	0x0000,     /* R943 */
+	0x0000,     /* R944 */
+	0x0000,     /* R945 */
+	0x0000,     /* R946 */
+	0x0000,     /* R947 */
+	0x0000,     /* R948 */
+	0x0000,     /* R949 */
+	0x0000,     /* R950 */
+	0x0000,     /* R951 */
+	0x0000,     /* R952 */
+	0x0000,     /* R953 */
+	0x0000,     /* R954 */
+	0x0000,     /* R955 */
+	0x0000,     /* R956 */
+	0x0000,     /* R957 */
+	0x0000,     /* R958 */
+	0x0000,     /* R959 */
+	0x0000,     /* R960 */
+	0x0000,     /* R961 */
+	0x0000,     /* R962 */
+	0x0000,     /* R963 */
+	0x0000,     /* R964 */
+	0x0000,     /* R965 */
+	0x0000,     /* R966 */
+	0x0000,     /* R967 */
+	0x0000,     /* R968 */
+	0x0000,     /* R969 */
+	0x0000,     /* R970 */
+	0x0000,     /* R971 */
+	0x0000,     /* R972 */
+	0x0000,     /* R973 */
+	0x0000,     /* R974 */
+	0x0000,     /* R975 */
+	0x0000,     /* R976 */
+	0x0000,     /* R977 */
+	0x0000,     /* R978 */
+	0x0000,     /* R979 */
+	0x0000,     /* R980 */
+	0x0000,     /* R981 */
+	0x0000,     /* R982 */
+	0x0000,     /* R983 */
+	0x0000,     /* R984 */
+	0x0000,     /* R985 */
+	0x0000,     /* R986 */
+	0x0000,     /* R987 */
+	0x0000,     /* R988 */
+	0x0000,     /* R989 */
+	0x0000,     /* R990 */
+	0x0000,     /* R991 */
+	0x0000,     /* R992 */
+	0x0000,     /* R993 */
+	0x0000,     /* R994 */
+	0x0000,     /* R995 */
+	0x0000,     /* R996 */
+	0x0000,     /* R997 */
+	0x0000,     /* R998 */
+	0x0000,     /* R999 */
+	0x0000,     /* R1000 */
+	0x0000,     /* R1001 */
+	0x0000,     /* R1002 */
+	0x0000,     /* R1003 */
+	0x0000,     /* R1004 */
+	0x0000,     /* R1005 */
+	0x0000,     /* R1006 */
+	0x0000,     /* R1007 */
+	0x0000,     /* R1008 */
+	0x0000,     /* R1009 */
+	0x0000,     /* R1010 */
+	0x0000,     /* R1011 */
+	0x0000,     /* R1012 */
+	0x0000,     /* R1013 */
+	0x0000,     /* R1014 */
+	0x0000,     /* R1015 */
+	0x0000,     /* R1016 */
+	0x0000,     /* R1017 */
+	0x0000,     /* R1018 */
+	0x0000,     /* R1019 */
+	0x0000,     /* R1020 */
+	0x0000,     /* R1021 */
+	0x0000,     /* R1022 */
+	0x0000,     /* R1023 */
+	0x00C0,     /* R1024  - AIF1 ADC1 Left Volume */
+	0x00C0,     /* R1025  - AIF1 ADC1 Right Volume */
+	0x00C0,     /* R1026  - AIF1 DAC1 Left Volume */
+	0x00C0,     /* R1027  - AIF1 DAC1 Right Volume */
+	0x00C0,     /* R1028  - AIF1 ADC2 Left Volume */
+	0x00C0,     /* R1029  - AIF1 ADC2 Right Volume */
+	0x00C0,     /* R1030  - AIF1 DAC2 Left Volume */
+	0x00C0,     /* R1031  - AIF1 DAC2 Right Volume */
+	0x0000,     /* R1032 */
+	0x0000,     /* R1033 */
+	0x0000,     /* R1034 */
+	0x0000,     /* R1035 */
+	0x0000,     /* R1036 */
+	0x0000,     /* R1037 */
+	0x0000,     /* R1038 */
+	0x0000,     /* R1039 */
+	0x0000,     /* R1040  - AIF1 ADC1 Filters */
+	0x0000,     /* R1041  - AIF1 ADC2 Filters */
+	0x0000,     /* R1042 */
+	0x0000,     /* R1043 */
+	0x0000,     /* R1044 */
+	0x0000,     /* R1045 */
+	0x0000,     /* R1046 */
+	0x0000,     /* R1047 */
+	0x0000,     /* R1048 */
+	0x0000,     /* R1049 */
+	0x0000,     /* R1050 */
+	0x0000,     /* R1051 */
+	0x0000,     /* R1052 */
+	0x0000,     /* R1053 */
+	0x0000,     /* R1054 */
+	0x0000,     /* R1055 */
+	0x0200,     /* R1056  - AIF1 DAC1 Filters (1) */
+	0x0010,     /* R1057  - AIF1 DAC1 Filters (2) */
+	0x0200,     /* R1058  - AIF1 DAC2 Filters (1) */
+	0x0010,     /* R1059  - AIF1 DAC2 Filters (2) */
+	0x0000,     /* R1060 */
+	0x0000,     /* R1061 */
+	0x0000,     /* R1062 */
+	0x0000,     /* R1063 */
+	0x0000,     /* R1064 */
+	0x0000,     /* R1065 */
+	0x0000,     /* R1066 */
+	0x0000,     /* R1067 */
+	0x0000,     /* R1068 */
+	0x0000,     /* R1069 */
+	0x0000,     /* R1070 */
+	0x0000,     /* R1071 */
+	0x0000,     /* R1072 */
+	0x0000,     /* R1073 */
+	0x0000,     /* R1074 */
+	0x0000,     /* R1075 */
+	0x0000,     /* R1076 */
+	0x0000,     /* R1077 */
+	0x0000,     /* R1078 */
+	0x0000,     /* R1079 */
+	0x0000,     /* R1080 */
+	0x0000,     /* R1081 */
+	0x0000,     /* R1082 */
+	0x0000,     /* R1083 */
+	0x0000,     /* R1084 */
+	0x0000,     /* R1085 */
+	0x0000,     /* R1086 */
+	0x0000,     /* R1087 */
+	0x0098,     /* R1088  - AIF1 DRC1 (1) */
+	0x0845,     /* R1089  - AIF1 DRC1 (2) */
+	0x0000,     /* R1090  - AIF1 DRC1 (3) */
+	0x0000,     /* R1091  - AIF1 DRC1 (4) */
+	0x0000,     /* R1092  - AIF1 DRC1 (5) */
+	0x0000,     /* R1093 */
+	0x0000,     /* R1094 */
+	0x0000,     /* R1095 */
+	0x0000,     /* R1096 */
+	0x0000,     /* R1097 */
+	0x0000,     /* R1098 */
+	0x0000,     /* R1099 */
+	0x0000,     /* R1100 */
+	0x0000,     /* R1101 */
+	0x0000,     /* R1102 */
+	0x0000,     /* R1103 */
+	0x0098,     /* R1104  - AIF1 DRC2 (1) */
+	0x0845,     /* R1105  - AIF1 DRC2 (2) */
+	0x0000,     /* R1106  - AIF1 DRC2 (3) */
+	0x0000,     /* R1107  - AIF1 DRC2 (4) */
+	0x0000,     /* R1108  - AIF1 DRC2 (5) */
+	0x0000,     /* R1109 */
+	0x0000,     /* R1110 */
+	0x0000,     /* R1111 */
+	0x0000,     /* R1112 */
+	0x0000,     /* R1113 */
+	0x0000,     /* R1114 */
+	0x0000,     /* R1115 */
+	0x0000,     /* R1116 */
+	0x0000,     /* R1117 */
+	0x0000,     /* R1118 */
+	0x0000,     /* R1119 */
+	0x0000,     /* R1120 */
+	0x0000,     /* R1121 */
+	0x0000,     /* R1122 */
+	0x0000,     /* R1123 */
+	0x0000,     /* R1124 */
+	0x0000,     /* R1125 */
+	0x0000,     /* R1126 */
+	0x0000,     /* R1127 */
+	0x0000,     /* R1128 */
+	0x0000,     /* R1129 */
+	0x0000,     /* R1130 */
+	0x0000,     /* R1131 */
+	0x0000,     /* R1132 */
+	0x0000,     /* R1133 */
+	0x0000,     /* R1134 */
+	0x0000,     /* R1135 */
+	0x0000,     /* R1136 */
+	0x0000,     /* R1137 */
+	0x0000,     /* R1138 */
+	0x0000,     /* R1139 */
+	0x0000,     /* R1140 */
+	0x0000,     /* R1141 */
+	0x0000,     /* R1142 */
+	0x0000,     /* R1143 */
+	0x0000,     /* R1144 */
+	0x0000,     /* R1145 */
+	0x0000,     /* R1146 */
+	0x0000,     /* R1147 */
+	0x0000,     /* R1148 */
+	0x0000,     /* R1149 */
+	0x0000,     /* R1150 */
+	0x0000,     /* R1151 */
+	0x6318,     /* R1152  - AIF1 DAC1 EQ Gains (1) */
+	0x6300,     /* R1153  - AIF1 DAC1 EQ Gains (2) */
+	0x0FCA,     /* R1154  - AIF1 DAC1 EQ Band 1 A */
+	0x0400,     /* R1155  - AIF1 DAC1 EQ Band 1 B */
+	0x00D8,     /* R1156  - AIF1 DAC1 EQ Band 1 PG */
+	0x1EB5,     /* R1157  - AIF1 DAC1 EQ Band 2 A */
+	0xF145,     /* R1158  - AIF1 DAC1 EQ Band 2 B */
+	0x0B75,     /* R1159  - AIF1 DAC1 EQ Band 2 C */
+	0x01C5,     /* R1160  - AIF1 DAC1 EQ Band 2 PG */
+	0x1C58,     /* R1161  - AIF1 DAC1 EQ Band 3 A */
+	0xF373,     /* R1162  - AIF1 DAC1 EQ Band 3 B */
+	0x0A54,     /* R1163  - AIF1 DAC1 EQ Band 3 C */
+	0x0558,     /* R1164  - AIF1 DAC1 EQ Band 3 PG */
+	0x168E,     /* R1165  - AIF1 DAC1 EQ Band 4 A */
+	0xF829,     /* R1166  - AIF1 DAC1 EQ Band 4 B */
+	0x07AD,     /* R1167  - AIF1 DAC1 EQ Band 4 C */
+	0x1103,     /* R1168  - AIF1 DAC1 EQ Band 4 PG */
+	0x0564,     /* R1169  - AIF1 DAC1 EQ Band 5 A */
+	0x0559,     /* R1170  - AIF1 DAC1 EQ Band 5 B */
+	0x4000,     /* R1171  - AIF1 DAC1 EQ Band 5 PG */
+	0x0000,     /* R1172 */
+	0x0000,     /* R1173 */
+	0x0000,     /* R1174 */
+	0x0000,     /* R1175 */
+	0x0000,     /* R1176 */
+	0x0000,     /* R1177 */
+	0x0000,     /* R1178 */
+	0x0000,     /* R1179 */
+	0x0000,     /* R1180 */
+	0x0000,     /* R1181 */
+	0x0000,     /* R1182 */
+	0x0000,     /* R1183 */
+	0x6318,     /* R1184  - AIF1 DAC2 EQ Gains (1) */
+	0x6300,     /* R1185  - AIF1 DAC2 EQ Gains (2) */
+	0x0FCA,     /* R1186  - AIF1 DAC2 EQ Band 1 A */
+	0x0400,     /* R1187  - AIF1 DAC2 EQ Band 1 B */
+	0x00D8,     /* R1188  - AIF1 DAC2 EQ Band 1 PG */
+	0x1EB5,     /* R1189  - AIF1 DAC2 EQ Band 2 A */
+	0xF145,     /* R1190  - AIF1 DAC2 EQ Band 2 B */
+	0x0B75,     /* R1191  - AIF1 DAC2 EQ Band 2 C */
+	0x01C5,     /* R1192  - AIF1 DAC2 EQ Band 2 PG */
+	0x1C58,     /* R1193  - AIF1 DAC2 EQ Band 3 A */
+	0xF373,     /* R1194  - AIF1 DAC2 EQ Band 3 B */
+	0x0A54,     /* R1195  - AIF1 DAC2 EQ Band 3 C */
+	0x0558,     /* R1196  - AIF1 DAC2 EQ Band 3 PG */
+	0x168E,     /* R1197  - AIF1 DAC2 EQ Band 4 A */
+	0xF829,     /* R1198  - AIF1 DAC2 EQ Band 4 B */
+	0x07AD,     /* R1199  - AIF1 DAC2 EQ Band 4 C */
+	0x1103,     /* R1200  - AIF1 DAC2 EQ Band 4 PG */
+	0x0564,     /* R1201  - AIF1 DAC2 EQ Band 5 A */
+	0x0559,     /* R1202  - AIF1 DAC2 EQ Band 5 B */
+	0x4000,     /* R1203  - AIF1 DAC2 EQ Band 5 PG */
+	0x0000,     /* R1204 */
+	0x0000,     /* R1205 */
+	0x0000,     /* R1206 */
+	0x0000,     /* R1207 */
+	0x0000,     /* R1208 */
+	0x0000,     /* R1209 */
+	0x0000,     /* R1210 */
+	0x0000,     /* R1211 */
+	0x0000,     /* R1212 */
+	0x0000,     /* R1213 */
+	0x0000,     /* R1214 */
+	0x0000,     /* R1215 */
+	0x0000,     /* R1216 */
+	0x0000,     /* R1217 */
+	0x0000,     /* R1218 */
+	0x0000,     /* R1219 */
+	0x0000,     /* R1220 */
+	0x0000,     /* R1221 */
+	0x0000,     /* R1222 */
+	0x0000,     /* R1223 */
+	0x0000,     /* R1224 */
+	0x0000,     /* R1225 */
+	0x0000,     /* R1226 */
+	0x0000,     /* R1227 */
+	0x0000,     /* R1228 */
+	0x0000,     /* R1229 */
+	0x0000,     /* R1230 */
+	0x0000,     /* R1231 */
+	0x0000,     /* R1232 */
+	0x0000,     /* R1233 */
+	0x0000,     /* R1234 */
+	0x0000,     /* R1235 */
+	0x0000,     /* R1236 */
+	0x0000,     /* R1237 */
+	0x0000,     /* R1238 */
+	0x0000,     /* R1239 */
+	0x0000,     /* R1240 */
+	0x0000,     /* R1241 */
+	0x0000,     /* R1242 */
+	0x0000,     /* R1243 */
+	0x0000,     /* R1244 */
+	0x0000,     /* R1245 */
+	0x0000,     /* R1246 */
+	0x0000,     /* R1247 */
+	0x0000,     /* R1248 */
+	0x0000,     /* R1249 */
+	0x0000,     /* R1250 */
+	0x0000,     /* R1251 */
+	0x0000,     /* R1252 */
+	0x0000,     /* R1253 */
+	0x0000,     /* R1254 */
+	0x0000,     /* R1255 */
+	0x0000,     /* R1256 */
+	0x0000,     /* R1257 */
+	0x0000,     /* R1258 */
+	0x0000,     /* R1259 */
+	0x0000,     /* R1260 */
+	0x0000,     /* R1261 */
+	0x0000,     /* R1262 */
+	0x0000,     /* R1263 */
+	0x0000,     /* R1264 */
+	0x0000,     /* R1265 */
+	0x0000,     /* R1266 */
+	0x0000,     /* R1267 */
+	0x0000,     /* R1268 */
+	0x0000,     /* R1269 */
+	0x0000,     /* R1270 */
+	0x0000,     /* R1271 */
+	0x0000,     /* R1272 */
+	0x0000,     /* R1273 */
+	0x0000,     /* R1274 */
+	0x0000,     /* R1275 */
+	0x0000,     /* R1276 */
+	0x0000,     /* R1277 */
+	0x0000,     /* R1278 */
+	0x0000,     /* R1279 */
+	0x00C0,     /* R1280  - AIF2 ADC Left Volume */
+	0x00C0,     /* R1281  - AIF2 ADC Right Volume */
+	0x00C0,     /* R1282  - AIF2 DAC Left Volume */
+	0x00C0,     /* R1283  - AIF2 DAC Right Volume */
+	0x0000,     /* R1284 */
+	0x0000,     /* R1285 */
+	0x0000,     /* R1286 */
+	0x0000,     /* R1287 */
+	0x0000,     /* R1288 */
+	0x0000,     /* R1289 */
+	0x0000,     /* R1290 */
+	0x0000,     /* R1291 */
+	0x0000,     /* R1292 */
+	0x0000,     /* R1293 */
+	0x0000,     /* R1294 */
+	0x0000,     /* R1295 */
+	0x0000,     /* R1296  - AIF2 ADC Filters */
+	0x0000,     /* R1297 */
+	0x0000,     /* R1298 */
+	0x0000,     /* R1299 */
+	0x0000,     /* R1300 */
+	0x0000,     /* R1301 */
+	0x0000,     /* R1302 */
+	0x0000,     /* R1303 */
+	0x0000,     /* R1304 */
+	0x0000,     /* R1305 */
+	0x0000,     /* R1306 */
+	0x0000,     /* R1307 */
+	0x0000,     /* R1308 */
+	0x0000,     /* R1309 */
+	0x0000,     /* R1310 */
+	0x0000,     /* R1311 */
+	0x0200,     /* R1312  - AIF2 DAC Filters (1) */
+	0x0010,     /* R1313  - AIF2 DAC Filters (2) */
+	0x0000,     /* R1314 */
+	0x0000,     /* R1315 */
+	0x0000,     /* R1316 */
+	0x0000,     /* R1317 */
+	0x0000,     /* R1318 */
+	0x0000,     /* R1319 */
+	0x0000,     /* R1320 */
+	0x0000,     /* R1321 */
+	0x0000,     /* R1322 */
+	0x0000,     /* R1323 */
+	0x0000,     /* R1324 */
+	0x0000,     /* R1325 */
+	0x0000,     /* R1326 */
+	0x0000,     /* R1327 */
+	0x0000,     /* R1328 */
+	0x0000,     /* R1329 */
+	0x0000,     /* R1330 */
+	0x0000,     /* R1331 */
+	0x0000,     /* R1332 */
+	0x0000,     /* R1333 */
+	0x0000,     /* R1334 */
+	0x0000,     /* R1335 */
+	0x0000,     /* R1336 */
+	0x0000,     /* R1337 */
+	0x0000,     /* R1338 */
+	0x0000,     /* R1339 */
+	0x0000,     /* R1340 */
+	0x0000,     /* R1341 */
+	0x0000,     /* R1342 */
+	0x0000,     /* R1343 */
+	0x0098,     /* R1344  - AIF2 DRC (1) */
+	0x0845,     /* R1345  - AIF2 DRC (2) */
+	0x0000,     /* R1346  - AIF2 DRC (3) */
+	0x0000,     /* R1347  - AIF2 DRC (4) */
+	0x0000,     /* R1348  - AIF2 DRC (5) */
+	0x0000,     /* R1349 */
+	0x0000,     /* R1350 */
+	0x0000,     /* R1351 */
+	0x0000,     /* R1352 */
+	0x0000,     /* R1353 */
+	0x0000,     /* R1354 */
+	0x0000,     /* R1355 */
+	0x0000,     /* R1356 */
+	0x0000,     /* R1357 */
+	0x0000,     /* R1358 */
+	0x0000,     /* R1359 */
+	0x0000,     /* R1360 */
+	0x0000,     /* R1361 */
+	0x0000,     /* R1362 */
+	0x0000,     /* R1363 */
+	0x0000,     /* R1364 */
+	0x0000,     /* R1365 */
+	0x0000,     /* R1366 */
+	0x0000,     /* R1367 */
+	0x0000,     /* R1368 */
+	0x0000,     /* R1369 */
+	0x0000,     /* R1370 */
+	0x0000,     /* R1371 */
+	0x0000,     /* R1372 */
+	0x0000,     /* R1373 */
+	0x0000,     /* R1374 */
+	0x0000,     /* R1375 */
+	0x0000,     /* R1376 */
+	0x0000,     /* R1377 */
+	0x0000,     /* R1378 */
+	0x0000,     /* R1379 */
+	0x0000,     /* R1380 */
+	0x0000,     /* R1381 */
+	0x0000,     /* R1382 */
+	0x0000,     /* R1383 */
+	0x0000,     /* R1384 */
+	0x0000,     /* R1385 */
+	0x0000,     /* R1386 */
+	0x0000,     /* R1387 */
+	0x0000,     /* R1388 */
+	0x0000,     /* R1389 */
+	0x0000,     /* R1390 */
+	0x0000,     /* R1391 */
+	0x0000,     /* R1392 */
+	0x0000,     /* R1393 */
+	0x0000,     /* R1394 */
+	0x0000,     /* R1395 */
+	0x0000,     /* R1396 */
+	0x0000,     /* R1397 */
+	0x0000,     /* R1398 */
+	0x0000,     /* R1399 */
+	0x0000,     /* R1400 */
+	0x0000,     /* R1401 */
+	0x0000,     /* R1402 */
+	0x0000,     /* R1403 */
+	0x0000,     /* R1404 */
+	0x0000,     /* R1405 */
+	0x0000,     /* R1406 */
+	0x0000,     /* R1407 */
+	0x6318,     /* R1408  - AIF2 EQ Gains (1) */
+	0x6300,     /* R1409  - AIF2 EQ Gains (2) */
+	0x0FCA,     /* R1410  - AIF2 EQ Band 1 A */
+	0x0400,     /* R1411  - AIF2 EQ Band 1 B */
+	0x00D8,     /* R1412  - AIF2 EQ Band 1 PG */
+	0x1EB5,     /* R1413  - AIF2 EQ Band 2 A */
+	0xF145,     /* R1414  - AIF2 EQ Band 2 B */
+	0x0B75,     /* R1415  - AIF2 EQ Band 2 C */
+	0x01C5,     /* R1416  - AIF2 EQ Band 2 PG */
+	0x1C58,     /* R1417  - AIF2 EQ Band 3 A */
+	0xF373,     /* R1418  - AIF2 EQ Band 3 B */
+	0x0A54,     /* R1419  - AIF2 EQ Band 3 C */
+	0x0558,     /* R1420  - AIF2 EQ Band 3 PG */
+	0x168E,     /* R1421  - AIF2 EQ Band 4 A */
+	0xF829,     /* R1422  - AIF2 EQ Band 4 B */
+	0x07AD,     /* R1423  - AIF2 EQ Band 4 C */
+	0x1103,     /* R1424  - AIF2 EQ Band 4 PG */
+	0x0564,     /* R1425  - AIF2 EQ Band 5 A */
+	0x0559,     /* R1426  - AIF2 EQ Band 5 B */
+	0x4000,     /* R1427  - AIF2 EQ Band 5 PG */
+	0x0000,     /* R1428 */
+	0x0000,     /* R1429 */
+	0x0000,     /* R1430 */
+	0x0000,     /* R1431 */
+	0x0000,     /* R1432 */
+	0x0000,     /* R1433 */
+	0x0000,     /* R1434 */
+	0x0000,     /* R1435 */
+	0x0000,     /* R1436 */
+	0x0000,     /* R1437 */
+	0x0000,     /* R1438 */
+	0x0000,     /* R1439 */
+	0x0000,     /* R1440 */
+	0x0000,     /* R1441 */
+	0x0000,     /* R1442 */
+	0x0000,     /* R1443 */
+	0x0000,     /* R1444 */
+	0x0000,     /* R1445 */
+	0x0000,     /* R1446 */
+	0x0000,     /* R1447 */
+	0x0000,     /* R1448 */
+	0x0000,     /* R1449 */
+	0x0000,     /* R1450 */
+	0x0000,     /* R1451 */
+	0x0000,     /* R1452 */
+	0x0000,     /* R1453 */
+	0x0000,     /* R1454 */
+	0x0000,     /* R1455 */
+	0x0000,     /* R1456 */
+	0x0000,     /* R1457 */
+	0x0000,     /* R1458 */
+	0x0000,     /* R1459 */
+	0x0000,     /* R1460 */
+	0x0000,     /* R1461 */
+	0x0000,     /* R1462 */
+	0x0000,     /* R1463 */
+	0x0000,     /* R1464 */
+	0x0000,     /* R1465 */
+	0x0000,     /* R1466 */
+	0x0000,     /* R1467 */
+	0x0000,     /* R1468 */
+	0x0000,     /* R1469 */
+	0x0000,     /* R1470 */
+	0x0000,     /* R1471 */
+	0x0000,     /* R1472 */
+	0x0000,     /* R1473 */
+	0x0000,     /* R1474 */
+	0x0000,     /* R1475 */
+	0x0000,     /* R1476 */
+	0x0000,     /* R1477 */
+	0x0000,     /* R1478 */
+	0x0000,     /* R1479 */
+	0x0000,     /* R1480 */
+	0x0000,     /* R1481 */
+	0x0000,     /* R1482 */
+	0x0000,     /* R1483 */
+	0x0000,     /* R1484 */
+	0x0000,     /* R1485 */
+	0x0000,     /* R1486 */
+	0x0000,     /* R1487 */
+	0x0000,     /* R1488 */
+	0x0000,     /* R1489 */
+	0x0000,     /* R1490 */
+	0x0000,     /* R1491 */
+	0x0000,     /* R1492 */
+	0x0000,     /* R1493 */
+	0x0000,     /* R1494 */
+	0x0000,     /* R1495 */
+	0x0000,     /* R1496 */
+	0x0000,     /* R1497 */
+	0x0000,     /* R1498 */
+	0x0000,     /* R1499 */
+	0x0000,     /* R1500 */
+	0x0000,     /* R1501 */
+	0x0000,     /* R1502 */
+	0x0000,     /* R1503 */
+	0x0000,     /* R1504 */
+	0x0000,     /* R1505 */
+	0x0000,     /* R1506 */
+	0x0000,     /* R1507 */
+	0x0000,     /* R1508 */
+	0x0000,     /* R1509 */
+	0x0000,     /* R1510 */
+	0x0000,     /* R1511 */
+	0x0000,     /* R1512 */
+	0x0000,     /* R1513 */
+	0x0000,     /* R1514 */
+	0x0000,     /* R1515 */
+	0x0000,     /* R1516 */
+	0x0000,     /* R1517 */
+	0x0000,     /* R1518 */
+	0x0000,     /* R1519 */
+	0x0000,     /* R1520 */
+	0x0000,     /* R1521 */
+	0x0000,     /* R1522 */
+	0x0000,     /* R1523 */
+	0x0000,     /* R1524 */
+	0x0000,     /* R1525 */
+	0x0000,     /* R1526 */
+	0x0000,     /* R1527 */
+	0x0000,     /* R1528 */
+	0x0000,     /* R1529 */
+	0x0000,     /* R1530 */
+	0x0000,     /* R1531 */
+	0x0000,     /* R1532 */
+	0x0000,     /* R1533 */
+	0x0000,     /* R1534 */
+	0x0000,     /* R1535 */
+	0x0000,     /* R1536  - DAC1 Mixer Volumes */
+	0x0000,     /* R1537  - DAC1 Left Mixer Routing */
+	0x0000,     /* R1538  - DAC1 Right Mixer Routing */
+	0x0000,     /* R1539  - DAC2 Mixer Volumes */
+	0x0000,     /* R1540  - DAC2 Left Mixer Routing */
+	0x0000,     /* R1541  - DAC2 Right Mixer Routing */
+	0x0000,     /* R1542  - AIF1 ADC1 Left Mixer Routing */
+	0x0000,     /* R1543  - AIF1 ADC1 Right Mixer Routing */
+	0x0000,     /* R1544  - AIF1 ADC2 Left Mixer Routing */
+	0x0000,     /* R1545  - AIF1 ADC2 Right Mixer Routing */
+	0x0000,     /* R1546 */
+	0x0000,     /* R1547 */
+	0x0000,     /* R1548 */
+	0x0000,     /* R1549 */
+	0x0000,     /* R1550 */
+	0x0000,     /* R1551 */
+	0x02C0,     /* R1552  - DAC1 Left Volume */
+	0x02C0,     /* R1553  - DAC1 Right Volume */
+	0x02C0,     /* R1554  - DAC2 Left Volume */
+	0x02C0,     /* R1555  - DAC2 Right Volume */
+	0x0000,     /* R1556  - DAC Softmute */
+	0x0000,     /* R1557 */
+	0x0000,     /* R1558 */
+	0x0000,     /* R1559 */
+	0x0000,     /* R1560 */
+	0x0000,     /* R1561 */
+	0x0000,     /* R1562 */
+	0x0000,     /* R1563 */
+	0x0000,     /* R1564 */
+	0x0000,     /* R1565 */
+	0x0000,     /* R1566 */
+	0x0000,     /* R1567 */
+	0x0002,     /* R1568  - Oversampling */
+	0x0000,     /* R1569  - Sidetone */
+};
diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c
index 4d3e6f1a..247a6a9 100644
--- a/sound/soc/codecs/wm8994.c
+++ b/sound/soc/codecs/wm8994.c
@@ -18,15 +18,17 @@
 #include <linux/pm.h>
 #include <linux/i2c.h>
 #include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
 #include <linux/regulator/consumer.h>
 #include <linux/slab.h>
 #include <sound/core.h>
+#include <sound/jack.h>
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/initval.h>
 #include <sound/tlv.h>
+#include <trace/events/asoc.h>
 
 #include <linux/mfd/wm8994/core.h>
 #include <linux/mfd/wm8994/registers.h>
@@ -57,8 +59,6 @@
 	WM8994_AIF2_EQ_GAINS_1,
 };
 
-#define WM8994_REG_CACHE_SIZE  0x621
-
 struct wm8994_micdet {
 	struct snd_soc_jack *jack;
 	int det;
@@ -71,7 +71,6 @@
 	enum snd_soc_control_type control_type;
 	void *control_data;
 	struct snd_soc_codec *codec;
-	u16 reg_cache[WM8994_REG_CACHE_SIZE + 1];
 	int sysclk[2];
 	int sysclk_rate[2];
 	int mclk[2];
@@ -81,6 +80,8 @@
 	int dac_rates[2];
 	int lrclk_shared[2];
 
+	int mbc_ena[3];
+
 	/* Platform dependent DRC configuration */
 	const char **drc_texts;
 	int drc_cfg[WM8994_NUM_DRC];
@@ -92,1588 +93,22 @@
 	int retune_mobile_cfg[WM8994_NUM_EQ];
 	struct soc_enum retune_mobile_enum;
 
+	/* Platform dependent MBC configuration */
+	int mbc_cfg;
+	const char **mbc_texts;
+	struct soc_enum mbc_enum;
+
 	struct wm8994_micdet micdet[2];
 
+	wm8958_micdet_cb jack_cb;
+	void *jack_cb_data;
+	bool jack_is_mic;
+	bool jack_is_video;
+
 	int revision;
 	struct wm8994_pdata *pdata;
 };
 
-static const struct {
-	unsigned short readable;   /* Mask of readable bits */
-	unsigned short writable;   /* Mask of writable bits */
-} access_masks[] = {
-	{ 0xFFFF, 0xFFFF }, /* R0     - Software Reset */
-	{ 0x3B37, 0x3B37 }, /* R1     - Power Management (1) */
-	{ 0x6BF0, 0x6BF0 }, /* R2     - Power Management (2) */
-	{ 0x3FF0, 0x3FF0 }, /* R3     - Power Management (3) */
-	{ 0x3F3F, 0x3F3F }, /* R4     - Power Management (4) */
-	{ 0x3F0F, 0x3F0F }, /* R5     - Power Management (5) */
-	{ 0x003F, 0x003F }, /* R6     - Power Management (6) */
-	{ 0x0000, 0x0000 }, /* R7 */
-	{ 0x0000, 0x0000 }, /* R8 */
-	{ 0x0000, 0x0000 }, /* R9 */
-	{ 0x0000, 0x0000 }, /* R10 */
-	{ 0x0000, 0x0000 }, /* R11 */
-	{ 0x0000, 0x0000 }, /* R12 */
-	{ 0x0000, 0x0000 }, /* R13 */
-	{ 0x0000, 0x0000 }, /* R14 */
-	{ 0x0000, 0x0000 }, /* R15 */
-	{ 0x0000, 0x0000 }, /* R16 */
-	{ 0x0000, 0x0000 }, /* R17 */
-	{ 0x0000, 0x0000 }, /* R18 */
-	{ 0x0000, 0x0000 }, /* R19 */
-	{ 0x0000, 0x0000 }, /* R20 */
-	{ 0x01C0, 0x01C0 }, /* R21    - Input Mixer (1) */
-	{ 0x0000, 0x0000 }, /* R22 */
-	{ 0x0000, 0x0000 }, /* R23 */
-	{ 0x00DF, 0x01DF }, /* R24    - Left Line Input 1&2 Volume */
-	{ 0x00DF, 0x01DF }, /* R25    - Left Line Input 3&4 Volume */
-	{ 0x00DF, 0x01DF }, /* R26    - Right Line Input 1&2 Volume */
-	{ 0x00DF, 0x01DF }, /* R27    - Right Line Input 3&4 Volume */
-	{ 0x00FF, 0x01FF }, /* R28    - Left Output Volume */
-	{ 0x00FF, 0x01FF }, /* R29    - Right Output Volume */
-	{ 0x0077, 0x0077 }, /* R30    - Line Outputs Volume */
-	{ 0x0030, 0x0030 }, /* R31    - HPOUT2 Volume */
-	{ 0x00FF, 0x01FF }, /* R32    - Left OPGA Volume */
-	{ 0x00FF, 0x01FF }, /* R33    - Right OPGA Volume */
-	{ 0x007F, 0x007F }, /* R34    - SPKMIXL Attenuation */
-	{ 0x017F, 0x017F }, /* R35    - SPKMIXR Attenuation */
-	{ 0x003F, 0x003F }, /* R36    - SPKOUT Mixers */
-	{ 0x003F, 0x003F }, /* R37    - ClassD */
-	{ 0x00FF, 0x01FF }, /* R38    - Speaker Volume Left */
-	{ 0x00FF, 0x01FF }, /* R39    - Speaker Volume Right */
-	{ 0x00FF, 0x00FF }, /* R40    - Input Mixer (2) */
-	{ 0x01B7, 0x01B7 }, /* R41    - Input Mixer (3) */
-	{ 0x01B7, 0x01B7 }, /* R42    - Input Mixer (4) */
-	{ 0x01C7, 0x01C7 }, /* R43    - Input Mixer (5) */
-	{ 0x01C7, 0x01C7 }, /* R44    - Input Mixer (6) */
-	{ 0x01FF, 0x01FF }, /* R45    - Output Mixer (1) */
-	{ 0x01FF, 0x01FF }, /* R46    - Output Mixer (2) */
-	{ 0x0FFF, 0x0FFF }, /* R47    - Output Mixer (3) */
-	{ 0x0FFF, 0x0FFF }, /* R48    - Output Mixer (4) */
-	{ 0x0FFF, 0x0FFF }, /* R49    - Output Mixer (5) */
-	{ 0x0FFF, 0x0FFF }, /* R50    - Output Mixer (6) */
-	{ 0x0038, 0x0038 }, /* R51    - HPOUT2 Mixer */
-	{ 0x0077, 0x0077 }, /* R52    - Line Mixer (1) */
-	{ 0x0077, 0x0077 }, /* R53    - Line Mixer (2) */
-	{ 0x03FF, 0x03FF }, /* R54    - Speaker Mixer */
-	{ 0x00C1, 0x00C1 }, /* R55    - Additional Control */
-	{ 0x00F0, 0x00F0 }, /* R56    - AntiPOP (1) */
-	{ 0x01EF, 0x01EF }, /* R57    - AntiPOP (2) */
-	{ 0x00FF, 0x00FF }, /* R58    - MICBIAS */
-	{ 0x000F, 0x000F }, /* R59    - LDO 1 */
-	{ 0x0007, 0x0007 }, /* R60    - LDO 2 */
-	{ 0x0000, 0x0000 }, /* R61 */
-	{ 0x0000, 0x0000 }, /* R62 */
-	{ 0x0000, 0x0000 }, /* R63 */
-	{ 0x0000, 0x0000 }, /* R64 */
-	{ 0x0000, 0x0000 }, /* R65 */
-	{ 0x0000, 0x0000 }, /* R66 */
-	{ 0x0000, 0x0000 }, /* R67 */
-	{ 0x0000, 0x0000 }, /* R68 */
-	{ 0x0000, 0x0000 }, /* R69 */
-	{ 0x0000, 0x0000 }, /* R70 */
-	{ 0x0000, 0x0000 }, /* R71 */
-	{ 0x0000, 0x0000 }, /* R72 */
-	{ 0x0000, 0x0000 }, /* R73 */
-	{ 0x0000, 0x0000 }, /* R74 */
-	{ 0x0000, 0x0000 }, /* R75 */
-	{ 0x8000, 0x8000 }, /* R76    - Charge Pump (1) */
-	{ 0x0000, 0x0000 }, /* R77 */
-	{ 0x0000, 0x0000 }, /* R78 */
-	{ 0x0000, 0x0000 }, /* R79 */
-	{ 0x0000, 0x0000 }, /* R80 */
-	{ 0x0301, 0x0301 }, /* R81    - Class W (1) */
-	{ 0x0000, 0x0000 }, /* R82 */
-	{ 0x0000, 0x0000 }, /* R83 */
-	{ 0x333F, 0x333F }, /* R84    - DC Servo (1) */
-	{ 0x0FEF, 0x0FEF }, /* R85    - DC Servo (2) */
-	{ 0x0000, 0x0000 }, /* R86 */
-	{ 0xFFFF, 0xFFFF }, /* R87    - DC Servo (4) */
-	{ 0x0333, 0x0000 }, /* R88    - DC Servo Readback */
-	{ 0x0000, 0x0000 }, /* R89 */
-	{ 0x0000, 0x0000 }, /* R90 */
-	{ 0x0000, 0x0000 }, /* R91 */
-	{ 0x0000, 0x0000 }, /* R92 */
-	{ 0x0000, 0x0000 }, /* R93 */
-	{ 0x0000, 0x0000 }, /* R94 */
-	{ 0x0000, 0x0000 }, /* R95 */
-	{ 0x00EE, 0x00EE }, /* R96    - Analogue HP (1) */
-	{ 0x0000, 0x0000 }, /* R97 */
-	{ 0x0000, 0x0000 }, /* R98 */
-	{ 0x0000, 0x0000 }, /* R99 */
-	{ 0x0000, 0x0000 }, /* R100 */
-	{ 0x0000, 0x0000 }, /* R101 */
-	{ 0x0000, 0x0000 }, /* R102 */
-	{ 0x0000, 0x0000 }, /* R103 */
-	{ 0x0000, 0x0000 }, /* R104 */
-	{ 0x0000, 0x0000 }, /* R105 */
-	{ 0x0000, 0x0000 }, /* R106 */
-	{ 0x0000, 0x0000 }, /* R107 */
-	{ 0x0000, 0x0000 }, /* R108 */
-	{ 0x0000, 0x0000 }, /* R109 */
-	{ 0x0000, 0x0000 }, /* R110 */
-	{ 0x0000, 0x0000 }, /* R111 */
-	{ 0x0000, 0x0000 }, /* R112 */
-	{ 0x0000, 0x0000 }, /* R113 */
-	{ 0x0000, 0x0000 }, /* R114 */
-	{ 0x0000, 0x0000 }, /* R115 */
-	{ 0x0000, 0x0000 }, /* R116 */
-	{ 0x0000, 0x0000 }, /* R117 */
-	{ 0x0000, 0x0000 }, /* R118 */
-	{ 0x0000, 0x0000 }, /* R119 */
-	{ 0x0000, 0x0000 }, /* R120 */
-	{ 0x0000, 0x0000 }, /* R121 */
-	{ 0x0000, 0x0000 }, /* R122 */
-	{ 0x0000, 0x0000 }, /* R123 */
-	{ 0x0000, 0x0000 }, /* R124 */
-	{ 0x0000, 0x0000 }, /* R125 */
-	{ 0x0000, 0x0000 }, /* R126 */
-	{ 0x0000, 0x0000 }, /* R127 */
-	{ 0x0000, 0x0000 }, /* R128 */
-	{ 0x0000, 0x0000 }, /* R129 */
-	{ 0x0000, 0x0000 }, /* R130 */
-	{ 0x0000, 0x0000 }, /* R131 */
-	{ 0x0000, 0x0000 }, /* R132 */
-	{ 0x0000, 0x0000 }, /* R133 */
-	{ 0x0000, 0x0000 }, /* R134 */
-	{ 0x0000, 0x0000 }, /* R135 */
-	{ 0x0000, 0x0000 }, /* R136 */
-	{ 0x0000, 0x0000 }, /* R137 */
-	{ 0x0000, 0x0000 }, /* R138 */
-	{ 0x0000, 0x0000 }, /* R139 */
-	{ 0x0000, 0x0000 }, /* R140 */
-	{ 0x0000, 0x0000 }, /* R141 */
-	{ 0x0000, 0x0000 }, /* R142 */
-	{ 0x0000, 0x0000 }, /* R143 */
-	{ 0x0000, 0x0000 }, /* R144 */
-	{ 0x0000, 0x0000 }, /* R145 */
-	{ 0x0000, 0x0000 }, /* R146 */
-	{ 0x0000, 0x0000 }, /* R147 */
-	{ 0x0000, 0x0000 }, /* R148 */
-	{ 0x0000, 0x0000 }, /* R149 */
-	{ 0x0000, 0x0000 }, /* R150 */
-	{ 0x0000, 0x0000 }, /* R151 */
-	{ 0x0000, 0x0000 }, /* R152 */
-	{ 0x0000, 0x0000 }, /* R153 */
-	{ 0x0000, 0x0000 }, /* R154 */
-	{ 0x0000, 0x0000 }, /* R155 */
-	{ 0x0000, 0x0000 }, /* R156 */
-	{ 0x0000, 0x0000 }, /* R157 */
-	{ 0x0000, 0x0000 }, /* R158 */
-	{ 0x0000, 0x0000 }, /* R159 */
-	{ 0x0000, 0x0000 }, /* R160 */
-	{ 0x0000, 0x0000 }, /* R161 */
-	{ 0x0000, 0x0000 }, /* R162 */
-	{ 0x0000, 0x0000 }, /* R163 */
-	{ 0x0000, 0x0000 }, /* R164 */
-	{ 0x0000, 0x0000 }, /* R165 */
-	{ 0x0000, 0x0000 }, /* R166 */
-	{ 0x0000, 0x0000 }, /* R167 */
-	{ 0x0000, 0x0000 }, /* R168 */
-	{ 0x0000, 0x0000 }, /* R169 */
-	{ 0x0000, 0x0000 }, /* R170 */
-	{ 0x0000, 0x0000 }, /* R171 */
-	{ 0x0000, 0x0000 }, /* R172 */
-	{ 0x0000, 0x0000 }, /* R173 */
-	{ 0x0000, 0x0000 }, /* R174 */
-	{ 0x0000, 0x0000 }, /* R175 */
-	{ 0x0000, 0x0000 }, /* R176 */
-	{ 0x0000, 0x0000 }, /* R177 */
-	{ 0x0000, 0x0000 }, /* R178 */
-	{ 0x0000, 0x0000 }, /* R179 */
-	{ 0x0000, 0x0000 }, /* R180 */
-	{ 0x0000, 0x0000 }, /* R181 */
-	{ 0x0000, 0x0000 }, /* R182 */
-	{ 0x0000, 0x0000 }, /* R183 */
-	{ 0x0000, 0x0000 }, /* R184 */
-	{ 0x0000, 0x0000 }, /* R185 */
-	{ 0x0000, 0x0000 }, /* R186 */
-	{ 0x0000, 0x0000 }, /* R187 */
-	{ 0x0000, 0x0000 }, /* R188 */
-	{ 0x0000, 0x0000 }, /* R189 */
-	{ 0x0000, 0x0000 }, /* R190 */
-	{ 0x0000, 0x0000 }, /* R191 */
-	{ 0x0000, 0x0000 }, /* R192 */
-	{ 0x0000, 0x0000 }, /* R193 */
-	{ 0x0000, 0x0000 }, /* R194 */
-	{ 0x0000, 0x0000 }, /* R195 */
-	{ 0x0000, 0x0000 }, /* R196 */
-	{ 0x0000, 0x0000 }, /* R197 */
-	{ 0x0000, 0x0000 }, /* R198 */
-	{ 0x0000, 0x0000 }, /* R199 */
-	{ 0x0000, 0x0000 }, /* R200 */
-	{ 0x0000, 0x0000 }, /* R201 */
-	{ 0x0000, 0x0000 }, /* R202 */
-	{ 0x0000, 0x0000 }, /* R203 */
-	{ 0x0000, 0x0000 }, /* R204 */
-	{ 0x0000, 0x0000 }, /* R205 */
-	{ 0x0000, 0x0000 }, /* R206 */
-	{ 0x0000, 0x0000 }, /* R207 */
-	{ 0x0000, 0x0000 }, /* R208 */
-	{ 0x0000, 0x0000 }, /* R209 */
-	{ 0x0000, 0x0000 }, /* R210 */
-	{ 0x0000, 0x0000 }, /* R211 */
-	{ 0x0000, 0x0000 }, /* R212 */
-	{ 0x0000, 0x0000 }, /* R213 */
-	{ 0x0000, 0x0000 }, /* R214 */
-	{ 0x0000, 0x0000 }, /* R215 */
-	{ 0x0000, 0x0000 }, /* R216 */
-	{ 0x0000, 0x0000 }, /* R217 */
-	{ 0x0000, 0x0000 }, /* R218 */
-	{ 0x0000, 0x0000 }, /* R219 */
-	{ 0x0000, 0x0000 }, /* R220 */
-	{ 0x0000, 0x0000 }, /* R221 */
-	{ 0x0000, 0x0000 }, /* R222 */
-	{ 0x0000, 0x0000 }, /* R223 */
-	{ 0x0000, 0x0000 }, /* R224 */
-	{ 0x0000, 0x0000 }, /* R225 */
-	{ 0x0000, 0x0000 }, /* R226 */
-	{ 0x0000, 0x0000 }, /* R227 */
-	{ 0x0000, 0x0000 }, /* R228 */
-	{ 0x0000, 0x0000 }, /* R229 */
-	{ 0x0000, 0x0000 }, /* R230 */
-	{ 0x0000, 0x0000 }, /* R231 */
-	{ 0x0000, 0x0000 }, /* R232 */
-	{ 0x0000, 0x0000 }, /* R233 */
-	{ 0x0000, 0x0000 }, /* R234 */
-	{ 0x0000, 0x0000 }, /* R235 */
-	{ 0x0000, 0x0000 }, /* R236 */
-	{ 0x0000, 0x0000 }, /* R237 */
-	{ 0x0000, 0x0000 }, /* R238 */
-	{ 0x0000, 0x0000 }, /* R239 */
-	{ 0x0000, 0x0000 }, /* R240 */
-	{ 0x0000, 0x0000 }, /* R241 */
-	{ 0x0000, 0x0000 }, /* R242 */
-	{ 0x0000, 0x0000 }, /* R243 */
-	{ 0x0000, 0x0000 }, /* R244 */
-	{ 0x0000, 0x0000 }, /* R245 */
-	{ 0x0000, 0x0000 }, /* R246 */
-	{ 0x0000, 0x0000 }, /* R247 */
-	{ 0x0000, 0x0000 }, /* R248 */
-	{ 0x0000, 0x0000 }, /* R249 */
-	{ 0x0000, 0x0000 }, /* R250 */
-	{ 0x0000, 0x0000 }, /* R251 */
-	{ 0x0000, 0x0000 }, /* R252 */
-	{ 0x0000, 0x0000 }, /* R253 */
-	{ 0x0000, 0x0000 }, /* R254 */
-	{ 0x0000, 0x0000 }, /* R255 */
-	{ 0x000F, 0x0000 }, /* R256   - Chip Revision */
-	{ 0x0074, 0x0074 }, /* R257   - Control Interface */
-	{ 0x0000, 0x0000 }, /* R258 */
-	{ 0x0000, 0x0000 }, /* R259 */
-	{ 0x0000, 0x0000 }, /* R260 */
-	{ 0x0000, 0x0000 }, /* R261 */
-	{ 0x0000, 0x0000 }, /* R262 */
-	{ 0x0000, 0x0000 }, /* R263 */
-	{ 0x0000, 0x0000 }, /* R264 */
-	{ 0x0000, 0x0000 }, /* R265 */
-	{ 0x0000, 0x0000 }, /* R266 */
-	{ 0x0000, 0x0000 }, /* R267 */
-	{ 0x0000, 0x0000 }, /* R268 */
-	{ 0x0000, 0x0000 }, /* R269 */
-	{ 0x0000, 0x0000 }, /* R270 */
-	{ 0x0000, 0x0000 }, /* R271 */
-	{ 0x807F, 0x837F }, /* R272   - Write Sequencer Ctrl (1) */
-	{ 0x017F, 0x0000 }, /* R273   - Write Sequencer Ctrl (2) */
-	{ 0x0000, 0x0000 }, /* R274 */
-	{ 0x0000, 0x0000 }, /* R275 */
-	{ 0x0000, 0x0000 }, /* R276 */
-	{ 0x0000, 0x0000 }, /* R277 */
-	{ 0x0000, 0x0000 }, /* R278 */
-	{ 0x0000, 0x0000 }, /* R279 */
-	{ 0x0000, 0x0000 }, /* R280 */
-	{ 0x0000, 0x0000 }, /* R281 */
-	{ 0x0000, 0x0000 }, /* R282 */
-	{ 0x0000, 0x0000 }, /* R283 */
-	{ 0x0000, 0x0000 }, /* R284 */
-	{ 0x0000, 0x0000 }, /* R285 */
-	{ 0x0000, 0x0000 }, /* R286 */
-	{ 0x0000, 0x0000 }, /* R287 */
-	{ 0x0000, 0x0000 }, /* R288 */
-	{ 0x0000, 0x0000 }, /* R289 */
-	{ 0x0000, 0x0000 }, /* R290 */
-	{ 0x0000, 0x0000 }, /* R291 */
-	{ 0x0000, 0x0000 }, /* R292 */
-	{ 0x0000, 0x0000 }, /* R293 */
-	{ 0x0000, 0x0000 }, /* R294 */
-	{ 0x0000, 0x0000 }, /* R295 */
-	{ 0x0000, 0x0000 }, /* R296 */
-	{ 0x0000, 0x0000 }, /* R297 */
-	{ 0x0000, 0x0000 }, /* R298 */
-	{ 0x0000, 0x0000 }, /* R299 */
-	{ 0x0000, 0x0000 }, /* R300 */
-	{ 0x0000, 0x0000 }, /* R301 */
-	{ 0x0000, 0x0000 }, /* R302 */
-	{ 0x0000, 0x0000 }, /* R303 */
-	{ 0x0000, 0x0000 }, /* R304 */
-	{ 0x0000, 0x0000 }, /* R305 */
-	{ 0x0000, 0x0000 }, /* R306 */
-	{ 0x0000, 0x0000 }, /* R307 */
-	{ 0x0000, 0x0000 }, /* R308 */
-	{ 0x0000, 0x0000 }, /* R309 */
-	{ 0x0000, 0x0000 }, /* R310 */
-	{ 0x0000, 0x0000 }, /* R311 */
-	{ 0x0000, 0x0000 }, /* R312 */
-	{ 0x0000, 0x0000 }, /* R313 */
-	{ 0x0000, 0x0000 }, /* R314 */
-	{ 0x0000, 0x0000 }, /* R315 */
-	{ 0x0000, 0x0000 }, /* R316 */
-	{ 0x0000, 0x0000 }, /* R317 */
-	{ 0x0000, 0x0000 }, /* R318 */
-	{ 0x0000, 0x0000 }, /* R319 */
-	{ 0x0000, 0x0000 }, /* R320 */
-	{ 0x0000, 0x0000 }, /* R321 */
-	{ 0x0000, 0x0000 }, /* R322 */
-	{ 0x0000, 0x0000 }, /* R323 */
-	{ 0x0000, 0x0000 }, /* R324 */
-	{ 0x0000, 0x0000 }, /* R325 */
-	{ 0x0000, 0x0000 }, /* R326 */
-	{ 0x0000, 0x0000 }, /* R327 */
-	{ 0x0000, 0x0000 }, /* R328 */
-	{ 0x0000, 0x0000 }, /* R329 */
-	{ 0x0000, 0x0000 }, /* R330 */
-	{ 0x0000, 0x0000 }, /* R331 */
-	{ 0x0000, 0x0000 }, /* R332 */
-	{ 0x0000, 0x0000 }, /* R333 */
-	{ 0x0000, 0x0000 }, /* R334 */
-	{ 0x0000, 0x0000 }, /* R335 */
-	{ 0x0000, 0x0000 }, /* R336 */
-	{ 0x0000, 0x0000 }, /* R337 */
-	{ 0x0000, 0x0000 }, /* R338 */
-	{ 0x0000, 0x0000 }, /* R339 */
-	{ 0x0000, 0x0000 }, /* R340 */
-	{ 0x0000, 0x0000 }, /* R341 */
-	{ 0x0000, 0x0000 }, /* R342 */
-	{ 0x0000, 0x0000 }, /* R343 */
-	{ 0x0000, 0x0000 }, /* R344 */
-	{ 0x0000, 0x0000 }, /* R345 */
-	{ 0x0000, 0x0000 }, /* R346 */
-	{ 0x0000, 0x0000 }, /* R347 */
-	{ 0x0000, 0x0000 }, /* R348 */
-	{ 0x0000, 0x0000 }, /* R349 */
-	{ 0x0000, 0x0000 }, /* R350 */
-	{ 0x0000, 0x0000 }, /* R351 */
-	{ 0x0000, 0x0000 }, /* R352 */
-	{ 0x0000, 0x0000 }, /* R353 */
-	{ 0x0000, 0x0000 }, /* R354 */
-	{ 0x0000, 0x0000 }, /* R355 */
-	{ 0x0000, 0x0000 }, /* R356 */
-	{ 0x0000, 0x0000 }, /* R357 */
-	{ 0x0000, 0x0000 }, /* R358 */
-	{ 0x0000, 0x0000 }, /* R359 */
-	{ 0x0000, 0x0000 }, /* R360 */
-	{ 0x0000, 0x0000 }, /* R361 */
-	{ 0x0000, 0x0000 }, /* R362 */
-	{ 0x0000, 0x0000 }, /* R363 */
-	{ 0x0000, 0x0000 }, /* R364 */
-	{ 0x0000, 0x0000 }, /* R365 */
-	{ 0x0000, 0x0000 }, /* R366 */
-	{ 0x0000, 0x0000 }, /* R367 */
-	{ 0x0000, 0x0000 }, /* R368 */
-	{ 0x0000, 0x0000 }, /* R369 */
-	{ 0x0000, 0x0000 }, /* R370 */
-	{ 0x0000, 0x0000 }, /* R371 */
-	{ 0x0000, 0x0000 }, /* R372 */
-	{ 0x0000, 0x0000 }, /* R373 */
-	{ 0x0000, 0x0000 }, /* R374 */
-	{ 0x0000, 0x0000 }, /* R375 */
-	{ 0x0000, 0x0000 }, /* R376 */
-	{ 0x0000, 0x0000 }, /* R377 */
-	{ 0x0000, 0x0000 }, /* R378 */
-	{ 0x0000, 0x0000 }, /* R379 */
-	{ 0x0000, 0x0000 }, /* R380 */
-	{ 0x0000, 0x0000 }, /* R381 */
-	{ 0x0000, 0x0000 }, /* R382 */
-	{ 0x0000, 0x0000 }, /* R383 */
-	{ 0x0000, 0x0000 }, /* R384 */
-	{ 0x0000, 0x0000 }, /* R385 */
-	{ 0x0000, 0x0000 }, /* R386 */
-	{ 0x0000, 0x0000 }, /* R387 */
-	{ 0x0000, 0x0000 }, /* R388 */
-	{ 0x0000, 0x0000 }, /* R389 */
-	{ 0x0000, 0x0000 }, /* R390 */
-	{ 0x0000, 0x0000 }, /* R391 */
-	{ 0x0000, 0x0000 }, /* R392 */
-	{ 0x0000, 0x0000 }, /* R393 */
-	{ 0x0000, 0x0000 }, /* R394 */
-	{ 0x0000, 0x0000 }, /* R395 */
-	{ 0x0000, 0x0000 }, /* R396 */
-	{ 0x0000, 0x0000 }, /* R397 */
-	{ 0x0000, 0x0000 }, /* R398 */
-	{ 0x0000, 0x0000 }, /* R399 */
-	{ 0x0000, 0x0000 }, /* R400 */
-	{ 0x0000, 0x0000 }, /* R401 */
-	{ 0x0000, 0x0000 }, /* R402 */
-	{ 0x0000, 0x0000 }, /* R403 */
-	{ 0x0000, 0x0000 }, /* R404 */
-	{ 0x0000, 0x0000 }, /* R405 */
-	{ 0x0000, 0x0000 }, /* R406 */
-	{ 0x0000, 0x0000 }, /* R407 */
-	{ 0x0000, 0x0000 }, /* R408 */
-	{ 0x0000, 0x0000 }, /* R409 */
-	{ 0x0000, 0x0000 }, /* R410 */
-	{ 0x0000, 0x0000 }, /* R411 */
-	{ 0x0000, 0x0000 }, /* R412 */
-	{ 0x0000, 0x0000 }, /* R413 */
-	{ 0x0000, 0x0000 }, /* R414 */
-	{ 0x0000, 0x0000 }, /* R415 */
-	{ 0x0000, 0x0000 }, /* R416 */
-	{ 0x0000, 0x0000 }, /* R417 */
-	{ 0x0000, 0x0000 }, /* R418 */
-	{ 0x0000, 0x0000 }, /* R419 */
-	{ 0x0000, 0x0000 }, /* R420 */
-	{ 0x0000, 0x0000 }, /* R421 */
-	{ 0x0000, 0x0000 }, /* R422 */
-	{ 0x0000, 0x0000 }, /* R423 */
-	{ 0x0000, 0x0000 }, /* R424 */
-	{ 0x0000, 0x0000 }, /* R425 */
-	{ 0x0000, 0x0000 }, /* R426 */
-	{ 0x0000, 0x0000 }, /* R427 */
-	{ 0x0000, 0x0000 }, /* R428 */
-	{ 0x0000, 0x0000 }, /* R429 */
-	{ 0x0000, 0x0000 }, /* R430 */
-	{ 0x0000, 0x0000 }, /* R431 */
-	{ 0x0000, 0x0000 }, /* R432 */
-	{ 0x0000, 0x0000 }, /* R433 */
-	{ 0x0000, 0x0000 }, /* R434 */
-	{ 0x0000, 0x0000 }, /* R435 */
-	{ 0x0000, 0x0000 }, /* R436 */
-	{ 0x0000, 0x0000 }, /* R437 */
-	{ 0x0000, 0x0000 }, /* R438 */
-	{ 0x0000, 0x0000 }, /* R439 */
-	{ 0x0000, 0x0000 }, /* R440 */
-	{ 0x0000, 0x0000 }, /* R441 */
-	{ 0x0000, 0x0000 }, /* R442 */
-	{ 0x0000, 0x0000 }, /* R443 */
-	{ 0x0000, 0x0000 }, /* R444 */
-	{ 0x0000, 0x0000 }, /* R445 */
-	{ 0x0000, 0x0000 }, /* R446 */
-	{ 0x0000, 0x0000 }, /* R447 */
-	{ 0x0000, 0x0000 }, /* R448 */
-	{ 0x0000, 0x0000 }, /* R449 */
-	{ 0x0000, 0x0000 }, /* R450 */
-	{ 0x0000, 0x0000 }, /* R451 */
-	{ 0x0000, 0x0000 }, /* R452 */
-	{ 0x0000, 0x0000 }, /* R453 */
-	{ 0x0000, 0x0000 }, /* R454 */
-	{ 0x0000, 0x0000 }, /* R455 */
-	{ 0x0000, 0x0000 }, /* R456 */
-	{ 0x0000, 0x0000 }, /* R457 */
-	{ 0x0000, 0x0000 }, /* R458 */
-	{ 0x0000, 0x0000 }, /* R459 */
-	{ 0x0000, 0x0000 }, /* R460 */
-	{ 0x0000, 0x0000 }, /* R461 */
-	{ 0x0000, 0x0000 }, /* R462 */
-	{ 0x0000, 0x0000 }, /* R463 */
-	{ 0x0000, 0x0000 }, /* R464 */
-	{ 0x0000, 0x0000 }, /* R465 */
-	{ 0x0000, 0x0000 }, /* R466 */
-	{ 0x0000, 0x0000 }, /* R467 */
-	{ 0x0000, 0x0000 }, /* R468 */
-	{ 0x0000, 0x0000 }, /* R469 */
-	{ 0x0000, 0x0000 }, /* R470 */
-	{ 0x0000, 0x0000 }, /* R471 */
-	{ 0x0000, 0x0000 }, /* R472 */
-	{ 0x0000, 0x0000 }, /* R473 */
-	{ 0x0000, 0x0000 }, /* R474 */
-	{ 0x0000, 0x0000 }, /* R475 */
-	{ 0x0000, 0x0000 }, /* R476 */
-	{ 0x0000, 0x0000 }, /* R477 */
-	{ 0x0000, 0x0000 }, /* R478 */
-	{ 0x0000, 0x0000 }, /* R479 */
-	{ 0x0000, 0x0000 }, /* R480 */
-	{ 0x0000, 0x0000 }, /* R481 */
-	{ 0x0000, 0x0000 }, /* R482 */
-	{ 0x0000, 0x0000 }, /* R483 */
-	{ 0x0000, 0x0000 }, /* R484 */
-	{ 0x0000, 0x0000 }, /* R485 */
-	{ 0x0000, 0x0000 }, /* R486 */
-	{ 0x0000, 0x0000 }, /* R487 */
-	{ 0x0000, 0x0000 }, /* R488 */
-	{ 0x0000, 0x0000 }, /* R489 */
-	{ 0x0000, 0x0000 }, /* R490 */
-	{ 0x0000, 0x0000 }, /* R491 */
-	{ 0x0000, 0x0000 }, /* R492 */
-	{ 0x0000, 0x0000 }, /* R493 */
-	{ 0x0000, 0x0000 }, /* R494 */
-	{ 0x0000, 0x0000 }, /* R495 */
-	{ 0x0000, 0x0000 }, /* R496 */
-	{ 0x0000, 0x0000 }, /* R497 */
-	{ 0x0000, 0x0000 }, /* R498 */
-	{ 0x0000, 0x0000 }, /* R499 */
-	{ 0x0000, 0x0000 }, /* R500 */
-	{ 0x0000, 0x0000 }, /* R501 */
-	{ 0x0000, 0x0000 }, /* R502 */
-	{ 0x0000, 0x0000 }, /* R503 */
-	{ 0x0000, 0x0000 }, /* R504 */
-	{ 0x0000, 0x0000 }, /* R505 */
-	{ 0x0000, 0x0000 }, /* R506 */
-	{ 0x0000, 0x0000 }, /* R507 */
-	{ 0x0000, 0x0000 }, /* R508 */
-	{ 0x0000, 0x0000 }, /* R509 */
-	{ 0x0000, 0x0000 }, /* R510 */
-	{ 0x0000, 0x0000 }, /* R511 */
-	{ 0x001F, 0x001F }, /* R512   - AIF1 Clocking (1) */
-	{ 0x003F, 0x003F }, /* R513   - AIF1 Clocking (2) */
-	{ 0x0000, 0x0000 }, /* R514 */
-	{ 0x0000, 0x0000 }, /* R515 */
-	{ 0x001F, 0x001F }, /* R516   - AIF2 Clocking (1) */
-	{ 0x003F, 0x003F }, /* R517   - AIF2 Clocking (2) */
-	{ 0x0000, 0x0000 }, /* R518 */
-	{ 0x0000, 0x0000 }, /* R519 */
-	{ 0x001F, 0x001F }, /* R520   - Clocking (1) */
-	{ 0x0777, 0x0777 }, /* R521   - Clocking (2) */
-	{ 0x0000, 0x0000 }, /* R522 */
-	{ 0x0000, 0x0000 }, /* R523 */
-	{ 0x0000, 0x0000 }, /* R524 */
-	{ 0x0000, 0x0000 }, /* R525 */
-	{ 0x0000, 0x0000 }, /* R526 */
-	{ 0x0000, 0x0000 }, /* R527 */
-	{ 0x00FF, 0x00FF }, /* R528   - AIF1 Rate */
-	{ 0x00FF, 0x00FF }, /* R529   - AIF2 Rate */
-	{ 0x000F, 0x0000 }, /* R530   - Rate Status */
-	{ 0x0000, 0x0000 }, /* R531 */
-	{ 0x0000, 0x0000 }, /* R532 */
-	{ 0x0000, 0x0000 }, /* R533 */
-	{ 0x0000, 0x0000 }, /* R534 */
-	{ 0x0000, 0x0000 }, /* R535 */
-	{ 0x0000, 0x0000 }, /* R536 */
-	{ 0x0000, 0x0000 }, /* R537 */
-	{ 0x0000, 0x0000 }, /* R538 */
-	{ 0x0000, 0x0000 }, /* R539 */
-	{ 0x0000, 0x0000 }, /* R540 */
-	{ 0x0000, 0x0000 }, /* R541 */
-	{ 0x0000, 0x0000 }, /* R542 */
-	{ 0x0000, 0x0000 }, /* R543 */
-	{ 0x0007, 0x0007 }, /* R544   - FLL1 Control (1) */
-	{ 0x3F77, 0x3F77 }, /* R545   - FLL1 Control (2) */
-	{ 0xFFFF, 0xFFFF }, /* R546   - FLL1 Control (3) */
-	{ 0x7FEF, 0x7FEF }, /* R547   - FLL1 Control (4) */
-	{ 0x1FDB, 0x1FDB }, /* R548   - FLL1 Control (5) */
-	{ 0x0000, 0x0000 }, /* R549 */
-	{ 0x0000, 0x0000 }, /* R550 */
-	{ 0x0000, 0x0000 }, /* R551 */
-	{ 0x0000, 0x0000 }, /* R552 */
-	{ 0x0000, 0x0000 }, /* R553 */
-	{ 0x0000, 0x0000 }, /* R554 */
-	{ 0x0000, 0x0000 }, /* R555 */
-	{ 0x0000, 0x0000 }, /* R556 */
-	{ 0x0000, 0x0000 }, /* R557 */
-	{ 0x0000, 0x0000 }, /* R558 */
-	{ 0x0000, 0x0000 }, /* R559 */
-	{ 0x0000, 0x0000 }, /* R560 */
-	{ 0x0000, 0x0000 }, /* R561 */
-	{ 0x0000, 0x0000 }, /* R562 */
-	{ 0x0000, 0x0000 }, /* R563 */
-	{ 0x0000, 0x0000 }, /* R564 */
-	{ 0x0000, 0x0000 }, /* R565 */
-	{ 0x0000, 0x0000 }, /* R566 */
-	{ 0x0000, 0x0000 }, /* R567 */
-	{ 0x0000, 0x0000 }, /* R568 */
-	{ 0x0000, 0x0000 }, /* R569 */
-	{ 0x0000, 0x0000 }, /* R570 */
-	{ 0x0000, 0x0000 }, /* R571 */
-	{ 0x0000, 0x0000 }, /* R572 */
-	{ 0x0000, 0x0000 }, /* R573 */
-	{ 0x0000, 0x0000 }, /* R574 */
-	{ 0x0000, 0x0000 }, /* R575 */
-	{ 0x0007, 0x0007 }, /* R576   - FLL2 Control (1) */
-	{ 0x3F77, 0x3F77 }, /* R577   - FLL2 Control (2) */
-	{ 0xFFFF, 0xFFFF }, /* R578   - FLL2 Control (3) */
-	{ 0x7FEF, 0x7FEF }, /* R579   - FLL2 Control (4) */
-	{ 0x1FDB, 0x1FDB }, /* R580   - FLL2 Control (5) */
-	{ 0x0000, 0x0000 }, /* R581 */
-	{ 0x0000, 0x0000 }, /* R582 */
-	{ 0x0000, 0x0000 }, /* R583 */
-	{ 0x0000, 0x0000 }, /* R584 */
-	{ 0x0000, 0x0000 }, /* R585 */
-	{ 0x0000, 0x0000 }, /* R586 */
-	{ 0x0000, 0x0000 }, /* R587 */
-	{ 0x0000, 0x0000 }, /* R588 */
-	{ 0x0000, 0x0000 }, /* R589 */
-	{ 0x0000, 0x0000 }, /* R590 */
-	{ 0x0000, 0x0000 }, /* R591 */
-	{ 0x0000, 0x0000 }, /* R592 */
-	{ 0x0000, 0x0000 }, /* R593 */
-	{ 0x0000, 0x0000 }, /* R594 */
-	{ 0x0000, 0x0000 }, /* R595 */
-	{ 0x0000, 0x0000 }, /* R596 */
-	{ 0x0000, 0x0000 }, /* R597 */
-	{ 0x0000, 0x0000 }, /* R598 */
-	{ 0x0000, 0x0000 }, /* R599 */
-	{ 0x0000, 0x0000 }, /* R600 */
-	{ 0x0000, 0x0000 }, /* R601 */
-	{ 0x0000, 0x0000 }, /* R602 */
-	{ 0x0000, 0x0000 }, /* R603 */
-	{ 0x0000, 0x0000 }, /* R604 */
-	{ 0x0000, 0x0000 }, /* R605 */
-	{ 0x0000, 0x0000 }, /* R606 */
-	{ 0x0000, 0x0000 }, /* R607 */
-	{ 0x0000, 0x0000 }, /* R608 */
-	{ 0x0000, 0x0000 }, /* R609 */
-	{ 0x0000, 0x0000 }, /* R610 */
-	{ 0x0000, 0x0000 }, /* R611 */
-	{ 0x0000, 0x0000 }, /* R612 */
-	{ 0x0000, 0x0000 }, /* R613 */
-	{ 0x0000, 0x0000 }, /* R614 */
-	{ 0x0000, 0x0000 }, /* R615 */
-	{ 0x0000, 0x0000 }, /* R616 */
-	{ 0x0000, 0x0000 }, /* R617 */
-	{ 0x0000, 0x0000 }, /* R618 */
-	{ 0x0000, 0x0000 }, /* R619 */
-	{ 0x0000, 0x0000 }, /* R620 */
-	{ 0x0000, 0x0000 }, /* R621 */
-	{ 0x0000, 0x0000 }, /* R622 */
-	{ 0x0000, 0x0000 }, /* R623 */
-	{ 0x0000, 0x0000 }, /* R624 */
-	{ 0x0000, 0x0000 }, /* R625 */
-	{ 0x0000, 0x0000 }, /* R626 */
-	{ 0x0000, 0x0000 }, /* R627 */
-	{ 0x0000, 0x0000 }, /* R628 */
-	{ 0x0000, 0x0000 }, /* R629 */
-	{ 0x0000, 0x0000 }, /* R630 */
-	{ 0x0000, 0x0000 }, /* R631 */
-	{ 0x0000, 0x0000 }, /* R632 */
-	{ 0x0000, 0x0000 }, /* R633 */
-	{ 0x0000, 0x0000 }, /* R634 */
-	{ 0x0000, 0x0000 }, /* R635 */
-	{ 0x0000, 0x0000 }, /* R636 */
-	{ 0x0000, 0x0000 }, /* R637 */
-	{ 0x0000, 0x0000 }, /* R638 */
-	{ 0x0000, 0x0000 }, /* R639 */
-	{ 0x0000, 0x0000 }, /* R640 */
-	{ 0x0000, 0x0000 }, /* R641 */
-	{ 0x0000, 0x0000 }, /* R642 */
-	{ 0x0000, 0x0000 }, /* R643 */
-	{ 0x0000, 0x0000 }, /* R644 */
-	{ 0x0000, 0x0000 }, /* R645 */
-	{ 0x0000, 0x0000 }, /* R646 */
-	{ 0x0000, 0x0000 }, /* R647 */
-	{ 0x0000, 0x0000 }, /* R648 */
-	{ 0x0000, 0x0000 }, /* R649 */
-	{ 0x0000, 0x0000 }, /* R650 */
-	{ 0x0000, 0x0000 }, /* R651 */
-	{ 0x0000, 0x0000 }, /* R652 */
-	{ 0x0000, 0x0000 }, /* R653 */
-	{ 0x0000, 0x0000 }, /* R654 */
-	{ 0x0000, 0x0000 }, /* R655 */
-	{ 0x0000, 0x0000 }, /* R656 */
-	{ 0x0000, 0x0000 }, /* R657 */
-	{ 0x0000, 0x0000 }, /* R658 */
-	{ 0x0000, 0x0000 }, /* R659 */
-	{ 0x0000, 0x0000 }, /* R660 */
-	{ 0x0000, 0x0000 }, /* R661 */
-	{ 0x0000, 0x0000 }, /* R662 */
-	{ 0x0000, 0x0000 }, /* R663 */
-	{ 0x0000, 0x0000 }, /* R664 */
-	{ 0x0000, 0x0000 }, /* R665 */
-	{ 0x0000, 0x0000 }, /* R666 */
-	{ 0x0000, 0x0000 }, /* R667 */
-	{ 0x0000, 0x0000 }, /* R668 */
-	{ 0x0000, 0x0000 }, /* R669 */
-	{ 0x0000, 0x0000 }, /* R670 */
-	{ 0x0000, 0x0000 }, /* R671 */
-	{ 0x0000, 0x0000 }, /* R672 */
-	{ 0x0000, 0x0000 }, /* R673 */
-	{ 0x0000, 0x0000 }, /* R674 */
-	{ 0x0000, 0x0000 }, /* R675 */
-	{ 0x0000, 0x0000 }, /* R676 */
-	{ 0x0000, 0x0000 }, /* R677 */
-	{ 0x0000, 0x0000 }, /* R678 */
-	{ 0x0000, 0x0000 }, /* R679 */
-	{ 0x0000, 0x0000 }, /* R680 */
-	{ 0x0000, 0x0000 }, /* R681 */
-	{ 0x0000, 0x0000 }, /* R682 */
-	{ 0x0000, 0x0000 }, /* R683 */
-	{ 0x0000, 0x0000 }, /* R684 */
-	{ 0x0000, 0x0000 }, /* R685 */
-	{ 0x0000, 0x0000 }, /* R686 */
-	{ 0x0000, 0x0000 }, /* R687 */
-	{ 0x0000, 0x0000 }, /* R688 */
-	{ 0x0000, 0x0000 }, /* R689 */
-	{ 0x0000, 0x0000 }, /* R690 */
-	{ 0x0000, 0x0000 }, /* R691 */
-	{ 0x0000, 0x0000 }, /* R692 */
-	{ 0x0000, 0x0000 }, /* R693 */
-	{ 0x0000, 0x0000 }, /* R694 */
-	{ 0x0000, 0x0000 }, /* R695 */
-	{ 0x0000, 0x0000 }, /* R696 */
-	{ 0x0000, 0x0000 }, /* R697 */
-	{ 0x0000, 0x0000 }, /* R698 */
-	{ 0x0000, 0x0000 }, /* R699 */
-	{ 0x0000, 0x0000 }, /* R700 */
-	{ 0x0000, 0x0000 }, /* R701 */
-	{ 0x0000, 0x0000 }, /* R702 */
-	{ 0x0000, 0x0000 }, /* R703 */
-	{ 0x0000, 0x0000 }, /* R704 */
-	{ 0x0000, 0x0000 }, /* R705 */
-	{ 0x0000, 0x0000 }, /* R706 */
-	{ 0x0000, 0x0000 }, /* R707 */
-	{ 0x0000, 0x0000 }, /* R708 */
-	{ 0x0000, 0x0000 }, /* R709 */
-	{ 0x0000, 0x0000 }, /* R710 */
-	{ 0x0000, 0x0000 }, /* R711 */
-	{ 0x0000, 0x0000 }, /* R712 */
-	{ 0x0000, 0x0000 }, /* R713 */
-	{ 0x0000, 0x0000 }, /* R714 */
-	{ 0x0000, 0x0000 }, /* R715 */
-	{ 0x0000, 0x0000 }, /* R716 */
-	{ 0x0000, 0x0000 }, /* R717 */
-	{ 0x0000, 0x0000 }, /* R718 */
-	{ 0x0000, 0x0000 }, /* R719 */
-	{ 0x0000, 0x0000 }, /* R720 */
-	{ 0x0000, 0x0000 }, /* R721 */
-	{ 0x0000, 0x0000 }, /* R722 */
-	{ 0x0000, 0x0000 }, /* R723 */
-	{ 0x0000, 0x0000 }, /* R724 */
-	{ 0x0000, 0x0000 }, /* R725 */
-	{ 0x0000, 0x0000 }, /* R726 */
-	{ 0x0000, 0x0000 }, /* R727 */
-	{ 0x0000, 0x0000 }, /* R728 */
-	{ 0x0000, 0x0000 }, /* R729 */
-	{ 0x0000, 0x0000 }, /* R730 */
-	{ 0x0000, 0x0000 }, /* R731 */
-	{ 0x0000, 0x0000 }, /* R732 */
-	{ 0x0000, 0x0000 }, /* R733 */
-	{ 0x0000, 0x0000 }, /* R734 */
-	{ 0x0000, 0x0000 }, /* R735 */
-	{ 0x0000, 0x0000 }, /* R736 */
-	{ 0x0000, 0x0000 }, /* R737 */
-	{ 0x0000, 0x0000 }, /* R738 */
-	{ 0x0000, 0x0000 }, /* R739 */
-	{ 0x0000, 0x0000 }, /* R740 */
-	{ 0x0000, 0x0000 }, /* R741 */
-	{ 0x0000, 0x0000 }, /* R742 */
-	{ 0x0000, 0x0000 }, /* R743 */
-	{ 0x0000, 0x0000 }, /* R744 */
-	{ 0x0000, 0x0000 }, /* R745 */
-	{ 0x0000, 0x0000 }, /* R746 */
-	{ 0x0000, 0x0000 }, /* R747 */
-	{ 0x0000, 0x0000 }, /* R748 */
-	{ 0x0000, 0x0000 }, /* R749 */
-	{ 0x0000, 0x0000 }, /* R750 */
-	{ 0x0000, 0x0000 }, /* R751 */
-	{ 0x0000, 0x0000 }, /* R752 */
-	{ 0x0000, 0x0000 }, /* R753 */
-	{ 0x0000, 0x0000 }, /* R754 */
-	{ 0x0000, 0x0000 }, /* R755 */
-	{ 0x0000, 0x0000 }, /* R756 */
-	{ 0x0000, 0x0000 }, /* R757 */
-	{ 0x0000, 0x0000 }, /* R758 */
-	{ 0x0000, 0x0000 }, /* R759 */
-	{ 0x0000, 0x0000 }, /* R760 */
-	{ 0x0000, 0x0000 }, /* R761 */
-	{ 0x0000, 0x0000 }, /* R762 */
-	{ 0x0000, 0x0000 }, /* R763 */
-	{ 0x0000, 0x0000 }, /* R764 */
-	{ 0x0000, 0x0000 }, /* R765 */
-	{ 0x0000, 0x0000 }, /* R766 */
-	{ 0x0000, 0x0000 }, /* R767 */
-	{ 0xE1F8, 0xE1F8 }, /* R768   - AIF1 Control (1) */
-	{ 0xCD1F, 0xCD1F }, /* R769   - AIF1 Control (2) */
-	{ 0xF000, 0xF000 }, /* R770   - AIF1 Master/Slave */
-	{ 0x01F0, 0x01F0 }, /* R771   - AIF1 BCLK */
-	{ 0x0FFF, 0x0FFF }, /* R772   - AIF1ADC LRCLK */
-	{ 0x0FFF, 0x0FFF }, /* R773   - AIF1DAC LRCLK */
-	{ 0x0003, 0x0003 }, /* R774   - AIF1DAC Data */
-	{ 0x0003, 0x0003 }, /* R775   - AIF1ADC Data */
-	{ 0x0000, 0x0000 }, /* R776 */
-	{ 0x0000, 0x0000 }, /* R777 */
-	{ 0x0000, 0x0000 }, /* R778 */
-	{ 0x0000, 0x0000 }, /* R779 */
-	{ 0x0000, 0x0000 }, /* R780 */
-	{ 0x0000, 0x0000 }, /* R781 */
-	{ 0x0000, 0x0000 }, /* R782 */
-	{ 0x0000, 0x0000 }, /* R783 */
-	{ 0xF1F8, 0xF1F8 }, /* R784   - AIF2 Control (1) */
-	{ 0xFD1F, 0xFD1F }, /* R785   - AIF2 Control (2) */
-	{ 0xF000, 0xF000 }, /* R786   - AIF2 Master/Slave */
-	{ 0x01F0, 0x01F0 }, /* R787   - AIF2 BCLK */
-	{ 0x0FFF, 0x0FFF }, /* R788   - AIF2ADC LRCLK */
-	{ 0x0FFF, 0x0FFF }, /* R789   - AIF2DAC LRCLK */
-	{ 0x0003, 0x0003 }, /* R790   - AIF2DAC Data */
-	{ 0x0003, 0x0003 }, /* R791   - AIF2ADC Data */
-	{ 0x0000, 0x0000 }, /* R792 */
-	{ 0x0000, 0x0000 }, /* R793 */
-	{ 0x0000, 0x0000 }, /* R794 */
-	{ 0x0000, 0x0000 }, /* R795 */
-	{ 0x0000, 0x0000 }, /* R796 */
-	{ 0x0000, 0x0000 }, /* R797 */
-	{ 0x0000, 0x0000 }, /* R798 */
-	{ 0x0000, 0x0000 }, /* R799 */
-	{ 0x0000, 0x0000 }, /* R800 */
-	{ 0x0000, 0x0000 }, /* R801 */
-	{ 0x0000, 0x0000 }, /* R802 */
-	{ 0x0000, 0x0000 }, /* R803 */
-	{ 0x0000, 0x0000 }, /* R804 */
-	{ 0x0000, 0x0000 }, /* R805 */
-	{ 0x0000, 0x0000 }, /* R806 */
-	{ 0x0000, 0x0000 }, /* R807 */
-	{ 0x0000, 0x0000 }, /* R808 */
-	{ 0x0000, 0x0000 }, /* R809 */
-	{ 0x0000, 0x0000 }, /* R810 */
-	{ 0x0000, 0x0000 }, /* R811 */
-	{ 0x0000, 0x0000 }, /* R812 */
-	{ 0x0000, 0x0000 }, /* R813 */
-	{ 0x0000, 0x0000 }, /* R814 */
-	{ 0x0000, 0x0000 }, /* R815 */
-	{ 0x0000, 0x0000 }, /* R816 */
-	{ 0x0000, 0x0000 }, /* R817 */
-	{ 0x0000, 0x0000 }, /* R818 */
-	{ 0x0000, 0x0000 }, /* R819 */
-	{ 0x0000, 0x0000 }, /* R820 */
-	{ 0x0000, 0x0000 }, /* R821 */
-	{ 0x0000, 0x0000 }, /* R822 */
-	{ 0x0000, 0x0000 }, /* R823 */
-	{ 0x0000, 0x0000 }, /* R824 */
-	{ 0x0000, 0x0000 }, /* R825 */
-	{ 0x0000, 0x0000 }, /* R826 */
-	{ 0x0000, 0x0000 }, /* R827 */
-	{ 0x0000, 0x0000 }, /* R828 */
-	{ 0x0000, 0x0000 }, /* R829 */
-	{ 0x0000, 0x0000 }, /* R830 */
-	{ 0x0000, 0x0000 }, /* R831 */
-	{ 0x0000, 0x0000 }, /* R832 */
-	{ 0x0000, 0x0000 }, /* R833 */
-	{ 0x0000, 0x0000 }, /* R834 */
-	{ 0x0000, 0x0000 }, /* R835 */
-	{ 0x0000, 0x0000 }, /* R836 */
-	{ 0x0000, 0x0000 }, /* R837 */
-	{ 0x0000, 0x0000 }, /* R838 */
-	{ 0x0000, 0x0000 }, /* R839 */
-	{ 0x0000, 0x0000 }, /* R840 */
-	{ 0x0000, 0x0000 }, /* R841 */
-	{ 0x0000, 0x0000 }, /* R842 */
-	{ 0x0000, 0x0000 }, /* R843 */
-	{ 0x0000, 0x0000 }, /* R844 */
-	{ 0x0000, 0x0000 }, /* R845 */
-	{ 0x0000, 0x0000 }, /* R846 */
-	{ 0x0000, 0x0000 }, /* R847 */
-	{ 0x0000, 0x0000 }, /* R848 */
-	{ 0x0000, 0x0000 }, /* R849 */
-	{ 0x0000, 0x0000 }, /* R850 */
-	{ 0x0000, 0x0000 }, /* R851 */
-	{ 0x0000, 0x0000 }, /* R852 */
-	{ 0x0000, 0x0000 }, /* R853 */
-	{ 0x0000, 0x0000 }, /* R854 */
-	{ 0x0000, 0x0000 }, /* R855 */
-	{ 0x0000, 0x0000 }, /* R856 */
-	{ 0x0000, 0x0000 }, /* R857 */
-	{ 0x0000, 0x0000 }, /* R858 */
-	{ 0x0000, 0x0000 }, /* R859 */
-	{ 0x0000, 0x0000 }, /* R860 */
-	{ 0x0000, 0x0000 }, /* R861 */
-	{ 0x0000, 0x0000 }, /* R862 */
-	{ 0x0000, 0x0000 }, /* R863 */
-	{ 0x0000, 0x0000 }, /* R864 */
-	{ 0x0000, 0x0000 }, /* R865 */
-	{ 0x0000, 0x0000 }, /* R866 */
-	{ 0x0000, 0x0000 }, /* R867 */
-	{ 0x0000, 0x0000 }, /* R868 */
-	{ 0x0000, 0x0000 }, /* R869 */
-	{ 0x0000, 0x0000 }, /* R870 */
-	{ 0x0000, 0x0000 }, /* R871 */
-	{ 0x0000, 0x0000 }, /* R872 */
-	{ 0x0000, 0x0000 }, /* R873 */
-	{ 0x0000, 0x0000 }, /* R874 */
-	{ 0x0000, 0x0000 }, /* R875 */
-	{ 0x0000, 0x0000 }, /* R876 */
-	{ 0x0000, 0x0000 }, /* R877 */
-	{ 0x0000, 0x0000 }, /* R878 */
-	{ 0x0000, 0x0000 }, /* R879 */
-	{ 0x0000, 0x0000 }, /* R880 */
-	{ 0x0000, 0x0000 }, /* R881 */
-	{ 0x0000, 0x0000 }, /* R882 */
-	{ 0x0000, 0x0000 }, /* R883 */
-	{ 0x0000, 0x0000 }, /* R884 */
-	{ 0x0000, 0x0000 }, /* R885 */
-	{ 0x0000, 0x0000 }, /* R886 */
-	{ 0x0000, 0x0000 }, /* R887 */
-	{ 0x0000, 0x0000 }, /* R888 */
-	{ 0x0000, 0x0000 }, /* R889 */
-	{ 0x0000, 0x0000 }, /* R890 */
-	{ 0x0000, 0x0000 }, /* R891 */
-	{ 0x0000, 0x0000 }, /* R892 */
-	{ 0x0000, 0x0000 }, /* R893 */
-	{ 0x0000, 0x0000 }, /* R894 */
-	{ 0x0000, 0x0000 }, /* R895 */
-	{ 0x0000, 0x0000 }, /* R896 */
-	{ 0x0000, 0x0000 }, /* R897 */
-	{ 0x0000, 0x0000 }, /* R898 */
-	{ 0x0000, 0x0000 }, /* R899 */
-	{ 0x0000, 0x0000 }, /* R900 */
-	{ 0x0000, 0x0000 }, /* R901 */
-	{ 0x0000, 0x0000 }, /* R902 */
-	{ 0x0000, 0x0000 }, /* R903 */
-	{ 0x0000, 0x0000 }, /* R904 */
-	{ 0x0000, 0x0000 }, /* R905 */
-	{ 0x0000, 0x0000 }, /* R906 */
-	{ 0x0000, 0x0000 }, /* R907 */
-	{ 0x0000, 0x0000 }, /* R908 */
-	{ 0x0000, 0x0000 }, /* R909 */
-	{ 0x0000, 0x0000 }, /* R910 */
-	{ 0x0000, 0x0000 }, /* R911 */
-	{ 0x0000, 0x0000 }, /* R912 */
-	{ 0x0000, 0x0000 }, /* R913 */
-	{ 0x0000, 0x0000 }, /* R914 */
-	{ 0x0000, 0x0000 }, /* R915 */
-	{ 0x0000, 0x0000 }, /* R916 */
-	{ 0x0000, 0x0000 }, /* R917 */
-	{ 0x0000, 0x0000 }, /* R918 */
-	{ 0x0000, 0x0000 }, /* R919 */
-	{ 0x0000, 0x0000 }, /* R920 */
-	{ 0x0000, 0x0000 }, /* R921 */
-	{ 0x0000, 0x0000 }, /* R922 */
-	{ 0x0000, 0x0000 }, /* R923 */
-	{ 0x0000, 0x0000 }, /* R924 */
-	{ 0x0000, 0x0000 }, /* R925 */
-	{ 0x0000, 0x0000 }, /* R926 */
-	{ 0x0000, 0x0000 }, /* R927 */
-	{ 0x0000, 0x0000 }, /* R928 */
-	{ 0x0000, 0x0000 }, /* R929 */
-	{ 0x0000, 0x0000 }, /* R930 */
-	{ 0x0000, 0x0000 }, /* R931 */
-	{ 0x0000, 0x0000 }, /* R932 */
-	{ 0x0000, 0x0000 }, /* R933 */
-	{ 0x0000, 0x0000 }, /* R934 */
-	{ 0x0000, 0x0000 }, /* R935 */
-	{ 0x0000, 0x0000 }, /* R936 */
-	{ 0x0000, 0x0000 }, /* R937 */
-	{ 0x0000, 0x0000 }, /* R938 */
-	{ 0x0000, 0x0000 }, /* R939 */
-	{ 0x0000, 0x0000 }, /* R940 */
-	{ 0x0000, 0x0000 }, /* R941 */
-	{ 0x0000, 0x0000 }, /* R942 */
-	{ 0x0000, 0x0000 }, /* R943 */
-	{ 0x0000, 0x0000 }, /* R944 */
-	{ 0x0000, 0x0000 }, /* R945 */
-	{ 0x0000, 0x0000 }, /* R946 */
-	{ 0x0000, 0x0000 }, /* R947 */
-	{ 0x0000, 0x0000 }, /* R948 */
-	{ 0x0000, 0x0000 }, /* R949 */
-	{ 0x0000, 0x0000 }, /* R950 */
-	{ 0x0000, 0x0000 }, /* R951 */
-	{ 0x0000, 0x0000 }, /* R952 */
-	{ 0x0000, 0x0000 }, /* R953 */
-	{ 0x0000, 0x0000 }, /* R954 */
-	{ 0x0000, 0x0000 }, /* R955 */
-	{ 0x0000, 0x0000 }, /* R956 */
-	{ 0x0000, 0x0000 }, /* R957 */
-	{ 0x0000, 0x0000 }, /* R958 */
-	{ 0x0000, 0x0000 }, /* R959 */
-	{ 0x0000, 0x0000 }, /* R960 */
-	{ 0x0000, 0x0000 }, /* R961 */
-	{ 0x0000, 0x0000 }, /* R962 */
-	{ 0x0000, 0x0000 }, /* R963 */
-	{ 0x0000, 0x0000 }, /* R964 */
-	{ 0x0000, 0x0000 }, /* R965 */
-	{ 0x0000, 0x0000 }, /* R966 */
-	{ 0x0000, 0x0000 }, /* R967 */
-	{ 0x0000, 0x0000 }, /* R968 */
-	{ 0x0000, 0x0000 }, /* R969 */
-	{ 0x0000, 0x0000 }, /* R970 */
-	{ 0x0000, 0x0000 }, /* R971 */
-	{ 0x0000, 0x0000 }, /* R972 */
-	{ 0x0000, 0x0000 }, /* R973 */
-	{ 0x0000, 0x0000 }, /* R974 */
-	{ 0x0000, 0x0000 }, /* R975 */
-	{ 0x0000, 0x0000 }, /* R976 */
-	{ 0x0000, 0x0000 }, /* R977 */
-	{ 0x0000, 0x0000 }, /* R978 */
-	{ 0x0000, 0x0000 }, /* R979 */
-	{ 0x0000, 0x0000 }, /* R980 */
-	{ 0x0000, 0x0000 }, /* R981 */
-	{ 0x0000, 0x0000 }, /* R982 */
-	{ 0x0000, 0x0000 }, /* R983 */
-	{ 0x0000, 0x0000 }, /* R984 */
-	{ 0x0000, 0x0000 }, /* R985 */
-	{ 0x0000, 0x0000 }, /* R986 */
-	{ 0x0000, 0x0000 }, /* R987 */
-	{ 0x0000, 0x0000 }, /* R988 */
-	{ 0x0000, 0x0000 }, /* R989 */
-	{ 0x0000, 0x0000 }, /* R990 */
-	{ 0x0000, 0x0000 }, /* R991 */
-	{ 0x0000, 0x0000 }, /* R992 */
-	{ 0x0000, 0x0000 }, /* R993 */
-	{ 0x0000, 0x0000 }, /* R994 */
-	{ 0x0000, 0x0000 }, /* R995 */
-	{ 0x0000, 0x0000 }, /* R996 */
-	{ 0x0000, 0x0000 }, /* R997 */
-	{ 0x0000, 0x0000 }, /* R998 */
-	{ 0x0000, 0x0000 }, /* R999 */
-	{ 0x0000, 0x0000 }, /* R1000 */
-	{ 0x0000, 0x0000 }, /* R1001 */
-	{ 0x0000, 0x0000 }, /* R1002 */
-	{ 0x0000, 0x0000 }, /* R1003 */
-	{ 0x0000, 0x0000 }, /* R1004 */
-	{ 0x0000, 0x0000 }, /* R1005 */
-	{ 0x0000, 0x0000 }, /* R1006 */
-	{ 0x0000, 0x0000 }, /* R1007 */
-	{ 0x0000, 0x0000 }, /* R1008 */
-	{ 0x0000, 0x0000 }, /* R1009 */
-	{ 0x0000, 0x0000 }, /* R1010 */
-	{ 0x0000, 0x0000 }, /* R1011 */
-	{ 0x0000, 0x0000 }, /* R1012 */
-	{ 0x0000, 0x0000 }, /* R1013 */
-	{ 0x0000, 0x0000 }, /* R1014 */
-	{ 0x0000, 0x0000 }, /* R1015 */
-	{ 0x0000, 0x0000 }, /* R1016 */
-	{ 0x0000, 0x0000 }, /* R1017 */
-	{ 0x0000, 0x0000 }, /* R1018 */
-	{ 0x0000, 0x0000 }, /* R1019 */
-	{ 0x0000, 0x0000 }, /* R1020 */
-	{ 0x0000, 0x0000 }, /* R1021 */
-	{ 0x0000, 0x0000 }, /* R1022 */
-	{ 0x0000, 0x0000 }, /* R1023 */
-	{ 0x00FF, 0x01FF }, /* R1024  - AIF1 ADC1 Left Volume */
-	{ 0x00FF, 0x01FF }, /* R1025  - AIF1 ADC1 Right Volume */
-	{ 0x00FF, 0x01FF }, /* R1026  - AIF1 DAC1 Left Volume */
-	{ 0x00FF, 0x01FF }, /* R1027  - AIF1 DAC1 Right Volume */
-	{ 0x00FF, 0x01FF }, /* R1028  - AIF1 ADC2 Left Volume */
-	{ 0x00FF, 0x01FF }, /* R1029  - AIF1 ADC2 Right Volume */
-	{ 0x00FF, 0x01FF }, /* R1030  - AIF1 DAC2 Left Volume */
-	{ 0x00FF, 0x01FF }, /* R1031  - AIF1 DAC2 Right Volume */
-	{ 0x0000, 0x0000 }, /* R1032 */
-	{ 0x0000, 0x0000 }, /* R1033 */
-	{ 0x0000, 0x0000 }, /* R1034 */
-	{ 0x0000, 0x0000 }, /* R1035 */
-	{ 0x0000, 0x0000 }, /* R1036 */
-	{ 0x0000, 0x0000 }, /* R1037 */
-	{ 0x0000, 0x0000 }, /* R1038 */
-	{ 0x0000, 0x0000 }, /* R1039 */
-	{ 0xF800, 0xF800 }, /* R1040  - AIF1 ADC1 Filters */
-	{ 0x7800, 0x7800 }, /* R1041  - AIF1 ADC2 Filters */
-	{ 0x0000, 0x0000 }, /* R1042 */
-	{ 0x0000, 0x0000 }, /* R1043 */
-	{ 0x0000, 0x0000 }, /* R1044 */
-	{ 0x0000, 0x0000 }, /* R1045 */
-	{ 0x0000, 0x0000 }, /* R1046 */
-	{ 0x0000, 0x0000 }, /* R1047 */
-	{ 0x0000, 0x0000 }, /* R1048 */
-	{ 0x0000, 0x0000 }, /* R1049 */
-	{ 0x0000, 0x0000 }, /* R1050 */
-	{ 0x0000, 0x0000 }, /* R1051 */
-	{ 0x0000, 0x0000 }, /* R1052 */
-	{ 0x0000, 0x0000 }, /* R1053 */
-	{ 0x0000, 0x0000 }, /* R1054 */
-	{ 0x0000, 0x0000 }, /* R1055 */
-	{ 0x02B6, 0x02B6 }, /* R1056  - AIF1 DAC1 Filters (1) */
-	{ 0x3F00, 0x3F00 }, /* R1057  - AIF1 DAC1 Filters (2) */
-	{ 0x02B6, 0x02B6 }, /* R1058  - AIF1 DAC2 Filters (1) */
-	{ 0x3F00, 0x3F00 }, /* R1059  - AIF1 DAC2 Filters (2) */
-	{ 0x0000, 0x0000 }, /* R1060 */
-	{ 0x0000, 0x0000 }, /* R1061 */
-	{ 0x0000, 0x0000 }, /* R1062 */
-	{ 0x0000, 0x0000 }, /* R1063 */
-	{ 0x0000, 0x0000 }, /* R1064 */
-	{ 0x0000, 0x0000 }, /* R1065 */
-	{ 0x0000, 0x0000 }, /* R1066 */
-	{ 0x0000, 0x0000 }, /* R1067 */
-	{ 0x0000, 0x0000 }, /* R1068 */
-	{ 0x0000, 0x0000 }, /* R1069 */
-	{ 0x0000, 0x0000 }, /* R1070 */
-	{ 0x0000, 0x0000 }, /* R1071 */
-	{ 0x0000, 0x0000 }, /* R1072 */
-	{ 0x0000, 0x0000 }, /* R1073 */
-	{ 0x0000, 0x0000 }, /* R1074 */
-	{ 0x0000, 0x0000 }, /* R1075 */
-	{ 0x0000, 0x0000 }, /* R1076 */
-	{ 0x0000, 0x0000 }, /* R1077 */
-	{ 0x0000, 0x0000 }, /* R1078 */
-	{ 0x0000, 0x0000 }, /* R1079 */
-	{ 0x0000, 0x0000 }, /* R1080 */
-	{ 0x0000, 0x0000 }, /* R1081 */
-	{ 0x0000, 0x0000 }, /* R1082 */
-	{ 0x0000, 0x0000 }, /* R1083 */
-	{ 0x0000, 0x0000 }, /* R1084 */
-	{ 0x0000, 0x0000 }, /* R1085 */
-	{ 0x0000, 0x0000 }, /* R1086 */
-	{ 0x0000, 0x0000 }, /* R1087 */
-	{ 0xFFFF, 0xFFFF }, /* R1088  - AIF1 DRC1 (1) */
-	{ 0x1FFF, 0x1FFF }, /* R1089  - AIF1 DRC1 (2) */
-	{ 0xFFFF, 0xFFFF }, /* R1090  - AIF1 DRC1 (3) */
-	{ 0x07FF, 0x07FF }, /* R1091  - AIF1 DRC1 (4) */
-	{ 0x03FF, 0x03FF }, /* R1092  - AIF1 DRC1 (5) */
-	{ 0x0000, 0x0000 }, /* R1093 */
-	{ 0x0000, 0x0000 }, /* R1094 */
-	{ 0x0000, 0x0000 }, /* R1095 */
-	{ 0x0000, 0x0000 }, /* R1096 */
-	{ 0x0000, 0x0000 }, /* R1097 */
-	{ 0x0000, 0x0000 }, /* R1098 */
-	{ 0x0000, 0x0000 }, /* R1099 */
-	{ 0x0000, 0x0000 }, /* R1100 */
-	{ 0x0000, 0x0000 }, /* R1101 */
-	{ 0x0000, 0x0000 }, /* R1102 */
-	{ 0x0000, 0x0000 }, /* R1103 */
-	{ 0xFFFF, 0xFFFF }, /* R1104  - AIF1 DRC2 (1) */
-	{ 0x1FFF, 0x1FFF }, /* R1105  - AIF1 DRC2 (2) */
-	{ 0xFFFF, 0xFFFF }, /* R1106  - AIF1 DRC2 (3) */
-	{ 0x07FF, 0x07FF }, /* R1107  - AIF1 DRC2 (4) */
-	{ 0x03FF, 0x03FF }, /* R1108  - AIF1 DRC2 (5) */
-	{ 0x0000, 0x0000 }, /* R1109 */
-	{ 0x0000, 0x0000 }, /* R1110 */
-	{ 0x0000, 0x0000 }, /* R1111 */
-	{ 0x0000, 0x0000 }, /* R1112 */
-	{ 0x0000, 0x0000 }, /* R1113 */
-	{ 0x0000, 0x0000 }, /* R1114 */
-	{ 0x0000, 0x0000 }, /* R1115 */
-	{ 0x0000, 0x0000 }, /* R1116 */
-	{ 0x0000, 0x0000 }, /* R1117 */
-	{ 0x0000, 0x0000 }, /* R1118 */
-	{ 0x0000, 0x0000 }, /* R1119 */
-	{ 0x0000, 0x0000 }, /* R1120 */
-	{ 0x0000, 0x0000 }, /* R1121 */
-	{ 0x0000, 0x0000 }, /* R1122 */
-	{ 0x0000, 0x0000 }, /* R1123 */
-	{ 0x0000, 0x0000 }, /* R1124 */
-	{ 0x0000, 0x0000 }, /* R1125 */
-	{ 0x0000, 0x0000 }, /* R1126 */
-	{ 0x0000, 0x0000 }, /* R1127 */
-	{ 0x0000, 0x0000 }, /* R1128 */
-	{ 0x0000, 0x0000 }, /* R1129 */
-	{ 0x0000, 0x0000 }, /* R1130 */
-	{ 0x0000, 0x0000 }, /* R1131 */
-	{ 0x0000, 0x0000 }, /* R1132 */
-	{ 0x0000, 0x0000 }, /* R1133 */
-	{ 0x0000, 0x0000 }, /* R1134 */
-	{ 0x0000, 0x0000 }, /* R1135 */
-	{ 0x0000, 0x0000 }, /* R1136 */
-	{ 0x0000, 0x0000 }, /* R1137 */
-	{ 0x0000, 0x0000 }, /* R1138 */
-	{ 0x0000, 0x0000 }, /* R1139 */
-	{ 0x0000, 0x0000 }, /* R1140 */
-	{ 0x0000, 0x0000 }, /* R1141 */
-	{ 0x0000, 0x0000 }, /* R1142 */
-	{ 0x0000, 0x0000 }, /* R1143 */
-	{ 0x0000, 0x0000 }, /* R1144 */
-	{ 0x0000, 0x0000 }, /* R1145 */
-	{ 0x0000, 0x0000 }, /* R1146 */
-	{ 0x0000, 0x0000 }, /* R1147 */
-	{ 0x0000, 0x0000 }, /* R1148 */
-	{ 0x0000, 0x0000 }, /* R1149 */
-	{ 0x0000, 0x0000 }, /* R1150 */
-	{ 0x0000, 0x0000 }, /* R1151 */
-	{ 0xFFFF, 0xFFFF }, /* R1152  - AIF1 DAC1 EQ Gains (1) */
-	{ 0xFFC0, 0xFFC0 }, /* R1153  - AIF1 DAC1 EQ Gains (2) */
-	{ 0xFFFF, 0xFFFF }, /* R1154  - AIF1 DAC1 EQ Band 1 A */
-	{ 0xFFFF, 0xFFFF }, /* R1155  - AIF1 DAC1 EQ Band 1 B */
-	{ 0xFFFF, 0xFFFF }, /* R1156  - AIF1 DAC1 EQ Band 1 PG */
-	{ 0xFFFF, 0xFFFF }, /* R1157  - AIF1 DAC1 EQ Band 2 A */
-	{ 0xFFFF, 0xFFFF }, /* R1158  - AIF1 DAC1 EQ Band 2 B */
-	{ 0xFFFF, 0xFFFF }, /* R1159  - AIF1 DAC1 EQ Band 2 C */
-	{ 0xFFFF, 0xFFFF }, /* R1160  - AIF1 DAC1 EQ Band 2 PG */
-	{ 0xFFFF, 0xFFFF }, /* R1161  - AIF1 DAC1 EQ Band 3 A */
-	{ 0xFFFF, 0xFFFF }, /* R1162  - AIF1 DAC1 EQ Band 3 B */
-	{ 0xFFFF, 0xFFFF }, /* R1163  - AIF1 DAC1 EQ Band 3 C */
-	{ 0xFFFF, 0xFFFF }, /* R1164  - AIF1 DAC1 EQ Band 3 PG */
-	{ 0xFFFF, 0xFFFF }, /* R1165  - AIF1 DAC1 EQ Band 4 A */
-	{ 0xFFFF, 0xFFFF }, /* R1166  - AIF1 DAC1 EQ Band 4 B */
-	{ 0xFFFF, 0xFFFF }, /* R1167  - AIF1 DAC1 EQ Band 4 C */
-	{ 0xFFFF, 0xFFFF }, /* R1168  - AIF1 DAC1 EQ Band 4 PG */
-	{ 0xFFFF, 0xFFFF }, /* R1169  - AIF1 DAC1 EQ Band 5 A */
-	{ 0xFFFF, 0xFFFF }, /* R1170  - AIF1 DAC1 EQ Band 5 B */
-	{ 0xFFFF, 0xFFFF }, /* R1171  - AIF1 DAC1 EQ Band 5 PG */
-	{ 0x0000, 0x0000 }, /* R1172 */
-	{ 0x0000, 0x0000 }, /* R1173 */
-	{ 0x0000, 0x0000 }, /* R1174 */
-	{ 0x0000, 0x0000 }, /* R1175 */
-	{ 0x0000, 0x0000 }, /* R1176 */
-	{ 0x0000, 0x0000 }, /* R1177 */
-	{ 0x0000, 0x0000 }, /* R1178 */
-	{ 0x0000, 0x0000 }, /* R1179 */
-	{ 0x0000, 0x0000 }, /* R1180 */
-	{ 0x0000, 0x0000 }, /* R1181 */
-	{ 0x0000, 0x0000 }, /* R1182 */
-	{ 0x0000, 0x0000 }, /* R1183 */
-	{ 0xFFFF, 0xFFFF }, /* R1184  - AIF1 DAC2 EQ Gains (1) */
-	{ 0xFFC0, 0xFFC0 }, /* R1185  - AIF1 DAC2 EQ Gains (2) */
-	{ 0xFFFF, 0xFFFF }, /* R1186  - AIF1 DAC2 EQ Band 1 A */
-	{ 0xFFFF, 0xFFFF }, /* R1187  - AIF1 DAC2 EQ Band 1 B */
-	{ 0xFFFF, 0xFFFF }, /* R1188  - AIF1 DAC2 EQ Band 1 PG */
-	{ 0xFFFF, 0xFFFF }, /* R1189  - AIF1 DAC2 EQ Band 2 A */
-	{ 0xFFFF, 0xFFFF }, /* R1190  - AIF1 DAC2 EQ Band 2 B */
-	{ 0xFFFF, 0xFFFF }, /* R1191  - AIF1 DAC2 EQ Band 2 C */
-	{ 0xFFFF, 0xFFFF }, /* R1192  - AIF1 DAC2 EQ Band 2 PG */
-	{ 0xFFFF, 0xFFFF }, /* R1193  - AIF1 DAC2 EQ Band 3 A */
-	{ 0xFFFF, 0xFFFF }, /* R1194  - AIF1 DAC2 EQ Band 3 B */
-	{ 0xFFFF, 0xFFFF }, /* R1195  - AIF1 DAC2 EQ Band 3 C */
-	{ 0xFFFF, 0xFFFF }, /* R1196  - AIF1 DAC2 EQ Band 3 PG */
-	{ 0xFFFF, 0xFFFF }, /* R1197  - AIF1 DAC2 EQ Band 4 A */
-	{ 0xFFFF, 0xFFFF }, /* R1198  - AIF1 DAC2 EQ Band 4 B */
-	{ 0xFFFF, 0xFFFF }, /* R1199  - AIF1 DAC2 EQ Band 4 C */
-	{ 0xFFFF, 0xFFFF }, /* R1200  - AIF1 DAC2 EQ Band 4 PG */
-	{ 0xFFFF, 0xFFFF }, /* R1201  - AIF1 DAC2 EQ Band 5 A */
-	{ 0xFFFF, 0xFFFF }, /* R1202  - AIF1 DAC2 EQ Band 5 B */
-	{ 0xFFFF, 0xFFFF }, /* R1203  - AIF1 DAC2 EQ Band 5 PG */
-	{ 0x0000, 0x0000 }, /* R1204 */
-	{ 0x0000, 0x0000 }, /* R1205 */
-	{ 0x0000, 0x0000 }, /* R1206 */
-	{ 0x0000, 0x0000 }, /* R1207 */
-	{ 0x0000, 0x0000 }, /* R1208 */
-	{ 0x0000, 0x0000 }, /* R1209 */
-	{ 0x0000, 0x0000 }, /* R1210 */
-	{ 0x0000, 0x0000 }, /* R1211 */
-	{ 0x0000, 0x0000 }, /* R1212 */
-	{ 0x0000, 0x0000 }, /* R1213 */
-	{ 0x0000, 0x0000 }, /* R1214 */
-	{ 0x0000, 0x0000 }, /* R1215 */
-	{ 0x0000, 0x0000 }, /* R1216 */
-	{ 0x0000, 0x0000 }, /* R1217 */
-	{ 0x0000, 0x0000 }, /* R1218 */
-	{ 0x0000, 0x0000 }, /* R1219 */
-	{ 0x0000, 0x0000 }, /* R1220 */
-	{ 0x0000, 0x0000 }, /* R1221 */
-	{ 0x0000, 0x0000 }, /* R1222 */
-	{ 0x0000, 0x0000 }, /* R1223 */
-	{ 0x0000, 0x0000 }, /* R1224 */
-	{ 0x0000, 0x0000 }, /* R1225 */
-	{ 0x0000, 0x0000 }, /* R1226 */
-	{ 0x0000, 0x0000 }, /* R1227 */
-	{ 0x0000, 0x0000 }, /* R1228 */
-	{ 0x0000, 0x0000 }, /* R1229 */
-	{ 0x0000, 0x0000 }, /* R1230 */
-	{ 0x0000, 0x0000 }, /* R1231 */
-	{ 0x0000, 0x0000 }, /* R1232 */
-	{ 0x0000, 0x0000 }, /* R1233 */
-	{ 0x0000, 0x0000 }, /* R1234 */
-	{ 0x0000, 0x0000 }, /* R1235 */
-	{ 0x0000, 0x0000 }, /* R1236 */
-	{ 0x0000, 0x0000 }, /* R1237 */
-	{ 0x0000, 0x0000 }, /* R1238 */
-	{ 0x0000, 0x0000 }, /* R1239 */
-	{ 0x0000, 0x0000 }, /* R1240 */
-	{ 0x0000, 0x0000 }, /* R1241 */
-	{ 0x0000, 0x0000 }, /* R1242 */
-	{ 0x0000, 0x0000 }, /* R1243 */
-	{ 0x0000, 0x0000 }, /* R1244 */
-	{ 0x0000, 0x0000 }, /* R1245 */
-	{ 0x0000, 0x0000 }, /* R1246 */
-	{ 0x0000, 0x0000 }, /* R1247 */
-	{ 0x0000, 0x0000 }, /* R1248 */
-	{ 0x0000, 0x0000 }, /* R1249 */
-	{ 0x0000, 0x0000 }, /* R1250 */
-	{ 0x0000, 0x0000 }, /* R1251 */
-	{ 0x0000, 0x0000 }, /* R1252 */
-	{ 0x0000, 0x0000 }, /* R1253 */
-	{ 0x0000, 0x0000 }, /* R1254 */
-	{ 0x0000, 0x0000 }, /* R1255 */
-	{ 0x0000, 0x0000 }, /* R1256 */
-	{ 0x0000, 0x0000 }, /* R1257 */
-	{ 0x0000, 0x0000 }, /* R1258 */
-	{ 0x0000, 0x0000 }, /* R1259 */
-	{ 0x0000, 0x0000 }, /* R1260 */
-	{ 0x0000, 0x0000 }, /* R1261 */
-	{ 0x0000, 0x0000 }, /* R1262 */
-	{ 0x0000, 0x0000 }, /* R1263 */
-	{ 0x0000, 0x0000 }, /* R1264 */
-	{ 0x0000, 0x0000 }, /* R1265 */
-	{ 0x0000, 0x0000 }, /* R1266 */
-	{ 0x0000, 0x0000 }, /* R1267 */
-	{ 0x0000, 0x0000 }, /* R1268 */
-	{ 0x0000, 0x0000 }, /* R1269 */
-	{ 0x0000, 0x0000 }, /* R1270 */
-	{ 0x0000, 0x0000 }, /* R1271 */
-	{ 0x0000, 0x0000 }, /* R1272 */
-	{ 0x0000, 0x0000 }, /* R1273 */
-	{ 0x0000, 0x0000 }, /* R1274 */
-	{ 0x0000, 0x0000 }, /* R1275 */
-	{ 0x0000, 0x0000 }, /* R1276 */
-	{ 0x0000, 0x0000 }, /* R1277 */
-	{ 0x0000, 0x0000 }, /* R1278 */
-	{ 0x0000, 0x0000 }, /* R1279 */
-	{ 0x00FF, 0x01FF }, /* R1280  - AIF2 ADC Left Volume */
-	{ 0x00FF, 0x01FF }, /* R1281  - AIF2 ADC Right Volume */
-	{ 0x00FF, 0x01FF }, /* R1282  - AIF2 DAC Left Volume */
-	{ 0x00FF, 0x01FF }, /* R1283  - AIF2 DAC Right Volume */
-	{ 0x0000, 0x0000 }, /* R1284 */
-	{ 0x0000, 0x0000 }, /* R1285 */
-	{ 0x0000, 0x0000 }, /* R1286 */
-	{ 0x0000, 0x0000 }, /* R1287 */
-	{ 0x0000, 0x0000 }, /* R1288 */
-	{ 0x0000, 0x0000 }, /* R1289 */
-	{ 0x0000, 0x0000 }, /* R1290 */
-	{ 0x0000, 0x0000 }, /* R1291 */
-	{ 0x0000, 0x0000 }, /* R1292 */
-	{ 0x0000, 0x0000 }, /* R1293 */
-	{ 0x0000, 0x0000 }, /* R1294 */
-	{ 0x0000, 0x0000 }, /* R1295 */
-	{ 0xF800, 0xF800 }, /* R1296  - AIF2 ADC Filters */
-	{ 0x0000, 0x0000 }, /* R1297 */
-	{ 0x0000, 0x0000 }, /* R1298 */
-	{ 0x0000, 0x0000 }, /* R1299 */
-	{ 0x0000, 0x0000 }, /* R1300 */
-	{ 0x0000, 0x0000 }, /* R1301 */
-	{ 0x0000, 0x0000 }, /* R1302 */
-	{ 0x0000, 0x0000 }, /* R1303 */
-	{ 0x0000, 0x0000 }, /* R1304 */
-	{ 0x0000, 0x0000 }, /* R1305 */
-	{ 0x0000, 0x0000 }, /* R1306 */
-	{ 0x0000, 0x0000 }, /* R1307 */
-	{ 0x0000, 0x0000 }, /* R1308 */
-	{ 0x0000, 0x0000 }, /* R1309 */
-	{ 0x0000, 0x0000 }, /* R1310 */
-	{ 0x0000, 0x0000 }, /* R1311 */
-	{ 0x02B6, 0x02B6 }, /* R1312  - AIF2 DAC Filters (1) */
-	{ 0x3F00, 0x3F00 }, /* R1313  - AIF2 DAC Filters (2) */
-	{ 0x0000, 0x0000 }, /* R1314 */
-	{ 0x0000, 0x0000 }, /* R1315 */
-	{ 0x0000, 0x0000 }, /* R1316 */
-	{ 0x0000, 0x0000 }, /* R1317 */
-	{ 0x0000, 0x0000 }, /* R1318 */
-	{ 0x0000, 0x0000 }, /* R1319 */
-	{ 0x0000, 0x0000 }, /* R1320 */
-	{ 0x0000, 0x0000 }, /* R1321 */
-	{ 0x0000, 0x0000 }, /* R1322 */
-	{ 0x0000, 0x0000 }, /* R1323 */
-	{ 0x0000, 0x0000 }, /* R1324 */
-	{ 0x0000, 0x0000 }, /* R1325 */
-	{ 0x0000, 0x0000 }, /* R1326 */
-	{ 0x0000, 0x0000 }, /* R1327 */
-	{ 0x0000, 0x0000 }, /* R1328 */
-	{ 0x0000, 0x0000 }, /* R1329 */
-	{ 0x0000, 0x0000 }, /* R1330 */
-	{ 0x0000, 0x0000 }, /* R1331 */
-	{ 0x0000, 0x0000 }, /* R1332 */
-	{ 0x0000, 0x0000 }, /* R1333 */
-	{ 0x0000, 0x0000 }, /* R1334 */
-	{ 0x0000, 0x0000 }, /* R1335 */
-	{ 0x0000, 0x0000 }, /* R1336 */
-	{ 0x0000, 0x0000 }, /* R1337 */
-	{ 0x0000, 0x0000 }, /* R1338 */
-	{ 0x0000, 0x0000 }, /* R1339 */
-	{ 0x0000, 0x0000 }, /* R1340 */
-	{ 0x0000, 0x0000 }, /* R1341 */
-	{ 0x0000, 0x0000 }, /* R1342 */
-	{ 0x0000, 0x0000 }, /* R1343 */
-	{ 0xFFFF, 0xFFFF }, /* R1344  - AIF2 DRC (1) */
-	{ 0x1FFF, 0x1FFF }, /* R1345  - AIF2 DRC (2) */
-	{ 0xFFFF, 0xFFFF }, /* R1346  - AIF2 DRC (3) */
-	{ 0x07FF, 0x07FF }, /* R1347  - AIF2 DRC (4) */
-	{ 0x03FF, 0x03FF }, /* R1348  - AIF2 DRC (5) */
-	{ 0x0000, 0x0000 }, /* R1349 */
-	{ 0x0000, 0x0000 }, /* R1350 */
-	{ 0x0000, 0x0000 }, /* R1351 */
-	{ 0x0000, 0x0000 }, /* R1352 */
-	{ 0x0000, 0x0000 }, /* R1353 */
-	{ 0x0000, 0x0000 }, /* R1354 */
-	{ 0x0000, 0x0000 }, /* R1355 */
-	{ 0x0000, 0x0000 }, /* R1356 */
-	{ 0x0000, 0x0000 }, /* R1357 */
-	{ 0x0000, 0x0000 }, /* R1358 */
-	{ 0x0000, 0x0000 }, /* R1359 */
-	{ 0x0000, 0x0000 }, /* R1360 */
-	{ 0x0000, 0x0000 }, /* R1361 */
-	{ 0x0000, 0x0000 }, /* R1362 */
-	{ 0x0000, 0x0000 }, /* R1363 */
-	{ 0x0000, 0x0000 }, /* R1364 */
-	{ 0x0000, 0x0000 }, /* R1365 */
-	{ 0x0000, 0x0000 }, /* R1366 */
-	{ 0x0000, 0x0000 }, /* R1367 */
-	{ 0x0000, 0x0000 }, /* R1368 */
-	{ 0x0000, 0x0000 }, /* R1369 */
-	{ 0x0000, 0x0000 }, /* R1370 */
-	{ 0x0000, 0x0000 }, /* R1371 */
-	{ 0x0000, 0x0000 }, /* R1372 */
-	{ 0x0000, 0x0000 }, /* R1373 */
-	{ 0x0000, 0x0000 }, /* R1374 */
-	{ 0x0000, 0x0000 }, /* R1375 */
-	{ 0x0000, 0x0000 }, /* R1376 */
-	{ 0x0000, 0x0000 }, /* R1377 */
-	{ 0x0000, 0x0000 }, /* R1378 */
-	{ 0x0000, 0x0000 }, /* R1379 */
-	{ 0x0000, 0x0000 }, /* R1380 */
-	{ 0x0000, 0x0000 }, /* R1381 */
-	{ 0x0000, 0x0000 }, /* R1382 */
-	{ 0x0000, 0x0000 }, /* R1383 */
-	{ 0x0000, 0x0000 }, /* R1384 */
-	{ 0x0000, 0x0000 }, /* R1385 */
-	{ 0x0000, 0x0000 }, /* R1386 */
-	{ 0x0000, 0x0000 }, /* R1387 */
-	{ 0x0000, 0x0000 }, /* R1388 */
-	{ 0x0000, 0x0000 }, /* R1389 */
-	{ 0x0000, 0x0000 }, /* R1390 */
-	{ 0x0000, 0x0000 }, /* R1391 */
-	{ 0x0000, 0x0000 }, /* R1392 */
-	{ 0x0000, 0x0000 }, /* R1393 */
-	{ 0x0000, 0x0000 }, /* R1394 */
-	{ 0x0000, 0x0000 }, /* R1395 */
-	{ 0x0000, 0x0000 }, /* R1396 */
-	{ 0x0000, 0x0000 }, /* R1397 */
-	{ 0x0000, 0x0000 }, /* R1398 */
-	{ 0x0000, 0x0000 }, /* R1399 */
-	{ 0x0000, 0x0000 }, /* R1400 */
-	{ 0x0000, 0x0000 }, /* R1401 */
-	{ 0x0000, 0x0000 }, /* R1402 */
-	{ 0x0000, 0x0000 }, /* R1403 */
-	{ 0x0000, 0x0000 }, /* R1404 */
-	{ 0x0000, 0x0000 }, /* R1405 */
-	{ 0x0000, 0x0000 }, /* R1406 */
-	{ 0x0000, 0x0000 }, /* R1407 */
-	{ 0xFFFF, 0xFFFF }, /* R1408  - AIF2 EQ Gains (1) */
-	{ 0xFFC0, 0xFFC0 }, /* R1409  - AIF2 EQ Gains (2) */
-	{ 0xFFFF, 0xFFFF }, /* R1410  - AIF2 EQ Band 1 A */
-	{ 0xFFFF, 0xFFFF }, /* R1411  - AIF2 EQ Band 1 B */
-	{ 0xFFFF, 0xFFFF }, /* R1412  - AIF2 EQ Band 1 PG */
-	{ 0xFFFF, 0xFFFF }, /* R1413  - AIF2 EQ Band 2 A */
-	{ 0xFFFF, 0xFFFF }, /* R1414  - AIF2 EQ Band 2 B */
-	{ 0xFFFF, 0xFFFF }, /* R1415  - AIF2 EQ Band 2 C */
-	{ 0xFFFF, 0xFFFF }, /* R1416  - AIF2 EQ Band 2 PG */
-	{ 0xFFFF, 0xFFFF }, /* R1417  - AIF2 EQ Band 3 A */
-	{ 0xFFFF, 0xFFFF }, /* R1418  - AIF2 EQ Band 3 B */
-	{ 0xFFFF, 0xFFFF }, /* R1419  - AIF2 EQ Band 3 C */
-	{ 0xFFFF, 0xFFFF }, /* R1420  - AIF2 EQ Band 3 PG */
-	{ 0xFFFF, 0xFFFF }, /* R1421  - AIF2 EQ Band 4 A */
-	{ 0xFFFF, 0xFFFF }, /* R1422  - AIF2 EQ Band 4 B */
-	{ 0xFFFF, 0xFFFF }, /* R1423  - AIF2 EQ Band 4 C */
-	{ 0xFFFF, 0xFFFF }, /* R1424  - AIF2 EQ Band 4 PG */
-	{ 0xFFFF, 0xFFFF }, /* R1425  - AIF2 EQ Band 5 A */
-	{ 0xFFFF, 0xFFFF }, /* R1426  - AIF2 EQ Band 5 B */
-	{ 0xFFFF, 0xFFFF }, /* R1427  - AIF2 EQ Band 5 PG */
-	{ 0x0000, 0x0000 }, /* R1428 */
-	{ 0x0000, 0x0000 }, /* R1429 */
-	{ 0x0000, 0x0000 }, /* R1430 */
-	{ 0x0000, 0x0000 }, /* R1431 */
-	{ 0x0000, 0x0000 }, /* R1432 */
-	{ 0x0000, 0x0000 }, /* R1433 */
-	{ 0x0000, 0x0000 }, /* R1434 */
-	{ 0x0000, 0x0000 }, /* R1435 */
-	{ 0x0000, 0x0000 }, /* R1436 */
-	{ 0x0000, 0x0000 }, /* R1437 */
-	{ 0x0000, 0x0000 }, /* R1438 */
-	{ 0x0000, 0x0000 }, /* R1439 */
-	{ 0x0000, 0x0000 }, /* R1440 */
-	{ 0x0000, 0x0000 }, /* R1441 */
-	{ 0x0000, 0x0000 }, /* R1442 */
-	{ 0x0000, 0x0000 }, /* R1443 */
-	{ 0x0000, 0x0000 }, /* R1444 */
-	{ 0x0000, 0x0000 }, /* R1445 */
-	{ 0x0000, 0x0000 }, /* R1446 */
-	{ 0x0000, 0x0000 }, /* R1447 */
-	{ 0x0000, 0x0000 }, /* R1448 */
-	{ 0x0000, 0x0000 }, /* R1449 */
-	{ 0x0000, 0x0000 }, /* R1450 */
-	{ 0x0000, 0x0000 }, /* R1451 */
-	{ 0x0000, 0x0000 }, /* R1452 */
-	{ 0x0000, 0x0000 }, /* R1453 */
-	{ 0x0000, 0x0000 }, /* R1454 */
-	{ 0x0000, 0x0000 }, /* R1455 */
-	{ 0x0000, 0x0000 }, /* R1456 */
-	{ 0x0000, 0x0000 }, /* R1457 */
-	{ 0x0000, 0x0000 }, /* R1458 */
-	{ 0x0000, 0x0000 }, /* R1459 */
-	{ 0x0000, 0x0000 }, /* R1460 */
-	{ 0x0000, 0x0000 }, /* R1461 */
-	{ 0x0000, 0x0000 }, /* R1462 */
-	{ 0x0000, 0x0000 }, /* R1463 */
-	{ 0x0000, 0x0000 }, /* R1464 */
-	{ 0x0000, 0x0000 }, /* R1465 */
-	{ 0x0000, 0x0000 }, /* R1466 */
-	{ 0x0000, 0x0000 }, /* R1467 */
-	{ 0x0000, 0x0000 }, /* R1468 */
-	{ 0x0000, 0x0000 }, /* R1469 */
-	{ 0x0000, 0x0000 }, /* R1470 */
-	{ 0x0000, 0x0000 }, /* R1471 */
-	{ 0x0000, 0x0000 }, /* R1472 */
-	{ 0x0000, 0x0000 }, /* R1473 */
-	{ 0x0000, 0x0000 }, /* R1474 */
-	{ 0x0000, 0x0000 }, /* R1475 */
-	{ 0x0000, 0x0000 }, /* R1476 */
-	{ 0x0000, 0x0000 }, /* R1477 */
-	{ 0x0000, 0x0000 }, /* R1478 */
-	{ 0x0000, 0x0000 }, /* R1479 */
-	{ 0x0000, 0x0000 }, /* R1480 */
-	{ 0x0000, 0x0000 }, /* R1481 */
-	{ 0x0000, 0x0000 }, /* R1482 */
-	{ 0x0000, 0x0000 }, /* R1483 */
-	{ 0x0000, 0x0000 }, /* R1484 */
-	{ 0x0000, 0x0000 }, /* R1485 */
-	{ 0x0000, 0x0000 }, /* R1486 */
-	{ 0x0000, 0x0000 }, /* R1487 */
-	{ 0x0000, 0x0000 }, /* R1488 */
-	{ 0x0000, 0x0000 }, /* R1489 */
-	{ 0x0000, 0x0000 }, /* R1490 */
-	{ 0x0000, 0x0000 }, /* R1491 */
-	{ 0x0000, 0x0000 }, /* R1492 */
-	{ 0x0000, 0x0000 }, /* R1493 */
-	{ 0x0000, 0x0000 }, /* R1494 */
-	{ 0x0000, 0x0000 }, /* R1495 */
-	{ 0x0000, 0x0000 }, /* R1496 */
-	{ 0x0000, 0x0000 }, /* R1497 */
-	{ 0x0000, 0x0000 }, /* R1498 */
-	{ 0x0000, 0x0000 }, /* R1499 */
-	{ 0x0000, 0x0000 }, /* R1500 */
-	{ 0x0000, 0x0000 }, /* R1501 */
-	{ 0x0000, 0x0000 }, /* R1502 */
-	{ 0x0000, 0x0000 }, /* R1503 */
-	{ 0x0000, 0x0000 }, /* R1504 */
-	{ 0x0000, 0x0000 }, /* R1505 */
-	{ 0x0000, 0x0000 }, /* R1506 */
-	{ 0x0000, 0x0000 }, /* R1507 */
-	{ 0x0000, 0x0000 }, /* R1508 */
-	{ 0x0000, 0x0000 }, /* R1509 */
-	{ 0x0000, 0x0000 }, /* R1510 */
-	{ 0x0000, 0x0000 }, /* R1511 */
-	{ 0x0000, 0x0000 }, /* R1512 */
-	{ 0x0000, 0x0000 }, /* R1513 */
-	{ 0x0000, 0x0000 }, /* R1514 */
-	{ 0x0000, 0x0000 }, /* R1515 */
-	{ 0x0000, 0x0000 }, /* R1516 */
-	{ 0x0000, 0x0000 }, /* R1517 */
-	{ 0x0000, 0x0000 }, /* R1518 */
-	{ 0x0000, 0x0000 }, /* R1519 */
-	{ 0x0000, 0x0000 }, /* R1520 */
-	{ 0x0000, 0x0000 }, /* R1521 */
-	{ 0x0000, 0x0000 }, /* R1522 */
-	{ 0x0000, 0x0000 }, /* R1523 */
-	{ 0x0000, 0x0000 }, /* R1524 */
-	{ 0x0000, 0x0000 }, /* R1525 */
-	{ 0x0000, 0x0000 }, /* R1526 */
-	{ 0x0000, 0x0000 }, /* R1527 */
-	{ 0x0000, 0x0000 }, /* R1528 */
-	{ 0x0000, 0x0000 }, /* R1529 */
-	{ 0x0000, 0x0000 }, /* R1530 */
-	{ 0x0000, 0x0000 }, /* R1531 */
-	{ 0x0000, 0x0000 }, /* R1532 */
-	{ 0x0000, 0x0000 }, /* R1533 */
-	{ 0x0000, 0x0000 }, /* R1534 */
-	{ 0x0000, 0x0000 }, /* R1535 */
-	{ 0x01EF, 0x01EF }, /* R1536  - DAC1 Mixer Volumes */
-	{ 0x0037, 0x0037 }, /* R1537  - DAC1 Left Mixer Routing */
-	{ 0x0037, 0x0037 }, /* R1538  - DAC1 Right Mixer Routing */
-	{ 0x01EF, 0x01EF }, /* R1539  - DAC2 Mixer Volumes */
-	{ 0x0037, 0x0037 }, /* R1540  - DAC2 Left Mixer Routing */
-	{ 0x0037, 0x0037 }, /* R1541  - DAC2 Right Mixer Routing */
-	{ 0x0003, 0x0003 }, /* R1542  - AIF1 ADC1 Left Mixer Routing */
-	{ 0x0003, 0x0003 }, /* R1543  - AIF1 ADC1 Right Mixer Routing */
-	{ 0x0003, 0x0003 }, /* R1544  - AIF1 ADC2 Left Mixer Routing */
-	{ 0x0003, 0x0003 }, /* R1545  - AIF1 ADC2 Right mixer Routing */
-	{ 0x0000, 0x0000 }, /* R1546 */
-	{ 0x0000, 0x0000 }, /* R1547 */
-	{ 0x0000, 0x0000 }, /* R1548 */
-	{ 0x0000, 0x0000 }, /* R1549 */
-	{ 0x0000, 0x0000 }, /* R1550 */
-	{ 0x0000, 0x0000 }, /* R1551 */
-	{ 0x02FF, 0x03FF }, /* R1552  - DAC1 Left Volume */
-	{ 0x02FF, 0x03FF }, /* R1553  - DAC1 Right Volume */
-	{ 0x02FF, 0x03FF }, /* R1554  - DAC2 Left Volume */
-	{ 0x02FF, 0x03FF }, /* R1555  - DAC2 Right Volume */
-	{ 0x0003, 0x0003 }, /* R1556  - DAC Softmute */
-	{ 0x0000, 0x0000 }, /* R1557 */
-	{ 0x0000, 0x0000 }, /* R1558 */
-	{ 0x0000, 0x0000 }, /* R1559 */
-	{ 0x0000, 0x0000 }, /* R1560 */
-	{ 0x0000, 0x0000 }, /* R1561 */
-	{ 0x0000, 0x0000 }, /* R1562 */
-	{ 0x0000, 0x0000 }, /* R1563 */
-	{ 0x0000, 0x0000 }, /* R1564 */
-	{ 0x0000, 0x0000 }, /* R1565 */
-	{ 0x0000, 0x0000 }, /* R1566 */
-	{ 0x0000, 0x0000 }, /* R1567 */
-	{ 0x0003, 0x0003 }, /* R1568  - Oversampling */
-	{ 0x03C3, 0x03C3 }, /* R1569  - Sidetone */
-};
-
 static int wm8994_readable(unsigned int reg)
 {
 	switch (reg) {
@@ -1696,14 +131,14 @@
 		break;
 	}
 
-	if (reg >= ARRAY_SIZE(access_masks))
+	if (reg >= WM8994_CACHE_SIZE)
 		return 0;
-	return access_masks[reg].readable != 0;
+	return wm8994_access_masks[reg].readable != 0;
 }
 
 static int wm8994_volatile(unsigned int reg)
 {
-	if (reg >= WM8994_REG_CACHE_SIZE)
+	if (reg >= WM8994_CACHE_SIZE)
 		return 1;
 
 	switch (reg) {
@@ -1714,6 +149,8 @@
 	case WM8994_RATE_STATUS:
 	case WM8994_LDO_1:
 	case WM8994_LDO_2:
+	case WM8958_DSP2_EXECCONTROL:
+	case WM8958_MIC_DETECT_3:
 		return 1;
 	default:
 		return 0;
@@ -1723,14 +160,16 @@
 static int wm8994_write(struct snd_soc_codec *codec, unsigned int reg,
 	unsigned int value)
 {
-	struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
+	int ret;
 
 	BUG_ON(reg > WM8994_MAX_REGISTER);
 
-	if (!wm8994_volatile(reg))
-		wm8994->reg_cache[reg] = value;
-
-	dev_dbg(codec->dev, "0x%x = 0x%x\n", reg, value);
+	if (!wm8994_volatile(reg)) {
+		ret = snd_soc_cache_write(codec, reg, value);
+		if (ret != 0)
+			dev_err(codec->dev, "Cache write to %x failed: %d\n",
+				reg, ret);
+	}
 
 	return wm8994_reg_write(codec->control_data, reg, value);
 }
@@ -1738,14 +177,22 @@
 static unsigned int wm8994_read(struct snd_soc_codec *codec,
 				unsigned int reg)
 {
-	u16 *reg_cache = codec->reg_cache;
+	unsigned int val;
+	int ret;
 
 	BUG_ON(reg > WM8994_MAX_REGISTER);
 
-	if (wm8994_volatile(reg))
-		return wm8994_reg_read(codec->control_data, reg);
-	else
-		return reg_cache[reg];
+	if (!wm8994_volatile(reg) && wm8994_readable(reg) &&
+	    reg < codec->driver->reg_cache_size) {
+		ret = snd_soc_cache_read(codec, reg, &val);
+		if (ret >= 0)
+			return val;
+		else
+			dev_err(codec->dev, "Cache read from %x failed: %d\n",
+				reg, ret);
+	}
+
+	return wm8994_reg_read(codec->control_data, reg);
 }
 
 static int configure_aif_clock(struct snd_soc_codec *codec, int aif)
@@ -1837,7 +284,7 @@
 
 	snd_soc_update_bits(codec, WM8994_CLOCKING_1, WM8994_SYSCLK_SRC, new);
 
-	snd_soc_dapm_sync(codec);
+	snd_soc_dapm_sync(&codec->dapm);
 
 	return 0;
 }
@@ -1864,6 +311,19 @@
 static const struct soc_enum sidetone_hpf =
 	SOC_ENUM_SINGLE(WM8994_SIDETONE, 7, 7, sidetone_hpf_text);
 
+static const char *adc_hpf_text[] = {
+	"HiFi", "Voice 1", "Voice 2", "Voice 3"
+};
+
+static const struct soc_enum aif1adc1_hpf =
+	SOC_ENUM_SINGLE(WM8994_AIF1_ADC1_FILTERS, 13, 4, adc_hpf_text);
+
+static const struct soc_enum aif1adc2_hpf =
+	SOC_ENUM_SINGLE(WM8994_AIF1_ADC2_FILTERS, 13, 4, adc_hpf_text);
+
+static const struct soc_enum aif2adc_hpf =
+	SOC_ENUM_SINGLE(WM8994_AIF2_ADC_FILTERS, 13, 4, adc_hpf_text);
+
 static const DECLARE_TLV_DB_SCALE(aif_tlv, 0, 600, 0);
 static const DECLARE_TLV_DB_SCALE(digital_tlv, -7200, 75, 1);
 static const DECLARE_TLV_DB_SCALE(st_tlv, -3600, 300, 0);
@@ -2071,21 +531,252 @@
 	return 0;
 }
 
-static const char *aifdac_src_text[] = {
+static const char *aif_chan_src_text[] = {
 	"Left", "Right"
 };
 
+static const struct soc_enum aif1adcl_src =
+	SOC_ENUM_SINGLE(WM8994_AIF1_CONTROL_1, 15, 2, aif_chan_src_text);
+
+static const struct soc_enum aif1adcr_src =
+	SOC_ENUM_SINGLE(WM8994_AIF1_CONTROL_1, 14, 2, aif_chan_src_text);
+
+static const struct soc_enum aif2adcl_src =
+	SOC_ENUM_SINGLE(WM8994_AIF2_CONTROL_1, 15, 2, aif_chan_src_text);
+
+static const struct soc_enum aif2adcr_src =
+	SOC_ENUM_SINGLE(WM8994_AIF2_CONTROL_1, 14, 2, aif_chan_src_text);
+
 static const struct soc_enum aif1dacl_src =
-	SOC_ENUM_SINGLE(WM8994_AIF1_CONTROL_2, 15, 2, aifdac_src_text);
+	SOC_ENUM_SINGLE(WM8994_AIF1_CONTROL_2, 15, 2, aif_chan_src_text);
 
 static const struct soc_enum aif1dacr_src =
-	SOC_ENUM_SINGLE(WM8994_AIF1_CONTROL_2, 14, 2, aifdac_src_text);
+	SOC_ENUM_SINGLE(WM8994_AIF1_CONTROL_2, 14, 2, aif_chan_src_text);
 
 static const struct soc_enum aif2dacl_src =
-	SOC_ENUM_SINGLE(WM8994_AIF2_CONTROL_2, 15, 2, aifdac_src_text);
+	SOC_ENUM_SINGLE(WM8994_AIF2_CONTROL_2, 15, 2, aif_chan_src_text);
 
 static const struct soc_enum aif2dacr_src =
-	SOC_ENUM_SINGLE(WM8994_AIF2_CONTROL_2, 14, 2, aifdac_src_text);
+	SOC_ENUM_SINGLE(WM8994_AIF2_CONTROL_2, 14, 2, aif_chan_src_text);
+
+static const char *osr_text[] = {
+	"Low Power", "High Performance",
+};
+
+static const struct soc_enum dac_osr =
+	SOC_ENUM_SINGLE(WM8994_OVERSAMPLING, 0, 2, osr_text);
+
+static const struct soc_enum adc_osr =
+	SOC_ENUM_SINGLE(WM8994_OVERSAMPLING, 1, 2, osr_text);
+
+static void wm8958_mbc_apply(struct snd_soc_codec *codec, int mbc, int start)
+{
+	struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
+	struct wm8994_pdata *pdata = wm8994->pdata;
+	int pwr_reg = snd_soc_read(codec, WM8994_POWER_MANAGEMENT_5);
+	int ena, reg, aif, i;
+
+	switch (mbc) {
+	case 0:
+		pwr_reg &= (WM8994_AIF1DAC1L_ENA | WM8994_AIF1DAC1R_ENA);
+		aif = 0;
+		break;
+	case 1:
+		pwr_reg &= (WM8994_AIF1DAC2L_ENA | WM8994_AIF1DAC2R_ENA);
+		aif = 0;
+		break;
+	case 2:
+		pwr_reg &= (WM8994_AIF2DACL_ENA | WM8994_AIF2DACR_ENA);
+		aif = 1;
+		break;
+	default:
+		BUG();
+		return;
+	}
+
+	/* We can only enable the MBC if the AIF is enabled and we
+	 * want it to be enabled. */
+	ena = pwr_reg && wm8994->mbc_ena[mbc];
+
+	reg = snd_soc_read(codec, WM8958_DSP2_PROGRAM);
+
+	dev_dbg(codec->dev, "MBC %d startup: %d, power: %x, DSP: %x\n",
+		mbc, start, pwr_reg, reg);
+
+	if (start && ena) {
+		/* If the DSP is already running then noop */
+		if (reg & WM8958_DSP2_ENA)
+			return;
+
+		/* Switch the clock over to the appropriate AIF */
+		snd_soc_update_bits(codec, WM8994_CLOCKING_1,
+				    WM8958_DSP2CLK_SRC | WM8958_DSP2CLK_ENA,
+				    aif << WM8958_DSP2CLK_SRC_SHIFT |
+				    WM8958_DSP2CLK_ENA);
+
+		snd_soc_update_bits(codec, WM8958_DSP2_PROGRAM,
+				    WM8958_DSP2_ENA, WM8958_DSP2_ENA);
+
+		/* If we've got user-supplied MBC settings, use them */
+		if (pdata && pdata->num_mbc_cfgs) {
+			struct wm8958_mbc_cfg *cfg
+				= &pdata->mbc_cfgs[wm8994->mbc_cfg];
+
+			for (i = 0; i < ARRAY_SIZE(cfg->coeff_regs); i++)
+				snd_soc_write(codec, i + WM8958_MBC_BAND_1_K_1,
+					      cfg->coeff_regs[i]);
+
+			for (i = 0; i < ARRAY_SIZE(cfg->cutoff_regs); i++)
+				snd_soc_write(codec,
+					      i + WM8958_MBC_BAND_2_LOWER_CUTOFF_C1_1,
+					      cfg->cutoff_regs[i]);
+		}
+
+		/* Run the DSP */
+		snd_soc_write(codec, WM8958_DSP2_EXECCONTROL,
+			      WM8958_DSP2_RUNR);
+
+		/* And we're off! */
+		snd_soc_update_bits(codec, WM8958_DSP2_CONFIG,
+				    WM8958_MBC_ENA | WM8958_MBC_SEL_MASK,
+				    mbc << WM8958_MBC_SEL_SHIFT |
+				    WM8958_MBC_ENA);
+	} else {
+		/* If the DSP is already stopped then noop */
+		if (!(reg & WM8958_DSP2_ENA))
+			return;
+
+		snd_soc_update_bits(codec, WM8958_DSP2_CONFIG,
+				    WM8958_MBC_ENA, 0);	
+		snd_soc_update_bits(codec, WM8958_DSP2_PROGRAM,
+				    WM8958_DSP2_ENA, 0);
+		snd_soc_update_bits(codec, WM8994_CLOCKING_1,
+				    WM8958_DSP2CLK_ENA, 0);
+	}
+}
+
+static int wm8958_aif_ev(struct snd_soc_dapm_widget *w,
+		    struct snd_kcontrol *kcontrol, int event)
+{
+	struct snd_soc_codec *codec = w->codec;
+	int mbc;
+
+	switch (w->shift) {
+	case 13:
+	case 12:
+		mbc = 2;
+		break;
+	case 11:
+	case 10:
+		mbc = 1;
+		break;
+	case 9:
+	case 8:
+		mbc = 0;
+		break;
+	default:
+		BUG();
+		return -EINVAL;
+	}
+
+	switch (event) {
+	case SND_SOC_DAPM_POST_PMU:
+		wm8958_mbc_apply(codec, mbc, 1);
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		wm8958_mbc_apply(codec, mbc, 0);
+		break;
+	}
+
+	return 0;
+}
+
+static int wm8958_put_mbc_enum(struct snd_kcontrol *kcontrol,
+			       struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+	struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
+	struct wm8994_pdata *pdata = wm8994->pdata;
+	int value = ucontrol->value.integer.value[0];
+	int reg;
+
+	/* Don't allow on-the-fly reconfiguration */
+	reg = snd_soc_read(codec, WM8994_CLOCKING_1);
+	if (reg < 0 || reg & WM8958_DSP2CLK_ENA)
+		return -EBUSY;
+
+	if (value >= pdata->num_mbc_cfgs)
+		return -EINVAL;
+
+	wm8994->mbc_cfg = value;
+
+	return 0;
+}
+
+static int wm8958_get_mbc_enum(struct snd_kcontrol *kcontrol,
+			       struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+	struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
+
+	ucontrol->value.enumerated.item[0] = wm8994->mbc_cfg;
+
+	return 0;
+}
+
+static int wm8958_mbc_info(struct snd_kcontrol *kcontrol,
+			   struct snd_ctl_elem_info *uinfo)
+{
+	uinfo->type = SNDRV_CTL_ELEM_TYPE_BOOLEAN;
+	uinfo->count = 1;
+	uinfo->value.integer.min = 0;
+	uinfo->value.integer.max = 1;
+	return 0;
+}
+
+static int wm8958_mbc_get(struct snd_kcontrol *kcontrol,
+			  struct snd_ctl_elem_value *ucontrol)
+{
+	int mbc = kcontrol->private_value;
+	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+	struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
+
+	ucontrol->value.integer.value[0] = wm8994->mbc_ena[mbc];
+
+	return 0;
+}
+
+static int wm8958_mbc_put(struct snd_kcontrol *kcontrol,
+			  struct snd_ctl_elem_value *ucontrol)
+{
+	int mbc = kcontrol->private_value;
+	int i;
+	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+	struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
+
+	if (ucontrol->value.integer.value[0] > 1)
+		return -EINVAL;
+
+	for (i = 0; i < ARRAY_SIZE(wm8994->mbc_ena); i++) {
+		if (mbc != i && wm8994->mbc_ena[i]) {
+			dev_dbg(codec->dev, "MBC %d active already\n", mbc);
+			return -EBUSY;
+		}
+	}
+
+	wm8994->mbc_ena[mbc] = ucontrol->value.integer.value[0];
+
+	wm8958_mbc_apply(codec, mbc, wm8994->mbc_ena[mbc]);
+
+	return 0;
+}
+
+#define WM8958_MBC_SWITCH(xname, xval) {\
+	.iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = (xname), \
+	.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,\
+	.info = wm8958_mbc_info, \
+	.get = wm8958_mbc_get, .put = wm8958_mbc_put, \
+	.private_value = xval }
 
 static const struct snd_kcontrol_new wm8994_snd_controls[] = {
 SOC_DOUBLE_R_TLV("AIF1ADC1 Volume", WM8994_AIF1_ADC1_LEFT_VOLUME,
@@ -2098,10 +789,15 @@
 		 WM8994_AIF2_ADC_RIGHT_VOLUME,
 		 1, 119, 0, digital_tlv),
 
+SOC_ENUM("AIF1ADCL Source", aif1adcl_src),
+SOC_ENUM("AIF1ADCR Source", aif1adcr_src),
+SOC_ENUM("AIF2ADCL Source", aif2adcl_src),
+SOC_ENUM("AIF2ADCR Source", aif2adcr_src),
+
 SOC_ENUM("AIF1DACL Source", aif1dacl_src),
 SOC_ENUM("AIF1DACR Source", aif1dacr_src),
-SOC_ENUM("AIF2DACL Source", aif1dacl_src),
-SOC_ENUM("AIF2DACR Source", aif1dacr_src),
+SOC_ENUM("AIF2DACL Source", aif2dacl_src),
+SOC_ENUM("AIF2DACR Source", aif2dacr_src),
 
 SOC_DOUBLE_R_TLV("AIF1DAC1 Volume", WM8994_AIF1_DAC1_LEFT_VOLUME,
 		 WM8994_AIF1_DAC1_RIGHT_VOLUME, 1, 96, 0, digital_tlv),
@@ -2140,6 +836,18 @@
 SOC_ENUM("Sidetone HPF Mux", sidetone_hpf),
 SOC_SINGLE("Sidetone HPF Switch", WM8994_SIDETONE, 6, 1, 0),
 
+SOC_ENUM("AIF1ADC1 HPF Mode", aif1adc1_hpf),
+SOC_DOUBLE("AIF1ADC1 HPF Switch", WM8994_AIF1_ADC1_FILTERS, 12, 11, 1, 0),
+
+SOC_ENUM("AIF1ADC2 HPF Mode", aif1adc2_hpf),
+SOC_DOUBLE("AIF1ADC2 HPF Switch", WM8994_AIF1_ADC2_FILTERS, 12, 11, 1, 0),
+
+SOC_ENUM("AIF2ADC HPF Mode", aif2adc_hpf),
+SOC_DOUBLE("AIF2ADC HPF Switch", WM8994_AIF2_ADC_FILTERS, 12, 11, 1, 0),
+
+SOC_ENUM("ADC OSR", adc_osr),
+SOC_ENUM("DAC OSR", dac_osr),
+
 SOC_DOUBLE_R_TLV("DAC1 Volume", WM8994_DAC1_LEFT_VOLUME,
 		 WM8994_DAC1_RIGHT_VOLUME, 1, 96, 0, digital_tlv),
 SOC_DOUBLE_R("DAC1 Switch", WM8994_DAC1_LEFT_VOLUME,
@@ -2162,15 +870,15 @@
 
 SOC_SINGLE_TLV("AIF1DAC1 3D Stereo Volume", WM8994_AIF1_DAC1_FILTERS_2,
 	       10, 15, 0, wm8994_3d_tlv),
-SOC_SINGLE("AIF1DAC1 3D Stereo Switch", WM8994_AIF1_DAC2_FILTERS_2,
+SOC_SINGLE("AIF1DAC1 3D Stereo Switch", WM8994_AIF1_DAC1_FILTERS_2,
 	   8, 1, 0),
 SOC_SINGLE_TLV("AIF1DAC2 3D Stereo Volume", WM8994_AIF1_DAC2_FILTERS_2,
 	       10, 15, 0, wm8994_3d_tlv),
 SOC_SINGLE("AIF1DAC2 3D Stereo Switch", WM8994_AIF1_DAC2_FILTERS_2,
 	   8, 1, 0),
-SOC_SINGLE_TLV("AIF2DAC 3D Stereo Volume", WM8994_AIF1_DAC1_FILTERS_2,
+SOC_SINGLE_TLV("AIF2DAC 3D Stereo Volume", WM8994_AIF2_DAC_FILTERS_2,
 	       10, 15, 0, wm8994_3d_tlv),
-SOC_SINGLE("AIF2DAC 3D Stereo Switch", WM8994_AIF1_DAC2_FILTERS_2,
+SOC_SINGLE("AIF2DAC 3D Stereo Switch", WM8994_AIF2_DAC_FILTERS_2,
 	   8, 1, 0),
 };
 
@@ -2209,6 +917,13 @@
 	       eq_tlv),
 };
 
+static const struct snd_kcontrol_new wm8958_snd_controls[] = {
+SOC_SINGLE_TLV("AIF3 Boost Volume", WM8958_AIF3_CONTROL_2, 10, 3, 0, aif_tlv),
+WM8958_MBC_SWITCH("AIF1DAC1 MBC Switch", 0),
+WM8958_MBC_SWITCH("AIF1DAC2 MBC Switch", 1),
+WM8958_MBC_SWITCH("AIF2DAC MBC Switch", 2),
+};
+
 static int clk_sys_event(struct snd_soc_dapm_widget *w,
 			 struct snd_kcontrol *kcontrol, int event)
 {
@@ -2228,6 +943,7 @@
 
 static void wm8994_update_class_w(struct snd_soc_codec *codec)
 {
+	struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
 	int enable = 1;
 	int source = 0;  /* GCC flow analysis can't track enable */
 	int reg, reg_r;
@@ -2278,11 +994,13 @@
 				    WM8994_CP_DYN_PWR |
 				    WM8994_CP_DYN_SRC_SEL_MASK,
 				    source | WM8994_CP_DYN_PWR);
+		wm8994->hubs.class_w = true;
 		
 	} else {
 		dev_dbg(codec->dev, "Class W disabled\n");
 		snd_soc_update_bits(codec, WM8994_CLASS_W_1,
 				    WM8994_CP_DYN_PWR, 0);
+		wm8994->hubs.class_w = false;
 	}
 }
 
@@ -2512,14 +1230,47 @@
 	SOC_DAPM_ENUM("AIF2ADC Mux", aif2adc_enum);
 
 static const char *aif3adc_text[] = {
-	"AIF1ADCDAT", "AIF2ADCDAT", "AIF2DACDAT",
+	"AIF1ADCDAT", "AIF2ADCDAT", "AIF2DACDAT", "Mono PCM",
 };
 
-static const struct soc_enum aif3adc_enum =
+static const struct soc_enum wm8994_aif3adc_enum =
 	SOC_ENUM_SINGLE(WM8994_POWER_MANAGEMENT_6, 3, 3, aif3adc_text);
 
-static const struct snd_kcontrol_new aif3adc_mux =
-	SOC_DAPM_ENUM("AIF3ADC Mux", aif3adc_enum);
+static const struct snd_kcontrol_new wm8994_aif3adc_mux =
+	SOC_DAPM_ENUM("AIF3ADC Mux", wm8994_aif3adc_enum);
+
+static const struct soc_enum wm8958_aif3adc_enum =
+	SOC_ENUM_SINGLE(WM8994_POWER_MANAGEMENT_6, 3, 4, aif3adc_text);
+
+static const struct snd_kcontrol_new wm8958_aif3adc_mux =
+	SOC_DAPM_ENUM("AIF3ADC Mux", wm8958_aif3adc_enum);
+
+static const char *mono_pcm_out_text[] = {
+	"None", "AIF2ADCL", "AIF2ADCR", 
+};
+
+static const struct soc_enum mono_pcm_out_enum =
+	SOC_ENUM_SINGLE(WM8994_POWER_MANAGEMENT_6, 9, 3, mono_pcm_out_text);
+
+static const struct snd_kcontrol_new mono_pcm_out_mux =
+	SOC_DAPM_ENUM("Mono PCM Out Mux", mono_pcm_out_enum);
+
+static const char *aif2dac_src_text[] = {
+	"AIF2", "AIF3",
+};
+
+/* Note that these two controls shouldn't be simultaneously switched to AIF3 */
+static const struct soc_enum aif2dacl_src_enum =
+	SOC_ENUM_SINGLE(WM8994_POWER_MANAGEMENT_6, 7, 2, aif2dac_src_text);
+
+static const struct snd_kcontrol_new aif2dacl_src_mux =
+	SOC_DAPM_ENUM("AIF2DACL Mux", aif2dacl_src_enum);
+
+static const struct soc_enum aif2dacr_src_enum =
+	SOC_ENUM_SINGLE(WM8994_POWER_MANAGEMENT_6, 8, 2, aif2dac_src_text);
+
+static const struct snd_kcontrol_new aif2dacr_src_mux =
+	SOC_DAPM_ENUM("AIF2DACR Mux", aif2dacr_src_enum);
 
 static const struct snd_soc_dapm_widget wm8994_dapm_widgets[] = {
 SND_SOC_DAPM_INPUT("DMIC1DAT"),
@@ -2540,19 +1291,23 @@
 		     0, WM8994_POWER_MANAGEMENT_4, 9, 0),
 SND_SOC_DAPM_AIF_OUT("AIF1ADC1R", "AIF1 Capture",
 		     0, WM8994_POWER_MANAGEMENT_4, 8, 0),
-SND_SOC_DAPM_AIF_IN("AIF1DAC1L", NULL, 0,
-		    WM8994_POWER_MANAGEMENT_5, 9, 0),
-SND_SOC_DAPM_AIF_IN("AIF1DAC1R", NULL, 0,
-		    WM8994_POWER_MANAGEMENT_5, 8, 0),
+SND_SOC_DAPM_AIF_IN_E("AIF1DAC1L", NULL, 0,
+		      WM8994_POWER_MANAGEMENT_5, 9, 0, wm8958_aif_ev,
+		      SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+SND_SOC_DAPM_AIF_IN_E("AIF1DAC1R", NULL, 0,
+		      WM8994_POWER_MANAGEMENT_5, 8, 0, wm8958_aif_ev,
+		      SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
 
 SND_SOC_DAPM_AIF_OUT("AIF1ADC2L", "AIF1 Capture",
 		     0, WM8994_POWER_MANAGEMENT_4, 11, 0),
 SND_SOC_DAPM_AIF_OUT("AIF1ADC2R", "AIF1 Capture",
 		     0, WM8994_POWER_MANAGEMENT_4, 10, 0),
-SND_SOC_DAPM_AIF_IN("AIF1DAC2L", NULL, 0,
-		    WM8994_POWER_MANAGEMENT_5, 11, 0),
-SND_SOC_DAPM_AIF_IN("AIF1DAC2R", NULL, 0,
-		    WM8994_POWER_MANAGEMENT_5, 10, 0),
+SND_SOC_DAPM_AIF_IN_E("AIF1DAC2L", NULL, 0,
+		      WM8994_POWER_MANAGEMENT_5, 11, 0, wm8958_aif_ev,
+		      SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+SND_SOC_DAPM_AIF_IN_E("AIF1DAC2R", NULL, 0,
+		      WM8994_POWER_MANAGEMENT_5, 10, 0, wm8958_aif_ev,
+		      SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
 
 SND_SOC_DAPM_MIXER("AIF1ADC1L Mixer", SND_SOC_NOPM, 0, 0,
 		   aif1adc1l_mix, ARRAY_SIZE(aif1adc1l_mix)),
@@ -2581,10 +1336,12 @@
 		     WM8994_POWER_MANAGEMENT_4, 13, 0),
 SND_SOC_DAPM_AIF_OUT("AIF2ADCR", NULL, 0,
 		     WM8994_POWER_MANAGEMENT_4, 12, 0),
-SND_SOC_DAPM_AIF_IN("AIF2DACL", NULL, 0,
-		    WM8994_POWER_MANAGEMENT_5, 13, 0),
-SND_SOC_DAPM_AIF_IN("AIF2DACR", NULL, 0,
-		    WM8994_POWER_MANAGEMENT_5, 12, 0),
+SND_SOC_DAPM_AIF_IN_E("AIF2DACL", NULL, 0,
+		      WM8994_POWER_MANAGEMENT_5, 13, 0, wm8958_aif_ev,
+		      SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
+SND_SOC_DAPM_AIF_IN_E("AIF2DACR", NULL, 0,
+		      WM8994_POWER_MANAGEMENT_5, 12, 0, wm8958_aif_ev,
+		      SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
 
 SND_SOC_DAPM_AIF_IN("AIF1DACDAT", "AIF1 Playback", 0, SND_SOC_NOPM, 0, 0),
 SND_SOC_DAPM_AIF_IN("AIF2DACDAT", "AIF2 Playback", 0, SND_SOC_NOPM, 0, 0),
@@ -2593,7 +1350,6 @@
 SND_SOC_DAPM_MUX("AIF1DAC Mux", SND_SOC_NOPM, 0, 0, &aif1dac_mux),
 SND_SOC_DAPM_MUX("AIF2DAC Mux", SND_SOC_NOPM, 0, 0, &aif2dac_mux),
 SND_SOC_DAPM_MUX("AIF2ADC Mux", SND_SOC_NOPM, 0, 0, &aif2adc_mux),
-SND_SOC_DAPM_MUX("AIF3ADC Mux", SND_SOC_NOPM, 0, 0, &aif3adc_mux),
 
 SND_SOC_DAPM_AIF_IN("AIF3DACDAT", "AIF3 Playback", 0, SND_SOC_NOPM, 0, 0),
 SND_SOC_DAPM_AIF_IN("AIF3ADCDAT", "AIF3 Capture", 0, SND_SOC_NOPM, 0, 0),
@@ -2631,8 +1387,18 @@
 SND_SOC_DAPM_POST("Debug log", post_ev),
 };
 
-static const struct snd_soc_dapm_route intercon[] = {
+static const struct snd_soc_dapm_widget wm8994_specific_dapm_widgets[] = {
+SND_SOC_DAPM_MUX("AIF3ADC Mux", SND_SOC_NOPM, 0, 0, &wm8994_aif3adc_mux),
+};
 
+static const struct snd_soc_dapm_widget wm8958_dapm_widgets[] = {
+SND_SOC_DAPM_MUX("Mono PCM Out Mux", SND_SOC_NOPM, 0, 0, &mono_pcm_out_mux),
+SND_SOC_DAPM_MUX("AIF2DACL Mux", SND_SOC_NOPM, 0, 0, &aif2dacl_src_mux),
+SND_SOC_DAPM_MUX("AIF2DACR Mux", SND_SOC_NOPM, 0, 0, &aif2dacr_src_mux),
+SND_SOC_DAPM_MUX("AIF3ADC Mux", SND_SOC_NOPM, 0, 0, &wm8958_aif3adc_mux),
+};
+
+static const struct snd_soc_dapm_route intercon[] = {
 	{ "CLK_SYS", NULL, "AIF1CLK", check_clk_sys },
 	{ "CLK_SYS", NULL, "AIF2CLK", check_clk_sys },
 
@@ -2740,9 +1506,6 @@
 	{ "AIF1DAC2L", NULL, "AIF1DAC Mux" },
 	{ "AIF1DAC2R", NULL, "AIF1DAC Mux" },
 
-	{ "AIF2DACL", NULL, "AIF2DAC Mux" },
-	{ "AIF2DACR", NULL, "AIF2DAC Mux" },
-
 	{ "AIF1DAC Mux", "AIF1DACDAT", "AIF1DACDAT" },
 	{ "AIF1DAC Mux", "AIF3DACDAT", "AIF3DACDAT" },
 	{ "AIF2DAC Mux", "AIF2DACDAT", "AIF2DACDAT" },
@@ -2815,6 +1578,26 @@
 	{ "Right Headphone Mux", "DAC", "DAC1R" },
 };
 
+static const struct snd_soc_dapm_route wm8994_intercon[] = {
+	{ "AIF2DACL", NULL, "AIF2DAC Mux" },
+	{ "AIF2DACR", NULL, "AIF2DAC Mux" },
+};
+
+static const struct snd_soc_dapm_route wm8958_intercon[] = {
+	{ "AIF2DACL", NULL, "AIF2DACL Mux" },
+	{ "AIF2DACR", NULL, "AIF2DACR Mux" },
+
+	{ "AIF2DACL Mux", "AIF2", "AIF2DAC Mux" },
+	{ "AIF2DACL Mux", "AIF3", "AIF3DACDAT" },
+	{ "AIF2DACR Mux", "AIF2", "AIF2DAC Mux" },
+	{ "AIF2DACR Mux", "AIF3", "AIF3DACDAT" },
+
+	{ "Mono PCM Out Mux", "AIF2ADCL", "AIF2ADCL" },
+	{ "Mono PCM Out Mux", "AIF2ADCR", "AIF2ADCR" },
+
+	{ "AIF3ADC Mux", "Mono PCM", "Mono PCM Out Mux" },
+};
+
 /* The size in bits of the FLL divide multiplied by 10
  * to allow rounding later */
 #define FIXED_FLL_SIZE ((1 << 16) * 10)
@@ -2930,6 +1713,7 @@
 		/* Allow no source specification when stopping */
 		if (freq_out)
 			return -EINVAL;
+		src = wm8994->fll[id].src;
 		break;
 	case WM8994_FLL_SRC_MCLK1:
 	case WM8994_FLL_SRC_MCLK2:
@@ -3094,6 +1878,7 @@
 static int wm8994_set_bias_level(struct snd_soc_codec *codec,
 				 enum snd_soc_bias_level level)
 {
+	struct wm8994 *control = codec->control_data;
 	struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
 
 	switch (level) {
@@ -3107,16 +1892,36 @@
 		break;
 
 	case SND_SOC_BIAS_STANDBY:
-		if (codec->bias_level == SND_SOC_BIAS_OFF) {
-			/* Tweak DC servo and DSP configuration for
-			 * improved performance. */
-			if (wm8994->revision < 4) {
-				/* Tweak DC servo and DSP configuration for
-				 * improved performance. */
-				snd_soc_write(codec, 0x102, 0x3);
-				snd_soc_write(codec, 0x56, 0x3);
-				snd_soc_write(codec, 0x817, 0);
-				snd_soc_write(codec, 0x102, 0);
+		if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
+			pm_runtime_get_sync(codec->dev);
+
+			switch (control->type) {
+			case WM8994:
+				if (wm8994->revision < 4) {
+					/* Tweak DC servo and DSP
+					 * configuration for improved
+					 * performance. */
+					snd_soc_write(codec, 0x102, 0x3);
+					snd_soc_write(codec, 0x56, 0x3);
+					snd_soc_write(codec, 0x817, 0);
+					snd_soc_write(codec, 0x102, 0);
+				}
+				break;
+
+			case WM8958:
+				if (wm8994->revision == 0) {
+					/* Optimise performance for rev A */
+					snd_soc_write(codec, 0x102, 0x3);
+					snd_soc_write(codec, 0xcb, 0x81);
+					snd_soc_write(codec, 0x817, 0);
+					snd_soc_write(codec, 0x102, 0);
+
+					snd_soc_update_bits(codec,
+							    WM8958_CHARGE_PUMP_2,
+							    WM8958_CP_DISCH,
+							    WM8958_CP_DISCH);
+				}
+				break;
 			}
 
 			/* Discharge LINEOUT1 & 2 */
@@ -3151,7 +1956,7 @@
 		break;
 
 	case SND_SOC_BIAS_OFF:
-		if (codec->bias_level == SND_SOC_BIAS_STANDBY) {
+		if (codec->dapm.bias_level == SND_SOC_BIAS_STANDBY) {
 			/* Switch over to startup biases */
 			snd_soc_update_bits(codec, WM8994_ANTIPOP_2,
 					    WM8994_BIAS_SRC |
@@ -3183,16 +1988,19 @@
 					    WM8994_STARTUP_BIAS_ENA |
 					    WM8994_VMID_BUF_ENA |
 					    WM8994_VMID_RAMP_MASK, 0);
+
+			pm_runtime_put(codec->dev);
 		}
 		break;
 	}
-	codec->bias_level = level;
+	codec->dapm.bias_level = level;
 	return 0;
 }
 
 static int wm8994_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
 {
 	struct snd_soc_codec *codec = dai->codec;
+	struct wm8994 *control = codec->control_data;
 	int ms_reg;
 	int aif1_reg;
 	int ms = 0;
@@ -3277,6 +2085,13 @@
 		return -EINVAL;
 	}
 
+	/* The AIF2 format configuration needs to be mirrored to AIF3
+	 * on WM8958 if AIF3 is in use, so just do it all the time. */
+	if (control->type == WM8958 && dai->id == 2)
+		snd_soc_update_bits(codec, WM8958_AIF3_CONTROL_1,
+				    WM8994_AIF1_LRCLK_INV |
+				    WM8958_AIF3_FMT_MASK, aif1);
+
 	snd_soc_update_bits(codec, aif1_reg,
 			    WM8994_AIF1_BCLK_INV | WM8994_AIF1_LRCLK_INV |
 			    WM8994_AIF1_FMT_MASK,
@@ -3317,12 +2132,15 @@
 			    struct snd_soc_dai *dai)
 {
 	struct snd_soc_codec *codec = dai->codec;
+	struct wm8994 *control = codec->control_data;
 	struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
 	int aif1_reg;
+	int aif2_reg;
 	int bclk_reg;
 	int lrclk_reg;
 	int rate_reg;
 	int aif1 = 0;
+	int aif2 = 0;
 	int bclk = 0;
 	int lrclk = 0;
 	int rate_val = 0;
@@ -3333,6 +2151,7 @@
 	switch (dai->id) {
 	case 1:
 		aif1_reg = WM8994_AIF1_CONTROL_1;
+		aif2_reg = WM8994_AIF1_CONTROL_2;
 		bclk_reg = WM8994_AIF1_BCLK;
 		rate_reg = WM8994_AIF1_RATE;
 		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK ||
@@ -3345,6 +2164,7 @@
 		break;
 	case 2:
 		aif1_reg = WM8994_AIF2_CONTROL_1;
+		aif2_reg = WM8994_AIF2_CONTROL_2;
 		bclk_reg = WM8994_AIF2_BCLK;
 		rate_reg = WM8994_AIF2_RATE;
 		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK ||
@@ -3355,6 +2175,14 @@
 			dev_dbg(codec->dev, "AIF2 using split LRCLK\n");
 		}
 		break;
+	case 3:
+		switch (control->type) {
+		case WM8958:
+			aif1_reg = WM8958_AIF3_CONTROL_1;
+			break;
+		default:
+			return 0;
+		}
 	default:
 		return -EINVAL;
 	}
@@ -3392,6 +2220,10 @@
 	dev_dbg(dai->dev, "AIF%dCLK is %dHz, target BCLK %dHz\n",
 		dai->id, wm8994->aifclk[id], bclk_rate);
 
+	if (params_channels(params) == 1 &&
+	    (snd_soc_read(codec, aif1_reg) & 0x18) == 0x18)
+		aif2 |= WM8994_AIF1_MONO;
+
 	if (wm8994->aifclk[id] == 0) {
 		dev_err(dai->dev, "AIF%dCLK not configured\n", dai->id);
 		return -EINVAL;
@@ -3435,6 +2267,7 @@
 		lrclk, bclk_rate / lrclk);
 
 	snd_soc_update_bits(codec, aif1_reg, WM8994_AIF1_WL_MASK, aif1);
+	snd_soc_update_bits(codec, aif2_reg, WM8994_AIF1_MONO, aif2);
 	snd_soc_update_bits(codec, bclk_reg, WM8994_AIF1_BCLK_DIV_MASK, bclk);
 	snd_soc_update_bits(codec, lrclk_reg, WM8994_AIF1DAC_RATE_MASK,
 			    lrclk);
@@ -3458,6 +2291,47 @@
 	return 0;
 }
 
+static int wm8994_aif3_hw_params(struct snd_pcm_substream *substream,
+				 struct snd_pcm_hw_params *params,
+				 struct snd_soc_dai *dai)
+{
+	struct snd_soc_codec *codec = dai->codec;
+	struct wm8994 *control = codec->control_data;
+	int aif1_reg;
+	int aif1 = 0;
+
+	switch (dai->id) {
+	case 3:
+		switch (control->type) {
+		case WM8958:
+			aif1_reg = WM8958_AIF3_CONTROL_1;
+			break;
+		default:
+			return 0;
+		}
+	default:
+		return 0;
+	}
+
+	switch (params_format(params)) {
+	case SNDRV_PCM_FORMAT_S16_LE:
+		break;
+	case SNDRV_PCM_FORMAT_S20_3LE:
+		aif1 |= 0x20;
+		break;
+	case SNDRV_PCM_FORMAT_S24_LE:
+		aif1 |= 0x40;
+		break;
+	case SNDRV_PCM_FORMAT_S32_LE:
+		aif1 |= 0x60;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return snd_soc_update_bits(codec, aif1_reg, WM8994_AIF1_WL_MASK, aif1);
+}
+
 static int wm8994_aif_mute(struct snd_soc_dai *codec_dai, int mute)
 {
 	struct snd_soc_codec *codec = codec_dai->codec;
@@ -3539,6 +2413,7 @@
 };
 
 static struct snd_soc_dai_ops wm8994_aif3_dai_ops = {
+	.hw_params	= wm8994_aif3_hw_params,
 	.set_tristate	= wm8994_set_tristate,
 };
 
@@ -3548,14 +2423,14 @@
 		.id = 1,
 		.playback = {
 			.stream_name = "AIF1 Playback",
-			.channels_min = 2,
+			.channels_min = 1,
 			.channels_max = 2,
 			.rates = WM8994_RATES,
 			.formats = WM8994_FORMATS,
 		},
 		.capture = {
 			.stream_name = "AIF1 Capture",
-			.channels_min = 2,
+			.channels_min = 1,
 			.channels_max = 2,
 			.rates = WM8994_RATES,
 			.formats = WM8994_FORMATS,
@@ -3567,14 +2442,14 @@
 		.id = 2,
 		.playback = {
 			.stream_name = "AIF2 Playback",
-			.channels_min = 2,
+			.channels_min = 1,
 			.channels_max = 2,
 			.rates = WM8994_RATES,
 			.formats = WM8994_FORMATS,
 		},
 		.capture = {
 			.stream_name = "AIF2 Capture",
-			.channels_min = 2,
+			.channels_min = 1,
 			.channels_max = 2,
 			.rates = WM8994_RATES,
 			.formats = WM8994_FORMATS,
@@ -3586,14 +2461,14 @@
 		.id = 3,
 		.playback = {
 			.stream_name = "AIF3 Playback",
-			.channels_min = 2,
+			.channels_min = 1,
 			.channels_max = 2,
 			.rates = WM8994_RATES,
 			.formats = WM8994_FORMATS,
 		},
 		.capture = {
 			.stream_name = "AIF3 Capture",
-			.channels_min = 2,
+			.channels_min = 1,
 			.channels_max = 2,
 			.rates = WM8994_RATES,
 			.formats = WM8994_FORMATS,
@@ -3625,26 +2500,12 @@
 static int wm8994_resume(struct snd_soc_codec *codec)
 {
 	struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
-	u16 *reg_cache = codec->reg_cache;
 	int i, ret;
 
 	/* Restore the registers */
-	for (i = 1; i < ARRAY_SIZE(wm8994->reg_cache); i++) {
-		switch (i) {
-		case WM8994_LDO_1:
-		case WM8994_LDO_2:
-		case WM8994_SOFTWARE_RESET:
-			/* Handled by other MFD drivers */
-			continue;
-		default:
-			break;
-		}
-
-		if (!access_masks[i].writable)
-			continue;
-
-		wm8994_reg_write(codec->control_data, i, reg_cache[i]);
-	}
+	ret = snd_soc_cache_sync(codec);
+	if (ret != 0)
+		dev_err(codec->dev, "Failed to sync cache: %d\n", ret);
 
 	wm8994_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
 
@@ -3794,6 +2655,34 @@
 	dev_dbg(codec->dev, "%d ReTune Mobile configurations\n",
 		pdata->num_retune_mobile_cfgs);
 
+	if (pdata->num_mbc_cfgs) {
+		struct snd_kcontrol_new control[] = {
+			SOC_ENUM_EXT("MBC Mode", wm8994->mbc_enum,
+				     wm8958_get_mbc_enum, wm8958_put_mbc_enum),
+		};
+
+		/* We need an array of texts for the enum API */
+		wm8994->mbc_texts = kmalloc(sizeof(char *)
+					    * pdata->num_mbc_cfgs, GFP_KERNEL);
+		if (!wm8994->mbc_texts) {
+			dev_err(wm8994->codec->dev,
+				"Failed to allocate %d MBC config texts\n",
+				pdata->num_mbc_cfgs);
+			return;
+		}
+
+		for (i = 0; i < pdata->num_mbc_cfgs; i++)
+			wm8994->mbc_texts[i] = pdata->mbc_cfgs[i].name;
+
+		wm8994->mbc_enum.max = pdata->num_mbc_cfgs;
+		wm8994->mbc_enum.texts = wm8994->mbc_texts;
+
+		ret = snd_soc_add_controls(wm8994->codec, control, 1);
+		if (ret != 0)
+			dev_err(wm8994->codec->dev,
+				"Failed to add MBC mode controls: %d\n", ret);
+	}
+
 	if (pdata->num_retune_mobile_cfgs)
 		wm8994_handle_retune_mobile_pdata(wm8994);
 	else
@@ -3823,8 +2712,12 @@
 {
 	struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
 	struct wm8994_micdet *micdet;
+	struct wm8994 *control = codec->control_data;
 	int reg;
 
+	if (control->type != WM8994)
+		return -EINVAL;
+
 	switch (micbias) {
 	case 1:
 		micdet = &wm8994->micdet[0];
@@ -3863,6 +2756,10 @@
 	int reg;
 	int report;
 
+#ifndef CONFIG_SND_SOC_WM8994_MODULE
+	trace_snd_soc_jack_irq(dev_name(codec->dev));
+#endif
+
 	reg = snd_soc_read(codec, WM8994_INTERRUPT_RAW_STATUS_2);
 	if (reg < 0) {
 		dev_err(codec->dev, "Failed to read microphone status: %d\n",
@@ -3891,77 +2788,251 @@
 	return IRQ_HANDLED;
 }
 
+/* Default microphone detection handler for WM8958 - the user can
+ * override this if they wish.
+ */
+static void wm8958_default_micdet(u16 status, void *data)
+{
+	struct snd_soc_codec *codec = data;
+	struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
+	int report = 0;
+
+	/* If nothing present then clear our statuses */
+	if (!(status & WM8958_MICD_STS)) {
+		wm8994->jack_is_video = false;
+		wm8994->jack_is_mic = false;
+		goto done;
+	}
+
+	/* Assume anything over 475 ohms is a microphone and remember
+	 * that we've seen one (since buttons override it) */
+	if (status & 0x600)
+		wm8994->jack_is_mic = true;
+	if (wm8994->jack_is_mic)
+		report |= SND_JACK_MICROPHONE;
+
+	/* Video has an impedance of approximately 75 ohms; assume
+	 * this isn't used as a button and remember it since buttons
+	 * override it. */
+	if (status & 0x40)
+		wm8994->jack_is_video = true;
+	if (wm8994->jack_is_video)
+		report |= SND_JACK_VIDEOOUT;
+
+	/* Everything else is buttons; just assign slots */
+	if (status & 0x4)
+		report |= SND_JACK_BTN_0;
+	if (status & 0x8)
+		report |= SND_JACK_BTN_1;
+	if (status & 0x10)
+		report |= SND_JACK_BTN_2;
+	if (status & 0x20)
+		report |= SND_JACK_BTN_3;
+	if (status & 0x80)
+		report |= SND_JACK_BTN_4;
+	if (status & 0x100)
+		report |= SND_JACK_BTN_5;
+
+done:
+	snd_soc_jack_report(wm8994->micdet[0].jack,
+			    SND_JACK_BTN_0 | SND_JACK_BTN_1 | SND_JACK_BTN_2 |
+			    SND_JACK_BTN_3 | SND_JACK_BTN_4 | SND_JACK_BTN_5 |
+			    SND_JACK_MICROPHONE | SND_JACK_VIDEOOUT,
+			    report);
+}
+
+/**
+ * wm8958_mic_detect - Enable microphone detection via the WM8958 IRQ
+ *
+ * @codec:   WM8958 codec
+ * @jack:    jack to report detection events on
+ * @cb:      detection callback, or NULL to use the default handler
+ * @cb_data: user data passed to @cb
+ *
+ * Enable microphone detection functionality for the WM8958.  By
+ * default a simple detection scheme is used, supporting up to 6
+ * buttons plus video and microphone detection.
+ *
+ * The WM8958 has an advanced jack detection facility which is able to
+ * support complex accessory detection, especially when used in
+ * conjunction with external circuitry.  In order to provide maximum
+ * flexibility a callback is provided which allows a completely custom
+ * detection algorithm.
+ */
+int wm8958_mic_detect(struct snd_soc_codec *codec, struct snd_soc_jack *jack,
+		      wm8958_micdet_cb cb, void *cb_data)
+{
+	struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
+	struct wm8994 *control = codec->control_data;
+
+	if (control->type != WM8958)
+		return -EINVAL;
+
+	if (jack) {
+		if (!cb) {
+			dev_dbg(codec->dev, "Using default micdet callback\n");
+			cb = wm8958_default_micdet;
+			cb_data = codec;
+		}
+
+		wm8994->micdet[0].jack = jack;
+		wm8994->jack_cb = cb;
+		wm8994->jack_cb_data = cb_data;
+
+		snd_soc_update_bits(codec, WM8958_MIC_DETECT_1,
+				    WM8958_MICD_ENA, WM8958_MICD_ENA);
+	} else {
+		snd_soc_update_bits(codec, WM8958_MIC_DETECT_1,
+				    WM8958_MICD_ENA, 0);
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(wm8958_mic_detect);
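As a usage illustration only, here is how a machine driver might hook this API up from its card init callback. The jack name, reported event mask and function name are hypothetical; the point is simply the snd_soc_jack_new() + wm8958_mic_detect() call sequence with a NULL callback selecting wm8958_default_micdet().

/* Hypothetical machine-driver init: register a headset jack and let the
 * WM8958 report microphone/button events on it via the default handler. */
static struct snd_soc_jack hp_jack;

static int board_wm8958_init(struct snd_soc_pcm_runtime *rtd)
{
	struct snd_soc_codec *codec = rtd->codec;
	int ret;

	ret = snd_soc_jack_new(codec, "Headset",
			       SND_JACK_HEADSET | SND_JACK_BTN_0, &hp_jack);
	if (ret != 0)
		return ret;

	/* NULL callback => wm8958_default_micdet() */
	return wm8958_mic_detect(codec, &hp_jack, NULL, NULL);
}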
+
+static irqreturn_t wm8958_mic_irq(int irq, void *data)
+{
+	struct wm8994_priv *wm8994 = data;
+	struct snd_soc_codec *codec = wm8994->codec;
+	int reg;
+
+	reg = snd_soc_read(codec, WM8958_MIC_DETECT_3);
+	if (reg < 0) {
+		dev_err(codec->dev, "Failed to read mic detect status: %d\n",
+			reg);
+		return IRQ_NONE;
+	}
+
+	if (!(reg & WM8958_MICD_VALID)) {
+		dev_dbg(codec->dev, "Mic detect data not valid\n");
+		goto out;
+	}
+
+#ifndef CONFIG_SND_SOC_WM8994_MODULE
+	trace_snd_soc_jack_irq(dev_name(codec->dev));
+#endif
+
+	if (wm8994->jack_cb)
+		wm8994->jack_cb(reg, wm8994->jack_cb_data);
+	else
+		dev_warn(codec->dev, "Accessory detection with no callback\n");
+
+out:
+	return IRQ_HANDLED;
+}
+
 static int wm8994_codec_probe(struct snd_soc_codec *codec)
 {
+	struct wm8994 *control;
 	struct wm8994_priv *wm8994;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 	int ret, i;
 
 	codec->control_data = dev_get_drvdata(codec->dev->parent);
+	control = codec->control_data;
 
 	wm8994 = kzalloc(sizeof(struct wm8994_priv), GFP_KERNEL);
 	if (wm8994 == NULL)
 		return -ENOMEM;
 	snd_soc_codec_set_drvdata(codec, wm8994);
 
-	codec->reg_cache = &wm8994->reg_cache;
-
 	wm8994->pdata = dev_get_platdata(codec->dev->parent);
 	wm8994->codec = codec;
 
-	/* Fill the cache with physical values we inherited; don't reset */
-	ret = wm8994_bulk_read(codec->control_data, 0,
-			       ARRAY_SIZE(wm8994->reg_cache) - 1,
-			       codec->reg_cache);
-	if (ret < 0) {
-		dev_err(codec->dev, "Failed to fill register cache: %d\n",
-			ret);
-		goto err;
-	}
+	pm_runtime_enable(codec->dev);
+	pm_runtime_resume(codec->dev);
 
-	/* Clear the cached values for unreadable/volatile registers to
-	 * avoid potential confusion.
-	 */
-	for (i = 0; i < ARRAY_SIZE(wm8994->reg_cache); i++)
-		if (wm8994_volatile(i) || !wm8994_readable(i))
-			wm8994->reg_cache[i] = 0;
+	/* Read our current status back from the chip - we don't want to
+	 * reset as this may interfere with the GPIO or LDO operation. */
+	for (i = 0; i < WM8994_CACHE_SIZE; i++) {
+		if (!wm8994_readable(i) || wm8994_volatile(i))
+			continue;
+
+		ret = wm8994_reg_read(codec->control_data, i);
+		if (ret <= 0)
+			continue;
+
+		ret = snd_soc_cache_write(codec, i, ret);
+		if (ret != 0) {
+			dev_err(codec->dev,
+				"Failed to initialise cache for 0x%x: %d\n",
+				i, ret);
+			goto err;
+		}
+	}
 
 	/* Set revision-specific configuration */
 	wm8994->revision = snd_soc_read(codec, WM8994_CHIP_REVISION);
-	switch (wm8994->revision) {
-	case 2:
-	case 3:
-		wm8994->hubs.dcs_codes = -5;
-		wm8994->hubs.hp_startup_mode = 1;
+	switch (control->type) {
+	case WM8994:
+		switch (wm8994->revision) {
+		case 2:
+		case 3:
+			wm8994->hubs.dcs_codes = -5;
+			wm8994->hubs.hp_startup_mode = 1;
+			wm8994->hubs.dcs_readback_mode = 1;
+			break;
+		default:
+			wm8994->hubs.dcs_readback_mode = 1;
+			break;
+		}
+
+		/* Fall through */
+	case WM8958:
 		wm8994->hubs.dcs_readback_mode = 1;
 		break;
+
 	default:
-		wm8994->hubs.dcs_readback_mode = 1;
 		break;
 	}
 
-	ret = wm8994_request_irq(codec->control_data, WM8994_IRQ_MIC1_DET,
-				 wm8994_mic_irq, "Mic 1 detect", wm8994);
-	if (ret != 0)
-		dev_warn(codec->dev,
-			 "Failed to request Mic1 detect IRQ: %d\n", ret);
+	switch (control->type) {
+	case WM8994:
+		ret = wm8994_request_irq(codec->control_data,
+					 WM8994_IRQ_MIC1_DET,
+					 wm8994_mic_irq, "Mic 1 detect",
+					 wm8994);
+		if (ret != 0)
+			dev_warn(codec->dev,
+				 "Failed to request Mic1 detect IRQ: %d\n",
+				 ret);
 
-	ret = wm8994_request_irq(codec->control_data, WM8994_IRQ_MIC1_SHRT,
-				 wm8994_mic_irq, "Mic 1 short", wm8994);
-	if (ret != 0)
-		dev_warn(codec->dev,
-			 "Failed to request Mic1 short IRQ: %d\n", ret);
+		ret = wm8994_request_irq(codec->control_data,
+					 WM8994_IRQ_MIC1_SHRT,
+					 wm8994_mic_irq, "Mic 1 short",
+					 wm8994);
+		if (ret != 0)
+			dev_warn(codec->dev,
+				 "Failed to request Mic1 short IRQ: %d\n",
+				 ret);
 
-	ret = wm8994_request_irq(codec->control_data, WM8994_IRQ_MIC2_DET,
-				 wm8994_mic_irq, "Mic 2 detect", wm8994);
-	if (ret != 0)
-		dev_warn(codec->dev,
-			 "Failed to request Mic2 detect IRQ: %d\n", ret);
+		ret = wm8994_request_irq(codec->control_data,
+					 WM8994_IRQ_MIC2_DET,
+					 wm8994_mic_irq, "Mic 2 detect",
+					 wm8994);
+		if (ret != 0)
+			dev_warn(codec->dev,
+				 "Failed to request Mic2 detect IRQ: %d\n",
+				 ret);
 
-	ret = wm8994_request_irq(codec->control_data, WM8994_IRQ_MIC2_SHRT,
-				 wm8994_mic_irq, "Mic 2 short", wm8994);
-	if (ret != 0)
-		dev_warn(codec->dev,
-			 "Failed to request Mic2 short IRQ: %d\n", ret);
+		ret = wm8994_request_irq(codec->control_data,
+					 WM8994_IRQ_MIC2_SHRT,
+					 wm8994_mic_irq, "Mic 2 short",
+					 wm8994);
+		if (ret != 0)
+			dev_warn(codec->dev,
+				 "Failed to request Mic2 short IRQ: %d\n",
+				 ret);
+		break;
+
+	case WM8958:
+		ret = wm8994_request_irq(codec->control_data,
+					 WM8994_IRQ_MIC1_DET,
+					 wm8958_mic_irq, "Mic detect",
+					 wm8994);
+		if (ret != 0)
+			dev_warn(codec->dev,
+				 "Failed to request Mic detect IRQ: %d\n",
+				 ret);
+		break;
+	}
 
 	/* Remember if AIFnLRCLK is configured as a GPIO.  This should be
 	 * configured on init - if a system wants to do this dynamically
@@ -4034,10 +3105,36 @@
 	wm_hubs_add_analogue_controls(codec);
 	snd_soc_add_controls(codec, wm8994_snd_controls,
 			     ARRAY_SIZE(wm8994_snd_controls));
-	snd_soc_dapm_new_controls(codec, wm8994_dapm_widgets,
+	snd_soc_dapm_new_controls(dapm, wm8994_dapm_widgets,
 				  ARRAY_SIZE(wm8994_dapm_widgets));
+
+	switch (control->type) {
+	case WM8994:
+		snd_soc_dapm_new_controls(dapm, wm8994_specific_dapm_widgets,
+					  ARRAY_SIZE(wm8994_specific_dapm_widgets));
+		break;
+	case WM8958:
+		snd_soc_add_controls(codec, wm8958_snd_controls,
+				     ARRAY_SIZE(wm8958_snd_controls));
+		snd_soc_dapm_new_controls(dapm, wm8958_dapm_widgets,
+					  ARRAY_SIZE(wm8958_dapm_widgets));
+		break;
+	}
+
 	wm_hubs_add_analogue_routes(codec, 0, 0);
-	snd_soc_dapm_add_routes(codec, intercon, ARRAY_SIZE(intercon));
+	snd_soc_dapm_add_routes(dapm, intercon, ARRAY_SIZE(intercon));
+
+	switch (control->type) {
+	case WM8994:
+		snd_soc_dapm_add_routes(dapm, wm8994_intercon,
+					ARRAY_SIZE(wm8994_intercon));
+		break;
+	case WM8958:
+		snd_soc_dapm_add_routes(dapm, wm8958_intercon,
+					ARRAY_SIZE(wm8958_intercon));
+		break;
+	}
 
 	return 0;
 
@@ -4054,13 +3151,29 @@
 static int  wm8994_codec_remove(struct snd_soc_codec *codec)
 {
 	struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
+	struct wm8994 *control = codec->control_data;
 
 	wm8994_set_bias_level(codec, SND_SOC_BIAS_OFF);
 
-	wm8994_free_irq(codec->control_data, WM8994_IRQ_MIC2_SHRT, wm8994);
-	wm8994_free_irq(codec->control_data, WM8994_IRQ_MIC2_DET, wm8994);
-	wm8994_free_irq(codec->control_data, WM8994_IRQ_MIC1_SHRT, wm8994);
-	wm8994_free_irq(codec->control_data, WM8994_IRQ_MIC1_DET, wm8994);
+	pm_runtime_disable(codec->dev);
+
+	switch (control->type) {
+	case WM8994:
+		wm8994_free_irq(codec->control_data, WM8994_IRQ_MIC2_SHRT,
+				wm8994);
+		wm8994_free_irq(codec->control_data, WM8994_IRQ_MIC2_DET,
+				wm8994);
+		wm8994_free_irq(codec->control_data, WM8994_IRQ_MIC1_SHRT,
+				wm8994);
+		wm8994_free_irq(codec->control_data, WM8994_IRQ_MIC1_DET,
+				wm8994);
+		break;
+
+	case WM8958:
+		wm8994_free_irq(codec->control_data, WM8994_IRQ_MIC1_DET,
+				wm8994);
+		break;
+	}
 	kfree(wm8994->retune_mobile_texts);
 	kfree(wm8994->drc_texts);
 	kfree(wm8994);
@@ -4073,11 +3186,16 @@
 	.remove =	wm8994_codec_remove,
 	.suspend =	wm8994_suspend,
 	.resume =	wm8994_resume,
-	.read = wm8994_read,
-	.write = wm8994_write,
+	.read =		wm8994_read,
+	.write =	wm8994_write,
 	.readable_register = wm8994_readable,
 	.volatile_register = wm8994_volatile,
 	.set_bias_level = wm8994_set_bias_level,
+
+	.reg_cache_size = WM8994_CACHE_SIZE,
+	.reg_cache_default = wm8994_reg_defaults,
+	.reg_word_size = 2,
+	.compress_type = SND_SOC_RBTREE_COMPRESSION,
 };
 
 static int __devinit wm8994_probe(struct platform_device *pdev)
diff --git a/sound/soc/codecs/wm8994.h b/sound/soc/codecs/wm8994.h
index d8dce26..0c355bf 100644
--- a/sound/soc/codecs/wm8994.h
+++ b/sound/soc/codecs/wm8994.h
@@ -28,7 +28,21 @@
 #define WM8994_FLL_SRC_LRCLK  3
 #define WM8994_FLL_SRC_BCLK   4
 
+typedef void (*wm8958_micdet_cb)(u16 status, void *data);
+
 int wm8994_mic_detect(struct snd_soc_codec *codec, struct snd_soc_jack *jack,
 		      int micbias, int det, int shrt);
+int wm8958_mic_detect(struct snd_soc_codec *codec, struct snd_soc_jack *jack,
+		      wm8958_micdet_cb cb, void *cb_data);
+
+#define WM8994_CACHE_SIZE 1570
+
+struct wm8994_access_mask {
+	unsigned short readable;   /* Mask of readable bits */
+	unsigned short writable;   /* Mask of writable bits */
+};
+
+extern const struct wm8994_access_mask wm8994_access_masks[WM8994_CACHE_SIZE];
+extern const __devinitdata u16 wm8994_reg_defaults[WM8994_CACHE_SIZE];
 
 #endif
diff --git a/sound/soc/codecs/wm8995.c b/sound/soc/codecs/wm8995.c
new file mode 100644
index 0000000..6045cbd
--- /dev/null
+++ b/sound/soc/codecs/wm8995.c
@@ -0,0 +1,1818 @@
+/*
+ * wm8995.c  --  WM8995 ALSA SoC Audio driver
+ *
+ * Copyright 2010 Wolfson Microelectronics plc
+ *
+ * Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com>
+ *
+ * Based on wm8994.c and wm_hubs.c by Mark Brown
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/pm.h>
+#include <linux/i2c.h>
+#include <linux/spi/spi.h>
+#include <linux/slab.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <sound/initval.h>
+#include <sound/tlv.h>
+
+#include "wm8995.h"
+
+static const u16 wm8995_reg_defs[WM8995_MAX_REGISTER + 1] = {
+	[0]     = 0x8995, [5]     = 0x0100, [16]    = 0x000b, [17]    = 0x000b,
+	[24]    = 0x02c0, [25]    = 0x02c0, [26]    = 0x02c0, [27]    = 0x02c0,
+	[28]    = 0x000f, [32]    = 0x0005, [33]    = 0x0005, [40]    = 0x0003,
+	[41]    = 0x0013, [48]    = 0x0004, [56]    = 0x09f8, [64]    = 0x1f25,
+	[69]    = 0x0004, [82]    = 0xaaaa, [84]    = 0x2a2a, [146]   = 0x0060,
+	[256]   = 0x0002, [257]   = 0x8004, [520]   = 0x0010, [528]   = 0x0083,
+	[529]   = 0x0083, [548]   = 0x0c80, [580]   = 0x0c80, [768]   = 0x4050,
+	[769]   = 0x4000, [771]   = 0x0040, [772]   = 0x0040, [773]   = 0x0040,
+	[774]   = 0x0004, [775]   = 0x0100, [784]   = 0x4050, [785]   = 0x4000,
+	[787]   = 0x0040, [788]   = 0x0040, [789]   = 0x0040, [1024]  = 0x00c0,
+	[1025]  = 0x00c0, [1026]  = 0x00c0, [1027]  = 0x00c0, [1028]  = 0x00c0,
+	[1029]  = 0x00c0, [1030]  = 0x00c0, [1031]  = 0x00c0, [1056]  = 0x0200,
+	[1057]  = 0x0010, [1058]  = 0x0200, [1059]  = 0x0010, [1088]  = 0x0098,
+	[1089]  = 0x0845, [1104]  = 0x0098, [1105]  = 0x0845, [1152]  = 0x6318,
+	[1153]  = 0x6300, [1154]  = 0x0fca, [1155]  = 0x0400, [1156]  = 0x00d8,
+	[1157]  = 0x1eb5, [1158]  = 0xf145, [1159]  = 0x0b75, [1160]  = 0x01c5,
+	[1161]  = 0x1c58, [1162]  = 0xf373, [1163]  = 0x0a54, [1164]  = 0x0558,
+	[1165]  = 0x168e, [1166]  = 0xf829, [1167]  = 0x07ad, [1168]  = 0x1103,
+	[1169]  = 0x0564, [1170]  = 0x0559, [1171]  = 0x4000, [1184]  = 0x6318,
+	[1185]  = 0x6300, [1186]  = 0x0fca, [1187]  = 0x0400, [1188]  = 0x00d8,
+	[1189]  = 0x1eb5, [1190]  = 0xf145, [1191]  = 0x0b75, [1192]  = 0x01c5,
+	[1193]  = 0x1c58, [1194]  = 0xf373, [1195]  = 0x0a54, [1196]  = 0x0558,
+	[1197]  = 0x168e, [1198]  = 0xf829, [1199]  = 0x07ad, [1200]  = 0x1103,
+	[1201]  = 0x0564, [1202]  = 0x0559, [1203]  = 0x4000, [1280]  = 0x00c0,
+	[1281]  = 0x00c0, [1282]  = 0x00c0, [1283]  = 0x00c0, [1312]  = 0x0200,
+	[1313]  = 0x0010, [1344]  = 0x0098, [1345]  = 0x0845, [1408]  = 0x6318,
+	[1409]  = 0x6300, [1410]  = 0x0fca, [1411]  = 0x0400, [1412]  = 0x00d8,
+	[1413]  = 0x1eb5, [1414]  = 0xf145, [1415]  = 0x0b75, [1416]  = 0x01c5,
+	[1417]  = 0x1c58, [1418]  = 0xf373, [1419]  = 0x0a54, [1420]  = 0x0558,
+	[1421]  = 0x168e, [1422]  = 0xf829, [1423]  = 0x07ad, [1424]  = 0x1103,
+	[1425]  = 0x0564, [1426]  = 0x0559, [1427]  = 0x4000, [1568]  = 0x0002,
+	[1792]  = 0xa100, [1793]  = 0xa101, [1794]  = 0xa101, [1795]  = 0xa101,
+	[1796]  = 0xa101, [1797]  = 0xa101, [1798]  = 0xa101, [1799]  = 0xa101,
+	[1800]  = 0xa101, [1801]  = 0xa101, [1802]  = 0xa101, [1803]  = 0xa101,
+	[1804]  = 0xa101, [1805]  = 0xa101, [1825]  = 0x0055, [1848]  = 0x3fff,
+	[1849]  = 0x1fff, [2049]  = 0x0001, [2050]  = 0x0069, [2056]  = 0x0002,
+	[2057]  = 0x0003, [2058]  = 0x0069, [12288] = 0x0001, [12289] = 0x0001,
+	[12291] = 0x0006, [12292] = 0x0040, [12293] = 0x0001, [12294] = 0x000f,
+	[12295] = 0x0006, [12296] = 0x0001, [12297] = 0x0003, [12298] = 0x0104,
+	[12300] = 0x0060, [12301] = 0x0011, [12302] = 0x0401, [12304] = 0x0050,
+	[12305] = 0x0003, [12306] = 0x0100, [12308] = 0x0051, [12309] = 0x0003,
+	[12310] = 0x0104, [12311] = 0x000a, [12312] = 0x0060, [12313] = 0x003b,
+	[12314] = 0x0502, [12315] = 0x0100, [12316] = 0x2fff, [12320] = 0x2fff,
+	[12324] = 0x2fff, [12328] = 0x2fff, [12332] = 0x2fff, [12336] = 0x2fff,
+	[12340] = 0x2fff, [12344] = 0x2fff, [12348] = 0x2fff, [12352] = 0x0001,
+	[12353] = 0x0001, [12355] = 0x0006, [12356] = 0x0040, [12357] = 0x0001,
+	[12358] = 0x000f, [12359] = 0x0006, [12360] = 0x0001, [12361] = 0x0003,
+	[12362] = 0x0104, [12364] = 0x0060, [12365] = 0x0011, [12366] = 0x0401,
+	[12368] = 0x0050, [12369] = 0x0003, [12370] = 0x0100, [12372] = 0x0060,
+	[12373] = 0x003b, [12374] = 0x0502, [12375] = 0x0100, [12376] = 0x2fff,
+	[12380] = 0x2fff, [12384] = 0x2fff, [12388] = 0x2fff, [12392] = 0x2fff,
+	[12396] = 0x2fff, [12400] = 0x2fff, [12404] = 0x2fff, [12408] = 0x2fff,
+	[12412] = 0x2fff, [12416] = 0x0001, [12417] = 0x0001, [12419] = 0x0006,
+	[12420] = 0x0040, [12421] = 0x0001, [12422] = 0x000f, [12423] = 0x0006,
+	[12424] = 0x0001, [12425] = 0x0003, [12426] = 0x0106, [12428] = 0x0061,
+	[12429] = 0x0011, [12430] = 0x0401, [12432] = 0x0050, [12433] = 0x0003,
+	[12434] = 0x0102, [12436] = 0x0051, [12437] = 0x0003, [12438] = 0x0106,
+	[12439] = 0x000a, [12440] = 0x0061, [12441] = 0x003b, [12442] = 0x0502,
+	[12443] = 0x0100, [12444] = 0x2fff, [12448] = 0x2fff, [12452] = 0x2fff,
+	[12456] = 0x2fff, [12460] = 0x2fff, [12464] = 0x2fff, [12468] = 0x2fff,
+	[12472] = 0x2fff, [12476] = 0x2fff, [12480] = 0x0001, [12481] = 0x0001,
+	[12483] = 0x0006, [12484] = 0x0040, [12485] = 0x0001, [12486] = 0x000f,
+	[12487] = 0x0006, [12488] = 0x0001, [12489] = 0x0003, [12490] = 0x0106,
+	[12492] = 0x0061, [12493] = 0x0011, [12494] = 0x0401, [12496] = 0x0050,
+	[12497] = 0x0003, [12498] = 0x0102, [12500] = 0x0061, [12501] = 0x003b,
+	[12502] = 0x0502, [12503] = 0x0100, [12504] = 0x2fff, [12508] = 0x2fff,
+	[12512] = 0x2fff, [12516] = 0x2fff, [12520] = 0x2fff, [12524] = 0x2fff,
+	[12528] = 0x2fff, [12532] = 0x2fff, [12536] = 0x2fff, [12540] = 0x2fff,
+	[12544] = 0x0060, [12546] = 0x0601, [12548] = 0x0050, [12550] = 0x0100,
+	[12552] = 0x0001, [12554] = 0x0104, [12555] = 0x0100, [12556] = 0x2fff,
+	[12560] = 0x2fff, [12564] = 0x2fff, [12568] = 0x2fff, [12572] = 0x2fff,
+	[12576] = 0x2fff, [12580] = 0x2fff, [12584] = 0x2fff, [12588] = 0x2fff,
+	[12592] = 0x2fff, [12596] = 0x2fff, [12600] = 0x2fff, [12604] = 0x2fff,
+	[12608] = 0x0061, [12610] = 0x0601, [12612] = 0x0050, [12614] = 0x0102,
+	[12616] = 0x0001, [12618] = 0x0106, [12619] = 0x0100, [12620] = 0x2fff,
+	[12624] = 0x2fff, [12628] = 0x2fff, [12632] = 0x2fff, [12636] = 0x2fff,
+	[12640] = 0x2fff, [12644] = 0x2fff, [12648] = 0x2fff, [12652] = 0x2fff,
+	[12656] = 0x2fff, [12660] = 0x2fff, [12664] = 0x2fff, [12668] = 0x2fff,
+	[12672] = 0x0060, [12674] = 0x0601, [12676] = 0x0061, [12678] = 0x0601,
+	[12680] = 0x0050, [12682] = 0x0300, [12684] = 0x0001, [12686] = 0x0304,
+	[12688] = 0x0040, [12690] = 0x000f, [12692] = 0x0001, [12695] = 0x0100
+};
+
+struct fll_config {
+	int src;
+	int in;
+	int out;
+};
+
+struct wm8995_priv {
+	enum snd_soc_control_type control_type;
+	int sysclk[2];
+	int mclk[2];
+	int aifclk[2];
+	struct fll_config fll[2], fll_suspend[2];
+};
+
+static const DECLARE_TLV_DB_SCALE(digital_tlv, -7200, 75, 1);
+static const DECLARE_TLV_DB_SCALE(in1lr_pga_tlv, -1650, 150, 0);
+static const DECLARE_TLV_DB_SCALE(in1l_boost_tlv, 0, 600, 0);
+static const DECLARE_TLV_DB_SCALE(sidetone_tlv, -3600, 150, 0);
+
+static const char *in1l_text[] = {
+	"Differential", "Single-ended IN1LN", "Single-ended IN1LP"
+};
+
+static const SOC_ENUM_SINGLE_DECL(in1l_enum, WM8995_LEFT_LINE_INPUT_CONTROL,
+				  2, in1l_text);
+
+static const char *in1r_text[] = {
+	"Differential", "Single-ended IN1RN", "Single-ended IN1RP"
+};
+
+static const SOC_ENUM_SINGLE_DECL(in1r_enum, WM8995_LEFT_LINE_INPUT_CONTROL,
+				  0, in1r_text);
+
+static const char *dmic_src_text[] = {
+	"DMICDAT1", "DMICDAT2", "DMICDAT3"
+};
+
+static const SOC_ENUM_SINGLE_DECL(dmic_src1_enum, WM8995_POWER_MANAGEMENT_5,
+				  8, dmic_src_text);
+static const SOC_ENUM_SINGLE_DECL(dmic_src2_enum, WM8995_POWER_MANAGEMENT_5,
+				  6, dmic_src_text);
+
+static const struct snd_kcontrol_new wm8995_snd_controls[] = {
+	SOC_DOUBLE_R_TLV("DAC1 Volume", WM8995_DAC1_LEFT_VOLUME,
+		WM8995_DAC1_RIGHT_VOLUME, 0, 96, 0, digital_tlv),
+	SOC_DOUBLE_R("DAC1 Switch", WM8995_DAC1_LEFT_VOLUME,
+		WM8995_DAC1_RIGHT_VOLUME, 9, 1, 1),
+
+	SOC_DOUBLE_R_TLV("DAC2 Volume", WM8995_DAC2_LEFT_VOLUME,
+		WM8995_DAC2_RIGHT_VOLUME, 0, 96, 0, digital_tlv),
+	SOC_DOUBLE_R("DAC2 Switch", WM8995_DAC2_LEFT_VOLUME,
+		WM8995_DAC2_RIGHT_VOLUME, 9, 1, 1),
+
+	SOC_DOUBLE_R_TLV("AIF1DAC1 Volume", WM8995_AIF1_DAC1_LEFT_VOLUME,
+		WM8995_AIF1_DAC1_RIGHT_VOLUME, 0, 96, 0, digital_tlv),
+	SOC_DOUBLE_R_TLV("AIF1DAC2 Volume", WM8995_AIF1_DAC2_LEFT_VOLUME,
+		WM8995_AIF1_DAC2_RIGHT_VOLUME, 0, 96, 0, digital_tlv),
+	SOC_DOUBLE_R_TLV("AIF2DAC Volume", WM8995_AIF2_DAC_LEFT_VOLUME,
+		WM8995_AIF2_DAC_RIGHT_VOLUME, 0, 96, 0, digital_tlv),
+
+	SOC_DOUBLE_R_TLV("IN1LR Volume", WM8995_LEFT_LINE_INPUT_1_VOLUME,
+		WM8995_RIGHT_LINE_INPUT_1_VOLUME, 0, 31, 0, in1lr_pga_tlv),
+
+	SOC_SINGLE_TLV("IN1L Boost", WM8995_LEFT_LINE_INPUT_CONTROL,
+		4, 3, 0, in1l_boost_tlv),
+
+	SOC_ENUM("IN1L Mode", in1l_enum),
+	SOC_ENUM("IN1R Mode", in1r_enum),
+
+	SOC_ENUM("DMIC1 SRC", dmic_src1_enum),
+	SOC_ENUM("DMIC2 SRC", dmic_src2_enum),
+
+	SOC_DOUBLE_TLV("DAC1 Sidetone Volume", WM8995_DAC1_MIXER_VOLUMES, 0, 5,
+		24, 0, sidetone_tlv),
+	SOC_DOUBLE_TLV("DAC2 Sidetone Volume", WM8995_DAC2_MIXER_VOLUMES, 0, 5,
+		24, 0, sidetone_tlv),
+
+	SOC_DOUBLE_R_TLV("AIF1ADC1 Volume", WM8995_AIF1_ADC1_LEFT_VOLUME,
+		WM8995_AIF1_ADC1_RIGHT_VOLUME, 0, 96, 0, digital_tlv),
+	SOC_DOUBLE_R_TLV("AIF1ADC2 Volume", WM8995_AIF1_ADC2_LEFT_VOLUME,
+		WM8995_AIF1_ADC2_RIGHT_VOLUME, 0, 96, 0, digital_tlv),
+	SOC_DOUBLE_R_TLV("AIF2ADC Volume", WM8995_AIF2_ADC_LEFT_VOLUME,
+		WM8995_AIF2_ADC_RIGHT_VOLUME, 0, 96, 0, digital_tlv)
+};
+
+static void wm8995_update_class_w(struct snd_soc_codec *codec)
+{
+	int enable = 1;
+	int source = 0;  /* GCC flow analysis can't track enable */
+	int reg, reg_r;
+
+	/* We also need the same setting for L/R and only one path */
+	reg = snd_soc_read(codec, WM8995_DAC1_LEFT_MIXER_ROUTING);
+	switch (reg) {
+	case WM8995_AIF2DACL_TO_DAC1L:
+		dev_dbg(codec->dev, "Class W source AIF2DAC\n");
+		source = 2 << WM8995_CP_DYN_SRC_SEL_SHIFT;
+		break;
+	case WM8995_AIF1DAC2L_TO_DAC1L:
+		dev_dbg(codec->dev, "Class W source AIF1DAC2\n");
+		source = 1 << WM8995_CP_DYN_SRC_SEL_SHIFT;
+		break;
+	case WM8995_AIF1DAC1L_TO_DAC1L:
+		dev_dbg(codec->dev, "Class W source AIF1DAC1\n");
+		source = 0 << WM8995_CP_DYN_SRC_SEL_SHIFT;
+		break;
+	default:
+		dev_dbg(codec->dev, "DAC mixer setting: %x\n", reg);
+		enable = 0;
+		break;
+	}
+
+	reg_r = snd_soc_read(codec, WM8995_DAC1_RIGHT_MIXER_ROUTING);
+	if (reg_r != reg) {
+		dev_dbg(codec->dev, "Left and right DAC mixers different\n");
+		enable = 0;
+	}
+
+	if (enable) {
+		dev_dbg(codec->dev, "Class W enabled\n");
+		snd_soc_update_bits(codec, WM8995_CLASS_W_1,
+				    WM8995_CP_DYN_PWR_MASK |
+				    WM8995_CP_DYN_SRC_SEL_MASK,
+				    source | WM8995_CP_DYN_PWR);
+	} else {
+		dev_dbg(codec->dev, "Class W disabled\n");
+		snd_soc_update_bits(codec, WM8995_CLASS_W_1,
+				    WM8995_CP_DYN_PWR_MASK, 0);
+	}
+}
+
+static int check_clk_sys(struct snd_soc_dapm_widget *source,
+			 struct snd_soc_dapm_widget *sink)
+{
+	unsigned int reg;
+	const char *clk;
+
+	reg = snd_soc_read(source->codec, WM8995_CLOCKING_1);
+	/* Check what we're currently using for CLK_SYS */
+	if (reg & WM8995_SYSCLK_SRC)
+		clk = "AIF2CLK";
+	else
+		clk = "AIF1CLK";
+	return !strcmp(source->name, clk);
+}
+
+static int wm8995_put_class_w(struct snd_kcontrol *kcontrol,
+			      struct snd_ctl_elem_value *ucontrol)
+{
+	struct snd_soc_dapm_widget *w;
+	struct snd_soc_codec *codec;
+	int ret;
+
+	w = snd_kcontrol_chip(kcontrol);
+	codec = w->codec;
+	ret = snd_soc_dapm_put_volsw(kcontrol, ucontrol);
+	wm8995_update_class_w(codec);
+	return ret;
+}
+
+static int hp_supply_event(struct snd_soc_dapm_widget *w,
+			   struct snd_kcontrol *kcontrol, int event)
+{
+	struct snd_soc_codec *codec;
+	struct wm8995_priv *wm8995;
+
+	codec = w->codec;
+	wm8995 = snd_soc_codec_get_drvdata(codec);
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		/* Enable the headphone amp */
+		snd_soc_update_bits(codec, WM8995_POWER_MANAGEMENT_1,
+				    WM8995_HPOUT1L_ENA_MASK |
+				    WM8995_HPOUT1R_ENA_MASK,
+				    WM8995_HPOUT1L_ENA |
+				    WM8995_HPOUT1R_ENA);
+
+		/* Enable the second stage */
+		snd_soc_update_bits(codec, WM8995_ANALOGUE_HP_1,
+				    WM8995_HPOUT1L_DLY_MASK |
+				    WM8995_HPOUT1R_DLY_MASK,
+				    WM8995_HPOUT1L_DLY |
+				    WM8995_HPOUT1R_DLY);
+		break;
+	case SND_SOC_DAPM_PRE_PMD:
+		snd_soc_update_bits(codec, WM8995_CHARGE_PUMP_1,
+				    WM8995_CP_ENA_MASK, 0);
+		break;
+	}
+
+	return 0;
+}
+
+static void dc_servo_cmd(struct snd_soc_codec *codec,
+			 unsigned int reg, unsigned int val, unsigned int mask)
+{
+	int timeout = 10;
+
+	dev_dbg(codec->dev, "%s: reg = %#x, val = %#x, mask = %#x\n",
+		__func__, reg, val, mask);
+
+	snd_soc_write(codec, reg, val);
+	while (timeout--) {
+		msleep(10);
+		val = snd_soc_read(codec, WM8995_DC_SERVO_READBACK_0);
+		if ((val & mask) == mask)
+			return;
+	}
+
+	dev_err(codec->dev, "Timed out waiting for DC Servo\n");
+}
+
+static int hp_event(struct snd_soc_dapm_widget *w,
+		    struct snd_kcontrol *kcontrol, int event)
+{
+	struct snd_soc_codec *codec;
+	unsigned int reg;
+
+	codec = w->codec;
+	reg = snd_soc_read(codec, WM8995_ANALOGUE_HP_1);
+
+	switch (event) {
+	case SND_SOC_DAPM_POST_PMU:
+		snd_soc_update_bits(codec, WM8995_CHARGE_PUMP_1,
+				    WM8995_CP_ENA_MASK, WM8995_CP_ENA);
+
+		msleep(5);
+
+		snd_soc_update_bits(codec, WM8995_POWER_MANAGEMENT_1,
+				    WM8995_HPOUT1L_ENA_MASK |
+				    WM8995_HPOUT1R_ENA_MASK,
+				    WM8995_HPOUT1L_ENA | WM8995_HPOUT1R_ENA);
+
+		udelay(20);
+
+		reg |= WM8995_HPOUT1L_DLY | WM8995_HPOUT1R_DLY;
+		snd_soc_write(codec, WM8995_ANALOGUE_HP_1, reg);
+
+		snd_soc_write(codec, WM8995_DC_SERVO_1, WM8995_DCS_ENA_CHAN_0 |
+			      WM8995_DCS_ENA_CHAN_1);
+
+		dc_servo_cmd(codec, WM8995_DC_SERVO_2,
+			     WM8995_DCS_TRIG_STARTUP_0 |
+			     WM8995_DCS_TRIG_STARTUP_1,
+			     WM8995_DCS_TRIG_DAC_WR_0 |
+			     WM8995_DCS_TRIG_DAC_WR_1);
+
+		reg |= WM8995_HPOUT1R_OUTP | WM8995_HPOUT1R_RMV_SHORT |
+		       WM8995_HPOUT1L_OUTP | WM8995_HPOUT1L_RMV_SHORT;
+		snd_soc_write(codec, WM8995_ANALOGUE_HP_1, reg);
+
+		break;
+	case SND_SOC_DAPM_PRE_PMD:
+		snd_soc_update_bits(codec, WM8995_ANALOGUE_HP_1,
+				    WM8995_HPOUT1L_OUTP_MASK |
+				    WM8995_HPOUT1R_OUTP_MASK |
+				    WM8995_HPOUT1L_RMV_SHORT_MASK |
+				    WM8995_HPOUT1R_RMV_SHORT_MASK, 0);
+
+		snd_soc_update_bits(codec, WM8995_ANALOGUE_HP_1,
+				    WM8995_HPOUT1L_DLY_MASK |
+				    WM8995_HPOUT1R_DLY_MASK, 0);
+
+		snd_soc_write(codec, WM8995_DC_SERVO_1, 0);
+
+		snd_soc_update_bits(codec, WM8995_POWER_MANAGEMENT_1,
+				    WM8995_HPOUT1L_ENA_MASK |
+				    WM8995_HPOUT1R_ENA_MASK,
+				    0);
+		break;
+	}
+
+	return 0;
+}
+
+static int configure_aif_clock(struct snd_soc_codec *codec, int aif)
+{
+	struct wm8995_priv *wm8995;
+	int rate;
+	int reg1 = 0;
+	int offset;
+
+	wm8995 = snd_soc_codec_get_drvdata(codec);
+
+	if (aif)
+		offset = 4;
+	else
+		offset = 0;
+
+	switch (wm8995->sysclk[aif]) {
+	case WM8995_SYSCLK_MCLK1:
+		rate = wm8995->mclk[0];
+		break;
+	case WM8995_SYSCLK_MCLK2:
+		reg1 |= 0x8;
+		rate = wm8995->mclk[1];
+		break;
+	case WM8995_SYSCLK_FLL1:
+		reg1 |= 0x10;
+		rate = wm8995->fll[0].out;
+		break;
+	case WM8995_SYSCLK_FLL2:
+		reg1 |= 0x18;
+		rate = wm8995->fll[1].out;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (rate >= 13500000) {
+		rate /= 2;
+		reg1 |= WM8995_AIF1CLK_DIV;
+
+		dev_dbg(codec->dev, "Dividing AIF%d clock to %dHz\n",
+			aif + 1, rate);
+	}
+
+	wm8995->aifclk[aif] = rate;
+
+	snd_soc_update_bits(codec, WM8995_AIF1_CLOCKING_1 + offset,
+			    WM8995_AIF1CLK_SRC_MASK | WM8995_AIF1CLK_DIV_MASK,
+			    reg1);
+	return 0;
+}
+
+static int configure_clock(struct snd_soc_codec *codec)
+{
+	struct wm8995_priv *wm8995;
+	int old, new;
+
+	wm8995 = snd_soc_codec_get_drvdata(codec);
+
+	/* Bring up the AIF clocks first */
+	configure_aif_clock(codec, 0);
+	configure_aif_clock(codec, 1);
+
+	/*
+	 * Then switch CLK_SYS over to the higher of them; a change
+	 * can only happen as a result of a clocking change which can
+	 * only be made outside of DAPM so we can safely redo the
+	 * clocking.
+	 */
+
+	/* If they're equal it doesn't matter which is used */
+	if (wm8995->aifclk[0] == wm8995->aifclk[1])
+		return 0;
+
+	if (wm8995->aifclk[0] < wm8995->aifclk[1])
+		new = WM8995_SYSCLK_SRC;
+	else
+		new = 0;
+
+	old = snd_soc_read(codec, WM8995_CLOCKING_1) & WM8995_SYSCLK_SRC;
+
+	/* If there's no change then we're done. */
+	if (old == new)
+		return 0;
+
+	snd_soc_update_bits(codec, WM8995_CLOCKING_1,
+			    WM8995_SYSCLK_SRC_MASK, new);
+
+	snd_soc_dapm_sync(&codec->dapm);
+
+	return 0;
+}
+
+static int clk_sys_event(struct snd_soc_dapm_widget *w,
+			 struct snd_kcontrol *kcontrol, int event)
+{
+	struct snd_soc_codec *codec;
+
+	codec = w->codec;
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		return configure_clock(codec);
+
+	case SND_SOC_DAPM_POST_PMD:
+		configure_clock(codec);
+		break;
+	}
+
+	return 0;
+}
+
+static const char *sidetone_text[] = {
+	"ADC/DMIC1", "DMIC2",
+};
+
+static const struct soc_enum sidetone1_enum =
+	SOC_ENUM_SINGLE(WM8995_SIDETONE, 0, 2, sidetone_text);
+
+static const struct snd_kcontrol_new sidetone1_mux =
+	SOC_DAPM_ENUM("Left Sidetone Mux", sidetone1_enum);
+
+static const struct soc_enum sidetone2_enum =
+	SOC_ENUM_SINGLE(WM8995_SIDETONE, 1, 2, sidetone_text);
+
+static const struct snd_kcontrol_new sidetone2_mux =
+	SOC_DAPM_ENUM("Right Sidetone Mux", sidetone2_enum);
+
+static const struct snd_kcontrol_new aif1adc1l_mix[] = {
+	SOC_DAPM_SINGLE("ADC/DMIC Switch", WM8995_AIF1_ADC1_LEFT_MIXER_ROUTING,
+		1, 1, 0),
+	SOC_DAPM_SINGLE("AIF2 Switch", WM8995_AIF1_ADC1_LEFT_MIXER_ROUTING,
+		0, 1, 0),
+};
+
+static const struct snd_kcontrol_new aif1adc1r_mix[] = {
+	SOC_DAPM_SINGLE("ADC/DMIC Switch", WM8995_AIF1_ADC1_RIGHT_MIXER_ROUTING,
+		1, 1, 0),
+	SOC_DAPM_SINGLE("AIF2 Switch", WM8995_AIF1_ADC1_RIGHT_MIXER_ROUTING,
+		0, 1, 0),
+};
+
+static const struct snd_kcontrol_new aif1adc2l_mix[] = {
+	SOC_DAPM_SINGLE("DMIC Switch", WM8995_AIF1_ADC2_LEFT_MIXER_ROUTING,
+		1, 1, 0),
+	SOC_DAPM_SINGLE("AIF2 Switch", WM8995_AIF1_ADC2_LEFT_MIXER_ROUTING,
+		0, 1, 0),
+};
+
+static const struct snd_kcontrol_new aif1adc2r_mix[] = {
+	SOC_DAPM_SINGLE("DMIC Switch", WM8995_AIF1_ADC2_RIGHT_MIXER_ROUTING,
+		1, 1, 0),
+	SOC_DAPM_SINGLE("AIF2 Switch", WM8995_AIF1_ADC2_RIGHT_MIXER_ROUTING,
+		0, 1, 0),
+};
+
+static const struct snd_kcontrol_new dac1l_mix[] = {
+	WM8995_CLASS_W_SWITCH("Right Sidetone Switch", WM8995_DAC1_LEFT_MIXER_ROUTING,
+		5, 1, 0),
+	WM8995_CLASS_W_SWITCH("Left Sidetone Switch", WM8995_DAC1_LEFT_MIXER_ROUTING,
+		4, 1, 0),
+	WM8995_CLASS_W_SWITCH("AIF2 Switch", WM8995_DAC1_LEFT_MIXER_ROUTING,
+		2, 1, 0),
+	WM8995_CLASS_W_SWITCH("AIF1.2 Switch", WM8995_DAC1_LEFT_MIXER_ROUTING,
+		1, 1, 0),
+	WM8995_CLASS_W_SWITCH("AIF1.1 Switch", WM8995_DAC1_LEFT_MIXER_ROUTING,
+		0, 1, 0),
+};
+
+static const struct snd_kcontrol_new dac1r_mix[] = {
+	WM8995_CLASS_W_SWITCH("Right Sidetone Switch", WM8995_DAC1_RIGHT_MIXER_ROUTING,
+		5, 1, 0),
+	WM8995_CLASS_W_SWITCH("Left Sidetone Switch", WM8995_DAC1_RIGHT_MIXER_ROUTING,
+		4, 1, 0),
+	WM8995_CLASS_W_SWITCH("AIF2 Switch", WM8995_DAC1_RIGHT_MIXER_ROUTING,
+		2, 1, 0),
+	WM8995_CLASS_W_SWITCH("AIF1.2 Switch", WM8995_DAC1_RIGHT_MIXER_ROUTING,
+		1, 1, 0),
+	WM8995_CLASS_W_SWITCH("AIF1.1 Switch", WM8995_DAC1_RIGHT_MIXER_ROUTING,
+		0, 1, 0),
+};
+
+static const struct snd_kcontrol_new aif2dac2l_mix[] = {
+	SOC_DAPM_SINGLE("Right Sidetone Switch", WM8995_DAC2_LEFT_MIXER_ROUTING,
+		5, 1, 0),
+	SOC_DAPM_SINGLE("Left Sidetone Switch", WM8995_DAC2_LEFT_MIXER_ROUTING,
+		4, 1, 0),
+	SOC_DAPM_SINGLE("AIF2 Switch", WM8995_DAC2_LEFT_MIXER_ROUTING,
+		2, 1, 0),
+	SOC_DAPM_SINGLE("AIF1.2 Switch", WM8995_DAC2_LEFT_MIXER_ROUTING,
+		1, 1, 0),
+	SOC_DAPM_SINGLE("AIF1.1 Switch", WM8995_DAC2_LEFT_MIXER_ROUTING,
+		0, 1, 0),
+};
+
+static const struct snd_kcontrol_new aif2dac2r_mix[] = {
+	SOC_DAPM_SINGLE("Right Sidetone Switch", WM8995_DAC2_RIGHT_MIXER_ROUTING,
+		5, 1, 0),
+	SOC_DAPM_SINGLE("Left Sidetone Switch", WM8995_DAC2_RIGHT_MIXER_ROUTING,
+		4, 1, 0),
+	SOC_DAPM_SINGLE("AIF2 Switch", WM8995_DAC2_RIGHT_MIXER_ROUTING,
+		2, 1, 0),
+	SOC_DAPM_SINGLE("AIF1.2 Switch", WM8995_DAC2_RIGHT_MIXER_ROUTING,
+		1, 1, 0),
+	SOC_DAPM_SINGLE("AIF1.1 Switch", WM8995_DAC2_RIGHT_MIXER_ROUTING,
+		0, 1, 0),
+};
+
+static const struct snd_kcontrol_new in1l_pga =
+	SOC_DAPM_SINGLE("IN1L Switch", WM8995_POWER_MANAGEMENT_2, 5, 1, 0);
+
+static const struct snd_kcontrol_new in1r_pga =
+	SOC_DAPM_SINGLE("IN1R Switch", WM8995_POWER_MANAGEMENT_2, 4, 1, 0);
+
+static const char *adc_mux_text[] = {
+	"ADC",
+	"DMIC",
+};
+
+static const struct soc_enum adc_enum =
+	SOC_ENUM_SINGLE(0, 0, 2, adc_mux_text);
+
+static const struct snd_kcontrol_new adcl_mux =
+	SOC_DAPM_ENUM_VIRT("ADCL Mux", adc_enum);
+
+static const struct snd_kcontrol_new adcr_mux =
+	SOC_DAPM_ENUM_VIRT("ADCR Mux", adc_enum);
+
+static const char *spk_src_text[] = {
+	"DAC1L", "DAC1R", "DAC2L", "DAC2R"
+};
+
+static const SOC_ENUM_SINGLE_DECL(spk1l_src_enum, WM8995_LEFT_PDM_SPEAKER_1,
+				  0, spk_src_text);
+static const SOC_ENUM_SINGLE_DECL(spk1r_src_enum, WM8995_RIGHT_PDM_SPEAKER_1,
+				  0, spk_src_text);
+static const SOC_ENUM_SINGLE_DECL(spk2l_src_enum, WM8995_LEFT_PDM_SPEAKER_2,
+				  0, spk_src_text);
+static const SOC_ENUM_SINGLE_DECL(spk2r_src_enum, WM8995_RIGHT_PDM_SPEAKER_2,
+				  0, spk_src_text);
+
+static const struct snd_kcontrol_new spk1l_mux =
+	SOC_DAPM_ENUM("SPK1L SRC", spk1l_src_enum);
+static const struct snd_kcontrol_new spk1r_mux =
+	SOC_DAPM_ENUM("SPK1R SRC", spk1r_src_enum);
+static const struct snd_kcontrol_new spk2l_mux =
+	SOC_DAPM_ENUM("SPK2L SRC", spk2l_src_enum);
+static const struct snd_kcontrol_new spk2r_mux =
+	SOC_DAPM_ENUM("SPK2R SRC", spk2r_src_enum);
+
+static const struct snd_soc_dapm_widget wm8995_dapm_widgets[] = {
+	SND_SOC_DAPM_INPUT("DMIC1DAT"),
+	SND_SOC_DAPM_INPUT("DMIC2DAT"),
+
+	SND_SOC_DAPM_INPUT("IN1L"),
+	SND_SOC_DAPM_INPUT("IN1R"),
+
+	SND_SOC_DAPM_MIXER("IN1L PGA", SND_SOC_NOPM, 0, 0,
+		&in1l_pga, 1),
+	SND_SOC_DAPM_MIXER("IN1R PGA", SND_SOC_NOPM, 0, 0,
+		&in1r_pga, 1),
+
+	SND_SOC_DAPM_MICBIAS("MICBIAS1", WM8995_POWER_MANAGEMENT_1, 8, 0),
+	SND_SOC_DAPM_MICBIAS("MICBIAS2", WM8995_POWER_MANAGEMENT_1, 9, 0),
+
+	SND_SOC_DAPM_SUPPLY("AIF1CLK", WM8995_AIF1_CLOCKING_1, 0, 0, NULL, 0),
+	SND_SOC_DAPM_SUPPLY("AIF2CLK", WM8995_AIF2_CLOCKING_1, 0, 0, NULL, 0),
+	SND_SOC_DAPM_SUPPLY("DSP1CLK", WM8995_CLOCKING_1, 3, 0, NULL, 0),
+	SND_SOC_DAPM_SUPPLY("DSP2CLK", WM8995_CLOCKING_1, 2, 0, NULL, 0),
+	SND_SOC_DAPM_SUPPLY("SYSDSPCLK", WM8995_CLOCKING_1, 1, 0, NULL, 0),
+	SND_SOC_DAPM_SUPPLY("CLK_SYS", SND_SOC_NOPM, 0, 0, clk_sys_event,
+		SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
+
+	SND_SOC_DAPM_AIF_OUT("AIF1ADC1L", "AIF1 Capture", 0,
+		WM8995_POWER_MANAGEMENT_3, 9, 0),
+	SND_SOC_DAPM_AIF_OUT("AIF1ADC1R", "AIF1 Capture", 0,
+		WM8995_POWER_MANAGEMENT_3, 8, 0),
+	SND_SOC_DAPM_AIF_OUT("AIF1ADCDAT", "AIF1 Capture", 0,
+		SND_SOC_NOPM, 0, 0),
+	SND_SOC_DAPM_AIF_OUT("AIF1ADC2L", "AIF1 Capture",
+		0, WM8995_POWER_MANAGEMENT_3, 11, 0),
+	SND_SOC_DAPM_AIF_OUT("AIF1ADC2R", "AIF1 Capture",
+		0, WM8995_POWER_MANAGEMENT_3, 10, 0),
+
+	SND_SOC_DAPM_VIRT_MUX("ADCL Mux", SND_SOC_NOPM, 1, 0,
+		&adcl_mux),
+	SND_SOC_DAPM_VIRT_MUX("ADCR Mux", SND_SOC_NOPM, 0, 0,
+		&adcr_mux),
+
+	SND_SOC_DAPM_ADC("DMIC2L", NULL, WM8995_POWER_MANAGEMENT_3, 5, 0),
+	SND_SOC_DAPM_ADC("DMIC2R", NULL, WM8995_POWER_MANAGEMENT_3, 4, 0),
+	SND_SOC_DAPM_ADC("DMIC1L", NULL, WM8995_POWER_MANAGEMENT_3, 3, 0),
+	SND_SOC_DAPM_ADC("DMIC1R", NULL, WM8995_POWER_MANAGEMENT_3, 2, 0),
+
+	SND_SOC_DAPM_ADC("ADCL", NULL, WM8995_POWER_MANAGEMENT_3, 1, 0),
+	SND_SOC_DAPM_ADC("ADCR", NULL, WM8995_POWER_MANAGEMENT_3, 0, 0),
+
+	SND_SOC_DAPM_MIXER("AIF1ADC1L Mixer", SND_SOC_NOPM, 0, 0,
+		aif1adc1l_mix, ARRAY_SIZE(aif1adc1l_mix)),
+	SND_SOC_DAPM_MIXER("AIF1ADC1R Mixer", SND_SOC_NOPM, 0, 0,
+		aif1adc1r_mix, ARRAY_SIZE(aif1adc1r_mix)),
+	SND_SOC_DAPM_MIXER("AIF1ADC2L Mixer", SND_SOC_NOPM, 0, 0,
+		aif1adc2l_mix, ARRAY_SIZE(aif1adc2l_mix)),
+	SND_SOC_DAPM_MIXER("AIF1ADC2R Mixer", SND_SOC_NOPM, 0, 0,
+		aif1adc2r_mix, ARRAY_SIZE(aif1adc2r_mix)),
+
+	SND_SOC_DAPM_AIF_IN("AIF1DAC1L", NULL, 0, WM8995_POWER_MANAGEMENT_4,
+		9, 0),
+	SND_SOC_DAPM_AIF_IN("AIF1DAC1R", NULL, 0, WM8995_POWER_MANAGEMENT_4,
+		8, 0),
+	SND_SOC_DAPM_AIF_IN("AIF1DACDAT", "AIF1 Playback", 0, SND_SOC_NOPM,
+		0, 0),
+
+	SND_SOC_DAPM_AIF_IN("AIF1DAC2L", NULL, 0, WM8995_POWER_MANAGEMENT_4,
+		11, 0),
+	SND_SOC_DAPM_AIF_IN("AIF1DAC2R", NULL, 0, WM8995_POWER_MANAGEMENT_4,
+		10, 0),
+
+	SND_SOC_DAPM_MIXER("AIF2DAC2L Mixer", SND_SOC_NOPM, 0, 0,
+		aif2dac2l_mix, ARRAY_SIZE(aif2dac2l_mix)),
+	SND_SOC_DAPM_MIXER("AIF2DAC2R Mixer", SND_SOC_NOPM, 0, 0,
+		aif2dac2r_mix, ARRAY_SIZE(aif2dac2r_mix)),
+
+	SND_SOC_DAPM_DAC("DAC2L", NULL, WM8995_POWER_MANAGEMENT_4, 3, 0),
+	SND_SOC_DAPM_DAC("DAC2R", NULL, WM8995_POWER_MANAGEMENT_4, 2, 0),
+	SND_SOC_DAPM_DAC("DAC1L", NULL, WM8995_POWER_MANAGEMENT_4, 1, 0),
+	SND_SOC_DAPM_DAC("DAC1R", NULL, WM8995_POWER_MANAGEMENT_4, 0, 0),
+
+	SND_SOC_DAPM_MIXER("DAC1L Mixer", SND_SOC_NOPM, 0, 0, dac1l_mix,
+		ARRAY_SIZE(dac1l_mix)),
+	SND_SOC_DAPM_MIXER("DAC1R Mixer", SND_SOC_NOPM, 0, 0, dac1r_mix,
+		ARRAY_SIZE(dac1r_mix)),
+
+	SND_SOC_DAPM_MUX("Left Sidetone", SND_SOC_NOPM, 0, 0, &sidetone1_mux),
+	SND_SOC_DAPM_MUX("Right Sidetone", SND_SOC_NOPM, 0, 0, &sidetone2_mux),
+
+	SND_SOC_DAPM_PGA_E("Headphone PGA", SND_SOC_NOPM, 0, 0, NULL, 0,
+		hp_event, SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
+
+	SND_SOC_DAPM_SUPPLY("Headphone Supply", SND_SOC_NOPM, 0, 0,
+		hp_supply_event, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_PRE_PMD),
+
+	SND_SOC_DAPM_MUX("SPK1L Driver", WM8995_LEFT_PDM_SPEAKER_1,
+		4, 0, &spk1l_mux),
+	SND_SOC_DAPM_MUX("SPK1R Driver", WM8995_RIGHT_PDM_SPEAKER_1,
+		4, 0, &spk1r_mux),
+	SND_SOC_DAPM_MUX("SPK2L Driver", WM8995_LEFT_PDM_SPEAKER_2,
+		4, 0, &spk2l_mux),
+	SND_SOC_DAPM_MUX("SPK2R Driver", WM8995_RIGHT_PDM_SPEAKER_2,
+		4, 0, &spk2r_mux),
+
+	SND_SOC_DAPM_SUPPLY("LDO2", WM8995_POWER_MANAGEMENT_2, 1, 0, NULL, 0),
+
+	SND_SOC_DAPM_OUTPUT("HP1L"),
+	SND_SOC_DAPM_OUTPUT("HP1R"),
+	SND_SOC_DAPM_OUTPUT("SPK1L"),
+	SND_SOC_DAPM_OUTPUT("SPK1R"),
+	SND_SOC_DAPM_OUTPUT("SPK2L"),
+	SND_SOC_DAPM_OUTPUT("SPK2R")
+};
+
+static const struct snd_soc_dapm_route wm8995_intercon[] = {
+	{ "CLK_SYS", NULL, "AIF1CLK", check_clk_sys },
+	{ "CLK_SYS", NULL, "AIF2CLK", check_clk_sys },
+
+	{ "DSP1CLK", NULL, "CLK_SYS" },
+	{ "DSP2CLK", NULL, "CLK_SYS" },
+	{ "SYSDSPCLK", NULL, "CLK_SYS" },
+
+	{ "AIF1ADC1L", NULL, "AIF1CLK" },
+	{ "AIF1ADC1L", NULL, "DSP1CLK" },
+	{ "AIF1ADC1R", NULL, "AIF1CLK" },
+	{ "AIF1ADC1R", NULL, "DSP1CLK" },
+	{ "AIF1ADC1R", NULL, "SYSDSPCLK" },
+
+	{ "AIF1ADC2L", NULL, "AIF1CLK" },
+	{ "AIF1ADC2L", NULL, "DSP1CLK" },
+	{ "AIF1ADC2R", NULL, "AIF1CLK" },
+	{ "AIF1ADC2R", NULL, "DSP1CLK" },
+	{ "AIF1ADC2R", NULL, "SYSDSPCLK" },
+
+	{ "DMIC1L", NULL, "DMIC1DAT" },
+	{ "DMIC1L", NULL, "CLK_SYS" },
+	{ "DMIC1R", NULL, "DMIC1DAT" },
+	{ "DMIC1R", NULL, "CLK_SYS" },
+	{ "DMIC2L", NULL, "DMIC2DAT" },
+	{ "DMIC2L", NULL, "CLK_SYS" },
+	{ "DMIC2R", NULL, "DMIC2DAT" },
+	{ "DMIC2R", NULL, "CLK_SYS" },
+
+	{ "ADCL", NULL, "AIF1CLK" },
+	{ "ADCL", NULL, "DSP1CLK" },
+	{ "ADCL", NULL, "SYSDSPCLK" },
+
+	{ "ADCR", NULL, "AIF1CLK" },
+	{ "ADCR", NULL, "DSP1CLK" },
+	{ "ADCR", NULL, "SYSDSPCLK" },
+
+	{ "IN1L PGA", "IN1L Switch", "IN1L" },
+	{ "IN1R PGA", "IN1R Switch", "IN1R" },
+	{ "IN1L PGA", NULL, "LDO2" },
+	{ "IN1R PGA", NULL, "LDO2" },
+
+	{ "ADCL", NULL, "IN1L PGA" },
+	{ "ADCR", NULL, "IN1R PGA" },
+
+	{ "ADCL Mux", "ADC", "ADCL" },
+	{ "ADCL Mux", "DMIC", "DMIC1L" },
+	{ "ADCR Mux", "ADC", "ADCR" },
+	{ "ADCR Mux", "DMIC", "DMIC1R" },
+
+	/* AIF1 outputs */
+	{ "AIF1ADC1L", NULL, "AIF1ADC1L Mixer" },
+	{ "AIF1ADC1L Mixer", "ADC/DMIC Switch", "ADCL Mux" },
+
+	{ "AIF1ADC1R", NULL, "AIF1ADC1R Mixer" },
+	{ "AIF1ADC1R Mixer", "ADC/DMIC Switch", "ADCR Mux" },
+
+	{ "AIF1ADC2L", NULL, "AIF1ADC2L Mixer" },
+	{ "AIF1ADC2L Mixer", "DMIC Switch", "DMIC2L" },
+
+	{ "AIF1ADC2R", NULL, "AIF1ADC2R Mixer" },
+	{ "AIF1ADC2R Mixer", "DMIC Switch", "DMIC2R" },
+
+	/* Sidetone */
+	{ "Left Sidetone", "ADC/DMIC1", "AIF1ADC1L" },
+	{ "Left Sidetone", "DMIC2", "AIF1ADC2L" },
+	{ "Right Sidetone", "ADC/DMIC1", "AIF1ADC1R" },
+	{ "Right Sidetone", "DMIC2", "AIF1ADC2R" },
+
+	{ "AIF1DAC1L", NULL, "AIF1CLK" },
+	{ "AIF1DAC1L", NULL, "DSP1CLK" },
+	{ "AIF1DAC1R", NULL, "AIF1CLK" },
+	{ "AIF1DAC1R", NULL, "DSP1CLK" },
+	{ "AIF1DAC1R", NULL, "SYSDSPCLK" },
+
+	{ "AIF1DAC2L", NULL, "AIF1CLK" },
+	{ "AIF1DAC2L", NULL, "DSP1CLK" },
+	{ "AIF1DAC2R", NULL, "AIF1CLK" },
+	{ "AIF1DAC2R", NULL, "DSP1CLK" },
+	{ "AIF1DAC2R", NULL, "SYSDSPCLK" },
+
+	{ "DAC1L", NULL, "AIF1CLK" },
+	{ "DAC1L", NULL, "DSP1CLK" },
+	{ "DAC1L", NULL, "SYSDSPCLK" },
+
+	{ "DAC1R", NULL, "AIF1CLK" },
+	{ "DAC1R", NULL, "DSP1CLK" },
+	{ "DAC1R", NULL, "SYSDSPCLK" },
+
+	{ "AIF1DAC1L", NULL, "AIF1DACDAT" },
+	{ "AIF1DAC1R", NULL, "AIF1DACDAT" },
+	{ "AIF1DAC2L", NULL, "AIF1DACDAT" },
+	{ "AIF1DAC2R", NULL, "AIF1DACDAT" },
+
+	/* DAC1 inputs */
+	{ "DAC1L", NULL, "DAC1L Mixer" },
+	{ "DAC1L Mixer", "AIF1.1 Switch", "AIF1DAC1L" },
+	{ "DAC1L Mixer", "AIF1.2 Switch", "AIF1DAC2L" },
+	{ "DAC1L Mixer", "Left Sidetone Switch", "Left Sidetone" },
+	{ "DAC1L Mixer", "Right Sidetone Switch", "Right Sidetone" },
+
+	{ "DAC1R", NULL, "DAC1R Mixer" },
+	{ "DAC1R Mixer", "AIF1.1 Switch", "AIF1DAC1R" },
+	{ "DAC1R Mixer", "AIF1.2 Switch", "AIF1DAC2R" },
+	{ "DAC1R Mixer", "Left Sidetone Switch", "Left Sidetone" },
+	{ "DAC1R Mixer", "Right Sidetone Switch", "Right Sidetone" },
+
+	/* DAC2/AIF2 outputs */
+	{ "DAC2L", NULL, "AIF2DAC2L Mixer" },
+	{ "AIF2DAC2L Mixer", "AIF1.2 Switch", "AIF1DAC2L" },
+	{ "AIF2DAC2L Mixer", "AIF1.1 Switch", "AIF1DAC1L" },
+
+	{ "DAC2R", NULL, "AIF2DAC2R Mixer" },
+	{ "AIF2DAC2R Mixer", "AIF1.2 Switch", "AIF1DAC2R" },
+	{ "AIF2DAC2R Mixer", "AIF1.1 Switch", "AIF1DAC1R" },
+
+	/* Output stages */
+	{ "Headphone PGA", NULL, "DAC1L" },
+	{ "Headphone PGA", NULL, "DAC1R" },
+
+	{ "Headphone PGA", NULL, "DAC2L" },
+	{ "Headphone PGA", NULL, "DAC2R" },
+
+	{ "Headphone PGA", NULL, "Headphone Supply" },
+	{ "Headphone PGA", NULL, "CLK_SYS" },
+	{ "Headphone PGA", NULL, "LDO2" },
+
+	{ "HP1L", NULL, "Headphone PGA" },
+	{ "HP1R", NULL, "Headphone PGA" },
+
+	{ "SPK1L Driver", "DAC1L", "DAC1L" },
+	{ "SPK1L Driver", "DAC1R", "DAC1R" },
+	{ "SPK1L Driver", "DAC2L", "DAC2L" },
+	{ "SPK1L Driver", "DAC2R", "DAC2R" },
+	{ "SPK1L Driver", NULL, "CLK_SYS" },
+
+	{ "SPK1R Driver", "DAC1L", "DAC1L" },
+	{ "SPK1R Driver", "DAC1R", "DAC1R" },
+	{ "SPK1R Driver", "DAC2L", "DAC2L" },
+	{ "SPK1R Driver", "DAC2R", "DAC2R" },
+	{ "SPK1R Driver", NULL, "CLK_SYS" },
+
+	{ "SPK2L Driver", "DAC1L", "DAC1L" },
+	{ "SPK2L Driver", "DAC1R", "DAC1R" },
+	{ "SPK2L Driver", "DAC2L", "DAC2L" },
+	{ "SPK2L Driver", "DAC2R", "DAC2R" },
+	{ "SPK2L Driver", NULL, "CLK_SYS" },
+
+	{ "SPK2R Driver", "DAC1L", "DAC1L" },
+	{ "SPK2R Driver", "DAC1R", "DAC1R" },
+	{ "SPK2R Driver", "DAC2L", "DAC2L" },
+	{ "SPK2R Driver", "DAC2R", "DAC2R" },
+	{ "SPK2R Driver", NULL, "CLK_SYS" },
+
+	{ "SPK1L", NULL, "SPK1L Driver" },
+	{ "SPK1R", NULL, "SPK1R Driver" },
+	{ "SPK2L", NULL, "SPK2L Driver" },
+	{ "SPK2R", NULL, "SPK2R Driver" }
+};
+
+static int wm8995_volatile(unsigned int reg)
+{
+	/* Registers beyond the cached range are generally considered
+	 * volatile, to support register banks that are partially
+	 * owned by something else, e.g. a DSP.
+	 */
+	if (reg > WM8995_MAX_CACHED_REGISTER)
+		return 1;
+
+	switch (reg) {
+	case WM8995_SOFTWARE_RESET:
+	case WM8995_DC_SERVO_READBACK_0:
+	case WM8995_INTERRUPT_STATUS_1:
+	case WM8995_INTERRUPT_STATUS_2:
+	case WM8995_INTERRUPT_STATUS_1_MASK:
+	case WM8995_INTERRUPT_STATUS_2_MASK:
+	case WM8995_INTERRUPT_CONTROL:
+	case WM8995_ACCESSORY_DETECT_MODE1:
+	case WM8995_ACCESSORY_DETECT_MODE2:
+	case WM8995_HEADPHONE_DETECT1:
+	case WM8995_HEADPHONE_DETECT2:
+		return 1;
+	}
+
+	return 0;
+}
+
+static int wm8995_aif_mute(struct snd_soc_dai *dai, int mute)
+{
+	struct snd_soc_codec *codec = dai->codec;
+	int mute_reg;
+
+	switch (dai->id) {
+	case 0:
+		mute_reg = WM8995_AIF1_DAC1_FILTERS_1;
+		break;
+	case 1:
+		mute_reg = WM8995_AIF2_DAC_FILTERS_1;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	snd_soc_update_bits(codec, mute_reg, WM8995_AIF1DAC1_MUTE_MASK,
+			    !!mute << WM8995_AIF1DAC1_MUTE_SHIFT);
+	return 0;
+}
+
+static int wm8995_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
+{
+	struct snd_soc_codec *codec;
+	int master;
+	int aif;
+
+	codec = dai->codec;
+
+	master = 0;
+	switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+	case SND_SOC_DAIFMT_CBS_CFS:
+		break;
+	case SND_SOC_DAIFMT_CBM_CFM:
+		master = WM8995_AIF1_MSTR;
+		break;
+	default:
+		dev_err(dai->dev, "Unknown master/slave configuration\n");
+		return -EINVAL;
+	}
+
+	aif = 0;
+	switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+	case SND_SOC_DAIFMT_DSP_B:
+		aif |= WM8995_AIF1_LRCLK_INV;
+		/* fall through */
+	case SND_SOC_DAIFMT_DSP_A:
+		aif |= (0x3 << WM8995_AIF1_FMT_SHIFT);
+		break;
+	case SND_SOC_DAIFMT_I2S:
+		aif |= (0x2 << WM8995_AIF1_FMT_SHIFT);
+		break;
+	case SND_SOC_DAIFMT_RIGHT_J:
+		break;
+	case SND_SOC_DAIFMT_LEFT_J:
+		aif |= (0x1 << WM8995_AIF1_FMT_SHIFT);
+		break;
+	default:
+		dev_err(dai->dev, "Unknown dai format\n");
+		return -EINVAL;
+	}
+
+	switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+	case SND_SOC_DAIFMT_DSP_A:
+	case SND_SOC_DAIFMT_DSP_B:
+		/* frame inversion not valid for DSP modes */
+		switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+		case SND_SOC_DAIFMT_NB_NF:
+			break;
+		case SND_SOC_DAIFMT_IB_NF:
+			aif |= WM8995_AIF1_BCLK_INV;
+			break;
+		default:
+			return -EINVAL;
+		}
+		break;
+
+	case SND_SOC_DAIFMT_I2S:
+	case SND_SOC_DAIFMT_RIGHT_J:
+	case SND_SOC_DAIFMT_LEFT_J:
+		switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+		case SND_SOC_DAIFMT_NB_NF:
+			break;
+		case SND_SOC_DAIFMT_IB_IF:
+			aif |= WM8995_AIF1_BCLK_INV | WM8995_AIF1_LRCLK_INV;
+			break;
+		case SND_SOC_DAIFMT_IB_NF:
+			aif |= WM8995_AIF1_BCLK_INV;
+			break;
+		case SND_SOC_DAIFMT_NB_IF:
+			aif |= WM8995_AIF1_LRCLK_INV;
+			break;
+		default:
+			return -EINVAL;
+		}
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	snd_soc_update_bits(codec, WM8995_AIF1_CONTROL_1,
+			    WM8995_AIF1_BCLK_INV_MASK |
+			    WM8995_AIF1_LRCLK_INV_MASK |
+			    WM8995_AIF1_FMT_MASK, aif);
+	snd_soc_update_bits(codec, WM8995_AIF1_MASTER_SLAVE,
+			    WM8995_AIF1_MSTR_MASK, master);
+	return 0;
+}
+
+static const int srs[] = {
+	8000, 11025, 12000, 16000, 22050, 24000, 32000, 44100,
+	48000, 88200, 96000
+};
+
+static const int fs_ratios[] = {
+	-1 /* reserved */,
+	128, 192, 256, 384, 512, 768, 1024, 1408, 1536
+};
+
+static const int bclk_divs[] = {
+	10, 15, 20, 30, 40, 55, 60, 80, 110, 120, 160, 220, 240, 320, 440, 480
+};
+
+static int wm8995_hw_params(struct snd_pcm_substream *substream,
+			    struct snd_pcm_hw_params *params,
+			    struct snd_soc_dai *dai)
+{
+	struct snd_soc_codec *codec;
+	struct wm8995_priv *wm8995;
+	int aif1_reg;
+	int bclk_reg;
+	int lrclk_reg;
+	int rate_reg;
+	int bclk_rate;
+	int aif1;
+	int lrclk, bclk;
+	int i, rate_val, best, best_val, cur_val;
+
+	codec = dai->codec;
+	wm8995 = snd_soc_codec_get_drvdata(codec);
+
+	switch (dai->id) {
+	case 0:
+		aif1_reg = WM8995_AIF1_CONTROL_1;
+		bclk_reg = WM8995_AIF1_BCLK;
+		rate_reg = WM8995_AIF1_RATE;
+		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK /* ||
+			wm8995->lrclk_shared[0] */) {
+			lrclk_reg = WM8995_AIF1DAC_LRCLK;
+		} else {
+			lrclk_reg = WM8995_AIF1ADC_LRCLK;
+			dev_dbg(codec->dev, "AIF1 using split LRCLK\n");
+		}
+		break;
+	case 1:
+		aif1_reg = WM8995_AIF2_CONTROL_1;
+		bclk_reg = WM8995_AIF2_BCLK;
+		rate_reg = WM8995_AIF2_RATE;
+		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK /* ||
+		    wm8995->lrclk_shared[1] */) {
+			lrclk_reg = WM8995_AIF2DAC_LRCLK;
+		} else {
+			lrclk_reg = WM8995_AIF2ADC_LRCLK;
+			dev_dbg(codec->dev, "AIF2 using split LRCLK\n");
+		}
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	bclk_rate = snd_soc_params_to_bclk(params);
+	if (bclk_rate < 0)
+		return bclk_rate;
+
+	aif1 = 0;
+	switch (params_format(params)) {
+	case SNDRV_PCM_FORMAT_S16_LE:
+		break;
+	case SNDRV_PCM_FORMAT_S20_3LE:
+		aif1 |= (0x1 << WM8995_AIF1_WL_SHIFT);
+		break;
+	case SNDRV_PCM_FORMAT_S24_LE:
+		aif1 |= (0x2 << WM8995_AIF1_WL_SHIFT);
+		break;
+	case SNDRV_PCM_FORMAT_S32_LE:
+		aif1 |= (0x3 << WM8995_AIF1_WL_SHIFT);
+		break;
+	default:
+		dev_err(dai->dev, "Unsupported word length %u\n",
+			params_format(params));
+		return -EINVAL;
+	}
+
+	/* try to find a suitable sample rate */
+	for (i = 0; i < ARRAY_SIZE(srs); ++i)
+		if (srs[i] == params_rate(params))
+			break;
+	if (i == ARRAY_SIZE(srs)) {
+		dev_err(dai->dev, "Sample rate %d is not supported\n",
+			params_rate(params));
+		return -EINVAL;
+	}
+	rate_val = i << WM8995_AIF1_SR_SHIFT;
+
+	dev_dbg(dai->dev, "Sample rate is %dHz\n", srs[i]);
+	dev_dbg(dai->dev, "AIF%dCLK is %dHz, target BCLK %dHz\n",
+		dai->id + 1, wm8995->aifclk[dai->id], bclk_rate);
+
+	/* AIFCLK/fs ratio; look for a close match in either direction */
+	best = 1;
+	best_val = abs((fs_ratios[1] * params_rate(params))
+		       - wm8995->aifclk[dai->id]);
+	for (i = 2; i < ARRAY_SIZE(fs_ratios); i++) {
+		cur_val = abs((fs_ratios[i] * params_rate(params))
+			      - wm8995->aifclk[dai->id]);
+		if (cur_val >= best_val)
+			continue;
+		best = i;
+		best_val = cur_val;
+	}
+	rate_val |= best;
+
+	dev_dbg(dai->dev, "Selected AIF%dCLK/fs = %d\n",
+		dai->id + 1, fs_ratios[best]);
+
+	/*
+	 * We may not get quite the right frequency if using
+	 * approximate clocks so look for the closest match that is
+	 * higher than the target (we need to ensure that there are enough
+	 * BCLKs to clock out the samples).
+	 */
+	best = 0;
+	bclk = 0;
+	for (i = 0; i < ARRAY_SIZE(bclk_divs); i++) {
+		cur_val = (wm8995->aifclk[dai->id] * 10 / bclk_divs[i]) - bclk_rate;
+		if (cur_val < 0) /* BCLK table is sorted */
+			break;
+		best = i;
+	}
+	bclk |= best << WM8995_AIF1_BCLK_DIV_SHIFT;
+
+	bclk_rate = wm8995->aifclk[dai->id] * 10 / bclk_divs[best];
+	dev_dbg(dai->dev, "Using BCLK_DIV %d for actual BCLK %dHz\n",
+		bclk_divs[best], bclk_rate);
+
+	lrclk = bclk_rate / params_rate(params);
+	dev_dbg(dai->dev, "Using LRCLK rate %d for actual LRCLK %dHz\n",
+		lrclk, bclk_rate / lrclk);
+
+	snd_soc_update_bits(codec, aif1_reg,
+			    WM8995_AIF1_WL_MASK, aif1);
+	snd_soc_update_bits(codec, bclk_reg,
+			    WM8995_AIF1_BCLK_DIV_MASK, bclk);
+	snd_soc_update_bits(codec, lrclk_reg,
+			    WM8995_AIF1DAC_RATE_MASK, lrclk);
+	snd_soc_update_bits(codec, rate_reg,
+			    WM8995_AIF1_SR_MASK |
+			    WM8995_AIF1CLK_RATE_MASK, rate_val);
+	return 0;
+}
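A worked example of the divider selection above, with illustrative numbers rather than values from any particular board: with AIF1CLK at 11.2896 MHz and a 44.1 kHz, 16-bit stereo stream, the target BCLK from snd_soc_params_to_bclk() is roughly rate x channels x bits = 44100 x 2 x 16 = 1,411,200 Hz. The fs_ratios search picks 256 (11,289,600 / 44,100 exactly), the bclk_divs scan stops at the entry 80 (the table is scaled by 10, so a divide-by-8 giving exactly 1,411,200 Hz), and LRCLK then ends up as 1,411,200 / 44,100 = 32 BCLKs per frame.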
+
+static int wm8995_set_tristate(struct snd_soc_dai *codec_dai, int tristate)
+{
+	struct snd_soc_codec *codec = codec_dai->codec;
+	int reg, val, mask;
+
+	switch (codec_dai->id) {
+	case 0:
+		reg = WM8995_AIF1_MASTER_SLAVE;
+		mask = WM8995_AIF1_TRI;
+		break;
+	case 1:
+		reg = WM8995_AIF2_MASTER_SLAVE;
+		mask = WM8995_AIF2_TRI;
+		break;
+	case 2:
+		reg = WM8995_POWER_MANAGEMENT_5;
+		mask = WM8995_AIF3_TRI;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (tristate)
+		val = mask;
+	else
+		val = 0;
+
+	return snd_soc_update_bits(codec, reg, mask, val);
+}
+
+/* The fixed-point resolution of the FLL fractional divider (2^16),
+ * multiplied by 10 to allow rounding later */
+#define FIXED_FLL_SIZE ((1 << 16) * 10)
+
+struct fll_div {
+	u16 outdiv;
+	u16 n;
+	u16 k;
+	u16 clk_ref_div;
+	u16 fll_fratio;
+};
+
+static int wm8995_get_fll_config(struct fll_div *fll,
+				 int freq_in, int freq_out)
+{
+	u64 Kpart;
+	unsigned int K, Ndiv, Nmod;
+
+	pr_debug("FLL input=%dHz, output=%dHz\n", freq_in, freq_out);
+
+	/* Scale the input frequency down to <= 13.5MHz */
+	fll->clk_ref_div = 0;
+	while (freq_in > 13500000) {
+		fll->clk_ref_div++;
+		freq_in /= 2;
+
+		if (fll->clk_ref_div > 3)
+			return -EINVAL;
+	}
+	pr_debug("CLK_REF_DIV=%d, Fref=%dHz\n", fll->clk_ref_div, freq_in);
+
+	/* Scale the output to give 90MHz<=Fvco<=100MHz */
+	fll->outdiv = 3;
+	while (freq_out * (fll->outdiv + 1) < 90000000) {
+		fll->outdiv++;
+		if (fll->outdiv > 63)
+			return -EINVAL;
+	}
+	freq_out *= fll->outdiv + 1;
+	pr_debug("OUTDIV=%d, Fvco=%dHz\n", fll->outdiv, freq_out);
+
+	if (freq_in > 1000000) {
+		fll->fll_fratio = 0;
+	} else if (freq_in > 256000) {
+		fll->fll_fratio = 1;
+		freq_in *= 2;
+	} else if (freq_in > 128000) {
+		fll->fll_fratio = 2;
+		freq_in *= 4;
+	} else if (freq_in > 64000) {
+		fll->fll_fratio = 3;
+		freq_in *= 8;
+	} else {
+		fll->fll_fratio = 4;
+		freq_in *= 16;
+	}
+	pr_debug("FLL_FRATIO=%d, Fref=%dHz\n", fll->fll_fratio, freq_in);
+
+	/* Now, calculate N.K */
+	Ndiv = freq_out / freq_in;
+
+	fll->n = Ndiv;
+	Nmod = freq_out % freq_in;
+	pr_debug("Nmod=%d\n", Nmod);
+
+	/* Calculate fractional part - scale up so we can round. */
+	Kpart = FIXED_FLL_SIZE * (long long)Nmod;
+
+	do_div(Kpart, freq_in);
+
+	K = Kpart & 0xFFFFFFFF;
+
+	if ((K % 10) >= 5)
+		K += 5;
+
+	/* Move down to proper range now rounding is done */
+	fll->k = K / 10;
+
+	pr_debug("N=%x K=%x\n", fll->n, fll->k);
+
+	return 0;
+}
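A worked pass through this routine, again with illustrative numbers: for freq_in = 12 MHz (e.g. MCLK1) and freq_out = 11.2896 MHz (256 x 44.1 kHz), CLK_REF_DIV stays 0 (12 MHz is already below 13.5 MHz), OUTDIV settles at 7 so Fvco = 11.2896 MHz x 8 = 90.3168 MHz, and FLL_FRATIO is 0. N = 90,316,800 / 12,000,000 = 7, the remainder 6,316,800 / 12,000,000 = 0.5264 scales to K = 0.5264 x 65536 ~= 34498, and the FLL output is (7 + 34498/65536) x 12 MHz / (OUTDIV + 1) = 11.2896 MHz as requested.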
+
+static int wm8995_set_fll(struct snd_soc_dai *dai, int id,
+			  int src, unsigned int freq_in,
+			  unsigned int freq_out)
+{
+	struct snd_soc_codec *codec;
+	struct wm8995_priv *wm8995;
+	int reg_offset, ret;
+	struct fll_div fll;
+	u16 reg, aif1, aif2;
+
+	codec = dai->codec;
+	wm8995 = snd_soc_codec_get_drvdata(codec);
+
+	aif1 = snd_soc_read(codec, WM8995_AIF1_CLOCKING_1)
+	       & WM8995_AIF1CLK_ENA;
+
+	aif2 = snd_soc_read(codec, WM8995_AIF2_CLOCKING_1)
+	       & WM8995_AIF2CLK_ENA;
+
+	switch (id) {
+	case WM8995_FLL1:
+		reg_offset = 0;
+		id = 0;
+		break;
+	case WM8995_FLL2:
+		reg_offset = 0x20;
+		id = 1;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	switch (src) {
+	case 0:
+		/* Allow no source specification when stopping */
+		if (freq_out)
+			return -EINVAL;
+		break;
+	case WM8995_FLL_SRC_MCLK1:
+	case WM8995_FLL_SRC_MCLK2:
+	case WM8995_FLL_SRC_LRCLK:
+	case WM8995_FLL_SRC_BCLK:
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	/* Are we changing anything? */
+	if (wm8995->fll[id].src == src &&
+	    wm8995->fll[id].in == freq_in && wm8995->fll[id].out == freq_out)
+		return 0;
+
+	/* If we're stopping the FLL redo the old config - no
+	 * registers will actually be written but we avoid GCC flow
+	 * analysis bugs spewing warnings.
+	 */
+	if (freq_out)
+		ret = wm8995_get_fll_config(&fll, freq_in, freq_out);
+	else
+		ret = wm8995_get_fll_config(&fll, wm8995->fll[id].in,
+					    wm8995->fll[id].out);
+	if (ret < 0)
+		return ret;
+
+	/* Gate the AIF clocks while we reclock */
+	snd_soc_update_bits(codec, WM8995_AIF1_CLOCKING_1,
+			    WM8995_AIF1CLK_ENA_MASK, 0);
+	snd_soc_update_bits(codec, WM8995_AIF2_CLOCKING_1,
+			    WM8995_AIF2CLK_ENA_MASK, 0);
+
+	/* We always need to disable the FLL while reconfiguring */
+	snd_soc_update_bits(codec, WM8995_FLL1_CONTROL_1 + reg_offset,
+			    WM8995_FLL1_ENA_MASK, 0);
+
+	reg = (fll.outdiv << WM8995_FLL1_OUTDIV_SHIFT) |
+	      (fll.fll_fratio << WM8995_FLL1_FRATIO_SHIFT);
+	snd_soc_update_bits(codec, WM8995_FLL1_CONTROL_2 + reg_offset,
+			    WM8995_FLL1_OUTDIV_MASK |
+			    WM8995_FLL1_FRATIO_MASK, reg);
+
+	snd_soc_write(codec, WM8995_FLL1_CONTROL_3 + reg_offset, fll.k);
+
+	snd_soc_update_bits(codec, WM8995_FLL1_CONTROL_4 + reg_offset,
+			    WM8995_FLL1_N_MASK,
+			    fll.n << WM8995_FLL1_N_SHIFT);
+
+	snd_soc_update_bits(codec, WM8995_FLL1_CONTROL_5 + reg_offset,
+			    WM8995_FLL1_REFCLK_DIV_MASK |
+			    WM8995_FLL1_REFCLK_SRC_MASK,
+			    (fll.clk_ref_div << WM8995_FLL1_REFCLK_DIV_SHIFT) |
+			    (src - 1));
+
+	if (freq_out)
+		snd_soc_update_bits(codec, WM8995_FLL1_CONTROL_1 + reg_offset,
+				    WM8995_FLL1_ENA_MASK, WM8995_FLL1_ENA);
+
+	wm8995->fll[id].in = freq_in;
+	wm8995->fll[id].out = freq_out;
+	wm8995->fll[id].src = src;
+
+	/* Enable any gated AIF clocks */
+	snd_soc_update_bits(codec, WM8995_AIF1_CLOCKING_1,
+			    WM8995_AIF1CLK_ENA_MASK, aif1);
+	snd_soc_update_bits(codec, WM8995_AIF2_CLOCKING_1,
+			    WM8995_AIF2CLK_ENA_MASK, aif2);
+
+	configure_clock(codec);
+
+	return 0;
+}
+
+static int wm8995_set_dai_sysclk(struct snd_soc_dai *dai,
+				 int clk_id, unsigned int freq, int dir)
+{
+	struct snd_soc_codec *codec;
+	struct wm8995_priv *wm8995;
+
+	codec = dai->codec;
+	wm8995 = snd_soc_codec_get_drvdata(codec);
+
+	switch (dai->id) {
+	case 0:
+	case 1:
+		break;
+	default:
+		/* AIF3 shares clocking with AIF1/2 */
+		return -EINVAL;
+	}
+
+	switch (clk_id) {
+	case WM8995_SYSCLK_MCLK1:
+		wm8995->sysclk[dai->id] = WM8995_SYSCLK_MCLK1;
+		wm8995->mclk[0] = freq;
+		dev_dbg(dai->dev, "AIF%d using MCLK1 at %uHz\n",
+			dai->id + 1, freq);
+		break;
+	case WM8995_SYSCLK_MCLK2:
+		wm8995->sysclk[dai->id] = WM8995_SYSCLK_MCLK2;
+		wm8995->mclk[1] = freq;
+		dev_dbg(dai->dev, "AIF%d using MCLK2 at %uHz\n",
+			dai->id + 1, freq);
+		break;
+	case WM8995_SYSCLK_FLL1:
+		wm8995->sysclk[dai->id] = WM8995_SYSCLK_FLL1;
+		dev_dbg(dai->dev, "AIF%d using FLL1\n", dai->id + 1);
+		break;
+	case WM8995_SYSCLK_FLL2:
+		wm8995->sysclk[dai->id] = WM8995_SYSCLK_FLL2;
+		dev_dbg(dai->dev, "AIF%d using FLL2\n", dai->id + 1);
+		break;
+	case WM8995_SYSCLK_OPCLK:
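+		/* OPCLK is not supported as a DAI system clock source */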
+	default:
+		dev_err(dai->dev, "Unknown clock source %d\n", clk_id);
+		return -EINVAL;
+	}
+
+	configure_clock(codec);
+
+	return 0;
+}
+
+static int wm8995_set_bias_level(struct snd_soc_codec *codec,
+				 enum snd_soc_bias_level level)
+{
+	struct wm8995_priv *wm8995;
+	int ret;
+
+	wm8995 = snd_soc_codec_get_drvdata(codec);
+	switch (level) {
+	case SND_SOC_BIAS_ON:
+	case SND_SOC_BIAS_PREPARE:
+		break;
+	case SND_SOC_BIAS_STANDBY:
+		if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
+			ret = snd_soc_cache_sync(codec);
+			if (ret) {
+				dev_err(codec->dev,
+					"Failed to sync cache: %d\n", ret);
+				return ret;
+			}
+
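+			/* Re-enable the bandgap reference when leaving BIAS_OFF */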
+			snd_soc_update_bits(codec, WM8995_POWER_MANAGEMENT_1,
+					    WM8995_BG_ENA_MASK, WM8995_BG_ENA);
+
+		}
+		break;
+	case SND_SOC_BIAS_OFF:
+		snd_soc_update_bits(codec, WM8995_POWER_MANAGEMENT_1,
+				    WM8995_BG_ENA_MASK, 0);
+		break;
+	}
+
+	codec->dapm.bias_level = level;
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int wm8995_suspend(struct snd_soc_codec *codec, pm_message_t state)
+{
+	wm8995_set_bias_level(codec, SND_SOC_BIAS_OFF);
+	return 0;
+}
+
+static int wm8995_resume(struct snd_soc_codec *codec)
+{
+	wm8995_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
+	return 0;
+}
+#else
+#define wm8995_suspend NULL
+#define wm8995_resume NULL
+#endif
+
+static int wm8995_remove(struct snd_soc_codec *codec)
+{
+	wm8995_set_bias_level(codec, SND_SOC_BIAS_OFF);
+	return 0;
+}
+
+static int wm8995_probe(struct snd_soc_codec *codec)
+{
+	struct wm8995_priv *wm8995;
+	int ret;
+
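+	/* Leave the bias fully off when the codec is idle */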
+	codec->dapm.idle_bias_off = 1;
+	wm8995 = snd_soc_codec_get_drvdata(codec);
+
+	ret = snd_soc_codec_set_cache_io(codec, 16, 16, wm8995->control_type);
+	if (ret < 0) {
+		dev_err(codec->dev, "Failed to set cache i/o: %d\n", ret);
+		return ret;
+	}
+
+	ret = snd_soc_read(codec, WM8995_SOFTWARE_RESET);
+	if (ret < 0) {
+		dev_err(codec->dev, "Failed to read device ID: %d\n", ret);
+		return ret;
+	}
+
+	if (ret != 0x8995) {
+		dev_err(codec->dev, "Invalid device ID: %#x\n", ret);
+		return -EINVAL;
+	}
+
+	ret = snd_soc_write(codec, WM8995_SOFTWARE_RESET, 0);
+	if (ret < 0) {
+		dev_err(codec->dev, "Failed to issue reset: %d\n", ret);
+		return ret;
+	}
+
+	wm8995_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
+
+	/* Latch volume updates (right only; we always do left then right). */
+	snd_soc_update_bits(codec, WM8995_AIF1_DAC1_RIGHT_VOLUME,
+			    WM8995_AIF1DAC1_VU_MASK, WM8995_AIF1DAC1_VU);
+	snd_soc_update_bits(codec, WM8995_AIF1_DAC2_RIGHT_VOLUME,
+			    WM8995_AIF1DAC2_VU_MASK, WM8995_AIF1DAC2_VU);
+	snd_soc_update_bits(codec, WM8995_AIF2_DAC_RIGHT_VOLUME,
+			    WM8995_AIF2DAC_VU_MASK, WM8995_AIF2DAC_VU);
+	snd_soc_update_bits(codec, WM8995_AIF1_ADC1_RIGHT_VOLUME,
+			    WM8995_AIF1ADC1_VU_MASK, WM8995_AIF1ADC1_VU);
+	snd_soc_update_bits(codec, WM8995_AIF1_ADC2_RIGHT_VOLUME,
+			    WM8995_AIF1ADC2_VU_MASK, WM8995_AIF1ADC2_VU);
+	snd_soc_update_bits(codec, WM8995_AIF2_ADC_RIGHT_VOLUME,
+			    WM8995_AIF2ADC_VU_MASK, WM8995_AIF2ADC_VU);
+	snd_soc_update_bits(codec, WM8995_DAC1_RIGHT_VOLUME,
+			    WM8995_DAC1_VU_MASK, WM8995_DAC1_VU);
+	snd_soc_update_bits(codec, WM8995_DAC2_RIGHT_VOLUME,
+			    WM8995_DAC2_VU_MASK, WM8995_DAC2_VU);
+	snd_soc_update_bits(codec, WM8995_RIGHT_LINE_INPUT_1_VOLUME,
+			    WM8995_IN1_VU_MASK, WM8995_IN1_VU);
+
+	wm8995_update_class_w(codec);
+
+	snd_soc_add_controls(codec, wm8995_snd_controls,
+			     ARRAY_SIZE(wm8995_snd_controls));
+	snd_soc_dapm_new_controls(&codec->dapm, wm8995_dapm_widgets,
+				  ARRAY_SIZE(wm8995_dapm_widgets));
+	snd_soc_dapm_add_routes(&codec->dapm, wm8995_intercon,
+				ARRAY_SIZE(wm8995_intercon));
+
+	return 0;
+}
+
+#define WM8995_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE |\
+			SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE)
+
+static struct snd_soc_dai_ops wm8995_aif1_dai_ops = {
+	.set_sysclk = wm8995_set_dai_sysclk,
+	.set_fmt = wm8995_set_dai_fmt,
+	.hw_params = wm8995_hw_params,
+	.digital_mute = wm8995_aif_mute,
+	.set_pll = wm8995_set_fll,
+	.set_tristate = wm8995_set_tristate,
+};
+
+static struct snd_soc_dai_ops wm8995_aif2_dai_ops = {
+	.set_sysclk = wm8995_set_dai_sysclk,
+	.set_fmt = wm8995_set_dai_fmt,
+	.hw_params = wm8995_hw_params,
+	.digital_mute = wm8995_aif_mute,
+	.set_pll = wm8995_set_fll,
+	.set_tristate = wm8995_set_tristate,
+};
+
+static struct snd_soc_dai_ops wm8995_aif3_dai_ops = {
+	.set_tristate = wm8995_set_tristate,
+};
+
+static struct snd_soc_dai_driver wm8995_dai[] = {
+	{
+		.name = "wm8995-aif1",
+		.playback = {
+			.stream_name = "AIF1 Playback",
+			.channels_min = 2,
+			.channels_max = 2,
+			.rates = SNDRV_PCM_RATE_8000_96000,
+			.formats = WM8995_FORMATS
+		},
+		.capture = {
+			.stream_name = "AIF1 Capture",
+			.channels_min = 2,
+			.channels_max = 2,
+			.rates = SNDRV_PCM_RATE_8000_48000,
+			.formats = WM8995_FORMATS
+		},
+		.ops = &wm8995_aif1_dai_ops
+	},
+	{
+		.name = "wm8995-aif2",
+		.playback = {
+			.stream_name = "AIF2 Playback",
+			.channels_min = 2,
+			.channels_max = 2,
+			.rates = SNDRV_PCM_RATE_8000_96000,
+			.formats = WM8995_FORMATS
+		},
+		.capture = {
+			.stream_name = "AIF2 Capture",
+			.channels_min = 2,
+			.channels_max = 2,
+			.rates = SNDRV_PCM_RATE_8000_48000,
+			.formats = WM8995_FORMATS
+		},
+		.ops = &wm8995_aif2_dai_ops
+	},
+	{
+		.name = "wm8995-aif3",
+		.playback = {
+			.stream_name = "AIF3 Playback",
+			.channels_min = 2,
+			.channels_max = 2,
+			.rates = SNDRV_PCM_RATE_8000_96000,
+			.formats = WM8995_FORMATS
+		},
+		.capture = {
+			.stream_name = "AIF3 Capture",
+			.channels_min = 2,
+			.channels_max = 2,
+			.rates = SNDRV_PCM_RATE_8000_48000,
+			.formats = WM8995_FORMATS
+		},
+		.ops = &wm8995_aif3_dai_ops
+	}
+};
+
+static struct snd_soc_codec_driver soc_codec_dev_wm8995 = {
+	.probe = wm8995_probe,
+	.remove = wm8995_remove,
+	.suspend = wm8995_suspend,
+	.resume = wm8995_resume,
+	.set_bias_level = wm8995_set_bias_level,
+	.reg_cache_size = ARRAY_SIZE(wm8995_reg_defs),
+	.reg_word_size = sizeof(u16),
+	.reg_cache_default = wm8995_reg_defs,
+	.volatile_register = wm8995_volatile,
+	.compress_type = SND_SOC_RBTREE_COMPRESSION
+};
+
+#if defined(CONFIG_SPI_MASTER)
+static int __devinit wm8995_spi_probe(struct spi_device *spi)
+{
+	struct wm8995_priv *wm8995;
+	int ret;
+
+	wm8995 = kzalloc(sizeof *wm8995, GFP_KERNEL);
+	if (!wm8995)
+		return -ENOMEM;
+
+	wm8995->control_type = SND_SOC_SPI;
+	spi_set_drvdata(spi, wm8995);
+
+	ret = snd_soc_register_codec(&spi->dev,
+				     &soc_codec_dev_wm8995, wm8995_dai,
+				     ARRAY_SIZE(wm8995_dai));
+	if (ret < 0)
+		kfree(wm8995);
+	return ret;
+}
+
+static int __devexit wm8995_spi_remove(struct spi_device *spi)
+{
+	snd_soc_unregister_codec(&spi->dev);
+	kfree(spi_get_drvdata(spi));
+	return 0;
+}
+
+static struct spi_driver wm8995_spi_driver = {
+	.driver = {
+		.name = "wm8995",
+		.owner = THIS_MODULE,
+	},
+	.probe = wm8995_spi_probe,
+	.remove = __devexit_p(wm8995_spi_remove)
+};
+#endif
+
+#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
+static __devinit int wm8995_i2c_probe(struct i2c_client *i2c,
+				      const struct i2c_device_id *id)
+{
+	struct wm8995_priv *wm8995;
+	int ret;
+
+	wm8995 = kzalloc(sizeof *wm8995, GFP_KERNEL);
+	if (!wm8995)
+		return -ENOMEM;
+
+	wm8995->control_type = SND_SOC_I2C;
+	i2c_set_clientdata(i2c, wm8995);
+
+	ret = snd_soc_register_codec(&i2c->dev,
+				     &soc_codec_dev_wm8995, wm8995_dai,
+				     ARRAY_SIZE(wm8995_dai));
+	if (ret < 0)
+		kfree(wm8995);
+	return ret;
+}
+
+static __devexit int wm8995_i2c_remove(struct i2c_client *client)
+{
+	snd_soc_unregister_codec(&client->dev);
+	kfree(i2c_get_clientdata(client));
+	return 0;
+}
+
+static const struct i2c_device_id wm8995_i2c_id[] = {
+	{"wm8995", 0},
+	{}
+};
+
+MODULE_DEVICE_TABLE(i2c, wm8995_i2c_id);
+
+static struct i2c_driver wm8995_i2c_driver = {
+	.driver = {
+		.name = "wm8995",
+		.owner = THIS_MODULE,
+	},
+	.probe = wm8995_i2c_probe,
+	.remove = __devexit_p(wm8995_i2c_remove),
+	.id_table = wm8995_i2c_id
+};
+#endif
+
+static int __init wm8995_modinit(void)
+{
+	int ret = 0;
+
+#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
+	ret = i2c_add_driver(&wm8995_i2c_driver);
+	if (ret) {
+		printk(KERN_ERR "Failed to register wm8995 I2C driver: %d\n",
+		       ret);
+	}
+#endif
+#if defined(CONFIG_SPI_MASTER)
+	ret = spi_register_driver(&wm8995_spi_driver);
+	if (ret) {
+		printk(KERN_ERR "Failed to register wm8995 SPI driver: %d\n",
+		       ret);
+	}
+#endif
+	return ret;
+}
+
+module_init(wm8995_modinit);
+
+static void __exit wm8995_exit(void)
+{
+#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
+	i2c_del_driver(&wm8995_i2c_driver);
+#endif
+#if defined(CONFIG_SPI_MASTER)
+	spi_unregister_driver(&wm8995_spi_driver);
+#endif
+}
+
+module_exit(wm8995_exit);
+
+MODULE_DESCRIPTION("ASoC WM8995 driver");
+MODULE_AUTHOR("Dimitris Papastamos <dp@opensource.wolfsonmicro.com>");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/wm8995.h b/sound/soc/codecs/wm8995.h
new file mode 100644
index 0000000..5642121
--- /dev/null
+++ b/sound/soc/codecs/wm8995.h
@@ -0,0 +1,4269 @@
+/*
+ * wm8995.h  --  WM8995 ALSA SoC Audio driver
+ *
+ * Copyright 2010 Wolfson Microelectronics plc
+ *
+ * Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _WM8995_H
+#define _WM8995_H
+
+#include <asm/types.h>
+
+/*
+ * Register values.
+ */
+#define WM8995_SOFTWARE_RESET                   0x00
+#define WM8995_POWER_MANAGEMENT_1               0x01
+#define WM8995_POWER_MANAGEMENT_2               0x02
+#define WM8995_POWER_MANAGEMENT_3               0x03
+#define WM8995_POWER_MANAGEMENT_4               0x04
+#define WM8995_POWER_MANAGEMENT_5               0x05
+#define WM8995_LEFT_LINE_INPUT_1_VOLUME         0x10
+#define WM8995_RIGHT_LINE_INPUT_1_VOLUME        0x11
+#define WM8995_LEFT_LINE_INPUT_CONTROL          0x12
+#define WM8995_DAC1_LEFT_VOLUME                 0x18
+#define WM8995_DAC1_RIGHT_VOLUME                0x19
+#define WM8995_DAC2_LEFT_VOLUME                 0x1A
+#define WM8995_DAC2_RIGHT_VOLUME                0x1B
+#define WM8995_OUTPUT_VOLUME_ZC_1               0x1C
+#define WM8995_MICBIAS_1                        0x20
+#define WM8995_MICBIAS_2                        0x21
+#define WM8995_LDO_1                            0x28
+#define WM8995_LDO_2                            0x29
+#define WM8995_ACCESSORY_DETECT_MODE1           0x30
+#define WM8995_ACCESSORY_DETECT_MODE2           0x31
+#define WM8995_HEADPHONE_DETECT1                0x34
+#define WM8995_HEADPHONE_DETECT2                0x35
+#define WM8995_MIC_DETECT_1                     0x38
+#define WM8995_MIC_DETECT_2                     0x39
+#define WM8995_CHARGE_PUMP_1                    0x40
+#define WM8995_CLASS_W_1                        0x45
+#define WM8995_DC_SERVO_1                       0x50
+#define WM8995_DC_SERVO_2                       0x51
+#define WM8995_DC_SERVO_3                       0x52
+#define WM8995_DC_SERVO_5                       0x54
+#define WM8995_DC_SERVO_6                       0x55
+#define WM8995_DC_SERVO_7                       0x56
+#define WM8995_DC_SERVO_READBACK_0              0x57
+#define WM8995_ANALOGUE_HP_1                    0x60
+#define WM8995_ANALOGUE_HP_2                    0x61
+#define WM8995_CHIP_REVISION                    0x100
+#define WM8995_CONTROL_INTERFACE_1              0x101
+#define WM8995_CONTROL_INTERFACE_2              0x102
+#define WM8995_WRITE_SEQUENCER_CTRL_1           0x110
+#define WM8995_WRITE_SEQUENCER_CTRL_2           0x111
+#define WM8995_AIF1_CLOCKING_1                  0x200
+#define WM8995_AIF1_CLOCKING_2                  0x201
+#define WM8995_AIF2_CLOCKING_1                  0x204
+#define WM8995_AIF2_CLOCKING_2                  0x205
+#define WM8995_CLOCKING_1                       0x208
+#define WM8995_CLOCKING_2                       0x209
+#define WM8995_AIF1_RATE                        0x210
+#define WM8995_AIF2_RATE                        0x211
+#define WM8995_RATE_STATUS                      0x212
+#define WM8995_FLL1_CONTROL_1                   0x220
+#define WM8995_FLL1_CONTROL_2                   0x221
+#define WM8995_FLL1_CONTROL_3                   0x222
+#define WM8995_FLL1_CONTROL_4                   0x223
+#define WM8995_FLL1_CONTROL_5                   0x224
+#define WM8995_FLL2_CONTROL_1                   0x240
+#define WM8995_FLL2_CONTROL_2                   0x241
+#define WM8995_FLL2_CONTROL_3                   0x242
+#define WM8995_FLL2_CONTROL_4                   0x243
+#define WM8995_FLL2_CONTROL_5                   0x244
+#define WM8995_AIF1_CONTROL_1                   0x300
+#define WM8995_AIF1_CONTROL_2                   0x301
+#define WM8995_AIF1_MASTER_SLAVE                0x302
+#define WM8995_AIF1_BCLK                        0x303
+#define WM8995_AIF1ADC_LRCLK                    0x304
+#define WM8995_AIF1DAC_LRCLK                    0x305
+#define WM8995_AIF1DAC_DATA                     0x306
+#define WM8995_AIF1ADC_DATA                     0x307
+#define WM8995_AIF2_CONTROL_1                   0x310
+#define WM8995_AIF2_CONTROL_2                   0x311
+#define WM8995_AIF2_MASTER_SLAVE                0x312
+#define WM8995_AIF2_BCLK                        0x313
+#define WM8995_AIF2ADC_LRCLK                    0x314
+#define WM8995_AIF2DAC_LRCLK                    0x315
+#define WM8995_AIF2DAC_DATA                     0x316
+#define WM8995_AIF2ADC_DATA                     0x317
+#define WM8995_AIF1_ADC1_LEFT_VOLUME            0x400
+#define WM8995_AIF1_ADC1_RIGHT_VOLUME           0x401
+#define WM8995_AIF1_DAC1_LEFT_VOLUME            0x402
+#define WM8995_AIF1_DAC1_RIGHT_VOLUME           0x403
+#define WM8995_AIF1_ADC2_LEFT_VOLUME            0x404
+#define WM8995_AIF1_ADC2_RIGHT_VOLUME           0x405
+#define WM8995_AIF1_DAC2_LEFT_VOLUME            0x406
+#define WM8995_AIF1_DAC2_RIGHT_VOLUME           0x407
+#define WM8995_AIF1_ADC1_FILTERS                0x410
+#define WM8995_AIF1_ADC2_FILTERS                0x411
+#define WM8995_AIF1_DAC1_FILTERS_1              0x420
+#define WM8995_AIF1_DAC1_FILTERS_2              0x421
+#define WM8995_AIF1_DAC2_FILTERS_1              0x422
+#define WM8995_AIF1_DAC2_FILTERS_2              0x423
+#define WM8995_AIF1_DRC1_1                      0x440
+#define WM8995_AIF1_DRC1_2                      0x441
+#define WM8995_AIF1_DRC1_3                      0x442
+#define WM8995_AIF1_DRC1_4                      0x443
+#define WM8995_AIF1_DRC1_5                      0x444
+#define WM8995_AIF1_DRC2_1                      0x450
+#define WM8995_AIF1_DRC2_2                      0x451
+#define WM8995_AIF1_DRC2_3                      0x452
+#define WM8995_AIF1_DRC2_4                      0x453
+#define WM8995_AIF1_DRC2_5                      0x454
+#define WM8995_AIF1_DAC1_EQ_GAINS_1             0x480
+#define WM8995_AIF1_DAC1_EQ_GAINS_2             0x481
+#define WM8995_AIF1_DAC1_EQ_BAND_1_A            0x482
+#define WM8995_AIF1_DAC1_EQ_BAND_1_B            0x483
+#define WM8995_AIF1_DAC1_EQ_BAND_1_PG           0x484
+#define WM8995_AIF1_DAC1_EQ_BAND_2_A            0x485
+#define WM8995_AIF1_DAC1_EQ_BAND_2_B            0x486
+#define WM8995_AIF1_DAC1_EQ_BAND_2_C            0x487
+#define WM8995_AIF1_DAC1_EQ_BAND_2_PG           0x488
+#define WM8995_AIF1_DAC1_EQ_BAND_3_A            0x489
+#define WM8995_AIF1_DAC1_EQ_BAND_3_B            0x48A
+#define WM8995_AIF1_DAC1_EQ_BAND_3_C            0x48B
+#define WM8995_AIF1_DAC1_EQ_BAND_3_PG           0x48C
+#define WM8995_AIF1_DAC1_EQ_BAND_4_A            0x48D
+#define WM8995_AIF1_DAC1_EQ_BAND_4_B            0x48E
+#define WM8995_AIF1_DAC1_EQ_BAND_4_C            0x48F
+#define WM8995_AIF1_DAC1_EQ_BAND_4_PG           0x490
+#define WM8995_AIF1_DAC1_EQ_BAND_5_A            0x491
+#define WM8995_AIF1_DAC1_EQ_BAND_5_B            0x492
+#define WM8995_AIF1_DAC1_EQ_BAND_5_PG           0x493
+#define WM8995_AIF1_DAC2_EQ_GAINS_1             0x4A0
+#define WM8995_AIF1_DAC2_EQ_GAINS_2             0x4A1
+#define WM8995_AIF1_DAC2_EQ_BAND_1_A            0x4A2
+#define WM8995_AIF1_DAC2_EQ_BAND_1_B            0x4A3
+#define WM8995_AIF1_DAC2_EQ_BAND_1_PG           0x4A4
+#define WM8995_AIF1_DAC2_EQ_BAND_2_A            0x4A5
+#define WM8995_AIF1_DAC2_EQ_BAND_2_B            0x4A6
+#define WM8995_AIF1_DAC2_EQ_BAND_2_C            0x4A7
+#define WM8995_AIF1_DAC2_EQ_BAND_2_PG           0x4A8
+#define WM8995_AIF1_DAC2_EQ_BAND_3_A            0x4A9
+#define WM8995_AIF1_DAC2_EQ_BAND_3_B            0x4AA
+#define WM8995_AIF1_DAC2_EQ_BAND_3_C            0x4AB
+#define WM8995_AIF1_DAC2_EQ_BAND_3_PG           0x4AC
+#define WM8995_AIF1_DAC2_EQ_BAND_4_A            0x4AD
+#define WM8995_AIF1_DAC2_EQ_BAND_4_B            0x4AE
+#define WM8995_AIF1_DAC2_EQ_BAND_4_C            0x4AF
+#define WM8995_AIF1_DAC2_EQ_BAND_4_PG           0x4B0
+#define WM8995_AIF1_DAC2_EQ_BAND_5_A            0x4B1
+#define WM8995_AIF1_DAC2_EQ_BAND_5_B            0x4B2
+#define WM8995_AIF1_DAC2_EQ_BAND_5_PG           0x4B3
+#define WM8995_AIF2_ADC_LEFT_VOLUME             0x500
+#define WM8995_AIF2_ADC_RIGHT_VOLUME            0x501
+#define WM8995_AIF2_DAC_LEFT_VOLUME             0x502
+#define WM8995_AIF2_DAC_RIGHT_VOLUME            0x503
+#define WM8995_AIF2_ADC_FILTERS                 0x510
+#define WM8995_AIF2_DAC_FILTERS_1               0x520
+#define WM8995_AIF2_DAC_FILTERS_2               0x521
+#define WM8995_AIF2_DRC_1                       0x540
+#define WM8995_AIF2_DRC_2                       0x541
+#define WM8995_AIF2_DRC_3                       0x542
+#define WM8995_AIF2_DRC_4                       0x543
+#define WM8995_AIF2_DRC_5                       0x544
+#define WM8995_AIF2_EQ_GAINS_1                  0x580
+#define WM8995_AIF2_EQ_GAINS_2                  0x581
+#define WM8995_AIF2_EQ_BAND_1_A                 0x582
+#define WM8995_AIF2_EQ_BAND_1_B                 0x583
+#define WM8995_AIF2_EQ_BAND_1_PG                0x584
+#define WM8995_AIF2_EQ_BAND_2_A                 0x585
+#define WM8995_AIF2_EQ_BAND_2_B                 0x586
+#define WM8995_AIF2_EQ_BAND_2_C                 0x587
+#define WM8995_AIF2_EQ_BAND_2_PG                0x588
+#define WM8995_AIF2_EQ_BAND_3_A                 0x589
+#define WM8995_AIF2_EQ_BAND_3_B                 0x58A
+#define WM8995_AIF2_EQ_BAND_3_C                 0x58B
+#define WM8995_AIF2_EQ_BAND_3_PG                0x58C
+#define WM8995_AIF2_EQ_BAND_4_A                 0x58D
+#define WM8995_AIF2_EQ_BAND_4_B                 0x58E
+#define WM8995_AIF2_EQ_BAND_4_C                 0x58F
+#define WM8995_AIF2_EQ_BAND_4_PG                0x590
+#define WM8995_AIF2_EQ_BAND_5_A                 0x591
+#define WM8995_AIF2_EQ_BAND_5_B                 0x592
+#define WM8995_AIF2_EQ_BAND_5_PG                0x593
+#define WM8995_DAC1_MIXER_VOLUMES               0x600
+#define WM8995_DAC1_LEFT_MIXER_ROUTING          0x601
+#define WM8995_DAC1_RIGHT_MIXER_ROUTING         0x602
+#define WM8995_DAC2_MIXER_VOLUMES               0x603
+#define WM8995_DAC2_LEFT_MIXER_ROUTING          0x604
+#define WM8995_DAC2_RIGHT_MIXER_ROUTING         0x605
+#define WM8995_AIF1_ADC1_LEFT_MIXER_ROUTING     0x606
+#define WM8995_AIF1_ADC1_RIGHT_MIXER_ROUTING    0x607
+#define WM8995_AIF1_ADC2_LEFT_MIXER_ROUTING     0x608
+#define WM8995_AIF1_ADC2_RIGHT_MIXER_ROUTING    0x609
+#define WM8995_DAC_SOFTMUTE                     0x610
+#define WM8995_OVERSAMPLING                     0x620
+#define WM8995_SIDETONE                         0x621
+#define WM8995_GPIO_1                           0x700
+#define WM8995_GPIO_2                           0x701
+#define WM8995_GPIO_3                           0x702
+#define WM8995_GPIO_4                           0x703
+#define WM8995_GPIO_5                           0x704
+#define WM8995_GPIO_6                           0x705
+#define WM8995_GPIO_7                           0x706
+#define WM8995_GPIO_8                           0x707
+#define WM8995_GPIO_9                           0x708
+#define WM8995_GPIO_10                          0x709
+#define WM8995_GPIO_11                          0x70A
+#define WM8995_GPIO_12                          0x70B
+#define WM8995_GPIO_13                          0x70C
+#define WM8995_GPIO_14                          0x70D
+#define WM8995_PULL_CONTROL_1                   0x720
+#define WM8995_PULL_CONTROL_2                   0x721
+#define WM8995_INTERRUPT_STATUS_1               0x730
+#define WM8995_INTERRUPT_STATUS_2               0x731
+#define WM8995_INTERRUPT_RAW_STATUS_2           0x732
+#define WM8995_INTERRUPT_STATUS_1_MASK          0x738
+#define WM8995_INTERRUPT_STATUS_2_MASK          0x739
+#define WM8995_INTERRUPT_CONTROL                0x740
+#define WM8995_LEFT_PDM_SPEAKER_1               0x800
+#define WM8995_RIGHT_PDM_SPEAKER_1              0x801
+#define WM8995_PDM_SPEAKER_1_MUTE_SEQUENCE      0x802
+#define WM8995_LEFT_PDM_SPEAKER_2               0x808
+#define WM8995_RIGHT_PDM_SPEAKER_2              0x809
+#define WM8995_PDM_SPEAKER_2_MUTE_SEQUENCE      0x80A
+#define WM8995_WRITE_SEQUENCER_0                0x3000
+#define WM8995_WRITE_SEQUENCER_1                0x3001
+#define WM8995_WRITE_SEQUENCER_2                0x3002
+#define WM8995_WRITE_SEQUENCER_3                0x3003
+#define WM8995_WRITE_SEQUENCER_4                0x3004
+#define WM8995_WRITE_SEQUENCER_5                0x3005
+#define WM8995_WRITE_SEQUENCER_6                0x3006
+#define WM8995_WRITE_SEQUENCER_7                0x3007
+#define WM8995_WRITE_SEQUENCER_8                0x3008
+#define WM8995_WRITE_SEQUENCER_9                0x3009
+#define WM8995_WRITE_SEQUENCER_10               0x300A
+#define WM8995_WRITE_SEQUENCER_11               0x300B
+#define WM8995_WRITE_SEQUENCER_12               0x300C
+#define WM8995_WRITE_SEQUENCER_13               0x300D
+#define WM8995_WRITE_SEQUENCER_14               0x300E
+#define WM8995_WRITE_SEQUENCER_15               0x300F
+#define WM8995_WRITE_SEQUENCER_16               0x3010
+#define WM8995_WRITE_SEQUENCER_17               0x3011
+#define WM8995_WRITE_SEQUENCER_18               0x3012
+#define WM8995_WRITE_SEQUENCER_19               0x3013
+#define WM8995_WRITE_SEQUENCER_20               0x3014
+#define WM8995_WRITE_SEQUENCER_21               0x3015
+#define WM8995_WRITE_SEQUENCER_22               0x3016
+#define WM8995_WRITE_SEQUENCER_23               0x3017
+#define WM8995_WRITE_SEQUENCER_24               0x3018
+#define WM8995_WRITE_SEQUENCER_25               0x3019
+#define WM8995_WRITE_SEQUENCER_26               0x301A
+#define WM8995_WRITE_SEQUENCER_27               0x301B
+#define WM8995_WRITE_SEQUENCER_28               0x301C
+#define WM8995_WRITE_SEQUENCER_29               0x301D
+#define WM8995_WRITE_SEQUENCER_30               0x301E
+#define WM8995_WRITE_SEQUENCER_31               0x301F
+#define WM8995_WRITE_SEQUENCER_32               0x3020
+#define WM8995_WRITE_SEQUENCER_33               0x3021
+#define WM8995_WRITE_SEQUENCER_34               0x3022
+#define WM8995_WRITE_SEQUENCER_35               0x3023
+#define WM8995_WRITE_SEQUENCER_36               0x3024
+#define WM8995_WRITE_SEQUENCER_37               0x3025
+#define WM8995_WRITE_SEQUENCER_38               0x3026
+#define WM8995_WRITE_SEQUENCER_39               0x3027
+#define WM8995_WRITE_SEQUENCER_40               0x3028
+#define WM8995_WRITE_SEQUENCER_41               0x3029
+#define WM8995_WRITE_SEQUENCER_42               0x302A
+#define WM8995_WRITE_SEQUENCER_43               0x302B
+#define WM8995_WRITE_SEQUENCER_44               0x302C
+#define WM8995_WRITE_SEQUENCER_45               0x302D
+#define WM8995_WRITE_SEQUENCER_46               0x302E
+#define WM8995_WRITE_SEQUENCER_47               0x302F
+#define WM8995_WRITE_SEQUENCER_48               0x3030
+#define WM8995_WRITE_SEQUENCER_49               0x3031
+#define WM8995_WRITE_SEQUENCER_50               0x3032
+#define WM8995_WRITE_SEQUENCER_51               0x3033
+#define WM8995_WRITE_SEQUENCER_52               0x3034
+#define WM8995_WRITE_SEQUENCER_53               0x3035
+#define WM8995_WRITE_SEQUENCER_54               0x3036
+#define WM8995_WRITE_SEQUENCER_55               0x3037
+#define WM8995_WRITE_SEQUENCER_56               0x3038
+#define WM8995_WRITE_SEQUENCER_57               0x3039
+#define WM8995_WRITE_SEQUENCER_58               0x303A
+#define WM8995_WRITE_SEQUENCER_59               0x303B
+#define WM8995_WRITE_SEQUENCER_60               0x303C
+#define WM8995_WRITE_SEQUENCER_61               0x303D
+#define WM8995_WRITE_SEQUENCER_62               0x303E
+#define WM8995_WRITE_SEQUENCER_63               0x303F
+#define WM8995_WRITE_SEQUENCER_64               0x3040
+#define WM8995_WRITE_SEQUENCER_65               0x3041
+#define WM8995_WRITE_SEQUENCER_66               0x3042
+#define WM8995_WRITE_SEQUENCER_67               0x3043
+#define WM8995_WRITE_SEQUENCER_68               0x3044
+#define WM8995_WRITE_SEQUENCER_69               0x3045
+#define WM8995_WRITE_SEQUENCER_70               0x3046
+#define WM8995_WRITE_SEQUENCER_71               0x3047
+#define WM8995_WRITE_SEQUENCER_72               0x3048
+#define WM8995_WRITE_SEQUENCER_73               0x3049
+#define WM8995_WRITE_SEQUENCER_74               0x304A
+#define WM8995_WRITE_SEQUENCER_75               0x304B
+#define WM8995_WRITE_SEQUENCER_76               0x304C
+#define WM8995_WRITE_SEQUENCER_77               0x304D
+#define WM8995_WRITE_SEQUENCER_78               0x304E
+#define WM8995_WRITE_SEQUENCER_79               0x304F
+#define WM8995_WRITE_SEQUENCER_80               0x3050
+#define WM8995_WRITE_SEQUENCER_81               0x3051
+#define WM8995_WRITE_SEQUENCER_82               0x3052
+#define WM8995_WRITE_SEQUENCER_83               0x3053
+#define WM8995_WRITE_SEQUENCER_84               0x3054
+#define WM8995_WRITE_SEQUENCER_85               0x3055
+#define WM8995_WRITE_SEQUENCER_86               0x3056
+#define WM8995_WRITE_SEQUENCER_87               0x3057
+#define WM8995_WRITE_SEQUENCER_88               0x3058
+#define WM8995_WRITE_SEQUENCER_89               0x3059
+#define WM8995_WRITE_SEQUENCER_90               0x305A
+#define WM8995_WRITE_SEQUENCER_91               0x305B
+#define WM8995_WRITE_SEQUENCER_92               0x305C
+#define WM8995_WRITE_SEQUENCER_93               0x305D
+#define WM8995_WRITE_SEQUENCER_94               0x305E
+#define WM8995_WRITE_SEQUENCER_95               0x305F
+#define WM8995_WRITE_SEQUENCER_96               0x3060
+#define WM8995_WRITE_SEQUENCER_97               0x3061
+#define WM8995_WRITE_SEQUENCER_98               0x3062
+#define WM8995_WRITE_SEQUENCER_99               0x3063
+#define WM8995_WRITE_SEQUENCER_100              0x3064
+#define WM8995_WRITE_SEQUENCER_101              0x3065
+#define WM8995_WRITE_SEQUENCER_102              0x3066
+#define WM8995_WRITE_SEQUENCER_103              0x3067
+#define WM8995_WRITE_SEQUENCER_104              0x3068
+#define WM8995_WRITE_SEQUENCER_105              0x3069
+#define WM8995_WRITE_SEQUENCER_106              0x306A
+#define WM8995_WRITE_SEQUENCER_107              0x306B
+#define WM8995_WRITE_SEQUENCER_108              0x306C
+#define WM8995_WRITE_SEQUENCER_109              0x306D
+#define WM8995_WRITE_SEQUENCER_110              0x306E
+#define WM8995_WRITE_SEQUENCER_111              0x306F
+#define WM8995_WRITE_SEQUENCER_112              0x3070
+#define WM8995_WRITE_SEQUENCER_113              0x3071
+#define WM8995_WRITE_SEQUENCER_114              0x3072
+#define WM8995_WRITE_SEQUENCER_115              0x3073
+#define WM8995_WRITE_SEQUENCER_116              0x3074
+#define WM8995_WRITE_SEQUENCER_117              0x3075
+#define WM8995_WRITE_SEQUENCER_118              0x3076
+#define WM8995_WRITE_SEQUENCER_119              0x3077
+#define WM8995_WRITE_SEQUENCER_120              0x3078
+#define WM8995_WRITE_SEQUENCER_121              0x3079
+#define WM8995_WRITE_SEQUENCER_122              0x307A
+#define WM8995_WRITE_SEQUENCER_123              0x307B
+#define WM8995_WRITE_SEQUENCER_124              0x307C
+#define WM8995_WRITE_SEQUENCER_125              0x307D
+#define WM8995_WRITE_SEQUENCER_126              0x307E
+#define WM8995_WRITE_SEQUENCER_127              0x307F
+#define WM8995_WRITE_SEQUENCER_128              0x3080
+#define WM8995_WRITE_SEQUENCER_129              0x3081
+#define WM8995_WRITE_SEQUENCER_130              0x3082
+#define WM8995_WRITE_SEQUENCER_131              0x3083
+#define WM8995_WRITE_SEQUENCER_132              0x3084
+#define WM8995_WRITE_SEQUENCER_133              0x3085
+#define WM8995_WRITE_SEQUENCER_134              0x3086
+#define WM8995_WRITE_SEQUENCER_135              0x3087
+#define WM8995_WRITE_SEQUENCER_136              0x3088
+#define WM8995_WRITE_SEQUENCER_137              0x3089
+#define WM8995_WRITE_SEQUENCER_138              0x308A
+#define WM8995_WRITE_SEQUENCER_139              0x308B
+#define WM8995_WRITE_SEQUENCER_140              0x308C
+#define WM8995_WRITE_SEQUENCER_141              0x308D
+#define WM8995_WRITE_SEQUENCER_142              0x308E
+#define WM8995_WRITE_SEQUENCER_143              0x308F
+#define WM8995_WRITE_SEQUENCER_144              0x3090
+#define WM8995_WRITE_SEQUENCER_145              0x3091
+#define WM8995_WRITE_SEQUENCER_146              0x3092
+#define WM8995_WRITE_SEQUENCER_147              0x3093
+#define WM8995_WRITE_SEQUENCER_148              0x3094
+#define WM8995_WRITE_SEQUENCER_149              0x3095
+#define WM8995_WRITE_SEQUENCER_150              0x3096
+#define WM8995_WRITE_SEQUENCER_151              0x3097
+#define WM8995_WRITE_SEQUENCER_152              0x3098
+#define WM8995_WRITE_SEQUENCER_153              0x3099
+#define WM8995_WRITE_SEQUENCER_154              0x309A
+#define WM8995_WRITE_SEQUENCER_155              0x309B
+#define WM8995_WRITE_SEQUENCER_156              0x309C
+#define WM8995_WRITE_SEQUENCER_157              0x309D
+#define WM8995_WRITE_SEQUENCER_158              0x309E
+#define WM8995_WRITE_SEQUENCER_159              0x309F
+#define WM8995_WRITE_SEQUENCER_160              0x30A0
+#define WM8995_WRITE_SEQUENCER_161              0x30A1
+#define WM8995_WRITE_SEQUENCER_162              0x30A2
+#define WM8995_WRITE_SEQUENCER_163              0x30A3
+#define WM8995_WRITE_SEQUENCER_164              0x30A4
+#define WM8995_WRITE_SEQUENCER_165              0x30A5
+#define WM8995_WRITE_SEQUENCER_166              0x30A6
+#define WM8995_WRITE_SEQUENCER_167              0x30A7
+#define WM8995_WRITE_SEQUENCER_168              0x30A8
+#define WM8995_WRITE_SEQUENCER_169              0x30A9
+#define WM8995_WRITE_SEQUENCER_170              0x30AA
+#define WM8995_WRITE_SEQUENCER_171              0x30AB
+#define WM8995_WRITE_SEQUENCER_172              0x30AC
+#define WM8995_WRITE_SEQUENCER_173              0x30AD
+#define WM8995_WRITE_SEQUENCER_174              0x30AE
+#define WM8995_WRITE_SEQUENCER_175              0x30AF
+#define WM8995_WRITE_SEQUENCER_176              0x30B0
+#define WM8995_WRITE_SEQUENCER_177              0x30B1
+#define WM8995_WRITE_SEQUENCER_178              0x30B2
+#define WM8995_WRITE_SEQUENCER_179              0x30B3
+#define WM8995_WRITE_SEQUENCER_180              0x30B4
+#define WM8995_WRITE_SEQUENCER_181              0x30B5
+#define WM8995_WRITE_SEQUENCER_182              0x30B6
+#define WM8995_WRITE_SEQUENCER_183              0x30B7
+#define WM8995_WRITE_SEQUENCER_184              0x30B8
+#define WM8995_WRITE_SEQUENCER_185              0x30B9
+#define WM8995_WRITE_SEQUENCER_186              0x30BA
+#define WM8995_WRITE_SEQUENCER_187              0x30BB
+#define WM8995_WRITE_SEQUENCER_188              0x30BC
+#define WM8995_WRITE_SEQUENCER_189              0x30BD
+#define WM8995_WRITE_SEQUENCER_190              0x30BE
+#define WM8995_WRITE_SEQUENCER_191              0x30BF
+#define WM8995_WRITE_SEQUENCER_192              0x30C0
+#define WM8995_WRITE_SEQUENCER_193              0x30C1
+#define WM8995_WRITE_SEQUENCER_194              0x30C2
+#define WM8995_WRITE_SEQUENCER_195              0x30C3
+#define WM8995_WRITE_SEQUENCER_196              0x30C4
+#define WM8995_WRITE_SEQUENCER_197              0x30C5
+#define WM8995_WRITE_SEQUENCER_198              0x30C6
+#define WM8995_WRITE_SEQUENCER_199              0x30C7
+#define WM8995_WRITE_SEQUENCER_200              0x30C8
+#define WM8995_WRITE_SEQUENCER_201              0x30C9
+#define WM8995_WRITE_SEQUENCER_202              0x30CA
+#define WM8995_WRITE_SEQUENCER_203              0x30CB
+#define WM8995_WRITE_SEQUENCER_204              0x30CC
+#define WM8995_WRITE_SEQUENCER_205              0x30CD
+#define WM8995_WRITE_SEQUENCER_206              0x30CE
+#define WM8995_WRITE_SEQUENCER_207              0x30CF
+#define WM8995_WRITE_SEQUENCER_208              0x30D0
+#define WM8995_WRITE_SEQUENCER_209              0x30D1
+#define WM8995_WRITE_SEQUENCER_210              0x30D2
+#define WM8995_WRITE_SEQUENCER_211              0x30D3
+#define WM8995_WRITE_SEQUENCER_212              0x30D4
+#define WM8995_WRITE_SEQUENCER_213              0x30D5
+#define WM8995_WRITE_SEQUENCER_214              0x30D6
+#define WM8995_WRITE_SEQUENCER_215              0x30D7
+#define WM8995_WRITE_SEQUENCER_216              0x30D8
+#define WM8995_WRITE_SEQUENCER_217              0x30D9
+#define WM8995_WRITE_SEQUENCER_218              0x30DA
+#define WM8995_WRITE_SEQUENCER_219              0x30DB
+#define WM8995_WRITE_SEQUENCER_220              0x30DC
+#define WM8995_WRITE_SEQUENCER_221              0x30DD
+#define WM8995_WRITE_SEQUENCER_222              0x30DE
+#define WM8995_WRITE_SEQUENCER_223              0x30DF
+#define WM8995_WRITE_SEQUENCER_224              0x30E0
+#define WM8995_WRITE_SEQUENCER_225              0x30E1
+#define WM8995_WRITE_SEQUENCER_226              0x30E2
+#define WM8995_WRITE_SEQUENCER_227              0x30E3
+#define WM8995_WRITE_SEQUENCER_228              0x30E4
+#define WM8995_WRITE_SEQUENCER_229              0x30E5
+#define WM8995_WRITE_SEQUENCER_230              0x30E6
+#define WM8995_WRITE_SEQUENCER_231              0x30E7
+#define WM8995_WRITE_SEQUENCER_232              0x30E8
+#define WM8995_WRITE_SEQUENCER_233              0x30E9
+#define WM8995_WRITE_SEQUENCER_234              0x30EA
+#define WM8995_WRITE_SEQUENCER_235              0x30EB
+#define WM8995_WRITE_SEQUENCER_236              0x30EC
+#define WM8995_WRITE_SEQUENCER_237              0x30ED
+#define WM8995_WRITE_SEQUENCER_238              0x30EE
+#define WM8995_WRITE_SEQUENCER_239              0x30EF
+#define WM8995_WRITE_SEQUENCER_240              0x30F0
+#define WM8995_WRITE_SEQUENCER_241              0x30F1
+#define WM8995_WRITE_SEQUENCER_242              0x30F2
+#define WM8995_WRITE_SEQUENCER_243              0x30F3
+#define WM8995_WRITE_SEQUENCER_244              0x30F4
+#define WM8995_WRITE_SEQUENCER_245              0x30F5
+#define WM8995_WRITE_SEQUENCER_246              0x30F6
+#define WM8995_WRITE_SEQUENCER_247              0x30F7
+#define WM8995_WRITE_SEQUENCER_248              0x30F8
+#define WM8995_WRITE_SEQUENCER_249              0x30F9
+#define WM8995_WRITE_SEQUENCER_250              0x30FA
+#define WM8995_WRITE_SEQUENCER_251              0x30FB
+#define WM8995_WRITE_SEQUENCER_252              0x30FC
+#define WM8995_WRITE_SEQUENCER_253              0x30FD
+#define WM8995_WRITE_SEQUENCER_254              0x30FE
+#define WM8995_WRITE_SEQUENCER_255              0x30FF
+#define WM8995_WRITE_SEQUENCER_256              0x3100
+#define WM8995_WRITE_SEQUENCER_257              0x3101
+#define WM8995_WRITE_SEQUENCER_258              0x3102
+#define WM8995_WRITE_SEQUENCER_259              0x3103
+#define WM8995_WRITE_SEQUENCER_260              0x3104
+#define WM8995_WRITE_SEQUENCER_261              0x3105
+#define WM8995_WRITE_SEQUENCER_262              0x3106
+#define WM8995_WRITE_SEQUENCER_263              0x3107
+#define WM8995_WRITE_SEQUENCER_264              0x3108
+#define WM8995_WRITE_SEQUENCER_265              0x3109
+#define WM8995_WRITE_SEQUENCER_266              0x310A
+#define WM8995_WRITE_SEQUENCER_267              0x310B
+#define WM8995_WRITE_SEQUENCER_268              0x310C
+#define WM8995_WRITE_SEQUENCER_269              0x310D
+#define WM8995_WRITE_SEQUENCER_270              0x310E
+#define WM8995_WRITE_SEQUENCER_271              0x310F
+#define WM8995_WRITE_SEQUENCER_272              0x3110
+#define WM8995_WRITE_SEQUENCER_273              0x3111
+#define WM8995_WRITE_SEQUENCER_274              0x3112
+#define WM8995_WRITE_SEQUENCER_275              0x3113
+#define WM8995_WRITE_SEQUENCER_276              0x3114
+#define WM8995_WRITE_SEQUENCER_277              0x3115
+#define WM8995_WRITE_SEQUENCER_278              0x3116
+#define WM8995_WRITE_SEQUENCER_279              0x3117
+#define WM8995_WRITE_SEQUENCER_280              0x3118
+#define WM8995_WRITE_SEQUENCER_281              0x3119
+#define WM8995_WRITE_SEQUENCER_282              0x311A
+#define WM8995_WRITE_SEQUENCER_283              0x311B
+#define WM8995_WRITE_SEQUENCER_284              0x311C
+#define WM8995_WRITE_SEQUENCER_285              0x311D
+#define WM8995_WRITE_SEQUENCER_286              0x311E
+#define WM8995_WRITE_SEQUENCER_287              0x311F
+#define WM8995_WRITE_SEQUENCER_288              0x3120
+#define WM8995_WRITE_SEQUENCER_289              0x3121
+#define WM8995_WRITE_SEQUENCER_290              0x3122
+#define WM8995_WRITE_SEQUENCER_291              0x3123
+#define WM8995_WRITE_SEQUENCER_292              0x3124
+#define WM8995_WRITE_SEQUENCER_293              0x3125
+#define WM8995_WRITE_SEQUENCER_294              0x3126
+#define WM8995_WRITE_SEQUENCER_295              0x3127
+#define WM8995_WRITE_SEQUENCER_296              0x3128
+#define WM8995_WRITE_SEQUENCER_297              0x3129
+#define WM8995_WRITE_SEQUENCER_298              0x312A
+#define WM8995_WRITE_SEQUENCER_299              0x312B
+#define WM8995_WRITE_SEQUENCER_300              0x312C
+#define WM8995_WRITE_SEQUENCER_301              0x312D
+#define WM8995_WRITE_SEQUENCER_302              0x312E
+#define WM8995_WRITE_SEQUENCER_303              0x312F
+#define WM8995_WRITE_SEQUENCER_304              0x3130
+#define WM8995_WRITE_SEQUENCER_305              0x3131
+#define WM8995_WRITE_SEQUENCER_306              0x3132
+#define WM8995_WRITE_SEQUENCER_307              0x3133
+#define WM8995_WRITE_SEQUENCER_308              0x3134
+#define WM8995_WRITE_SEQUENCER_309              0x3135
+#define WM8995_WRITE_SEQUENCER_310              0x3136
+#define WM8995_WRITE_SEQUENCER_311              0x3137
+#define WM8995_WRITE_SEQUENCER_312              0x3138
+#define WM8995_WRITE_SEQUENCER_313              0x3139
+#define WM8995_WRITE_SEQUENCER_314              0x313A
+#define WM8995_WRITE_SEQUENCER_315              0x313B
+#define WM8995_WRITE_SEQUENCER_316              0x313C
+#define WM8995_WRITE_SEQUENCER_317              0x313D
+#define WM8995_WRITE_SEQUENCER_318              0x313E
+#define WM8995_WRITE_SEQUENCER_319              0x313F
+#define WM8995_WRITE_SEQUENCER_320              0x3140
+#define WM8995_WRITE_SEQUENCER_321              0x3141
+#define WM8995_WRITE_SEQUENCER_322              0x3142
+#define WM8995_WRITE_SEQUENCER_323              0x3143
+#define WM8995_WRITE_SEQUENCER_324              0x3144
+#define WM8995_WRITE_SEQUENCER_325              0x3145
+#define WM8995_WRITE_SEQUENCER_326              0x3146
+#define WM8995_WRITE_SEQUENCER_327              0x3147
+#define WM8995_WRITE_SEQUENCER_328              0x3148
+#define WM8995_WRITE_SEQUENCER_329              0x3149
+#define WM8995_WRITE_SEQUENCER_330              0x314A
+#define WM8995_WRITE_SEQUENCER_331              0x314B
+#define WM8995_WRITE_SEQUENCER_332              0x314C
+#define WM8995_WRITE_SEQUENCER_333              0x314D
+#define WM8995_WRITE_SEQUENCER_334              0x314E
+#define WM8995_WRITE_SEQUENCER_335              0x314F
+#define WM8995_WRITE_SEQUENCER_336              0x3150
+#define WM8995_WRITE_SEQUENCER_337              0x3151
+#define WM8995_WRITE_SEQUENCER_338              0x3152
+#define WM8995_WRITE_SEQUENCER_339              0x3153
+#define WM8995_WRITE_SEQUENCER_340              0x3154
+#define WM8995_WRITE_SEQUENCER_341              0x3155
+#define WM8995_WRITE_SEQUENCER_342              0x3156
+#define WM8995_WRITE_SEQUENCER_343              0x3157
+#define WM8995_WRITE_SEQUENCER_344              0x3158
+#define WM8995_WRITE_SEQUENCER_345              0x3159
+#define WM8995_WRITE_SEQUENCER_346              0x315A
+#define WM8995_WRITE_SEQUENCER_347              0x315B
+#define WM8995_WRITE_SEQUENCER_348              0x315C
+#define WM8995_WRITE_SEQUENCER_349              0x315D
+#define WM8995_WRITE_SEQUENCER_350              0x315E
+#define WM8995_WRITE_SEQUENCER_351              0x315F
+#define WM8995_WRITE_SEQUENCER_352              0x3160
+#define WM8995_WRITE_SEQUENCER_353              0x3161
+#define WM8995_WRITE_SEQUENCER_354              0x3162
+#define WM8995_WRITE_SEQUENCER_355              0x3163
+#define WM8995_WRITE_SEQUENCER_356              0x3164
+#define WM8995_WRITE_SEQUENCER_357              0x3165
+#define WM8995_WRITE_SEQUENCER_358              0x3166
+#define WM8995_WRITE_SEQUENCER_359              0x3167
+#define WM8995_WRITE_SEQUENCER_360              0x3168
+#define WM8995_WRITE_SEQUENCER_361              0x3169
+#define WM8995_WRITE_SEQUENCER_362              0x316A
+#define WM8995_WRITE_SEQUENCER_363              0x316B
+#define WM8995_WRITE_SEQUENCER_364              0x316C
+#define WM8995_WRITE_SEQUENCER_365              0x316D
+#define WM8995_WRITE_SEQUENCER_366              0x316E
+#define WM8995_WRITE_SEQUENCER_367              0x316F
+#define WM8995_WRITE_SEQUENCER_368              0x3170
+#define WM8995_WRITE_SEQUENCER_369              0x3171
+#define WM8995_WRITE_SEQUENCER_370              0x3172
+#define WM8995_WRITE_SEQUENCER_371              0x3173
+#define WM8995_WRITE_SEQUENCER_372              0x3174
+#define WM8995_WRITE_SEQUENCER_373              0x3175
+#define WM8995_WRITE_SEQUENCER_374              0x3176
+#define WM8995_WRITE_SEQUENCER_375              0x3177
+#define WM8995_WRITE_SEQUENCER_376              0x3178
+#define WM8995_WRITE_SEQUENCER_377              0x3179
+#define WM8995_WRITE_SEQUENCER_378              0x317A
+#define WM8995_WRITE_SEQUENCER_379              0x317B
+#define WM8995_WRITE_SEQUENCER_380              0x317C
+#define WM8995_WRITE_SEQUENCER_381              0x317D
+#define WM8995_WRITE_SEQUENCER_382              0x317E
+#define WM8995_WRITE_SEQUENCER_383              0x317F
+#define WM8995_WRITE_SEQUENCER_384              0x3180
+#define WM8995_WRITE_SEQUENCER_385              0x3181
+#define WM8995_WRITE_SEQUENCER_386              0x3182
+#define WM8995_WRITE_SEQUENCER_387              0x3183
+#define WM8995_WRITE_SEQUENCER_388              0x3184
+#define WM8995_WRITE_SEQUENCER_389              0x3185
+#define WM8995_WRITE_SEQUENCER_390              0x3186
+#define WM8995_WRITE_SEQUENCER_391              0x3187
+#define WM8995_WRITE_SEQUENCER_392              0x3188
+#define WM8995_WRITE_SEQUENCER_393              0x3189
+#define WM8995_WRITE_SEQUENCER_394              0x318A
+#define WM8995_WRITE_SEQUENCER_395              0x318B
+#define WM8995_WRITE_SEQUENCER_396              0x318C
+#define WM8995_WRITE_SEQUENCER_397              0x318D
+#define WM8995_WRITE_SEQUENCER_398              0x318E
+#define WM8995_WRITE_SEQUENCER_399              0x318F
+#define WM8995_WRITE_SEQUENCER_400              0x3190
+#define WM8995_WRITE_SEQUENCER_401              0x3191
+#define WM8995_WRITE_SEQUENCER_402              0x3192
+#define WM8995_WRITE_SEQUENCER_403              0x3193
+#define WM8995_WRITE_SEQUENCER_404              0x3194
+#define WM8995_WRITE_SEQUENCER_405              0x3195
+#define WM8995_WRITE_SEQUENCER_406              0x3196
+#define WM8995_WRITE_SEQUENCER_407              0x3197
+#define WM8995_WRITE_SEQUENCER_408              0x3198
+#define WM8995_WRITE_SEQUENCER_409              0x3199
+#define WM8995_WRITE_SEQUENCER_410              0x319A
+#define WM8995_WRITE_SEQUENCER_411              0x319B
+#define WM8995_WRITE_SEQUENCER_412              0x319C
+#define WM8995_WRITE_SEQUENCER_413              0x319D
+#define WM8995_WRITE_SEQUENCER_414              0x319E
+#define WM8995_WRITE_SEQUENCER_415              0x319F
+#define WM8995_WRITE_SEQUENCER_416              0x31A0
+#define WM8995_WRITE_SEQUENCER_417              0x31A1
+#define WM8995_WRITE_SEQUENCER_418              0x31A2
+#define WM8995_WRITE_SEQUENCER_419              0x31A3
+#define WM8995_WRITE_SEQUENCER_420              0x31A4
+#define WM8995_WRITE_SEQUENCER_421              0x31A5
+#define WM8995_WRITE_SEQUENCER_422              0x31A6
+#define WM8995_WRITE_SEQUENCER_423              0x31A7
+#define WM8995_WRITE_SEQUENCER_424              0x31A8
+#define WM8995_WRITE_SEQUENCER_425              0x31A9
+#define WM8995_WRITE_SEQUENCER_426              0x31AA
+#define WM8995_WRITE_SEQUENCER_427              0x31AB
+#define WM8995_WRITE_SEQUENCER_428              0x31AC
+#define WM8995_WRITE_SEQUENCER_429              0x31AD
+#define WM8995_WRITE_SEQUENCER_430              0x31AE
+#define WM8995_WRITE_SEQUENCER_431              0x31AF
+#define WM8995_WRITE_SEQUENCER_432              0x31B0
+#define WM8995_WRITE_SEQUENCER_433              0x31B1
+#define WM8995_WRITE_SEQUENCER_434              0x31B2
+#define WM8995_WRITE_SEQUENCER_435              0x31B3
+#define WM8995_WRITE_SEQUENCER_436              0x31B4
+#define WM8995_WRITE_SEQUENCER_437              0x31B5
+#define WM8995_WRITE_SEQUENCER_438              0x31B6
+#define WM8995_WRITE_SEQUENCER_439              0x31B7
+#define WM8995_WRITE_SEQUENCER_440              0x31B8
+#define WM8995_WRITE_SEQUENCER_441              0x31B9
+#define WM8995_WRITE_SEQUENCER_442              0x31BA
+#define WM8995_WRITE_SEQUENCER_443              0x31BB
+#define WM8995_WRITE_SEQUENCER_444              0x31BC
+#define WM8995_WRITE_SEQUENCER_445              0x31BD
+#define WM8995_WRITE_SEQUENCER_446              0x31BE
+#define WM8995_WRITE_SEQUENCER_447              0x31BF
+#define WM8995_WRITE_SEQUENCER_448              0x31C0
+#define WM8995_WRITE_SEQUENCER_449              0x31C1
+#define WM8995_WRITE_SEQUENCER_450              0x31C2
+#define WM8995_WRITE_SEQUENCER_451              0x31C3
+#define WM8995_WRITE_SEQUENCER_452              0x31C4
+#define WM8995_WRITE_SEQUENCER_453              0x31C5
+#define WM8995_WRITE_SEQUENCER_454              0x31C6
+#define WM8995_WRITE_SEQUENCER_455              0x31C7
+#define WM8995_WRITE_SEQUENCER_456              0x31C8
+#define WM8995_WRITE_SEQUENCER_457              0x31C9
+#define WM8995_WRITE_SEQUENCER_458              0x31CA
+#define WM8995_WRITE_SEQUENCER_459              0x31CB
+#define WM8995_WRITE_SEQUENCER_460              0x31CC
+#define WM8995_WRITE_SEQUENCER_461              0x31CD
+#define WM8995_WRITE_SEQUENCER_462              0x31CE
+#define WM8995_WRITE_SEQUENCER_463              0x31CF
+#define WM8995_WRITE_SEQUENCER_464              0x31D0
+#define WM8995_WRITE_SEQUENCER_465              0x31D1
+#define WM8995_WRITE_SEQUENCER_466              0x31D2
+#define WM8995_WRITE_SEQUENCER_467              0x31D3
+#define WM8995_WRITE_SEQUENCER_468              0x31D4
+#define WM8995_WRITE_SEQUENCER_469              0x31D5
+#define WM8995_WRITE_SEQUENCER_470              0x31D6
+#define WM8995_WRITE_SEQUENCER_471              0x31D7
+#define WM8995_WRITE_SEQUENCER_472              0x31D8
+#define WM8995_WRITE_SEQUENCER_473              0x31D9
+#define WM8995_WRITE_SEQUENCER_474              0x31DA
+#define WM8995_WRITE_SEQUENCER_475              0x31DB
+#define WM8995_WRITE_SEQUENCER_476              0x31DC
+#define WM8995_WRITE_SEQUENCER_477              0x31DD
+#define WM8995_WRITE_SEQUENCER_478              0x31DE
+#define WM8995_WRITE_SEQUENCER_479              0x31DF
+#define WM8995_WRITE_SEQUENCER_480              0x31E0
+#define WM8995_WRITE_SEQUENCER_481              0x31E1
+#define WM8995_WRITE_SEQUENCER_482              0x31E2
+#define WM8995_WRITE_SEQUENCER_483              0x31E3
+#define WM8995_WRITE_SEQUENCER_484              0x31E4
+#define WM8995_WRITE_SEQUENCER_485              0x31E5
+#define WM8995_WRITE_SEQUENCER_486              0x31E6
+#define WM8995_WRITE_SEQUENCER_487              0x31E7
+#define WM8995_WRITE_SEQUENCER_488              0x31E8
+#define WM8995_WRITE_SEQUENCER_489              0x31E9
+#define WM8995_WRITE_SEQUENCER_490              0x31EA
+#define WM8995_WRITE_SEQUENCER_491              0x31EB
+#define WM8995_WRITE_SEQUENCER_492              0x31EC
+#define WM8995_WRITE_SEQUENCER_493              0x31ED
+#define WM8995_WRITE_SEQUENCER_494              0x31EE
+#define WM8995_WRITE_SEQUENCER_495              0x31EF
+#define WM8995_WRITE_SEQUENCER_496              0x31F0
+#define WM8995_WRITE_SEQUENCER_497              0x31F1
+#define WM8995_WRITE_SEQUENCER_498              0x31F2
+#define WM8995_WRITE_SEQUENCER_499              0x31F3
+#define WM8995_WRITE_SEQUENCER_500              0x31F4
+#define WM8995_WRITE_SEQUENCER_501              0x31F5
+#define WM8995_WRITE_SEQUENCER_502              0x31F6
+#define WM8995_WRITE_SEQUENCER_503              0x31F7
+#define WM8995_WRITE_SEQUENCER_504              0x31F8
+#define WM8995_WRITE_SEQUENCER_505              0x31F9
+#define WM8995_WRITE_SEQUENCER_506              0x31FA
+#define WM8995_WRITE_SEQUENCER_507              0x31FB
+#define WM8995_WRITE_SEQUENCER_508              0x31FC
+#define WM8995_WRITE_SEQUENCER_509              0x31FD
+#define WM8995_WRITE_SEQUENCER_510              0x31FE
+#define WM8995_WRITE_SEQUENCER_511              0x31FF
+
+#define WM8995_REGISTER_COUNT                   725
+#define WM8995_MAX_REGISTER                     0x31FF
+
+#define WM8995_MAX_CACHED_REGISTER		WM8995_MAX_REGISTER
+
+/*
+ * Field Definitions.
+ */
+
+/*
+ * R0 (0x00) - Software Reset
+ */
+#define WM8995_SW_RESET_MASK                    0xFFFF	/* SW_RESET - [15:0] */
+#define WM8995_SW_RESET_SHIFT                        0	/* SW_RESET - [15:0] */
+#define WM8995_SW_RESET_WIDTH                       16	/* SW_RESET - [15:0] */
+
+/*
+ * R1 (0x01) - Power Management (1)
+ */
+#define WM8995_MICB2_ENA                        0x0200	/* MICB2_ENA */
+#define WM8995_MICB2_ENA_MASK                   0x0200	/* MICB2_ENA */
+#define WM8995_MICB2_ENA_SHIFT                       9	/* MICB2_ENA */
+#define WM8995_MICB2_ENA_WIDTH                       1	/* MICB2_ENA */
+#define WM8995_MICB1_ENA                        0x0100	/* MICB1_ENA */
+#define WM8995_MICB1_ENA_MASK                   0x0100	/* MICB1_ENA */
+#define WM8995_MICB1_ENA_SHIFT                       8	/* MICB1_ENA */
+#define WM8995_MICB1_ENA_WIDTH                       1	/* MICB1_ENA */
+#define WM8995_HPOUT2L_ENA                      0x0080	/* HPOUT2L_ENA */
+#define WM8995_HPOUT2L_ENA_MASK                 0x0080	/* HPOUT2L_ENA */
+#define WM8995_HPOUT2L_ENA_SHIFT                     7	/* HPOUT2L_ENA */
+#define WM8995_HPOUT2L_ENA_WIDTH                     1	/* HPOUT2L_ENA */
+#define WM8995_HPOUT2R_ENA                      0x0040	/* HPOUT2R_ENA */
+#define WM8995_HPOUT2R_ENA_MASK                 0x0040	/* HPOUT2R_ENA */
+#define WM8995_HPOUT2R_ENA_SHIFT                     6	/* HPOUT2R_ENA */
+#define WM8995_HPOUT2R_ENA_WIDTH                     1	/* HPOUT2R_ENA */
+#define WM8995_HPOUT1L_ENA                      0x0020	/* HPOUT1L_ENA */
+#define WM8995_HPOUT1L_ENA_MASK                 0x0020	/* HPOUT1L_ENA */
+#define WM8995_HPOUT1L_ENA_SHIFT                     5	/* HPOUT1L_ENA */
+#define WM8995_HPOUT1L_ENA_WIDTH                     1	/* HPOUT1L_ENA */
+#define WM8995_HPOUT1R_ENA                      0x0010	/* HPOUT1R_ENA */
+#define WM8995_HPOUT1R_ENA_MASK                 0x0010	/* HPOUT1R_ENA */
+#define WM8995_HPOUT1R_ENA_SHIFT                     4	/* HPOUT1R_ENA */
+#define WM8995_HPOUT1R_ENA_WIDTH                     1	/* HPOUT1R_ENA */
+#define WM8995_BG_ENA                           0x0001	/* BG_ENA */
+#define WM8995_BG_ENA_MASK                      0x0001	/* BG_ENA */
+#define WM8995_BG_ENA_SHIFT                          0	/* BG_ENA */
+#define WM8995_BG_ENA_WIDTH                          1	/* BG_ENA */
+
+/*
+ * R2 (0x02) - Power Management (2)
+ */
+#define WM8995_OPCLK_ENA                        0x0800	/* OPCLK_ENA */
+#define WM8995_OPCLK_ENA_MASK                   0x0800	/* OPCLK_ENA */
+#define WM8995_OPCLK_ENA_SHIFT                      11	/* OPCLK_ENA */
+#define WM8995_OPCLK_ENA_WIDTH                       1	/* OPCLK_ENA */
+#define WM8995_IN1L_ENA                         0x0020	/* IN1L_ENA */
+#define WM8995_IN1L_ENA_MASK                    0x0020	/* IN1L_ENA */
+#define WM8995_IN1L_ENA_SHIFT                        5	/* IN1L_ENA */
+#define WM8995_IN1L_ENA_WIDTH                        1	/* IN1L_ENA */
+#define WM8995_IN1R_ENA                         0x0010	/* IN1R_ENA */
+#define WM8995_IN1R_ENA_MASK                    0x0010	/* IN1R_ENA */
+#define WM8995_IN1R_ENA_SHIFT                        4	/* IN1R_ENA */
+#define WM8995_IN1R_ENA_WIDTH                        1	/* IN1R_ENA */
+#define WM8995_LDO2_ENA                         0x0002	/* LDO2_ENA */
+#define WM8995_LDO2_ENA_MASK                    0x0002	/* LDO2_ENA */
+#define WM8995_LDO2_ENA_SHIFT                        1	/* LDO2_ENA */
+#define WM8995_LDO2_ENA_WIDTH                        1	/* LDO2_ENA */
+
+/*
+ * R3 (0x03) - Power Management (3)
+ */
+#define WM8995_AIF2ADCL_ENA                     0x2000	/* AIF2ADCL_ENA */
+#define WM8995_AIF2ADCL_ENA_MASK                0x2000	/* AIF2ADCL_ENA */
+#define WM8995_AIF2ADCL_ENA_SHIFT                   13	/* AIF2ADCL_ENA */
+#define WM8995_AIF2ADCL_ENA_WIDTH                    1	/* AIF2ADCL_ENA */
+#define WM8995_AIF2ADCR_ENA                     0x1000	/* AIF2ADCR_ENA */
+#define WM8995_AIF2ADCR_ENA_MASK                0x1000	/* AIF2ADCR_ENA */
+#define WM8995_AIF2ADCR_ENA_SHIFT                   12	/* AIF2ADCR_ENA */
+#define WM8995_AIF2ADCR_ENA_WIDTH                    1	/* AIF2ADCR_ENA */
+#define WM8995_AIF1ADC2L_ENA                    0x0800	/* AIF1ADC2L_ENA */
+#define WM8995_AIF1ADC2L_ENA_MASK               0x0800	/* AIF1ADC2L_ENA */
+#define WM8995_AIF1ADC2L_ENA_SHIFT                  11	/* AIF1ADC2L_ENA */
+#define WM8995_AIF1ADC2L_ENA_WIDTH                   1	/* AIF1ADC2L_ENA */
+#define WM8995_AIF1ADC2R_ENA                    0x0400	/* AIF1ADC2R_ENA */
+#define WM8995_AIF1ADC2R_ENA_MASK               0x0400	/* AIF1ADC2R_ENA */
+#define WM8995_AIF1ADC2R_ENA_SHIFT                  10	/* AIF1ADC2R_ENA */
+#define WM8995_AIF1ADC2R_ENA_WIDTH                   1	/* AIF1ADC2R_ENA */
+#define WM8995_AIF1ADC1L_ENA                    0x0200	/* AIF1ADC1L_ENA */
+#define WM8995_AIF1ADC1L_ENA_MASK               0x0200	/* AIF1ADC1L_ENA */
+#define WM8995_AIF1ADC1L_ENA_SHIFT                   9	/* AIF1ADC1L_ENA */
+#define WM8995_AIF1ADC1L_ENA_WIDTH                   1	/* AIF1ADC1L_ENA */
+#define WM8995_AIF1ADC1R_ENA                    0x0100	/* AIF1ADC1R_ENA */
+#define WM8995_AIF1ADC1R_ENA_MASK               0x0100	/* AIF1ADC1R_ENA */
+#define WM8995_AIF1ADC1R_ENA_SHIFT                   8	/* AIF1ADC1R_ENA */
+#define WM8995_AIF1ADC1R_ENA_WIDTH                   1	/* AIF1ADC1R_ENA */
+#define WM8995_DMIC3L_ENA                       0x0080	/* DMIC3L_ENA */
+#define WM8995_DMIC3L_ENA_MASK                  0x0080	/* DMIC3L_ENA */
+#define WM8995_DMIC3L_ENA_SHIFT                      7	/* DMIC3L_ENA */
+#define WM8995_DMIC3L_ENA_WIDTH                      1	/* DMIC3L_ENA */
+#define WM8995_DMIC3R_ENA                       0x0040	/* DMIC3R_ENA */
+#define WM8995_DMIC3R_ENA_MASK                  0x0040	/* DMIC3R_ENA */
+#define WM8995_DMIC3R_ENA_SHIFT                      6	/* DMIC3R_ENA */
+#define WM8995_DMIC3R_ENA_WIDTH                      1	/* DMIC3R_ENA */
+#define WM8995_DMIC2L_ENA                       0x0020	/* DMIC2L_ENA */
+#define WM8995_DMIC2L_ENA_MASK                  0x0020	/* DMIC2L_ENA */
+#define WM8995_DMIC2L_ENA_SHIFT                      5	/* DMIC2L_ENA */
+#define WM8995_DMIC2L_ENA_WIDTH                      1	/* DMIC2L_ENA */
+#define WM8995_DMIC2R_ENA                       0x0010	/* DMIC2R_ENA */
+#define WM8995_DMIC2R_ENA_MASK                  0x0010	/* DMIC2R_ENA */
+#define WM8995_DMIC2R_ENA_SHIFT                      4	/* DMIC2R_ENA */
+#define WM8995_DMIC2R_ENA_WIDTH                      1	/* DMIC2R_ENA */
+#define WM8995_DMIC1L_ENA                       0x0008	/* DMIC1L_ENA */
+#define WM8995_DMIC1L_ENA_MASK                  0x0008	/* DMIC1L_ENA */
+#define WM8995_DMIC1L_ENA_SHIFT                      3	/* DMIC1L_ENA */
+#define WM8995_DMIC1L_ENA_WIDTH                      1	/* DMIC1L_ENA */
+#define WM8995_DMIC1R_ENA                       0x0004	/* DMIC1R_ENA */
+#define WM8995_DMIC1R_ENA_MASK                  0x0004	/* DMIC1R_ENA */
+#define WM8995_DMIC1R_ENA_SHIFT                      2	/* DMIC1R_ENA */
+#define WM8995_DMIC1R_ENA_WIDTH                      1	/* DMIC1R_ENA */
+#define WM8995_ADCL_ENA                         0x0002	/* ADCL_ENA */
+#define WM8995_ADCL_ENA_MASK                    0x0002	/* ADCL_ENA */
+#define WM8995_ADCL_ENA_SHIFT                        1	/* ADCL_ENA */
+#define WM8995_ADCL_ENA_WIDTH                        1	/* ADCL_ENA */
+#define WM8995_ADCR_ENA                         0x0001	/* ADCR_ENA */
+#define WM8995_ADCR_ENA_MASK                    0x0001	/* ADCR_ENA */
+#define WM8995_ADCR_ENA_SHIFT                        0	/* ADCR_ENA */
+#define WM8995_ADCR_ENA_WIDTH                        1	/* ADCR_ENA */
+
+/*
+ * R4 (0x04) - Power Management (4)
+ */
+#define WM8995_AIF2DACL_ENA                     0x2000	/* AIF2DACL_ENA */
+#define WM8995_AIF2DACL_ENA_MASK                0x2000	/* AIF2DACL_ENA */
+#define WM8995_AIF2DACL_ENA_SHIFT                   13	/* AIF2DACL_ENA */
+#define WM8995_AIF2DACL_ENA_WIDTH                    1	/* AIF2DACL_ENA */
+#define WM8995_AIF2DACR_ENA                     0x1000	/* AIF2DACR_ENA */
+#define WM8995_AIF2DACR_ENA_MASK                0x1000	/* AIF2DACR_ENA */
+#define WM8995_AIF2DACR_ENA_SHIFT                   12	/* AIF2DACR_ENA */
+#define WM8995_AIF2DACR_ENA_WIDTH                    1	/* AIF2DACR_ENA */
+#define WM8995_AIF1DAC2L_ENA                    0x0800	/* AIF1DAC2L_ENA */
+#define WM8995_AIF1DAC2L_ENA_MASK               0x0800	/* AIF1DAC2L_ENA */
+#define WM8995_AIF1DAC2L_ENA_SHIFT                  11	/* AIF1DAC2L_ENA */
+#define WM8995_AIF1DAC2L_ENA_WIDTH                   1	/* AIF1DAC2L_ENA */
+#define WM8995_AIF1DAC2R_ENA                    0x0400	/* AIF1DAC2R_ENA */
+#define WM8995_AIF1DAC2R_ENA_MASK               0x0400	/* AIF1DAC2R_ENA */
+#define WM8995_AIF1DAC2R_ENA_SHIFT                  10	/* AIF1DAC2R_ENA */
+#define WM8995_AIF1DAC2R_ENA_WIDTH                   1	/* AIF1DAC2R_ENA */
+#define WM8995_AIF1DAC1L_ENA                    0x0200	/* AIF1DAC1L_ENA */
+#define WM8995_AIF1DAC1L_ENA_MASK               0x0200	/* AIF1DAC1L_ENA */
+#define WM8995_AIF1DAC1L_ENA_SHIFT                   9	/* AIF1DAC1L_ENA */
+#define WM8995_AIF1DAC1L_ENA_WIDTH                   1	/* AIF1DAC1L_ENA */
+#define WM8995_AIF1DAC1R_ENA                    0x0100	/* AIF1DAC1R_ENA */
+#define WM8995_AIF1DAC1R_ENA_MASK               0x0100	/* AIF1DAC1R_ENA */
+#define WM8995_AIF1DAC1R_ENA_SHIFT                   8	/* AIF1DAC1R_ENA */
+#define WM8995_AIF1DAC1R_ENA_WIDTH                   1	/* AIF1DAC1R_ENA */
+#define WM8995_DAC2L_ENA                        0x0008	/* DAC2L_ENA */
+#define WM8995_DAC2L_ENA_MASK                   0x0008	/* DAC2L_ENA */
+#define WM8995_DAC2L_ENA_SHIFT                       3	/* DAC2L_ENA */
+#define WM8995_DAC2L_ENA_WIDTH                       1	/* DAC2L_ENA */
+#define WM8995_DAC2R_ENA                        0x0004	/* DAC2R_ENA */
+#define WM8995_DAC2R_ENA_MASK                   0x0004	/* DAC2R_ENA */
+#define WM8995_DAC2R_ENA_SHIFT                       2	/* DAC2R_ENA */
+#define WM8995_DAC2R_ENA_WIDTH                       1	/* DAC2R_ENA */
+#define WM8995_DAC1L_ENA                        0x0002	/* DAC1L_ENA */
+#define WM8995_DAC1L_ENA_MASK                   0x0002	/* DAC1L_ENA */
+#define WM8995_DAC1L_ENA_SHIFT                       1	/* DAC1L_ENA */
+#define WM8995_DAC1L_ENA_WIDTH                       1	/* DAC1L_ENA */
+#define WM8995_DAC1R_ENA                        0x0001	/* DAC1R_ENA */
+#define WM8995_DAC1R_ENA_MASK                   0x0001	/* DAC1R_ENA */
+#define WM8995_DAC1R_ENA_SHIFT                       0	/* DAC1R_ENA */
+#define WM8995_DAC1R_ENA_WIDTH                       1	/* DAC1R_ENA */
+
+/*
+ * R5 (0x05) - Power Management (5)
+ */
+#define WM8995_DMIC_SRC2_MASK                   0x0300	/* DMIC_SRC2 - [9:8] */
+#define WM8995_DMIC_SRC2_SHIFT                       8	/* DMIC_SRC2 - [9:8] */
+#define WM8995_DMIC_SRC2_WIDTH                       2	/* DMIC_SRC2 - [9:8] */
+#define WM8995_DMIC_SRC1_MASK                   0x00C0	/* DMIC_SRC1 - [7:6] */
+#define WM8995_DMIC_SRC1_SHIFT                       6	/* DMIC_SRC1 - [7:6] */
+#define WM8995_DMIC_SRC1_WIDTH                       2	/* DMIC_SRC1 - [7:6] */
+#define WM8995_AIF3_TRI                         0x0020	/* AIF3_TRI */
+#define WM8995_AIF3_TRI_MASK                    0x0020	/* AIF3_TRI */
+#define WM8995_AIF3_TRI_SHIFT                        5	/* AIF3_TRI */
+#define WM8995_AIF3_TRI_WIDTH                        1	/* AIF3_TRI */
+#define WM8995_AIF3_ADCDAT_SRC_MASK             0x0018	/* AIF3_ADCDAT_SRC - [4:3] */
+#define WM8995_AIF3_ADCDAT_SRC_SHIFT                 3	/* AIF3_ADCDAT_SRC - [4:3] */
+#define WM8995_AIF3_ADCDAT_SRC_WIDTH                 2	/* AIF3_ADCDAT_SRC - [4:3] */
+#define WM8995_AIF2_ADCDAT_SRC                  0x0004	/* AIF2_ADCDAT_SRC */
+#define WM8995_AIF2_ADCDAT_SRC_MASK             0x0004	/* AIF2_ADCDAT_SRC */
+#define WM8995_AIF2_ADCDAT_SRC_SHIFT                 2	/* AIF2_ADCDAT_SRC */
+#define WM8995_AIF2_ADCDAT_SRC_WIDTH                 1	/* AIF2_ADCDAT_SRC */
+#define WM8995_AIF2_DACDAT_SRC                  0x0002	/* AIF2_DACDAT_SRC */
+#define WM8995_AIF2_DACDAT_SRC_MASK             0x0002	/* AIF2_DACDAT_SRC */
+#define WM8995_AIF2_DACDAT_SRC_SHIFT                 1	/* AIF2_DACDAT_SRC */
+#define WM8995_AIF2_DACDAT_SRC_WIDTH                 1	/* AIF2_DACDAT_SRC */
+#define WM8995_AIF1_DACDAT_SRC                  0x0001	/* AIF1_DACDAT_SRC */
+#define WM8995_AIF1_DACDAT_SRC_MASK             0x0001	/* AIF1_DACDAT_SRC */
+#define WM8995_AIF1_DACDAT_SRC_SHIFT                 0	/* AIF1_DACDAT_SRC */
+#define WM8995_AIF1_DACDAT_SRC_WIDTH                 1	/* AIF1_DACDAT_SRC */
+
+/*
+ * R16 (0x10) - Left Line Input 1 Volume
+ */
+#define WM8995_IN1_VU                           0x0080	/* IN1_VU */
+#define WM8995_IN1_VU_MASK                      0x0080	/* IN1_VU */
+#define WM8995_IN1_VU_SHIFT                          7	/* IN1_VU */
+#define WM8995_IN1_VU_WIDTH                          1	/* IN1_VU */
+#define WM8995_IN1L_ZC                          0x0020	/* IN1L_ZC */
+#define WM8995_IN1L_ZC_MASK                     0x0020	/* IN1L_ZC */
+#define WM8995_IN1L_ZC_SHIFT                         5	/* IN1L_ZC */
+#define WM8995_IN1L_ZC_WIDTH                         1	/* IN1L_ZC */
+#define WM8995_IN1L_VOL_MASK                    0x001F	/* IN1L_VOL - [4:0] */
+#define WM8995_IN1L_VOL_SHIFT                        0	/* IN1L_VOL - [4:0] */
+#define WM8995_IN1L_VOL_WIDTH                        5	/* IN1L_VOL - [4:0] */
+
+/*
+ * R17 (0x11) - Right Line Input 1 Volume
+ */
+#define WM8995_IN1_VU                           0x0080	/* IN1_VU */
+#define WM8995_IN1_VU_MASK                      0x0080	/* IN1_VU */
+#define WM8995_IN1_VU_SHIFT                          7	/* IN1_VU */
+#define WM8995_IN1_VU_WIDTH                          1	/* IN1_VU */
+#define WM8995_IN1R_ZC                          0x0020	/* IN1R_ZC */
+#define WM8995_IN1R_ZC_MASK                     0x0020	/* IN1R_ZC */
+#define WM8995_IN1R_ZC_SHIFT                         5	/* IN1R_ZC */
+#define WM8995_IN1R_ZC_WIDTH                         1	/* IN1R_ZC */
+#define WM8995_IN1R_VOL_MASK                    0x001F	/* IN1R_VOL - [4:0] */
+#define WM8995_IN1R_VOL_SHIFT                        0	/* IN1R_VOL - [4:0] */
+#define WM8995_IN1R_VOL_WIDTH                        5	/* IN1R_VOL - [4:0] */
+
+/*
+ * R18 (0x12) - Left Line Input Control
+ */
+#define WM8995_IN1L_BOOST_MASK                  0x0030	/* IN1L_BOOST - [5:4] */
+#define WM8995_IN1L_BOOST_SHIFT                      4	/* IN1L_BOOST - [5:4] */
+#define WM8995_IN1L_BOOST_WIDTH                      2	/* IN1L_BOOST - [5:4] */
+#define WM8995_IN1L_MODE_MASK                   0x000C	/* IN1L_MODE - [3:2] */
+#define WM8995_IN1L_MODE_SHIFT                       2	/* IN1L_MODE - [3:2] */
+#define WM8995_IN1L_MODE_WIDTH                       2	/* IN1L_MODE - [3:2] */
+#define WM8995_IN1R_MODE_MASK                   0x0003	/* IN1R_MODE - [1:0] */
+#define WM8995_IN1R_MODE_SHIFT                       0	/* IN1R_MODE - [1:0] */
+#define WM8995_IN1R_MODE_WIDTH                       2	/* IN1R_MODE - [1:0] */
+
+/*
+ * R24 (0x18) - DAC1 Left Volume
+ */
+#define WM8995_DAC1L_MUTE                       0x0200	/* DAC1L_MUTE */
+#define WM8995_DAC1L_MUTE_MASK                  0x0200	/* DAC1L_MUTE */
+#define WM8995_DAC1L_MUTE_SHIFT                      9	/* DAC1L_MUTE */
+#define WM8995_DAC1L_MUTE_WIDTH                      1	/* DAC1L_MUTE */
+#define WM8995_DAC1_VU                          0x0100	/* DAC1_VU */
+#define WM8995_DAC1_VU_MASK                     0x0100	/* DAC1_VU */
+#define WM8995_DAC1_VU_SHIFT                         8	/* DAC1_VU */
+#define WM8995_DAC1_VU_WIDTH                         1	/* DAC1_VU */
+#define WM8995_DAC1L_VOL_MASK                   0x00FF	/* DAC1L_VOL - [7:0] */
+#define WM8995_DAC1L_VOL_SHIFT                       0	/* DAC1L_VOL - [7:0] */
+#define WM8995_DAC1L_VOL_WIDTH                       8	/* DAC1L_VOL - [7:0] */
+
+/*
+ * R25 (0x19) - DAC1 Right Volume
+ */
+#define WM8995_DAC1R_MUTE                       0x0200	/* DAC1R_MUTE */
+#define WM8995_DAC1R_MUTE_MASK                  0x0200	/* DAC1R_MUTE */
+#define WM8995_DAC1R_MUTE_SHIFT                      9	/* DAC1R_MUTE */
+#define WM8995_DAC1R_MUTE_WIDTH                      1	/* DAC1R_MUTE */
+#define WM8995_DAC1_VU                          0x0100	/* DAC1_VU */
+#define WM8995_DAC1_VU_MASK                     0x0100	/* DAC1_VU */
+#define WM8995_DAC1_VU_SHIFT                         8	/* DAC1_VU */
+#define WM8995_DAC1_VU_WIDTH                         1	/* DAC1_VU */
+#define WM8995_DAC1R_VOL_MASK                   0x00FF	/* DAC1R_VOL - [7:0] */
+#define WM8995_DAC1R_VOL_SHIFT                       0	/* DAC1R_VOL - [7:0] */
+#define WM8995_DAC1R_VOL_WIDTH                       8	/* DAC1R_VOL - [7:0] */
+
+/*
+ * R26 (0x1A) - DAC2 Left Volume
+ */
+#define WM8995_DAC2L_MUTE                       0x0200	/* DAC2L_MUTE */
+#define WM8995_DAC2L_MUTE_MASK                  0x0200	/* DAC2L_MUTE */
+#define WM8995_DAC2L_MUTE_SHIFT                      9	/* DAC2L_MUTE */
+#define WM8995_DAC2L_MUTE_WIDTH                      1	/* DAC2L_MUTE */
+#define WM8995_DAC2_VU                          0x0100	/* DAC2_VU */
+#define WM8995_DAC2_VU_MASK                     0x0100	/* DAC2_VU */
+#define WM8995_DAC2_VU_SHIFT                         8	/* DAC2_VU */
+#define WM8995_DAC2_VU_WIDTH                         1	/* DAC2_VU */
+#define WM8995_DAC2L_VOL_MASK                   0x00FF	/* DAC2L_VOL - [7:0] */
+#define WM8995_DAC2L_VOL_SHIFT                       0	/* DAC2L_VOL - [7:0] */
+#define WM8995_DAC2L_VOL_WIDTH                       8	/* DAC2L_VOL - [7:0] */
+
+/*
+ * R27 (0x1B) - DAC2 Right Volume
+ */
+#define WM8995_DAC2R_MUTE                       0x0200	/* DAC2R_MUTE */
+#define WM8995_DAC2R_MUTE_MASK                  0x0200	/* DAC2R_MUTE */
+#define WM8995_DAC2R_MUTE_SHIFT                      9	/* DAC2R_MUTE */
+#define WM8995_DAC2R_MUTE_WIDTH                      1	/* DAC2R_MUTE */
+#define WM8995_DAC2_VU                          0x0100	/* DAC2_VU */
+#define WM8995_DAC2_VU_MASK                     0x0100	/* DAC2_VU */
+#define WM8995_DAC2_VU_SHIFT                         8	/* DAC2_VU */
+#define WM8995_DAC2_VU_WIDTH                         1	/* DAC2_VU */
+#define WM8995_DAC2R_VOL_MASK                   0x00FF	/* DAC2R_VOL - [7:0] */
+#define WM8995_DAC2R_VOL_SHIFT                       0	/* DAC2R_VOL - [7:0] */
+#define WM8995_DAC2R_VOL_WIDTH                       8	/* DAC2R_VOL - [7:0] */
+
+/*
+ * R28 (0x1C) - Output Volume ZC (1)
+ */
+#define WM8995_HPOUT2L_ZC                       0x0008	/* HPOUT2L_ZC */
+#define WM8995_HPOUT2L_ZC_MASK                  0x0008	/* HPOUT2L_ZC */
+#define WM8995_HPOUT2L_ZC_SHIFT                      3	/* HPOUT2L_ZC */
+#define WM8995_HPOUT2L_ZC_WIDTH                      1	/* HPOUT2L_ZC */
+#define WM8995_HPOUT2R_ZC                       0x0004	/* HPOUT2R_ZC */
+#define WM8995_HPOUT2R_ZC_MASK                  0x0004	/* HPOUT2R_ZC */
+#define WM8995_HPOUT2R_ZC_SHIFT                      2	/* HPOUT2R_ZC */
+#define WM8995_HPOUT2R_ZC_WIDTH                      1	/* HPOUT2R_ZC */
+#define WM8995_HPOUT1L_ZC                       0x0002	/* HPOUT1L_ZC */
+#define WM8995_HPOUT1L_ZC_MASK                  0x0002	/* HPOUT1L_ZC */
+#define WM8995_HPOUT1L_ZC_SHIFT                      1	/* HPOUT1L_ZC */
+#define WM8995_HPOUT1L_ZC_WIDTH                      1	/* HPOUT1L_ZC */
+#define WM8995_HPOUT1R_ZC                       0x0001	/* HPOUT1R_ZC */
+#define WM8995_HPOUT1R_ZC_MASK                  0x0001	/* HPOUT1R_ZC */
+#define WM8995_HPOUT1R_ZC_SHIFT                      0	/* HPOUT1R_ZC */
+#define WM8995_HPOUT1R_ZC_WIDTH                      1	/* HPOUT1R_ZC */
+
+/*
+ * R32 (0x20) - MICBIAS (1)
+ */
+#define WM8995_MICB1_MODE                       0x0008	/* MICB1_MODE */
+#define WM8995_MICB1_MODE_MASK                  0x0008	/* MICB1_MODE */
+#define WM8995_MICB1_MODE_SHIFT                      3	/* MICB1_MODE */
+#define WM8995_MICB1_MODE_WIDTH                      1	/* MICB1_MODE */
+#define WM8995_MICB1_LVL_MASK                   0x0006	/* MICB1_LVL - [2:1] */
+#define WM8995_MICB1_LVL_SHIFT                       1	/* MICB1_LVL - [2:1] */
+#define WM8995_MICB1_LVL_WIDTH                       2	/* MICB1_LVL - [2:1] */
+#define WM8995_MICB1_DISCH                      0x0001	/* MICB1_DISCH */
+#define WM8995_MICB1_DISCH_MASK                 0x0001	/* MICB1_DISCH */
+#define WM8995_MICB1_DISCH_SHIFT                     0	/* MICB1_DISCH */
+#define WM8995_MICB1_DISCH_WIDTH                     1	/* MICB1_DISCH */
+
+/*
+ * R33 (0x21) - MICBIAS (2)
+ */
+#define WM8995_MICB2_MODE                       0x0008	/* MICB2_MODE */
+#define WM8995_MICB2_MODE_MASK                  0x0008	/* MICB2_MODE */
+#define WM8995_MICB2_MODE_SHIFT                      3	/* MICB2_MODE */
+#define WM8995_MICB2_MODE_WIDTH                      1	/* MICB2_MODE */
+#define WM8995_MICB2_LVL_MASK                   0x0006	/* MICB2_LVL - [2:1] */
+#define WM8995_MICB2_LVL_SHIFT                       1	/* MICB2_LVL - [2:1] */
+#define WM8995_MICB2_LVL_WIDTH                       2	/* MICB2_LVL - [2:1] */
+#define WM8995_MICB2_DISCH                      0x0001	/* MICB2_DISCH */
+#define WM8995_MICB2_DISCH_MASK                 0x0001	/* MICB2_DISCH */
+#define WM8995_MICB2_DISCH_SHIFT                     0	/* MICB2_DISCH */
+#define WM8995_MICB2_DISCH_WIDTH                     1	/* MICB2_DISCH */
+
+/*
+ * R40 (0x28) - LDO 1
+ */
+#define WM8995_LDO1_MODE                        0x0020	/* LDO1_MODE */
+#define WM8995_LDO1_MODE_MASK                   0x0020	/* LDO1_MODE */
+#define WM8995_LDO1_MODE_SHIFT                       5	/* LDO1_MODE */
+#define WM8995_LDO1_MODE_WIDTH                       1	/* LDO1_MODE */
+#define WM8995_LDO1_VSEL_MASK                   0x0006	/* LDO1_VSEL - [2:1] */
+#define WM8995_LDO1_VSEL_SHIFT                       1	/* LDO1_VSEL - [2:1] */
+#define WM8995_LDO1_VSEL_WIDTH                       2	/* LDO1_VSEL - [2:1] */
+#define WM8995_LDO1_DISCH                       0x0001	/* LDO1_DISCH */
+#define WM8995_LDO1_DISCH_MASK                  0x0001	/* LDO1_DISCH */
+#define WM8995_LDO1_DISCH_SHIFT                      0	/* LDO1_DISCH */
+#define WM8995_LDO1_DISCH_WIDTH                      1	/* LDO1_DISCH */
+
+/*
+ * R41 (0x29) - LDO 2
+ */
+#define WM8995_LDO2_MODE                        0x0020	/* LDO2_MODE */
+#define WM8995_LDO2_MODE_MASK                   0x0020	/* LDO2_MODE */
+#define WM8995_LDO2_MODE_SHIFT                       5	/* LDO2_MODE */
+#define WM8995_LDO2_MODE_WIDTH                       1	/* LDO2_MODE */
+#define WM8995_LDO2_VSEL_MASK                   0x001E	/* LDO2_VSEL - [4:1] */
+#define WM8995_LDO2_VSEL_SHIFT                       1	/* LDO2_VSEL - [4:1] */
+#define WM8995_LDO2_VSEL_WIDTH                       4	/* LDO2_VSEL - [4:1] */
+#define WM8995_LDO2_DISCH                       0x0001	/* LDO2_DISCH */
+#define WM8995_LDO2_DISCH_MASK                  0x0001	/* LDO2_DISCH */
+#define WM8995_LDO2_DISCH_SHIFT                      0	/* LDO2_DISCH */
+#define WM8995_LDO2_DISCH_WIDTH                      1	/* LDO2_DISCH */
+
+/*
+ * R48 (0x30) - Accessory Detect Mode1
+ */
+#define WM8995_JD_MODE_MASK                     0x0003	/* JD_MODE - [1:0] */
+#define WM8995_JD_MODE_SHIFT                         0	/* JD_MODE - [1:0] */
+#define WM8995_JD_MODE_WIDTH                         2	/* JD_MODE - [1:0] */
+
+/*
+ * R49 (0x31) - Accessory Detect Mode2
+ */
+#define WM8995_VID_ENA                          0x0001	/* VID_ENA */
+#define WM8995_VID_ENA_MASK                     0x0001	/* VID_ENA */
+#define WM8995_VID_ENA_SHIFT                         0	/* VID_ENA */
+#define WM8995_VID_ENA_WIDTH                         1	/* VID_ENA */
+
+/*
+ * R52 (0x34) - Headphone Detect1
+ */
+#define WM8995_HP_RAMPRATE                      0x0002	/* HP_RAMPRATE */
+#define WM8995_HP_RAMPRATE_MASK                 0x0002	/* HP_RAMPRATE */
+#define WM8995_HP_RAMPRATE_SHIFT                     1	/* HP_RAMPRATE */
+#define WM8995_HP_RAMPRATE_WIDTH                     1	/* HP_RAMPRATE */
+#define WM8995_HP_POLL                          0x0001	/* HP_POLL */
+#define WM8995_HP_POLL_MASK                     0x0001	/* HP_POLL */
+#define WM8995_HP_POLL_SHIFT                         0	/* HP_POLL */
+#define WM8995_HP_POLL_WIDTH                         1	/* HP_POLL */
+
+/*
+ * R53 (0x35) - Headphone Detect2
+ */
+#define WM8995_HP_DONE                          0x0080	/* HP_DONE */
+#define WM8995_HP_DONE_MASK                     0x0080	/* HP_DONE */
+#define WM8995_HP_DONE_SHIFT                         7	/* HP_DONE */
+#define WM8995_HP_DONE_WIDTH                         1	/* HP_DONE */
+#define WM8995_HP_LVL_MASK                      0x007F	/* HP_LVL - [6:0] */
+#define WM8995_HP_LVL_SHIFT                          0	/* HP_LVL - [6:0] */
+#define WM8995_HP_LVL_WIDTH                          7	/* HP_LVL - [6:0] */
+
+/*
+ * R56 (0x38) - Mic Detect (1)
+ */
+#define WM8995_MICD_RATE_MASK                   0x7800	/* MICD_RATE - [14:11] */
+#define WM8995_MICD_RATE_SHIFT                      11	/* MICD_RATE - [14:11] */
+#define WM8995_MICD_RATE_WIDTH                       4	/* MICD_RATE - [14:11] */
+#define WM8995_MICD_LVL_SEL_MASK                0x01F8	/* MICD_LVL_SEL - [8:3] */
+#define WM8995_MICD_LVL_SEL_SHIFT                    3	/* MICD_LVL_SEL - [8:3] */
+#define WM8995_MICD_LVL_SEL_WIDTH                    6	/* MICD_LVL_SEL - [8:3] */
+#define WM8995_MICD_DBTIME                      0x0002	/* MICD_DBTIME */
+#define WM8995_MICD_DBTIME_MASK                 0x0002	/* MICD_DBTIME */
+#define WM8995_MICD_DBTIME_SHIFT                     1	/* MICD_DBTIME */
+#define WM8995_MICD_DBTIME_WIDTH                     1	/* MICD_DBTIME */
+#define WM8995_MICD_ENA                         0x0001	/* MICD_ENA */
+#define WM8995_MICD_ENA_MASK                    0x0001	/* MICD_ENA */
+#define WM8995_MICD_ENA_SHIFT                        0	/* MICD_ENA */
+#define WM8995_MICD_ENA_WIDTH                        1	/* MICD_ENA */
+
+/*
+ * R57 (0x39) - Mic Detect (2)
+ */
+#define WM8995_MICD_LVL_MASK                    0x01FC	/* MICD_LVL - [8:2] */
+#define WM8995_MICD_LVL_SHIFT                        2	/* MICD_LVL - [8:2] */
+#define WM8995_MICD_LVL_WIDTH                        7	/* MICD_LVL - [8:2] */
+#define WM8995_MICD_VALID                       0x0002	/* MICD_VALID */
+#define WM8995_MICD_VALID_MASK                  0x0002	/* MICD_VALID */
+#define WM8995_MICD_VALID_SHIFT                      1	/* MICD_VALID */
+#define WM8995_MICD_VALID_WIDTH                      1	/* MICD_VALID */
+#define WM8995_MICD_STS                         0x0001	/* MICD_STS */
+#define WM8995_MICD_STS_MASK                    0x0001	/* MICD_STS */
+#define WM8995_MICD_STS_SHIFT                        0	/* MICD_STS */
+#define WM8995_MICD_STS_WIDTH                        1	/* MICD_STS */
+
+/*
+ * R64 (0x40) - Charge Pump (1)
+ */
+#define WM8995_CP_ENA                           0x8000	/* CP_ENA */
+#define WM8995_CP_ENA_MASK                      0x8000	/* CP_ENA */
+#define WM8995_CP_ENA_SHIFT                         15	/* CP_ENA */
+#define WM8995_CP_ENA_WIDTH                          1	/* CP_ENA */
+
+/*
+ * R69 (0x45) - Class W (1)
+ */
+#define WM8995_CP_DYN_SRC_SEL_MASK              0x0300	/* CP_DYN_SRC_SEL - [9:8] */
+#define WM8995_CP_DYN_SRC_SEL_SHIFT                  8	/* CP_DYN_SRC_SEL - [9:8] */
+#define WM8995_CP_DYN_SRC_SEL_WIDTH                  2	/* CP_DYN_SRC_SEL - [9:8] */
+#define WM8995_CP_DYN_PWR                       0x0001	/* CP_DYN_PWR */
+#define WM8995_CP_DYN_PWR_MASK                  0x0001	/* CP_DYN_PWR */
+#define WM8995_CP_DYN_PWR_SHIFT                      0	/* CP_DYN_PWR */
+#define WM8995_CP_DYN_PWR_WIDTH                      1	/* CP_DYN_PWR */
+
+/*
+ * R80 (0x50) - DC Servo (1)
+ */
+#define WM8995_DCS_ENA_CHAN_3                   0x0008	/* DCS_ENA_CHAN_3 */
+#define WM8995_DCS_ENA_CHAN_3_MASK              0x0008	/* DCS_ENA_CHAN_3 */
+#define WM8995_DCS_ENA_CHAN_3_SHIFT                  3	/* DCS_ENA_CHAN_3 */
+#define WM8995_DCS_ENA_CHAN_3_WIDTH                  1	/* DCS_ENA_CHAN_3 */
+#define WM8995_DCS_ENA_CHAN_2                   0x0004	/* DCS_ENA_CHAN_2 */
+#define WM8995_DCS_ENA_CHAN_2_MASK              0x0004	/* DCS_ENA_CHAN_2 */
+#define WM8995_DCS_ENA_CHAN_2_SHIFT                  2	/* DCS_ENA_CHAN_2 */
+#define WM8995_DCS_ENA_CHAN_2_WIDTH                  1	/* DCS_ENA_CHAN_2 */
+#define WM8995_DCS_ENA_CHAN_1                   0x0002	/* DCS_ENA_CHAN_1 */
+#define WM8995_DCS_ENA_CHAN_1_MASK              0x0002	/* DCS_ENA_CHAN_1 */
+#define WM8995_DCS_ENA_CHAN_1_SHIFT                  1	/* DCS_ENA_CHAN_1 */
+#define WM8995_DCS_ENA_CHAN_1_WIDTH                  1	/* DCS_ENA_CHAN_1 */
+#define WM8995_DCS_ENA_CHAN_0                   0x0001	/* DCS_ENA_CHAN_0 */
+#define WM8995_DCS_ENA_CHAN_0_MASK              0x0001	/* DCS_ENA_CHAN_0 */
+#define WM8995_DCS_ENA_CHAN_0_SHIFT                  0	/* DCS_ENA_CHAN_0 */
+#define WM8995_DCS_ENA_CHAN_0_WIDTH                  1	/* DCS_ENA_CHAN_0 */
+
+/*
+ * R81 (0x51) - DC Servo (2)
+ */
+#define WM8995_DCS_TRIG_SINGLE_3                0x8000	/* DCS_TRIG_SINGLE_3 */
+#define WM8995_DCS_TRIG_SINGLE_3_MASK           0x8000	/* DCS_TRIG_SINGLE_3 */
+#define WM8995_DCS_TRIG_SINGLE_3_SHIFT              15	/* DCS_TRIG_SINGLE_3 */
+#define WM8995_DCS_TRIG_SINGLE_3_WIDTH               1	/* DCS_TRIG_SINGLE_3 */
+#define WM8995_DCS_TRIG_SINGLE_2                0x4000	/* DCS_TRIG_SINGLE_2 */
+#define WM8995_DCS_TRIG_SINGLE_2_MASK           0x4000	/* DCS_TRIG_SINGLE_2 */
+#define WM8995_DCS_TRIG_SINGLE_2_SHIFT              14	/* DCS_TRIG_SINGLE_2 */
+#define WM8995_DCS_TRIG_SINGLE_2_WIDTH               1	/* DCS_TRIG_SINGLE_2 */
+#define WM8995_DCS_TRIG_SINGLE_1                0x2000	/* DCS_TRIG_SINGLE_1 */
+#define WM8995_DCS_TRIG_SINGLE_1_MASK           0x2000	/* DCS_TRIG_SINGLE_1 */
+#define WM8995_DCS_TRIG_SINGLE_1_SHIFT              13	/* DCS_TRIG_SINGLE_1 */
+#define WM8995_DCS_TRIG_SINGLE_1_WIDTH               1	/* DCS_TRIG_SINGLE_1 */
+#define WM8995_DCS_TRIG_SINGLE_0                0x1000	/* DCS_TRIG_SINGLE_0 */
+#define WM8995_DCS_TRIG_SINGLE_0_MASK           0x1000	/* DCS_TRIG_SINGLE_0 */
+#define WM8995_DCS_TRIG_SINGLE_0_SHIFT              12	/* DCS_TRIG_SINGLE_0 */
+#define WM8995_DCS_TRIG_SINGLE_0_WIDTH               1	/* DCS_TRIG_SINGLE_0 */
+#define WM8995_DCS_TRIG_SERIES_3                0x0800	/* DCS_TRIG_SERIES_3 */
+#define WM8995_DCS_TRIG_SERIES_3_MASK           0x0800	/* DCS_TRIG_SERIES_3 */
+#define WM8995_DCS_TRIG_SERIES_3_SHIFT              11	/* DCS_TRIG_SERIES_3 */
+#define WM8995_DCS_TRIG_SERIES_3_WIDTH               1	/* DCS_TRIG_SERIES_3 */
+#define WM8995_DCS_TRIG_SERIES_2                0x0400	/* DCS_TRIG_SERIES_2 */
+#define WM8995_DCS_TRIG_SERIES_2_MASK           0x0400	/* DCS_TRIG_SERIES_2 */
+#define WM8995_DCS_TRIG_SERIES_2_SHIFT              10	/* DCS_TRIG_SERIES_2 */
+#define WM8995_DCS_TRIG_SERIES_2_WIDTH               1	/* DCS_TRIG_SERIES_2 */
+#define WM8995_DCS_TRIG_SERIES_1                0x0200	/* DCS_TRIG_SERIES_1 */
+#define WM8995_DCS_TRIG_SERIES_1_MASK           0x0200	/* DCS_TRIG_SERIES_1 */
+#define WM8995_DCS_TRIG_SERIES_1_SHIFT               9	/* DCS_TRIG_SERIES_1 */
+#define WM8995_DCS_TRIG_SERIES_1_WIDTH               1	/* DCS_TRIG_SERIES_1 */
+#define WM8995_DCS_TRIG_SERIES_0                0x0100	/* DCS_TRIG_SERIES_0 */
+#define WM8995_DCS_TRIG_SERIES_0_MASK           0x0100	/* DCS_TRIG_SERIES_0 */
+#define WM8995_DCS_TRIG_SERIES_0_SHIFT               8	/* DCS_TRIG_SERIES_0 */
+#define WM8995_DCS_TRIG_SERIES_0_WIDTH               1	/* DCS_TRIG_SERIES_0 */
+#define WM8995_DCS_TRIG_STARTUP_3               0x0080	/* DCS_TRIG_STARTUP_3 */
+#define WM8995_DCS_TRIG_STARTUP_3_MASK          0x0080	/* DCS_TRIG_STARTUP_3 */
+#define WM8995_DCS_TRIG_STARTUP_3_SHIFT              7	/* DCS_TRIG_STARTUP_3 */
+#define WM8995_DCS_TRIG_STARTUP_3_WIDTH              1	/* DCS_TRIG_STARTUP_3 */
+#define WM8995_DCS_TRIG_STARTUP_2               0x0040	/* DCS_TRIG_STARTUP_2 */
+#define WM8995_DCS_TRIG_STARTUP_2_MASK          0x0040	/* DCS_TRIG_STARTUP_2 */
+#define WM8995_DCS_TRIG_STARTUP_2_SHIFT              6	/* DCS_TRIG_STARTUP_2 */
+#define WM8995_DCS_TRIG_STARTUP_2_WIDTH              1	/* DCS_TRIG_STARTUP_2 */
+#define WM8995_DCS_TRIG_STARTUP_1               0x0020	/* DCS_TRIG_STARTUP_1 */
+#define WM8995_DCS_TRIG_STARTUP_1_MASK          0x0020	/* DCS_TRIG_STARTUP_1 */
+#define WM8995_DCS_TRIG_STARTUP_1_SHIFT              5	/* DCS_TRIG_STARTUP_1 */
+#define WM8995_DCS_TRIG_STARTUP_1_WIDTH              1	/* DCS_TRIG_STARTUP_1 */
+#define WM8995_DCS_TRIG_STARTUP_0               0x0010	/* DCS_TRIG_STARTUP_0 */
+#define WM8995_DCS_TRIG_STARTUP_0_MASK          0x0010	/* DCS_TRIG_STARTUP_0 */
+#define WM8995_DCS_TRIG_STARTUP_0_SHIFT              4	/* DCS_TRIG_STARTUP_0 */
+#define WM8995_DCS_TRIG_STARTUP_0_WIDTH              1	/* DCS_TRIG_STARTUP_0 */
+#define WM8995_DCS_TRIG_DAC_WR_3                0x0008	/* DCS_TRIG_DAC_WR_3 */
+#define WM8995_DCS_TRIG_DAC_WR_3_MASK           0x0008	/* DCS_TRIG_DAC_WR_3 */
+#define WM8995_DCS_TRIG_DAC_WR_3_SHIFT               3	/* DCS_TRIG_DAC_WR_3 */
+#define WM8995_DCS_TRIG_DAC_WR_3_WIDTH               1	/* DCS_TRIG_DAC_WR_3 */
+#define WM8995_DCS_TRIG_DAC_WR_2                0x0004	/* DCS_TRIG_DAC_WR_2 */
+#define WM8995_DCS_TRIG_DAC_WR_2_MASK           0x0004	/* DCS_TRIG_DAC_WR_2 */
+#define WM8995_DCS_TRIG_DAC_WR_2_SHIFT               2	/* DCS_TRIG_DAC_WR_2 */
+#define WM8995_DCS_TRIG_DAC_WR_2_WIDTH               1	/* DCS_TRIG_DAC_WR_2 */
+#define WM8995_DCS_TRIG_DAC_WR_1                0x0002	/* DCS_TRIG_DAC_WR_1 */
+#define WM8995_DCS_TRIG_DAC_WR_1_MASK           0x0002	/* DCS_TRIG_DAC_WR_1 */
+#define WM8995_DCS_TRIG_DAC_WR_1_SHIFT               1	/* DCS_TRIG_DAC_WR_1 */
+#define WM8995_DCS_TRIG_DAC_WR_1_WIDTH               1	/* DCS_TRIG_DAC_WR_1 */
+#define WM8995_DCS_TRIG_DAC_WR_0                0x0001	/* DCS_TRIG_DAC_WR_0 */
+#define WM8995_DCS_TRIG_DAC_WR_0_MASK           0x0001	/* DCS_TRIG_DAC_WR_0 */
+#define WM8995_DCS_TRIG_DAC_WR_0_SHIFT               0	/* DCS_TRIG_DAC_WR_0 */
+#define WM8995_DCS_TRIG_DAC_WR_0_WIDTH               1	/* DCS_TRIG_DAC_WR_0 */
+
+/*
+ * R82 (0x52) - DC Servo (3)
+ */
+#define WM8995_DCS_TIMER_PERIOD_23_MASK         0x0F00	/* DCS_TIMER_PERIOD_23 - [11:8] */
+#define WM8995_DCS_TIMER_PERIOD_23_SHIFT             8	/* DCS_TIMER_PERIOD_23 - [11:8] */
+#define WM8995_DCS_TIMER_PERIOD_23_WIDTH             4	/* DCS_TIMER_PERIOD_23 - [11:8] */
+#define WM8995_DCS_TIMER_PERIOD_01_MASK         0x000F	/* DCS_TIMER_PERIOD_01 - [3:0] */
+#define WM8995_DCS_TIMER_PERIOD_01_SHIFT             0	/* DCS_TIMER_PERIOD_01 - [3:0] */
+#define WM8995_DCS_TIMER_PERIOD_01_WIDTH             4	/* DCS_TIMER_PERIOD_01 - [3:0] */
+
+/*
+ * R84 (0x54) - DC Servo (5)
+ */
+#define WM8995_DCS_SERIES_NO_23_MASK            0x7F00	/* DCS_SERIES_NO_23 - [14:8] */
+#define WM8995_DCS_SERIES_NO_23_SHIFT                8	/* DCS_SERIES_NO_23 - [14:8] */
+#define WM8995_DCS_SERIES_NO_23_WIDTH                7	/* DCS_SERIES_NO_23 - [14:8] */
+#define WM8995_DCS_SERIES_NO_01_MASK            0x007F	/* DCS_SERIES_NO_01 - [6:0] */
+#define WM8995_DCS_SERIES_NO_01_SHIFT                0	/* DCS_SERIES_NO_01 - [6:0] */
+#define WM8995_DCS_SERIES_NO_01_WIDTH                7	/* DCS_SERIES_NO_01 - [6:0] */
+
+/*
+ * R85 (0x55) - DC Servo (6)
+ */
+#define WM8995_DCS_DAC_WR_VAL_3_MASK            0xFF00	/* DCS_DAC_WR_VAL_3 - [15:8] */
+#define WM8995_DCS_DAC_WR_VAL_3_SHIFT                8	/* DCS_DAC_WR_VAL_3 - [15:8] */
+#define WM8995_DCS_DAC_WR_VAL_3_WIDTH                8	/* DCS_DAC_WR_VAL_3 - [15:8] */
+#define WM8995_DCS_DAC_WR_VAL_2_MASK            0x00FF	/* DCS_DAC_WR_VAL_2 - [7:0] */
+#define WM8995_DCS_DAC_WR_VAL_2_SHIFT                0	/* DCS_DAC_WR_VAL_2 - [7:0] */
+#define WM8995_DCS_DAC_WR_VAL_2_WIDTH                8	/* DCS_DAC_WR_VAL_2 - [7:0] */
+
+/*
+ * R86 (0x56) - DC Servo (7)
+ */
+#define WM8995_DCS_DAC_WR_VAL_1_MASK            0xFF00	/* DCS_DAC_WR_VAL_1 - [15:8] */
+#define WM8995_DCS_DAC_WR_VAL_1_SHIFT                8	/* DCS_DAC_WR_VAL_1 - [15:8] */
+#define WM8995_DCS_DAC_WR_VAL_1_WIDTH                8	/* DCS_DAC_WR_VAL_1 - [15:8] */
+#define WM8995_DCS_DAC_WR_VAL_0_MASK            0x00FF	/* DCS_DAC_WR_VAL_0 - [7:0] */
+#define WM8995_DCS_DAC_WR_VAL_0_SHIFT                0	/* DCS_DAC_WR_VAL_0 - [7:0] */
+#define WM8995_DCS_DAC_WR_VAL_0_WIDTH                8	/* DCS_DAC_WR_VAL_0 - [7:0] */
+
+/*
+ * R87 (0x57) - DC Servo Readback 0
+ */
+#define WM8995_DCS_CAL_COMPLETE_MASK            0x0F00	/* DCS_CAL_COMPLETE - [11:8] */
+#define WM8995_DCS_CAL_COMPLETE_SHIFT                8	/* DCS_CAL_COMPLETE - [11:8] */
+#define WM8995_DCS_CAL_COMPLETE_WIDTH                4	/* DCS_CAL_COMPLETE - [11:8] */
+#define WM8995_DCS_DAC_WR_COMPLETE_MASK         0x00F0	/* DCS_DAC_WR_COMPLETE - [7:4] */
+#define WM8995_DCS_DAC_WR_COMPLETE_SHIFT             4	/* DCS_DAC_WR_COMPLETE - [7:4] */
+#define WM8995_DCS_DAC_WR_COMPLETE_WIDTH             4	/* DCS_DAC_WR_COMPLETE - [7:4] */
+#define WM8995_DCS_STARTUP_COMPLETE_MASK        0x000F	/* DCS_STARTUP_COMPLETE - [3:0] */
+#define WM8995_DCS_STARTUP_COMPLETE_SHIFT            0	/* DCS_STARTUP_COMPLETE - [3:0] */
+#define WM8995_DCS_STARTUP_COMPLETE_WIDTH            4	/* DCS_STARTUP_COMPLETE - [3:0] */
+
+/*
+ * R96 (0x60) - Analogue HP (1)
+ */
+#define WM8995_HPOUT1L_RMV_SHORT                0x0080	/* HPOUT1L_RMV_SHORT */
+#define WM8995_HPOUT1L_RMV_SHORT_MASK           0x0080	/* HPOUT1L_RMV_SHORT */
+#define WM8995_HPOUT1L_RMV_SHORT_SHIFT               7	/* HPOUT1L_RMV_SHORT */
+#define WM8995_HPOUT1L_RMV_SHORT_WIDTH               1	/* HPOUT1L_RMV_SHORT */
+#define WM8995_HPOUT1L_OUTP                     0x0040	/* HPOUT1L_OUTP */
+#define WM8995_HPOUT1L_OUTP_MASK                0x0040	/* HPOUT1L_OUTP */
+#define WM8995_HPOUT1L_OUTP_SHIFT                    6	/* HPOUT1L_OUTP */
+#define WM8995_HPOUT1L_OUTP_WIDTH                    1	/* HPOUT1L_OUTP */
+#define WM8995_HPOUT1L_DLY                      0x0020	/* HPOUT1L_DLY */
+#define WM8995_HPOUT1L_DLY_MASK                 0x0020	/* HPOUT1L_DLY */
+#define WM8995_HPOUT1L_DLY_SHIFT                     5	/* HPOUT1L_DLY */
+#define WM8995_HPOUT1L_DLY_WIDTH                     1	/* HPOUT1L_DLY */
+#define WM8995_HPOUT1R_RMV_SHORT                0x0008	/* HPOUT1R_RMV_SHORT */
+#define WM8995_HPOUT1R_RMV_SHORT_MASK           0x0008	/* HPOUT1R_RMV_SHORT */
+#define WM8995_HPOUT1R_RMV_SHORT_SHIFT               3	/* HPOUT1R_RMV_SHORT */
+#define WM8995_HPOUT1R_RMV_SHORT_WIDTH               1	/* HPOUT1R_RMV_SHORT */
+#define WM8995_HPOUT1R_OUTP                     0x0004	/* HPOUT1R_OUTP */
+#define WM8995_HPOUT1R_OUTP_MASK                0x0004	/* HPOUT1R_OUTP */
+#define WM8995_HPOUT1R_OUTP_SHIFT                    2	/* HPOUT1R_OUTP */
+#define WM8995_HPOUT1R_OUTP_WIDTH                    1	/* HPOUT1R_OUTP */
+#define WM8995_HPOUT1R_DLY                      0x0002	/* HPOUT1R_DLY */
+#define WM8995_HPOUT1R_DLY_MASK                 0x0002	/* HPOUT1R_DLY */
+#define WM8995_HPOUT1R_DLY_SHIFT                     1	/* HPOUT1R_DLY */
+#define WM8995_HPOUT1R_DLY_WIDTH                     1	/* HPOUT1R_DLY */
+
+/*
+ * R97 (0x61) - Analogue HP (2)
+ */
+#define WM8995_HPOUT2L_RMV_SHORT                0x0080	/* HPOUT2L_RMV_SHORT */
+#define WM8995_HPOUT2L_RMV_SHORT_MASK           0x0080	/* HPOUT2L_RMV_SHORT */
+#define WM8995_HPOUT2L_RMV_SHORT_SHIFT               7	/* HPOUT2L_RMV_SHORT */
+#define WM8995_HPOUT2L_RMV_SHORT_WIDTH               1	/* HPOUT2L_RMV_SHORT */
+#define WM8995_HPOUT2L_OUTP                     0x0040	/* HPOUT2L_OUTP */
+#define WM8995_HPOUT2L_OUTP_MASK                0x0040	/* HPOUT2L_OUTP */
+#define WM8995_HPOUT2L_OUTP_SHIFT                    6	/* HPOUT2L_OUTP */
+#define WM8995_HPOUT2L_OUTP_WIDTH                    1	/* HPOUT2L_OUTP */
+#define WM8995_HPOUT2L_DLY                      0x0020	/* HPOUT2L_DLY */
+#define WM8995_HPOUT2L_DLY_MASK                 0x0020	/* HPOUT2L_DLY */
+#define WM8995_HPOUT2L_DLY_SHIFT                     5	/* HPOUT2L_DLY */
+#define WM8995_HPOUT2L_DLY_WIDTH                     1	/* HPOUT2L_DLY */
+#define WM8995_HPOUT2R_RMV_SHORT                0x0008	/* HPOUT2R_RMV_SHORT */
+#define WM8995_HPOUT2R_RMV_SHORT_MASK           0x0008	/* HPOUT2R_RMV_SHORT */
+#define WM8995_HPOUT2R_RMV_SHORT_SHIFT               3	/* HPOUT2R_RMV_SHORT */
+#define WM8995_HPOUT2R_RMV_SHORT_WIDTH               1	/* HPOUT2R_RMV_SHORT */
+#define WM8995_HPOUT2R_OUTP                     0x0004	/* HPOUT2R_OUTP */
+#define WM8995_HPOUT2R_OUTP_MASK                0x0004	/* HPOUT2R_OUTP */
+#define WM8995_HPOUT2R_OUTP_SHIFT                    2	/* HPOUT2R_OUTP */
+#define WM8995_HPOUT2R_OUTP_WIDTH                    1	/* HPOUT2R_OUTP */
+#define WM8995_HPOUT2R_DLY                      0x0002	/* HPOUT2R_DLY */
+#define WM8995_HPOUT2R_DLY_MASK                 0x0002	/* HPOUT2R_DLY */
+#define WM8995_HPOUT2R_DLY_SHIFT                     1	/* HPOUT2R_DLY */
+#define WM8995_HPOUT2R_DLY_WIDTH                     1	/* HPOUT2R_DLY */
+
+/*
+ * R256 (0x100) - Chip Revision
+ */
+#define WM8995_CHIP_REV_MASK                    0x000F	/* CHIP_REV - [3:0] */
+#define WM8995_CHIP_REV_SHIFT                        0	/* CHIP_REV - [3:0] */
+#define WM8995_CHIP_REV_WIDTH                        4	/* CHIP_REV - [3:0] */
+
+/*
+ * R257 (0x101) - Control Interface (1)
+ */
+#define WM8995_REG_SYNC                         0x8000	/* REG_SYNC */
+#define WM8995_REG_SYNC_MASK                    0x8000	/* REG_SYNC */
+#define WM8995_REG_SYNC_SHIFT                       15	/* REG_SYNC */
+#define WM8995_REG_SYNC_WIDTH                        1	/* REG_SYNC */
+#define WM8995_SPI_CONTRD                       0x0040	/* SPI_CONTRD */
+#define WM8995_SPI_CONTRD_MASK                  0x0040	/* SPI_CONTRD */
+#define WM8995_SPI_CONTRD_SHIFT                      6	/* SPI_CONTRD */
+#define WM8995_SPI_CONTRD_WIDTH                      1	/* SPI_CONTRD */
+#define WM8995_SPI_4WIRE                        0x0020	/* SPI_4WIRE */
+#define WM8995_SPI_4WIRE_MASK                   0x0020	/* SPI_4WIRE */
+#define WM8995_SPI_4WIRE_SHIFT                       5	/* SPI_4WIRE */
+#define WM8995_SPI_4WIRE_WIDTH                       1	/* SPI_4WIRE */
+#define WM8995_SPI_CFG                          0x0010	/* SPI_CFG */
+#define WM8995_SPI_CFG_MASK                     0x0010	/* SPI_CFG */
+#define WM8995_SPI_CFG_SHIFT                         4	/* SPI_CFG */
+#define WM8995_SPI_CFG_WIDTH                         1	/* SPI_CFG */
+#define WM8995_AUTO_INC                         0x0004	/* AUTO_INC */
+#define WM8995_AUTO_INC_MASK                    0x0004	/* AUTO_INC */
+#define WM8995_AUTO_INC_SHIFT                        2	/* AUTO_INC */
+#define WM8995_AUTO_INC_WIDTH                        1	/* AUTO_INC */
+
+/*
+ * R258 (0x102) - Control Interface (2)
+ */
+#define WM8995_CTRL_IF_SRC                      0x0001	/* CTRL_IF_SRC */
+#define WM8995_CTRL_IF_SRC_MASK                 0x0001	/* CTRL_IF_SRC */
+#define WM8995_CTRL_IF_SRC_SHIFT                     0	/* CTRL_IF_SRC */
+#define WM8995_CTRL_IF_SRC_WIDTH                     1	/* CTRL_IF_SRC */
+
+/*
+ * R272 (0x110) - Write Sequencer Ctrl (1)
+ */
+#define WM8995_WSEQ_ENA                         0x8000	/* WSEQ_ENA */
+#define WM8995_WSEQ_ENA_MASK                    0x8000	/* WSEQ_ENA */
+#define WM8995_WSEQ_ENA_SHIFT                       15	/* WSEQ_ENA */
+#define WM8995_WSEQ_ENA_WIDTH                        1	/* WSEQ_ENA */
+#define WM8995_WSEQ_ABORT                       0x0200	/* WSEQ_ABORT */
+#define WM8995_WSEQ_ABORT_MASK                  0x0200	/* WSEQ_ABORT */
+#define WM8995_WSEQ_ABORT_SHIFT                      9	/* WSEQ_ABORT */
+#define WM8995_WSEQ_ABORT_WIDTH                      1	/* WSEQ_ABORT */
+#define WM8995_WSEQ_START                       0x0100	/* WSEQ_START */
+#define WM8995_WSEQ_START_MASK                  0x0100	/* WSEQ_START */
+#define WM8995_WSEQ_START_SHIFT                      8	/* WSEQ_START */
+#define WM8995_WSEQ_START_WIDTH                      1	/* WSEQ_START */
+#define WM8995_WSEQ_START_INDEX_MASK            0x007F	/* WSEQ_START_INDEX - [6:0] */
+#define WM8995_WSEQ_START_INDEX_SHIFT                0	/* WSEQ_START_INDEX - [6:0] */
+#define WM8995_WSEQ_START_INDEX_WIDTH                7	/* WSEQ_START_INDEX - [6:0] */
+
+/*
+ * R273 (0x111) - Write Sequencer Ctrl (2)
+ */
+#define WM8995_WSEQ_BUSY                        0x0100	/* WSEQ_BUSY */
+#define WM8995_WSEQ_BUSY_MASK                   0x0100	/* WSEQ_BUSY */
+#define WM8995_WSEQ_BUSY_SHIFT                       8	/* WSEQ_BUSY */
+#define WM8995_WSEQ_BUSY_WIDTH                       1	/* WSEQ_BUSY */
+#define WM8995_WSEQ_CURRENT_INDEX_MASK          0x007F	/* WSEQ_CURRENT_INDEX - [6:0] */
+#define WM8995_WSEQ_CURRENT_INDEX_SHIFT              0	/* WSEQ_CURRENT_INDEX - [6:0] */
+#define WM8995_WSEQ_CURRENT_INDEX_WIDTH              7	/* WSEQ_CURRENT_INDEX - [6:0] */
+
+/*
+ * R512 (0x200) - AIF1 Clocking (1)
+ */
+#define WM8995_AIF1CLK_SRC_MASK                 0x0018	/* AIF1CLK_SRC - [4:3] */
+#define WM8995_AIF1CLK_SRC_SHIFT                     3	/* AIF1CLK_SRC - [4:3] */
+#define WM8995_AIF1CLK_SRC_WIDTH                     2	/* AIF1CLK_SRC - [4:3] */
+#define WM8995_AIF1CLK_INV                      0x0004	/* AIF1CLK_INV */
+#define WM8995_AIF1CLK_INV_MASK                 0x0004	/* AIF1CLK_INV */
+#define WM8995_AIF1CLK_INV_SHIFT                     2	/* AIF1CLK_INV */
+#define WM8995_AIF1CLK_INV_WIDTH                     1	/* AIF1CLK_INV */
+#define WM8995_AIF1CLK_DIV                      0x0002	/* AIF1CLK_DIV */
+#define WM8995_AIF1CLK_DIV_MASK                 0x0002	/* AIF1CLK_DIV */
+#define WM8995_AIF1CLK_DIV_SHIFT                     1	/* AIF1CLK_DIV */
+#define WM8995_AIF1CLK_DIV_WIDTH                     1	/* AIF1CLK_DIV */
+#define WM8995_AIF1CLK_ENA                      0x0001	/* AIF1CLK_ENA */
+#define WM8995_AIF1CLK_ENA_MASK                 0x0001	/* AIF1CLK_ENA */
+#define WM8995_AIF1CLK_ENA_SHIFT                     0	/* AIF1CLK_ENA */
+#define WM8995_AIF1CLK_ENA_WIDTH                     1	/* AIF1CLK_ENA */
+
+/*
+ * R513 (0x201) - AIF1 Clocking (2)
+ */
+#define WM8995_AIF1DAC_DIV_MASK                 0x0038	/* AIF1DAC_DIV - [5:3] */
+#define WM8995_AIF1DAC_DIV_SHIFT                     3	/* AIF1DAC_DIV - [5:3] */
+#define WM8995_AIF1DAC_DIV_WIDTH                     3	/* AIF1DAC_DIV - [5:3] */
+#define WM8995_AIF1ADC_DIV_MASK                 0x0007	/* AIF1ADC_DIV - [2:0] */
+#define WM8995_AIF1ADC_DIV_SHIFT                     0	/* AIF1ADC_DIV - [2:0] */
+#define WM8995_AIF1ADC_DIV_WIDTH                     3	/* AIF1ADC_DIV - [2:0] */
+
+/*
+ * R516 (0x204) - AIF2 Clocking (1)
+ */
+#define WM8995_AIF2CLK_SRC_MASK                 0x0018	/* AIF2CLK_SRC - [4:3] */
+#define WM8995_AIF2CLK_SRC_SHIFT                     3	/* AIF2CLK_SRC - [4:3] */
+#define WM8995_AIF2CLK_SRC_WIDTH                     2	/* AIF2CLK_SRC - [4:3] */
+#define WM8995_AIF2CLK_INV                      0x0004	/* AIF2CLK_INV */
+#define WM8995_AIF2CLK_INV_MASK                 0x0004	/* AIF2CLK_INV */
+#define WM8995_AIF2CLK_INV_SHIFT                     2	/* AIF2CLK_INV */
+#define WM8995_AIF2CLK_INV_WIDTH                     1	/* AIF2CLK_INV */
+#define WM8995_AIF2CLK_DIV                      0x0002	/* AIF2CLK_DIV */
+#define WM8995_AIF2CLK_DIV_MASK                 0x0002	/* AIF2CLK_DIV */
+#define WM8995_AIF2CLK_DIV_SHIFT                     1	/* AIF2CLK_DIV */
+#define WM8995_AIF2CLK_DIV_WIDTH                     1	/* AIF2CLK_DIV */
+#define WM8995_AIF2CLK_ENA                      0x0001	/* AIF2CLK_ENA */
+#define WM8995_AIF2CLK_ENA_MASK                 0x0001	/* AIF2CLK_ENA */
+#define WM8995_AIF2CLK_ENA_SHIFT                     0	/* AIF2CLK_ENA */
+#define WM8995_AIF2CLK_ENA_WIDTH                     1	/* AIF2CLK_ENA */
+
+/*
+ * R517 (0x205) - AIF2 Clocking (2)
+ */
+#define WM8995_AIF2DAC_DIV_MASK                 0x0038	/* AIF2DAC_DIV - [5:3] */
+#define WM8995_AIF2DAC_DIV_SHIFT                     3	/* AIF2DAC_DIV - [5:3] */
+#define WM8995_AIF2DAC_DIV_WIDTH                     3	/* AIF2DAC_DIV - [5:3] */
+#define WM8995_AIF2ADC_DIV_MASK                 0x0007	/* AIF2ADC_DIV - [2:0] */
+#define WM8995_AIF2ADC_DIV_SHIFT                     0	/* AIF2ADC_DIV - [2:0] */
+#define WM8995_AIF2ADC_DIV_WIDTH                     3	/* AIF2ADC_DIV - [2:0] */
+
+/*
+ * R520 (0x208) - Clocking (1)
+ */
+#define WM8995_LFCLK_ENA                        0x0020	/* LFCLK_ENA */
+#define WM8995_LFCLK_ENA_MASK                   0x0020	/* LFCLK_ENA */
+#define WM8995_LFCLK_ENA_SHIFT                       5	/* LFCLK_ENA */
+#define WM8995_LFCLK_ENA_WIDTH                       1	/* LFCLK_ENA */
+#define WM8995_TOCLK_ENA                        0x0010	/* TOCLK_ENA */
+#define WM8995_TOCLK_ENA_MASK                   0x0010	/* TOCLK_ENA */
+#define WM8995_TOCLK_ENA_SHIFT                       4	/* TOCLK_ENA */
+#define WM8995_TOCLK_ENA_WIDTH                       1	/* TOCLK_ENA */
+#define WM8995_AIF1DSPCLK_ENA                   0x0008	/* AIF1DSPCLK_ENA */
+#define WM8995_AIF1DSPCLK_ENA_MASK              0x0008	/* AIF1DSPCLK_ENA */
+#define WM8995_AIF1DSPCLK_ENA_SHIFT                  3	/* AIF1DSPCLK_ENA */
+#define WM8995_AIF1DSPCLK_ENA_WIDTH                  1	/* AIF1DSPCLK_ENA */
+#define WM8995_AIF2DSPCLK_ENA                   0x0004	/* AIF2DSPCLK_ENA */
+#define WM8995_AIF2DSPCLK_ENA_MASK              0x0004	/* AIF2DSPCLK_ENA */
+#define WM8995_AIF2DSPCLK_ENA_SHIFT                  2	/* AIF2DSPCLK_ENA */
+#define WM8995_AIF2DSPCLK_ENA_WIDTH                  1	/* AIF2DSPCLK_ENA */
+#define WM8995_SYSDSPCLK_ENA                    0x0002	/* SYSDSPCLK_ENA */
+#define WM8995_SYSDSPCLK_ENA_MASK               0x0002	/* SYSDSPCLK_ENA */
+#define WM8995_SYSDSPCLK_ENA_SHIFT                   1	/* SYSDSPCLK_ENA */
+#define WM8995_SYSDSPCLK_ENA_WIDTH                   1	/* SYSDSPCLK_ENA */
+#define WM8995_SYSCLK_SRC                       0x0001	/* SYSCLK_SRC */
+#define WM8995_SYSCLK_SRC_MASK                  0x0001	/* SYSCLK_SRC */
+#define WM8995_SYSCLK_SRC_SHIFT                      0	/* SYSCLK_SRC */
+#define WM8995_SYSCLK_SRC_WIDTH                      1	/* SYSCLK_SRC */
+
+/*
+ * R521 (0x209) - Clocking (2)
+ */
+#define WM8995_TOCLK_DIV_MASK                   0x0700	/* TOCLK_DIV - [10:8] */
+#define WM8995_TOCLK_DIV_SHIFT                       8	/* TOCLK_DIV - [10:8] */
+#define WM8995_TOCLK_DIV_WIDTH                       3	/* TOCLK_DIV - [10:8] */
+#define WM8995_DBCLK_DIV_MASK                   0x00F0	/* DBCLK_DIV - [7:4] */
+#define WM8995_DBCLK_DIV_SHIFT                       4	/* DBCLK_DIV - [7:4] */
+#define WM8995_DBCLK_DIV_WIDTH                       4	/* DBCLK_DIV - [7:4] */
+#define WM8995_OPCLK_DIV_MASK                   0x0007	/* OPCLK_DIV - [2:0] */
+#define WM8995_OPCLK_DIV_SHIFT                       0	/* OPCLK_DIV - [2:0] */
+#define WM8995_OPCLK_DIV_WIDTH                       3	/* OPCLK_DIV - [2:0] */
+
+/*
+ * R528 (0x210) - AIF1 Rate
+ */
+#define WM8995_AIF1_SR_MASK                     0x00F0	/* AIF1_SR - [7:4] */
+#define WM8995_AIF1_SR_SHIFT                         4	/* AIF1_SR - [7:4] */
+#define WM8995_AIF1_SR_WIDTH                         4	/* AIF1_SR - [7:4] */
+#define WM8995_AIF1CLK_RATE_MASK                0x000F	/* AIF1CLK_RATE - [3:0] */
+#define WM8995_AIF1CLK_RATE_SHIFT                    0	/* AIF1CLK_RATE - [3:0] */
+#define WM8995_AIF1CLK_RATE_WIDTH                    4	/* AIF1CLK_RATE - [3:0] */
+
+/*
+ * R529 (0x211) - AIF2 Rate
+ */
+#define WM8995_AIF2_SR_MASK                     0x00F0	/* AIF2_SR - [7:4] */
+#define WM8995_AIF2_SR_SHIFT                         4	/* AIF2_SR - [7:4] */
+#define WM8995_AIF2_SR_WIDTH                         4	/* AIF2_SR - [7:4] */
+#define WM8995_AIF2CLK_RATE_MASK                0x000F	/* AIF2CLK_RATE - [3:0] */
+#define WM8995_AIF2CLK_RATE_SHIFT                    0	/* AIF2CLK_RATE - [3:0] */
+#define WM8995_AIF2CLK_RATE_WIDTH                    4	/* AIF2CLK_RATE - [3:0] */
+
+/*
+ * R530 (0x212) - Rate Status
+ */
+#define WM8995_SR_ERROR_MASK                    0x000F	/* SR_ERROR - [3:0] */
+#define WM8995_SR_ERROR_SHIFT                        0	/* SR_ERROR - [3:0] */
+#define WM8995_SR_ERROR_WIDTH                        4	/* SR_ERROR - [3:0] */
+
+/*
+ * R544 (0x220) - FLL1 Control (1)
+ */
+#define WM8995_FLL1_OSC_ENA                     0x0002	/* FLL1_OSC_ENA */
+#define WM8995_FLL1_OSC_ENA_MASK                0x0002	/* FLL1_OSC_ENA */
+#define WM8995_FLL1_OSC_ENA_SHIFT                    1	/* FLL1_OSC_ENA */
+#define WM8995_FLL1_OSC_ENA_WIDTH                    1	/* FLL1_OSC_ENA */
+#define WM8995_FLL1_ENA                         0x0001	/* FLL1_ENA */
+#define WM8995_FLL1_ENA_MASK                    0x0001	/* FLL1_ENA */
+#define WM8995_FLL1_ENA_SHIFT                        0	/* FLL1_ENA */
+#define WM8995_FLL1_ENA_WIDTH                        1	/* FLL1_ENA */
+
+/*
+ * R545 (0x221) - FLL1 Control (2)
+ */
+#define WM8995_FLL1_OUTDIV_MASK                 0x3F00	/* FLL1_OUTDIV - [13:8] */
+#define WM8995_FLL1_OUTDIV_SHIFT                     8	/* FLL1_OUTDIV - [13:8] */
+#define WM8995_FLL1_OUTDIV_WIDTH                     6	/* FLL1_OUTDIV - [13:8] */
+#define WM8995_FLL1_CTRL_RATE_MASK              0x0070	/* FLL1_CTRL_RATE - [6:4] */
+#define WM8995_FLL1_CTRL_RATE_SHIFT                  4	/* FLL1_CTRL_RATE - [6:4] */
+#define WM8995_FLL1_CTRL_RATE_WIDTH                  3	/* FLL1_CTRL_RATE - [6:4] */
+#define WM8995_FLL1_FRATIO_MASK                 0x0007	/* FLL1_FRATIO - [2:0] */
+#define WM8995_FLL1_FRATIO_SHIFT                     0	/* FLL1_FRATIO - [2:0] */
+#define WM8995_FLL1_FRATIO_WIDTH                     3	/* FLL1_FRATIO - [2:0] */
+
+/*
+ * R546 (0x222) - FLL1 Control (3)
+ */
+#define WM8995_FLL1_K_MASK                      0xFFFF	/* FLL1_K - [15:0] */
+#define WM8995_FLL1_K_SHIFT                          0	/* FLL1_K - [15:0] */
+#define WM8995_FLL1_K_WIDTH                         16	/* FLL1_K - [15:0] */
+
+/*
+ * R547 (0x223) - FLL1 Control (4)
+ */
+#define WM8995_FLL1_N_MASK                      0x7FE0	/* FLL1_N - [14:5] */
+#define WM8995_FLL1_N_SHIFT                          5	/* FLL1_N - [14:5] */
+#define WM8995_FLL1_N_WIDTH                         10	/* FLL1_N - [14:5] */
+#define WM8995_FLL1_LOOP_GAIN_MASK              0x000F	/* FLL1_LOOP_GAIN - [3:0] */
+#define WM8995_FLL1_LOOP_GAIN_SHIFT                  0	/* FLL1_LOOP_GAIN - [3:0] */
+#define WM8995_FLL1_LOOP_GAIN_WIDTH                  4	/* FLL1_LOOP_GAIN - [3:0] */
+
+/*
+ * R548 (0x224) - FLL1 Control (5)
+ */
+#define WM8995_FLL1_FRC_NCO_VAL_MASK            0x1F80	/* FLL1_FRC_NCO_VAL - [12:7] */
+#define WM8995_FLL1_FRC_NCO_VAL_SHIFT                7	/* FLL1_FRC_NCO_VAL - [12:7] */
+#define WM8995_FLL1_FRC_NCO_VAL_WIDTH                6	/* FLL1_FRC_NCO_VAL - [12:7] */
+#define WM8995_FLL1_FRC_NCO                     0x0040	/* FLL1_FRC_NCO */
+#define WM8995_FLL1_FRC_NCO_MASK                0x0040	/* FLL1_FRC_NCO */
+#define WM8995_FLL1_FRC_NCO_SHIFT                    6	/* FLL1_FRC_NCO */
+#define WM8995_FLL1_FRC_NCO_WIDTH                    1	/* FLL1_FRC_NCO */
+#define WM8995_FLL1_REFCLK_DIV_MASK             0x0018	/* FLL1_REFCLK_DIV - [4:3] */
+#define WM8995_FLL1_REFCLK_DIV_SHIFT                 3	/* FLL1_REFCLK_DIV - [4:3] */
+#define WM8995_FLL1_REFCLK_DIV_WIDTH                 2	/* FLL1_REFCLK_DIV - [4:3] */
+#define WM8995_FLL1_REFCLK_SRC_MASK             0x0003	/* FLL1_REFCLK_SRC - [1:0] */
+#define WM8995_FLL1_REFCLK_SRC_SHIFT                 0	/* FLL1_REFCLK_SRC - [1:0] */
+#define WM8995_FLL1_REFCLK_SRC_WIDTH                 2	/* FLL1_REFCLK_SRC - [1:0] */
+
+/*
+ * R576 (0x240) - FLL2 Control (1)
+ */
+#define WM8995_FLL2_OSC_ENA                     0x0002	/* FLL2_OSC_ENA */
+#define WM8995_FLL2_OSC_ENA_MASK                0x0002	/* FLL2_OSC_ENA */
+#define WM8995_FLL2_OSC_ENA_SHIFT                    1	/* FLL2_OSC_ENA */
+#define WM8995_FLL2_OSC_ENA_WIDTH                    1	/* FLL2_OSC_ENA */
+#define WM8995_FLL2_ENA                         0x0001	/* FLL2_ENA */
+#define WM8995_FLL2_ENA_MASK                    0x0001	/* FLL2_ENA */
+#define WM8995_FLL2_ENA_SHIFT                        0	/* FLL2_ENA */
+#define WM8995_FLL2_ENA_WIDTH                        1	/* FLL2_ENA */
+
+/*
+ * R577 (0x241) - FLL2 Control (2)
+ */
+#define WM8995_FLL2_OUTDIV_MASK                 0x3F00	/* FLL2_OUTDIV - [13:8] */
+#define WM8995_FLL2_OUTDIV_SHIFT                     8	/* FLL2_OUTDIV - [13:8] */
+#define WM8995_FLL2_OUTDIV_WIDTH                     6	/* FLL2_OUTDIV - [13:8] */
+#define WM8995_FLL2_CTRL_RATE_MASK              0x0070	/* FLL2_CTRL_RATE - [6:4] */
+#define WM8995_FLL2_CTRL_RATE_SHIFT                  4	/* FLL2_CTRL_RATE - [6:4] */
+#define WM8995_FLL2_CTRL_RATE_WIDTH                  3	/* FLL2_CTRL_RATE - [6:4] */
+#define WM8995_FLL2_FRATIO_MASK                 0x0007	/* FLL2_FRATIO - [2:0] */
+#define WM8995_FLL2_FRATIO_SHIFT                     0	/* FLL2_FRATIO - [2:0] */
+#define WM8995_FLL2_FRATIO_WIDTH                     3	/* FLL2_FRATIO - [2:0] */
+
+/*
+ * R578 (0x242) - FLL2 Control (3)
+ */
+#define WM8995_FLL2_K_MASK                      0xFFFF	/* FLL2_K - [15:0] */
+#define WM8995_FLL2_K_SHIFT                          0	/* FLL2_K - [15:0] */
+#define WM8995_FLL2_K_WIDTH                         16	/* FLL2_K - [15:0] */
+
+/*
+ * R579 (0x243) - FLL2 Control (4)
+ */
+#define WM8995_FLL2_N_MASK                      0x7FE0	/* FLL2_N - [14:5] */
+#define WM8995_FLL2_N_SHIFT                          5	/* FLL2_N - [14:5] */
+#define WM8995_FLL2_N_WIDTH                         10	/* FLL2_N - [14:5] */
+#define WM8995_FLL2_LOOP_GAIN_MASK              0x000F	/* FLL2_LOOP_GAIN - [3:0] */
+#define WM8995_FLL2_LOOP_GAIN_SHIFT                  0	/* FLL2_LOOP_GAIN - [3:0] */
+#define WM8995_FLL2_LOOP_GAIN_WIDTH                  4	/* FLL2_LOOP_GAIN - [3:0] */
+
+/*
+ * R580 (0x244) - FLL2 Control (5)
+ */
+#define WM8995_FLL2_FRC_NCO_VAL_MASK            0x1F80	/* FLL2_FRC_NCO_VAL - [12:7] */
+#define WM8995_FLL2_FRC_NCO_VAL_SHIFT                7	/* FLL2_FRC_NCO_VAL - [12:7] */
+#define WM8995_FLL2_FRC_NCO_VAL_WIDTH                6	/* FLL2_FRC_NCO_VAL - [12:7] */
+#define WM8995_FLL2_FRC_NCO                     0x0040	/* FLL2_FRC_NCO */
+#define WM8995_FLL2_FRC_NCO_MASK                0x0040	/* FLL2_FRC_NCO */
+#define WM8995_FLL2_FRC_NCO_SHIFT                    6	/* FLL2_FRC_NCO */
+#define WM8995_FLL2_FRC_NCO_WIDTH                    1	/* FLL2_FRC_NCO */
+#define WM8995_FLL2_REFCLK_DIV_MASK             0x0018	/* FLL2_REFCLK_DIV - [4:3] */
+#define WM8995_FLL2_REFCLK_DIV_SHIFT                 3	/* FLL2_REFCLK_DIV - [4:3] */
+#define WM8995_FLL2_REFCLK_DIV_WIDTH                 2	/* FLL2_REFCLK_DIV - [4:3] */
+#define WM8995_FLL2_REFCLK_SRC_MASK             0x0003	/* FLL2_REFCLK_SRC - [1:0] */
+#define WM8995_FLL2_REFCLK_SRC_SHIFT                 0	/* FLL2_REFCLK_SRC - [1:0] */
+#define WM8995_FLL2_REFCLK_SRC_WIDTH                 2	/* FLL2_REFCLK_SRC - [1:0] */
+
+/*
+ * R768 (0x300) - AIF1 Control (1)
+ */
+#define WM8995_AIF1ADCL_SRC                     0x8000	/* AIF1ADCL_SRC */
+#define WM8995_AIF1ADCL_SRC_MASK                0x8000	/* AIF1ADCL_SRC */
+#define WM8995_AIF1ADCL_SRC_SHIFT                   15	/* AIF1ADCL_SRC */
+#define WM8995_AIF1ADCL_SRC_WIDTH                    1	/* AIF1ADCL_SRC */
+#define WM8995_AIF1ADCR_SRC                     0x4000	/* AIF1ADCR_SRC */
+#define WM8995_AIF1ADCR_SRC_MASK                0x4000	/* AIF1ADCR_SRC */
+#define WM8995_AIF1ADCR_SRC_SHIFT                   14	/* AIF1ADCR_SRC */
+#define WM8995_AIF1ADCR_SRC_WIDTH                    1	/* AIF1ADCR_SRC */
+#define WM8995_AIF1ADC_TDM                      0x2000	/* AIF1ADC_TDM */
+#define WM8995_AIF1ADC_TDM_MASK                 0x2000	/* AIF1ADC_TDM */
+#define WM8995_AIF1ADC_TDM_SHIFT                    13	/* AIF1ADC_TDM */
+#define WM8995_AIF1ADC_TDM_WIDTH                     1	/* AIF1ADC_TDM */
+#define WM8995_AIF1_BCLK_INV                    0x0100	/* AIF1_BCLK_INV */
+#define WM8995_AIF1_BCLK_INV_MASK               0x0100	/* AIF1_BCLK_INV */
+#define WM8995_AIF1_BCLK_INV_SHIFT                   8	/* AIF1_BCLK_INV */
+#define WM8995_AIF1_BCLK_INV_WIDTH                   1	/* AIF1_BCLK_INV */
+#define WM8995_AIF1_LRCLK_INV                   0x0080	/* AIF1_LRCLK_INV */
+#define WM8995_AIF1_LRCLK_INV_MASK              0x0080	/* AIF1_LRCLK_INV */
+#define WM8995_AIF1_LRCLK_INV_SHIFT                  7	/* AIF1_LRCLK_INV */
+#define WM8995_AIF1_LRCLK_INV_WIDTH                  1	/* AIF1_LRCLK_INV */
+#define WM8995_AIF1_WL_MASK                     0x0060	/* AIF1_WL - [6:5] */
+#define WM8995_AIF1_WL_SHIFT                         5	/* AIF1_WL - [6:5] */
+#define WM8995_AIF1_WL_WIDTH                         2	/* AIF1_WL - [6:5] */
+#define WM8995_AIF1_FMT_MASK                    0x0018	/* AIF1_FMT - [4:3] */
+#define WM8995_AIF1_FMT_SHIFT                        3	/* AIF1_FMT - [4:3] */
+#define WM8995_AIF1_FMT_WIDTH                        2	/* AIF1_FMT - [4:3] */
+
+/*
+ * R769 (0x301) - AIF1 Control (2)
+ */
+#define WM8995_AIF1DACL_SRC                     0x8000	/* AIF1DACL_SRC */
+#define WM8995_AIF1DACL_SRC_MASK                0x8000	/* AIF1DACL_SRC */
+#define WM8995_AIF1DACL_SRC_SHIFT                   15	/* AIF1DACL_SRC */
+#define WM8995_AIF1DACL_SRC_WIDTH                    1	/* AIF1DACL_SRC */
+#define WM8995_AIF1DACR_SRC                     0x4000	/* AIF1DACR_SRC */
+#define WM8995_AIF1DACR_SRC_MASK                0x4000	/* AIF1DACR_SRC */
+#define WM8995_AIF1DACR_SRC_SHIFT                   14	/* AIF1DACR_SRC */
+#define WM8995_AIF1DACR_SRC_WIDTH                    1	/* AIF1DACR_SRC */
+#define WM8995_AIF1DAC_BOOST_MASK               0x0C00	/* AIF1DAC_BOOST - [11:10] */
+#define WM8995_AIF1DAC_BOOST_SHIFT                  10	/* AIF1DAC_BOOST - [11:10] */
+#define WM8995_AIF1DAC_BOOST_WIDTH                   2	/* AIF1DAC_BOOST - [11:10] */
+#define WM8995_AIF1DAC_COMP                     0x0010	/* AIF1DAC_COMP */
+#define WM8995_AIF1DAC_COMP_MASK                0x0010	/* AIF1DAC_COMP */
+#define WM8995_AIF1DAC_COMP_SHIFT                    4	/* AIF1DAC_COMP */
+#define WM8995_AIF1DAC_COMP_WIDTH                    1	/* AIF1DAC_COMP */
+#define WM8995_AIF1DAC_COMPMODE                 0x0008	/* AIF1DAC_COMPMODE */
+#define WM8995_AIF1DAC_COMPMODE_MASK            0x0008	/* AIF1DAC_COMPMODE */
+#define WM8995_AIF1DAC_COMPMODE_SHIFT                3	/* AIF1DAC_COMPMODE */
+#define WM8995_AIF1DAC_COMPMODE_WIDTH                1	/* AIF1DAC_COMPMODE */
+#define WM8995_AIF1ADC_COMP                     0x0004	/* AIF1ADC_COMP */
+#define WM8995_AIF1ADC_COMP_MASK                0x0004	/* AIF1ADC_COMP */
+#define WM8995_AIF1ADC_COMP_SHIFT                    2	/* AIF1ADC_COMP */
+#define WM8995_AIF1ADC_COMP_WIDTH                    1	/* AIF1ADC_COMP */
+#define WM8995_AIF1ADC_COMPMODE                 0x0002	/* AIF1ADC_COMPMODE */
+#define WM8995_AIF1ADC_COMPMODE_MASK            0x0002	/* AIF1ADC_COMPMODE */
+#define WM8995_AIF1ADC_COMPMODE_SHIFT                1	/* AIF1ADC_COMPMODE */
+#define WM8995_AIF1ADC_COMPMODE_WIDTH                1	/* AIF1ADC_COMPMODE */
+#define WM8995_AIF1_LOOPBACK                    0x0001	/* AIF1_LOOPBACK */
+#define WM8995_AIF1_LOOPBACK_MASK               0x0001	/* AIF1_LOOPBACK */
+#define WM8995_AIF1_LOOPBACK_SHIFT                   0	/* AIF1_LOOPBACK */
+#define WM8995_AIF1_LOOPBACK_WIDTH                   1	/* AIF1_LOOPBACK */
+
+/*
+ * R770 (0x302) - AIF1 Master/Slave
+ */
+#define WM8995_AIF1_TRI                         0x8000	/* AIF1_TRI */
+#define WM8995_AIF1_TRI_MASK                    0x8000	/* AIF1_TRI */
+#define WM8995_AIF1_TRI_SHIFT                       15	/* AIF1_TRI */
+#define WM8995_AIF1_TRI_WIDTH                        1	/* AIF1_TRI */
+#define WM8995_AIF1_MSTR                        0x4000	/* AIF1_MSTR */
+#define WM8995_AIF1_MSTR_MASK                   0x4000	/* AIF1_MSTR */
+#define WM8995_AIF1_MSTR_SHIFT                      14	/* AIF1_MSTR */
+#define WM8995_AIF1_MSTR_WIDTH                       1	/* AIF1_MSTR */
+#define WM8995_AIF1_CLK_FRC                     0x2000	/* AIF1_CLK_FRC */
+#define WM8995_AIF1_CLK_FRC_MASK                0x2000	/* AIF1_CLK_FRC */
+#define WM8995_AIF1_CLK_FRC_SHIFT                   13	/* AIF1_CLK_FRC */
+#define WM8995_AIF1_CLK_FRC_WIDTH                    1	/* AIF1_CLK_FRC */
+#define WM8995_AIF1_LRCLK_FRC                   0x1000	/* AIF1_LRCLK_FRC */
+#define WM8995_AIF1_LRCLK_FRC_MASK              0x1000	/* AIF1_LRCLK_FRC */
+#define WM8995_AIF1_LRCLK_FRC_SHIFT                 12	/* AIF1_LRCLK_FRC */
+#define WM8995_AIF1_LRCLK_FRC_WIDTH                  1	/* AIF1_LRCLK_FRC */
+
+/*
+ * R771 (0x303) - AIF1 BCLK
+ */
+#define WM8995_AIF1_BCLK_DIV_MASK               0x00F0	/* AIF1_BCLK_DIV - [7:4] */
+#define WM8995_AIF1_BCLK_DIV_SHIFT                   4	/* AIF1_BCLK_DIV - [7:4] */
+#define WM8995_AIF1_BCLK_DIV_WIDTH                   4	/* AIF1_BCLK_DIV - [7:4] */
+
+/*
+ * R772 (0x304) - AIF1ADC LRCLK
+ */
+#define WM8995_AIF1ADC_LRCLK_DIR                0x0800	/* AIF1ADC_LRCLK_DIR */
+#define WM8995_AIF1ADC_LRCLK_DIR_MASK           0x0800	/* AIF1ADC_LRCLK_DIR */
+#define WM8995_AIF1ADC_LRCLK_DIR_SHIFT              11	/* AIF1ADC_LRCLK_DIR */
+#define WM8995_AIF1ADC_LRCLK_DIR_WIDTH               1	/* AIF1ADC_LRCLK_DIR */
+#define WM8995_AIF1ADC_RATE_MASK                0x07FF	/* AIF1ADC_RATE - [10:0] */
+#define WM8995_AIF1ADC_RATE_SHIFT                    0	/* AIF1ADC_RATE - [10:0] */
+#define WM8995_AIF1ADC_RATE_WIDTH                   11	/* AIF1ADC_RATE - [10:0] */
+
+/*
+ * R773 (0x305) - AIF1DAC LRCLK
+ */
+#define WM8995_AIF1DAC_LRCLK_DIR                0x0800	/* AIF1DAC_LRCLK_DIR */
+#define WM8995_AIF1DAC_LRCLK_DIR_MASK           0x0800	/* AIF1DAC_LRCLK_DIR */
+#define WM8995_AIF1DAC_LRCLK_DIR_SHIFT              11	/* AIF1DAC_LRCLK_DIR */
+#define WM8995_AIF1DAC_LRCLK_DIR_WIDTH               1	/* AIF1DAC_LRCLK_DIR */
+#define WM8995_AIF1DAC_RATE_MASK                0x07FF	/* AIF1DAC_RATE - [10:0] */
+#define WM8995_AIF1DAC_RATE_SHIFT                    0	/* AIF1DAC_RATE - [10:0] */
+#define WM8995_AIF1DAC_RATE_WIDTH                   11	/* AIF1DAC_RATE - [10:0] */
+
+/*
+ * R774 (0x306) - AIF1DAC Data
+ */
+#define WM8995_AIF1DACL_DAT_INV                 0x0002	/* AIF1DACL_DAT_INV */
+#define WM8995_AIF1DACL_DAT_INV_MASK            0x0002	/* AIF1DACL_DAT_INV */
+#define WM8995_AIF1DACL_DAT_INV_SHIFT                1	/* AIF1DACL_DAT_INV */
+#define WM8995_AIF1DACL_DAT_INV_WIDTH                1	/* AIF1DACL_DAT_INV */
+#define WM8995_AIF1DACR_DAT_INV                 0x0001	/* AIF1DACR_DAT_INV */
+#define WM8995_AIF1DACR_DAT_INV_MASK            0x0001	/* AIF1DACR_DAT_INV */
+#define WM8995_AIF1DACR_DAT_INV_SHIFT                0	/* AIF1DACR_DAT_INV */
+#define WM8995_AIF1DACR_DAT_INV_WIDTH                1	/* AIF1DACR_DAT_INV */
+
+/*
+ * R775 (0x307) - AIF1ADC Data
+ */
+#define WM8995_AIF1ADCL_DAT_INV                 0x0002	/* AIF1ADCL_DAT_INV */
+#define WM8995_AIF1ADCL_DAT_INV_MASK            0x0002	/* AIF1ADCL_DAT_INV */
+#define WM8995_AIF1ADCL_DAT_INV_SHIFT                1	/* AIF1ADCL_DAT_INV */
+#define WM8995_AIF1ADCL_DAT_INV_WIDTH                1	/* AIF1ADCL_DAT_INV */
+#define WM8995_AIF1ADCR_DAT_INV                 0x0001	/* AIF1ADCR_DAT_INV */
+#define WM8995_AIF1ADCR_DAT_INV_MASK            0x0001	/* AIF1ADCR_DAT_INV */
+#define WM8995_AIF1ADCR_DAT_INV_SHIFT                0	/* AIF1ADCR_DAT_INV */
+#define WM8995_AIF1ADCR_DAT_INV_WIDTH                1	/* AIF1ADCR_DAT_INV */
+
+/*
+ * R784 (0x310) - AIF2 Control (1)
+ */
+#define WM8995_AIF2ADCL_SRC                     0x8000	/* AIF2ADCL_SRC */
+#define WM8995_AIF2ADCL_SRC_MASK                0x8000	/* AIF2ADCL_SRC */
+#define WM8995_AIF2ADCL_SRC_SHIFT                   15	/* AIF2ADCL_SRC */
+#define WM8995_AIF2ADCL_SRC_WIDTH                    1	/* AIF2ADCL_SRC */
+#define WM8995_AIF2ADCR_SRC                     0x4000	/* AIF2ADCR_SRC */
+#define WM8995_AIF2ADCR_SRC_MASK                0x4000	/* AIF2ADCR_SRC */
+#define WM8995_AIF2ADCR_SRC_SHIFT                   14	/* AIF2ADCR_SRC */
+#define WM8995_AIF2ADCR_SRC_WIDTH                    1	/* AIF2ADCR_SRC */
+#define WM8995_AIF2ADC_TDM                      0x2000	/* AIF2ADC_TDM */
+#define WM8995_AIF2ADC_TDM_MASK                 0x2000	/* AIF2ADC_TDM */
+#define WM8995_AIF2ADC_TDM_SHIFT                    13	/* AIF2ADC_TDM */
+#define WM8995_AIF2ADC_TDM_WIDTH                     1	/* AIF2ADC_TDM */
+#define WM8995_AIF2ADC_TDM_CHAN                 0x1000	/* AIF2ADC_TDM_CHAN */
+#define WM8995_AIF2ADC_TDM_CHAN_MASK            0x1000	/* AIF2ADC_TDM_CHAN */
+#define WM8995_AIF2ADC_TDM_CHAN_SHIFT               12	/* AIF2ADC_TDM_CHAN */
+#define WM8995_AIF2ADC_TDM_CHAN_WIDTH                1	/* AIF2ADC_TDM_CHAN */
+#define WM8995_AIF2_BCLK_INV                    0x0100	/* AIF2_BCLK_INV */
+#define WM8995_AIF2_BCLK_INV_MASK               0x0100	/* AIF2_BCLK_INV */
+#define WM8995_AIF2_BCLK_INV_SHIFT                   8	/* AIF2_BCLK_INV */
+#define WM8995_AIF2_BCLK_INV_WIDTH                   1	/* AIF2_BCLK_INV */
+#define WM8995_AIF2_LRCLK_INV                   0x0080	/* AIF2_LRCLK_INV */
+#define WM8995_AIF2_LRCLK_INV_MASK              0x0080	/* AIF2_LRCLK_INV */
+#define WM8995_AIF2_LRCLK_INV_SHIFT                  7	/* AIF2_LRCLK_INV */
+#define WM8995_AIF2_LRCLK_INV_WIDTH                  1	/* AIF2_LRCLK_INV */
+#define WM8995_AIF2_WL_MASK                     0x0060	/* AIF2_WL - [6:5] */
+#define WM8995_AIF2_WL_SHIFT                         5	/* AIF2_WL - [6:5] */
+#define WM8995_AIF2_WL_WIDTH                         2	/* AIF2_WL - [6:5] */
+#define WM8995_AIF2_FMT_MASK                    0x0018	/* AIF2_FMT - [4:3] */
+#define WM8995_AIF2_FMT_SHIFT                        3	/* AIF2_FMT - [4:3] */
+#define WM8995_AIF2_FMT_WIDTH                        2	/* AIF2_FMT - [4:3] */
+
+/*
+ * R785 (0x311) - AIF2 Control (2)
+ */
+#define WM8995_AIF2DACL_SRC                     0x8000	/* AIF2DACL_SRC */
+#define WM8995_AIF2DACL_SRC_MASK                0x8000	/* AIF2DACL_SRC */
+#define WM8995_AIF2DACL_SRC_SHIFT                   15	/* AIF2DACL_SRC */
+#define WM8995_AIF2DACL_SRC_WIDTH                    1	/* AIF2DACL_SRC */
+#define WM8995_AIF2DACR_SRC                     0x4000	/* AIF2DACR_SRC */
+#define WM8995_AIF2DACR_SRC_MASK                0x4000	/* AIF2DACR_SRC */
+#define WM8995_AIF2DACR_SRC_SHIFT                   14	/* AIF2DACR_SRC */
+#define WM8995_AIF2DACR_SRC_WIDTH                    1	/* AIF2DACR_SRC */
+#define WM8995_AIF2DAC_TDM                      0x2000	/* AIF2DAC_TDM */
+#define WM8995_AIF2DAC_TDM_MASK                 0x2000	/* AIF2DAC_TDM */
+#define WM8995_AIF2DAC_TDM_SHIFT                    13	/* AIF2DAC_TDM */
+#define WM8995_AIF2DAC_TDM_WIDTH                     1	/* AIF2DAC_TDM */
+#define WM8995_AIF2DAC_TDM_CHAN                 0x1000	/* AIF2DAC_TDM_CHAN */
+#define WM8995_AIF2DAC_TDM_CHAN_MASK            0x1000	/* AIF2DAC_TDM_CHAN */
+#define WM8995_AIF2DAC_TDM_CHAN_SHIFT               12	/* AIF2DAC_TDM_CHAN */
+#define WM8995_AIF2DAC_TDM_CHAN_WIDTH                1	/* AIF2DAC_TDM_CHAN */
+#define WM8995_AIF2DAC_BOOST_MASK               0x0C00	/* AIF2DAC_BOOST - [11:10] */
+#define WM8995_AIF2DAC_BOOST_SHIFT                  10	/* AIF2DAC_BOOST - [11:10] */
+#define WM8995_AIF2DAC_BOOST_WIDTH                   2	/* AIF2DAC_BOOST - [11:10] */
+#define WM8995_AIF2DAC_COMP                     0x0010	/* AIF2DAC_COMP */
+#define WM8995_AIF2DAC_COMP_MASK                0x0010	/* AIF2DAC_COMP */
+#define WM8995_AIF2DAC_COMP_SHIFT                    4	/* AIF2DAC_COMP */
+#define WM8995_AIF2DAC_COMP_WIDTH                    1	/* AIF2DAC_COMP */
+#define WM8995_AIF2DAC_COMPMODE                 0x0008	/* AIF2DAC_COMPMODE */
+#define WM8995_AIF2DAC_COMPMODE_MASK            0x0008	/* AIF2DAC_COMPMODE */
+#define WM8995_AIF2DAC_COMPMODE_SHIFT                3	/* AIF2DAC_COMPMODE */
+#define WM8995_AIF2DAC_COMPMODE_WIDTH                1	/* AIF2DAC_COMPMODE */
+#define WM8995_AIF2ADC_COMP                     0x0004	/* AIF2ADC_COMP */
+#define WM8995_AIF2ADC_COMP_MASK                0x0004	/* AIF2ADC_COMP */
+#define WM8995_AIF2ADC_COMP_SHIFT                    2	/* AIF2ADC_COMP */
+#define WM8995_AIF2ADC_COMP_WIDTH                    1	/* AIF2ADC_COMP */
+#define WM8995_AIF2ADC_COMPMODE                 0x0002	/* AIF2ADC_COMPMODE */
+#define WM8995_AIF2ADC_COMPMODE_MASK            0x0002	/* AIF2ADC_COMPMODE */
+#define WM8995_AIF2ADC_COMPMODE_SHIFT                1	/* AIF2ADC_COMPMODE */
+#define WM8995_AIF2ADC_COMPMODE_WIDTH                1	/* AIF2ADC_COMPMODE */
+#define WM8995_AIF2_LOOPBACK                    0x0001	/* AIF2_LOOPBACK */
+#define WM8995_AIF2_LOOPBACK_MASK               0x0001	/* AIF2_LOOPBACK */
+#define WM8995_AIF2_LOOPBACK_SHIFT                   0	/* AIF2_LOOPBACK */
+#define WM8995_AIF2_LOOPBACK_WIDTH                   1	/* AIF2_LOOPBACK */
+
+/*
+ * R786 (0x312) - AIF2 Master/Slave
+ */
+#define WM8995_AIF2_TRI                         0x8000	/* AIF2_TRI */
+#define WM8995_AIF2_TRI_MASK                    0x8000	/* AIF2_TRI */
+#define WM8995_AIF2_TRI_SHIFT                       15	/* AIF2_TRI */
+#define WM8995_AIF2_TRI_WIDTH                        1	/* AIF2_TRI */
+#define WM8995_AIF2_MSTR                        0x4000	/* AIF2_MSTR */
+#define WM8995_AIF2_MSTR_MASK                   0x4000	/* AIF2_MSTR */
+#define WM8995_AIF2_MSTR_SHIFT                      14	/* AIF2_MSTR */
+#define WM8995_AIF2_MSTR_WIDTH                       1	/* AIF2_MSTR */
+#define WM8995_AIF2_CLK_FRC                     0x2000	/* AIF2_CLK_FRC */
+#define WM8995_AIF2_CLK_FRC_MASK                0x2000	/* AIF2_CLK_FRC */
+#define WM8995_AIF2_CLK_FRC_SHIFT                   13	/* AIF2_CLK_FRC */
+#define WM8995_AIF2_CLK_FRC_WIDTH                    1	/* AIF2_CLK_FRC */
+#define WM8995_AIF2_LRCLK_FRC                   0x1000	/* AIF2_LRCLK_FRC */
+#define WM8995_AIF2_LRCLK_FRC_MASK              0x1000	/* AIF2_LRCLK_FRC */
+#define WM8995_AIF2_LRCLK_FRC_SHIFT                 12	/* AIF2_LRCLK_FRC */
+#define WM8995_AIF2_LRCLK_FRC_WIDTH                  1	/* AIF2_LRCLK_FRC */
+
+/*
+ * R787 (0x313) - AIF2 BCLK
+ */
+#define WM8995_AIF2_BCLK_DIV_MASK               0x00F0	/* AIF2_BCLK_DIV - [7:4] */
+#define WM8995_AIF2_BCLK_DIV_SHIFT                   4	/* AIF2_BCLK_DIV - [7:4] */
+#define WM8995_AIF2_BCLK_DIV_WIDTH                   4	/* AIF2_BCLK_DIV - [7:4] */
+
+/*
+ * R788 (0x314) - AIF2ADC LRCLK
+ */
+#define WM8995_AIF2ADC_LRCLK_DIR                0x0800	/* AIF2ADC_LRCLK_DIR */
+#define WM8995_AIF2ADC_LRCLK_DIR_MASK           0x0800	/* AIF2ADC_LRCLK_DIR */
+#define WM8995_AIF2ADC_LRCLK_DIR_SHIFT              11	/* AIF2ADC_LRCLK_DIR */
+#define WM8995_AIF2ADC_LRCLK_DIR_WIDTH               1	/* AIF2ADC_LRCLK_DIR */
+#define WM8995_AIF2ADC_RATE_MASK                0x07FF	/* AIF2ADC_RATE - [10:0] */
+#define WM8995_AIF2ADC_RATE_SHIFT                    0	/* AIF2ADC_RATE - [10:0] */
+#define WM8995_AIF2ADC_RATE_WIDTH                   11	/* AIF2ADC_RATE - [10:0] */
+
+/*
+ * R789 (0x315) - AIF2DAC LRCLK
+ */
+#define WM8995_AIF2DAC_LRCLK_DIR                0x0800	/* AIF2DAC_LRCLK_DIR */
+#define WM8995_AIF2DAC_LRCLK_DIR_MASK           0x0800	/* AIF2DAC_LRCLK_DIR */
+#define WM8995_AIF2DAC_LRCLK_DIR_SHIFT              11	/* AIF2DAC_LRCLK_DIR */
+#define WM8995_AIF2DAC_LRCLK_DIR_WIDTH               1	/* AIF2DAC_LRCLK_DIR */
+#define WM8995_AIF2DAC_RATE_MASK                0x07FF	/* AIF2DAC_RATE - [10:0] */
+#define WM8995_AIF2DAC_RATE_SHIFT                    0	/* AIF2DAC_RATE - [10:0] */
+#define WM8995_AIF2DAC_RATE_WIDTH                   11	/* AIF2DAC_RATE - [10:0] */
+
+/*
+ * R790 (0x316) - AIF2DAC Data
+ */
+#define WM8995_AIF2DACL_DAT_INV                 0x0002	/* AIF2DACL_DAT_INV */
+#define WM8995_AIF2DACL_DAT_INV_MASK            0x0002	/* AIF2DACL_DAT_INV */
+#define WM8995_AIF2DACL_DAT_INV_SHIFT                1	/* AIF2DACL_DAT_INV */
+#define WM8995_AIF2DACL_DAT_INV_WIDTH                1	/* AIF2DACL_DAT_INV */
+#define WM8995_AIF2DACR_DAT_INV                 0x0001	/* AIF2DACR_DAT_INV */
+#define WM8995_AIF2DACR_DAT_INV_MASK            0x0001	/* AIF2DACR_DAT_INV */
+#define WM8995_AIF2DACR_DAT_INV_SHIFT                0	/* AIF2DACR_DAT_INV */
+#define WM8995_AIF2DACR_DAT_INV_WIDTH                1	/* AIF2DACR_DAT_INV */
+
+/*
+ * R791 (0x317) - AIF2ADC Data
+ */
+#define WM8995_AIF2ADCL_DAT_INV                 0x0002	/* AIF2ADCL_DAT_INV */
+#define WM8995_AIF2ADCL_DAT_INV_MASK            0x0002	/* AIF2ADCL_DAT_INV */
+#define WM8995_AIF2ADCL_DAT_INV_SHIFT                1	/* AIF2ADCL_DAT_INV */
+#define WM8995_AIF2ADCL_DAT_INV_WIDTH                1	/* AIF2ADCL_DAT_INV */
+#define WM8995_AIF2ADCR_DAT_INV                 0x0001	/* AIF2ADCR_DAT_INV */
+#define WM8995_AIF2ADCR_DAT_INV_MASK            0x0001	/* AIF2ADCR_DAT_INV */
+#define WM8995_AIF2ADCR_DAT_INV_SHIFT                0	/* AIF2ADCR_DAT_INV */
+#define WM8995_AIF2ADCR_DAT_INV_WIDTH                1	/* AIF2ADCR_DAT_INV */
+
+/*
+ * R1024 (0x400) - AIF1 ADC1 Left Volume
+ */
+#define WM8995_AIF1ADC1_VU                      0x0100	/* AIF1ADC1_VU */
+#define WM8995_AIF1ADC1_VU_MASK                 0x0100	/* AIF1ADC1_VU */
+#define WM8995_AIF1ADC1_VU_SHIFT                     8	/* AIF1ADC1_VU */
+#define WM8995_AIF1ADC1_VU_WIDTH                     1	/* AIF1ADC1_VU */
+#define WM8995_AIF1ADC1L_VOL_MASK               0x00FF	/* AIF1ADC1L_VOL - [7:0] */
+#define WM8995_AIF1ADC1L_VOL_SHIFT                   0	/* AIF1ADC1L_VOL - [7:0] */
+#define WM8995_AIF1ADC1L_VOL_WIDTH                   8	/* AIF1ADC1L_VOL - [7:0] */
+
+/*
+ * R1025 (0x401) - AIF1 ADC1 Right Volume
+ */
+#define WM8995_AIF1ADC1_VU                      0x0100	/* AIF1ADC1_VU */
+#define WM8995_AIF1ADC1_VU_MASK                 0x0100	/* AIF1ADC1_VU */
+#define WM8995_AIF1ADC1_VU_SHIFT                     8	/* AIF1ADC1_VU */
+#define WM8995_AIF1ADC1_VU_WIDTH                     1	/* AIF1ADC1_VU */
+#define WM8995_AIF1ADC1R_VOL_MASK               0x00FF	/* AIF1ADC1R_VOL - [7:0] */
+#define WM8995_AIF1ADC1R_VOL_SHIFT                   0	/* AIF1ADC1R_VOL - [7:0] */
+#define WM8995_AIF1ADC1R_VOL_WIDTH                   8	/* AIF1ADC1R_VOL - [7:0] */
+
+/*
+ * R1026 (0x402) - AIF1 DAC1 Left Volume
+ */
+#define WM8995_AIF1DAC1_VU                      0x0100	/* AIF1DAC1_VU */
+#define WM8995_AIF1DAC1_VU_MASK                 0x0100	/* AIF1DAC1_VU */
+#define WM8995_AIF1DAC1_VU_SHIFT                     8	/* AIF1DAC1_VU */
+#define WM8995_AIF1DAC1_VU_WIDTH                     1	/* AIF1DAC1_VU */
+#define WM8995_AIF1DAC1L_VOL_MASK               0x00FF	/* AIF1DAC1L_VOL - [7:0] */
+#define WM8995_AIF1DAC1L_VOL_SHIFT                   0	/* AIF1DAC1L_VOL - [7:0] */
+#define WM8995_AIF1DAC1L_VOL_WIDTH                   8	/* AIF1DAC1L_VOL - [7:0] */
+
+/*
+ * R1027 (0x403) - AIF1 DAC1 Right Volume
+ */
+#define WM8995_AIF1DAC1_VU                      0x0100	/* AIF1DAC1_VU */
+#define WM8995_AIF1DAC1_VU_MASK                 0x0100	/* AIF1DAC1_VU */
+#define WM8995_AIF1DAC1_VU_SHIFT                     8	/* AIF1DAC1_VU */
+#define WM8995_AIF1DAC1_VU_WIDTH                     1	/* AIF1DAC1_VU */
+#define WM8995_AIF1DAC1R_VOL_MASK               0x00FF	/* AIF1DAC1R_VOL - [7:0] */
+#define WM8995_AIF1DAC1R_VOL_SHIFT                   0	/* AIF1DAC1R_VOL - [7:0] */
+#define WM8995_AIF1DAC1R_VOL_WIDTH                   8	/* AIF1DAC1R_VOL - [7:0] */
+
+/*
+ * R1028 (0x404) - AIF1 ADC2 Left Volume
+ */
+#define WM8995_AIF1ADC2_VU                      0x0100	/* AIF1ADC2_VU */
+#define WM8995_AIF1ADC2_VU_MASK                 0x0100	/* AIF1ADC2_VU */
+#define WM8995_AIF1ADC2_VU_SHIFT                     8	/* AIF1ADC2_VU */
+#define WM8995_AIF1ADC2_VU_WIDTH                     1	/* AIF1ADC2_VU */
+#define WM8995_AIF1ADC2L_VOL_MASK               0x00FF	/* AIF1ADC2L_VOL - [7:0] */
+#define WM8995_AIF1ADC2L_VOL_SHIFT                   0	/* AIF1ADC2L_VOL - [7:0] */
+#define WM8995_AIF1ADC2L_VOL_WIDTH                   8	/* AIF1ADC2L_VOL - [7:0] */
+
+/*
+ * R1029 (0x405) - AIF1 ADC2 Right Volume
+ */
+#define WM8995_AIF1ADC2_VU                      0x0100	/* AIF1ADC2_VU */
+#define WM8995_AIF1ADC2_VU_MASK                 0x0100	/* AIF1ADC2_VU */
+#define WM8995_AIF1ADC2_VU_SHIFT                     8	/* AIF1ADC2_VU */
+#define WM8995_AIF1ADC2_VU_WIDTH                     1	/* AIF1ADC2_VU */
+#define WM8995_AIF1ADC2R_VOL_MASK               0x00FF	/* AIF1ADC2R_VOL - [7:0] */
+#define WM8995_AIF1ADC2R_VOL_SHIFT                   0	/* AIF1ADC2R_VOL - [7:0] */
+#define WM8995_AIF1ADC2R_VOL_WIDTH                   8	/* AIF1ADC2R_VOL - [7:0] */
+
+/*
+ * R1030 (0x406) - AIF1 DAC2 Left Volume
+ */
+#define WM8995_AIF1DAC2_VU                      0x0100	/* AIF1DAC2_VU */
+#define WM8995_AIF1DAC2_VU_MASK                 0x0100	/* AIF1DAC2_VU */
+#define WM8995_AIF1DAC2_VU_SHIFT                     8	/* AIF1DAC2_VU */
+#define WM8995_AIF1DAC2_VU_WIDTH                     1	/* AIF1DAC2_VU */
+#define WM8995_AIF1DAC2L_VOL_MASK               0x00FF	/* AIF1DAC2L_VOL - [7:0] */
+#define WM8995_AIF1DAC2L_VOL_SHIFT                   0	/* AIF1DAC2L_VOL - [7:0] */
+#define WM8995_AIF1DAC2L_VOL_WIDTH                   8	/* AIF1DAC2L_VOL - [7:0] */
+
+/*
+ * R1031 (0x407) - AIF1 DAC2 Right Volume
+ */
+#define WM8995_AIF1DAC2_VU                      0x0100	/* AIF1DAC2_VU */
+#define WM8995_AIF1DAC2_VU_MASK                 0x0100	/* AIF1DAC2_VU */
+#define WM8995_AIF1DAC2_VU_SHIFT                     8	/* AIF1DAC2_VU */
+#define WM8995_AIF1DAC2_VU_WIDTH                     1	/* AIF1DAC2_VU */
+#define WM8995_AIF1DAC2R_VOL_MASK               0x00FF	/* AIF1DAC2R_VOL - [7:0] */
+#define WM8995_AIF1DAC2R_VOL_SHIFT                   0	/* AIF1DAC2R_VOL - [7:0] */
+#define WM8995_AIF1DAC2R_VOL_WIDTH                   8	/* AIF1DAC2R_VOL - [7:0] */
+
+/*
+ * R1040 (0x410) - AIF1 ADC1 Filters
+ */
+#define WM8995_AIF1ADC_4FS                      0x8000	/* AIF1ADC_4FS */
+#define WM8995_AIF1ADC_4FS_MASK                 0x8000	/* AIF1ADC_4FS */
+#define WM8995_AIF1ADC_4FS_SHIFT                    15	/* AIF1ADC_4FS */
+#define WM8995_AIF1ADC_4FS_WIDTH                     1	/* AIF1ADC_4FS */
+#define WM8995_AIF1ADC1L_HPF                    0x1000	/* AIF1ADC1L_HPF */
+#define WM8995_AIF1ADC1L_HPF_MASK               0x1000	/* AIF1ADC1L_HPF */
+#define WM8995_AIF1ADC1L_HPF_SHIFT                  12	/* AIF1ADC1L_HPF */
+#define WM8995_AIF1ADC1L_HPF_WIDTH                   1	/* AIF1ADC1L_HPF */
+#define WM8995_AIF1ADC1R_HPF                    0x0800	/* AIF1ADC1R_HPF */
+#define WM8995_AIF1ADC1R_HPF_MASK               0x0800	/* AIF1ADC1R_HPF */
+#define WM8995_AIF1ADC1R_HPF_SHIFT                  11	/* AIF1ADC1R_HPF */
+#define WM8995_AIF1ADC1R_HPF_WIDTH                   1	/* AIF1ADC1R_HPF */
+#define WM8995_AIF1ADC1_HPF_MODE                0x0008	/* AIF1ADC1_HPF_MODE */
+#define WM8995_AIF1ADC1_HPF_MODE_MASK           0x0008	/* AIF1ADC1_HPF_MODE */
+#define WM8995_AIF1ADC1_HPF_MODE_SHIFT               3	/* AIF1ADC1_HPF_MODE */
+#define WM8995_AIF1ADC1_HPF_MODE_WIDTH               1	/* AIF1ADC1_HPF_MODE */
+#define WM8995_AIF1ADC1_HPF_CUT_MASK            0x0007	/* AIF1ADC1_HPF_CUT - [2:0] */
+#define WM8995_AIF1ADC1_HPF_CUT_SHIFT                0	/* AIF1ADC1_HPF_CUT - [2:0] */
+#define WM8995_AIF1ADC1_HPF_CUT_WIDTH                3	/* AIF1ADC1_HPF_CUT - [2:0] */
+
+/*
+ * R1041 (0x411) - AIF1 ADC2 Filters
+ */
+#define WM8995_AIF1ADC2L_HPF                    0x1000	/* AIF1ADC2L_HPF */
+#define WM8995_AIF1ADC2L_HPF_MASK               0x1000	/* AIF1ADC2L_HPF */
+#define WM8995_AIF1ADC2L_HPF_SHIFT                  12	/* AIF1ADC2L_HPF */
+#define WM8995_AIF1ADC2L_HPF_WIDTH                   1	/* AIF1ADC2L_HPF */
+#define WM8995_AIF1ADC2R_HPF                    0x0800	/* AIF1ADC2R_HPF */
+#define WM8995_AIF1ADC2R_HPF_MASK               0x0800	/* AIF1ADC2R_HPF */
+#define WM8995_AIF1ADC2R_HPF_SHIFT                  11	/* AIF1ADC2R_HPF */
+#define WM8995_AIF1ADC2R_HPF_WIDTH                   1	/* AIF1ADC2R_HPF */
+#define WM8995_AIF1ADC2_HPF_MODE                0x0008	/* AIF1ADC2_HPF_MODE */
+#define WM8995_AIF1ADC2_HPF_MODE_MASK           0x0008	/* AIF1ADC2_HPF_MODE */
+#define WM8995_AIF1ADC2_HPF_MODE_SHIFT               3	/* AIF1ADC2_HPF_MODE */
+#define WM8995_AIF1ADC2_HPF_MODE_WIDTH               1	/* AIF1ADC2_HPF_MODE */
+#define WM8995_AIF1ADC2_HPF_CUT_MASK            0x0007	/* AIF1ADC2_HPF_CUT - [2:0] */
+#define WM8995_AIF1ADC2_HPF_CUT_SHIFT                0	/* AIF1ADC2_HPF_CUT - [2:0] */
+#define WM8995_AIF1ADC2_HPF_CUT_WIDTH                3	/* AIF1ADC2_HPF_CUT - [2:0] */
+
+/*
+ * R1056 (0x420) - AIF1 DAC1 Filters (1)
+ */
+#define WM8995_AIF1DAC1_MUTE                    0x0200	/* AIF1DAC1_MUTE */
+#define WM8995_AIF1DAC1_MUTE_MASK               0x0200	/* AIF1DAC1_MUTE */
+#define WM8995_AIF1DAC1_MUTE_SHIFT                   9	/* AIF1DAC1_MUTE */
+#define WM8995_AIF1DAC1_MUTE_WIDTH                   1	/* AIF1DAC1_MUTE */
+#define WM8995_AIF1DAC1_MONO                    0x0080	/* AIF1DAC1_MONO */
+#define WM8995_AIF1DAC1_MONO_MASK               0x0080	/* AIF1DAC1_MONO */
+#define WM8995_AIF1DAC1_MONO_SHIFT                   7	/* AIF1DAC1_MONO */
+#define WM8995_AIF1DAC1_MONO_WIDTH                   1	/* AIF1DAC1_MONO */
+#define WM8995_AIF1DAC1_MUTERATE                0x0020	/* AIF1DAC1_MUTERATE */
+#define WM8995_AIF1DAC1_MUTERATE_MASK           0x0020	/* AIF1DAC1_MUTERATE */
+#define WM8995_AIF1DAC1_MUTERATE_SHIFT               5	/* AIF1DAC1_MUTERATE */
+#define WM8995_AIF1DAC1_MUTERATE_WIDTH               1	/* AIF1DAC1_MUTERATE */
+#define WM8995_AIF1DAC1_UNMUTE_RAMP             0x0010	/* AIF1DAC1_UNMUTE_RAMP */
+#define WM8995_AIF1DAC1_UNMUTE_RAMP_MASK        0x0010	/* AIF1DAC1_UNMUTE_RAMP */
+#define WM8995_AIF1DAC1_UNMUTE_RAMP_SHIFT            4	/* AIF1DAC1_UNMUTE_RAMP */
+#define WM8995_AIF1DAC1_UNMUTE_RAMP_WIDTH            1	/* AIF1DAC1_UNMUTE_RAMP */
+#define WM8995_AIF1DAC1_DEEMP_MASK              0x0006	/* AIF1DAC1_DEEMP - [2:1] */
+#define WM8995_AIF1DAC1_DEEMP_SHIFT                  1	/* AIF1DAC1_DEEMP - [2:1] */
+#define WM8995_AIF1DAC1_DEEMP_WIDTH                  2	/* AIF1DAC1_DEEMP - [2:1] */
+
+/*
+ * R1057 (0x421) - AIF1 DAC1 Filters (2)
+ */
+#define WM8995_AIF1DAC1_3D_GAIN_MASK            0x3E00	/* AIF1DAC1_3D_GAIN - [13:9] */
+#define WM8995_AIF1DAC1_3D_GAIN_SHIFT                9	/* AIF1DAC1_3D_GAIN - [13:9] */
+#define WM8995_AIF1DAC1_3D_GAIN_WIDTH                5	/* AIF1DAC1_3D_GAIN - [13:9] */
+#define WM8995_AIF1DAC1_3D_ENA                  0x0100	/* AIF1DAC1_3D_ENA */
+#define WM8995_AIF1DAC1_3D_ENA_MASK             0x0100	/* AIF1DAC1_3D_ENA */
+#define WM8995_AIF1DAC1_3D_ENA_SHIFT                 8	/* AIF1DAC1_3D_ENA */
+#define WM8995_AIF1DAC1_3D_ENA_WIDTH                 1	/* AIF1DAC1_3D_ENA */
+
+/*
+ * R1058 (0x422) - AIF1 DAC2 Filters (1)
+ */
+#define WM8995_AIF1DAC2_MUTE                    0x0200	/* AIF1DAC2_MUTE */
+#define WM8995_AIF1DAC2_MUTE_MASK               0x0200	/* AIF1DAC2_MUTE */
+#define WM8995_AIF1DAC2_MUTE_SHIFT                   9	/* AIF1DAC2_MUTE */
+#define WM8995_AIF1DAC2_MUTE_WIDTH                   1	/* AIF1DAC2_MUTE */
+#define WM8995_AIF1DAC2_MONO                    0x0080	/* AIF1DAC2_MONO */
+#define WM8995_AIF1DAC2_MONO_MASK               0x0080	/* AIF1DAC2_MONO */
+#define WM8995_AIF1DAC2_MONO_SHIFT                   7	/* AIF1DAC2_MONO */
+#define WM8995_AIF1DAC2_MONO_WIDTH                   1	/* AIF1DAC2_MONO */
+#define WM8995_AIF1DAC2_MUTERATE                0x0020	/* AIF1DAC2_MUTERATE */
+#define WM8995_AIF1DAC2_MUTERATE_MASK           0x0020	/* AIF1DAC2_MUTERATE */
+#define WM8995_AIF1DAC2_MUTERATE_SHIFT               5	/* AIF1DAC2_MUTERATE */
+#define WM8995_AIF1DAC2_MUTERATE_WIDTH               1	/* AIF1DAC2_MUTERATE */
+#define WM8995_AIF1DAC2_UNMUTE_RAMP             0x0010	/* AIF1DAC2_UNMUTE_RAMP */
+#define WM8995_AIF1DAC2_UNMUTE_RAMP_MASK        0x0010	/* AIF1DAC2_UNMUTE_RAMP */
+#define WM8995_AIF1DAC2_UNMUTE_RAMP_SHIFT            4	/* AIF1DAC2_UNMUTE_RAMP */
+#define WM8995_AIF1DAC2_UNMUTE_RAMP_WIDTH            1	/* AIF1DAC2_UNMUTE_RAMP */
+#define WM8995_AIF1DAC2_DEEMP_MASK              0x0006	/* AIF1DAC2_DEEMP - [2:1] */
+#define WM8995_AIF1DAC2_DEEMP_SHIFT                  1	/* AIF1DAC2_DEEMP - [2:1] */
+#define WM8995_AIF1DAC2_DEEMP_WIDTH                  2	/* AIF1DAC2_DEEMP - [2:1] */
+
+/*
+ * R1059 (0x423) - AIF1 DAC2 Filters (2)
+ */
+#define WM8995_AIF1DAC2_3D_GAIN_MASK            0x3E00	/* AIF1DAC2_3D_GAIN - [13:9] */
+#define WM8995_AIF1DAC2_3D_GAIN_SHIFT                9	/* AIF1DAC2_3D_GAIN - [13:9] */
+#define WM8995_AIF1DAC2_3D_GAIN_WIDTH                5	/* AIF1DAC2_3D_GAIN - [13:9] */
+#define WM8995_AIF1DAC2_3D_ENA                  0x0100	/* AIF1DAC2_3D_ENA */
+#define WM8995_AIF1DAC2_3D_ENA_MASK             0x0100	/* AIF1DAC2_3D_ENA */
+#define WM8995_AIF1DAC2_3D_ENA_SHIFT                 8	/* AIF1DAC2_3D_ENA */
+#define WM8995_AIF1DAC2_3D_ENA_WIDTH                 1	/* AIF1DAC2_3D_ENA */
+
+/*
+ * R1088 (0x440) - AIF1 DRC1 (1)
+ */
+#define WM8995_AIF1DRC1_SIG_DET_RMS_MASK        0xF800	/* AIF1DRC1_SIG_DET_RMS - [15:11] */
+#define WM8995_AIF1DRC1_SIG_DET_RMS_SHIFT           11	/* AIF1DRC1_SIG_DET_RMS - [15:11] */
+#define WM8995_AIF1DRC1_SIG_DET_RMS_WIDTH            5	/* AIF1DRC1_SIG_DET_RMS - [15:11] */
+#define WM8995_AIF1DRC1_SIG_DET_PK_MASK         0x0600	/* AIF1DRC1_SIG_DET_PK - [10:9] */
+#define WM8995_AIF1DRC1_SIG_DET_PK_SHIFT             9	/* AIF1DRC1_SIG_DET_PK - [10:9] */
+#define WM8995_AIF1DRC1_SIG_DET_PK_WIDTH             2	/* AIF1DRC1_SIG_DET_PK - [10:9] */
+#define WM8995_AIF1DRC1_NG_ENA                  0x0100	/* AIF1DRC1_NG_ENA */
+#define WM8995_AIF1DRC1_NG_ENA_MASK             0x0100	/* AIF1DRC1_NG_ENA */
+#define WM8995_AIF1DRC1_NG_ENA_SHIFT                 8	/* AIF1DRC1_NG_ENA */
+#define WM8995_AIF1DRC1_NG_ENA_WIDTH                 1	/* AIF1DRC1_NG_ENA */
+#define WM8995_AIF1DRC1_SIG_DET_MODE            0x0080	/* AIF1DRC1_SIG_DET_MODE */
+#define WM8995_AIF1DRC1_SIG_DET_MODE_MASK       0x0080	/* AIF1DRC1_SIG_DET_MODE */
+#define WM8995_AIF1DRC1_SIG_DET_MODE_SHIFT           7	/* AIF1DRC1_SIG_DET_MODE */
+#define WM8995_AIF1DRC1_SIG_DET_MODE_WIDTH           1	/* AIF1DRC1_SIG_DET_MODE */
+#define WM8995_AIF1DRC1_SIG_DET                 0x0040	/* AIF1DRC1_SIG_DET */
+#define WM8995_AIF1DRC1_SIG_DET_MASK            0x0040	/* AIF1DRC1_SIG_DET */
+#define WM8995_AIF1DRC1_SIG_DET_SHIFT                6	/* AIF1DRC1_SIG_DET */
+#define WM8995_AIF1DRC1_SIG_DET_WIDTH                1	/* AIF1DRC1_SIG_DET */
+#define WM8995_AIF1DRC1_KNEE2_OP_ENA            0x0020	/* AIF1DRC1_KNEE2_OP_ENA */
+#define WM8995_AIF1DRC1_KNEE2_OP_ENA_MASK       0x0020	/* AIF1DRC1_KNEE2_OP_ENA */
+#define WM8995_AIF1DRC1_KNEE2_OP_ENA_SHIFT           5	/* AIF1DRC1_KNEE2_OP_ENA */
+#define WM8995_AIF1DRC1_KNEE2_OP_ENA_WIDTH           1	/* AIF1DRC1_KNEE2_OP_ENA */
+#define WM8995_AIF1DRC1_QR                      0x0010	/* AIF1DRC1_QR */
+#define WM8995_AIF1DRC1_QR_MASK                 0x0010	/* AIF1DRC1_QR */
+#define WM8995_AIF1DRC1_QR_SHIFT                     4	/* AIF1DRC1_QR */
+#define WM8995_AIF1DRC1_QR_WIDTH                     1	/* AIF1DRC1_QR */
+#define WM8995_AIF1DRC1_ANTICLIP                0x0008	/* AIF1DRC1_ANTICLIP */
+#define WM8995_AIF1DRC1_ANTICLIP_MASK           0x0008	/* AIF1DRC1_ANTICLIP */
+#define WM8995_AIF1DRC1_ANTICLIP_SHIFT               3	/* AIF1DRC1_ANTICLIP */
+#define WM8995_AIF1DRC1_ANTICLIP_WIDTH               1	/* AIF1DRC1_ANTICLIP */
+#define WM8995_AIF1DAC1_DRC_ENA                 0x0004	/* AIF1DAC1_DRC_ENA */
+#define WM8995_AIF1DAC1_DRC_ENA_MASK            0x0004	/* AIF1DAC1_DRC_ENA */
+#define WM8995_AIF1DAC1_DRC_ENA_SHIFT                2	/* AIF1DAC1_DRC_ENA */
+#define WM8995_AIF1DAC1_DRC_ENA_WIDTH                1	/* AIF1DAC1_DRC_ENA */
+#define WM8995_AIF1ADC1L_DRC_ENA                0x0002	/* AIF1ADC1L_DRC_ENA */
+#define WM8995_AIF1ADC1L_DRC_ENA_MASK           0x0002	/* AIF1ADC1L_DRC_ENA */
+#define WM8995_AIF1ADC1L_DRC_ENA_SHIFT               1	/* AIF1ADC1L_DRC_ENA */
+#define WM8995_AIF1ADC1L_DRC_ENA_WIDTH               1	/* AIF1ADC1L_DRC_ENA */
+#define WM8995_AIF1ADC1R_DRC_ENA                0x0001	/* AIF1ADC1R_DRC_ENA */
+#define WM8995_AIF1ADC1R_DRC_ENA_MASK           0x0001	/* AIF1ADC1R_DRC_ENA */
+#define WM8995_AIF1ADC1R_DRC_ENA_SHIFT               0	/* AIF1ADC1R_DRC_ENA */
+#define WM8995_AIF1ADC1R_DRC_ENA_WIDTH               1	/* AIF1ADC1R_DRC_ENA */
+
+/*
+ * R1089 (0x441) - AIF1 DRC1 (2)
+ */
+#define WM8995_AIF1DRC1_ATK_MASK                0x1E00	/* AIF1DRC1_ATK - [12:9] */
+#define WM8995_AIF1DRC1_ATK_SHIFT                    9	/* AIF1DRC1_ATK - [12:9] */
+#define WM8995_AIF1DRC1_ATK_WIDTH                    4	/* AIF1DRC1_ATK - [12:9] */
+#define WM8995_AIF1DRC1_DCY_MASK                0x01E0	/* AIF1DRC1_DCY - [8:5] */
+#define WM8995_AIF1DRC1_DCY_SHIFT                    5	/* AIF1DRC1_DCY - [8:5] */
+#define WM8995_AIF1DRC1_DCY_WIDTH                    4	/* AIF1DRC1_DCY - [8:5] */
+#define WM8995_AIF1DRC1_MINGAIN_MASK            0x001C	/* AIF1DRC1_MINGAIN - [4:2] */
+#define WM8995_AIF1DRC1_MINGAIN_SHIFT                2	/* AIF1DRC1_MINGAIN - [4:2] */
+#define WM8995_AIF1DRC1_MINGAIN_WIDTH                3	/* AIF1DRC1_MINGAIN - [4:2] */
+#define WM8995_AIF1DRC1_MAXGAIN_MASK            0x0003	/* AIF1DRC1_MAXGAIN - [1:0] */
+#define WM8995_AIF1DRC1_MAXGAIN_SHIFT                0	/* AIF1DRC1_MAXGAIN - [1:0] */
+#define WM8995_AIF1DRC1_MAXGAIN_WIDTH                2	/* AIF1DRC1_MAXGAIN - [1:0] */
+
+/*
+ * R1090 (0x442) - AIF1 DRC1 (3)
+ */
+#define WM8995_AIF1DRC1_NG_MINGAIN_MASK         0xF000	/* AIF1DRC1_NG_MINGAIN - [15:12] */
+#define WM8995_AIF1DRC1_NG_MINGAIN_SHIFT            12	/* AIF1DRC1_NG_MINGAIN - [15:12] */
+#define WM8995_AIF1DRC1_NG_MINGAIN_WIDTH             4	/* AIF1DRC1_NG_MINGAIN - [15:12] */
+#define WM8995_AIF1DRC1_NG_EXP_MASK             0x0C00	/* AIF1DRC1_NG_EXP - [11:10] */
+#define WM8995_AIF1DRC1_NG_EXP_SHIFT                10	/* AIF1DRC1_NG_EXP - [11:10] */
+#define WM8995_AIF1DRC1_NG_EXP_WIDTH                 2	/* AIF1DRC1_NG_EXP - [11:10] */
+#define WM8995_AIF1DRC1_QR_THR_MASK             0x0300	/* AIF1DRC1_QR_THR - [9:8] */
+#define WM8995_AIF1DRC1_QR_THR_SHIFT                 8	/* AIF1DRC1_QR_THR - [9:8] */
+#define WM8995_AIF1DRC1_QR_THR_WIDTH                 2	/* AIF1DRC1_QR_THR - [9:8] */
+#define WM8995_AIF1DRC1_QR_DCY_MASK             0x00C0	/* AIF1DRC1_QR_DCY - [7:6] */
+#define WM8995_AIF1DRC1_QR_DCY_SHIFT                 6	/* AIF1DRC1_QR_DCY - [7:6] */
+#define WM8995_AIF1DRC1_QR_DCY_WIDTH                 2	/* AIF1DRC1_QR_DCY - [7:6] */
+#define WM8995_AIF1DRC1_HI_COMP_MASK            0x0038	/* AIF1DRC1_HI_COMP - [5:3] */
+#define WM8995_AIF1DRC1_HI_COMP_SHIFT                3	/* AIF1DRC1_HI_COMP - [5:3] */
+#define WM8995_AIF1DRC1_HI_COMP_WIDTH                3	/* AIF1DRC1_HI_COMP - [5:3] */
+#define WM8995_AIF1DRC1_LO_COMP_MASK            0x0007	/* AIF1DRC1_LO_COMP - [2:0] */
+#define WM8995_AIF1DRC1_LO_COMP_SHIFT                0	/* AIF1DRC1_LO_COMP - [2:0] */
+#define WM8995_AIF1DRC1_LO_COMP_WIDTH                3	/* AIF1DRC1_LO_COMP - [2:0] */
+
+/*
+ * R1091 (0x443) - AIF1 DRC1 (4)
+ */
+#define WM8995_AIF1DRC1_KNEE_IP_MASK            0x07E0	/* AIF1DRC1_KNEE_IP - [10:5] */
+#define WM8995_AIF1DRC1_KNEE_IP_SHIFT                5	/* AIF1DRC1_KNEE_IP - [10:5] */
+#define WM8995_AIF1DRC1_KNEE_IP_WIDTH                6	/* AIF1DRC1_KNEE_IP - [10:5] */
+#define WM8995_AIF1DRC1_KNEE_OP_MASK            0x001F	/* AIF1DRC1_KNEE_OP - [4:0] */
+#define WM8995_AIF1DRC1_KNEE_OP_SHIFT                0	/* AIF1DRC1_KNEE_OP - [4:0] */
+#define WM8995_AIF1DRC1_KNEE_OP_WIDTH                5	/* AIF1DRC1_KNEE_OP - [4:0] */
+
+/*
+ * R1092 (0x444) - AIF1 DRC1 (5)
+ */
+#define WM8995_AIF1DRC1_KNEE2_IP_MASK           0x03E0	/* AIF1DRC1_KNEE2_IP - [9:5] */
+#define WM8995_AIF1DRC1_KNEE2_IP_SHIFT               5	/* AIF1DRC1_KNEE2_IP - [9:5] */
+#define WM8995_AIF1DRC1_KNEE2_IP_WIDTH               5	/* AIF1DRC1_KNEE2_IP - [9:5] */
+#define WM8995_AIF1DRC1_KNEE2_OP_MASK           0x001F	/* AIF1DRC1_KNEE2_OP - [4:0] */
+#define WM8995_AIF1DRC1_KNEE2_OP_SHIFT               0	/* AIF1DRC1_KNEE2_OP - [4:0] */
+#define WM8995_AIF1DRC1_KNEE2_OP_WIDTH               5	/* AIF1DRC1_KNEE2_OP - [4:0] */
+
+/*
+ * R1104 (0x450) - AIF1 DRC2 (1)
+ */
+#define WM8995_AIF1DRC2_SIG_DET_RMS_MASK        0xF800	/* AIF1DRC2_SIG_DET_RMS - [15:11] */
+#define WM8995_AIF1DRC2_SIG_DET_RMS_SHIFT           11	/* AIF1DRC2_SIG_DET_RMS - [15:11] */
+#define WM8995_AIF1DRC2_SIG_DET_RMS_WIDTH            5	/* AIF1DRC2_SIG_DET_RMS - [15:11] */
+#define WM8995_AIF1DRC2_SIG_DET_PK_MASK         0x0600	/* AIF1DRC2_SIG_DET_PK - [10:9] */
+#define WM8995_AIF1DRC2_SIG_DET_PK_SHIFT             9	/* AIF1DRC2_SIG_DET_PK - [10:9] */
+#define WM8995_AIF1DRC2_SIG_DET_PK_WIDTH             2	/* AIF1DRC2_SIG_DET_PK - [10:9] */
+#define WM8995_AIF1DRC2_NG_ENA                  0x0100	/* AIF1DRC2_NG_ENA */
+#define WM8995_AIF1DRC2_NG_ENA_MASK             0x0100	/* AIF1DRC2_NG_ENA */
+#define WM8995_AIF1DRC2_NG_ENA_SHIFT                 8	/* AIF1DRC2_NG_ENA */
+#define WM8995_AIF1DRC2_NG_ENA_WIDTH                 1	/* AIF1DRC2_NG_ENA */
+#define WM8995_AIF1DRC2_SIG_DET_MODE            0x0080	/* AIF1DRC2_SIG_DET_MODE */
+#define WM8995_AIF1DRC2_SIG_DET_MODE_MASK       0x0080	/* AIF1DRC2_SIG_DET_MODE */
+#define WM8995_AIF1DRC2_SIG_DET_MODE_SHIFT           7	/* AIF1DRC2_SIG_DET_MODE */
+#define WM8995_AIF1DRC2_SIG_DET_MODE_WIDTH           1	/* AIF1DRC2_SIG_DET_MODE */
+#define WM8995_AIF1DRC2_SIG_DET                 0x0040	/* AIF1DRC2_SIG_DET */
+#define WM8995_AIF1DRC2_SIG_DET_MASK            0x0040	/* AIF1DRC2_SIG_DET */
+#define WM8995_AIF1DRC2_SIG_DET_SHIFT                6	/* AIF1DRC2_SIG_DET */
+#define WM8995_AIF1DRC2_SIG_DET_WIDTH                1	/* AIF1DRC2_SIG_DET */
+#define WM8995_AIF1DRC2_KNEE2_OP_ENA            0x0020	/* AIF1DRC2_KNEE2_OP_ENA */
+#define WM8995_AIF1DRC2_KNEE2_OP_ENA_MASK       0x0020	/* AIF1DRC2_KNEE2_OP_ENA */
+#define WM8995_AIF1DRC2_KNEE2_OP_ENA_SHIFT           5	/* AIF1DRC2_KNEE2_OP_ENA */
+#define WM8995_AIF1DRC2_KNEE2_OP_ENA_WIDTH           1	/* AIF1DRC2_KNEE2_OP_ENA */
+#define WM8995_AIF1DRC2_QR                      0x0010	/* AIF1DRC2_QR */
+#define WM8995_AIF1DRC2_QR_MASK                 0x0010	/* AIF1DRC2_QR */
+#define WM8995_AIF1DRC2_QR_SHIFT                     4	/* AIF1DRC2_QR */
+#define WM8995_AIF1DRC2_QR_WIDTH                     1	/* AIF1DRC2_QR */
+#define WM8995_AIF1DRC2_ANTICLIP                0x0008	/* AIF1DRC2_ANTICLIP */
+#define WM8995_AIF1DRC2_ANTICLIP_MASK           0x0008	/* AIF1DRC2_ANTICLIP */
+#define WM8995_AIF1DRC2_ANTICLIP_SHIFT               3	/* AIF1DRC2_ANTICLIP */
+#define WM8995_AIF1DRC2_ANTICLIP_WIDTH               1	/* AIF1DRC2_ANTICLIP */
+#define WM8995_AIF1DAC2_DRC_ENA                 0x0004	/* AIF1DAC2_DRC_ENA */
+#define WM8995_AIF1DAC2_DRC_ENA_MASK            0x0004	/* AIF1DAC2_DRC_ENA */
+#define WM8995_AIF1DAC2_DRC_ENA_SHIFT                2	/* AIF1DAC2_DRC_ENA */
+#define WM8995_AIF1DAC2_DRC_ENA_WIDTH                1	/* AIF1DAC2_DRC_ENA */
+#define WM8995_AIF1ADC2L_DRC_ENA                0x0002	/* AIF1ADC2L_DRC_ENA */
+#define WM8995_AIF1ADC2L_DRC_ENA_MASK           0x0002	/* AIF1ADC2L_DRC_ENA */
+#define WM8995_AIF1ADC2L_DRC_ENA_SHIFT               1	/* AIF1ADC2L_DRC_ENA */
+#define WM8995_AIF1ADC2L_DRC_ENA_WIDTH               1	/* AIF1ADC2L_DRC_ENA */
+#define WM8995_AIF1ADC2R_DRC_ENA                0x0001	/* AIF1ADC2R_DRC_ENA */
+#define WM8995_AIF1ADC2R_DRC_ENA_MASK           0x0001	/* AIF1ADC2R_DRC_ENA */
+#define WM8995_AIF1ADC2R_DRC_ENA_SHIFT               0	/* AIF1ADC2R_DRC_ENA */
+#define WM8995_AIF1ADC2R_DRC_ENA_WIDTH               1	/* AIF1ADC2R_DRC_ENA */
+
+/*
+ * R1105 (0x451) - AIF1 DRC2 (2)
+ */
+#define WM8995_AIF1DRC2_ATK_MASK                0x1E00	/* AIF1DRC2_ATK - [12:9] */
+#define WM8995_AIF1DRC2_ATK_SHIFT                    9	/* AIF1DRC2_ATK - [12:9] */
+#define WM8995_AIF1DRC2_ATK_WIDTH                    4	/* AIF1DRC2_ATK - [12:9] */
+#define WM8995_AIF1DRC2_DCY_MASK                0x01E0	/* AIF1DRC2_DCY - [8:5] */
+#define WM8995_AIF1DRC2_DCY_SHIFT                    5	/* AIF1DRC2_DCY - [8:5] */
+#define WM8995_AIF1DRC2_DCY_WIDTH                    4	/* AIF1DRC2_DCY - [8:5] */
+#define WM8995_AIF1DRC2_MINGAIN_MASK            0x001C	/* AIF1DRC2_MINGAIN - [4:2] */
+#define WM8995_AIF1DRC2_MINGAIN_SHIFT                2	/* AIF1DRC2_MINGAIN - [4:2] */
+#define WM8995_AIF1DRC2_MINGAIN_WIDTH                3	/* AIF1DRC2_MINGAIN - [4:2] */
+#define WM8995_AIF1DRC2_MAXGAIN_MASK            0x0003	/* AIF1DRC2_MAXGAIN - [1:0] */
+#define WM8995_AIF1DRC2_MAXGAIN_SHIFT                0	/* AIF1DRC2_MAXGAIN - [1:0] */
+#define WM8995_AIF1DRC2_MAXGAIN_WIDTH                2	/* AIF1DRC2_MAXGAIN - [1:0] */
+
+/*
+ * R1106 (0x452) - AIF1 DRC2 (3)
+ */
+#define WM8995_AIF1DRC2_NG_MINGAIN_MASK         0xF000	/* AIF1DRC2_NG_MINGAIN - [15:12] */
+#define WM8995_AIF1DRC2_NG_MINGAIN_SHIFT            12	/* AIF1DRC2_NG_MINGAIN - [15:12] */
+#define WM8995_AIF1DRC2_NG_MINGAIN_WIDTH             4	/* AIF1DRC2_NG_MINGAIN - [15:12] */
+#define WM8995_AIF1DRC2_NG_EXP_MASK             0x0C00	/* AIF1DRC2_NG_EXP - [11:10] */
+#define WM8995_AIF1DRC2_NG_EXP_SHIFT                10	/* AIF1DRC2_NG_EXP - [11:10] */
+#define WM8995_AIF1DRC2_NG_EXP_WIDTH                 2	/* AIF1DRC2_NG_EXP - [11:10] */
+#define WM8995_AIF1DRC2_QR_THR_MASK             0x0300	/* AIF1DRC2_QR_THR - [9:8] */
+#define WM8995_AIF1DRC2_QR_THR_SHIFT                 8	/* AIF1DRC2_QR_THR - [9:8] */
+#define WM8995_AIF1DRC2_QR_THR_WIDTH                 2	/* AIF1DRC2_QR_THR - [9:8] */
+#define WM8995_AIF1DRC2_QR_DCY_MASK             0x00C0	/* AIF1DRC2_QR_DCY - [7:6] */
+#define WM8995_AIF1DRC2_QR_DCY_SHIFT                 6	/* AIF1DRC2_QR_DCY - [7:6] */
+#define WM8995_AIF1DRC2_QR_DCY_WIDTH                 2	/* AIF1DRC2_QR_DCY - [7:6] */
+#define WM8995_AIF1DRC2_HI_COMP_MASK            0x0038	/* AIF1DRC2_HI_COMP - [5:3] */
+#define WM8995_AIF1DRC2_HI_COMP_SHIFT                3	/* AIF1DRC2_HI_COMP - [5:3] */
+#define WM8995_AIF1DRC2_HI_COMP_WIDTH                3	/* AIF1DRC2_HI_COMP - [5:3] */
+#define WM8995_AIF1DRC2_LO_COMP_MASK            0x0007	/* AIF1DRC2_LO_COMP - [2:0] */
+#define WM8995_AIF1DRC2_LO_COMP_SHIFT                0	/* AIF1DRC2_LO_COMP - [2:0] */
+#define WM8995_AIF1DRC2_LO_COMP_WIDTH                3	/* AIF1DRC2_LO_COMP - [2:0] */
+
+/*
+ * R1107 (0x453) - AIF1 DRC2 (4)
+ */
+#define WM8995_AIF1DRC2_KNEE_IP_MASK            0x07E0	/* AIF1DRC2_KNEE_IP - [10:5] */
+#define WM8995_AIF1DRC2_KNEE_IP_SHIFT                5	/* AIF1DRC2_KNEE_IP - [10:5] */
+#define WM8995_AIF1DRC2_KNEE_IP_WIDTH                6	/* AIF1DRC2_KNEE_IP - [10:5] */
+#define WM8995_AIF1DRC2_KNEE_OP_MASK            0x001F	/* AIF1DRC2_KNEE_OP - [4:0] */
+#define WM8995_AIF1DRC2_KNEE_OP_SHIFT                0	/* AIF1DRC2_KNEE_OP - [4:0] */
+#define WM8995_AIF1DRC2_KNEE_OP_WIDTH                5	/* AIF1DRC2_KNEE_OP - [4:0] */
+
+/*
+ * R1108 (0x454) - AIF1 DRC2 (5)
+ */
+#define WM8995_AIF1DRC2_KNEE2_IP_MASK           0x03E0	/* AIF1DRC2_KNEE2_IP - [9:5] */
+#define WM8995_AIF1DRC2_KNEE2_IP_SHIFT               5	/* AIF1DRC2_KNEE2_IP - [9:5] */
+#define WM8995_AIF1DRC2_KNEE2_IP_WIDTH               5	/* AIF1DRC2_KNEE2_IP - [9:5] */
+#define WM8995_AIF1DRC2_KNEE2_OP_MASK           0x001F	/* AIF1DRC2_KNEE2_OP - [4:0] */
+#define WM8995_AIF1DRC2_KNEE2_OP_SHIFT               0	/* AIF1DRC2_KNEE2_OP - [4:0] */
+#define WM8995_AIF1DRC2_KNEE2_OP_WIDTH               5	/* AIF1DRC2_KNEE2_OP - [4:0] */
+
+/*
+ * R1152 (0x480) - AIF1 DAC1 EQ Gains (1)
+ */
+#define WM8995_AIF1DAC1_EQ_B1_GAIN_MASK         0xF800	/* AIF1DAC1_EQ_B1_GAIN - [15:11] */
+#define WM8995_AIF1DAC1_EQ_B1_GAIN_SHIFT            11	/* AIF1DAC1_EQ_B1_GAIN - [15:11] */
+#define WM8995_AIF1DAC1_EQ_B1_GAIN_WIDTH             5	/* AIF1DAC1_EQ_B1_GAIN - [15:11] */
+#define WM8995_AIF1DAC1_EQ_B2_GAIN_MASK         0x07C0	/* AIF1DAC1_EQ_B2_GAIN - [10:6] */
+#define WM8995_AIF1DAC1_EQ_B2_GAIN_SHIFT             6	/* AIF1DAC1_EQ_B2_GAIN - [10:6] */
+#define WM8995_AIF1DAC1_EQ_B2_GAIN_WIDTH             5	/* AIF1DAC1_EQ_B2_GAIN - [10:6] */
+#define WM8995_AIF1DAC1_EQ_B3_GAIN_MASK         0x003E	/* AIF1DAC1_EQ_B3_GAIN - [5:1] */
+#define WM8995_AIF1DAC1_EQ_B3_GAIN_SHIFT             1	/* AIF1DAC1_EQ_B3_GAIN - [5:1] */
+#define WM8995_AIF1DAC1_EQ_B3_GAIN_WIDTH             5	/* AIF1DAC1_EQ_B3_GAIN - [5:1] */
+#define WM8995_AIF1DAC1_EQ_ENA                  0x0001	/* AIF1DAC1_EQ_ENA */
+#define WM8995_AIF1DAC1_EQ_ENA_MASK             0x0001	/* AIF1DAC1_EQ_ENA */
+#define WM8995_AIF1DAC1_EQ_ENA_SHIFT                 0	/* AIF1DAC1_EQ_ENA */
+#define WM8995_AIF1DAC1_EQ_ENA_WIDTH                 1	/* AIF1DAC1_EQ_ENA */
+
+/*
+ * R1153 (0x481) - AIF1 DAC1 EQ Gains (2)
+ */
+#define WM8995_AIF1DAC1_EQ_B4_GAIN_MASK         0xF800	/* AIF1DAC1_EQ_B4_GAIN - [15:11] */
+#define WM8995_AIF1DAC1_EQ_B4_GAIN_SHIFT            11	/* AIF1DAC1_EQ_B4_GAIN - [15:11] */
+#define WM8995_AIF1DAC1_EQ_B4_GAIN_WIDTH             5	/* AIF1DAC1_EQ_B4_GAIN - [15:11] */
+#define WM8995_AIF1DAC1_EQ_B5_GAIN_MASK         0x07C0	/* AIF1DAC1_EQ_B5_GAIN - [10:6] */
+#define WM8995_AIF1DAC1_EQ_B5_GAIN_SHIFT             6	/* AIF1DAC1_EQ_B5_GAIN - [10:6] */
+#define WM8995_AIF1DAC1_EQ_B5_GAIN_WIDTH             5	/* AIF1DAC1_EQ_B5_GAIN - [10:6] */
+
+/*
+ * R1154 (0x482) - AIF1 DAC1 EQ Band 1 A
+ */
+#define WM8995_AIF1DAC1_EQ_B1_A_MASK            0xFFFF	/* AIF1DAC1_EQ_B1_A - [15:0] */
+#define WM8995_AIF1DAC1_EQ_B1_A_SHIFT                0	/* AIF1DAC1_EQ_B1_A - [15:0] */
+#define WM8995_AIF1DAC1_EQ_B1_A_WIDTH               16	/* AIF1DAC1_EQ_B1_A - [15:0] */
+
+/*
+ * R1155 (0x483) - AIF1 DAC1 EQ Band 1 B
+ */
+#define WM8995_AIF1DAC1_EQ_B1_B_MASK            0xFFFF	/* AIF1DAC1_EQ_B1_B - [15:0] */
+#define WM8995_AIF1DAC1_EQ_B1_B_SHIFT                0	/* AIF1DAC1_EQ_B1_B - [15:0] */
+#define WM8995_AIF1DAC1_EQ_B1_B_WIDTH               16	/* AIF1DAC1_EQ_B1_B - [15:0] */
+
+/*
+ * R1156 (0x484) - AIF1 DAC1 EQ Band 1 PG
+ */
+#define WM8995_AIF1DAC1_EQ_B1_PG_MASK           0xFFFF	/* AIF1DAC1_EQ_B1_PG - [15:0] */
+#define WM8995_AIF1DAC1_EQ_B1_PG_SHIFT               0	/* AIF1DAC1_EQ_B1_PG - [15:0] */
+#define WM8995_AIF1DAC1_EQ_B1_PG_WIDTH              16	/* AIF1DAC1_EQ_B1_PG - [15:0] */
+
+/*
+ * R1157 (0x485) - AIF1 DAC1 EQ Band 2 A
+ */
+#define WM8995_AIF1DAC1_EQ_B2_A_MASK            0xFFFF	/* AIF1DAC1_EQ_B2_A - [15:0] */
+#define WM8995_AIF1DAC1_EQ_B2_A_SHIFT                0	/* AIF1DAC1_EQ_B2_A - [15:0] */
+#define WM8995_AIF1DAC1_EQ_B2_A_WIDTH               16	/* AIF1DAC1_EQ_B2_A - [15:0] */
+
+/*
+ * R1158 (0x486) - AIF1 DAC1 EQ Band 2 B
+ */
+#define WM8995_AIF1DAC1_EQ_B2_B_MASK            0xFFFF	/* AIF1DAC1_EQ_B2_B - [15:0] */
+#define WM8995_AIF1DAC1_EQ_B2_B_SHIFT                0	/* AIF1DAC1_EQ_B2_B - [15:0] */
+#define WM8995_AIF1DAC1_EQ_B2_B_WIDTH               16	/* AIF1DAC1_EQ_B2_B - [15:0] */
+
+/*
+ * R1159 (0x487) - AIF1 DAC1 EQ Band 2 C
+ */
+#define WM8995_AIF1DAC1_EQ_B2_C_MASK            0xFFFF	/* AIF1DAC1_EQ_B2_C - [15:0] */
+#define WM8995_AIF1DAC1_EQ_B2_C_SHIFT                0	/* AIF1DAC1_EQ_B2_C - [15:0] */
+#define WM8995_AIF1DAC1_EQ_B2_C_WIDTH               16	/* AIF1DAC1_EQ_B2_C - [15:0] */
+
+/*
+ * R1160 (0x488) - AIF1 DAC1 EQ Band 2 PG
+ */
+#define WM8995_AIF1DAC1_EQ_B2_PG_MASK           0xFFFF	/* AIF1DAC1_EQ_B2_PG - [15:0] */
+#define WM8995_AIF1DAC1_EQ_B2_PG_SHIFT               0	/* AIF1DAC1_EQ_B2_PG - [15:0] */
+#define WM8995_AIF1DAC1_EQ_B2_PG_WIDTH              16	/* AIF1DAC1_EQ_B2_PG - [15:0] */
+
+/*
+ * R1161 (0x489) - AIF1 DAC1 EQ Band 3 A
+ */
+#define WM8995_AIF1DAC1_EQ_B3_A_MASK            0xFFFF	/* AIF1DAC1_EQ_B3_A - [15:0] */
+#define WM8995_AIF1DAC1_EQ_B3_A_SHIFT                0	/* AIF1DAC1_EQ_B3_A - [15:0] */
+#define WM8995_AIF1DAC1_EQ_B3_A_WIDTH               16	/* AIF1DAC1_EQ_B3_A - [15:0] */
+
+/*
+ * R1162 (0x48A) - AIF1 DAC1 EQ Band 3 B
+ */
+#define WM8995_AIF1DAC1_EQ_B3_B_MASK            0xFFFF	/* AIF1DAC1_EQ_B3_B - [15:0] */
+#define WM8995_AIF1DAC1_EQ_B3_B_SHIFT                0	/* AIF1DAC1_EQ_B3_B - [15:0] */
+#define WM8995_AIF1DAC1_EQ_B3_B_WIDTH               16	/* AIF1DAC1_EQ_B3_B - [15:0] */
+
+/*
+ * R1163 (0x48B) - AIF1 DAC1 EQ Band 3 C
+ */
+#define WM8995_AIF1DAC1_EQ_B3_C_MASK            0xFFFF	/* AIF1DAC1_EQ_B3_C - [15:0] */
+#define WM8995_AIF1DAC1_EQ_B3_C_SHIFT                0	/* AIF1DAC1_EQ_B3_C - [15:0] */
+#define WM8995_AIF1DAC1_EQ_B3_C_WIDTH               16	/* AIF1DAC1_EQ_B3_C - [15:0] */
+
+/*
+ * R1164 (0x48C) - AIF1 DAC1 EQ Band 3 PG
+ */
+#define WM8995_AIF1DAC1_EQ_B3_PG_MASK           0xFFFF	/* AIF1DAC1_EQ_B3_PG - [15:0] */
+#define WM8995_AIF1DAC1_EQ_B3_PG_SHIFT               0	/* AIF1DAC1_EQ_B3_PG - [15:0] */
+#define WM8995_AIF1DAC1_EQ_B3_PG_WIDTH              16	/* AIF1DAC1_EQ_B3_PG - [15:0] */
+
+/*
+ * R1165 (0x48D) - AIF1 DAC1 EQ Band 4 A
+ */
+#define WM8995_AIF1DAC1_EQ_B4_A_MASK            0xFFFF	/* AIF1DAC1_EQ_B4_A - [15:0] */
+#define WM8995_AIF1DAC1_EQ_B4_A_SHIFT                0	/* AIF1DAC1_EQ_B4_A - [15:0] */
+#define WM8995_AIF1DAC1_EQ_B4_A_WIDTH               16	/* AIF1DAC1_EQ_B4_A - [15:0] */
+
+/*
+ * R1166 (0x48E) - AIF1 DAC1 EQ Band 4 B
+ */
+#define WM8995_AIF1DAC1_EQ_B4_B_MASK            0xFFFF	/* AIF1DAC1_EQ_B4_B - [15:0] */
+#define WM8995_AIF1DAC1_EQ_B4_B_SHIFT                0	/* AIF1DAC1_EQ_B4_B - [15:0] */
+#define WM8995_AIF1DAC1_EQ_B4_B_WIDTH               16	/* AIF1DAC1_EQ_B4_B - [15:0] */
+
+/*
+ * R1167 (0x48F) - AIF1 DAC1 EQ Band 4 C
+ */
+#define WM8995_AIF1DAC1_EQ_B4_C_MASK            0xFFFF	/* AIF1DAC1_EQ_B4_C - [15:0] */
+#define WM8995_AIF1DAC1_EQ_B4_C_SHIFT                0	/* AIF1DAC1_EQ_B4_C - [15:0] */
+#define WM8995_AIF1DAC1_EQ_B4_C_WIDTH               16	/* AIF1DAC1_EQ_B4_C - [15:0] */
+
+/*
+ * R1168 (0x490) - AIF1 DAC1 EQ Band 4 PG
+ */
+#define WM8995_AIF1DAC1_EQ_B4_PG_MASK           0xFFFF	/* AIF1DAC1_EQ_B4_PG - [15:0] */
+#define WM8995_AIF1DAC1_EQ_B4_PG_SHIFT               0	/* AIF1DAC1_EQ_B4_PG - [15:0] */
+#define WM8995_AIF1DAC1_EQ_B4_PG_WIDTH              16	/* AIF1DAC1_EQ_B4_PG - [15:0] */
+
+/*
+ * R1169 (0x491) - AIF1 DAC1 EQ Band 5 A
+ */
+#define WM8995_AIF1DAC1_EQ_B5_A_MASK            0xFFFF	/* AIF1DAC1_EQ_B5_A - [15:0] */
+#define WM8995_AIF1DAC1_EQ_B5_A_SHIFT                0	/* AIF1DAC1_EQ_B5_A - [15:0] */
+#define WM8995_AIF1DAC1_EQ_B5_A_WIDTH               16	/* AIF1DAC1_EQ_B5_A - [15:0] */
+
+/*
+ * R1170 (0x492) - AIF1 DAC1 EQ Band 5 B
+ */
+#define WM8995_AIF1DAC1_EQ_B5_B_MASK            0xFFFF	/* AIF1DAC1_EQ_B5_B - [15:0] */
+#define WM8995_AIF1DAC1_EQ_B5_B_SHIFT                0	/* AIF1DAC1_EQ_B5_B - [15:0] */
+#define WM8995_AIF1DAC1_EQ_B5_B_WIDTH               16	/* AIF1DAC1_EQ_B5_B - [15:0] */
+
+/*
+ * R1171 (0x493) - AIF1 DAC1 EQ Band 5 PG
+ */
+#define WM8995_AIF1DAC1_EQ_B5_PG_MASK           0xFFFF	/* AIF1DAC1_EQ_B5_PG - [15:0] */
+#define WM8995_AIF1DAC1_EQ_B5_PG_SHIFT               0	/* AIF1DAC1_EQ_B5_PG - [15:0] */
+#define WM8995_AIF1DAC1_EQ_B5_PG_WIDTH              16	/* AIF1DAC1_EQ_B5_PG - [15:0] */
+
+/*
+ * R1184 (0x4A0) - AIF1 DAC2 EQ Gains (1)
+ */
+#define WM8995_AIF1DAC2_EQ_B1_GAIN_MASK         0xF800	/* AIF1DAC2_EQ_B1_GAIN - [15:11] */
+#define WM8995_AIF1DAC2_EQ_B1_GAIN_SHIFT            11	/* AIF1DAC2_EQ_B1_GAIN - [15:11] */
+#define WM8995_AIF1DAC2_EQ_B1_GAIN_WIDTH             5	/* AIF1DAC2_EQ_B1_GAIN - [15:11] */
+#define WM8995_AIF1DAC2_EQ_B2_GAIN_MASK         0x07C0	/* AIF1DAC2_EQ_B2_GAIN - [10:6] */
+#define WM8995_AIF1DAC2_EQ_B2_GAIN_SHIFT             6	/* AIF1DAC2_EQ_B2_GAIN - [10:6] */
+#define WM8995_AIF1DAC2_EQ_B2_GAIN_WIDTH             5	/* AIF1DAC2_EQ_B2_GAIN - [10:6] */
+#define WM8995_AIF1DAC2_EQ_B3_GAIN_MASK         0x003E	/* AIF1DAC2_EQ_B3_GAIN - [5:1] */
+#define WM8995_AIF1DAC2_EQ_B3_GAIN_SHIFT             1	/* AIF1DAC2_EQ_B3_GAIN - [5:1] */
+#define WM8995_AIF1DAC2_EQ_B3_GAIN_WIDTH             5	/* AIF1DAC2_EQ_B3_GAIN - [5:1] */
+#define WM8995_AIF1DAC2_EQ_ENA                  0x0001	/* AIF1DAC2_EQ_ENA */
+#define WM8995_AIF1DAC2_EQ_ENA_MASK             0x0001	/* AIF1DAC2_EQ_ENA */
+#define WM8995_AIF1DAC2_EQ_ENA_SHIFT                 0	/* AIF1DAC2_EQ_ENA */
+#define WM8995_AIF1DAC2_EQ_ENA_WIDTH                 1	/* AIF1DAC2_EQ_ENA */
+
+/*
+ * R1185 (0x4A1) - AIF1 DAC2 EQ Gains (2)
+ */
+#define WM8995_AIF1DAC2_EQ_B4_GAIN_MASK         0xF800	/* AIF1DAC2_EQ_B4_GAIN - [15:11] */
+#define WM8995_AIF1DAC2_EQ_B4_GAIN_SHIFT            11	/* AIF1DAC2_EQ_B4_GAIN - [15:11] */
+#define WM8995_AIF1DAC2_EQ_B4_GAIN_WIDTH             5	/* AIF1DAC2_EQ_B4_GAIN - [15:11] */
+#define WM8995_AIF1DAC2_EQ_B5_GAIN_MASK         0x07C0	/* AIF1DAC2_EQ_B5_GAIN - [10:6] */
+#define WM8995_AIF1DAC2_EQ_B5_GAIN_SHIFT             6	/* AIF1DAC2_EQ_B5_GAIN - [10:6] */
+#define WM8995_AIF1DAC2_EQ_B5_GAIN_WIDTH             5	/* AIF1DAC2_EQ_B5_GAIN - [10:6] */
+
+/*
+ * R1186 (0x4A2) - AIF1 DAC2 EQ Band 1 A
+ */
+#define WM8995_AIF1DAC2_EQ_B1_A_MASK            0xFFFF	/* AIF1DAC2_EQ_B1_A - [15:0] */
+#define WM8995_AIF1DAC2_EQ_B1_A_SHIFT                0	/* AIF1DAC2_EQ_B1_A - [15:0] */
+#define WM8995_AIF1DAC2_EQ_B1_A_WIDTH               16	/* AIF1DAC2_EQ_B1_A - [15:0] */
+
+/*
+ * R1187 (0x4A3) - AIF1 DAC2 EQ Band 1 B
+ */
+#define WM8995_AIF1DAC2_EQ_B1_B_MASK            0xFFFF	/* AIF1DAC2_EQ_B1_B - [15:0] */
+#define WM8995_AIF1DAC2_EQ_B1_B_SHIFT                0	/* AIF1DAC2_EQ_B1_B - [15:0] */
+#define WM8995_AIF1DAC2_EQ_B1_B_WIDTH               16	/* AIF1DAC2_EQ_B1_B - [15:0] */
+
+/*
+ * R1188 (0x4A4) - AIF1 DAC2 EQ Band 1 PG
+ */
+#define WM8995_AIF1DAC2_EQ_B1_PG_MASK           0xFFFF	/* AIF1DAC2_EQ_B1_PG - [15:0] */
+#define WM8995_AIF1DAC2_EQ_B1_PG_SHIFT               0	/* AIF1DAC2_EQ_B1_PG - [15:0] */
+#define WM8995_AIF1DAC2_EQ_B1_PG_WIDTH              16	/* AIF1DAC2_EQ_B1_PG - [15:0] */
+
+/*
+ * R1189 (0x4A5) - AIF1 DAC2 EQ Band 2 A
+ */
+#define WM8995_AIF1DAC2_EQ_B2_A_MASK            0xFFFF	/* AIF1DAC2_EQ_B2_A - [15:0] */
+#define WM8995_AIF1DAC2_EQ_B2_A_SHIFT                0	/* AIF1DAC2_EQ_B2_A - [15:0] */
+#define WM8995_AIF1DAC2_EQ_B2_A_WIDTH               16	/* AIF1DAC2_EQ_B2_A - [15:0] */
+
+/*
+ * R1190 (0x4A6) - AIF1 DAC2 EQ Band 2 B
+ */
+#define WM8995_AIF1DAC2_EQ_B2_B_MASK            0xFFFF	/* AIF1DAC2_EQ_B2_B - [15:0] */
+#define WM8995_AIF1DAC2_EQ_B2_B_SHIFT                0	/* AIF1DAC2_EQ_B2_B - [15:0] */
+#define WM8995_AIF1DAC2_EQ_B2_B_WIDTH               16	/* AIF1DAC2_EQ_B2_B - [15:0] */
+
+/*
+ * R1191 (0x4A7) - AIF1 DAC2 EQ Band 2 C
+ */
+#define WM8995_AIF1DAC2_EQ_B2_C_MASK            0xFFFF	/* AIF1DAC2_EQ_B2_C - [15:0] */
+#define WM8995_AIF1DAC2_EQ_B2_C_SHIFT                0	/* AIF1DAC2_EQ_B2_C - [15:0] */
+#define WM8995_AIF1DAC2_EQ_B2_C_WIDTH               16	/* AIF1DAC2_EQ_B2_C - [15:0] */
+
+/*
+ * R1192 (0x4A8) - AIF1 DAC2 EQ Band 2 PG
+ */
+#define WM8995_AIF1DAC2_EQ_B2_PG_MASK           0xFFFF	/* AIF1DAC2_EQ_B2_PG - [15:0] */
+#define WM8995_AIF1DAC2_EQ_B2_PG_SHIFT               0	/* AIF1DAC2_EQ_B2_PG - [15:0] */
+#define WM8995_AIF1DAC2_EQ_B2_PG_WIDTH              16	/* AIF1DAC2_EQ_B2_PG - [15:0] */
+
+/*
+ * R1193 (0x4A9) - AIF1 DAC2 EQ Band 3 A
+ */
+#define WM8995_AIF1DAC2_EQ_B3_A_MASK            0xFFFF	/* AIF1DAC2_EQ_B3_A - [15:0] */
+#define WM8995_AIF1DAC2_EQ_B3_A_SHIFT                0	/* AIF1DAC2_EQ_B3_A - [15:0] */
+#define WM8995_AIF1DAC2_EQ_B3_A_WIDTH               16	/* AIF1DAC2_EQ_B3_A - [15:0] */
+
+/*
+ * R1194 (0x4AA) - AIF1 DAC2 EQ Band 3 B
+ */
+#define WM8995_AIF1DAC2_EQ_B3_B_MASK            0xFFFF	/* AIF1DAC2_EQ_B3_B - [15:0] */
+#define WM8995_AIF1DAC2_EQ_B3_B_SHIFT                0	/* AIF1DAC2_EQ_B3_B - [15:0] */
+#define WM8995_AIF1DAC2_EQ_B3_B_WIDTH               16	/* AIF1DAC2_EQ_B3_B - [15:0] */
+
+/*
+ * R1195 (0x4AB) - AIF1 DAC2 EQ Band 3 C
+ */
+#define WM8995_AIF1DAC2_EQ_B3_C_MASK            0xFFFF	/* AIF1DAC2_EQ_B3_C - [15:0] */
+#define WM8995_AIF1DAC2_EQ_B3_C_SHIFT                0	/* AIF1DAC2_EQ_B3_C - [15:0] */
+#define WM8995_AIF1DAC2_EQ_B3_C_WIDTH               16	/* AIF1DAC2_EQ_B3_C - [15:0] */
+
+/*
+ * R1196 (0x4AC) - AIF1 DAC2 EQ Band 3 PG
+ */
+#define WM8995_AIF1DAC2_EQ_B3_PG_MASK           0xFFFF	/* AIF1DAC2_EQ_B3_PG - [15:0] */
+#define WM8995_AIF1DAC2_EQ_B3_PG_SHIFT               0	/* AIF1DAC2_EQ_B3_PG - [15:0] */
+#define WM8995_AIF1DAC2_EQ_B3_PG_WIDTH              16	/* AIF1DAC2_EQ_B3_PG - [15:0] */
+
+/*
+ * R1197 (0x4AD) - AIF1 DAC2 EQ Band 4 A
+ */
+#define WM8995_AIF1DAC2_EQ_B4_A_MASK            0xFFFF	/* AIF1DAC2_EQ_B4_A - [15:0] */
+#define WM8995_AIF1DAC2_EQ_B4_A_SHIFT                0	/* AIF1DAC2_EQ_B4_A - [15:0] */
+#define WM8995_AIF1DAC2_EQ_B4_A_WIDTH               16	/* AIF1DAC2_EQ_B4_A - [15:0] */
+
+/*
+ * R1198 (0x4AE) - AIF1 DAC2 EQ Band 4 B
+ */
+#define WM8995_AIF1DAC2_EQ_B4_B_MASK            0xFFFF	/* AIF1DAC2_EQ_B4_B - [15:0] */
+#define WM8995_AIF1DAC2_EQ_B4_B_SHIFT                0	/* AIF1DAC2_EQ_B4_B - [15:0] */
+#define WM8995_AIF1DAC2_EQ_B4_B_WIDTH               16	/* AIF1DAC2_EQ_B4_B - [15:0] */
+
+/*
+ * R1199 (0x4AF) - AIF1 DAC2 EQ Band 4 C
+ */
+#define WM8995_AIF1DAC2_EQ_B4_C_MASK            0xFFFF	/* AIF1DAC2_EQ_B4_C - [15:0] */
+#define WM8995_AIF1DAC2_EQ_B4_C_SHIFT                0	/* AIF1DAC2_EQ_B4_C - [15:0] */
+#define WM8995_AIF1DAC2_EQ_B4_C_WIDTH               16	/* AIF1DAC2_EQ_B4_C - [15:0] */
+
+/*
+ * R1200 (0x4B0) - AIF1 DAC2 EQ Band 4 PG
+ */
+#define WM8995_AIF1DAC2_EQ_B4_PG_MASK           0xFFFF	/* AIF1DAC2_EQ_B4_PG - [15:0] */
+#define WM8995_AIF1DAC2_EQ_B4_PG_SHIFT               0	/* AIF1DAC2_EQ_B4_PG - [15:0] */
+#define WM8995_AIF1DAC2_EQ_B4_PG_WIDTH              16	/* AIF1DAC2_EQ_B4_PG - [15:0] */
+
+/*
+ * R1201 (0x4B1) - AIF1 DAC2 EQ Band 5 A
+ */
+#define WM8995_AIF1DAC2_EQ_B5_A_MASK            0xFFFF	/* AIF1DAC2_EQ_B5_A - [15:0] */
+#define WM8995_AIF1DAC2_EQ_B5_A_SHIFT                0	/* AIF1DAC2_EQ_B5_A - [15:0] */
+#define WM8995_AIF1DAC2_EQ_B5_A_WIDTH               16	/* AIF1DAC2_EQ_B5_A - [15:0] */
+
+/*
+ * R1202 (0x4B2) - AIF1 DAC2 EQ Band 5 B
+ */
+#define WM8995_AIF1DAC2_EQ_B5_B_MASK            0xFFFF	/* AIF1DAC2_EQ_B5_B - [15:0] */
+#define WM8995_AIF1DAC2_EQ_B5_B_SHIFT                0	/* AIF1DAC2_EQ_B5_B - [15:0] */
+#define WM8995_AIF1DAC2_EQ_B5_B_WIDTH               16	/* AIF1DAC2_EQ_B5_B - [15:0] */
+
+/*
+ * R1203 (0x4B3) - AIF1 DAC2 EQ Band 5 PG
+ */
+#define WM8995_AIF1DAC2_EQ_B5_PG_MASK           0xFFFF	/* AIF1DAC2_EQ_B5_PG - [15:0] */
+#define WM8995_AIF1DAC2_EQ_B5_PG_SHIFT               0	/* AIF1DAC2_EQ_B5_PG - [15:0] */
+#define WM8995_AIF1DAC2_EQ_B5_PG_WIDTH              16	/* AIF1DAC2_EQ_B5_PG - [15:0] */
+
+/*
+ * R1280 (0x500) - AIF2 ADC Left Volume
+ */
+#define WM8995_AIF2ADC_VU                       0x0100	/* AIF2ADC_VU */
+#define WM8995_AIF2ADC_VU_MASK                  0x0100	/* AIF2ADC_VU */
+#define WM8995_AIF2ADC_VU_SHIFT                      8	/* AIF2ADC_VU */
+#define WM8995_AIF2ADC_VU_WIDTH                      1	/* AIF2ADC_VU */
+#define WM8995_AIF2ADCL_VOL_MASK                0x00FF	/* AIF2ADCL_VOL - [7:0] */
+#define WM8995_AIF2ADCL_VOL_SHIFT                    0	/* AIF2ADCL_VOL - [7:0] */
+#define WM8995_AIF2ADCL_VOL_WIDTH                    8	/* AIF2ADCL_VOL - [7:0] */
+
+/*
+ * R1281 (0x501) - AIF2 ADC Right Volume
+ */
+#define WM8995_AIF2ADC_VU                       0x0100	/* AIF2ADC_VU */
+#define WM8995_AIF2ADC_VU_MASK                  0x0100	/* AIF2ADC_VU */
+#define WM8995_AIF2ADC_VU_SHIFT                      8	/* AIF2ADC_VU */
+#define WM8995_AIF2ADC_VU_WIDTH                      1	/* AIF2ADC_VU */
+#define WM8995_AIF2ADCR_VOL_MASK                0x00FF	/* AIF2ADCR_VOL - [7:0] */
+#define WM8995_AIF2ADCR_VOL_SHIFT                    0	/* AIF2ADCR_VOL - [7:0] */
+#define WM8995_AIF2ADCR_VOL_WIDTH                    8	/* AIF2ADCR_VOL - [7:0] */
+
+/*
+ * R1282 (0x502) - AIF2 DAC Left Volume
+ */
+#define WM8995_AIF2DAC_VU                       0x0100	/* AIF2DAC_VU */
+#define WM8995_AIF2DAC_VU_MASK                  0x0100	/* AIF2DAC_VU */
+#define WM8995_AIF2DAC_VU_SHIFT                      8	/* AIF2DAC_VU */
+#define WM8995_AIF2DAC_VU_WIDTH                      1	/* AIF2DAC_VU */
+#define WM8995_AIF2DACL_VOL_MASK                0x00FF	/* AIF2DACL_VOL - [7:0] */
+#define WM8995_AIF2DACL_VOL_SHIFT                    0	/* AIF2DACL_VOL - [7:0] */
+#define WM8995_AIF2DACL_VOL_WIDTH                    8	/* AIF2DACL_VOL - [7:0] */
+
+/*
+ * R1283 (0x503) - AIF2 DAC Right Volume
+ */
+#define WM8995_AIF2DAC_VU                       0x0100	/* AIF2DAC_VU */
+#define WM8995_AIF2DAC_VU_MASK                  0x0100	/* AIF2DAC_VU */
+#define WM8995_AIF2DAC_VU_SHIFT                      8	/* AIF2DAC_VU */
+#define WM8995_AIF2DAC_VU_WIDTH                      1	/* AIF2DAC_VU */
+#define WM8995_AIF2DACR_VOL_MASK                0x00FF	/* AIF2DACR_VOL - [7:0] */
+#define WM8995_AIF2DACR_VOL_SHIFT                    0	/* AIF2DACR_VOL - [7:0] */
+#define WM8995_AIF2DACR_VOL_WIDTH                    8	/* AIF2DACR_VOL - [7:0] */
+
+/*
+ * R1296 (0x510) - AIF2 ADC Filters
+ */
+#define WM8995_AIF2ADC_4FS                      0x8000	/* AIF2ADC_4FS */
+#define WM8995_AIF2ADC_4FS_MASK                 0x8000	/* AIF2ADC_4FS */
+#define WM8995_AIF2ADC_4FS_SHIFT                    15	/* AIF2ADC_4FS */
+#define WM8995_AIF2ADC_4FS_WIDTH                     1	/* AIF2ADC_4FS */
+#define WM8995_AIF2ADCL_HPF                     0x1000	/* AIF2ADCL_HPF */
+#define WM8995_AIF2ADCL_HPF_MASK                0x1000	/* AIF2ADCL_HPF */
+#define WM8995_AIF2ADCL_HPF_SHIFT                   12	/* AIF2ADCL_HPF */
+#define WM8995_AIF2ADCL_HPF_WIDTH                    1	/* AIF2ADCL_HPF */
+#define WM8995_AIF2ADCR_HPF                     0x0800	/* AIF2ADCR_HPF */
+#define WM8995_AIF2ADCR_HPF_MASK                0x0800	/* AIF2ADCR_HPF */
+#define WM8995_AIF2ADCR_HPF_SHIFT                   11	/* AIF2ADCR_HPF */
+#define WM8995_AIF2ADCR_HPF_WIDTH                    1	/* AIF2ADCR_HPF */
+#define WM8995_AIF2ADC_HPF_MODE                 0x0008	/* AIF2ADC_HPF_MODE */
+#define WM8995_AIF2ADC_HPF_MODE_MASK            0x0008	/* AIF2ADC_HPF_MODE */
+#define WM8995_AIF2ADC_HPF_MODE_SHIFT                3	/* AIF2ADC_HPF_MODE */
+#define WM8995_AIF2ADC_HPF_MODE_WIDTH                1	/* AIF2ADC_HPF_MODE */
+#define WM8995_AIF2ADC_HPF_CUT_MASK             0x0007	/* AIF2ADC_HPF_CUT - [2:0] */
+#define WM8995_AIF2ADC_HPF_CUT_SHIFT                 0	/* AIF2ADC_HPF_CUT - [2:0] */
+#define WM8995_AIF2ADC_HPF_CUT_WIDTH                 3	/* AIF2ADC_HPF_CUT - [2:0] */
+
+/*
+ * R1312 (0x520) - AIF2 DAC Filters (1)
+ */
+#define WM8995_AIF2DAC_MUTE                     0x0200	/* AIF2DAC_MUTE */
+#define WM8995_AIF2DAC_MUTE_MASK                0x0200	/* AIF2DAC_MUTE */
+#define WM8995_AIF2DAC_MUTE_SHIFT                    9	/* AIF2DAC_MUTE */
+#define WM8995_AIF2DAC_MUTE_WIDTH                    1	/* AIF2DAC_MUTE */
+#define WM8995_AIF2DAC_MONO                     0x0080	/* AIF2DAC_MONO */
+#define WM8995_AIF2DAC_MONO_MASK                0x0080	/* AIF2DAC_MONO */
+#define WM8995_AIF2DAC_MONO_SHIFT                    7	/* AIF2DAC_MONO */
+#define WM8995_AIF2DAC_MONO_WIDTH                    1	/* AIF2DAC_MONO */
+#define WM8995_AIF2DAC_MUTERATE                 0x0020	/* AIF2DAC_MUTERATE */
+#define WM8995_AIF2DAC_MUTERATE_MASK            0x0020	/* AIF2DAC_MUTERATE */
+#define WM8995_AIF2DAC_MUTERATE_SHIFT                5	/* AIF2DAC_MUTERATE */
+#define WM8995_AIF2DAC_MUTERATE_WIDTH                1	/* AIF2DAC_MUTERATE */
+#define WM8995_AIF2DAC_UNMUTE_RAMP              0x0010	/* AIF2DAC_UNMUTE_RAMP */
+#define WM8995_AIF2DAC_UNMUTE_RAMP_MASK         0x0010	/* AIF2DAC_UNMUTE_RAMP */
+#define WM8995_AIF2DAC_UNMUTE_RAMP_SHIFT             4	/* AIF2DAC_UNMUTE_RAMP */
+#define WM8995_AIF2DAC_UNMUTE_RAMP_WIDTH             1	/* AIF2DAC_UNMUTE_RAMP */
+#define WM8995_AIF2DAC_DEEMP_MASK               0x0006	/* AIF2DAC_DEEMP - [2:1] */
+#define WM8995_AIF2DAC_DEEMP_SHIFT                   1	/* AIF2DAC_DEEMP - [2:1] */
+#define WM8995_AIF2DAC_DEEMP_WIDTH                   2	/* AIF2DAC_DEEMP - [2:1] */
+
+/*
+ * R1313 (0x521) - AIF2 DAC Filters (2)
+ */
+#define WM8995_AIF2DAC_3D_GAIN_MASK             0x3E00	/* AIF2DAC_3D_GAIN - [13:9] */
+#define WM8995_AIF2DAC_3D_GAIN_SHIFT                 9	/* AIF2DAC_3D_GAIN - [13:9] */
+#define WM8995_AIF2DAC_3D_GAIN_WIDTH                 5	/* AIF2DAC_3D_GAIN - [13:9] */
+#define WM8995_AIF2DAC_3D_ENA                   0x0100	/* AIF2DAC_3D_ENA */
+#define WM8995_AIF2DAC_3D_ENA_MASK              0x0100	/* AIF2DAC_3D_ENA */
+#define WM8995_AIF2DAC_3D_ENA_SHIFT                  8	/* AIF2DAC_3D_ENA */
+#define WM8995_AIF2DAC_3D_ENA_WIDTH                  1	/* AIF2DAC_3D_ENA */
+
+/*
+ * R1344 (0x540) - AIF2 DRC (1)
+ */
+#define WM8995_AIF2DRC_SIG_DET_RMS_MASK         0xF800	/* AIF2DRC_SIG_DET_RMS - [15:11] */
+#define WM8995_AIF2DRC_SIG_DET_RMS_SHIFT            11	/* AIF2DRC_SIG_DET_RMS - [15:11] */
+#define WM8995_AIF2DRC_SIG_DET_RMS_WIDTH             5	/* AIF2DRC_SIG_DET_RMS - [15:11] */
+#define WM8995_AIF2DRC_SIG_DET_PK_MASK          0x0600	/* AIF2DRC_SIG_DET_PK - [10:9] */
+#define WM8995_AIF2DRC_SIG_DET_PK_SHIFT              9	/* AIF2DRC_SIG_DET_PK - [10:9] */
+#define WM8995_AIF2DRC_SIG_DET_PK_WIDTH              2	/* AIF2DRC_SIG_DET_PK - [10:9] */
+#define WM8995_AIF2DRC_NG_ENA                   0x0100	/* AIF2DRC_NG_ENA */
+#define WM8995_AIF2DRC_NG_ENA_MASK              0x0100	/* AIF2DRC_NG_ENA */
+#define WM8995_AIF2DRC_NG_ENA_SHIFT                  8	/* AIF2DRC_NG_ENA */
+#define WM8995_AIF2DRC_NG_ENA_WIDTH                  1	/* AIF2DRC_NG_ENA */
+#define WM8995_AIF2DRC_SIG_DET_MODE             0x0080	/* AIF2DRC_SIG_DET_MODE */
+#define WM8995_AIF2DRC_SIG_DET_MODE_MASK        0x0080	/* AIF2DRC_SIG_DET_MODE */
+#define WM8995_AIF2DRC_SIG_DET_MODE_SHIFT            7	/* AIF2DRC_SIG_DET_MODE */
+#define WM8995_AIF2DRC_SIG_DET_MODE_WIDTH            1	/* AIF2DRC_SIG_DET_MODE */
+#define WM8995_AIF2DRC_SIG_DET                  0x0040	/* AIF2DRC_SIG_DET */
+#define WM8995_AIF2DRC_SIG_DET_MASK             0x0040	/* AIF2DRC_SIG_DET */
+#define WM8995_AIF2DRC_SIG_DET_SHIFT                 6	/* AIF2DRC_SIG_DET */
+#define WM8995_AIF2DRC_SIG_DET_WIDTH                 1	/* AIF2DRC_SIG_DET */
+#define WM8995_AIF2DRC_KNEE2_OP_ENA             0x0020	/* AIF2DRC_KNEE2_OP_ENA */
+#define WM8995_AIF2DRC_KNEE2_OP_ENA_MASK        0x0020	/* AIF2DRC_KNEE2_OP_ENA */
+#define WM8995_AIF2DRC_KNEE2_OP_ENA_SHIFT            5	/* AIF2DRC_KNEE2_OP_ENA */
+#define WM8995_AIF2DRC_KNEE2_OP_ENA_WIDTH            1	/* AIF2DRC_KNEE2_OP_ENA */
+#define WM8995_AIF2DRC_QR                       0x0010	/* AIF2DRC_QR */
+#define WM8995_AIF2DRC_QR_MASK                  0x0010	/* AIF2DRC_QR */
+#define WM8995_AIF2DRC_QR_SHIFT                      4	/* AIF2DRC_QR */
+#define WM8995_AIF2DRC_QR_WIDTH                      1	/* AIF2DRC_QR */
+#define WM8995_AIF2DRC_ANTICLIP                 0x0008	/* AIF2DRC_ANTICLIP */
+#define WM8995_AIF2DRC_ANTICLIP_MASK            0x0008	/* AIF2DRC_ANTICLIP */
+#define WM8995_AIF2DRC_ANTICLIP_SHIFT                3	/* AIF2DRC_ANTICLIP */
+#define WM8995_AIF2DRC_ANTICLIP_WIDTH                1	/* AIF2DRC_ANTICLIP */
+#define WM8995_AIF2DAC_DRC_ENA                  0x0004	/* AIF2DAC_DRC_ENA */
+#define WM8995_AIF2DAC_DRC_ENA_MASK             0x0004	/* AIF2DAC_DRC_ENA */
+#define WM8995_AIF2DAC_DRC_ENA_SHIFT                 2	/* AIF2DAC_DRC_ENA */
+#define WM8995_AIF2DAC_DRC_ENA_WIDTH                 1	/* AIF2DAC_DRC_ENA */
+#define WM8995_AIF2ADCL_DRC_ENA                 0x0002	/* AIF2ADCL_DRC_ENA */
+#define WM8995_AIF2ADCL_DRC_ENA_MASK            0x0002	/* AIF2ADCL_DRC_ENA */
+#define WM8995_AIF2ADCL_DRC_ENA_SHIFT                1	/* AIF2ADCL_DRC_ENA */
+#define WM8995_AIF2ADCL_DRC_ENA_WIDTH                1	/* AIF2ADCL_DRC_ENA */
+#define WM8995_AIF2ADCR_DRC_ENA                 0x0001	/* AIF2ADCR_DRC_ENA */
+#define WM8995_AIF2ADCR_DRC_ENA_MASK            0x0001	/* AIF2ADCR_DRC_ENA */
+#define WM8995_AIF2ADCR_DRC_ENA_SHIFT                0	/* AIF2ADCR_DRC_ENA */
+#define WM8995_AIF2ADCR_DRC_ENA_WIDTH                1	/* AIF2ADCR_DRC_ENA */
+
+/*
+ * R1345 (0x541) - AIF2 DRC (2)
+ */
+#define WM8995_AIF2DRC_ATK_MASK                 0x1E00	/* AIF2DRC_ATK - [12:9] */
+#define WM8995_AIF2DRC_ATK_SHIFT                     9	/* AIF2DRC_ATK - [12:9] */
+#define WM8995_AIF2DRC_ATK_WIDTH                     4	/* AIF2DRC_ATK - [12:9] */
+#define WM8995_AIF2DRC_DCY_MASK                 0x01E0	/* AIF2DRC_DCY - [8:5] */
+#define WM8995_AIF2DRC_DCY_SHIFT                     5	/* AIF2DRC_DCY - [8:5] */
+#define WM8995_AIF2DRC_DCY_WIDTH                     4	/* AIF2DRC_DCY - [8:5] */
+#define WM8995_AIF2DRC_MINGAIN_MASK             0x001C	/* AIF2DRC_MINGAIN - [4:2] */
+#define WM8995_AIF2DRC_MINGAIN_SHIFT                 2	/* AIF2DRC_MINGAIN - [4:2] */
+#define WM8995_AIF2DRC_MINGAIN_WIDTH                 3	/* AIF2DRC_MINGAIN - [4:2] */
+#define WM8995_AIF2DRC_MAXGAIN_MASK             0x0003	/* AIF2DRC_MAXGAIN - [1:0] */
+#define WM8995_AIF2DRC_MAXGAIN_SHIFT                 0	/* AIF2DRC_MAXGAIN - [1:0] */
+#define WM8995_AIF2DRC_MAXGAIN_WIDTH                 2	/* AIF2DRC_MAXGAIN - [1:0] */
+
+/*
+ * R1346 (0x542) - AIF2 DRC (3)
+ */
+#define WM8995_AIF2DRC_NG_MINGAIN_MASK          0xF000	/* AIF2DRC_NG_MINGAIN - [15:12] */
+#define WM8995_AIF2DRC_NG_MINGAIN_SHIFT             12	/* AIF2DRC_NG_MINGAIN - [15:12] */
+#define WM8995_AIF2DRC_NG_MINGAIN_WIDTH              4	/* AIF2DRC_NG_MINGAIN - [15:12] */
+#define WM8995_AIF2DRC_NG_EXP_MASK              0x0C00	/* AIF2DRC_NG_EXP - [11:10] */
+#define WM8995_AIF2DRC_NG_EXP_SHIFT                 10	/* AIF2DRC_NG_EXP - [11:10] */
+#define WM8995_AIF2DRC_NG_EXP_WIDTH                  2	/* AIF2DRC_NG_EXP - [11:10] */
+#define WM8995_AIF2DRC_QR_THR_MASK              0x0300	/* AIF2DRC_QR_THR - [9:8] */
+#define WM8995_AIF2DRC_QR_THR_SHIFT                  8	/* AIF2DRC_QR_THR - [9:8] */
+#define WM8995_AIF2DRC_QR_THR_WIDTH                  2	/* AIF2DRC_QR_THR - [9:8] */
+#define WM8995_AIF2DRC_QR_DCY_MASK              0x00C0	/* AIF2DRC_QR_DCY - [7:6] */
+#define WM8995_AIF2DRC_QR_DCY_SHIFT                  6	/* AIF2DRC_QR_DCY - [7:6] */
+#define WM8995_AIF2DRC_QR_DCY_WIDTH                  2	/* AIF2DRC_QR_DCY - [7:6] */
+#define WM8995_AIF2DRC_HI_COMP_MASK             0x0038	/* AIF2DRC_HI_COMP - [5:3] */
+#define WM8995_AIF2DRC_HI_COMP_SHIFT                 3	/* AIF2DRC_HI_COMP - [5:3] */
+#define WM8995_AIF2DRC_HI_COMP_WIDTH                 3	/* AIF2DRC_HI_COMP - [5:3] */
+#define WM8995_AIF2DRC_LO_COMP_MASK             0x0007	/* AIF2DRC_LO_COMP - [2:0] */
+#define WM8995_AIF2DRC_LO_COMP_SHIFT                 0	/* AIF2DRC_LO_COMP - [2:0] */
+#define WM8995_AIF2DRC_LO_COMP_WIDTH                 3	/* AIF2DRC_LO_COMP - [2:0] */
+
+/*
+ * R1347 (0x543) - AIF2 DRC (4)
+ */
+#define WM8995_AIF2DRC_KNEE_IP_MASK             0x07E0	/* AIF2DRC_KNEE_IP - [10:5] */
+#define WM8995_AIF2DRC_KNEE_IP_SHIFT                 5	/* AIF2DRC_KNEE_IP - [10:5] */
+#define WM8995_AIF2DRC_KNEE_IP_WIDTH                 6	/* AIF2DRC_KNEE_IP - [10:5] */
+#define WM8995_AIF2DRC_KNEE_OP_MASK             0x001F	/* AIF2DRC_KNEE_OP - [4:0] */
+#define WM8995_AIF2DRC_KNEE_OP_SHIFT                 0	/* AIF2DRC_KNEE_OP - [4:0] */
+#define WM8995_AIF2DRC_KNEE_OP_WIDTH                 5	/* AIF2DRC_KNEE_OP - [4:0] */
+
+/*
+ * R1348 (0x544) - AIF2 DRC (5)
+ */
+#define WM8995_AIF2DRC_KNEE2_IP_MASK            0x03E0	/* AIF2DRC_KNEE2_IP - [9:5] */
+#define WM8995_AIF2DRC_KNEE2_IP_SHIFT                5	/* AIF2DRC_KNEE2_IP - [9:5] */
+#define WM8995_AIF2DRC_KNEE2_IP_WIDTH                5	/* AIF2DRC_KNEE2_IP - [9:5] */
+#define WM8995_AIF2DRC_KNEE2_OP_MASK            0x001F	/* AIF2DRC_KNEE2_OP - [4:0] */
+#define WM8995_AIF2DRC_KNEE2_OP_SHIFT                0	/* AIF2DRC_KNEE2_OP - [4:0] */
+#define WM8995_AIF2DRC_KNEE2_OP_WIDTH                5	/* AIF2DRC_KNEE2_OP - [4:0] */
+
+/*
+ * R1408 (0x580) - AIF2 EQ Gains (1)
+ */
+#define WM8995_AIF2DAC_EQ_B1_GAIN_MASK          0xF800	/* AIF2DAC_EQ_B1_GAIN - [15:11] */
+#define WM8995_AIF2DAC_EQ_B1_GAIN_SHIFT             11	/* AIF2DAC_EQ_B1_GAIN - [15:11] */
+#define WM8995_AIF2DAC_EQ_B1_GAIN_WIDTH              5	/* AIF2DAC_EQ_B1_GAIN - [15:11] */
+#define WM8995_AIF2DAC_EQ_B2_GAIN_MASK          0x07C0	/* AIF2DAC_EQ_B2_GAIN - [10:6] */
+#define WM8995_AIF2DAC_EQ_B2_GAIN_SHIFT              6	/* AIF2DAC_EQ_B2_GAIN - [10:6] */
+#define WM8995_AIF2DAC_EQ_B2_GAIN_WIDTH              5	/* AIF2DAC_EQ_B2_GAIN - [10:6] */
+#define WM8995_AIF2DAC_EQ_B3_GAIN_MASK          0x003E	/* AIF2DAC_EQ_B3_GAIN - [5:1] */
+#define WM8995_AIF2DAC_EQ_B3_GAIN_SHIFT              1	/* AIF2DAC_EQ_B3_GAIN - [5:1] */
+#define WM8995_AIF2DAC_EQ_B3_GAIN_WIDTH              5	/* AIF2DAC_EQ_B3_GAIN - [5:1] */
+#define WM8995_AIF2DAC_EQ_ENA                   0x0001	/* AIF2DAC_EQ_ENA */
+#define WM8995_AIF2DAC_EQ_ENA_MASK              0x0001	/* AIF2DAC_EQ_ENA */
+#define WM8995_AIF2DAC_EQ_ENA_SHIFT                  0	/* AIF2DAC_EQ_ENA */
+#define WM8995_AIF2DAC_EQ_ENA_WIDTH                  1	/* AIF2DAC_EQ_ENA */
+
+/*
+ * R1409 (0x581) - AIF2 EQ Gains (2)
+ */
+#define WM8995_AIF2DAC_EQ_B4_GAIN_MASK          0xF800	/* AIF2DAC_EQ_B4_GAIN - [15:11] */
+#define WM8995_AIF2DAC_EQ_B4_GAIN_SHIFT             11	/* AIF2DAC_EQ_B4_GAIN - [15:11] */
+#define WM8995_AIF2DAC_EQ_B4_GAIN_WIDTH              5	/* AIF2DAC_EQ_B4_GAIN - [15:11] */
+#define WM8995_AIF2DAC_EQ_B5_GAIN_MASK          0x07C0	/* AIF2DAC_EQ_B5_GAIN - [10:6] */
+#define WM8995_AIF2DAC_EQ_B5_GAIN_SHIFT              6	/* AIF2DAC_EQ_B5_GAIN - [10:6] */
+#define WM8995_AIF2DAC_EQ_B5_GAIN_WIDTH              5	/* AIF2DAC_EQ_B5_GAIN - [10:6] */
+
+/*
+ * R1410 (0x582) - AIF2 EQ Band 1 A
+ */
+#define WM8995_AIF2DAC_EQ_B1_A_MASK             0xFFFF	/* AIF2DAC_EQ_B1_A - [15:0] */
+#define WM8995_AIF2DAC_EQ_B1_A_SHIFT                 0	/* AIF2DAC_EQ_B1_A - [15:0] */
+#define WM8995_AIF2DAC_EQ_B1_A_WIDTH                16	/* AIF2DAC_EQ_B1_A - [15:0] */
+
+/*
+ * R1411 (0x583) - AIF2 EQ Band 1 B
+ */
+#define WM8995_AIF2DAC_EQ_B1_B_MASK             0xFFFF	/* AIF2DAC_EQ_B1_B - [15:0] */
+#define WM8995_AIF2DAC_EQ_B1_B_SHIFT                 0	/* AIF2DAC_EQ_B1_B - [15:0] */
+#define WM8995_AIF2DAC_EQ_B1_B_WIDTH                16	/* AIF2DAC_EQ_B1_B - [15:0] */
+
+/*
+ * R1412 (0x584) - AIF2 EQ Band 1 PG
+ */
+#define WM8995_AIF2DAC_EQ_B1_PG_MASK            0xFFFF	/* AIF2DAC_EQ_B1_PG - [15:0] */
+#define WM8995_AIF2DAC_EQ_B1_PG_SHIFT                0	/* AIF2DAC_EQ_B1_PG - [15:0] */
+#define WM8995_AIF2DAC_EQ_B1_PG_WIDTH               16	/* AIF2DAC_EQ_B1_PG - [15:0] */
+
+/*
+ * R1413 (0x585) - AIF2 EQ Band 2 A
+ */
+#define WM8995_AIF2DAC_EQ_B2_A_MASK             0xFFFF	/* AIF2DAC_EQ_B2_A - [15:0] */
+#define WM8995_AIF2DAC_EQ_B2_A_SHIFT                 0	/* AIF2DAC_EQ_B2_A - [15:0] */
+#define WM8995_AIF2DAC_EQ_B2_A_WIDTH                16	/* AIF2DAC_EQ_B2_A - [15:0] */
+
+/*
+ * R1414 (0x586) - AIF2 EQ Band 2 B
+ */
+#define WM8995_AIF2DAC_EQ_B2_B_MASK             0xFFFF	/* AIF2DAC_EQ_B2_B - [15:0] */
+#define WM8995_AIF2DAC_EQ_B2_B_SHIFT                 0	/* AIF2DAC_EQ_B2_B - [15:0] */
+#define WM8995_AIF2DAC_EQ_B2_B_WIDTH                16	/* AIF2DAC_EQ_B2_B - [15:0] */
+
+/*
+ * R1415 (0x587) - AIF2 EQ Band 2 C
+ */
+#define WM8995_AIF2DAC_EQ_B2_C_MASK             0xFFFF	/* AIF2DAC_EQ_B2_C - [15:0] */
+#define WM8995_AIF2DAC_EQ_B2_C_SHIFT                 0	/* AIF2DAC_EQ_B2_C - [15:0] */
+#define WM8995_AIF2DAC_EQ_B2_C_WIDTH                16	/* AIF2DAC_EQ_B2_C - [15:0] */
+
+/*
+ * R1416 (0x588) - AIF2 EQ Band 2 PG
+ */
+#define WM8995_AIF2DAC_EQ_B2_PG_MASK            0xFFFF	/* AIF2DAC_EQ_B2_PG - [15:0] */
+#define WM8995_AIF2DAC_EQ_B2_PG_SHIFT                0	/* AIF2DAC_EQ_B2_PG - [15:0] */
+#define WM8995_AIF2DAC_EQ_B2_PG_WIDTH               16	/* AIF2DAC_EQ_B2_PG - [15:0] */
+
+/*
+ * R1417 (0x589) - AIF2 EQ Band 3 A
+ */
+#define WM8995_AIF2DAC_EQ_B3_A_MASK             0xFFFF	/* AIF2DAC_EQ_B3_A - [15:0] */
+#define WM8995_AIF2DAC_EQ_B3_A_SHIFT                 0	/* AIF2DAC_EQ_B3_A - [15:0] */
+#define WM8995_AIF2DAC_EQ_B3_A_WIDTH                16	/* AIF2DAC_EQ_B3_A - [15:0] */
+
+/*
+ * R1418 (0x58A) - AIF2 EQ Band 3 B
+ */
+#define WM8995_AIF2DAC_EQ_B3_B_MASK             0xFFFF	/* AIF2DAC_EQ_B3_B - [15:0] */
+#define WM8995_AIF2DAC_EQ_B3_B_SHIFT                 0	/* AIF2DAC_EQ_B3_B - [15:0] */
+#define WM8995_AIF2DAC_EQ_B3_B_WIDTH                16	/* AIF2DAC_EQ_B3_B - [15:0] */
+
+/*
+ * R1419 (0x58B) - AIF2 EQ Band 3 C
+ */
+#define WM8995_AIF2DAC_EQ_B3_C_MASK             0xFFFF	/* AIF2DAC_EQ_B3_C - [15:0] */
+#define WM8995_AIF2DAC_EQ_B3_C_SHIFT                 0	/* AIF2DAC_EQ_B3_C - [15:0] */
+#define WM8995_AIF2DAC_EQ_B3_C_WIDTH                16	/* AIF2DAC_EQ_B3_C - [15:0] */
+
+/*
+ * R1420 (0x58C) - AIF2 EQ Band 3 PG
+ */
+#define WM8995_AIF2DAC_EQ_B3_PG_MASK            0xFFFF	/* AIF2DAC_EQ_B3_PG - [15:0] */
+#define WM8995_AIF2DAC_EQ_B3_PG_SHIFT                0	/* AIF2DAC_EQ_B3_PG - [15:0] */
+#define WM8995_AIF2DAC_EQ_B3_PG_WIDTH               16	/* AIF2DAC_EQ_B3_PG - [15:0] */
+
+/*
+ * R1421 (0x58D) - AIF2 EQ Band 4 A
+ */
+#define WM8995_AIF2DAC_EQ_B4_A_MASK             0xFFFF	/* AIF2DAC_EQ_B4_A - [15:0] */
+#define WM8995_AIF2DAC_EQ_B4_A_SHIFT                 0	/* AIF2DAC_EQ_B4_A - [15:0] */
+#define WM8995_AIF2DAC_EQ_B4_A_WIDTH                16	/* AIF2DAC_EQ_B4_A - [15:0] */
+
+/*
+ * R1422 (0x58E) - AIF2 EQ Band 4 B
+ */
+#define WM8995_AIF2DAC_EQ_B4_B_MASK             0xFFFF	/* AIF2DAC_EQ_B4_B - [15:0] */
+#define WM8995_AIF2DAC_EQ_B4_B_SHIFT                 0	/* AIF2DAC_EQ_B4_B - [15:0] */
+#define WM8995_AIF2DAC_EQ_B4_B_WIDTH                16	/* AIF2DAC_EQ_B4_B - [15:0] */
+
+/*
+ * R1423 (0x58F) - AIF2 EQ Band 4 C
+ */
+#define WM8995_AIF2DAC_EQ_B4_C_MASK             0xFFFF	/* AIF2DAC_EQ_B4_C - [15:0] */
+#define WM8995_AIF2DAC_EQ_B4_C_SHIFT                 0	/* AIF2DAC_EQ_B4_C - [15:0] */
+#define WM8995_AIF2DAC_EQ_B4_C_WIDTH                16	/* AIF2DAC_EQ_B4_C - [15:0] */
+
+/*
+ * R1424 (0x590) - AIF2 EQ Band 4 PG
+ */
+#define WM8995_AIF2DAC_EQ_B4_PG_MASK            0xFFFF	/* AIF2DAC_EQ_B4_PG - [15:0] */
+#define WM8995_AIF2DAC_EQ_B4_PG_SHIFT                0	/* AIF2DAC_EQ_B4_PG - [15:0] */
+#define WM8995_AIF2DAC_EQ_B4_PG_WIDTH               16	/* AIF2DAC_EQ_B4_PG - [15:0] */
+
+/*
+ * R1425 (0x591) - AIF2 EQ Band 5 A
+ */
+#define WM8995_AIF2DAC_EQ_B5_A_MASK             0xFFFF	/* AIF2DAC_EQ_B5_A - [15:0] */
+#define WM8995_AIF2DAC_EQ_B5_A_SHIFT                 0	/* AIF2DAC_EQ_B5_A - [15:0] */
+#define WM8995_AIF2DAC_EQ_B5_A_WIDTH                16	/* AIF2DAC_EQ_B5_A - [15:0] */
+
+/*
+ * R1426 (0x592) - AIF2 EQ Band 5 B
+ */
+#define WM8995_AIF2DAC_EQ_B5_B_MASK             0xFFFF	/* AIF2DAC_EQ_B5_B - [15:0] */
+#define WM8995_AIF2DAC_EQ_B5_B_SHIFT                 0	/* AIF2DAC_EQ_B5_B - [15:0] */
+#define WM8995_AIF2DAC_EQ_B5_B_WIDTH                16	/* AIF2DAC_EQ_B5_B - [15:0] */
+
+/*
+ * R1427 (0x593) - AIF2 EQ Band 5 PG
+ */
+#define WM8995_AIF2DAC_EQ_B5_PG_MASK            0xFFFF	/* AIF2DAC_EQ_B5_PG - [15:0] */
+#define WM8995_AIF2DAC_EQ_B5_PG_SHIFT                0	/* AIF2DAC_EQ_B5_PG - [15:0] */
+#define WM8995_AIF2DAC_EQ_B5_PG_WIDTH               16	/* AIF2DAC_EQ_B5_PG - [15:0] */
+
+/*
+ * R1536 (0x600) - DAC1 Mixer Volumes
+ */
+#define WM8995_ADCR_DAC1_VOL_MASK               0x03E0	/* ADCR_DAC1_VOL - [9:5] */
+#define WM8995_ADCR_DAC1_VOL_SHIFT                   5	/* ADCR_DAC1_VOL - [9:5] */
+#define WM8995_ADCR_DAC1_VOL_WIDTH                   5	/* ADCR_DAC1_VOL - [9:5] */
+#define WM8995_ADCL_DAC1_VOL_MASK               0x001F	/* ADCL_DAC1_VOL - [4:0] */
+#define WM8995_ADCL_DAC1_VOL_SHIFT                   0	/* ADCL_DAC1_VOL - [4:0] */
+#define WM8995_ADCL_DAC1_VOL_WIDTH                   5	/* ADCL_DAC1_VOL - [4:0] */
+
+/*
+ * R1537 (0x601) - DAC1 Left Mixer Routing
+ */
+#define WM8995_ADCR_TO_DAC1L                    0x0020	/* ADCR_TO_DAC1L */
+#define WM8995_ADCR_TO_DAC1L_MASK               0x0020	/* ADCR_TO_DAC1L */
+#define WM8995_ADCR_TO_DAC1L_SHIFT                   5	/* ADCR_TO_DAC1L */
+#define WM8995_ADCR_TO_DAC1L_WIDTH                   1	/* ADCR_TO_DAC1L */
+#define WM8995_ADCL_TO_DAC1L                    0x0010	/* ADCL_TO_DAC1L */
+#define WM8995_ADCL_TO_DAC1L_MASK               0x0010	/* ADCL_TO_DAC1L */
+#define WM8995_ADCL_TO_DAC1L_SHIFT                   4	/* ADCL_TO_DAC1L */
+#define WM8995_ADCL_TO_DAC1L_WIDTH                   1	/* ADCL_TO_DAC1L */
+#define WM8995_AIF2DACL_TO_DAC1L                0x0004	/* AIF2DACL_TO_DAC1L */
+#define WM8995_AIF2DACL_TO_DAC1L_MASK           0x0004	/* AIF2DACL_TO_DAC1L */
+#define WM8995_AIF2DACL_TO_DAC1L_SHIFT               2	/* AIF2DACL_TO_DAC1L */
+#define WM8995_AIF2DACL_TO_DAC1L_WIDTH               1	/* AIF2DACL_TO_DAC1L */
+#define WM8995_AIF1DAC2L_TO_DAC1L               0x0002	/* AIF1DAC2L_TO_DAC1L */
+#define WM8995_AIF1DAC2L_TO_DAC1L_MASK          0x0002	/* AIF1DAC2L_TO_DAC1L */
+#define WM8995_AIF1DAC2L_TO_DAC1L_SHIFT              1	/* AIF1DAC2L_TO_DAC1L */
+#define WM8995_AIF1DAC2L_TO_DAC1L_WIDTH              1	/* AIF1DAC2L_TO_DAC1L */
+#define WM8995_AIF1DAC1L_TO_DAC1L               0x0001	/* AIF1DAC1L_TO_DAC1L */
+#define WM8995_AIF1DAC1L_TO_DAC1L_MASK          0x0001	/* AIF1DAC1L_TO_DAC1L */
+#define WM8995_AIF1DAC1L_TO_DAC1L_SHIFT              0	/* AIF1DAC1L_TO_DAC1L */
+#define WM8995_AIF1DAC1L_TO_DAC1L_WIDTH              1	/* AIF1DAC1L_TO_DAC1L */
+
+/*
+ * R1538 (0x602) - DAC1 Right Mixer Routing
+ */
+#define WM8995_ADCR_TO_DAC1R                    0x0020	/* ADCR_TO_DAC1R */
+#define WM8995_ADCR_TO_DAC1R_MASK               0x0020	/* ADCR_TO_DAC1R */
+#define WM8995_ADCR_TO_DAC1R_SHIFT                   5	/* ADCR_TO_DAC1R */
+#define WM8995_ADCR_TO_DAC1R_WIDTH                   1	/* ADCR_TO_DAC1R */
+#define WM8995_ADCL_TO_DAC1R                    0x0010	/* ADCL_TO_DAC1R */
+#define WM8995_ADCL_TO_DAC1R_MASK               0x0010	/* ADCL_TO_DAC1R */
+#define WM8995_ADCL_TO_DAC1R_SHIFT                   4	/* ADCL_TO_DAC1R */
+#define WM8995_ADCL_TO_DAC1R_WIDTH                   1	/* ADCL_TO_DAC1R */
+#define WM8995_AIF2DACR_TO_DAC1R                0x0004	/* AIF2DACR_TO_DAC1R */
+#define WM8995_AIF2DACR_TO_DAC1R_MASK           0x0004	/* AIF2DACR_TO_DAC1R */
+#define WM8995_AIF2DACR_TO_DAC1R_SHIFT               2	/* AIF2DACR_TO_DAC1R */
+#define WM8995_AIF2DACR_TO_DAC1R_WIDTH               1	/* AIF2DACR_TO_DAC1R */
+#define WM8995_AIF1DAC2R_TO_DAC1R               0x0002	/* AIF1DAC2R_TO_DAC1R */
+#define WM8995_AIF1DAC2R_TO_DAC1R_MASK          0x0002	/* AIF1DAC2R_TO_DAC1R */
+#define WM8995_AIF1DAC2R_TO_DAC1R_SHIFT              1	/* AIF1DAC2R_TO_DAC1R */
+#define WM8995_AIF1DAC2R_TO_DAC1R_WIDTH              1	/* AIF1DAC2R_TO_DAC1R */
+#define WM8995_AIF1DAC1R_TO_DAC1R               0x0001	/* AIF1DAC1R_TO_DAC1R */
+#define WM8995_AIF1DAC1R_TO_DAC1R_MASK          0x0001	/* AIF1DAC1R_TO_DAC1R */
+#define WM8995_AIF1DAC1R_TO_DAC1R_SHIFT              0	/* AIF1DAC1R_TO_DAC1R */
+#define WM8995_AIF1DAC1R_TO_DAC1R_WIDTH              1	/* AIF1DAC1R_TO_DAC1R */
+
+/*
+ * R1539 (0x603) - DAC2 Mixer Volumes
+ */
+#define WM8995_ADCR_DAC2_VOL_MASK               0x03E0	/* ADCR_DAC2_VOL - [9:5] */
+#define WM8995_ADCR_DAC2_VOL_SHIFT                   5	/* ADCR_DAC2_VOL - [9:5] */
+#define WM8995_ADCR_DAC2_VOL_WIDTH                   5	/* ADCR_DAC2_VOL - [9:5] */
+#define WM8995_ADCL_DAC2_VOL_MASK               0x001F	/* ADCL_DAC2_VOL - [4:0] */
+#define WM8995_ADCL_DAC2_VOL_SHIFT                   0	/* ADCL_DAC2_VOL - [4:0] */
+#define WM8995_ADCL_DAC2_VOL_WIDTH                   5	/* ADCL_DAC2_VOL - [4:0] */
+
+/*
+ * R1540 (0x604) - DAC2 Left Mixer Routing
+ */
+#define WM8995_ADCR_TO_DAC2L                    0x0020	/* ADCR_TO_DAC2L */
+#define WM8995_ADCR_TO_DAC2L_MASK               0x0020	/* ADCR_TO_DAC2L */
+#define WM8995_ADCR_TO_DAC2L_SHIFT                   5	/* ADCR_TO_DAC2L */
+#define WM8995_ADCR_TO_DAC2L_WIDTH                   1	/* ADCR_TO_DAC2L */
+#define WM8995_ADCL_TO_DAC2L                    0x0010	/* ADCL_TO_DAC2L */
+#define WM8995_ADCL_TO_DAC2L_MASK               0x0010	/* ADCL_TO_DAC2L */
+#define WM8995_ADCL_TO_DAC2L_SHIFT                   4	/* ADCL_TO_DAC2L */
+#define WM8995_ADCL_TO_DAC2L_WIDTH                   1	/* ADCL_TO_DAC2L */
+#define WM8995_AIF2DACL_TO_DAC2L                0x0004	/* AIF2DACL_TO_DAC2L */
+#define WM8995_AIF2DACL_TO_DAC2L_MASK           0x0004	/* AIF2DACL_TO_DAC2L */
+#define WM8995_AIF2DACL_TO_DAC2L_SHIFT               2	/* AIF2DACL_TO_DAC2L */
+#define WM8995_AIF2DACL_TO_DAC2L_WIDTH               1	/* AIF2DACL_TO_DAC2L */
+#define WM8995_AIF1DAC2L_TO_DAC2L               0x0002	/* AIF1DAC2L_TO_DAC2L */
+#define WM8995_AIF1DAC2L_TO_DAC2L_MASK          0x0002	/* AIF1DAC2L_TO_DAC2L */
+#define WM8995_AIF1DAC2L_TO_DAC2L_SHIFT              1	/* AIF1DAC2L_TO_DAC2L */
+#define WM8995_AIF1DAC2L_TO_DAC2L_WIDTH              1	/* AIF1DAC2L_TO_DAC2L */
+#define WM8995_AIF1DAC1L_TO_DAC2L               0x0001	/* AIF1DAC1L_TO_DAC2L */
+#define WM8995_AIF1DAC1L_TO_DAC2L_MASK          0x0001	/* AIF1DAC1L_TO_DAC2L */
+#define WM8995_AIF1DAC1L_TO_DAC2L_SHIFT              0	/* AIF1DAC1L_TO_DAC2L */
+#define WM8995_AIF1DAC1L_TO_DAC2L_WIDTH              1	/* AIF1DAC1L_TO_DAC2L */
+
+/*
+ * R1541 (0x605) - DAC2 Right Mixer Routing
+ */
+#define WM8995_ADCR_TO_DAC2R                    0x0020	/* ADCR_TO_DAC2R */
+#define WM8995_ADCR_TO_DAC2R_MASK               0x0020	/* ADCR_TO_DAC2R */
+#define WM8995_ADCR_TO_DAC2R_SHIFT                   5	/* ADCR_TO_DAC2R */
+#define WM8995_ADCR_TO_DAC2R_WIDTH                   1	/* ADCR_TO_DAC2R */
+#define WM8995_ADCL_TO_DAC2R                    0x0010	/* ADCL_TO_DAC2R */
+#define WM8995_ADCL_TO_DAC2R_MASK               0x0010	/* ADCL_TO_DAC2R */
+#define WM8995_ADCL_TO_DAC2R_SHIFT                   4	/* ADCL_TO_DAC2R */
+#define WM8995_ADCL_TO_DAC2R_WIDTH                   1	/* ADCL_TO_DAC2R */
+#define WM8995_AIF2DACR_TO_DAC2R                0x0004	/* AIF2DACR_TO_DAC2R */
+#define WM8995_AIF2DACR_TO_DAC2R_MASK           0x0004	/* AIF2DACR_TO_DAC2R */
+#define WM8995_AIF2DACR_TO_DAC2R_SHIFT               2	/* AIF2DACR_TO_DAC2R */
+#define WM8995_AIF2DACR_TO_DAC2R_WIDTH               1	/* AIF2DACR_TO_DAC2R */
+#define WM8995_AIF1DAC2R_TO_DAC2R               0x0002	/* AIF1DAC2R_TO_DAC2R */
+#define WM8995_AIF1DAC2R_TO_DAC2R_MASK          0x0002	/* AIF1DAC2R_TO_DAC2R */
+#define WM8995_AIF1DAC2R_TO_DAC2R_SHIFT              1	/* AIF1DAC2R_TO_DAC2R */
+#define WM8995_AIF1DAC2R_TO_DAC2R_WIDTH              1	/* AIF1DAC2R_TO_DAC2R */
+#define WM8995_AIF1DAC1R_TO_DAC2R               0x0001	/* AIF1DAC1R_TO_DAC2R */
+#define WM8995_AIF1DAC1R_TO_DAC2R_MASK          0x0001	/* AIF1DAC1R_TO_DAC2R */
+#define WM8995_AIF1DAC1R_TO_DAC2R_SHIFT              0	/* AIF1DAC1R_TO_DAC2R */
+#define WM8995_AIF1DAC1R_TO_DAC2R_WIDTH              1	/* AIF1DAC1R_TO_DAC2R */
+
+/*
+ * R1542 (0x606) - AIF1 ADC1 Left Mixer Routing
+ */
+#define WM8995_ADC1L_TO_AIF1ADC1L               0x0002	/* ADC1L_TO_AIF1ADC1L */
+#define WM8995_ADC1L_TO_AIF1ADC1L_MASK          0x0002	/* ADC1L_TO_AIF1ADC1L */
+#define WM8995_ADC1L_TO_AIF1ADC1L_SHIFT              1	/* ADC1L_TO_AIF1ADC1L */
+#define WM8995_ADC1L_TO_AIF1ADC1L_WIDTH              1	/* ADC1L_TO_AIF1ADC1L */
+#define WM8995_AIF2DACL_TO_AIF1ADC1L            0x0001	/* AIF2DACL_TO_AIF1ADC1L */
+#define WM8995_AIF2DACL_TO_AIF1ADC1L_MASK       0x0001	/* AIF2DACL_TO_AIF1ADC1L */
+#define WM8995_AIF2DACL_TO_AIF1ADC1L_SHIFT           0	/* AIF2DACL_TO_AIF1ADC1L */
+#define WM8995_AIF2DACL_TO_AIF1ADC1L_WIDTH           1	/* AIF2DACL_TO_AIF1ADC1L */
+
+/*
+ * R1543 (0x607) - AIF1 ADC1 Right Mixer Routing
+ */
+#define WM8995_ADC1R_TO_AIF1ADC1R               0x0002	/* ADC1R_TO_AIF1ADC1R */
+#define WM8995_ADC1R_TO_AIF1ADC1R_MASK          0x0002	/* ADC1R_TO_AIF1ADC1R */
+#define WM8995_ADC1R_TO_AIF1ADC1R_SHIFT              1	/* ADC1R_TO_AIF1ADC1R */
+#define WM8995_ADC1R_TO_AIF1ADC1R_WIDTH              1	/* ADC1R_TO_AIF1ADC1R */
+#define WM8995_AIF2DACR_TO_AIF1ADC1R            0x0001	/* AIF2DACR_TO_AIF1ADC1R */
+#define WM8995_AIF2DACR_TO_AIF1ADC1R_MASK       0x0001	/* AIF2DACR_TO_AIF1ADC1R */
+#define WM8995_AIF2DACR_TO_AIF1ADC1R_SHIFT           0	/* AIF2DACR_TO_AIF1ADC1R */
+#define WM8995_AIF2DACR_TO_AIF1ADC1R_WIDTH           1	/* AIF2DACR_TO_AIF1ADC1R */
+
+/*
+ * R1544 (0x608) - AIF1 ADC2 Left Mixer Routing
+ */
+#define WM8995_ADC2L_TO_AIF1ADC2L               0x0002	/* ADC2L_TO_AIF1ADC2L */
+#define WM8995_ADC2L_TO_AIF1ADC2L_MASK          0x0002	/* ADC2L_TO_AIF1ADC2L */
+#define WM8995_ADC2L_TO_AIF1ADC2L_SHIFT              1	/* ADC2L_TO_AIF1ADC2L */
+#define WM8995_ADC2L_TO_AIF1ADC2L_WIDTH              1	/* ADC2L_TO_AIF1ADC2L */
+#define WM8995_AIF2DACL_TO_AIF1ADC2L            0x0001	/* AIF2DACL_TO_AIF1ADC2L */
+#define WM8995_AIF2DACL_TO_AIF1ADC2L_MASK       0x0001	/* AIF2DACL_TO_AIF1ADC2L */
+#define WM8995_AIF2DACL_TO_AIF1ADC2L_SHIFT           0	/* AIF2DACL_TO_AIF1ADC2L */
+#define WM8995_AIF2DACL_TO_AIF1ADC2L_WIDTH           1	/* AIF2DACL_TO_AIF1ADC2L */
+
+/*
+ * R1545 (0x609) - AIF1 ADC2 Right Mixer Routing
+ */
+#define WM8995_ADC2R_TO_AIF1ADC2R               0x0002	/* ADC2R_TO_AIF1ADC2R */
+#define WM8995_ADC2R_TO_AIF1ADC2R_MASK          0x0002	/* ADC2R_TO_AIF1ADC2R */
+#define WM8995_ADC2R_TO_AIF1ADC2R_SHIFT              1	/* ADC2R_TO_AIF1ADC2R */
+#define WM8995_ADC2R_TO_AIF1ADC2R_WIDTH              1	/* ADC2R_TO_AIF1ADC2R */
+#define WM8995_AIF2DACR_TO_AIF1ADC2R            0x0001	/* AIF2DACR_TO_AIF1ADC2R */
+#define WM8995_AIF2DACR_TO_AIF1ADC2R_MASK       0x0001	/* AIF2DACR_TO_AIF1ADC2R */
+#define WM8995_AIF2DACR_TO_AIF1ADC2R_SHIFT           0	/* AIF2DACR_TO_AIF1ADC2R */
+#define WM8995_AIF2DACR_TO_AIF1ADC2R_WIDTH           1	/* AIF2DACR_TO_AIF1ADC2R */
+
+/*
+ * R1552 (0x610) - DAC Softmute
+ */
+#define WM8995_DAC_SOFTMUTEMODE                 0x0002	/* DAC_SOFTMUTEMODE */
+#define WM8995_DAC_SOFTMUTEMODE_MASK            0x0002	/* DAC_SOFTMUTEMODE */
+#define WM8995_DAC_SOFTMUTEMODE_SHIFT                1	/* DAC_SOFTMUTEMODE */
+#define WM8995_DAC_SOFTMUTEMODE_WIDTH                1	/* DAC_SOFTMUTEMODE */
+#define WM8995_DAC_MUTERATE                     0x0001	/* DAC_MUTERATE */
+#define WM8995_DAC_MUTERATE_MASK                0x0001	/* DAC_MUTERATE */
+#define WM8995_DAC_MUTERATE_SHIFT                    0	/* DAC_MUTERATE */
+#define WM8995_DAC_MUTERATE_WIDTH                    1	/* DAC_MUTERATE */
+
+/*
+ * R1568 (0x620) - Oversampling
+ */
+#define WM8995_ADC_OSR128                       0x0002	/* ADC_OSR128 */
+#define WM8995_ADC_OSR128_MASK                  0x0002	/* ADC_OSR128 */
+#define WM8995_ADC_OSR128_SHIFT                      1	/* ADC_OSR128 */
+#define WM8995_ADC_OSR128_WIDTH                      1	/* ADC_OSR128 */
+#define WM8995_DAC_OSR128                       0x0001	/* DAC_OSR128 */
+#define WM8995_DAC_OSR128_MASK                  0x0001	/* DAC_OSR128 */
+#define WM8995_DAC_OSR128_SHIFT                      0	/* DAC_OSR128 */
+#define WM8995_DAC_OSR128_WIDTH                      1	/* DAC_OSR128 */
+
+/*
+ * R1569 (0x621) - Sidetone
+ */
+#define WM8995_ST_LPF                           0x1000	/* ST_LPF */
+#define WM8995_ST_LPF_MASK                      0x1000	/* ST_LPF */
+#define WM8995_ST_LPF_SHIFT                         12	/* ST_LPF */
+#define WM8995_ST_LPF_WIDTH                          1	/* ST_LPF */
+#define WM8995_ST_HPF_CUT_MASK                  0x0380	/* ST_HPF_CUT - [9:7] */
+#define WM8995_ST_HPF_CUT_SHIFT                      7	/* ST_HPF_CUT - [9:7] */
+#define WM8995_ST_HPF_CUT_WIDTH                      3	/* ST_HPF_CUT - [9:7] */
+#define WM8995_ST_HPF                           0x0040	/* ST_HPF */
+#define WM8995_ST_HPF_MASK                      0x0040	/* ST_HPF */
+#define WM8995_ST_HPF_SHIFT                          6	/* ST_HPF */
+#define WM8995_ST_HPF_WIDTH                          1	/* ST_HPF */
+#define WM8995_STR_SEL                          0x0002	/* STR_SEL */
+#define WM8995_STR_SEL_MASK                     0x0002	/* STR_SEL */
+#define WM8995_STR_SEL_SHIFT                         1	/* STR_SEL */
+#define WM8995_STR_SEL_WIDTH                         1	/* STR_SEL */
+#define WM8995_STL_SEL                          0x0001	/* STL_SEL */
+#define WM8995_STL_SEL_MASK                     0x0001	/* STL_SEL */
+#define WM8995_STL_SEL_SHIFT                         0	/* STL_SEL */
+#define WM8995_STL_SEL_WIDTH                         1	/* STL_SEL */
+
+/*
+ * R1792 (0x700) - GPIO 1
+ */
+#define WM8995_GP1_DIR                          0x8000	/* GP1_DIR */
+#define WM8995_GP1_DIR_MASK                     0x8000	/* GP1_DIR */
+#define WM8995_GP1_DIR_SHIFT                        15	/* GP1_DIR */
+#define WM8995_GP1_DIR_WIDTH                         1	/* GP1_DIR */
+#define WM8995_GP1_PU                           0x4000	/* GP1_PU */
+#define WM8995_GP1_PU_MASK                      0x4000	/* GP1_PU */
+#define WM8995_GP1_PU_SHIFT                         14	/* GP1_PU */
+#define WM8995_GP1_PU_WIDTH                          1	/* GP1_PU */
+#define WM8995_GP1_PD                           0x2000	/* GP1_PD */
+#define WM8995_GP1_PD_MASK                      0x2000	/* GP1_PD */
+#define WM8995_GP1_PD_SHIFT                         13	/* GP1_PD */
+#define WM8995_GP1_PD_WIDTH                          1	/* GP1_PD */
+#define WM8995_GP1_POL                          0x0400	/* GP1_POL */
+#define WM8995_GP1_POL_MASK                     0x0400	/* GP1_POL */
+#define WM8995_GP1_POL_SHIFT                        10	/* GP1_POL */
+#define WM8995_GP1_POL_WIDTH                         1	/* GP1_POL */
+#define WM8995_GP1_OP_CFG                       0x0200	/* GP1_OP_CFG */
+#define WM8995_GP1_OP_CFG_MASK                  0x0200	/* GP1_OP_CFG */
+#define WM8995_GP1_OP_CFG_SHIFT                      9	/* GP1_OP_CFG */
+#define WM8995_GP1_OP_CFG_WIDTH                      1	/* GP1_OP_CFG */
+#define WM8995_GP1_DB                           0x0100	/* GP1_DB */
+#define WM8995_GP1_DB_MASK                      0x0100	/* GP1_DB */
+#define WM8995_GP1_DB_SHIFT                          8	/* GP1_DB */
+#define WM8995_GP1_DB_WIDTH                          1	/* GP1_DB */
+#define WM8995_GP1_LVL                          0x0040	/* GP1_LVL */
+#define WM8995_GP1_LVL_MASK                     0x0040	/* GP1_LVL */
+#define WM8995_GP1_LVL_SHIFT                         6	/* GP1_LVL */
+#define WM8995_GP1_LVL_WIDTH                         1	/* GP1_LVL */
+#define WM8995_GP1_FN_MASK                      0x001F	/* GP1_FN - [4:0] */
+#define WM8995_GP1_FN_SHIFT                          0	/* GP1_FN - [4:0] */
+#define WM8995_GP1_FN_WIDTH                          5	/* GP1_FN - [4:0] */
+
+/*
+ * R1793 (0x701) - GPIO 2
+ */
+#define WM8995_GP2_DIR                          0x8000	/* GP2_DIR */
+#define WM8995_GP2_DIR_MASK                     0x8000	/* GP2_DIR */
+#define WM8995_GP2_DIR_SHIFT                        15	/* GP2_DIR */
+#define WM8995_GP2_DIR_WIDTH                         1	/* GP2_DIR */
+#define WM8995_GP2_PU                           0x4000	/* GP2_PU */
+#define WM8995_GP2_PU_MASK                      0x4000	/* GP2_PU */
+#define WM8995_GP2_PU_SHIFT                         14	/* GP2_PU */
+#define WM8995_GP2_PU_WIDTH                          1	/* GP2_PU */
+#define WM8995_GP2_PD                           0x2000	/* GP2_PD */
+#define WM8995_GP2_PD_MASK                      0x2000	/* GP2_PD */
+#define WM8995_GP2_PD_SHIFT                         13	/* GP2_PD */
+#define WM8995_GP2_PD_WIDTH                          1	/* GP2_PD */
+#define WM8995_GP2_POL                          0x0400	/* GP2_POL */
+#define WM8995_GP2_POL_MASK                     0x0400	/* GP2_POL */
+#define WM8995_GP2_POL_SHIFT                        10	/* GP2_POL */
+#define WM8995_GP2_POL_WIDTH                         1	/* GP2_POL */
+#define WM8995_GP2_OP_CFG                       0x0200	/* GP2_OP_CFG */
+#define WM8995_GP2_OP_CFG_MASK                  0x0200	/* GP2_OP_CFG */
+#define WM8995_GP2_OP_CFG_SHIFT                      9	/* GP2_OP_CFG */
+#define WM8995_GP2_OP_CFG_WIDTH                      1	/* GP2_OP_CFG */
+#define WM8995_GP2_DB                           0x0100	/* GP2_DB */
+#define WM8995_GP2_DB_MASK                      0x0100	/* GP2_DB */
+#define WM8995_GP2_DB_SHIFT                          8	/* GP2_DB */
+#define WM8995_GP2_DB_WIDTH                          1	/* GP2_DB */
+#define WM8995_GP2_LVL                          0x0040	/* GP2_LVL */
+#define WM8995_GP2_LVL_MASK                     0x0040	/* GP2_LVL */
+#define WM8995_GP2_LVL_SHIFT                         6	/* GP2_LVL */
+#define WM8995_GP2_LVL_WIDTH                         1	/* GP2_LVL */
+#define WM8995_GP2_FN_MASK                      0x001F	/* GP2_FN - [4:0] */
+#define WM8995_GP2_FN_SHIFT                          0	/* GP2_FN - [4:0] */
+#define WM8995_GP2_FN_WIDTH                          5	/* GP2_FN - [4:0] */
+
+/*
+ * R1794 (0x702) - GPIO 3
+ */
+#define WM8995_GP3_DIR                          0x8000	/* GP3_DIR */
+#define WM8995_GP3_DIR_MASK                     0x8000	/* GP3_DIR */
+#define WM8995_GP3_DIR_SHIFT                        15	/* GP3_DIR */
+#define WM8995_GP3_DIR_WIDTH                         1	/* GP3_DIR */
+#define WM8995_GP3_PU                           0x4000	/* GP3_PU */
+#define WM8995_GP3_PU_MASK                      0x4000	/* GP3_PU */
+#define WM8995_GP3_PU_SHIFT                         14	/* GP3_PU */
+#define WM8995_GP3_PU_WIDTH                          1	/* GP3_PU */
+#define WM8995_GP3_PD                           0x2000	/* GP3_PD */
+#define WM8995_GP3_PD_MASK                      0x2000	/* GP3_PD */
+#define WM8995_GP3_PD_SHIFT                         13	/* GP3_PD */
+#define WM8995_GP3_PD_WIDTH                          1	/* GP3_PD */
+#define WM8995_GP3_POL                          0x0400	/* GP3_POL */
+#define WM8995_GP3_POL_MASK                     0x0400	/* GP3_POL */
+#define WM8995_GP3_POL_SHIFT                        10	/* GP3_POL */
+#define WM8995_GP3_POL_WIDTH                         1	/* GP3_POL */
+#define WM8995_GP3_OP_CFG                       0x0200	/* GP3_OP_CFG */
+#define WM8995_GP3_OP_CFG_MASK                  0x0200	/* GP3_OP_CFG */
+#define WM8995_GP3_OP_CFG_SHIFT                      9	/* GP3_OP_CFG */
+#define WM8995_GP3_OP_CFG_WIDTH                      1	/* GP3_OP_CFG */
+#define WM8995_GP3_DB                           0x0100	/* GP3_DB */
+#define WM8995_GP3_DB_MASK                      0x0100	/* GP3_DB */
+#define WM8995_GP3_DB_SHIFT                          8	/* GP3_DB */
+#define WM8995_GP3_DB_WIDTH                          1	/* GP3_DB */
+#define WM8995_GP3_LVL                          0x0040	/* GP3_LVL */
+#define WM8995_GP3_LVL_MASK                     0x0040	/* GP3_LVL */
+#define WM8995_GP3_LVL_SHIFT                         6	/* GP3_LVL */
+#define WM8995_GP3_LVL_WIDTH                         1	/* GP3_LVL */
+#define WM8995_GP3_FN_MASK                      0x001F	/* GP3_FN - [4:0] */
+#define WM8995_GP3_FN_SHIFT                          0	/* GP3_FN - [4:0] */
+#define WM8995_GP3_FN_WIDTH                          5	/* GP3_FN - [4:0] */
+
+/*
+ * R1795 (0x703) - GPIO 4
+ */
+#define WM8995_GP4_DIR                          0x8000	/* GP4_DIR */
+#define WM8995_GP4_DIR_MASK                     0x8000	/* GP4_DIR */
+#define WM8995_GP4_DIR_SHIFT                        15	/* GP4_DIR */
+#define WM8995_GP4_DIR_WIDTH                         1	/* GP4_DIR */
+#define WM8995_GP4_PU                           0x4000	/* GP4_PU */
+#define WM8995_GP4_PU_MASK                      0x4000	/* GP4_PU */
+#define WM8995_GP4_PU_SHIFT                         14	/* GP4_PU */
+#define WM8995_GP4_PU_WIDTH                          1	/* GP4_PU */
+#define WM8995_GP4_PD                           0x2000	/* GP4_PD */
+#define WM8995_GP4_PD_MASK                      0x2000	/* GP4_PD */
+#define WM8995_GP4_PD_SHIFT                         13	/* GP4_PD */
+#define WM8995_GP4_PD_WIDTH                          1	/* GP4_PD */
+#define WM8995_GP4_POL                          0x0400	/* GP4_POL */
+#define WM8995_GP4_POL_MASK                     0x0400	/* GP4_POL */
+#define WM8995_GP4_POL_SHIFT                        10	/* GP4_POL */
+#define WM8995_GP4_POL_WIDTH                         1	/* GP4_POL */
+#define WM8995_GP4_OP_CFG                       0x0200	/* GP4_OP_CFG */
+#define WM8995_GP4_OP_CFG_MASK                  0x0200	/* GP4_OP_CFG */
+#define WM8995_GP4_OP_CFG_SHIFT                      9	/* GP4_OP_CFG */
+#define WM8995_GP4_OP_CFG_WIDTH                      1	/* GP4_OP_CFG */
+#define WM8995_GP4_DB                           0x0100	/* GP4_DB */
+#define WM8995_GP4_DB_MASK                      0x0100	/* GP4_DB */
+#define WM8995_GP4_DB_SHIFT                          8	/* GP4_DB */
+#define WM8995_GP4_DB_WIDTH                          1	/* GP4_DB */
+#define WM8995_GP4_LVL                          0x0040	/* GP4_LVL */
+#define WM8995_GP4_LVL_MASK                     0x0040	/* GP4_LVL */
+#define WM8995_GP4_LVL_SHIFT                         6	/* GP4_LVL */
+#define WM8995_GP4_LVL_WIDTH                         1	/* GP4_LVL */
+#define WM8995_GP4_FN_MASK                      0x001F	/* GP4_FN - [4:0] */
+#define WM8995_GP4_FN_SHIFT                          0	/* GP4_FN - [4:0] */
+#define WM8995_GP4_FN_WIDTH                          5	/* GP4_FN - [4:0] */
+
+/*
+ * R1796 (0x704) - GPIO 5
+ */
+#define WM8995_GP5_DIR                          0x8000	/* GP5_DIR */
+#define WM8995_GP5_DIR_MASK                     0x8000	/* GP5_DIR */
+#define WM8995_GP5_DIR_SHIFT                        15	/* GP5_DIR */
+#define WM8995_GP5_DIR_WIDTH                         1	/* GP5_DIR */
+#define WM8995_GP5_PU                           0x4000	/* GP5_PU */
+#define WM8995_GP5_PU_MASK                      0x4000	/* GP5_PU */
+#define WM8995_GP5_PU_SHIFT                         14	/* GP5_PU */
+#define WM8995_GP5_PU_WIDTH                          1	/* GP5_PU */
+#define WM8995_GP5_PD                           0x2000	/* GP5_PD */
+#define WM8995_GP5_PD_MASK                      0x2000	/* GP5_PD */
+#define WM8995_GP5_PD_SHIFT                         13	/* GP5_PD */
+#define WM8995_GP5_PD_WIDTH                          1	/* GP5_PD */
+#define WM8995_GP5_POL                          0x0400	/* GP5_POL */
+#define WM8995_GP5_POL_MASK                     0x0400	/* GP5_POL */
+#define WM8995_GP5_POL_SHIFT                        10	/* GP5_POL */
+#define WM8995_GP5_POL_WIDTH                         1	/* GP5_POL */
+#define WM8995_GP5_OP_CFG                       0x0200	/* GP5_OP_CFG */
+#define WM8995_GP5_OP_CFG_MASK                  0x0200	/* GP5_OP_CFG */
+#define WM8995_GP5_OP_CFG_SHIFT                      9	/* GP5_OP_CFG */
+#define WM8995_GP5_OP_CFG_WIDTH                      1	/* GP5_OP_CFG */
+#define WM8995_GP5_DB                           0x0100	/* GP5_DB */
+#define WM8995_GP5_DB_MASK                      0x0100	/* GP5_DB */
+#define WM8995_GP5_DB_SHIFT                          8	/* GP5_DB */
+#define WM8995_GP5_DB_WIDTH                          1	/* GP5_DB */
+#define WM8995_GP5_LVL                          0x0040	/* GP5_LVL */
+#define WM8995_GP5_LVL_MASK                     0x0040	/* GP5_LVL */
+#define WM8995_GP5_LVL_SHIFT                         6	/* GP5_LVL */
+#define WM8995_GP5_LVL_WIDTH                         1	/* GP5_LVL */
+#define WM8995_GP5_FN_MASK                      0x001F	/* GP5_FN - [4:0] */
+#define WM8995_GP5_FN_SHIFT                          0	/* GP5_FN - [4:0] */
+#define WM8995_GP5_FN_WIDTH                          5	/* GP5_FN - [4:0] */
+
+/*
+ * R1797 (0x705) - GPIO 6
+ */
+#define WM8995_GP6_DIR                          0x8000	/* GP6_DIR */
+#define WM8995_GP6_DIR_MASK                     0x8000	/* GP6_DIR */
+#define WM8995_GP6_DIR_SHIFT                        15	/* GP6_DIR */
+#define WM8995_GP6_DIR_WIDTH                         1	/* GP6_DIR */
+#define WM8995_GP6_PU                           0x4000	/* GP6_PU */
+#define WM8995_GP6_PU_MASK                      0x4000	/* GP6_PU */
+#define WM8995_GP6_PU_SHIFT                         14	/* GP6_PU */
+#define WM8995_GP6_PU_WIDTH                          1	/* GP6_PU */
+#define WM8995_GP6_PD                           0x2000	/* GP6_PD */
+#define WM8995_GP6_PD_MASK                      0x2000	/* GP6_PD */
+#define WM8995_GP6_PD_SHIFT                         13	/* GP6_PD */
+#define WM8995_GP6_PD_WIDTH                          1	/* GP6_PD */
+#define WM8995_GP6_POL                          0x0400	/* GP6_POL */
+#define WM8995_GP6_POL_MASK                     0x0400	/* GP6_POL */
+#define WM8995_GP6_POL_SHIFT                        10	/* GP6_POL */
+#define WM8995_GP6_POL_WIDTH                         1	/* GP6_POL */
+#define WM8995_GP6_OP_CFG                       0x0200	/* GP6_OP_CFG */
+#define WM8995_GP6_OP_CFG_MASK                  0x0200	/* GP6_OP_CFG */
+#define WM8995_GP6_OP_CFG_SHIFT                      9	/* GP6_OP_CFG */
+#define WM8995_GP6_OP_CFG_WIDTH                      1	/* GP6_OP_CFG */
+#define WM8995_GP6_DB                           0x0100	/* GP6_DB */
+#define WM8995_GP6_DB_MASK                      0x0100	/* GP6_DB */
+#define WM8995_GP6_DB_SHIFT                          8	/* GP6_DB */
+#define WM8995_GP6_DB_WIDTH                          1	/* GP6_DB */
+#define WM8995_GP6_LVL                          0x0040	/* GP6_LVL */
+#define WM8995_GP6_LVL_MASK                     0x0040	/* GP6_LVL */
+#define WM8995_GP6_LVL_SHIFT                         6	/* GP6_LVL */
+#define WM8995_GP6_LVL_WIDTH                         1	/* GP6_LVL */
+#define WM8995_GP6_FN_MASK                      0x001F	/* GP6_FN - [4:0] */
+#define WM8995_GP6_FN_SHIFT                          0	/* GP6_FN - [4:0] */
+#define WM8995_GP6_FN_WIDTH                          5	/* GP6_FN - [4:0] */
+
+/*
+ * R1798 (0x706) - GPIO 7
+ */
+#define WM8995_GP7_DIR                          0x8000	/* GP7_DIR */
+#define WM8995_GP7_DIR_MASK                     0x8000	/* GP7_DIR */
+#define WM8995_GP7_DIR_SHIFT                        15	/* GP7_DIR */
+#define WM8995_GP7_DIR_WIDTH                         1	/* GP7_DIR */
+#define WM8995_GP7_PU                           0x4000	/* GP7_PU */
+#define WM8995_GP7_PU_MASK                      0x4000	/* GP7_PU */
+#define WM8995_GP7_PU_SHIFT                         14	/* GP7_PU */
+#define WM8995_GP7_PU_WIDTH                          1	/* GP7_PU */
+#define WM8995_GP7_PD                           0x2000	/* GP7_PD */
+#define WM8995_GP7_PD_MASK                      0x2000	/* GP7_PD */
+#define WM8995_GP7_PD_SHIFT                         13	/* GP7_PD */
+#define WM8995_GP7_PD_WIDTH                          1	/* GP7_PD */
+#define WM8995_GP7_POL                          0x0400	/* GP7_POL */
+#define WM8995_GP7_POL_MASK                     0x0400	/* GP7_POL */
+#define WM8995_GP7_POL_SHIFT                        10	/* GP7_POL */
+#define WM8995_GP7_POL_WIDTH                         1	/* GP7_POL */
+#define WM8995_GP7_OP_CFG                       0x0200	/* GP7_OP_CFG */
+#define WM8995_GP7_OP_CFG_MASK                  0x0200	/* GP7_OP_CFG */
+#define WM8995_GP7_OP_CFG_SHIFT                      9	/* GP7_OP_CFG */
+#define WM8995_GP7_OP_CFG_WIDTH                      1	/* GP7_OP_CFG */
+#define WM8995_GP7_DB                           0x0100	/* GP7_DB */
+#define WM8995_GP7_DB_MASK                      0x0100	/* GP7_DB */
+#define WM8995_GP7_DB_SHIFT                          8	/* GP7_DB */
+#define WM8995_GP7_DB_WIDTH                          1	/* GP7_DB */
+#define WM8995_GP7_LVL                          0x0040	/* GP7_LVL */
+#define WM8995_GP7_LVL_MASK                     0x0040	/* GP7_LVL */
+#define WM8995_GP7_LVL_SHIFT                         6	/* GP7_LVL */
+#define WM8995_GP7_LVL_WIDTH                         1	/* GP7_LVL */
+#define WM8995_GP7_FN_MASK                      0x001F	/* GP7_FN - [4:0] */
+#define WM8995_GP7_FN_SHIFT                          0	/* GP7_FN - [4:0] */
+#define WM8995_GP7_FN_WIDTH                          5	/* GP7_FN - [4:0] */
+
+/*
+ * R1799 (0x707) - GPIO 8
+ */
+#define WM8995_GP8_DIR                          0x8000	/* GP8_DIR */
+#define WM8995_GP8_DIR_MASK                     0x8000	/* GP8_DIR */
+#define WM8995_GP8_DIR_SHIFT                        15	/* GP8_DIR */
+#define WM8995_GP8_DIR_WIDTH                         1	/* GP8_DIR */
+#define WM8995_GP8_PU                           0x4000	/* GP8_PU */
+#define WM8995_GP8_PU_MASK                      0x4000	/* GP8_PU */
+#define WM8995_GP8_PU_SHIFT                         14	/* GP8_PU */
+#define WM8995_GP8_PU_WIDTH                          1	/* GP8_PU */
+#define WM8995_GP8_PD                           0x2000	/* GP8_PD */
+#define WM8995_GP8_PD_MASK                      0x2000	/* GP8_PD */
+#define WM8995_GP8_PD_SHIFT                         13	/* GP8_PD */
+#define WM8995_GP8_PD_WIDTH                          1	/* GP8_PD */
+#define WM8995_GP8_POL                          0x0400	/* GP8_POL */
+#define WM8995_GP8_POL_MASK                     0x0400	/* GP8_POL */
+#define WM8995_GP8_POL_SHIFT                        10	/* GP8_POL */
+#define WM8995_GP8_POL_WIDTH                         1	/* GP8_POL */
+#define WM8995_GP8_OP_CFG                       0x0200	/* GP8_OP_CFG */
+#define WM8995_GP8_OP_CFG_MASK                  0x0200	/* GP8_OP_CFG */
+#define WM8995_GP8_OP_CFG_SHIFT                      9	/* GP8_OP_CFG */
+#define WM8995_GP8_OP_CFG_WIDTH                      1	/* GP8_OP_CFG */
+#define WM8995_GP8_DB                           0x0100	/* GP8_DB */
+#define WM8995_GP8_DB_MASK                      0x0100	/* GP8_DB */
+#define WM8995_GP8_DB_SHIFT                          8	/* GP8_DB */
+#define WM8995_GP8_DB_WIDTH                          1	/* GP8_DB */
+#define WM8995_GP8_LVL                          0x0040	/* GP8_LVL */
+#define WM8995_GP8_LVL_MASK                     0x0040	/* GP8_LVL */
+#define WM8995_GP8_LVL_SHIFT                         6	/* GP8_LVL */
+#define WM8995_GP8_LVL_WIDTH                         1	/* GP8_LVL */
+#define WM8995_GP8_FN_MASK                      0x001F	/* GP8_FN - [4:0] */
+#define WM8995_GP8_FN_SHIFT                          0	/* GP8_FN - [4:0] */
+#define WM8995_GP8_FN_WIDTH                          5	/* GP8_FN - [4:0] */
+
+/*
+ * R1800 (0x708) - GPIO 9
+ */
+#define WM8995_GP9_DIR                          0x8000	/* GP9_DIR */
+#define WM8995_GP9_DIR_MASK                     0x8000	/* GP9_DIR */
+#define WM8995_GP9_DIR_SHIFT                        15	/* GP9_DIR */
+#define WM8995_GP9_DIR_WIDTH                         1	/* GP9_DIR */
+#define WM8995_GP9_PU                           0x4000	/* GP9_PU */
+#define WM8995_GP9_PU_MASK                      0x4000	/* GP9_PU */
+#define WM8995_GP9_PU_SHIFT                         14	/* GP9_PU */
+#define WM8995_GP9_PU_WIDTH                          1	/* GP9_PU */
+#define WM8995_GP9_PD                           0x2000	/* GP9_PD */
+#define WM8995_GP9_PD_MASK                      0x2000	/* GP9_PD */
+#define WM8995_GP9_PD_SHIFT                         13	/* GP9_PD */
+#define WM8995_GP9_PD_WIDTH                          1	/* GP9_PD */
+#define WM8995_GP9_POL                          0x0400	/* GP9_POL */
+#define WM8995_GP9_POL_MASK                     0x0400	/* GP9_POL */
+#define WM8995_GP9_POL_SHIFT                        10	/* GP9_POL */
+#define WM8995_GP9_POL_WIDTH                         1	/* GP9_POL */
+#define WM8995_GP9_OP_CFG                       0x0200	/* GP9_OP_CFG */
+#define WM8995_GP9_OP_CFG_MASK                  0x0200	/* GP9_OP_CFG */
+#define WM8995_GP9_OP_CFG_SHIFT                      9	/* GP9_OP_CFG */
+#define WM8995_GP9_OP_CFG_WIDTH                      1	/* GP9_OP_CFG */
+#define WM8995_GP9_DB                           0x0100	/* GP9_DB */
+#define WM8995_GP9_DB_MASK                      0x0100	/* GP9_DB */
+#define WM8995_GP9_DB_SHIFT                          8	/* GP9_DB */
+#define WM8995_GP9_DB_WIDTH                          1	/* GP9_DB */
+#define WM8995_GP9_LVL                          0x0040	/* GP9_LVL */
+#define WM8995_GP9_LVL_MASK                     0x0040	/* GP9_LVL */
+#define WM8995_GP9_LVL_SHIFT                         6	/* GP9_LVL */
+#define WM8995_GP9_LVL_WIDTH                         1	/* GP9_LVL */
+#define WM8995_GP9_FN_MASK                      0x001F	/* GP9_FN - [4:0] */
+#define WM8995_GP9_FN_SHIFT                          0	/* GP9_FN - [4:0] */
+#define WM8995_GP9_FN_WIDTH                          5	/* GP9_FN - [4:0] */
+
+/*
+ * R1801 (0x709) - GPIO 10
+ */
+#define WM8995_GP10_DIR                         0x8000	/* GP10_DIR */
+#define WM8995_GP10_DIR_MASK                    0x8000	/* GP10_DIR */
+#define WM8995_GP10_DIR_SHIFT                       15	/* GP10_DIR */
+#define WM8995_GP10_DIR_WIDTH                        1	/* GP10_DIR */
+#define WM8995_GP10_PU                          0x4000	/* GP10_PU */
+#define WM8995_GP10_PU_MASK                     0x4000	/* GP10_PU */
+#define WM8995_GP10_PU_SHIFT                        14	/* GP10_PU */
+#define WM8995_GP10_PU_WIDTH                         1	/* GP10_PU */
+#define WM8995_GP10_PD                          0x2000	/* GP10_PD */
+#define WM8995_GP10_PD_MASK                     0x2000	/* GP10_PD */
+#define WM8995_GP10_PD_SHIFT                        13	/* GP10_PD */
+#define WM8995_GP10_PD_WIDTH                         1	/* GP10_PD */
+#define WM8995_GP10_POL                         0x0400	/* GP10_POL */
+#define WM8995_GP10_POL_MASK                    0x0400	/* GP10_POL */
+#define WM8995_GP10_POL_SHIFT                       10	/* GP10_POL */
+#define WM8995_GP10_POL_WIDTH                        1	/* GP10_POL */
+#define WM8995_GP10_OP_CFG                      0x0200	/* GP10_OP_CFG */
+#define WM8995_GP10_OP_CFG_MASK                 0x0200	/* GP10_OP_CFG */
+#define WM8995_GP10_OP_CFG_SHIFT                     9	/* GP10_OP_CFG */
+#define WM8995_GP10_OP_CFG_WIDTH                     1	/* GP10_OP_CFG */
+#define WM8995_GP10_DB                          0x0100	/* GP10_DB */
+#define WM8995_GP10_DB_MASK                     0x0100	/* GP10_DB */
+#define WM8995_GP10_DB_SHIFT                         8	/* GP10_DB */
+#define WM8995_GP10_DB_WIDTH                         1	/* GP10_DB */
+#define WM8995_GP10_LVL                         0x0040	/* GP10_LVL */
+#define WM8995_GP10_LVL_MASK                    0x0040	/* GP10_LVL */
+#define WM8995_GP10_LVL_SHIFT                        6	/* GP10_LVL */
+#define WM8995_GP10_LVL_WIDTH                        1	/* GP10_LVL */
+#define WM8995_GP10_FN_MASK                     0x001F	/* GP10_FN - [4:0] */
+#define WM8995_GP10_FN_SHIFT                         0	/* GP10_FN - [4:0] */
+#define WM8995_GP10_FN_WIDTH                         5	/* GP10_FN - [4:0] */
+
+/*
+ * R1802 (0x70A) - GPIO 11
+ */
+#define WM8995_GP11_DIR                         0x8000	/* GP11_DIR */
+#define WM8995_GP11_DIR_MASK                    0x8000	/* GP11_DIR */
+#define WM8995_GP11_DIR_SHIFT                       15	/* GP11_DIR */
+#define WM8995_GP11_DIR_WIDTH                        1	/* GP11_DIR */
+#define WM8995_GP11_PU                          0x4000	/* GP11_PU */
+#define WM8995_GP11_PU_MASK                     0x4000	/* GP11_PU */
+#define WM8995_GP11_PU_SHIFT                        14	/* GP11_PU */
+#define WM8995_GP11_PU_WIDTH                         1	/* GP11_PU */
+#define WM8995_GP11_PD                          0x2000	/* GP11_PD */
+#define WM8995_GP11_PD_MASK                     0x2000	/* GP11_PD */
+#define WM8995_GP11_PD_SHIFT                        13	/* GP11_PD */
+#define WM8995_GP11_PD_WIDTH                         1	/* GP11_PD */
+#define WM8995_GP11_POL                         0x0400	/* GP11_POL */
+#define WM8995_GP11_POL_MASK                    0x0400	/* GP11_POL */
+#define WM8995_GP11_POL_SHIFT                       10	/* GP11_POL */
+#define WM8995_GP11_POL_WIDTH                        1	/* GP11_POL */
+#define WM8995_GP11_OP_CFG                      0x0200	/* GP11_OP_CFG */
+#define WM8995_GP11_OP_CFG_MASK                 0x0200	/* GP11_OP_CFG */
+#define WM8995_GP11_OP_CFG_SHIFT                     9	/* GP11_OP_CFG */
+#define WM8995_GP11_OP_CFG_WIDTH                     1	/* GP11_OP_CFG */
+#define WM8995_GP11_DB                          0x0100	/* GP11_DB */
+#define WM8995_GP11_DB_MASK                     0x0100	/* GP11_DB */
+#define WM8995_GP11_DB_SHIFT                         8	/* GP11_DB */
+#define WM8995_GP11_DB_WIDTH                         1	/* GP11_DB */
+#define WM8995_GP11_LVL                         0x0040	/* GP11_LVL */
+#define WM8995_GP11_LVL_MASK                    0x0040	/* GP11_LVL */
+#define WM8995_GP11_LVL_SHIFT                        6	/* GP11_LVL */
+#define WM8995_GP11_LVL_WIDTH                        1	/* GP11_LVL */
+#define WM8995_GP11_FN_MASK                     0x001F	/* GP11_FN - [4:0] */
+#define WM8995_GP11_FN_SHIFT                         0	/* GP11_FN - [4:0] */
+#define WM8995_GP11_FN_WIDTH                         5	/* GP11_FN - [4:0] */
+
+/*
+ * R1803 (0x70B) - GPIO 12
+ */
+#define WM8995_GP12_DIR                         0x8000	/* GP12_DIR */
+#define WM8995_GP12_DIR_MASK                    0x8000	/* GP12_DIR */
+#define WM8995_GP12_DIR_SHIFT                       15	/* GP12_DIR */
+#define WM8995_GP12_DIR_WIDTH                        1	/* GP12_DIR */
+#define WM8995_GP12_PU                          0x4000	/* GP12_PU */
+#define WM8995_GP12_PU_MASK                     0x4000	/* GP12_PU */
+#define WM8995_GP12_PU_SHIFT                        14	/* GP12_PU */
+#define WM8995_GP12_PU_WIDTH                         1	/* GP12_PU */
+#define WM8995_GP12_PD                          0x2000	/* GP12_PD */
+#define WM8995_GP12_PD_MASK                     0x2000	/* GP12_PD */
+#define WM8995_GP12_PD_SHIFT                        13	/* GP12_PD */
+#define WM8995_GP12_PD_WIDTH                         1	/* GP12_PD */
+#define WM8995_GP12_POL                         0x0400	/* GP12_POL */
+#define WM8995_GP12_POL_MASK                    0x0400	/* GP12_POL */
+#define WM8995_GP12_POL_SHIFT                       10	/* GP12_POL */
+#define WM8995_GP12_POL_WIDTH                        1	/* GP12_POL */
+#define WM8995_GP12_OP_CFG                      0x0200	/* GP12_OP_CFG */
+#define WM8995_GP12_OP_CFG_MASK                 0x0200	/* GP12_OP_CFG */
+#define WM8995_GP12_OP_CFG_SHIFT                     9	/* GP12_OP_CFG */
+#define WM8995_GP12_OP_CFG_WIDTH                     1	/* GP12_OP_CFG */
+#define WM8995_GP12_DB                          0x0100	/* GP12_DB */
+#define WM8995_GP12_DB_MASK                     0x0100	/* GP12_DB */
+#define WM8995_GP12_DB_SHIFT                         8	/* GP12_DB */
+#define WM8995_GP12_DB_WIDTH                         1	/* GP12_DB */
+#define WM8995_GP12_LVL                         0x0040	/* GP12_LVL */
+#define WM8995_GP12_LVL_MASK                    0x0040	/* GP12_LVL */
+#define WM8995_GP12_LVL_SHIFT                        6	/* GP12_LVL */
+#define WM8995_GP12_LVL_WIDTH                        1	/* GP12_LVL */
+#define WM8995_GP12_FN_MASK                     0x001F	/* GP12_FN - [4:0] */
+#define WM8995_GP12_FN_SHIFT                         0	/* GP12_FN - [4:0] */
+#define WM8995_GP12_FN_WIDTH                         5	/* GP12_FN - [4:0] */
+
+/*
+ * R1804 (0x70C) - GPIO 13
+ */
+#define WM8995_GP13_DIR                         0x8000	/* GP13_DIR */
+#define WM8995_GP13_DIR_MASK                    0x8000	/* GP13_DIR */
+#define WM8995_GP13_DIR_SHIFT                       15	/* GP13_DIR */
+#define WM8995_GP13_DIR_WIDTH                        1	/* GP13_DIR */
+#define WM8995_GP13_PU                          0x4000	/* GP13_PU */
+#define WM8995_GP13_PU_MASK                     0x4000	/* GP13_PU */
+#define WM8995_GP13_PU_SHIFT                        14	/* GP13_PU */
+#define WM8995_GP13_PU_WIDTH                         1	/* GP13_PU */
+#define WM8995_GP13_PD                          0x2000	/* GP13_PD */
+#define WM8995_GP13_PD_MASK                     0x2000	/* GP13_PD */
+#define WM8995_GP13_PD_SHIFT                        13	/* GP13_PD */
+#define WM8995_GP13_PD_WIDTH                         1	/* GP13_PD */
+#define WM8995_GP13_POL                         0x0400	/* GP13_POL */
+#define WM8995_GP13_POL_MASK                    0x0400	/* GP13_POL */
+#define WM8995_GP13_POL_SHIFT                       10	/* GP13_POL */
+#define WM8995_GP13_POL_WIDTH                        1	/* GP13_POL */
+#define WM8995_GP13_OP_CFG                      0x0200	/* GP13_OP_CFG */
+#define WM8995_GP13_OP_CFG_MASK                 0x0200	/* GP13_OP_CFG */
+#define WM8995_GP13_OP_CFG_SHIFT                     9	/* GP13_OP_CFG */
+#define WM8995_GP13_OP_CFG_WIDTH                     1	/* GP13_OP_CFG */
+#define WM8995_GP13_DB                          0x0100	/* GP13_DB */
+#define WM8995_GP13_DB_MASK                     0x0100	/* GP13_DB */
+#define WM8995_GP13_DB_SHIFT                         8	/* GP13_DB */
+#define WM8995_GP13_DB_WIDTH                         1	/* GP13_DB */
+#define WM8995_GP13_LVL                         0x0040	/* GP13_LVL */
+#define WM8995_GP13_LVL_MASK                    0x0040	/* GP13_LVL */
+#define WM8995_GP13_LVL_SHIFT                        6	/* GP13_LVL */
+#define WM8995_GP13_LVL_WIDTH                        1	/* GP13_LVL */
+#define WM8995_GP13_FN_MASK                     0x001F	/* GP13_FN - [4:0] */
+#define WM8995_GP13_FN_SHIFT                         0	/* GP13_FN - [4:0] */
+#define WM8995_GP13_FN_WIDTH                         5	/* GP13_FN - [4:0] */
+
+/*
+ * R1805 (0x70D) - GPIO 14
+ */
+#define WM8995_GP14_DIR                         0x8000	/* GP14_DIR */
+#define WM8995_GP14_DIR_MASK                    0x8000	/* GP14_DIR */
+#define WM8995_GP14_DIR_SHIFT                       15	/* GP14_DIR */
+#define WM8995_GP14_DIR_WIDTH                        1	/* GP14_DIR */
+#define WM8995_GP14_PU                          0x4000	/* GP14_PU */
+#define WM8995_GP14_PU_MASK                     0x4000	/* GP14_PU */
+#define WM8995_GP14_PU_SHIFT                        14	/* GP14_PU */
+#define WM8995_GP14_PU_WIDTH                         1	/* GP14_PU */
+#define WM8995_GP14_PD                          0x2000	/* GP14_PD */
+#define WM8995_GP14_PD_MASK                     0x2000	/* GP14_PD */
+#define WM8995_GP14_PD_SHIFT                        13	/* GP14_PD */
+#define WM8995_GP14_PD_WIDTH                         1	/* GP14_PD */
+#define WM8995_GP14_POL                         0x0400	/* GP14_POL */
+#define WM8995_GP14_POL_MASK                    0x0400	/* GP14_POL */
+#define WM8995_GP14_POL_SHIFT                       10	/* GP14_POL */
+#define WM8995_GP14_POL_WIDTH                        1	/* GP14_POL */
+#define WM8995_GP14_OP_CFG                      0x0200	/* GP14_OP_CFG */
+#define WM8995_GP14_OP_CFG_MASK                 0x0200	/* GP14_OP_CFG */
+#define WM8995_GP14_OP_CFG_SHIFT                     9	/* GP14_OP_CFG */
+#define WM8995_GP14_OP_CFG_WIDTH                     1	/* GP14_OP_CFG */
+#define WM8995_GP14_DB                          0x0100	/* GP14_DB */
+#define WM8995_GP14_DB_MASK                     0x0100	/* GP14_DB */
+#define WM8995_GP14_DB_SHIFT                         8	/* GP14_DB */
+#define WM8995_GP14_DB_WIDTH                         1	/* GP14_DB */
+#define WM8995_GP14_LVL                         0x0040	/* GP14_LVL */
+#define WM8995_GP14_LVL_MASK                    0x0040	/* GP14_LVL */
+#define WM8995_GP14_LVL_SHIFT                        6	/* GP14_LVL */
+#define WM8995_GP14_LVL_WIDTH                        1	/* GP14_LVL */
+#define WM8995_GP14_FN_MASK                     0x001F	/* GP14_FN - [4:0] */
+#define WM8995_GP14_FN_SHIFT                         0	/* GP14_FN - [4:0] */
+#define WM8995_GP14_FN_WIDTH                         5	/* GP14_FN - [4:0] */
+
+/*
+ * R1824 (0x720) - Pull Control (1)
+ */
+#define WM8995_DMICDAT3_PD                      0x4000	/* DMICDAT3_PD */
+#define WM8995_DMICDAT3_PD_MASK                 0x4000	/* DMICDAT3_PD */
+#define WM8995_DMICDAT3_PD_SHIFT                    14	/* DMICDAT3_PD */
+#define WM8995_DMICDAT3_PD_WIDTH                     1	/* DMICDAT3_PD */
+#define WM8995_DMICDAT2_PD                      0x1000	/* DMICDAT2_PD */
+#define WM8995_DMICDAT2_PD_MASK                 0x1000	/* DMICDAT2_PD */
+#define WM8995_DMICDAT2_PD_SHIFT                    12	/* DMICDAT2_PD */
+#define WM8995_DMICDAT2_PD_WIDTH                     1	/* DMICDAT2_PD */
+#define WM8995_DMICDAT1_PD                      0x0400	/* DMICDAT1_PD */
+#define WM8995_DMICDAT1_PD_MASK                 0x0400	/* DMICDAT1_PD */
+#define WM8995_DMICDAT1_PD_SHIFT                    10	/* DMICDAT1_PD */
+#define WM8995_DMICDAT1_PD_WIDTH                     1	/* DMICDAT1_PD */
+#define WM8995_MCLK2_PU                         0x0200	/* MCLK2_PU */
+#define WM8995_MCLK2_PU_MASK                    0x0200	/* MCLK2_PU */
+#define WM8995_MCLK2_PU_SHIFT                        9	/* MCLK2_PU */
+#define WM8995_MCLK2_PU_WIDTH                        1	/* MCLK2_PU */
+#define WM8995_MCLK2_PD                         0x0100	/* MCLK2_PD */
+#define WM8995_MCLK2_PD_MASK                    0x0100	/* MCLK2_PD */
+#define WM8995_MCLK2_PD_SHIFT                        8	/* MCLK2_PD */
+#define WM8995_MCLK2_PD_WIDTH                        1	/* MCLK2_PD */
+#define WM8995_MCLK1_PU                         0x0080	/* MCLK1_PU */
+#define WM8995_MCLK1_PU_MASK                    0x0080	/* MCLK1_PU */
+#define WM8995_MCLK1_PU_SHIFT                        7	/* MCLK1_PU */
+#define WM8995_MCLK1_PU_WIDTH                        1	/* MCLK1_PU */
+#define WM8995_MCLK1_PD                         0x0040	/* MCLK1_PD */
+#define WM8995_MCLK1_PD_MASK                    0x0040	/* MCLK1_PD */
+#define WM8995_MCLK1_PD_SHIFT                        6	/* MCLK1_PD */
+#define WM8995_MCLK1_PD_WIDTH                        1	/* MCLK1_PD */
+#define WM8995_DACDAT1_PU                       0x0020	/* DACDAT1_PU */
+#define WM8995_DACDAT1_PU_MASK                  0x0020	/* DACDAT1_PU */
+#define WM8995_DACDAT1_PU_SHIFT                      5	/* DACDAT1_PU */
+#define WM8995_DACDAT1_PU_WIDTH                      1	/* DACDAT1_PU */
+#define WM8995_DACDAT1_PD                       0x0010	/* DACDAT1_PD */
+#define WM8995_DACDAT1_PD_MASK                  0x0010	/* DACDAT1_PD */
+#define WM8995_DACDAT1_PD_SHIFT                      4	/* DACDAT1_PD */
+#define WM8995_DACDAT1_PD_WIDTH                      1	/* DACDAT1_PD */
+#define WM8995_DACLRCLK1_PU                     0x0008	/* DACLRCLK1_PU */
+#define WM8995_DACLRCLK1_PU_MASK                0x0008	/* DACLRCLK1_PU */
+#define WM8995_DACLRCLK1_PU_SHIFT                    3	/* DACLRCLK1_PU */
+#define WM8995_DACLRCLK1_PU_WIDTH                    1	/* DACLRCLK1_PU */
+#define WM8995_DACLRCLK1_PD                     0x0004	/* DACLRCLK1_PD */
+#define WM8995_DACLRCLK1_PD_MASK                0x0004	/* DACLRCLK1_PD */
+#define WM8995_DACLRCLK1_PD_SHIFT                    2	/* DACLRCLK1_PD */
+#define WM8995_DACLRCLK1_PD_WIDTH                    1	/* DACLRCLK1_PD */
+#define WM8995_BCLK1_PU                         0x0002	/* BCLK1_PU */
+#define WM8995_BCLK1_PU_MASK                    0x0002	/* BCLK1_PU */
+#define WM8995_BCLK1_PU_SHIFT                        1	/* BCLK1_PU */
+#define WM8995_BCLK1_PU_WIDTH                        1	/* BCLK1_PU */
+#define WM8995_BCLK1_PD                         0x0001	/* BCLK1_PD */
+#define WM8995_BCLK1_PD_MASK                    0x0001	/* BCLK1_PD */
+#define WM8995_BCLK1_PD_SHIFT                        0	/* BCLK1_PD */
+#define WM8995_BCLK1_PD_WIDTH                        1	/* BCLK1_PD */
+
+/*
+ * R1825 (0x721) - Pull Control (2)
+ */
+#define WM8995_LDO1ENA_PD                       0x0010	/* LDO1ENA_PD */
+#define WM8995_LDO1ENA_PD_MASK                  0x0010	/* LDO1ENA_PD */
+#define WM8995_LDO1ENA_PD_SHIFT                      4	/* LDO1ENA_PD */
+#define WM8995_LDO1ENA_PD_WIDTH                      1	/* LDO1ENA_PD */
+#define WM8995_MODE_PD                          0x0004	/* MODE_PD */
+#define WM8995_MODE_PD_MASK                     0x0004	/* MODE_PD */
+#define WM8995_MODE_PD_SHIFT                         2	/* MODE_PD */
+#define WM8995_MODE_PD_WIDTH                         1	/* MODE_PD */
+#define WM8995_CSNADDR_PD                       0x0001	/* CSNADDR_PD */
+#define WM8995_CSNADDR_PD_MASK                  0x0001	/* CSNADDR_PD */
+#define WM8995_CSNADDR_PD_SHIFT                      0	/* CSNADDR_PD */
+#define WM8995_CSNADDR_PD_WIDTH                      1	/* CSNADDR_PD */
+
+/*
+ * R1840 (0x730) - Interrupt Status 1
+ */
+#define WM8995_GP14_EINT                        0x2000	/* GP14_EINT */
+#define WM8995_GP14_EINT_MASK                   0x2000	/* GP14_EINT */
+#define WM8995_GP14_EINT_SHIFT                      13	/* GP14_EINT */
+#define WM8995_GP14_EINT_WIDTH                       1	/* GP14_EINT */
+#define WM8995_GP13_EINT                        0x1000	/* GP13_EINT */
+#define WM8995_GP13_EINT_MASK                   0x1000	/* GP13_EINT */
+#define WM8995_GP13_EINT_SHIFT                      12	/* GP13_EINT */
+#define WM8995_GP13_EINT_WIDTH                       1	/* GP13_EINT */
+#define WM8995_GP12_EINT                        0x0800	/* GP12_EINT */
+#define WM8995_GP12_EINT_MASK                   0x0800	/* GP12_EINT */
+#define WM8995_GP12_EINT_SHIFT                      11	/* GP12_EINT */
+#define WM8995_GP12_EINT_WIDTH                       1	/* GP12_EINT */
+#define WM8995_GP11_EINT                        0x0400	/* GP11_EINT */
+#define WM8995_GP11_EINT_MASK                   0x0400	/* GP11_EINT */
+#define WM8995_GP11_EINT_SHIFT                      10	/* GP11_EINT */
+#define WM8995_GP11_EINT_WIDTH                       1	/* GP11_EINT */
+#define WM8995_GP10_EINT                        0x0200	/* GP10_EINT */
+#define WM8995_GP10_EINT_MASK                   0x0200	/* GP10_EINT */
+#define WM8995_GP10_EINT_SHIFT                       9	/* GP10_EINT */
+#define WM8995_GP10_EINT_WIDTH                       1	/* GP10_EINT */
+#define WM8995_GP9_EINT                         0x0100	/* GP9_EINT */
+#define WM8995_GP9_EINT_MASK                    0x0100	/* GP9_EINT */
+#define WM8995_GP9_EINT_SHIFT                        8	/* GP9_EINT */
+#define WM8995_GP9_EINT_WIDTH                        1	/* GP9_EINT */
+#define WM8995_GP8_EINT                         0x0080	/* GP8_EINT */
+#define WM8995_GP8_EINT_MASK                    0x0080	/* GP8_EINT */
+#define WM8995_GP8_EINT_SHIFT                        7	/* GP8_EINT */
+#define WM8995_GP8_EINT_WIDTH                        1	/* GP8_EINT */
+#define WM8995_GP7_EINT                         0x0040	/* GP7_EINT */
+#define WM8995_GP7_EINT_MASK                    0x0040	/* GP7_EINT */
+#define WM8995_GP7_EINT_SHIFT                        6	/* GP7_EINT */
+#define WM8995_GP7_EINT_WIDTH                        1	/* GP7_EINT */
+#define WM8995_GP6_EINT                         0x0020	/* GP6_EINT */
+#define WM8995_GP6_EINT_MASK                    0x0020	/* GP6_EINT */
+#define WM8995_GP6_EINT_SHIFT                        5	/* GP6_EINT */
+#define WM8995_GP6_EINT_WIDTH                        1	/* GP6_EINT */
+#define WM8995_GP5_EINT                         0x0010	/* GP5_EINT */
+#define WM8995_GP5_EINT_MASK                    0x0010	/* GP5_EINT */
+#define WM8995_GP5_EINT_SHIFT                        4	/* GP5_EINT */
+#define WM8995_GP5_EINT_WIDTH                        1	/* GP5_EINT */
+#define WM8995_GP4_EINT                         0x0008	/* GP4_EINT */
+#define WM8995_GP4_EINT_MASK                    0x0008	/* GP4_EINT */
+#define WM8995_GP4_EINT_SHIFT                        3	/* GP4_EINT */
+#define WM8995_GP4_EINT_WIDTH                        1	/* GP4_EINT */
+#define WM8995_GP3_EINT                         0x0004	/* GP3_EINT */
+#define WM8995_GP3_EINT_MASK                    0x0004	/* GP3_EINT */
+#define WM8995_GP3_EINT_SHIFT                        2	/* GP3_EINT */
+#define WM8995_GP3_EINT_WIDTH                        1	/* GP3_EINT */
+#define WM8995_GP2_EINT                         0x0002	/* GP2_EINT */
+#define WM8995_GP2_EINT_MASK                    0x0002	/* GP2_EINT */
+#define WM8995_GP2_EINT_SHIFT                        1	/* GP2_EINT */
+#define WM8995_GP2_EINT_WIDTH                        1	/* GP2_EINT */
+#define WM8995_GP1_EINT                         0x0001	/* GP1_EINT */
+#define WM8995_GP1_EINT_MASK                    0x0001	/* GP1_EINT */
+#define WM8995_GP1_EINT_SHIFT                        0	/* GP1_EINT */
+#define WM8995_GP1_EINT_WIDTH                        1	/* GP1_EINT */
+
+/*
+ * R1841 (0x731) - Interrupt Status 2
+ */
+#define WM8995_DCS_DONE_23_EINT                 0x1000	/* DCS_DONE_23_EINT */
+#define WM8995_DCS_DONE_23_EINT_MASK            0x1000	/* DCS_DONE_23_EINT */
+#define WM8995_DCS_DONE_23_EINT_SHIFT               12	/* DCS_DONE_23_EINT */
+#define WM8995_DCS_DONE_23_EINT_WIDTH                1	/* DCS_DONE_23_EINT */
+#define WM8995_DCS_DONE_01_EINT                 0x0800	/* DCS_DONE_01_EINT */
+#define WM8995_DCS_DONE_01_EINT_MASK            0x0800	/* DCS_DONE_01_EINT */
+#define WM8995_DCS_DONE_01_EINT_SHIFT               11	/* DCS_DONE_01_EINT */
+#define WM8995_DCS_DONE_01_EINT_WIDTH                1	/* DCS_DONE_01_EINT */
+#define WM8995_WSEQ_DONE_EINT                   0x0400	/* WSEQ_DONE_EINT */
+#define WM8995_WSEQ_DONE_EINT_MASK              0x0400	/* WSEQ_DONE_EINT */
+#define WM8995_WSEQ_DONE_EINT_SHIFT                 10	/* WSEQ_DONE_EINT */
+#define WM8995_WSEQ_DONE_EINT_WIDTH                  1	/* WSEQ_DONE_EINT */
+#define WM8995_FIFOS_ERR_EINT                   0x0200	/* FIFOS_ERR_EINT */
+#define WM8995_FIFOS_ERR_EINT_MASK              0x0200	/* FIFOS_ERR_EINT */
+#define WM8995_FIFOS_ERR_EINT_SHIFT                  9	/* FIFOS_ERR_EINT */
+#define WM8995_FIFOS_ERR_EINT_WIDTH                  1	/* FIFOS_ERR_EINT */
+#define WM8995_AIF2DRC_SIG_DET_EINT             0x0100	/* AIF2DRC_SIG_DET_EINT */
+#define WM8995_AIF2DRC_SIG_DET_EINT_MASK        0x0100	/* AIF2DRC_SIG_DET_EINT */
+#define WM8995_AIF2DRC_SIG_DET_EINT_SHIFT            8	/* AIF2DRC_SIG_DET_EINT */
+#define WM8995_AIF2DRC_SIG_DET_EINT_WIDTH            1	/* AIF2DRC_SIG_DET_EINT */
+#define WM8995_AIF1DRC2_SIG_DET_EINT            0x0080	/* AIF1DRC2_SIG_DET_EINT */
+#define WM8995_AIF1DRC2_SIG_DET_EINT_MASK       0x0080	/* AIF1DRC2_SIG_DET_EINT */
+#define WM8995_AIF1DRC2_SIG_DET_EINT_SHIFT           7	/* AIF1DRC2_SIG_DET_EINT */
+#define WM8995_AIF1DRC2_SIG_DET_EINT_WIDTH           1	/* AIF1DRC2_SIG_DET_EINT */
+#define WM8995_AIF1DRC1_SIG_DET_EINT            0x0040	/* AIF1DRC1_SIG_DET_EINT */
+#define WM8995_AIF1DRC1_SIG_DET_EINT_MASK       0x0040	/* AIF1DRC1_SIG_DET_EINT */
+#define WM8995_AIF1DRC1_SIG_DET_EINT_SHIFT           6	/* AIF1DRC1_SIG_DET_EINT */
+#define WM8995_AIF1DRC1_SIG_DET_EINT_WIDTH           1	/* AIF1DRC1_SIG_DET_EINT */
+#define WM8995_SRC2_LOCK_EINT                   0x0020	/* SRC2_LOCK_EINT */
+#define WM8995_SRC2_LOCK_EINT_MASK              0x0020	/* SRC2_LOCK_EINT */
+#define WM8995_SRC2_LOCK_EINT_SHIFT                  5	/* SRC2_LOCK_EINT */
+#define WM8995_SRC2_LOCK_EINT_WIDTH                  1	/* SRC2_LOCK_EINT */
+#define WM8995_SRC1_LOCK_EINT                   0x0010	/* SRC1_LOCK_EINT */
+#define WM8995_SRC1_LOCK_EINT_MASK              0x0010	/* SRC1_LOCK_EINT */
+#define WM8995_SRC1_LOCK_EINT_SHIFT                  4	/* SRC1_LOCK_EINT */
+#define WM8995_SRC1_LOCK_EINT_WIDTH                  1	/* SRC1_LOCK_EINT */
+#define WM8995_FLL2_LOCK_EINT                   0x0008	/* FLL2_LOCK_EINT */
+#define WM8995_FLL2_LOCK_EINT_MASK              0x0008	/* FLL2_LOCK_EINT */
+#define WM8995_FLL2_LOCK_EINT_SHIFT                  3	/* FLL2_LOCK_EINT */
+#define WM8995_FLL2_LOCK_EINT_WIDTH                  1	/* FLL2_LOCK_EINT */
+#define WM8995_FLL1_LOCK_EINT                   0x0004	/* FLL1_LOCK_EINT */
+#define WM8995_FLL1_LOCK_EINT_MASK              0x0004	/* FLL1_LOCK_EINT */
+#define WM8995_FLL1_LOCK_EINT_SHIFT                  2	/* FLL1_LOCK_EINT */
+#define WM8995_FLL1_LOCK_EINT_WIDTH                  1	/* FLL1_LOCK_EINT */
+#define WM8995_HP_DONE_EINT                     0x0002	/* HP_DONE_EINT */
+#define WM8995_HP_DONE_EINT_MASK                0x0002	/* HP_DONE_EINT */
+#define WM8995_HP_DONE_EINT_SHIFT                    1	/* HP_DONE_EINT */
+#define WM8995_HP_DONE_EINT_WIDTH                    1	/* HP_DONE_EINT */
+#define WM8995_MICD_EINT                        0x0001	/* MICD_EINT */
+#define WM8995_MICD_EINT_MASK                   0x0001	/* MICD_EINT */
+#define WM8995_MICD_EINT_SHIFT                       0	/* MICD_EINT */
+#define WM8995_MICD_EINT_WIDTH                       1	/* MICD_EINT */
+
+/*
+ * R1842 (0x732) - Interrupt Raw Status 2
+ */
+#define WM8995_DCS_DONE_23_STS                  0x1000	/* DCS_DONE_23_STS */
+#define WM8995_DCS_DONE_23_STS_MASK             0x1000	/* DCS_DONE_23_STS */
+#define WM8995_DCS_DONE_23_STS_SHIFT                12	/* DCS_DONE_23_STS */
+#define WM8995_DCS_DONE_23_STS_WIDTH                 1	/* DCS_DONE_23_STS */
+#define WM8995_DCS_DONE_01_STS                  0x0800	/* DCS_DONE_01_STS */
+#define WM8995_DCS_DONE_01_STS_MASK             0x0800	/* DCS_DONE_01_STS */
+#define WM8995_DCS_DONE_01_STS_SHIFT                11	/* DCS_DONE_01_STS */
+#define WM8995_DCS_DONE_01_STS_WIDTH                 1	/* DCS_DONE_01_STS */
+#define WM8995_WSEQ_DONE_STS                    0x0400	/* WSEQ_DONE_STS */
+#define WM8995_WSEQ_DONE_STS_MASK               0x0400	/* WSEQ_DONE_STS */
+#define WM8995_WSEQ_DONE_STS_SHIFT                  10	/* WSEQ_DONE_STS */
+#define WM8995_WSEQ_DONE_STS_WIDTH                   1	/* WSEQ_DONE_STS */
+#define WM8995_FIFOS_ERR_STS                    0x0200	/* FIFOS_ERR_STS */
+#define WM8995_FIFOS_ERR_STS_MASK               0x0200	/* FIFOS_ERR_STS */
+#define WM8995_FIFOS_ERR_STS_SHIFT                   9	/* FIFOS_ERR_STS */
+#define WM8995_FIFOS_ERR_STS_WIDTH                   1	/* FIFOS_ERR_STS */
+#define WM8995_AIF2DRC_SIG_DET_STS              0x0100	/* AIF2DRC_SIG_DET_STS */
+#define WM8995_AIF2DRC_SIG_DET_STS_MASK         0x0100	/* AIF2DRC_SIG_DET_STS */
+#define WM8995_AIF2DRC_SIG_DET_STS_SHIFT             8	/* AIF2DRC_SIG_DET_STS */
+#define WM8995_AIF2DRC_SIG_DET_STS_WIDTH             1	/* AIF2DRC_SIG_DET_STS */
+#define WM8995_AIF1DRC2_SIG_DET_STS             0x0080	/* AIF1DRC2_SIG_DET_STS */
+#define WM8995_AIF1DRC2_SIG_DET_STS_MASK        0x0080	/* AIF1DRC2_SIG_DET_STS */
+#define WM8995_AIF1DRC2_SIG_DET_STS_SHIFT            7	/* AIF1DRC2_SIG_DET_STS */
+#define WM8995_AIF1DRC2_SIG_DET_STS_WIDTH            1	/* AIF1DRC2_SIG_DET_STS */
+#define WM8995_AIF1DRC1_SIG_DET_STS             0x0040	/* AIF1DRC1_SIG_DET_STS */
+#define WM8995_AIF1DRC1_SIG_DET_STS_MASK        0x0040	/* AIF1DRC1_SIG_DET_STS */
+#define WM8995_AIF1DRC1_SIG_DET_STS_SHIFT            6	/* AIF1DRC1_SIG_DET_STS */
+#define WM8995_AIF1DRC1_SIG_DET_STS_WIDTH            1	/* AIF1DRC1_SIG_DET_STS */
+#define WM8995_SRC2_LOCK_STS                    0x0020	/* SRC2_LOCK_STS */
+#define WM8995_SRC2_LOCK_STS_MASK               0x0020	/* SRC2_LOCK_STS */
+#define WM8995_SRC2_LOCK_STS_SHIFT                   5	/* SRC2_LOCK_STS */
+#define WM8995_SRC2_LOCK_STS_WIDTH                   1	/* SRC2_LOCK_STS */
+#define WM8995_SRC1_LOCK_STS                    0x0010	/* SRC1_LOCK_STS */
+#define WM8995_SRC1_LOCK_STS_MASK               0x0010	/* SRC1_LOCK_STS */
+#define WM8995_SRC1_LOCK_STS_SHIFT                   4	/* SRC1_LOCK_STS */
+#define WM8995_SRC1_LOCK_STS_WIDTH                   1	/* SRC1_LOCK_STS */
+#define WM8995_FLL2_LOCK_STS                    0x0008	/* FLL2_LOCK_STS */
+#define WM8995_FLL2_LOCK_STS_MASK               0x0008	/* FLL2_LOCK_STS */
+#define WM8995_FLL2_LOCK_STS_SHIFT                   3	/* FLL2_LOCK_STS */
+#define WM8995_FLL2_LOCK_STS_WIDTH                   1	/* FLL2_LOCK_STS */
+#define WM8995_FLL1_LOCK_STS                    0x0004	/* FLL1_LOCK_STS */
+#define WM8995_FLL1_LOCK_STS_MASK               0x0004	/* FLL1_LOCK_STS */
+#define WM8995_FLL1_LOCK_STS_SHIFT                   2	/* FLL1_LOCK_STS */
+#define WM8995_FLL1_LOCK_STS_WIDTH                   1	/* FLL1_LOCK_STS */
+
+/*
+ * R1848 (0x738) - Interrupt Status 1 Mask
+ */
+#define WM8995_IM_GP14_EINT                     0x2000	/* IM_GP14_EINT */
+#define WM8995_IM_GP14_EINT_MASK                0x2000	/* IM_GP14_EINT */
+#define WM8995_IM_GP14_EINT_SHIFT                   13	/* IM_GP14_EINT */
+#define WM8995_IM_GP14_EINT_WIDTH                    1	/* IM_GP14_EINT */
+#define WM8995_IM_GP13_EINT                     0x1000	/* IM_GP13_EINT */
+#define WM8995_IM_GP13_EINT_MASK                0x1000	/* IM_GP13_EINT */
+#define WM8995_IM_GP13_EINT_SHIFT                   12	/* IM_GP13_EINT */
+#define WM8995_IM_GP13_EINT_WIDTH                    1	/* IM_GP13_EINT */
+#define WM8995_IM_GP12_EINT                     0x0800	/* IM_GP12_EINT */
+#define WM8995_IM_GP12_EINT_MASK                0x0800	/* IM_GP12_EINT */
+#define WM8995_IM_GP12_EINT_SHIFT                   11	/* IM_GP12_EINT */
+#define WM8995_IM_GP12_EINT_WIDTH                    1	/* IM_GP12_EINT */
+#define WM8995_IM_GP11_EINT                     0x0400	/* IM_GP11_EINT */
+#define WM8995_IM_GP11_EINT_MASK                0x0400	/* IM_GP11_EINT */
+#define WM8995_IM_GP11_EINT_SHIFT                   10	/* IM_GP11_EINT */
+#define WM8995_IM_GP11_EINT_WIDTH                    1	/* IM_GP11_EINT */
+#define WM8995_IM_GP10_EINT                     0x0200	/* IM_GP10_EINT */
+#define WM8995_IM_GP10_EINT_MASK                0x0200	/* IM_GP10_EINT */
+#define WM8995_IM_GP10_EINT_SHIFT                    9	/* IM_GP10_EINT */
+#define WM8995_IM_GP10_EINT_WIDTH                    1	/* IM_GP10_EINT */
+#define WM8995_IM_GP9_EINT                      0x0100	/* IM_GP9_EINT */
+#define WM8995_IM_GP9_EINT_MASK                 0x0100	/* IM_GP9_EINT */
+#define WM8995_IM_GP9_EINT_SHIFT                     8	/* IM_GP9_EINT */
+#define WM8995_IM_GP9_EINT_WIDTH                     1	/* IM_GP9_EINT */
+#define WM8995_IM_GP8_EINT                      0x0080	/* IM_GP8_EINT */
+#define WM8995_IM_GP8_EINT_MASK                 0x0080	/* IM_GP8_EINT */
+#define WM8995_IM_GP8_EINT_SHIFT                     7	/* IM_GP8_EINT */
+#define WM8995_IM_GP8_EINT_WIDTH                     1	/* IM_GP8_EINT */
+#define WM8995_IM_GP7_EINT                      0x0040	/* IM_GP7_EINT */
+#define WM8995_IM_GP7_EINT_MASK                 0x0040	/* IM_GP7_EINT */
+#define WM8995_IM_GP7_EINT_SHIFT                     6	/* IM_GP7_EINT */
+#define WM8995_IM_GP7_EINT_WIDTH                     1	/* IM_GP7_EINT */
+#define WM8995_IM_GP6_EINT                      0x0020	/* IM_GP6_EINT */
+#define WM8995_IM_GP6_EINT_MASK                 0x0020	/* IM_GP6_EINT */
+#define WM8995_IM_GP6_EINT_SHIFT                     5	/* IM_GP6_EINT */
+#define WM8995_IM_GP6_EINT_WIDTH                     1	/* IM_GP6_EINT */
+#define WM8995_IM_GP5_EINT                      0x0010	/* IM_GP5_EINT */
+#define WM8995_IM_GP5_EINT_MASK                 0x0010	/* IM_GP5_EINT */
+#define WM8995_IM_GP5_EINT_SHIFT                     4	/* IM_GP5_EINT */
+#define WM8995_IM_GP5_EINT_WIDTH                     1	/* IM_GP5_EINT */
+#define WM8995_IM_GP4_EINT                      0x0008	/* IM_GP4_EINT */
+#define WM8995_IM_GP4_EINT_MASK                 0x0008	/* IM_GP4_EINT */
+#define WM8995_IM_GP4_EINT_SHIFT                     3	/* IM_GP4_EINT */
+#define WM8995_IM_GP4_EINT_WIDTH                     1	/* IM_GP4_EINT */
+#define WM8995_IM_GP3_EINT                      0x0004	/* IM_GP3_EINT */
+#define WM8995_IM_GP3_EINT_MASK                 0x0004	/* IM_GP3_EINT */
+#define WM8995_IM_GP3_EINT_SHIFT                     2	/* IM_GP3_EINT */
+#define WM8995_IM_GP3_EINT_WIDTH                     1	/* IM_GP3_EINT */
+#define WM8995_IM_GP2_EINT                      0x0002	/* IM_GP2_EINT */
+#define WM8995_IM_GP2_EINT_MASK                 0x0002	/* IM_GP2_EINT */
+#define WM8995_IM_GP2_EINT_SHIFT                     1	/* IM_GP2_EINT */
+#define WM8995_IM_GP2_EINT_WIDTH                     1	/* IM_GP2_EINT */
+#define WM8995_IM_GP1_EINT                      0x0001	/* IM_GP1_EINT */
+#define WM8995_IM_GP1_EINT_MASK                 0x0001	/* IM_GP1_EINT */
+#define WM8995_IM_GP1_EINT_SHIFT                     0	/* IM_GP1_EINT */
+#define WM8995_IM_GP1_EINT_WIDTH                     1	/* IM_GP1_EINT */
+
+/*
+ * R1849 (0x739) - Interrupt Status 2 Mask
+ */
+#define WM8995_IM_DCS_DONE_23_EINT              0x1000	/* IM_DCS_DONE_23_EINT */
+#define WM8995_IM_DCS_DONE_23_EINT_MASK         0x1000	/* IM_DCS_DONE_23_EINT */
+#define WM8995_IM_DCS_DONE_23_EINT_SHIFT            12	/* IM_DCS_DONE_23_EINT */
+#define WM8995_IM_DCS_DONE_23_EINT_WIDTH             1	/* IM_DCS_DONE_23_EINT */
+#define WM8995_IM_DCS_DONE_01_EINT              0x0800	/* IM_DCS_DONE_01_EINT */
+#define WM8995_IM_DCS_DONE_01_EINT_MASK         0x0800	/* IM_DCS_DONE_01_EINT */
+#define WM8995_IM_DCS_DONE_01_EINT_SHIFT            11	/* IM_DCS_DONE_01_EINT */
+#define WM8995_IM_DCS_DONE_01_EINT_WIDTH             1	/* IM_DCS_DONE_01_EINT */
+#define WM8995_IM_WSEQ_DONE_EINT                0x0400	/* IM_WSEQ_DONE_EINT */
+#define WM8995_IM_WSEQ_DONE_EINT_MASK           0x0400	/* IM_WSEQ_DONE_EINT */
+#define WM8995_IM_WSEQ_DONE_EINT_SHIFT              10	/* IM_WSEQ_DONE_EINT */
+#define WM8995_IM_WSEQ_DONE_EINT_WIDTH               1	/* IM_WSEQ_DONE_EINT */
+#define WM8995_IM_FIFOS_ERR_EINT                0x0200	/* IM_FIFOS_ERR_EINT */
+#define WM8995_IM_FIFOS_ERR_EINT_MASK           0x0200	/* IM_FIFOS_ERR_EINT */
+#define WM8995_IM_FIFOS_ERR_EINT_SHIFT               9	/* IM_FIFOS_ERR_EINT */
+#define WM8995_IM_FIFOS_ERR_EINT_WIDTH               1	/* IM_FIFOS_ERR_EINT */
+#define WM8995_IM_AIF2DRC_SIG_DET_EINT          0x0100	/* IM_AIF2DRC_SIG_DET_EINT */
+#define WM8995_IM_AIF2DRC_SIG_DET_EINT_MASK     0x0100	/* IM_AIF2DRC_SIG_DET_EINT */
+#define WM8995_IM_AIF2DRC_SIG_DET_EINT_SHIFT         8	/* IM_AIF2DRC_SIG_DET_EINT */
+#define WM8995_IM_AIF2DRC_SIG_DET_EINT_WIDTH         1	/* IM_AIF2DRC_SIG_DET_EINT */
+#define WM8995_IM_AIF1DRC2_SIG_DET_EINT         0x0080	/* IM_AIF1DRC2_SIG_DET_EINT */
+#define WM8995_IM_AIF1DRC2_SIG_DET_EINT_MASK    0x0080	/* IM_AIF1DRC2_SIG_DET_EINT */
+#define WM8995_IM_AIF1DRC2_SIG_DET_EINT_SHIFT        7	/* IM_AIF1DRC2_SIG_DET_EINT */
+#define WM8995_IM_AIF1DRC2_SIG_DET_EINT_WIDTH        1	/* IM_AIF1DRC2_SIG_DET_EINT */
+#define WM8995_IM_AIF1DRC1_SIG_DET_EINT         0x0040	/* IM_AIF1DRC1_SIG_DET_EINT */
+#define WM8995_IM_AIF1DRC1_SIG_DET_EINT_MASK    0x0040	/* IM_AIF1DRC1_SIG_DET_EINT */
+#define WM8995_IM_AIF1DRC1_SIG_DET_EINT_SHIFT        6	/* IM_AIF1DRC1_SIG_DET_EINT */
+#define WM8995_IM_AIF1DRC1_SIG_DET_EINT_WIDTH        1	/* IM_AIF1DRC1_SIG_DET_EINT */
+#define WM8995_IM_SRC2_LOCK_EINT                0x0020	/* IM_SRC2_LOCK_EINT */
+#define WM8995_IM_SRC2_LOCK_EINT_MASK           0x0020	/* IM_SRC2_LOCK_EINT */
+#define WM8995_IM_SRC2_LOCK_EINT_SHIFT               5	/* IM_SRC2_LOCK_EINT */
+#define WM8995_IM_SRC2_LOCK_EINT_WIDTH               1	/* IM_SRC2_LOCK_EINT */
+#define WM8995_IM_SRC1_LOCK_EINT                0x0010	/* IM_SRC1_LOCK_EINT */
+#define WM8995_IM_SRC1_LOCK_EINT_MASK           0x0010	/* IM_SRC1_LOCK_EINT */
+#define WM8995_IM_SRC1_LOCK_EINT_SHIFT               4	/* IM_SRC1_LOCK_EINT */
+#define WM8995_IM_SRC1_LOCK_EINT_WIDTH               1	/* IM_SRC1_LOCK_EINT */
+#define WM8995_IM_FLL2_LOCK_EINT                0x0008	/* IM_FLL2_LOCK_EINT */
+#define WM8995_IM_FLL2_LOCK_EINT_MASK           0x0008	/* IM_FLL2_LOCK_EINT */
+#define WM8995_IM_FLL2_LOCK_EINT_SHIFT               3	/* IM_FLL2_LOCK_EINT */
+#define WM8995_IM_FLL2_LOCK_EINT_WIDTH               1	/* IM_FLL2_LOCK_EINT */
+#define WM8995_IM_FLL1_LOCK_EINT                0x0004	/* IM_FLL1_LOCK_EINT */
+#define WM8995_IM_FLL1_LOCK_EINT_MASK           0x0004	/* IM_FLL1_LOCK_EINT */
+#define WM8995_IM_FLL1_LOCK_EINT_SHIFT               2	/* IM_FLL1_LOCK_EINT */
+#define WM8995_IM_FLL1_LOCK_EINT_WIDTH               1	/* IM_FLL1_LOCK_EINT */
+#define WM8995_IM_HP_DONE_EINT                  0x0002	/* IM_HP_DONE_EINT */
+#define WM8995_IM_HP_DONE_EINT_MASK             0x0002	/* IM_HP_DONE_EINT */
+#define WM8995_IM_HP_DONE_EINT_SHIFT                 1	/* IM_HP_DONE_EINT */
+#define WM8995_IM_HP_DONE_EINT_WIDTH                 1	/* IM_HP_DONE_EINT */
+#define WM8995_IM_MICD_EINT                     0x0001	/* IM_MICD_EINT */
+#define WM8995_IM_MICD_EINT_MASK                0x0001	/* IM_MICD_EINT */
+#define WM8995_IM_MICD_EINT_SHIFT                    0	/* IM_MICD_EINT */
+#define WM8995_IM_MICD_EINT_WIDTH                    1	/* IM_MICD_EINT */
+
+/*
+ * R1856 (0x740) - Interrupt Control
+ */
+#define WM8995_IM_IRQ                           0x0001	/* IM_IRQ */
+#define WM8995_IM_IRQ_MASK                      0x0001	/* IM_IRQ */
+#define WM8995_IM_IRQ_SHIFT                          0	/* IM_IRQ */
+#define WM8995_IM_IRQ_WIDTH                          1	/* IM_IRQ */
+
+/*
+ * R2048 (0x800) - Left PDM Speaker 1
+ */
+#define WM8995_SPK1L_ENA                        0x0010	/* SPK1L_ENA */
+#define WM8995_SPK1L_ENA_MASK                   0x0010	/* SPK1L_ENA */
+#define WM8995_SPK1L_ENA_SHIFT                       4	/* SPK1L_ENA */
+#define WM8995_SPK1L_ENA_WIDTH                       1	/* SPK1L_ENA */
+#define WM8995_SPK1L_MUTE                       0x0008	/* SPK1L_MUTE */
+#define WM8995_SPK1L_MUTE_MASK                  0x0008	/* SPK1L_MUTE */
+#define WM8995_SPK1L_MUTE_SHIFT                      3	/* SPK1L_MUTE */
+#define WM8995_SPK1L_MUTE_WIDTH                      1	/* SPK1L_MUTE */
+#define WM8995_SPK1L_MUTE_ZC                    0x0004	/* SPK1L_MUTE_ZC */
+#define WM8995_SPK1L_MUTE_ZC_MASK               0x0004	/* SPK1L_MUTE_ZC */
+#define WM8995_SPK1L_MUTE_ZC_SHIFT                   2	/* SPK1L_MUTE_ZC */
+#define WM8995_SPK1L_MUTE_ZC_WIDTH                   1	/* SPK1L_MUTE_ZC */
+#define WM8995_SPK1L_SRC_MASK                   0x0003	/* SPK1L_SRC - [1:0] */
+#define WM8995_SPK1L_SRC_SHIFT                       0	/* SPK1L_SRC - [1:0] */
+#define WM8995_SPK1L_SRC_WIDTH                       2	/* SPK1L_SRC - [1:0] */
+
+/*
+ * R2049 (0x801) - Right PDM Speaker 1
+ */
+#define WM8995_SPK1R_ENA                        0x0010	/* SPK1R_ENA */
+#define WM8995_SPK1R_ENA_MASK                   0x0010	/* SPK1R_ENA */
+#define WM8995_SPK1R_ENA_SHIFT                       4	/* SPK1R_ENA */
+#define WM8995_SPK1R_ENA_WIDTH                       1	/* SPK1R_ENA */
+#define WM8995_SPK1R_MUTE                       0x0008	/* SPK1R_MUTE */
+#define WM8995_SPK1R_MUTE_MASK                  0x0008	/* SPK1R_MUTE */
+#define WM8995_SPK1R_MUTE_SHIFT                      3	/* SPK1R_MUTE */
+#define WM8995_SPK1R_MUTE_WIDTH                      1	/* SPK1R_MUTE */
+#define WM8995_SPK1R_MUTE_ZC                    0x0004	/* SPK1R_MUTE_ZC */
+#define WM8995_SPK1R_MUTE_ZC_MASK               0x0004	/* SPK1R_MUTE_ZC */
+#define WM8995_SPK1R_MUTE_ZC_SHIFT                   2	/* SPK1R_MUTE_ZC */
+#define WM8995_SPK1R_MUTE_ZC_WIDTH                   1	/* SPK1R_MUTE_ZC */
+#define WM8995_SPK1R_SRC_MASK                   0x0003	/* SPK1R_SRC - [1:0] */
+#define WM8995_SPK1R_SRC_SHIFT                       0	/* SPK1R_SRC - [1:0] */
+#define WM8995_SPK1R_SRC_WIDTH                       2	/* SPK1R_SRC - [1:0] */
+
+/*
+ * R2050 (0x802) - PDM Speaker 1 Mute Sequence
+ */
+#define WM8995_SPK1_MUTE_SEQ1_MASK              0x00FF	/* SPK1_MUTE_SEQ1 - [7:0] */
+#define WM8995_SPK1_MUTE_SEQ1_SHIFT                  0	/* SPK1_MUTE_SEQ1 - [7:0] */
+#define WM8995_SPK1_MUTE_SEQ1_WIDTH                  8	/* SPK1_MUTE_SEQ1 - [7:0] */
+
+/*
+ * R2056 (0x808) - Left PDM Speaker 2
+ */
+#define WM8995_SPK2L_ENA                        0x0010	/* SPK2L_ENA */
+#define WM8995_SPK2L_ENA_MASK                   0x0010	/* SPK2L_ENA */
+#define WM8995_SPK2L_ENA_SHIFT                       4	/* SPK2L_ENA */
+#define WM8995_SPK2L_ENA_WIDTH                       1	/* SPK2L_ENA */
+#define WM8995_SPK2L_MUTE                       0x0008	/* SPK2L_MUTE */
+#define WM8995_SPK2L_MUTE_MASK                  0x0008	/* SPK2L_MUTE */
+#define WM8995_SPK2L_MUTE_SHIFT                      3	/* SPK2L_MUTE */
+#define WM8995_SPK2L_MUTE_WIDTH                      1	/* SPK2L_MUTE */
+#define WM8995_SPK2L_MUTE_ZC                    0x0004	/* SPK2L_MUTE_ZC */
+#define WM8995_SPK2L_MUTE_ZC_MASK               0x0004	/* SPK2L_MUTE_ZC */
+#define WM8995_SPK2L_MUTE_ZC_SHIFT                   2	/* SPK2L_MUTE_ZC */
+#define WM8995_SPK2L_MUTE_ZC_WIDTH                   1	/* SPK2L_MUTE_ZC */
+#define WM8995_SPK2L_SRC_MASK                   0x0003	/* SPK2L_SRC - [1:0] */
+#define WM8995_SPK2L_SRC_SHIFT                       0	/* SPK2L_SRC - [1:0] */
+#define WM8995_SPK2L_SRC_WIDTH                       2	/* SPK2L_SRC - [1:0] */
+
+/*
+ * R2057 (0x809) - Right PDM Speaker 2
+ */
+#define WM8995_SPK2R_ENA                        0x0010	/* SPK2R_ENA */
+#define WM8995_SPK2R_ENA_MASK                   0x0010	/* SPK2R_ENA */
+#define WM8995_SPK2R_ENA_SHIFT                       4	/* SPK2R_ENA */
+#define WM8995_SPK2R_ENA_WIDTH                       1	/* SPK2R_ENA */
+#define WM8995_SPK2R_MUTE                       0x0008	/* SPK2R_MUTE */
+#define WM8995_SPK2R_MUTE_MASK                  0x0008	/* SPK2R_MUTE */
+#define WM8995_SPK2R_MUTE_SHIFT                      3	/* SPK2R_MUTE */
+#define WM8995_SPK2R_MUTE_WIDTH                      1	/* SPK2R_MUTE */
+#define WM8995_SPK2R_MUTE_ZC                    0x0004	/* SPK2R_MUTE_ZC */
+#define WM8995_SPK2R_MUTE_ZC_MASK               0x0004	/* SPK2R_MUTE_ZC */
+#define WM8995_SPK2R_MUTE_ZC_SHIFT                   2	/* SPK2R_MUTE_ZC */
+#define WM8995_SPK2R_MUTE_ZC_WIDTH                   1	/* SPK2R_MUTE_ZC */
+#define WM8995_SPK2R_SRC_MASK                   0x0003	/* SPK2R_SRC - [1:0] */
+#define WM8995_SPK2R_SRC_SHIFT                       0	/* SPK2R_SRC - [1:0] */
+#define WM8995_SPK2R_SRC_WIDTH                       2	/* SPK2R_SRC - [1:0] */
+
+/*
+ * R2058 (0x80A) - PDM Speaker 2 Mute Sequence
+ */
+#define WM8995_SPK2_MUTE_SEQ1_MASK              0x00FF	/* SPK2_MUTE_SEQ1 - [7:0] */
+#define WM8995_SPK2_MUTE_SEQ1_SHIFT                  0	/* SPK2_MUTE_SEQ1 - [7:0] */
+#define WM8995_SPK2_MUTE_SEQ1_WIDTH                  8	/* SPK2_MUTE_SEQ1 - [7:0] */
+
+#define WM8995_CLASS_W_SWITCH(xname, reg, shift, max, invert) \
+{	.iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
+	.info = snd_soc_info_volsw, \
+	.get = snd_soc_dapm_get_volsw, .put = wm8995_put_class_w, \
+	.private_value =  SOC_SINGLE_VALUE(reg, shift, max, invert) \
+}
+
+struct wm8995_reg_access {
+	u16 read;
+	u16 write;
+	u16 vol;
+};
+
+/* Sources for AIF1/2 SYSCLK - use with set_dai_sysclk() */
+enum clk_src {
+	WM8995_SYSCLK_MCLK1 = 1,
+	WM8995_SYSCLK_MCLK2,
+	WM8995_SYSCLK_FLL1,
+	WM8995_SYSCLK_FLL2,
+	WM8995_SYSCLK_OPCLK
+};
+
+#define WM8995_FLL1 1
+#define WM8995_FLL2 2
+
+#define WM8995_FLL_SRC_MCLK1  1
+#define WM8995_FLL_SRC_MCLK2  2
+#define WM8995_FLL_SRC_LRCLK  3
+#define WM8995_FLL_SRC_BCLK   4
+
+#endif /* _WM8995_H */
diff --git a/sound/soc/codecs/wm9081.c b/sound/soc/codecs/wm9081.c
index a486670..43825b2 100644
--- a/sound/soc/codecs/wm9081.c
+++ b/sound/soc/codecs/wm9081.c
@@ -23,7 +23,6 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/initval.h>
 #include <sound/tlv.h>
 
@@ -158,7 +157,6 @@
 struct wm9081_priv {
 	enum snd_soc_control_type control_type;
 	void *control_data;
-	u16 reg_cache[WM9081_MAX_REGISTER + 1];
 	int sysclk_source;
 	int mclk_rate;
 	int sysclk_rate;
@@ -591,6 +589,10 @@
 	reg5 |= fll_div.fll_clk_ref_div << WM9081_FLL_CLK_REF_DIV_SHIFT;
 	snd_soc_write(codec, WM9081_FLL_CONTROL_5, reg5);
 
+	/* Set gain to the recommended value */
+	snd_soc_update_bits(codec, WM9081_FLL_CONTROL_4,
+			    WM9081_FLL_GAIN_MASK, 0);
+
 	/* Enable the FLL */
 	snd_soc_write(codec, WM9081_FLL_CONTROL_1, reg1 | WM9081_FLL_ENA);
 
@@ -805,7 +807,7 @@
 
 	case SND_SOC_BIAS_STANDBY:
 		/* Initial cold start */
-		if (codec->bias_level == SND_SOC_BIAS_OFF) {
+		if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
 			/* Disable LINEOUT discharge */
 			reg = snd_soc_read(codec, WM9081_ANTI_POP_CONTROL);
 			reg &= ~WM9081_LINEOUT_DISCH;
@@ -865,7 +867,7 @@
 		break;
 	}
 
-	codec->bias_level = level;
+	codec->dapm.bias_level = level;
 
 	return 0;
 }
@@ -1228,6 +1230,7 @@
 static int wm9081_probe(struct snd_soc_codec *codec)
 {
 	struct wm9081_priv *wm9081 = snd_soc_codec_get_drvdata(codec);
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 	int ret;
 	u16 reg;
 
@@ -1269,9 +1272,9 @@
 				     ARRAY_SIZE(wm9081_eq_controls));
 	}
 
-	snd_soc_dapm_new_controls(codec, wm9081_dapm_widgets,
+	snd_soc_dapm_new_controls(dapm, wm9081_dapm_widgets,
 				  ARRAY_SIZE(wm9081_dapm_widgets));
-	snd_soc_dapm_add_routes(codec, audio_paths, ARRAY_SIZE(audio_paths));
+	snd_soc_dapm_add_routes(dapm, audio_paths, ARRAY_SIZE(audio_paths));
 
 	return ret;
 }
diff --git a/sound/soc/codecs/wm9090.c b/sound/soc/codecs/wm9090.c
index 6e5f64f..a788c42 100644
--- a/sound/soc/codecs/wm9090.c
+++ b/sound/soc/codecs/wm9090.c
@@ -28,7 +28,6 @@
 #include <linux/slab.h>
 #include <sound/initval.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/tlv.h>
 #include <sound/wm9090.h>
 
@@ -442,31 +441,32 @@
 static int wm9090_add_controls(struct snd_soc_codec *codec)
 {
 	struct wm9090_priv *wm9090 = snd_soc_codec_get_drvdata(codec);
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 	int i;
 
-	snd_soc_dapm_new_controls(codec, wm9090_dapm_widgets,
+	snd_soc_dapm_new_controls(dapm, wm9090_dapm_widgets,
 				  ARRAY_SIZE(wm9090_dapm_widgets));
 
-	snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+	snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 
 	snd_soc_add_controls(codec, wm9090_controls,
 			     ARRAY_SIZE(wm9090_controls));
 
 	if (wm9090->pdata.lin1_diff) {
-		snd_soc_dapm_add_routes(codec, audio_map_in1_diff,
+		snd_soc_dapm_add_routes(dapm, audio_map_in1_diff,
 					ARRAY_SIZE(audio_map_in1_diff));
 	} else {
-		snd_soc_dapm_add_routes(codec, audio_map_in1_se,
+		snd_soc_dapm_add_routes(dapm, audio_map_in1_se,
 					ARRAY_SIZE(audio_map_in1_se));
 		snd_soc_add_controls(codec, wm9090_in1_se_controls,
 				     ARRAY_SIZE(wm9090_in1_se_controls));
 	}
 
 	if (wm9090->pdata.lin2_diff) {
-		snd_soc_dapm_add_routes(codec, audio_map_in2_diff,
+		snd_soc_dapm_add_routes(dapm, audio_map_in2_diff,
 					ARRAY_SIZE(audio_map_in2_diff));
 	} else {
-		snd_soc_dapm_add_routes(codec, audio_map_in2_se,
+		snd_soc_dapm_add_routes(dapm, audio_map_in2_se,
 					ARRAY_SIZE(audio_map_in2_se));
 		snd_soc_add_controls(codec, wm9090_in2_se_controls,
 				     ARRAY_SIZE(wm9090_in2_se_controls));
@@ -513,7 +513,7 @@
 		break;
 
 	case SND_SOC_BIAS_STANDBY:
-		if (codec->bias_level == SND_SOC_BIAS_OFF) {
+		if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
 			/* Restore the register cache */
 			for (i = 1; i < codec->driver->reg_cache_size; i++) {
 				if (reg_cache[i] == wm9090_reg_defaults[i])
@@ -543,7 +543,7 @@
 		break;
 	}
 
-	codec->bias_level = level;
+	codec->dapm.bias_level = level;
 
 	return 0;
 }
diff --git a/sound/soc/codecs/wm9705.c b/sound/soc/codecs/wm9705.c
index a144acd..47b357a 100644
--- a/sound/soc/codecs/wm9705.c
+++ b/sound/soc/codecs/wm9705.c
@@ -19,7 +19,6 @@
 #include <sound/ac97_codec.h>
 #include <sound/initval.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 
 #include "wm9705.h"
 
@@ -203,9 +202,11 @@
 
 static int wm9705_add_widgets(struct snd_soc_codec *codec)
 {
-	snd_soc_dapm_new_controls(codec, wm9705_dapm_widgets,
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
+
+	snd_soc_dapm_new_controls(dapm, wm9705_dapm_widgets,
 					ARRAY_SIZE(wm9705_dapm_widgets));
-	snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+	snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 
 	return 0;
 }
diff --git a/sound/soc/codecs/wm9712.c b/sound/soc/codecs/wm9712.c
index d2f224d..bf5d4ef 100644
--- a/sound/soc/codecs/wm9712.c
+++ b/sound/soc/codecs/wm9712.c
@@ -20,7 +20,6 @@
 #include <sound/ac97_codec.h>
 #include <sound/initval.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include "wm9712.h"
 
 #define WM9712_VERSION "0.4"
@@ -432,10 +431,11 @@
 
 static int wm9712_add_widgets(struct snd_soc_codec *codec)
 {
-	snd_soc_dapm_new_controls(codec, wm9712_dapm_widgets,
-				  ARRAY_SIZE(wm9712_dapm_widgets));
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 
-	snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+	snd_soc_dapm_new_controls(dapm, wm9712_dapm_widgets,
+				  ARRAY_SIZE(wm9712_dapm_widgets));
+	snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 
 	return 0;
 }
@@ -570,7 +570,7 @@
 		ac97_write(codec, AC97_POWERDOWN, 0xffff);
 		break;
 	}
-	codec->bias_level = level;
+	codec->dapm.bias_level = level;
 	return 0;
 }
 
diff --git a/sound/soc/codecs/wm9713.c b/sound/soc/codecs/wm9713.c
index 7da13b0..38ed985 100644
--- a/sound/soc/codecs/wm9713.c
+++ b/sound/soc/codecs/wm9713.c
@@ -26,7 +26,6 @@
 #include <sound/pcm_params.h>
 #include <sound/tlv.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 
 #include "wm9713.h"
 
@@ -647,10 +646,12 @@
 
 static int wm9713_add_widgets(struct snd_soc_codec *codec)
 {
-	snd_soc_dapm_new_controls(codec, wm9713_dapm_widgets,
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
+
+	snd_soc_dapm_new_controls(dapm, wm9713_dapm_widgets,
 				  ARRAY_SIZE(wm9713_dapm_widgets));
 
-	snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+	snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 
 	return 0;
 }
@@ -1147,7 +1148,7 @@
 		ac97_write(codec, AC97_POWERDOWN, 0xffff);
 		break;
 	}
-	codec->bias_level = level;
+	codec->dapm.bias_level = level;
 	return 0;
 }
 
diff --git a/sound/soc/codecs/wm_hubs.c b/sound/soc/codecs/wm_hubs.c
index 0e24092..c466982 100644
--- a/sound/soc/codecs/wm_hubs.c
+++ b/sound/soc/codecs/wm_hubs.c
@@ -22,7 +22,6 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/initval.h>
 #include <sound/tlv.h>
 
@@ -94,41 +93,61 @@
 	struct wm_hubs_data *hubs = snd_soc_codec_get_drvdata(codec);
 	u16 reg, reg_l, reg_r, dcs_cfg;
 
-	/* Set for 32 series updates */
-	snd_soc_update_bits(codec, WM8993_DC_SERVO_1,
-			    WM8993_DCS_SERIES_NO_01_MASK,
-			    32 << WM8993_DCS_SERIES_NO_01_SHIFT);
-	wait_for_dc_servo(codec,
-			  WM8993_DCS_TRIG_SERIES_0 | WM8993_DCS_TRIG_SERIES_1);
+	/* If we're using a digital-only path and have a previously
+	 * calibrated DC servo offset stored then use that. */
+	if (hubs->class_w && hubs->class_w_dcs) {
+		dev_dbg(codec->dev, "Using cached DC servo offset %x\n",
+			hubs->class_w_dcs);
+		snd_soc_write(codec, WM8993_DC_SERVO_3, hubs->class_w_dcs);
+		wait_for_dc_servo(codec,
+				  WM8993_DCS_TRIG_DAC_WR_0 |
+				  WM8993_DCS_TRIG_DAC_WR_1);
+		return;
+	}
+
+	/* Devices not using a DCS code correction have startup mode */
+	if (hubs->dcs_codes) {
+		/* Set for 32 series updates */
+		snd_soc_update_bits(codec, WM8993_DC_SERVO_1,
+				    WM8993_DCS_SERIES_NO_01_MASK,
+				    32 << WM8993_DCS_SERIES_NO_01_SHIFT);
+		wait_for_dc_servo(codec,
+				  WM8993_DCS_TRIG_SERIES_0 |
+				  WM8993_DCS_TRIG_SERIES_1);
+	} else {
+		wait_for_dc_servo(codec,
+				  WM8993_DCS_TRIG_STARTUP_0 |
+				  WM8993_DCS_TRIG_STARTUP_1);
+	}
+
+	/* Different chips in the family support different readback
+	 * methods.
+	 */
+	switch (hubs->dcs_readback_mode) {
+	case 0:
+		reg_l = snd_soc_read(codec, WM8993_DC_SERVO_READBACK_1)
+			& WM8993_DCS_INTEG_CHAN_0_MASK;
+		reg_r = snd_soc_read(codec, WM8993_DC_SERVO_READBACK_2)
+			& WM8993_DCS_INTEG_CHAN_1_MASK;
+		break;
+	case 1:
+		reg = snd_soc_read(codec, WM8993_DC_SERVO_3);
+		reg_l = (reg & WM8993_DCS_DAC_WR_VAL_1_MASK)
+			>> WM8993_DCS_DAC_WR_VAL_1_SHIFT;
+		reg_r = reg & WM8993_DCS_DAC_WR_VAL_0_MASK;
+		break;
+	default:
+		WARN(1, "Unknown DCS readback method\n");
+		break;
+	}
+
+	dev_dbg(codec->dev, "DCS input: %x %x\n", reg_l, reg_r);
 
 	/* Apply correction to DC servo result */
 	if (hubs->dcs_codes) {
 		dev_dbg(codec->dev, "Applying %d code DC servo correction\n",
 			hubs->dcs_codes);
 
-		/* Different chips in the family support different
-		 * readback methods.
-		 */
-		switch (hubs->dcs_readback_mode) {
-		case 0:
-			reg_l = snd_soc_read(codec, WM8993_DC_SERVO_READBACK_1)
-				& WM8993_DCS_INTEG_CHAN_0_MASK;;
-			reg_r = snd_soc_read(codec, WM8993_DC_SERVO_READBACK_2)
-				& WM8993_DCS_INTEG_CHAN_1_MASK;
-			break;
-		case 1:
-			reg = snd_soc_read(codec, WM8993_DC_SERVO_3);
-			reg_l = (reg & WM8993_DCS_DAC_WR_VAL_1_MASK)
-				>> WM8993_DCS_DAC_WR_VAL_1_SHIFT;
-			reg_r = reg & WM8993_DCS_DAC_WR_VAL_0_MASK;
-			break;
-		default:
-			WARN(1, "Unknown DCS readback method\n");
-			break;
-		}
-
-		dev_dbg(codec->dev, "DCS input: %x %x\n", reg_l, reg_r);
-
 		/* HPOUT1L */
 		if (reg_l + hubs->dcs_codes > 0 &&
 		    reg_l + hubs->dcs_codes < 0xff)
@@ -148,7 +167,15 @@
 		wait_for_dc_servo(codec,
 				  WM8993_DCS_TRIG_DAC_WR_0 |
 				  WM8993_DCS_TRIG_DAC_WR_1);
+	} else {
+		dcs_cfg = reg_l << WM8993_DCS_DAC_WR_VAL_1_SHIFT;
+		dcs_cfg |= reg_r;
 	}
+
+	/* Save the calibrated offset if we're in class W mode and
+	 * therefore don't have any analogue signal mixed in. */
+	if (hubs->class_w)
+		hubs->class_w_dcs = dcs_cfg;
 }
 
 /*
@@ -163,6 +190,9 @@
 
 	ret = snd_soc_put_volsw_2r(kcontrol, ucontrol);
 
+	/* Updating the analogue gains invalidates the DC servo cache */
+	hubs->class_w_dcs = 0;
+
 	/* If we're applying an offset correction then updating the
 	 * calibration would be likely to introduce further offsets. */
 	if (hubs->dcs_codes)
@@ -791,6 +821,8 @@
 
 int wm_hubs_add_analogue_controls(struct snd_soc_codec *codec)
 {
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
+
 	/* Latch volume update bits & default ZC on */
 	snd_soc_update_bits(codec, WM8993_LEFT_LINE_INPUT_1_2_VOLUME,
 			    WM8993_IN1_VU, WM8993_IN1_VU);
@@ -819,7 +851,7 @@
 	snd_soc_add_controls(codec, analogue_snd_controls,
 			     ARRAY_SIZE(analogue_snd_controls));
 
-	snd_soc_dapm_new_controls(codec, analogue_dapm_widgets,
+	snd_soc_dapm_new_controls(dapm, analogue_dapm_widgets,
 				  ARRAY_SIZE(analogue_dapm_widgets));
 	return 0;
 }
@@ -828,24 +860,26 @@
 int wm_hubs_add_analogue_routes(struct snd_soc_codec *codec,
 				int lineout1_diff, int lineout2_diff)
 {
-	snd_soc_dapm_add_routes(codec, analogue_routes,
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
+
+	snd_soc_dapm_add_routes(dapm, analogue_routes,
 				ARRAY_SIZE(analogue_routes));
 
 	if (lineout1_diff)
-		snd_soc_dapm_add_routes(codec,
+		snd_soc_dapm_add_routes(dapm,
 					lineout1_diff_routes,
 					ARRAY_SIZE(lineout1_diff_routes));
 	else
-		snd_soc_dapm_add_routes(codec,
+		snd_soc_dapm_add_routes(dapm,
 					lineout1_se_routes,
 					ARRAY_SIZE(lineout1_se_routes));
 
 	if (lineout2_diff)
-		snd_soc_dapm_add_routes(codec,
+		snd_soc_dapm_add_routes(dapm,
 					lineout2_diff_routes,
 					ARRAY_SIZE(lineout2_diff_routes));
 	else
-		snd_soc_dapm_add_routes(codec,
+		snd_soc_dapm_add_routes(dapm,
 					lineout2_se_routes,
 					ARRAY_SIZE(lineout2_se_routes));
 
@@ -872,7 +906,7 @@
 	 * VMID as an output and can disable it.
 	 */
 	if (lineout1_diff && lineout2_diff)
-		codec->idle_bias_off = 1;
+		codec->dapm.idle_bias_off = 1;
 
 	if (lineout1fb)
 		snd_soc_update_bits(codec, WM8993_ADDITIONAL_CONTROL,
diff --git a/sound/soc/codecs/wm_hubs.h b/sound/soc/codecs/wm_hubs.h
index e51c166..f8a5e97 100644
--- a/sound/soc/codecs/wm_hubs.h
+++ b/sound/soc/codecs/wm_hubs.h
@@ -23,6 +23,9 @@
 	int dcs_codes;
 	int dcs_readback_mode;
 	int hp_startup_mode;
+
+	bool class_w;
+	u16 class_w_dcs;
 };
 
 extern int wm_hubs_add_analogue_controls(struct snd_soc_codec *);
diff --git a/sound/soc/davinci/davinci-evm.c b/sound/soc/davinci/davinci-evm.c
index bc9e6b0b..0c2d6ba 100644
--- a/sound/soc/davinci/davinci-evm.c
+++ b/sound/soc/davinci/davinci-evm.c
@@ -18,7 +18,6 @@
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 
 #include <asm/dma.h>
 #include <asm/mach-types.h>
@@ -27,7 +26,6 @@
 #include <mach/edma.h>
 #include <mach/mux.h>
 
-#include "../codecs/tlv320aic3x.h"
 #include "davinci-pcm.h"
 #include "davinci-i2s.h"
 #include "davinci-mcasp.h"
@@ -132,26 +130,27 @@
 static int evm_aic3x_init(struct snd_soc_pcm_runtime *rtd)
 {
 	struct snd_soc_codec *codec = rtd->codec;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 
 	/* Add davinci-evm specific widgets */
-	snd_soc_dapm_new_controls(codec, aic3x_dapm_widgets,
+	snd_soc_dapm_new_controls(dapm, aic3x_dapm_widgets,
 				  ARRAY_SIZE(aic3x_dapm_widgets));
 
 	/* Set up davinci-evm specific audio path audio_map */
-	snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+	snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 
 	/* not connected */
-	snd_soc_dapm_disable_pin(codec, "MONO_LOUT");
-	snd_soc_dapm_disable_pin(codec, "HPLCOM");
-	snd_soc_dapm_disable_pin(codec, "HPRCOM");
+	snd_soc_dapm_disable_pin(dapm, "MONO_LOUT");
+	snd_soc_dapm_disable_pin(dapm, "HPLCOM");
+	snd_soc_dapm_disable_pin(dapm, "HPRCOM");
 
 	/* always connected */
-	snd_soc_dapm_enable_pin(codec, "Headphone Jack");
-	snd_soc_dapm_enable_pin(codec, "Line Out");
-	snd_soc_dapm_enable_pin(codec, "Mic Jack");
-	snd_soc_dapm_enable_pin(codec, "Line In");
+	snd_soc_dapm_enable_pin(dapm, "Headphone Jack");
+	snd_soc_dapm_enable_pin(dapm, "Line Out");
+	snd_soc_dapm_enable_pin(dapm, "Mic Jack");
+	snd_soc_dapm_enable_pin(dapm, "Line In");
 
-	snd_soc_dapm_sync(codec);
+	snd_soc_dapm_sync(dapm);
 
 	return 0;
 }
diff --git a/sound/soc/davinci/davinci-sffsdr.c b/sound/soc/davinci/davinci-sffsdr.c
index 6c6666a..0fe558c 100644
--- a/sound/soc/davinci/davinci-sffsdr.c
+++ b/sound/soc/davinci/davinci-sffsdr.c
@@ -21,7 +21,6 @@
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 
 #include <asm/dma.h>
 #include <asm/mach-types.h>
diff --git a/sound/soc/ep93xx/ep93xx-i2s.c b/sound/soc/ep93xx/ep93xx-i2s.c
index 4f48733..9ac93f6 100644
--- a/sound/soc/ep93xx/ep93xx-i2s.c
+++ b/sound/soc/ep93xx/ep93xx-i2s.c
@@ -352,13 +352,13 @@
 	.playback	= {
 		.channels_min	= 2,
 		.channels_max	= 2,
-		.rates		= SNDRV_PCM_RATE_8000_48000,
+		.rates		= SNDRV_PCM_RATE_8000_96000,
 		.formats	= EP93XX_I2S_FORMATS,
 	},
 	.capture	= {
 		 .channels_min	= 2,
 		 .channels_max	= 2,
-		 .rates		= SNDRV_PCM_RATE_8000_48000,
+		 .rates		= SNDRV_PCM_RATE_8000_96000,
 		 .formats	= EP93XX_I2S_FORMATS,
 	},
 	.ops		= &ep93xx_i2s_dai_ops,
diff --git a/sound/soc/ep93xx/ep93xx-pcm.c b/sound/soc/ep93xx/ep93xx-pcm.c
index 2f121dd..0667077 100644
--- a/sound/soc/ep93xx/ep93xx-pcm.c
+++ b/sound/soc/ep93xx/ep93xx-pcm.c
@@ -35,9 +35,9 @@
 				   SNDRV_PCM_INFO_INTERLEAVED	|
 				   SNDRV_PCM_INFO_BLOCK_TRANSFER),
 				   
-	.rates			= SNDRV_PCM_RATE_8000_48000,
+	.rates			= SNDRV_PCM_RATE_8000_96000,
 	.rate_min		= SNDRV_PCM_RATE_8000,
-	.rate_max		= SNDRV_PCM_RATE_48000,
+	.rate_max		= SNDRV_PCM_RATE_96000,
 	
 	.formats		= (SNDRV_PCM_FMTBIT_S16_LE |
 				   SNDRV_PCM_FMTBIT_S24_LE |
diff --git a/sound/soc/ep93xx/snappercl15.c b/sound/soc/ep93xx/snappercl15.c
index 28ab5ff..dfe1d7f 100644
--- a/sound/soc/ep93xx/snappercl15.c
+++ b/sound/soc/ep93xx/snappercl15.c
@@ -15,7 +15,6 @@
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 
 #include <asm/mach-types.h>
 #include <mach/hardware.h>
@@ -79,11 +78,12 @@
 static int snappercl15_tlv320aic23_init(struct snd_soc_pcm_runtime *rtd)
 {
 	struct snd_soc_codec *codec = rtd->codec;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 
-	snd_soc_dapm_new_controls(codec, tlv320aic23_dapm_widgets,
+	snd_soc_dapm_new_controls(dapm, tlv320aic23_dapm_widgets,
 				  ARRAY_SIZE(tlv320aic23_dapm_widgets));
 
-	snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+	snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 	return 0;
 }
 
diff --git a/sound/soc/imx/eukrea-tlv320.c b/sound/soc/imx/eukrea-tlv320.c
index dd4fffd..e20c9e1 100644
--- a/sound/soc/imx/eukrea-tlv320.c
+++ b/sound/soc/imx/eukrea-tlv320.c
@@ -22,7 +22,6 @@
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <asm/mach-types.h>
 
 #include "../codecs/tlv320aic23.h"
diff --git a/sound/soc/imx/imx-ssi.c b/sound/soc/imx/imx-ssi.c
index 390b6ff..30894ea 100644
--- a/sound/soc/imx/imx-ssi.c
+++ b/sound/soc/imx/imx-ssi.c
@@ -456,13 +456,13 @@
 static struct snd_soc_dai_driver imx_ssi_dai = {
 	.probe = imx_ssi_dai_probe,
 	.playback = {
-		.channels_min = 2,
+		.channels_min = 1,
 		.channels_max = 2,
 		.rates = SNDRV_PCM_RATE_8000_96000,
 		.formats = SNDRV_PCM_FMTBIT_S16_LE,
 	},
 	.capture = {
-		.channels_min = 2,
+		.channels_min = 1,
 		.channels_max = 2,
 		.rates = SNDRV_PCM_RATE_8000_96000,
 		.formats = SNDRV_PCM_FMTBIT_S16_LE,
diff --git a/sound/soc/imx/phycore-ac97.c b/sound/soc/imx/phycore-ac97.c
index 9eabc28..a7deb5c 100644
--- a/sound/soc/imx/phycore-ac97.c
+++ b/sound/soc/imx/phycore-ac97.c
@@ -17,7 +17,6 @@
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <asm/mach-types.h>
 
 static struct snd_soc_card imx_phycore;
diff --git a/sound/soc/imx/wm1133-ev1.c b/sound/soc/imx/wm1133-ev1.c
index 30fdb15..75b4c72 100644
--- a/sound/soc/imx/wm1133-ev1.c
+++ b/sound/soc/imx/wm1133-ev1.c
@@ -19,7 +19,6 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 
 #include <mach/audmux.h>
 
@@ -213,11 +212,12 @@
 static int wm1133_ev1_init(struct snd_soc_pcm_runtime *rtd)
 {
 	struct snd_soc_codec *codec = rtd->codec;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 
-	snd_soc_dapm_new_controls(codec, wm1133_ev1_widgets,
+	snd_soc_dapm_new_controls(dapm, wm1133_ev1_widgets,
 				  ARRAY_SIZE(wm1133_ev1_widgets));
 
-	snd_soc_dapm_add_routes(codec, wm1133_ev1_map,
+	snd_soc_dapm_add_routes(dapm, wm1133_ev1_map,
 				ARRAY_SIZE(wm1133_ev1_map));
 
 	/* Headphone jack detection */
@@ -234,7 +234,7 @@
 	wm8350_mic_jack_detect(codec, &mic_jack, SND_JACK_MICROPHONE,
 			       SND_JACK_BTN_0);
 
-	snd_soc_dapm_force_enable_pin(codec, "Mic Bias");
+	snd_soc_dapm_force_enable_pin(dapm, "Mic Bias");
 
 	return 0;
 }
diff --git a/sound/soc/jz4740/jz4740-i2s.c b/sound/soc/jz4740/jz4740-i2s.c
index f3cffd1..419bf4f 100644
--- a/sound/soc/jz4740/jz4740-i2s.c
+++ b/sound/soc/jz4740/jz4740-i2s.c
@@ -28,7 +28,6 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/initval.h>
 
 #include "jz4740-i2s.h"
diff --git a/sound/soc/jz4740/qi_lb60.c b/sound/soc/jz4740/qi_lb60.c
index ef1a99e..49723e3 100644
--- a/sound/soc/jz4740/qi_lb60.c
+++ b/sound/soc/jz4740/qi_lb60.c
@@ -19,7 +19,6 @@
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <linux/gpio.h>
 
 #define QI_LB60_SND_GPIO JZ_GPIO_PORTB(29)
@@ -59,10 +58,11 @@
 {
 	struct snd_soc_codec *codec = rtd->codec;
 	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 	int ret;
 
-	snd_soc_dapm_nc_pin(codec, "LIN");
-	snd_soc_dapm_nc_pin(codec, "RIN");
+	snd_soc_dapm_nc_pin(dapm, "LIN");
+	snd_soc_dapm_nc_pin(dapm, "RIN");
 
 	ret = snd_soc_dai_set_fmt(cpu_dai, QI_LB60_DAIFMT);
 	if (ret < 0) {
@@ -70,9 +70,11 @@
 		return ret;
 	}
 
-	snd_soc_dapm_new_controls(codec, qi_lb60_widgets, ARRAY_SIZE(qi_lb60_widgets));
-	snd_soc_dapm_add_routes(codec, qi_lb60_routes, ARRAY_SIZE(qi_lb60_routes));
-	snd_soc_dapm_sync(codec);
+	snd_soc_dapm_new_controls(dapm, qi_lb60_widgets,
+				  ARRAY_SIZE(qi_lb60_widgets));
+	snd_soc_dapm_add_routes(dapm, qi_lb60_routes,
+				ARRAY_SIZE(qi_lb60_routes));
+	snd_soc_dapm_sync(dapm);
 
 	return 0;
 }
diff --git a/sound/soc/kirkwood/Kconfig b/sound/soc/kirkwood/Kconfig
index 16ec2a2..8f49e16 100644
--- a/sound/soc/kirkwood/Kconfig
+++ b/sound/soc/kirkwood/Kconfig
@@ -11,10 +11,19 @@
 
 config SND_KIRKWOOD_SOC_OPENRD
 	tristate "SoC Audio support for Kirkwood Openrd Client"
-	depends on SND_KIRKWOOD_SOC && MACH_OPENRD_CLIENT
+	depends on SND_KIRKWOOD_SOC && (MACH_OPENRD_CLIENT || MACH_OPENRD_ULTIMATE)
 	select SND_KIRKWOOD_SOC_I2S
 	select SND_SOC_CS42L51
 	help
 	  Say Y if you want to add support for SoC audio on
 	  Openrd Client.
 
+config SND_KIRKWOOD_SOC_T5325
+	tristate "SoC Audio support for HP t5325"
+	depends on SND_KIRKWOOD_SOC && MACH_T5325
+	select SND_KIRKWOOD_SOC_I2S
+	select SND_SOC_ALC5623
+	help
+	  Say Y if you want to add support for SoC audio on
+	  the HP t5325 thin client.
+
diff --git a/sound/soc/kirkwood/Makefile b/sound/soc/kirkwood/Makefile
index 33a16dc..3e62ae9 100644
--- a/sound/soc/kirkwood/Makefile
+++ b/sound/soc/kirkwood/Makefile
@@ -5,5 +5,7 @@
 obj-$(CONFIG_SND_KIRKWOOD_SOC_I2S) += snd-soc-kirkwood-i2s.o
 
 snd-soc-openrd-objs := kirkwood-openrd.o
+snd-soc-t5325-objs := kirkwood-t5325.o
 
 obj-$(CONFIG_SND_KIRKWOOD_SOC_OPENRD) += snd-soc-openrd.o
+obj-$(CONFIG_SND_KIRKWOOD_SOC_T5325) += snd-soc-t5325.o
diff --git a/sound/soc/kirkwood/kirkwood-openrd.c b/sound/soc/kirkwood/kirkwood-openrd.c
index 9d7c81e..d863afb 100644
--- a/sound/soc/kirkwood/kirkwood-openrd.c
+++ b/sound/soc/kirkwood/kirkwood-openrd.c
@@ -86,7 +86,7 @@
 {
 	int ret;
 
-	if (!machine_is_openrd_client())
+	if (!machine_is_openrd_client() && !machine_is_openrd_ultimate())
 		return 0;
 
 	openrd_client_snd_device = platform_device_alloc("soc-audio", -1);
diff --git a/sound/soc/kirkwood/kirkwood-t5325.c b/sound/soc/kirkwood/kirkwood-t5325.c
new file mode 100644
index 0000000..c8d2195
--- /dev/null
+++ b/sound/soc/kirkwood/kirkwood-t5325.c
@@ -0,0 +1,141 @@
+/*
+ * kirkwood-t5325.c
+ *
+ * (c) 2010 Arnaud Patard <arnaud.patard@rtp-net.org>
+ *
+ *  This program is free software; you can redistribute  it and/or modify it
+ *  under  the terms of  the GNU General  Public License as published by the
+ *  Free Software Foundation;  either version 2 of the  License, or (at your
+ *  option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <sound/soc.h>
+#include <mach/kirkwood.h>
+#include <plat/audio.h>
+#include <asm/mach-types.h>
+#include "../codecs/alc5623.h"
+
+static int t5325_hw_params(struct snd_pcm_substream *substream,
+		struct snd_pcm_hw_params *params)
+{
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct snd_soc_dai *codec_dai = rtd->codec_dai;
+	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+	int ret;
+	unsigned int freq, fmt;
+
+	fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_CBS_CFS;
+	ret = snd_soc_dai_set_fmt(cpu_dai, fmt);
+	if (ret < 0)
+		return ret;
+
+	ret = snd_soc_dai_set_fmt(codec_dai, fmt);
+	if (ret < 0)
+		return ret;
+
+	freq = params_rate(params) * 256;
+
+	return snd_soc_dai_set_sysclk(codec_dai, 0, freq, SND_SOC_CLOCK_IN);
+
+}
+
+static struct snd_soc_ops t5325_ops = {
+	.hw_params = t5325_hw_params,
+};
+
+static const struct snd_soc_dapm_widget t5325_dapm_widgets[] = {
+	SND_SOC_DAPM_HP("Headphone Jack", NULL),
+	SND_SOC_DAPM_SPK("Speaker", NULL),
+	SND_SOC_DAPM_MIC("Mic Jack", NULL),
+};
+
+static const struct snd_soc_dapm_route t5325_route[] = {
+	{ "Headphone Jack",	NULL,	"HPL" },
+	{ "Headphone Jack",	NULL,	"HPR" },
+
+	{"Speaker",		NULL,	"SPKOUT"},
+	{"Speaker",		NULL,	"SPKOUTN"},
+
+	{ "MIC1",		NULL,	"Mic Jack" },
+	{ "MIC2",		NULL,	"Mic Jack" },
+};
+
+static int t5325_dai_init(struct snd_soc_pcm_runtime *rtd)
+{
+	struct snd_soc_codec *codec = rtd->codec;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
+
+	snd_soc_dapm_new_controls(dapm, t5325_dapm_widgets,
+				ARRAY_SIZE(t5325_dapm_widgets));
+
+	snd_soc_dapm_add_routes(dapm, t5325_route, ARRAY_SIZE(t5325_route));
+
+	snd_soc_dapm_enable_pin(dapm, "Mic Jack");
+	snd_soc_dapm_enable_pin(dapm, "Headphone Jack");
+	snd_soc_dapm_enable_pin(dapm, "Speaker");
+
+	snd_soc_dapm_sync(dapm);
+
+	return 0;
+}
+
+static struct snd_soc_dai_link t5325_dai[] = {
+{
+	.name = "ALC5621",
+	.stream_name = "ALC5621 HiFi",
+	.cpu_dai_name = "kirkwood-i2s",
+	.platform_name = "kirkwood-pcm-audio",
+	.codec_dai_name = "alc5621-hifi",
+	.codec_name = "alc562x-codec.0-001a",
+	.ops = &t5325_ops,
+	.init = t5325_dai_init,
+},
+};
+
+
+static struct snd_soc_card t5325 = {
+	.name = "t5325",
+	.dai_link = t5325_dai,
+	.num_links = ARRAY_SIZE(t5325_dai),
+};
+
+static struct platform_device *t5325_snd_device;
+
+static int __init t5325_init(void)
+{
+	int ret;
+
+	if (!machine_is_t5325())
+		return 0;
+
+	t5325_snd_device = platform_device_alloc("soc-audio", -1);
+	if (!t5325_snd_device)
+		return -ENOMEM;
+
+	platform_set_drvdata(t5325_snd_device,
+			&t5325);
+
+	ret = platform_device_add(t5325_snd_device);
+	if (ret) {
+		printk(KERN_ERR "%s: platform_device_add failed\n", __func__);
+		platform_device_put(t5325_snd_device);
+	}
+
+	return ret;
+}
+module_init(t5325_init);
+
+static void __exit t5325_exit(void)
+{
+	platform_device_unregister(t5325_snd_device);
+}
+module_exit(t5325_exit);
+
+MODULE_AUTHOR("Arnaud Patard <arnaud.patard@rtp-net.org>");
+MODULE_DESCRIPTION("ALSA SoC t5325 audio client");
+MODULE_LICENSE("GPL");
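
The .codec_name used by t5325_dai ("alc562x-codec.0-001a") refers to an ALC5623-family codec on I2C bus 0 at address 0x1a; instantiating that I2C client is the job of the board setup code, which is not part of this diff. A hypothetical sketch of that registration (the "alc5621" id string is an assumption and has to match an entry in the alc5623 codec driver's i2c_device_id table):

#include <linux/i2c.h>
#include <linux/init.h>

static struct i2c_board_info __initdata t5325_i2c_board_info[] = {
	{
		/* binds the alc5623 driver; the resulting codec device
		 * name becomes "alc562x-codec.0-001a" (bus 0, addr 0x1a) */
		I2C_BOARD_INFO("alc5621", 0x1a),
	},
};

static void __init t5325_audio_i2c_init(void)
{
	/* called from the board's init_machine hook */
	i2c_register_board_info(0, t5325_i2c_board_info,
				ARRAY_SIZE(t5325_i2c_board_info));
}
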
diff --git a/sound/soc/nuc900/nuc900-audio.c b/sound/soc/nuc900/nuc900-audio.c
index 161f5b6..38a2d0d8 100644
--- a/sound/soc/nuc900/nuc900-audio.c
+++ b/sound/soc/nuc900/nuc900-audio.c
@@ -18,7 +18,6 @@
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 
 #include "nuc900-audio.h"
 
diff --git a/sound/soc/omap/am3517evm.c b/sound/soc/omap/am3517evm.c
index 979dd50..1617504 100644
--- a/sound/soc/omap/am3517evm.c
+++ b/sound/soc/omap/am3517evm.c
@@ -22,7 +22,6 @@
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 
 #include <asm/mach-types.h>
 #include <mach/hardware.h>
@@ -114,20 +113,21 @@
 static int am3517evm_aic23_init(struct snd_soc_pcm_runtime *rtd)
 {
 	struct snd_soc_codec *codec = rtd->codec;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 
 	/* Add am3517-evm specific widgets */
-	snd_soc_dapm_new_controls(codec, tlv320aic23_dapm_widgets,
+	snd_soc_dapm_new_controls(dapm, tlv320aic23_dapm_widgets,
 				  ARRAY_SIZE(tlv320aic23_dapm_widgets));
 
 	/* Set up davinci-evm specific audio path audio_map */
-	snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+	snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 
 	/* always connected */
-	snd_soc_dapm_enable_pin(codec, "Line Out");
-	snd_soc_dapm_enable_pin(codec, "Line In");
-	snd_soc_dapm_enable_pin(codec, "Mic In");
+	snd_soc_dapm_enable_pin(dapm, "Line Out");
+	snd_soc_dapm_enable_pin(dapm, "Line In");
+	snd_soc_dapm_enable_pin(dapm, "Mic In");
 
-	snd_soc_dapm_sync(codec);
+	snd_soc_dapm_sync(dapm);
 
 	return 0;
 }
diff --git a/sound/soc/omap/ams-delta.c b/sound/soc/omap/ams-delta.c
index 438146a..2101bdc 100644
--- a/sound/soc/omap/ams-delta.c
+++ b/sound/soc/omap/ams-delta.c
@@ -26,7 +26,7 @@
 #include <linux/spinlock.h>
 #include <linux/tty.h>
 
-#include <sound/soc-dapm.h>
+#include <sound/soc.h>
 #include <sound/jack.h>
 
 #include <asm/mach-types.h>
@@ -94,6 +94,7 @@
 					struct snd_ctl_elem_value *ucontrol)
 {
 	struct snd_soc_codec *codec =  snd_kcontrol_chip(kcontrol);
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 	struct soc_enum *control = (struct soc_enum *)kcontrol->private_value;
 	unsigned short pins;
 	int pin, changed = 0;
@@ -112,48 +113,48 @@
 
 	/* Setup pins after corresponding bits if changed */
 	pin = !!(pins & (1 << AMS_DELTA_MOUTHPIECE));
-	if (pin != snd_soc_dapm_get_pin_status(codec, "Mouthpiece")) {
+	if (pin != snd_soc_dapm_get_pin_status(dapm, "Mouthpiece")) {
 		changed = 1;
 		if (pin)
-			snd_soc_dapm_enable_pin(codec, "Mouthpiece");
+			snd_soc_dapm_enable_pin(dapm, "Mouthpiece");
 		else
-			snd_soc_dapm_disable_pin(codec, "Mouthpiece");
+			snd_soc_dapm_disable_pin(dapm, "Mouthpiece");
 	}
 	pin = !!(pins & (1 << AMS_DELTA_EARPIECE));
-	if (pin != snd_soc_dapm_get_pin_status(codec, "Earpiece")) {
+	if (pin != snd_soc_dapm_get_pin_status(dapm, "Earpiece")) {
 		changed = 1;
 		if (pin)
-			snd_soc_dapm_enable_pin(codec, "Earpiece");
+			snd_soc_dapm_enable_pin(dapm, "Earpiece");
 		else
-			snd_soc_dapm_disable_pin(codec, "Earpiece");
+			snd_soc_dapm_disable_pin(dapm, "Earpiece");
 	}
 	pin = !!(pins & (1 << AMS_DELTA_MICROPHONE));
-	if (pin != snd_soc_dapm_get_pin_status(codec, "Microphone")) {
+	if (pin != snd_soc_dapm_get_pin_status(dapm, "Microphone")) {
 		changed = 1;
 		if (pin)
-			snd_soc_dapm_enable_pin(codec, "Microphone");
+			snd_soc_dapm_enable_pin(dapm, "Microphone");
 		else
-			snd_soc_dapm_disable_pin(codec, "Microphone");
+			snd_soc_dapm_disable_pin(dapm, "Microphone");
 	}
 	pin = !!(pins & (1 << AMS_DELTA_SPEAKER));
-	if (pin != snd_soc_dapm_get_pin_status(codec, "Speaker")) {
+	if (pin != snd_soc_dapm_get_pin_status(dapm, "Speaker")) {
 		changed = 1;
 		if (pin)
-			snd_soc_dapm_enable_pin(codec, "Speaker");
+			snd_soc_dapm_enable_pin(dapm, "Speaker");
 		else
-			snd_soc_dapm_disable_pin(codec, "Speaker");
+			snd_soc_dapm_disable_pin(dapm, "Speaker");
 	}
 	pin = !!(pins & (1 << AMS_DELTA_AGC));
 	if (pin != ams_delta_audio_agc) {
 		ams_delta_audio_agc = pin;
 		changed = 1;
 		if (pin)
-			snd_soc_dapm_enable_pin(codec, "AGCIN");
+			snd_soc_dapm_enable_pin(dapm, "AGCIN");
 		else
-			snd_soc_dapm_disable_pin(codec, "AGCIN");
+			snd_soc_dapm_disable_pin(dapm, "AGCIN");
 	}
 	if (changed)
-		snd_soc_dapm_sync(codec);
+		snd_soc_dapm_sync(dapm);
 
 	mutex_unlock(&codec->mutex);
 
@@ -164,19 +165,20 @@
 					struct snd_ctl_elem_value *ucontrol)
 {
 	struct snd_soc_codec *codec =  snd_kcontrol_chip(kcontrol);
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 	unsigned short pins, mode;
 
-	pins = ((snd_soc_dapm_get_pin_status(codec, "Mouthpiece") <<
+	pins = ((snd_soc_dapm_get_pin_status(dapm, "Mouthpiece") <<
 							AMS_DELTA_MOUTHPIECE) |
-			(snd_soc_dapm_get_pin_status(codec, "Earpiece") <<
+			(snd_soc_dapm_get_pin_status(dapm, "Earpiece") <<
 							AMS_DELTA_EARPIECE));
 	if (pins)
-		pins |= (snd_soc_dapm_get_pin_status(codec, "Microphone") <<
+		pins |= (snd_soc_dapm_get_pin_status(dapm, "Microphone") <<
 							AMS_DELTA_MICROPHONE);
 	else
-		pins = ((snd_soc_dapm_get_pin_status(codec, "Microphone") <<
+		pins = ((snd_soc_dapm_get_pin_status(dapm, "Microphone") <<
 							AMS_DELTA_MICROPHONE) |
-			(snd_soc_dapm_get_pin_status(codec, "Speaker") <<
+			(snd_soc_dapm_get_pin_status(dapm, "Speaker") <<
 							AMS_DELTA_SPEAKER) |
 			(ams_delta_audio_agc << AMS_DELTA_AGC));
 
@@ -300,6 +302,7 @@
 static void cx81801_close(struct tty_struct *tty)
 {
 	struct snd_soc_codec *codec = tty->disc_data;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 
 	del_timer_sync(&cx81801_timer);
 
@@ -312,12 +315,12 @@
 	v253_ops.close(tty);
 
 	/* Revert back to default audio input/output constellation */
-	snd_soc_dapm_disable_pin(codec, "Mouthpiece");
-	snd_soc_dapm_enable_pin(codec, "Earpiece");
-	snd_soc_dapm_enable_pin(codec, "Microphone");
-	snd_soc_dapm_disable_pin(codec, "Speaker");
-	snd_soc_dapm_disable_pin(codec, "AGCIN");
-	snd_soc_dapm_sync(codec);
+	snd_soc_dapm_disable_pin(dapm, "Mouthpiece");
+	snd_soc_dapm_enable_pin(dapm, "Earpiece");
+	snd_soc_dapm_enable_pin(dapm, "Microphone");
+	snd_soc_dapm_disable_pin(dapm, "Speaker");
+	snd_soc_dapm_disable_pin(dapm, "AGCIN");
+	snd_soc_dapm_sync(dapm);
 }
 
 /* Line discipline .hangup() */
@@ -432,16 +435,16 @@
 	case SND_SOC_BIAS_ON:
 	case SND_SOC_BIAS_PREPARE:
 	case SND_SOC_BIAS_STANDBY:
-		if (codec->bias_level == SND_SOC_BIAS_OFF)
+		if (codec->dapm.bias_level == SND_SOC_BIAS_OFF)
 			ams_delta_latch2_write(AMS_DELTA_LATCH2_MODEM_NRESET,
 						AMS_DELTA_LATCH2_MODEM_NRESET);
 		break;
 	case SND_SOC_BIAS_OFF:
-		if (codec->bias_level != SND_SOC_BIAS_OFF)
+		if (codec->dapm.bias_level != SND_SOC_BIAS_OFF)
 			ams_delta_latch2_write(AMS_DELTA_LATCH2_MODEM_NRESET,
 						0);
 	}
-	codec->bias_level = level;
+	codec->dapm.bias_level = level;
 
 	return 0;
 }
@@ -492,6 +495,7 @@
 static int ams_delta_cx20442_init(struct snd_soc_pcm_runtime *rtd)
 {
 	struct snd_soc_codec *codec = rtd->codec;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 	struct snd_soc_dai *codec_dai = rtd->codec_dai;
 	struct snd_soc_card *card = rtd->card;
 	int ret;
@@ -541,7 +545,7 @@
 	}
 
 	/* Add board specific DAPM widgets and routes */
-	ret = snd_soc_dapm_new_controls(codec, ams_delta_dapm_widgets,
+	ret = snd_soc_dapm_new_controls(dapm, ams_delta_dapm_widgets,
 					ARRAY_SIZE(ams_delta_dapm_widgets));
 	if (ret) {
 		dev_warn(card->dev,
@@ -550,7 +554,7 @@
 		return 0;
 	}
 
-	ret = snd_soc_dapm_add_routes(codec, ams_delta_audio_map,
+	ret = snd_soc_dapm_add_routes(dapm, ams_delta_audio_map,
 					ARRAY_SIZE(ams_delta_audio_map));
 	if (ret) {
 		dev_warn(card->dev,
@@ -560,13 +564,13 @@
 	}
 
 	/* Set up initial pin constellation */
-	snd_soc_dapm_disable_pin(codec, "Mouthpiece");
-	snd_soc_dapm_enable_pin(codec, "Earpiece");
-	snd_soc_dapm_enable_pin(codec, "Microphone");
-	snd_soc_dapm_disable_pin(codec, "Speaker");
-	snd_soc_dapm_disable_pin(codec, "AGCIN");
-	snd_soc_dapm_disable_pin(codec, "AGCOUT");
-	snd_soc_dapm_sync(codec);
+	snd_soc_dapm_disable_pin(dapm, "Mouthpiece");
+	snd_soc_dapm_enable_pin(dapm, "Earpiece");
+	snd_soc_dapm_enable_pin(dapm, "Microphone");
+	snd_soc_dapm_disable_pin(dapm, "Speaker");
+	snd_soc_dapm_disable_pin(dapm, "AGCIN");
+	snd_soc_dapm_disable_pin(dapm, "AGCOUT");
+	snd_soc_dapm_sync(dapm);
 
 	/* Add virtual switch */
 	ret = snd_soc_add_controls(codec, ams_delta_audio_controls,
diff --git a/sound/soc/omap/igep0020.c b/sound/soc/omap/igep0020.c
index fd3a40f..0ae3470 100644
--- a/sound/soc/omap/igep0020.c
+++ b/sound/soc/omap/igep0020.c
@@ -24,7 +24,6 @@
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 
 #include <asm/mach-types.h>
 #include <mach/hardware.h>
diff --git a/sound/soc/omap/n810.c b/sound/soc/omap/n810.c
index a3b6d89..83d213b 100644
--- a/sound/soc/omap/n810.c
+++ b/sound/soc/omap/n810.c
@@ -27,7 +27,6 @@
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 
 #include <asm/mach-types.h>
 #include <mach/hardware.h>
@@ -36,7 +35,6 @@
 
 #include "omap-mcbsp.h"
 #include "omap-pcm.h"
-#include "../codecs/tlv320aic3x.h"
 
 #define N810_HEADSET_AMP_GPIO	10
 #define N810_SPEAKER_AMP_GPIO	101
@@ -58,6 +56,7 @@
 
 static void n810_ext_control(struct snd_soc_codec *codec)
 {
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 	int hp = 0, line1l = 0;
 
 	switch (n810_jack_func) {
@@ -72,25 +71,25 @@
 	}
 
 	if (n810_spk_func)
-		snd_soc_dapm_enable_pin(codec, "Ext Spk");
+		snd_soc_dapm_enable_pin(dapm, "Ext Spk");
 	else
-		snd_soc_dapm_disable_pin(codec, "Ext Spk");
+		snd_soc_dapm_disable_pin(dapm, "Ext Spk");
 
 	if (hp)
-		snd_soc_dapm_enable_pin(codec, "Headphone Jack");
+		snd_soc_dapm_enable_pin(dapm, "Headphone Jack");
 	else
-		snd_soc_dapm_disable_pin(codec, "Headphone Jack");
+		snd_soc_dapm_disable_pin(dapm, "Headphone Jack");
 	if (line1l)
-		snd_soc_dapm_enable_pin(codec, "LINE1L");
+		snd_soc_dapm_enable_pin(dapm, "LINE1L");
 	else
-		snd_soc_dapm_disable_pin(codec, "LINE1L");
+		snd_soc_dapm_disable_pin(dapm, "LINE1L");
 
 	if (n810_dmic_func)
-		snd_soc_dapm_enable_pin(codec, "DMic");
+		snd_soc_dapm_enable_pin(dapm, "DMic");
 	else
-		snd_soc_dapm_disable_pin(codec, "DMic");
+		snd_soc_dapm_disable_pin(dapm, "DMic");
 
-	snd_soc_dapm_sync(codec);
+	snd_soc_dapm_sync(dapm);
 }
 
 static int n810_startup(struct snd_pcm_substream *substream)
@@ -274,17 +273,18 @@
 static int n810_aic33_init(struct snd_soc_pcm_runtime *rtd)
 {
 	struct snd_soc_codec *codec = rtd->codec;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 	int err;
 
 	/* Not connected */
-	snd_soc_dapm_nc_pin(codec, "MONO_LOUT");
-	snd_soc_dapm_nc_pin(codec, "HPLCOM");
-	snd_soc_dapm_nc_pin(codec, "HPRCOM");
-	snd_soc_dapm_nc_pin(codec, "MIC3L");
-	snd_soc_dapm_nc_pin(codec, "MIC3R");
-	snd_soc_dapm_nc_pin(codec, "LINE1R");
-	snd_soc_dapm_nc_pin(codec, "LINE2L");
-	snd_soc_dapm_nc_pin(codec, "LINE2R");
+	snd_soc_dapm_nc_pin(dapm, "MONO_LOUT");
+	snd_soc_dapm_nc_pin(dapm, "HPLCOM");
+	snd_soc_dapm_nc_pin(dapm, "HPRCOM");
+	snd_soc_dapm_nc_pin(dapm, "MIC3L");
+	snd_soc_dapm_nc_pin(dapm, "MIC3R");
+	snd_soc_dapm_nc_pin(dapm, "LINE1R");
+	snd_soc_dapm_nc_pin(dapm, "LINE2L");
+	snd_soc_dapm_nc_pin(dapm, "LINE2R");
 
 	/* Add N810 specific controls */
 	err = snd_soc_add_controls(codec, aic33_n810_controls,
@@ -293,13 +293,13 @@
 		return err;
 
 	/* Add N810 specific widgets */
-	snd_soc_dapm_new_controls(codec, aic33_dapm_widgets,
+	snd_soc_dapm_new_controls(dapm, aic33_dapm_widgets,
 				  ARRAY_SIZE(aic33_dapm_widgets));
 
 	/* Set up N810 specific audio path audio_map */
-	snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+	snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 
-	snd_soc_dapm_sync(codec);
+	snd_soc_dapm_sync(dapm);
 
 	return 0;
 }
diff --git a/sound/soc/omap/omap-mcbsp.c b/sound/soc/omap/omap-mcbsp.c
index 7e84f24..d203f4d 100644
--- a/sound/soc/omap/omap-mcbsp.c
+++ b/sound/soc/omap/omap-mcbsp.c
@@ -102,6 +102,17 @@
 static const int omap24xx_dma_reqs[][2] = {};
 #endif
 
+#if defined(CONFIG_ARCH_OMAP4)
+static const int omap44xx_dma_reqs[][2] = {
+	{ OMAP44XX_DMA_MCBSP1_TX, OMAP44XX_DMA_MCBSP1_RX },
+	{ OMAP44XX_DMA_MCBSP2_TX, OMAP44XX_DMA_MCBSP2_RX },
+	{ OMAP44XX_DMA_MCBSP3_TX, OMAP44XX_DMA_MCBSP3_RX },
+	{ OMAP44XX_DMA_MCBSP4_TX, OMAP44XX_DMA_MCBSP4_RX },
+};
+#else
+static const int omap44xx_dma_reqs[][2] = {};
+#endif
+
 #if defined(CONFIG_ARCH_OMAP2420)
 static const unsigned long omap2420_mcbsp_port[][2] = {
 	{ OMAP24XX_MCBSP1_BASE + OMAP_MCBSP_REG_DXR1,
@@ -147,6 +158,21 @@
 static const unsigned long omap34xx_mcbsp_port[][2] = {};
 #endif
 
+#if defined(CONFIG_ARCH_OMAP4)
+static const unsigned long omap44xx_mcbsp_port[][2] = {
+	{ OMAP44XX_MCBSP1_BASE + OMAP_MCBSP_REG_DXR,
+	  OMAP44XX_MCBSP1_BASE + OMAP_MCBSP_REG_DRR },
+	{ OMAP44XX_MCBSP2_BASE + OMAP_MCBSP_REG_DXR,
+	  OMAP44XX_MCBSP2_BASE + OMAP_MCBSP_REG_DRR },
+	{ OMAP44XX_MCBSP3_BASE + OMAP_MCBSP_REG_DXR,
+	  OMAP44XX_MCBSP3_BASE + OMAP_MCBSP_REG_DRR },
+	{ OMAP44XX_MCBSP4_BASE + OMAP_MCBSP_REG_DXR,
+	  OMAP44XX_MCBSP4_BASE + OMAP_MCBSP_REG_DRR },
+};
+#else
+static const unsigned long omap44xx_mcbsp_port[][2] = {};
+#endif
+
 static void omap_mcbsp_set_threshold(struct snd_pcm_substream *substream)
 {
 	struct snd_soc_pcm_runtime *rtd = substream->private_data;
@@ -224,7 +250,7 @@
 	 * 2 channels (stereo): size is 128 / 2 = 64 frames (2 * 64 words)
 	 * 4 channels: size is 128 / 4 = 32 frames (4 * 32 words)
 	 */
-	if (cpu_is_omap343x()) {
+	if (cpu_is_omap343x() || cpu_is_omap44xx()) {
 		/*
 		* Rule for the buffer size. We should not allow
 		* smaller buffer than the FIFO size to avoid underruns
@@ -332,6 +358,9 @@
 	} else if (cpu_is_omap343x()) {
 		dma = omap24xx_dma_reqs[bus_id][substream->stream];
 		port = omap34xx_mcbsp_port[bus_id][substream->stream];
+	} else if (cpu_is_omap44xx()) {
+		dma = omap44xx_dma_reqs[bus_id][substream->stream];
+		port = omap44xx_mcbsp_port[bus_id][substream->stream];
 	} else {
 		return -ENODEV;
 	}
@@ -498,11 +527,11 @@
 	regs->spcr2	|= XINTM(3) | FREE;
 	regs->spcr1	|= RINTM(3);
 	/* RFIG and XFIG are not defined in 34xx */
-	if (!cpu_is_omap34xx()) {
+	if (!cpu_is_omap34xx() && !cpu_is_omap44xx()) {
 		regs->rcr2	|= RFIG;
 		regs->xcr2	|= XFIG;
 	}
-	if (cpu_is_omap2430() || cpu_is_omap34xx()) {
+	if (cpu_is_omap2430() || cpu_is_omap34xx() || cpu_is_omap44xx()) {
 		regs->xccr = DXENDLY(1) | XDMAEN | XDISABLE;
 		regs->rccr = RFULL_CYCLE | RDMAEN | RDISABLE;
 	}
diff --git a/sound/soc/omap/omap-mcbsp.h b/sound/soc/omap/omap-mcbsp.h
index ffdcc5a..110c106 100644
--- a/sound/soc/omap/omap-mcbsp.h
+++ b/sound/soc/omap/omap-mcbsp.h
@@ -50,6 +50,10 @@
 #undef  NUM_LINKS
 #define NUM_LINKS	3
 #endif
+#if defined(CONFIG_ARCH_OMAP4)
+#undef  NUM_LINKS
+#define NUM_LINKS	4
+#endif
 #if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3)
 #undef  NUM_LINKS
 #define NUM_LINKS	5
diff --git a/sound/soc/omap/omap2evm.c b/sound/soc/omap/omap2evm.c
index cf3fc8a..29b60d6 100644
--- a/sound/soc/omap/omap2evm.c
+++ b/sound/soc/omap/omap2evm.c
@@ -26,7 +26,6 @@
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 
 #include <asm/mach-types.h>
 #include <mach/hardware.h>
diff --git a/sound/soc/omap/omap3beagle.c b/sound/soc/omap/omap3beagle.c
index e56832b..40db813 100644
--- a/sound/soc/omap/omap3beagle.c
+++ b/sound/soc/omap/omap3beagle.c
@@ -24,7 +24,6 @@
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 
 #include <asm/mach-types.h>
 #include <mach/hardware.h>
diff --git a/sound/soc/omap/omap3evm.c b/sound/soc/omap/omap3evm.c
index 810f1e36..0daa044 100644
--- a/sound/soc/omap/omap3evm.c
+++ b/sound/soc/omap/omap3evm.c
@@ -22,7 +22,6 @@
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 
 #include <asm/mach-types.h>
 #include <mach/hardware.h>
diff --git a/sound/soc/omap/omap3pandora.c b/sound/soc/omap/omap3pandora.c
index 4ee33ce..8047c521e 100644
--- a/sound/soc/omap/omap3pandora.c
+++ b/sound/soc/omap/omap3pandora.c
@@ -28,7 +28,6 @@
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 
 #include <asm/mach-types.h>
 #include <plat/mcbsp.h>
@@ -170,51 +169,53 @@
 static int omap3pandora_out_init(struct snd_soc_pcm_runtime *rtd)
 {
 	struct snd_soc_codec *codec = rtd->codec;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 	int ret;
 
 	/* All TWL4030 output pins are floating */
-	snd_soc_dapm_nc_pin(codec, "EARPIECE");
-	snd_soc_dapm_nc_pin(codec, "PREDRIVEL");
-	snd_soc_dapm_nc_pin(codec, "PREDRIVER");
-	snd_soc_dapm_nc_pin(codec, "HSOL");
-	snd_soc_dapm_nc_pin(codec, "HSOR");
-	snd_soc_dapm_nc_pin(codec, "CARKITL");
-	snd_soc_dapm_nc_pin(codec, "CARKITR");
-	snd_soc_dapm_nc_pin(codec, "HFL");
-	snd_soc_dapm_nc_pin(codec, "HFR");
-	snd_soc_dapm_nc_pin(codec, "VIBRA");
+	snd_soc_dapm_nc_pin(dapm, "EARPIECE");
+	snd_soc_dapm_nc_pin(dapm, "PREDRIVEL");
+	snd_soc_dapm_nc_pin(dapm, "PREDRIVER");
+	snd_soc_dapm_nc_pin(dapm, "HSOL");
+	snd_soc_dapm_nc_pin(dapm, "HSOR");
+	snd_soc_dapm_nc_pin(dapm, "CARKITL");
+	snd_soc_dapm_nc_pin(dapm, "CARKITR");
+	snd_soc_dapm_nc_pin(dapm, "HFL");
+	snd_soc_dapm_nc_pin(dapm, "HFR");
+	snd_soc_dapm_nc_pin(dapm, "VIBRA");
 
-	ret = snd_soc_dapm_new_controls(codec, omap3pandora_out_dapm_widgets,
+	ret = snd_soc_dapm_new_controls(dapm, omap3pandora_out_dapm_widgets,
 				ARRAY_SIZE(omap3pandora_out_dapm_widgets));
 	if (ret < 0)
 		return ret;
 
-	snd_soc_dapm_add_routes(codec, omap3pandora_out_map,
+	snd_soc_dapm_add_routes(dapm, omap3pandora_out_map,
 		ARRAY_SIZE(omap3pandora_out_map));
 
-	return snd_soc_dapm_sync(codec);
+	return snd_soc_dapm_sync(dapm);
 }
 
 static int omap3pandora_in_init(struct snd_soc_pcm_runtime *rtd)
 {
 	struct snd_soc_codec *codec = rtd->codec;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 	int ret;
 
 	/* Not connected */
-	snd_soc_dapm_nc_pin(codec, "HSMIC");
-	snd_soc_dapm_nc_pin(codec, "CARKITMIC");
-	snd_soc_dapm_nc_pin(codec, "DIGIMIC0");
-	snd_soc_dapm_nc_pin(codec, "DIGIMIC1");
+	snd_soc_dapm_nc_pin(dapm, "HSMIC");
+	snd_soc_dapm_nc_pin(dapm, "CARKITMIC");
+	snd_soc_dapm_nc_pin(dapm, "DIGIMIC0");
+	snd_soc_dapm_nc_pin(dapm, "DIGIMIC1");
 
-	ret = snd_soc_dapm_new_controls(codec, omap3pandora_in_dapm_widgets,
+	ret = snd_soc_dapm_new_controls(dapm, omap3pandora_in_dapm_widgets,
 				ARRAY_SIZE(omap3pandora_in_dapm_widgets));
 	if (ret < 0)
 		return ret;
 
-	snd_soc_dapm_add_routes(codec, omap3pandora_in_map,
+	snd_soc_dapm_add_routes(dapm, omap3pandora_in_map,
 		ARRAY_SIZE(omap3pandora_in_map));
 
-	return snd_soc_dapm_sync(codec);
+	return snd_soc_dapm_sync(dapm);
 }
 
 static struct snd_soc_ops omap3pandora_ops = {
diff --git a/sound/soc/omap/osk5912.c b/sound/soc/omap/osk5912.c
index 65ae00e..7e75e77 100644
--- a/sound/soc/omap/osk5912.c
+++ b/sound/soc/omap/osk5912.c
@@ -26,7 +26,6 @@
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 
 #include <asm/mach-types.h>
 #include <mach/hardware.h>
@@ -116,19 +115,20 @@
 static int osk_tlv320aic23_init(struct snd_soc_pcm_runtime *rtd)
 {
 	struct snd_soc_codec *codec = rtd->codec;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 
 	/* Add osk5912 specific widgets */
-	snd_soc_dapm_new_controls(codec, tlv320aic23_dapm_widgets,
+	snd_soc_dapm_new_controls(dapm, tlv320aic23_dapm_widgets,
 				  ARRAY_SIZE(tlv320aic23_dapm_widgets));
 
 	/* Set up osk5912 specific audio path audio_map */
-	snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+	snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 
-	snd_soc_dapm_enable_pin(codec, "Headphone Jack");
-	snd_soc_dapm_enable_pin(codec, "Line In");
-	snd_soc_dapm_enable_pin(codec, "Mic Jack");
+	snd_soc_dapm_enable_pin(dapm, "Headphone Jack");
+	snd_soc_dapm_enable_pin(dapm, "Line In");
+	snd_soc_dapm_enable_pin(dapm, "Mic Jack");
 
-	snd_soc_dapm_sync(codec);
+	snd_soc_dapm_sync(dapm);
 
 	return 0;
 }
diff --git a/sound/soc/omap/overo.c b/sound/soc/omap/overo.c
index e95a607..bbcf380 100644
--- a/sound/soc/omap/overo.c
+++ b/sound/soc/omap/overo.c
@@ -24,7 +24,6 @@
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 
 #include <asm/mach-types.h>
 #include <mach/hardware.h>
diff --git a/sound/soc/omap/rx51.c b/sound/soc/omap/rx51.c
index 04b5723..09fb0df 100644
--- a/sound/soc/omap/rx51.c
+++ b/sound/soc/omap/rx51.c
@@ -30,14 +30,12 @@
 #include <sound/jack.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <plat/mcbsp.h>
 
 #include <asm/mach-types.h>
 
 #include "omap-mcbsp.h"
 #include "omap-pcm.h"
-#include "../codecs/tlv320aic3x.h"
 
 #define RX51_TVOUT_SEL_GPIO		40
 #define RX51_JACK_DETECT_GPIO		177
@@ -58,19 +56,21 @@
 
 static void rx51_ext_control(struct snd_soc_codec *codec)
 {
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
+
 	if (rx51_spk_func)
-		snd_soc_dapm_enable_pin(codec, "Ext Spk");
+		snd_soc_dapm_enable_pin(dapm, "Ext Spk");
 	else
-		snd_soc_dapm_disable_pin(codec, "Ext Spk");
+		snd_soc_dapm_disable_pin(dapm, "Ext Spk");
 	if (rx51_dmic_func)
-		snd_soc_dapm_enable_pin(codec, "DMic");
+		snd_soc_dapm_enable_pin(dapm, "DMic");
 	else
-		snd_soc_dapm_disable_pin(codec, "DMic");
+		snd_soc_dapm_disable_pin(dapm, "DMic");
 
 	gpio_set_value(RX51_TVOUT_SEL_GPIO,
 		       rx51_jack_func == RX51_JACK_TVOUT);
 
-	snd_soc_dapm_sync(codec);
+	snd_soc_dapm_sync(dapm);
 }
 
 static int rx51_startup(struct snd_pcm_substream *substream)
@@ -244,12 +244,13 @@
 static int rx51_aic34_init(struct snd_soc_pcm_runtime *rtd)
 {
 	struct snd_soc_codec *codec = rtd->codec;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 	int err;
 
 	/* Set up NC codec pins */
-	snd_soc_dapm_nc_pin(codec, "MIC3L");
-	snd_soc_dapm_nc_pin(codec, "MIC3R");
-	snd_soc_dapm_nc_pin(codec, "LINE1R");
+	snd_soc_dapm_nc_pin(dapm, "MIC3L");
+	snd_soc_dapm_nc_pin(dapm, "MIC3R");
+	snd_soc_dapm_nc_pin(dapm, "LINE1R");
 
 	/* Add RX-51 specific controls */
 	err = snd_soc_add_controls(codec, aic34_rx51_controls,
@@ -258,13 +259,13 @@
 		return err;
 
 	/* Add RX-51 specific widgets */
-	snd_soc_dapm_new_controls(codec, aic34_dapm_widgets,
+	snd_soc_dapm_new_controls(dapm, aic34_dapm_widgets,
 				  ARRAY_SIZE(aic34_dapm_widgets));
 
 	/* Set up RX-51 specific audio path audio_map */
-	snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+	snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 
-	snd_soc_dapm_sync(codec);
+	snd_soc_dapm_sync(dapm);
 
 	/* AV jack detection */
 	err = snd_soc_jack_new(codec, "AV Jack",
diff --git a/sound/soc/omap/sdp3430.c b/sound/soc/omap/sdp3430.c
index 07fbcf7..3f72d17 100644
--- a/sound/soc/omap/sdp3430.c
+++ b/sound/soc/omap/sdp3430.c
@@ -28,7 +28,6 @@
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/jack.h>
 
 #include <asm/mach-types.h>
@@ -191,39 +190,40 @@
 static int sdp3430_twl4030_init(struct snd_soc_pcm_runtime *rtd)
 {
 	struct snd_soc_codec *codec = rtd->codec;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 	int ret;
 
 	/* Add SDP3430 specific widgets */
-	ret = snd_soc_dapm_new_controls(codec, sdp3430_twl4030_dapm_widgets,
+	ret = snd_soc_dapm_new_controls(dapm, sdp3430_twl4030_dapm_widgets,
 				ARRAY_SIZE(sdp3430_twl4030_dapm_widgets));
 	if (ret)
 		return ret;
 
 	/* Set up SDP3430 specific audio path audio_map */
-	snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+	snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 
 	/* SDP3430 connected pins */
-	snd_soc_dapm_enable_pin(codec, "Ext Mic");
-	snd_soc_dapm_enable_pin(codec, "Ext Spk");
-	snd_soc_dapm_disable_pin(codec, "Headset Mic");
-	snd_soc_dapm_disable_pin(codec, "Headset Stereophone");
+	snd_soc_dapm_enable_pin(dapm, "Ext Mic");
+	snd_soc_dapm_enable_pin(dapm, "Ext Spk");
+	snd_soc_dapm_disable_pin(dapm, "Headset Mic");
+	snd_soc_dapm_disable_pin(dapm, "Headset Stereophone");
 
 	/* TWL4030 not connected pins */
-	snd_soc_dapm_nc_pin(codec, "AUXL");
-	snd_soc_dapm_nc_pin(codec, "AUXR");
-	snd_soc_dapm_nc_pin(codec, "CARKITMIC");
-	snd_soc_dapm_nc_pin(codec, "DIGIMIC0");
-	snd_soc_dapm_nc_pin(codec, "DIGIMIC1");
+	snd_soc_dapm_nc_pin(dapm, "AUXL");
+	snd_soc_dapm_nc_pin(dapm, "AUXR");
+	snd_soc_dapm_nc_pin(dapm, "CARKITMIC");
+	snd_soc_dapm_nc_pin(dapm, "DIGIMIC0");
+	snd_soc_dapm_nc_pin(dapm, "DIGIMIC1");
 
-	snd_soc_dapm_nc_pin(codec, "OUTL");
-	snd_soc_dapm_nc_pin(codec, "OUTR");
-	snd_soc_dapm_nc_pin(codec, "EARPIECE");
-	snd_soc_dapm_nc_pin(codec, "PREDRIVEL");
-	snd_soc_dapm_nc_pin(codec, "PREDRIVER");
-	snd_soc_dapm_nc_pin(codec, "CARKITL");
-	snd_soc_dapm_nc_pin(codec, "CARKITR");
+	snd_soc_dapm_nc_pin(dapm, "OUTL");
+	snd_soc_dapm_nc_pin(dapm, "OUTR");
+	snd_soc_dapm_nc_pin(dapm, "EARPIECE");
+	snd_soc_dapm_nc_pin(dapm, "PREDRIVEL");
+	snd_soc_dapm_nc_pin(dapm, "PREDRIVER");
+	snd_soc_dapm_nc_pin(dapm, "CARKITL");
+	snd_soc_dapm_nc_pin(dapm, "CARKITR");
 
-	ret = snd_soc_dapm_sync(codec);
+	ret = snd_soc_dapm_sync(dapm);
 	if (ret)
 		return ret;
 
diff --git a/sound/soc/omap/sdp4430.c b/sound/soc/omap/sdp4430.c
index 4b4463d..189e039 100644
--- a/sound/soc/omap/sdp4430.c
+++ b/sound/soc/omap/sdp4430.c
@@ -24,7 +24,7 @@
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
+#include <sound/jack.h>
 
 #include <asm/mach-types.h>
 #include <plat/hardware.h>
@@ -66,6 +66,21 @@
 	.hw_params = sdp4430_hw_params,
 };
 
+/* Headset jack */
+static struct snd_soc_jack hs_jack;
+
+/* Headset jack detection DAPM pins */
+static struct snd_soc_jack_pin hs_jack_pins[] = {
+	{
+		.pin = "Headset Mic",
+		.mask = SND_JACK_MICROPHONE,
+	},
+	{
+		.pin = "Headset Stereophone",
+		.mask = SND_JACK_HEADPHONE,
+	},
+};
+
 static int sdp4430_get_power_mode(struct snd_kcontrol *kcontrol,
 	struct snd_ctl_elem_value *ucontrol)
 {
@@ -102,6 +117,7 @@
 	SND_SOC_DAPM_MIC("Headset Mic", NULL),
 	SND_SOC_DAPM_HP("Headset Stereophone", NULL),
 	SND_SOC_DAPM_SPK("Earphone Spk", NULL),
+	SND_SOC_DAPM_INPUT("Aux/FM Stereo In"),
 };
 
 static const struct snd_soc_dapm_route audio_map[] = {
@@ -124,11 +140,16 @@
 
 	/* Earphone speaker */
 	{"Earphone Spk", NULL, "EP"},
+
+	/* Aux/FM Stereo In: AFML, AFMR */
+	{"AFML", NULL, "Aux/FM Stereo In"},
+	{"AFMR", NULL, "Aux/FM Stereo In"},
 };
 
 static int sdp4430_twl6040_init(struct snd_soc_pcm_runtime *rtd)
 {
 	struct snd_soc_codec *codec = rtd->codec;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 	int ret;
 
 	/* Add SDP4430 specific controls */
@@ -138,25 +159,39 @@
 		return ret;
 
 	/* Add SDP4430 specific widgets */
-	ret = snd_soc_dapm_new_controls(codec, sdp4430_twl6040_dapm_widgets,
+	ret = snd_soc_dapm_new_controls(dapm, sdp4430_twl6040_dapm_widgets,
 				ARRAY_SIZE(sdp4430_twl6040_dapm_widgets));
 	if (ret)
 		return ret;
 
 	/* Set up SDP4430 specific audio path audio_map */
-	snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+	snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 
 	/* SDP4430 connected pins */
-	snd_soc_dapm_enable_pin(codec, "Ext Mic");
-	snd_soc_dapm_enable_pin(codec, "Ext Spk");
-	snd_soc_dapm_enable_pin(codec, "Headset Mic");
-	snd_soc_dapm_enable_pin(codec, "Headset Stereophone");
+	snd_soc_dapm_enable_pin(dapm, "Ext Mic");
+	snd_soc_dapm_enable_pin(dapm, "Ext Spk");
+	snd_soc_dapm_enable_pin(dapm, "AFML");
+	snd_soc_dapm_enable_pin(dapm, "AFMR");
+	snd_soc_dapm_enable_pin(dapm, "Headset Mic");
+	snd_soc_dapm_enable_pin(dapm, "Headset Stereophone");
 
-	/* TWL6040 not connected pins */
-	snd_soc_dapm_nc_pin(codec, "AFML");
-	snd_soc_dapm_nc_pin(codec, "AFMR");
+	ret = snd_soc_dapm_sync(dapm);
+	if (ret)
+		return ret;
 
-	ret = snd_soc_dapm_sync(codec);
+	/* Headset jack detection */
+	ret = snd_soc_jack_new(codec, "Headset Jack",
+				SND_JACK_HEADSET, &hs_jack);
+	if (ret)
+		return ret;
+
+	ret = snd_soc_jack_add_pins(&hs_jack, ARRAY_SIZE(hs_jack_pins),
+				hs_jack_pins);
+
+	if (machine_is_omap_4430sdp())
+		twl6040_hs_jack_detect(codec, &hs_jack, SND_JACK_HEADSET);
+	else
+		snd_soc_jack_report(&hs_jack, SND_JACK_HEADSET, SND_JACK_HEADSET);
 
 	return ret;
 }
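
The sdp4430 init above creates a headset jack and either hands it to the codec's detection logic (twl6040_hs_jack_detect) or reports it as permanently inserted. On boards with a dedicated detect GPIO, the same snd_soc_jack can instead be driven from an interrupt handler; a hedged sketch, where the handler and HS_DETECT_GPIO are hypothetical and not part of this patch:

static irqreturn_t hs_detect_irq(int irq, void *data)
{
	struct snd_soc_jack *jack = data;
	int present = gpio_get_value(HS_DETECT_GPIO);

	/* DAPM toggles the pins attached via snd_soc_jack_add_pins() */
	snd_soc_jack_report(jack, present ? SND_JACK_HEADSET : 0,
			    SND_JACK_HEADSET);
	return IRQ_HANDLED;
}
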
diff --git a/sound/soc/omap/zoom2.c b/sound/soc/omap/zoom2.c
index 718031e..0170994 100644
--- a/sound/soc/omap/zoom2.c
+++ b/sound/soc/omap/zoom2.c
@@ -24,7 +24,6 @@
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 
 #include <asm/mach-types.h>
 #include <mach/hardware.h>
@@ -162,35 +161,36 @@
 static int zoom2_twl4030_init(struct snd_soc_pcm_runtime *rtd)
 {
 	struct snd_soc_codec *codec = rtd->codec;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 	int ret;
 
 	/* Add Zoom2 specific widgets */
-	ret = snd_soc_dapm_new_controls(codec, zoom2_twl4030_dapm_widgets,
+	ret = snd_soc_dapm_new_controls(dapm, zoom2_twl4030_dapm_widgets,
 				ARRAY_SIZE(zoom2_twl4030_dapm_widgets));
 	if (ret)
 		return ret;
 
 	/* Set up Zoom2 specific audio path audio_map */
-	snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+	snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 
 	/* Zoom2 connected pins */
-	snd_soc_dapm_enable_pin(codec, "Ext Mic");
-	snd_soc_dapm_enable_pin(codec, "Ext Spk");
-	snd_soc_dapm_enable_pin(codec, "Headset Mic");
-	snd_soc_dapm_enable_pin(codec, "Headset Stereophone");
-	snd_soc_dapm_enable_pin(codec, "Aux In");
+	snd_soc_dapm_enable_pin(dapm, "Ext Mic");
+	snd_soc_dapm_enable_pin(dapm, "Ext Spk");
+	snd_soc_dapm_enable_pin(dapm, "Headset Mic");
+	snd_soc_dapm_enable_pin(dapm, "Headset Stereophone");
+	snd_soc_dapm_enable_pin(dapm, "Aux In");
 
 	/* TWL4030 not connected pins */
-	snd_soc_dapm_nc_pin(codec, "CARKITMIC");
-	snd_soc_dapm_nc_pin(codec, "DIGIMIC0");
-	snd_soc_dapm_nc_pin(codec, "DIGIMIC1");
-	snd_soc_dapm_nc_pin(codec, "EARPIECE");
-	snd_soc_dapm_nc_pin(codec, "PREDRIVEL");
-	snd_soc_dapm_nc_pin(codec, "PREDRIVER");
-	snd_soc_dapm_nc_pin(codec, "CARKITL");
-	snd_soc_dapm_nc_pin(codec, "CARKITR");
+	snd_soc_dapm_nc_pin(dapm, "CARKITMIC");
+	snd_soc_dapm_nc_pin(dapm, "DIGIMIC0");
+	snd_soc_dapm_nc_pin(dapm, "DIGIMIC1");
+	snd_soc_dapm_nc_pin(dapm, "EARPIECE");
+	snd_soc_dapm_nc_pin(dapm, "PREDRIVEL");
+	snd_soc_dapm_nc_pin(dapm, "PREDRIVER");
+	snd_soc_dapm_nc_pin(dapm, "CARKITL");
+	snd_soc_dapm_nc_pin(dapm, "CARKITR");
 
-	ret = snd_soc_dapm_sync(codec);
+	ret = snd_soc_dapm_sync(dapm);
 
 	return ret;
 }
diff --git a/sound/soc/pxa/corgi.c b/sound/soc/pxa/corgi.c
index f451acd..fc592f0 100644
--- a/sound/soc/pxa/corgi.c
+++ b/sound/soc/pxa/corgi.c
@@ -23,7 +23,6 @@
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 
 #include <asm/mach-types.h>
 #include <mach/corgi.h>
@@ -48,51 +47,53 @@
 
 static void corgi_ext_control(struct snd_soc_codec *codec)
 {
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
+
 	/* set up jack connection */
 	switch (corgi_jack_func) {
 	case CORGI_HP:
 		/* set = unmute headphone */
 		gpio_set_value(CORGI_GPIO_MUTE_L, 1);
 		gpio_set_value(CORGI_GPIO_MUTE_R, 1);
-		snd_soc_dapm_disable_pin(codec, "Mic Jack");
-		snd_soc_dapm_disable_pin(codec, "Line Jack");
-		snd_soc_dapm_enable_pin(codec, "Headphone Jack");
-		snd_soc_dapm_disable_pin(codec, "Headset Jack");
+		snd_soc_dapm_disable_pin(dapm, "Mic Jack");
+		snd_soc_dapm_disable_pin(dapm, "Line Jack");
+		snd_soc_dapm_enable_pin(dapm, "Headphone Jack");
+		snd_soc_dapm_disable_pin(dapm, "Headset Jack");
 		break;
 	case CORGI_MIC:
 		/* reset = mute headphone */
 		gpio_set_value(CORGI_GPIO_MUTE_L, 0);
 		gpio_set_value(CORGI_GPIO_MUTE_R, 0);
-		snd_soc_dapm_enable_pin(codec, "Mic Jack");
-		snd_soc_dapm_disable_pin(codec, "Line Jack");
-		snd_soc_dapm_disable_pin(codec, "Headphone Jack");
-		snd_soc_dapm_disable_pin(codec, "Headset Jack");
+		snd_soc_dapm_enable_pin(dapm, "Mic Jack");
+		snd_soc_dapm_disable_pin(dapm, "Line Jack");
+		snd_soc_dapm_disable_pin(dapm, "Headphone Jack");
+		snd_soc_dapm_disable_pin(dapm, "Headset Jack");
 		break;
 	case CORGI_LINE:
 		gpio_set_value(CORGI_GPIO_MUTE_L, 0);
 		gpio_set_value(CORGI_GPIO_MUTE_R, 0);
-		snd_soc_dapm_disable_pin(codec, "Mic Jack");
-		snd_soc_dapm_enable_pin(codec, "Line Jack");
-		snd_soc_dapm_disable_pin(codec, "Headphone Jack");
-		snd_soc_dapm_disable_pin(codec, "Headset Jack");
+		snd_soc_dapm_disable_pin(dapm, "Mic Jack");
+		snd_soc_dapm_enable_pin(dapm, "Line Jack");
+		snd_soc_dapm_disable_pin(dapm, "Headphone Jack");
+		snd_soc_dapm_disable_pin(dapm, "Headset Jack");
 		break;
 	case CORGI_HEADSET:
 		gpio_set_value(CORGI_GPIO_MUTE_L, 0);
 		gpio_set_value(CORGI_GPIO_MUTE_R, 1);
-		snd_soc_dapm_enable_pin(codec, "Mic Jack");
-		snd_soc_dapm_disable_pin(codec, "Line Jack");
-		snd_soc_dapm_disable_pin(codec, "Headphone Jack");
-		snd_soc_dapm_enable_pin(codec, "Headset Jack");
+		snd_soc_dapm_enable_pin(dapm, "Mic Jack");
+		snd_soc_dapm_disable_pin(dapm, "Line Jack");
+		snd_soc_dapm_disable_pin(dapm, "Headphone Jack");
+		snd_soc_dapm_enable_pin(dapm, "Headset Jack");
 		break;
 	}
 
 	if (corgi_spk_func == CORGI_SPK_ON)
-		snd_soc_dapm_enable_pin(codec, "Ext Spk");
+		snd_soc_dapm_enable_pin(dapm, "Ext Spk");
 	else
-		snd_soc_dapm_disable_pin(codec, "Ext Spk");
+		snd_soc_dapm_disable_pin(dapm, "Ext Spk");
 
 	/* signal a DAPM event */
-	snd_soc_dapm_sync(codec);
+	snd_soc_dapm_sync(dapm);
 }
 
 static int corgi_startup(struct snd_pcm_substream *substream)
@@ -279,10 +280,11 @@
 static int corgi_wm8731_init(struct snd_soc_pcm_runtime *rtd)
 {
 	struct snd_soc_codec *codec = rtd->codec;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 	int err;
 
-	snd_soc_dapm_nc_pin(codec, "LLINEIN");
-	snd_soc_dapm_nc_pin(codec, "RLINEIN");
+	snd_soc_dapm_nc_pin(dapm, "LLINEIN");
+	snd_soc_dapm_nc_pin(dapm, "RLINEIN");
 
 	/* Add corgi specific controls */
 	err = snd_soc_add_controls(codec, wm8731_corgi_controls,
@@ -291,13 +293,13 @@
 		return err;
 
 	/* Add corgi specific widgets */
-	snd_soc_dapm_new_controls(codec, wm8731_dapm_widgets,
+	snd_soc_dapm_new_controls(dapm, wm8731_dapm_widgets,
 				  ARRAY_SIZE(wm8731_dapm_widgets));
 
 	/* Set up corgi specific audio path audio_map */
-	snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+	snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 
-	snd_soc_dapm_sync(codec);
+	snd_soc_dapm_sync(dapm);
 	return 0;
 }
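
corgi_ext_control() above is driven from the board's jack- and speaker-function mixer controls: when userspace changes the enum, the put handler records the selection and re-runs the pin setup. A sketch of how such a control handler is typically wired up (the handler name is illustrative; only corgi_jack_func and corgi_ext_control come from this file):

static int corgi_set_jack(struct snd_kcontrol *kcontrol,
			  struct snd_ctl_elem_value *ucontrol)
{
	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);

	if (corgi_jack_func == ucontrol->value.enumerated.item[0])
		return 0;

	corgi_jack_func = ucontrol->value.enumerated.item[0];
	corgi_ext_control(codec);	/* re-apply the DAPM pin state */
	return 1;
}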
 
diff --git a/sound/soc/pxa/e740_wm9705.c b/sound/soc/pxa/e740_wm9705.c
index c82cedb..28333e7 100644
--- a/sound/soc/pxa/e740_wm9705.c
+++ b/sound/soc/pxa/e740_wm9705.c
@@ -16,7 +16,6 @@
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 
 #include <mach/audio.h>
 #include <mach/eseries-gpio.h>
@@ -92,23 +91,24 @@
 static int e740_ac97_init(struct snd_soc_pcm_runtime *rtd)
 {
 	struct snd_soc_codec *codec = rtd->codec;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 
-	snd_soc_dapm_nc_pin(codec, "HPOUTL");
-	snd_soc_dapm_nc_pin(codec, "HPOUTR");
-	snd_soc_dapm_nc_pin(codec, "PHONE");
-	snd_soc_dapm_nc_pin(codec, "LINEINL");
-	snd_soc_dapm_nc_pin(codec, "LINEINR");
-	snd_soc_dapm_nc_pin(codec, "CDINL");
-	snd_soc_dapm_nc_pin(codec, "CDINR");
-	snd_soc_dapm_nc_pin(codec, "PCBEEP");
-	snd_soc_dapm_nc_pin(codec, "MIC2");
+	snd_soc_dapm_nc_pin(dapm, "HPOUTL");
+	snd_soc_dapm_nc_pin(dapm, "HPOUTR");
+	snd_soc_dapm_nc_pin(dapm, "PHONE");
+	snd_soc_dapm_nc_pin(dapm, "LINEINL");
+	snd_soc_dapm_nc_pin(dapm, "LINEINR");
+	snd_soc_dapm_nc_pin(dapm, "CDINL");
+	snd_soc_dapm_nc_pin(dapm, "CDINR");
+	snd_soc_dapm_nc_pin(dapm, "PCBEEP");
+	snd_soc_dapm_nc_pin(dapm, "MIC2");
 
-	snd_soc_dapm_new_controls(codec, e740_dapm_widgets,
+	snd_soc_dapm_new_controls(dapm, e740_dapm_widgets,
 					ARRAY_SIZE(e740_dapm_widgets));
 
-	snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+	snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 
-	snd_soc_dapm_sync(codec);
+	snd_soc_dapm_sync(dapm);
 
 	return 0;
 }
diff --git a/sound/soc/pxa/e750_wm9705.c b/sound/soc/pxa/e750_wm9705.c
index 4c14380..01bf316 100644
--- a/sound/soc/pxa/e750_wm9705.c
+++ b/sound/soc/pxa/e750_wm9705.c
@@ -16,7 +16,6 @@
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 
 #include <mach/audio.h>
 #include <mach/eseries-gpio.h>
@@ -74,23 +73,24 @@
 static int e750_ac97_init(struct snd_soc_pcm_runtime *rtd)
 {
 	struct snd_soc_codec *codec = rtd->codec;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 
-	snd_soc_dapm_nc_pin(codec, "LOUT");
-	snd_soc_dapm_nc_pin(codec, "ROUT");
-	snd_soc_dapm_nc_pin(codec, "PHONE");
-	snd_soc_dapm_nc_pin(codec, "LINEINL");
-	snd_soc_dapm_nc_pin(codec, "LINEINR");
-	snd_soc_dapm_nc_pin(codec, "CDINL");
-	snd_soc_dapm_nc_pin(codec, "CDINR");
-	snd_soc_dapm_nc_pin(codec, "PCBEEP");
-	snd_soc_dapm_nc_pin(codec, "MIC2");
+	snd_soc_dapm_nc_pin(dapm, "LOUT");
+	snd_soc_dapm_nc_pin(dapm, "ROUT");
+	snd_soc_dapm_nc_pin(dapm, "PHONE");
+	snd_soc_dapm_nc_pin(dapm, "LINEINL");
+	snd_soc_dapm_nc_pin(dapm, "LINEINR");
+	snd_soc_dapm_nc_pin(dapm, "CDINL");
+	snd_soc_dapm_nc_pin(dapm, "CDINR");
+	snd_soc_dapm_nc_pin(dapm, "PCBEEP");
+	snd_soc_dapm_nc_pin(dapm, "MIC2");
 
-	snd_soc_dapm_new_controls(codec, e750_dapm_widgets,
+	snd_soc_dapm_new_controls(dapm, e750_dapm_widgets,
 					ARRAY_SIZE(e750_dapm_widgets));
 
-	snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+	snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 
-	snd_soc_dapm_sync(codec);
+	snd_soc_dapm_sync(dapm);
 
 	return 0;
 }
diff --git a/sound/soc/pxa/e800_wm9712.c b/sound/soc/pxa/e800_wm9712.c
index d42e5fe..c6a37c6e 100644
--- a/sound/soc/pxa/e800_wm9712.c
+++ b/sound/soc/pxa/e800_wm9712.c
@@ -16,7 +16,6 @@
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 
 #include <asm/mach-types.h>
 #include <mach/audio.h>
@@ -75,12 +74,13 @@
 static int e800_ac97_init(struct snd_soc_pcm_runtime *rtd)
 {
 	struct snd_soc_codec *codec = rtd->codec;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 
-	snd_soc_dapm_new_controls(codec, e800_dapm_widgets,
+	snd_soc_dapm_new_controls(dapm, e800_dapm_widgets,
 					ARRAY_SIZE(e800_dapm_widgets));
 
-	snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
-	snd_soc_dapm_sync(codec);
+	snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
+	snd_soc_dapm_sync(dapm);
 
 	return 0;
 }
diff --git a/sound/soc/pxa/em-x270.c b/sound/soc/pxa/em-x270.c
index eadf9d3..fc22e6e 100644
--- a/sound/soc/pxa/em-x270.c
+++ b/sound/soc/pxa/em-x270.c
@@ -26,7 +26,6 @@
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 
 #include <asm/mach-types.h>
 #include <mach/audio.h>
diff --git a/sound/soc/pxa/magician.c b/sound/soc/pxa/magician.c
index 5ef0526..67dcc36 100644
--- a/sound/soc/pxa/magician.c
+++ b/sound/soc/pxa/magician.c
@@ -26,7 +26,6 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/uda1380.h>
 
 #include <mach/magician.h>
@@ -44,27 +43,29 @@
 
 static void magician_ext_control(struct snd_soc_codec *codec)
 {
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
+
 	if (magician_spk_switch)
-		snd_soc_dapm_enable_pin(codec, "Speaker");
+		snd_soc_dapm_enable_pin(dapm, "Speaker");
 	else
-		snd_soc_dapm_disable_pin(codec, "Speaker");
+		snd_soc_dapm_disable_pin(dapm, "Speaker");
 	if (magician_hp_switch)
-		snd_soc_dapm_enable_pin(codec, "Headphone Jack");
+		snd_soc_dapm_enable_pin(dapm, "Headphone Jack");
 	else
-		snd_soc_dapm_disable_pin(codec, "Headphone Jack");
+		snd_soc_dapm_disable_pin(dapm, "Headphone Jack");
 
 	switch (magician_in_sel) {
 	case MAGICIAN_MIC:
-		snd_soc_dapm_disable_pin(codec, "Headset Mic");
-		snd_soc_dapm_enable_pin(codec, "Call Mic");
+		snd_soc_dapm_disable_pin(dapm, "Headset Mic");
+		snd_soc_dapm_enable_pin(dapm, "Call Mic");
 		break;
 	case MAGICIAN_MIC_EXT:
-		snd_soc_dapm_disable_pin(codec, "Call Mic");
-		snd_soc_dapm_enable_pin(codec, "Headset Mic");
+		snd_soc_dapm_disable_pin(dapm, "Call Mic");
+		snd_soc_dapm_enable_pin(dapm, "Headset Mic");
 		break;
 	}
 
-	snd_soc_dapm_sync(codec);
+	snd_soc_dapm_sync(dapm);
 }
 
 static int magician_startup(struct snd_pcm_substream *substream)
@@ -399,15 +400,16 @@
 static int magician_uda1380_init(struct snd_soc_pcm_runtime *rtd)
 {
 	struct snd_soc_codec *codec = rtd->codec;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 	int err;
 
 	/* NC codec pins */
-	snd_soc_dapm_nc_pin(codec, "VOUTLHP");
-	snd_soc_dapm_nc_pin(codec, "VOUTRHP");
+	snd_soc_dapm_nc_pin(dapm, "VOUTLHP");
+	snd_soc_dapm_nc_pin(dapm, "VOUTRHP");
 
 	/* FIXME: is anything connected here? */
-	snd_soc_dapm_nc_pin(codec, "VINL");
-	snd_soc_dapm_nc_pin(codec, "VINR");
+	snd_soc_dapm_nc_pin(dapm, "VINL");
+	snd_soc_dapm_nc_pin(dapm, "VINR");
 
 	/* Add magician specific controls */
 	err = snd_soc_add_controls(codec, uda1380_magician_controls,
@@ -416,13 +418,13 @@
 		return err;
 
 	/* Add magician specific widgets */
-	snd_soc_dapm_new_controls(codec, uda1380_dapm_widgets,
+	snd_soc_dapm_new_controls(dapm, uda1380_dapm_widgets,
 				  ARRAY_SIZE(uda1380_dapm_widgets));
 
 	/* Set up magician specific audio path interconnects */
-	snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+	snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 
-	snd_soc_dapm_sync(codec);
+	snd_soc_dapm_sync(dapm);
 	return 0;
 }
 
diff --git a/sound/soc/pxa/mioa701_wm9713.c b/sound/soc/pxa/mioa701_wm9713.c
index f284cc5..0d70fc8 100644
--- a/sound/soc/pxa/mioa701_wm9713.c
+++ b/sound/soc/pxa/mioa701_wm9713.c
@@ -50,7 +50,6 @@
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/initval.h>
 #include <sound/ac97_codec.h>
 
@@ -130,13 +129,14 @@
 static int mioa701_wm9713_init(struct snd_soc_pcm_runtime *rtd)
 {
 	struct snd_soc_codec *codec = rtd->codec;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 	unsigned short reg;
 
 	/* Add mioa701 specific widgets */
-	snd_soc_dapm_new_controls(codec, ARRAY_AND_SIZE(mioa701_dapm_widgets));
+	snd_soc_dapm_new_controls(dapm, ARRAY_AND_SIZE(mioa701_dapm_widgets));
 
 	/* Set up mioa701 specific audio path audio_map */
-	snd_soc_dapm_add_routes(codec, ARRAY_AND_SIZE(audio_map));
+	snd_soc_dapm_add_routes(dapm, ARRAY_AND_SIZE(audio_map));
 
 	/* Prepare GPIO8 for rear speaker amplifier */
 	reg = codec->driver->read(codec, AC97_GPIO_CFG);
@@ -146,12 +146,12 @@
 	reg = codec->driver->read(codec, AC97_3D_CONTROL);
 	codec->driver->write(codec, AC97_3D_CONTROL, reg | 0xc000);
 
-	snd_soc_dapm_enable_pin(codec, "Front Speaker");
-	snd_soc_dapm_enable_pin(codec, "Rear Speaker");
-	snd_soc_dapm_enable_pin(codec, "Front Mic");
-	snd_soc_dapm_enable_pin(codec, "GSM Line In");
-	snd_soc_dapm_enable_pin(codec, "GSM Line Out");
-	snd_soc_dapm_sync(codec);
+	snd_soc_dapm_enable_pin(dapm, "Front Speaker");
+	snd_soc_dapm_enable_pin(dapm, "Rear Speaker");
+	snd_soc_dapm_enable_pin(dapm, "Front Mic");
+	snd_soc_dapm_enable_pin(dapm, "GSM Line In");
+	snd_soc_dapm_enable_pin(dapm, "GSM Line Out");
+	snd_soc_dapm_sync(dapm);
 
 	return 0;
 }
diff --git a/sound/soc/pxa/palm27x.c b/sound/soc/pxa/palm27x.c
index 13f6d48..857db96 100644
--- a/sound/soc/pxa/palm27x.c
+++ b/sound/soc/pxa/palm27x.c
@@ -21,7 +21,6 @@
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/jack.h>
 
 #include <asm/mach-types.h>
@@ -77,37 +76,38 @@
 static int palm27x_ac97_init(struct snd_soc_pcm_runtime *rtd)
 {
 	struct snd_soc_codec *codec = rtd->codec;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 	int err;
 
 	/* add palm27x specific widgets */
-	err = snd_soc_dapm_new_controls(codec, palm27x_dapm_widgets,
+	err = snd_soc_dapm_new_controls(dapm, palm27x_dapm_widgets,
 				ARRAY_SIZE(palm27x_dapm_widgets));
 	if (err)
 		return err;
 
 	/* set up palm27x specific audio path audio_map */
-	err = snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+	err = snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 	if (err)
 		return err;
 
 	/* connected pins */
 	if (machine_is_palmld())
-		snd_soc_dapm_enable_pin(codec, "MIC1");
-	snd_soc_dapm_enable_pin(codec, "HPOUTL");
-	snd_soc_dapm_enable_pin(codec, "HPOUTR");
-	snd_soc_dapm_enable_pin(codec, "LOUT2");
-	snd_soc_dapm_enable_pin(codec, "ROUT2");
+		snd_soc_dapm_enable_pin(dapm, "MIC1");
+	snd_soc_dapm_enable_pin(dapm, "HPOUTL");
+	snd_soc_dapm_enable_pin(dapm, "HPOUTR");
+	snd_soc_dapm_enable_pin(dapm, "LOUT2");
+	snd_soc_dapm_enable_pin(dapm, "ROUT2");
 
 	/* not connected pins */
-	snd_soc_dapm_nc_pin(codec, "OUT3");
-	snd_soc_dapm_nc_pin(codec, "MONOOUT");
-	snd_soc_dapm_nc_pin(codec, "LINEINL");
-	snd_soc_dapm_nc_pin(codec, "LINEINR");
-	snd_soc_dapm_nc_pin(codec, "PCBEEP");
-	snd_soc_dapm_nc_pin(codec, "PHONE");
-	snd_soc_dapm_nc_pin(codec, "MIC2");
+	snd_soc_dapm_nc_pin(dapm, "OUT3");
+	snd_soc_dapm_nc_pin(dapm, "MONOOUT");
+	snd_soc_dapm_nc_pin(dapm, "LINEINL");
+	snd_soc_dapm_nc_pin(dapm, "LINEINR");
+	snd_soc_dapm_nc_pin(dapm, "PCBEEP");
+	snd_soc_dapm_nc_pin(dapm, "PHONE");
+	snd_soc_dapm_nc_pin(dapm, "MIC2");
 
-	err = snd_soc_dapm_sync(codec);
+	err = snd_soc_dapm_sync(dapm);
 	if (err)
 		return err;
 
diff --git a/sound/soc/pxa/poodle.c b/sound/soc/pxa/poodle.c
index 84edd03..6298ee1 100644
--- a/sound/soc/pxa/poodle.c
+++ b/sound/soc/pxa/poodle.c
@@ -23,7 +23,6 @@
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 
 #include <asm/mach-types.h>
 #include <asm/hardware/locomo.h>
@@ -46,6 +45,8 @@
 
 static void poodle_ext_control(struct snd_soc_codec *codec)
 {
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
+
 	/* set up jack connection */
 	if (poodle_jack_func == POODLE_HP) {
 		/* set = unmute headphone */
@@ -53,23 +54,23 @@
 			POODLE_LOCOMO_GPIO_MUTE_L, 1);
 		locomo_gpio_write(&poodle_locomo_device.dev,
 			POODLE_LOCOMO_GPIO_MUTE_R, 1);
-		snd_soc_dapm_enable_pin(codec, "Headphone Jack");
+		snd_soc_dapm_enable_pin(dapm, "Headphone Jack");
 	} else {
 		locomo_gpio_write(&poodle_locomo_device.dev,
 			POODLE_LOCOMO_GPIO_MUTE_L, 0);
 		locomo_gpio_write(&poodle_locomo_device.dev,
 			POODLE_LOCOMO_GPIO_MUTE_R, 0);
-		snd_soc_dapm_disable_pin(codec, "Headphone Jack");
+		snd_soc_dapm_disable_pin(dapm, "Headphone Jack");
 	}
 
 	/* set the endpoints to their new connection states */
 	if (poodle_spk_func == POODLE_SPK_ON)
-		snd_soc_dapm_enable_pin(codec, "Ext Spk");
+		snd_soc_dapm_enable_pin(dapm, "Ext Spk");
 	else
-		snd_soc_dapm_disable_pin(codec, "Ext Spk");
+		snd_soc_dapm_disable_pin(dapm, "Ext Spk");
 
 	/* signal a DAPM event */
-	snd_soc_dapm_sync(codec);
+	snd_soc_dapm_sync(dapm);
 }
 
 static int poodle_startup(struct snd_pcm_substream *substream)
@@ -244,11 +245,12 @@
 static int poodle_wm8731_init(struct snd_soc_pcm_runtime *rtd)
 {
 	struct snd_soc_codec *codec = rtd->codec;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 	int err;
 
-	snd_soc_dapm_nc_pin(codec, "LLINEIN");
-	snd_soc_dapm_nc_pin(codec, "RLINEIN");
-	snd_soc_dapm_enable_pin(codec, "MICIN");
+	snd_soc_dapm_nc_pin(dapm, "LLINEIN");
+	snd_soc_dapm_nc_pin(dapm, "RLINEIN");
+	snd_soc_dapm_enable_pin(dapm, "MICIN");
 
 	/* Add poodle specific controls */
 	err = snd_soc_add_controls(codec, wm8731_poodle_controls,
@@ -257,13 +259,13 @@
 		return err;
 
 	/* Add poodle specific widgets */
-	snd_soc_dapm_new_controls(codec, wm8731_dapm_widgets,
+	snd_soc_dapm_new_controls(dapm, wm8731_dapm_widgets,
 				  ARRAY_SIZE(wm8731_dapm_widgets));
 
 	/* Set up poodle specific audio path audio_map */
-	snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+	snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 
-	snd_soc_dapm_sync(codec);
+	snd_soc_dapm_sync(dapm);
 	return 0;
 }
 
diff --git a/sound/soc/pxa/raumfeld.c b/sound/soc/pxa/raumfeld.c
index 2cda82bc..0fd60f4 100644
--- a/sound/soc/pxa/raumfeld.c
+++ b/sound/soc/pxa/raumfeld.c
@@ -22,7 +22,6 @@
 #include <linux/gpio.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 
 #include <asm/mach-types.h>
 
diff --git a/sound/soc/pxa/saarb.c b/sound/soc/pxa/saarb.c
index d63cb47..9595189 100644
--- a/sound/soc/pxa/saarb.c
+++ b/sound/soc/pxa/saarb.c
@@ -18,7 +18,6 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/jack.h>
 
 #include <asm/mach-types.h>
@@ -133,20 +132,21 @@
 static int saarb_pm860x_init(struct snd_soc_pcm_runtime *rtd)
 {
 	struct snd_soc_codec *codec = rtd->codec;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 	int ret;
 
-	snd_soc_dapm_new_controls(codec, saarb_dapm_widgets,
+	snd_soc_dapm_new_controls(dapm, saarb_dapm_widgets,
 				  ARRAY_SIZE(saarb_dapm_widgets));
-	snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+	snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 
 	/* connected pins */
-	snd_soc_dapm_enable_pin(codec, "Ext Speaker");
-	snd_soc_dapm_enable_pin(codec, "Ext Mic 1");
-	snd_soc_dapm_enable_pin(codec, "Ext Mic 3");
-	snd_soc_dapm_disable_pin(codec, "Headset Mic 2");
-	snd_soc_dapm_disable_pin(codec, "Headset Stereophone");
+	snd_soc_dapm_enable_pin(dapm, "Ext Speaker");
+	snd_soc_dapm_enable_pin(dapm, "Ext Mic 1");
+	snd_soc_dapm_enable_pin(dapm, "Ext Mic 3");
+	snd_soc_dapm_disable_pin(dapm, "Headset Mic 2");
+	snd_soc_dapm_disable_pin(dapm, "Headset Stereophone");
 
-	ret = snd_soc_dapm_sync(codec);
+	ret = snd_soc_dapm_sync(dapm);
 	if (ret)
 		return ret;
 
diff --git a/sound/soc/pxa/spitz.c b/sound/soc/pxa/spitz.c
index 0b30d7d..c2acb69 100644
--- a/sound/soc/pxa/spitz.c
+++ b/sound/soc/pxa/spitz.c
@@ -23,7 +23,6 @@
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 
 #include <asm/mach-types.h>
 #include <mach/spitz.h>
@@ -46,61 +45,63 @@
 
 static void spitz_ext_control(struct snd_soc_codec *codec)
 {
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
+
 	if (spitz_spk_func == SPITZ_SPK_ON)
-		snd_soc_dapm_enable_pin(codec, "Ext Spk");
+		snd_soc_dapm_enable_pin(dapm, "Ext Spk");
 	else
-		snd_soc_dapm_disable_pin(codec, "Ext Spk");
+		snd_soc_dapm_disable_pin(dapm, "Ext Spk");
 
 	/* set up jack connection */
 	switch (spitz_jack_func) {
 	case SPITZ_HP:
 		/* enable and unmute hp jack, disable mic bias */
-		snd_soc_dapm_disable_pin(codec, "Headset Jack");
-		snd_soc_dapm_disable_pin(codec, "Mic Jack");
-		snd_soc_dapm_disable_pin(codec, "Line Jack");
-		snd_soc_dapm_enable_pin(codec, "Headphone Jack");
+		snd_soc_dapm_disable_pin(dapm, "Headset Jack");
+		snd_soc_dapm_disable_pin(dapm, "Mic Jack");
+		snd_soc_dapm_disable_pin(dapm, "Line Jack");
+		snd_soc_dapm_enable_pin(dapm, "Headphone Jack");
 		gpio_set_value(SPITZ_GPIO_MUTE_L, 1);
 		gpio_set_value(SPITZ_GPIO_MUTE_R, 1);
 		break;
 	case SPITZ_MIC:
 		/* enable mic jack and bias, mute hp */
-		snd_soc_dapm_disable_pin(codec, "Headphone Jack");
-		snd_soc_dapm_disable_pin(codec, "Headset Jack");
-		snd_soc_dapm_disable_pin(codec, "Line Jack");
-		snd_soc_dapm_enable_pin(codec, "Mic Jack");
+		snd_soc_dapm_disable_pin(dapm, "Headphone Jack");
+		snd_soc_dapm_disable_pin(dapm, "Headset Jack");
+		snd_soc_dapm_disable_pin(dapm, "Line Jack");
+		snd_soc_dapm_enable_pin(dapm, "Mic Jack");
 		gpio_set_value(SPITZ_GPIO_MUTE_L, 0);
 		gpio_set_value(SPITZ_GPIO_MUTE_R, 0);
 		break;
 	case SPITZ_LINE:
 		/* enable line jack, disable mic bias and mute hp */
-		snd_soc_dapm_disable_pin(codec, "Headphone Jack");
-		snd_soc_dapm_disable_pin(codec, "Headset Jack");
-		snd_soc_dapm_disable_pin(codec, "Mic Jack");
-		snd_soc_dapm_enable_pin(codec, "Line Jack");
+		snd_soc_dapm_disable_pin(dapm, "Headphone Jack");
+		snd_soc_dapm_disable_pin(dapm, "Headset Jack");
+		snd_soc_dapm_disable_pin(dapm, "Mic Jack");
+		snd_soc_dapm_enable_pin(dapm, "Line Jack");
 		gpio_set_value(SPITZ_GPIO_MUTE_L, 0);
 		gpio_set_value(SPITZ_GPIO_MUTE_R, 0);
 		break;
 	case SPITZ_HEADSET:
 		/* enable and unmute headset jack enable mic bias, mute L hp */
-		snd_soc_dapm_disable_pin(codec, "Headphone Jack");
-		snd_soc_dapm_enable_pin(codec, "Mic Jack");
-		snd_soc_dapm_disable_pin(codec, "Line Jack");
-		snd_soc_dapm_enable_pin(codec, "Headset Jack");
+		snd_soc_dapm_disable_pin(dapm, "Headphone Jack");
+		snd_soc_dapm_enable_pin(dapm, "Mic Jack");
+		snd_soc_dapm_disable_pin(dapm, "Line Jack");
+		snd_soc_dapm_enable_pin(dapm, "Headset Jack");
 		gpio_set_value(SPITZ_GPIO_MUTE_L, 0);
 		gpio_set_value(SPITZ_GPIO_MUTE_R, 1);
 		break;
 	case SPITZ_HP_OFF:
 
 		/* jack removed, everything off */
-		snd_soc_dapm_disable_pin(codec, "Headphone Jack");
-		snd_soc_dapm_disable_pin(codec, "Headset Jack");
-		snd_soc_dapm_disable_pin(codec, "Mic Jack");
-		snd_soc_dapm_disable_pin(codec, "Line Jack");
+		snd_soc_dapm_disable_pin(dapm, "Headphone Jack");
+		snd_soc_dapm_disable_pin(dapm, "Headset Jack");
+		snd_soc_dapm_disable_pin(dapm, "Mic Jack");
+		snd_soc_dapm_disable_pin(dapm, "Line Jack");
 		gpio_set_value(SPITZ_GPIO_MUTE_L, 0);
 		gpio_set_value(SPITZ_GPIO_MUTE_R, 0);
 		break;
 	}
-	snd_soc_dapm_sync(codec);
+	snd_soc_dapm_sync(dapm);
 }
 
 static int spitz_startup(struct snd_pcm_substream *substream)
@@ -281,16 +282,17 @@
 static int spitz_wm8750_init(struct snd_soc_pcm_runtime *rtd)
 {
 	struct snd_soc_codec *codec = rtd->codec;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 	int err;
 
 	/* NC codec pins */
-	snd_soc_dapm_nc_pin(codec, "RINPUT1");
-	snd_soc_dapm_nc_pin(codec, "LINPUT2");
-	snd_soc_dapm_nc_pin(codec, "RINPUT2");
-	snd_soc_dapm_nc_pin(codec, "LINPUT3");
-	snd_soc_dapm_nc_pin(codec, "RINPUT3");
-	snd_soc_dapm_nc_pin(codec, "OUT3");
-	snd_soc_dapm_nc_pin(codec, "MONO1");
+	snd_soc_dapm_nc_pin(dapm, "RINPUT1");
+	snd_soc_dapm_nc_pin(dapm, "LINPUT2");
+	snd_soc_dapm_nc_pin(dapm, "RINPUT2");
+	snd_soc_dapm_nc_pin(dapm, "LINPUT3");
+	snd_soc_dapm_nc_pin(dapm, "RINPUT3");
+	snd_soc_dapm_nc_pin(dapm, "OUT3");
+	snd_soc_dapm_nc_pin(dapm, "MONO1");
 
 	/* Add spitz specific controls */
 	err = snd_soc_add_controls(codec, wm8750_spitz_controls,
@@ -299,13 +301,13 @@
 		return err;
 
 	/* Add spitz specific widgets */
-	snd_soc_dapm_new_controls(codec, wm8750_dapm_widgets,
+	snd_soc_dapm_new_controls(dapm, wm8750_dapm_widgets,
 				  ARRAY_SIZE(wm8750_dapm_widgets));
 
 	/* Set up spitz specific audio paths */
-	snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+	snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 
-	snd_soc_dapm_sync(codec);
+	snd_soc_dapm_sync(dapm);
 	return 0;
 }
 
diff --git a/sound/soc/pxa/tavorevb3.c b/sound/soc/pxa/tavorevb3.c
index 248c283..f881f65 100644
--- a/sound/soc/pxa/tavorevb3.c
+++ b/sound/soc/pxa/tavorevb3.c
@@ -18,7 +18,6 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/jack.h>
 
 #include <asm/mach-types.h>
@@ -133,20 +132,21 @@
 static int evb3_pm860x_init(struct snd_soc_pcm_runtime *rtd)
 {
 	struct snd_soc_codec *codec = rtd->codec;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 	int ret;
 
-	snd_soc_dapm_new_controls(codec, evb3_dapm_widgets,
+	snd_soc_dapm_new_controls(dapm, evb3_dapm_widgets,
 				  ARRAY_SIZE(evb3_dapm_widgets));
-	snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+	snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 
 	/* connected pins */
-	snd_soc_dapm_enable_pin(codec, "Ext Speaker");
-	snd_soc_dapm_enable_pin(codec, "Ext Mic 1");
-	snd_soc_dapm_enable_pin(codec, "Ext Mic 3");
-	snd_soc_dapm_disable_pin(codec, "Headset Mic 2");
-	snd_soc_dapm_disable_pin(codec, "Headset Stereophone");
+	snd_soc_dapm_enable_pin(dapm, "Ext Speaker");
+	snd_soc_dapm_enable_pin(dapm, "Ext Mic 1");
+	snd_soc_dapm_enable_pin(dapm, "Ext Mic 3");
+	snd_soc_dapm_disable_pin(dapm, "Headset Mic 2");
+	snd_soc_dapm_disable_pin(dapm, "Headset Stereophone");
 
-	ret = snd_soc_dapm_sync(codec);
+	ret = snd_soc_dapm_sync(dapm);
 	if (ret)
 		return ret;
 
diff --git a/sound/soc/pxa/tosa.c b/sound/soc/pxa/tosa.c
index 7b983f9..f75804e 100644
--- a/sound/soc/pxa/tosa.c
+++ b/sound/soc/pxa/tosa.c
@@ -26,7 +26,6 @@
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 
 #include <asm/mach-types.h>
 #include <mach/tosa.h>
@@ -49,31 +48,33 @@
 
 static void tosa_ext_control(struct snd_soc_codec *codec)
 {
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
+
 	/* set up jack connection */
 	switch (tosa_jack_func) {
 	case TOSA_HP:
-		snd_soc_dapm_disable_pin(codec, "Mic (Internal)");
-		snd_soc_dapm_enable_pin(codec, "Headphone Jack");
-		snd_soc_dapm_disable_pin(codec, "Headset Jack");
+		snd_soc_dapm_disable_pin(dapm, "Mic (Internal)");
+		snd_soc_dapm_enable_pin(dapm, "Headphone Jack");
+		snd_soc_dapm_disable_pin(dapm, "Headset Jack");
 		break;
 	case TOSA_MIC_INT:
-		snd_soc_dapm_enable_pin(codec, "Mic (Internal)");
-		snd_soc_dapm_disable_pin(codec, "Headphone Jack");
-		snd_soc_dapm_disable_pin(codec, "Headset Jack");
+		snd_soc_dapm_enable_pin(dapm, "Mic (Internal)");
+		snd_soc_dapm_disable_pin(dapm, "Headphone Jack");
+		snd_soc_dapm_disable_pin(dapm, "Headset Jack");
 		break;
 	case TOSA_HEADSET:
-		snd_soc_dapm_disable_pin(codec, "Mic (Internal)");
-		snd_soc_dapm_disable_pin(codec, "Headphone Jack");
-		snd_soc_dapm_enable_pin(codec, "Headset Jack");
+		snd_soc_dapm_disable_pin(dapm, "Mic (Internal)");
+		snd_soc_dapm_disable_pin(dapm, "Headphone Jack");
+		snd_soc_dapm_enable_pin(dapm, "Headset Jack");
 		break;
 	}
 
 	if (tosa_spk_func == TOSA_SPK_ON)
-		snd_soc_dapm_enable_pin(codec, "Speaker");
+		snd_soc_dapm_enable_pin(dapm, "Speaker");
 	else
-		snd_soc_dapm_disable_pin(codec, "Speaker");
+		snd_soc_dapm_disable_pin(dapm, "Speaker");
 
-	snd_soc_dapm_sync(codec);
+	snd_soc_dapm_sync(dapm);
 }
 
 static int tosa_startup(struct snd_pcm_substream *substream)
@@ -191,10 +192,11 @@
 static int tosa_ac97_init(struct snd_soc_pcm_runtime *rtd)
 {
 	struct snd_soc_codec *codec = rtd->codec;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 	int err;
 
-	snd_soc_dapm_nc_pin(codec, "OUT3");
-	snd_soc_dapm_nc_pin(codec, "MONOOUT");
+	snd_soc_dapm_nc_pin(dapm, "OUT3");
+	snd_soc_dapm_nc_pin(dapm, "MONOOUT");
 
 	/* add tosa specific controls */
 	err = snd_soc_add_controls(codec, tosa_controls,
@@ -203,13 +205,13 @@
 		return err;
 
 	/* add tosa specific widgets */
-	snd_soc_dapm_new_controls(codec, tosa_dapm_widgets,
+	snd_soc_dapm_new_controls(dapm, tosa_dapm_widgets,
 				  ARRAY_SIZE(tosa_dapm_widgets));
 
 	/* set up tosa specific audio path audio_map */
-	snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+	snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 
-	snd_soc_dapm_sync(codec);
+	snd_soc_dapm_sync(dapm);
 	return 0;
 }
 
diff --git a/sound/soc/pxa/z2.c b/sound/soc/pxa/z2.c
index 4cc841b..2d4f896 100644
--- a/sound/soc/pxa/z2.c
+++ b/sound/soc/pxa/z2.c
@@ -21,7 +21,6 @@
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/jack.h>
 
 #include <asm/mach-types.h>
@@ -140,22 +139,23 @@
 static int z2_wm8750_init(struct snd_soc_pcm_runtime *rtd)
 {
 	struct snd_soc_codec *codec = rtd->codec;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 	int ret;
 
 	/* NC codec pins */
-	snd_soc_dapm_disable_pin(codec, "LINPUT3");
-	snd_soc_dapm_disable_pin(codec, "RINPUT3");
-	snd_soc_dapm_disable_pin(codec, "OUT3");
-	snd_soc_dapm_disable_pin(codec, "MONO");
+	snd_soc_dapm_disable_pin(dapm, "LINPUT3");
+	snd_soc_dapm_disable_pin(dapm, "RINPUT3");
+	snd_soc_dapm_disable_pin(dapm, "OUT3");
+	snd_soc_dapm_disable_pin(dapm, "MONO");
 
 	/* Add z2 specific widgets */
-	snd_soc_dapm_new_controls(codec, wm8750_dapm_widgets,
+	snd_soc_dapm_new_controls(dapm, wm8750_dapm_widgets,
 				 ARRAY_SIZE(wm8750_dapm_widgets));
 
 	/* Set up z2 specific audio paths */
-	snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+	snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 
-	ret = snd_soc_dapm_sync(codec);
+	ret = snd_soc_dapm_sync(dapm);
 	if (ret)
 		goto err;
 
diff --git a/sound/soc/pxa/zylonite.c b/sound/soc/pxa/zylonite.c
index d27e05a..b222a7d 100644
--- a/sound/soc/pxa/zylonite.c
+++ b/sound/soc/pxa/zylonite.c
@@ -20,7 +20,6 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 
 #include "../codecs/wm9713.h"
 #include "pxa2xx-ac97.h"
@@ -73,21 +72,22 @@
 static int zylonite_wm9713_init(struct snd_soc_pcm_runtime *rtd)
 {
 	struct snd_soc_codec *codec = rtd->codec;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 
 	if (clk_pout)
 		snd_soc_dai_set_pll(rtd->codec_dai, 0, 0,
 				    clk_get_rate(pout), 0);
 
-	snd_soc_dapm_new_controls(codec, zylonite_dapm_widgets,
+	snd_soc_dapm_new_controls(dapm, zylonite_dapm_widgets,
 				  ARRAY_SIZE(zylonite_dapm_widgets));
 
-	snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+	snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 
 	/* Static setup for now */
-	snd_soc_dapm_enable_pin(codec, "Headphone");
-	snd_soc_dapm_enable_pin(codec, "Headset Earpiece");
+	snd_soc_dapm_enable_pin(dapm, "Headphone");
+	snd_soc_dapm_enable_pin(dapm, "Headset Earpiece");
 
-	snd_soc_dapm_sync(codec);
+	snd_soc_dapm_sync(dapm);
 	return 0;
 }
 
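The machine-driver hunks above are all the same mechanical conversion: with multi-component ASoC the DAPM state lives in a struct snd_soc_dapm_context embedded in the codec, so the widget, route, pin and sync helpers now take that context rather than the codec itself. A minimal sketch of a converted init callback follows; the board name ("myboard"), widget tables and pin names are illustrative only, not taken from any driver in this series.

#include <linux/kernel.h>
#include <sound/soc.h>

/* Sketch only: "myboard" and all pin names below are hypothetical. */
static const struct snd_soc_dapm_widget myboard_dapm_widgets[] = {
	SND_SOC_DAPM_HP("Headphone Jack", NULL),
	SND_SOC_DAPM_SPK("Ext Spk", NULL),
};

static const struct snd_soc_dapm_route myboard_audio_map[] = {
	/* sink, control, source */
	{"Headphone Jack", NULL, "HPOUTL"},
	{"Headphone Jack", NULL, "HPOUTR"},
	{"Ext Spk", NULL, "SPKOUT"},
};

static int myboard_codec_init(struct snd_soc_pcm_runtime *rtd)
{
	struct snd_soc_codec *codec = rtd->codec;
	/* the DAPM context is embedded in the codec after this conversion */
	struct snd_soc_dapm_context *dapm = &codec->dapm;

	snd_soc_dapm_new_controls(dapm, myboard_dapm_widgets,
				  ARRAY_SIZE(myboard_dapm_widgets));
	snd_soc_dapm_add_routes(dapm, myboard_audio_map,
				ARRAY_SIZE(myboard_audio_map));

	/* mark unused codec pins and pick the initial output */
	snd_soc_dapm_nc_pin(dapm, "MONOOUT");
	snd_soc_dapm_enable_pin(dapm, "Headphone Jack");

	return snd_soc_dapm_sync(dapm);
}
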
diff --git a/sound/soc/s3c24xx/Kconfig b/sound/soc/s3c24xx/Kconfig
deleted file mode 100644
index d85bf8a..0000000
--- a/sound/soc/s3c24xx/Kconfig
+++ /dev/null
@@ -1,171 +0,0 @@
-config SND_S3C24XX_SOC
-	tristate "SoC Audio for the Samsung S3CXXXX chips"
-	depends on ARCH_S3C2410 || ARCH_S3C64XX || ARCH_S5PC100 || ARCH_S5PV210
-	select S3C64XX_DMA if ARCH_S3C64XX
-	select S3C2410_DMA if ARCH_S3C2410
-	help
-	  Say Y or M if you want to add support for codecs attached to
-	  the S3C24XX AC97 or I2S interfaces. You will also need to
-	  select the audio interfaces to support below.
-
-config SND_S3C24XX_SOC_I2S
-	tristate
-	select S3C2410_DMA
-
-config SND_S3C_I2SV2_SOC
-	tristate
-
-config SND_S3C2412_SOC_I2S
-	tristate
-	select SND_S3C_I2SV2_SOC
-	select S3C2410_DMA
-
-config SND_S3C64XX_SOC_I2S
-	tristate
-	select SND_S3C_I2SV2_SOC
-	select S3C64XX_DMA
-
-config SND_S3C64XX_SOC_I2S_V4
-	tristate
-	select SND_S3C_I2SV2_SOC
-	select S3C64XX_DMA
-
-config SND_S3C_SOC_PCM
-	tristate
-
-config SND_S3C_SOC_AC97
-	tristate
-	select SND_SOC_AC97_BUS
-
-config SND_S5P_SOC_SPDIF
-	tristate
-	select SND_SOC_SPDIF
-
-config SND_S3C24XX_SOC_NEO1973_WM8753
-	tristate "SoC I2S Audio support for NEO1973 - WM8753"
-	depends on SND_S3C24XX_SOC && MACH_NEO1973_GTA01
-	select SND_S3C24XX_SOC_I2S
-	select SND_SOC_WM8753
-	help
-	  Say Y if you want to add support for SoC audio on smdk2440
-	  with the WM8753.
-
-config SND_S3C24XX_SOC_NEO1973_GTA02_WM8753
-	tristate "Audio support for the Openmoko Neo FreeRunner (GTA02)"
-	depends on SND_S3C24XX_SOC && MACH_NEO1973_GTA02
-	select SND_S3C24XX_SOC_I2S
-	select SND_SOC_WM8753
-	help
-	  This driver provides audio support for the Openmoko Neo FreeRunner
-	  smartphone.
-	  
-config SND_S3C24XX_SOC_JIVE_WM8750
-	tristate "SoC I2S Audio support for Jive"
-	depends on SND_S3C24XX_SOC && MACH_JIVE
-	select SND_SOC_WM8750
-	select SND_S3C2412_SOC_I2S
-	help
-	  Sat Y if you want to add support for SoC audio on the Jive.
-
-config SND_S3C64XX_SOC_WM8580
-	tristate "SoC I2S Audio support for WM8580 on SMDK64XX"
-	depends on SND_S3C24XX_SOC && MACH_SMDK6410
-	select SND_SOC_WM8580
-	select SND_S3C64XX_SOC_I2S_V4
-	help
-	  Say Y if you want to add support for SoC audio on the SMDK6410.
-
-config SND_S3C24XX_SOC_SMDK2443_WM9710
-	tristate "SoC AC97 Audio support for SMDK2443 - WM9710"
-	depends on SND_S3C24XX_SOC && MACH_SMDK2443
-	select S3C2410_DMA
-	select AC97_BUS
-	select SND_SOC_AC97_CODEC
-	select SND_S3C_SOC_AC97
-	help
-	  Say Y if you want to add support for SoC audio on smdk2443
-	  with the WM9710.
-
-config SND_S3C24XX_SOC_LN2440SBC_ALC650
-	tristate "SoC AC97 Audio support for LN2440SBC - ALC650"
-	depends on SND_S3C24XX_SOC && ARCH_S3C2410
-	select S3C2410_DMA
-	select AC97_BUS
-	select SND_SOC_AC97_CODEC
-	select SND_S3C_SOC_AC97
-	help
-	  Say Y if you want to add support for SoC audio on ln2440sbc
-	  with the ALC650.
-
-config SND_S3C24XX_SOC_S3C24XX_UDA134X
-	tristate "SoC I2S Audio support UDA134X wired to a S3C24XX"
-       	depends on SND_S3C24XX_SOC && ARCH_S3C2410
-       	select SND_S3C24XX_SOC_I2S
-	select SND_SOC_L3
-       	select SND_SOC_UDA134X
-
-config SND_S3C24XX_SOC_SIMTEC
-	tristate
-	help
-	  Internal node for common S3C24XX/Simtec suppor
-
-config SND_S3C24XX_SOC_SIMTEC_TLV320AIC23
-	tristate "SoC I2S Audio support for TLV320AIC23 on Simtec boards"
-	depends on SND_S3C24XX_SOC && ARCH_S3C2410
-	select SND_S3C24XX_SOC_I2S
-	select SND_SOC_TLV320AIC23
-	select SND_S3C24XX_SOC_SIMTEC
-
-config SND_S3C24XX_SOC_SIMTEC_HERMES
-	tristate "SoC I2S Audio support for Simtec Hermes board"
-	depends on SND_S3C24XX_SOC && ARCH_S3C2410
-	select SND_S3C24XX_SOC_I2S
-	select SND_SOC_TLV320AIC3X
-	select SND_S3C24XX_SOC_SIMTEC
-
-config SND_S3C24XX_SOC_RX1950_UDA1380
-	tristate "Audio support for the HP iPAQ RX1950"
-	depends on SND_S3C24XX_SOC && MACH_RX1950
-	select SND_S3C24XX_SOC_I2S
-	select SND_SOC_UDA1380
-	help
-	  This driver provides audio support for HP iPAQ RX1950 PDA.
-
-config SND_SOC_SMDK_WM9713
-	tristate "SoC AC97 Audio support for SMDK with WM9713"
-	depends on SND_S3C24XX_SOC && (MACH_SMDK6410 || MACH_SMDKC100 || MACH_SMDKV210 || MACH_SMDKC110)
-	select SND_SOC_WM9713
-	select SND_S3C_SOC_AC97
-	help
-	  Sat Y if you want to add support for SoC audio on the SMDK.
-
-config SND_S3C64XX_SOC_SMARTQ
-	tristate "SoC I2S Audio support for SmartQ board"
-	depends on SND_S3C24XX_SOC && MACH_SMARTQ
-	select SND_S3C64XX_SOC_I2S
-	select SND_SOC_WM8750
-
-config SND_S5PC110_SOC_AQUILA_WM8994
-	tristate "SoC I2S Audio support for AQUILA - WM8994"
-	depends on SND_S3C24XX_SOC && MACH_AQUILA
-	select SND_S3C64XX_SOC_I2S_V4
-	select SND_SOC_WM8994
-	help
-	  Say Y if you want to add support for SoC audio on aquila
-	  with the WM8994.
-
-config SND_S5PV210_SOC_GONI_WM8994
-	tristate "SoC I2S Audio support for GONI - WM8994"
-	depends on SND_S3C24XX_SOC && MACH_GONI
-	select SND_S3C64XX_SOC_I2S_V4
-	select SND_SOC_WM8994
-	help
-	  Say Y if you want to add support for SoC audio on goni
-	  with the WM8994.
-
-config SND_SOC_SMDK_SPDIF
-	tristate "SoC S/PDIF Audio support for SMDK"
-	depends on SND_S3C24XX_SOC && (MACH_SMDKC100 || MACH_SMDKC110 || MACH_SMDKV210)
-	select SND_S5P_SOC_SPDIF
-	help
-	  Say Y if you want to add support for SoC S/PDIF audio on the SMDK.
diff --git a/sound/soc/s3c24xx/Makefile b/sound/soc/s3c24xx/Makefile
deleted file mode 100644
index ee8f41d..0000000
--- a/sound/soc/s3c24xx/Makefile
+++ /dev/null
@@ -1,55 +0,0 @@
-# S3c24XX Platform Support
-snd-soc-s3c24xx-objs := s3c-dma.o
-snd-soc-s3c24xx-i2s-objs := s3c24xx-i2s.o
-snd-soc-s3c2412-i2s-objs := s3c2412-i2s.o
-snd-soc-s3c64xx-i2s-objs := s3c64xx-i2s.o
-snd-soc-s3c-ac97-objs := s3c-ac97.o
-snd-soc-s3c64xx-i2s-v4-objs := s3c64xx-i2s-v4.o
-snd-soc-s3c-i2s-v2-objs := s3c-i2s-v2.o
-snd-soc-s3c-pcm-objs := s3c-pcm.o
-snd-soc-samsung-spdif-objs := spdif.o
-
-obj-$(CONFIG_SND_S3C24XX_SOC) += snd-soc-s3c24xx.o
-obj-$(CONFIG_SND_S3C24XX_SOC_I2S) += snd-soc-s3c24xx-i2s.o
-obj-$(CONFIG_SND_S3C_SOC_AC97) += snd-soc-s3c-ac97.o
-obj-$(CONFIG_SND_S3C2412_SOC_I2S) += snd-soc-s3c2412-i2s.o
-obj-$(CONFIG_SND_S3C64XX_SOC_I2S) += snd-soc-s3c64xx-i2s.o
-obj-$(CONFIG_SND_S3C64XX_SOC_I2S_V4) += snd-soc-s3c64xx-i2s-v4.o
-obj-$(CONFIG_SND_S3C_I2SV2_SOC) += snd-soc-s3c-i2s-v2.o
-obj-$(CONFIG_SND_S3C_SOC_PCM) += snd-soc-s3c-pcm.o
-obj-$(CONFIG_SND_S5P_SOC_SPDIF) += snd-soc-samsung-spdif.o
-
-# S3C24XX Machine Support
-snd-soc-jive-wm8750-objs := jive_wm8750.o
-snd-soc-neo1973-wm8753-objs := neo1973_wm8753.o
-snd-soc-neo1973-gta02-wm8753-objs := neo1973_gta02_wm8753.o
-snd-soc-smdk2443-wm9710-objs := smdk2443_wm9710.o
-snd-soc-ln2440sbc-alc650-objs := ln2440sbc_alc650.o
-snd-soc-s3c24xx-uda134x-objs := s3c24xx_uda134x.o
-snd-soc-s3c24xx-simtec-objs := s3c24xx_simtec.o
-snd-soc-s3c24xx-simtec-hermes-objs := s3c24xx_simtec_hermes.o
-snd-soc-s3c24xx-simtec-tlv320aic23-objs := s3c24xx_simtec_tlv320aic23.o
-snd-soc-rx1950-uda1380-objs := rx1950_uda1380.o
-snd-soc-smdk64xx-wm8580-objs := smdk64xx_wm8580.o
-snd-soc-smdk-wm9713-objs := smdk_wm9713.o
-snd-soc-s3c64xx-smartq-wm8987-objs := smartq_wm8987.o
-snd-soc-aquila-wm8994-objs := aquila_wm8994.o
-snd-soc-goni-wm8994-objs := goni_wm8994.o
-snd-soc-smdk-spdif-objs := smdk_spdif.o
-
-obj-$(CONFIG_SND_S3C24XX_SOC_JIVE_WM8750) += snd-soc-jive-wm8750.o
-obj-$(CONFIG_SND_S3C24XX_SOC_NEO1973_WM8753) += snd-soc-neo1973-wm8753.o
-obj-$(CONFIG_SND_S3C24XX_SOC_NEO1973_GTA02_WM8753) += snd-soc-neo1973-gta02-wm8753.o
-obj-$(CONFIG_SND_S3C24XX_SOC_SMDK2443_WM9710) += snd-soc-smdk2443-wm9710.o
-obj-$(CONFIG_SND_S3C24XX_SOC_LN2440SBC_ALC650) += snd-soc-ln2440sbc-alc650.o
-obj-$(CONFIG_SND_S3C24XX_SOC_S3C24XX_UDA134X) += snd-soc-s3c24xx-uda134x.o
-obj-$(CONFIG_SND_S3C24XX_SOC_SIMTEC) += snd-soc-s3c24xx-simtec.o
-obj-$(CONFIG_SND_S3C24XX_SOC_SIMTEC_HERMES) += snd-soc-s3c24xx-simtec-hermes.o
-obj-$(CONFIG_SND_S3C24XX_SOC_SIMTEC_TLV320AIC23) += snd-soc-s3c24xx-simtec-tlv320aic23.o
-obj-$(CONFIG_SND_S3C24XX_SOC_RX1950_UDA1380) += snd-soc-rx1950-uda1380.o
-obj-$(CONFIG_SND_S3C64XX_SOC_WM8580) += snd-soc-smdk64xx-wm8580.o
-obj-$(CONFIG_SND_SOC_SMDK_WM9713) += snd-soc-smdk-wm9713.o
-obj-$(CONFIG_SND_S3C64XX_SOC_SMARTQ) += snd-soc-s3c64xx-smartq-wm8987.o
-obj-$(CONFIG_SND_S5PC110_SOC_AQUILA_WM8994) += snd-soc-aquila-wm8994.o
-obj-$(CONFIG_SND_S5PV210_SOC_GONI_WM8994) += snd-soc-goni-wm8994.o
-obj-$(CONFIG_SND_SOC_SMDK_SPDIF) += snd-soc-smdk-spdif.o
diff --git a/sound/soc/s3c24xx/aquila_wm8994.c b/sound/soc/s3c24xx/aquila_wm8994.c
deleted file mode 100644
index 235d197..0000000
--- a/sound/soc/s3c24xx/aquila_wm8994.c
+++ /dev/null
@@ -1,295 +0,0 @@
-/*
- * aquila_wm8994.c
- *
- * Copyright (C) 2010 Samsung Electronics Co.Ltd
- * Author: Chanwoo Choi <cw00.choi@samsung.com>
- *
- *  This program is free software; you can redistribute  it and/or modify it
- *  under  the terms of  the GNU General  Public License as published by the
- *  Free Software Foundation;  either version 2 of the  License, or (at your
- *  option) any later version.
- *
- */
-
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/io.h>
-#include <linux/platform_device.h>
-#include <sound/soc.h>
-#include <sound/soc-dapm.h>
-#include <sound/jack.h>
-#include <asm/mach-types.h>
-#include <mach/gpio.h>
-#include <mach/regs-clock.h>
-
-#include <linux/mfd/wm8994/core.h>
-#include <linux/mfd/wm8994/registers.h>
-#include "../codecs/wm8994.h"
-#include "s3c-dma.h"
-#include "s3c64xx-i2s.h"
-
-static struct snd_soc_card aquila;
-static struct platform_device *aquila_snd_device;
-
-/* 3.5 pie jack */
-static struct snd_soc_jack jack;
-
-/* 3.5 pie jack detection DAPM pins */
-static struct snd_soc_jack_pin jack_pins[] = {
-	{
-		.pin = "Headset Mic",
-		.mask = SND_JACK_MICROPHONE,
-	}, {
-		.pin = "Headset Stereophone",
-		.mask = SND_JACK_HEADPHONE | SND_JACK_MECHANICAL |
-			SND_JACK_AVOUT,
-	},
-};
-
-/* 3.5 pie jack detection gpios */
-static struct snd_soc_jack_gpio jack_gpios[] = {
-	{
-		.gpio = S5PV210_GPH0(6),
-		.name = "DET_3.5",
-		.report = SND_JACK_HEADSET | SND_JACK_MECHANICAL |
-			SND_JACK_AVOUT,
-		.debounce_time = 200,
-	},
-};
-
-static const struct snd_soc_dapm_widget aquila_dapm_widgets[] = {
-	SND_SOC_DAPM_SPK("Ext Spk", NULL),
-	SND_SOC_DAPM_SPK("Ext Rcv", NULL),
-	SND_SOC_DAPM_HP("Headset Stereophone", NULL),
-	SND_SOC_DAPM_MIC("Headset Mic", NULL),
-	SND_SOC_DAPM_MIC("Main Mic", NULL),
-	SND_SOC_DAPM_MIC("2nd Mic", NULL),
-	SND_SOC_DAPM_LINE("Radio In", NULL),
-};
-
-static const struct snd_soc_dapm_route aquila_dapm_routes[] = {
-	{"Ext Spk", NULL, "SPKOUTLP"},
-	{"Ext Spk", NULL, "SPKOUTLN"},
-
-	{"Ext Rcv", NULL, "HPOUT2N"},
-	{"Ext Rcv", NULL, "HPOUT2P"},
-
-	{"Headset Stereophone", NULL, "HPOUT1L"},
-	{"Headset Stereophone", NULL, "HPOUT1R"},
-
-	{"IN1RN", NULL, "Headset Mic"},
-	{"IN1RP", NULL, "Headset Mic"},
-
-	{"IN1RN", NULL, "2nd Mic"},
-	{"IN1RP", NULL, "2nd Mic"},
-
-	{"IN1LN", NULL, "Main Mic"},
-	{"IN1LP", NULL, "Main Mic"},
-
-	{"IN2LN", NULL, "Radio In"},
-	{"IN2RN", NULL, "Radio In"},
-};
-
-static int aquila_wm8994_init(struct snd_soc_pcm_runtime *rtd)
-{
-	struct snd_soc_codec *codec = rtd->codec;
-	int ret;
-
-	/* add aquila specific widgets */
-	snd_soc_dapm_new_controls(codec, aquila_dapm_widgets,
-			ARRAY_SIZE(aquila_dapm_widgets));
-
-	/* set up aquila specific audio routes */
-	snd_soc_dapm_add_routes(codec, aquila_dapm_routes,
-			ARRAY_SIZE(aquila_dapm_routes));
-
-	/* set endpoints to not connected */
-	snd_soc_dapm_nc_pin(codec, "IN2LP:VXRN");
-	snd_soc_dapm_nc_pin(codec, "IN2RP:VXRP");
-	snd_soc_dapm_nc_pin(codec, "LINEOUT1N");
-	snd_soc_dapm_nc_pin(codec, "LINEOUT1P");
-	snd_soc_dapm_nc_pin(codec, "LINEOUT2N");
-	snd_soc_dapm_nc_pin(codec, "LINEOUT2P");
-	snd_soc_dapm_nc_pin(codec, "SPKOUTRN");
-	snd_soc_dapm_nc_pin(codec, "SPKOUTRP");
-
-	snd_soc_dapm_sync(codec);
-
-	/* Headset jack detection */
-	ret = snd_soc_jack_new(&aquila, "Headset Jack",
-			SND_JACK_HEADSET | SND_JACK_MECHANICAL | SND_JACK_AVOUT,
-			&jack);
-	if (ret)
-		return ret;
-
-	ret = snd_soc_jack_add_pins(&jack, ARRAY_SIZE(jack_pins), jack_pins);
-	if (ret)
-		return ret;
-
-	ret = snd_soc_jack_add_gpios(&jack, ARRAY_SIZE(jack_gpios), jack_gpios);
-	if (ret)
-		return ret;
-
-	return 0;
-}
-
-static int aquila_hifi_hw_params(struct snd_pcm_substream *substream,
-		struct snd_pcm_hw_params *params)
-{
-	struct snd_soc_pcm_runtime *rtd = substream->private_data;
-	struct snd_soc_dai *codec_dai = rtd->codec_dai;
-	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
-	unsigned int pll_out = 24000000;
-	int ret = 0;
-
-	/* set the cpu DAI configuration */
-	ret = snd_soc_dai_set_fmt(cpu_dai, SND_SOC_DAIFMT_I2S |
-			SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBM_CFM);
-	if (ret < 0)
-		return ret;
-
-	/* set the cpu system clock */
-	ret = snd_soc_dai_set_sysclk(cpu_dai, S3C64XX_CLKSRC_PCLK,
-			0, SND_SOC_CLOCK_IN);
-	if (ret < 0)
-		return ret;
-
-	/* set codec DAI configuration */
-	ret = snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_I2S |
-			SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBM_CFM);
-	if (ret < 0)
-		return ret;
-
-	/* set the codec FLL */
-	ret = snd_soc_dai_set_pll(codec_dai, WM8994_FLL1, 0, pll_out,
-			params_rate(params) * 256);
-	if (ret < 0)
-		return ret;
-
-	/* set the codec system clock */
-	ret = snd_soc_dai_set_sysclk(codec_dai, WM8994_SYSCLK_FLL1,
-			params_rate(params) * 256, SND_SOC_CLOCK_IN);
-	if (ret < 0)
-		return ret;
-
-	return 0;
-}
-
-static struct snd_soc_ops aquila_hifi_ops = {
-	.hw_params = aquila_hifi_hw_params,
-};
-
-static int aquila_voice_hw_params(struct snd_pcm_substream *substream,
-		struct snd_pcm_hw_params *params)
-{
-	struct snd_soc_pcm_runtime *rtd = substream->private_data;
-	struct snd_soc_dai *codec_dai = rtd->codec_dai;
-	unsigned int pll_out = 24000000;
-	int ret = 0;
-
-	if (params_rate(params) != 8000)
-		return -EINVAL;
-
-	/* set codec DAI configuration */
-	ret = snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_LEFT_J |
-			SND_SOC_DAIFMT_IB_IF | SND_SOC_DAIFMT_CBM_CFM);
-	if (ret < 0)
-		return ret;
-
-	/* set the codec FLL */
-	ret = snd_soc_dai_set_pll(codec_dai, WM8994_FLL2, 0, pll_out,
-			params_rate(params) * 256);
-	if (ret < 0)
-		return ret;
-
-	/* set the codec system clock */
-	ret = snd_soc_dai_set_sysclk(codec_dai, WM8994_SYSCLK_FLL2,
-			params_rate(params) * 256, SND_SOC_CLOCK_IN);
-	if (ret < 0)
-		return ret;
-
-	return 0;
-}
-
-static struct snd_soc_dai_driver voice_dai = {
-	.name = "aquila-voice-dai",
-	.playback = {
-		.channels_min = 1,
-		.channels_max = 2,
-		.rates = SNDRV_PCM_RATE_8000,
-		.formats = SNDRV_PCM_FMTBIT_S16_LE,},
-	.capture = {
-		.channels_min = 1,
-		.channels_max = 2,
-		.rates = SNDRV_PCM_RATE_8000,
-		.formats = SNDRV_PCM_FMTBIT_S16_LE,},
-};
-
-static struct snd_soc_ops aquila_voice_ops = {
-	.hw_params = aquila_voice_hw_params,
-};
-
-static struct snd_soc_dai_link aquila_dai[] = {
-{
-	.name = "WM8994",
-	.stream_name = "WM8994 HiFi",
-	.cpu_dai_name = "s3c64xx-i2s-v4",
-	.codec_dai_name = "wm8994-hifi",
-	.platform_name = "s3c24xx-pcm-audio",
-	.codec_name = "wm8994-codec.0-0x1a",
-	.init = aquila_wm8994_init,
-	.ops = &aquila_hifi_ops,
-}, {
-	.name = "WM8994 Voice",
-	.stream_name = "Voice",
-	.cpu_dai_name = "aquila-voice-dai",
-	.codec_dai_name = "wm8994-voice",
-	.platform_name = "s3c24xx-pcm-audio",
-	.codec_name = "wm8994-codec.0-0x1a",
-	.ops = &aquila_voice_ops,
-},
-};
-
-static struct snd_soc_card aquila = {
-	.name = "aquila",
-	.dai_link = aquila_dai,
-	.num_links = ARRAY_SIZE(aquila_dai),
-};
-
-static int __init aquila_init(void)
-{
-	int ret;
-
-	if (!machine_is_aquila())
-		return -ENODEV;
-
-	aquila_snd_device = platform_device_alloc("soc-audio", -1);
-	if (!aquila_snd_device)
-		return -ENOMEM;
-
-	/* register voice DAI here */
-	ret = snd_soc_register_dai(&aquila_snd_device->dev, &voice_dai);
-	if (ret)
-		return ret;
-
-	platform_set_drvdata(aquila_snd_device, &aquila);
-	ret = platform_device_add(aquila_snd_device);
-
-	if (ret)
-		platform_device_put(aquila_snd_device);
-
-	return ret;
-}
-
-static void __exit aquila_exit(void)
-{
-	platform_device_unregister(aquila_snd_device);
-}
-
-module_init(aquila_init);
-module_exit(aquila_exit);
-
-/* Module information */
-MODULE_DESCRIPTION("ALSA SoC WM8994 Aquila(S5PC110)");
-MODULE_AUTHOR("Chanwoo Choi <cw00.choi@samsung.com>");
-MODULE_LICENSE("GPL");
diff --git a/sound/soc/s3c24xx/s3c64xx-i2s-v4.c b/sound/soc/s3c24xx/s3c64xx-i2s-v4.c
deleted file mode 100644
index a962847..0000000
--- a/sound/soc/s3c24xx/s3c64xx-i2s-v4.c
+++ /dev/null
@@ -1,230 +0,0 @@
-/* sound/soc/s3c24xx/s3c64xx-i2s-v4.c
- *
- * ALSA SoC Audio Layer - S3C64XX I2Sv4 driver
- * Copyright (c) 2010 Samsung Electronics Co. Ltd
- * 	Author: Jaswinder Singh <jassi.brar@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/clk.h>
-#include <linux/gpio.h>
-#include <linux/io.h>
-
-#include <sound/soc.h>
-#include <sound/pcm_params.h>
-
-#include <plat/audio.h>
-
-#include <mach/map.h>
-#include <mach/dma.h>
-
-#include "s3c-dma.h"
-#include "regs-i2s-v2.h"
-#include "s3c64xx-i2s.h"
-
-static struct s3c2410_dma_client s3c64xx_dma_client_out = {
-	.name		= "I2Sv4 PCM Stereo out"
-};
-
-static struct s3c2410_dma_client s3c64xx_dma_client_in = {
-	.name		= "I2Sv4 PCM Stereo in"
-};
-
-static struct s3c_dma_params s3c64xx_i2sv4_pcm_stereo_out;
-static struct s3c_dma_params s3c64xx_i2sv4_pcm_stereo_in;
-static struct s3c_i2sv2_info s3c64xx_i2sv4;
-
-static int s3c64xx_i2sv4_probe(struct snd_soc_dai *dai)
-{
-	struct s3c_i2sv2_info *i2s = &s3c64xx_i2sv4;
-	int ret = 0;
-
-	snd_soc_dai_set_drvdata(dai, i2s);
-
-	ret = s3c_i2sv2_probe(dai, i2s, i2s->base);
-
-	return ret;
-}
-
-static int s3c_i2sv4_hw_params(struct snd_pcm_substream *substream,
-				 struct snd_pcm_hw_params *params,
-				 struct snd_soc_dai *cpu_dai)
-{
-	struct s3c_i2sv2_info *i2s = snd_soc_dai_get_drvdata(cpu_dai);
-	struct s3c_dma_params *dma_data;
-	u32 iismod;
-
-	dev_dbg(cpu_dai->dev, "Entered %s\n", __func__);
-
-	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
-		dma_data = i2s->dma_playback;
-	else
-		dma_data = i2s->dma_capture;
-
-	snd_soc_dai_set_dma_data(cpu_dai, substream, dma_data);
-
-	iismod = readl(i2s->regs + S3C2412_IISMOD);
-	dev_dbg(cpu_dai->dev, "%s: r: IISMOD: %x\n", __func__, iismod);
-
-	iismod &= ~S3C64XX_IISMOD_BLC_MASK;
-	switch (params_format(params)) {
-	case SNDRV_PCM_FORMAT_S8:
-		iismod |= S3C64XX_IISMOD_BLC_8BIT;
-		break;
-	case SNDRV_PCM_FORMAT_S16_LE:
-		break;
-	case SNDRV_PCM_FORMAT_S24_LE:
-		iismod |= S3C64XX_IISMOD_BLC_24BIT;
-		break;
-	}
-
-	writel(iismod, i2s->regs + S3C2412_IISMOD);
-	dev_dbg(cpu_dai->dev, "%s: w: IISMOD: %x\n", __func__, iismod);
-
-	return 0;
-}
-
-static struct snd_soc_dai_ops s3c64xx_i2sv4_dai_ops = {
-	.hw_params	= s3c_i2sv4_hw_params,
-};
-
-static struct snd_soc_dai_driver s3c64xx_i2s_v4_dai = {
-	.symmetric_rates = 1,
-	.playback = {
-		.channels_min = 2,
-		.channels_max = 2,
-		.rates = S3C64XX_I2S_RATES,
-		.formats = S3C64XX_I2S_FMTS,
-	},
-	.capture = {
-		.channels_min = 2,
-		.channels_max = 2,
-		.rates = S3C64XX_I2S_RATES,
-		.formats = S3C64XX_I2S_FMTS,
-	},
-	.probe = s3c64xx_i2sv4_probe,
-	.ops = &s3c64xx_i2sv4_dai_ops,
-};
-
-static __devinit int s3c64xx_i2sv4_dev_probe(struct platform_device *pdev)
-{
-	struct s3c_audio_pdata *i2s_pdata;
-	struct s3c_i2sv2_info *i2s;
-	struct resource *res;
-	int ret;
-
-	i2s = &s3c64xx_i2sv4;
-
-	i2s->feature |= S3C_FEATURE_CDCLKCON;
-
-	i2s->dma_capture = &s3c64xx_i2sv4_pcm_stereo_in;
-	i2s->dma_playback = &s3c64xx_i2sv4_pcm_stereo_out;
-
-	res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
-	if (!res) {
-		dev_err(&pdev->dev, "Unable to get I2S-TX dma resource\n");
-		return -ENXIO;
-	}
-	i2s->dma_playback->channel = res->start;
-
-	res = platform_get_resource(pdev, IORESOURCE_DMA, 1);
-	if (!res) {
-		dev_err(&pdev->dev, "Unable to get I2S-RX dma resource\n");
-		return -ENXIO;
-	}
-	i2s->dma_capture->channel = res->start;
-
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!res) {
-		dev_err(&pdev->dev, "Unable to get I2S SFR address\n");
-		return -ENXIO;
-	}
-
-	if (!request_mem_region(res->start, resource_size(res),
-				"s3c64xx-i2s-v4")) {
-		dev_err(&pdev->dev, "Unable to request SFR region\n");
-		return -EBUSY;
-	}
-	i2s->dma_capture->dma_addr = res->start + S3C2412_IISRXD;
-	i2s->dma_playback->dma_addr = res->start + S3C2412_IISTXD;
-
-	i2s->dma_capture->client = &s3c64xx_dma_client_in;
-	i2s->dma_capture->dma_size = 4;
-	i2s->dma_playback->client = &s3c64xx_dma_client_out;
-	i2s->dma_playback->dma_size = 4;
-
-	i2s->base = res->start;
-
-	i2s_pdata = pdev->dev.platform_data;
-	if (i2s_pdata && i2s_pdata->cfg_gpio && i2s_pdata->cfg_gpio(pdev)) {
-		dev_err(&pdev->dev, "Unable to configure gpio\n");
-		return -EINVAL;
-	}
-
-	i2s->iis_cclk = clk_get(&pdev->dev, "audio-bus");
-	if (IS_ERR(i2s->iis_cclk)) {
-		dev_err(&pdev->dev, "failed to get audio-bus\n");
-		ret = PTR_ERR(i2s->iis_cclk);
-		goto err;
-	}
-
-	clk_enable(i2s->iis_cclk);
-
-	ret = s3c_i2sv2_register_dai(&pdev->dev, pdev->id, &s3c64xx_i2s_v4_dai);
-	if (ret != 0)
-		goto err_i2sv2;
-
-	return 0;
-
-err_i2sv2:
-	clk_put(i2s->iis_cclk);
-err:
-	return ret;
-}
-
-static __devexit int s3c64xx_i2sv4_dev_remove(struct platform_device *pdev)
-{
-	struct s3c_i2sv2_info *i2s = &s3c64xx_i2sv4;
-	struct resource *res;
-
-	snd_soc_unregister_dai(&pdev->dev);
-	clk_put(i2s->iis_cclk);
-
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (res)
-		release_mem_region(res->start, resource_size(res));
-	else
-		dev_warn(&pdev->dev, "Unable to get I2S SFR address\n");
-		
-	return 0;
-}
-
-static struct platform_driver s3c64xx_i2sv4_driver = {
-	.probe  = s3c64xx_i2sv4_dev_probe,
-	.remove = s3c64xx_i2sv4_dev_remove,
-	.driver = {
-		.name = "s3c64xx-iis-v4",
-		.owner = THIS_MODULE,
-	},
-};
-
-static int __init s3c64xx_i2sv4_init(void)
-{
-	return platform_driver_register(&s3c64xx_i2sv4_driver);
-}
-module_init(s3c64xx_i2sv4_init);
-
-static void __exit s3c64xx_i2sv4_exit(void)
-{
-	platform_driver_unregister(&s3c64xx_i2sv4_driver);
-}
-module_exit(s3c64xx_i2sv4_exit);
-
-/* Module information */
-MODULE_AUTHOR("Jaswinder Singh, <jassi.brar@samsung.com>");
-MODULE_DESCRIPTION("S3C64XX I2Sv4 SoC Interface");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:s3c64xx-iis-v4");
diff --git a/sound/soc/s3c24xx/s3c64xx-i2s.c b/sound/soc/s3c24xx/s3c64xx-i2s.c
deleted file mode 100644
index ae7acb6..0000000
--- a/sound/soc/s3c24xx/s3c64xx-i2s.c
+++ /dev/null
@@ -1,242 +0,0 @@
-/* sound/soc/s3c24xx/s3c64xx-i2s.c
- *
- * ALSA SoC Audio Layer - S3C64XX I2S driver
- *
- * Copyright 2008 Openmoko, Inc.
- * Copyright 2008 Simtec Electronics
- *      Ben Dooks <ben@simtec.co.uk>
- *      http://armlinux.simtec.co.uk/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/module.h>
-#include <linux/clk.h>
-#include <linux/gpio.h>
-#include <linux/io.h>
-#include <linux/slab.h>
-
-#include <sound/soc.h>
-
-#include <plat/audio.h>
-
-#include <mach/map.h>
-#include <mach/dma.h>
-
-#include "s3c-dma.h"
-#include "regs-i2s-v2.h"
-#include "s3c64xx-i2s.h"
-
-/* The value should be set to maximum of the total number
- * of I2Sv3 controllers that any supported SoC has.
- */
-#define MAX_I2SV3	2
-
-static struct s3c2410_dma_client s3c64xx_dma_client_out = {
-	.name		= "I2S PCM Stereo out"
-};
-
-static struct s3c2410_dma_client s3c64xx_dma_client_in = {
-	.name		= "I2S PCM Stereo in"
-};
-
-static struct s3c_dma_params s3c64xx_i2s_pcm_stereo_out[MAX_I2SV3];
-static struct s3c_dma_params s3c64xx_i2s_pcm_stereo_in[MAX_I2SV3];
-static struct s3c_i2sv2_info s3c64xx_i2s[MAX_I2SV3];
-
-struct clk *s3c64xx_i2s_get_clock(struct snd_soc_dai *dai)
-{
-	struct s3c_i2sv2_info *i2s = snd_soc_dai_get_drvdata(dai);
-	u32 iismod = readl(i2s->regs + S3C2412_IISMOD);
-
-	if (iismod & S3C2412_IISMOD_IMS_SYSMUX)
-		return i2s->iis_cclk;
-	else
-		return i2s->iis_pclk;
-}
-EXPORT_SYMBOL_GPL(s3c64xx_i2s_get_clock);
-
-static int s3c64xx_i2s_probe(struct snd_soc_dai *dai)
-{
-	struct s3c_i2sv2_info *i2s;
-	int ret;
-
-	if (dai->id >= MAX_I2SV3) {
-		dev_err(dai->dev, "id %d out of range\n", dai->id);
-		return -EINVAL;
-	}
-
-	i2s = &s3c64xx_i2s[dai->id];
-	snd_soc_dai_set_drvdata(dai, i2s);
-
-	i2s->iis_cclk = clk_get(dai->dev, "audio-bus");
-	if (IS_ERR(i2s->iis_cclk)) {
-		dev_err(dai->dev, "failed to get audio-bus\n");
-		ret = PTR_ERR(i2s->iis_cclk);
-		goto err;
-	}
-
-	clk_enable(i2s->iis_cclk);
-
-	ret = s3c_i2sv2_probe(dai, i2s, i2s->base);
-	if (ret)
-		goto err_clk;
-
-	return 0;
-
-err_clk:
-	clk_disable(i2s->iis_cclk);
-	clk_put(i2s->iis_cclk);
-err:
-	kfree(i2s);
-	return ret;
-}
-
-static int s3c64xx_i2s_remove(struct snd_soc_dai *dai)
-{
-	struct s3c_i2sv2_info *i2s = snd_soc_dai_get_drvdata(dai);
-
-	clk_disable(i2s->iis_cclk);
-	clk_put(i2s->iis_cclk);
-	kfree(i2s);
-	return 0;
-}
-
-static struct snd_soc_dai_ops s3c64xx_i2s_dai_ops;
-
-static struct snd_soc_dai_driver s3c64xx_i2s_dai[MAX_I2SV3] = {
-{
-	.name = "s3c64xx-i2s-0",
-	.probe = s3c64xx_i2s_probe,
-	.remove = s3c64xx_i2s_remove,
-	.playback = {
-		.channels_min = 2,
-		.channels_max = 2,
-		.rates = S3C64XX_I2S_RATES,
-		.formats = S3C64XX_I2S_FMTS,},
-	.capture = {
-		.channels_min = 2,
-		.channels_max = 2,
-		.rates = S3C64XX_I2S_RATES,
-		.formats = S3C64XX_I2S_FMTS,},
-	.ops = &s3c64xx_i2s_dai_ops,
-	.symmetric_rates = 1,
-}, {
-	.name = "s3c64xx-i2s-1",
-	.probe = s3c64xx_i2s_probe,
-	.remove = s3c64xx_i2s_remove,
-	.playback = {
-		.channels_min = 2,
-		.channels_max = 2,
-		.rates = S3C64XX_I2S_RATES,
-		.formats = S3C64XX_I2S_FMTS,},
-	.capture = {
-		.channels_min = 2,
-		.channels_max = 2,
-		.rates = S3C64XX_I2S_RATES,
-		.formats = S3C64XX_I2S_FMTS,},
-	.ops = &s3c64xx_i2s_dai_ops,
-	.symmetric_rates = 1,
-},};
-
-static __devinit int s3c64xx_iis_dev_probe(struct platform_device *pdev)
-{
-	struct s3c_audio_pdata *i2s_pdata;
-	struct s3c_i2sv2_info *i2s;
-	struct resource *res;
-	int i, ret;
-
-	if (pdev->id >= MAX_I2SV3) {
-		dev_err(&pdev->dev, "id %d out of range\n", pdev->id);
-		return -EINVAL;
-	}
-
-	i2s = &s3c64xx_i2s[pdev->id];
-
-	i2s->dma_capture = &s3c64xx_i2s_pcm_stereo_in[pdev->id];
-	i2s->dma_playback = &s3c64xx_i2s_pcm_stereo_out[pdev->id];
-
-	res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
-	if (!res) {
-		dev_err(&pdev->dev, "Unable to get I2S-TX dma resource\n");
-		return -ENXIO;
-	}
-	i2s->dma_playback->channel = res->start;
-
-	res = platform_get_resource(pdev, IORESOURCE_DMA, 1);
-	if (!res) {
-		dev_err(&pdev->dev, "Unable to get I2S-RX dma resource\n");
-		return -ENXIO;
-	}
-	i2s->dma_capture->channel = res->start;
-
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!res) {
-		dev_err(&pdev->dev, "Unable to get I2S SFR address\n");
-		return -ENXIO;
-	}
-
-	if (!request_mem_region(res->start, resource_size(res),
-				"s3c64xx-i2s")) {
-		dev_err(&pdev->dev, "Unable to request SFR region\n");
-		return -EBUSY;
-	}
-	i2s->base = res->start;
-
-	i2s_pdata = pdev->dev.platform_data;
-	if (i2s_pdata && i2s_pdata->cfg_gpio && i2s_pdata->cfg_gpio(pdev)) {
-		dev_err(&pdev->dev, "Unable to configure gpio\n");
-		return -EINVAL;
-	}
-	i2s->dma_capture->dma_addr = res->start + S3C2412_IISRXD;
-	i2s->dma_playback->dma_addr = res->start + S3C2412_IISTXD;
-
-	i2s->dma_capture->client = &s3c64xx_dma_client_in;
-	i2s->dma_capture->dma_size = 4;
-	i2s->dma_playback->client = &s3c64xx_dma_client_out;
-	i2s->dma_playback->dma_size = 4;
-
-	for (i = 0; i < ARRAY_SIZE(s3c64xx_i2s_dai); i++) {
-		ret = s3c_i2sv2_register_dai(&pdev->dev, i,
-						&s3c64xx_i2s_dai[i]);
-		if (ret != 0)
-			return ret;
-	}
-
-	return 0;
-}
-
-static __devexit int s3c64xx_iis_dev_remove(struct platform_device *pdev)
-{
-	snd_soc_unregister_dais(&pdev->dev, ARRAY_SIZE(s3c64xx_i2s_dai));
-	return 0;
-}
-
-static struct platform_driver s3c64xx_iis_driver = {
-	.probe  = s3c64xx_iis_dev_probe,
-	.remove = s3c64xx_iis_dev_remove,
-	.driver = {
-		.name = "s3c64xx-iis",
-		.owner = THIS_MODULE,
-	},
-};
-
-static int __init s3c64xx_i2s_init(void)
-{
-	return platform_driver_register(&s3c64xx_iis_driver);
-}
-module_init(s3c64xx_i2s_init);
-
-static void __exit s3c64xx_i2s_exit(void)
-{
-	platform_driver_unregister(&s3c64xx_iis_driver);
-}
-module_exit(s3c64xx_i2s_exit);
-
-/* Module information */
-MODULE_AUTHOR("Ben Dooks, <ben@simtec.co.uk>");
-MODULE_DESCRIPTION("S3C64XX I2S SoC Interface");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:s3c64xx-iis");
diff --git a/sound/soc/s3c24xx/s3c64xx-i2s.h b/sound/soc/s3c24xx/s3c64xx-i2s.h
deleted file mode 100644
index de4075d..0000000
--- a/sound/soc/s3c24xx/s3c64xx-i2s.h
+++ /dev/null
@@ -1,41 +0,0 @@
-/* sound/soc/s3c24xx/s3c64xx-i2s.h
- *
- * ALSA SoC Audio Layer - S3C64XX I2S driver
- *
- * Copyright 2008 Openmoko, Inc.
- * Copyright 2008 Simtec Electronics
- *      Ben Dooks <ben@simtec.co.uk>
- *      http://armlinux.simtec.co.uk/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef __SND_SOC_S3C24XX_S3C64XX_I2S_H
-#define __SND_SOC_S3C24XX_S3C64XX_I2S_H __FILE__
-
-struct clk;
-
-#include "s3c-i2s-v2.h"
-
-#define S3C64XX_DIV_BCLK	S3C_I2SV2_DIV_BCLK
-#define S3C64XX_DIV_RCLK	S3C_I2SV2_DIV_RCLK
-#define S3C64XX_DIV_PRESCALER	S3C_I2SV2_DIV_PRESCALER
-
-#define S3C64XX_CLKSRC_PCLK	S3C_I2SV2_CLKSRC_PCLK
-#define S3C64XX_CLKSRC_MUX	S3C_I2SV2_CLKSRC_AUDIOBUS
-#define S3C64XX_CLKSRC_CDCLK    S3C_I2SV2_CLKSRC_CDCLK
-
-#define S3C64XX_I2S_RATES \
-	(SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_11025 | SNDRV_PCM_RATE_16000 | \
-	SNDRV_PCM_RATE_22050 | SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 | \
-	SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_88200 | SNDRV_PCM_RATE_96000)
-
-#define S3C64XX_I2S_FMTS \
-	(SNDRV_PCM_FMTBIT_S8 | SNDRV_PCM_FMTBIT_S16_LE |\
-	 SNDRV_PCM_FMTBIT_S24_LE)
-
-struct clk *s3c64xx_i2s_get_clock(struct snd_soc_dai *dai);
-
-#endif /* __SND_SOC_S3C24XX_S3C64XX_I2S_H */
diff --git a/sound/soc/s3c24xx/smdk64xx_wm8580.c b/sound/soc/s3c24xx/smdk64xx_wm8580.c
deleted file mode 100644
index 052e499..0000000
--- a/sound/soc/s3c24xx/smdk64xx_wm8580.c
+++ /dev/null
@@ -1,272 +0,0 @@
-/*
- *  smdk64xx_wm8580.c
- *
- *  Copyright (c) 2009 Samsung Electronics Co. Ltd
- *  Author: Jaswinder Singh <jassi.brar@samsung.com>
- *
- *  This program is free software; you can redistribute  it and/or modify it
- *  under  the terms of  the GNU General  Public License as published by the
- *  Free Software Foundation;  either version 2 of the  License, or (at your
- *  option) any later version.
- */
-
-#include <linux/platform_device.h>
-#include <linux/clk.h>
-#include <sound/core.h>
-#include <sound/pcm.h>
-#include <sound/pcm_params.h>
-#include <sound/soc.h>
-#include <sound/soc-dapm.h>
-
-#include "../codecs/wm8580.h"
-#include "s3c-dma.h"
-#include "s3c64xx-i2s.h"
-
-/*
- * Default CFG switch settings to use this driver:
- *
- *   SMDK6410: Set CFG1 1-3 Off, CFG2 1-4 On
- */
-
-/* SMDK64XX has a 12MHZ crystal attached to WM8580 */
-#define SMDK64XX_WM8580_FREQ 12000000
-
-static int smdk64xx_hw_params(struct snd_pcm_substream *substream,
-	struct snd_pcm_hw_params *params)
-{
-	struct snd_soc_pcm_runtime *rtd = substream->private_data;
-	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
-	struct snd_soc_dai *codec_dai = rtd->codec_dai;
-	unsigned int pll_out;
-	int bfs, rfs, ret;
-
-	switch (params_format(params)) {
-	case SNDRV_PCM_FORMAT_U8:
-	case SNDRV_PCM_FORMAT_S8:
-		bfs = 16;
-		break;
-	case SNDRV_PCM_FORMAT_U16_LE:
-	case SNDRV_PCM_FORMAT_S16_LE:
-		bfs = 32;
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	/* The Fvco for WM8580 PLLs must fall within [90,100]MHz.
-	 * This criterion can't be met if we request PLL output
-	 * as {8000x256, 64000x256, 11025x256}Hz.
-	 * As a wayout, we rather change rfs to a minimum value that
-	 * results in (params_rate(params) * rfs), and itself, acceptable
-	 * to both - the CODEC and the CPU.
-	 */
-	switch (params_rate(params)) {
-	case 16000:
-	case 22050:
-	case 32000:
-	case 44100:
-	case 48000:
-	case 88200:
-	case 96000:
-		rfs = 256;
-		break;
-	case 64000:
-		rfs = 384;
-		break;
-	case 8000:
-	case 11025:
-		rfs = 512;
-		break;
-	default:
-		return -EINVAL;
-	}
-	pll_out = params_rate(params) * rfs;
-
-	/* Set the Codec DAI configuration */
-	ret = snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_I2S
-					 | SND_SOC_DAIFMT_NB_NF
-					 | SND_SOC_DAIFMT_CBM_CFM);
-	if (ret < 0)
-		return ret;
-
-	/* Set the AP DAI configuration */
-	ret = snd_soc_dai_set_fmt(cpu_dai, SND_SOC_DAIFMT_I2S
-					 | SND_SOC_DAIFMT_NB_NF
-					 | SND_SOC_DAIFMT_CBM_CFM);
-	if (ret < 0)
-		return ret;
-
-	ret = snd_soc_dai_set_sysclk(cpu_dai, S3C64XX_CLKSRC_CDCLK,
-					0, SND_SOC_CLOCK_IN);
-	if (ret < 0)
-		return ret;
-
-	/* We use PCLK for basic ops in SoC-Slave mode */
-	ret = snd_soc_dai_set_sysclk(cpu_dai, S3C64XX_CLKSRC_PCLK,
-					0, SND_SOC_CLOCK_IN);
-	if (ret < 0)
-		return ret;
-
-	/* Set WM8580 to drive MCLK from its PLLA */
-	ret = snd_soc_dai_set_clkdiv(codec_dai, WM8580_MCLK,
-					WM8580_CLKSRC_PLLA);
-	if (ret < 0)
-		return ret;
-
-	ret = snd_soc_dai_set_pll(codec_dai, WM8580_PLLA, 0,
-					SMDK64XX_WM8580_FREQ, pll_out);
-	if (ret < 0)
-		return ret;
-
-	ret = snd_soc_dai_set_sysclk(codec_dai, WM8580_CLKSRC_PLLA,
-				     pll_out, SND_SOC_CLOCK_IN);
-	if (ret < 0)
-		return ret;
-
-	ret = snd_soc_dai_set_clkdiv(cpu_dai, S3C_I2SV2_DIV_BCLK, bfs);
-	if (ret < 0)
-		return ret;
-
-	ret = snd_soc_dai_set_clkdiv(cpu_dai, S3C_I2SV2_DIV_RCLK, rfs);
-	if (ret < 0)
-		return ret;
-
-	return 0;
-}
-
-/*
- * SMDK64XX WM8580 DAI operations.
- */
-static struct snd_soc_ops smdk64xx_ops = {
-	.hw_params = smdk64xx_hw_params,
-};
-
-/* SMDK64xx Playback widgets */
-static const struct snd_soc_dapm_widget wm8580_dapm_widgets_pbk[] = {
-	SND_SOC_DAPM_HP("Front", NULL),
-	SND_SOC_DAPM_HP("Center+Sub", NULL),
-	SND_SOC_DAPM_HP("Rear", NULL),
-};
-
-/* SMDK64xx Capture widgets */
-static const struct snd_soc_dapm_widget wm8580_dapm_widgets_cpt[] = {
-	SND_SOC_DAPM_MIC("MicIn", NULL),
-	SND_SOC_DAPM_LINE("LineIn", NULL),
-};
-
-/* SMDK-PAIFTX connections */
-static const struct snd_soc_dapm_route audio_map_tx[] = {
-	/* MicIn feeds AINL */
-	{"AINL", NULL, "MicIn"},
-
-	/* LineIn feeds AINL/R */
-	{"AINL", NULL, "LineIn"},
-	{"AINR", NULL, "LineIn"},
-};
-
-/* SMDK-PAIFRX connections */
-static const struct snd_soc_dapm_route audio_map_rx[] = {
-	/* Front Left/Right are fed VOUT1L/R */
-	{"Front", NULL, "VOUT1L"},
-	{"Front", NULL, "VOUT1R"},
-
-	/* Center/Sub are fed VOUT2L/R */
-	{"Center+Sub", NULL, "VOUT2L"},
-	{"Center+Sub", NULL, "VOUT2R"},
-
-	/* Rear Left/Right are fed VOUT3L/R */
-	{"Rear", NULL, "VOUT3L"},
-	{"Rear", NULL, "VOUT3R"},
-};
-
-static int smdk64xx_wm8580_init_paiftx(struct snd_soc_pcm_runtime *rtd)
-{
-	struct snd_soc_codec *codec = rtd->codec;
-
-	/* Add smdk64xx specific Capture widgets */
-	snd_soc_dapm_new_controls(codec, wm8580_dapm_widgets_cpt,
-				  ARRAY_SIZE(wm8580_dapm_widgets_cpt));
-
-	/* Set up PAIFTX audio path */
-	snd_soc_dapm_add_routes(codec, audio_map_tx, ARRAY_SIZE(audio_map_tx));
-
-	/* Enabling the microphone requires the fitting of a 0R
-	 * resistor to connect the line from the microphone jack.
-	 */
-	snd_soc_dapm_disable_pin(codec, "MicIn");
-
-	/* signal a DAPM event */
-	snd_soc_dapm_sync(codec);
-
-	return 0;
-}
-
-static int smdk64xx_wm8580_init_paifrx(struct snd_soc_pcm_runtime *rtd)
-{
-	struct snd_soc_codec *codec = rtd->codec;
-
-	/* Add smdk64xx specific Playback widgets */
-	snd_soc_dapm_new_controls(codec, wm8580_dapm_widgets_pbk,
-				  ARRAY_SIZE(wm8580_dapm_widgets_pbk));
-
-	/* Set up PAIFRX audio path */
-	snd_soc_dapm_add_routes(codec, audio_map_rx, ARRAY_SIZE(audio_map_rx));
-
-	/* signal a DAPM event */
-	snd_soc_dapm_sync(codec);
-
-	return 0;
-}
-
-static struct snd_soc_dai_link smdk64xx_dai[] = {
-{ /* Primary Playback i/f */
-	.name = "WM8580 PAIF RX",
-	.stream_name = "Playback",
-	.cpu_dai_name = "s3c64xx-iis-v4",
-	.codec_dai_name = "wm8580-hifi-playback",
-	.platform_name = "s3c24xx-pcm-audio",
-	.codec_name = "wm8580-codec.0-001b",
-	.init = smdk64xx_wm8580_init_paifrx,
-	.ops = &smdk64xx_ops,
-},
-{ /* Primary Capture i/f */
-	.name = "WM8580 PAIF TX",
-	.stream_name = "Capture",
-	.cpu_dai_name = "s3c64xx-iis-v4",
-	.codec_dai_name = "wm8580-hifi-capture",
-	.platform_name = "s3c24xx-pcm-audio",
-	.codec_name = "wm8580-codec.0-001b",
-	.init = smdk64xx_wm8580_init_paiftx,
-	.ops = &smdk64xx_ops,
-},
-};
-
-static struct snd_soc_card smdk64xx = {
-	.name = "SMDK64xx 5.1",
-	.dai_link = smdk64xx_dai,
-	.num_links = ARRAY_SIZE(smdk64xx_dai),
-};
-
-static struct platform_device *smdk64xx_snd_device;
-
-static int __init smdk64xx_audio_init(void)
-{
-	int ret;
-
-	smdk64xx_snd_device = platform_device_alloc("soc-audio", -1);
-	if (!smdk64xx_snd_device)
-		return -ENOMEM;
-
-	platform_set_drvdata(smdk64xx_snd_device, &smdk64xx);
-	ret = platform_device_add(smdk64xx_snd_device);
-
-	if (ret)
-		platform_device_put(smdk64xx_snd_device);
-
-	return ret;
-}
-module_init(smdk64xx_audio_init);
-
-MODULE_AUTHOR("Jaswinder Singh, jassi.brar@samsung.com");
-MODULE_DESCRIPTION("ALSA SoC SMDK64XX WM8580");
-MODULE_LICENSE("GPL");
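The removed smdk64xx_wm8580.c above (the new sound/soc/samsung Kconfig below carries a corresponding SMDK_WM8580 option) also shows the usual machine-level hw_params clocking sequence: agree on the DAI format at both ends, then programme the codec PLL and system clock from the sample rate. A condensed, hedged sketch of that sequence; the pll/clock ids (0), the 12 MHz reference and the 256fs ratio are assumptions for illustration, not values from the file above.

#include <sound/pcm_params.h>
#include <sound/soc.h>

#define MYBOARD_MCLK_FREQ	12000000	/* assumed 12 MHz reference */

static int myboard_hw_params(struct snd_pcm_substream *substream,
			     struct snd_pcm_hw_params *params)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
	struct snd_soc_dai *codec_dai = rtd->codec_dai;
	unsigned int rclk = params_rate(params) * 256;	/* assumed 256fs root clock */
	int ret;

	/* both ends must agree on format; codec is clock/frame master here */
	ret = snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_I2S |
				  SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBM_CFM);
	if (ret < 0)
		return ret;
	ret = snd_soc_dai_set_fmt(cpu_dai, SND_SOC_DAIFMT_I2S |
				  SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBM_CFM);
	if (ret < 0)
		return ret;

	/* run the codec PLL from the fixed reference up to the root clock... */
	ret = snd_soc_dai_set_pll(codec_dai, 0, 0, MYBOARD_MCLK_FREQ, rclk);
	if (ret < 0)
		return ret;

	/* ...and clock the codec DAI from that PLL output */
	return snd_soc_dai_set_sysclk(codec_dai, 0, rclk, SND_SOC_CLOCK_IN);
}
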
diff --git a/sound/soc/s6000/s6105-ipcam.c b/sound/soc/s6000/s6105-ipcam.c
index c1244c5..5890e43 100644
--- a/sound/soc/s6000/s6105-ipcam.c
+++ b/sound/soc/s6000/s6105-ipcam.c
@@ -18,11 +18,9 @@
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 
 #include <variant/dmac.h>
 
-#include "../codecs/tlv320aic3x.h"
 #include "s6000-pcm.h"
 #include "s6000-i2s.h"
 
@@ -107,6 +105,7 @@
 			   struct snd_ctl_elem_value *ucontrol)
 {
 	struct snd_soc_codec *codec = kcontrol->private_data;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 	unsigned int val = (ucontrol->value.enumerated.item[0] != 0);
 	char *differential = "Audio Out Differential";
 	char *stereo = "Audio Out Stereo";
@@ -114,10 +113,10 @@
 	if (kcontrol->private_value == val)
 		return 0;
 	kcontrol->private_value = val;
-	snd_soc_dapm_disable_pin(codec, val ? differential : stereo);
-	snd_soc_dapm_sync(codec);
-	snd_soc_dapm_enable_pin(codec, val ? stereo : differential);
-	snd_soc_dapm_sync(codec);
+	snd_soc_dapm_disable_pin(dapm, val ? differential : stereo);
+	snd_soc_dapm_sync(dapm);
+	snd_soc_dapm_enable_pin(dapm, val ? stereo : differential);
+	snd_soc_dapm_sync(dapm);
 
 	return 1;
 }
@@ -137,35 +136,36 @@
 static int s6105_aic3x_init(struct snd_soc_pcm_runtime *rtd)
 {
 	struct snd_soc_codec *codec = rtd->codec;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 
 	/* Add s6105 specific widgets */
-	snd_soc_dapm_new_controls(codec, aic3x_dapm_widgets,
+	snd_soc_dapm_new_controls(dapm, aic3x_dapm_widgets,
 				  ARRAY_SIZE(aic3x_dapm_widgets));
 
 	/* Set up s6105 specific audio path audio_map */
-	snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+	snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 
 	/* not present */
-	snd_soc_dapm_nc_pin(codec, "MONO_LOUT");
-	snd_soc_dapm_nc_pin(codec, "LINE2L");
-	snd_soc_dapm_nc_pin(codec, "LINE2R");
+	snd_soc_dapm_nc_pin(dapm, "MONO_LOUT");
+	snd_soc_dapm_nc_pin(dapm, "LINE2L");
+	snd_soc_dapm_nc_pin(dapm, "LINE2R");
 
 	/* not connected */
-	snd_soc_dapm_nc_pin(codec, "MIC3L"); /* LINE2L on this chip */
-	snd_soc_dapm_nc_pin(codec, "MIC3R"); /* LINE2R on this chip */
-	snd_soc_dapm_nc_pin(codec, "LLOUT");
-	snd_soc_dapm_nc_pin(codec, "RLOUT");
-	snd_soc_dapm_nc_pin(codec, "HPRCOM");
+	snd_soc_dapm_nc_pin(dapm, "MIC3L"); /* LINE2L on this chip */
+	snd_soc_dapm_nc_pin(dapm, "MIC3R"); /* LINE2R on this chip */
+	snd_soc_dapm_nc_pin(dapm, "LLOUT");
+	snd_soc_dapm_nc_pin(dapm, "RLOUT");
+	snd_soc_dapm_nc_pin(dapm, "HPRCOM");
 
 	/* always connected */
-	snd_soc_dapm_enable_pin(codec, "Audio In");
+	snd_soc_dapm_enable_pin(dapm, "Audio In");
 
 	/* must correspond to audio_out_mux.private_value initializer */
-	snd_soc_dapm_disable_pin(codec, "Audio Out Differential");
-	snd_soc_dapm_sync(codec);
-	snd_soc_dapm_enable_pin(codec, "Audio Out Stereo");
+	snd_soc_dapm_disable_pin(dapm, "Audio Out Differential");
+	snd_soc_dapm_sync(dapm);
+	snd_soc_dapm_enable_pin(dapm, "Audio Out Stereo");
 
-	snd_soc_dapm_sync(codec);
+	snd_soc_dapm_sync(dapm);
 
 	snd_ctl_add(codec->card->snd_card, snd_ctl_new1(&audio_out_mux, codec));
 
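The s6105 hunk above uses the same DAPM context at run time, from a mixer control's put handler, to flip between two output paths. A condensed sketch of that shape with hypothetical pin names ("Speaker"/"Line Out") standing in for the real ones:

#include <sound/soc.h>

/* Sketch only: control and pin names are illustrative. */
static int myboard_out_mux_put(struct snd_kcontrol *kcontrol,
			       struct snd_ctl_elem_value *ucontrol)
{
	struct snd_soc_codec *codec = kcontrol->private_data;
	struct snd_soc_dapm_context *dapm = &codec->dapm;
	unsigned int val = (ucontrol->value.enumerated.item[0] != 0);

	if (kcontrol->private_value == val)
		return 0;			/* nothing changed */
	kcontrol->private_value = val;

	/* tear the old path down before bringing the new one up */
	snd_soc_dapm_disable_pin(dapm, val ? "Line Out" : "Speaker");
	snd_soc_dapm_sync(dapm);
	snd_soc_dapm_enable_pin(dapm, val ? "Speaker" : "Line Out");
	snd_soc_dapm_sync(dapm);

	return 1;				/* value changed */
}
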
diff --git a/sound/soc/samsung/Kconfig b/sound/soc/samsung/Kconfig
new file mode 100644
index 0000000..a6a6b5f
--- /dev/null
+++ b/sound/soc/samsung/Kconfig
@@ -0,0 +1,171 @@
+config SND_SOC_SAMSUNG
+	tristate "ASoC support for Samsung"
+	depends on ARCH_S3C2410 || ARCH_S3C64XX || ARCH_S5PC100 || ARCH_S5PV210 || ARCH_S5P64X0 || ARCH_S5P6442 || ARCH_S5PV310
+	select S3C64XX_DMA if ARCH_S3C64XX
+	select S3C2410_DMA if ARCH_S3C2410
+	help
+	  Say Y or M if you want to add support for codecs attached to
+	  the Samsung SoCs' Audio interfaces. You will also need to
+	  select the audio interfaces to support below.
+
+config SND_S3C24XX_I2S
+	tristate
+	select S3C2410_DMA
+
+config SND_S3C_I2SV2_SOC
+	tristate
+
+config SND_S3C2412_SOC_I2S
+	tristate
+	select SND_S3C_I2SV2_SOC
+	select S3C2410_DMA
+
+config SND_SAMSUNG_PCM
+	tristate
+
+config SND_SAMSUNG_AC97
+	tristate
+	select SND_SOC_AC97_BUS
+
+config SND_SAMSUNG_SPDIF
+	tristate
+	select SND_SOC_SPDIF
+
+config SND_SAMSUNG_I2S
+	tristate
+
+config SND_SOC_SAMSUNG_NEO1973_WM8753
+	tristate "SoC I2S Audio support for NEO1973 - WM8753"
+	depends on SND_SOC_SAMSUNG && MACH_NEO1973_GTA01
+	select SND_S3C24XX_I2S
+	select SND_SOC_WM8753
+	help
+	  Say Y if you want to add support for SoC audio on smdk2440
+	  with the WM8753.
+
+config SND_SOC_SAMSUNG_NEO1973_GTA02_WM8753
+	tristate "Audio support for the Openmoko Neo FreeRunner (GTA02)"
+	depends on SND_SOC_SAMSUNG && MACH_NEO1973_GTA02
+	select SND_S3C24XX_I2S
+	select SND_SOC_WM8753
+	help
+	  This driver provides audio support for the Openmoko Neo FreeRunner
+	  smartphone.
+	  
+config SND_SOC_SAMSUNG_JIVE_WM8750
+	tristate "SoC I2S Audio support for Jive"
+	depends on SND_SOC_SAMSUNG && MACH_JIVE
+	select SND_SOC_WM8750
+	select SND_S3C2412_SOC_I2S
+	help
+	  Say Y if you want to add support for SoC audio on the Jive.
+
+config SND_SOC_SAMSUNG_SMDK_WM8580
+	tristate "SoC I2S Audio support for WM8580 on SMDK"
+	depends on SND_SOC_SAMSUNG && (MACH_SMDK6410 || MACH_SMDKC100 || MACH_SMDK6440 || MACH_SMDK6450 || MACH_SMDK6442 || MACH_SMDKV210 || MACH_SMDKC110)
+	select SND_SOC_WM8580
+	select SND_SAMSUNG_I2S
+	help
+	  Say Y if you want to add support for SoC audio on the SMDKs.
+
+config SND_SOC_SAMSUNG_SMDK_WM8994
+	tristate "SoC I2S Audio support for WM8994 on SMDK"
+	depends on SND_SOC_SAMSUNG && (MACH_SMDKV310 || MACH_SMDKC210)
+	select SND_SOC_WM8994
+	select SND_SAMSUNG_I2S
+	help
+	  Say Y if you want to add support for SoC audio on the SMDKs.
+
+config SND_SOC_SAMSUNG_SMDK2443_WM9710
+	tristate "SoC AC97 Audio support for SMDK2443 - WM9710"
+	depends on SND_SOC_SAMSUNG && MACH_SMDK2443
+	select S3C2410_DMA
+	select AC97_BUS
+	select SND_SOC_AC97_CODEC
+	select SND_SAMSUNG_AC97
+	help
+	  Say Y if you want to add support for SoC audio on smdk2443
+	  with the WM9710.
+
+config SND_SOC_SAMSUNG_LN2440SBC_ALC650
+	tristate "SoC AC97 Audio support for LN2440SBC - ALC650"
+	depends on SND_SOC_SAMSUNG && ARCH_S3C2410
+	select S3C2410_DMA
+	select AC97_BUS
+	select SND_SOC_AC97_CODEC
+	select SND_SAMSUNG_AC97
+	help
+	  Say Y if you want to add support for SoC audio on ln2440sbc
+	  with the ALC650.
+
+config SND_SOC_SAMSUNG_S3C24XX_UDA134X
+	tristate "SoC I2S Audio support UDA134X wired to a S3C24XX"
+	depends on SND_SOC_SAMSUNG && ARCH_S3C2410
+	select SND_S3C24XX_I2S
+	select SND_SOC_L3
+	select SND_SOC_UDA134X
+
+config SND_SOC_SAMSUNG_SIMTEC
+	tristate
+	help
+	  Internal node for common S3C24XX/Simtec support
+
+config SND_SOC_SAMSUNG_SIMTEC_TLV320AIC23
+	tristate "SoC I2S Audio support for TLV320AIC23 on Simtec boards"
+	depends on SND_SOC_SAMSUNG && ARCH_S3C2410
+	select SND_S3C24XX_I2S
+	select SND_SOC_TLV320AIC23
+	select SND_SOC_SAMSUNG_SIMTEC
+
+config SND_SOC_SAMSUNG_SIMTEC_HERMES
+	tristate "SoC I2S Audio support for Simtec Hermes board"
+	depends on SND_SOC_SAMSUNG && ARCH_S3C2410
+	select SND_S3C24XX_I2S
+	select SND_SOC_TLV320AIC3X
+	select SND_SOC_SAMSUNG_SIMTEC
+
+config SND_SOC_SAMSUNG_H1940_UDA1380
+	tristate "Audio support for the HP iPAQ H1940"
+	depends on SND_SOC_SAMSUNG && ARCH_H1940
+	select SND_S3C24XX_I2S
+	select SND_SOC_UDA1380
+	help
+	  This driver provides audio support for the HP iPAQ H1940 PDA.
+
+config SND_SOC_SAMSUNG_RX1950_UDA1380
+	tristate "Audio support for the HP iPAQ RX1950"
+	depends on SND_SOC_SAMSUNG && MACH_RX1950
+	select SND_S3C24XX_I2S
+	select SND_SOC_UDA1380
+	help
+	  This driver provides audio support for the HP iPAQ RX1950 PDA.
+
+config SND_SOC_SAMSUNG_SMDK_WM9713
+	tristate "SoC AC97 Audio support for SMDK with WM9713"
+	depends on SND_SOC_SAMSUNG && (MACH_SMDK6410 || MACH_SMDKC100 || MACH_SMDKV210 || MACH_SMDKC110 || MACH_SMDKV310 || MACH_SMDKC210)
+	select SND_SOC_WM9713
+	select SND_SAMSUNG_AC97
+	help
+	  Say Y if you want to add support for SoC audio on the SMDK.
+
+config SND_SOC_SMARTQ
+	tristate "SoC I2S Audio support for SmartQ board"
+	depends on SND_SOC_SAMSUNG && MACH_SMARTQ
+	select SND_SAMSUNG_I2S
+	select SND_SOC_WM8750
+
+config SND_SOC_GONI_AQUILA_WM8994
+	tristate "SoC I2S Audio support for AQUILA/GONI - WM8994"
+	depends on SND_SOC_SAMSUNG && (MACH_GONI || MACH_AQUILA)
+	select SND_SAMSUNG_I2S
+	select SND_SOC_WM8994
+	help
+	  Say Y if you want to add support for SoC audio on goni or aquila
+	  with the WM8994.
+
+config SND_SOC_SAMSUNG_SMDK_SPDIF
+	tristate "SoC S/PDIF Audio support for SMDK"
+	depends on SND_SOC_SAMSUNG && (MACH_SMDKC100 || MACH_SMDKC110 || MACH_SMDKV210)
+	select SND_SAMSUNG_SPDIF
+	help
+	  Say Y if you want to add support for SoC S/PDIF audio on the SMDK.
diff --git a/sound/soc/samsung/Makefile b/sound/soc/samsung/Makefile
new file mode 100644
index 0000000..705d4e8
--- /dev/null
+++ b/sound/soc/samsung/Makefile
@@ -0,0 +1,55 @@
+# S3C24XX Platform Support
+snd-soc-s3c24xx-objs := dma.o
+snd-soc-s3c24xx-i2s-objs := s3c24xx-i2s.o
+snd-soc-s3c2412-i2s-objs := s3c2412-i2s.o
+snd-soc-ac97-objs := ac97.o
+snd-soc-s3c-i2s-v2-objs := s3c-i2s-v2.o
+snd-soc-samsung-spdif-objs := spdif.o
+snd-soc-pcm-objs := pcm.o
+snd-soc-i2s-objs := i2s.o
+
+obj-$(CONFIG_SND_SOC_SAMSUNG) += snd-soc-s3c24xx.o
+obj-$(CONFIG_SND_S3C24XX_I2S) += snd-soc-s3c24xx-i2s.o
+obj-$(CONFIG_SND_SAMSUNG_AC97) += snd-soc-ac97.o
+obj-$(CONFIG_SND_S3C2412_SOC_I2S) += snd-soc-s3c2412-i2s.o
+obj-$(CONFIG_SND_S3C_I2SV2_SOC) += snd-soc-s3c-i2s-v2.o
+obj-$(CONFIG_SND_SAMSUNG_SPDIF) += snd-soc-samsung-spdif.o
+obj-$(CONFIG_SND_SAMSUNG_PCM) += snd-soc-pcm.o
+obj-$(CONFIG_SND_SAMSUNG_I2S) += snd-soc-i2s.o
+
+# S3C24XX Machine Support
+snd-soc-jive-wm8750-objs := jive_wm8750.o
+snd-soc-neo1973-wm8753-objs := neo1973_wm8753.o
+snd-soc-neo1973-gta02-wm8753-objs := neo1973_gta02_wm8753.o
+snd-soc-smdk2443-wm9710-objs := smdk2443_wm9710.o
+snd-soc-ln2440sbc-alc650-objs := ln2440sbc_alc650.o
+snd-soc-s3c24xx-uda134x-objs := s3c24xx_uda134x.o
+snd-soc-s3c24xx-simtec-objs := s3c24xx_simtec.o
+snd-soc-s3c24xx-simtec-hermes-objs := s3c24xx_simtec_hermes.o
+snd-soc-s3c24xx-simtec-tlv320aic23-objs := s3c24xx_simtec_tlv320aic23.o
+snd-soc-h1940-uda1380-objs := h1940_uda1380.o
+snd-soc-rx1950-uda1380-objs := rx1950_uda1380.o
+snd-soc-smdk-wm8580-objs := smdk_wm8580.o
+snd-soc-smdk-wm8994-objs := smdk_wm8994.o
+snd-soc-smdk-wm9713-objs := smdk_wm9713.o
+snd-soc-s3c64xx-smartq-wm8987-objs := smartq_wm8987.o
+snd-soc-goni-wm8994-objs := goni_wm8994.o
+snd-soc-smdk-spdif-objs := smdk_spdif.o
+
+obj-$(CONFIG_SND_SOC_SAMSUNG_JIVE_WM8750) += snd-soc-jive-wm8750.o
+obj-$(CONFIG_SND_SOC_SAMSUNG_NEO1973_WM8753) += snd-soc-neo1973-wm8753.o
+obj-$(CONFIG_SND_SOC_SAMSUNG_NEO1973_GTA02_WM8753) += snd-soc-neo1973-gta02-wm8753.o
+obj-$(CONFIG_SND_SOC_SAMSUNG_SMDK2443_WM9710) += snd-soc-smdk2443-wm9710.o
+obj-$(CONFIG_SND_SOC_SAMSUNG_LN2440SBC_ALC650) += snd-soc-ln2440sbc-alc650.o
+obj-$(CONFIG_SND_SOC_SAMSUNG_S3C24XX_UDA134X) += snd-soc-s3c24xx-uda134x.o
+obj-$(CONFIG_SND_SOC_SAMSUNG_SIMTEC) += snd-soc-s3c24xx-simtec.o
+obj-$(CONFIG_SND_SOC_SAMSUNG_SIMTEC_HERMES) += snd-soc-s3c24xx-simtec-hermes.o
+obj-$(CONFIG_SND_SOC_SAMSUNG_SIMTEC_TLV320AIC23) += snd-soc-s3c24xx-simtec-tlv320aic23.o
+obj-$(CONFIG_SND_SOC_SAMSUNG_H1940_UDA1380) += snd-soc-h1940-uda1380.o
+obj-$(CONFIG_SND_SOC_SAMSUNG_RX1950_UDA1380) += snd-soc-rx1950-uda1380.o
+obj-$(CONFIG_SND_SOC_SAMSUNG_SMDK_WM8580) += snd-soc-smdk-wm8580.o
+obj-$(CONFIG_SND_SOC_SAMSUNG_SMDK_WM8994) += snd-soc-smdk-wm8994.o
+obj-$(CONFIG_SND_SOC_SAMSUNG_SMDK_WM9713) += snd-soc-smdk-wm9713.o
+obj-$(CONFIG_SND_SOC_SMARTQ) += snd-soc-s3c64xx-smartq-wm8987.o
+obj-$(CONFIG_SND_SOC_SAMSUNG_SMDK_SPDIF) += snd-soc-smdk-spdif.o
+obj-$(CONFIG_SND_SOC_GONI_AQUILA_WM8994) += snd-soc-goni-wm8994.o
diff --git a/sound/soc/s3c24xx/s3c-ac97.c b/sound/soc/samsung/ac97.c
similarity index 96%
rename from sound/soc/s3c24xx/s3c-ac97.c
rename to sound/soc/samsung/ac97.c
index f891eb7..4770a95 100644
--- a/sound/soc/s3c24xx/s3c-ac97.c
+++ b/sound/soc/samsung/ac97.c
@@ -1,4 +1,4 @@
-/* sound/soc/s3c24xx/s3c-ac97.c
+/* sound/soc/samsung/ac97.c
  *
  * ALSA SoC Audio Layer - S3C AC97 Controller driver
  * 	Evolved from s3c2443-ac97.c
@@ -24,8 +24,8 @@
 #include <mach/dma.h>
 #include <plat/audio.h>
 
-#include "s3c-dma.h"
-#include "s3c-ac97.h"
+#include "dma.h"
+#include "ac97.h"
 
 #define AC_CMD_ADDR(x) (x << 16)
 #define AC_CMD_DATA(x) (x & 0xffff)
@@ -122,7 +122,7 @@
 	data = (stat & 0xffff);
 
 	if (addr != reg)
-		pr_err("s3c-ac97: req addr = %02x, rep addr = %02x\n",
+		pr_err("ac97: req addr = %02x, rep addr = %02x\n",
 			reg, addr);
 
 	mutex_unlock(&s3c_ac97.lock);
@@ -334,7 +334,7 @@
 
 static struct snd_soc_dai_driver s3c_ac97_dai[] = {
 	[S3C_AC97_DAI_PCM] = {
-		.name =	"s3c-ac97",
+		.name =	"samsung-ac97",
 		.ac97_control = 1,
 		.playback = {
 			.stream_name = "AC97 Playback",
@@ -351,7 +351,7 @@
 		.ops = &s3c_ac97_dai_ops,
 	},
 	[S3C_AC97_DAI_MIC] = {
-		.name = "s3c-ac97-mic",
+		.name = "samsung-ac97-mic",
 		.ac97_control = 1,
 		.capture = {
 			.stream_name = "AC97 Mic Capture",
@@ -407,7 +407,7 @@
 	}
 
 	if (!request_mem_region(mem_res->start,
-				resource_size(mem_res), "s3c-ac97")) {
+				resource_size(mem_res), "ac97")) {
 		dev_err(&pdev->dev, "Unable to request register region\n");
 		return -EBUSY;
 	}
@@ -431,7 +431,7 @@
 
 	s3c_ac97.ac97_clk = clk_get(&pdev->dev, "ac97");
 	if (IS_ERR(s3c_ac97.ac97_clk)) {
-		dev_err(&pdev->dev, "s3c-ac97 failed to get ac97_clock\n");
+		dev_err(&pdev->dev, "ac97 failed to get ac97_clock\n");
 		ret = -ENODEV;
 		goto err2;
 	}
@@ -446,7 +446,7 @@
 	ret = request_irq(irq_res->start, s3c_ac97_irq,
 					IRQF_DISABLED, "AC97", NULL);
 	if (ret < 0) {
-		dev_err(&pdev->dev, "s3c-ac97: interrupt request failed.\n");
+		dev_err(&pdev->dev, "ac97: interrupt request failed.\n");
 		goto err4;
 	}
 
@@ -497,7 +497,7 @@
 	.probe  = s3c_ac97_probe,
 	.remove = s3c_ac97_remove,
 	.driver = {
-		.name = "s3c-ac97",
+		.name = "samsung-ac97",
 		.owner = THIS_MODULE,
 	},
 };
@@ -517,4 +517,4 @@
 MODULE_AUTHOR("Jaswinder Singh, <jassi.brar@samsung.com>");
 MODULE_DESCRIPTION("AC97 driver for the Samsung SoC");
 MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:s3c-ac97");
+MODULE_ALIAS("platform:samsung-ac97");
diff --git a/sound/soc/s3c24xx/s3c-ac97.h b/sound/soc/samsung/ac97.h
similarity index 73%
rename from sound/soc/s3c24xx/s3c-ac97.h
rename to sound/soc/samsung/ac97.h
index 5dcedd0..0d0e1b5 100644
--- a/sound/soc/s3c24xx/s3c-ac97.h
+++ b/sound/soc/samsung/ac97.h
@@ -1,11 +1,11 @@
-/* sound/soc/s3c24xx/s3c-ac97.h
+/* sound/soc/samsung/ac97.h
  *
  * ALSA SoC Audio Layer - S3C AC97 Controller driver
- * 	Evolved from s3c2443-ac97.h
+ *	Evolved from s3c2443-ac97.h
  *
  * Copyright (c) 2010 Samsung Electronics Co. Ltd
- * 	Author: Jaswinder Singh <jassi.brar@samsung.com>
- * 	Credits: Graeme Gregory, Sean Choi
+ *	Author: Jaswinder Singh <jassi.brar@samsung.com>
+ *	Credits: Graeme Gregory, Sean Choi
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
diff --git a/sound/soc/s3c24xx/s3c-dma.c b/sound/soc/samsung/dma.c
similarity index 74%
rename from sound/soc/s3c24xx/s3c-dma.c
rename to sound/soc/samsung/dma.c
index 243f79b..2124019 100644
--- a/sound/soc/s3c24xx/s3c-dma.c
+++ b/sound/soc/samsung/dma.c
@@ -1,5 +1,5 @@
 /*
- * s3c-dma.c  --  ALSA Soc Audio Layer
+ * dma.c  --  ALSA SoC Audio Layer
  *
  * (c) 2006 Wolfson Microelectronics PLC.
  * Graeme Gregory graeme.gregory@wolfsonmicro.com or linux@wolfsonmicro.com
@@ -30,9 +30,9 @@
 #include <mach/hardware.h>
 #include <mach/dma.h>
 
-#include "s3c-dma.h"
+#include "dma.h"
 
-static const struct snd_pcm_hardware s3c_dma_hardware = {
+static const struct snd_pcm_hardware dma_hardware = {
 	.info			= SNDRV_PCM_INFO_INTERLEAVED |
 				    SNDRV_PCM_INFO_BLOCK_TRANSFER |
 				    SNDRV_PCM_INFO_MMAP |
@@ -53,7 +53,7 @@
 	.fifo_size		= 32,
 };
 
-struct s3c24xx_runtime_data {
+struct runtime_data {
 	spinlock_t lock;
 	int state;
 	unsigned int dma_loaded;
@@ -65,14 +65,14 @@
 	struct s3c_dma_params *params;
 };
 
-/* s3c_dma_enqueue
+/* dma_enqueue
  *
  * place a dma buffer onto the queue for the dma system
  * to handle.
 */
-static void s3c_dma_enqueue(struct snd_pcm_substream *substream)
+static void dma_enqueue(struct snd_pcm_substream *substream)
 {
-	struct s3c24xx_runtime_data *prtd = substream->runtime->private_data;
+	struct runtime_data *prtd = substream->runtime->private_data;
 	dma_addr_t pos = prtd->dma_pos;
 	unsigned int limit;
 	int ret;
@@ -112,12 +112,12 @@
 	prtd->dma_pos = pos;
 }
 
-static void s3c24xx_audio_buffdone(struct s3c2410_dma_chan *channel,
+static void audio_buffdone(struct s3c2410_dma_chan *channel,
 				void *dev_id, int size,
 				enum s3c2410_dma_buffresult result)
 {
 	struct snd_pcm_substream *substream = dev_id;
-	struct s3c24xx_runtime_data *prtd;
+	struct runtime_data *prtd;
 
 	pr_debug("Entered %s\n", __func__);
 
@@ -132,17 +132,17 @@
 	spin_lock(&prtd->lock);
 	if (prtd->state & ST_RUNNING && !s3c_dma_has_circular()) {
 		prtd->dma_loaded--;
-		s3c_dma_enqueue(substream);
+		dma_enqueue(substream);
 	}
 
 	spin_unlock(&prtd->lock);
 }
 
-static int s3c_dma_hw_params(struct snd_pcm_substream *substream,
+static int dma_hw_params(struct snd_pcm_substream *substream,
 	struct snd_pcm_hw_params *params)
 {
 	struct snd_pcm_runtime *runtime = substream->runtime;
-	struct s3c24xx_runtime_data *prtd = runtime->private_data;
+	struct runtime_data *prtd = runtime->private_data;
 	struct snd_soc_pcm_runtime *rtd = substream->private_data;
 	unsigned long totbytes = params_buffer_bytes(params);
 	struct s3c_dma_params *dma =
@@ -181,7 +181,7 @@
 	}
 
 	s3c2410_dma_set_buffdone_fn(prtd->params->channel,
-				    s3c24xx_audio_buffdone);
+				    audio_buffdone);
 
 	snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer);
 
@@ -199,9 +199,9 @@
 	return 0;
 }
 
-static int s3c_dma_hw_free(struct snd_pcm_substream *substream)
+static int dma_hw_free(struct snd_pcm_substream *substream)
 {
-	struct s3c24xx_runtime_data *prtd = substream->runtime->private_data;
+	struct runtime_data *prtd = substream->runtime->private_data;
 
 	pr_debug("Entered %s\n", __func__);
 
@@ -216,9 +216,9 @@
 	return 0;
 }
 
-static int s3c_dma_prepare(struct snd_pcm_substream *substream)
+static int dma_prepare(struct snd_pcm_substream *substream)
 {
-	struct s3c24xx_runtime_data *prtd = substream->runtime->private_data;
+	struct runtime_data *prtd = substream->runtime->private_data;
 	int ret = 0;
 
 	pr_debug("Entered %s\n", __func__);
@@ -249,14 +249,14 @@
 	prtd->dma_pos = prtd->dma_start;
 
 	/* enqueue dma buffers */
-	s3c_dma_enqueue(substream);
+	dma_enqueue(substream);
 
 	return ret;
 }
 
-static int s3c_dma_trigger(struct snd_pcm_substream *substream, int cmd)
+static int dma_trigger(struct snd_pcm_substream *substream, int cmd)
 {
-	struct s3c24xx_runtime_data *prtd = substream->runtime->private_data;
+	struct runtime_data *prtd = substream->runtime->private_data;
 	int ret = 0;
 
 	pr_debug("Entered %s\n", __func__);
@@ -289,10 +289,10 @@
 }
 
 static snd_pcm_uframes_t
-s3c_dma_pointer(struct snd_pcm_substream *substream)
+dma_pointer(struct snd_pcm_substream *substream)
 {
 	struct snd_pcm_runtime *runtime = substream->runtime;
-	struct s3c24xx_runtime_data *prtd = runtime->private_data;
+	struct runtime_data *prtd = runtime->private_data;
 	unsigned long res;
 	dma_addr_t src, dst;
 
@@ -324,17 +324,17 @@
 	return bytes_to_frames(substream->runtime, res);
 }
 
-static int s3c_dma_open(struct snd_pcm_substream *substream)
+static int dma_open(struct snd_pcm_substream *substream)
 {
 	struct snd_pcm_runtime *runtime = substream->runtime;
-	struct s3c24xx_runtime_data *prtd;
+	struct runtime_data *prtd;
 
 	pr_debug("Entered %s\n", __func__);
 
 	snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS);
-	snd_soc_set_runtime_hwparams(substream, &s3c_dma_hardware);
+	snd_soc_set_runtime_hwparams(substream, &dma_hardware);
 
-	prtd = kzalloc(sizeof(struct s3c24xx_runtime_data), GFP_KERNEL);
+	prtd = kzalloc(sizeof(struct runtime_data), GFP_KERNEL);
 	if (prtd == NULL)
 		return -ENOMEM;
 
@@ -344,22 +344,22 @@
 	return 0;
 }
 
-static int s3c_dma_close(struct snd_pcm_substream *substream)
+static int dma_close(struct snd_pcm_substream *substream)
 {
 	struct snd_pcm_runtime *runtime = substream->runtime;
-	struct s3c24xx_runtime_data *prtd = runtime->private_data;
+	struct runtime_data *prtd = runtime->private_data;
 
 	pr_debug("Entered %s\n", __func__);
 
 	if (!prtd)
-		pr_debug("s3c_dma_close called with prtd == NULL\n");
+		pr_debug("dma_close called with prtd == NULL\n");
 
 	kfree(prtd);
 
 	return 0;
 }
 
-static int s3c_dma_mmap(struct snd_pcm_substream *substream,
+static int dma_mmap(struct snd_pcm_substream *substream,
 	struct vm_area_struct *vma)
 {
 	struct snd_pcm_runtime *runtime = substream->runtime;
@@ -372,23 +372,23 @@
 				     runtime->dma_bytes);
 }
 
-static struct snd_pcm_ops s3c_dma_ops = {
-	.open		= s3c_dma_open,
-	.close		= s3c_dma_close,
+static struct snd_pcm_ops dma_ops = {
+	.open		= dma_open,
+	.close		= dma_close,
 	.ioctl		= snd_pcm_lib_ioctl,
-	.hw_params	= s3c_dma_hw_params,
-	.hw_free	= s3c_dma_hw_free,
-	.prepare	= s3c_dma_prepare,
-	.trigger	= s3c_dma_trigger,
-	.pointer	= s3c_dma_pointer,
-	.mmap		= s3c_dma_mmap,
+	.hw_params	= dma_hw_params,
+	.hw_free	= dma_hw_free,
+	.prepare	= dma_prepare,
+	.trigger	= dma_trigger,
+	.pointer	= dma_pointer,
+	.mmap		= dma_mmap,
 };
 
-static int s3c_preallocate_dma_buffer(struct snd_pcm *pcm, int stream)
+static int preallocate_dma_buffer(struct snd_pcm *pcm, int stream)
 {
 	struct snd_pcm_substream *substream = pcm->streams[stream].substream;
 	struct snd_dma_buffer *buf = &substream->dma_buffer;
-	size_t size = s3c_dma_hardware.buffer_bytes_max;
+	size_t size = dma_hardware.buffer_bytes_max;
 
 	pr_debug("Entered %s\n", __func__);
 
@@ -403,7 +403,7 @@
 	return 0;
 }
 
-static void s3c_dma_free_dma_buffers(struct snd_pcm *pcm)
+static void dma_free_dma_buffers(struct snd_pcm *pcm)
 {
 	struct snd_pcm_substream *substream;
 	struct snd_dma_buffer *buf;
@@ -426,9 +426,9 @@
 	}
 }
 
-static u64 s3c_dma_mask = DMA_BIT_MASK(32);
+static u64 dma_mask = DMA_BIT_MASK(32);
 
-static int s3c_dma_new(struct snd_card *card,
+static int dma_new(struct snd_card *card,
 	struct snd_soc_dai *dai, struct snd_pcm *pcm)
 {
 	int ret = 0;
@@ -436,67 +436,67 @@
 	pr_debug("Entered %s\n", __func__);
 
 	if (!card->dev->dma_mask)
-		card->dev->dma_mask = &s3c_dma_mask;
+		card->dev->dma_mask = &dma_mask;
 	if (!card->dev->coherent_dma_mask)
 		card->dev->coherent_dma_mask = 0xffffffff;
 
 	if (dai->driver->playback.channels_min) {
-		ret = s3c_preallocate_dma_buffer(pcm,
+		ret = preallocate_dma_buffer(pcm,
 			SNDRV_PCM_STREAM_PLAYBACK);
 		if (ret)
 			goto out;
 	}
 
 	if (dai->driver->capture.channels_min) {
-		ret = s3c_preallocate_dma_buffer(pcm,
+		ret = preallocate_dma_buffer(pcm,
 			SNDRV_PCM_STREAM_CAPTURE);
 		if (ret)
 			goto out;
 	}
- out:
+out:
 	return ret;
 }
 
-static struct snd_soc_platform_driver s3c24xx_soc_platform = {
-	.ops		= &s3c_dma_ops,
-	.pcm_new	= s3c_dma_new,
-	.pcm_free	= s3c_dma_free_dma_buffers,
+static struct snd_soc_platform_driver samsung_asoc_platform = {
+	.ops		= &dma_ops,
+	.pcm_new	= dma_new,
+	.pcm_free	= dma_free_dma_buffers,
 };
 
-static int __devinit s3c24xx_soc_platform_probe(struct platform_device *pdev)
+static int __devinit samsung_asoc_platform_probe(struct platform_device *pdev)
 {
-	return snd_soc_register_platform(&pdev->dev, &s3c24xx_soc_platform);
+	return snd_soc_register_platform(&pdev->dev, &samsung_asoc_platform);
 }
 
-static int __devexit s3c24xx_soc_platform_remove(struct platform_device *pdev)
+static int __devexit samsung_asoc_platform_remove(struct platform_device *pdev)
 {
 	snd_soc_unregister_platform(&pdev->dev);
 	return 0;
 }
 
-static struct platform_driver s3c24xx_pcm_driver = {
+static struct platform_driver asoc_dma_driver = {
 	.driver = {
-		.name = "s3c24xx-pcm-audio",
+		.name = "samsung-audio",
 		.owner = THIS_MODULE,
 	},
 
-	.probe = s3c24xx_soc_platform_probe,
-	.remove = __devexit_p(s3c24xx_soc_platform_remove),
+	.probe = samsung_asoc_platform_probe,
+	.remove = __devexit_p(samsung_asoc_platform_remove),
 };
 
-static int __init snd_s3c24xx_pcm_init(void)
+static int __init samsung_asoc_init(void)
 {
-	return platform_driver_register(&s3c24xx_pcm_driver);
+	return platform_driver_register(&asoc_dma_driver);
 }
-module_init(snd_s3c24xx_pcm_init);
+module_init(samsung_asoc_init);
 
-static void __exit snd_s3c24xx_pcm_exit(void)
+static void __exit samsung_asoc_exit(void)
 {
-	platform_driver_unregister(&s3c24xx_pcm_driver);
+	platform_driver_unregister(&asoc_dma_driver);
 }
-module_exit(snd_s3c24xx_pcm_exit);
+module_exit(samsung_asoc_exit);
 
 MODULE_AUTHOR("Ben Dooks, <ben@simtec.co.uk>");
-MODULE_DESCRIPTION("Samsung S3C Audio DMA module");
+MODULE_DESCRIPTION("Samsung ASoC DMA Driver");
 MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:s3c24xx-pcm-audio");
+MODULE_ALIAS("platform:samsung-audio");
diff --git a/sound/soc/s3c24xx/s3c-dma.h b/sound/soc/samsung/dma.h
similarity index 97%
rename from sound/soc/s3c24xx/s3c-dma.h
rename to sound/soc/samsung/dma.h
index 748c07d..f8cd2b4 100644
--- a/sound/soc/s3c24xx/s3c-dma.h
+++ b/sound/soc/samsung/dma.h
@@ -1,5 +1,5 @@
 /*
- *  s3c-dma.h --
+ *  dma.h --
  *
  *  This program is free software; you can redistribute  it and/or modify it
  *  under  the terms of  the GNU General  Public License as published by the
diff --git a/sound/soc/s3c24xx/goni_wm8994.c b/sound/soc/samsung/goni_wm8994.c
similarity index 83%
rename from sound/soc/s3c24xx/goni_wm8994.c
rename to sound/soc/samsung/goni_wm8994.c
index 694f702..34dd9ef 100644
--- a/sound/soc/s3c24xx/goni_wm8994.c
+++ b/sound/soc/samsung/goni_wm8994.c
@@ -16,7 +16,6 @@
 #include <linux/io.h>
 #include <linux/platform_device.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/jack.h>
 #include <asm/mach-types.h>
 #include <mach/gpio.h>
@@ -25,8 +24,16 @@
 #include <linux/mfd/wm8994/core.h>
 #include <linux/mfd/wm8994/registers.h>
 #include "../codecs/wm8994.h"
-#include "s3c-dma.h"
-#include "s3c64xx-i2s.h"
+#include "dma.h"
+#include "i2s.h"
+
+#define MACHINE_NAME	0
+#define CPU_VOICE_DAI	1
+
+static const char *aquila_str[] = {
+	[MACHINE_NAME] = "aquila",
+	[CPU_VOICE_DAI] = "aquila-voice-dai",
+};
 
 static struct snd_soc_card goni;
 static struct platform_device *goni_snd_device;
@@ -97,28 +104,34 @@
 static int goni_wm8994_init(struct snd_soc_pcm_runtime *rtd)
 {
 	struct snd_soc_codec *codec = rtd->codec;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 	int ret;
 
 	/* add goni specific widgets */
-	snd_soc_dapm_new_controls(codec, goni_dapm_widgets,
+	snd_soc_dapm_new_controls(dapm, goni_dapm_widgets,
 			ARRAY_SIZE(goni_dapm_widgets));
 
 	/* set up goni specific audio routes */
-	snd_soc_dapm_add_routes(codec, goni_dapm_routes,
+	snd_soc_dapm_add_routes(dapm, goni_dapm_routes,
 			ARRAY_SIZE(goni_dapm_routes));
 
 	/* set endpoints to not connected */
-	snd_soc_dapm_nc_pin(codec, "IN2LP:VXRN");
-	snd_soc_dapm_nc_pin(codec, "IN2RP:VXRP");
-	snd_soc_dapm_nc_pin(codec, "LINEOUT1N");
-	snd_soc_dapm_nc_pin(codec, "LINEOUT1P");
-	snd_soc_dapm_nc_pin(codec, "LINEOUT2N");
-	snd_soc_dapm_nc_pin(codec, "LINEOUT2P");
+	snd_soc_dapm_nc_pin(dapm, "IN2LP:VXRN");
+	snd_soc_dapm_nc_pin(dapm, "IN2RP:VXRP");
+	snd_soc_dapm_nc_pin(dapm, "LINEOUT1N");
+	snd_soc_dapm_nc_pin(dapm, "LINEOUT1P");
+	snd_soc_dapm_nc_pin(dapm, "LINEOUT2N");
+	snd_soc_dapm_nc_pin(dapm, "LINEOUT2P");
 
-	snd_soc_dapm_sync(codec);
+	if (machine_is_aquila()) {
+		snd_soc_dapm_nc_pin(dapm, "SPKOUTRN");
+		snd_soc_dapm_nc_pin(dapm, "SPKOUTRP");
+	}
+
+	snd_soc_dapm_sync(dapm);
 
 	/* Headset jack detection */
-	ret = snd_soc_jack_new(&goni, "Headset Jack",
+	ret = snd_soc_jack_new(codec, "Headset Jack",
 			SND_JACK_HEADSET | SND_JACK_MECHANICAL | SND_JACK_AVOUT,
 			&jack);
 	if (ret)
@@ -150,12 +163,6 @@
 	if (ret < 0)
 		return ret;
 
-	/* set the cpu system clock */
-	ret = snd_soc_dai_set_sysclk(cpu_dai, S3C64XX_CLKSRC_PCLK,
-			0, SND_SOC_CLOCK_IN);
-	if (ret < 0)
-		return ret;
-
 	/* set codec DAI configuration */
 	ret = snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_I2S |
 			SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBM_CFM);
@@ -236,9 +243,9 @@
 {
 	.name = "WM8994",
 	.stream_name = "WM8994 HiFi",
-	.cpu_dai_name = "s3c64xx-i2s-v4",
+	.cpu_dai_name = "samsung-i2s.0",
 	.codec_dai_name = "wm8994-hifi",
-	.platform_name = "s3c24xx-pcm-audio",
+	.platform_name = "samsung-audio",
 	.codec_name = "wm8994-codec.0-0x1a",
 	.init = goni_wm8994_init,
 	.ops = &goni_hifi_ops,
@@ -247,7 +254,7 @@
 	.stream_name = "Voice",
 	.cpu_dai_name = "goni-voice-dai",
 	.codec_dai_name = "wm8994-voice",
-	.platform_name = "s3c24xx-pcm-audio",
+	.platform_name = "samsung-audio",
 	.codec_name = "wm8994-codec.0-0x1a",
 	.ops = &goni_voice_ops,
 },
@@ -263,7 +270,11 @@
 {
 	int ret;
 
-	if (!machine_is_goni())
+	if (machine_is_aquila()) {
+		voice_dai.name = aquila_str[CPU_VOICE_DAI];
+		goni_dai[1].cpu_dai_name = aquila_str[CPU_VOICE_DAI];
+		goni.name = aquila_str[MACHINE_NAME];
+	} else if (!machine_is_goni())
 		return -ENODEV;
 
 	goni_snd_device = platform_device_alloc("soc-audio", -1);
@@ -272,20 +283,25 @@
 
 	/* register voice DAI here */
 	ret = snd_soc_register_dai(&goni_snd_device->dev, &voice_dai);
-	if (ret)
+	if (ret) {
+		platform_device_put(goni_snd_device);
 		return ret;
+	}
 
 	platform_set_drvdata(goni_snd_device, &goni);
 	ret = platform_device_add(goni_snd_device);
 
-	if (ret)
+	if (ret) {
+		snd_soc_unregister_dai(&goni_snd_device->dev);
 		platform_device_put(goni_snd_device);
+	}
 
 	return ret;
 }
 
 static void __exit goni_exit(void)
 {
+	snd_soc_unregister_dai(&goni_snd_device->dev);
 	platform_device_unregister(goni_snd_device);
 }
 
diff --git a/sound/soc/samsung/h1940_uda1380.c b/sound/soc/samsung/h1940_uda1380.c
new file mode 100644
index 0000000..c45f7ce
--- /dev/null
+++ b/sound/soc/samsung/h1940_uda1380.c
@@ -0,0 +1,296 @@
+/*
+ * h1940_uda1380.c  --  ALSA SoC Audio Layer
+ *
+ * Copyright (c) 2010 Arnaud Patard <arnaud.patard@rtp-net.org>
+ * Copyright (c) 2010 Vasily Khoruzhick <anarsoul@gmail.com>
+ *
+ * Based on version from Arnaud Patard <arnaud.patard@rtp-net.org>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/platform_device.h>
+#include <linux/i2c.h>
+#include <linux/gpio.h>
+
+#include <sound/soc.h>
+#include <sound/uda1380.h>
+#include <sound/jack.h>
+
+#include <plat/regs-iis.h>
+
+#include <mach/h1940-latch.h>
+
+#include <asm/mach-types.h>
+
+#include "dma.h"
+#include "s3c24xx-i2s.h"
+#include "../codecs/uda1380.h"
+
+static unsigned int rates[] = {
+	11025,
+	22050,
+	44100,
+};
+
+static struct snd_pcm_hw_constraint_list hw_rates = {
+	.count = ARRAY_SIZE(rates),
+	.list = rates,
+	.mask = 0,
+};
+
+static struct snd_soc_jack hp_jack;
+
+static struct snd_soc_jack_pin hp_jack_pins[] = {
+	{
+		.pin	= "Headphone Jack",
+		.mask	= SND_JACK_HEADPHONE,
+	},
+	{
+		.pin	= "Speaker",
+		.mask	= SND_JACK_HEADPHONE,
+		.invert	= 1,
+	},
+};
+
+static struct snd_soc_jack_gpio hp_jack_gpios[] = {
+	{
+		.gpio			= S3C2410_GPG(4),
+		.name			= "hp-gpio",
+		.report			= SND_JACK_HEADPHONE,
+		.invert			= 1,
+		.debounce_time		= 200,
+	},
+};
+
+static int h1940_startup(struct snd_pcm_substream *substream)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+
+	runtime->hw.rate_min = hw_rates.list[0];
+	runtime->hw.rate_max = hw_rates.list[hw_rates.count - 1];
+	runtime->hw.rates = SNDRV_PCM_RATE_KNOT;
+
+	return snd_pcm_hw_constraint_list(runtime, 0,
+					SNDRV_PCM_HW_PARAM_RATE,
+					&hw_rates);
+}
+
+static int h1940_hw_params(struct snd_pcm_substream *substream,
+				struct snd_pcm_hw_params *params)
+{
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+	struct snd_soc_dai *codec_dai = rtd->codec_dai;
+	int div;
+	int ret;
+	unsigned int rate = params_rate(params);
+
+	switch (rate) {
+	case 11025:
+	case 22050:
+	case 44100:
+		div = s3c24xx_i2s_get_clockrate() / (384 * rate);
+		if (s3c24xx_i2s_get_clockrate() % (384 * rate) > (192 * rate))
+			div++;
+		break;
+	default:
+		dev_err(&rtd->dev, "%s: rate %d is not supported\n",
+			__func__, rate);
+		return -EINVAL;
+	}
+
+	/* set codec DAI configuration */
+	ret = snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_I2S |
+		SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBS_CFS);
+	if (ret < 0)
+		return ret;
+
+	/* set cpu DAI configuration */
+	ret = snd_soc_dai_set_fmt(cpu_dai, SND_SOC_DAIFMT_I2S |
+		SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBS_CFS);
+	if (ret < 0)
+		return ret;
+
+	/* select clock source */
+	ret = snd_soc_dai_set_sysclk(cpu_dai, S3C24XX_CLKSRC_PCLK, rate,
+			SND_SOC_CLOCK_OUT);
+	if (ret < 0)
+		return ret;
+
+	/* set MCLK division for sample rate */
+	ret = snd_soc_dai_set_clkdiv(cpu_dai, S3C24XX_DIV_MCLK,
+		S3C2410_IISMOD_384FS);
+	if (ret < 0)
+		return ret;
+
+	/* set BCLK division for sample rate */
+	ret = snd_soc_dai_set_clkdiv(cpu_dai, S3C24XX_DIV_BCLK,
+		S3C2410_IISMOD_32FS);
+	if (ret < 0)
+		return ret;
+
+	/* set prescaler division for sample rate */
+	ret = snd_soc_dai_set_clkdiv(cpu_dai, S3C24XX_DIV_PRESCALER,
+		S3C24XX_PRESCALE(div, div));
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
+static struct snd_soc_ops h1940_ops = {
+	.startup	= h1940_startup,
+	.hw_params	= h1940_hw_params,
+};
+
+static int h1940_spk_power(struct snd_soc_dapm_widget *w,
+				struct snd_kcontrol *kcontrol, int event)
+{
+	if (SND_SOC_DAPM_EVENT_ON(event))
+		gpio_set_value(H1940_LATCH_AUDIO_POWER, 1);
+	else
+		gpio_set_value(H1940_LATCH_AUDIO_POWER, 0);
+
+	return 0;
+}
+
+/* h1940 machine dapm widgets */
+static const struct snd_soc_dapm_widget uda1380_dapm_widgets[] = {
+	SND_SOC_DAPM_HP("Headphone Jack", NULL),
+	SND_SOC_DAPM_MIC("Mic Jack", NULL),
+	SND_SOC_DAPM_SPK("Speaker", h1940_spk_power),
+};
+
+/* h1940 machine audio_map */
+static const struct snd_soc_dapm_route audio_map[] = {
+	/* headphone connected to VOUTLHP, VOUTRHP */
+	{"Headphone Jack", NULL, "VOUTLHP"},
+	{"Headphone Jack", NULL, "VOUTRHP"},
+
+	/* ext speaker connected to VOUTL, VOUTR  */
+	{"Speaker", NULL, "VOUTL"},
+	{"Speaker", NULL, "VOUTR"},
+
+	/* mic is connected to VINM */
+	{"VINM", NULL, "Mic Jack"},
+};
+
+static struct platform_device *s3c24xx_snd_device;
+
+static int h1940_uda1380_init(struct snd_soc_pcm_runtime *rtd)
+{
+	struct snd_soc_codec *codec = rtd->codec;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
+	int err;
+
+	/* Add h1940 specific widgets */
+	err = snd_soc_dapm_new_controls(dapm, uda1380_dapm_widgets,
+				  ARRAY_SIZE(uda1380_dapm_widgets));
+	if (err)
+		return err;
+
+	/* Set up h1940 specific audio path interconnects */
+	err = snd_soc_dapm_add_routes(dapm, audio_map,
+				      ARRAY_SIZE(audio_map));
+	if (err)
+		return err;
+
+	snd_soc_dapm_enable_pin(dapm, "Headphone Jack");
+	snd_soc_dapm_enable_pin(dapm, "Speaker");
+	snd_soc_dapm_enable_pin(dapm, "Mic Jack");
+
+	snd_soc_dapm_sync(dapm);
+
+	snd_soc_jack_new(codec, "Headphone Jack", SND_JACK_HEADPHONE,
+		&hp_jack);
+
+	snd_soc_jack_add_pins(&hp_jack, ARRAY_SIZE(hp_jack_pins),
+		hp_jack_pins);
+
+	snd_soc_jack_add_gpios(&hp_jack, ARRAY_SIZE(hp_jack_gpios),
+		hp_jack_gpios);
+
+	return 0;
+}
+
+/* s3c24xx digital audio interface glue - connects codec <--> CPU */
+static struct snd_soc_dai_link h1940_uda1380_dai[] = {
+	{
+		.name		= "uda1380",
+		.stream_name	= "UDA1380 Duplex",
+		.cpu_dai_name	= "s3c24xx-iis",
+		.codec_dai_name	= "uda1380-hifi",
+		.init		= h1940_uda1380_init,
+		.platform_name	= "samsung-audio",
+		.codec_name	= "uda1380-codec.0-001a",
+		.ops		= &h1940_ops,
+	},
+};
+
+static struct snd_soc_card h1940_asoc = {
+	.name = "h1940",
+	.dai_link = h1940_uda1380_dai,
+	.num_links = ARRAY_SIZE(h1940_uda1380_dai),
+};
+
+static int __init h1940_init(void)
+{
+	int ret;
+
+	if (!machine_is_h1940())
+		return -ENODEV;
+
+	/* configure some gpios */
+	ret = gpio_request(H1940_LATCH_AUDIO_POWER, "speaker-power");
+	if (ret)
+		goto err_out;
+
+	ret = gpio_direction_output(H1940_LATCH_AUDIO_POWER, 0);
+	if (ret)
+		goto err_gpio;
+
+	s3c24xx_snd_device = platform_device_alloc("soc-audio", -1);
+	if (!s3c24xx_snd_device) {
+		ret = -ENOMEM;
+		goto err_gpio;
+	}
+
+	platform_set_drvdata(s3c24xx_snd_device, &h1940_asoc);
+	ret = platform_device_add(s3c24xx_snd_device);
+
+	if (ret)
+		goto err_plat;
+
+	return 0;
+
+err_plat:
+	platform_device_put(s3c24xx_snd_device);
+err_gpio:
+	gpio_free(H1940_LATCH_AUDIO_POWER);
+
+err_out:
+	return ret;
+}
+
+static void __exit h1940_exit(void)
+{
+	platform_device_unregister(s3c24xx_snd_device);
+	snd_soc_jack_free_gpios(&hp_jack, ARRAY_SIZE(hp_jack_gpios),
+		hp_jack_gpios);
+	gpio_free(H1940_LATCH_AUDIO_POWER);
+}
+
+module_init(h1940_init);
+module_exit(h1940_exit);
+
+/* Module information */
+MODULE_AUTHOR("Arnaud Patard, Vasily Khoruzhick");
+MODULE_DESCRIPTION("ALSA SoC H1940");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/samsung/i2s.c b/sound/soc/samsung/i2s.c
new file mode 100644
index 0000000..d00ac3a
--- /dev/null
+++ b/sound/soc/samsung/i2s.c
@@ -0,0 +1,1258 @@
+/* sound/soc/samsung/i2s.c
+ *
+ * ALSA SoC Audio Layer - Samsung I2S Controller driver
+ *
+ * Copyright (c) 2010 Samsung Electronics Co. Ltd.
+ *	Jaswinder Singh <jassi.brar@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+
+#include <plat/audio.h>
+
+#include "dma.h"
+#include "i2s.h"
+
+#define I2SCON		0x0
+#define I2SMOD		0x4
+#define I2SFIC		0x8
+#define I2SPSR		0xc
+#define I2STXD		0x10
+#define I2SRXD		0x14
+#define I2SFICS		0x18
+#define I2STXDS		0x1c
+
+#define CON_RSTCLR		(1 << 31)
+#define CON_FRXOFSTATUS		(1 << 26)
+#define CON_FRXORINTEN		(1 << 25)
+#define CON_FTXSURSTAT		(1 << 24)
+#define CON_FTXSURINTEN		(1 << 23)
+#define CON_TXSDMA_PAUSE	(1 << 20)
+#define CON_TXSDMA_ACTIVE	(1 << 18)
+
+#define CON_FTXURSTATUS		(1 << 17)
+#define CON_FTXURINTEN		(1 << 16)
+#define CON_TXFIFO2_EMPTY	(1 << 15)
+#define CON_TXFIFO1_EMPTY	(1 << 14)
+#define CON_TXFIFO2_FULL	(1 << 13)
+#define CON_TXFIFO1_FULL	(1 << 12)
+
+#define CON_LRINDEX		(1 << 11)
+#define CON_TXFIFO_EMPTY	(1 << 10)
+#define CON_RXFIFO_EMPTY	(1 << 9)
+#define CON_TXFIFO_FULL		(1 << 8)
+#define CON_RXFIFO_FULL		(1 << 7)
+#define CON_TXDMA_PAUSE		(1 << 6)
+#define CON_RXDMA_PAUSE		(1 << 5)
+#define CON_TXCH_PAUSE		(1 << 4)
+#define CON_RXCH_PAUSE		(1 << 3)
+#define CON_TXDMA_ACTIVE	(1 << 2)
+#define CON_RXDMA_ACTIVE	(1 << 1)
+#define CON_ACTIVE		(1 << 0)
+
+#define MOD_OPCLK_CDCLK_OUT	(0 << 30)
+#define MOD_OPCLK_CDCLK_IN	(1 << 30)
+#define MOD_OPCLK_BCLK_OUT	(2 << 30)
+#define MOD_OPCLK_PCLK		(3 << 30)
+#define MOD_OPCLK_MASK		(3 << 30)
+#define MOD_TXS_IDMA		(1 << 28) /* Sec_TXFIFO use I-DMA */
+
+#define MOD_BLCS_SHIFT	26
+#define MOD_BLCS_16BIT	(0 << MOD_BLCS_SHIFT)
+#define MOD_BLCS_8BIT	(1 << MOD_BLCS_SHIFT)
+#define MOD_BLCS_24BIT	(2 << MOD_BLCS_SHIFT)
+#define MOD_BLCS_MASK	(3 << MOD_BLCS_SHIFT)
+#define MOD_BLCP_SHIFT	24
+#define MOD_BLCP_16BIT	(0 << MOD_BLCP_SHIFT)
+#define MOD_BLCP_8BIT	(1 << MOD_BLCP_SHIFT)
+#define MOD_BLCP_24BIT	(2 << MOD_BLCP_SHIFT)
+#define MOD_BLCP_MASK	(3 << MOD_BLCP_SHIFT)
+
+#define MOD_C2DD_HHALF		(1 << 21) /* Discard Higher-half */
+#define MOD_C2DD_LHALF		(1 << 20) /* Discard Lower-half */
+#define MOD_C1DD_HHALF		(1 << 19)
+#define MOD_C1DD_LHALF		(1 << 18)
+#define MOD_DC2_EN		(1 << 17)
+#define MOD_DC1_EN		(1 << 16)
+#define MOD_BLC_16BIT		(0 << 13)
+#define MOD_BLC_8BIT		(1 << 13)
+#define MOD_BLC_24BIT		(2 << 13)
+#define MOD_BLC_MASK		(3 << 13)
+
+#define MOD_IMS_SYSMUX		(1 << 10)
+#define MOD_SLAVE		(1 << 11)
+#define MOD_TXONLY		(0 << 8)
+#define MOD_RXONLY		(1 << 8)
+#define MOD_TXRX		(2 << 8)
+#define MOD_MASK		(3 << 8)
+#define MOD_LR_LLOW		(0 << 7)
+#define MOD_LR_RLOW		(1 << 7)
+#define MOD_SDF_IIS		(0 << 5)
+#define MOD_SDF_MSB		(1 << 5)
+#define MOD_SDF_LSB		(2 << 5)
+#define MOD_SDF_MASK		(3 << 5)
+#define MOD_RCLK_256FS		(0 << 3)
+#define MOD_RCLK_512FS		(1 << 3)
+#define MOD_RCLK_384FS		(2 << 3)
+#define MOD_RCLK_768FS		(3 << 3)
+#define MOD_RCLK_MASK		(3 << 3)
+#define MOD_BCLK_32FS		(0 << 1)
+#define MOD_BCLK_48FS		(1 << 1)
+#define MOD_BCLK_16FS		(2 << 1)
+#define MOD_BCLK_24FS		(3 << 1)
+#define MOD_BCLK_MASK		(3 << 1)
+#define MOD_8BIT		(1 << 0)
+
+#define MOD_CDCLKCON		(1 << 12)
+
+#define PSR_PSREN		(1 << 15)
+
+#define FIC_TX2COUNT(x)		(((x) >>  24) & 0xf)
+#define FIC_TX1COUNT(x)		(((x) >>  16) & 0xf)
+
+#define FIC_TXFLUSH		(1 << 15)
+#define FIC_RXFLUSH		(1 << 7)
+#define FIC_TXCOUNT(x)		(((x) >>  8) & 0xf)
+#define FIC_RXCOUNT(x)		(((x) >>  0) & 0xf)
+#define FICS_TXCOUNT(x)		(((x) >>  8) & 0x7f)
+
+#define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t)
+
+struct i2s_dai {
+	/* Platform device for this DAI */
+	struct platform_device *pdev;
+	/* IOREMAP'd SFRs */
+	void __iomem	*addr;
+	/* Physical base address of SFRs */
+	u32	base;
+	/* Rate of RCLK source clock */
+	unsigned long rclk_srcrate;
+	/* Frame Clock */
+	unsigned frmclk;
+	/*
+	 * Specifically requested RCLK,BCLK by MACHINE Driver.
+	 * 0 indicates CPU driver is free to choose any value.
+	 */
+	unsigned rfs, bfs;
+	/* I2S Controller's core clock */
+	struct clk *clk;
+	/* Clock for generating I2S signals */
+	struct clk *op_clk;
+	/* Array of clock names for op_clk */
+	const char **src_clk;
+	/* Pointer to the Primary_Fifo if this is Sec_Fifo, NULL otherwise */
+	struct i2s_dai *pri_dai;
+	/* Pointer to the Secondary_Fifo if it has one, NULL otherwise */
+	struct i2s_dai *sec_dai;
+#define DAI_OPENED	(1 << 0) /* Dai is opened */
+#define DAI_MANAGER	(1 << 1) /* Dai is the manager */
+	unsigned mode;
+	/* Driver for this DAI */
+	struct snd_soc_dai_driver i2s_dai_drv;
+	/* DMA parameters */
+	struct s3c_dma_params dma_playback;
+	struct s3c_dma_params dma_capture;
+	u32	quirks;
+	u32	suspend_i2smod;
+	u32	suspend_i2scon;
+	u32	suspend_i2spsr;
+};
+
+/* Lock for cross i/f checks */
+static DEFINE_SPINLOCK(lock);
+
+/* If this is the 'overlay' stereo DAI */
+static inline bool is_secondary(struct i2s_dai *i2s)
+{
+	return i2s->pri_dai ? true : false;
+}
+
+/* If operating in SoC-Slave mode */
+static inline bool is_slave(struct i2s_dai *i2s)
+{
+	return (readl(i2s->addr + I2SMOD) & MOD_SLAVE) ? true : false;
+}
+
+/* If this interface of the controller is transmitting data */
+static inline bool tx_active(struct i2s_dai *i2s)
+{
+	u32 active;
+
+	if (!i2s)
+		return false;
+
+	active = readl(i2s->addr + I2SCON);
+
+	if (is_secondary(i2s))
+		active &= CON_TXSDMA_ACTIVE;
+	else
+		active &= CON_TXDMA_ACTIVE;
+
+	return active ? true : false;
+}
+
+/* If the other interface of the controller is transmitting data */
+static inline bool other_tx_active(struct i2s_dai *i2s)
+{
+	struct i2s_dai *other = i2s->pri_dai ? : i2s->sec_dai;
+
+	return tx_active(other);
+}
+
+/* If any interface of the controller is transmitting data */
+static inline bool any_tx_active(struct i2s_dai *i2s)
+{
+	return tx_active(i2s) || other_tx_active(i2s);
+}
+
+/* If this interface of the controller is receiving data */
+static inline bool rx_active(struct i2s_dai *i2s)
+{
+	u32 active;
+
+	if (!i2s)
+		return false;
+
+	active = readl(i2s->addr + I2SCON) & CON_RXDMA_ACTIVE;
+
+	return active ? true : false;
+}
+
+/* If the other interface of the controller is receiving data */
+static inline bool other_rx_active(struct i2s_dai *i2s)
+{
+	struct i2s_dai *other = i2s->pri_dai ? : i2s->sec_dai;
+
+	return rx_active(other);
+}
+
+/* If any interface of the controller is receiving data */
+static inline bool any_rx_active(struct i2s_dai *i2s)
+{
+	return rx_active(i2s) || other_rx_active(i2s);
+}
+
+/* If the other DAI is transmitting or receiving data */
+static inline bool other_active(struct i2s_dai *i2s)
+{
+	return other_rx_active(i2s) || other_tx_active(i2s);
+}
+
+/* If this DAI is transmitting or receiving data */
+static inline bool this_active(struct i2s_dai *i2s)
+{
+	return tx_active(i2s) || rx_active(i2s);
+}
+
+/* If the controller is active anyway */
+static inline bool any_active(struct i2s_dai *i2s)
+{
+	return this_active(i2s) || other_active(i2s);
+}
+
+static inline struct i2s_dai *to_info(struct snd_soc_dai *dai)
+{
+	return snd_soc_dai_get_drvdata(dai);
+}
+
+static inline bool is_opened(struct i2s_dai *i2s)
+{
+	if (i2s && (i2s->mode & DAI_OPENED))
+		return true;
+	else
+		return false;
+}
+
+static inline bool is_manager(struct i2s_dai *i2s)
+{
+	if (is_opened(i2s) && (i2s->mode & DAI_MANAGER))
+		return true;
+	else
+		return false;
+}
+
+/* Read RCLK of I2S (in multiples of LRCLK) */
+static inline unsigned get_rfs(struct i2s_dai *i2s)
+{
+	u32 rfs = (readl(i2s->addr + I2SMOD) >> 3) & 0x3;
+
+	switch (rfs) {
+	case 3:	return 768;
+	case 2: return 384;
+	case 1:	return 512;
+	default: return 256;
+	}
+}
+
+/* Write RCLK of I2S (in multiples of LRCLK) */
+static inline void set_rfs(struct i2s_dai *i2s, unsigned rfs)
+{
+	u32 mod = readl(i2s->addr + I2SMOD);
+
+	mod &= ~MOD_RCLK_MASK;
+
+	switch (rfs) {
+	case 768:
+		mod |= MOD_RCLK_768FS;
+		break;
+	case 512:
+		mod |= MOD_RCLK_512FS;
+		break;
+	case 384:
+		mod |= MOD_RCLK_384FS;
+		break;
+	default:
+		mod |= MOD_RCLK_256FS;
+		break;
+	}
+
+	writel(mod, i2s->addr + I2SMOD);
+}
+
+/* Read Bit-Clock of I2S (in multiples of LRCLK) */
+static inline unsigned get_bfs(struct i2s_dai *i2s)
+{
+	u32 bfs = (readl(i2s->addr + I2SMOD) >> 1) & 0x3;
+
+	switch (bfs) {
+	case 3: return 24;
+	case 2: return 16;
+	case 1:	return 48;
+	default: return 32;
+	}
+}
+
+/* Write Bit-Clock of I2S (in multiples of LRCLK) */
+static inline void set_bfs(struct i2s_dai *i2s, unsigned bfs)
+{
+	u32 mod = readl(i2s->addr + I2SMOD);
+
+	mod &= ~MOD_BCLK_MASK;
+
+	switch (bfs) {
+	case 48:
+		mod |= MOD_BCLK_48FS;
+		break;
+	case 32:
+		mod |= MOD_BCLK_32FS;
+		break;
+	case 24:
+		mod |= MOD_BCLK_24FS;
+		break;
+	case 16:
+		mod |= MOD_BCLK_16FS;
+		break;
+	default:
+		dev_err(&i2s->pdev->dev, "Wrong BCLK Divider!\n");
+		return;
+	}
+
+	writel(mod, i2s->addr + I2SMOD);
+}
+
+/* Sample-Size */
+static inline int get_blc(struct i2s_dai *i2s)
+{
+	int blc = readl(i2s->addr + I2SMOD);
+
+	blc = (blc >> 13) & 0x3;
+
+	switch (blc) {
+	case 2: return 24;
+	case 1:	return 8;
+	default: return 16;
+	}
+}
+
+/* TX Channel Control */
+static void i2s_txctrl(struct i2s_dai *i2s, int on)
+{
+	void __iomem *addr = i2s->addr;
+	u32 con = readl(addr + I2SCON);
+	u32 mod = readl(addr + I2SMOD) & ~MOD_MASK;
+
+	if (on) {
+		con |= CON_ACTIVE;
+		con &= ~CON_TXCH_PAUSE;
+
+		if (is_secondary(i2s)) {
+			con |= CON_TXSDMA_ACTIVE;
+			con &= ~CON_TXSDMA_PAUSE;
+		} else {
+			con |= CON_TXDMA_ACTIVE;
+			con &= ~CON_TXDMA_PAUSE;
+		}
+
+		if (any_rx_active(i2s))
+			mod |= MOD_TXRX;
+		else
+			mod |= MOD_TXONLY;
+	} else {
+		if (is_secondary(i2s)) {
+			con |=  CON_TXSDMA_PAUSE;
+			con &= ~CON_TXSDMA_ACTIVE;
+		} else {
+			con |=  CON_TXDMA_PAUSE;
+			con &= ~CON_TXDMA_ACTIVE;
+		}
+
+		if (other_tx_active(i2s)) {
+			writel(con, addr + I2SCON);
+			return;
+		}
+
+		con |=  CON_TXCH_PAUSE;
+
+		if (any_rx_active(i2s))
+			mod |= MOD_RXONLY;
+		else
+			con &= ~CON_ACTIVE;
+	}
+
+	writel(mod, addr + I2SMOD);
+	writel(con, addr + I2SCON);
+}
+
+/* RX Channel Control */
+static void i2s_rxctrl(struct i2s_dai *i2s, int on)
+{
+	void __iomem *addr = i2s->addr;
+	u32 con = readl(addr + I2SCON);
+	u32 mod = readl(addr + I2SMOD) & ~MOD_MASK;
+
+	if (on) {
+		con |= CON_RXDMA_ACTIVE | CON_ACTIVE;
+		con &= ~(CON_RXDMA_PAUSE | CON_RXCH_PAUSE);
+
+		if (any_tx_active(i2s))
+			mod |= MOD_TXRX;
+		else
+			mod |= MOD_RXONLY;
+	} else {
+		con |=  CON_RXDMA_PAUSE | CON_RXCH_PAUSE;
+		con &= ~CON_RXDMA_ACTIVE;
+
+		if (any_tx_active(i2s))
+			mod |= MOD_TXONLY;
+		else
+			con &= ~CON_ACTIVE;
+	}
+
+	writel(mod, addr + I2SMOD);
+	writel(con, addr + I2SCON);
+}
+
+/* Flush FIFO of an interface */
+static inline void i2s_fifo(struct i2s_dai *i2s, u32 flush)
+{
+	void __iomem *fic;
+	u32 val;
+
+	if (!i2s)
+		return;
+
+	if (is_secondary(i2s))
+		fic = i2s->addr + I2SFICS;
+	else
+		fic = i2s->addr + I2SFIC;
+
+	/* Flush the FIFO */
+	writel(readl(fic) | flush, fic);
+
+	/* Be patient */
+	val = msecs_to_loops(1) / 1000; /* 1 usec */
+	while (--val)
+		cpu_relax();
+
+	writel(readl(fic) & ~flush, fic);
+}
+
+static int i2s_set_sysclk(struct snd_soc_dai *dai,
+	  int clk_id, unsigned int rfs, int dir)
+{
+	struct i2s_dai *i2s = to_info(dai);
+	struct i2s_dai *other = i2s->pri_dai ? : i2s->sec_dai;
+	u32 mod = readl(i2s->addr + I2SMOD);
+
+	switch (clk_id) {
+	case SAMSUNG_I2S_CDCLK:
+		/* Shouldn't matter in GATING(CLOCK_IN) mode */
+		if (dir == SND_SOC_CLOCK_IN)
+			rfs = 0;
+
+		if ((rfs && other->rfs && (other->rfs != rfs)) ||
+				(any_active(i2s) &&
+				(((dir == SND_SOC_CLOCK_IN)
+					&& !(mod & MOD_CDCLKCON)) ||
+				((dir == SND_SOC_CLOCK_OUT)
+					&& (mod & MOD_CDCLKCON))))) {
+			dev_err(&i2s->pdev->dev,
+				"%s:%d Other DAI busy\n", __func__, __LINE__);
+			return -EAGAIN;
+		}
+
+		if (dir == SND_SOC_CLOCK_IN)
+			mod |= MOD_CDCLKCON;
+		else
+			mod &= ~MOD_CDCLKCON;
+
+		i2s->rfs = rfs;
+		break;
+
+	case SAMSUNG_I2S_RCLKSRC_0: /* clock corresponding to IISMOD[10] := 0 */
+	case SAMSUNG_I2S_RCLKSRC_1: /* clock corresponding to IISMOD[10] := 1 */
+		if ((i2s->quirks & QUIRK_NO_MUXPSR)
+				|| (clk_id == SAMSUNG_I2S_RCLKSRC_0))
+			clk_id = 0;
+		else
+			clk_id = 1;
+
+		if (!any_active(i2s)) {
+			if (i2s->op_clk) {
+				if ((clk_id && !(mod & MOD_IMS_SYSMUX)) ||
+					(!clk_id && (mod & MOD_IMS_SYSMUX))) {
+					clk_disable(i2s->op_clk);
+					clk_put(i2s->op_clk);
+				} else {
+					i2s->rclk_srcrate =
+						clk_get_rate(i2s->op_clk);
+					return 0;
+				}
+			}
+
+			i2s->op_clk = clk_get(&i2s->pdev->dev,
+						i2s->src_clk[clk_id]);
+			clk_enable(i2s->op_clk);
+			i2s->rclk_srcrate = clk_get_rate(i2s->op_clk);
+
+			/* Over-ride the other's */
+			if (other) {
+				other->op_clk = i2s->op_clk;
+				other->rclk_srcrate = i2s->rclk_srcrate;
+			}
+		} else if ((!clk_id && (mod & MOD_IMS_SYSMUX))
+				|| (clk_id && !(mod & MOD_IMS_SYSMUX))) {
+			dev_err(&i2s->pdev->dev,
+				"%s:%d Other DAI busy\n", __func__, __LINE__);
+			return -EAGAIN;
+		} else {
+			/* Call can't be on the active DAI */
+			i2s->op_clk = other->op_clk;
+			i2s->rclk_srcrate = other->rclk_srcrate;
+			return 0;
+		}
+
+		if (clk_id == 0)
+			mod &= ~MOD_IMS_SYSMUX;
+		else
+			mod |= MOD_IMS_SYSMUX;
+		break;
+
+	default:
+		dev_err(&i2s->pdev->dev, "We don't serve that!\n");
+		return -EINVAL;
+	}
+
+	writel(mod, i2s->addr + I2SMOD);
+
+	return 0;
+}
+
+static int i2s_set_fmt(struct snd_soc_dai *dai,
+	unsigned int fmt)
+{
+	struct i2s_dai *i2s = to_info(dai);
+	u32 mod = readl(i2s->addr + I2SMOD);
+	u32 tmp = 0;
+
+	/* Format is priority */
+	switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+	case SND_SOC_DAIFMT_RIGHT_J:
+		tmp |= MOD_LR_RLOW;
+		tmp |= MOD_SDF_MSB;
+		break;
+	case SND_SOC_DAIFMT_LEFT_J:
+		tmp |= MOD_LR_RLOW;
+		tmp |= MOD_SDF_LSB;
+		break;
+	case SND_SOC_DAIFMT_I2S:
+		tmp |= MOD_SDF_IIS;
+		break;
+	default:
+		dev_err(&i2s->pdev->dev, "Format not supported\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * INV flag is relative to the FORMAT flag - if set it simply
+	 * flips the polarity specified by the Standard
+	 */
+	switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+	case SND_SOC_DAIFMT_NB_NF:
+		break;
+	case SND_SOC_DAIFMT_NB_IF:
+		if (tmp & MOD_LR_RLOW)
+			tmp &= ~MOD_LR_RLOW;
+		else
+			tmp |= MOD_LR_RLOW;
+		break;
+	default:
+		dev_err(&i2s->pdev->dev, "Polarity not supported\n");
+		return -EINVAL;
+	}
+
+	switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+	case SND_SOC_DAIFMT_CBM_CFM:
+		tmp |= MOD_SLAVE;
+		break;
+	case SND_SOC_DAIFMT_CBS_CFS:
+		/* Set default source clock in Master mode */
+		if (i2s->rclk_srcrate == 0)
+			i2s_set_sysclk(dai, SAMSUNG_I2S_RCLKSRC_0,
+							0, SND_SOC_CLOCK_IN);
+		break;
+	default:
+		dev_err(&i2s->pdev->dev, "master/slave format not supported\n");
+		return -EINVAL;
+	}
+
+	if (any_active(i2s) &&
+			((mod & (MOD_SDF_MASK | MOD_LR_RLOW
+				| MOD_SLAVE)) != tmp)) {
+		dev_err(&i2s->pdev->dev,
+				"%s:%d Other DAI busy\n", __func__, __LINE__);
+		return -EAGAIN;
+	}
+
+	mod &= ~(MOD_SDF_MASK | MOD_LR_RLOW | MOD_SLAVE);
+	mod |= tmp;
+	writel(mod, i2s->addr + I2SMOD);
+
+	return 0;
+}
+
+static int i2s_hw_params(struct snd_pcm_substream *substream,
+	struct snd_pcm_hw_params *params, struct snd_soc_dai *dai)
+{
+	struct i2s_dai *i2s = to_info(dai);
+	u32 mod = readl(i2s->addr + I2SMOD);
+
+	if (!is_secondary(i2s))
+		mod &= ~(MOD_DC2_EN | MOD_DC1_EN);
+
+	switch (params_channels(params)) {
+	case 6:
+		mod |= MOD_DC2_EN;
+	case 4:
+		mod |= MOD_DC1_EN;
+		break;
+	case 2:
+		break;
+	default:
+		dev_err(&i2s->pdev->dev, "%d channels not supported\n",
+				params_channels(params));
+		return -EINVAL;
+	}
+
+	if (is_secondary(i2s))
+		mod &= ~MOD_BLCS_MASK;
+	else
+		mod &= ~MOD_BLCP_MASK;
+
+	if (is_manager(i2s))
+		mod &= ~MOD_BLC_MASK;
+
+	switch (params_format(params)) {
+	case SNDRV_PCM_FORMAT_S8:
+		if (is_secondary(i2s))
+			mod |= MOD_BLCS_8BIT;
+		else
+			mod |= MOD_BLCP_8BIT;
+		if (is_manager(i2s))
+			mod |= MOD_BLC_8BIT;
+		break;
+	case SNDRV_PCM_FORMAT_S16_LE:
+		if (is_secondary(i2s))
+			mod |= MOD_BLCS_16BIT;
+		else
+			mod |= MOD_BLCP_16BIT;
+		if (is_manager(i2s))
+			mod |= MOD_BLC_16BIT;
+		break;
+	case SNDRV_PCM_FORMAT_S24_LE:
+		if (is_secondary(i2s))
+			mod |= MOD_BLCS_24BIT;
+		else
+			mod |= MOD_BLCP_24BIT;
+		if (is_manager(i2s))
+			mod |= MOD_BLC_24BIT;
+		break;
+	default:
+		dev_err(&i2s->pdev->dev, "Format(%d) not supported\n",
+				params_format(params));
+		return -EINVAL;
+	}
+	writel(mod, i2s->addr + I2SMOD);
+
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+		snd_soc_dai_set_dma_data(dai, substream,
+			(void *)&i2s->dma_playback);
+	else
+		snd_soc_dai_set_dma_data(dai, substream,
+			(void *)&i2s->dma_capture);
+
+	i2s->frmclk = params_rate(params);
+
+	return 0;
+}
+
+/* We set constraints on the substream according to the version of I2S */
+static int i2s_startup(struct snd_pcm_substream *substream,
+	  struct snd_soc_dai *dai)
+{
+	struct i2s_dai *i2s = to_info(dai);
+	struct i2s_dai *other = i2s->pri_dai ? : i2s->sec_dai;
+	unsigned long flags;
+
+	spin_lock_irqsave(&lock, flags);
+
+	i2s->mode |= DAI_OPENED;
+
+	if (is_manager(other))
+		i2s->mode &= ~DAI_MANAGER;
+	else
+		i2s->mode |= DAI_MANAGER;
+
+	/* Enforce set_sysclk in Master mode */
+	i2s->rclk_srcrate = 0;
+
+	spin_unlock_irqrestore(&lock, flags);
+
+	return 0;
+}
+
+static void i2s_shutdown(struct snd_pcm_substream *substream,
+	struct snd_soc_dai *dai)
+{
+	struct i2s_dai *i2s = to_info(dai);
+	struct i2s_dai *other = i2s->pri_dai ? : i2s->sec_dai;
+	unsigned long flags;
+
+	spin_lock_irqsave(&lock, flags);
+
+	i2s->mode &= ~DAI_OPENED;
+	i2s->mode &= ~DAI_MANAGER;
+
+	if (is_opened(other))
+		other->mode |= DAI_MANAGER;
+
+	/* Reset any constraint on RFS and BFS */
+	i2s->rfs = 0;
+	i2s->bfs = 0;
+
+	spin_unlock_irqrestore(&lock, flags);
+
+	/* Gate CDCLK by default */
+	if (!is_opened(other))
+		i2s_set_sysclk(dai, SAMSUNG_I2S_CDCLK,
+				0, SND_SOC_CLOCK_IN);
+}
+
+static int config_setup(struct i2s_dai *i2s)
+{
+	struct i2s_dai *other = i2s->pri_dai ? : i2s->sec_dai;
+	unsigned rfs, bfs, blc;
+	u32 psr;
+
+	blc = get_blc(i2s);
+
+	bfs = i2s->bfs;
+
+	if (!bfs && other)
+		bfs = other->bfs;
+
+	/* Select least possible multiple(2) if no constraint set */
+	if (!bfs)
+		bfs = blc * 2;
+
+	rfs = i2s->rfs;
+
+	if (!rfs && other)
+		rfs = other->rfs;
+
+	if ((rfs == 256 || rfs == 512) && (blc == 24)) {
+		dev_err(&i2s->pdev->dev,
+			"%d-RFS not supported for 24-blc\n", rfs);
+		return -EINVAL;
+	}
+
+	if (!rfs) {
+		if (bfs == 16 || bfs == 32)
+			rfs = 256;
+		else
+			rfs = 384;
+	}
+
+	/* If already setup and running */
+	if (any_active(i2s) && (get_rfs(i2s) != rfs || get_bfs(i2s) != bfs)) {
+		dev_err(&i2s->pdev->dev,
+				"%s:%d Other DAI busy\n", __func__, __LINE__);
+		return -EAGAIN;
+	}
+
+	/* Don't bother RFS, BFS & PSR in Slave mode */
+	if (is_slave(i2s))
+		return 0;
+
+	set_bfs(i2s, bfs);
+	set_rfs(i2s, rfs);
+
+	if (!(i2s->quirks & QUIRK_NO_MUXPSR)) {
+		psr = i2s->rclk_srcrate / i2s->frmclk / rfs;
+		writel(((psr - 1) << 8) | PSR_PSREN, i2s->addr + I2SPSR);
+		dev_dbg(&i2s->pdev->dev,
+			"RCLK_SRC=%luHz PSR=%u, RCLK=%dfs, BCLK=%dfs\n",
+				i2s->rclk_srcrate, psr, rfs, bfs);
+	}
+
+	return 0;
+}
+
+static int i2s_trigger(struct snd_pcm_substream *substream,
+	int cmd, struct snd_soc_dai *dai)
+{
+	int capture = (substream->stream == SNDRV_PCM_STREAM_CAPTURE);
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct i2s_dai *i2s = to_info(rtd->cpu_dai);
+	unsigned long flags;
+
+	switch (cmd) {
+	case SNDRV_PCM_TRIGGER_START:
+	case SNDRV_PCM_TRIGGER_RESUME:
+	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+		local_irq_save(flags);
+
+		if (config_setup(i2s)) {
+			local_irq_restore(flags);
+			return -EINVAL;
+		}
+
+		if (capture)
+			i2s_rxctrl(i2s, 1);
+		else
+			i2s_txctrl(i2s, 1);
+
+		local_irq_restore(flags);
+		break;
+	case SNDRV_PCM_TRIGGER_STOP:
+	case SNDRV_PCM_TRIGGER_SUSPEND:
+	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+		local_irq_save(flags);
+
+		if (capture)
+			i2s_rxctrl(i2s, 0);
+		else
+			i2s_txctrl(i2s, 0);
+
+		if (capture)
+			i2s_fifo(i2s, FIC_RXFLUSH);
+		else
+			i2s_fifo(i2s, FIC_TXFLUSH);
+
+		local_irq_restore(flags);
+		break;
+	}
+
+	return 0;
+}
+
+static int i2s_set_clkdiv(struct snd_soc_dai *dai,
+	int div_id, int div)
+{
+	struct i2s_dai *i2s = to_info(dai);
+	struct i2s_dai *other = i2s->pri_dai ? : i2s->sec_dai;
+
+	switch (div_id) {
+	case SAMSUNG_I2S_DIV_BCLK:
+		if ((any_active(i2s) && div && (get_bfs(i2s) != div))
+			|| (other && other->bfs && (other->bfs != div))) {
+			dev_err(&i2s->pdev->dev,
+				"%s:%d Other DAI busy\n", __func__, __LINE__);
+			return -EAGAIN;
+		}
+		i2s->bfs = div;
+		break;
+	default:
+		dev_err(&i2s->pdev->dev,
+			"Invalid clock divider(%d)\n", div_id);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static snd_pcm_sframes_t
+i2s_delay(struct snd_pcm_substream *substream, struct snd_soc_dai *dai)
+{
+	struct i2s_dai *i2s = to_info(dai);
+	u32 reg = readl(i2s->addr + I2SFIC);
+	snd_pcm_sframes_t delay;
+
+	if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
+		delay = FIC_RXCOUNT(reg);
+	else if (is_secondary(i2s))
+		delay = FICS_TXCOUNT(readl(i2s->addr + I2SFICS));
+	else
+		delay = FIC_TXCOUNT(reg);
+
+	return delay;
+}
+
+#ifdef CONFIG_PM
+static int i2s_suspend(struct snd_soc_dai *dai)
+{
+	struct i2s_dai *i2s = to_info(dai);
+
+	if (dai->active) {
+		i2s->suspend_i2smod = readl(i2s->addr + I2SMOD);
+		i2s->suspend_i2scon = readl(i2s->addr + I2SCON);
+		i2s->suspend_i2spsr = readl(i2s->addr + I2SPSR);
+	}
+
+	return 0;
+}
+
+static int i2s_resume(struct snd_soc_dai *dai)
+{
+	struct i2s_dai *i2s = to_info(dai);
+
+	if (dai->active) {
+		writel(i2s->suspend_i2scon, i2s->addr + I2SCON);
+		writel(i2s->suspend_i2smod, i2s->addr + I2SMOD);
+		writel(i2s->suspend_i2spsr, i2s->addr + I2SPSR);
+	}
+
+	return 0;
+}
+#else
+#define i2s_suspend NULL
+#define i2s_resume  NULL
+#endif
+
+static int samsung_i2s_dai_probe(struct snd_soc_dai *dai)
+{
+	struct i2s_dai *i2s = to_info(dai);
+	struct i2s_dai *other = i2s->pri_dai ? : i2s->sec_dai;
+
+	if (other && other->clk) /* If this is probe on secondary */
+		goto probe_exit;
+
+	i2s->addr = ioremap(i2s->base, 0x100);
+	if (i2s->addr == NULL) {
+		dev_err(&i2s->pdev->dev, "cannot ioremap registers\n");
+		return -ENXIO;
+	}
+
+	i2s->clk = clk_get(&i2s->pdev->dev, "iis");
+	if (IS_ERR(i2s->clk)) {
+		dev_err(&i2s->pdev->dev, "failed to get i2s_clock\n");
+		iounmap(i2s->addr);
+		return -ENOENT;
+	}
+	clk_enable(i2s->clk);
+
+	if (other) {
+		other->addr = i2s->addr;
+		other->clk = i2s->clk;
+	}
+
+	if (i2s->quirks & QUIRK_NEED_RSTCLR)
+		writel(CON_RSTCLR, i2s->addr + I2SCON);
+
+probe_exit:
+	/* Reset any constraint on RFS and BFS */
+	i2s->rfs = 0;
+	i2s->bfs = 0;
+	i2s_txctrl(i2s, 0);
+	i2s_rxctrl(i2s, 0);
+	i2s_fifo(i2s, FIC_TXFLUSH);
+	i2s_fifo(other, FIC_TXFLUSH);
+	i2s_fifo(i2s, FIC_RXFLUSH);
+
+	/* Gate CDCLK by default */
+	if (!is_opened(other))
+		i2s_set_sysclk(dai, SAMSUNG_I2S_CDCLK,
+				0, SND_SOC_CLOCK_IN);
+
+	return 0;
+}
+
+static int samsung_i2s_dai_remove(struct snd_soc_dai *dai)
+{
+	struct i2s_dai *i2s = snd_soc_dai_get_drvdata(dai);
+	struct i2s_dai *other = i2s->pri_dai ? : i2s->sec_dai;
+
+	if (!other || !other->clk) {
+
+		if (i2s->quirks & QUIRK_NEED_RSTCLR)
+			writel(0, i2s->addr + I2SCON);
+
+		clk_disable(i2s->clk);
+		clk_put(i2s->clk);
+
+		iounmap(i2s->addr);
+	}
+
+	i2s->clk = NULL;
+
+	return 0;
+}
+
+static struct snd_soc_dai_ops samsung_i2s_dai_ops = {
+	.trigger = i2s_trigger,
+	.hw_params = i2s_hw_params,
+	.set_fmt = i2s_set_fmt,
+	.set_clkdiv = i2s_set_clkdiv,
+	.set_sysclk = i2s_set_sysclk,
+	.startup = i2s_startup,
+	.shutdown = i2s_shutdown,
+	.delay = i2s_delay,
+};
+
+#define SAMSUNG_I2S_RATES	SNDRV_PCM_RATE_8000_96000
+
+#define SAMSUNG_I2S_FMTS	(SNDRV_PCM_FMTBIT_S8 | \
+					SNDRV_PCM_FMTBIT_S16_LE | \
+					SNDRV_PCM_FMTBIT_S24_LE)
+
+static __devinit
+struct i2s_dai *i2s_alloc_dai(struct platform_device *pdev, bool sec)
+{
+	struct i2s_dai *i2s;
+
+	i2s = kzalloc(sizeof(struct i2s_dai), GFP_KERNEL);
+	if (i2s == NULL)
+		return NULL;
+
+	i2s->pdev = pdev;
+	i2s->pri_dai = NULL;
+	i2s->sec_dai = NULL;
+	i2s->i2s_dai_drv.symmetric_rates = 1;
+	i2s->i2s_dai_drv.probe = samsung_i2s_dai_probe;
+	i2s->i2s_dai_drv.remove = samsung_i2s_dai_remove;
+	i2s->i2s_dai_drv.ops = &samsung_i2s_dai_ops;
+	i2s->i2s_dai_drv.suspend = i2s_suspend;
+	i2s->i2s_dai_drv.resume = i2s_resume;
+	i2s->i2s_dai_drv.playback.channels_min = 2;
+	i2s->i2s_dai_drv.playback.channels_max = 2;
+	i2s->i2s_dai_drv.playback.rates = SAMSUNG_I2S_RATES;
+	i2s->i2s_dai_drv.playback.formats = SAMSUNG_I2S_FMTS;
+
+	if (!sec) {
+		i2s->i2s_dai_drv.capture.channels_min = 2;
+		i2s->i2s_dai_drv.capture.channels_max = 2;
+		i2s->i2s_dai_drv.capture.rates = SAMSUNG_I2S_RATES;
+		i2s->i2s_dai_drv.capture.formats = SAMSUNG_I2S_FMTS;
+	} else {	/* Create a new platform_device for Secondary */
+		i2s->pdev = platform_device_register_resndata(NULL,
+				pdev->name, pdev->id + SAMSUNG_I2S_SECOFF,
+				NULL, 0, NULL, 0);
+		if (IS_ERR(i2s->pdev)) {
+			kfree(i2s);
+			return NULL;
+		}
+	}
+
+	/* Pre-assign snd_soc_dai_set_drvdata */
+	dev_set_drvdata(&i2s->pdev->dev, i2s);
+
+	return i2s;
+}
+
+static __devinit int samsung_i2s_probe(struct platform_device *pdev)
+{
+	u32 dma_pl_chan, dma_cp_chan, dma_pl_sec_chan;
+	struct i2s_dai *pri_dai, *sec_dai = NULL;
+	struct s3c_audio_pdata *i2s_pdata;
+	struct samsung_i2s *i2s_cfg;
+	struct resource *res;
+	u32 regs_base, quirks;
+	int ret = 0;
+
+	/* Called during secondary interface registration */
+	if (pdev->id >= SAMSUNG_I2S_SECOFF) {
+		sec_dai = dev_get_drvdata(&pdev->dev);
+		snd_soc_register_dai(&sec_dai->pdev->dev,
+			&sec_dai->i2s_dai_drv);
+		return 0;
+	}
+
+	i2s_pdata = pdev->dev.platform_data;
+	if (i2s_pdata == NULL) {
+		dev_err(&pdev->dev, "Can't work without s3c_audio_pdata\n");
+		return -EINVAL;
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
+	if (!res) {
+		dev_err(&pdev->dev, "Unable to get I2S-TX dma resource\n");
+		return -ENXIO;
+	}
+	dma_pl_chan = res->start;
+
+	res = platform_get_resource(pdev, IORESOURCE_DMA, 1);
+	if (!res) {
+		dev_err(&pdev->dev, "Unable to get I2S-RX dma resource\n");
+		return -ENXIO;
+	}
+	dma_cp_chan = res->start;
+
+	res = platform_get_resource(pdev, IORESOURCE_DMA, 2);
+	if (res)
+		dma_pl_sec_chan = res->start;
+	else
+		dma_pl_sec_chan = 0;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		dev_err(&pdev->dev, "Unable to get I2S SFR address\n");
+		return -ENXIO;
+	}
+
+	if (!request_mem_region(res->start, resource_size(res),
+							"samsung-i2s")) {
+		dev_err(&pdev->dev, "Unable to request SFR region\n");
+		return -EBUSY;
+	}
+	regs_base = res->start;
+
+	i2s_cfg = &i2s_pdata->type.i2s;
+	quirks = i2s_cfg->quirks;
+
+	pri_dai = i2s_alloc_dai(pdev, false);
+	if (!pri_dai) {
+		dev_err(&pdev->dev, "Unable to alloc I2S_pri\n");
+		ret = -ENOMEM;
+		goto err1;
+	}
+
+	pri_dai->dma_playback.dma_addr = regs_base + I2STXD;
+	pri_dai->dma_capture.dma_addr = regs_base + I2SRXD;
+	pri_dai->dma_playback.client =
+		(struct s3c2410_dma_client *)&pri_dai->dma_playback;
+	pri_dai->dma_capture.client =
+		(struct s3c2410_dma_client *)&pri_dai->dma_capture;
+	pri_dai->dma_playback.channel = dma_pl_chan;
+	pri_dai->dma_capture.channel = dma_cp_chan;
+	pri_dai->src_clk = i2s_cfg->src_clk;
+	pri_dai->dma_playback.dma_size = 4;
+	pri_dai->dma_capture.dma_size = 4;
+	pri_dai->base = regs_base;
+	pri_dai->quirks = quirks;
+
+	if (quirks & QUIRK_PRI_6CHAN)
+		pri_dai->i2s_dai_drv.playback.channels_max = 6;
+
+	if (quirks & QUIRK_SEC_DAI) {
+		sec_dai = i2s_alloc_dai(pdev, true);
+		if (!sec_dai) {
+			dev_err(&pdev->dev, "Unable to alloc I2S_sec\n");
+			ret = -ENOMEM;
+			goto err2;
+		}
+		sec_dai->dma_playback.dma_addr = regs_base + I2STXDS;
+		sec_dai->dma_playback.client =
+			(struct s3c2410_dma_client *)&sec_dai->dma_playback;
+		/* Use iDMA always if SysDMA not provided */
+		sec_dai->dma_playback.channel = dma_pl_sec_chan ? : -1;
+		sec_dai->src_clk = i2s_cfg->src_clk;
+		sec_dai->dma_playback.dma_size = 4;
+		sec_dai->base = regs_base;
+		sec_dai->quirks = quirks;
+		sec_dai->pri_dai = pri_dai;
+		pri_dai->sec_dai = sec_dai;
+	}
+
+	if (i2s_pdata->cfg_gpio && i2s_pdata->cfg_gpio(pdev)) {
+		dev_err(&pdev->dev, "Unable to configure gpio\n");
+		ret = -EINVAL;
+		goto err3;
+	}
+
+	snd_soc_register_dai(&pri_dai->pdev->dev, &pri_dai->i2s_dai_drv);
+
+	return 0;
+err3:
+	kfree(sec_dai);
+err2:
+	kfree(pri_dai);
+err1:
+	release_mem_region(regs_base, resource_size(res));
+
+	return ret;
+}
+
+static __devexit int samsung_i2s_remove(struct platform_device *pdev)
+{
+	struct i2s_dai *i2s, *other;
+
+	i2s = dev_get_drvdata(&pdev->dev);
+	other = i2s->pri_dai ? : i2s->sec_dai;
+
+	if (other) {
+		other->pri_dai = NULL;
+		other->sec_dai = NULL;
+	} else {
+		struct resource *res;
+		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+		if (res)
+			release_mem_region(res->start, resource_size(res));
+	}
+
+	i2s->pri_dai = NULL;
+	i2s->sec_dai = NULL;
+
+	kfree(i2s);
+
+	snd_soc_unregister_dai(&pdev->dev);
+
+	return 0;
+}
+
+static struct platform_driver samsung_i2s_driver = {
+	.probe  = samsung_i2s_probe,
+	.remove = samsung_i2s_remove,
+	.driver = {
+		.name = "samsung-i2s",
+		.owner = THIS_MODULE,
+	},
+};
+
+static int __init samsung_i2s_init(void)
+{
+	return platform_driver_register(&samsung_i2s_driver);
+}
+module_init(samsung_i2s_init);
+
+static void __exit samsung_i2s_exit(void)
+{
+	platform_driver_unregister(&samsung_i2s_driver);
+}
+module_exit(samsung_i2s_exit);
+
+/* Module information */
+MODULE_AUTHOR("Jaswinder Singh, <jassi.brar@samsung.com>");
+MODULE_DESCRIPTION("Samsung I2S Interface");
+MODULE_ALIAS("platform:samsung-i2s");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/samsung/i2s.h b/sound/soc/samsung/i2s.h
new file mode 100644
index 0000000..8e15f6a
--- /dev/null
+++ b/sound/soc/samsung/i2s.h
@@ -0,0 +1,29 @@
+/* sound/soc/samsung/i2s.h
+ *
+ * ALSA SoC Audio Layer - Samsung I2S Controller driver
+ *
+ * Copyright (c) 2010 Samsung Electronics Co. Ltd.
+ *	Jaswinder Singh <jassi.brar@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __SND_SOC_SAMSUNG_I2S_H
+#define __SND_SOC_SAMSUNG_I2S_H
+
+/*
+ * Maximum number of I2S blocks that any SoC can have.
+ * The secondary interface of a CPU DAI (if one exists)
+ * is indexed at [cpu-dai's ID + SAMSUNG_I2S_SECOFF].
+ */
+#define SAMSUNG_I2S_SECOFF	4
+
+#define SAMSUNG_I2S_DIV_BCLK	1
+
+#define SAMSUNG_I2S_RCLKSRC_0	0
+#define SAMSUNG_I2S_RCLKSRC_1	1
+#define SAMSUNG_I2S_CDCLK		2
+
+#endif /* __SND_SOC_SAMSUNG_I2S_H */
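
For illustration only, not part of the patch: SAMSUNG_I2S_SECOFF above is how a controller's secondary FIFO interface is addressed. The primary DAI of controller N registers as "samsung-i2s.N", and its secondary is registered at id N + SAMSUNG_I2S_SECOFF (see i2s_alloc_dai() and the SMDK machine drivers below). A minimal sketch with a hypothetical helper name:

#include <stdio.h>

#define SAMSUNG_I2S_SECOFF	4	/* mirrors sound/soc/samsung/i2s.h */

/* Hypothetical helper: name of the secondary FIFO DAI of controller 'id' */
static void sec_dai_name(int id, char *buf, size_t len)
{
	snprintf(buf, len, "samsung-i2s.%d", id + SAMSUNG_I2S_SECOFF);
	/* e.g. controller 0 -> "samsung-i2s.4" */
}
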
diff --git a/sound/soc/s3c24xx/jive_wm8750.c b/sound/soc/samsung/jive_wm8750.c
similarity index 88%
rename from sound/soc/s3c24xx/jive_wm8750.c
rename to sound/soc/samsung/jive_wm8750.c
index 49605cd..0880252 100644
--- a/sound/soc/s3c24xx/jive_wm8750.c
+++ b/sound/soc/samsung/jive_wm8750.c
@@ -1,4 +1,4 @@
-/* sound/soc/s3c24xx/jive_wm8750.c
+/* sound/soc/samsung/jive_wm8750.c
  *
  * Copyright 2007,2008 Simtec Electronics
  *
@@ -21,11 +21,10 @@
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 
 #include <asm/mach-types.h>
 
-#include "s3c-dma.h"
+#include "dma.h"
 #include "s3c2412-i2s.h"
 
 #include "../codecs/wm8750.h"
@@ -111,18 +110,19 @@
 static int jive_wm8750_init(struct snd_soc_pcm_runtime *rtd)
 {
 	struct snd_soc_codec *codec = rtd->codec;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 	int err;
 
 	/* These endpoints are not being used. */
-	snd_soc_dapm_nc_pin(codec, "LINPUT2");
-	snd_soc_dapm_nc_pin(codec, "RINPUT2");
-	snd_soc_dapm_nc_pin(codec, "LINPUT3");
-	snd_soc_dapm_nc_pin(codec, "RINPUT3");
-	snd_soc_dapm_nc_pin(codec, "OUT3");
-	snd_soc_dapm_nc_pin(codec, "MONO");
+	snd_soc_dapm_nc_pin(dapm, "LINPUT2");
+	snd_soc_dapm_nc_pin(dapm, "RINPUT2");
+	snd_soc_dapm_nc_pin(dapm, "LINPUT3");
+	snd_soc_dapm_nc_pin(dapm, "RINPUT3");
+	snd_soc_dapm_nc_pin(dapm, "OUT3");
+	snd_soc_dapm_nc_pin(dapm, "MONO");
 
 	/* Add jive specific widgets */
-	err = snd_soc_dapm_new_controls(codec, wm8750_dapm_widgets,
+	err = snd_soc_dapm_new_controls(dapm, wm8750_dapm_widgets,
 					ARRAY_SIZE(wm8750_dapm_widgets));
 	if (err) {
 		printk(KERN_ERR "%s: failed to add widgets (%d)\n",
@@ -130,8 +130,8 @@
 		return err;
 	}
 
-	snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
-	snd_soc_dapm_sync(codec);
+	snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
+	snd_soc_dapm_sync(dapm);
 
 	return 0;
 }
@@ -141,7 +141,7 @@
 	.stream_name	= "WM8750",
 	.cpu_dai_name	= "s3c2412-i2s",
 	.codec_dai_name = "wm8750-hifi",
-	.platform_name	= "s3c24xx-pcm-audio",
+	.platform_name	= "samsung-audio",
 	.codec_name	= "wm8750-codec.0-0x1a",
 	.init		= jive_wm8750_init,
 	.ops		= &jive_ops,
diff --git a/sound/soc/s3c24xx/lm4857.h b/sound/soc/samsung/lm4857.h
similarity index 100%
rename from sound/soc/s3c24xx/lm4857.h
rename to sound/soc/samsung/lm4857.h
diff --git a/sound/soc/s3c24xx/ln2440sbc_alc650.c b/sound/soc/samsung/ln2440sbc_alc650.c
similarity index 92%
rename from sound/soc/s3c24xx/ln2440sbc_alc650.c
rename to sound/soc/samsung/ln2440sbc_alc650.c
index abe64ab..a2bb34d 100644
--- a/sound/soc/s3c24xx/ln2440sbc_alc650.c
+++ b/sound/soc/samsung/ln2440sbc_alc650.c
@@ -21,10 +21,9 @@
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 
-#include "s3c-dma.h"
-#include "s3c-ac97.h"
+#include "dma.h"
+#include "ac97.h"
 
 static struct snd_soc_card ln2440sbc;
 
@@ -32,10 +31,10 @@
 {
 	.name = "AC97",
 	.stream_name = "AC97 HiFi",
-	.cpu_dai_name = "s3c-ac97",
+	.cpu_dai_name = "samsung-ac97",
 	.codec_dai_name = "ac97-hifi",
 	.codec_name = "ac97-codec",
-	.platform_name = "s3c24xx-pcm-audio",
+	.platform_name = "samsung-audio",
 },
 };
 
diff --git a/sound/soc/s3c24xx/neo1973_gta02_wm8753.c b/sound/soc/samsung/neo1973_gta02_wm8753.c
similarity index 88%
rename from sound/soc/s3c24xx/neo1973_gta02_wm8753.c
rename to sound/soc/samsung/neo1973_gta02_wm8753.c
index e97bdf1..3eec610 100644
--- a/sound/soc/s3c24xx/neo1973_gta02_wm8753.c
+++ b/sound/soc/samsung/neo1973_gta02_wm8753.c
@@ -22,7 +22,6 @@
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 
 #include <asm/mach-types.h>
 
@@ -32,7 +31,7 @@
 #include <asm/io.h>
 #include <mach/gta02.h>
 #include "../codecs/wm8753.h"
-#include "s3c-dma.h"
+#include "dma.h"
 #include "s3c24xx-i2s.h"
 
 static struct snd_soc_card neo1973_gta02;
@@ -333,16 +332,17 @@
 static int neo1973_gta02_wm8753_init(struct snd_soc_pcm_runtime *rtd)
 {
 	struct snd_soc_codec *codec = rtd->codec;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 	int err;
 
 	/* set up NC codec pins */
-	snd_soc_dapm_nc_pin(codec, "OUT3");
-	snd_soc_dapm_nc_pin(codec, "OUT4");
-	snd_soc_dapm_nc_pin(codec, "LINE1");
-	snd_soc_dapm_nc_pin(codec, "LINE2");
+	snd_soc_dapm_nc_pin(dapm, "OUT3");
+	snd_soc_dapm_nc_pin(dapm, "OUT4");
+	snd_soc_dapm_nc_pin(dapm, "LINE1");
+	snd_soc_dapm_nc_pin(dapm, "LINE2");
 
 	/* Add neo1973 gta02 specific widgets */
-	snd_soc_dapm_new_controls(codec, wm8753_dapm_widgets,
+	snd_soc_dapm_new_controls(dapm, wm8753_dapm_widgets,
 				  ARRAY_SIZE(wm8753_dapm_widgets));
 
 	/* add neo1973 gta02 specific controls */
@@ -353,25 +353,25 @@
 		return err;
 
 	/* set up neo1973 gta02 specific audio path audio_map */
-	snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+	snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 
 	/* set endpoints to default off mode */
-	snd_soc_dapm_disable_pin(codec, "Stereo Out");
-	snd_soc_dapm_disable_pin(codec, "GSM Line Out");
-	snd_soc_dapm_disable_pin(codec, "GSM Line In");
-	snd_soc_dapm_disable_pin(codec, "Headset Mic");
-	snd_soc_dapm_disable_pin(codec, "Handset Mic");
-	snd_soc_dapm_disable_pin(codec, "Handset Spk");
+	snd_soc_dapm_disable_pin(dapm, "Stereo Out");
+	snd_soc_dapm_disable_pin(dapm, "GSM Line Out");
+	snd_soc_dapm_disable_pin(dapm, "GSM Line In");
+	snd_soc_dapm_disable_pin(dapm, "Headset Mic");
+	snd_soc_dapm_disable_pin(dapm, "Handset Mic");
+	snd_soc_dapm_disable_pin(dapm, "Handset Spk");
 
 	/* allow audio paths from the GSM modem to run during suspend */
-	snd_soc_dapm_ignore_suspend(codec, "Stereo Out");
-	snd_soc_dapm_ignore_suspend(codec, "GSM Line Out");
-	snd_soc_dapm_ignore_suspend(codec, "GSM Line In");
-	snd_soc_dapm_ignore_suspend(codec, "Headset Mic");
-	snd_soc_dapm_ignore_suspend(codec, "Handset Mic");
-	snd_soc_dapm_ignore_suspend(codec, "Handset Spk");
+	snd_soc_dapm_ignore_suspend(dapm, "Stereo Out");
+	snd_soc_dapm_ignore_suspend(dapm, "GSM Line Out");
+	snd_soc_dapm_ignore_suspend(dapm, "GSM Line In");
+	snd_soc_dapm_ignore_suspend(dapm, "Headset Mic");
+	snd_soc_dapm_ignore_suspend(dapm, "Handset Mic");
+	snd_soc_dapm_ignore_suspend(dapm, "Handset Spk");
 
-	snd_soc_dapm_sync(codec);
+	snd_soc_dapm_sync(dapm);
 
 	return 0;
 }
@@ -400,7 +400,7 @@
 	.cpu_dai_name = "s3c24xx-i2s",
 	.codec_dai_name = "wm8753-hifi",
 	.init = neo1973_gta02_wm8753_init,
-	.platform_name = "s3c24xx-pcm-audio",
+	.platform_name = "samsung-audio",
 	.codec_name = "wm8753-codec.0-0x1a",
 	.ops = &neo1973_gta02_hifi_ops,
 },
@@ -411,7 +411,7 @@
 	.codec_dai_name = "wm8753-voice",
 	.ops = &neo1973_gta02_voice_ops,
 	.codec_name = "wm8753-codec.0-0x1a",
-	.platform_name = "s3c24xx-pcm-audio",
+	.platform_name = "samsung-audio",
 },
 };
 
@@ -438,25 +438,21 @@
 		return -ENOMEM;
 
 	/* register bluetooth DAI here */
-	ret = snd_soc_register_dai(&neo1973_gta02_snd_device->dev, -1, &bt_dai);
-	if (ret) {
-		platform_device_put(neo1973_gta02_snd_device);
-		return ret;
-	}
+	ret = snd_soc_register_dai(&neo1973_gta02_snd_device->dev, &bt_dai);
+	if (ret)
+		goto err_put_device;
 
 	platform_set_drvdata(neo1973_gta02_snd_device, &neo1973_gta02);
 	ret = platform_device_add(neo1973_gta02_snd_device);
 
-	if (ret) {
-		platform_device_put(neo1973_gta02_snd_device);
-		return ret;
-	}
+	if (ret)
+		goto err_unregister_dai;
 
 	/* Initialise GPIOs used by amp */
 	ret = gpio_request(GTA02_GPIO_HP_IN, "GTA02_HP_IN");
 	if (ret) {
 		pr_err("gta02_wm8753: Failed to register GPIO %d\n", GTA02_GPIO_HP_IN);
-		goto err_unregister_device;
+		goto err_del_device;
 	}
 
 	ret = gpio_direction_output(GTA02_GPIO_HP_IN, 1);
@@ -483,15 +479,19 @@
 	gpio_free(GTA02_GPIO_AMP_SHUT);
 err_free_gpio_hp_in:
 	gpio_free(GTA02_GPIO_HP_IN);
-err_unregister_device:
-	platform_device_unregister(neo1973_gta02_snd_device);
+err_del_device:
+	platform_device_del(neo1973_gta02_snd_device);
+err_unregister_dai:
+	snd_soc_unregister_dai(&neo1973_gta02_snd_device->dev);
+err_put_device:
+	platform_device_put(neo1973_gta02_snd_device);
 	return ret;
 }
 module_init(neo1973_gta02_init);
 
 static void __exit neo1973_gta02_exit(void)
 {
-	snd_soc_unregister_dai(&neo1973_gta02_snd_device->dev, -1);
+	snd_soc_unregister_dai(&neo1973_gta02_snd_device->dev);
 	platform_device_unregister(neo1973_gta02_snd_device);
 	gpio_free(GTA02_GPIO_HP_IN);
 	gpio_free(GTA02_GPIO_AMP_SHUT);
diff --git a/sound/soc/s3c24xx/neo1973_wm8753.c b/sound/soc/samsung/neo1973_wm8753.c
similarity index 83%
rename from sound/soc/s3c24xx/neo1973_wm8753.c
rename to sound/soc/samsung/neo1973_wm8753.c
index f4f2ee7..c7a2451 100644
--- a/sound/soc/s3c24xx/neo1973_wm8753.c
+++ b/sound/soc/samsung/neo1973_wm8753.c
@@ -21,7 +21,6 @@
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/tlv.h>
 
 #include <asm/mach-types.h>
@@ -36,7 +35,7 @@
 
 #include "../codecs/wm8753.h"
 #include "lm4857.h"
-#include "s3c-dma.h"
+#include "dma.h"
 #include "s3c24xx-i2s.h"
 
 /* define the scenarios */
@@ -237,81 +236,83 @@
 
 static int set_scenario_endpoints(struct snd_soc_codec *codec, int scenario)
 {
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
+
 	pr_debug("Entered %s\n", __func__);
 
 	switch (neo1973_scenario) {
 	case NEO_AUDIO_OFF:
-		snd_soc_dapm_disable_pin(codec, "Audio Out");
-		snd_soc_dapm_disable_pin(codec, "GSM Line Out");
-		snd_soc_dapm_disable_pin(codec, "GSM Line In");
-		snd_soc_dapm_disable_pin(codec, "Headset Mic");
-		snd_soc_dapm_disable_pin(codec, "Call Mic");
+		snd_soc_dapm_disable_pin(dapm, "Audio Out");
+		snd_soc_dapm_disable_pin(dapm, "GSM Line Out");
+		snd_soc_dapm_disable_pin(dapm, "GSM Line In");
+		snd_soc_dapm_disable_pin(dapm, "Headset Mic");
+		snd_soc_dapm_disable_pin(dapm, "Call Mic");
 		break;
 	case NEO_GSM_CALL_AUDIO_HANDSET:
-		snd_soc_dapm_enable_pin(codec, "Audio Out");
-		snd_soc_dapm_enable_pin(codec, "GSM Line Out");
-		snd_soc_dapm_enable_pin(codec, "GSM Line In");
-		snd_soc_dapm_disable_pin(codec, "Headset Mic");
-		snd_soc_dapm_enable_pin(codec, "Call Mic");
+		snd_soc_dapm_enable_pin(dapm, "Audio Out");
+		snd_soc_dapm_enable_pin(dapm, "GSM Line Out");
+		snd_soc_dapm_enable_pin(dapm, "GSM Line In");
+		snd_soc_dapm_disable_pin(dapm, "Headset Mic");
+		snd_soc_dapm_enable_pin(dapm, "Call Mic");
 		break;
 	case NEO_GSM_CALL_AUDIO_HEADSET:
-		snd_soc_dapm_enable_pin(codec, "Audio Out");
-		snd_soc_dapm_enable_pin(codec, "GSM Line Out");
-		snd_soc_dapm_enable_pin(codec, "GSM Line In");
-		snd_soc_dapm_enable_pin(codec, "Headset Mic");
-		snd_soc_dapm_disable_pin(codec, "Call Mic");
+		snd_soc_dapm_enable_pin(dapm, "Audio Out");
+		snd_soc_dapm_enable_pin(dapm, "GSM Line Out");
+		snd_soc_dapm_enable_pin(dapm, "GSM Line In");
+		snd_soc_dapm_enable_pin(dapm, "Headset Mic");
+		snd_soc_dapm_disable_pin(dapm, "Call Mic");
 		break;
 	case NEO_GSM_CALL_AUDIO_BLUETOOTH:
-		snd_soc_dapm_disable_pin(codec, "Audio Out");
-		snd_soc_dapm_enable_pin(codec, "GSM Line Out");
-		snd_soc_dapm_enable_pin(codec, "GSM Line In");
-		snd_soc_dapm_disable_pin(codec, "Headset Mic");
-		snd_soc_dapm_disable_pin(codec, "Call Mic");
+		snd_soc_dapm_disable_pin(dapm, "Audio Out");
+		snd_soc_dapm_enable_pin(dapm, "GSM Line Out");
+		snd_soc_dapm_enable_pin(dapm, "GSM Line In");
+		snd_soc_dapm_disable_pin(dapm, "Headset Mic");
+		snd_soc_dapm_disable_pin(dapm, "Call Mic");
 		break;
 	case NEO_STEREO_TO_SPEAKERS:
-		snd_soc_dapm_enable_pin(codec, "Audio Out");
-		snd_soc_dapm_disable_pin(codec, "GSM Line Out");
-		snd_soc_dapm_disable_pin(codec, "GSM Line In");
-		snd_soc_dapm_disable_pin(codec, "Headset Mic");
-		snd_soc_dapm_disable_pin(codec, "Call Mic");
+		snd_soc_dapm_enable_pin(dapm, "Audio Out");
+		snd_soc_dapm_disable_pin(dapm, "GSM Line Out");
+		snd_soc_dapm_disable_pin(dapm, "GSM Line In");
+		snd_soc_dapm_disable_pin(dapm, "Headset Mic");
+		snd_soc_dapm_disable_pin(dapm, "Call Mic");
 		break;
 	case NEO_STEREO_TO_HEADPHONES:
-		snd_soc_dapm_enable_pin(codec, "Audio Out");
-		snd_soc_dapm_disable_pin(codec, "GSM Line Out");
-		snd_soc_dapm_disable_pin(codec, "GSM Line In");
-		snd_soc_dapm_disable_pin(codec, "Headset Mic");
-		snd_soc_dapm_disable_pin(codec, "Call Mic");
+		snd_soc_dapm_enable_pin(dapm, "Audio Out");
+		snd_soc_dapm_disable_pin(dapm, "GSM Line Out");
+		snd_soc_dapm_disable_pin(dapm, "GSM Line In");
+		snd_soc_dapm_disable_pin(dapm, "Headset Mic");
+		snd_soc_dapm_disable_pin(dapm, "Call Mic");
 		break;
 	case NEO_CAPTURE_HANDSET:
-		snd_soc_dapm_disable_pin(codec, "Audio Out");
-		snd_soc_dapm_disable_pin(codec, "GSM Line Out");
-		snd_soc_dapm_disable_pin(codec, "GSM Line In");
-		snd_soc_dapm_disable_pin(codec, "Headset Mic");
-		snd_soc_dapm_enable_pin(codec, "Call Mic");
+		snd_soc_dapm_disable_pin(dapm, "Audio Out");
+		snd_soc_dapm_disable_pin(dapm, "GSM Line Out");
+		snd_soc_dapm_disable_pin(dapm, "GSM Line In");
+		snd_soc_dapm_disable_pin(dapm, "Headset Mic");
+		snd_soc_dapm_enable_pin(dapm, "Call Mic");
 		break;
 	case NEO_CAPTURE_HEADSET:
-		snd_soc_dapm_disable_pin(codec, "Audio Out");
-		snd_soc_dapm_disable_pin(codec, "GSM Line Out");
-		snd_soc_dapm_disable_pin(codec, "GSM Line In");
-		snd_soc_dapm_enable_pin(codec, "Headset Mic");
-		snd_soc_dapm_disable_pin(codec, "Call Mic");
+		snd_soc_dapm_disable_pin(dapm, "Audio Out");
+		snd_soc_dapm_disable_pin(dapm, "GSM Line Out");
+		snd_soc_dapm_disable_pin(dapm, "GSM Line In");
+		snd_soc_dapm_enable_pin(dapm, "Headset Mic");
+		snd_soc_dapm_disable_pin(dapm, "Call Mic");
 		break;
 	case NEO_CAPTURE_BLUETOOTH:
-		snd_soc_dapm_disable_pin(codec, "Audio Out");
-		snd_soc_dapm_disable_pin(codec, "GSM Line Out");
-		snd_soc_dapm_disable_pin(codec, "GSM Line In");
-		snd_soc_dapm_disable_pin(codec, "Headset Mic");
-		snd_soc_dapm_disable_pin(codec, "Call Mic");
+		snd_soc_dapm_disable_pin(dapm, "Audio Out");
+		snd_soc_dapm_disable_pin(dapm, "GSM Line Out");
+		snd_soc_dapm_disable_pin(dapm, "GSM Line In");
+		snd_soc_dapm_disable_pin(dapm, "Headset Mic");
+		snd_soc_dapm_disable_pin(dapm, "Call Mic");
 		break;
 	default:
-		snd_soc_dapm_disable_pin(codec, "Audio Out");
-		snd_soc_dapm_disable_pin(codec, "GSM Line Out");
-		snd_soc_dapm_disable_pin(codec, "GSM Line In");
-		snd_soc_dapm_disable_pin(codec, "Headset Mic");
-		snd_soc_dapm_disable_pin(codec, "Call Mic");
+		snd_soc_dapm_disable_pin(dapm, "Audio Out");
+		snd_soc_dapm_disable_pin(dapm, "GSM Line Out");
+		snd_soc_dapm_disable_pin(dapm, "GSM Line In");
+		snd_soc_dapm_disable_pin(dapm, "Headset Mic");
+		snd_soc_dapm_disable_pin(dapm, "Call Mic");
 	}
 
-	snd_soc_dapm_sync(codec);
+	snd_soc_dapm_sync(dapm);
 
 	return 0;
 }
@@ -502,20 +503,21 @@
 static int neo1973_wm8753_init(struct snd_soc_pcm_runtime *rtd)
 {
 	struct snd_soc_codec *codec = rtd->codec;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 	int err;
 
 	pr_debug("Entered %s\n", __func__);
 
 	/* set up NC codec pins */
-	snd_soc_dapm_nc_pin(codec, "LOUT2");
-	snd_soc_dapm_nc_pin(codec, "ROUT2");
-	snd_soc_dapm_nc_pin(codec, "OUT3");
-	snd_soc_dapm_nc_pin(codec, "OUT4");
-	snd_soc_dapm_nc_pin(codec, "LINE1");
-	snd_soc_dapm_nc_pin(codec, "LINE2");
+	snd_soc_dapm_nc_pin(dapm, "LOUT2");
+	snd_soc_dapm_nc_pin(dapm, "ROUT2");
+	snd_soc_dapm_nc_pin(dapm, "OUT3");
+	snd_soc_dapm_nc_pin(dapm, "OUT4");
+	snd_soc_dapm_nc_pin(dapm, "LINE1");
+	snd_soc_dapm_nc_pin(dapm, "LINE2");
 
 	/* Add neo1973 specific widgets */
-	snd_soc_dapm_new_controls(codec, wm8753_dapm_widgets,
+	snd_soc_dapm_new_controls(dapm, wm8753_dapm_widgets,
 				  ARRAY_SIZE(wm8753_dapm_widgets));
 
 	/* set endpoints to default mode */
@@ -528,10 +530,10 @@
 		return err;
 
 	/* set up neo1973 specific audio routes */
-	err = snd_soc_dapm_add_routes(codec, dapm_routes,
+	err = snd_soc_dapm_add_routes(dapm, dapm_routes,
 				      ARRAY_SIZE(dapm_routes));
 
-	snd_soc_dapm_sync(codec);
+	snd_soc_dapm_sync(dapm);
 	return 0;
 }
 
@@ -556,7 +558,7 @@
 { /* Hifi Playback - for similatious use with voice below */
 	.name = "WM8753",
 	.stream_name = "WM8753 HiFi",
-	.platform_name = "s3c24xx-pcm-audio",
+	.platform_name = "samsung-audio",
 	.cpu_dai_name = "s3c24xx-i2s",
 	.codec_dai_name = "wm8753-hifi",
 	.codec_name = "wm8753-codec.0-0x1a",
@@ -566,7 +568,7 @@
 { /* Voice via BT */
 	.name = "Bluetooth",
 	.stream_name = "Voice",
-	.platform_name = "s3c24xx-pcm-audio",
+	.platform_name = "samsung-audio",
 	.cpu_dai_name = "bluetooth-dai",
 	.codec_dai_name = "wm8753-voice",
 	.codec_name = "wm8753-codec.0-0x1a",
diff --git a/sound/soc/s3c24xx/s3c-pcm.c b/sound/soc/samsung/pcm.c
similarity index 98%
rename from sound/soc/s3c24xx/s3c-pcm.c
rename to sound/soc/samsung/pcm.c
index 2e020e1..48d0b75 100644
--- a/sound/soc/s3c24xx/s3c-pcm.c
+++ b/sound/soc/samsung/pcm.c
@@ -1,4 +1,4 @@
-/* sound/soc/s3c24xx/s3c-pcm.c
+/* sound/soc/samsung/pcm.c
  *
  * ALSA SoC Audio Layer - S3C PCM-Controller driver
  *
@@ -29,8 +29,8 @@
 #include <plat/audio.h>
 #include <plat/dma.h>
 
-#include "s3c-dma.h"
-#include "s3c-pcm.h"
+#include "dma.h"
+#include "pcm.h"
 
 static struct s3c2410_dma_client s3c_pcm_dma_client_out = {
 	.name		= "PCM Stereo out"
diff --git a/sound/soc/s3c24xx/s3c-pcm.h b/sound/soc/samsung/pcm.h
similarity index 98%
rename from sound/soc/s3c24xx/s3c-pcm.h
rename to sound/soc/samsung/pcm.h
index f60baa1..03393dc 100644
--- a/sound/soc/s3c24xx/s3c-pcm.h
+++ b/sound/soc/samsung/pcm.h
@@ -1,4 +1,4 @@
-/*  sound/soc/s3c24xx/s3c-pcm.h
+/*  sound/soc/samsung/pcm.h
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
diff --git a/sound/soc/s3c24xx/regs-i2s-v2.h b/sound/soc/samsung/regs-i2s-v2.h
similarity index 100%
rename from sound/soc/s3c24xx/regs-i2s-v2.h
rename to sound/soc/samsung/regs-i2s-v2.h
diff --git a/sound/soc/s3c24xx/rx1950_uda1380.c b/sound/soc/samsung/rx1950_uda1380.c
similarity index 94%
rename from sound/soc/s3c24xx/rx1950_uda1380.c
rename to sound/soc/samsung/rx1950_uda1380.c
index 468cc11..f400274 100644
--- a/sound/soc/s3c24xx/rx1950_uda1380.c
+++ b/sound/soc/samsung/rx1950_uda1380.c
@@ -25,7 +25,6 @@
 #include <linux/clk.h>
 
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/uda1380.h>
 #include <sound/jack.h>
 
@@ -35,7 +34,7 @@
 
 #include <asm/mach-types.h>
 
-#include "s3c-dma.h"
+#include "dma.h"
 #include "s3c24xx-i2s.h"
 #include "../codecs/uda1380.h"
 
@@ -95,7 +94,7 @@
 		.cpu_dai_name	= "s3c24xx-iis",
 		.codec_dai_name	= "uda1380-hifi",
 		.init		= rx1950_uda1380_init,
-		.platform_name	= "s3c24xx-pcm-audio",
+		.platform_name	= "samsung-audio",
 		.codec_name	= "uda1380-codec.0-001a",
 		.ops		= &rx1950_ops,
 	},
@@ -228,26 +227,28 @@
 static int rx1950_uda1380_init(struct snd_soc_pcm_runtime *rtd)
 {
 	struct snd_soc_codec *codec = rtd->codec;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 	int err;
 
 	/* Add rx1950 specific widgets */
-	err = snd_soc_dapm_new_controls(codec, uda1380_dapm_widgets,
+	err = snd_soc_dapm_new_controls(dapm, uda1380_dapm_widgets,
 				  ARRAY_SIZE(uda1380_dapm_widgets));
 
 	if (err)
 		return err;
 
 	/* Set up rx1950 specific audio path audio_mapnects */
-	err = snd_soc_dapm_add_routes(codec, audio_map,
+	err = snd_soc_dapm_add_routes(dapm, audio_map,
 				      ARRAY_SIZE(audio_map));
 
 	if (err)
 		return err;
 
-	snd_soc_dapm_enable_pin(codec, "Headphone Jack");
-	snd_soc_dapm_enable_pin(codec, "Speaker");
+	snd_soc_dapm_enable_pin(dapm, "Headphone Jack");
+	snd_soc_dapm_enable_pin(dapm, "Speaker");
+	snd_soc_dapm_enable_pin(dapm, "Mic Jack");
 
-	snd_soc_dapm_sync(codec);
+	snd_soc_dapm_sync(dapm);
 
 	snd_soc_jack_new(codec, "Headphone Jack", SND_JACK_HEADPHONE,
 		&hp_jack);
diff --git a/sound/soc/s3c24xx/s3c-i2s-v2.c b/sound/soc/samsung/s3c-i2s-v2.c
similarity index 99%
rename from sound/soc/s3c24xx/s3c-i2s-v2.c
rename to sound/soc/samsung/s3c-i2s-v2.c
index b3866d5..094f36e 100644
--- a/sound/soc/s3c24xx/s3c-i2s-v2.c
+++ b/sound/soc/samsung/s3c-i2s-v2.c
@@ -1,4 +1,4 @@
-/* sound/soc/s3c24xx/s3c-i2c-v2.c
+/* sound/soc/samsung/s3c-i2s-v2.c
  *
  * ALSA Soc Audio Layer - I2S core for newer Samsung SoCs.
  *
@@ -28,7 +28,7 @@
 
 #include "regs-i2s-v2.h"
 #include "s3c-i2s-v2.h"
-#include "s3c-dma.h"
+#include "dma.h"
 
 #undef S3C_IIS_V2_SUPPORTED
 
diff --git a/sound/soc/s3c24xx/s3c-i2s-v2.h b/sound/soc/samsung/s3c-i2s-v2.h
similarity index 98%
rename from sound/soc/s3c24xx/s3c-i2s-v2.h
rename to sound/soc/samsung/s3c-i2s-v2.h
index d458301..f8297d9 100644
--- a/sound/soc/s3c24xx/s3c-i2s-v2.h
+++ b/sound/soc/samsung/s3c-i2s-v2.h
@@ -1,4 +1,4 @@
-/* sound/soc/s3c24xx/s3c-i2s-v2.h
+/* sound/soc/samsung/s3c-i2s-v2.h
  *
  * ALSA Soc Audio Layer - S3C_I2SV2 I2S driver
  *
diff --git a/sound/soc/s3c24xx/s3c2412-i2s.c b/sound/soc/samsung/s3c2412-i2s.c
similarity index 98%
rename from sound/soc/s3c24xx/s3c2412-i2s.c
rename to sound/soc/samsung/s3c2412-i2s.c
index 4a861cf..7ea8378 100644
--- a/sound/soc/s3c24xx/s3c2412-i2s.c
+++ b/sound/soc/samsung/s3c2412-i2s.c
@@ -1,4 +1,4 @@
-/* sound/soc/s3c24xx/s3c2412-i2s.c
+/* sound/soc/samsung/s3c2412-i2s.c
  *
  * ALSA Soc Audio Layer - S3C2412 I2S driver
  *
@@ -35,7 +35,7 @@
 #include <mach/regs-gpio.h>
 #include <mach/dma.h>
 
-#include "s3c-dma.h"
+#include "dma.h"
 #include "regs-i2s-v2.h"
 #include "s3c2412-i2s.h"
 
diff --git a/sound/soc/s3c24xx/s3c2412-i2s.h b/sound/soc/samsung/s3c2412-i2s.h
similarity index 95%
rename from sound/soc/s3c24xx/s3c2412-i2s.h
rename to sound/soc/samsung/s3c2412-i2s.h
index 01a0471..02ad579 100644
--- a/sound/soc/s3c24xx/s3c2412-i2s.h
+++ b/sound/soc/samsung/s3c2412-i2s.h
@@ -1,4 +1,4 @@
-/* sound/soc/s3c24xx/s3c2412-i2s.c
+/* sound/soc/samsung/s3c2412-i2s.c
  *
  * ALSA Soc Audio Layer - S3C2412 I2S driver
  *
diff --git a/sound/soc/s3c24xx/s3c24xx-i2s.c b/sound/soc/samsung/s3c24xx-i2s.c
similarity index 99%
rename from sound/soc/s3c24xx/s3c24xx-i2s.c
rename to sound/soc/samsung/s3c24xx-i2s.c
index e060daa..13e41ed 100644
--- a/sound/soc/s3c24xx/s3c24xx-i2s.c
+++ b/sound/soc/samsung/s3c24xx-i2s.c
@@ -38,7 +38,7 @@
 
 #include <plat/regs-iis.h>
 
-#include "s3c-dma.h"
+#include "dma.h"
 #include "s3c24xx-i2s.h"
 
 static struct s3c2410_dma_client s3c24xx_dma_client_out = {
diff --git a/sound/soc/s3c24xx/s3c24xx-i2s.h b/sound/soc/samsung/s3c24xx-i2s.h
similarity index 100%
rename from sound/soc/s3c24xx/s3c24xx-i2s.h
rename to sound/soc/samsung/s3c24xx-i2s.h
diff --git a/sound/soc/s3c24xx/s3c24xx_simtec.c b/sound/soc/samsung/s3c24xx_simtec.c
similarity index 98%
rename from sound/soc/s3c24xx/s3c24xx_simtec.c
rename to sound/soc/samsung/s3c24xx_simtec.c
index c4c1114..a434032d 100644
--- a/sound/soc/s3c24xx/s3c24xx_simtec.c
+++ b/sound/soc/samsung/s3c24xx_simtec.c
@@ -1,4 +1,4 @@
-/* sound/soc/s3c24xx/s3c24xx_simtec.c
+/* sound/soc/samsung/s3c24xx_simtec.c
  *
  * Copyright 2009 Simtec Electronics
  *
@@ -17,11 +17,10 @@
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 
 #include <plat/audio-simtec.h>
 
-#include "s3c-dma.h"
+#include "dma.h"
 #include "s3c24xx-i2s.h"
 #include "s3c24xx_simtec.h"
 
diff --git a/sound/soc/s3c24xx/s3c24xx_simtec.h b/sound/soc/samsung/s3c24xx_simtec.h
similarity index 93%
rename from sound/soc/s3c24xx/s3c24xx_simtec.h
rename to sound/soc/samsung/s3c24xx_simtec.h
index e63d5ff..8270748 100644
--- a/sound/soc/s3c24xx/s3c24xx_simtec.h
+++ b/sound/soc/samsung/s3c24xx_simtec.h
@@ -1,4 +1,4 @@
-/* sound/soc/s3c24xx/s3c24xx_simtec.h
+/* sound/soc/samsung/s3c24xx_simtec.h
  *
  * Copyright 2009 Simtec Electronics
  *
diff --git a/sound/soc/s3c24xx/s3c24xx_simtec_hermes.c b/sound/soc/samsung/s3c24xx_simtec_hermes.c
similarity index 86%
rename from sound/soc/s3c24xx/s3c24xx_simtec_hermes.c
rename to sound/soc/samsung/s3c24xx_simtec_hermes.c
index f884537..bb4292e 100644
--- a/sound/soc/s3c24xx/s3c24xx_simtec_hermes.c
+++ b/sound/soc/samsung/s3c24xx_simtec_hermes.c
@@ -1,4 +1,4 @@
-/* sound/soc/s3c24xx/s3c24xx_simtec_hermes.c
+/* sound/soc/samsung/s3c24xx_simtec_hermes.c
  *
  * Copyright 2009 Simtec Electronics
  *
@@ -14,16 +14,13 @@
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 
 #include <plat/audio-simtec.h>
 
-#include "s3c-dma.h"
+#include "dma.h"
 #include "s3c24xx-i2s.h"
 #include "s3c24xx_simtec.h"
 
-#include "../codecs/tlv320aic3x.h"
-
 static const struct snd_soc_dapm_widget dapm_widgets[] = {
 	SND_SOC_DAPM_LINE("GSM Out", NULL),
 	SND_SOC_DAPM_LINE("GSM In", NULL),
@@ -76,19 +73,20 @@
 static int simtec_hermes_init(struct snd_soc_pcm_runtime *rtd)
 {
 	struct snd_soc_codec *codec = rtd->codec;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 
-	snd_soc_dapm_new_controls(codec, dapm_widgets,
+	snd_soc_dapm_new_controls(dapm, dapm_widgets,
 				  ARRAY_SIZE(dapm_widgets));
 
-	snd_soc_dapm_add_routes(codec, base_map, ARRAY_SIZE(base_map));
+	snd_soc_dapm_add_routes(dapm, base_map, ARRAY_SIZE(base_map));
 
-	snd_soc_dapm_enable_pin(codec, "Headphone Jack");
-	snd_soc_dapm_enable_pin(codec, "Line In");
-	snd_soc_dapm_enable_pin(codec, "Line Out");
-	snd_soc_dapm_enable_pin(codec, "Mic Jack");
+	snd_soc_dapm_enable_pin(dapm, "Headphone Jack");
+	snd_soc_dapm_enable_pin(dapm, "Line In");
+	snd_soc_dapm_enable_pin(dapm, "Line Out");
+	snd_soc_dapm_enable_pin(dapm, "Mic Jack");
 
 	simtec_audio_init(rtd);
-	snd_soc_dapm_sync(codec);
+	snd_soc_dapm_sync(dapm);
 
 	return 0;
 }
@@ -99,7 +97,7 @@
 	.codec_name	= "tlv320aic3x-codec.0-0x1a",
 	.cpu_dai_name	= "s3c24xx-i2s",
 	.codec_dai_name = "tlv320aic3x-hifi",
-	.platform_name	= "s3c24xx-pcm-audio",
+	.platform_name	= "samsung-audio",
 	.init		= simtec_hermes_init,
 };
 
diff --git a/sound/soc/s3c24xx/s3c24xx_simtec_tlv320aic23.c b/sound/soc/samsung/s3c24xx_simtec_tlv320aic23.c
similarity index 86%
rename from sound/soc/s3c24xx/s3c24xx_simtec_tlv320aic23.c
rename to sound/soc/samsung/s3c24xx_simtec_tlv320aic23.c
index c096759..fbba4e3 100644
--- a/sound/soc/s3c24xx/s3c24xx_simtec_tlv320aic23.c
+++ b/sound/soc/samsung/s3c24xx_simtec_tlv320aic23.c
@@ -1,4 +1,4 @@
-/* sound/soc/s3c24xx/s3c24xx_simtec_tlv320aic23.c
+/* sound/soc/samsung/s3c24xx_simtec_tlv320aic23.c
  *
  * Copyright 2009 Simtec Electronics
  *
@@ -14,11 +14,10 @@
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 
 #include <plat/audio-simtec.h>
 
-#include "s3c-dma.h"
+#include "dma.h"
 #include "s3c24xx-i2s.h"
 #include "s3c24xx_simtec.h"
 
@@ -65,19 +64,20 @@
 static int simtec_tlv320aic23_init(struct snd_soc_pcm_runtime *rtd)
 {
 	struct snd_soc_codec *codec = rtd->codec;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 
-	snd_soc_dapm_new_controls(codec, dapm_widgets,
+	snd_soc_dapm_new_controls(dapm, dapm_widgets,
 				  ARRAY_SIZE(dapm_widgets));
 
-	snd_soc_dapm_add_routes(codec, base_map, ARRAY_SIZE(base_map));
+	snd_soc_dapm_add_routes(dapm, base_map, ARRAY_SIZE(base_map));
 
-	snd_soc_dapm_enable_pin(codec, "Headphone Jack");
-	snd_soc_dapm_enable_pin(codec, "Line In");
-	snd_soc_dapm_enable_pin(codec, "Line Out");
-	snd_soc_dapm_enable_pin(codec, "Mic Jack");
+	snd_soc_dapm_enable_pin(dapm, "Headphone Jack");
+	snd_soc_dapm_enable_pin(dapm, "Line In");
+	snd_soc_dapm_enable_pin(dapm, "Line Out");
+	snd_soc_dapm_enable_pin(dapm, "Mic Jack");
 
 	simtec_audio_init(rtd);
-	snd_soc_dapm_sync(codec);
+	snd_soc_dapm_sync(dapm);
 
 	return 0;
 }
@@ -88,7 +88,7 @@
 	.codec_name	= "tlv320aic3x-codec.0-0x1a",
 	.cpu_dai_name	= "s3c24xx-i2s",
 	.codec_dai_name = "tlv320aic3x-hifi",
-	.platform_name	= "s3c24xx-pcm-audio",
+	.platform_name	= "samsung-audio",
 	.init		= simtec_tlv320aic23_init,
 };
 
diff --git a/sound/soc/s3c24xx/s3c24xx_uda134x.c b/sound/soc/samsung/s3c24xx_uda134x.c
similarity index 98%
rename from sound/soc/s3c24xx/s3c24xx_uda134x.c
rename to sound/soc/samsung/s3c24xx_uda134x.c
index bd48ffb..cdc8ecb 100644
--- a/sound/soc/s3c24xx/s3c24xx_uda134x.c
+++ b/sound/soc/samsung/s3c24xx_uda134x.c
@@ -18,13 +18,12 @@
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/s3c24xx_uda134x.h>
 #include <sound/uda134x.h>
 
 #include <plat/regs-iis.h>
 
-#include "s3c-dma.h"
+#include "dma.h"
 #include "s3c24xx-i2s.h"
 #include "../codecs/uda134x.h"
 
@@ -231,7 +230,7 @@
 	.codec_dai_name = "uda134x-hifi",
 	.cpu_dai_name = "s3c24xx-i2s",
 	.ops = &s3c24xx_uda134x_ops,
-	.platform_name	= "s3c24xx-pcm-audio",
+	.platform_name	= "samsung-audio",
 };
 
 static struct snd_soc_card snd_soc_s3c24xx_uda134x = {
diff --git a/sound/soc/s3c24xx/smartq_wm8987.c b/sound/soc/samsung/smartq_wm8987.c
similarity index 82%
rename from sound/soc/s3c24xx/smartq_wm8987.c
rename to sound/soc/samsung/smartq_wm8987.c
index dd20ca7..61e2b52 100644
--- a/sound/soc/s3c24xx/smartq_wm8987.c
+++ b/sound/soc/samsung/smartq_wm8987.c
@@ -1,4 +1,4 @@
-/* sound/soc/s3c24xx/smartq_wm8987.c
+/* sound/soc/samsung/smartq_wm8987.c
  *
  * Copyright 2010 Maurus Cuelenaere <mcuelenaere@gmail.com>
  *
@@ -19,13 +19,13 @@
 
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
-#include <sound/soc-dapm.h>
+#include <sound/soc.h>
 #include <sound/jack.h>
 
 #include <asm/mach-types.h>
 
-#include "s3c-dma.h"
-#include "s3c64xx-i2s.h"
+#include "dma.h"
+#include "i2s.h"
 
 #include "../codecs/wm8750.h"
 
@@ -39,15 +39,11 @@
 	struct snd_pcm_hw_params *params)
 {
 	struct snd_soc_pcm_runtime *rtd = substream->private_data;
-	struct snd_soc_dai *codec_dai = rtd->dai->codec_dai;
-	struct snd_soc_dai *cpu_dai = rtd->dai->cpu_dai;
-	struct s3c_i2sv2_rate_calc div;
+	struct snd_soc_dai *codec_dai = rtd->codec_dai;
+	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
 	unsigned int clk = 0;
 	int ret;
 
-	s3c_i2sv2_iis_calc_rate(&div, NULL, params_rate(params),
-				s3c_i2sv2_get_clock(cpu_dai));
-
 	switch (params_rate(params)) {
 	case 8000:
 	case 16000:
@@ -78,23 +74,24 @@
 	if (ret < 0)
 		return ret;
 
+	/* Use PCLK for I2S signal generation */
+	ret = snd_soc_dai_set_sysclk(cpu_dai, SAMSUNG_I2S_RCLKSRC_0,
+					0, SND_SOC_CLOCK_IN);
+	if (ret < 0)
+		return ret;
+
+	/* Gate the RCLK output on PAD */
+	ret = snd_soc_dai_set_sysclk(cpu_dai, SAMSUNG_I2S_CDCLK,
+					0, SND_SOC_CLOCK_IN);
+	if (ret < 0)
+		return ret;
+
 	/* set the codec system clock for DAC and ADC */
 	ret = snd_soc_dai_set_sysclk(codec_dai, WM8750_SYSCLK, clk,
 				     SND_SOC_CLOCK_IN);
 	if (ret < 0)
 		return ret;
 
-	/* set MCLK division for sample rate */
-	ret = snd_soc_dai_set_clkdiv(cpu_dai, S3C_I2SV2_DIV_RCLK, div.fs_div);
-	if (ret < 0)
-		return ret;
-
-	/* set prescaler division for sample rate */
-	ret = snd_soc_dai_set_clkdiv(cpu_dai, S3C_I2SV2_DIV_PRESCALER,
-				     div.clk_div - 1);
-	if (ret < 0)
-		return ret;
-
 	return 0;
 }
 
@@ -156,12 +153,14 @@
 	{"LINPUT2", NULL, "Mic Bias"},
 };
 
-static int smartq_wm8987_init(struct snd_soc_codec *codec)
+static int smartq_wm8987_init(struct snd_soc_pcm_runtime *rtd)
 {
+	struct snd_soc_codec *codec = rtd->codec;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 	int err = 0;
 
 	/* Add SmartQ specific widgets */
-	snd_soc_dapm_new_controls(codec, wm8987_dapm_widgets,
+	snd_soc_dapm_new_controls(dapm, wm8987_dapm_widgets,
 				  ARRAY_SIZE(wm8987_dapm_widgets));
 
 	/* add SmartQ specific controls */
@@ -172,25 +171,25 @@
 		return err;
 
 	/* setup SmartQ specific audio path */
-	snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+	snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 
 	/* set endpoints to not connected */
-	snd_soc_dapm_nc_pin(codec, "LINPUT1");
-	snd_soc_dapm_nc_pin(codec, "RINPUT1");
-	snd_soc_dapm_nc_pin(codec, "OUT3");
-	snd_soc_dapm_nc_pin(codec, "ROUT1");
+	snd_soc_dapm_nc_pin(dapm, "LINPUT1");
+	snd_soc_dapm_nc_pin(dapm, "RINPUT1");
+	snd_soc_dapm_nc_pin(dapm, "OUT3");
+	snd_soc_dapm_nc_pin(dapm, "ROUT1");
 
 	/* set endpoints to default off mode */
-	snd_soc_dapm_enable_pin(codec, "Internal Speaker");
-	snd_soc_dapm_enable_pin(codec, "Internal Mic");
-	snd_soc_dapm_disable_pin(codec, "Headphone Jack");
+	snd_soc_dapm_enable_pin(dapm, "Internal Speaker");
+	snd_soc_dapm_enable_pin(dapm, "Internal Mic");
+	snd_soc_dapm_disable_pin(dapm, "Headphone Jack");
 
-	err = snd_soc_dapm_sync(codec);
+	err = snd_soc_dapm_sync(dapm);
 	if (err)
 		return err;
 
 	/* Headphone jack detection */
-	err = snd_soc_jack_new(&snd_soc_smartq, "Headphone Jack",
+	err = snd_soc_jack_new(codec, "Headphone Jack",
 			       SND_JACK_HEADPHONE, &smartq_jack);
 	if (err)
 		return err;
@@ -211,9 +210,9 @@
 	{
 		.name		= "wm8987",
 		.stream_name	= "SmartQ Hi-Fi",
-		.cpu_dai_name	= "s3c64xx-i2s.0",
+		.cpu_dai_name	= "samsung-i2s.0",
 		.codec_dai_name	= "wm8750-hifi",
-		.platform_name	= "s3c24xx-pcm-audio",
+		.platform_name	= "samsung-audio",
 		.codec_name	= "wm8750-codec.0-0x1a",
 		.init		= smartq_wm8987_init,
 		.ops		= &smartq_hifi_ops,
@@ -275,6 +274,7 @@
 
 static void __exit smartq_exit(void)
 {
+	gpio_free(S3C64XX_GPK(12));
 	snd_soc_jack_free_gpios(&smartq_jack, ARRAY_SIZE(smartq_jack_gpios),
 				smartq_jack_gpios);
 
diff --git a/sound/soc/s3c24xx/smdk2443_wm9710.c b/sound/soc/samsung/smdk2443_wm9710.c
similarity index 92%
rename from sound/soc/s3c24xx/smdk2443_wm9710.c
rename to sound/soc/samsung/smdk2443_wm9710.c
index 4613288..3be7e7e 100644
--- a/sound/soc/s3c24xx/smdk2443_wm9710.c
+++ b/sound/soc/samsung/smdk2443_wm9710.c
@@ -17,10 +17,9 @@
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 
-#include "s3c-dma.h"
-#include "s3c-ac97.h"
+#include "dma.h"
+#include "ac97.h"
 
 static struct snd_soc_card smdk2443;
 
@@ -28,10 +27,10 @@
 {
 	.name = "AC97",
 	.stream_name = "AC97 HiFi",
-	.cpu_dai_name = "s3c-ac97",
+	.cpu_dai_name = "samsung-ac97",
 	.codec_dai_name = "ac97-hifi",
 	.codec_name = "ac97-codec",
-	.platform_name = "s3c24xx-pcm-audio",
+	.platform_name = "samsung-audio",
 },
 };
 
diff --git a/sound/soc/s3c24xx/smdk_spdif.c b/sound/soc/samsung/smdk_spdif.c
similarity index 92%
rename from sound/soc/s3c24xx/smdk_spdif.c
rename to sound/soc/samsung/smdk_spdif.c
index c8bd904..b5c3fad 100644
--- a/sound/soc/s3c24xx/smdk_spdif.c
+++ b/sound/soc/samsung/smdk_spdif.c
@@ -18,7 +18,7 @@
 
 #include <sound/soc.h>
 
-#include "s3c-dma.h"
+#include "dma.h"
 #include "spdif.h"
 
 /* Audio clock settings are belonged to board specific part. Every
@@ -28,7 +28,7 @@
 static int set_audio_clock_heirachy(struct platform_device *pdev)
 {
 	struct clk *fout_epll, *mout_epll, *sclk_audio0, *sclk_spdif;
-	int ret;
+	int ret = 0;
 
 	fout_epll = clk_get(NULL, "fout_epll");
 	if (IS_ERR(fout_epll)) {
@@ -61,7 +61,7 @@
 		goto out3;
 	}
 
-	/* Set audio clock heirachy for S/PDIF */
+	/* Set audio clock hierarchy for S/PDIF */
 	clk_set_parent(mout_epll, fout_epll);
 	clk_set_parent(sclk_audio0, mout_epll);
 	clk_set_parent(sclk_spdif, sclk_audio0);
@@ -79,7 +79,7 @@
 
 /* We should haved to set clock directly on this part because of clock
  * scheme of Samsudng SoCs did not support to set rates from abstrct
- * clock of it's heirachy.
+ * clock of its hierarchy.
  */
 static int set_audio_clock_rate(unsigned long epll_rate,
 				unsigned long audio_rate)
@@ -152,12 +152,10 @@
 	.hw_params = smdk_hw_params,
 };
 
-static struct snd_soc_card smdk;
-
 static struct snd_soc_dai_link smdk_dai = {
 	.name = "S/PDIF",
 	.stream_name = "S/PDIF PCM Playback",
-	.platform_name = "s3c24xx-pcm-audio",
+	.platform_name = "samsung-audio",
 	.cpu_dai_name = "samsung-spdif",
 	.codec_dai_name = "dit-hifi",
 	.codec_name = "spdif-dit",
@@ -183,7 +181,7 @@
 
 	ret = platform_device_add(smdk_snd_spdif_dit_device);
 	if (ret)
-		goto err2;
+		goto err1;
 
 	smdk_snd_spdif_device = platform_device_alloc("soc-audio", -1);
 	if (!smdk_snd_spdif_device) {
@@ -195,17 +193,21 @@
 
 	ret = platform_device_add(smdk_snd_spdif_device);
 	if (ret)
-		goto err1;
+		goto err3;
 
-	/* Set audio clock heirachy manually */
+	/* Set audio clock hierarchy manually */
 	ret = set_audio_clock_heirachy(smdk_snd_spdif_device);
 	if (ret)
-		goto err1;
+		goto err4;
 
 	return 0;
-err1:
+err4:
+	platform_device_del(smdk_snd_spdif_device);
+err3:
 	platform_device_put(smdk_snd_spdif_device);
 err2:
+	platform_device_del(smdk_snd_spdif_dit_device);
+err1:
 	platform_device_put(smdk_snd_spdif_dit_device);
 	return ret;
 }
@@ -213,6 +215,7 @@
 static void __exit smdk_exit(void)
 {
 	platform_device_unregister(smdk_snd_spdif_device);
+	platform_device_unregister(smdk_snd_spdif_dit_device);
 }
 
 module_init(smdk_init);
diff --git a/sound/soc/samsung/smdk_wm8580.c b/sound/soc/samsung/smdk_wm8580.c
new file mode 100644
index 0000000..b2cff1a
--- /dev/null
+++ b/sound/soc/samsung/smdk_wm8580.c
@@ -0,0 +1,292 @@
+/*
+ *  smdk_wm8580.c
+ *
+ *  Copyright (c) 2009 Samsung Electronics Co. Ltd
+ *  Author: Jaswinder Singh <jassi.brar@samsung.com>
+ *
+ *  This program is free software; you can redistribute  it and/or modify it
+ *  under  the terms of  the GNU General  Public License as published by the
+ *  Free Software Foundation;  either version 2 of the  License, or (at your
+ *  option) any later version.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+
+#include <asm/mach-types.h>
+
+#include "../codecs/wm8580.h"
+#include "dma.h"
+#include "i2s.h"
+
+/*
+ * Default CFG switch settings to use this driver:
+ *
+ *   SMDK6410: Set CFG1 1-3 Off, CFG2 1-4 On
+ */
+
+/* SMDK has a 12MHZ crystal attached to WM8580 */
+#define SMDK_WM8580_FREQ 12000000
+
+static int smdk_hw_params(struct snd_pcm_substream *substream,
+	struct snd_pcm_hw_params *params)
+{
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+	struct snd_soc_dai *codec_dai = rtd->codec_dai;
+	unsigned int pll_out;
+	int bfs, rfs, ret;
+
+	switch (params_format(params)) {
+	case SNDRV_PCM_FORMAT_U8:
+	case SNDRV_PCM_FORMAT_S8:
+		bfs = 16;
+		break;
+	case SNDRV_PCM_FORMAT_U16_LE:
+	case SNDRV_PCM_FORMAT_S16_LE:
+		bfs = 32;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	/* The Fvco for WM8580 PLLs must fall within [90,100]MHz.
+	 * This criterion can't be met if we request PLL output
+	 * as {8000x256, 64000x256, 11025x256}Hz.
+	 * As a workaround, we instead pick the minimum rfs for which both
+	 * rfs itself and (params_rate(params) * rfs) are acceptable
+	 * to the CODEC and the CPU.
+	 */
+	switch (params_rate(params)) {
+	case 16000:
+	case 22050:
+	case 32000:
+	case 44100:
+	case 48000:
+	case 88200:
+	case 96000:
+		rfs = 256;
+		break;
+	case 64000:
+		rfs = 384;
+		break;
+	case 8000:
+	case 11025:
+		rfs = 512;
+		break;
+	default:
+		return -EINVAL;
+	}
+	pll_out = params_rate(params) * rfs;
+
+	/* Set the Codec DAI configuration */
+	ret = snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_I2S
+					 | SND_SOC_DAIFMT_NB_NF
+					 | SND_SOC_DAIFMT_CBM_CFM);
+	if (ret < 0)
+		return ret;
+
+	/* Set the AP DAI configuration */
+	ret = snd_soc_dai_set_fmt(cpu_dai, SND_SOC_DAIFMT_I2S
+					 | SND_SOC_DAIFMT_NB_NF
+					 | SND_SOC_DAIFMT_CBM_CFM);
+	if (ret < 0)
+		return ret;
+
+	/* Set WM8580 to drive MCLK from its PLLA */
+	ret = snd_soc_dai_set_clkdiv(codec_dai, WM8580_MCLK,
+					WM8580_CLKSRC_PLLA);
+	if (ret < 0)
+		return ret;
+
+	ret = snd_soc_dai_set_pll(codec_dai, WM8580_PLLA, 0,
+					SMDK_WM8580_FREQ, pll_out);
+	if (ret < 0)
+		return ret;
+
+	ret = snd_soc_dai_set_sysclk(codec_dai, WM8580_CLKSRC_PLLA,
+				     pll_out, SND_SOC_CLOCK_IN);
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
+/*
+ * SMDK WM8580 DAI operations.
+ */
+static struct snd_soc_ops smdk_ops = {
+	.hw_params = smdk_hw_params,
+};
+
+/* SMDK Playback widgets */
+static const struct snd_soc_dapm_widget wm8580_dapm_widgets_pbk[] = {
+	SND_SOC_DAPM_HP("Front", NULL),
+	SND_SOC_DAPM_HP("Center+Sub", NULL),
+	SND_SOC_DAPM_HP("Rear", NULL),
+};
+
+/* SMDK Capture widgets */
+static const struct snd_soc_dapm_widget wm8580_dapm_widgets_cpt[] = {
+	SND_SOC_DAPM_MIC("MicIn", NULL),
+	SND_SOC_DAPM_LINE("LineIn", NULL),
+};
+
+/* SMDK-PAIFTX connections */
+static const struct snd_soc_dapm_route audio_map_tx[] = {
+	/* MicIn feeds AINL */
+	{"AINL", NULL, "MicIn"},
+
+	/* LineIn feeds AINL/R */
+	{"AINL", NULL, "LineIn"},
+	{"AINR", NULL, "LineIn"},
+};
+
+/* SMDK-PAIFRX connections */
+static const struct snd_soc_dapm_route audio_map_rx[] = {
+	/* Front Left/Right are fed VOUT1L/R */
+	{"Front", NULL, "VOUT1L"},
+	{"Front", NULL, "VOUT1R"},
+
+	/* Center/Sub are fed VOUT2L/R */
+	{"Center+Sub", NULL, "VOUT2L"},
+	{"Center+Sub", NULL, "VOUT2R"},
+
+	/* Rear Left/Right are fed VOUT3L/R */
+	{"Rear", NULL, "VOUT3L"},
+	{"Rear", NULL, "VOUT3R"},
+};
+
+static int smdk_wm8580_init_paiftx(struct snd_soc_pcm_runtime *rtd)
+{
+	struct snd_soc_codec *codec = rtd->codec;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
+
+	/* Add smdk specific Capture widgets */
+	snd_soc_dapm_new_controls(dapm, wm8580_dapm_widgets_cpt,
+				  ARRAY_SIZE(wm8580_dapm_widgets_cpt));
+
+	/* Set up PAIFTX audio path */
+	snd_soc_dapm_add_routes(dapm, audio_map_tx, ARRAY_SIZE(audio_map_tx));
+
+	/* Enabling the microphone requires the fitting of a 0R
+	 * resistor to connect the line from the microphone jack.
+	 */
+	snd_soc_dapm_disable_pin(dapm, "MicIn");
+
+	/* signal a DAPM event */
+	snd_soc_dapm_sync(dapm);
+
+	return 0;
+}
+
+static int smdk_wm8580_init_paifrx(struct snd_soc_pcm_runtime *rtd)
+{
+	struct snd_soc_codec *codec = rtd->codec;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
+
+	/* Add smdk specific Playback widgets */
+	snd_soc_dapm_new_controls(dapm, wm8580_dapm_widgets_pbk,
+				  ARRAY_SIZE(wm8580_dapm_widgets_pbk));
+
+	/* Set up PAIFRX audio path */
+	snd_soc_dapm_add_routes(dapm, audio_map_rx, ARRAY_SIZE(audio_map_rx));
+
+	/* signal a DAPM event */
+	snd_soc_dapm_sync(dapm);
+
+	return 0;
+}
+
+enum {
+	PRI_PLAYBACK = 0,
+	PRI_CAPTURE,
+	SEC_PLAYBACK,
+};
+
+static struct snd_soc_dai_link smdk_dai[] = {
+	[PRI_PLAYBACK] = { /* Primary Playback i/f */
+		.name = "WM8580 PAIF RX",
+		.stream_name = "Playback",
+		.cpu_dai_name = "samsung-i2s.0",
+		.codec_dai_name = "wm8580-hifi-playback",
+		.platform_name = "samsung-audio",
+		.codec_name = "wm8580-codec.0-001b",
+		.init = smdk_wm8580_init_paifrx,
+		.ops = &smdk_ops,
+	},
+	[PRI_CAPTURE] = { /* Primary Capture i/f */
+		.name = "WM8580 PAIF TX",
+		.stream_name = "Capture",
+		.cpu_dai_name = "samsung-i2s.0",
+		.codec_dai_name = "wm8580-hifi-capture",
+		.platform_name = "samsung-audio",
+		.codec_name = "wm8580-codec.0-001b",
+		.init = smdk_wm8580_init_paiftx,
+		.ops = &smdk_ops,
+	},
+	[SEC_PLAYBACK] = { /* Sec_Fifo Playback i/f */
+		.name = "Sec_FIFO TX",
+		.stream_name = "Playback",
+		.cpu_dai_name = "samsung-i2s.x",
+		.codec_dai_name = "wm8580-hifi-playback",
+		.platform_name = "samsung-audio",
+		.codec_name = "wm8580-codec.0-001b",
+		.init = smdk_wm8580_init_paifrx,
+		.ops = &smdk_ops,
+	},
+};
+
+static struct snd_soc_card smdk = {
+	.name = "SMDK-I2S",
+	.dai_link = smdk_dai,
+	.num_links = 2,
+};
+
+static struct platform_device *smdk_snd_device;
+
+static int __init smdk_audio_init(void)
+{
+	int ret;
+	char *str;
+
+	if (machine_is_smdkc100() || machine_is_smdk6442()
+			|| machine_is_smdkv210() || machine_is_smdkc110()) {
+		smdk.num_links = 3;
+		/* Secondary is at offset SAMSUNG_I2S_SECOFF from Primary */
+		str = (char *)smdk_dai[SEC_PLAYBACK].cpu_dai_name;
+		str[strlen(str) - 1] = '0' + SAMSUNG_I2S_SECOFF;
+	} else if (machine_is_smdk6410()) {
+		str = (char *)smdk_dai[PRI_PLAYBACK].cpu_dai_name;
+		str[strlen(str) - 1] = '2';
+		str = (char *)smdk_dai[PRI_CAPTURE].cpu_dai_name;
+		str[strlen(str) - 1] = '2';
+	}
+
+	smdk_snd_device = platform_device_alloc("soc-audio", -1);
+	if (!smdk_snd_device)
+		return -ENOMEM;
+
+	platform_set_drvdata(smdk_snd_device, &smdk);
+	ret = platform_device_add(smdk_snd_device);
+
+	if (ret)
+		platform_device_put(smdk_snd_device);
+
+	return ret;
+}
+module_init(smdk_audio_init);
+
+static void __exit smdk_audio_exit(void)
+{
+	platform_device_unregister(smdk_snd_device);
+}
+module_exit(smdk_audio_exit);
+
+MODULE_AUTHOR("Jaswinder Singh, jassi.brar@samsung.com");
+MODULE_DESCRIPTION("ALSA SoC SMDK WM8580");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/samsung/smdk_wm8994.c b/sound/soc/samsung/smdk_wm8994.c
new file mode 100644
index 0000000..e7c1009
--- /dev/null
+++ b/sound/soc/samsung/smdk_wm8994.c
@@ -0,0 +1,176 @@
+/*
+ *  smdk_wm8994.c
+ *
+ *  This program is free software; you can redistribute  it and/or modify it
+ *  under  the terms of  the GNU General  Public License as published by the
+ *  Free Software Foundation;  either version 2 of the  License, or (at your
+ *  option) any later version.
+ */
+
+#include "../codecs/wm8994.h"
+
+ /*
+  * Default CFG switch settings to use this driver:
+  *	SMDKV310: CFG5-1000, CFG7-111111
+  */
+
+ /*
+  * Configure audio route as :-
+  * $ amixer sset 'DAC1' on,on
+  * $ amixer sset 'Right Headphone Mux' 'DAC'
+  * $ amixer sset 'Left Headphone Mux' 'DAC'
+  * $ amixer sset 'DAC1R Mixer AIF1.1' on
+  * $ amixer sset 'DAC1L Mixer AIF1.1' on
+  * $ amixer sset 'IN2L' on
+  * $ amixer sset 'IN2L PGA IN2LN' on
+  * $ amixer sset 'MIXINL IN2L' on
+  * $ amixer sset 'AIF1ADC1L Mixer ADC/DMIC' on
+  * $ amixer sset 'IN2R' on
+  * $ amixer sset 'IN2R PGA IN2RN' on
+  * $ amixer sset 'MIXINR IN2R' on
+  * $ amixer sset 'AIF1ADC1R Mixer ADC/DMIC' on
+  */
+
+/* SMDK has a 16.934MHZ crystal attached to WM8994 */
+#define SMDK_WM8994_FREQ 16934000
+
+static int smdk_hw_params(struct snd_pcm_substream *substream,
+	struct snd_pcm_hw_params *params)
+{
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+	struct snd_soc_dai *codec_dai = rtd->codec_dai;
+	unsigned int pll_out;
+	int ret;
+
+	/* AIF1CLK should be >=3MHz for optimal performance */
+	if (params_rate(params) == 8000 || params_rate(params) == 11025)
+		pll_out = params_rate(params) * 512;
+	else
+		pll_out = params_rate(params) * 256;
+
+	ret = snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_I2S
+					 | SND_SOC_DAIFMT_NB_NF
+					 | SND_SOC_DAIFMT_CBM_CFM);
+	if (ret < 0)
+		return ret;
+
+	ret = snd_soc_dai_set_fmt(cpu_dai, SND_SOC_DAIFMT_I2S
+					 | SND_SOC_DAIFMT_NB_NF
+					 | SND_SOC_DAIFMT_CBM_CFM);
+	if (ret < 0)
+		return ret;
+
+	ret = snd_soc_dai_set_pll(codec_dai, WM8994_FLL1, WM8994_FLL_SRC_MCLK1,
+					SMDK_WM8994_FREQ, pll_out);
+	if (ret < 0)
+		return ret;
+
+	ret = snd_soc_dai_set_sysclk(codec_dai, WM8994_SYSCLK_FLL1,
+					pll_out, SND_SOC_CLOCK_IN);
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
+/*
+ * SMDK WM8994 DAI operations.
+ */
+static struct snd_soc_ops smdk_ops = {
+	.hw_params = smdk_hw_params,
+};
+
+static int smdk_wm8994_init_paiftx(struct snd_soc_pcm_runtime *rtd)
+{
+	struct snd_soc_codec *codec = rtd->codec;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
+
+	/* HeadPhone */
+	snd_soc_dapm_enable_pin(dapm, "HPOUT1R");
+	snd_soc_dapm_enable_pin(dapm, "HPOUT1L");
+
+	/* MicIn */
+	snd_soc_dapm_enable_pin(dapm, "IN1LN");
+	snd_soc_dapm_enable_pin(dapm, "IN1RN");
+
+	/* LineIn */
+	snd_soc_dapm_enable_pin(dapm, "IN2LN");
+	snd_soc_dapm_enable_pin(dapm, "IN2RN");
+
+	/* Other pins NC */
+	snd_soc_dapm_nc_pin(dapm, "HPOUT2P");
+	snd_soc_dapm_nc_pin(dapm, "HPOUT2N");
+	snd_soc_dapm_nc_pin(dapm, "SPKOUTLN");
+	snd_soc_dapm_nc_pin(dapm, "SPKOUTLP");
+	snd_soc_dapm_nc_pin(dapm, "SPKOUTRP");
+	snd_soc_dapm_nc_pin(dapm, "SPKOUTRN");
+	snd_soc_dapm_nc_pin(dapm, "LINEOUT1N");
+	snd_soc_dapm_nc_pin(dapm, "LINEOUT1P");
+	snd_soc_dapm_nc_pin(dapm, "LINEOUT2N");
+	snd_soc_dapm_nc_pin(dapm, "LINEOUT2P");
+	snd_soc_dapm_nc_pin(dapm, "IN1LP");
+	snd_soc_dapm_nc_pin(dapm, "IN2LP:VXRN");
+	snd_soc_dapm_nc_pin(dapm, "IN1RP");
+	snd_soc_dapm_nc_pin(dapm, "IN2RP:VXRP");
+
+	snd_soc_dapm_sync(dapm);
+
+	return 0;
+}
+
+static struct snd_soc_dai_link smdk_dai[] = {
+	{ /* Primary DAI i/f */
+		.name = "WM8994 AIF1",
+		.stream_name = "Pri_Dai",
+		.cpu_dai_name = "samsung-i2s.0",
+		.codec_dai_name = "wm8994-aif1",
+		.platform_name = "samsung-audio",
+		.codec_name = "wm8994-codec",
+		.init = smdk_wm8994_init_paiftx,
+		.ops = &smdk_ops,
+	}, { /* Sec_Fifo Playback i/f */
+		.name = "Sec_FIFO TX",
+		.stream_name = "Sec_Dai",
+		.cpu_dai_name = "samsung-i2s.4",
+		.codec_dai_name = "wm8994-aif1",
+		.platform_name = "samsung-audio",
+		.codec_name = "wm8994-codec",
+		.ops = &smdk_ops,
+	},
+};
+
+static struct snd_soc_card smdk = {
+	.name = "SMDK-I2S",
+	.dai_link = smdk_dai,
+	.num_links = ARRAY_SIZE(smdk_dai),
+};
+
+static struct platform_device *smdk_snd_device;
+
+static int __init smdk_audio_init(void)
+{
+	int ret;
+
+	smdk_snd_device = platform_device_alloc("soc-audio", -1);
+	if (!smdk_snd_device)
+		return -ENOMEM;
+
+	platform_set_drvdata(smdk_snd_device, &smdk);
+
+	ret = platform_device_add(smdk_snd_device);
+	if (ret)
+		platform_device_put(smdk_snd_device);
+
+	return ret;
+}
+module_init(smdk_audio_init);
+
+static void __exit smdk_audio_exit(void)
+{
+	platform_device_unregister(smdk_snd_device);
+}
+module_exit(smdk_audio_exit);
+
+MODULE_DESCRIPTION("ALSA SoC SMDK WM8994");
+MODULE_LICENSE("GPL");
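
To make the AIF1CLK choice above concrete (an illustrative sketch, not part of the patch): the FLL output is kept at or above 3 MHz by switching from 256fs to 512fs for the two lowest rates, since 8000 * 256 = 2048000 Hz would be too low; 8000 * 512 = 4096000 Hz and 11025 * 512 = 5644800 Hz are used instead, while e.g. 48000 * 256 = 12288000 Hz.

/* Standalone restatement of the pll_out choice in smdk_hw_params() above */
static unsigned int wm8994_example_aif1clk(unsigned int rate)
{
	/* keep AIF1CLK >= 3 MHz: 8 kHz and 11.025 kHz need 512fs */
	if (rate == 8000 || rate == 11025)
		return rate * 512;
	return rate * 256;
}
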
diff --git a/sound/soc/s3c24xx/smdk_wm9713.c b/sound/soc/samsung/smdk_wm9713.c
similarity index 87%
rename from sound/soc/s3c24xx/smdk_wm9713.c
rename to sound/soc/samsung/smdk_wm9713.c
index 33ba8fd..ae5fed6 100644
--- a/sound/soc/s3c24xx/smdk_wm9713.c
+++ b/sound/soc/samsung/smdk_wm9713.c
@@ -15,8 +15,8 @@
 #include <linux/device.h>
 #include <sound/soc.h>
 
-#include "s3c-dma.h"
-#include "s3c-ac97.h"
+#include "dma.h"
+#include "ac97.h"
 
 static struct snd_soc_card smdk;
 
@@ -27,6 +27,7 @@
  *   SMDKC100: Set CFG6 1-3 On, CFG7 1   On
  *   SMDKC110: Set CFGB10 1-2 Off, CFGB12 1-3 On
  *   SMDKV210: Set CFGB10 1-2 Off, CFGB12 1-3 On
+ *   SMDKV310: Set CFG2 1-2 Off, CFG4 All On, CFG7 All Off, CFG8 1-On
  */
 
 /*
@@ -45,8 +46,8 @@
 static struct snd_soc_dai_link smdk_dai = {
 	.name = "AC97",
 	.stream_name = "AC97 PCM",
-	.platform_name = "s3c24xx-pcm-audio",
-	.cpu_dai_name = "s3c-ac97",
+	.platform_name = "samsung-audio",
+	.cpu_dai_name = "samsung-ac97",
 	.codec_dai_name = "wm9713-hifi",
 	.codec_name = "wm9713-codec",
 };
@@ -70,24 +71,27 @@
 
 	ret = platform_device_add(smdk_snd_wm9713_device);
 	if (ret)
-		goto err;
+		goto err1;
 
 	smdk_snd_ac97_device = platform_device_alloc("soc-audio", -1);
 	if (!smdk_snd_ac97_device) {
 		ret = -ENOMEM;
-		goto err;
+		goto err2;
 	}
 
 	platform_set_drvdata(smdk_snd_ac97_device, &smdk);
 
 	ret = platform_device_add(smdk_snd_ac97_device);
-	if (ret) {
-		platform_device_put(smdk_snd_ac97_device);
-		goto err;
-	}
+	if (ret)
+		goto err3;
 
 	return 0;
-err:
+
+err3:
+	platform_device_put(smdk_snd_ac97_device);
+err2:
+	platform_device_del(smdk_snd_wm9713_device);
+err1:
 	platform_device_put(smdk_snd_wm9713_device);
 	return ret;
 }
diff --git a/sound/soc/s3c24xx/spdif.c b/sound/soc/samsung/spdif.c
similarity index 99%
rename from sound/soc/s3c24xx/spdif.c
rename to sound/soc/samsung/spdif.c
index ce554e9..f0816404 100644
--- a/sound/soc/s3c24xx/spdif.c
+++ b/sound/soc/samsung/spdif.c
@@ -1,4 +1,4 @@
-/* sound/soc/s3c24xx/spdif.c
+/* sound/soc/samsung/spdif.c
  *
  * ALSA SoC Audio Layer - Samsung S/PDIF Controller driver
  *
@@ -20,7 +20,7 @@
 #include <plat/audio.h>
 #include <mach/dma.h>
 
-#include "s3c-dma.h"
+#include "dma.h"
 #include "spdif.h"
 
 /* Registers */
diff --git a/sound/soc/s3c24xx/spdif.h b/sound/soc/samsung/spdif.h
similarity index 94%
rename from sound/soc/s3c24xx/spdif.h
rename to sound/soc/samsung/spdif.h
index 3ed5559..4f72cb4 100644
--- a/sound/soc/s3c24xx/spdif.h
+++ b/sound/soc/samsung/spdif.h
@@ -1,4 +1,4 @@
-/* sound/soc/s3c24xx/spdif.h
+/* sound/soc/samsung/spdif.h
  *
  * ALSA SoC Audio Layer - Samsung S/PDIF Controller driver
  *
diff --git a/sound/soc/sh/Kconfig b/sound/soc/sh/Kconfig
index 7f0a496..d8e06a6 100644
--- a/sound/soc/sh/Kconfig
+++ b/sound/soc/sh/Kconfig
@@ -48,7 +48,7 @@
 
 config SND_FSI_AK4642
 	tristate "FSI-AK4642 sound support"
-	depends on SND_SOC_SH4_FSI && I2C_SH_MOBILE
+	depends on SND_SOC_SH4_FSI && I2C
 	select SND_SOC_AK4642
 	help
 	  This option enables generic sound support for the
@@ -56,7 +56,7 @@
 
 config SND_FSI_DA7210
 	tristate "FSI-DA7210 sound support"
-	depends on SND_SOC_SH4_FSI && I2C_SH_MOBILE
+	depends on SND_SOC_SH4_FSI && I2C
 	select SND_SOC_DA7210
 	help
 	  This option enables generic sound support for the
diff --git a/sound/soc/sh/fsi-ak4642.c b/sound/soc/sh/fsi-ak4642.c
index d96602d..a14820a 100644
--- a/sound/soc/sh/fsi-ak4642.c
+++ b/sound/soc/sh/fsi-ak4642.c
@@ -12,6 +12,14 @@
 #include <linux/platform_device.h>
 #include <sound/sh_fsi.h>
 
+struct fsi_ak4642_data {
+	const char *name;
+	const char *card;
+	const char *cpu_dai;
+	const char *codec;
+	const char *platform;
+};
+
 static int fsi_ak4642_dai_init(struct snd_soc_pcm_runtime *rtd)
 {
 	struct snd_soc_dai *dai = rtd->codec_dai;
@@ -27,37 +35,42 @@
 }
 
 static struct snd_soc_dai_link fsi_dai_link = {
-	.name		= "AK4642",
-	.stream_name	= "AK4642",
-	.cpu_dai_name	= "fsia-dai", /* fsi A */
 	.codec_dai_name	= "ak4642-hifi",
-#ifdef CONFIG_MACH_AP4EVB
-	.platform_name	= "sh_fsi2",
-	.codec_name	= "ak4642-codec.0-0013",
-#else
-	.platform_name	= "sh_fsi.0",
-	.codec_name	= "ak4642-codec.0-0012",
-#endif
 	.init		= fsi_ak4642_dai_init,
-	.ops		= NULL,
 };
 
 static struct snd_soc_card fsi_soc_card  = {
-	.name		= "FSI (AK4642)",
 	.dai_link	= &fsi_dai_link,
 	.num_links	= 1,
 };
 
 static struct platform_device *fsi_snd_device;
 
-static int __init fsi_ak4642_init(void)
+static int fsi_ak4642_probe(struct platform_device *pdev)
 {
 	int ret = -ENOMEM;
+	const struct platform_device_id	*id_entry;
+	struct fsi_ak4642_data *pdata;
+
+	id_entry = pdev->id_entry;
+	if (!id_entry) {
+		dev_err(&pdev->dev, "unknown fsi ak4642\n");
+		return -ENODEV;
+	}
+
+	pdata = (struct fsi_ak4642_data *)id_entry->driver_data;
 
 	fsi_snd_device = platform_device_alloc("soc-audio", FSI_PORT_A);
 	if (!fsi_snd_device)
 		goto out;
 
+	fsi_dai_link.name		= pdata->name;
+	fsi_dai_link.stream_name	= pdata->name;
+	fsi_dai_link.cpu_dai_name	= pdata->cpu_dai;
+	fsi_dai_link.platform_name	= pdata->platform;
+	fsi_dai_link.codec_name		= pdata->codec;
+	fsi_soc_card.name		= pdata->card;
+
 	platform_set_drvdata(fsi_snd_device, &fsi_soc_card);
 	ret = platform_device_add(fsi_snd_device);
 
@@ -68,9 +81,108 @@
 	return ret;
 }
 
-static void __exit fsi_ak4642_exit(void)
+static int fsi_ak4642_remove(struct platform_device *pdev)
 {
 	platform_device_unregister(fsi_snd_device);
+	return 0;
+}
+
+static struct fsi_ak4642_data fsi_a_ak4642 = {
+	.name		= "AK4642",
+	.card		= "FSIA (AK4642)",
+	.cpu_dai	= "fsia-dai",
+	.codec		= "ak4642-codec.0-0012",
+	.platform	= "sh_fsi.0",
+};
+
+static struct fsi_ak4642_data fsi_b_ak4642 = {
+	.name		= "AK4642",
+	.card		= "FSIB (AK4642)",
+	.cpu_dai	= "fsib-dai",
+	.codec		= "ak4642-codec.0-0012",
+	.platform	= "sh_fsi.0",
+};
+
+static struct fsi_ak4642_data fsi_a_ak4643 = {
+	.name		= "AK4643",
+	.card		= "FSIA (AK4643)",
+	.cpu_dai	= "fsia-dai",
+	.codec		= "ak4642-codec.0-0013",
+	.platform	= "sh_fsi.0",
+};
+
+static struct fsi_ak4642_data fsi_b_ak4643 = {
+	.name		= "AK4643",
+	.card		= "FSIB (AK4643)",
+	.cpu_dai	= "fsib-dai",
+	.codec		= "ak4642-codec.0-0013",
+	.platform	= "sh_fsi.0",
+};
+
+static struct fsi_ak4642_data fsi2_a_ak4642 = {
+	.name		= "AK4642",
+	.card		= "FSI2A (AK4642)",
+	.cpu_dai	= "fsia-dai",
+	.codec		= "ak4642-codec.0-0012",
+	.platform	= "sh_fsi2",
+};
+
+static struct fsi_ak4642_data fsi2_b_ak4642 = {
+	.name		= "AK4642",
+	.card		= "FSI2B (AK4642)",
+	.cpu_dai	= "fsib-dai",
+	.codec		= "ak4642-codec.0-0012",
+	.platform	= "sh_fsi2",
+};
+
+static struct fsi_ak4642_data fsi2_a_ak4643 = {
+	.name		= "AK4643",
+	.card		= "FSI2A (AK4643)",
+	.cpu_dai	= "fsia-dai",
+	.codec		= "ak4642-codec.0-0013",
+	.platform	= "sh_fsi2",
+};
+
+static struct fsi_ak4642_data fsi2_b_ak4643 = {
+	.name		= "AK4643",
+	.card		= "FSI2B (AK4643)",
+	.cpu_dai	= "fsib-dai",
+	.codec		= "ak4642-codec.0-0013",
+	.platform	= "sh_fsi2",
+};
+
+static struct platform_device_id fsi_id_table[] = {
+	/* FSI */
+	{ "sh_fsi_a_ak4642",	(kernel_ulong_t)&fsi_a_ak4642 },
+	{ "sh_fsi_b_ak4642",	(kernel_ulong_t)&fsi_b_ak4642 },
+	{ "sh_fsi_a_ak4643",	(kernel_ulong_t)&fsi_a_ak4643 },
+	{ "sh_fsi_b_ak4643",	(kernel_ulong_t)&fsi_b_ak4643 },
+
+	/* FSI 2 */
+	{ "sh_fsi2_a_ak4642",	(kernel_ulong_t)&fsi2_a_ak4642 },
+	{ "sh_fsi2_b_ak4642",	(kernel_ulong_t)&fsi2_b_ak4642 },
+	{ "sh_fsi2_a_ak4643",	(kernel_ulong_t)&fsi2_a_ak4643 },
+	{ "sh_fsi2_b_ak4643",	(kernel_ulong_t)&fsi2_b_ak4643 },
+	{},
+};
+
+static struct platform_driver fsi_ak4642 = {
+	.driver = {
+		.name	= "fsi-ak4642-audio",
+	},
+	.probe		= fsi_ak4642_probe,
+	.remove		= fsi_ak4642_remove,
+	.id_table	= fsi_id_table,
+};
+
+static int __init fsi_ak4642_init(void)
+{
+	return platform_driver_register(&fsi_ak4642);
+}
+
+static void __exit fsi_ak4642_exit(void)
+{
+	platform_driver_unregister(&fsi_ak4642);
 }
 
 module_init(fsi_ak4642_init);
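
The converted driver above no longer hard-codes the board wiring behind #ifdef CONFIG_MACH_AP4EVB; each platform_device_id entry carries a pointer to an fsi_ak4642_data describing the DAI link strings, and the probe routine picks the right one up through pdev->id_entry->driver_data. A minimal sketch of that table-driven selection (the struct and device names below are illustrative, not the kernel structures):

#include <stdio.h>
#include <string.h>

struct board_cfg {
	const char *card;
	const char *cpu_dai;
	const char *codec;
};

struct id_entry {
	const char *name;
	const struct board_cfg *cfg;	/* plays the role of driver_data */
};

static const struct board_cfg fsi2_a_ak4643 = {
	.card = "FSI2A (AK4643)", .cpu_dai = "fsia-dai",
	.codec = "ak4642-codec.0-0013",
};
static const struct board_cfg fsi_b_ak4642 = {
	.card = "FSIB (AK4642)", .cpu_dai = "fsib-dai",
	.codec = "ak4642-codec.0-0012",
};

static const struct id_entry id_table[] = {
	{ "sh_fsi2_a_ak4643", &fsi2_a_ak4643 },
	{ "sh_fsi_b_ak4642",  &fsi_b_ak4642 },
	{ NULL, NULL },
};

/* what the platform bus does on the driver's behalf: match the device
 * name, then hand probe the per-variant configuration */
static const struct board_cfg *probe(const char *devname)
{
	const struct id_entry *e;

	for (e = id_table; e->name; e++)
		if (!strcmp(e->name, devname))
			return e->cfg;
	return NULL;
}

int main(void)
{
	const struct board_cfg *cfg = probe("sh_fsi2_a_ak4643");

	if (!cfg)
		return 1;
	printf("card=%s cpu_dai=%s codec=%s\n",
	       cfg->card, cfg->cpu_dai, cfg->codec);
	return 0;
}
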
diff --git a/sound/soc/sh/fsi-da7210.c b/sound/soc/sh/fsi-da7210.c
index a6adb6e..e8df9da 100644
--- a/sound/soc/sh/fsi-da7210.c
+++ b/sound/soc/sh/fsi-da7210.c
@@ -18,7 +18,7 @@
 	struct snd_soc_dai *dai = rtd->codec_dai;
 
 	return snd_soc_dai_set_fmt(dai,
-				   SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
+				   SND_SOC_DAIFMT_I2S |
 				   SND_SOC_DAIFMT_CBM_CFM);
 }
 
diff --git a/sound/soc/sh/fsi.c b/sound/soc/sh/fsi.c
index 4c2404b..2b06402 100644
--- a/sound/soc/sh/fsi.c
+++ b/sound/soc/sh/fsi.c
@@ -19,20 +19,26 @@
 #include <sound/soc.h>
 #include <sound/sh_fsi.h>
 
-#define DO_FMT		0x0000
-#define DOFF_CTL	0x0004
-#define DOFF_ST		0x0008
-#define DI_FMT		0x000C
-#define DIFF_CTL	0x0010
-#define DIFF_ST		0x0014
-#define CKG1		0x0018
-#define CKG2		0x001C
-#define DIDT		0x0020
-#define DODT		0x0024
-#define MUTE_ST		0x0028
-#define OUT_SEL		0x0030
-#define REG_END		OUT_SEL
+/* PortA/PortB register */
+#define REG_DO_FMT	0x0000
+#define REG_DOFF_CTL	0x0004
+#define REG_DOFF_ST	0x0008
+#define REG_DI_FMT	0x000C
+#define REG_DIFF_CTL	0x0010
+#define REG_DIFF_ST	0x0014
+#define REG_CKG1	0x0018
+#define REG_CKG2	0x001C
+#define REG_DIDT	0x0020
+#define REG_DODT	0x0024
+#define REG_MUTE_ST	0x0028
+#define REG_OUT_SEL	0x0030
 
+/* master register */
+#define MST_CLK_RST	0x0210
+#define MST_SOFT_RST	0x0214
+#define MST_FIFO_SZ	0x0218
+
+/* core register (depends on FSI version) */
 #define A_MST_CTLR	0x0180
 #define B_MST_CTLR	0x01A0
 #define CPU_INT_ST	0x01F4
@@ -41,22 +47,23 @@
 #define INT_ST		0x0200
 #define IEMSK		0x0204
 #define IMSK		0x0208
-#define MUTE		0x020C
-#define CLK_RST		0x0210
-#define SOFT_RST	0x0214
-#define FIFO_SZ		0x0218
-#define MREG_START	A_MST_CTLR
-#define MREG_END	FIFO_SZ
 
 /* DO_FMT */
 /* DI_FMT */
+#define CR_BWS_24	(0x0 << 20) /* FSI2 */
+#define CR_BWS_16	(0x1 << 20) /* FSI2 */
+#define CR_BWS_20	(0x2 << 20) /* FSI2 */
+
+#define CR_DTMD_PCM		(0x0 << 8) /* FSI2 */
+#define CR_DTMD_SPDIF_PCM	(0x1 << 8) /* FSI2 */
+#define CR_DTMD_SPDIF_STREAM	(0x2 << 8) /* FSI2 */
+
 #define CR_MONO		(0x0 << 4)
 #define CR_MONO_D	(0x1 << 4)
 #define CR_PCM		(0x2 << 4)
 #define CR_I2S		(0x3 << 4)
 #define CR_TDM		(0x4 << 4)
 #define CR_TDM_D	(0x5 << 4)
-#define CR_SPDIF	0x00100120
 
 /* DOFF_CTL */
 /* DIFF_CTL */
@@ -93,6 +100,10 @@
 #define IR		(1 <<  4) /* Interrupt Reset */
 #define FSISR		(1 <<  0) /* Software Reset */
 
+/* OUT_SEL (FSI2) */
+#define DMMD		(1 << 4) /* SPDIF output timing 0: Biphase only */
+				 /*			1: Biphase and serial */
+
 /* FIFO_SZ */
 #define FIFO_SZ_MASK	0x7
 
@@ -123,6 +134,9 @@
 	int buff_len;
 	int period_len;
 	int period_num;
+
+	int uerr_num;
+	int oerr_num;
 };
 
 struct fsi_priv {
@@ -133,8 +147,6 @@
 	struct fsi_stream capture;
 
 	long rate;
-
-	u32 mst_ctrl;
 };
 
 struct fsi_core {
@@ -143,6 +155,8 @@
 	u32 int_st;
 	u32 iemsk;
 	u32 imsk;
+	u32 a_mclk;
+	u32 b_mclk;
 };
 
 struct fsi_master {
@@ -182,62 +196,22 @@
 	__fsi_reg_write(reg, val);
 }
 
-static void fsi_reg_write(struct fsi_priv *fsi, u32 reg, u32 data)
-{
-	if (reg > REG_END) {
-		pr_err("fsi: register access err (%s)\n", __func__);
-		return;
-	}
+#define fsi_reg_write(p, r, d)\
+	__fsi_reg_write((u32)(p->base + REG_##r), d)
 
-	__fsi_reg_write((u32)(fsi->base + reg), data);
-}
+#define fsi_reg_read(p, r)\
+	__fsi_reg_read((u32)(p->base + REG_##r))
 
-static u32 fsi_reg_read(struct fsi_priv *fsi, u32 reg)
-{
-	if (reg > REG_END) {
-		pr_err("fsi: register access err (%s)\n", __func__);
-		return 0;
-	}
+#define fsi_reg_mask_set(p, r, m, d)\
+	__fsi_reg_mask_set((u32)(p->base + REG_##r), m, d)
 
-	return __fsi_reg_read((u32)(fsi->base + reg));
-}
-
-static void fsi_reg_mask_set(struct fsi_priv *fsi, u32 reg, u32 mask, u32 data)
-{
-	if (reg > REG_END) {
-		pr_err("fsi: register access err (%s)\n", __func__);
-		return;
-	}
-
-	__fsi_reg_mask_set((u32)(fsi->base + reg), mask, data);
-}
-
-static void fsi_master_write(struct fsi_master *master, u32 reg, u32 data)
-{
-	unsigned long flags;
-
-	if ((reg < MREG_START) ||
-	    (reg > MREG_END)) {
-		pr_err("fsi: register access err (%s)\n", __func__);
-		return;
-	}
-
-	spin_lock_irqsave(&master->lock, flags);
-	__fsi_reg_write((u32)(master->base + reg), data);
-	spin_unlock_irqrestore(&master->lock, flags);
-}
-
-static u32 fsi_master_read(struct fsi_master *master, u32 reg)
+#define fsi_master_read(p, r) _fsi_master_read(p, MST_##r)
+#define fsi_core_read(p, r)   _fsi_master_read(p, p->core->r)
+static u32 _fsi_master_read(struct fsi_master *master, u32 reg)
 {
 	u32 ret;
 	unsigned long flags;
 
-	if ((reg < MREG_START) ||
-	    (reg > MREG_END)) {
-		pr_err("fsi: register access err (%s)\n", __func__);
-		return 0;
-	}
-
 	spin_lock_irqsave(&master->lock, flags);
 	ret = __fsi_reg_read((u32)(master->base + reg));
 	spin_unlock_irqrestore(&master->lock, flags);
@@ -245,17 +219,13 @@
 	return ret;
 }
 
-static void fsi_master_mask_set(struct fsi_master *master,
+#define fsi_master_mask_set(p, r, m, d) _fsi_master_mask_set(p, MST_##r, m, d)
+#define fsi_core_mask_set(p, r, m, d)  _fsi_master_mask_set(p, p->core->r, m, d)
+static void _fsi_master_mask_set(struct fsi_master *master,
 			       u32 reg, u32 mask, u32 data)
 {
 	unsigned long flags;
 
-	if ((reg < MREG_START) ||
-	    (reg > MREG_END)) {
-		pr_err("fsi: register access err (%s)\n", __func__);
-		return;
-	}
-
 	spin_lock_irqsave(&master->lock, flags);
 	__fsi_reg_mask_set((u32)(master->base + reg), mask, data);
 	spin_unlock_irqrestore(&master->lock, flags);
@@ -359,27 +329,41 @@
 	io->buff_offset	= 0;
 	io->period_len	= period_len;
 	io->period_num	= 0;
+	io->oerr_num	= -1; /* ignore 1st err */
+	io->uerr_num	= -1; /* ignore 1st err */
 }
 
 static void fsi_stream_pop(struct fsi_priv *fsi, int is_play)
 {
 	struct fsi_stream *io = fsi_get_stream(fsi, is_play);
+	struct snd_soc_dai *dai = fsi_get_dai(io->substream);
+
+
+	if (io->oerr_num > 0)
+		dev_err(dai->dev, "over_run = %d\n", io->oerr_num);
+
+	if (io->uerr_num > 0)
+		dev_err(dai->dev, "under_run = %d\n", io->uerr_num);
 
 	io->substream	= NULL;
 	io->buff_len	= 0;
 	io->buff_offset	= 0;
 	io->period_len	= 0;
 	io->period_num	= 0;
+	io->oerr_num	= 0;
+	io->uerr_num	= 0;
 }
 
 static int fsi_get_fifo_data_num(struct fsi_priv *fsi, int is_play)
 {
 	u32 status;
-	u32 reg = is_play ? DOFF_ST : DIFF_ST;
 	struct fsi_stream *io = fsi_get_stream(fsi, is_play);
 	int data_num;
 
-	status = fsi_reg_read(fsi, reg);
+	status = is_play ?
+		fsi_reg_read(fsi, DOFF_ST) :
+		fsi_reg_read(fsi, DIFF_ST);
+
 	data_num = 0x1ff & (status >> 8);
 	data_num *= io->chan_num;
 
@@ -406,6 +390,27 @@
 	return frames_to_bytes(runtime, 1) / io->chan_num;
 }
 
+static void fsi_count_fifo_err(struct fsi_priv *fsi)
+{
+	u32 ostatus = fsi_reg_read(fsi, DOFF_ST);
+	u32 istatus = fsi_reg_read(fsi, DIFF_ST);
+
+	if (ostatus & ERR_OVER)
+		fsi->playback.oerr_num++;
+
+	if (ostatus & ERR_UNDER)
+		fsi->playback.uerr_num++;
+
+	if (istatus & ERR_OVER)
+		fsi->capture.oerr_num++;
+
+	if (istatus & ERR_UNDER)
+		fsi->capture.uerr_num++;
+
+	fsi_reg_write(fsi, DOFF_ST, 0);
+	fsi_reg_write(fsi, DIFF_ST, 0);
+}
+
 /*
  *		dma function
  */
@@ -473,8 +478,8 @@
 	u32 data = AB_IO(1, fsi_get_port_shift(fsi, is_play));
 	struct fsi_master *master = fsi_get_master(fsi);
 
-	fsi_master_mask_set(master, master->core->imsk,  data, data);
-	fsi_master_mask_set(master, master->core->iemsk, data, data);
+	fsi_core_mask_set(master, imsk,  data, data);
+	fsi_core_mask_set(master, iemsk, data, data);
 }
 
 static void fsi_irq_disable(struct fsi_priv *fsi, int is_play)
@@ -482,18 +487,13 @@
 	u32 data = AB_IO(1, fsi_get_port_shift(fsi, is_play));
 	struct fsi_master *master = fsi_get_master(fsi);
 
-	fsi_master_mask_set(master, master->core->imsk,  data, 0);
-	fsi_master_mask_set(master, master->core->iemsk, data, 0);
+	fsi_core_mask_set(master, imsk,  data, 0);
+	fsi_core_mask_set(master, iemsk, data, 0);
 }
 
 static u32 fsi_irq_get_status(struct fsi_master *master)
 {
-	return fsi_master_read(master, master->core->int_st);
-}
-
-static void fsi_irq_clear_all_status(struct fsi_master *master)
-{
-	fsi_master_write(master, master->core->int_st, 0);
+	return fsi_core_read(master, int_st);
 }
 
 static void fsi_irq_clear_status(struct fsi_priv *fsi)
@@ -505,7 +505,7 @@
 	data |= AB_IO(1, fsi_get_port_shift(fsi, 1));
 
 	/* clear interrupt factor */
-	fsi_master_mask_set(master, master->core->int_st, data, 0);
+	fsi_core_mask_set(master, int_st, data, 0);
 }
 
 /*
@@ -516,17 +516,19 @@
 static void fsi_spdif_clk_ctrl(struct fsi_priv *fsi, int enable)
 {
 	struct fsi_master *master = fsi_get_master(fsi);
-	u32 val = BP | SE;
+	u32 mask, val;
 
 	if (master->core->ver < 2) {
 		pr_err("fsi: register access err (%s)\n", __func__);
 		return;
 	}
 
-	if (enable)
-		fsi_master_mask_set(master, fsi->mst_ctrl, val, val);
-	else
-		fsi_master_mask_set(master, fsi->mst_ctrl, val, 0);
+	mask = BP | SE;
+	val = enable ? mask : 0;
+
+	fsi_is_port_a(fsi) ?
+		fsi_core_mask_set(master, a_mclk, mask, val) :
+		fsi_core_mask_set(master, b_mclk, mask, val);
 }
 
 /*
@@ -550,7 +552,7 @@
 {
 	struct fsi_master *master = fsi_get_master(fsi);
 	struct fsi_stream *io = fsi_get_stream(fsi, is_play);
-	u32 ctrl, shift, i;
+	u32 shift, i;
 
 	/* get on-chip RAM capacity */
 	shift = fsi_master_read(master, FIFO_SZ);
@@ -583,13 +585,17 @@
 	dev_dbg(dai->dev, "%d channel %d store\n",
 		io->chan_num, io->fifo_max_num);
 
-	ctrl = is_play ? DOFF_CTL : DIFF_CTL;
-
-	/* set interrupt generation factor */
-	fsi_reg_write(fsi, ctrl, IRQ_HALF);
-
-	/* clear FIFO */
-	fsi_reg_mask_set(fsi, ctrl, FIFO_CLR, FIFO_CLR);
+	/*
+	 * set interrupt generation factor
+	 * clear FIFO
+	 */
+	if (is_play) {
+		fsi_reg_write(fsi,	DOFF_CTL, IRQ_HALF);
+		fsi_reg_mask_set(fsi,	DOFF_CTL, FIFO_CLR, FIFO_CLR);
+	} else {
+		fsi_reg_write(fsi,	DIFF_CTL, IRQ_HALF);
+		fsi_reg_mask_set(fsi,	DIFF_CTL, FIFO_CLR, FIFO_CLR);
+	}
 }
 
 static void fsi_soft_all_reset(struct fsi_master *master)
@@ -604,13 +610,12 @@
 	mdelay(10);
 }
 
-static int fsi_fifo_data_ctrl(struct fsi_priv *fsi, int startup, int stream)
+static int fsi_fifo_data_ctrl(struct fsi_priv *fsi, int stream)
 {
 	struct snd_pcm_runtime *runtime;
 	struct snd_pcm_substream *substream = NULL;
 	int is_play = fsi_stream_is_play(stream);
 	struct fsi_stream *io = fsi_get_stream(fsi, is_play);
-	u32 status_reg = is_play ? DOFF_ST : DIFF_ST;
 	int data_residue_num;
 	int data_num;
 	int data_num_max;
@@ -698,35 +703,20 @@
 	/* update buff_offset */
 	io->buff_offset += fsi_num2offset(data_num, ch_width);
 
-	/* check fifo status */
-	if (!startup) {
-		struct snd_soc_dai *dai = fsi_get_dai(substream);
-		u32 status = fsi_reg_read(fsi, status_reg);
-
-		if (status & ERR_OVER)
-			dev_err(dai->dev, "over run\n");
-		if (status & ERR_UNDER)
-			dev_err(dai->dev, "under run\n");
-	}
-	fsi_reg_write(fsi, status_reg, 0);
-
-	/* re-enable irq */
-	fsi_irq_enable(fsi, is_play);
-
 	if (over_period)
 		snd_pcm_period_elapsed(substream);
 
 	return 0;
 }
 
-static int fsi_data_pop(struct fsi_priv *fsi, int startup)
+static int fsi_data_pop(struct fsi_priv *fsi)
 {
-	return fsi_fifo_data_ctrl(fsi, startup, SNDRV_PCM_STREAM_CAPTURE);
+	return fsi_fifo_data_ctrl(fsi, SNDRV_PCM_STREAM_CAPTURE);
 }
 
-static int fsi_data_push(struct fsi_priv *fsi, int startup)
+static int fsi_data_push(struct fsi_priv *fsi)
 {
-	return fsi_fifo_data_ctrl(fsi, startup, SNDRV_PCM_STREAM_PLAYBACK);
+	return fsi_fifo_data_ctrl(fsi, SNDRV_PCM_STREAM_PLAYBACK);
 }
 
 static irqreturn_t fsi_interrupt(int irq, void *data)
@@ -739,15 +729,19 @@
 	fsi_master_mask_set(master, SOFT_RST, IR, IR);
 
 	if (int_st & AB_IO(1, AO_SHIFT))
-		fsi_data_push(&master->fsia, 0);
+		fsi_data_push(&master->fsia);
 	if (int_st & AB_IO(1, BO_SHIFT))
-		fsi_data_push(&master->fsib, 0);
+		fsi_data_push(&master->fsib);
 	if (int_st & AB_IO(1, AI_SHIFT))
-		fsi_data_pop(&master->fsia, 0);
+		fsi_data_pop(&master->fsia);
 	if (int_st & AB_IO(1, BI_SHIFT))
-		fsi_data_pop(&master->fsib, 0);
+		fsi_data_pop(&master->fsib);
 
-	fsi_irq_clear_all_status(master);
+	fsi_count_fifo_err(&master->fsia);
+	fsi_count_fifo_err(&master->fsib);
+
+	fsi_irq_clear_status(&master->fsia);
+	fsi_irq_clear_status(&master->fsib);
 
 	return IRQ_HANDLED;
 }
@@ -764,7 +758,6 @@
 	struct fsi_stream *io;
 	u32 flags = fsi_get_info_flags(fsi);
 	u32 fmt;
-	u32 reg;
 	u32 data;
 	int is_play = fsi_is_play(substream);
 	int is_master;
@@ -796,7 +789,6 @@
 
 	/* do fmt, di fmt */
 	data = 0;
-	reg = is_play ? DO_FMT : DI_FMT;
 	fmt = is_play ? SH_FSI_GET_OFMT(flags) : SH_FSI_GET_IFMT(flags);
 	switch (fmt) {
 	case SH_FSI_FMT_MONO:
@@ -830,16 +822,18 @@
 			dev_err(dai->dev, "This FSI can not use SPDIF\n");
 			return -EINVAL;
 		}
-		data = CR_SPDIF;
+		data = CR_BWS_16 | CR_DTMD_SPDIF_PCM | CR_PCM;
 		io->chan_num = 2;
 		fsi_spdif_clk_ctrl(fsi, 1);
-		fsi_reg_mask_set(fsi, OUT_SEL, 0x0010, 0x0010);
+		fsi_reg_mask_set(fsi, OUT_SEL, DMMD, DMMD);
 		break;
 	default:
 		dev_err(dai->dev, "unknown format.\n");
 		return -EINVAL;
 	}
-	fsi_reg_write(fsi, reg, data);
+	is_play ?
+		fsi_reg_write(fsi, DO_FMT, data) :
+		fsi_reg_write(fsi, DI_FMT, data);
 
 	/* irq clear */
 	fsi_irq_disable(fsi, is_play);
@@ -883,7 +877,8 @@
 		fsi_stream_push(fsi, is_play, substream,
 				frames_to_bytes(runtime, runtime->buffer_size),
 				frames_to_bytes(runtime, runtime->period_size));
-		ret = is_play ? fsi_data_push(fsi, 1) : fsi_data_pop(fsi, 1);
+		ret = is_play ? fsi_data_push(fsi) : fsi_data_pop(fsi);
+		fsi_irq_enable(fsi, is_play);
 		break;
 	case SNDRV_PCM_TRIGGER_STOP:
 		fsi_irq_disable(fsi, is_play);
@@ -1174,12 +1169,10 @@
 	/* FSI A setting */
 	master->fsia.base	= master->base;
 	master->fsia.master	= master;
-	master->fsia.mst_ctrl	= A_MST_CTLR;
 
 	/* FSI B setting */
 	master->fsib.base	= master->base + 0x40;
 	master->fsib.master	= master;
-	master->fsib.mst_ctrl	= B_MST_CTLR;
 
 	pm_runtime_enable(&pdev->dev);
 	pm_runtime_resume(&pdev->dev);
@@ -1266,6 +1259,8 @@
 	.int_st	= CPU_INT_ST,
 	.iemsk	= CPU_IEMSK,
 	.imsk	= CPU_IMSK,
+	.a_mclk	= A_MST_CTLR,
+	.b_mclk	= B_MST_CTLR,
 };
 
 static struct platform_device_id fsi_id_table[] = {
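
Worth noting in the fsi.c hunks above: the open-coded fsi_reg_read()/fsi_reg_write() helpers with runtime range checks are replaced by macros that paste REG_ onto the register name, so a misspelled register now fails at compile time instead of being reported (or missed) at run time. A compilable userspace sketch of the token-pasting idea, using an ordinary array in place of the memory-mapped window:

#include <stdint.h>
#include <stdio.h>

/* register byte offsets, mirroring the REG_* naming used in the patch */
#define REG_DO_FMT	0x0000
#define REG_CKG1	0x0018

/* token pasting turns fsi_reg_write(base, CKG1, v) into an access at REG_CKG1;
 * a typo such as CGK1 simply does not compile */
#define fsi_reg_write(base, r, v)	((base)[REG_##r / 4] = (v))
#define fsi_reg_read(base, r)		((base)[REG_##r / 4])

int main(void)
{
	uint32_t regs[0x40 / 4] = { 0 };	/* stand-in for the register window */

	fsi_reg_write(regs, CKG1, 0x80);
	printf("CKG1 = 0x%02x\n", (unsigned)fsi_reg_read(regs, CKG1));
	return 0;
}
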
diff --git a/sound/soc/sh/migor.c b/sound/soc/sh/migor.c
index ac6c49c..6088a6a 100644
--- a/sound/soc/sh/migor.c
+++ b/sound/soc/sh/migor.c
@@ -8,11 +8,11 @@
  * published by the Free Software Foundation.
  */
 
+#include <linux/clkdev.h>
 #include <linux/device.h>
 #include <linux/firmware.h>
 #include <linux/module.h>
 
-#include <asm/clkdev.h>
 #include <asm/clock.h>
 
 #include <cpu/sh7722.h>
@@ -20,7 +20,6 @@
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 
 #include "../codecs/wm8978.h"
 #include "siu.h"
@@ -140,11 +139,12 @@
 static int migor_dai_init(struct snd_soc_pcm_runtime *rtd)
 {
 	struct snd_soc_codec *codec = rtd->codec;
+	struct snd_soc_dapm_context *dapm = &codec->dapm;
 
-	snd_soc_dapm_new_controls(codec, migor_dapm_widgets,
+	snd_soc_dapm_new_controls(dapm, migor_dapm_widgets,
 				  ARRAY_SIZE(migor_dapm_widgets));
 
-	snd_soc_dapm_add_routes(codec, audio_map, ARRAY_SIZE(audio_map));
+	snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
 
 	return 0;
 }
diff --git a/sound/soc/sh/sh7760-ac97.c b/sound/soc/sh/sh7760-ac97.c
index f8e0ab8..917d3ce 100644
--- a/sound/soc/sh/sh7760-ac97.c
+++ b/sound/soc/sh/sh7760-ac97.c
@@ -12,7 +12,6 @@
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <asm/io.h>
 
 #define IPSEL 0xFE400034
@@ -23,7 +22,7 @@
 
 static int machine_init(struct snd_soc_pcm_runtime *rtd)
 {
-	snd_soc_dapm_sync(rtd->codec);
+	snd_soc_dapm_sync(&rtd->codec->dapm);
 	return 0;
 }
 
diff --git a/sound/soc/sh/siu.h b/sound/soc/sh/siu.h
index 9f4dcb9..83c3430 100644
--- a/sound/soc/sh/siu.h
+++ b/sound/soc/sh/siu.h
@@ -75,7 +75,7 @@
 
 #include <sound/core.h>
 #include <sound/pcm.h>
-#include <sound/soc-dai.h>
+#include <sound/soc.h>
 
 #define SIU_PERIOD_BYTES_MAX	8192		/* DMA transfer/period size */
 #define SIU_PERIOD_BYTES_MIN	256		/* DMA transfer/period size */
diff --git a/sound/soc/sh/siu_dai.c b/sound/soc/sh/siu_dai.c
index af53b64..4973c29 100644
--- a/sound/soc/sh/siu_dai.c
+++ b/sound/soc/sh/siu_dai.c
@@ -28,7 +28,7 @@
 #include <asm/siu.h>
 
 #include <sound/control.h>
-#include <sound/soc-dai.h>
+#include <sound/soc.h>
 
 #include "siu.h"
 
diff --git a/sound/soc/sh/siu_pcm.c b/sound/soc/sh/siu_pcm.c
index ed29c9e..a423bab 100644
--- a/sound/soc/sh/siu_pcm.c
+++ b/sound/soc/sh/siu_pcm.c
@@ -29,7 +29,7 @@
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
-#include <sound/soc-dai.h>
+#include <sound/soc.h>
 
 #include <asm/siu.h>
 
diff --git a/sound/soc/soc-cache.c b/sound/soc/soc-cache.c
index d214f02..8c2a21a 100644
--- a/sound/soc/soc-cache.c
+++ b/sound/soc/soc-cache.c
@@ -14,27 +14,34 @@
 #include <linux/i2c.h>
 #include <linux/spi/spi.h>
 #include <sound/soc.h>
+#include <linux/lzo.h>
+#include <linux/bitmap.h>
+#include <linux/rbtree.h>
 
 static unsigned int snd_soc_4_12_read(struct snd_soc_codec *codec,
 				     unsigned int reg)
 {
-	u16 *cache = codec->reg_cache;
+	int ret;
+	unsigned int val;
 
 	if (reg >= codec->driver->reg_cache_size ||
 		snd_soc_codec_volatile_register(codec, reg)) {
 			if (codec->cache_only)
 				return -1;
 
+			BUG_ON(!codec->hw_read);
 			return codec->hw_read(codec, reg);
 	}
 
-	return cache[reg];
+	ret = snd_soc_cache_read(codec, reg, &val);
+	if (ret < 0)
+		return -1;
+	return val;
 }
 
 static int snd_soc_4_12_write(struct snd_soc_codec *codec, unsigned int reg,
 			     unsigned int value)
 {
-	u16 *cache = codec->reg_cache;
 	u8 data[2];
 	int ret;
 
@@ -42,16 +49,17 @@
 	data[1] = value & 0x00ff;
 
 	if (!snd_soc_codec_volatile_register(codec, reg) &&
-		reg < codec->driver->reg_cache_size)
-			cache[reg] = value;
+		reg < codec->driver->reg_cache_size) {
+		ret = snd_soc_cache_write(codec, reg, value);
+		if (ret < 0)
+			return -1;
+	}
 
 	if (codec->cache_only) {
 		codec->cache_sync = 1;
 		return 0;
 	}
 
-	dev_dbg(codec->dev, "0x%x = 0x%x\n", reg, value);
-
 	ret = codec->hw_write(codec->control_data, data, 2);
 	if (ret == 2)
 		return 0;
@@ -77,7 +85,7 @@
 	msg[1] = data[0];
 
 	spi_message_init(&m);
-	memset(&t, 0, (sizeof t));
+	memset(&t, 0, sizeof t);
 
 	t.tx_buf = &msg[0];
 	t.len = len;
@@ -94,23 +102,27 @@
 static unsigned int snd_soc_7_9_read(struct snd_soc_codec *codec,
 				     unsigned int reg)
 {
-	u16 *cache = codec->reg_cache;
+	int ret;
+	unsigned int val;
 
 	if (reg >= codec->driver->reg_cache_size ||
 		snd_soc_codec_volatile_register(codec, reg)) {
 			if (codec->cache_only)
 				return -1;
 
+			BUG_ON(!codec->hw_read);
 			return codec->hw_read(codec, reg);
 	}
 
-	return cache[reg];
+	ret = snd_soc_cache_read(codec, reg, &val);
+	if (ret < 0)
+		return -1;
+	return val;
 }
 
 static int snd_soc_7_9_write(struct snd_soc_codec *codec, unsigned int reg,
 			     unsigned int value)
 {
-	u16 *cache = codec->reg_cache;
 	u8 data[2];
 	int ret;
 
@@ -118,16 +130,17 @@
 	data[1] = value & 0x00ff;
 
 	if (!snd_soc_codec_volatile_register(codec, reg) &&
-		reg < codec->driver->reg_cache_size)
-			cache[reg] = value;
+		reg < codec->driver->reg_cache_size) {
+		ret = snd_soc_cache_write(codec, reg, value);
+		if (ret < 0)
+			return -1;
+	}
 
 	if (codec->cache_only) {
 		codec->cache_sync = 1;
 		return 0;
 	}
 
-	dev_dbg(codec->dev, "0x%x = 0x%x\n", reg, value);
-
 	ret = codec->hw_write(codec->control_data, data, 2);
 	if (ret == 2)
 		return 0;
@@ -153,7 +166,7 @@
 	msg[1] = data[1];
 
 	spi_message_init(&m);
-	memset(&t, 0, (sizeof t));
+	memset(&t, 0, sizeof t);
 
 	t.tx_buf = &msg[0];
 	t.len = len;
@@ -170,24 +183,25 @@
 static int snd_soc_8_8_write(struct snd_soc_codec *codec, unsigned int reg,
 			     unsigned int value)
 {
-	u8 *cache = codec->reg_cache;
 	u8 data[2];
+	int ret;
 
 	reg &= 0xff;
 	data[0] = reg;
 	data[1] = value & 0xff;
 
 	if (!snd_soc_codec_volatile_register(codec, reg) &&
-		reg < codec->driver->reg_cache_size)
-			cache[reg] = value;
+		reg < codec->driver->reg_cache_size) {
+		ret = snd_soc_cache_write(codec, reg, value);
+		if (ret < 0)
+			return -1;
+	}
 
 	if (codec->cache_only) {
 		codec->cache_sync = 1;
 		return 0;
 	}
 
-	dev_dbg(codec->dev, "0x%x = 0x%x\n", reg, value);
-
 	if (codec->hw_write(codec->control_data, data, 2) == 2)
 		return 0;
 	else
@@ -197,7 +211,8 @@
 static unsigned int snd_soc_8_8_read(struct snd_soc_codec *codec,
 				     unsigned int reg)
 {
-	u8 *cache = codec->reg_cache;
+	int ret;
+	unsigned int val;
 
 	reg &= 0xff;
 	if (reg >= codec->driver->reg_cache_size ||
@@ -205,10 +220,14 @@
 			if (codec->cache_only)
 				return -1;
 
+			BUG_ON(!codec->hw_read);
 			return codec->hw_read(codec, reg);
 	}
 
-	return cache[reg];
+	ret = snd_soc_cache_read(codec, reg, &val);
+	if (ret < 0)
+		return -1;
+	return val;
 }
 
 #if defined(CONFIG_SPI_MASTER)
@@ -227,7 +246,7 @@
 	msg[1] = data[1];
 
 	spi_message_init(&m);
-	memset(&t, 0, (sizeof t));
+	memset(&t, 0, sizeof t);
 
 	t.tx_buf = &msg[0];
 	t.len = len;
@@ -244,24 +263,25 @@
 static int snd_soc_8_16_write(struct snd_soc_codec *codec, unsigned int reg,
 			      unsigned int value)
 {
-	u16 *reg_cache = codec->reg_cache;
 	u8 data[3];
+	int ret;
 
 	data[0] = reg;
 	data[1] = (value >> 8) & 0xff;
 	data[2] = value & 0xff;
 
 	if (!snd_soc_codec_volatile_register(codec, reg) &&
-	    reg < codec->driver->reg_cache_size)
-		reg_cache[reg] = value;
+		reg < codec->driver->reg_cache_size) {
+		ret = snd_soc_cache_write(codec, reg, value);
+		if (ret < 0)
+			return -1;
+	}
 
 	if (codec->cache_only) {
 		codec->cache_sync = 1;
 		return 0;
 	}
 
-	dev_dbg(codec->dev, "0x%x = 0x%x\n", reg, value);
-
 	if (codec->hw_write(codec->control_data, data, 3) == 3)
 		return 0;
 	else
@@ -271,17 +291,22 @@
 static unsigned int snd_soc_8_16_read(struct snd_soc_codec *codec,
 				      unsigned int reg)
 {
-	u16 *cache = codec->reg_cache;
+	int ret;
+	unsigned int val;
 
 	if (reg >= codec->driver->reg_cache_size ||
 	    snd_soc_codec_volatile_register(codec, reg)) {
 		if (codec->cache_only)
 			return -1;
 
+		BUG_ON(!codec->hw_read);
 		return codec->hw_read(codec, reg);
-	} else {
-		return cache[reg];
 	}
+
+	ret = snd_soc_cache_read(codec, reg, &val);
+	if (ret < 0)
+		return -1;
+	return val;
 }
 
 #if defined(CONFIG_SPI_MASTER)
@@ -301,7 +326,7 @@
 	msg[2] = data[2];
 
 	spi_message_init(&m);
-	memset(&t, 0, (sizeof t));
+	memset(&t, 0, sizeof t);
 
 	t.tx_buf = &msg[0];
 	t.len = len;
@@ -420,7 +445,8 @@
 static unsigned int snd_soc_16_8_read(struct snd_soc_codec *codec,
 				     unsigned int reg)
 {
-	u8 *cache = codec->reg_cache;
+	int ret;
+	unsigned int val;
 
 	reg &= 0xff;
 	if (reg >= codec->driver->reg_cache_size ||
@@ -428,16 +454,19 @@
 			if (codec->cache_only)
 				return -1;
 
+			BUG_ON(!codec->hw_read);
 			return codec->hw_read(codec, reg);
 	}
 
-	return cache[reg];
+	ret = snd_soc_cache_read(codec, reg, &val);
+	if (ret < 0)
+		return -1;
+	return val;
 }
 
 static int snd_soc_16_8_write(struct snd_soc_codec *codec, unsigned int reg,
 			     unsigned int value)
 {
-	u8 *cache = codec->reg_cache;
 	u8 data[3];
 	int ret;
 
@@ -447,16 +476,17 @@
 
 	reg &= 0xff;
 	if (!snd_soc_codec_volatile_register(codec, reg) &&
-		reg < codec->driver->reg_cache_size)
-			cache[reg] = value;
+		reg < codec->driver->reg_cache_size) {
+		ret = snd_soc_cache_write(codec, reg, value);
+		if (ret < 0)
+			return -1;
+	}
 
 	if (codec->cache_only) {
 		codec->cache_sync = 1;
 		return 0;
 	}
 
-	dev_dbg(codec->dev, "0x%x = 0x%x\n", reg, value);
-
 	ret = codec->hw_write(codec->control_data, data, 3);
 	if (ret == 3)
 		return 0;
@@ -483,7 +513,7 @@
 	msg[2] = data[2];
 
 	spi_message_init(&m);
-	memset(&t, 0, (sizeof t));
+	memset(&t, 0, sizeof t);
 
 	t.tx_buf = &msg[0];
 	t.len = len;
@@ -534,23 +564,28 @@
 static unsigned int snd_soc_16_16_read(struct snd_soc_codec *codec,
 				       unsigned int reg)
 {
-	u16 *cache = codec->reg_cache;
+	int ret;
+	unsigned int val;
 
 	if (reg >= codec->driver->reg_cache_size ||
 	    snd_soc_codec_volatile_register(codec, reg)) {
 		if (codec->cache_only)
 			return -1;
 
+		BUG_ON(!codec->hw_read);
 		return codec->hw_read(codec, reg);
 	}
 
-	return cache[reg];
+	ret = snd_soc_cache_read(codec, reg, &val);
+	if (ret < 0)
+		return -1;
+
+	return val;
 }
 
 static int snd_soc_16_16_write(struct snd_soc_codec *codec, unsigned int reg,
 			       unsigned int value)
 {
-	u16 *cache = codec->reg_cache;
 	u8 data[4];
 	int ret;
 
@@ -560,16 +595,17 @@
 	data[3] = value & 0xff;
 
 	if (!snd_soc_codec_volatile_register(codec, reg) &&
-		reg < codec->driver->reg_cache_size)
-			cache[reg] = value;
+		reg < codec->driver->reg_cache_size) {
+		ret = snd_soc_cache_write(codec, reg, value);
+		if (ret < 0)
+			return -1;
+	}
 
 	if (codec->cache_only) {
 		codec->cache_sync = 1;
 		return 0;
 	}
 
-	dev_dbg(codec->dev, "0x%x = 0x%x\n", reg, value);
-
 	ret = codec->hw_write(codec->control_data, data, 4);
 	if (ret == 4)
 		return 0;
@@ -597,7 +633,7 @@
 	msg[3] = data[3];
 
 	spi_message_init(&m);
-	memset(&t, 0, (sizeof t));
+	memset(&t, 0, sizeof t);
 
 	t.tx_buf = &msg[0];
 	t.len = len;
@@ -692,8 +728,8 @@
 		return -EINVAL;
 	}
 
-	codec->driver->write = io_types[i].write;
-	codec->driver->read = io_types[i].read;
+	codec->write = io_types[i].write;
+	codec->read = io_types[i].read;
 
 	switch (control) {
 	case SND_SOC_CUSTOM:
@@ -724,3 +760,930 @@
 	return 0;
 }
 EXPORT_SYMBOL_GPL(snd_soc_codec_set_cache_io);
+
+struct snd_soc_rbtree_node {
+	struct rb_node node;
+	unsigned int reg;
+	unsigned int value;
+	unsigned int defval;
+} __attribute__ ((packed));
+
+struct snd_soc_rbtree_ctx {
+	struct rb_root root;
+};
+
+static struct snd_soc_rbtree_node *snd_soc_rbtree_lookup(
+	struct rb_root *root, unsigned int reg)
+{
+	struct rb_node *node;
+	struct snd_soc_rbtree_node *rbnode;
+
+	node = root->rb_node;
+	while (node) {
+		rbnode = container_of(node, struct snd_soc_rbtree_node, node);
+		if (rbnode->reg < reg)
+			node = node->rb_left;
+		else if (rbnode->reg > reg)
+			node = node->rb_right;
+		else
+			return rbnode;
+	}
+
+	return NULL;
+}
+
+static int snd_soc_rbtree_insert(struct rb_root *root,
+				 struct snd_soc_rbtree_node *rbnode)
+{
+	struct rb_node **new, *parent;
+	struct snd_soc_rbtree_node *rbnode_tmp;
+
+	parent = NULL;
+	new = &root->rb_node;
+	while (*new) {
+		rbnode_tmp = container_of(*new, struct snd_soc_rbtree_node,
+					  node);
+		parent = *new;
+		if (rbnode_tmp->reg < rbnode->reg)
+			new = &((*new)->rb_left);
+		else if (rbnode_tmp->reg > rbnode->reg)
+			new = &((*new)->rb_right);
+		else
+			return 0;
+	}
+
+	/* insert the node into the rbtree */
+	rb_link_node(&rbnode->node, parent, new);
+	rb_insert_color(&rbnode->node, root);
+
+	return 1;
+}
+
+static int snd_soc_rbtree_cache_sync(struct snd_soc_codec *codec)
+{
+	struct snd_soc_rbtree_ctx *rbtree_ctx;
+	struct rb_node *node;
+	struct snd_soc_rbtree_node *rbnode;
+	unsigned int val;
+	int ret;
+
+	rbtree_ctx = codec->reg_cache;
+	for (node = rb_first(&rbtree_ctx->root); node; node = rb_next(node)) {
+		rbnode = rb_entry(node, struct snd_soc_rbtree_node, node);
+		if (rbnode->value == rbnode->defval)
+			continue;
+		ret = snd_soc_cache_read(codec, rbnode->reg, &val);
+		if (ret)
+			return ret;
+		ret = snd_soc_write(codec, rbnode->reg, val);
+		if (ret)
+			return ret;
+		dev_dbg(codec->dev, "Synced register %#x, value = %#x\n",
+			rbnode->reg, val);
+	}
+
+	return 0;
+}
+
+static int snd_soc_rbtree_cache_write(struct snd_soc_codec *codec,
+				      unsigned int reg, unsigned int value)
+{
+	struct snd_soc_rbtree_ctx *rbtree_ctx;
+	struct snd_soc_rbtree_node *rbnode;
+
+	rbtree_ctx = codec->reg_cache;
+	rbnode = snd_soc_rbtree_lookup(&rbtree_ctx->root, reg);
+	if (rbnode) {
+		if (rbnode->value == value)
+			return 0;
+		rbnode->value = value;
+	} else {
+		/* bail out early, no need to create the rbnode yet */
+		if (!value)
+			return 0;
+		/*
+		 * for uninitialized registers whose value is changed
+		 * from the default zero, create an rbnode and insert
+		 * it into the tree.
+		 */
+		rbnode = kzalloc(sizeof *rbnode, GFP_KERNEL);
+		if (!rbnode)
+			return -ENOMEM;
+		rbnode->reg = reg;
+		rbnode->value = value;
+		snd_soc_rbtree_insert(&rbtree_ctx->root, rbnode);
+	}
+
+	return 0;
+}
+
+static int snd_soc_rbtree_cache_read(struct snd_soc_codec *codec,
+				     unsigned int reg, unsigned int *value)
+{
+	struct snd_soc_rbtree_ctx *rbtree_ctx;
+	struct snd_soc_rbtree_node *rbnode;
+
+	rbtree_ctx = codec->reg_cache;
+	rbnode = snd_soc_rbtree_lookup(&rbtree_ctx->root, reg);
+	if (rbnode) {
+		*value = rbnode->value;
+	} else {
+		/* uninitialized registers default to 0 */
+		*value = 0;
+	}
+
+	return 0;
+}
+
+static int snd_soc_rbtree_cache_exit(struct snd_soc_codec *codec)
+{
+	struct rb_node *next;
+	struct snd_soc_rbtree_ctx *rbtree_ctx;
+	struct snd_soc_rbtree_node *rbtree_node;
+
+	/* if we've already been called then just return */
+	rbtree_ctx = codec->reg_cache;
+	if (!rbtree_ctx)
+		return 0;
+
+	/* free up the rbtree */
+	next = rb_first(&rbtree_ctx->root);
+	while (next) {
+		rbtree_node = rb_entry(next, struct snd_soc_rbtree_node, node);
+		next = rb_next(&rbtree_node->node);
+		rb_erase(&rbtree_node->node, &rbtree_ctx->root);
+		kfree(rbtree_node);
+	}
+
+	/* release the resources */
+	kfree(codec->reg_cache);
+	codec->reg_cache = NULL;
+
+	return 0;
+}
+
+static int snd_soc_rbtree_cache_init(struct snd_soc_codec *codec)
+{
+	struct snd_soc_rbtree_ctx *rbtree_ctx;
+
+	codec->reg_cache = kmalloc(sizeof *rbtree_ctx, GFP_KERNEL);
+	if (!codec->reg_cache)
+		return -ENOMEM;
+
+	rbtree_ctx = codec->reg_cache;
+	rbtree_ctx->root = RB_ROOT;
+
+	if (!codec->reg_def_copy)
+		return 0;
+
+/*
+ * populate the rbtree with the initialized registers.  All other
+ * registers will be inserted into the tree when they are first written.
+ *
+ * The reasoning behind this is that we need to step through and
+ * dereference the cache in u8/u16 increments without sacrificing
+ * portability.  This could also be done using memcpy() but that would
+ * be slightly more cryptic.
+ */
+#define snd_soc_rbtree_populate(cache)					\
+({									\
+	int ret, i;							\
+	struct snd_soc_rbtree_node *rbtree_node;			\
+									\
+	ret = 0;							\
+	cache = codec->reg_def_copy;					\
+	for (i = 0; i < codec->driver->reg_cache_size; ++i) {		\
+		if (!cache[i])						\
+			continue;					\
+		rbtree_node = kzalloc(sizeof *rbtree_node, GFP_KERNEL);	\
+		if (!rbtree_node) {					\
+			ret = -ENOMEM;					\
+			snd_soc_cache_exit(codec);			\
+			break;						\
+		}							\
+		rbtree_node->reg = i;					\
+		rbtree_node->value = cache[i];				\
+		rbtree_node->defval = cache[i];				\
+		snd_soc_rbtree_insert(&rbtree_ctx->root,		\
+				      rbtree_node);			\
+	}								\
+	ret;								\
+})
+
+	switch (codec->driver->reg_word_size) {
+	case 1: {
+		const u8 *cache;
+
+		return snd_soc_rbtree_populate(cache);
+	}
+	case 2: {
+		const u16 *cache;
+
+		return snd_soc_rbtree_populate(cache);
+	}
+	default:
+		BUG();
+	}
+
+	return 0;
+}
+
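
The rbtree backend above stores only registers whose value differs from the hardware default, so sparse codec register maps stay small: reads of an absent register return the default (zero), a write of a non-default value creates a node on demand, and init seeds the tree from reg_def_copy. The kernel code keys an rbtree by register index; the sketch below uses a short linear list purely to keep the example compact (names such as sparse_write are made up):

#include <stdio.h>
#include <stdlib.h>

struct sparse_ent {
	unsigned int reg;
	unsigned int val;
	struct sparse_ent *next;
};

static struct sparse_ent *cache;	/* only non-default registers live here */

static struct sparse_ent *lookup(unsigned int reg)
{
	struct sparse_ent *e;

	for (e = cache; e; e = e->next)
		if (e->reg == reg)
			return e;
	return NULL;
}

static unsigned int sparse_read(unsigned int reg)
{
	struct sparse_ent *e = lookup(reg);

	return e ? e->val : 0;		/* uninitialized registers default to 0 */
}

static int sparse_write(unsigned int reg, unsigned int val)
{
	struct sparse_ent *e = lookup(reg);

	if (e) {
		e->val = val;
		return 0;
	}
	if (!val)			/* still the default: nothing to store */
		return 0;

	e = malloc(sizeof(*e));
	if (!e)
		return -1;
	e->reg = reg;
	e->val = val;
	e->next = cache;
	cache = e;
	return 0;
}

int main(void)
{
	sparse_write(0x1a, 0x00);	/* no node created */
	sparse_write(0x1c, 0x55);	/* node created on first non-default write */
	printf("0x1a = 0x%02x, 0x1c = 0x%02x\n",
	       sparse_read(0x1a), sparse_read(0x1c));
	return 0;
}
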
+#ifdef CONFIG_SND_SOC_CACHE_LZO
+struct snd_soc_lzo_ctx {
+	void *wmem;
+	void *dst;
+	const void *src;
+	size_t src_len;
+	size_t dst_len;
+	size_t decompressed_size;
+	unsigned long *sync_bmp;
+	int sync_bmp_nbits;
+};
+
+#define LZO_BLOCK_NUM 8
+static int snd_soc_lzo_block_count(void)
+{
+	return LZO_BLOCK_NUM;
+}
+
+static int snd_soc_lzo_prepare(struct snd_soc_lzo_ctx *lzo_ctx)
+{
+	lzo_ctx->wmem = kmalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
+	if (!lzo_ctx->wmem)
+		return -ENOMEM;
+	return 0;
+}
+
+static int snd_soc_lzo_compress(struct snd_soc_lzo_ctx *lzo_ctx)
+{
+	size_t compress_size;
+	int ret;
+
+	ret = lzo1x_1_compress(lzo_ctx->src, lzo_ctx->src_len,
+			       lzo_ctx->dst, &compress_size, lzo_ctx->wmem);
+	if (ret != LZO_E_OK || compress_size > lzo_ctx->dst_len)
+		return -EINVAL;
+	lzo_ctx->dst_len = compress_size;
+	return 0;
+}
+
+static int snd_soc_lzo_decompress(struct snd_soc_lzo_ctx *lzo_ctx)
+{
+	size_t dst_len;
+	int ret;
+
+	dst_len = lzo_ctx->dst_len;
+	ret = lzo1x_decompress_safe(lzo_ctx->src, lzo_ctx->src_len,
+				    lzo_ctx->dst, &dst_len);
+	if (ret != LZO_E_OK || dst_len != lzo_ctx->dst_len)
+		return -EINVAL;
+	return 0;
+}
+
+static int snd_soc_lzo_compress_cache_block(struct snd_soc_codec *codec,
+		struct snd_soc_lzo_ctx *lzo_ctx)
+{
+	int ret;
+
+	lzo_ctx->dst_len = lzo1x_worst_compress(PAGE_SIZE);
+	lzo_ctx->dst = kmalloc(lzo_ctx->dst_len, GFP_KERNEL);
+	if (!lzo_ctx->dst) {
+		lzo_ctx->dst_len = 0;
+		return -ENOMEM;
+	}
+
+	ret = snd_soc_lzo_compress(lzo_ctx);
+	if (ret < 0)
+		return ret;
+	return 0;
+}
+
+static int snd_soc_lzo_decompress_cache_block(struct snd_soc_codec *codec,
+		struct snd_soc_lzo_ctx *lzo_ctx)
+{
+	int ret;
+
+	lzo_ctx->dst_len = lzo_ctx->decompressed_size;
+	lzo_ctx->dst = kmalloc(lzo_ctx->dst_len, GFP_KERNEL);
+	if (!lzo_ctx->dst) {
+		lzo_ctx->dst_len = 0;
+		return -ENOMEM;
+	}
+
+	ret = snd_soc_lzo_decompress(lzo_ctx);
+	if (ret < 0)
+		return ret;
+	return 0;
+}
+
+static inline int snd_soc_lzo_get_blkindex(struct snd_soc_codec *codec,
+		unsigned int reg)
+{
+	const struct snd_soc_codec_driver *codec_drv;
+	size_t reg_size;
+
+	codec_drv = codec->driver;
+	reg_size = codec_drv->reg_cache_size * codec_drv->reg_word_size;
+	return (reg * codec_drv->reg_word_size) /
+	       DIV_ROUND_UP(reg_size, snd_soc_lzo_block_count());
+}
+
+static inline int snd_soc_lzo_get_blkpos(struct snd_soc_codec *codec,
+		unsigned int reg)
+{
+	const struct snd_soc_codec_driver *codec_drv;
+	size_t reg_size;
+
+	codec_drv = codec->driver;
+	reg_size = codec_drv->reg_cache_size * codec_drv->reg_word_size;
+	return reg % (DIV_ROUND_UP(reg_size, snd_soc_lzo_block_count()) /
+		      codec_drv->reg_word_size);
+}
+
+static inline int snd_soc_lzo_get_blksize(struct snd_soc_codec *codec)
+{
+	const struct snd_soc_codec_driver *codec_drv;
+	size_t reg_size;
+
+	codec_drv = codec->driver;
+	reg_size = codec_drv->reg_cache_size * codec_drv->reg_word_size;
+	return DIV_ROUND_UP(reg_size, snd_soc_lzo_block_count());
+}
+
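
The three helpers above split the raw register map (reg_cache_size * reg_word_size bytes) into LZO_BLOCK_NUM compressed blocks and, for a given register, compute which block it lives in and its word offset inside the decompressed block. A self-contained sketch with a worked example (the cache geometry used here is an assumption picked for illustration, not from a real codec driver):

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define LZO_BLOCK_NUM		8

/* illustrative codec geometry */
static const unsigned int reg_cache_size = 256;	/* number of registers */
static const unsigned int reg_word_size  = 2;	/* bytes per register */

static unsigned int blksize(void)
{
	unsigned int reg_size = reg_cache_size * reg_word_size;

	return DIV_ROUND_UP(reg_size, LZO_BLOCK_NUM);	/* bytes per block */
}

static unsigned int blkindex(unsigned int reg)
{
	return (reg * reg_word_size) / blksize();	/* which compressed block */
}

static unsigned int blkpos(unsigned int reg)
{
	return reg % (blksize() / reg_word_size);	/* word offset in that block */
}

int main(void)
{
	unsigned int regs[] = { 0, 31, 32, 100, 255 };
	unsigned int i;

	printf("block size: %u bytes\n", blksize());	/* 512 / 8 = 64 */
	for (i = 0; i < sizeof regs / sizeof regs[0]; i++)
		printf("reg %3u -> block %u, offset %u\n",
		       regs[i], blkindex(regs[i]), blkpos(regs[i]));
	return 0;
}
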
+static int snd_soc_lzo_cache_sync(struct snd_soc_codec *codec)
+{
+	struct snd_soc_lzo_ctx **lzo_blocks;
+	unsigned int val;
+	int i;
+	int ret;
+
+	lzo_blocks = codec->reg_cache;
+	for_each_set_bit(i, lzo_blocks[0]->sync_bmp, lzo_blocks[0]->sync_bmp_nbits) {
+		ret = snd_soc_cache_read(codec, i, &val);
+		if (ret)
+			return ret;
+		ret = snd_soc_write(codec, i, val);
+		if (ret)
+			return ret;
+		dev_dbg(codec->dev, "Synced register %#x, value = %#x\n",
+			i, val);
+	}
+
+	return 0;
+}
+
+static int snd_soc_lzo_cache_write(struct snd_soc_codec *codec,
+				   unsigned int reg, unsigned int value)
+{
+	struct snd_soc_lzo_ctx *lzo_block, **lzo_blocks;
+	int ret, blkindex, blkpos;
+	size_t blksize, tmp_dst_len;
+	void *tmp_dst;
+
+	/* index of the compressed lzo block */
+	blkindex = snd_soc_lzo_get_blkindex(codec, reg);
+	/* register index within the decompressed block */
+	blkpos = snd_soc_lzo_get_blkpos(codec, reg);
+	/* size of the compressed block */
+	blksize = snd_soc_lzo_get_blksize(codec);
+	lzo_blocks = codec->reg_cache;
+	lzo_block = lzo_blocks[blkindex];
+
+	/* save the pointer and length of the compressed block */
+	tmp_dst = lzo_block->dst;
+	tmp_dst_len = lzo_block->dst_len;
+
+	/* prepare the source to be the compressed block */
+	lzo_block->src = lzo_block->dst;
+	lzo_block->src_len = lzo_block->dst_len;
+
+	/* decompress the block */
+	ret = snd_soc_lzo_decompress_cache_block(codec, lzo_block);
+	if (ret < 0) {
+		kfree(lzo_block->dst);
+		goto out;
+	}
+
+	/* write the new value to the cache */
+	switch (codec->driver->reg_word_size) {
+	case 1: {
+		u8 *cache;
+		cache = lzo_block->dst;
+		if (cache[blkpos] == value) {
+			kfree(lzo_block->dst);
+			goto out;
+		}
+		cache[blkpos] = value;
+	}
+	break;
+	case 2: {
+		u16 *cache;
+		cache = lzo_block->dst;
+		if (cache[blkpos] == value) {
+			kfree(lzo_block->dst);
+			goto out;
+		}
+		cache[blkpos] = value;
+	}
+	break;
+	default:
+		BUG();
+	}
+
+	/* prepare the source to be the decompressed block */
+	lzo_block->src = lzo_block->dst;
+	lzo_block->src_len = lzo_block->dst_len;
+
+	/* compress the block */
+	ret = snd_soc_lzo_compress_cache_block(codec, lzo_block);
+	if (ret < 0) {
+		kfree(lzo_block->dst);
+		kfree(lzo_block->src);
+		goto out;
+	}
+
+	/* set the bit so we know we have to sync this register */
+	set_bit(reg, lzo_block->sync_bmp);
+	kfree(tmp_dst);
+	kfree(lzo_block->src);
+	return 0;
+out:
+	lzo_block->dst = tmp_dst;
+	lzo_block->dst_len = tmp_dst_len;
+	return ret;
+}
+
+static int snd_soc_lzo_cache_read(struct snd_soc_codec *codec,
+				  unsigned int reg, unsigned int *value)
+{
+	struct snd_soc_lzo_ctx *lzo_block, **lzo_blocks;
+	int ret, blkindex, blkpos;
+	size_t blksize, tmp_dst_len;
+	void *tmp_dst;
+
+	*value = 0;
+	/* index of the compressed lzo block */
+	blkindex = snd_soc_lzo_get_blkindex(codec, reg);
+	/* register index within the decompressed block */
+	blkpos = snd_soc_lzo_get_blkpos(codec, reg);
+	/* size of the compressed block */
+	blksize = snd_soc_lzo_get_blksize(codec);
+	lzo_blocks = codec->reg_cache;
+	lzo_block = lzo_blocks[blkindex];
+
+	/* save the pointer and length of the compressed block */
+	tmp_dst = lzo_block->dst;
+	tmp_dst_len = lzo_block->dst_len;
+
+	/* prepare the source to be the compressed block */
+	lzo_block->src = lzo_block->dst;
+	lzo_block->src_len = lzo_block->dst_len;
+
+	/* decompress the block */
+	ret = snd_soc_lzo_decompress_cache_block(codec, lzo_block);
+	if (ret >= 0) {
+		/* fetch the value from the cache */
+		switch (codec->driver->reg_word_size) {
+		case 1: {
+			u8 *cache;
+			cache = lzo_block->dst;
+			*value = cache[blkpos];
+		}
+		break;
+		case 2: {
+			u16 *cache;
+			cache = lzo_block->dst;
+			*value = cache[blkpos];
+		}
+		break;
+		default:
+			BUG();
+		}
+	}
+
+	kfree(lzo_block->dst);
+	/* restore the pointer and length of the compressed block */
+	lzo_block->dst = tmp_dst;
+	lzo_block->dst_len = tmp_dst_len;
+	return 0;
+}
+
+static int snd_soc_lzo_cache_exit(struct snd_soc_codec *codec)
+{
+	struct snd_soc_lzo_ctx **lzo_blocks;
+	int i, blkcount;
+
+	lzo_blocks = codec->reg_cache;
+	if (!lzo_blocks)
+		return 0;
+
+	blkcount = snd_soc_lzo_block_count();
+	/*
+	 * the pointer to the bitmap used for syncing the cache
+	 * is shared amongst all lzo_blocks.  Ensure it is freed
+	 * only once.
+	 */
+	if (lzo_blocks[0])
+		kfree(lzo_blocks[0]->sync_bmp);
+	for (i = 0; i < blkcount; ++i) {
+		if (lzo_blocks[i]) {
+			kfree(lzo_blocks[i]->wmem);
+			kfree(lzo_blocks[i]->dst);
+		}
+		/* each lzo_block is a pointer returned by kmalloc or NULL */
+		kfree(lzo_blocks[i]);
+	}
+	kfree(lzo_blocks);
+	codec->reg_cache = NULL;
+	return 0;
+}
+
+static int snd_soc_lzo_cache_init(struct snd_soc_codec *codec)
+{
+	struct snd_soc_lzo_ctx **lzo_blocks;
+	size_t reg_size, bmp_size;
+	const struct snd_soc_codec_driver *codec_drv;
+	int ret, tofree, i, blksize, blkcount;
+	const char *p, *end;
+	unsigned long *sync_bmp;
+
+	ret = 0;
+	codec_drv = codec->driver;
+	reg_size = codec_drv->reg_cache_size * codec_drv->reg_word_size;
+
+	/*
+	 * If we have not been given a default register cache
+	 * then allocate a dummy zero-ed out region, compress it
+	 * and remember to free it afterwards.
+	 */
+	tofree = 0;
+	if (!codec->reg_def_copy)
+		tofree = 1;
+
+	if (!codec->reg_def_copy) {
+		codec->reg_def_copy = kzalloc(reg_size,
+						       GFP_KERNEL);
+		if (!codec->reg_def_copy)
+			return -ENOMEM;
+	}
+
+	blkcount = snd_soc_lzo_block_count();
+	codec->reg_cache = kzalloc(blkcount * sizeof *lzo_blocks,
+				   GFP_KERNEL);
+	if (!codec->reg_cache) {
+		ret = -ENOMEM;
+		goto err_tofree;
+	}
+	lzo_blocks = codec->reg_cache;
+
+	/*
+	 * allocate a bitmap to be used when syncing the cache with
+	 * the hardware.  Each time a register is modified, the corresponding
+	 * bit is set in the bitmap, so we know that we have to sync
+	 * that register.
+	 */
+	bmp_size = codec_drv->reg_cache_size;
+	sync_bmp = kmalloc(BITS_TO_LONGS(bmp_size) * sizeof(long),
+			   GFP_KERNEL);
+	if (!sync_bmp) {
+		ret = -ENOMEM;
+		goto err;
+	}
+	bitmap_zero(sync_bmp, bmp_size);
+
+	/* allocate the lzo blocks and initialize them */
+	for (i = 0; i < blkcount; ++i) {
+		lzo_blocks[i] = kzalloc(sizeof **lzo_blocks,
+					GFP_KERNEL);
+		if (!lzo_blocks[i]) {
+			kfree(sync_bmp);
+			ret = -ENOMEM;
+			goto err;
+		}
+		lzo_blocks[i]->sync_bmp = sync_bmp;
+		lzo_blocks[i]->sync_bmp_nbits = bmp_size;
+		/* alloc the working space for the compressed block */
+		ret = snd_soc_lzo_prepare(lzo_blocks[i]);
+		if (ret < 0)
+			goto err;
+	}
+
+	blksize = snd_soc_lzo_get_blksize(codec);
+	p = codec->reg_def_copy;
+	end = codec->reg_def_copy + reg_size;
+	/* compress the register map and fill the lzo blocks */
+	for (i = 0; i < blkcount; ++i, p += blksize) {
+		lzo_blocks[i]->src = p;
+		if (p + blksize > end)
+			lzo_blocks[i]->src_len = end - p;
+		else
+			lzo_blocks[i]->src_len = blksize;
+		ret = snd_soc_lzo_compress_cache_block(codec,
+						       lzo_blocks[i]);
+		if (ret < 0)
+			goto err;
+		lzo_blocks[i]->decompressed_size =
+			lzo_blocks[i]->src_len;
+	}
+
+	if (tofree) {
+		kfree(codec->reg_def_copy);
+		codec->reg_def_copy = NULL;
+	}
+	return 0;
+err:
+	snd_soc_cache_exit(codec);
+err_tofree:
+	if (tofree) {
+		kfree(codec->reg_def_copy);
+		codec->reg_def_copy = NULL;
+	}
+	return ret;
+}
+#endif
+
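
Besides the compressed blocks themselves, the LZO backend above keeps one shared bitmap with a bit per register: every cache write sets the corresponding bit, and snd_soc_lzo_cache_sync() walks only the set bits with for_each_set_bit() instead of replaying the whole register map. A compact sketch of that dirty-bitmap bookkeeping (restricted to 64 registers here so a single word suffices):

#include <stdint.h>
#include <stdio.h>

#define NUM_REGS 64

static unsigned int regs[NUM_REGS];	/* the (decompressed) cache contents */
static uint64_t dirty;			/* one bit per register */

static void cache_write(unsigned int reg, unsigned int val)
{
	regs[reg] = val;
	dirty |= 1ULL << reg;		/* remember that this register must be synced */
}

/* replay only the registers that were actually modified */
static void cache_sync(void)
{
	unsigned int reg;

	for (reg = 0; reg < NUM_REGS; reg++) {
		if (!(dirty & (1ULL << reg)))
			continue;
		printf("sync reg %#04x = %#06x\n", reg, regs[reg]);
	}
	dirty = 0;
}

int main(void)
{
	cache_write(0x04, 0x1234);
	cache_write(0x19, 0x00c0);
	cache_sync();			/* writes out two registers, not 64 */
	return 0;
}
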
+static int snd_soc_flat_cache_sync(struct snd_soc_codec *codec)
+{
+	int i;
+	int ret;
+	const struct snd_soc_codec_driver *codec_drv;
+	unsigned int val;
+
+	codec_drv = codec->driver;
+	for (i = 0; i < codec_drv->reg_cache_size; ++i) {
+		ret = snd_soc_cache_read(codec, i, &val);
+		if (ret)
+			return ret;
+		if (codec_drv->reg_cache_default) {
+			switch (codec_drv->reg_word_size) {
+			case 1: {
+				const u8 *cache;
+
+				cache = codec_drv->reg_cache_default;
+				if (cache[i] == val)
+					continue;
+			}
+			break;
+			case 2: {
+				const u16 *cache;
+
+				cache = codec_drv->reg_cache_default;
+				if (cache[i] == val)
+					continue;
+			}
+			break;
+			default:
+				BUG();
+			}
+		}
+		ret = snd_soc_write(codec, i, val);
+		if (ret)
+			return ret;
+		dev_dbg(codec->dev, "Synced register %#x, value = %#x\n",
+			i, val);
+	}
+	return 0;
+}
+
+static int snd_soc_flat_cache_write(struct snd_soc_codec *codec,
+				    unsigned int reg, unsigned int value)
+{
+	switch (codec->driver->reg_word_size) {
+	case 1: {
+		u8 *cache;
+
+		cache = codec->reg_cache;
+		cache[reg] = value;
+	}
+	break;
+	case 2: {
+		u16 *cache;
+
+		cache = codec->reg_cache;
+		cache[reg] = value;
+	}
+	break;
+	default:
+		BUG();
+	}
+
+	return 0;
+}
+
+static int snd_soc_flat_cache_read(struct snd_soc_codec *codec,
+				   unsigned int reg, unsigned int *value)
+{
+	switch (codec->driver->reg_word_size) {
+	case 1: {
+		u8 *cache;
+
+		cache = codec->reg_cache;
+		*value = cache[reg];
+	}
+	break;
+	case 2: {
+		u16 *cache;
+
+		cache = codec->reg_cache;
+		*value = cache[reg];
+	}
+	break;
+	default:
+		BUG();
+	}
+
+	return 0;
+}
+
+static int snd_soc_flat_cache_exit(struct snd_soc_codec *codec)
+{
+	if (!codec->reg_cache)
+		return 0;
+	kfree(codec->reg_cache);
+	codec->reg_cache = NULL;
+	return 0;
+}
+
+static int snd_soc_flat_cache_init(struct snd_soc_codec *codec)
+{
+	const struct snd_soc_codec_driver *codec_drv;
+	size_t reg_size;
+
+	codec_drv = codec->driver;
+	reg_size = codec_drv->reg_cache_size * codec_drv->reg_word_size;
+
+	/*
+	 * for flat compression, we don't need to keep a copy of the
+	 * original defaults register cache as it will definitely not
+	 * be marked as __devinitconst
+	 */
+	kfree(codec->reg_def_copy);
+	codec->reg_def_copy = NULL;
+
+	if (codec_drv->reg_cache_default)
+		codec->reg_cache = kmemdup(codec_drv->reg_cache_default,
+					   reg_size, GFP_KERNEL);
+	else
+		codec->reg_cache = kzalloc(reg_size, GFP_KERNEL);
+	if (!codec->reg_cache)
+		return -ENOMEM;
+
+	return 0;
+}
+
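
The flat backend above is a plain array indexed by register number, so its sync routine has no dirty tracking; instead it compares each cached value against reg_cache_default and skips registers that still hold their reset value. A small sketch of that compare-against-defaults sync (the array size and values are illustrative):

#include <stdio.h>

#define NUM_REGS 8

static const unsigned short defaults[NUM_REGS] = {
	0x0097, 0x0097, 0x0079, 0x0079, 0x0000, 0x0008, 0x0000, 0x000a
};
static unsigned short cache[NUM_REGS];

static void flat_sync(void)
{
	unsigned int reg;

	for (reg = 0; reg < NUM_REGS; reg++) {
		if (cache[reg] == defaults[reg])
			continue;	/* still at its reset value: skip */
		printf("sync reg %u = %#06x\n", reg, (unsigned)cache[reg]);
	}
}

int main(void)
{
	unsigned int i;

	for (i = 0; i < NUM_REGS; i++)	/* start from the hardware defaults */
		cache[i] = defaults[i];

	cache[2] = 0x0070;		/* only this register has been changed */
	flat_sync();			/* prints a single line */
	return 0;
}
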
+/* an array of all supported compression types */
+static const struct snd_soc_cache_ops cache_types[] = {
+	/* Flat *must* be the first entry for fallback */
+	{
+		.id = SND_SOC_FLAT_COMPRESSION,
+		.name = "flat",
+		.init = snd_soc_flat_cache_init,
+		.exit = snd_soc_flat_cache_exit,
+		.read = snd_soc_flat_cache_read,
+		.write = snd_soc_flat_cache_write,
+		.sync = snd_soc_flat_cache_sync
+	},
+#ifdef CONFIG_SND_SOC_CACHE_LZO
+	{
+		.id = SND_SOC_LZO_COMPRESSION,
+		.name = "LZO",
+		.init = snd_soc_lzo_cache_init,
+		.exit = snd_soc_lzo_cache_exit,
+		.read = snd_soc_lzo_cache_read,
+		.write = snd_soc_lzo_cache_write,
+		.sync = snd_soc_lzo_cache_sync
+	},
+#endif
+	{
+		.id = SND_SOC_RBTREE_COMPRESSION,
+		.name = "rbtree",
+		.init = snd_soc_rbtree_cache_init,
+		.exit = snd_soc_rbtree_cache_exit,
+		.read = snd_soc_rbtree_cache_read,
+		.write = snd_soc_rbtree_cache_write,
+		.sync = snd_soc_rbtree_cache_sync
+	}
+};
+
+int snd_soc_cache_init(struct snd_soc_codec *codec)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(cache_types); ++i)
+		if (cache_types[i].id == codec->compress_type)
+			break;
+
+	/* Fall back to flat compression */
+	if (i == ARRAY_SIZE(cache_types)) {
+		dev_warn(codec->dev, "Could not match compress type: %d\n",
+			 codec->compress_type);
+		i = 0;
+	}
+
+	mutex_init(&codec->cache_rw_mutex);
+	codec->cache_ops = &cache_types[i];
+
+	if (codec->cache_ops->init) {
+		if (codec->cache_ops->name)
+			dev_dbg(codec->dev, "Initializing %s cache for %s codec\n",
+				codec->cache_ops->name, codec->name);
+		return codec->cache_ops->init(codec);
+	}
+	return -EINVAL;
+}
+
+/*
+ * NOTE: keep in mind that this function might be called
+ * multiple times.
+ */
+int snd_soc_cache_exit(struct snd_soc_codec *codec)
+{
+	if (codec->cache_ops && codec->cache_ops->exit) {
+		if (codec->cache_ops->name)
+			dev_dbg(codec->dev, "Destroying %s cache for %s codec\n",
+				codec->cache_ops->name, codec->name);
+		return codec->cache_ops->exit(codec);
+	}
+	return -EINVAL;
+}
+
+/**
+ * snd_soc_cache_read: Fetch the value of a given register from the cache.
+ *
+ * @codec: CODEC to configure.
+ * @reg: The register index.
+ * @value: The value to be returned.
+ */
+int snd_soc_cache_read(struct snd_soc_codec *codec,
+		       unsigned int reg, unsigned int *value)
+{
+	int ret;
+
+	mutex_lock(&codec->cache_rw_mutex);
+
+	if (value && codec->cache_ops && codec->cache_ops->read) {
+		ret = codec->cache_ops->read(codec, reg, value);
+		mutex_unlock(&codec->cache_rw_mutex);
+		return ret;
+	}
+
+	mutex_unlock(&codec->cache_rw_mutex);
+	return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(snd_soc_cache_read);
+
+/**
+ * snd_soc_cache_write: Set the value of a given register in the cache.
+ *
+ * @codec: CODEC to configure.
+ * @reg: The register index.
+ * @value: The new register value.
+ */
+int snd_soc_cache_write(struct snd_soc_codec *codec,
+			unsigned int reg, unsigned int value)
+{
+	int ret;
+
+	mutex_lock(&codec->cache_rw_mutex);
+
+	if (codec->cache_ops && codec->cache_ops->write) {
+		ret = codec->cache_ops->write(codec, reg, value);
+		mutex_unlock(&codec->cache_rw_mutex);
+		return ret;
+	}
+
+	mutex_unlock(&codec->cache_rw_mutex);
+	return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(snd_soc_cache_write);
+
+/**
+ * snd_soc_cache_sync: Sync the register cache with the hardware.
+ *
+ * @codec: CODEC to configure.
+ *
+ * Any registers that should not be synced should be marked as
+ * volatile.  In general drivers can choose not to use the provided
+ * syncing functionality if they so require.
+ */
+int snd_soc_cache_sync(struct snd_soc_codec *codec)
+{
+	int ret;
+
+	if (!codec->cache_sync) {
+		return 0;
+	}
+
+	if (codec->cache_ops && codec->cache_ops->sync) {
+		if (codec->cache_ops->name)
+			dev_dbg(codec->dev, "Syncing %s cache for %s codec\n",
+				codec->cache_ops->name, codec->name);
+		ret = codec->cache_ops->sync(codec);
+		if (!ret)
+			codec->cache_sync = 0;
+		return ret;
+	}
+
+	return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(snd_soc_cache_sync);
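
All three backends are reached through the same snd_soc_cache_ops vtable: snd_soc_cache_init() matches codec->compress_type against the cache_types[] table, falls back to the flat entry when nothing matches (which is why flat must stay first), and the exported read/write wrappers serialize access through cache_rw_mutex. A stripped-down sketch of that ops-table dispatch with a fallback (the *_ops names below are illustrative; locking is omitted for brevity):

#include <stdio.h>

struct cache_ops {
	int id;
	const char *name;
	int (*read)(unsigned int reg, unsigned int *val);
	int (*write)(unsigned int reg, unsigned int val);
};

static unsigned int flat[16];

static int flat_read(unsigned int reg, unsigned int *val)
{
	*val = flat[reg];
	return 0;
}

static int flat_write(unsigned int reg, unsigned int val)
{
	flat[reg] = val;
	return 0;
}

/* flat must stay first: it is the fallback when no id matches */
static const struct cache_ops cache_types[] = {
	{ 0, "flat", flat_read, flat_write },
	/* further backends (lzo, rbtree) would follow here */
};

static const struct cache_ops *cache_init(int compress_type)
{
	unsigned int i;

	for (i = 0; i < sizeof(cache_types) / sizeof(cache_types[0]); i++)
		if (cache_types[i].id == compress_type)
			return &cache_types[i];

	fprintf(stderr, "unknown compress type %d, using %s\n",
		compress_type, cache_types[0].name);
	return &cache_types[0];
}

int main(void)
{
	const struct cache_ops *ops = cache_init(42);	/* unknown id: falls back */
	unsigned int val;

	ops->write(3, 0xbeef);
	ops->read(3, &val);
	printf("backend=%s reg3=%#x\n", ops->name, val);
	return 0;
}
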
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index 85b7d54..bac7291 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -33,12 +33,15 @@
 #include <linux/slab.h>
 #include <sound/ac97_codec.h>
 #include <sound/core.h>
+#include <sound/jack.h>
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <sound/initval.h>
 
+#define CREATE_TRACE_POINTS
+#include <trace/events/asoc.h>
+
 #define NAME_SIZE	32
 
 static DEFINE_MUTEX(pcm_mutex);
@@ -67,25 +70,6 @@
 module_param(pmdown_time, int, 0);
 MODULE_PARM_DESC(pmdown_time, "DAPM stream powerdown time (msecs)");
 
-/*
- * This function forces any delayed work to be queued and run.
- */
-static int run_delayed_work(struct delayed_work *dwork)
-{
-	int ret;
-
-	/* cancel any work waiting to be queued. */
-	ret = cancel_delayed_work(dwork);
-
-	/* if there was any work waiting then we run it now and
-	 * wait for it's completion */
-	if (ret) {
-		schedule_delayed_work(dwork, 0);
-		flush_scheduled_work();
-	}
-	return ret;
-}
-
 /* codec register dump */
 static ssize_t soc_codec_reg_show(struct snd_soc_codec *codec, char *buf)
 {
@@ -114,7 +98,7 @@
 			 * the register being volatile and the device being
 			 * powered off.
 			 */
-			ret = codec->driver->read(codec, i);
+			ret = snd_soc_read(codec, i);
 			if (ret >= 0)
 				count += snprintf(buf + count,
 						  PAGE_SIZE - count,
@@ -225,7 +209,7 @@
 		start++;
 	if (strict_strtoul(start, 16, &value))
 		return -EINVAL;
-	codec->driver->write(codec, reg, value);
+	snd_soc_write(codec, reg, value);
 	return buf_size;
 }
 
@@ -238,8 +222,10 @@
 
 static void soc_init_codec_debugfs(struct snd_soc_codec *codec)
 {
-	codec->debugfs_codec_root = debugfs_create_dir(codec->name ,
-						       debugfs_root);
+	struct dentry *debugfs_card_root = codec->card->debugfs_card_root;
+
+	codec->debugfs_codec_root = debugfs_create_dir(codec->name,
+						       debugfs_card_root);
 	if (!codec->debugfs_codec_root) {
 		printk(KERN_WARNING
 		       "ASoC: Failed to create codec debugfs directory\n");
@@ -253,20 +239,13 @@
 		printk(KERN_WARNING
 		       "ASoC: Failed to create codec register debugfs file\n");
 
-	codec->debugfs_pop_time = debugfs_create_u32("dapm_pop_time", 0644,
-						     codec->debugfs_codec_root,
-						     &codec->pop_time);
-	if (!codec->debugfs_pop_time)
-		printk(KERN_WARNING
-		       "Failed to create pop time debugfs file\n");
-
-	codec->debugfs_dapm = debugfs_create_dir("dapm",
+	codec->dapm.debugfs_dapm = debugfs_create_dir("dapm",
 						 codec->debugfs_codec_root);
-	if (!codec->debugfs_dapm)
+	if (!codec->dapm.debugfs_dapm)
 		printk(KERN_WARNING
 		       "Failed to create DAPM debugfs directory\n");
 
-	snd_soc_dapm_debugfs_init(codec);
+	snd_soc_dapm_debugfs_init(&codec->dapm);
 }
 
 static void soc_cleanup_codec_debugfs(struct snd_soc_codec *codec)
@@ -374,6 +353,29 @@
 	.llseek = default_llseek,/* read accesses f_pos */
 };
 
+static void soc_init_card_debugfs(struct snd_soc_card *card)
+{
+	card->debugfs_card_root = debugfs_create_dir(card->name,
+						     debugfs_root);
+	if (!card->debugfs_card_root) {
+		dev_warn(card->dev,
+			 "ASoC: Failed to create card debugfs directory\n");
+		return;
+	}
+
+	card->debugfs_pop_time = debugfs_create_u32("dapm_pop_time", 0644,
+						    card->debugfs_card_root,
+						    &card->pop_time);
+	if (!card->debugfs_pop_time)
+		dev_warn(card->dev,
+		       "Failed to create pop time debugfs file\n");
+}
+
+static void soc_cleanup_card_debugfs(struct snd_soc_card *card)
+{
+	debugfs_remove_recursive(card->debugfs_card_root);
+}
+
 #else
 
 static inline void soc_init_codec_debugfs(struct snd_soc_codec *codec)
@@ -383,6 +385,14 @@
 static inline void soc_cleanup_codec_debugfs(struct snd_soc_codec *codec)
 {
 }
+
+static inline void soc_init_card_debugfs(struct snd_soc_card *card)
+{
+}
+
+static inline void soc_cleanup_card_debugfs(struct snd_soc_card *card)
+{
+}
 #endif
 
 #ifdef CONFIG_SND_SOC_AC97_BUS
@@ -497,7 +507,7 @@
 		}
 	}
 
-	/* Check that the codec and cpu DAI's are compatible */
+	/* Check that the codec and cpu DAIs are compatible */
 	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
 		runtime->hw.rate_min =
 			max(codec_dai_drv->playback.rate_min,
@@ -846,7 +856,7 @@
 }
 
 /*
- * Free's resources allocated by hw_params, can be called multiple times
+ * Frees resources allocated by hw_params, can be called multiple times
  */
 static int soc_pcm_hw_free(struct snd_pcm_substream *substream)
 {
@@ -870,7 +880,7 @@
 	if (platform->driver->ops->hw_free)
 		platform->driver->ops->hw_free(substream);
 
-	/* now free hw params for the DAI's  */
+	/* now free hw params for the DAIs  */
 	if (codec_dai->driver->ops->hw_free)
 		codec_dai->driver->ops->hw_free(substream, codec_dai);
 
@@ -958,6 +968,7 @@
 {
 	struct platform_device *pdev = to_platform_device(dev);
 	struct snd_soc_card *card = platform_get_drvdata(pdev);
+	struct snd_soc_codec *codec;
 	int i;
 
 	/* If the initialization of this soc device failed, there is no codec
@@ -976,7 +987,7 @@
 	/* we're going to block userspace touching us until resume completes */
 	snd_power_change_state(card->snd_card, SNDRV_CTL_POWER_D3hot);
 
-	/* mute any active DAC's */
+	/* mute any active DACs */
 	for (i = 0; i < card->num_rtd; i++) {
 		struct snd_soc_dai *dai = card->rtd[i].codec_dai;
 		struct snd_soc_dai_driver *drv = dai->driver;
@@ -1016,8 +1027,8 @@
 
 	/* close any waiting streams and save state */
 	for (i = 0; i < card->num_rtd; i++) {
-		run_delayed_work(&card->rtd[i].delayed_work);
-		card->rtd[i].codec->suspend_bias_level = card->rtd[i].codec->bias_level;
+		flush_delayed_work_sync(&card->rtd[i].delayed_work);
+		card->rtd[i].codec->dapm.suspend_bias_level = card->rtd[i].codec->dapm.bias_level;
 	}
 
 	for (i = 0; i < card->num_rtd; i++) {
@@ -1036,12 +1047,11 @@
 	}
 
 	/* suspend all CODECs */
-	for (i = 0; i < card->num_rtd; i++) {
-		struct snd_soc_codec *codec = card->rtd[i].codec;
+	list_for_each_entry(codec, &card->codec_dev_list, card_list) {
 		/* If there are paths active then the CODEC will be held with
 		 * bias _ON and should not be suspended. */
 		if (!codec->suspended && codec->driver->suspend) {
-			switch (codec->bias_level) {
+			switch (codec->dapm.bias_level) {
 			case SND_SOC_BIAS_STANDBY:
 			case SND_SOC_BIAS_OFF:
 				codec->driver->suspend(codec, PMSG_SUSPEND);
@@ -1078,6 +1088,7 @@
 	struct snd_soc_card *card =
 			container_of(work, struct snd_soc_card, deferred_resume_work);
 	struct platform_device *pdev = to_platform_device(card->dev);
+	struct snd_soc_codec *codec;
 	int i;
 
 	/* our power state is still SNDRV_CTL_POWER_D3hot from suspend time,
@@ -1103,14 +1114,13 @@
 			cpu_dai->driver->resume(cpu_dai);
 	}
 
-	for (i = 0; i < card->num_rtd; i++) {
-		struct snd_soc_codec *codec = card->rtd[i].codec;
+	list_for_each_entry(codec, &card->codec_dev_list, card_list) {
 		/* If the CODEC was idle over suspend then it will have been
 		 * left with bias OFF or STANDBY and suspended so we must now
 		 * resume.  Otherwise the suspend was suppressed.
 		 */
 		if (codec->driver->resume && codec->suspended) {
-			switch (codec->bias_level) {
+			switch (codec->dapm.bias_level) {
 			case SND_SOC_BIAS_STANDBY:
 			case SND_SOC_BIAS_OFF:
 				codec->driver->resume(codec);
@@ -1249,9 +1259,6 @@
 		if (!strcmp(codec->name, dai_link->codec_name)) {
 			rtd->codec = codec;
 
-			if (!try_module_get(codec->dev->driver->owner))
-				return -ENODEV;
-
 			/* CODEC found, so find CODEC DAI from registered DAIs from this CODEC*/
 			list_for_each_entry(codec_dai, &dai_list, list) {
 				if (codec->dev == codec_dai->dev &&
@@ -1277,10 +1284,6 @@
 	/* no, then find CPU DAI from registered DAIs*/
 	list_for_each_entry(platform, &platform_list, list) {
 		if (!strcmp(platform->name, dai_link->platform_name)) {
-
-			if (!try_module_get(platform->dev->driver->owner))
-				return -ENODEV;
-
 			rtd->platform = platform;
 			goto out;
 		}
@@ -1299,6 +1302,27 @@
 	return 1;
 }
 
+static void soc_remove_codec(struct snd_soc_codec *codec)
+{
+	int err;
+
+	if (codec->driver->remove) {
+		err = codec->driver->remove(codec);
+		if (err < 0)
+			dev_err(codec->dev,
+				"asoc: failed to remove %s: %d\n",
+				codec->name, err);
+	}
+
+	/* Make sure all DAPM widgets are freed */
+	snd_soc_dapm_free(&codec->dapm);
+
+	soc_cleanup_codec_debugfs(codec);
+	codec->probed = 0;
+	list_del(&codec->card_list);
+	module_put(codec->dev->driver->owner);
+}
+
 static void soc_remove_dai_link(struct snd_soc_card *card, int num)
 {
 	struct snd_soc_pcm_runtime *rtd = &card->rtd[num];
@@ -1310,6 +1334,7 @@
 	/* unregister the rtd device */
 	if (rtd->dev_registered) {
 		device_remove_file(&rtd->dev, &dev_attr_pmdown_time);
+		device_remove_file(&rtd->dev, &dev_attr_codec_reg);
 		device_unregister(&rtd->dev);
 		rtd->dev_registered = 0;
 	}
@@ -1338,22 +1363,8 @@
 	}
 
 	/* remove the CODEC */
-	if (codec && codec->probed) {
-		if (codec->driver->remove) {
-			err = codec->driver->remove(codec);
-			if (err < 0)
-				printk(KERN_ERR "asoc: failed to remove %s\n", codec->name);
-		}
-
-		/* Make sure all DAPM widgets are freed */
-		snd_soc_dapm_free(codec);
-
-		soc_cleanup_codec_debugfs(codec);
-		device_remove_file(&rtd->dev, &dev_attr_codec_reg);
-		codec->probed = 0;
-		list_del(&codec->card_list);
-		module_put(codec->dev->driver->owner);
-	}
+	if (codec && codec->probed)
+		soc_remove_codec(codec);
 
 	/* remove the cpu_dai */
 	if (cpu_dai && cpu_dai->probed) {
@@ -1368,8 +1379,126 @@
 	}
 }
 
+static void soc_set_name_prefix(struct snd_soc_card *card,
+				struct snd_soc_codec *codec)
+{
+	int i;
+
+	if (card->codec_conf == NULL)
+		return;
+
+	for (i = 0; i < card->num_configs; i++) {
+		struct snd_soc_codec_conf *map = &card->codec_conf[i];
+		if (map->dev_name && !strcmp(codec->name, map->dev_name)) {
+			codec->name_prefix = map->name_prefix;
+			break;
+		}
+	}
+}
+
+static int soc_probe_codec(struct snd_soc_card *card,
+			   struct snd_soc_codec *codec)
+{
+	int ret = 0;
+
+	codec->card = card;
+	codec->dapm.card = card;
+	soc_set_name_prefix(card, codec);
+
+	if (codec->driver->probe) {
+		ret = codec->driver->probe(codec);
+		if (ret < 0) {
+			dev_err(codec->dev,
+				"asoc: failed to probe CODEC %s: %d\n",
+				codec->name, ret);
+			return ret;
+		}
+	}
+
+	soc_init_codec_debugfs(codec);
+
+	/* mark codec as probed and add to card codec list */
+	if (!try_module_get(codec->dev->driver->owner))
+		return -ENODEV;
+
+	codec->probed = 1;
+	list_add(&codec->card_list, &card->codec_dev_list);
+	list_add(&codec->dapm.list, &card->dapm_list);
+
+	return ret;
+}
+
 static void rtd_release(struct device *dev) {}
 
+static int soc_post_component_init(struct snd_soc_card *card,
+				   struct snd_soc_codec *codec,
+				   int num, int dailess)
+{
+	struct snd_soc_dai_link *dai_link = NULL;
+	struct snd_soc_aux_dev *aux_dev = NULL;
+	struct snd_soc_pcm_runtime *rtd;
+	const char *temp, *name;
+	int ret = 0;
+
+	if (!dailess) {
+		dai_link = &card->dai_link[num];
+		rtd = &card->rtd[num];
+		name = dai_link->name;
+	} else {
+		aux_dev = &card->aux_dev[num];
+		rtd = &card->rtd_aux[num];
+		name = aux_dev->name;
+	}
+
+	/* machine controls, routes and widgets are not prefixed */
+	temp = codec->name_prefix;
+	codec->name_prefix = NULL;
+
+	/* do machine specific initialization */
+	if (!dailess && dai_link->init)
+		ret = dai_link->init(rtd);
+	else if (dailess && aux_dev->init)
+		ret = aux_dev->init(&codec->dapm);
+	if (ret < 0) {
+		dev_err(card->dev, "asoc: failed to init %s: %d\n", name, ret);
+		return ret;
+	}
+	codec->name_prefix = temp;
+
+	/* Make sure all DAPM widgets are instantiated */
+	snd_soc_dapm_new_widgets(&codec->dapm);
+	snd_soc_dapm_sync(&codec->dapm);
+
+	/* register the rtd device */
+	rtd->codec = codec;
+	rtd->card = card;
+	rtd->dev.parent = card->dev;
+	rtd->dev.release = rtd_release;
+	rtd->dev.init_name = name;
+	ret = device_register(&rtd->dev);
+	if (ret < 0) {
+		dev_err(card->dev,
+			"asoc: failed to register runtime device: %d\n", ret);
+		return ret;
+	}
+	rtd->dev_registered = 1;
+
+	/* add DAPM sysfs entries for this codec */
+	ret = snd_soc_dapm_sys_add(&rtd->dev);
+	if (ret < 0)
+		dev_err(codec->dev,
+			"asoc: failed to add codec dapm sysfs entries: %d\n",
+			ret);
+
+	/* add codec sysfs entries */
+	ret = device_create_file(&rtd->dev, &dev_attr_codec_reg);
+	if (ret < 0)
+		dev_err(codec->dev,
+			"asoc: failed to add codec sysfs files: %d\n", ret);
+
+	return 0;
+}
+
 static int soc_probe_dai_link(struct snd_soc_card *card, int num)
 {
 	struct snd_soc_dai_link *dai_link = &card->dai_link[num];
@@ -1383,10 +1512,7 @@
 
 	/* config components */
 	codec_dai->codec = codec;
-	codec->card = card;
 	cpu_dai->platform = platform;
-	rtd->card = card;
-	rtd->dev.parent = card->dev;
 	codec_dai->card = card;
 	cpu_dai->card = card;
 
@@ -1410,20 +1536,9 @@
 
 	/* probe the CODEC */
 	if (!codec->probed) {
-		if (codec->driver->probe) {
-			ret = codec->driver->probe(codec);
-			if (ret < 0) {
-				printk(KERN_ERR "asoc: failed to probe CODEC %s\n",
-						codec->name);
-				return ret;
-			}
-		}
-
-		soc_init_codec_debugfs(codec);
-
-		/* mark codec as probed and add to card codec list */
-		codec->probed = 1;
-		list_add(&codec->card_list, &card->codec_dev_list);
+		ret = soc_probe_codec(card, codec);
+		if (ret < 0)
+			return ret;
 	}
 
 	/* probe the platform */
@@ -1437,6 +1552,10 @@
 			}
 		}
 		/* mark platform as probed and add to card platform list */
+
+		if (!try_module_get(platform->dev->driver->owner))
+			return -ENODEV;
+
 		platform->probed = 1;
 		list_add(&platform->card_list, &card->platform_dev_list);
 	}
@@ -1460,43 +1579,14 @@
 	/* DAPM dai link stream work */
 	INIT_DELAYED_WORK(&rtd->delayed_work, close_delayed_work);
 
-	/* now that all clients have probed, initialise the DAI link */
-	if (dai_link->init) {
-		ret = dai_link->init(rtd);
-		if (ret < 0) {
-			printk(KERN_ERR "asoc: failed to init %s\n", dai_link->stream_name);
-			return ret;
-		}
-	}
-
-	/* Make sure all DAPM widgets are instantiated */
-	snd_soc_dapm_new_widgets(codec);
-	snd_soc_dapm_sync(codec);
-
-	/* register the rtd device */
-	rtd->dev.release = rtd_release;
-	rtd->dev.init_name = dai_link->name;
-	ret = device_register(&rtd->dev);
-	if (ret < 0) {
-		printk(KERN_ERR "asoc: failed to register DAI runtime device %d\n", ret);
+	ret = soc_post_component_init(card, codec, num, 0);
+	if (ret)
 		return ret;
-	}
 
-	rtd->dev_registered = 1;
 	ret = device_create_file(&rtd->dev, &dev_attr_pmdown_time);
 	if (ret < 0)
 		printk(KERN_WARNING "asoc: failed to add pmdown_time sysfs\n");
 
-	/* add DAPM sysfs entries for this codec */
-	ret = snd_soc_dapm_sys_add(&rtd->dev);
-	if (ret < 0)
-		printk(KERN_WARNING "asoc: failed to add codec dapm sysfs entries\n");
-
-	/* add codec sysfs entries */
-	ret = device_create_file(&rtd->dev, &dev_attr_codec_reg);
-	if (ret < 0)
-		printk(KERN_WARNING "asoc: failed to add codec sysfs files\n");
-
 	/* create the pcm */
 	ret = soc_new_pcm(rtd, num);
 	if (ret < 0) {
@@ -1551,9 +1641,85 @@
 }
 #endif
 
+static int soc_probe_aux_dev(struct snd_soc_card *card, int num)
+{
+	struct snd_soc_aux_dev *aux_dev = &card->aux_dev[num];
+	struct snd_soc_codec *codec;
+	int ret = -ENODEV;
+
+	/* find CODEC from registered CODECs*/
+	list_for_each_entry(codec, &codec_list, list) {
+		if (!strcmp(codec->name, aux_dev->codec_name)) {
+			if (codec->probed) {
+				dev_err(codec->dev,
+					"asoc: codec already probed\n");
+				ret = -EBUSY;
+				goto out;
+			}
+			goto found;
+		}
+	}
+	/* codec not found */
+	dev_err(card->dev, "asoc: codec %s not found\n", aux_dev->codec_name);
+	goto out;
+
+found:
+	if (!try_module_get(codec->dev->driver->owner))
+		return -ENODEV;
+
+	ret = soc_probe_codec(card, codec);
+	if (ret < 0)
+		return ret;
+
+	ret = soc_post_component_init(card, codec, num, 1);
+
+out:
+	return ret;
+}
+
+static void soc_remove_aux_dev(struct snd_soc_card *card, int num)
+{
+	struct snd_soc_pcm_runtime *rtd = &card->rtd_aux[num];
+	struct snd_soc_codec *codec = rtd->codec;
+
+	/* unregister the rtd device */
+	if (rtd->dev_registered) {
+		device_remove_file(&rtd->dev, &dev_attr_codec_reg);
+		device_unregister(&rtd->dev);
+		rtd->dev_registered = 0;
+	}
+
+	if (codec && codec->probed)
+		soc_remove_codec(codec);
+}
+
+static int snd_soc_init_codec_cache(struct snd_soc_codec *codec,
+				    enum snd_soc_compress_type compress_type)
+{
+	int ret;
+
+	if (codec->cache_init)
+		return 0;
+
+	/* override the compress_type if necessary */
+	if (compress_type && codec->compress_type != compress_type)
+		codec->compress_type = compress_type;
+	ret = snd_soc_cache_init(codec);
+	if (ret < 0) {
+		dev_err(codec->dev, "Failed to set cache compression type: %d\n",
+			ret);
+		return ret;
+	}
+	codec->cache_init = 1;
+	return 0;
+}
+
 static void snd_soc_instantiate_card(struct snd_soc_card *card)
 {
 	struct platform_device *pdev = to_platform_device(card->dev);
+	struct snd_soc_codec *codec;
+	struct snd_soc_codec_conf *codec_conf;
+	enum snd_soc_compress_type compress_type;
 	int ret, i;
 
 	mutex_lock(&card->mutex);
@@ -1573,6 +1739,39 @@
 		return;
 	}
 
+	/* initialize the register cache for each available codec */
+	list_for_each_entry(codec, &codec_list, list) {
+		if (codec->cache_init)
+			continue;
+		/* check to see if we need to override the compress_type */
+		for (i = 0; i < card->num_configs; ++i) {
+			codec_conf = &card->codec_conf[i];
+			if (!strcmp(codec->name, codec_conf->dev_name)) {
+				compress_type = codec_conf->compress_type;
+				if (compress_type && compress_type
+				    != codec->compress_type)
+					break;
+			}
+		}
+		if (i == card->num_configs) {
+			/* no need to override the compress_type so
+			 * go ahead and do the standard thing */
+			ret = snd_soc_init_codec_cache(codec, 0);
+			if (ret < 0) {
+				mutex_unlock(&card->mutex);
+				return;
+			}
+			continue;
+		}
+		/* override the compress_type with the one supplied in
+		 * the machine driver */
+		ret = snd_soc_init_codec_cache(codec, compress_type);
+		if (ret < 0) {
+			mutex_unlock(&card->mutex);
+			return;
+		}
+	}
+
 	/* card bind complete so register a sound card */
 	ret = snd_card_create(SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1,
 			card->owner, 0, &card->snd_card);
@@ -1605,6 +1804,15 @@
 		}
 	}
 
+	for (i = 0; i < card->num_aux_devs; i++) {
+		ret = soc_probe_aux_dev(card, i);
+		if (ret < 0) {
+			pr_err("asoc: failed to add auxiliary devices %s: %d\n",
+			       card->name, ret);
+			goto probe_aux_dev_err;
+		}
+	}
+
 	snprintf(card->snd_card->shortname, sizeof(card->snd_card->shortname),
 		 "%s",  card->name);
 	snprintf(card->snd_card->longname, sizeof(card->snd_card->longname),
@@ -1613,7 +1821,7 @@
 	ret = snd_card_register(card->snd_card);
 	if (ret < 0) {
 		printk(KERN_ERR "asoc: failed to register soundcard for %s\n", card->name);
-		goto probe_dai_err;
+		goto probe_aux_dev_err;
 	}
 
 #ifdef CONFIG_SND_SOC_AC97_BUS
@@ -1623,8 +1831,8 @@
 		if (ret < 0) {
 			printk(KERN_ERR "asoc: failed to register AC97 %s\n", card->name);
 			while (--i >= 0)
-				soc_unregister_ac97_dai_link(&card->rtd[i]);
-			goto probe_dai_err;
+				soc_unregister_ac97_dai_link(card->rtd[i].codec);
+			goto probe_aux_dev_err;
 		}
 	}
 #endif
@@ -1633,6 +1841,10 @@
 	mutex_unlock(&card->mutex);
 	return;
 
+probe_aux_dev_err:
+	for (i = 0; i < card->num_aux_devs; i++)
+		soc_remove_aux_dev(card, i);
+
 probe_dai_err:
 	for (i = 0; i < card->num_links; i++)
 		soc_remove_dai_link(card, i);
@@ -1668,6 +1880,11 @@
 	INIT_LIST_HEAD(&card->dai_dev_list);
 	INIT_LIST_HEAD(&card->codec_dev_list);
 	INIT_LIST_HEAD(&card->platform_dev_list);
+	INIT_LIST_HEAD(&card->widgets);
+	INIT_LIST_HEAD(&card->paths);
+	INIT_LIST_HEAD(&card->dapm_list);
+
+	soc_init_card_debugfs(card);
 
 	ret = snd_soc_register_card(card);
 	if (ret != 0) {
@@ -1689,13 +1906,19 @@
 		/* make sure any delayed work runs */
 		for (i = 0; i < card->num_rtd; i++) {
 			struct snd_soc_pcm_runtime *rtd = &card->rtd[i];
-			run_delayed_work(&rtd->delayed_work);
+			flush_delayed_work_sync(&rtd->delayed_work);
 		}
 
+		/* remove auxiliary devices */
+		for (i = 0; i < card->num_aux_devs; i++)
+			soc_remove_aux_dev(card, i);
+
 		/* remove and free each DAI */
 		for (i = 0; i < card->num_rtd; i++)
 			soc_remove_dai_link(card, i);
 
+		soc_cleanup_card_debugfs(card);
+
 		/* remove the card */
 		if (card->remove)
 			card->remove(pdev);
@@ -1720,7 +1943,7 @@
 	 * now, we're shutting down so no imminent restart. */
 	for (i = 0; i < card->num_rtd; i++) {
 		struct snd_soc_pcm_runtime *rtd = &card->rtd[i];
-		run_delayed_work(&rtd->delayed_work);
+		flush_delayed_work_sync(&rtd->delayed_work);
 	}
 
 	snd_soc_dapm_shutdown(card);
@@ -1879,6 +2102,27 @@
 }
 EXPORT_SYMBOL_GPL(snd_soc_free_ac97_codec);
 
+unsigned int snd_soc_read(struct snd_soc_codec *codec, unsigned int reg)
+{
+	unsigned int ret;
+
+	ret = codec->read(codec, reg);
+	dev_dbg(codec->dev, "read %x => %x\n", reg, ret);
+	trace_snd_soc_reg_read(codec, reg, ret);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(snd_soc_read);
+
+unsigned int snd_soc_write(struct snd_soc_codec *codec,
+			   unsigned int reg, unsigned int val)
+{
+	dev_dbg(codec->dev, "write %x = %x\n", reg, val);
+	trace_snd_soc_reg_write(codec, reg, val);
+	return codec->write(codec, reg, val);
+}
+EXPORT_SYMBOL_GPL(snd_soc_write);
+
 /**
  * snd_soc_update_bits - update codec register bits
  * @codec: audio codec
@@ -2019,14 +2263,22 @@
 	const struct snd_kcontrol_new *controls, int num_controls)
 {
 	struct snd_card *card = codec->card->snd_card;
+	char prefixed_name[44], *name;
 	int err, i;
 
 	for (i = 0; i < num_controls; i++) {
 		const struct snd_kcontrol_new *control = &controls[i];
-		err = snd_ctl_add(card, snd_soc_cnew(control, codec, NULL));
+		if (codec->name_prefix) {
+			snprintf(prefixed_name, sizeof(prefixed_name), "%s %s",
+				 codec->name_prefix, control->name);
+			name = prefixed_name;
+		} else {
+			name = control->name;
+		}
+		err = snd_ctl_add(card, snd_soc_cnew(control, codec, name));
 		if (err < 0) {
 			dev_err(codec->dev, "%s: Failed to add %s: %d\n",
-				codec->name, control->name, err);
+				codec->name, name, err);
 			return err;
 		}
 	}
@@ -2863,10 +3115,12 @@
 	if (!card->name || !card->dev)
 		return -EINVAL;
 
-	card->rtd = kzalloc(sizeof(struct snd_soc_pcm_runtime) * card->num_links,
-			GFP_KERNEL);
+	card->rtd = kzalloc(sizeof(struct snd_soc_pcm_runtime) *
+			    (card->num_links + card->num_aux_devs),
+			    GFP_KERNEL);
 	if (card->rtd == NULL)
 		return -ENOMEM;
+	card->rtd_aux = &card->rtd[card->num_links];
 
 	for (i = 0; i < card->num_links; i++)
 		card->rtd[i].dai_link = &card->dai_link[i];
@@ -2908,7 +3162,7 @@
  * Simplify DAI link configuration by removing ".-1" from device names
  * and sanitizing names.
  */
-static inline char *fmt_single_name(struct device *dev, int *id)
+static char *fmt_single_name(struct device *dev, int *id)
 {
 	char *found, name[NAME_SIZE];
 	int id1, id2;
@@ -2916,7 +3170,7 @@
 	if (dev_name(dev) == NULL)
 		return NULL;
 
-	strncpy(name, dev_name(dev), NAME_SIZE);
+	strlcpy(name, dev_name(dev), NAME_SIZE);
 
 	/* are we a "%s.%d" name (platform and SPI components) */
 	found = strstr(name, dev->driver->name);
@@ -2939,7 +3193,7 @@
 
 			/* sanitize component name for DAI link creation */
 			snprintf(tmp, NAME_SIZE, "%s.%s", dev->driver->name, name);
-			strncpy(name, tmp, NAME_SIZE);
+			strlcpy(name, tmp, NAME_SIZE);
 		} else
 			*id = 0;
 	}
@@ -3204,9 +3458,11 @@
  * @codec: codec to register
  */
 int snd_soc_register_codec(struct device *dev,
-		struct snd_soc_codec_driver *codec_drv,
-		struct snd_soc_dai_driver *dai_drv, int num_dai)
+			   const struct snd_soc_codec_driver *codec_drv,
+			   struct snd_soc_dai_driver *dai_drv,
+			   int num_dai)
 {
+	size_t reg_size;
 	struct snd_soc_codec *codec;
 	int ret, i;
 
@@ -3223,30 +3479,37 @@
 		return -ENOMEM;
 	}
 
-	/* allocate CODEC register cache */
-	if (codec_drv->reg_cache_size && codec_drv->reg_word_size) {
+	if (codec_drv->compress_type)
+		codec->compress_type = codec_drv->compress_type;
+	else
+		codec->compress_type = SND_SOC_FLAT_COMPRESSION;
 
-		if (codec_drv->reg_cache_default)
-			codec->reg_cache = kmemdup(codec_drv->reg_cache_default,
-				codec_drv->reg_cache_size * codec_drv->reg_word_size, GFP_KERNEL);
-		else
-			codec->reg_cache = kzalloc(codec_drv->reg_cache_size *
-				codec_drv->reg_word_size, GFP_KERNEL);
-
-		if (codec->reg_cache == NULL) {
-			kfree(codec->name);
-			kfree(codec);
-			return -ENOMEM;
-		}
-	}
-
+	codec->write = codec_drv->write;
+	codec->read = codec_drv->read;
+	codec->dapm.bias_level = SND_SOC_BIAS_OFF;
+	codec->dapm.dev = dev;
+	codec->dapm.codec = codec;
 	codec->dev = dev;
 	codec->driver = codec_drv;
-	codec->bias_level = SND_SOC_BIAS_OFF;
 	codec->num_dai = num_dai;
 	mutex_init(&codec->mutex);
-	INIT_LIST_HEAD(&codec->dapm_widgets);
-	INIT_LIST_HEAD(&codec->dapm_paths);
+
+	/* allocate CODEC register cache */
+	if (codec_drv->reg_cache_size && codec_drv->reg_word_size) {
+		reg_size = codec_drv->reg_cache_size * codec_drv->reg_word_size;
+		/* it is necessary to make a copy of the default register cache
+		 * because, when using a compression type that requires the
+		 * default register cache to be marked as __devinitconst, the
+		 * kernel might have freed the array by the time we initialize
+		 * the cache.
+		 */
+		codec->reg_def_copy = kmemdup(codec_drv->reg_cache_default,
+					      reg_size, GFP_KERNEL);
+		if (!codec->reg_def_copy) {
+			ret = -ENOMEM;
+			goto fail;
+		}
+	}
 
 	for (i = 0; i < num_dai; i++) {
 		fixup_codec_formats(&dai_drv[i].playback);
@@ -3257,7 +3520,7 @@
 	if (num_dai) {
 		ret = snd_soc_register_dais(dev, dai_drv, num_dai);
 		if (ret < 0)
-			goto error;
+			goto fail;
 	}
 
 	mutex_lock(&client_mutex);
@@ -3268,9 +3531,9 @@
 	pr_debug("Registered codec '%s'\n", codec->name);
 	return 0;
 
-error:
-	if (codec->reg_cache)
-		kfree(codec->reg_cache);
+fail:
+	kfree(codec->reg_def_copy);
+	codec->reg_def_copy = NULL;
 	kfree(codec->name);
 	kfree(codec);
 	return ret;
@@ -3304,8 +3567,8 @@
 
 	pr_debug("Unregistered codec '%s'\n", codec->name);
 
-	if (codec->reg_cache)
-		kfree(codec->reg_cache);
+	snd_soc_cache_exit(codec);
+	kfree(codec->reg_def_copy);
 	kfree(codec->name);
 	kfree(codec);
 }
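For context, the auxiliary-device and per-codec configuration support added to soc-core.c above (soc_probe_aux_dev(), soc_set_name_prefix(), the codec_conf compress_type override) is consumed from machine drivers roughly as sketched below. This is a hedged illustration only: every device, control and card name here is made up and does not come from this series; the struct fields used (.name, .codec_name, .init, .dev_name, .name_prefix, .aux_dev, .num_aux_devs, .codec_conf, .num_configs) are the ones referenced by the new code above.

#include <sound/soc.h>

/* Hypothetical init hook for an amplifier registered as an aux_dev;
 * it receives the codec's DAPM context (see soc_probe_aux_dev()).
 */
static int example_amp_init(struct snd_soc_dapm_context *dapm)
{
	/* amplifier-specific widgets and routes would be added here */
	return 0;
}

static struct snd_soc_aux_dev example_aux_devs[] = {
	{
		.name = "Amp",				/* hypothetical */
		.codec_name = "example-amp.0-004b",	/* hypothetical */
		.init = example_amp_init,
	},
};

/* Prefix the amplifier's controls and widgets so they do not clash with
 * the main codec's; matched against dev_name in soc_set_name_prefix().
 */
static struct snd_soc_codec_conf example_codec_conf[] = {
	{
		.dev_name = "example-amp.0-004b",
		.name_prefix = "Amp",
	},
};

static struct snd_soc_card example_card = {
	.name = "Example",
	/* .dai_link / .num_links as before */
	.aux_dev = example_aux_devs,
	.num_aux_devs = ARRAY_SIZE(example_aux_devs),
	.codec_conf = example_codec_conf,
	.num_configs = ARRAY_SIZE(example_codec_conf),
};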
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index c721502..499730a 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -42,9 +42,11 @@
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
-#include <sound/soc-dapm.h>
+#include <sound/soc.h>
 #include <sound/initval.h>
 
+#include <trace/events/asoc.h>
+
 /* dapm power sequences - make this per codec in the future */
 static int dapm_up_seq[] = {
 	[snd_soc_dapm_pre] = 0,
@@ -54,12 +56,14 @@
 	[snd_soc_dapm_aif_out] = 3,
 	[snd_soc_dapm_mic] = 4,
 	[snd_soc_dapm_mux] = 5,
+	[snd_soc_dapm_virt_mux] = 5,
 	[snd_soc_dapm_value_mux] = 5,
 	[snd_soc_dapm_dac] = 6,
 	[snd_soc_dapm_mixer] = 7,
 	[snd_soc_dapm_mixer_named_ctl] = 7,
 	[snd_soc_dapm_pga] = 8,
 	[snd_soc_dapm_adc] = 9,
+	[snd_soc_dapm_out_drv] = 10,
 	[snd_soc_dapm_hp] = 10,
 	[snd_soc_dapm_spk] = 10,
 	[snd_soc_dapm_post] = 11,
@@ -70,6 +74,7 @@
 	[snd_soc_dapm_adc] = 1,
 	[snd_soc_dapm_hp] = 2,
 	[snd_soc_dapm_spk] = 2,
+	[snd_soc_dapm_out_drv] = 2,
 	[snd_soc_dapm_pga] = 4,
 	[snd_soc_dapm_mixer_named_ctl] = 5,
 	[snd_soc_dapm_mixer] = 5,
@@ -77,6 +82,7 @@
 	[snd_soc_dapm_mic] = 7,
 	[snd_soc_dapm_micbias] = 8,
 	[snd_soc_dapm_mux] = 9,
+	[snd_soc_dapm_virt_mux] = 9,
 	[snd_soc_dapm_value_mux] = 9,
 	[snd_soc_dapm_aif_in] = 10,
 	[snd_soc_dapm_aif_out] = 10,
@@ -90,17 +96,24 @@
 		schedule_timeout_uninterruptible(msecs_to_jiffies(pop_time));
 }
 
-static void pop_dbg(u32 pop_time, const char *fmt, ...)
+static void pop_dbg(struct device *dev, u32 pop_time, const char *fmt, ...)
 {
 	va_list args;
+	char *buf;
+
+	if (!pop_time)
+		return;
+
+	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+	if (buf == NULL)
+		return;
 
 	va_start(args, fmt);
-
-	if (pop_time) {
-		vprintk(fmt, args);
-	}
-
+	vsnprintf(buf, PAGE_SIZE, fmt, args);
+	dev_info(dev, "%s", buf);
 	va_end(args);
+
+	kfree(buf);
 }
 
 /* create a new dapm widget */
@@ -120,36 +133,45 @@
  * Returns 0 for success else error.
  */
 static int snd_soc_dapm_set_bias_level(struct snd_soc_card *card,
-		struct snd_soc_codec *codec, enum snd_soc_bias_level level)
+				       struct snd_soc_dapm_context *dapm,
+				       enum snd_soc_bias_level level)
 {
 	int ret = 0;
 
 	switch (level) {
 	case SND_SOC_BIAS_ON:
-		dev_dbg(codec->dev, "Setting full bias\n");
+		dev_dbg(dapm->dev, "Setting full bias\n");
 		break;
 	case SND_SOC_BIAS_PREPARE:
-		dev_dbg(codec->dev, "Setting bias prepare\n");
+		dev_dbg(dapm->dev, "Setting bias prepare\n");
 		break;
 	case SND_SOC_BIAS_STANDBY:
-		dev_dbg(codec->dev, "Setting standby bias\n");
+		dev_dbg(dapm->dev, "Setting standby bias\n");
 		break;
 	case SND_SOC_BIAS_OFF:
-		dev_dbg(codec->dev, "Setting bias off\n");
+		dev_dbg(dapm->dev, "Setting bias off\n");
 		break;
 	default:
-		dev_err(codec->dev, "Setting invalid bias %d\n", level);
+		dev_err(dapm->dev, "Setting invalid bias %d\n", level);
 		return -EINVAL;
 	}
 
+	trace_snd_soc_bias_level_start(card, level);
+
 	if (card && card->set_bias_level)
 		ret = card->set_bias_level(card, level);
 	if (ret == 0) {
-		if (codec->driver->set_bias_level)
-			ret = codec->driver->set_bias_level(codec, level);
+		if (dapm->codec && dapm->codec->driver->set_bias_level)
+			ret = dapm->codec->driver->set_bias_level(dapm->codec, level);
 		else
-			codec->bias_level = level;
+			dapm->bias_level = level;
 	}
+	if (ret == 0) {
+		if (card && card->set_bias_level_post)
+			ret = card->set_bias_level_post(card, level);
+	}
+
+	trace_snd_soc_bias_level_done(card, level);
 
 	return ret;
 }
@@ -196,6 +218,20 @@
 		}
 	}
 	break;
+	case snd_soc_dapm_virt_mux: {
+		struct soc_enum *e = (struct soc_enum *)w->kcontrols[i].private_value;
+
+		p->connect = 0;
+		/* since a virtual mux has no backing registers to
+		 * decide which path to connect, it will try to match
+		 * with the first enumeration.  This is to ensure
+		 * that the default mux choice (the first) will be
+		 * correctly powered up during initialization.
+		 */
+		if (!strcmp(p->name, e->texts[0]))
+			p->connect = 1;
+	}
+	break;
 	case snd_soc_dapm_value_mux: {
 		struct soc_enum *e = (struct soc_enum *)
 			w->kcontrols[i].private_value;
@@ -217,6 +253,7 @@
 	break;
 	/* does not affect routing - always connected */
 	case snd_soc_dapm_pga:
+	case snd_soc_dapm_out_drv:
 	case snd_soc_dapm_output:
 	case snd_soc_dapm_adc:
 	case snd_soc_dapm_input:
@@ -241,7 +278,7 @@
 }
 
 /* connect mux widget to its interconnecting audio paths */
-static int dapm_connect_mux(struct snd_soc_codec *codec,
+static int dapm_connect_mux(struct snd_soc_dapm_context *dapm,
 	struct snd_soc_dapm_widget *src, struct snd_soc_dapm_widget *dest,
 	struct snd_soc_dapm_path *path, const char *control_name,
 	const struct snd_kcontrol_new *kcontrol)
@@ -251,7 +288,7 @@
 
 	for (i = 0; i < e->max; i++) {
 		if (!(strcmp(control_name, e->texts[i]))) {
-			list_add(&path->list, &codec->dapm_paths);
+			list_add(&path->list, &dapm->card->paths);
 			list_add(&path->list_sink, &dest->sources);
 			list_add(&path->list_source, &src->sinks);
 			path->name = (char*)e->texts[i];
@@ -264,7 +301,7 @@
 }
 
 /* connect mixer widget to its interconnecting audio paths */
-static int dapm_connect_mixer(struct snd_soc_codec *codec,
+static int dapm_connect_mixer(struct snd_soc_dapm_context *dapm,
 	struct snd_soc_dapm_widget *src, struct snd_soc_dapm_widget *dest,
 	struct snd_soc_dapm_path *path, const char *control_name)
 {
@@ -273,7 +310,7 @@
 	/* search for mixer kcontrol */
 	for (i = 0; i < dest->num_kcontrols; i++) {
 		if (!strcmp(control_name, dest->kcontrols[i].name)) {
-			list_add(&path->list, &codec->dapm_paths);
+			list_add(&path->list, &dapm->card->paths);
 			list_add(&path->list_sink, &dest->sources);
 			list_add(&path->list_source, &src->sinks);
 			path->name = dest->kcontrols[i].name;
@@ -290,6 +327,8 @@
 	int change, power;
 	unsigned int old, new;
 	struct snd_soc_codec *codec = widget->codec;
+	struct snd_soc_dapm_context *dapm = widget->dapm;
+	struct snd_soc_card *card = dapm->card;
 
 	/* check for valid widgets */
 	if (widget->reg < 0 || widget->id == snd_soc_dapm_input ||
@@ -309,24 +348,26 @@
 
 	change = old != new;
 	if (change) {
-		pop_dbg(codec->pop_time, "pop test %s : %s in %d ms\n",
+		pop_dbg(dapm->dev, card->pop_time,
+			"pop test %s : %s in %d ms\n",
 			widget->name, widget->power ? "on" : "off",
-			codec->pop_time);
-		pop_wait(codec->pop_time);
+			card->pop_time);
+		pop_wait(card->pop_time);
 		snd_soc_write(codec, widget->reg, new);
 	}
-	pr_debug("reg %x old %x new %x change %d\n", widget->reg,
-		 old, new, change);
+	dev_dbg(dapm->dev, "reg %x old %x new %x change %d\n", widget->reg,
+		old, new, change);
 	return change;
 }
 
 /* create new dapm mixer control */
-static int dapm_new_mixer(struct snd_soc_codec *codec,
+static int dapm_new_mixer(struct snd_soc_dapm_context *dapm,
 	struct snd_soc_dapm_widget *w)
 {
 	int i, ret = 0;
 	size_t name_len;
 	struct snd_soc_dapm_path *path;
+	struct snd_card *card = dapm->codec->card->snd_card;
 
 	/* add kcontrol */
 	for (i = 0; i < w->num_kcontrols; i++) {
@@ -368,11 +409,11 @@
 
 			path->kcontrol = snd_soc_cnew(&w->kcontrols[i], w,
 				path->long_name);
-			ret = snd_ctl_add(codec->card->snd_card, path->kcontrol);
+			ret = snd_ctl_add(card, path->kcontrol);
 			if (ret < 0) {
-				printk(KERN_ERR "asoc: failed to add dapm kcontrol %s: %d\n",
-				       path->long_name,
-				       ret);
+				dev_err(dapm->dev,
+					"asoc: failed to add dapm kcontrol %s: %d\n",
+					path->long_name, ret);
 				kfree(path->long_name);
 				path->long_name = NULL;
 				return ret;
@@ -383,20 +424,22 @@
 }
 
 /* create new dapm mux control */
-static int dapm_new_mux(struct snd_soc_codec *codec,
+static int dapm_new_mux(struct snd_soc_dapm_context *dapm,
 	struct snd_soc_dapm_widget *w)
 {
 	struct snd_soc_dapm_path *path = NULL;
 	struct snd_kcontrol *kcontrol;
+	struct snd_card *card = dapm->codec->card->snd_card;
 	int ret = 0;
 
 	if (!w->num_kcontrols) {
-		printk(KERN_ERR "asoc: mux %s has no controls\n", w->name);
+		dev_err(dapm->dev, "asoc: mux %s has no controls\n", w->name);
 		return -EINVAL;
 	}
 
 	kcontrol = snd_soc_cnew(&w->kcontrols[0], w, w->name);
-	ret = snd_ctl_add(codec->card->snd_card, kcontrol);
+	ret = snd_ctl_add(card, kcontrol);
+
 	if (ret < 0)
 		goto err;
 
@@ -406,26 +449,27 @@
 	return ret;
 
 err:
-	printk(KERN_ERR "asoc: failed to add kcontrol %s\n", w->name);
+	dev_err(dapm->dev, "asoc: failed to add kcontrol %s\n", w->name);
 	return ret;
 }
 
 /* create new dapm volume control */
-static int dapm_new_pga(struct snd_soc_codec *codec,
+static int dapm_new_pga(struct snd_soc_dapm_context *dapm,
 	struct snd_soc_dapm_widget *w)
 {
 	if (w->num_kcontrols)
-		pr_err("asoc: PGA controls not supported: '%s'\n", w->name);
+		dev_err(w->dapm->dev,
+			"asoc: PGA controls not supported: '%s'\n", w->name);
 
 	return 0;
 }
 
 /* reset 'walked' bit for each dapm path */
-static inline void dapm_clear_walk(struct snd_soc_codec *codec)
+static inline void dapm_clear_walk(struct snd_soc_dapm_context *dapm)
 {
 	struct snd_soc_dapm_path *p;
 
-	list_for_each_entry(p, &codec->dapm_paths, list)
+	list_for_each_entry(p, &dapm->card->paths, list)
 		p->walked = 0;
 }
 
@@ -435,13 +479,14 @@
  */
 static int snd_soc_dapm_suspend_check(struct snd_soc_dapm_widget *widget)
 {
-	int level = snd_power_get_state(widget->codec->card->snd_card);
+	int level = snd_power_get_state(widget->dapm->codec->card->snd_card);
 
 	switch (level) {
 	case SNDRV_CTL_POWER_D3hot:
 	case SNDRV_CTL_POWER_D3cold:
 		if (widget->ignore_suspend)
-			pr_debug("%s ignoring suspend\n", widget->name);
+			dev_dbg(widget->dapm->dev, "%s ignoring suspend\n",
+				widget->name);
 		return widget->ignore_suspend;
 	default:
 		return 1;
@@ -572,7 +617,7 @@
 
 	/* call any power change event handlers */
 	if (w->event)
-		pr_debug("power %s event for %s flags %x\n",
+		dev_dbg(w->dapm->dev, "power %s event for %s flags %x\n",
 			 w->power ? "on" : "off",
 			 w->name, w->event_flags);
 
@@ -621,9 +666,9 @@
 	int in, out;
 
 	in = is_connected_input_ep(w);
-	dapm_clear_walk(w->codec);
+	dapm_clear_walk(w->dapm);
 	out = is_connected_output_ep(w);
-	dapm_clear_walk(w->codec);
+	dapm_clear_walk(w->dapm);
 	return out != 0 && in != 0;
 }
 
@@ -634,7 +679,7 @@
 
 	if (w->active) {
 		in = is_connected_input_ep(w);
-		dapm_clear_walk(w->codec);
+		dapm_clear_walk(w->dapm);
 		return in != 0;
 	} else {
 		return dapm_generic_check_power(w);
@@ -648,7 +693,7 @@
 
 	if (w->active) {
 		out = is_connected_output_ep(w);
-		dapm_clear_walk(w->codec);
+		dapm_clear_walk(w->dapm);
 		return out != 0;
 	} else {
 		return dapm_generic_check_power(w);
@@ -674,7 +719,7 @@
 		}
 	}
 
-	dapm_clear_walk(w->codec);
+	dapm_clear_walk(w->dapm);
 
 	return power;
 }
@@ -687,8 +732,8 @@
 		return sort[a->id] - sort[b->id];
 	if (a->reg != b->reg)
 		return a->reg - b->reg;
-	if (a->codec != b->codec)
-		return (unsigned long)a->codec - (unsigned long)b->codec;
+	if (a->dapm != b->dapm)
+		return (unsigned long)a->dapm - (unsigned long)b->dapm;
 
 	return 0;
 }
@@ -709,12 +754,57 @@
 	list_add_tail(&new_widget->power_list, list);
 }
 
+static void dapm_seq_check_event(struct snd_soc_dapm_context *dapm,
+				 struct snd_soc_dapm_widget *w, int event)
+{
+	struct snd_soc_card *card = dapm->card;
+	const char *ev_name;
+	int power, ret;
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		ev_name = "PRE_PMU";
+		power = 1;
+		break;
+	case SND_SOC_DAPM_POST_PMU:
+		ev_name = "POST_PMU";
+		power = 1;
+		break;
+	case SND_SOC_DAPM_PRE_PMD:
+		ev_name = "PRE_PMD";
+		power = 0;
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		ev_name = "POST_PMD";
+		power = 0;
+		break;
+	default:
+		BUG();
+		return;
+	}
+
+	if (w->power != power)
+		return;
+
+	if (w->event && (w->event_flags & event)) {
+		pop_dbg(dapm->dev, card->pop_time, "pop test : %s %s\n",
+			w->name, ev_name);
+		trace_snd_soc_dapm_widget_event_start(w, event);
+		ret = w->event(w, NULL, event);
+		trace_snd_soc_dapm_widget_event_done(w, event);
+		if (ret < 0)
+			pr_err("%s: %s event failed: %d\n",
+			       ev_name, w->name, ret);
+	}
+}
+
 /* Apply the coalesced changes from a DAPM sequence */
-static void dapm_seq_run_coalesced(struct snd_soc_codec *codec,
+static void dapm_seq_run_coalesced(struct snd_soc_dapm_context *dapm,
 				   struct list_head *pending)
 {
+	struct snd_soc_card *card = dapm->card;
 	struct snd_soc_dapm_widget *w;
-	int reg, power, ret;
+	int reg, power;
 	unsigned int value = 0;
 	unsigned int mask = 0;
 	unsigned int cur_mask;
@@ -735,64 +825,26 @@
 		if (power)
 			value |= cur_mask;
 
-		pop_dbg(codec->pop_time,
+		pop_dbg(dapm->dev, card->pop_time,
 			"pop test : Queue %s: reg=0x%x, 0x%x/0x%x\n",
 			w->name, reg, value, mask);
 
-		/* power up pre event */
-		if (w->power && w->event &&
-		    (w->event_flags & SND_SOC_DAPM_PRE_PMU)) {
-			pop_dbg(codec->pop_time, "pop test : %s PRE_PMU\n",
-				w->name);
-			ret = w->event(w, NULL, SND_SOC_DAPM_PRE_PMU);
-			if (ret < 0)
-				pr_err("%s: pre event failed: %d\n",
-				       w->name, ret);
-		}
-
-		/* power down pre event */
-		if (!w->power && w->event &&
-		    (w->event_flags & SND_SOC_DAPM_PRE_PMD)) {
-			pop_dbg(codec->pop_time, "pop test : %s PRE_PMD\n",
-				w->name);
-			ret = w->event(w, NULL, SND_SOC_DAPM_PRE_PMD);
-			if (ret < 0)
-				pr_err("%s: pre event failed: %d\n",
-				       w->name, ret);
-		}
+		/* Check for events */
+		dapm_seq_check_event(dapm, w, SND_SOC_DAPM_PRE_PMU);
+		dapm_seq_check_event(dapm, w, SND_SOC_DAPM_PRE_PMD);
 	}
 
 	if (reg >= 0) {
-		pop_dbg(codec->pop_time,
+		pop_dbg(dapm->dev, card->pop_time,
 			"pop test : Applying 0x%x/0x%x to %x in %dms\n",
-			value, mask, reg, codec->pop_time);
-		pop_wait(codec->pop_time);
-		snd_soc_update_bits(codec, reg, mask, value);
+			value, mask, reg, card->pop_time);
+		pop_wait(card->pop_time);
+		snd_soc_update_bits(dapm->codec, reg, mask, value);
 	}
 
 	list_for_each_entry(w, pending, power_list) {
-		/* power up post event */
-		if (w->power && w->event &&
-		    (w->event_flags & SND_SOC_DAPM_POST_PMU)) {
-			pop_dbg(codec->pop_time, "pop test : %s POST_PMU\n",
-				w->name);
-			ret = w->event(w,
-				       NULL, SND_SOC_DAPM_POST_PMU);
-			if (ret < 0)
-				pr_err("%s: post event failed: %d\n",
-				       w->name, ret);
-		}
-
-		/* power down post event */
-		if (!w->power && w->event &&
-		    (w->event_flags & SND_SOC_DAPM_POST_PMD)) {
-			pop_dbg(codec->pop_time, "pop test : %s POST_PMD\n",
-				w->name);
-			ret = w->event(w, NULL, SND_SOC_DAPM_POST_PMD);
-			if (ret < 0)
-				pr_err("%s: post event failed: %d\n",
-				       w->name, ret);
-		}
+		dapm_seq_check_event(dapm, w, SND_SOC_DAPM_POST_PMU);
+		dapm_seq_check_event(dapm, w, SND_SOC_DAPM_POST_PMD);
 	}
 }
 
@@ -804,26 +856,29 @@
  * Currently anything that requires more than a single write is not
  * handled.
  */
-static void dapm_seq_run(struct snd_soc_codec *codec, struct list_head *list,
-			 int event, int sort[])
+static void dapm_seq_run(struct snd_soc_dapm_context *dapm,
+			 struct list_head *list, int event, int sort[])
 {
 	struct snd_soc_dapm_widget *w, *n;
 	LIST_HEAD(pending);
 	int cur_sort = -1;
 	int cur_reg = SND_SOC_NOPM;
+	struct snd_soc_dapm_context *cur_dapm = NULL;
 	int ret;
 
 	list_for_each_entry_safe(w, n, list, power_list) {
 		ret = 0;
 
 		/* Do we need to apply any queued changes? */
-		if (sort[w->id] != cur_sort || w->reg != cur_reg) {
+		if (sort[w->id] != cur_sort || w->reg != cur_reg ||
+		    w->dapm != cur_dapm) {
 			if (!list_empty(&pending))
-				dapm_seq_run_coalesced(codec, &pending);
+				dapm_seq_run_coalesced(cur_dapm, &pending);
 
 			INIT_LIST_HEAD(&pending);
 			cur_sort = -1;
 			cur_reg = SND_SOC_NOPM;
+			cur_dapm = NULL;
 		}
 
 		switch (w->id) {
@@ -867,19 +922,55 @@
 			/* Queue it up for application */
 			cur_sort = sort[w->id];
 			cur_reg = w->reg;
+			cur_dapm = w->dapm;
 			list_move(&w->power_list, &pending);
 			break;
 		}
 
 		if (ret < 0)
-			pr_err("Failed to apply widget power: %d\n",
-			       ret);
+			dev_err(w->dapm->dev,
+				"Failed to apply widget power: %d\n", ret);
 	}
 
 	if (!list_empty(&pending))
-		dapm_seq_run_coalesced(codec, &pending);
+		dapm_seq_run_coalesced(dapm, &pending);
 }
 
+static void dapm_widget_update(struct snd_soc_dapm_context *dapm)
+{
+	struct snd_soc_dapm_update *update = dapm->update;
+	struct snd_soc_dapm_widget *w;
+	int ret;
+
+	if (!update)
+		return;
+
+	w = update->widget;
+
+	if (w->event &&
+	    (w->event_flags & SND_SOC_DAPM_PRE_REG)) {
+		ret = w->event(w, update->kcontrol, SND_SOC_DAPM_PRE_REG);
+		if (ret != 0)
+			pr_err("%s DAPM pre-event failed: %d\n",
+			       w->name, ret);
+	}
+
+	ret = snd_soc_update_bits(w->codec, update->reg, update->mask,
+				  update->val);
+	if (ret < 0)
+		pr_err("%s DAPM update failed: %d\n", w->name, ret);
+
+	if (w->event &&
+	    (w->event_flags & SND_SOC_DAPM_POST_REG)) {
+		ret = w->event(w, update->kcontrol, SND_SOC_DAPM_POST_REG);
+		if (ret != 0)
+			pr_err("%s DAPM post-event failed: %d\n",
+			       w->name, ret);
+	}
+}
+
 /*
  * Scan each dapm widget for complete audio path.
  * A complete path is a route that has valid endpoints i.e.:-
@@ -889,20 +980,26 @@
  *  o Input pin to Output pin (bypass, sidetone)
  *  o DAC to ADC (loopback).
  */
-static int dapm_power_widgets(struct snd_soc_codec *codec, int event)
+static int dapm_power_widgets(struct snd_soc_dapm_context *dapm, int event)
 {
-	struct snd_soc_card *card = codec->card;
+	struct snd_soc_card *card = dapm->codec->card;
 	struct snd_soc_dapm_widget *w;
+	struct snd_soc_dapm_context *d;
 	LIST_HEAD(up_list);
 	LIST_HEAD(down_list);
 	int ret = 0;
 	int power;
-	int sys_power = 0;
+
+	trace_snd_soc_dapm_start(card);
+
+	list_for_each_entry(d, &card->dapm_list, list)
+		if (d->n_widgets)
+			d->dev_power = 0;
 
 	/* Check which widgets we need to power and store them in
 	 * lists indicating if they should be powered up or down.
 	 */
-	list_for_each_entry(w, &codec->dapm_widgets, list) {
+	list_for_each_entry(w, &card->widgets, list) {
 		switch (w->id) {
 		case snd_soc_dapm_pre:
 			dapm_seq_insert(w, &down_list, dapm_down_seq);
@@ -920,11 +1017,13 @@
 			else
 				power = 1;
 			if (power)
-				sys_power = 1;
+				w->dapm->dev_power = 1;
 
 			if (w->power == power)
 				continue;
 
+			trace_snd_soc_dapm_widget_power(w, power);
+
 			if (power)
 				dapm_seq_insert(w, &up_list, dapm_up_seq);
 			else
@@ -938,26 +1037,26 @@
 	/* If there are no DAPM widgets then try to figure out power from the
 	 * event type.
 	 */
-	if (list_empty(&codec->dapm_widgets)) {
+	if (!dapm->n_widgets) {
 		switch (event) {
 		case SND_SOC_DAPM_STREAM_START:
 		case SND_SOC_DAPM_STREAM_RESUME:
-			sys_power = 1;
+			dapm->dev_power = 1;
 			break;
 		case SND_SOC_DAPM_STREAM_STOP:
-			sys_power = !!codec->active;
+			dapm->dev_power = !!dapm->codec->active;
 			break;
 		case SND_SOC_DAPM_STREAM_SUSPEND:
-			sys_power = 0;
+			dapm->dev_power = 0;
 			break;
 		case SND_SOC_DAPM_STREAM_NOP:
-			switch (codec->bias_level) {
+			switch (dapm->bias_level) {
 				case SND_SOC_BIAS_STANDBY:
 				case SND_SOC_BIAS_OFF:
-					sys_power = 0;
+					dapm->dev_power = 0;
 					break;
 				default:
-					sys_power = 1;
+					dapm->dev_power = 1;
 					break;
 			}
 			break;
@@ -966,52 +1065,71 @@
 		}
 	}
 
-	if (sys_power && codec->bias_level == SND_SOC_BIAS_OFF) {
-		ret = snd_soc_dapm_set_bias_level(card, codec,
-						  SND_SOC_BIAS_STANDBY);
-		if (ret != 0)
-			pr_err("Failed to turn on bias: %d\n", ret);
-	}
+	list_for_each_entry(d, &dapm->card->dapm_list, list) {
+		if (d->dev_power && d->bias_level == SND_SOC_BIAS_OFF) {
+			ret = snd_soc_dapm_set_bias_level(card, d,
+							  SND_SOC_BIAS_STANDBY);
+			if (ret != 0)
+				dev_err(d->dev,
+					"Failed to turn on bias: %d\n", ret);
+		}
 
-	/* If we're changing to all on or all off then prepare */
-	if ((sys_power && codec->bias_level == SND_SOC_BIAS_STANDBY) ||
-	    (!sys_power && codec->bias_level == SND_SOC_BIAS_ON)) {
-		ret = snd_soc_dapm_set_bias_level(card, codec, SND_SOC_BIAS_PREPARE);
-		if (ret != 0)
-			pr_err("Failed to prepare bias: %d\n", ret);
+		/* If we're changing to all on or all off then prepare */
+		if ((d->dev_power && d->bias_level == SND_SOC_BIAS_STANDBY) ||
+		    (!d->dev_power && d->bias_level == SND_SOC_BIAS_ON)) {
+			ret = snd_soc_dapm_set_bias_level(card, d,
+							  SND_SOC_BIAS_PREPARE);
+			if (ret != 0)
+				dev_err(d->dev,
+					"Failed to prepare bias: %d\n", ret);
+		}
 	}
 
 	/* Power down widgets first; try to avoid amplifying pops. */
-	dapm_seq_run(codec, &down_list, event, dapm_down_seq);
+	dapm_seq_run(dapm, &down_list, event, dapm_down_seq);
+
+	dapm_widget_update(dapm);
 
 	/* Now power up. */
-	dapm_seq_run(codec, &up_list, event, dapm_up_seq);
+	dapm_seq_run(dapm, &up_list, event, dapm_up_seq);
 
-	/* If we just powered the last thing off drop to standby bias */
-	if (codec->bias_level == SND_SOC_BIAS_PREPARE && !sys_power) {
-		ret = snd_soc_dapm_set_bias_level(card, codec, SND_SOC_BIAS_STANDBY);
-		if (ret != 0)
-			pr_err("Failed to apply standby bias: %d\n", ret);
+	list_for_each_entry(d, &dapm->card->dapm_list, list) {
+		/* If we just powered the last thing off drop to standby bias */
+		if (d->bias_level == SND_SOC_BIAS_PREPARE && !d->dev_power) {
+			ret = snd_soc_dapm_set_bias_level(card, d,
+							  SND_SOC_BIAS_STANDBY);
+			if (ret != 0)
+				dev_err(d->dev,
+					"Failed to apply standby bias: %d\n",
+					ret);
+		}
+
+		/* If we're in standby and can support bias off then do that */
+		if (d->bias_level == SND_SOC_BIAS_STANDBY &&
+		    d->idle_bias_off) {
+			ret = snd_soc_dapm_set_bias_level(card, d,
+							  SND_SOC_BIAS_OFF);
+			if (ret != 0)
+				dev_err(d->dev,
+					"Failed to turn off bias: %d\n", ret);
+		}
+
+		/* If we just powered up then move to active bias */
+		if (d->bias_level == SND_SOC_BIAS_PREPARE && d->dev_power) {
+			ret = snd_soc_dapm_set_bias_level(card, d,
+							  SND_SOC_BIAS_ON);
+			if (ret != 0)
+				dev_err(d->dev,
+					"Failed to apply active bias: %d\n",
+					ret);
+		}
 	}
 
-	/* If we're in standby and can support bias off then do that */
-	if (codec->bias_level == SND_SOC_BIAS_STANDBY &&
-	    codec->idle_bias_off) {
-		ret = snd_soc_dapm_set_bias_level(card, codec, SND_SOC_BIAS_OFF);
-		if (ret != 0)
-			pr_err("Failed to turn off bias: %d\n", ret);
-	}
+	pop_dbg(dapm->dev, card->pop_time,
+		"DAPM sequencing finished, waiting %dms\n", card->pop_time);
+	pop_wait(card->pop_time);
 
-	/* If we just powered up then move to active bias */
-	if (codec->bias_level == SND_SOC_BIAS_PREPARE && sys_power) {
-		ret = snd_soc_dapm_set_bias_level(card, codec, SND_SOC_BIAS_ON);
-		if (ret != 0)
-			pr_err("Failed to apply active bias: %d\n", ret);
-	}
-
-	pop_dbg(codec->pop_time, "DAPM sequencing finished, waiting %dms\n",
-		codec->pop_time);
-	pop_wait(codec->pop_time);
+	trace_snd_soc_dapm_done(card);
 
 	return 0;
 }
@@ -1038,9 +1156,9 @@
 		return -ENOMEM;
 
 	in = is_connected_input_ep(w);
-	dapm_clear_walk(w->codec);
+	dapm_clear_walk(w->dapm);
 	out = is_connected_output_ep(w);
-	dapm_clear_walk(w->codec);
+	dapm_clear_walk(w->dapm);
 
 	ret = snprintf(buf, PAGE_SIZE, "%s: %s  in %d out %d",
 		       w->name, w->power ? "On" : "Off", in, out);
@@ -1090,29 +1208,29 @@
 	.llseek = default_llseek,
 };
 
-void snd_soc_dapm_debugfs_init(struct snd_soc_codec *codec)
+void snd_soc_dapm_debugfs_init(struct snd_soc_dapm_context *dapm)
 {
 	struct snd_soc_dapm_widget *w;
 	struct dentry *d;
 
-	if (!codec->debugfs_dapm)
+	if (!dapm->debugfs_dapm)
 		return;
 
-	list_for_each_entry(w, &codec->dapm_widgets, list) {
-		if (!w->name)
+	list_for_each_entry(w, &dapm->card->widgets, list) {
+		if (!w->name || w->dapm != dapm)
 			continue;
 
 		d = debugfs_create_file(w->name, 0444,
-					codec->debugfs_dapm, w,
+					dapm->debugfs_dapm, w,
 					&dapm_widget_power_fops);
 		if (!d)
-			printk(KERN_WARNING
-			       "ASoC: Failed to create %s debugfs file\n",
-			       w->name);
+			dev_warn(w->dapm->dev,
+				"ASoC: Failed to create %s debugfs file\n",
+				w->name);
 	}
 }
 #else
-void snd_soc_dapm_debugfs_init(struct snd_soc_codec *codec)
+void snd_soc_dapm_debugfs_init(struct snd_soc_dapm_context *dapm)
 {
 }
 #endif
@@ -1126,6 +1244,7 @@
 	int found = 0;
 
 	if (widget->id != snd_soc_dapm_mux &&
+	    widget->id != snd_soc_dapm_virt_mux &&
 	    widget->id != snd_soc_dapm_value_mux)
 		return -ENODEV;
 
@@ -1133,7 +1252,7 @@
 		return 0;
 
 	/* find dapm widget path assoc with kcontrol */
-	list_for_each_entry(path, &widget->codec->dapm_paths, list) {
+	list_for_each_entry(path, &widget->dapm->card->paths, list) {
 		if (path->kcontrol != kcontrol)
 			continue;
 
@@ -1149,7 +1268,7 @@
 	}
 
 	if (found)
-		dapm_power_widgets(widget->codec, SND_SOC_DAPM_STREAM_NOP);
+		dapm_power_widgets(widget->dapm, SND_SOC_DAPM_STREAM_NOP);
 
 	return 0;
 }
@@ -1167,7 +1286,7 @@
 		return -ENODEV;
 
 	/* find dapm widget path assoc with kcontrol */
-	list_for_each_entry(path, &widget->codec->dapm_paths, list) {
+	list_for_each_entry(path, &widget->dapm->card->paths, list) {
 		if (path->kcontrol != kcontrol)
 			continue;
 
@@ -1178,7 +1297,7 @@
 	}
 
 	if (found)
-		dapm_power_widgets(widget->codec, SND_SOC_DAPM_STREAM_NOP);
+		dapm_power_widgets(widget->dapm, SND_SOC_DAPM_STREAM_NOP);
 
 	return 0;
 }
@@ -1194,7 +1313,9 @@
 	int count = 0;
 	char *state = "not set";
 
-	list_for_each_entry(w, &codec->dapm_widgets, list) {
+	list_for_each_entry(w, &codec->card->widgets, list) {
+		if (w->dapm != &codec->dapm)
+			continue;
 
 		/* only display widgets that burn power */
 		switch (w->id) {
@@ -1206,6 +1327,7 @@
 		case snd_soc_dapm_dac:
 		case snd_soc_dapm_adc:
 		case snd_soc_dapm_pga:
+		case snd_soc_dapm_out_drv:
 		case snd_soc_dapm_mixer:
 		case snd_soc_dapm_mixer_named_ctl:
 		case snd_soc_dapm_supply:
@@ -1218,7 +1340,7 @@
 		}
 	}
 
-	switch (codec->bias_level) {
+	switch (codec->dapm.bias_level) {
 	case SND_SOC_BIAS_ON:
 		state = "On";
 		break;
@@ -1250,31 +1372,50 @@
 }
 
 /* free all dapm widgets and resources */
-static void dapm_free_widgets(struct snd_soc_codec *codec)
+static void dapm_free_widgets(struct snd_soc_dapm_context *dapm)
 {
 	struct snd_soc_dapm_widget *w, *next_w;
 	struct snd_soc_dapm_path *p, *next_p;
 
-	list_for_each_entry_safe(w, next_w, &codec->dapm_widgets, list) {
+	list_for_each_entry_safe(w, next_w, &dapm->card->widgets, list) {
+		if (w->dapm != dapm)
+			continue;
 		list_del(&w->list);
+		/*
+		 * remove source and sink paths associated with this widget.
+		 * While removing a path, remove the reference to it from both
+		 * the source and sink widgets so the path is removed only once.
+		 */
+		list_for_each_entry_safe(p, next_p, &w->sources, list_sink) {
+			list_del(&p->list_sink);
+			list_del(&p->list_source);
+			list_del(&p->list);
+			kfree(p->long_name);
+			kfree(p);
+		}
+		list_for_each_entry_safe(p, next_p, &w->sinks, list_source) {
+			list_del(&p->list_sink);
+			list_del(&p->list_source);
+			list_del(&p->list);
+			kfree(p->long_name);
+			kfree(p);
+		}
+		kfree(w->name);
 		kfree(w);
 	}
-
-	list_for_each_entry_safe(p, next_p, &codec->dapm_paths, list) {
-		list_del(&p->list);
-		kfree(p->long_name);
-		kfree(p);
-	}
 }
 
-static int snd_soc_dapm_set_pin(struct snd_soc_codec *codec,
+static int snd_soc_dapm_set_pin(struct snd_soc_dapm_context *dapm,
 				const char *pin, int status)
 {
 	struct snd_soc_dapm_widget *w;
 
-	list_for_each_entry(w, &codec->dapm_widgets, list) {
+	list_for_each_entry(w, &dapm->card->widgets, list) {
+		if (w->dapm != dapm)
+			continue;
 		if (!strcmp(w->name, pin)) {
-			pr_debug("dapm: %s: pin %s\n", codec->name, pin);
+			dev_dbg(w->dapm->dev, "dapm: pin %s = %d\n",
+				pin, status);
 			w->connected = status;
 			/* Allow disabling of forced pins */
 			if (status == 0)
@@ -1283,46 +1424,72 @@
 		}
 	}
 
-	pr_err("dapm: %s: configuring unknown pin %s\n", codec->name, pin);
+	dev_err(dapm->dev, "dapm: unknown pin %s\n", pin);
 	return -EINVAL;
 }
 
 /**
  * snd_soc_dapm_sync - scan and power dapm paths
- * @codec: audio codec
+ * @dapm: DAPM context
  *
  * Walks all dapm audio paths and powers widgets according to their
  * stream or path usage.
  *
  * Returns 0 for success.
  */
-int snd_soc_dapm_sync(struct snd_soc_codec *codec)
+int snd_soc_dapm_sync(struct snd_soc_dapm_context *dapm)
 {
-	return dapm_power_widgets(codec, SND_SOC_DAPM_STREAM_NOP);
+	return dapm_power_widgets(dapm, SND_SOC_DAPM_STREAM_NOP);
 }
 EXPORT_SYMBOL_GPL(snd_soc_dapm_sync);
 
-static int snd_soc_dapm_add_route(struct snd_soc_codec *codec,
+static int snd_soc_dapm_add_route(struct snd_soc_dapm_context *dapm,
 				  const struct snd_soc_dapm_route *route)
 {
 	struct snd_soc_dapm_path *path;
 	struct snd_soc_dapm_widget *wsource = NULL, *wsink = NULL, *w;
-	const char *sink = route->sink;
+	struct snd_soc_dapm_widget *wtsource = NULL, *wtsink = NULL;
+	const char *sink;
 	const char *control = route->control;
-	const char *source = route->source;
+	const char *source;
+	char prefixed_sink[80];
+	char prefixed_source[80];
 	int ret = 0;
 
-	/* find src and dest widgets */
-	list_for_each_entry(w, &codec->dapm_widgets, list) {
+	if (dapm->codec->name_prefix) {
+		snprintf(prefixed_sink, sizeof(prefixed_sink), "%s %s",
+			 dapm->codec->name_prefix, route->sink);
+		sink = prefixed_sink;
+		snprintf(prefixed_source, sizeof(prefixed_source), "%s %s",
+			 dapm->codec->name_prefix, route->source);
+		source = prefixed_source;
+	} else {
+		sink = route->sink;
+		source = route->source;
+	}
 
+	/*
+	 * find src and dest widgets over all widgets but favor a widget from
+	 * current DAPM context
+	 */
+	list_for_each_entry(w, &dapm->card->widgets, list) {
 		if (!wsink && !(strcmp(w->name, sink))) {
-			wsink = w;
+			wtsink = w;
+			if (w->dapm == dapm)
+				wsink = w;
 			continue;
 		}
 		if (!wsource && !(strcmp(w->name, source))) {
-			wsource = w;
+			wtsource = w;
+			if (w->dapm == dapm)
+				wsource = w;
 		}
 	}
+	/* use widget from another DAPM context if not found from this */
+	if (!wsink)
+		wsink = wtsink;
+	if (!wsource)
+		wsource = wtsource;
 
 	if (wsource == NULL || wsink == NULL)
 		return -ENODEV;
@@ -1356,7 +1523,7 @@
 
 	/* connect static paths */
 	if (control == NULL) {
-		list_add(&path->list, &codec->dapm_paths);
+		list_add(&path->list, &dapm->card->paths);
 		list_add(&path->list_sink, &wsink->sources);
 		list_add(&path->list_source, &wsource->sinks);
 		path->connect = 1;
@@ -1368,6 +1535,7 @@
 	case snd_soc_dapm_adc:
 	case snd_soc_dapm_dac:
 	case snd_soc_dapm_pga:
+	case snd_soc_dapm_out_drv:
 	case snd_soc_dapm_input:
 	case snd_soc_dapm_output:
 	case snd_soc_dapm_micbias:
@@ -1377,14 +1545,15 @@
 	case snd_soc_dapm_supply:
 	case snd_soc_dapm_aif_in:
 	case snd_soc_dapm_aif_out:
-		list_add(&path->list, &codec->dapm_paths);
+		list_add(&path->list, &dapm->card->paths);
 		list_add(&path->list_sink, &wsink->sources);
 		list_add(&path->list_source, &wsource->sinks);
 		path->connect = 1;
 		return 0;
 	case snd_soc_dapm_mux:
+	case snd_soc_dapm_virt_mux:
 	case snd_soc_dapm_value_mux:
-		ret = dapm_connect_mux(codec, wsource, wsink, path, control,
+		ret = dapm_connect_mux(dapm, wsource, wsink, path, control,
 			&wsink->kcontrols[0]);
 		if (ret != 0)
 			goto err;
@@ -1392,7 +1561,7 @@
 	case snd_soc_dapm_switch:
 	case snd_soc_dapm_mixer:
 	case snd_soc_dapm_mixer_named_ctl:
-		ret = dapm_connect_mixer(codec, wsource, wsink, path, control);
+		ret = dapm_connect_mixer(dapm, wsource, wsink, path, control);
 		if (ret != 0)
 			goto err;
 		break;
@@ -1400,7 +1569,7 @@
 	case snd_soc_dapm_mic:
 	case snd_soc_dapm_line:
 	case snd_soc_dapm_spk:
-		list_add(&path->list, &codec->dapm_paths);
+		list_add(&path->list, &dapm->card->paths);
 		list_add(&path->list_sink, &wsink->sources);
 		list_add(&path->list_source, &wsource->sinks);
 		path->connect = 0;
@@ -1409,15 +1578,15 @@
 	return 0;
 
 err:
-	printk(KERN_WARNING "asoc: no dapm match for %s --> %s --> %s\n", source,
-		control, sink);
+	dev_warn(dapm->dev, "asoc: no dapm match for %s --> %s --> %s\n",
+		 source, control, sink);
 	kfree(path);
 	return ret;
 }
 
 /**
  * snd_soc_dapm_add_routes - Add routes between DAPM widgets
- * @codec: codec
+ * @dapm: DAPM context
  * @route: audio routes
  * @num: number of routes
  *
@@ -1428,17 +1597,16 @@
  * Returns 0 for success else error. On error all resources can be freed
  * with a call to snd_soc_card_free().
  */
-int snd_soc_dapm_add_routes(struct snd_soc_codec *codec,
+int snd_soc_dapm_add_routes(struct snd_soc_dapm_context *dapm,
 			    const struct snd_soc_dapm_route *route, int num)
 {
 	int i, ret;
 
 	for (i = 0; i < num; i++) {
-		ret = snd_soc_dapm_add_route(codec, route);
+		ret = snd_soc_dapm_add_route(dapm, route);
 		if (ret < 0) {
-			printk(KERN_ERR "Failed to add route %s->%s\n",
-			       route->source,
-			       route->sink);
+			dev_err(dapm->dev, "Failed to add route %s->%s\n",
+				route->source, route->sink);
 			return ret;
 		}
 		route++;
@@ -1450,17 +1618,17 @@
 
 /**
  * snd_soc_dapm_new_widgets - add new dapm widgets
- * @codec: audio codec
+ * @dapm: DAPM context
  *
  * Checks the codec for any new dapm widgets and creates them if found.
  *
  * Returns 0 for success.
  */
-int snd_soc_dapm_new_widgets(struct snd_soc_codec *codec)
+int snd_soc_dapm_new_widgets(struct snd_soc_dapm_context *dapm)
 {
 	struct snd_soc_dapm_widget *w;
 
-	list_for_each_entry(w, &codec->dapm_widgets, list)
+	list_for_each_entry(w, &dapm->card->widgets, list)
 	{
 		if (w->new)
 			continue;
@@ -1470,12 +1638,13 @@
 		case snd_soc_dapm_mixer:
 		case snd_soc_dapm_mixer_named_ctl:
 			w->power_check = dapm_generic_check_power;
-			dapm_new_mixer(codec, w);
+			dapm_new_mixer(dapm, w);
 			break;
 		case snd_soc_dapm_mux:
+		case snd_soc_dapm_virt_mux:
 		case snd_soc_dapm_value_mux:
 			w->power_check = dapm_generic_check_power;
-			dapm_new_mux(codec, w);
+			dapm_new_mux(dapm, w);
 			break;
 		case snd_soc_dapm_adc:
 		case snd_soc_dapm_aif_out:
@@ -1486,8 +1655,9 @@
 			w->power_check = dapm_dac_check_power;
 			break;
 		case snd_soc_dapm_pga:
+		case snd_soc_dapm_out_drv:
 			w->power_check = dapm_generic_check_power;
-			dapm_new_pga(codec, w);
+			dapm_new_pga(dapm, w);
 			break;
 		case snd_soc_dapm_input:
 		case snd_soc_dapm_output:
@@ -1508,7 +1678,7 @@
 		w->new = 1;
 	}
 
-	dapm_power_widgets(codec, SND_SOC_DAPM_STREAM_NOP);
+	dapm_power_widgets(dapm, SND_SOC_DAPM_STREAM_NOP);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(snd_soc_dapm_new_widgets);
@@ -1569,13 +1739,12 @@
 		(struct soc_mixer_control *)kcontrol->private_value;
 	unsigned int reg = mc->reg;
 	unsigned int shift = mc->shift;
-	unsigned int rshift = mc->rshift;
 	int max = mc->max;
 	unsigned int mask = (1 << fls(max)) - 1;
 	unsigned int invert = mc->invert;
-	unsigned int val, val2, val_mask;
-	int connect;
-	int ret;
+	unsigned int val, val_mask;
+	int connect, change;
+	struct snd_soc_dapm_update update;
 
 	val = (ucontrol->value.integer.value[0] & mask);
 
@@ -1583,18 +1752,12 @@
 		val = max - val;
 	val_mask = mask << shift;
 	val = val << shift;
-	if (shift != rshift) {
-		val2 = (ucontrol->value.integer.value[1] & mask);
-		if (invert)
-			val2 = max - val2;
-		val_mask |= mask << rshift;
-		val |= val2 << rshift;
-	}
 
 	mutex_lock(&widget->codec->mutex);
 	widget->value = val;
 
-	if (snd_soc_test_bits(widget->codec, reg, val_mask, val)) {
+	change = snd_soc_test_bits(widget->codec, reg, val_mask, val);
+	if (change) {
 		if (val)
 			/* new connection */
 			connect = invert ? 0:1;
@@ -1602,28 +1765,20 @@
 			/* old connection must be powered down */
 			connect = invert ? 1:0;
 
+		update.kcontrol = kcontrol;
+		update.widget = widget;
+		update.reg = reg;
+		update.mask = mask;
+		update.val = val;
+		widget->dapm->update = &update;
+
 		dapm_mixer_update_power(widget, kcontrol, connect);
+
+		widget->dapm->update = NULL;
 	}
 
-	if (widget->event) {
-		if (widget->event_flags & SND_SOC_DAPM_PRE_REG) {
-			ret = widget->event(widget, kcontrol,
-						SND_SOC_DAPM_PRE_REG);
-			if (ret < 0) {
-				ret = 1;
-				goto out;
-			}
-		}
-		ret = snd_soc_update_bits(widget->codec, reg, val_mask, val);
-		if (widget->event_flags & SND_SOC_DAPM_POST_REG)
-			ret = widget->event(widget, kcontrol,
-						SND_SOC_DAPM_POST_REG);
-	} else
-		ret = snd_soc_update_bits(widget->codec, reg, val_mask, val);
-
-out:
 	mutex_unlock(&widget->codec->mutex);
-	return ret;
+	return 0;
 }
 EXPORT_SYMBOL_GPL(snd_soc_dapm_put_volsw);
 
@@ -1671,7 +1826,7 @@
 	struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
 	unsigned int val, mux, change;
 	unsigned int mask, bitmask;
-	int ret = 0;
+	struct snd_soc_dapm_update update;
 
 	for (bitmask = 1; bitmask < e->max; bitmask <<= 1)
 		;
@@ -1690,24 +1845,20 @@
 	mutex_lock(&widget->codec->mutex);
 	widget->value = val;
 	change = snd_soc_test_bits(widget->codec, e->reg, mask, val);
+
+	update.kcontrol = kcontrol;
+	update.widget = widget;
+	update.reg = e->reg;
+	update.mask = mask;
+	update.val = val;
+	widget->dapm->update = &update;
+
 	dapm_mux_update_power(widget, kcontrol, change, mux, e);
 
-	if (widget->event_flags & SND_SOC_DAPM_PRE_REG) {
-		ret = widget->event(widget,
-				    kcontrol, SND_SOC_DAPM_PRE_REG);
-		if (ret < 0)
-			goto out;
-	}
+	widget->dapm->update = NULL;
 
-	ret = snd_soc_update_bits(widget->codec, e->reg, mask, val);
-
-	if (widget->event_flags & SND_SOC_DAPM_POST_REG)
-		ret = widget->event(widget,
-				    kcontrol, SND_SOC_DAPM_POST_REG);
-
-out:
 	mutex_unlock(&widget->codec->mutex);
-	return ret;
+	return change;
 }
 EXPORT_SYMBOL_GPL(snd_soc_dapm_put_enum_double);
 
@@ -1819,7 +1970,7 @@
 	struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
 	unsigned int val, mux, change;
 	unsigned int mask;
-	int ret = 0;
+	struct snd_soc_dapm_update update;
 
 	if (ucontrol->value.enumerated.item[0] > e->max - 1)
 		return -EINVAL;
@@ -1836,24 +1987,20 @@
 	mutex_lock(&widget->codec->mutex);
 	widget->value = val;
 	change = snd_soc_test_bits(widget->codec, e->reg, mask, val);
+
+	update.kcontrol = kcontrol;
+	update.widget = widget;
+	update.reg = e->reg;
+	update.mask = mask;
+	update.val = val;
+	widget->dapm->update = &update;
+
 	dapm_mux_update_power(widget, kcontrol, change, mux, e);
 
-	if (widget->event_flags & SND_SOC_DAPM_PRE_REG) {
-		ret = widget->event(widget,
-				    kcontrol, SND_SOC_DAPM_PRE_REG);
-		if (ret < 0)
-			goto out;
-	}
+	widget->dapm->update = NULL;
 
-	ret = snd_soc_update_bits(widget->codec, e->reg, mask, val);
-
-	if (widget->event_flags & SND_SOC_DAPM_POST_REG)
-		ret = widget->event(widget,
-				    kcontrol, SND_SOC_DAPM_POST_REG);
-
-out:
 	mutex_unlock(&widget->codec->mutex);
-	return ret;
+	return change;
 }
 EXPORT_SYMBOL_GPL(snd_soc_dapm_put_value_enum_double);
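The three put handlers above now follow the same deferred-write pattern: rather than calling snd_soc_update_bits() (and the PRE_REG/POST_REG widget events) directly, they describe the pending register change in a snd_soc_dapm_update and hand it to the DAPM sequencer, which applies it at the appropriate point while re-powering the graph. A minimal sketch of the pattern, using only fields visible in the hunks above:

	struct snd_soc_dapm_update update = {
		.kcontrol = kcontrol,
		.widget   = widget,
		.reg      = reg,
		.mask     = mask,
		.val      = val,
	};

	widget->dapm->update = &update;	/* sequencer performs the register write */
	dapm_mixer_update_power(widget, kcontrol, connect);
	widget->dapm->update = NULL;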
 
@@ -1892,7 +2039,7 @@
 	mutex_lock(&codec->mutex);
 
 	ucontrol->value.integer.value[0] =
-		snd_soc_dapm_get_pin_status(codec, pin);
+		snd_soc_dapm_get_pin_status(&codec->dapm, pin);
 
 	mutex_unlock(&codec->mutex);
 
@@ -1915,11 +2062,11 @@
 	mutex_lock(&codec->mutex);
 
 	if (ucontrol->value.integer.value[0])
-		snd_soc_dapm_enable_pin(codec, pin);
+		snd_soc_dapm_enable_pin(&codec->dapm, pin);
 	else
-		snd_soc_dapm_disable_pin(codec, pin);
+		snd_soc_dapm_disable_pin(&codec->dapm, pin);
 
-	snd_soc_dapm_sync(codec);
+	snd_soc_dapm_sync(&codec->dapm);
 
 	mutex_unlock(&codec->mutex);
 
@@ -1929,26 +2076,43 @@
 
 /**
  * snd_soc_dapm_new_control - create new dapm control
- * @codec: audio codec
+ * @dapm: DAPM context
  * @widget: widget template
  *
  * Creates a new dapm control based upon the template.
  *
  * Returns 0 for success else error.
  */
-int snd_soc_dapm_new_control(struct snd_soc_codec *codec,
+int snd_soc_dapm_new_control(struct snd_soc_dapm_context *dapm,
 	const struct snd_soc_dapm_widget *widget)
 {
 	struct snd_soc_dapm_widget *w;
+	size_t name_len;
 
 	if ((w = dapm_cnew_widget(widget)) == NULL)
 		return -ENOMEM;
 
-	w->codec = codec;
+	name_len = strlen(widget->name) + 1;
+	if (dapm->codec->name_prefix)
+		name_len += 1 + strlen(dapm->codec->name_prefix);
+	w->name = kmalloc(name_len, GFP_KERNEL);
+	if (w->name == NULL) {
+		kfree(w);
+		return -ENOMEM;
+	}
+	if (dapm->codec->name_prefix)
+		snprintf(w->name, name_len, "%s %s",
+			dapm->codec->name_prefix, widget->name);
+	else
+		snprintf(w->name, name_len, "%s", widget->name);
+
+	dapm->n_widgets++;
+	w->dapm = dapm;
+	w->codec = dapm->codec;
 	INIT_LIST_HEAD(&w->sources);
 	INIT_LIST_HEAD(&w->sinks);
 	INIT_LIST_HEAD(&w->list);
-	list_add(&w->list, &codec->dapm_widgets);
+	list_add(&w->list, &dapm->card->widgets);
 
 	/* machine layer set ups unconnected pins and insertions */
 	w->connected = 1;
@@ -1958,7 +2122,7 @@
 
 /**
  * snd_soc_dapm_new_controls - create new dapm controls
- * @codec: audio codec
+ * @dapm: DAPM context
  * @widget: widget array
  * @num: number of widgets
  *
@@ -1966,18 +2130,18 @@
  *
  * Returns 0 for success else error.
  */
-int snd_soc_dapm_new_controls(struct snd_soc_codec *codec,
+int snd_soc_dapm_new_controls(struct snd_soc_dapm_context *dapm,
 	const struct snd_soc_dapm_widget *widget,
 	int num)
 {
 	int i, ret;
 
 	for (i = 0; i < num; i++) {
-		ret = snd_soc_dapm_new_control(codec, widget);
+		ret = snd_soc_dapm_new_control(dapm, widget);
 		if (ret < 0) {
-			printk(KERN_ERR
-			       "ASoC: Failed to create DAPM control %s: %d\n",
-			       widget->name, ret);
+			dev_err(dapm->dev,
+				"ASoC: Failed to create DAPM control %s: %d\n",
+				widget->name, ret);
 			return ret;
 		}
 		widget++;
@@ -1986,34 +2150,17 @@
 }
 EXPORT_SYMBOL_GPL(snd_soc_dapm_new_controls);
 
-
-/**
- * snd_soc_dapm_stream_event - send a stream event to the dapm core
- * @codec: audio codec
- * @stream: stream name
- * @event: stream event
- *
- * Sends a stream event to the dapm core. The core then makes any
- * necessary widget power changes.
- *
- * Returns 0 for success else error.
- */
-int snd_soc_dapm_stream_event(struct snd_soc_pcm_runtime *rtd,
+static void soc_dapm_stream_event(struct snd_soc_dapm_context *dapm,
 	const char *stream, int event)
 {
-	struct snd_soc_codec *codec = rtd->codec;
 	struct snd_soc_dapm_widget *w;
 
-	if (stream == NULL)
-		return 0;
-
-	mutex_lock(&codec->mutex);
-	list_for_each_entry(w, &codec->dapm_widgets, list)
+	list_for_each_entry(w, &dapm->card->widgets, list)
 	{
-		if (!w->sname)
+		if (!w->sname || w->dapm != dapm)
 			continue;
-		pr_debug("widget %s\n %s stream %s event %d\n",
-			 w->name, w->sname, stream, event);
+		dev_dbg(w->dapm->dev, "widget %s\n %s stream %s event %d\n",
+			w->name, w->sname, stream, event);
 		if (strstr(w->sname, stream)) {
 			switch(event) {
 			case SND_SOC_DAPM_STREAM_START:
@@ -2031,7 +2178,30 @@
 		}
 	}
 
-	dapm_power_widgets(codec, event);
+	dapm_power_widgets(dapm, event);
+}
+
+/**
+ * snd_soc_dapm_stream_event - send a stream event to the dapm core
+ * @rtd: PCM runtime data
+ * @stream: stream name
+ * @event: stream event
+ *
+ * Sends a stream event to the dapm core. The core then makes any
+ * necessary widget power changes.
+ *
+ * Returns 0 for success else error.
+ */
+int snd_soc_dapm_stream_event(struct snd_soc_pcm_runtime *rtd,
+	const char *stream, int event)
+{
+	struct snd_soc_codec *codec = rtd->codec;
+
+	if (stream == NULL)
+		return 0;
+
+	mutex_lock(&codec->mutex);
+	soc_dapm_stream_event(&codec->dapm, stream, event);
 	mutex_unlock(&codec->mutex);
 	return 0;
 }
@@ -2039,7 +2209,7 @@
 
 /**
  * snd_soc_dapm_enable_pin - enable pin.
- * @codec: SoC codec
+ * @dapm: DAPM context
  * @pin: pin name
  *
  * Enables input/output pin and its parents or children widgets iff there is
@@ -2047,15 +2217,15 @@
  * NOTE: snd_soc_dapm_sync() needs to be called after this for DAPM to
  * do any widget power switching.
  */
-int snd_soc_dapm_enable_pin(struct snd_soc_codec *codec, const char *pin)
+int snd_soc_dapm_enable_pin(struct snd_soc_dapm_context *dapm, const char *pin)
 {
-	return snd_soc_dapm_set_pin(codec, pin, 1);
+	return snd_soc_dapm_set_pin(dapm, pin, 1);
 }
 EXPORT_SYMBOL_GPL(snd_soc_dapm_enable_pin);
 
 /**
  * snd_soc_dapm_force_enable_pin - force a pin to be enabled
- * @codec: SoC codec
+ * @dapm: DAPM context
  * @pin: pin name
  *
  * Enables input/output pin regardless of any other state.  This is
@@ -2065,42 +2235,47 @@
  * NOTE: snd_soc_dapm_sync() needs to be called after this for DAPM to
  * do any widget power switching.
  */
-int snd_soc_dapm_force_enable_pin(struct snd_soc_codec *codec, const char *pin)
+int snd_soc_dapm_force_enable_pin(struct snd_soc_dapm_context *dapm,
+				  const char *pin)
 {
 	struct snd_soc_dapm_widget *w;
 
-	list_for_each_entry(w, &codec->dapm_widgets, list) {
+	list_for_each_entry(w, &dapm->card->widgets, list) {
+		if (w->dapm != dapm)
+			continue;
 		if (!strcmp(w->name, pin)) {
-			pr_debug("dapm: %s: pin %s\n", codec->name, pin);
+			dev_dbg(w->dapm->dev,
+				"dapm: force enable pin %s\n", pin);
 			w->connected = 1;
 			w->force = 1;
 			return 0;
 		}
 	}
 
-	pr_err("dapm: %s: configuring unknown pin %s\n", codec->name, pin);
+	dev_err(dapm->dev, "dapm: unknown pin %s\n", pin);
 	return -EINVAL;
 }
 EXPORT_SYMBOL_GPL(snd_soc_dapm_force_enable_pin);
 
 /**
  * snd_soc_dapm_disable_pin - disable pin.
- * @codec: SoC codec
+ * @dapm: DAPM context
  * @pin: pin name
  *
  * Disables input/output pin and its parents or children widgets.
  * NOTE: snd_soc_dapm_sync() needs to be called after this for DAPM to
  * do any widget power switching.
  */
-int snd_soc_dapm_disable_pin(struct snd_soc_codec *codec, const char *pin)
+int snd_soc_dapm_disable_pin(struct snd_soc_dapm_context *dapm,
+			     const char *pin)
 {
-	return snd_soc_dapm_set_pin(codec, pin, 0);
+	return snd_soc_dapm_set_pin(dapm, pin, 0);
 }
 EXPORT_SYMBOL_GPL(snd_soc_dapm_disable_pin);
 
 /**
  * snd_soc_dapm_nc_pin - permanently disable pin.
- * @codec: SoC codec
+ * @dapm: DAPM context
  * @pin: pin name
  *
  * Marks the specified pin as being not connected, disabling it along
@@ -2112,26 +2287,29 @@
  * NOTE: snd_soc_dapm_sync() needs to be called after this for DAPM to
  * do any widget power switching.
  */
-int snd_soc_dapm_nc_pin(struct snd_soc_codec *codec, const char *pin)
+int snd_soc_dapm_nc_pin(struct snd_soc_dapm_context *dapm, const char *pin)
 {
-	return snd_soc_dapm_set_pin(codec, pin, 0);
+	return snd_soc_dapm_set_pin(dapm, pin, 0);
 }
 EXPORT_SYMBOL_GPL(snd_soc_dapm_nc_pin);
 
 /**
  * snd_soc_dapm_get_pin_status - get audio pin status
- * @codec: audio codec
+ * @dapm: DAPM context
  * @pin: audio signal pin endpoint (or start point)
  *
  * Get audio pin status - connected or disconnected.
  *
  * Returns 1 for connected otherwise 0.
  */
-int snd_soc_dapm_get_pin_status(struct snd_soc_codec *codec, const char *pin)
+int snd_soc_dapm_get_pin_status(struct snd_soc_dapm_context *dapm,
+				const char *pin)
 {
 	struct snd_soc_dapm_widget *w;
 
-	list_for_each_entry(w, &codec->dapm_widgets, list) {
+	list_for_each_entry(w, &dapm->card->widgets, list) {
+		if (w->dapm != dapm)
+			continue;
 		if (!strcmp(w->name, pin))
 			return w->connected;
 	}
@@ -2142,7 +2320,7 @@
 
 /**
  * snd_soc_dapm_ignore_suspend - ignore suspend status for DAPM endpoint
- * @codec: audio codec
+ * @dapm: DAPM context
  * @pin: audio signal pin endpoint (or start point)
  *
  * Mark the given endpoint or pin as ignoring suspend.  When the
@@ -2151,18 +2329,21 @@
  * normal means at suspend time, it will not be turned on if it was not
  * already enabled.
  */
-int snd_soc_dapm_ignore_suspend(struct snd_soc_codec *codec, const char *pin)
+int snd_soc_dapm_ignore_suspend(struct snd_soc_dapm_context *dapm,
+				const char *pin)
 {
 	struct snd_soc_dapm_widget *w;
 
-	list_for_each_entry(w, &codec->dapm_widgets, list) {
+	list_for_each_entry(w, &dapm->card->widgets, list) {
+		if (w->dapm != dapm)
+			continue;
 		if (!strcmp(w->name, pin)) {
 			w->ignore_suspend = 1;
 			return 0;
 		}
 	}
 
-	pr_err("Unknown DAPM pin: %s\n", pin);
+	dev_err(dapm->dev, "dapm: unknown pin %s\n", pin);
 	return -EINVAL;
 }
 EXPORT_SYMBOL_GPL(snd_soc_dapm_ignore_suspend);
@@ -2173,20 +2354,23 @@
  *
  * Free all dapm widgets and resources.
  */
-void snd_soc_dapm_free(struct snd_soc_codec *codec)
+void snd_soc_dapm_free(struct snd_soc_dapm_context *dapm)
 {
-	snd_soc_dapm_sys_remove(codec->dev);
-	dapm_free_widgets(codec);
+	snd_soc_dapm_sys_remove(dapm->dev);
+	dapm_free_widgets(dapm);
+	list_del(&dapm->list);
 }
 EXPORT_SYMBOL_GPL(snd_soc_dapm_free);
 
-static void soc_dapm_shutdown_codec(struct snd_soc_codec *codec)
+static void soc_dapm_shutdown_codec(struct snd_soc_dapm_context *dapm)
 {
 	struct snd_soc_dapm_widget *w;
 	LIST_HEAD(down_list);
 	int powerdown = 0;
 
-	list_for_each_entry(w, &codec->dapm_widgets, list) {
+	list_for_each_entry(w, &dapm->card->widgets, list) {
+		if (w->dapm != dapm)
+			continue;
 		if (w->power) {
 			dapm_seq_insert(w, &down_list, dapm_down_seq);
 			w->power = 0;
@@ -2198,9 +2382,9 @@
 	 * standby.
 	 */
 	if (powerdown) {
-		snd_soc_dapm_set_bias_level(NULL, codec, SND_SOC_BIAS_PREPARE);
-		dapm_seq_run(codec, &down_list, 0, dapm_down_seq);
-		snd_soc_dapm_set_bias_level(NULL, codec, SND_SOC_BIAS_STANDBY);
+		snd_soc_dapm_set_bias_level(NULL, dapm, SND_SOC_BIAS_PREPARE);
+		dapm_seq_run(dapm, &down_list, 0, dapm_down_seq);
+		snd_soc_dapm_set_bias_level(NULL, dapm, SND_SOC_BIAS_STANDBY);
 	}
 }
 
@@ -2211,10 +2395,10 @@
 {
 	struct snd_soc_codec *codec;
 
-	list_for_each_entry(codec, &card->codec_dev_list, list)
-		soc_dapm_shutdown_codec(codec);
-
-	snd_soc_dapm_set_bias_level(card, codec, SND_SOC_BIAS_OFF);
+	list_for_each_entry(codec, &card->codec_dev_list, list) {
+		soc_dapm_shutdown_codec(&codec->dapm);
+		snd_soc_dapm_set_bias_level(card, &codec->dapm, SND_SOC_BIAS_OFF);
+	}
 }
 
 /* Module information */
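With the exported DAPM entry points now taking a struct snd_soc_dapm_context, codec and machine drivers pass &codec->dapm instead of the codec itself. A hedged sketch of a typical codec probe under the new signatures (the widget/route tables and the "MIC2" pin name are hypothetical):

/* example_widgets[], example_routes[] and "MIC2" are illustrative only */
static int example_codec_probe(struct snd_soc_codec *codec)
{
	struct snd_soc_dapm_context *dapm = &codec->dapm;

	snd_soc_dapm_new_controls(dapm, example_widgets,
				  ARRAY_SIZE(example_widgets));
	snd_soc_dapm_add_routes(dapm, example_routes,
				ARRAY_SIZE(example_routes));
	snd_soc_dapm_new_widgets(dapm);

	snd_soc_dapm_nc_pin(dapm, "MIC2");
	return snd_soc_dapm_sync(dapm);
}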
diff --git a/sound/soc/soc-jack.c b/sound/soc/soc-jack.c
index 8a0a920..ac5a5bc 100644
--- a/sound/soc/soc-jack.c
+++ b/sound/soc/soc-jack.c
@@ -13,11 +13,11 @@
 
 #include <sound/jack.h>
 #include <sound/soc.h>
-#include <sound/soc-dapm.h>
 #include <linux/gpio.h>
 #include <linux/interrupt.h>
 #include <linux/workqueue.h>
 #include <linux/delay.h>
+#include <trace/events/asoc.h>
 
 /**
  * snd_soc_jack_new - Create a new jack
@@ -60,14 +60,18 @@
 void snd_soc_jack_report(struct snd_soc_jack *jack, int status, int mask)
 {
 	struct snd_soc_codec *codec;
+	struct snd_soc_dapm_context *dapm;
 	struct snd_soc_jack_pin *pin;
 	int enable;
 	int oldstatus;
 
+	trace_snd_soc_jack_report(jack, mask, status);
+
 	if (!jack)
 		return;
 
 	codec = jack->codec;
+	dapm = &codec->dapm;
 
 	mutex_lock(&codec->mutex);
 
@@ -81,6 +85,8 @@
 	if (mask && (jack->status == oldstatus))
 		goto out;
 
+	trace_snd_soc_jack_notify(jack, status);
+
 	list_for_each_entry(pin, &jack->pins, list) {
 		enable = pin->mask & jack->status;
 
@@ -88,15 +94,15 @@
 			enable = !enable;
 
 		if (enable)
-			snd_soc_dapm_enable_pin(codec, pin->pin);
+			snd_soc_dapm_enable_pin(dapm, pin->pin);
 		else
-			snd_soc_dapm_disable_pin(codec, pin->pin);
+			snd_soc_dapm_disable_pin(dapm, pin->pin);
 	}
 
 	/* Report before the DAPM sync to help users updating micbias status */
 	blocking_notifier_call_chain(&jack->notifier, status, NULL);
 
-	snd_soc_dapm_sync(codec);
+	snd_soc_dapm_sync(dapm);
 
 	snd_jack_report(jack->jack, status);
 
@@ -207,6 +213,12 @@
 static irqreturn_t gpio_handler(int irq, void *data)
 {
 	struct snd_soc_jack_gpio *gpio = data;
+	struct device *dev = gpio->jack->codec->card->dev;
+
+	trace_snd_soc_jack_irq(gpio->name);
+
+	if (device_may_wakeup(dev))
+		pm_wakeup_event(dev, gpio->debounce_time + 50);
 
 	schedule_delayed_work(&gpio->work,
 			      msecs_to_jiffies(gpio->debounce_time));
@@ -263,11 +275,12 @@
 		INIT_DELAYED_WORK(&gpios[i].work, gpio_work);
 		gpios[i].jack = jack;
 
-		ret = request_irq(gpio_to_irq(gpios[i].gpio),
-				gpio_handler,
-				IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
-				jack->codec->dev->driver->name,
-				&gpios[i]);
+		ret = request_any_context_irq(gpio_to_irq(gpios[i].gpio),
+					      gpio_handler,
+					      IRQF_TRIGGER_RISING |
+					      IRQF_TRIGGER_FALLING,
+					      jack->codec->dev->driver->name,
+					      &gpios[i]);
 		if (ret)
 			goto err;
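Unlike request_irq(), request_any_context_irq() can also claim lines that must be handled as nested/threaded interrupts, and on success it returns IRQC_IS_HARDIRQ or IRQC_IS_NESTED (both non-negative) rather than 0. A sketch of the stricter error check, shown here as an illustration rather than as part of the hunk above:

	ret = request_any_context_irq(gpio_to_irq(gpios[i].gpio), gpio_handler,
				      IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
				      jack->codec->dev->driver->name, &gpios[i]);
	if (ret < 0)	/* IRQC_IS_HARDIRQ / IRQC_IS_NESTED are success values */
		goto err;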
 
diff --git a/sound/usb/format.c b/sound/usb/format.c
index 6914821..5b792d2 100644
--- a/sound/usb/format.c
+++ b/sound/usb/format.c
@@ -76,7 +76,10 @@
 		format = 1 << UAC_FORMAT_TYPE_I_PCM;
 	}
 	if (format & (1 << UAC_FORMAT_TYPE_I_PCM)) {
-		if (sample_width > sample_bytes * 8) {
+		if (chip->usb_id == USB_ID(0x0582, 0x0016) /* Edirol SD-90 */ &&
+		    sample_width == 24 && sample_bytes == 2)
+			sample_bytes = 3;
+		else if (sample_width > sample_bytes * 8) {
 			snd_printk(KERN_INFO "%d:%u:%d : sample bitwidth %d in over sample bytes %d\n",
 				   chip->dev->devnum, fp->iface, fp->altsetting,
 				   sample_width, sample_bytes);
diff --git a/sound/usb/midi.c b/sound/usb/midi.c
index 25bce7e..db2dc5f 100644
--- a/sound/usb/midi.c
+++ b/sound/usb/midi.c
@@ -850,8 +850,8 @@
 		return;
 	}
 
-	memset(urb->transfer_buffer + count, 0xFD, 9 - count);
-	urb->transfer_buffer_length = count;
+	memset(urb->transfer_buffer + count, 0xFD, ep->max_transfer - count);
+	urb->transfer_buffer_length = ep->max_transfer;
 }
 
 static struct usb_protocol_ops snd_usbmidi_122l_ops = {
@@ -1295,6 +1295,13 @@
 	case USB_ID(0x1a86, 0x752d): /* QinHeng CH345 "USB2.0-MIDI" */
 		ep->max_transfer = 4;
 		break;
+		/*
+		 * Some devices only work with 9 bytes packet size:
+		 */
+	case USB_ID(0x0644, 0x800E): /* Tascam US-122L */
+	case USB_ID(0x0644, 0x800F): /* Tascam US-144 */
+		ep->max_transfer = 9;
+		break;
 	}
 	for (i = 0; i < OUTPUT_URBS; ++i) {
 		buffer = usb_alloc_coherent(umidi->dev,
@@ -1729,13 +1736,7 @@
 {
 	static const char *const names[] = { "High Load", "Light Load" };
 
-	info->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
-	info->count = 1;
-	info->value.enumerated.items = 2;
-	if (info->value.enumerated.item > 1)
-		info->value.enumerated.item = 1;
-	strcpy(info->value.enumerated.name, names[info->value.enumerated.item]);
-	return 0;
+	return snd_ctl_enum_info(info, 1, 2, names);
 }
 
 static int roland_load_get(struct snd_kcontrol *kcontrol,
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
index f2d74d6..7df89b3 100644
--- a/sound/usb/mixer.c
+++ b/sound/usb/mixer.c
@@ -1633,18 +1633,11 @@
 static int mixer_ctl_selector_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
 {
 	struct usb_mixer_elem_info *cval = kcontrol->private_data;
-	char **itemlist = (char **)kcontrol->private_value;
+	const char **itemlist = (const char **)kcontrol->private_value;
 
 	if (snd_BUG_ON(!itemlist))
 		return -EINVAL;
-	uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
-	uinfo->count = 1;
-	uinfo->value.enumerated.items = cval->max;
-	if (uinfo->value.enumerated.item >= cval->max)
-		uinfo->value.enumerated.item = cval->max - 1;
-	strlcpy(uinfo->value.enumerated.name, itemlist[uinfo->value.enumerated.item],
-		sizeof(uinfo->value.enumerated.name));
-	return 0;
+	return snd_ctl_enum_info(uinfo, 1, cval->max, itemlist);
 }
 
 /* get callback for selector unit */
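Both this hunk and the midi.c change above drop the open-coded enumerated-info boilerplate in favour of snd_ctl_enum_info(), which fills in the element type, channel count and item count, clamps the requested item and copies its name. A small sketch of an .info callback written with the helper (the control and item names are invented for illustration):

static int example_mode_info(struct snd_kcontrol *kcontrol,
			     struct snd_ctl_elem_info *uinfo)
{
	static const char *const texts[] = { "Off", "On" };

	return snd_ctl_enum_info(uinfo, 1, ARRAY_SIZE(texts), texts);
}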
diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
index ad7079d..3599987 100644
--- a/sound/usb/quirks-table.h
+++ b/sound/usb/quirks-table.h
@@ -705,11 +705,11 @@
 		.data = (const struct snd_usb_audio_quirk[]) {
 			{
 				.ifnum = 0,
-				.type = QUIRK_IGNORE_INTERFACE
+				.type = QUIRK_AUDIO_STANDARD_INTERFACE
 			},
 			{
 				.ifnum = 1,
-				.type = QUIRK_IGNORE_INTERFACE
+				.type = QUIRK_AUDIO_STANDARD_INTERFACE
 			},
 			{
 				.ifnum = 2,
diff --git a/sound/usb/usx2y/us122l.c b/sound/usb/usx2y/us122l.c
index 6ef68e4..084e6fc 100644
--- a/sound/usb/usx2y/us122l.c
+++ b/sound/usb/usx2y/us122l.c
@@ -273,29 +273,26 @@
 					  struct file *file, poll_table *wait)
 {
 	struct us122l	*us122l = hw->private_data;
-	struct usb_stream *s = us122l->sk.s;
 	unsigned	*polled;
 	unsigned int	mask;
 
 	poll_wait(file, &us122l->sk.sleep, wait);
 
-	switch (s->state) {
-	case usb_stream_ready:
-		if (us122l->first == file)
-			polled = &s->periods_polled;
-		else
-			polled = &us122l->second_periods_polled;
-		if (*polled != s->periods_done) {
-			*polled = s->periods_done;
-			mask = POLLIN | POLLOUT | POLLWRNORM;
-			break;
+	mask = POLLIN | POLLOUT | POLLWRNORM | POLLERR;
+	if (mutex_trylock(&us122l->mutex)) {
+		struct usb_stream *s = us122l->sk.s;
+		if (s && s->state == usb_stream_ready) {
+			if (us122l->first == file)
+				polled = &s->periods_polled;
+			else
+				polled = &us122l->second_periods_polled;
+			if (*polled != s->periods_done) {
+				*polled = s->periods_done;
+				mask = POLLIN | POLLOUT | POLLWRNORM;
+			} else
+				mask = 0;
 		}
-		/* Fall through */
-		mask = 0;
-		break;
-	default:
-		mask = POLLIN | POLLOUT | POLLWRNORM | POLLERR;
-		break;
+		mutex_unlock(&us122l->mutex);
 	}
 	return mask;
 }
@@ -381,6 +378,7 @@
 {
 	struct usb_stream_config *cfg;
 	struct us122l *us122l = hw->private_data;
+	struct usb_stream *s;
 	unsigned min_period_frames;
 	int err = 0;
 	bool high_speed;
@@ -426,18 +424,18 @@
 	snd_power_wait(hw->card, SNDRV_CTL_POWER_D0);
 
 	mutex_lock(&us122l->mutex);
+	s = us122l->sk.s;
 	if (!us122l->master)
 		us122l->master = file;
 	else if (us122l->master != file) {
-		if (memcmp(cfg, &us122l->sk.s->cfg, sizeof(*cfg))) {
+		if (!s || memcmp(cfg, &s->cfg, sizeof(*cfg))) {
 			err = -EIO;
 			goto unlock;
 		}
 		us122l->slave = file;
 	}
-	if (!us122l->sk.s ||
-	    memcmp(cfg, &us122l->sk.s->cfg, sizeof(*cfg)) ||
-	    us122l->sk.s->state == usb_stream_xrun) {
+	if (!s || memcmp(cfg, &s->cfg, sizeof(*cfg)) ||
+	    s->state == usb_stream_xrun) {
 		us122l_stop(us122l);
 		if (!us122l_start(us122l, cfg->sample_rate, cfg->period_frames))
 			err = -EIO;
@@ -448,6 +446,7 @@
 	mutex_unlock(&us122l->mutex);
 free:
 	kfree(cfg);
+	wake_up_all(&us122l->sk.sleep);
 	return err;
 }
 
diff --git a/tools/perf/Makefile b/tools/perf/Makefile
index 1b9b13e..2b5387d 100644
--- a/tools/perf/Makefile
+++ b/tools/perf/Makefile
@@ -227,7 +227,7 @@
   CFLAGS_OPTIMIZE = -O6
 endif
 
-CFLAGS = -ggdb3 -Wall -Wextra -std=gnu99 -Werror $(CFLAGS_OPTIMIZE) -D_FORTIFY_SOURCE=2 $(EXTRA_WARNINGS) $(EXTRA_CFLAGS)
+CFLAGS = -fno-omit-frame-pointer -ggdb3 -Wall -Wextra -std=gnu99 -Werror $(CFLAGS_OPTIMIZE) -D_FORTIFY_SOURCE=2 $(EXTRA_WARNINGS) $(EXTRA_CFLAGS)
 EXTLIBS = -lpthread -lrt -lelf -lm
 ALL_CFLAGS = $(CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64
 ALL_LDFLAGS = $(LDFLAGS)
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 7bc0490..7069bd3 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -331,6 +331,9 @@
 			else if (err ==  ENODEV && cpu_list) {
 				die("No such device - did you specify"
 					" an out-of-range profile CPU?\n");
+			} else if (err == ENOENT) {
+				die("%s event is not supported. ",
+				     event_name(evsel));
 			} else if (err == EINVAL && sample_id_all_avail) {
 				/*
 				 * Old kernel, no attr->sample_id_type_all field
diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c
index 7a4ebeb..abd4b84 100644
--- a/tools/perf/builtin-sched.c
+++ b/tools/perf/builtin-sched.c
@@ -489,7 +489,8 @@
 
 	err = pthread_attr_init(&attr);
 	BUG_ON(err);
-	err = pthread_attr_setstacksize(&attr, (size_t)(16*1024));
+	err = pthread_attr_setstacksize(&attr,
+			(size_t) max(16 * 1024, PTHREAD_STACK_MIN));
 	BUG_ON(err);
 	err = pthread_mutex_lock(&start_work_mutex);
 	BUG_ON(err);
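pthread_attr_setstacksize() fails with EINVAL for any size below PTHREAD_STACK_MIN (from <limits.h>), which on some C libraries and architectures is larger than 16 KiB; hence the max() above. The equivalent explicit clamp, as a sketch:

	/* PTHREAD_STACK_MIN comes from <limits.h> */
	size_t stacksize = 16 * 1024;

	if (stacksize < PTHREAD_STACK_MIN)
		stacksize = PTHREAD_STACK_MIN;
	err = pthread_attr_setstacksize(&attr, stacksize);
	BUG_ON(err);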
@@ -1861,7 +1862,7 @@
 	rec_argc = ARRAY_SIZE(record_args) + argc - 1;
 	rec_argv = calloc(rec_argc + 1, sizeof(char *));
 
-	if (rec_argv)
+	if (rec_argv == NULL)
 		return -ENOMEM;
 
 	for (i = 0; i < ARRAY_SIZE(record_args); i++)
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index 02b2d80..c385a63 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -316,6 +316,8 @@
 				      "\t Consider tweaking"
 				      " /proc/sys/kernel/perf_event_paranoid or running as root.",
 				      system_wide ? "system-wide " : "");
+			} else if (errno == ENOENT) {
+				error("%s event is not supported. ", event_name(counter));
 			} else {
 				error("open_counter returned with %d (%s). "
 				      "/bin/dmesg may provide additional information.\n",
@@ -683,8 +685,7 @@
 		nr_counters = ARRAY_SIZE(default_attrs);
 
 		for (c = 0; c < ARRAY_SIZE(default_attrs); ++c) {
-			pos = perf_evsel__new(default_attrs[c].type,
-					      default_attrs[c].config,
+			pos = perf_evsel__new(&default_attrs[c],
 					      nr_counters);
 			if (pos == NULL)
 				goto out;
diff --git a/tools/perf/builtin-test.c b/tools/perf/builtin-test.c
index 1c98434..ed56961 100644
--- a/tools/perf/builtin-test.c
+++ b/tools/perf/builtin-test.c
@@ -234,6 +234,7 @@
 	return err;
 }
 
+#include "util/cpumap.h"
 #include "util/evsel.h"
 #include <sys/types.h>
 
@@ -264,6 +265,7 @@
 	int err = -1, fd;
 	struct thread_map *threads;
 	struct perf_evsel *evsel;
+	struct perf_event_attr attr;
 	unsigned int nr_open_calls = 111, i;
 	int id = trace_event__id("sys_enter_open");
 
@@ -278,7 +280,10 @@
 		return -1;
 	}
 
-	evsel = perf_evsel__new(PERF_TYPE_TRACEPOINT, id, 0);
+	memset(&attr, 0, sizeof(attr));
+	attr.type = PERF_TYPE_TRACEPOINT;
+	attr.config = id;
+	evsel = perf_evsel__new(&attr, 0);
 	if (evsel == NULL) {
 		pr_debug("perf_evsel__new\n");
 		goto out_thread_map_delete;
@@ -317,6 +322,111 @@
 	return err;
 }
 
+#include <sched.h>
+
+static int test__open_syscall_event_on_all_cpus(void)
+{
+	int err = -1, fd, cpu;
+	struct thread_map *threads;
+	struct cpu_map *cpus;
+	struct perf_evsel *evsel;
+	struct perf_event_attr attr;
+	unsigned int nr_open_calls = 111, i;
+	cpu_set_t *cpu_set;
+	size_t cpu_set_size;
+	int id = trace_event__id("sys_enter_open");
+
+	if (id < 0) {
+		pr_debug("is debugfs mounted on /sys/kernel/debug?\n");
+		return -1;
+	}
+
+	threads = thread_map__new(-1, getpid());
+	if (threads == NULL) {
+		pr_debug("thread_map__new\n");
+		return -1;
+	}
+
+	cpus = cpu_map__new(NULL);
+	if (cpus == NULL) {
+		pr_debug("cpu_map__new\n");
+		goto out_thread_map_delete;
+	}
+
+	cpu_set = CPU_ALLOC(cpus->nr);
+
+	if (cpu_set == NULL)
+		goto out_thread_map_delete;
+
+	cpu_set_size = CPU_ALLOC_SIZE(cpus->nr);
+	CPU_ZERO_S(cpu_set_size, cpu_set);
+
+	memset(&attr, 0, sizeof(attr));
+	attr.type = PERF_TYPE_TRACEPOINT;
+	attr.config = id;
+	evsel = perf_evsel__new(&attr, 0);
+	if (evsel == NULL) {
+		pr_debug("perf_evsel__new\n");
+		goto out_cpu_free;
+	}
+
+	if (perf_evsel__open(evsel, cpus, threads) < 0) {
+		pr_debug("failed to open counter: %s, "
+			 "tweak /proc/sys/kernel/perf_event_paranoid?\n",
+			 strerror(errno));
+		goto out_evsel_delete;
+	}
+
+	for (cpu = 0; cpu < cpus->nr; ++cpu) {
+		unsigned int ncalls = nr_open_calls + cpu;
+
+		CPU_SET(cpu, cpu_set);
+		sched_setaffinity(0, cpu_set_size, cpu_set);
+		for (i = 0; i < ncalls; ++i) {
+			fd = open("/etc/passwd", O_RDONLY);
+			close(fd);
+		}
+		CPU_CLR(cpu, cpu_set);
+	}
+
+	/*
+	 * Here we need to explicitly preallocate the counts, as if
+	 * we use the auto allocation it will allocate just for 1 cpu,
+	 * as we start by cpu 0.
+	 */
+	if (perf_evsel__alloc_counts(evsel, cpus->nr) < 0) {
+		pr_debug("perf_evsel__alloc_counts(ncpus=%d)\n", cpus->nr);
+		goto out_close_fd;
+	}
+
+	for (cpu = 0; cpu < cpus->nr; ++cpu) {
+		unsigned int expected;
+
+		if (perf_evsel__read_on_cpu(evsel, cpu, 0) < 0) {
+			pr_debug("perf_evsel__open_read_on_cpu\n");
+			goto out_close_fd;
+		}
+
+		expected = nr_open_calls + cpu;
+		if (evsel->counts->cpu[cpu].val != expected) {
+			pr_debug("perf_evsel__read_on_cpu: expected to intercept %d calls on cpu %d, got %Ld\n",
+				 expected, cpu, evsel->counts->cpu[cpu].val);
+			goto out_close_fd;
+		}
+	}
+
+	err = 0;
+out_close_fd:
+	perf_evsel__close_fd(evsel, 1, threads->nr);
+out_evsel_delete:
+	perf_evsel__delete(evsel);
+out_cpu_free:
+	CPU_FREE(cpu_set);
+out_thread_map_delete:
+	thread_map__delete(threads);
+	return err;
+}
+
 static struct test {
 	const char *desc;
 	int (*func)(void);
@@ -330,6 +440,10 @@
 		.func = test__open_syscall_event,
 	},
 	{
+		.desc = "detect open syscall event on all cpus",
+		.func = test__open_syscall_event_on_all_cpus,
+	},
+	{
 		.func = NULL,
 	},
 };
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index 1e67ab9..6ce4042 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -1247,6 +1247,8 @@
 				die("Permission error - are you root?\n"
 					"\t Consider tweaking"
 					" /proc/sys/kernel/perf_event_paranoid.\n");
+			if (err == ENOENT)
+				die("%s event is not supported. ", event_name(evsel));
 			/*
 			 * If it's cycles then fall back to hrtimer
 			 * based cpu-clock-tick sw counter, which
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index c95267e..f5cfed6 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -6,14 +6,13 @@
 
 #define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
 
-struct perf_evsel *perf_evsel__new(u32 type, u64 config, int idx)
+struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr, int idx)
 {
 	struct perf_evsel *evsel = zalloc(sizeof(*evsel));
 
 	if (evsel != NULL) {
 		evsel->idx	   = idx;
-		evsel->attr.type   = type;
-		evsel->attr.config = config;
+		evsel->attr	   = *attr;
 		INIT_LIST_HEAD(&evsel->node);
 	}
 
@@ -128,59 +127,75 @@
 	return 0;
 }
 
-int perf_evsel__open_per_cpu(struct perf_evsel *evsel, struct cpu_map *cpus)
+static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
+			      struct thread_map *threads)
 {
-	int cpu;
+	int cpu, thread;
 
-	if (evsel->fd == NULL && perf_evsel__alloc_fd(evsel, cpus->nr, 1) < 0)
+	if (evsel->fd == NULL &&
+	    perf_evsel__alloc_fd(evsel, cpus->nr, threads->nr) < 0)
 		return -1;
 
 	for (cpu = 0; cpu < cpus->nr; cpu++) {
-		FD(evsel, cpu, 0) = sys_perf_event_open(&evsel->attr, -1,
-							cpus->map[cpu], -1, 0);
-		if (FD(evsel, cpu, 0) < 0)
-			goto out_close;
+		for (thread = 0; thread < threads->nr; thread++) {
+			FD(evsel, cpu, thread) = sys_perf_event_open(&evsel->attr,
+								     threads->map[thread],
+								     cpus->map[cpu], -1, 0);
+			if (FD(evsel, cpu, thread) < 0)
+				goto out_close;
+		}
 	}
 
 	return 0;
 
 out_close:
-	while (--cpu >= 0) {
-		close(FD(evsel, cpu, 0));
-		FD(evsel, cpu, 0) = -1;
-	}
+	do {
+		while (--thread >= 0) {
+			close(FD(evsel, cpu, thread));
+			FD(evsel, cpu, thread) = -1;
+		}
+		thread = threads->nr;
+	} while (--cpu >= 0);
 	return -1;
 }
 
+static struct {
+	struct cpu_map map;
+	int cpus[1];
+} empty_cpu_map = {
+	.map.nr	= 1,
+	.cpus	= { -1, },
+};
+
+static struct {
+	struct thread_map map;
+	int threads[1];
+} empty_thread_map = {
+	.map.nr	 = 1,
+	.threads = { -1, },
+};
+
+int perf_evsel__open(struct perf_evsel *evsel,
+		     struct cpu_map *cpus, struct thread_map *threads)
+{
+
+	if (cpus == NULL) {
+		/* Work around old compiler warnings about strict aliasing */
+		cpus = &empty_cpu_map.map;
+	}
+
+	if (threads == NULL)
+		threads = &empty_thread_map.map;
+
+	return __perf_evsel__open(evsel, cpus, threads);
+}
+
+int perf_evsel__open_per_cpu(struct perf_evsel *evsel, struct cpu_map *cpus)
+{
+	return __perf_evsel__open(evsel, cpus, &empty_thread_map.map);
+}
+
 int perf_evsel__open_per_thread(struct perf_evsel *evsel, struct thread_map *threads)
 {
-	int thread;
-
-	if (evsel->fd == NULL && perf_evsel__alloc_fd(evsel, 1, threads->nr))
-		return -1;
-
-	for (thread = 0; thread < threads->nr; thread++) {
-		FD(evsel, 0, thread) = sys_perf_event_open(&evsel->attr,
-							   threads->map[thread], -1, -1, 0);
-		if (FD(evsel, 0, thread) < 0)
-			goto out_close;
-	}
-
-	return 0;
-
-out_close:
-	while (--thread >= 0) {
-		close(FD(evsel, 0, thread));
-		FD(evsel, 0, thread) = -1;
-	}
-	return -1;
-}
-
-int perf_evsel__open(struct perf_evsel *evsel, 
-		     struct cpu_map *cpus, struct thread_map *threads)
-{
-	if (threads == NULL)
-		return perf_evsel__open_per_cpu(evsel, cpus);
-
-	return perf_evsel__open_per_thread(evsel, threads);
+	return __perf_evsel__open(evsel, &empty_cpu_map.map, threads);
 }
diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
index a0ccd69..b2d755f 100644
--- a/tools/perf/util/evsel.h
+++ b/tools/perf/util/evsel.h
@@ -37,7 +37,7 @@
 struct cpu_map;
 struct thread_map;
 
-struct perf_evsel *perf_evsel__new(u32 type, u64 config, int idx);
+struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr, int idx);
 void perf_evsel__delete(struct perf_evsel *evsel);
 
 int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads);
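With perf_evsel__new() now copying a caller-supplied perf_event_attr, callers zero an attr, fill in at least .type and .config, and pass its address; any other attr fields are taken over verbatim. The pattern, as used by the callers updated elsewhere in this series:

	struct perf_event_attr attr;
	struct perf_evsel *evsel;

	memset(&attr, 0, sizeof(attr));
	attr.type   = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;

	evsel = perf_evsel__new(&attr, 0);	/* idx 0 is just an example */
	if (evsel == NULL)
		return -ENOMEM;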
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index 649083f..5cb6f4b 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -490,6 +490,31 @@
 	return EVT_HANDLED_ALL;
 }
 
+static int store_event_type(const char *orgname)
+{
+	char filename[PATH_MAX], *c;
+	FILE *file;
+	int id, n;
+
+	sprintf(filename, "%s/", debugfs_path);
+	strncat(filename, orgname, strlen(orgname));
+	strcat(filename, "/id");
+
+	c = strchr(filename, ':');
+	if (c)
+		*c = '/';
+
+	file = fopen(filename, "r");
+	if (!file)
+		return 0;
+	n = fscanf(file, "%i", &id);
+	fclose(file);
+	if (n < 1) {
+		pr_err("cannot store event ID\n");
+		return -EINVAL;
+	}
+	return perf_header__push_event(id, orgname);
+}
 
 static enum event_result parse_tracepoint_event(const char **strp,
 				    struct perf_event_attr *attr)
@@ -533,9 +558,13 @@
 		*strp += strlen(sys_name) + evt_length;
 		return parse_multiple_tracepoint_event(sys_name, evt_name,
 						       flags);
-	} else
+	} else {
+		if (store_event_type(evt_name) < 0)
+			return EVT_FAILED;
+
 		return parse_single_tracepoint_event(sys_name, evt_name,
 						     evt_length, attr, strp);
+	}
 }
 
 static enum event_result
@@ -778,41 +807,11 @@
 	return ret;
 }
 
-static int store_event_type(const char *orgname)
-{
-	char filename[PATH_MAX], *c;
-	FILE *file;
-	int id, n;
-
-	sprintf(filename, "%s/", debugfs_path);
-	strncat(filename, orgname, strlen(orgname));
-	strcat(filename, "/id");
-
-	c = strchr(filename, ':');
-	if (c)
-		*c = '/';
-
-	file = fopen(filename, "r");
-	if (!file)
-		return 0;
-	n = fscanf(file, "%i", &id);
-	fclose(file);
-	if (n < 1) {
-		pr_err("cannot store event ID\n");
-		return -EINVAL;
-	}
-	return perf_header__push_event(id, orgname);
-}
-
 int parse_events(const struct option *opt __used, const char *str, int unset __used)
 {
 	struct perf_event_attr attr;
 	enum event_result ret;
 
-	if (strchr(str, ':'))
-		if (store_event_type(str) < 0)
-			return -1;
-
 	for (;;) {
 		memset(&attr, 0, sizeof(attr));
 		ret = parse_event_symbols(&str, &attr);
@@ -824,7 +823,7 @@
 
 		if (ret != EVT_HANDLED_ALL) {
 			struct perf_evsel *evsel;
-			evsel = perf_evsel__new(attr.type, attr.config,
+			evsel = perf_evsel__new(&attr,
 						nr_counters);
 			if (evsel == NULL)
 				return -1;
@@ -1014,8 +1013,15 @@
 
 int perf_evsel_list__create_default(void)
 {
-	struct perf_evsel *evsel = perf_evsel__new(PERF_TYPE_HARDWARE,
-						   PERF_COUNT_HW_CPU_CYCLES, 0);
+	struct perf_evsel *evsel;
+	struct perf_event_attr attr;
+
+	memset(&attr, 0, sizeof(attr));
+	attr.type = PERF_TYPE_HARDWARE;
+	attr.config = PERF_COUNT_HW_CPU_CYCLES;
+
+	evsel = perf_evsel__new(&attr, 0);
+
 	if (evsel == NULL)
 		return -ENOMEM;
 
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index 6fb4694..313dac2 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -1007,7 +1007,7 @@
 	if (size == 0)
 		size = 8;
 
-	if (head + event->header.size >= mmap_size) {
+	if (head + event->header.size > mmap_size) {
 		if (mmaps[map_idx]) {
 			munmap(mmaps[map_idx], mmap_size);
 			mmaps[map_idx] = NULL;
diff --git a/tools/power/x86/turbostat/Makefile b/tools/power/x86/turbostat/Makefile
new file mode 100644
index 0000000..fd8e1f1
--- /dev/null
+++ b/tools/power/x86/turbostat/Makefile
@@ -0,0 +1,8 @@
+turbostat : turbostat.c
+
+clean :
+	rm -f turbostat
+
+install :
+	install turbostat /usr/bin/turbostat
+	install turbostat.8 /usr/share/man/man8
diff --git a/tools/power/x86/turbostat/turbostat.8 b/tools/power/x86/turbostat/turbostat.8
new file mode 100644
index 0000000..ff75125
--- /dev/null
+++ b/tools/power/x86/turbostat/turbostat.8
@@ -0,0 +1,172 @@
+.TH TURBOSTAT 8
+.SH NAME
+turbostat \- Report processor frequency and idle statistics
+.SH SYNOPSIS
+.ft B
+.B turbostat
+.RB [ "\-v" ]
+.RB [ "\-M MSR#" ]
+.RB command
+.br
+.B turbostat
+.RB [ "\-v" ]
+.RB [ "\-M MSR#" ]
+.RB [ "\-i interval_sec" ]
+.SH DESCRIPTION
+\fBturbostat \fP reports processor topology, frequency
+and idle power state statistics on modern X86 processors.
+Either \fBcommand\fP is forked and statistics are printed
+upon its completion, or statistics are printed periodically.
+
+\fBturbostat \fP
+requires that the processor
+supports an "invariant" TSC, plus the APERF and MPERF MSRs.
+\fBturbostat \fP will report idle cpu power state residency
+on processors that additionally support C-state residency counters.
+
+.SS Options
+The \fB-v\fP option increases verbosity.
+.PP
+The \fB-M MSR#\fP option dumps the specified MSR,
+in addition to the usual frequency and idle statistics.
+.PP
+The \fB-i interval_sec\fP option prints statistics every \fIinterval_sec\fP seconds.
+The default is 5 seconds.
+.PP
+The \fBcommand\fP parameter forks \fBcommand\fP and upon its exit,
+displays the statistics gathered since it was forked.
+.PP
+.SH FIELD DESCRIPTIONS
+.nf
+\fBpkg\fP processor package number.
+\fBcore\fP processor core number.
+\fBCPU\fP Linux CPU (logical processor) number.
+\fB%c0\fP percent of the interval that the CPU retired instructions.
+\fBGHz\fP average clock rate while the CPU was in c0 state.
+\fBTSC\fP average GHz that the TSC ran during the entire interval.
+\fB%c1, %c3, %c6\fP show the percentage residency in hardware core idle states.
+\fB%pc3, %pc6\fP percentage residency in hardware package idle states.
+.fi
+.PP
+.SH EXAMPLE
+Without any parameters, turbostat prints out counters every 5 seconds.
+(override interval with "-i sec" option, or specify a command
+for turbostat to fork).
+
+The first row of statistics reflect the average for the entire system.
+Subsequent rows show per-CPU statistics.
+
+.nf
+[root@x980]# ./turbostat
+core CPU   %c0   GHz  TSC   %c1    %c3    %c6   %pc3   %pc6
+          0.04 1.62 3.38   0.11   0.00  99.85   0.00  95.07
+  0   0   0.04 1.62 3.38   0.06   0.00  99.90   0.00  95.07
+  0   6   0.02 1.62 3.38   0.08   0.00  99.90   0.00  95.07
+  1   2   0.10 1.62 3.38   0.29   0.00  99.61   0.00  95.07
+  1   8   0.11 1.62 3.38   0.28   0.00  99.61   0.00  95.07
+  2   4   0.01 1.62 3.38   0.01   0.00  99.98   0.00  95.07
+  2  10   0.01 1.61 3.38   0.02   0.00  99.98   0.00  95.07
+  8   1   0.07 1.62 3.38   0.15   0.00  99.78   0.00  95.07
+  8   7   0.03 1.62 3.38   0.19   0.00  99.78   0.00  95.07
+  9   3   0.01 1.62 3.38   0.02   0.00  99.98   0.00  95.07
+  9   9   0.01 1.62 3.38   0.02   0.00  99.98   0.00  95.07
+ 10   5   0.01 1.62 3.38   0.13   0.00  99.86   0.00  95.07
+ 10  11   0.08 1.62 3.38   0.05   0.00  99.86   0.00  95.07
+.fi
+.SH VERBOSE EXAMPLE
+The "-v" option adds verbosity to the output:
+
+.nf
+GenuineIntel 11 CPUID levels; family:model:stepping 0x6:2c:2 (6:44:2)
+12 * 133 = 1600 MHz max efficiency
+25 * 133 = 3333 MHz TSC frequency
+26 * 133 = 3467 MHz max turbo 4 active cores
+26 * 133 = 3467 MHz max turbo 3 active cores
+27 * 133 = 3600 MHz max turbo 2 active cores
+27 * 133 = 3600 MHz max turbo 1 active cores
+
+.fi
+The \fBmax efficiency\fP frequency, a.k.a. Low Frequency Mode, is the frequency
+available at the minimum package voltage.  The \fBTSC frequency\fP is the nominal
+maximum frequency of the processor if turbo-mode were not available.  This frequency
+should be sustainable on all CPUs indefinitely, given nominal power and cooling.
+The remaining rows show what maximum turbo frequency is possible
+depending on the number of idle cores.  Note that this information is
+not available on all processors.
+.SH FORK EXAMPLE
+If turbostat is invoked with a command, it will fork that command
+and output the statistics gathered when the command exits.
+e.g. here a cycle soaker is run on 1 CPU (see %c0) for a few seconds
+until ^C while the other CPUs are mostly idle:
+
+.nf
+[root@x980 lenb]# ./turbostat cat /dev/zero > /dev/null
+
+^Ccore CPU   %c0   GHz  TSC   %c1    %c3    %c6   %pc3   %pc6
+           8.49 3.63 3.38  16.23   0.66  74.63   0.00   0.00
+   0   0   1.22 3.62 3.38  32.18   0.00  66.60   0.00   0.00
+   0   6   0.40 3.61 3.38  33.00   0.00  66.60   0.00   0.00
+   1   2   0.11 3.14 3.38   0.19   3.95  95.75   0.00   0.00
+   1   8   0.05 2.88 3.38   0.25   3.95  95.75   0.00   0.00
+   2   4   0.00 3.13 3.38   0.02   0.00  99.98   0.00   0.00
+   2  10   0.00 3.09 3.38   0.02   0.00  99.98   0.00   0.00
+   8   1   0.04 3.50 3.38  14.43   0.00  85.54   0.00   0.00
+   8   7   0.03 2.98 3.38  14.43   0.00  85.54   0.00   0.00
+   9   3   0.00 3.16 3.38 100.00   0.00   0.00   0.00   0.00
+   9   9  99.93 3.63 3.38   0.06   0.00   0.00   0.00   0.00
+  10   5   0.01 2.82 3.38   0.08   0.00  99.91   0.00   0.00
+  10  11   0.02 3.36 3.38   0.06   0.00  99.91   0.00   0.00
+6.950866 sec
+
+.fi
+Above, the cycle soaker drives cpu9 up to the 3.6 GHz turbo limit
+while the other processors are generally in various states of idle.
+
+Note that cpu3 is an HT sibling sharing core9
+with cpu9, and thus it is unable to get to an idle state
+deeper than c1 while cpu9 is busy.
+
+Note that turbostat reports an average GHz of 3.61, while
+the arithmetic average of the GHz column above is 3.24.
+This is a weighted average, where the weight is %c0, i.e. it is the total number of
+unhalted cycles elapsed per time, divided by the number of CPUs.
+.SH NOTES
+
+.B "turbostat "
+must be run as root.
+
+.B "turbostat "
+reads hardware counters, but doesn't write them.
+So it will not interfere with the OS or other programs, including
+multiple invocations of itself.
+
+\fBturbostat \fP
+may work poorly on Linux-2.6.20 through 2.6.29,
+as \fBacpi-cpufreq \fPperiodically cleared the APERF and MPERF MSRs
+in those kernels.
+
+The APERF, MPERF MSRs are defined to count non-halted cycles.
+Although it is not guaranteed by the architecture, turbostat assumes
+that they count at TSC rate, which is true on all processors tested to date.
+
+.SH REFERENCES
+"Intel® Turbo Boost Technology
+in Intel® Core™ Microarchitecture (Nehalem) Based Processors"
+http://download.intel.com/design/processor/applnots/320354.pdf
+
+"Intel® 64 and IA-32 Architectures Software Developer's Manual
+Volume 3B: System Programming Guide"
+http://www.intel.com/products/processor/manuals/
+
+.SH FILES
+.ta
+.nf
+/dev/cpu/*/msr
+.fi
+
+.SH "SEE ALSO"
+msr(4), vmstat(8)
+.PP
+.SH AUTHORS
+.nf
+Written by Len Brown <len.brown@intel.com>
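The "average GHz" described in the man page is derived from the APERF/MPERF ratio scaled by the TSC rate, as computed in print_pcc() in turbostat.c below; because MPERF only counts while the CPU is unhalted, the result is inherently weighted by %c0. A sketch of the calculation:

	/* p->aperf / p->mperf: ratio of actual to reference cycles while unhalted;
	 * p->tsc / (units * interval_float): reference (TSC) clock in GHz.
	 */
	double ghz = 1.0 * p->tsc / units * p->aperf / p->mperf / interval_float;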
diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
new file mode 100644
index 0000000..4c6983d
--- /dev/null
+++ b/tools/power/x86/turbostat/turbostat.c
@@ -0,0 +1,1048 @@
+/*
+ * turbostat -- show CPU frequency and C-state residency
+ * on modern Intel turbo-capable processors.
+ *
+ * Copyright (c) 2010, Intel Corporation.
+ * Len Brown <len.brown@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <stdio.h>
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <sys/stat.h>
+#include <sys/resource.h>
+#include <fcntl.h>
+#include <signal.h>
+#include <sys/time.h>
+#include <stdlib.h>
+#include <dirent.h>
+#include <string.h>
+#include <ctype.h>
+
+#define MSR_TSC	0x10
+#define MSR_NEHALEM_PLATFORM_INFO	0xCE
+#define MSR_NEHALEM_TURBO_RATIO_LIMIT	0x1AD
+#define MSR_APERF	0xE8
+#define MSR_MPERF	0xE7
+#define MSR_PKG_C2_RESIDENCY	0x60D	/* SNB only */
+#define MSR_PKG_C3_RESIDENCY	0x3F8
+#define MSR_PKG_C6_RESIDENCY	0x3F9
+#define MSR_PKG_C7_RESIDENCY	0x3FA	/* SNB only */
+#define MSR_CORE_C3_RESIDENCY	0x3FC
+#define MSR_CORE_C6_RESIDENCY	0x3FD
+#define MSR_CORE_C7_RESIDENCY	0x3FE	/* SNB only */
+
+char *proc_stat = "/proc/stat";
+unsigned int interval_sec = 5;	/* set with -i interval_sec */
+unsigned int verbose;		/* set with -v */
+unsigned int skip_c0;
+unsigned int skip_c1;
+unsigned int do_nhm_cstates;
+unsigned int do_snb_cstates;
+unsigned int has_aperf;
+unsigned int units = 1000000000;	/* GHz etc */
+unsigned int genuine_intel;
+unsigned int has_invariant_tsc;
+unsigned int do_nehalem_platform_info;
+unsigned int do_nehalem_turbo_ratio_limit;
+unsigned int extra_msr_offset;
+double bclk;
+unsigned int show_pkg;
+unsigned int show_core;
+unsigned int show_cpu;
+
+int aperf_mperf_unstable;
+int backwards_count;
+char *progname;
+int need_reinitialize;
+
+int num_cpus;
+
+typedef struct per_cpu_counters {
+	unsigned long long tsc;		/* per thread */
+	unsigned long long aperf;	/* per thread */
+	unsigned long long mperf;	/* per thread */
+	unsigned long long c1;	/* per thread (calculated) */
+	unsigned long long c3;	/* per core */
+	unsigned long long c6;	/* per core */
+	unsigned long long c7;	/* per core */
+	unsigned long long pc2;	/* per package */
+	unsigned long long pc3;	/* per package */
+	unsigned long long pc6;	/* per package */
+	unsigned long long pc7;	/* per package */
+	unsigned long long extra_msr;	/* per thread */
+	int pkg;
+	int core;
+	int cpu;
+	struct per_cpu_counters *next;
+} PCC;
+
+PCC *pcc_even;
+PCC *pcc_odd;
+PCC *pcc_delta;
+PCC *pcc_average;
+struct timeval tv_even;
+struct timeval tv_odd;
+struct timeval tv_delta;
+
+unsigned long long get_msr(int cpu, off_t offset)
+{
+	ssize_t retval;
+	unsigned long long msr;
+	char pathname[32];
+	int fd;
+
+	sprintf(pathname, "/dev/cpu/%d/msr", cpu);
+	fd = open(pathname, O_RDONLY);
+	if (fd < 0) {
+		perror(pathname);
+		need_reinitialize = 1;
+		return 0;
+	}
+
+	retval = pread(fd, &msr, sizeof msr, offset);
+	if (retval != sizeof msr) {
+		fprintf(stderr, "cpu%d pread(..., 0x%zx) = %jd\n",
+			cpu, offset, retval);
+		exit(-2);
+	}
+
+	close(fd);
+	return msr;
+}
+
+void print_header()
+{
+	if (show_pkg)
+		fprintf(stderr, "pkg ");
+	if (show_core)
+		fprintf(stderr, "core");
+	if (show_cpu)
+		fprintf(stderr, " CPU");
+	if (do_nhm_cstates)
+		fprintf(stderr, "   %%c0 ");
+	if (has_aperf)
+		fprintf(stderr, "  GHz");
+	fprintf(stderr, "  TSC");
+	if (do_nhm_cstates)
+		fprintf(stderr, "   %%c1 ");
+	if (do_nhm_cstates)
+		fprintf(stderr, "   %%c3 ");
+	if (do_nhm_cstates)
+		fprintf(stderr, "   %%c6 ");
+	if (do_snb_cstates)
+		fprintf(stderr, "   %%c7 ");
+	if (do_snb_cstates)
+		fprintf(stderr, "  %%pc2 ");
+	if (do_nhm_cstates)
+		fprintf(stderr, "  %%pc3 ");
+	if (do_nhm_cstates)
+		fprintf(stderr, "  %%pc6 ");
+	if (do_snb_cstates)
+		fprintf(stderr, "  %%pc7 ");
+	if (extra_msr_offset)
+		fprintf(stderr, "       MSR 0x%x ", extra_msr_offset);
+
+	putc('\n', stderr);
+}
+
+void dump_pcc(PCC *pcc)
+{
+	fprintf(stderr, "package: %d ", pcc->pkg);
+	fprintf(stderr, "core:: %d ", pcc->core);
+	fprintf(stderr, "CPU: %d ", pcc->cpu);
+	fprintf(stderr, "TSC: %016llX\n", pcc->tsc);
+	fprintf(stderr, "c3: %016llX\n", pcc->c3);
+	fprintf(stderr, "c6: %016llX\n", pcc->c6);
+	fprintf(stderr, "c7: %016llX\n", pcc->c7);
+	fprintf(stderr, "aperf: %016llX\n", pcc->aperf);
+	fprintf(stderr, "pc2: %016llX\n", pcc->pc2);
+	fprintf(stderr, "pc3: %016llX\n", pcc->pc3);
+	fprintf(stderr, "pc6: %016llX\n", pcc->pc6);
+	fprintf(stderr, "pc7: %016llX\n", pcc->pc7);
+	fprintf(stderr, "msr0x%x: %016llX\n", extra_msr_offset, pcc->extra_msr);
+}
+
+void dump_list(PCC *pcc)
+{
+	printf("dump_list 0x%p\n", pcc);
+
+	for (; pcc; pcc = pcc->next)
+		dump_pcc(pcc);
+}
+
+void print_pcc(PCC *p)
+{
+	double interval_float;
+
+	interval_float = tv_delta.tv_sec + tv_delta.tv_usec/1000000.0;
+
+	/* topology columns, print blanks on 1st (average) line */
+	if (p == pcc_average) {
+		if (show_pkg)
+			fprintf(stderr, "    ");
+		if (show_core)
+			fprintf(stderr, "    ");
+		if (show_cpu)
+			fprintf(stderr, "    ");
+	} else {
+		if (show_pkg)
+			fprintf(stderr, "%4d", p->pkg);
+		if (show_core)
+			fprintf(stderr, "%4d", p->core);
+		if (show_cpu)
+			fprintf(stderr, "%4d", p->cpu);
+	}
+
+	/* %c0 */
+	if (do_nhm_cstates) {
+		if (!skip_c0)
+			fprintf(stderr, "%7.2f", 100.0 * p->mperf/p->tsc);
+		else
+			fprintf(stderr, "   ****");
+	}
+
+	/* GHz */
+	if (has_aperf) {
+		if (!aperf_mperf_unstable) {
+			fprintf(stderr, "%5.2f",
+				1.0 * p->tsc / units * p->aperf /
+				p->mperf / interval_float);
+		} else {
+			if (p->aperf > p->tsc || p->mperf > p->tsc) {
+				fprintf(stderr, " ****");
+			} else {
+				fprintf(stderr, "%4.1f*",
+					1.0 * p->tsc /
+					units * p->aperf /
+					p->mperf / interval_float);
+			}
+		}
+	}
+
+	/* TSC */
+	fprintf(stderr, "%5.2f", 1.0 * p->tsc/units/interval_float);
+
+	if (do_nhm_cstates) {
+		if (!skip_c1)
+			fprintf(stderr, "%7.2f", 100.0 * p->c1/p->tsc);
+		else
+			fprintf(stderr, "   ****");
+	}
+	if (do_nhm_cstates)
+		fprintf(stderr, "%7.2f", 100.0 * p->c3/p->tsc);
+	if (do_nhm_cstates)
+		fprintf(stderr, "%7.2f", 100.0 * p->c6/p->tsc);
+	if (do_snb_cstates)
+		fprintf(stderr, "%7.2f", 100.0 * p->c7/p->tsc);
+	if (do_snb_cstates)
+		fprintf(stderr, "%7.2f", 100.0 * p->pc2/p->tsc);
+	if (do_nhm_cstates)
+		fprintf(stderr, "%7.2f", 100.0 * p->pc3/p->tsc);
+	if (do_nhm_cstates)
+		fprintf(stderr, "%7.2f", 100.0 * p->pc6/p->tsc);
+	if (do_snb_cstates)
+		fprintf(stderr, "%7.2f", 100.0 * p->pc7/p->tsc);
+	if (extra_msr_offset)
+		fprintf(stderr, "  0x%016llx", p->extra_msr);
+	putc('\n', stderr);
+}
+
+void print_counters(PCC *cnt)
+{
+	PCC *pcc;
+
+	print_header();
+
+	if (num_cpus > 1)
+		print_pcc(pcc_average);
+
+	for (pcc = cnt; pcc != NULL; pcc = pcc->next)
+		print_pcc(pcc);
+
+}
+
+#define SUBTRACT_COUNTER(after, before, delta) (delta = (after - before), (before > after))
+
+
+int compute_delta(PCC *after, PCC *before, PCC *delta)
+{
+	int errors = 0;
+	int perf_err = 0;
+
+	skip_c0 = skip_c1 = 0;
+
+	for ( ; after && before && delta;
+		after = after->next, before = before->next, delta = delta->next) {
+		if (before->cpu != after->cpu) {
+			printf("cpu configuration changed: %d != %d\n",
+				before->cpu, after->cpu);
+			return -1;
+		}
+
+		if (SUBTRACT_COUNTER(after->tsc, before->tsc, delta->tsc)) {
+			fprintf(stderr, "cpu%d TSC went backwards %llX to %llX\n",
+				before->cpu, before->tsc, after->tsc);
+			errors++;
+		}
+		/* check for TSC < 1 Mcycles over interval */
+		if (delta->tsc < (1000 * 1000)) {
+			fprintf(stderr, "Insanely slow TSC rate,"
+				" TSC stops in idle?\n");
+			fprintf(stderr, "You can disable all c-states"
+				" by booting with \"idle=poll\"\n");
+			fprintf(stderr, "or just the deep ones with"
+				" \"processor.max_cstate=1\"\n");
+			exit(-3);
+		}
+		if (SUBTRACT_COUNTER(after->c3, before->c3, delta->c3)) {
+			fprintf(stderr, "cpu%d c3 counter went backwards %llX to %llX\n",
+				before->cpu, before->c3, after->c3);
+			errors++;
+		}
+		if (SUBTRACT_COUNTER(after->c6, before->c6, delta->c6)) {
+			fprintf(stderr, "cpu%d c6 counter went backwards %llX to %llX\n",
+				before->cpu, before->c6, after->c6);
+			errors++;
+		}
+		if (SUBTRACT_COUNTER(after->c7, before->c7, delta->c7)) {
+			fprintf(stderr, "cpu%d c7 counter went backwards %llX to %llX\n",
+				before->cpu, before->c7, after->c7);
+			errors++;
+		}
+		if (SUBTRACT_COUNTER(after->pc2, before->pc2, delta->pc2)) {
+			fprintf(stderr, "cpu%d pc2 counter went backwards %llX to %llX\n",
+				before->cpu, before->pc2, after->pc2);
+			errors++;
+		}
+		if (SUBTRACT_COUNTER(after->pc3, before->pc3, delta->pc3)) {
+			fprintf(stderr, "cpu%d pc3 counter went backwards %llX to %llX\n",
+				before->cpu, before->pc3, after->pc3);
+			errors++;
+		}
+		if (SUBTRACT_COUNTER(after->pc6, before->pc6, delta->pc6)) {
+			fprintf(stderr, "cpu%d pc6 counter went backwards %llX to %llX\n",
+				before->cpu, before->pc6, after->pc6);
+			errors++;
+		}
+		if (SUBTRACT_COUNTER(after->pc7, before->pc7, delta->pc7)) {
+			fprintf(stderr, "cpu%d pc7 counter went backwards %llX to %llX\n",
+				before->cpu, before->pc7, after->pc7);
+			errors++;
+		}
+
+		perf_err = SUBTRACT_COUNTER(after->aperf, before->aperf, delta->aperf);
+		if (perf_err) {
+			fprintf(stderr, "cpu%d aperf counter went backwards %llX to %llX\n",
+				before->cpu, before->aperf, after->aperf);
+		}
+		perf_err |= SUBTRACT_COUNTER(after->mperf, before->mperf, delta->mperf);
+		if (perf_err) {
+			fprintf(stderr, "cpu%d mperf counter went backwards %llX to %llX\n",
+				before->cpu, before->mperf, after->mperf);
+		}
+		if (perf_err) {
+			if (!aperf_mperf_unstable) {
+				fprintf(stderr, "%s: APERF or MPERF went backwards *\n", progname);
+				fprintf(stderr, "* Frequency results do not cover entire interval *\n");
+				fprintf(stderr, "* fix this by running Linux-2.6.30 or later *\n");
+
+				aperf_mperf_unstable = 1;
+			}
+			/*
+			 * mperf delta is likely a huge "positive" number
+			 * can not use it for calculating c0 time
+			 */
+			skip_c0 = 1;
+			skip_c1 = 1;
+		}
+
+		/*
+		 * As mperf and tsc collection are not atomic,
+		 * it is possible for mperf's non-halted cycles
+		 * to exceed TSC's all cycles: show c1 = 0% in that case.
+		 */
+		if (delta->mperf > delta->tsc)
+			delta->c1 = 0;
+		else /* normal case, derive c1 */
+			delta->c1 = delta->tsc - delta->mperf
+				- delta->c3 - delta->c6 - delta->c7;
+
+		if (delta->mperf == 0)
+			delta->mperf = 1;	/* divide by 0 protection */
+
+		/*
+		 * for "extra msr", just copy the latest w/o subtracting
+		 */
+		delta->extra_msr = after->extra_msr;
+		if (errors) {
+			fprintf(stderr, "ERROR cpu%d before:\n", before->cpu);
+			dump_pcc(before);
+			fprintf(stderr, "ERROR cpu%d after:\n", before->cpu);
+			dump_pcc(after);
+			errors = 0;
+		}
+	}
+	return 0;
+}
+
+void compute_average(PCC *delta, PCC *avg)
+{
+	PCC *sum;
+
+	sum = calloc(1, sizeof(PCC));
+	if (sum == NULL) {
+		perror("calloc sum");
+		exit(1);
+	}
+
+	for (; delta; delta = delta->next) {
+		sum->tsc += delta->tsc;
+		sum->c1 += delta->c1;
+		sum->c3 += delta->c3;
+		sum->c6 += delta->c6;
+		sum->c7 += delta->c7;
+		sum->aperf += delta->aperf;
+		sum->mperf += delta->mperf;
+		sum->pc2 += delta->pc2;
+		sum->pc3 += delta->pc3;
+		sum->pc6 += delta->pc6;
+		sum->pc7 += delta->pc7;
+	}
+	avg->tsc = sum->tsc/num_cpus;
+	avg->c1 = sum->c1/num_cpus;
+	avg->c3 = sum->c3/num_cpus;
+	avg->c6 = sum->c6/num_cpus;
+	avg->c7 = sum->c7/num_cpus;
+	avg->aperf = sum->aperf/num_cpus;
+	avg->mperf = sum->mperf/num_cpus;
+	avg->pc2 = sum->pc2/num_cpus;
+	avg->pc3 = sum->pc3/num_cpus;
+	avg->pc6 = sum->pc6/num_cpus;
+	avg->pc7 = sum->pc7/num_cpus;
+
+	free(sum);
+}
+
+void get_counters(PCC *pcc)
+{
+	for ( ; pcc; pcc = pcc->next) {
+		pcc->tsc = get_msr(pcc->cpu, MSR_TSC);
+		if (do_nhm_cstates)
+			pcc->c3 = get_msr(pcc->cpu, MSR_CORE_C3_RESIDENCY);
+		if (do_nhm_cstates)
+			pcc->c6 = get_msr(pcc->cpu, MSR_CORE_C6_RESIDENCY);
+		if (do_snb_cstates)
+			pcc->c7 = get_msr(pcc->cpu, MSR_CORE_C7_RESIDENCY);
+		if (has_aperf)
+			pcc->aperf = get_msr(pcc->cpu, MSR_APERF);
+		if (has_aperf)
+			pcc->mperf = get_msr(pcc->cpu, MSR_MPERF);
+		if (do_snb_cstates)
+			pcc->pc2 = get_msr(pcc->cpu, MSR_PKG_C2_RESIDENCY);
+		if (do_nhm_cstates)
+			pcc->pc3 = get_msr(pcc->cpu, MSR_PKG_C3_RESIDENCY);
+		if (do_nhm_cstates)
+			pcc->pc6 = get_msr(pcc->cpu, MSR_PKG_C6_RESIDENCY);
+		if (do_snb_cstates)
+			pcc->pc7 = get_msr(pcc->cpu, MSR_PKG_C7_RESIDENCY);
+		if (extra_msr_offset)
+			pcc->extra_msr = get_msr(pcc->cpu, extra_msr_offset);
+	}
+}
+
+
+void print_nehalem_info()
+{
+	unsigned long long msr;
+	unsigned int ratio;
+
+	if (!do_nehalem_platform_info)
+		return;
+
+	msr = get_msr(0, MSR_NEHALEM_PLATFORM_INFO);
+
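+	/* bits 47:40 of MSR_NEHALEM_PLATFORM_INFO hold the max efficiency ratio */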
+	ratio = (msr >> 40) & 0xFF;
+	fprintf(stderr, "%d * %.0f = %.0f MHz max efficiency\n",
+		ratio, bclk, ratio * bclk);
+
+	ratio = (msr >> 8) & 0xFF;
+	fprintf(stderr, "%d * %.0f = %.0f MHz TSC frequency\n",
+		ratio, bclk, ratio * bclk);
+
+	if (verbose > 1)
+		fprintf(stderr, "MSR_NEHALEM_PLATFORM_INFO: 0x%llx\n", msr);
+
+	if (!do_nehalem_turbo_ratio_limit)
+		return;
+
+	msr = get_msr(0, MSR_NEHALEM_TURBO_RATIO_LIMIT);
+
+	ratio = (msr >> 24) & 0xFF;
+	if (ratio)
+		fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 4 active cores\n",
+			ratio, bclk, ratio * bclk);
+
+	ratio = (msr >> 16) & 0xFF;
+	if (ratio)
+		fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 3 active cores\n",
+			ratio, bclk, ratio * bclk);
+
+	ratio = (msr >> 8) & 0xFF;
+	if (ratio)
+		fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 2 active cores\n",
+			ratio, bclk, ratio * bclk);
+
+	ratio = (msr >> 0) & 0xFF;
+	if (ratio)
+		fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 1 active cores\n",
+			ratio, bclk, ratio * bclk);
+
+}
+
+void free_counter_list(PCC *list)
+{
+	PCC *p;
+
+	for (p = list; p; ) {
+		PCC *free_me;
+
+		free_me = p;
+		p = p->next;
+		free(free_me);
+	}
+	return;
+}
+
+void free_all_counters(void)
+{
+	free_counter_list(pcc_even);
+	pcc_even = NULL;
+
+	free_counter_list(pcc_odd);
+	pcc_odd = NULL;
+
+	free_counter_list(pcc_delta);
+	pcc_delta = NULL;
+
+	free_counter_list(pcc_average);
+	pcc_average = NULL;
+}
+
+void insert_cpu_counters(PCC **list, PCC *new)
+{
+	PCC *prev;
+
+	/*
+	 * list was empty
+	 */
+	if (*list == NULL) {
+		new->next = *list;
+		*list = new;
+		return;
+	}
+
+	show_cpu = 1;	/* there is more than one CPU */
+
+	/*
+	 * insert on front of list.
+	 * It is sorted by ascending package#, core#, cpu#
+	 */
+	if (((*list)->pkg > new->pkg) ||
+	    (((*list)->pkg == new->pkg) && ((*list)->core > new->core)) ||
+	    (((*list)->pkg == new->pkg) && ((*list)->core == new->core) && ((*list)->cpu > new->cpu))) {
+		new->next = *list;
+		*list = new;
+		return;
+	}
+
+	prev = *list;
+
+	while (prev->next && (prev->next->pkg < new->pkg)) {
+		prev = prev->next;
+		show_pkg = 1;	/* there is more than 1 package */
+	}
+
+	while (prev->next && (prev->next->pkg == new->pkg)
+		&& (prev->next->core < new->core)) {
+		prev = prev->next;
+		show_core = 1;	/* there is more than 1 core */
+	}
+
+	while (prev->next && (prev->next->pkg == new->pkg)
+		&& (prev->next->core == new->core)
+		&& (prev->next->cpu < new->cpu)) {
+		prev = prev->next;
+	}
+
+	/*
+	 * insert after "prev"
+	 */
+	new->next = prev->next;
+	prev->next = new;
+
+	return;
+}
+
+void alloc_new_cpu_counters(int pkg, int core, int cpu)
+{
+	PCC *new;
+
+	if (verbose > 1)
+		printf("pkg%d core%d, cpu%d\n", pkg, core, cpu);
+
+	new = (PCC *)calloc(1, sizeof(PCC));
+	if (new == NULL) {
+		perror("calloc");
+		exit(1);
+	}
+	new->pkg = pkg;
+	new->core = core;
+	new->cpu = cpu;
+	insert_cpu_counters(&pcc_odd, new);
+
+	new = (PCC *)calloc(1, sizeof(PCC));
+	if (new == NULL) {
+		perror("calloc");
+		exit(1);
+	}
+	new->pkg = pkg;
+	new->core = core;
+	new->cpu = cpu;
+	insert_cpu_counters(&pcc_even, new);
+
+	new = (PCC *)calloc(1, sizeof(PCC));
+	if (new == NULL) {
+		perror("calloc");
+		exit(1);
+	}
+	new->pkg = pkg;
+	new->core = core;
+	new->cpu = cpu;
+	insert_cpu_counters(&pcc_delta, new);
+
+	new = (PCC *)calloc(1, sizeof(PCC));
+	if (new == NULL) {
+		perror("calloc");
+		exit(1);
+	}
+	new->pkg = pkg;
+	new->core = core;
+	new->cpu = cpu;
+	pcc_average = new;
+}
+
+int get_physical_package_id(int cpu)
+{
+	char path[64];
+	FILE *filep;
+	int pkg;
+
+	sprintf(path, "/sys/devices/system/cpu/cpu%d/topology/physical_package_id", cpu);
+	filep = fopen(path, "r");
+	if (filep == NULL) {
+		perror(path);
+		exit(1);
+	}
+	fscanf(filep, "%d", &pkg);
+	fclose(filep);
+	return pkg;
+}
+
+int get_core_id(int cpu)
+{
+	char path[64];
+	FILE *filep;
+	int core;
+
+	sprintf(path, "/sys/devices/system/cpu/cpu%d/topology/core_id", cpu);
+	filep = fopen(path, "r");
+	if (filep == NULL) {
+		perror(path);
+		exit(1);
+	}
+	fscanf(filep, "%d", &core);
+	fclose(filep);
+	return core;
+}
+
+/*
+ * run func(pkg, core, cpu) on every cpu listed in /proc/stat
+ */
+
+int for_all_cpus(void (func)(int, int, int))
+{
+	FILE *fp;
+	int cpu_count;
+	int retval;
+
+	fp = fopen(proc_stat, "r");
+	if (fp == NULL) {
+		perror(proc_stat);
+		exit(1);
+	}
+
+	retval = fscanf(fp, "cpu %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d\n");
+	if (retval != 0) {
+		perror("/proc/stat format");
+		exit(1);
+	}
+
+	for (cpu_count = 0; ; cpu_count++) {
+		int cpu;
+
+		retval = fscanf(fp, "cpu%u %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d\n", &cpu);
+		if (retval != 1)
+			break;
+
+		func(get_physical_package_id(cpu), get_core_id(cpu), cpu);
+	}
+	fclose(fp);
+	return cpu_count;
+}
+
+void re_initialize(void)
+{
+	printf("turbostat: topology changed, re-initializing.\n");
+	free_all_counters();
+	num_cpus = for_all_cpus(alloc_new_cpu_counters);
+	need_reinitialize = 0;
+	printf("num_cpus is now %d\n", num_cpus);
+}
+
+void dummy(int pkg, int core, int cpu) { return; }
+/*
+ * check to see if a cpu came on-line
+ */
+void verify_num_cpus()
+{
+	int new_num_cpus;
+
+	new_num_cpus = for_all_cpus(dummy);
+
+	if (new_num_cpus != num_cpus) {
+		if (verbose)
+			printf("num_cpus was %d, is now  %d\n",
+				num_cpus, new_num_cpus);
+		need_reinitialize = 1;
+	}
+
+	return;
+}
+
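+/*
+ * Main measurement loop: counters are double-buffered in the "even" and
+ * "odd" lists, so each pass sleeps interval_sec, snapshots one list, and
+ * prints the delta against the previous snapshot held in the other list.
+ */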
+void turbostat_loop()
+{
+restart:
+	get_counters(pcc_even);
+	gettimeofday(&tv_even, (struct timezone *)NULL);
+
+	while (1) {
+		verify_num_cpus();
+		if (need_reinitialize) {
+			re_initialize();
+			goto restart;
+		}
+		sleep(interval_sec);
+		get_counters(pcc_odd);
+		gettimeofday(&tv_odd, (struct timezone *)NULL);
+
+		compute_delta(pcc_odd, pcc_even, pcc_delta);
+		timersub(&tv_odd, &tv_even, &tv_delta);
+		compute_average(pcc_delta, pcc_average);
+		print_counters(pcc_delta);
+		if (need_reinitialize) {
+			re_initialize();
+			goto restart;
+		}
+		sleep(interval_sec);
+		get_counters(pcc_even);
+		gettimeofday(&tv_even, (struct timezone *)NULL);
+		compute_delta(pcc_even, pcc_odd, pcc_delta);
+		timersub(&tv_even, &tv_odd, &tv_delta);
+		compute_average(pcc_delta, pcc_average);
+		print_counters(pcc_delta);
+	}
+}
+
+void check_dev_msr()
+{
+	struct stat sb;
+
+	if (stat("/dev/cpu/0/msr", &sb)) {
+		fprintf(stderr, "no /dev/cpu/0/msr\n");
+		fprintf(stderr, "Try \"# modprobe msr\"\n");
+		exit(-5);
+	}
+}
+
+void check_super_user()
+{
+	if (getuid() != 0) {
+		fprintf(stderr, "must be root\n");
+		exit(-6);
+	}
+}
+
+int has_nehalem_turbo_ratio_limit(unsigned int family, unsigned int model)
+{
+	if (!genuine_intel)
+		return 0;
+
+	if (family != 6)
+		return 0;
+
+	switch (model) {
+	case 0x1A:	/* Core i7, Xeon 5500 series - Bloomfield, Gainestown NHM-EP */
+	case 0x1E:	/* Core i7 and i5 Processor - Clarksfield, Lynnfield, Jasper Forest */
+	case 0x1F:	/* Core i7 and i5 Processor - Nehalem */
+	case 0x25:	/* Westmere Client - Clarkdale, Arrandale */
+	case 0x2C:	/* Westmere EP - Gulftown */
+	case 0x2A:	/* SNB */
+	case 0x2D:	/* SNB Xeon */
+		return 1;
+	case 0x2E:	/* Nehalem-EX Xeon - Beckton */
+	case 0x2F:	/* Westmere-EX Xeon - Eagleton */
+	default:
+		return 0;
+	}
+}
+
+int is_snb(unsigned int family, unsigned int model)
+{
+	if (!genuine_intel)
+		return 0;
+
+	switch (model) {
+	case 0x2A:
+	case 0x2D:
+		return 1;
+	}
+	return 0;
+}
+
+double discover_bclk(unsigned int family, unsigned int model)
+{
+	if (is_snb(family, model))
+		return 100.00;
+	else
+		return 133.33;
+}
+
+void check_cpuid()
+{
+	unsigned int eax, ebx, ecx, edx, max_level;
+	unsigned int fms, family, model, stepping;
+
+	eax = ebx = ecx = edx = 0;
+
+	asm("cpuid" : "=a" (max_level), "=b" (ebx), "=c" (ecx), "=d" (edx) : "a" (0));
+
+	if (ebx == 0x756e6547 && edx == 0x49656e69 && ecx == 0x6c65746e)
+		genuine_intel = 1;
+
+	if (verbose)
+		fprintf(stderr, "%.4s%.4s%.4s ",
+			(char *)&ebx, (char *)&edx, (char *)&ecx);
+
+	asm("cpuid" : "=a" (fms), "=c" (ecx), "=d" (edx) : "a" (1) : "ebx");
+	family = (fms >> 8) & 0xf;
+	model = (fms >> 4) & 0xf;
+	stepping = fms & 0xf;
+	if (family == 6 || family == 0xf)
+		model += ((fms >> 16) & 0xf) << 4;
+
+	if (verbose)
+		fprintf(stderr, "%d CPUID levels; family:model:stepping 0x%x:%x:%x (%d:%d:%d)\n",
+			max_level, family, model, stepping, family, model, stepping);
+
+	if (!(edx & (1 << 5))) {
+		fprintf(stderr, "CPUID: no MSR\n");
+		exit(1);
+	}
+
+	/*
+	 * check max extended function levels of CPUID.
+	 * This is needed to check for invariant TSC.
+	 * This check is valid for both Intel and AMD.
+	 */
+	ebx = ecx = edx = 0;
+	asm("cpuid" : "=a" (max_level), "=b" (ebx), "=c" (ecx), "=d" (edx) : "a" (0x80000000));
+
+	if (max_level < 0x80000007) {
+		fprintf(stderr, "CPUID: no invariant TSC (max_level 0x%x)\n", max_level);
+		exit(1);
+	}
+
+	/*
+	 * Non-Stop TSC is advertised by CPUID.EAX=0x80000007: EDX.bit8
+	 * this check is valid for both Intel and AMD
+	 */
+	asm("cpuid" : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx) : "a" (0x80000007));
+	has_invariant_tsc = edx & (1 << 8);
+
+	if (!has_invariant_tsc) {
+		fprintf(stderr, "No invariant TSC\n");
+		exit(1);
+	}
+
+	/*
+	 * APERF/MPERF is advertised by CPUID.EAX=0x6: ECX.bit0
+	 * this check is valid for both Intel and AMD
+	 */
+
+	asm("cpuid" : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx) : "a" (0x6));
+	has_aperf = ecx & (1 << 0);
+	if (!has_aperf) {
+		fprintf(stderr, "No APERF MSR\n");
+		exit(1);
+	}
+
+	do_nehalem_platform_info = genuine_intel && has_invariant_tsc;
+	do_nhm_cstates = genuine_intel;	/* all Intel w/ non-stop TSC have NHM counters */
+	do_snb_cstates = is_snb(family, model);
+	bclk = discover_bclk(family, model);
+
+	do_nehalem_turbo_ratio_limit = has_nehalem_turbo_ratio_limit(family, model);
+}
+
+
+void usage()
+{
+	fprintf(stderr, "%s: [-v] [-M MSR#] [-i interval_sec | command ...]\n",
+		progname);
+	exit(1);
+}
+
+
+/*
+ * in /dev/cpu/ return success for names that are numbers
+ * i.e. filter out ".", "..", "microcode".
+ */
+int dir_filter(const struct dirent *dirp)
+{
+	if (isdigit(dirp->d_name[0]))
+		return 1;
+	else
+		return 0;
+}
+
+int open_dev_cpu_msr(int dummy1)
+{
+	return 0;
+}
+
+void turbostat_init()
+{
+	check_cpuid();
+
+	check_dev_msr();
+	check_super_user();
+
+	num_cpus = for_all_cpus(alloc_new_cpu_counters);
+
+	if (verbose)
+		print_nehalem_info();
+}
+
+int fork_it(char **argv)
+{
+	int retval;
+	pid_t child_pid;
+	get_counters(pcc_even);
+	gettimeofday(&tv_even, (struct timezone *)NULL);
+
+	child_pid = fork();
+	if (!child_pid) {
+		/* child */
+		execvp(argv[0], argv);
+	} else {
+		int status;
+
+		/* parent */
+		if (child_pid == -1) {
+			perror("fork");
+			exit(1);
+		}
+
+		signal(SIGINT, SIG_IGN);
+		signal(SIGQUIT, SIG_IGN);
+		if (waitpid(child_pid, &status, 0) == -1) {
+			perror("wait");
+			exit(1);
+		}
+	}
+	get_counters(pcc_odd);
+	gettimeofday(&tv_odd, (struct timezone *)NULL);
+	retval = compute_delta(pcc_odd, pcc_even, pcc_delta);
+
+	timersub(&tv_odd, &tv_even, &tv_delta);
+	compute_average(pcc_delta, pcc_average);
+	if (!retval)
+		print_counters(pcc_delta);
+
+	fprintf(stderr, "%.6f sec\n", tv_delta.tv_sec + tv_delta.tv_usec/1000000.0);
+
+	return 0;
+}
+
+void cmdline(int argc, char **argv)
+{
+	int opt;
+
+	progname = argv[0];
+
+	while ((opt = getopt(argc, argv, "+vi:M:")) != -1) {
+		switch (opt) {
+		case 'v':
+			verbose++;
+			break;
+		case 'i':
+			interval_sec = atoi(optarg);
+			break;
+		case 'M':
+			sscanf(optarg, "%x", &extra_msr_offset);
+			if (verbose > 1)
+				fprintf(stderr, "MSR 0x%X\n", extra_msr_offset);
+			break;
+		default:
+			usage();
+		}
+	}
+}
+
+int main(int argc, char **argv)
+{
+	cmdline(argc, argv);
+
+	if (verbose > 1)
+		fprintf(stderr, "turbostat Dec 6, 2010"
+			" - Len Brown <lenb@kernel.org>\n");
+	if (verbose > 1)
+		fprintf(stderr, "http://userweb.kernel.org/~lenb/acpi/utils/pmtools/turbostat/\n");
+
+	turbostat_init();
+
+	/*
+	 * if any params left, it must be a command to fork
+	 */
+	if (argc - optind)
+		return fork_it(argv + optind);
+	else
+		turbostat_loop();
+
+	return 0;
+}
diff --git a/tools/power/x86/x86_energy_perf_policy/Makefile b/tools/power/x86/x86_energy_perf_policy/Makefile
new file mode 100644
index 0000000..f458237
--- /dev/null
+++ b/tools/power/x86/x86_energy_perf_policy/Makefile
@@ -0,0 +1,8 @@
+x86_energy_perf_policy : x86_energy_perf_policy.c
+
+clean :
+	rm -f x86_energy_perf_policy
+
+install :
+	install x86_energy_perf_policy /usr/bin/
+	install x86_energy_perf_policy.8 /usr/share/man/man8/
diff --git a/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.8 b/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.8
new file mode 100644
index 0000000..8eaaad6
--- /dev/null
+++ b/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.8
@@ -0,0 +1,104 @@
+.\"  This page Copyright (C) 2010 Len Brown <len.brown@intel.com>
+.\"  Distributed under the GPL, Copyleft 1994.
+.TH X86_ENERGY_PERF_POLICY 8
+.SH NAME
+x86_energy_perf_policy \- read or write MSR_IA32_ENERGY_PERF_BIAS
+.SH SYNOPSIS
+.ft B
+.B x86_energy_perf_policy
+.RB [ "\-c cpu" ]
+.RB [ "\-v" ]
+.RB "\-r"
+.br
+.B x86_energy_perf_policy
+.RB [ "\-c cpu" ]
+.RB [ "\-v" ]
+.RB 'performance'
+.br
+.B x86_energy_perf_policy
+.RB [ "\-c cpu" ]
+.RB [ "\-v" ]
+.RB 'normal'
+.br
+.B x86_energy_perf_policy
+.RB [ "\-c cpu" ]
+.RB [ "\-v" ]
+.RB 'powersave'
+.br
+.B x86_energy_perf_policy
+.RB [ "\-c cpu" ]
+.RB [ "\-v" ]
+.RB n
+.br
+.SH DESCRIPTION
+\fBx86_energy_perf_policy\fP
+allows software to convey
+its policy for the relative importance of performance
+versus energy savings to the processor.
+
+The processor uses this information in model-specific ways
+when it must select trade-offs between performance and
+energy efficiency.
+
+This policy hint does not supersede Processor Performance states
+(P-states) or CPU Idle power states (C-states), but allows
+software to have influence where it would otherwise be unable
+to express a preference.
+
+For example, this setting may tell the hardware how
+aggressively or conservatively to control frequency
+in the "turbo range" above the explicitly OS-controlled
+P-state frequency range.  It may also tell the hardware
+how aggressively it should enter the OS-requested C-states.
+
+Support for this feature is indicated by CPUID.06H.ECX.bit3
+per the Intel Architectures Software Developer's Manual.
+
+.SS Options
+\fB-c\fP limits operation to a single CPU.
+The default is to operate on all CPUs.
+Note that MSR_IA32_ENERGY_PERF_BIAS is defined per
+logical processor, but that the initial implementations
+of the MSR were shared among all processors in each package.
+.PP
+\fB-v\fP increases verbosity.  By default
+x86_energy_perf_policy is silent.
+.PP
+\fB-r\fP is for "read-only" mode - the unchanged state
+is read and displayed.
+.PP
+.I performance
+Set a policy where performance is paramount.
+The processor will be unwilling to sacrifice any performance
+for the sake of energy saving. This is the hardware default.
+.PP
+.I normal
+Set a policy with a normal balance between performance and energy efficiency.
+The processor will tolerate minor performance compromise
+for potentially significant energy savings.
+This is a reasonable default for most desktops and servers.
+.PP
+.I powersave
+Set a policy where the processor can accept
+a measurable performance hit to maximize energy efficiency.
+.PP
+.I n
+Set MSR_IA32_ENERGY_PERF_BIAS to the specified number.
+The range of valid numbers is 0-15, where 0 is maximum
+performance and 15 is maximum energy efficiency.
+
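+.SH EXAMPLE
+A representative invocation, setting the 'normal' policy hint on all CPUs
+(run as root; add \-v to print the MSR values written):
+.nf
+# x86_energy_perf_policy normal
+.fi
+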
+.SH NOTES
+.B "x86_energy_perf_policy "
+runs only as root.
+.SH FILES
+.ta
+.nf
+/dev/cpu/*/msr
+.fi
+
+.SH "SEE ALSO"
+msr(4)
+.PP
+.SH AUTHORS
+.nf
+Written by Len Brown <len.brown@intel.com>
diff --git a/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c b/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
new file mode 100644
index 0000000..d9678a3
--- /dev/null
+++ b/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c
@@ -0,0 +1,325 @@
+/*
+ * x86_energy_perf_policy -- set the energy versus performance
+ * policy preference bias on recent X86 processors.
+ */
+/*
+ * Copyright (c) 2010, Intel Corporation.
+ * Len Brown <len.brown@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <stdio.h>
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/resource.h>
+#include <fcntl.h>
+#include <signal.h>
+#include <sys/time.h>
+#include <stdlib.h>
+#include <string.h>
+
+unsigned int verbose;		/* set with -v */
+unsigned int read_only;		/* set with -r */
+char *progname;
+unsigned long long new_bias;
+int cpu = -1;
+
+/*
+ * Usage:
+ *
+ * -c cpu: limit action to a single CPU (default is all CPUs)
+ * -v: verbose output (can invoke more than once)
+ * -r: read-only, don't change any settings
+ *
+ *  performance
+ *	Performance is paramount.
+ *	Unwilling to sacrifice any performance
+ *	for the sake of energy saving. (hardware default)
+ *
+ *  normal
+ *	Can tolerate minor performance compromise
+ *	for potentially significant energy savings.
+ *	(reasonable default for most desktops and servers)
+ *
+ *  powersave
+ *	Can tolerate significant performance hit
+ *	to maximize energy savings.
+ *
+ * n
+ *	a numerical value to write to the underlying MSR.
+ */
+void usage(void)
+{
+	printf("%s: [-c cpu] [-v] "
+		"(-r | 'performance' | 'normal' | 'powersave' | n)\n",
+		progname);
+	exit(1);
+}
+
+#define MSR_IA32_ENERGY_PERF_BIAS	0x000001b0
+
+#define	BIAS_PERFORMANCE		0
+#define BIAS_BALANCE			6
+#define	BIAS_POWERSAVE			15
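+/* MSR_IA32_ENERGY_PERF_BIAS accepts 0 (max performance) through 15 (max energy savings) */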
+
+void cmdline(int argc, char **argv)
+{
+	int opt;
+
+	progname = argv[0];
+
+	while ((opt = getopt(argc, argv, "+rvc:")) != -1) {
+		switch (opt) {
+		case 'c':
+			cpu = atoi(optarg);
+			break;
+		case 'r':
+			read_only = 1;
+			break;
+		case 'v':
+			verbose++;
+			break;
+		default:
+			usage();
+		}
+	}
+	/* with -r, no additional arguments are allowed */
+	if (read_only && (argc > optind))
+		usage();
+
+	/*
+	 * without -r, exactly one policy argument must follow
+	 */
+	if (!read_only) {
+
+		if (argc != optind + 1) {
+			printf("must supply -r or policy param\n");
+			usage();
+		}
+
+		if (!strcmp("performance", argv[optind])) {
+			new_bias = BIAS_PERFORMANCE;
+		} else if (!strcmp("normal", argv[optind])) {
+			new_bias = BIAS_BALANCE;
+		} else if (!strcmp("powersave", argv[optind])) {
+			new_bias = BIAS_POWERSAVE;
+		} else {
+			char *endptr;
+
+			new_bias = strtoull(argv[optind], &endptr, 0);
+			if (endptr == argv[optind] ||
+				new_bias > BIAS_POWERSAVE) {
+					fprintf(stderr, "invalid value: %s\n",
+						argv[optind]);
+				usage();
+			}
+		}
+	}
+}
+
+/*
+ * validate_cpuid()
+ * returns on success, quietly exits on failure (make verbose with -v)
+ */
+void validate_cpuid(void)
+{
+	unsigned int eax, ebx, ecx, edx, max_level;
+	char brand[16];
+	unsigned int fms, family, model, stepping;
+
+	eax = ebx = ecx = edx = 0;
+
+	asm("cpuid" : "=a" (max_level), "=b" (ebx), "=c" (ecx),
+		"=d" (edx) : "a" (0));
+
+	if (ebx != 0x756e6547 || edx != 0x49656e69 || ecx != 0x6c65746e) {
+		if (verbose)
+			fprintf(stderr, "%.4s%.4s%.4s != GenuineIntel",
+				(char *)&ebx, (char *)&edx, (char *)&ecx);
+		exit(1);
+	}
+
+	/* save the vendor string before ebx/ecx/edx are clobbered below */
+	snprintf(brand, sizeof(brand), "%.4s%.4s%.4s",
+		(char *)&ebx, (char *)&edx, (char *)&ecx);
+
+	asm("cpuid" : "=a" (fms), "=c" (ecx), "=d" (edx) : "a" (1) : "ebx");
+	family = (fms >> 8) & 0xf;
+	model = (fms >> 4) & 0xf;
+	stepping = fms & 0xf;
+	if (family == 6 || family == 0xf)
+		model += ((fms >> 16) & 0xf) << 4;
+
+	if (verbose > 1)
+		printf("CPUID %s %d levels family:model:stepping "
+			"0x%x:%x:%x (%d:%d:%d)\n", brand, max_level,
+			family, model, stepping, family, model, stepping);
+
+	if (!(edx & (1 << 5))) {
+		if (verbose)
+			printf("CPUID: no MSR\n");
+		exit(1);
+	}
+
+	/*
+	 * Support for MSR_IA32_ENERGY_PERF_BIAS
+	 * is indicated by CPUID.06H.ECX.bit3
+	 */
+	asm("cpuid" : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx) : "a" (6));
+	if (verbose)
+		printf("CPUID.06H.ECX: 0x%x\n", ecx);
+	if (!(ecx & (1 << 3))) {
+		if (verbose)
+			printf("CPUID: No MSR_IA32_ENERGY_PERF_BIAS\n");
+		exit(1);
+	}
+	return;	/* success */
+}
+
+unsigned long long get_msr(int cpu, int offset)
+{
+	unsigned long long msr;
+	char msr_path[32];
+	int retval;
+	int fd;
+
+	sprintf(msr_path, "/dev/cpu/%d/msr", cpu);
+	fd = open(msr_path, O_RDONLY);
+	if (fd < 0) {
+		printf("Try \"# modprobe msr\"\n");
+		perror(msr_path);
+		exit(1);
+	}
+
+	retval = pread(fd, &msr, sizeof msr, offset);
+
+	if (retval != sizeof msr) {
+		printf("pread cpu%d 0x%x = %d\n", cpu, offset, retval);
+		exit(-2);
+	}
+	close(fd);
+	return msr;
+}
+
+unsigned long long  put_msr(int cpu, unsigned long long new_msr, int offset)
+{
+	unsigned long long old_msr;
+	char msr_path[32];
+	int retval;
+	int fd;
+
+	sprintf(msr_path, "/dev/cpu/%d/msr", cpu);
+	fd = open(msr_path, O_RDWR);
+	if (fd < 0) {
+		perror(msr_path);
+		exit(1);
+	}
+
+	retval = pread(fd, &old_msr, sizeof old_msr, offset);
+	if (retval != sizeof old_msr) {
+		perror("pwrite");
+		printf("pread cpu%d 0x%x = %d\n", cpu, offset, retval);
+		exit(-2);
+	}
+
+	retval = pwrite(fd, &new_msr, sizeof new_msr, offset);
+	if (retval != sizeof new_msr) {
+		perror("pwrite");
+		printf("pwrite cpu%d 0x%x = %d\n", cpu, offset, retval);
+		exit(-2);
+	}
+
+	close(fd);
+
+	return old_msr;
+}
+
+void print_msr(int cpu)
+{
+	printf("cpu%d: 0x%016llx\n",
+		cpu, get_msr(cpu, MSR_IA32_ENERGY_PERF_BIAS));
+}
+
+void update_msr(int cpu)
+{
+	unsigned long long previous_msr;
+
+	previous_msr = put_msr(cpu, new_bias, MSR_IA32_ENERGY_PERF_BIAS);
+
+	if (verbose)
+		printf("cpu%d  msr0x%x 0x%016llx -> 0x%016llx\n",
+			cpu, MSR_IA32_ENERGY_PERF_BIAS, previous_msr, new_bias);
+
+	return;
+}
+
+char *proc_stat = "/proc/stat";
+/*
+ * run func(cpu) on every cpu listed in /proc/stat
+ */
+void for_every_cpu(void (func)(int))
+{
+	FILE *fp;
+	int retval;
+
+	fp = fopen(proc_stat, "r");
+	if (fp == NULL) {
+		perror(proc_stat);
+		exit(1);
+	}
+
+	retval = fscanf(fp, "cpu %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d\n");
+	if (retval != 0) {
+		perror("/proc/stat format");
+		exit(1);
+	}
+
+	while (1) {
+		int cpu;
+
+		retval = fscanf(fp,
+			"cpu%u %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d\n",
+			&cpu);
+		if (retval != 1)
+			break;
+
+		func(cpu);
+	}
+	fclose(fp);
+}
+
+int main(int argc, char **argv)
+{
+	cmdline(argc, argv);
+
+	if (verbose > 1)
+		printf("x86_energy_perf_policy Nov 24, 2010"
+				" - Len Brown <lenb@kernel.org>\n");
+	if (verbose > 1 && !read_only)
+		printf("new_bias %lld\n", new_bias);
+
+	validate_cpuid();
+
+	if (cpu != -1) {
+		if (read_only)
+			print_msr(cpu);
+		else
+			update_msr(cpu);
+	} else {
+		if (read_only)
+			for_every_cpu(print_msr);
+		else
+			for_every_cpu(update_msr);
+	}
+
+	return 0;
+}
diff --git a/tools/testing/ktest/compare-ktest-sample.pl b/tools/testing/ktest/compare-ktest-sample.pl
new file mode 100755
index 0000000..9a571e716
--- /dev/null
+++ b/tools/testing/ktest/compare-ktest-sample.pl
@@ -0,0 +1,30 @@
+#!/usr/bin/perl
+
+		fprintf(stderr, "%d * %.0f = %.0f MHz max turbo 1 active core\n",
+while (<IN>) {
+    if (/\$opt\{"?([A-Z].*?)(\[.*\])?"?\}/ ||
+	/set_test_option\("(.*?)"/) {
+	$opt{$1} = 1;
+    }
+}
+close IN;
+
+open (IN, "sample.conf");
+while (<IN>) {
+    if (/^\s*#?\s*(\S+)\s*=/) {
+	$samp{$1} = 1;
+    }
+}
+close IN;
+
+foreach $opt (keys %opt) {
+    if (!defined($samp{$opt})) {
+	print "opt = $opt\n";
+    }
+}
+
+foreach $samp (keys %samp) {
+    if (!defined($opt{$samp})) {
+	print "samp = $samp\n";
+    }
+}
diff --git a/tools/testing/ktest/ktest.pl b/tools/testing/ktest/ktest.pl
new file mode 100755
index 0000000..e1c62ee
--- /dev/null
+++ b/tools/testing/ktest/ktest.pl
@@ -0,0 +1,2023 @@
+#!/usr/bin/perl -w
+#
+# Copyright 2010 - Steven Rostedt <srostedt@redhat.com>, Red Hat Inc.
+# Licensed under the terms of the GNU GPL License version 2
+#
+
+use strict;
+use IPC::Open2;
+use Fcntl qw(F_GETFL F_SETFL O_NONBLOCK);
+use File::Path qw(mkpath);
+use File::Copy qw(cp);
+use FileHandle;
+
+my $VERSION = "0.2";
+
+$| = 1;
+
+my %opt;
+my %repeat_tests;
+my %repeats;
+my %default;
+
+#default opts
+$default{"NUM_TESTS"}		= 1;
+$default{"REBOOT_TYPE"}		= "grub";
+$default{"TEST_TYPE"}		= "test";
+$default{"BUILD_TYPE"}		= "randconfig";
+$default{"MAKE_CMD"}		= "make";
+$default{"TIMEOUT"}		= 120;
+$default{"TMP_DIR"}		= "/tmp/ktest";
+$default{"SLEEP_TIME"}		= 60;	# sleep time between tests
+$default{"BUILD_NOCLEAN"}	= 0;
+$default{"REBOOT_ON_ERROR"}	= 0;
+$default{"POWEROFF_ON_ERROR"}	= 0;
+$default{"REBOOT_ON_SUCCESS"}	= 1;
+$default{"POWEROFF_ON_SUCCESS"}	= 0;
+$default{"BUILD_OPTIONS"}	= "";
+$default{"BISECT_SLEEP_TIME"}	= 60;   # sleep time between bisects
+$default{"CLEAR_LOG"}		= 0;
+$default{"SUCCESS_LINE"}	= "login:";
+$default{"BOOTED_TIMEOUT"}	= 1;
+$default{"DIE_ON_FAILURE"}	= 1;
+$default{"SSH_EXEC"}		= "ssh \$SSH_USER\@\$MACHINE \$SSH_COMMAND";
+$default{"SCP_TO_TARGET"}	= "scp \$SRC_FILE \$SSH_USER\@\$MACHINE:\$DST_FILE";
+$default{"REBOOT"}		= "ssh \$SSH_USER\@\$MACHINE reboot";
+$default{"STOP_AFTER_SUCCESS"}	= 10;
+$default{"STOP_AFTER_FAILURE"}	= 60;
+$default{"LOCALVERSION"}	= "-test";
+
+my $ktest_config;
+my $version;
+my $machine;
+my $ssh_user;
+my $tmpdir;
+my $builddir;
+my $outputdir;
+my $output_config;
+my $test_type;
+my $build_type;
+my $build_options;
+my $reboot_type;
+my $reboot_script;
+my $power_cycle;
+my $reboot;
+my $reboot_on_error;
+my $poweroff_on_error;
+my $die_on_failure;
+my $powercycle_after_reboot;
+my $poweroff_after_halt;
+my $ssh_exec;
+my $scp_to_target;
+my $power_off;
+my $grub_menu;
+my $grub_number;
+my $target;
+my $make;
+my $post_install;
+my $noclean;
+my $minconfig;
+my $addconfig;
+my $in_bisect = 0;
+my $bisect_bad = "";
+my $reverse_bisect;
+my $in_patchcheck = 0;
+my $run_test;
+my $redirect;
+my $buildlog;
+my $dmesg;
+my $monitor_fp;
+my $monitor_pid;
+my $monitor_cnt = 0;
+my $sleep_time;
+my $bisect_sleep_time;
+my $store_failures;
+my $timeout;
+my $booted_timeout;
+my $console;
+my $success_line;
+my $stop_after_success;
+my $stop_after_failure;
+my $build_target;
+my $target_image;
+my $localversion;
+my $iteration = 0;
+my $successes = 0;
+
+my %entered_configs;
+my %config_help;
+
+$config_help{"MACHINE"} = << "EOF"
+ The machine hostname that you will test.
+EOF
+    ;
+$config_help{"SSH_USER"} = << "EOF"
+ The box is expected to have ssh on normal bootup, provide the user
+  (most likely root, since you need privileged operations)
+EOF
+    ;
+$config_help{"BUILD_DIR"} = << "EOF"
+ The directory that contains the Linux source code (full path).
+EOF
+    ;
+$config_help{"OUTPUT_DIR"} = << "EOF"
+ The directory where the objects will be built (full path).
+ (cannot be the same as BUILD_DIR)
+EOF
+    ;
+$config_help{"BUILD_TARGET"} = << "EOF"
+ The location of the compiled file to copy to the target.
+ (relative to OUTPUT_DIR)
+EOF
+    ;
+$config_help{"TARGET_IMAGE"} = << "EOF"
+ The place to put your image on the test machine.
+EOF
+    ;
+$config_help{"POWER_CYCLE"} = << "EOF"
+ A script or command to reboot the box.
+
+ Here is a digital loggers power switch example
+ POWER_CYCLE = wget --no-proxy -O /dev/null -q  --auth-no-challenge 'http://admin:admin\@power/outlet?5=CCL'
+
+ Here is an example to reboot a virtual box on the current host
+ with the name "Guest".
+ POWER_CYCLE = virsh destroy Guest; sleep 5; virsh start Guest
+EOF
+    ;
+$config_help{"CONSOLE"} = << "EOF"
+ The script or command that reads the console
+
+  If you use ttywatch server, something like the following would work.
+CONSOLE = nc -d localhost 3001
+
+ For a virtual machine with guest name "Guest".
+CONSOLE =  virsh console Guest
+EOF
+    ;
+$config_help{"LOCALVERSION"} = << "EOF"
+ Required version ending to differentiate the test
+ from other linux builds on the system.
+EOF
+    ;
+$config_help{"REBOOT_TYPE"} = << "EOF"
+ Way to reboot the box to the test kernel.
+ Only valid options so far are "grub" and "script".
+
+ If you specify grub, it will assume grub version 1
+ and will search in /boot/grub/menu.lst for the title \$GRUB_MENU
+ and select that target to reboot to the kernel. If this is not
+ your setup, then specify "script" and have a command or script
+ specified in REBOOT_SCRIPT to boot to the target.
+
+ The entry in /boot/grub/menu.lst must be entered in manually.
+ The test will not modify that file.
+EOF
+    ;
+$config_help{"GRUB_MENU"} = << "EOF"
+ The grub title name for the test kernel to boot
+ (Only mandatory if REBOOT_TYPE = grub)
+
+ Note: ktest.pl will not update the grub menu.lst; you need to
+ manually add an entry for the test kernel. ktest.pl will search
+ the grub menu.lst for this option to find what kernel to
+ reboot into.
+
+ For example, if in the /boot/grub/menu.lst the test kernel title has:
+ title Test Kernel
+ kernel vmlinuz-test
+ GRUB_MENU = Test Kernel
+EOF
+    ;
+$config_help{"REBOOT_SCRIPT"} = << "EOF"
+ A script to reboot the target into the test kernel
+ (Only mandatory if REBOOT_TYPE = script)
+EOF
+    ;
+
+
+sub get_ktest_config {
+    my ($config) = @_;
+
+    return if (defined($opt{$config}));
+
+    if (defined($config_help{$config})) {
+	print "\n";
+	print $config_help{$config};
+    }
+
+    for (;;) {
+	print "$config = ";
+	if (defined($default{$config})) {
+	    print "\[$default{$config}\] ";
+	}
+	$entered_configs{$config} = <STDIN>;
+	$entered_configs{$config} =~ s/^\s*(.*\S)\s*$/$1/;
+	if ($entered_configs{$config} =~ /^\s*$/) {
+	    if ($default{$config}) {
+		$entered_configs{$config} = $default{$config};
+	    } else {
+		print "Your answer can not be blank\n";
+		next;
+	    }
+	}
+	last;
+    }
+}
+
+sub get_ktest_configs {
+    get_ktest_config("MACHINE");
+    get_ktest_config("SSH_USER");
+    get_ktest_config("BUILD_DIR");
+    get_ktest_config("OUTPUT_DIR");
+    get_ktest_config("BUILD_TARGET");
+    get_ktest_config("TARGET_IMAGE");
+    get_ktest_config("POWER_CYCLE");
+    get_ktest_config("CONSOLE");
+    get_ktest_config("LOCALVERSION");
+
+    my $rtype = $opt{"REBOOT_TYPE"};
+
+    if (!defined($rtype)) {
+	if (!defined($opt{"GRUB_MENU"})) {
+	    get_ktest_config("REBOOT_TYPE");
+	    $rtype = $entered_configs{"REBOOT_TYPE"};
+	} else {
+	    $rtype = "grub";
+	}
+    }
+
+    if ($rtype eq "grub") {
+	get_ktest_config("GRUB_MENU");
+    } else {
+	get_ktest_config("REBOOT_SCRIPT");
+    }
+}
+
+sub set_value {
+    my ($lvalue, $rvalue) = @_;
+
+    if (defined($opt{$lvalue})) {
+	die "Error: Option $lvalue defined more than once!\n";
+    }
+    if ($rvalue =~ /^\s*$/) {
+	delete $opt{$lvalue};
+    } else {
+	$opt{$lvalue} = $rvalue;
+    }
+}
+
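+# Parse a ktest config file.  Per the matching below it accepts:
+#   TEST_START [ITERATE n] [SKIP]  - start a new per-test section
+#   DEFAULTS [SKIP]                - switch back to the defaults section
+#   OPTION = value                 - set an option (per-test unless in defaults)
+# Blank lines and lines containing a '#' are ignored.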
+sub read_config {
+    my ($config) = @_;
+
+    open(IN, $config) || die "can't read file $config";
+
+    my $name = $config;
+    $name =~ s,.*/(.*),$1,;
+
+    my $test_num = 0;
+    my $default = 1;
+    my $repeat = 1;
+    my $num_tests_set = 0;
+    my $skip = 0;
+    my $rest;
+
+    while (<IN>) {
+
+	# ignore blank lines and comments
+	next if (/^\s*$/ || /\s*\#/);
+
+	if (/^\s*TEST_START(.*)/) {
+
+	    $rest = $1;
+
+	    if ($num_tests_set) {
+		die "$name: $.: Can not specify both NUM_TESTS and TEST_START\n";
+	    }
+
+	    my $old_test_num = $test_num;
+	    my $old_repeat = $repeat;
+
+	    $test_num += $repeat;
+	    $default = 0;
+	    $repeat = 1;
+
+	    if ($rest =~ /\s+SKIP(.*)/) {
+		$rest = $1;
+		$skip = 1;
+	    } else {
+		$skip = 0;
+	    }
+
+	    if ($rest =~ /\s+ITERATE\s+(\d+)(.*)$/) {
+		$repeat = $1;
+		$rest = $2;
+		$repeat_tests{"$test_num"} = $repeat;
+	    }
+
+	    if ($rest =~ /\s+SKIP(.*)/) {
+		$rest = $1;
+		$skip = 1;
+	    }
+
+	    if ($rest !~ /^\s*$/) {
+		die "$name: $.: Garbage found after TEST_START\n$_";
+	    }
+
+	    if ($skip) {
+		$test_num = $old_test_num;
+		$repeat = $old_repeat;
+	    }
+
+	} elsif (/^\s*DEFAULTS(.*)$/) {
+	    $default = 1;
+
+	    $rest = $1;
+
+	    if ($rest =~ /\s+SKIP(.*)/) {
+		$rest = $1;
+		$skip = 1;
+	    } else {
+		$skip = 0;
+	    }
+
+	    if ($rest !~ /^\s*$/) {
+		die "$name: $.: Garbage found after DEFAULTS\n$_";
+	    }
+
+	} elsif (/^\s*([A-Z_\[\]\d]+)\s*=\s*(.*?)\s*$/) {
+
+	    next if ($skip);
+
+	    my $lvalue = $1;
+	    my $rvalue = $2;
+
+	    if (!$default &&
+		($lvalue eq "NUM_TESTS" ||
+		 $lvalue eq "LOG_FILE" ||
+		 $lvalue eq "CLEAR_LOG")) {
+		die "$name: $.: $lvalue must be set in DEFAULTS section\n";
+	    }
+
+	    if ($lvalue eq "NUM_TESTS") {
+		if ($test_num) {
+		    die "$name: $.: Can not specify both NUM_TESTS and TEST_START\n";
+		}
+		if (!$default) {
+		    die "$name: $.: NUM_TESTS must be set in default section\n";
+		}
+		$num_tests_set = 1;
+	    }
+
+	    if ($default || $lvalue =~ /\[\d+\]$/) {
+		set_value($lvalue, $rvalue);
+	    } else {
+		my $val = "$lvalue\[$test_num\]";
+		set_value($val, $rvalue);
+
+		if ($repeat > 1) {
+		    $repeats{$val} = $repeat;
+		}
+	    }
+	} else {
+	    die "$name: $.: Garbage found in config\n$_";
+	}
+    }
+
+    close(IN);
+
+    if ($test_num) {
+	$test_num += $repeat - 1;
+	$opt{"NUM_TESTS"} = $test_num;
+    }
+
+    # make sure we have all mandatory configs
+    get_ktest_configs;
+
+    # set any defaults
+
+    foreach my $default (keys %default) {
+	if (!defined($opt{$default})) {
+	    $opt{$default} = $default{$default};
+	}
+    }
+}
+
+sub _logit {
+    if (defined($opt{"LOG_FILE"})) {
+	open(OUT, ">> $opt{LOG_FILE}") or die "Can't write to $opt{LOG_FILE}";
+	print OUT @_;
+	close(OUT);
+    }
+}
+
+sub logit {
+    if (defined($opt{"LOG_FILE"})) {
+	_logit @_;
+    } else {
+	print @_;
+    }
+}
+
+sub doprint {
+    print @_;
+    _logit @_;
+}
+
+sub run_command;
+
+sub reboot {
+    # try to reboot normally
+    if (run_command $reboot) {
+	if (defined($powercycle_after_reboot)) {
+	    sleep $powercycle_after_reboot;
+	    run_command "$power_cycle";
+	}
+    } else {
+	# nope? power cycle it.
+	run_command "$power_cycle";
+    }
+}
+
+sub do_not_reboot {
+    my $i = $iteration;
+
+    return $test_type eq "build" ||
+	($test_type eq "patchcheck" && $opt{"PATCHCHECK_TYPE[$i]"} eq "build") ||
+	($test_type eq "bisect" && $opt{"BISECT_TYPE[$i]"} eq "build");
+}
+
+sub dodie {
+    doprint "CRITICAL FAILURE... ", @_, "\n";
+
+    my $i = $iteration;
+
+    if ($reboot_on_error && !do_not_reboot) {
+
+	doprint "REBOOTING\n";
+	reboot;
+
+    } elsif ($poweroff_on_error && defined($power_off)) {
+	doprint "POWERING OFF\n";
+	`$power_off`;
+    }
+
+    die @_, "\n";
+}
+
+sub open_console {
+    my ($fp) = @_;
+
+    my $flags;
+
+    my $pid = open($fp, "$console|") or
+	dodie "Can't open console $console";
+
+    $flags = fcntl($fp, F_GETFL, 0) or
+	dodie "Can't get flags for the socket: $!";
+    $flags = fcntl($fp, F_SETFL, $flags | O_NONBLOCK) or
+	dodie "Can't set flags for the socket: $!";
+
+    return $pid;
+}
+
+sub close_console {
+    my ($fp, $pid) = @_;
+
+    doprint "kill child process $pid\n";
+    kill 2, $pid;
+
+    print "closing!\n";
+    close($fp);
+}
+
+sub start_monitor {
+    if ($monitor_cnt++) {
+	return;
+    }
+    $monitor_fp = \*MONFD;
+    $monitor_pid = open_console $monitor_fp;
+
+    return;
+
+    open(MONFD, "Stop perl from warning about single use of MONFD");
+}
+
+sub end_monitor {
+    if (--$monitor_cnt) {
+	return;
+    }
+    close_console($monitor_fp, $monitor_pid);
+}
+
+sub wait_for_monitor {
+    my ($time) = @_;
+    my $line;
+
+    doprint "** Wait for monitor to settle down **\n";
+
+    # read the monitor and wait for the system to calm down
+    do {
+	$line = wait_for_input($monitor_fp, $time);
+	print "$line" if (defined($line));
+    } while (defined($line));
+    print "** Monitor flushed **\n";
+}
+
+sub fail {
+
+	if ($die_on_failure) {
+		dodie @_;
+	}
+
+	doprint "FAILED\n";
+
+	my $i = $iteration;
+
+	# no need to reboot for just building.
+	if (!do_not_reboot) {
+	    doprint "REBOOTING\n";
+	    reboot;
+	    start_monitor;
+	    wait_for_monitor $sleep_time;
+	    end_monitor;
+	}
+
+	doprint "%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n";
+	doprint "%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n";
+	doprint "KTEST RESULT: TEST $i Failed: ", @_, "\n";
+	doprint "%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n";
+	doprint "%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n";
+
+	return 1 if (!defined($store_failures));
+
+	my @t = localtime;
+	my $date = sprintf "%04d%02d%02d%02d%02d%02d",
+		1900+$t[5],$t[4],$t[3],$t[2],$t[1],$t[0];
+
+	my $type = $build_type;
+	if ($type =~ /useconfig/) {
+	    $type = "useconfig";
+	}
+
+	my $dir = "$machine-$test_type-$type-fail-$date";
+	my $faildir = "$store_failures/$dir";
+
+	if (!-d $faildir) {
+	    mkpath($faildir) or
+		die "can't create $faildir";
+	}
+	if (-f "$output_config") {
+	    cp "$output_config", "$faildir/config" or
+		die "failed to copy .config";
+	}
+	if (-f $buildlog) {
+	    cp $buildlog, "$faildir/buildlog" or
+		die "failed to move $buildlog";
+	}
+	if (-f $dmesg) {
+	    cp $dmesg, "$faildir/dmesg" or
+		die "failed to move $dmesg";
+	}
+
+	doprint "*** Saved info to $faildir ***\n";
+
+	return 1;
+}
+
+sub run_command {
+    my ($command) = @_;
+    my $dolog = 0;
+    my $dord = 0;
+    my $pid;
+
+    $command =~ s/\$SSH_USER/$ssh_user/g;
+    $command =~ s/\$MACHINE/$machine/g;
+
+    doprint("$command ... ");
+
+    $pid = open(CMD, "$command 2>&1 |") or
+	(fail "unable to exec $command" and return 0);
+
+    if (defined($opt{"LOG_FILE"})) {
+	open(LOG, ">>$opt{LOG_FILE}") or
+	    dodie "failed to write to log";
+	$dolog = 1;
+    }
+
+    if (defined($redirect)) {
+	open (RD, ">$redirect") or
+	    dodie "failed to write to redirect $redirect";
+	$dord = 1;
+    }
+
+    while (<CMD>) {
+	print LOG if ($dolog);
+	print RD  if ($dord);
+    }
+
+    waitpid($pid, 0);
+    my $failed = $?;
+
+    close(CMD);
+    close(LOG) if ($dolog);
+    close(RD)  if ($dord);
+
+    if ($failed) {
+	doprint "FAILED!\n";
+    } else {
+	doprint "SUCCESS\n";
+    }
+
+    return !$failed;
+}
+
+sub run_ssh {
+    my ($cmd) = @_;
+    my $cp_exec = $ssh_exec;
+
+    $cp_exec =~ s/\$SSH_COMMAND/$cmd/g;
+    return run_command "$cp_exec";
+}
+
+sub run_scp {
+    my ($src, $dst) = @_;
+    my $cp_scp = $scp_to_target;
+
+    $cp_scp =~ s/\$SRC_FILE/$src/g;
+    $cp_scp =~ s/\$DST_FILE/$dst/g;
+
+    return run_command "$cp_scp";
+}
+
+sub get_grub_index {
+
+    if ($reboot_type ne "grub") {
+	return;
+    }
+    return if (defined($grub_number));
+
+    doprint "Find grub menu ... ";
+    $grub_number = -1;
+
+    my $ssh_grub = $ssh_exec;
+    $ssh_grub =~ s,\$SSH_COMMAND,cat /boot/grub/menu.lst,g;
+
+    open(IN, "$ssh_grub |")
+	or die "unable to get menu.lst";
+
+    while (<IN>) {
+	if (/^\s*title\s+$grub_menu\s*$/) {
+	    $grub_number++;
+	    last;
+	} elsif (/^\s*title\s/) {
+	    $grub_number++;
+	}
+    }
+    close(IN);
+
+    die "Could not find '$grub_menu' in /boot/grub/menu.lst on $machine"
+	if ($grub_number < 0);
+    doprint "$grub_number\n";
+}
+
+sub wait_for_input
+{
+    my ($fp, $time) = @_;
+    my $rin;
+    my $ready;
+    my $line;
+    my $ch;
+
+    if (!defined($time)) {
+	$time = $timeout;
+    }
+
+    $rin = '';
+    vec($rin, fileno($fp), 1) = 1;
+    $ready = select($rin, undef, undef, $time);
+
+    $line = "";
+
+    # try to read one char at a time
+    while (sysread $fp, $ch, 1) {
+	$line .= $ch;
+	last if ($ch eq "\n");
+    }
+
+    if (!length($line)) {
+	return undef;
+    }
+
+    return $line;
+}
+
+sub reboot_to {
+    if ($reboot_type eq "grub") {
+	run_ssh "'(echo \"savedefault --default=$grub_number --once\" | grub --batch; reboot)'";
+	return;
+    }
+
+    run_command "$reboot_script";
+}
+
+sub get_sha1 {
+    my ($commit) = @_;
+
+    doprint "git rev-list --max-count=1 $commit ... ";
+    my $sha1 = `git rev-list --max-count=1 $commit`;
+    my $ret = $?;
+
+    logit $sha1;
+
+    if ($ret) {
+	doprint "FAILED\n";
+	dodie "Failed to get git $commit";
+    }
+
+    print "SUCCESS\n";
+
+    chomp $sha1;
+
+    return $sha1;
+}
+
+sub monitor {
+    my $booted = 0;
+    my $bug = 0;
+    my $skip_call_trace = 0;
+    my $loops;
+
+    wait_for_monitor 5;
+
+    my $line;
+    my $full_line = "";
+
+    open(DMESG, "> $dmesg") or
+	die "unable to write to $dmesg";
+
+    reboot_to;
+
+    my $success_start;
+    my $failure_start;
+
+    for (;;) {
+
+	if ($booted) {
+	    $line = wait_for_input($monitor_fp, $booted_timeout);
+	} else {
+	    $line = wait_for_input($monitor_fp);
+	}
+
+	last if (!defined($line));
+
+	doprint $line;
+	print DMESG $line;
+
+	# we are not guaranteed to get a full line
+	$full_line .= $line;
+
+	if ($full_line =~ /$success_line/) {
+	    $booted = 1;
+	    $success_start = time;
+	}
+
+	if ($booted && defined($stop_after_success) &&
+	    $stop_after_success >= 0) {
+	    my $now = time;
+	    if ($now - $success_start >= $stop_after_success) {
+		doprint "Test forced to stop after $stop_after_success seconds after success\n";
+		last;
+	    }
+	}
+
+	if ($full_line =~ /\[ backtrace testing \]/) {
+	    $skip_call_trace = 1;
+	}
+
+	if ($full_line =~ /call trace:/i) {
+	    if (!$skip_call_trace) {
+		$bug = 1;
+		$failure_start = time;
+	    }
+	}
+
+	if ($bug && defined($stop_after_failure) &&
+	    $stop_after_failure >= 0) {
+	    my $now = time;
+	    if ($now - $failure_start >= $stop_after_failure) {
+		doprint "Test forced to stop after $stop_after_failure seconds after failure\n";
+		last;
+	    }
+	}
+
+	if ($full_line =~ /\[ end of backtrace testing \]/) {
+	    $skip_call_trace = 0;
+	}
+
+	if ($full_line =~ /Kernel panic -/) {
+	    $bug = 1;
+	}
+
+	if ($line =~ /\n/) {
+	    $full_line = "";
+	}
+    }
+
+    close(DMESG);
+
+    if ($bug) {
+	return 0 if ($in_bisect);
+	fail "failed - got a bug report" and return 0;
+    }
+
+    if (!$booted) {
+	return 0 if ($in_bisect);
+	fail "failed - never got a boot prompt." and return 0;
+    }
+
+    return 1;
+}
+
+sub install {
+
+    run_scp "$outputdir/$build_target", "$target_image" or
+	dodie "failed to copy image";
+
+    my $install_mods = 0;
+
+    # should we process modules?
+    $install_mods = 0;
+    open(IN, "$output_config") or dodie("Can't read config file");
+    while (<IN>) {
+	if (/CONFIG_MODULES(=y)?/) {
+	    $install_mods = 1 if (defined($1));
+	    last;
+	}
+    }
+    close(IN);
+
+    if (!$install_mods) {
+	doprint "No modules needed\n";
+	return;
+    }
+
+    run_command "$make INSTALL_MOD_PATH=$tmpdir modules_install" or
+	dodie "Failed to install modules";
+
+    my $modlib = "/lib/modules/$version";
+    my $modtar = "ktest-mods.tar.bz2";
+
+    run_ssh "rm -rf $modlib" or
+	dodie "failed to remove old mods: $modlib";
+
+    # would be nice if scp -r did not follow symbolic links
+    run_command "cd $tmpdir && tar -cjf $modtar lib/modules/$version" or
+	dodie "making tarball";
+
+    run_scp "$tmpdir/$modtar", "/tmp" or
+	dodie "failed to copy modules";
+
+    unlink "$tmpdir/$modtar";
+
+    run_ssh "'(cd / && tar xf /tmp/$modtar)'" or
+	dodie "failed to tar modules";
+
+    run_ssh "rm -f /tmp/$modtar";
+
+    return if (!defined($post_install));
+
+    my $cp_post_install = $post_install;
+    $cp_post_install =~ s/\$KERNEL_VERSION/$version/g;
+    run_command "$cp_post_install" or
+	dodie "Failed to run post install";
+}
+
+sub check_buildlog {
+    my ($patch) = @_;
+
+    my @files = `git show $patch | diffstat -l`;
+
+    open(IN, "git show $patch |") or
+	dodie "failed to show $patch";
+    while (<IN>) {
+	if (m,^--- a/(.*),) {
+	    chomp $1;
+	    $files[$#files] = $1;
+	}
+    }
+    close(IN);
+
+    open(IN, $buildlog) or dodie "Can't open $buildlog";
+    while (<IN>) {
+	if (/^\s*(.*?):.*(warning|error)/) {
+	    my $err = $1;
+	    foreach my $file (@files) {
+		my $fullpath = "$builddir/$file";
+		if ($file eq $err || $fullpath eq $err) {
+		    fail "$file built with warnings" and return 0;
+		}
+	    }
+	}
+    }
+    close(IN);
+
+    return 1;
+}
+
+sub build {
+    my ($type) = @_;
+    my $defconfig = "";
+
+    unlink $buildlog;
+
+    if ($type =~ /^useconfig:(.*)/) {
+	run_command "cp $1 $output_config" or
+	    dodie "could not copy $1 to .config";
+
+	$type = "oldconfig";
+    }
+
+    # old config can ask questions
+    if ($type eq "oldconfig") {
+	$type = "oldnoconfig";
+
+	# allow for empty configs
+	run_command "touch $output_config";
+
+	run_command "mv $output_config $outputdir/config_temp" or
+	    dodie "moving .config";
+
+	if (!$noclean && !run_command "$make mrproper") {
+	    dodie "make mrproper";
+	}
+
+	run_command "mv $outputdir/config_temp $output_config" or
+	    dodie "moving config_temp";
+
+    } elsif (!$noclean) {
+	unlink "$output_config";
+	run_command "$make mrproper" or
+	    dodie "make mrproper";
+    }
+
+    # add something to distinguish this build
+    open(OUT, "> $outputdir/localversion") or dodie("Can't make localversion file");
+    print OUT "$localversion\n";
+    close(OUT);
+
+    if (defined($minconfig)) {
+	$defconfig = "KCONFIG_ALLCONFIG=$minconfig";
+    }
+
+    run_command "$defconfig $make $type" or
+	dodie "failed make config";
+
+    $redirect = "$buildlog";
+    if (!run_command "$make $build_options") {
+	undef $redirect;
+	# bisect may need this to pass
+	return 0 if ($in_bisect);
+	fail "failed build" and return 0;
+    }
+    undef $redirect;
+
+    return 1;
+}
+
+sub halt {
+    if (!run_ssh "halt" or defined($power_off)) {
+	if (defined($poweroff_after_halt)) {
+	    sleep $poweroff_after_halt;
+	    run_command "$power_off";
+	}
+    } else {
+	# nope? then zap it!
+	run_command "$power_off";
+    }
+}
+
+sub success {
+    my ($i) = @_;
+
+    $successes++;
+
+    doprint "\n\n*******************************************\n";
+    doprint     "*******************************************\n";
+    doprint     "KTEST RESULT: TEST $i SUCCESS!!!!         **\n";
+    doprint     "*******************************************\n";
+    doprint     "*******************************************\n";
+
+    if ($i != $opt{"NUM_TESTS"} && !do_not_reboot) {
+	doprint "Reboot and wait $sleep_time seconds\n";
+	reboot;
+	start_monitor;
+	wait_for_monitor $sleep_time;
+	end_monitor;
+    }
+}
+
+sub get_version {
+    # get the release name
+    doprint "$make kernelrelease ... ";
+    $version = `$make kernelrelease | tail -1`;
+    chomp($version);
+    doprint "$version\n";
+}
+
+sub child_run_test {
+    my $failed = 0;
+
+    # child should have no power
+    $reboot_on_error = 0;
+    $poweroff_on_error = 0;
+    $die_on_failure = 1;
+
+    run_command $run_test or $failed = 1;
+    exit $failed;
+}
+
+my $child_done;
+
+sub child_finished {
+    $child_done = 1;
+}
+
+sub do_run_test {
+    my $child_pid;
+    my $child_exit;
+    my $line;
+    my $full_line;
+    my $bug = 0;
+
+    wait_for_monitor 1;
+
+    doprint "run test $run_test\n";
+
+    $child_done = 0;
+
+    $SIG{CHLD} = qw(child_finished);
+
+    $child_pid = fork;
+
+    child_run_test if (!$child_pid);
+
+    $full_line = "";
+
+    do {
+	$line = wait_for_input($monitor_fp, 1);
+	if (defined($line)) {
+
+	    # we are not guaranteed to get a full line
+	    $full_line .= $line;
+
+	    if ($full_line =~ /call trace:/i) {
+		$bug = 1;
+	    }
+
+	    if ($full_line =~ /Kernel panic -/) {
+		$bug = 1;
+	    }
+
+	    if ($line =~ /\n/) {
+		$full_line = "";
+	    }
+	}
+    } while (!$child_done && !$bug);
+
+    if ($bug) {
+	doprint "Detected kernel crash!\n";
+	# kill the child with extreme prejudice
+	kill 9, $child_pid;
+    }
+
+    waitpid $child_pid, 0;
+    $child_exit = $?;
+
+    if ($bug || $child_exit) {
+	return 0 if $in_bisect;
+	fail "test failed" and return 0;
+    }
+    return 1;
+}
+
+sub run_git_bisect {
+    my ($command) = @_;
+
+    doprint "$command ... ";
+
+    my $output = `$command 2>&1`;
+    my $ret = $?;
+
+    logit $output;
+
+    if ($ret) {
+	doprint "FAILED\n";
+	dodie "Failed to git bisect";
+    }
+
+    doprint "SUCCESS\n";
+    if ($output =~ m/^(Bisecting: .*\(roughly \d+ steps?\))\s+\[([[:xdigit:]]+)\]/) {
+	doprint "$1 [$2]\n";
+    } elsif ($output =~ m/^([[:xdigit:]]+) is the first bad commit/) {
+	$bisect_bad = $1;
+	doprint "Found bad commit... $1\n";
+	return 0;
+    } else {
+	# we already logged it, just print it now.
+	print $output;
+    }
+
+    return 1;
+}
+
+# returns 1 on success, 0 on failure
+sub run_bisect_test {
+    my ($type, $buildtype) = @_;
+
+    my $failed = 0;
+    my $result;
+    my $output;
+    my $ret;
+
+    $in_bisect = 1;
+
+    build $buildtype or $failed = 1;
+
+    if ($type ne "build") {
+	dodie "Failed on build" if $failed;
+
+	# Now boot the box
+	get_grub_index;
+	get_version;
+	install;
+
+	start_monitor;
+	monitor or $failed = 1;
+
+	if ($type ne "boot") {
+	    dodie "Failed on boot" if $failed;
+
+	    do_run_test or $failed = 1;
+	}
+	end_monitor;
+    }
+
+    if ($failed) {
+	$result = 0;
+
+	# reboot the box to a good kernel
+	if ($type ne "build") {
+	    doprint "Reboot and sleep $bisect_sleep_time seconds\n";
+	    reboot;
+	    start_monitor;
+	    wait_for_monitor $bisect_sleep_time;
+	    end_monitor;
+	}
+    } else {
+	$result = 1;
+    }
+    $in_bisect = 0;
+
+    return $result;
+}
+
+sub run_bisect {
+    my ($type) = @_;
+    my $buildtype = "oldconfig";
+
+    # Use the minconfig for the build if one was supplied
+    if (defined($minconfig)) {
+	$buildtype = "useconfig:$minconfig";
+    }
+
+    my $ret = run_bisect_test $type, $buildtype;
+
+
+    # Are we looking for where it worked, not failed?
+    if ($reverse_bisect) {
+	$ret = !$ret;
+    }
+
+    if ($ret) {
+	return "good";
+    } else {
+	return  "bad";
+    }
+}
+
+sub bisect {
+    my ($i) = @_;
+
+    my $result;
+
+    die "BISECT_GOOD[$i] not defined\n"	if (!defined($opt{"BISECT_GOOD[$i]"}));
+    die "BISECT_BAD[$i] not defined\n"	if (!defined($opt{"BISECT_BAD[$i]"}));
+    die "BISECT_TYPE[$i] not defined\n"	if (!defined($opt{"BISECT_TYPE[$i]"}));
+
+    my $good = $opt{"BISECT_GOOD[$i]"};
+    my $bad = $opt{"BISECT_BAD[$i]"};
+    my $type = $opt{"BISECT_TYPE[$i]"};
+    my $start = $opt{"BISECT_START[$i]"};
+    my $replay = $opt{"BISECT_REPLAY[$i]"};
+
+    # convert to true sha1's
+    $good = get_sha1($good);
+    $bad = get_sha1($bad);
+
+    if (defined($opt{"BISECT_REVERSE[$i]"}) &&
+	$opt{"BISECT_REVERSE[$i]"} == 1) {
+	doprint "Performing a reverse bisect (bad is good, good is bad!)\n";
+	$reverse_bisect = 1;
+    } else {
+	$reverse_bisect = 0;
+    }
+
+    # Can't have a test without having a test to run
+    if ($type eq "test" && !defined($run_test)) {
+	$type = "boot";
+    }
+
+    my $check = $opt{"BISECT_CHECK[$i]"};
+    if (defined($check) && $check ne "0") {
+
+	# get current HEAD
+	my $head = get_sha1("HEAD");
+
+	if ($check ne "good") {
+	    doprint "TESTING BISECT BAD [$bad]\n";
+	    run_command "git checkout $bad" or
+		die "Failed to checkout $bad";
+
+	    $result = run_bisect $type;
+
+	    if ($result ne "bad") {
+		fail "Tested BISECT_BAD [$bad] and it succeeded" and return 0;
+	    }
+	}
+
+	if ($check ne "bad") {
+	    doprint "TESTING BISECT GOOD [$good]\n";
+	    run_command "git checkout $good" or
+		die "Failed to checkout $good";
+
+	    $result = run_bisect $type;
+
+	    if ($result ne "good") {
+		fail "Tested BISECT_GOOD [$good] and it failed" and return 0;
+	    }
+	}
+
+	# checkout where we started
+	run_command "git checkout $head" or
+	    die "Failed to checkout $head";
+    }
+
+    run_command "git bisect start" or
+	dodie "could not start bisect";
+
+    run_command "git bisect good $good" or
+	dodie "could not set bisect good to $good";
+
+    run_git_bisect "git bisect bad $bad" or
+	dodie "could not set bisect bad to $bad";
+
+    if (defined($replay)) {
+	run_command "git bisect replay $replay" or
+	    dodie "failed to run replay";
+    }
+
+    if (defined($start)) {
+	run_command "git checkout $start" or
+	    dodie "failed to checkout $start";
+    }
+
+    my $test;
+    do {
+	$result = run_bisect $type;
+	$test = run_git_bisect "git bisect $result";
+    } while ($test);
+
+    run_command "git bisect log" or
+	dodie "could not capture git bisect log";
+
+    run_command "git bisect reset" or
+	dodie "could not reset git bisect";
+
+    doprint "Bad commit was [$bisect_bad]\n";
+
+    success $i;
+}
+
+my %config_ignore;
+my %config_set;
+
+my %config_list;
+my %null_config;
+
+my %dependency;
+
+sub process_config_ignore {
+    my ($config) = @_;
+
+    open (IN, $config)
+	or dodie "Failed to read $config";
+
+    while (<IN>) {
+	if (/^(.*?(CONFIG\S*)(=.*| is not set))/) {
+	    $config_ignore{$2} = $1;
+	}
+    }
+
+    close(IN);
+}
+
+sub read_current_config {
+    my ($config_ref) = @_;
+
+    %{$config_ref} = ();
+    undef %{$config_ref};
+
+    my @key = keys %{$config_ref};
+    if ($#key >= 0) {
+	print "did not delete!\n";
+	exit;
+    }
+    open (IN, "$output_config");
+
+    while (<IN>) {
+	if (/^(CONFIG\S+)=(.*)/) {
+	    ${$config_ref}{$1} = $2;
+	}
+    }
+    close(IN);
+}
+
+sub get_dependencies {
+    my ($config) = @_;
+
+    my $arr = $dependency{$config};
+    if (!defined($arr)) {
+	return ();
+    }
+
+    my @deps = @{$arr};
+
+    foreach my $dep (@{$arr}) {
+	print "ADD DEP $dep\n";
+	@deps = (@deps, get_dependencies $dep);
+    }
+
+    return @deps;
+}
+
+sub create_config {
+    my @configs = @_;
+
+    open(OUT, ">$output_config") or dodie "Can not write to $output_config";
+
+    foreach my $config (@configs) {
+	print OUT "$config_set{$config}\n";
+	my @deps = get_dependencies $config;
+	foreach my $dep (@deps) {
+	    print OUT "$config_set{$dep}\n";
+	}
+    }
+
+    foreach my $config (keys %config_ignore) {
+	print OUT "$config_ignore{$config}\n";
+    }
+    close(OUT);
+
+#    exit;
+    run_command "$make oldnoconfig" or
+	dodie "failed make oldnoconfig";
+
+}
+
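+# Compare two config hashes: return 1 if %a has an option that %b
+# lacks, -1 if %b has extra options, and 0 if they are identical.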
+sub compare_configs {
+    my (%a, %b) = @_;
+
+    foreach my $item (keys %a) {
+	if (!defined($b{$item})) {
+	    print "diff $item\n";
+	    return 1;
+	}
+	delete $b{$item};
+    }
+
+    my @keys = keys %b;
+    if ($#keys) {
+	print "diff2 $keys[0]\n";
+    }
+    return -1 if ($#keys >= 0);
+
+    return 0;
+}
+
+sub run_config_bisect_test {
+    my ($type) = @_;
+
+    return run_bisect_test $type, "oldconfig";
+}
+
+sub process_passed {
+    my (%configs) = @_;
+
+    doprint "These configs had no failure: (Enabling them for further compiles)\n";
+    # Passed! All these configs are part of a good compile.
+    # Add them to the min options.
+    foreach my $config (keys %configs) {
+	if (defined($config_list{$config})) {
+	    doprint " removing $config\n";
+	    $config_ignore{$config} = $config_list{$config};
+	    delete $config_list{$config};
+	}
+    }
+    doprint "config copied to $outputdir/config_good\n";
+    run_command "cp -f $output_config $outputdir/config_good";
+}
+
+sub process_failed {
+    my ($config) = @_;
+
+    doprint "\n\n***************************************\n";
+    doprint "Found bad config: $config\n";
+    doprint "***************************************\n\n";
+}
+
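+# One round of the config bisect: repeatedly build a .config from half
+# of the remaining candidate options and test it, narrowing the
+# candidates until a test passes or a single failing config is found.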
+sub run_config_bisect {
+
+    my @start_list = keys %config_list;
+
+    if ($#start_list < 0) {
+	doprint "No more configs to test!!!\n";
+	return -1;
+    }
+
+    doprint "***** RUN TEST ***\n";
+    my $type = $opt{"CONFIG_BISECT_TYPE[$iteration]"};
+    my $ret;
+    my %current_config;
+
+    my $count = $#start_list + 1;
+    doprint "  $count configs to test\n";
+
+    my $half = int($#start_list / 2);
+
+    do {
+	my @tophalf = @start_list[0 .. $half];
+
+	create_config @tophalf;
+	read_current_config \%current_config;
+
+	$count = $#tophalf + 1;
+	doprint "Testing $count configs\n";
+	my $found = 0;
+	# make sure we test something
+	foreach my $config (@tophalf) {
+	    if (defined($current_config{$config})) {
+		logit " $config\n";
+		$found = 1;
+	    }
+	}
+	if (!$found) {
+	    # try the other half
+	    doprint "Top half produced no set configs, trying bottom half\n";
+	    @tophalf = @start_list[$half .. $#start_list];
+	    create_config @tophalf;
+	    read_current_config \%current_config;
+	    foreach my $config (@tophalf) {
+		if (defined($current_config{$config})) {
+		    logit " $config\n";
+		    $found = 1;
+		}
+	    }
+	    if (!$found) {
+		doprint "Failed: Can't make new config with current configs\n";
+		foreach my $config (@start_list) {
+		    doprint "  CONFIG: $config\n";
+		}
+		return -1;
+	    }
+	    $count = $#tophalf + 1;
+	    doprint "Testing $count configs\n";
+	}
+
+	$ret = run_config_bisect_test $type;
+
+	if ($ret) {
+	    process_passed %current_config;
+	    return 0;
+	}
+
+	doprint "This config had a failure.\n";
+	doprint "Removing these configs that were not set in this config:\n";
+	doprint "config copied to $outputdir/config_bad\n";
+	run_command "cp -f $output_config $outputdir/config_bad";
+
+	# A config exists in this group that was bad.
+	foreach my $config (keys %config_list) {
+	    if (!defined($current_config{$config})) {
+		doprint " removing $config\n";
+		delete $config_list{$config};
+	    }
+	}
+
+	@start_list = @tophalf;
+
+	if ($#start_list == 0) {
+	    process_failed $start_list[0];
+	    return 1;
+	}
+
+	# remove half the configs we are looking at and see if
+	# they are good.
+	$half = int($#start_list / 2);
+    } while ($half > 0);
+
+    # we found a single config, try it again
+    my @tophalf = @start_list[0 .. 0];
+
+    $ret = run_config_bisect_test $type;
+    if ($ret) {
+	process_passed %current_config;
+	return 0;
+    }
+
+    process_failed $start_list[0];
+    return 1;
+}
+
+sub config_bisect {
+    my ($i) = @_;
+
+    my $start_config = $opt{"CONFIG_BISECT[$i]"};
+
+    my $tmpconfig = "$tmpdir/use_config";
+
+    # Make the file with the bad config and the min config
+    if (defined($minconfig)) {
+	# read the min config for things to ignore
+	run_command "cp $minconfig $tmpconfig" or
+	    dodie "failed to copy $minconfig to $tmpconfig";
+    } else {
+	unlink $tmpconfig;
+    }
+
+    # Add other configs
+    if (defined($addconfig)) {
+	run_command "cat $addconfig >> $tmpconfig" or
+	    dodie "failed to append $addconfig";
+    }
+
+    my $defconfig = "";
+    if (-f $tmpconfig) {
+	$defconfig = "KCONFIG_ALLCONFIG=$tmpconfig";
+	process_config_ignore $tmpconfig;
+    }
+
+    # now process the start config
+    run_command "cp $start_config $output_config" or
+	dodie "failed to copy $start_config to $output_config";
+
+    # read directly what we want to check
+    my %config_check;
+    open (IN, $output_config)
+	or dodie "failed to open $output_config";
+
+    while (<IN>) {
+	if (/^((CONFIG\S*)=.*)/) {
+	    $config_check{$2} = $1;
+	}
+    }
+    close(IN);
+
+    # Now run oldconfig with the minconfig (and addconfigs)
+    run_command "$defconfig $make oldnoconfig" or
+	dodie "failed to run make oldnoconfig";
+
+    # check to see what we lost (or gained)
+    open (IN, $output_config)
+	or dodie "Failed to read $start_config";
+
+    my %removed_configs;
+    my %added_configs;
+
+    while (<IN>) {
+	if (/^((CONFIG\S*)=.*)/) {
+	    # save off all options
+	    $config_set{$2} = $1;
+	    if (defined($config_check{$2})) {
+		if (defined($config_ignore{$2})) {
+		    $removed_configs{$2} = $1;
+		} else {
+		    $config_list{$2} = $1;
+		}
+	    } elsif (!defined($config_ignore{$2})) {
+		$added_configs{$2} = $1;
+		$config_list{$2} = $1;
+	    }
+	}
+    }
+    close(IN);
+
+    my @confs = keys %removed_configs;
+    if ($#confs >= 0) {
+	doprint "Configs overridden by default configs and removed from check:\n";
+	foreach my $config (@confs) {
+	    doprint " $config\n";
+	}
+    }
+    @confs = keys %added_configs;
+    if ($#confs >= 0) {
+	doprint "Configs appearing in make oldconfig and added:\n";
+	foreach my $config (@confs) {
+	    doprint " $config\n";
+	}
+    }
+
+    my %config_test;
+    my $once = 0;
+
+    # Sometimes kconfig does weird things. We must make sure
+    # that the config we autocreate has everything we need
+    # to test, otherwise we may miss testing configs, or
+    # may not be able to create a new config.
+    # Here we create a config with everything set.
+    create_config (keys %config_list);
+    read_current_config \%config_test;
+    foreach my $config (keys %config_list) {
+	if (!defined($config_test{$config})) {
+	    if (!$once) {
+		$once = 1;
+		doprint "Configs not produced by kconfig (will not be checked):\n";
+	    }
+	    doprint "  $config\n";
+	    delete $config_list{$config};
+	}
+    }
+    my $ret;
+    do {
+	$ret = run_config_bisect;
+    } while (!$ret);
+
+    return $ret if ($ret < 0);
+
+    success $i;
+}
+
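+# Check a series of commits: for every commit from PATCHCHECK_START to
+# PATCHCHECK_END, check it out, build it, and optionally boot and test it.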
+sub patchcheck {
+    my ($i) = @_;
+
+    die "PATCHCHECK_START[$i] not defined\n"
+	if (!defined($opt{"PATCHCHECK_START[$i]"}));
+    die "PATCHCHECK_TYPE[$i] not defined\n"
+	if (!defined($opt{"PATCHCHECK_TYPE[$i]"}));
+
+    my $start = $opt{"PATCHCHECK_START[$i]"};
+
+    my $end = "HEAD";
+    if (defined($opt{"PATCHCHECK_END[$i]"})) {
+	$end = $opt{"PATCHCHECK_END[$i]"};
+    }
+
+    # Get the true sha1's since we can use things like HEAD~3
+    $start = get_sha1($start);
+    $end = get_sha1($end);
+
+    my $type = $opt{"PATCHCHECK_TYPE[$i]"};
+
+    # Can't have a test without having a test to run
+    if ($type eq "test" && !defined($run_test)) {
+	$type = "boot";
+    }
+
+    open (IN, "git log --pretty=oneline $end|") or
+	dodie "could not get git list";
+
+    my @list;
+
+    while (<IN>) {
+	chomp;
+	$list[$#list+1] = $_;
+	last if (/^$start/);
+    }
+    close(IN);
+
+    if ($list[$#list] !~ /^$start/) {
+	fail "SHA1 $start not found";
+    }
+
+    # go backwards in the list
+    @list = reverse @list;
+
+    my $save_clean = $noclean;
+
+    $in_patchcheck = 1;
+    foreach my $item (@list) {
+	my $sha1 = $item;
+	$sha1 =~ s/^([[:xdigit:]]+).*/$1/;
+
+	doprint "\nProcessing commit $item\n\n";
+
+	run_command "git checkout $sha1" or
+	    die "Failed to checkout $sha1";
+
+	# only clean on the first and last patch
+	if ($item eq $list[0] ||
+	    $item eq $list[$#list]) {
+	    $noclean = $save_clean;
+	} else {
+	    $noclean = 1;
+	}
+
+	if (defined($minconfig)) {
+	    build "useconfig:$minconfig" or return 0;
+	} else {
+	    # ?? no config to use?
+	    build "oldconfig" or return 0;
+	}
+
+	check_buildlog $sha1 or return 0;
+
+	next if ($type eq "build");
+
+	get_grub_index;
+	get_version;
+	install;
+
+	my $failed = 0;
+
+	start_monitor;
+	monitor or $failed = 1;
+
+	if (!$failed && $type ne "boot"){
+	    do_run_test or $failed = 1;
+	}
+	end_monitor;
+	return 0 if ($failed);
+
+    }
+    $in_patchcheck = 0;
+    success $i;
+
+    return 1;
+}
+
+$#ARGV < 1 or die "ktest.pl version: $VERSION\n   usage: ktest.pl config-file\n";
+
+if ($#ARGV == 0) {
+    $ktest_config = $ARGV[0];
+    if (! -f $ktest_config) {
+	print "$ktest_config does not exist.\n";
+	my $ans;
+        for (;;) {
+	    print "Create it? [Y/n] ";
+	    $ans = <STDIN>;
+	    chomp $ans;
+	    if ($ans =~ /^\s*$/) {
+		$ans = "y";
+	    }
+	    last if ($ans =~ /^y$/i || $ans =~ /^n$/i);
+	    print "Please answer either 'y' or 'n'.\n";
+	}
+	if ($ans !~ /^y$/i) {
+	    exit 0;
+	}
+    }
+} else {
+    $ktest_config = "ktest.conf";
+}
+
+if (! -f $ktest_config) {
+    open(OUT, ">$ktest_config") or die "Can not create $ktest_config";
+    print OUT << "EOF"
+# Generated by ktest.pl
+#
+# Define each test with TEST_START
+# The config options below it will override the defaults
+TEST_START
+
+DEFAULTS
+EOF
+;
+    close(OUT);
+}
+read_config $ktest_config;
+
+# Append any configs entered in manually to the config file.
+my @new_configs = keys %entered_configs;
+if ($#new_configs >= 0) {
+    print "\nAppending entered in configs to $ktest_config\n";
+    open(OUT, ">>$ktest_config") or die "Can not append to $ktest_config";
+    foreach my $config (@new_configs) {
+	print OUT "$config = $entered_configs{$config}\n";
+	$opt{$config} = $entered_configs{$config};
+    }
+}
+
+if ($opt{"CLEAR_LOG"} && defined($opt{"LOG_FILE"})) {
+    unlink $opt{"LOG_FILE"};
+}
+
+doprint "\n\nSTARTING AUTOMATED TESTS\n\n";
+
+for (my $i = 0, my $repeat = 1; $i <= $opt{"NUM_TESTS"}; $i += $repeat) {
+
+    if (!$i) {
+	doprint "DEFAULT OPTIONS:\n";
+    } else {
+	doprint "\nTEST $i OPTIONS";
+	if (defined($repeat_tests{$i})) {
+	    $repeat = $repeat_tests{$i};
+	    doprint " ITERATE $repeat";
+	}
+	doprint "\n";
+    }
+
+    foreach my $option (sort keys %opt) {
+
+	if ($option =~ /\[(\d+)\]$/) {
+	    next if ($i != $1);
+	} else {
+	    next if ($i);
+	}
+
+	doprint "$option = $opt{$option}\n";
+    }
+}
+
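+# Look up an option for test $i: first the test-specific value, then the
+# value from the test this one is an iteration of, and finally the default.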
+sub set_test_option {
+    my ($name, $i) = @_;
+
+    my $option = "$name\[$i\]";
+
+    if (defined($opt{$option})) {
+	return $opt{$option};
+    }
+
+    foreach my $test (keys %repeat_tests) {
+	if ($i >= $test &&
+	    $i < $test + $repeat_tests{$test}) {
+	    $option = "$name\[$test\]";
+	    if (defined($opt{$option})) {
+		return $opt{$option};
+	    }
+	}
+    }
+
+    if (defined($opt{$name})) {
+	return $opt{$name};
+    }
+
+    return undef;
+}
+
+# The first thing we need to do is the builds
+for (my $i = 1; $i <= $opt{"NUM_TESTS"}; $i++) {
+
+    $iteration = $i;
+
+    my $makecmd = set_test_option("MAKE_CMD", $i);
+
+    $machine = set_test_option("MACHINE", $i);
+    $ssh_user = set_test_option("SSH_USER", $i);
+    $tmpdir = set_test_option("TMP_DIR", $i);
+    $outputdir = set_test_option("OUTPUT_DIR", $i);
+    $builddir = set_test_option("BUILD_DIR", $i);
+    $test_type = set_test_option("TEST_TYPE", $i);
+    $build_type = set_test_option("BUILD_TYPE", $i);
+    $build_options = set_test_option("BUILD_OPTIONS", $i);
+    $power_cycle = set_test_option("POWER_CYCLE", $i);
+    $reboot = set_test_option("REBOOT", $i);
+    $noclean = set_test_option("BUILD_NOCLEAN", $i);
+    $minconfig = set_test_option("MIN_CONFIG", $i);
+    $run_test = set_test_option("TEST", $i);
+    $addconfig = set_test_option("ADD_CONFIG", $i);
+    $reboot_type = set_test_option("REBOOT_TYPE", $i);
+    $grub_menu = set_test_option("GRUB_MENU", $i);
+    $post_install = set_test_option("POST_INSTALL", $i);
+    $reboot_script = set_test_option("REBOOT_SCRIPT", $i);
+    $reboot_on_error = set_test_option("REBOOT_ON_ERROR", $i);
+    $poweroff_on_error = set_test_option("POWEROFF_ON_ERROR", $i);
+    $die_on_failure = set_test_option("DIE_ON_FAILURE", $i);
+    $power_off = set_test_option("POWER_OFF", $i);
+    $powercycle_after_reboot = set_test_option("POWERCYCLE_AFTER_REBOOT", $i);
+    $poweroff_after_halt = set_test_option("POWEROFF_AFTER_HALT", $i);
+    $sleep_time = set_test_option("SLEEP_TIME", $i);
+    $bisect_sleep_time = set_test_option("BISECT_SLEEP_TIME", $i);
+    $store_failures = set_test_option("STORE_FAILURES", $i);
+    $timeout = set_test_option("TIMEOUT", $i);
+    $booted_timeout = set_test_option("BOOTED_TIMEOUT", $i);
+    $console = set_test_option("CONSOLE", $i);
+    $success_line = set_test_option("SUCCESS_LINE", $i);
+    $stop_after_success = set_test_option("STOP_AFTER_SUCCESS", $i);
+    $stop_after_failure = set_test_option("STOP_AFTER_FAILURE", $i);
+    $build_target = set_test_option("BUILD_TARGET", $i);
+    $ssh_exec = set_test_option("SSH_EXEC", $i);
+    $scp_to_target = set_test_option("SCP_TO_TARGET", $i);
+    $target_image = set_test_option("TARGET_IMAGE", $i);
+    $localversion = set_test_option("LOCALVERSION", $i);
+
+    chdir $builddir || die "can't change directory to $builddir";
+
+    if (!-d $tmpdir) {
+	mkpath($tmpdir) or
+	    die "can't create $tmpdir";
+    }
+
+    $ENV{"SSH_USER"} = $ssh_user;
+    $ENV{"MACHINE"} = $machine;
+
+    $target = "$ssh_user\@$machine";
+
+    $buildlog = "$tmpdir/buildlog-$machine";
+    $dmesg = "$tmpdir/dmesg-$machine";
+    $make = "$makecmd O=$outputdir";
+    $output_config = "$outputdir/.config";
+
+    if ($reboot_type eq "grub") {
+	dodie "GRUB_MENU not defined" if (!defined($grub_menu));
+    } elsif (!defined($reboot_script)) {
+	dodie "REBOOT_SCRIPT not defined"
+    }
+
+    my $run_type = $build_type;
+    if ($test_type eq "patchcheck") {
+	$run_type = $opt{"PATCHCHECK_TYPE[$i]"};
+    } elsif ($test_type eq "bisect") {
+	$run_type = $opt{"BISECT_TYPE[$i]"};
+    } elsif ($test_type eq "config_bisect") {
+	$run_type = $opt{"CONFIG_BISECT_TYPE[$i]"};
+    }
+
+    # mistake in config file?
+    if (!defined($run_type)) {
+	$run_type = "ERROR";
+    }
+
+    doprint "\n\n";
+    doprint "RUNNING TEST $i of $opt{NUM_TESTS} with option $test_type $run_type\n\n";
+
+    unlink $dmesg;
+    unlink $buildlog;
+
+    if (!defined($minconfig)) {
+	$minconfig = $addconfig;
+
+    } elsif (defined($addconfig)) {
+	run_command "cat $addconfig $minconfig > $tmpdir/add_config" or
+	    dodie "Failed to create temp config";
+	$minconfig = "$tmpdir/add_config";
+    }
+
+    my $checkout = $opt{"CHECKOUT[$i]"};
+    if (defined($checkout)) {
+	run_command "git checkout $checkout" or
+	    die "failed to checkout $checkout";
+    }
+
+    if ($test_type eq "bisect") {
+	bisect $i;
+	next;
+    } elsif ($test_type eq "config_bisect") {
+	config_bisect $i;
+	next;
+    } elsif ($test_type eq "patchcheck") {
+	patchcheck $i;
+	next;
+    }
+
+    if ($build_type ne "nobuild") {
+	build $build_type or next;
+    }
+
+    if ($test_type ne "build") {
+	get_grub_index;
+	get_version;
+	install;
+
+	my $failed = 0;
+	start_monitor;
+	monitor or $failed = 1;
+
+	if (!$failed && $test_type ne "boot" && defined($run_test)) {
+	    do_run_test or $failed = 1;
+	}
+	end_monitor;
+	next if ($failed);
+    }
+
+    success $i;
+}
+
+if ($opt{"POWEROFF_ON_SUCCESS"}) {
+    halt;
+} elsif ($opt{"REBOOT_ON_SUCCESS"} && !do_not_reboot) {
+    reboot;
+}
+
+doprint "\n    $successes of $opt{NUM_TESTS} tests were successful\n\n";
+
+exit 0;
diff --git a/tools/testing/ktest/sample.conf b/tools/testing/ktest/sample.conf
new file mode 100644
index 0000000..3408c59
--- /dev/null
+++ b/tools/testing/ktest/sample.conf
@@ -0,0 +1,622 @@
+#
+# Config file for ktest.pl
+#
+# Note, all paths must be absolute
+#
+
+# Options set in the beginning of the file are considered to be
+# default options. These options can be overridden by test specific
+# options, with the following exceptions:
+#
+#  LOG_FILE
+#  CLEAR_LOG
+#  POWEROFF_ON_SUCCESS
+#  REBOOT_ON_SUCCESS
+#
+# Test specific options are set after the label:
+#
+# TEST_START
+#
+# The options after a TEST_START label are specific to that test.
+# Each TEST_START label will set up a new test. If you want to
+# perform a test more than once, you can add the ITERATE label
+# to it followed by the number of times you want that test
+# to iterate. If the ITERATE is left off, the test will only
+# be performed once.
+#
+# TEST_START ITERATE 10
+#
+# You can skip a test by adding SKIP (before or after the ITERATE
+# and number)
+#
+# TEST_START SKIP
+#
+# TEST_START SKIP ITERATE 10
+#
+# TEST_START ITERATE 10 SKIP
+#
+# The SKIP label causes the options and the test itself to be ignored.
+# This is useful for setting up several different tests in one config file,
+# and only enabling the ones you want to use for a given test run.
+#
+# You can add default options anywhere in the file as well
+# with the DEFAULTS tag. This allows you to have default options
+# after the test options to keep the test options at the top
+# of the file. You can even place the DEFAULTS tag between
+# test cases (but not in the middle of a single test case)
+#
+# TEST_START
+# MIN_CONFIG = /home/test/config-test1
+#
+# DEFAULTS
+# MIN_CONFIG = /home/test/config-default
+#
+# TEST_START ITERATE 10
+#
+# The above will run the first test with MIN_CONFIG set to
+# /home/test/config-test1. Then 10 tests will be executed
+# with MIN_CONFIG set to /home/test/config-default.
+#
+# You can also disable defaults with the SKIP option
+#
+# DEFAULTS SKIP
+# MIN_CONFIG = /home/test/config-use-sometimes
+#
+# DEFAULTS
+# MIN_CONFIG = /home/test/config-most-times
+#
+# The above will ignore the first MIN_CONFIG. If you want to
+# use the first MIN_CONFIG, remove the SKIP from the first
+# DEFAULTS tag and add it to the second. Be careful: options
+# may only be declared once per test or defaults section. If the
+# same option name appears twice under the same test or defaults
+# section, ktest will fail to execute and no tests will run.
+#
+
+
+#### Mandatory Default Options ####
+
+# These options must be in the default section, although most
+# may be overridden by test options.
+
+# The machine hostname that you will test
+#MACHINE = target
+
+# The box is expected to have ssh on normal bootup, provide the user
+#  (most likely root, since you need privileged operations)
+#SSH_USER = root
+
+# The directory that contains the Linux source code
+#BUILD_DIR = /home/test/linux.git
+
+# The directory where the objects will be built
+# (can not be the same as BUILD_DIR)
+#OUTPUT_DIR = /home/test/build/target
+
+# The location of the compiled file to copy to the target
+# (relative to OUTPUT_DIR)
+#BUILD_TARGET = arch/x86/boot/bzImage
+
+# The place to put your image on the test machine
+#TARGET_IMAGE = /boot/vmlinuz-test
+
+# A script or command to reboot the box
+#
+# Here is a digital loggers power switch example
+#POWER_CYCLE = wget --no-proxy -O /dev/null -q  --auth-no-challenge 'http://admin:admin@power/outlet?5=CCL'
+#
+# Here is an example to reboot a virtual box on the current host
+# with the name "Guest".
+#POWER_CYCLE = virsh destroy Guest; sleep 5; virsh start Guest
+
+# The script or command that reads the console
+#
+#  If you use ttywatch server, something like the following would work.
+#CONSOLE = nc -d localhost 3001
+#
+# For a virtual machine with guest name "Guest".
+#CONSOLE =  virsh console Guest
+
+# Required version ending to differentiate the test
+# from other linux builds on the system.
+#LOCALVERSION = -test
+
+# The grub title name for the test kernel to boot
+# (Only mandatory if REBOOT_TYPE = grub)
+#
+# Note, ktest.pl will not update the grub menu.lst, you need to
+# manually add an option for the test. ktest.pl will search
+# the grub menu.lst for this option to find what kernel to
+# reboot into.
+#
+# For example, if in the /boot/grub/menu.lst the test kernel title has:
+# title Test Kernel
+# kernel vmlinuz-test
+#GRUB_MENU = Test Kernel
+
+# A script to reboot the target into the test kernel
+# (Only mandatory if REBOOT_TYPE = script)
+#REBOOT_SCRIPT =
+
+#### Optional Config Options (all have defaults) ####
+
+# Start a test setup. If you leave this off, all options
+# will be default and the test will run once.
+# This is a label and not really an option (it takes no value).
+# You can append ITERATE and a number after it to iterate the
+# test a number of times, or SKIP to ignore this test.
+#
+#TEST_START
+#TEST_START ITERATE 5
+#TEST_START SKIP
+
+# Have the following options as default again. Used after tests
+# have already been defined by TEST_START. Optionally, you can
+# just define all default options before the first TEST_START
+# and you do not need this option.
+#
+# This is a label and not really an option (it takes no value).
+# You can append SKIP to this label and the options within this
+# section will be ignored.
+#
+# DEFAULTS
+# DEFAULTS SKIP
+
+# The default test type (default test)
+# The test types may be:
+#   build - only build the kernel, do nothing else
+#   boot - build and boot the kernel
+#   test - build, boot and if TEST is set, run the test script
+#          (If TEST is not set, it defaults back to boot)
+#   bisect - Perform a bisect on the kernel (see BISECT_TYPE below)
+#   patchcheck - Do a test on a series of commits in git (see PATCHCHECK below)
+#TEST_TYPE = test
+
+# Test to run if there is a successful boot and TEST_TYPE is test.
+# Must exit with 0 on success and non zero on error
+# default (undefined)
+#TEST = ssh user@machine /root/run_test
+
+# The build type is any make config type or special command
+#  (default randconfig)
+#   nobuild - skip the clean and build step
+#   useconfig:/path/to/config - use the given config and run
+#              oldconfig on it.
+# This option is ignored if TEST_TYPE is patchcheck or bisect
+#BUILD_TYPE = randconfig
+
+# The make command (default make)
+# If you are building a 32bit x86 on a 64 bit host
+#MAKE_CMD = CC=i386-gcc AS=i386-as make ARCH=i386
+
+# Any build options for the make of the kernel (not for other makes, like configs)
+# (default "")
+#BUILD_OPTIONS = -j20
+
+# If you need an initrd, you can add a script or code here to install
+# it. The environment variable KERNEL_VERSION will be set to the
+# kernel version that is used. Remember to add the initrd line
+# to your grub menu.lst file.
+#
+# Here's a couple of examples to use:
+#POST_INSTALL = ssh user@target /sbin/mkinitrd --allow-missing -f /boot/initramfs-test.img $KERNEL_VERSION
+#
+# or on some systems:
+#POST_INSTALL = ssh user@target /sbin/dracut -f /boot/initramfs-test.img $KERNEL_VERSION
+
+# Way to reboot the box to the test kernel.
+# Only valid options so far are "grub" and "script"
+# (default grub)
+# If you specify grub, it will assume grub version 1
+# and will search in /boot/grub/menu.lst for the title $GRUB_MENU
+# and select that target to reboot to the kernel. If this is not
+# your setup, then specify "script" and have a command or script
+# specified in REBOOT_SCRIPT to boot to the target.
+#
+# The entry in /boot/grub/menu.lst must be entered in manually.
+# The test will not modify that file.
+#REBOOT_TYPE = grub
+
+# The min config that is needed to build for the machine
+# A nice way to create this is with the following:
+#
+#   $ ssh target
+#   $ lsmod > mymods
+#   $ scp mymods host:/tmp
+#   $ exit
+#   $ cd linux.git
+#   $ rm .config
+#   $ make LSMOD=mymods localyesconfig
+#   $ grep '^CONFIG' .config > /home/test/config-min
+#
+# If you want even less configs:
+#
+#   log in directly to target (do not ssh)
+#
+#   $ su
+#   # lsmod | cut -d' ' -f1 | xargs rmmod
+#
+#   repeat the above several times
+#
+#   # lsmod > mymods
+#   # reboot
+#
+# You may need to reboot to get your network back in order to copy
+# mymods to the host; then remove the previous .config and run
+# localyesconfig again. The MIN_CONFIG generated like this will
+# not guarantee network activity on the box, so the TEST_TYPE of
+# test may fail.
+#
+# You might also want to set:
+#   CONFIG_CMDLINE="<your options here>"
+#  randconfig may set the above and override your real command
+#  line options.
+# (default undefined)
+#MIN_CONFIG = /home/test/config-min
+
+# Sometimes there are options that just break the boot and
+# that you do not care about. Here are a few:
+#   # CONFIG_STAGING is not set
+#  Staging drivers are horrible, and can break the build.
+#   # CONFIG_SCSI_DEBUG is not set
+#  SCSI_DEBUG may change your root partition
+#   # CONFIG_KGDB_SERIAL_CONSOLE is not set
+#  KGDB may cause oops waiting for a connection that's not there.
+# This option points to the file containing config options that will be prepended
+# to the MIN_CONFIG (or be the MIN_CONFIG if it is not set)
+#
+# Note, config options in MIN_CONFIG will override these options.
+#
+# (default undefined)
+#ADD_CONFIG = /home/test/config-broken (see the example below)
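+#
+# For example, the config-broken file above could simply contain the
+# three lines listed earlier:
+#
+#   # CONFIG_STAGING is not set
+#   # CONFIG_SCSI_DEBUG is not set
+#   # CONFIG_KGDB_SERIAL_CONSOLE is not set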
+
+# The location on the host where to write temp files
+# (default /tmp/ktest)
+#TMP_DIR = /tmp/ktest
+
+# Optional log file to write the status (recommended)
+#  Note, this is a DEFAULT section only option.
+# (default undefined)
+#LOG_FILE = /home/test/logfiles/target.log
+
+# Remove old logfile if it exists before starting all tests.
+#  Note, this is a DEFAULT section only option.
+# (default 0)
+#CLEAR_LOG = 0
+
+# Line to define a successful boot up in console output.
+# This is what the line contains, not the entire line. If you need
+# the entire line to match, then use regular expression syntax like:
+#  (do not add any quotes around it)
+#
+#  SUCCESS_LINE = ^MyBox Login:$
+#
+# (default "login:")
+#SUCCESS_LINE = login:
+
+# In case the console constantly fills the screen, having
+# a specified time to stop the test after success is recommended.
+# (in seconds)
+# (default 10)
+#STOP_AFTER_SUCCESS = 10
+
+# In case the console constantly fills the screen, having
+# a specified time to stop the test after failure is recommended.
+# (in seconds)
+# (default 60)
+#STOP_AFTER_FAILURE = 60
+
+# Stop testing if a build fails. If set, the script will end if
+# a failure is detected, otherwise it will save off the .config,
+# dmesg and bootlog in a directory called
+# MACHINE-TEST_TYPE_BUILD_TYPE-fail-yyyymmddhhmmss
+# if the STORE_FAILURES directory is set.
+# (default 1)
+# Note, even if this is set to zero, there are some errors that still
+# stop the tests.
+#DIE_ON_FAILURE = 1
+
+# Directory to store failure directories on failure. If this is not
+# set, DIE_ON_FAILURE=0 will not save off the .config, dmesg and
+# bootlog. This option is ignored if DIE_ON_FAILURE is not set.
+# (default undefined)
+#STORE_FAILURES = /home/test/failures
+
+# Build without doing a make mrproper, or removing .config
+# (default 0)
+#BUILD_NOCLEAN = 0
+
+# Once the test hits the SUCCESS_LINE while reading the console,
+# the time it waits for the monitor to settle down between reads
+# can usually be lowered.
+# (in seconds) (default 1)
+#BOOTED_TIMEOUT = 1
+
+# The timeout in seconds after which we consider the box hung once
+# the console stops producing output. Be sure to leave enough
+# time here to get past a reboot. Some machines may not produce
+# any console output for a long time during a reboot. You do
+# not want the test to fail just because the system was in
+# the process of rebooting to the test kernel.
+# (default 120)
+#TIMEOUT = 120
+
+# In between tests, a reboot of the box may occur, and this
+# is the time to wait for the console after it stops producing
+# output. Some machines may not produce a large lag on reboot
+# so this should accommodate it.
+# The difference between this and TIMEOUT is that TIMEOUT happens
+# when rebooting to the test kernel. This sleep time happens
+# after a test has completed and we are about to start running
+# another test. If a reboot to the reliable kernel happens,
+# we wait SLEEP_TIME for the console to stop producing output
+# before starting the next test.
+# (default 60)
+#SLEEP_TIME = 60
+
+# The time in between bisects to sleep (in seconds)
+# (default 60)
+#BISECT_SLEEP_TIME = 60
+
+# Reboot the target box on error (default 0)
+#REBOOT_ON_ERROR = 0
+
+# Power off the target on error (ignored if REBOOT_ON_ERROR is set)
+#  Note, this is a DEFAULT section only option.
+# (default 0)
+#POWEROFF_ON_ERROR = 0
+
+# Power off the target after all tests have completed successfully
+#  Note, this is a DEFAULT section only option.
+# (default 0)
+#POWEROFF_ON_SUCCESS = 0
+
+# Reboot the target after all test completed successfully (default 1)
+# (ignored if POWEROFF_ON_SUCCESS is set)
+#REBOOT_ON_SUCCESS = 1
+
+# In case there are issues with rebooting, you can specify this
+# to always powercycle after this amount of time after calling
+# reboot.
+# Note, POWERCYCLE_AFTER_REBOOT = 0 does NOT disable it. It just
+# makes it powercycle immediately after rebooting. Do not define
+# it if you do not want it.
+# (default undefined)
+#POWERCYCLE_AFTER_REBOOT = 5
+
+# In case there are issues with halting, you can specify this
+# to always poweroff after this amount of time after calling
+# halt.
+# Note, POWEROFF_AFTER_HALT = 0 does NOT disable it. It just
+# makes it poweroff immediately after halting. Do not define
+# it if you do not want it.
+# (default undefined)
+#POWEROFF_AFTER_HALT = 20
+
+# A script or command to power off the box (default undefined)
+# Needed for POWEROFF_ON_ERROR and SUCCESS
+#
+# Example for digital loggers power switch:
+#POWER_OFF = wget --no-proxy -O /dev/null -q  --auth-no-challenge 'http://admin:admin@power/outlet?5=OFF'
+#
+# Example for a virtual guest called "Guest".
+#POWER_OFF = virsh destroy Guest
+
+# The way to execute a command on the target
+# (default ssh $SSH_USER@$MACHINE $SSH_COMMAND)
+# The variables SSH_USER, MACHINE and SSH_COMMAND are defined
+#SSH_EXEC = ssh $SSH_USER@$MACHINE $SSH_COMMAND
+
+# The way to copy a file to the target
+# (default scp $SRC_FILE $SSH_USER@$MACHINE:$DST_FILE)
+# The variables SSH_USER, MACHINE, SRC_FILE and DST_FILE are defined.
+#SCP_TO_TARGET = scp $SRC_FILE $SSH_USER@$MACHINE:$DST_FILE
+
+# The nice way to reboot the target
+# (default ssh $SSH_USER@$MACHINE reboot)
+# The variables SSH_USER and MACHINE are defined.
+#REBOOT = ssh $SSH_USER@$MACHINE reboot
+
+#### Per test run options ####
+# The following options are only allowed in TEST_START sections.
+# They are ignored in the DEFAULTS sections.
+#
+# All of these are optional and undefined by default, although
+#  some of these options are required for TEST_TYPE of patchcheck
+#  and bisect.
+#
+#
+# CHECKOUT = branch
+#
+#  If the BUILD_DIR is a git repository, then you can set this option
+#  to check out the given branch before running the TEST. If you
+#  specify this for the first run, that branch will be used for
+#  all subsequent tests until a new CHECKOUT is set.
+#
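+#  For example, a test section that just boots a given branch might
+#  look like this (reusing the "mybranch" name from the patchcheck
+#  example below):
+#
+#   TEST_START
+#   TEST_TYPE = boot
+#   CHECKOUT = mybranch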
+#
+#
+# For TEST_TYPE = patchcheck
+#
+#  This expects the BUILD_DIR to be a git repository, and
+#  will checkout the PATCHCHECK_START commit.
+#
+#  The option BUILD_TYPE will be ignored.
+#
+#  The MIN_CONFIG will be used for all builds of the patchcheck. The build type
+#  used for patchcheck is oldconfig.
+#
+#  PATCHCHECK_START is required and is the first patch to
+#   test (the SHA1 of the commit). You may also specify anything
+#   that git checkout allows (branch name, tag, HEAD~3).
+#
+#  PATCHCHECK_END is the last patch to check (default HEAD)
+#
+#  PATCHCHECK_TYPE is required and is the type of test to run:
+#      build, boot, test.
+#
+#   Note, the build test will look for warnings; if a warning occurs
+#     in a file that a commit touches, the build will fail.
+#
+#   If BUILD_NOCLEAN is set, then make mrproper will not be run on
+#   any of the builds, just like all other TEST_TYPE tests. But
+#   what makes patchcheck different from the other tests is that if
+#   BUILD_NOCLEAN is not set, only the first and last patches run
+#   make mrproper. This helps speed up the test.
+#
+# Example:
+#   TEST_START
+#   TEST_TYPE = patchcheck
+#   CHECKOUT = mybranch
+#   PATCHCHECK_TYPE = boot
+#   PATCHCHECK_START = 747e94ae3d1b4c9bf5380e569f614eb9040b79e7
+#   PATCHCHECK_END = HEAD~2
+#
+#
+#
+# For TEST_TYPE = bisect
+#
+#  You can specify a git bisect if the BUILD_DIR is a git repository.
+#  The MIN_CONFIG will be used for all builds of the bisect. The build type
+#  used for bisecting is oldconfig.
+#
+#  The option BUILD_TYPE will be ignored.
+#
+#  BISECT_TYPE is the type of test to perform:
+#	build	- bad fails to build
+#	boot	- bad builds but fails to boot
+#	test	- bad boots but fails a test
+#
+# BISECT_GOOD is the commit (SHA1) to label as good (accepts all git good commit types)
+# BISECT_BAD is the commit to label as bad (accepts all git bad commit types)
+#
+# The above three options are required for a bisect operation.
+#
+# BISECT_REPLAY = /path/to/replay/file (optional, default undefined)
+#
+#   If an operation in the bisect fails when it was not expected to
+#   fail, the test ends. The state of the BUILD_DIR will be
+#   left at the point where the failure occurred. You can examine the
+#   reason for the failure, and perhaps even find a git commit
+#   that would work to continue with. You can run:
+#
+#   git bisect log > /path/to/replay/file
+#
+#   Then adding:
+#
+#    BISECT_REPLAY = /path/to/replay/file
+#
+#   And running the test again. The test will perform the initial
+#    git bisect start, git bisect good, and git bisect bad, and
+#    then it will run git bisect replay on this file, before
+#    continuing with the bisect.
+#
+# BISECT_START = commit (optional, default undefined)
+#
+#   As with BISECT_REPLAY, use this if the test failed on a commit
+#   that just happened to be a bad commit in the middle of the bisect,
+#   and you need to skip it. If BISECT_START is defined, that commit
+#   will be checked out after doing the initial git bisect start,
+#   git bisect good, and git bisect bad, and after running the
+#   git bisect replay if BISECT_REPLAY is set.
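+#
+#   For example, a rerun of a failed bisect could add the following to
+#   its TEST_START section (the start commit below is hypothetical):
+#
+#    BISECT_REPLAY = /path/to/replay/file
+#    BISECT_START = v2.6.36-rc1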
+#
+# BISECT_REVERSE = 1 (optional, default 0)
+#
+#   In those strange instances where it was broken forever
+#   and you are trying to find where it started to work!
+#   Set BISECT_GOOD to the commit that was last known to fail.
+#   Set BISECT_BAD to the commit that is known to start working.
+#   With BISECT_REVERSE = 1, the test will consider failures as
+#   good, and successes as bad.
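+#
+#   As a sketch, a reverse bisect hunting for the commit that fixed a
+#   boot failure might look like this (the commits are hypothetical):
+#
+#   TEST_START
+#   TEST_TYPE = bisect
+#   BISECT_REVERSE = 1
+#   BISECT_TYPE = boot
+#   BISECT_GOOD = v2.6.35
+#   BISECT_BAD = v2.6.36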
+#
+# BISECT_CHECK = 1 (optional, default 0)
+#
+#   Just to be sure the good is good and the bad is bad, setting
+#   BISECT_CHECK to 1 will start the bisect by first checking
+#   out BISECT_BAD and making sure it fails, then checking
+#   out BISECT_GOOD and making sure it succeeds, before starting
+#   the bisect (it works for BISECT_REVERSE too).
+#
+#   You can limit the test to just check BISECT_GOOD or
+#   BISECT_BAD with BISECT_CHECK = good or
+#   BISECT_CHECK = bad, respectively.
+#
+# Example:
+#   TEST_START
+#   TEST_TYPE = bisect
+#   BISECT_GOOD = v2.6.36
+#   BISECT_BAD = b5153163ed580e00c67bdfecb02b2e3843817b3e
+#   BISECT_TYPE = build
+#   MIN_CONFIG = /home/test/config-bisect
+#
+#
+#
+# For TEST_TYPE = config_bisect
+#
+#  In those cases where you have two different configs: one of them
+#  works, the other does not, and you do not know which config option
+#  causes the problem.
+#  The TEST_TYPE config_bisect will bisect the bad config looking for
+#  the config option that causes the failure.
+#
+#  The way it works is this:
+#
+#   First it finds a config to work with. Since a different version, or
+#   MIN_CONFIG may cause different dependencies, it must run through this
+#   preparation.
+#
+#   Overwrites any config set in the bad config with a config set in
+#   either the MIN_CONFIG or ADD_CONFIG. Thus, make sure these configs
+#   are minimal and do not disable configs you want to test:
+#   (ie.  # CONFIG_FOO is not set).
+#
+#   An oldconfig is run on the bad config and any new config that
+#   appears will be added to the configs to test.
+#
+#   Finally, it generates a config with the above result and runs it
+#   again through make oldconfig to produce a config that should be
+#   satisfied by kconfig.
+#
+#   Then it starts the bisect.
+#
+#   The configs to test are cut in half. If all the configs in this
+#   half depend on a config in the other half, then the other half
+#   is tested instead. If no configs are enabled by either half, then
+#   this means a circular dependency exists and the test fails.
+#
+#   A config is created with the test half, and the bisect test is run.
+#
+#   If the bisect succeeds, then all configs in the generated config
+#   are removed from the configs to test and added to the configs that
+#   will be enabled for all builds (they will be enabled, but not be part
+#   of the configs to examine).
+#
+#   If the bisect fails, then all test configs that were not enabled by
+#   the config file are removed from the test. These configs will not
+#   be enabled in future tests. Since the current config failed, we consider
+#   this to be a subset of the config that we started with.
+#
+#   When we are down to one config, it is considered the bad config.
+#
+#   Note, the config chosen may not be the true bad config. Due to
+#   dependencies and selections of the kbuild system, multiple
+#   configs may be needed to cause a failure. If you disable the
+#   config that was found, restart the test, and the test fails
+#   again, it is recommended to rerun the config_bisect with a new
+#   bad config that does not have the found config enabled.
+#
+#  The option BUILD_TYPE will be ignored.
+#
+#  CONFIG_BISECT_TYPE is the type of test to perform:
+#	build	- bad fails to build
+#	boot	- bad builds but fails to boot
+#	test	- bad boots but fails a test
+#
+#   CONFIG_BISECT is the config that failed to boot
+#
+# Example:
+#   TEST_START
+#   TEST_TYPE = config_bisect
+#   CONFIG_BISECT_TYPE = build
+#   CONFIG_BISECT = /home/test/config-bad
+#   MIN_CONFIG = /home/test/config-min
+#
diff --git a/usr/Kconfig b/usr/Kconfig
index c2c7fe2..4780dea 100644
--- a/usr/Kconfig
+++ b/usr/Kconfig
@@ -72,6 +72,15 @@
 	  Support loading of a LZMA encoded initial ramdisk or cpio buffer
 	  If unsure, say N.
 
+config RD_XZ
+	bool "Support initial ramdisks compressed using XZ" if EMBEDDED
+	default !EMBEDDED
+	depends on BLK_DEV_INITRD
+	select DECOMPRESS_XZ
+	help
+	  Support loading of an XZ encoded initial ramdisk or cpio buffer.
+	  If unsure, say N.
+
 config RD_LZO
 	bool "Support initial ramdisks compressed using LZO" if EMBEDDED
 	default !EMBEDDED
@@ -139,6 +148,15 @@
 	  three. Compression is slowest. The initramfs size is about 33%
 	  smaller with LZMA in comparison to gzip.
 
+config INITRAMFS_COMPRESSION_XZ
+	bool "XZ"
+	depends on RD_XZ
+	help
+	  XZ uses the LZMA2 algorithm. The initramfs size is about 30%
+	  smaller with XZ in comparison to gzip. Decompression speed
+	  is better than that of bzip2 but worse than gzip and LZO.
+	  Compression is slow.
+
 config INITRAMFS_COMPRESSION_LZO
 	bool "LZO"
 	depends on RD_LZO
diff --git a/usr/Makefile b/usr/Makefile
index 6faa444..029ffe6 100644
--- a/usr/Makefile
+++ b/usr/Makefile
@@ -15,6 +15,9 @@
 # Lzma
 suffix_$(CONFIG_INITRAMFS_COMPRESSION_LZMA)   = .lzma
 
+# XZ
+suffix_$(CONFIG_INITRAMFS_COMPRESSION_XZ)     = .xz
+
 # Lzo
 suffix_$(CONFIG_INITRAMFS_COMPRESSION_LZO)   = .lzo
 
@@ -50,7 +53,7 @@
 quiet_cmd_initfs = GEN     $@
       cmd_initfs = $(initramfs) -o $@ $(ramfs-args) $(ramfs-input)
 
-targets := initramfs_data.cpio.gz initramfs_data.cpio.bz2 initramfs_data.cpio.lzma initramfs_data.cpio.lzo initramfs_data.cpio
+targets := initramfs_data.cpio.gz initramfs_data.cpio.bz2 initramfs_data.cpio.lzma initramfs_data.cpio.xz initramfs_data.cpio.lzo initramfs_data.cpio
 # do not try to update files included in initramfs
 $(deps_initramfs): ;
 
diff --git a/virt/kvm/Kconfig b/virt/kvm/Kconfig
index 7f1178f..f63ccb0 100644
--- a/virt/kvm/Kconfig
+++ b/virt/kvm/Kconfig
@@ -15,3 +15,6 @@
 
 config KVM_MMIO
        bool
+
+config KVM_ASYNC_PF
+       bool
diff --git a/virt/kvm/assigned-dev.c b/virt/kvm/assigned-dev.c
index 7c98928..ae72ae60 100644
--- a/virt/kvm/assigned-dev.c
+++ b/virt/kvm/assigned-dev.c
@@ -55,58 +55,31 @@
 	return index;
 }
 
-static void kvm_assigned_dev_interrupt_work_handler(struct work_struct *work)
+static irqreturn_t kvm_assigned_dev_thread(int irq, void *dev_id)
 {
-	struct kvm_assigned_dev_kernel *assigned_dev;
-	int i;
+	struct kvm_assigned_dev_kernel *assigned_dev = dev_id;
+	u32 vector;
+	int index;
 
-	assigned_dev = container_of(work, struct kvm_assigned_dev_kernel,
-				    interrupt_work);
+	if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_INTX) {
+		spin_lock(&assigned_dev->intx_lock);
+		disable_irq_nosync(irq);
+		assigned_dev->host_irq_disabled = true;
+		spin_unlock(&assigned_dev->intx_lock);
+	}
 
-	spin_lock_irq(&assigned_dev->assigned_dev_lock);
 	if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSIX) {
-		struct kvm_guest_msix_entry *guest_entries =
-			assigned_dev->guest_msix_entries;
-		for (i = 0; i < assigned_dev->entries_nr; i++) {
-			if (!(guest_entries[i].flags &
-					KVM_ASSIGNED_MSIX_PENDING))
-				continue;
-			guest_entries[i].flags &= ~KVM_ASSIGNED_MSIX_PENDING;
+		index = find_index_from_host_irq(assigned_dev, irq);
+		if (index >= 0) {
+			vector = assigned_dev->
+					guest_msix_entries[index].vector;
 			kvm_set_irq(assigned_dev->kvm,
-				    assigned_dev->irq_source_id,
-				    guest_entries[i].vector, 1);
+				    assigned_dev->irq_source_id, vector, 1);
 		}
 	} else
 		kvm_set_irq(assigned_dev->kvm, assigned_dev->irq_source_id,
 			    assigned_dev->guest_irq, 1);
 
-	spin_unlock_irq(&assigned_dev->assigned_dev_lock);
-}
-
-static irqreturn_t kvm_assigned_dev_intr(int irq, void *dev_id)
-{
-	unsigned long flags;
-	struct kvm_assigned_dev_kernel *assigned_dev =
-		(struct kvm_assigned_dev_kernel *) dev_id;
-
-	spin_lock_irqsave(&assigned_dev->assigned_dev_lock, flags);
-	if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSIX) {
-		int index = find_index_from_host_irq(assigned_dev, irq);
-		if (index < 0)
-			goto out;
-		assigned_dev->guest_msix_entries[index].flags |=
-			KVM_ASSIGNED_MSIX_PENDING;
-	}
-
-	schedule_work(&assigned_dev->interrupt_work);
-
-	if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_GUEST_INTX) {
-		disable_irq_nosync(irq);
-		assigned_dev->host_irq_disabled = true;
-	}
-
-out:
-	spin_unlock_irqrestore(&assigned_dev->assigned_dev_lock, flags);
 	return IRQ_HANDLED;
 }
 
@@ -114,7 +87,6 @@
 static void kvm_assigned_dev_ack_irq(struct kvm_irq_ack_notifier *kian)
 {
 	struct kvm_assigned_dev_kernel *dev;
-	unsigned long flags;
 
 	if (kian->gsi == -1)
 		return;
@@ -127,12 +99,12 @@
 	/* The guest irq may be shared so this ack may be
 	 * from another device.
 	 */
-	spin_lock_irqsave(&dev->assigned_dev_lock, flags);
+	spin_lock(&dev->intx_lock);
 	if (dev->host_irq_disabled) {
 		enable_irq(dev->host_irq);
 		dev->host_irq_disabled = false;
 	}
-	spin_unlock_irqrestore(&dev->assigned_dev_lock, flags);
+	spin_unlock(&dev->intx_lock);
 }
 
 static void deassign_guest_irq(struct kvm *kvm,
@@ -141,6 +113,9 @@
 	kvm_unregister_irq_ack_notifier(kvm, &assigned_dev->ack_notifier);
 	assigned_dev->ack_notifier.gsi = -1;
 
+	kvm_set_irq(assigned_dev->kvm, assigned_dev->irq_source_id,
+		    assigned_dev->guest_irq, 0);
+
 	if (assigned_dev->irq_source_id != -1)
 		kvm_free_irq_source_id(kvm, assigned_dev->irq_source_id);
 	assigned_dev->irq_source_id = -1;
@@ -152,28 +127,19 @@
 			      struct kvm_assigned_dev_kernel *assigned_dev)
 {
 	/*
-	 * In kvm_free_device_irq, cancel_work_sync return true if:
-	 * 1. work is scheduled, and then cancelled.
-	 * 2. work callback is executed.
-	 *
-	 * The first one ensured that the irq is disabled and no more events
-	 * would happen. But for the second one, the irq may be enabled (e.g.
-	 * for MSI). So we disable irq here to prevent further events.
+	 * We disable irq here to prevent further events.
 	 *
 	 * Notice this maybe result in nested disable if the interrupt type is
 	 * INTx, but it's OK for we are going to free it.
 	 *
 	 * If this function is a part of VM destroy, please ensure that till
 	 * now, the kvm state is still legal for probably we also have to wait
-	 * interrupt_work done.
+	 * on a currently running IRQ handler.
 	 */
 	if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSIX) {
 		int i;
 		for (i = 0; i < assigned_dev->entries_nr; i++)
-			disable_irq_nosync(assigned_dev->
-					   host_msix_entries[i].vector);
-
-		cancel_work_sync(&assigned_dev->interrupt_work);
+			disable_irq(assigned_dev->host_msix_entries[i].vector);
 
 		for (i = 0; i < assigned_dev->entries_nr; i++)
 			free_irq(assigned_dev->host_msix_entries[i].vector,
@@ -185,8 +151,7 @@
 		pci_disable_msix(assigned_dev->dev);
 	} else {
 		/* Deal with MSI and INTx */
-		disable_irq_nosync(assigned_dev->host_irq);
-		cancel_work_sync(&assigned_dev->interrupt_work);
+		disable_irq(assigned_dev->host_irq);
 
 		free_irq(assigned_dev->host_irq, (void *)assigned_dev);
 
@@ -232,7 +197,8 @@
 {
 	kvm_free_assigned_irq(kvm, assigned_dev);
 
-	pci_reset_function(assigned_dev->dev);
+	__pci_reset_function(assigned_dev->dev);
+	pci_restore_state(assigned_dev->dev);
 
 	pci_release_regions(assigned_dev->dev);
 	pci_disable_device(assigned_dev->dev);
@@ -265,8 +231,8 @@
 	 * on the same interrupt line is not a happy situation: there
 	 * are going to be long delays in accepting, acking, etc.
 	 */
-	if (request_irq(dev->host_irq, kvm_assigned_dev_intr,
-			0, "kvm_assigned_intx_device", (void *)dev))
+	if (request_threaded_irq(dev->host_irq, NULL, kvm_assigned_dev_thread,
+				 IRQF_ONESHOT, dev->irq_name, (void *)dev))
 		return -EIO;
 	return 0;
 }
@@ -284,8 +250,8 @@
 	}
 
 	dev->host_irq = dev->dev->irq;
-	if (request_irq(dev->host_irq, kvm_assigned_dev_intr, 0,
-			"kvm_assigned_msi_device", (void *)dev)) {
+	if (request_threaded_irq(dev->host_irq, NULL, kvm_assigned_dev_thread,
+				 0, dev->irq_name, (void *)dev)) {
 		pci_disable_msi(dev->dev);
 		return -EIO;
 	}
@@ -310,10 +276,9 @@
 		return r;
 
 	for (i = 0; i < dev->entries_nr; i++) {
-		r = request_irq(dev->host_msix_entries[i].vector,
-				kvm_assigned_dev_intr, 0,
-				"kvm_assigned_msix_device",
-				(void *)dev);
+		r = request_threaded_irq(dev->host_msix_entries[i].vector,
+					 NULL, kvm_assigned_dev_thread,
+					 0, dev->irq_name, (void *)dev);
 		if (r)
 			goto err;
 	}
@@ -370,6 +335,9 @@
 	if (dev->irq_requested_type & KVM_DEV_IRQ_HOST_MASK)
 		return r;
 
+	snprintf(dev->irq_name, sizeof(dev->irq_name), "kvm:%s",
+		 pci_name(dev->dev));
+
 	switch (host_irq_type) {
 	case KVM_DEV_IRQ_HOST_INTX:
 		r = assigned_device_enable_host_intx(kvm, dev);
@@ -547,6 +515,7 @@
 	}
 
 	pci_reset_function(dev);
+	pci_save_state(dev);
 
 	match->assigned_dev_id = assigned_dev->assigned_dev_id;
 	match->host_segnr = assigned_dev->segnr;
@@ -554,12 +523,10 @@
 	match->host_devfn = assigned_dev->devfn;
 	match->flags = assigned_dev->flags;
 	match->dev = dev;
-	spin_lock_init(&match->assigned_dev_lock);
+	spin_lock_init(&match->intx_lock);
 	match->irq_source_id = -1;
 	match->kvm = kvm;
 	match->ack_notifier.irq_acked = kvm_assigned_dev_ack_irq;
-	INIT_WORK(&match->interrupt_work,
-		  kvm_assigned_dev_interrupt_work_handler);
 
 	list_add(&match->list, &kvm->arch.assigned_dev_head);
 
@@ -579,6 +546,7 @@
 	mutex_unlock(&kvm->lock);
 	return r;
 out_list_del:
+	pci_restore_state(dev);
 	list_del(&match->list);
 	pci_release_regions(dev);
 out_disable:
@@ -651,9 +619,9 @@
 			r = -ENOMEM;
 			goto msix_nr_out;
 		}
-		adev->guest_msix_entries = kzalloc(
-				sizeof(struct kvm_guest_msix_entry) *
-				entry_nr->entry_nr, GFP_KERNEL);
+		adev->guest_msix_entries =
+			kzalloc(sizeof(struct msix_entry) * entry_nr->entry_nr,
+				GFP_KERNEL);
 		if (!adev->guest_msix_entries) {
 			kfree(adev->host_msix_entries);
 			r = -ENOMEM;
@@ -706,7 +674,7 @@
 				  unsigned long arg)
 {
 	void __user *argp = (void __user *)arg;
-	int r = -ENOTTY;
+	int r;
 
 	switch (ioctl) {
 	case KVM_ASSIGN_PCI_DEVICE: {
@@ -724,7 +692,6 @@
 		r = -EOPNOTSUPP;
 		break;
 	}
-#ifdef KVM_CAP_ASSIGN_DEV_IRQ
 	case KVM_ASSIGN_DEV_IRQ: {
 		struct kvm_assigned_irq assigned_irq;
 
@@ -747,8 +714,6 @@
 			goto out;
 		break;
 	}
-#endif
-#ifdef KVM_CAP_DEVICE_DEASSIGNMENT
 	case KVM_DEASSIGN_PCI_DEVICE: {
 		struct kvm_assigned_pci_dev assigned_dev;
 
@@ -760,7 +725,6 @@
 			goto out;
 		break;
 	}
-#endif
 #ifdef KVM_CAP_IRQ_ROUTING
 	case KVM_SET_GSI_ROUTING: {
 		struct kvm_irq_routing routing;
@@ -813,6 +777,9 @@
 		break;
 	}
 #endif
+	default:
+		r = -ENOTTY;
+		break;
 	}
 out:
 	return r;
diff --git a/virt/kvm/async_pf.c b/virt/kvm/async_pf.c
new file mode 100644
index 0000000..74268b4
--- /dev/null
+++ b/virt/kvm/async_pf.c
@@ -0,0 +1,216 @@
+/*
+ * kvm asynchronous fault support
+ *
+ * Copyright 2010 Red Hat, Inc.
+ *
+ * Author:
+ *      Gleb Natapov <gleb@redhat.com>
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/kvm_host.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/mmu_context.h>
+
+#include "async_pf.h"
+#include <trace/events/kvm.h>
+
+static struct kmem_cache *async_pf_cache;
+
+int kvm_async_pf_init(void)
+{
+	async_pf_cache = KMEM_CACHE(kvm_async_pf, 0);
+
+	if (!async_pf_cache)
+		return -ENOMEM;
+
+	return 0;
+}
+
+void kvm_async_pf_deinit(void)
+{
+	if (async_pf_cache)
+		kmem_cache_destroy(async_pf_cache);
+	async_pf_cache = NULL;
+}
+
+void kvm_async_pf_vcpu_init(struct kvm_vcpu *vcpu)
+{
+	INIT_LIST_HEAD(&vcpu->async_pf.done);
+	INIT_LIST_HEAD(&vcpu->async_pf.queue);
+	spin_lock_init(&vcpu->async_pf.lock);
+}
+
+static void async_pf_execute(struct work_struct *work)
+{
+	struct page *page = NULL;
+	struct kvm_async_pf *apf =
+		container_of(work, struct kvm_async_pf, work);
+	struct mm_struct *mm = apf->mm;
+	struct kvm_vcpu *vcpu = apf->vcpu;
+	unsigned long addr = apf->addr;
+	gva_t gva = apf->gva;
+
+	might_sleep();
+
+	use_mm(mm);
+	down_read(&mm->mmap_sem);
+	get_user_pages(current, mm, addr, 1, 1, 0, &page, NULL);
+	up_read(&mm->mmap_sem);
+	unuse_mm(mm);
+
+	spin_lock(&vcpu->async_pf.lock);
+	list_add_tail(&apf->link, &vcpu->async_pf.done);
+	apf->page = page;
+	apf->done = true;
+	spin_unlock(&vcpu->async_pf.lock);
+
+	/*
+	 * apf may be freed by kvm_check_async_pf_completion() after
+	 * this point
+	 */
+
+	trace_kvm_async_pf_completed(addr, page, gva);
+
+	if (waitqueue_active(&vcpu->wq))
+		wake_up_interruptible(&vcpu->wq);
+
+	mmdrop(mm);
+	kvm_put_kvm(vcpu->kvm);
+}
+
+void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu)
+{
+	/* cancel outstanding work queue item */
+	while (!list_empty(&vcpu->async_pf.queue)) {
+		struct kvm_async_pf *work =
+			list_entry(vcpu->async_pf.queue.next,
+				   typeof(*work), queue);
+		cancel_work_sync(&work->work);
+		list_del(&work->queue);
+		if (!work->done) /* work was canceled */
+			kmem_cache_free(async_pf_cache, work);
+	}
+
+	spin_lock(&vcpu->async_pf.lock);
+	while (!list_empty(&vcpu->async_pf.done)) {
+		struct kvm_async_pf *work =
+			list_entry(vcpu->async_pf.done.next,
+				   typeof(*work), link);
+		list_del(&work->link);
+		if (work->page)
+			put_page(work->page);
+		kmem_cache_free(async_pf_cache, work);
+	}
+	spin_unlock(&vcpu->async_pf.lock);
+
+	vcpu->async_pf.queued = 0;
+}
+
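+/*
+ * Deliver completed async page faults to the guest: for each entry on
+ * the done list, tell the arch code the page is ready (when one was
+ * faulted in) and inject the "page present" notification, then drop
+ * the entry from the queue and free it.
+ */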
+void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu)
+{
+	struct kvm_async_pf *work;
+
+	while (!list_empty_careful(&vcpu->async_pf.done) &&
+	      kvm_arch_can_inject_async_page_present(vcpu)) {
+		spin_lock(&vcpu->async_pf.lock);
+		work = list_first_entry(&vcpu->async_pf.done, typeof(*work),
+					      link);
+		list_del(&work->link);
+		spin_unlock(&vcpu->async_pf.lock);
+
+		if (work->page)
+			kvm_arch_async_page_ready(vcpu, work);
+		kvm_arch_async_page_present(vcpu, work);
+
+		list_del(&work->queue);
+		vcpu->async_pf.queued--;
+		if (work->page)
+			put_page(work->page);
+		kmem_cache_free(async_pf_cache, work);
+	}
+}
+
+int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
+		       struct kvm_arch_async_pf *arch)
+{
+	struct kvm_async_pf *work;
+
+	if (vcpu->async_pf.queued >= ASYNC_PF_PER_VCPU)
+		return 0;
+
+	/* setup delayed work */
+
+	/*
+	 * do alloc nowait since if we are going to sleep anyway we
+	 * may as well sleep faulting in page
+	 */
+	work = kmem_cache_zalloc(async_pf_cache, GFP_NOWAIT);
+	if (!work)
+		return 0;
+
+	work->page = NULL;
+	work->done = false;
+	work->vcpu = vcpu;
+	work->gva = gva;
+	work->addr = gfn_to_hva(vcpu->kvm, gfn);
+	work->arch = *arch;
+	work->mm = current->mm;
+	atomic_inc(&work->mm->mm_count);
+	kvm_get_kvm(work->vcpu->kvm);
+
+	/* this can't really happen otherwise gfn_to_pfn_async
+	   would succeed */
+	if (unlikely(kvm_is_error_hva(work->addr)))
+		goto retry_sync;
+
+	INIT_WORK(&work->work, async_pf_execute);
+	if (!schedule_work(&work->work))
+		goto retry_sync;
+
+	list_add_tail(&work->queue, &vcpu->async_pf.queue);
+	vcpu->async_pf.queued++;
+	kvm_arch_async_page_not_present(vcpu, work);
+	return 1;
+retry_sync:
+	kvm_put_kvm(work->vcpu->kvm);
+	mmdrop(work->mm);
+	kmem_cache_free(async_pf_cache, work);
+	return 0;
+}
+
+int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu)
+{
+	struct kvm_async_pf *work;
+
+	if (!list_empty_careful(&vcpu->async_pf.done))
+		return 0;
+
+	work = kmem_cache_zalloc(async_pf_cache, GFP_ATOMIC);
+	if (!work)
+		return -ENOMEM;
+
+	work->page = bad_page;
+	get_page(bad_page);
+	INIT_LIST_HEAD(&work->queue); /* for list_del to work */
+
+	spin_lock(&vcpu->async_pf.lock);
+	list_add_tail(&work->link, &vcpu->async_pf.done);
+	spin_unlock(&vcpu->async_pf.lock);
+
+	vcpu->async_pf.queued++;
+	return 0;
+}
diff --git a/virt/kvm/async_pf.h b/virt/kvm/async_pf.h
new file mode 100644
index 0000000..e7ef6447
--- /dev/null
+++ b/virt/kvm/async_pf.h
@@ -0,0 +1,36 @@
+/*
+ * kvm asynchronous fault support
+ *
+ * Copyright 2010 Red Hat, Inc.
+ *
+ * Author:
+ *      Gleb Natapov <gleb@redhat.com>
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __KVM_ASYNC_PF_H__
+#define __KVM_ASYNC_PF_H__
+
+#ifdef CONFIG_KVM_ASYNC_PF
+int kvm_async_pf_init(void);
+void kvm_async_pf_deinit(void);
+void kvm_async_pf_vcpu_init(struct kvm_vcpu *vcpu);
+#else
+#define kvm_async_pf_init() (0)
+#define kvm_async_pf_deinit() do{}while(0)
+#define kvm_async_pf_vcpu_init(C) do{}while(0)
+#endif
+
+#endif
diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c
index c1f1e3c..2ca4535 100644
--- a/virt/kvm/eventfd.c
+++ b/virt/kvm/eventfd.c
@@ -44,14 +44,19 @@
  */
 
 struct _irqfd {
-	struct kvm               *kvm;
-	struct eventfd_ctx       *eventfd;
-	int                       gsi;
-	struct list_head          list;
-	poll_table                pt;
-	wait_queue_t              wait;
-	struct work_struct        inject;
-	struct work_struct        shutdown;
+	/* Used for MSI fast-path */
+	struct kvm *kvm;
+	wait_queue_t wait;
+	/* Update side is protected by irqfds.lock */
+	struct kvm_kernel_irq_routing_entry __rcu *irq_entry;
+	/* Used for level IRQ fast-path */
+	int gsi;
+	struct work_struct inject;
+	/* Used for setup/shutdown */
+	struct eventfd_ctx *eventfd;
+	struct list_head list;
+	poll_table pt;
+	struct work_struct shutdown;
 };
 
 static struct workqueue_struct *irqfd_cleanup_wq;
@@ -125,14 +130,22 @@
 {
 	struct _irqfd *irqfd = container_of(wait, struct _irqfd, wait);
 	unsigned long flags = (unsigned long)key;
+	struct kvm_kernel_irq_routing_entry *irq;
+	struct kvm *kvm = irqfd->kvm;
 
-	if (flags & POLLIN)
+	if (flags & POLLIN) {
+		rcu_read_lock();
+		irq = rcu_dereference(irqfd->irq_entry);
 		/* An event has been signaled, inject an interrupt */
-		schedule_work(&irqfd->inject);
+		if (irq)
+			kvm_set_msi(irq, kvm, KVM_USERSPACE_IRQ_SOURCE_ID, 1);
+		else
+			schedule_work(&irqfd->inject);
+		rcu_read_unlock();
+	}
 
 	if (flags & POLLHUP) {
 		/* The eventfd is closing, detach from KVM */
-		struct kvm *kvm = irqfd->kvm;
 		unsigned long flags;
 
 		spin_lock_irqsave(&kvm->irqfds.lock, flags);
@@ -163,9 +176,31 @@
 	add_wait_queue(wqh, &irqfd->wait);
 }
 
+/* Must be called under irqfds.lock */
+static void irqfd_update(struct kvm *kvm, struct _irqfd *irqfd,
+			 struct kvm_irq_routing_table *irq_rt)
+{
+	struct kvm_kernel_irq_routing_entry *e;
+	struct hlist_node *n;
+
+	if (irqfd->gsi >= irq_rt->nr_rt_entries) {
+		rcu_assign_pointer(irqfd->irq_entry, NULL);
+		return;
+	}
+
+	hlist_for_each_entry(e, n, &irq_rt->map[irqfd->gsi], link) {
+		/* Only fast-path MSI. */
+		if (e->type == KVM_IRQ_ROUTING_MSI)
+			rcu_assign_pointer(irqfd->irq_entry, e);
+		else
+			rcu_assign_pointer(irqfd->irq_entry, NULL);
+	}
+}
+
 static int
 kvm_irqfd_assign(struct kvm *kvm, int fd, int gsi)
 {
+	struct kvm_irq_routing_table *irq_rt;
 	struct _irqfd *irqfd, *tmp;
 	struct file *file = NULL;
 	struct eventfd_ctx *eventfd = NULL;
@@ -215,6 +250,10 @@
 		goto fail;
 	}
 
+	irq_rt = rcu_dereference_protected(kvm->irq_routing,
+					   lockdep_is_held(&kvm->irqfds.lock));
+	irqfd_update(kvm, irqfd, irq_rt);
+
 	events = file->f_op->poll(file, &irqfd->pt);
 
 	list_add_tail(&irqfd->list, &kvm->irqfds.items);
@@ -271,8 +310,17 @@
 	spin_lock_irq(&kvm->irqfds.lock);
 
 	list_for_each_entry_safe(irqfd, tmp, &kvm->irqfds.items, list) {
-		if (irqfd->eventfd == eventfd && irqfd->gsi == gsi)
+		if (irqfd->eventfd == eventfd && irqfd->gsi == gsi) {
+			/*
+			 * This rcu_assign_pointer is needed in case
+			 * another thread calls kvm_irqfd_update before
+			 * we flush the workqueue below.
+			 * It is paired with the synchronize_rcu done by
+			 * the caller of that function.
+			 */
+			rcu_assign_pointer(irqfd->irq_entry, NULL);
 			irqfd_deactivate(irqfd);
+		}
 	}
 
 	spin_unlock_irq(&kvm->irqfds.lock);
@@ -322,6 +370,25 @@
 }
 
 /*
+ * Change the irq routing table and update every irqfd accordingly.
+ * The caller must invoke synchronize_rcu() afterwards.
+ */
+void kvm_irq_routing_update(struct kvm *kvm,
+			    struct kvm_irq_routing_table *irq_rt)
+{
+	struct _irqfd *irqfd;
+
+	spin_lock_irq(&kvm->irqfds.lock);
+
+	rcu_assign_pointer(kvm->irq_routing, irq_rt);
+
+	list_for_each_entry(irqfd, &kvm->irqfds.items, list)
+		irqfd_update(kvm, irqfd, irq_rt);
+
+	spin_unlock_irq(&kvm->irqfds.lock);
+}
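+
+/*
+ * Illustrative caller pattern (it mirrors kvm_set_irq_routing() in
+ * irq_comm.c, which is updated below to use this helper):
+ *
+ *	mutex_lock(&kvm->irq_lock);
+ *	kvm_irq_routing_update(kvm, new);
+ *	mutex_unlock(&kvm->irq_lock);
+ *	synchronize_rcu();
+ *
+ * The synchronize_rcu() guarantees that no irqfd_wakeup() reader still
+ * dereferences an entry of the old routing table once it is freed.
+ */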
+
+/*
  * create a host-wide workqueue for issuing deferred shutdown requests
  * aggregated from all vm* instances. We need our own isolated single-thread
  * queue to prevent deadlock against flushing the normal work-queue.
diff --git a/virt/kvm/irq_comm.c b/virt/kvm/irq_comm.c
index 8edca91..9f614b4 100644
--- a/virt/kvm/irq_comm.c
+++ b/virt/kvm/irq_comm.c
@@ -114,8 +114,8 @@
 	return r;
 }
 
-static int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
-		       struct kvm *kvm, int irq_source_id, int level)
+int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
+		struct kvm *kvm, int irq_source_id, int level)
 {
 	struct kvm_lapic_irq irq;
 
@@ -409,8 +409,9 @@
 
 	mutex_lock(&kvm->irq_lock);
 	old = kvm->irq_routing;
-	rcu_assign_pointer(kvm->irq_routing, new);
+	kvm_irq_routing_update(kvm, new);
 	mutex_unlock(&kvm->irq_lock);
+
 	synchronize_rcu();
 
 	new = old;
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 5225052..7f68625 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -55,6 +55,7 @@
 #include <asm-generic/bitops/le.h>
 
 #include "coalesced_mmio.h"
+#include "async_pf.h"
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/kvm.h>
@@ -89,7 +90,8 @@
 
 static void kvm_io_bus_destroy(struct kvm_io_bus *bus);
 
-static bool kvm_rebooting;
+bool kvm_rebooting;
+EXPORT_SYMBOL_GPL(kvm_rebooting);
 
 static bool largepages_enabled = true;
 
@@ -167,8 +169,12 @@
 
 void kvm_flush_remote_tlbs(struct kvm *kvm)
 {
+	int dirty_count = kvm->tlbs_dirty;
+
+	smp_mb();
 	if (make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
 		++kvm->stat.remote_tlb_flush;
+	cmpxchg(&kvm->tlbs_dirty, dirty_count, 0);
 }
 
 void kvm_reload_remote_mmus(struct kvm *kvm)
@@ -186,6 +192,7 @@
 	vcpu->kvm = kvm;
 	vcpu->vcpu_id = id;
 	init_waitqueue_head(&vcpu->wq);
+	kvm_async_pf_vcpu_init(vcpu);
 
 	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
 	if (!page) {
@@ -247,7 +254,7 @@
 	idx = srcu_read_lock(&kvm->srcu);
 	spin_lock(&kvm->mmu_lock);
 	kvm->mmu_notifier_seq++;
-	need_tlb_flush = kvm_unmap_hva(kvm, address);
+	need_tlb_flush = kvm_unmap_hva(kvm, address) | kvm->tlbs_dirty;
 	spin_unlock(&kvm->mmu_lock);
 	srcu_read_unlock(&kvm->srcu, idx);
 
@@ -291,6 +298,7 @@
 	kvm->mmu_notifier_count++;
 	for (; start < end; start += PAGE_SIZE)
 		need_tlb_flush |= kvm_unmap_hva(kvm, start);
+	need_tlb_flush |= kvm->tlbs_dirty;
 	spin_unlock(&kvm->mmu_lock);
 	srcu_read_unlock(&kvm->srcu, idx);
 
@@ -381,11 +389,15 @@
 
 static struct kvm *kvm_create_vm(void)
 {
-	int r = 0, i;
-	struct kvm *kvm = kvm_arch_create_vm();
+	int r, i;
+	struct kvm *kvm = kvm_arch_alloc_vm();
 
-	if (IS_ERR(kvm))
-		goto out;
+	if (!kvm)
+		return ERR_PTR(-ENOMEM);
+
+	r = kvm_arch_init_vm(kvm);
+	if (r)
+		goto out_err_nodisable;
 
 	r = hardware_enable_all();
 	if (r)
@@ -399,23 +411,19 @@
 	r = -ENOMEM;
 	kvm->memslots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
 	if (!kvm->memslots)
-		goto out_err;
+		goto out_err_nosrcu;
 	if (init_srcu_struct(&kvm->srcu))
-		goto out_err;
+		goto out_err_nosrcu;
 	for (i = 0; i < KVM_NR_BUSES; i++) {
 		kvm->buses[i] = kzalloc(sizeof(struct kvm_io_bus),
 					GFP_KERNEL);
-		if (!kvm->buses[i]) {
-			cleanup_srcu_struct(&kvm->srcu);
+		if (!kvm->buses[i])
 			goto out_err;
-		}
 	}
 
 	r = kvm_init_mmu_notifier(kvm);
-	if (r) {
-		cleanup_srcu_struct(&kvm->srcu);
+	if (r)
 		goto out_err;
-	}
 
 	kvm->mm = current->mm;
 	atomic_inc(&kvm->mm->mm_count);
@@ -429,19 +437,35 @@
 	spin_lock(&kvm_lock);
 	list_add(&kvm->vm_list, &vm_list);
 	spin_unlock(&kvm_lock);
-out:
+
 	return kvm;
 
 out_err:
+	cleanup_srcu_struct(&kvm->srcu);
+out_err_nosrcu:
 	hardware_disable_all();
 out_err_nodisable:
 	for (i = 0; i < KVM_NR_BUSES; i++)
 		kfree(kvm->buses[i]);
 	kfree(kvm->memslots);
-	kfree(kvm);
+	kvm_arch_free_vm(kvm);
 	return ERR_PTR(r);
 }
 
+static void kvm_destroy_dirty_bitmap(struct kvm_memory_slot *memslot)
+{
+	if (!memslot->dirty_bitmap)
+		return;
+
+	if (2 * kvm_dirty_bitmap_bytes(memslot) > PAGE_SIZE)
+		vfree(memslot->dirty_bitmap_head);
+	else
+		kfree(memslot->dirty_bitmap_head);
+
+	memslot->dirty_bitmap = NULL;
+	memslot->dirty_bitmap_head = NULL;
+}
+
 /*
  * Free any memory in @free but not in @dont.
  */
@@ -454,7 +478,7 @@
 		vfree(free->rmap);
 
 	if (!dont || free->dirty_bitmap != dont->dirty_bitmap)
-		vfree(free->dirty_bitmap);
+		kvm_destroy_dirty_bitmap(free);
 
 
 	for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) {
@@ -465,7 +489,6 @@
 	}
 
 	free->npages = 0;
-	free->dirty_bitmap = NULL;
 	free->rmap = NULL;
 }
 
@@ -499,6 +522,9 @@
 	kvm_arch_flush_shadow(kvm);
 #endif
 	kvm_arch_destroy_vm(kvm);
+	kvm_free_physmem(kvm);
+	cleanup_srcu_struct(&kvm->srcu);
+	kvm_arch_free_vm(kvm);
 	hardware_disable_all();
 	mmdrop(mm);
 }
@@ -528,6 +554,27 @@
 }
 
 /*
+ * Allocation size is twice as large as the actual dirty bitmap size.
+ * This makes it possible to do double buffering: see x86's
+ * kvm_vm_ioctl_get_dirty_log().
+ */
+static int kvm_create_dirty_bitmap(struct kvm_memory_slot *memslot)
+{
+	unsigned long dirty_bytes = 2 * kvm_dirty_bitmap_bytes(memslot);
+
+	if (dirty_bytes > PAGE_SIZE)
+		memslot->dirty_bitmap = vzalloc(dirty_bytes);
+	else
+		memslot->dirty_bitmap = kzalloc(dirty_bytes, GFP_KERNEL);
+
+	if (!memslot->dirty_bitmap)
+		return -ENOMEM;
+
+	memslot->dirty_bitmap_head = memslot->dirty_bitmap;
+	return 0;
+}
+
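+/*
+ * Rough size example (assuming 4 KiB pages and the usual one bit per
+ * guest page from kvm_dirty_bitmap_bytes()): a 1 GiB slot covers 262144
+ * pages, so the bitmap proper is 32 KiB and the double-buffered
+ * allocation is 64 KiB, well above PAGE_SIZE and therefore vzalloc'd;
+ * small slots stay under PAGE_SIZE and use kzalloc instead.
+ */
+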
+/*
  * Allocate some memory and give it an address in the guest physical address
  * space.
  *
@@ -604,13 +651,11 @@
 	/* Allocate if a slot is being created */
 #ifndef CONFIG_S390
 	if (npages && !new.rmap) {
-		new.rmap = vmalloc(npages * sizeof(*new.rmap));
+		new.rmap = vzalloc(npages * sizeof(*new.rmap));
 
 		if (!new.rmap)
 			goto out_free;
 
-		memset(new.rmap, 0, npages * sizeof(*new.rmap));
-
 		new.user_alloc = user_alloc;
 		new.userspace_addr = mem->userspace_addr;
 	}
@@ -633,14 +678,11 @@
 			     >> KVM_HPAGE_GFN_SHIFT(level));
 		lpages -= base_gfn >> KVM_HPAGE_GFN_SHIFT(level);
 
-		new.lpage_info[i] = vmalloc(lpages * sizeof(*new.lpage_info[i]));
+		new.lpage_info[i] = vzalloc(lpages * sizeof(*new.lpage_info[i]));
 
 		if (!new.lpage_info[i])
 			goto out_free;
 
-		memset(new.lpage_info[i], 0,
-		       lpages * sizeof(*new.lpage_info[i]));
-
 		if (base_gfn & (KVM_PAGES_PER_HPAGE(level) - 1))
 			new.lpage_info[i][0].write_count = 1;
 		if ((base_gfn+npages) & (KVM_PAGES_PER_HPAGE(level) - 1))
@@ -661,12 +703,8 @@
 
 	/* Allocate page dirty bitmap if needed */
 	if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
-		unsigned long dirty_bytes = kvm_dirty_bitmap_bytes(&new);
-
-		new.dirty_bitmap = vmalloc(dirty_bytes);
-		if (!new.dirty_bitmap)
+		if (kvm_create_dirty_bitmap(&new) < 0)
 			goto out_free;
-		memset(new.dirty_bitmap, 0, dirty_bytes);
 		/* destroy any largepage mappings for dirty tracking */
 		if (old.npages)
 			flush_shadow = 1;
@@ -685,6 +723,7 @@
 		memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots));
 		if (mem->slot >= slots->nmemslots)
 			slots->nmemslots = mem->slot + 1;
+		slots->generation++;
 		slots->memslots[mem->slot].flags |= KVM_MEMSLOT_INVALID;
 
 		old_memslots = kvm->memslots;
@@ -719,6 +758,7 @@
 	memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots));
 	if (mem->slot >= slots->nmemslots)
 		slots->nmemslots = mem->slot + 1;
+	slots->generation++;
 
 	/* actual memory is freed via old in kvm_free_physmem_slot below */
 	if (!npages) {
@@ -849,10 +889,10 @@
 }
 EXPORT_SYMBOL_GPL(kvm_is_error_hva);
 
-struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
+static struct kvm_memory_slot *__gfn_to_memslot(struct kvm_memslots *slots,
+						gfn_t gfn)
 {
 	int i;
-	struct kvm_memslots *slots = kvm_memslots(kvm);
 
 	for (i = 0; i < slots->nmemslots; ++i) {
 		struct kvm_memory_slot *memslot = &slots->memslots[i];
@@ -863,6 +903,11 @@
 	}
 	return NULL;
 }
+
+struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
+{
+	return __gfn_to_memslot(kvm_memslots(kvm), gfn);
+}
 EXPORT_SYMBOL_GPL(gfn_to_memslot);
 
 int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
@@ -925,12 +970,9 @@
 	return memslot - slots->memslots;
 }
 
-static unsigned long gfn_to_hva_many(struct kvm *kvm, gfn_t gfn,
+static unsigned long gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn,
 				     gfn_t *nr_pages)
 {
-	struct kvm_memory_slot *slot;
-
-	slot = gfn_to_memslot(kvm, gfn);
 	if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
 		return bad_hva();
 
@@ -942,28 +984,61 @@
 
 unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
 {
-	return gfn_to_hva_many(kvm, gfn, NULL);
+	return gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, NULL);
 }
 EXPORT_SYMBOL_GPL(gfn_to_hva);
 
-static pfn_t hva_to_pfn(struct kvm *kvm, unsigned long addr, bool atomic)
+static pfn_t get_fault_pfn(void)
+{
+	get_page(fault_page);
+	return fault_pfn;
+}
+
+static pfn_t hva_to_pfn(struct kvm *kvm, unsigned long addr, bool atomic,
+			bool *async, bool write_fault, bool *writable)
 {
 	struct page *page[1];
-	int npages;
+	int npages = 0;
 	pfn_t pfn;
 
-	if (atomic)
+	/* we can do it either atomically or asynchronously, not both */
+	BUG_ON(atomic && async);
+
+	BUG_ON(!write_fault && !writable);
+
+	if (writable)
+		*writable = true;
+
+	if (atomic || async)
 		npages = __get_user_pages_fast(addr, 1, 1, page);
-	else {
+
+	if (unlikely(npages != 1) && !atomic) {
 		might_sleep();
-		npages = get_user_pages_fast(addr, 1, 1, page);
+
+		if (writable)
+			*writable = write_fault;
+
+		npages = get_user_pages_fast(addr, 1, write_fault, page);
+
+		/* map read fault as writable if possible */
+		if (unlikely(!write_fault) && npages == 1) {
+			struct page *wpage[1];
+
+			npages = __get_user_pages_fast(addr, 1, 1, wpage);
+			if (npages == 1) {
+				*writable = true;
+				put_page(page[0]);
+				page[0] = wpage[0];
+			}
+			npages = 1;
+		}
 	}
 
 	if (unlikely(npages != 1)) {
 		struct vm_area_struct *vma;
 
 		if (atomic)
-			goto return_fault_page;
+			return get_fault_pfn();
 
 		down_read(&current->mm->mmap_sem);
 		if (is_hwpoison_address(addr)) {
@@ -972,19 +1047,20 @@
 			return page_to_pfn(hwpoison_page);
 		}
 
-		vma = find_vma(current->mm, addr);
+		vma = find_vma_intersection(current->mm, addr, addr+1);
 
-		if (vma == NULL || addr < vma->vm_start ||
-		    !(vma->vm_flags & VM_PFNMAP)) {
-			up_read(&current->mm->mmap_sem);
-return_fault_page:
-			get_page(fault_page);
-			return page_to_pfn(fault_page);
+		if (vma == NULL)
+			pfn = get_fault_pfn();
+		else if ((vma->vm_flags & VM_PFNMAP)) {
+			pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) +
+				vma->vm_pgoff;
+			BUG_ON(!kvm_is_mmio_pfn(pfn));
+		} else {
+			if (async && (vma->vm_flags & VM_WRITE))
+				*async = true;
+			pfn = get_fault_pfn();
 		}
-
-		pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
 		up_read(&current->mm->mmap_sem);
-		BUG_ON(!kvm_is_mmio_pfn(pfn));
 	} else
 		pfn = page_to_pfn(page[0]);
 
@@ -993,40 +1069,58 @@
 
 pfn_t hva_to_pfn_atomic(struct kvm *kvm, unsigned long addr)
 {
-	return hva_to_pfn(kvm, addr, true);
+	return hva_to_pfn(kvm, addr, true, NULL, true, NULL);
 }
 EXPORT_SYMBOL_GPL(hva_to_pfn_atomic);
 
-static pfn_t __gfn_to_pfn(struct kvm *kvm, gfn_t gfn, bool atomic)
+static pfn_t __gfn_to_pfn(struct kvm *kvm, gfn_t gfn, bool atomic, bool *async,
+			  bool write_fault, bool *writable)
 {
 	unsigned long addr;
 
+	if (async)
+		*async = false;
+
 	addr = gfn_to_hva(kvm, gfn);
 	if (kvm_is_error_hva(addr)) {
 		get_page(bad_page);
 		return page_to_pfn(bad_page);
 	}
 
-	return hva_to_pfn(kvm, addr, atomic);
+	return hva_to_pfn(kvm, addr, atomic, async, write_fault, writable);
 }
 
 pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn)
 {
-	return __gfn_to_pfn(kvm, gfn, true);
+	return __gfn_to_pfn(kvm, gfn, true, NULL, true, NULL);
 }
 EXPORT_SYMBOL_GPL(gfn_to_pfn_atomic);
 
+pfn_t gfn_to_pfn_async(struct kvm *kvm, gfn_t gfn, bool *async,
+		       bool write_fault, bool *writable)
+{
+	return __gfn_to_pfn(kvm, gfn, false, async, write_fault, writable);
+}
+EXPORT_SYMBOL_GPL(gfn_to_pfn_async);
+
 pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
 {
-	return __gfn_to_pfn(kvm, gfn, false);
+	return __gfn_to_pfn(kvm, gfn, false, NULL, true, NULL);
 }
 EXPORT_SYMBOL_GPL(gfn_to_pfn);
 
+pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
+		      bool *writable)
+{
+	return __gfn_to_pfn(kvm, gfn, false, NULL, write_fault, writable);
+}
+EXPORT_SYMBOL_GPL(gfn_to_pfn_prot);
+
 pfn_t gfn_to_pfn_memslot(struct kvm *kvm,
 			 struct kvm_memory_slot *slot, gfn_t gfn)
 {
 	unsigned long addr = gfn_to_hva_memslot(slot, gfn);
-	return hva_to_pfn(kvm, addr, false);
+	return hva_to_pfn(kvm, addr, false, NULL, true, NULL);
 }
 
 int gfn_to_page_many_atomic(struct kvm *kvm, gfn_t gfn, struct page **pages,
@@ -1035,7 +1129,7 @@
 	unsigned long addr;
 	gfn_t entry;
 
-	addr = gfn_to_hva_many(kvm, gfn, &entry);
+	addr = gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, &entry);
 	if (kvm_is_error_hva(addr))
 		return -1;
 
@@ -1219,9 +1313,51 @@
 	return 0;
 }
 
+int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
+			      gpa_t gpa)
+{
+	struct kvm_memslots *slots = kvm_memslots(kvm);
+	int offset = offset_in_page(gpa);
+	gfn_t gfn = gpa >> PAGE_SHIFT;
+
+	ghc->gpa = gpa;
+	ghc->generation = slots->generation;
+	ghc->memslot = __gfn_to_memslot(slots, gfn);
+	ghc->hva = gfn_to_hva_many(ghc->memslot, gfn, NULL);
+	if (!kvm_is_error_hva(ghc->hva))
+		ghc->hva += offset;
+	else
+		return -EFAULT;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(kvm_gfn_to_hva_cache_init);
+
+int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
+			   void *data, unsigned long len)
+{
+	struct kvm_memslots *slots = kvm_memslots(kvm);
+	int r;
+
+	if (slots->generation != ghc->generation)
+		kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa);
+
+	if (kvm_is_error_hva(ghc->hva))
+		return -EFAULT;
+
+	r = copy_to_user((void __user *)ghc->hva, data, len);
+	if (r)
+		return -EFAULT;
+	mark_page_dirty_in_slot(kvm, ghc->memslot, ghc->gpa >> PAGE_SHIFT);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(kvm_write_guest_cached);
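+
+/*
+ * Minimal usage sketch (illustrative only; "ghc", "gpa" and "val" are
+ * made-up caller-side names):
+ *
+ *	struct gfn_to_hva_cache ghc;
+ *	u64 val = 0;
+ *
+ *	if (!kvm_gfn_to_hva_cache_init(kvm, &ghc, gpa))
+ *		kvm_write_guest_cached(kvm, &ghc, &val, sizeof(val));
+ *
+ * The cached hva is revalidated whenever slots->generation changes, so a
+ * successful init does not need to be repeated after memslot updates.
+ */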
+
 int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len)
 {
-	return kvm_write_guest_page(kvm, gfn, empty_zero_page, offset, len);
+	return kvm_write_guest_page(kvm, gfn, (const void *) empty_zero_page,
+				    offset, len);
 }
 EXPORT_SYMBOL_GPL(kvm_clear_guest_page);
 
@@ -1244,11 +1380,9 @@
 }
 EXPORT_SYMBOL_GPL(kvm_clear_guest);
 
-void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
+void mark_page_dirty_in_slot(struct kvm *kvm, struct kvm_memory_slot *memslot,
+			     gfn_t gfn)
 {
-	struct kvm_memory_slot *memslot;
-
-	memslot = gfn_to_memslot(kvm, gfn);
 	if (memslot && memslot->dirty_bitmap) {
 		unsigned long rel_gfn = gfn - memslot->base_gfn;
 
@@ -1256,6 +1390,14 @@
 	}
 }
 
+void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
+{
+	struct kvm_memory_slot *memslot;
+
+	memslot = gfn_to_memslot(kvm, gfn);
+	mark_page_dirty_in_slot(kvm, memslot, gfn);
+}
+
 /*
  * The vCPU has executed a HLT instruction with in-kernel mode enabled.
  */
@@ -1457,6 +1599,7 @@
 		if (arg)
 			goto out;
 		r = kvm_arch_vcpu_ioctl_run(vcpu, vcpu->run);
+		trace_kvm_userspace_exit(vcpu->run->exit_reason, r);
 		break;
 	case KVM_GET_REGS: {
 		struct kvm_regs *kvm_regs;
@@ -1824,7 +1967,7 @@
 
 static int kvm_dev_ioctl_create_vm(void)
 {
-	int fd, r;
+	int r;
 	struct kvm *kvm;
 
 	kvm = kvm_create_vm();
@@ -1837,11 +1980,11 @@
 		return r;
 	}
 #endif
-	fd = anon_inode_getfd("kvm-vm", &kvm_vm_fops, kvm, O_RDWR);
-	if (fd < 0)
+	r = anon_inode_getfd("kvm-vm", &kvm_vm_fops, kvm, O_RDWR);
+	if (r < 0)
 		kvm_put_kvm(kvm);
 
-	return fd;
+	return r;
 }
 
 static long kvm_dev_ioctl_check_extension_generic(long arg)
@@ -1922,7 +2065,7 @@
 	&kvm_chardev_ops,
 };
 
-static void hardware_enable(void *junk)
+static void hardware_enable_nolock(void *junk)
 {
 	int cpu = raw_smp_processor_id();
 	int r;
@@ -1942,7 +2085,14 @@
 	}
 }
 
-static void hardware_disable(void *junk)
+static void hardware_enable(void *junk)
+{
+	spin_lock(&kvm_lock);
+	hardware_enable_nolock(junk);
+	spin_unlock(&kvm_lock);
+}
+
+static void hardware_disable_nolock(void *junk)
 {
 	int cpu = raw_smp_processor_id();
 
@@ -1952,13 +2102,20 @@
 	kvm_arch_hardware_disable(NULL);
 }
 
+static void hardware_disable(void *junk)
+{
+	spin_lock(&kvm_lock);
+	hardware_disable_nolock(junk);
+	spin_unlock(&kvm_lock);
+}
+
 static void hardware_disable_all_nolock(void)
 {
 	BUG_ON(!kvm_usage_count);
 
 	kvm_usage_count--;
 	if (!kvm_usage_count)
-		on_each_cpu(hardware_disable, NULL, 1);
+		on_each_cpu(hardware_disable_nolock, NULL, 1);
 }
 
 static void hardware_disable_all(void)
@@ -1977,7 +2134,7 @@
 	kvm_usage_count++;
 	if (kvm_usage_count == 1) {
 		atomic_set(&hardware_enable_failed, 0);
-		on_each_cpu(hardware_enable, NULL, 1);
+		on_each_cpu(hardware_enable_nolock, NULL, 1);
 
 		if (atomic_read(&hardware_enable_failed)) {
 			hardware_disable_all_nolock();
@@ -2008,27 +2165,19 @@
 	case CPU_STARTING:
 		printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n",
 		       cpu);
-		spin_lock(&kvm_lock);
 		hardware_enable(NULL);
-		spin_unlock(&kvm_lock);
 		break;
 	}
 	return NOTIFY_OK;
 }
 
 
-asmlinkage void kvm_handle_fault_on_reboot(void)
+asmlinkage void kvm_spurious_fault(void)
 {
-	if (kvm_rebooting) {
-		/* spin while reset goes on */
-		local_irq_enable();
-		while (true)
-			cpu_relax();
-	}
 	/* Fault while not rebooting.  We want the trace. */
 	BUG();
 }
-EXPORT_SYMBOL_GPL(kvm_handle_fault_on_reboot);
+EXPORT_SYMBOL_GPL(kvm_spurious_fault);
 
 static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
 		      void *v)
@@ -2041,7 +2190,7 @@
 	 */
 	printk(KERN_INFO "kvm: exiting hardware virtualization\n");
 	kvm_rebooting = true;
-	on_each_cpu(hardware_disable, NULL, 1);
+	on_each_cpu(hardware_disable_nolock, NULL, 1);
 	return NOTIFY_OK;
 }
 
@@ -2211,7 +2360,7 @@
 static int kvm_suspend(struct sys_device *dev, pm_message_t state)
 {
 	if (kvm_usage_count)
-		hardware_disable(NULL);
+		hardware_disable_nolock(NULL);
 	return 0;
 }
 
@@ -2219,7 +2368,7 @@
 {
 	if (kvm_usage_count) {
 		WARN_ON(spin_is_locked(&kvm_lock));
-		hardware_enable(NULL);
+		hardware_enable_nolock(NULL);
 	}
 	return 0;
 }
@@ -2336,6 +2485,10 @@
 		goto out_free_5;
 	}
 
+	r = kvm_async_pf_init();
+	if (r)
+		goto out_free;
+
 	kvm_chardev_ops.owner = module;
 	kvm_vm_fops.owner = module;
 	kvm_vcpu_fops.owner = module;
@@ -2343,7 +2496,7 @@
 	r = misc_register(&kvm_dev);
 	if (r) {
 		printk(KERN_ERR "kvm: misc device register failed\n");
-		goto out_free;
+		goto out_unreg;
 	}
 
 	kvm_preempt_ops.sched_in = kvm_sched_in;
@@ -2353,6 +2506,8 @@
 
 	return 0;
 
+out_unreg:
+	kvm_async_pf_deinit();
 out_free:
 	kmem_cache_destroy(kvm_vcpu_cache);
 out_free_5:
@@ -2385,11 +2540,12 @@
 	kvm_exit_debug();
 	misc_deregister(&kvm_dev);
 	kmem_cache_destroy(kvm_vcpu_cache);
+	kvm_async_pf_deinit();
 	sysdev_unregister(&kvm_sysdev);
 	sysdev_class_unregister(&kvm_sysdev_class);
 	unregister_reboot_notifier(&kvm_reboot_notifier);
 	unregister_cpu_notifier(&kvm_cpu_notifier);
-	on_each_cpu(hardware_disable, NULL, 1);
+	on_each_cpu(hardware_disable_nolock, NULL, 1);
 	kvm_arch_hardware_unsetup();
 	kvm_arch_exit();
 	free_cpumask_var(cpus_hardware_enabled);